From 7a3604823cdbeb39c6cf7c1ff09efea9054b3e4b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 24 Jul 2023 02:29:26 +0200 Subject: [PATCH 0001/2361] Stricter tryParse --- src/IO/ReadHelpers.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 2636898c1b3..98fafe37cff 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -1398,6 +1398,7 @@ inline T parse(const char * data, size_t size) T res; ReadBufferFromMemory buf(data, size); readText(res, buf); + assertEOF(buf); return res; } @@ -1405,7 +1406,9 @@ template inline bool tryParse(T & res, const char * data, size_t size) { ReadBufferFromMemory buf(data, size); - return tryReadText(res, buf); + if (!tryReadText(res, buf)) + return false; + return buf.eof(); } template From 9dfed89dc85f017c1c8ebd0e077a07f793fab78b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 4 Aug 2023 20:28:34 +0200 Subject: [PATCH 0002/2361] Fix Keeper --- src/Coordination/KeeperSnapshotManager.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index a216272a9e1..f30a716206d 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -56,7 +56,7 @@ namespace std::filesystem::path path(snapshot_path); std::string filename = path.stem(); Strings name_parts; - splitInto<'_'>(name_parts, filename); + splitInto<'_', '.'>(name_parts, filename); return parse(name_parts[1]); } @@ -440,7 +440,6 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial } } - size_t active_sessions_size; readBinary(active_sessions_size, in); From aee9612e5127f9cbe0a05c12a08480fc5072ff44 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 4 Aug 2023 20:53:56 +0200 Subject: [PATCH 0003/2361] Fix strange trash --- src/Core/SettingsFields.cpp | 2 +- src/Core/SettingsFields.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 9b8677a9888..207a7464228 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -191,7 +191,7 @@ SettingFieldMaxThreads & SettingFieldMaxThreads::operator=(const Field & f) String SettingFieldMaxThreads::toString() const { if (is_auto) - return "'auto(" + ::DB::toString(value) + ")'"; + return "auto(" + ::DB::toString(value) + ")"; else return ::DB::toString(value); } diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index 32fffd3af06..e25d688bcf4 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -155,7 +155,7 @@ struct SettingFieldMaxThreads operator UInt64() const { return value; } /// NOLINT explicit operator Field() const { return value; } - /// Writes "auto()" instead of simple "" if `is_auto==true`. + /// Writes "auto()" instead of simple "" if `is_auto == true`. 
     String toString() const;
     void parseFromString(const String & str);

From 347a2ed46cfa9b759d159735d43e5d303471f6d9 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 5 Aug 2023 01:43:54 +0200
Subject: [PATCH 0004/2361] Fix a comment

---
 base/poco/Data/include/Poco/Data/TypeHandler.h | 2 +-
 programs/format/Format.cpp                     | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/base/poco/Data/include/Poco/Data/TypeHandler.h b/base/poco/Data/include/Poco/Data/TypeHandler.h
index 34f88e986f7..e7633de7018 100644
--- a/base/poco/Data/include/Poco/Data/TypeHandler.h
+++ b/base/poco/Data/include/Poco/Data/TypeHandler.h
@@ -97,7 +97,7 @@ namespace Data
 	///
 	///	static void extract(std::size_t pos, Person& obj, const Person& defVal, AbstractExtractor::Ptr pExt)
 	///	{
-	///		// defVal is the default person we should use if we encunter NULL entries, so we take the individual fields
+	///		// defVal is the default person we should use if we encounter NULL entries, so we take the individual fields
 	///		// as defaults. You can do more complex checking, ie return defVal if only one single entry of the fields is null etc...
 	///		poco_assert_dbg (!pExt.isNull());
 	///		std::string lastName;
diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp
index 43c66a32302..d7d61bbcd3b 100644
--- a/programs/format/Format.cpp
+++ b/programs/format/Format.cpp
@@ -163,13 +163,15 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
         {
             ASTPtr res = parseQueryAndMovePosition(
                 parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth);
-            /// For insert query with data(INSERT INTO ... VALUES ...), will lead to format fail,
-            /// should throw exception early and make exception message more readable.
+
+            /// For an insert query with data (INSERT INTO ... VALUES ...), formatting would fail,
+            /// so we should throw an exception early and make the exception message more readable.
if (const auto * insert_query = res->as(); insert_query && insert_query->data) { throw Exception(DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA, "Can't format ASTInsertQuery with data, since data will be lost"); } + if (!quiet) { if (!backslash) From 576a8406946b085f9438328f3e8bc1b9e1f66fb2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 22 Oct 2023 07:20:28 +0200 Subject: [PATCH 0005/2361] Fix error --- src/Interpreters/Cluster.cpp | 36 +++++++++++++++++++++++++----------- src/Interpreters/Cluster.h | 2 +- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 82c3d48bc05..1819e00ac4f 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -225,21 +225,31 @@ String Cluster::Address::toFullString(bool use_compact_format) const } } -Cluster::Address Cluster::Address::fromFullString(const String & full_string) +Cluster::Address Cluster::Address::fromFullString(std::string_view full_string) { - const char * address_begin = full_string.data(); - const char * address_end = address_begin + full_string.size(); - - const char * user_pw_end = strchr(full_string.data(), '@'); + std::string_view user_password; + if (auto pos = full_string.find('@'); pos != std::string_view::npos) + user_password = full_string.substr(pos + 1); /// parsing with the new shard{shard_index}[_replica{replica_index}] format - if (!user_pw_end && startsWith(full_string, "shard")) + if (user_password.empty() && full_string.starts_with("shard")) { - const char * underscore = strchr(full_string.data(), '_'); - Address address; - address.shard_index = parse(address_begin + strlen("shard")); - address.replica_index = underscore ? parse(underscore + strlen("_replica")) : 0; + + if (auto underscore_pos = full_string.find('_'); underscore_pos != std::string_view::npos) + { + address.shard_index = parse(full_string.substr(0, underscore_pos).substr(strlen("shard"))); + + if (!full_string.substr(underscore_pos + 1).starts_with("replica")) + throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect address '{}', should be in a form of `shardN` or `shardN_replicaM`", full_string); + + address.replica_index = parse(full_string.substr(underscore_pos + 1 + strlen("replica"))); + } + else + { + address.shard_index = parse(full_string.substr(strlen("shard"))); + address.replica_index = 0; + } return address; } @@ -250,9 +260,13 @@ Cluster::Address Cluster::Address::fromFullString(const String & full_string) /// - credentials are exposed in file name; /// - the file name can be too long. 
+ const char * address_begin = full_string.data(); + const char * address_end = address_begin + full_string.size(); + const char * user_pw_end = strchr(address_begin, '@'); + Protocol::Secure secure = Protocol::Secure::Disable; const char * secure_tag = "+secure"; - if (endsWith(full_string, secure_tag)) + if (full_string.ends_with(secure_tag)) { address_end -= strlen(secure_tag); secure = Protocol::Secure::Enable; diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index b2bc03dd74d..042a43040c2 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -166,7 +166,7 @@ public: String toFullString(bool use_compact_format) const; /// Returns address with only shard index and replica index or full address without shard index and replica index - static Address fromFullString(const String & address_full_string); + static Address fromFullString(std::string_view full_string); /// Returns resolved address if it does resolve. std::optional getResolvedAddress() const; From f2757a5c645d4747d328744c0433a3acd389845c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 22 Oct 2023 07:23:25 +0200 Subject: [PATCH 0006/2361] Fix error --- src/Interpreters/Cluster.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 1819e00ac4f..027b3cdf76b 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -240,10 +240,16 @@ Cluster::Address Cluster::Address::fromFullString(std::string_view full_string) { address.shard_index = parse(full_string.substr(0, underscore_pos).substr(strlen("shard"))); - if (!full_string.substr(underscore_pos + 1).starts_with("replica")) - throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect address '{}', should be in a form of `shardN` or `shardN_replicaM`", full_string); - - address.replica_index = parse(full_string.substr(underscore_pos + 1 + strlen("replica"))); + if (full_string.substr(underscore_pos + 1).starts_with("replica")) + { + address.replica_index = parse(full_string.substr(underscore_pos + 1 + strlen("replica"))); + } + else if (full_string.substr(underscore_pos + 1).starts_with("all_replicas")) + { + address.replica_index = 0; + } + else + throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect address '{}', should be in a form of `shardN_all_replicas` or `shardN_replicaM`", full_string); } else { From 753ea3ab889684e2f994c424821b8f7923f55534 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 28 Oct 2023 20:04:10 +0200 Subject: [PATCH 0007/2361] Fix unit test --- src/Common/ZooKeeper/ZooKeeper.h | 1 + src/Coordination/RaftServerConfig.cpp | 8 ++++++-- src/Databases/TablesLoader.h | 2 ++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index c41d1d8dbab..15cbd8b2f89 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -548,6 +548,7 @@ public: void setZooKeeperLog(std::shared_ptr zk_log_); UInt32 getSessionUptime() const { return static_cast(session_uptime.elapsedSeconds()); } + bool hasReachedDeadline() const { return impl->hasReachedDeadline(); } void setServerCompletelyStarted(); diff --git a/src/Coordination/RaftServerConfig.cpp b/src/Coordination/RaftServerConfig.cpp index 929eeeb640e..bafc177b736 100644 --- a/src/Coordination/RaftServerConfig.cpp +++ b/src/Coordination/RaftServerConfig.cpp @@ -26,12 +26,16 @@ std::optional RaftServerConfig::parse(std::string_view server) if 
(!with_id_endpoint && !with_server_type && !with_priority) return std::nullopt; - const std::string_view id_str = parts[0]; + std::string_view id_str = parts[0]; if (!id_str.starts_with("server.")) return std::nullopt; + id_str = id_str.substr(7); + if (auto eq_pos = id_str.find('='); std::string_view::npos != eq_pos) + id_str = id_str.substr(0, eq_pos); + Int32 id; - if (!tryParse(id, std::next(id_str.begin(), 7))) + if (!tryParse(id, id_str)) return std::nullopt; if (id <= 0) return std::nullopt; diff --git a/src/Databases/TablesLoader.h b/src/Databases/TablesLoader.h index eb07351bd7f..31702db0345 100644 --- a/src/Databases/TablesLoader.h +++ b/src/Databases/TablesLoader.h @@ -1,4 +1,5 @@ #pragma once + #include #include #include @@ -12,6 +13,7 @@ #include #include + namespace Poco { class Logger; // NOLINT(cppcoreguidelines-virtual-class-destructor) From 0f3506bf7af20231a290ed97a59b0e233fbc7c6e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 28 Oct 2023 20:05:45 +0200 Subject: [PATCH 0008/2361] Fix integration test --- tests/integration/test_cgroup_limit/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_cgroup_limit/test.py b/tests/integration/test_cgroup_limit/test.py index e77b0f70960..5d56135d9ff 100644 --- a/tests/integration/test_cgroup_limit/test.py +++ b/tests/integration/test_cgroup_limit/test.py @@ -46,7 +46,7 @@ def test_cgroup_cpu_limit(): "clickhouse local -q \"select value from system.settings where name='max_threads'\"", num_cpus, ) - expect_output = (r"\'auto({})\'".format(math.ceil(num_cpus))).encode() + expect_output = (r"auto({})".format(math.ceil(num_cpus))).encode() assert ( result.strip() == expect_output ), f"fail for cpu limit={num_cpus}, result={result.strip()}, expect={expect_output}" From 1d81bd539bc4e2041d6a494ab915ae9ca52389d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 9 Aug 2023 10:14:10 +0000 Subject: [PATCH 0009/2361] Constexprify conditions --- src/Storages/Kafka/StorageKafka.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index c17defca673..723399bf206 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -939,14 +939,14 @@ void registerStorageKafka(StorageFactory & factory) /* move engine args to settings */ \ else \ { \ - if ((EVAL) == 1) \ + if constexpr ((EVAL) == 1) \ { \ engine_args[(ARG_NUM)-1] = \ evaluateConstantExpressionAsLiteral( \ engine_args[(ARG_NUM)-1], \ args.getLocalContext()); \ } \ - if ((EVAL) == 2) \ + if constexpr ((EVAL) == 2) \ { \ engine_args[(ARG_NUM)-1] = \ evaluateConstantExpressionOrIdentifierAsLiteral( \ From 72f21065bf9903d7f3e6178143dc093528ab0b86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 10 Aug 2023 10:56:17 +0000 Subject: [PATCH 0010/2361] Add keeper path to Kafka table engine --- src/Core/Settings.h | 1 + src/Storages/Kafka/KafkaSettings.h | 1 + src/Storages/Kafka/StorageKafka.cpp | 9 +++++++++ 3 files changed, 11 insertions(+) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index a6280a28436..bba0791e1fc 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -649,6 +649,7 @@ class IColumn; M(UInt64, max_size_to_preallocate_for_aggregation, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \ \ M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit 
on kafka_num_consumers that depends on the number of available CPU cores", 0) \ + M(Bool, allow_experimental_kafka_store_offsets_in_keeper, false, "Allow experimental feature to store Kafka related offsets in Keeper", 0) \ M(Bool, enable_software_prefetch_in_aggregation, true, "Enable use of software prefetch in aggregation", 0) \ M(Bool, allow_aggregate_partitions_independently, false, "Enable independent aggregation of partitions on separate threads when partition key suits group by key. Beneficial when number of partitions close to number of cores and partitions have roughly the same size", 0) \ M(Bool, force_aggregate_partitions_independently, false, "Force the use of optimization when it is applicable, but heuristics decided not to use it", 0) \ diff --git a/src/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h index 075e79c96f0..590ee0cbbc5 100644 --- a/src/Storages/Kafka/KafkaSettings.h +++ b/src/Storages/Kafka/KafkaSettings.h @@ -31,6 +31,7 @@ class ASTStorage; M(StreamingHandleErrorMode, kafka_handle_error_mode, StreamingHandleErrorMode::DEFAULT, "How to handle errors for Kafka engine. Possible values: default (throw an exception after rabbitmq_skip_broken_messages broken messages), stream (save broken messages and errors in virtual columns _raw_message, _error).", 0) \ M(Bool, kafka_commit_on_select, false, "Commit messages when select query is made", 0) \ M(UInt64, kafka_max_rows_per_message, 1, "The maximum number of rows produced in one kafka message for row-based formats.", 0) \ + M(String, keeper_path, "", "TODO(antaljanosbenjamin)", 0) \ #define OBSOLETE_KAFKA_SETTINGS(M, ALIAS) \ MAKE_OBSOLETE(M, Char, kafka_row_delimiter, '\0') \ diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 723399bf206..361b17db2bd 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -76,6 +76,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int QUERY_NOT_ALLOWED; + extern const int SUPPORT_IS_DISABLED; } struct StorageKafkaInterceptors @@ -992,6 +993,7 @@ void registerStorageKafka(StorageFactory & factory) CHECK_KAFKA_STORAGE_ARGUMENT(15, kafka_handle_error_mode, 0) CHECK_KAFKA_STORAGE_ARGUMENT(16, kafka_commit_on_select, 0) CHECK_KAFKA_STORAGE_ARGUMENT(17, kafka_max_rows_per_message, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(18, keeper_path, 0) } #undef CHECK_KAFKA_STORAGE_ARGUMENT @@ -1040,6 +1042,13 @@ void registerStorageKafka(StorageFactory & factory) "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); } + if (kafka_settings->keeper_path.changed && !args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper){ + + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "Storing the Kafka offsets in Keeper is experimental. 
" + "Set `allow_experimental_kafka_store_offsets_in_keeper` setting to enable it"); + } + return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); }; From ba55c09fbced1b1cf268e4ee55ad0d1d2603f493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 15 Aug 2023 11:21:44 +0000 Subject: [PATCH 0011/2361] Add `KafkaStorage2` --- src/Storages/Kafka/KafkaConsumer2.cpp | 530 ++++++++++++++++ src/Storages/Kafka/KafkaConsumer2.h | 116 ++++ src/Storages/Kafka/KafkaSettings.h | 2 +- src/Storages/Kafka/KafkaSource2.cpp | 303 +++++++++ src/Storages/Kafka/KafkaSource2.h | 64 ++ src/Storages/Kafka/StorageKafka.cpp | 340 +--------- src/Storages/Kafka/StorageKafka.h | 6 +- src/Storages/Kafka/StorageKafka2.cpp | 722 ++++++++++++++++++++++ src/Storages/Kafka/StorageKafka2.h | 149 +++++ src/Storages/Kafka/StorageKafkaCommon.cpp | 386 ++++++++++++ src/Storages/Kafka/StorageKafkaCommon.h | 48 ++ 11 files changed, 2328 insertions(+), 338 deletions(-) create mode 100644 src/Storages/Kafka/KafkaConsumer2.cpp create mode 100644 src/Storages/Kafka/KafkaConsumer2.h create mode 100644 src/Storages/Kafka/KafkaSource2.cpp create mode 100644 src/Storages/Kafka/KafkaSource2.h create mode 100644 src/Storages/Kafka/StorageKafka2.cpp create mode 100644 src/Storages/Kafka/StorageKafka2.h create mode 100644 src/Storages/Kafka/StorageKafkaCommon.cpp create mode 100644 src/Storages/Kafka/StorageKafkaCommon.h diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp new file mode 100644 index 00000000000..ec32248af46 --- /dev/null +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -0,0 +1,530 @@ +// Needs to go first because its partial specialization of fmt::formatter +// should be defined before any instantiation +#include + +#include +#include + +#include + +#include +#include +#include + +#include +#include + +namespace CurrentMetrics +{ + extern const Metric KafkaAssignedPartitions; + extern const Metric KafkaConsumersWithAssignment; +} + +namespace ProfileEvents +{ + extern const Event KafkaRebalanceRevocations; + extern const Event KafkaRebalanceAssignments; + extern const Event KafkaRebalanceErrors; + extern const Event KafkaMessagesPolled; + extern const Event KafkaCommitFailures; + extern const Event KafkaCommits; + extern const Event KafkaConsumerErrors; +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CANNOT_COMMIT_OFFSET; +} + +using namespace std::chrono_literals; +const auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000; +const std::size_t POLL_TIMEOUT_WO_ASSIGNMENT_MS = 50; +const auto DRAIN_TIMEOUT_MS = 5000ms; + + +KafkaConsumer2::KafkaConsumer2( + ConsumerPtr consumer_, + Poco::Logger * log_, + size_t max_batch_size, + size_t poll_timeout_, + bool intermediate_commit_, + const std::atomic & stopped_, + const Names & _topics) + : consumer(consumer_) + , log(log_) + , batch_size(max_batch_size) + , poll_timeout(poll_timeout_) + , intermediate_commit(intermediate_commit_) + , stopped(stopped_) + , current(messages.begin()) + , topics(_topics) +{ + // called (synchronously, during poll) when we enter the consumer group + consumer->set_assignment_callback([this](const cppkafka::TopicPartitionList & topic_partitions) + { + CurrentMetrics::add(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size()); + ProfileEvents::increment(ProfileEvents::KafkaRebalanceAssignments); + + if (topic_partitions.empty()) + { + LOG_INFO(log, "Got empty assignment: Not enough partitions in the 
topic for all consumers?");
+        }
+        else
+        {
+            LOG_TRACE(log, "Topics/partitions assigned: {}", topic_partitions);
+            CurrentMetrics::add(CurrentMetrics::KafkaConsumersWithAssignment, 1);
+        }
+
+        assignment = topic_partitions;
+    });
+
+    // called (synchronously, during poll) when we leave the consumer group
+    consumer->set_revocation_callback([this](const cppkafka::TopicPartitionList & topic_partitions)
+    {
+        CurrentMetrics::sub(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size());
+        ProfileEvents::increment(ProfileEvents::KafkaRebalanceRevocations);
+
+        // Rebalance is happening now, and now we have a chance to finish the work
+        // with topics/partitions we were working with before rebalance
+        LOG_TRACE(log, "Rebalance initiated. Revoking partitions: {}", topic_partitions);
+
+        if (!topic_partitions.empty())
+        {
+            CurrentMetrics::sub(CurrentMetrics::KafkaConsumersWithAssignment, 1);
+        }
+
+        // we can not flush data to target from that point (it is pulled, not pushed)
+        // so the best we can do now is to
+        // 1) repeat the last commit in sync mode (an async one could still be in the queue, we need to be sure it is properly committed before rebalance)
+        // 2) stop / break the current reading:
+        //      * clean buffered non-committed messages
+        //      * set flag / flush
+
+        cleanUnprocessed();
+
+        stalled_status = REBALANCE_HAPPENED;
+        assignment.reset();
+        waited_for_assignment = 0;
+
+        // for now we use slower (but reliable) sync commit in main loop, so no need to repeat
+        // try
+        // {
+        //      consumer->commit();
+        // }
+        // catch (cppkafka::HandleException & e)
+        // {
+        //      LOG_WARNING(log, "Commit error: {}", e.what());
+        // }
+    });
+
+    consumer->set_rebalance_error_callback([this](cppkafka::Error err)
+    {
+        LOG_ERROR(log, "Rebalance error: {}", err);
+        ProfileEvents::increment(ProfileEvents::KafkaRebalanceErrors);
+    });
+}
+
+KafkaConsumer2::~KafkaConsumer2()
+{
+    try
+    {
+        if (!consumer->get_subscription().empty())
+        {
+            try
+            {
+                consumer->unsubscribe();
+            }
+            catch (const cppkafka::HandleException & e)
+            {
+                LOG_ERROR(log, "Error during unsubscribe: {}", e.what());
+            }
+            drain();
+        }
+    }
+    catch (const cppkafka::HandleException & e)
+    {
+        LOG_ERROR(log, "Error while destructing consumer: {}", e.what());
+    }
+
+}
+
+// Needed to drain the rest of the messages / queued callback calls from the consumer
+// after unsubscribe, otherwise the consumer will hang on destruction
+// see https://github.com/edenhill/librdkafka/issues/2077
+// https://github.com/confluentinc/confluent-kafka-go/issues/189 etc.
+void KafkaConsumer2::drain()
+{
+    auto start_time = std::chrono::steady_clock::now();
+    cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR);
+
+    while (true)
+    {
+        auto msg = consumer->poll(100ms);
+        if (!msg)
+            break;
+
+        auto error = msg.get_error();
+
+        if (error)
+        {
+            if (msg.is_eof() || error == last_error)
+            {
+                break;
+            }
+            else
+            {
+                LOG_ERROR(log, "Error during draining: {}", error);
+            }
+        }
+
+        // we don't stop draining on the first error,
+        // only if it repeats once again sequentially
+        last_error = error;
+
+        auto ts = std::chrono::steady_clock::now();
+        if (std::chrono::duration_cast<std::chrono::milliseconds>(ts-start_time) > DRAIN_TIMEOUT_MS)
+        {
+            LOG_ERROR(log, "Timeout during draining.");
+            break;
+        }
+    }
+}
+
+
+void KafkaConsumer2::commit()
+{
+    auto print_offsets = [this] (const char * prefix, const cppkafka::TopicPartitionList & offsets)
+    {
+        for (const auto & topic_part : offsets)
+        {
+            auto print_special_offset = [&topic_part]
+            {
+                switch (topic_part.get_offset())
+                {
+                    case cppkafka::TopicPartition::OFFSET_BEGINNING: return "BEGINNING";
+                    case cppkafka::TopicPartition::OFFSET_END: return "END";
+                    case cppkafka::TopicPartition::OFFSET_STORED: return "STORED";
+                    case cppkafka::TopicPartition::OFFSET_INVALID: return "INVALID";
+                    default: return "";
+                }
+            };
+
+            if (topic_part.get_offset() < 0)
+            {
+                LOG_TRACE(log, "{} {} (topic: {}, partition: {})", prefix, print_special_offset(), topic_part.get_topic(), topic_part.get_partition());
+            }
+            else
+            {
+                LOG_TRACE(log, "{} {} (topic: {}, partition: {})", prefix, topic_part.get_offset(), topic_part.get_topic(), topic_part.get_partition());
+            }
+        }
+    };
+
+    print_offsets("Polled offset", consumer->get_offsets_position(consumer->get_assignment()));
+
+    if (hasMorePolledMessages())
+    {
+        LOG_WARNING(log, "Logical error. Not all polled messages were processed.");
+    }
+
+    if (offsets_stored > 0)
+    {
+        // if we do an async commit here (which is faster),
+        // we may need to repeat the commit in sync mode in the revocation callback,
+        // but it seems like the existing API doesn't allow us to do that
+        // in a controlled manner (i.e. we don't know the offsets to commit then)
+
+        size_t max_retries = 5;
+        bool committed = false;
+
+        while (!committed && max_retries > 0)
+        {
+            try
+            {
+                // See https://github.com/edenhill/librdkafka/issues/1470
+                // broker may reject commit if during offsets.commit.timeout.ms (5000 by default),
+                // there were not enough replicas available for the __consumer_offsets topic.
+                // also some other temporary issues like client-server connectivity problems are possible
+                consumer->commit();
+                committed = true;
+                print_offsets("Committed offset", consumer->get_offsets_committed(consumer->get_assignment()));
+            }
+            catch (const cppkafka::HandleException & e)
+            {
+                // If there were actually no offsets to commit, return. Retrying won't solve
+                // anything here
+                if (e.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET)
+                    committed = true;
+                else
+                    LOG_ERROR(log, "Exception during commit attempt: {}", e.what());
+            }
+            --max_retries;
+        }
+
+        if (!committed)
+        {
+            // TODO: insert atomicity / transactions is needed here (possibility to rollback, on 2 phase commits)
+            ProfileEvents::increment(ProfileEvents::KafkaCommitFailures);
+            throw Exception(ErrorCodes::CANNOT_COMMIT_OFFSET,
+                            "All commit attempts failed. Last block was already written to target table(s), "
+                            "but was not committed to Kafka.");
+        }
+        else
+        {
+            ProfileEvents::increment(ProfileEvents::KafkaCommits);
+        }
+
+    }
+    else
+    {
+        LOG_TRACE(log, "Nothing to commit.");
+    }
+
+    offsets_stored = 0;
+}
+
+void KafkaConsumer2::subscribe()
+{
+    LOG_TRACE(log, "Already subscribed to topics: [{}]", boost::algorithm::join(consumer->get_subscription(), ", "));
+
+    if (assignment.has_value())
+    {
+        LOG_TRACE(log, "Already assigned to: {}", assignment.value());
+    }
+    else
+    {
+        LOG_TRACE(log, "No assignment");
+    }
+
+
+    size_t max_retries = 5;
+
+    while (consumer->get_subscription().empty())
+    {
+        --max_retries;
+        try
+        {
+            consumer->subscribe(topics);
+            // FIXME: if we failed to receive "subscribe" response while polling and destroy consumer now, then we may hang up.
+            // see https://github.com/edenhill/librdkafka/issues/2077
+        }
+        catch (cppkafka::HandleException & e)
+        {
+            if (max_retries > 0 && e.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT)
+                continue;
+            throw;
+        }
+    }
+
+    cleanUnprocessed();
+
+    // we can reset any flags (except CONSUMER_STOPPED) before attempting to read a new block of data
+    if (stalled_status != CONSUMER_STOPPED)
+        stalled_status = NO_MESSAGES_RETURNED;
+}
+
+void KafkaConsumer2::cleanUnprocessed()
+{
+    messages.clear();
+    current = messages.begin();
+    offsets_stored = 0;
+}
+
+void KafkaConsumer2::unsubscribe()
+{
+    LOG_TRACE(log, "Re-joining claimed consumer after failure");
+    cleanUnprocessed();
+
+    // it should not raise an exception, as it is used in the destructor
+    try
+    {
+        // From docs: Any previous subscription will be unassigned and unsubscribed first.
+        consumer->subscribe(topics);
+
+        // I wanted to avoid explicit unsubscribe as it requires draining the messages
+        // to close the consumer safely after unsubscribe
+        // see https://github.com/edenhill/librdkafka/issues/2077
+        // https://github.com/confluentinc/confluent-kafka-go/issues/189 etc.
+    }
+    catch (const cppkafka::HandleException & e)
+    {
+        LOG_ERROR(log, "Exception from KafkaConsumer2::unsubscribe: {}", e.what());
+    }
+
+}
+
+
+void KafkaConsumer2::resetToLastCommitted(const char * msg)
+{
+    if (!assignment.has_value() || assignment->empty())
+    {
+        LOG_TRACE(log, "Not assigned. Can't reset to the last committed position.");
+        return;
+    }
+    auto committed_offset = consumer->get_offsets_committed(consumer->get_assignment());
+    consumer->assign(committed_offset);
+    LOG_TRACE(log, "{} Returned to committed position: {}", msg, committed_offset);
+}
+
+// it does the poll when needed
+ReadBufferPtr KafkaConsumer2::consume()
+{
+    resetIfStopped();
+
+    if (polledDataUnusable())
+        return nullptr;
+
+    if (hasMorePolledMessages())
+        return getNextMessage();
+
+    if (intermediate_commit)
+        commit();
+
+    while (true)
+    {
+        stalled_status = NO_MESSAGES_RETURNED;
+
+        // we already waited long enough for an assignment in the past,
+        // let's make polls shorter and not block other consumers
+        // which can work successfully in parallel
+        // POLL_TIMEOUT_WO_ASSIGNMENT_MS (50ms) is 100% enough just to check if we got the assignment
+        // (see https://github.com/ClickHouse/ClickHouse/issues/11218)
+        auto actual_poll_timeout_ms = (waited_for_assignment >= MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS)
+                        ? std::min(POLL_TIMEOUT_WO_ASSIGNMENT_MS,poll_timeout)
+                        : poll_timeout;
+
+        /// Don't drop old messages immediately, since we may need them for virtual columns.
+ auto new_messages = consumer->poll_batch(batch_size, + std::chrono::milliseconds(actual_poll_timeout_ms)); + + resetIfStopped(); + if (stalled_status == CONSUMER_STOPPED) + { + return nullptr; + } + else if (stalled_status == REBALANCE_HAPPENED) + { + if (!new_messages.empty()) + { + // we have polled something just after rebalance. + // we will not use current batch, so we need to return to last committed position + // otherwise we will continue polling from that position + resetToLastCommitted("Rewind last poll after rebalance."); + } + return nullptr; + } + + if (new_messages.empty()) + { + // While we wait for an assignment after subscription, we'll poll zero messages anyway. + // If we're doing a manual select then it's better to get something after a wait, then immediate nothing. + if (!assignment.has_value()) + { + waited_for_assignment += poll_timeout; // slightly innaccurate, but rough calculation is ok. + if (waited_for_assignment < MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) + { + continue; + } + else + { + LOG_WARNING(log, "Can't get assignment. Will keep trying."); + stalled_status = NO_ASSIGNMENT; + return nullptr; + } + } + else if (assignment->empty()) + { + LOG_TRACE(log, "Empty assignment."); + return nullptr; + } + else + { + LOG_TRACE(log, "Stalled"); + return nullptr; + } + } + else + { + messages = std::move(new_messages); + current = messages.begin(); + LOG_TRACE(log, "Polled batch of {} messages. Offsets position: {}", + messages.size(), consumer->get_offsets_position(consumer->get_assignment())); + break; + } + } + + filterMessageErrors(); + if (current == messages.end()) + { + LOG_ERROR(log, "Only errors left"); + stalled_status = ERRORS_RETURNED; + return nullptr; + } + + ProfileEvents::increment(ProfileEvents::KafkaMessagesPolled, messages.size()); + + stalled_status = NOT_STALLED; + return getNextMessage(); +} + +ReadBufferPtr KafkaConsumer2::getNextMessage() +{ + if (current == messages.end()) + return nullptr; + + const auto * data = current->get_payload().get_data(); + size_t size = current->get_payload().get_size(); + ++current; + + if (data) + return std::make_shared(data, size); + + return getNextMessage(); +} + +size_t KafkaConsumer2::filterMessageErrors() +{ + assert(current == messages.begin()); + + size_t skipped = std::erase_if(messages, [this](auto & message) + { + if (auto error = message.get_error()) + { + ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); + LOG_ERROR(log, "Consumer error: {}", error); + return true; + } + return false; + }); + + if (skipped) + LOG_ERROR(log, "There were {} messages with an error", skipped); + + return skipped; +} + +void KafkaConsumer2::resetIfStopped() +{ + // we can react on stop only during fetching data + // after block is formed (i.e. 
during copying data to MV / committing) we ignore stop attempts + if (stopped) + { + stalled_status = CONSUMER_STOPPED; + cleanUnprocessed(); + } +} + + +void KafkaConsumer2::storeLastReadMessageOffset() +{ + if (!isStalled()) + { + consumer->store_offset(*(current - 1)); + ++offsets_stored; + } +} + +} diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h new file mode 100644 index 00000000000..6562a65a9b0 --- /dev/null +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace CurrentMetrics +{ + extern const Metric KafkaConsumers; +} + +namespace Poco +{ + class Logger; +} + +namespace DB +{ + +using ConsumerPtr = std::shared_ptr; + +class KafkaConsumer2 +{ +public: + KafkaConsumer2( + ConsumerPtr consumer_, + Poco::Logger * log_, + size_t max_batch_size, + size_t poll_timeout_, + bool intermediate_commit_, + const std::atomic & stopped_, + const Names & _topics + ); + + ~KafkaConsumer2(); + void commit(); // Commit all processed messages. + void subscribe(); // Subscribe internal consumer to topics. + void unsubscribe(); // Unsubscribe internal consumer in case of failure. + + auto pollTimeout() const { return poll_timeout; } + + inline bool hasMorePolledMessages() const + { + return (stalled_status == NOT_STALLED) && (current != messages.end()); + } + + inline bool polledDataUnusable() const + { + return (stalled_status != NOT_STALLED) && (stalled_status != NO_MESSAGES_RETURNED); + } + + inline bool isStalled() const { return stalled_status != NOT_STALLED; } + + void storeLastReadMessageOffset(); + void resetToLastCommitted(const char * msg); + + /// Polls batch of messages from Kafka and returns read buffer containing the next message or + /// nullptr when there are no messages to process. + ReadBufferPtr consume(); + + // Return values for the message that's being read. + String currentTopic() const { return current[-1].get_topic(); } + String currentKey() const { return current[-1].get_key(); } + auto currentOffset() const { return current[-1].get_offset(); } + auto currentPartition() const { return current[-1].get_partition(); } + auto currentTimestamp() const { return current[-1].get_timestamp(); } + const auto & currentHeaderList() const { return current[-1].get_header_list(); } + String currentPayload() const { return current[-1].get_payload(); } + +private: + using Messages = std::vector; + CurrentMetrics::Increment metric_increment{CurrentMetrics::KafkaConsumers}; + + enum StalledStatus + { + NOT_STALLED, + NO_MESSAGES_RETURNED, + REBALANCE_HAPPENED, + CONSUMER_STOPPED, + NO_ASSIGNMENT, + ERRORS_RETURNED + }; + + ConsumerPtr consumer; + Poco::Logger * log; + const size_t batch_size = 1; + const size_t poll_timeout = 0; + size_t offsets_stored = 0; + + StalledStatus stalled_status = NO_MESSAGES_RETURNED; + + bool intermediate_commit = true; + size_t waited_for_assignment = 0; + + const std::atomic & stopped; + + // order is important, need to be destructed before consumer + Messages messages; + Messages::const_iterator current; + + // order is important, need to be destructed before consumer + std::optional assignment; + const Names topics; + + void drain(); + void cleanUnprocessed(); + void resetIfStopped(); + /// Return number of messages with an error. 
+ size_t filterMessageErrors(); + ReadBufferPtr getNextMessage(); +}; + +} diff --git a/src/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h index 590ee0cbbc5..748090165d7 100644 --- a/src/Storages/Kafka/KafkaSettings.h +++ b/src/Storages/Kafka/KafkaSettings.h @@ -31,7 +31,7 @@ class ASTStorage; M(StreamingHandleErrorMode, kafka_handle_error_mode, StreamingHandleErrorMode::DEFAULT, "How to handle errors for Kafka engine. Possible values: default (throw an exception after rabbitmq_skip_broken_messages broken messages), stream (save broken messages and errors in virtual columns _raw_message, _error).", 0) \ M(Bool, kafka_commit_on_select, false, "Commit messages when select query is made", 0) \ M(UInt64, kafka_max_rows_per_message, 1, "The maximum number of rows produced in one kafka message for row-based formats.", 0) \ - M(String, keeper_path, "", "TODO(antaljanosbenjamin)", 0) \ + M(String, kafka_keeper_path, "", "TODO(antaljanosbenjamin)", 0) \ #define OBSOLETE_KAFKA_SETTINGS(M, ALIAS) \ MAKE_OBSOLETE(M, Char, kafka_row_delimiter, '\0') \ diff --git a/src/Storages/Kafka/KafkaSource2.cpp b/src/Storages/Kafka/KafkaSource2.cpp new file mode 100644 index 00000000000..3e14c57e8e2 --- /dev/null +++ b/src/Storages/Kafka/KafkaSource2.cpp @@ -0,0 +1,303 @@ +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace ProfileEvents +{ + extern const Event KafkaMessagesRead; + extern const Event KafkaMessagesFailed; + extern const Event KafkaRowsRead; + extern const Event KafkaRowsRejected; +} + +namespace DB +{ +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +// with default poll timeout (500ms) it will give about 5 sec delay for doing 10 retries +// when selecting from empty topic +const auto MAX_FAILED_POLL_ATTEMPTS = 10; + +KafkaSource2::KafkaSource2( + StorageKafka2 & storage_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + const Names & columns, + Poco::Logger * log_, + size_t max_block_size_, + bool commit_in_suffix_) + : ISource(storage_snapshot_->getSampleBlockForColumns(columns)) + , storage(storage_) + , storage_snapshot(storage_snapshot_) + , context(context_) + , column_names(columns) + , log(log_) + , max_block_size(max_block_size_) + , commit_in_suffix(commit_in_suffix_) + , non_virtual_header(storage_snapshot->metadata->getSampleBlockNonMaterialized()) + , virtual_header(storage_snapshot->getSampleBlockForColumns(storage.getVirtualColumnNames())) + , handle_error_mode(storage.getHandleKafkaErrorMode()) +{ +} + +KafkaSource2::~KafkaSource2() +{ + if (!consumer) + return; + + if (broken) + consumer->unsubscribe(); + + storage.pushConsumer(consumer); +} + +bool KafkaSource2::checkTimeLimit() const +{ + if (max_execution_time != 0) + { + auto elapsed_ns = total_stopwatch.elapsed(); + + if (elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) + return false; + } + + return true; +} + +Chunk KafkaSource2::generateImpl() +{ + if (!consumer) + { + auto timeout = std::chrono::milliseconds(context->getSettingsRef().kafka_max_wait_ms.totalMilliseconds()); + consumer = storage.popConsumer(timeout); + + if (!consumer) + return {}; + + consumer->subscribe(); + + broken = true; + } + + if (is_finished) + return {}; + + is_finished = true; + // now it's one-time usage InputStream + // one block of the needed size (or with desired flush timeout) is formed in one internal iteration + // otherwise external iteration will reuse that and logic will became even more fuzzy + 
MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); + + auto put_error_to_stream = handle_error_mode == HandleKafkaErrorMode::STREAM; + + EmptyReadBuffer empty_buf; + auto input_format = FormatFactory::instance().getInput( + storage.getFormatName(), empty_buf, non_virtual_header, context, max_block_size, std::nullopt, 1); + + std::optional exception_message; + size_t total_rows = 0; + size_t failed_poll_attempts = 0; + + auto on_error = [&](const MutableColumns & result_columns, Exception & e) + { + ProfileEvents::increment(ProfileEvents::KafkaMessagesFailed); + + if (put_error_to_stream) + { + exception_message = e.message(); + for (const auto & column : result_columns) + { + // read_kafka_message could already push some rows to result_columns + // before exception, we need to fix it. + auto cur_rows = column->size(); + if (cur_rows > total_rows) + column->popBack(cur_rows - total_rows); + + // all data columns will get default value in case of error + column->insertDefault(); + } + + return 1; + } + else + { + e.addMessage("while parsing Kafka message (topic: {}, partition: {}, offset: {})'", + consumer->currentTopic(), consumer->currentPartition(), consumer->currentOffset()); + throw std::move(e); + } + }; + + StreamingFormatExecutor executor(non_virtual_header, input_format, std::move(on_error)); + + while (true) + { + size_t new_rows = 0; + exception_message.reset(); + if (auto buf = consumer->consume()) + { + ProfileEvents::increment(ProfileEvents::KafkaMessagesRead); + new_rows = executor.execute(*buf); + } + + if (new_rows) + { + // In read_kafka_message(), KafkaConsumer::nextImpl() + // will be called, that may make something unusable, i.e. clean + // KafkaConsumer::messages, which is accessed from + // KafkaConsumer::currentTopic() (and other helpers). 
+ if (consumer->isStalled()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Polled messages became unusable"); + + ProfileEvents::increment(ProfileEvents::KafkaRowsRead, new_rows); + + consumer->storeLastReadMessageOffset(); + + auto topic = consumer->currentTopic(); + auto key = consumer->currentKey(); + auto offset = consumer->currentOffset(); + auto partition = consumer->currentPartition(); + auto timestamp_raw = consumer->currentTimestamp(); + auto header_list = consumer->currentHeaderList(); + + Array headers_names; + Array headers_values; + + if (!header_list.empty()) + { + headers_names.reserve(header_list.size()); + headers_values.reserve(header_list.size()); + for (const auto & header : header_list) + { + headers_names.emplace_back(header.get_name()); + headers_values.emplace_back(static_cast(header.get_value())); + } + } + + for (size_t i = 0; i < new_rows; ++i) + { + virtual_columns[0]->insert(topic); + virtual_columns[1]->insert(key); + virtual_columns[2]->insert(offset); + virtual_columns[3]->insert(partition); + if (timestamp_raw) + { + auto ts = timestamp_raw->get_timestamp(); + virtual_columns[4]->insert(std::chrono::duration_cast(ts).count()); + virtual_columns[5]->insert(DecimalField(std::chrono::duration_cast(ts).count(),3)); + } + else + { + virtual_columns[4]->insertDefault(); + virtual_columns[5]->insertDefault(); + } + virtual_columns[6]->insert(headers_names); + virtual_columns[7]->insert(headers_values); + if (put_error_to_stream) + { + if (exception_message) + { + auto payload = consumer->currentPayload(); + virtual_columns[8]->insert(payload); + virtual_columns[9]->insert(*exception_message); + } + else + { + virtual_columns[8]->insertDefault(); + virtual_columns[9]->insertDefault(); + } + } + } + + total_rows = total_rows + new_rows; + } + else if (consumer->polledDataUnusable()) + { + break; + } + else if (consumer->isStalled()) + { + ++failed_poll_attempts; + } + else + { + // We came here in case of tombstone (or sometimes zero-length) messages, and it is not something abnormal + // TODO: it seems like in case of put_error_to_stream=true we may need to process those differently + // currently we just skip them with note in logs. + consumer->storeLastReadMessageOffset(); + LOG_DEBUG(log, "Parsing of message (topic: {}, partition: {}, offset: {}) return no rows.", consumer->currentTopic(), consumer->currentPartition(), consumer->currentOffset()); + } + + if (!consumer->hasMorePolledMessages() + && (total_rows >= max_block_size || !checkTimeLimit() || failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS)) + { + break; + } + } + + if (total_rows == 0) + { + return {}; + } + else if (consumer->polledDataUnusable()) + { + // the rows were counted already before by KafkaRowsRead, + // so let's count the rows we ignore separately + // (they will be retried after the rebalance) + ProfileEvents::increment(ProfileEvents::KafkaRowsRejected, total_rows); + return {}; + } + + /// MATERIALIZED columns can be added here, but I think + // they are not needed here: + // and it's misleading to use them here, + // as columns 'materialized' that way stays 'ephemeral' + // i.e. will not be stored anythere + // If needed any extra columns can be added using DEFAULT they can be added at MV level if needed. 
+ + auto result_block = non_virtual_header.cloneWithColumns(executor.getResultColumns()); + auto virtual_block = virtual_header.cloneWithColumns(std::move(virtual_columns)); + + for (const auto & column : virtual_block.getColumnsWithTypeAndName()) + result_block.insert(column); + + auto converting_dag = ActionsDAG::makeConvertingActions( + result_block.cloneEmpty().getColumnsWithTypeAndName(), + getPort().getHeader().getColumnsWithTypeAndName(), + ActionsDAG::MatchColumnsMode::Name); + + auto converting_actions = std::make_shared(std::move(converting_dag)); + converting_actions->execute(result_block); + + return Chunk(result_block.getColumns(), result_block.rows()); +} + +Chunk KafkaSource2::generate() +{ + auto chunk = generateImpl(); + if (!chunk && commit_in_suffix) + commit(); + + return chunk; +} + +void KafkaSource2::commit() +{ + if (!consumer) + return; + + consumer->commit(); + + broken = false; +} + +} diff --git a/src/Storages/Kafka/KafkaSource2.h b/src/Storages/Kafka/KafkaSource2.h new file mode 100644 index 00000000000..0a49001a686 --- /dev/null +++ b/src/Storages/Kafka/KafkaSource2.h @@ -0,0 +1,64 @@ +#pragma once + +#include + +#include +#include +#include + + +namespace Poco +{ + class Logger; +} +namespace DB +{ + +class KafkaSource2 : public ISource +{ +public: + KafkaSource2( + StorageKafka2 & storage_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + const Names & columns, + Poco::Logger * log_, + size_t max_block_size_, + bool commit_in_suffix = false); + ~KafkaSource2() override; + + String getName() const override { return storage.getName(); } + + Chunk generate() override; + + void commit(); + bool isStalled() const { return !consumer || consumer->isStalled(); } + + void setTimeLimit(Poco::Timespan max_execution_time_) { max_execution_time = max_execution_time_; } + +private: + StorageKafka2 & storage; + StorageSnapshotPtr storage_snapshot; + ContextPtr context; + Names column_names; + Poco::Logger * log; + UInt64 max_block_size; + + KafkaConsumer2Ptr consumer; + bool broken = true; + bool is_finished = false; + bool commit_in_suffix; + + const Block non_virtual_header; + const Block virtual_header; + const HandleKafkaErrorMode handle_error_mode; + + Poco::Timespan max_execution_time = 0; + Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; + + bool checkTimeLimit() const; + + Chunk generateImpl(); +}; + +} diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 361b17db2bd..0c9285e2ef6 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -52,7 +53,6 @@ namespace CurrentMetrics { - extern const Metric KafkaLibrdkafkaThreads; extern const Metric KafkaBackgroundReads; extern const Metric KafkaConsumersInUse; extern const Metric KafkaWrites; @@ -73,175 +73,14 @@ namespace ErrorCodes { extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; - extern const int BAD_ARGUMENTS; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int QUERY_NOT_ALLOWED; - extern const int SUPPORT_IS_DISABLED; } -struct StorageKafkaInterceptors -{ - static rd_kafka_resp_err_t rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx) - { - StorageKafka * self = reinterpret_cast(ctx); - CurrentMetrics::add(CurrentMetrics::KafkaLibrdkafkaThreads, 1); - - const auto & storage_id = self->getStorageID(); - const auto & table = 
storage_id.getTableName(); - - switch (thread_type) - { - case RD_KAFKA_THREAD_MAIN: - setThreadName(("rdk:m/" + table.substr(0, 9)).c_str()); - break; - case RD_KAFKA_THREAD_BACKGROUND: - setThreadName(("rdk:bg/" + table.substr(0, 8)).c_str()); - break; - case RD_KAFKA_THREAD_BROKER: - setThreadName(("rdk:b/" + table.substr(0, 9)).c_str()); - break; - } - - /// Create ThreadStatus to track memory allocations from librdkafka threads. - // - /// And store them in a separate list (thread_statuses) to make sure that they will be destroyed, - /// regardless how librdkafka calls the hooks. - /// But this can trigger use-after-free if librdkafka will not destroy threads after rd_kafka_wait_destroyed() - auto thread_status = std::make_shared(); - std::lock_guard lock(self->thread_statuses_mutex); - self->thread_statuses.emplace_back(std::move(thread_status)); - - return RD_KAFKA_RESP_ERR_NO_ERROR; - } - static rd_kafka_resp_err_t rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx) - { - StorageKafka * self = reinterpret_cast(ctx); - CurrentMetrics::sub(CurrentMetrics::KafkaLibrdkafkaThreads, 1); - - std::lock_guard lock(self->thread_statuses_mutex); - const auto it = std::find_if(self->thread_statuses.begin(), self->thread_statuses.end(), [](const auto & thread_status_ptr) - { - return thread_status_ptr.get() == current_thread; - }); - if (it == self->thread_statuses.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread status for this librdkafka thread."); - - self->thread_statuses.erase(it); - - return RD_KAFKA_RESP_ERR_NO_ERROR; - } - - static rd_kafka_resp_err_t rdKafkaOnNew(rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/) - { - StorageKafka * self = reinterpret_cast(ctx); - rd_kafka_resp_err_t status; - - status = rd_kafka_interceptor_add_on_thread_start(rk, "init-thread", rdKafkaOnThreadStart, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - { - LOG_ERROR(self->log, "Cannot set on thread start interceptor due to {} error", status); - return status; - } - - status = rd_kafka_interceptor_add_on_thread_exit(rk, "exit-thread", rdKafkaOnThreadExit, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(self->log, "Cannot set on thread exit interceptor due to {} error", status); - - return status; - } - - static rd_kafka_resp_err_t rdKafkaOnConfDup(rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx) - { - StorageKafka * self = reinterpret_cast(ctx); - rd_kafka_resp_err_t status; - - // cppkafka copies configuration multiple times - status = rd_kafka_conf_interceptor_add_on_conf_dup(new_conf, "init", rdKafkaOnConfDup, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - { - LOG_ERROR(self->log, "Cannot set on conf dup interceptor due to {} error", status); - return status; - } - - status = rd_kafka_conf_interceptor_add_on_new(new_conf, "init", rdKafkaOnNew, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(self->log, "Cannot set on conf new interceptor due to {} error", status); - - return status; - } -}; - namespace { const auto RESCHEDULE_MS = 500; const auto CLEANUP_TIMEOUT_MS = 3000; const auto MAX_THREAD_WORK_DURATION_MS = 60000; // once per minute leave do reschedule (we can't lock threads in pool forever) - - const String CONFIG_KAFKA_TAG = "kafka"; - const String CONFIG_KAFKA_TOPIC_TAG = "kafka_topic"; - const String CONFIG_NAME_TAG = "name"; - - /// Read server configuration into cppkafka configuration, 
used by global configuration and by legacy per-topic configuration - void loadFromConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix) - { - /// Read all tags one level below - Poco::Util::AbstractConfiguration::Keys tags; - config.keys(config_prefix, tags); - - for (const auto & tag : tags) - { - if (tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) /// multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. - continue; /// used by new per-topic configuration, ignore - - const String setting_path = config_prefix + "." + tag; - const String setting_value = config.getString(setting_path); - - /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. - /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md - const String setting_name_in_kafka_config = (tag == "log_level") ? tag : boost::replace_all_copy(tag, "_", "."); - kafka_config.set(setting_name_in_kafka_config, setting_value); - } - } - - /// Read server configuration into cppkafa configuration, used by new per-topic configuration - void loadTopicConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const String & topic) - { - /// Read all tags one level below - Poco::Util::AbstractConfiguration::Keys tags; - config.keys(config_prefix, tags); - - for (const auto & tag : tags) - { - /// Only consider tag . Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. - if (!tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) - continue; - - /// Read topic name between ... - const String kafka_topic_path = config_prefix + "." + tag; - const String kafpa_topic_name_path = kafka_topic_path + "." + CONFIG_NAME_TAG; - - const String topic_name = config.getString(kafpa_topic_name_path); - if (topic_name == topic) - { - /// Found it! Now read the per-topic configuration into cppkafka. - Poco::Util::AbstractConfiguration::Keys inner_tags; - config.keys(kafka_topic_path, inner_tags); - for (const auto & inner_tag : inner_tags) - { - if (inner_tag == CONFIG_NAME_TAG) - continue; // ignore - - /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. - /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md - const String setting_path = kafka_topic_path + "." + inner_tag; - const String setting_value = config.getString(setting_path); - - const String setting_name_in_kafka_config = (inner_tag == "log_level") ? inner_tag : boost::replace_all_copy(inner_tag, "_", "."); - kafka_config.set(setting_name_in_kafka_config, setting_value); - } - } - } - } } StorageKafka::StorageKafka( @@ -575,8 +414,8 @@ size_t StorageKafka::getPollTimeoutMillisecond() const String StorageKafka::getConfigPrefix() const { if (!collection_name.empty()) - return "named_collections." + collection_name + "." + CONFIG_KAFKA_TAG; /// Add one more level to separate librdkafka configuration. - return CONFIG_KAFKA_TAG; + return "named_collections." + collection_name + "." + String{KafkaConfigLoader::CONFIG_KAFKA_TAG}; /// Add one more level to separate librdkafka configuration. 
+ return String{KafkaConfigLoader::CONFIG_KAFKA_TAG}; } void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config, @@ -590,7 +429,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config, const auto & config = getContext()->getConfigRef(); auto config_prefix = getConfigPrefix(); if (config.has(config_prefix)) - loadFromConfig(kafka_config, config, config_prefix); + KafkaConfigLoader::loadConfig(kafka_config, config, config_prefix); #if USE_KRB5 if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) @@ -631,7 +470,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config, { const auto topic_config_key = config_prefix + "_" + topic; if (config.has(topic_config_key)) - loadFromConfig(kafka_config, config, topic_config_key); + KafkaConfigLoader::loadConfig(kafka_config, config, topic_config_key); } // Update consumer topic-specific configuration (new syntax). Example with topics "football" and "baseball": @@ -651,7 +490,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config, // Kafka-related is below . for (const auto & topic : topics) if (config.has(config_prefix)) - loadTopicConfig(kafka_config, config, config_prefix, topic); + KafkaConfigLoader::loadTopicConfig(kafka_config, config, config_prefix, topic); // No need to add any prefix, messages can be distinguished kafka_config.set_log_callback([this](cppkafka::KafkaHandleBase &, int level, const std::string & facility, const std::string & message) @@ -888,173 +727,6 @@ bool StorageKafka::streamToViews() return some_stream_is_stalled; } -void registerStorageKafka(StorageFactory & factory) -{ - auto creator_fn = [](const StorageFactory::Arguments & args) - { - ASTs & engine_args = args.engine_args; - size_t args_count = engine_args.size(); - const bool has_settings = args.storage_def->settings; - - auto kafka_settings = std::make_unique(); - String collection_name; - if (auto named_collection = tryGetNamedCollectionWithOverrides(args.engine_args, args.getLocalContext())) - { - for (const auto & setting : kafka_settings->all()) - { - const auto & setting_name = setting.getName(); - if (named_collection->has(setting_name)) - kafka_settings->set(setting_name, named_collection->get(setting_name)); - } - collection_name = assert_cast(args.engine_args[0].get())->name(); - } - - if (has_settings) - { - kafka_settings->loadFromQuery(*args.storage_def); - } - - // Check arguments and settings - #define CHECK_KAFKA_STORAGE_ARGUMENT(ARG_NUM, PAR_NAME, EVAL) \ - /* One of the four required arguments is not specified */ \ - if (args_count < (ARG_NUM) && (ARG_NUM) <= 4 && \ - !kafka_settings->PAR_NAME.changed) \ - { \ - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,\ - "Required parameter '{}' " \ - "for storage Kafka not specified", \ - #PAR_NAME); \ - } \ - if (args_count >= (ARG_NUM)) \ - { \ - /* The same argument is given in two places */ \ - if (has_settings && \ - kafka_settings->PAR_NAME.changed) \ - { \ - throw Exception(ErrorCodes::BAD_ARGUMENTS, \ - "The argument №{} of storage Kafka " \ - "and the parameter '{}' " \ - "in SETTINGS cannot be specified at the same time", \ - #ARG_NUM, #PAR_NAME); \ - } \ - /* move engine args to settings */ \ - else \ - { \ - if constexpr ((EVAL) == 1) \ - { \ - engine_args[(ARG_NUM)-1] = \ - evaluateConstantExpressionAsLiteral( \ - engine_args[(ARG_NUM)-1], \ - args.getLocalContext()); \ - } \ - if constexpr ((EVAL) == 2) \ - { \ - engine_args[(ARG_NUM)-1] = \ - 
evaluateConstantExpressionOrIdentifierAsLiteral( \ - engine_args[(ARG_NUM)-1], \ - args.getLocalContext()); \ - } \ - kafka_settings->PAR_NAME = \ - engine_args[(ARG_NUM)-1]->as().value; \ - } \ - } - - /** Arguments of engine is following: - * - Kafka broker list - * - List of topics - * - Group ID (may be a constant expression with a string result) - * - Message format (string) - * - Row delimiter - * - Schema (optional, if the format supports it) - * - Number of consumers - * - Max block size for background consumption - * - Skip (at least) unreadable messages number - * - Do intermediate commits when the batch consumed and handled - */ - - /* 0 = raw, 1 = evaluateConstantExpressionAsLiteral, 2=evaluateConstantExpressionOrIdentifierAsLiteral */ - /// In case of named collection we already validated the arguments. - if (collection_name.empty()) - { - CHECK_KAFKA_STORAGE_ARGUMENT(1, kafka_broker_list, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(2, kafka_topic_list, 1) - CHECK_KAFKA_STORAGE_ARGUMENT(3, kafka_group_name, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(4, kafka_format, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(5, kafka_row_delimiter, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(6, kafka_schema, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(7, kafka_num_consumers, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(8, kafka_max_block_size, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(9, kafka_skip_broken_messages, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(10, kafka_commit_every_batch, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(11, kafka_client_id, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(12, kafka_poll_timeout_ms, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(13, kafka_flush_interval_ms, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(14, kafka_thread_per_consumer, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(15, kafka_handle_error_mode, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(16, kafka_commit_on_select, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(17, kafka_max_rows_per_message, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(18, keeper_path, 0) - } - - #undef CHECK_KAFKA_STORAGE_ARGUMENT - - auto num_consumers = kafka_settings->kafka_num_consumers.value; - auto max_consumers = std::max(getNumberOfPhysicalCPUCores(), 16); - - if (!args.getLocalContext()->getSettingsRef().kafka_disable_num_consumers_limit && num_consumers > max_consumers) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The number of consumers can not be bigger than {}. " - "A single consumer can read any number of partitions. " - "Extra consumers are relatively expensive, " - "and using a lot of them can lead to high memory and CPU usage. " - "To achieve better performance " - "of getting data from Kafka, consider using a setting kafka_thread_per_consumer=1, " - "and ensure you have enough threads " - "in MessageBrokerSchedulePool (background_message_broker_schedule_pool_size). 
" - "See also https://clickhouse.com/docs/en/integrations/kafka#tuning-performance", max_consumers); - } - else if (num_consumers < 1) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of consumers can not be lower than 1"); - } - - if (kafka_settings->kafka_max_block_size.changed && kafka_settings->kafka_max_block_size.value < 1) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_max_block_size can not be lower than 1"); - } - - if (kafka_settings->kafka_poll_max_batch_size.changed && kafka_settings->kafka_poll_max_batch_size.value < 1) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_poll_max_batch_size can not be lower than 1"); - } - NamesAndTypesList supported_columns; - for (const auto & column : args.columns) - { - if (column.default_desc.kind == ColumnDefaultKind::Alias) - supported_columns.emplace_back(column.name, column.type); - if (column.default_desc.kind == ColumnDefaultKind::Default && !column.default_desc.expression) - supported_columns.emplace_back(column.name, column.type); - } - // Kafka engine allows only ordinary columns without default expression or alias columns. - if (args.columns.getAll() != supported_columns) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "KafkaEngine doesn't support DEFAULT/MATERIALIZED/EPHEMERAL expressions for columns. " - "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); - } - - if (kafka_settings->keeper_path.changed && !args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper){ - - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Storing the Kafka offsets in Keeper is experimental. " - "Set `allow_experimental_kafka_store_offsets_in_keeper` setting to enable it"); - } - - return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); - }; - - factory.registerStorage("Kafka", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, }); -} - NamesAndTypesList StorageKafka::getVirtuals() const { auto result = NamesAndTypesList{ diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index 9280809be0e..907923c587b 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -23,8 +23,7 @@ class Configuration; namespace DB { -class StorageSystemKafkaConsumers; - +template struct StorageKafkaInterceptors; using KafkaConsumerPtr = std::shared_ptr; @@ -35,7 +34,8 @@ using KafkaConsumerWeakPtr = std::weak_ptr; */ class StorageKafka final : public IStorage, WithContext { - friend struct StorageKafkaInterceptors; + using StorageKafkaInterceptors = StorageKafkaInterceptors; + friend StorageKafkaInterceptors; public: StorageKafka( diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp new file mode 100644 index 00000000000..7cb2a38067a --- /dev/null +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -0,0 +1,722 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "Storages/ColumnDefault.h" +#include "config_version.h" + +#include +#include +#if USE_KRB5 +# include +#endif // USE_KRB5 + +namespace CurrentMetrics +{ +extern 
const Metric KafkaBackgroundReads; +extern const Metric KafkaConsumersInUse; +extern const Metric KafkaWrites; +} + +namespace ProfileEvents +{ +extern const Event KafkaDirectReads; +extern const Event KafkaBackgroundReads; +extern const Event KafkaWrites; +} + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; + extern const int QUERY_NOT_ALLOWED; +} + +namespace +{ + const auto RESCHEDULE_MS = 500; + const auto CLEANUP_TIMEOUT_MS = 3000; + const auto MAX_THREAD_WORK_DURATION_MS = 60000; // once per minute leave do reschedule (we can't lock threads in pool forever) +} + +StorageKafka2::StorageKafka2( + const StorageID & table_id_, + ContextPtr context_, + const ColumnsDescription & columns_, + std::unique_ptr kafka_settings_, + const String & collection_name_) + : IStorage(table_id_) + , WithContext(context_->getGlobalContext()) + , kafka_settings(std::move(kafka_settings_)) + , macros_info{.table_id = table_id_} + , topics(parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) + , brokers(getContext()->getMacros()->expand(kafka_settings->kafka_broker_list.value, macros_info)) + , group(getContext()->getMacros()->expand(kafka_settings->kafka_group_name.value, macros_info)) + , client_id( + kafka_settings->kafka_client_id.value.empty() + ? getDefaultClientId(table_id_) + : getContext()->getMacros()->expand(kafka_settings->kafka_client_id.value, macros_info)) + , format_name(getContext()->getMacros()->expand(kafka_settings->kafka_format.value)) + , max_rows_per_message(kafka_settings->kafka_max_rows_per_message.value) + , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value, macros_info)) + , num_consumers(kafka_settings->kafka_num_consumers.value) + , log(&Poco::Logger::get("StorageKafka (" + table_id_.table_name + ")")) + , semaphore(0, static_cast(num_consumers)) + , intermediate_commit(kafka_settings->kafka_commit_every_batch.value) + , settings_adjustments(createSettingsAdjustments()) + , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) + , collection_name(collection_name_) +{ + if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) + { + kafka_settings->input_format_allow_errors_num = 0; + kafka_settings->input_format_allow_errors_ratio = 0; + } + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(columns_); + setInMemoryMetadata(storage_metadata); + auto task_count = thread_per_consumer ? 
num_consumers : 1; + for (size_t i = 0; i < task_count; ++i) + { + auto task = getContext()->getMessageBrokerSchedulePool().createTask(log->name(), [this, i] { threadFunc(i); }); + task->deactivate(); + tasks.emplace_back(std::make_shared(std::move(task))); + } +} + +SettingsChanges StorageKafka2::createSettingsAdjustments() +{ + SettingsChanges result; + // Needed for backward compatibility + if (!kafka_settings->input_format_skip_unknown_fields.changed) + { + // Always skip unknown fields regardless of the context (JSON or TSKV) + kafka_settings->input_format_skip_unknown_fields = true; + } + + if (!kafka_settings->input_format_allow_errors_ratio.changed) + { + kafka_settings->input_format_allow_errors_ratio = 0.; + } + + if (!kafka_settings->input_format_allow_errors_num.changed) + { + kafka_settings->input_format_allow_errors_num = kafka_settings->kafka_skip_broken_messages.value; + } + + if (!schema_name.empty()) + result.emplace_back("format_schema", schema_name); + + for (const auto & setting : *kafka_settings) + { + const auto & name = setting.getName(); + if (name.find("kafka_") == std::string::npos) + result.emplace_back(name, setting.getValue()); + } + return result; +} + +Names StorageKafka2::parseTopics(String topic_list) +{ + Names result; + boost::split(result, topic_list, [](char c) { return c == ','; }); + for (String & topic : result) + { + boost::trim(topic); + } + return result; +} + +String StorageKafka2::getDefaultClientId(const StorageID & table_id_) +{ + return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id_.database_name, table_id_.table_name); +} + + +Pipe StorageKafka2::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & /* query_info */, + ContextPtr local_context, + QueryProcessingStage::Enum /* processed_stage */, + size_t /* max_block_size */, + size_t /* num_streams */) +{ + if (num_created_consumers == 0) + return {}; + + if (!local_context->getSettingsRef().stream_like_engine_allow_direct_select) + throw Exception( + ErrorCodes::QUERY_NOT_ALLOWED, "Direct select is not allowed. To enable use setting `stream_like_engine_allow_direct_select`"); + + if (mv_attached) + throw Exception(ErrorCodes::QUERY_NOT_ALLOWED, "Cannot read from StorageKafka with attached materialized views"); + + ProfileEvents::increment(ProfileEvents::KafkaDirectReads); + + /// Always use all consumers at once, otherwise SELECT may not read messages from all partitions. + Pipes pipes; + pipes.reserve(num_created_consumers); + auto modified_context = Context::createCopy(local_context); + modified_context->applySettingsChanges(settings_adjustments); + + // Claim as many consumers as requested, but don't block + for (size_t i = 0; i < num_created_consumers; ++i) + { + /// Use block size of 1, otherwise LIMIT won't work properly as it will buffer excess messages in the last block + /// TODO: probably that leads to awful performance. + /// FIXME: seems that doesn't help with extra reading and committing unprocessed messages. 
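parseTopics() above turns the comma-separated kafka_topic_list value into a list of trimmed topic names. A self-contained sketch of that behaviour using the same Boost string algorithms (the main() harness is illustrative only):

    #include <boost/algorithm/string.hpp>
    #include <cassert>
    #include <string>
    #include <vector>

    int main()
    {
        std::string topic_list = "football, baseball ,hockey";
        std::vector<std::string> topics;

        // Split on commas, then strip surrounding whitespace from every entry,
        // matching what parseTopics() does with the kafka_topic_list setting.
        boost::split(topics, topic_list, [](char c) { return c == ','; });
        for (auto & topic : topics)
            boost::trim(topic);

        assert((topics == std::vector<std::string>{"football", "baseball", "hockey"}));
        return 0;
    }
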
+ pipes.emplace_back(std::make_shared( + *this, storage_snapshot, modified_context, column_names, log, 1, kafka_settings->kafka_commit_on_select)); + } + + LOG_DEBUG(log, "Starting reading {} streams", pipes.size()); + return Pipe::unitePipes(std::move(pipes)); +} + + +SinkToStoragePtr +StorageKafka2::write(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context, bool /*async_insert*/) +{ + auto modified_context = Context::createCopy(local_context); + modified_context->applySettingsChanges(settings_adjustments); + + CurrentMetrics::Increment metric_increment{CurrentMetrics::KafkaWrites}; + ProfileEvents::increment(ProfileEvents::KafkaWrites); + + if (topics.size() > 1) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Can't write to Kafka table with multiple topics!"); + + cppkafka::Configuration conf; + conf.set("metadata.broker.list", brokers); + conf.set("client.id", client_id); + conf.set("client.software.name", VERSION_NAME); + conf.set("client.software.version", VERSION_DESCRIBE); + // TODO: fill required settings + updateConfiguration(conf); + + const Settings & settings = getContext()->getSettingsRef(); + size_t poll_timeout = settings.stream_poll_timeout_ms.totalMilliseconds(); + const auto & header = metadata_snapshot->getSampleBlockNonMaterialized(); + + auto producer = std::make_unique( + std::make_shared(conf), topics[0], std::chrono::milliseconds(poll_timeout), shutdown_called, header); + + size_t max_rows = max_rows_per_message; + /// Need for backward compatibility. + if (format_name == "Avro" && local_context->getSettingsRef().output_format_avro_rows_in_file.changed) + max_rows = local_context->getSettingsRef().output_format_avro_rows_in_file.value; + return std::make_shared(header, getFormatName(), max_rows, std::move(producer), getName(), modified_context); +} + + +void StorageKafka2::startup() +{ + for (size_t i = 0; i < num_consumers; ++i) + { + try + { + pushConsumer(createConsumer(i)); + ++num_created_consumers; + } + catch (const cppkafka::Exception &) + { + tryLogCurrentException(log); + } + } + + // Start the reader thread + for (auto & task : tasks) + { + task->holder->activateAndSchedule(); + } +} + + +void StorageKafka2::shutdown() +{ + for (auto & task : tasks) + { + // Interrupt streaming thread + task->stream_cancelled = true; + + LOG_TRACE(log, "Waiting for cleanup"); + task->holder->deactivate(); + } + + LOG_TRACE(log, "Closing consumers"); + for (size_t i = 0; i < num_created_consumers; ++i) + auto consumer = popConsumer(); + LOG_TRACE(log, "Consumers closed"); + + rd_kafka_wait_destroyed(CLEANUP_TIMEOUT_MS); +} + + +void StorageKafka2::pushConsumer(KafkaConsumer2Ptr consumer) +{ + std::lock_guard lock(mutex); + consumers.push_back(consumer); + semaphore.set(); + CurrentMetrics::sub(CurrentMetrics::KafkaConsumersInUse, 1); +} + + +KafkaConsumer2Ptr StorageKafka2::popConsumer() +{ + return popConsumer(std::chrono::milliseconds::zero()); +} + + +KafkaConsumer2Ptr StorageKafka2::popConsumer(std::chrono::milliseconds timeout) +{ + // Wait for the first free buffer + if (timeout == std::chrono::milliseconds::zero()) + semaphore.wait(); + else + { + if (!semaphore.tryWait(timeout.count())) + return nullptr; + } + + // Take the first available buffer from the list + std::lock_guard lock(mutex); + auto consumer = consumers.back(); + consumers.pop_back(); + CurrentMetrics::add(CurrentMetrics::KafkaConsumersInUse, 1); + return consumer; +} + + +KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) +{ + 
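pushConsumer()/popConsumer() above implement a small blocking pool: a Poco::Semaphore counts the available consumers while a mutex guards the container itself. A condensed sketch of the same pattern with plain integers standing in for consumers (class and member names are illustrative, not part of the patch):

    #include <Poco/Semaphore.h>
    #include <mutex>
    #include <optional>
    #include <vector>

    class Pool
    {
    public:
        explicit Pool(int max_items) : semaphore(0, max_items) {}

        void push(int item)
        {
            std::lock_guard<std::mutex> lock(mutex);
            items.push_back(item);
            semaphore.set();                        // one more item is available
        }

        std::optional<int> pop(long timeout_ms)
        {
            if (!semaphore.tryWait(timeout_ms))     // block until something was pushed
                return std::nullopt;
            std::lock_guard<std::mutex> lock(mutex);
            int item = items.back();
            items.pop_back();
            return item;
        }

    private:
        Poco::Semaphore semaphore;
        std::mutex mutex;
        std::vector<int> items;
    };
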
cppkafka::Configuration conf; + + conf.set("metadata.broker.list", brokers); + conf.set("group.id", group); + if (num_consumers > 1) + { + conf.set("client.id", fmt::format("{}-{}", client_id, consumer_number)); + } + else + { + conf.set("client.id", client_id); + } + conf.set("client.software.name", VERSION_NAME); + conf.set("client.software.version", VERSION_DESCRIBE); + conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start + + // that allows to prevent fast draining of the librdkafka queue + // during building of single insert block. Improves performance + // significantly, but may lead to bigger memory consumption. + size_t default_queued_min_messages = 100000; // we don't want to decrease the default + conf.set("queued.min.messages", std::max(getMaxBlockSize(), default_queued_min_messages)); + + updateConfiguration(conf); + + // those settings should not be changed by users. + conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished + conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once. + conf.set("enable.partition.eof", "false"); // Ignore EOF messages + + // Create a consumer and subscribe to topics + auto consumer_impl = std::make_shared(conf); + consumer_impl->set_destroy_flags(RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE); + + /// NOTE: we pass |stream_cancelled| by reference here, so the buffers should not outlive the storage. + if (thread_per_consumer) + { + auto & stream_cancelled = tasks[consumer_number]->stream_cancelled; + return std::make_shared( + consumer_impl, log, getPollMaxBatchSize(), getPollTimeoutMillisecond(), intermediate_commit, stream_cancelled, topics); + } + return std::make_shared( + consumer_impl, + log, + getPollMaxBatchSize(), + getPollTimeoutMillisecond(), + intermediate_commit, + tasks.back()->stream_cancelled, + topics); +} + +size_t StorageKafka2::getMaxBlockSize() const +{ + return kafka_settings->kafka_max_block_size.changed ? kafka_settings->kafka_max_block_size.value + : (getContext()->getSettingsRef().max_insert_block_size.value / num_consumers); +} + +size_t StorageKafka2::getPollMaxBatchSize() const +{ + size_t batch_size = kafka_settings->kafka_poll_max_batch_size.changed ? kafka_settings->kafka_poll_max_batch_size.value + : getContext()->getSettingsRef().max_block_size.value; + + return std::min(batch_size, getMaxBlockSize()); +} + +size_t StorageKafka2::getPollTimeoutMillisecond() const +{ + return kafka_settings->kafka_poll_timeout_ms.changed ? kafka_settings->kafka_poll_timeout_ms.totalMilliseconds() + : getContext()->getSettingsRef().stream_poll_timeout_ms.totalMilliseconds(); +} + +String StorageKafka2::getConfigPrefix() const +{ + if (!collection_name.empty()) + return "named_collections." + collection_name + "." + + String{KafkaConfigLoader::CONFIG_KAFKA_TAG}; /// Add one more level to separate librdkafka configuration. + return String{KafkaConfigLoader::CONFIG_KAFKA_TAG}; +} + +void StorageKafka2::updateConfiguration(cppkafka::Configuration & kafka_config) +{ + // Update consumer configuration from the configuration. 
Example: + // + // 250 + // 100000 + // + const auto & config = getContext()->getConfigRef(); + auto config_prefix = getConfigPrefix(); + if (config.has(config_prefix)) + KafkaConfigLoader::loadConfig(kafka_config, config, config_prefix); + +#if USE_KRB5 + if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) + LOG_WARNING(log, "sasl.kerberos.kinit.cmd configuration parameter is ignored."); + + kafka_config.set("sasl.kerberos.kinit.cmd", ""); + kafka_config.set("sasl.kerberos.min.time.before.relogin", "0"); + + if (kafka_config.has_property("sasl.kerberos.keytab") && kafka_config.has_property("sasl.kerberos.principal")) + { + String keytab = kafka_config.get("sasl.kerberos.keytab"); + String principal = kafka_config.get("sasl.kerberos.principal"); + LOG_DEBUG(log, "Running KerberosInit"); + try + { + kerberosInit(keytab, principal); + } + catch (const Exception & e) + { + LOG_ERROR(log, "KerberosInit failure: {}", getExceptionMessage(e, false)); + } + LOG_DEBUG(log, "Finished KerberosInit"); + } +#else // USE_KRB5 + if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) + LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); +#endif // USE_KRB5 + + // Update consumer topic-specific configuration (legacy syntax, retained for compatibility). Example with topic "football": + // + // 250 + // 100000 + // + // The legacy syntax has the problem that periods in topic names (e.g. "sports.football") are not supported because the Poco + // configuration framework hierarchy is based on periods as level separators. Besides that, per-topic tags at the same level + // as are ugly. + for (const auto & topic : topics) + { + const auto topic_config_key = config_prefix + "_" + topic; + if (config.has(topic_config_key)) + KafkaConfigLoader::loadConfig(kafka_config, config, topic_config_key); + } + + // Update consumer topic-specific configuration (new syntax). Example with topics "football" and "baseball": + // + // + // football + // 250 + // 5000 + // + // + // baseball + // 300 + // 2000 + // + // + // Advantages: The period restriction no longer applies (e.g. sports.football will work), everything + // Kafka-related is below . + for (const auto & topic : topics) + if (config.has(config_prefix)) + KafkaConfigLoader::loadTopicConfig(kafka_config, config, config_prefix, topic); + + // No need to add any prefix, messages can be distinguished + kafka_config.set_log_callback( + [this](cppkafka::KafkaHandleBase &, int level, const std::string & facility, const std::string & message) + { + auto [poco_level, client_logs_level] = parseSyslogLevel(level); + LOG_IMPL(log, client_logs_level, poco_level, "[rdk:{}] {}", facility, message); + }); + + // Configure interceptor to change thread name + // + // TODO: add interceptors support into the cppkafka. + // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibliity overrides it to noop. + { + // This should be safe, since we wait the rdkafka object anyway. 
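For reference, the configuration examples described in the comments of updateConfiguration() above have the following shape; this is a sketch kept as a C++ string constant, and the tag names other than <kafka>, <kafka_topic> and <name> are ordinary librdkafka settings chosen for illustration:

    // Global, legacy per-topic and new per-topic configuration as discussed above.
    static constexpr const char * kafka_config_examples = R"CONF(
    <!-- Global configuration -->
    <kafka>
        <retry_backoff_ms>250</retry_backoff_ms>
        <fetch_min_bytes>100000</fetch_min_bytes>
    </kafka>

    <!-- Legacy per-topic configuration for topic "football" (read from the <kafka>_<topic> key) -->
    <kafka_football>
        <retry_backoff_ms>250</retry_backoff_ms>
        <fetch_min_bytes>100000</fetch_min_bytes>
    </kafka_football>

    <!-- New per-topic configuration for topics "football" and "baseball" -->
    <kafka>
        <kafka_topic>
            <name>football</name>
            <retry_backoff_ms>250</retry_backoff_ms>
            <fetch_min_bytes>5000</fetch_min_bytes>
        </kafka_topic>
        <kafka_topic>
            <name>baseball</name>
            <retry_backoff_ms>300</retry_backoff_ms>
            <fetch_min_bytes>2000</fetch_min_bytes>
        </kafka_topic>
    </kafka>
    )CONF";
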
+ void * self = static_cast(this); + + int status; + + status = rd_kafka_conf_interceptor_add_on_new(kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnNew, self); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(log, "Cannot set new interceptor due to {} error", status); + + // cppkafka always copy the configuration + status = rd_kafka_conf_interceptor_add_on_conf_dup( + kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnConfDup, self); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(log, "Cannot set dup conf interceptor due to {} error", status); + } +} + +bool StorageKafka2::checkDependencies(const StorageID & table_id) +{ + // Check if all dependencies are attached + auto view_ids = DatabaseCatalog::instance().getDependentViews(table_id); + if (view_ids.empty()) + return true; + + // Check the dependencies are ready? + for (const auto & view_id : view_ids) + { + auto view = DatabaseCatalog::instance().tryGetTable(view_id, getContext()); + if (!view) + return false; + + // If it materialized view, check it's target table + auto * materialized_view = dynamic_cast(view.get()); + if (materialized_view && !materialized_view->tryGetTargetTable()) + return false; + + // Check all its dependencies + if (!checkDependencies(view_id)) + return false; + } + + return true; +} + +void StorageKafka2::threadFunc(size_t idx) +{ + assert(idx < tasks.size()); + auto task = tasks[idx]; + try + { + auto table_id = getStorageID(); + // Check if at least one direct dependency is attached + size_t num_views = DatabaseCatalog::instance().getDependentViews(table_id).size(); + if (num_views) + { + auto start_time = std::chrono::steady_clock::now(); + + mv_attached.store(true); + + // Keep streaming as long as there are attached views and streaming is not cancelled + while (!task->stream_cancelled && num_created_consumers > 0) + { + if (!checkDependencies(table_id)) + break; + + LOG_DEBUG(log, "Started streaming to {} attached views", num_views); + + // Exit the loop & reschedule if some stream stalled + auto some_stream_is_stalled = streamToViews(); + if (some_stream_is_stalled) + { + LOG_TRACE(log, "Stream(s) stalled. Reschedule."); + break; + } + + auto ts = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(ts - start_time); + if (duration.count() > MAX_THREAD_WORK_DURATION_MS) + { + LOG_TRACE(log, "Thread work duration limit exceeded. Reschedule."); + break; + } + } + } + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + + mv_attached.store(false); + + // Wait for attached views + if (!task->stream_cancelled) + task->holder->scheduleAfter(RESCHEDULE_MS); +} + + +bool StorageKafka2::streamToViews() +{ + Stopwatch watch; + + auto table_id = getStorageID(); + auto table = DatabaseCatalog::instance().getTable(table_id, getContext()); + if (!table) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Engine table {} doesn't exist.", table_id.getNameForLogs()); + + CurrentMetrics::Increment metric_increment{CurrentMetrics::KafkaBackgroundReads}; + ProfileEvents::increment(ProfileEvents::KafkaBackgroundReads); + + auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext()); + + // Create an INSERT query for streaming data + auto insert = std::make_shared(); + insert->table_id = table_id; + + size_t block_size = getMaxBlockSize(); + + auto kafka_context = Context::createCopy(getContext()); + kafka_context->makeQueryContext(); + kafka_context->applySettingsChanges(settings_adjustments); + + // Create a stream for each consumer and join them in a union stream + // Only insert into dependent views and expect that input blocks contain virtual columns + InterpreterInsertQuery interpreter(insert, kafka_context, false, true, true); + auto block_io = interpreter.execute(); + + // Create a stream for each consumer and join them in a union stream + std::vector> sources; + Pipes pipes; + + auto stream_count = thread_per_consumer ? 1 : num_created_consumers; + sources.reserve(stream_count); + pipes.reserve(stream_count); + for (size_t i = 0; i < stream_count; ++i) + { + auto source = std::make_shared( + *this, storage_snapshot, kafka_context, block_io.pipeline.getHeader().getNames(), log, block_size, false); + sources.emplace_back(source); + pipes.emplace_back(source); + + // Limit read batch to maximum block size to allow DDL + StreamLocalLimits limits; + + Poco::Timespan max_execution_time = kafka_settings->kafka_flush_interval_ms.changed + ? kafka_settings->kafka_flush_interval_ms + : getContext()->getSettingsRef().stream_flush_interval_ms; + + source->setTimeLimit(max_execution_time); + } + + auto pipe = Pipe::unitePipes(std::move(pipes)); + + // We can't cancel during copyData, as it's not aware of commits and other kafka-related stuff. 
+ // It will be cancelled on underlying layer (kafka buffer) + + std::atomic_size_t rows = 0; + { + block_io.pipeline.complete(std::move(pipe)); + + // we need to read all consumers in parallel (sequential read may lead to situation + // when some of consumers are not used, and will break some Kafka consumer invariants) + block_io.pipeline.setNumThreads(stream_count); + + block_io.pipeline.setProgressCallback([&](const Progress & progress) { rows += progress.read_rows.load(); }); + CompletedPipelineExecutor executor(block_io.pipeline); + executor.execute(); + } + + bool some_stream_is_stalled = false; + for (auto & source : sources) + { + some_stream_is_stalled = some_stream_is_stalled || source->isStalled(); + source->commit(); + } + + UInt64 milliseconds = watch.elapsedMilliseconds(); + LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(rows), table_id.getNameForLogs(), milliseconds); + + return some_stream_is_stalled; +} + +NamesAndTypesList StorageKafka2::getVirtuals() const +{ + auto result = NamesAndTypesList{ + {"_topic", std::make_shared(std::make_shared())}, + {"_key", std::make_shared()}, + {"_offset", std::make_shared()}, + {"_partition", std::make_shared()}, + {"_timestamp", std::make_shared(std::make_shared())}, + {"_timestamp_ms", std::make_shared(std::make_shared(3))}, + {"_headers.name", std::make_shared(std::make_shared())}, + {"_headers.value", std::make_shared(std::make_shared())}}; + if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) + { + result.push_back({"_raw_message", std::make_shared()}); + result.push_back({"_error", std::make_shared()}); + } + return result; +} + +Names StorageKafka2::getVirtualColumnNames() const +{ + auto result = Names{ + "_topic", + "_key", + "_offset", + "_partition", + "_timestamp", + "_timestamp_ms", + "_headers.name", + "_headers.value", + }; + if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) + { + result.push_back({"_raw_message"}); + result.push_back({"_error"}); + } + return result; +} + +} diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h new file mode 100644 index 00000000000..d0bc5cc78b7 --- /dev/null +++ b/src/Storages/Kafka/StorageKafka2.h @@ -0,0 +1,149 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace cppkafka +{ + +class Configuration; + +} + +namespace DB +{ + +template +struct StorageKafkaInterceptors; + +using KafkaConsumer2Ptr = std::shared_ptr; + +/** Implements a Kafka queue table engine that can be used as a persistent queue / buffer, + * or as a basic building block for creating pipelines with a continuous insertion / ETL. 
+ */ +class StorageKafka2 final : public IStorage, WithContext +{ + using StorageKafkaInterceptors = StorageKafkaInterceptors; + friend StorageKafkaInterceptors; + +public: + StorageKafka2( + const StorageID & table_id_, + ContextPtr context_, + const ColumnsDescription & columns_, + std::unique_ptr kafka_settings_, + const String & collection_name_); + + std::string getName() const override { return "Kafka"; } + + bool noPushingToViews() const override { return true; } + + void startup() override; + void shutdown() override; + + Pipe read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) override; + + SinkToStoragePtr write( + const ASTPtr & query, + const StorageMetadataPtr & /*metadata_snapshot*/, + ContextPtr context, + bool async_insert) override; + + /// We want to control the number of rows in a chunk inserted into Kafka + bool prefersLargeBlocks() const override { return false; } + + void pushConsumer(KafkaConsumer2Ptr consumer); + KafkaConsumer2Ptr popConsumer(); + KafkaConsumer2Ptr popConsumer(std::chrono::milliseconds timeout); + + const auto & getFormatName() const { return format_name; } + + NamesAndTypesList getVirtuals() const override; + Names getVirtualColumnNames() const; + HandleKafkaErrorMode getHandleKafkaErrorMode() const { return kafka_settings->kafka_handle_error_mode; } + +private: + // Configuration and state + std::unique_ptr kafka_settings; + Macros::MacroExpansionInfo macros_info; + const Names topics; + const String brokers; + const String group; + const String client_id; + const String format_name; + const size_t max_rows_per_message; + const String schema_name; + const size_t num_consumers; /// total number of consumers + Poco::Logger * log; + Poco::Semaphore semaphore; + const bool intermediate_commit; + const SettingsChanges settings_adjustments; + + std::atomic mv_attached = false; + + /// Can differ from num_consumers in case of exception in startup() (or if startup() hasn't been called). + /// In this case we still need to be able to shutdown() properly. + size_t num_created_consumers = 0; /// number of actually created consumers. + + std::vector consumers; /// available consumers + + std::mutex mutex; + + // Stream thread + struct TaskContext + { + BackgroundSchedulePool::TaskHolder holder; + std::atomic stream_cancelled {false}; + explicit TaskContext(BackgroundSchedulePool::TaskHolder&& task_) : holder(std::move(task_)) + { + } + }; + std::vector> tasks; + bool thread_per_consumer = false; + + /// For memory accounting in the librdkafka threads. + std::mutex thread_statuses_mutex; + std::list> thread_statuses; + + SettingsChanges createSettingsAdjustments(); + KafkaConsumer2Ptr createConsumer(size_t consumer_number); + + /// If named_collection is specified. + String collection_name; + + std::atomic shutdown_called = false; + + // Update Kafka configuration with values from CH user configuration. 
+ void updateConfiguration(cppkafka::Configuration & kafka_config); + String getConfigPrefix() const; + void threadFunc(size_t idx); + + size_t getPollMaxBatchSize() const; + size_t getMaxBlockSize() const; + size_t getPollTimeoutMillisecond() const; + + static Names parseTopics(String topic_list); + static String getDefaultClientId(const StorageID & table_id_); + + bool streamToViews(); + bool checkDependencies(const StorageID & table_id); +}; + +} diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp new file mode 100644 index 00000000000..3a35272ac74 --- /dev/null +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -0,0 +1,386 @@ +#include + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +namespace CurrentMetrics +{ +extern const Metric KafkaLibrdkafkaThreads; +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int SUPPORT_IS_DISABLED; +} + +template +rd_kafka_resp_err_t +StorageKafkaInterceptors::rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx) +{ + TStorageKafka * self = reinterpret_cast(ctx); + CurrentMetrics::add(CurrentMetrics::KafkaLibrdkafkaThreads, 1); + + const auto & storage_id = self->getStorageID(); + const auto & table = storage_id.getTableName(); + + switch (thread_type) + { + case RD_KAFKA_THREAD_MAIN: + setThreadName(("rdk:m/" + table.substr(0, 9)).c_str()); + break; + case RD_KAFKA_THREAD_BACKGROUND: + setThreadName(("rdk:bg/" + table.substr(0, 8)).c_str()); + break; + case RD_KAFKA_THREAD_BROKER: + setThreadName(("rdk:b/" + table.substr(0, 9)).c_str()); + break; + } + + /// Create ThreadStatus to track memory allocations from librdkafka threads. + // + /// And store them in a separate list (thread_statuses) to make sure that they will be destroyed, + /// regardless how librdkafka calls the hooks. 
+ /// But this can trigger use-after-free if librdkafka will not destroy threads after rd_kafka_wait_destroyed() + auto thread_status = std::make_shared(); + std::lock_guard lock(self->thread_statuses_mutex); + self->thread_statuses.emplace_back(std::move(thread_status)); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +template +rd_kafka_resp_err_t +StorageKafkaInterceptors::rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx) +{ + TStorageKafka * self = reinterpret_cast(ctx); + CurrentMetrics::sub(CurrentMetrics::KafkaLibrdkafkaThreads, 1); + + std::lock_guard lock(self->thread_statuses_mutex); + const auto it = std::find_if( + self->thread_statuses.begin(), + self->thread_statuses.end(), + [](const auto & thread_status_ptr) { return thread_status_ptr.get() == current_thread; }); + if (it == self->thread_statuses.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread status for this librdkafka thread."); + + self->thread_statuses.erase(it); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +template +rd_kafka_resp_err_t StorageKafkaInterceptors::rdKafkaOnNew( + rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/) +{ + TStorageKafka * self = reinterpret_cast(ctx); + rd_kafka_resp_err_t status; + + status = rd_kafka_interceptor_add_on_thread_start(rk, "init-thread", rdKafkaOnThreadStart, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + { + LOG_ERROR(self->log, "Cannot set on thread start interceptor due to {} error", status); + return status; + } + + status = rd_kafka_interceptor_add_on_thread_exit(rk, "exit-thread", rdKafkaOnThreadExit, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(self->log, "Cannot set on thread exit interceptor due to {} error", status); + + return status; +} + +template +rd_kafka_resp_err_t StorageKafkaInterceptors::rdKafkaOnConfDup( + rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx) +{ + TStorageKafka * self = reinterpret_cast(ctx); + rd_kafka_resp_err_t status; + + // cppkafka copies configuration multiple times + status = rd_kafka_conf_interceptor_add_on_conf_dup(new_conf, "init", rdKafkaOnConfDup, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + { + LOG_ERROR(self->log, "Cannot set on conf dup interceptor due to {} error", status); + return status; + } + + status = rd_kafka_conf_interceptor_add_on_new(new_conf, "init", rdKafkaOnNew, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(self->log, "Cannot set on conf new interceptor due to {} error", status); + + return status; +} + +void KafkaConfigLoader::loadConfig( + cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix) +{ + /// Read all tags one level below + Poco::Util::AbstractConfiguration::Keys tags; + config.keys(config_prefix, tags); + + for (const auto & tag : tags) + { + if (tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) /// multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + continue; /// used by new per-topic configuration, ignore + + const String setting_path = config_prefix + "." + tag; + const String setting_value = config.getString(setting_path); + + /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. + /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md + const String setting_name_in_kafka_config = (tag == "log_level") ? 
tag : boost::replace_all_copy(tag, "_", "."); + kafka_config.set(setting_name_in_kafka_config, setting_value); + } +} + +void KafkaConfigLoader::loadTopicConfig( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & config_prefix, + const String & topic) +{ + /// Read all tags one level below + Poco::Util::AbstractConfiguration::Keys tags; + config.keys(config_prefix, tags); + + for (const auto & tag : tags) + { + /// Only consider tag . Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + if (!tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) + continue; + + /// Read topic name between ... + const String kafka_topic_path = config_prefix + "." + tag; + const String kafpa_topic_name_path = kafka_topic_path + "." + String{CONFIG_NAME_TAG}; + + const String topic_name = config.getString(kafpa_topic_name_path); + if (topic_name == topic) + { + /// Found it! Now read the per-topic configuration into cppkafka. + Poco::Util::AbstractConfiguration::Keys inner_tags; + config.keys(kafka_topic_path, inner_tags); + for (const auto & inner_tag : inner_tags) + { + if (inner_tag == CONFIG_NAME_TAG) + continue; // ignore + + /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. + /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md + const String setting_path = kafka_topic_path + "." + inner_tag; + const String setting_value = config.getString(setting_path); + + const String setting_name_in_kafka_config + = (inner_tag == "log_level") ? inner_tag : boost::replace_all_copy(inner_tag, "_", "."); + kafka_config.set(setting_name_in_kafka_config, setting_value); + } + } + } +} + + +void registerStorageKafka(StorageFactory & factory) +{ + auto creator_fn = [](const StorageFactory::Arguments & args) -> std::shared_ptr + { + ASTs & engine_args = args.engine_args; + size_t args_count = engine_args.size(); + const bool has_settings = args.storage_def->settings; + + auto kafka_settings = std::make_unique(); + String collection_name; + if (auto named_collection = tryGetNamedCollectionWithOverrides(args.engine_args, args.getLocalContext())) + { + for (const auto & setting : kafka_settings->all()) + { + const auto & setting_name = setting.getName(); + if (named_collection->has(setting_name)) + kafka_settings->set(setting_name, named_collection->get(setting_name)); + } + collection_name = assert_cast(args.engine_args[0].get())->name(); + } + + if (has_settings) + { + kafka_settings->loadFromQuery(*args.storage_def); + } + +// Check arguments and settings +#define CHECK_KAFKA_STORAGE_ARGUMENT(ARG_NUM, PAR_NAME, EVAL) \ + /* One of the four required arguments is not specified */ \ + if (args_count < (ARG_NUM) && (ARG_NUM) <= 4 && !kafka_settings->PAR_NAME.changed) \ + { \ + throw Exception( \ + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, \ + "Required parameter '{}' " \ + "for storage Kafka not specified", \ + #PAR_NAME); \ + } \ + if (args_count >= (ARG_NUM)) \ + { \ + /* The same argument is given in two places */ \ + if (has_settings && kafka_settings->PAR_NAME.changed) \ + { \ + throw Exception( \ + ErrorCodes::BAD_ARGUMENTS, \ + "The argument №{} of storage Kafka " \ + "and the parameter '{}' " \ + "in SETTINGS cannot be specified at the same time", \ + #ARG_NUM, \ + #PAR_NAME); \ + } \ + /* move engine args to settings */ \ + else \ + { \ + if constexpr ((EVAL) == 1) \ + { \ + engine_args[(ARG_NUM)-1] = 
evaluateConstantExpressionAsLiteral(engine_args[(ARG_NUM)-1], args.getLocalContext()); \ + } \ + if constexpr ((EVAL) == 2) \ + { \ + engine_args[(ARG_NUM)-1] \ + = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[(ARG_NUM)-1], args.getLocalContext()); \ + } \ + kafka_settings->PAR_NAME = engine_args[(ARG_NUM)-1]->as().value; \ + } \ + } + + /** Arguments of engine is following: + * - Kafka broker list + * - List of topics + * - Group ID (may be a constant expression with a string result) + * - Message format (string) + * - Row delimiter + * - Schema (optional, if the format supports it) + * - Number of consumers + * - Max block size for background consumption + * - Skip (at least) unreadable messages number + * - Do intermediate commits when the batch consumed and handled + */ + + /* 0 = raw, 1 = evaluateConstantExpressionAsLiteral, 2=evaluateConstantExpressionOrIdentifierAsLiteral */ + /// In case of named collection we already validated the arguments. + if (collection_name.empty()) + { + CHECK_KAFKA_STORAGE_ARGUMENT(1, kafka_broker_list, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(2, kafka_topic_list, 1) + CHECK_KAFKA_STORAGE_ARGUMENT(3, kafka_group_name, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(4, kafka_format, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(5, kafka_row_delimiter, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(6, kafka_schema, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(7, kafka_num_consumers, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(8, kafka_max_block_size, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(9, kafka_skip_broken_messages, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(10, kafka_commit_every_batch, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(11, kafka_client_id, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(12, kafka_poll_timeout_ms, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(13, kafka_flush_interval_ms, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(14, kafka_thread_per_consumer, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(15, kafka_handle_error_mode, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(16, kafka_commit_on_select, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(17, kafka_max_rows_per_message, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(18, kafka_keeper_path, 0) + } + +#undef CHECK_KAFKA_STORAGE_ARGUMENT + + auto num_consumers = kafka_settings->kafka_num_consumers.value; + auto max_consumers = std::max(getNumberOfPhysicalCPUCores(), 16); + + if (!args.getLocalContext()->getSettingsRef().kafka_disable_num_consumers_limit && num_consumers > max_consumers) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The number of consumers can not be bigger than {}. " + "A single consumer can read any number of partitions. " + "Extra consumers are relatively expensive, " + "and using a lot of them can lead to high memory and CPU usage. " + "To achieve better performance " + "of getting data from Kafka, consider using a setting kafka_thread_per_consumer=1, " + "and ensure you have enough threads " + "in MessageBrokerSchedulePool (background_message_broker_schedule_pool_size). 
" + "See also https://clickhouse.com/docs/integrations/kafka/kafka-table-engine#tuning-performance", + max_consumers); + } + else if (num_consumers < 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of consumers can not be lower than 1"); + } + + if (kafka_settings->kafka_max_block_size.changed && kafka_settings->kafka_max_block_size.value < 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_max_block_size can not be lower than 1"); + } + + if (kafka_settings->kafka_poll_max_batch_size.changed && kafka_settings->kafka_poll_max_batch_size.value < 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_poll_max_batch_size can not be lower than 1"); + } + NamesAndTypesList supported_columns; + for (const auto & column : args.columns) + { + if (column.default_desc.kind == ColumnDefaultKind::Alias) + supported_columns.emplace_back(column.name, column.type); + if (column.default_desc.kind == ColumnDefaultKind::Default && !column.default_desc.expression) + supported_columns.emplace_back(column.name, column.type); + } + // Kafka engine allows only ordinary columns without default expression or alias columns. + if (args.columns.getAll() != supported_columns) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "KafkaEngine doesn't support DEFAULT/MATERIALIZED/EPHEMERAL expressions for columns. " + "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); + } + + if (kafka_settings->kafka_keeper_path.changed) + { + if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper) + + throw Exception( + ErrorCodes::SUPPORT_IS_DISABLED, + "Storing the Kafka offsets in Keeper is experimental. " + "Set `allow_experimental_kafka_store_offsets_in_keeper` setting to enable it"); + + return std::make_shared( + args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); + } + + return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); + }; + + factory.registerStorage( + "Kafka", + creator_fn, + StorageFactory::StorageFeatures{ + .supports_settings = true, + }); +} + +template struct StorageKafkaInterceptors; +template struct StorageKafkaInterceptors; + +} diff --git a/src/Storages/Kafka/StorageKafkaCommon.h b/src/Storages/Kafka/StorageKafkaCommon.h new file mode 100644 index 00000000000..108dcbf19e2 --- /dev/null +++ b/src/Storages/Kafka/StorageKafkaCommon.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include + +namespace Poco +{ +namespace Util +{ + class AbstractConfiguration; +} +} + +namespace DB +{ +template +struct StorageKafkaInterceptors +{ + static rd_kafka_resp_err_t rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx); + + static rd_kafka_resp_err_t rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx); + + static rd_kafka_resp_err_t + rdKafkaOnNew(rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/); + + static rd_kafka_resp_err_t rdKafkaOnConfDup( + rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx); +}; + +struct KafkaConfigLoader +{ + static constexpr std::string_view CONFIG_KAFKA_TAG = "kafka"; + static constexpr std::string_view CONFIG_KAFKA_TOPIC_TAG = "kafka_topic"; + static constexpr std::string_view CONFIG_NAME_TAG = "name"; + + /// Read server configuration into cppkafka configuration, used by global 
configuration and by legacy per-topic configuration + static void loadConfig( + cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix); + + /// Read server configuration into cppkafa configuration, used by new per-topic configuration + static void loadTopicConfig( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & config_prefix, + const String & topic); +}; +} From 86fab063382b8576292227b55a89a2363de3e1bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 1 Sep 2023 14:22:37 +0000 Subject: [PATCH 0012/2361] Very rudimentary version that works with single thread and consumer --- src/Common/ZooKeeper/ZooKeeper.cpp | 2 +- src/Storages/Kafka/KafkaConsumer2.cpp | 458 +++++++--------- src/Storages/Kafka/KafkaConsumer2.h | 97 +++- src/Storages/Kafka/KafkaSettings.h | 1 + src/Storages/Kafka/KafkaSource2.cpp | 303 ----------- src/Storages/Kafka/KafkaSource2.h | 64 --- src/Storages/Kafka/StorageKafka2.cpp | 627 +++++++++++++++++----- src/Storages/Kafka/StorageKafka2.h | 80 ++- src/Storages/Kafka/StorageKafkaCommon.cpp | 2 +- 9 files changed, 835 insertions(+), 799 deletions(-) delete mode 100644 src/Storages/Kafka/KafkaSource2.cpp delete mode 100644 src/Storages/Kafka/KafkaSource2.h diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 436a4e14f14..22e99c9120c 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -853,7 +853,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition & /// method is called. do { - /// Use getData insteand of exists to avoid watch leak. + /// Use getData instead of exists to avoid watch leak. 
impl->get(path, callback, std::make_shared(watch)); if (!state->event.tryWait(1000)) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index ec32248af46..61b6f801e9e 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -1,34 +1,39 @@ // Needs to go first because its partial specialization of fmt::formatter // should be defined before any instantiation +#include +#include +#include #include -#include #include +#include #include -#include -#include #include +#include +#include +#include #include #include +#include "base/scope_guard.h" namespace CurrentMetrics { - extern const Metric KafkaAssignedPartitions; - extern const Metric KafkaConsumersWithAssignment; +extern const Metric KafkaAssignedPartitions; +extern const Metric KafkaConsumersWithAssignment; } namespace ProfileEvents { - extern const Event KafkaRebalanceRevocations; - extern const Event KafkaRebalanceAssignments; - extern const Event KafkaRebalanceErrors; - extern const Event KafkaMessagesPolled; - extern const Event KafkaCommitFailures; - extern const Event KafkaCommits; - extern const Event KafkaConsumerErrors; +extern const Event KafkaRebalanceRevocations; +extern const Event KafkaRebalanceAssignments; +extern const Event KafkaRebalanceErrors; +extern const Event KafkaMessagesPolled; +extern const Event KafkaCommitFailures; +extern const Event KafkaCommits; +extern const Event KafkaConsumerErrors; } namespace DB @@ -45,6 +50,12 @@ const std::size_t POLL_TIMEOUT_WO_ASSIGNMENT_MS = 50; const auto DRAIN_TIMEOUT_MS = 5000ms; +bool KafkaConsumer2::TopicPartition::operator<(const TopicPartition & other) const +{ + return std::tie(topic, partition_id, offset) < std::tie(other.topic, other.partition_id, other.offset); +} + + KafkaConsumer2::KafkaConsumer2( ConsumerPtr consumer_, Poco::Logger * log_, @@ -63,68 +74,87 @@ KafkaConsumer2::KafkaConsumer2( , topics(_topics) { // called (synchronously, during poll) when we enter the consumer group - consumer->set_assignment_callback([this](const cppkafka::TopicPartitionList & topic_partitions) - { - CurrentMetrics::add(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size()); - ProfileEvents::increment(ProfileEvents::KafkaRebalanceAssignments); - - if (topic_partitions.empty()) + consumer->set_assignment_callback( + [this](const cppkafka::TopicPartitionList & topic_partitions) { - LOG_INFO(log, "Got empty assignment: Not enough partitions in the topic for all consumers?"); - } - else - { - LOG_TRACE(log, "Topics/partitions assigned: {}", topic_partitions); - CurrentMetrics::add(CurrentMetrics::KafkaConsumersWithAssignment, 1); - } + CurrentMetrics::add(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size()); + ProfileEvents::increment(ProfileEvents::KafkaRebalanceAssignments); - assignment = topic_partitions; - }); + if (topic_partitions.empty()) + { + LOG_INFO(log, "Got empty assignment: Not enough partitions in the topic for all consumers?"); + } + else + { + LOG_TRACE(log, "Topics/partitions assigned: {}", topic_partitions); + CurrentMetrics::add(CurrentMetrics::KafkaConsumersWithAssignment, 1); + } + + chassert(!assignment.has_value()); + + assignment.emplace(); + assignment->reserve(topic_partitions.size()); + needs_offset_update = true; + for (const auto & topic_partition : topic_partitions) + { + assignment->push_back(TopicPartition{topic_partition.get_topic(), topic_partition.get_partition(), INVALID_OFFSET}); + } + std::sort(assignment->begin(), assignment->end()); + + 
updateOffsets(topic_partitions); + }); // called (synchronously, during poll) when we leave the consumer group - consumer->set_revocation_callback([this](const cppkafka::TopicPartitionList & topic_partitions) - { - CurrentMetrics::sub(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size()); - ProfileEvents::increment(ProfileEvents::KafkaRebalanceRevocations); - - // Rebalance is happening now, and now we have a chance to finish the work - // with topics/partitions we were working with before rebalance - LOG_TRACE(log, "Rebalance initiated. Revoking partitions: {}", topic_partitions); - - if (!topic_partitions.empty()) + consumer->set_revocation_callback( + [this](const cppkafka::TopicPartitionList & topic_partitions) { - CurrentMetrics::sub(CurrentMetrics::KafkaConsumersWithAssignment, 1); - } + // TODO(antaljanosbenjamin): deal with revocation + CurrentMetrics::sub(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size()); + ProfileEvents::increment(ProfileEvents::KafkaRebalanceRevocations); - // we can not flush data to target from that point (it is pulled, not pushed) - // so the best we can now it to - // 1) repeat last commit in sync mode (async could be still in queue, we need to be sure is is properly committed before rebalance) - // 2) stop / brake the current reading: - // * clean buffered non-commited messages - // * set flag / flush + // Rebalance is happening now, and now we have a chance to finish the work + // with topics/partitions we were working with before rebalance + LOG_TRACE(log, "Rebalance initiated. Revoking partitions: {}", topic_partitions); - cleanUnprocessed(); + if (!topic_partitions.empty()) + { + CurrentMetrics::sub(CurrentMetrics::KafkaConsumersWithAssignment, 1); + } - stalled_status = REBALANCE_HAPPENED; - assignment.reset(); - waited_for_assignment = 0; + // we can not flush data to target from that point (it is pulled, not pushed) + // so the best we can now it to + // 1) repeat last commit in sync mode (async could be still in queue, we need to be sure is is properly committed before rebalance) + // 2) stop / brake the current reading: + // * clean buffered non-commited messages + // * set flag / flush - // for now we use slower (but reliable) sync commit in main loop, so no need to repeat - // try - // { - // consumer->commit(); - // } - // catch (cppkafka::HandleException & e) - // { - // LOG_WARNING(log, "Commit error: {}", e.what()); - // } - }); + cleanUnprocessed(); - consumer->set_rebalance_error_callback([this](cppkafka::Error err) - { - LOG_ERROR(log, "Rebalance error: {}", err); - ProfileEvents::increment(ProfileEvents::KafkaRebalanceErrors); - }); + stalled_status = StalledStatus::REBALANCE_HAPPENED; + assignment.reset(); + queues.clear(); + needs_offset_update = true; + waited_for_assignment = 0; + + // for now we use slower (but reliable) sync commit in main loop, so no need to repeat + // try + // { + // consumer->commit(); + // } + // catch (cppkafka::HandleException & e) + // { + // LOG_WARNING(log, "Commit error: {}", e.what()); + // } + }); + + consumer->set_rebalance_error_callback( + [this](cppkafka::Error err) + { + LOG_ERROR(log, "Rebalance error: {}", err); + ProfileEvents::increment(ProfileEvents::KafkaRebalanceErrors); + }); + + consumer->subscribe(topics); } KafkaConsumer2::~KafkaConsumer2() @@ -148,7 +178,6 @@ KafkaConsumer2::~KafkaConsumer2() { LOG_ERROR(log, "Error while destructing consumer: {}", e.what()); } - } // Needed to drain rest of the messages / queued callback calls from the consumer @@ -160,6 
+189,9 @@ void KafkaConsumer2::drain() auto start_time = std::chrono::steady_clock::now(); cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); + for (auto & [tp, queue] : queues) + queue.forward_to_queue(consumer->get_consumer_queue()); + while (true) { auto msg = consumer->poll(100ms); @@ -185,7 +217,7 @@ void KafkaConsumer2::drain() last_error = error; auto ts = std::chrono::steady_clock::now(); - if (std::chrono::duration_cast(ts-start_time) > DRAIN_TIMEOUT_MS) + if (std::chrono::duration_cast(ts - start_time) > DRAIN_TIMEOUT_MS) { LOG_ERROR(log, "Timeout during draining."); break; @@ -193,139 +225,6 @@ void KafkaConsumer2::drain() } } - -void KafkaConsumer2::commit() -{ - auto print_offsets = [this] (const char * prefix, const cppkafka::TopicPartitionList & offsets) - { - for (const auto & topic_part : offsets) - { - auto print_special_offset = [&topic_part] - { - switch (topic_part.get_offset()) - { - case cppkafka::TopicPartition::OFFSET_BEGINNING: return "BEGINNING"; - case cppkafka::TopicPartition::OFFSET_END: return "END"; - case cppkafka::TopicPartition::OFFSET_STORED: return "STORED"; - case cppkafka::TopicPartition::OFFSET_INVALID: return "INVALID"; - default: return ""; - } - }; - - if (topic_part.get_offset() < 0) - { - LOG_TRACE(log, "{} {} (topic: {}, partition: {})", prefix, print_special_offset(), topic_part.get_topic(), topic_part.get_partition()); - } - else - { - LOG_TRACE(log, "{} {} (topic: {}, partition: {})", prefix, topic_part.get_offset(), topic_part.get_topic(), topic_part.get_partition()); - } - } - }; - - print_offsets("Polled offset", consumer->get_offsets_position(consumer->get_assignment())); - - if (hasMorePolledMessages()) - { - LOG_WARNING(log, "Logical error. Non all polled messages were processed."); - } - - if (offsets_stored > 0) - { - // if we will do async commit here (which is faster) - // we may need to repeat commit in sync mode in revocation callback, - // but it seems like existing API doesn't allow us to to that - // in a controlled manner (i.e. we don't know the offsets to commit then) - - size_t max_retries = 5; - bool committed = false; - - while (!committed && max_retries > 0) - { - try - { - // See https://github.com/edenhill/librdkafka/issues/1470 - // broker may reject commit if during offsets.commit.timeout.ms (5000 by default), - // there were not enough replicas available for the __consumer_offsets topic. - // also some other temporary issues like client-server connectivity problems are possible - consumer->commit(); - committed = true; - print_offsets("Committed offset", consumer->get_offsets_committed(consumer->get_assignment())); - } - catch (const cppkafka::HandleException & e) - { - // If there were actually no offsets to commit, return. Retrying won't solve - // anything here - if (e.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) - committed = true; - else - LOG_ERROR(log, "Exception during commit attempt: {}", e.what()); - } - --max_retries; - } - - if (!committed) - { - // TODO: insert atomicity / transactions is needed here (possibility to rollback, on 2 phase commits) - ProfileEvents::increment(ProfileEvents::KafkaCommitFailures); - throw Exception(ErrorCodes::CANNOT_COMMIT_OFFSET, - "All commit attempts failed. 
Last block was already written to target table(s), " - "but was not committed to Kafka."); - } - else - { - ProfileEvents::increment(ProfileEvents::KafkaCommits); - } - - } - else - { - LOG_TRACE(log, "Nothing to commit."); - } - - offsets_stored = 0; -} - -void KafkaConsumer2::subscribe() -{ - LOG_TRACE(log, "Already subscribed to topics: [{}]", boost::algorithm::join(consumer->get_subscription(), ", ")); - - if (assignment.has_value()) - { - LOG_TRACE(log, "Already assigned to: {}", assignment.value()); - } - else - { - LOG_TRACE(log, "No assignment"); - } - - - size_t max_retries = 5; - - while (consumer->get_subscription().empty()) - { - --max_retries; - try - { - consumer->subscribe(topics); - // FIXME: if we failed to receive "subscribe" response while polling and destroy consumer now, then we may hang up. - // see https://github.com/edenhill/librdkafka/issues/2077 - } - catch (cppkafka::HandleException & e) - { - if (max_retries > 0 && e.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT) - continue; - throw; - } - } - - cleanUnprocessed(); - - // we can reset any flags (except of CONSUMER_STOPPED) before attempt of reading new block of data - if (stalled_status != CONSUMER_STOPPED) - stalled_status = NO_MESSAGES_RETURNED; -} - void KafkaConsumer2::cleanUnprocessed() { messages.clear(); @@ -333,59 +232,100 @@ void KafkaConsumer2::cleanUnprocessed() offsets_stored = 0; } -void KafkaConsumer2::unsubscribe() +void KafkaConsumer2::pollEvents() { - LOG_TRACE(log, "Re-joining claimed consumer after failure"); - cleanUnprocessed(); + // All the partition queues are detached, so the consumer shouldn't be able to poll any messages + auto msg = consumer->poll(10ms); + chassert(!msg && "Consumer returned a message when it was not expected"); +}; - // it should not raise exception as used in destructor +KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const +{ + TopicPartitionCounts result; try { - // From docs: Any previous subscription will be unassigned and unsubscribed first. - consumer->subscribe(topics); + auto metadata = consumer->get_metadata(); + auto topic_metadatas = metadata.get_topics(); - // I wanted to avoid explicit unsubscribe as it requires draining the messages - // to close the consumer safely after unsubscribe - // see https://github.com/edenhill/librdkafka/issues/2077 - // https://github.com/confluentinc/confluent-kafka-go/issues/189 etc. + for (auto & topic_metadata : topic_metadatas) + { + if (const auto it = std::find(topics.begin(), topics.end(), topic_metadata.get_name()); it != topics.end()) + { + result.push_back({topic_metadata.get_name(), topic_metadata.get_partitions().size()}); + } + } } - catch (const cppkafka::HandleException & e) + catch (cppkafka::HandleException & e) { - LOG_ERROR(log, "Exception from KafkaConsumer2::unsubscribe: {}", e.what()); + chassert(e.what() != nullptr); } - + return result; } - -void KafkaConsumer2::resetToLastCommitted(const char * msg) +bool KafkaConsumer2::polledDataUnusable(const TopicPartition & topic_partition) const { - if (!assignment.has_value() || assignment->empty()) + const auto consumer_in_wrong_state + = (stalled_status != StalledStatus::NOT_STALLED) && (stalled_status != StalledStatus::NO_MESSAGES_RETURNED); + const auto different_topic_partition = current == messages.end() + ? 
false + : (current->get_topic() != topic_partition.topic || current->get_partition() != topic_partition.partition_id); + return consumer_in_wrong_state || different_topic_partition; +} + +KafkaConsumer2::TopicPartitions const * KafkaConsumer2::getAssignment() const +{ + if (assignment.has_value()) { - LOG_TRACE(log, "Not assignned. Can't reset to last committed position."); - return; + return &*assignment; } - auto committed_offset = consumer->get_offsets_committed(consumer->get_assignment()); - consumer->assign(committed_offset); - LOG_TRACE(log, "{} Returned to committed position: {}", msg, committed_offset); + + return nullptr; +} + +void KafkaConsumer2::updateOffsets(const TopicPartitions & topic_partitions) +{ + // TODO(antaljanosbenjamin): Make sure topic_partitions and assignment is in sync. + cppkafka::TopicPartitionList original_topic_partitions; + original_topic_partitions.reserve(topic_partitions.size()); + std::transform( + topic_partitions.begin(), + topic_partitions.end(), + std::back_inserter(original_topic_partitions), + [](const TopicPartition & tp) { + return cppkafka::TopicPartition{tp.topic, tp.partition_id, tp.offset}; + }); + updateOffsets(original_topic_partitions); + needs_offset_update = false; + stalled_status = StalledStatus::NOT_STALLED; +} + +void KafkaConsumer2::updateOffsets(const cppkafka::TopicPartitionList & topic_partitions) +{ + queues.clear(); + // cppkafka itself calls assign(), but in order to detach the queues here we have to do the assignment manually. Later on we have to reassign the topic partitions with correct offsets. + consumer->assign(topic_partitions); + for (const auto & topic_partition : topic_partitions) + // This will also detach the partition queues from the consumer, thus the messages won't be forwarded without attaching them manually + queues.emplace( + TopicPartition{topic_partition.get_topic(), topic_partition.get_partition(), topic_partition.get_offset()}, + consumer->get_partition_queue(topic_partition)); } // it do the poll when needed -ReadBufferPtr KafkaConsumer2::consume() +ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) { resetIfStopped(); - if (polledDataUnusable()) + if (polledDataUnusable(topic_partition)) return nullptr; if (hasMorePolledMessages()) return getNextMessage(); - if (intermediate_commit) - commit(); while (true) { - stalled_status = NO_MESSAGES_RETURNED; + stalled_status = StalledStatus::NO_MESSAGES_RETURNED; // we already wait enough for assignment in the past, // let's make polls shorter and not block other consumer @@ -393,27 +333,23 @@ ReadBufferPtr KafkaConsumer2::consume() // POLL_TIMEOUT_WO_ASSIGNMENT_MS (50ms) is 100% enough just to check if we got assignment // (see https://github.com/ClickHouse/ClickHouse/issues/11218) auto actual_poll_timeout_ms = (waited_for_assignment >= MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) - ? std::min(POLL_TIMEOUT_WO_ASSIGNMENT_MS,poll_timeout) - : poll_timeout; + ? std::min(POLL_TIMEOUT_WO_ASSIGNMENT_MS, poll_timeout) + : poll_timeout; + + auto & queue_to_poll_from = queues[topic_partition]; + queue_to_poll_from.forward_to_queue(consumer->get_consumer_queue()); + SCOPE_EXIT({ queue_to_poll_from.disable_queue_forwarding(); }); /// Don't drop old messages immediately, since we may need them for virtual columns. 
- auto new_messages = consumer->poll_batch(batch_size, - std::chrono::milliseconds(actual_poll_timeout_ms)); + auto new_messages = consumer->poll_batch(batch_size, std::chrono::milliseconds(actual_poll_timeout_ms)); resetIfStopped(); - if (stalled_status == CONSUMER_STOPPED) + if (stalled_status == StalledStatus::CONSUMER_STOPPED) { return nullptr; } - else if (stalled_status == REBALANCE_HAPPENED) + else if (stalled_status == StalledStatus::REBALANCE_HAPPENED) { - if (!new_messages.empty()) - { - // we have polled something just after rebalance. - // we will not use current batch, so we need to return to last committed position - // otherwise we will continue polling from that position - resetToLastCommitted("Rewind last poll after rebalance."); - } return nullptr; } @@ -431,7 +367,7 @@ ReadBufferPtr KafkaConsumer2::consume() else { LOG_WARNING(log, "Can't get assignment. Will keep trying."); - stalled_status = NO_ASSIGNMENT; + stalled_status = StalledStatus::NO_ASSIGNMENT; return nullptr; } } @@ -450,8 +386,11 @@ ReadBufferPtr KafkaConsumer2::consume() { messages = std::move(new_messages); current = messages.begin(); - LOG_TRACE(log, "Polled batch of {} messages. Offsets position: {}", - messages.size(), consumer->get_offsets_position(consumer->get_assignment())); + LOG_TRACE( + log, + "Polled batch of {} messages. Offsets position: {}", + messages.size(), + consumer->get_offsets_position(consumer->get_assignment())); break; } } @@ -460,13 +399,13 @@ ReadBufferPtr KafkaConsumer2::consume() if (current == messages.end()) { LOG_ERROR(log, "Only errors left"); - stalled_status = ERRORS_RETURNED; + stalled_status = StalledStatus::ERRORS_RETURNED; return nullptr; } ProfileEvents::increment(ProfileEvents::KafkaMessagesPolled, messages.size()); - stalled_status = NOT_STALLED; + stalled_status = StalledStatus::NOT_STALLED; return getNextMessage(); } @@ -489,16 +428,18 @@ size_t KafkaConsumer2::filterMessageErrors() { assert(current == messages.begin()); - size_t skipped = std::erase_if(messages, [this](auto & message) - { - if (auto error = message.get_error()) + size_t skipped = std::erase_if( + messages, + [this](auto & message) { - ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); - LOG_ERROR(log, "Consumer error: {}", error); - return true; - } - return false; - }); + if (auto error = message.get_error()) + { + ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); + LOG_ERROR(log, "Consumer error: {}", error); + return true; + } + return false; + }); if (skipped) LOG_ERROR(log, "There were {} messages with an error", skipped); @@ -512,19 +453,8 @@ void KafkaConsumer2::resetIfStopped() // after block is formed (i.e. 
during copying data to MV / committing) we ignore stop attempts
     if (stopped)
     {
-        stalled_status = CONSUMER_STOPPED;
+        stalled_status = StalledStatus::CONSUMER_STOPPED;
         cleanUnprocessed();
     }
 }
-
-
-void KafkaConsumer2::storeLastReadMessageOffset()
-{
-    if (!isStalled())
-    {
-        consumer->store_offset(*(current - 1));
-        ++offsets_stored;
-    }
-}
-
 }
diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h
index 6562a65a9b0..3341dc2c42f 100644
--- a/src/Storages/Kafka/KafkaConsumer2.h
+++ b/src/Storages/Kafka/KafkaConsumer2.h
@@ -1,11 +1,16 @@
 #pragma once
 
 #include
-#include
 #include
-
-#include
 #include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
 
 namespace CurrentMetrics
 {
@@ -25,6 +30,54 @@ using ConsumerPtr = std::shared_ptr<cppkafka::Consumer>;
 class KafkaConsumer2
 {
 public:
+    static inline constexpr int INVALID_OFFSET = RD_KAFKA_OFFSET_INVALID;
+    static inline constexpr int BEGINNING_OFFSET = RD_KAFKA_OFFSET_BEGINNING;
+    static inline constexpr int END_OFFSET = RD_KAFKA_OFFSET_END;
+
+    struct TopicPartition
+    {
+        String topic;
+        int32_t partition_id;
+        int64_t offset{INVALID_OFFSET};
+
+        bool operator==(const TopicPartition&) const = default;
+        bool operator<(const TopicPartition& other) const;
+    };
+
+    using TopicPartitions = std::vector<TopicPartition>;
+
+    struct OnlyTopicNameAndPartitionIdHash
+    {
+        std::size_t operator()(const TopicPartition & tp) const
+        {
+            SipHash s;
+            s.update(tp.topic);
+            s.update(tp.partition_id);
+            return s.get64();
+        }
+    };
+
+    struct OnlyTopicNameAndPartitionIdEquality
+    {
+        bool operator()(const TopicPartition & lhs, const TopicPartition & rhs) const
+        {
+            return lhs.topic == rhs.topic && lhs.partition_id == rhs.partition_id;
+        }
+    };
+
+    struct TopicPartitionCount
+    {
+        String topic;
+        size_t partition_count;
+    };
+
+    using TopicPartitionCounts = std::vector<TopicPartitionCount>;
+
+    // struct AssignmentChanges {
+    //     TopicPartitions revoked_partitions;
+    //     TopicPartitions new_partitions;
+    // };
+
     KafkaConsumer2(
         ConsumerPtr consumer_,
         Poco::Logger * log_,
@@ -36,30 +89,34 @@
     );
     ~KafkaConsumer2();
 
-    void commit(); // Commit all processed messages.
-    void subscribe(); // Subscribe internal consumer to topics.
-    void unsubscribe(); // Unsubscribe internal consumer in case of failure.
+
+    void pollEvents();
+
+    TopicPartitionCounts getPartitionCounts() const;
 
     auto pollTimeout() const { return poll_timeout; }
 
     inline bool hasMorePolledMessages() const
     {
-        return (stalled_status == NOT_STALLED) && (current != messages.end());
+        return (stalled_status == StalledStatus::NOT_STALLED) && (current != messages.end());
     }
 
-    inline bool polledDataUnusable() const
-    {
-        return (stalled_status != NOT_STALLED) && (stalled_status != NO_MESSAGES_RETURNED);
-    }
+    inline bool isStalled() const { return stalled_status != StalledStatus::NOT_STALLED; }
 
-    inline bool isStalled() const { return stalled_status != NOT_STALLED; }
+    bool polledDataUnusable(const TopicPartition & topic_partition) const;
 
-    void storeLastReadMessageOffset();
-    void resetToLastCommitted(const char * msg);
+    TopicPartitions const * getAssignment() const;
+
+    // As the main source of offsets is not Kafka, the offsets needs to pushed to the consumer from outside
+    bool needsOffsetUpdate() const { return needs_offset_update; }
+
+    // Returns true if it received new assignment and could update the internal state accordingly, false otherwise
+    void updateOffsets(const TopicPartitions & topic_partitions);
 
     /// Polls batch of messages from Kafka and returns read buffer containing the next message or
     /// nullptr when there are no messages to process.
-    ReadBufferPtr consume();
+    /// TODO(antaljanosbenjamin): add batch size param
+    ReadBufferPtr consume(const TopicPartition & topic_partition);
 
     // Return values for the message that's being read.
     String currentTopic() const { return current[-1].get_topic(); }
@@ -74,7 +131,7 @@ private:
     using Messages = std::vector<cppkafka::Message>;
     CurrentMetrics::Increment metric_increment{CurrentMetrics::KafkaConsumers};
 
-    enum StalledStatus
+    enum class StalledStatus
     {
         NOT_STALLED,
         NO_MESSAGES_RETURNED,
@@ -90,7 +147,7 @@ private:
     const size_t poll_timeout = 0;
     size_t offsets_stored = 0;
 
-    StalledStatus stalled_status = NO_MESSAGES_RETURNED;
+    StalledStatus stalled_status = StalledStatus::NO_MESSAGES_RETURNED;
 
     bool intermediate_commit = true;
     size_t waited_for_assignment = 0;
@@ -102,7 +159,9 @@ private:
     Messages::const_iterator current;
 
     // order is important, need to be destructed before consumer
-    std::optional<cppkafka::TopicPartitionList> assignment;
+    std::optional<TopicPartitions> assignment;
+    bool needs_offset_update{false};
+    std::unordered_map<TopicPartition, cppkafka::Queue, OnlyTopicNameAndPartitionIdHash, OnlyTopicNameAndPartitionIdEquality> queues;
     const Names topics;
 
     void drain();
@@ -111,6 +170,8 @@
     /// Return number of messages with an error.
     size_t filterMessageErrors();
     ReadBufferPtr getNextMessage();
+
+    void updateOffsets(const cppkafka::TopicPartitionList & topic_partitions);
 };
 
 }
diff --git a/src/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h
index 748090165d7..3826857c24e 100644
--- a/src/Storages/Kafka/KafkaSettings.h
+++ b/src/Storages/Kafka/KafkaSettings.h
@@ -31,6 +31,7 @@ class ASTStorage;
     M(StreamingHandleErrorMode, kafka_handle_error_mode, StreamingHandleErrorMode::DEFAULT, "How to handle errors for Kafka engine.
Possible values: default (throw an exception after rabbitmq_skip_broken_messages broken messages), stream (save broken messages and errors in virtual columns _raw_message, _error).", 0) \ M(Bool, kafka_commit_on_select, false, "Commit messages when select query is made", 0) \ M(UInt64, kafka_max_rows_per_message, 1, "The maximum number of rows produced in one kafka message for row-based formats.", 0) \ + /* TODO(antaljanosbenjamin): Probably this shouldn't be here, but only read as an argument */ \ M(String, kafka_keeper_path, "", "TODO(antaljanosbenjamin)", 0) \ #define OBSOLETE_KAFKA_SETTINGS(M, ALIAS) \ diff --git a/src/Storages/Kafka/KafkaSource2.cpp b/src/Storages/Kafka/KafkaSource2.cpp deleted file mode 100644 index 3e14c57e8e2..00000000000 --- a/src/Storages/Kafka/KafkaSource2.cpp +++ /dev/null @@ -1,303 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include - -#include - -namespace ProfileEvents -{ - extern const Event KafkaMessagesRead; - extern const Event KafkaMessagesFailed; - extern const Event KafkaRowsRead; - extern const Event KafkaRowsRejected; -} - -namespace DB -{ -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - -// with default poll timeout (500ms) it will give about 5 sec delay for doing 10 retries -// when selecting from empty topic -const auto MAX_FAILED_POLL_ATTEMPTS = 10; - -KafkaSource2::KafkaSource2( - StorageKafka2 & storage_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - const Names & columns, - Poco::Logger * log_, - size_t max_block_size_, - bool commit_in_suffix_) - : ISource(storage_snapshot_->getSampleBlockForColumns(columns)) - , storage(storage_) - , storage_snapshot(storage_snapshot_) - , context(context_) - , column_names(columns) - , log(log_) - , max_block_size(max_block_size_) - , commit_in_suffix(commit_in_suffix_) - , non_virtual_header(storage_snapshot->metadata->getSampleBlockNonMaterialized()) - , virtual_header(storage_snapshot->getSampleBlockForColumns(storage.getVirtualColumnNames())) - , handle_error_mode(storage.getHandleKafkaErrorMode()) -{ -} - -KafkaSource2::~KafkaSource2() -{ - if (!consumer) - return; - - if (broken) - consumer->unsubscribe(); - - storage.pushConsumer(consumer); -} - -bool KafkaSource2::checkTimeLimit() const -{ - if (max_execution_time != 0) - { - auto elapsed_ns = total_stopwatch.elapsed(); - - if (elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) - return false; - } - - return true; -} - -Chunk KafkaSource2::generateImpl() -{ - if (!consumer) - { - auto timeout = std::chrono::milliseconds(context->getSettingsRef().kafka_max_wait_ms.totalMilliseconds()); - consumer = storage.popConsumer(timeout); - - if (!consumer) - return {}; - - consumer->subscribe(); - - broken = true; - } - - if (is_finished) - return {}; - - is_finished = true; - // now it's one-time usage InputStream - // one block of the needed size (or with desired flush timeout) is formed in one internal iteration - // otherwise external iteration will reuse that and logic will became even more fuzzy - MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); - - auto put_error_to_stream = handle_error_mode == HandleKafkaErrorMode::STREAM; - - EmptyReadBuffer empty_buf; - auto input_format = FormatFactory::instance().getInput( - storage.getFormatName(), empty_buf, non_virtual_header, context, max_block_size, std::nullopt, 1); - - std::optional exception_message; - size_t total_rows = 0; - size_t failed_poll_attempts = 0; - - auto on_error = 
[&](const MutableColumns & result_columns, Exception & e) - { - ProfileEvents::increment(ProfileEvents::KafkaMessagesFailed); - - if (put_error_to_stream) - { - exception_message = e.message(); - for (const auto & column : result_columns) - { - // read_kafka_message could already push some rows to result_columns - // before exception, we need to fix it. - auto cur_rows = column->size(); - if (cur_rows > total_rows) - column->popBack(cur_rows - total_rows); - - // all data columns will get default value in case of error - column->insertDefault(); - } - - return 1; - } - else - { - e.addMessage("while parsing Kafka message (topic: {}, partition: {}, offset: {})'", - consumer->currentTopic(), consumer->currentPartition(), consumer->currentOffset()); - throw std::move(e); - } - }; - - StreamingFormatExecutor executor(non_virtual_header, input_format, std::move(on_error)); - - while (true) - { - size_t new_rows = 0; - exception_message.reset(); - if (auto buf = consumer->consume()) - { - ProfileEvents::increment(ProfileEvents::KafkaMessagesRead); - new_rows = executor.execute(*buf); - } - - if (new_rows) - { - // In read_kafka_message(), KafkaConsumer::nextImpl() - // will be called, that may make something unusable, i.e. clean - // KafkaConsumer::messages, which is accessed from - // KafkaConsumer::currentTopic() (and other helpers). - if (consumer->isStalled()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Polled messages became unusable"); - - ProfileEvents::increment(ProfileEvents::KafkaRowsRead, new_rows); - - consumer->storeLastReadMessageOffset(); - - auto topic = consumer->currentTopic(); - auto key = consumer->currentKey(); - auto offset = consumer->currentOffset(); - auto partition = consumer->currentPartition(); - auto timestamp_raw = consumer->currentTimestamp(); - auto header_list = consumer->currentHeaderList(); - - Array headers_names; - Array headers_values; - - if (!header_list.empty()) - { - headers_names.reserve(header_list.size()); - headers_values.reserve(header_list.size()); - for (const auto & header : header_list) - { - headers_names.emplace_back(header.get_name()); - headers_values.emplace_back(static_cast(header.get_value())); - } - } - - for (size_t i = 0; i < new_rows; ++i) - { - virtual_columns[0]->insert(topic); - virtual_columns[1]->insert(key); - virtual_columns[2]->insert(offset); - virtual_columns[3]->insert(partition); - if (timestamp_raw) - { - auto ts = timestamp_raw->get_timestamp(); - virtual_columns[4]->insert(std::chrono::duration_cast(ts).count()); - virtual_columns[5]->insert(DecimalField(std::chrono::duration_cast(ts).count(),3)); - } - else - { - virtual_columns[4]->insertDefault(); - virtual_columns[5]->insertDefault(); - } - virtual_columns[6]->insert(headers_names); - virtual_columns[7]->insert(headers_values); - if (put_error_to_stream) - { - if (exception_message) - { - auto payload = consumer->currentPayload(); - virtual_columns[8]->insert(payload); - virtual_columns[9]->insert(*exception_message); - } - else - { - virtual_columns[8]->insertDefault(); - virtual_columns[9]->insertDefault(); - } - } - } - - total_rows = total_rows + new_rows; - } - else if (consumer->polledDataUnusable()) - { - break; - } - else if (consumer->isStalled()) - { - ++failed_poll_attempts; - } - else - { - // We came here in case of tombstone (or sometimes zero-length) messages, and it is not something abnormal - // TODO: it seems like in case of put_error_to_stream=true we may need to process those differently - // currently we just skip them with note in logs. 
- consumer->storeLastReadMessageOffset(); - LOG_DEBUG(log, "Parsing of message (topic: {}, partition: {}, offset: {}) return no rows.", consumer->currentTopic(), consumer->currentPartition(), consumer->currentOffset()); - } - - if (!consumer->hasMorePolledMessages() - && (total_rows >= max_block_size || !checkTimeLimit() || failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS)) - { - break; - } - } - - if (total_rows == 0) - { - return {}; - } - else if (consumer->polledDataUnusable()) - { - // the rows were counted already before by KafkaRowsRead, - // so let's count the rows we ignore separately - // (they will be retried after the rebalance) - ProfileEvents::increment(ProfileEvents::KafkaRowsRejected, total_rows); - return {}; - } - - /// MATERIALIZED columns can be added here, but I think - // they are not needed here: - // and it's misleading to use them here, - // as columns 'materialized' that way stays 'ephemeral' - // i.e. will not be stored anythere - // If needed any extra columns can be added using DEFAULT they can be added at MV level if needed. - - auto result_block = non_virtual_header.cloneWithColumns(executor.getResultColumns()); - auto virtual_block = virtual_header.cloneWithColumns(std::move(virtual_columns)); - - for (const auto & column : virtual_block.getColumnsWithTypeAndName()) - result_block.insert(column); - - auto converting_dag = ActionsDAG::makeConvertingActions( - result_block.cloneEmpty().getColumnsWithTypeAndName(), - getPort().getHeader().getColumnsWithTypeAndName(), - ActionsDAG::MatchColumnsMode::Name); - - auto converting_actions = std::make_shared(std::move(converting_dag)); - converting_actions->execute(result_block); - - return Chunk(result_block.getColumns(), result_block.rows()); -} - -Chunk KafkaSource2::generate() -{ - auto chunk = generateImpl(); - if (!chunk && commit_in_suffix) - commit(); - - return chunk; -} - -void KafkaSource2::commit() -{ - if (!consumer) - return; - - consumer->commit(); - - broken = false; -} - -} diff --git a/src/Storages/Kafka/KafkaSource2.h b/src/Storages/Kafka/KafkaSource2.h deleted file mode 100644 index 0a49001a686..00000000000 --- a/src/Storages/Kafka/KafkaSource2.h +++ /dev/null @@ -1,64 +0,0 @@ -#pragma once - -#include - -#include -#include -#include - - -namespace Poco -{ - class Logger; -} -namespace DB -{ - -class KafkaSource2 : public ISource -{ -public: - KafkaSource2( - StorageKafka2 & storage_, - const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, - const Names & columns, - Poco::Logger * log_, - size_t max_block_size_, - bool commit_in_suffix = false); - ~KafkaSource2() override; - - String getName() const override { return storage.getName(); } - - Chunk generate() override; - - void commit(); - bool isStalled() const { return !consumer || consumer->isStalled(); } - - void setTimeLimit(Poco::Timespan max_execution_time_) { max_execution_time = max_execution_time_; } - -private: - StorageKafka2 & storage; - StorageSnapshotPtr storage_snapshot; - ContextPtr context; - Names column_names; - Poco::Logger * log; - UInt64 max_block_size; - - KafkaConsumer2Ptr consumer; - bool broken = true; - bool is_finished = false; - bool commit_in_suffix; - - const Block non_virtual_header; - const Block virtual_header; - const HandleKafkaErrorMode handle_error_mode; - - Poco::Timespan max_execution_time = 0; - Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; - - bool checkTimeLimit() const; - - Chunk generateImpl(); -}; - -} diff --git a/src/Storages/Kafka/StorageKafka2.cpp 
b/src/Storages/Kafka/StorageKafka2.cpp index 7cb2a38067a..a1e7a81b792 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -1,5 +1,4 @@ #include -#include #include #include @@ -9,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -18,39 +18,47 @@ #include #include #include +#include +#include #include #include +#include #include #include -#include #include +#include #include #include #include #include #include -#include -#include -#include -#include #include +#include #include #include +#include +#include +#include +#include #include #include #include #include #include - -#include "Storages/ColumnDefault.h" +#include "Storages/Kafka/KafkaConsumer2.h" #include "config_version.h" -#include -#include #if USE_KRB5 # include #endif // USE_KRB5 +#include +#include +#include +#include + +#include + namespace CurrentMetrics { extern const Metric KafkaBackgroundReads; @@ -62,6 +70,10 @@ namespace ProfileEvents { extern const Event KafkaDirectReads; extern const Event KafkaBackgroundReads; +extern const Event KafkaMessagesRead; +extern const Event KafkaMessagesFailed; +extern const Event KafkaRowsRead; +extern const Event KafkaRowsRejected; extern const Event KafkaWrites; } @@ -78,9 +90,11 @@ namespace ErrorCodes namespace { - const auto RESCHEDULE_MS = 500; - const auto CLEANUP_TIMEOUT_MS = 3000; - const auto MAX_THREAD_WORK_DURATION_MS = 60000; // once per minute leave do reschedule (we can't lock threads in pool forever) + constexpr auto RESCHEDULE_MS = 500; + // const auto CLEANUP_TIMEOUT_MS = 3000; + constexpr auto MAX_THREAD_WORK_DURATION_MS = 60000; // once per minute leave do reschedule (we can't lock threads in pool forever) + + constexpr auto MAX_FAILED_POLL_ATTEMPTS = 10; } StorageKafka2::StorageKafka2( @@ -91,6 +105,7 @@ StorageKafka2::StorageKafka2( const String & collection_name_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) + , keeper(getContext()->getZooKeeper()) , kafka_settings(std::move(kafka_settings_)) , macros_info{.table_id = table_id_} , topics(parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) @@ -111,6 +126,12 @@ StorageKafka2::StorageKafka2( , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) , collection_name(collection_name_) { + if (kafka_settings->kafka_num_consumers != 1) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Multiple consumers not yet implemented!"); + + if (thread_per_consumer) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "The new Kafka storage cannot use multiple threads yet!"); + if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) { kafka_settings->input_format_allow_errors_num = 0; @@ -119,6 +140,7 @@ StorageKafka2::StorageKafka2( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); setInMemoryMetadata(storage_metadata); + auto task_count = thread_per_consumer ? 
num_consumers : 1; for (size_t i = 0; i < task_count; ++i) { @@ -178,44 +200,15 @@ String StorageKafka2::getDefaultClientId(const StorageID & table_id_) Pipe StorageKafka2::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, + const Names & /*column_names */, + const StorageSnapshotPtr & /* storage_snapshot */, SelectQueryInfo & /* query_info */, - ContextPtr local_context, + ContextPtr /* local_context */, QueryProcessingStage::Enum /* processed_stage */, size_t /* max_block_size */, size_t /* num_streams */) { - if (num_created_consumers == 0) - return {}; - - if (!local_context->getSettingsRef().stream_like_engine_allow_direct_select) - throw Exception( - ErrorCodes::QUERY_NOT_ALLOWED, "Direct select is not allowed. To enable use setting `stream_like_engine_allow_direct_select`"); - - if (mv_attached) - throw Exception(ErrorCodes::QUERY_NOT_ALLOWED, "Cannot read from StorageKafka with attached materialized views"); - - ProfileEvents::increment(ProfileEvents::KafkaDirectReads); - - /// Always use all consumers at once, otherwise SELECT may not read messages from all partitions. - Pipes pipes; - pipes.reserve(num_created_consumers); - auto modified_context = Context::createCopy(local_context); - modified_context->applySettingsChanges(settings_adjustments); - - // Claim as many consumers as requested, but don't block - for (size_t i = 0; i < num_created_consumers; ++i) - { - /// Use block size of 1, otherwise LIMIT won't work properly as it will buffer excess messages in the last block - /// TODO: probably that leads to awful performance. - /// FIXME: seems that doesn't help with extra reading and committing unprocessed messages. - pipes.emplace_back(std::make_shared( - *this, storage_snapshot, modified_context, column_names, log, 1, kafka_settings->kafka_commit_on_select)); - } - - LOG_DEBUG(log, "Starting reading {} streams", pipes.size()); - return Pipe::unitePipes(std::move(pipes)); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "You cannot read from the new Kafka storage!"); } @@ -260,7 +253,7 @@ void StorageKafka2::startup() { try { - pushConsumer(createConsumer(i)); + consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i)}); ++num_created_consumers; } catch (const cppkafka::Exception &) @@ -269,6 +262,7 @@ void StorageKafka2::startup() } } + createKeeperNodes(consumers.front().consumer); // Start the reader thread for (auto & task : tasks) { @@ -289,49 +283,15 @@ void StorageKafka2::shutdown() } LOG_TRACE(log, "Closing consumers"); - for (size_t i = 0; i < num_created_consumers; ++i) - auto consumer = popConsumer(); + consumers.clear(); LOG_TRACE(log, "Consumers closed"); - - rd_kafka_wait_destroyed(CLEANUP_TIMEOUT_MS); } - -void StorageKafka2::pushConsumer(KafkaConsumer2Ptr consumer) +void StorageKafka2::drop() { - std::lock_guard lock(mutex); - consumers.push_back(consumer); - semaphore.set(); - CurrentMetrics::sub(CurrentMetrics::KafkaConsumersInUse, 1); + getZooKeeper().removeRecursive(kafka_settings->kafka_keeper_path); } - -KafkaConsumer2Ptr StorageKafka2::popConsumer() -{ - return popConsumer(std::chrono::milliseconds::zero()); -} - - -KafkaConsumer2Ptr StorageKafka2::popConsumer(std::chrono::milliseconds timeout) -{ - // Wait for the first free buffer - if (timeout == std::chrono::milliseconds::zero()) - semaphore.wait(); - else - { - if (!semaphore.tryWait(timeout.count())) - return nullptr; - } - - // Take the first available buffer from the list - std::lock_guard lock(mutex); - auto consumer = consumers.back(); - 
consumers.pop_back();
-    CurrentMetrics::add(CurrentMetrics::KafkaConsumersInUse, 1);
-    return consumer;
-}
-
-
 KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number)
 {
     cppkafka::Configuration conf;
@@ -350,9 +310,7 @@ KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number)
     conf.set("client.software.version", VERSION_DESCRIBE);
     conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start
 
-    // that allows to prevent fast draining of the librdkafka queue
-    // during building of single insert block. Improves performance
-    // significantly, but may lead to bigger memory consumption.
+    // that allows to prevent fast draining of the librdkafka queue during building of single insert block. Improves performance significantly, but may lead to bigger memory consumption.
     size_t default_queued_min_messages = 100000; // we don't want to decrease the default
     conf.set("queued.min.messages", std::max(getMaxBlockSize(), default_queued_min_messages));
 
@@ -370,10 +328,12 @@ KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number)
     /// NOTE: we pass |stream_cancelled| by reference here, so the buffers should not outlive the storage.
     if (thread_per_consumer)
     {
+        // call subscribe;
         auto & stream_cancelled = tasks[consumer_number]->stream_cancelled;
         return std::make_shared<KafkaConsumer2>(
             consumer_impl, log, getPollMaxBatchSize(), getPollTimeoutMillisecond(), intermediate_commit, stream_cancelled, topics);
     }
+
     return std::make_shared<KafkaConsumer2>(
         consumer_impl,
         log,
@@ -487,16 +447,16 @@ void StorageKafka2::updateConfiguration(cppkafka::Configuration & kafka_config)
 
     // No need to add any prefix, messages can be distinguished
     kafka_config.set_log_callback(
-        [this](cppkafka::KafkaHandleBase &, int level, const std::string & facility, const std::string & message)
+        [this](cppkafka::KafkaHandleBase &, int /*level*/, const std::string & facility, const std::string & message)
         {
-            auto [poco_level, client_logs_level] = parseSyslogLevel(level);
+            auto [poco_level, client_logs_level] = parseSyslogLevel(1);
             LOG_IMPL(log, client_logs_level, poco_level, "[rdk:{}] {}", facility, message);
         });
 
     // Configure interceptor to change thread name
     //
     // TODO: add interceptors support into the cppkafka.
-    // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibliity overrides it to noop.
+    // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibility overrides it to noop.
     {
         // This should be safe, since we wait the rdkafka object anyway.
void * self = static_cast(this); @@ -542,6 +502,343 @@ bool StorageKafka2::checkDependencies(const StorageID & table_id) return true; } +namespace +{ + const std::string lock_file_name{"lock"}; + const std::string commit_file_name{"committed"}; + const std::string intent_file_name{"intention"}; + + std::optional getNumber(zkutil::ZooKeeper & keeper, const std::string & path) + { + std::string result; + if (!keeper.tryGet(path, result)) + return std::nullopt; + + return DB::parse(result); + } +} + +void StorageKafka2::createKeeperNodes(const KafkaConsumer2Ptr & consumer) +{ + // TODO(antaljanosbenjamin): check config with other StorageKafkas + const auto & keeper_path = kafka_settings->kafka_keeper_path.value; + + auto & keeper_ref = getZooKeeper(); + + if (keeper_ref.exists(keeper_path)) + { + return; + } + + keeper_ref.createAncestors(keeper_path); + Coordination::Requests ops; + ops.emplace_back(zkutil::makeCreateRequest(keeper_path, "", zkutil::CreateMode::Persistent)); + + ops.emplace_back(zkutil::makeCreateRequest(keeper_path + "/topics", "", zkutil::CreateMode::Persistent)); + + const auto topics_prefix = keeper_path + "/topics/"; + + const auto topic_partition_counts = consumer->getPartitionCounts(); + for (const auto & topic_partition_count : topic_partition_counts) + { + ops.emplace_back(zkutil::makeCreateRequest(topics_prefix + topic_partition_count.topic, "", zkutil::CreateMode::Persistent)); + ops.emplace_back( + zkutil::makeCreateRequest(topics_prefix + topic_partition_count.topic + "/partitions", "", zkutil::CreateMode::Persistent)); + const auto partitions_prefix = topics_prefix + topic_partition_count.topic + "/partitions/"; + // TODO(antaljanosbenjamin): handle changing number of partitions + for (auto partition_id{0U}; partition_id < topic_partition_count.partition_count; ++partition_id) + ops.emplace_back(zkutil::makeCreateRequest(partitions_prefix + toString(partition_id), "", zkutil::CreateMode::Persistent)); + } + + + Coordination::Responses responses; + const auto code = keeper_ref.tryMulti(ops, responses); + if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS) + { + zkutil::KeeperMultiException::check(code, ops, responses); + } +} + +std::optional StorageKafka2::lockTopicPartitions(const TopicPartitions & topic_partitions) +{ + // TODO(antaljanosbenjamin): Review this function with somebody who know keeper better than me + const auto uuid_as_string = toString(uuid); + + std::vector topic_partition_paths; + topic_partition_paths.reserve(topic_partitions.size()); + for (const auto & topic_partition : topic_partitions) + { + topic_partition_paths.emplace_back(getTopicPartitionPath(topic_partition)); + } + + Coordination::Requests ops; + + // for (const auto & topic_partition_path : topic_partition_paths) + // ops.push_back(zkutil::makeCheckRequest(topic_partition_path + lock_file_name, -1)); + + for (const auto & topic_partition_path : topic_partition_paths) + ops.push_back(zkutil::makeCreateRequest(topic_partition_path + lock_file_name, uuid_as_string, zkutil::CreateMode::Ephemeral)); + + bool success = false; + for (auto try_count{0}; try_count < 10; ++try_count) + { + Coordination::Responses responses; + // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. 
+ auto & keeper_ref = getZooKeeper(); + + if (const auto code = keeper_ref.tryMulti(ops, responses); code == Coordination::Error::ZOK) + { + success = true; + break; + } + else + { + zkutil::KeeperMultiException::check(code, ops, responses); + } + + // TODO(antaljanosbenjamin): Probably handle the most common errors + for (const auto & topic_partition_path : topic_partition_paths) + keeper_ref.waitForDisappear(topic_partition_path + lock_file_name); + } + + if (!success) + return std::nullopt; + + + // We have the locks + TopicPartitionLocks locks; + { + auto & keeper_ref = getZooKeeper(); + auto tp_it = topic_partitions.begin(); + auto path_it = topic_partition_paths.begin(); + for (; tp_it != topic_partitions.end(); ++tp_it, ++path_it) + { + using zkutil::EphemeralNodeHolder; + LockedTopicPartitionInfo lock_info{.lock = EphemeralNodeHolder::existing(*path_it + lock_file_name, keeper_ref)}; + + lock_info.committed_offset = getNumber(keeper_ref, *path_it + commit_file_name); + lock_info.intent_size = getNumber(keeper_ref, *path_it + intent_file_name); + + + locks.emplace(TopicPartition(*tp_it), std::move(lock_info)); + } + } + + return locks; +} + + +void StorageKafka2::saveCommittedOffset(const TopicPartition & topic_partition, int64_t committed_offset) +{ + const auto partition_prefix = getTopicPartitionPath(topic_partition); + auto & keeper_ref = getZooKeeper(); + keeper_ref.createOrUpdate(partition_prefix + commit_file_name, toString(committed_offset), zkutil::CreateMode::Persistent); + // This is best effort, if it fails we will try to remove in the next round + keeper_ref.tryRemove(partition_prefix + intent_file_name, -1); +} + +void StorageKafka2::saveIntent(const TopicPartition & topic_partition, int64_t intent) +{ + getZooKeeper().createOrUpdate( + getTopicPartitionPath(topic_partition) + intent_file_name, toString(intent), zkutil::CreateMode::Persistent); +} + + +StorageKafka2::PolledBatchInfo +StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & topic_partition, const ContextPtr & modified_context) +{ + PolledBatchInfo batch_info; + auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext()); + Block non_virtual_header(storage_snapshot->metadata->getSampleBlockNonMaterialized()); + Block virtual_header(storage_snapshot->getSampleBlockForColumns(getVirtualColumnNames())); + + // now it's one-time usage InputStream + // one block of the needed size (or with desired flush timeout) is formed in one internal iteration + // otherwise external iteration will reuse that and logic will became even more fuzzy + MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); + + auto put_error_to_stream = kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM; + + EmptyReadBuffer empty_buf; + auto input_format = FormatFactory::instance().getInput( + getFormatName(), empty_buf, non_virtual_header, modified_context, getMaxBlockSize(), std::nullopt, 1); + + std::optional exception_message; + size_t total_rows = 0; + size_t failed_poll_attempts = 0; + + auto on_error = [&](const MutableColumns & result_columns, Exception & e) + { + ProfileEvents::increment(ProfileEvents::KafkaMessagesFailed); + + if (put_error_to_stream) + { + exception_message = e.message(); + for (const auto & column : result_columns) + { + // read_kafka_message could already push some rows to result_columns + // before exception, we need to fix it. 
+ auto cur_rows = column->size(); + if (cur_rows > total_rows) + column->popBack(cur_rows - total_rows); + + // all data columns will get default value in case of error + column->insertDefault(); + } + + return 1; + } + else + { + e.addMessage( + "while parsing Kafka message (topic: {}, partition: {}, offset: {})'", + consumer.currentTopic(), + consumer.currentPartition(), + consumer.currentOffset()); + throw std::move(e); + } + }; + + StreamingFormatExecutor executor(non_virtual_header, input_format, std::move(on_error)); + + while (true) + { + size_t new_rows = 0; + exception_message.reset(); + if (auto buf = consumer.consume(topic_partition)) + { + ProfileEvents::increment(ProfileEvents::KafkaMessagesRead); + new_rows = executor.execute(*buf); + } + + if (new_rows) + { + // In read_kafka_message(), KafkaConsumer::nextImpl() + // will be called, that may make something unusable, i.e. clean + // KafkaConsumer::messages, which is accessed from + // KafkaConsumer::currentTopic() (and other helpers). + if (consumer.isStalled()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Polled messages became unusable"); + + ProfileEvents::increment(ProfileEvents::KafkaRowsRead, new_rows); + + const auto & header_list = consumer.currentHeaderList(); + + Array headers_names; + Array headers_values; + + if (!header_list.empty()) + { + headers_names.reserve(header_list.size()); + headers_values.reserve(header_list.size()); + for (const auto & header : header_list) + { + headers_names.emplace_back(header.get_name()); + headers_values.emplace_back(static_cast(header.get_value())); + } + } + + for (size_t i = 0; i < new_rows; ++i) + { + virtual_columns[0]->insert(consumer.currentTopic()); + virtual_columns[1]->insert(consumer.currentKey()); + virtual_columns[2]->insert(consumer.currentOffset()); + virtual_columns[3]->insert(consumer.currentPartition()); + + + auto timestamp_raw = consumer.currentTimestamp(); + if (timestamp_raw) + { + auto ts = timestamp_raw->get_timestamp(); + virtual_columns[4]->insert(std::chrono::duration_cast(ts).count()); + virtual_columns[5]->insert( + DecimalField(std::chrono::duration_cast(ts).count(), 3)); + } + else + { + virtual_columns[4]->insertDefault(); + virtual_columns[5]->insertDefault(); + } + virtual_columns[6]->insert(headers_names); + virtual_columns[7]->insert(headers_values); + if (put_error_to_stream) + { + if (exception_message) + { + virtual_columns[8]->insert(consumer.currentPayload()); + virtual_columns[9]->insert(*exception_message); + } + else + { + virtual_columns[8]->insertDefault(); + virtual_columns[9]->insertDefault(); + } + } + } + + total_rows = total_rows + new_rows; + batch_info.last_offset = consumer.currentOffset(); + } + else if (consumer.polledDataUnusable(topic_partition)) + { + break; + } + else if (consumer.isStalled()) + { + ++failed_poll_attempts; + } + else + { + // We came here in case of tombstone (or sometimes zero-length) messages, and it is not something abnormal + // TODO: it seems like in case of put_error_to_stream=true we may need to process those differently + // currently we just skip them with note in logs. 
+ LOG_DEBUG( + log, + "Parsing of message (topic: {}, partition: {}, offset: {}) return no rows.", + consumer.currentTopic(), + consumer.currentPartition(), + consumer.currentOffset()); + } + + if (!consumer.hasMorePolledMessages() + && (total_rows >= kafka_settings->kafka_max_block_size || /*!checkTimeLimit() + ||*/ + failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS)) + { + break; + } + } + + if (total_rows == 0) + { + return {}; + } + else if (consumer.polledDataUnusable(topic_partition)) + { + // the rows were counted already before by KafkaRowsRead, + // so let's count the rows we ignore separately + // (they will be retried after the rebalance) + ProfileEvents::increment(ProfileEvents::KafkaRowsRejected, total_rows); + return {}; + } + + /// MATERIALIZED columns can be added here, but I think + // they are not needed here: + // and it's misleading to use them here, + // as columns 'materialized' that way stays 'ephemeral' + // i.e. will not be stored anythere + // If needed any extra columns can be added using DEFAULT they can be added at MV level if needed. + + auto result_block = non_virtual_header.cloneWithColumns(executor.getResultColumns()); + auto virtual_block = virtual_header.cloneWithColumns(std::move(virtual_columns)); + + for (const auto & column : virtual_block.getColumnsWithTypeAndName()) + result_block.insert(column); + + batch_info.blocks.emplace_back(std::move(result_block)); + return batch_info; +} + void StorageKafka2::threadFunc(size_t idx) { assert(idx < tasks.size()); @@ -566,7 +863,7 @@ void StorageKafka2::threadFunc(size_t idx) LOG_DEBUG(log, "Started streaming to {} attached views", num_views); // Exit the loop & reschedule if some stream stalled - auto some_stream_is_stalled = streamToViews(); + auto some_stream_is_stalled = streamToViews(idx); if (some_stream_is_stalled) { LOG_TRACE(log, "Stream(s) stalled. Reschedule."); @@ -595,9 +892,18 @@ void StorageKafka2::threadFunc(size_t idx) task->holder->scheduleAfter(RESCHEDULE_MS); } - -bool StorageKafka2::streamToViews() +bool StorageKafka2::streamToViews(size_t idx) { + // What to do? + // 1. Select a topic partition to consume from + // 2. Do a casual poll for every other consumer to keep them alive + // 3. Get the necessary data from Keeper + // 4. Get the corresponding consumer + // 5. Pull messages + // 6. Create a BlockList from it + // 7. Execute the pipeline + // 8. 
Write the offset to Keeper + Stopwatch watch; auto table_id = getStorageID(); @@ -608,14 +914,56 @@ bool StorageKafka2::streamToViews() CurrentMetrics::Increment metric_increment{CurrentMetrics::KafkaBackgroundReads}; ProfileEvents::increment(ProfileEvents::KafkaBackgroundReads); - auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext()); + auto & consumer_info = consumers[idx]; + auto & consumer = consumer_info.consumer; + // To keep the consumer alive + + LOG_TRACE(log, "Polling consumer #{} for events", idx); + consumer->pollEvents(); + if (nullptr == consumer->getAssignment()) + return true; + + + LOG_TRACE(log, "Consumer #{} has assignment", idx); + + if (consumer->needsOffsetUpdate()) + { + LOG_TRACE(log, "Consumer #{} needs update offset", idx); + consumer_info.consume_from_topic_partition_index = 0; + + consumer_info.locks.clear(); + consumer_info.topic_partitions.clear(); + + if (const auto * current_assignment = consumer->getAssignment(); nullptr != current_assignment) + { + auto maybe_locks = lockTopicPartitions(*current_assignment); + + if (!maybe_locks.has_value()) + { + // TODO(antaljanosbenjamin): signal this somehow to caller, maybe wait a bit longer. + return true; + } + + consumer_info.locks = std::move(*maybe_locks); + + consumer_info.topic_partitions.reserve(current_assignment->size()); + for (const auto& topic_partition : *current_assignment) { + TopicPartition topic_partition_copy{topic_partition}; + if( const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; maybe_committed_offset.has_value()) + topic_partition_copy.offset = *maybe_committed_offset + 1; + else + topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; + consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); + + } + } + consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); + } // Create an INSERT query for streaming data auto insert = std::make_shared(); insert->table_id = table_id; - size_t block_size = getMaxBlockSize(); - auto kafka_context = Context::createCopy(getContext()); kafka_context->makeQueryContext(); kafka_context->applySettingsChanges(settings_adjustments); @@ -625,59 +973,80 @@ bool StorageKafka2::streamToViews() InterpreterInsertQuery interpreter(insert, kafka_context, false, true, true); auto block_io = interpreter.execute(); - // Create a stream for each consumer and join them in a union stream - std::vector> sources; - Pipes pipes; + auto & topic_partition = consumer_info.topic_partitions[consumer_info.consume_from_topic_partition_index]; + LOG_TRACE( + log, + "Consumer #{} will fetch {}:{} (consume_from_topic_partition_index is {})", + idx, + topic_partition.topic, + topic_partition.partition_id, + consumer_info.consume_from_topic_partition_index); + consumer_info.consume_from_topic_partition_index + = (consumer_info.consume_from_topic_partition_index + 1) % consumer_info.topic_partitions.size(); - auto stream_count = thread_per_consumer ? 
1 : num_created_consumers; - sources.reserve(stream_count); - pipes.reserve(stream_count); - for (size_t i = 0; i < stream_count; ++i) + auto [blocks, last_offset] = pollConsumer(*consumer_info.consumer, topic_partition, kafka_context); + + if (blocks.empty()) { - auto source = std::make_shared( - *this, storage_snapshot, kafka_context, block_io.pipeline.getHeader().getNames(), log, block_size, false); - sources.emplace_back(source); - pipes.emplace_back(source); - - // Limit read batch to maximum block size to allow DDL - StreamLocalLimits limits; - - Poco::Timespan max_execution_time = kafka_settings->kafka_flush_interval_ms.changed - ? kafka_settings->kafka_flush_interval_ms - : getContext()->getSettingsRef().stream_flush_interval_ms; - - source->setTimeLimit(max_execution_time); + LOG_TRACE(log, "Consumer #{} didn't get any messages", idx); + return true; } - auto pipe = Pipe::unitePipes(std::move(pipes)); + + auto converting_dag = ActionsDAG::makeConvertingActions( + blocks.front().cloneEmpty().getColumnsWithTypeAndName(), + block_io.pipeline.getHeader().getColumnsWithTypeAndName(), + ActionsDAG::MatchColumnsMode::Name); + + auto converting_actions = std::make_shared(std::move(converting_dag)); + + for (auto & block : blocks) + { + converting_actions->execute(block); + } // We can't cancel during copyData, as it's not aware of commits and other kafka-related stuff. // It will be cancelled on underlying layer (kafka buffer) + auto & lock_info = consumer_info.locks.at(topic_partition); + const auto intent = lock_info.committed_offset.value_or(0); + saveIntent(topic_partition, intent); std::atomic_size_t rows = 0; { - block_io.pipeline.complete(std::move(pipe)); - - // we need to read all consumers in parallel (sequential read may lead to situation - // when some of consumers are not used, and will break some Kafka consumer invariants) - block_io.pipeline.setNumThreads(stream_count); + block_io.pipeline.complete(Pipe{std::make_shared(std::move(blocks))}); block_io.pipeline.setProgressCallback([&](const Progress & progress) { rows += progress.read_rows.load(); }); CompletedPipelineExecutor executor(block_io.pipeline); executor.execute(); } - bool some_stream_is_stalled = false; - for (auto & source : sources) - { - some_stream_is_stalled = some_stream_is_stalled || source->isStalled(); - source->commit(); - } + saveCommittedOffset(topic_partition, last_offset); + lock_info.intent_size = intent; + lock_info.committed_offset = last_offset; + topic_partition.offset = last_offset; UInt64 milliseconds = watch.elapsedMilliseconds(); LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(rows), table_id.getNameForLogs(), milliseconds); - return some_stream_is_stalled; + return false; +} + + +zkutil::ZooKeeper & StorageKafka2::getZooKeeper() +{ + if (keeper->expired()) + { + keeper = keeper->startNewSession(); + //TODO(antaljanosbenjamin): handle ephemeral nodes + } + return *keeper; +} + + +std::string StorageKafka2::getTopicPartitionPath(const TopicPartition & topic_partition) +{ + return kafka_settings->kafka_keeper_path.value + "/topics/" + topic_partition.topic + "/partitions/" + + std::to_string(topic_partition.partition_id) + '/'; } NamesAndTypesList StorageKafka2::getVirtuals() const diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index d0bc5cc78b7..89d5019bd4b 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -1,17 +1,22 @@ #pragma once -#include #include +#include +#include #include 
#include #include +#include #include +#include +#include "Core/Block.h" #include -#include -#include #include +#include +#include +#include namespace cppkafka { @@ -51,6 +56,8 @@ public: void startup() override; void shutdown() override; + void drop() override; + Pipe read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, @@ -60,19 +67,12 @@ public: size_t max_block_size, size_t num_streams) override; - SinkToStoragePtr write( - const ASTPtr & query, - const StorageMetadataPtr & /*metadata_snapshot*/, - ContextPtr context, - bool async_insert) override; + SinkToStoragePtr + write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context, bool async_insert) override; /// We want to control the number of rows in a chunk inserted into Kafka bool prefersLargeBlocks() const override { return false; } - void pushConsumer(KafkaConsumer2Ptr consumer); - KafkaConsumer2Ptr popConsumer(); - KafkaConsumer2Ptr popConsumer(std::chrono::milliseconds timeout); - const auto & getFormatName() const { return format_name; } NamesAndTypesList getVirtuals() const override; @@ -81,6 +81,7 @@ public: private: // Configuration and state + zkutil::ZooKeeperPtr keeper; std::unique_ptr kafka_settings; Macros::MacroExpansionInfo macros_info; const Names topics; @@ -102,18 +103,45 @@ private: /// In this case we still need to be able to shutdown() properly. size_t num_created_consumers = 0; /// number of actually created consumers. - std::vector consumers; /// available consumers + using TopicPartition = KafkaConsumer2::TopicPartition; + using TopicPartitions = KafkaConsumer2::TopicPartitions; - std::mutex mutex; + struct LockedTopicPartitionInfo + { + zkutil::EphemeralNodeHolderPtr lock; + std::optional committed_offset; + std::optional intent_size; + }; + + using TopicPartitionLocks = std::unordered_map< + TopicPartition, + LockedTopicPartitionInfo, + KafkaConsumer2::OnlyTopicNameAndPartitionIdHash, + KafkaConsumer2::OnlyTopicNameAndPartitionIdEquality>; + + struct ConsumerAndAssignmentInfo + { + KafkaConsumer2Ptr consumer; /// available consumers + size_t consume_from_topic_partition_index{0}; + TopicPartitions topic_partitions; + // TODO(antaljanosbenjamin): maybe recreate the ephemeral node + TopicPartitionLocks locks; + }; + + struct PolledBatchInfo + { + BlocksList blocks; + int64_t last_offset; + }; + + std::vector consumers; // Stream thread struct TaskContext { BackgroundSchedulePool::TaskHolder holder; - std::atomic stream_cancelled {false}; - explicit TaskContext(BackgroundSchedulePool::TaskHolder&& task_) : holder(std::move(task_)) - { - } + std::atomic stream_cancelled{false}; + explicit TaskContext(BackgroundSchedulePool::TaskHolder && task_) : holder(std::move(task_)) { } }; std::vector> tasks; bool thread_per_consumer = false; @@ -129,6 +157,7 @@ private: String collection_name; std::atomic shutdown_called = false; + UUID uuid{UUIDHelpers::generateV4()}; // Update Kafka configuration with values from CH user configuration. 
void updateConfiguration(cppkafka::Configuration & kafka_config); @@ -142,8 +171,21 @@ private: static Names parseTopics(String topic_list); static String getDefaultClientId(const StorageID & table_id_); - bool streamToViews(); + bool streamToViews(size_t idx); bool checkDependencies(const StorageID & table_id); + + // Takes lock over topic partitions and set's the committed offset in topic_partitions + void createKeeperNodes(const KafkaConsumer2Ptr & consumer); + + std::optional lockTopicPartitions(const TopicPartitions & topic_partitions); + void saveCommittedOffset(const TopicPartition & topic_partition, int64_t committed_offset); + void saveIntent(const TopicPartition & topic_partition, int64_t intent); + + PolledBatchInfo pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & topic_partition, const ContextPtr & context); + + zkutil::ZooKeeper& getZooKeeper(); + + std::string getTopicPartitionPath(const TopicPartition& topic_partition ); }; } diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 3a35272ac74..0df4faa9030 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -356,7 +356,7 @@ void registerStorageKafka(StorageFactory & factory) "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); } - if (kafka_settings->kafka_keeper_path.changed) + if (!kafka_settings->kafka_keeper_path.value.empty()) { if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper) From 5c635d5210b81d313acd9d68c6da55367167230a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 4 Sep 2023 12:19:03 +0000 Subject: [PATCH 0013/2361] Clarify how assignment is handled in consumer --- src/Storages/Kafka/KafkaConsumer2.cpp | 11 +++++------ src/Storages/Kafka/KafkaConsumer2.h | 22 ++++++++++------------ src/Storages/Kafka/StorageKafka2.cpp | 4 ++-- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 61b6f801e9e..a7911f1c2d1 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -99,9 +99,9 @@ KafkaConsumer2::KafkaConsumer2( { assignment->push_back(TopicPartition{topic_partition.get_topic(), topic_partition.get_partition(), INVALID_OFFSET}); } - std::sort(assignment->begin(), assignment->end()); - updateOffsets(topic_partitions); + // We need to initialize the queues here in order to detach them from the consumer queue. Otherwise `pollEvents` might eventually poll actual messages also. + initializeQueues(topic_partitions); }); // called (synchronously, during poll) when we leave the consumer group @@ -272,7 +272,7 @@ bool KafkaConsumer2::polledDataUnusable(const TopicPartition & topic_partition) return consumer_in_wrong_state || different_topic_partition; } -KafkaConsumer2::TopicPartitions const * KafkaConsumer2::getAssignment() const +KafkaConsumer2::TopicPartitions const * KafkaConsumer2::getKafkaAssignment() const { if (assignment.has_value()) { @@ -284,7 +284,6 @@ KafkaConsumer2::TopicPartitions const * KafkaConsumer2::getAssignment() const void KafkaConsumer2::updateOffsets(const TopicPartitions & topic_partitions) { - // TODO(antaljanosbenjamin): Make sure topic_partitions and assignment is in sync. 
cppkafka::TopicPartitionList original_topic_partitions; original_topic_partitions.reserve(topic_partitions.size()); std::transform( @@ -294,12 +293,12 @@ void KafkaConsumer2::updateOffsets(const TopicPartitions & topic_partitions) [](const TopicPartition & tp) { return cppkafka::TopicPartition{tp.topic, tp.partition_id, tp.offset}; }); - updateOffsets(original_topic_partitions); + initializeQueues(original_topic_partitions); needs_offset_update = false; stalled_status = StalledStatus::NOT_STALLED; } -void KafkaConsumer2::updateOffsets(const cppkafka::TopicPartitionList & topic_partitions) +void KafkaConsumer2::initializeQueues(const cppkafka::TopicPartitionList & topic_partitions) { queues.clear(); // cppkafka itself calls assign(), but in order to detach the queues here we have to do the assignment manually. Later on we have to reassign the topic partitions with correct offsets. diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 3341dc2c42f..5b0f7fca465 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -14,12 +14,12 @@ namespace CurrentMetrics { - extern const Metric KafkaConsumers; +extern const Metric KafkaConsumers; } namespace Poco { - class Logger; +class Logger; } namespace DB @@ -40,8 +40,8 @@ public: int32_t partition_id; int64_t offset{INVALID_OFFSET}; - bool operator==(const TopicPartition&) const = default; - bool operator<(const TopicPartition& other) const; + bool operator==(const TopicPartition &) const = default; + bool operator<(const TopicPartition & other) const; }; using TopicPartitions = std::vector; @@ -85,8 +85,7 @@ public: size_t poll_timeout_, bool intermediate_commit_, const std::atomic & stopped_, - const Names & _topics - ); + const Names & _topics); ~KafkaConsumer2(); @@ -96,16 +95,15 @@ public: auto pollTimeout() const { return poll_timeout; } - inline bool hasMorePolledMessages() const - { - return (stalled_status == StalledStatus::NOT_STALLED) && (current != messages.end()); - } + inline bool hasMorePolledMessages() const { return (stalled_status == StalledStatus::NOT_STALLED) && (current != messages.end()); } inline bool isStalled() const { return stalled_status != StalledStatus::NOT_STALLED; } bool polledDataUnusable(const TopicPartition & topic_partition) const; - TopicPartitions const * getAssignment() const; + // Returns the topic partitions that the consumer got from rebalancing the consumer group. If the consumer received + // no topic partitions or all of them were revoked, it returns a null pointer. 
+ TopicPartitions const * getKafkaAssignment() const; // As the main source of offsets is not Kafka, the offsets needs to pushed to the consumer from outside bool needsOffsetUpdate() const { return needs_offset_update; } @@ -171,7 +169,7 @@ private: size_t filterMessageErrors(); ReadBufferPtr getNextMessage(); - void updateOffsets(const cppkafka::TopicPartitionList & topic_partitions); + void initializeQueues(const cppkafka::TopicPartitionList & topic_partitions); }; } diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index a1e7a81b792..c01337c6305 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -920,7 +920,7 @@ bool StorageKafka2::streamToViews(size_t idx) LOG_TRACE(log, "Polling consumer #{} for events", idx); consumer->pollEvents(); - if (nullptr == consumer->getAssignment()) + if (nullptr == consumer->getKafkaAssignment()) return true; @@ -934,7 +934,7 @@ bool StorageKafka2::streamToViews(size_t idx) consumer_info.locks.clear(); consumer_info.topic_partitions.clear(); - if (const auto * current_assignment = consumer->getAssignment(); nullptr != current_assignment) + if (const auto * current_assignment = consumer->getKafkaAssignment(); nullptr != current_assignment) { auto maybe_locks = lockTopicPartitions(*current_assignment); From 30d5b93d58025629545ccb1d7e5d9559862828b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 4 Sep 2023 12:48:27 +0000 Subject: [PATCH 0014/2361] Remove outdated TODO --- src/Storages/Kafka/KafkaConsumer2.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index a7911f1c2d1..1032a5c8a5f 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -108,7 +108,6 @@ KafkaConsumer2::KafkaConsumer2( consumer->set_revocation_callback( [this](const cppkafka::TopicPartitionList & topic_partitions) { - // TODO(antaljanosbenjamin): deal with revocation CurrentMetrics::sub(CurrentMetrics::KafkaAssignedPartitions, topic_partitions.size()); ProfileEvents::increment(ProfileEvents::KafkaRebalanceRevocations); From 06160d2423de9ca59dbc5f8d26e3de26cfebc308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 4 Sep 2023 13:19:32 +0000 Subject: [PATCH 0015/2361] Do not erase already fetched messages on rebalance --- src/Storages/Kafka/KafkaConsumer2.cpp | 48 ++++--------------------- src/Storages/Kafka/KafkaConsumer2.h | 8 ++--- src/Storages/Kafka/StorageKafka2.cpp | 52 ++++++++++++++------------- 3 files changed, 35 insertions(+), 73 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 1032a5c8a5f..55f65f3ce74 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -120,30 +120,10 @@ KafkaConsumer2::KafkaConsumer2( CurrentMetrics::sub(CurrentMetrics::KafkaConsumersWithAssignment, 1); } - // we can not flush data to target from that point (it is pulled, not pushed) - // so the best we can now it to - // 1) repeat last commit in sync mode (async could be still in queue, we need to be sure is is properly committed before rebalance) - // 2) stop / brake the current reading: - // * clean buffered non-commited messages - // * set flag / flush - - cleanUnprocessed(); - - stalled_status = StalledStatus::REBALANCE_HAPPENED; assignment.reset(); queues.clear(); needs_offset_update = true; waited_for_assignment = 0; - - // for 
now we use slower (but reliable) sync commit in main loop, so no need to repeat - // try - // { - // consumer->commit(); - // } - // catch (cppkafka::HandleException & e) - // { - // LOG_WARNING(log, "Commit error: {}", e.what()); - // } }); consumer->set_rebalance_error_callback( @@ -170,7 +150,7 @@ KafkaConsumer2::~KafkaConsumer2() { LOG_ERROR(log, "Error during unsubscribe: {}", e.what()); } - drain(); + drainConsumerQueue(); } } catch (const cppkafka::HandleException & e) @@ -179,18 +159,15 @@ KafkaConsumer2::~KafkaConsumer2() } } -// Needed to drain rest of the messages / queued callback calls from the consumer -// after unsubscribe, otherwise consumer will hang on destruction +// Needed to drain rest of the messages / queued callback calls from the consumer after unsubscribe, otherwise consumer +// will hang on destruction. Partition queues doesn't have to be attached as events are not handled by those queues. // see https://github.com/edenhill/librdkafka/issues/2077 // https://github.com/confluentinc/confluent-kafka-go/issues/189 etc. -void KafkaConsumer2::drain() +void KafkaConsumer2::drainConsumerQueue() { auto start_time = std::chrono::steady_clock::now(); cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); - for (auto & [tp, queue] : queues) - queue.forward_to_queue(consumer->get_consumer_queue()); - while (true) { auto msg = consumer->poll(100ms); @@ -224,13 +201,6 @@ void KafkaConsumer2::drain() } } -void KafkaConsumer2::cleanUnprocessed() -{ - messages.clear(); - current = messages.begin(); - offsets_stored = 0; -} - void KafkaConsumer2::pollEvents() { // All the partition queues are detached, so the consumer shouldn't be able to poll any messages @@ -263,12 +233,10 @@ KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const bool KafkaConsumer2::polledDataUnusable(const TopicPartition & topic_partition) const { - const auto consumer_in_wrong_state - = (stalled_status != StalledStatus::NOT_STALLED) && (stalled_status != StalledStatus::NO_MESSAGES_RETURNED); const auto different_topic_partition = current == messages.end() ? false : (current->get_topic() != topic_partition.topic || current->get_partition() != topic_partition.partition_id); - return consumer_in_wrong_state || different_topic_partition; + return different_topic_partition; } KafkaConsumer2::TopicPartitions const * KafkaConsumer2::getKafkaAssignment() const @@ -321,6 +289,7 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) return getNextMessage(); + // TODO(antaljanosbenjamin): check if we should poll new messages or not while (true) { stalled_status = StalledStatus::NO_MESSAGES_RETURNED; @@ -346,10 +315,6 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) { return nullptr; } - else if (stalled_status == StalledStatus::REBALANCE_HAPPENED) - { - return nullptr; - } if (new_messages.empty()) { @@ -452,7 +417,6 @@ void KafkaConsumer2::resetIfStopped() if (stopped) { stalled_status = StalledStatus::CONSUMER_STOPPED; - cleanUnprocessed(); } } } diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 5b0f7fca465..d5351af1bd2 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -99,8 +99,6 @@ public: inline bool isStalled() const { return stalled_status != StalledStatus::NOT_STALLED; } - bool polledDataUnusable(const TopicPartition & topic_partition) const; - // Returns the topic partitions that the consumer got from rebalancing the consumer group. 
If the consumer received // no topic partitions or all of them were revoked, it returns a null pointer. TopicPartitions const * getKafkaAssignment() const; @@ -133,7 +131,6 @@ private: { NOT_STALLED, NO_MESSAGES_RETURNED, - REBALANCE_HAPPENED, CONSUMER_STOPPED, NO_ASSIGNMENT, ERRORS_RETURNED @@ -143,7 +140,6 @@ private: Poco::Logger * log; const size_t batch_size = 1; const size_t poll_timeout = 0; - size_t offsets_stored = 0; StalledStatus stalled_status = StalledStatus::NO_MESSAGES_RETURNED; @@ -162,8 +158,8 @@ private: std::unordered_map queues; const Names topics; - void drain(); - void cleanUnprocessed(); + bool polledDataUnusable(const TopicPartition & topic_partition) const; + void drainConsumerQueue(); void resetIfStopped(); /// Return number of messages with an error. size_t filterMessageErrors(); diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index c01337c6305..a1091d1ac42 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -701,6 +701,26 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to StreamingFormatExecutor executor(non_virtual_header, input_format, std::move(on_error)); + + Poco::Timespan max_execution_time = kafka_settings->kafka_flush_interval_ms.changed + ? kafka_settings->kafka_flush_interval_ms + : getContext()->getSettingsRef().stream_flush_interval_ms; + + Stopwatch total_stopwatch{CLOCK_MONOTONIC_COARSE}; + + const auto check_time_limit = [&max_execution_time, &total_stopwatch]() + { + if (max_execution_time != 0) + { + auto elapsed_ns = total_stopwatch.elapsed(); + + if (elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) + return false; + } + + return true; + }; + while (true) { size_t new_rows = 0; @@ -713,13 +733,6 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to if (new_rows) { - // In read_kafka_message(), KafkaConsumer::nextImpl() - // will be called, that may make something unusable, i.e. clean - // KafkaConsumer::messages, which is accessed from - // KafkaConsumer::currentTopic() (and other helpers). 
- if (consumer.isStalled()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Polled messages became unusable"); - ProfileEvents::increment(ProfileEvents::KafkaRowsRead, new_rows); const auto & header_list = consumer.currentHeaderList(); @@ -779,10 +792,7 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to total_rows = total_rows + new_rows; batch_info.last_offset = consumer.currentOffset(); } - else if (consumer.polledDataUnusable(topic_partition)) - { - break; - } + // TODO(antaljanosbenjamin): think about this when rebalance is happening, because `isStalled()` will return true else if (consumer.isStalled()) { ++failed_poll_attempts; @@ -801,9 +811,8 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to } if (!consumer.hasMorePolledMessages() - && (total_rows >= kafka_settings->kafka_max_block_size || /*!checkTimeLimit() - ||*/ - failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS)) + && (total_rows >= kafka_settings->kafka_max_block_size || !check_time_limit() + || failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS || consumer.needsOffsetUpdate())) { break; } @@ -813,14 +822,6 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to { return {}; } - else if (consumer.polledDataUnusable(topic_partition)) - { - // the rows were counted already before by KafkaRowsRead, - // so let's count the rows we ignore separately - // (they will be retried after the rebalance) - ProfileEvents::increment(ProfileEvents::KafkaRowsRejected, total_rows); - return {}; - } /// MATERIALIZED columns can be added here, but I think // they are not needed here: @@ -947,14 +948,15 @@ bool StorageKafka2::streamToViews(size_t idx) consumer_info.locks = std::move(*maybe_locks); consumer_info.topic_partitions.reserve(current_assignment->size()); - for (const auto& topic_partition : *current_assignment) { + for (const auto & topic_partition : *current_assignment) + { TopicPartition topic_partition_copy{topic_partition}; - if( const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; maybe_committed_offset.has_value()) + if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; + maybe_committed_offset.has_value()) topic_partition_copy.offset = *maybe_committed_offset + 1; else topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); - } } consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); From 76e05d7c949eb23543be35ba4992409c8cead865 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 4 Sep 2023 14:48:15 +0000 Subject: [PATCH 0016/2361] Move TODO to the correct place --- src/Storages/Kafka/StorageKafka2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index a1091d1ac42..2f2e390f7b9 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -583,7 +583,6 @@ std::optional StorageKafka2::lockTopicPartit for (auto try_count{0}; try_count < 10; ++try_count) { Coordination::Responses responses; - // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. 
auto & keeper_ref = getZooKeeper(); if (const auto code = keeper_ref.tryMulti(ops, responses); code == Coordination::Error::ZOK) @@ -1038,6 +1037,7 @@ zkutil::ZooKeeper & StorageKafka2::getZooKeeper() { if (keeper->expired()) { + // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. keeper = keeper->startNewSession(); //TODO(antaljanosbenjamin): handle ephemeral nodes } From fee6c0d0de2da883fd22b2713ba3a89836531377 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 4 Sep 2023 14:49:24 +0000 Subject: [PATCH 0017/2361] Handle errors properly --- src/Storages/Kafka/KafkaConsumer2.cpp | 2 ++ src/Storages/Kafka/StorageKafka2.cpp | 32 +++++++++++++++++++-------- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 55f65f3ce74..bcad31e82df 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -268,6 +268,8 @@ void KafkaConsumer2::updateOffsets(const TopicPartitions & topic_partitions) void KafkaConsumer2::initializeQueues(const cppkafka::TopicPartitionList & topic_partitions) { queues.clear(); + messages.clear(); + current = messages.end(); // cppkafka itself calls assign(), but in order to detach the queues here we have to do the assignment manually. Later on we have to reassign the topic partitions with correct offsets. consumer->assign(topic_partitions); for (const auto & topic_partition : topic_partitions) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 2f2e390f7b9..86f6dec71ad 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -618,7 +619,13 @@ std::optional StorageKafka2::lockTopicPartit lock_info.committed_offset = getNumber(keeper_ref, *path_it + commit_file_name); lock_info.intent_size = getNumber(keeper_ref, *path_it + intent_file_name); - + LOG_TRACE( + log, + "Locked topic partition: {}:{} at offset {} with intent size {}", + tp_it->topic, + tp_it->partition_id, + lock_info.committed_offset.value_or(0), + lock_info.intent_size.value_or(0)); locks.emplace(TopicPartition(*tp_it), std::move(lock_info)); } } @@ -985,15 +992,21 @@ bool StorageKafka2::streamToViews(size_t idx) consumer_info.consume_from_topic_partition_index = (consumer_info.consume_from_topic_partition_index + 1) % consumer_info.topic_partitions.size(); - auto [blocks, last_offset] = pollConsumer(*consumer_info.consumer, topic_partition, kafka_context); + bool needs_offset_reset = false; + SCOPE_EXIT({ + if (!needs_offset_reset) + return; + consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); + }); + auto [blocks, last_read_offset] = pollConsumer(*consumer_info.consumer, topic_partition, kafka_context); if (blocks.empty()) { LOG_TRACE(log, "Consumer #{} didn't get any messages", idx); + needs_offset_reset = false; return true; } - auto converting_dag = ActionsDAG::makeConvertingActions( blocks.front().cloneEmpty().getColumnsWithTypeAndName(), block_io.pipeline.getHeader().getColumnsWithTypeAndName(), @@ -1010,8 +1023,8 @@ bool StorageKafka2::streamToViews(size_t idx) // It will be cancelled on underlying layer (kafka buffer) auto & lock_info = consumer_info.locks.at(topic_partition); - const auto intent = lock_info.committed_offset.value_or(0); - saveIntent(topic_partition, intent); + lock_info.intent_size = 
last_read_offset - lock_info.committed_offset.value_or(0); + saveIntent(topic_partition, *lock_info.intent_size); std::atomic_size_t rows = 0; { block_io.pipeline.complete(Pipe{std::make_shared(std::move(blocks))}); @@ -1021,10 +1034,11 @@ bool StorageKafka2::streamToViews(size_t idx) executor.execute(); } - saveCommittedOffset(topic_partition, last_offset); - lock_info.intent_size = intent; - lock_info.committed_offset = last_offset; - topic_partition.offset = last_offset; + saveCommittedOffset(topic_partition, last_read_offset); + lock_info.intent_size.reset(); + lock_info.committed_offset = last_read_offset; + topic_partition.offset = last_read_offset + 1; + needs_offset_reset = false; UInt64 milliseconds = watch.elapsedMilliseconds(); LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(rows), table_id.getNameForLogs(), milliseconds); From cc3b48e63254a94d404b9ef7f98744c7b18caca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 18 Sep 2023 12:41:08 +0000 Subject: [PATCH 0018/2361] fix typo --- src/Storages/Kafka/StorageKafkaCommon.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 0df4faa9030..6d9ea13b844 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -175,9 +175,9 @@ void KafkaConfigLoader::loadTopicConfig( /// Read topic name between ... const String kafka_topic_path = config_prefix + "." + tag; - const String kafpa_topic_name_path = kafka_topic_path + "." + String{CONFIG_NAME_TAG}; + const String kafka_topic_name_path = kafka_topic_path + "." + String{CONFIG_NAME_TAG}; - const String topic_name = config.getString(kafpa_topic_name_path); + const String topic_name = config.getString(kafka_topic_name_path); if (topic_name == topic) { /// Found it! Now read the per-topic configuration into cppkafka. 
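At this point in the series the streaming path of StorageKafka2::streamToViews() has settled into a save-intent / insert / commit sequence per locked topic partition. The following is only a rough sketch pieced together from the hunks above (the pipeline plumbing and the exact znode layout behind saveIntent() and saveCommittedOffset() are left out, and the surrounding names are context from the code above rather than a complete function):

    // Sketch only: `topic_partition` is one of the locked partitions and `lock_info`
    // is its LockedTopicPartitionInfo taken from consumer_info.locks.
    auto [blocks, last_read_offset] = pollConsumer(*consumer_info.consumer, topic_partition, kafka_context);

    // 1. Record the intent: how many messages past the committed offset are about to be written.
    lock_info.intent_size = last_read_offset - lock_info.committed_offset.value_or(0);
    saveIntent(topic_partition, *lock_info.intent_size);

    // 2. Materialize `blocks` into the dependent views through block_io.pipeline.

    // 3. Commit: persist the new offset, drop the intention node, advance the in-memory state.
    saveCommittedOffset(topic_partition, last_read_offset);
    lock_info.intent_size.reset();
    lock_info.committed_offset = last_read_offset;
    topic_partition.offset = last_read_offset + 1;

The needs_offset_reset flag and its SCOPE_EXIT added above form the rollback path: when the sequence is cut short before step 3, the consumer is pointed back at consumer_info.topic_partitions so the uncommitted messages can be polled again, while the intention node records how large the unfinished insert was.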
From 83a3c463de518be7bf6237a26aee571cd01bd62e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 9 Oct 2023 15:23:18 +0000 Subject: [PATCH 0019/2361] Try to handle keeper session expiry --- src/Storages/Kafka/StorageKafka2.cpp | 209 +++++++++++++++------------ src/Storages/Kafka/StorageKafka2.h | 13 +- 2 files changed, 123 insertions(+), 99 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 86f6dec71ad..5fbab603928 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -84,20 +85,22 @@ namespace DB namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; - extern const int LOGICAL_ERROR; - extern const int QUERY_NOT_ALLOWED; +extern const int NOT_IMPLEMENTED; +extern const int LOGICAL_ERROR; +extern const int QUERY_NOT_ALLOWED; } namespace { - constexpr auto RESCHEDULE_MS = 500; - // const auto CLEANUP_TIMEOUT_MS = 3000; - constexpr auto MAX_THREAD_WORK_DURATION_MS = 60000; // once per minute leave do reschedule (we can't lock threads in pool forever) +constexpr auto RESCHEDULE_MS = 500; +// const auto CLEANUP_TIMEOUT_MS = 3000; +constexpr auto MAX_THREAD_WORK_DURATION_MS = 60000; // once per minute leave do reschedule (we can't lock threads in pool forever) - constexpr auto MAX_FAILED_POLL_ATTEMPTS = 10; +constexpr auto MAX_FAILED_POLL_ATTEMPTS = 10; } +// TODO(antaljanosbenjamin): check performance + StorageKafka2::StorageKafka2( const StorageID & table_id_, ContextPtr context_, @@ -162,14 +165,10 @@ SettingsChanges StorageKafka2::createSettingsAdjustments() } if (!kafka_settings->input_format_allow_errors_ratio.changed) - { kafka_settings->input_format_allow_errors_ratio = 0.; - } if (!kafka_settings->input_format_allow_errors_num.changed) - { kafka_settings->input_format_allow_errors_num = kafka_settings->kafka_skip_broken_messages.value; - } if (!schema_name.empty()) result.emplace_back("format_schema", schema_name); @@ -188,9 +187,7 @@ Names StorageKafka2::parseTopics(String topic_list) Names result; boost::split(result, topic_list, [](char c) { return c == ','; }); for (String & topic : result) - { boost::trim(topic); - } return result; } @@ -254,7 +251,7 @@ void StorageKafka2::startup() { try { - consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i)}); + consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = keeper}); ++num_created_consumers; } catch (const cppkafka::Exception &) @@ -263,12 +260,21 @@ void StorageKafka2::startup() } } - createKeeperNodes(consumers.front().consumer); + try + { + createKeeperNodes(consumers.front().consumer); + } + catch (const Exception & ex) + { + if (ex.code() == ErrorCodes::LOGICAL_ERROR) + throw; + + tryLogCurrentException(log, __PRETTY_FUNCTION__); + } + // Start the reader thread for (auto & task : tasks) - { task->holder->activateAndSchedule(); - } } @@ -290,7 +296,7 @@ void StorageKafka2::shutdown() void StorageKafka2::drop() { - getZooKeeper().removeRecursive(kafka_settings->kafka_keeper_path); + getZooKeeper()->removeRecursive(kafka_settings->kafka_keeper_path); } KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) @@ -300,13 +306,9 @@ KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) conf.set("metadata.broker.list", brokers); conf.set("group.id", group); if (num_consumers > 1) - { conf.set("client.id", fmt::format("{}-{}", client_id, 
consumer_number)); - } else - { conf.set("client.id", client_id); - } conf.set("client.software.name", VERSION_NAME); conf.set("client.software.version", VERSION_DESCRIBE); conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start @@ -505,33 +507,30 @@ bool StorageKafka2::checkDependencies(const StorageID & table_id) namespace { - const std::string lock_file_name{"lock"}; - const std::string commit_file_name{"committed"}; - const std::string intent_file_name{"intention"}; +const std::string lock_file_name{"lock"}; +const std::string commit_file_name{"committed"}; +const std::string intent_file_name{"intention"}; - std::optional getNumber(zkutil::ZooKeeper & keeper, const std::string & path) - { - std::string result; - if (!keeper.tryGet(path, result)) - return std::nullopt; +std::optional getNumber(zkutil::ZooKeeper & keeper, const std::string & path) +{ + std::string result; + if (!keeper.tryGet(path, result)) + return std::nullopt; - return DB::parse(result); - } + return DB::parse(result); +} } void StorageKafka2::createKeeperNodes(const KafkaConsumer2Ptr & consumer) { // TODO(antaljanosbenjamin): check config with other StorageKafkas + // TODO(antaljanosbenjamin): maybe also create a node in `keeper_path/replicas/` to note that this replica has the table? const auto & keeper_path = kafka_settings->kafka_keeper_path.value; - auto & keeper_ref = getZooKeeper(); - - if (keeper_ref.exists(keeper_path)) - { + if (keeper->exists(keeper_path)) return; - } - keeper_ref.createAncestors(keeper_path); + keeper->createAncestors(keeper_path); Coordination::Requests ops; ops.emplace_back(zkutil::makeCreateRequest(keeper_path, "", zkutil::CreateMode::Persistent)); @@ -553,14 +552,13 @@ void StorageKafka2::createKeeperNodes(const KafkaConsumer2Ptr & consumer) Coordination::Responses responses; - const auto code = keeper_ref.tryMulti(ops, responses); + const auto code = keeper->tryMulti(ops, responses); if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS) - { zkutil::KeeperMultiException::check(code, ops, responses); - } } -std::optional StorageKafka2::lockTopicPartitions(const TopicPartitions & topic_partitions) +std::optional +StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const TopicPartitions & topic_partitions) { // TODO(antaljanosbenjamin): Review this function with somebody who know keeper better than me const auto uuid_as_string = toString(uuid); @@ -568,9 +566,7 @@ std::optional StorageKafka2::lockTopicPartit std::vector topic_partition_paths; topic_partition_paths.reserve(topic_partitions.size()); for (const auto & topic_partition : topic_partitions) - { topic_partition_paths.emplace_back(getTopicPartitionPath(topic_partition)); - } Coordination::Requests ops; @@ -584,21 +580,18 @@ std::optional StorageKafka2::lockTopicPartit for (auto try_count{0}; try_count < 10; ++try_count) { Coordination::Responses responses; - auto & keeper_ref = getZooKeeper(); - if (const auto code = keeper_ref.tryMulti(ops, responses); code == Coordination::Error::ZOK) + if (const auto code = keeper_to_use.tryMulti(ops, responses); code == Coordination::Error::ZOK) { success = true; break; } - else - { + else if (code != Coordination::Error::ZNODEEXISTS) zkutil::KeeperMultiException::check(code, ops, responses); - } - // TODO(antaljanosbenjamin): Probably handle the most common errors + // TODO(antaljanosbenjamin): We shouldn't wait here, but let's give the other consumers to release the locks for (const auto 
& topic_partition_path : topic_partition_paths) - keeper_ref.waitForDisappear(topic_partition_path + lock_file_name); + keeper_to_use.waitForDisappear(topic_partition_path + lock_file_name); } if (!success) @@ -608,16 +601,15 @@ std::optional StorageKafka2::lockTopicPartit // We have the locks TopicPartitionLocks locks; { - auto & keeper_ref = getZooKeeper(); auto tp_it = topic_partitions.begin(); auto path_it = topic_partition_paths.begin(); for (; tp_it != topic_partitions.end(); ++tp_it, ++path_it) { using zkutil::EphemeralNodeHolder; - LockedTopicPartitionInfo lock_info{.lock = EphemeralNodeHolder::existing(*path_it + lock_file_name, keeper_ref)}; + LockedTopicPartitionInfo lock_info{.lock = EphemeralNodeHolder::existing(*path_it + lock_file_name, keeper_to_use)}; - lock_info.committed_offset = getNumber(keeper_ref, *path_it + commit_file_name); - lock_info.intent_size = getNumber(keeper_ref, *path_it + intent_file_name); + lock_info.committed_offset = getNumber(keeper_to_use, *path_it + commit_file_name); + lock_info.intent_size = getNumber(keeper_to_use, *path_it + intent_file_name); LOG_TRACE( log, @@ -634,18 +626,17 @@ std::optional StorageKafka2::lockTopicPartit } -void StorageKafka2::saveCommittedOffset(const TopicPartition & topic_partition, int64_t committed_offset) +void StorageKafka2::saveCommittedOffset(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t committed_offset) { const auto partition_prefix = getTopicPartitionPath(topic_partition); - auto & keeper_ref = getZooKeeper(); - keeper_ref.createOrUpdate(partition_prefix + commit_file_name, toString(committed_offset), zkutil::CreateMode::Persistent); + keeper_to_use.createOrUpdate(partition_prefix + commit_file_name, toString(committed_offset), zkutil::CreateMode::Persistent); // This is best effort, if it fails we will try to remove in the next round - keeper_ref.tryRemove(partition_prefix + intent_file_name, -1); + keeper_to_use.tryRemove(partition_prefix + intent_file_name, -1); } -void StorageKafka2::saveIntent(const TopicPartition & topic_partition, int64_t intent) +void StorageKafka2::saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t intent) { - getZooKeeper().createOrUpdate( + keeper_to_use.createOrUpdate( getTopicPartitionPath(topic_partition) + intent_file_name, toString(intent), zkutil::CreateMode::Persistent); } @@ -825,9 +816,7 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to } if (total_rows == 0) - { return {}; - } /// MATERIALIZED columns can be added here, but I think // they are not needed here: @@ -922,20 +911,52 @@ bool StorageKafka2::streamToViews(size_t idx) ProfileEvents::increment(ProfileEvents::KafkaBackgroundReads); auto & consumer_info = consumers[idx]; + try + { + LOG_TRACE(log, "Trying to consume from consumer {}", idx); + const auto rows = streamFromConsumer(consumer_info); + if (rows.has_value()) + { + const auto milliseconds = watch.elapsedMilliseconds(); + LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(*rows), table_id.getNameForLogs(), milliseconds); + } + else + { + LOG_DEBUG(log, "Couldn't push any rows"); + return true; + } + } + catch (const zkutil::KeeperException & e) + { + if (Coordination::isHardwareError(e.code)) + { + consumer_info.locks.clear(); + consumer_info.keeper = getZooKeeper(); + } + else + throw; + + // TODO(antaljanosbenjamin): Should we reschedule in case of keeper error? 
+ } + return false; +} + + +std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInfo & consumer_info) +{ auto & consumer = consumer_info.consumer; + auto & keeper_to_use = *consumer_info.keeper; // To keep the consumer alive - LOG_TRACE(log, "Polling consumer #{} for events", idx); + LOG_TRACE(log, "Polling consumer for events"); consumer->pollEvents(); if (nullptr == consumer->getKafkaAssignment()) - return true; + return std::nullopt; + LOG_TRACE(log, "Consumer has assignment"); - - LOG_TRACE(log, "Consumer #{} has assignment", idx); - - if (consumer->needsOffsetUpdate()) + if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) { - LOG_TRACE(log, "Consumer #{} needs update offset", idx); + LOG_TRACE(log, "Consumer needs update offset"); consumer_info.consume_from_topic_partition_index = 0; consumer_info.locks.clear(); @@ -943,7 +964,7 @@ bool StorageKafka2::streamToViews(size_t idx) if (const auto * current_assignment = consumer->getKafkaAssignment(); nullptr != current_assignment) { - auto maybe_locks = lockTopicPartitions(*current_assignment); + auto maybe_locks = lockTopicPartitions(keeper_to_use, *current_assignment); if (!maybe_locks.has_value()) { @@ -962,15 +983,20 @@ bool StorageKafka2::streamToViews(size_t idx) topic_partition_copy.offset = *maybe_committed_offset + 1; else topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; + consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); } + consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); + } + else + { + LOG_TRACE(log, "Consumer lost assignment while trying to lock partitions"); } - consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); } // Create an INSERT query for streaming data auto insert = std::make_shared(); - insert->table_id = table_id; + insert->table_id = getStorageID(); auto kafka_context = Context::createCopy(getContext()); kafka_context->makeQueryContext(); @@ -984,15 +1010,14 @@ bool StorageKafka2::streamToViews(size_t idx) auto & topic_partition = consumer_info.topic_partitions[consumer_info.consume_from_topic_partition_index]; LOG_TRACE( log, - "Consumer #{} will fetch {}:{} (consume_from_topic_partition_index is {})", - idx, + "Will fetch {}:{} (consume_from_topic_partition_index is {})", topic_partition.topic, topic_partition.partition_id, consumer_info.consume_from_topic_partition_index); consumer_info.consume_from_topic_partition_index = (consumer_info.consume_from_topic_partition_index + 1) % consumer_info.topic_partitions.size(); - bool needs_offset_reset = false; + bool needs_offset_reset = true; SCOPE_EXIT({ if (!needs_offset_reset) return; @@ -1002,9 +1027,9 @@ bool StorageKafka2::streamToViews(size_t idx) if (blocks.empty()) { - LOG_TRACE(log, "Consumer #{} didn't get any messages", idx); + LOG_TRACE(log, "Didn't get any messages"); needs_offset_reset = false; - return true; + return std::nullopt; } auto converting_dag = ActionsDAG::makeConvertingActions( @@ -1015,16 +1040,14 @@ bool StorageKafka2::streamToViews(size_t idx) auto converting_actions = std::make_shared(std::move(converting_dag)); for (auto & block : blocks) - { converting_actions->execute(block); - } // We can't cancel during copyData, as it's not aware of commits and other kafka-related stuff. 
// It will be cancelled on underlying layer (kafka buffer) auto & lock_info = consumer_info.locks.at(topic_partition); lock_info.intent_size = last_read_offset - lock_info.committed_offset.value_or(0); - saveIntent(topic_partition, *lock_info.intent_size); + saveIntent(keeper_to_use, topic_partition, *lock_info.intent_size); std::atomic_size_t rows = 0; { block_io.pipeline.complete(Pipe{std::make_shared(std::move(blocks))}); @@ -1034,28 +1057,24 @@ bool StorageKafka2::streamToViews(size_t idx) executor.execute(); } - saveCommittedOffset(topic_partition, last_read_offset); + saveCommittedOffset(keeper_to_use, topic_partition, last_read_offset); lock_info.intent_size.reset(); lock_info.committed_offset = last_read_offset; topic_partition.offset = last_read_offset + 1; needs_offset_reset = false; - - UInt64 milliseconds = watch.elapsedMilliseconds(); - LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(rows), table_id.getNameForLogs(), milliseconds); - - return false; + return rows; } -zkutil::ZooKeeper & StorageKafka2::getZooKeeper() -{ - if (keeper->expired()) + zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() { - // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. - keeper = keeper->startNewSession(); - //TODO(antaljanosbenjamin): handle ephemeral nodes - } - return *keeper; + if (keeper->expired()) + { + // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. + keeper = keeper->startNewSession(); + //TODO(antaljanosbenjamin): handle ephemeral nodes + } + return keeper; } diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 89d5019bd4b..4bc4240fb48 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -81,6 +81,7 @@ public: private: // Configuration and state + std::mutex keeper_mutex; zkutil::ZooKeeperPtr keeper; std::unique_ptr kafka_settings; Macros::MacroExpansionInfo macros_info; @@ -125,6 +126,7 @@ private: size_t consume_from_topic_partition_index{0}; TopicPartitions topic_partitions; // TODO(antaljanosbenjamin): maybe recreate the ephemeral node + zkutil::ZooKeeperPtr keeper; TopicPartitionLocks locks; }; @@ -172,18 +174,21 @@ private: static String getDefaultClientId(const StorageID & table_id_); bool streamToViews(size_t idx); + + std::optional streamFromConsumer(ConsumerAndAssignmentInfo& consumer_info); + bool checkDependencies(const StorageID & table_id); // Takes lock over topic partitions and set's the committed offset in topic_partitions void createKeeperNodes(const KafkaConsumer2Ptr & consumer); - std::optional lockTopicPartitions(const TopicPartitions & topic_partitions); - void saveCommittedOffset(const TopicPartition & topic_partition, int64_t committed_offset); - void saveIntent(const TopicPartition & topic_partition, int64_t intent); + std::optional lockTopicPartitions(zkutil::ZooKeeper& keeper_to_use, const TopicPartitions & topic_partitions); + void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t committed_offset); + void saveIntent(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t intent); PolledBatchInfo pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & topic_partition, const ContextPtr & context); - zkutil::ZooKeeper& getZooKeeper(); + zkutil::ZooKeeperPtr getZooKeeper(); std::string getTopicPartitionPath(const TopicPartition& topic_partition ); }; From 
fa5150130d1db39f98b8c5ed191cc7bf4564a27d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Feb 2024 16:15:43 +0000 Subject: [PATCH 0020/2361] Fix compilation issues --- src/Storages/Kafka/StorageKafka2.cpp | 12 ++++++------ src/Storages/Kafka/StorageKafka2.h | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 5fbab603928..4ee8523a4a9 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -48,7 +48,7 @@ #include #include #include "Storages/Kafka/KafkaConsumer2.h" -#include "config_version.h" +#include "Common/config_version.h" #if USE_KRB5 # include @@ -136,7 +136,7 @@ StorageKafka2::StorageKafka2( if (thread_per_consumer) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "The new Kafka storage cannot use multiple threads yet!"); - if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) + if (kafka_settings->kafka_handle_error_mode == StreamingHandleErrorMode::STREAM) { kafka_settings->input_format_allow_errors_num = 0; kafka_settings->input_format_allow_errors_ratio = 0; @@ -278,7 +278,7 @@ void StorageKafka2::startup() } -void StorageKafka2::shutdown() +void StorageKafka2::shutdown(bool) { for (auto & task : tasks) { @@ -654,7 +654,7 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to // otherwise external iteration will reuse that and logic will became even more fuzzy MutableColumns virtual_columns = virtual_header.cloneEmptyColumns(); - auto put_error_to_stream = kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM; + auto put_error_to_stream = kafka_settings->kafka_handle_error_mode == StreamingHandleErrorMode::STREAM; EmptyReadBuffer empty_buf; auto input_format = FormatFactory::instance().getInput( @@ -1095,7 +1095,7 @@ NamesAndTypesList StorageKafka2::getVirtuals() const {"_timestamp_ms", std::make_shared(std::make_shared(3))}, {"_headers.name", std::make_shared(std::make_shared())}, {"_headers.value", std::make_shared(std::make_shared())}}; - if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) + if (kafka_settings->kafka_handle_error_mode == StreamingHandleErrorMode::STREAM) { result.push_back({"_raw_message", std::make_shared()}); result.push_back({"_error", std::make_shared()}); @@ -1115,7 +1115,7 @@ Names StorageKafka2::getVirtualColumnNames() const "_headers.name", "_headers.value", }; - if (kafka_settings->kafka_handle_error_mode == HandleKafkaErrorMode::STREAM) + if (kafka_settings->kafka_handle_error_mode == StreamingHandleErrorMode::STREAM) { result.push_back({"_raw_message"}); result.push_back({"_error"}); diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 4bc4240fb48..17606ea60df 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -54,7 +54,7 @@ public: bool noPushingToViews() const override { return true; } void startup() override; - void shutdown() override; + void shutdown(bool is_drop) override; void drop() override; @@ -77,7 +77,7 @@ public: NamesAndTypesList getVirtuals() const override; Names getVirtualColumnNames() const; - HandleKafkaErrorMode getHandleKafkaErrorMode() const { return kafka_settings->kafka_handle_error_mode; } + StreamingHandleErrorMode getHandleKafkaErrorMode() const { return kafka_settings->kafka_handle_error_mode; } private: // Configuration and state From a4c176f0d508d98456a73a444b3d3892814df1e8 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 14 Feb 2024 17:57:27 +0000 Subject: [PATCH 0021/2361] Propagate intent size to consumer --- src/Storages/Kafka/KafkaConsumer2.cpp | 32 ++--- src/Storages/Kafka/KafkaConsumer2.h | 4 +- src/Storages/Kafka/StorageKafka2.cpp | 142 ++++++++++++---------- src/Storages/Kafka/StorageKafka2.h | 72 ++++++----- src/Storages/Kafka/StorageKafkaCommon.cpp | 6 +- 5 files changed, 137 insertions(+), 119 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index bcad31e82df..01169596e1a 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -280,7 +280,7 @@ void KafkaConsumer2::initializeQueues(const cppkafka::TopicPartitionList & topic } // it do the poll when needed -ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) +ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition, const std::optional & message_count) { resetIfStopped(); @@ -288,10 +288,12 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) return nullptr; if (hasMorePolledMessages()) - return getNextMessage(); + { + if (auto next_message = getNextMessage(); next_message) + return next_message; + } - - // TODO(antaljanosbenjamin): check if we should poll new messages or not + // TODO(antaljanosbenjamin): check if we should poll new messages or not while (true) { stalled_status = StalledStatus::NO_MESSAGES_RETURNED; @@ -309,8 +311,9 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) queue_to_poll_from.forward_to_queue(consumer->get_consumer_queue()); SCOPE_EXIT({ queue_to_poll_from.disable_queue_forwarding(); }); + const auto messages_to_pull = message_count.value_or(batch_size); /// Don't drop old messages immediately, since we may need them for virtual columns. - auto new_messages = consumer->poll_batch(batch_size, std::chrono::milliseconds(actual_poll_timeout_ms)); + auto new_messages = consumer->poll_batch(messages_to_pull, std::chrono::milliseconds(actual_poll_timeout_ms)); resetIfStopped(); if (stalled_status == StalledStatus::CONSUMER_STOPPED) @@ -376,17 +379,18 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition) ReadBufferPtr KafkaConsumer2::getNextMessage() { - if (current == messages.end()) - return nullptr; + while (current != messages.end()) + { + const auto * data = current->get_payload().get_data(); + size_t size = current->get_payload().get_size(); + ++current; - const auto * data = current->get_payload().get_data(); - size_t size = current->get_payload().get_size(); - ++current; + // TODO(antaljanosbenjamin): When this can be nullptr? + if (data) + return std::make_shared(data, size); + } - if (data) - return std::make_shared(data, size); - - return getNextMessage(); + return nullptr; } size_t KafkaConsumer2::filterMessageErrors() diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index d5351af1bd2..b6967b0c5fb 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -103,7 +103,7 @@ public: // no topic partitions or all of them were revoked, it returns a null pointer. 
TopicPartitions const * getKafkaAssignment() const; - // As the main source of offsets is not Kafka, the offsets needs to pushed to the consumer from outside + // As the main source of offsets is not Kafka, the offsets needs to be pushed to the consumer from outside bool needsOffsetUpdate() const { return needs_offset_update; } // Returns true if it received new assignment and could update the internal state accordingly, false otherwise @@ -112,7 +112,7 @@ public: /// Polls batch of messages from Kafka and returns read buffer containing the next message or /// nullptr when there are no messages to process. /// TODO(antaljanosbenjamin): add batch size param - ReadBufferPtr consume(const TopicPartition & topic_partition); + ReadBufferPtr consume(const TopicPartition & topic_partition, const std::optional & message_count); // Return values for the message that's being read. String currentTopic() const { return current[-1].get_topic(); } diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 740398b2640..c658174b9d5 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -636,8 +636,11 @@ void StorageKafka2::saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPar } -StorageKafka2::PolledBatchInfo -StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & topic_partition, const ContextPtr & modified_context) +StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( + KafkaConsumer2 & consumer, + const TopicPartition & topic_partition, + std::optional message_count, + const ContextPtr & modified_context) { PolledBatchInfo batch_info; auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext()); @@ -717,7 +720,7 @@ StorageKafka2::pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & to { size_t new_rows = 0; exception_message.reset(); - if (auto buf = consumer.consume(topic_partition)) + if (auto buf = consumer.consume(topic_partition, message_count)) { ProfileEvents::increment(ProfileEvents::KafkaMessagesRead); new_rows = executor.execute(*buf); @@ -906,18 +909,72 @@ bool StorageKafka2::streamToViews(size_t idx) ProfileEvents::increment(ProfileEvents::KafkaBackgroundReads); auto & consumer_info = consumers[idx]; + auto & consumer = consumer_info.consumer; + + // To keep the consumer alive + LOG_TRACE(log, "Polling consumer for events"); + consumer->pollEvents(); + + if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) + { + // First release the locks so let other consumers acquire them ASAP + consumer_info.locks.clear(); + + const auto * current_assignment = consumer->getKafkaAssignment(); + if (current_assignment == nullptr) + { + // The consumer lost its assignment and haven't received a new one. + // TODO(antaljanosbenjamin): returning a proper value representing the state + // By returning true this function reports the current consumer as a "stalled" stream, which + return true; + } + LOG_TRACE(log, "Consumer needs update offset"); + consumer_info.consume_from_topic_partition_index = 0; + + consumer_info.locks.clear(); + consumer_info.topic_partitions.clear(); + + auto maybe_locks = lockTopicPartitions(*consumer_info.keeper, *current_assignment); + + if (!maybe_locks.has_value()) + { + // We couldn't acquire locks, probably some other consumers are still holding them. 
+ return true; + } + + consumer_info.locks = std::move(*maybe_locks); + + consumer_info.topic_partitions.reserve(current_assignment->size()); + for (const auto & topic_partition : *current_assignment) + { + TopicPartition topic_partition_copy{topic_partition}; + if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; + maybe_committed_offset.has_value()) + topic_partition_copy.offset = *maybe_committed_offset + 1; + else + topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; + + consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); + } + consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); + + } + + LOG_TRACE(log, "Consumer has assignment"); + + // Here we will try to pull messages regardless if we loose our assignment try { LOG_TRACE(log, "Trying to consume from consumer {}", idx); - const auto rows = streamFromConsumer(consumer_info); - if (rows.has_value()) + const auto maybe_rows = streamFromConsumer(consumer_info); + if (maybe_rows.has_value()) { const auto milliseconds = watch.elapsedMilliseconds(); - LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(*rows), table_id.getNameForLogs(), milliseconds); + LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(*maybe_rows), table_id.getNameForLogs(), milliseconds); } else { - LOG_DEBUG(log, "Couldn't push any rows"); + LOG_DEBUG(log, "Couldn't stream any messages"); return true; } } @@ -939,56 +996,6 @@ bool StorageKafka2::streamToViews(size_t idx) std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInfo & consumer_info) { - auto & consumer = consumer_info.consumer; - auto & keeper_to_use = *consumer_info.keeper; - // To keep the consumer alive - - LOG_TRACE(log, "Polling consumer for events"); - consumer->pollEvents(); - if (nullptr == consumer->getKafkaAssignment()) - return std::nullopt; - LOG_TRACE(log, "Consumer has assignment"); - - if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) - { - LOG_TRACE(log, "Consumer needs update offset"); - consumer_info.consume_from_topic_partition_index = 0; - - consumer_info.locks.clear(); - consumer_info.topic_partitions.clear(); - - if (const auto * current_assignment = consumer->getKafkaAssignment(); nullptr != current_assignment) - { - auto maybe_locks = lockTopicPartitions(keeper_to_use, *current_assignment); - - if (!maybe_locks.has_value()) - { - // TODO(antaljanosbenjamin): signal this somehow to caller, maybe wait a bit longer. 
- return true; - } - - consumer_info.locks = std::move(*maybe_locks); - - consumer_info.topic_partitions.reserve(current_assignment->size()); - for (const auto & topic_partition : *current_assignment) - { - TopicPartition topic_partition_copy{topic_partition}; - if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; - maybe_committed_offset.has_value()) - topic_partition_copy.offset = *maybe_committed_offset + 1; - else - topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; - - consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); - } - consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); - } - else - { - LOG_TRACE(log, "Consumer lost assignment while trying to lock partitions"); - } - } - // Create an INSERT query for streaming data auto insert = std::make_shared(); insert->table_id = getStorageID(); @@ -1018,7 +1025,8 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf return; consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); }); - auto [blocks, last_read_offset] = pollConsumer(*consumer_info.consumer, topic_partition, kafka_context); + auto [blocks, last_read_offset] + = pollConsumer(*consumer_info.consumer, topic_partition, consumer_info.locks[topic_partition].intent_size, kafka_context); if (blocks.empty()) { @@ -1040,6 +1048,7 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf // We can't cancel during copyData, as it's not aware of commits and other kafka-related stuff. // It will be cancelled on underlying layer (kafka buffer) + auto & keeper_to_use = *consumer_info.keeper; auto & lock_info = consumer_info.locks.at(topic_partition); lock_info.intent_size = last_read_offset - lock_info.committed_offset.value_or(0); saveIntent(keeper_to_use, topic_partition, *lock_info.intent_size); @@ -1057,19 +1066,20 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf lock_info.committed_offset = last_read_offset; topic_partition.offset = last_read_offset + 1; needs_offset_reset = false; + return rows; } - zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() +zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() +{ + if (keeper->expired()) { - if (keeper->expired()) - { - // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. - keeper = keeper->startNewSession(); - //TODO(antaljanosbenjamin): handle ephemeral nodes - } - return keeper; + // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. 
+ keeper = keeper->startNewSession(); + //TODO(antaljanosbenjamin): handle ephemeral nodes + } + return keeper; } diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 17606ea60df..d3735e87afc 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -80,30 +80,6 @@ public: StreamingHandleErrorMode getHandleKafkaErrorMode() const { return kafka_settings->kafka_handle_error_mode; } private: - // Configuration and state - std::mutex keeper_mutex; - zkutil::ZooKeeperPtr keeper; - std::unique_ptr kafka_settings; - Macros::MacroExpansionInfo macros_info; - const Names topics; - const String brokers; - const String group; - const String client_id; - const String format_name; - const size_t max_rows_per_message; - const String schema_name; - const size_t num_consumers; /// total number of consumers - Poco::Logger * log; - Poco::Semaphore semaphore; - const bool intermediate_commit; - const SettingsChanges settings_adjustments; - - std::atomic mv_attached = false; - - /// Can differ from num_consumers in case of exception in startup() (or if startup() hasn't been called). - /// In this case we still need to be able to shutdown() properly. - size_t num_created_consumers = 0; /// number of actually created consumers. - using TopicPartition = KafkaConsumer2::TopicPartition; using TopicPartitions = KafkaConsumer2::TopicPartitions; @@ -136,8 +112,6 @@ private: int64_t last_offset; }; - std::vector consumers; - // Stream thread struct TaskContext { @@ -145,20 +119,48 @@ private: std::atomic stream_cancelled{false}; explicit TaskContext(BackgroundSchedulePool::TaskHolder && task_) : holder(std::move(task_)) { } }; + + enum class AssignmentChange + { + NotChanged, + Updated, + Lost + }; + + // Configuration and state + std::mutex keeper_mutex; + zkutil::ZooKeeperPtr keeper; + std::unique_ptr kafka_settings; + Macros::MacroExpansionInfo macros_info; + const Names topics; + const String brokers; + const String group; + const String client_id; + const String format_name; + const size_t max_rows_per_message; + const String schema_name; + const size_t num_consumers; /// total number of consumers + Poco::Logger * log; + Poco::Semaphore semaphore; + const bool intermediate_commit; + const SettingsChanges settings_adjustments; + std::atomic mv_attached = false; + /// Can differ from num_consumers in case of exception in startup() (or if startup() hasn't been called). + /// In this case we still need to be able to shutdown() properly. + size_t num_created_consumers = 0; /// number of actually created consumers. + std::vector consumers; std::vector> tasks; bool thread_per_consumer = false; - /// For memory accounting in the librdkafka threads. std::mutex thread_statuses_mutex; std::list> thread_statuses; + /// If named_collection is specified. + String collection_name; + std::atomic shutdown_called = false; SettingsChanges createSettingsAdjustments(); KafkaConsumer2Ptr createConsumer(size_t consumer_number); - /// If named_collection is specified. - String collection_name; - - std::atomic shutdown_called = false; UUID uuid{UUIDHelpers::generateV4()}; // Update Kafka configuration with values from CH user configuration. 
@@ -175,7 +177,7 @@ private: bool streamToViews(size_t idx); - std::optional streamFromConsumer(ConsumerAndAssignmentInfo& consumer_info); + std::optional streamFromConsumer(ConsumerAndAssignmentInfo & consumer_info); bool checkDependencies(const StorageID & table_id); @@ -186,7 +188,11 @@ private: void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t committed_offset); void saveIntent(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t intent); - PolledBatchInfo pollConsumer(KafkaConsumer2 & consumer, const TopicPartition & topic_partition, const ContextPtr & context); + PolledBatchInfo pollConsumer( + KafkaConsumer2 & consumer, + const TopicPartition & topic_partition, + std::optional message_count, + const ContextPtr & context); zkutil::ZooKeeperPtr getZooKeeper(); diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index ff5bdb3e5b6..b0f23c38163 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -363,15 +363,13 @@ void registerStorageKafka(StorageFactory & factory) throw Exception( ErrorCodes::SUPPORT_IS_DISABLED, - "Storing the Kafka offsets in Keeper is experimental. " - "Set `allow_experimental_kafka_store_offsets_in_keeper` setting to enable it"); + "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_store_offsets_in_keeper` setting " + "to enable it"); return std::make_shared( args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); } - //return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); - // [[maybe_unused]] auto * a = new StorageKafka(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); }; From 0d46e7555b066298603b5e0cd4dc122e74863ebf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 20 Feb 2024 19:08:33 +0100 Subject: [PATCH 0022/2361] Fix unexpected behavior with FORMAT and SETTINGS parsing --- src/Parsers/ParserQueryWithOutput.cpp | 80 ++++++++++++++++++--------- 1 file changed, 54 insertions(+), 26 deletions(-) diff --git a/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp index 7a627ae5f6a..340abf27c31 100644 --- a/src/Parsers/ParserQueryWithOutput.cpp +++ b/src/Parsers/ParserQueryWithOutput.cpp @@ -150,37 +150,65 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec } + /// These two sections are allowed in an arbitrary order. ParserKeyword s_format("FORMAT"); - - if (s_format.ignore(pos, expected)) - { - ParserIdentifier format_p; - - if (!format_p.parse(pos, query_with_output.format, expected)) - return false; - setIdentifierSpecial(query_with_output.format); - - query_with_output.children.push_back(query_with_output.format); - } - - // SETTINGS key1 = value1, key2 = value2, ... 
ParserKeyword s_settings("SETTINGS"); - if (!query_with_output.settings_ast && s_settings.ignore(pos, expected)) - { - ParserSetQuery parser_settings(true); - if (!parser_settings.parse(pos, query_with_output.settings_ast, expected)) - return false; - query_with_output.children.push_back(query_with_output.settings_ast); - // SETTINGS after FORMAT is not parsed by the SELECT parser (ParserSelectQuery) - // Pass them manually, to apply in InterpreterSelectQuery::initSettings() - if (query->as()) + /** Why: let's take the following example: + * SELECT 1 UNION ALL SELECT 2 FORMAT TSV + * Each subquery can be put in parentheses and have its own settings: + * (SELECT 1 SETTINGS a=b) UNION ALL (SELECT 2 SETTINGS c=d) FORMAT TSV + * And the whole query can have settings: + * (SELECT 1 SETTINGS a=b) UNION ALL (SELECT 2 SETTINGS c=d) FORMAT TSV SETTINGS e=f + * A single query with output is parsed in the same way as the UNION ALL chain: + * SELECT 1 SETTINGS a=b FORMAT TSV SETTINGS e=f + * So while these forms have a slightly different meaning, they both exist: + * SELECT 1 SETTINGS a=b FORMAT TSV + * SELECT 1 FORMAT TSV SETTINGS e=f + * And due to this effect, the users expect that the FORMAT and SETTINGS may go in an arbitrary order. + * But while this work: + * (SELECT 1) UNION ALL (SELECT 2) FORMAT TSV SETTINGS d=f + * This does not work automatically, unless we explicitly allow different orders: + * (SELECT 1) UNION ALL (SELECT 2) SETTINGS d=f FORMAT TSV + * Inevitably, we also allow this: + * SELECT 1 SETTINGS a=b SETTINGS d=f FORMAT TSV + * ^^^^^^^^^^^^^^^^^^^^^ + * Because this part is consumed into ASTSelectWithUnionQuery + * and the rest into ASTQueryWithOutput. + */ + + for (size_t i = 0; i < 2; ++i) + { + if (!query_with_output.format && s_format.ignore(pos, expected)) { - auto settings = query_with_output.settings_ast->clone(); - assert_cast(settings.get())->print_in_format = false; - QueryWithOutputSettingsPushDownVisitor::Data data{settings}; - QueryWithOutputSettingsPushDownVisitor(data).visit(query); + ParserIdentifier format_p; + + if (!format_p.parse(pos, query_with_output.format, expected)) + return false; + setIdentifierSpecial(query_with_output.format); + + query_with_output.children.push_back(query_with_output.format); } + else if (!query_with_output.settings_ast && s_settings.ignore(pos, expected)) + { + // SETTINGS key1 = value1, key2 = value2, ... 
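As a side illustration of the loop introduced above: the same "two optional trailing clauses, any order, each at most once" pattern applied to plain tokens. Names below are hypothetical and the tokenizer is trivial; this is not the real ClickHouse parser API, only the control flow.

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct Tail
    {
        std::optional<std::string> format;
        std::optional<std::string> settings;
    };

    // Consume an optional "FORMAT <name>" and/or "SETTINGS <list>" suffix in any order,
    // each clause at most once; stop at the first token that is neither.
    Tail parseTail(const std::vector<std::string> & tokens, size_t pos)
    {
        Tail tail;
        for (size_t i = 0; i < 2 && pos < tokens.size(); ++i)
        {
            if (!tail.format && tokens[pos] == "FORMAT" && pos + 1 < tokens.size())
            {
                tail.format = tokens[pos + 1];
                pos += 2;
            }
            else if (!tail.settings && tokens[pos] == "SETTINGS" && pos + 1 < tokens.size())
            {
                tail.settings = tokens[pos + 1];
                pos += 2;
            }
            else
                break;
        }
        return tail;
    }

    int main()
    {
        for (const auto & query : {std::vector<std::string>{"FORMAT", "TSV", "SETTINGS", "e=f"},
                                   std::vector<std::string>{"SETTINGS", "e=f", "FORMAT", "TSV"}})
        {
            auto tail = parseTail(query, 0);
            std::cout << tail.format.value_or("-") << " / " << tail.settings.value_or("-") << "\n";
        }
        return 0;   // both orders print "TSV / e=f"
    }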
+ ParserSetQuery parser_settings(true); + if (!parser_settings.parse(pos, query_with_output.settings_ast, expected)) + return false; + query_with_output.children.push_back(query_with_output.settings_ast); + + // SETTINGS after FORMAT is not parsed by the SELECT parser (ParserSelectQuery) + // Pass them manually, to apply in InterpreterSelectQuery::initSettings() + if (query->as()) + { + auto settings = query_with_output.settings_ast->clone(); + assert_cast(settings.get())->print_in_format = false; + QueryWithOutputSettingsPushDownVisitor::Data data{settings}; + QueryWithOutputSettingsPushDownVisitor(data).visit(query); + } + } + else + break; } node = std::move(query); From 4701f39b284cdd533b58a614b138325018228ed8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 22 Feb 2024 15:18:52 +0000 Subject: [PATCH 0023/2361] Poll directly from partition queues --- src/Storages/Kafka/KafkaConsumer2.cpp | 29 ++++++++++++--------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 01169596e1a..37829c1a181 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -46,7 +46,7 @@ namespace ErrorCodes using namespace std::chrono_literals; const auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000; -const std::size_t POLL_TIMEOUT_WO_ASSIGNMENT_MS = 50; +const auto POLL_TIMEOUT_WO_ASSIGNMENT = 50ms; const auto DRAIN_TIMEOUT_MS = 5000ms; @@ -203,9 +203,17 @@ void KafkaConsumer2::drainConsumerQueue() void KafkaConsumer2::pollEvents() { + // POLL_TIMEOUT_WO_ASSIGNMENT_MS (50ms) is 100% enough just to check if we got assignment + // (see https://github.com/ClickHouse/ClickHouse/issues/11218) + auto msg = consumer->poll(POLL_TIMEOUT_WO_ASSIGNMENT); + // All the partition queues are detached, so the consumer shouldn't be able to poll any messages - auto msg = consumer->poll(10ms); chassert(!msg && "Consumer returned a message when it was not expected"); + + auto consumer_queue = consumer->get_consumer_queue(); + // There should be events in the queue, so let's consume them all + while (consumer_queue.get_length() > 0) + consumer->poll(); }; KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const @@ -298,22 +306,11 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition, co { stalled_status = StalledStatus::NO_MESSAGES_RETURNED; - // we already wait enough for assignment in the past, - // let's make polls shorter and not block other consumer - // which can work successfully in parallel - // POLL_TIMEOUT_WO_ASSIGNMENT_MS (50ms) is 100% enough just to check if we got assignment - // (see https://github.com/ClickHouse/ClickHouse/issues/11218) - auto actual_poll_timeout_ms = (waited_for_assignment >= MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) - ? std::min(POLL_TIMEOUT_WO_ASSIGNMENT_MS, poll_timeout) - : poll_timeout; - - auto & queue_to_poll_from = queues[topic_partition]; - queue_to_poll_from.forward_to_queue(consumer->get_consumer_queue()); - SCOPE_EXIT({ queue_to_poll_from.disable_queue_forwarding(); }); - + auto & queue_to_poll_from = queues.at(topic_partition); + LOG_TRACE(log, "Batch size {}, offset {}", batch_size, topic_partition.offset); const auto messages_to_pull = message_count.value_or(batch_size); /// Don't drop old messages immediately, since we may need them for virtual columns. 
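The consumer change in this commit stops polling messages from the global consumer queue and instead consumes batches directly from per-partition queues, while pollEvents() only drains control events. Below is a toy model of that routing with STL containers standing in for the cppkafka queues; it is illustrative only and does not reproduce librdkafka semantics.

    #include <deque>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using TopicPartition = std::pair<std::string, int>;

    struct ToyConsumer
    {
        std::deque<std::string> event_queue;                       // rebalances, errors, ...
        std::map<TopicPartition, std::deque<std::string>> queues;  // one queue per assigned partition

        // Counterpart of pollEvents(): serve control events only; messages never show up here
        // because every partition queue is kept separate from the main queue.
        void pollEvents()
        {
            while (!event_queue.empty())
            {
                std::cout << "event: " << event_queue.front() << "\n";
                event_queue.pop_front();
            }
        }

        // Counterpart of consume(topic_partition): batch straight from one partition queue,
        // so one stream cannot steal messages that belong to another partition.
        std::vector<std::string> consumeBatch(const TopicPartition & tp, size_t batch_size)
        {
            std::vector<std::string> batch;
            auto & q = queues.at(tp);
            while (!q.empty() && batch.size() < batch_size)
            {
                batch.push_back(q.front());
                q.pop_front();
            }
            return batch;
        }
    };

    int main()
    {
        ToyConsumer consumer;
        consumer.event_queue = {"rebalance: assigned {topic,0} {topic,1}"};
        consumer.queues[{"topic", 0}] = {"m0", "m1", "m2"};
        consumer.queues[{"topic", 1}] = {"m3"};

        consumer.pollEvents();
        for (const auto & m : consumer.consumeBatch({"topic", 0}, 2))
            std::cout << "consumed: " << m << "\n";
        return 0;
    }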
- auto new_messages = consumer->poll_batch(messages_to_pull, std::chrono::milliseconds(actual_poll_timeout_ms)); + auto new_messages = queue_to_poll_from.consume_batch(messages_to_pull, std::chrono::milliseconds(poll_timeout)); resetIfStopped(); if (stalled_status == StalledStatus::CONSUMER_STOPPED) From 266ef9081ddbeb200db251a477938cd0cfe5fc96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 27 Feb 2024 15:29:30 +0000 Subject: [PATCH 0024/2361] Make new StorageKafka able to run multiple threads --- src/Storages/Kafka/StorageKafka2.cpp | 143 +++++++++++---------------- 1 file changed, 60 insertions(+), 83 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index c658174b9d5..e465a3835b5 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -125,11 +125,8 @@ StorageKafka2::StorageKafka2( , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) , collection_name(collection_name_) { - if (kafka_settings->kafka_num_consumers != 1) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Multiple consumers not yet implemented!"); - - if (thread_per_consumer) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "The new Kafka storage cannot use multiple threads yet!"); + if (kafka_settings->kafka_num_consumers > 1 && !thread_per_consumer) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "With multiple consumer you have to use thread per consumer!"); if (kafka_settings->kafka_handle_error_mode == StreamingHandleErrorMode::STREAM) { @@ -239,7 +236,6 @@ StorageKafka2::write(const ASTPtr &, const StorageMetadataPtr & metadata_snapsho return std::make_shared(header, getFormatName(), max_rows, std::move(producer), getName(), modified_context); } - void StorageKafka2::startup() { for (size_t i = 0; i < num_consumers; ++i) @@ -445,9 +441,9 @@ void StorageKafka2::updateConfiguration(cppkafka::Configuration & kafka_config) // No need to add any prefix, messages can be distinguished kafka_config.set_log_callback( - [this](cppkafka::KafkaHandleBase &, int /*level*/, const std::string & facility, const std::string & message) + [this](cppkafka::KafkaHandleBase &, int level, const std::string & facility, const std::string & message) { - auto [poco_level, client_logs_level] = parseSyslogLevel(1); + auto [poco_level, client_logs_level] = parseSyslogLevel(level); LOG_IMPL(log, client_logs_level, poco_level, "[rdk:{}] {}", facility, message); }); @@ -555,7 +551,7 @@ void StorageKafka2::createKeeperNodes(const KafkaConsumer2Ptr & consumer) std::optional StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const TopicPartitions & topic_partitions) { - // TODO(antaljanosbenjamin): Review this function with somebody who know keeper better than me + // TODO(antaljanosbenjamin): Review this function with somebody who knows keeper better than me const auto uuid_as_string = toString(uuid); std::vector topic_partition_paths; @@ -565,35 +561,21 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi Coordination::Requests ops; - // for (const auto & topic_partition_path : topic_partition_paths) - // ops.push_back(zkutil::makeCheckRequest(topic_partition_path + lock_file_name, -1)); - for (const auto & topic_partition_path : topic_partition_paths) ops.push_back(zkutil::makeCreateRequest(topic_partition_path + lock_file_name, uuid_as_string, zkutil::CreateMode::Ephemeral)); - bool success = false; - for (auto try_count{0}; try_count < 10; ++try_count) - { 
Coordination::Responses responses; - if (const auto code = keeper_to_use.tryMulti(ops, responses); code == Coordination::Error::ZOK) + if (const auto code = keeper_to_use.tryMulti(ops, responses); code != Coordination::Error::ZOK) { - success = true; - break; + if (code != Coordination::Error::ZNODEEXISTS) + zkutil::KeeperMultiException::check(code, ops, responses); + + // TODO(antaljanosbenjamin): maybe check the content, if we have the locks, we can continue with them + return std::nullopt; } - else if (code != Coordination::Error::ZNODEEXISTS) - zkutil::KeeperMultiException::check(code, ops, responses); - // TODO(antaljanosbenjamin): We shouldn't wait here, but let's give the other consumers to release the locks - for (const auto & topic_partition_path : topic_partition_paths) - keeper_to_use.waitForDisappear(topic_partition_path + lock_file_name); - } - - if (!success) - return std::nullopt; - - - // We have the locks + // We have the locks, let's gather the information we needed TopicPartitionLocks locks; { auto tp_it = topic_partitions.begin(); @@ -915,56 +897,52 @@ bool StorageKafka2::streamToViews(size_t idx) LOG_TRACE(log, "Polling consumer for events"); consumer->pollEvents(); - if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) - { - // First release the locks so let other consumers acquire them ASAP - consumer_info.locks.clear(); - - const auto * current_assignment = consumer->getKafkaAssignment(); - if (current_assignment == nullptr) - { - // The consumer lost its assignment and haven't received a new one. - // TODO(antaljanosbenjamin): returning a proper value representing the state - // By returning true this function reports the current consumer as a "stalled" stream, which - return true; - } - LOG_TRACE(log, "Consumer needs update offset"); - consumer_info.consume_from_topic_partition_index = 0; - - consumer_info.locks.clear(); - consumer_info.topic_partitions.clear(); - - auto maybe_locks = lockTopicPartitions(*consumer_info.keeper, *current_assignment); - - if (!maybe_locks.has_value()) - { - // We couldn't acquire locks, probably some other consumers are still holding them. - return true; - } - - consumer_info.locks = std::move(*maybe_locks); - - consumer_info.topic_partitions.reserve(current_assignment->size()); - for (const auto & topic_partition : *current_assignment) - { - TopicPartition topic_partition_copy{topic_partition}; - if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; - maybe_committed_offset.has_value()) - topic_partition_copy.offset = *maybe_committed_offset + 1; - else - topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; - - consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); - } - consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); - - } - - LOG_TRACE(log, "Consumer has assignment"); - - // Here we will try to pull messages regardless if we loose our assignment try { + if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) + { + // First release the locks so let other consumers acquire them ASAP + consumer_info.locks.clear(); + + const auto * current_assignment = consumer->getKafkaAssignment(); + if (current_assignment == nullptr) + { + // The consumer lost its assignment and haven't received a new one. 
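The lockTopicPartitions() rewrite above takes all partition locks in a single all-or-nothing multi request and simply reports failure on ZNODEEXISTS instead of waiting for the other consumer to release them. A toy in-memory model of that all-or-nothing acquisition follows; it is not the real Keeper client, just the semantics the hunk relies on.

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>
    #include <vector>

    // Toy stand-in for a Keeper multi request: create every lock node or create none.
    std::optional<std::map<std::string, std::string>> tryMultiCreate(
        std::map<std::string, std::string> & store,
        const std::vector<std::string> & paths,
        const std::string & owner)
    {
        for (const auto & path : paths)
            if (store.count(path) != 0)
                return std::nullopt;            // "ZNODEEXISTS": another consumer holds a lock

        std::map<std::string, std::string> acquired;
        for (const auto & path : paths)
        {
            store[path] = owner;                // "ephemeral" node tagged with the consumer uuid
            acquired[path] = owner;
        }
        return acquired;
    }

    int main()
    {
        std::map<std::string, std::string> keeper;
        std::vector<std::string> locks = {"/topic/0/lock", "/topic/1/lock"};

        auto first = tryMultiCreate(keeper, locks, "consumer-a");
        auto second = tryMultiCreate(keeper, locks, "consumer-b");

        std::cout << "consumer-a acquired: " << (first ? "yes" : "no") << "\n";   // yes
        std::cout << "consumer-b acquired: " << (second ? "yes" : "no") << "\n";  // no, locks are taken
        return 0;
    }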
+ // TODO(antaljanosbenjamin): returning a proper value representing the state + // By returning true this function reports the current consumer as a "stalled" stream, which + return true; + } + LOG_TRACE(log, "Consumer needs update offset"); + consumer_info.consume_from_topic_partition_index = 0; + + consumer_info.locks.clear(); + consumer_info.topic_partitions.clear(); + + auto maybe_locks = lockTopicPartitions(*consumer_info.keeper, *current_assignment); + + if (!maybe_locks.has_value()) + { + // We couldn't acquire locks, probably some other consumers are still holding them. + return true; + } + + consumer_info.locks = std::move(*maybe_locks); + + consumer_info.topic_partitions.reserve(current_assignment->size()); + for (const auto & topic_partition : *current_assignment) + { + TopicPartition topic_partition_copy{topic_partition}; + if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; + maybe_committed_offset.has_value()) + topic_partition_copy.offset = *maybe_committed_offset + 1; + else + topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; + + consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); + } + consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); + } + LOG_TRACE(log, "Trying to consume from consumer {}", idx); const auto maybe_rows = streamFromConsumer(consumer_info); if (maybe_rows.has_value()) @@ -982,13 +960,13 @@ bool StorageKafka2::streamToViews(size_t idx) { if (Coordination::isHardwareError(e.code)) { + // Clear ephemeral nodes here as we got a new keeper here consumer_info.locks.clear(); consumer_info.keeper = getZooKeeper(); + return true; } - else - throw; - // TODO(antaljanosbenjamin): Should we reschedule in case of keeper error? + throw; } return false; } @@ -1073,11 +1051,10 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() { + std::unique_lock lock{keeper_mutex}; if (keeper->expired()) { - // TODO(antaljanosbenjamin): this can go wrong if we start a new session simultaneously from multiple threads. 
keeper = keeper->startNewSession(); - //TODO(antaljanosbenjamin): handle ephemeral nodes } return keeper; } From 71ea7be9d82aa2d65bdc242e1a3ca37f78ea57e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 28 Feb 2024 10:27:19 +0000 Subject: [PATCH 0025/2361] Fix build --- src/Storages/Kafka/StorageKafka2.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index e465a3835b5..6e11a8f9264 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -564,16 +564,16 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi for (const auto & topic_partition_path : topic_partition_paths) ops.push_back(zkutil::makeCreateRequest(topic_partition_path + lock_file_name, uuid_as_string, zkutil::CreateMode::Ephemeral)); - Coordination::Responses responses; + Coordination::Responses responses; - if (const auto code = keeper_to_use.tryMulti(ops, responses); code != Coordination::Error::ZOK) - { - if (code != Coordination::Error::ZNODEEXISTS) - zkutil::KeeperMultiException::check(code, ops, responses); + if (const auto code = keeper_to_use.tryMulti(ops, responses); code != Coordination::Error::ZOK) + { + if (code != Coordination::Error::ZNODEEXISTS) + zkutil::KeeperMultiException::check(code, ops, responses); - // TODO(antaljanosbenjamin): maybe check the content, if we have the locks, we can continue with them - return std::nullopt; - } + // TODO(antaljanosbenjamin): maybe check the content, if we have the locks, we can continue with them + return std::nullopt; + } // We have the locks, let's gather the information we needed TopicPartitionLocks locks; From d7b34a80bbbf98ea11e0d679eeede076421748f1 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 16:09:14 +0000 Subject: [PATCH 0026/2361] stash --- .../Net/include/Poco/Net/HTTPServerSession.h | 5 +++++ base/poco/Net/src/HTTPServerSession.cpp | 22 ++++++++++--------- .../library-bridge/LibraryBridgeHandlers.cpp | 16 +++++--------- .../library-bridge/LibraryBridgeHandlers.h | 6 ++--- programs/odbc-bridge/PingHandler.cpp | 2 +- programs/server/Server.cpp | 1 + src/Core/ServerSettings.h | 1 + src/IO/HTTPCommon.cpp | 15 +++++++++---- src/IO/HTTPCommon.h | 2 +- src/Server/HTTP/HTTPServer.cpp | 3 ++- src/Server/HTTP/HTTPServerResponse.h | 2 ++ .../WriteBufferFromHTTPServerResponse.cpp | 4 +--- .../HTTP/WriteBufferFromHTTPServerResponse.h | 2 -- src/Server/HTTPHandler.cpp | 3 +-- src/Server/InterserverIOHTTPHandler.cpp | 3 +-- src/Server/PrometheusRequestHandler.cpp | 10 ++------- src/Server/PrometheusRequestHandler.h | 7 +----- src/Server/ReplicasStatusHandler.cpp | 3 +-- src/Server/StaticRequestHandler.cpp | 10 ++++----- src/Server/WebUIRequestHandler.cpp | 9 +++----- .../00408_http_keep_alive.reference | 6 ++--- .../0_stateless/00501_http_head.reference | 4 ++-- 22 files changed, 63 insertions(+), 73 deletions(-) diff --git a/base/poco/Net/include/Poco/Net/HTTPServerSession.h b/base/poco/Net/include/Poco/Net/HTTPServerSession.h index ec928af304f..192d71962bc 100644 --- a/base/poco/Net/include/Poco/Net/HTTPServerSession.h +++ b/base/poco/Net/include/Poco/Net/HTTPServerSession.h @@ -56,10 +56,15 @@ namespace Net SocketAddress serverAddress(); /// Returns the server's address. 
+ size_t getKeepAliveTimeout() const { return _params->getKeepAliveTimeout().totalSeconds(); } + + size_t getMaxKeepAliveRequests() const { return _params->getMaxKeepAliveRequests(); } + private: bool _firstRequest; Poco::Timespan _keepAliveTimeout; int _maxKeepAliveRequests; + HTTPServerParams::Ptr _params; }; diff --git a/base/poco/Net/src/HTTPServerSession.cpp b/base/poco/Net/src/HTTPServerSession.cpp index d4f2b24879e..3ea689cb0cf 100644 --- a/base/poco/Net/src/HTTPServerSession.cpp +++ b/base/poco/Net/src/HTTPServerSession.cpp @@ -19,11 +19,12 @@ namespace Poco { namespace Net { -HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParams::Ptr pParams): - HTTPSession(socket, pParams->getKeepAlive()), - _firstRequest(true), - _keepAliveTimeout(pParams->getKeepAliveTimeout()), - _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) +HTTPServerSession::HTTPServerSession(const StreamSocket & socket, HTTPServerParams::Ptr pParams) + : HTTPSession(socket, pParams->getKeepAlive()) + , _firstRequest(true) + , _keepAliveTimeout(pParams->getKeepAliveTimeout()) + , _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) + , _params(pParams) { setTimeout(pParams->getTimeout()); } @@ -46,11 +47,12 @@ bool HTTPServerSession::hasMoreRequests() } else if (_maxKeepAliveRequests != 0 && getKeepAlive()) { - if (_maxKeepAliveRequests > 0) - --_maxKeepAliveRequests; - return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ); - } - else return false; + if (_maxKeepAliveRequests > 0) + --_maxKeepAliveRequests; + return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ); + } + else + return false; } diff --git a/programs/library-bridge/LibraryBridgeHandlers.cpp b/programs/library-bridge/LibraryBridgeHandlers.cpp index 26d887cfc98..094cef6716d 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.cpp +++ b/programs/library-bridge/LibraryBridgeHandlers.cpp @@ -374,10 +374,8 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ } -ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , keep_alive_timeout(keep_alive_timeout_) - , log(getLogger("ExternalDictionaryLibraryBridgeExistsHandler")) +ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(ContextPtr context_) + : WithContext(context_), log(getLogger("ExternalDictionaryLibraryBridgeExistsHandler")) { } @@ -401,7 +399,7 @@ void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerReque String res = library_handler ? 
"1" : "0"; - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); LOG_TRACE(log, "Sending ping response: {} (dictionary id: {})", res, dictionary_id); response.sendBuffer(res.data(), res.size()); } @@ -617,10 +615,8 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ } -CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , keep_alive_timeout(keep_alive_timeout_) - , log(getLogger("CatBoostLibraryBridgeExistsHandler")) +CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(ContextPtr context_) + : WithContext(context_), log(getLogger("CatBoostLibraryBridgeExistsHandler")) { } @@ -634,7 +630,7 @@ void CatBoostLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & reque String res = "1"; - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); LOG_TRACE(log, "Sending ping response: {}", res); response.sendBuffer(res.data(), res.size()); } diff --git a/programs/library-bridge/LibraryBridgeHandlers.h b/programs/library-bridge/LibraryBridgeHandlers.h index 1db71eb24cb..83bca24ce1f 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.h +++ b/programs/library-bridge/LibraryBridgeHandlers.h @@ -34,12 +34,11 @@ private: class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext { public: - ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_); + ExternalDictionaryLibraryBridgeExistsHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: - const size_t keep_alive_timeout; LoggerPtr log; }; @@ -77,12 +76,11 @@ private: class CatBoostLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext { public: - CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_); + CatBoostLibraryBridgeExistsHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: - const size_t keep_alive_timeout; LoggerPtr log; }; diff --git a/programs/odbc-bridge/PingHandler.cpp b/programs/odbc-bridge/PingHandler.cpp index 80d0e2bf4a9..e5d094fb7eb 100644 --- a/programs/odbc-bridge/PingHandler.cpp +++ b/programs/odbc-bridge/PingHandler.cpp @@ -10,7 +10,7 @@ void PingHandler::handleRequest(HTTPServerRequest & /* request */, HTTPServerRes { try { - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); const char * data = "Ok.\n"; response.sendBuffer(data, strlen(data)); } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index b67a4eccd15..b741cd7f644 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -2251,6 +2251,7 @@ void Server::createServers( Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams; http_params->setTimeout(settings.http_receive_timeout); http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout); + http_params->setMaxKeepAliveRequests(static_cast(global_context->getServerSettings().max_keep_alive_requests)); Poco::Util::AbstractConfiguration::Keys protocols; config.keys("protocols", protocols); diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index da82cdea5a4..7480d94e81d 100644 --- 
a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -113,6 +113,7 @@ namespace DB M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. Queries to not yet loaded entity will be blocked until load is finished.", 0) \ M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \ M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ + M(UInt64, max_keep_alive_requests, 10000, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_receive_timeout, 0, "HTTP receive timeout for fetch part requests. Inherited from default profile `http_receive_timeout` if not set explicitly.", 0) \ diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 09f7724d613..2b3f7f062bc 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -33,14 +34,20 @@ namespace ErrorCodes extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS; } -void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout) +void setResponseDefaultHeaders(HTTPServerResponse & response) { if (!response.getKeepAlive()) return; - Poco::Timespan timeout(keep_alive_timeout, 0); - if (timeout.totalSeconds()) - response.set("Keep-Alive", "timeout=" + std::to_string(timeout.totalSeconds())); + const size_t keep_alive_timeout = response.getSession().getKeepAliveTimeout(); + const size_t keep_alive_max_requests = response.getSession().getMaxKeepAliveRequests(); + if (keep_alive_timeout) + { + if (keep_alive_max_requests) + response.set("Keep-Alive", fmt::format("timeout={}, max={}", keep_alive_timeout, keep_alive_max_requests)); + else + response.set("Keep-Alive", fmt::format("timeout={}", keep_alive_timeout)); + } } HTTPSessionPtr makeHTTPSession( diff --git a/src/IO/HTTPCommon.h b/src/IO/HTTPCommon.h index 63dffcf6878..fa6086224f5 100644 --- a/src/IO/HTTPCommon.h +++ b/src/IO/HTTPCommon.h @@ -54,7 +54,7 @@ private: using HTTPSessionPtr = std::shared_ptr; -void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout); +void setResponseDefaultHeaders(HTTPServerResponse & response); /// Create session object to perform requests and set required parameters. 
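For reference, the Keep-Alive header value produced by the new setResponseDefaultHeaders() logic above, reproduced as a tiny standalone program. It uses std::ostringstream instead of fmt purely so the sketch is self-contained; the formatting rules are the ones in the hunk.

    #include <cstddef>
    #include <iostream>
    #include <sstream>
    #include <string>

    std::string keepAliveHeader(size_t timeout, size_t max_requests)
    {
        if (timeout == 0)
            return {};                       // no header at all, as in the hunk above
        std::ostringstream value;
        value << "timeout=" << timeout;
        if (max_requests != 0)
            value << ", max=" << max_requests;
        return "Keep-Alive: " + value.str();
    }

    int main()
    {
        // With the values the updated test references below expect:
        std::cout << keepAliveHeader(10, 10000) << "\n";  // Keep-Alive: timeout=10, max=10000
        std::cout << keepAliveHeader(10, 0) << "\n";      // Keep-Alive: timeout=10
        return 0;
    }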
HTTPSessionPtr makeHTTPSession( diff --git a/src/Server/HTTP/HTTPServer.cpp b/src/Server/HTTP/HTTPServer.cpp index 90bdebf6451..9b8feae3e26 100644 --- a/src/Server/HTTP/HTTPServer.cpp +++ b/src/Server/HTTP/HTTPServer.cpp @@ -13,7 +13,8 @@ HTTPServer::HTTPServer( Poco::Net::HTTPServerParams::Ptr params, const ProfileEvents::Event & read_event, const ProfileEvents::Event & write_event) - : TCPServer(new HTTPServerConnectionFactory(context, params, factory_, read_event, write_event), thread_pool, socket_, params), factory(factory_) + : TCPServer(new HTTPServerConnectionFactory(context, params, factory_, read_event, write_event), thread_pool, socket_, params) + , factory(factory_) { } diff --git a/src/Server/HTTP/HTTPServerResponse.h b/src/Server/HTTP/HTTPServerResponse.h index 8edb785e7c5..9793fc8b24b 100644 --- a/src/Server/HTTP/HTTPServerResponse.h +++ b/src/Server/HTTP/HTTPServerResponse.h @@ -245,6 +245,8 @@ public: void attachRequest(HTTPServerRequest * request_) { request = request_; } + const Poco::Net::HTTPServerSession & getSession() const { return session; } + private: Poco::Net::HTTPServerSession & session; HTTPServerRequest * request = nullptr; diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp index 8098671a903..a39f6de51d0 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp @@ -30,7 +30,7 @@ void WriteBufferFromHTTPServerResponse::startSendHeaders() if (add_cors_header) response.set("Access-Control-Allow-Origin", "*"); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); std::stringstream header; //STYLE_CHECK_ALLOW_STD_STRING_STREAM response.beginWrite(header); @@ -119,12 +119,10 @@ void WriteBufferFromHTTPServerResponse::nextImpl() WriteBufferFromHTTPServerResponse::WriteBufferFromHTTPServerResponse( HTTPServerResponse & response_, bool is_http_method_head_, - UInt64 keep_alive_timeout_, const ProfileEvents::Event & write_event_) : HTTPWriteBuffer(response_.getSocket(), write_event_) , response(response_) , is_http_method_head(is_http_method_head_) - , keep_alive_timeout(keep_alive_timeout_) { } diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h index a3952b7c553..f0c80f24582 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h @@ -29,7 +29,6 @@ public: WriteBufferFromHTTPServerResponse( HTTPServerResponse & response_, bool is_http_method_head_, - UInt64 keep_alive_timeout_, const ProfileEvents::Event & write_event_ = ProfileEvents::end()); ~WriteBufferFromHTTPServerResponse() override; @@ -91,7 +90,6 @@ private: bool is_http_method_head; bool add_cors_header = false; - size_t keep_alive_timeout = 0; bool initialized = false; diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index c112eefec6c..ac6c9d6a0a5 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -621,7 +621,6 @@ void HTTPHandler::processQuery( std::make_shared( response, request.getMethod() == HTTPRequest::HTTP_HEAD, - context->getServerSettings().keep_alive_timeout.totalSeconds(), write_event); used_output.out = used_output.out_holder; used_output.out_maybe_compressed = used_output.out_holder; @@ -926,7 +925,7 @@ try if (!used_output.out_holder && !used_output.exception_is_written) { /// If nothing was sent yet and we don't even know if we must 
compress the response. - WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT).writeln(s); + WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD).writeln(s); } else if (used_output.out_maybe_compressed) { diff --git a/src/Server/InterserverIOHTTPHandler.cpp b/src/Server/InterserverIOHTTPHandler.cpp index 28045380cd7..9a87992731c 100644 --- a/src/Server/InterserverIOHTTPHandler.cpp +++ b/src/Server/InterserverIOHTTPHandler.cpp @@ -87,9 +87,8 @@ void InterserverIOHTTPHandler::handleRequest(HTTPServerRequest & request, HTTPSe response.setChunkedTransferEncoding(true); Output used_output; - const auto keep_alive_timeout = server.context()->getServerSettings().keep_alive_timeout.totalSeconds(); used_output.out = std::make_shared( - response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout, write_event); + response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, write_event); auto finalize_output = [&] { diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index dff960f7031..0ad5f907467 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -18,21 +18,15 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe { try { - /// Raw config reference is used here to avoid dependency on Context and ServerSettings. - /// This is painful, because this class is also used in a build with CLICKHOUSE_KEEPER_STANDALONE_BUILD=1 - /// And there ordinary Context is replaced with a tiny clone. - const auto & config = server.config(); - unsigned keep_alive_timeout = config.getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT); - /// In order to make keep-alive works. 
if (request.getVersion() == HTTPServerRequest::HTTP_1_1) response.setChunkedTransferEncoding(true); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); response.setContentType("text/plain; version=0.0.4; charset=UTF-8"); - WriteBufferFromHTTPServerResponse wb(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout, write_event); + WriteBufferFromHTTPServerResponse wb(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, write_event); try { metrics_writer->write(wb); diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index d120752c8c5..cc7848d1dd0 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -12,15 +12,10 @@ class IServer; class PrometheusRequestHandler : public HTTPRequestHandler { private: - IServer & server; PrometheusMetricsWriterPtr metrics_writer; public: - PrometheusRequestHandler(IServer & server_, PrometheusMetricsWriterPtr metrics_writer_) - : server(server_) - , metrics_writer(std::move(metrics_writer_)) - { - } + PrometheusRequestHandler(IServer &, PrometheusMetricsWriterPtr metrics_writer_) : metrics_writer(std::move(metrics_writer_)) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 91c6bd722d3..964e3834037 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -84,8 +84,7 @@ void ReplicasStatusHandler::handleRequest(HTTPServerRequest & request, HTTPServe } } - const auto & server_settings = getContext()->getServerSettings(); - setResponseDefaultHeaders(response, server_settings.keep_alive_timeout.totalSeconds()); + setResponseDefaultHeaders(response); if (!ok) { diff --git a/src/Server/StaticRequestHandler.cpp b/src/Server/StaticRequestHandler.cpp index 67bf3875de4..3d618031875 100644 --- a/src/Server/StaticRequestHandler.cpp +++ b/src/Server/StaticRequestHandler.cpp @@ -33,10 +33,9 @@ namespace ErrorCodes extern const int INVALID_CONFIG_PARAMETER; } -static inline std::unique_ptr -responseWriteBuffer(HTTPServerRequest & request, HTTPServerResponse & response, UInt64 keep_alive_timeout) +static inline std::unique_ptr responseWriteBuffer(HTTPServerRequest & request, HTTPServerResponse & response) { - auto buf = std::unique_ptr(new WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD, keep_alive_timeout)); + auto buf = std::unique_ptr(new WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD)); /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). 
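The comment above concerns content negotiation through Accept-Encoding. The helper ClickHouse actually uses is outside this hunk, so the following is only a simplified, generic sketch of such a choice; real negotiation also honours q-values, which this ignores.

    #include <iostream>
    #include <string>

    enum class Compression { None, Gzip, Deflate };

    // Naive Accept-Encoding check: prefer gzip, then deflate, otherwise send plain data.
    Compression chooseCompression(const std::string & accept_encoding)
    {
        if (accept_encoding.find("gzip") != std::string::npos)
            return Compression::Gzip;
        if (accept_encoding.find("deflate") != std::string::npos)
            return Compression::Deflate;
        return Compression::None;
    }

    int main()
    {
        std::cout << static_cast<int>(chooseCompression("gzip, deflate, br")) << "\n";  // 1 -> gzip
        std::cout << static_cast<int>(chooseCompression("identity")) << "\n";           // 0 -> none
        return 0;
    }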
String http_response_compression_methods = request.get("Accept-Encoding", ""); @@ -89,8 +88,7 @@ static inline void trySendExceptionToClient( void StaticRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { - auto keep_alive_timeout = server.context()->getServerSettings().keep_alive_timeout.totalSeconds(); - auto out = responseWriteBuffer(request, response, keep_alive_timeout); + auto out = responseWriteBuffer(request, response); try { @@ -105,7 +103,7 @@ void StaticRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServer "The Transfer-Encoding is not chunked and there " "is no Content-Length header for POST request"); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); response.setStatusAndReason(Poco::Net::HTTPResponse::HTTPStatus(status)); writeResponse(*out); } diff --git a/src/Server/WebUIRequestHandler.cpp b/src/Server/WebUIRequestHandler.cpp index 68d3ff0b325..faad9d57519 100644 --- a/src/Server/WebUIRequestHandler.cpp +++ b/src/Server/WebUIRequestHandler.cpp @@ -29,18 +29,15 @@ DashboardWebUIRequestHandler::DashboardWebUIRequestHandler(IServer & server_) : BinaryWebUIRequestHandler::BinaryWebUIRequestHandler(IServer & server_) : server(server_) {} JavaScriptWebUIRequestHandler::JavaScriptWebUIRequestHandler(IServer & server_) : server(server_) {} -static void handle(const IServer & server, HTTPServerRequest & request, HTTPServerResponse & response, std::string_view html) +static void handle(const IServer &, HTTPServerRequest & request, HTTPServerResponse & response, std::string_view html) { - auto keep_alive_timeout = server.context()->getServerSettings().keep_alive_timeout.totalSeconds(); - response.setContentType("text/html; charset=UTF-8"); if (request.getVersion() == HTTPServerRequest::HTTP_1_1) response.setChunkedTransferEncoding(true); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); - WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD, keep_alive_timeout).write(html.data(), html.size()); - + WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD).write(html.data(), html.size()); } void PlayWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) diff --git a/tests/queries/0_stateless/00408_http_keep_alive.reference b/tests/queries/0_stateless/00408_http_keep_alive.reference index 17a7fd690a8..d5d7dacce9e 100644 --- a/tests/queries/0_stateless/00408_http_keep_alive.reference +++ b/tests/queries/0_stateless/00408_http_keep_alive.reference @@ -1,6 +1,6 @@ < Connection: Keep-Alive -< Keep-Alive: timeout=10 +< Keep-Alive: timeout=10, max=10000 < Connection: Keep-Alive -< Keep-Alive: timeout=10 +< Keep-Alive: timeout=10, max=10000 < Connection: Keep-Alive -< Keep-Alive: timeout=10 +< Keep-Alive: timeout=10, max=10000 diff --git a/tests/queries/0_stateless/00501_http_head.reference b/tests/queries/0_stateless/00501_http_head.reference index 8351327b356..db82132b145 100644 --- a/tests/queries/0_stateless/00501_http_head.reference +++ b/tests/queries/0_stateless/00501_http_head.reference @@ -2,11 +2,11 @@ HTTP/1.1 200 OK Connection: Keep-Alive Content-Type: text/tab-separated-values; charset=UTF-8 Transfer-Encoding: chunked -Keep-Alive: timeout=10 +Keep-Alive: timeout=10, max=10000 HTTP/1.1 200 
OK Connection: Keep-Alive Content-Type: text/tab-separated-values; charset=UTF-8 Transfer-Encoding: chunked -Keep-Alive: timeout=10 +Keep-Alive: timeout=10, max=10000 From 7aace4d876173ce18ade57ec1bdc332efff7ce80 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 17:24:25 +0000 Subject: [PATCH 0027/2361] add test --- .../test_server_keep_alive/__init__.py | 0 .../configs/keep_alive_settings.xml | 4 ++ .../test_server_keep_alive/test.py | 46 +++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 tests/integration/test_server_keep_alive/__init__.py create mode 100644 tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml create mode 100644 tests/integration/test_server_keep_alive/test.py diff --git a/tests/integration/test_server_keep_alive/__init__.py b/tests/integration/test_server_keep_alive/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml b/tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml new file mode 100644 index 00000000000..06e68044817 --- /dev/null +++ b/tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml @@ -0,0 +1,4 @@ + + 3600 + 5 + diff --git a/tests/integration/test_server_keep_alive/test.py b/tests/integration/test_server_keep_alive/test.py new file mode 100644 index 00000000000..0f88fe47673 --- /dev/null +++ b/tests/integration/test_server_keep_alive/test.py @@ -0,0 +1,46 @@ +import logging +import pytest +import requests + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node", main_configs=["configs/keep_alive_settings.xml"]) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def test_requests_with_keep_alive(start_cluster): + # In this test we have `keep_alive_timeout` set to one hour to never trigger connection reset by timeout, `max_keep_alive_requests` is set to 5. + # We expect server to close connection after each 5 requests. We detect connection reset by change in src port. + # So the first 5 requests should come from the same port, the following 5 requests should come from another port. 
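To make the expected grouping in this test concrete, here is a tiny simulation of the server-side request counter it exercises. The names are made up; the real countdown lives in HTTPServerSession::hasMoreRequests() earlier in this series.

    #include <iostream>

    int main()
    {
        const int max_keep_alive_requests = 5;   // the value the test config sets
        int remaining = max_keep_alive_requests;
        int connection_id = 0;

        for (int request = 0; request < 10; ++request)
        {
            if (remaining == 0)                  // server closed the connection after 5 requests
            {
                ++connection_id;                 // client reconnects: a new source port in query_log
                remaining = max_keep_alive_requests;
            }
            --remaining;
            std::cout << "request " << request << " -> connection " << connection_id << "\n";
        }
        // Prints connection 0 for requests 0..4 and connection 1 for requests 5..9,
        // matching the two groups of source ports the test asserts on.
        return 0;
    }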
+ session = requests.Session() + for i in range(10): + session.get( + f"http://{node.ip_address}:8123/?query=select%201&log_comment=test_requests_with_keep_alive_{i}" + ) + + ports = node.query( + """ + SYSTEM FLUSH LOGS; + + SELECT port + FROM system.query_log + WHERE log_comment like 'test_requests_with_keep_alive_%' AND type = 'QueryFinish' + ORDER BY log_comment + """ + ).split("\n")[:-1] + + expected = 5 * [ports[0]] + [ports[5]] * 5 + + assert ports == expected From 146d7603388ca161ee3340ab1f582971a4e45a03 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 17:38:06 +0000 Subject: [PATCH 0028/2361] rm more --- programs/keeper/Keeper.cpp | 2 +- src/IO/HTTPCommon.cpp | 1 - src/Server/HTTP/HTTPServer.cpp | 3 +-- src/Server/HTTPHandlerFactory.cpp | 10 ++++------ src/Server/HTTPHandlerFactory.h | 8 ++------ src/Server/PrometheusRequestHandler.cpp | 13 +++---------- src/Server/PrometheusRequestHandler.h | 2 +- src/Server/WebUIRequestHandler.cpp | 14 +++++++------- 8 files changed, 19 insertions(+), 34 deletions(-) diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index a558ed64bf9..238964fb25e 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -507,7 +507,7 @@ try "Prometheus: http://" + address.toString(), std::make_unique( std::move(my_http_context), - createPrometheusMainHandlerFactory(*this, config_getter(), metrics_writer, "PrometheusHandler-factory"), + createPrometheusMainHandlerFactory(config_getter(), metrics_writer, "PrometheusHandler-factory"), server_pool, socket, http_params)); diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 2b3f7f062bc..56226941228 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -1,4 +1,3 @@ -#include #include #include diff --git a/src/Server/HTTP/HTTPServer.cpp b/src/Server/HTTP/HTTPServer.cpp index 9b8feae3e26..90bdebf6451 100644 --- a/src/Server/HTTP/HTTPServer.cpp +++ b/src/Server/HTTP/HTTPServer.cpp @@ -13,8 +13,7 @@ HTTPServer::HTTPServer( Poco::Net::HTTPServerParams::Ptr params, const ProfileEvents::Event & read_event, const ProfileEvents::Event & write_event) - : TCPServer(new HTTPServerConnectionFactory(context, params, factory_, read_event, write_event), thread_pool, socket_, params) - , factory(factory_) + : TCPServer(new HTTPServerConnectionFactory(context, params, factory_, read_event, write_event), thread_pool, socket_, params), factory(factory_) { } diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 9a67e576345..23d4c081d2d 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -123,7 +123,7 @@ static inline auto createHandlersFactoryFromConfig( } else if (handler_type == "prometheus") { - main_handler_factory->addHandler(createPrometheusHandlerFactory(server, config, async_metrics, prefix + "." + key)); + main_handler_factory->addHandler(createPrometheusHandlerFactory(config, async_metrics, prefix + "." 
+ key)); } else if (handler_type == "replicas_status") { @@ -202,7 +202,7 @@ HTTPRequestHandlerFactoryPtr createHandlerFactory(IServer & server, const Poco:: else if (name == "PrometheusHandler-factory") { auto metrics_writer = std::make_shared(config, "prometheus", async_metrics); - return createPrometheusMainHandlerFactory(server, config, metrics_writer, name); + return createPrometheusMainHandlerFactory(config, metrics_writer, name); } throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown HTTP handler factory name."); @@ -294,10 +294,8 @@ void addDefaultHandlersFactory( if (config.has("prometheus") && config.getInt("prometheus.port", 0) == 0) { auto writer = std::make_shared(config, "prometheus", async_metrics); - auto creator = [&server, writer] () -> std::unique_ptr - { - return std::make_unique(server, writer); - }; + auto creator + = [writer]() -> std::unique_ptr { return std::make_unique(writer); }; auto prometheus_handler = std::make_shared>(std::move(creator)); prometheus_handler->attachStrictPath(config.getString("prometheus.endpoint", "/metrics")); prometheus_handler->allowGetAndHeadRequest(); diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index ac18c36e6c9..5c1a12d9e06 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -126,14 +126,10 @@ HTTPRequestHandlerFactoryPtr createReplicasStatusHandlerFactory(IServer & server const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); -HTTPRequestHandlerFactoryPtr -createPrometheusHandlerFactory(IServer & server, - const Poco::Util::AbstractConfiguration & config, - AsynchronousMetrics & async_metrics, - const std::string & config_prefix); +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( + const Poco::Util::AbstractConfiguration & config, AsynchronousMetrics & async_metrics, const std::string & config_prefix); HTTPRequestHandlerFactoryPtr createPrometheusMainHandlerFactory( - IServer & server, const Poco::Util::AbstractConfiguration & config, PrometheusMetricsWriterPtr metrics_writer, const std::string & name); diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 0ad5f907467..1a04311116f 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -44,16 +44,12 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe } HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( - IServer & server, const Poco::Util::AbstractConfiguration & config, AsynchronousMetrics & async_metrics, const std::string & config_prefix) { auto writer = std::make_shared(config, config_prefix + ".handler", async_metrics); - auto creator = [&server, writer]() -> std::unique_ptr - { - return std::make_unique(server, writer); - }; + auto creator = [writer]() -> std::unique_ptr { return std::make_unique(writer); }; auto factory = std::make_shared>(std::move(creator)); factory->addFiltersFromConfig(config, config_prefix); @@ -61,13 +57,10 @@ HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( } HTTPRequestHandlerFactoryPtr createPrometheusMainHandlerFactory( - IServer & server, const Poco::Util::AbstractConfiguration & config, PrometheusMetricsWriterPtr metrics_writer, const std::string & name) + const Poco::Util::AbstractConfiguration & config, PrometheusMetricsWriterPtr metrics_writer, const std::string & name) { auto factory = std::make_shared(name); - auto creator = [&server, metrics_writer] - { - return std::make_unique(server, 
metrics_writer); - }; + auto creator = [metrics_writer] { return std::make_unique(metrics_writer); }; auto handler = std::make_shared>(std::move(creator)); handler->attachStrictPath(config.getString("prometheus.endpoint", "/metrics")); diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index cc7848d1dd0..7f4d3c14f62 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -15,7 +15,7 @@ private: PrometheusMetricsWriterPtr metrics_writer; public: - PrometheusRequestHandler(IServer &, PrometheusMetricsWriterPtr metrics_writer_) : metrics_writer(std::move(metrics_writer_)) { } + PrometheusRequestHandler(PrometheusMetricsWriterPtr metrics_writer_) : metrics_writer(std::move(metrics_writer_)) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; diff --git a/src/Server/WebUIRequestHandler.cpp b/src/Server/WebUIRequestHandler.cpp index faad9d57519..e43412550f9 100644 --- a/src/Server/WebUIRequestHandler.cpp +++ b/src/Server/WebUIRequestHandler.cpp @@ -29,7 +29,7 @@ DashboardWebUIRequestHandler::DashboardWebUIRequestHandler(IServer & server_) : BinaryWebUIRequestHandler::BinaryWebUIRequestHandler(IServer & server_) : server(server_) {} JavaScriptWebUIRequestHandler::JavaScriptWebUIRequestHandler(IServer & server_) : server(server_) {} -static void handle(const IServer &, HTTPServerRequest & request, HTTPServerResponse & response, std::string_view html) +static void handle(HTTPServerRequest & request, HTTPServerResponse & response, std::string_view html) { response.setContentType("text/html; charset=UTF-8"); if (request.getVersion() == HTTPServerRequest::HTTP_1_1) @@ -42,7 +42,7 @@ static void handle(const IServer &, HTTPServerRequest & request, HTTPServerRespo void PlayWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) { - handle(server, request, response, {reinterpret_cast(gresource_play_htmlData), gresource_play_htmlSize}); + handle(request, response, {reinterpret_cast(gresource_play_htmlData), gresource_play_htmlSize}); } void DashboardWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) @@ -60,23 +60,23 @@ void DashboardWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HT static re2::RE2 lz_string_url = R"(https://[^\s"'`]+lz-string[^\s"'`]*\.js)"; RE2::Replace(&html, lz_string_url, "/js/lz-string.js"); - handle(server, request, response, html); + handle(request, response, html); } void BinaryWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) { - handle(server, request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); + handle(request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); } void JavaScriptWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) { if (request.getURI() == "/js/uplot.js") { - handle(server, request, response, {reinterpret_cast(gresource_uplot_jsData), gresource_uplot_jsSize}); + handle(request, response, {reinterpret_cast(gresource_uplot_jsData), gresource_uplot_jsSize}); } else if (request.getURI() == "/js/lz-string.js") { - handle(server, request, response, {reinterpret_cast(gresource_lz_string_jsData), gresource_lz_string_jsSize}); + 
handle(request, response, {reinterpret_cast(gresource_lz_string_jsData), gresource_lz_string_jsSize}); } else { @@ -84,7 +84,7 @@ void JavaScriptWebUIRequestHandler::handleRequest(HTTPServerRequest & request, H *response.send() << "Not found.\n"; } - handle(server, request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); + handle(request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); } } From bd04fc5346d83e8450fa98578e325923a609abda Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 17:41:53 +0000 Subject: [PATCH 0029/2361] rename test --- tests/integration/test_server_keep_alive/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_server_keep_alive/test.py b/tests/integration/test_server_keep_alive/test.py index 0f88fe47673..96f08a37adb 100644 --- a/tests/integration/test_server_keep_alive/test.py +++ b/tests/integration/test_server_keep_alive/test.py @@ -20,7 +20,7 @@ def start_cluster(): cluster.shutdown() -def test_requests_with_keep_alive(start_cluster): +def test_max_keep_alive_requests_on_user_side(start_cluster): # In this test we have `keep_alive_timeout` set to one hour to never trigger connection reset by timeout, `max_keep_alive_requests` is set to 5. # We expect server to close connection after each 5 requests. We detect connection reset by change in src port. # So the first 5 requests should come from the same port, the following 5 requests should come from another port. From c153fae0b8377aeca5e636a7ad0370e6ada42688 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 19:10:06 +0000 Subject: [PATCH 0030/2361] add docs --- .../server-configuration-parameters/settings.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 07c9a2b88ab..0efb5a9e6e4 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1298,6 +1298,16 @@ The number of seconds that ClickHouse waits for incoming requests before closing 10 ``` +## max_keep_alive_requests {#max-keep-alive-requests} + +Maximal number of requests through a single keep-alive connection until it will be closed by ClickHouse server. Default to 10000. + +**Example** + +``` xml +10 +``` + ## listen_host {#listen_host} Restriction on hosts that requests can come from. If you want the server to answer all of them, specify `::`. 
From b93f483a0e2f312d50685fd499d2f52717b83925 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 20:07:12 +0000 Subject: [PATCH 0031/2361] fix build --- programs/library-bridge/LibraryBridge.cpp | 2 +- .../LibraryBridgeHandlerFactory.cpp | 10 ++++------ .../LibraryBridgeHandlerFactory.h | 2 -- .../library-bridge/LibraryBridgeHandlers.cpp | 17 ++++++----------- programs/library-bridge/LibraryBridgeHandlers.h | 6 ++---- programs/odbc-bridge/ColumnInfoHandler.cpp | 5 +---- programs/odbc-bridge/ColumnInfoHandler.h | 8 +------- programs/odbc-bridge/IdentifierQuoteHandler.cpp | 2 +- programs/odbc-bridge/IdentifierQuoteHandler.h | 8 +------- programs/odbc-bridge/MainHandler.cpp | 2 +- programs/odbc-bridge/MainHandler.h | 3 --- programs/odbc-bridge/ODBCHandlerFactory.cpp | 10 +++++----- programs/odbc-bridge/SchemaAllowedHandler.cpp | 2 +- programs/odbc-bridge/SchemaAllowedHandler.h | 8 +------- 14 files changed, 25 insertions(+), 60 deletions(-) diff --git a/programs/library-bridge/LibraryBridge.cpp b/programs/library-bridge/LibraryBridge.cpp index 8a07ca57104..f86e469a307 100644 --- a/programs/library-bridge/LibraryBridge.cpp +++ b/programs/library-bridge/LibraryBridge.cpp @@ -25,7 +25,7 @@ std::string LibraryBridge::bridgeName() const LibraryBridge::HandlerFactoryPtr LibraryBridge::getHandlerFactoryPtr(ContextPtr context) const { - return std::make_shared("LibraryRequestHandlerFactory", keep_alive_timeout, context); + return std::make_shared("LibraryRequestHandlerFactory", context); } } diff --git a/programs/library-bridge/LibraryBridgeHandlerFactory.cpp b/programs/library-bridge/LibraryBridgeHandlerFactory.cpp index e5ab22f2d40..234904c6265 100644 --- a/programs/library-bridge/LibraryBridgeHandlerFactory.cpp +++ b/programs/library-bridge/LibraryBridgeHandlerFactory.cpp @@ -9,12 +9,10 @@ namespace DB { LibraryBridgeHandlerFactory::LibraryBridgeHandlerFactory( const std::string & name_, - size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , log(getLogger(name_)) , name(name_) - , keep_alive_timeout(keep_alive_timeout_) { } @@ -26,17 +24,17 @@ std::unique_ptr LibraryBridgeHandlerFactory::createRequestHa if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET) { if (uri.getPath() == "/extdict_ping") - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); else if (uri.getPath() == "/catboost_ping") - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); } if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) { if (uri.getPath() == "/extdict_request") - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); else if (uri.getPath() == "/catboost_request") - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); } return nullptr; diff --git a/programs/library-bridge/LibraryBridgeHandlerFactory.h b/programs/library-bridge/LibraryBridgeHandlerFactory.h index 5b0f088bc29..c65394efa3b 100644 --- a/programs/library-bridge/LibraryBridgeHandlerFactory.h +++ b/programs/library-bridge/LibraryBridgeHandlerFactory.h @@ -13,7 +13,6 @@ class LibraryBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContex public: LibraryBridgeHandlerFactory( const std::string & name_, - size_t keep_alive_timeout_, ContextPtr context_); std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; @@ -21,7 +20,6 @@ public: private: LoggerPtr log; const std::string name; - 
const size_t keep_alive_timeout; }; } diff --git a/programs/library-bridge/LibraryBridgeHandlers.cpp b/programs/library-bridge/LibraryBridgeHandlers.cpp index 094cef6716d..bd8faf76188 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.cpp +++ b/programs/library-bridge/LibraryBridgeHandlers.cpp @@ -86,10 +86,8 @@ static void writeData(Block data, OutputFormatPtr format) } -ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , keep_alive_timeout(keep_alive_timeout_) - , log(getLogger("ExternalDictionaryLibraryBridgeRequestHandler")) +ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(ContextPtr context_) + : WithContext(context_), log(getLogger("ExternalDictionaryLibraryBridgeRequestHandler")) { } @@ -136,7 +134,7 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ const String & dictionary_id = params.get("dictionary_id"); LOG_TRACE(log, "Library method: '{}', dictionary id: {}", method, dictionary_id); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD); try { @@ -410,11 +408,8 @@ void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerReque } -CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler( - size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , keep_alive_timeout(keep_alive_timeout_) - , log(getLogger("CatBoostLibraryBridgeRequestHandler")) +CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler(ContextPtr context_) + : WithContext(context_), log(getLogger("CatBoostLibraryBridgeRequestHandler")) { } @@ -453,7 +448,7 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ const String & method = params.get("method"); LOG_TRACE(log, "Library method: '{}'", method); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD); try { diff --git a/programs/library-bridge/LibraryBridgeHandlers.h b/programs/library-bridge/LibraryBridgeHandlers.h index 83bca24ce1f..70e3c9c78da 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.h +++ b/programs/library-bridge/LibraryBridgeHandlers.h @@ -18,14 +18,13 @@ namespace DB class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext { public: - ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_); + ExternalDictionaryLibraryBridgeRequestHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: static constexpr inline auto FORMAT = "RowBinary"; - const size_t keep_alive_timeout; LoggerPtr log; }; @@ -62,12 +61,11 @@ private: class CatBoostLibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext { public: - CatBoostLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_); + CatBoostLibraryBridgeRequestHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: - 
const size_t keep_alive_timeout; LoggerPtr log; }; diff --git a/programs/odbc-bridge/ColumnInfoHandler.cpp b/programs/odbc-bridge/ColumnInfoHandler.cpp index 4cb15de3b2c..438062e8169 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.cpp +++ b/programs/odbc-bridge/ColumnInfoHandler.cpp @@ -200,10 +200,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ if (columns.empty()) throw Exception(ErrorCodes::UNKNOWN_TABLE, "Columns definition was not returned"); - WriteBufferFromHTTPServerResponse out( - response, - request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, - keep_alive_timeout); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD); try { writeStringBinary(columns.toString(), out); diff --git a/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h index ca7044fdf32..f16e09ec3f9 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -16,18 +16,12 @@ namespace DB class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext { public: - ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , log(getLogger("ODBCColumnsInfoHandler")) - , keep_alive_timeout(keep_alive_timeout_) - { - } + ODBCColumnsInfoHandler(ContextPtr context_) : WithContext(context_), log(getLogger("ODBCColumnsInfoHandler")) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: LoggerPtr log; - size_t keep_alive_timeout; }; } diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.cpp b/programs/odbc-bridge/IdentifierQuoteHandler.cpp index cf5acdc4534..0bd1e8758cd 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.cpp +++ b/programs/odbc-bridge/IdentifierQuoteHandler.cpp @@ -73,7 +73,7 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ auto identifier = getIdentifierQuote(std::move(connection)); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD); try { writeStringBinary(identifier, out); diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index 7b78c5b4b93..c0e07795ea5 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -14,18 +14,12 @@ namespace DB class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext { public: - IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , log(getLogger("IdentifierQuoteHandler")) - , keep_alive_timeout(keep_alive_timeout_) - { - } + IdentifierQuoteHandler(ContextPtr context_) : WithContext(context_), log(getLogger("IdentifierQuoteHandler")) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: LoggerPtr log; - size_t keep_alive_timeout; }; } diff --git a/programs/odbc-bridge/MainHandler.cpp b/programs/odbc-bridge/MainHandler.cpp index e350afa2b10..b086397446e 100644 --- a/programs/odbc-bridge/MainHandler.cpp +++ b/programs/odbc-bridge/MainHandler.cpp @@ -131,7 +131,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse return; } - WriteBufferFromHTTPServerResponse 
out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD); try { diff --git a/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h index ed0c6b2e28c..0fcad61d274 100644 --- a/programs/odbc-bridge/MainHandler.h +++ b/programs/odbc-bridge/MainHandler.h @@ -20,12 +20,10 @@ class ODBCHandler : public HTTPRequestHandler, WithContext { public: ODBCHandler( - size_t keep_alive_timeout_, ContextPtr context_, const String & mode_) : WithContext(context_) , log(getLogger("ODBCHandler")) - , keep_alive_timeout(keep_alive_timeout_) , mode(mode_) { } @@ -35,7 +33,6 @@ public: private: LoggerPtr log; - size_t keep_alive_timeout; String mode; static inline std::mutex mutex; diff --git a/programs/odbc-bridge/ODBCHandlerFactory.cpp b/programs/odbc-bridge/ODBCHandlerFactory.cpp index eebb0c24c7a..7f095666447 100644 --- a/programs/odbc-bridge/ODBCHandlerFactory.cpp +++ b/programs/odbc-bridge/ODBCHandlerFactory.cpp @@ -30,26 +30,26 @@ std::unique_ptr ODBCBridgeHandlerFactory::createRequestHandl if (uri.getPath() == "/columns_info") #if USE_ODBC - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); #else return nullptr; #endif else if (uri.getPath() == "/identifier_quote") #if USE_ODBC - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); #else return nullptr; #endif else if (uri.getPath() == "/schema_allowed") #if USE_ODBC - return std::make_unique(keep_alive_timeout, getContext()); + return std::make_unique(getContext()); #else return nullptr; #endif else if (uri.getPath() == "/write") - return std::make_unique(keep_alive_timeout, getContext(), "write"); + return std::make_unique(getContext(), "write"); else - return std::make_unique(keep_alive_timeout, getContext(), "read"); + return std::make_unique(getContext(), "read"); } return nullptr; } diff --git a/programs/odbc-bridge/SchemaAllowedHandler.cpp b/programs/odbc-bridge/SchemaAllowedHandler.cpp index c7025ca4311..5dc0cb3aa2b 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.cpp +++ b/programs/odbc-bridge/SchemaAllowedHandler.cpp @@ -86,7 +86,7 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer bool result = isSchemaAllowed(std::move(connection)); - WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout); + WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD); try { writeBoolText(result, out); diff --git a/programs/odbc-bridge/SchemaAllowedHandler.h b/programs/odbc-bridge/SchemaAllowedHandler.h index 8dc725dbb33..e73c0a2cb26 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.h +++ b/programs/odbc-bridge/SchemaAllowedHandler.h @@ -17,18 +17,12 @@ class Context; class SchemaAllowedHandler : public HTTPRequestHandler, WithContext { public: - SchemaAllowedHandler(size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , log(getLogger("SchemaAllowedHandler")) - , keep_alive_timeout(keep_alive_timeout_) - { - } + SchemaAllowedHandler(ContextPtr context_) : WithContext(context_), log(getLogger("SchemaAllowedHandler")) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: LoggerPtr log; - size_t keep_alive_timeout; }; } From 
c33511dcb9314da6b64b11cf21d231c9d3896dad Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 22 Mar 2024 20:24:53 +0000 Subject: [PATCH 0032/2361] remove more --- programs/odbc-bridge/ODBCBridge.cpp | 2 +- programs/odbc-bridge/ODBCHandlerFactory.cpp | 9 +++------ programs/odbc-bridge/ODBCHandlerFactory.h | 3 +-- programs/odbc-bridge/PingHandler.h | 4 ---- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/programs/odbc-bridge/ODBCBridge.cpp b/programs/odbc-bridge/ODBCBridge.cpp index e91cc3158df..2cde5bbf9f5 100644 --- a/programs/odbc-bridge/ODBCBridge.cpp +++ b/programs/odbc-bridge/ODBCBridge.cpp @@ -25,7 +25,7 @@ std::string ODBCBridge::bridgeName() const ODBCBridge::HandlerFactoryPtr ODBCBridge::getHandlerFactoryPtr(ContextPtr context) const { - return std::make_shared("ODBCRequestHandlerFactory-factory", keep_alive_timeout, context); + return std::make_shared("ODBCRequestHandlerFactory-factory", context); } } diff --git a/programs/odbc-bridge/ODBCHandlerFactory.cpp b/programs/odbc-bridge/ODBCHandlerFactory.cpp index 7f095666447..b5d0be908f4 100644 --- a/programs/odbc-bridge/ODBCHandlerFactory.cpp +++ b/programs/odbc-bridge/ODBCHandlerFactory.cpp @@ -9,11 +9,8 @@ namespace DB { -ODBCBridgeHandlerFactory::ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_) - : WithContext(context_) - , log(getLogger(name_)) - , name(name_) - , keep_alive_timeout(keep_alive_timeout_) +ODBCBridgeHandlerFactory::ODBCBridgeHandlerFactory(const std::string & name_, ContextPtr context_) + : WithContext(context_), log(getLogger(name_)), name(name_) { } @@ -23,7 +20,7 @@ std::unique_ptr ODBCBridgeHandlerFactory::createRequestHandl LOG_TRACE(log, "Request URI: {}", uri.toString()); if (uri.getPath() == "/ping" && request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET) - return std::make_unique(keep_alive_timeout); + return std::make_unique(); if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST) { diff --git a/programs/odbc-bridge/ODBCHandlerFactory.h b/programs/odbc-bridge/ODBCHandlerFactory.h index 4aaf1b55453..f4a2717dc9f 100644 --- a/programs/odbc-bridge/ODBCHandlerFactory.h +++ b/programs/odbc-bridge/ODBCHandlerFactory.h @@ -17,14 +17,13 @@ namespace DB class ODBCBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext { public: - ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_); + ODBCBridgeHandlerFactory(const std::string & name_, ContextPtr context_); std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: LoggerPtr log; std::string name; - size_t keep_alive_timeout; }; } diff --git a/programs/odbc-bridge/PingHandler.h b/programs/odbc-bridge/PingHandler.h index c5447107e0c..4c557bd3cf6 100644 --- a/programs/odbc-bridge/PingHandler.h +++ b/programs/odbc-bridge/PingHandler.h @@ -9,11 +9,7 @@ namespace DB class PingHandler : public HTTPRequestHandler { public: - explicit PingHandler(size_t keep_alive_timeout_) : keep_alive_timeout(keep_alive_timeout_) {} void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; - -private: - size_t keep_alive_timeout; }; } From 1115fa4bc74d840e8fc3230908310cb7311dd0d0 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 25 Mar 2024 15:13:13 +0000 Subject: [PATCH 0033/2361] fix tidy --- src/Server/PrometheusRequestHandler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Server/PrometheusRequestHandler.h 
b/src/Server/PrometheusRequestHandler.h index 7f4d3c14f62..a1bd18b394a 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -15,7 +15,7 @@ private: PrometheusMetricsWriterPtr metrics_writer; public: - PrometheusRequestHandler(PrometheusMetricsWriterPtr metrics_writer_) : metrics_writer(std::move(metrics_writer_)) { } + explicit PrometheusRequestHandler(PrometheusMetricsWriterPtr metrics_writer_) : metrics_writer(std::move(metrics_writer_)) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; From 1cfbc548bb415e89e294a22da0eea59302269c37 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 25 Mar 2024 17:28:51 +0100 Subject: [PATCH 0034/2361] Fix copy-paste Co-authored-by: Michael Lex --- src/Core/ServerSettings.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 7480d94e81d..4a22082cdda 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -113,7 +113,7 @@ namespace DB M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. Queries to not yet loaded entity will be blocked until load is finished.", 0) \ M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \ M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ - M(UInt64, max_keep_alive_requests, 10000, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ + M(UInt64, max_keep_alive_requests, 10000, "The maximum number of requests handled via a single http keepalive connection before the server closes this connection.", 0) \ M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_receive_timeout, 0, "HTTP receive timeout for fetch part requests. 
Inherited from default profile `http_receive_timeout` if not set explicitly.", 0) \ From 3c2915934f31d82176b65e1e998eca030671d872 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 26 Mar 2024 04:29:34 +0000 Subject: [PATCH 0035/2361] update fuzzers --- ..._function_state_deserialization_fuzzer.cpp | 23 +++++++++++++++++++ src/Client/ClientBase.cpp | 2 +- src/Core/fuzzers/names_and_types_fuzzer.cpp | 22 ++++++++++++++++++ .../data_type_deserialization_fuzzer.cpp | 22 ++++++++++++++++++ src/Formats/fuzzers/format_fuzzer.cpp | 20 ++++++++++++++++ .../fuzzers/codegen_fuzzer/CMakeLists.txt | 2 +- .../codegen_fuzzer/codegen_select_fuzzer.cpp | 2 +- src/Parsers/fuzzers/create_parser_fuzzer.cpp | 2 +- .../fuzzers/columns_description_fuzzer.cpp | 22 ++++++++++++++++++ 9 files changed, 113 insertions(+), 4 deletions(-) diff --git a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp index 425364efb9c..9d490432c60 100644 --- a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp +++ b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp @@ -12,10 +12,33 @@ #include +#include + #include #include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + +FunctionBasePtr createFunctionBaseCast( + ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); +} + +} + + extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 767a9b2b9f9..bdee2233b27 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2729,7 +2729,7 @@ void ClientBase::runLibFuzzer() for (auto & arg : fuzzer_args_holder) fuzzer_args.emplace_back(arg.data()); - int fuzzer_argc = fuzzer_args.size(); + int fuzzer_argc = static_cast(fuzzer_args.size()); char ** fuzzer_argv = fuzzer_args.data(); LLVMFuzzerRunDriver(&fuzzer_argc, &fuzzer_argv, [](const uint8_t * data, size_t size) diff --git a/src/Core/fuzzers/names_and_types_fuzzer.cpp b/src/Core/fuzzers/names_and_types_fuzzer.cpp index 6fdd8703014..bc8cb7af61f 100644 --- a/src/Core/fuzzers/names_and_types_fuzzer.cpp +++ b/src/Core/fuzzers/names_and_types_fuzzer.cpp @@ -1,7 +1,29 @@ +#include +#include #include #include +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + +FunctionBasePtr createFunctionBaseCast( + ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); +} + +} + + extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp index 0ae325871fb..f1b03147929 100644 --- a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp +++ b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp @@ -8,11 +8,33 @@ #include #include +#include + #include #include +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + 
+class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + +FunctionBasePtr createFunctionBaseCast( + ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); +} + +} + + extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/Formats/fuzzers/format_fuzzer.cpp b/src/Formats/fuzzers/format_fuzzer.cpp index 46661e4828c..4426301b6e7 100644 --- a/src/Formats/fuzzers/format_fuzzer.cpp +++ b/src/Formats/fuzzers/format_fuzzer.cpp @@ -21,6 +21,26 @@ #include +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + +FunctionBasePtr createFunctionBaseCast( + ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); +} + +} + + extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt index 20fd951d390..74fdcff79f7 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt +++ b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt @@ -39,7 +39,7 @@ set(CMAKE_INCLUDE_CURRENT_DIR TRUE) clickhouse_add_executable(codegen_select_fuzzer ${FUZZER_SRCS}) -set_source_files_properties("${PROTO_SRCS}" "out.cpp" PROPERTIES COMPILE_FLAGS "-Wno-reserved-identifier") +set_source_files_properties("${PROTO_SRCS}" "out.cpp" PROPERTIES COMPILE_FLAGS "-Wno-reserved-identifier -Wno-extra-semi-stmt -Wno-used-but-marked-unused") # contrib/libprotobuf-mutator/src/libfuzzer/libfuzzer_macro.h:143:44: error: no newline at end of file [-Werror,-Wnewline-eof] target_compile_options (codegen_select_fuzzer PRIVATE -Wno-newline-eof) diff --git a/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp b/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp index 9310d7d59f7..55daa370651 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp +++ b/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp @@ -27,7 +27,7 @@ DEFINE_BINARY_PROTO_FUZZER(const Sentence& main) DB::ParserQueryWithOutput parser(input.data() + input.size()); try { - DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); + DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); DB::WriteBufferFromOStream out(std::cerr, 4096); DB::formatAST(*ast, out); diff --git a/src/Parsers/fuzzers/create_parser_fuzzer.cpp b/src/Parsers/fuzzers/create_parser_fuzzer.cpp index 854885ad33b..1d5c3e27232 100644 --- a/src/Parsers/fuzzers/create_parser_fuzzer.cpp +++ b/src/Parsers/fuzzers/create_parser_fuzzer.cpp @@ -14,7 +14,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) std::string input = std::string(reinterpret_cast(data), size); DB::ParserCreateQuery parser; - DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000); + DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000, 0); const UInt64 max_ast_depth = 1000; ast->checkDepth(max_ast_depth); diff --git a/src/Storages/fuzzers/columns_description_fuzzer.cpp b/src/Storages/fuzzers/columns_description_fuzzer.cpp index b703a1e7051..cb0c6168225 
100644 --- a/src/Storages/fuzzers/columns_description_fuzzer.cpp +++ b/src/Storages/fuzzers/columns_description_fuzzer.cpp @@ -1,4 +1,26 @@ +#include #include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + +FunctionBasePtr createFunctionBaseCast( + ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); +} + +} extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) From cb40fd7d0c9096a8df8d5e7c2e9924f66d51e061 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 26 Mar 2024 15:27:01 +0000 Subject: [PATCH 0036/2361] minor fixes --- .../fuzzers/aggregate_function_state_deserialization_fuzzer.cpp | 2 +- src/Core/fuzzers/names_and_types_fuzzer.cpp | 2 +- src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp | 2 +- src/Formats/fuzzers/format_fuzzer.cpp | 2 +- src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp | 2 +- src/Parsers/fuzzers/create_parser_fuzzer.cpp | 2 +- src/Storages/fuzzers/columns_description_fuzzer.cpp | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp index 9d490432c60..a956d9906bc 100644 --- a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp +++ b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp @@ -33,7 +33,7 @@ using FunctionBasePtr = std::shared_ptr; FunctionBasePtr createFunctionBaseCast( ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for aggregate_function_state_deserialization_fuzzer"); } } diff --git a/src/Core/fuzzers/names_and_types_fuzzer.cpp b/src/Core/fuzzers/names_and_types_fuzzer.cpp index bc8cb7af61f..74debedf2a3 100644 --- a/src/Core/fuzzers/names_and_types_fuzzer.cpp +++ b/src/Core/fuzzers/names_and_types_fuzzer.cpp @@ -18,7 +18,7 @@ using FunctionBasePtr = std::shared_ptr; FunctionBasePtr createFunctionBaseCast( ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for names_and_types_fuzzer"); } } diff --git a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp index f1b03147929..7d9a0513d18 100644 --- a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp +++ b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp @@ -29,7 +29,7 @@ using FunctionBasePtr = std::shared_ptr; FunctionBasePtr createFunctionBaseCast( ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for 
data_type_deserialization_fuzzer"); } } diff --git a/src/Formats/fuzzers/format_fuzzer.cpp b/src/Formats/fuzzers/format_fuzzer.cpp index 4426301b6e7..2c1ec65e54d 100644 --- a/src/Formats/fuzzers/format_fuzzer.cpp +++ b/src/Formats/fuzzers/format_fuzzer.cpp @@ -35,7 +35,7 @@ using FunctionBasePtr = std::shared_ptr; FunctionBasePtr createFunctionBaseCast( ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for format_fuzzer"); } } diff --git a/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp b/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp index 55daa370651..6b25b581532 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp +++ b/src/Parsers/fuzzers/codegen_fuzzer/codegen_select_fuzzer.cpp @@ -27,7 +27,7 @@ DEFINE_BINARY_PROTO_FUZZER(const Sentence& main) DB::ParserQueryWithOutput parser(input.data() + input.size()); try { - DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, 0); + DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); DB::WriteBufferFromOStream out(std::cerr, 4096); DB::formatAST(*ast, out); diff --git a/src/Parsers/fuzzers/create_parser_fuzzer.cpp b/src/Parsers/fuzzers/create_parser_fuzzer.cpp index 1d5c3e27232..bab8db5671d 100644 --- a/src/Parsers/fuzzers/create_parser_fuzzer.cpp +++ b/src/Parsers/fuzzers/create_parser_fuzzer.cpp @@ -14,7 +14,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) std::string input = std::string(reinterpret_cast(data), size); DB::ParserCreateQuery parser; - DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000, 0); + DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); const UInt64 max_ast_depth = 1000; ast->checkDepth(max_ast_depth); diff --git a/src/Storages/fuzzers/columns_description_fuzzer.cpp b/src/Storages/fuzzers/columns_description_fuzzer.cpp index cb0c6168225..ac285ea50f7 100644 --- a/src/Storages/fuzzers/columns_description_fuzzer.cpp +++ b/src/Storages/fuzzers/columns_description_fuzzer.cpp @@ -17,7 +17,7 @@ using FunctionBasePtr = std::shared_ptr; FunctionBasePtr createFunctionBaseCast( ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for Library Bridge"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for columns_description_fuzzer"); } } From 64e6c6a2fcf2f7017ec3749ad05eed2daeeb4b42 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 26 Mar 2024 22:31:40 +0000 Subject: [PATCH 0037/2361] fix tidy --- programs/library-bridge/LibraryBridgeHandlers.h | 8 ++++---- programs/odbc-bridge/ColumnInfoHandler.h | 2 +- programs/odbc-bridge/IdentifierQuoteHandler.h | 2 +- programs/odbc-bridge/SchemaAllowedHandler.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/programs/library-bridge/LibraryBridgeHandlers.h b/programs/library-bridge/LibraryBridgeHandlers.h index 70e3c9c78da..582619e174e 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.h +++ b/programs/library-bridge/LibraryBridgeHandlers.h @@ 
-18,7 +18,7 @@ namespace DB class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext { public: - ExternalDictionaryLibraryBridgeRequestHandler(ContextPtr context_); + explicit ExternalDictionaryLibraryBridgeRequestHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; @@ -33,7 +33,7 @@ private: class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext { public: - ExternalDictionaryLibraryBridgeExistsHandler(ContextPtr context_); + explicit ExternalDictionaryLibraryBridgeExistsHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; @@ -61,7 +61,7 @@ private: class CatBoostLibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext { public: - CatBoostLibraryBridgeRequestHandler(ContextPtr context_); + explicit CatBoostLibraryBridgeRequestHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; @@ -74,7 +74,7 @@ private: class CatBoostLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext { public: - CatBoostLibraryBridgeExistsHandler(ContextPtr context_); + explicit CatBoostLibraryBridgeExistsHandler(ContextPtr context_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; diff --git a/programs/odbc-bridge/ColumnInfoHandler.h b/programs/odbc-bridge/ColumnInfoHandler.h index f16e09ec3f9..bbbf0da218b 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -16,7 +16,7 @@ namespace DB class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext { public: - ODBCColumnsInfoHandler(ContextPtr context_) : WithContext(context_), log(getLogger("ODBCColumnsInfoHandler")) { } + explicit ODBCColumnsInfoHandler(ContextPtr context_) : WithContext(context_), log(getLogger("ODBCColumnsInfoHandler")) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index c0e07795ea5..a85b56a9f6a 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -14,7 +14,7 @@ namespace DB class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext { public: - IdentifierQuoteHandler(ContextPtr context_) : WithContext(context_), log(getLogger("IdentifierQuoteHandler")) { } + explicit IdentifierQuoteHandler(ContextPtr context_) : WithContext(context_), log(getLogger("IdentifierQuoteHandler")) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; diff --git a/programs/odbc-bridge/SchemaAllowedHandler.h b/programs/odbc-bridge/SchemaAllowedHandler.h index e73c0a2cb26..59022151b53 100644 --- a/programs/odbc-bridge/SchemaAllowedHandler.h +++ b/programs/odbc-bridge/SchemaAllowedHandler.h @@ -17,7 +17,7 @@ class Context; class SchemaAllowedHandler : public HTTPRequestHandler, WithContext { public: - SchemaAllowedHandler(ContextPtr context_) : WithContext(context_), log(getLogger("SchemaAllowedHandler")) { } + explicit SchemaAllowedHandler(ContextPtr context_) : 
WithContext(context_), log(getLogger("SchemaAllowedHandler")) { } void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; From 8357bc7b1b2d48e808b63cc0aa6fb7c7aa36e98b Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 31 Mar 2024 23:33:35 +0000 Subject: [PATCH 0038/2361] fix build --- base/base/CMakeLists.txt | 2 +- cmake/sanitize.cmake | 2 +- programs/CMakeLists.txt | 2 +- src/CMakeLists.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt index 27aa0bd6baf..7b1da9ab4ad 100644 --- a/base/base/CMakeLists.txt +++ b/base/base/CMakeLists.txt @@ -1,4 +1,4 @@ -add_compile_options($<$,$>:${COVERAGE_FLAGS}>) +add_compile_options("$<$,$>:${COVERAGE_FLAGS}>") if (USE_CLANG_TIDY) set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 9d53b2004b4..227d96357b5 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -64,7 +64,7 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat if (WITH_COVERAGE) message (STATUS "Enabled instrumentation for code coverage") - set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping") + set(COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping) endif() option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF) diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 0d91de2dad8..aa7781498c8 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -1,4 +1,4 @@ -add_compile_options($<$,$>:${COVERAGE_FLAGS}>) +add_compile_options("$<$,$>:${COVERAGE_FLAGS}>") if (USE_CLANG_TIDY) set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 73aa409e995..bd603c9f15e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -add_compile_options($<$,$>:${COVERAGE_FLAGS}>) +add_compile_options("$<$,$>:${COVERAGE_FLAGS}>") if (USE_INCLUDE_WHAT_YOU_USE) set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH}) From 99e25d762c2db3c544dd5590726fc039b1828d16 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 1 Apr 2024 12:28:51 +0000 Subject: [PATCH 0039/2361] remove WITH_COVERAGE for fuzzers build --- docker/packager/packager | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/packager/packager b/docker/packager/packager index 23fc26bc1a4..355149df38c 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -276,7 +276,6 @@ def parse_env_variables( elif package_type == "fuzzers": cmake_flags.append("-DENABLE_FUZZING=1") cmake_flags.append("-DENABLE_PROTOBUF=1") - cmake_flags.append("-DWITH_COVERAGE=1") # Reduce linking and building time by avoid *install/all dependencies cmake_flags.append("-DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON") From 8d667ad5a34d1ba3d9008a5a6308598483281b35 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 1 Apr 2024 22:55:51 +0000 Subject: [PATCH 0040/2361] fix build.sh --- docker/packager/binary-builder/build.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/packager/binary-builder/build.sh b/docker/packager/binary-builder/build.sh index 032aceb0af3..cbd14b1eac2 100755 --- a/docker/packager/binary-builder/build.sh +++ b/docker/packager/binary-builder/build.sh @@ -108,7 +108,8 @@ if [ -n "$MAKE_DEB" ]; then bash -x /build/packages/build fi -mv ./programs/clickhouse* /output || mv ./programs/*_fuzzer /output +mv ./programs/clickhouse* 
/output ||: +mv ./programs/*_fuzzer /output ||: [ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output [ -x ./programs/self-extracting/clickhouse-stripped ] && mv ./programs/self-extracting/clickhouse-stripped /output mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds From db3d923d4cae57254cadcef7f6997f3912d46515 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 3 Apr 2024 20:25:29 +0000 Subject: [PATCH 0041/2361] return WITH_COVERAGE, fix build --- cmake/sanitize.cmake | 3 ++- docker/packager/packager | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index 227d96357b5..9f4fa7081c6 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -64,7 +64,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat if (WITH_COVERAGE) message (STATUS "Enabled instrumentation for code coverage") - set(COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping) + set (COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") endif() option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF) diff --git a/docker/packager/packager b/docker/packager/packager index 355149df38c..23fc26bc1a4 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -276,6 +276,7 @@ def parse_env_variables( elif package_type == "fuzzers": cmake_flags.append("-DENABLE_FUZZING=1") cmake_flags.append("-DENABLE_PROTOBUF=1") + cmake_flags.append("-DWITH_COVERAGE=1") # Reduce linking and building time by avoid *install/all dependencies cmake_flags.append("-DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON") From 56b03ee22a380722e94b83f0d2d3736cb16c2847 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 8 Apr 2024 15:28:20 +0000 Subject: [PATCH 0042/2361] Handle macros properly in create query --- src/Storages/Kafka/KafkaSettings.h | 2 +- src/Storages/Kafka/StorageKafkaCommon.cpp | 75 +++++++++++++++++++---- 2 files changed, 65 insertions(+), 12 deletions(-) diff --git a/src/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h index 705fc9f4826..c9ee42b54aa 100644 --- a/src/Storages/Kafka/KafkaSettings.h +++ b/src/Storages/Kafka/KafkaSettings.h @@ -38,8 +38,8 @@ const auto KAFKA_CONSUMERS_POOL_TTL_MS_MAX = 600'000; M(StreamingHandleErrorMode, kafka_handle_error_mode, StreamingHandleErrorMode::DEFAULT, "How to handle errors for Kafka engine. 
Possible values: default (throw an exception after rabbitmq_skip_broken_messages broken messages), stream (save broken messages and errors in virtual columns _raw_message, _error).", 0) \ M(Bool, kafka_commit_on_select, false, "Commit messages when select query is made", 0) \ M(UInt64, kafka_max_rows_per_message, 1, "The maximum number of rows produced in one kafka message for row-based formats.", 0) \ - /* TODO(antaljanosbenjamin): Probably this shouldn't be here, but only read as an argument */ \ M(String, kafka_keeper_path, "", "TODO(antaljanosbenjamin)", 0) \ + M(String, kafka_replica_name, "", "TODO(antaljanosbenjamin)", 0) \ #define OBSOLETE_KAFKA_SETTINGS(M, ALIAS) \ MAKE_OBSOLETE(M, Char, kafka_row_delimiter, '\0') \ diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index b0f23c38163..1319c871f4d 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -1,6 +1,7 @@ #include +#include #include #include #include @@ -303,7 +304,6 @@ void registerStorageKafka(StorageFactory & factory) CHECK_KAFKA_STORAGE_ARGUMENT(15, kafka_handle_error_mode, 0) CHECK_KAFKA_STORAGE_ARGUMENT(16, kafka_commit_on_select, 0) CHECK_KAFKA_STORAGE_ARGUMENT(17, kafka_max_rows_per_message, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(18, kafka_keeper_path, 0) } #undef CHECK_KAFKA_STORAGE_ARGUMENT @@ -357,20 +357,73 @@ void registerStorageKafka(StorageFactory & factory) "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); } - if (!kafka_settings->kafka_keeper_path.value.empty()) - { - if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper) + const auto has_keeper_path = kafka_settings->kafka_keeper_path.changed && !kafka_settings->kafka_keeper_path.value.empty(); + const auto has_replica_name = kafka_settings->kafka_replica_name.changed && !kafka_settings->kafka_replica_name.value.empty(); - throw Exception( - ErrorCodes::SUPPORT_IS_DISABLED, - "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_store_offsets_in_keeper` setting " - "to enable it"); - - return std::make_shared( + if (!has_keeper_path && !has_replica_name) + return std::make_shared( args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); + + if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper) + throw Exception( + ErrorCodes::SUPPORT_IS_DISABLED, + "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_store_offsets_in_keeper` setting " + "to enable it"); + + if (!has_keeper_path || !has_replica_name) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Either specify both zookeeper path and replica name or none of them"); + + const auto is_on_cluster = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; + const auto is_replicated_database = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY + && DatabaseCatalog::instance().getDatabase(args.table_id.database_name)->getEngineName() == "Replicated"; + + // TODO(antaljanosbenjamin): attach query? + // TODO(antaljanosbenjamin): why not on single atomic database? + const auto allow_uuid_macro = is_on_cluster || is_replicated_database || args.query.attach; + + auto context = args.getContext(); + /// Unfold {database} and {table} macro on table creation, so table can be renamed. 
+ if (!args.attach) + { + Macros::MacroExpansionInfo info; + /// NOTE: it's not recursive + info.expand_special_macros_only = true; + info.table_id = args.table_id; + // TODO(antaljanosbenjamin): why to skip UUID here? + info.table_id.uuid = UUIDHelpers::Nil; + kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); + + info.level = 0; + kafka_settings->kafka_replica_name.value = context->getMacros()->expand(kafka_settings->kafka_replica_name.value, info); } - return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); + + auto * settings_query = args.storage_def->settings; + chassert(settings_query != nullptr && "Unexpected settings query in StorageKafka"); + + settings_query->changes.setSetting("kafka_keeper_path", kafka_settings->kafka_keeper_path.value); + settings_query->changes.setSetting("kafka_replica_name", kafka_settings->kafka_replica_name.value); + + /// Expand other macros (such as {shard} and {replica}). We do not expand them on previous step + /// to make possible copying metadata files between replicas. + Macros::MacroExpansionInfo info; + info.table_id = args.table_id; + if (is_replicated_database) + { + auto database = DatabaseCatalog::instance().getDatabase(args.table_id.database_name); + info.shard = getReplicatedDatabaseShardName(database); + info.replica = getReplicatedDatabaseReplicaName(database); + } + if (!allow_uuid_macro) + info.table_id.uuid = UUIDHelpers::Nil; + kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); + + info.level = 0; + info.table_id.uuid = UUIDHelpers::Nil; + kafka_settings->kafka_replica_name.value = context->getMacros()->expand(kafka_settings->kafka_replica_name.value, info); + + return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); }; factory.registerStorage( From f0fdeb6d3ef9ae51098d11a0a22bb138b6a58df0 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 24 Feb 2024 22:36:29 +0100 Subject: [PATCH 0043/2361] move adaptive task size calculation into base pool --- src/Core/Settings.h | 3 +- .../MergeTree/MergeTreePrefetchedReadPool.cpp | 24 +-------- src/Storages/MergeTree/MergeTreeReadPool.cpp | 53 +++--------------- src/Storages/MergeTree/MergeTreeReadPool.h | 1 - .../MergeTree/MergeTreeReadPoolBase.cpp | 54 +++++++++++++++++-- .../MergeTree/MergeTreeReadPoolBase.h | 2 +- .../MergeTreeReadPoolParallelReplicas.cpp | 1 + ...rgeTreeReadPoolParallelReplicasInOrder.cpp | 1 + src/Storages/MergeTree/MergeTreeReadTask.h | 2 + 9 files changed, 64 insertions(+), 77 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d5ea9534e6c..a5fcb537539 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -765,7 +765,7 @@ class IColumn; M(UInt64, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem, (20 * 8192), "If at least as many lines are read from one file, the reading can be parallelized, when reading from remote filesystem.", 0) \ M(UInt64, merge_tree_min_bytes_for_concurrent_read_for_remote_filesystem, (24 * 10 * 1024 * 1024), "If at least as many bytes are read from one file, the reading can be parallelized, when reading from remote filesystem.", 0) \ M(UInt64, remote_read_min_bytes_for_seek, 4 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes required for remote read (url, s3) to do seek, instead of read with ignore.", 0) \ - M(UInt64, 
merge_tree_min_bytes_per_task_for_remote_reading, 4 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes to read per task.", 0) \ + M(UInt64, merge_tree_min_bytes_per_task_for_remote_reading, 2 * DBMS_DEFAULT_BUFFER_SIZE, "Min bytes to read per task.", 0) ALIAS(filesystem_prefetch_min_bytes_for_single_read_task) \ M(Bool, merge_tree_use_const_size_tasks_for_remote_reading, true, "Whether to use constant size tasks for reading from a remote table.", 0) \ M(Bool, merge_tree_determine_task_size_by_prewhere_columns, true, "Whether to use only prewhere columns size to determine reading task size.", 0) \ M(UInt64, merge_tree_compact_parts_min_granules_to_multibuffer_read, 16, "Only available in ClickHouse Cloud", 0) \ @@ -806,7 +806,6 @@ class IColumn; M(UInt64, prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the prefetch buffer to read from the filesystem.", 0) \ M(UInt64, filesystem_prefetch_step_bytes, 0, "Prefetch step in bytes. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \ M(UInt64, filesystem_prefetch_step_marks, 0, "Prefetch step in marks. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \ - M(UInt64, filesystem_prefetch_min_bytes_for_single_read_task, "2Mi", "Do not parallelize within one file read less than this amount of bytes. E.g. one reader will not receive a read task of size less than this amount. This setting is recommended to avoid spikes of time for aws getObject requests to aws", 0) \ M(UInt64, filesystem_prefetch_max_memory_usage, "1Gi", "Maximum memory usage for prefetches.", 0) \ M(UInt64, filesystem_prefetches_limit, 200, "Maximum number of prefetches. Zero means unlimited. A setting `filesystem_prefetches_max_memory_usage` is more recommended if you want to limit the number of prefetches", 0) \ \ diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index c19b4ddd8a2..b752c0d0d80 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -411,29 +411,7 @@ void MergeTreePrefetchedReadPool::fillPerThreadTasks(size_t threads, size_t sum_ 1, static_cast(std::round(static_cast(settings.filesystem_prefetch_step_bytes) / part_stat.approx_size_of_mark))); } - /// This limit is important to avoid spikes of slow aws getObject requests when parallelizing within one file. - /// (The default is taken from here https://docs.aws.amazon.com/whitepapers/latest/s3-optimizing-performance-best-practices/use-byte-range-fetches.html). - if (part_stat.approx_size_of_mark - && settings.filesystem_prefetch_min_bytes_for_single_read_task - && part_stat.approx_size_of_mark < settings.filesystem_prefetch_min_bytes_for_single_read_task) - { - const size_t min_prefetch_step_marks_by_total_cols = static_cast( - std::ceil(static_cast(settings.filesystem_prefetch_min_bytes_for_single_read_task) / part_stat.approx_size_of_mark)); - - /// At least one task to start working on it right now and another one to prefetch in the meantime. 
- const size_t new_min_prefetch_step_marks = std::min(min_prefetch_step_marks_by_total_cols, sum_marks / threads / 2); - if (min_prefetch_step_marks < new_min_prefetch_step_marks) - { - LOG_DEBUG(log, "Increasing min prefetch step from {} to {}", min_prefetch_step_marks, new_min_prefetch_step_marks); - min_prefetch_step_marks = new_min_prefetch_step_marks; - } - } - - if (part_stat.prefetch_step_marks < min_prefetch_step_marks) - { - LOG_DEBUG(log, "Increasing prefetch step from {} to {}", part_stat.prefetch_step_marks, min_prefetch_step_marks); - part_stat.prefetch_step_marks = min_prefetch_step_marks; - } + part_stat.prefetch_step_marks = std::max(part_stat.prefetch_step_marks, per_part_infos[i]->min_marks_per_task); LOG_DEBUG( log, diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index e525f7f5f65..2f9ec317386 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -25,14 +25,6 @@ extern const int CANNOT_SCHEDULE_TASK; extern const int LOGICAL_ERROR; } -size_t getApproxSizeOfPart(const IMergeTreeDataPart & part, const Names & columns_to_read) -{ - ColumnSize columns_size{}; - for (const auto & col_name : columns_to_read) - columns_size.add(part.getColumnSize(col_name)); - return columns_size.data_compressed; -} - MergeTreeReadPool::MergeTreeReadPool( RangesInDataParts && parts_, VirtualFields shared_virtual_fields_, @@ -53,38 +45,9 @@ MergeTreeReadPool::MergeTreeReadPool( column_names_, settings_, context_) - , min_marks_for_concurrent_read(pool_settings.min_marks_for_concurrent_read) , backoff_settings{context_->getSettingsRef()} , backoff_state{pool_settings.threads} { - if (std::ranges::count(is_part_on_remote_disk, true)) - { - const auto & settings = context_->getSettingsRef(); - - size_t total_compressed_bytes = 0; - size_t total_marks = 0; - for (const auto & part : parts_ranges) - { - const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info - ? prewhere_info->prewhere_actions->getRequiredColumnsNames() - : column_names_; - - total_compressed_bytes += getApproxSizeOfPart(*part.data_part, columns); - total_marks += part.getMarksCount(); - } - - if (total_marks) - { - const auto min_bytes_per_task = settings.merge_tree_min_bytes_per_task_for_remote_reading; - const auto avg_mark_bytes = std::max(total_compressed_bytes / total_marks, 1); - /// We're taking min here because number of tasks shouldn't be too low - it will make task stealing impossible. - const auto heuristic_min_marks = std::min(total_marks / pool_settings.threads, min_bytes_per_task / avg_mark_bytes); - - if (heuristic_min_marks > min_marks_for_concurrent_read) - min_marks_for_concurrent_read = heuristic_min_marks; - } - } - fillPerThreadInfo(pool_settings.threads, pool_settings.sum_marks); } @@ -129,15 +92,16 @@ MergeTreeReadTaskPtr MergeTreeReadPool::getTask(size_t task_idx, MergeTreeReadTa const auto part_idx = thread_task.part_idx; auto & marks_in_part = thread_tasks.sum_marks_in_parts.back(); + const auto min_marks_per_task = per_part_infos[part_idx]->min_marks_per_task; size_t need_marks; if (is_part_on_remote_disk[part_idx] && !pool_settings.use_const_size_tasks_for_remote_reading) need_marks = marks_in_part; else /// Get whole part to read if it is small enough. - need_marks = std::min(marks_in_part, min_marks_for_concurrent_read); + need_marks = std::min(marks_in_part, min_marks_per_task); /// Do not leave too little rows in part for next time. 
- if (marks_in_part > need_marks && marks_in_part - need_marks < min_marks_for_concurrent_read / 2) + if (marks_in_part > need_marks && marks_in_part - need_marks < min_marks_per_task / 2) need_marks = marks_in_part; MarkRanges ranges_to_get_from_part; @@ -256,8 +220,6 @@ void MergeTreeReadPool::fillPerThreadInfo(size_t threads, size_t sum_marks) parts_queue.push(std::move(info.second)); } - LOG_DEBUG(log, "min_marks_for_concurrent_read={}", min_marks_for_concurrent_read); - const size_t min_marks_per_thread = (sum_marks - 1) / threads + 1; for (size_t i = 0; i < threads && !parts_queue.empty(); ++i) @@ -270,15 +232,14 @@ void MergeTreeReadPool::fillPerThreadInfo(size_t threads, size_t sum_marks) auto & part_with_ranges = current_parts.back().part; size_t & marks_in_part = current_parts.back().sum_marks; const auto part_idx = current_parts.back().part_idx; + const auto min_marks_per_task = per_part_infos[part_idx]->min_marks_per_task; /// Do not get too few rows from part. - if (marks_in_part >= min_marks_for_concurrent_read && - need_marks < min_marks_for_concurrent_read) - need_marks = min_marks_for_concurrent_read; + if (marks_in_part >= min_marks_per_task && need_marks < min_marks_per_task) + need_marks = min_marks_per_task; /// Do not leave too few rows in part for next time. - if (marks_in_part > need_marks && - marks_in_part - need_marks < min_marks_for_concurrent_read) + if (marks_in_part > need_marks && marks_in_part - need_marks < min_marks_per_task) need_marks = marks_in_part; MarkRanges ranges_to_get_from_part; diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index cb0e8a9657f..b79af82ddb2 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -78,7 +78,6 @@ private: void fillPerThreadInfo(size_t threads, size_t sum_marks); mutable std::mutex mutex; - size_t min_marks_for_concurrent_read = 0; /// State to track numbers of slow reads. struct BackoffState diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 0cbb0a86b2f..397491a952d 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -1,6 +1,7 @@ -#include -#include +#include #include +#include +#include namespace DB @@ -29,10 +30,53 @@ MergeTreeReadPoolBase::MergeTreeReadPoolBase( , header(storage_snapshot->getSampleBlockForColumns(column_names)) , profile_callback([this](ReadBufferFromFileBase::ProfileInfo info_) { profileFeedback(info_); }) { - fillPerPartInfos(); + fillPerPartInfos(context_->getSettingsRef()); } -void MergeTreeReadPoolBase::fillPerPartInfos() +static size_t getApproxSizeOfPart(const IMergeTreeDataPart & part, const Names & columns_to_read) +{ + ColumnSize columns_size{}; + for (const auto & col_name : columns_to_read) + columns_size.add(part.getColumnSize(col_name)); + return columns_size.data_compressed; +} + +static size_t calculateMinMarksPerTask( + const RangesInDataPart & part, + const Names & columns_to_read, + PrewhereInfoPtr prewhere_info, + const MergeTreeReadPoolBase::PoolSettings & pool_settings, + const Settings & settings) +{ + size_t min_marks_per_task = pool_settings.min_marks_for_concurrent_read; + const size_t part_marks_count = part.getMarksCount(); + if (part_marks_count && part.data_part->isStoredOnRemoteDisk()) + { + const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info + ? 
prewhere_info->prewhere_actions->getRequiredColumnsNames() + : columns_to_read; + const size_t part_compressed_bytes = getApproxSizeOfPart(*part.data_part, columns); + + const auto avg_mark_bytes = std::max(part_compressed_bytes / part_marks_count, 1); + const auto min_bytes_per_task = settings.merge_tree_min_bytes_per_task_for_remote_reading; + /// We're taking min here because number of tasks shouldn't be too low - it will make task stealing impossible. + const auto heuristic_min_marks + = std::min(pool_settings.sum_marks / pool_settings.threads / 2, min_bytes_per_task / avg_mark_bytes); + if (heuristic_min_marks > min_marks_per_task) + { + LOG_DEBUG( + &Poco::Logger::get("MergeTreeReadPoolBase"), + "Increasing min_marks_per_task from {} to {} based on columns size heuristic", + min_marks_per_task, + heuristic_min_marks); + min_marks_per_task = heuristic_min_marks; + } + } + LOG_DEBUG(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); + return min_marks_per_task; +} + +void MergeTreeReadPoolBase::fillPerPartInfos(const Settings & settings) { per_part_infos.reserve(parts_ranges.size()); is_part_on_remote_disk.reserve(parts_ranges.size()); @@ -83,6 +127,8 @@ void MergeTreeReadPoolBase::fillPerPartInfos() } is_part_on_remote_disk.push_back(part_with_ranges.data_part->isStoredOnRemoteDisk()); + read_task_info.min_marks_per_task + = calculateMinMarksPerTask(part_with_ranges, column_names, prewhere_info, pool_settings, settings); per_part_infos.push_back(std::make_shared(std::move(read_task_info))); } } diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.h b/src/Storages/MergeTree/MergeTreeReadPoolBase.h index 1b5bfec5898..123f7538ba8 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.h @@ -48,7 +48,7 @@ protected: const UncompressedCachePtr owned_uncompressed_cache; const Block header; - void fillPerPartInfos(); + void fillPerPartInfos(const Settings & settings); std::vector getPerPartSumMarks() const; MergeTreeReadTaskPtr createTask( diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp index 38035d97f56..d542abb2985 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp @@ -33,6 +33,7 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( context_) , extension(std::move(extension_)) , coordination_mode(CoordinationMode::Default) + , min_marks_per_task(pool_settings.min_marks_for_concurrent_read) { extension.all_callback( InitialAllRangesAnnouncement(coordination_mode, parts_ranges.getDescriptions(), extension.number_of_current_replica)); diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp index 01c0a9f91be..e8228007f81 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp @@ -32,6 +32,7 @@ MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrd context_) , extension(std::move(extension_)) , mode(mode_) + , min_marks_per_task(pool_settings.min_marks_for_concurrent_read) { for (const auto & part : parts_ranges) request.push_back({part.data_part->info, MarkRanges{}}); diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index 
c8bb501c0e8..4c94a6afe64 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -66,6 +66,8 @@ struct MergeTreeReadTaskInfo MergeTreeBlockSizePredictorPtr shared_size_predictor; /// TODO: comment VirtualFields const_virtual_fields; + /// The amount of data to read per task based on size of the queried columns. + size_t min_marks_per_task = 0; }; using MergeTreeReadTaskInfoPtr = std::shared_ptr; From 85072356dbb4e298113d3a3a262374b1140fc8f5 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 24 Feb 2024 22:42:21 +0100 Subject: [PATCH 0044/2361] support in parallel replicas --- .../MergeTree/MergeTreeReadPoolParallelReplicas.cpp | 9 ++++++--- .../MergeTree/MergeTreeReadPoolParallelReplicas.h | 1 + .../MergeTreeReadPoolParallelReplicasInOrder.cpp | 11 +++++------ .../MergeTreeReadPoolParallelReplicasInOrder.h | 1 + 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp index d542abb2985..33eaf5a49bd 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp @@ -35,6 +35,9 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( , coordination_mode(CoordinationMode::Default) , min_marks_per_task(pool_settings.min_marks_for_concurrent_read) { + for (const auto & info : per_part_infos) + min_marks_per_task = std::max(min_marks_per_task, info->min_marks_per_task); + extension.all_callback( InitialAllRangesAnnouncement(coordination_mode, parts_ranges.getDescriptions(), extension.number_of_current_replica)); } @@ -51,7 +54,7 @@ MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicas::getTask(size_t /*task_id auto result = extension.callback(ParallelReadRequest( coordination_mode, extension.number_of_current_replica, - pool_settings.min_marks_for_concurrent_read * pool_settings.threads, + min_marks_per_task * pool_settings.threads, /// For Default coordination mode we don't need to pass part names. 
RangesInDataPartsDescription{})); @@ -77,9 +80,9 @@ MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicas::getTask(size_t /*task_id MarkRanges ranges_to_read; size_t current_sum_marks = 0; - while (current_sum_marks < pool_settings.min_marks_for_concurrent_read && !current_task.ranges.empty()) + while (current_sum_marks < min_marks_per_task && !current_task.ranges.empty()) { - auto diff = pool_settings.min_marks_for_concurrent_read - current_sum_marks; + auto diff = min_marks_per_task - current_sum_marks; auto range = current_task.ranges.front(); if (range.getNumberOfMarks() > diff) { diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h index ca159edb91c..6ba63cc2c9a 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h @@ -32,6 +32,7 @@ private: const ParallelReadingExtension extension; const CoordinationMode coordination_mode; + size_t min_marks_per_task{0}; RangesInDataPartsDescription buffered_ranges; bool no_more_tasks_available{false}; LoggerPtr log = getLogger("MergeTreeReadPoolParallelReplicas"); diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp index e8228007f81..6b5cf978423 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp @@ -34,6 +34,9 @@ MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrd , mode(mode_) , min_marks_per_task(pool_settings.min_marks_for_concurrent_read) { + for (const auto & info : per_part_infos) + min_marks_per_task = std::max(min_marks_per_task, info->min_marks_per_task); + for (const auto & part : parts_ranges) request.push_back({part.data_part->info, MarkRanges{}}); @@ -77,12 +80,8 @@ MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicasInOrder::getTask(size_t ta if (no_more_tasks) return nullptr; - auto response = extension.callback(ParallelReadRequest( - mode, - extension.number_of_current_replica, - pool_settings.min_marks_for_concurrent_read * request.size(), - request - )); + auto response + = extension.callback(ParallelReadRequest(mode, extension.number_of_current_replica, min_marks_per_task * request.size(), request)); if (!response || response->description.empty() || response->finish) { diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h index 4fe3f7a699c..22841bea212 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h @@ -30,6 +30,7 @@ private: const ParallelReadingExtension extension; const CoordinationMode mode; + size_t min_marks_per_task{0}; bool no_more_tasks{false}; RangesInDataPartsDescription request; RangesInDataPartsDescription buffered_tasks; From 281327b4ced9aadf421646095d71ad59ba6c70ba Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 26 Feb 2024 18:41:15 +0100 Subject: [PATCH 0045/2361] register settings change --- src/Core/SettingsChangesHistory.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index f43ca154d56..6e38976b4b1 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -85,7 +85,8 @@ namespace 
SettingsChangesHistory /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972) static std::map settings_changes_history = { - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, + {"24.4", {{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, + {"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, }}, {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, From e8e980932fdcc87934a3d4591d9a42130a4d60a4 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 28 Feb 2024 20:06:12 +0100 Subject: [PATCH 0046/2361] fix --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index bee42c3ddde..c4ba92d483e 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -945,6 +945,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( PoolSettings pool_settings { + .threads = num_streams, + .sum_marks = parts_with_ranges.getMarksCountAllParts(), .min_marks_for_concurrent_read = info.min_marks_for_concurrent_read, .preferred_block_size_bytes = settings.preferred_block_size_bytes, .use_uncompressed_cache = info.use_uncompressed_cache, From 29729c0ea8fcf2543c8d1a2f996773b54829c86d Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 6 Mar 2024 23:42:37 +0100 Subject: [PATCH 0047/2361] use whole part size for Compact parts --- src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 397491a952d..2bf19e8cde5 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -38,7 +38,8 @@ static size_t getApproxSizeOfPart(const IMergeTreeDataPart & part, const Names & ColumnSize columns_size{}; for (const auto & col_name : columns_to_read) columns_size.add(part.getColumnSize(col_name)); - return columns_size.data_compressed; + /// For compact parts we don't know individual column sizes, let's use whole part size as approximation + return columns_size.data_compressed ? 
columns_size.data_compressed : part.getBytesOnDisk(); } static size_t calculateMinMarksPerTask( From af6c4a3f2d247a3b39bd48f33e16758972e31723 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 26 Mar 2024 22:09:17 +0000 Subject: [PATCH 0048/2361] fix noisy log --- src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 2bf19e8cde5..9d2484ecd04 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -65,7 +65,7 @@ static size_t calculateMinMarksPerTask( = std::min(pool_settings.sum_marks / pool_settings.threads / 2, min_bytes_per_task / avg_mark_bytes); if (heuristic_min_marks > min_marks_per_task) { - LOG_DEBUG( + LOG_TEST( &Poco::Logger::get("MergeTreeReadPoolBase"), "Increasing min_marks_per_task from {} to {} based on columns size heuristic", min_marks_per_task, @@ -73,7 +73,7 @@ static size_t calculateMinMarksPerTask( min_marks_per_task = heuristic_min_marks; } } - LOG_DEBUG(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); + LOG_TEST(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); return min_marks_per_task; } From bc1042a497257526c13a08d86fc736466cd454b5 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 28 Mar 2024 19:11:18 +0000 Subject: [PATCH 0049/2361] fix test --- tests/queries/0_stateless/02532_send_logs_level_test.reference | 1 + tests/queries/0_stateless/02532_send_logs_level_test.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.reference b/tests/queries/0_stateless/02532_send_logs_level_test.reference index 7e51b888d9c..72f4ea06184 100644 --- a/tests/queries/0_stateless/02532_send_logs_level_test.reference +++ b/tests/queries/0_stateless/02532_send_logs_level_test.reference @@ -1,3 +1,4 @@ + MergeTreeReadPoolBase: Will use min_marks_per_task=24 MergeTreeMarksLoader: Loading marks from path data.cmrk3 MergeTreeRangeReader: First reader returned: num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), requested columns: key MergeTreeRangeReader: read() returned num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), sample block key diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.sh b/tests/queries/0_stateless/02532_send_logs_level_test.sh index 4afc6d4496b..08b3058c5a0 100755 --- a/tests/queries/0_stateless/02532_send_logs_level_test.sh +++ b/tests/queries/0_stateless/02532_send_logs_level_test.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash -# Tags: no-s3-storage, no-debug +# Tags: no-s3-storage, no-debug, no-random-merge-tree-settings # - no-s3-storage - S3 has additional logging # - no-debug - debug builds also has additional logging +# - no-random-merge-tree-settings - changes content of log messages CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 8b2bd3cfd7654fee98df6f024bcf7e4b6b4f2b49 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 26 Mar 2024 20:48:49 +0000 Subject: [PATCH 0050/2361] impl --- src/Common/AsynchronousMetrics.cpp | 187 +++++++++++++++++++++-------- src/Common/AsynchronousMetrics.h | 6 + 2 files changed, 142 insertions(+), 51 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp 
b/src/Common/AsynchronousMetrics.cpp index ab54b180fbf..cf9e8d21bd8 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -1,18 +1,19 @@ -#include -#include -#include -#include -#include -#include -#include -#include +#include #include #include +#include #include #include #include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include #include "config.h" @@ -78,6 +79,7 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/sys/fs/cgroup/memory.current", cgroupmem_usage_in_bytes); } openFileIfExists("/sys/fs/cgroup/cpu.max", cgroupcpu_max); + openFileIfExists("/sys/fs/cgroup/cpu.stat", cgroupcpu_stat); /// CGroups v1 if (!cgroupmem_limit_in_bytes) @@ -90,6 +92,8 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_period_us", cgroupcpu_cfs_period); openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", cgroupcpu_cfs_quota); } + if (!cgroupcpu_stat) + openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count); openFileIfExists("/proc/self/maps", vm_maps); @@ -561,6 +565,82 @@ AsynchronousMetrics::NetworkInterfaceStatValues::operator-(const AsynchronousMet #endif +void AsynchronousMetrics::applyCPUMetricsUpdate( + AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier) +{ + new_values["OSUserTime" + cpu_suffix] + = {delta_values.user * multiplier, + "The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the " + "host machine, not just clickhouse-server." + " This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline " + "stalls, branch mispredictions, running another SMT core)." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSNiceTime" + cpu_suffix] + = {delta_values.nice * multiplier, + "The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all " + "the processes on the host machine, not just clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSSystemTime" + cpu_suffix] + = {delta_values.system * multiplier, + "The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIdleTime" + cpu_suffix] + = {delta_values.idle * multiplier, + "The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This " + "is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." + " This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline " + "stalls, branch mispredictions, running another SMT core)." + " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIOWaitTime" + cpu_suffix] + = {delta_values.iowait * multiplier, + "The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as " + "the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just " + "clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIrqTime" + cpu_suffix] + = {delta_values.irq * multiplier, + "The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " A high number of this metric may indicate hardware misconfiguration or a very high network load." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSSoftIrqTime" + cpu_suffix] + = {delta_values.softirq * multiplier, + "The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " A high number of this metric may indicate inefficient software running on the system." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSStealTime" + cpu_suffix] + = {delta_values.steal * multiplier, + "The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide " + "metric, it includes all the processes on the host machine, not just clickhouse-server." + " Not every virtualized environments present this metric, and most of them don't." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSGuestTime" + cpu_suffix] + = {delta_values.guest * multiplier, + "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man " + "procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." + " This metric is irrelevant for ClickHouse, but still exists for completeness." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSGuestNiceTime" + cpu_suffix] + = {delta_values.guest_nice * multiplier, + "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest " + "was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host " + "machine, not just clickhouse-server." + " This metric is irrelevant for ClickHouse, but still exists for completeness." + " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; +} + void AsynchronousMetrics::update(TimePoint update_time, bool force_update) { Stopwatch watch; @@ -821,16 +901,57 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) new_values["CGroupMaxCPU"] = { max_cpu_cgroups, "The maximum number of CPU cores according to CGroups."}; } - if (proc_stat) + int64_t hz = sysconf(_SC_CLK_TCK); + if (-1 == hz) + throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); + + if (cgroupcpu_stat || cgroupcpuacct_stat) + { + ReadBufferFromFilePRead & in = cgroupcpu_stat ? *cgroupcpu_stat : *cgroupcpuacct_stat; + ProcStatValuesCPU current_values{}; + + /// We re-read the file from the beginning each time + in.rewind(); + + while (!in.eof()) + { + String name; + readStringUntilWhitespace(name, in); + skipWhitespaceIfAny(in); + + /// `user_usec` for cgroup v2 and `user` for cgroup v1 + if (name.starts_with("user")) + { + readText(current_values.user, in); + skipToNextLineOrEOF(in); + } + /// `system_usec` for cgroup v2 and `system` for cgroup v1 + else if (name.starts_with("system")) + { + readText(current_values.system, in); + skipToNextLineOrEOF(in); + } + else + skipToNextLineOrEOF(in); + } + + if (!first_run) + { + const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; + const auto cgroup_specific_divisor = cgroupcpu_stat ? 1e6 : hz; + const double multiplier = 1.0 / cgroup_specific_divisor + / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); + applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier); + } + + proc_stat_values_all_cpus = current_values; + } + else if (proc_stat) { try { proc_stat->rewind(); - int64_t hz = sysconf(_SC_CLK_TCK); - if (-1 == hz) - throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); - double multiplier = 1.0 / hz / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); size_t num_cpus = 0; @@ -876,43 +997,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) else delta_values_all_cpus = delta_values; - new_values["OSUserTime" + cpu_suffix] = { delta_values.user * multiplier, - "The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSNiceTime" + cpu_suffix] = { delta_values.nice * multiplier, - "The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSSystemTime" + cpu_suffix] = { delta_values.system * multiplier, - "The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIdleTime" + cpu_suffix] = { delta_values.idle * multiplier, - "The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIOWaitTime" + cpu_suffix] = { delta_values.iowait * multiplier, - "The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIrqTime" + cpu_suffix] = { delta_values.irq * multiplier, - "The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " A high number of this metric may indicate hardware misconfiguration or a very high network load." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSSoftIrqTime" + cpu_suffix] = { delta_values.softirq * multiplier, - "The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " A high number of this metric may indicate inefficient software running on the system." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSStealTime" + cpu_suffix] = { delta_values.steal * multiplier, - "The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " Not every virtualized environments present this metric, and most of them don't." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSGuestTime" + cpu_suffix] = { delta_values.guest * multiplier, - "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This metric is irrelevant for ClickHouse, but still exists for completeness." - " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSGuestNiceTime" + cpu_suffix] = { delta_values.guest_nice * multiplier, - "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This metric is irrelevant for ClickHouse, but still exists for completeness." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; + applyCPUMetricsUpdate(new_values, cpu_suffix, delta_values, multiplier); } prev_values = current_values; diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 4b3d28e80c5..caebcd4cdef 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -122,6 +122,8 @@ private: std::optional cgroupcpu_cfs_period TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_cfs_quota TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_max TSA_GUARDED_BY(data_mutex); + std::optional cgroupcpu_stat TSA_GUARDED_BY(data_mutex); + std::optional cgroupcpuacct_stat TSA_GUARDED_BY(data_mutex); std::optional vm_max_map_count TSA_GUARDED_BY(data_mutex); std::optional vm_maps TSA_GUARDED_BY(data_mutex); @@ -217,6 +219,10 @@ private: void openBlockDevices(); void openSensorsChips(); void openEDAC(); + + void applyCPUMetricsUpdate( + AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier); + #endif void run(); From 85e8a5678783521442a6e61bcd00ba6167302b6a Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 27 Mar 2024 17:02:52 +0000 Subject: [PATCH 0051/2361] normalized metrics --- src/Common/AsynchronousMetrics.cpp | 170 ++++++++++++++++++----------- src/Common/AsynchronousMetrics.h | 6 + 2 files changed, 115 insertions(+), 61 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index cf9e8d21bd8..59595e701c1 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -641,6 +641,73 @@ void AsynchronousMetrics::applyCPUMetricsUpdate( "them [0..num cores]."}; } +void AsynchronousMetrics::applyNormalizedCPUMetricsUpdate( + AsynchronousMetricValues & new_values, double num_cpus_to_normalize, const ProcStatValuesCPU & delta_values_all_cpus, double multiplier) +{ + chassert(num_cpus_to_normalize); + + new_values["OSUserTimeNormalized"] + = {delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, + "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSNiceTimeNormalized"] + = {delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, + "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." 
+ " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSSystemTimeNormalized"] + = {delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, + "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIdleTimeNormalized"] + = {delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIOWaitTimeNormalized"] + = {delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIrqTimeNormalized"] + = {delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of " + "the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSSoftIrqTimeNormalized"] + = {delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, + "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval " + "regardless of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSStealTimeNormalized"] + = {delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, + "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSGuestTimeNormalized"] + = {delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, + "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." 
+ " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSGuestNiceTimeNormalized"] + = {delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, + "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval " + "regardless of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; +} + void AsynchronousMetrics::update(TimePoint update_time, bool force_update) { Stopwatch watch; @@ -907,44 +974,56 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) if (cgroupcpu_stat || cgroupcpuacct_stat) { - ReadBufferFromFilePRead & in = cgroupcpu_stat ? *cgroupcpu_stat : *cgroupcpuacct_stat; - ProcStatValuesCPU current_values{}; - - /// We re-read the file from the beginning each time - in.rewind(); - - while (!in.eof()) + try { - String name; - readStringUntilWhitespace(name, in); - skipWhitespaceIfAny(in); + ReadBufferFromFilePRead & in = cgroupcpu_stat ? *cgroupcpu_stat : *cgroupcpuacct_stat; + ProcStatValuesCPU current_values{}; - /// `user_usec` for cgroup v2 and `user` for cgroup v1 - if (name.starts_with("user")) + /// We re-read the file from the beginning each time + in.rewind(); + + while (!in.eof()) { - readText(current_values.user, in); - skipToNextLineOrEOF(in); + String name; + readStringUntilWhitespace(name, in); + skipWhitespaceIfAny(in); + + /// `user_usec` for cgroup v2 and `user` for cgroup v1 + if (name.starts_with("user")) + { + readText(current_values.user, in); + skipToNextLineOrEOF(in); + } + /// `system_usec` for cgroup v2 and `system` for cgroup v1 + else if (name.starts_with("system")) + { + readText(current_values.system, in); + skipToNextLineOrEOF(in); + } + else + skipToNextLineOrEOF(in); } - /// `system_usec` for cgroup v2 and `system` for cgroup v1 - else if (name.starts_with("system")) + + if (!first_run) { - readText(current_values.system, in); - skipToNextLineOrEOF(in); + const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; + const auto cgroup_specific_divisor = cgroupcpu_stat ? 1e6 : hz; + const double multiplier = 1.0 / cgroup_specific_divisor + / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); + applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier); + if (max_cpu_cgroups > 0) + applyNormalizedCPUMetricsUpdate(new_values, max_cpu_cgroups, delta_values, multiplier); } - else - skipToNextLineOrEOF(in); + + proc_stat_values_all_cpus = current_values; } - - if (!first_run) + catch (...) { - const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; - const auto cgroup_specific_divisor = cgroupcpu_stat ? 
1e6 : hz; - const double multiplier = 1.0 / cgroup_specific_divisor - / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); - applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier); + tryLogCurrentException(__PRETTY_FUNCTION__); + openFileIfExists("/sys/fs/cgroup/cpu.stat", cgroupcpu_stat); + if (!cgroupcpu_stat) + openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); } - - proc_stat_values_all_cpus = current_values; } else if (proc_stat) { @@ -1053,38 +1132,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) Float64 num_cpus_to_normalize = max_cpu_cgroups > 0 ? max_cpu_cgroups : num_cpus; if (num_cpus_to_normalize > 0) - { - new_values["OSUserTimeNormalized"] = { delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, - "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSNiceTimeNormalized"] = { delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, - "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSystemTimeNormalized"] = { delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, - "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIdleTimeNormalized"] = { delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIOWaitTimeNormalized"] = { delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIrqTimeNormalized"] = { delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
- " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSoftIrqTimeNormalized"] = { delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, - "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSStealTimeNormalized"] = { delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, - "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestTimeNormalized"] = { delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, - "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestNiceTimeNormalized"] = { delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, - "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
- " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - } + applyNormalizedCPUMetricsUpdate(new_values, num_cpus_to_normalize, delta_values_all_cpus, multiplier); } proc_stat_values_other = current_other_values; diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index caebcd4cdef..2b58fd78044 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -223,6 +223,12 @@ private: void applyCPUMetricsUpdate( AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier); + void applyNormalizedCPUMetricsUpdate( + AsynchronousMetricValues & new_values, + double num_cpus_to_normalize, + const ProcStatValuesCPU & delta_values_all_cpus, + double multiplier); + #endif void run(); From 4aaae7fd4d3340131515be83764e56b5f5c17c13 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 27 Mar 2024 19:49:00 +0000 Subject: [PATCH 0052/2361] add test --- .../test_async_metrics_in_cgroup/test.py | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 tests/integration/test_async_metrics_in_cgroup/test.py diff --git a/tests/integration/test_async_metrics_in_cgroup/test.py b/tests/integration/test_async_metrics_in_cgroup/test.py new file mode 100644 index 00000000000..1bba42cb980 --- /dev/null +++ b/tests/integration/test_async_metrics_in_cgroup/test.py @@ -0,0 +1,77 @@ +import pytest +import subprocess +import time + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node") + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_check_client_logs_level(start_cluster): + # check that our metrics sources actually exist + assert ( + subprocess.Popen("test -f /sys/fs/cgroup/cpu.stat".split(" ")).wait() == 0 + or subprocess.Popen( + "test -f /sys/fs/cgroup/cpuacct/cpuacct.stat".split(" ") + ).wait() + == 0 + ) + + # first let's spawn some cpu-intensive process outside of the container and check that it doesn't accounted by ClickHouse server + proc = subprocess.Popen( + "openssl speed -multi 8".split(" "), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + time.sleep(5) + + metric = node.query( + """ + SYSTEM FLUSH LOGS; + + SELECT max(value) + FROM ( + SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = 'OSUserTime' + GROUP BY t + ) + """ + ).strip("\n") + + assert float(metric) <= 2 + + proc.kill() + + # then let's test that we will account cpu time spent by the server itself + node.query( + "SELECT cityHash64(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=5, max_threads=8", + ignore_error=True, + ) + + metric = node.query( + """ + SYSTEM FLUSH LOGS; + + SELECT max(value) + FROM ( + SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = 'OSUserTime' + GROUP BY t + ) + """ + ).strip("\n") + + assert 4 <= float(metric) <= 12 From 75011d6f21e4948bf86fd52e2330fe2f2d8fa922 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 27 Mar 2024 20:22:15 +0000 Subject: [PATCH 0053/2361] fix style --- 
tests/integration/test_async_metrics_in_cgroup/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/integration/test_async_metrics_in_cgroup/__init__.py diff --git a/tests/integration/test_async_metrics_in_cgroup/__init__.py b/tests/integration/test_async_metrics_in_cgroup/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From d84a01cabfbb97a8b875620292f843c1247e6382 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 28 Mar 2024 20:46:02 +0000 Subject: [PATCH 0054/2361] better --- src/Common/AsynchronousMetrics.cpp | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 59595e701c1..0943232e776 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -565,6 +565,7 @@ AsynchronousMetrics::NetworkInterfaceStatValues::operator-(const AsynchronousMet #endif +#if defined(OS_LINUX) void AsynchronousMetrics::applyCPUMetricsUpdate( AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier) { @@ -707,6 +708,7 @@ void AsynchronousMetrics::applyNormalizedCPUMetricsUpdate( " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " "non-uniform, and still get the average resource utilization metric."}; } +#endif void AsynchronousMetrics::update(TimePoint update_time, bool force_update) { @@ -968,10 +970,6 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) new_values["CGroupMaxCPU"] = { max_cpu_cgroups, "The maximum number of CPU cores according to CGroups."}; } - int64_t hz = sysconf(_SC_CLK_TCK); - if (-1 == hz) - throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); - if (cgroupcpu_stat || cgroupcpuacct_stat) { try @@ -1006,10 +1004,14 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) if (!first_run) { - const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; - const auto cgroup_specific_divisor = cgroupcpu_stat ? 1e6 : hz; - const double multiplier = 1.0 / cgroup_specific_divisor + int64_t hz = sysconf(_SC_CLK_TCK); + if (-1 == hz) + throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); + const auto cgroup_version_specific_divisor = cgroupcpu_stat ? 
1e6 : hz; + const double multiplier = 1.0 / cgroup_version_specific_divisor / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); + + const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier); if (max_cpu_cgroups > 0) applyNormalizedCPUMetricsUpdate(new_values, max_cpu_cgroups, delta_values, multiplier); @@ -1031,6 +1033,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) { proc_stat->rewind(); + int64_t hz = sysconf(_SC_CLK_TCK); + if (-1 == hz) + throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); + double multiplier = 1.0 / hz / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); size_t num_cpus = 0; From 77e3ff7ff50b0e78235ab9a8ee88b258bdcaf510 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 28 Mar 2024 21:18:29 +0000 Subject: [PATCH 0055/2361] fix test --- tests/integration/test_async_metrics_in_cgroup/test.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_async_metrics_in_cgroup/test.py b/tests/integration/test_async_metrics_in_cgroup/test.py index 1bba42cb980..e63d53e1485 100644 --- a/tests/integration/test_async_metrics_in_cgroup/test.py +++ b/tests/integration/test_async_metrics_in_cgroup/test.py @@ -17,7 +17,7 @@ def start_cluster(): cluster.shutdown() -def test_check_client_logs_level(start_cluster): +def test_user_cpu_accounting(start_cluster): # check that our metrics sources actually exist assert ( subprocess.Popen("test -f /sys/fs/cgroup/cpu.stat".split(" ")).wait() == 0 @@ -50,7 +50,7 @@ def test_check_client_logs_level(start_cluster): """ ).strip("\n") - assert float(metric) <= 2 + assert float(metric) < 2 proc.kill() @@ -74,4 +74,5 @@ def test_check_client_logs_level(start_cluster): """ ).strip("\n") - assert 4 <= float(metric) <= 12 + # this check is really weak, but CI is tough place and we cannot guarantee that test process will get many cpu time + assert float(metric) > 1 From bc6a82d9cd68a8a4af3ef92b9a91eaa3be0aa347 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 9 Apr 2024 18:35:11 +0000 Subject: [PATCH 0056/2361] fix test --- .../test_async_metrics_in_cgroup/test.py | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/tests/integration/test_async_metrics_in_cgroup/test.py b/tests/integration/test_async_metrics_in_cgroup/test.py index e63d53e1485..00951c95a0e 100644 --- a/tests/integration/test_async_metrics_in_cgroup/test.py +++ b/tests/integration/test_async_metrics_in_cgroup/test.py @@ -18,6 +18,9 @@ def start_cluster(): def test_user_cpu_accounting(start_cluster): + if node.is_built_with_sanitizer(): + pytest.skip("Disabled for sanitizers") + # check that our metrics sources actually exist assert ( subprocess.Popen("test -f /sys/fs/cgroup/cpu.stat".split(" ")).wait() == 0 @@ -38,14 +41,12 @@ def test_user_cpu_accounting(start_cluster): metric = node.query( """ - SYSTEM FLUSH LOGS; - SELECT max(value) FROM ( SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value - FROM system.asynchronous_metric_log - WHERE event_time >= now() - 60 AND metric = 'OSUserTime' - GROUP BY t + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = 'OSUserTime' + GROUP BY t ) """ ).strip("\n") @@ -56,20 +57,18 @@ def test_user_cpu_accounting(start_cluster): # then let's test that we will account cpu time spent by the 
server itself node.query( - "SELECT cityHash64(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=5, max_threads=8", + "SELECT cityHash64(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=10", ignore_error=True, ) metric = node.query( """ - SYSTEM FLUSH LOGS; - SELECT max(value) FROM ( SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value - FROM system.asynchronous_metric_log - WHERE event_time >= now() - 60 AND metric = 'OSUserTime' - GROUP BY t + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = 'OSUserTime' + GROUP BY t ) """ ).strip("\n") From 421ace6271af8dfceaf7e0ccd7475032c694b30a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 1 May 2024 13:03:31 +0000 Subject: [PATCH 0057/2361] Use proper max block size in case of setting is not specified --- src/Storages/Kafka/StorageKafka2.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 6e11a8f9264..35073638e8b 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -788,9 +788,16 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( } if (!consumer.hasMorePolledMessages() - && (total_rows >= kafka_settings->kafka_max_block_size || !check_time_limit() + && (total_rows >= getMaxBlockSize() || !check_time_limit() || failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS || consumer.needsOffsetUpdate())) { + LOG_TRACE( + log, + "Stopped collecting message for current batch. There are {} failed polled attempts, {} total rows and consumer needs " + "offset update is {}", + failed_poll_attempts, + total_rows, + consumer.needsOffsetUpdate()); break; } } From e31df2e29ae4e17f6f73fda383fba628bf626031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 2 May 2024 09:13:22 +0000 Subject: [PATCH 0058/2361] Explain some decisions --- src/Storages/Kafka/StorageKafkaCommon.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 1319c871f4d..0b7b877a0f0 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -378,19 +378,21 @@ void registerStorageKafka(StorageFactory & factory) const auto is_replicated_database = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && DatabaseCatalog::instance().getDatabase(args.table_id.database_name)->getEngineName() == "Replicated"; - // TODO(antaljanosbenjamin): attach query? - // TODO(antaljanosbenjamin): why not on single atomic database? + // UUID macro is only allowed: + // - with Atomic database only with ON CLUSTER queries, otherwise it is easy to misuse: each replica would have separate uuid generated. + // - with Replicated database + // - with attach queries, as those are used on server startup const auto allow_uuid_macro = is_on_cluster || is_replicated_database || args.query.attach; auto context = args.getContext(); - /// Unfold {database} and {table} macro on table creation, so table can be renamed. + // Unfold {database} and {table} macro on table creation, so table can be renamed. if (!args.attach) { Macros::MacroExpansionInfo info; /// NOTE: it's not recursive info.expand_special_macros_only = true; info.table_id = args.table_id; - // TODO(antaljanosbenjamin): why to skip UUID here? 
+ // We could probably unfold UUID here too, but let's keep it similar to ReplicatedMergeTree, which doesn't do the unfolding. info.table_id.uuid = UUIDHelpers::Nil; kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); @@ -405,14 +407,14 @@ void registerStorageKafka(StorageFactory & factory) settings_query->changes.setSetting("kafka_keeper_path", kafka_settings->kafka_keeper_path.value); settings_query->changes.setSetting("kafka_replica_name", kafka_settings->kafka_replica_name.value); - /// Expand other macros (such as {shard} and {replica}). We do not expand them on previous step - /// to make possible copying metadata files between replicas. + // Expand other macros (such as {replica}). We do not expand them on previous step to make possible copying metadata files between replicas. + // Disable expanding {shard} macro, because it can lead to incorrect behavior and it doesn't make sense to shard Kafka tables. Macros::MacroExpansionInfo info; info.table_id = args.table_id; if (is_replicated_database) { auto database = DatabaseCatalog::instance().getDatabase(args.table_id.database_name); - info.shard = getReplicatedDatabaseShardName(database); + info.shard.reset(); info.replica = getReplicatedDatabaseReplicaName(database); } if (!allow_uuid_macro) From 05e823a1e9eff9d0df0b6473c19eddc03811d016 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 14 May 2024 15:37:20 +0000 Subject: [PATCH 0059/2361] add chunked wrapper to native protocol --- src/Client/Connection.cpp | 30 ++++-- src/Client/Connection.h | 8 +- src/Core/ProtocolDefines.h | 5 +- src/IO/ReadBufferFromPocoSocket.cpp | 54 +++++++--- src/IO/ReadBufferFromPocoSocket.h | 3 + src/IO/ReadBufferFromPocoSocketChunked.cpp | 114 +++++++++++++++++++++ src/IO/ReadBufferFromPocoSocketChunked.h | 32 ++++++ src/IO/WriteBufferFromPocoSocketChunked.h | 56 ++++++++++ src/Server/TCPHandler.cpp | 50 +++++++-- src/Server/TCPHandler.h | 6 +- 10 files changed, 322 insertions(+), 36 deletions(-) create mode 100644 src/IO/ReadBufferFromPocoSocketChunked.cpp create mode 100644 src/IO/ReadBufferFromPocoSocketChunked.h create mode 100644 src/IO/WriteBufferFromPocoSocketChunked.h diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 483201509c4..970768e515e 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -4,8 +4,6 @@ #include #include #include -#include -#include #include #include #include @@ -191,10 +189,10 @@ void Connection::connect(const ConnectionTimeouts & timeouts) , tcp_keep_alive_timeout_in_sec); } - in = std::make_shared(*socket); + in = std::make_shared(*socket); in->setAsyncCallback(async_callback); - out = std::make_shared(*socket); + out = std::make_shared(*socket); out->setAsyncCallback(async_callback); connected = true; setDescription(); @@ -205,6 +203,12 @@ void Connection::connect(const ConnectionTimeouts & timeouts) if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) sendAddendum(); + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + in->enableChunked(); + out->enableChunked(); + } + LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.", server_name, server_version_major, server_version_minor, server_version_patch); } @@ -567,6 +571,7 @@ bool Connection::ping(const ConnectionTimeouts & timeouts) UInt64 pong = 0; writeVarUInt(Protocol::Client::Ping, *out); + out->finishPacket(); out->next(); if (in->eof()) @@ -611,6 +616,7 @@ TablesStatusResponse 
Connection::getTablesStatus(const ConnectionTimeouts & time writeVarUInt(Protocol::Client::TablesStatusRequest, *out); request.write(*out, server_revision); + out->finishPacket(); out->next(); UInt64 response_type = 0; @@ -762,6 +768,8 @@ void Connection::sendQuery( block_profile_events_in.reset(); block_out.reset(); + out->finishPacket(); + /// Send empty block which means end of data. if (!with_pending_data) { @@ -778,6 +786,7 @@ void Connection::sendCancel() return; writeVarUInt(Protocol::Client::Cancel, *out); + out->finishPacket(); out->next(); } @@ -804,6 +813,8 @@ void Connection::sendData(const Block & block, const String & name, bool scalar) block_out->write(block); maybe_compressed_out->next(); + if (!block) + out->finishPacket(); out->next(); if (throttler) @@ -814,6 +825,7 @@ void Connection::sendIgnoredPartUUIDs(const std::vector & uuids) { writeVarUInt(Protocol::Client::IgnoredPartUUIDs, *out); writeVectorBinary(uuids, *out); + out->finishPacket(); out->next(); } @@ -823,6 +835,7 @@ void Connection::sendReadTaskResponse(const String & response) writeVarUInt(Protocol::Client::ReadTaskResponse, *out); writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out); writeStringBinary(response, *out); + out->finishPacket(); out->next(); } @@ -831,6 +844,7 @@ void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & resp { writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out); response.serialize(*out); + out->finishPacket(); out->next(); } @@ -848,6 +862,8 @@ void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String copyData(input, *out); else copyData(input, *out, size); + + out->finishPacket(); out->next(); } @@ -876,6 +892,8 @@ void Connection::sendScalarsData(Scalars & data) sendData(elem.second, elem.first, true /* scalar */); } + out->finishPacket(); + out_bytes = out->count() - out_bytes; maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes; double elapsed = watch.elapsedSeconds(); @@ -1018,13 +1036,13 @@ std::optional Connection::getResolvedAddress() const bool Connection::poll(size_t timeout_microseconds) { - return static_cast(*in).poll(timeout_microseconds); + return in->poll(timeout_microseconds); } bool Connection::hasReadPendingData() const { - return last_input_packet_type.has_value() || static_cast(*in).hasPendingData(); + return last_input_packet_type.has_value() || in->hasPendingData(); } diff --git a/src/Client/Connection.h b/src/Client/Connection.h index 9632eb9d948..e7a6d948204 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -8,8 +8,8 @@ #include -#include -#include +#include +#include #include #include @@ -207,8 +207,8 @@ private: String server_display_name; std::unique_ptr socket; - std::shared_ptr in; - std::shared_ptr out; + std::shared_ptr in; + std::shared_ptr out; std::optional last_input_packet_type; String query_id; diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 159a4c28b6d..837801edcbb 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -79,6 +79,9 @@ static constexpr auto DBMS_MIN_REVISION_WITH_SSH_AUTHENTICATION = 54466; /// Send read-only flag for Replicated tables as well static constexpr auto DBMS_MIN_REVISION_WITH_TABLE_READ_ONLY_CHECK = 54467; +/// Packets size header +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS = 54468; + /// Version of ClickHouse TCP protocol. /// /// Should be incremented manually on protocol changes. 
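DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS gates the new framing mode: once both peers report revision 54468 or later, every logical packet can travel as one or more chunks, each prefixed with a UInt32 byte count, and a zero-length chunk marks the end of the packet (a follow-up commit in this series switches the prefix from network byte order to little-endian). Below is a rough, self-contained sketch of that framing for the single-chunk case; it is illustrative only, not code from the patch, and uses plain std::string in place of the write buffers.

    #include <cstdint>
    #include <string>

    // Illustrative only: frame one packet payload as a single chunk followed by the
    // zero-length terminator, with the 4-byte length serialized little-endian (the
    // byte order the later "switch chunk length to little endian" commit settles on).
    std::string frameAsSingleChunk(const std::string & payload)
    {
        auto append_length = [](std::string & out, uint32_t n)
        {
            for (int shift = 0; shift < 32; shift += 8)
                out.push_back(static_cast<char>((n >> shift) & 0xFF));
        };

        std::string framed;
        append_length(framed, static_cast<uint32_t>(payload.size()));  // chunk header: payload size
        framed += payload;                                             // chunk body
        append_length(framed, 0);                                      // zero-length chunk ends the packet
        return framed;
    }

In the real buffers a packet larger than the socket buffer is simply flushed as several such length-prefixed chunks before the terminator.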
@@ -86,6 +89,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_TABLE_READ_ONLY_CHECK = 54467; /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). -static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54467; +static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54468; } diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 26cdee4140c..5fb7ea0440c 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -32,25 +32,13 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -bool ReadBufferFromPocoSocket::nextImpl() +size_t ReadBufferFromPocoSocket::readSocket(Position begin, size_t size) { ssize_t bytes_read = 0; - Stopwatch watch; - - SCOPE_EXIT({ - /// NOTE: it is quite inaccurate on high loads since the thread could be replaced by another one - ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); - ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); - }); /// Add more details to exceptions. try { - CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); - - if (internal_buffer.size() > INT_MAX) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); - /// If async_callback is specified, set socket to non-blocking mode /// and try to read data from it, if socket is not ready for reading, /// run async_callback and try again later. @@ -61,7 +49,7 @@ bool ReadBufferFromPocoSocket::nextImpl() socket.setBlocking(false); SCOPE_EXIT(socket.setBlocking(true)); bool secure = socket.secure(); - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(begin, static_cast(size)); /// Check EAGAIN and ERR_SSL_WANT_READ/ERR_SSL_WANT_WRITE for secure socket (reading from secure socket can write too). while (bytes_read < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(bytes_read) || checkSSLWantWrite(bytes_read))))) @@ -73,12 +61,12 @@ bool ReadBufferFromPocoSocket::nextImpl() async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR); /// Try to read again. 
- bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(begin, static_cast(size)); } } else { - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(begin, static_cast(size)); } } catch (const Poco::Net::NetException & e) @@ -99,6 +87,40 @@ bool ReadBufferFromPocoSocket::nextImpl() if (bytes_read < 0) throw NetException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from socket (peer: {}, local: {})", peer_address.toString(), socket.address().toString()); + return bytes_read; +} + +bool ReadBufferFromPocoSocket::readSocketExact(Position begin, size_t size) +{ + for (size_t bytes_left = size; bytes_left > 0;) + { + size_t ret = readSocket(begin + size - bytes_left, bytes_left); + if (ret == 0) + return false; + bytes_left -= ret; + } + + return true; +} + +bool ReadBufferFromPocoSocket::nextImpl() +{ + ssize_t bytes_read = 0; + Stopwatch watch; + + SCOPE_EXIT({ + /// NOTE: it is quite inaccurate on high loads since the thread could be replaced by another one + ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); + }); + + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); + + if (internal_buffer.size() > INT_MAX) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); + + bytes_read = readSocket(internal_buffer.begin(), internal_buffer.size()); + if (read_event != ProfileEvents::end()) ProfileEvents::increment(read_event, bytes_read); diff --git a/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h index 76156612764..c40a54ed7ae 100644 --- a/src/IO/ReadBufferFromPocoSocket.h +++ b/src/IO/ReadBufferFromPocoSocket.h @@ -32,6 +32,9 @@ public: void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); } + size_t readSocket(Position begin, size_t size); + bool readSocketExact(Position begin, size_t size); + private: AsyncCallback async_callback; std::string socket_description; diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp new file mode 100644 index 00000000000..f0a157a7e1c --- /dev/null +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -0,0 +1,114 @@ +#include +#include + + +namespace DB::ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace DB +{ +ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) + : ReadBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) +{} + +ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBuffer(nullptr, 0), log(getLogger("Protocol")), buffer_socket(socket_, read_event_, buf_size) +{ + chassert(buf_size <= std::numeric_limits::max()); + + working_buffer = buffer_socket.buffer(); + pos = buffer_socket.position(); +} + +void ReadBufferFromPocoSocketChunked::enableChunked() +{ + chunked = true; +} + +bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) +{ + buffer_socket.position() = pos + skip_next; + return buffer_socket.poll(timeout_microseconds); +} + +void ReadBufferFromPocoSocketChunked::setAsyncCallback(AsyncCallback async_callback_) +{ + buffer_socket.setAsyncCallback(async_callback_); +} + 
+bool ReadBufferFromPocoSocketChunked::startChunk() +{ + do { + if (buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) == 0) + return false; + if (chunk_left == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); + } while (chunk_left == 0); + + return nextChunk(); +} + +bool ReadBufferFromPocoSocketChunked::nextChunk() +{ + static bool start = false; + + if (chunk_left == 0) { + start = true; + return startChunk(); + } + + if (buffer_socket.available() == 0) + if (!buffer_socket.next()) + return false; + if (start) + LOG_TEST(log, "Packet recieve started. Message {}, size {}", static_cast(*buffer_socket.position()), chunk_left); + else + LOG_TEST(log, "Packet recieve continued. Size {}", chunk_left); + + start = false; + + nextimpl_working_buffer_offset = buffer_socket.offset(); + + if (buffer_socket.available() < chunk_left) + { + working_buffer.resize(buffer_socket.offset() + buffer_socket.available()); + chunk_left -= buffer_socket.available(); + return true; + } + + working_buffer.resize(buffer_socket.offset() + chunk_left); + skip_next = std::min(static_cast(4), buffer_socket.available() - chunk_left); + + if (skip_next > 0) + std::memcpy(&chunk_left, buffer_socket.position() + chunk_left, skip_next); + if (4 > skip_next) + if (!buffer_socket.readSocketExact(reinterpret_cast(&chunk_left) + skip_next, 4 - skip_next)) + return false; + + if (chunk_left == 0) + LOG_TEST(log, "Packet recieve ended."); + + return true; +} + + +bool ReadBufferFromPocoSocketChunked::nextImpl() +{ + buffer_socket.position() = pos + skip_next; + skip_next = 0; + + if (chunked) + return nextChunk(); + + if (!buffer_socket.next()) + return false; + + pos = buffer_socket.position(); + working_buffer.resize(offset() + buffer_socket.available()); + + return true; +} + +} diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h new file mode 100644 index 00000000000..3d7d91ac93a --- /dev/null +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class ReadBufferFromPocoSocketChunked: public ReadBuffer +{ +public: + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + + void enableChunked(); + bool poll(size_t timeout_microseconds); + void setAsyncCallback(AsyncCallback async_callback_); + +protected: + bool startChunk(); + bool nextChunk(); + bool nextImpl() override; + +private: + LoggerPtr log; + ReadBufferFromPocoSocket buffer_socket; + bool chunked = false; + UInt32 chunk_left = 0; // chunk left to read from socket + UInt8 skip_next = 0; // skip already processed bytes in buffer_socket +}; + +} diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h new file mode 100644 index 00000000000..b316393aab6 --- /dev/null +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket +{ +public: + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, buf_size), log(getLogger("Protocol")) {} + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const 
ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, write_event_, buf_size), log(getLogger("Protocol")) {} + + void enableChunked() { chunked = true; } + void finishPacket() + { + if (!chunked) + return; + + next(); + + if (finished) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: attempt to send empty chunk"); + + LOG_TEST(log, "Packet send ended."); + finished = true; + + UInt32 s = 0; + socketSendBytes(reinterpret_cast(&s), sizeof(s)); + } +protected: + void nextImpl() override + { + if (chunked) + { + UInt32 s = static_cast(offset()); + if (finished) + LOG_TEST(log, "Packet send started. Message {}, size {}", static_cast(*buffer().begin()), s); + else + LOG_TEST(log, "Packet send continued. Size {}", s); + + finished = false; + socketSendBytes(reinterpret_cast(&s), sizeof(s)); + } + + WriteBufferFromPocoSocket::nextImpl(); + } +private: + LoggerPtr log; + bool chunked = false; + bool finished = true; +}; + +} diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index ae2f150c4a1..aa33988fdc4 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -19,8 +19,6 @@ #include #include #include -#include -#include #include #include #include @@ -253,8 +251,8 @@ void TCPHandler::runImpl() socket().setSendTimeout(send_timeout); socket().setNoDelay(true); - in = std::make_shared(socket(), read_event); - out = std::make_shared(socket(), write_event); + in = std::make_shared(socket(), read_event); + out = std::make_shared(socket(), write_event); /// Support for PROXY protocol if (parse_proxy_protocol && !receiveProxyHeader()) @@ -289,6 +287,12 @@ void TCPHandler::runImpl() if (!default_database.empty()) session->sessionContext()->setCurrentDatabase(default_database); } + + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + in->enableChunked(); + out->enableChunked(); + } } catch (const Exception & e) /// Typical for an incorrect username, password, or address. { @@ -320,7 +324,7 @@ void TCPHandler::runImpl() { Stopwatch idle_time; UInt64 timeout_ms = std::min(poll_interval, idle_connection_timeout) * 1000000; - while (tcp_server.isOpen() && !server.isCancelled() && !static_cast(*in).poll(timeout_ms)) + while (tcp_server.isOpen() && !server.isCancelled() && !in->poll(timeout_ms)) { if (idle_time.elapsedSeconds() > idle_connection_timeout) { @@ -788,7 +792,7 @@ bool TCPHandler::readDataNext() /// We are waiting for a packet from the client. Thus, every `POLL_INTERVAL` seconds check whether we need to shut down. while (true) { - if (static_cast(*in).poll(timeout_us)) + if (in->poll(timeout_us)) { /// If client disconnected. 
if (in->eof()) @@ -1154,6 +1158,8 @@ void TCPHandler::processTablesStatusRequest() } response.write(*out, client_tcp_protocol_version); + + out->finishPacket(); } void TCPHandler::receiveUnexpectedTablesStatusRequest() @@ -1174,6 +1180,8 @@ void TCPHandler::sendPartUUIDs() writeVarUInt(Protocol::Server::PartUUIDs, *out); writeVectorBinary(uuids, *out); + + out->finishPacket(); out->next(); } } @@ -1182,6 +1190,8 @@ void TCPHandler::sendPartUUIDs() void TCPHandler::sendReadTaskRequestAssumeLocked() { writeVarUInt(Protocol::Server::ReadTaskRequest, *out); + + out->finishPacket(); out->next(); } @@ -1190,6 +1200,8 @@ void TCPHandler::sendMergeTreeAllRangesAnnouncementAssumeLocked(InitialAllRanges { writeVarUInt(Protocol::Server::MergeTreeAllRangesAnnouncement, *out); announcement.serialize(*out); + + out->finishPacket(); out->next(); } @@ -1198,6 +1210,8 @@ void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest re { writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out); request.serialize(*out); + + out->finishPacket(); out->next(); } @@ -1206,6 +1220,8 @@ void TCPHandler::sendProfileInfo(const ProfileInfo & info) { writeVarUInt(Protocol::Server::ProfileInfo, *out); info.write(*out); + + out->finishPacket(); out->next(); } @@ -1221,6 +1237,8 @@ void TCPHandler::sendTotals(const Block & totals) state.block_out->write(totals); state.maybe_compressed_out->next(); + + out->finishPacket(); out->next(); } } @@ -1237,6 +1255,8 @@ void TCPHandler::sendExtremes(const Block & extremes) state.block_out->write(extremes); state.maybe_compressed_out->next(); + + out->finishPacket(); out->next(); } } @@ -1254,6 +1274,8 @@ void TCPHandler::sendProfileEvents() writeStringBinary("", *out); state.profile_events_block_out->write(block); + + out->finishPacket(); out->next(); auto elapsed_milliseconds = stopwatch.elapsedMilliseconds(); @@ -1291,6 +1313,8 @@ void TCPHandler::sendTimezone() LOG_DEBUG(log, "TCPHandler::sendTimezone(): {}", tz); writeVarUInt(Protocol::Server::TimezoneUpdate, *out); writeStringBinary(tz, *out); + + out->finishPacket(); out->next(); } @@ -1636,6 +1660,7 @@ bool TCPHandler::receivePacket() case Protocol::Client::Ping: writeVarUInt(Protocol::Server::Pong, *out); + out->finishPacket(); out->next(); return false; @@ -2152,7 +2177,7 @@ QueryState::CancellationStatus TCPHandler::getQueryCancellationStatus() after_check_cancelled.restart(); /// During request execution the only packet that can come from the client is stopping the query. - if (static_cast(*in).poll(0)) + if (in->poll(0)) { if (in->eof()) { @@ -2216,6 +2241,8 @@ void TCPHandler::sendData(const Block & block) state.block_out->write(block); state.maybe_compressed_out->next(); + + out->finishPacket(); out->next(); } catch (...) 
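Every server-to-client packet in this handler now follows the same three-step pattern, visible in the Ping/Pong case above: serialize the body, call finishPacket() to close the logical packet, then next() to flush the socket buffer. Because finishPacket() returns immediately when chunked mode was not negotiated, the same code path still serves older clients. A condensed sketch of the pattern follows; the helper name is hypothetical and only the usual ClickHouse IO headers are assumed.

    #include <Core/Protocol.h>
    #include <IO/VarInt.h>
    #include <IO/WriteBufferFromPocoSocketChunked.h>

    /// Sketch of the send pattern this commit applies throughout TCPHandler.
    static void sendPong(DB::WriteBufferFromPocoSocketChunked & out)
    {
        DB::writeVarUInt(DB::Protocol::Server::Pong, out); // packet type, followed by the payload for other packet kinds
        out.finishPacket();                                // in chunked mode: flush pending data and emit the zero-length terminator
        out.next();                                        // push everything to the socket
    }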
@@ -2251,6 +2278,8 @@ void TCPHandler::sendLogData(const Block & block) writeStringBinary("", *out); state.logs_block_out->write(block); + + out->finishPacket(); out->next(); } @@ -2262,6 +2291,7 @@ void TCPHandler::sendTableColumns(const ColumnsDescription & columns) writeStringBinary("", *out); writeStringBinary(columns.toString(), *out); + out->finishPacket(); out->next(); } @@ -2271,6 +2301,8 @@ void TCPHandler::sendException(const Exception & e, bool with_stack_trace) writeVarUInt(Protocol::Server::Exception, *out); writeException(e, *out, with_stack_trace); + + out->finishPacket(); out->next(); } @@ -2281,6 +2313,8 @@ void TCPHandler::sendEndOfStream() state.io.setAllDataSent(); writeVarUInt(Protocol::Server::EndOfStream, *out); + + out->finishPacket(); out->next(); } @@ -2299,6 +2333,8 @@ void TCPHandler::sendProgress() increment.elapsed_ns = current_elapsed_ns - state.prev_elapsed_ns; state.prev_elapsed_ns = current_elapsed_ns; increment.write(*out, client_tcp_protocol_version); + + out->finishPacket(); out->next(); } diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 191617f1905..67d77381167 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include "IServer.h" #include "Interpreters/AsynchronousInsertQueue.h" @@ -204,8 +206,8 @@ private: ClientInfo::QueryKind query_kind = ClientInfo::QueryKind::NO_QUERY; /// Streams for reading/writing from/to client connection socket. - std::shared_ptr in; - std::shared_ptr out; + std::shared_ptr in; + std::shared_ptr out; ProfileEvents::Event read_event; ProfileEvents::Event write_event; From daf8277e55058e42fddafc49416164d5cb0ab601 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 14 May 2024 16:00:58 +0000 Subject: [PATCH 0060/2361] fix --- src/IO/ReadBufferFromPocoSocketChunked.cpp | 17 ++++++++--------- src/IO/ReadBufferFromPocoSocketChunked.h | 1 + 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index f0a157a7e1c..33bed2a32c4 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -52,22 +52,21 @@ bool ReadBufferFromPocoSocketChunked::startChunk() bool ReadBufferFromPocoSocketChunked::nextChunk() { - static bool start = false; - - if (chunk_left == 0) { - start = true; + if (chunk_left == 0) + { + started = true; return startChunk(); } if (buffer_socket.available() == 0) if (!buffer_socket.next()) return false; - if (start) - LOG_TEST(log, "Packet recieve started. Message {}, size {}", static_cast(*buffer_socket.position()), chunk_left); + if (started) + LOG_TEST(log, "Packet receive started. Message {}, size {}", static_cast(*buffer_socket.position()), chunk_left); else - LOG_TEST(log, "Packet recieve continued. Size {}", chunk_left); + LOG_TEST(log, "Packet receive continued. 
Size {}", chunk_left); - start = false; + started = false; nextimpl_working_buffer_offset = buffer_socket.offset(); @@ -88,7 +87,7 @@ bool ReadBufferFromPocoSocketChunked::nextChunk() return false; if (chunk_left == 0) - LOG_TEST(log, "Packet recieve ended."); + LOG_TEST(log, "Packet receive ended."); return true; } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index 3d7d91ac93a..5930285e18a 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -27,6 +27,7 @@ private: bool chunked = false; UInt32 chunk_left = 0; // chunk left to read from socket UInt8 skip_next = 0; // skip already processed bytes in buffer_socket + bool started = false; }; } From dfdf31f1b6efbbda847a693a22969c2187a949f7 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 14 May 2024 18:09:11 +0000 Subject: [PATCH 0061/2361] host-net conversion --- src/IO/NetUtils.h | 26 ++++++++++++++++++++++ src/IO/ReadBufferFromPocoSocketChunked.cpp | 16 ++++++++----- src/IO/WriteBufferFromPocoSocketChunked.h | 2 ++ 3 files changed, 38 insertions(+), 6 deletions(-) create mode 100644 src/IO/NetUtils.h diff --git a/src/IO/NetUtils.h b/src/IO/NetUtils.h new file mode 100644 index 00000000000..ac6b5eec9a7 --- /dev/null +++ b/src/IO/NetUtils.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +template +constexpr T netToHost(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T hostToNet(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +} diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 33bed2a32c4..27903761934 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB::ErrorCodes @@ -9,6 +10,7 @@ namespace DB::ErrorCodes namespace DB { + ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) : ReadBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) {} @@ -40,12 +42,12 @@ void ReadBufferFromPocoSocketChunked::setAsyncCallback(AsyncCallback async_callb bool ReadBufferFromPocoSocketChunked::startChunk() { - do { - if (buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) == 0) - return false; - if (chunk_left == 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); - } while (chunk_left == 0); + if (buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) == 0) + return false; + if (chunk_left == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); + + chunk_left = netToHost(chunk_left); return nextChunk(); } @@ -86,6 +88,8 @@ bool ReadBufferFromPocoSocketChunked::nextChunk() if (!buffer_socket.readSocketExact(reinterpret_cast(&chunk_left) + skip_next, 4 - skip_next)) return false; + chunk_left = netToHost(chunk_left); + if (chunk_left == 0) LOG_TEST(log, "Packet receive ended."); diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index b316393aab6..4481dfdedfc 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -42,6 +43,7 @@ protected: 
LOG_TEST(log, "Packet send continued. Size {}", s); finished = false; + s = hostToNet(s); socketSendBytes(reinterpret_cast(&s), sizeof(s)); } From 88a833335f7e7e9fae85e74d250677f415905292 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 14 May 2024 20:25:26 +0000 Subject: [PATCH 0062/2361] fix --- src/IO/WriteBufferFromPocoSocketChunked.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 4481dfdedfc..39cdd93501b 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -8,6 +8,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket { public: From ad204887a2516e5053035c709735bf6c99ddba21 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 15 May 2024 20:47:54 +0000 Subject: [PATCH 0063/2361] bugs fixed, switch chunk length to little endian --- src/Client/Connection.cpp | 2 +- src/IO/NetUtils.h | 32 +++++++++++++++++ src/IO/ReadBufferFromPocoSocketChunked.cpp | 42 +++++++++++++++------- src/IO/ReadBufferFromPocoSocketChunked.h | 3 +- src/IO/WriteBufferFromPocoSocketChunked.h | 2 +- 5 files changed, 65 insertions(+), 16 deletions(-) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 970768e515e..3a0f3771e7a 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -1042,7 +1042,7 @@ bool Connection::poll(size_t timeout_microseconds) bool Connection::hasReadPendingData() const { - return last_input_packet_type.has_value() || in->hasPendingData(); + return last_input_packet_type.has_value() || in->hasBufferedData(); } diff --git a/src/IO/NetUtils.h b/src/IO/NetUtils.h index ac6b5eec9a7..12f09524ae7 100644 --- a/src/IO/NetUtils.h +++ b/src/IO/NetUtils.h @@ -23,4 +23,36 @@ constexpr T hostToNet(T value) noexcept return value; } +template +constexpr T toLittleEndian(T value) noexcept +{ + if constexpr (std::endian::native == std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T toBigEndian(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T fromLittleEndian(T value) noexcept +{ + if constexpr (std::endian::native == std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T fromBigEndian(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 27903761934..247d8c8ec6a 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -27,11 +27,14 @@ ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Sock void ReadBufferFromPocoSocketChunked::enableChunked() { chunked = true; + buffer_socket.position() = pos; } bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) { - buffer_socket.position() = pos + skip_next; + if (!chunked) + buffer_socket.position() = pos; + return buffer_socket.poll(timeout_microseconds); } @@ -42,12 +45,12 @@ void ReadBufferFromPocoSocketChunked::setAsyncCallback(AsyncCallback async_callb bool ReadBufferFromPocoSocketChunked::startChunk() { - if (buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) == 0) + if 
(buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) < sizeof(chunk_left)) return false; if (chunk_left == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); - chunk_left = netToHost(chunk_left); + chunk_left = fromLittleEndian(chunk_left); return nextChunk(); } @@ -76,19 +79,23 @@ bool ReadBufferFromPocoSocketChunked::nextChunk() { working_buffer.resize(buffer_socket.offset() + buffer_socket.available()); chunk_left -= buffer_socket.available(); + buffer_socket.position() += buffer_socket.available(); return true; } working_buffer.resize(buffer_socket.offset() + chunk_left); - skip_next = std::min(static_cast(4), buffer_socket.available() - chunk_left); + UInt8 buffered = std::min(static_cast(4), buffer_socket.available() - chunk_left); - if (skip_next > 0) - std::memcpy(&chunk_left, buffer_socket.position() + chunk_left, skip_next); - if (4 > skip_next) - if (!buffer_socket.readSocketExact(reinterpret_cast(&chunk_left) + skip_next, 4 - skip_next)) + buffer_socket.position() += chunk_left; + if (buffered > 0) + std::memcpy(&chunk_left, buffer_socket.position(), buffered); + buffer_socket.position() += buffered; + + if (4 > buffered) + if (!buffer_socket.readSocketExact(reinterpret_cast(&chunk_left) + buffered, 4 - buffered)) return false; - chunk_left = netToHost(chunk_left); + chunk_left = fromLittleEndian(chunk_left); if (chunk_left == 0) LOG_TEST(log, "Packet receive ended."); @@ -99,14 +106,23 @@ bool ReadBufferFromPocoSocketChunked::nextChunk() bool ReadBufferFromPocoSocketChunked::nextImpl() { - buffer_socket.position() = pos + skip_next; - skip_next = 0; - if (chunked) - return nextChunk(); + { + if (!nextChunk()) + { + pos = buffer_socket.position(); + return false; + } + return true; + } + + buffer_socket.position() = pos; if (!buffer_socket.next()) + { + pos = buffer_socket.position(); return false; + } pos = buffer_socket.position(); working_buffer.resize(offset() + buffer_socket.available()); diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index 5930285e18a..6f99db4489a 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -16,6 +16,8 @@ public: bool poll(size_t timeout_microseconds); void setAsyncCallback(AsyncCallback async_callback_); + bool hasBufferedData() const { return hasPendingData() || buffer_socket.hasPendingData(); } + protected: bool startChunk(); bool nextChunk(); @@ -26,7 +28,6 @@ private: ReadBufferFromPocoSocket buffer_socket; bool chunked = false; UInt32 chunk_left = 0; // chunk left to read from socket - UInt8 skip_next = 0; // skip already processed bytes in buffer_socket bool started = false; }; diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 39cdd93501b..070e87feff2 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -48,7 +48,7 @@ protected: LOG_TEST(log, "Packet send continued. 
Size {}", s); finished = false; - s = hostToNet(s); + s = toLittleEndian(s); socketSendBytes(reinterpret_cast(&s), sizeof(s)); } From 6378184c7f004e211d86c3fd7a4f482e45b01a59 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 20 May 2024 14:15:47 +0000 Subject: [PATCH 0064/2361] fix, add some introspection functionality --- src/IO/ReadBufferFromPocoSocketChunked.cpp | 3 ++- src/IO/ReadBufferFromPocoSocketChunked.h | 5 +++++ src/IO/WriteBufferFromPocoSocketChunked.h | 10 ++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 247d8c8ec6a..4d40d8b4f14 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -16,7 +16,7 @@ ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Sock {} ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) - : ReadBuffer(nullptr, 0), log(getLogger("Protocol")), buffer_socket(socket_, read_event_, buf_size) + : ReadBuffer(nullptr, 0), log(getLogger("Protocol")), peer_address(socket_.peerAddress()), our_address(socket_.address()), buffer_socket(socket_, read_event_, buf_size) { chassert(buf_size <= std::numeric_limits::max()); @@ -28,6 +28,7 @@ void ReadBufferFromPocoSocketChunked::enableChunked() { chunked = true; buffer_socket.position() = pos; + working_buffer.resize(offset()); } bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index 6f99db4489a..c70363cf7d8 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -18,6 +18,9 @@ public: bool hasBufferedData() const { return hasPendingData() || buffer_socket.hasPendingData(); } + Poco::Net::SocketAddress peerAddress() { return peer_address; } + Poco::Net::SocketAddress ourAddress() { return our_address; } + protected: bool startChunk(); bool nextChunk(); @@ -25,6 +28,8 @@ protected: private: LoggerPtr log; + Poco::Net::SocketAddress peer_address; + Poco::Net::SocketAddress our_address; ReadBufferFromPocoSocket buffer_socket; bool chunked = false; UInt32 chunk_left = 0; // chunk left to read from socket diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 070e87feff2..6c35db62c0c 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -54,6 +54,16 @@ protected: WriteBufferFromPocoSocket::nextImpl(); } + + Poco::Net::SocketAddress peerAddress() + { + return peer_address; + } + + Poco::Net::SocketAddress ourAddress() + { + return our_address; + } private: LoggerPtr log; bool chunked = false; From 5308256c67c5781916018c321273f04fd21c4545 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 20 May 2024 16:25:19 +0000 Subject: [PATCH 0065/2361] enable chunked before processing defaul database --- src/Server/TCPHandler.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index b3dbd118d8b..070cd0e3247 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -277,6 +277,12 @@ void TCPHandler::runImpl() if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) receiveAddendum(); + if (client_tcp_protocol_version >= 
DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + in->enableChunked(); + out->enableChunked(); + } + if (!is_interserver_mode) { /// If session created, then settings in session context has been updated. @@ -287,12 +293,6 @@ void TCPHandler::runImpl() if (!default_database.empty()) session->sessionContext()->setCurrentDatabase(default_database); } - - if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) - { - in->enableChunked(); - out->enableChunked(); - } } catch (const Exception & e) /// Typical for an incorrect username, password, or address. { From 9e747cd45312302935cbf15ea518808d4ac9c8c8 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 22 May 2024 01:20:00 +0000 Subject: [PATCH 0066/2361] fix bug with profile stats in WriteBufferFromPocoSocket --- src/IO/WriteBufferFromPocoSocket.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp index 10d9fd131cd..e29b3b2cddd 100644 --- a/src/IO/WriteBufferFromPocoSocket.cpp +++ b/src/IO/WriteBufferFromPocoSocket.cpp @@ -183,6 +183,7 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_ , socket(socket_) , peer_address(socket.peerAddress()) , our_address(socket.address()) + , write_event(ProfileEvents::end()) , socket_description("socket (" + peer_address.toString() + ")") { } From 34702b30bcfe3401991fe7c792c02a80185acdf2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 22 May 2024 03:21:10 +0000 Subject: [PATCH 0067/2361] fix test --- .../0_stateless/02532_send_logs_level_test.reference | 3 --- tests/queries/0_stateless/02532_send_logs_level_test.sh | 8 ++++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.reference b/tests/queries/0_stateless/02532_send_logs_level_test.reference index 7e51b888d9c..e69de29bb2d 100644 --- a/tests/queries/0_stateless/02532_send_logs_level_test.reference +++ b/tests/queries/0_stateless/02532_send_logs_level_test.reference @@ -1,3 +0,0 @@ - MergeTreeMarksLoader: Loading marks from path data.cmrk3 - MergeTreeRangeReader: First reader returned: num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), requested columns: key - MergeTreeRangeReader: read() returned num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), sample block key diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.sh b/tests/queries/0_stateless/02532_send_logs_level_test.sh index 4afc6d4496b..f2940e9c005 100755 --- a/tests/queries/0_stateless/02532_send_logs_level_test.sh +++ b/tests/queries/0_stateless/02532_send_logs_level_test.sh @@ -17,6 +17,10 @@ $CLICKHOUSE_CLIENT -nm -q " # instead of "last" value, hence you cannot simply append another # --send_logs_level here. CLICKHOUSE_CLIENT_CLEAN=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=test/g') -$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -o -e '.*' -e '.*' -$CLICKHOUSE_CLIENT -q "drop table data" +set -e + +trap "$CLICKHOUSE_CLIENT -q 'drop table data'" EXIT + +$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& (! 
grep -q -o -e '.*') +$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -q -o -e '.*' From 6c3556dfda92ea9d04ff5db8427a58aa7ab35750 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 22 May 2024 04:07:52 +0000 Subject: [PATCH 0068/2361] fix test --- tests/queries/0_stateless/02532_send_logs_level_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.sh b/tests/queries/0_stateless/02532_send_logs_level_test.sh index f2940e9c005..b74fcf78ad1 100755 --- a/tests/queries/0_stateless/02532_send_logs_level_test.sh +++ b/tests/queries/0_stateless/02532_send_logs_level_test.sh @@ -20,7 +20,7 @@ CLICKHOUSE_CLIENT_CLEAN=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level set -e -trap "$CLICKHOUSE_CLIENT -q 'drop table data'" EXIT +trap '$CLICKHOUSE_CLIENT -q "drop table data"' EXIT $CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& (! grep -q -o -e '.*') $CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -q -o -e '.*' From 69cd5ae549cf7acc4de756a70c9b632d139e50fe Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 22 May 2024 16:39:25 +0000 Subject: [PATCH 0069/2361] process possibly remaining message after network error --- src/Client/ClientBase.cpp | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index b6f821794f1..f3e53efd994 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -951,6 +951,8 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) } catch (Exception & e) { + if (server_exception) + server_exception->rethrow(); if (!is_interactive) e.addMessage("(in query: {})", full_query); throw; @@ -1069,19 +1071,28 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa QueryInterruptHandler::start(signals_before_stop); SCOPE_EXIT({ QueryInterruptHandler::stop(); }); - connection->sendQuery( - connection_parameters.timeouts, - query, - query_parameters, - global_context->getCurrentQueryId(), - query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), - true, - [&](const Progress & progress) { onProgress(progress); }); + try { + connection->sendQuery( + connection_parameters.timeouts, + query, + query_parameters, + global_context->getCurrentQueryId(), + query_processing_stage, + &global_context->getSettingsRef(), + &global_context->getClientInfo(), + true, + [&](const Progress & progress) { onProgress(progress); }); + + if (send_external_tables) + sendExternalTables(parsed_query); + } + catch (const NetException &) + { + // We still want to attempt to process whatever we already recieved or can recieve (socket receive buffer can be not empty) + receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); + throw; + } - if (send_external_tables) - sendExternalTables(parsed_query); receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); break; From 99bd796011aee169f3c4de25b07b330094c4a41a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 22 May 2024 16:58:50 +0000 Subject: [PATCH 0070/2361] fix 
spelling --- src/Client/ClientBase.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index f3e53efd994..1b8fe83eb51 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1088,7 +1088,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa } catch (const NetException &) { - // We still want to attempt to process whatever we already recieved or can recieve (socket receive buffer can be not empty) + // We still want to attempt to process whatever we already received or can receive (socket receive buffer can be not empty) receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); throw; } From 94bc0a1e966d95b8a2180f9504ed93592d2026ed Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 23 May 2024 22:01:32 +0000 Subject: [PATCH 0071/2361] add config parameters and client arguments, make default notchunked_optional --- programs/benchmark/Benchmark.cpp | 52 +++++++++++++++++- src/Client/ClientBase.cpp | 39 ++++++++++++++ src/Client/Connection.cpp | 54 ++++++++++++++++++- src/Client/Connection.h | 5 ++ src/Client/ConnectionParameters.cpp | 3 ++ src/Client/ConnectionParameters.h | 2 + src/Client/ConnectionPool.cpp | 6 ++- src/Client/ConnectionPool.h | 15 +++++- .../ClickHouseDictionarySource.cpp | 8 ++- src/Dictionaries/ClickHouseDictionarySource.h | 2 + src/Interpreters/Cluster.cpp | 11 +++- src/Interpreters/Cluster.h | 2 + src/Server/TCPHandler.cpp | 44 ++++++++++++++- src/Server/TCPHandler.h | 2 + .../DistributedAsyncInsertDirectoryQueue.cpp | 2 + src/Storages/StorageReplicatedMergeTree.cpp | 3 +- 16 files changed, 240 insertions(+), 10 deletions(-) diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index 48dca82eb2b..251761e0bad 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -75,6 +75,8 @@ public: const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, + const String & proto_recv_chunked_, const String & quota_key_, const String & stage, bool randomize_, @@ -128,7 +130,9 @@ public: connections.emplace_back(std::make_unique( concurrency, cur_host, cur_port, - default_database_, user_, password_, quota_key_, + default_database_, user_, password_, + proto_send_chunked_, proto_recv_chunked_, + quota_key_, /* cluster_= */ "", /* cluster_secret_= */ "", /* client_name_= */ std::string(DEFAULT_CLIENT_NAME), @@ -662,6 +666,50 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) Strings hosts = options.count("host") ? 
options["host"].as() : Strings({"localhost"}); + String proto_send_chunked {"notchunked_optional"}; + String proto_recv_chunked {"notchunked_optional"}; + + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + proto_send_chunked = cap_str; + proto_recv_chunked = cap_str; + } + else + { + if (direction == "send") + proto_send_chunked = cap_str; + else + proto_recv_chunked = cap_str; + } + } + } + + Benchmark benchmark( options["concurrency"].as(), options["delay"].as(), @@ -673,6 +721,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) options["database"].as(), options["user"].as(), options["password"].as(), + proto_send_chunked, + proto_recv_chunked, options["quota_key"].as(), options["stage"].as(), options.count("randomize"), diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 1b8fe83eb51..0bceee6ea4d 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -75,9 +75,11 @@ #include #include #include +#include #include #include +#include #include "config.h" namespace fs = std::filesystem; @@ -2993,6 +2995,8 @@ void ClientBase::init(int argc, char ** argv) ("config-file,C", po::value(), "config-file path") + ("proto_caps", po::value(), "enable/disable chunked protocol: chunked_optional, notchunked, notchunked_optional, send_chunked, send_chunked_optional, send_notchunked, send_notchunked_optional, recv_chunked, recv_chunked_optional, recv_notchunked, recv_notchunked_optional") + ("query,q", po::value>()->multitoken(), R"(query; can be specified multiple times (--query "SELECT 1" --query "SELECT 2"...))") ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") ("multiquery,n", "If specified, multiple queries separated by semicolons can be listed after --query. 
For convenience, it is also possible to omit --query and pass the queries directly after --multiquery.") @@ -3162,6 +3166,41 @@ void ClientBase::init(int argc, char ** argv) if (options.count("server_logs_file")) server_logs_file = options["server_logs_file"].as(); + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + config().setString("proto_caps.send", std::string(cap_str)); + config().setString("proto_caps.recv", std::string(cap_str)); + } + else + config().setString("proto_caps." + direction, std::string(cap_str)); + } + } + query_processing_stage = QueryProcessingStage::fromString(options["stage"].as()); query_kind = parseQueryKind(options["query_kind"].as()); profile_events.print = options.count("print-profile-events"); diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 082fe8d5098..9327b694d29 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -71,6 +71,7 @@ Connection::~Connection() = default; Connection::Connection(const String & host_, UInt16 port_, const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, const String & proto_recv_chunked_, [[maybe_unused]] const SSHKey & ssh_private_key_, const String & quota_key_, const String & cluster_, @@ -80,6 +81,7 @@ Connection::Connection(const String & host_, UInt16 port_, Protocol::Secure secure_) : host(host_), port(port_), default_database(default_database_) , user(user_), password(password_) + , proto_send_chunked(proto_send_chunked_), proto_recv_chunked(proto_recv_chunked_) #if USE_SSH , ssh_private_key(ssh_private_key_) #endif @@ -206,13 +208,46 @@ void Connection::connect(const ConnectionTimeouts & timeouts) sendHello(); receiveHello(timeouts.handshake_timeout); + bool out_chunked = false; + bool in_chunked = false; + + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) + { + bool chunked_srv = chunked_srv_str.starts_with("chunked"); + bool optional_srv = chunked_srv_str.ends_with("_optional"); + bool chunked_cl = chunked_cl_str.starts_with("chunked"); + bool optional_cl = chunked_cl_str.ends_with("_optional"); + + if (optional_srv) + return chunked_cl; + if (optional_cl) + return chunked_srv; + if (chunked_cl != chunked_srv) + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: {} set to {}, server requires {}", + direction, + chunked_cl ? "chunked" : "notchunked", + chunked_srv ? 
"chunked" : "notchunked"); + + return chunked_srv; + }; + + out_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send"); + in_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv"); + } + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) sendAddendum(); if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { - in->enableChunked(); - out->enableChunked(); + if (out_chunked) + out->enableChunked(); + if (in_chunked) + in->enableChunked(); } LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.", @@ -359,6 +394,13 @@ void Connection::sendAddendum() { if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY) writeStringBinary(quota_key, *out); + + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + writeStringBinary(proto_send_chunked, *out); + writeStringBinary(proto_recv_chunked, *out); + } + out->next(); } @@ -438,6 +480,12 @@ void Connection::receiveHello(const Poco::Timespan & handshake_timeout) else server_version_patch = server_revision; + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + readStringBinary(proto_send_chunked_srv, *in); + readStringBinary(proto_recv_chunked_srv, *in); + } + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { UInt64 rules_size; @@ -1327,6 +1375,8 @@ ServerConnectionPtr Connection::createConnection(const ConnectionParameters & pa parameters.default_database, parameters.user, parameters.password, + parameters.proto_send_chunked, + parameters.proto_recv_chunked, parameters.ssh_private_key, parameters.quota_key, "", /* cluster */ diff --git a/src/Client/Connection.h b/src/Client/Connection.h index e7a6d948204..a04ccd44627 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -52,6 +52,7 @@ public: Connection(const String & host_, UInt16 port_, const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, const String & proto_recv_chunked_, const SSHKey & ssh_private_key_, const String & quota_key_, const String & cluster_, @@ -169,6 +170,10 @@ private: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; + String proto_send_chunked_srv; + String proto_recv_chunked_srv; #if USE_SSH SSHKey ssh_private_key; #endif diff --git a/src/Client/ConnectionParameters.cpp b/src/Client/ConnectionParameters.cpp index 774f3375f63..430c462084a 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -103,6 +103,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati #endif } + proto_send_chunked = config.getString("proto_caps.send", "notchunked_optional"); + proto_recv_chunked = config.getString("proto_caps.recv", "notchunked_optional"); + quota_key = config.getString("quota_key", ""); /// By default compression is disabled if address looks like localhost. 
diff --git a/src/Client/ConnectionParameters.h b/src/Client/ConnectionParameters.h index f23522d48b3..85174924016 100644 --- a/src/Client/ConnectionParameters.h +++ b/src/Client/ConnectionParameters.h @@ -20,6 +20,8 @@ struct ConnectionParameters std::string default_database; std::string user; std::string password; + std::string proto_send_chunked; + std::string proto_recv_chunked; std::string quota_key; SSHKey ssh_private_key; Protocol::Secure security = Protocol::Secure::Disable; diff --git a/src/Client/ConnectionPool.cpp b/src/Client/ConnectionPool.cpp index 5cabb1465d1..05cb97cadc7 100644 --- a/src/Client/ConnectionPool.cpp +++ b/src/Client/ConnectionPool.cpp @@ -12,6 +12,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get( String default_database, String user, String password, + String proto_send_chunked, + String proto_recv_chunked, String quota_key, String cluster, String cluster_secret, @@ -21,7 +23,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get( Priority priority) { Key key{ - max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; + max_connections, host, port, default_database, user, password, proto_send_chunked, proto_recv_chunked, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; std::lock_guard lock(mutex); auto [it, inserted] = pools.emplace(key, ConnectionPoolPtr{}); @@ -38,6 +40,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get( default_database, user, password, + proto_send_chunked, + proto_recv_chunked, quota_key, cluster, cluster_secret, diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index d35c2552461..2df97dfb454 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -72,6 +72,8 @@ public: const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, + const String & proto_recv_chunked_, const String & quota_key_, const String & cluster_, const String & cluster_secret_, @@ -84,6 +86,8 @@ public: , default_database(default_database_) , user(user_) , password(password_) + , proto_send_chunked(proto_send_chunked_) + , proto_recv_chunked(proto_recv_chunked_) , quota_key(quota_key_) , cluster(cluster_) , cluster_secret(cluster_secret_) @@ -123,7 +127,9 @@ protected: { return std::make_shared( host, port, - default_database, user, password, SSHKey(), quota_key, + default_database, user, password, + proto_send_chunked, proto_recv_chunked, + SSHKey(), quota_key, cluster, cluster_secret, client_name, compression, secure); } @@ -132,6 +138,8 @@ private: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; /// For inter-server authorization @@ -157,6 +165,8 @@ public: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; String cluster; String cluster_secret; @@ -180,6 +190,8 @@ public: String default_database, String user, String password, + String proto_send_chunked, + String proto_recv_chunked, String quota_key, String cluster, String cluster_secret, @@ -197,6 +209,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP { return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port && lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password + && lhs.proto_send_chunked == rhs.proto_send_chunked 
&& lhs.proto_recv_chunked == rhs.proto_recv_chunked && lhs.quota_key == rhs.quota_key && lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name && lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority; diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index bf16f315ddf..3b096da92c6 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -51,6 +51,8 @@ namespace configuration.db, configuration.user, configuration.password, + configuration.proto_send_chunked, + configuration.proto_recv_chunked, configuration.quota_key, "", /* cluster */ "", /* cluster_secret */ @@ -222,7 +224,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) { validateNamedCollection( *named_collection, {}, ValidateKeysMultiset{ - "secure", "host", "hostname", "port", "user", "username", "password", "quota_key", "name", + "secure", "host", "hostname", "port", "user", "username", "password", "proto_send_chunked", "proto_recv_chunked", "quota_key", "name", "db", "database", "table","query", "where", "invalidate_query", "update_field", "update_lag"}); const auto secure = named_collection->getOrDefault("secure", false); @@ -234,6 +236,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = named_collection->getAnyOrDefault({"user", "username"}, "default"), .password = named_collection->getOrDefault("password", ""), + .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "notchunked_optional"), + .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "notchunked_optional"), .quota_key = named_collection->getOrDefault("quota_key", ""), .db = named_collection->getAnyOrDefault({"db", "database"}, default_database), .table = named_collection->getOrDefault("table", ""), @@ -258,6 +262,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = config.getString(settings_config_prefix + ".user", "default"), .password = config.getString(settings_config_prefix + ".password", ""), + .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "notchunked_optional"), + .proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "notchunked_optional"), .quota_key = config.getString(settings_config_prefix + ".quota_key", ""), .db = config.getString(settings_config_prefix + ".db", default_database), .table = config.getString(settings_config_prefix + ".table", ""), diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h index 3357514eab2..faf9e5f8009 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.h +++ b/src/Dictionaries/ClickHouseDictionarySource.h @@ -23,6 +23,8 @@ public: const std::string host; const std::string user; const std::string password; + const std::string proto_send_chunked; + const std::string proto_recv_chunked; const std::string quota_key; const std::string db; const std::string table; diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 59c98491c14..1d7ccd484d0 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -113,6 +113,9 @@ Cluster::Address::Address( secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? 
Protocol::Secure::Enable : Protocol::Secure::Disable; priority = Priority{config.getInt(config_prefix + ".priority", 1)}; + proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "notchunked_optional"); + proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "notchunked_optional"); + const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port"; auto default_port = config.getInt(port_type, 0); @@ -425,7 +428,9 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config, auto pool = ConnectionPoolFactory::instance().get( static_cast(settings.distributed_connections_pool_size), address.host_name, address.port, - address.default_database, address.user, address.password, address.quota_key, + address.default_database, address.user, address.password, + address.proto_send_chunked, address.proto_recv_chunked, + address.quota_key, address.cluster, address.cluster_secret, "server", address.compression, address.secure, address.priority); @@ -589,6 +594,8 @@ void Cluster::addShard( replica.default_database, replica.user, replica.password, + replica.proto_send_chunked, + replica.proto_recv_chunked, replica.quota_key, replica.cluster, replica.cluster_secret, @@ -744,6 +751,8 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti address.default_database, address.user, address.password, + address.proto_send_chunked, + address.proto_recv_chunked, address.quota_key, address.cluster, address.cluster_secret, diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index dc5790ac339..c993af5fc5e 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -114,6 +114,8 @@ public: UInt16 port{0}; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; /// For inter-server authorization diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 2071eac3a68..c7db25c4c3a 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1,6 +1,7 @@ #include "Interpreters/AsynchronousInsertQueue.h" #include "Interpreters/SquashingTransform.h" #include "Parsers/ASTInsertQuery.h" +#include #include #include #include @@ -99,6 +100,7 @@ namespace DB::ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int UNSUPPORTED_METHOD; extern const int USER_EXPIRED; + extern const int NETWORK_ERROR; } namespace @@ -279,8 +281,35 @@ void TCPHandler::runImpl() if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { - in->enableChunked(); - out->enableChunked(); + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) + { + bool chunked_srv = chunked_srv_str.starts_with("chunked"); + bool optional_srv = chunked_srv_str.ends_with("_optional"); + bool chunked_cl = chunked_cl_str.starts_with("chunked"); + bool optional_cl = chunked_cl_str.ends_with("_optional"); + + if (optional_srv) + return chunked_cl; + if (optional_cl) + return chunked_srv; + if (chunked_cl != chunked_srv) + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: {} is {}, client requested {}", + direction, + chunked_srv ? "chunked" : "notchunked", + chunked_cl ? 
"chunked" : "notchunked"); + + return chunked_srv; + }; + + bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "notchunked_optional"), proto_recv_chunked_cl, "send"); + bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "notchunked_optional"), proto_send_chunked_cl, "recv"); + + if (out_chunked) + out->enableChunked(); + if (in_chunked) + in->enableChunked(); } if (!is_interserver_mode) @@ -1575,6 +1604,12 @@ void TCPHandler::receiveAddendum() if (!is_interserver_mode) session->setQuotaClientKey(quota_key); + + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + readStringBinary(proto_send_chunked_cl, *in); + readStringBinary(proto_recv_chunked_cl, *in); + } } @@ -1608,6 +1643,11 @@ void TCPHandler::sendHello() writeStringBinary(server_display_name, *out); if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) writeVarUInt(VERSION_PATCH, *out); + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + writeStringBinary(server.config().getString("proto_caps.send", "notchunked_optional"), *out); + writeStringBinary(server.config().getString("proto_caps.recv", "notchunked_optional"), *out); + } if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { auto rules = server.context()->getAccessControl().getPasswordComplexityRules(); diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 67d77381167..baef92b9fa0 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -188,6 +188,8 @@ private: UInt64 client_version_minor = 0; UInt64 client_version_patch = 0; UInt32 client_tcp_protocol_version = 0; + String proto_send_chunked_cl; + String proto_recv_chunked_cl; String quota_key; /// Connection settings, which are extracted from a context. 
diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index d471c67553d..dd318f34148 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -273,6 +273,8 @@ ConnectionPoolWithFailoverPtr DistributedAsyncInsertDirectoryQueue::createPool(c address.default_database, address.user, address.password, + address.proto_send_chunked, + address.proto_recv_chunked, address.quota_key, address.cluster, address.cluster_secret, diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 378b81c6d18..4475e265395 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5664,7 +5664,8 @@ std::optional StorageReplicatedMergeTree::distributedWriteFromClu { auto connection = std::make_shared( node.host_name, node.port, query_context->getGlobalContext()->getCurrentDatabase(), - node.user, node.password, SSHKey(), node.quota_key, node.cluster, node.cluster_secret, + node.user, node.password, node.proto_send_chunked, node.proto_recv_chunked, + SSHKey(), node.quota_key, node.cluster, node.cluster_secret, "ParallelInsertSelectInititiator", node.compression, node.secure From 147ad42df09f374df971d6bed36ccf67c97d87a9 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 24 May 2024 03:36:29 +0000 Subject: [PATCH 0072/2361] fix notchunked mode in ReadBufferFromPocoSocketChunked --- src/IO/ReadBufferFromPocoSocketChunked.cpp | 7 +++++++ src/IO/ReadBufferFromPocoSocketChunked.h | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 4d40d8b4f14..a67a5bb41a9 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -44,6 +44,13 @@ void ReadBufferFromPocoSocketChunked::setAsyncCallback(AsyncCallback async_callb buffer_socket.setAsyncCallback(async_callback_); } +bool ReadBufferFromPocoSocketChunked::hasBufferedData() const +{ + if (chunked) + return hasPendingData() || buffer_socket.hasPendingData(); + return hasPendingData(); +} + bool ReadBufferFromPocoSocketChunked::startChunk() { if (buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) < sizeof(chunk_left)) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index c70363cf7d8..b0f5dd7dc5f 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -16,7 +16,7 @@ public: bool poll(size_t timeout_microseconds); void setAsyncCallback(AsyncCallback async_callback_); - bool hasBufferedData() const { return hasPendingData() || buffer_socket.hasPendingData(); } + bool hasBufferedData() const; Poco::Net::SocketAddress peerAddress() { return peer_address; } Poco::Net::SocketAddress ourAddress() { return our_address; } From 89205d78a68879399129b64f78cd27f7602bf373 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 31 May 2024 04:18:36 +0000 Subject: [PATCH 0073/2361] major refactoring --- src/Client/Connection.cpp | 2 +- src/IO/ReadBufferFromPocoSocket.cpp | 51 ++---- src/IO/ReadBufferFromPocoSocket.h | 20 ++- src/IO/ReadBufferFromPocoSocketChunked.cpp | 183 ++++++++++++--------- src/IO/ReadBufferFromPocoSocketChunked.h | 98 +++++++++-- 5 files changed, 222 insertions(+), 132 deletions(-) diff --git 
a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 9327b694d29..c221124932a 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -1101,7 +1101,7 @@ bool Connection::poll(size_t timeout_microseconds) bool Connection::hasReadPendingData() const { - return last_input_packet_type.has_value() || in->hasBufferedData(); + return last_input_packet_type.has_value() || in->hasPendingData(); } diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 5fb7ea0440c..5c338ef18bc 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -32,9 +32,16 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -size_t ReadBufferFromPocoSocket::readSocket(Position begin, size_t size) +ssize_t ReadBufferFromPocoSocketBase::socketReceiveBytesImpl(char * ptr, size_t size) { ssize_t bytes_read = 0; + Stopwatch watch; + + SCOPE_EXIT({ + /// NOTE: it is quite inaccurate on high loads since the thread could be replaced by another one + ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); + ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); + }); /// Add more details to exceptions. try @@ -49,7 +56,7 @@ size_t ReadBufferFromPocoSocket::readSocket(Position begin, size_t size) socket.setBlocking(false); SCOPE_EXIT(socket.setBlocking(true)); bool secure = socket.secure(); - bytes_read = socket.impl()->receiveBytes(begin, static_cast(size)); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); /// Check EAGAIN and ERR_SSL_WANT_READ/ERR_SSL_WANT_WRITE for secure socket (reading from secure socket can write too). while (bytes_read < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(bytes_read) || checkSSLWantWrite(bytes_read))))) @@ -61,12 +68,12 @@ size_t ReadBufferFromPocoSocket::readSocket(Position begin, size_t size) async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR); /// Try to read again. 
- bytes_read = socket.impl()->receiveBytes(begin, static_cast(size)); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); } } else { - bytes_read = socket.impl()->receiveBytes(begin, static_cast(size)); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); } } catch (const Poco::Net::NetException & e) @@ -90,36 +97,12 @@ size_t ReadBufferFromPocoSocket::readSocket(Position begin, size_t size) return bytes_read; } -bool ReadBufferFromPocoSocket::readSocketExact(Position begin, size_t size) +bool ReadBufferFromPocoSocketBase::nextImpl() { - for (size_t bytes_left = size; bytes_left > 0;) - { - size_t ret = readSocket(begin + size - bytes_left, bytes_left); - if (ret == 0) - return false; - bytes_left -= ret; - } - - return true; -} - -bool ReadBufferFromPocoSocket::nextImpl() -{ - ssize_t bytes_read = 0; - Stopwatch watch; - - SCOPE_EXIT({ - /// NOTE: it is quite inaccurate on high loads since the thread could be replaced by another one - ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); - ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); - }); - - CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); - if (internal_buffer.size() > INT_MAX) throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); - bytes_read = readSocket(internal_buffer.begin(), internal_buffer.size()); + ssize_t bytes_read = socketReceiveBytesImpl(internal_buffer.begin(), internal_buffer.size()); if (read_event != ProfileEvents::end()) ProfileEvents::increment(read_event, bytes_read); @@ -132,7 +115,7 @@ bool ReadBufferFromPocoSocket::nextImpl() return true; } -ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size) +ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size) : BufferWithOwnMemory(buf_size) , socket(socket_) , peer_address(socket.peerAddress()) @@ -141,13 +124,13 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, { } -ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) - : ReadBufferFromPocoSocket(socket_, buf_size) +ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBufferFromPocoSocketBase(socket_, buf_size) { read_event = read_event_; } -bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds) const +bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const { if (available()) return true; diff --git a/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h index c40a54ed7ae..a36bea6d679 100644 --- a/src/IO/ReadBufferFromPocoSocket.h +++ b/src/IO/ReadBufferFromPocoSocket.h @@ -9,7 +9,7 @@ namespace DB { /// Works with the ready Poco::Net::Socket. Blocking operations. 
-class ReadBufferFromPocoSocket : public BufferWithOwnMemory +class ReadBufferFromPocoSocketBase : public BufferWithOwnMemory { protected: Poco::Net::Socket & socket; @@ -25,19 +25,29 @@ protected: bool nextImpl() override; public: - explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); - explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); bool poll(size_t timeout_microseconds) const; void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); } - size_t readSocket(Position begin, size_t size); - bool readSocketExact(Position begin, size_t size); + ssize_t socketReceiveBytesImpl(char * ptr, size_t size); private: AsyncCallback async_callback; std::string socket_description; }; +class ReadBufferFromPocoSocket : public ReadBufferFromPocoSocketBase +{ +public: + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, buf_size = DBMS_DEFAULT_BUFFER_SIZE) + {} + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size) + {} +}; + } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index a67a5bb41a9..3cc8710407e 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -16,126 +16,149 @@ ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Sock {} ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) - : ReadBuffer(nullptr, 0), log(getLogger("Protocol")), peer_address(socket_.peerAddress()), our_address(socket_.address()), buffer_socket(socket_, read_event_, buf_size) + : ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size), our_address(socket_.address()), log(getLogger("Protocol")) + { chassert(buf_size <= std::numeric_limits::max()); - - working_buffer = buffer_socket.buffer(); - pos = buffer_socket.position(); } void ReadBufferFromPocoSocketChunked::enableChunked() { - chunked = true; - buffer_socket.position() = pos; + if (chunked) + return; + chunked = 1; + data_end = buffer().end(); working_buffer.resize(offset()); + chunk_left = 0; + next_chunk = 0; } -bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) -{ - if (!chunked) - buffer_socket.position() = pos; - - return buffer_socket.poll(timeout_microseconds); -} - -void ReadBufferFromPocoSocketChunked::setAsyncCallback(AsyncCallback async_callback_) -{ - buffer_socket.setAsyncCallback(async_callback_); -} - -bool ReadBufferFromPocoSocketChunked::hasBufferedData() const +bool ReadBufferFromPocoSocketChunked::hasPendingData() const { if (chunked) - return hasPendingData() || buffer_socket.hasPendingData(); - return hasPendingData(); + return available() || static_cast(data_end - working_buffer.end()) > sizeof(next_chunk); + + return ReadBufferFromPocoSocketBase::hasPendingData(); } -bool 
ReadBufferFromPocoSocketChunked::startChunk() +bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) const { - if (buffer_socket.read(reinterpret_cast(&chunk_left), sizeof(chunk_left)) < sizeof(chunk_left)) - return false; - if (chunk_left == 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); + if (chunked) + if (available() || static_cast(data_end - working_buffer.end()) > sizeof(next_chunk)) + return true; - chunk_left = fromLittleEndian(chunk_left); - - return nextChunk(); + return ReadBufferFromPocoSocketBase::poll(timeout_microseconds); } -bool ReadBufferFromPocoSocketChunked::nextChunk() -{ - if (chunk_left == 0) - { - started = true; - return startChunk(); - } - if (buffer_socket.available() == 0) - if (!buffer_socket.next()) +bool ReadBufferFromPocoSocketChunked::load_next_chunk(Position c_pos, bool cont) +{ + auto buffered = std::min(static_cast(data_end - c_pos), sizeof(next_chunk)); + + if (buffered) + std::memcpy(&next_chunk, c_pos, buffered); + if (buffered < sizeof(next_chunk)) + if (socketReceiveBytesImpl(reinterpret_cast(&next_chunk) + buffered, sizeof(next_chunk) - buffered) < static_cast(sizeof(next_chunk) - buffered)) return false; - if (started) - LOG_TEST(log, "Packet receive started. Message {}, size {}", static_cast(*buffer_socket.position()), chunk_left); - else - LOG_TEST(log, "Packet receive continued. Size {}", chunk_left); + next_chunk = fromLittleEndian(next_chunk); - started = false; - - nextimpl_working_buffer_offset = buffer_socket.offset(); - - if (buffer_socket.available() < chunk_left) + if (next_chunk) { - working_buffer.resize(buffer_socket.offset() + buffer_socket.available()); - chunk_left -= buffer_socket.available(); - buffer_socket.position() += buffer_socket.available(); + if (cont) + LOG_TEST(log, "Packet receive continued. 
Size {}", next_chunk); + } + else + LOG_TEST(log, "Packet receive ended."); + + return true; +} + +bool ReadBufferFromPocoSocketChunked::process_chunk_left(Position c_pos) +{ + if (data_end - c_pos < chunk_left) + { + working_buffer.resize(data_end - buffer().begin()); + nextimpl_working_buffer_offset = c_pos - buffer().begin(); + chunk_left -= (data_end - c_pos); return true; } - working_buffer.resize(buffer_socket.offset() + chunk_left); - UInt8 buffered = std::min(static_cast(4), buffer_socket.available() - chunk_left); + nextimpl_working_buffer_offset = c_pos - buffer().begin(); + working_buffer.resize(nextimpl_working_buffer_offset + chunk_left); - buffer_socket.position() += chunk_left; - if (buffered > 0) - std::memcpy(&chunk_left, buffer_socket.position(), buffered); - buffer_socket.position() += buffered; + c_pos += chunk_left; - if (4 > buffered) - if (!buffer_socket.readSocketExact(reinterpret_cast(&chunk_left) + buffered, 4 - buffered)) - return false; - - chunk_left = fromLittleEndian(chunk_left); - - if (chunk_left == 0) - LOG_TEST(log, "Packet receive ended."); + if (!load_next_chunk(c_pos, true)) + return false; + chunk_left = 0; return true; } bool ReadBufferFromPocoSocketChunked::nextImpl() { - if (chunked) + if (!chunked) + return ReadBufferFromPocoSocketBase::nextImpl(); + + auto c_pos = pos; + + if (chunk_left == 0) { - if (!nextChunk()) + if (next_chunk == 0) { - pos = buffer_socket.position(); - return false; + if (chunked == 1) + chunked = 2; // first chunked block - no end marker + else + c_pos = pos + sizeof(next_chunk); // bypass chunk end marker + + if (c_pos > data_end) + c_pos = data_end; + + if (!load_next_chunk(c_pos)) + return false; + + chunk_left = next_chunk; + next_chunk = 0; + + c_pos += sizeof(next_chunk); + + if (c_pos >= data_end) + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + LOG_TEST(log, "Packet receive started. 
Message {}, size {}", static_cast(*c_pos), chunk_left); + } + else + { + c_pos += sizeof(next_chunk); + if (c_pos >= data_end) + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + chunk_left = next_chunk; + next_chunk = 0; } - return true; } - - buffer_socket.position() = pos; - - if (!buffer_socket.next()) + else { - pos = buffer_socket.position(); - return false; + chassert(c_pos == data_end); + + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); } - pos = buffer_socket.position(); - working_buffer.resize(offset() + buffer_socket.available()); - - return true; + return process_chunk_left(c_pos); } } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index b0f5dd7dc5f..851a90042ac 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -3,37 +3,111 @@ #include #include +/* + +Handshake +============= + | 'Hello' type + | handshake exchange + | chunked protocol negotiation + +============= + + +Basic chunk: + +============= +Chunk begins | 0x12345678 chunk size, 4 bytes little endian + +------------- + | Packet type always follows beginning of the chunk + | packet data + +------------- +Chunk ends | 0x00000000 4 zero bytes + +============= + + + + +Datastream chunk: + +============= +Chunk begins | 0x12345678 + +------------- + | Packet type + | packet data + +------------- + | Packet type + | packet data + +------------- +...arbitrary number ..... +of packets... ..... + +------------- + | Packet type + | packet data + +------------- +Chunk ends | 0x00000000 + +============= + + + +Multipart chunk: + +============= +Chunk begins | 0x12345678 chunk part size, 4 bytes little endian + +------------- + | Packet type + | packet data + +------------- + | Packet type + | (partial) packet data + +============= +Chunk continues | 0x12345678 chunk next part size, 4 bytes little endian + +============= + | possibly previous packet's data + +------------- + | Packet type + | packet data + +------------- +...arbitrary number ..... +of chunk parts... ..... 
+ +------------- + | Packet type + | packet data + +------------- +Chunk ends | 0x00000000 + +============= + +*/ + namespace DB { -class ReadBufferFromPocoSocketChunked: public ReadBuffer +class ReadBufferFromPocoSocketChunked: public ReadBufferFromPocoSocketBase { public: + using ReadBufferFromPocoSocketBase::setAsyncCallback; + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); void enableChunked(); - bool poll(size_t timeout_microseconds); - void setAsyncCallback(AsyncCallback async_callback_); - bool hasBufferedData() const; + bool hasPendingData() const; + + bool poll(size_t timeout_microseconds) const; Poco::Net::SocketAddress peerAddress() { return peer_address; } Poco::Net::SocketAddress ourAddress() { return our_address; } protected: - bool startChunk(); - bool nextChunk(); + bool load_next_chunk(Position c_pos, bool cont = false); + bool process_chunk_left(Position c_pos); bool nextImpl() override; +protected: + Poco::Net::SocketAddress our_address; + private: LoggerPtr log; - Poco::Net::SocketAddress peer_address; - Poco::Net::SocketAddress our_address; - ReadBufferFromPocoSocket buffer_socket; - bool chunked = false; - UInt32 chunk_left = 0; // chunk left to read from socket - bool started = false; + Position data_end = nullptr; // end position of data in the internal_buffer + UInt32 chunk_left = 0; // chunk left to read from socket + UInt32 next_chunk = 0; // size of the next cnunk + UInt8 chunked = 0; // 0 - disabled; 1 - started; 2 - enabled; }; } From 4545f3af52d8046cd2a1b54fc22fd0d592a48a31 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 31 May 2024 04:35:01 +0000 Subject: [PATCH 0074/2361] fix --- src/IO/ReadBufferFromPocoSocketChunked.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 3cc8710407e..59c56b9d008 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -121,6 +121,9 @@ bool ReadBufferFromPocoSocketChunked::nextImpl() chunk_left = next_chunk; next_chunk = 0; + if (chunk_left == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); + c_pos += sizeof(next_chunk); if (c_pos >= data_end) From d1bc58f23254ca781b6645bafb9c7cdf00326a04 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 31 May 2024 05:05:18 +0000 Subject: [PATCH 0075/2361] fix --- src/IO/ReadBufferFromPocoSocket.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 5c338ef18bc..af58efc7e10 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -43,6 +43,8 @@ ssize_t ReadBufferFromPocoSocketBase::socketReceiveBytesImpl(char * ptr, size_t ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); }); + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); + /// Add more details to exceptions. 
try { From 1dc381dbc1f0b7b53d8707b9515a0d3f6ad3f442 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 31 May 2024 05:07:40 +0000 Subject: [PATCH 0076/2361] fix --- src/IO/ReadBufferFromPocoSocketChunked.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index 851a90042ac..749ee042a7c 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -23,8 +23,6 @@ Chunk ends | 0x00000000 4 zero bytes +============= - - Datastream chunk: +============= Chunk begins | 0x12345678 @@ -45,7 +43,6 @@ Chunk ends | 0x00000000 +============= - Multipart chunk: +============= Chunk begins | 0x12345678 chunk part size, 4 bytes little endian From fdccba97a3c7d1097034bc6b0994b7f37bc5721e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 31 May 2024 06:35:04 +0000 Subject: [PATCH 0077/2361] set chunked for testing --- src/Client/ConnectionParameters.cpp | 4 ++-- src/Server/TCPHandler.cpp | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Client/ConnectionParameters.cpp b/src/Client/ConnectionParameters.cpp index 430c462084a..b6ed242acd4 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -103,8 +103,8 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati #endif } - proto_send_chunked = config.getString("proto_caps.send", "notchunked_optional"); - proto_recv_chunked = config.getString("proto_caps.recv", "notchunked_optional"); + proto_send_chunked = config.getString("proto_caps.send", "chunked"); + proto_recv_chunked = config.getString("proto_caps.recv", "chunked"); quota_key = config.getString("quota_key", ""); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index c7db25c4c3a..47e5f982a93 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -303,8 +303,8 @@ void TCPHandler::runImpl() return chunked_srv; }; - bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "notchunked_optional"), proto_recv_chunked_cl, "send"); - bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "notchunked_optional"), proto_send_chunked_cl, "recv"); + bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "chunked"), proto_recv_chunked_cl, "send"); + bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "chunked"), proto_send_chunked_cl, "recv"); if (out_chunked) out->enableChunked(); @@ -1645,8 +1645,8 @@ void TCPHandler::sendHello() writeVarUInt(VERSION_PATCH, *out); if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { - writeStringBinary(server.config().getString("proto_caps.send", "notchunked_optional"), *out); - writeStringBinary(server.config().getString("proto_caps.recv", "notchunked_optional"), *out); + writeStringBinary(server.config().getString("proto_caps.send", "chunked"), *out); + writeStringBinary(server.config().getString("proto_caps.recv", "chunked"), *out); } if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { From e3d57ab117391c3b99a8937783320a8c59e0b196 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 31 May 2024 16:05:42 +0000 Subject: [PATCH 0078/2361] set default protocol to notchunked_optional for cluster clients --- src/Interpreters/Cluster.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/Cluster.h 
b/src/Interpreters/Cluster.h index c993af5fc5e..f3146ac0134 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -114,8 +114,8 @@ public: UInt16 port{0}; String user; String password; - String proto_send_chunked; - String proto_recv_chunked; + String proto_send_chunked = "notchunked_optional"; + String proto_recv_chunked = "notchunked_optional"; String quota_key; /// For inter-server authorization From f11f41491087099c63ee9f98b6bf8a27a8e87ed9 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 2 Jun 2024 07:25:48 +0000 Subject: [PATCH 0079/2361] fix special case of testing feature for chunked protocol --- src/Server/TCPHandler.cpp | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 47e5f982a93..da276e1c404 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -2268,16 +2268,26 @@ void TCPHandler::sendData(const Block & block) } writeVarUInt(Protocol::Server::Data, *out); - /// Send external table name (empty name is the main table) - writeStringBinary("", *out); /// For testing hedged requests if (block.rows() > 0 && query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds()) { + /// This strange sequence is needed in case of chunked protocol is enabled, in order for client not to + /// hang on recieving of at least packet type - chunk will not be processed unless either chunk footer + /// or chunk continuation header is recieved - first 'next' is sending starting chunk containing packet type + /// and second 'next' is sending chunk continuation header. + out->next(); + /// Send external table name (empty name is the main table) + writeStringBinary("", *out); out->next(); std::chrono::milliseconds ms(query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds()); std::this_thread::sleep_for(ms); } + else + { + /// Send external table name (empty name is the main table) + writeStringBinary("", *out); + } state.block_out->write(block); state.maybe_compressed_out->next(); From eaeabd8d374e2e28a6208fb9ea1ea7835676c7e5 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 2 Jun 2024 13:03:48 +0000 Subject: [PATCH 0080/2361] fix typos --- src/Server/TCPHandler.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index da276e1c404..1a64ec1dd10 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -2273,8 +2273,8 @@ void TCPHandler::sendData(const Block & block) if (block.rows() > 0 && query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds()) { /// This strange sequence is needed in case of chunked protocol is enabled, in order for client not to - /// hang on recieving of at least packet type - chunk will not be processed unless either chunk footer - /// or chunk continuation header is recieved - first 'next' is sending starting chunk containing packet type + /// hang on receiving of at least packet type - chunk will not be processed unless either chunk footer + /// or chunk continuation header is received - first 'next' is sending starting chunk containing packet type /// and second 'next' is sending chunk continuation header. 
out->next(); /// Send external table name (empty name is the main table) From e0be652f4de803198b406dcbda5b1f1ac6938a9c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 3 Jun 2024 07:24:28 +0000 Subject: [PATCH 0081/2361] fix test, better log, fix defaults for client --- src/Client/ConnectionParameters.cpp | 4 ++-- src/Client/ConnectionParameters.h | 4 ++-- src/IO/ReadBufferFromPocoSocketChunked.cpp | 6 +++--- src/IO/WriteBufferFromPocoSocketChunked.h | 6 +++--- tests/integration/test_hedged_requests/test.py | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Client/ConnectionParameters.cpp b/src/Client/ConnectionParameters.cpp index b6ed242acd4..430c462084a 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -103,8 +103,8 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati #endif } - proto_send_chunked = config.getString("proto_caps.send", "chunked"); - proto_recv_chunked = config.getString("proto_caps.recv", "chunked"); + proto_send_chunked = config.getString("proto_caps.send", "notchunked_optional"); + proto_recv_chunked = config.getString("proto_caps.recv", "notchunked_optional"); quota_key = config.getString("quota_key", ""); diff --git a/src/Client/ConnectionParameters.h b/src/Client/ConnectionParameters.h index 85174924016..52fe7bd9b2b 100644 --- a/src/Client/ConnectionParameters.h +++ b/src/Client/ConnectionParameters.h @@ -20,8 +20,8 @@ struct ConnectionParameters std::string default_database; std::string user; std::string password; - std::string proto_send_chunked; - std::string proto_recv_chunked; + std::string proto_send_chunked = "notchunked_optional"; + std::string proto_recv_chunked = "notchunked_optional"; std::string quota_key; SSHKey ssh_private_key; Protocol::Secure security = Protocol::Secure::Disable; diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 59c56b9d008..328b70bdb9b 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -65,10 +65,10 @@ bool ReadBufferFromPocoSocketChunked::load_next_chunk(Position c_pos, bool cont) if (next_chunk) { if (cont) - LOG_TEST(log, "Packet receive continued. Size {}", next_chunk); + LOG_TEST(log, "{} <- {} Chunk receive continued. Size {}", ourAddress().toString(), peerAddress().toString(), next_chunk); } else - LOG_TEST(log, "Packet receive ended."); + LOG_TEST(log, "{} <- {} Chunk receive ended.", ourAddress().toString(), peerAddress().toString()); return true; } @@ -134,7 +134,7 @@ bool ReadBufferFromPocoSocketChunked::nextImpl() c_pos = buffer().begin(); } - LOG_TEST(log, "Packet receive started. Message {}, size {}", static_cast(*c_pos), chunk_left); + LOG_TEST(log, "{} <- {} Chunk receive started. 
Message {}, size {}", ourAddress().toString(), peerAddress().toString(), static_cast(*c_pos), chunk_left); } else { diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 6c35db62c0c..7c6ab53dc91 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -30,7 +30,7 @@ public: if (finished) throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: attempt to send empty chunk"); - LOG_TEST(log, "Packet send ended."); + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); finished = true; UInt32 s = 0; @@ -43,9 +43,9 @@ protected: { UInt32 s = static_cast(offset()); if (finished) - LOG_TEST(log, "Packet send started. Message {}, size {}", static_cast(*buffer().begin()), s); + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", ourAddress().toString(), peerAddress().toString(), static_cast(*buffer().begin()), s); else - LOG_TEST(log, "Packet send continued. Size {}", s); + LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), s); finished = false; s = toLittleEndian(s); diff --git a/tests/integration/test_hedged_requests/test.py b/tests/integration/test_hedged_requests/test.py index 02ecf3c1367..0d72f7c45b1 100644 --- a/tests/integration/test_hedged_requests/test.py +++ b/tests/integration/test_hedged_requests/test.py @@ -333,7 +333,7 @@ def test_receive_timeout2(started_cluster): # in packet receiving but there are replicas in process of # connection establishing. update_configs( - node_1_sleep_in_send_data=4000, + node_1_sleep_in_send_data=5000, node_2_sleep_in_send_tables_status=2000, node_3_sleep_in_send_tables_status=2000, ) From 66e387562659e9712088e09427d4c050e9f22c1f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 3 Jun 2024 09:55:51 +0000 Subject: [PATCH 0082/2361] fix tidy build --- src/IO/ReadBufferFromPocoSocket.h | 2 +- src/IO/ReadBufferFromPocoSocketChunked.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h index a36bea6d679..912388adaac 100644 --- a/src/IO/ReadBufferFromPocoSocket.h +++ b/src/IO/ReadBufferFromPocoSocket.h @@ -43,7 +43,7 @@ class ReadBufferFromPocoSocket : public ReadBufferFromPocoSocketBase { public: explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) - : ReadBufferFromPocoSocketBase(socket_, buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, buf_size) {} explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index 749ee042a7c..acf0edafe0a 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -96,7 +96,6 @@ protected: bool process_chunk_left(Position c_pos); bool nextImpl() override; -protected: Poco::Net::SocketAddress our_address; private: From 1cda4596adfc9ca384a28da80a91159641952e36 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 3 Jun 2024 11:51:01 +0000 Subject: [PATCH 0083/2361] fix tidy build --- src/IO/ReadBufferFromPocoSocketChunked.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp 
b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 328b70bdb9b..6ed6b63289c 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -101,7 +101,7 @@ bool ReadBufferFromPocoSocketChunked::nextImpl() if (!chunked) return ReadBufferFromPocoSocketBase::nextImpl(); - auto c_pos = pos; + auto * c_pos = pos; if (chunk_left == 0) { From 00ffb48924366de453ae6bd416d56be7d0a9568d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 4 Jun 2024 14:27:08 +0000 Subject: [PATCH 0084/2361] Handle table drop properly --- src/Storages/Kafka/KafkaConsumer2.cpp | 2 +- src/Storages/Kafka/KafkaConsumer2.h | 7 +- src/Storages/Kafka/StorageKafka2.cpp | 326 +++++++++++++++++++++----- src/Storages/Kafka/StorageKafka2.h | 13 +- 4 files changed, 281 insertions(+), 67 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index d2c2d7d8022..e32db78fb65 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -58,7 +58,7 @@ bool KafkaConsumer2::TopicPartition::operator<(const TopicPartition & other) con KafkaConsumer2::KafkaConsumer2( ConsumerPtr consumer_, - Poco::Logger * log_, + LoggerPtr log_, size_t max_batch_size, size_t poll_timeout_, const std::atomic & stopped_, diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index cde23ebf812..8eb21cf0364 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -10,8 +10,6 @@ #include #include -#include - namespace CurrentMetrics { extern const Metric KafkaConsumers; @@ -80,7 +78,7 @@ public: KafkaConsumer2( ConsumerPtr consumer_, - Poco::Logger * log_, + LoggerPtr log_, size_t max_batch_size, size_t poll_timeout_, const std::atomic & stopped_, @@ -110,7 +108,6 @@ public: /// Polls batch of messages from Kafka and returns read buffer containing the next message or /// nullptr when there are no messages to process. - /// TODO(antaljanosbenjamin): add batch size param ReadBufferPtr consume(const TopicPartition & topic_partition, const std::optional & message_count); // Return values for the message that's being read. 
@@ -136,7 +133,7 @@ private: }; ConsumerPtr consumer; - Poco::Logger * log; + LoggerPtr log; const size_t batch_size = 1; const size_t poll_timeout = 0; diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index ee038ae118b..b971ed4b42b 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -60,6 +60,7 @@ #include #include +#include #include namespace CurrentMetrics @@ -84,18 +85,22 @@ extern const Event KafkaWrites; namespace DB { +namespace fs = std::filesystem; + namespace ErrorCodes { extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; extern const int QUERY_NOT_ALLOWED; +extern const int REPLICA_ALREADY_EXISTS; +extern const int TABLE_IS_DROPPED; +extern const int TABLE_WAS_NOT_DROPPED; } namespace { constexpr auto MAX_FAILED_POLL_ATTEMPTS = 10; } -// TODO(antaljanosbenjamin): check performance StorageKafka2::StorageKafka2( const StorageID & table_id_, @@ -119,7 +124,7 @@ StorageKafka2::StorageKafka2( , max_rows_per_message(kafka_settings->kafka_max_rows_per_message.value) , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value, macros_info)) , num_consumers(kafka_settings->kafka_num_consumers.value) - , log(&Poco::Logger::get("StorageKafka (" + table_id_.table_name + ")")) + , log(getLogger("StorageKafka2 (" + table_id_.table_name + ")")) , semaphore(0, static_cast(num_consumers)) , settings_adjustments(createSettingsAdjustments()) , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) @@ -145,6 +150,39 @@ StorageKafka2::StorageKafka2( task->deactivate(); tasks.emplace_back(std::make_shared(std::move(task))); } + + for (size_t i = 0; i < num_consumers; ++i) + { + try + { + consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = keeper}); + ++num_created_consumers; + } + catch (const cppkafka::Exception &) + { + tryLogCurrentException(log); + } + } + for (auto try_count = 0; try_count < 5; ++try_count) + { + bool all_had_assignment = true; + for (auto & consumer_info : consumers) + { + if (nullptr == consumer_info.consumer->getKafkaAssignment()) + { + all_had_assignment = false; + consumer_info.consumer->pollEvents(); + } + } + + if (all_had_assignment) + break; + } + + const auto first_replica = createTableIfNotExists(consumers.front().consumer); + + if (!first_replica) + createReplica(); } VirtualColumnsDescription StorageKafka2::createVirtuals(StreamingHandleErrorMode handle_error_mode) @@ -257,31 +295,6 @@ StorageKafka2::write(const ASTPtr &, const StorageMetadataPtr & metadata_snapsho void StorageKafka2::startup() { - for (size_t i = 0; i < num_consumers; ++i) - { - try - { - consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = keeper}); - ++num_created_consumers; - } - catch (const cppkafka::Exception &) - { - tryLogCurrentException(log); - } - } - - try - { - createKeeperNodes(consumers.front().consumer); - } - catch (const Exception & ex) - { - if (ex.code() == ErrorCodes::LOGICAL_ERROR) - throw; - - tryLogCurrentException(log, __PRETTY_FUNCTION__); - } - // Start the reader thread for (auto & task : tasks) task->holder->activateAndSchedule(); @@ -306,7 +319,7 @@ void StorageKafka2::shutdown(bool) void StorageKafka2::drop() { - getZooKeeper()->removeRecursive(kafka_settings->kafka_keeper_path); + dropReplica(); } KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) @@ -548,48 +561,244 @@ std::optional getNumber(zkutil::ZooKeeper & keeper, const std::string & } } 
-void StorageKafka2::createKeeperNodes(const KafkaConsumer2Ptr & consumer) +bool StorageKafka2::createTableIfNotExists(const KafkaConsumer2Ptr & consumer) { - // TODO(antaljanosbenjamin): check config with other StorageKafkas - // TODO(antaljanosbenjamin): maybe also create a node in `keeper_path/replicas/` to note that this replica has the table? - const auto & keeper_path = kafka_settings->kafka_keeper_path.value; + const auto & keeper_path = fs::path(kafka_settings->kafka_keeper_path.value); - if (keeper->exists(keeper_path)) - return; + const auto & replicas_path = keeper_path / "replicas"; - keeper->createAncestors(keeper_path); - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCreateRequest(keeper_path, "", zkutil::CreateMode::Persistent)); - - ops.emplace_back(zkutil::makeCreateRequest(keeper_path + "/topics", "", zkutil::CreateMode::Persistent)); - - const auto topics_prefix = keeper_path + "/topics/"; - - const auto topic_partition_counts = consumer->getPartitionCounts(); - for (const auto & topic_partition_count : topic_partition_counts) + for (auto i = 0; i < 1000; ++i) { - ops.emplace_back(zkutil::makeCreateRequest(topics_prefix + topic_partition_count.topic, "", zkutil::CreateMode::Persistent)); + if (keeper->exists(replicas_path)) + { + LOG_DEBUG(log, "This table {} is already created, will add new replica", String(keeper_path)); + return false; + } + + /// There are leftovers from incompletely dropped table. + if (keeper->exists(keeper_path / "dropped")) + { + /// This condition may happen when the previous drop attempt was not completed + /// or when table is dropped by another replica right now. + /// This is Ok because another replica is definitely going to drop the table. + + LOG_WARNING(log, "Removing leftovers from table {} (this might take several minutes)", String(keeper_path)); + String drop_lock_path = keeper_path / "dropped" / "lock"; + Coordination::Error code = keeper->tryCreate(drop_lock_path, "", zkutil::CreateMode::Ephemeral); + + if (code == Coordination::Error::ZNONODE || code == Coordination::Error::ZNODEEXISTS) + { + LOG_WARNING(log, "The leftovers from table {} were removed by another replica", String(keeper_path)); + } + else if (code != Coordination::Error::ZOK) + { + throw Coordination::Exception::fromPath(code, drop_lock_path); + } + else + { + auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *keeper); + if (!removeTableNodesFromZooKeeper(metadata_drop_lock)) + { + /// Someone is recursively removing table right now, we cannot create new table until old one is removed + continue; + } + } + } + + keeper->createAncestors(keeper_path); + Coordination::Requests ops; + + ops.emplace_back(zkutil::makeCreateRequest(keeper_path, "", zkutil::CreateMode::Persistent)); + + const auto topics_path = keeper_path / "topics"; + ops.emplace_back(zkutil::makeCreateRequest(topics_path, "", zkutil::CreateMode::Persistent)); + + + const auto topic_partition_counts = consumer->getPartitionCounts(); + for (const auto & topic_partition_count : topic_partition_counts) + { + LOG_DEBUG( + log, + "Creating path in keeper for topic {} with {} partitions", + topic_partition_count.topic, + topic_partition_count.partition_count); + ops.emplace_back(zkutil::makeCreateRequest(topics_path / topic_partition_count.topic, "", zkutil::CreateMode::Persistent)); + + const auto partitions_path = topics_path / topic_partition_count.topic / "partitions"; + ops.emplace_back(zkutil::makeCreateRequest(partitions_path, "", zkutil::CreateMode::Persistent)); 
+ // TODO(antaljanosbenjamin): handle changing number of partitions + for (auto partition_id{0U}; partition_id < topic_partition_count.partition_count; ++partition_id) + ops.emplace_back(zkutil::makeCreateRequest(partitions_path / toString(partition_id), "", zkutil::CreateMode::Persistent)); + } + + // Create the first replica + ops.emplace_back(zkutil::makeCreateRequest(replicas_path, "", zkutil::CreateMode::Persistent)); ops.emplace_back( - zkutil::makeCreateRequest(topics_prefix + topic_partition_count.topic + "/partitions", "", zkutil::CreateMode::Persistent)); - const auto partitions_prefix = topics_prefix + topic_partition_count.topic + "/partitions/"; - // TODO(antaljanosbenjamin): handle changing number of partitions - for (auto partition_id{0U}; partition_id < topic_partition_count.partition_count; ++partition_id) - ops.emplace_back(zkutil::makeCreateRequest(partitions_prefix + toString(partition_id), "", zkutil::CreateMode::Persistent)); + zkutil::makeCreateRequest(replicas_path / kafka_settings->kafka_replica_name.value, "", zkutil::CreateMode::Persistent)); + + + Coordination::Responses responses; + const auto code = keeper->tryMulti(ops, responses); + if (code == Coordination::Error::ZNODEEXISTS) + { + LOG_INFO(log, "It looks like the table {} was created by another replica at the same moment, will retry", String(keeper_path)); + continue; + } + else if (code != Coordination::Error::ZOK) + { + zkutil::KeeperMultiException::check(code, ops, responses); + } + + LOG_INFO(log, "Table {} created successfully ", String(keeper_path)); + + return true; } + throw Exception( + ErrorCodes::REPLICA_ALREADY_EXISTS, + "Cannot create table, because it is created concurrently every time or because " + "of wrong zookeeper_path or because of logical error"); +} + +bool StorageKafka2::removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHolder::Ptr & drop_lock) +{ + bool completely_removed = false; + + Strings children; + if (const auto code = keeper->tryGetChildren(kafka_settings->kafka_keeper_path.value, children); code == Coordination::Error::ZNONODE) + throw Exception(ErrorCodes::LOGICAL_ERROR, "There is a race condition between creation and removal. It's a bug"); + + const auto keeper_path = fs::path(kafka_settings->kafka_keeper_path.value); + for (const auto & child : children) + if (child != "dropped") + keeper->tryRemoveRecursive(keeper_path / child); + + Coordination::Requests ops; Coordination::Responses responses; - const auto code = keeper->tryMulti(ops, responses); - if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS) + ops.emplace_back(zkutil::makeRemoveRequest(drop_lock->getPath(), -1)); + ops.emplace_back(zkutil::makeRemoveRequest(keeper_path / "dropped", -1)); + ops.emplace_back(zkutil::makeRemoveRequest(keeper_path, -1)); + const auto code = keeper->tryMulti(ops, responses, /* check_session_valid */ true); + + if (code == Coordination::Error::ZNONODE) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, "There is a race condition between creation and removal of replicated table. It's a bug"); + } + else if (code == Coordination::Error::ZNOTEMPTY) + { + LOG_ERROR( + log, + "Table was not completely removed from Keeper, {} still exists and may contain some garbage," + "but someone is removing it right now.", + kafka_settings->kafka_keeper_path.value); + } + else if (code != Coordination::Error::ZOK) + { + /// It is still possible that ZooKeeper session is expired or server is killed in the middle of the delete operation. 
zkutil::KeeperMultiException::check(code, ops, responses); + } + else + { + drop_lock->setAlreadyRemoved(); + completely_removed = true; + LOG_INFO(log, "Table {} was successfully removed from ZooKeeper", kafka_settings->kafka_keeper_path.value); + } + + return completely_removed; +} + +void StorageKafka2::createReplica() +{ + const auto replica_path = kafka_settings->kafka_keeper_path.value + "/replicas/" + kafka_settings->kafka_replica_name.value; + const auto code = keeper->tryCreate(replica_path, "", zkutil::CreateMode::Persistent); + if (code == Coordination::Error::ZNODEEXISTS) + throw Exception(ErrorCodes::REPLICA_ALREADY_EXISTS, "Replica {} already exists", replica_path); + else if (code == Coordination::Error::ZNONODE) + throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {} was suddenly removed", kafka_settings->kafka_keeper_path.value); + else if (code != Coordination::Error::ZOK) + throw Coordination::Exception::fromPath(code, replica_path); + + LOG_INFO(log, "Replica {} created", replica_path); +} + + +void StorageKafka2::dropReplica() +{ + if (keeper->expired()) + throw Exception(ErrorCodes::TABLE_WAS_NOT_DROPPED, "Table was not dropped because ZooKeeper session has expired."); + + auto replica_path = kafka_settings->kafka_keeper_path.value + "/replicas/" + kafka_settings->kafka_replica_name.value; + + LOG_INFO(log, "Removing replica {}", replica_path); + + if (!keeper->exists(replica_path)) + { + LOG_INFO(log, "Removing replica {} does not exist", replica_path); + return; + } + + { + keeper->tryRemoveChildrenRecursive(replica_path); + + if (keeper->tryRemove(replica_path) != Coordination::Error::ZOK) + LOG_ERROR(log, "Replica was not completely removed from Keeper, {} still exists and may contain some garbage.", replica_path); + } + + /// Check that `zookeeper_path` exists: it could have been deleted by another replica after execution of previous line. + Strings replicas; + if (Coordination::Error::ZOK != keeper->tryGetChildren(kafka_settings->kafka_keeper_path.value + "/replicas", replicas) + || !replicas.empty()) + return; + + LOG_INFO(log, "{} is the last replica, will remove table", replica_path); + + /** At this moment, another replica can be created and we cannot remove the table. + * Try to remove /replicas node first. If we successfully removed it, + * it guarantees that we are the only replica that proceed to remove the table + * and no new replicas can be created after that moment (it requires the existence of /replicas node). + * and table cannot be recreated with new /replicas node on another servers while we are removing data, + * because table creation is executed in single transaction that will conflict with remaining nodes. + */ + + /// Node /dropped works like a lock that protects from concurrent removal of old table and creation of new table. + /// But recursive removal may fail in the middle of operation leaving some garbage in zookeeper_path, so + /// we remove it on table creation if there is /dropped node. Creating thread may remove /dropped node created by + /// removing thread, and it causes race condition if removing thread is not finished yet. + /// To avoid this we also create ephemeral child before starting recursive removal. + /// (The existence of child node does not allow to remove parent node). 
+ Coordination::Requests ops; + Coordination::Responses responses; + String drop_lock_path = kafka_settings->kafka_keeper_path.value + "/dropped/lock"; + ops.emplace_back(zkutil::makeRemoveRequest(kafka_settings->kafka_keeper_path.value + "/replicas", -1)); + ops.emplace_back(zkutil::makeCreateRequest(kafka_settings->kafka_keeper_path.value + "/dropped", "", zkutil::CreateMode::Persistent)); + ops.emplace_back(zkutil::makeCreateRequest(drop_lock_path, "", zkutil::CreateMode::Ephemeral)); + Coordination::Error code = keeper->tryMulti(ops, responses); + + if (code == Coordination::Error::ZNONODE || code == Coordination::Error::ZNODEEXISTS) + { + LOG_WARNING(log, "Table {} is already started to be removing by another replica right now", replica_path); + } + else if (code == Coordination::Error::ZNOTEMPTY) + { + LOG_WARNING(log, "Another replica was suddenly created, will keep the table {}", replica_path); + } + else if (code != Coordination::Error::ZOK) + { + zkutil::KeeperMultiException::check(code, ops, responses); + } + else + { + auto drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *keeper); + LOG_INFO(log, "Removing table {} (this might take several minutes)", kafka_settings->kafka_keeper_path.value); + removeTableNodesFromZooKeeper(drop_lock); + } } std::optional StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const TopicPartitions & topic_partitions) { - // TODO(antaljanosbenjamin): Review this function with somebody who knows keeper better than me - const auto uuid_as_string = toString(uuid); - std::vector topic_partition_paths; topic_partition_paths.reserve(topic_partitions.size()); for (const auto & topic_partition : topic_partitions) @@ -598,8 +807,11 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi Coordination::Requests ops; for (const auto & topic_partition_path : topic_partition_paths) - ops.push_back(zkutil::makeCreateRequest(topic_partition_path + lock_file_name, uuid_as_string, zkutil::CreateMode::Ephemeral)); - + { + LOG_TRACE(log, "Creating locking ops for: {}", topic_partition_path + lock_file_name); + ops.push_back(zkutil::makeCreateRequest( + topic_partition_path + lock_file_name, kafka_settings->kafka_replica_name.value, zkutil::CreateMode::Ephemeral)); + } Coordination::Responses responses; if (const auto code = keeper_to_use.tryMulti(ops, responses); code != Coordination::Error::ZOK) diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 3cec473c746..c383357d79f 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -139,7 +138,7 @@ private: const size_t max_rows_per_message; const String schema_name; const size_t num_consumers; /// total number of consumers - Poco::Logger * log; + LoggerPtr log; Poco::Semaphore semaphore; const SettingsChanges settings_adjustments; std::atomic mv_attached = false; @@ -176,8 +175,6 @@ private: // Load Kafka properties from producer configuration void updateProducerConfiguration(cppkafka::Configuration & kafka_config); - UUID uuid{UUIDHelpers::generateV4()}; - String getConfigPrefix() const; void threadFunc(size_t idx); @@ -194,6 +191,14 @@ private: bool checkDependencies(const StorageID & table_id); + // Returns true if this is the first replica + bool createTableIfNotExists(const KafkaConsumer2Ptr & consumer); + // Returns true if all of the nodes were cleaned up + bool removeTableNodesFromZooKeeper(const 
zkutil::EphemeralNodeHolder::Ptr & drop_lock); + // Creates only the replica in ZooKeeper. Shouldn't be called on the first replica as it is created in createTableIfNotExists + void createReplica(); + void dropReplica(); + // Takes lock over topic partitions and set's the committed offset in topic_partitions void createKeeperNodes(const KafkaConsumer2Ptr & consumer); From e38e7d806c30ec5a295aa14fcadc53e2b9bc0ff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 4 Jun 2024 14:28:38 +0000 Subject: [PATCH 0085/2361] Save offsets properly --- src/Storages/Kafka/StorageKafka2.cpp | 14 ++++++++++++-- src/Storages/Kafka/StorageKafka2.h | 5 +---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index b971ed4b42b..aed27c27e62 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -851,16 +851,26 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi } -void StorageKafka2::saveCommittedOffset(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t committed_offset) +void StorageKafka2::saveCommittedOffset( + zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, const int64_t last_read_offset) { + const auto committed_offset = last_read_offset + 1; const auto partition_prefix = getTopicPartitionPath(topic_partition); keeper_to_use.createOrUpdate(partition_prefix + commit_file_name, toString(committed_offset), zkutil::CreateMode::Persistent); // This is best effort, if it fails we will try to remove in the next round keeper_to_use.tryRemove(partition_prefix + intent_file_name, -1); + LOG_TEST(log, "Saved offset {} for topic-partition [{}:{}]", committed_offset, topic_partition.topic, topic_partition.partition_id); } void StorageKafka2::saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t intent) { + LOG_TEST( + log, + "Saving intent of {} for topic-partition [{}:{}] at offset {}", + intent, + topic_partition.topic, + topic_partition.partition_id, + topic_partition.offset); keeper_to_use.createOrUpdate( getTopicPartitionPath(topic_partition) + intent_file_name, toString(intent), zkutil::CreateMode::Persistent); } @@ -1189,7 +1199,7 @@ bool StorageKafka2::streamToViews(size_t idx) TopicPartition topic_partition_copy{topic_partition}; if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; maybe_committed_offset.has_value()) - topic_partition_copy.offset = *maybe_committed_offset + 1; + topic_partition_copy.offset = *maybe_committed_offset; else topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index c383357d79f..c184053fc4d 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -99,7 +99,6 @@ private: KafkaConsumer2Ptr consumer; /// available consumers size_t consume_from_topic_partition_index{0}; TopicPartitions topic_partitions; - // TODO(antaljanosbenjamin): maybe recreate the ephemeral node zkutil::ZooKeeperPtr keeper; TopicPartitionLocks locks; }; @@ -200,10 +199,8 @@ private: void dropReplica(); // Takes lock over topic partitions and set's the committed offset in topic_partitions - void createKeeperNodes(const KafkaConsumer2Ptr & consumer); - std::optional lockTopicPartitions(zkutil::ZooKeeper& keeper_to_use, const TopicPartitions & 
topic_partitions); - void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t committed_offset); + void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t last_read_offset); void saveIntent(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t intent); PolledBatchInfo pollConsumer( From b59cb914cd1a10e2c1753505674d44f05553d218 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 4 Jun 2024 14:40:16 +0000 Subject: [PATCH 0086/2361] Limit polls in `pollEvents` --- src/Storages/Kafka/KafkaConsumer2.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index e32db78fb65..dabef7702cf 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -45,9 +45,9 @@ namespace ErrorCodes } using namespace std::chrono_literals; -const auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000; -const auto POLL_TIMEOUT_WO_ASSIGNMENT = 50ms; -const auto DRAIN_TIMEOUT_MS = 5000ms; +static constexpr auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000; +static constexpr auto EVENT_POLL_TIMEOUT = 50ms; +static constexpr auto DRAIN_TIMEOUT_MS = 5000ms; bool KafkaConsumer2::TopicPartition::operator<(const TopicPartition & other) const @@ -201,17 +201,21 @@ void KafkaConsumer2::drainConsumerQueue() void KafkaConsumer2::pollEvents() { - // POLL_TIMEOUT_WO_ASSIGNMENT_MS (50ms) is 100% enough just to check if we got assignment - // (see https://github.com/ClickHouse/ClickHouse/issues/11218) - auto msg = consumer->poll(POLL_TIMEOUT_WO_ASSIGNMENT); - + static constexpr int64_t max_tries = 5; + auto consumer_has_subscription = !consumer->get_subscription().empty(); + for(auto i = 0; i < max_tries && !consumer_has_subscription; ++i) + { + consumer->subscribe(topics); + consumer_has_subscription = !consumer->get_subscription().empty(); + } + auto msg = consumer->poll(EVENT_POLL_TIMEOUT); + LOG_TRACE(log, "Consumer has subscription: {}", consumer_has_subscription); // All the partition queues are detached, so the consumer shouldn't be able to poll any messages chassert(!msg && "Consumer returned a message when it was not expected"); auto consumer_queue = consumer->get_consumer_queue(); - // There should be events in the queue, so let's consume them all - while (consumer_queue.get_length() > 0) - consumer->poll(); + for(auto i = 0; i < max_tries && consumer_queue.get_length() > 0; ++i) + consumer->poll(EVENT_POLL_TIMEOUT); }; KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const From 4a0a2d8ca17f4c2a477a5510e3ad09ec573e03b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 4 Jun 2024 15:51:26 +0000 Subject: [PATCH 0087/2361] Save the offset to Kafka also to not duplicate messages when table is recreated --- src/Storages/Kafka/KafkaConsumer2.cpp | 66 +++++++++++++++++++++++++-- src/Storages/Kafka/KafkaConsumer2.h | 2 + src/Storages/Kafka/StorageKafka2.cpp | 20 ++++---- src/Storages/Kafka/StorageKafka2.h | 2 +- 4 files changed, 76 insertions(+), 14 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index dabef7702cf..7ae816f1a0f 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -95,7 +95,8 @@ KafkaConsumer2::KafkaConsumer2( needs_offset_update = true; for (const auto & 
topic_partition : topic_partitions) { - assignment->push_back(TopicPartition{topic_partition.get_topic(), topic_partition.get_partition(), INVALID_OFFSET}); + assignment->push_back( + TopicPartition{topic_partition.get_topic(), topic_partition.get_partition(), topic_partition.get_offset()}); } // We need to initialize the queues here in order to detach them from the consumer queue. Otherwise `pollEvents` might eventually poll actual messages also. @@ -376,6 +377,67 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition, co return getNextMessage(); } +void KafkaConsumer2::commit(const TopicPartition & topic_partition) +{ + static constexpr auto max_retries = 5; + bool committed = false; + + LOG_TEST( + log, + "Trying to commit offset {} to Kafka for topic-partition [{}:{}]", + topic_partition.offset, + topic_partition.topic, + topic_partition.partition_id); + + const auto topic_partition_list = std::vector{cppkafka::TopicPartition{ + topic_partition.topic, + topic_partition.partition_id, + topic_partition.offset, + }}; + for (auto try_count = 0; try_count < max_retries && !committed; ++try_count) + { + try + { + // See https://github.com/edenhill/librdkafka/issues/1470 + // broker may reject commit if during offsets.commit.timeout.ms (5000 by default), + // there were not enough replicas available for the __consumer_offsets topic. + // also some other temporary issues like client-server connectivity problems are possible + + consumer->commit(topic_partition_list); + committed = true; + LOG_INFO( + log, + "Committed offset {} to Kafka for topic-partition [{}:{}]", + topic_partition.offset, + topic_partition.topic, + topic_partition.partition_id); + } + catch (const cppkafka::HandleException & e) + { + // If there were actually no offsets to commit, return. Retrying won't solve + // anything here + if (e.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) + committed = true; + else + LOG_ERROR(log, "Exception during commit attempt: {}", e.what()); + } + } + + if (!committed) + { + // The failure is not the biggest issue, it only counts when a table is dropped and recreated, otherwise the offsets are taken from keeper. + ProfileEvents::increment(ProfileEvents::KafkaCommitFailures); + LOG_INFO( + log, + "All commit attempts failed. Last block was already written to target table(s), " + "but was not committed to Kafka."); + } + else + { + ProfileEvents::increment(ProfileEvents::KafkaCommits); + } +} + ReadBufferPtr KafkaConsumer2::getNextMessage() { while (current != messages.end()) @@ -417,8 +479,6 @@ size_t KafkaConsumer2::filterMessageErrors() void KafkaConsumer2::resetIfStopped() { - // we can react on stop only during fetching data - // after block is formed (i.e. during copying data to MV / committing) we ignore stop attempts if (stopped) { stalled_status = StalledStatus::CONSUMER_STOPPED; diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 8eb21cf0364..16d12c8723d 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -110,6 +110,8 @@ public: /// nullptr when there are no messages to process. ReadBufferPtr consume(const TopicPartition & topic_partition, const std::optional & message_count); + void commit(const TopicPartition& topic_partition); + // Return values for the message that's being read. 
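The commit above is deliberately best effort: it retries a handful of times, treats RD_KAFKA_RESP_ERR__NO_OFFSET as success, and otherwise only logs, because the Kafka-side offset matters solely when the table is dropped and recreated while Keeper remains the source of truth. Roughly the same retry shape, expressed with the confluent-kafka Python binding of librdkafka, which is an assumption for illustration only (the engine itself goes through cppkafka):

# Sketch only: confluent-kafka is an assumed client; topic and partition are placeholders.
import logging

from confluent_kafka import Consumer, KafkaException, TopicPartition


def commit_with_retries(consumer: Consumer, topic: str, partition: int, offset: int, max_retries: int = 5) -> bool:
    """Commit `offset` (the next offset to read) for one partition, retrying transient errors."""
    tp = TopicPartition(topic, partition, offset)
    for attempt in range(max_retries):
        try:
            consumer.commit(offsets=[tp], asynchronous=False)
            logging.info("Committed offset %s for [%s:%s]", offset, topic, partition)
            return True
        except KafkaException as e:
            # e.g. offsets.commit.timeout.ms expiring because __consumer_offsets is
            # under-replicated; worth another attempt, Keeper still protects correctness.
            logging.error("Commit attempt %s failed: %s", attempt + 1, e)
    return False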
String currentTopic() const { return current[-1].get_topic(); } String currentKey() const { return current[-1].get_key(); } diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index aed27c27e62..c5033be519f 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -852,14 +852,13 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi void StorageKafka2::saveCommittedOffset( - zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, const int64_t last_read_offset) + zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition) { - const auto committed_offset = last_read_offset + 1; const auto partition_prefix = getTopicPartitionPath(topic_partition); - keeper_to_use.createOrUpdate(partition_prefix + commit_file_name, toString(committed_offset), zkutil::CreateMode::Persistent); + keeper_to_use.createOrUpdate(partition_prefix + commit_file_name, toString(topic_partition.offset), zkutil::CreateMode::Persistent); // This is best effort, if it fails we will try to remove in the next round keeper_to_use.tryRemove(partition_prefix + intent_file_name, -1); - LOG_TEST(log, "Saved offset {} for topic-partition [{}:{}]", committed_offset, topic_partition.topic, topic_partition.partition_id); + LOG_TEST(log, "Saved offset {} for topic-partition [{}:{}]", topic_partition.offset, topic_partition.topic, topic_partition.partition_id); } void StorageKafka2::saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t intent) @@ -1199,9 +1198,10 @@ bool StorageKafka2::streamToViews(size_t idx) TopicPartition topic_partition_copy{topic_partition}; if (const auto & maybe_committed_offset = consumer_info.locks.at(topic_partition).committed_offset; maybe_committed_offset.has_value()) + { topic_partition_copy.offset = *maybe_committed_offset; - else - topic_partition_copy.offset = KafkaConsumer2::BEGINNING_OFFSET; + } + // in case no saved offset, we will get the offset from Kafka as a best effort. This is important to not to duplicate message when recreating the table. 
consumer_info.topic_partitions.push_back(std::move(topic_partition_copy)); } @@ -1303,11 +1303,11 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf CompletedPipelineExecutor executor(block_io.pipeline); executor.execute(); } - - saveCommittedOffset(keeper_to_use, topic_partition, last_read_offset); - lock_info.intent_size.reset(); - lock_info.committed_offset = last_read_offset; + lock_info.committed_offset = last_read_offset + 1; topic_partition.offset = last_read_offset + 1; + consumer_info.consumer->commit(topic_partition); + saveCommittedOffset(keeper_to_use, topic_partition); + lock_info.intent_size.reset(); needs_offset_reset = false; return rows; diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index c184053fc4d..e8cfcac2689 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -200,7 +200,7 @@ private: // Takes lock over topic partitions and set's the committed offset in topic_partitions std::optional lockTopicPartitions(zkutil::ZooKeeper& keeper_to_use, const TopicPartitions & topic_partitions); - void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t last_read_offset); + void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition); void saveIntent(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t intent); PolledBatchInfo pollConsumer( From c0eea71ab387602ec0d760e21fc4954079552ae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 4 Jun 2024 15:52:59 +0000 Subject: [PATCH 0088/2361] Make some tests work with the new storage kafka too --- tests/integration/test_storage_kafka/test.py | 1987 ++++++++++-------- 1 file changed, 1052 insertions(+), 935 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 8393e88db88..96438b5efa1 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -29,6 +29,7 @@ from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnecti from kafka.protocol.admin import DescribeGroupsRequest_v1 from kafka.protocol.group import MemberAssignment from kafka.admin import NewTopic +from contextlib import contextmanager # protoc --version @@ -46,6 +47,11 @@ if is_arm(): # TODO: add test for run-time offset update in CH, if we manually update it on Kafka side. # TODO: add test for SELECT LIMIT is working. 
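From this point on the integration tests are parametrized over two DDL generators: the plain Kafka engine syntax and a new variant that pins the table to Keeper. For orientation, the statement the new generator produces for a table named kafka with no extra settings looks roughly like the string below; the broker, topic and group are made-up values, and the helper emits two SETTINGS clauses, the second one enabling the experimental Keeper-backed offsets:

# Illustration of the generated DDL; the concrete broker, topic and group are placeholders.
example_new_engine_ddl = """\
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('kafka1:19092', 'some_topic', 'some_group', 'JSONEachRow', '\\n')
SETTINGS kafka_keeper_path = '/clickhouse/{database}/kafka', kafka_replica_name = 'r1'
SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1
"""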
+ +KAFKA_TOPIC_NEW = "new_t" +KAFKA_CONSUMER_GROUP_NEW = "new_cg" + + cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( "instance", @@ -57,8 +63,8 @@ instance = cluster.add_instance( "kafka_broker": "kafka1", "kafka_topic_old": "old", "kafka_group_name_old": "old", - "kafka_topic_new": "new", - "kafka_group_name_new": "new", + "kafka_topic_new": KAFKA_TOPIC_NEW, + "kafka_group_name_new": KAFKA_CONSUMER_GROUP_NEW, "kafka_client_id": "instance", "kafka_format_json_each_row": "JSONEachRow", }, @@ -142,6 +148,22 @@ def kafka_delete_topic(admin_client, topic, max_retries=50): raise Exception(f"Failed to delete topics {topic}, {result}") +@contextmanager +def kafka_topic( + admin_client, + topic_name, + num_partitions=1, + replication_factor=1, + max_retries=50, + config=None +): + kafka_create_topic(admin_client, topic_name, num_partitions, replication_factor, max_retries, config) + try: + yield None + finally: + # Code to release resource, e.g.: + kafka_delete_topic(admin_client, topic_name, max_retries) + def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15): logging.debug( "kafka_produce server:{}:{} topic:{}".format( @@ -283,11 +305,82 @@ def avro_confluent_message(schema_registry_client, value): ) return serializer.encode_record_with_schema("test_subject", schema, value) +def create_settings_string(settings): + if settings is None: + return "" + + def format_value(value): + if isinstance(value, str): + return f"'{value}'" + return str(value) + + settings_string = "SETTINGS " + keys = settings.keys() + first_key = next(iter(settings)) + settings_string += str(first_key) + " = " + format_value(settings[first_key]) + for key in keys: + if key == first_key: + continue + settings_string +=", " + str(key) + " = " + format_value(settings[key]) + return settings_string + + +def generate_old_create_table_query( + table_name, + columns_def, + database="test", + brokers="{kafka_broker}:19092", + topic_list="{kafka_topic_new}", + consumer_group="{kafka_group_name_new}", + format="{kafka_format_json_each_row}", + row_delimiter= "\\n", + keeper_path=None, # it is not used, but it is easier to handle keeper_path and replica_name like this + replica_name=None, + settings=None): + + settings_string=create_settings_string(settings) + query = f"""CREATE TABLE {database}.{table_name} ({columns_def}) ENGINE = Kafka('{brokers}', '{topic_list}', '{consumer_group}', '{format}', '{row_delimiter}') +{settings_string}""" + logging.debug(f"Generated old create query: {query}") + return query + +def generate_new_create_table_query( + table_name, + columns_def, + database="test", + brokers="{kafka_broker}:19092", + topic_list="{kafka_topic_new}", + consumer_group="{kafka_group_name_new}", + format="{kafka_format_json_each_row}", + row_delimiter= "\\n", + keeper_path=None, + replica_name=None, + settings=None): + if settings is None: + settings = {} + if keeper_path is None: + keeper_path = f"/clickhouse/{{database}}/{table_name}" + if replica_name is None: + replica_name = "r1" + settings["kafka_keeper_path"] = keeper_path + settings["kafka_replica_name"] = replica_name + settings_string=create_settings_string(settings) + query = f"""CREATE TABLE {database}.{table_name} ({columns_def}) ENGINE = Kafka('{brokers}', '{topic_list}', '{consumer_group}', '{format}', '{row_delimiter}') +{settings_string} +SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1""" + logging.debug(f"Generated new create query: {query}") + return query + +def get_topic_postfix(generator): + if 
generator == generate_old_create_table_query: + return "old" + if generator == generate_new_create_table_query: + return "new" + raise "Unexpected generator" # Tests - - -def test_kafka_column_types(kafka_cluster): +@pytest.mark.parametrize('create_query_generator, do_direct_read', [(generate_old_create_table_query, True), (generate_new_create_table_query, False)]) +def test_kafka_column_types(kafka_cluster, create_query_generator, do_direct_read): def assert_returned_exception(e): assert e.value.returncode == 36 assert ( @@ -297,57 +390,14 @@ def test_kafka_column_types(kafka_cluster): # check column with DEFAULT expression with pytest.raises(QueryRuntimeException) as exception: - instance.query( - """ - CREATE TABLE test.kafka (a Int, b Int DEFAULT 0) - ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_new}', '{kafka_group_name_new}', '{kafka_format_json_each_row}', '\\n') - """ - ) + instance.query(create_query_generator('kafka', 'a Int, b Int DEFAULT 0')) assert_returned_exception(exception) # check EPHEMERAL with pytest.raises(QueryRuntimeException) as exception: - instance.query( - """ - CREATE TABLE test.kafka (a Int, b Int EPHEMERAL) - ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_new}', '{kafka_group_name_new}', '{kafka_format_json_each_row}', '\\n') - """ - ) + instance.query(create_query_generator('kafka', 'a Int, b Int EPHEMERAL')) assert_returned_exception(exception) - # check ALIAS - instance.query( - """ - CREATE TABLE test.kafka (a Int, b String Alias toString(a)) - ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_new}', '{kafka_group_name_new}', '{kafka_format_json_each_row}', '\\n') - SETTINGS kafka_commit_on_select = 1; - """ - ) - messages = [] - for i in range(5): - messages.append(json.dumps({"a": i})) - kafka_produce(kafka_cluster, "new", messages) - result = "" - expected = TSV( - """ -0\t0 -1\t1 -2\t2 -3\t3 -4\t4 - """ - ) - retries = 50 - while retries > 0: - result += instance.query("SELECT a, b FROM test.kafka", ignore_error=True) - if TSV(result) == expected: - break - retries -= 1 - - assert TSV(result) == expected - - instance.query("DROP TABLE test.kafka SYNC") - # check MATERIALIZED with pytest.raises(QueryRuntimeException) as exception: instance.query( @@ -358,6 +408,35 @@ def test_kafka_column_types(kafka_cluster): ) assert_returned_exception(exception) + if do_direct_read: + # check ALIAS + instance.query(create_query_generator("kafka", "a Int, b String Alias toString(a)", settings={"kafka_commit_on_select":1})) + messages = [] + for i in range(5): + messages.append(json.dumps({"a": i})) + kafka_produce(kafka_cluster, KAFKA_TOPIC_NEW, messages) + result = "" + expected = TSV( + """ + 0\t0 + 1\t1 + 2\t2 + 3\t3 + 4\t4 + """ + ) + retries = 50 + while retries > 0: + result += instance.query("SELECT a, b FROM test.kafka", ignore_error=True) + if TSV(result) == expected: + break + retries -= 1 + time.sleep(0.5) + + assert TSV(result) == expected + + instance.query("DROP TABLE test.kafka SYNC") + def test_kafka_settings_old_syntax(kafka_cluster): assert TSV( @@ -423,16 +502,16 @@ def test_kafka_settings_new_syntax(kafka_cluster): messages = [] for i in range(25): messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "new", messages) + kafka_produce(kafka_cluster, KAFKA_TOPIC_NEW, messages) # Insert couple of malformed messages. 
- kafka_produce(kafka_cluster, "new", ["}{very_broken_message,"]) - kafka_produce(kafka_cluster, "new", ["}another{very_broken_message,"]) + kafka_produce(kafka_cluster, KAFKA_TOPIC_NEW, ["}{very_broken_message,"]) + kafka_produce(kafka_cluster, KAFKA_TOPIC_NEW, ["}another{very_broken_message,"]) messages = [] for i in range(25, 50): messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "new", messages) + kafka_produce(kafka_cluster, KAFKA_TOPIC_NEW, messages) result = "" while True: @@ -519,8 +598,8 @@ def test_kafka_json_as_string(kafka_cluster): "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows" ) - -def test_kafka_formats(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_formats(kafka_cluster, create_query_generator): schema_registry_client = CachedSchemaRegistryClient( "http://localhost:{}".format(kafka_cluster.schema_registry_port) ) @@ -649,7 +728,7 @@ def test_kafka_formats(kafka_cluster): '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', ], - "extra_settings": ", format_template_row='template_row.format'", + "extra_settings": {"format_template_row":"template_row.format"}, }, "Regexp": { "data_sample": [ @@ -660,7 +739,7 @@ def test_kafka_formats(kafka_cluster): # On empty message exception happens: Line "" doesn't match the regexp.: (at row 1) # /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse ], - "extra_settings": r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'", + "extra_settings": {"format_regexp":r"\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)", "format_regexp_escaping_rule": "Escaped"}, }, ## BINARY FORMATS # dumped with @@ -732,7 +811,7 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse ], - "extra_settings": ", kafka_schema='test:TestMessage'", + "extra_settings": {"kafka_schema":"test:TestMessage"}, }, "ORC": { "data_sample": [ @@ -756,7 +835,7 @@ def test_kafka_formats(kafka_cluster): # 
/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse ], - "extra_settings": ", kafka_schema='test:TestRecordStruct'", + "extra_settings": {"kafka_schema":"test:TestRecordStruct"}, }, "Parquet": { "data_sample": [ @@ -791,9 +870,9 @@ def test_kafka_formats(kafka_cluster): {"id": 0, "blockNo": 0, "val1": str("AM"), "val2": 0.5, "val3": 1}, ), ], - "extra_settings": ", format_avro_schema_registry_url='http://{}:{}'".format( + "extra_settings": {"format_avro_schema_registry_url":"http://{}:{}".format( kafka_cluster.schema_registry_host, kafka_cluster.schema_registry_port - ), + )}, "supports_empty_value": True, }, "Avro": { @@ -837,31 +916,25 @@ def test_kafka_formats(kafka_cluster): }, } + topic_postfix = str(hash(create_query_generator)) for format_name, format_opts in list(all_formats.items()): - logging.debug(("Set up {}".format(format_name))) - topic_name = "format_tests_{}".format(format_name) + logging.debug(f"Set up {format_name}") + topic_name = f"format_tests_{format_name}-{topic_postfix}" data_sample = format_opts["data_sample"] data_prefix = [] # prepend empty value when supported if format_opts.get("supports_empty_value", False): data_prefix = data_prefix + [""] kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample) + + extra_settings = format_opts.get("extra_settings") or {} + extra_settings["kafka_flush_interval_ms"] = 1000 + instance.query( """ DROP TABLE IF EXISTS test.kafka_{format_name}; - CREATE TABLE test.kafka_{format_name} ( - id Int64, - blockNo UInt16, - val1 String, - val2 Float32, - val3 UInt8 - ) ENGINE = Kafka() - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic_name}', - kafka_group_name = '{topic_name}_group', - kafka_format = '{format_name}', - kafka_flush_interval_ms = 1000 {extra_settings}; + {create_query}; DROP TABLE IF EXISTS test.kafka_{format_name}_mv; @@ -870,7 +943,13 @@ def test_kafka_formats(kafka_cluster): """.format( topic_name=topic_name, format_name=format_name, - extra_settings=format_opts.get("extra_settings") or "", + create_query=create_query_generator( + f"kafka_{format_name}", + "id Int64, blockNo UInt16, val1 String, val2 Float32, val3 UInt8", + topic_list=f"{topic_name}", + consumer_group=f"{topic_name}_group", + format=format_name, + settings=extra_settings), ) ) raw_expected = """\ @@ -905,13 +984,13 @@ def test_kafka_formats(kafka_cluster): for format_name, format_opts in list(all_formats.items()): logging.debug(("Checking {}".format(format_name))) - topic_name = f"format_tests_{format_name}" + topic_name = f"format_tests_{format_name}-{topic_postfix}" # shift offsets by 1 if format supports empty value offsets = ( [1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2] ) - result = instance.query( - "SELECT * FROM test.kafka_{format_name}_mv;".format(format_name=format_name) + result = instance.query_with_retry( + "SELECT * FROM test.kafka_{format_name}_mv;".format(format_name=format_name), check_callback=lambda x: x.count('\n') == raw_expected.count('\n') ) expected = raw_expected.format( topic_name=topic_name, @@ -1591,134 +1670,132 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster): """ assert TSV(result) == TSV(expected) - -def 
test_kafka_materialized_view(kafka_cluster): - instance.query( - """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'mv', - kafka_group_name = 'mv', - kafka_format = 'JSONEachRow', - kafka_row_delimiter = '\\n'; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) - - messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "mv", messages) - - while True: - result = instance.query("SELECT * FROM test.view") - if kafka_check_result(result): - break - - instance.query( - """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) - - kafka_check_result(result, True) - - -def test_kafka_recreate_kafka_table(kafka_cluster): - """ - Checks that materialized view work properly after dropping and recreating the Kafka table. - """ - # line for backporting: - # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_materialized_view(kafka_cluster, create_query_generator): + topic_name="mv" admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) + with kafka_topic(admin_client, topic_name): + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + DROP TABLE IF EXISTS test.kafka; + + {create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group="mv")}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; + """ + ) + + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, topic_name, messages) + + result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result) + + kafka_check_result(result, True) + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + DROP TABLE test.kafka; + """ + ) + + +# TODO(antaljanosbenjamin): fails with the new, because it doesn't store the offsets... +@pytest.mark.parametrize('create_query_generator, thread_per_consumer, log_line', [ + (generate_new_create_table_query,1,r"kafka.*Saved offset [0-9]+ for topic-partition \[recreate_kafka_table:[0-9]+"), + (generate_old_create_table_query,0,"kafka.*Committed offset [0-9]+.*recreate_kafka_table"), +]) +def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, thread_per_consumer, log_line): + """ + Checks that materialized view work properly after dropping and recreating the Kafka table. 
+ """ + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "recreate_kafka_table" - kafka_create_topic(admin_client, topic_name, num_partitions=6) - instance.query( + with kafka_topic(admin_client, topic_name, num_partitions=6): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group="recreate_kafka_table_group", + settings={ + "kafka_num_consumers": 4, + "kafka_flush_interval_ms": 1000, + "kafka_skip_broken_messages": 1048577, + "kafka_thread_per_consumer": thread_per_consumer, + }) + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {create_query}; + + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'recreate_kafka_table', - kafka_group_name = 'recreate_kafka_table_group', - kafka_format = 'JSONEachRow', - kafka_num_consumers = 6, - kafka_flush_interval_ms = 1000, - kafka_skip_broken_messages = 1048577; + ) - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) + messages = [] + for i in range(120): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "recreate_kafka_table", messages) - messages = [] - for i in range(120): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "recreate_kafka_table", messages) + instance.wait_for_log_line( + log_line, + repetitions=6, + look_behind_lines=100, + ) - instance.wait_for_log_line( - "kafka.*Committed offset [0-9]+.*recreate_kafka_table", - repetitions=6, - look_behind_lines=100, - ) - - instance.query( + instance.query( + """ + DROP TABLE test.kafka; """ - DROP TABLE test.kafka; - """ - ) + ) - kafka_produce(kafka_cluster, "recreate_kafka_table", messages) + kafka_produce(kafka_cluster, "recreate_kafka_table", messages) - instance.query( + instance.query(create_query) + + instance.wait_for_log_line( + log_line, + repetitions=6, + look_behind_lines=100, + ) + + # data was not flushed yet (it will be flushed 7.5 sec after creating MV) + assert int(instance.query("SELECT count() FROM test.view")) == 240 + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.kafka; + DROP TABLE test.view; """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'recreate_kafka_table', - kafka_group_name = 'recreate_kafka_table_group', - kafka_format = 'JSONEachRow', - kafka_num_consumers = 6, - kafka_flush_interval_ms = 1000, - kafka_skip_broken_messages = 1048577; - """ - ) - - instance.wait_for_log_line( - "kafka.*Committed offset [0-9]+.*recreate_kafka_table", - repetitions=6, - look_behind_lines=100, - ) - - # data was not flushed yet (it will be flushed 7.5 sec after creating MV) - assert int(instance.query("SELECT count() FROM test.view")) == 240 - - instance.query( - """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) - kafka_delete_topic(admin_client, topic_name) + ) -def test_librdkafka_compression(kafka_cluster): 
+@pytest.mark.parametrize('create_query_generator, log_line', [ + (generate_old_create_table_query, "Committed offset {offset}"), + (generate_new_create_table_query, r"kafka.*Saved offset [0-9]+ for topic-partition \[{topic}:[0-9]+\]") +]) +def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line): """ Regression for UB in snappy-c (that is used in librdkafka), backport pr is [1]. @@ -1754,139 +1831,144 @@ def test_librdkafka_compression(kafka_cluster): expected = "\n".join(expected) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + for compression_type in supported_compression_types: logging.debug(("Check compression {}".format(compression_type))) topic_name = "test_librdkafka_compression_{}".format(compression_type) - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + topic_config = {"compression.type": compression_type} + with kafka_topic(admin_client, topic_name, config=topic_config): + instance.query( + """{create_query}; + + CREATE MATERIALIZED VIEW test.consumer Engine=Log AS + SELECT * FROM test.kafka; + """.format( + create_query=create_query_generator( + "kafka", + "key UInt64, value String", + topic_list=topic_name, + #brokers="kafka1:19092", + #consumer_group=f"{topic_name}_group", + format="JSONEachRow", + settings={"kafka_flush_interval_ms": 1000}), + ) + ) + + kafka_produce(kafka_cluster, topic_name, messages) + + instance.wait_for_log_line(log_line.format(offset=number_of_messages, topic=topic_name)) + + result = instance.query("SELECT * FROM test.consumer") + assert TSV(result) == TSV(expected) + + instance.query("DROP TABLE test.kafka SYNC") + instance.query("DROP TABLE test.consumer SYNC") + + +# TODO(antaljanosbenjamin): It fails with the new if the topic is not created explicitly +@pytest.mark.parametrize('create_query_generator', [generate_new_create_table_query, generate_old_create_table_query]) +def test_kafka_materialized_view_with_subquery(kafka_cluster, create_query_generator): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + + topic_name = "mysq" + logging.debug(f"Using topic {topic_name}") + + with kafka_topic(admin_client, topic_name): + create_query = create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name) + instance.query( + f""" + DROP TABLE IF EXISTS test.kafka; + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + + {create_query}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM (SELECT * FROM test.kafka); + """ ) - kafka_create_topic( - admin_client, topic_name, config={"compression.type": compression_type} - ) + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, topic_name, messages) + + result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result) instance.query( """ - CREATE TABLE test.kafka (key UInt64, value String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic_name}', - kafka_group_name = '{topic_name}_group', - kafka_format = 'JSONEachRow', - kafka_flush_interval_ms = 1000; - CREATE MATERIALIZED VIEW test.consumer Engine=Log AS - SELECT * FROM test.kafka; - """.format( - topic_name=topic_name - ) + DROP TABLE 
test.consumer; + DROP TABLE test.view; + """ ) + kafka_check_result(result, True) + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_many_materialized_views(kafka_cluster, create_query_generator): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + topic_name = f"mmv-{get_topic_postfix(create_query_generator)}" + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=f"{topic_name}-group" + ) + with kafka_topic(admin_client, topic_name): + instance.query( + f""" + DROP TABLE IF EXISTS test.view1; + DROP TABLE IF EXISTS test.view2; + DROP TABLE IF EXISTS test.consumer1; + DROP TABLE IF EXISTS test.consumer2; + {create_query}; + CREATE TABLE test.view1 (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE TABLE test.view2 (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS + SELECT * FROM test.kafka; + CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS + SELECT * FROM test.kafka; + """ + ) + + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) kafka_produce(kafka_cluster, topic_name, messages) - instance.wait_for_log_line("Committed offset {}".format(number_of_messages)) + result1 = instance.query_with_retry("SELECT * FROM test.view1", check_callback=kafka_check_result) + result2 = instance.query_with_retry("SELECT * FROM test.view2", check_callback=kafka_check_result) - result = instance.query("SELECT * FROM test.consumer") - assert TSV(result) == TSV(expected) - - instance.query("DROP TABLE test.kafka SYNC") - instance.query("DROP TABLE test.consumer SYNC") - kafka_delete_topic(admin_client, topic_name) - - -def test_kafka_materialized_view_with_subquery(kafka_cluster): - instance.query( + instance.query( + """ + DROP TABLE test.consumer1; + DROP TABLE test.consumer2; + DROP TABLE test.view1; + DROP TABLE test.view2; """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'mvsq', - kafka_group_name = 'mvsq', - kafka_format = 'JSONEachRow', - kafka_row_delimiter = '\\n'; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM (SELECT * FROM test.kafka); - """ - ) - - messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "mvsq", messages) - - while True: - result = instance.query("SELECT * FROM test.view") - if kafka_check_result(result): - break - - instance.query( - """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) - - kafka_check_result(result, True) - - -def test_kafka_many_materialized_views(kafka_cluster): - instance.query( - """ - DROP TABLE IF EXISTS test.view1; - DROP TABLE IF EXISTS test.view2; - DROP TABLE IF EXISTS test.consumer1; - DROP TABLE IF EXISTS test.consumer2; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'mmv', - kafka_group_name = 'mmv', - kafka_format = 'JSONEachRow', - kafka_row_delimiter = '\\n'; - CREATE TABLE test.view1 (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - 
CREATE TABLE test.view2 (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS - SELECT * FROM test.kafka; - CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS - SELECT * FROM test.kafka; - """ - ) - - messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "mmv", messages) - - while True: - result1 = instance.query("SELECT * FROM test.view1") - result2 = instance.query("SELECT * FROM test.view2") - if kafka_check_result(result1) and kafka_check_result(result2): - break - - instance.query( - """ - DROP TABLE test.consumer1; - DROP TABLE test.consumer2; - DROP TABLE test.view1; - DROP TABLE test.view2; - """ - ) - - kafka_check_result(result1, True) - kafka_check_result(result2, True) + ) + kafka_check_result(result1, True) + kafka_check_result(result2, True) +# TODO(antaljanosbenjamin) def test_kafka_flush_on_big_message(kafka_cluster): - # Create batchs of messages of size ~100Kb + # Create batches of messages of size ~100Kb kafka_messages = 1000 batch_messages = 1000 messages = [ @@ -1989,7 +2071,8 @@ def test_kafka_virtual_columns(kafka_cluster): kafka_check_result(result, True, "test_kafka_virtual1.reference") -def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) @@ -1997,65 +2080,77 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): # default retention, since predefined timestamp_ms is used. "retention.ms": "-1", } - kafka_create_topic(admin_client, "virt2", config=topic_config) - - instance.query( - """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'virt2', - kafka_group_name = 'virt2', - kafka_format = 'JSONEachRow', - kafka_row_delimiter = '\\n'; - CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC'))) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? 
'0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka; - """ + # the topic name is hardcoded in reference, it doesn't worth to create two reference files to have separate topics, + # as the context manager will always clean up the topic + topic_name = "virt2" + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=f"{topic_name}-group" ) + with kafka_topic(admin_client, topic_name, config=topic_config): - messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "virt2", messages, 0) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {create_query}; + CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC'))) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka; + """ + ) - sql = "SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key, key" - result = instance.query(sql) - iterations = 0 - while ( - not kafka_check_result(result, False, "test_kafka_virtual2.reference") - and iterations < 10 - ): - time.sleep(3) - iterations += 1 - result = instance.query(sql) + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, topic_name, messages, 0) - kafka_check_result(result, True, "test_kafka_virtual2.reference") + def check_callback(result): + return kafka_check_result(result, False, "test_kafka_virtual2.reference") + result = instance.query_with_retry( + "SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key, key", + check_callback=check_callback) + + kafka_check_result(result, True, "test_kafka_virtual2.reference") + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + """ + ) + +def insert_with_retry(instance, values, table_name="kafka", max_try_couunt=5): + try_count = 0 + while True: + logging.debug(f"Inserting, try_count is {try_count}") + try: + try_count += 1 + instance.query("INSERT INTO test.kafka VALUES {}".format(values)) + break + except QueryRuntimeException as e: + if "Local: Timed out." 
in str(e) and try_count < max_try_couunt: + continue + else: + raise + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_insert(kafka_cluster, create_query_generator): + topic_name = "insert1" + get_topic_postfix(create_query_generator) instance.query( - """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) - - -def test_kafka_insert(kafka_cluster): - instance.query( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'insert1', - kafka_group_name = 'insert1', - kafka_format = 'TSV', - kafka_commit_on_select = 1, - kafka_row_delimiter = '\\n'; - """ + create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="TSV" + ) ) values = [] @@ -2063,91 +2158,88 @@ def test_kafka_insert(kafka_cluster): values.append("({i}, {i})".format(i=i)) values = ",".join(values) - while True: - try: - instance.query("INSERT INTO test.kafka VALUES {}".format(values)) - break - except QueryRuntimeException as e: - if "Local: Timed out." in str(e): - continue - else: - raise + insert_with_retry(instance, values) messages = [] - while True: - messages.extend(kafka_consume(kafka_cluster, "insert1")) + try_count = 0 + while True and try_count < 5: + try_count += 1 + messages.extend(kafka_consume(kafka_cluster, topic_name)) if len(messages) == 50: break + time.sleep(0.1) result = "\n".join(messages) kafka_check_result(result, True) -def test_kafka_produce_consume(kafka_cluster): - instance.query( - """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'insert2', - kafka_group_name = 'insert2', - kafka_format = 'TSV', - kafka_row_delimiter = '\\n'; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_produce_consume(kafka_cluster, create_query_generator): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - messages_num = 10000 + topic_name = "insert2" + get_topic_postfix(create_query_generator) - def insert(): - values = [] - for i in range(messages_num): - values.append("({i}, {i})".format(i=i)) - values = ",".join(values) - - while True: - try: - instance.query("INSERT INTO test.kafka VALUES {}".format(values)) - break - except QueryRuntimeException as e: - if "Local: Timed out." 
in str(e): - continue - else: - raise - - threads = [] - threads_num = 16 - for _ in range(threads_num): - threads.append(threading.Thread(target=insert)) - for thread in threads: - time.sleep(random.uniform(0, 1)) - thread.start() - - while True: - result = instance.query("SELECT count() FROM test.view") - time.sleep(1) - if int(result) == messages_num * threads_num: - break - - instance.query( + with kafka_topic(admin_client, topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="TSV" + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {create_query}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) + ) - for thread in threads: - thread.join() + messages_num = 10000 - assert ( - int(result) == messages_num * threads_num - ), "ClickHouse lost some messages: {}".format(result) + def insert(): + values = [] + for i in range(messages_num): + values.append("({i}, {i})".format(i=i)) + values = ",".join(values) + + insert_with_retry(instance, values) + + threads = [] + threads_num = 16 + for _ in range(threads_num): + threads.append(threading.Thread(target=insert)) + for thread in threads: + time.sleep(random.uniform(0, 1)) + thread.start() + + expected_row_count = messages_num * threads_num + result = instance.query_with_retry( + "SELECT count() FROM test.view", + sleep_time=1, + retry_count=20, + check_callback=lambda result: int(result) == expected_row_count) + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + """ + ) + + for thread in threads: + thread.join() + + assert ( + int(result) == expected_row_count + ), "ClickHouse lost some messages: {}".format(result) def test_kafka_commit_on_block_write(kafka_cluster): @@ -2226,8 +2318,11 @@ def test_kafka_commit_on_block_write(kafka_cluster): assert result == 1, "Messages from kafka get duplicated!" - -def test_kafka_virtual_columns2(kafka_cluster): +@pytest.mark.parametrize('create_query_generator, thread_per_consumer, log_line', [ + (generate_old_create_table_query,0,"kafka.*Committed offset 2.*virt2_[01]"), + (generate_new_create_table_query,1,r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+"), +]) +def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, thread_per_consumer, log_line): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) @@ -2236,139 +2331,142 @@ def test_kafka_virtual_columns2(kafka_cluster): # default retention, since predefined timestamp_ms is used. 
"retention.ms": "-1", } - kafka_create_topic(admin_client, "virt2_0", num_partitions=2, config=topic_config) - kafka_create_topic(admin_client, "virt2_1", num_partitions=2, config=topic_config) + topic_name_0 = "virt2_0" + topic_name_1 = "virt2_1" + consumer_group = "virt2"+get_topic_postfix(create_query_generator) + with kafka_topic(admin_client, topic_name_0, num_partitions=2, config=topic_config): + with kafka_topic(admin_client, topic_name_1, num_partitions=2, config=topic_config): + create_query = create_query_generator( + "kafka", + "value UInt64", + topic_list=f"{topic_name_0},{topic_name_1}", + consumer_group=consumer_group, + settings={ + "kafka_num_consumers":2, + "kafka_thread_per_consumer": thread_per_consumer, + } + ) - instance.query( + instance.query( + f""" + {create_query}; + + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka; + """ + ) + + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(cluster.kafka_port), + value_serializer=producer_serializer, + key_serializer=producer_serializer, + ) + + producer.send( + topic=topic_name_0, + value=json.dumps({"value": 1}), + partition=0, + key="k1", + timestamp_ms=1577836801001, + headers=[("content-encoding", b"base64")], + ) + producer.send( + topic=topic_name_0, + value=json.dumps({"value": 2}), + partition=0, + key="k2", + timestamp_ms=1577836802002, + headers=[ + ("empty_value", b""), + ("", b"empty name"), + ("", b""), + ("repetition", b"1"), + ("repetition", b"2"), + ], + ) + producer.flush() + + producer.send( + topic=topic_name_0, + value=json.dumps({"value": 3}), + partition=1, + key="k3", + timestamp_ms=1577836803003, + headers=[("b", b"b"), ("a", b"a")], + ) + producer.send( + topic=topic_name_0, + value=json.dumps({"value": 4}), + partition=1, + key="k4", + timestamp_ms=1577836804004, + headers=[("a", b"a"), ("b", b"b")], + ) + producer.flush() + + producer.send( + topic=topic_name_1, + value=json.dumps({"value": 5}), + partition=0, + key="k5", + timestamp_ms=1577836805005, + ) + producer.send( + topic=topic_name_1, + value=json.dumps({"value": 6}), + partition=0, + key="k6", + timestamp_ms=1577836806006, + ) + producer.flush() + + producer.send( + topic=topic_name_1, + value=json.dumps({"value": 7}), + partition=1, + key="k7", + timestamp_ms=1577836807007, + ) + producer.send( + topic=topic_name_1, + value=json.dumps({"value": 8}), + partition=1, + key="k8", + timestamp_ms=1577836808008, + ) + producer.flush() + + instance.wait_for_log_line(log_line, repetitions=4, look_behind_lines=6000) + + members = describe_consumer_group(kafka_cluster, consumer_group) + # pprint.pprint(members) + # members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0' + # members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1' + + result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) + + expected = f"""\ + 1 k1 {topic_name_0} 0 0 1577836801 1577836801001 ['content-encoding'] ['base64'] + 2 k2 {topic_name_0} 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2'] + 3 k3 {topic_name_0} 1 0 1577836803 1577836803003 ['b','a'] ['b','a'] + 4 k4 {topic_name_0} 1 1 1577836804 1577836804004 ['a','b'] ['a','b'] + 5 k5 {topic_name_1} 0 0 1577836805 1577836805005 [] [] + 6 k6 {topic_name_1} 0 1 1577836806 1577836806006 [] [] + 7 k7 {topic_name_1} 1 0 1577836807 1577836807007 [] [] + 8 k8 
{topic_name_1} 1 1 1577836808 1577836808008 [] [] """ - CREATE TABLE test.kafka (value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'virt2_0,virt2_1', - kafka_group_name = 'virt2', - kafka_num_consumers = 2, - kafka_format = 'JSONEachRow'; - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka; - """ - ) - - producer = KafkaProducer( - bootstrap_servers="localhost:{}".format(cluster.kafka_port), - value_serializer=producer_serializer, - key_serializer=producer_serializer, - ) - - producer.send( - topic="virt2_0", - value=json.dumps({"value": 1}), - partition=0, - key="k1", - timestamp_ms=1577836801001, - headers=[("content-encoding", b"base64")], - ) - producer.send( - topic="virt2_0", - value=json.dumps({"value": 2}), - partition=0, - key="k2", - timestamp_ms=1577836802002, - headers=[ - ("empty_value", b""), - ("", b"empty name"), - ("", b""), - ("repetition", b"1"), - ("repetition", b"2"), - ], - ) - producer.flush() - - producer.send( - topic="virt2_0", - value=json.dumps({"value": 3}), - partition=1, - key="k3", - timestamp_ms=1577836803003, - headers=[("b", b"b"), ("a", b"a")], - ) - producer.send( - topic="virt2_0", - value=json.dumps({"value": 4}), - partition=1, - key="k4", - timestamp_ms=1577836804004, - headers=[("a", b"a"), ("b", b"b")], - ) - producer.flush() - - producer.send( - topic="virt2_1", - value=json.dumps({"value": 5}), - partition=0, - key="k5", - timestamp_ms=1577836805005, - ) - producer.send( - topic="virt2_1", - value=json.dumps({"value": 6}), - partition=0, - key="k6", - timestamp_ms=1577836806006, - ) - producer.flush() - - producer.send( - topic="virt2_1", - value=json.dumps({"value": 7}), - partition=1, - key="k7", - timestamp_ms=1577836807007, - ) - producer.send( - topic="virt2_1", - value=json.dumps({"value": 8}), - partition=1, - key="k8", - timestamp_ms=1577836808008, - ) - producer.flush() - - instance.wait_for_log_line( - "kafka.*Committed offset 2.*virt2_[01]", repetitions=4, look_behind_lines=6000 - ) - - members = describe_consumer_group(kafka_cluster, "virt2") - # pprint.pprint(members) - # members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0' - # members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1' - - result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) - - expected = """\ -1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64'] -2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2'] -3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a'] -4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b'] -5 k5 virt2_1 0 0 1577836805 1577836805005 [] [] -6 k6 virt2_1 0 1 1577836806 1577836806006 [] [] -7 k7 virt2_1 1 0 1577836807 1577836807007 [] [] -8 k8 virt2_1 1 1 1577836808 1577836808008 [] [] -""" - - assert TSV(result) == TSV(expected) - - instance.query( - """ - DROP TABLE test.kafka; - DROP TABLE test.view; - """ - ) - kafka_delete_topic(admin_client, "virt2_0") - kafka_delete_topic(admin_client, "virt2_1") - instance.rotate_logs() + assert TSV(result) == TSV(expected) + instance.query( + """ + DROP TABLE test.kafka; + DROP TABLE test.view; + """ + ) + instance.rotate_logs() +# TODO(antaljanosbenjamin) def test_kafka_producer_consumer_separate_settings(kafka_cluster): instance.query( """ @@ -2446,7 
+2544,12 @@ def test_kafka_producer_consumer_separate_settings(kafka_cluster): assert property_in_log in kafka_producer_applyed_properties -def test_kafka_produce_key_timestamp(kafka_cluster): +# TODO(antaljanosbenjamin) +@pytest.mark.parametrize('create_query_generator, log_line', [ + #(generate_new_create_table_query,"Saved offset 5"), + (generate_old_create_table_query, "Committed offset 5"), +]) +def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_line): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) @@ -2456,74 +2559,75 @@ def test_kafka_produce_key_timestamp(kafka_cluster): # default retention, since predefined timestamp_ms is used. "retention.ms": "-1", } - kafka_create_topic(admin_client, topic_name, config=topic_config) - instance.query( + with kafka_topic(admin_client, topic_name, config=topic_config): + + writer_create_query = create_query_generator( + "kafka_writer", + "key UInt64, value UInt64, _key String, _timestamp DateTime('UTC')", + topic_list=topic_name, + consumer_group=topic_name, + format="TSV") + reader_create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC')", + topic_list=topic_name, + consumer_group=topic_name, + format="TSV") + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {writer_create_query}; + {reader_create_query}; + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka; """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC')) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'insert3', - kafka_group_name = 'insert3', - kafka_format = 'TSV', - kafka_row_delimiter = '\\n'; + ) - CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC')) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'insert3', - kafka_group_name = 'insert3', - kafka_format = 'TSV', - kafka_row_delimiter = '\\n'; + instance.query( + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( + 1, 1, "k1", 1577836801 + ) + ) + instance.query( + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( + 2, 2, "k2", 1577836802 + ) + ) + instance.query( + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format( + 3, 3, "k3", 1577836803, 4, 4, "k4", 1577836804 + ) + ) + instance.query( + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( + 5, 5, "k5", 1577836805 + ) + ) - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka; + # instance.wait_for_log_line(log_line) + + expected = """\ + 1 1 k1 1577836801 k1 insert3 0 0 1577836801 + 2 2 k2 1577836802 k2 insert3 0 1 1577836802 + 3 3 k3 1577836803 k3 insert3 0 2 1577836803 + 4 4 k4 1577836804 k4 insert3 0 3 1577836804 + 5 5 k5 1577836805 k5 insert3 0 4 1577836805 """ - ) - instance.query( - "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( - 1, 1, "k1", 1577836801 - ) - ) - instance.query( - 
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( - 2, 2, "k2", 1577836802 - ) - ) - instance.query( - "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format( - 3, 3, "k3", 1577836803, 4, 4, "k4", 1577836804 - ) - ) - instance.query( - "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( - 5, 5, "k5", 1577836805 - ) - ) + result = instance.query_with_retry("SELECT * FROM test.view ORDER BY value", ignore_error=True, check_callback=lambda res: TSV(res) == TSV(expected)) - instance.wait_for_log_line("Committed offset 5") - - result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) - - # logging.debug(result) - - expected = """\ -1 1 k1 1577836801 k1 insert3 0 0 1577836801 -2 2 k2 1577836802 k2 insert3 0 1 1577836802 -3 3 k3 1577836803 k3 insert3 0 2 1577836803 -4 4 k4 1577836804 k4 insert3 0 3 1577836804 -5 5 k5 1577836805 k5 insert3 0 4 1577836805 -""" - - assert TSV(result) == TSV(expected) - - kafka_delete_topic(admin_client, topic_name) + # logging.debug(result) -def test_kafka_insert_avro(kafka_cluster): + + assert TSV(result) == TSV(expected) + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_insert_avro(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) @@ -2531,49 +2635,51 @@ def test_kafka_insert_avro(kafka_cluster): # default retention, since predefined timestamp_ms is used. "retention.ms": "-1", } - kafka_create_topic(admin_client, "avro1", config=topic_config) - - instance.query( - """ - DROP TABLE IF EXISTS test.kafka; - CREATE TABLE test.kafka (key UInt64, value UInt64, _timestamp DateTime('UTC')) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'avro1', - kafka_group_name = 'avro1', - kafka_commit_on_select = 1, - kafka_format = 'Avro'; - """ - ) - - instance.query( - "INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'" - ) - - messages = [] - while True: - messages.extend( - kafka_consume( - kafka_cluster, "avro1", needDecode=False, timestamp=1636505534 - ) + topic_name="avro1" + get_topic_postfix(create_query_generator) + with kafka_topic(admin_client, topic_name, config=topic_config): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64, _timestamp DateTime('UTC')", + topic_list=topic_name, + consumer_group=topic_name, + format="Avro", + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.kafka; + {create_query} + """ ) - if len(messages) == 2: - break - result = "" - for a_message in messages: - result += decode_avro(a_message) + "\n" + instance.query( + "INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'" + ) - expected_result = """{'key': 0, 'value': 0, '_timestamp': 1636505534} + messages = [] + while True: + messages.extend( + kafka_consume( + kafka_cluster, topic_name, needDecode=False, timestamp=1636505534 + ) + ) + if len(messages) == 2: + break + + result = "" + for a_message in messages: + result += decode_avro(a_message) + "\n" + + expected_result = """{'key': 0, 'value': 0, '_timestamp': 1636505534} {'key': 10, 'value': 
100, '_timestamp': 1636505534} {'key': 20, 'value': 200, '_timestamp': 1636505534} {'key': 30, 'value': 300, '_timestamp': 1636505534} """ - assert result == expected_result + assert result == expected_result +# TODO(antaljanosbenjamin) def test_kafka_produce_consume_avro(kafka_cluster): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) @@ -2633,194 +2739,207 @@ def test_kafka_produce_consume_avro(kafka_cluster): kafka_delete_topic(admin_client, topic_name) -def test_kafka_flush_by_time(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_flush_by_time(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - topic_name = "flush_by_time" - kafka_create_topic(admin_client, topic_name) + topic_name = "flush_by_time" + get_topic_postfix(create_query_generator) - instance.query( - """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'flush_by_time', - kafka_group_name = 'flush_by_time', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 100, - kafka_row_delimiter = '\\n'; - - SELECT * FROM test.kafka; - - CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3)) - ENGINE = MergeTree() - ORDER BY key; - """ - ) - - cancel = threading.Event() - - def produce(): - while not cancel.is_set(): - messages = [] - messages.append(json.dumps({"key": 0, "value": 0})) - kafka_produce(kafka_cluster, "flush_by_time", messages) - time.sleep(0.8) - - kafka_thread = threading.Thread(target=produce) - kafka_thread.start() - - instance.query( - """ - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) - - time.sleep(18) - - result = instance.query("SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view") - - cancel.set() - kafka_thread.join() - - # kafka_cluster.open_bash_shell('instance') - - instance.query( - """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) - - assert TSV(result) == TSV("1 1") - kafka_delete_topic(admin_client, topic_name) - - -def test_kafka_flush_by_block_size(kafka_cluster): - cancel = threading.Event() - - def produce(): - while not cancel.is_set(): - messages = [] - messages.append(json.dumps({"key": 0, "value": 0})) - kafka_produce(kafka_cluster, "flush_by_block_size", messages) - - kafka_thread = threading.Thread(target=produce) - kafka_thread.start() - - instance.query( - """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'flush_by_block_size', - kafka_group_name = 'flush_by_block_size', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 100, - kafka_poll_max_batch_size = 1, - kafka_flush_interval_ms = 120000, /* should not flush by time during test */ - kafka_row_delimiter = '\\n'; - - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) - - # Wait for Kafka engine to consume this data - while 1 != int( - instance.query( - "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND 
name = 'all_1_1_0'" + with kafka_topic(admin_client, topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size":100, + } ) - ): - time.sleep(0.5) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; - cancel.set() - kafka_thread.join() + {create_query}; - # more flushes can happens during test, we need to check only result of first flush (part named all_1_1_0). - result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'") - # logging.debug(result) - - instance.query( + CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3)) + ENGINE = MergeTree() + ORDER BY key; """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ + ) + + cancel = threading.Event() + + def produce(): + while not cancel.is_set(): + messages = [json.dumps({"key": 0, "value": 0})] + kafka_produce(kafka_cluster, topic_name, messages) + time.sleep(0.8) + + kafka_thread = threading.Thread(target=produce) + kafka_thread.start() + + instance.query( + """ + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; + """ + ) + + time.sleep(18) + + result = instance.query("SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view") + + cancel.set() + kafka_thread.join() + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + """ + ) + + assert TSV(result) == TSV("1 1") + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_flush_by_block_size(kafka_cluster, create_query_generator): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) + topic_name = "flush_by_block_size" + get_topic_postfix(create_query_generator) - # 100 = first poll should return 100 messages (and rows) - # not waiting for stream_flush_interval_ms - assert ( - int(result) == 100 - ), "Messages from kafka should be flushed when block of size kafka_max_block_size is formed!" + cancel = threading.Event() + def produce(): + while not cancel.is_set(): + messages = [] + messages.append(json.dumps({"key": 0, "value": 0})) + kafka_produce(kafka_cluster, topic_name, messages) + + kafka_thread = threading.Thread(target=produce) + + with kafka_topic(admin_client, topic_name): + kafka_thread.start() + + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size": 100, + "kafka_poll_max_batch_size": 1, + "kafka_flush_interval_ms": 120000, + } + ) + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + + {create_query}; + + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; + """ + ) + + # Wait for Kafka engine to consume this data + while 1 != int( + instance.query( + "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'" + ) + ): + time.sleep(0.5) + + cancel.set() + kafka_thread.join() + + # more flushes can happens during test, we need to check only result of first flush (part named all_1_1_0). 
+ result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'") + # logging.debug(result) + + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; + """ + ) + + # 100 = first poll should return 100 messages (and rows) + # not waiting for stream_flush_interval_ms + assert ( + int(result) == 100 + ), "Messages from kafka should be flushed when block of size kafka_max_block_size is formed!" -def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - topic_name = "topic_with_multiple_partitions2" - kafka_create_topic(admin_client, topic_name, num_partitions=10) - - instance.query( + topic_name = "topic_with_multiple_partitions2" + get_topic_postfix(create_query_generator) + with kafka_topic(admin_client, topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size": 211, + "kafka_flush_interval_ms": 500, + } + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {create_query}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'topic_with_multiple_partitions2', - kafka_group_name = 'topic_with_multiple_partitions2', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 211, - kafka_flush_interval_ms = 500; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) + ) - messages = [] - count = 0 - for dummy_msg in range(1000): - rows = [] - for dummy_row in range(random.randrange(3, 10)): - count = count + 1 - rows.append(json.dumps({"key": count, "value": count})) - messages.append("\n".join(rows)) - kafka_produce(kafka_cluster, "topic_with_multiple_partitions2", messages) + messages = [] + count = 0 + for dummy_msg in range(1000): + rows = [] + for dummy_row in range(random.randrange(3, 10)): + count = count + 1 + rows.append(json.dumps({"key": count, "value": count})) + messages.append("\n".join(rows)) + kafka_produce(kafka_cluster, topic_name, messages) - instance.wait_for_log_line("kafka.*Stalled", repetitions=5) + instance.wait_for_log_line("kafka.*Stalled", repetitions=5) - result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") - logging.debug(result) - assert TSV(result) == TSV("{0}\t{0}\t{0}".format(count)) + result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") + logging.debug(result) + assert TSV(result) == TSV("{0}\t{0}\t{0}".format(count)) - instance.query( + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) - kafka_delete_topic(admin_client, topic_name) + ) -def test_kafka_rebalance(kafka_cluster): 
+@pytest.mark.parametrize('create_query_generator, log_line', [ + (generate_old_create_table_query, "{}.*Polled offset [0-9]+"), + (generate_new_create_table_query, "{}.*Saved offset"), +]) +def test_kafka_rebalance(kafka_cluster, create_query_generator, log_line): NUMBER_OF_CONSURRENT_CONSUMERS = 11 instance.query( @@ -2841,151 +2960,149 @@ def test_kafka_rebalance(kafka_cluster): """ ) - # kafka_cluster.open_bash_shell('instance') - - # time.sleep(2) - admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - topic_name = "topic_with_multiple_partitions" - kafka_create_topic(admin_client, topic_name, num_partitions=11) + topic_name = "topic_with_multiple_partitions" + get_topic_postfix(create_query_generator) + table_name_prefix = "kafka_consumer" + keeper_path = f"/clickhouse/{{database}}/{table_name_prefix}" + with kafka_topic(admin_client, topic_name, num_partitions=11): - cancel = threading.Event() + cancel = threading.Event() - msg_index = [0] + msg_index = [0] - def produce(): - while not cancel.is_set(): - messages = [] - for _ in range(59): - messages.append( - json.dumps({"key": msg_index[0], "value": msg_index[0]}) - ) - msg_index[0] += 1 - kafka_produce(kafka_cluster, "topic_with_multiple_partitions", messages) + def produce(): + while not cancel.is_set(): + messages = [] + for _ in range(59): + messages.append( + json.dumps({"key": msg_index[0], "value": msg_index[0]}) + ) + msg_index[0] += 1 + kafka_produce(kafka_cluster, topic_name, messages) - kafka_thread = threading.Thread(target=produce) - kafka_thread.start() + kafka_thread = threading.Thread(target=produce) + kafka_thread.start() - for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS): - table_name = "kafka_consumer{}".format(consumer_index) - logging.debug(("Setting up {}".format(table_name))) + for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS): + table_name = f"{table_name_prefix}{consumer_index}" + replica_name = f"r{consumer_index}" + logging.debug(f"Setting up {consumer_index}") - instance.query( - """ - DROP TABLE IF EXISTS test.{0}; - DROP TABLE IF EXISTS test.{0}_mv; - CREATE TABLE test.{0} (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'topic_with_multiple_partitions', - kafka_group_name = 'rebalance_test_group', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 33, - kafka_flush_interval_ms = 500; - CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS - SELECT - key, - value, - _topic, - _key, - _offset, - _partition, - _timestamp, - '{0}' as _consumed_by - FROM test.{0}; - """.format( - table_name + create_query = create_query_generator( + table_name, + "key UInt64, value UInt64", + topic_list=topic_name, + keeper_path=keeper_path, + replica_name=replica_name, + settings={ + "kafka_max_block_size": 33, + "kafka_flush_interval_ms": 500, + } ) - ) + instance.query( + f""" + DROP TABLE IF EXISTS test.{table_name}; + DROP TABLE IF EXISTS test.{table_name}_mv; + {create_query}; + CREATE MATERIALIZED VIEW test.{table_name}_mv TO test.destination AS + SELECT + key, + value, + _topic, + _key, + _offset, + _partition, + _timestamp, + '{table_name}' as _consumed_by + FROM test.{table_name}; + """ + ) + # kafka_cluster.open_bash_shell('instance') + # Waiting for test.kafka_consumerX to start consume ... 
+ instance.wait_for_log_line(log_line.format(table_name)) + + cancel.set() + + # I leave last one working by intent (to finish consuming after all rebalances) + for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS - 1): + logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index))) + instance.query( + "DROP TABLE IF EXISTS test.kafka_consumer{} SYNC".format(consumer_index) + ) + + # logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')) # kafka_cluster.open_bash_shell('instance') - # Waiting for test.kafka_consumerX to start consume ... - instance.wait_for_log_line( - "kafka_consumer{}.*Polled offset [0-9]+".format(consumer_index) - ) - cancel.set() + while 1: + messages_consumed = int( + instance.query("SELECT uniqExact(key) FROM test.destination") + ) + if messages_consumed >= msg_index[0]: + break + time.sleep(1) + logging.debug( + ( + "Waiting for finishing consuming (have {}, should be {})".format( + messages_consumed, msg_index[0] + ) + ) + ) - # I leave last one working by intent (to finish consuming after all rebalances) - for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS - 1): - logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index))) - instance.query( - "DROP TABLE IF EXISTS test.kafka_consumer{} SYNC".format(consumer_index) - ) - - # logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')) - # kafka_cluster.open_bash_shell('instance') - - while 1: - messages_consumed = int( - instance.query("SELECT uniqExact(key) FROM test.destination") - ) - if messages_consumed >= msg_index[0]: - break - time.sleep(1) logging.debug( ( - "Waiting for finishing consuming (have {}, should be {})".format( - messages_consumed, msg_index[0] + instance.query( + "SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination" ) ) ) - logging.debug( - ( - instance.query( - "SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination" - ) + # Some queries to debug... + # SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1) + # select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0; + # SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key; + # select _partition from test.destination group by _partition having count() <> max(_offset) + 1; + # select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset; + # SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset; + + # CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', + # kafka_topic_list = 'topic_with_multiple_partitions', + # kafka_group_name = 'rebalance_test_group_reference', + # kafka_format = 'JSONEachRow', + # kafka_max_block_size = 100000; + # + # CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS + # SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by + # FROM test.reference; + # + # select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = ''; + + result = int( + instance.query("SELECT count() == uniqExact(key) FROM test.destination") ) - ) - # Some queries to debug... 
- # SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1) - # select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0; - # SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key; - # select _partition from test.destination group by _partition having count() <> max(_offset) + 1; - # select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset; - # SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset; + for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS): + logging.debug(("kafka_consumer{}".format(consumer_index))) + table_name = "kafka_consumer{}".format(consumer_index) + instance.query( + """ + DROP TABLE IF EXISTS test.{0}; + DROP TABLE IF EXISTS test.{0}_mv; + """.format( + table_name + ) + ) - # CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', - # kafka_topic_list = 'topic_with_multiple_partitions', - # kafka_group_name = 'rebalance_test_group_reference', - # kafka_format = 'JSONEachRow', - # kafka_max_block_size = 100000; - # - # CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS - # SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by - # FROM test.reference; - # - # select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = ''; - - result = int( - instance.query("SELECT count() == uniqExact(key) FROM test.destination") - ) - - for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS): - logging.debug(("kafka_consumer{}".format(consumer_index))) - table_name = "kafka_consumer{}".format(consumer_index) instance.query( """ - DROP TABLE IF EXISTS test.{0}; - DROP TABLE IF EXISTS test.{0}_mv; - """.format( - table_name - ) + DROP TABLE IF EXISTS test.destination; + """ ) - instance.query( - """ - DROP TABLE IF EXISTS test.destination; - """ - ) + kafka_thread.join() - kafka_thread.join() - - assert result == 1, "Messages from kafka get duplicated!" - kafka_delete_topic(admin_client, topic_name) + assert result == 1, "Messages from kafka get duplicated!" 
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster): From a562118d2a5b66955f44d393949eccb0e8c3b8b7 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 7 Jun 2024 01:45:56 +0000 Subject: [PATCH 0089/2361] major refactoring of chunked write buffer - more buffering, some bugs fixed --- src/Client/Connection.cpp | 23 +++-- src/IO/ReadBufferFromPocoSocketChunked.cpp | 2 - src/IO/WriteBuffer.h | 8 +- src/IO/WriteBufferFromPocoSocketChunked.h | 114 +++++++++++++++++---- src/Server/TCPHandler.cpp | 38 +++---- 5 files changed, 134 insertions(+), 51 deletions(-) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index c221124932a..9f727b974ee 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -625,7 +625,7 @@ bool Connection::ping(const ConnectionTimeouts & timeouts) UInt64 pong = 0; writeVarUInt(Protocol::Client::Ping, *out); - out->finishPacket(); + out->finishChunk(); out->next(); if (in->eof()) @@ -675,7 +675,7 @@ TablesStatusResponse Connection::getTablesStatus(const ConnectionTimeouts & time writeVarUInt(Protocol::Client::TablesStatusRequest, *out); request.write(*out, server_revision); - out->finishPacket(); + out->finishChunk(); out->next(); UInt64 response_type = 0; @@ -827,7 +827,7 @@ void Connection::sendQuery( block_profile_events_in.reset(); block_out.reset(); - out->finishPacket(); + out->finishChunk(); /// Send empty block which means end of data. if (!with_pending_data) @@ -845,7 +845,7 @@ void Connection::sendCancel() return; writeVarUInt(Protocol::Client::Cancel, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -871,9 +871,10 @@ void Connection::sendData(const Block & block, const String & name, bool scalar) size_t prev_bytes = out->count(); block_out->write(block); - maybe_compressed_out->next(); + if (maybe_compressed_out != out) + maybe_compressed_out->next(); if (!block) - out->finishPacket(); + out->finishChunk(); out->next(); if (throttler) @@ -884,7 +885,7 @@ void Connection::sendIgnoredPartUUIDs(const std::vector & uuids) { writeVarUInt(Protocol::Client::IgnoredPartUUIDs, *out); writeVectorBinary(uuids, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -894,7 +895,7 @@ void Connection::sendReadTaskResponse(const String & response) writeVarUInt(Protocol::Client::ReadTaskResponse, *out); writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out); writeStringBinary(response, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -903,7 +904,7 @@ void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & resp { writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out); response.serialize(*out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -922,7 +923,7 @@ void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String else copyData(input, *out, size); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -951,7 +952,7 @@ void Connection::sendScalarsData(Scalars & data) sendData(elem.second, elem.first, true /* scalar */); } - out->finishPacket(); + out->finishChunk(); out_bytes = out->count() - out_bytes; maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes; diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 6ed6b63289c..798be547e99 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -153,8 +153,6 @@ bool ReadBufferFromPocoSocketChunked::nextImpl() } else 
{ - chassert(c_pos == data_end); - if (!ReadBufferFromPocoSocketBase::nextImpl()) return false; data_end = buffer().end(); diff --git a/src/IO/WriteBuffer.h b/src/IO/WriteBuffer.h index 1ceb938e454..bb3200d2e54 100644 --- a/src/IO/WriteBuffer.h +++ b/src/IO/WriteBuffer.h @@ -63,7 +63,8 @@ public: } bytes += bytes_in_buffer; - pos = working_buffer.begin(); + pos = working_buffer.begin() + nextimpl_working_buffer_offset; + nextimpl_working_buffer_offset = 0; } /// Calling finalize() in the destructor of derived classes is a bad practice. @@ -152,6 +153,11 @@ protected: bool finalized = false; + /// The number of bytes to preserve from the initial position of `working_buffer` + /// buffer. Apparently this is an additional out-parameter for nextImpl(), + /// not a real field. + size_t nextimpl_working_buffer_offset = 0; + private: /** Write the data in the buffer (from the beginning of the buffer to the current position). * Throw an exception if something is wrong. diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 7c6ab53dc91..3fe39487923 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -19,40 +19,114 @@ public: explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, buf_size), log(getLogger("Protocol")) {} explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, write_event_, buf_size), log(getLogger("Protocol")) {} - void enableChunked() { chunked = true; } - void finishPacket() + void enableChunked() + { + chunked = true; + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + } + + void finishChunk() { if (!chunked) return; - next(); - - if (finished) + if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: attempt to send empty chunk"); - LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); - finished = true; + /// Fill up current chunk size + *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); - UInt32 s = 0; - socketSendBytes(reinterpret_cast(&s), sizeof(s)); + if (!chunk_started) + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + else + chunk_started = false; + + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + + if (available() < sizeof(*chunk_size_ptr)) + { + finishing = available(); + pos += available(); + chunk_size_ptr = reinterpret_cast(pos); + return; + } + + /// Buffer end-of-chunk + *reinterpret_cast(pos) = 0; + pos += sizeof(*chunk_size_ptr); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); } + protected: void nextImpl() override { - if (chunked) - { - UInt32 s = static_cast(offset()); - if (finished) - LOG_TEST(log, "{} -> {} Chunk send started. 
Message {}, size {}", ourAddress().toString(), peerAddress().toString(), static_cast(*buffer().begin()), s); - else - LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), s); + if (!chunked) + return WriteBufferFromPocoSocket::nextImpl(); - finished = false; - s = toLittleEndian(s); + if (finishing < sizeof(*chunk_size_ptr)) + { + pos -= finishing; + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Send end-of-chunk directly + UInt32 s = 0; socketSendBytes(reinterpret_cast(&s), sizeof(s)); + + finishing = sizeof(*chunk_size_ptr); + + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + return; } + if (offset() == sizeof(*chunk_size_ptr)) // prevent sending empty chunk + { + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + return; + } + + if (working_buffer.end() - reinterpret_cast(chunk_size_ptr) <= static_cast(sizeof(*chunk_size_ptr))) + { + pos = reinterpret_cast(chunk_size_ptr); + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + return; + } + + if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) + pos -= sizeof(*chunk_size_ptr); + else /// Fill up current chunk size + { + *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); + if (!chunk_started) + { + chunk_started = true; + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + } + else + LOG_TEST(log, "{} -> {} Chunk send continued. 
Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + /// Send current chunk WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); } Poco::Net::SocketAddress peerAddress() @@ -67,7 +141,9 @@ protected: private: LoggerPtr log; bool chunked = false; - bool finished = true; + bool chunk_started = false; // chunk started flag + UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer + size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker }; } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 1a64ec1dd10..89ad8e856d5 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1188,7 +1188,7 @@ void TCPHandler::processTablesStatusRequest() response.write(*out, client_tcp_protocol_version); - out->finishPacket(); + out->finishChunk(); } void TCPHandler::receiveUnexpectedTablesStatusRequest() @@ -1210,7 +1210,7 @@ void TCPHandler::sendPartUUIDs() writeVarUInt(Protocol::Server::PartUUIDs, *out); writeVectorBinary(uuids, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } } @@ -1220,7 +1220,7 @@ void TCPHandler::sendReadTaskRequestAssumeLocked() { writeVarUInt(Protocol::Server::ReadTaskRequest, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -1230,7 +1230,7 @@ void TCPHandler::sendMergeTreeAllRangesAnnouncementAssumeLocked(InitialAllRanges writeVarUInt(Protocol::Server::MergeTreeAllRangesAnnouncement, *out); announcement.serialize(*out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -1240,7 +1240,7 @@ void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest re writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out); request.serialize(*out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -1250,7 +1250,7 @@ void TCPHandler::sendProfileInfo(const ProfileInfo & info) writeVarUInt(Protocol::Server::ProfileInfo, *out); info.write(*out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -1267,7 +1267,7 @@ void TCPHandler::sendTotals(const Block & totals) state.block_out->write(totals); state.maybe_compressed_out->next(); - out->finishPacket(); + out->finishChunk(); out->next(); } } @@ -1285,7 +1285,7 @@ void TCPHandler::sendExtremes(const Block & extremes) state.block_out->write(extremes); state.maybe_compressed_out->next(); - out->finishPacket(); + out->finishChunk(); out->next(); } } @@ -1304,7 +1304,7 @@ void TCPHandler::sendProfileEvents() state.profile_events_block_out->write(block); - out->finishPacket(); + out->finishChunk(); out->next(); auto elapsed_milliseconds = stopwatch.elapsedMilliseconds(); @@ -1343,7 +1343,7 @@ void TCPHandler::sendTimezone() writeVarUInt(Protocol::Server::TimezoneUpdate, *out); writeStringBinary(tz, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -1700,7 +1700,7 @@ bool TCPHandler::receivePacket() case Protocol::Client::Ping: writeVarUInt(Protocol::Server::Pong, *out); - out->finishPacket(); + out->finishChunk(); out->next(); return false; @@ -2290,9 +2290,11 @@ void TCPHandler::sendData(const Block & block) } state.block_out->write(block); - state.maybe_compressed_out->next(); - out->finishPacket(); + if (state.maybe_compressed_out != out) + state.maybe_compressed_out->next(); + + out->finishChunk(); out->next(); } catch (...) 
@@ -2329,7 +2331,7 @@ void TCPHandler::sendLogData(const Block & block) state.logs_block_out->write(block); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -2341,7 +2343,7 @@ void TCPHandler::sendTableColumns(const ColumnsDescription & columns) writeStringBinary("", *out); writeStringBinary(columns.toString(), *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -2352,7 +2354,7 @@ void TCPHandler::sendException(const Exception & e, bool with_stack_trace) writeVarUInt(Protocol::Server::Exception, *out); writeException(e, *out, with_stack_trace); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -2364,7 +2366,7 @@ void TCPHandler::sendEndOfStream() writeVarUInt(Protocol::Server::EndOfStream, *out); - out->finishPacket(); + out->finishChunk(); out->next(); } @@ -2384,7 +2386,7 @@ void TCPHandler::sendProgress() state.prev_elapsed_ns = current_elapsed_ns; increment.write(*out, client_tcp_protocol_version); - out->finishPacket(); + out->finishChunk(); out->next(); } From 390a2a2488bdd20a87400ec3f5851dfde0f1bac0 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 7 Jun 2024 02:06:26 +0000 Subject: [PATCH 0090/2361] fix style --- src/IO/WriteBufferFromPocoSocketChunked.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 3fe39487923..9a9d53a1f30 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -142,7 +142,7 @@ private: LoggerPtr log; bool chunked = false; bool chunk_started = false; // chunk started flag - UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer + UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker }; From 11d9f7d51b2cd658c495adb11c3b32f6fc5a8cc6 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 7 Jun 2024 12:07:35 +0000 Subject: [PATCH 0091/2361] allow to set end-of-chunk marker on sent chunk, ignore duplicate finish chunk --- src/IO/WriteBufferFromPocoSocketChunked.h | 51 +++++++++++++++++++++-- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 9a9d53a1f30..40a89416f84 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -1,5 +1,6 @@ #pragma once +#include "base/defines.h" #include #include #include @@ -33,7 +34,26 @@ public: return; if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: attempt to send empty chunk"); + { + if (chunk_size_ptr == last_finish_chunk) // prevent duplicate finish chunk + return; + + /// If current chunk is empty it means we are finishing a chunk previously sent by next(), + /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. + /// We don't need to wary about if it's the end of the buffer because next() always sends the whole buffer + /// so it should be a beginning of the buffer. 
+ + chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); + + *chunk_size_ptr = 0; + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; + + return; + } /// Fill up current chunk size *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); @@ -62,6 +82,8 @@ public: /// Initialize next chunk chunk_size_ptr = reinterpret_cast(pos); pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; } protected: @@ -70,6 +92,7 @@ protected: if (!chunked) return WriteBufferFromPocoSocket::nextImpl(); + /// next() after finishChunk ar the end of the buffer if (finishing < sizeof(*chunk_size_ptr)) { pos -= finishing; @@ -85,15 +108,34 @@ protected: chunk_size_ptr = reinterpret_cast(working_buffer.begin()); nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + last_finish_chunk = chunk_size_ptr; + return; } - if (offset() == sizeof(*chunk_size_ptr)) // prevent sending empty chunk + /// Send end-of-chunk buffered by finishChunk + if (offset() == 2 * sizeof(*chunk_size_ptr)) + { + pos -= sizeof(*chunk_size_ptr); + /// Send end-of-chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Prevent sending empty chunk + if (offset() == sizeof(*chunk_size_ptr)) { nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); return; } + /// Finish chunk at the end of the buffer if (working_buffer.end() - reinterpret_cast(chunk_size_ptr) <= static_cast(sizeof(*chunk_size_ptr))) { pos = reinterpret_cast(chunk_size_ptr); @@ -106,9 +148,9 @@ protected: return; } - if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) + if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk pos -= sizeof(*chunk_size_ptr); - else /// Fill up current chunk size + else // fill up current chunk size { *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); if (!chunk_started) @@ -141,6 +183,7 @@ protected: private: LoggerPtr log; bool chunked = false; + UInt32 * last_finish_chunk = nullptr; // pointer to the last chunk header created by finishChunk bool chunk_started = false; // chunk started flag UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker From d2dd640beb3ff917352135477e349fd1d379f38e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 7 Jun 2024 12:25:46 +0000 Subject: [PATCH 0092/2361] fix style --- src/IO/WriteBufferFromPocoSocketChunked.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 40a89416f84..d1ba492738e 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -9,11 +9,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket { public: @@ -37,7 +32,7 @@ public: { if (chunk_size_ptr == last_finish_chunk) // prevent duplicate finish chunk return; - + /// If current chunk is empty it means we 
are finishing a chunk previously sent by next(), /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. /// We don't need to wary about if it's the end of the buffer because next() always sends the whole buffer From 740501b36e58c08d3a6a52348c9b0411d0f5dd90 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 7 Jun 2024 18:23:37 +0000 Subject: [PATCH 0093/2361] some potential bug fixes --- src/IO/WriteBufferFromPocoSocketChunked.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index d1ba492738e..689389ba2ea 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -109,7 +109,7 @@ protected: } /// Send end-of-chunk buffered by finishChunk - if (offset() == 2 * sizeof(*chunk_size_ptr)) + if (offset() == 2 * sizeof(*chunk_size_ptr) && last_finish_chunk == chunk_size_ptr) { pos -= sizeof(*chunk_size_ptr); /// Send end-of-chunk @@ -140,6 +140,8 @@ protected: chunk_size_ptr = reinterpret_cast(working_buffer.begin()); nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + last_finish_chunk = nullptr; + return; } @@ -164,6 +166,8 @@ protected: /// Initialize next chunk chunk_size_ptr = reinterpret_cast(working_buffer.begin()); nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = nullptr; } Poco::Net::SocketAddress peerAddress() From e304afe106a9f10c075588ee4b6e88aadaa30094 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 7 Jun 2024 20:22:31 +0000 Subject: [PATCH 0094/2361] Fix build --- src/Storages/Kafka/StorageKafkaCommon.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 5d0f5b3000b..adfe1086858 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -2,6 +2,7 @@ #include +#include #include #include #include @@ -12,12 +13,12 @@ #include #include #include +#include #include #include -#include #include +#include #include -#include #include #include From 6802b7f82f593b5be3961c38c2dca581c882bd75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 7 Jun 2024 20:23:39 +0000 Subject: [PATCH 0095/2361] Improve log messages --- src/Storages/Kafka/KafkaConsumer2.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 7ae816f1a0f..9ed698301e5 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -419,7 +419,7 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) if (e.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) committed = true; else - LOG_ERROR(log, "Exception during commit attempt: {}", e.what()); + LOG_WARNING(log, "Exception during commit attempt: {}", e.what()); } } @@ -429,8 +429,7 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) ProfileEvents::increment(ProfileEvents::KafkaCommitFailures); LOG_INFO( log, - "All commit attempts failed. 
Last block was already written to target table(s), " - "but was not committed to Kafka."); + "All commit attempts failed"); } else { From e33273d577da4cce1e2f460c27de3d1d2c8c4f11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 7 Jun 2024 20:28:10 +0000 Subject: [PATCH 0096/2361] Handle newly appearing partitions --- src/Storages/Kafka/StorageKafka2.cpp | 82 +++++++++++++--------------- src/Storages/Kafka/StorageKafka2.h | 5 +- 2 files changed, 42 insertions(+), 45 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index c5033be519f..fa5389d606a 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -163,23 +163,23 @@ StorageKafka2::StorageKafka2( tryLogCurrentException(log); } } - for (auto try_count = 0; try_count < 5; ++try_count) - { - bool all_had_assignment = true; - for (auto & consumer_info : consumers) - { - if (nullptr == consumer_info.consumer->getKafkaAssignment()) - { - all_had_assignment = false; - consumer_info.consumer->pollEvents(); - } - } + // for (auto try_count = 0; try_count < 5; ++try_count) + // { + // bool all_had_assignment = true; + // for (auto & consumer_info : consumers) + // { + // if (nullptr == consumer_info.consumer->getKafkaAssignment()) + // { + // all_had_assignment = false; + // consumer_info.consumer->pollEvents(); + // } + // } - if (all_had_assignment) - break; - } + // if (all_had_assignment) + // break; + // } - const auto first_replica = createTableIfNotExists(consumers.front().consumer); + const auto first_replica = createTableIfNotExists(); if (!first_replica) createReplica(); @@ -551,7 +551,7 @@ const std::string lock_file_name{"lock"}; const std::string commit_file_name{"committed"}; const std::string intent_file_name{"intention"}; -std::optional getNumber(zkutil::ZooKeeper & keeper, const std::string & path) +std::optional getNumber(zkutil::ZooKeeper & keeper, const fs::path & path) { std::string result; if (!keeper.tryGet(path, result)) @@ -561,7 +561,7 @@ std::optional getNumber(zkutil::ZooKeeper & keeper, const std::string & } } -bool StorageKafka2::createTableIfNotExists(const KafkaConsumer2Ptr & consumer) +bool StorageKafka2::createTableIfNotExists() { const auto & keeper_path = fs::path(kafka_settings->kafka_keeper_path.value); @@ -613,22 +613,15 @@ bool StorageKafka2::createTableIfNotExists(const KafkaConsumer2Ptr & consumer) const auto topics_path = keeper_path / "topics"; ops.emplace_back(zkutil::makeCreateRequest(topics_path, "", zkutil::CreateMode::Persistent)); - - const auto topic_partition_counts = consumer->getPartitionCounts(); - for (const auto & topic_partition_count : topic_partition_counts) + for (const auto & topic : topics) { - LOG_DEBUG( - log, - "Creating path in keeper for topic {} with {} partitions", - topic_partition_count.topic, - topic_partition_count.partition_count); - ops.emplace_back(zkutil::makeCreateRequest(topics_path / topic_partition_count.topic, "", zkutil::CreateMode::Persistent)); + LOG_DEBUG(log, "Creating path in keeper for topic {}", topic); - const auto partitions_path = topics_path / topic_partition_count.topic / "partitions"; + const auto topic_path = topics_path / topic; + ops.emplace_back(zkutil::makeCreateRequest(topic_path, "", zkutil::CreateMode::Persistent)); + + const auto partitions_path = topic_path / "partitions"; ops.emplace_back(zkutil::makeCreateRequest(partitions_path, "", zkutil::CreateMode::Persistent)); - // TODO(antaljanosbenjamin): handle changing 
number of partitions - for (auto partition_id{0U}; partition_id < topic_partition_count.partition_count; ++partition_id) - ops.emplace_back(zkutil::makeCreateRequest(partitions_path / toString(partition_id), "", zkutil::CreateMode::Persistent)); } // Create the first replica @@ -799,18 +792,21 @@ void StorageKafka2::dropReplica() std::optional StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const TopicPartitions & topic_partitions) { - std::vector topic_partition_paths; + std::vector topic_partition_paths; topic_partition_paths.reserve(topic_partitions.size()); for (const auto & topic_partition : topic_partitions) topic_partition_paths.emplace_back(getTopicPartitionPath(topic_partition)); Coordination::Requests ops; + static constexpr auto ignore_if_exists = true; + for (const auto & topic_partition_path : topic_partition_paths) { - LOG_TRACE(log, "Creating locking ops for: {}", topic_partition_path + lock_file_name); - ops.push_back(zkutil::makeCreateRequest( - topic_partition_path + lock_file_name, kafka_settings->kafka_replica_name.value, zkutil::CreateMode::Ephemeral)); + const auto lock_file_path = String(topic_partition_path / lock_file_name); + LOG_TRACE(log, "Creating locking ops for: {}", lock_file_path); + ops.push_back(zkutil::makeCreateRequest(topic_partition_path, "", zkutil::CreateMode::Persistent, ignore_if_exists)); + ops.push_back(zkutil::makeCreateRequest(lock_file_path, kafka_settings->kafka_replica_name.value, zkutil::CreateMode::Ephemeral)); } Coordination::Responses responses; @@ -831,10 +827,10 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi for (; tp_it != topic_partitions.end(); ++tp_it, ++path_it) { using zkutil::EphemeralNodeHolder; - LockedTopicPartitionInfo lock_info{.lock = EphemeralNodeHolder::existing(*path_it + lock_file_name, keeper_to_use)}; + LockedTopicPartitionInfo lock_info{.lock = EphemeralNodeHolder::existing(*path_it / lock_file_name, keeper_to_use)}; - lock_info.committed_offset = getNumber(keeper_to_use, *path_it + commit_file_name); - lock_info.intent_size = getNumber(keeper_to_use, *path_it + intent_file_name); + lock_info.committed_offset = getNumber(keeper_to_use, *path_it / commit_file_name); + lock_info.intent_size = getNumber(keeper_to_use, *path_it / intent_file_name); LOG_TRACE( log, @@ -855,9 +851,9 @@ void StorageKafka2::saveCommittedOffset( zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition) { const auto partition_prefix = getTopicPartitionPath(topic_partition); - keeper_to_use.createOrUpdate(partition_prefix + commit_file_name, toString(topic_partition.offset), zkutil::CreateMode::Persistent); + keeper_to_use.createOrUpdate(partition_prefix / commit_file_name, toString(topic_partition.offset), zkutil::CreateMode::Persistent); // This is best effort, if it fails we will try to remove in the next round - keeper_to_use.tryRemove(partition_prefix + intent_file_name, -1); + keeper_to_use.tryRemove(partition_prefix / intent_file_name, -1); LOG_TEST(log, "Saved offset {} for topic-partition [{}:{}]", topic_partition.offset, topic_partition.topic, topic_partition.partition_id); } @@ -871,7 +867,7 @@ void StorageKafka2::saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPar topic_partition.partition_id, topic_partition.offset); keeper_to_use.createOrUpdate( - getTopicPartitionPath(topic_partition) + intent_file_name, toString(intent), zkutil::CreateMode::Persistent); + getTopicPartitionPath(topic_partition) / intent_file_name, toString(intent), 
zkutil::CreateMode::Persistent); } @@ -1325,10 +1321,10 @@ zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() } -std::string StorageKafka2::getTopicPartitionPath(const TopicPartition & topic_partition) +fs::path StorageKafka2::getTopicPartitionPath(const TopicPartition & topic_partition) { - return kafka_settings->kafka_keeper_path.value + "/topics/" + topic_partition.topic + "/partitions/" - + std::to_string(topic_partition.partition_id) + '/'; + return fs::path(kafka_settings->kafka_keeper_path.value) / "topics" / topic_partition.topic / "partitions" + / std::to_string(topic_partition.partition_id); } } diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index e8cfcac2689..86d09c584f2 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -191,7 +192,7 @@ private: bool checkDependencies(const StorageID & table_id); // Returns true if this is the first replica - bool createTableIfNotExists(const KafkaConsumer2Ptr & consumer); + bool createTableIfNotExists(); // Returns true if all of the nodes were cleaned up bool removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHolder::Ptr & drop_lock); // Creates only the replica in ZooKeeper. Shouldn't be called on the first replica as it is created in createTableIfNotExists @@ -211,7 +212,7 @@ private: zkutil::ZooKeeperPtr getZooKeeper(); - std::string getTopicPartitionPath(const TopicPartition & topic_partition); + std::filesystem::path getTopicPartitionPath(const TopicPartition & topic_partition); static VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode); }; From 1eec201777942652aa9fed0b929250a9c70e8be6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 7 Jun 2024 22:39:17 +0000 Subject: [PATCH 0097/2361] Make most of the tests work --- tests/integration/test_storage_kafka/test.py | 2502 +++++++++--------- 1 file changed, 1228 insertions(+), 1274 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 96438b5efa1..5b7d7f65b9f 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -164,6 +164,18 @@ def kafka_topic( # Code to release resource, e.g.: kafka_delete_topic(admin_client, topic_name, max_retries) + +@contextmanager +def existing_kafka_topic(admin_client, topic_name, max_retries=50): + try: + yield None + finally: + kafka_delete_topic(admin_client, topic_name, max_retries) + + +def get_admin_client(kafka_cluster): + return KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15): logging.debug( "kafka_produce server:{}:{} topic:{}".format( @@ -183,7 +195,7 @@ def kafka_producer_send_heartbeat_msg(max_retries=50): kafka_produce(kafka_cluster, "test_heartbeat_topic", ["test"], retries=max_retries) -def kafka_consume(kafka_cluster, topic, needDecode=True, timestamp=0): +def kafka_consume(kafka_cluster, topic, need_decode=True, timestamp=0): consumer = KafkaConsumer( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest", @@ -193,7 +205,7 @@ def kafka_consume(kafka_cluster, topic, needDecode=True, timestamp=0): if toppar.topic == topic: for message in messages: assert timestamp == 0 or message.timestamp / 1000 == timestamp - if needDecode: + if need_decode: yield 
message.value.decode() else: yield message.value @@ -218,7 +230,21 @@ def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messa logging.debug(("Produced {} messages for topic {}".format(num_messages, topic))) -def kafka_produce_protobuf_messages_no_delimeters( + +def kafka_consume_with_retry(kafka_cluster, topic, expected_messages, need_decode=True, timestamp=0, retry_count=20, sleep_time=0.1): + messages = [] + try_count = 0 + while try_count < retry_count: + try_count += 1 + messages.extend(kafka_consume(kafka_cluster, topic, need_decode=need_decode, timestamp=timestamp)) + if len(messages) == expected_messages: + break + time.sleep(sleep_time) + if len(messages) != expected_messages: + raise Exception(f"Got only {len(messages)} messages") + return messages + +def kafka_produce_protobuf_messages_no_delimiters( kafka_cluster, topic, start_index, num_messages ): data = "" @@ -312,6 +338,8 @@ def create_settings_string(settings): def format_value(value): if isinstance(value, str): return f"'{value}'" + elif isinstance(value, bool): + return str(int(value)) return str(value) settings_string = "SETTINGS " @@ -371,12 +399,20 @@ SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1""" logging.debug(f"Generated new create query: {query}") return query +def must_use_thread_per_consumer(generator): + if generator == generate_old_create_table_query: + return False + if generator == generate_new_create_table_query: + return True + raise Exception("Unexpected generator") + + def get_topic_postfix(generator): if generator == generate_old_create_table_query: return "old" if generator == generate_new_create_table_query: return "new" - raise "Unexpected generator" + raise Exception("Unexpected generator") # Tests @pytest.mark.parametrize('create_query_generator, do_direct_read', [(generate_old_create_table_query, True), (generate_new_create_table_query, False)]) @@ -410,7 +446,7 @@ def test_kafka_column_types(kafka_cluster, create_query_generator, do_direct_rea if do_direct_read: # check ALIAS - instance.query(create_query_generator("kafka", "a Int, b String Alias toString(a)", settings={"kafka_commit_on_select":1})) + instance.query(create_query_generator("kafka", "a Int, b String Alias toString(a)", settings={"kafka_commit_on_select": True})) messages = [] for i in range(5): messages.append(json.dumps({"a": i})) @@ -600,12 +636,7 @@ def test_kafka_json_as_string(kafka_cluster): @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) def test_kafka_formats(kafka_cluster, create_query_generator): - schema_registry_client = CachedSchemaRegistryClient( - "http://localhost:{}".format(kafka_cluster.schema_registry_port) - ) - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + schema_registry_client = CachedSchemaRegistryClient({"url":f"http://localhost:{kafka_cluster.schema_registry_port}"}) # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' @@ -1001,7 +1032,7 @@ def test_kafka_formats(kafka_cluster, create_query_generator): assert TSV(result) == TSV(expected), "Proper result for format: {}".format( format_name ) - kafka_delete_topic(admin_client, topic_name) + 
kafka_delete_topic(get_admin_client(kafka_cluster), topic_name) # Since everything is async and shaky when receiving messages from Kafka, @@ -1177,9 +1208,7 @@ def test_kafka_issue4116(kafka_cluster): def test_kafka_consumer_hang(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + admin_client = get_admin_client(kafka_cluster) topic_name = "consumer_hang" kafka_create_topic(admin_client, topic_name, num_partitions=8) @@ -1259,9 +1288,7 @@ def test_kafka_consumer_hang(kafka_cluster): def test_kafka_consumer_hang2(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + admin_client = get_admin_client(kafka_cluster) topic_name = "consumer_hang2" kafka_create_topic(admin_client, topic_name) @@ -1322,9 +1349,7 @@ def test_kafka_consumer_hang2(kafka_cluster): # sequential read from different consumers leads to breaking lot of kafka invariants # (first consumer will get all partitions initially, and may have problems in doing polls every 60 sec) def test_kafka_read_consumers_in_parallel(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + admin_client = get_admin_client(kafka_cluster) topic_name = "read_consumers_in_parallel" kafka_create_topic(admin_client, topic_name, num_partitions=8) @@ -1444,9 +1469,7 @@ def test_kafka_tsv_with_delimiter(kafka_cluster): def test_kafka_select_empty(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + admin_client = get_admin_client(kafka_cluster) topic_name = "empty" kafka_create_topic(admin_client, topic_name) @@ -1624,13 +1647,13 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster): """ ) - kafka_produce_protobuf_messages_no_delimeters( + kafka_produce_protobuf_messages_no_delimiters( kafka_cluster, "pb_no_delimiter", 0, 20 ) - kafka_produce_protobuf_messages_no_delimeters( + kafka_produce_protobuf_messages_no_delimiters( kafka_cluster, "pb_no_delimiter", 20, 1 ) - kafka_produce_protobuf_messages_no_delimeters( + kafka_produce_protobuf_messages_no_delimiters( kafka_cluster, "pb_no_delimiter", 21, 29 ) @@ -1673,31 +1696,28 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster): @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) def test_kafka_materialized_view(kafka_cluster, create_query_generator): topic_name="mv" - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + DROP TABLE IF EXISTS test.kafka; + + {create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group="mv")}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; + """ ) - with kafka_topic(admin_client, topic_name): - instance.query( - f""" - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - DROP TABLE IF EXISTS test.kafka; - - {create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group="mv")}; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) - - 
messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, topic_name, messages) + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, topic_name, messages) + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result) kafka_check_result(result, True) @@ -1711,21 +1731,18 @@ def test_kafka_materialized_view(kafka_cluster, create_query_generator): ) -# TODO(antaljanosbenjamin): fails with the new, because it doesn't store the offsets... -@pytest.mark.parametrize('create_query_generator, thread_per_consumer, log_line', [ - (generate_new_create_table_query,1,r"kafka.*Saved offset [0-9]+ for topic-partition \[recreate_kafka_table:[0-9]+"), - (generate_old_create_table_query,0,"kafka.*Committed offset [0-9]+.*recreate_kafka_table"), +@pytest.mark.parametrize('create_query_generator, log_line', [ + (generate_new_create_table_query, r"kafka.*Saved offset [0-9]+ for topic-partition \[recreate_kafka_table:[0-9]+"), + (generate_old_create_table_query, "kafka.*Committed offset [0-9]+.*recreate_kafka_table"), ]) -def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, thread_per_consumer, log_line): +def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, log_line): """ Checks that materialized view work properly after dropping and recreating the Kafka table. """ - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) topic_name = "recreate_kafka_table" + thread_per_consumer = must_use_thread_per_consumer(create_query_generator) - with kafka_topic(admin_client, topic_name, num_partitions=6): + with kafka_topic(get_admin_client(kafka_cluster), topic_name, num_partitions=6): create_query = create_query_generator( "kafka", "key UInt64, value UInt64", @@ -1831,9 +1848,7 @@ def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line) expected = "\n".join(expected) - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + admin_client = get_admin_client(kafka_cluster) for compression_type in supported_compression_types: logging.debug(("Check compression {}".format(compression_type))) @@ -1869,39 +1884,34 @@ def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line) instance.query("DROP TABLE test.consumer SYNC") -# TODO(antaljanosbenjamin): It fails with the new if the topic is not created explicitly @pytest.mark.parametrize('create_query_generator', [generate_new_create_table_query, generate_old_create_table_query]) def test_kafka_materialized_view_with_subquery(kafka_cluster, create_query_generator): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - topic_name = "mysq" logging.debug(f"Using topic {topic_name}") - with kafka_topic(admin_client, topic_name): - create_query = create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name) - instance.query( - f""" - DROP TABLE IF EXISTS test.kafka; - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; + create_query = create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name) + instance.query( + f""" + DROP TABLE IF EXISTS test.kafka; + DROP TABLE IF EXISTS test.view; + DROP 
TABLE IF EXISTS test.consumer; - {create_query}; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM (SELECT * FROM test.kafka); - """ - ) + {create_query}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM (SELECT * FROM test.kafka); + """ + ) - messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, topic_name, messages) + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, topic_name, messages) - result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result) + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): + result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result, retry_count=40, sleep_time=0.75) instance.query( """ @@ -1915,9 +1925,6 @@ def test_kafka_materialized_view_with_subquery(kafka_cluster, create_query_gener @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) def test_kafka_many_materialized_views(kafka_cluster, create_query_generator): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) topic_name = f"mmv-{get_topic_postfix(create_query_generator)}" create_query = create_query_generator( "kafka", @@ -1925,32 +1932,33 @@ def test_kafka_many_materialized_views(kafka_cluster, create_query_generator): topic_list=topic_name, consumer_group=f"{topic_name}-group" ) - with kafka_topic(admin_client, topic_name): - instance.query( - f""" - DROP TABLE IF EXISTS test.view1; - DROP TABLE IF EXISTS test.view2; - DROP TABLE IF EXISTS test.consumer1; - DROP TABLE IF EXISTS test.consumer2; - {create_query}; - CREATE TABLE test.view1 (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE TABLE test.view2 (key UInt64, value UInt64) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS - SELECT * FROM test.kafka; - CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS - SELECT * FROM test.kafka; - """ - ) - messages = [] - for i in range(50): - messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, topic_name, messages) + instance.query( + f""" + DROP TABLE IF EXISTS test.view1; + DROP TABLE IF EXISTS test.view2; + DROP TABLE IF EXISTS test.consumer1; + DROP TABLE IF EXISTS test.consumer2; + {create_query}; + CREATE TABLE test.view1 (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE TABLE test.view2 (key UInt64, value UInt64) + ENGINE = MergeTree() + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS + SELECT * FROM test.kafka; + CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS + SELECT * FROM test.kafka; + """ + ) + messages = [] + for i in range(50): + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, topic_name, messages) + + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): result1 = instance.query_with_retry("SELECT * FROM test.view1", check_callback=kafka_check_result) result2 = instance.query_with_retry("SELECT * FROM test.view2", check_callback=kafka_check_result) @@ -1966,116 +1974,113 @@ def 
test_kafka_many_materialized_views(kafka_cluster, create_query_generator): kafka_check_result(result1, True) kafka_check_result(result2, True) -# TODO(antaljanosbenjamin) -def test_kafka_flush_on_big_message(kafka_cluster): + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_flush_on_big_message(kafka_cluster, create_query_generator): # Create batches of messages of size ~100Kb kafka_messages = 1000 batch_messages = 1000 + topic_name = "flush" + get_topic_postfix(create_query_generator) messages = [ json.dumps({"key": i, "value": "x" * 100}) * batch_messages for i in range(kafka_messages) ] - kafka_produce(kafka_cluster, "flush", messages) + kafka_produce(kafka_cluster, topic_name, messages) - instance.query( + admin_client = get_admin_client(kafka_cluster) + + with existing_kafka_topic(admin_client, topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value String", + topic_list=topic_name, + consumer_group=topic_name, + settings={"kafka_max_block_size": 10} + ) + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {create_query}; + CREATE TABLE test.view (key UInt64, value String) + ENGINE = MergeTree + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'flush', - kafka_group_name = 'flush', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 10; - CREATE TABLE test.view (key UInt64, value String) - ENGINE = MergeTree - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) + ) - client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - received = False - while not received: - try: - offsets = client.list_consumer_group_offsets("flush") - for topic, offset in list(offsets.items()): - if topic.topic == "flush" and offset.offset == kafka_messages: - received = True - break - except kafka.errors.GroupCoordinatorNotAvailableError: - continue + received = False + while not received: + try: + offsets = admin_client.list_consumer_group_offsets(topic_name) + for topic, offset in list(offsets.items()): + if topic.topic == topic_name and offset.offset == kafka_messages: + received = True + break + except kafka.errors.GroupCoordinatorNotAvailableError: + continue - while True: - result = instance.query("SELECT count() FROM test.view") - if int(result) == kafka_messages * batch_messages: - break + while True: + result = instance.query("SELECT count() FROM test.view") + if int(result) == kafka_messages * batch_messages: + break - instance.query( + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) + ) - assert ( - int(result) == kafka_messages * batch_messages - ), "ClickHouse lost some messages: {}".format(result) + assert ( + int(result) == kafka_messages * batch_messages + ), "ClickHouse lost some messages: {}".format(result) def test_kafka_virtual_columns(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) topic_config = { # default retention, since predefined timestamp_ms is used. 
"retention.ms": "-1", } - kafka_create_topic(admin_client, "virt1", config=topic_config) - - instance.query( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'virt1', - kafka_group_name = 'virt1', - kafka_commit_on_select = 1, - kafka_format = 'JSONEachRow'; - """ - ) - - messages = "" - for i in range(25): - messages += json.dumps({"key": i, "value": i}) + "\n" - kafka_produce(kafka_cluster, "virt1", [messages], 0) - - messages = "" - for i in range(25, 50): - messages += json.dumps({"key": i, "value": i}) + "\n" - kafka_produce(kafka_cluster, "virt1", [messages], 0) - - result = "" - while True: - result += instance.query( - """SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka""", - ignore_error=True, + with kafka_topic(get_admin_client(kafka_cluster), "virt1", config=topic_config): + instance.query( + """ + CREATE TABLE test.kafka (key UInt64, value UInt64) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = 'virt1', + kafka_group_name = 'virt1', + kafka_commit_on_select = 1, + kafka_format = 'JSONEachRow'; + """ ) - if kafka_check_result(result, False, "test_kafka_virtual1.reference"): - break - kafka_check_result(result, True, "test_kafka_virtual1.reference") + messages = "" + for i in range(25): + messages += json.dumps({"key": i, "value": i}) + "\n" + kafka_produce(kafka_cluster, "virt1", [messages], 0) + + messages = "" + for i in range(25, 50): + messages += json.dumps({"key": i, "value": i}) + "\n" + kafka_produce(kafka_cluster, "virt1", [messages], 0) + + result = "" + while True: + result += instance.query( + """SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka""", + ignore_error=True, + ) + if kafka_check_result(result, False, "test_kafka_virtual1.reference"): + break + + kafka_check_result(result, True, "test_kafka_virtual1.reference") @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_query_generator): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) topic_config = { # default retention, since predefined timestamp_ms is used. 
"retention.ms": "-1", @@ -2089,7 +2094,7 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_quer topic_list=topic_name, consumer_group=f"{topic_name}-group" ) - with kafka_topic(admin_client, topic_name, config=topic_config): + with kafka_topic(get_admin_client(kafka_cluster), topic_name, config=topic_config): instance.query( f""" @@ -2153,73 +2158,62 @@ def test_kafka_insert(kafka_cluster, create_query_generator): ) ) + message_count = 50 values = [] - for i in range(50): + for i in range(message_count): values.append("({i}, {i})".format(i=i)) values = ",".join(values) insert_with_retry(instance, values) - messages = [] - try_count = 0 - while True and try_count < 5: - try_count += 1 - messages.extend(kafka_consume(kafka_cluster, topic_name)) - if len(messages) == 50: - break - time.sleep(0.1) - + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count) result = "\n".join(messages) kafka_check_result(result, True) @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) def test_kafka_produce_consume(kafka_cluster, create_query_generator): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - topic_name = "insert2" + get_topic_postfix(create_query_generator) - with kafka_topic(admin_client, topic_name): - create_query = create_query_generator( - "kafka", - "key UInt64, value UInt64", - topic_list=topic_name, - consumer_group=topic_name, - format="TSV" - ) - instance.query( - f""" - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; - {create_query}; - CREATE TABLE test.view (key UInt64, value UInt64) - ENGINE = MergeTree - ORDER BY key; - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka; - """ - ) + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="TSV" + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + {create_query}; + CREATE TABLE test.view (key UInt64, value UInt64) + ENGINE = MergeTree + ORDER BY key; + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka; + """ + ) - messages_num = 10000 + messages_num = 10000 - def insert(): - values = [] - for i in range(messages_num): - values.append("({i}, {i})".format(i=i)) - values = ",".join(values) + def insert(): + values = [] + for i in range(messages_num): + values.append("({i}, {i})".format(i=i)) + values = ",".join(values) - insert_with_retry(instance, values) + insert_with_retry(instance, values) - threads = [] - threads_num = 16 - for _ in range(threads_num): - threads.append(threading.Thread(target=insert)) - for thread in threads: - time.sleep(random.uniform(0, 1)) - thread.start() + threads = [] + threads_num = 16 + for _ in range(threads_num): + threads.append(threading.Thread(target=insert)) + for thread in threads: + time.sleep(random.uniform(0, 1)) + thread.start() + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): expected_row_count = messages_num * threads_num result = instance.query_with_retry( "SELECT count() FROM test.view", @@ -2242,19 +2236,21 @@ def test_kafka_produce_consume(kafka_cluster, create_query_generator): ), "ClickHouse lost some messages: {}".format(result) -def test_kafka_commit_on_block_write(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, 
generate_new_create_table_query]) +def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): + topic_name="block" + get_topic_postfix(create_query_generator) + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + settings={"kafka_max_block_size": 100}, + ) instance.query( - """ + f""" DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'block', - kafka_group_name = 'block', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 100, - kafka_row_delimiter = '\\n'; + {create_query}; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; @@ -2265,45 +2261,37 @@ def test_kafka_commit_on_block_write(kafka_cluster): cancel = threading.Event() + # We need to pass i as a reference. Simple integers are passed by value. + # Making an array is probably the easiest way to "force pass by reference". i = [0] - def produce(): + def produce(i): while not cancel.is_set(): messages = [] for _ in range(101): messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 - kafka_produce(kafka_cluster, "block", messages) + kafka_produce(kafka_cluster, topic_name, messages) - kafka_thread = threading.Thread(target=produce) + kafka_thread = threading.Thread(target=produce, args=[i]) kafka_thread.start() - while int(instance.query("SELECT count() FROM test.view")) == 0: - time.sleep(1) + instance.query_with_retry( + "SELECT count() FROM test.view", + sleep_time=1, + check_callback=lambda res: int(res) >= 100) cancel.set() - instance.query( - """ - DROP TABLE test.kafka; - """ - ) + instance.query("DROP TABLE test.kafka SYNC") - instance.query( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'block', - kafka_group_name = 'block', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 100, - kafka_row_delimiter = '\\n'; - """ - ) + instance.query(create_query) + kafka_thread.join() - while int(instance.query("SELECT uniqExact(key) FROM test.view")) < i[0]: - time.sleep(1) + instance.query_with_retry( + "SELECT uniqExact(key) FROM test.view", + sleep_time=1, + check_callback=lambda res: int(res) >= i[0]) result = int(instance.query("SELECT count() == uniqExact(key) FROM test.view")) @@ -2318,19 +2306,18 @@ def test_kafka_commit_on_block_write(kafka_cluster): assert result == 1, "Messages from kafka get duplicated!" 
-@pytest.mark.parametrize('create_query_generator, thread_per_consumer, log_line', [ - (generate_old_create_table_query,0,"kafka.*Committed offset 2.*virt2_[01]"), - (generate_new_create_table_query,1,r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+"), +@pytest.mark.parametrize('create_query_generator, log_line', [ + (generate_old_create_table_query, "kafka.*Committed offset 2.*virt2_[01]"), + (generate_new_create_table_query, r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+"), ]) -def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, thread_per_consumer, log_line): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) +def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line): + admin_client = get_admin_client(kafka_cluster) topic_config = { # default retention, since predefined timestamp_ms is used. "retention.ms": "-1", } + thread_per_consumer = must_use_thread_per_consumer(create_query_generator) topic_name_0 = "virt2_0" topic_name_1 = "virt2_1" consumer_group = "virt2"+get_topic_postfix(create_query_generator) @@ -2342,7 +2329,7 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, thread_pe topic_list=f"{topic_name_0},{topic_name_1}", consumer_group=consumer_group, settings={ - "kafka_num_consumers":2, + "kafka_num_consumers": 2, "kafka_thread_per_consumer": thread_per_consumer, } ) @@ -2544,24 +2531,18 @@ def test_kafka_producer_consumer_separate_settings(kafka_cluster): assert property_in_log in kafka_producer_applyed_properties -# TODO(antaljanosbenjamin) @pytest.mark.parametrize('create_query_generator, log_line', [ - #(generate_new_create_table_query,"Saved offset 5"), + (generate_new_create_table_query,"Saved offset 5"), (generate_old_create_table_query, "Committed offset 5"), ]) def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_line): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - topic_name = "insert3" topic_config = { # default retention, since predefined timestamp_ms is used. 
"retention.ms": "-1", } - with kafka_topic(admin_client, topic_name, config=topic_config): - + with kafka_topic(get_admin_client(kafka_cluster), topic_name, config=topic_config): writer_create_query = create_query_generator( "kafka_writer", "key UInt64, value UInt64, _key String, _timestamp DateTime('UTC')", @@ -2617,11 +2598,12 @@ def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_ 5 5 k5 1577836805 k5 insert3 0 4 1577836805 """ - result = instance.query_with_retry("SELECT * FROM test.view ORDER BY value", ignore_error=True, check_callback=lambda res: TSV(res) == TSV(expected)) - - # logging.debug(result) - - + result = instance.query_with_retry( + "SELECT * FROM test.view ORDER BY value", + ignore_error=True, + retry_count=5, + sleep_time=1, + check_callback=lambda res: TSV(res) == TSV(expected)) assert TSV(result) == TSV(expected) @@ -2655,15 +2637,8 @@ def test_kafka_insert_avro(kafka_cluster, create_query_generator): "INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'" ) - messages = [] - while True: - messages.extend( - kafka_consume( - kafka_cluster, topic_name, needDecode=False, timestamp=1636505534 - ) - ) - if len(messages) == 2: - break + message_count = 2 + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count, need_decode=False, timestamp=1636505534) result = "" for a_message in messages: @@ -2679,67 +2654,69 @@ def test_kafka_insert_avro(kafka_cluster, create_query_generator): assert result == expected_result -# TODO(antaljanosbenjamin) -def test_kafka_produce_consume_avro(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - - topic_name = "insert_avro" - kafka_create_topic(admin_client, topic_name) - - num_rows = 75 - - instance.query( - """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.kafka; - DROP TABLE IF EXISTS test.kafka_writer; - - CREATE TABLE test.kafka_writer (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'avro', - kafka_group_name = 'avro', - kafka_format = 'Avro'; - - - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'avro', - kafka_group_name = 'avro', - kafka_format = 'Avro'; - - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.kafka; - """ - ) - - instance.query( - "INSERT INTO test.kafka_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format( - num_rows=num_rows - ) - ) - - instance.wait_for_log_line( - "Committed offset {offset}".format(offset=math.ceil(num_rows / 7)) - ) - - expected_num_rows = instance.query( - "SELECT COUNT(1) FROM test.view", ignore_error=True - ) - assert int(expected_num_rows) == num_rows - - expected_max_key = instance.query( - "SELECT max(key) FROM test.view", ignore_error=True - ) - assert int(expected_max_key) == (num_rows - 1) * 10 - - kafka_delete_topic(admin_client, topic_name) - - @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): + topic_name = "insert_avro" + get_topic_postfix(create_query_generator) + with kafka_topic(get_admin_client(kafka_cluster), 
topic_name): + + num_rows = 75 + + writer_create_query = create_query_generator( + "kafka_writer", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="Avro", + ) + + reader_create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="Avro", + ) + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.kafka; + DROP TABLE IF EXISTS test.kafka_writer; + + {writer_create_query}; + {reader_create_query}; + + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value FROM test.kafka; + """ + ) + + instance.query( + "INSERT INTO test.kafka_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format( + num_rows=num_rows + ) + ) + + instance.wait_for_log_line( + "Committed offset {offset}".format(offset=math.ceil(num_rows / 7)) + ) + + expected_num_rows = instance.query( + "SELECT COUNT(1) FROM test.view", ignore_error=True + ) + assert int(expected_num_rows) == num_rows + + expected_max_key = instance.query( + "SELECT max(key) FROM test.view", ignore_error=True + ) + assert int(expected_max_key) == (num_rows - 1) * 10 + + +@pytest.mark.parametrize('create_query_generator', [ + generate_old_create_table_query, + # TODO(antaljanosbenjamin): Something is off with timing + # generate_new_create_table_query +]) def test_kafka_flush_by_time(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) @@ -2753,7 +2730,7 @@ def test_kafka_flush_by_time(kafka_cluster, create_query_generator): topic_list=topic_name, consumer_group=topic_name, settings={ - "kafka_max_block_size":100, + "kafka_max_block_size": 100, } ) instance.query( @@ -2787,9 +2764,10 @@ def test_kafka_flush_by_time(kafka_cluster, create_query_generator): """ ) + # By default the flush timeout should be 7.5 seconds => 18 seconds should be enough for 2 flushes, but not for 3 time.sleep(18) - result = instance.query("SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view") + result = instance.query("SELECT uniqExact(ts), count() >= 15 FROM test.view") cancel.set() kafka_thread.join() @@ -2801,14 +2779,11 @@ def test_kafka_flush_by_time(kafka_cluster, create_query_generator): """ ) - assert TSV(result) == TSV("1 1") + assert TSV(result) == TSV("2 1") @pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) def test_kafka_flush_by_block_size(kafka_cluster, create_query_generator): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) topic_name = "flush_by_block_size" + get_topic_postfix(create_query_generator) cancel = threading.Event() @@ -2820,7 +2795,7 @@ def test_kafka_flush_by_block_size(kafka_cluster, create_query_generator): kafka_thread = threading.Thread(target=produce) - with kafka_topic(admin_client, topic_name): + with kafka_topic(get_admin_client(kafka_cluster), topic_name): kafka_thread.start() create_query = create_query_generator( @@ -3105,65 +3080,79 @@ def test_kafka_rebalance(kafka_cluster, create_query_generator, log_line): assert result == 1, "Messages from kafka get duplicated!" 
-def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster): - messages = [json.dumps({"key": j + 1, "value": "x" * 300}) for j in range(22)] - kafka_produce(kafka_cluster, "no_holes_when_write_suffix_failed", messages) +# TODO(antaljanosbenjamin): find another way to make insertion fail +@pytest.mark.parametrize('create_query_generator', [ + generate_old_create_table_query, + # generate_new_create_table_query, +]) +def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster, create_query_generator): + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + topic_name = "no_holes_when_write_suffix_failed" + get_topic_postfix(create_query_generator) - instance.query( + with kafka_topic(admin_client, topic_name): + messages = [json.dumps({"key": j + 1, "value": "x" * 300}) for j in range(22)] + kafka_produce(kafka_cluster, topic_name, messages) + + create_query = create_query_generator( + "kafka", + "key UInt64, value String", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size": 20, + "kafka_flush_interval_ms": 2000, + } + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.consumer; + + {create_query}; + + CREATE TABLE test.view (key UInt64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/{topic_name}', 'node1') + ORDER BY key; """ - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.consumer; + ) - CREATE TABLE test.kafka (key UInt64, value String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'no_holes_when_write_suffix_failed', - kafka_group_name = 'no_holes_when_write_suffix_failed', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 20, - kafka_flush_interval_ms = 2000; + # init PartitionManager (it starts container) earlier + pm = PartitionManager() - CREATE TABLE test.view (key UInt64, value String) - ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1') - ORDER BY key; - """ - ) - - # init PartitionManager (it starts container) earlier - pm = PartitionManager() - - instance.query( + instance.query( + """ + CREATE MATERIALIZED VIEW test.consumer TO test.view AS + SELECT * FROM test.kafka + WHERE NOT sleepEachRow(0.25); """ - CREATE MATERIALIZED VIEW test.consumer TO test.view AS - SELECT * FROM test.kafka - WHERE NOT sleepEachRow(0.25); - """ - ) + ) - instance.wait_for_log_line("Polled batch of 20 messages") - # the tricky part here is that disconnect should happen after write prefix, but before write suffix - # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages" - # while materialized view is working to inject zookeeper failure - pm.drop_instance_zk_connections(instance) - instance.wait_for_log_line( - "Error.*(Connection loss|Coordination::Exception).*while pushing to view" - ) - pm.heal_all() - instance.wait_for_log_line("Committed offset 22") + instance.wait_for_log_line("Polled batch of 20 messages") + # the tricky part here is that disconnect should happen after write prefix, but before write suffix + # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages" + # while materialized view is working to inject zookeeper failure + pm.drop_instance_zk_connections(instance) + instance.wait_for_log_line( + "Error.*(Connection loss|Coordination::Exception).*while pushing to view" + ) + pm.heal_all() + instance.wait_for_log_line("Committed offset 
22") - result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") - logging.debug(result) + result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") + logging.debug(result) - # kafka_cluster.open_bash_shell('instance') + # kafka_cluster.open_bash_shell('instance') - instance.query( + instance.query( + """ + DROP TABLE test.consumer; + DROP TABLE test.view; """ - DROP TABLE test.consumer; - DROP TABLE test.view; - """ - ) + ) - assert TSV(result) == TSV("22\t22\t22") + assert TSV(result) == TSV("22\t22\t22") def test_exception_from_destructor(kafka_cluster): @@ -3209,12 +3198,25 @@ def test_exception_from_destructor(kafka_cluster): assert TSV(instance.query("SELECT 1")) == TSV("1") -def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_commits_of_unprocessed_messages_on_drop(kafka_cluster, create_query_generator): + topic_name = "commits_of_unprocessed_messages_on_drop" + get_topic_postfix(create_query_generator) messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(1)] - kafka_produce(kafka_cluster, "commits_of_unprocessed_messages_on_drop", messages) + kafka_produce(kafka_cluster, topic_name, messages) + + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=f"{topic_name}_test_group", + settings={ + "kafka_max_block_size": 1000, + "kafka_flush_interval_ms": 1000, + } + ) instance.query( - """ + f""" DROP TABLE IF EXISTS test.destination SYNC; CREATE TABLE test.destination ( key UInt64, @@ -3229,14 +3231,7 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): ENGINE = MergeTree() ORDER BY key; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'commits_of_unprocessed_messages_on_drop', - kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 1000, - kafka_flush_interval_ms = 1000; + {create_query}; CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS SELECT @@ -3265,7 +3260,7 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 kafka_produce( - kafka_cluster, "commits_of_unprocessed_messages_on_drop", messages + kafka_cluster, topic_name, messages ) time.sleep(0.5) @@ -3279,17 +3274,18 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): """ ) + new_create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=f"{topic_name}_test_group", + settings={ + "kafka_max_block_size": 10000, + "kafka_flush_interval_ms": 1000, + } + ) instance.query( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'commits_of_unprocessed_messages_on_drop', - kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 10000, - kafka_flush_interval_ms = 1000; - """ + new_create_query ) cancel.set() @@ -3314,20 +3310,26 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): assert TSV(result) == TSV("{0}\t{0}\t{0}".format(i[0] - 1)), "Missing data!" 
-def test_bad_reschedule(kafka_cluster): - messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(20000)] - kafka_produce(kafka_cluster, "test_bad_reschedule", messages) +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_bad_reschedule(kafka_cluster, create_query_generator): + topic_name = "test_bad_reschedule" + get_topic_postfix(create_query_generator) + messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(20000)] + kafka_produce(kafka_cluster, topic_name, messages) + + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size": 1000, + "kafka_flush_interval_ms": 1000, + } + ) instance.query( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'test_bad_reschedule', - kafka_group_name = 'test_bad_reschedule', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 1000, - kafka_flush_interval_ms = 1000; + f""" + {create_query}; CREATE MATERIALIZED VIEW test.destination Engine=Log AS SELECT @@ -3425,21 +3427,23 @@ def test_kafka_duplicates_when_commit_failed(kafka_cluster): # if we came to partition end we will repeat polling until reaching kafka_max_block_size or flush_interval -# that behavior is a bit quesionable - we can just take a bigger pauses between polls instead - +# that behavior is a bit questionable - we can just take a bigger pauses between polls instead - # to do more job in a single pass, and give more rest for a thread. # But in cases of some peaky loads in kafka topic the current contract sounds more predictable and # easier to understand, so let's keep it as is for now. # also we can came to eof because we drained librdkafka internal queue too fast -def test_premature_flush_on_eof(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_premature_flush_on_eof(kafka_cluster, create_query_generator): + topic_name = "premature_flush_on_eof" + get_topic_postfix(create_query_generator) + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + ) instance.query( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'premature_flush_on_eof', - kafka_group_name = 'premature_flush_on_eof', - kafka_format = 'JSONEachRow'; - SELECT * FROM test.kafka LIMIT 1; + f""" + {create_query}; CREATE TABLE test.destination ( key UInt64, value UInt64, @@ -3455,13 +3459,13 @@ def test_premature_flush_on_eof(kafka_cluster): """ ) - # messages created here will be consumed immedeately after MV creation + # messages created here will be consumed immediately after MV creation # reaching topic EOF. 
- # But we should not do flush immedeately after reaching EOF, because + # But we should not do flush immediately after reaching EOF, because # next poll can return more data, and we should respect kafka_flush_interval_ms # and try to form bigger block - messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(1)] - kafka_produce(kafka_cluster, "premature_flush_on_eof", messages) + messages = [json.dumps({"key": 1, "value": 1})] + kafka_produce(kafka_cluster, topic_name, messages) instance.query( """ @@ -3485,7 +3489,7 @@ def test_premature_flush_on_eof(kafka_cluster): instance.wait_for_log_line("Stalled") # produce more messages after delay - kafka_produce(kafka_cluster, "premature_flush_on_eof", messages) + kafka_produce(kafka_cluster, topic_name, messages) # data was not flushed yet (it will be flushed 7.5 sec after creating MV) assert int(instance.query("SELECT count() FROM test.destination")) == 0 @@ -3506,58 +3510,75 @@ def test_premature_flush_on_eof(kafka_cluster): ) -def test_kafka_unavailable(kafka_cluster): - messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(20000)] - kafka_produce(kafka_cluster, "test_bad_reschedule", messages) +@pytest.mark.parametrize('create_query_generator, do_direct_read', [ + (generate_old_create_table_query, True), + (generate_new_create_table_query, False) +]) +def test_kafka_unavailable(kafka_cluster, create_query_generator, do_direct_read): + number_of_messages=20000 + topic_name = "test_bad_reschedule" + get_topic_postfix(create_query_generator) + messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(number_of_messages)] + kafka_produce(kafka_cluster, topic_name, messages) - kafka_cluster.pause_container("kafka1") + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): + kafka_cluster.pause_container("kafka1") - instance.query( + create_query = create_query_generator( + "test_bad_reschedule", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + settings={"kafka_max_block_size": 1000} + ) + instance.query( + f""" + {create_query}; + + CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS + SELECT + key, + now() as consume_ts, + value, + _topic, + _key, + _offset, + _partition, + _timestamp + FROM test.test_bad_reschedule; """ - CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'test_bad_reschedule', - kafka_group_name = 'test_bad_reschedule', - kafka_format = 'JSONEachRow', - kafka_commit_on_select = 1, - kafka_max_block_size = 1000; + ) - CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS - SELECT - key, - now() as consume_ts, - value, - _topic, - _key, - _offset, - _partition, - _timestamp - FROM test.test_bad_reschedule; - """ - ) + if do_direct_read: + instance.query("SELECT * FROM test.test_bad_reschedule") + instance.query("SELECT count() FROM test.destination_unavailable") - instance.query("SELECT * FROM test.test_bad_reschedule") - instance.query("SELECT count() FROM test.destination_unavailable") + # enough to trigger issue + time.sleep(30) + kafka_cluster.unpause_container("kafka1") - # enough to trigger issue - time.sleep(30) - kafka_cluster.unpause_container("kafka1") + result = instance.query_with_retry( + "SELECT count() FROM test.destination_unavailable", + sleep_time=1, + check_callback=lambda res: int(res) == number_of_messages) - while ( - int(instance.query("SELECT count() FROM 
test.destination_unavailable")) < 20000 - ): - print("Waiting for consume") - time.sleep(1) + assert int(result) == number_of_messages -def test_kafka_issue14202(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_issue14202(kafka_cluster, create_query_generator): """ INSERT INTO Kafka Engine from an empty SELECT sub query was leading to failure """ + topic_name = "issue14202" + get_topic_postfix(create_query_generator) + create_query = create_query_generator( + "kafka_q", + "t UInt64, some_string String", + topic_list=topic_name, + consumer_group=topic_name, + ) instance.query( - """ + f""" CREATE TABLE test.empty_table ( dt Date, some_string String @@ -3566,12 +3587,7 @@ def test_kafka_issue14202(kafka_cluster): PARTITION BY toYYYYMM(dt) ORDER BY some_string; - CREATE TABLE test.kafka_q (t UInt64, `some_string` String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'issue14202', - kafka_group_name = 'issue14202', - kafka_format = 'JSONEachRow'; + {create_query}; """ ) @@ -3622,20 +3638,25 @@ def random_string(size=8): return "".join(random.choices(string.ascii_uppercase + string.digits, k=size)) -def test_kafka_engine_put_errors_to_stream(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_engine_put_errors_to_stream(kafka_cluster, create_query_generator): + topic_name = "kafka_engine_put_errors_to_stream" + get_topic_postfix(create_query_generator) + create_query = create_query_generator( + "kafka", + "i Int64, s String", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size": 128, + "kafka_handle_error_mode": "stream", + } + ) instance.query( - """ + f""" DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.kafka_data; DROP TABLE IF EXISTS test.kafka_errors; - CREATE TABLE test.kafka (i Int64, s String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'kafka_engine_put_errors_to_stream', - kafka_group_name = 'kafka_engine_put_errors_to_stream', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 128, - kafka_handle_error_mode = 'stream'; + {create_query}; CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String) ENGINE = MergeTree ORDER BY i @@ -3663,19 +3684,20 @@ def test_kafka_engine_put_errors_to_stream(kafka_cluster): json.dumps({"i": "n_" + random_string(4), "s": random_string(8)}) ) - kafka_produce(kafka_cluster, "kafka_engine_put_errors_to_stream", messages) - instance.wait_for_log_line("Committed offset 128") + kafka_produce(kafka_cluster, topic_name, messages) + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): + instance.wait_for_log_line("Committed offset 128") - assert TSV(instance.query("SELECT count() FROM test.kafka_data")) == TSV("64") - assert TSV(instance.query("SELECT count() FROM test.kafka_errors")) == TSV("64") + assert TSV(instance.query("SELECT count() FROM test.kafka_data")) == TSV("64") + assert TSV(instance.query("SELECT count() FROM test.kafka_errors")) == TSV("64") - instance.query( + instance.query( + """ + DROP TABLE test.kafka; + DROP TABLE test.kafka_data; + DROP TABLE test.kafka_errors; """ - DROP TABLE test.kafka; - DROP TABLE test.kafka_data; - DROP TABLE test.kafka_errors; - """ - ) + ) def gen_normal_json(): @@ -3704,21 +3726,27 @@ def gen_message_with_jsons(jsons=10, malformed=0): return s.getvalue() -def 
test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster, create_query_generator): + topic_name = "kafka_engine_put_errors_to_stream_with_random_malformed_json" + get_topic_postfix(create_query_generator) + create_query = create_query_generator( + "kafka", + "i Int64, s String", + topic_list=topic_name, + consumer_group=topic_name, + settings={ + "kafka_max_block_size": 100, + "kafka_poll_max_batch_size": 1, + "kafka_handle_error_mode": "stream", + } + ) + instance.query( - """ + f""" DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.kafka_data; DROP TABLE IF EXISTS test.kafka_errors; - CREATE TABLE test.kafka (i Int64, s String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json', - kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 100, - kafka_poll_max_batch_size = 1, - kafka_handle_error_mode = 'stream'; + {create_query}; CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String) ENGINE = MergeTree ORDER BY i @@ -3743,28 +3771,25 @@ def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_clus else: messages.append(gen_message_with_jsons(10, 0)) - kafka_produce( - kafka_cluster, - "kafka_engine_put_errors_to_stream_with_random_malformed_json", - messages, - ) + kafka_produce(kafka_cluster, topic_name, messages) + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): + instance.wait_for_log_line("Committed offset 128") + # 64 good messages, each containing 10 rows + assert TSV(instance.query("SELECT count() FROM test.kafka_data")) == TSV("640") + # 64 bad messages, each containing some broken row + assert TSV(instance.query("SELECT count() FROM test.kafka_errors")) == TSV("64") - instance.wait_for_log_line("Committed offset 128") - # 64 good messages, each containing 10 rows - assert TSV(instance.query("SELECT count() FROM test.kafka_data")) == TSV("640") - # 64 bad messages, each containing some broken row - assert TSV(instance.query("SELECT count() FROM test.kafka_errors")) == TSV("64") - - instance.query( + instance.query( + """ + DROP TABLE test.kafka; + DROP TABLE test.kafka_data; + DROP TABLE test.kafka_errors; """ - DROP TABLE test.kafka; - DROP TABLE test.kafka_data; - DROP TABLE test.kafka_errors; - """ - ) + ) -def test_kafka_formats_with_broken_message(kafka_cluster): +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator): # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' admin_client = KafkaAdminClient( @@ -3782,7 +3807,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message '{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}', ], - "expected": 
"""{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}""", + "expected": { + "raw_message":"{\"id\":\"0\",\"blockNo\":\"BAD\",\"val1\":\"AM\",\"val2\":0.5,\"val3\":1}", + "error":"Cannot parse input: expected '\"' before: 'BAD\",\"val1\":\"AM\",\"val2\":0.5,\"val3\":1}': (while reading the value of key blockNo)" + }, "supports_empty_value": True, "printable": True, }, @@ -3795,7 +3823,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message '["0", "BAD", "AM", 0.5, 1]', ], - "expected": """{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}""", + "expected": { + "raw_message":"[\"0\", \"BAD\", \"AM\", 0.5, 1]", + "error":"Cannot parse input: expected '\"' before: 'BAD\", \"AM\", 0.5, 1]': (while reading the value of key blockNo)" + }, "supports_empty_value": True, "printable": True, }, @@ -3807,7 +3838,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message '["0", "BAD", "AM", 0.5, 1]', ], - "expected": """{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}""", + "expected": { + "raw_message":"[\"0\", \"BAD\", \"AM\", 0.5, 1]", + "error":"Cannot parse JSON string: expected opening quote" + }, "printable": True, }, "TSKV": { @@ -3818,7 +3852,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message "id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n", ], - "expected": '{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}', + "expected": { + "raw_message":"id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n", + "error":"Found garbage after field in TSKV format: blockNo: (at row 1)\n" + }, "printable": True, }, "CSV": { @@ -3829,7 +3866,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message '0,"BAD","AM",0.5,1\n', ], - "expected": """{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n'"}""", + "expected": { + "raw_message":"0,\"BAD\",\"AM\",0.5,1\n", + "error":"Cannot parse input: expected '\"' before: 'BAD\",\"AM\",0.5,1\\n'" + }, "printable": True, "supports_empty_value": True, }, @@ -3841,7 +3881,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message "0\tBAD\tAM\t0.5\t1\n", ], - "expected": """{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n'"}""", + "expected": { + "raw_message":"0\tBAD\tAM\t0.5\t1\n", + "error":"Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n'" + }, "supports_empty_value": True, "printable": True, }, @@ -3853,7 +3896,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message '"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n', ], - "expected": """{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n'"}""", + "expected": { + "raw_message":"\"id\",\"blockNo\",\"val1\",\"val2\",\"val3\"\n0,\"BAD\",\"AM\",0.5,1\n", + "error":"Cannot parse input: 
expected '\"' before: 'BAD\",\"AM\",0.5,1\\n'" + }, "printable": True, }, "Values": { @@ -3864,7 +3910,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message "(0,'BAD','AM',0.5,1)", ], - "expected": r"""{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception"}""", + "expected": { + "raw_message":"(0,'BAD','AM',0.5,1)", + "error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero/NULL instead of throwing exception" + }, "supports_empty_value": True, "printable": True, }, @@ -3876,7 +3925,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message "id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n", ], - "expected": """{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n"}""", + "expected": { + "raw_message":"id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n", + "error":"Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n" + }, "supports_empty_value": True, "printable": True, }, @@ -3888,7 +3940,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n", ], - "expected": """{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n'"}""", + "expected": { + "raw_message":"id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n", + "error":"Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n'" + }, "printable": True, }, "Native": { @@ -3899,7 +3954,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", ], - "expected": """{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}""", + "expected": { + "raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801", + "error":"Cannot convert: String to UInt16" + }, "printable": False, }, "RowBinary": { @@ -3910,7 +3968,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message b"\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01", ], - "expected": '{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}', + "expected": { + "raw_message":"00000000000000000342414402414D0000003F01", + "error":"Cannot read all data. Bytes read: 9. 
Bytes expected: 65.: (at row 1)\n" + }, "printable": False, }, "RowBinaryWithNamesAndTypes": { @@ -3921,7 +3982,10 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01", ], - "expected": '{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Type of \'blockNo\' must be UInt16, not String"}', + "expected": { + "raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01", + "error":"Type of 'blockNo' must be UInt16, not String" + }, "printable": False, }, "ORC": { @@ -3932,15 +3996,19 @@ def test_kafka_formats_with_broken_message(kafka_cluster): # broken message b"\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00
\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", ], - "expected": r"""{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}""", + "expected": { + "raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318", + "error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero/NULL instead of throwing exception." + }, "printable": False, }, } topic_name_prefix = "format_tests_4_stream_" + topic_name_postfix = get_topic_postfix(create_query_generator) for format_name, format_opts in list(all_formats.items()): logging.debug(f"Set up {format_name}") - topic_name = f"{topic_name_prefix}{format_name}" + topic_name = f"{topic_name_prefix}{format_name}{topic_name_postfix}" data_sample = format_opts["data_sample"] data_prefix = [] raw_message = "_raw_message" @@ -3950,23 +4018,22 @@ def test_kafka_formats_with_broken_message(kafka_cluster): if format_opts.get("printable", False) == False: raw_message = "hex(_raw_message)" kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample) + create_query = create_query_generator( + f"kafka_{format_name}", + "id Int64, blockNo UInt16, val1 String, val2 Float32, val3 UInt8", + topic_list=topic_name, + consumer_group=topic_name, + format=format_name, + settings={ + "kafka_handle_error_mode": "stream", + "kafka_flush_interval_ms": 1000, + } + ) instance.query( - """ + f""" DROP TABLE IF EXISTS test.kafka_{format_name}; - CREATE TABLE test.kafka_{format_name} ( - id Int64, - blockNo UInt16, - val1 String, - val2 Float32, - val3 UInt8 - ) ENGINE = Kafka() - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic_name}', - kafka_group_name = '{topic_name}', - kafka_format = '{format_name}', - kafka_handle_error_mode = 'stream', - kafka_flush_interval_ms = 1000 {extra_settings}; + {create_query}; DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv; CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS @@ -3977,12 +4044,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster): CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name} WHERE length(_error) > 0; - """.format( - topic_name=topic_name, - format_name=format_name, - raw_message=raw_message, - extra_settings=format_opts.get("extra_settings") or "", - ) + """ ) raw_expected = """\ @@ -4017,7 +4079,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster): for format_name, format_opts in list(all_formats.items()): logging.debug(f"Checking {format_name}") - topic_name = f"{topic_name_prefix}{format_name}" + topic_name = f"{topic_name_prefix}{format_name}{topic_name_postfix}" # shift offsets by 1 if format supports empty value offsets = ( [1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2] @@ -4037,229 +4099,201 @@ def test_kafka_formats_with_broken_message(kafka_cluster): assert TSV(result) == TSV(expected), "Proper result for format: {}".format( format_name ) - errors_result = ast.literal_eval( + errors_result = json.loads( instance.query( "SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow".format( format_name=format_name ) ) ) - errors_expected = ast.literal_eval(format_opts["expected"]) # print(errors_result.strip()) # print(errors_expected.strip()) assert ( - errors_result["raw_message"] == errors_expected["raw_message"] + errors_result["raw_message"] == format_opts["expected"]["raw_message"] ), "Proper raw_message for format: {}".format(format_name) # Errors text can change, just checking prefixes assert ( - errors_expected["error"] in errors_result["error"] + format_opts["expected"]["error"] in errors_result["error"] ), "Proper 
error for format: {}".format(format_name) kafka_delete_topic(admin_client, topic_name) -def wait_for_new_data(table_name, prev_count=0, max_retries=120): - retries = 0 - while True: - new_count = int(instance.query("SELECT count() FROM {}".format(table_name))) - print(new_count) - if new_count > prev_count: - return new_count - else: - retries += 1 - time.sleep(0.5) - if retries > max_retries: - raise Exception("No new data :(") +@pytest.mark.parametrize('create_query_generator', [ + generate_old_create_table_query, + # generate_new_create_table_query TODO(antaljanosbenjamin): crashes CH +]) +def test_kafka_consumer_failover(kafka_cluster, create_query_generator): + topic_name = "kafka_consumer_failover" + get_topic_postfix(create_query_generator) + with kafka_topic(get_admin_client(kafka_cluster), topic_name, num_partitions=2): + consumer_group = f"{topic_name}_group" + create_queries = [] + for counter in range(3): + create_queries.append(create_query_generator( + f"kafka{counter+1}", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=consumer_group, + settings={ + "kafka_max_block_size": 1, + "kafka_poll_timeout_ms": 200, + } + )) -def test_kafka_consumer_failover(kafka_cluster): - # for backporting: - # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + instance.query( + f""" + {create_queries[0]}; + {create_queries[1]}; + {create_queries[2]}; - topic_name = "kafka_consumer_failover" - kafka_create_topic(admin_client, topic_name, num_partitions=2) + CREATE TABLE test.destination ( + key UInt64, + value UInt64, + _consumed_by LowCardinality(String) + ) + ENGINE = MergeTree() + ORDER BY key; - instance.query( - """ - DROP TABLE IF EXISTS test.kafka; - DROP TABLE IF EXISTS test.kafka2; + CREATE MATERIALIZED VIEW test.kafka1_mv TO test.destination AS + SELECT key, value, 'kafka1' as _consumed_by + FROM test.kafka1; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'kafka_consumer_failover', - kafka_group_name = 'kafka_consumer_failover_group', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 1, - kafka_poll_timeout_ms = 200; + CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS + SELECT key, value, 'kafka2' as _consumed_by + FROM test.kafka2; - CREATE TABLE test.kafka2 (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'kafka_consumer_failover', - kafka_group_name = 'kafka_consumer_failover_group', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 1, - kafka_poll_timeout_ms = 200; - - CREATE TABLE test.kafka3 (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'kafka_consumer_failover', - kafka_group_name = 'kafka_consumer_failover_group', - kafka_format = 'JSONEachRow', - kafka_max_block_size = 1, - kafka_poll_timeout_ms = 200; - - CREATE TABLE test.destination ( - key UInt64, - value UInt64, - _consumed_by LowCardinality(String) + CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS + SELECT key, value, 'kafka3' as _consumed_by + FROM test.kafka3; + """ ) - ENGINE = MergeTree() - ORDER BY key; - CREATE MATERIALIZED VIEW test.kafka_mv TO test.destination AS - SELECT key, value, 'kafka' as _consumed_by - FROM test.kafka; + producer = KafkaProducer( + 
bootstrap_servers="localhost:{}".format(cluster.kafka_port), + value_serializer=producer_serializer, + key_serializer=producer_serializer, + ) - CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS - SELECT key, value, 'kafka2' as _consumed_by - FROM test.kafka2; + ## all 3 attached, 2 working + producer.send( + topic=topic_name, + value=json.dumps({"key": 1, "value": 1}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 1, "value": 1}), + partition=1, + ) + producer.flush() - CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS - SELECT key, value, 'kafka3' as _consumed_by - FROM test.kafka3; - """ - ) + count_query = "SELECT count() FROM test.destination" + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > 0) - producer = KafkaProducer( - bootstrap_servers="localhost:{}".format(cluster.kafka_port), - value_serializer=producer_serializer, - key_serializer=producer_serializer, - ) + ## 2 attached, 2 working + instance.query("DETACH TABLE test.kafka1") + producer.send( + topic=topic_name, + value=json.dumps({"key": 2, "value": 2}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 2, "value": 2}), + partition=1, + ) + producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) - ## all 3 attached, 2 working - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 1, "value": 1}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 1, "value": 1}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination") + ## 1 attached, 1 working + instance.query("DETACH TABLE test.kafka2") + producer.send( + topic=topic_name, + value=json.dumps({"key": 3, "value": 3}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 3, "value": 3}), + partition=1, + ) + producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) - ## 2 attached, 2 working - instance.query("DETACH TABLE test.kafka") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 2, "value": 2}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 2, "value": 2}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) + ## 2 attached, 2 working + instance.query("ATTACH TABLE test.kafka1") + producer.send( + topic=topic_name, + value=json.dumps({"key": 4, "value": 4}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 4, "value": 4}), + partition=1, + ) + producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) - ## 1 attached, 1 working - instance.query("DETACH TABLE test.kafka2") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 3, "value": 3}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 3, "value": 3}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) + ## 1 attached, 1 working + instance.query("DETACH TABLE test.kafka3") + producer.send( + topic=topic_name, + value=json.dumps({"key": 5, "value": 5}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 5, "value": 5}), + partition=1, + ) + 
producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) - ## 2 attached, 2 working - instance.query("ATTACH TABLE test.kafka") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 4, "value": 4}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 4, "value": 4}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) + ## 2 attached, 2 working + instance.query("ATTACH TABLE test.kafka2") + producer.send( + topic=topic_name, + value=json.dumps({"key": 6, "value": 6}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 6, "value": 6}), + partition=1, + ) + producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) - ## 1 attached, 1 working - instance.query("DETACH TABLE test.kafka3") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 5, "value": 5}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 5, "value": 5}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) + ## 3 attached, 2 working + instance.query("ATTACH TABLE test.kafka3") + producer.send( + topic=topic_name, + value=json.dumps({"key": 7, "value": 7}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 7, "value": 7}), + partition=1, + ) + producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) - ## 2 attached, 2 working - instance.query("ATTACH TABLE test.kafka2") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 6, "value": 6}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 6, "value": 6}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) - - ## 3 attached, 2 working - instance.query("ATTACH TABLE test.kafka3") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 7, "value": 7}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 7, "value": 7}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) - - ## 2 attached, same 2 working - instance.query("DETACH TABLE test.kafka3") - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 8, "value": 8}), - partition=0, - ) - producer.send( - topic="kafka_consumer_failover", - value=json.dumps({"key": 8, "value": 8}), - partition=1, - ) - producer.flush() - prev_count = wait_for_new_data("test.destination", prev_count) - kafka_delete_topic(admin_client, topic_name) + ## 2 attached, same 2 working + instance.query("DETACH TABLE test.kafka3") + producer.send( + topic=topic_name, + value=json.dumps({"key": 8, "value": 8}), + partition=0, + ) + producer.send( + topic=topic_name, + value=json.dumps({"key": 8, "value": 8}), + partition=1, + ) + producer.flush() + prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) def test_kafka_predefined_configuration(kafka_cluster): @@ -4289,269 +4323,238 @@ def test_kafka_predefined_configuration(kafka_cluster): # https://github.com/ClickHouse/ClickHouse/issues/26643 -def test_issue26643(kafka_cluster): - # for backporting: - # 
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_issue26643(kafka_cluster, create_query_generator): producer = KafkaProducer( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, ) + topic_name = "test_issue26643" + get_topic_postfix(create_query_generator) + thread_per_consumer = must_use_thread_per_consumer(create_query_generator) - topic_list = [] - topic_list.append( - NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1) - ) - admin_client.create_topics(new_topics=topic_list, validate_only=False) - - msg = message_with_repeated_pb2.Message( - tnow=1629000000, - server="server1", - clien="host1", - sPort=443, - cPort=50000, - r=[ - message_with_repeated_pb2.dd( - name="1", type=444, ttl=123123, data=b"adsfasd" - ), - message_with_repeated_pb2.dd(name="2"), - ], - method="GET", - ) - - data = b"" - serialized_msg = msg.SerializeToString() - data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - - msg = message_with_repeated_pb2.Message(tnow=1629000002) - - serialized_msg = msg.SerializeToString() - data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - - producer.send(topic="test_issue26643", value=data) - - data = _VarintBytes(len(serialized_msg)) + serialized_msg - producer.send(topic="test_issue26643", value=data) - producer.flush() - - instance.query( - """ - CREATE TABLE IF NOT EXISTS test.test_queue - ( - `tnow` UInt32, - `server` String, - `client` String, - `sPort` UInt16, - `cPort` UInt16, - `r.name` Array(String), - `r.class` Array(UInt16), - `r.type` Array(UInt16), - `r.ttl` Array(UInt32), - `r.data` Array(String), - `method` String + with kafka_topic(get_admin_client(kafka_cluster), topic_name): + msg = message_with_repeated_pb2.Message( + tnow=1629000000, + server="server1", + clien="host1", + sPort=443, + cPort=50000, + r=[ + message_with_repeated_pb2.dd( + name="1", type=444, ttl=123123, data=b"adsfasd" + ), + message_with_repeated_pb2.dd(name="2"), + ], + method="GET", ) - ENGINE = Kafka - SETTINGS - kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'test_issue26643', - kafka_group_name = 'test_issue26643_group', - kafka_format = 'Protobuf', - kafka_schema = 'message_with_repeated.proto:Message', - kafka_num_consumers = 4, - kafka_skip_broken_messages = 10000; - SET allow_suspicious_low_cardinality_types=1; + data = b"" + serialized_msg = msg.SerializeToString() + data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - CREATE TABLE test.log - ( - `tnow` DateTime('Asia/Istanbul') CODEC(DoubleDelta, LZ4), - `server` LowCardinality(String), - `client` LowCardinality(String), - `sPort` LowCardinality(UInt16), - `cPort` UInt16 CODEC(T64, LZ4), - `r.name` Array(String), - `r.class` Array(LowCardinality(UInt16)), - `r.type` Array(LowCardinality(UInt16)), - `r.ttl` Array(LowCardinality(UInt32)), - `r.data` Array(String), - `method` LowCardinality(String) + msg = message_with_repeated_pb2.Message(tnow=1629000002) + + serialized_msg = msg.SerializeToString() + data = data + _VarintBytes(len(serialized_msg)) + serialized_msg + + producer.send(topic_name, value=data) + + data = _VarintBytes(len(serialized_msg)) + serialized_msg + producer.send(topic_name, value=data) + producer.flush() + + create_query = 
create_query_generator( + "test_queue", + """`tnow` UInt32, + `server` String, + `client` String, + `sPort` UInt16, + `cPort` UInt16, + `r.name` Array(String), + `r.class` Array(UInt16), + `r.type` Array(UInt16), + `r.ttl` Array(UInt32), + `r.data` Array(String), + `method` String""", + topic_list=topic_name, + consumer_group=f"{topic_name}_group", + format="Protobuf", + settings={ + "kafka_schema": "message_with_repeated.proto:Message", + "kafka_skip_broken_messages": 10000, + "kafka_thread_per_consumer": thread_per_consumer, + } ) - ENGINE = MergeTree - PARTITION BY toYYYYMMDD(tnow) - ORDER BY (tnow, server) - TTL toDate(tnow) + toIntervalMonth(1000) - SETTINGS index_granularity = 16384, merge_with_ttl_timeout = 7200; - CREATE MATERIALIZED VIEW test.test_consumer TO test.log AS - SELECT - toDateTime(a.tnow) AS tnow, - a.server AS server, - a.client AS client, - a.sPort AS sPort, - a.cPort AS cPort, - a.`r.name` AS `r.name`, - a.`r.class` AS `r.class`, - a.`r.type` AS `r.type`, - a.`r.ttl` AS `r.ttl`, - a.`r.data` AS `r.data`, - a.method AS method - FROM test.test_queue AS a; - """ - ) + instance.query( + f""" + {create_query}; - instance.wait_for_log_line("Committed offset") - result = instance.query("SELECT * FROM test.log") + SET allow_suspicious_low_cardinality_types=1; - expected = """\ -2021-08-15 07:00:00 server1 443 50000 ['1','2'] [0,0] [444,0] [123123,0] ['adsfasd',''] GET -2021-08-15 07:00:02 0 0 [] [] [] [] [] -2021-08-15 07:00:02 0 0 [] [] [] [] [] -""" - assert TSV(result) == TSV(expected) + CREATE TABLE test.log + ( + `tnow` DateTime('Asia/Istanbul') CODEC(DoubleDelta, LZ4), + `server` LowCardinality(String), + `client` LowCardinality(String), + `sPort` LowCardinality(UInt16), + `cPort` UInt16 CODEC(T64, LZ4), + `r.name` Array(String), + `r.class` Array(LowCardinality(UInt16)), + `r.type` Array(LowCardinality(UInt16)), + `r.ttl` Array(LowCardinality(UInt32)), + `r.data` Array(String), + `method` LowCardinality(String) + ) + ENGINE = MergeTree + PARTITION BY toYYYYMMDD(tnow) + ORDER BY (tnow, server) + TTL toDate(tnow) + toIntervalMonth(1000) + SETTINGS index_granularity = 16384, merge_with_ttl_timeout = 7200; - # kafka_cluster.open_bash_shell('instance') + CREATE MATERIALIZED VIEW test.test_consumer TO test.log AS + SELECT + toDateTime(a.tnow) AS tnow, + a.server AS server, + a.client AS client, + a.sPort AS sPort, + a.cPort AS cPort, + a.`r.name` AS `r.name`, + a.`r.class` AS `r.class`, + a.`r.type` AS `r.type`, + a.`r.ttl` AS `r.ttl`, + a.`r.data` AS `r.data`, + a.method AS method + FROM test.test_queue AS a; + """ + ) + + instance.wait_for_log_line("Committed offset") + result = instance.query("SELECT * FROM test.log") + + expected = """\ + 2021-08-15 07:00:00 server1 443 50000 ['1','2'] [0,0] [444,0] [123123,0] ['adsfasd',''] GET + 2021-08-15 07:00:02 0 0 [] [] [] [] [] + 2021-08-15 07:00:02 0 0 [] [] [] [] [] + """ + assert TSV(result) == TSV(expected) -def test_num_consumers_limit(kafka_cluster): + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_num_consumers_limit(kafka_cluster, create_query_generator): instance.query("DROP TABLE IF EXISTS test.kafka") - error = instance.query_and_get_error( - """ - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n', '', 100) - SETTINGS kafka_commit_on_select = 1; - """ - ) + thread_per_consumer = 
must_use_thread_per_consumer(create_query_generator) - assert "BAD_ARGUMENTS" in error + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + settings={ + "kafka_num_consumers": 100, + "kafka_thread_per_consumer": thread_per_consumer + } + ) + error = instance.query_and_get_error(create_query) + + assert "BAD_ARGUMENTS" in error and "The number of consumers can not be bigger than" in error instance.query( - """ + f""" SET kafka_disable_num_consumers_limit = 1; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n', '', 100) - SETTINGS kafka_commit_on_select = 1; + {create_query}; """ ) instance.query("DROP TABLE test.kafka") -def test_format_with_prefix_and_suffix(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_format_with_prefix_and_suffix(kafka_cluster, create_query_generator): + topic_name = "custom" + get_topic_postfix(create_query_generator) - kafka_create_topic(admin_client, "custom") + with kafka_topic(get_admin_client(kafka_cluster), topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="CustomSeparated" + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.kafka; + {create_query}; + """ + ) - instance.query( + instance.query( + "INSERT INTO test.kafka select number*10 as key, number*100 as value from numbers(2) settings format_custom_result_before_delimiter='\n', format_custom_result_after_delimiter='\n'" + ) + + message_count = 2 + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count) + + assert len(messages) == 2 + + assert ( + "".join(messages) == "\n0\t0\n\n\n10\t100\n\n" + ) + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_max_rows_per_message(kafka_cluster, create_query_generator): + topic_name = "custom_max_rows_per_message" + get_topic_postfix(create_query_generator) + + with kafka_topic(get_admin_client(kafka_cluster), topic_name): + num_rows = 5 + + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="CustomSeparated", + settings={ + "format_custom_result_before_delimiter": "\n", + "format_custom_result_after_delimiter": "\n", + "kafka_max_rows_per_message": 3, + } + ) + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.kafka; + {create_query}; + + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value FROM test.kafka; """ - DROP TABLE IF EXISTS test.kafka; + ) - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'custom', - kafka_group_name = 'custom', - kafka_format = 'CustomSeparated'; - """ - ) + instance.query( + f"INSERT INTO test.kafka select number*10 as key, number*100 as value from numbers({num_rows}) settings format_custom_result_before_delimiter='\n', format_custom_result_after_delimiter='\n'" + ) - instance.query( - "INSERT INTO test.kafka select number*10 as key, number*100 as value from numbers(2) settings format_custom_result_before_delimiter='\n', 
format_custom_result_after_delimiter='\n'" - ) + message_count = 2 + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count) - messages = [] + assert len(messages) == message_count - attempt = 0 - while attempt < 100: - messages.extend(kafka_consume(kafka_cluster, "custom")) - if len(messages) == 2: - break - attempt += 1 + assert ( + "".join(messages) + == "\n0\t0\n10\t100\n20\t200\n\n\n30\t300\n40\t400\n\n" + ) - assert len(messages) == 2 + instance.query_with_retry("SELECT count() FROM test.view", check_callback=lambda res: int(res) == num_rows) - assert ( - "".join(messages) == "\n0\t0\n\n\n10\t100\n\n" - ) - - kafka_delete_topic(admin_client, "custom") + result = instance.query("SELECT * FROM test.view") + assert result == "0\t0\n10\t100\n20\t200\n30\t300\n40\t400\n" -def test_max_rows_per_message(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - - topic = "custom_max_rows_per_message" - - kafka_create_topic(admin_client, topic) - - num_rows = 5 - - instance.query( - f""" - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.kafka; - - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic}', - kafka_group_name = '{topic}', - kafka_format = 'CustomSeparated', - format_custom_result_before_delimiter = '\n', - format_custom_result_after_delimiter = '\n', - kafka_max_rows_per_message = 3; - - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.kafka; - """ - ) - - instance.query( - f"INSERT INTO test.kafka select number*10 as key, number*100 as value from numbers({num_rows}) settings format_custom_result_before_delimiter='\n', format_custom_result_after_delimiter='\n'" - ) - - messages = [] - - attempt = 0 - while attempt < 500: - messages.extend(kafka_consume(kafka_cluster, topic)) - if len(messages) == 2: - break - attempt += 1 - - assert len(messages) == 2 - - assert ( - "".join(messages) - == "\n0\t0\n10\t100\n20\t200\n\n\n30\t300\n40\t400\n\n" - ) - - attempt = 0 - rows = 0 - while attempt < 500: - rows = int(instance.query("SELECT count() FROM test.view")) - if rows == num_rows: - break - attempt += 1 - - assert rows == num_rows - - result = instance.query("SELECT * FROM test.view") - assert result == "0\t0\n10\t100\n20\t200\n30\t300\n40\t400\n" - - kafka_delete_topic(admin_client, topic) - - -def test_row_based_formats(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_row_based_formats(kafka_cluster, create_query_generator): + admin_client = get_admin_client(kafka_cluster) for format_name in [ "TSV", @@ -4571,121 +4574,99 @@ def test_row_based_formats(kafka_cluster): "RowBinaryWithNamesAndTypes", "MsgPack", ]: - print(format_name) + logging.debug("Checking {format_name}") - kafka_create_topic(admin_client, format_name) + topic_name = format_name + get_topic_postfix(create_query_generator) - num_rows = 10 + with kafka_topic(admin_client, topic_name): + num_rows = 10 + max_rows_per_message = 5 + message_count = num_rows / max_rows_per_message + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format=format_name, + settings={"kafka_max_rows_per_message": max_rows_per_message} + ) + 
+ instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.kafka; + + {create_query}; + + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value FROM test.kafka; + + INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); + """ + ) + + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count, need_decode=False) + + assert len(messages) == message_count + + instance.query_with_retry("SELECT count() FROM test.view", check_callback=lambda res: int(res) == num_rows) + + result = instance.query("SELECT * FROM test.view") + expected = "" + for i in range(num_rows): + expected += str(i * 10) + "\t" + str(i * 100) + "\n" + assert result == expected + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_block_based_formats_1(kafka_cluster, create_query_generator): + topic_name = "pretty_space" + get_topic_postfix(create_query_generator) + + with kafka_topic(get_admin_client(kafka_cluster), topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format="PrettySpace" + ) instance.query( f""" - DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.kafka; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{format_name}', - kafka_group_name = '{format_name}', - kafka_format = '{format_name}', - kafka_max_rows_per_message = 5; + {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.kafka; - - INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); + INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers(5) settings max_block_size=2, optimize_trivial_insert_select=0, output_format_pretty_color=1, output_format_pretty_row_numbers=0; """ ) - messages = [] + message_count = 3 + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count) + assert len(messages) == 3 - attempt = 0 - while attempt < 500: - messages.extend(kafka_consume(kafka_cluster, format_name, needDecode=False)) - if len(messages) == 2: - break - attempt += 1 + data = [] + for message in messages: + splitted = message.split("\n") + assert splitted[0] == " \x1b[1mkey\x1b[0m \x1b[1mvalue\x1b[0m" + assert splitted[1] == "" + assert splitted[-1] == "" + data += [line.split() for line in splitted[2:-1]] - assert len(messages) == 2 - - attempt = 0 - rows = 0 - while attempt < 500: - rows = int(instance.query("SELECT count() FROM test.view")) - if rows == num_rows: - break - attempt += 1 - - assert rows == num_rows - - result = instance.query("SELECT * FROM test.view") - expected = "" - for i in range(num_rows): - expected += str(i * 10) + "\t" + str(i * 100) + "\n" - assert result == expected - - kafka_delete_topic(admin_client, format_name) + assert data == [ + ["0", "0"], + ["10", "100"], + ["20", "200"], + ["30", "300"], + ["40", "400"], + ] -def test_block_based_formats_1(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - - topic = "pretty_space" - kafka_create_topic(admin_client, topic) - - instance.query( - f""" - DROP TABLE IF EXISTS test.kafka; - - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - 
kafka_topic_list = '{topic}', - kafka_group_name = '{topic}', - kafka_format = 'PrettySpace'; - - INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers(5) settings max_block_size=2, optimize_trivial_insert_select=0, output_format_pretty_color=1, output_format_pretty_row_numbers=0; - """ - ) - - messages = [] - - attempt = 0 - while attempt < 500: - messages.extend(kafka_consume(kafka_cluster, topic)) - if len(messages) == 3: - break - attempt += 1 - - assert len(messages) == 3 - - data = [] - for message in messages: - splitted = message.split("\n") - assert splitted[0] == " \x1b[1mkey\x1b[0m \x1b[1mvalue\x1b[0m" - assert splitted[1] == "" - assert splitted[-1] == "" - data += [line.split() for line in splitted[2:-1]] - - assert data == [ - ["0", "0"], - ["10", "100"], - ["20", "200"], - ["30", "300"], - ["40", "400"], - ] - - kafka_delete_topic(admin_client, topic) - - -def test_block_based_formats_2(kafka_cluster): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_block_based_formats_2(kafka_cluster, create_query_generator): + admin_client = get_admin_client(kafka_cluster) num_rows = 100 + message_count = 9 for format_name in [ "JSONColumns", @@ -4695,55 +4676,41 @@ def test_block_based_formats_2(kafka_cluster): "ORC", "JSONCompactColumns", ]: - kafka_create_topic(admin_client, format_name) + topic_name = format_name + get_topic_postfix(create_query_generator) + with kafka_topic(admin_client, topic_name): + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + format=format_name + ) - instance.query( - f""" - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.kafka; + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.kafka; - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{format_name}', - kafka_group_name = '{format_name}', - kafka_format = '{format_name}'; + {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.kafka; + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value FROM test.kafka; - INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; - """ - ) + INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; + """ + ) + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count, need_decode=False) + assert len(messages) == message_count - messages = [] + rows = int(instance.query_with_retry("SELECT count() FROM test.view", check_callback=lambda res: int(res) == num_rows)) - attempt = 0 - while attempt < 500: - messages.extend(kafka_consume(kafka_cluster, format_name, needDecode=False)) - if len(messages) == 9: - break - attempt += 1 + assert rows == num_rows - assert len(messages) == 9 - - attempt = 0 - rows = 0 - while attempt < 500: - rows = int(instance.query("SELECT count() FROM test.view")) - if rows == num_rows: - break - attempt += 1 - - assert rows == num_rows - - result = instance.query("SELECT * FROM test.view ORDER by key") - expected = "" - for i in 
range(num_rows): - expected += str(i * 10) + "\t" + str(i * 100) + "\n" - assert result == expected - - kafka_delete_topic(admin_client, format_name) + result = instance.query("SELECT * FROM test.view ORDER by key") + expected = "" + for i in range(num_rows): + expected += str(i * 10) + "\t" + str(i * 100) + "\n" + assert result == expected def test_system_kafka_consumers(kafka_cluster): @@ -5081,137 +5048,124 @@ def test_formats_errors(kafka_cluster): "HiveText", "MySQLDump", ]: - kafka_create_topic(admin_client, format_name) - table_name = f"kafka_{format_name}" + with kafka_topic(admin_client, format_name): + table_name = f"kafka_{format_name}" + + instance.query( + f""" + DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.{table_name}; + + CREATE TABLE test.{table_name} (key UInt64, value UInt64) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{format_name}', + kafka_group_name = '{format_name}', + kafka_format = '{format_name}', + kafka_max_rows_per_message = 5, + format_template_row='template_row.format', + format_regexp='id: (.+?)', + input_format_with_names_use_header=0, + format_schema='key_value_message:Message'; + + CREATE MATERIALIZED VIEW test.view Engine=Log AS + SELECT key, value FROM test.{table_name}; + """ + ) + + kafka_produce( + kafka_cluster, + format_name, + ["Broken message\nBroken message\nBroken message\n"], + ) + + num_errors = int( + instance.query_with_retry( + f"SELECT length(exceptions.text) from system.kafka_consumers where database = 'test' and table = '{table_name}'", + check_callback=lambda res: int(res) > 0 + ) + ) + + assert num_errors > 0 + + instance.query(f"DROP TABLE test.{table_name}") + instance.query("DROP TABLE test.view") + + +@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +def test_multiple_read_in_materialized_views(kafka_cluster, create_query_generator): + topic_name = "multiple_read_from_mv" + get_topic_postfix(create_query_generator) + + with kafka_topic(get_admin_client(kafka_cluster), topic_name): + create_query = create_query_generator( + "kafka_multiple_read_input", + "id Int64", + topic_list=topic_name, + consumer_group=topic_name, + ) instance.query( f""" - DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.{table_name}; + DROP TABLE IF EXISTS test.kafka_multiple_read_input; + DROP TABLE IF EXISTS test.kafka_multiple_read_table; + DROP TABLE IF EXISTS test.kafka_multiple_read_mv; - CREATE TABLE test.{table_name} (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{format_name}', - kafka_group_name = '{format_name}', - kafka_format = '{format_name}', - kafka_max_rows_per_message = 5, - format_template_row='template_row.format', - format_regexp='id: (.+?)', - input_format_with_names_use_header=0, - format_schema='key_value_message:Message'; + {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.{table_name}; - """ + CREATE TABLE test.kafka_multiple_read_table (id Int64) + ENGINE = MergeTree + ORDER BY id; + + + CREATE MATERIALIZED VIEW test.kafka_multiple_read_mv TO test.kafka_multiple_read_table AS + SELECT id + FROM test.kafka_multiple_read_input + WHERE id NOT IN ( + SELECT id + FROM test.kafka_multiple_read_table + WHERE id IN ( + SELECT id + FROM test.kafka_multiple_read_input + ) + ); + """ ) kafka_produce( - kafka_cluster, - format_name, - ["Broken message\nBroken message\nBroken message\n"], + 
kafka_cluster, topic_name, [json.dumps({"id": 42}), json.dumps({"id": 43})] ) - attempt = 0 - num_errors = 0 - while attempt < 200: - num_errors = int( - instance.query( - f"SELECT length(exceptions.text) from system.kafka_consumers where database = 'test' and table = '{table_name}'" - ) - ) - if num_errors > 0: - break - attempt += 1 + expected_result = "42\n43\n" + res = instance.query_with_retry( + f"SELECT id FROM test.kafka_multiple_read_table ORDER BY id", + check_callback=lambda res: res == expected_result, + ) + assert res == expected_result - assert num_errors > 0 + # Verify that the query deduplicates the records as it meant to be + messages = [] + for _ in range(0, 10): + messages.append(json.dumps({"id": 42})) + messages.append(json.dumps({"id": 43})) - kafka_delete_topic(admin_client, format_name) - instance.query(f"DROP TABLE test.{table_name}") - instance.query("DROP TABLE test.view") + messages.append(json.dumps({"id": 44})) + kafka_produce(kafka_cluster, topic_name, messages) -def test_multiple_read_in_materialized_views(kafka_cluster, max_retries=15): - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) + expected_result = "42\n43\n44\n" + res = instance.query_with_retry( + f"SELECT id FROM test.kafka_multiple_read_table ORDER BY id", + check_callback=lambda res: res == expected_result, + ) + assert res == expected_result - topic = "multiple_read_from_mv" - kafka_create_topic(admin_client, topic) - - instance.query( - f""" - DROP TABLE IF EXISTS test.kafka_multiple_read_input; - DROP TABLE IF EXISTS test.kafka_multiple_read_table; - DROP TABLE IF EXISTS test.kafka_multiple_read_mv; - - CREATE TABLE test.kafka_multiple_read_input (id Int64) - ENGINE = Kafka - SETTINGS - kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic}', - kafka_group_name = '{topic}', - kafka_format = 'JSONEachRow'; - - CREATE TABLE test.kafka_multiple_read_table (id Int64) - ENGINE = MergeTree - ORDER BY id; - - - CREATE MATERIALIZED VIEW IF NOT EXISTS test.kafka_multiple_read_mv TO test.kafka_multiple_read_table AS - SELECT id - FROM test.kafka_multiple_read_input - WHERE id NOT IN ( - SELECT id - FROM test.kafka_multiple_read_table - WHERE id IN ( - SELECT id - FROM test.kafka_multiple_read_input - ) - ); - """ - ) - - kafka_produce( - kafka_cluster, topic, [json.dumps({"id": 42}), json.dumps({"id": 43})] - ) - - expected_result = "42\n43\n" - res = instance.query_with_retry( - f"SELECT id FROM test.kafka_multiple_read_table ORDER BY id", - retry_count=30, - sleep_time=0.5, - check_callback=lambda res: res == expected_result, - ) - assert res == expected_result - - # Verify that the query deduplicates the records as it meant to be - messages = [] - for i in range(0, 10): - messages.append(json.dumps({"id": 42})) - messages.append(json.dumps({"id": 43})) - - messages.append(json.dumps({"id": 44})) - - kafka_produce(kafka_cluster, topic, messages) - - expected_result = "42\n43\n44\n" - res = instance.query_with_retry( - f"SELECT id FROM test.kafka_multiple_read_table ORDER BY id", - retry_count=30, - sleep_time=0.5, - check_callback=lambda res: res == expected_result, - ) - assert res == expected_result - - kafka_delete_topic(admin_client, topic) - instance.query( - f""" - DROP TABLE test.kafka_multiple_read_input; - DROP TABLE test.kafka_multiple_read_table; - DROP TABLE test.kafka_multiple_read_mv; - """ - ) + instance.query( + f""" + DROP TABLE test.kafka_multiple_read_input; + DROP TABLE test.kafka_multiple_read_table; + DROP TABLE 
test.kafka_multiple_read_mv; + """ + ) if __name__ == "__main__": From 64af6ec596747d4c20b04d54502b047d3a1d1b3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 7 Jun 2024 22:44:38 +0000 Subject: [PATCH 0098/2361] Clean up some TODOs --- src/Storages/Kafka/KafkaConsumer2.cpp | 1 - src/Storages/Kafka/StorageKafka2.cpp | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 9ed698301e5..1320b939612 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -304,7 +304,6 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition, co return next_message; } - // TODO(antaljanosbenjamin): check if we should poll new messages or not while (true) { stalled_status = StalledStatus::NO_MESSAGES_RETURNED; diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index fa5389d606a..911de671fb7 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -815,7 +815,7 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi if (code != Coordination::Error::ZNODEEXISTS) zkutil::KeeperMultiException::check(code, ops, responses); - // TODO(antaljanosbenjamin): maybe check the content, if we have the locks, we can continue with them + // Possible optimization: check the content of logfiles, if we locked them, then we can clean them up and retry to lock them. return std::nullopt; } @@ -1022,7 +1022,6 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( total_rows = total_rows + new_rows; batch_info.last_offset = consumer.currentOffset(); } - // TODO(antaljanosbenjamin): think about this when rebalance is happening, because `isStalled()` will return true else if (consumer.isStalled()) { ++failed_poll_attempts; @@ -1168,7 +1167,6 @@ bool StorageKafka2::streamToViews(size_t idx) if (current_assignment == nullptr) { // The consumer lost its assignment and haven't received a new one. 
- // TODO(antaljanosbenjamin): returning a proper value representing the state // By returning true this function reports the current consumer as a "stalled" stream, which return true; } From 26851f1d348034b76f6a849796c4020b7d120735 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 7 Jun 2024 22:56:26 +0000 Subject: [PATCH 0099/2361] Automatic style fix --- tests/integration/test_storage_kafka/test.py | 754 +++++++++++++------ 1 file changed, 511 insertions(+), 243 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 5b7d7f65b9f..ad4e0a0877f 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -155,9 +155,16 @@ def kafka_topic( num_partitions=1, replication_factor=1, max_retries=50, - config=None + config=None, ): - kafka_create_topic(admin_client, topic_name, num_partitions, replication_factor, max_retries, config) + kafka_create_topic( + admin_client, + topic_name, + num_partitions, + replication_factor, + max_retries, + config, + ) try: yield None finally: @@ -174,7 +181,10 @@ def existing_kafka_topic(admin_client, topic_name, max_retries=50): def get_admin_client(kafka_cluster): - return KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + return KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15): logging.debug( @@ -230,13 +240,24 @@ def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messa logging.debug(("Produced {} messages for topic {}".format(num_messages, topic))) - -def kafka_consume_with_retry(kafka_cluster, topic, expected_messages, need_decode=True, timestamp=0, retry_count=20, sleep_time=0.1): +def kafka_consume_with_retry( + kafka_cluster, + topic, + expected_messages, + need_decode=True, + timestamp=0, + retry_count=20, + sleep_time=0.1, +): messages = [] try_count = 0 while try_count < retry_count: try_count += 1 - messages.extend(kafka_consume(kafka_cluster, topic, need_decode=need_decode, timestamp=timestamp)) + messages.extend( + kafka_consume( + kafka_cluster, topic, need_decode=need_decode, timestamp=timestamp + ) + ) if len(messages) == expected_messages: break time.sleep(sleep_time) @@ -244,6 +265,7 @@ def kafka_consume_with_retry(kafka_cluster, topic, expected_messages, need_decod raise Exception(f"Got only {len(messages)} messages") return messages + def kafka_produce_protobuf_messages_no_delimiters( kafka_cluster, topic, start_index, num_messages ): @@ -331,6 +353,7 @@ def avro_confluent_message(schema_registry_client, value): ) return serializer.encode_record_with_schema("test_subject", schema, value) + def create_settings_string(settings): if settings is None: return "" @@ -349,41 +372,43 @@ def create_settings_string(settings): for key in keys: if key == first_key: continue - settings_string +=", " + str(key) + " = " + format_value(settings[key]) + settings_string += ", " + str(key) + " = " + format_value(settings[key]) return settings_string def generate_old_create_table_query( - table_name, - columns_def, - database="test", - brokers="{kafka_broker}:19092", - topic_list="{kafka_topic_new}", - consumer_group="{kafka_group_name_new}", - format="{kafka_format_json_each_row}", - row_delimiter= "\\n", - keeper_path=None, # it is not used, but it is easier to handle keeper_path and replica_name like this - replica_name=None, - settings=None): - - 
settings_string=create_settings_string(settings) + table_name, + columns_def, + database="test", + brokers="{kafka_broker}:19092", + topic_list="{kafka_topic_new}", + consumer_group="{kafka_group_name_new}", + format="{kafka_format_json_each_row}", + row_delimiter="\\n", + keeper_path=None, # it is not used, but it is easier to handle keeper_path and replica_name like this + replica_name=None, + settings=None, +): + settings_string = create_settings_string(settings) query = f"""CREATE TABLE {database}.{table_name} ({columns_def}) ENGINE = Kafka('{brokers}', '{topic_list}', '{consumer_group}', '{format}', '{row_delimiter}') {settings_string}""" logging.debug(f"Generated old create query: {query}") return query + def generate_new_create_table_query( - table_name, - columns_def, - database="test", - brokers="{kafka_broker}:19092", - topic_list="{kafka_topic_new}", - consumer_group="{kafka_group_name_new}", - format="{kafka_format_json_each_row}", - row_delimiter= "\\n", - keeper_path=None, - replica_name=None, - settings=None): + table_name, + columns_def, + database="test", + brokers="{kafka_broker}:19092", + topic_list="{kafka_topic_new}", + consumer_group="{kafka_group_name_new}", + format="{kafka_format_json_each_row}", + row_delimiter="\\n", + keeper_path=None, + replica_name=None, + settings=None, +): if settings is None: settings = {} if keeper_path is None: @@ -392,13 +417,14 @@ def generate_new_create_table_query( replica_name = "r1" settings["kafka_keeper_path"] = keeper_path settings["kafka_replica_name"] = replica_name - settings_string=create_settings_string(settings) + settings_string = create_settings_string(settings) query = f"""CREATE TABLE {database}.{table_name} ({columns_def}) ENGINE = Kafka('{brokers}', '{topic_list}', '{consumer_group}', '{format}', '{row_delimiter}') {settings_string} SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1""" logging.debug(f"Generated new create query: {query}") return query + def must_use_thread_per_consumer(generator): if generator == generate_old_create_table_query: return False @@ -414,8 +440,12 @@ def get_topic_postfix(generator): return "new" raise Exception("Unexpected generator") + # Tests -@pytest.mark.parametrize('create_query_generator, do_direct_read', [(generate_old_create_table_query, True), (generate_new_create_table_query, False)]) +@pytest.mark.parametrize( + "create_query_generator, do_direct_read", + [(generate_old_create_table_query, True), (generate_new_create_table_query, False)], +) def test_kafka_column_types(kafka_cluster, create_query_generator, do_direct_read): def assert_returned_exception(e): assert e.value.returncode == 36 @@ -426,12 +456,12 @@ def test_kafka_column_types(kafka_cluster, create_query_generator, do_direct_rea # check column with DEFAULT expression with pytest.raises(QueryRuntimeException) as exception: - instance.query(create_query_generator('kafka', 'a Int, b Int DEFAULT 0')) + instance.query(create_query_generator("kafka", "a Int, b Int DEFAULT 0")) assert_returned_exception(exception) # check EPHEMERAL with pytest.raises(QueryRuntimeException) as exception: - instance.query(create_query_generator('kafka', 'a Int, b Int EPHEMERAL')) + instance.query(create_query_generator("kafka", "a Int, b Int EPHEMERAL")) assert_returned_exception(exception) # check MATERIALIZED @@ -446,7 +476,13 @@ def test_kafka_column_types(kafka_cluster, create_query_generator, do_direct_rea if do_direct_read: # check ALIAS - instance.query(create_query_generator("kafka", "a Int, b String Alias toString(a)", 
settings={"kafka_commit_on_select": True})) + instance.query( + create_query_generator( + "kafka", + "a Int, b String Alias toString(a)", + settings={"kafka_commit_on_select": True}, + ) + ) messages = [] for i in range(5): messages.append(json.dumps({"a": i})) @@ -634,9 +670,15 @@ def test_kafka_json_as_string(kafka_cluster): "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows" ) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) + +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_formats(kafka_cluster, create_query_generator): - schema_registry_client = CachedSchemaRegistryClient({"url":f"http://localhost:{kafka_cluster.schema_registry_port}"}) + schema_registry_client = CachedSchemaRegistryClient( + {"url": f"http://localhost:{kafka_cluster.schema_registry_port}"} + ) # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' @@ -759,7 +801,7 @@ def test_kafka_formats(kafka_cluster, create_query_generator): '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', ], - "extra_settings": {"format_template_row":"template_row.format"}, + "extra_settings": {"format_template_row": "template_row.format"}, }, "Regexp": { "data_sample": [ @@ -770,7 +812,10 @@ def test_kafka_formats(kafka_cluster, create_query_generator): # On empty message exception happens: Line "" doesn't match the regexp.: (at row 1) # /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse ], - "extra_settings": {"format_regexp":r"\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)", "format_regexp_escaping_rule": "Escaped"}, + "extra_settings": { + "format_regexp": r"\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)", + "format_regexp_escaping_rule": "Escaped", + }, }, ## BINARY FORMATS # dumped with @@ -842,7 +887,7 @@ def test_kafka_formats(kafka_cluster, create_query_generator): # /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, 
DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse ], - "extra_settings": {"kafka_schema":"test:TestMessage"}, + "extra_settings": {"kafka_schema": "test:TestMessage"}, }, "ORC": { "data_sample": [ @@ -866,7 +911,7 @@ def test_kafka_formats(kafka_cluster, create_query_generator): # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse ], - "extra_settings": {"kafka_schema":"test:TestRecordStruct"}, + "extra_settings": {"kafka_schema": "test:TestRecordStruct"}, }, "Parquet": { "data_sample": [ @@ -901,9 +946,12 @@ def test_kafka_formats(kafka_cluster, create_query_generator): {"id": 0, "blockNo": 0, "val1": str("AM"), "val2": 0.5, "val3": 1}, ), ], - "extra_settings": {"format_avro_schema_registry_url":"http://{}:{}".format( - kafka_cluster.schema_registry_host, kafka_cluster.schema_registry_port - )}, + "extra_settings": { + "format_avro_schema_registry_url": "http://{}:{}".format( + kafka_cluster.schema_registry_host, + kafka_cluster.schema_registry_port, + ) + }, "supports_empty_value": True, }, "Avro": { @@ -980,7 +1028,8 @@ def test_kafka_formats(kafka_cluster, create_query_generator): topic_list=f"{topic_name}", consumer_group=f"{topic_name}_group", format=format_name, - settings=extra_settings), + settings=extra_settings, + ), ) ) raw_expected = """\ @@ -1021,7 +1070,10 @@ def test_kafka_formats(kafka_cluster, create_query_generator): [1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2] ) result = instance.query_with_retry( - "SELECT * FROM test.kafka_{format_name}_mv;".format(format_name=format_name), check_callback=lambda x: x.count('\n') == raw_expected.count('\n') + "SELECT * FROM test.kafka_{format_name}_mv;".format( + format_name=format_name + ), + check_callback=lambda x: x.count("\n") == raw_expected.count("\n"), ) expected = raw_expected.format( topic_name=topic_name, @@ -1693,9 +1745,13 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster): """ assert TSV(result) == TSV(expected) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) + +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_materialized_view(kafka_cluster, create_query_generator): - topic_name="mv" + topic_name = "mv" instance.query( f""" @@ -1718,7 +1774,9 @@ def test_kafka_materialized_view(kafka_cluster, create_query_generator): kafka_produce(kafka_cluster, topic_name, messages) with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): - result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result) + result = instance.query_with_retry( + "SELECT * FROM test.view", check_callback=kafka_check_result + ) kafka_check_result(result, True) @@ -1731,10 +1789,19 @@ def test_kafka_materialized_view(kafka_cluster, create_query_generator): ) -@pytest.mark.parametrize('create_query_generator, log_line', [ - (generate_new_create_table_query, r"kafka.*Saved offset [0-9]+ for topic-partition \[recreate_kafka_table:[0-9]+"), - (generate_old_create_table_query, 
"kafka.*Committed offset [0-9]+.*recreate_kafka_table"), -]) +@pytest.mark.parametrize( + "create_query_generator, log_line", + [ + ( + generate_new_create_table_query, + r"kafka.*Saved offset [0-9]+ for topic-partition \[recreate_kafka_table:[0-9]+", + ), + ( + generate_old_create_table_query, + "kafka.*Committed offset [0-9]+.*recreate_kafka_table", + ), + ], +) def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, log_line): """ Checks that materialized view work properly after dropping and recreating the Kafka table. @@ -1753,7 +1820,8 @@ def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, log_l "kafka_flush_interval_ms": 1000, "kafka_skip_broken_messages": 1048577, "kafka_thread_per_consumer": thread_per_consumer, - }) + }, + ) instance.query( f""" @@ -1808,10 +1876,16 @@ def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, log_l ) -@pytest.mark.parametrize('create_query_generator, log_line', [ - (generate_old_create_table_query, "Committed offset {offset}"), - (generate_new_create_table_query, r"kafka.*Saved offset [0-9]+ for topic-partition \[{topic}:[0-9]+\]") -]) +@pytest.mark.parametrize( + "create_query_generator, log_line", + [ + (generate_old_create_table_query, "Committed offset {offset}"), + ( + generate_new_create_table_query, + r"kafka.*Saved offset [0-9]+ for topic-partition \[{topic}:[0-9]+\]", + ), + ], +) def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line): """ Regression for UB in snappy-c (that is used in librdkafka), @@ -1866,16 +1940,19 @@ def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line) "kafka", "key UInt64, value String", topic_list=topic_name, - #brokers="kafka1:19092", - #consumer_group=f"{topic_name}_group", + # brokers="kafka1:19092", + # consumer_group=f"{topic_name}_group", format="JSONEachRow", - settings={"kafka_flush_interval_ms": 1000}), + settings={"kafka_flush_interval_ms": 1000}, + ), ) ) kafka_produce(kafka_cluster, topic_name, messages) - instance.wait_for_log_line(log_line.format(offset=number_of_messages, topic=topic_name)) + instance.wait_for_log_line( + log_line.format(offset=number_of_messages, topic=topic_name) + ) result = instance.query("SELECT * FROM test.consumer") assert TSV(result) == TSV(expected) @@ -1884,12 +1961,20 @@ def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line) instance.query("DROP TABLE test.consumer SYNC") -@pytest.mark.parametrize('create_query_generator', [generate_new_create_table_query, generate_old_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_new_create_table_query, generate_old_create_table_query], +) def test_kafka_materialized_view_with_subquery(kafka_cluster, create_query_generator): topic_name = "mysq" logging.debug(f"Using topic {topic_name}") - create_query = create_query_generator("kafka", "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name) + create_query = create_query_generator( + "kafka", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=topic_name, + ) instance.query( f""" DROP TABLE IF EXISTS test.kafka; @@ -1911,7 +1996,12 @@ def test_kafka_materialized_view_with_subquery(kafka_cluster, create_query_gener kafka_produce(kafka_cluster, topic_name, messages) with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): - result = instance.query_with_retry("SELECT * FROM test.view", check_callback=kafka_check_result, retry_count=40, sleep_time=0.75) + 
result = instance.query_with_retry( + "SELECT * FROM test.view", + check_callback=kafka_check_result, + retry_count=40, + sleep_time=0.75, + ) instance.query( """ @@ -1923,14 +2013,17 @@ def test_kafka_materialized_view_with_subquery(kafka_cluster, create_query_gener kafka_check_result(result, True) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_many_materialized_views(kafka_cluster, create_query_generator): topic_name = f"mmv-{get_topic_postfix(create_query_generator)}" create_query = create_query_generator( "kafka", "key UInt64, value UInt64", topic_list=topic_name, - consumer_group=f"{topic_name}-group" + consumer_group=f"{topic_name}-group", ) instance.query( @@ -1959,8 +2052,12 @@ def test_kafka_many_materialized_views(kafka_cluster, create_query_generator): kafka_produce(kafka_cluster, topic_name, messages) with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): - result1 = instance.query_with_retry("SELECT * FROM test.view1", check_callback=kafka_check_result) - result2 = instance.query_with_retry("SELECT * FROM test.view2", check_callback=kafka_check_result) + result1 = instance.query_with_retry( + "SELECT * FROM test.view1", check_callback=kafka_check_result + ) + result2 = instance.query_with_retry( + "SELECT * FROM test.view2", check_callback=kafka_check_result + ) instance.query( """ @@ -1975,7 +2072,10 @@ def test_kafka_many_materialized_views(kafka_cluster, create_query_generator): kafka_check_result(result2, True) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_flush_on_big_message(kafka_cluster, create_query_generator): # Create batches of messages of size ~100Kb kafka_messages = 1000 @@ -1995,7 +2095,7 @@ def test_kafka_flush_on_big_message(kafka_cluster, create_query_generator): "key UInt64, value String", topic_list=topic_name, consumer_group=topic_name, - settings={"kafka_max_block_size": 10} + settings={"kafka_max_block_size": 10}, ) instance.query( @@ -2079,8 +2179,13 @@ def test_kafka_virtual_columns(kafka_cluster): kafka_check_result(result, True, "test_kafka_virtual1.reference") -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) -def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_query_generator): +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) +def test_kafka_virtual_columns_with_materialized_view( + kafka_cluster, create_query_generator +): topic_config = { # default retention, since predefined timestamp_ms is used. 
"retention.ms": "-1", @@ -2092,10 +2197,9 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_quer "kafka", "key UInt64, value UInt64", topic_list=topic_name, - consumer_group=f"{topic_name}-group" + consumer_group=f"{topic_name}-group", ) with kafka_topic(get_admin_client(kafka_cluster), topic_name, config=topic_config): - instance.query( f""" DROP TABLE IF EXISTS test.view; @@ -2116,9 +2220,11 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_quer def check_callback(result): return kafka_check_result(result, False, "test_kafka_virtual2.reference") + result = instance.query_with_retry( "SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key, key", - check_callback=check_callback) + check_callback=check_callback, + ) kafka_check_result(result, True, "test_kafka_virtual2.reference") @@ -2129,6 +2235,7 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster, create_quer """ ) + def insert_with_retry(instance, values, table_name="kafka", max_try_couunt=5): try_count = 0 while True: @@ -2144,7 +2251,10 @@ def insert_with_retry(instance, values, table_name="kafka", max_try_couunt=5): raise -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_insert(kafka_cluster, create_query_generator): topic_name = "insert1" + get_topic_postfix(create_query_generator) @@ -2154,8 +2264,8 @@ def test_kafka_insert(kafka_cluster, create_query_generator): "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, - format="TSV" - ) + format="TSV", + ) ) message_count = 50 @@ -2171,7 +2281,10 @@ def test_kafka_insert(kafka_cluster, create_query_generator): kafka_check_result(result, True) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_produce_consume(kafka_cluster, create_query_generator): topic_name = "insert2" + get_topic_postfix(create_query_generator) @@ -2180,7 +2293,7 @@ def test_kafka_produce_consume(kafka_cluster, create_query_generator): "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, - format="TSV" + format="TSV", ) instance.query( f""" @@ -2219,7 +2332,8 @@ def test_kafka_produce_consume(kafka_cluster, create_query_generator): "SELECT count() FROM test.view", sleep_time=1, retry_count=20, - check_callback=lambda result: int(result) == expected_row_count) + check_callback=lambda result: int(result) == expected_row_count, + ) instance.query( """ @@ -2236,9 +2350,12 @@ def test_kafka_produce_consume(kafka_cluster, create_query_generator): ), "ClickHouse lost some messages: {}".format(result) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): - topic_name="block" + get_topic_postfix(create_query_generator) + topic_name = "block" + get_topic_postfix(create_query_generator) create_query = create_query_generator( "kafka", "key UInt64, value UInt64", @@ -2279,7 
+2396,8 @@ def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): instance.query_with_retry( "SELECT count() FROM test.view", sleep_time=1, - check_callback=lambda res: int(res) >= 100) + check_callback=lambda res: int(res) >= 100, + ) cancel.set() @@ -2291,7 +2409,8 @@ def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): instance.query_with_retry( "SELECT uniqExact(key) FROM test.view", sleep_time=1, - check_callback=lambda res: int(res) >= i[0]) + check_callback=lambda res: int(res) >= i[0], + ) result = int(instance.query("SELECT count() == uniqExact(key) FROM test.view")) @@ -2306,10 +2425,17 @@ def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): assert result == 1, "Messages from kafka get duplicated!" -@pytest.mark.parametrize('create_query_generator, log_line', [ - (generate_old_create_table_query, "kafka.*Committed offset 2.*virt2_[01]"), - (generate_new_create_table_query, r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+"), -]) + +@pytest.mark.parametrize( + "create_query_generator, log_line", + [ + (generate_old_create_table_query, "kafka.*Committed offset 2.*virt2_[01]"), + ( + generate_new_create_table_query, + r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+", + ), + ], +) def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line): admin_client = get_admin_client(kafka_cluster) @@ -2320,9 +2446,11 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) thread_per_consumer = must_use_thread_per_consumer(create_query_generator) topic_name_0 = "virt2_0" topic_name_1 = "virt2_1" - consumer_group = "virt2"+get_topic_postfix(create_query_generator) + consumer_group = "virt2" + get_topic_postfix(create_query_generator) with kafka_topic(admin_client, topic_name_0, num_partitions=2, config=topic_config): - with kafka_topic(admin_client, topic_name_1, num_partitions=2, config=topic_config): + with kafka_topic( + admin_client, topic_name_1, num_partitions=2, config=topic_config + ): create_query = create_query_generator( "kafka", "value UInt64", @@ -2331,7 +2459,7 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) settings={ "kafka_num_consumers": 2, "kafka_thread_per_consumer": thread_per_consumer, - } + }, ) instance.query( @@ -2430,7 +2558,9 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) # members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0' # members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1' - result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) + result = instance.query( + "SELECT * FROM test.view ORDER BY value", ignore_error=True + ) expected = f"""\ 1 k1 {topic_name_0} 0 0 1577836801 1577836801001 ['content-encoding'] ['base64'] @@ -2453,6 +2583,7 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) ) instance.rotate_logs() + # TODO(antaljanosbenjamin) def test_kafka_producer_consumer_separate_settings(kafka_cluster): instance.query( @@ -2531,10 +2662,13 @@ def test_kafka_producer_consumer_separate_settings(kafka_cluster): assert property_in_log in kafka_producer_applyed_properties -@pytest.mark.parametrize('create_query_generator, log_line', [ - (generate_new_create_table_query,"Saved offset 5"), - (generate_old_create_table_query, "Committed offset 5"), -]) +@pytest.mark.parametrize( + "create_query_generator, log_line", + [ + (generate_new_create_table_query, "Saved 
offset 5"), + (generate_old_create_table_query, "Committed offset 5"), + ], +) def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_line): topic_name = "insert3" topic_config = { @@ -2548,13 +2682,15 @@ def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_ "key UInt64, value UInt64, _key String, _timestamp DateTime('UTC')", topic_list=topic_name, consumer_group=topic_name, - format="TSV") + format="TSV", + ) reader_create_query = create_query_generator( "kafka", "key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC')", topic_list=topic_name, consumer_group=topic_name, - format="TSV") + format="TSV", + ) instance.query( f""" @@ -2603,12 +2739,16 @@ def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_ ignore_error=True, retry_count=5, sleep_time=1, - check_callback=lambda res: TSV(res) == TSV(expected)) + check_callback=lambda res: TSV(res) == TSV(expected), + ) assert TSV(result) == TSV(expected) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_insert_avro(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) @@ -2617,7 +2757,7 @@ def test_kafka_insert_avro(kafka_cluster, create_query_generator): # default retention, since predefined timestamp_ms is used. "retention.ms": "-1", } - topic_name="avro1" + get_topic_postfix(create_query_generator) + topic_name = "avro1" + get_topic_postfix(create_query_generator) with kafka_topic(admin_client, topic_name, config=topic_config): create_query = create_query_generator( "kafka", @@ -2638,7 +2778,13 @@ def test_kafka_insert_avro(kafka_cluster, create_query_generator): ) message_count = 2 - messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count, need_decode=False, timestamp=1636505534) + messages = kafka_consume_with_retry( + kafka_cluster, + topic_name, + message_count, + need_decode=False, + timestamp=1636505534, + ) result = "" for a_message in messages: @@ -2654,11 +2800,13 @@ def test_kafka_insert_avro(kafka_cluster, create_query_generator): assert result == expected_result -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): topic_name = "insert_avro" + get_topic_postfix(create_query_generator) with kafka_topic(get_admin_client(kafka_cluster), topic_name): - num_rows = 75 writer_create_query = create_query_generator( @@ -2712,11 +2860,14 @@ def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): assert int(expected_max_key) == (num_rows - 1) * 10 -@pytest.mark.parametrize('create_query_generator', [ - generate_old_create_table_query, - # TODO(antaljanosbenjamin): Something is off with timing - # generate_new_create_table_query -]) +@pytest.mark.parametrize( + "create_query_generator", + [ + generate_old_create_table_query, + # TODO(antaljanosbenjamin): Something is off with timing + # generate_new_create_table_query + ], +) def test_kafka_flush_by_time(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( 
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) @@ -2731,7 +2882,7 @@ def test_kafka_flush_by_time(kafka_cluster, create_query_generator): consumer_group=topic_name, settings={ "kafka_max_block_size": 100, - } + }, ) instance.query( f""" @@ -2782,11 +2933,15 @@ def test_kafka_flush_by_time(kafka_cluster, create_query_generator): assert TSV(result) == TSV("2 1") -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_flush_by_block_size(kafka_cluster, create_query_generator): topic_name = "flush_by_block_size" + get_topic_postfix(create_query_generator) cancel = threading.Event() + def produce(): while not cancel.is_set(): messages = [] @@ -2807,7 +2962,7 @@ def test_kafka_flush_by_block_size(kafka_cluster, create_query_generator): "kafka_max_block_size": 100, "kafka_poll_max_batch_size": 1, "kafka_flush_interval_ms": 120000, - } + }, ) instance.query( @@ -2855,13 +3010,20 @@ def test_kafka_flush_by_block_size(kafka_cluster, create_query_generator): ), "Messages from kafka should be flushed when block of size kafka_max_block_size is formed!" -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) -def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster, create_query_generator): +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) +def test_kafka_lot_of_partitions_partial_commit_of_bulk( + kafka_cluster, create_query_generator +): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - topic_name = "topic_with_multiple_partitions2" + get_topic_postfix(create_query_generator) + topic_name = "topic_with_multiple_partitions2" + get_topic_postfix( + create_query_generator + ) with kafka_topic(admin_client, topic_name): create_query = create_query_generator( "kafka", @@ -2871,7 +3033,7 @@ def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster, create_qu settings={ "kafka_max_block_size": 211, "kafka_flush_interval_ms": 500, - } + }, ) instance.query( f""" @@ -2898,7 +3060,9 @@ def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster, create_qu instance.wait_for_log_line("kafka.*Stalled", repetitions=5) - result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") + result = instance.query( + "SELECT count(), uniqExact(key), max(key) FROM test.view" + ) logging.debug(result) assert TSV(result) == TSV("{0}\t{0}\t{0}".format(count)) @@ -2910,10 +3074,13 @@ def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster, create_qu ) -@pytest.mark.parametrize('create_query_generator, log_line', [ - (generate_old_create_table_query, "{}.*Polled offset [0-9]+"), - (generate_new_create_table_query, "{}.*Saved offset"), -]) +@pytest.mark.parametrize( + "create_query_generator, log_line", + [ + (generate_old_create_table_query, "{}.*Polled offset [0-9]+"), + (generate_new_create_table_query, "{}.*Saved offset"), + ], +) def test_kafka_rebalance(kafka_cluster, create_query_generator, log_line): NUMBER_OF_CONSURRENT_CONSUMERS = 11 @@ -2938,11 +3105,12 @@ def test_kafka_rebalance(kafka_cluster, create_query_generator, log_line): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - 
topic_name = "topic_with_multiple_partitions" + get_topic_postfix(create_query_generator) + topic_name = "topic_with_multiple_partitions" + get_topic_postfix( + create_query_generator + ) table_name_prefix = "kafka_consumer" keeper_path = f"/clickhouse/{{database}}/{table_name_prefix}" with kafka_topic(admin_client, topic_name, num_partitions=11): - cancel = threading.Event() msg_index = [0] @@ -2974,7 +3142,7 @@ def test_kafka_rebalance(kafka_cluster, create_query_generator, log_line): settings={ "kafka_max_block_size": 33, "kafka_flush_interval_ms": 500, - } + }, ) instance.query( f""" @@ -3081,15 +3249,20 @@ def test_kafka_rebalance(kafka_cluster, create_query_generator, log_line): # TODO(antaljanosbenjamin): find another way to make insertion fail -@pytest.mark.parametrize('create_query_generator', [ - generate_old_create_table_query, - # generate_new_create_table_query, -]) +@pytest.mark.parametrize( + "create_query_generator", + [ + generate_old_create_table_query, + # generate_new_create_table_query, + ], +) def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - topic_name = "no_holes_when_write_suffix_failed" + get_topic_postfix(create_query_generator) + topic_name = "no_holes_when_write_suffix_failed" + get_topic_postfix( + create_query_generator + ) with kafka_topic(admin_client, topic_name): messages = [json.dumps({"key": j + 1, "value": "x" * 300}) for j in range(22)] @@ -3103,7 +3276,7 @@ def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster, create_query_gen settings={ "kafka_max_block_size": 20, "kafka_flush_interval_ms": 2000, - } + }, ) instance.query( f""" @@ -3140,7 +3313,9 @@ def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster, create_query_gen pm.heal_all() instance.wait_for_log_line("Committed offset 22") - result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") + result = instance.query( + "SELECT count(), uniqExact(key), max(key) FROM test.view" + ) logging.debug(result) # kafka_cluster.open_bash_shell('instance') @@ -3198,9 +3373,14 @@ def test_exception_from_destructor(kafka_cluster): assert TSV(instance.query("SELECT 1")) == TSV("1") -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_commits_of_unprocessed_messages_on_drop(kafka_cluster, create_query_generator): - topic_name = "commits_of_unprocessed_messages_on_drop" + get_topic_postfix(create_query_generator) + topic_name = "commits_of_unprocessed_messages_on_drop" + get_topic_postfix( + create_query_generator + ) messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(1)] kafka_produce(kafka_cluster, topic_name, messages) @@ -3213,7 +3393,7 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster, create_query_gen settings={ "kafka_max_block_size": 1000, "kafka_flush_interval_ms": 1000, - } + }, ) instance.query( f""" @@ -3259,9 +3439,7 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster, create_query_gen for _ in range(113): messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 - kafka_produce( - kafka_cluster, topic_name, messages - ) + kafka_produce(kafka_cluster, topic_name, messages) time.sleep(0.5) kafka_thread = threading.Thread(target=produce) @@ -3282,11 +3460,9 @@ def 
test_commits_of_unprocessed_messages_on_drop(kafka_cluster, create_query_gen settings={ "kafka_max_block_size": 10000, "kafka_flush_interval_ms": 1000, - } - ) - instance.query( - new_create_query + }, ) + instance.query(new_create_query) cancel.set() instance.wait_for_log_line("kafka.*Stalled", repetitions=5) @@ -3310,7 +3486,10 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster, create_query_gen assert TSV(result) == TSV("{0}\t{0}\t{0}".format(i[0] - 1)), "Missing data!" -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_bad_reschedule(kafka_cluster, create_query_generator): topic_name = "test_bad_reschedule" + get_topic_postfix(create_query_generator) @@ -3325,7 +3504,7 @@ def test_bad_reschedule(kafka_cluster, create_query_generator): settings={ "kafka_max_block_size": 1000, "kafka_flush_interval_ms": 1000, - } + }, ) instance.query( f""" @@ -3432,7 +3611,10 @@ def test_kafka_duplicates_when_commit_failed(kafka_cluster): # But in cases of some peaky loads in kafka topic the current contract sounds more predictable and # easier to understand, so let's keep it as is for now. # also we can came to eof because we drained librdkafka internal queue too fast -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_premature_flush_on_eof(kafka_cluster, create_query_generator): topic_name = "premature_flush_on_eof" + get_topic_postfix(create_query_generator) create_query = create_query_generator( @@ -3510,14 +3692,16 @@ def test_premature_flush_on_eof(kafka_cluster, create_query_generator): ) -@pytest.mark.parametrize('create_query_generator, do_direct_read', [ - (generate_old_create_table_query, True), - (generate_new_create_table_query, False) -]) +@pytest.mark.parametrize( + "create_query_generator, do_direct_read", + [(generate_old_create_table_query, True), (generate_new_create_table_query, False)], +) def test_kafka_unavailable(kafka_cluster, create_query_generator, do_direct_read): - number_of_messages=20000 + number_of_messages = 20000 topic_name = "test_bad_reschedule" + get_topic_postfix(create_query_generator) - messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(number_of_messages)] + messages = [ + json.dumps({"key": j + 1, "value": j + 1}) for j in range(number_of_messages) + ] kafka_produce(kafka_cluster, topic_name, messages) with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): @@ -3528,7 +3712,7 @@ def test_kafka_unavailable(kafka_cluster, create_query_generator, do_direct_read "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, - settings={"kafka_max_block_size": 1000} + settings={"kafka_max_block_size": 1000}, ) instance.query( f""" @@ -3559,12 +3743,16 @@ def test_kafka_unavailable(kafka_cluster, create_query_generator, do_direct_read result = instance.query_with_retry( "SELECT count() FROM test.destination_unavailable", sleep_time=1, - check_callback=lambda res: int(res) == number_of_messages) + check_callback=lambda res: int(res) == number_of_messages, + ) assert int(result) == number_of_messages -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, 
generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_issue14202(kafka_cluster, create_query_generator): """ INSERT INTO Kafka Engine from an empty SELECT sub query was leading to failure @@ -3638,9 +3826,14 @@ def random_string(size=8): return "".join(random.choices(string.ascii_uppercase + string.digits, k=size)) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_engine_put_errors_to_stream(kafka_cluster, create_query_generator): - topic_name = "kafka_engine_put_errors_to_stream" + get_topic_postfix(create_query_generator) + topic_name = "kafka_engine_put_errors_to_stream" + get_topic_postfix( + create_query_generator + ) create_query = create_query_generator( "kafka", "i Int64, s String", @@ -3649,7 +3842,7 @@ def test_kafka_engine_put_errors_to_stream(kafka_cluster, create_query_generator settings={ "kafka_max_block_size": 128, "kafka_handle_error_mode": "stream", - } + }, ) instance.query( f""" @@ -3726,9 +3919,17 @@ def gen_message_with_jsons(jsons=10, malformed=0): return s.getvalue() -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) -def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster, create_query_generator): - topic_name = "kafka_engine_put_errors_to_stream_with_random_malformed_json" + get_topic_postfix(create_query_generator) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) +def test_kafka_engine_put_errors_to_stream_with_random_malformed_json( + kafka_cluster, create_query_generator +): + topic_name = ( + "kafka_engine_put_errors_to_stream_with_random_malformed_json" + + get_topic_postfix(create_query_generator) + ) create_query = create_query_generator( "kafka", "i Int64, s String", @@ -3738,7 +3939,7 @@ def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_clus "kafka_max_block_size": 100, "kafka_poll_max_batch_size": 1, "kafka_handle_error_mode": "stream", - } + }, ) instance.query( @@ -3788,7 +3989,10 @@ def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_clus ) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator): # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' @@ -3808,8 +4012,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator '{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}', ], "expected": { - "raw_message":"{\"id\":\"0\",\"blockNo\":\"BAD\",\"val1\":\"AM\",\"val2\":0.5,\"val3\":1}", - "error":"Cannot parse input: expected '\"' before: 'BAD\",\"val1\":\"AM\",\"val2\":0.5,\"val3\":1}': (while reading the value of 
key blockNo)" + "raw_message": '{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}', + "error": 'Cannot parse input: expected \'"\' before: \'BAD","val1":"AM","val2":0.5,"val3":1}\': (while reading the value of key blockNo)', }, "supports_empty_value": True, "printable": True, @@ -3824,8 +4028,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator '["0", "BAD", "AM", 0.5, 1]', ], "expected": { - "raw_message":"[\"0\", \"BAD\", \"AM\", 0.5, 1]", - "error":"Cannot parse input: expected '\"' before: 'BAD\", \"AM\", 0.5, 1]': (while reading the value of key blockNo)" + "raw_message": '["0", "BAD", "AM", 0.5, 1]', + "error": "Cannot parse input: expected '\"' before: 'BAD\", \"AM\", 0.5, 1]': (while reading the value of key blockNo)", }, "supports_empty_value": True, "printable": True, @@ -3839,8 +4043,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator '["0", "BAD", "AM", 0.5, 1]', ], "expected": { - "raw_message":"[\"0\", \"BAD\", \"AM\", 0.5, 1]", - "error":"Cannot parse JSON string: expected opening quote" + "raw_message": '["0", "BAD", "AM", 0.5, 1]', + "error": "Cannot parse JSON string: expected opening quote", }, "printable": True, }, @@ -3853,8 +4057,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator "id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n", ], "expected": { - "raw_message":"id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n", - "error":"Found garbage after field in TSKV format: blockNo: (at row 1)\n" + "raw_message": "id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n", + "error": "Found garbage after field in TSKV format: blockNo: (at row 1)\n", }, "printable": True, }, @@ -3867,8 +4071,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator '0,"BAD","AM",0.5,1\n', ], "expected": { - "raw_message":"0,\"BAD\",\"AM\",0.5,1\n", - "error":"Cannot parse input: expected '\"' before: 'BAD\",\"AM\",0.5,1\\n'" + "raw_message": '0,"BAD","AM",0.5,1\n', + "error": "Cannot parse input: expected '\"' before: 'BAD\",\"AM\",0.5,1\\n'", }, "printable": True, "supports_empty_value": True, @@ -3882,8 +4086,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator "0\tBAD\tAM\t0.5\t1\n", ], "expected": { - "raw_message":"0\tBAD\tAM\t0.5\t1\n", - "error":"Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n'" + "raw_message": "0\tBAD\tAM\t0.5\t1\n", + "error": "Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n'", }, "supports_empty_value": True, "printable": True, @@ -3897,8 +4101,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator '"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n', ], "expected": { - "raw_message":"\"id\",\"blockNo\",\"val1\",\"val2\",\"val3\"\n0,\"BAD\",\"AM\",0.5,1\n", - "error":"Cannot parse input: expected '\"' before: 'BAD\",\"AM\",0.5,1\\n'" + "raw_message": '"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n', + "error": "Cannot parse input: expected '\"' before: 'BAD\",\"AM\",0.5,1\\n'", }, "printable": True, }, @@ -3911,8 +4115,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator "(0,'BAD','AM',0.5,1)", ], "expected": { - "raw_message":"(0,'BAD','AM',0.5,1)", - "error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero/NULL instead of throwing exception" + "raw_message": "(0,'BAD','AM',0.5,1)", + "error": "Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero/NULL instead of throwing exception", }, "supports_empty_value": True, "printable": True, @@ -3926,8 +4130,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator "id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n", ], "expected": { - "raw_message":"id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n", - "error":"Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n" + "raw_message": "id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n", + "error": "Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n", }, "supports_empty_value": True, "printable": True, @@ -3941,8 +4145,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n", ], "expected": { - "raw_message":"id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n", - "error":"Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n'" + "raw_message": "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n", + "error": "Cannot parse input: expected '\\t' before: 'BAD\\tAM\\t0.5\\t1\\n'", }, "printable": True, }, @@ -3955,8 +4159,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", ], "expected": { - "raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801", - "error":"Cannot convert: String to UInt16" + "raw_message": "050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801", + "error": "Cannot convert: String to UInt16", }, "printable": False, }, @@ -3969,8 +4173,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator b"\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01", ], "expected": { - "raw_message":"00000000000000000342414402414D0000003F01", - "error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\n" + "raw_message": "00000000000000000342414402414D0000003F01", + "error": "Cannot read all data. Bytes read: 9. 
Bytes expected: 65.: (at row 1)\n", }, "printable": False, }, @@ -3983,8 +4187,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01", ], "expected": { - "raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01", - "error":"Type of 'blockNo' must be UInt16, not String" + "raw_message": "0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01", + "error": "Type of 'blockNo' must be UInt16, not String", }, "printable": False, }, @@ -3997,8 +4201,8 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator b"\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a
\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", ], "expected": { - "raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318", - "error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero/NULL instead of throwing exception." + "raw_message": "4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318", + "error": "Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero/NULL instead of throwing exception.", }, "printable": False, }, @@ -4027,7 +4231,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator settings={ "kafka_handle_error_mode": "stream", "kafka_flush_interval_ms": 1000, - } + }, ) instance.query( f""" @@ -4118,10 +4322,13 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator kafka_delete_topic(admin_client, topic_name) -@pytest.mark.parametrize('create_query_generator', [ - generate_old_create_table_query, - # generate_new_create_table_query TODO(antaljanosbenjamin): crashes CH -]) +@pytest.mark.parametrize( + "create_query_generator", + [ + generate_old_create_table_query, + # generate_new_create_table_query TODO(antaljanosbenjamin): crashes CH + ], +) def test_kafka_consumer_failover(kafka_cluster, create_query_generator): topic_name = "kafka_consumer_failover" + get_topic_postfix(create_query_generator) @@ -4129,16 +4336,18 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): consumer_group = f"{topic_name}_group" create_queries = [] for counter in range(3): - create_queries.append(create_query_generator( - f"kafka{counter+1}", - "key UInt64, value UInt64", - topic_list=topic_name, - consumer_group=consumer_group, - settings={ - "kafka_max_block_size": 1, - "kafka_poll_timeout_ms": 200, - } - )) + create_queries.append( + create_query_generator( + f"kafka{counter+1}", + "key UInt64, value UInt64", + topic_list=topic_name, + consumer_group=consumer_group, + settings={ + "kafka_max_block_size": 1, + "kafka_poll_timeout_ms": 200, + }, + ) + ) instance.query( f""" @@ -4188,7 +4397,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): producer.flush() count_query = "SELECT count() FROM test.destination" - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > 0) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > 0 + ) ## 2 attached, 2 working instance.query("DETACH TABLE test.kafka1") @@ -4203,7 +4414,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) ## 1 attached, 1 working instance.query("DETACH TABLE test.kafka2") @@ -4218,7 +4431,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) ## 2 attached, 2 working instance.query("ATTACH TABLE test.kafka1") @@ -4233,7 +4448,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) ## 1 attached, 1 working instance.query("DETACH TABLE test.kafka3") @@ -4248,7 +4465,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, 
check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) ## 2 attached, 2 working instance.query("ATTACH TABLE test.kafka2") @@ -4263,7 +4482,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) ## 3 attached, 2 working instance.query("ATTACH TABLE test.kafka3") @@ -4278,7 +4499,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) ## 2 attached, same 2 working instance.query("DETACH TABLE test.kafka3") @@ -4293,7 +4516,9 @@ def test_kafka_consumer_failover(kafka_cluster, create_query_generator): partition=1, ) producer.flush() - prev_count = instance.query_with_retry(count_query, check_callback=lambda res: int(res) > prev_count) + prev_count = instance.query_with_retry( + count_query, check_callback=lambda res: int(res) > prev_count + ) def test_kafka_predefined_configuration(kafka_cluster): @@ -4323,7 +4548,10 @@ def test_kafka_predefined_configuration(kafka_cluster): # https://github.com/ClickHouse/ClickHouse/issues/26643 -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_issue26643(kafka_cluster, create_query_generator): producer = KafkaProducer( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), @@ -4383,7 +4611,7 @@ def test_issue26643(kafka_cluster, create_query_generator): "kafka_schema": "message_with_repeated.proto:Message", "kafka_skip_broken_messages": 10000, "kafka_thread_per_consumer": thread_per_consumer, - } + }, ) instance.query( @@ -4440,8 +4668,10 @@ def test_issue26643(kafka_cluster, create_query_generator): assert TSV(result) == TSV(expected) - -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_num_consumers_limit(kafka_cluster, create_query_generator): instance.query("DROP TABLE IF EXISTS test.kafka") @@ -4452,12 +4682,15 @@ def test_num_consumers_limit(kafka_cluster, create_query_generator): "key UInt64, value UInt64", settings={ "kafka_num_consumers": 100, - "kafka_thread_per_consumer": thread_per_consumer - } + "kafka_thread_per_consumer": thread_per_consumer, + }, ) error = instance.query_and_get_error(create_query) - assert "BAD_ARGUMENTS" in error and "The number of consumers can not be bigger than" in error + assert ( + "BAD_ARGUMENTS" in error + and "The number of consumers can not be bigger than" in error + ) instance.query( f""" @@ -4469,7 +4702,10 @@ def test_num_consumers_limit(kafka_cluster, create_query_generator): instance.query("DROP TABLE test.kafka") -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + 
"create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_format_with_prefix_and_suffix(kafka_cluster, create_query_generator): topic_name = "custom" + get_topic_postfix(create_query_generator) @@ -4479,7 +4715,7 @@ def test_format_with_prefix_and_suffix(kafka_cluster, create_query_generator): "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, - format="CustomSeparated" + format="CustomSeparated", ) instance.query( f""" @@ -4498,13 +4734,19 @@ def test_format_with_prefix_and_suffix(kafka_cluster, create_query_generator): assert len(messages) == 2 assert ( - "".join(messages) == "\n0\t0\n\n\n10\t100\n\n" + "".join(messages) + == "\n0\t0\n\n\n10\t100\n\n" ) -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_max_rows_per_message(kafka_cluster, create_query_generator): - topic_name = "custom_max_rows_per_message" + get_topic_postfix(create_query_generator) + topic_name = "custom_max_rows_per_message" + get_topic_postfix( + create_query_generator + ) with kafka_topic(get_admin_client(kafka_cluster), topic_name): num_rows = 5 @@ -4519,7 +4761,7 @@ def test_max_rows_per_message(kafka_cluster, create_query_generator): "format_custom_result_before_delimiter": "\n", "format_custom_result_after_delimiter": "\n", "kafka_max_rows_per_message": 3, - } + }, ) instance.query( f""" @@ -4546,13 +4788,19 @@ def test_max_rows_per_message(kafka_cluster, create_query_generator): == "\n0\t0\n10\t100\n20\t200\n\n\n30\t300\n40\t400\n\n" ) - instance.query_with_retry("SELECT count() FROM test.view", check_callback=lambda res: int(res) == num_rows) + instance.query_with_retry( + "SELECT count() FROM test.view", + check_callback=lambda res: int(res) == num_rows, + ) result = instance.query("SELECT * FROM test.view") assert result == "0\t0\n10\t100\n20\t200\n30\t300\n40\t400\n" -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_row_based_formats(kafka_cluster, create_query_generator): admin_client = get_admin_client(kafka_cluster) @@ -4589,7 +4837,7 @@ def test_row_based_formats(kafka_cluster, create_query_generator): topic_list=topic_name, consumer_group=topic_name, format=format_name, - settings={"kafka_max_rows_per_message": max_rows_per_message} + settings={"kafka_max_rows_per_message": max_rows_per_message}, ) instance.query( @@ -4606,11 +4854,16 @@ def test_row_based_formats(kafka_cluster, create_query_generator): """ ) - messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count, need_decode=False) + messages = kafka_consume_with_retry( + kafka_cluster, topic_name, message_count, need_decode=False + ) assert len(messages) == message_count - instance.query_with_retry("SELECT count() FROM test.view", check_callback=lambda res: int(res) == num_rows) + instance.query_with_retry( + "SELECT count() FROM test.view", + check_callback=lambda res: int(res) == num_rows, + ) result = instance.query("SELECT * FROM test.view") expected = "" @@ -4619,7 +4872,10 @@ def test_row_based_formats(kafka_cluster, create_query_generator): assert result == expected -@pytest.mark.parametrize('create_query_generator', 
[generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_block_based_formats_1(kafka_cluster, create_query_generator): topic_name = "pretty_space" + get_topic_postfix(create_query_generator) @@ -4629,7 +4885,7 @@ def test_block_based_formats_1(kafka_cluster, create_query_generator): "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, - format="PrettySpace" + format="PrettySpace", ) instance.query( f""" @@ -4662,7 +4918,10 @@ def test_block_based_formats_1(kafka_cluster, create_query_generator): ] -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_block_based_formats_2(kafka_cluster, create_query_generator): admin_client = get_admin_client(kafka_cluster) num_rows = 100 @@ -4683,7 +4942,7 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, - format=format_name + format=format_name, ) instance.query( @@ -4699,10 +4958,17 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; """ ) - messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count, need_decode=False) + messages = kafka_consume_with_retry( + kafka_cluster, topic_name, message_count, need_decode=False + ) assert len(messages) == message_count - rows = int(instance.query_with_retry("SELECT count() FROM test.view", check_callback=lambda res: int(res) == num_rows)) + rows = int( + instance.query_with_retry( + "SELECT count() FROM test.view", + check_callback=lambda res: int(res) == num_rows, + ) + ) assert rows == num_rows @@ -5049,7 +5315,6 @@ def test_formats_errors(kafka_cluster): "MySQLDump", ]: with kafka_topic(admin_client, format_name): - table_name = f"kafka_{format_name}" instance.query( @@ -5081,9 +5346,9 @@ def test_formats_errors(kafka_cluster): ) num_errors = int( - instance.query_with_retry( + instance.query_with_retry( f"SELECT length(exceptions.text) from system.kafka_consumers where database = 'test' and table = '{table_name}'", - check_callback=lambda res: int(res) > 0 + check_callback=lambda res: int(res) > 0, ) ) @@ -5093,7 +5358,10 @@ def test_formats_errors(kafka_cluster): instance.query("DROP TABLE test.view") -@pytest.mark.parametrize('create_query_generator', [generate_old_create_table_query, generate_new_create_table_query]) +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) def test_multiple_read_in_materialized_views(kafka_cluster, create_query_generator): topic_name = "multiple_read_from_mv" + get_topic_postfix(create_query_generator) From 419660d1b08741c563253927b55e2d7666e737ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 8 Jun 2024 21:52:28 +0000 Subject: [PATCH 0100/2361] Fix one more test --- src/Storages/Kafka/KafkaConsumer2.cpp | 36 +----------------- src/Storages/Kafka/KafkaConsumer2.h | 2 - src/Storages/Kafka/StorageKafka2.cpp | 40 +++++++++----------- src/Storages/Kafka/StorageKafka2.h | 2 + 
tests/integration/test_storage_kafka/test.py | 7 +--- 5 files changed, 23 insertions(+), 64 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 1320b939612..8a6c7e31910 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -17,7 +17,6 @@ #include #include -#include "base/scope_guard.h" namespace CurrentMetrics { @@ -45,7 +44,6 @@ namespace ErrorCodes } using namespace std::chrono_literals; -static constexpr auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000; static constexpr auto EVENT_POLL_TIMEOUT = 50ms; static constexpr auto DRAIN_TIMEOUT_MS = 5000ms; @@ -122,7 +120,6 @@ KafkaConsumer2::KafkaConsumer2( assignment.reset(); queues.clear(); needs_offset_update = true; - waited_for_assignment = 0; }); consumer->set_rebalance_error_callback( @@ -210,13 +207,8 @@ void KafkaConsumer2::pollEvents() consumer_has_subscription = !consumer->get_subscription().empty(); } auto msg = consumer->poll(EVENT_POLL_TIMEOUT); - LOG_TRACE(log, "Consumer has subscription: {}", consumer_has_subscription); // All the partition queues are detached, so the consumer shouldn't be able to poll any messages chassert(!msg && "Consumer returned a message when it was not expected"); - - auto consumer_queue = consumer->get_consumer_queue(); - for(auto i = 0; i < max_tries && consumer_queue.get_length() > 0; ++i) - consumer->poll(EVENT_POLL_TIMEOUT); }; KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const @@ -322,32 +314,8 @@ ReadBufferPtr KafkaConsumer2::consume(const TopicPartition & topic_partition, co if (new_messages.empty()) { - // While we wait for an assignment after subscription, we'll poll zero messages anyway. - // If we're doing a manual select then it's better to get something after a wait, then immediate nothing. - if (!assignment.has_value()) - { - waited_for_assignment += poll_timeout; // slightly innaccurate, but rough calculation is ok. - if (waited_for_assignment < MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) - { - continue; - } - else - { - LOG_WARNING(log, "Can't get assignment. 
Will keep trying."); - stalled_status = StalledStatus::NO_ASSIGNMENT; - return nullptr; - } - } - else if (assignment->empty()) - { - LOG_TRACE(log, "Empty assignment."); - return nullptr; - } - else - { - LOG_TRACE(log, "Stalled"); - return nullptr; - } + LOG_TRACE(log, "Stalled"); + return nullptr; } else { diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 16d12c8723d..ff0fae35b67 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -141,8 +141,6 @@ private: StalledStatus stalled_status = StalledStatus::NO_MESSAGES_RETURNED; - size_t waited_for_assignment = 0; - const std::atomic & stopped; // order is important, need to be destructed before consumer diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 911de671fb7..bf2df473793 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -100,6 +100,7 @@ extern const int TABLE_WAS_NOT_DROPPED; namespace { constexpr auto MAX_FAILED_POLL_ATTEMPTS = 10; +constexpr auto MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS = 15000; } StorageKafka2::StorageKafka2( @@ -163,21 +164,6 @@ StorageKafka2::StorageKafka2( tryLogCurrentException(log); } } - // for (auto try_count = 0; try_count < 5; ++try_count) - // { - // bool all_had_assignment = true; - // for (auto & consumer_info : consumers) - // { - // if (nullptr == consumer_info.consumer->getKafkaAssignment()) - // { - // all_had_assignment = false; - // consumer_info.consumer->pollEvents(); - // } - // } - - // if (all_had_assignment) - // break; - // } const auto first_replica = createTableIfNotExists(); @@ -875,8 +861,10 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( KafkaConsumer2 & consumer, const TopicPartition & topic_partition, std::optional message_count, + Stopwatch & total_stopwatch, const ContextPtr & modified_context) { + LOG_TEST(log, "Polling consumer"); PolledBatchInfo batch_info; auto storage_snapshot = getStorageSnapshot(getInMemoryMetadataPtr(), getContext()); Block non_virtual_header(storage_snapshot->metadata->getSampleBlockNonMaterialized()); @@ -936,9 +924,7 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( ? kafka_settings->kafka_flush_interval_ms : getContext()->getSettingsRef().stream_flush_interval_ms; - Stopwatch total_stopwatch{CLOCK_MONOTONIC_COARSE}; - - const auto check_time_limit = [&max_execution_time, &total_stopwatch]() + const auto check_time_limit = [&max_execution_time, &total_stopwatch, this]() { if (max_execution_time != 0) { @@ -1139,8 +1125,6 @@ bool StorageKafka2::streamToViews(size_t idx) // 7. Execute the pipeline // 8. 
Write the offset to Keeper - Stopwatch watch; - auto table_id = getStorageID(); auto table = DatabaseCatalog::instance().getTable(table_id, getContext()); if (!table) @@ -1150,12 +1134,20 @@ bool StorageKafka2::streamToViews(size_t idx) ProfileEvents::increment(ProfileEvents::KafkaBackgroundReads); auto & consumer_info = consumers[idx]; + consumer_info.watch.restart(); auto & consumer = consumer_info.consumer; // To keep the consumer alive + const auto wait_for_assignment = consumer_info.locks.empty(); LOG_TRACE(log, "Polling consumer for events"); consumer->pollEvents(); + if (wait_for_assignment) + { + while (nullptr == consumer->getKafkaAssignment() && consumer_info.watch.elapsedMilliseconds() < MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) + consumer->pollEvents(); + } + try { if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) @@ -1168,6 +1160,7 @@ bool StorageKafka2::streamToViews(size_t idx) { // The consumer lost its assignment and haven't received a new one. // By returning true this function reports the current consumer as a "stalled" stream, which + LOG_TRACE(log, "No assignment"); return true; } LOG_TRACE(log, "Consumer needs update offset"); @@ -1181,6 +1174,7 @@ bool StorageKafka2::streamToViews(size_t idx) if (!maybe_locks.has_value()) { // We couldn't acquire locks, probably some other consumers are still holding them. + LOG_TRACE(log, "Couldn't acquire locks"); return true; } @@ -1206,7 +1200,7 @@ bool StorageKafka2::streamToViews(size_t idx) const auto maybe_rows = streamFromConsumer(consumer_info); if (maybe_rows.has_value()) { - const auto milliseconds = watch.elapsedMilliseconds(); + const auto milliseconds = consumer_info.watch.elapsedMilliseconds(); LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(*maybe_rows), table_id.getNameForLogs(), milliseconds); } else @@ -1262,8 +1256,8 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf return; consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); }); - auto [blocks, last_read_offset] - = pollConsumer(*consumer_info.consumer, topic_partition, consumer_info.locks[topic_partition].intent_size, kafka_context); + auto [blocks, last_read_offset] = pollConsumer( + *consumer_info.consumer, topic_partition, consumer_info.locks[topic_partition].intent_size, consumer_info.watch, kafka_context); if (blocks.empty()) { diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 86d09c584f2..0fd19f66f52 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -102,6 +102,7 @@ private: TopicPartitions topic_partitions; zkutil::ZooKeeperPtr keeper; TopicPartitionLocks locks; + Stopwatch watch{CLOCK_MONOTONIC_COARSE}; }; struct PolledBatchInfo @@ -208,6 +209,7 @@ private: KafkaConsumer2 & consumer, const TopicPartition & topic_partition, std::optional message_count, + Stopwatch & watch, const ContextPtr & context); zkutil::ZooKeeperPtr getZooKeeper(); diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index ad4e0a0877f..19374ef2c96 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -2860,13 +2860,10 @@ def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): assert int(expected_max_key) == (num_rows - 1) * 10 + @pytest.mark.parametrize( "create_query_generator", - [ - generate_old_create_table_query, - # TODO(antaljanosbenjamin): Something is off with timing - # 
generate_new_create_table_query - ], + [generate_old_create_table_query, generate_new_create_table_query], ) def test_kafka_flush_by_time(kafka_cluster, create_query_generator): admin_client = KafkaAdminClient( From f9f43283808b5551b5f07b716d9c4c20e970af46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 8 Jun 2024 21:57:19 +0000 Subject: [PATCH 0101/2361] Style fixes --- src/Storages/Kafka/KafkaConsumer2.cpp | 11 ++-------- src/Storages/Kafka/KafkaConsumer2.h | 2 +- src/Storages/Kafka/StorageKafka2.cpp | 30 +++++++++++---------------- src/Storages/Kafka/StorageKafka2.h | 12 +++++------ 4 files changed, 21 insertions(+), 34 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 8a6c7e31910..1e2ea3fd43a 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -38,11 +38,6 @@ extern const Event KafkaConsumerErrors; namespace DB { -namespace ErrorCodes -{ - extern const int CANNOT_COMMIT_OFFSET; -} - using namespace std::chrono_literals; static constexpr auto EVENT_POLL_TIMEOUT = 50ms; static constexpr auto DRAIN_TIMEOUT_MS = 5000ms; @@ -201,7 +196,7 @@ void KafkaConsumer2::pollEvents() { static constexpr int64_t max_tries = 5; auto consumer_has_subscription = !consumer->get_subscription().empty(); - for(auto i = 0; i < max_tries && !consumer_has_subscription; ++i) + for (auto i = 0; i < max_tries && !consumer_has_subscription; ++i) { consumer->subscribe(topics); consumer_has_subscription = !consumer->get_subscription().empty(); @@ -394,9 +389,7 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) { // The failure is not the biggest issue, it only counts when a table is dropped and recreated, otherwise the offsets are taken from keeper. ProfileEvents::increment(ProfileEvents::KafkaCommitFailures); - LOG_INFO( - log, - "All commit attempts failed"); + LOG_INFO(log, "All commit attempts failed"); } else { diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index ff0fae35b67..d7ec227d0bd 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -110,7 +110,7 @@ public: /// nullptr when there are no messages to process. ReadBufferPtr consume(const TopicPartition & topic_partition, const std::optional & message_count); - void commit(const TopicPartition& topic_partition); + void commit(const TopicPartition & topic_partition); // Return values for the message that's being read. 
String currentTopic() const { return current[-1].get_topic(); } diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index bf2df473793..c28a45d1f32 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -29,18 +30,18 @@ #include #include #include -#include #include #include #include #include #include -#include +#include "Common/config_version.h" #include #include #include #include #include +#include #include #include #include @@ -49,7 +50,6 @@ #include #include #include "Storages/Kafka/KafkaConsumer2.h" -#include "Common/config_version.h" #if USE_KRB5 # include @@ -65,8 +65,8 @@ namespace CurrentMetrics { +// TODO: Add proper metrics, similar to old StorageKafka extern const Metric KafkaBackgroundReads; -extern const Metric KafkaConsumersInUse; extern const Metric KafkaWrites; } @@ -77,7 +77,6 @@ extern const Event KafkaBackgroundReads; extern const Event KafkaMessagesRead; extern const Event KafkaMessagesFailed; extern const Event KafkaRowsRead; -extern const Event KafkaRowsRejected; extern const Event KafkaWrites; } @@ -91,7 +90,6 @@ namespace ErrorCodes { extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; -extern const int QUERY_NOT_ALLOWED; extern const int REPLICA_ALREADY_EXISTS; extern const int TABLE_IS_DROPPED; extern const int TABLE_WAS_NOT_DROPPED; @@ -324,12 +322,7 @@ KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) } return std::make_shared( - consumer_impl, - log, - getPollMaxBatchSize(), - getPollTimeoutMillisecond(), - tasks.back()->stream_cancelled, - topics); + consumer_impl, log, getPollMaxBatchSize(), getPollTimeoutMillisecond(), tasks.back()->stream_cancelled, topics); } @@ -833,14 +826,14 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi } -void StorageKafka2::saveCommittedOffset( - zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition) +void StorageKafka2::saveCommittedOffset(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition) { const auto partition_prefix = getTopicPartitionPath(topic_partition); keeper_to_use.createOrUpdate(partition_prefix / commit_file_name, toString(topic_partition.offset), zkutil::CreateMode::Persistent); // This is best effort, if it fails we will try to remove in the next round keeper_to_use.tryRemove(partition_prefix / intent_file_name, -1); - LOG_TEST(log, "Saved offset {} for topic-partition [{}:{}]", topic_partition.offset, topic_partition.topic, topic_partition.partition_id); + LOG_TEST( + log, "Saved offset {} for topic-partition [{}:{}]", topic_partition.offset, topic_partition.topic, topic_partition.partition_id); } void StorageKafka2::saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t intent) @@ -1026,8 +1019,8 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( } if (!consumer.hasMorePolledMessages() - && (total_rows >= getMaxBlockSize() || !check_time_limit() - || failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS || consumer.needsOffsetUpdate())) + && (total_rows >= getMaxBlockSize() || !check_time_limit() || failed_poll_attempts >= MAX_FAILED_POLL_ATTEMPTS + || consumer.needsOffsetUpdate())) { LOG_TRACE( log, @@ -1201,7 +1194,8 @@ bool StorageKafka2::streamToViews(size_t idx) if (maybe_rows.has_value()) { const auto milliseconds = consumer_info.watch.elapsedMilliseconds(); - LOG_DEBUG(log, "Pushing {} rows to 
{} took {} ms.", formatReadableQuantity(*maybe_rows), table_id.getNameForLogs(), milliseconds); + LOG_DEBUG( + log, "Pushing {} rows to {} took {} ms.", formatReadableQuantity(*maybe_rows), table_id.getNameForLogs(), milliseconds); } else { diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 0fd19f66f52..a2cbdce51a0 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -1,21 +1,21 @@ #pragma once #include +#include #include #include #include #include -#include #include #include +#include #include -#include #include #include -#include #include +#include #include #include @@ -201,9 +201,9 @@ private: void dropReplica(); // Takes lock over topic partitions and set's the committed offset in topic_partitions - std::optional lockTopicPartitions(zkutil::ZooKeeper& keeper_to_use, const TopicPartitions & topic_partitions); - void saveCommittedOffset(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition); - void saveIntent(zkutil::ZooKeeper& keeper_to_use,const TopicPartition & topic_partition, int64_t intent); + std::optional lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const TopicPartitions & topic_partitions); + void saveCommittedOffset(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition); + void saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t intent); PolledBatchInfo pollConsumer( KafkaConsumer2 & consumer, From 6df87b1cbfdcee0c70a87be66cb2b00d21ef2e84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 8 Jun 2024 22:02:21 +0000 Subject: [PATCH 0102/2361] Fix build --- src/Storages/Kafka/StorageKafka2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index c28a45d1f32..0bc484b4d45 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -917,7 +917,7 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( ? kafka_settings->kafka_flush_interval_ms : getContext()->getSettingsRef().stream_flush_interval_ms; - const auto check_time_limit = [&max_execution_time, &total_stopwatch, this]() + const auto check_time_limit = [&max_execution_time, &total_stopwatch]() { if (max_execution_time != 0) { From be64d1bd32526b0f9b60d67f6ad31ea806c55797 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 8 Jun 2024 22:16:52 +0000 Subject: [PATCH 0103/2361] Fix tests --- tests/integration/test_storage_kafka/test.py | 22 +++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 19374ef2c96..d4d47311483 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -48,6 +48,8 @@ if is_arm(): # TODO: add test for SELECT LIMIT is working. 
+KAFKA_TOPIC_OLD = "old_t" +KAFKA_CONSUMER_GROUP_OLD = "old_cg" KAFKA_TOPIC_NEW = "new_t" KAFKA_CONSUMER_GROUP_NEW = "new_cg" @@ -61,8 +63,8 @@ instance = cluster.add_instance( with_zookeeper=True, # For Replicated Table macros={ "kafka_broker": "kafka1", - "kafka_topic_old": "old", - "kafka_group_name_old": "old", + "kafka_topic_old": KAFKA_TOPIC_OLD, + "kafka_group_name_old": KAFKA_CONSUMER_GROUP_OLD, "kafka_topic_new": KAFKA_TOPIC_NEW, "kafka_group_name_new": KAFKA_CONSUMER_GROUP_NEW, "kafka_client_id": "instance", @@ -517,13 +519,13 @@ def test_kafka_settings_old_syntax(kafka_cluster): ignore_error=True, ) ) == TSV( - """kafka_broker kafka1 + f"""kafka_broker kafka1 kafka_client_id instance kafka_format_json_each_row JSONEachRow -kafka_group_name_new new -kafka_group_name_old old -kafka_topic_new new -kafka_topic_old old +kafka_group_name_new {KAFKA_CONSUMER_GROUP_NEW} +kafka_group_name_old {KAFKA_CONSUMER_GROUP_OLD} +kafka_topic_new new_t +kafka_topic_old old_t """ ) @@ -540,7 +542,7 @@ kafka_topic_old old messages = [] for i in range(50): messages.append(json.dumps({"key": i, "value": i})) - kafka_produce(kafka_cluster, "old", messages) + kafka_produce(kafka_cluster, KAFKA_TOPIC_OLD, messages) result = "" while True: @@ -550,7 +552,7 @@ kafka_topic_old old kafka_check_result(result, True) - members = describe_consumer_group(kafka_cluster, "old") + members = describe_consumer_group(kafka_cluster, KAFKA_CONSUMER_GROUP_OLD) assert members[0]["client_id"] == "ClickHouse-instance-test-kafka" # text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose")) @@ -593,7 +595,7 @@ def test_kafka_settings_new_syntax(kafka_cluster): kafka_check_result(result, True) - members = describe_consumer_group(kafka_cluster, "new") + members = describe_consumer_group(kafka_cluster, KAFKA_CONSUMER_GROUP_NEW) assert members[0]["client_id"] == "instance test 1234" From 7b90eccd57e2ae0a2379a1fb72c5e5ea48d552b2 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 8 Jun 2024 22:26:00 +0000 Subject: [PATCH 0104/2361] Automatic style fix --- tests/integration/test_storage_kafka/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index d4d47311483..74532de4878 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -2862,7 +2862,6 @@ def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): assert int(expected_max_key) == (num_rows - 1) * 10 - @pytest.mark.parametrize( "create_query_generator", [generate_old_create_table_query, generate_new_create_table_query], From eb8064aea67c01ef65ebd1c307c77f16bfc93199 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 8 Jun 2024 23:13:58 +0000 Subject: [PATCH 0105/2361] Style fix --- src/Storages/Kafka/StorageKafka2.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 0bc484b4d45..15a5388aa0b 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -72,7 +72,6 @@ extern const Metric KafkaWrites; namespace ProfileEvents { -extern const Event KafkaDirectReads; extern const Event KafkaBackgroundReads; extern const Event KafkaMessagesRead; extern const Event KafkaMessagesFailed; From 90b5ad3613ea7e3b4dea202975407569d0aaee84 Mon Sep 
17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 9 Jun 2024 19:31:20 +0000 Subject: [PATCH 0106/2361] fix tidy build --- src/IO/WriteBufferFromPocoSocketChunked.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 689389ba2ea..ecc33180140 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -85,7 +85,10 @@ protected: void nextImpl() override { if (!chunked) - return WriteBufferFromPocoSocket::nextImpl(); + { + WriteBufferFromPocoSocket::nextImpl(); + return; + } /// next() after finishChunk ar the end of the buffer if (finishing < sizeof(*chunk_size_ptr)) From b40c93165142579e871c064d02d2d60d6c526e86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 11 Jun 2024 09:20:22 +0000 Subject: [PATCH 0107/2361] Set `shutdown-called` flag on shutdown to stop writes --- src/Storages/Kafka/StorageKafka2.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 15a5388aa0b..6a30045fd1f 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -286,6 +286,7 @@ void StorageKafka2::startup() void StorageKafka2::shutdown(bool) { + shutdown_called = true; for (auto & task : tasks) { // Interrupt streaming thread From 0691c01427fac16a1b469a4ec11104e7d49569b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 11 Jun 2024 09:21:12 +0000 Subject: [PATCH 0108/2361] Fix crash in case of consumer receives no consumer groups on assignment --- src/Storages/Kafka/StorageKafka2.cpp | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 6a30045fd1f..f75e6044ddd 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -289,9 +289,13 @@ void StorageKafka2::shutdown(bool) shutdown_called = true; for (auto & task : tasks) { + LOG_TRACE(log, "Cancelling streams"); // Interrupt streaming thread task->stream_cancelled = true; + } + for (auto & task : tasks) + { LOG_TRACE(log, "Waiting for cleanup"); task->holder->deactivate(); } @@ -1108,16 +1112,8 @@ void StorageKafka2::threadFunc(size_t idx) bool StorageKafka2::streamToViews(size_t idx) { - // What to do? - // 1. Select a topic partition to consume from - // 2. Do a casual poll for every other consumer to keep them alive - // 3. Get the necessary data from Keeper - // 4. Get the corresponding consumer - // 5. Pull messages - // 6. Create a BlockList from it - // 7. Execute the pipeline - // 8. Write the offset to Keeper - + // This function is written assuming that each consumer has their own thread. This means once this is changed, this function should be revisited. + // The return values should be revisited, as stalling all consumers because of a single one stalled is not a good idea. 
auto table_id = getStorageID(); auto table = DatabaseCatalog::instance().getTable(table_id, getContext()); if (!table) @@ -1189,6 +1185,11 @@ bool StorageKafka2::streamToViews(size_t idx) consumer_info.consumer->updateOffsets(consumer_info.topic_partitions); } + if (consumer_info.topic_partitions.empty()) + { + LOG_TRACE(log, "Consumer {} has assignment, but has no partitions, probably because there are more consumers in the consumer group than partitions.", idx); + return true; + } LOG_TRACE(log, "Trying to consume from consumer {}", idx); const auto maybe_rows = streamFromConsumer(consumer_info); if (maybe_rows.has_value()) From fb49cf503e4159549348c76ebf9c3ca686b9f02f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 11 Jun 2024 16:47:05 +0000 Subject: [PATCH 0109/2361] some fixes --- src/IO/WriteBufferFromPocoSocketChunked.h | 31 ++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index ecc33180140..4325ab2bd4b 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -35,7 +35,7 @@ public: /// If current chunk is empty it means we are finishing a chunk previously sent by next(), /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. - /// We don't need to wary about if it's the end of the buffer because next() always sends the whole buffer + /// We don't need to worry about if it's the end of the buffer because next() always sends the whole buffer /// so it should be a beginning of the buffer. chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); @@ -50,6 +50,13 @@ public: return; } + /// Previously finished chunk wasn't sent yet + if (last_finish_chunk == chunk_size_ptr) + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + } + /// Fill up current chunk size *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); @@ -59,7 +66,10 @@ public: static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), *chunk_size_ptr); else + { chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); @@ -81,6 +91,18 @@ public: last_finish_chunk = chunk_size_ptr; } + ~WriteBufferFromPocoSocketChunked() override + { + try + { + finalize(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } + protected: void nextImpl() override { @@ -173,6 +195,13 @@ protected: last_finish_chunk = nullptr; } + void finalizeImpl() override + { + if (offset() == sizeof(*chunk_size_ptr)) + pos -= sizeof(*chunk_size_ptr); + WriteBufferFromPocoSocket::finalizeImpl(); + } + Poco::Net::SocketAddress peerAddress() { return peer_address; From 48d47d26a4f18e180fa5602ad0aa89cc7af234a4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 14 Sep 2023 14:29:35 +0000 Subject: [PATCH 0110/2361] Add simple unit test for full sorting join --- .../Transforms/MergeJoinTransform.cpp | 71 +++++++++----- .../Transforms/MergeJoinTransform.h | 30 ++++-- .../tests/gtest_merge_join_algorithm.cpp | 95 +++++++++++++++++++ 3 files changed, 166 insertions(+), 30 deletions(-) create mode 100644 src/Processors/tests/gtest_merge_join_algorithm.cpp diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 159a3244fe9..a9fd7978249 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -270,35 +270,45 @@ bool FullMergeJoinCursor::fullyCompleted() const } MergeJoinAlgorithm::MergeJoinAlgorithm( - JoinPtr table_join_, + JoinKind kind_, + JoinStrictness strictness_, + const TableJoin::JoinOnClause & on_clause_, const Blocks & input_headers, size_t max_block_size_) - : table_join(table_join_) + : kind(kind_) + , strictness(strictness_) , max_block_size(max_block_size_) , log(getLogger("MergeJoinAlgorithm")) { if (input_headers.size() != 2) throw Exception(ErrorCodes::LOGICAL_ERROR, "MergeJoinAlgorithm requires exactly two inputs"); - auto strictness = table_join->getTableJoin().strictness(); if (strictness != JoinStrictness::Any && strictness != JoinStrictness::All) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm is not implemented for strictness {}", strictness); - auto kind = table_join->getTableJoin().kind(); if (!isInner(kind) && !isLeft(kind) && !isRight(kind) && !isFull(kind)) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm is not implemented for kind {}", kind); - const auto & join_on = table_join->getTableJoin().getOnlyClause(); - - if (join_on.on_filter_condition_left || join_on.on_filter_condition_right) + if (on_clause_.on_filter_condition_left || on_clause_.on_filter_condition_right) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm does not support ON filter conditions"); cursors = { - createCursor(input_headers[0], join_on.key_names_left), - createCursor(input_headers[1], join_on.key_names_right) + createCursor(input_headers[0], on_clause_.key_names_left), + createCursor(input_headers[1], on_clause_.key_names_right) }; - for (const auto & [left_key, right_key] : table_join->getTableJoin().leftToRightKeyRemap()) +MergeJoinAlgorithm::MergeJoinAlgorithm( + JoinPtr join_ptr, + const Blocks & input_headers, + size_t max_block_size_) + : MergeJoinAlgorithm( + join_ptr->getTableJoin().kind(), + join_ptr->getTableJoin().strictness(), + join_ptr->getTableJoin().getOnlyClause(), + input_headers, + max_block_size_) +{ + for (const auto & [left_key, right_key] : join_ptr->getTableJoin().leftToRightKeyRemap()) { size_t left_idx = input_headers[0].getPositionByName(left_key); size_t right_idx = input_headers[1].getPositionByName(right_key); @@ -398,7 +408,7 @@ struct AllJoinImpl size_t lnum = nextDistinct(left_cursor.cursor); size_t rnum = nextDistinct(right_cursor.cursor); - bool all_fit_in_block = 
std::max(left_map.size(), right_map.size()) + lnum * rnum <= max_block_size; + bool all_fit_in_block = !max_block_size || std::max(left_map.size(), right_map.size()) + lnum * rnum <= max_block_size; bool have_all_ranges = left_cursor.cursor.isValid() && right_cursor.cursor.isValid(); if (all_fit_in_block && have_all_ranges) { @@ -498,7 +508,7 @@ std::optional MergeJoinAlgorithm::handleAllJoinState } size_t total_rows = 0; - while (total_rows < max_block_size) + while (!max_block_size || total_rows < max_block_size) { const auto & left_range = all_join_state->getLeft(); const auto & right_range = all_join_state->getRight(); @@ -523,7 +533,7 @@ std::optional MergeJoinAlgorithm::handleAllJoinState return {}; } -MergeJoinAlgorithm::Status MergeJoinAlgorithm::allJoin(JoinKind kind) +MergeJoinAlgorithm::Status MergeJoinAlgorithm::allJoin() { PaddedPODArray idx_map[2]; @@ -671,8 +681,6 @@ std::optional MergeJoinAlgorithm::handleAnyJoinState if (any_join_state.empty()) return {}; - auto kind = table_join->getTableJoin().kind(); - Chunk result; for (size_t source_num = 0; source_num < 2; ++source_num) @@ -717,7 +725,7 @@ std::optional MergeJoinAlgorithm::handleAnyJoinState return {}; } -MergeJoinAlgorithm::Status MergeJoinAlgorithm::anyJoin(JoinKind kind) +MergeJoinAlgorithm::Status MergeJoinAlgorithm::anyJoin() { if (auto result = handleAnyJoinState()) return std::move(*result); @@ -804,8 +812,6 @@ Chunk MergeJoinAlgorithm::createBlockWithDefaults(size_t source_num) IMergingAlgorithm::Status MergeJoinAlgorithm::merge() { - auto kind = table_join->getTableJoin().kind(); - if (!cursors[0]->cursor.isValid() && !cursors[0]->fullyCompleted()) return Status(0); @@ -849,13 +855,11 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() } } - auto strictness = table_join->getTableJoin().strictness(); - if (strictness == JoinStrictness::Any) - return anyJoin(kind); + return anyJoin(); if (strictness == JoinStrictness::All) - return allJoin(kind); + return allJoin(); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported strictness '{}'", strictness); } @@ -874,9 +878,30 @@ MergeJoinTransform::MergeJoinTransform( /* always_read_till_end_= */ false, /* empty_chunk_on_finish_= */ true, table_join, input_headers, max_block_size) +<<<<<<< HEAD , log(getLogger("MergeJoinTransform")) +======= +>>>>>>> b4a16f38320 (Add simple unit test for full sorting join) +{ +} + +MergeJoinTransform::MergeJoinTransform( + JoinKind kind_, + JoinStrictness strictness_, + const TableJoin::JoinOnClause & on_clause_, + const Blocks & input_headers, + const Block & output_header, + size_t max_block_size, + UInt64 limit_hint_) + : IMergingTransform( + input_headers, + output_header, + /* have_all_inputs_= */ true, + limit_hint_, + /* always_read_till_end_= */ false, + /* empty_chunk_on_finish_= */ true, + kind_, strictness_, on_clause_, input_headers, max_block_size) { - LOG_TRACE(log, "Use MergeJoinTransform"); } void MergeJoinTransform::onFinish() diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index cf9331abd59..0b0efa33722 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -19,6 +20,7 @@ #include #include #include +#include namespace Poco { class Logger; } @@ -227,7 +229,13 @@ private: class MergeJoinAlgorithm final : public IMergingAlgorithm { public: - explicit MergeJoinAlgorithm(JoinPtr table_join, const Blocks & input_headers, size_t 
max_block_size_); + MergeJoinAlgorithm(JoinKind kind_, + JoinStrictness strictness_, + const TableJoin::JoinOnClause & on_clause_, + const Blocks & input_headers, + size_t max_block_size_); + + MergeJoinAlgorithm(JoinPtr join_ptr, const Blocks & input_headers, size_t max_block_size_); const char * getName() const override { return "MergeJoinAlgorithm"; } void initialize(Inputs inputs) override; @@ -238,10 +246,10 @@ public: private: std::optional handleAnyJoinState(); - Status anyJoin(JoinKind kind); + Status anyJoin(); std::optional handleAllJoinState(); - Status allJoin(JoinKind kind); + Status allJoin(); Chunk createBlockWithDefaults(size_t source_num); Chunk createBlockWithDefaults(size_t source_num, size_t start, size_t num_rows) const; @@ -251,11 +259,12 @@ private: std::array cursors; - /// Keep some state to make connection between data in different blocks + /// Keep some state to make handle data from different blocks AnyJoinState any_join_state; std::unique_ptr all_join_state; - JoinPtr table_join; + JoinKind kind; + JoinStrictness strictness; size_t max_block_size; int null_direction_hint = 1; @@ -285,12 +294,19 @@ public: size_t max_block_size, UInt64 limit_hint = 0); + MergeJoinTransform( + JoinKind kind_, + JoinStrictness strictness_, + const TableJoin::JoinOnClause & on_clause_, + const Blocks & input_headers, + const Block & output_header, + size_t max_block_size, + UInt64 limit_hint_ = 0); + String getName() const override { return "MergeJoinTransform"; } protected: void onFinish() override; - - LoggerPtr log; }; } diff --git a/src/Processors/tests/gtest_merge_join_algorithm.cpp b/src/Processors/tests/gtest_merge_join_algorithm.cpp new file mode 100644 index 00000000000..9a8b70efc17 --- /dev/null +++ b/src/Processors/tests/gtest_merge_join_algorithm.cpp @@ -0,0 +1,95 @@ +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + + +using namespace DB; + + +QueryPipeline buildJoinPipeline(std::shared_ptr left_source, std::shared_ptr right_source) +{ + Blocks inputs; + inputs.emplace_back(left_source->getPort().getHeader()); + inputs.emplace_back(right_source->getPort().getHeader()); + Block out_header = { + ColumnWithTypeAndName(ColumnUInt8::create(), std::make_shared(), "t1.x"), + ColumnWithTypeAndName(ColumnUInt8::create(), std::make_shared(), "t2.x"), + }; + + TableJoin::JoinOnClause on_clause; + on_clause.key_names_left = {"x"}; + on_clause.key_names_right = {"x"}; + auto joining = std::make_shared( + JoinKind::Inner, + JoinStrictness::All, + on_clause, + inputs, out_header, /* max_block_size = */ 0); + + chassert(joining->getInputs().size() == 2); + + connect(left_source->getPort(), joining->getInputs().front()); + connect(right_source->getPort(), joining->getInputs().back()); + + auto * output_port = &joining->getOutputPort(); + + auto processors = std::make_shared(); + processors->emplace_back(std::move(left_source)); + processors->emplace_back(std::move(right_source)); + processors->emplace_back(std::move(joining)); + + QueryPipeline pipeline(QueryPlanResourceHolder{}, processors, output_port); + return pipeline; +} + + +std::shared_ptr createSourceWithSingleValue(size_t rows_per_chunk, size_t total_chunks) +{ + Block header = { + ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "x") + }; + + Chunks chunks; + + for (size_t i = 0; i < total_chunks; ++i) + { + auto col = ColumnUInt64::create(rows_per_chunk, 1); + chunks.emplace_back(Columns{std::move(col)}, rows_per_chunk); + } + + return 
std::make_shared(std::move(header), std::move(chunks)); +} + +TEST(FullSortingJoin, Simple) +try +{ + auto left_source = createSourceWithSingleValue(3, 10); + auto right_source = createSourceWithSingleValue(2, 15); + + auto pipeline = buildJoinPipeline(left_source, right_source); + PullingPipelineExecutor executor(pipeline); + + Block block; + + size_t total_result_rows = 0; + while (executor.pull(block)) + { + total_result_rows += block.rows(); + } + ASSERT_EQ(total_result_rows, 3 * 10 * 2 * 15); +} +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} From 2412f8521985c8d31322ed04baa502c4e7543ef6 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 18 Sep 2023 16:14:02 +0000 Subject: [PATCH 0111/2361] wip full sorting asof join --- .../Transforms/MergeJoinTransform.cpp | 138 ++++++++- .../Transforms/MergeJoinTransform.h | 38 ++- .../tests/gtest_full_sorting_join.cpp | 287 ++++++++++++++++++ .../tests/gtest_merge_join_algorithm.cpp | 95 ------ 4 files changed, 446 insertions(+), 112 deletions(-) create mode 100644 src/Processors/tests/gtest_full_sorting_join.cpp delete mode 100644 src/Processors/tests/gtest_merge_join_algorithm.cpp diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index a9fd7978249..8370e548fcb 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -34,13 +34,15 @@ namespace ErrorCodes namespace { -FullMergeJoinCursorPtr createCursor(const Block & block, const Names & columns) +constexpr UInt64 DEFAULT_VALUE_INDEX = std::numeric_limits::max(); + +FullMergeJoinCursorPtr createCursor(const Block & block, const Names & columns, JoinStrictness strictness) { SortDescription desc; desc.reserve(columns.size()); for (const auto & name : columns) desc.emplace_back(name); - return std::make_unique(materializeBlock(block), desc); + return std::make_unique(materializeBlock(block), desc, strictness == JoinStrictness::Asof); } template @@ -90,9 +92,10 @@ int nullableCompareAt(const IColumn & left_column, const IColumn & right_column, int ALWAYS_INLINE compareCursors(const SortCursorImpl & lhs, size_t lpos, const SortCursorImpl & rhs, size_t rpos, + size_t key_length, int null_direction_hint) { - for (size_t i = 0; i < lhs.sort_columns_size; ++i) + for (size_t i = 0; i < key_length; ++i) { /// TODO(@vdimir): use nullableCompareAt only if there's nullable columns int cmp = nullableCompareAt(*lhs.sort_columns[i], *rhs.sort_columns[i], lpos, rpos, null_direction_hint); @@ -104,13 +107,18 @@ int ALWAYS_INLINE compareCursors(const SortCursorImpl & lhs, size_t lpos, int ALWAYS_INLINE compareCursors(const SortCursorImpl & lhs, const SortCursorImpl & rhs, int null_direction_hint) { - return compareCursors(lhs, lhs.getRow(), rhs, rhs.getRow(), null_direction_hint); + return compareCursors(lhs, lhs.getRow(), rhs, rhs.getRow(), lhs.sort_columns_size, null_direction_hint); +} + +int compareAsofCursors(const FullMergeJoinCursor & lhs, const FullMergeJoinCursor & rhs) +{ + return nullableCompareAt(lhs.getAsofColumn(), rhs.getAsofColumn(), lhs->getRow(), rhs->getRow()); } bool ALWAYS_INLINE totallyLess(SortCursorImpl & lhs, SortCursorImpl & rhs, int null_direction_hint) { /// The last row of left cursor is less than the current row of the right cursor. 
- int cmp = compareCursors(lhs, lhs.rows - 1, rhs, rhs.getRow(), null_direction_hint); + int cmp = compareCursors(lhs, lhs.rows - 1, rhs, rhs.getRow(), lhs.sort_columns_size, null_direction_hint); return cmp < 0; } @@ -222,11 +230,11 @@ Chunk getRowFromChunk(const Chunk & chunk, size_t pos) return result; } -void inline addRange(PaddedPODArray & left_map, size_t start, size_t end) +void inline addRange(PaddedPODArray & values, UInt64 start, UInt64 end) { assert(end > start); - for (size_t i = start; i < end; ++i) - left_map.push_back(i); + for (UInt64 i = start; i < end; ++i) + values.push_back(i); } void inline addMany(PaddedPODArray & left_or_right_map, size_t idx, size_t num) @@ -235,6 +243,11 @@ void inline addMany(PaddedPODArray & left_or_right_map, size_t idx, size left_or_right_map.push_back(idx); } +void inline addMany(PaddedPODArray & values, UInt64 value, size_t num) +{ + values.resize_fill(values.size() + num, value); +} + } const Chunk & FullMergeJoinCursor::getCurrent() const @@ -283,9 +296,15 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( if (input_headers.size() != 2) throw Exception(ErrorCodes::LOGICAL_ERROR, "MergeJoinAlgorithm requires exactly two inputs"); - if (strictness != JoinStrictness::Any && strictness != JoinStrictness::All) + if (strictness != JoinStrictness::Any && strictness != JoinStrictness::All && strictness != JoinStrictness::Asof) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm is not implemented for strictness {}", strictness); + if (strictness == JoinStrictness::Asof) + { + if (kind != JoinKind::Left && kind != JoinKind::Inner) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm does not implement ASOF {} join", kind); + } + if (!isInner(kind) && !isLeft(kind) && !isRight(kind) && !isFull(kind)) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm is not implemented for kind {}", kind); @@ -293,8 +312,8 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm does not support ON filter conditions"); cursors = { - createCursor(input_headers[0], on_clause_.key_names_left), - createCursor(input_headers[1], on_clause_.key_names_right) + createCursor(input_headers[0], on_clause_.key_names_left, strictness), + createCursor(input_headers[1], on_clause_.key_names_right, strictness), }; MergeJoinAlgorithm::MergeJoinAlgorithm( @@ -313,6 +332,8 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( size_t left_idx = input_headers[0].getPositionByName(left_key); size_t right_idx = input_headers[1].getPositionByName(right_key); left_to_right_key_remap[left_idx] = right_idx; + if (strictness == JoinStrictness::Asof) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm does not support ASOF joins USING"); } const auto *smjPtr = typeid_cast(table_join.get()); @@ -321,6 +342,19 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( null_direction_hint = smjPtr->getNullDirection(); } + if (strictness == JoinStrictness::Asof) + setAsofInequality(join_ptr->getTableJoin().getAsofInequality()); +} + +void MergeJoinAlgorithm::setAsofInequality(ASOFJoinInequality asof_inequality_) +{ + if (strictness != JoinStrictness::Asof) + throw Exception(ErrorCodes::LOGICAL_ERROR, "setAsofInequality is only supported for ASOF joins"); + + if (asof_inequality_ == ASOFJoinInequality::None) + throw Exception(ErrorCodes::LOGICAL_ERROR, "ASOF inequality cannot be None"); + + asof_inequality = asof_inequality_; } void MergeJoinAlgorithm::logElapsed(double seconds) @@ -770,6 +804,81 @@ MergeJoinAlgorithm::Status 
MergeJoinAlgorithm::anyJoin() return Status(std::move(result)); } + +MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() +{ + auto & left_cursor = *cursors[0]; + if (!left_cursor->isValid()) + return Status(0); + + auto & right_cursor = *cursors[1]; + if (!right_cursor->isValid()) + return Status(1); + + PaddedPODArray left_map; + PaddedPODArray right_map; + + while (left_cursor->isValid() && right_cursor->isValid()) + { + auto lpos = left_cursor->getRow(); + auto rpos = right_cursor->getRow(); + auto cmp = compareCursors(*left_cursor, *right_cursor); + if (cmp == 0) + { + auto asof_cmp = compareAsofCursors(left_cursor, right_cursor); + if ((asof_inequality == ASOFJoinInequality::Less && asof_cmp <= -1) + || (asof_inequality == ASOFJoinInequality::LessOrEquals && asof_cmp <= 0)) + { + /// First row in right table that is greater (or equal) than current row in left table + /// matches asof join condition the best + left_map.push_back(lpos); + right_map.push_back(rpos); + left_cursor->next(); + continue; + } + + if (asof_inequality == ASOFJoinInequality::Less || asof_inequality == ASOFJoinInequality::LessOrEquals) + { + /// Asof condition is not (yet) satisfied, skip row in right table + right_cursor->next(); + continue; + } + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "TODO: implement ASOF equality join"); + } + else if (cmp < 0) + { + /// no matches for rows in left table, just pass them through + size_t num = nextDistinct(*left_cursor); + if (isLeft(kind)) + { + /// return them with default values at right side + addRange(left_map, lpos, lpos + num); + addMany(right_map, DEFAULT_VALUE_INDEX, num); + } + } + else + { + /// skip rows in right table until we find match for current row in left table + nextDistinct(*right_cursor); + } + } + + chassert(left_map.size() == right_map.size()); + Chunk result; + { + Columns lcols = indexColumns(left_cursor.getCurrent().getColumns(), left_map); + for (auto & col : lcols) + result.addColumn(std::move(col)); + + Columns rcols = indexColumns(right_cursor.getCurrent().getColumns(), right_map); + for (auto & col : rcols) + result.addColumn(std::move(col)); + } + UNUSED(asof_inequality); + return Status(std::move(result)); +} + + /// if `source_num == 0` get data from left cursor and fill defaults at right /// otherwise - vice versa Chunk MergeJoinAlgorithm::createBlockWithDefaults(size_t source_num, size_t start, size_t num_rows) const @@ -861,6 +970,9 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() if (strictness == JoinStrictness::All) return allJoin(); + if (strictness == JoinStrictness::Asof) + return asofJoin(); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported strictness '{}'", strictness); } @@ -878,10 +990,6 @@ MergeJoinTransform::MergeJoinTransform( /* always_read_till_end_= */ false, /* empty_chunk_on_finish_= */ true, table_join, input_headers, max_block_size) -<<<<<<< HEAD - , log(getLogger("MergeJoinTransform")) -======= ->>>>>>> b4a16f38320 (Add simple unit test for full sorting join) { } diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index 0b0efa33722..3ee01e57992 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -195,10 +195,27 @@ private: class FullMergeJoinCursor : boost::noncopyable { public: - explicit FullMergeJoinCursor(const Block & sample_block_, const SortDescription & description_) + FullMergeJoinCursor( + const Block & sample_block_, + const SortDescription & description_, + 
bool is_asof = false) : sample_block(sample_block_.cloneEmpty()) , desc(description_) { + if (desc.size() == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty sort description for FullMergeJoinCursor"); + + if (is_asof) + { + /// For ASOF join prefix of sort description is used for equality comparison + /// and the last column is used for inequality comparison and is handled separately + + auto asof_column_description = desc.back(); + desc.pop_back(); + + chassert(asof_column_description.direction == 1 && asof_column_description.nulls_direction == 1); + asof_column_position = sample_block.getPositionByName(asof_column_description.column_name); + } } bool fullyCompleted() const; @@ -209,17 +226,27 @@ public: SortCursorImpl * operator-> () { return &cursor; } const SortCursorImpl * operator-> () const { return &cursor; } + SortCursorImpl & operator* () { return cursor; } + const SortCursorImpl & operator* () const { return cursor; } + SortCursorImpl cursor; const Block & sampleBlock() const { return sample_block; } Columns sampleColumns() const { return sample_block.getColumns(); } + const IColumn & getAsofColumn() const + { + return *cursor.all_columns[asof_column_position]; + } + private: Block sample_block; SortDescription desc; Chunk current_chunk; bool recieved_all_blocks = false; + + size_t asof_column_position; }; /* @@ -242,8 +269,9 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - void logElapsed(double seconds); + void setAsofInequality(ASOFJoinInequality asof_inequality_); + void logElapsed(double seconds); private: std::optional handleAnyJoinState(); Status anyJoin(); @@ -251,13 +279,17 @@ private: std::optional handleAllJoinState(); Status allJoin(); + Status asofJoin(); + Chunk createBlockWithDefaults(size_t source_num); Chunk createBlockWithDefaults(size_t source_num, size_t start, size_t num_rows) const; + /// For `USING` join key columns should have values from right side instead of defaults std::unordered_map left_to_right_key_remap; std::array cursors; + ASOFJoinInequality asof_inequality = ASOFJoinInequality::None; /// Keep some state to make handle data from different blocks AnyJoinState any_join_state; @@ -305,6 +337,8 @@ public: String getName() const override { return "MergeJoinTransform"; } + void setAsofInequality(ASOFJoinInequality asof_inequality_) { algorithm.setAsofInequality(asof_inequality_); } + protected: void onFinish() override; }; diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp new file mode 100644 index 00000000000..888e280b55f --- /dev/null +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -0,0 +1,287 @@ +#include + + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + + +using namespace DB; + +UInt64 getAndPrintRandomSeed() +{ + UInt64 seed = randomSeed(); + std::cerr << "TEST_RANDOM_SEED: " << seed << std::endl; + return seed; +} + +static UInt64 TEST_RANDOM_SEED = getAndPrintRandomSeed(); + +static pcg64 rng(TEST_RANDOM_SEED); + + +QueryPipeline buildJoinPipeline( + std::shared_ptr left_source, + std::shared_ptr right_source, + size_t key_length = 1, + JoinKind kind = JoinKind::Inner, + JoinStrictness strictness = JoinStrictness::All, + ASOFJoinInequality asof_inequality = ASOFJoinInequality::None) +{ + Blocks inputs; + inputs.emplace_back(left_source->getPort().getHeader()); + inputs.emplace_back(right_source->getPort().getHeader()); + + 
Block out_header; + for (const auto & input : inputs) + { + for (ColumnWithTypeAndName column : input) + { + if (&input == &inputs.front()) + column.name = "t1." + column.name; + else + column.name = "t2." + column.name; + out_header.insert(column); + } + } + + TableJoin::JoinOnClause on_clause; + for (size_t i = 0; i < key_length; ++i) + { + on_clause.key_names_left.emplace_back(inputs[0].getByPosition(i).name); + on_clause.key_names_right.emplace_back(inputs[1].getByPosition(i).name); + } + + auto joining = std::make_shared( + kind, + strictness, + on_clause, + inputs, out_header, /* max_block_size = */ 0); + + if (asof_inequality != ASOFJoinInequality::None) + joining->setAsofInequality(asof_inequality); + + chassert(joining->getInputs().size() == 2); + + connect(left_source->getPort(), joining->getInputs().front()); + connect(right_source->getPort(), joining->getInputs().back()); + + auto * output_port = &joining->getOutputPort(); + + auto processors = std::make_shared(); + processors->emplace_back(std::move(left_source)); + processors->emplace_back(std::move(right_source)); + processors->emplace_back(std::move(joining)); + + QueryPipeline pipeline(QueryPlanResourceHolder{}, processors, output_port); + return pipeline; +} + + +std::shared_ptr oneColumnSource(const std::vector> & values) +{ + Block header = { ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "x") }; + Chunks chunks; + for (const auto & chunk_values : values) + { + auto column = ColumnUInt64::create(); + for (auto n : chunk_values) + column->insertValue(n); + chunks.emplace_back(Chunk(Columns{std::move(column)}, chunk_values.size())); + } + return std::make_shared(header, std::move(chunks)); +} + + +TEST(FullSortingJoin, Simple) +try +{ + auto left_source = oneColumnSource({ {1, 2, 3, 4, 5} }); + auto right_source = oneColumnSource({ {1}, {2}, {3}, {4}, {5} }); + + auto pipeline = buildJoinPipeline(left_source, right_source); + PullingPipelineExecutor executor(pipeline); + + Block block; + + size_t total_result_rows = 0; + while (executor.pull(block)) + total_result_rows += block.rows(); + + ASSERT_EQ(total_result_rows, 5); +} +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} + +std::shared_ptr sourceFromRows( + const Block & header, const std::vector> & values, double break_prob = 0.0) +{ + Chunks chunks; + auto columns = header.cloneEmptyColumns(); + + std::uniform_real_distribution<> prob_dis(0.0, 1.0); + + + for (auto row : values) + { + if (!columns.empty() && (row.empty() || prob_dis(rng) < break_prob)) + { + size_t rows = columns.front()->size(); + chunks.emplace_back(std::move(columns), rows); + columns = header.cloneEmptyColumns(); + continue; + } + + for (size_t i = 0; i < columns.size(); ++i) + columns[i]->insert(row[i]); + } + + if (!columns.empty()) + chunks.emplace_back(std::move(columns), columns.front()->size()); + + return std::make_shared(header, std::move(chunks)); +} + + +std::vector> getValuesFromBlock(const Block & block, const Names & names) +{ + std::vector> result; + for (size_t i = 0; i < block.rows(); ++i) + { + auto & row = result.emplace_back(); + for (const auto & name : names) + block.getByName(name).column->get(i, row.emplace_back()); + } + return result; +} + + +Block executePipeline(QueryPipeline & pipeline) +{ + PullingPipelineExecutor executor(pipeline); + + Blocks result_blocks; + while (true) + { + Block block; + bool is_ok = executor.pull(block); + if (!is_ok) + break; + result_blocks.emplace_back(std::move(block)); + } + + 
return concatenateBlocks(result_blocks); +} + +TEST(FullSortingJoin, Asof) +try +{ + const std::vector chunk_break = {}; + + auto left_source = sourceFromRows({ + ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "key"), + ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "t"), + }, { + {"AMZN", 3}, + {"AMZN", 4}, + {"AMZN", 6}, + }); + + auto right_source = sourceFromRows({ + ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "key"), + ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "t"), + ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "value"), + }, { + {"AAPL", 1, 97}, + chunk_break, + {"AAPL", 2, 98}, + {"AAPL", 3, 99}, + {"AMZN", 1, 100}, + {"AMZN", 2, 110}, + chunk_break, + {"AMZN", 4, 130}, + {"AMZN", 5, 140}, + }); + + auto pipeline = buildJoinPipeline( + left_source, right_source, /* key_length = */ 2, + JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals); + + Block result_block = executePipeline(pipeline); + auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); + ASSERT_EQ(values.size(), 2); + ASSERT_EQ(values[0], (std::vector{"AMZN", 3u, 4u, 130u})); + ASSERT_EQ(values[1], (std::vector{"AMZN", 4u, 4u, 130u})); +} +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} + + +TEST(FullSortingJoin, AsofOnlyColumn) +try +{ + const std::vector chunk_break = {}; + + auto left_source = oneColumnSource({ {3}, {3, 3, 3}, {3, 5, 5, 6}, {9, 9}, {10, 20} }); + + UInt64 p = std::uniform_int_distribution<>(0, 2)(rng); + double break_prob = p == 0 ? 0.0 : (p == 1 ? 0.5 : 1.0); + + auto right_source = sourceFromRows({ + ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "t"), + ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "value"), + }, { + {1, 101}, + {2, 102}, + {4, 104}, + {5, 105}, + {11, 111}, + {15, 115}, + }, + break_prob); + + auto pipeline = buildJoinPipeline( + left_source, right_source, /* key_length = */ 1, + JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals); + + Block result_block = executePipeline(pipeline); + + ASSERT_EQ( + assert_cast(result_block.getByName("t1.x").column.get())->getData(), + (ColumnUInt64::Container{3, 3, 3, 3, 3, 5, 5, 6, 9, 9, 10}) + ); + + ASSERT_EQ( + assert_cast(result_block.getByName("t2.t").column.get())->getData(), + (ColumnUInt64::Container{4, 4, 4, 4, 4, 5, 5, 11, 11, 11, 15}) + ); + + ASSERT_EQ( + assert_cast(result_block.getByName("t2.value").column.get())->getData(), + (ColumnUInt64::Container{104, 104, 104, 104, 104, 105, 105, 111, 111, 111, 115}) + ); +} +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} diff --git a/src/Processors/tests/gtest_merge_join_algorithm.cpp b/src/Processors/tests/gtest_merge_join_algorithm.cpp deleted file mode 100644 index 9a8b70efc17..00000000000 --- a/src/Processors/tests/gtest_merge_join_algorithm.cpp +++ /dev/null @@ -1,95 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - - -using namespace DB; - - -QueryPipeline buildJoinPipeline(std::shared_ptr left_source, std::shared_ptr right_source) -{ - Blocks inputs; - inputs.emplace_back(left_source->getPort().getHeader()); - inputs.emplace_back(right_source->getPort().getHeader()); - Block out_header = { - ColumnWithTypeAndName(ColumnUInt8::create(), std::make_shared(), "t1.x"), - ColumnWithTypeAndName(ColumnUInt8::create(), 
std::make_shared(), "t2.x"), - }; - - TableJoin::JoinOnClause on_clause; - on_clause.key_names_left = {"x"}; - on_clause.key_names_right = {"x"}; - auto joining = std::make_shared( - JoinKind::Inner, - JoinStrictness::All, - on_clause, - inputs, out_header, /* max_block_size = */ 0); - - chassert(joining->getInputs().size() == 2); - - connect(left_source->getPort(), joining->getInputs().front()); - connect(right_source->getPort(), joining->getInputs().back()); - - auto * output_port = &joining->getOutputPort(); - - auto processors = std::make_shared(); - processors->emplace_back(std::move(left_source)); - processors->emplace_back(std::move(right_source)); - processors->emplace_back(std::move(joining)); - - QueryPipeline pipeline(QueryPlanResourceHolder{}, processors, output_port); - return pipeline; -} - - -std::shared_ptr createSourceWithSingleValue(size_t rows_per_chunk, size_t total_chunks) -{ - Block header = { - ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "x") - }; - - Chunks chunks; - - for (size_t i = 0; i < total_chunks; ++i) - { - auto col = ColumnUInt64::create(rows_per_chunk, 1); - chunks.emplace_back(Columns{std::move(col)}, rows_per_chunk); - } - - return std::make_shared(std::move(header), std::move(chunks)); -} - -TEST(FullSortingJoin, Simple) -try -{ - auto left_source = createSourceWithSingleValue(3, 10); - auto right_source = createSourceWithSingleValue(2, 15); - - auto pipeline = buildJoinPipeline(left_source, right_source); - PullingPipelineExecutor executor(pipeline); - - Block block; - - size_t total_result_rows = 0; - while (executor.pull(block)) - { - total_result_rows += block.rows(); - } - ASSERT_EQ(total_result_rows, 3 * 10 * 2 * 15); -} -catch (Exception & e) -{ - std::cout << e.getStackTraceString() << std::endl; - throw; -} From 4079f25a3865f23f23cec9c43aefe241b58e7972 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 20 Sep 2023 12:33:29 +0000 Subject: [PATCH 0112/2361] fix test --- .../tests/gtest_full_sorting_join.cpp | 35 +++++++++++++------ 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 888e280b55f..b3b462ee1e3 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -134,26 +134,37 @@ std::shared_ptr sourceFromRows( { Chunks chunks; auto columns = header.cloneEmptyColumns(); + chassert(!columns.empty()); std::uniform_real_distribution<> prob_dis(0.0, 1.0); - - for (auto row : values) + for (const auto & row : values) { - if (!columns.empty() && (row.empty() || prob_dis(rng) < break_prob)) + if (!columns.front()->empty() && (row.empty() || prob_dis(rng) < break_prob)) { size_t rows = columns.front()->size(); chunks.emplace_back(std::move(columns), rows); columns = header.cloneEmptyColumns(); - continue; + if (row.empty()) + continue; } + chassert(row.size() == columns.size()); for (size_t i = 0; i < columns.size(); ++i) columns[i]->insert(row[i]); } - if (!columns.empty()) - chunks.emplace_back(std::move(columns), columns.front()->size()); + if (!columns.front()->empty()) + { + size_t rows = columns.front()->size(); + chunks.emplace_back(std::move(columns), rows); + } + + /// Check that code above is correct. 
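+    /// (i.e. the produced chunks together contain exactly `values.size()` rows)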
+ size_t total_result_rows = 0; + for (const auto & chunk : chunks) + total_result_rows += chunk.getNumRows(); + chassert(total_result_rows == values.size()); return std::make_shared(header, std::move(chunks)); } @@ -225,9 +236,11 @@ try Block result_block = executePipeline(pipeline); auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); - ASSERT_EQ(values.size(), 2); - ASSERT_EQ(values[0], (std::vector{"AMZN", 3u, 4u, 130u})); - ASSERT_EQ(values[1], (std::vector{"AMZN", 4u, 4u, 130u})); + + ASSERT_EQ(values, (std::vector>{ + {"AMZN", 3u, 4u, 130u}, + {"AMZN", 4u, 4u, 130u}, + })); } catch (Exception & e) { @@ -272,12 +285,12 @@ try ASSERT_EQ( assert_cast(result_block.getByName("t2.t").column.get())->getData(), - (ColumnUInt64::Container{4, 4, 4, 4, 4, 5, 5, 11, 11, 11, 15}) + (ColumnUInt64::Container{4, 4, 4, 4, 4, 5, 5, 11, 11, 11, 11}) ); ASSERT_EQ( assert_cast(result_block.getByName("t2.value").column.get())->getData(), - (ColumnUInt64::Container{104, 104, 104, 104, 104, 105, 105, 111, 111, 111, 115}) + (ColumnUInt64::Container{104, 104, 104, 104, 104, 105, 105, 111, 111, 111, 111}) ); } catch (Exception & e) From 984d94e5f10e7e1c3cc953e1f5394c153e8fdedc Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 20 Sep 2023 15:46:19 +0000 Subject: [PATCH 0113/2361] upd gtest_full_sorting_join --- .../tests/gtest_full_sorting_join.cpp | 154 +++++++++--------- 1 file changed, 76 insertions(+), 78 deletions(-) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index b3b462ee1e3..4d7ce25a7e8 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -93,7 +93,7 @@ QueryPipeline buildJoinPipeline( std::shared_ptr oneColumnSource(const std::vector> & values) { - Block header = { ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "x") }; + Block header = { ColumnWithTypeAndName(std::make_shared(), "x") }; Chunks chunks; for (const auto & chunk_values : values) { @@ -129,45 +129,46 @@ catch (Exception & e) throw; } -std::shared_ptr sourceFromRows( - const Block & header, const std::vector> & values, double break_prob = 0.0) +class SourceChunksBuilder { +public: + explicit SourceChunksBuilder(const Block & header_) + : header(header_) + { + current_chunk = header.cloneEmptyColumns(); + chassert(!current_chunk.empty()); + } + + SourceChunksBuilder & addRow(const std::vector & row) + { + chassert(row.size() == current_chunk.size()); + for (size_t i = 0; i < current_chunk.size(); ++i) + current_chunk[i]->insert(row[i]); + return *this; + } + + SourceChunksBuilder & addChunk() + { + if (current_chunk.front()->empty()) + return *this; + + size_t rows = current_chunk.front()->size(); + chunks.emplace_back(std::move(current_chunk), rows); + current_chunk = header.cloneEmptyColumns(); + return *this; + } + + std::shared_ptr build() + { + addChunk(); + return std::make_shared(header, std::move(chunks)); + } + +private: + Block header; Chunks chunks; - auto columns = header.cloneEmptyColumns(); - chassert(!columns.empty()); - - std::uniform_real_distribution<> prob_dis(0.0, 1.0); - - for (const auto & row : values) - { - if (!columns.front()->empty() && (row.empty() || prob_dis(rng) < break_prob)) - { - size_t rows = columns.front()->size(); - chunks.emplace_back(std::move(columns), rows); - columns = header.cloneEmptyColumns(); - if (row.empty()) - continue; - } - - chassert(row.size() == columns.size()); - for (size_t i = 0; i < 
columns.size(); ++i) - columns[i]->insert(row[i]); - } - - if (!columns.front()->empty()) - { - size_t rows = columns.front()->size(); - chunks.emplace_back(std::move(columns), rows); - } - - /// Check that code above is correct. - size_t total_result_rows = 0; - for (const auto & chunk : chunks) - total_result_rows += chunk.getNumRows(); - chassert(total_result_rows == values.size()); - - return std::make_shared(header, std::move(chunks)); -} + MutableColumns current_chunk; +}; std::vector> getValuesFromBlock(const Block & block, const Names & names) @@ -203,32 +204,30 @@ Block executePipeline(QueryPipeline & pipeline) TEST(FullSortingJoin, Asof) try { - const std::vector chunk_break = {}; + auto left_source = SourceChunksBuilder({ + {std::make_shared(), "key"}, + {std::make_shared(), "t"}, + }) + .addRow({"AMZN", 3}) + .addRow({"AMZN", 4}) + .addRow({"AMZN", 6}) + .build(); - auto left_source = sourceFromRows({ - ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "key"), - ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "t"), - }, { - {"AMZN", 3}, - {"AMZN", 4}, - {"AMZN", 6}, - }); - - auto right_source = sourceFromRows({ - ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "key"), - ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "t"), - ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "value"), - }, { - {"AAPL", 1, 97}, - chunk_break, - {"AAPL", 2, 98}, - {"AAPL", 3, 99}, - {"AMZN", 1, 100}, - {"AMZN", 2, 110}, - chunk_break, - {"AMZN", 4, 130}, - {"AMZN", 5, 140}, - }); + auto right_source = SourceChunksBuilder({ + {std::make_shared(), "key"}, + {std::make_shared(), "t"}, + {std::make_shared(), "value"}, + }) + .addRow({"AAPL", 1, 97}) + .addChunk() + .addRow({"AAPL", 2, 98}) + .addRow({"AAPL", 3, 99}) + .addRow({"AMZN", 1, 100}) + .addRow({"AMZN", 2, 110}) + .addChunk() + .addRow({"AMZN", 4, 130}) + .addRow({"AMZN", 5, 140}) + .build(); auto pipeline = buildJoinPipeline( left_source, right_source, /* key_length = */ 2, @@ -252,25 +251,24 @@ catch (Exception & e) TEST(FullSortingJoin, AsofOnlyColumn) try { - const std::vector chunk_break = {}; - auto left_source = oneColumnSource({ {3}, {3, 3, 3}, {3, 5, 5, 6}, {9, 9}, {10, 20} }); UInt64 p = std::uniform_int_distribution<>(0, 2)(rng); - double break_prob = p == 0 ? 0.0 : (p == 1 ? 0.5 : 1.0); - auto right_source = sourceFromRows({ - ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "t"), - ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared(), "value"), - }, { - {1, 101}, - {2, 102}, - {4, 104}, - {5, 105}, - {11, 111}, - {15, 115}, - }, - break_prob); + SourceChunksBuilder right_source_builder({ + {std::make_shared(), "t"}, + {std::make_shared(), "value"}, + }); + + double break_prob = p == 0 ? 0.0 : (p == 1 ? 
0.5 : 1.0); + std::uniform_real_distribution<> prob_dis(0.0, 1.0); + for (const auto & row : std::vector>{ {1, 101}, {2, 102}, {4, 104}, {5, 105}, {11, 111}, {15, 115} }) + { + right_source_builder.addRow(row); + if (prob_dis(rng) < break_prob) + right_source_builder.addChunk(); + } + auto right_source = right_source_builder.build(); auto pipeline = buildJoinPipeline( left_source, right_source, /* key_length = */ 1, From 6330b466aa326e40eb7abca2699554c98241b342 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 21 Sep 2023 11:19:20 +0000 Subject: [PATCH 0114/2361] Add randomized test FullSortingJoin.AsofGeneratedTestData --- src/Common/getRandomASCIIString.cpp | 7 +- src/Common/getRandomASCIIString.h | 3 + .../Transforms/MergeJoinTransform.cpp | 2 +- .../tests/gtest_full_sorting_join.cpp | 121 +++++++++++++++--- 4 files changed, 116 insertions(+), 17 deletions(-) diff --git a/src/Common/getRandomASCIIString.cpp b/src/Common/getRandomASCIIString.cpp index 594b4cd3228..a295277b453 100644 --- a/src/Common/getRandomASCIIString.cpp +++ b/src/Common/getRandomASCIIString.cpp @@ -6,12 +6,17 @@ namespace DB { String getRandomASCIIString(size_t length) +{ + return getRandomASCIIString(length, thread_local_rng); +} + +String getRandomASCIIString(size_t length, pcg64 & rng) { std::uniform_int_distribution distribution('a', 'z'); String res; res.resize(length); for (auto & c : res) - c = distribution(thread_local_rng); + c = distribution(rng); return res; } diff --git a/src/Common/getRandomASCIIString.h b/src/Common/getRandomASCIIString.h index 627d2700ce3..19e1ff7120e 100644 --- a/src/Common/getRandomASCIIString.h +++ b/src/Common/getRandomASCIIString.h @@ -2,11 +2,14 @@ #include +#include + namespace DB { /// Slow random string. Useful for random names and things like this. Not for generating data. 
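+/// The overload taking an explicit pcg64 lets callers (e.g. tests) supply a seeded generator
+/// for reproducible output; the single-argument overload keeps using thread_local_rng.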
String getRandomASCIIString(size_t length); +String getRandomASCIIString(size_t length, pcg64 & rng); } diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 8370e548fcb..bfde5892289 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -874,7 +874,7 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() for (auto & col : rcols) result.addColumn(std::move(col)); } - UNUSED(asof_inequality); + return Status(std::move(result)); } diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 4d7ce25a7e8..741e945bfdb 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -1,30 +1,31 @@ #include - -#include #include #include -#include -#include -#include -#include -#include -#include - #include +#include +#include + #include + #include +#include +#include +#include +#include +#include #include +#include using namespace DB; UInt64 getAndPrintRandomSeed() { UInt64 seed = randomSeed(); - std::cerr << "TEST_RANDOM_SEED: " << seed << std::endl; + std::cerr << __FILE__ << "::" << "TEST_RANDOM_SEED = " << seed << "ull" << std::endl; return seed; } @@ -132,6 +133,8 @@ catch (Exception & e) class SourceChunksBuilder { public: + double break_prob = 0.0; + explicit SourceChunksBuilder(const Block & header_) : header(header_) { @@ -144,6 +147,10 @@ public: chassert(row.size() == current_chunk.size()); for (size_t i = 0; i < current_chunk.size(); ++i) current_chunk[i]->insert(row[i]); + + if (break_prob > 0.0 && std::uniform_real_distribution<>(0.0, 1.0)(rng) < break_prob) + addChunk(); + return *this; } @@ -184,7 +191,7 @@ std::vector> getValuesFromBlock(const Block & block, const Na } -Block executePipeline(QueryPipeline & pipeline) +Block executePipeline(QueryPipeline && pipeline) { PullingPipelineExecutor executor(pipeline); @@ -233,7 +240,7 @@ try left_source, right_source, /* key_length = */ 2, JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals); - Block result_block = executePipeline(pipeline); + Block result_block = executePipeline(std::move(pipeline)); auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); ASSERT_EQ(values, (std::vector>{ @@ -253,13 +260,12 @@ try { auto left_source = oneColumnSource({ {3}, {3, 3, 3}, {3, 5, 5, 6}, {9, 9}, {10, 20} }); - UInt64 p = std::uniform_int_distribution<>(0, 2)(rng); - SourceChunksBuilder right_source_builder({ {std::make_shared(), "t"}, {std::make_shared(), "value"}, }); + UInt64 p = std::uniform_int_distribution<>(0, 2)(rng); double break_prob = p == 0 ? 0.0 : (p == 1 ? 
0.5 : 1.0); std::uniform_real_distribution<> prob_dis(0.0, 1.0); for (const auto & row : std::vector>{ {1, 101}, {2, 102}, {4, 104}, {5, 105}, {11, 111}, {15, 115} }) @@ -274,7 +280,7 @@ try left_source, right_source, /* key_length = */ 1, JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals); - Block result_block = executePipeline(pipeline); + Block result_block = executePipeline(std::move(pipeline)); ASSERT_EQ( assert_cast(result_block.getByName("t1.x").column.get())->getData(), @@ -296,3 +302,88 @@ catch (Exception & e) std::cout << e.getStackTraceString() << std::endl; throw; } + +TEST(FullSortingJoin, AsofGeneratedTestData) +try +{ + auto left_source_builder = SourceChunksBuilder({ + {std::make_shared(), "k1"}, + {std::make_shared(), "k2"}, + {std::make_shared(), "t"}, + {std::make_shared(), "attr"}, + }); + + auto right_source_builder = SourceChunksBuilder({ + {std::make_shared(), "k1"}, + {std::make_shared(), "k2"}, + {std::make_shared(), "t"}, + {std::make_shared(), "attr"}, + }); + + /// uniform_int_distribution to have 0.0 and 1.0 probabilities + left_source_builder.break_prob = std::uniform_int_distribution<>(0, 5)(rng) / 5.0; + right_source_builder.break_prob = std::uniform_int_distribution<>(0, 5)(rng) / 5.0; + + auto get_next_key = [](UInt64 & k1, String & k2) + { + size_t str_len = std::uniform_int_distribution<>(1, 10)(rng); + String new_k2 = getRandomASCIIString(str_len, rng); + if (new_k2.compare(k2) <= 0) + ++k1; + k2 = new_k2; + }; + + ColumnUInt64::Container expected; + + UInt64 k1 = 0; + String k2 = "asdfg"; + auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); + for (size_t key_num = 0; key_num < key_num_total; ++key_num) + { + UInt64 left_t = 0; + size_t num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); + for (size_t i = 0; i < num_left_rows; ++i) + { + left_t += std::uniform_int_distribution<>(1, 10)(rng); + + left_source_builder.addRow({k1, k2, left_t, 10 * left_t}); + expected.push_back(10 * left_t); + + auto num_matches = 1 + std::poisson_distribution<>(4)(rng); + + size_t right_t = left_t; + for (size_t j = 0; j < num_matches; ++j) + { + right_t += std::uniform_int_distribution<>(0, 3)(rng); + right_source_builder.addRow({k1, k2, right_t, j == 0 ? 
100 * left_t : 0}); + } + /// next left_t should be greater than right_t not to match with previous rows + left_t = right_t; + } + + /// generate some rows with greater left_t to check that they are not matched + num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); + for (size_t i = 0; i < num_left_rows; ++i) + { + left_t += std::uniform_int_distribution<>(1, 10)(rng); + left_source_builder.addRow({k1, k2, left_t, 10 * left_t}); + } + + get_next_key(k1, k2); + } + + Block result_block = executePipeline(buildJoinPipeline( + left_source_builder.build(), right_source_builder.build(), + /* key_length = */ 3, + JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals)); + + ASSERT_EQ(assert_cast(block.getByName("t1.attr").column.get())->getData(), expected); + + for (auto & e : expected) + e = 10 * e; + ASSERT_EQ(assert_cast(block.getByName("t2.attr").column.get())->getData(), expected); +} +catch (Exception & e) { + std::cout << e.getStackTraceString() << std::endl; + throw; +} From da4f35556100847d527f194cee4b2f99b50bfa58 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 21 Sep 2023 11:38:37 +0000 Subject: [PATCH 0115/2361] upd FullSortingJoin.AsofGeneratedTestData --- .../tests/gtest_full_sorting_join.cpp | 56 +++++++++++++++---- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 741e945bfdb..49c9f58b1d5 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -208,6 +208,14 @@ Block executePipeline(QueryPipeline && pipeline) return concatenateBlocks(result_blocks); } +template +void checkColumn(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) +{ + const auto & actual = assert_cast *>(block.getByName(name).column.get())->getData(); + EXPECT_EQ(actual.size(), expected.size()); + ASSERT_EQ(actual, expected); +} + TEST(FullSortingJoin, Asof) try { @@ -306,18 +314,27 @@ catch (Exception & e) TEST(FullSortingJoin, AsofGeneratedTestData) try { + std::vector join_kinds = {JoinKind::Inner, JoinKind::Left}; + auto join_kind = join_kinds[std::uniform_int_distribution(0, join_kinds.size() - 1)(rng)]; + + std::vector asof_inequalities = { + ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals, + // ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals, + }; + auto asof_inequality = asof_inequalities[std::uniform_int_distribution(0, asof_inequalities.size() - 1)(rng)]; + auto left_source_builder = SourceChunksBuilder({ {std::make_shared(), "k1"}, {std::make_shared(), "k2"}, {std::make_shared(), "t"}, - {std::make_shared(), "attr"}, + {std::make_shared(), "attr"}, }); auto right_source_builder = SourceChunksBuilder({ {std::make_shared(), "k1"}, {std::make_shared(), "k2"}, {std::make_shared(), "t"}, - {std::make_shared(), "attr"}, + {std::make_shared(), "attr"}, }); /// uniform_int_distribution to have 0.0 and 1.0 probabilities @@ -333,14 +350,14 @@ try k2 = new_k2; }; - ColumnUInt64::Container expected; + ColumnInt64::Container expected; UInt64 k1 = 0; String k2 = "asdfg"; auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { - UInt64 left_t = 0; + Int64 left_t = 0; size_t num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); for (size_t i = 0; i < num_left_rows; ++i) { @@ -351,11 +368,22 @@ try auto num_matches = 1 + std::poisson_distribution<>(4)(rng); - 
size_t right_t = left_t; + auto right_t = left_t; for (size_t j = 0; j < num_matches; ++j) { - right_t += std::uniform_int_distribution<>(0, 3)(rng); - right_source_builder.addRow({k1, k2, right_t, j == 0 ? 100 * left_t : 0}); + int min_step = 1; + if (asof_inequality == ASOFJoinInequality::LessOrEquals || asof_inequality == ASOFJoinInequality::GreaterOrEquals) + min_step = 0; + right_t += std::uniform_int_distribution<>(min_step, 3)(rng); + + bool is_match = false; + + if (asof_inequality == ASOFJoinInequality::LessOrEquals || asof_inequality == ASOFJoinInequality::Less) + is_match = j == 0; + else if (asof_inequality == ASOFJoinInequality::GreaterOrEquals || asof_inequality == ASOFJoinInequality::Greater) + is_match = j == num_matches - 1; + + right_source_builder.addRow({k1, k2, right_t, is_match ? 100 * left_t : -1}); } /// next left_t should be greater than right_t not to match with previous rows left_t = right_t; @@ -366,7 +394,10 @@ try for (size_t i = 0; i < num_left_rows; ++i) { left_t += std::uniform_int_distribution<>(1, 10)(rng); - left_source_builder.addRow({k1, k2, left_t, 10 * left_t}); + left_source_builder.addRow({k1, k2, left_t, -10 * left_t}); + + if (join_kind == JoinKind::Left) + expected.push_back(-10 * left_t); } get_next_key(k1, k2); @@ -375,13 +406,14 @@ try Block result_block = executePipeline(buildJoinPipeline( left_source_builder.build(), right_source_builder.build(), /* key_length = */ 3, - JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals)); + join_kind, JoinStrictness::Asof, asof_inequality)); - ASSERT_EQ(assert_cast(block.getByName("t1.attr").column.get())->getData(), expected); + checkColumn(expected, result_block, "t1.attr"); for (auto & e : expected) - e = 10 * e; - ASSERT_EQ(assert_cast(block.getByName("t2.attr").column.get())->getData(), expected); + e = e < 0 ? 
0 : 10 * e; /// non matched rows from left table have negative attr + + checkColumn(expected, result_block, "t2.attr"); } catch (Exception & e) { std::cout << e.getStackTraceString() << std::endl; From 97e3ee6e661620187bf115d948115936e36cdaf4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 21 Sep 2023 11:40:41 +0000 Subject: [PATCH 0116/2361] upd gtest_full_sorting_join --- .../tests/gtest_full_sorting_join.cpp | 92 +++++++++++-------- 1 file changed, 54 insertions(+), 38 deletions(-) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 49c9f58b1d5..bbd321a78d7 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -142,7 +142,7 @@ public: chassert(!current_chunk.empty()); } - SourceChunksBuilder & addRow(const std::vector & row) + void addRow(const std::vector & row) { chassert(row.size() == current_chunk.size()); for (size_t i = 0; i < current_chunk.size(); ++i) @@ -150,25 +150,29 @@ public: if (break_prob > 0.0 && std::uniform_real_distribution<>(0.0, 1.0)(rng) < break_prob) addChunk(); - - return *this; } - SourceChunksBuilder & addChunk() + void addChunk() { if (current_chunk.front()->empty()) - return *this; + return; size_t rows = current_chunk.front()->size(); chunks.emplace_back(std::move(current_chunk), rows); current_chunk = header.cloneEmptyColumns(); - return *this; + return; } std::shared_ptr build() { addChunk(); - return std::make_shared(header, std::move(chunks)); + + /// copy chunk to allow reusing same builder + Chunks chunks_copy; + chunks_copy.reserve(chunks.size()); + for (const auto & chunk : chunks) + chunks_copy.emplace_back(chunk.clone()); + return std::make_shared(header, std::move(chunks_copy)); } private: @@ -219,42 +223,54 @@ void checkColumn(const typename ColumnVector::Container & expected, const Blo TEST(FullSortingJoin, Asof) try { - auto left_source = SourceChunksBuilder({ + SourceChunksBuilder left_source({ {std::make_shared(), "key"}, {std::make_shared(), "t"}, - }) - .addRow({"AMZN", 3}) - .addRow({"AMZN", 4}) - .addRow({"AMZN", 6}) - .build(); + }); - auto right_source = SourceChunksBuilder({ + left_source.addRow({"AMZN", 3}); + left_source.addRow({"AMZN", 4}); + left_source.addRow({"AMZN", 6}); + + SourceChunksBuilder right_source({ {std::make_shared(), "key"}, {std::make_shared(), "t"}, {std::make_shared(), "value"}, - }) - .addRow({"AAPL", 1, 97}) - .addChunk() - .addRow({"AAPL", 2, 98}) - .addRow({"AAPL", 3, 99}) - .addRow({"AMZN", 1, 100}) - .addRow({"AMZN", 2, 110}) - .addChunk() - .addRow({"AMZN", 4, 130}) - .addRow({"AMZN", 5, 140}) - .build(); + }); + right_source.addRow({"AAPL", 1, 97}); + right_source.addChunk(); + right_source.addRow({"AAPL", 2, 98}); + right_source.addRow({"AAPL", 3, 99}); + right_source.addRow({"AMZN", 1, 100}); + right_source.addRow({"AMZN", 2, 110}); + right_source.addChunk(); + right_source.addRow({"AMZN", 4, 130}); + right_source.addRow({"AMZN", 5, 140}); - auto pipeline = buildJoinPipeline( - left_source, right_source, /* key_length = */ 2, - JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals); + { + Block result_block = executePipeline(buildJoinPipeline( + left_source.build(), right_source.build(), /* key_length = */ 2, + JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals)); + auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); - Block result_block = executePipeline(std::move(pipeline)); - auto values = 
getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); + ASSERT_EQ(values, (std::vector>{ + {"AMZN", 3u, 4u, 130u}, + {"AMZN", 4u, 4u, 130u}, + })); + } - ASSERT_EQ(values, (std::vector>{ - {"AMZN", 3u, 4u, 130u}, - {"AMZN", 4u, 4u, 130u}, - })); + { + Block result_block = executePipeline(buildJoinPipeline( + left_source.build(), right_source.build(), /* key_length = */ 2, + JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::GreaterOrEquals)); + auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); + + ASSERT_EQ(values, (std::vector>{ + {"AMZN", 3u, 2u, 110u}, + {"AMZN", 4u, 4u, 130u}, + {"AMZN", 6u, 5u, 140u}, + })); + } } catch (Exception & e) { @@ -314,23 +330,23 @@ catch (Exception & e) TEST(FullSortingJoin, AsofGeneratedTestData) try { - std::vector join_kinds = {JoinKind::Inner, JoinKind::Left}; + std::array join_kinds{JoinKind::Inner, JoinKind::Left}; auto join_kind = join_kinds[std::uniform_int_distribution(0, join_kinds.size() - 1)(rng)]; - std::vector asof_inequalities = { + std::array asof_inequalities{ ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals, // ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals, }; auto asof_inequality = asof_inequalities[std::uniform_int_distribution(0, asof_inequalities.size() - 1)(rng)]; - auto left_source_builder = SourceChunksBuilder({ + SourceChunksBuilder left_source_builder({ {std::make_shared(), "k1"}, {std::make_shared(), "k2"}, {std::make_shared(), "t"}, {std::make_shared(), "attr"}, }); - auto right_source_builder = SourceChunksBuilder({ + SourceChunksBuilder right_source_builder({ {std::make_shared(), "k1"}, {std::make_shared(), "k2"}, {std::make_shared(), "t"}, From 4a1a7d4c6278246e097cf0583b401aa8ac3775fa Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 25 Sep 2023 10:54:08 +0000 Subject: [PATCH 0117/2361] Add randomized test FullSortingJoin.Any --- .../Transforms/MergeJoinTransform.cpp | 28 ++- .../Transforms/MergeJoinTransform.h | 5 + .../tests/gtest_full_sorting_join.cpp | 181 ++++++++++++++---- 3 files changed, 166 insertions(+), 48 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index bfde5892289..f5e277ea8c8 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -630,7 +630,7 @@ struct AnyJoinImpl FullMergeJoinCursor & right_cursor, PaddedPODArray & left_map, PaddedPODArray & right_map, - AnyJoinState & state, + AnyJoinState & any_join_state, int null_direction_hint) { assert(enabled); @@ -691,21 +691,21 @@ struct AnyJoinImpl } } - /// Remember index of last joined row to propagate it to next block + /// Remember last joined row to propagate it to next block - state.setValue({}); + any_join_state.setValue({}); if (!left_cursor->isValid()) { - state.set(0, left_cursor.cursor); + any_join_state.set(0, left_cursor.cursor); if (cmp == 0 && isLeft(kind)) - state.setValue(getRowFromChunk(right_cursor.getCurrent(), rpos)); + any_join_state.setValue(getRowFromChunk(right_cursor.getCurrent(), rpos)); } if (!right_cursor->isValid()) { - state.set(1, right_cursor.cursor); + any_join_state.set(1, right_cursor.cursor); if (cmp == 0 && isRight(kind)) - state.setValue(getRowFromChunk(left_cursor.getCurrent(), lpos)); + any_join_state.setValue(getRowFromChunk(left_cursor.getCurrent(), lpos)); } } }; @@ -720,7 +720,6 @@ std::optional MergeJoinAlgorithm::handleAnyJoinState for (size_t source_num = 0; source_num < 2; 
++source_num) { auto & current = *cursors[source_num]; - auto & state = any_join_state; if (any_join_state.keys[source_num].equals(current.cursor)) { size_t start_pos = current->getRow(); @@ -728,25 +727,22 @@ std::optional MergeJoinAlgorithm::handleAnyJoinState if (length && isLeft(kind) && source_num == 0) { - if (state.value) - result = copyChunkResized(current.getCurrent(), state.value, start_pos, length); + if (any_join_state.value) + result = copyChunkResized(current.getCurrent(), any_join_state.value, start_pos, length); else result = createBlockWithDefaults(source_num, start_pos, length); } if (length && isRight(kind) && source_num == 1) { - if (state.value) - result = copyChunkResized(state.value, current.getCurrent(), start_pos, length); + if (any_join_state.value) + result = copyChunkResized(any_join_state.value, current.getCurrent(), start_pos, length); else result = createBlockWithDefaults(source_num, start_pos, length); } - /// We've found row with other key, no need to skip more rows with current key if (current->isValid()) - { - state.keys[source_num].reset(); - } + any_join_state.keys[source_num].reset(); } else { diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index 3ee01e57992..375c9ebd3cc 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -85,6 +85,11 @@ public: keys[source_num] = JoinKeyRow(cursor, cursor.rows - 1); } + void reset(size_t source_num) + { + keys[source_num].reset(); + } + void setValue(Chunk value_) { value = std::move(value_); } bool empty() const { return keys[0].row.empty() && keys[1].row.empty(); } diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index bbd321a78d7..1dc410bd6b8 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -213,13 +213,142 @@ Block executePipeline(QueryPipeline && pipeline) } template -void checkColumn(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) +void assertColumnVectorEq(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) { const auto & actual = assert_cast *>(block.getByName(name).column.get())->getData(); EXPECT_EQ(actual.size(), expected.size()); ASSERT_EQ(actual, expected); } +template +void assertColumnEq(const IColumn & expected, const Block & block, const std::string & name) +{ + const ColumnPtr & actual = block.getByName(name).column; + ASSERT_TRUE(checkColumn(*actual)); + ASSERT_TRUE(checkColumn(expected)); + EXPECT_EQ(actual->size(), expected.size()); + + auto dump_val = [](const IColumn & col, size_t i) -> String + { + Field value; + col.get(i, value); + return value.dump(); + }; + + size_t num_rows = std::min(actual->size(), expected.size()); + for (size_t i = 0; i < num_rows; ++i) + ASSERT_EQ(actual->compareAt(i, i, expected, 1), 0) << dump_val(*actual, i) << " != " << dump_val(expected, i) << " at row " << i; +} + +template +T getRandomFrom(const std::initializer_list & opts) +{ + std::vector options(opts.begin(), opts.end()); + size_t idx = std::uniform_int_distribution(0, options.size() - 1)(rng); + return options[idx]; +} + +/// Used to have accurate 0.0 and 1.0 probabilities +double getRandomDoubleQuantized(size_t quants = 5) +{ + return std::uniform_int_distribution(0, quants)(rng) / static_cast(quants); +} + +void generateNextKey(UInt64 & k1, String & k2) 
+{ + size_t str_len = std::uniform_int_distribution<>(1, 10)(rng); + String new_k2 = getRandomASCIIString(str_len, rng); + if (new_k2.compare(k2) <= 0) + ++k1; + k2 = new_k2; +} + +TEST(FullSortingJoin, Any) +try +{ + JoinKind kind = getRandomFrom({JoinKind::Inner, JoinKind::Left, JoinKind::Right}); + + SourceChunksBuilder left_source({ + {std::make_shared(), "k1"}, + {std::make_shared(), "k2"}, + {std::make_shared(), "attr"}, + }); + + SourceChunksBuilder right_source({ + {std::make_shared(), "k1"}, + {std::make_shared(), "k2"}, + {std::make_shared(), "attr"}, + }); + + left_source.break_prob = getRandomDoubleQuantized(); + right_source.break_prob = getRandomDoubleQuantized(); + + size_t num_keys = std::uniform_int_distribution<>(100, 1000)(rng); + + auto expected_left = ColumnString::create(); + auto expected_right = ColumnString::create(); + + UInt64 k1 = 0; + String k2 = ""; + + auto get_attr = [&](const String & side, size_t idx) -> String + { + return toString(k1) + "_" + k2 + "_" + side + "_" + toString(idx); + }; + + for (size_t i = 0; i < num_keys; ++i) + { + generateNextKey(k1, k2); + + /// Key is present in left, right or both tables. Both tables is more probable. + size_t key_presence = std::uniform_int_distribution<>(0, 10)(rng); + + size_t num_rows_left = key_presence == 0 ? 0 : std::uniform_int_distribution<>(1, 10)(rng); + for (size_t j = 0; j < num_rows_left; ++j) + left_source.addRow({k1, k2, get_attr("left", j)}); + + size_t num_rows_right = key_presence == 1 ? 0 : std::uniform_int_distribution<>(1, 10)(rng); + for (size_t j = 0; j < num_rows_right; ++j) + right_source.addRow({k1, k2, get_attr("right", j)}); + + String left_attr = num_rows_left ? get_attr("left", 0) : ""; + String right_attr = num_rows_right ? get_attr("right", 0) : ""; + + if (kind == JoinKind::Inner && num_rows_left && num_rows_right) + { + expected_left->insert(left_attr); + expected_right->insert(right_attr); + } + else if (kind == JoinKind::Left) + { + for (size_t j = 0; j < num_rows_left; ++j) + { + expected_left->insert(get_attr("left", j)); + expected_right->insert(right_attr); + } + } + else if (kind == JoinKind::Right) + { + for (size_t j = 0; j < num_rows_right; ++j) + { + expected_left->insert(left_attr); + expected_right->insert(get_attr("right", j)); + } + } + } + + Block result_block = executePipeline(buildJoinPipeline( + left_source.build(), right_source.build(), /* key_length = */ 2, + kind, JoinStrictness::Any)); + assertColumnEq(*expected_left, result_block, "t1.attr"); + assertColumnEq(*expected_right, result_block, "t2.attr"); +} +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} + TEST(FullSortingJoin, Asof) try { @@ -259,18 +388,18 @@ try })); } - { - Block result_block = executePipeline(buildJoinPipeline( - left_source.build(), right_source.build(), /* key_length = */ 2, - JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::GreaterOrEquals)); - auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); + // { + // Block result_block = executePipeline(buildJoinPipeline( + // left_source.build(), right_source.build(), /* key_length = */ 2, + // JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::GreaterOrEquals)); + // auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); - ASSERT_EQ(values, (std::vector>{ - {"AMZN", 3u, 2u, 110u}, - {"AMZN", 4u, 4u, 130u}, - {"AMZN", 6u, 5u, 140u}, - })); - } + // ASSERT_EQ(values, (std::vector>{ + // {"AMZN", 3u, 2u, 110u}, + // 
{"AMZN", 4u, 4u, 130u}, + // {"AMZN", 6u, 5u, 140u}, + // })); + // } } catch (Exception & e) { @@ -330,14 +459,12 @@ catch (Exception & e) TEST(FullSortingJoin, AsofGeneratedTestData) try { - std::array join_kinds{JoinKind::Inner, JoinKind::Left}; - auto join_kind = join_kinds[std::uniform_int_distribution(0, join_kinds.size() - 1)(rng)]; + auto join_kind = getRandomFrom({JoinKind::Inner, JoinKind::Left}); - std::array asof_inequalities{ + auto asof_inequality = getRandomFrom({ ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals, // ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals, - }; - auto asof_inequality = asof_inequalities[std::uniform_int_distribution(0, asof_inequalities.size() - 1)(rng)]; + }); SourceChunksBuilder left_source_builder({ {std::make_shared(), "k1"}, @@ -353,18 +480,8 @@ try {std::make_shared(), "attr"}, }); - /// uniform_int_distribution to have 0.0 and 1.0 probabilities - left_source_builder.break_prob = std::uniform_int_distribution<>(0, 5)(rng) / 5.0; - right_source_builder.break_prob = std::uniform_int_distribution<>(0, 5)(rng) / 5.0; - - auto get_next_key = [](UInt64 & k1, String & k2) - { - size_t str_len = std::uniform_int_distribution<>(1, 10)(rng); - String new_k2 = getRandomASCIIString(str_len, rng); - if (new_k2.compare(k2) <= 0) - ++k1; - k2 = new_k2; - }; + left_source_builder.break_prob = getRandomDoubleQuantized(); + right_source_builder.break_prob = getRandomDoubleQuantized(); ColumnInt64::Container expected; @@ -416,7 +533,7 @@ try expected.push_back(-10 * left_t); } - get_next_key(k1, k2); + generateNextKey(k1, k2); } Block result_block = executePipeline(buildJoinPipeline( @@ -424,12 +541,12 @@ try /* key_length = */ 3, join_kind, JoinStrictness::Asof, asof_inequality)); - checkColumn(expected, result_block, "t1.attr"); + assertColumnVectorEq(expected, result_block, "t1.attr"); for (auto & e : expected) e = e < 0 ? 
0 : 10 * e; /// non matched rows from left table have negative attr - checkColumn(expected, result_block, "t2.attr"); + assertColumnVectorEq(expected, result_block, "t2.attr"); } catch (Exception & e) { std::cout << e.getStackTraceString() << std::endl; From 7e0c2d7bcb88f6d8575fc1f0e288e94f13456f87 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 26 Sep 2023 12:25:12 +0000 Subject: [PATCH 0118/2361] wip full sorting asof join --- .../Transforms/MergeJoinTransform.cpp | 300 +++++++++++++-- .../Transforms/MergeJoinTransform.h | 89 ++--- .../tests/gtest_full_sorting_join.cpp | 346 ++++++++++++++---- .../02276_full_sort_join_unsupported.sql | 2 +- 4 files changed, 575 insertions(+), 162 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index f5e277ea8c8..638f2f1cb10 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -34,8 +34,6 @@ namespace ErrorCodes namespace { -constexpr UInt64 DEFAULT_VALUE_INDEX = std::numeric_limits::max(); - FullMergeJoinCursorPtr createCursor(const Block & block, const Names & columns, JoinStrictness strictness) { SortDescription desc; @@ -112,7 +110,7 @@ int ALWAYS_INLINE compareCursors(const SortCursorImpl & lhs, const SortCursorImp int compareAsofCursors(const FullMergeJoinCursor & lhs, const FullMergeJoinCursor & rhs) { - return nullableCompareAt(lhs.getAsofColumn(), rhs.getAsofColumn(), lhs->getRow(), rhs->getRow()); + return nullableCompareAt(*lhs.getAsofColumn(), *rhs.getAsofColumn(), lhs->getRow(), rhs->getRow()); } bool ALWAYS_INLINE totallyLess(SortCursorImpl & lhs, SortCursorImpl & rhs, int null_direction_hint) @@ -250,6 +248,87 @@ void inline addMany(PaddedPODArray & values, UInt64 value, size_t num) } +JoinKeyRow::JoinKeyRow(const FullMergeJoinCursor & cursor, size_t pos) +{ + row.reserve(cursor->sort_columns.size()); + for (const auto & col : cursor->sort_columns) + { + auto new_col = col->cloneEmpty(); + new_col->insertFrom(*col, pos); + row.push_back(std::move(new_col)); + } + if (auto asof_column = cursor.getAsofColumn()) + { + auto new_col = asof_column->cloneEmpty(); + new_col->insertFrom(*asof_column, pos); + row.push_back(std::move(new_col)); + } +} + +void JoinKeyRow::reset() +{ + row.clear(); +} + +bool JoinKeyRow::equals(const FullMergeJoinCursor & cursor) const +{ + if (row.empty()) + return false; + + assert(this->row.size() == cursor->sort_columns_size); + for (size_t i = 0; i < cursor->sort_columns_size; ++i) + { + int cmp = this->row[i]->compareAt(0, cursor->getRow(), *(cursor->sort_columns[i]), cursor->desc[i].nulls_direction); + if (cmp != 0) + return false; + } + return true; +} + +bool JoinKeyRow::asofMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequality asof_inequality) const +{ + if (!equals(cursor)) + return false; + int cmp = cursor.getAsofColumn()->compareAt(cursor->getRow(), 0, *row.back(), 1); + return (asof_inequality == ASOFJoinInequality::Less && cmp < 0) + || (asof_inequality == ASOFJoinInequality::LessOrEquals && cmp <= 0) + || (asof_inequality == ASOFJoinInequality::Greater && cmp > 0) + || (asof_inequality == ASOFJoinInequality::GreaterOrEquals && cmp >= 0); +} + +void AnyJoinState::set(size_t source_num, const FullMergeJoinCursor & cursor) +{ + assert(cursor->rows); + keys[source_num] = JoinKeyRow(cursor, cursor->rows - 1); +} + +void AnyJoinState::reset(size_t source_num) +{ + keys[source_num].reset(); + value.clear(); +} + +void AnyJoinState::setValue(Chunk value_) +{ + 
value = std::move(value_); +} + +bool AnyJoinState::empty() const { return keys[0].row.empty() && keys[1].row.empty(); } + + +void AsofJoinState::set(const FullMergeJoinCursor & rcursor, size_t rpos) +{ + key = JoinKeyRow(rcursor, rpos); + value = rcursor.getCurrent().clone(); + value_row = rpos; +} + +void AsofJoinState::reset() +{ + key.reset(); + value.clear(); +} + const Chunk & FullMergeJoinCursor::getCurrent() const { return current_chunk; @@ -282,6 +361,31 @@ bool FullMergeJoinCursor::fullyCompleted() const return !cursor.isValid() && recieved_all_blocks; } +String FullMergeJoinCursor::dump() const +{ + Strings row_dump; + if (cursor.isValid()) + { + Field val; + for (size_t i = 0; i < cursor.sort_columns_size; ++i) + { + cursor.sort_columns[i]->get(cursor.getRow(), val); + row_dump.push_back(val.dump()); + } + + if (auto * asof_column = getAsofColumn()) + { + asof_column->get(cursor.getRow(), val); + row_dump.push_back(val.dump()); + } + } + + return fmt::format("<{}/{}{}>[{}]", + cursor.getRow(), cursor.rows, + recieved_all_blocks ? "(finished)" : "", + fmt::join(row_dump, ", ")); +} + MergeJoinAlgorithm::MergeJoinAlgorithm( JoinKind kind_, JoinStrictness strictness_, @@ -456,7 +560,7 @@ struct AllJoinImpl else { assert(state == nullptr); - state = std::make_unique(left_cursor.cursor, lpos, right_cursor.cursor, rpos); + state = std::make_unique(left_cursor, lpos, right_cursor, rpos); state->addRange(0, left_cursor.getCurrent().clone(), lpos, lnum); state->addRange(1, right_cursor.getCurrent().clone(), rpos, rnum); return; @@ -501,6 +605,17 @@ void dispatchKind(JoinKind kind, Args && ... args) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported join kind: \"{}\"", kind); } +MutableColumns MergeJoinAlgorithm::getEmptyResultColumns() const +{ + MutableColumns result_cols; + for (size_t i = 0; i < 2; ++i) + { + for (const auto & col : cursors[i]->sampleColumns()) + result_cols.push_back(col->cloneEmpty()); + } + return result_cols; +} + std::optional MergeJoinAlgorithm::handleAllJoinState() { if (all_join_state && all_join_state->finished()) @@ -514,7 +629,7 @@ std::optional MergeJoinAlgorithm::handleAllJoinState /// Accumulate blocks with same key in all_join_state for (size_t i = 0; i < 2; ++i) { - if (cursors[i]->cursor.isValid() && all_join_state->keys[i].equals(cursors[i]->cursor)) + if (cursors[i]->cursor.isValid() && all_join_state->keys[i].equals(*cursors[i])) { size_t pos = cursors[i]->cursor.getRow(); size_t num = nextDistinct(cursors[i]->cursor); @@ -534,12 +649,7 @@ std::optional MergeJoinAlgorithm::handleAllJoinState stat.max_blocks_loaded = std::max(stat.max_blocks_loaded, all_join_state->blocksStored()); /// join all rows with current key - MutableColumns result_cols; - for (size_t i = 0; i < 2; ++i) - { - for (const auto & col : cursors[i]->sampleColumns()) - result_cols.push_back(col->cloneEmpty()); - } + MutableColumns result_cols = getEmptyResultColumns(); size_t total_rows = 0; while (!max_block_size || total_rows < max_block_size) @@ -567,6 +677,52 @@ std::optional MergeJoinAlgorithm::handleAllJoinState return {}; } +std::optional MergeJoinAlgorithm::handleAsofJoinState() +{ + if (strictness != JoinStrictness::Asof) + return {}; + + if (!cursors[1]->fullyCompleted()) + return {}; + + auto & left_cursor = *cursors[0]; + size_t lpos = left_cursor->getRow(); + const auto & left_columns = left_cursor.getCurrent().getColumns(); + + MutableColumns result_cols = getEmptyResultColumns(); + + while (left_cursor->isValid() && asof_join_state.hasMatch(left_cursor, 
asof_inequality)) + { + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertFrom(*col, lpos); + for (const auto & col : asof_join_state.value.getColumns()) + result_cols[i++]->insertFrom(*col, asof_join_state.value_row); + chassert(i == result_cols.size()); + left_cursor->next(); + } + + while (isLeft(kind) && left_cursor->isValid()) + { + /// return row with default values at right side + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertFrom(*col, lpos); + for (; i < result_cols.size(); ++i) + result_cols[i]->insertDefault(); + chassert(i == result_cols.size()); + + left_cursor->next(); + } + + size_t result_rows = result_cols.empty() ? 0 : result_cols.front()->size(); + if (result_rows) + return Status(Chunk(std::move(result_cols), result_rows)); + + return {}; +} + + MergeJoinAlgorithm::Status MergeJoinAlgorithm::allJoin() { PaddedPODArray idx_map[2]; @@ -696,14 +852,14 @@ struct AnyJoinImpl any_join_state.setValue({}); if (!left_cursor->isValid()) { - any_join_state.set(0, left_cursor.cursor); + any_join_state.set(0, left_cursor); if (cmp == 0 && isLeft(kind)) any_join_state.setValue(getRowFromChunk(right_cursor.getCurrent(), rpos)); } if (!right_cursor->isValid()) { - any_join_state.set(1, right_cursor.cursor); + any_join_state.set(1, right_cursor); if (cmp == 0 && isRight(kind)) any_join_state.setValue(getRowFromChunk(left_cursor.getCurrent(), lpos)); } @@ -720,7 +876,7 @@ std::optional MergeJoinAlgorithm::handleAnyJoinState for (size_t source_num = 0; source_num < 2; ++source_num) { auto & current = *cursors[source_num]; - if (any_join_state.keys[source_num].equals(current.cursor)) + if (any_join_state.keys[source_num].equals(current)) { size_t start_pos = current->getRow(); size_t length = nextDistinct(current.cursor); @@ -811,24 +967,35 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() if (!right_cursor->isValid()) return Status(1); - PaddedPODArray left_map; - PaddedPODArray right_map; + const auto & left_columns = left_cursor.getCurrent().getColumns(); + const auto & right_columns = right_cursor.getCurrent().getColumns(); + + MutableColumns result_cols = getEmptyResultColumns(); while (left_cursor->isValid() && right_cursor->isValid()) { auto lpos = left_cursor->getRow(); auto rpos = right_cursor->getRow(); auto cmp = compareCursors(*left_cursor, *right_cursor); + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ({}) <=> ({}) -> {}", __FILE__, __LINE__, left_cursor.dump(), right_cursor.dump(), cmp); + if (cmp == 0) { auto asof_cmp = compareAsofCursors(left_cursor, right_cursor); + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ({}) <=> ({}) -> asof {}", __FILE__, __LINE__, left_cursor.dump(), right_cursor.dump(), asof_cmp); + if ((asof_inequality == ASOFJoinInequality::Less && asof_cmp <= -1) || (asof_inequality == ASOFJoinInequality::LessOrEquals && asof_cmp <= 0)) { /// First row in right table that is greater (or equal) than current row in left table /// matches asof join condition the best - left_map.push_back(lpos); - right_map.push_back(rpos); + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertFrom(*col, lpos); + for (const auto & col : right_columns) + result_cols[i++]->insertFrom(*col, rpos); + chassert(i == result_cols.size()); + left_cursor->next(); continue; } @@ -839,39 +1006,99 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() right_cursor->next(); continue; } + + if ((asof_inequality == ASOFJoinInequality::Greater && asof_cmp >= 1) + || (asof_inequality 
== ASOFJoinInequality::GreaterOrEquals && asof_cmp >= 0)) + { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + /// condition is satisfied, remember this row and move next to try to find better match + asof_join_state.set(right_cursor, rpos); + right_cursor->next(); + continue; + } + + if (asof_inequality == ASOFJoinInequality::Greater || asof_inequality == ASOFJoinInequality::GreaterOrEquals) + { + /// Asof condition is not satisfied anymore, use last matched row from right table + if (asof_join_state.hasMatch(left_cursor, asof_inequality)) + { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertFrom(*col, lpos); + for (const auto & col : asof_join_state.value.getColumns()) + result_cols[i++]->insertFrom(*col, asof_join_state.value_row); + chassert(i == result_cols.size()); + } + else + { + asof_join_state.reset(); + if (isLeft(kind)) + { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + + /// return row with default values at right side + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertFrom(*col, lpos); + for (; i < result_cols.size(); ++i) + result_cols[i]->insertDefault(); + chassert(i == result_cols.size()); + } + } + left_cursor->next(); + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + continue; + } + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "TODO: implement ASOF equality join"); } else if (cmp < 0) { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + if (asof_join_state.hasMatch(left_cursor, asof_inequality)) + { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertFrom(*col, lpos); + for (const auto & col : asof_join_state.value.getColumns()) + result_cols[i++]->insertFrom(*col, asof_join_state.value_row); + chassert(i == result_cols.size()); + left_cursor->next(); + continue; + } + else + { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + asof_join_state.reset(); + } + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + /// no matches for rows in left table, just pass them through size_t num = nextDistinct(*left_cursor); - if (isLeft(kind)) + if (isLeft(kind) && num) { /// return them with default values at right side - addRange(left_map, lpos, lpos + num); - addMany(right_map, DEFAULT_VALUE_INDEX, num); + size_t i = 0; + for (const auto & col : left_columns) + result_cols[i++]->insertRangeFrom(*col, lpos, num); + for (; i < result_cols.size(); ++i) + result_cols[i]->insertManyDefaults(num); + chassert(i == result_cols.size()); } } else { + // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); + /// skip rows in right table until we find match for current row in left table nextDistinct(*right_cursor); } } - - chassert(left_map.size() == right_map.size()); - Chunk result; - { - Columns lcols = indexColumns(left_cursor.getCurrent().getColumns(), left_map); - for (auto & col : lcols) - result.addColumn(std::move(col)); - - Columns rcols = indexColumns(right_cursor.getCurrent().getColumns(), right_map); - for (auto & col : rcols) - result.addColumn(std::move(col)); - } - - return Status(std::move(result)); + size_t num_rows = result_cols.empty() ? 
0 : result_cols.front()->size(); + return Status(Chunk(std::move(result_cols), num_rows)); } @@ -929,6 +1156,9 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() return std::move(*result); } + if (auto result = handleAsofJoinState()) + return std::move(*result); + if (cursors[0]->fullyCompleted() || cursors[1]->fullyCompleted()) { if (!cursors[0]->fullyCompleted() && isLeftOrFull(kind)) diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index 375c9ebd3cc..dbdda0b166b 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -37,62 +37,28 @@ using FullMergeJoinCursorPtr = std::unique_ptr; /// Used instead of storing previous block struct JoinKeyRow { - std::vector row; - JoinKeyRow() = default; - explicit JoinKeyRow(const SortCursorImpl & impl_, size_t pos) - { - row.reserve(impl_.sort_columns.size()); - for (const auto & col : impl_.sort_columns) - { - auto new_col = col->cloneEmpty(); - new_col->insertFrom(*col, pos); - row.push_back(std::move(new_col)); - } - } + JoinKeyRow(const FullMergeJoinCursor & cursor, size_t pos); - void reset() - { - row.clear(); - } + bool equals(const FullMergeJoinCursor & cursor) const; + bool asofMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequality asof_inequality) const; - bool equals(const SortCursorImpl & impl) const - { - if (row.empty()) - return false; + void reset(); - assert(this->row.size() == impl.sort_columns_size); - for (size_t i = 0; i < impl.sort_columns_size; ++i) - { - int cmp = this->row[i]->compareAt(0, impl.getRow(), *impl.sort_columns[i], impl.desc[i].nulls_direction); - if (cmp != 0) - return false; - } - return true; - } + std::vector row; }; /// Remembers previous key if it was joined in previous block class AnyJoinState : boost::noncopyable { public: - AnyJoinState() = default; + void set(size_t source_num, const FullMergeJoinCursor & cursor); + void setValue(Chunk value_); - void set(size_t source_num, const SortCursorImpl & cursor) - { - assert(cursor.rows); - keys[source_num] = JoinKeyRow(cursor, cursor.rows - 1); - } + void reset(size_t source_num); - void reset(size_t source_num) - { - keys[source_num].reset(); - } - - void setValue(Chunk value_) { value = std::move(value_); } - - bool empty() const { return keys[0].row.empty() && keys[1].row.empty(); } + bool empty() const; /// current keys JoinKeyRow keys[2]; @@ -125,8 +91,8 @@ public: Chunk chunk; }; - AllJoinState(const SortCursorImpl & lcursor, size_t lpos, - const SortCursorImpl & rcursor, size_t rpos) + AllJoinState(const FullMergeJoinCursor & lcursor, size_t lpos, + const FullMergeJoinCursor & rcursor, size_t rpos) : keys{JoinKeyRow(lcursor, lpos), JoinKeyRow(rcursor, rpos)} { } @@ -194,6 +160,25 @@ private: size_t ridx = 0; }; + +class AsofJoinState : boost::noncopyable +{ +public: + void set(const FullMergeJoinCursor & rcursor, size_t rpos); + void reset(); + + bool hasMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequality asof_inequality) + { + if (value.empty()) + return false; + return key.asofMatch(cursor, asof_inequality); + } + + JoinKeyRow key; + Chunk value; + size_t value_row = 0; +}; + /* * Wrapper for SortCursorImpl */ @@ -239,11 +224,15 @@ public: const Block & sampleBlock() const { return sample_block; } Columns sampleColumns() const { return sample_block.getColumns(); } - const IColumn & getAsofColumn() const + const IColumn * getAsofColumn() const { - return *cursor.all_columns[asof_column_position]; + if 
(!asof_column_position) + return nullptr; + return cursor.all_columns[*asof_column_position]; } + String dump() const; + private: Block sample_block; SortDescription desc; @@ -251,7 +240,7 @@ private: Chunk current_chunk; bool recieved_all_blocks = false; - size_t asof_column_position; + std::optional asof_column_position; }; /* @@ -284,12 +273,13 @@ private: std::optional handleAllJoinState(); Status allJoin(); + std::optional handleAsofJoinState(); Status asofJoin(); + MutableColumns getEmptyResultColumns() const; Chunk createBlockWithDefaults(size_t source_num); Chunk createBlockWithDefaults(size_t source_num, size_t start, size_t num_rows) const; - /// For `USING` join key columns should have values from right side instead of defaults std::unordered_map left_to_right_key_remap; @@ -299,6 +289,7 @@ private: /// Keep some state to make handle data from different blocks AnyJoinState any_join_state; std::unique_ptr all_join_state; + AsofJoinState asof_join_state; JoinKind kind; JoinStrictness strictness; diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 1dc410bd6b8..4e0727779b7 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -18,19 +18,53 @@ #include #include +#include +#include + + #include using namespace DB; +namespace +{ + +[[ maybe_unused ]] +String dumpBlock(std::shared_ptr source) +{ + WriteBufferFromOwnString buf; + { + Block header = source->getPort().getHeader(); + QueryPipeline pipeline(source); + auto format = std::make_shared(buf, header, FormatSettings{}, false); + pipeline.complete(std::move(format)); + + CompletedPipelineExecutor executor(pipeline); + executor.execute(); + } + return buf.str(); +} + +[[ maybe_unused ]] +String dumpBlock(const Block & block) +{ + Block header = block.cloneEmpty(); + Chunk data(block.getColumns(), block.rows()); + auto source = std::make_shared(header, std::move(data)); + return dumpBlock(std::move(source)); +} + UInt64 getAndPrintRandomSeed() { UInt64 seed = randomSeed(); - std::cerr << __FILE__ << "::" << "TEST_RANDOM_SEED = " << seed << "ull" << std::endl; + if (const char * random_seed = std::getenv("TEST_RANDOM_SEED")) // NOLINT(concurrency-mt-unsafe) + seed = std::stoull(random_seed); + + std::cerr << __FILE__ << " :: " << "TEST_RANDOM_SEED=" << seed << std::endl; return seed; } static UInt64 TEST_RANDOM_SEED = getAndPrintRandomSeed(); - static pcg64 rng(TEST_RANDOM_SEED); @@ -94,42 +128,29 @@ QueryPipeline buildJoinPipeline( std::shared_ptr oneColumnSource(const std::vector> & values) { - Block header = { ColumnWithTypeAndName(std::make_shared(), "x") }; + Block header = { + ColumnWithTypeAndName(std::make_shared(), "key"), + ColumnWithTypeAndName(std::make_shared(), "idx"), + }; + + UInt64 idx = 0; Chunks chunks; for (const auto & chunk_values : values) { - auto column = ColumnUInt64::create(); + auto key_column = ColumnUInt64::create(); + auto idx_column = ColumnUInt64::create(); + for (auto n : chunk_values) - column->insertValue(n); - chunks.emplace_back(Chunk(Columns{std::move(column)}, chunk_values.size())); + { + key_column->insertValue(n); + idx_column->insertValue(idx); + ++idx; + } + chunks.emplace_back(Chunk(Columns{std::move(key_column), std::move(idx_column)}, chunk_values.size())); } return std::make_shared(header, std::move(chunks)); } - -TEST(FullSortingJoin, Simple) -try -{ - auto left_source = oneColumnSource({ {1, 2, 3, 4, 5} }); - auto right_source = oneColumnSource({ {1}, {2}, 
{3}, {4}, {5} }); - - auto pipeline = buildJoinPipeline(left_source, right_source); - PullingPipelineExecutor executor(pipeline); - - Block block; - - size_t total_result_rows = 0; - while (executor.pull(block)) - total_result_rows += block.rows(); - - ASSERT_EQ(total_result_rows, 5); -} -catch (Exception & e) -{ - std::cout << e.getStackTraceString() << std::endl; - throw; -} - class SourceChunksBuilder { public: @@ -163,7 +184,7 @@ public: return; } - std::shared_ptr build() + std::shared_ptr getSource() { addChunk(); @@ -215,9 +236,11 @@ Block executePipeline(QueryPipeline && pipeline) template void assertColumnVectorEq(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) { - const auto & actual = assert_cast *>(block.getByName(name).column.get())->getData(); - EXPECT_EQ(actual.size(), expected.size()); - ASSERT_EQ(actual, expected); + const auto * actual = typeid_cast *>(block.getByName(name).column.get()); + ASSERT_TRUE(actual) << "unexpected column type: " << block.getByName(name).column->dumpStructure() << "expected: " << typeid(ColumnVector).name(); + + EXPECT_EQ(actual->getData().size(), expected.size()); + ASSERT_EQ(actual->getData(), expected) << "column name: " << name; } template @@ -263,7 +286,90 @@ void generateNextKey(UInt64 & k1, String & k2) k2 = new_k2; } -TEST(FullSortingJoin, Any) +bool isStrict(ASOFJoinInequality inequality) +{ + return inequality == ASOFJoinInequality::Less || inequality == ASOFJoinInequality::Greater; +} + +} + +TEST(FullSortingJoin, AllAnyOneKey) +try +{ + { + SCOPED_TRACE("Inner All"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {1, 2, 3, 4, 5} }), + oneColumnSource({ {1}, {2}, {3}, {4}, {5} }), + 1, JoinKind::Inner, JoinStrictness::All)); + + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 3, 4}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 3, 4}), result, "t2.idx"); + } + { + SCOPED_TRACE("Inner Any"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {1, 2, 3, 4, 5} }), + oneColumnSource({ {1}, {2}, {3}, {4}, {5} }), + 1, JoinKind::Inner, JoinStrictness::Any)); + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 3, 4}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 3, 4}), result, "t2.idx"); + } + { + SCOPED_TRACE("Inner All"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {2, 2, 2}, {2, 3}, {3, 5} }), + oneColumnSource({ {1, 1, 1}, {2, 2}, {3, 4} }), + 1, JoinKind::Inner, JoinStrictness::All)); + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 0, 1, 2, 3, 3, 4, 5}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({3, 3, 3, 4, 4, 4, 3, 4, 5, 5}), result, "t2.idx"); + } + { + SCOPED_TRACE("Inner Any"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {2, 2, 2}, {2, 3}, {3, 5} }), + oneColumnSource({ {1, 1, 1}, {2, 2}, {3, 4} }), + 1, JoinKind::Inner, JoinStrictness::Any)); + assertColumnVectorEq(ColumnUInt64::Container({0, 4}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({3, 5}), result, "t2.idx"); + } + { + SCOPED_TRACE("Inner Any"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {2, 2, 2, 2}, {3}, {3, 5} }), + oneColumnSource({ {1, 1, 1, 2}, {2}, {3, 4} }), + 1, JoinKind::Inner, JoinStrictness::Any)); + assertColumnVectorEq(ColumnUInt64::Container({0, 4}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({3, 5}), result, "t2.idx"); + } + { + 
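        // The `Inner Any` expectations above follow one rule: for every key present on both
        // sides, exactly one row is produced from the first left row and the first right row
        // of that key's run. A minimal sketch of that rule with plain std types (assuming
        // <vector>, <utility> and <cstdint> are available; the lambda is illustrative only,
        // not an existing helper in this file):
        [[maybe_unused]] auto any_inner_expected_pairs = [](const std::vector<uint64_t> & left, const std::vector<uint64_t> & right)
        {
            std::vector<std::pair<size_t, size_t>> out;
            size_t l = 0;
            size_t r = 0;
            while (l < left.size() && r < right.size())
            {
                if (left[l] < right[r])
                    ++l;
                else if (right[r] < left[l])
                    ++r;
                else
                {
                    out.emplace_back(l, r); // first rows of both runs form the ANY match
                    uint64_t key = left[l];
                    while (l < left.size() && left[l] == key) // skip the rest of the run on both sides
                        ++l;
                    while (r < right.size() && right[r] == key)
                        ++r;
                }
            }
            return out;
        };
        // For left keys {2, 2, 2, 2, 3, 3, 5} and right keys {1, 1, 1, 2, 2, 3, 4} this yields
        // (0, 3) and (4, 5), i.e. t1.idx = {0, 4} and t2.idx = {3, 5}, matching the assertions above.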
+ SCOPED_TRACE("Left Any"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {2, 2, 2}, {2, 3}, {3, 5} }), + oneColumnSource({ {1, 1, 1}, {2, 2}, {3, 4} }), + 1, JoinKind::Left, JoinStrictness::Any)); + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 3, 4, 5, 6}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({3, 3, 3, 3, 5, 5, 0}), result, "t2.idx"); + } + { + SCOPED_TRACE("Left Any"); + Block result = executePipeline(buildJoinPipeline( + oneColumnSource({ {2, 2, 2, 2}, {3}, {3, 5} }), + oneColumnSource({ {1, 1, 1, 2}, {2}, {3, 4} }), + 1, JoinKind::Left, JoinStrictness::Any)); + assertColumnVectorEq(ColumnUInt64::Container({0, 1, 2, 3, 4, 5, 6}), result, "t1.idx"); + assertColumnVectorEq(ColumnUInt64::Container({3, 3, 3, 3, 5, 5, 0}), result, "t2.idx"); + } +} +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} + + +TEST(FullSortingJoin, AnyRandomized) try { JoinKind kind = getRandomFrom({JoinKind::Inner, JoinKind::Left, JoinKind::Right}); @@ -288,7 +394,7 @@ try auto expected_left = ColumnString::create(); auto expected_right = ColumnString::create(); - UInt64 k1 = 0; + UInt64 k1 = 1; String k2 = ""; auto get_attr = [&](const String & side, size_t idx) -> String @@ -338,7 +444,7 @@ try } Block result_block = executePipeline(buildJoinPipeline( - left_source.build(), right_source.build(), /* key_length = */ 2, + left_source.getSource(), right_source.getSource(), /* key_length = */ 2, kind, JoinStrictness::Any)); assertColumnEq(*expected_left, result_block, "t1.attr"); assertColumnEq(*expected_right, result_block, "t2.attr"); @@ -356,10 +462,10 @@ try {std::make_shared(), "key"}, {std::make_shared(), "t"}, }); - left_source.addRow({"AMZN", 3}); left_source.addRow({"AMZN", 4}); left_source.addRow({"AMZN", 6}); + left_source.addRow({"SBUX", 10}); SourceChunksBuilder right_source({ {std::make_shared(), "key"}, @@ -371,14 +477,19 @@ try right_source.addRow({"AAPL", 2, 98}); right_source.addRow({"AAPL", 3, 99}); right_source.addRow({"AMZN", 1, 100}); + right_source.addRow({"AMZN", 2, 0}); + right_source.addChunk(); right_source.addRow({"AMZN", 2, 110}); right_source.addChunk(); right_source.addRow({"AMZN", 4, 130}); right_source.addRow({"AMZN", 5, 140}); + right_source.addRow({"SBUX", 8, 180}); + right_source.addChunk(); + right_source.addRow({"SBUX", 9, 190}); { Block result_block = executePipeline(buildJoinPipeline( - left_source.build(), right_source.build(), /* key_length = */ 2, + left_source.getSource(), right_source.getSource(), /* key_length = */ 2, JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::LessOrEquals)); auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); @@ -388,18 +499,19 @@ try })); } - // { - // Block result_block = executePipeline(buildJoinPipeline( - // left_source.build(), right_source.build(), /* key_length = */ 2, - // JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::GreaterOrEquals)); - // auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); + { + Block result_block = executePipeline(buildJoinPipeline( + left_source.getSource(), right_source.getSource(), /* key_length = */ 2, + JoinKind::Inner, JoinStrictness::Asof, ASOFJoinInequality::GreaterOrEquals)); + auto values = getValuesFromBlock(result_block, {"t1.key", "t1.t", "t2.t", "t2.value"}); - // ASSERT_EQ(values, (std::vector>{ - // {"AMZN", 3u, 2u, 110u}, - // {"AMZN", 4u, 4u, 130u}, - // {"AMZN", 6u, 5u, 140u}, - // })); - // } + 
ASSERT_EQ(values, (std::vector>{ + {"AMZN", 3u, 2u, 110u}, + {"AMZN", 4u, 4u, 130u}, + {"AMZN", 6u, 5u, 140u}, + {"SBUX", 10u, 9u, 190u}, + })); + } } catch (Exception & e) { @@ -418,8 +530,7 @@ try {std::make_shared(), "value"}, }); - UInt64 p = std::uniform_int_distribution<>(0, 2)(rng); - double break_prob = p == 0 ? 0.0 : (p == 1 ? 0.5 : 1.0); + double break_prob = getRandomDoubleQuantized(2); std::uniform_real_distribution<> prob_dis(0.0, 1.0); for (const auto & row : std::vector>{ {1, 101}, {2, 102}, {4, 104}, {5, 105}, {11, 111}, {15, 115} }) { @@ -427,7 +538,7 @@ try if (prob_dis(rng) < break_prob) right_source_builder.addChunk(); } - auto right_source = right_source_builder.build(); + auto right_source = right_source_builder.getSource(); auto pipeline = buildJoinPipeline( left_source, right_source, /* key_length = */ 1, @@ -436,7 +547,7 @@ try Block result_block = executePipeline(std::move(pipeline)); ASSERT_EQ( - assert_cast(result_block.getByName("t1.x").column.get())->getData(), + assert_cast(result_block.getByName("t1.key").column.get())->getData(), (ColumnUInt64::Container{3, 3, 3, 3, 3, 5, 5, 6, 9, 9, 10}) ); @@ -456,15 +567,14 @@ catch (Exception & e) throw; } -TEST(FullSortingJoin, AsofGeneratedTestData) +TEST(FullSortingJoin, AsofLessGeneratedTestData) try { - auto join_kind = getRandomFrom({JoinKind::Inner, JoinKind::Left}); + auto join_kind = getRandomFrom({ JoinKind::Inner, JoinKind::Left }); - auto asof_inequality = getRandomFrom({ - ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals, - // ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals, - }); + auto asof_inequality = getRandomFrom({ ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals }); + + SCOPED_TRACE(fmt::format("{} {}", join_kind, asof_inequality)); SourceChunksBuilder left_source_builder({ {std::make_shared(), "k1"}, @@ -485,11 +595,13 @@ try ColumnInt64::Container expected; - UInt64 k1 = 0; - String k2 = "asdfg"; + UInt64 k1 = 1; + String k2 = ""; auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { + generateNextKey(k1, k2); + Int64 left_t = 0; size_t num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); for (size_t i = 0; i < num_left_rows; ++i) @@ -504,18 +616,10 @@ try auto right_t = left_t; for (size_t j = 0; j < num_matches; ++j) { - int min_step = 1; - if (asof_inequality == ASOFJoinInequality::LessOrEquals || asof_inequality == ASOFJoinInequality::GreaterOrEquals) - min_step = 0; + int min_step = isStrict(asof_inequality) ? 1 : 0; right_t += std::uniform_int_distribution<>(min_step, 3)(rng); - bool is_match = false; - - if (asof_inequality == ASOFJoinInequality::LessOrEquals || asof_inequality == ASOFJoinInequality::Less) - is_match = j == 0; - else if (asof_inequality == ASOFJoinInequality::GreaterOrEquals || asof_inequality == ASOFJoinInequality::Greater) - is_match = j == num_matches - 1; - + bool is_match = j == 0; right_source_builder.addRow({k1, k2, right_t, is_match ? 100 * left_t : -1}); } /// next left_t should be greater than right_t not to match with previous rows @@ -523,7 +627,7 @@ try } /// generate some rows with greater left_t to check that they are not matched - num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); + num_left_rows = std::bernoulli_distribution(0.5)(rng) ? 
std::uniform_int_distribution<>(1, 100)(rng) : 0; for (size_t i = 0; i < num_left_rows; ++i) { left_t += std::uniform_int_distribution<>(1, 10)(rng); @@ -532,12 +636,10 @@ try if (join_kind == JoinKind::Left) expected.push_back(-10 * left_t); } - - generateNextKey(k1, k2); } Block result_block = executePipeline(buildJoinPipeline( - left_source_builder.build(), right_source_builder.build(), + left_source_builder.getSource(), right_source_builder.getSource(), /* key_length = */ 3, join_kind, JoinStrictness::Asof, asof_inequality)); @@ -548,7 +650,97 @@ try assertColumnVectorEq(expected, result_block, "t2.attr"); } -catch (Exception & e) { +catch (Exception & e) +{ + std::cout << e.getStackTraceString() << std::endl; + throw; +} + +TEST(FullSortingJoin, AsofGreaterGeneratedTestData) +try +{ + auto join_kind = getRandomFrom({ JoinKind::Inner, JoinKind::Left }); + + auto asof_inequality = getRandomFrom({ ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals }); + + SCOPED_TRACE(fmt::format("{} {}", join_kind, asof_inequality)); + + SourceChunksBuilder left_source_builder({ + {std::make_shared(), "k1"}, + {std::make_shared(), "k2"}, + {std::make_shared(), "t"}, + {std::make_shared(), "attr"}, + }); + + SourceChunksBuilder right_source_builder({ + {std::make_shared(), "k1"}, + {std::make_shared(), "k2"}, + {std::make_shared(), "t"}, + {std::make_shared(), "attr"}, + }); + + left_source_builder.break_prob = getRandomDoubleQuantized(); + right_source_builder.break_prob = getRandomDoubleQuantized(); + + ColumnInt64::Container expected; + + UInt64 k1 = 1; + String k2 = ""; + UInt64 left_t = 0; + + auto key_num_total = std::uniform_int_distribution<>(1, 100)(rng); + for (size_t key_num = 0; key_num < key_num_total; ++key_num) + { + generateNextKey(k1, k2); + + /// generate some rows with smaller left_t to check that they are not matched + size_t num_left_rows = std::bernoulli_distribution(0.5)(rng) ? std::uniform_int_distribution<>(1, 10)(rng) : 0; + for (size_t i = 0; i < num_left_rows; ++i) + { + left_t += std::uniform_int_distribution<>(1, 10)(rng); + left_source_builder.addRow({k1, k2, left_t, -10 * left_t}); + + if (join_kind == JoinKind::Left) + expected.push_back(-10 * left_t); + } + + if (std::bernoulli_distribution(0.1)(rng)) + continue; + + size_t num_right_matches = std::uniform_int_distribution<>(1, 10)(rng); + auto right_t = left_t + std::uniform_int_distribution<>(isStrict(asof_inequality) ? 0 : 1, 10)(rng); + for (size_t j = 0; j < num_right_matches; ++j) + { + right_t += std::uniform_int_distribution<>(0, 3)(rng); + bool is_match = j == num_right_matches - 1; + right_source_builder.addRow({k1, k2, right_t, is_match ? 100 * right_t : -1}); + } + + /// next left_t should be greater than (or equals) right_t to match with previous rows + left_t = right_t + std::uniform_int_distribution<>(isStrict(asof_inequality) ? 1 : 0, 10)(rng); + size_t num_left_matches = std::uniform_int_distribution<>(1, 10)(rng); + for (size_t j = 0; j < num_left_matches; ++j) + { + left_t += std::uniform_int_distribution<>(0, 3)(rng); + left_source_builder.addRow({k1, k2, left_t, 10 * right_t}); + expected.push_back(10 * right_t); + } + } + + Block result_block = executePipeline(buildJoinPipeline( + left_source_builder.getSource(), right_source_builder.getSource(), + /* key_length = */ 3, + join_kind, JoinStrictness::Asof, asof_inequality)); + + assertColumnVectorEq(expected, result_block, "t1.attr"); + + for (auto & e : expected) + e = e < 0 ? 
0 : 10 * e; /// non matched rows from left table have negative attr + + assertColumnVectorEq(expected, result_block, "t2.attr"); +} +catch (Exception & e) +{ std::cout << e.getStackTraceString() << std::endl; throw; } diff --git a/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql b/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql index a4e60ff54dd..03936107563 100644 --- a/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql +++ b/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql @@ -19,7 +19,7 @@ SELECT * FROM t1 ANTI JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENT SELECT * FROM t1 SEMI JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } -SELECT * FROM t1 ASOF JOIN t2 ON t1.key = t2.key AND t1.val > t2.val; -- { serverError NOT_IMPLEMENTED } +-- SELECT * FROM t1 ASOF JOIN t2 ON t1.key = t2.key AND t1.val > t2.val; -- { serverError NOT_IMPLEMENTED } SELECT * FROM t1 ANY JOIN t2 ON t1.key = t2.key SETTINGS any_join_distinct_right_table_keys = 1; -- { serverError NOT_IMPLEMENTED } From b6b55cfb187ddcbb6316d905c60d9be481bfb1a4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 27 Sep 2023 09:05:50 +0000 Subject: [PATCH 0119/2361] fix asof join --- src/Processors/Transforms/MergeJoinTransform.cpp | 2 +- src/Processors/tests/gtest_full_sorting_join.cpp | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 638f2f1cb10..ad564557c36 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -1171,7 +1171,7 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() } /// check if blocks are not intersecting at all - if (int cmp = totallyCompare(cursors[0]->cursor, cursors[1]->cursor, null_direction_hint); cmp != 0) + if (int cmp = totallyCompare(cursors[0]->cursor, cursors[1]->cursor, null_direction_hint); cmp != 0 && strictness != JoinStrictness::Asof) { if (cmp < 0) { diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 4e0727779b7..2ecf7805df4 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -30,13 +30,13 @@ namespace { [[ maybe_unused ]] -String dumpBlock(std::shared_ptr source) +String dumpBlockSource(std::shared_ptr source, bool mono_block = false) { WriteBufferFromOwnString buf; { Block header = source->getPort().getHeader(); QueryPipeline pipeline(source); - auto format = std::make_shared(buf, header, FormatSettings{}, false); + auto format = std::make_shared(buf, header, FormatSettings{}, mono_block); pipeline.complete(std::move(format)); CompletedPipelineExecutor executor(pipeline); @@ -51,7 +51,7 @@ String dumpBlock(const Block & block) Block header = block.cloneEmpty(); Chunk data(block.getColumns(), block.rows()); auto source = std::make_shared(header, std::move(data)); - return dumpBlock(std::move(source)); + return dumpBlockSource(std::move(source)); } UInt64 getAndPrintRandomSeed() @@ -732,6 +732,10 @@ try /* key_length = */ 3, join_kind, JoinStrictness::Asof, asof_inequality)); + // std::cerr << "============ left ============" << std::endl << dumpBlockSource(left_source_builder.getSource()) << std::endl; + // std::cerr << "============ right ============" << std::endl << dumpBlockSource(right_source_builder.getSource()) << std::endl; + // std::cerr << "============ result ============" << 
std::endl << dumpBlock(result_block) << std::endl; + assertColumnVectorEq(expected, result_block, "t1.attr"); for (auto & e : expected) From d015a023bb85c72c2747cebed12d065d5aad1159 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 27 Sep 2023 09:56:30 +0000 Subject: [PATCH 0120/2361] unique rng seed per each test in gtest_full_sorting_join --- .../tests/gtest_full_sorting_join.cpp | 122 ++++++++++-------- 1 file changed, 69 insertions(+), 53 deletions(-) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 2ecf7805df4..2bd3357eff9 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -54,20 +54,6 @@ String dumpBlock(const Block & block) return dumpBlockSource(std::move(source)); } -UInt64 getAndPrintRandomSeed() -{ - UInt64 seed = randomSeed(); - if (const char * random_seed = std::getenv("TEST_RANDOM_SEED")) // NOLINT(concurrency-mt-unsafe) - seed = std::stoull(random_seed); - - std::cerr << __FILE__ << " :: " << "TEST_RANDOM_SEED=" << seed << std::endl; - return seed; -} - -static UInt64 TEST_RANDOM_SEED = getAndPrintRandomSeed(); -static pcg64 rng(TEST_RANDOM_SEED); - - QueryPipeline buildJoinPipeline( std::shared_ptr left_source, std::shared_ptr right_source, @@ -154,7 +140,6 @@ std::shared_ptr oneColumnSource(const std::vector> class SourceChunksBuilder { public: - double break_prob = 0.0; explicit SourceChunksBuilder(const Block & header_) : header(header_) @@ -163,13 +148,20 @@ public: chassert(!current_chunk.empty()); } + void setBreakProbability(pcg64 & rng_) + { + /// random probability with possibility to have exact 0.0 and 1.0 values + break_prob = std::uniform_int_distribution(0, 5)(rng_) / static_cast(5); + rng = &rng_; + } + void addRow(const std::vector & row) { chassert(row.size() == current_chunk.size()); for (size_t i = 0; i < current_chunk.size(); ++i) current_chunk[i]->insert(row[i]); - if (break_prob > 0.0 && std::uniform_real_distribution<>(0.0, 1.0)(rng) < break_prob) + if (rng && std::uniform_real_distribution<>(0.0, 1.0)(*rng) < break_prob) addChunk(); } @@ -200,6 +192,9 @@ private: Block header; Chunks chunks; MutableColumns current_chunk; + + pcg64 * rng = nullptr; + double break_prob = 0.0; }; @@ -239,8 +234,20 @@ void assertColumnVectorEq(const typename ColumnVector::Container & expected, const auto * actual = typeid_cast *>(block.getByName(name).column.get()); ASSERT_TRUE(actual) << "unexpected column type: " << block.getByName(name).column->dumpStructure() << "expected: " << typeid(ColumnVector).name(); + auto get_first_diff = [&]() -> String + { + const auto & actual_data = actual->getData(); + size_t num_rows = std::min(expected.size(), actual_data.size()); + for (size_t i = 0; i < num_rows; ++i) + { + if (expected[i] != actual_data[i]) + return fmt::format(", expected: {}, actual: {} at row {}", expected[i], actual_data[i], i); + } + return ""; + }; + EXPECT_EQ(actual->getData().size(), expected.size()); - ASSERT_EQ(actual->getData(), expected) << "column name: " << name; + ASSERT_EQ(actual->getData(), expected) << "column name: " << name << get_first_diff(); } template @@ -264,20 +271,14 @@ void assertColumnEq(const IColumn & expected, const Block & block, const std::st } template -T getRandomFrom(const std::initializer_list & opts) +T getRandomFrom(pcg64 & rng, const std::initializer_list & opts) { std::vector options(opts.begin(), opts.end()); size_t idx = std::uniform_int_distribution(0, options.size() - 1)(rng); return 
options[idx]; } -/// Used to have accurate 0.0 and 1.0 probabilities -double getRandomDoubleQuantized(size_t quants = 5) -{ - return std::uniform_int_distribution(0, quants)(rng) / static_cast(quants); -} - -void generateNextKey(UInt64 & k1, String & k2) +void generateNextKey(pcg64 & rng, UInt64 & k1, String & k2) { size_t str_len = std::uniform_int_distribution<>(1, 10)(rng); String new_k2 = getRandomASCIIString(str_len, rng); @@ -293,6 +294,28 @@ bool isStrict(ASOFJoinInequality inequality) } +class FullSortingJoinRandomized : public ::testing::Test +{ +public: + FullSortingJoinRandomized() = default; + + void SetUp() override + { + UInt64 seed = randomSeed(); + if (const char * random_seed = std::getenv("TEST_RANDOM_SEED")) // NOLINT(concurrency-mt-unsafe) + seed = std::stoull(random_seed); + + std::cerr << "TEST_RANDOM_SEED=" << seed << std::endl; + rng = pcg64(seed); + } + + void TearDown() override + { + } + + pcg64 rng; +}; + TEST(FullSortingJoin, AllAnyOneKey) try { @@ -369,10 +392,10 @@ catch (Exception & e) } -TEST(FullSortingJoin, AnyRandomized) +TEST_F(FullSortingJoinRandomized, Any) try { - JoinKind kind = getRandomFrom({JoinKind::Inner, JoinKind::Left, JoinKind::Right}); + JoinKind kind = getRandomFrom(rng, {JoinKind::Inner, JoinKind::Left, JoinKind::Right}); SourceChunksBuilder left_source({ {std::make_shared(), "k1"}, @@ -386,8 +409,8 @@ try {std::make_shared(), "attr"}, }); - left_source.break_prob = getRandomDoubleQuantized(); - right_source.break_prob = getRandomDoubleQuantized(); + left_source.setBreakProbability(rng); + right_source.setBreakProbability(rng); size_t num_keys = std::uniform_int_distribution<>(100, 1000)(rng); @@ -404,7 +427,7 @@ try for (size_t i = 0; i < num_keys; ++i) { - generateNextKey(k1, k2); + generateNextKey(rng, k1, k2); /// Key is present in left, right or both tables. Both tables is more probable. 
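        /// The draw below decides on which side(s) the key appears (both sides being the
        /// most likely outcome); expected output columns are built alongside the generated
        /// inputs, so the same loop produces both the test data and the oracle for the
        /// Inner/Left/Right ANY join being checked.
        /// Failures are reproducible: the fixture prints the seed and honours the
        /// TEST_RANDOM_SEED environment variable, so re-running the test binary with
        /// TEST_RANDOM_SEED set to the printed value (for example together with
        /// --gtest_filter='FullSortingJoinTest.*') replays the same randomized case.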
size_t key_presence = std::uniform_int_distribution<>(0, 10)(rng); @@ -520,7 +543,7 @@ catch (Exception & e) } -TEST(FullSortingJoin, AsofOnlyColumn) +TEST_F(FullSortingJoinRandomized, AsofOnlyColumn) try { auto left_source = oneColumnSource({ {3}, {3, 3, 3}, {3, 5, 5, 6}, {9, 9}, {10, 20} }); @@ -530,14 +553,11 @@ try {std::make_shared(), "value"}, }); - double break_prob = getRandomDoubleQuantized(2); - std::uniform_real_distribution<> prob_dis(0.0, 1.0); + right_source_builder.setBreakProbability(rng); + for (const auto & row : std::vector>{ {1, 101}, {2, 102}, {4, 104}, {5, 105}, {11, 111}, {15, 115} }) - { right_source_builder.addRow(row); - if (prob_dis(rng) < break_prob) - right_source_builder.addChunk(); - } + auto right_source = right_source_builder.getSource(); auto pipeline = buildJoinPipeline( @@ -567,12 +587,12 @@ catch (Exception & e) throw; } -TEST(FullSortingJoin, AsofLessGeneratedTestData) +TEST_F(FullSortingJoinRandomized, AsofLessGeneratedTestData) try { - auto join_kind = getRandomFrom({ JoinKind::Inner, JoinKind::Left }); + auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); - auto asof_inequality = getRandomFrom({ ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals }); + auto asof_inequality = getRandomFrom(rng, { ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals }); SCOPED_TRACE(fmt::format("{} {}", join_kind, asof_inequality)); @@ -590,8 +610,8 @@ try {std::make_shared(), "attr"}, }); - left_source_builder.break_prob = getRandomDoubleQuantized(); - right_source_builder.break_prob = getRandomDoubleQuantized(); + left_source_builder.setBreakProbability(rng); + right_source_builder.setBreakProbability(rng); ColumnInt64::Container expected; @@ -600,7 +620,7 @@ try auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { - generateNextKey(k1, k2); + generateNextKey(rng, k1, k2); Int64 left_t = 0; size_t num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); @@ -656,12 +676,12 @@ catch (Exception & e) throw; } -TEST(FullSortingJoin, AsofGreaterGeneratedTestData) +TEST_F(FullSortingJoinRandomized, AsofGreaterGeneratedTestData) try { - auto join_kind = getRandomFrom({ JoinKind::Inner, JoinKind::Left }); + auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); - auto asof_inequality = getRandomFrom({ ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals }); + auto asof_inequality = getRandomFrom(rng, { ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals }); SCOPED_TRACE(fmt::format("{} {}", join_kind, asof_inequality)); @@ -679,8 +699,8 @@ try {std::make_shared(), "attr"}, }); - left_source_builder.break_prob = getRandomDoubleQuantized(); - right_source_builder.break_prob = getRandomDoubleQuantized(); + left_source_builder.setBreakProbability(rng); + right_source_builder.setBreakProbability(rng); ColumnInt64::Container expected; @@ -691,7 +711,7 @@ try auto key_num_total = std::uniform_int_distribution<>(1, 100)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { - generateNextKey(k1, k2); + generateNextKey(rng, k1, k2); /// generate some rows with smaller left_t to check that they are not matched size_t num_left_rows = std::bernoulli_distribution(0.5)(rng) ? 
std::uniform_int_distribution<>(1, 10)(rng) : 0; @@ -732,10 +752,6 @@ try /* key_length = */ 3, join_kind, JoinStrictness::Asof, asof_inequality)); - // std::cerr << "============ left ============" << std::endl << dumpBlockSource(left_source_builder.getSource()) << std::endl; - // std::cerr << "============ right ============" << std::endl << dumpBlockSource(right_source_builder.getSource()) << std::endl; - // std::cerr << "============ result ============" << std::endl << dumpBlock(result_block) << std::endl; - assertColumnVectorEq(expected, result_block, "t1.attr"); for (auto & e : expected) From 3e9090cbebb3cdb97f4dbb4b9d03c7e9d9e242b1 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 27 Sep 2023 11:14:37 +0000 Subject: [PATCH 0121/2361] fix --- .../sql-reference/statements/select/join.md | 5 ++- .../Transforms/MergeJoinTransform.cpp | 41 ++++++++++++------- .../Transforms/MergeJoinTransform.h | 23 +---------- .../tests/gtest_full_sorting_join.cpp | 36 +++++++++------- 4 files changed, 52 insertions(+), 53 deletions(-) diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 34c6016235a..96d9d26977d 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -297,7 +297,7 @@ Algorithm requires the special column in tables. This column: - Must contain an ordered sequence. - Can be one of the following types: [Int, UInt](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md), [Decimal](../../../sql-reference/data-types/decimal.md). -- Can’t be the only column in the `JOIN` clause. +- For `hash` join algorithm it can’t be the only column in the `JOIN` clause. Syntax `ASOF JOIN ... ON`: @@ -337,7 +337,8 @@ For example, consider the following tables: `ASOF JOIN` can take the timestamp of a user event from `table_1` and find an event in `table_2` where the timestamp is closest to the timestamp of the event from `table_1` corresponding to the closest match condition. Equal timestamp values are the closest if available. Here, the `user_id` column can be used for joining on equality and the `ev_time` column can be used for joining on the closest match. In our example, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` can’t be joined. :::note -`ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine. +`ASOF JOIN` is supported only by `hash` and `full_sorting_merge` join algorithms. +It's **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine. 
::: ## PASTE JOIN Usage diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index ad564557c36..0d9eab248d8 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -329,6 +329,26 @@ void AsofJoinState::reset() value.clear(); } +FullMergeJoinCursor::FullMergeJoinCursor(const Block & sample_block_, const SortDescription & description_, bool is_asof) + : sample_block(sample_block_.cloneEmpty()) + , desc(description_) +{ + if (desc.size() == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty sort description for FullMergeJoinCursor"); + + if (is_asof) + { + /// For ASOF join prefix of sort description is used for equality comparison + /// and the last column is used for inequality comparison and is handled separately + + auto asof_column_description = desc.back(); + desc.pop_back(); + + chassert(asof_column_description.direction == 1 && asof_column_description.nulls_direction == 1); + asof_column_position = sample_block.getPositionByName(asof_column_description.column_name); + } +} + const Chunk & FullMergeJoinCursor::getCurrent() const { return current_chunk; @@ -686,7 +706,6 @@ std::optional MergeJoinAlgorithm::handleAsofJoinStat return {}; auto & left_cursor = *cursors[0]; - size_t lpos = left_cursor->getRow(); const auto & left_columns = left_cursor.getCurrent().getColumns(); MutableColumns result_cols = getEmptyResultColumns(); @@ -695,7 +714,7 @@ std::optional MergeJoinAlgorithm::handleAsofJoinStat { size_t i = 0; for (const auto & col : left_columns) - result_cols[i++]->insertFrom(*col, lpos); + result_cols[i++]->insertFrom(*col, left_cursor->getRow()); for (const auto & col : asof_join_state.value.getColumns()) result_cols[i++]->insertFrom(*col, asof_join_state.value_row); chassert(i == result_cols.size()); @@ -707,7 +726,7 @@ std::optional MergeJoinAlgorithm::handleAsofJoinStat /// return row with default values at right side size_t i = 0; for (const auto & col : left_columns) - result_cols[i++]->insertFrom(*col, lpos); + result_cols[i++]->insertFrom(*col, left_cursor->getRow()); for (; i < result_cols.size(); ++i) result_cols[i]->insertDefault(); chassert(i == result_cols.size()); @@ -977,12 +996,10 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() auto lpos = left_cursor->getRow(); auto rpos = right_cursor->getRow(); auto cmp = compareCursors(*left_cursor, *right_cursor); - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ({}) <=> ({}) -> {}", __FILE__, __LINE__, left_cursor.dump(), right_cursor.dump(), cmp); if (cmp == 0) { auto asof_cmp = compareAsofCursors(left_cursor, right_cursor); - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ({}) <=> ({}) -> asof {}", __FILE__, __LINE__, left_cursor.dump(), right_cursor.dump(), asof_cmp); if ((asof_inequality == ASOFJoinInequality::Less && asof_cmp <= -1) || (asof_inequality == ASOFJoinInequality::LessOrEquals && asof_cmp <= 0)) @@ -1010,7 +1027,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() if ((asof_inequality == ASOFJoinInequality::Greater && asof_cmp >= 1) || (asof_inequality == ASOFJoinInequality::GreaterOrEquals && asof_cmp >= 0)) { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); /// condition is satisfied, remember this row and move next to try to find better match asof_join_state.set(right_cursor, rpos); right_cursor->next(); @@ -1022,7 +1038,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() /// Asof condition is not satisfied anymore, 
use last matched row from right table if (asof_join_state.hasMatch(left_cursor, asof_inequality)) { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); size_t i = 0; for (const auto & col : left_columns) result_cols[i++]->insertFrom(*col, lpos); @@ -1035,7 +1050,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() asof_join_state.reset(); if (isLeft(kind)) { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); /// return row with default values at right side size_t i = 0; @@ -1047,7 +1061,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() } } left_cursor->next(); - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); continue; } @@ -1055,10 +1068,8 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() } else if (cmp < 0) { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); if (asof_join_state.hasMatch(left_cursor, asof_inequality)) { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); size_t i = 0; for (const auto & col : left_columns) @@ -1071,13 +1082,12 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() } else { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); asof_join_state.reset(); } - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); /// no matches for rows in left table, just pass them through size_t num = nextDistinct(*left_cursor); + if (isLeft(kind) && num) { /// return them with default values at right side @@ -1091,7 +1101,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() } else { - // LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{} ", __FILE__, __LINE__); /// skip rows in right table until we find match for current row in left table nextDistinct(*right_cursor); @@ -1106,6 +1115,7 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() /// otherwise - vice versa Chunk MergeJoinAlgorithm::createBlockWithDefaults(size_t source_num, size_t start, size_t num_rows) const { + ColumnRawPtrs cols; { const auto & columns_left = source_num == 0 ? 
cursors[0]->getCurrent().getColumns() : cursors[0]->sampleColumns(); @@ -1128,7 +1138,6 @@ Chunk MergeJoinAlgorithm::createBlockWithDefaults(size_t source_num, size_t star cols.push_back(col.get()); } } - Chunk result_chunk; copyColumnsResized(cols, start, num_rows, result_chunk); return result_chunk; @@ -1144,6 +1153,7 @@ Chunk MergeJoinAlgorithm::createBlockWithDefaults(size_t source_num) IMergingAlgorithm::Status MergeJoinAlgorithm::merge() { + if (!cursors[0]->cursor.isValid() && !cursors[0]->fullyCompleted()) return Status(0); @@ -1161,6 +1171,7 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() if (cursors[0]->fullyCompleted() || cursors[1]->fullyCompleted()) { + if (!cursors[0]->fullyCompleted() && isLeftOrFull(kind)) return Status(createBlockWithDefaults(0)); diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index dbdda0b166b..c8ba857781e 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -185,28 +185,7 @@ public: class FullMergeJoinCursor : boost::noncopyable { public: - FullMergeJoinCursor( - const Block & sample_block_, - const SortDescription & description_, - bool is_asof = false) - : sample_block(sample_block_.cloneEmpty()) - , desc(description_) - { - if (desc.size() == 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty sort description for FullMergeJoinCursor"); - - if (is_asof) - { - /// For ASOF join prefix of sort description is used for equality comparison - /// and the last column is used for inequality comparison and is handled separately - - auto asof_column_description = desc.back(); - desc.pop_back(); - - chassert(asof_column_description.direction == 1 && asof_column_description.nulls_direction == 1); - asof_column_position = sample_block.getPositionByName(asof_column_description.column_name); - } - } + FullMergeJoinCursor(const Block & sample_block_, const SortDescription & description_, bool is_asof = false); bool fullyCompleted() const; void setChunk(Chunk && chunk); diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 2bd3357eff9..e3423aa0386 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -294,18 +295,25 @@ bool isStrict(ASOFJoinInequality inequality) } -class FullSortingJoinRandomized : public ::testing::Test +class FullSortingJoinTest : public ::testing::Test { public: - FullSortingJoinRandomized() = default; + FullSortingJoinTest() = default; void SetUp() override { + Poco::AutoPtr channel(new Poco::ConsoleChannel(std::cerr)); + Poco::Logger::root().setChannel(channel); + if (const char * test_log_level = std::getenv("TEST_LOG_LEVEL")) // NOLINT(concurrency-mt-unsafe) + Poco::Logger::root().setLevel(test_log_level); + else + Poco::Logger::root().setLevel("none"); + + UInt64 seed = randomSeed(); if (const char * random_seed = std::getenv("TEST_RANDOM_SEED")) // NOLINT(concurrency-mt-unsafe) seed = std::stoull(random_seed); - - std::cerr << "TEST_RANDOM_SEED=" << seed << std::endl; + std::cout << "TEST_RANDOM_SEED=" << seed << std::endl; rng = pcg64(seed); } @@ -316,7 +324,7 @@ public: pcg64 rng; }; -TEST(FullSortingJoin, AllAnyOneKey) +TEST_F(FullSortingJoinTest, AllAnyOneKey) try { { @@ -392,7 +400,7 @@ catch (Exception & e) } -TEST_F(FullSortingJoinRandomized, Any) +TEST_F(FullSortingJoinTest, AnySimple) try { JoinKind kind 
= getRandomFrom(rng, {JoinKind::Inner, JoinKind::Left, JoinKind::Right}); @@ -478,7 +486,7 @@ catch (Exception & e) throw; } -TEST(FullSortingJoin, Asof) +TEST_F(FullSortingJoinTest, AsofSimple) try { SourceChunksBuilder left_source({ @@ -543,7 +551,7 @@ catch (Exception & e) } -TEST_F(FullSortingJoinRandomized, AsofOnlyColumn) +TEST_F(FullSortingJoinTest, AsofOnlyColumn) try { auto left_source = oneColumnSource({ {3}, {3, 3, 3}, {3, 5, 5, 6}, {9, 9}, {10, 20} }); @@ -587,7 +595,7 @@ catch (Exception & e) throw; } -TEST_F(FullSortingJoinRandomized, AsofLessGeneratedTestData) +TEST_F(FullSortingJoinTest, AsofLessGeneratedTestData) try { auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); @@ -676,7 +684,7 @@ catch (Exception & e) throw; } -TEST_F(FullSortingJoinRandomized, AsofGreaterGeneratedTestData) +TEST_F(FullSortingJoinTest, AsofGreaterGeneratedTestData) try { auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); @@ -708,13 +716,13 @@ try String k2 = ""; UInt64 left_t = 0; - auto key_num_total = std::uniform_int_distribution<>(1, 100)(rng); + auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { generateNextKey(rng, k1, k2); /// generate some rows with smaller left_t to check that they are not matched - size_t num_left_rows = std::bernoulli_distribution(0.5)(rng) ? std::uniform_int_distribution<>(1, 10)(rng) : 0; + size_t num_left_rows = std::bernoulli_distribution(0.5)(rng) ? std::uniform_int_distribution<>(1, 100)(rng) : 0; for (size_t i = 0; i < num_left_rows; ++i) { left_t += std::uniform_int_distribution<>(1, 10)(rng); @@ -737,8 +745,8 @@ try } /// next left_t should be greater than (or equals) right_t to match with previous rows - left_t = right_t + std::uniform_int_distribution<>(isStrict(asof_inequality) ? 1 : 0, 10)(rng); - size_t num_left_matches = std::uniform_int_distribution<>(1, 10)(rng); + left_t = right_t + std::uniform_int_distribution<>(isStrict(asof_inequality) ? 
1 : 0, 100)(rng); + size_t num_left_matches = std::uniform_int_distribution<>(1, 100)(rng); for (size_t j = 0; j < num_left_matches; ++j) { left_t += std::uniform_int_distribution<>(0, 3)(rng); From a8690947c705339113266c1ec8db05695a36e472 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 28 Sep 2023 12:50:24 +0000 Subject: [PATCH 0122/2361] enable stateless tessts for full sorting asof join --- src/Interpreters/InterpreterSelectQuery.cpp | 7 ++- src/Planner/PlannerJoinTree.cpp | 6 ++- .../Transforms/MergeJoinTransform.cpp | 2 - .../00927_asof_join_correct_bt.reference | 15 +++++++ .../00927_asof_join_correct_bt.sql | 30 ++++++++----- .../00927_asof_join_long.reference | 1 + .../0_stateless/00927_asof_join_long.sql | 17 ++++++- .../00927_asof_join_noninclusive.reference | 29 ++++++++++++ .../00927_asof_join_noninclusive.sql | 5 ++- .../00927_asof_join_other_types.reference | 45 +++++++++++++++++++ .../00927_asof_join_other_types.sh | 27 ----------- .../00927_asof_join_other_types.sql.j2 | 27 +++++++++++ .../0_stateless/00927_asof_joins.reference | 15 +++++++ .../queries/0_stateless/00927_asof_joins.sql | 8 +++- .../0_stateless/00976_asof_join_on.reference | 38 ++++++++++++++++ ..._join_on.sql => 00976_asof_join_on.sql.j2} | 8 ++++ 16 files changed, 231 insertions(+), 49 deletions(-) delete mode 100755 tests/queries/0_stateless/00927_asof_join_other_types.sh create mode 100755 tests/queries/0_stateless/00927_asof_join_other_types.sql.j2 rename tests/queries/0_stateless/{00976_asof_join_on.sql => 00976_asof_join_on.sql.j2} (90%) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index b72399df2c1..a0b53ad42d1 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1726,7 +1726,10 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

getCurrentDataStream().header, join_clause.key_names_right); - if (settings.max_rows_in_set_to_optimize_join > 0 && kind_allows_filtering && has_non_const_keys) + if (settings.max_rows_in_set_to_optimize_join > 0 && join_type_allows_filtering && has_non_const_keys) { auto * left_set = add_create_set(query_plan, join_clause.key_names_left, JoinTableSide::Left); auto * right_set = add_create_set(*joined_plan, join_clause.key_names_right, JoinTableSide::Right); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index aa46f65d2d4..63acf194139 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -1484,7 +1484,9 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ { const auto & join_clause = table_join->getOnlyClause(); - bool kind_allows_filtering = isInner(join_kind) || isLeft(join_kind) || isRight(join_kind); + bool join_type_allows_filtering = (join_strictness == JoinStrictness::All || join_strictness == JoinStrictness::Any) + && (isInner(join_kind) || isLeft(join_kind) || isRight(join_kind)); + auto has_non_const = [](const Block & block, const auto & keys) { @@ -1504,7 +1506,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ bool has_non_const_keys = has_non_const(left_plan.getCurrentDataStream().header, join_clause.key_names_left) && has_non_const(right_plan.getCurrentDataStream().header, join_clause.key_names_right); - if (settings.max_rows_in_set_to_optimize_join > 0 && kind_allows_filtering && has_non_const_keys) + if (settings.max_rows_in_set_to_optimize_join > 0 && join_type_allows_filtering && has_non_const_keys) { auto * left_set = add_create_set(left_plan, join_clause.key_names_left, JoinTableSide::Left); auto * right_set = add_create_set(right_plan, join_clause.key_names_right, JoinTableSide::Right); diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 0d9eab248d8..df56ffc2871 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -456,8 +456,6 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( size_t left_idx = input_headers[0].getPositionByName(left_key); size_t right_idx = input_headers[1].getPositionByName(right_key); left_to_right_key_remap[left_idx] = right_idx; - if (strictness == JoinStrictness::Asof) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm does not support ASOF joins USING"); } const auto *smjPtr = typeid_cast(table_join.get()); diff --git a/tests/queries/0_stateless/00927_asof_join_correct_bt.reference b/tests/queries/0_stateless/00927_asof_join_correct_bt.reference index bb199d0159a..a398f9604fd 100644 --- a/tests/queries/0_stateless/00927_asof_join_correct_bt.reference +++ b/tests/queries/0_stateless/00927_asof_join_correct_bt.reference @@ -13,3 +13,18 @@ 1 103 3 2 102 1 1 104 4 4 104 1 1 105 5 4 104 1 +1 101 1 0 0 0 +1 102 2 2 102 1 +1 103 3 2 102 1 +1 104 4 4 104 1 +1 105 5 4 104 1 +1 101 1 0 0 0 +1 102 2 2 102 1 +1 103 3 2 102 1 +1 104 4 4 104 1 +1 105 5 4 104 1 +1 101 1 0 0 0 +1 102 2 2 102 1 +1 103 3 2 102 1 +1 104 4 4 104 1 +1 105 5 4 104 1 diff --git a/tests/queries/0_stateless/00927_asof_join_correct_bt.sql b/tests/queries/0_stateless/00927_asof_join_correct_bt.sql index 281a81d51c0..761d6bacde6 100644 --- a/tests/queries/0_stateless/00927_asof_join_correct_bt.sql +++ b/tests/queries/0_stateless/00927_asof_join_correct_bt.sql @@ -4,20 +4,26 @@ DROP TABLE IF EXISTS B; CREATE TABLE A(k 
UInt32, t UInt32, a UInt64) ENGINE = MergeTree() ORDER BY (k, t); INSERT INTO A(k,t,a) VALUES (1,101,1),(1,102,2),(1,103,3),(1,104,4),(1,105,5); -CREATE TABLE B(k UInt32, t UInt32, b UInt64) ENGINE = MergeTree() ORDER BY (k, t); -INSERT INTO B(k,t,b) VALUES (1,102,2), (1,104,4); -SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); -DROP TABLE B; +CREATE TABLE B1(k UInt32, t UInt32, b UInt64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B1(k,t,b) VALUES (1,102,2), (1,104,4); +CREATE TABLE B2(t UInt32, k UInt32, b UInt64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B2(k,t,b) VALUES (1,102,2), (1,104,4); -CREATE TABLE B(t UInt32, k UInt32, b UInt64) ENGINE = MergeTree() ORDER BY (k, t); -INSERT INTO B(k,t,b) VALUES (1,102,2), (1,104,4); -SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); -DROP TABLE B; +CREATE TABLE B3(k UInt32, b UInt64, t UInt32) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B3(k,t,b) VALUES (1,102,2), (1,104,4); -CREATE TABLE B(k UInt32, b UInt64, t UInt32) ENGINE = MergeTree() ORDER BY (k, t); -INSERT INTO B(k,t,b) VALUES (1,102,2), (1,104,4); -SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); -DROP TABLE B; +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); + +SET join_algorithm = 'full_sorting_merge'; +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); + +DROP TABLE B1; +DROP TABLE B2; +DROP TABLE B3; DROP TABLE A; diff --git a/tests/queries/0_stateless/00927_asof_join_long.reference b/tests/queries/0_stateless/00927_asof_join_long.reference index d4f015c68e4..ec40d2bc463 100644 --- a/tests/queries/0_stateless/00927_asof_join_long.reference +++ b/tests/queries/0_stateless/00927_asof_join_long.reference @@ -1 +1,2 @@ 3000000 +3000000 diff --git a/tests/queries/0_stateless/00927_asof_join_long.sql b/tests/queries/0_stateless/00927_asof_join_long.sql index c03a06d48d4..7a73875e93e 100644 --- a/tests/queries/0_stateless/00927_asof_join_long.sql +++ b/tests/queries/0_stateless/00927_asof_join_long.sql @@ -2,15 +2,28 @@ DROP TABLE IF EXISTS tvs; +-- to use different algorithms for in subquery +SET allow_experimental_analyzer = 1; + CREATE TABLE tvs(k UInt32, t UInt32, tv UInt64) ENGINE = Memory; INSERT INTO tvs(k,t,tv) SELECT k, t, t FROM (SELECT toUInt32(number) AS k FROM numbers(1000)) keys -CROSS JOIN (SELECT toUInt32(number * 3) as t FROM numbers(10000)) tv_times; +CROSS JOIN (SELECT toUInt32(number * 3) as t FROM numbers(10000)) tv_times +SETTINGS join_algorithm = 'hash'; SELECT SUM(trades.price - tvs.tv) FROM (SELECT k, t, t as price FROM (SELECT toUInt32(number) AS k FROM numbers(1000)) keys - CROSS JOIN (SELECT toUInt32(number * 10) AS t FROM numbers(3000)) trade_times) trades + CROSS JOIN (SELECT toUInt32(number * 10) AS t FROM numbers(3000)) trade_times + SETTINGS join_algorithm = 'hash') trades ASOF LEFT JOIN tvs USING(k,t); +SELECT SUM(trades.price - tvs.tv) FROM +(SELECT k, t, t as price + FROM (SELECT toUInt32(number) AS k FROM numbers(1000)) keys + CROSS JOIN 
(SELECT toUInt32(number * 10) AS t FROM numbers(3000)) trade_times + SETTINGS join_algorithm = 'hash') trades +ASOF LEFT JOIN tvs USING(k,t) +SETTINGS join_algorithm = 'full_sorting_merge'; + DROP TABLE tvs; diff --git a/tests/queries/0_stateless/00927_asof_join_noninclusive.reference b/tests/queries/0_stateless/00927_asof_join_noninclusive.reference index fe2844a2a43..d856372fb4a 100644 --- a/tests/queries/0_stateless/00927_asof_join_noninclusive.reference +++ b/tests/queries/0_stateless/00927_asof_join_noninclusive.reference @@ -27,3 +27,32 @@ 2 1970-01-01 00:00:03 3 3 1970-01-01 00:00:03 2 2 1970-01-01 00:00:04 4 3 1970-01-01 00:00:03 2 2 1970-01-01 00:00:05 5 3 1970-01-01 00:00:03 2 +1 1970-01-01 00:00:01 1 0 1970-01-01 00:00:00 0 +1 1970-01-01 00:00:02 2 2 1970-01-01 00:00:02 1 +1 1970-01-01 00:00:03 3 2 1970-01-01 00:00:02 1 +1 1970-01-01 00:00:04 4 4 1970-01-01 00:00:04 1 +1 1970-01-01 00:00:05 5 4 1970-01-01 00:00:04 1 +2 1970-01-01 00:00:01 1 0 1970-01-01 00:00:00 0 +2 1970-01-01 00:00:02 2 0 1970-01-01 00:00:00 0 +2 1970-01-01 00:00:03 3 3 1970-01-01 00:00:03 2 +2 1970-01-01 00:00:04 4 3 1970-01-01 00:00:03 2 +2 1970-01-01 00:00:05 5 3 1970-01-01 00:00:03 2 +3 1970-01-01 00:00:01 1 0 1970-01-01 00:00:00 0 +3 1970-01-01 00:00:02 2 0 1970-01-01 00:00:00 0 +3 1970-01-01 00:00:03 3 0 1970-01-01 00:00:00 0 +3 1970-01-01 00:00:04 4 0 1970-01-01 00:00:00 0 +3 1970-01-01 00:00:05 5 0 1970-01-01 00:00:00 0 +1 1970-01-01 00:00:02 2 2 1970-01-01 00:00:02 1 +1 1970-01-01 00:00:03 3 2 1970-01-01 00:00:02 1 +1 1970-01-01 00:00:04 4 4 1970-01-01 00:00:04 1 +1 1970-01-01 00:00:05 5 4 1970-01-01 00:00:04 1 +2 1970-01-01 00:00:03 3 3 1970-01-01 00:00:03 2 +2 1970-01-01 00:00:04 4 3 1970-01-01 00:00:03 2 +2 1970-01-01 00:00:05 5 3 1970-01-01 00:00:03 2 +1 1970-01-01 00:00:02 2 2 1970-01-01 00:00:02 1 +1 1970-01-01 00:00:03 3 2 1970-01-01 00:00:02 1 +1 1970-01-01 00:00:04 4 4 1970-01-01 00:00:04 1 +1 1970-01-01 00:00:05 5 4 1970-01-01 00:00:04 1 +2 1970-01-01 00:00:03 3 3 1970-01-01 00:00:03 2 +2 1970-01-01 00:00:04 4 3 1970-01-01 00:00:03 2 +2 1970-01-01 00:00:05 5 3 1970-01-01 00:00:03 2 diff --git a/tests/queries/0_stateless/00927_asof_join_noninclusive.sql b/tests/queries/0_stateless/00927_asof_join_noninclusive.sql index 5f15f3b593d..3cc99df4462 100644 --- a/tests/queries/0_stateless/00927_asof_join_noninclusive.sql +++ b/tests/queries/0_stateless/00927_asof_join_noninclusive.sql @@ -11,9 +11,12 @@ INSERT INTO B(k,t,b) VALUES (1,2,2),(1,4,4); INSERT INTO B(k,t,b) VALUES (2,3,3); SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); - SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF INNER JOIN B ON A.k == B.k AND A.t >= B.t ORDER BY (A.k, A.t); +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF JOIN B USING(k,t) ORDER BY (A.k, A.t); +SET join_algorithm = 'full_sorting_merge'; +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF INNER JOIN B ON A.k == B.k AND A.t >= B.t ORDER BY (A.k, A.t); SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF JOIN B USING(k,t) ORDER BY (A.k, A.t); DROP TABLE A; diff --git a/tests/queries/0_stateless/00927_asof_join_other_types.reference b/tests/queries/0_stateless/00927_asof_join_other_types.reference index 80c85ec1ae3..ddbc24ff925 100644 --- 
a/tests/queries/0_stateless/00927_asof_join_other_types.reference +++ b/tests/queries/0_stateless/00927_asof_join_other_types.reference @@ -1,27 +1,72 @@ +- 2 1 1 0 2 3 3 3 2 5 5 3 +- 2 1 1 0 2 3 3 3 2 5 5 3 +- 2 1 1 0 2 3 3 3 2 5 5 3 +- 2 1 1 0 2 3 3 3 2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- 2 1970-01-01 02:00:01 1 0 2 1970-01-01 02:00:03 3 3 2 1970-01-01 02:00:05 5 3 +- +2 1970-01-01 02:00:01 1 0 +2 1970-01-01 02:00:03 3 3 +2 1970-01-01 02:00:05 5 3 +- 2 1 1 0 2 3 3 3 2 5 5 3 +- 2 1 1 0 2 3 3 3 2 5 5 3 +- 2 1 1 0 2 3 3 3 2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- +2 1 1 0 +2 3 3 3 +2 5 5 3 +- +2 1970-01-01 02:00:00.001 1 0 +2 1970-01-01 02:00:00.003 3 3 +2 1970-01-01 02:00:00.005 5 3 +- 2 1970-01-01 02:00:00.001 1 0 2 1970-01-01 02:00:00.003 3 3 2 1970-01-01 02:00:00.005 5 3 diff --git a/tests/queries/0_stateless/00927_asof_join_other_types.sh b/tests/queries/0_stateless/00927_asof_join_other_types.sh deleted file mode 100755 index 10173a3e43f..00000000000 --- a/tests/queries/0_stateless/00927_asof_join_other_types.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -set -e - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - -for typename in "UInt32" "UInt64" "Float64" "Float32" "DateTime('Asia/Istanbul')" "Decimal32(5)" "Decimal64(5)" "Decimal128(5)" "DateTime64(3, 'Asia/Istanbul')" -do - $CLICKHOUSE_CLIENT -mn <= B.t ORDER BY (A.a, A.t); SELECT count() FROM A ASOF LEFT JOIN B ON A.a == B.b AND B.t <= A.t; SELECT A.a, A.t, B.b, B.t FROM A ASOF INNER JOIN B ON B.t <= A.t AND A.a == B.b ORDER BY (A.a, A.t); @@ -28,5 +34,7 @@ ASOF INNER JOIN (SELECT * FROM B UNION ALL SELECT 1, 3) AS B ON B.t <= A.t AND A WHERE B.t != 3 ORDER BY (A.a, A.t) ; +{% endfor %} + DROP TABLE A; DROP TABLE B; From 256ad60115ddd1eb8c8a5597478e057926fe30f6 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 28 Sep 2023 12:52:20 +0000 Subject: [PATCH 0123/2361] fix style, clang tidy --- src/Planner/PlannerJoinTree.cpp | 8 +-- .../Transforms/MergeJoinTransform.cpp | 59 +++++++++------- .../Transforms/MergeJoinTransform.h | 2 +- .../tests/gtest_full_sorting_join.cpp | 27 +------- .../00927_asof_join_other_types.sql.j2 | 0 .../01116_asof_join_dolbyzerr.reference | 3 + .../0_stateless/01116_asof_join_dolbyzerr.sql | 14 ++++ .../02240_asof_join_biginteger.reference | 4 ++ .../02240_asof_join_biginteger.sql | 8 +++ ...n_in_left_table_clause_asof_join.reference | 1 + ...unction_in_left_table_clause_asof_join.sql | 12 ++++ .../03143_asof_join_ddb_long.reference | 2 + .../0_stateless/03143_asof_join_ddb_long.sql | 48 +++++++++++++ .../03144_asof_join_ddb_doubles.reference | 58 ++++++++++++++++ .../03144_asof_join_ddb_doubles.sql | 64 +++++++++++++++++ ...03145_asof_join_ddb_inequalities.reference | 68 +++++++++++++++++++ .../03145_asof_join_ddb_inequalities.sql | 63 +++++++++++++++++ .../03146_asof_join_ddb_merge_long.reference | 2 + .../03146_asof_join_ddb_merge_long.sql.j2 | 37 ++++++++++ 19 files changed, 424 insertions(+), 56 deletions(-) mode change 100755 => 100644 tests/queries/0_stateless/00927_asof_join_other_types.sql.j2 create mode 100644 tests/queries/0_stateless/03143_asof_join_ddb_long.reference create mode 100644 tests/queries/0_stateless/03143_asof_join_ddb_long.sql create mode 100644 tests/queries/0_stateless/03144_asof_join_ddb_doubles.reference create mode 100644 
tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql create mode 100644 tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference create mode 100644 tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql create mode 100644 tests/queries/0_stateless/03146_asof_join_ddb_merge_long.reference create mode 100644 tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 63acf194139..a9d9e11f458 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -75,7 +75,6 @@ namespace ErrorCodes extern const int INVALID_JOIN_ON_EXPRESSION; extern const int LOGICAL_ERROR; extern const int NOT_IMPLEMENTED; - extern const int SYNTAX_ERROR; extern const int ACCESS_DENIED; extern const int PARAMETER_OUT_OF_BOUND; extern const int TOO_MANY_COLUMNS; @@ -1357,12 +1356,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ { if (!join_clause.hasASOF()) throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, - "JOIN {} no inequality in ASOF JOIN ON section.", - join_node.formatASTForErrorMessage()); - - if (table_join_clause.key_names_left.size() <= 1) - throw Exception(ErrorCodes::SYNTAX_ERROR, - "JOIN {} ASOF join needs at least one equi-join column", + "JOIN {} no inequality in ASOF JOIN ON section", join_node.formatASTForErrorMessage()); } diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index df56ffc2871..26e1ebb0b60 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -43,6 +43,13 @@ FullMergeJoinCursorPtr createCursor(const Block & block, const Names & columns, return std::make_unique(materializeBlock(block), desc, strictness == JoinStrictness::Asof); } +bool isNullAt(const IColumn & column, size_t row) +{ + if (const auto * nullable = checkAndGetColumn(column)) + return nullable->isNullAt(row); + return false; +} + template int nullableCompareAt(const IColumn & left_column, const IColumn & right_column, size_t lhs_pos, size_t rhs_pos, int null_direction_hint) { @@ -54,7 +61,7 @@ int nullableCompareAt(const IColumn & left_column, const IColumn & right_column, if (left_nullable && right_nullable) { int res = left_nullable->compareAt(lhs_pos, rhs_pos, right_column, null_direction_hint); - if (res) + if (res != 0) return res; /// NULL != NULL case @@ -108,9 +115,9 @@ int ALWAYS_INLINE compareCursors(const SortCursorImpl & lhs, const SortCursorImp return compareCursors(lhs, lhs.getRow(), rhs, rhs.getRow(), lhs.sort_columns_size, null_direction_hint); } -int compareAsofCursors(const FullMergeJoinCursor & lhs, const FullMergeJoinCursor & rhs) +int compareAsofCursors(const FullMergeJoinCursor & lhs, const FullMergeJoinCursor & rhs, int null_direction_hint) { - return nullableCompareAt(*lhs.getAsofColumn(), *rhs.getAsofColumn(), lhs->getRow(), rhs->getRow()); + return nullableCompareAt(*lhs.getAsofColumn(), *rhs.getAsofColumn(), lhs->getRow(), rhs->getRow(), null_direction_hint); } bool ALWAYS_INLINE totallyLess(SortCursorImpl & lhs, SortCursorImpl & rhs, int null_direction_hint) @@ -235,12 +242,6 @@ void inline addRange(PaddedPODArray & values, UInt64 start, UInt64 end) values.push_back(i); } -void inline addMany(PaddedPODArray & left_or_right_map, size_t idx, size_t num) -{ - for (size_t i = 0; i < num; ++i) - left_or_right_map.push_back(idx); -} - void inline addMany(PaddedPODArray & values, UInt64 value, size_t 
num) { values.resize_fill(values.size() + num, value); @@ -257,7 +258,7 @@ JoinKeyRow::JoinKeyRow(const FullMergeJoinCursor & cursor, size_t pos) new_col->insertFrom(*col, pos); row.push_back(std::move(new_col)); } - if (auto asof_column = cursor.getAsofColumn()) + if (const auto * asof_column = cursor.getAsofColumn()) { auto new_col = asof_column->cloneEmpty(); new_col->insertFrom(*asof_column, pos); @@ -275,10 +276,10 @@ bool JoinKeyRow::equals(const FullMergeJoinCursor & cursor) const if (row.empty()) return false; - assert(this->row.size() == cursor->sort_columns_size); for (size_t i = 0; i < cursor->sort_columns_size; ++i) { - int cmp = this->row[i]->compareAt(0, cursor->getRow(), *(cursor->sort_columns[i]), cursor->desc[i].nulls_direction); + // int cmp = this->row[i]->compareAt(0, cursor->getRow(), *(cursor->sort_columns[i]), cursor->desc[i].nulls_direction); + int cmp = nullableCompareAt(*this->row[i], *cursor->sort_columns[i], 0, cursor->getRow(), cursor->desc[i].nulls_direction); if (cmp != 0) return false; } @@ -287,9 +288,16 @@ bool JoinKeyRow::equals(const FullMergeJoinCursor & cursor) const bool JoinKeyRow::asofMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequality asof_inequality) const { + chassert(this->row.size() == cursor->sort_columns_size + 1); if (!equals(cursor)) return false; - int cmp = cursor.getAsofColumn()->compareAt(cursor->getRow(), 0, *row.back(), 1); + + const auto & asof_row = row.back(); + if (isNullAt(*asof_row, 0) || isNullAt(*cursor.getAsofColumn(), cursor->getRow())) + return false; + + int cmp = cursor.getAsofColumn()->compareAt(cursor->getRow(), 0, *asof_row, 1); + return (asof_inequality == ASOFJoinInequality::Less && cmp < 0) || (asof_inequality == ASOFJoinInequality::LessOrEquals && cmp <= 0) || (asof_inequality == ASOFJoinInequality::Greater && cmp > 0) @@ -333,7 +341,7 @@ FullMergeJoinCursor::FullMergeJoinCursor(const Block & sample_block_, const Sort : sample_block(sample_block_.cloneEmpty()) , desc(description_) { - if (desc.size() == 0) + if (desc.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty sort description for FullMergeJoinCursor"); if (is_asof) @@ -393,7 +401,7 @@ String FullMergeJoinCursor::dump() const row_dump.push_back(val.dump()); } - if (auto * asof_column = getAsofColumn()) + if (const auto * asof_column = getAsofColumn()) { asof_column->get(cursor.getRow(), val); row_dump.push_back(val.dump()); @@ -436,9 +444,10 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeJoinAlgorithm does not support ON filter conditions"); cursors = { - createCursor(input_headers[0], on_clause_.key_names_left, strictness), - createCursor(input_headers[1], on_clause_.key_names_right, strictness), + createCursor(input_headers[0], on_clause_.key_names_left, strictness), + createCursor(input_headers[1], on_clause_.key_names_right, strictness), }; +} MergeJoinAlgorithm::MergeJoinAlgorithm( JoinPtr join_ptr, @@ -458,7 +467,7 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( left_to_right_key_remap[left_idx] = right_idx; } - const auto *smjPtr = typeid_cast(table_join.get()); + const auto *smjPtr = typeid_cast(join_ptr.get()); if (smjPtr) { null_direction_hint = smjPtr->getNullDirection(); @@ -993,11 +1002,18 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() { auto lpos = left_cursor->getRow(); auto rpos = right_cursor->getRow(); - auto cmp = compareCursors(*left_cursor, *right_cursor); + auto cmp = compareCursors(*left_cursor, *right_cursor, null_direction_hint); + if (cmp == 0) + { + if 
(isNullAt(*left_cursor.getAsofColumn(), lpos)) + cmp = -1; + if (isNullAt(*right_cursor.getAsofColumn(), rpos)) + cmp = 1; + } if (cmp == 0) { - auto asof_cmp = compareAsofCursors(left_cursor, right_cursor); + auto asof_cmp = compareAsofCursors(left_cursor, right_cursor, null_direction_hint); if ((asof_inequality == ASOFJoinInequality::Less && asof_cmp <= -1) || (asof_inequality == ASOFJoinInequality::LessOrEquals && asof_cmp <= 0)) @@ -1048,7 +1064,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() asof_join_state.reset(); if (isLeft(kind)) { - /// return row with default values at right side size_t i = 0; for (const auto & col : left_columns) @@ -1068,7 +1083,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() { if (asof_join_state.hasMatch(left_cursor, asof_inequality)) { - size_t i = 0; for (const auto & col : left_columns) result_cols[i++]->insertFrom(*col, lpos); @@ -1099,7 +1113,6 @@ MergeJoinAlgorithm::Status MergeJoinAlgorithm::asofJoin() } else { - /// skip rows in right table until we find match for current row in left table nextDistinct(*right_cursor); } diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index c8ba857781e..15bf13381b8 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -167,7 +167,7 @@ public: void set(const FullMergeJoinCursor & rcursor, size_t rpos); void reset(); - bool hasMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequality asof_inequality) + bool hasMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequality asof_inequality) const { if (value.empty()) return false; diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index e3423aa0386..a3fda006eb8 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include #include @@ -30,31 +32,6 @@ using namespace DB; namespace { -[[ maybe_unused ]] -String dumpBlockSource(std::shared_ptr source, bool mono_block = false) -{ - WriteBufferFromOwnString buf; - { - Block header = source->getPort().getHeader(); - QueryPipeline pipeline(source); - auto format = std::make_shared(buf, header, FormatSettings{}, mono_block); - pipeline.complete(std::move(format)); - - CompletedPipelineExecutor executor(pipeline); - executor.execute(); - } - return buf.str(); -} - -[[ maybe_unused ]] -String dumpBlock(const Block & block) -{ - Block header = block.cloneEmpty(); - Chunk data(block.getColumns(), block.rows()); - auto source = std::make_shared(header, std::move(data)); - return dumpBlockSource(std::move(source)); -} - QueryPipeline buildJoinPipeline( std::shared_ptr left_source, std::shared_ptr right_source, diff --git a/tests/queries/0_stateless/00927_asof_join_other_types.sql.j2 b/tests/queries/0_stateless/00927_asof_join_other_types.sql.j2 old mode 100755 new mode 100644 diff --git a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference index 1055a67ea5b..0aa1a85f19d 100644 --- a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference +++ b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.reference @@ -1,3 +1,6 @@ v1 o1 ['s2','s1'] v1 o2 ['s4'] v2 o3 ['s5','s3'] +v1 o1 ['s2','s1'] +v1 o2 ['s4'] +v2 o3 ['s5','s3'] diff --git a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql 
b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql index 8a94b6ddd24..652cb35cf2a 100644 --- a/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql +++ b/tests/queries/0_stateless/01116_asof_join_dolbyzerr.sql @@ -16,3 +16,17 @@ GROUP BY ORDER BY visitorId ASC, orderId ASC; + +SELECT + visitorId, + orderId, + groupUniqArray(sessionId) +FROM sessions +ASOF INNER JOIN orders ON (sessions.visitorId = orders.visitorId) AND (sessions.date <= orders.date) +GROUP BY + visitorId, + orderId +ORDER BY + visitorId ASC, + orderId ASC +SETTINGS join_algorithm = 'full_sorting_merge'; diff --git a/tests/queries/0_stateless/02240_asof_join_biginteger.reference b/tests/queries/0_stateless/02240_asof_join_biginteger.reference index cac55eec430..f7eb4d74375 100644 --- a/tests/queries/0_stateless/02240_asof_join_biginteger.reference +++ b/tests/queries/0_stateless/02240_asof_join_biginteger.reference @@ -2,3 +2,7 @@ 0 340282366920938463463374607431768211457 0 18446744073709551617 0 340282366920938463463374607431768211457 +0 18446744073709551617 +0 340282366920938463463374607431768211457 +0 18446744073709551617 +0 340282366920938463463374607431768211457 diff --git a/tests/queries/0_stateless/02240_asof_join_biginteger.sql b/tests/queries/0_stateless/02240_asof_join_biginteger.sql index 6dc5b00f116..a5c1faae4ea 100644 --- a/tests/queries/0_stateless/02240_asof_join_biginteger.sql +++ b/tests/queries/0_stateless/02240_asof_join_biginteger.sql @@ -3,3 +3,11 @@ select * from (select 0 as k, toInt256('340282366920938463463374607431768211457' select * from (select 0 as k, toUInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toUInt128('18446744073709551616') as v) t2 using(k, v); select * from (select 0 as k, toUInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toUInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); + +SET join_algorithm = 'full_sorting_merge'; + +select * from (select 0 as k, toInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toInt128('18446744073709551616') as v) t2 using(k, v); +select * from (select 0 as k, toInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); + +select * from (select 0 as k, toUInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toUInt128('18446744073709551616') as v) t2 using(k, v); +select * from (select 0 as k, toUInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toUInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); diff --git a/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference index d00491fd7e5..6ed281c757a 100644 --- a/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference +++ b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference @@ -1 +1,2 @@ 1 +1 diff --git a/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql index 13dfb5debe7..6aa70a379c1 100644 --- a/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql +++ b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql @@ -6,3 +6,15 @@ ASOF LEFT JOIN ( select 1 as session_id, 4 as id ) as visitors ON visitors.session_id <= sessions.id AND 
arrayFirst(a -> a, arrayMap((a) -> a, sessions.arr)) = visitors.id +; + +select count(*) +from ( + select 1 as id, [1, 2, 3] as arr +) as sessions +ASOF LEFT JOIN ( + select 1 as session_id, 4 as id +) as visitors +ON visitors.session_id <= sessions.id AND arrayFirst(a -> a, arrayMap((a) -> a, sessions.arr)) = visitors.id +SETTINGS join_algorithm = 'full_sorting_merge' +; diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.reference b/tests/queries/0_stateless/03143_asof_join_ddb_long.reference new file mode 100644 index 00000000000..2850a8aba98 --- /dev/null +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.reference @@ -0,0 +1,2 @@ +49999983751397 10000032 +49999983751397 10000032 diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql new file mode 100644 index 00000000000..c421702bb00 --- /dev/null +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -0,0 +1,48 @@ +-- Tags: long + +DROP TABLE IF EXISTS build; +DROP TABLE IF EXISTS skewed_probe; + +CREATE TABLE build ENGINE = MergeTree ORDER BY (key, begin) +AS + SELECT + toDateTime('1990-03-21 13:00:00') + INTERVAL number MINUTE AS begin, + number % 4 AS key, + number AS value + FROM numbers(0, 10000000); + +CREATE TABLE skewed_probe ENGINE = MergeTree ORDER BY (key, begin) +AS + SELECT + toDateTime('1990-04-21 13:00:01') + INTERVAL number MINUTE AS begin, + 0 AS key + FROM numbers(0, 5) + UNION ALL + SELECT + toDateTime('1990-05-21 13:00:01') + INTERVAL number MINUTE AS begin, + 1 AS key + FROM numbers(0, 10) + UNION ALL + SELECT + toDateTime('1990-06-21 13:00:01') + INTERVAL number MINUTE AS begin, + 2 AS key + FROM numbers(0, 20) + UNION ALL + SELECT + toDateTime('1990-03-21 13:00:01') + INTERVAL number MINUTE AS begin, + 3 AS key + FROM numbers(0, 10000000); + + +SELECT SUM(value), COUNT(*) +FROM skewed_probe +ASOF JOIN build +USING (key, begin) +; + +SELECT SUM(value), COUNT(*) +FROM skewed_probe +ASOF JOIN build +USING (key, begin) +SETTINGS join_algorithm = 'full_sorting_merge' +; diff --git a/tests/queries/0_stateless/03144_asof_join_ddb_doubles.reference b/tests/queries/0_stateless/03144_asof_join_ddb_doubles.reference new file mode 100644 index 00000000000..f130f0a3f3b --- /dev/null +++ b/tests/queries/0_stateless/03144_asof_join_ddb_doubles.reference @@ -0,0 +1,58 @@ +1 0 +2 0 +3 1 +4 1 +5 1 +6 2 +7 2 +8 3 +9 3 +0 0 +1 0 +2 0 +3 1 +4 1 +5 1 +6 2 +7 2 +8 3 +9 3 +1 1 0 +1 2 0 +1 3 1 +1 4 1 +1 5 1 +1 6 2 +1 7 2 +1 8 3 +1 9 3 +2 0 10 +2 1 10 +2 2 10 +2 3 10 +2 4 10 +2 5 10 +2 6 10 +2 7 20 +2 8 20 +2 9 20 +1 0 0 +1 1 0 +1 2 0 +1 3 1 +1 4 1 +1 5 1 +1 6 2 +1 7 2 +1 8 3 +1 9 3 +2 0 10 +2 1 10 +2 2 10 +2 3 10 +2 4 10 +2 5 10 +2 6 10 +2 7 20 +2 8 20 +2 9 20 diff --git a/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql b/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql new file mode 100644 index 00000000000..ef16ced3082 --- /dev/null +++ b/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql @@ -0,0 +1,64 @@ +SET join_algorithm = 'full_sorting_merge'; + +DROP TABLE IF EXISTS events0; + +CREATE TABLE events0 ( + begin Float64, + value Int32 +) ENGINE = MergeTree ORDER BY begin; + +INSERT INTO events0 VALUES (1.0, 0), (3.0, 1), (6.0, 2), (8.0, 3); + +SELECT p.ts, e.value +FROM + (SELECT number :: Float64 AS ts FROM numbers(10)) p +ASOF JOIN events0 e +ON p.ts >= e.begin +ORDER BY p.ts ASC; + +SELECT p.ts, e.value +FROM + (SELECT number :: Float64 AS ts FROM numbers(10)) p +ASOF LEFT JOIN events0 e +ON p.ts >= e.begin 
+ORDER BY p.ts ASC +-- SETTINGS join_use_nulls = 1 +; + +DROP TABLE IF EXISTS events0; + +DROP TABLE IF EXISTS events; +DROP TABLE IF EXISTS probes; + +CREATE TABLE events ( + key Int32, + begin Float64, + value Int32 +) ENGINE = MergeTree ORDER BY (key, begin); + +INSERT INTO events VALUES (1, 1.0, 0), (1, 3.0, 1), (1, 6.0, 2), (1, 8.0, 3), (2, 0.0, 10), (2, 7.0, 20), (2, 11.0, 30); + +CREATE TABLE probes ( + key Int32, + ts Float64 +) ENGINE = MergeTree ORDER BY (key, ts) AS +SELECT + key.number, + ts.number +FROM + numbers(1, 2) as key, + numbers(10) as ts +SETTINGS join_algorithm = 'hash'; + +SELECT p.key, p.ts, e.value +FROM probes p +ASOF JOIN events e +ON p.key = e.key AND p.ts >= e.begin +ORDER BY p.key, p.ts ASC; + +SELECT p.key, p.ts, e.value +FROM probes p +ASOF LEFT JOIN events e +ON p.key = e.key AND p.ts >= e.begin +ORDER BY p.key, p.ts ASC NULLS FIRST; + diff --git a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference new file mode 100644 index 00000000000..73c4f7dfe25 --- /dev/null +++ b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference @@ -0,0 +1,68 @@ +2023-03-21 12:00:00 1970-01-01 00:00:00 -1 +2023-03-21 13:00:00 1970-01-01 00:00:00 -1 +2023-03-21 14:00:00 2023-03-21 13:00:00 0 +2023-03-21 15:00:00 2023-03-21 14:00:00 1 +2023-03-21 16:00:00 2023-03-21 15:00:00 2 +2023-03-21 17:00:00 2023-03-21 16:00:00 3 +2023-03-21 18:00:00 2023-03-21 16:00:00 3 +2023-03-21 19:00:00 2023-03-21 16:00:00 3 +2023-03-21 20:00:00 2023-03-21 16:00:00 3 +2023-03-21 21:00:00 2023-03-21 16:00:00 3 +2027-10-18 11:03:27 2023-03-21 16:00:00 3 +2023-03-21 12:00:00 1970-01-01 00:00:00 -1 +2023-03-21 13:00:00 1970-01-01 00:00:00 -1 +2023-03-21 14:00:00 2023-03-21 13:00:00 0 +2023-03-21 15:00:00 2023-03-21 14:00:00 1 +2023-03-21 16:00:00 2023-03-21 15:00:00 2 +2023-03-21 17:00:00 2023-03-21 16:00:00 3 +2023-03-21 18:00:00 2023-03-21 16:00:00 3 +2023-03-21 19:00:00 2023-03-21 16:00:00 3 +2023-03-21 20:00:00 2023-03-21 16:00:00 3 +2023-03-21 21:00:00 2023-03-21 16:00:00 3 +2027-10-18 11:03:27 2023-03-21 16:00:00 3 +\N \N \N +2023-03-21 12:00:00 2023-03-21 13:00:00 0 +2023-03-21 13:00:00 2023-03-21 13:00:00 0 +2023-03-21 14:00:00 2023-03-21 14:00:00 1 +2023-03-21 15:00:00 2023-03-21 15:00:00 2 +2023-03-21 16:00:00 2023-03-21 16:00:00 3 +2023-03-21 17:00:00 2027-10-18 11:03:27 9 +2023-03-21 18:00:00 2027-10-18 11:03:27 9 +2023-03-21 19:00:00 2027-10-18 11:03:27 9 +2023-03-21 20:00:00 2027-10-18 11:03:27 9 +2023-03-21 21:00:00 2027-10-18 11:03:27 9 +2027-10-18 11:03:27 2027-10-18 11:03:27 9 +2023-03-21 12:00:00 2023-03-21 13:00:00 0 +2023-03-21 13:00:00 2023-03-21 13:00:00 0 +2023-03-21 14:00:00 2023-03-21 14:00:00 1 +2023-03-21 15:00:00 2023-03-21 15:00:00 2 +2023-03-21 16:00:00 2023-03-21 16:00:00 3 +2023-03-21 17:00:00 2027-10-18 11:03:27 9 +2023-03-21 18:00:00 2027-10-18 11:03:27 9 +2023-03-21 19:00:00 2027-10-18 11:03:27 9 +2023-03-21 20:00:00 2027-10-18 11:03:27 9 +2023-03-21 21:00:00 2027-10-18 11:03:27 9 +2027-10-18 11:03:27 2027-10-18 11:03:27 9 +\N \N \N +2023-03-21 12:00:00 2023-03-21 13:00:00 0 +2023-03-21 13:00:00 2023-03-21 14:00:00 1 +2023-03-21 14:00:00 2023-03-21 15:00:00 2 +2023-03-21 15:00:00 2023-03-21 16:00:00 3 +2023-03-21 16:00:00 2027-10-18 11:03:27 9 +2023-03-21 17:00:00 2027-10-18 11:03:27 9 +2023-03-21 18:00:00 2027-10-18 11:03:27 9 +2023-03-21 19:00:00 2027-10-18 11:03:27 9 +2023-03-21 20:00:00 2027-10-18 11:03:27 9 +2023-03-21 21:00:00 2027-10-18 11:03:27 9 +2023-03-21 
12:00:00 2023-03-21 13:00:00 0 +2023-03-21 13:00:00 2023-03-21 14:00:00 1 +2023-03-21 14:00:00 2023-03-21 15:00:00 2 +2023-03-21 15:00:00 2023-03-21 16:00:00 3 +2023-03-21 16:00:00 2027-10-18 11:03:27 9 +2023-03-21 17:00:00 2027-10-18 11:03:27 9 +2023-03-21 18:00:00 2027-10-18 11:03:27 9 +2023-03-21 19:00:00 2027-10-18 11:03:27 9 +2023-03-21 20:00:00 2027-10-18 11:03:27 9 +2023-03-21 21:00:00 2027-10-18 11:03:27 9 +2027-10-18 11:03:27 \N \N +\N \N \N diff --git a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql new file mode 100644 index 00000000000..69de17541c1 --- /dev/null +++ b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; + +SET join_algorithm = 'full_sorting_merge'; + +CREATE TABLE events0 ( + begin Nullable(DateTime('UTC')), + value Int32 +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO events0 SELECT toDateTime('2023-03-21 13:00:00', 'UTC') + INTERVAL number HOUR, number FROM numbers(4); +INSERT INTO events0 VALUES (NULL, -10),('0000-01-01 00:00:00', -1), ('9999-12-31 23:59:59', 9); + +CREATE TABLE probe0 ( + begin Nullable(DateTime('UTC')) +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO probe0 SELECT toDateTime('2023-03-21 12:00:00', 'UTC') + INTERVAl number HOUR FROM numbers(10); +INSERT INTO probe0 VALUES (NULL),('9999-12-31 23:59:59'); + +SET join_use_nulls = 1; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF JOIN events0 e +ON p.begin > e.begin +ORDER BY p.begin ASC; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF LEFT JOIN events0 e +ON p.begin > e.begin +ORDER BY p.begin ASC +; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF JOIN events0 e +ON p.begin <= e.begin +ORDER BY p.begin ASC; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF LEFT JOIN events0 e +ON p.begin <= e.begin +ORDER BY p.begin ASC; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF JOIN events0 e +ON p.begin < e.begin +ORDER BY p.begin ASC +; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF LEFT JOIN events0 e +ON p.begin < e.begin +ORDER BY p.begin ASC; + + +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; diff --git a/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.reference b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.reference new file mode 100644 index 00000000000..ca481c7fff0 --- /dev/null +++ b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.reference @@ -0,0 +1,2 @@ +26790 1488 +26790 1488 diff --git a/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 new file mode 100644 index 00000000000..551bac0cc06 --- /dev/null +++ b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 @@ -0,0 +1,37 @@ +SET allow_experimental_analyzer=1; + +SET session_timezone = 'UTC'; + +{% for join_algorithm in ['default', 'full_sorting_merge'] -%} + +SET join_algorithm = '{{ join_algorithm }}'; + +-- TODO: support enable for full_sorting_merge +-- SET join_use_nulls = 1; + +WITH build AS ( + SELECT + tk.number AS k, + toDateTime('2021-01-01 00:00:00') + INTERVAL i.number SECONDS AS t, + i.number % 37 AS v + FROM numbers(3000000) AS i + CROSS JOIN numbers(2) AS tk + SETTINGS join_algorithm = 'hash', join_use_nulls = 0 +), +probe AS ( + SELECT + tk.number AS k, + toDateTime('2021-01-01 00:00:30') + INTERVAL tt.number HOUR AS t + FROM numbers(2) AS tk + CROSS JOIN 
numbers(toUInt32((toDateTime('2021-02-01 00:00:30') - toDateTime('2021-01-01 00:00:30')) / 3600)) AS tt + SETTINGS join_algorithm = 'hash', join_use_nulls = 0 +) +SELECT + SUM(v) AS v, + COUNT(*) AS n +FROM probe +ASOF LEFT JOIN build +USING (k, t) +; + +{% endfor -%} From 29494d0bc6cc1b0f82a887a06f0643ea4880e681 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 1 May 2024 15:08:50 +0000 Subject: [PATCH 0124/2361] add asof_join_ddb tests --- .../03144_asof_join_ddb_doubles.sql | 1 + .../03145_asof_join_ddb_inequalities.sql | 1 + .../03147_asof_join_ddb_missing.reference | 10 + .../03147_asof_join_ddb_missing.sql | 186 ++++++++++++++++++ .../03148_asof_join_ddb_subquery.reference | 4 + .../03148_asof_join_ddb_subquery.sql | 29 +++ .../03149_asof_join_ddb_timestamps.reference | 56 ++++++ .../03149_asof_join_ddb_timestamps.sql | 95 +++++++++ 8 files changed, 382 insertions(+) create mode 100644 tests/queries/0_stateless/03147_asof_join_ddb_missing.reference create mode 100644 tests/queries/0_stateless/03147_asof_join_ddb_missing.sql create mode 100644 tests/queries/0_stateless/03148_asof_join_ddb_subquery.reference create mode 100644 tests/queries/0_stateless/03148_asof_join_ddb_subquery.sql create mode 100644 tests/queries/0_stateless/03149_asof_join_ddb_timestamps.reference create mode 100644 tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql diff --git a/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql b/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql index ef16ced3082..87aece14628 100644 --- a/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql +++ b/tests/queries/0_stateless/03144_asof_join_ddb_doubles.sql @@ -1,4 +1,5 @@ SET join_algorithm = 'full_sorting_merge'; +SET allow_experimental_analyzer = 1; DROP TABLE IF EXISTS events0; diff --git a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql index 69de17541c1..ce4badbd597 100644 --- a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql +++ b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql @@ -1,6 +1,7 @@ DROP TABLE IF EXISTS events0; DROP TABLE IF EXISTS probe0; +SET allow_experimental_analyzer = 1; SET join_algorithm = 'full_sorting_merge'; CREATE TABLE events0 ( diff --git a/tests/queries/0_stateless/03147_asof_join_ddb_missing.reference b/tests/queries/0_stateless/03147_asof_join_ddb_missing.reference new file mode 100644 index 00000000000..11eb84463f4 --- /dev/null +++ b/tests/queries/0_stateless/03147_asof_join_ddb_missing.reference @@ -0,0 +1,10 @@ +108 +108 27 +513 +1218 +3528 +14553 +121275 +1495503 +12462525 +1249625025 diff --git a/tests/queries/0_stateless/03147_asof_join_ddb_missing.sql b/tests/queries/0_stateless/03147_asof_join_ddb_missing.sql new file mode 100644 index 00000000000..95a5f8ab3ff --- /dev/null +++ b/tests/queries/0_stateless/03147_asof_join_ddb_missing.sql @@ -0,0 +1,186 @@ +SET allow_experimental_analyzer=1; + +SET session_timezone = 'UTC'; +SET joined_subquery_requires_alias = 0; +SET allow_experimental_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; + +-- # 10 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(10), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # Coverage: Missing right side bin 
+WITH build AS ( + SELECT + k * 2 AS k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(10), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + intDiv(k, 2) AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v), COUNT(*) +FROM probe ASOF JOIN build USING (k, t); + +-- # 20 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(20), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 30 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(30), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 50 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(50), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 100 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(100), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 100 dates, 50 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(100), (SELECT number AS k FROM numbers(50)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 1000 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(1000), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 1000 dates, 50 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(1000), (SELECT number AS k FROM numbers(50)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 10000 dates, 50 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(10000), (SELECT number AS k FROM numbers(50)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); diff --git a/tests/queries/0_stateless/03148_asof_join_ddb_subquery.reference b/tests/queries/0_stateless/03148_asof_join_ddb_subquery.reference new file mode 
100644 index 00000000000..387a4a8f249 --- /dev/null +++ b/tests/queries/0_stateless/03148_asof_join_ddb_subquery.reference @@ -0,0 +1,4 @@ +1 1 +3 1 +6 1 +8 1 diff --git a/tests/queries/0_stateless/03148_asof_join_ddb_subquery.sql b/tests/queries/0_stateless/03148_asof_join_ddb_subquery.sql new file mode 100644 index 00000000000..2ddf0f09b1e --- /dev/null +++ b/tests/queries/0_stateless/03148_asof_join_ddb_subquery.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS events; +CREATE TABLE events (begin Float64, value Int32) ENGINE = MergeTree() ORDER BY begin; + +INSERT INTO events VALUES (1, 0), (3, 1), (6, 2), (8, 3); + +SET allow_experimental_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; +SET joined_subquery_requires_alias = 0; + +SELECT + begin, + value IN ( + SELECT e1.value + FROM ( + SELECT * + FROM events e1 + WHERE e1.value = events.value + ) AS e1 + ASOF JOIN ( + SELECT number :: Float64 AS begin + FROM numbers(10) + WHERE number >= 1 AND number < 10 + ) + USING (begin) + ) +FROM events +ORDER BY begin ASC; + +DROP TABLE IF EXISTS events; diff --git a/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.reference b/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.reference new file mode 100644 index 00000000000..7cfc85d23a5 --- /dev/null +++ b/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.reference @@ -0,0 +1,56 @@ +2023-03-21 13:00:00 0 +2023-03-21 14:00:00 1 +2023-03-21 15:00:00 2 +2023-03-21 16:00:00 3 +2023-03-21 17:00:00 3 +2023-03-21 18:00:00 3 +2023-03-21 19:00:00 3 +2023-03-21 20:00:00 3 +2023-03-21 21:00:00 3 +2106-02-07 06:28:15 9 +2023-03-21 13:00:00 0 +2023-03-21 14:00:00 1 +2023-03-21 15:00:00 2 +2023-03-21 16:00:00 3 +2023-03-21 17:00:00 3 +2023-03-21 18:00:00 3 +2023-03-21 19:00:00 3 +2023-03-21 20:00:00 3 +2023-03-21 21:00:00 3 +2106-02-07 06:28:15 9 +2023-03-21 12:00:00 \N +2023-03-21 13:00:00 0 +2023-03-21 14:00:00 1 +2023-03-21 15:00:00 2 +2023-03-21 16:00:00 3 +2023-03-21 17:00:00 3 +2023-03-21 18:00:00 3 +2023-03-21 19:00:00 3 +2023-03-21 20:00:00 3 +2023-03-21 21:00:00 3 +2106-02-07 06:28:15 9 +\N \N +2023-03-21 12:00:00 0 +2023-03-21 13:00:00 0 +2023-03-21 14:00:00 1 +2023-03-21 15:00:00 2 +2023-03-21 16:00:00 3 +2023-03-21 17:00:00 3 +2023-03-21 18:00:00 3 +2023-03-21 19:00:00 3 +2023-03-21 20:00:00 3 +2023-03-21 21:00:00 3 +2106-02-07 06:28:15 9 +\N 0 +2023-03-21 12:00:00 \N +2023-03-21 13:00:00 \N +2023-03-21 14:00:00 \N +2023-03-21 15:00:00 \N +2023-03-21 16:00:00 \N +2023-03-21 17:00:00 \N +2023-03-21 18:00:00 \N +2023-03-21 19:00:00 \N +2023-03-21 20:00:00 \N +2023-03-21 21:00:00 \N +2106-02-07 06:28:15 \N +\N \N diff --git a/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql b/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql new file mode 100644 index 00000000000..ff4518a3775 --- /dev/null +++ b/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql @@ -0,0 +1,95 @@ +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; + +SET session_timezone = 'UTC'; +SET allow_experimental_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; +SET join_use_nulls = 1; + +CREATE TABLE events0 +ENGINE = MergeTree() +ORDER BY COALESCE(begin, toDateTime('9999-12-31 23:59:59')) +AS +SELECT + toNullable(toDateTime('2023-03-21 13:00:00') + INTERVAL number HOUR) AS begin, + number AS value +FROM numbers(4); + +INSERT INTO events0 VALUES (NULL, -1), (toDateTime('9999-12-31 23:59:59'), 9); + +CREATE TABLE probe0 +ENGINE = MergeTree() +ORDER BY COALESCE(begin, toDateTime('9999-12-31 23:59:59')) +AS +SELECT + 
toNullable(toDateTime('2023-03-21 12:00:00') + INTERVAL number HOUR) AS begin +FROM numbers(10); + +INSERT INTO probe0 VALUES (NULL), (toDateTime('9999-12-31 23:59:59')); + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF JOIN events0 e ON p.begin >= e.begin +ORDER BY p.begin ASC; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF JOIN events0 e USING (begin) +ORDER BY p.begin ASC +SETTINGS join_use_nulls = 0 +; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF LEFT JOIN events0 e ON p.begin >= e.begin +ORDER BY p.begin ASC; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF LEFT JOIN events0 e USING (begin) +ORDER BY p.begin ASC +SETTINGS join_use_nulls = 0 +; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF RIGHT JOIN events0 e ON p.begin >= e.begin +ORDER BY e.begin ASC; -- { serverError NOT_IMPLEMENTED} + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF RIGHT JOIN events0 e USING (begin) +ORDER BY e.begin ASC; -- { serverError NOT_IMPLEMENTED} + + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF LEFT JOIN ( + SELECT * FROM events0 WHERE log(value + 5) > 10 + ) e ON p.begin >= e.begin +ORDER BY p.begin ASC; + + +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; From d37f03201aaac06b1a175ac0b177c17d945dddbb Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 7 May 2024 10:07:43 +0000 Subject: [PATCH 0125/2361] fix --- src/Processors/Transforms/MergeJoinTransform.cpp | 2 -- tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 26e1ebb0b60..9e6904f0613 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -1173,9 +1173,7 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() if (auto result = handleAllJoinState()) - { return std::move(*result); - } if (auto result = handleAsofJoinState()) return std::move(*result); diff --git a/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 index 551bac0cc06..44c54ae2a39 100644 --- a/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 +++ b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 @@ -1,3 +1,5 @@ +-- Tags: long + SET allow_experimental_analyzer=1; SET session_timezone = 'UTC'; From 558b73aba4f89db4a2b3e5ed0754749caddd5dfb Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 7 May 2024 14:20:40 +0000 Subject: [PATCH 0126/2361] t --- tests/queries/0_stateless/03143_asof_join_ddb_long.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index c421702bb00..a41b667e6c8 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -1,5 +1,7 @@ -- Tags: long +-- https://s3.amazonaws.com/clickhouse-test-reports/55051/07f288862c56b0a98379a07101062689b0460788/stateless_tests_flaky_check__asan_.html + DROP TABLE IF EXISTS build; DROP TABLE IF EXISTS skewed_probe; From f710a67fb2e72361801f0ae9a45ccbd07e2c7f30 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 22 May 2024 12:27:04 +0000 Subject: [PATCH 0127/2361] set timezone in 03143_asof_join_ddb_long --- src/Processors/Transforms/MergeJoinTransform.cpp | 4 ++-- tests/queries/0_stateless/03143_asof_join_ddb_long.sql | 4 ++-- 2 files 
changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 9e6904f0613..38b63a856f6 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -45,8 +45,8 @@ FullMergeJoinCursorPtr createCursor(const Block & block, const Names & columns, bool isNullAt(const IColumn & column, size_t row) { - if (const auto * nullable = checkAndGetColumn(column)) - return nullable->isNullAt(row); + if (const auto * nullable_column = checkAndGetColumn(&column)) + return nullable_column->isNullAt(row); return false; } diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index a41b667e6c8..17a67511030 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -1,10 +1,10 @@ -- Tags: long --- https://s3.amazonaws.com/clickhouse-test-reports/55051/07f288862c56b0a98379a07101062689b0460788/stateless_tests_flaky_check__asan_.html - DROP TABLE IF EXISTS build; DROP TABLE IF EXISTS skewed_probe; +SET session_timezone = 'UTC'; + CREATE TABLE build ENGINE = MergeTree ORDER BY (key, begin) AS SELECT From 11f4ed75fd18a853e70c94f10596d5b3b09a35dd Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 5 Jun 2024 12:26:05 +0200 Subject: [PATCH 0128/2361] fix build --- src/Processors/tests/gtest_full_sorting_join.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index a3fda006eb8..7294a1b381a 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -151,7 +151,6 @@ public: size_t rows = current_chunk.front()->size(); chunks.emplace_back(std::move(current_chunk), rows); current_chunk = header.cloneEmptyColumns(); - return; } std::shared_ptr getSource() @@ -403,7 +402,7 @@ try auto expected_right = ColumnString::create(); UInt64 k1 = 1; - String k2 = ""; + String k2; auto get_attr = [&](const String & side, size_t idx) -> String { @@ -601,7 +600,7 @@ try ColumnInt64::Container expected; UInt64 k1 = 1; - String k2 = ""; + String k2; auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { @@ -690,7 +689,7 @@ try ColumnInt64::Container expected; UInt64 k1 = 1; - String k2 = ""; + String k2; UInt64 left_t = 0; auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); From 11bda3f5f749375301fdc944693984a744ba81ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 12 Jun 2024 18:23:50 +0000 Subject: [PATCH 0129/2361] Create consumers in startup --- src/Storages/Kafka/StorageKafka2.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index f75e6044ddd..de27adbbe3f 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -278,6 +278,18 @@ StorageKafka2::write(const ASTPtr &, const StorageMetadataPtr & metadata_snapsho void StorageKafka2::startup() { + for (size_t i = 0; i < num_consumers; ++i) + { + try + { + consumers.emplace_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = keeper}); + ++num_created_consumers; + } + catch (const cppkafka::Exception &) + { + tryLogCurrentException(log); + 
} + } // Start the reader thread for (auto & task : tasks) task->holder->activateAndSchedule(); From ba76a06f5677e7de556781a4c06cc947f392e0c5 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 14 Jun 2024 01:35:08 +0000 Subject: [PATCH 0130/2361] potentially very serious bug is fixed for secure socket --- src/IO/ReadBufferFromPocoSocket.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index af58efc7e10..6361fed01dd 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -134,11 +134,14 @@ ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & s bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const { - if (available()) + /// For secure socket it is important to check if any remaining data available in underlying decryption buffer - + /// read always retrives the whole encrypted frame from the wire and puts it into underlying buffer while returning only requested size - + /// further poll() can block though there is still data to read in the underlying decryption buffer. + if (available() || socket.impl()->available()) return true; Stopwatch watch; - bool res = socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); + bool res = socket.impl()->poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); return res; } From 97aea863767a58fd65274777913865201ea906e3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 14 Jun 2024 01:56:05 +0000 Subject: [PATCH 0131/2361] fix style --- src/IO/ReadBufferFromPocoSocket.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 6361fed01dd..bbf9f96404f 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -135,7 +135,7 @@ ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & s bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const { /// For secure socket it is important to check if any remaining data available in underlying decryption buffer - - /// read always retrives the whole encrypted frame from the wire and puts it into underlying buffer while returning only requested size - + /// read always retrieves the whole encrypted frame from the wire and puts it into underlying buffer while returning only requested size - /// further poll() can block though there is still data to read in the underlying decryption buffer. 
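    /// A rough sketch of the read loop this guards (hypothetical caller, simplified signatures):
    ///     while (in.poll(receive_timeout_microseconds))  /// must keep returning true while decrypted
    ///         in.read(to, n);                            /// bytes remain buffered, otherwise the loop
    ///                                                    /// would block in a socket-level poll() even
    ///                                                    /// though unread data already sits in memory.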
if (available() || socket.impl()->available()) return true; From 99456d2fa630455ebf1a2422b5e827ec056f4130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 14 Jun 2024 17:21:15 +0200 Subject: [PATCH 0132/2361] Fix unexpeced size of low cardinality column in functions --- src/Functions/IFunction.cpp | 101 ++++++++++++++++++++++++------------ 1 file changed, 68 insertions(+), 33 deletions(-) diff --git a/src/Functions/IFunction.cpp b/src/Functions/IFunction.cpp index 31695fc95d5..9217071ca11 100644 --- a/src/Functions/IFunction.cpp +++ b/src/Functions/IFunction.cpp @@ -50,43 +50,78 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args) ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count) { - size_t num_rows = input_rows_count; + /// We return the LC indexes so the LC can be reconstructed with the function result ColumnPtr indexes; - /// Find first LowCardinality column and replace it to nested dictionary. - for (auto & column : args) + size_t number_low_cardinality_columns = 0; + size_t last_low_cardinality = 0; + size_t number_const_columns = 0; + size_t number_full_columns = 0; + + for (size_t i = 0; i < args.size(); i++) { - if (const auto * low_cardinality_column = checkAndGetColumn(column.column.get())) + auto const & arg = args[i]; + if (checkAndGetColumn(arg.column.get())) { - /// Single LowCardinality column is supported now. - if (indexes) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function."); - - const auto * low_cardinality_type = checkAndGetDataType(column.type.get()); - - if (!low_cardinality_type) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Incompatible type for LowCardinality column: {}", - column.type->getName()); - - if (can_be_executed_on_default_arguments) - { - /// Normal case, when function can be executed on values' default. - column.column = low_cardinality_column->getDictionary().getNestedColumn(); - indexes = low_cardinality_column->getIndexesPtr(); - } - else - { - /// Special case when default value can't be used. Example: 1 % LowCardinality(Int). - /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case. - auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size()); - column.column = dict_encoded.dictionary; - indexes = dict_encoded.indexes; - } - - num_rows = column.column->size(); - column.type = low_cardinality_type->getDictionaryType(); + number_low_cardinality_columns++; + last_low_cardinality = i; } + else if (checkAndGetColumn(arg.column.get())) + number_const_columns++; + else + number_full_columns++; + } + + if (!number_low_cardinality_columns && !number_const_columns) + return nullptr; + + if (number_full_columns > 0 || number_low_cardinality_columns > 1) + { + /// If there is a single full column, we can't replace the LC column with its dictionary, as it won't match + /// the size or order of the full columns. 
Same if there are 2 or more low cardinality columns + for (auto & arg : args) + { + if (const auto * column_lc = checkAndGetColumn(arg.column.get())) + { + arg.column = recursiveRemoveLowCardinality(arg.column); + chassert(arg.column->size() == input_rows_count); + + const auto * low_cardinality_type = checkAndGetDataType(arg.type.get()); + if (!low_cardinality_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", arg.type->getName()); + arg.type = recursiveRemoveLowCardinality(arg.type); + } + } + } + else if (number_low_cardinality_columns == 1) + { + auto & lc_arg = args[last_low_cardinality]; + + const auto * low_cardinality_type = checkAndGetDataType(lc_arg.type.get()); + if (!low_cardinality_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName()); + + const auto * low_cardinality_column = checkAndGetColumn(lc_arg.column.get()); + chassert(low_cardinality_column); + + if (can_be_executed_on_default_arguments) + { + /// Normal case, when function can be executed on values' default. + lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn(); + indexes = low_cardinality_column->getIndexesPtr(); + } + else + { + /// Special case when default value can't be used. Example: 1 % LowCardinality(Int). + /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case. + auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size()); + lc_arg.column = dict_encoded.dictionary; + indexes = dict_encoded.indexes; + } + + /// The new column will have a different number of rows, normally less but occasionally it might be more (NULL) + input_rows_count = lc_arg.column->size(); + lc_arg.type = low_cardinality_type->getDictionaryType(); } /// Change size of constants. 
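A short worked example of the dictionary shortcut taken in the single-LowCardinality branch, with illustrative values:

    /// A LowCardinality(String) column with rows ['a', 'b', 'a', 'a'] is stored as
    ///     dictionary = ['', 'a', 'b']   // position 0 holds the default value
    ///     indexes    = [1, 2, 1, 1]
    /// so the function runs on the 3-row dictionary and the result is re-expanded through the
    /// indexes; a 4-row full column has no matching size or row order, which is why mixing in
    /// full columns forces the LowCardinality argument to be expanded instead.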
@@ -94,7 +129,7 @@ ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( { if (const auto * column_const = checkAndGetColumn(column.column.get())) { - column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows); + column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count); column.type = recursiveRemoveLowCardinality(column.type); } } From 14a13d54c0ff56b0e6326ac75bb7136e44d814d1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 14 Jun 2024 15:56:14 +0000 Subject: [PATCH 0133/2361] fix UB misaligned address --- src/IO/WriteBufferFromPocoSocketChunked.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 4325ab2bd4b..c668ea2c505 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -6,6 +6,18 @@ #include +namespace +{ + +template +const T & setValue(T * typed_ptr, std::type_identity_t val) +{ + memcpy(typed_ptr, &val, sizeof(T)); + return *typed_ptr; +} + +} + namespace DB { @@ -40,7 +52,7 @@ public: chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); - *chunk_size_ptr = 0; + setValue(chunk_size_ptr, 0); /// Initialize next chunk chunk_size_ptr = reinterpret_cast(pos); pos += std::min(available(), sizeof(*chunk_size_ptr)); @@ -58,7 +70,7 @@ public: } /// Fill up current chunk size - *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); if (!chunk_started) LOG_TEST(log, "{} -> {} Chunk send started. 
Message {}, size {}", @@ -174,7 +186,7 @@ protected: pos -= sizeof(*chunk_size_ptr); else // fill up current chunk size { - *chunk_size_ptr = toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr))); + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); if (!chunk_started) { chunk_started = true; From 5b082051451356b2c1d3152489e5d51cd75d2d6a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 15 Jun 2024 00:22:51 +0000 Subject: [PATCH 0134/2361] some refactoring --- src/IO/WriteBufferFromPocoSocketChunked.cpp | 207 ++++++++++++++++++ src/IO/WriteBufferFromPocoSocketChunked.h | 220 +------------------- 2 files changed, 217 insertions(+), 210 deletions(-) create mode 100644 src/IO/WriteBufferFromPocoSocketChunked.cpp diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp new file mode 100644 index 00000000000..324f8ae3a02 --- /dev/null +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -0,0 +1,207 @@ +#include +#include +#include + + +namespace +{ + +template +const T & setValue(T * typed_ptr, std::type_identity_t val) +{ + memcpy(typed_ptr, &val, sizeof(T)); + return *typed_ptr; +} + +} + +namespace DB +{ + +void WriteBufferFromPocoSocketChunked::enableChunked() +{ + chunked = true; + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); +} + +void WriteBufferFromPocoSocketChunked::finishChunk() +{ + if (!chunked) + return; + + if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) + { + if (chunk_size_ptr == last_finish_chunk) // prevent duplicate finish chunk + return; + + /// If current chunk is empty it means we are finishing a chunk previously sent by next(), + /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. + /// We don't need to worry about if it's the end of the buffer because next() always sends the whole buffer + /// so it should be a beginning of the buffer. + + chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); + + setValue(chunk_size_ptr, 0); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Previously finished chunk wasn't sent yet + if (last_finish_chunk == chunk_size_ptr) + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + } + + /// Fill up current chunk size + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); + + if (!chunk_started) + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + else + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send continued. 
Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + + if (available() < sizeof(*chunk_size_ptr)) + { + finishing = available(); + pos += available(); + chunk_size_ptr = reinterpret_cast(pos); + return; + } + + /// Buffer end-of-chunk + *reinterpret_cast(pos) = 0; + pos += sizeof(*chunk_size_ptr); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; +} + +WriteBufferFromPocoSocketChunked::~WriteBufferFromPocoSocketChunked() +{ + try + { + finalize(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + +void WriteBufferFromPocoSocketChunked::nextImpl() +{ + if (!chunked) + { + WriteBufferFromPocoSocket::nextImpl(); + return; + } + + /// next() after finishChunk ar the end of the buffer + if (finishing < sizeof(*chunk_size_ptr)) + { + pos -= finishing; + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Send end-of-chunk directly + UInt32 s = 0; + socketSendBytes(reinterpret_cast(&s), sizeof(s)); + + finishing = sizeof(*chunk_size_ptr); + + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Send end-of-chunk buffered by finishChunk + if (offset() == 2 * sizeof(*chunk_size_ptr) && last_finish_chunk == chunk_size_ptr) + { + pos -= sizeof(*chunk_size_ptr); + /// Send end-of-chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Prevent sending empty chunk + if (offset() == sizeof(*chunk_size_ptr)) + { + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + return; + } + + /// Finish chunk at the end of the buffer + if (working_buffer.end() - reinterpret_cast(chunk_size_ptr) <= static_cast(sizeof(*chunk_size_ptr))) + { + pos = reinterpret_cast(chunk_size_ptr); + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = nullptr; + + return; + } + + if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk + pos -= sizeof(*chunk_size_ptr); + else // fill up current chunk size + { + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); + if (!chunk_started) + { + chunk_started = true; + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + } + else + LOG_TEST(log, "{} -> {} Chunk send continued. 
Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = nullptr; +} + +void WriteBufferFromPocoSocketChunked::finalizeImpl() +{ + if (offset() == sizeof(*chunk_size_ptr)) + pos -= sizeof(*chunk_size_ptr); + WriteBufferFromPocoSocket::finalizeImpl(); +} + +} diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index c668ea2c505..269c6d66dda 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -1,23 +1,9 @@ #pragma once -#include "base/defines.h" #include #include -#include -namespace -{ - -template -const T & setValue(T * typed_ptr, std::type_identity_t val) -{ - memcpy(typed_ptr, &val, sizeof(T)); - return *typed_ptr; -} - -} - namespace DB { @@ -27,208 +13,22 @@ public: explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, buf_size), log(getLogger("Protocol")) {} explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, write_event_, buf_size), log(getLogger("Protocol")) {} - void enableChunked() - { - chunked = true; - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(pos); - pos += std::min(available(), sizeof(*chunk_size_ptr)); - } - - void finishChunk() - { - if (!chunked) - return; - - if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) - { - if (chunk_size_ptr == last_finish_chunk) // prevent duplicate finish chunk - return; - - /// If current chunk is empty it means we are finishing a chunk previously sent by next(), - /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. - /// We don't need to worry about if it's the end of the buffer because next() always sends the whole buffer - /// so it should be a beginning of the buffer. - - chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); - - setValue(chunk_size_ptr, 0); - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(pos); - pos += std::min(available(), sizeof(*chunk_size_ptr)); - - last_finish_chunk = chunk_size_ptr; - - return; - } - - /// Previously finished chunk wasn't sent yet - if (last_finish_chunk == chunk_size_ptr) - { - chunk_started = false; - LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); - } - - /// Fill up current chunk size - setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); - - if (!chunk_started) - LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", - ourAddress().toString(), peerAddress().toString(), - static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), - *chunk_size_ptr); - else - { - chunk_started = false; - LOG_TEST(log, "{} -> {} Chunk send continued. 
Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); - } - - LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); - - if (available() < sizeof(*chunk_size_ptr)) - { - finishing = available(); - pos += available(); - chunk_size_ptr = reinterpret_cast(pos); - return; - } - - /// Buffer end-of-chunk - *reinterpret_cast(pos) = 0; - pos += sizeof(*chunk_size_ptr); - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(pos); - pos += std::min(available(), sizeof(*chunk_size_ptr)); - - last_finish_chunk = chunk_size_ptr; - } - - ~WriteBufferFromPocoSocketChunked() override - { - try - { - finalize(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } + void enableChunked(); + void finishChunk(); + ~WriteBufferFromPocoSocketChunked() override; protected: - void nextImpl() override - { - if (!chunked) - { - WriteBufferFromPocoSocket::nextImpl(); - return; - } + void nextImpl() override; + void finalizeImpl() override; + Poco::Net::SocketAddress peerAddress() const { return peer_address; } + Poco::Net::SocketAddress ourAddress() const { return our_address; } - /// next() after finishChunk ar the end of the buffer - if (finishing < sizeof(*chunk_size_ptr)) - { - pos -= finishing; - /// Send current chunk - WriteBufferFromPocoSocket::nextImpl(); - /// Send end-of-chunk directly - UInt32 s = 0; - socketSendBytes(reinterpret_cast(&s), sizeof(s)); - - finishing = sizeof(*chunk_size_ptr); - - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(working_buffer.begin()); - nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - - last_finish_chunk = chunk_size_ptr; - - return; - } - - /// Send end-of-chunk buffered by finishChunk - if (offset() == 2 * sizeof(*chunk_size_ptr) && last_finish_chunk == chunk_size_ptr) - { - pos -= sizeof(*chunk_size_ptr); - /// Send end-of-chunk - WriteBufferFromPocoSocket::nextImpl(); - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(working_buffer.begin()); - nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - - last_finish_chunk = chunk_size_ptr; - - return; - } - - /// Prevent sending empty chunk - if (offset() == sizeof(*chunk_size_ptr)) - { - nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - return; - } - - /// Finish chunk at the end of the buffer - if (working_buffer.end() - reinterpret_cast(chunk_size_ptr) <= static_cast(sizeof(*chunk_size_ptr))) - { - pos = reinterpret_cast(chunk_size_ptr); - /// Send current chunk - WriteBufferFromPocoSocket::nextImpl(); - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(working_buffer.begin()); - nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - - last_finish_chunk = nullptr; - - return; - } - - if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk - pos -= sizeof(*chunk_size_ptr); - else // fill up current chunk size - { - setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); - if (!chunk_started) - { - chunk_started = true; - LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", - ourAddress().toString(), peerAddress().toString(), - static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), - *chunk_size_ptr); - } - else - LOG_TEST(log, "{} -> {} Chunk send continued. 
Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); - } - /// Send current chunk - WriteBufferFromPocoSocket::nextImpl(); - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(working_buffer.begin()); - nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - - last_finish_chunk = nullptr; - } - - void finalizeImpl() override - { - if (offset() == sizeof(*chunk_size_ptr)) - pos -= sizeof(*chunk_size_ptr); - WriteBufferFromPocoSocket::finalizeImpl(); - } - - Poco::Net::SocketAddress peerAddress() - { - return peer_address; - } - - Poco::Net::SocketAddress ourAddress() - { - return our_address; - } private: LoggerPtr log; bool chunked = false; - UInt32 * last_finish_chunk = nullptr; // pointer to the last chunk header created by finishChunk - bool chunk_started = false; // chunk started flag - UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer + UInt32 * last_finish_chunk = nullptr; // pointer to the last chunk header created by finishChunk + bool chunk_started = false; // chunk started flag + UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker }; From 852318449f2ab8e0bfaeca9904b90936d751d177 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 15 Jun 2024 20:17:43 +0200 Subject: [PATCH 0135/2361] Check perf tests --- src/Common/PODArray_fwd.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Common/PODArray_fwd.h b/src/Common/PODArray_fwd.h index d570a90e467..bd780eb51b0 100644 --- a/src/Common/PODArray_fwd.h +++ b/src/Common/PODArray_fwd.h @@ -17,12 +17,12 @@ constexpr size_t integerRoundUp(size_t value, size_t dividend) } template , size_t pad_right_ = 0, + typename TAllocator = Allocator, size_t pad_right_ = 0, size_t pad_left_ = 0> class PODArray; /** For columns. Padding is enough to read and write xmm-register at the address of the last element. */ -template > +template > using PaddedPODArray = PODArray; /** A helper for declaring PODArray that uses inline memory. @@ -32,6 +32,6 @@ using PaddedPODArray = PODArray using PODArrayWithStackMemory = PODArray, rounded_bytes, alignof(T)>>; + AllocatorWithStackMemory, rounded_bytes, alignof(T)>>; } From 7568de2202a7fa99539d8d46092315bf3c7fe5e6 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 15 Jun 2024 22:03:53 +0100 Subject: [PATCH 0136/2361] Revert "Check perf tests" This reverts commit 852318449f2ab8e0bfaeca9904b90936d751d177. --- src/Common/PODArray_fwd.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Common/PODArray_fwd.h b/src/Common/PODArray_fwd.h index bd780eb51b0..d570a90e467 100644 --- a/src/Common/PODArray_fwd.h +++ b/src/Common/PODArray_fwd.h @@ -17,12 +17,12 @@ constexpr size_t integerRoundUp(size_t value, size_t dividend) } template , size_t pad_right_ = 0, + typename TAllocator = Allocator, size_t pad_right_ = 0, size_t pad_left_ = 0> class PODArray; /** For columns. Padding is enough to read and write xmm-register at the address of the last element. */ -template > +template > using PaddedPODArray = PODArray; /** A helper for declaring PODArray that uses inline memory. 
@@ -32,6 +32,6 @@ using PaddedPODArray = PODArray using PODArrayWithStackMemory = PODArray, rounded_bytes, alignof(T)>>; + AllocatorWithStackMemory, rounded_bytes, alignof(T)>>; } From 57d036e5899b52fd4fdab9447630e01bf3d5382b Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 15 Jun 2024 22:04:24 +0100 Subject: [PATCH 0137/2361] impl --- tests/performance/scripts/perf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/performance/scripts/perf.py b/tests/performance/scripts/perf.py index e98c158249a..94f145d82db 100755 --- a/tests/performance/scripts/perf.py +++ b/tests/performance/scripts/perf.py @@ -427,6 +427,8 @@ for query_index in queries_to_run: for conn_index, c in enumerate(this_query_connections): try: + c.execute("SYSTEM JEMALLOC PURGE") + res = c.execute( q, query_id=run_id, From e91dd71d4e55fe80d1c230a87eee7ad84333d9c3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 17 Jun 2024 07:01:06 +0200 Subject: [PATCH 0138/2361] Settings normalization --- src/Interpreters/InterpreterSetQuery.cpp | 26 ++++++++++-------------- src/Parsers/ParserQueryWithOutput.cpp | 11 ---------- 2 files changed, 11 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index 7e68fc5c4c1..cac44c7747b 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -9,6 +9,7 @@ #include #include + namespace DB { @@ -45,9 +46,7 @@ static void applySettingsFromSelectWithUnion(const ASTSelectWithUnionQuery & sel // It is flattened later, when we process UNION ALL/DISTINCT. const auto * last_select = children.back()->as(); if (last_select && last_select->settings()) - { InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(); - } } void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMutablePtr context_) @@ -55,6 +54,16 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta if (!ast) return; + /// First apply the outermost settings. Then they could be overridden by deeper settings. 
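    /// For example, for the query
    ///     SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV SETTINGS max_block_size = 3
    /// the outer clause after FORMAT is applied first and the deeper SELECT-level clause then
    /// overrides it, so the query observes 1 (covered by 03172_format_settings_clauses.sql).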
+ if (const auto * query_with_output = dynamic_cast(ast.get())) + { + if (query_with_output->settings_ast) + InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(); + + if (const auto * create_query = ast->as(); create_query && create_query->select) + applySettingsFromSelectWithUnion(create_query->select->as(), context_); + } + if (const auto * select_query = ast->as()) { if (auto new_settings = select_query->settings()) @@ -71,19 +80,6 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta applySettingsFromQuery(explain_query->getExplainedQuery(), context_); } - else if (const auto * query_with_output = dynamic_cast(ast.get())) - { - if (query_with_output->settings_ast) - InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(); - - if (const auto * create_query = ast->as()) - { - if (create_query->select) - { - applySettingsFromSelectWithUnion(create_query->select->as(), context_); - } - } - } else if (auto * insert_query = ast->as()) { context_->setInsertFormat(insert_query->format); diff --git a/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp index 6d8a1258555..ac8f7d560e0 100644 --- a/src/Parsers/ParserQueryWithOutput.cpp +++ b/src/Parsers/ParserQueryWithOutput.cpp @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -198,16 +197,6 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!parser_settings.parse(pos, query_with_output.settings_ast, expected)) return false; query_with_output.children.push_back(query_with_output.settings_ast); - - // SETTINGS after FORMAT is not parsed by the SELECT parser (ParserSelectQuery) - // Pass them manually, to apply in InterpreterSelectQuery::initSettings() - if (query->as()) - { - auto settings = query_with_output.settings_ast->clone(); - assert_cast(settings.get())->print_in_format = false; - QueryWithOutputSettingsPushDownVisitor::Data data{settings}; - QueryWithOutputSettingsPushDownVisitor(data).visit(query); - } } else break; From f5da9e424075fa755edd1a869e199f4861011be2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 17 Jun 2024 07:01:44 +0200 Subject: [PATCH 0139/2361] Add a test --- ...QueryWithOutputSettingsPushDownVisitor.cpp | 56 ------------------- .../QueryWithOutputSettingsPushDownVisitor.h | 39 ------------- .../03172_format_settings_clauses.reference | 14 +++++ .../03172_format_settings_clauses.sql | 30 ++++++++++ 4 files changed, 44 insertions(+), 95 deletions(-) delete mode 100644 src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp delete mode 100644 src/Parsers/QueryWithOutputSettingsPushDownVisitor.h create mode 100644 tests/queries/0_stateless/03172_format_settings_clauses.reference create mode 100644 tests/queries/0_stateless/03172_format_settings_clauses.sql diff --git a/src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp b/src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp deleted file mode 100644 index 8cf0d0063ae..00000000000 --- a/src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp +++ /dev/null @@ -1,56 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include -#include - -namespace DB -{ - -bool QueryWithOutputSettingsPushDownMatcher::needChildVisit(ASTPtr & node, const ASTPtr & child) -{ - if (node->as()) - return true; - if (node->as()) - return true; - if (child->as()) - return true; - return false; -} - -void QueryWithOutputSettingsPushDownMatcher::visit(ASTPtr & ast, Data & data) 
-{ - if (auto * select_query = ast->as()) - visit(*select_query, ast, data); -} - -void QueryWithOutputSettingsPushDownMatcher::visit(ASTSelectQuery & select_query, ASTPtr &, Data & data) -{ - ASTPtr select_settings_ast = select_query.settings(); - if (!select_settings_ast) - { - select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, data.settings_ast->clone()); - return; - } - - SettingsChanges & select_settings = select_settings_ast->as().changes; - SettingsChanges & settings = data.settings_ast->as().changes; - - for (auto & setting : settings) - { - auto it = std::find_if(select_settings.begin(), select_settings.end(), [&](auto & select_setting) - { - return select_setting.name == setting.name; - }); - if (it == select_settings.end()) - select_settings.push_back(setting); - else - it->value = setting.value; - } -} - -} diff --git a/src/Parsers/QueryWithOutputSettingsPushDownVisitor.h b/src/Parsers/QueryWithOutputSettingsPushDownVisitor.h deleted file mode 100644 index fde8a07b555..00000000000 --- a/src/Parsers/QueryWithOutputSettingsPushDownVisitor.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -class ASTSelectQuery; -struct SettingChange; -class SettingsChanges; - -/// Pushdown SETTINGS clause that goes after FORMAT to the SELECT query: -/// (since settings after FORMAT parsed separately not in the ParserSelectQuery but in ParserQueryWithOutput) -/// -/// SELECT 1 FORMAT Null SETTINGS max_block_size = 1 -> -/// SELECT 1 SETTINGS max_block_size = 1 FORMAT Null SETTINGS max_block_size = 1 -/// -/// Otherwise settings after FORMAT will not be applied. -class QueryWithOutputSettingsPushDownMatcher -{ -public: - using Visitor = InDepthNodeVisitor; - - struct Data - { - const ASTPtr & settings_ast; - }; - - static bool needChildVisit(ASTPtr & node, const ASTPtr & child); - static void visit(ASTPtr & ast, Data & data); - -private: - static void visit(ASTSelectQuery &, ASTPtr &, Data &); -}; - -using QueryWithOutputSettingsPushDownVisitor = QueryWithOutputSettingsPushDownMatcher::Visitor; - -} diff --git a/tests/queries/0_stateless/03172_format_settings_clauses.reference b/tests/queries/0_stateless/03172_format_settings_clauses.reference new file mode 100644 index 00000000000..8a98b137f4b --- /dev/null +++ b/tests/queries/0_stateless/03172_format_settings_clauses.reference @@ -0,0 +1,14 @@ +1 +2 +1 +2 +1 +2 +1 +1 +3 +3 +3 +3 +3 +1 diff --git a/tests/queries/0_stateless/03172_format_settings_clauses.sql b/tests/queries/0_stateless/03172_format_settings_clauses.sql new file mode 100644 index 00000000000..0d1aa4dcfbb --- /dev/null +++ b/tests/queries/0_stateless/03172_format_settings_clauses.sql @@ -0,0 +1,30 @@ +SET max_block_size = 10, max_threads = 1; + +-- Take the following example: +SELECT 1 UNION ALL SELECT 2 FORMAT TSV; + +-- Each subquery can be put in parentheses and have its own settings: +(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV; + +-- And the whole query can have settings: +(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV SETTINGS max_block_size = 3; + +-- A single query with output is parsed in the same way as the UNION ALL chain: +SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV SETTINGS max_block_size = 3; + +-- So while these forms have a slightly different meaning, they both exist: 
+SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV; +SELECT getSetting('max_block_size') FORMAT TSV SETTINGS max_block_size = 3; + +-- And due to this effect, the users expect that the FORMAT and SETTINGS may go in an arbitrary order. +-- But while this work: +(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) FORMAT TSV SETTINGS max_block_size = 3; + +-- This does not work automatically, unless we explicitly allow different orders: +(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) SETTINGS max_block_size = 3 FORMAT TSV; + +-- Inevitably, we allow this: +SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 SETTINGS max_block_size = 3 FORMAT TSV; +/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/ +-- Because this part is consumed into ASTSelectWithUnionQuery +-- and the rest into ASTQueryWithOutput. From 778807a8883901debc1bfeb72e17b37eb06bcaf0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 17 Jun 2024 08:22:36 +0200 Subject: [PATCH 0140/2361] Fix error; remove garbage --- src/Client/ClientBase.cpp | 33 +++------------------------------ 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 854cc3fef8b..ad4964b4b7c 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include @@ -1937,41 +1938,13 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin global_context->setSettings(*old_settings); }); - auto apply_query_settings = [&](const IAST & settings_ast) - { - if (!old_settings) - old_settings.emplace(global_context->getSettingsRef()); - global_context->applySettingsChanges(settings_ast.as()->changes); - global_context->resetSettingsToDefaultValue(settings_ast.as()->default_settings); - }; - - const auto * insert = parsed_query->as(); - if (const auto * select = parsed_query->as(); select && select->settings()) - apply_query_settings(*select->settings()); - else if (const auto * select_with_union = parsed_query->as()) - { - const ASTs & children = select_with_union->list_of_selects->children; - if (!children.empty()) - { - // On the client it is enough to apply settings only for the - // last SELECT, since the only thing that is important to apply - // on the client is format settings. 
- const auto * last_select = children.back()->as(); - if (last_select && last_select->settings()) - { - apply_query_settings(*last_select->settings()); - } - } - } - else if (const auto * query_with_output = parsed_query->as(); query_with_output && query_with_output->settings_ast) - apply_query_settings(*query_with_output->settings_ast); - else if (insert && insert->settings_ast) - apply_query_settings(*insert->settings_ast); + InterpreterSetQuery::applySettingsFromQuery(parsed_query, global_context); if (!connection->checkConnected(connection_parameters.timeouts)) connect(); ASTPtr input_function; + const auto * insert = parsed_query->as(); if (insert && insert->select) insert->tryFindInputFunction(input_function); From 87a2e5f39018ef16ab6fdd79ad934dce6c45aaf0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 17 Jun 2024 08:30:55 +0200 Subject: [PATCH 0141/2361] Fix error --- src/Client/ClientBase.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index ad4964b4b7c..958a1f50813 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1932,10 +1932,9 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin { /// Temporarily apply query settings to context. - std::optional old_settings; + Settings old_settings = global_context->getSettings(); SCOPE_EXIT_SAFE({ - if (old_settings) - global_context->setSettings(*old_settings); + global_context->setSettings(old_settings); }); InterpreterSetQuery::applySettingsFromQuery(parsed_query, global_context); From 67a539292e5e62f0bf470d11a5b224ef105bea02 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 17 Jun 2024 08:33:11 +0200 Subject: [PATCH 0142/2361] Update test --- tests/queries/0_stateless/01401_FORMAT_SETTINGS.reference | 4 ++-- tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01401_FORMAT_SETTINGS.reference b/tests/queries/0_stateless/01401_FORMAT_SETTINGS.reference index 22405bf1866..a8b99666654 100644 --- a/tests/queries/0_stateless/01401_FORMAT_SETTINGS.reference +++ b/tests/queries/0_stateless/01401_FORMAT_SETTINGS.reference @@ -1,7 +1,7 @@ 1 1 1 -1 -1 +2 +1 2 2 diff --git a/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh b/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh index b70c28422c9..173cc949500 100755 --- a/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh +++ b/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh @@ -13,7 +13,7 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) FORMAT CSV SETTINGS max_block_size = 1' # push down append ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_compress_block_size = 1 FORMAT CSV SETTINGS max_block_size = 1' -# overwrite on push down (since these settings goes latest) +# not overwrite on push down ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 2 FORMAT CSV SETTINGS max_block_size = 1' # on push-down ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 1 FORMAT CSV' From 4817657375a37c374eb4be72793cba434c16a815 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 17 Jun 2024 23:43:03 +0200 Subject: [PATCH 
0143/2361] Revert "Revert "Fix AWS ECS"" --- contrib/aws-crt-cpp | 2 +- .../ProxyConfigurationResolverProvider.cpp | 35 +++++++------------ .../ProxyConfigurationResolverProvider.h | 5 ++- src/IO/ReadWriteBufferFromHTTP.cpp | 2 +- src/IO/S3/Client.cpp | 6 ++-- src/IO/S3/PocoHTTPClient.cpp | 20 +++++++++-- src/IO/S3/PocoHTTPClient.h | 14 ++++---- src/IO/S3/PocoHTTPClientFactory.cpp | 5 ++- .../0_stateless/03170_ecs_crash.reference | 4 +++ tests/queries/0_stateless/03170_ecs_crash.sh | 9 +++++ 10 files changed, 61 insertions(+), 41 deletions(-) create mode 100644 tests/queries/0_stateless/03170_ecs_crash.reference create mode 100755 tests/queries/0_stateless/03170_ecs_crash.sh diff --git a/contrib/aws-crt-cpp b/contrib/aws-crt-cpp index f532d6abc0d..0217761556a 160000 --- a/contrib/aws-crt-cpp +++ b/contrib/aws-crt-cpp @@ -1 +1 @@ -Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0 +Subproject commit 0217761556a7ba7ec537fe933d0ab1159096746e diff --git a/src/Common/ProxyConfigurationResolverProvider.cpp b/src/Common/ProxyConfigurationResolverProvider.cpp index b06073121e7..a46837bfdb9 100644 --- a/src/Common/ProxyConfigurationResolverProvider.cpp +++ b/src/Common/ProxyConfigurationResolverProvider.cpp @@ -112,9 +112,8 @@ namespace return configuration.has(config_prefix + ".uri"); } - /* - * New syntax requires protocol prefix " or " - * */ + /* New syntax requires protocol prefix " or " + */ std::optional getProtocolPrefix( ProxyConfiguration::Protocol request_protocol, const String & config_prefix, @@ -130,22 +129,18 @@ namespace return protocol_prefix; } - template std::optional calculatePrefixBasedOnSettingsSyntax( + bool new_syntax, ProxyConfiguration::Protocol request_protocol, const String & config_prefix, const Poco::Util::AbstractConfiguration & configuration ) { if (!configuration.has(config_prefix)) - { return std::nullopt; - } - if constexpr (new_syntax) - { + if (new_syntax) return getProtocolPrefix(request_protocol, config_prefix, configuration); - } return config_prefix; } @@ -155,24 +150,21 @@ std::shared_ptr ProxyConfigurationResolverProvider:: Protocol request_protocol, const Poco::Util::AbstractConfiguration & configuration) { - if (auto resolver = getFromSettings(request_protocol, "proxy", configuration)) - { + if (auto resolver = getFromSettings(true, request_protocol, "proxy", configuration)) return resolver; - } return std::make_shared( request_protocol, isTunnelingDisabledForHTTPSRequestsOverHTTPProxy(configuration)); } -template std::shared_ptr ProxyConfigurationResolverProvider::getFromSettings( + bool new_syntax, Protocol request_protocol, const String & config_prefix, - const Poco::Util::AbstractConfiguration & configuration -) + const Poco::Util::AbstractConfiguration & configuration) { - auto prefix_opt = calculatePrefixBasedOnSettingsSyntax(request_protocol, config_prefix, configuration); + auto prefix_opt = calculatePrefixBasedOnSettingsSyntax(new_syntax, request_protocol, config_prefix, configuration); if (!prefix_opt) { @@ -195,20 +187,17 @@ std::shared_ptr ProxyConfigurationResolverProvider:: std::shared_ptr ProxyConfigurationResolverProvider::getFromOldSettingsFormat( Protocol request_protocol, const String & config_prefix, - const Poco::Util::AbstractConfiguration & configuration -) + const Poco::Util::AbstractConfiguration & configuration) { - /* - * First try to get it from settings only using the combination of config_prefix and configuration. + /* First try to get it from settings only using the combination of config_prefix and configuration. 
* This logic exists for backward compatibility with old S3 storage specific proxy configuration. * */ - if (auto resolver = ProxyConfigurationResolverProvider::getFromSettings(request_protocol, config_prefix + ".proxy", configuration)) + if (auto resolver = ProxyConfigurationResolverProvider::getFromSettings(false, request_protocol, config_prefix + ".proxy", configuration)) { return resolver; } - /* - * In case the combination of config_prefix and configuration does not provide a resolver, try to get it from general / new settings. + /* In case the combination of config_prefix and configuration does not provide a resolver, try to get it from general / new settings. * Falls back to Environment resolver if no configuration is found. * */ return ProxyConfigurationResolverProvider::get(request_protocol, configuration); diff --git a/src/Common/ProxyConfigurationResolverProvider.h b/src/Common/ProxyConfigurationResolverProvider.h index ebf22f7e92a..357b218e499 100644 --- a/src/Common/ProxyConfigurationResolverProvider.h +++ b/src/Common/ProxyConfigurationResolverProvider.h @@ -33,12 +33,11 @@ public: ); private: - template static std::shared_ptr getFromSettings( + bool is_new_syntax, Protocol protocol, const String & config_prefix, - const Poco::Util::AbstractConfiguration & configuration - ); + const Poco::Util::AbstractConfiguration & configuration); }; } diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 303ffb744b5..4f883a9b4ed 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -221,7 +221,7 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP( if (iter == http_header_entries.end()) { - http_header_entries.emplace_back(user_agent, fmt::format("ClickHouse/{}", VERSION_STRING)); + http_header_entries.emplace_back(user_agent, fmt::format("ClickHouse/{}{}", VERSION_STRING, VERSION_OFFICIAL)); } if (!delay_initialization && use_external_buffer) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 9229342b8c1..cbb61deea9f 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -972,10 +972,10 @@ PocoHTTPClientConfiguration ClientFactory::createClientConfiguration( // NOLINT { auto context = Context::getGlobalContextInstance(); chassert(context); - auto proxy_configuration_resolver = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::protocolFromString(protocol), context->getConfigRef()); + auto proxy_configuration_resolver = ProxyConfigurationResolverProvider::get(ProxyConfiguration::protocolFromString(protocol), context->getConfigRef()); - auto per_request_configuration = [=] () { return proxy_configuration_resolver->resolve(); }; - auto error_report = [=] (const DB::ProxyConfiguration & req) { proxy_configuration_resolver->errorReport(req); }; + auto per_request_configuration = [=]{ return proxy_configuration_resolver->resolve(); }; + auto error_report = [=](const ProxyConfiguration & req) { proxy_configuration_resolver->errorReport(req); }; auto config = PocoHTTPClientConfiguration( per_request_configuration, diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index 1cef43530e0..04982f14f36 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -1,4 +1,5 @@ #include +#include #include "config.h" #if USE_AWS_S3 @@ -17,6 +18,7 @@ #include #include #include +#include #include #include @@ -29,6 +31,7 @@ #include + static const int SUCCESS_RESPONSE_MIN = 200; static const int SUCCESS_RESPONSE_MAX = 299; @@ -84,7 +87,7 @@ namespace DB::S3 { 
PocoHTTPClientConfiguration::PocoHTTPClientConfiguration( - std::function per_request_configuration_, + std::function per_request_configuration_, const String & force_region_, const RemoteHostFilter & remote_host_filter_, unsigned int s3_max_redirects_, @@ -94,7 +97,7 @@ PocoHTTPClientConfiguration::PocoHTTPClientConfiguration( bool s3_use_adaptive_timeouts_, const ThrottlerPtr & get_request_throttler_, const ThrottlerPtr & put_request_throttler_, - std::function error_report_) + std::function error_report_) : per_request_configuration(per_request_configuration_) , force_region(force_region_) , remote_host_filter(remote_host_filter_) @@ -107,6 +110,8 @@ PocoHTTPClientConfiguration::PocoHTTPClientConfiguration( , s3_use_adaptive_timeouts(s3_use_adaptive_timeouts_) , error_report(error_report_) { + /// This is used to identify configurations created by us. + userAgent = std::string(VERSION_FULL) + VERSION_OFFICIAL; } void PocoHTTPClientConfiguration::updateSchemeAndRegion() @@ -166,6 +171,17 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & client_config { } +PocoHTTPClient::PocoHTTPClient(const Aws::Client::ClientConfiguration & client_configuration) + : timeouts(ConnectionTimeouts() + .withConnectionTimeout(Poco::Timespan(client_configuration.connectTimeoutMs * 1000)) + .withSendTimeout(Poco::Timespan(client_configuration.requestTimeoutMs * 1000)) + .withReceiveTimeout(Poco::Timespan(client_configuration.requestTimeoutMs * 1000)) + .withTCPKeepAliveTimeout(Poco::Timespan( + client_configuration.enableTcpKeepAlive ? client_configuration.tcpKeepAliveIntervalMs * 1000 : 0))), + remote_host_filter(Context::getGlobalContextInstance()->getRemoteHostFilter()) +{ +} + std::shared_ptr PocoHTTPClient::MakeRequest( const std::shared_ptr & request, Aws::Utils::RateLimits::RateLimiterInterface * readLimiter, diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 88251b964e2..18a21649167 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -38,7 +38,7 @@ class PocoHTTPClient; struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration { - std::function per_request_configuration; + std::function per_request_configuration; String force_region; const RemoteHostFilter & remote_host_filter; unsigned int s3_max_redirects; @@ -54,13 +54,13 @@ struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration size_t http_keep_alive_timeout = DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT; size_t http_keep_alive_max_requests = DEFAULT_HTTP_KEEP_ALIVE_MAX_REQUEST; - std::function error_report; + std::function error_report; void updateSchemeAndRegion(); private: PocoHTTPClientConfiguration( - std::function per_request_configuration_, + std::function per_request_configuration_, const String & force_region_, const RemoteHostFilter & remote_host_filter_, unsigned int s3_max_redirects_, @@ -70,8 +70,7 @@ private: bool s3_use_adaptive_timeouts_, const ThrottlerPtr & get_request_throttler_, const ThrottlerPtr & put_request_throttler_, - std::function error_report_ - ); + std::function error_report_); /// Constructor of Aws::Client::ClientConfiguration must be called after AWS SDK initialization. 
friend ClientFactory; @@ -120,6 +119,7 @@ class PocoHTTPClient : public Aws::Http::HttpClient { public: explicit PocoHTTPClient(const PocoHTTPClientConfiguration & client_configuration); + explicit PocoHTTPClient(const Aws::Client::ClientConfiguration & client_configuration); ~PocoHTTPClient() override = default; std::shared_ptr MakeRequest( @@ -166,8 +166,8 @@ protected: static S3MetricKind getMetricKind(const Aws::Http::HttpRequest & request); void addMetric(const Aws::Http::HttpRequest & request, S3MetricType type, ProfileEvents::Count amount = 1) const; - std::function per_request_configuration; - std::function error_report; + std::function per_request_configuration; + std::function error_report; ConnectionTimeouts timeouts; const RemoteHostFilter & remote_host_filter; unsigned int s3_max_redirects; diff --git a/src/IO/S3/PocoHTTPClientFactory.cpp b/src/IO/S3/PocoHTTPClientFactory.cpp index ef7af2d01ba..abec907778c 100644 --- a/src/IO/S3/PocoHTTPClientFactory.cpp +++ b/src/IO/S3/PocoHTTPClientFactory.cpp @@ -15,7 +15,10 @@ namespace DB::S3 std::shared_ptr PocoHTTPClientFactory::CreateHttpClient(const Aws::Client::ClientConfiguration & client_configuration) const { - return std::make_shared(static_cast(client_configuration)); + if (client_configuration.userAgent.starts_with("ClickHouse")) + return std::make_shared(static_cast(client_configuration)); + else /// This client is created inside the AWS SDK with default settings to obtain ECS credentials from localhost. + return std::make_shared(client_configuration); } std::shared_ptr PocoHTTPClientFactory::CreateHttpRequest( diff --git a/tests/queries/0_stateless/03170_ecs_crash.reference b/tests/queries/0_stateless/03170_ecs_crash.reference new file mode 100644 index 00000000000..acd7c60768b --- /dev/null +++ b/tests/queries/0_stateless/03170_ecs_crash.reference @@ -0,0 +1,4 @@ +1 2 3 +4 5 6 +7 8 9 +0 0 0 diff --git a/tests/queries/0_stateless/03170_ecs_crash.sh b/tests/queries/0_stateless/03170_ecs_crash.sh new file mode 100755 index 00000000000..fa6870c4cf2 --- /dev/null +++ b/tests/queries/0_stateless/03170_ecs_crash.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +# Previous versions crashed in attempt to use this authentication method (regardless of whether it was able to authenticate): +AWS_CONTAINER_CREDENTIALS_FULL_URI=http://localhost:1338/latest/meta-data/container/security-credentials $CLICKHOUSE_LOCAL -q "select * from s3('http://localhost:11111/test/a.tsv')" From c448b0e2ae366c25b5ef5943d550acd181b04fd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 08:21:10 +0000 Subject: [PATCH 0144/2361] Do not require setting experimental flag on attach --- src/Storages/Kafka/StorageKafkaCommon.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index adfe1086858..2e2d53dd8d1 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -467,7 +467,7 @@ void registerStorageKafka(StorageFactory & factory) return std::make_shared( args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); - if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper) + if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper && !args.query.attach) throw Exception( ErrorCodes::SUPPORT_IS_DISABLED, "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_store_offsets_in_keeper` setting " From 20bac3ed5fff99b7c31a185506022c0cb39ade85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 10:25:46 +0000 Subject: [PATCH 0145/2361] Remove unnecessary rdkafka ops --- src/Storages/Kafka/KafkaConsumer2.cpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 1e2ea3fd43a..41ce7c43131 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -194,16 +194,13 @@ void KafkaConsumer2::drainConsumerQueue() void KafkaConsumer2::pollEvents() { - static constexpr int64_t max_tries = 5; - auto consumer_has_subscription = !consumer->get_subscription().empty(); - for (auto i = 0; i < max_tries && !consumer_has_subscription; ++i) - { - consumer->subscribe(topics); - consumer_has_subscription = !consumer->get_subscription().empty(); - } auto msg = consumer->poll(EVENT_POLL_TIMEOUT); // All the partition queues are detached, so the consumer shouldn't be able to poll any messages chassert(!msg && "Consumer returned a message when it was not expected"); + + // static constexpr int64_t max_tries = 5; + // for(auto i = 0; i < max_tries; ++i) + // consumer->poll(EVENT_POLL_TIMEOUT); }; KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const From 766130bc98c116d198343f8fee6e0e5527fad712 Mon Sep 17 00:00:00 2001 From: skyoct Date: Tue, 18 Jun 2024 19:16:32 +0800 Subject: [PATCH 0146/2361] feat: add etag for object storage --- .../AzureBlobStorage/AzureObjectStorage.cpp | 2 ++ src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp | 2 +- src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp | 1 + src/Disks/ObjectStorages/IObjectStorage.h | 1 + src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 4 +++- src/IO/S3/getObjectInfo.cpp | 2 ++ src/IO/S3/getObjectInfo.h | 1 + .../ObjectStorage/StorageObjectStorageSource.cpp | 3 ++- src/Storages/VirtualColumnUtils.cpp | 10 +++++++++- src/Storages/VirtualColumnUtils.h | 2 +- 10 files changed, 23 
insertions(+), 5 deletions(-) diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index e7ecf7cd515..e4b85b79ab4 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -86,6 +86,7 @@ private: Poco::Timestamp::fromEpochTime( std::chrono::duration_cast( static_cast(blob.Details.LastModified).time_since_epoch()).count()), + blob.Details.ETag.ToString(), {}})); } @@ -186,6 +187,7 @@ void AzureObjectStorage::listObjects(const std::string & path, RelativePathsWith Poco::Timestamp::fromEpochTime( std::chrono::duration_cast( static_cast(blob.Details.LastModified).time_since_epoch()).count()), + blob.Details.ETag.ToString(), {}})); } diff --git a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp index 44854633d65..e9114c75077 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp @@ -205,7 +205,7 @@ void DiskObjectStorageMetadata::addObject(ObjectStorageKey key, size_t size) } total_size += size; - keys_with_meta.emplace_back(std::move(key), ObjectMetadata{size, {}, {}}); + keys_with_meta.emplace_back(std::move(key), ObjectMetadata{size, {}, {}, {}}); } ObjectKeyWithMetadata DiskObjectStorageMetadata::popLastObject() diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index dcb2af9d4d3..a28f1888020 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -221,6 +221,7 @@ void HDFSObjectStorage::listObjects(const std::string & path, RelativePathsWithM ObjectMetadata{ static_cast(ls.file_info[i].mSize), Poco::Timestamp::fromEpochTime(ls.file_info[i].mLastMod), + "", {}})); } diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index 7bc9e4073db..c1402522c5f 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -54,6 +54,7 @@ struct ObjectMetadata { uint64_t size_bytes = 0; Poco::Timestamp last_modified; + std::string etag; ObjectAttributes attributes; }; diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 63e11dcd8c8..7b2f71a828e 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -145,7 +145,7 @@ private: auto objects = outcome.GetResult().GetContents(); for (const auto & object : objects) { - ObjectMetadata metadata{static_cast(object.GetSize()), Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()), {}}; + ObjectMetadata metadata{static_cast(object.GetSize()), Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()), object.GetETag(), {}}; batch.emplace_back(std::make_shared(object.GetKey(), std::move(metadata))); } @@ -329,6 +329,7 @@ void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMet ObjectMetadata{ static_cast(object.GetSize()), Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()), + object.GetETag(), {}})); if (max_keys) @@ -476,6 +477,7 @@ ObjectMetadata S3ObjectStorage::getObjectMetadata(const std::string & path) cons ObjectMetadata result; result.size_bytes = object_info.size; result.last_modified = 
Poco::Timestamp::fromEpochTime(object_info.last_modification_time); + result.etag = object_info.etag; result.attributes = object_info.metadata; return result; diff --git a/src/IO/S3/getObjectInfo.cpp b/src/IO/S3/getObjectInfo.cpp index 9271ad820e4..afa4079c261 100644 --- a/src/IO/S3/getObjectInfo.cpp +++ b/src/IO/S3/getObjectInfo.cpp @@ -54,6 +54,8 @@ namespace ObjectInfo object_info; object_info.size = static_cast(result.GetContentLength()); object_info.last_modification_time = result.GetLastModified().Seconds(); + String etag(result.GetETag.c_str(), result.GetETag().size()); + object_info.etag = etag; if (with_metadata) object_info.metadata = result.GetMetadata(); diff --git a/src/IO/S3/getObjectInfo.h b/src/IO/S3/getObjectInfo.h index 32f34f74069..2fec407f70e 100644 --- a/src/IO/S3/getObjectInfo.h +++ b/src/IO/S3/getObjectInfo.h @@ -15,6 +15,7 @@ struct ObjectInfo { size_t size = 0; time_t last_modification_time = 0; + String etag = ""; std::map metadata = {}; /// Set only if getObjectInfo() is called with `with_metadata = true`. }; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 2fc6993369d..8554fd9235d 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -201,7 +201,8 @@ Chunk StorageObjectStorageSource::generate() .path = getUniqueStoragePathIdentifier(*configuration, reader.getObjectInfo(), false), .size = object_info.metadata->size_bytes, .filename = &filename, - .last_modified = object_info.metadata->last_modified + .last_modified = object_info.metadata->last_modified, + .etag = &(object_info.metadata->etag) }); return chunk; } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 778c9e13adb..960fff371a7 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -112,7 +112,7 @@ void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context) NameSet getVirtualNamesForFileLikeStorage() { - return {"_path", "_file", "_size", "_time"}; + return {"_path", "_file", "_size", "_time", "_etag", "_last_modified"}; } VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns) @@ -131,6 +131,7 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription add_virtual("_file", std::make_shared(std::make_shared())); add_virtual("_size", makeNullable(std::make_shared())); add_virtual("_time", makeNullable(std::make_shared())); + add_virtual("_etag", std::make_shared(std::make_shared())); return desc; } @@ -226,6 +227,13 @@ void addRequestedFileLikeStorageVirtualsToChunk( else chunk.addColumn(virtual_column.type->createColumnConstWithDefaultValue(chunk.getNumRows())->convertToFullColumnIfConst()); } + else if (virtual_column.name == "_etag") + { + if (virtual_values.etag) + chunk.addColumn(virtual_column.type->createColumnConst(chunk.getNumRows(), (*virtual_values.etag))->convertToFullColumnIfConst()); + else + chunk.addColumn(virtual_column.type->createColumnConstWithDefaultValue(chunk.getNumRows())->convertToFullColumnIfConst()); + } } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index fbfbdd6c6cc..dc178277556 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -74,7 +74,7 @@ struct VirtualsForFileLikeStorage std::optional size { std::nullopt }; const String * filename { nullptr }; std::optional 
last_modified { std::nullopt }; - + const String * etag { nullptr }; }; void addRequestedFileLikeStorageVirtualsToChunk( From 7fa6111865093e0f7e396faae4013275df371fcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 11:41:46 +0000 Subject: [PATCH 0147/2361] Make StorageKafka2 handle keeper session better --- src/Storages/Kafka/StorageKafka2.cpp | 384 +++++++++++++++++++-------- src/Storages/Kafka/StorageKafka2.h | 35 ++- 2 files changed, 303 insertions(+), 116 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index de27adbbe3f..86330cbf122 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -92,6 +93,8 @@ extern const int LOGICAL_ERROR; extern const int REPLICA_ALREADY_EXISTS; extern const int TABLE_IS_DROPPED; extern const int TABLE_WAS_NOT_DROPPED; +extern const int NO_ZOOKEEPER; +extern const int REPLICA_IS_ALREADY_ACTIVE; } namespace @@ -109,6 +112,8 @@ StorageKafka2::StorageKafka2( : IStorage(table_id_) , WithContext(context_->getGlobalContext()) , keeper(getContext()->getZooKeeper()) + , keeper_path(kafka_settings_->kafka_keeper_path.value) + , replica_path(keeper_path + "/replicas/" + kafka_settings_->kafka_replica_name.value) , kafka_settings(std::move(kafka_settings_)) , macros_info{.table_id = table_id_} , topics(parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) @@ -122,11 +127,12 @@ StorageKafka2::StorageKafka2( , max_rows_per_message(kafka_settings->kafka_max_rows_per_message.value) , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value, macros_info)) , num_consumers(kafka_settings->kafka_num_consumers.value) - , log(getLogger("StorageKafka2 (" + table_id_.table_name + ")")) + , log(getLogger(String("StorageKafka2 ") + table_id_.getNameForLogs())) , semaphore(0, static_cast(num_consumers)) , settings_adjustments(createSettingsAdjustments()) , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) , collection_name(collection_name_) + , active_node_identifier(toString(ServerUUID::get())) { if (kafka_settings->kafka_num_consumers > 1 && !thread_per_consumer) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "With multiple consumer you have to use thread per consumer!"); @@ -149,23 +155,13 @@ StorageKafka2::StorageKafka2( tasks.emplace_back(std::make_shared(std::move(task))); } - for (size_t i = 0; i < num_consumers; ++i) - { - try - { - consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = keeper}); - ++num_created_consumers; - } - catch (const cppkafka::Exception &) - { - tryLogCurrentException(log); - } - } - const auto first_replica = createTableIfNotExists(); if (!first_replica) createReplica(); + + activating_task = getContext()->getSchedulePool().createTask(log->name() + "(activating task)", [this]() { activate(); }); + activating_task->deactivate(); } VirtualColumnsDescription StorageKafka2::createVirtuals(StreamingHandleErrorMode handle_error_mode) @@ -189,6 +185,150 @@ VirtualColumnsDescription StorageKafka2::createVirtuals(StreamingHandleErrorMode return desc; } +void StorageKafka2::partialShutdown() +{ + for (auto & task : tasks) + { + LOG_TRACE(log, "Cancelling streams"); + task->stream_cancelled = true; + } + + for (auto & task : tasks) + { + LOG_TRACE(log, "Waiting for cleanup"); + task->holder->deactivate(); + } + is_active = false; +} + +bool 
StorageKafka2::activate() +{ + LOG_TEST(log, "activate task"); + if (is_active && !getZooKeeper()->expired()) + { + LOG_TEST(log, "No need to activate"); + return true; + } + + if (first_time) + { + LOG_DEBUG(log, "Activating replica"); + assert(!is_active); + } + else if (!is_active) + { + LOG_WARNING(log, "Table was not active. Will try to activate it"); + } + else if (getZooKeeper()->expired()) + { + LOG_WARNING(log, "ZooKeeper session has expired. Switching to a new session"); + partialShutdown(); + } + else + { + UNREACHABLE(); + } + + try + { + setZooKeeper(); + } + catch (const Coordination::Exception &) + { + /// The exception when you try to zookeeper_init usually happens if DNS does not work or the connection with ZK fails + tryLogCurrentException(log, "Failed to establish a new ZK connection. Will try again"); + assert(!is_active); + return false; + } + + if (shutdown_called) + return false; + + auto activate_in_keeper = [this]() + { + try + { + auto zookeeper = getZooKeeper(); + + String is_active_path = fs::path(replica_path) / "is_active"; + zookeeper->deleteEphemeralNodeIfContentMatches(is_active_path, active_node_identifier); + + /// Simultaneously declare that this replica is active, and update the host. + Coordination::Requests ops; + ops.emplace_back(zkutil::makeCreateRequest(is_active_path, active_node_identifier, zkutil::CreateMode::Ephemeral)); + + try + { + zookeeper->create(is_active_path, active_node_identifier, zkutil::CreateMode::Ephemeral); + } + catch (const Coordination::Exception & e) + { + if (e.code == Coordination::Error::ZNODEEXISTS) + throw Exception( + ErrorCodes::REPLICA_IS_ALREADY_ACTIVE, + "Replica {} appears to be already active. If you're sure it's not, " + "try again in a minute or remove znode {}/is_active manually", + replica_path, + replica_path); + + throw; + } + replica_is_active_node = zkutil::EphemeralNodeHolder::existing(is_active_path, *zookeeper); + + return true; + } + catch (...) + { + replica_is_active_node = nullptr; + + try + { + throw; + } + catch (const Coordination::Exception & e) + { + LOG_ERROR(log, "Couldn't start replica: {}. {}", e.what(), DB::getCurrentExceptionMessage(true)); + return false; + } + catch (const Exception & e) + { + if (e.code() != ErrorCodes::REPLICA_IS_ALREADY_ACTIVE) + throw; + + LOG_ERROR(log, "Couldn't start replica: {}. 
{}", e.what(), DB::getCurrentExceptionMessage(true)); + return false; + } + } + }; + + if (!activate_in_keeper()) + { + assert(storage.is_readonly); + return false; + } + + is_active = true; + + // Start the reader threads + for (auto & task : tasks) + { + task->stream_cancelled = false; + task->holder->activateAndSchedule(); + } + + if (first_time) + first_time = false; + + LOG_DEBUG(log, "Table activated successfully"); + return true; +} + +void StorageKafka2::assertActive() const +{ + // TODO(antaljanosbenjamin): change LOGICAL_ERROR to something sensible + if (!is_active) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table is not active (replica path: {})", replica_path); +} SettingsChanges StorageKafka2::createSettingsAdjustments() { @@ -282,7 +422,8 @@ void StorageKafka2::startup() { try { - consumers.emplace_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = keeper}); + consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = getZooKeeper()}); + LOG_DEBUG(log, "Created #{} consumer", num_created_consumers); ++num_created_consumers; } catch (const cppkafka::Exception &) @@ -290,28 +431,15 @@ void StorageKafka2::startup() tryLogCurrentException(log); } } - // Start the reader thread - for (auto & task : tasks) - task->holder->activateAndSchedule(); + activating_task->activateAndSchedule(); } void StorageKafka2::shutdown(bool) { shutdown_called = true; - for (auto & task : tasks) - { - LOG_TRACE(log, "Cancelling streams"); - // Interrupt streaming thread - task->stream_cancelled = true; - } - - for (auto & task : tasks) - { - LOG_TRACE(log, "Waiting for cleanup"); - task->holder->deactivate(); - } - + activating_task->deactivate(); + partialShutdown(); LOG_TRACE(log, "Closing consumers"); consumers.clear(); LOG_TRACE(log, "Consumers closed"); @@ -372,7 +500,7 @@ cppkafka::Configuration StorageKafka2::getConsumerConfiguration(size_t consumer_ conf.set("enable.partition.eof", "false"); // Ignore EOF messages for (auto & property : conf.get_all()) - LOG_TRACE(log, "Consumer set property {}:{}", property.first, property.second); + LOG_TEST(log, "Consumer set property {}:{}", property.first, property.second); return conf; } @@ -389,7 +517,7 @@ cppkafka::Configuration StorageKafka2::getProducerConfiguration() updateProducerConfiguration(conf); for (auto & property : conf.get_all()) - LOG_TRACE(log, "Producer set property {}:{}", property.first, property.second); + LOG_TEST(log, "Producer set property {}:{}", property.first, property.second); return conf; } @@ -558,32 +686,32 @@ std::optional getNumber(zkutil::ZooKeeper & keeper, const fs::path & pa bool StorageKafka2::createTableIfNotExists() { - const auto & keeper_path = fs::path(kafka_settings->kafka_keeper_path.value); - - const auto & replicas_path = keeper_path / "replicas"; + // Heavily based on StorageReplicatedMergeTree::createTableIfNotExists + const auto my_keeper_path = fs::path(keeper_path); + const auto replicas_path = my_keeper_path / "replicas"; for (auto i = 0; i < 1000; ++i) { if (keeper->exists(replicas_path)) { - LOG_DEBUG(log, "This table {} is already created, will add new replica", String(keeper_path)); + LOG_DEBUG(log, "This table {} is already created, will add new replica", keeper_path); return false; } /// There are leftovers from incompletely dropped table. 
- if (keeper->exists(keeper_path / "dropped")) + if (keeper->exists(my_keeper_path / "dropped")) { /// This condition may happen when the previous drop attempt was not completed /// or when table is dropped by another replica right now. /// This is Ok because another replica is definitely going to drop the table. - LOG_WARNING(log, "Removing leftovers from table {} (this might take several minutes)", String(keeper_path)); - String drop_lock_path = keeper_path / "dropped" / "lock"; + LOG_WARNING(log, "Removing leftovers from table {}", keeper_path); + String drop_lock_path = my_keeper_path / "dropped" / "lock"; Coordination::Error code = keeper->tryCreate(drop_lock_path, "", zkutil::CreateMode::Ephemeral); if (code == Coordination::Error::ZNONODE || code == Coordination::Error::ZNODEEXISTS) { - LOG_WARNING(log, "The leftovers from table {} were removed by another replica", String(keeper_path)); + LOG_WARNING(log, "The leftovers from table {} were removed by another replica", keeper_path); } else if (code != Coordination::Error::ZOK) { @@ -592,7 +720,7 @@ bool StorageKafka2::createTableIfNotExists() else { auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *keeper); - if (!removeTableNodesFromZooKeeper(metadata_drop_lock)) + if (!removeTableNodesFromZooKeeper(keeper, metadata_drop_lock)) { /// Someone is recursively removing table right now, we cannot create new table until old one is removed continue; @@ -605,7 +733,7 @@ bool StorageKafka2::createTableIfNotExists() ops.emplace_back(zkutil::makeCreateRequest(keeper_path, "", zkutil::CreateMode::Persistent)); - const auto topics_path = keeper_path / "topics"; + const auto topics_path = my_keeper_path / "topics"; ops.emplace_back(zkutil::makeCreateRequest(topics_path, "", zkutil::CreateMode::Persistent)); for (const auto & topic : topics) @@ -621,15 +749,14 @@ bool StorageKafka2::createTableIfNotExists() // Create the first replica ops.emplace_back(zkutil::makeCreateRequest(replicas_path, "", zkutil::CreateMode::Persistent)); - ops.emplace_back( - zkutil::makeCreateRequest(replicas_path / kafka_settings->kafka_replica_name.value, "", zkutil::CreateMode::Persistent)); + ops.emplace_back(zkutil::makeCreateRequest(replica_path, "", zkutil::CreateMode::Persistent)); Coordination::Responses responses; const auto code = keeper->tryMulti(ops, responses); if (code == Coordination::Error::ZNODEEXISTS) { - LOG_INFO(log, "It looks like the table {} was created by another replica at the same moment, will retry", String(keeper_path)); + LOG_INFO(log, "It looks like the table {} was created by another replica at the same moment, will retry", keeper_path); continue; } else if (code != Coordination::Error::ZOK) @@ -637,7 +764,7 @@ bool StorageKafka2::createTableIfNotExists() zkutil::KeeperMultiException::check(code, ops, responses); } - LOG_INFO(log, "Table {} created successfully ", String(keeper_path)); + LOG_INFO(log, "Table {} created successfully ", keeper_path); return true; } @@ -649,25 +776,25 @@ bool StorageKafka2::createTableIfNotExists() } -bool StorageKafka2::removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHolder::Ptr & drop_lock) +bool StorageKafka2::removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr keeper_to_use, const zkutil::EphemeralNodeHolder::Ptr & drop_lock) { bool completely_removed = false; Strings children; - if (const auto code = keeper->tryGetChildren(kafka_settings->kafka_keeper_path.value, children); code == Coordination::Error::ZNONODE) + if (const auto code = 
keeper_to_use->tryGetChildren(keeper_path, children); code == Coordination::Error::ZNONODE) throw Exception(ErrorCodes::LOGICAL_ERROR, "There is a race condition between creation and removal. It's a bug"); - const auto keeper_path = fs::path(kafka_settings->kafka_keeper_path.value); + const auto my_keeper_path = fs::path(keeper_path); for (const auto & child : children) if (child != "dropped") - keeper->tryRemoveRecursive(keeper_path / child); + keeper_to_use->tryRemoveRecursive(my_keeper_path / child); Coordination::Requests ops; Coordination::Responses responses; ops.emplace_back(zkutil::makeRemoveRequest(drop_lock->getPath(), -1)); - ops.emplace_back(zkutil::makeRemoveRequest(keeper_path / "dropped", -1)); - ops.emplace_back(zkutil::makeRemoveRequest(keeper_path, -1)); - const auto code = keeper->tryMulti(ops, responses, /* check_session_valid */ true); + ops.emplace_back(zkutil::makeRemoveRequest(my_keeper_path / "dropped", -1)); + ops.emplace_back(zkutil::makeRemoveRequest(my_keeper_path, -1)); + const auto code = keeper_to_use->tryMulti(ops, responses, /* check_session_valid */ true); if (code == Coordination::Error::ZNONODE) { @@ -680,7 +807,7 @@ bool StorageKafka2::removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHol log, "Table was not completely removed from Keeper, {} still exists and may contain some garbage," "but someone is removing it right now.", - kafka_settings->kafka_keeper_path.value); + keeper_path); } else if (code != Coordination::Error::ZOK) { @@ -691,7 +818,7 @@ bool StorageKafka2::removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHol { drop_lock->setAlreadyRemoved(); completely_removed = true; - LOG_INFO(log, "Table {} was successfully removed from ZooKeeper", kafka_settings->kafka_keeper_path.value); + LOG_INFO(log, "Table {} was successfully removed from ZooKeeper", keeper_path); } return completely_removed; @@ -699,45 +826,50 @@ bool StorageKafka2::removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHol void StorageKafka2::createReplica() { - const auto replica_path = kafka_settings->kafka_keeper_path.value + "/replicas/" + kafka_settings->kafka_replica_name.value; + LOG_INFO(log, "Creating replica {}", replica_path); + // TODO: This can cause issues if a new table is created with the same path. To make this work, we should store some metadata + // about the table to be able to identify that the same table is created, not a new one. 
const auto code = keeper->tryCreate(replica_path, "", zkutil::CreateMode::Persistent); - if (code == Coordination::Error::ZNODEEXISTS) - throw Exception(ErrorCodes::REPLICA_ALREADY_EXISTS, "Replica {} already exists", replica_path); - else if (code == Coordination::Error::ZNONODE) - throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {} was suddenly removed", kafka_settings->kafka_keeper_path.value); - else if (code != Coordination::Error::ZOK) - throw Coordination::Exception::fromPath(code, replica_path); - LOG_INFO(log, "Replica {} created", replica_path); + switch (code) + { + case Coordination::Error::ZNODEEXISTS: + LOG_INFO(log, "Replica {} already exists, will try to use it", replica_path); + break; + case Coordination::Error::ZOK: + LOG_INFO(log, "Replica {} created", replica_path); + break; + case Coordination::Error::ZNONODE: + throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {} was suddenly removed", keeper_path); + default: + throw Coordination::Exception::fromPath(code, replica_path); + } } void StorageKafka2::dropReplica() { - if (keeper->expired()) - throw Exception(ErrorCodes::TABLE_WAS_NOT_DROPPED, "Table was not dropped because ZooKeeper session has expired."); - - auto replica_path = kafka_settings->kafka_keeper_path.value + "/replicas/" + kafka_settings->kafka_replica_name.value; + LOG_INFO(log, "Trying to drop replica {}", replica_path); + auto my_keeper = getZooKeeperIfTableShutDown(); LOG_INFO(log, "Removing replica {}", replica_path); - if (!keeper->exists(replica_path)) + if (!my_keeper->exists(replica_path)) { LOG_INFO(log, "Removing replica {} does not exist", replica_path); return; } { - keeper->tryRemoveChildrenRecursive(replica_path); + my_keeper->tryRemoveChildrenRecursive(replica_path); - if (keeper->tryRemove(replica_path) != Coordination::Error::ZOK) + if (my_keeper->tryRemove(replica_path) != Coordination::Error::ZOK) LOG_ERROR(log, "Replica was not completely removed from Keeper, {} still exists and may contain some garbage.", replica_path); } /// Check that `zookeeper_path` exists: it could have been deleted by another replica after execution of previous line. Strings replicas; - if (Coordination::Error::ZOK != keeper->tryGetChildren(kafka_settings->kafka_keeper_path.value + "/replicas", replicas) - || !replicas.empty()) + if (Coordination::Error::ZOK != my_keeper->tryGetChildren(keeper_path + "/replicas", replicas) || !replicas.empty()) return; LOG_INFO(log, "{} is the last replica, will remove table", replica_path); @@ -758,11 +890,12 @@ void StorageKafka2::dropReplica() /// (The existence of child node does not allow to remove parent node). 
Coordination::Requests ops; Coordination::Responses responses; - String drop_lock_path = kafka_settings->kafka_keeper_path.value + "/dropped/lock"; - ops.emplace_back(zkutil::makeRemoveRequest(kafka_settings->kafka_keeper_path.value + "/replicas", -1)); - ops.emplace_back(zkutil::makeCreateRequest(kafka_settings->kafka_keeper_path.value + "/dropped", "", zkutil::CreateMode::Persistent)); + fs::path my_keeper_path = keeper_path; + String drop_lock_path = my_keeper_path / "dropped" / "lock"; + ops.emplace_back(zkutil::makeRemoveRequest(my_keeper_path / "replicas", -1)); + ops.emplace_back(zkutil::makeCreateRequest(my_keeper_path / "dropped", "", zkutil::CreateMode::Persistent)); ops.emplace_back(zkutil::makeCreateRequest(drop_lock_path, "", zkutil::CreateMode::Ephemeral)); - Coordination::Error code = keeper->tryMulti(ops, responses); + Coordination::Error code = my_keeper->tryMulti(ops, responses); if (code == Coordination::Error::ZNONODE || code == Coordination::Error::ZNODEEXISTS) { @@ -778,9 +911,9 @@ void StorageKafka2::dropReplica() } else { - auto drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *keeper); - LOG_INFO(log, "Removing table {} (this might take several minutes)", kafka_settings->kafka_keeper_path.value); - removeTableNodesFromZooKeeper(drop_lock); + auto drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *my_keeper); + LOG_INFO(log, "Removing table {} (this might take several minutes)", keeper_path); + removeTableNodesFromZooKeeper(my_keeper, drop_lock); } } @@ -1073,6 +1206,7 @@ void StorageKafka2::threadFunc(size_t idx) { assert(idx < tasks.size()); auto task = tasks[idx]; + std::optional maybe_stall_reason; try { auto table_id = getStorageID(); @@ -1082,21 +1216,19 @@ void StorageKafka2::threadFunc(size_t idx) { auto start_time = std::chrono::steady_clock::now(); - mv_attached.store(true); - // Keep streaming as long as there are attached views and streaming is not cancelled while (!task->stream_cancelled && num_created_consumers > 0) { + maybe_stall_reason.reset(); if (!checkDependencies(table_id)) break; LOG_DEBUG(log, "Started streaming to {} attached views", num_views); // Exit the loop & reschedule if some stream stalled - auto some_stream_is_stalled = streamToViews(idx); - if (some_stream_is_stalled) + if (maybe_stall_reason = streamToViews(idx); maybe_stall_reason.has_value()) { - LOG_TRACE(log, "Stream(s) stalled. Reschedule."); + LOG_TRACE(log, "Stream stalled."); break; } @@ -1115,14 +1247,18 @@ void StorageKafka2::threadFunc(size_t idx) tryLogCurrentException(__PRETTY_FUNCTION__); } - mv_attached.store(false); - - // Wait for attached views if (!task->stream_cancelled) - task->holder->scheduleAfter(KAFKA_RESCHEDULE_MS); + { + // Keeper related problems should be solved relatively fast, it makes sense wait less time + if (maybe_stall_reason.has_value() + && (*maybe_stall_reason == StallReason::KeeperSessionEnded || *maybe_stall_reason == StallReason::CouldNotAcquireLocks)) + task->holder->scheduleAfter(KAFKA_RESCHEDULE_MS / 10); + else + task->holder->scheduleAfter(KAFKA_RESCHEDULE_MS); + } } -bool StorageKafka2::streamToViews(size_t idx) +std::optional StorageKafka2::streamToViews(size_t idx) { // This function is written assuming that each consumer has their own thread. This means once this is changed, this function should be revisited. // The return values should be revisited, as stalling all consumers because of a single one stalled is not a good idea. 
@@ -1140,21 +1276,24 @@ bool StorageKafka2::streamToViews(size_t idx) // To keep the consumer alive const auto wait_for_assignment = consumer_info.locks.empty(); - LOG_TRACE(log, "Polling consumer for events"); + LOG_TRACE(log, "Polling consumer {} for events", idx); consumer->pollEvents(); if (wait_for_assignment) { while (nullptr == consumer->getKafkaAssignment() && consumer_info.watch.elapsedMilliseconds() < MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) consumer->pollEvents(); + LOG_INFO(log, "Consumer has assignment: {}", nullptr == consumer->getKafkaAssignment()); } try { if (consumer->needsOffsetUpdate() || consumer_info.locks.empty()) { + LOG_TRACE(log, "Consumer needs update offset"); // First release the locks so let other consumers acquire them ASAP consumer_info.locks.clear(); + consumer_info.topic_partitions.clear(); const auto * current_assignment = consumer->getKafkaAssignment(); if (current_assignment == nullptr) @@ -1162,13 +1301,15 @@ bool StorageKafka2::streamToViews(size_t idx) // The consumer lost its assignment and haven't received a new one. // By returning true this function reports the current consumer as a "stalled" stream, which LOG_TRACE(log, "No assignment"); - return true; + return StallReason::NoAssignment; } - LOG_TRACE(log, "Consumer needs update offset"); consumer_info.consume_from_topic_partition_index = 0; - consumer_info.locks.clear(); - consumer_info.topic_partitions.clear(); + if (consumer_info.keeper->expired()) + { + consumer_info.keeper = getZooKeeperAndAssertActive(); + LOG_TEST(log, "Got new zookeeper"); + } auto maybe_locks = lockTopicPartitions(*consumer_info.keeper, *current_assignment); @@ -1176,7 +1317,7 @@ bool StorageKafka2::streamToViews(size_t idx) { // We couldn't acquire locks, probably some other consumers are still holding them. 
LOG_TRACE(log, "Couldn't acquire locks"); - return true; + return StallReason::CouldNotAcquireLocks; } consumer_info.locks = std::move(*maybe_locks); @@ -1200,7 +1341,7 @@ bool StorageKafka2::streamToViews(size_t idx) if (consumer_info.topic_partitions.empty()) { LOG_TRACE(log, "Consumer {} has assignment, but has no partitions, probably because there are more consumers in the consumer group than partitions.", idx); - return true; + return StallReason::NoPartitions; } LOG_TRACE(log, "Trying to consume from consumer {}", idx); const auto maybe_rows = streamFromConsumer(consumer_info); @@ -1213,22 +1354,22 @@ bool StorageKafka2::streamToViews(size_t idx) else { LOG_DEBUG(log, "Couldn't stream any messages"); - return true; + return StallReason::NoMessages; } } catch (const zkutil::KeeperException & e) { if (Coordination::isHardwareError(e.code)) { - // Clear ephemeral nodes here as we got a new keeper here + LOG_INFO(log, "Cleaning up topic-partitions locks because of exception: {}", e.displayText()); consumer_info.locks.clear(); - consumer_info.keeper = getZooKeeper(); - return true; + activating_task->schedule(); + return StallReason::KeeperSessionEnded; } throw; } - return false; + return {}; } @@ -1300,30 +1441,51 @@ std::optional StorageKafka2::streamFromConsumer(ConsumerAndAssignmentInf } lock_info.committed_offset = last_read_offset + 1; topic_partition.offset = last_read_offset + 1; - consumer_info.consumer->commit(topic_partition); saveCommittedOffset(keeper_to_use, topic_partition); + consumer_info.consumer->commit(topic_partition); lock_info.intent_size.reset(); needs_offset_reset = false; return rows; } - -zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() +void StorageKafka2::setZooKeeper() +{ + std::unique_lock lock{keeper_mutex}; + keeper = getContext()->getZooKeeper(); +} + +zkutil::ZooKeeperPtr StorageKafka2::tryGetZooKeeper() const { std::unique_lock lock{keeper_mutex}; - if (keeper->expired()) - { - keeper = keeper->startNewSession(); - } return keeper; } +zkutil::ZooKeeperPtr StorageKafka2::getZooKeeper() const +{ + auto res = tryGetZooKeeper(); + if (!res) + throw Exception(ErrorCodes::NO_ZOOKEEPER, "Cannot get ZooKeeper"); + return res; +} + +zkutil::ZooKeeperPtr StorageKafka2::getZooKeeperAndAssertActive() const +{ + auto res = getZooKeeper(); + assertActive(); + return res; +} + +zkutil::ZooKeeperPtr StorageKafka2::getZooKeeperIfTableShutDown() const +{ + zkutil::ZooKeeperPtr new_zookeeper = getContext()->getZooKeeper(); + new_zookeeper->sync(keeper_path); + return new_zookeeper; +} fs::path StorageKafka2::getTopicPartitionPath(const TopicPartition & topic_partition) { - return fs::path(kafka_settings->kafka_keeper_path.value) / "topics" / topic_partition.topic / "partitions" - / std::to_string(topic_partition.partition_id); + return fs::path(keeper_path) / "topics" / topic_partition.topic / "partitions" / std::to_string(topic_partition.partition_id); } } diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index a2cbdce51a0..99c97caf9da 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -127,8 +127,10 @@ private: }; // Configuration and state - std::mutex keeper_mutex; + mutable std::mutex keeper_mutex; zkutil::ZooKeeperPtr keeper; + String keeper_path; + String replica_path; std::unique_ptr kafka_settings; Macros::MacroExpansionInfo macros_info; const Names topics; @@ -142,7 +144,6 @@ private: LoggerPtr log; Poco::Semaphore semaphore; const SettingsChanges settings_adjustments; - std::atomic 
mv_attached = false; /// Can differ from num_consumers in case of exception in startup() (or if startup() hasn't been called). /// In this case we still need to be able to shutdown() properly. size_t num_created_consumers = 0; /// number of actually created consumers. @@ -156,6 +157,16 @@ private: String collection_name; std::atomic shutdown_called = false; + // Handling replica activation. + std::atomic is_active = false; + zkutil::EphemeralNodeHolderPtr replica_is_active_node; + BackgroundSchedulePool::TaskHolder activating_task; + String active_node_identifier; + bool first_time = true; + bool activate(); + void partialShutdown(); + + void assertActive() const; SettingsChanges createSettingsAdjustments(); KafkaConsumer2Ptr createConsumer(size_t consumer_number); // Returns full consumer related configuration, also the configuration @@ -186,7 +197,16 @@ private: static Names parseTopics(String topic_list); static String getDefaultClientId(const StorageID & table_id_); - bool streamToViews(size_t idx); + enum class StallReason + { + NoAssignment, + CouldNotAcquireLocks, + NoPartitions, + NoMessages, + KeeperSessionEnded, + }; + + std::optional streamToViews(size_t idx); std::optional streamFromConsumer(ConsumerAndAssignmentInfo & consumer_info); @@ -195,7 +215,7 @@ private: // Returns true if this is the first replica bool createTableIfNotExists(); // Returns true if all of the nodes were cleaned up - bool removeTableNodesFromZooKeeper(const zkutil::EphemeralNodeHolder::Ptr & drop_lock); + bool removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr keeper_to_use, const zkutil::EphemeralNodeHolder::Ptr & drop_lock); // Creates only the replica in ZooKeeper. Shouldn't be called on the first replica as it is created in createTableIfNotExists void createReplica(); void dropReplica(); @@ -212,7 +232,12 @@ private: Stopwatch & watch, const ContextPtr & context); - zkutil::ZooKeeperPtr getZooKeeper(); + void setZooKeeper(); + zkutil::ZooKeeperPtr tryGetZooKeeper() const; + zkutil::ZooKeeperPtr getZooKeeper() const; + zkutil::ZooKeeperPtr getZooKeeperAndAssertActive() const; + zkutil::ZooKeeperPtr getZooKeeperIfTableShutDown() const; + std::filesystem::path getTopicPartitionPath(const TopicPartition & topic_partition); From 6e1a9015100d00113c6367619bb63bd9f04ecb0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 11:42:58 +0000 Subject: [PATCH 0148/2361] Make big chunk of integration tests to work with new storage kafka --- tests/integration/test_storage_kafka/test.py | 90 +++++++++++--------- 1 file changed, 49 insertions(+), 41 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 74532de4878..994ed8cb7b1 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1856,6 +1856,8 @@ def test_kafka_recreate_kafka_table(kafka_cluster, create_query_generator, log_l """ ) + instance.rotate_logs() + kafka_produce(kafka_cluster, "recreate_kafka_table", messages) instance.query(create_query) @@ -1935,15 +1937,17 @@ def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line) instance.query( """{create_query}; - CREATE MATERIALIZED VIEW test.consumer Engine=Log AS + CREATE TABLE test.view (key UInt64, value String) + ENGINE = MergeTree() + ORDER BY key; + + CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; """.format( create_query=create_query_generator( "kafka", "key UInt64, value String", 
topic_list=topic_name, - # brokers="kafka1:19092", - # consumer_group=f"{topic_name}_group", format="JSONEachRow", settings={"kafka_flush_interval_ms": 1000}, ), @@ -1955,12 +1959,12 @@ def test_librdkafka_compression(kafka_cluster, create_query_generator, log_line) instance.wait_for_log_line( log_line.format(offset=number_of_messages, topic=topic_name) ) - - result = instance.query("SELECT * FROM test.consumer") + result = instance.query("SELECT * FROM test.view") assert TSV(result) == TSV(expected) instance.query("DROP TABLE test.kafka SYNC") instance.query("DROP TABLE test.consumer SYNC") + instance.query("DROP TABLE test.view SYNC") @pytest.mark.parametrize( @@ -2586,22 +2590,23 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) instance.rotate_logs() -# TODO(antaljanosbenjamin) -def test_kafka_producer_consumer_separate_settings(kafka_cluster): +@pytest.mark.parametrize( + "create_query_generator, do_direct_read", + [(generate_old_create_table_query, True), (generate_new_create_table_query, False)], +) +def test_kafka_producer_consumer_separate_settings(kafka_cluster, create_query_generator, do_direct_read): + instance.rotate_logs() instance.query( - """ - DROP TABLE IF EXISTS test.test_kafka; - CREATE TABLE test.test_kafka (key UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = 'separate_settings', - kafka_group_name = 'test', - kafka_format = 'JSONEachRow', - kafka_row_delimiter = '\\n'; - """ + create_query_generator( + "test_kafka", + "key UInt64", + topic_list="separate_settings", + consumer_group="test" + ) ) - instance.query("SELECT * FROM test.test_kafka") + if do_direct_read: + instance.query("SELECT * FROM test.test_kafka") instance.query("INSERT INTO test.test_kafka VALUES (1)") assert instance.contains_in_log("Kafka producer created") @@ -2616,11 +2621,11 @@ def test_kafka_producer_consumer_separate_settings(kafka_cluster): # and producer configurations assert "heartbeat.interval.ms" in warn - kafka_consumer_applyed_properties = instance.grep_in_log("Consumer set property") - kafka_producer_applyed_properties = instance.grep_in_log("Producer set property") + kafka_consumer_applied_properties = instance.grep_in_log("Consumer set property") + kafka_producer_applied_properties = instance.grep_in_log("Producer set property") - assert kafka_consumer_applyed_properties is not None - assert kafka_producer_applyed_properties is not None + assert kafka_consumer_applied_properties is not None + assert kafka_producer_applied_properties is not None # global settings should be applied for consumer and producer global_settings = { @@ -2630,38 +2635,38 @@ def test_kafka_producer_consumer_separate_settings(kafka_cluster): for name, value in global_settings.items(): property_in_log = f"{name}:{value}" - assert property_in_log in kafka_consumer_applyed_properties - assert property_in_log in kafka_producer_applyed_properties + assert property_in_log in kafka_consumer_applied_properties + assert property_in_log in kafka_producer_applied_properties settings_topic__separate_settings__consumer = {"session.timeout.ms": "6001"} for name, value in settings_topic__separate_settings__consumer.items(): property_in_log = f"{name}:{value}" - assert property_in_log in kafka_consumer_applyed_properties - assert property_in_log not in kafka_producer_applyed_properties + assert property_in_log in kafka_consumer_applied_properties + assert property_in_log not in kafka_producer_applied_properties producer_settings = 
{"transaction.timeout.ms": "60001"} for name, value in producer_settings.items(): property_in_log = f"{name}:{value}" - assert property_in_log not in kafka_consumer_applyed_properties - assert property_in_log in kafka_producer_applyed_properties + assert property_in_log not in kafka_consumer_applied_properties + assert property_in_log in kafka_producer_applied_properties # Should be ignored, because it is inside producer tag producer_legacy_syntax__topic_separate_settings = {"message.timeout.ms": "300001"} for name, value in producer_legacy_syntax__topic_separate_settings.items(): property_in_log = f"{name}:{value}" - assert property_in_log not in kafka_consumer_applyed_properties - assert property_in_log not in kafka_producer_applyed_properties + assert property_in_log not in kafka_consumer_applied_properties + assert property_in_log not in kafka_producer_applied_properties # Old syntax, applied on consumer and producer legacy_syntax__topic_separated_settings = {"heartbeat.interval.ms": "302"} for name, value in legacy_syntax__topic_separated_settings.items(): property_in_log = f"{name}:{value}" - assert property_in_log in kafka_consumer_applyed_properties - assert property_in_log in kafka_producer_applyed_properties + assert property_in_log in kafka_consumer_applied_properties + assert property_in_log in kafka_producer_applied_properties @pytest.mark.parametrize( @@ -4324,7 +4329,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator "create_query_generator", [ generate_old_create_table_query, - # generate_new_create_table_query TODO(antaljanosbenjamin): crashes CH + generate_new_create_table_query, ], ) def test_kafka_consumer_failover(kafka_cluster, create_query_generator): @@ -4823,6 +4828,7 @@ def test_row_based_formats(kafka_cluster, create_query_generator): logging.debug("Checking {format_name}") topic_name = format_name + get_topic_postfix(create_query_generator) + table_name = f"kafka_{format_name}" with kafka_topic(admin_client, topic_name): num_rows = 10 @@ -4830,7 +4836,7 @@ def test_row_based_formats(kafka_cluster, create_query_generator): message_count = num_rows / max_rows_per_message create_query = create_query_generator( - "kafka", + table_name, "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, @@ -4841,14 +4847,14 @@ def test_row_based_formats(kafka_cluster, create_query_generator): instance.query( f""" DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.kafka; + DROP TABLE IF EXISTS test.{table_name}; {create_query}; CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.kafka; + SELECT key, value FROM test.{table_name}; - INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); + INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); """ ) @@ -4934,9 +4940,11 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): "JSONCompactColumns", ]: topic_name = format_name + get_topic_postfix(create_query_generator) + table_name = f"kafka_{format_name}" + logging.debug(f"Checking format {format_name}") with kafka_topic(admin_client, topic_name): create_query = create_query_generator( - "kafka", + table_name, "key UInt64, value UInt64", topic_list=topic_name, consumer_group=topic_name, @@ -4946,14 +4954,14 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): instance.query( f""" DROP TABLE IF EXISTS test.view; - DROP TABLE IF EXISTS test.kafka; + DROP TABLE IF 
EXISTS test.{table_name}; {create_query}; CREATE MATERIALIZED VIEW test.view Engine=Log AS - SELECT key, value FROM test.kafka; + SELECT key, value FROM test.{table_name}; - INSERT INTO test.kafka SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; + INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; """ ) messages = kafka_consume_with_retry( From 03500bbe2f62859cfe734b97558eeebf63332101 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 11:43:05 +0000 Subject: [PATCH 0149/2361] Style fix --- tests/integration/test_storage_kafka/configs/kafka.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_storage_kafka/configs/kafka.xml b/tests/integration/test_storage_kafka/configs/kafka.xml index b10db879b72..a846fdbb295 100644 --- a/tests/integration/test_storage_kafka/configs/kafka.xml +++ b/tests/integration/test_storage_kafka/configs/kafka.xml @@ -48,7 +48,7 @@ - 30001 + 30001 60001 From f2e4ec28cb034c709321e9c74ceafc1a6e28f26a Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 18 Jun 2024 11:51:42 +0000 Subject: [PATCH 0150/2361] Automatic style fix --- tests/integration/test_storage_kafka/test.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 994ed8cb7b1..81132a9a60f 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -2594,14 +2594,16 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) "create_query_generator, do_direct_read", [(generate_old_create_table_query, True), (generate_new_create_table_query, False)], ) -def test_kafka_producer_consumer_separate_settings(kafka_cluster, create_query_generator, do_direct_read): +def test_kafka_producer_consumer_separate_settings( + kafka_cluster, create_query_generator, do_direct_read +): instance.rotate_logs() instance.query( create_query_generator( "test_kafka", "key UInt64", topic_list="separate_settings", - consumer_group="test" + consumer_group="test", ) ) From 1df895f3dadcbb65d246927b79f42144e5fc1af2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 18 Jun 2024 14:04:40 +0200 Subject: [PATCH 0151/2361] Minor changes --- src/Access/SettingsConstraints.cpp | 8 ++++---- src/Interpreters/InterpreterSetQuery.cpp | 10 +++++----- src/Interpreters/InterpreterSetQuery.h | 2 +- .../03003_compatibility_setting_bad_value.sql | 1 - 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/Access/SettingsConstraints.cpp b/src/Access/SettingsConstraints.cpp index a274f6b54f2..7506e365035 100644 --- a/src/Access/SettingsConstraints.cpp +++ b/src/Access/SettingsConstraints.cpp @@ -219,8 +219,8 @@ void SettingsConstraints::clamp(const Settings & current_settings, SettingsChang }); } -template -bool getNewValueToCheck(const T & current_settings, SettingChange & change, Field & new_value, bool throw_on_failure) +template +bool getNewValueToCheck(const SettingsT & current_settings, SettingChange & change, Field & new_value, bool throw_on_failure) { Field current_value; bool has_current_value = current_settings.tryGet(change.name, current_value); @@ -230,12 +230,12 @@ bool getNewValueToCheck(const T & current_settings, SettingChange & change, Fiel 
return false; if (throw_on_failure) - new_value = T::castValueUtil(change.name, change.value); + new_value = SettingsT::castValueUtil(change.name, change.value); else { try { - new_value = T::castValueUtil(change.name, change.value); + new_value = SettingsT::castValueUtil(change.name, change.value); } catch (...) { diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index cac44c7747b..15d4ba56d8d 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -46,7 +46,7 @@ static void applySettingsFromSelectWithUnion(const ASTSelectWithUnionQuery & sel // It is flattened later, when we process UNION ALL/DISTINCT. const auto * last_select = children.back()->as(); if (last_select && last_select->settings()) - InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(); + InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(false); } void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMutablePtr context_) @@ -58,7 +58,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta if (const auto * query_with_output = dynamic_cast(ast.get())) { if (query_with_output->settings_ast) - InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(); + InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(false); if (const auto * create_query = ast->as(); create_query && create_query->select) applySettingsFromSelectWithUnion(create_query->select->as(), context_); @@ -67,7 +67,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta if (const auto * select_query = ast->as()) { if (auto new_settings = select_query->settings()) - InterpreterSetQuery(new_settings, context_).executeForCurrentContext(); + InterpreterSetQuery(new_settings, context_).executeForCurrentContext(false); } else if (const auto * select_with_union_query = ast->as()) { @@ -76,7 +76,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta else if (const auto * explain_query = ast->as()) { if (explain_query->settings_ast) - InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext(); + InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext(false); applySettingsFromQuery(explain_query->getExplainedQuery(), context_); } @@ -84,7 +84,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta { context_->setInsertFormat(insert_query->format); if (insert_query->settings_ast) - InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(); + InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(false); } } diff --git a/src/Interpreters/InterpreterSetQuery.h b/src/Interpreters/InterpreterSetQuery.h index 2438762f347..f50105c39f4 100644 --- a/src/Interpreters/InterpreterSetQuery.h +++ b/src/Interpreters/InterpreterSetQuery.h @@ -23,7 +23,7 @@ public: /** Set setting for current context (query context). * It is used for interpretation of SETTINGS clause in SELECT query. 
*/ - void executeForCurrentContext(bool ignore_setting_constraints = false); + void executeForCurrentContext(bool ignore_setting_constraints); bool supportsTransactions() const override { return true; } diff --git a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql index 48e98798c51..b9fbfd917fc 100644 --- a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql +++ b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql @@ -1,2 +1 @@ select 42 settings compatibility=NULL; -- {clientError BAD_ARGUMENTS} - From 11d54f4809a8b58773f13664e6b842cb6c7dce48 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 18 Jun 2024 15:18:53 +0200 Subject: [PATCH 0152/2361] Pass-through settings from the client --- programs/client/Client.cpp | 3 +++ programs/server/Server.cpp | 2 +- src/Access/AccessControl.cpp | 8 +++++++- src/Access/AccessControl.h | 5 ++++- src/Client/ClientBase.cpp | 1 - .../0_stateless/03003_compatibility_setting_bad_value.sql | 2 +- 6 files changed, 16 insertions(+), 5 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index efe23d57478..22a035fbd71 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1184,6 +1184,9 @@ void Client::processConfig() global_context->setQueryKindInitial(); global_context->setQuotaClientKey(config().getString("quota_key", "")); global_context->setQueryKind(query_kind); + + /// Allow to pass-through unknown settings to the server. + global_context->getAccessControl().allowAllSettings(); } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 6414f7f6ea5..2f1d07790e1 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1824,7 +1824,7 @@ try auto & access_control = global_context->getAccessControl(); try { - access_control.setUpFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); }); + access_control.setupFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); }); } catch (...) { diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index c3bb42160ad..9831621d6ac 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -264,7 +264,7 @@ AccessControl::AccessControl() AccessControl::~AccessControl() = default; -void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_, +void AccessControl::setupFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_, const zkutil::GetZooKeeper & get_zookeeper_function_) { if (config_.has("custom_settings_prefixes")) @@ -852,4 +852,10 @@ const ExternalAuthenticators & AccessControl::getExternalAuthenticators() const return *external_authenticators; } + +void AccessControl::allowAllSettings() +{ + custom_settings_prefixes->registerPrefixes({""}); +} + } diff --git a/src/Access/AccessControl.h b/src/Access/AccessControl.h index d1537219a06..f408f6dfb0d 100644 --- a/src/Access/AccessControl.h +++ b/src/Access/AccessControl.h @@ -54,7 +54,7 @@ public: ~AccessControl() override; /// Initializes access storage (user directories). 
- void setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_, + void setupFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_, const zkutil::GetZooKeeper & get_zookeeper_function_); /// Parses access entities from a configuration loaded from users.xml. @@ -235,6 +235,9 @@ public: /// Gets manager of notifications. AccessChangesNotifier & getChangesNotifier(); + /// Allow all setting names - this can be used in clients to pass-through unknown settings to the server. + void allowAllSettings(); + private: class ContextAccessCache; class CustomSettingsPrefixes; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 958a1f50813..617a56cfd95 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2958,7 +2958,6 @@ void ClientBase::init(int argc, char ** argv) boost::replace_all(arg, "−", "--"); } - OptionsDescription options_description; options_description.main_description.emplace(createOptionsDescription("Main options", terminal_width)); diff --git a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql index b9fbfd917fc..3a09eec7452 100644 --- a/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql +++ b/tests/queries/0_stateless/03003_compatibility_setting_bad_value.sql @@ -1 +1 @@ -select 42 settings compatibility=NULL; -- {clientError BAD_ARGUMENTS} +select 42 settings compatibility=NULL; -- {clientError BAD_GET} From 65fc6fe8a363f0c80d947c62fcaf3a5f434086ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 14:30:23 +0000 Subject: [PATCH 0153/2361] Fix style --- src/Storages/Kafka/KafkaConsumer2.cpp | 3 ++- src/Storages/Kafka/StorageKafka2.cpp | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 41ce7c43131..84b6f5153ed 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -252,7 +252,8 @@ void KafkaConsumer2::updateOffsets(const TopicPartitions & topic_partitions) topic_partitions.begin(), topic_partitions.end(), std::back_inserter(original_topic_partitions), - [](const TopicPartition & tp) { + [](const TopicPartition & tp) + { return cppkafka::TopicPartition{tp.topic, tp.partition_id, tp.offset}; }); initializeQueues(original_topic_partitions); diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 86330cbf122..080201cae9e 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -92,7 +92,6 @@ extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; extern const int REPLICA_ALREADY_EXISTS; extern const int TABLE_IS_DROPPED; -extern const int TABLE_WAS_NOT_DROPPED; extern const int NO_ZOOKEEPER; extern const int REPLICA_IS_ALREADY_ACTIVE; } From 38537a00aa9e7185b69c066cd6809c54487ecf4e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 18 Jun 2024 18:16:12 +0000 Subject: [PATCH 0154/2361] Make ActionsDAGPtr unique_ptr. 
--- src/Core/InterpolateDescription.cpp | 2 +- src/Core/InterpolateDescription.h | 2 +- src/Functions/indexHint.h | 4 +- src/Interpreters/ActionsDAG.cpp | 46 ++++++++------- src/Interpreters/ActionsDAG.h | 11 ++-- src/Interpreters/ActionsVisitor.cpp | 4 +- src/Interpreters/ActionsVisitor.h | 2 +- src/Interpreters/ExpressionActions.cpp | 20 ++++++- src/Interpreters/ExpressionActions.h | 6 +- src/Interpreters/ExpressionAnalyzer.cpp | 27 ++++----- src/Interpreters/ExpressionAnalyzer.h | 2 +- src/Interpreters/InterpreterInsertQuery.cpp | 4 +- src/Interpreters/InterpreterSelectQuery.cpp | 14 ++--- src/Interpreters/MutationsInterpreter.cpp | 2 +- src/Interpreters/TableJoin.cpp | 4 +- src/Interpreters/WindowDescription.h | 6 +- src/Interpreters/addMissingDefaults.cpp | 2 +- src/Interpreters/addMissingDefaults.h | 2 +- src/Interpreters/inplaceBlockConversions.cpp | 2 +- src/Interpreters/inplaceBlockConversions.h | 2 +- src/Planner/CollectTableExpressionData.cpp | 6 +- src/Planner/Planner.cpp | 49 ++++++++-------- src/Planner/PlannerActionsVisitor.cpp | 6 +- src/Planner/PlannerJoinTree.cpp | 58 ++++++++++--------- src/Planner/PlannerJoinTree.h | 2 +- src/Planner/PlannerJoins.cpp | 12 ++-- src/Planner/PlannerWindowFunctions.cpp | 13 ++++- src/Planner/PlannerWindowFunctions.h | 2 +- src/Planner/Utils.cpp | 2 +- src/Processors/QueryPlan/AggregatingStep.cpp | 4 +- src/Processors/QueryPlan/CubeStep.cpp | 4 +- src/Processors/QueryPlan/ExpressionStep.cpp | 12 ++-- src/Processors/QueryPlan/ExpressionStep.h | 2 +- src/Processors/QueryPlan/FilterStep.cpp | 11 ++-- src/Processors/QueryPlan/FilterStep.h | 2 +- .../Optimizations/distinctReadInOrder.cpp | 12 ++-- .../Optimizations/filterPushDown.cpp | 2 +- .../Optimizations/optimizePrewhere.cpp | 2 +- .../optimizePrimaryKeyCondition.cpp | 6 +- .../Optimizations/optimizeReadInOrder.cpp | 4 +- .../optimizeUseAggregateProjection.cpp | 4 +- .../optimizeUseNormalProjection.cpp | 2 +- .../Optimizations/projectionsCommon.cpp | 4 +- .../Optimizations/projectionsCommon.h | 2 +- .../Optimizations/removeRedundantDistinct.cpp | 23 ++++---- .../QueryPlan/ReadFromMergeTree.cpp | 38 ++++++------ src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- .../QueryPlan/ReadFromSystemNumbersStep.cpp | 2 +- .../QueryPlan/SourceStepWithFilter.cpp | 8 +-- .../QueryPlan/SourceStepWithFilter.h | 3 +- src/Processors/QueryPlan/TotalsHavingStep.cpp | 8 +-- src/Processors/QueryPlan/TotalsHavingStep.h | 2 +- src/Processors/QueryPlan/WindowStep.h | 2 +- src/Processors/SourceWithKeyCondition.h | 4 +- .../Transforms/FillingTransform.cpp | 2 +- src/Storages/Hive/StorageHive.cpp | 8 +-- src/Storages/KeyDescription.cpp | 2 +- src/Storages/MergeTree/KeyCondition.cpp | 4 +- src/Storages/MergeTree/KeyCondition.h | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 8 +-- src/Storages/MergeTree/MergeTreeData.h | 2 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 8 +-- .../MergeTree/MergeTreeDataSelectExecutor.h | 4 +- .../MergeTree/MergeTreeIndexAnnoy.cpp | 2 +- src/Storages/MergeTree/MergeTreeIndexAnnoy.h | 2 +- .../MergeTree/MergeTreeIndexBloomFilter.cpp | 4 +- .../MergeTree/MergeTreeIndexBloomFilter.h | 4 +- .../MergeTreeIndexBloomFilterText.cpp | 4 +- .../MergeTree/MergeTreeIndexBloomFilterText.h | 4 +- .../MergeTree/MergeTreeIndexFullText.cpp | 4 +- .../MergeTree/MergeTreeIndexFullText.h | 4 +- .../MergeTree/MergeTreeIndexHypothesis.cpp | 2 +- .../MergeTree/MergeTreeIndexHypothesis.h | 2 +- .../MergeTree/MergeTreeIndexMinMax.cpp | 6 +- src/Storages/MergeTree/MergeTreeIndexMinMax.h | 4 +- 
src/Storages/MergeTree/MergeTreeIndexSet.cpp | 6 +- src/Storages/MergeTree/MergeTreeIndexSet.h | 4 +- .../MergeTree/MergeTreeIndexUSearch.cpp | 2 +- .../MergeTree/MergeTreeIndexUSearch.h | 2 +- src/Storages/MergeTree/MergeTreeIndices.h | 2 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +- .../MergeTree/MergeTreeSequentialSource.cpp | 4 +- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 32 +++++----- src/Storages/MergeTree/PartitionPruner.cpp | 2 +- src/Storages/MergeTree/PartitionPruner.h | 2 +- .../StorageObjectStorageSource.cpp | 2 +- src/Storages/SelectQueryInfo.h | 4 +- src/Storages/StorageBuffer.cpp | 4 +- src/Storages/StorageDistributed.cpp | 2 +- src/Storages/StorageFile.cpp | 2 +- src/Storages/StorageMerge.cpp | 10 ++-- src/Storages/StorageURL.h | 2 +- src/Storages/StorageValues.cpp | 4 +- src/Storages/StorageView.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 4 +- src/Storages/VirtualColumnUtils.h | 2 +- src/Storages/WindowView/StorageWindowView.cpp | 12 ++-- 98 files changed, 355 insertions(+), 318 deletions(-) diff --git a/src/Core/InterpolateDescription.cpp b/src/Core/InterpolateDescription.cpp index d828c2e85e9..76bbefdcfd7 100644 --- a/src/Core/InterpolateDescription.cpp +++ b/src/Core/InterpolateDescription.cpp @@ -14,7 +14,7 @@ namespace DB { InterpolateDescription::InterpolateDescription(ActionsDAGPtr actions_, const Aliases & aliases) - : actions(actions_) + : actions(std::move(actions_)) { for (const auto & name_type : actions->getRequiredColumns()) { diff --git a/src/Core/InterpolateDescription.h b/src/Core/InterpolateDescription.h index 62d7120508b..73579aebee4 100644 --- a/src/Core/InterpolateDescription.h +++ b/src/Core/InterpolateDescription.h @@ -11,7 +11,7 @@ namespace DB { class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; using Aliases = std::unordered_map; /// Interpolate description diff --git a/src/Functions/indexHint.h b/src/Functions/indexHint.h index 3b71c7a5585..8fd7b751760 100644 --- a/src/Functions/indexHint.h +++ b/src/Functions/indexHint.h @@ -2,14 +2,12 @@ #include #include #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; - /** The `indexHint` function takes any number of any arguments and always returns one. 
* * This function has a special meaning (see ExpressionAnalyzer, KeyCondition) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 34f3e0a98bd..23e1e5ee152 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -626,7 +626,7 @@ void ActionsDAG::removeAliasesForFilter(const std::string & filter_name) ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases) { - auto actions = std::make_shared(); + auto actions = std::make_unique(); std::unordered_map copy_map; struct Frame @@ -1248,25 +1248,29 @@ bool ActionsDAG::removeUnusedResult(const std::string & column_name) ActionsDAGPtr ActionsDAG::clone() const { - auto actions = std::make_shared(); + std::unordered_map old_to_new_nodes; + return clone(old_to_new_nodes); +} - std::unordered_map copy_map; +ActionsDAGPtr ActionsDAG::clone(std::unordered_map & old_to_new_nodes) const +{ + auto actions = std::make_unique(); for (const auto & node : nodes) { auto & copy_node = actions->nodes.emplace_back(node); - copy_map[&node] = ©_node; + old_to_new_nodes[&node] = ©_node; } for (auto & node : actions->nodes) for (auto & child : node.children) - child = copy_map[child]; + child = old_to_new_nodes[child]; for (const auto & output_node : outputs) - actions->outputs.push_back(copy_map[output_node]); + actions->outputs.push_back(old_to_new_nodes[output_node]); for (const auto & input_node : inputs) - actions->inputs.push_back(copy_map[input_node]); + actions->inputs.push_back(old_to_new_nodes[input_node]); return actions; } @@ -1421,7 +1425,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( if (add_casted_columns && mode != MatchColumnsMode::Name) throw Exception(ErrorCodes::LOGICAL_ERROR, "Converting with add_casted_columns supported only for MatchColumnsMode::Name"); - auto actions_dag = std::make_shared(source); + auto actions_dag = std::make_unique(source); NodeRawConstPtrs projection(num_result_columns); FunctionOverloadResolverPtr func_builder_materialize = std::make_unique(std::make_shared()); @@ -1549,7 +1553,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( ActionsDAGPtr ActionsDAG::makeAddingColumnActions(ColumnWithTypeAndName column) { - auto adding_column_action = std::make_shared(); + auto adding_column_action = std::make_unique(); FunctionOverloadResolverPtr func_builder_materialize = std::make_unique(std::make_shared()); @@ -1570,7 +1574,7 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) /// Some actions could become unused. Do not drop inputs to preserve the header. 
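/// The merged DAG is returned as a uniquely owned object (ActionsDAGPtr is std::unique_ptr);
/// both arguments are consumed, so a caller that still needs either input must clone it beforehand.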
first.removeUnusedActions(false); - return std::make_shared(std::move(first)); + return std::make_unique(std::move(first)); } void ActionsDAG::mergeInplace(ActionsDAG && second) @@ -1963,12 +1967,12 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set split second_inputs.push_back(cur.to_second); } - auto first_actions = std::make_shared(); + auto first_actions = std::make_unique(); first_actions->nodes.swap(first_nodes); first_actions->outputs.swap(first_outputs); first_actions->inputs.swap(first_inputs); - auto second_actions = std::make_shared(); + auto second_actions = std::make_unique(); second_actions->nodes.swap(second_nodes); second_actions->outputs.swap(second_outputs); second_actions->inputs.swap(second_inputs); @@ -2302,7 +2306,7 @@ ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjuncti if (conjunction.empty()) return nullptr; - auto actions = std::make_shared(); + auto actions = std::make_unique(); FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); @@ -2866,7 +2870,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( bool visited_children = false; }; - auto result_dag = std::make_shared(); + auto result_dag = std::make_unique(); std::unordered_map result_inputs; std::unordered_map node_to_result_node; @@ -2964,7 +2968,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( const auto & index_hint_args = index_hint->getActions()->getOutputs(); if (index_hint_args.empty()) - index_hint_filter_dag = std::make_shared(); + index_hint_filter_dag = std::make_unique(); else index_hint_filter_dag = buildFilterActionsDAG(index_hint_args, node_name_to_input_node_column, @@ -3108,10 +3112,10 @@ ActionsDAG::NodeRawConstPtrs ActionsDAG::filterNodesByAllowedInputs( return nodes; } -FindOriginalNodeForOutputName::FindOriginalNodeForOutputName(const ActionsDAGPtr & actions_) - :actions(actions_) +FindOriginalNodeForOutputName::FindOriginalNodeForOutputName(const ActionsDAG & actions_) + //: actions(actions_) { - const auto & actions_outputs = actions->getOutputs(); + const auto & actions_outputs = actions_.getOutputs(); for (const auto * output_node : actions_outputs) { /// find input node which refers to the output node @@ -3147,10 +3151,10 @@ const ActionsDAG::Node * FindOriginalNodeForOutputName::find(const String & outp return it->second; } -FindAliasForInputName::FindAliasForInputName(const ActionsDAGPtr & actions_) - :actions(actions_) +FindAliasForInputName::FindAliasForInputName(const ActionsDAG & actions_) + //: actions(actions_) { - const auto & actions_outputs = actions->getOutputs(); + const auto & actions_outputs = actions_.getOutputs(); for (const auto * output_node : actions_outputs) { /// find input node which corresponds to alias diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index c9974fd849c..4a840885b6a 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -12,7 +12,7 @@ namespace DB { class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; class IExecutableFunction; using ExecutableFunctionPtr = std::shared_ptr; @@ -262,6 +262,7 @@ public: #endif ActionsDAGPtr clone() const; + ActionsDAGPtr clone(std::unordered_map & old_to_new_nodes) const; static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); @@ -480,11 +481,11 @@ class FindOriginalNodeForOutputName using NameToNodeIndex = std::unordered_map; public: - explicit FindOriginalNodeForOutputName(const ActionsDAGPtr & actions); 
+ explicit FindOriginalNodeForOutputName(const ActionsDAG & actions); const ActionsDAG::Node * find(const String & output_name); private: - ActionsDAGPtr actions; + //const ActionsDAG & actions; NameToNodeIndex index; }; @@ -493,11 +494,11 @@ class FindAliasForInputName using NameToNodeIndex = std::unordered_map; public: - explicit FindAliasForInputName(const ActionsDAGPtr & actions); + explicit FindAliasForInputName(const ActionsDAG & actions); const ActionsDAG::Node * find(const String & name); private: - ActionsDAGPtr actions; + //const ActionsDAG & actions; NameToNodeIndex index; }; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 9e56d740e5e..1838a7b04b9 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1009,7 +1009,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & dag.project(args); auto index_hint = std::make_shared(); - index_hint->setActions(std::make_shared(std::move(dag))); + index_hint->setActions(std::make_unique(std::move(dag))); // Arguments are removed. We add function instead of constant column to avoid constant folding. data.addFunction(std::make_unique(index_hint), {}, column_name); @@ -1272,7 +1272,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & lambda_dag.removeUnusedActions(Names(1, result_name)); auto lambda_actions = std::make_shared( - std::make_shared(std::move(lambda_dag)), + std::make_unique(std::move(lambda_dag)), ExpressionActionsSettings::fromContext(data.getContext(), CompileExpressions::yes)); DataTypePtr result_type = lambda_actions->getSampleBlock().getByName(result_name).type; diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index 46d2d60e461..496d9b9b587 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -22,7 +22,7 @@ class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; class IFunctionOverloadResolver; using FunctionOverloadResolverPtr = std::shared_ptr; diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 7f96c927d82..7cbf5afd763 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -73,7 +73,25 @@ ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const Expressio ExpressionActionsPtr ExpressionActions::clone() const { - return std::make_shared(*this); + auto copy = std::make_shared(ExpressionActions()); + + std::unordered_map copy_map; + copy->actions_dag = actions_dag->clone(copy_map); + copy->actions = actions; + for (auto & action : copy->actions) + action.node = copy_map[action.node]; + + copy->num_columns = num_columns; + + copy->required_columns = required_columns; + copy->input_positions = input_positions; + copy->result_positions = result_positions; + copy->sample_block = sample_block; + + copy->project_inputs = project_inputs; + copy->settings = settings; + + return copy; } namespace diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index ddffe022215..63ea989bd5e 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -84,10 +84,9 @@ private: ExpressionActionsSettings settings; public: - ExpressionActions() = delete; explicit ExpressionActions(ActionsDAGPtr actions_dag_, const ExpressionActionsSettings & settings_ = 
{}, bool project_inputs_ = false); - ExpressionActions(const ExpressionActions &) = default; - ExpressionActions & operator=(const ExpressionActions &) = default; + ExpressionActions(ExpressionActions &&) = default; + ExpressionActions & operator=(ExpressionActions &&) = default; const Actions & getActions() const { return actions; } const std::list & getNodes() const { return actions_dag->getNodes(); } @@ -131,6 +130,7 @@ public: ExpressionActionsPtr clone() const; private: + ExpressionActions() = default; void checkLimits(const ColumnsWithTypeAndName & columns) const; void linearizeActions(const std::unordered_set & lazy_executed_nodes); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 62cddd9caf7..be00e37c751 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -658,7 +658,7 @@ void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_, with_alias->getColumnName(), 1 /* direction */, 1 /* nulls_direction */)); - auto actions_dag = std::make_shared(aggregated_columns); + auto actions_dag = std::make_unique(aggregated_columns); getRootActions(column_ast, false, *actions_dag); desc.partition_by_actions.push_back(std::move(actions_dag)); } @@ -679,7 +679,7 @@ void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_, order_by_element.direction, order_by_element.nulls_direction)); - auto actions_dag = std::make_shared(aggregated_columns); + auto actions_dag = std::make_unique(aggregated_columns); getRootActions(column_ast, false, *actions_dag); desc.order_by_actions.push_back(std::move(actions_dag)); } @@ -823,13 +823,14 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAG & actions) makeWindowDescriptionFromAST(*current_context, window_descriptions, desc, &definition); + auto full_sort_description = desc.full_sort_description; + auto [it, inserted] = window_descriptions.insert( - {default_window_name, desc}); + {default_window_name, std::move(desc)}); if (!inserted) { - assert(it->second.full_sort_description - == desc.full_sort_description); + assert(it->second.full_sort_description == full_sort_description); } it->second.window_functions.push_back(window_function); @@ -1353,10 +1354,10 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain { for (auto & child : asts) { - auto actions_dag = std::make_shared(columns_after_join); + auto actions_dag = std::make_unique(columns_after_join); getRootActions(child, only_types, *actions_dag); group_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); + std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); } } @@ -1606,10 +1607,10 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr { for (const auto & child : select_query->orderBy()->children) { - auto actions_dag = std::make_shared(columns_after_join); + auto actions_dag = std::make_unique(columns_after_join); getRootActions(child, only_types, *actions_dag); order_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); + std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); } } @@ -1799,7 +1800,7 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool 
remov ActionsDAGPtr ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs) { - auto actions = std::make_shared(constant_inputs); + auto actions = std::make_unique(constant_inputs); getRootActions(query, true /* no_makeset_for_subqueries */, *actions, true /* only_consts */); return actions; } @@ -1807,7 +1808,7 @@ ActionsDAGPtr ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndNam ExpressionActionsPtr ExpressionAnalyzer::getConstActions(const ColumnsWithTypeAndName & constant_inputs) { auto actions = getConstActionsDAG(constant_inputs); - return std::make_shared(actions, ExpressionActionsSettings::fromContext(getContext())); + return std::make_shared(std::move(actions), ExpressionActionsSettings::fromContext(getContext())); } std::unique_ptr SelectQueryExpressionAnalyzer::getJoinedPlan() @@ -1878,7 +1879,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (prewhere_dag_and_flags) { - auto dag = std::make_shared(std::move(prewhere_dag_and_flags->dag)); + auto dag = std::make_unique(std::move(prewhere_dag_and_flags->dag)); prewhere_info = std::make_shared(std::move(dag), query.prewhere()->getColumnName()); prewhere_dag_and_flags.reset(); } @@ -1945,7 +1946,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( { auto dag = prewhere_dag_and_flags->dag.clone(); ExpressionActions( - dag, + std::move(dag), ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); /// If the filter column is a constant, record it. diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 12d6dce8f72..e44a5891e77 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -39,7 +39,7 @@ class ArrayJoinAction; using ArrayJoinActionPtr = std::shared_ptr; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; /// Create columns in block or return false if not possible bool sanitizeBlock(Block & block, bool throw_if_cannot_create_column = false); diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 927bafe4bfb..4dbdebd0d06 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -384,7 +384,7 @@ Chain InterpreterInsertQuery::buildPreSinkChain( context_ptr, null_as_default); - auto adding_missing_defaults_actions = std::make_shared(adding_missing_defaults_dag); + auto adding_missing_defaults_actions = std::make_shared(std::move(adding_missing_defaults_dag)); /// Actually we don't know structure of input blocks from query/table, /// because some clients break insertion protocol (columns != header) @@ -597,7 +597,7 @@ BlockIO InterpreterInsertQuery::execute() pipeline.getHeader().getColumnsWithTypeAndName(), header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Position); - auto actions = std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); + auto actions = std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); pipeline.addSimpleTransform([&](const Block & in_header) -> ProcessorPtr { diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index f5b54ec64cb..64a17a7ba87 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ 
b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1302,7 +1302,7 @@ static InterpolateDescriptionPtr getInterpolateDescription( result_columns, ActionsDAG::MatchColumnsMode::Position, true); ActionsDAGPtr merge_dag = ActionsDAG::merge(std::move(*actions->clone()), std::move(*conv_dag)); - interpolate_descr = std::make_shared(merge_dag, aliases); + interpolate_descr = std::make_shared(std::move(merge_dag), aliases); } return interpolate_descr; @@ -2042,7 +2042,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, - std::make_shared(prewhere_info.row_level_filter), + std::make_shared(prewhere_info.row_level_filter->clone()), prewhere_info.row_level_column_name, true); }); } @@ -2050,7 +2050,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( - header, std::make_shared(prewhere_info.prewhere_actions), + header, std::make_shared(prewhere_info.prewhere_actions->clone()), prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column); }); } @@ -2094,8 +2094,8 @@ void InterpreterSelectQuery::applyFiltersToPrewhereInAnalysis(ExpressionAnalysis if (does_storage_support_prewhere && shouldMoveToPrewhere()) { /// Execute row level filter in prewhere as a part of "move to prewhere" optimization. - analysis.prewhere_info = std::make_shared(analysis.filter_info->actions, analysis.filter_info->column_name); - analysis.prewhere_info->remove_prewhere_column = analysis.filter_info->do_remove_column; + analysis.prewhere_info = std::make_shared(std::move(analysis.filter_info->actions), analysis.filter_info->column_name); + analysis.prewhere_info->remove_prewhere_column = std::move(analysis.filter_info->do_remove_column); analysis.prewhere_info->need_filter = true; analysis.filter_info = nullptr; } @@ -2103,8 +2103,8 @@ void InterpreterSelectQuery::applyFiltersToPrewhereInAnalysis(ExpressionAnalysis else { /// Add row level security actions to prewhere. 
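/// The DAG and column name are moved out of filter_info rather than shared, and filter_info is reset right below, so the prewhere info becomes the sole owner of the row-level filter actions.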
- analysis.prewhere_info->row_level_filter = analysis.filter_info->actions; - analysis.prewhere_info->row_level_column_name = analysis.filter_info->column_name; + analysis.prewhere_info->row_level_filter = std::move(analysis.filter_info->actions); + analysis.prewhere_info->row_level_column_name = std::move(analysis.filter_info->column_name); analysis.filter_info = nullptr; } } diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 6d3a4f30b34..1bb770bf561 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1211,7 +1211,7 @@ void MutationsInterpreter::Source::read( MergeTreeSequentialSourceType::Mutation, plan, *data, storage_snapshot, part, required_columns, - apply_deleted_mask_, filter, context_, + apply_deleted_mask_, std::move(filter), context_, getLogger("MutationsInterpreter")); } else diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index 6191eb73fd4..baf3a743f40 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -470,7 +470,7 @@ static ActionsDAGPtr createWrapWithTupleActions( if (column_names_to_wrap.empty()) return nullptr; - auto actions_dag = std::make_shared(source_columns); + auto actions_dag = std::make_unique(source_columns); FunctionOverloadResolverPtr func_builder = std::make_unique(std::make_shared()); @@ -616,7 +616,7 @@ TableJoin::createConvertingActions( mergeDags(right_dag, std::move(new_right_dag)); } - return {left_dag, right_dag}; + return {std::move(left_dag), std::move(right_dag)}; } template diff --git a/src/Interpreters/WindowDescription.h b/src/Interpreters/WindowDescription.h index c26e4517c9a..17bfe619c30 100644 --- a/src/Interpreters/WindowDescription.h +++ b/src/Interpreters/WindowDescription.h @@ -14,7 +14,7 @@ namespace DB class ASTFunction; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; struct WindowFunctionDescription { @@ -93,8 +93,8 @@ struct WindowDescription // then by ORDER BY. This field holds this combined sort order. SortDescription full_sort_description; - std::vector partition_by_actions; - std::vector order_by_actions; + std::vector> partition_by_actions; + std::vector> order_by_actions; WindowFrame frame; diff --git a/src/Interpreters/addMissingDefaults.cpp b/src/Interpreters/addMissingDefaults.cpp index fbf17d7efb7..929999c8c37 100644 --- a/src/Interpreters/addMissingDefaults.cpp +++ b/src/Interpreters/addMissingDefaults.cpp @@ -21,7 +21,7 @@ ActionsDAGPtr addMissingDefaults( ContextPtr context, bool null_as_default) { - auto actions = std::make_shared(header.getColumnsWithTypeAndName()); + auto actions = std::make_unique(header.getColumnsWithTypeAndName()); auto & index = actions->getOutputs(); /// For missing columns of nested structure, you need to create not a column of empty arrays, but a column of arrays of correct lengths. diff --git a/src/Interpreters/addMissingDefaults.h b/src/Interpreters/addMissingDefaults.h index 0a3d4de478c..94afd806dfd 100644 --- a/src/Interpreters/addMissingDefaults.h +++ b/src/Interpreters/addMissingDefaults.h @@ -15,7 +15,7 @@ class NamesAndTypesList; class ColumnsDescription; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; /** Adds three types of columns into block * 1. 
Columns, that are missed inside request, but present in table without defaults (missed columns) diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index 239cce5b427..b000264ae33 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -163,7 +163,7 @@ ActionsDAGPtr createExpressions( auto syntax_result = TreeRewriter(context).analyze(expr_list, header.getNamesAndTypesList()); auto expression_analyzer = ExpressionAnalyzer{expr_list, syntax_result, context}; - auto dag = std::make_shared(header.getNamesAndTypesList()); + auto dag = std::make_unique(header.getNamesAndTypesList()); auto actions = expression_analyzer.getActionsDAG(true, !save_unneeded_columns); dag = ActionsDAG::merge(std::move(*dag), std::move(*actions)); diff --git a/src/Interpreters/inplaceBlockConversions.h b/src/Interpreters/inplaceBlockConversions.h index bea44bf6db9..ffc77561e79 100644 --- a/src/Interpreters/inplaceBlockConversions.h +++ b/src/Interpreters/inplaceBlockConversions.h @@ -24,7 +24,7 @@ struct StorageInMemoryMetadata; using StorageMetadataPtr = std::shared_ptr; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; /// Create actions which adds missing defaults to block according to required_columns using columns description /// or substitute NULL into DEFAULT value in case of INSERT SELECT query (null_as_default) if according setting is 1. diff --git a/src/Planner/CollectTableExpressionData.cpp b/src/Planner/CollectTableExpressionData.cpp index d5e39a9f123..162d3fe8d11 100644 --- a/src/Planner/CollectTableExpressionData.cpp +++ b/src/Planner/CollectTableExpressionData.cpp @@ -88,7 +88,7 @@ public: auto column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(node); - ActionsDAGPtr alias_column_actions_dag = std::make_shared(); + ActionsDAGPtr alias_column_actions_dag = std::make_unique(); PlannerActionsVisitor actions_visitor(planner_context, false); auto outputs = actions_visitor.visit(*alias_column_actions_dag, column_node->getExpression()); if (outputs.size() != 1) @@ -97,7 +97,7 @@ public: const auto & column_name = column_node->getColumnName(); const auto & alias_node = alias_column_actions_dag->addAlias(*outputs[0], column_name); alias_column_actions_dag->addOrReplaceInOutputs(alias_node); - table_expression_data.addAliasColumn(column_node->getColumn(), column_identifier, alias_column_actions_dag, select_added_columns); + table_expression_data.addAliasColumn(column_node->getColumn(), column_identifier, std::move(alias_column_actions_dag), select_added_columns); } return; @@ -335,7 +335,7 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr collect_source_columns_visitor.setKeepAliasColumns(false); collect_source_columns_visitor.visit(query_node_typed.getPrewhere()); - auto prewhere_actions_dag = std::make_shared(); + auto prewhere_actions_dag = std::make_unique(); QueryTreeNodePtr query_tree_node = query_node_typed.getPrewhere(); diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 2d42ed73223..681ae7e6ac4 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -213,7 +213,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & if (!read_from_dummy) continue; - auto filter_actions = read_from_dummy->getFilterActionsDAG(); + auto filter_actions = read_from_dummy->detachFilterActionsDAG(); const auto & table_node = 
dummy_storage_to_table.at(&read_from_dummy->getStorage()); res[table_node] = FiltersForTableExpression{std::move(filter_actions), read_from_dummy->getPrewhereInfo()}; } @@ -331,13 +331,13 @@ public: void addExpressionStep(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression_actions, const std::string & step_description, - std::vector & result_actions_to_execute) + std::vector & result_actions_to_execute) { auto actions = expression_actions->dag.clone(); if (expression_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions); + result_actions_to_execute.push_back(actions.get()); auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), actions); expression_step->setStepDescription(step_description); query_plan.addStep(std::move(expression_step)); @@ -346,13 +346,13 @@ void addExpressionStep(QueryPlan & query_plan, void addFilterStep(QueryPlan & query_plan, const FilterAnalysisResult & filter_analysis_result, const std::string & step_description, - std::vector & result_actions_to_execute) + std::vector & result_actions_to_execute) { auto actions = filter_analysis_result.filter_actions->dag.clone(); if (filter_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions); + result_actions_to_execute.push_back(actions.get()); auto where_step = std::make_unique(query_plan.getCurrentDataStream(), actions, filter_analysis_result.filter_column_name, @@ -544,7 +544,7 @@ void addTotalsHavingStep(QueryPlan & query_plan, const QueryAnalysisResult & query_analysis_result, const PlannerContextPtr & planner_context, const QueryNode & query_node, - std::vector & result_actions_to_execute) + std::vector & result_actions_to_execute) { const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); @@ -560,7 +560,7 @@ void addTotalsHavingStep(QueryPlan & query_plan, if (having_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions); + result_actions_to_execute.push_back(actions.get()); } auto totals_having_step = std::make_unique( @@ -714,7 +714,7 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, if (query_node.hasInterpolate()) { - auto interpolate_actions_dag = std::make_shared(); + auto interpolate_actions_dag = std::make_unique(); auto query_plan_columns = query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); for (auto & query_plan_column : query_plan_columns) { @@ -885,7 +885,7 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, const PlannerContextPtr & planner_context, const PlannerQueryProcessingInfo & query_processing_info, const QueryTreeNodePtr & query_tree, - std::vector & result_actions_to_execute) + std::vector & result_actions_to_execute) { const auto & query_node = query_tree->as(); @@ -932,14 +932,14 @@ void addWindowSteps(QueryPlan & query_plan, const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - auto window_descriptions = window_analysis_result.window_descriptions; - sortWindowDescriptions(window_descriptions); + const auto & window_descriptions = window_analysis_result.window_descriptions; + auto perm = sortWindowDescriptions(window_descriptions); 
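/// sortWindowDescriptions() returns an index permutation instead of reordering the descriptions themselves (they are only held by const reference here); the loop below reads window_descriptions[perm[i]] and compares with window_descriptions[perm[i - 1]].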
size_t window_descriptions_size = window_descriptions.size(); for (size_t i = 0; i < window_descriptions_size; ++i) { - const auto & window_description = window_descriptions[i]; + const auto & window_description = window_descriptions[perm[i]]; /** We don't need to sort again if the input from previous window already * has suitable sorting. Also don't create sort steps when there are no @@ -952,8 +952,9 @@ void addWindowSteps(QueryPlan & query_plan, bool need_sort = !window_description.full_sort_description.empty(); if (need_sort && i != 0) { - need_sort = !sortDescriptionIsPrefix(window_description.full_sort_description, window_descriptions[i - 1].full_sort_description) - || (settings.max_threads != 1 && window_description.partition_by.size() != window_descriptions[i - 1].partition_by.size()); + auto prev = perm[i - 1]; + need_sort = !sortDescriptionIsPrefix(window_description.full_sort_description, window_descriptions[prev].full_sort_description) + || (settings.max_threads != 1 && window_description.partition_by.size() != window_descriptions[prev].partition_by.size()); } if (need_sort) { @@ -1054,9 +1055,9 @@ void addOffsetStep(QueryPlan & query_plan, const QueryAnalysisResult & query_ana } } -void collectSetsFromActionsDAG(const ActionsDAGPtr & dag, std::unordered_set & useful_sets) +void collectSetsFromActionsDAG(const ActionsDAG & dag, std::unordered_set & useful_sets) { - for (const auto & node : dag->getNodes()) + for (const auto & node : dag.getNodes()) { if (node.column) { @@ -1075,7 +1076,7 @@ void collectSetsFromActionsDAG(const ActionsDAGPtr & dag, std::unordered_set(adaptor->getFunction().get())) { - collectSetsFromActionsDAG(index_hint->getActions(), useful_sets); + collectSetsFromActionsDAG(*index_hint->getActions(), useful_sets); } } } @@ -1086,13 +1087,13 @@ void addBuildSubqueriesForSetsStepIfNeeded( QueryPlan & query_plan, const SelectQueryOptions & select_query_options, const PlannerContextPtr & planner_context, - const std::vector & result_actions_to_execute) + const std::vector & result_actions_to_execute) { auto subqueries = planner_context->getPreparedSets().getSubqueries(); std::unordered_set useful_sets; - for (const auto & actions_to_execute : result_actions_to_execute) - collectSetsFromActionsDAG(actions_to_execute, useful_sets); + for (const auto * actions_to_execute : result_actions_to_execute) + collectSetsFromActionsDAG(*actions_to_execute, useful_sets); auto predicate = [&useful_sets](const auto & set) { return !useful_sets.contains(set.get()); }; auto it = std::remove_if(subqueries.begin(), subqueries.end(), std::move(predicate)); @@ -1448,7 +1449,7 @@ void Planner::buildPlanForQueryNode() if (it != table_filters.end()) { const auto & filters = it->second; - table_expression_data.setFilterActions(filters.filter_actions); + table_expression_data.setFilterActions(filters.filter_actions->clone()); table_expression_data.setPrewhereInfo(filters.prewhere_info); } } @@ -1539,15 +1540,15 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info); - std::vector result_actions_to_execute = std::move(join_tree_query_plan.actions_dags); + std::vector result_actions_to_execute = std::move(join_tree_query_plan.actions_dags); for (auto & [_, table_expression_data] : planner_context->getTableExpressionNodeToData()) { if (table_expression_data.getPrewhereFilterActions()) - result_actions_to_execute.push_back(table_expression_data.getPrewhereFilterActions()); + result_actions_to_execute.push_back(table_expression_data.getPrewhereFilterActions().get()); 
if (table_expression_data.getRowLevelFilterActions()) - result_actions_to_execute.push_back(table_expression_data.getRowLevelFilterActions()); + result_actions_to_execute.push_back(table_expression_data.getRowLevelFilterActions().get()); } if (query_processing_info.isIntermediateStage()) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 7a12d5d690d..59ec7778e21 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -757,7 +757,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi lambda_arguments_names_and_types.emplace_back(lambda_argument_name, std::move(lambda_argument_type)); } - auto lambda_actions_dag = std::make_shared(); + auto lambda_actions_dag = std::make_unique(); actions_stack.emplace_back(*lambda_actions_dag, node); auto [lambda_expression_node_name, levels] = visitImpl(lambda_node.getExpression()); @@ -765,7 +765,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi lambda_actions_dag->removeUnusedActions(Names(1, lambda_expression_node_name)); auto expression_actions_settings = ExpressionActionsSettings::fromContext(planner_context->getQueryContext(), CompileExpressions::yes); - auto lambda_actions = std::make_shared(lambda_actions_dag, expression_actions_settings); + auto lambda_actions = std::make_shared(std::move(lambda_actions_dag), expression_actions_settings); Names captured_column_names; ActionsDAG::NodeRawConstPtrs lambda_children; @@ -879,7 +879,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi const auto & function_node = node->as(); auto function_node_name = action_node_name_helper.calculateActionNodeName(node); - auto index_hint_actions_dag = std::make_shared(); + auto index_hint_actions_dag = std::make_unique(); auto & index_hint_actions_dag_outputs = index_hint_actions_dag->getOutputs(); std::unordered_set index_hint_actions_dag_output_node_names; PlannerActionsVisitor actions_visitor(planner_context); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 43b223172e6..918cfad703e 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -591,7 +591,7 @@ UInt64 mainQueryNodeBlockSizeByLimit(const SelectQueryInfo & select_query_info) std::unique_ptr createComputeAliasColumnsStep( const std::unordered_map & alias_column_expressions, const DataStream & current_data_stream) { - ActionsDAGPtr merged_alias_columns_actions_dag = std::make_shared(current_data_stream.header.getColumnsWithTypeAndName()); + ActionsDAGPtr merged_alias_columns_actions_dag = std::make_unique(current_data_stream.header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag->getInputs(); for (const auto & [column_name, alias_column_actions_dag] : alias_column_expressions) @@ -646,7 +646,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto table_expression_query_info = select_query_info; table_expression_query_info.table_expression = table_expression; - table_expression_query_info.filter_actions_dag = table_expression_data.getFilterActions(); + table_expression_query_info.filter_actions_dag = table_expression_data.getFilterActions()->clone(); table_expression_query_info.analyzer_can_use_parallel_replicas_on_follower = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; size_t max_streams = settings.max_threads; @@ -776,7 
+776,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (prewhere_actions) { prewhere_info = std::make_shared(); - prewhere_info->prewhere_actions = prewhere_actions; + prewhere_info->prewhere_actions = prewhere_actions->clone(); prewhere_info->prewhere_column_name = prewhere_actions->getOutputs().at(0)->result_name; prewhere_info->remove_prewhere_column = true; prewhere_info->need_filter = true; @@ -787,7 +787,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres const auto & columns_names = table_expression_data.getColumnNames(); std::vector> where_filters; - const auto add_filter = [&](const FilterDAGInfo & filter_info, std::string description) + const auto add_filter = [&](FilterDAGInfo & filter_info, std::string description) { if (!filter_info.actions) return; @@ -805,34 +805,34 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (!prewhere_info->prewhere_actions) { - prewhere_info->prewhere_actions = filter_info.actions; + prewhere_info->prewhere_actions = std::move(filter_info.actions); prewhere_info->prewhere_column_name = filter_info.column_name; prewhere_info->remove_prewhere_column = filter_info.do_remove_column; prewhere_info->need_filter = true; } else if (!prewhere_info->row_level_filter) { - prewhere_info->row_level_filter = filter_info.actions; + prewhere_info->row_level_filter = std::move(filter_info.actions); prewhere_info->row_level_column_name = filter_info.column_name; prewhere_info->need_filter = true; } else { - where_filters.emplace_back(filter_info, std::move(description)); + where_filters.emplace_back(std::move(filter_info), std::move(description)); } } else { - where_filters.emplace_back(filter_info, std::move(description)); + where_filters.emplace_back(std::move(filter_info), std::move(description)); } }; auto row_policy_filter_info = buildRowPolicyFilterIfNeeded(storage, table_expression_query_info, planner_context, used_row_policies); - add_filter(row_policy_filter_info, "Row-level security filter"); if (row_policy_filter_info.actions) - table_expression_data.setRowLevelFilterActions(row_policy_filter_info.actions); + table_expression_data.setRowLevelFilterActions(row_policy_filter_info.actions->clone()); + add_filter(row_policy_filter_info, "Row-level security filter"); if (query_context->getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY) { @@ -1063,7 +1063,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (from_stage == QueryProcessingStage::FetchColumns) { - auto rename_actions_dag = std::make_shared(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + auto rename_actions_dag = std::make_unique(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs updated_actions_dag_outputs; for (auto & output_node : rename_actions_dag->getOutputs()) @@ -1077,7 +1077,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres rename_actions_dag->getOutputs() = std::move(updated_actions_dag_outputs); - auto rename_step = std::make_unique(query_plan.getCurrentDataStream(), rename_actions_dag); + auto rename_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(rename_actions_dag)); rename_step->setStepDescription("Change column names to column identifiers"); query_plan.addStep(std::move(rename_step)); } @@ -1117,7 +1117,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres 
void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextPtr & planner_context, const FunctionOverloadResolverPtr & to_nullable_function) { - auto cast_actions_dag = std::make_shared(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + auto cast_actions_dag = std::make_unique(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); for (auto & output_node : cast_actions_dag->getOutputs()) { @@ -1178,6 +1178,9 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ join_table_expression, planner_context); + left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.left_join_expressions_actions.get()); + left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.right_join_expressions_actions.get()); + join_clauses_and_actions.left_join_expressions_actions->appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), join_clauses_and_actions.left_join_expressions_actions); left_join_expressions_actions_step->setStepDescription("JOIN actions"); @@ -1223,7 +1226,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map & plan_column_name_to_cast_type) { - auto cast_actions_dag = std::make_shared(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + auto cast_actions_dag = std::make_unique(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); for (auto & output_node : cast_actions_dag->getOutputs()) { @@ -1381,9 +1384,10 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ if (join_clauses_and_actions.mixed_join_expressions_actions) { + left_join_tree_query_plan.actions_dags.push_back(join_clauses_and_actions.mixed_join_expressions_actions.get()); ExpressionActionsPtr & mixed_join_expression = table_join->getMixedJoinExpression(); mixed_join_expression = std::make_shared( - join_clauses_and_actions.mixed_join_expressions_actions, + std::move(join_clauses_and_actions.mixed_join_expressions_actions), ExpressionActionsSettings::fromContext(planner_context->getQueryContext())); } } @@ -1537,7 +1541,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ result_plan.unitePlans(std::move(join_step), {std::move(plans)}); } - auto drop_unused_columns_after_join_actions_dag = std::make_shared(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + auto drop_unused_columns_after_join_actions_dag = std::make_unique(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs drop_unused_columns_after_join_actions_dag_updated_outputs; std::unordered_set drop_unused_columns_after_join_actions_dag_updated_outputs_names; std::optional first_skipped_column_node_index; @@ -1582,14 +1586,14 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ left_join_tree_query_plan.used_row_policies.insert(right_join_tree_query_plan_row_policy); /// Collect all required actions dags in `left_join_tree_query_plan.actions_dags` - for (auto && action_dag : right_join_tree_query_plan.actions_dags) + for (const auto * action_dag : right_join_tree_query_plan.actions_dags) left_join_tree_query_plan.actions_dags.emplace_back(action_dag); - if 
(join_clauses_and_actions.left_join_expressions_actions) - left_join_tree_query_plan.actions_dags.emplace_back(std::move(join_clauses_and_actions.left_join_expressions_actions)); - if (join_clauses_and_actions.right_join_expressions_actions) - left_join_tree_query_plan.actions_dags.emplace_back(std::move(join_clauses_and_actions.right_join_expressions_actions)); - if (join_clauses_and_actions.mixed_join_expressions_actions) - left_join_tree_query_plan.actions_dags.push_back(join_clauses_and_actions.mixed_join_expressions_actions); + // if (join_clauses_and_actions.left_join_expressions_actions) + // left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.left_join_expressions_actions.get()); + // if (join_clauses_and_actions.right_join_expressions_actions) + // left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.right_join_expressions_actions.get()); + // if (join_clauses_and_actions.mixed_join_expressions_actions) + // left_join_tree_query_plan.actions_dags.push_back(join_clauses_and_actions.mixed_join_expressions_actions.get()); auto mapping = std::move(left_join_tree_query_plan.query_node_to_plan_step_mapping); auto & r_mapping = right_join_tree_query_plan.query_node_to_plan_step_mapping; @@ -1619,7 +1623,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ auto plan = std::move(join_tree_query_plan.query_plan); auto plan_output_columns = plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); - ActionsDAGPtr array_join_action_dag = std::make_shared(plan_output_columns); + ActionsDAGPtr array_join_action_dag = std::make_unique(plan_output_columns); PlannerActionsVisitor actions_visitor(planner_context); std::unordered_set array_join_expressions_output_nodes; @@ -1642,13 +1646,13 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ array_join_action_dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); - join_tree_query_plan.actions_dags.push_back(array_join_action_dag); + join_tree_query_plan.actions_dags.push_back(array_join_action_dag.get()); - auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), array_join_action_dag); + auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), std::move(array_join_action_dag)); array_join_actions->setStepDescription("ARRAY JOIN actions"); plan.addStep(std::move(array_join_actions)); - auto drop_unused_columns_before_array_join_actions_dag = std::make_shared(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + auto drop_unused_columns_before_array_join_actions_dag = std::make_unique(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs drop_unused_columns_before_array_join_actions_dag_updated_outputs; std::unordered_set drop_unused_columns_before_array_join_actions_dag_updated_outputs_names; diff --git a/src/Planner/PlannerJoinTree.h b/src/Planner/PlannerJoinTree.h index 9110b2bfef9..675079427eb 100644 --- a/src/Planner/PlannerJoinTree.h +++ b/src/Planner/PlannerJoinTree.h @@ -16,7 +16,7 @@ struct JoinTreeQueryPlan QueryPlan query_plan; QueryProcessingStage::Enum from_stage; std::set used_row_policies{}; - std::vector actions_dags{}; + std::vector actions_dags{}; std::unordered_map query_node_to_plan_step_mapping{}; }; diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index 84efdd21336..45842c0d705 100644 --- a/src/Planner/PlannerJoins.cpp +++ b/src/Planner/PlannerJoins.cpp @@ -376,8 +376,8 @@ 
JoinClausesAndActions buildJoinClausesAndActions( const JoinNode & join_node, const PlannerContextPtr & planner_context) { - ActionsDAGPtr left_join_actions = std::make_shared(left_table_expression_columns); - ActionsDAGPtr right_join_actions = std::make_shared(right_table_expression_columns); + ActionsDAGPtr left_join_actions = std::make_unique(left_table_expression_columns); + ActionsDAGPtr right_join_actions = std::make_unique(right_table_expression_columns); ColumnsWithTypeAndName mixed_table_expression_columns; for (const auto & left_column : left_table_expression_columns) { @@ -387,7 +387,7 @@ JoinClausesAndActions buildJoinClausesAndActions( { mixed_table_expression_columns.push_back(right_column); } - ActionsDAGPtr mixed_join_actions = std::make_shared(mixed_table_expression_columns); + ActionsDAGPtr mixed_join_actions = std::make_unique(mixed_table_expression_columns); /** It is possible to have constant value in JOIN ON section, that we need to ignore during DAG construction. * If we do not ignore it, this function will be replaced by underlying constant. @@ -601,7 +601,7 @@ JoinClausesAndActions buildJoinClausesAndActions( /// So, for each column, we recalculate the value of the whole expression from JOIN ON to check if rows should be joined. if (result.join_clauses.size() > 1) { - auto mixed_join_expressions_actions = std::make_shared(mixed_table_expression_columns); + auto mixed_join_expressions_actions = std::make_unique(mixed_table_expression_columns); PlannerActionsVisitor join_expression_visitor(planner_context); auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(*mixed_join_expressions_actions, join_expression); if (join_expression_dag_node_raw_pointers.size() != 1) @@ -611,14 +611,14 @@ JoinClausesAndActions buildJoinClausesAndActions( mixed_join_expressions_actions->addOrReplaceInOutputs(*join_expression_dag_node_raw_pointers[0]); Names required_names{join_expression_dag_node_raw_pointers[0]->result_name}; mixed_join_expressions_actions->removeUnusedActions(required_names); - result.mixed_join_expressions_actions = mixed_join_expressions_actions; + result.mixed_join_expressions_actions = std::move(mixed_join_expressions_actions); } else { const auto & join_clause = result.join_clauses.front(); const auto & mixed_filter_condition_nodes = join_clause.getMixedFilterConditionNodes(); auto mixed_join_expressions_actions = ActionsDAG::buildFilterActionsDAG(mixed_filter_condition_nodes, {}, true); - result.mixed_join_expressions_actions = mixed_join_expressions_actions; + result.mixed_join_expressions_actions = std::move(mixed_join_expressions_actions); } auto outputs = result.mixed_join_expressions_actions->getOutputs(); if (outputs.size() != 1) diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index ce74d82c08d..9deceeef9a3 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -120,7 +120,7 @@ std::vector extractWindowDescriptions(const QueryTreeNodes & return result; } -void sortWindowDescriptions(std::vector & window_descriptions) +std::vector sortWindowDescriptions(const std::vector & window_descriptions) { auto window_description_comparator = [](const WindowDescription & lhs, const WindowDescription & rhs) { @@ -151,7 +151,16 @@ void sortWindowDescriptions(std::vector & window_descriptions return left.size() > right.size(); }; - ::sort(window_descriptions.begin(), window_descriptions.end(), window_description_comparator); + auto comparator = [&](size_t lhs, 
size_t rhs) + { + return window_description_comparator(window_descriptions[lhs], window_descriptions[rhs]); + }; + + std::vector perm(window_descriptions.size()); + std::iota(perm.begin(), perm.end(), 0U); + ::sort(perm.begin(), perm.end(), comparator); + + return perm; } } diff --git a/src/Planner/PlannerWindowFunctions.h b/src/Planner/PlannerWindowFunctions.h index 1552ef5a71f..3039ecefc4b 100644 --- a/src/Planner/PlannerWindowFunctions.h +++ b/src/Planner/PlannerWindowFunctions.h @@ -15,6 +15,6 @@ std::vector extractWindowDescriptions(const QueryTreeNodes & /** Try to sort window descriptions in such an order that the window with the longest * sort description goes first, and all window that use its prefixes follow. */ -void sortWindowDescriptions(std::vector & window_descriptions); +std::vector sortWindowDescriptions(const std::vector & window_descriptions); } diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp index 18a6d297838..493ecf5ef53 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp @@ -440,7 +440,7 @@ FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree, collectSourceColumns(filter_query_tree, planner_context, false /*keep_alias_columns*/); collectSets(filter_query_tree, *planner_context); - auto filter_actions_dag = std::make_shared(); + auto filter_actions_dag = std::make_unique(); PlannerActionsVisitor actions_visitor(planner_context, false /*use_column_identifier_as_action_node_name*/); auto expression_nodes = actions_visitor.visit(*filter_actions_dag, filter_query_tree); diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index 0d7e05af1de..64ba7f7cd2a 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -303,7 +303,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B const auto & header = ports[set_counter]->getHeader(); /// Here we create a DAG which fills missing keys and adds `__grouping_set` column - auto dag = std::make_shared(header.getColumnsWithTypeAndName()); + auto dag = std::make_unique(header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs outputs; outputs.reserve(output_header.columns() + 1); @@ -347,7 +347,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B } dag->getOutputs().swap(outputs); - auto expression = std::make_shared(dag, settings.getActionsSettings()); + auto expression = std::make_shared(std::move(dag), settings.getActionsSettings()); auto transform = std::make_shared(header, expression); connect(*ports[set_counter], transform->getInputPort()); diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index d010a3327a6..b6c70061987 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ -36,7 +36,7 @@ CubeStep::CubeStep(const DataStream & input_stream_, Aggregator::Params params_, ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number) { - auto dag = std::make_shared(header.getColumnsWithTypeAndName()); + auto dag = std::make_unique(header.getColumnsWithTypeAndName()); auto & outputs = dag->getOutputs(); if (use_nulls) @@ -59,7 +59,7 @@ ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, b grouping_node = &dag->materializeNode(*grouping_node); outputs.insert(outputs.begin(), grouping_node); - auto expression = 
std::make_shared(dag, settings.getActionsSettings()); + auto expression = std::make_shared(std::move(dag), settings.getActionsSettings()); return std::make_shared(header, expression); } diff --git a/src/Processors/QueryPlan/ExpressionStep.cpp b/src/Processors/QueryPlan/ExpressionStep.cpp index 0ccb0c4492a..90ac94a1ace 100644 --- a/src/Processors/QueryPlan/ExpressionStep.cpp +++ b/src/Processors/QueryPlan/ExpressionStep.cpp @@ -30,13 +30,13 @@ ExpressionStep::ExpressionStep(const DataStream & input_stream_, const ActionsDA input_stream_, ExpressionTransform::transformHeader(input_stream_.header, *actions_dag_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description)) - , actions_dag(actions_dag_) + , actions_dag(actions_dag_->clone()) { } void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression = std::make_shared(actions_dag, settings.getActionsSettings()); + auto expression = std::make_shared(actions_dag->clone(), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -49,7 +49,7 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu pipeline.getHeader().getColumnsWithTypeAndName(), output_stream->header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto convert_actions = std::make_shared(convert_actions_dag, settings.getActionsSettings()); + auto convert_actions = std::make_shared(std::move(convert_actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -61,13 +61,13 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu void ExpressionStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, settings.indent_char); - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag->clone()); expression->describeActions(settings.out, prefix); } void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const { - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag->clone()); map.add("Expression", expression->toTree()); } @@ -79,7 +79,7 @@ void ExpressionStep::updateOutputStream() if (!getDataStreamTraits().preserves_sorting) return; - FindAliasForInputName alias_finder(actions_dag); + FindAliasForInputName alias_finder(*actions_dag); const auto & input_sort_description = getInputStreams().front().sort_description; for (size_t i = 0, s = input_sort_description.size(); i < s; ++i) { diff --git a/src/Processors/QueryPlan/ExpressionStep.h b/src/Processors/QueryPlan/ExpressionStep.h index 3eef14ac129..ebbac8217cb 100644 --- a/src/Processors/QueryPlan/ExpressionStep.h +++ b/src/Processors/QueryPlan/ExpressionStep.h @@ -5,7 +5,7 @@ namespace DB { class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; class ExpressionTransform; class JoiningTransform; diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 72934665b5c..ef9f1d17822 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -46,7 +46,6 @@ FilterStep::FilterStep( filter_column_name_, remove_filter_column_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description, remove_filter_column_, filter_column_name_)) - , actions_dag(actions_dag_) , filter_column_name(std::move(filter_column_name_)) , 
remove_filter_column(remove_filter_column_) { @@ -56,7 +55,7 @@ FilterStep::FilterStep( void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression = std::make_shared(actions_dag, settings.getActionsSettings()); + auto expression = std::make_shared(std::move(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) { @@ -70,7 +69,7 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ pipeline.getHeader().getColumnsWithTypeAndName(), output_stream->header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto convert_actions = std::make_shared(convert_actions_dag, settings.getActionsSettings()); + auto convert_actions = std::make_shared(std::move(convert_actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -88,7 +87,7 @@ void FilterStep::describeActions(FormatSettings & settings) const settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag->clone()); expression->describeActions(settings.out, prefix); } @@ -97,7 +96,7 @@ void FilterStep::describeActions(JSONBuilder::JSONMap & map) const map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag->clone()); map.add("Expression", expression->toTree()); } @@ -111,7 +110,7 @@ void FilterStep::updateOutputStream() if (!getDataStreamTraits().preserves_sorting) return; - FindAliasForInputName alias_finder(actions_dag); + FindAliasForInputName alias_finder(*actions_dag); const auto & input_sort_description = getInputStreams().front().sort_description; for (size_t i = 0, s = input_sort_description.size(); i < s; ++i) { diff --git a/src/Processors/QueryPlan/FilterStep.h b/src/Processors/QueryPlan/FilterStep.h index 939d0900c86..0f894a570b7 100644 --- a/src/Processors/QueryPlan/FilterStep.h +++ b/src/Processors/QueryPlan/FilterStep.h @@ -5,7 +5,7 @@ namespace DB { class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; /// Implements WHERE, HAVING operations. See FilterTransform. 
class FilterStep : public ITransformingStep diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp index 0a3a4094a66..87e16b5a244 100644 --- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp @@ -10,7 +10,7 @@ namespace DB::QueryPlanOptimizations { /// build actions DAG from stack of steps -static ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) +static ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) { if (dag_stack.empty()) return nullptr; @@ -27,10 +27,10 @@ static ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_st } static std::set -getOriginalDistinctColumns(const ColumnsWithTypeAndName & distinct_columns, std::vector & dag_stack) +getOriginalDistinctColumns(const ColumnsWithTypeAndName & distinct_columns, std::vector & dag_stack) { auto actions = buildActionsForPlanPath(dag_stack); - FindOriginalNodeForOutputName original_node_finder(actions); + FindOriginalNodeForOutputName original_node_finder(*actions); std::set original_distinct_columns; for (const auto & column : distinct_columns) { @@ -65,7 +65,7 @@ size_t tryDistinctReadInOrder(QueryPlan::Node * parent_node) /// (3) gather actions DAG to find original names for columns in distinct step later std::vector steps_to_update; QueryPlan::Node * node = parent_node; - std::vector dag_stack; + std::vector dag_stack; while (!node->children.empty()) { auto * step = dynamic_cast(node->step.get()); @@ -79,9 +79,9 @@ size_t tryDistinctReadInOrder(QueryPlan::Node * parent_node) steps_to_update.push_back(step); if (const auto * const expr = typeid_cast(step); expr) - dag_stack.push_back(expr->getExpression()); + dag_stack.push_back(expr->getExpression().get()); else if (const auto * const filter = typeid_cast(step); filter) - dag_stack.push_back(filter->getExpression()); + dag_stack.push_back(filter->getExpression().get()); node = node->children.front(); } diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 263598bdca7..ff1cefff09a 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -611,7 +611,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (auto * read_from_merge = typeid_cast(child.get())) { - FilterDAGInfo info{filter->getExpression(), filter->getFilterColumnName(), filter->removesFilterColumn()}; + FilterDAGInfo info{filter->getExpression()->clone(), filter->getFilterColumnName(), filter->removesFilterColumn()}; read_from_merge->addFilter(std::move(info)); std::swap(*parent_node, *child_node); return 1; diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index 1badd315200..13b691da888 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -108,7 +108,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) prewhere_info->need_filter = true; prewhere_info->remove_prewhere_column = optimize_result.fully_moved_to_prewhere && filter_step->removesFilterColumn(); - auto filter_expression = filter_step->getExpression(); + auto filter_expression = filter_step->getExpression()->clone(); const auto & filter_column_name = filter_step->getFilterColumnName(); if 
(prewhere_info->remove_prewhere_column) diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp index dbcaf5f00a7..e57d3319076 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp @@ -18,15 +18,15 @@ void optimizePrimaryKeyCondition(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions, storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions->clone(), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) - source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter, storage_prewhere_info->row_level_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter->clone(), storage_prewhere_info->row_level_column_name); } for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) - source_step_with_filter->addFilter(filter_step->getExpression(), filter_step->getFilterColumnName()); + source_step_with_filter->addFilter(filter_step->getExpression()->clone(), filter_step->getFilterColumnName()); /// Note: actually, plan optimizations merge Filter and Expression steps. /// Ideally, chain should look like (Expression -> ...) -> (Filter -> ...) -> ReadFromStorage, diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 537555afa2a..8e782e68db8 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -1066,13 +1066,13 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, for (const auto & actions_dag : window_desc.partition_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } for (const auto & actions_dag : window_desc.order_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(actions_dag, ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } auto order_optimizer = std::make_shared( diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 70327bc95b4..7c45ef48252 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -486,7 +486,7 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( auto block = reading.getMergeTreeData().getMinMaxCountProjectionBlock( metadata, candidate.dag->getRequiredColumnsNames(), - (dag.filter_node ? dag.dag : nullptr), + (dag.filter_node ? 
dag.dag.get() : nullptr), parts, max_added_blocks.get(), context); @@ -675,7 +675,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu query_info, context, max_added_blocks, - candidate.dag); + candidate.dag.get()); if (!analyzed) continue; diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index 0af3869ccf1..c7e96d66817 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -172,7 +172,7 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod query_info, context, max_added_blocks, - query.filter_node ? query.dag : nullptr); + query.filter_node ? query.dag.get() : nullptr); if (!analyzed) continue; diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index af1578d6af8..d8b40b22904 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -213,7 +213,7 @@ bool analyzeProjectionCandidate( const SelectQueryInfo & query_info, const ContextPtr & context, const std::shared_ptr & max_added_blocks, - const ActionsDAGPtr & dag) + const ActionsDAG * dag) { MergeTreeData::DataPartsVector projection_parts; MergeTreeData::DataPartsVector normal_parts; @@ -238,7 +238,7 @@ bool analyzeProjectionCandidate( auto projection_query_info = query_info; projection_query_info.prewhere_info = nullptr; - projection_query_info.filter_actions_dag = dag; + projection_query_info.filter_actions_dag = dag->clone(); auto projection_result_ptr = reader.estimateNumMarksToRead( std::move(projection_parts), diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h index e1e106b988e..59ad3a43b97 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h @@ -60,6 +60,6 @@ bool analyzeProjectionCandidate( const SelectQueryInfo & query_info, const ContextPtr & context, const std::shared_ptr & max_added_blocks, - const ActionsDAGPtr & dag); + const ActionsDAG * dag); } diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index 51df25b35f4..d3c75c988e7 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -65,7 +65,7 @@ namespace } /// build actions DAG from stack of steps - ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) + ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) { if (dag_stack.empty()) return nullptr; @@ -83,7 +83,7 @@ namespace } bool compareAggregationKeysWithDistinctColumns( - const Names & aggregation_keys, const DistinctColumns & distinct_columns, std::vector> actions_chain) + const Names & aggregation_keys, const DistinctColumns & distinct_columns, std::vector> actions_chain) { logDebug("aggregation_keys", aggregation_keys); logDebug("aggregation_keys size", aggregation_keys.size()); @@ -93,7 +93,8 @@ namespace std::set source_columns; for (auto & actions : actions_chain) { - FindOriginalNodeForOutputName original_node_finder(buildActionsForPlanPath(actions)); + auto tmp_actions = buildActionsForPlanPath(actions); + 
FindOriginalNodeForOutputName original_node_finder(*tmp_actions); for (const auto & column : current_columns) { logDebug("distinct column name", column); @@ -152,8 +153,8 @@ namespace const DistinctStep * distinct_step = typeid_cast(distinct_node->step.get()); chassert(distinct_step); - std::vector dag_stack; - std::vector> actions_chain; + std::vector dag_stack; + std::vector> actions_chain; const DistinctStep * inner_distinct_step = nullptr; const IQueryPlanStep * aggregation_before_distinct = nullptr; const QueryPlan::Node * node = distinct_node; @@ -182,9 +183,9 @@ namespace } if (const auto * const expr = typeid_cast(current_step); expr) - dag_stack.push_back(expr->getExpression()); + dag_stack.push_back(expr->getExpression().get()); else if (const auto * const filter = typeid_cast(current_step); filter) - dag_stack.push_back(filter->getExpression()); + dag_stack.push_back(filter->getExpression().get()); node = node->children.front(); if (inner_distinct_step = typeid_cast(node->step.get()); inner_distinct_step) @@ -222,7 +223,7 @@ namespace chassert(distinct_step); const auto distinct_columns = getDistinctColumns(distinct_step); - std::vector dag_stack; + std::vector dag_stack; const DistinctStep * inner_distinct_step = nullptr; const QueryPlan::Node * node = distinct_node; while (!node->children.empty()) @@ -235,9 +236,9 @@ namespace } if (const auto * const expr = typeid_cast(current_step); expr) - dag_stack.push_back(expr->getExpression()); + dag_stack.push_back(expr->getExpression().get()); else if (const auto * const filter = typeid_cast(current_step); filter) - dag_stack.push_back(filter->getExpression()); + dag_stack.push_back(filter->getExpression().get()); node = node->children.front(); inner_distinct_step = typeid_cast(node->step.get()); @@ -267,7 +268,7 @@ namespace logActionsDAG("distinct pass: merged DAG", path_actions); /// compare columns of two DISTINCTs - FindOriginalNodeForOutputName original_node_finder(path_actions); + FindOriginalNodeForOutputName original_node_finder(*path_actions); for (const auto & column : distinct_columns) { const auto * alias_node = original_node_finder.find(String(column)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b2d8aa0e218..e5370c1c130 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -174,7 +174,7 @@ static void updateSortDescriptionForOutputStream( { if (prewhere_info->prewhere_actions) { - FindOriginalNodeForOutputName original_column_finder(prewhere_info->prewhere_actions); + FindOriginalNodeForOutputName original_column_finder(*prewhere_info->prewhere_actions); for (auto & column : original_header) { const auto * original_node = original_column_finder.find(column.name); @@ -185,7 +185,7 @@ static void updateSortDescriptionForOutputStream( if (prewhere_info->row_level_filter) { - FindOriginalNodeForOutputName original_column_finder(prewhere_info->row_level_filter); + FindOriginalNodeForOutputName original_column_finder(*prewhere_info->row_level_filter); for (auto & column : original_header) { const auto * original_node = original_column_finder.find(column.name); @@ -830,10 +830,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ pipes[0].getHeader().getColumnsWithTypeAndName(), pipes[1].getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); + auto converting_expr = std::make_shared(std::move(conversion_action)); 
pipes[0].addSimpleTransform( - [conversion_action](const Block & header) + [converting_expr](const Block & header) { - auto converting_expr = std::make_shared(conversion_action); return std::make_shared(header, converting_expr); }); return Pipe::unitePipes(std::move(pipes)); @@ -849,7 +849,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ static ActionsDAGPtr createProjection(const Block & header) { - return std::make_shared(header.getNamesAndTypesList()); + return std::make_unique(header.getNamesAndTypesList()); } Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( @@ -1046,7 +1046,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( for (size_t j = 0; j < prefix_size; ++j) sort_description.emplace_back(sorting_columns[j], input_order_info->direction); - auto sorting_key_expr = std::make_shared(sorting_key_prefix_expr); + auto sorting_key_expr = std::make_shared(std::move(sorting_key_prefix_expr)); auto merge_streams = [&](Pipe & pipe) { @@ -1341,10 +1341,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( pipes[0].getHeader().getColumnsWithTypeAndName(), pipes[1].getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); + auto converting_expr = std::make_shared(std::move(conversion_action)); pipes[0].addSimpleTransform( - [conversion_action](const Block & header) + [converting_expr](const Block & header) { - auto converting_expr = std::make_shared(conversion_action); return std::make_shared(header, converting_expr); }); return Pipe::unitePipes(std::move(pipes)); @@ -1378,7 +1378,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead( static void buildIndexes( std::optional & indexes, - ActionsDAGPtr filter_actions_dag, + const ActionsDAG * filter_actions_dag, const MergeTreeData & data, const MergeTreeData::DataPartsVector & parts, const ContextPtr & context, @@ -1518,11 +1518,11 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes) /// (1) SourceStepWithFilter::filter_nodes, (2) query_info.filter_actions_dag. Make sure there are consistent. /// TODO: Get rid of filter_actions_dag in query_info after we move analysis of /// parallel replicas and unused shards into optimization, similar to projection analysis. 
- query_info.filter_actions_dag = filter_actions_dag; + query_info.filter_actions_dag = std::move(filter_actions_dag); buildIndexes( indexes, - filter_actions_dag, + query_info.filter_actions_dag.get(), data, prepared_parts, context, @@ -1564,7 +1564,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead( const Names & primary_key_column_names = primary_key.column_names; if (!indexes) - buildIndexes(indexes, query_info_.filter_actions_dag, data, parts, context_, query_info_, metadata_snapshot); + buildIndexes(indexes, query_info_.filter_actions_dag.get(), data, parts, context_, query_info_, metadata_snapshot); if (indexes->part_values && indexes->part_values->empty()) return std::make_shared(std::move(result)); @@ -1993,7 +1993,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result.sampling.use_sampling) { - auto sampling_actions = std::make_shared(result.sampling.filter_expression); + auto sampling_actions = std::make_shared(result.sampling.filter_expression->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( @@ -2031,7 +2031,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result_projection) { - auto projection_actions = std::make_shared(result_projection); + auto projection_actions = std::make_shared(result_projection->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, projection_actions); @@ -2048,7 +2048,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons ActionsDAG::MatchColumnsMode::Name, true); - auto converting_dag_expr = std::make_shared(convert_actions_dag); + auto converting_dag_expr = std::make_shared(std::move(convert_actions_dag)); pipe.addSimpleTransform([&](const Block & header) { @@ -2126,7 +2126,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); expression->describeActions(format_settings.out, prefix); } @@ -2135,7 +2135,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); expression->describeActions(format_settings.out, prefix); } } @@ -2161,7 +2161,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -2171,7 +2171,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row 
level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index caa8aa2e1bd..e32507e1f22 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -23,7 +23,7 @@ struct MergeTreeDataSelectSamplingData bool read_nothing = false; Float64 used_sample_factor = 1.0; std::shared_ptr filter_function; - ActionsDAGPtr filter_expression; + std::shared_ptr filter_expression; }; struct UsefulSkipIndexes diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 11371578c79..b9b239c721b 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -441,7 +441,7 @@ Pipe ReadFromSystemNumbersStep::makePipe() chassert(numbers_storage.step != UInt64{0}); /// Build rpn of query filters - KeyCondition condition(filter_actions_dag, context, column_names, key_expression); + KeyCondition condition(filter_actions_dag.get(), context, column_names, key_expression); if (condition.extractPlainRanges(ranges)) { diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.cpp b/src/Processors/QueryPlan/SourceStepWithFilter.cpp index ad0940b90b9..b91debc8239 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.cpp +++ b/src/Processors/QueryPlan/SourceStepWithFilter.cpp @@ -110,7 +110,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); expression->describeActions(format_settings.out, prefix); } @@ -119,7 +119,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); expression->describeActions(format_settings.out, prefix); } } @@ -137,7 +137,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -147,7 +147,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto 
expression = std::make_shared(prewhere_info->row_level_filter); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.h b/src/Processors/QueryPlan/SourceStepWithFilter.h index 126d4824fff..8ac0cc24ed1 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.h +++ b/src/Processors/QueryPlan/SourceStepWithFilter.h @@ -33,6 +33,7 @@ public: } const ActionsDAGPtr & getFilterActionsDAG() const { return filter_actions_dag; } + ActionsDAGPtr detachFilterActionsDAG() { return std::move(filter_actions_dag); } const SelectQueryInfo & getQueryInfo() const { return query_info; } const PrewhereInfoPtr & getPrewhereInfo() const { return prewhere_info; } @@ -53,7 +54,7 @@ public: void applyFilters() { applyFilters(std::move(filter_nodes)); - filter_dags = {}; + filter_dags.clear(); } virtual void applyFilters(ActionDAGNodes added_filter_nodes); diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index ac5e144bf4a..45de6c31d24 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -46,7 +46,7 @@ TotalsHavingStep::TotalsHavingStep( getTraits(!filter_column_.empty())) , aggregates(aggregates_) , overflow_row(overflow_row_) - , actions_dag(actions_dag_) + , actions_dag(actions_dag_->clone()) , filter_column_name(filter_column_) , remove_filter(remove_filter_) , totals_mode(totals_mode_) @@ -57,7 +57,7 @@ TotalsHavingStep::TotalsHavingStep( void TotalsHavingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression_actions = actions_dag ? std::make_shared(actions_dag, settings.getActionsSettings()) : nullptr; + auto expression_actions = actions_dag ? std::make_shared(actions_dag->clone(), settings.getActionsSettings()) : nullptr; auto totals_having = std::make_shared( pipeline.getHeader(), @@ -100,7 +100,7 @@ void TotalsHavingStep::describeActions(FormatSettings & settings) const if (actions_dag) { bool first = true; - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag->clone()); for (const auto & action : expression->getActions()) { settings.out << prefix << (first ? 
"Actions: " @@ -117,7 +117,7 @@ void TotalsHavingStep::describeActions(JSONBuilder::JSONMap & map) const if (actions_dag) { map.add("Filter column", filter_column_name); - auto expression = std::make_shared(actions_dag); + auto expression = std::make_shared(actions_dag->clone()); map.add("Expression", expression->toTree()); } } diff --git a/src/Processors/QueryPlan/TotalsHavingStep.h b/src/Processors/QueryPlan/TotalsHavingStep.h index a81bc7bb1a9..52ef5437701 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.h +++ b/src/Processors/QueryPlan/TotalsHavingStep.h @@ -6,7 +6,7 @@ namespace DB { class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; enum class TotalsMode : uint8_t; diff --git a/src/Processors/QueryPlan/WindowStep.h b/src/Processors/QueryPlan/WindowStep.h index 74a0e5930c7..47883e5edf6 100644 --- a/src/Processors/QueryPlan/WindowStep.h +++ b/src/Processors/QueryPlan/WindowStep.h @@ -7,7 +7,7 @@ namespace DB { class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; class WindowTransform; diff --git a/src/Processors/SourceWithKeyCondition.h b/src/Processors/SourceWithKeyCondition.h index ee155d6f78c..fcf576637ff 100644 --- a/src/Processors/SourceWithKeyCondition.h +++ b/src/Processors/SourceWithKeyCondition.h @@ -16,13 +16,13 @@ protected: /// Represents pushed down filters in source std::shared_ptr key_condition; - void setKeyConditionImpl(const ActionsDAGPtr & filter_actions_dag, ContextPtr context, const Block & keys) + void setKeyConditionImpl(const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & keys) { key_condition = std::make_shared( filter_actions_dag, context, keys.getNames(), - std::make_shared(std::make_shared(keys.getColumnsWithTypeAndName()))); + std::make_shared(std::make_unique(keys.getColumnsWithTypeAndName()))); } public: diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index bb38c3e1dc5..95267bc24e0 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -203,7 +203,7 @@ FillingTransform::FillingTransform( , use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_) { if (interpolate_description) - interpolate_actions = std::make_shared(interpolate_description->actions); + interpolate_actions = std::make_shared(interpolate_description->actions->clone()); std::vector is_fill_column(header_.columns()); for (size_t i = 0, size = fill_description.size(); i < size; ++i) diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 28d8128e052..2cd51259549 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -516,7 +516,7 @@ void StorageHive::initMinMaxIndexExpression() partition_names = partition_name_types.getNames(); partition_types = partition_name_types.getTypes(); partition_minmax_idx_expr = std::make_shared( - std::make_shared(partition_name_types), ExpressionActionsSettings::fromContext(getContext())); + std::make_unique(partition_name_types), ExpressionActionsSettings::fromContext(getContext())); } NamesAndTypesList all_name_types = metadata_snapshot->getColumns().getAllPhysical(); @@ -526,7 +526,7 @@ void StorageHive::initMinMaxIndexExpression() hivefile_name_types.push_back(column); } hivefile_minmax_idx_expr = std::make_shared( - std::make_shared(hivefile_name_types), ExpressionActionsSettings::fromContext(getContext())); + std::make_unique(hivefile_name_types), 
ExpressionActionsSettings::fromContext(getContext())); } ASTPtr StorageHive::extractKeyExpressionList(const ASTPtr & node) @@ -647,7 +647,7 @@ HiveFiles StorageHive::collectHiveFilesFromPartition( for (size_t i = 0; i < partition_names.size(); ++i) ranges.emplace_back(fields[i]); - const KeyCondition partition_key_condition(filter_actions_dag, getContext(), partition_names, partition_minmax_idx_expr); + const KeyCondition partition_key_condition(filter_actions_dag.get(), getContext(), partition_names, partition_minmax_idx_expr); if (!partition_key_condition.checkInHyperrectangle(ranges, partition_types).can_be_true) return {}; } @@ -715,7 +715,7 @@ HiveFilePtr StorageHive::getHiveFileIfNeeded( if (prune_level >= PruneLevel::File) { - const KeyCondition hivefile_key_condition(filter_actions_dag, getContext(), hivefile_name_types.getNames(), hivefile_minmax_idx_expr); + const KeyCondition hivefile_key_condition(filter_actions_dag.get(), getContext(), hivefile_name_types.getNames(), hivefile_minmax_idx_expr); if (hive_file->useFileMinMaxIndex()) { /// Load file level minmax index and apply diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index 2a697fa5654..e03ecc05064 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -160,7 +160,7 @@ KeyDescription KeyDescription::buildEmptyKey() { KeyDescription result; result.expression_list_ast = std::make_shared(); - result.expression = std::make_shared(std::make_shared(), ExpressionActionsSettings{}); + result.expression = std::make_shared(std::make_unique(), ExpressionActionsSettings{}); return result; } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index f8cf19120c7..48ec5529af0 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -691,7 +691,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( ActionsDAGPtr KeyCondition::cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context) { - auto res = std::make_shared(); + auto res = std::make_unique(); std::unordered_map to_inverted; @@ -777,7 +777,7 @@ void KeyCondition::getAllSpaceFillingCurves() } KeyCondition::KeyCondition( - ActionsDAGPtr filter_dag, + const ActionsDAG * filter_dag, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr_, diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 2bc3b108e02..14ef74ea113 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -41,7 +41,7 @@ class KeyCondition public: /// Construct key condition from ActionsDAG nodes KeyCondition( - ActionsDAGPtr filter_dag, + const ActionsDAG * filter_dag, ContextPtr context, const Names & key_column_names, const ExpressionActionsPtr & key_expr, diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 2e0ea4cdbcd..7b642c34f37 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -487,7 +487,7 @@ ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByP ASTPtr expression_ast; ConditionSelectivityEstimator result; - PartitionPruner partition_pruner(storage_snapshot->metadata, filter_dag, local_context); + PartitionPruner partition_pruner(storage_snapshot->metadata, filter_dag.get(), local_context); if (partition_pruner.isUseless()) { @@ -746,7 +746,7 @@ ExpressionActionsPtr 
MergeTreeData::getMinMaxExpr(const KeyDescription & partiti if (!partition_key.column_names.empty()) partition_key_columns = partition_key.expression->getRequiredColumnsWithTypes(); - return std::make_shared(std::make_shared(partition_key_columns), settings); + return std::make_shared(std::make_unique(partition_key_columns), settings); } Names MergeTreeData::getMinMaxColumnsNames(const KeyDescription & partition_key) @@ -1152,7 +1152,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( if (!virtual_columns_block.has(input->result_name)) valid = false; - PartitionPruner partition_pruner(metadata_snapshot, filter_dag, local_context, true /* strict */); + PartitionPruner partition_pruner(metadata_snapshot, filter_dag.get(), local_context, true /* strict */); if (partition_pruner.isUseless() && !valid) return {}; @@ -6819,7 +6819,7 @@ using PartitionIdToMaxBlock = std::unordered_map; Block MergeTreeData::getMinMaxCountProjectionBlock( const StorageMetadataPtr & metadata_snapshot, const Names & required_columns, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, const DataPartsVector & parts, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context) const diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index c6f736a4afd..52916d85fef 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -403,7 +403,7 @@ public: Block getMinMaxCountProjectionBlock( const StorageMetadataPtr & metadata_snapshot, const Names & required_columns, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, const DataPartsVector & parts, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context) const; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 2e287ff3042..61b8b6fdaa8 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -442,7 +442,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( } void MergeTreeDataSelectExecutor::buildKeyConditionFromPartOffset( - std::optional & part_offset_condition, const ActionsDAGPtr & filter_dag, ContextPtr context) + std::optional & part_offset_condition, const ActionsDAG * filter_dag, ContextPtr context) { if (!filter_dag) return; @@ -463,10 +463,10 @@ void MergeTreeDataSelectExecutor::buildKeyConditionFromPartOffset( return; part_offset_condition.emplace(KeyCondition{ - dag, + dag.get(), context, sample.getNames(), - std::make_shared(std::make_shared(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), + std::make_shared(std::make_unique(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), {}}); } @@ -474,7 +474,7 @@ std::optional> MergeTreeDataSelectExecutor::filterPar const StorageMetadataPtr & metadata_snapshot, const MergeTreeData & data, const MergeTreeData::DataPartsVector & parts, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context) { if (!filter_dag) diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 788355c1e59..39bff5eacd6 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -161,7 +161,7 @@ public: /// If possible, construct optional key condition from predicates containing _part_offset column. 
static void buildKeyConditionFromPartOffset( - std::optional & part_offset_condition, const ActionsDAGPtr & filter_dag, ContextPtr context); + std::optional & part_offset_condition, const ActionsDAG * filter_dag, ContextPtr context); /// If possible, filter using expression on virtual columns. /// Example: SELECT count() FROM table WHERE _part = 'part_name' @@ -170,7 +170,7 @@ public: const StorageMetadataPtr & metadata_snapshot, const MergeTreeData & data, const MergeTreeData::DataPartsVector & parts, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context); /// Filter parts using minmax index and partition key. diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index e492ca0aec2..457c85eaa46 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -332,7 +332,7 @@ MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const Selec return std::make_shared(index, query, distance_function, context); }; -MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const ActionsDAGPtr &, ContextPtr) const +MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); } diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h index d511ab84859..282920c608e 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h @@ -99,7 +99,7 @@ public: MergeTreeIndexGranulePtr createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr &, ContextPtr) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; bool isVectorSearch() const override { return true; } private: diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index fc5147bb56c..c6a00751f25 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -201,7 +201,7 @@ bool maybeTrueOnBloomFilter(const IColumn * hash_column, const BloomFilterPtr & } MergeTreeIndexConditionBloomFilter::MergeTreeIndexConditionBloomFilter( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_) + const ActionsDAG * filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_) : WithContext(context_), header(header_), hash_functions(hash_functions_) { if (!filter_actions_dag) @@ -897,7 +897,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexBloomFilter::createIndexAggregator(con return std::make_shared(bits_per_row, hash_functions, index.column_names); } -MergeTreeIndexConditionPtr MergeTreeIndexBloomFilter::createIndexCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const +MergeTreeIndexConditionPtr MergeTreeIndexBloomFilter::createIndexCondition(const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(filter_actions_dag, context, index.sample_block, hash_functions); } diff --git 
a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h index d66c4b8b6ca..bd1b137176a 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.h @@ -69,7 +69,7 @@ public: std::vector> predicate; }; - MergeTreeIndexConditionBloomFilter(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_); + MergeTreeIndexConditionBloomFilter(const ActionsDAG * filter_actions_dag, ContextPtr context_, const Block & header_, size_t hash_functions_); bool alwaysUnknownOrTrue() const override; @@ -142,7 +142,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG * filter_actions_dag, ContextPtr context) const override; private: size_t bits_per_row; diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp index 8cf58687125..5b6813d12e3 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp @@ -138,7 +138,7 @@ void MergeTreeIndexAggregatorBloomFilterText::update(const Block & block, size_t } MergeTreeConditionBloomFilterText::MergeTreeConditionBloomFilterText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & index_sample_block, const BloomFilterParameters & params_, @@ -733,7 +733,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexBloomFilterText::createIndexAggregator } MergeTreeIndexConditionPtr MergeTreeIndexBloomFilterText::createIndexCondition( - const ActionsDAGPtr & filter_dag, ContextPtr context) const + const ActionsDAG * filter_dag, ContextPtr context) const { return std::make_shared(filter_dag, context, index.sample_block, params, token_extractor.get()); } diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h index 6fd969030df..fe042884550 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.h @@ -62,7 +62,7 @@ class MergeTreeConditionBloomFilterText final : public IMergeTreeIndexCondition { public: MergeTreeConditionBloomFilterText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & index_sample_block, const BloomFilterParameters & params_, @@ -163,7 +163,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_dag, ContextPtr context) const override; + const ActionsDAG * filter_dag, ContextPtr context) const override; BloomFilterParameters params; /// Function for selecting next token. 
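The hunks above switch createIndexCondition and the index-condition constructors from taking const ActionsDAGPtr & to a raw, non-owning const ActionsDAG *, with the caller passing dag.get() or nullptr. The following is only a minimal standalone sketch of that calling convention under the assumption that the caller holds the DAG in a std::unique_ptr; Dag, IndexCondition and makeCondition are hypothetical names, not ClickHouse types.

// Sketch only, not part of the patch: read-only consumers take a raw pointer,
// ownership stays with the caller's std::unique_ptr.
#include <memory>

struct Dag { int nodes = 0; };

struct IndexCondition
{
    // Accepts a non-owning pointer; nullptr means "no filter pushed down".
    explicit IndexCondition(const Dag * filter) : has_filter(filter != nullptr) {}
    bool has_filter;
};

IndexCondition makeCondition(const Dag * filter)
{
    return IndexCondition(filter);
}

int main()
{
    std::unique_ptr<Dag> dag = std::make_unique<Dag>();
    IndexCondition with_filter = makeCondition(dag.get()); // caller keeps ownership
    IndexCondition without_filter = makeCondition(nullptr);
    return (with_filter.has_filter && !without_filter.has_filter) ? 0 : 1;
}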
diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index 47ce24b91eb..cd6af68ebcc 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -186,7 +186,7 @@ void MergeTreeIndexAggregatorFullText::update(const Block & block, size_t * pos, } MergeTreeConditionFullText::MergeTreeConditionFullText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context_, const Block & index_sample_block, const GinFilterParameters & params_, @@ -768,7 +768,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexFullText::createIndexAggregatorForPart } MergeTreeIndexConditionPtr MergeTreeIndexFullText::createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const + const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(filter_actions_dag, context, index.sample_block, params, token_extractor.get()); }; diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.h b/src/Storages/MergeTree/MergeTreeIndexFullText.h index 1a5e848e5ac..8e0b1a22acb 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.h +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.h @@ -63,7 +63,7 @@ class MergeTreeConditionFullText final : public IMergeTreeIndexCondition, WithCo { public: MergeTreeConditionFullText( - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & index_sample_block, const GinFilterParameters & params_, @@ -170,7 +170,7 @@ public: MergeTreeIndexGranulePtr createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexAggregatorPtr createIndexAggregatorForPart(const GinIndexStorePtr & store, const MergeTreeWriterSettings & /*settings*/) const override; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG * filter_actions_dag, ContextPtr context) const override; GinFilterParameters params; /// Function for selecting next token. 
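Elsewhere in this patch series, steps that must keep their DAG for later introspection (describeActions and similar) now store their own copy and hand a clone, or std::move their last copy, to the ExpressionActions they build, since a unique_ptr cannot be shared. The code below is only a minimal sketch of that ownership pattern under the same unique-ownership assumption; Dag, Step and Consumer are hypothetical names.

// Sketch only, not part of the patch: keep a copy via clone(), or give
// ownership away with std::move() when the DAG is no longer needed locally.
#include <memory>
#include <utility>

struct Dag
{
    int nodes = 0;
    std::unique_ptr<Dag> clone() const { return std::make_unique<Dag>(*this); }
};

struct Consumer
{
    explicit Consumer(std::unique_ptr<Dag> dag_) : dag(std::move(dag_)) {}
    std::unique_ptr<Dag> dag;
};

struct Step
{
    explicit Step(const Dag & dag_) : dag(dag_.clone()) {}

    Consumer makeConsumer() const { return Consumer(dag->clone()); }      // keeps our copy intact
    Consumer releaseIntoConsumer() { return Consumer(std::move(dag)); }   // transfers ownership

    std::unique_ptr<Dag> dag;
};

int main()
{
    Dag original;
    Step step(original);
    Consumer first = step.makeConsumer();        // step.dag is still valid here
    Consumer last = step.releaseIntoConsumer();  // step.dag is now empty
    return (first.dag && last.dag && !step.dag) ? 0 : 1;
}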
diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp index 0995e2724ec..cd8065ecadf 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp @@ -79,7 +79,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexHypothesis::createIndexAggregator(cons } MergeTreeIndexConditionPtr MergeTreeIndexHypothesis::createIndexCondition( - const ActionsDAGPtr &, ContextPtr) const + const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not supported"); } diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.h b/src/Storages/MergeTree/MergeTreeIndexHypothesis.h index 130e708d76f..e60335fe724 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesis.h +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.h @@ -69,7 +69,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + const ActionsDAG * filter_actions_dag, ContextPtr context) const override; MergeTreeIndexMergedConditionPtr createIndexMergedCondition( const SelectQueryInfo & query_info, StorageMetadataPtr storage_metadata) const override; diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index 20dfed8cf8f..c60d63a59ba 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -157,7 +157,7 @@ void MergeTreeIndexAggregatorMinMax::update(const Block & block, size_t * pos, s namespace { -KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) +KeyCondition buildCondition(const IndexDescription & index, const ActionsDAG * filter_actions_dag, ContextPtr context) { return KeyCondition{filter_actions_dag, context, index.column_names, index.expression}; } @@ -165,7 +165,7 @@ KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr } MergeTreeIndexConditionMinMax::MergeTreeIndexConditionMinMax( - const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) + const IndexDescription & index, const ActionsDAG * filter_actions_dag, ContextPtr context) : index_data_types(index.data_types) , condition(buildCondition(index, filter_actions_dag, context)) { @@ -198,7 +198,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexMinMax::createIndexAggregator(const Me } MergeTreeIndexConditionPtr MergeTreeIndexMinMax::createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const + const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(index, filter_actions_dag, context); } diff --git a/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/src/Storages/MergeTree/MergeTreeIndexMinMax.h index dca26fb7b28..c5031ccbb27 100644 --- a/src/Storages/MergeTree/MergeTreeIndexMinMax.h +++ b/src/Storages/MergeTree/MergeTreeIndexMinMax.h @@ -50,7 +50,7 @@ class MergeTreeIndexConditionMinMax final : public IMergeTreeIndexCondition public: MergeTreeIndexConditionMinMax( const IndexDescription & index, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, ContextPtr context); bool alwaysUnknownOrTrue() const override; @@ -77,7 +77,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const 
MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + const ActionsDAG * filter_actions_dag, ContextPtr context) const override; const char* getSerializedFileExtension() const override { return ".idx2"; } MergeTreeIndexFormat getDeserializedFormat(const IDataPartStorage & data_part_storage, const std::string & path_prefix) const override; /// NOLINT diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index b11cbf1e034..7c65381b05b 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -245,7 +245,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( const String & index_name_, const Block & index_sample_block, size_t max_rows_, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context) : index_name(index_name_) , max_rows(max_rows_) @@ -272,9 +272,9 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( filter_actions_dag->getOutputs()[0] = &traverseDAG(*filter_actions_dag_node, filter_actions_dag, context, node_to_result_node); filter_actions_dag->removeUnusedActions(); - actions = std::make_shared(filter_actions_dag); actions_output_column_name = filter_actions_dag->getOutputs().at(0)->result_name; + actions = std::make_shared(std::move(filter_actions_dag)); } bool MergeTreeIndexConditionSet::alwaysUnknownOrTrue() const @@ -544,7 +544,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexSet::createIndexAggregator(const Merge } MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const + const ActionsDAG * filter_actions_dag, ContextPtr context) const { return std::make_shared(index.name, index.sample_block, max_rows, filter_actions_dag, context); } diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h index 6efc2effafd..abd40b3cf9d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -83,7 +83,7 @@ public: const String & index_name_, const Block & index_sample_block, size_t max_rows_, - const ActionsDAGPtr & filter_dag, + const ActionsDAG * filter_dag, ContextPtr context); bool alwaysUnknownOrTrue() const override; @@ -138,7 +138,7 @@ public: MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + const ActionsDAG * filter_actions_dag, ContextPtr context) const override; size_t max_rows = 0; }; diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp index c9df7210569..59a4b0fbf9c 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp @@ -367,7 +367,7 @@ MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const Sel return std::make_shared(index, query, distance_function, context); }; -MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const ActionsDAGPtr &, ContextPtr) const +MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); } diff --git 
a/src/Storages/MergeTree/MergeTreeIndexUSearch.h b/src/Storages/MergeTree/MergeTreeIndexUSearch.h index 5107cfee371..41de94402c9 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.h +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.h @@ -101,7 +101,7 @@ public: MergeTreeIndexGranulePtr createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAGPtr &, ContextPtr) const override; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; bool isVectorSearch() const override { return true; } private: diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index a9f1fa9378f..1be73e1c811 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -167,7 +167,7 @@ struct IMergeTreeIndex } virtual MergeTreeIndexConditionPtr createIndexCondition( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const = 0; + const ActionsDAG * filter_actions_dag, ContextPtr context) const = 0; virtual bool isVectorSearch() const { return false; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 78b67de1a7e..8fa5b2cc955 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -80,7 +80,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep row_level_filter_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->row_level_filter, actions_settings), + .actions = std::make_shared(prewhere_info->row_level_filter->clone(), actions_settings), .filter_column_name = prewhere_info->row_level_column_name, .remove_filter_column = true, .need_filter = true, @@ -96,7 +96,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep prewhere_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->prewhere_actions, actions_settings), + .actions = std::make_shared(prewhere_info->prewhere_actions->clone(), actions_settings), .filter_column_name = prewhere_info->prewhere_column_name, .remove_filter_column = prewhere_info->remove_prewhere_column, .need_filter = prewhere_info->need_filter, diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 02f8d6f4f6a..98b35a3ca2c 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -376,7 +376,7 @@ public: { const auto & primary_key = storage_snapshot->metadata->getPrimaryKey(); const Names & primary_key_column_names = primary_key.column_names; - KeyCondition key_condition(filter, context, primary_key_column_names, primary_key.expression); + KeyCondition key_condition(filter.get(), context, primary_key_column_names, primary_key.expression); LOG_DEBUG(log, "Key condition: {}", key_condition.toString()); if (!key_condition.alwaysFalse()) @@ -437,7 +437,7 @@ void createReadFromPartStep( auto reading = std::make_unique(type, storage, storage_snapshot, std::move(data_part), std::move(columns_to_read), apply_deleted_mask, - filter, std::move(context), log); + 
std::move(filter), std::move(context), log); plan.addStep(std::move(reading)); } diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 43e3b0c505a..25596b42951 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -50,7 +50,7 @@ void fillRequiredColumns(const ActionsDAG::Node * node, std::unordered_map; const ActionsDAG::Node & addClonedDAGToDAG( size_t step, const ActionsDAG::Node * original_dag_node, - ActionsDAGPtr new_dag, + const ActionsDAGPtr & new_dag, OriginalToNewNodeMap & node_remap, NodeNameToLastUsedStepMap & node_to_step_map) { @@ -72,7 +72,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { /// If the node is already in the new DAG, return it const auto & node_ref = node_remap.at(node_name); - if (node_ref.dag == new_dag) + if (node_ref.dag == new_dag.get()) return *node_ref.node; /// If the node is known from the previous steps, add it as an input, except for constants @@ -80,7 +80,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { node_ref.dag->addOrReplaceInOutputs(*node_ref.node); const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); - node_remap[node_name] = {new_dag, &new_node}; /// TODO: here we update the node reference. Is it always correct? + node_remap[node_name] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? /// Remember the index of the last step which reuses this node. /// We cannot remove this node from the outputs before that step. @@ -93,7 +93,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( if (original_dag_node->type == ActionsDAG::ActionType::INPUT) { const auto & new_node = new_dag->addInput(original_dag_node->result_name, original_dag_node->result_type); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -102,7 +102,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & new_node = new_dag->addColumn( ColumnWithTypeAndName(original_dag_node->column, original_dag_node->result_type, original_dag_node->result_name)); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -110,7 +110,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & alias_child = addClonedDAGToDAG(step, original_dag_node->children[0], new_dag, node_remap, node_to_step_map); const auto & new_node = new_dag->addAlias(alias_child, original_dag_node->result_name); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -125,7 +125,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( } const auto & new_node = new_dag->addFunction(original_dag_node->function_base, new_children, original_dag_node->result_name); - node_remap[node_name] = {new_dag, &new_node}; + node_remap[node_name] = {new_dag.get(), &new_node}; return new_node; } @@ -133,13 +133,13 @@ const ActionsDAG::Node & addClonedDAGToDAG( } const ActionsDAG::Node & addFunction( - ActionsDAGPtr new_dag, + const ActionsDAGPtr & new_dag, const FunctionOverloadResolverPtr & function, ActionsDAG::NodeRawConstPtrs children, OriginalToNewNodeMap & node_remap) { const auto & new_node = new_dag->addFunction(function, children, ""); - node_remap[new_node.result_name] = {new_dag, &new_node}; + node_remap[new_node.result_name] = {new_dag.get(), 
&new_node}; return new_node; } @@ -147,7 +147,7 @@ const ActionsDAG::Node & addFunction( /// This is different from ActionsDAG::addCast() because it set the name equal to the original name effectively hiding the value before cast, /// but it might be required for further steps with its original uncasted type. const ActionsDAG::Node & addCast( - ActionsDAGPtr dag, + const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, const String & type_name, OriginalToNewNodeMap & node_remap) @@ -173,7 +173,7 @@ const ActionsDAG::Node & addCast( /// 1. produces a result with the proper Nullable or non-Nullable UInt8 type and /// 2. makes sure that the result contains only 0 or 1 values even if the source column contains non-boolean values. const ActionsDAG::Node & addAndTrue( - ActionsDAGPtr dag, + const ActionsDAGPtr & dag, const ActionsDAG::Node & filter_node_to_normalize, OriginalToNewNodeMap & node_remap) { @@ -258,7 +258,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction for (size_t step_index = 0; step_index < condition_groups.size(); ++step_index) { const auto & condition_group = condition_groups[step_index]; - ActionsDAGPtr step_dag = std::make_shared(); + ActionsDAGPtr step_dag = std::make_unique(); String result_name; std::vector new_condition_nodes; @@ -299,7 +299,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction } } - steps.push_back({step_dag, result_name}); + steps.push_back({std::move(step_dag), result_name}); } /// 6. Find all outputs of the original DAG @@ -345,11 +345,11 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { for (size_t step_index = 0; step_index < steps.size(); ++step_index) { - const auto & step = steps[step_index]; + auto & step = steps[step_index]; PrewhereExprStep new_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(step.actions, actions_settings), + .actions = std::make_shared(std::move(step.actions), actions_settings), .filter_column_name = step.column_name, /// Don't remove if it's in the list of original outputs .remove_filter_column = diff --git a/src/Storages/MergeTree/PartitionPruner.cpp b/src/Storages/MergeTree/PartitionPruner.cpp index 9de7b238f57..6df7b5aa054 100644 --- a/src/Storages/MergeTree/PartitionPruner.cpp +++ b/src/Storages/MergeTree/PartitionPruner.cpp @@ -4,7 +4,7 @@ namespace DB { -PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict) +PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, const ActionsDAG * filter_actions_dag, ContextPtr context, bool strict) : partition_key(MergeTreePartition::adjustPartitionKey(metadata, context)) , partition_condition(filter_actions_dag, context, partition_key.column_names, partition_key.expression, true /* single_point */) , useless((strict && partition_condition.isRelaxed()) || partition_condition.alwaysUnknownOrTrue()) diff --git a/src/Storages/MergeTree/PartitionPruner.h b/src/Storages/MergeTree/PartitionPruner.h index ca24559ca01..d89dfb7b245 100644 --- a/src/Storages/MergeTree/PartitionPruner.h +++ b/src/Storages/MergeTree/PartitionPruner.h @@ -13,7 +13,7 @@ namespace DB class PartitionPruner { public: - PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict = false); + PartitionPruner(const StorageMetadataPtr & metadata, const ActionsDAG * filter_actions_dag, ContextPtr context, bool strict = false); bool 
canBePruned(const IMergeTreeDataPart & part) const; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 2fc6993369d..f640fb9ba0a 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -79,7 +79,7 @@ StorageObjectStorageSource::~StorageObjectStorageSource() void StorageObjectStorageSource::setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) { - setKeyConditionImpl(filter_actions_dag, context_, read_from_format_info.format_header); + setKeyConditionImpl(filter_actions_dag.get(), context_, read_from_format_info.format_header); } std::string StorageObjectStorageSource::getUniqueStoragePathIdentifier( diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 52b6674c93d..654b8b788fe 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -19,7 +19,7 @@ class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; class ActionsDAG; -using ActionsDAGPtr = std::shared_ptr; +using ActionsDAGPtr = std::unique_ptr; struct PrewhereInfo; using PrewhereInfoPtr = std::shared_ptr; @@ -192,7 +192,7 @@ struct SelectQueryInfo ASTPtr parallel_replica_custom_key_ast; /// Filter actions dag for current storage - ActionsDAGPtr filter_actions_dag; + std::shared_ptr filter_actions_dag; ReadInOrderOptimizerPtr order_optimizer; /// Can be modified while reading from storage diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index a3f6b6afc5d..9bddf4f0230 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -432,7 +432,7 @@ void StorageBuffer::read( { return std::make_shared( header, - std::make_shared(query_info.prewhere_info->row_level_filter, actions_settings), + std::make_shared(query_info.prewhere_info->row_level_filter->clone(), actions_settings), query_info.prewhere_info->row_level_column_name, false); }); @@ -442,7 +442,7 @@ void StorageBuffer::read( { return std::make_shared( header, - std::make_shared(query_info.prewhere_info->prewhere_actions, actions_settings), + std::make_shared(query_info.prewhere_info->prewhere_actions->clone(), actions_settings), query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column); }); diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 5048ef4788e..be421e8e2bc 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1120,7 +1120,7 @@ static ActionsDAGPtr getFilterFromQuery(const ASTPtr & ast, ContextPtr context) if (!source) return nullptr; - return source->getFilterActionsDAG(); + return source->detachFilterActionsDAG(); } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index aaf84f6f82c..702c257bfb6 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1235,7 +1235,7 @@ StorageFileSource::~StorageFileSource() void StorageFileSource::setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) { - setKeyConditionImpl(filter_actions_dag, context_, block_for_format); + setKeyConditionImpl(filter_actions_dag.get(), context_, block_for_format); } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index e2f92f08d7a..c42e3058347 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1235,7 +1235,7 @@ 
ReadFromMerge::RowPolicyData::RowPolicyData(RowPolicyFilterPtr row_policy_filter auto expression_analyzer = ExpressionAnalyzer{expr, syntax_result, local_context}; actions_dag = expression_analyzer.getActionsDAG(false /* add_aliases */, false /* project_result */); - filter_actions = std::make_shared(actions_dag, + filter_actions = std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); const auto & required_columns = filter_actions->getRequiredColumnsWithTypes(); const auto & sample_block_columns = filter_actions->getSampleBlock().getNamesAndTypesList(); @@ -1273,12 +1273,12 @@ void ReadFromMerge::RowPolicyData::extendNames(Names & names) const void ReadFromMerge::RowPolicyData::addStorageFilter(SourceStepWithFilter * step) const { - step->addFilter(actions_dag, filter_column_name); + step->addFilter(actions_dag->clone(), filter_column_name); } void ReadFromMerge::RowPolicyData::addFilterTransform(QueryPlan & plan) const { - auto filter_step = std::make_unique(plan.getCurrentDataStream(), actions_dag, filter_column_name, true /* remove filter column */); + auto filter_step = std::make_unique(plan.getCurrentDataStream(), actions_dag->clone(), filter_column_name, true /* remove filter column */); plan.addStep(std::move(filter_step)); } @@ -1471,7 +1471,7 @@ void ReadFromMerge::convertAndFilterSourceStream( { pipe_columns.emplace_back(NameAndTypePair(alias.name, alias.type)); - auto actions_dag = std::make_shared(pipe_columns); + auto actions_dag = std::make_unique(pipe_columns); QueryTreeNodePtr query_tree = buildQueryTree(alias.expression, local_context); query_tree->setAlias(alias.name); @@ -1486,7 +1486,7 @@ void ReadFromMerge::convertAndFilterSourceStream( throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected to have 1 output but got {}", nodes.size()); actions_dag->addOrReplaceInOutputs(actions_dag->addAlias(*nodes.front(), alias.name)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), actions_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(actions_dag)); child.plan.addStep(std::move(expression_step)); } } diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index f550ccb2bc4..c336f597f41 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -187,7 +187,7 @@ public: void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override { - setKeyConditionImpl(filter_actions_dag, context_, block_for_format); + setKeyConditionImpl(filter_actions_dag.get(), context_, block_for_format); } Chunk generate() override; diff --git a/src/Storages/StorageValues.cpp b/src/Storages/StorageValues.cpp index 894b1404a21..4d73f8e5c87 100644 --- a/src/Storages/StorageValues.cpp +++ b/src/Storages/StorageValues.cpp @@ -48,14 +48,14 @@ Pipe StorageValues::read( if (!prepared_pipe.empty()) { - auto dag = std::make_shared(prepared_pipe.getHeader().getColumnsWithTypeAndName()); + auto dag = std::make_unique(prepared_pipe.getHeader().getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs outputs; outputs.reserve(column_names.size()); for (const auto & name : column_names) outputs.push_back(dag->getOutputs()[prepared_pipe.getHeader().getPositionByName(name)]); dag->getOutputs().swap(outputs); - auto expression = std::make_shared(dag); + auto expression = std::make_shared(std::move(dag)); prepared_pipe.addSimpleTransform([&](const Block & header) { diff --git a/src/Storages/StorageView.cpp 
b/src/Storages/StorageView.cpp index 016de94c17c..2c0d5c5ca85 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -177,7 +177,7 @@ void StorageView::read( /// It's expected that the columns read from storage are not constant. /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery. - auto materializing_actions = std::make_shared(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + auto materializing_actions = std::make_unique(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); materializing_actions->addMaterializingOutputActions(); auto materializing = std::make_unique(query_plan.getCurrentDataStream(), std::move(materializing_actions)); diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index f831465277d..56f65b57367 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -183,7 +183,7 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType dag->getOutputs() = {col}; dag->removeUnusedActions(); - result.expression = std::make_shared(dag, ExpressionActionsSettings::fromContext(context_copy)); + result.expression = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context_copy)); result.sets = analyzer.getPreparedSets(); return result; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 778c9e13adb..6f7d1d4c39f 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -77,10 +77,10 @@ void buildSetsForDAG(const ActionsDAGPtr & dag, const ContextPtr & context) } } -void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context) +void filterBlockWithDAG(const ActionsDAGPtr & dag, Block & block, ContextPtr context) { buildSetsForDAG(dag, context); - auto actions = std::make_shared(dag); + auto actions = std::make_shared(dag->clone()); Block block_with_filter = block; actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index fbfbdd6c6cc..0cf8470bc60 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -23,7 +23,7 @@ namespace VirtualColumnUtils void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. Block should contain all the required columns. -void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context); +void filterBlockWithDAG(const ActionsDAGPtr & dag, Block & block, ContextPtr context); /// Builds sets used by ActionsDAG inplace. 
void buildSetsForDAG(const ActionsDAGPtr & dag, const ContextPtr & context); diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 8bca1c97aad..da4e751a88a 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -567,7 +567,7 @@ std::pair StorageWindowView::getNewBlocks(UInt32 watermark) builder.addSimpleTransform([&](const Block & header) { return std::make_shared( - header, std::make_shared(filter_expression), filter_function->getColumnName(), true); + header, std::make_shared(std::move(filter_expression)), filter_function->getColumnName(), true); }); /// Adding window column @@ -592,7 +592,7 @@ std::pair StorageWindowView::getNewBlocks(UInt32 watermark) new_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); auto actions = std::make_shared( - convert_actions_dag, ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); builder.addSimpleTransform([&](const Block & stream_header) { return std::make_shared(stream_header, actions); @@ -700,7 +700,7 @@ inline void StorageWindowView::fire(UInt32 watermark) getTargetTable()->getInMemoryMetadataPtr()->getColumns(), getContext(), getContext()->getSettingsRef().insert_null_as_default); - auto adding_missing_defaults_actions = std::make_shared(adding_missing_defaults_dag); + auto adding_missing_defaults_actions = std::make_shared(std::move(adding_missing_defaults_dag)); pipe.addSimpleTransform([&](const Block & stream_header) { return std::make_shared(stream_header, adding_missing_defaults_actions); @@ -711,7 +711,7 @@ inline void StorageWindowView::fire(UInt32 watermark) block_io.pipeline.getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Position); auto actions = std::make_shared( - convert_actions_dag, + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes)); pipe.addSimpleTransform([&](const Block & stream_header) { @@ -1475,7 +1475,7 @@ void StorageWindowView::writeIntoWindowView( pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( - header, std::make_shared(filter_expression), + header, std::make_shared(std::move(filter_expression)), filter_function->getColumnName(), true); }); } @@ -1583,7 +1583,7 @@ void StorageWindowView::writeIntoWindowView( output->getHeader().getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); auto convert_actions = std::make_shared( - convert_actions_dag, ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); builder.addSimpleTransform([&](const Block & header) { return std::make_shared(header, convert_actions); }); } From 968c1d94bdf20814f406cfd26af120cc78c50486 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 20:07:43 +0000 Subject: [PATCH 0155/2361] Add description to settings and settings changes --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.h | 1 + src/Storages/Kafka/KafkaSettings.h | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index a2759285174..df675a09a04 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -705,7 +705,7 @@ class IColumn; M(UInt64, 
max_size_to_preallocate_for_aggregation, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \ \ M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit on kafka_num_consumers that depends on the number of available CPU cores", 0) \ - M(Bool, allow_experimental_kafka_store_offsets_in_keeper, false, "Allow experimental feature to store Kafka related offsets in Keeper", 0) \ + M(Bool, allow_experimental_kafka_store_offsets_in_keeper, false, "Allow experimental feature to store Kafka related offsets in ClickHouse Keeper. When enabled a ClickHouse Keeper path and replica name can be specified to the Kafka table engine. As a result instead of the regular Kafka engine, a new type of storage engine will be used that stores the committed offsets primarily in ClickHouse Keeper", 0) \ M(Bool, enable_software_prefetch_in_aggregation, true, "Enable use of software prefetch in aggregation", 0) \ M(Bool, allow_aggregate_partitions_independently, false, "Enable independent aggregation of partitions on separate threads when partition key suits group by key. Beneficial when number of partitions close to number of cores and partitions have roughly the same size", 0) \ M(Bool, force_aggregate_partitions_independently, false, "Force the use of optimization when it is applicable, but heuristics decided not to use it", 0) \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index 9352b22132f..6dcf8ddb95d 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -95,6 +95,7 @@ static std::map sett {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, + {"allow_experimental_kafka_store_offsets_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"}, }}, {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, diff --git a/src/Storages/Kafka/KafkaSettings.h b/src/Storages/Kafka/KafkaSettings.h index c9ee42b54aa..9ca5e189f0e 100644 --- a/src/Storages/Kafka/KafkaSettings.h +++ b/src/Storages/Kafka/KafkaSettings.h @@ -38,8 +38,8 @@ const auto KAFKA_CONSUMERS_POOL_TTL_MS_MAX = 600'000; M(StreamingHandleErrorMode, kafka_handle_error_mode, StreamingHandleErrorMode::DEFAULT, "How to handle errors for Kafka engine. 
Possible values: default (throw an exception after rabbitmq_skip_broken_messages broken messages), stream (save broken messages and errors in virtual columns _raw_message, _error).", 0) \ M(Bool, kafka_commit_on_select, false, "Commit messages when select query is made", 0) \ M(UInt64, kafka_max_rows_per_message, 1, "The maximum number of rows produced in one kafka message for row-based formats.", 0) \ - M(String, kafka_keeper_path, "", "TODO(antaljanosbenjamin)", 0) \ - M(String, kafka_replica_name, "", "TODO(antaljanosbenjamin)", 0) \ + M(String, kafka_keeper_path, "", "The path to the table in ClickHouse Keeper", 0) \ + M(String, kafka_replica_name, "", "The replica name in ClickHouse Keeper", 0) \ #define OBSOLETE_KAFKA_SETTINGS(M, ALIAS) \ MAKE_OBSOLETE(M, Char, kafka_row_delimiter, '\0') \ From 29546d1655b53676868361534f4002d6c339253e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 18 Jun 2024 20:17:32 +0000 Subject: [PATCH 0156/2361] Add minimal docs --- .../table-engines/integrations/kafka.md | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 7bdc856c9fd..f899fea97de 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -251,6 +251,43 @@ The number of rows in one Kafka message depends on whether the format is row-bas - For row-based formats the number of rows in one Kafka message can be controlled by setting `kafka_max_rows_per_message`. - For block-based formats we cannot divide block into smaller parts, but the number of rows in one block can be controlled by general setting [max_block_size](../../../operations/settings/settings.md#setting-max_block_size). +## Experimental engine to store committed offsets in ClickHouse Keeper + +If `allow_experimental_kafka_store_offsets_in_keeper` is enabled, then two more settings can be specified to the Kafka table engine: + - `kafka_keeper_path` specifies the path to the table in ClickHouse Keeper + - `kafka_replica_name` specifies the replica name in ClickHouse Keeper + +Either both of the settings must be specified or neither of them. When both of them are specified, then a new, experimental Kafka engine will be used. The new engine doesn't depend on storing the committed offsets in Kafka, but stores them in ClickHouse Keeper. It still tries to commit the offsets to Kafka, but it only depends on those offsets when the table is created. In any other circumstances (table is restarted, or recovered after some error), the offsets stored in ClickHouse Keeper will be used to consume messages from. Apart from the committed offset, it also stores how many messages were consumed in the last batch, so if the insert fails, the same number of messages will be consumed, thus enabling deduplication if necessary.
+ +Example: + +``` sql +CREATE TABLE experimental_kafka (key UInt64, value UInt64) +ENGINE = Kafka('localhost:19092', 'my-topic', 'my-consumer', 'JSONEachRow') +SETTINGS + kafka_keeper_path = '/clickhouse/{database}/experimental_kafka', + kafka_replica_name = 'r1' +SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1; +``` + +Or to utilize the `uuid` and `replica` macros similarly to ReplicatedMergeTree: + +``` sql +CREATE TABLE experimental_kafka (key UInt64, value UInt64) +ENGINE = Kafka('localhost:19092', 'my-topic', 'my-consumer', 'JSONEachRow') +SETTINGS + kafka_keeper_path = '/clickhouse/{database}/{uuid}', + kafka_replica_name = '{replica}' +SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1; +``` + +### Known limitations + +As the new engine is experimental, it is not production ready yet. There are a few known limitations of the implementation: + - The biggest limitation is that the engine doesn't support direct reading from the Kafka topic (insertion works, but reading doesn't), thus direct `SELECT` queries will fail. + - Rapidly dropping and recreating the table or specifying the same ClickHouse Keeper path to different engines might cause issues. As a best practice, you can use the `{uuid}` macro to avoid clashing paths. + - To make repeatable reads possible, messages cannot be consumed from multiple partitions on a single thread. On the other hand, the Kafka consumers have to be polled regularly to keep them alive. As a result of these two constraints, we decided to only allow creating multiple consumers if `kafka_thread_per_consumer` is enabled; otherwise it is too complicated to avoid issues regarding polling consumers regularly. + **See Also** - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) From e348186ba26072eb76e549cd2a0adcd801c92bc6 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 18 Jun 2024 21:30:37 +0000 Subject: [PATCH 0157/2361] clear hint in table engine and sources --- src/Access/Common/AccessRightsElement.cpp | 6 +- src/Access/Common/AccessRightsElement.h | 1 + src/Access/ContextAccess.cpp | 80 ++++++++++++++++++----- 3 files changed, 70 insertions(+), 17 deletions(-) diff --git a/src/Access/Common/AccessRightsElement.cpp b/src/Access/Common/AccessRightsElement.cpp index 24ff4e7631b..2ee13d6b94f 100644 --- a/src/Access/Common/AccessRightsElement.cpp +++ b/src/Access/Common/AccessRightsElement.cpp @@ -224,7 +224,11 @@ void AccessRightsElement::replaceEmptyDatabase(const String & current_database) String AccessRightsElement::toString() const { return toStringImpl(*this, true); } String AccessRightsElement::toStringWithoutOptions() const { return toStringImpl(*this, false); } - +String AccessRightsElement::toStringWithoutONClause() const +{ + String result{access_flags.toKeywords().front()}; + return result + " ON {db.table}"; +} bool AccessRightsElements::empty() const { return std::all_of(begin(), end(), [](const AccessRightsElement & e) { return e.empty(); }); } diff --git a/src/Access/Common/AccessRightsElement.h b/src/Access/Common/AccessRightsElement.h index ba625fc43df..49764fc727f 100644 --- a/src/Access/Common/AccessRightsElement.h +++ b/src/Access/Common/AccessRightsElement.h @@ -89,6 +89,7 @@ struct AccessRightsElement /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table".
String toString() const; String toStringWithoutOptions() const; + String toStringWithoutONClause() const; }; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 2a658d7aaa2..3ce30a0b681 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -37,6 +37,24 @@ namespace ErrorCodes namespace { + static const std::vector> source_and_table_engines = { + {AccessType::FILE, "File"}, + {AccessType::URL, "URL"}, + {AccessType::REMOTE, "Distributed"}, + {AccessType::MONGO, "MongoDB"}, + {AccessType::REDIS, "Redis"}, + {AccessType::MYSQL, "MySQL"}, + {AccessType::POSTGRES, "PostgreSQL"}, + {AccessType::SQLITE, "SQLite"}, + {AccessType::ODBC, "ODBC"}, + {AccessType::JDBC, "JDBC"}, + {AccessType::HDFS, "HDFS"}, + {AccessType::S3, "S3"}, + {AccessType::HIVE, "Hive"}, + {AccessType::AZURE, "AzureBlobStorage"} + }; + + AccessRights mixAccessRightsFromUserAndRoles(const User & user, const EnabledRolesInfo & roles_info) { AccessRights res = user.access; @@ -205,22 +223,6 @@ namespace } /// There is overlap between AccessType sources and table engines, so the following code avoids user granting twice. - static const std::vector> source_and_table_engines = { - {AccessType::FILE, "File"}, - {AccessType::URL, "URL"}, - {AccessType::REMOTE, "Distributed"}, - {AccessType::MONGO, "MongoDB"}, - {AccessType::REDIS, "Redis"}, - {AccessType::MYSQL, "MySQL"}, - {AccessType::POSTGRES, "PostgreSQL"}, - {AccessType::SQLITE, "SQLite"}, - {AccessType::ODBC, "ODBC"}, - {AccessType::JDBC, "JDBC"}, - {AccessType::HDFS, "HDFS"}, - {AccessType::S3, "S3"}, - {AccessType::HIVE, "Hive"}, - {AccessType::AZURE, "AzureBlobStorage"} - }; /// Sync SOURCE and TABLE_ENGINE, so only need to check TABLE_ENGINE later. if (access_control.doesTableEnginesRequireGrant()) @@ -555,6 +557,18 @@ std::shared_ptr ContextAccess::getAccessRightsWithImplicit() return nothing_granted; } +/// Just Dummy to pass compile. +template +static std::string_view getTableEngineName(const Args &... args[[maybe_unused]]) +{ + return ""; +} + +template +static std::string_view getTableEngineName(std::string_view name, const Args &... args[[maybe_unused]]) +{ + return name; +} template bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... args) const @@ -611,6 +625,40 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg if (!granted) { + /// As we check the SOURCES from the Table Engine logic, direct prompt about Table Engine would be misleading since + /// SOURCES is not granted actually. In order to solve this, turn the prompt logic back to Sources. + if (flags & AccessType::TABLE_ENGINE && !access_control->doesTableEnginesRequireGrant()) + { + AccessFlags newFlags; + + String table_engine_name{getTableEngineName(args...)}; + for (const auto & source_and_table_engine : source_and_table_engines) + { + const auto & table_engine = std::get<1>(source_and_table_engine); + if (table_engine != table_engine_name) continue; + const auto & source = std::get<0>(source_and_table_engine); + /// Set the flags from Table Engine to SOURCES so that prompts can be meaningful. + newFlags = source; + break; + } + + if (newFlags == AccessType::NONE) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Didn't find the target Source from the Table Engine"); + + if (grant_option && acs->isGranted(flags, args...)) + { + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. " + "The required privileges have been granted, but without grant option. 
" + "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", + AccessRightsElement{newFlags}.toStringWithoutONClause()); + } + + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", + AccessRightsElement{newFlags}.toStringWithoutONClause() + (grant_option ? " WITH GRANT OPTION" : "")); + } + if (grant_option && acs->isGranted(flags, args...)) { return access_denied(ErrorCodes::ACCESS_DENIED, From f7e81e1ae2752020c076990395349ccd2d69cf2b Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 18 Jun 2024 21:59:07 +0000 Subject: [PATCH 0158/2361] fix --- src/Access/ContextAccess.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 3ce30a0b681..de0e7e3d777 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -625,8 +625,8 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg if (!granted) { - /// As we check the SOURCES from the Table Engine logic, direct prompt about Table Engine would be misleading since - /// SOURCES is not granted actually. In order to solve this, turn the prompt logic back to Sources. + /// As we check the SOURCES from the Table Engine logic, direct prompt about Table Engine would be misleading + /// since SOURCES is not granted actually. In order to solve this, turn the prompt logic back to Sources. if (flags & AccessType::TABLE_ENGINE && !access_control->doesTableEnginesRequireGrant()) { AccessFlags newFlags; From a2ee0668f12c8cd1b88b8c4ad46c15271a5a1fd2 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 19 Jun 2024 02:20:22 +0000 Subject: [PATCH 0159/2361] fix --- src/Access/ContextAccess.cpp | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index de0e7e3d777..4620561053b 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -37,7 +37,7 @@ namespace ErrorCodes namespace { - static const std::vector> source_and_table_engines = { + const std::vector> source_and_table_engines = { {AccessType::FILE, "File"}, {AccessType::URL, "URL"}, {AccessType::REMOTE, "Distributed"}, @@ -268,6 +268,11 @@ namespace template std::string_view getDatabase(std::string_view arg1, const OtherArgs &...) { return arg1; } + + std::string_view getTableEngine() { return {}; } + + template + std::string_view getTableEngine(std::string_view arg1, const OtherArgs &...) { return arg1; } } @@ -557,18 +562,6 @@ std::shared_ptr ContextAccess::getAccessRightsWithImplicit() return nothing_granted; } -/// Just Dummy to pass compile. -template -static std::string_view getTableEngineName(const Args &... args[[maybe_unused]]) -{ - return ""; -} - -template -static std::string_view getTableEngineName(std::string_view name, const Args &... args[[maybe_unused]]) -{ - return name; -} template bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... args) const @@ -631,7 +624,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg { AccessFlags newFlags; - String table_engine_name{getTableEngineName(args...)}; + String table_engine_name{getTableEngine(args...)}; for (const auto & source_and_table_engine : source_and_table_engines) { const auto & table_engine = std::get<1>(source_and_table_engine); @@ -642,7 +635,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... 
arg break; } - if (newFlags == AccessType::NONE) + if (newFlags.isEmpty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Didn't find the target Source from the Table Engine"); if (grant_option && acs->isGranted(flags, args...)) From 8ca47905ef000d3eb72a89d9dfcd9b989fce4203 Mon Sep 17 00:00:00 2001 From: skyoct Date: Wed, 19 Jun 2024 14:06:36 +0800 Subject: [PATCH 0160/2361] fix get tag --- src/IO/S3/getObjectInfo.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/IO/S3/getObjectInfo.cpp b/src/IO/S3/getObjectInfo.cpp index afa4079c261..a21fb9fce54 100644 --- a/src/IO/S3/getObjectInfo.cpp +++ b/src/IO/S3/getObjectInfo.cpp @@ -54,8 +54,7 @@ namespace ObjectInfo object_info; object_info.size = static_cast(result.GetContentLength()); object_info.last_modification_time = result.GetLastModified().Seconds(); - String etag(result.GetETag.c_str(), result.GetETag().size()); - object_info.etag = etag; + object_info.etag = result.GetETag(); if (with_metadata) object_info.metadata = result.GetMetadata(); From 11456d5815e1ea1398924ecb6648cb504be9a5dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 19 Jun 2024 09:43:14 +0000 Subject: [PATCH 0161/2361] Fix build with new fmt --- src/Storages/Kafka/KafkaConsumer.cpp | 1 + src/Storages/Kafka/KafkaConsumer.h | 5 ----- src/Storages/Kafka/KafkaConsumer2.cpp | 24 ++++++++++++------------ src/Storages/Kafka/StorageKafka2.cpp | 8 ++++---- src/Storages/Kafka/StorageKafka2.h | 4 ++-- src/Storages/Kafka/StorageKafkaCommon.h | 12 ++++++++++++ 6 files changed, 31 insertions(+), 23 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer.cpp b/src/Storages/Kafka/KafkaConsumer.cpp index 9ba42b9875e..f4385163323 100644 --- a/src/Storages/Kafka/KafkaConsumer.cpp +++ b/src/Storages/Kafka/KafkaConsumer.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include diff --git a/src/Storages/Kafka/KafkaConsumer.h b/src/Storages/Kafka/KafkaConsumer.h index 4daf8652c3b..a3bc97779b3 100644 --- a/src/Storages/Kafka/KafkaConsumer.h +++ b/src/Storages/Kafka/KafkaConsumer.h @@ -1,14 +1,12 @@ #pragma once #include -#include #include #include #include #include -#include #include namespace CurrentMetrics @@ -199,6 +197,3 @@ private: }; } - -template <> struct fmt::formatter : fmt::ostream_formatter {}; -template <> struct fmt::formatter : fmt::ostream_formatter {}; diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 84b6f5153ed..d59a06bc672 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -1,22 +1,22 @@ -// Needs to go first because its partial specialization of fmt::formatter -// should be defined before any instantiation -#include -#include -#include -#include - -#include #include +#include +#include +#include +#include +#include +#include +#include + +#include +#include #include +#include +#include #include #include -#include -#include -#include -#include namespace CurrentMetrics { diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 080201cae9e..ce3630e39af 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -954,10 +954,10 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi for (; tp_it != topic_partitions.end(); ++tp_it, ++path_it) { using zkutil::EphemeralNodeHolder; - LockedTopicPartitionInfo lock_info{.lock = EphemeralNodeHolder::existing(*path_it / lock_file_name, keeper_to_use)}; - - 
lock_info.committed_offset = getNumber(keeper_to_use, *path_it / commit_file_name); - lock_info.intent_size = getNumber(keeper_to_use, *path_it / intent_file_name); + LockedTopicPartitionInfo lock_info{ + EphemeralNodeHolder::existing(*path_it / lock_file_name, keeper_to_use), + getNumber(keeper_to_use, *path_it / commit_file_name), + getNumber(keeper_to_use, *path_it / intent_file_name)}; LOG_TRACE( log, diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 99c97caf9da..0d6734ac0eb 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -99,9 +99,9 @@ private: { KafkaConsumer2Ptr consumer; /// available consumers size_t consume_from_topic_partition_index{0}; - TopicPartitions topic_partitions; + TopicPartitions topic_partitions{}; zkutil::ZooKeeperPtr keeper; - TopicPartitionLocks locks; + TopicPartitionLocks locks{}; Stopwatch watch{CLOCK_MONOTONIC_COARSE}; }; diff --git a/src/Storages/Kafka/StorageKafkaCommon.h b/src/Storages/Kafka/StorageKafkaCommon.h index 59a7983136d..bed09e9a9cd 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.h +++ b/src/Storages/Kafka/StorageKafkaCommon.h @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include namespace Poco @@ -59,3 +61,13 @@ struct KafkaConfigLoader const Names & topics); }; } + + +template <> +struct fmt::formatter : fmt::ostream_formatter +{ +}; +template <> +struct fmt::formatter : fmt::ostream_formatter +{ +}; From aee61f7ea2ed2d5a71b6bbe604b4865c4f0cd5b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 19 Jun 2024 11:27:25 +0000 Subject: [PATCH 0162/2361] Fix asserts --- src/Storages/Kafka/StorageKafka2.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index ce3630e39af..754c2dfa926 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -302,7 +302,7 @@ bool StorageKafka2::activate() if (!activate_in_keeper()) { - assert(storage.is_readonly); + assert(!is_active); return false; } @@ -1203,7 +1203,7 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( void StorageKafka2::threadFunc(size_t idx) { - assert(idx < tasks.size()); + chassert(idx < tasks.size()); auto task = tasks[idx]; std::optional maybe_stall_reason; try From 7523d8b1aacf8b4a9b2fa6d7bc5e54f3ee61ffec Mon Sep 17 00:00:00 2001 From: skyoct Date: Wed, 19 Jun 2024 21:24:26 +0800 Subject: [PATCH 0163/2361] Feat add docs --- docs/en/engines/table-engines/integrations/s3.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index 93f4a187656..d664c37bd0f 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -146,6 +146,7 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr - `_file` — Name of the file. Type: `LowCardinalty(String)`. - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +- `_etag` — ETag of the file. Type: `LowCardinalty(String)`. If the etag is unknown, the value is `NULL`. For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns). 
From b125f8166f32648572c2ed0d540ded56a97ac628 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 19 Jun 2024 16:41:50 +0100 Subject: [PATCH 0164/2361] impl --- tests/performance/array_reduce.xml | 15 ++++++--------- tests/performance/scripts/perf.py | 14 ++++++++++++-- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/tests/performance/array_reduce.xml b/tests/performance/array_reduce.xml index 8e271c155f3..0f50eba43bf 100644 --- a/tests/performance/array_reduce.xml +++ b/tests/performance/array_reduce.xml @@ -1,11 +1,8 @@ - - - - SELECT arrayReduce('count', range(100000000)) - SELECT arrayReduce('sum', range(100000000)) - SELECT arrayReduceInRanges('count', [(1, 100000000)], range(100000000)) - SELECT arrayReduceInRanges('sum', [(1, 100000000)], range(100000000)) - SELECT arrayReduceInRanges('count', arrayZip(range(1000000), range(1000000)), range(100000000))[123456] - SELECT arrayReduceInRanges('sum', arrayZip(range(1000000), range(1000000)), range(100000000))[123456] + SELECT arrayReduce('count', range(1000000)) FROM numbers_mt(500000000) format Null + SELECT arrayReduce('sum', range(1000000)) FROM numbers_mt(500000000) format Null + SELECT arrayReduceInRanges('count', [(1, 1000000)], range(1000000)) FROM numbers_mt(500000000) format Null + SELECT arrayReduceInRanges('sum', [(1, 1000000)], range(1000000)) FROM numbers_mt(500000000) format Null + SELECT arrayReduceInRanges('count', arrayZip(range(1000000), range(1000000)), range(1000000))[123456] + SELECT arrayReduceInRanges('sum', arrayZip(range(1000000), range(1000000)), range(1000000))[123456] diff --git a/tests/performance/scripts/perf.py b/tests/performance/scripts/perf.py index 94f145d82db..f89784a0e0b 100755 --- a/tests/performance/scripts/perf.py +++ b/tests/performance/scripts/perf.py @@ -345,6 +345,18 @@ for query_index in queries_to_run: print(f"display-name\t{query_index}\t{tsv_escape(query_display_name)}") + for conn_index, c in enumerate(all_connections): + try: + c.execute("SYSTEM JEMALLOC PURGE") + + print( + f"purging jemalloc arenas\t{conn_index}\t{c.last_query.elapsed}" + ) + except KeyboardInterrupt: + raise + except: + continue + # Prewarm: run once on both servers. Helps to bring the data into memory, # precompile the queries, etc. # A query might not run on the old server if it uses a function added in the @@ -427,8 +439,6 @@ for query_index in queries_to_run: for conn_index, c in enumerate(this_query_connections): try: - c.execute("SYSTEM JEMALLOC PURGE") - res = c.execute( q, query_id=run_id, From 16c3e36b5a2f3203eb87161f4320ea5e70865fc4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 19 Jun 2024 22:50:52 +0200 Subject: [PATCH 0165/2361] Simplification --- src/Client/ClientBase.cpp | 142 +++++++++++++++++------------------ src/Client/ClientBase.h | 1 + src/Interpreters/Session.cpp | 8 +- src/Interpreters/Session.h | 3 +- 4 files changed, 74 insertions(+), 80 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 617a56cfd95..490a560de2d 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -439,7 +439,7 @@ void ClientBase::sendExternalTables(ASTPtr parsed_query) std::vector data; for (auto & table : external_tables) - data.emplace_back(table.getData(global_context)); + data.emplace_back(table.getData(query_context)); connection->sendExternalTablesData(data); } @@ -652,10 +652,10 @@ try /// intermixed with data with parallel formatting. /// It may increase code complexity significantly. 
if (!extras_into_stdout || select_only_into_file) - output_format = global_context->getOutputFormatParallelIfPossible( + output_format = query_context->getOutputFormatParallelIfPossible( current_format, out_file_buf ? *out_file_buf : *out_buf, block); else - output_format = global_context->getOutputFormat( + output_format = query_context->getOutputFormat( current_format, out_file_buf ? *out_file_buf : *out_buf, block); output_format->setAutoFlush(); @@ -949,7 +949,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) /// But for asynchronous inserts we don't extract data, because it's needed /// to be done on server side in that case (for coalescing the data from multiple inserts on server side). const auto * insert = parsed_query->as(); - if (insert && isSyncInsertWithData(*insert, global_context)) + if (insert && isSyncInsertWithData(*insert, query_context)) query_to_execute = full_query.substr(0, insert->data - full_query.data()); else query_to_execute = full_query; @@ -1067,7 +1067,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa } } - const auto & settings = global_context->getSettingsRef(); + const auto & settings = query_context->getSettingsRef(); const Int32 signals_before_stop = settings.partial_result_on_first_cancel ? 2 : 1; int retries_left = 10; @@ -1082,10 +1082,10 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + query_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &query_context->getSettingsRef(), + &query_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); @@ -1275,7 +1275,7 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { - global_context->setSetting("session_timezone", tz); + query_context->setSetting("session_timezone", tz); } @@ -1471,13 +1471,13 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de void ClientBase::setInsertionTable(const ASTInsertQuery & insert_query) { - if (!global_context->hasInsertionTable() && insert_query.table) + if (!query_context->hasInsertionTable() && insert_query.table) { String table = insert_query.table->as().shortName(); if (!table.empty()) { String database = insert_query.database ? 
insert_query.database->as().shortName() : ""; - global_context->setInsertionTable(StorageID(database, table)); + query_context->setInsertionTable(StorageID(database, table)); } } } @@ -1528,7 +1528,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars const auto & parsed_insert_query = parsed_query->as(); if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && !isStdinNotEmptyAndValid(std_in)))) { - const auto & settings = global_context->getSettingsRef(); + const auto & settings = query_context->getSettingsRef(); if (settings.throw_if_no_data_to_insert) throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert"); else @@ -1542,10 +1542,10 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + query_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &query_context->getSettingsRef(), + &query_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); @@ -1593,7 +1593,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des /// Set callback to be called on file progress. if (tty_buf) - progress_indication.setFileProgressCallback(global_context, *tty_buf); + progress_indication.setFileProgressCallback(query_context, *tty_buf); } /// If data fetched from file (maybe compressed file) @@ -1627,10 +1627,10 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des } StorageFile::CommonArguments args{ - WithContext(global_context), + WithContext(query_context), parsed_insert_query->table_id, current_format, - getFormatSettings(global_context), + getFormatSettings(query_context), compression_method, columns_for_storage_file, ConstraintsDescription{}, @@ -1638,7 +1638,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des {}, String{}, }; - StoragePtr storage = std::make_shared(in_file, global_context->getUserFilesPath(), args); + StoragePtr storage = std::make_shared(in_file, query_context->getUserFilesPath(), args); storage->startup(); SelectQueryInfo query_info; @@ -1647,18 +1647,18 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des auto metadata = storage->getInMemoryMetadataPtr(); QueryPlan plan; storage->read( - plan, - sample.getNames(), - storage->getStorageSnapshot(metadata, global_context), - query_info, - global_context, - {}, - global_context->getSettingsRef().max_block_size, - getNumberOfPhysicalCPUCores()); + plan, + sample.getNames(), + storage->getStorageSnapshot(metadata, query_context), + query_info, + query_context, + {}, + query_context->getSettingsRef().max_block_size, + getNumberOfPhysicalCPUCores()); auto builder = plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(global_context), - BuildQueryPipelineSettings::fromContext(global_context)); + QueryPlanOptimizationSettings::fromContext(query_context), + BuildQueryPipelineSettings::fromContext(query_context)); QueryPlanResourceHolder resources; auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); @@ -1719,14 +1719,14 @@ void ClientBase::sendDataFrom(ReadBuffer & buf, Block & sample, const ColumnsDes current_format = insert->format; } - auto source = global_context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); + auto source 
= query_context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); Pipe pipe(source); if (columns_description.hasDefaults()) { pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, columns_description, *source, global_context); + return std::make_shared(header, columns_description, *source, query_context); }); } @@ -1872,6 +1872,9 @@ void ClientBase::cancelQuery() void ClientBase::processParsedSingleQuery(const String & full_query, const String & query_to_execute, ASTPtr parsed_query, std::optional echo_query_, bool report_error) { + query_context = Context::createCopy(global_context); + query_context->makeQueryContext(); + resetOutput(); have_error = false; cancelled = false; @@ -1888,12 +1891,12 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (is_interactive) { - global_context->setCurrentQueryId(""); + query_context->setCurrentQueryId(""); // Generate a new query_id for (const auto & query_id_format : query_id_formats) { writeString(query_id_format.first, std_out); - writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", global_context->getCurrentQueryId())), std_out); + writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", query_context->getCurrentQueryId())), std_out); writeChar('\n', std_out); std_out.next(); } @@ -1920,7 +1923,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin auto password = auth_data->getPassword(); if (password) - global_context->getAccessControl().checkPasswordComplexityRules(*password); + query_context->getAccessControl().checkPasswordComplexityRules(*password); } } } @@ -1930,47 +1933,40 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin progress_indication.resetProgress(); profile_events.watch.restart(); + /// Apply query settings to context, as they can affect the behavior on client-side. + InterpreterSetQuery::applySettingsFromQuery(parsed_query, query_context); + + if (!connection->checkConnected(connection_parameters.timeouts)) + connect(); + + ASTPtr input_function; + const auto * insert = parsed_query->as(); + if (insert && insert->select) + insert->tryFindInputFunction(input_function); + + bool is_async_insert_with_inlined_data = query_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); + + if (is_async_insert_with_inlined_data) { - /// Temporarily apply query settings to context. 
- Settings old_settings = global_context->getSettings(); - SCOPE_EXIT_SAFE({ - global_context->setSettings(old_settings); - }); + bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && isStdinNotEmptyAndValid(std_in); + bool have_external_data = have_data_in_stdin || insert->infile; - InterpreterSetQuery::applySettingsFromQuery(parsed_query, global_context); - - if (!connection->checkConnected(connection_parameters.timeouts)) - connect(); - - ASTPtr input_function; - const auto * insert = parsed_query->as(); - if (insert && insert->select) - insert->tryFindInputFunction(input_function); - - bool is_async_insert_with_inlined_data = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); - - if (is_async_insert_with_inlined_data) - { - bool have_data_in_stdin = !is_interactive && !stdin_is_a_tty && isStdinNotEmptyAndValid(std_in); - bool have_external_data = have_data_in_stdin || insert->infile; - - if (have_external_data) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Processing async inserts with both inlined and external data (from stdin or infile) is not supported"); - } - - /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately. - if (insert && (!insert->select || input_function) && !is_async_insert_with_inlined_data) - { - if (input_function && insert->format.empty()) - throw Exception(ErrorCodes::INVALID_USAGE_OF_INPUT, "FORMAT must be specified for function input()"); - - processInsertQuery(query_to_execute, parsed_query); - } - else - processOrdinaryQuery(query_to_execute, parsed_query); + if (have_external_data) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Processing async inserts with both inlined and external data (from stdin or infile) is not supported"); } + /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately. + if (insert && (!insert->select || input_function) && !is_async_insert_with_inlined_data) + { + if (input_function && insert->format.empty()) + throw Exception(ErrorCodes::INVALID_USAGE_OF_INPUT, "FORMAT must be specified for function input()"); + + processInsertQuery(query_to_execute, parsed_query); + } + else + processOrdinaryQuery(query_to_execute, parsed_query); + /// Do not change context (current DB, settings) in case of an exception. if (!have_error) { @@ -2651,10 +2647,8 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name) if (!has_log_comment) { - Settings settings = global_context->getSettings(); /// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]" - settings.log_comment = fs::absolute(fs::path(file_name)); - global_context->setSettings(settings); + global_context->setSetting("log_comment", String(fs::absolute(fs::path(file_name)))); } return executeMultiQuery(queries_from_file); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 220fcddc038..228a9d65ea7 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -198,6 +198,7 @@ protected: /// since other members can use them. SharedContextHolder shared_context; ContextMutablePtr global_context; + ContextMutablePtr query_context; bool is_interactive = false; /// Use either interactive line editing interface or batch mode. 
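The refactor above boils down to one pattern: every query gets its own copy of the long-lived global context, so query-level settings cannot leak from one query into the next. A minimal sketch of that idea with a mocked-up context type (hypothetical names, not ClickHouse's real Context API):

```cpp
// Sketch only: demonstrates the "copy per query" semantics, not the real API.
#include <cassert>
#include <map>
#include <memory>
#include <string>

struct MockContext
{
    std::map<std::string, int> settings;

    static std::shared_ptr<MockContext> createCopy(const std::shared_ptr<MockContext> & other)
    {
        return std::make_shared<MockContext>(*other);  // independent copy
    }
};

int main()
{
    auto global_context = std::make_shared<MockContext>();
    global_context->settings["max_threads"] = 8;

    // Per query: copy, mutate, throw away.
    auto query_context = MockContext::createCopy(global_context);
    query_context->settings["max_threads"] = 1;  // e.g. applied from a SETTINGS clause

    assert(global_context->settings["max_threads"] == 8);  // global state is untouched
    return 0;
}
```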
bool is_multiquery = false; diff --git a/src/Interpreters/Session.cpp b/src/Interpreters/Session.cpp index 396562189e0..9dd686290db 100644 --- a/src/Interpreters/Session.cpp +++ b/src/Interpreters/Session.cpp @@ -532,7 +532,7 @@ ContextMutablePtr Session::makeSessionContext() session_context->checkSettingsConstraints(settings_from_auth_server, SettingSource::QUERY); session_context->applySettingsChanges(settings_from_auth_server); - recordLoginSucess(session_context); + recordLoginSuccess(session_context); return session_context; } @@ -596,7 +596,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_name_, std: { session_name_ }, max_sessions_for_user); - recordLoginSucess(session_context); + recordLoginSuccess(session_context); return session_context; } @@ -672,13 +672,13 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t user = query_context->getUser(); /// Interserver does not create session context - recordLoginSucess(query_context); + recordLoginSuccess(query_context); return query_context; } -void Session::recordLoginSucess(ContextPtr login_context) const +void Session::recordLoginSuccess(ContextPtr login_context) const { if (notified_session_log_about_login) return; diff --git a/src/Interpreters/Session.h b/src/Interpreters/Session.h index 14f6f806acd..fc41c78e666 100644 --- a/src/Interpreters/Session.h +++ b/src/Interpreters/Session.h @@ -102,8 +102,7 @@ public: private: std::shared_ptr getSessionLog() const; ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const; - void recordLoginSucess(ContextPtr login_context) const; - + void recordLoginSuccess(ContextPtr login_context) const; mutable bool notified_session_log_about_login = false; const UUID auth_id; From fa5d4cfea183c64e3ff088f922c0960a3c3951e0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 20 Jun 2024 00:41:14 +0200 Subject: [PATCH 0166/2361] Fix error --- src/Client/ClientBase.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 490a560de2d..8fcb9632be0 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -949,7 +949,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) /// But for asynchronous inserts we don't extract data, because it's needed /// to be done on server side in that case (for coalescing the data from multiple inserts on server side). 
const auto * insert = parsed_query->as(); - if (insert && isSyncInsertWithData(*insert, query_context)) + if (insert && isSyncInsertWithData(*insert, global_context)) query_to_execute = full_query.substr(0, insert->data - full_query.data()); else query_to_execute = full_query; From fdfa6adbfa9b44f0943a39f5188285aff4329640 Mon Sep 17 00:00:00 2001 From: skyoct Date: Thu, 20 Jun 2024 22:54:33 +0800 Subject: [PATCH 0167/2361] ignore docs dict etag --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 64ff3e8e2cb..1be52597133 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -248,6 +248,7 @@ DoubleDelta Doxygen Durre ECMA +ETag Ecto EdgeAngle EdgeLengthKm From ff72bbb18d1e78f1edd11e2d04afdbfef00b1b9d Mon Sep 17 00:00:00 2001 From: skyoct Date: Thu, 20 Jun 2024 23:07:16 +0800 Subject: [PATCH 0168/2361] ignore etag dict --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 1be52597133..dda0cf51455 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1569,6 +1569,7 @@ enum's enums erfc errorCodeToName +etag evalMLMethod exFAT expiryMsec From 6f1f416700a32cf95af15b79543e27cbcffe2f14 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 20 Jun 2024 17:37:05 +0100 Subject: [PATCH 0169/2361] one more test --- tests/performance/final_big_column.xml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/performance/final_big_column.xml b/tests/performance/final_big_column.xml index 1fd586d2d90..4bfdfdf804f 100644 --- a/tests/performance/final_big_column.xml +++ b/tests/performance/final_big_column.xml @@ -1,6 +1,7 @@ - 1 + + 8 20G @@ -10,8 +11,8 @@ PARTITION BY toYYYYMM(d) ORDER BY key - INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(5000000) - INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(5000000) + INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers_mt(5000000) + INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers_mt(5000000) SELECT * FROM optimized_select_final FINAL FORMAT Null SETTINGS max_threads = 8 SELECT * FROM optimized_select_final FINAL WHERE key % 10 = 0 FORMAT Null From a8d1d1ecff5f287beada5af68269e597f2be9df7 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 14 Jun 2024 01:24:17 +0000 Subject: [PATCH 0170/2361] Allow query profiler period to be longer than 4 seconds --- src/Common/QueryProfiler.cpp | 20 ++++++++++---------- src/Common/QueryProfiler.h | 10 +++++----- src/Interpreters/ThreadStatusExt.cpp | 12 ++++++------ 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 746010b5462..78bc7ee7e54 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -110,7 +110,7 @@ namespace errno = saved_errno; } - [[maybe_unused]] constexpr UInt32 TIMER_PRECISION = 1e9; + [[maybe_unused]] constexpr UInt64 TIMER_PRECISION = 1e9; } namespace 
ErrorCodes @@ -167,18 +167,18 @@ void Timer::createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal } } -void Timer::set(UInt32 period) +void Timer::set(UInt64 period) { /// Too high frequency can introduce infinite busy loop of signal handlers. We will limit maximum frequency (with 1000 signals per second). - period = std::max(period, 1000000); + period = std::max(period, 1000000); /// Randomize offset as uniform random value from 0 to period - 1. /// It will allow to sample short queries even if timer period is large. /// (For example, with period of 1 second, query with 50 ms duration will be sampled with 1 / 20 probability). /// It also helps to avoid interference (moire). - UInt32 period_rand = std::uniform_int_distribution(0, period)(thread_local_rng); + UInt64 period_rand = std::uniform_int_distribution(0, period)(thread_local_rng); - struct timespec interval{.tv_sec = period / TIMER_PRECISION, .tv_nsec = period % TIMER_PRECISION}; - struct timespec offset{.tv_sec = period_rand / TIMER_PRECISION, .tv_nsec = period_rand % TIMER_PRECISION}; + struct timespec interval{.tv_sec = time_t(period / TIMER_PRECISION), .tv_nsec = long(period % TIMER_PRECISION)}; + struct timespec offset{.tv_sec = time_t(period_rand / TIMER_PRECISION), .tv_nsec = long(period_rand % TIMER_PRECISION)}; struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset}; if (timer_settime(*timer_id, 0, &timer_spec, nullptr)) @@ -229,7 +229,7 @@ void Timer::cleanup() template QueryProfilerBase::QueryProfilerBase( - [[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt32 period, [[maybe_unused]] int pause_signal_) + [[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt64 period, [[maybe_unused]] int pause_signal_) : log(getLogger("QueryProfiler")), pause_signal(pause_signal_) { #if defined(SANITIZER) @@ -270,7 +270,7 @@ QueryProfilerBase::QueryProfilerBase( template -void QueryProfilerBase::setPeriod([[maybe_unused]] UInt32 period_) +void QueryProfilerBase::setPeriod([[maybe_unused]] UInt64 period_) { #if defined(SANITIZER) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers"); @@ -307,7 +307,7 @@ void QueryProfilerBase::cleanup() template class QueryProfilerBase; template class QueryProfilerBase; -QueryProfilerReal::QueryProfilerReal(UInt64 thread_id, UInt32 period) +QueryProfilerReal::QueryProfilerReal(UInt64 thread_id, UInt64 period) : QueryProfilerBase(thread_id, CLOCK_MONOTONIC, period, SIGUSR1) {} @@ -320,7 +320,7 @@ void QueryProfilerReal::signalHandler(int sig, siginfo_t * info, void * context) writeTraceInfo(TraceType::Real, sig, info, context); } -QueryProfilerCPU::QueryProfilerCPU(UInt64 thread_id, UInt32 period) +QueryProfilerCPU::QueryProfilerCPU(UInt64 thread_id, UInt64 period) : QueryProfilerBase(thread_id, CLOCK_THREAD_CPUTIME_ID, period, SIGUSR2) {} diff --git a/src/Common/QueryProfiler.h b/src/Common/QueryProfiler.h index ea4cc73bca6..e3ab0b2e094 100644 --- a/src/Common/QueryProfiler.h +++ b/src/Common/QueryProfiler.h @@ -40,7 +40,7 @@ public: ~Timer(); void createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal); - void set(UInt32 period); + void set(UInt64 period); void stop(); void cleanup(); @@ -54,10 +54,10 @@ template class QueryProfilerBase { public: - QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_); + QueryProfilerBase(UInt64 thread_id, int clock_type, UInt64 period, int pause_signal_); 
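A quick sanity check on the commit title ("Allow query profiler period to be longer than 4 seconds"): the period is expressed in nanoseconds, so the old UInt32 type topped out just above 4 s, which is exactly the limit the switch to UInt64 removes. A minimal, self-contained illustration (editorial arithmetic, not part of the patch):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t uint32_max_ns = UINT32_MAX;  // 4'294'967'295 ns
    std::printf("largest UInt32 period: %.3f s\n", uint32_max_ns / 1e9);  // ~4.295 s

    const uint64_t ten_seconds_ns = 10ULL * 1'000'000'000ULL;  // a 10 s profiler period
    std::printf("10 s period fits in UInt32: %s\n",
                ten_seconds_ns <= uint32_max_ns ? "yes" : "no");  // no -> needs UInt64
    return 0;
}
```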
~QueryProfilerBase(); - void setPeriod(UInt32 period_); + void setPeriod(UInt64 period_); private: void cleanup(); @@ -76,7 +76,7 @@ private: class QueryProfilerReal : public QueryProfilerBase { public: - QueryProfilerReal(UInt64 thread_id, UInt32 period); /// NOLINT + QueryProfilerReal(UInt64 thread_id, UInt64 period); /// NOLINT static void signalHandler(int sig, siginfo_t * info, void * context); }; @@ -85,7 +85,7 @@ public: class QueryProfilerCPU : public QueryProfilerBase { public: - QueryProfilerCPU(UInt64 thread_id, UInt32 period); /// NOLINT + QueryProfilerCPU(UInt64 thread_id, UInt64 period); /// NOLINT static void signalHandler(int sig, siginfo_t * info, void * context); }; diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 9ca521a4ab3..0ce183944cc 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -470,11 +470,11 @@ void ThreadStatus::initGlobalProfiler([[maybe_unused]] UInt64 global_profiler_re { if (global_profiler_real_time_period > 0) query_profiler_real = std::make_unique(thread_id, - /* period= */ static_cast(global_profiler_real_time_period)); + /* period= */ global_profiler_real_time_period); if (global_profiler_cpu_time_period > 0) query_profiler_cpu = std::make_unique(thread_id, - /* period= */ static_cast(global_profiler_cpu_time_period)); + /* period= */ global_profiler_cpu_time_period); } catch (...) { @@ -503,18 +503,18 @@ void ThreadStatus::initQueryProfiler() { if (!query_profiler_real) query_profiler_real = std::make_unique(thread_id, - /* period= */ static_cast(settings.query_profiler_real_time_period_ns)); + /* period= */ settings.query_profiler_real_time_period_ns); else - query_profiler_real->setPeriod(static_cast(settings.query_profiler_real_time_period_ns)); + query_profiler_real->setPeriod(settings.query_profiler_real_time_period_ns); } if (settings.query_profiler_cpu_time_period_ns > 0) { if (!query_profiler_cpu) query_profiler_cpu = std::make_unique(thread_id, - /* period= */ static_cast(settings.query_profiler_cpu_time_period_ns)); + /* period= */ settings.query_profiler_cpu_time_period_ns); else - query_profiler_cpu->setPeriod(static_cast(settings.query_profiler_cpu_time_period_ns)); + query_profiler_cpu->setPeriod(settings.query_profiler_cpu_time_period_ns); } } catch (...) From 26e396184a3bc1aca1d9501d32cad1ed56b77e9d Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 20 Jun 2024 18:49:27 +0000 Subject: [PATCH 0171/2361] Try to appease clang-tidy --- src/Common/QueryProfiler.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 78bc7ee7e54..85c92ec292d 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -177,8 +177,8 @@ void Timer::set(UInt64 period) /// It also helps to avoid interference (moire). 
UInt64 period_rand = std::uniform_int_distribution(0, period)(thread_local_rng); - struct timespec interval{.tv_sec = time_t(period / TIMER_PRECISION), .tv_nsec = long(period % TIMER_PRECISION)}; - struct timespec offset{.tv_sec = time_t(period_rand / TIMER_PRECISION), .tv_nsec = long(period_rand % TIMER_PRECISION)}; + struct timespec interval{.tv_sec = time_t(period / TIMER_PRECISION), .tv_nsec = int64_t(period % TIMER_PRECISION)}; + struct timespec offset{.tv_sec = time_t(period_rand / TIMER_PRECISION), .tv_nsec = int64_t(period_rand % TIMER_PRECISION)}; struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset}; if (timer_settime(*timer_id, 0, &timer_spec, nullptr)) From 16baecf5a67083ecd23d621f10ec0c6250178e32 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 13 Jun 2024 22:01:57 +0000 Subject: [PATCH 0172/2361] attach_gdb.lib: print registers before all stacks --- docker/test/stateless/attach_gdb.lib | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/attach_gdb.lib b/docker/test/stateless/attach_gdb.lib index eb54f920b98..251fb886df6 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/docker/test/stateless/attach_gdb.lib @@ -25,8 +25,11 @@ handle SIG$RTMIN nostop noprint pass info signals continue backtrace full -thread apply all backtrace full info registers +p "top 1 KiB of the stack:" +p/x *(uint64_t[128]*)$sp +maintenance info sections +thread apply all backtrace full disassemble /s up disassemble /s From 557cd2c08f4791e10e1f2914bd4974c51531bf41 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 20 Jun 2024 18:56:06 +0000 Subject: [PATCH 0173/2361] Escape the $ --- docker/test/stateless/attach_gdb.lib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/attach_gdb.lib b/docker/test/stateless/attach_gdb.lib index 251fb886df6..a3616ac1a04 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/docker/test/stateless/attach_gdb.lib @@ -27,7 +27,7 @@ continue backtrace full info registers p "top 1 KiB of the stack:" -p/x *(uint64_t[128]*)$sp +p/x *(uint64_t[128]*)"'$sp'" maintenance info sections thread apply all backtrace full disassemble /s From 3ab8ba0d4ab0e4446883bf57ba3d859d1ee49a52 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 27 May 2024 21:33:54 +0000 Subject: [PATCH 0174/2361] Update zlib-ng from 2.0.2 to 2.1.6 --- contrib/zlib-ng | 2 +- contrib/zlib-ng-cmake/CMakeLists.txt | 84 +++++++++++++++------------- 2 files changed, 47 insertions(+), 39 deletions(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index 50f0eae1a41..bfb184bb0fb 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c +Subproject commit bfb184bb0fbdabe82f3a36d209e56c3e2c33866a diff --git a/contrib/zlib-ng-cmake/CMakeLists.txt b/contrib/zlib-ng-cmake/CMakeLists.txt index 79f343bfc75..78a5f1ae3a8 100644 --- a/contrib/zlib-ng-cmake/CMakeLists.txt +++ b/contrib/zlib-ng-cmake/CMakeLists.txt @@ -14,6 +14,7 @@ add_definitions(-DHAVE_VISIBILITY_HIDDEN) add_definitions(-DHAVE_VISIBILITY_INTERNAL) add_definitions(-DHAVE_BUILTIN_CTZ) add_definitions(-DHAVE_BUILTIN_CTZLL) +add_definitions(-DHAVE_ATTRIBUTE_ALIGNED) set(ZLIB_ARCH_SRCS) set(ZLIB_ARCH_HDRS) @@ -26,14 +27,15 @@ if(ARCH_AARCH64) add_definitions(-DARM_FEATURES) add_definitions(-DARM_AUXV_HAS_CRC32 -DARM_ASM_HWCAP) add_definitions(-DARM_AUXV_HAS_NEON) - add_definitions(-DARM_ACLE_CRC_HASH) - add_definitions(-DARM_NEON_ADLER32 -DARM_NEON_CHUNKSET -DARM_NEON_SLIDEHASH) 
+ add_definitions(-DARM_ACLE) + add_definitions(-DARM_NEON) - list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm.h) - list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/armfeature.c) + list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm_features.h) + list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/arm_features.c) set(ACLE_SRCS ${ARCHDIR}/crc32_acle.c ${ARCHDIR}/insert_string_acle.c) list(APPEND ZLIB_ARCH_SRCS ${ACLE_SRCS}) - set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c ${ARCHDIR}/slide_neon.c) + set(NEON_SRCS ${ARCHDIR}/adler32_neon.c ${ARCHDIR}/chunkset_neon.c + ${ARCHDIR}/compare256_neon.c ${ARCHDIR}/slide_hash_neon.c) list(APPEND ZLIB_ARCH_SRCS ${NEON_SRCS}) elseif(ARCH_PPC64LE) @@ -41,50 +43,47 @@ elseif(ARCH_PPC64LE) add_definitions(-DPOWER8) add_definitions(-DPOWER_FEATURES) - add_definitions(-DPOWER8_VSX_ADLER32) - add_definitions(-DPOWER8_VSX_SLIDEHASH) + add_definitions(-DPOWER8_VSX) + add_definitions(-DPOWER8_VSX_CRC32) - list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power.h) - list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power.c) - set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/slide_hash_power8.c) + list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power_features.h) + list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power_features.c) + set(POWER8_SRCS ${ARCHDIR}/adler32_power8.c ${ARCHDIR}/chunkset_power8.c ${ARCHDIR}/slide_hash_power8.c) + list(APPEND POWER8_SRCS ${ARCHDIR}/crc32_power8.c) list(APPEND ZLIB_ARCH_SRCS ${POWER8_SRCS}) elseif(ARCH_AMD64) set(ARCHDIR "${SOURCE_DIR}/arch/x86") add_definitions(-DX86_FEATURES) - list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86.h) - list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86.c) + list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/x86_features.h) + list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/x86_features.c) if(ENABLE_AVX2) - add_definitions(-DX86_AVX2 -DX86_AVX2_ADLER32 -DX86_AVX_CHUNKSET) - set(AVX2_SRCS ${ARCHDIR}/slide_avx.c) - list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx.c) - list(APPEND AVX2_SRCS ${ARCHDIR}/compare258_avx.c) - list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx.c) + add_definitions(-DX86_AVX2) + set(AVX2_SRCS ${ARCHDIR}/slide_hash_avx2.c) + list(APPEND AVX2_SRCS ${ARCHDIR}/chunkset_avx2.c) + list(APPEND AVX2_SRCS ${ARCHDIR}/compare256_avx2.c) + list(APPEND AVX2_SRCS ${ARCHDIR}/adler32_avx2.c) list(APPEND ZLIB_ARCH_SRCS ${AVX2_SRCS}) endif() if(ENABLE_SSE42) - add_definitions(-DX86_SSE42_CRC_HASH) - set(SSE42_SRCS ${ARCHDIR}/insert_string_sse.c) - list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS}) - add_definitions(-DX86_SSE42_CRC_INTRIN) - add_definitions(-DX86_SSE42_CMP_STR) - set(SSE42_SRCS ${ARCHDIR}/compare258_sse.c) + add_definitions(-DX86_SSE42) + set(SSE42_SRCS ${ARCHDIR}/adler32_sse42.c ${ARCHDIR}/insert_string_sse42.c) list(APPEND ZLIB_ARCH_SRCS ${SSE42_SRCS}) endif() if(ENABLE_SSSE3) - add_definitions(-DX86_SSSE3 -DX86_SSSE3_ADLER32) - set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c) + add_definitions(-DX86_SSSE3) + set(SSSE3_SRCS ${ARCHDIR}/adler32_ssse3.c ${ARCHDIR}/chunkset_ssse3.c) list(APPEND ZLIB_ARCH_SRCS ${SSSE3_SRCS}) endif() if(ENABLE_PCLMULQDQ) add_definitions(-DX86_PCLMULQDQ_CRC) - set(PCLMULQDQ_SRCS ${ARCHDIR}/crc_folding.c) + set(PCLMULQDQ_SRCS ${ARCHDIR}/crc32_pclmulqdq.c) list(APPEND ZLIB_ARCH_SRCS ${PCLMULQDQ_SRCS}) endif() - add_definitions(-DX86_SSE2 -DX86_SSE2_CHUNKSET -DX86_SSE2_SLIDEHASH) - set(SSE2_SRCS ${ARCHDIR}/chunkset_sse.c ${ARCHDIR}/slide_sse.c) + add_definitions(-DX86_SSE2) + set(SSE2_SRCS ${ARCHDIR}/chunkset_sse2.c ${ARCHDIR}/compare256_sse2.c ${ARCHDIR}/slide_hash_sse2.c) list(APPEND ZLIB_ARCH_SRCS ${SSE2_SRCS}) add_definitions(-DX86_NOCHECK_SSE2) endif () @@ -106,39 
+105,45 @@ generate_cmakein(${SOURCE_DIR}/zconf.h.in ${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cm set(ZLIB_SRCS ${SOURCE_DIR}/adler32.c + ${SOURCE_DIR}/adler32_fold.c ${SOURCE_DIR}/chunkset.c - ${SOURCE_DIR}/compare258.c + ${SOURCE_DIR}/compare256.c ${SOURCE_DIR}/compress.c - ${SOURCE_DIR}/crc32.c - ${SOURCE_DIR}/crc32_comb.c + ${SOURCE_DIR}/cpu_features.c + ${SOURCE_DIR}/crc32_braid.c + ${SOURCE_DIR}/crc32_braid_comb.c + ${SOURCE_DIR}/crc32_fold.c ${SOURCE_DIR}/deflate.c ${SOURCE_DIR}/deflate_fast.c + ${SOURCE_DIR}/deflate_huff.c ${SOURCE_DIR}/deflate_medium.c ${SOURCE_DIR}/deflate_quick.c + ${SOURCE_DIR}/deflate_rle.c ${SOURCE_DIR}/deflate_slow.c + ${SOURCE_DIR}/deflate_stored.c ${SOURCE_DIR}/functable.c ${SOURCE_DIR}/infback.c - ${SOURCE_DIR}/inffast.c ${SOURCE_DIR}/inflate.c ${SOURCE_DIR}/inftrees.c ${SOURCE_DIR}/insert_string.c + ${SOURCE_DIR}/insert_string_roll.c + ${SOURCE_DIR}/slide_hash.c ${SOURCE_DIR}/trees.c ${SOURCE_DIR}/uncompr.c ${SOURCE_DIR}/zutil.c +) + +set(ZLIB_GZFILE_SRCS ${SOURCE_DIR}/gzlib.c - ${SOURCE_DIR}/gzread.c + ${CMAKE_CURRENT_BINARY_DIR}/gzread.c ${SOURCE_DIR}/gzwrite.c ) -set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS}) +set(ZLIB_ALL_SRCS ${ZLIB_SRCS} ${ZLIB_ARCH_SRCS} ${ZLIB_GZFILE_SRCS}) add_library(_zlib ${ZLIB_ALL_SRCS}) add_library(ch_contrib::zlib ALIAS _zlib) -# https://github.com/zlib-ng/zlib-ng/pull/733 -# This is disabed by default -add_compile_definitions(Z_TLS=__thread) - if(HAVE_UNISTD_H) SET(ZCONF_UNISTD_LINE "#if 1 /* was set to #if 1 by configure/cmake/etc */") else() @@ -153,6 +158,9 @@ endif() set(ZLIB_PC ${CMAKE_CURRENT_BINARY_DIR}/zlib.pc) configure_file(${SOURCE_DIR}/zlib.pc.cmakein ${ZLIB_PC} @ONLY) configure_file(${CMAKE_CURRENT_BINARY_DIR}/zconf.h.cmakein ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY) +configure_file(${SOURCE_DIR}/zlib.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib.h @ONLY) +configure_file(${SOURCE_DIR}/zlib_name_mangling.h.in ${CMAKE_CURRENT_BINARY_DIR}/zlib_name_mangling.h @ONLY) +configure_file(${SOURCE_DIR}/gzread.c.in ${CMAKE_CURRENT_BINARY_DIR}/gzread.c @ONLY) # We should use same defines when including zlib.h as used when zlib compiled target_compile_definitions (_zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP) From 62d6e3d3396fbcbcc6f666ac9b7bc2b60fe828b2 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 29 May 2024 00:19:25 +0000 Subject: [PATCH 0175/2361] Fix ARM --- contrib/zlib-ng-cmake/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/zlib-ng-cmake/CMakeLists.txt b/contrib/zlib-ng-cmake/CMakeLists.txt index 78a5f1ae3a8..40397e14f20 100644 --- a/contrib/zlib-ng-cmake/CMakeLists.txt +++ b/contrib/zlib-ng-cmake/CMakeLists.txt @@ -28,7 +28,9 @@ if(ARCH_AARCH64) add_definitions(-DARM_AUXV_HAS_CRC32 -DARM_ASM_HWCAP) add_definitions(-DARM_AUXV_HAS_NEON) add_definitions(-DARM_ACLE) + add_definitions(-DHAVE_ARM_ACLE_H) add_definitions(-DARM_NEON) + add_definitions(-DARM_NEON_HASLD4) list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/arm_features.h) list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/arm_features.c) From 8115926eb61779d669509f16863730557882b3c8 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 29 May 2024 04:49:19 +0000 Subject: [PATCH 0176/2361] Fix ARM some more --- contrib/zlib-ng-cmake/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/zlib-ng-cmake/CMakeLists.txt b/contrib/zlib-ng-cmake/CMakeLists.txt index 40397e14f20..1ed1d98ed66 100644 --- a/contrib/zlib-ng-cmake/CMakeLists.txt +++ b/contrib/zlib-ng-cmake/CMakeLists.txt @@ -25,6 +25,7 @@ if(ARCH_AARCH64) set(ARCHDIR 
"${SOURCE_DIR}/arch/arm") add_definitions(-DARM_FEATURES) + add_definitions(-DHAVE_SYS_AUXV_H) add_definitions(-DARM_AUXV_HAS_CRC32 -DARM_ASM_HWCAP) add_definitions(-DARM_AUXV_HAS_NEON) add_definitions(-DARM_ACLE) From 06781efcb785a0504d9599e6a1446e235caef13d Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 20 Jun 2024 22:59:36 +0000 Subject: [PATCH 0177/2361] Switch to a cleaner update, hopefully fix builds --- contrib/zlib-ng | 2 +- contrib/zlib-ng-cmake/CMakeLists.txt | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index bfb184bb0fb..c19ba056b7c 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit bfb184bb0fbdabe82f3a36d209e56c3e2c33866a +Subproject commit c19ba056b7cc8029bb80f509956090c7ded58032 diff --git a/contrib/zlib-ng-cmake/CMakeLists.txt b/contrib/zlib-ng-cmake/CMakeLists.txt index 1ed1d98ed66..91a8eb2e0a3 100644 --- a/contrib/zlib-ng-cmake/CMakeLists.txt +++ b/contrib/zlib-ng-cmake/CMakeLists.txt @@ -15,6 +15,7 @@ add_definitions(-DHAVE_VISIBILITY_INTERNAL) add_definitions(-DHAVE_BUILTIN_CTZ) add_definitions(-DHAVE_BUILTIN_CTZLL) add_definitions(-DHAVE_ATTRIBUTE_ALIGNED) +add_definitions(-DHAVE_POSIX_MEMALIGN) set(ZLIB_ARCH_SRCS) set(ZLIB_ARCH_HDRS) @@ -44,10 +45,16 @@ if(ARCH_AARCH64) elseif(ARCH_PPC64LE) set(ARCHDIR "${SOURCE_DIR}/arch/power") - add_definitions(-DPOWER8) add_definitions(-DPOWER_FEATURES) - add_definitions(-DPOWER8_VSX) - add_definitions(-DPOWER8_VSX_CRC32) + add_definitions(-DHAVE_SYS_AUXV_H) + + if(POWER9) + add_definitions(-DPOWER9) + else() + add_definitions(-DPOWER8) + add_definitions(-DPOWER8_VSX) + add_definitions(-DPOWER8_VSX_CRC32) + endif() list(APPEND ZLIB_ARCH_HDRS ${ARCHDIR}/power_features.h) list(APPEND ZLIB_ARCH_SRCS ${ARCHDIR}/power_features.c) From 91dc9a69d844d781c7d4f94ca01d0a9bbe1a1f29 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 21 Jun 2024 22:03:38 +0100 Subject: [PATCH 0178/2361] fix final_big_column --- tests/performance/final_big_column.xml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/performance/final_big_column.xml b/tests/performance/final_big_column.xml index 4bfdfdf804f..bc7c3570db6 100644 --- a/tests/performance/final_big_column.xml +++ b/tests/performance/final_big_column.xml @@ -1,7 +1,6 @@ - - 8 + 1 20G @@ -11,8 +10,8 @@ PARTITION BY toYYYYMM(d) ORDER BY key - INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers_mt(5000000) - INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers_mt(5000000) + INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(1000000) + INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(1000000) SELECT * FROM optimized_select_final FINAL FORMAT Null SETTINGS max_threads = 8 SELECT * FROM optimized_select_final FINAL WHERE key % 10 = 0 FORMAT Null From cb0a692ba061d910712f0144cb2b5308db5d033f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 21 Jun 2024 23:42:56 +0100 Subject: [PATCH 0179/2361] fix read_from_comp_parts --- tests/performance/read_from_comp_parts.xml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/performance/read_from_comp_parts.xml b/tests/performance/read_from_comp_parts.xml index a625075588a..055df51d12d 100644 --- a/tests/performance/read_from_comp_parts.xml 
+++ b/tests/performance/read_from_comp_parts.xml @@ -5,15 +5,16 @@ ORDER BY (c1, c2) SETTINGS min_rows_for_wide_part = 1000000000 AS SELECT * - FROM generateRandom('c1 UInt32, c2 UInt64, s1 String, arr1 Array(UInt32), c3 UInt64, s2 String', 0, 30, 30) + FROM generateRandom('c1 UInt32, c2 UInt64, s1 String, arr1 Array(UInt32), c3 UInt64, s2 String', 0, 5, 6) LIMIT 50000000 + SETTINGS max_insert_threads = 8 8 - SELECT count() FROM mt_comp_parts WHERE NOT ignore(c1) + SELECT count() FROM mt_comp_parts WHERE NOT ignore(s1) SELECT count() FROM mt_comp_parts WHERE NOT ignore(c2, s1, arr1, s2) SELECT count() FROM mt_comp_parts WHERE NOT ignore(c1, s1, c3) SELECT count() FROM mt_comp_parts WHERE NOT ignore(c1, c2, c3) From e064171a68fcac110d27ff36a51b7a6bb4fbb251 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 23 Jun 2024 03:58:11 +0200 Subject: [PATCH 0180/2361] Fix errors --- programs/client/Client.cpp | 10 ---------- programs/local/LocalServer.cpp | 3 --- src/Client/ClientBase.cpp | 4 +++- src/Client/LocalConnection.cpp | 1 - src/Client/LocalConnection.h | 2 -- .../0_stateless/00857_global_joinsavel_table_alias.sql | 1 - 6 files changed, 3 insertions(+), 18 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 22a035fbd71..ab02d9fac74 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -311,7 +310,6 @@ int Client::main(const std::vector & /*args*/) try { UseSSL use_ssl; - auto & thread_status = MainThreadStatus::getInstance(); setupSignalHandler(); std::cout << std::fixed << std::setprecision(3); @@ -326,14 +324,6 @@ try initTTYBuffer(toProgressOption(config().getString("progress", "default"))); ASTAlterCommand::setFormatAlterCommandsWithParentheses(true); - { - // All that just to set DB::CurrentThread::get().getGlobalContext() - // which is required for client timezone (pushed from server) to work. - auto thread_group = std::make_shared(); - const_cast(thread_group->global_context) = global_context; - thread_status.attachToGroup(thread_group, false); - } - /// Includes delayed_interactive. if (is_interactive) { diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index cb1c35743b2..e5f4bac852c 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -27,10 +27,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -48,7 +46,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 38aa0ed8b14..03f088f2b61 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1872,7 +1872,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin ASTPtr parsed_query, std::optional echo_query_, bool report_error) { query_context = Context::createCopy(global_context); - query_context->makeQueryContext(); + CurrentThread::QueryScope query_scope(query_context); resetOutput(); have_error = false; @@ -2926,6 +2926,8 @@ void ClientBase::init(int argc, char ** argv) /// Don't parse options with Poco library, we prefer neat boost::program_options. 
stopOptionsProcessing(); + MainThreadStatus::getInstance(); + stdin_is_a_tty = isatty(STDIN_FILENO); stdout_is_a_tty = isatty(STDOUT_FILENO); stderr_is_a_tty = isatty(STDERR_FILENO); diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index c7494e31605..e63e5793505 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -125,7 +125,6 @@ void LocalConnection::sendQuery( state->query_id = query_id; state->query = query; - state->query_scope_holder = std::make_unique(query_context); state->stage = QueryProcessingStage::Enum(stage); state->profile_queue = std::make_shared(std::numeric_limits::max()); CurrentThread::attachInternalProfileEventsQueue(state->profile_queue); diff --git a/src/Client/LocalConnection.h b/src/Client/LocalConnection.h index 899d134cce5..bdd0b481529 100644 --- a/src/Client/LocalConnection.h +++ b/src/Client/LocalConnection.h @@ -61,8 +61,6 @@ struct LocalQueryState /// Time after the last check to stop the request and send the progress. Stopwatch after_send_progress; Stopwatch after_send_profile_events; - - std::unique_ptr query_scope_holder; }; diff --git a/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql b/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql index 2044a9b8d22..092b071cb48 100644 --- a/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql +++ b/tests/queries/0_stateless/00857_global_joinsavel_table_alias.sql @@ -1,4 +1,3 @@ - DROP TABLE IF EXISTS local_table; DROP TABLE IF EXISTS other_table; From 84f81c61853f34d765475309932d73af55e25d0f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 23 Jun 2024 17:18:32 +0200 Subject: [PATCH 0181/2361] Fix error --- src/Client/Suggest.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 0188ebc8173..c1f163939e8 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -96,6 +96,10 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p loading_thread = std::thread([my_context = Context::createCopy(context), connection_parameters, suggestion_limit, this] { ThreadStatus thread_status; + my_context->makeQueryContext(); + auto group = ThreadGroup::createForQuery(my_context); + CurrentThread::attachToGroup(group); + for (size_t retry = 0; retry < 10; ++retry) { try From 602fa5cbadba2924bc4e57f26f5f37b00d7b086e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 23 Jun 2024 17:28:54 +0200 Subject: [PATCH 0182/2361] Fix error --- programs/local/LocalServer.cpp | 1 - src/Client/ClientBase.h | 3 --- 2 files changed, 4 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index e5f4bac852c..45641b999b6 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -428,7 +428,6 @@ int LocalServer::main(const std::vector & /*args*/) try { UseSSL use_ssl; - thread_status.emplace(); StackTrace::setShowAddresses(config().getBool("show_addresses_in_stack_traces", true)); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 228a9d65ea7..83b99696373 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -244,9 +244,6 @@ protected: Settings cmd_settings; MergeTreeSettings cmd_merge_tree_settings; - /// thread status should be destructed before shared context because it relies on process list. 
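The thread-status changes above follow a common RAII pattern: instead of manually attaching and detaching per-query thread state (and keeping a long-lived member around for it), a scope object performs the detach in its destructor, so cleanup also happens when the query throws. A minimal sketch with hypothetical types (not ClickHouse's actual QueryScope or ThreadStatus):

```cpp
#include <iostream>
#include <stdexcept>

struct QueryScope
{
    explicit QueryScope(const char * query_id) { std::cout << "attach " << query_id << '\n'; }
    ~QueryScope() { std::cout << "detach\n"; }  // runs on normal exit *and* during unwinding
};

void runQuery(bool fail)
{
    QueryScope scope("q-1");
    if (fail)
        throw std::runtime_error("query failed");
    std::cout << "query body\n";
}

int main()
{
    try { runQuery(true); } catch (...) {}  // "detach" is still printed
    runQuery(false);
    return 0;
}
```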
- std::optional thread_status; - ServerConnectionPtr connection; ConnectionParameters connection_parameters; From e0ef26285a5cda87e5073c6a0aaecb67f9609a96 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 23 Jun 2024 19:46:00 +0200 Subject: [PATCH 0183/2361] Update submodule --- contrib/aws | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/aws b/contrib/aws index 1c2946bfcb7..6463c9cbf47 160000 --- a/contrib/aws +++ b/contrib/aws @@ -1 +1 @@ -Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf +Subproject commit 6463c9cbf47cab78e4a4fa97a866942f201c6a58 From a93385836f1c2ab24927a3db1e04de6126a1fd53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sun, 23 Jun 2024 19:32:47 +0000 Subject: [PATCH 0184/2361] Expect errors when polling for events --- src/Storages/Kafka/KafkaConsumer2.cpp | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index d59a06bc672..d471c263653 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -194,13 +194,17 @@ void KafkaConsumer2::drainConsumerQueue() void KafkaConsumer2::pollEvents() { - auto msg = consumer->poll(EVENT_POLL_TIMEOUT); - // All the partition queues are detached, so the consumer shouldn't be able to poll any messages - chassert(!msg && "Consumer returned a message when it was not expected"); - - // static constexpr int64_t max_tries = 5; - // for(auto i = 0; i < max_tries; ++i) - // consumer->poll(EVENT_POLL_TIMEOUT); + static constexpr auto max_tries = 5; + for (auto i = 0; i < max_tries; ++i) + { + auto msg = consumer->poll(EVENT_POLL_TIMEOUT); + if (!msg) + return; + // All the partition queues are detached, so the consumer shouldn't be able to poll any real messages + const auto err = msg.get_error(); + chassert(RD_KAFKA_RESP_ERR_NO_ERROR != err.get_error() && "Consumer returned a message when it was not expected"); + LOG_ERROR(log, "Consumer received error while polling events, code {}, error '{}'", err.get_error(), err.to_string()); + } }; KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const From 9948150b87c6ee4531e0130de095035e2f228ec1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 24 Jun 2024 00:19:20 +0200 Subject: [PATCH 0185/2361] Fix error --- src/IO/S3/PocoHTTPClient.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 18a21649167..3b7ec4d1d9c 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -172,7 +172,7 @@ protected: const RemoteHostFilter & remote_host_filter; unsigned int s3_max_redirects; bool s3_use_adaptive_timeouts = true; - bool enable_s3_requests_logging; + bool enable_s3_requests_logging = false; bool for_disk_s3; /// Limits get request per second rate for GET, SELECT and all other requests, excluding throttled by put throttler From 8cc25827edf45a9928b5ba403a334298d53191c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 24 Jun 2024 08:30:37 +0000 Subject: [PATCH 0186/2361] Extend known limitations --- docs/en/engines/table-engines/integrations/kafka.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index f899fea97de..8c9cd18d117 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ 
b/docs/en/engines/table-engines/integrations/kafka.md @@ -287,6 +287,7 @@ As the new engine is experimental, it is not production ready yet. There are few - The biggest limitation is the engine doesn't support direct reading from Kafka topic (insertion works, but reading doesn't), thus the direct `SELECT` queries will fail. - Rapidly dropping and recreating the table or specifying the same ClickHouse Keeper path to different engines might cause issues. As best practice you can use the `{uuid}` to avoid clashing paths. - To make repeatable reads possible messages cannot be consumed from multiple partitions on a single thread. On the other hand the Kafka consumers has to be polled regularly to keep them alive. As a result of these two we decided to only allow creating multiple consumer if `kafka_thread_per_consumer` is enabled, otherwise it is too complicated to avoid issues regarding polling consumers regularly. + - Consumers created by the new storage engine do not show up in [`system.kafka_consumers`](../../../operations/system-tables/kafka_consumers.md) table. **See Also** From 15e20f56fa4a9a497e4cc247aa9215cb840203f9 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Fri, 21 Jun 2024 16:29:25 +0800 Subject: [PATCH 0187/2361] Add statistics cmsketch --- contrib/datasketches-cpp-cmake/CMakeLists.txt | 1 + .../statements/alter/statistics.md | 2 +- src/CMakeLists.txt | 1 + .../Statistics/CMSketchStatistics.cpp | 82 +++++++++++++++++++ src/Storages/Statistics/CMSketchStatistics.h | 39 +++++++++ .../ConditionSelectivityEstimator.cpp | 35 +++----- .../ConditionSelectivityEstimator.h | 5 +- src/Storages/Statistics/Statistics.cpp | 41 +++++++++- src/Storages/Statistics/Statistics.h | 5 +- src/Storages/StatisticsDescription.cpp | 4 + src/Storages/StatisticsDescription.h | 1 + .../03174_statistics_cmsketch.reference | 26 ++++++ .../0_stateless/03174_statistics_cmsketch.sql | 78 ++++++++++++++++++ 13 files changed, 290 insertions(+), 30 deletions(-) create mode 100644 src/Storages/Statistics/CMSketchStatistics.cpp create mode 100644 src/Storages/Statistics/CMSketchStatistics.h create mode 100644 tests/queries/0_stateless/03174_statistics_cmsketch.reference create mode 100644 tests/queries/0_stateless/03174_statistics_cmsketch.sql diff --git a/contrib/datasketches-cpp-cmake/CMakeLists.txt b/contrib/datasketches-cpp-cmake/CMakeLists.txt index b12a88ad57b..497d6956d0e 100644 --- a/contrib/datasketches-cpp-cmake/CMakeLists.txt +++ b/contrib/datasketches-cpp-cmake/CMakeLists.txt @@ -9,6 +9,7 @@ set(DATASKETCHES_LIBRARY theta) add_library(_datasketches INTERFACE) target_include_directories(_datasketches SYSTEM BEFORE INTERFACE "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include" + "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/count/include" "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include") add_library(ch_contrib::datasketches ALIAS _datasketches) diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 80024781f88..22ff740d410 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -25,7 +25,7 @@ Also, they are replicated, syncing statistics metadata via ZooKeeper. 
There is an example adding two statistics types to two columns: ``` -ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq; +ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq, CMSketch; ``` :::note diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 84aaec17a5b..4efb6004172 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -546,6 +546,7 @@ endif() if (TARGET ch_contrib::datasketches) target_link_libraries (clickhouse_aggregate_functions PRIVATE ch_contrib::datasketches) + dbms_target_link_libraries(PRIVATE ch_contrib::datasketches) endif () target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4) diff --git a/src/Storages/Statistics/CMSketchStatistics.cpp b/src/Storages/Statistics/CMSketchStatistics.cpp new file mode 100644 index 00000000000..2c217d30278 --- /dev/null +++ b/src/Storages/Statistics/CMSketchStatistics.cpp @@ -0,0 +1,82 @@ +#include +#include +#include +#include +#include + +#if USE_DATASKETCHES + +namespace DB +{ + +namespace ErrorCodes +{ +extern const int ILLEGAL_STATISTICS; +} + + +CMSketchStatistics::CMSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) + : IStatistics(stat_), data(CMSKETCH_HASH_COUNT, CMSKETCH_BUCKET_COUNT), data_type(data_type_) +{ +} + +Float64 CMSketchStatistics::estimateEqual(const Field & value) const +{ + if (auto float_val = getFloat64(value)) + return data.get_estimate(&float_val.value(), 8); + if (auto string_val = getString(value)) + return data.get_estimate(string_val->data(), string_val->size()); + UNREACHABLE(); +} + +void CMSketchStatistics::serialize(WriteBuffer & buf) +{ + auto bytes = data.serialize(); + writeIntBinary(static_cast(bytes.size()), buf); + buf.write(reinterpret_cast(bytes.data()), bytes.size()); +} + +void CMSketchStatistics::deserialize(ReadBuffer & buf) +{ + UInt64 size; + readIntBinary(size, buf); + String s; + s.reserve(size); + buf.readStrict(s.data(), size); /// Extra copy can be avoided by implementing count_min_sketch::deserialize with ReadBuffer + auto read_sketch = datasketches::count_min_sketch::deserialize(s.data(), size, datasketches::DEFAULT_SEED); + data.merge(read_sketch); +} + +void CMSketchStatistics::update(const ColumnPtr & column) +{ + size_t size = column->size(); + + for (size_t i = 0; i < size; ++i) + { + Field f; + column->get(i, f); + if (f.isNull()) + continue; + if (auto float_val = getFloat64(f)) + data.update(&float_val.value(), 8, 1.0); + else if (auto string_val = getString(f)) + data.update(*string_val, 1.0); + } +} + +void CMSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +{ + data_type = removeNullable(data_type); + data_type = removeLowCardinalityAndNullable(data_type); + if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'cmsketch' does not support type {}", data_type->getName()); +} + +StatisticsPtr CMSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +{ + return std::make_shared(stat, data_type); +} + +} + +#endif diff --git a/src/Storages/Statistics/CMSketchStatistics.h b/src/Storages/Statistics/CMSketchStatistics.h new file mode 100644 index 00000000000..03964614d57 --- /dev/null +++ b/src/Storages/Statistics/CMSketchStatistics.h @@ -0,0 +1,39 @@ +#pragma once + +#if USE_DATASKETCHES + +#include +#include +#include + +namespace DB +{ + +/// CMSketchStatistics is used to estimate expression like col = 'value' or col in ('v1', 'v2'). 
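For orientation, the count-min sketch behind this new statistic is a small 2-D counter table (the patch configures 8 hash rows by 2048 buckets): every inserted value increments one bucket per row, and the frequency estimate is the minimum over the rows, which may overestimate but never underestimates a value's count. A toy version of the data structure, assuming nothing about the datasketches API that the patch actually uses:

```cpp
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

class ToyCountMinSketch
{
    static constexpr size_t HASHES = 4;     // the patch uses 8
    static constexpr size_t BUCKETS = 256;  // the patch uses 2048
    std::array<std::array<uint64_t, BUCKETS>, HASHES> table{};

    static size_t bucket(const std::string & key, size_t row)
    {
        return std::hash<std::string>{}(key + '#' + std::to_string(row)) % BUCKETS;
    }

public:
    void update(const std::string & key)
    {
        for (size_t row = 0; row < HASHES; ++row)
            ++table[row][bucket(key, row)];
    }

    uint64_t estimate(const std::string & key) const
    {
        uint64_t result = UINT64_MAX;
        for (size_t row = 0; row < HASHES; ++row)
            result = std::min(result, table[row][bucket(key, row)]);
        return result;  // an upper bound on the true count
    }
};

int main()
{
    ToyCountMinSketch sketch;
    for (int i = 0; i < 1000; ++i)
        sketch.update(i % 10 == 0 ? "rare" : "common");
    std::cout << "common ~ " << sketch.estimate("common") << '\n';  // ~900
    std::cout << "rare   ~ " << sketch.estimate("rare") << '\n';    // ~100
    return 0;
}
```

Overestimation is acceptable here because the estimate only feeds selectivity decisions such as PREWHERE column ordering, where a conservative (too-large) row count is safe.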
+class CMSketchStatistics : public IStatistics +{ +public: + explicit CMSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); + + Float64 estimateEqual(const Field & value) const; + + void serialize(WriteBuffer & buf) override; + + void deserialize(ReadBuffer & buf) override; + + void update(const ColumnPtr & column) override; + +private: + static constexpr size_t CMSKETCH_HASH_COUNT = 8; + static constexpr size_t CMSKETCH_BUCKET_COUNT = 2048; + + datasketches::count_min_sketch data; + DataTypePtr data_type; +}; + +StatisticsPtr CMSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void CMSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); + +} + +#endif diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp index 757136fdf42..73c5c549a5d 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp @@ -35,11 +35,14 @@ Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateGreat return rows - estimateLess(val, rows); } -Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(Float64 val, Float64 rows) const +Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(Field val, Float64 rows) const { + auto float_val = getFloat64(val); if (part_statistics.empty()) { - if (val < - threshold || val > threshold) + if (!float_val) + return default_unknown_cond_factor * rows; + else if (float_val.value() < - threshold || float_val.value() > threshold) return default_normal_cond_factor * rows; else return default_good_cond_factor * rows; @@ -87,7 +90,7 @@ static std::pair tryToExtractSingleColumn(const RPNBuilderTreeNod return result; } -std::pair ConditionSelectivityEstimator::extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const +std::pair ConditionSelectivityEstimator::extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const { if (!node.isFunction()) return {}; @@ -123,18 +126,7 @@ std::pair ConditionSelectivityEstimator::extractBinaryOp(const DataTypePtr output_type; if (!constant_node->tryGetConstant(output_value, output_type)) return {}; - - const auto type = output_value.getType(); - Float64 value; - if (type == Field::Types::Int64) - value = output_value.get(); - else if (type == Field::Types::UInt64) - value = output_value.get(); - else if (type == Field::Types::Float64) - value = output_value.get(); - else - return {}; - return std::make_pair(function_name, value); + return std::make_pair(function_name, output_value); } Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode & node) const @@ -142,7 +134,7 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode auto result = tryToExtractSingleColumn(node); if (result.second != 1) { - return default_unknown_cond_factor; + return default_unknown_cond_factor * total_rows; } String col = result.first; auto it = column_estimators.find(col); @@ -152,19 +144,16 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode bool dummy = total_rows == 0; ColumnSelectivityEstimator estimator; if (it != column_estimators.end()) - { estimator = it->second; - } else - { dummy = true; - } auto [op, val] = extractBinaryOp(node, col); + auto float_val = getFloat64(val); if (op == "equals") { if (dummy) { - if (val < - threshold || val > 
threshold) + if (!float_val || (float_val < - threshold || float_val > threshold)) return default_normal_cond_factor * total_rows; else return default_good_cond_factor * total_rows; @@ -175,13 +164,13 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode { if (dummy) return default_normal_cond_factor * total_rows; - return estimator.estimateLess(val, total_rows); + return estimator.estimateLess(float_val.value(), total_rows); } else if (op == "greater" || op == "greaterOrEquals") { if (dummy) return default_normal_cond_factor * total_rows; - return estimator.estimateGreater(val, total_rows); + return estimator.estimateGreater(float_val.value(), total_rows); } else return default_unknown_cond_factor * total_rows; diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.h b/src/Storages/Statistics/ConditionSelectivityEstimator.h index f0599742276..9bf4940e563 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.h +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB { @@ -24,7 +25,7 @@ private: Float64 estimateGreater(Float64 val, Float64 rows) const; - Float64 estimateEqual(Float64 val, Float64 rows) const; + Float64 estimateEqual(Field val, Float64 rows) const; }; static constexpr auto default_good_cond_factor = 0.1; @@ -37,7 +38,7 @@ private: UInt64 total_rows = 0; std::set part_names; std::map column_estimators; - std::pair extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const; + std::pair extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const; public: /// TODO: Support the condition consists of CNF/DNF like (cond1 and cond2) or (cond3) ... diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index fed0bd61c03..b35d653b7ec 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -26,6 +27,28 @@ enum StatisticsFileVersion : UInt16 V0 = 0, }; +std::optional getFloat64(const Field & f) +{ + const auto type = f.getType(); + Float64 value; + if (type == Field::Types::Int64) + value = f.get(); + else if (type == Field::Types::UInt64) + value = f.get(); + else if (type == Field::Types::Float64) + value = f.get(); + else + return {}; + return value; +} + +std::optional getString(const Field & f) +{ + if (f.getType() == Field::Types::String) + return f.get(); + return {}; +} + IStatistics::IStatistics(const SingleStatisticsDescription & stat_) : stat(stat_) {} ColumnStatistics::ColumnStatistics(const ColumnStatisticsDescription & stats_desc_) @@ -54,9 +77,10 @@ Float64 ColumnStatistics::estimateGreater(Float64 val) const return rows - estimateLess(val); } -Float64 ColumnStatistics::estimateEqual(Float64 val) const +Float64 ColumnStatistics::estimateEqual(Field val) const { - if (stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) + auto float_val = getFloat64(val); + if (float_val && stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) { auto uniq_static = std::static_pointer_cast(stats.at(StatisticsType::Uniq)); /// 2048 is the default number of buckets in TDigest. 
In this case, TDigest stores exactly one value (with many rows) @@ -64,9 +88,16 @@ Float64 ColumnStatistics::estimateEqual(Float64 val) const if (uniq_static->getCardinality() < 2048) { auto tdigest_static = std::static_pointer_cast(stats.at(StatisticsType::TDigest)); - return tdigest_static->estimateEqual(val); + return tdigest_static->estimateEqual(float_val.value()); } } +#if USE_DATASKETCHES + if (stats.contains(StatisticsType::CMSketch)) + { + auto cmsketch_static = std::static_pointer_cast(stats.at(StatisticsType::CMSketch)); + return cmsketch_static->estimateEqual(val); + } +#endif if (val < - ConditionSelectivityEstimator::threshold || val > ConditionSelectivityEstimator::threshold) return rows * ConditionSelectivityEstimator::default_normal_cond_factor; else @@ -145,6 +176,10 @@ MergeTreeStatisticsFactory::MergeTreeStatisticsFactory() registerCreator(StatisticsType::Uniq, UniqCreator); registerValidator(StatisticsType::TDigest, TDigestValidator); registerValidator(StatisticsType::Uniq, UniqValidator); +#if USE_DATASKETCHES + registerCreator(StatisticsType::CMSketch, CMSketchCreator); + registerValidator(StatisticsType::CMSketch, CMSketchValidator); +#endif } MergeTreeStatisticsFactory & MergeTreeStatisticsFactory::instance() diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index 2ab1337af02..f6121d72256 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -58,7 +59,7 @@ public: Float64 estimateGreater(Float64 val) const; - Float64 estimateEqual(Float64 val) const; + Float64 estimateEqual(Field val) const; private: @@ -100,4 +101,6 @@ private: Validators validators; }; +std::optional getFloat64(const Field & f); +std::optional getString(const Field & f); } diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index f10fb78f933..08c79043ac4 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -54,6 +54,8 @@ static StatisticsType stringToStatisticsType(String type) return StatisticsType::TDigest; if (type == "uniq") return StatisticsType::Uniq; + if (type == "cmsketch") + return StatisticsType::CMSketch; throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are `tdigest` and `uniq`.", type); } @@ -65,6 +67,8 @@ String SingleStatisticsDescription::getTypeName() const return "TDigest"; case StatisticsType::Uniq: return "Uniq"; + case StatisticsType::CMSketch: + return "CMSketch"; default: throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. 
Supported statistics types are `tdigest` and `uniq`.", type); } diff --git a/src/Storages/StatisticsDescription.h b/src/Storages/StatisticsDescription.h index 4862fb79d45..a2005f59de1 100644 --- a/src/Storages/StatisticsDescription.h +++ b/src/Storages/StatisticsDescription.h @@ -13,6 +13,7 @@ enum class StatisticsType : UInt8 { TDigest = 0, Uniq = 1, + CMSketch = 2, Max = 63, }; diff --git a/tests/queries/0_stateless/03174_statistics_cmsketch.reference b/tests/queries/0_stateless/03174_statistics_cmsketch.reference new file mode 100644 index 00000000000..ea77f317a31 --- /dev/null +++ b/tests/queries/0_stateless/03174_statistics_cmsketch.reference @@ -0,0 +1,26 @@ +CREATE TABLE default.t1\n(\n `a` String STATISTICS(cmsketch),\n `b` Int64 STATISTICS(cmsketch),\n `c` UInt64 STATISTICS(cmsketch),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'_String), equals(c, 0), greater(b, 0)) (removed) +After drop statistics for a + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(b, 0), equals(c, 0), equals(a, \'0\'_String)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(c, 0), equals(a, \'0\'_String), greater(b, 0)) (removed) +LowCardinality + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) +Nullable + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) +LowCardinality(Nullable) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) diff --git a/tests/queries/0_stateless/03174_statistics_cmsketch.sql b/tests/queries/0_stateless/03174_statistics_cmsketch.sql new file mode 100644 index 00000000000..c45d6186fdf --- /dev/null +++ b/tests/queries/0_stateless/03174_statistics_cmsketch.sql @@ -0,0 +1,78 @@ +-- Tags: no-fasttest +DROP TABLE IF EXISTS t1; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +CREATE TABLE t1 +( + a String STATISTICS(cmsketch), + b Int64 STATISTICS(cmsketch), + c UInt64 STATISTICS(cmsketch), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE t1; + +INSERT INTO t1 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE t1 DROP STATISTICS a; + +SELECT 'After drop statistics for a'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR 
explain LIKE '%Filter column%'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE t2 +( + a LowCardinality(String) STATISTICS(cmsketch), + b Int64 STATISTICS(cmsketch), + c UInt64 STATISTICS(cmsketch), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t2 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'LowCardinality'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t2 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + + +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t3 +( + a Nullable(String) STATISTICS(cmsketch), + b Int64 STATISTICS(cmsketch), + c UInt64 STATISTICS(cmsketch), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t3 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'Nullable'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t3 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; + +CREATE TABLE t4 +( + a LowCardinality(Nullable(String)) STATISTICS(cmsketch), + b Int64 STATISTICS(cmsketch), + c UInt64 STATISTICS(cmsketch), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t4 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'LowCardinality(Nullable)'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t4 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +DROP TABLE IF EXISTS t4; From a9331c3bcdb6911497fca06c0db5ebb1afd7b2a7 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 24 Jun 2024 18:26:38 +0800 Subject: [PATCH 0188/2361] Rname cmsketch to countmixsketch --- .../statements/alter/statistics.md | 2 +- src/Storages/Statistics/CMSketchStatistics.h | 39 ------------------- ...stics.cpp => CountMinSketchStatistics.cpp} | 24 ++++++------ .../Statistics/CountMinSketchStatistics.h | 39 +++++++++++++++++++ src/Storages/Statistics/Statistics.cpp | 24 ++++++------ src/Storages/StatisticsDescription.cpp | 8 ++-- src/Storages/StatisticsDescription.h | 2 +- ...03174_statistics_countminsketch.reference} | 2 +- ...ql => 03174_statistics_countminsketch.sql} | 24 ++++++------ 9 files changed, 82 insertions(+), 82 deletions(-) delete mode 100644 src/Storages/Statistics/CMSketchStatistics.h rename src/Storages/Statistics/{CMSketchStatistics.cpp => CountMinSketchStatistics.cpp} (66%) create mode 100644 src/Storages/Statistics/CountMinSketchStatistics.h rename tests/queries/0_stateless/{03174_statistics_cmsketch.reference => 03174_statistics_countminsketch.reference} (80%) rename tests/queries/0_stateless/{03174_statistics_cmsketch.sql => 03174_statistics_countminsketch.sql} (84%) diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 22ff740d410..0d1fa59cf86 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -25,7 +25,7 @@ 
Also, they are replicated, syncing statistics metadata via ZooKeeper. There is an example adding two statistics types to two columns: ``` -ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq, CMSketch; +ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq, CountMinSketch; ``` :::note diff --git a/src/Storages/Statistics/CMSketchStatistics.h b/src/Storages/Statistics/CMSketchStatistics.h deleted file mode 100644 index 03964614d57..00000000000 --- a/src/Storages/Statistics/CMSketchStatistics.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#if USE_DATASKETCHES - -#include -#include -#include - -namespace DB -{ - -/// CMSketchStatistics is used to estimate expression like col = 'value' or col in ('v1', 'v2'). -class CMSketchStatistics : public IStatistics -{ -public: - explicit CMSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); - - Float64 estimateEqual(const Field & value) const; - - void serialize(WriteBuffer & buf) override; - - void deserialize(ReadBuffer & buf) override; - - void update(const ColumnPtr & column) override; - -private: - static constexpr size_t CMSKETCH_HASH_COUNT = 8; - static constexpr size_t CMSKETCH_BUCKET_COUNT = 2048; - - datasketches::count_min_sketch data; - DataTypePtr data_type; -}; - -StatisticsPtr CMSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); -void CMSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); - -} - -#endif diff --git a/src/Storages/Statistics/CMSketchStatistics.cpp b/src/Storages/Statistics/CountMinSketchStatistics.cpp similarity index 66% rename from src/Storages/Statistics/CMSketchStatistics.cpp rename to src/Storages/Statistics/CountMinSketchStatistics.cpp index 2c217d30278..d0f52e3f6df 100644 --- a/src/Storages/Statistics/CMSketchStatistics.cpp +++ b/src/Storages/Statistics/CountMinSketchStatistics.cpp @@ -1,8 +1,8 @@ -#include #include +#include #include #include -#include +#include #if USE_DATASKETCHES @@ -15,12 +15,12 @@ extern const int ILLEGAL_STATISTICS; } -CMSketchStatistics::CMSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) - : IStatistics(stat_), data(CMSKETCH_HASH_COUNT, CMSKETCH_BUCKET_COUNT), data_type(data_type_) +CountMinSketchStatistics::CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) + : IStatistics(stat_), data(HASH_COUNT, BUCKET_COUNT), data_type(data_type_) { } -Float64 CMSketchStatistics::estimateEqual(const Field & value) const +Float64 CountMinSketchStatistics::estimateEqual(const Field & value) const { if (auto float_val = getFloat64(value)) return data.get_estimate(&float_val.value(), 8); @@ -29,14 +29,14 @@ Float64 CMSketchStatistics::estimateEqual(const Field & value) const UNREACHABLE(); } -void CMSketchStatistics::serialize(WriteBuffer & buf) +void CountMinSketchStatistics::serialize(WriteBuffer & buf) { auto bytes = data.serialize(); writeIntBinary(static_cast(bytes.size()), buf); buf.write(reinterpret_cast(bytes.data()), bytes.size()); } -void CMSketchStatistics::deserialize(ReadBuffer & buf) +void CountMinSketchStatistics::deserialize(ReadBuffer & buf) { UInt64 size; readIntBinary(size, buf); @@ -47,7 +47,7 @@ void CMSketchStatistics::deserialize(ReadBuffer & buf) data.merge(read_sketch); } -void CMSketchStatistics::update(const ColumnPtr & column) +void CountMinSketchStatistics::update(const ColumnPtr & column) { size_t size = column->size(); @@ -64,17 +64,17 @@ void CMSketchStatistics::update(const ColumnPtr & column) } } -void 
CMSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) - throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'cmsketch' does not support type {}", data_type->getName()); + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countminsketch' does not support type {}", data_type->getName()); } -StatisticsPtr CMSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { - return std::make_shared(stat, data_type); + return std::make_shared(stat, data_type); } } diff --git a/src/Storages/Statistics/CountMinSketchStatistics.h b/src/Storages/Statistics/CountMinSketchStatistics.h new file mode 100644 index 00000000000..23ea2cf25c9 --- /dev/null +++ b/src/Storages/Statistics/CountMinSketchStatistics.h @@ -0,0 +1,39 @@ +#pragma once + +#if USE_DATASKETCHES + +#include +#include +#include + +namespace DB +{ + +/// CountMinSketchStatistics is used to estimate expression like col = 'value' or col in ('v1', 'v2'). +class CountMinSketchStatistics : public IStatistics +{ +public: + explicit CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); + + Float64 estimateEqual(const Field & value) const; + + void serialize(WriteBuffer & buf) override; + + void deserialize(ReadBuffer & buf) override; + + void update(const ColumnPtr & column) override; + +private: + static constexpr size_t HASH_COUNT = 8; + static constexpr size_t BUCKET_COUNT = 2048; + + datasketches::count_min_sketch data; + DataTypePtr data_type; +}; + +StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); + +} + +#endif diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index b35d653b7ec..2404d234d7a 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -1,15 +1,15 @@ #include #include -#include -#include -#include -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include +#include +#include #include namespace DB @@ -92,10 +92,10 @@ Float64 ColumnStatistics::estimateEqual(Field val) const } } #if USE_DATASKETCHES - if (stats.contains(StatisticsType::CMSketch)) + if (stats.contains(StatisticsType::CountMinSketch)) { - auto cmsketch_static = std::static_pointer_cast(stats.at(StatisticsType::CMSketch)); - return cmsketch_static->estimateEqual(val); + auto count_min_sketch_static = std::static_pointer_cast(stats.at(StatisticsType::CountMinSketch)); + return count_min_sketch_static->estimateEqual(val); } #endif if (val < - ConditionSelectivityEstimator::threshold || val > ConditionSelectivityEstimator::threshold) @@ -177,8 +177,8 @@ MergeTreeStatisticsFactory::MergeTreeStatisticsFactory() registerValidator(StatisticsType::TDigest, TDigestValidator); registerValidator(StatisticsType::Uniq, UniqValidator); #if USE_DATASKETCHES - registerCreator(StatisticsType::CMSketch, CMSketchCreator); - registerValidator(StatisticsType::CMSketch, CMSketchValidator); + registerCreator(StatisticsType::CountMinSketch, 
CountMinSketchCreator); + registerValidator(StatisticsType::CountMinSketch, CountMinSketchValidator); #endif } diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 08c79043ac4..23339ca3cfe 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -54,8 +54,8 @@ static StatisticsType stringToStatisticsType(String type) return StatisticsType::TDigest; if (type == "uniq") return StatisticsType::Uniq; - if (type == "cmsketch") - return StatisticsType::CMSketch; + if (type == "countminsketch") + return StatisticsType::CountMinSketch; throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are `tdigest` and `uniq`.", type); } @@ -67,8 +67,8 @@ String SingleStatisticsDescription::getTypeName() const return "TDigest"; case StatisticsType::Uniq: return "Uniq"; - case StatisticsType::CMSketch: - return "CMSketch"; + case StatisticsType::CountMinSketch: + return "CountMinSketch"; default: throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are `tdigest` and `uniq`.", type); } diff --git a/src/Storages/StatisticsDescription.h b/src/Storages/StatisticsDescription.h index a2005f59de1..03b8fb0d583 100644 --- a/src/Storages/StatisticsDescription.h +++ b/src/Storages/StatisticsDescription.h @@ -13,7 +13,7 @@ enum class StatisticsType : UInt8 { TDigest = 0, Uniq = 1, - CMSketch = 2, + CountMinSketch = 2, Max = 63, }; diff --git a/tests/queries/0_stateless/03174_statistics_cmsketch.reference b/tests/queries/0_stateless/03174_statistics_countminsketch.reference similarity index 80% rename from tests/queries/0_stateless/03174_statistics_cmsketch.reference rename to tests/queries/0_stateless/03174_statistics_countminsketch.reference index ea77f317a31..3cec7dd7168 100644 --- a/tests/queries/0_stateless/03174_statistics_cmsketch.reference +++ b/tests/queries/0_stateless/03174_statistics_countminsketch.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.t1\n(\n `a` String STATISTICS(cmsketch),\n `b` Int64 STATISTICS(cmsketch),\n `c` UInt64 STATISTICS(cmsketch),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +CREATE TABLE default.t1\n(\n `a` String STATISTICS(countminsketch),\n `b` Int64 STATISTICS(countminsketch),\n `c` UInt64 STATISTICS(countminsketch),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 Prewhere info Prewhere filter Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) diff --git a/tests/queries/0_stateless/03174_statistics_cmsketch.sql b/tests/queries/0_stateless/03174_statistics_countminsketch.sql similarity index 84% rename from tests/queries/0_stateless/03174_statistics_cmsketch.sql rename to tests/queries/0_stateless/03174_statistics_countminsketch.sql index c45d6186fdf..0d9673309b4 100644 --- a/tests/queries/0_stateless/03174_statistics_cmsketch.sql +++ b/tests/queries/0_stateless/03174_statistics_countminsketch.sql @@ -6,9 +6,9 @@ SET allow_statistics_optimize = 1; CREATE TABLE t1 ( - a String STATISTICS(cmsketch), - b Int64 STATISTICS(cmsketch), - c UInt64 STATISTICS(cmsketch), + a String STATISTICS(countminsketch), + b Int64 STATISTICS(countminsketch), + c UInt64 STATISTICS(countminsketch), pk String, ) Engine = MergeTree() ORDER BY pk SETTINGS min_bytes_for_wide_part = 0; @@ -31,9 +31,9 @@ DROP TABLE IF EXISTS t2; SET 
allow_suspicious_low_cardinality_types=1; CREATE TABLE t2 ( - a LowCardinality(String) STATISTICS(cmsketch), - b Int64 STATISTICS(cmsketch), - c UInt64 STATISTICS(cmsketch), + a LowCardinality(String) STATISTICS(countminsketch), + b Int64 STATISTICS(countminsketch), + c UInt64 STATISTICS(countminsketch), pk String, ) Engine = MergeTree() ORDER BY pk SETTINGS min_bytes_for_wide_part = 0; @@ -48,9 +48,9 @@ DROP TABLE IF EXISTS t3; CREATE TABLE t3 ( - a Nullable(String) STATISTICS(cmsketch), - b Int64 STATISTICS(cmsketch), - c UInt64 STATISTICS(cmsketch), + a Nullable(String) STATISTICS(countminsketch), + b Int64 STATISTICS(countminsketch), + c UInt64 STATISTICS(countminsketch), pk String, ) Engine = MergeTree() ORDER BY pk SETTINGS min_bytes_for_wide_part = 0; @@ -64,9 +64,9 @@ DROP TABLE IF EXISTS t4; CREATE TABLE t4 ( - a LowCardinality(Nullable(String)) STATISTICS(cmsketch), - b Int64 STATISTICS(cmsketch), - c UInt64 STATISTICS(cmsketch), + a LowCardinality(Nullable(String)) STATISTICS(countminsketch), + b Int64 STATISTICS(countminsketch), + c UInt64 STATISTICS(countminsketch), pk String, ) Engine = MergeTree() ORDER BY pk SETTINGS min_bytes_for_wide_part = 0; From 531a7e3592aa165418b03b1603f30b4935990b81 Mon Sep 17 00:00:00 2001 From: skyoct Date: Mon, 24 Jun 2024 18:26:40 +0800 Subject: [PATCH 0189/2361] fix etag init --- src/IO/S3/getObjectInfo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/S3/getObjectInfo.h b/src/IO/S3/getObjectInfo.h index 2fec407f70e..30d4c627d37 100644 --- a/src/IO/S3/getObjectInfo.h +++ b/src/IO/S3/getObjectInfo.h @@ -15,7 +15,7 @@ struct ObjectInfo { size_t size = 0; time_t last_modification_time = 0; - String etag = ""; + String etag; std::map metadata = {}; /// Set only if getObjectInfo() is called with `with_metadata = true`. 
}; From 051290e6c912108986c896916db087c71230a121 Mon Sep 17 00:00:00 2001 From: serxa Date: Mon, 24 Jun 2024 12:26:46 +0000 Subject: [PATCH 0190/2361] Add throughput introspection for scheduler queues --- src/Common/EventRateMeter.h | 18 ++++++++++++++++-- src/Common/Scheduler/ISchedulerNode.h | 14 ++++++++++++++ src/Common/Scheduler/Nodes/FairPolicy.h | 3 +-- src/Common/Scheduler/Nodes/FifoQueue.h | 3 +-- src/Common/Scheduler/Nodes/PriorityPolicy.h | 3 +-- .../Scheduler/Nodes/SemaphoreConstraint.h | 3 +-- .../Scheduler/Nodes/ThrottlerConstraint.h | 3 +-- src/Common/Scheduler/SchedulerRoot.h | 3 +-- src/Storages/System/StorageSystemScheduler.cpp | 2 ++ 9 files changed, 38 insertions(+), 14 deletions(-) diff --git a/src/Common/EventRateMeter.h b/src/Common/EventRateMeter.h index 3a21a80ce8b..4c38d1d9371 100644 --- a/src/Common/EventRateMeter.h +++ b/src/Common/EventRateMeter.h @@ -14,8 +14,9 @@ namespace DB class EventRateMeter { public: - explicit EventRateMeter(double now, double period_) + explicit EventRateMeter(double now, double period_, double step_ = 0.0) : period(period_) + , step(step_) , half_decay_time(period * std::numbers::ln2) // for `ExponentiallySmoothedAverage::sumWeights()` to be equal to `1/period` { reset(now); @@ -38,7 +39,16 @@ public: if (now - period <= start) // precise counting mode events = ExponentiallySmoothedAverage(events.value + count, now); else // exponential smoothing mode - events.add(count, now, half_decay_time); + { + // Adding events too often lead to low precision due to smoothing too often, so we buffer new events and add them in steps + step_count += count; + if (step_start + step <= now) + { + events.add(step_count, now, half_decay_time); + step_start = now; + step_count = 0; + } + } } /// Compute average event rate throughout `[now - period, now]` period. @@ -58,16 +68,20 @@ public: void reset(double now) { start = now; + step_start = now; events = ExponentiallySmoothedAverage(); data_points = 0; } private: const double period; + const double step; // duration of a step const double half_decay_time; double start; // Instant in past without events before it; when measurement started or reset ExponentiallySmoothedAverage events; // Estimated number of events in the last `period` size_t data_points = 0; + double step_start; // start instant of the last step + double step_count = 0.0; // number of events accumulated since step start }; } diff --git a/src/Common/Scheduler/ISchedulerNode.h b/src/Common/Scheduler/ISchedulerNode.h index 81b491b0eda..c051829e336 100644 --- a/src/Common/Scheduler/ISchedulerNode.h +++ b/src/Common/Scheduler/ISchedulerNode.h @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include #include @@ -176,6 +178,14 @@ protected: /// Postponed to be handled in scheduler thread, so it is intended to be called from outside. 
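(Editor's aside, not part of the patch: a minimal usage sketch of the step-buffered `EventRateMeter` introduced above. The constructor parameters come from the diff; the `add`/`rate` signatures and the include path are assumptions.)

```
// Illustrative only: the new `step` parameter batches incoming samples so the
// exponentially smoothed average is updated once per step instead of per event.
#include <iostream>
#include <Common/EventRateMeter.h> // assumed include path

int main()
{
    double now = 0.0;
    DB::EventRateMeter meter(now, /*period_=*/ 2.0, /*step_=*/ 1.0);

    for (int i = 0; i < 20; ++i)
    {
        now += 0.1;            // an event every 100 ms
        meter.add(now, 100.0); // 100 cost units; folded into the average once per 1 s step
    }
    std::cout << "avg rate over the last period: " << meter.rate(now) << " units/s\n";
    return 0;
}
```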
void scheduleActivation(); + /// Helper for introspection metrics + void incrementDequeued(ResourceCost cost) + { + dequeued_requests++; + dequeued_cost += cost; + throughput.add(static_cast(clock_gettime_ns())/1e9, cost); + } + public: EventQueue * const event_queue; String basename; @@ -189,6 +199,10 @@ public: std::atomic dequeued_cost{0}; std::atomic canceled_cost{0}; std::atomic busy_periods{0}; + + /// Average dequeued_cost per second + /// WARNING: Should only be accessed from the scheduler thread, so that locking is not required + EventRateMeter throughput{static_cast(clock_gettime_ns())/1e9, 2, 1}; }; using SchedulerNodePtr = std::shared_ptr; diff --git a/src/Common/Scheduler/Nodes/FairPolicy.h b/src/Common/Scheduler/Nodes/FairPolicy.h index 0a4e55c253b..fba637e979e 100644 --- a/src/Common/Scheduler/Nodes/FairPolicy.h +++ b/src/Common/Scheduler/Nodes/FairPolicy.h @@ -188,8 +188,7 @@ public: if (request) { - dequeued_requests++; - dequeued_cost += request->cost; + incrementDequeued(request->cost); return {request, heap_size > 0}; } } diff --git a/src/Common/Scheduler/Nodes/FifoQueue.h b/src/Common/Scheduler/Nodes/FifoQueue.h index 9ec997c06d2..9fbc6d1ae65 100644 --- a/src/Common/Scheduler/Nodes/FifoQueue.h +++ b/src/Common/Scheduler/Nodes/FifoQueue.h @@ -59,8 +59,7 @@ public: if (requests.empty()) busy_periods++; queue_cost -= result->cost; - dequeued_requests++; - dequeued_cost += result->cost; + incrementDequeued(result->cost); return {result, !requests.empty()}; } diff --git a/src/Common/Scheduler/Nodes/PriorityPolicy.h b/src/Common/Scheduler/Nodes/PriorityPolicy.h index 22a5155cfeb..91dc95600d5 100644 --- a/src/Common/Scheduler/Nodes/PriorityPolicy.h +++ b/src/Common/Scheduler/Nodes/PriorityPolicy.h @@ -122,8 +122,7 @@ public: if (request) { - dequeued_requests++; - dequeued_cost += request->cost; + incrementDequeued(request->cost); return {request, !items.empty()}; } } diff --git a/src/Common/Scheduler/Nodes/SemaphoreConstraint.h b/src/Common/Scheduler/Nodes/SemaphoreConstraint.h index 10fce536f5d..92c6af9db18 100644 --- a/src/Common/Scheduler/Nodes/SemaphoreConstraint.h +++ b/src/Common/Scheduler/Nodes/SemaphoreConstraint.h @@ -81,8 +81,7 @@ public: child_active = child_now_active; if (!active()) busy_periods++; - dequeued_requests++; - dequeued_cost += request->cost; + incrementDequeued(request->cost); return {request, active()}; } diff --git a/src/Common/Scheduler/Nodes/ThrottlerConstraint.h b/src/Common/Scheduler/Nodes/ThrottlerConstraint.h index f4a5795bb2b..56866336f50 100644 --- a/src/Common/Scheduler/Nodes/ThrottlerConstraint.h +++ b/src/Common/Scheduler/Nodes/ThrottlerConstraint.h @@ -89,8 +89,7 @@ public: child_active = child_now_active; if (!active()) busy_periods++; - dequeued_requests++; - dequeued_cost += request->cost; + incrementDequeued(request->cost); return {request, active()}; } diff --git a/src/Common/Scheduler/SchedulerRoot.h b/src/Common/Scheduler/SchedulerRoot.h index 7af42fdbbea..5307aadc3cc 100644 --- a/src/Common/Scheduler/SchedulerRoot.h +++ b/src/Common/Scheduler/SchedulerRoot.h @@ -162,8 +162,7 @@ public: if (request == nullptr) // Possible in case of request cancel, just retry continue; - dequeued_requests++; - dequeued_cost += request->cost; + incrementDequeued(request->cost); return {request, current != nullptr}; } } diff --git a/src/Storages/System/StorageSystemScheduler.cpp b/src/Storages/System/StorageSystemScheduler.cpp index 339a59e88a5..b42c807d6fc 100644 --- a/src/Storages/System/StorageSystemScheduler.cpp +++ 
b/src/Storages/System/StorageSystemScheduler.cpp @@ -31,6 +31,7 @@ ColumnsDescription StorageSystemScheduler::getColumnsDescription() {"dequeued_requests", std::make_shared(), "The total number of resource requests dequeued from this node."}, {"canceled_requests", std::make_shared(), "The total number of resource requests canceled from this node."}, {"dequeued_cost", std::make_shared(), "The sum of costs (e.g. size in bytes) of all requests dequeued from this node."}, + {"throughput", std::make_shared(), "Current average throughput (dequeued cost per second)."}, {"canceled_cost", std::make_shared(), "The sum of costs (e.g. size in bytes) of all requests canceled from this node."}, {"busy_periods", std::make_shared(), "The total number of deactivations of this node."}, {"vruntime", std::make_shared(std::make_shared()), @@ -96,6 +97,7 @@ void StorageSystemScheduler::fillData(MutableColumns & res_columns, ContextPtr c res_columns[i++]->insert(node->dequeued_requests.load()); res_columns[i++]->insert(node->canceled_requests.load()); res_columns[i++]->insert(node->dequeued_cost.load()); + res_columns[i++]->insert(node->throughput.rate(static_cast(clock_gettime_ns())/1e9)); res_columns[i++]->insert(node->canceled_cost.load()); res_columns[i++]->insert(node->busy_periods.load()); From 7a27b10ef5a9a6a86e985d5922b85038d1cb03ee Mon Sep 17 00:00:00 2001 From: AntiTopQuark Date: Mon, 24 Jun 2024 23:39:47 +0800 Subject: [PATCH 0191/2361] support MinMax hyperrectangle for MergeTreeIndexSet --- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 66 ++++++++++++++++++-- src/Storages/MergeTree/MergeTreeIndexSet.h | 11 +++- 2 files changed, 70 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index b11cbf1e034..e5708b67e03 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -44,10 +44,12 @@ MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet( const String & index_name_, const Block & index_sample_block_, size_t max_rows_, - MutableColumns && mutable_columns_) + MutableColumns && mutable_columns_, + std::vector && set_hyperrectangle_) : index_name(index_name_) , max_rows(max_rows_) , block(index_sample_block_.cloneWithColumns(std::move(mutable_columns_))) + , set_hyperrectangle(std::move(set_hyperrectangle_)) { } @@ -85,6 +87,15 @@ void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const serialization->serializeBinaryBulkWithMultipleStreams(column, 0, size(), settings, state); serialization->serializeBinaryBulkStateSuffix(settings, state); } + + for (size_t i = 0; i < num_columns; ++i) + { + const DataTypePtr & type = block.getByPosition(i).type; + auto serialization = type->getDefaultSerialization(); + + serialization->serializeBinary(set_hyperrectangle[i].left, ostr, {}); + serialization->serializeBinary(set_hyperrectangle[i].right, ostr, {}); + } } void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) @@ -117,6 +128,25 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); serialization->deserializeBinaryBulkWithMultipleStreams(elem.column, rows_to_read, settings, state, nullptr); } + + set_hyperrectangle.clear(); + Field min_val; + Field max_val; + for (size_t i = 0; i < num_columns; ++i) + { + const DataTypePtr & type = block.getByPosition(i).type; + auto serialization = type->getDefaultSerialization(); + 
+ serialization->deserializeBinary(min_val, istr, {}); + serialization->deserializeBinary(max_val, istr, {}); + + // NULL_LAST + if (min_val.isNull()) + min_val = POSITIVE_INFINITY; + if (max_val.isNull()) + max_val = POSITIVE_INFINITY; + set_hyperrectangle.emplace_back(min_val, true, max_val, true); + } } @@ -182,10 +212,29 @@ void MergeTreeIndexAggregatorSet::update(const Block & block, size_t * pos, size if (has_new_data) { + FieldRef field_min; + FieldRef field_max; for (size_t i = 0; i < columns.size(); ++i) { auto filtered_column = block.getByName(index_columns[i]).column->filter(filter, block.rows()); columns[i]->insertRangeFrom(*filtered_column, 0, filtered_column->size()); + + if (const auto * column_nullable = typeid_cast(filtered_column.get())) + column_nullable->getExtremesNullLast(field_min, field_max); + else + filtered_column->getExtremes(field_min, field_max); + + if (set_hyperrectangle.size() <= i) + { + set_hyperrectangle.emplace_back(field_min, true, field_max, true); + } + else + { + set_hyperrectangle[i].left + = applyVisitor(FieldVisitorAccurateLess(), set_hyperrectangle[i].left, field_min) ? set_hyperrectangle[i].left : field_min; + set_hyperrectangle[i].right + = applyVisitor(FieldVisitorAccurateLess(), set_hyperrectangle[i].right, field_max) ? field_max : set_hyperrectangle[i].right; + } } } @@ -221,7 +270,7 @@ bool MergeTreeIndexAggregatorSet::buildFilter( MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() { - auto granule = std::make_shared(index_name, index_sample_block, max_rows, std::move(columns)); + auto granule = std::make_shared(index_name, index_sample_block, max_rows, std::move(columns), std::move(set_hyperrectangle)); switch (data.type) { @@ -240,15 +289,22 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() return granule; } +KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) +{ + return KeyCondition{filter_actions_dag, context, index.column_names, index.expression}; +} MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( const String & index_name_, const Block & index_sample_block, size_t max_rows_, const ActionsDAGPtr & filter_dag, - ContextPtr context) + ContextPtr context, + const IndexDescription & index_description) : index_name(index_name_) , max_rows(max_rows_) + , index_data_types(index_description.data_types) + , condition(buildCondition(index_description, filter_dag, context)) { for (const auto & name : index_sample_block.getNames()) if (!key_columns.contains(name)) @@ -302,7 +358,7 @@ bool MergeTreeIndexConditionSet::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx if (!column->isNullAt(i) && (column->get64(i) & 1)) return true; - return false; + return condition.checkInHyperrectangle(granule.set_hyperrectangle, index_data_types).can_be_true; } @@ -546,7 +602,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexSet::createIndexAggregator(const Merge MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const { - return std::make_shared(index.name, index.sample_block, max_rows, filter_actions_dag, context); + return std::make_shared(index.name, index.sample_block, max_rows, filter_actions_dag, context, index); } MergeTreeIndexPtr setIndexCreator(const IndexDescription & index) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h index 6efc2effafd..4fe79cb03c5 100644 --- 
a/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -22,7 +22,8 @@ struct MergeTreeIndexGranuleSet final : public IMergeTreeIndexGranule const String & index_name_, const Block & index_sample_block_, size_t max_rows_, - MutableColumns && columns_); + MutableColumns && columns_, + std::vector && set_hyperrectangle_); void serializeBinary(WriteBuffer & ostr) const override; void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; @@ -36,6 +37,7 @@ struct MergeTreeIndexGranuleSet final : public IMergeTreeIndexGranule const size_t max_rows; Block block; + std::vector set_hyperrectangle; }; @@ -73,6 +75,7 @@ private: ClearableSetVariants data; Sizes key_sizes; MutableColumns columns; + std::vector set_hyperrectangle; }; @@ -84,7 +87,8 @@ public: const Block & index_sample_block, size_t max_rows_, const ActionsDAGPtr & filter_dag, - ContextPtr context); + ContextPtr context, + const IndexDescription & index_description); bool alwaysUnknownOrTrue() const override; @@ -119,6 +123,9 @@ private: std::unordered_set key_columns; ExpressionActionsPtr actions; String actions_output_column_name; + + DataTypes index_data_types; + KeyCondition condition; }; From 3b377fabe70c447304fa36d04f0495c0e9432d9c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 25 Jun 2024 02:49:40 +0200 Subject: [PATCH 0192/2361] Update submodule --- contrib/aws | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/aws b/contrib/aws index 1c2946bfcb7..d5450d76abd 160000 --- a/contrib/aws +++ b/contrib/aws @@ -1 +1 @@ -Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf +Subproject commit d5450d76abda556ce145ddabe7e0cc6a7644ec59 From 49634db3ba2961dadbdc1689f0a4ef1ecdb8bea1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 25 Jun 2024 02:51:57 +0200 Subject: [PATCH 0193/2361] Update submodule --- contrib/aws-crt-cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/aws-crt-cpp b/contrib/aws-crt-cpp index f532d6abc0d..e5aa45cacfd 160000 --- a/contrib/aws-crt-cpp +++ b/contrib/aws-crt-cpp @@ -1 +1 @@ -Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0 +Subproject commit e5aa45cacfdcda7719ead38760e7c61076f5745f From 5e4d3b2e43b04eb0bc0e44c082c86612127726e6 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 4 Jan 2024 00:05:57 +0000 Subject: [PATCH 0194/2361] Refreshable materialized views improvements --- .../system-tables/view_refreshes.md | 3 +- .../sql-reference/statements/create/view.md | 28 +++- docs/en/sql-reference/statements/system.md | 6 +- src/Common/ErrorCodes.cpp | 1 + src/Interpreters/InterpreterSystemQuery.cpp | 4 + src/Parsers/ASTRefreshStrategy.cpp | 3 +- src/Parsers/ASTRefreshStrategy.h | 1 + src/Parsers/ASTSystemQuery.h | 1 + src/Parsers/ParserRefreshStrategy.cpp | 9 ++ src/Parsers/ParserSystemQuery.cpp | 1 + src/Storages/MaterializedView/RefreshSet.h | 9 +- .../MaterializedView/RefreshSettings.h | 6 +- src/Storages/MaterializedView/RefreshTask.cpp | 144 +++++++++++------- src/Storages/MaterializedView/RefreshTask.h | 28 ++-- .../MaterializedView/RefreshTask_fwd.h | 2 - src/Storages/StorageMaterializedView.cpp | 75 +++++---- src/Storages/StorageMaterializedView.h | 4 +- .../System/StorageSystemViewRefreshes.cpp | 4 +- ...2_refreshable_materialized_views.reference | 33 +++- .../02932_refreshable_materialized_views.sh | 110 ++++++++++--- 20 files changed, 327 insertions(+), 145 deletions(-) diff --git a/docs/en/operations/system-tables/view_refreshes.md 
b/docs/en/operations/system-tables/view_refreshes.md index 12377507b39..e792e0d095d 100644 --- a/docs/en/operations/system-tables/view_refreshes.md +++ b/docs/en/operations/system-tables/view_refreshes.md @@ -17,7 +17,8 @@ Columns: - `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took. - `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start. - `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled. -- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Exception'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace. +- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Error'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace. +- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — If nonzero, the current or next refresh is a retry (see `refresh_retries` refresh setting), and `retry` is the 1-based index of that retry. - `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation. - `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. - `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 1fabb6d8cc7..3e0766794f1 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -159,6 +159,8 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name REFRESH EVERY|AFTER interval [OFFSET interval] RANDOMIZE FOR interval DEPENDS ON [db.]name [, [db.]name [, ...]] +SETTINGS name = value [, name = value [, ...]] +[APPEND] [TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY] AS SELECT ... ``` @@ -167,18 +169,23 @@ where `interval` is a sequence of simple intervals: number SECOND|MINUTE|HOUR|DAY|WEEK|MONTH|YEAR ``` -Periodically runs the corresponding query and stores its result in a table, atomically replacing the table's previous contents. +Periodically runs the corresponding query and stores its result in a table. + * If the query says `APPEND`, each refresh inserts rows into the table without deleting existing rows. The insert is not atomic, just like a regular INSERT SELECT. + * Otherwise each refresh atomically replaces the table's previous contents. Differences from regular non-refreshable materialized views: - * No insert trigger. I.e. when new data is inserted into the table specified in SELECT, it's *not* automatically pushed to the refreshable materialized view. The periodic refresh runs the entire query and replaces the entire table. + * No insert trigger. I.e. when new data is inserted into the table specified in SELECT, it's *not* automatically pushed to the refreshable materialized view. 
The periodic refresh runs the entire query. * No restrictions on the SELECT query. Table functions (e.g. `url()`), views, UNION, JOIN, are all allowed. :::note The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query. ::: :::note Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations: * not compatible with Replicated database or table engines * It is not supported in ClickHouse Cloud * require [Atomic database engine](../../../engines/database-engines/atomic.md), - * no retries for failed refresh - we just skip to the next scheduled refresh time, * no limit on number of concurrent refreshes. ::: @@ -243,15 +250,22 @@ A few more examples: `DEPENDS ON` only works between refreshable materialized views. Listing a regular table in the `DEPENDS ON` list will prevent the view from ever refreshing (dependencies can be removed with `ALTER`, see below). ::: +### Settings + +Available refresh settings: + * `refresh_retries` - How many times to retry if refresh query fails with an exception. If all retries fail, skip to the next scheduled refresh time. 0 means no retries, -1 means infinite retries. Default: 0. + * `refresh_retry_initial_backoff_ms` - Delay before the first retry, if `refresh_retries` is not zero. Each subsequent retry doubles the delay, up to `refresh_retry_max_backoff_ms`. Default: 100 ms. + * `refresh_retry_max_backoff_ms` - Limit on the exponential growth of delay between refresh attempts. Default: 60000 ms (1 minute). + ### Changing Refresh Parameters {#changing-refresh-parameters} To change refresh parameters: ``` -ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPENDS ON ...] +ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPENDS ON ...] [SETTINGS ...] ``` :::note -This replaces refresh schedule *and* dependencies. If the table had a `DEPENDS ON`, doing a `MODIFY REFRESH` without `DEPENDS ON` will remove the dependencies. +This replaces *all* refresh parameters at once: schedule, dependencies, settings, and APPEND-ness. E.g. if the table had a `DEPENDS ON`, doing a `MODIFY REFRESH` without `DEPENDS ON` will remove the dependencies. ::: ### Other operations @@ -260,6 +274,10 @@ The status of all refreshable materialized views is available in table [`system. To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views). +:::note +Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing the pre-refresh version of the data.
This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA== +::: + ## Window View [Experimental] :::info diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index e6d3439d2b9..7b8d2339516 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -389,7 +389,7 @@ SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_ After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands. The following modifiers are supported: - If a `STRICT` modifier was specified then the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue. - - If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed. + - If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed. Additionally, the LIGHTWEIGHT modifier supports an optional FROM 'srcReplicas' clause, where 'srcReplicas' is a comma-separated list of source replica names. This extension allows for more targeted synchronization by focusing only on replication tasks originating from the specified source replicas. - If a `PULL` modifier was specified then the query pulls new replication queue entries from ZooKeeper, but does not wait for anything to be processed. @@ -515,6 +515,10 @@ Trigger an immediate out-of-schedule refresh of a given view. SYSTEM REFRESH VIEW [db.]name ``` +### WAIT VIEW + +Wait for the currently running refresh to complete. If the refresh fails, throws an exception. If no refresh is running, completes immediately, throwing an exception if the previous refresh failed. + ### STOP VIEW, STOP VIEWS
diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index b1b8e2367a4..38fed9f3198 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -604,6 +604,7 @@ M(723, PARQUET_EXCEPTION) \ M(724, TOO_MANY_TABLES) \ M(725, TOO_MANY_DATABASES) \ + M(726, REFRESH_FAILED) \ \ M(900, DISTRIBUTED_CACHE_ERROR) \ M(901, CANNOT_USE_DISTRIBUTED_CACHE) \ diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index d3526941b33..111817729e2 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -664,6 +664,9 @@ BlockIO InterpreterSystemQuery::execute() case Type::REFRESH_VIEW: getRefreshTask()->run(); break; + case Type::WAIT_VIEW: + getRefreshTask()->wait(); + break; case Type::CANCEL_VIEW: getRefreshTask()->cancel(); break; @@ -1411,6 +1414,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() break; } case Type::REFRESH_VIEW: + case Type::WAIT_VIEW: case Type::START_VIEW: case Type::START_VIEWS: case Type::STOP_VIEW: diff --git a/src/Parsers/ASTRefreshStrategy.cpp b/src/Parsers/ASTRefreshStrategy.cpp index 2e0c6ee4638..d10c1b4e7f5 100644 --- a/src/Parsers/ASTRefreshStrategy.cpp +++ b/src/Parsers/ASTRefreshStrategy.cpp @@ -20,7 +20,6 @@ ASTPtr ASTRefreshStrategy::clone() const res->set(res->settings, settings->clone()); if (dependencies) res->set(res->dependencies, dependencies->clone()); - res->schedule_kind = schedule_kind; return res; } @@ -66,6 +65,8 @@ void ASTRefreshStrategy::formatImpl( f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " SETTINGS " << (f_settings.hilite ? hilite_none : ""); settings->formatImpl(f_settings, state, frame); } + if (append) + f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " APPEND" << (f_settings.hilite ? hilite_none : ""); } } diff --git a/src/Parsers/ASTRefreshStrategy.h b/src/Parsers/ASTRefreshStrategy.h index ca248b76b40..bb5ac97c054 100644 --- a/src/Parsers/ASTRefreshStrategy.h +++ b/src/Parsers/ASTRefreshStrategy.h @@ -24,6 +24,7 @@ public: ASTTimeInterval * offset = nullptr; ASTTimeInterval * spread = nullptr; RefreshScheduleKind schedule_kind{RefreshScheduleKind::UNKNOWN}; + bool append = false; String getID(char) const override { return "Refresh strategy definition"; } diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 167e724dcee..59de90b1d8e 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -95,6 +95,7 @@ public: START_CLEANUP, RESET_COVERAGE, REFRESH_VIEW, + WAIT_VIEW, START_VIEW, START_VIEWS, STOP_VIEW, diff --git a/src/Parsers/ParserRefreshStrategy.cpp b/src/Parsers/ParserRefreshStrategy.cpp index e7912293d85..8d19312996a 100644 --- a/src/Parsers/ParserRefreshStrategy.cpp +++ b/src/Parsers/ParserRefreshStrategy.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB { @@ -95,7 +96,15 @@ bool ParserRefreshStrategy::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!ParserSetQuery{true}.parse(pos, settings, expected)) return false; refresh->set(refresh->settings, settings); + + /// Validate. 
+ RefreshSettings parsed_settings; + parsed_settings.applyChanges(refresh->settings->changes); } + + if (ParserKeyword{"APPEND"}.ignore(pos, expected)) + refresh->append = true; + node = refresh; return true; } diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 0545c3e5568..4b89b3817f0 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -421,6 +421,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & break; case Type::REFRESH_VIEW: + case Type::WAIT_VIEW: case Type::START_VIEW: case Type::STOP_VIEW: case Type::CANCEL_VIEW: diff --git a/src/Storages/MaterializedView/RefreshSet.h b/src/Storages/MaterializedView/RefreshSet.h index eff445023a6..c5ec227bd92 100644 --- a/src/Storages/MaterializedView/RefreshSet.h +++ b/src/Storages/MaterializedView/RefreshSet.h @@ -11,7 +11,7 @@ namespace DB using DatabaseAndTableNameSet = std::unordered_set; -enum class RefreshState : RefreshTaskStateUnderlying +enum class RefreshState { Disabled = 0, Scheduled, @@ -19,11 +19,11 @@ enum class RefreshState : RefreshTaskStateUnderlying Running, }; -enum class LastRefreshResult : RefreshTaskStateUnderlying +enum class LastRefreshResult { Unknown = 0, Cancelled, - Exception, + Error, Finished }; @@ -37,7 +37,8 @@ struct RefreshInfo UInt64 last_attempt_duration_ms = 0; UInt32 next_refresh_time = 0; UInt64 refresh_count = 0; - String exception_message; // if last_refresh_result is Exception + UInt64 retry = 0; + String exception_message; // if last_refresh_result is Error std::vector remaining_dependencies; ProgressValues progress; }; diff --git a/src/Storages/MaterializedView/RefreshSettings.h b/src/Storages/MaterializedView/RefreshSettings.h index 814c7e52b32..23676538788 100644 --- a/src/Storages/MaterializedView/RefreshSettings.h +++ b/src/Storages/MaterializedView/RefreshSettings.h @@ -6,8 +6,10 @@ namespace DB { #define LIST_OF_REFRESH_SETTINGS(M, ALIAS) \ - /// TODO: Add settings - /// M(UInt64, name, 42, "...", 0) + M(Int64, refresh_retries, 0, "How many times to retry refresh query if it fails. If all attempts fail, wait for the next refresh time according to schedule. 0 to disable retries. -1 for infinite retries.", 0) \ + M(UInt64, refresh_retry_initial_backoff_ms, 100, "Delay before the first retry if refresh query fails (if refresh_retries setting is not zero). 
Each subsequent retry doubles the delay, up to refresh_retry_max_backoff_ms.", 0) \ + M(UInt64, refresh_retry_max_backoff_ms, 60'000, "Limit on the exponential growth of delay between refresh attempts, if they keep failing and refresh_retries is positive.", 0) \ + DECLARE_SETTINGS_TRAITS(RefreshSettingsTraits, LIST_OF_REFRESH_SETTINGS) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index bc8cb0ce69a..83930ed7be9 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -23,41 +23,42 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int QUERY_WAS_CANCELLED; + extern const int REFRESH_FAILED; } RefreshTask::RefreshTask( - const ASTRefreshStrategy & strategy) + StorageMaterializedView * view_, const DB::ASTRefreshStrategy & strategy) : log(getLogger("RefreshTask")) + , view(view_) , refresh_schedule(strategy) -{} + , refresh_append(strategy.append) +{ + if (strategy.settings != nullptr) + refresh_settings.applyChanges(strategy.settings->changes); +} RefreshTaskHolder RefreshTask::create( - const StorageMaterializedView & view, + StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy) { - auto task = std::make_shared(strategy); + auto task = std::make_shared(view, strategy); - task->refresh_task = context->getSchedulePool().createTask("MaterializedViewRefresherTask", - [self = task->weak_from_this()] - { - if (auto t = self.lock()) - t->refreshTask(); - }); + task->refresh_task = context->getSchedulePool().createTask("RefreshTask", + [self = task.get()] { self->refreshTask(); }); std::vector deps; if (strategy.dependencies) for (auto && dependency : strategy.dependencies->children) deps.emplace_back(dependency->as()); - context->getRefreshSet().emplace(view.getStorageID(), deps, task); + context->getRefreshSet().emplace(view->getStorageID(), deps, task); return task; } -void RefreshTask::initializeAndStart(std::shared_ptr view) +void RefreshTask::initializeAndStart() { - view_to_refresh = view; if (view->getContext()->getSettingsRef().stop_refreshable_materialized_views_on_startup) stop_requested = true; populateDependencies(); @@ -103,7 +104,11 @@ void RefreshTask::alterRefreshParams(const DB::ASTRefreshStrategy & new_strategy if (arriveDependency(id) && !std::exchange(refresh_immediately, true)) refresh_task->schedule(); - /// TODO: Update settings once we have them. 
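    /// (Editor's note, not in the patch) Re-initializing to a default-constructed RefreshSettings
    /// before applying the new changes makes the ALTER semantics "replace" rather than "merge":
    /// any setting omitted from the new definition falls back to its default value.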
+ refresh_settings = {}; + if (new_strategy.settings != nullptr) + refresh_settings.applyChanges(new_strategy.settings->changes); + + refresh_append = new_strategy.append; } RefreshInfo RefreshTask::getInfo() const @@ -112,7 +117,7 @@ RefreshInfo RefreshTask::getInfo() const auto res = info; res.view_id = set_handle.getID(); res.remaining_dependencies.assign(remaining_dependencies.begin(), remaining_dependencies.end()); - if (res.last_refresh_result != LastRefreshResult::Exception) + if (res.last_refresh_result != LastRefreshResult::Error) res.exception_message.clear(); res.progress = progress.getValues(); return res; @@ -140,6 +145,8 @@ void RefreshTask::run() std::lock_guard guard(mutex); if (std::exchange(refresh_immediately, true)) return; + next_refresh_prescribed = std::chrono::floor(currentTime()); + next_refresh_actual = currentTime(); refresh_task->schedule(); } @@ -150,6 +157,14 @@ void RefreshTask::cancel() refresh_task->schedule(); } +void RefreshTask::wait() +{ + std::unique_lock lock(mutex); + refresh_cv.wait(lock, [&] { return info.state != RefreshState::Running; }); + if (info.last_refresh_result == LastRefreshResult::Error) + throw Exception(ErrorCodes::REFRESH_FAILED, "Refresh failed: {}", info.exception_message); +} + void RefreshTask::shutdown() { { @@ -167,6 +182,8 @@ void RefreshTask::shutdown() /// (Also, RefreshSet holds a shared_ptr to us.) std::lock_guard guard(mutex); set_handle.reset(); + + view = nullptr; } void RefreshTask::notify(const StorageID & parent_id, std::chrono::sys_seconds parent_next_prescribed_time) @@ -233,6 +250,7 @@ void RefreshTask::refreshTask() chassert(lock.owns_lock()); interrupt_execution.store(false); + refresh_cv.notify_all(); // we'll assign info.state before unlocking the mutex if (stop_requested) { @@ -244,7 +262,7 @@ void RefreshTask::refreshTask() if (!refresh_immediately) { auto now = currentTime(); - if (now >= next_refresh_with_spread) + if (now >= next_refresh_actual) { if (arriveTime()) refresh_immediately = true; @@ -257,7 +275,7 @@ void RefreshTask::refreshTask() else { size_t delay_ms = std::chrono::duration_cast( - next_refresh_with_spread - now).count(); + next_refresh_actual - now).count(); /// If we're in a test that fakes the clock, poll every 100ms. if (fake_clock.load(std::memory_order_relaxed) != INT64_MIN) @@ -271,19 +289,9 @@ void RefreshTask::refreshTask() /// Perform a refresh. + bool append = refresh_append; refresh_immediately = false; - - auto view = lockView(); - if (!view) - { - /// The view was dropped. This RefreshTask should be destroyed soon too. - /// (Maybe this is unreachable.) - info.state = RefreshState::Disabled; - break; - } - info.state = RefreshState::Running; - CurrentMetrics::Increment metric_inc(CurrentMetrics::RefreshingViews); lock.unlock(); @@ -294,7 +302,7 @@ void RefreshTask::refreshTask() try { - executeRefreshUnlocked(view); + executeRefreshUnlocked(append); refreshed = true; } catch (...) @@ -318,18 +326,16 @@ void RefreshTask::refreshTask() if (exception) { - info.last_refresh_result = LastRefreshResult::Exception; + info.last_refresh_result = LastRefreshResult::Error; info.exception_message = *exception; - - /// TODO: Do a few retries with exponential backoff. - advanceNextRefreshTime(now); + scheduleRetryOrSkipToNextRefresh(now); } else if (!refreshed) { info.last_refresh_result = LastRefreshResult::Cancelled; /// Make sure we don't just start another refresh immediately. 
- if (!stop_requested && now >= next_refresh_with_spread) + if (!stop_requested) advanceNextRefreshTime(now); } else @@ -362,17 +368,18 @@ void RefreshTask::refreshTask() } } -void RefreshTask::executeRefreshUnlocked(std::shared_ptr view) +void RefreshTask::executeRefreshUnlocked(bool append) { LOG_DEBUG(log, "Refreshing view {}", view->getStorageID().getFullTableName()); progress.reset(); - /// Create a table. - auto [refresh_context, refresh_query] = view->prepareRefresh(); - - StorageID stale_table = StorageID::createEmpty(); + ContextMutablePtr refresh_context; + std::optional table_to_drop; try { + /// Create a table. + auto refresh_query = view->prepareRefresh(append, refresh_context, table_to_drop); + /// Run the query. { CurrentThread::QueryScope query_scope(refresh_context); // create a thread group for the query @@ -424,37 +431,67 @@ void RefreshTask::executeRefreshUnlocked(std::shared_ptrexchangeTargetTable(refresh_query->table_id, refresh_context); + if (!append) + table_to_drop = view->exchangeTargetTable(refresh_query->table_id, refresh_context); } catch (...) { - try + if (table_to_drop.has_value()) { - InterpreterDropQuery::executeDropQuery( - ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, refresh_query->table_id, /*sync*/ false, /*ignore_sync_setting*/ true); - } - catch (...) - { - tryLogCurrentException(log, "Failed to drop temporary table after a failed refresh"); - /// Let's ignore this and keep going, at risk of accumulating many trash tables if this keeps happening. + try + { + InterpreterDropQuery::executeDropQuery( + ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, table_to_drop.value(), /*sync*/ false, /*ignore_sync_setting*/ true); + } + catch (...) + { + tryLogCurrentException(log, "Failed to drop temporary table after a failed refresh"); + /// Let's ignore this and keep going, at risk of accumulating many trash tables if this keeps happening. + } } throw; } /// Drop the old table (outside the try-catch so we don't try to drop the other table if this fails). - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, stale_table, /*sync*/ true, /*ignore_sync_setting*/ true); + if (table_to_drop.has_value()) + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, table_to_drop.value(), /*sync*/ true, /*ignore_sync_setting*/ true); } void RefreshTask::advanceNextRefreshTime(std::chrono::system_clock::time_point now) { std::chrono::sys_seconds next = refresh_schedule.prescribeNext(next_refresh_prescribed, now); next_refresh_prescribed = next; - next_refresh_with_spread = refresh_schedule.addRandomSpread(next); + next_refresh_actual = refresh_schedule.addRandomSpread(next); - auto secs = std::chrono::floor(next_refresh_with_spread); + num_retries = 0; + info.retry = num_retries; + + auto secs = std::chrono::floor(next_refresh_actual); info.next_refresh_time = UInt32(secs.time_since_epoch().count()); } +void RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now) +{ + if (refresh_settings.refresh_retries >= 0 && num_retries >= refresh_settings.refresh_retries) + { + advanceNextRefreshTime(now); + return; + } + + num_retries += 1; + info.retry = num_retries; + + UInt64 delay_ms; + UInt64 multiplier = UInt64(1) << std::min(num_retries - 1, Int64(62)); + /// Overflow check: a*b <= c iff a <= c/b iff a <= floor(c/b). 
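    /// (Editor's note, not in the patch) Worked example with the default settings
    /// (initial backoff 100 ms, max backoff 60000 ms): retry 1 waits 100 ms, retry 2 waits 200 ms,
    /// ..., retry 10 waits 100 * 2^9 = 51200 ms; retry 11 would need 102400 ms, fails the check
    /// below (100 > 60000 / 1024 = 58 with integer division), and is clamped to 60000 ms.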
+ if (refresh_settings.refresh_retry_initial_backoff_ms <= refresh_settings.refresh_retry_max_backoff_ms / multiplier) + delay_ms = refresh_settings.refresh_retry_initial_backoff_ms * multiplier; + else + delay_ms = refresh_settings.refresh_retry_max_backoff_ms; + + next_refresh_actual = now + std::chrono::milliseconds(delay_ms); +} + bool RefreshTask::arriveDependency(const StorageID & parent) { remaining_dependencies.erase(parent); @@ -495,11 +532,6 @@ void RefreshTask::interruptExecution() } } -std::shared_ptr RefreshTask::lockView() -{ - return std::static_pointer_cast(view_to_refresh.lock()); -} - std::chrono::system_clock::time_point RefreshTask::currentTime() const { Int64 fake = fake_clock.load(std::memory_order::relaxed); diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index 1f050a97cd9..9281c0469a3 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -22,15 +22,15 @@ class RefreshTask : public std::enable_shared_from_this { public: /// Never call it manually, public for shared_ptr construction only - explicit RefreshTask(const ASTRefreshStrategy & strategy); + RefreshTask(StorageMaterializedView * view_, const ASTRefreshStrategy & strategy); /// The only proper way to construct task static RefreshTaskHolder create( - const StorageMaterializedView & view, + StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy); - void initializeAndStart(std::shared_ptr view); + void initializeAndStart(); /// Call when renaming the materialized view. void rename(StorageID new_id); @@ -52,6 +52,11 @@ public: /// Cancel task execution void cancel(); + /// Waits for the currently running refresh attempt to complete. + /// If the refresh fails, throws an exception. + /// If no refresh is running, completes immediately, throwing an exception if previous refresh failed. + void wait(); + /// Permanently disable task scheduling and remove this table from RefreshSet. void shutdown(); @@ -66,7 +71,7 @@ public: private: LoggerPtr log = nullptr; - std::weak_ptr view_to_refresh; + StorageMaterializedView * view; /// Protects interrupt_execution and running_executor. /// Can be locked while holding `mutex`. @@ -83,7 +88,9 @@ private: mutable std::mutex mutex; RefreshSchedule refresh_schedule; - RefreshSettings refresh_settings; // TODO: populate, use, update on alter + RefreshSettings refresh_settings; + bool refresh_append; + RefreshSet::Handle set_handle; /// StorageIDs of our dependencies that we're waiting for. @@ -111,7 +118,8 @@ private: /// E.g. for REFRESH EVERY 1 DAY, yesterday's refresh of the dependency shouldn't trigger today's /// refresh of the dependent even if it happened today (e.g. it was slow or had random spread > 1 day). std::chrono::sys_seconds next_refresh_prescribed; - std::chrono::system_clock::time_point next_refresh_with_spread; + std::chrono::system_clock::time_point next_refresh_actual; + Int64 num_retries = 0; /// Calls refreshTask() from background thread. BackgroundSchedulePool::TaskHolder refresh_task; @@ -122,6 +130,7 @@ private: /// Just for observability. RefreshInfo info; Progress progress; + std::condition_variable refresh_cv; // notified when info.state changes /// The main loop of the refresh task. It examines the state, sees what needs to be /// done and does it. 
If there's nothing to do at the moment, returns; it's then scheduled again, @@ -133,11 +142,14 @@ private: /// Perform an actual refresh: create new table, run INSERT SELECT, exchange tables, drop old table. /// Mutex must be unlocked. Called only from refresh_task. - void executeRefreshUnlocked(std::shared_ptr view); + void executeRefreshUnlocked(bool append); /// Assigns next_refresh_* void advanceNextRefreshTime(std::chrono::system_clock::time_point now); + /// Either advances next_refresh_actual using exponential backoff or does advanceNextRefreshTime(). + void scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now); + /// Returns true if all dependencies are fulfilled now. Refills remaining_dependencies in this case. bool arriveDependency(const StorageID & parent); bool arriveTime(); @@ -145,8 +157,6 @@ private: void interruptExecution(); - std::shared_ptr lockView(); - std::chrono::system_clock::time_point currentTime() const; }; diff --git a/src/Storages/MaterializedView/RefreshTask_fwd.h b/src/Storages/MaterializedView/RefreshTask_fwd.h index 1f366962eb6..9e389316c64 100644 --- a/src/Storages/MaterializedView/RefreshTask_fwd.h +++ b/src/Storages/MaterializedView/RefreshTask_fwd.h @@ -8,8 +8,6 @@ namespace DB class RefreshTask; -using RefreshTaskStateUnderlying = UInt8; using RefreshTaskHolder = std::shared_ptr; -using RefreshTaskObserver = std::weak_ptr; } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 316f398b476..500312fd245 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -3,6 +3,8 @@ #include #include +#include +#include #include #include @@ -14,6 +16,7 @@ #include #include #include +#include #include #include @@ -141,6 +144,13 @@ StorageMaterializedView::StorageMaterializedView( if (point_to_itself_by_uuid || point_to_itself_by_name) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Materialized view {} cannot point to itself", table_id_.getFullTableName()); + if (query.refresh_strategy) + { + fixed_uuid = false; + refresher = RefreshTask::create(this, getContext(), *query.refresh_strategy); + refresh_on_start = mode < LoadingStrictnessLevel::ATTACH && !query.is_create_empty; + } + if (!has_inner_table) { target_table_id = query.to_table_id; @@ -190,16 +200,6 @@ StorageMaterializedView::StorageMaterializedView( target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->getDatabase(), manual_create_query->getTable()}, getContext())->getStorageID(); } - - if (query.refresh_strategy) - { - fixed_uuid = false; - refresher = RefreshTask::create( - *this, - getContext(), - *query.refresh_strategy); - refresh_on_start = mode < LoadingStrictnessLevel::ATTACH && !query.is_create_empty; - } } QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage( @@ -370,40 +370,57 @@ bool StorageMaterializedView::optimize( return storage_ptr->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, cleanup, local_context); } -std::tuple> StorageMaterializedView::prepareRefresh() const +std::shared_ptr StorageMaterializedView::prepareRefresh(bool append, ContextMutablePtr & out_context, std::optional & out_temp_table_id) const { auto refresh_context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(getContext()); + out_context = refresh_context; /// Generate a random query id. 
refresh_context->setCurrentQueryId(""); CurrentThread::QueryScope query_scope(refresh_context); auto inner_table_id = getTargetTableId(); - auto new_table_name = ".tmp" + generateInnerTableName(getStorageID()); - auto db = DatabaseCatalog::instance().getDatabase(inner_table_id.database_name); + StorageID target_table = inner_table_id; - auto create_table_query = db->getCreateTableQuery(inner_table_id.table_name, getContext()); - auto & create_query = create_table_query->as(); - create_query.setTable(new_table_name); - create_query.setDatabase(db->getDatabaseName()); - create_query.create_or_replace = true; - create_query.replace_table = true; - create_query.uuid = UUIDHelpers::Nil; + if (!append) + { + auto new_table_name = ".tmp" + generateInnerTableName(getStorageID()); - InterpreterCreateQuery create_interpreter(create_table_query, refresh_context); - create_interpreter.setInternal(true); - create_interpreter.execute(); + auto create_table_query = db->getCreateTableQuery(inner_table_id.table_name, getContext()); + auto & create_query = create_table_query->as(); + create_query.setTable(new_table_name); + create_query.setDatabase(db->getDatabaseName()); + create_query.create_or_replace = true; + create_query.replace_table = true; + create_query.uuid = UUIDHelpers::Nil; - StorageID fresh_table = DatabaseCatalog::instance().getTable({create_query.getDatabase(), create_query.getTable()}, getContext())->getStorageID(); + InterpreterCreateQuery create_interpreter(create_table_query, refresh_context); + create_interpreter.setInternal(true); + create_interpreter.execute(); + + target_table = DatabaseCatalog::instance().getTable({create_query.getDatabase(), create_query.getTable()}, getContext())->getStorageID(); + out_temp_table_id = target_table; + } auto insert_query = std::make_shared(); insert_query->select = getInMemoryMetadataPtr()->getSelectQuery().select_query; - insert_query->setTable(fresh_table.table_name); - insert_query->setDatabase(fresh_table.database_name); - insert_query->table_id = fresh_table; + insert_query->setTable(target_table.table_name); + insert_query->setDatabase(target_table.database_name); + insert_query->table_id = target_table; - return {refresh_context, insert_query}; + Block header; + if (refresh_context->getSettingsRef().allow_experimental_analyzer) + header = InterpreterSelectQueryAnalyzer::getSampleBlock(insert_query->select, refresh_context); + else + header = InterpreterSelectWithUnionQuery(insert_query->select, refresh_context, SelectQueryOptions()).getSampleBlock(); + + auto columns = std::make_shared(','); + for (const String & name : header.getNames()) + columns->children.push_back(std::make_shared(name)); + insert_query->columns = std::move(columns); + + return insert_query; } StorageID StorageMaterializedView::exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context) @@ -569,7 +586,7 @@ void StorageMaterializedView::startup() if (refresher) { - refresher->initializeAndStart(std::static_pointer_cast(shared_from_this())); + refresher->initializeAndStart(); if (refresh_on_start) refresher->run(); diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 5ecd2ec3819..12c0dc651e9 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -121,7 +121,9 @@ private: /// Prepare to refresh a refreshable materialized view: create query context, create temporary /// table, form the insert-select query. 
- std::tuple> prepareRefresh() const; + /// The output arguments may be assigned before throwing an exception, in which case the caller + /// must drop the temp table before rethrowing. + std::shared_ptr prepareRefresh(bool append, ContextMutablePtr & out_context, std::optional & out_temp_table_id) const; StorageID exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context); void setTargetTableId(StorageID id); diff --git a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 30539ed6b6a..95c89bd0214 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -32,8 +32,9 @@ ColumnsDescription StorageSystemViewRefreshes::getColumnsDescription() "If status = 'WaitingForDependencies', a refresh is ready to start as soon as these dependencies are fulfilled." }, {"exception", std::make_shared(), - "if last_refresh_result = 'Exception', i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace." + "if last_refresh_result = 'Error', i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace." }, + {"retry", std::make_shared(), "How many failed attempts there were so far, for the current refresh."}, {"refresh_count", std::make_shared(), "Number of successful refreshes since last server restart or table creation."}, {"progress", std::make_shared(), "Progress of the current refresh, between 0 and 1."}, {"elapsed", std::make_shared(), "The amount of nanoseconds the current refresh took."}, @@ -85,6 +86,7 @@ void StorageSystemViewRefreshes::fillData( res_columns[i++]->insert(Array(deps)); res_columns[i++]->insert(refresh.exception_message); + res_columns[i++]->insert(refresh.retry); res_columns[i++]->insert(refresh.refresh_count); res_columns[i++]->insert(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read); res_columns[i++]->insert(refresh.progress.elapsed_ns / 1e9); diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference index 2eb41590af1..8d4878f4e3f 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference @@ -3,21 +3,25 @@ CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 2 SECOND\n(\n `x` UInt64\n) <2: refreshed> 3 1 1 <3: time difference at least> 1000 <4: next refresh in> 2 +<4.1: fake clock> Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:02 <4.5: altered> Scheduled Finished 2052-01-01 00:00:00 CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT x * 2 AS x\nFROM default.src <5: no refresh> 3 <6: refreshed> 2 <7: refreshed> Scheduled Finished 2054-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT x * 10 AS y\nFROM default.a +<7.5: created dependent> 2052-11-11 11:11:11 <8: refreshed> 20 <9: refreshed> a Scheduled Finished 2054-01-01 00:00:00 <9: refreshed> b Scheduled Finished 2054-01-01 00:00:00 -<10: waiting> a 
Scheduled [] 2054-01-01 00:00:00 -<10: waiting> b WaitingForDependencies ['default.a'] 2054-01-01 00:00:00 +<9.2: dropping> 0 2 +<9.4: dropped> 0 2 +<10: creating> a Scheduled [] 2054-01-01 00:00:00 +<10: creating> b WaitingForDependencies ['default.a'] 2054-01-01 00:00:00 <11: chain-refreshed a> 4 <12: chain-refreshed b> 40 -<13: chain-refreshed> a Scheduled [] Finished 2054-01-01 00:00:01 2056-01-01 00:00:00 -<13: chain-refreshed> b Scheduled ['default.a'] Finished 2054-01-24 23:22:21 2056-01-01 00:00:00 +<13: chain-refreshed> a Scheduled [] Finished 2054-01-01 00:00:01 2056-01-01 00:00:00 1 +<13: chain-refreshed> b Scheduled ['default.a'] Finished 2054-01-24 23:22:21 2056-01-01 00:00:00 1 <14: waiting for next cycle> a Scheduled [] 2058-01-01 00:00:00 <14: waiting for next cycle> b WaitingForDependencies ['default.a'] 2060-01-01 00:00:00 <15: chain-refreshed a> 6 @@ -34,11 +38,24 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nE <24: rename during refresh> 1 <25: rename during refresh> f Running <27: cancelled> f Scheduled -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 +<28: drop during refresh> 0 0 +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT 42 <29: randomize> 1 1 CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src <30: to existing table> 10 <31: to existing table> 10 <31: to existing table> 20 -<32: empty> i Scheduled Unknown -<32: empty> j Scheduled Finished +<31.5: will retry> Error 1 +<31.6: did retry> 10 +<32: empty> i Scheduled Unknown 0 +<32: empty> j Scheduled Finished 0 +<34: append> 10 +<35: append> 10 +<35: append> 20 +<35: append> 30 +<36: not append> 20 +<36: not append> 30 +<37: append chain> 100 +<38: append chain> 100 +<38: append chain> 100 +<38: append chain> 200 diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 89942e25b67..ad6fa3043a1 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -21,9 +21,9 @@ $CLICKHOUSE_CLIENT -nq " refresh after 2 second engine Memory empty - as select number as x from numbers(2) union all select rand64() as x" -$CLICKHOUSE_CLIENT -nq "select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes"; -$CLICKHOUSE_CLIENT -nq "show create a" + as select number as x from numbers(2) union all select rand64() as x; + select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; + show create a;" # Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] do @@ -56,16 +56,15 @@ $CLICKHOUSE_CLIENT -nq " # Create a source table from which views will read. $CLICKHOUSE_CLIENT -nq " - create table src (x Int8) engine Memory as select 1" + create table src (x Int8) engine Memory as select 1;" # Switch to fake clock, change refresh schedule, change query. 
$CLICKHOUSE_CLIENT -nq " - system test view a set fake time '2050-01-01 00:00:01';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:03' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " + system test view a set fake time '2050-01-01 00:00:01'; + system wait view a; + system refresh view a; + system wait view a; + select '<4.1: fake clock>', status, last_refresh_time, next_refresh_time from refreshes; alter table a modify refresh every 2 year; alter table a modify query select x*2 as x from src; select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; @@ -87,11 +86,9 @@ $CLICKHOUSE_CLIENT -nq " create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; show create b; system test view b set fake time '2052-11-11 11:11:11'; - system refresh view b;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2052-11-11 11:11:11' ] -do - sleep 0.1 -done + system refresh view b; + system wait view b; + select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" # Next refresh shouldn't start until the dependency refreshes. $CLICKHOUSE_CLIENT -nq " select '<8: refreshed>', * from b; @@ -101,11 +98,20 @@ while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshe do sleep 0.1 done -# Update source table (by dropping and re-creating it - to test that tables are looked up by name -# rather than uuid), kick off refresh of the dependency. + +# Drop the source table, check that refresh fails and doesn't leave a temp table behind. $CLICKHOUSE_CLIENT -nq " - select '<10: waiting>', view, status, remaining_dependencies, next_refresh_time from refreshes; + select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); drop table src; + system refresh view a;" +$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " + select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" + +# Create the source table again, check that refresh succeeds (in particular that tables are looked +# up by name rather than uuid). +$CLICKHOUSE_CLIENT -nq " + select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; create table src (x Int16) engine Memory as select 2; system test view a set fake time '2054-01-01 00:00:01';" while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] @@ -116,7 +122,7 @@ done $CLICKHOUSE_CLIENT -nq " select '<11: chain-refreshed a>', * from a; select '<12: chain-refreshed b>', * from b; - select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception from refreshes;" + select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" # Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to # catch up to the same cycle. 
@@ -172,7 +178,7 @@ $CLICKHOUSE_CLIENT -nq " drop table b; create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; drop table src;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Exception' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Error' ] do sleep 0.1 done @@ -246,7 +252,8 @@ done # Drop. $CLICKHOUSE_CLIENT -nq " drop table f; - select '<28: drop during refresh>', view, status from refreshes;" + select '<28: drop during refresh>', view, status from refreshes; + select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()" # Try OFFSET and RANDOMIZE FOR. $CLICKHOUSE_CLIENT -nq " @@ -283,21 +290,74 @@ done $CLICKHOUSE_CLIENT -nq " select '<31: to existing table>', * from dest; drop table dest; - drop table src; drop table h;" +# Retries. +$CLICKHOUSE_CLIENT -nq " + create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;" +$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " + select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes; + truncate table src; + insert into src values (1);" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] +do + sleep 0.1 +done +$CLICKHOUSE_CLIENT -nq " + select '<31.6: did retry>', x from h2; + drop table h2" + # EMPTY $CLICKHOUSE_CLIENT -nq " create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); - create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2)" + create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);" while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] do sleep 0.1 done $CLICKHOUSE_CLIENT -nq " - select '<32: empty>', view, status, last_refresh_result from refreshes order by view; + select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view; drop table i; - drop table j" + drop table j;" + +# APPEND +$CLICKHOUSE_CLIENT -nq " + create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src; + select '<33: append>', * from k; + system refresh view k; + system wait view k; + select '<34: append>', * from k; + truncate table src; + insert into src values (2), (3); + system refresh view k; + system wait view k; + select '<35: append>', * from k order by x;" +# ALTER to non-APPEND +$CLICKHOUSE_CLIENT -nq " + alter table k modify refresh every 10 year; + system refresh view k; + system wait view k; + select '<36: not append>', * from k order by x; + drop table k; + truncate table src;" + +# APPEND + TO + regular materialized view reading from it. 
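+# (Each refresh of l appends `x*10` rows from src into mid, and the regular materialized view m
+# multiplies by 10 again as the rows arrive, which is why the checks below expect 100, then 100/100/200.)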
+$CLICKHOUSE_CLIENT -nq " + create table mid (x Int64) engine MergeTree order by x; + create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src; + create materialized view m (x Int64) engine Memory as select x*10 as x from mid; + insert into src values (1); + system refresh view l; + system wait view l; + select '<37: append chain>', * from m; + insert into src values (2); + system refresh view l; + system wait view l; + select '<38: append chain>', * from m order by x; + drop table l; + drop table m; + drop table mid;" $CLICKHOUSE_CLIENT -nq " drop table refreshes;" From 361aa4ffadcad1f68c9157bd44a835ac828de1d4 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 26 Jan 2024 23:03:31 +0000 Subject: [PATCH 0195/2361] Fix build --- src/Parsers/ParserRefreshStrategy.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Parsers/ParserRefreshStrategy.cpp b/src/Parsers/ParserRefreshStrategy.cpp index 8d19312996a..de7eea93614 100644 --- a/src/Parsers/ParserRefreshStrategy.cpp +++ b/src/Parsers/ParserRefreshStrategy.cpp @@ -7,7 +7,6 @@ #include #include #include -#include namespace DB { @@ -96,10 +95,6 @@ bool ParserRefreshStrategy::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!ParserSetQuery{true}.parse(pos, settings, expected)) return false; refresh->set(refresh->settings, settings); - - /// Validate. - RefreshSettings parsed_settings; - parsed_settings.applyChanges(refresh->settings->changes); } if (ParserKeyword{"APPEND"}.ignore(pos, expected)) From dcf783c0edad02c45ede10a981465c67d6991c45 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 1 Feb 2024 22:42:30 +0000 Subject: [PATCH 0196/2361] Check column names when creating table, fix a thing, move code around a little --- src/Interpreters/InterpreterCreateQuery.cpp | 148 ++++++++++++------ src/Interpreters/InterpreterCreateQuery.h | 2 + src/Parsers/ASTCreateQuery.cpp | 2 + src/Parsers/ASTRenameQuery.h | 13 ++ src/Storages/IStorage.h | 2 +- src/Storages/MaterializedView/RefreshTask.cpp | 25 ++- src/Storages/MaterializedView/RefreshTask.h | 24 ++- src/Storages/StorageMaterializedView.cpp | 76 +++++---- src/Storages/StorageMaterializedView.h | 15 +- .../02932_refreshable_materialized_views.sh | 9 +- 10 files changed, 208 insertions(+), 108 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 7272e10b801..ee57c376b31 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -120,6 +120,7 @@ namespace ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int TOO_MANY_TABLES; extern const int TOO_MANY_DATABASES; + extern const int THERE_IS_NO_COLUMN; } namespace fs = std::filesystem; @@ -852,6 +853,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti } properties.columns = ColumnsDescription(as_select_sample.getNamesAndTypesList()); + properties.columns_inferred_from_select_query = true; } else if (create.as_table_function) { @@ -939,6 +941,99 @@ void validateVirtualColumns(const IStorage & storage) } } +void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTCreateQuery & create, const TableProperties & properties, const DatabasePtr & database) +{ + /// This is not strict validation, just catches common errors that would make the view not work. + /// It's possible to circumvent these checks by ALTERing the view or target table after creation. 
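+    /// For example (hypothetical names), `CREATE MATERIALIZED VIEW mv TO t AS SELECT 1` is rejected
+    /// when none of the SELECT's output names match a column of `t`; aliasing the outputs to match,
+    /// e.g. `SELECT 1 AS x`, makes it pass.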
+ + String table_engine; + if (create.storage && create.storage->engine) + table_engine = create.storage->engine->name; + + NamesAndTypesList all_output_columns; + bool check_columns = false; + if (create.to_table_id) + { + if (StoragePtr to_table = DatabaseCatalog::instance().tryGetTable( + {create.to_table_id.database_name, create.to_table_id.table_name, create.to_table_id.uuid}, + getContext() + )) + { + all_output_columns = to_table->getInMemoryMetadataPtr()->getSampleBlock().getNamesAndTypesList(); + check_columns = true; + table_engine = to_table->getName(); + } + } + else if (!properties.columns_inferred_from_select_query) + { + all_output_columns = properties.columns.getInsertable(); + check_columns = true; + } + + if (create.refresh_strategy && !create.refresh_strategy->append) + { + if (database && database->getEngineName() != "Atomic") + throw Exception(ErrorCodes::INCORRECT_QUERY, + "Refreshable materialized views (except with APPEND) only support Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName()); + } + + if (check_columns) + { + Block input_block; + + if (getContext()->getSettingsRef().allow_experimental_analyzer) + { + input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); + } + else + { + input_block = InterpreterSelectWithUnionQuery(create.select->clone(), + getContext(), + SelectQueryOptions().analyze()).getSampleBlock(); + } + + std::unordered_map output_types; + for (const NameAndTypePair & nt : all_output_columns) + output_types[nt.name] = nt.type; + + ColumnsWithTypeAndName input_columns; + ColumnsWithTypeAndName output_columns; + for (const auto & input_column : input_block) + { + auto it = output_types.find(input_column.name); + if (it != output_types.end()) + { + input_columns.push_back(input_column.cloneEmpty()); + output_columns.push_back(ColumnWithTypeAndName(it->second->createColumn(), it->second, input_column.name)); + } + else if (create.refresh_strategy) + { + /// Unrecognized columns produced by SELECT query are allowed for regular materialized + /// views, but not for refreshable ones. This is in part because it was easier to + /// implement, in part because refreshable views have less concern about ALTERing target + /// tables. + /// + /// The motivating scenario for allowing this in regular MV is ALTERing the table+query. + /// Suppose the user removes a column from target table, then a minute later + /// correspondingly updates the view's query to not produce that column. + /// If MV didn't allow unrecognized columns then during that minute all INSERTs into the + /// source table would fail - unacceptable. + /// For refreshable views, during that minute refreshes will fail - acceptable. + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "SELECT query outputs column with name '{}', which is not found in the target table. Use 'AS' to assign alias that matches a column name.", input_column.name); + } + } + + if (input_columns.empty()) + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "None of the columns produced by the SELECT query are present in the target table. 
Use 'AS' to assign aliases that match column names."); + + ActionsDAG::makeConvertingActions( + input_columns, + output_columns, + ActionsDAG::MatchColumnsMode::Position + ); + } +} + namespace { void checkTemporaryTableEngineName(const String & name) @@ -1059,13 +1154,6 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data throw Exception(ErrorCodes::LOGICAL_ERROR, "Table UUID is not specified in DDL log"); } - if (create.refresh_strategy && database->getEngineName() != "Atomic") - throw Exception(ErrorCodes::INCORRECT_QUERY, - "Refreshable materialized view requires Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName()); - /// TODO: Support Replicated databases, only with Shared/ReplicatedMergeTree. - /// Figure out how to make the refreshed data appear all at once on other - /// replicas; maybe a replicated SYSTEM SYNC REPLICA query before the rename? - if (database->getUUID() != UUIDHelpers::Nil) { if (create.attach && !from_path && create.uuid == UUIDHelpers::Nil) @@ -1257,54 +1345,16 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) /// Set and retrieve list of columns, indices and constraints. Set table engine if needed. Rewrite query in canonical way. TableProperties properties = getTablePropertiesAndNormalizeCreateQuery(create, mode); - /// Check type compatible for materialized dest table and select columns - if (create.select && create.is_materialized_view && create.to_table_id && mode <= LoadingStrictnessLevel::CREATE) - { - if (StoragePtr to_table = DatabaseCatalog::instance().tryGetTable( - {create.to_table_id.database_name, create.to_table_id.table_name, create.to_table_id.uuid}, - getContext() - )) - { - Block input_block; - - if (getContext()->getSettingsRef().allow_experimental_analyzer) - { - input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); - } - else - { - input_block = InterpreterSelectWithUnionQuery(create.select->clone(), - getContext(), - SelectQueryOptions().analyze()).getSampleBlock(); - } - - Block output_block = to_table->getInMemoryMetadataPtr()->getSampleBlock(); - - ColumnsWithTypeAndName input_columns; - ColumnsWithTypeAndName output_columns; - for (const auto & input_column : input_block) - { - if (const auto * output_column = output_block.findByName(input_column.name)) - { - input_columns.push_back(input_column.cloneEmpty()); - output_columns.push_back(output_column->cloneEmpty()); - } - } - - ActionsDAG::makeConvertingActions( - input_columns, - output_columns, - ActionsDAG::MatchColumnsMode::Position - ); - } - } - DatabasePtr database; bool need_add_to_database = !create.temporary; // In case of an ON CLUSTER query, the database may not be present on the initiator node if (need_add_to_database) database = DatabaseCatalog::instance().tryGetDatabase(database_name); + /// Check type compatible for materialized dest table and select columns + if (create.select && create.is_materialized_view && mode <= LoadingStrictnessLevel::CREATE) + validateMaterializedViewColumnsAndEngine(create, properties, database); + if (database && database->getEngineName() == "Replicated" && create.select) { bool is_storage_replicated = false; diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h index 70ef29e6b07..c51c0e757df 100644 --- a/src/Interpreters/InterpreterCreateQuery.h +++ b/src/Interpreters/InterpreterCreateQuery.h @@ -91,6 +91,7 @@ private: IndicesDescription indices; 
ConstraintsDescription constraints; ProjectionsDescription projections; + bool columns_inferred_from_select_query = false; }; BlockIO createDatabase(ASTCreateQuery & create); @@ -99,6 +100,7 @@ private: /// Calculate list of columns, constraints, indices, etc... of table. Rewrite query in canonical way. TableProperties getTablePropertiesAndNormalizeCreateQuery(ASTCreateQuery & create, LoadingStrictnessLevel mode) const; void validateTableStructure(const ASTCreateQuery & create, const TableProperties & properties) const; + void validateMaterializedViewColumnsAndEngine(const ASTCreateQuery & create, const TableProperties & properties, const DatabasePtr & database); void setEngine(ASTCreateQuery & create) const; AccessRightsElements getRequiredAccess() const; diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 3e5c6a9d86e..96a29348326 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -254,6 +254,8 @@ ASTPtr ASTCreateQuery::clone() const res->set(res->dictionary, dictionary->clone()); } + if (refresh_strategy) + res->set(res->refresh_strategy, refresh_strategy->clone()); if (as_table_function) res->set(res->as_table_function, as_table_function->clone()); if (comment) diff --git a/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h index d51c382f374..39fc4f787ec 100644 --- a/src/Parsers/ASTRenameQuery.h +++ b/src/Parsers/ASTRenameQuery.h @@ -141,6 +141,19 @@ public: QueryKind getQueryKind() const override { return QueryKind::Rename; } + void addElement(const String & from_db, const String & from_table, const String & to_db, const String & to_table) + { + auto identifier = [&](const String & name) -> ASTPtr + { + if (name.empty()) + return nullptr; + ASTPtr ast = std::make_shared(name); + children.push_back(ast); + return ast; + }; + elements.push_back(Element {.from = Table {.database = identifier(from_db), .table = identifier(from_table)}, .to = Table {.database = identifier(to_db), .table = identifier(to_table)}}); + } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 98afd844046..7d854f35029 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -103,7 +103,7 @@ public: IStorage(const IStorage &) = delete; IStorage & operator=(const IStorage &) = delete; - /// The main name of the table type (for example, StorageMergeTree). + /// The main name of the table type (e.g. Memory, MergeTree, CollapsingMergeTree). virtual std::string getName() const = 0; /// The name of the table. 
diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 83930ed7be9..e593023caee 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -37,7 +37,7 @@ RefreshTask::RefreshTask( refresh_settings.applyChanges(strategy.settings->changes); } -RefreshTaskHolder RefreshTask::create( +OwnedRefreshTask RefreshTask::create( StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy) @@ -54,7 +54,7 @@ RefreshTaskHolder RefreshTask::create( context->getRefreshSet().emplace(view->getStorageID(), deps, task); - return task; + return OwnedRefreshTask(task); } void RefreshTask::initializeAndStart() @@ -169,6 +169,10 @@ void RefreshTask::shutdown() { { std::lock_guard guard(mutex); + + if (view == nullptr) + return; // already shut down + stop_requested = true; interruptExecution(); } @@ -373,7 +377,7 @@ void RefreshTask::executeRefreshUnlocked(bool append) LOG_DEBUG(log, "Refreshing view {}", view->getStorageID().getFullTableName()); progress.reset(); - ContextMutablePtr refresh_context; + ContextMutablePtr refresh_context = view->createRefreshContext(); std::optional table_to_drop; try { @@ -437,24 +441,13 @@ void RefreshTask::executeRefreshUnlocked(bool append) catch (...) { if (table_to_drop.has_value()) - { - try - { - InterpreterDropQuery::executeDropQuery( - ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, table_to_drop.value(), /*sync*/ false, /*ignore_sync_setting*/ true); - } - catch (...) - { - tryLogCurrentException(log, "Failed to drop temporary table after a failed refresh"); - /// Let's ignore this and keep going, at risk of accumulating many trash tables if this keeps happening. - } - } + view->dropTempTable(table_to_drop.value(), refresh_context); throw; } /// Drop the old table (outside the try-catch so we don't try to drop the other table if this fails). if (table_to_drop.has_value()) - InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, table_to_drop.value(), /*sync*/ true, /*ignore_sync_setting*/ true); + view->dropTempTable(table_to_drop.value(), refresh_context); } void RefreshTask::advanceNextRefreshTime(std::chrono::system_clock::time_point now) diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index 9281c0469a3..411eb6fa7b4 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -17,6 +17,7 @@ class PipelineExecutor; class StorageMaterializedView; class ASTRefreshStrategy; +struct OwnedRefreshTask; class RefreshTask : public std::enable_shared_from_this { @@ -25,12 +26,12 @@ public: RefreshTask(StorageMaterializedView * view_, const ASTRefreshStrategy & strategy); /// The only proper way to construct task - static RefreshTaskHolder create( + static OwnedRefreshTask create( StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy); - void initializeAndStart(); + void initializeAndStart(); // called at most once /// Call when renaming the materialized view. void rename(StorageID new_id); @@ -58,6 +59,8 @@ public: void wait(); /// Permanently disable task scheduling and remove this table from RefreshSet. + /// Ok to call multiple times, but not in parallel. + /// Ok to call even if initializeAndStart() wasn't called or failed. 
void shutdown(); /// Notify dependent task @@ -160,4 +163,21 @@ private: std::chrono::system_clock::time_point currentTime() const; }; +/// Wrapper around shared_ptr, calls shutdown() in destructor. +struct OwnedRefreshTask +{ + RefreshTaskHolder ptr; + + OwnedRefreshTask() = default; + OwnedRefreshTask(RefreshTaskHolder p) : ptr(std::move(p)) {} + OwnedRefreshTask(OwnedRefreshTask &&) = default; + OwnedRefreshTask & operator=(OwnedRefreshTask &&) = default; + + ~OwnedRefreshTask() { if (ptr) ptr->shutdown(); } + + RefreshTask* operator->() const { return ptr.get(); } + RefreshTask& operator*() const { return *ptr; } + operator bool() const { return ptr != nullptr; } +}; + } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 500312fd245..9f793e06c09 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -370,21 +370,28 @@ bool StorageMaterializedView::optimize( return storage_ptr->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, cleanup, local_context); } -std::shared_ptr StorageMaterializedView::prepareRefresh(bool append, ContextMutablePtr & out_context, std::optional & out_temp_table_id) const +ContextMutablePtr StorageMaterializedView::createRefreshContext() const { auto refresh_context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(getContext()); - out_context = refresh_context; + refresh_context->setQueryKind(ClientInfo::QueryKind::INITIAL_QUERY); /// Generate a random query id. refresh_context->setCurrentQueryId(""); + /// TODO: Set view's definer as the current user in refresh_context, so that the correct user's + /// quotas and permissions apply for this query. + return refresh_context; +} - CurrentThread::QueryScope query_scope(refresh_context); - +std::shared_ptr StorageMaterializedView::prepareRefresh(bool append, ContextMutablePtr refresh_context, std::optional & out_temp_table_id) const +{ auto inner_table_id = getTargetTableId(); - auto db = DatabaseCatalog::instance().getDatabase(inner_table_id.database_name); StorageID target_table = inner_table_id; if (!append) { + CurrentThread::QueryScope query_scope(refresh_context); + + auto db = DatabaseCatalog::instance().getDatabase(inner_table_id.database_name); + String db_name = db->getDatabaseName(); auto new_table_name = ".tmp" + generateInnerTableName(getStorageID()); auto create_table_query = db->getCreateTableQuery(inner_table_id.table_name, getContext()); @@ -399,7 +406,7 @@ std::shared_ptr StorageMaterializedView::prepareRefresh(bool app create_interpreter.setInternal(true); create_interpreter.execute(); - target_table = DatabaseCatalog::instance().getTable({create_query.getDatabase(), create_query.getTable()}, getContext())->getStorageID(); + target_table = DatabaseCatalog::instance().getTable({db_name, new_table_name}, getContext())->getStorageID(); out_temp_table_id = target_table; } @@ -425,6 +432,9 @@ std::shared_ptr StorageMaterializedView::prepareRefresh(bool app StorageID StorageMaterializedView::exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context) { + /// Known problem: if the target table was ALTERed during refresh, this will effectively revert + /// the ALTER. 
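+    /// Conceptually this is an EXCHANGE TABLES between the inner target table and the freshly
+    /// filled temp table: the new data takes over the target name, while the old data ends up
+    /// under the temp name and is returned to the caller to be dropped.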
+ auto stale_table_id = getTargetTableId(); auto db = DatabaseCatalog::instance().getDatabase(stale_table_id.database_name); @@ -432,13 +442,34 @@ StorageID StorageMaterializedView::exchangeTargetTable(StorageID fresh_table, Co CurrentThread::QueryScope query_scope(refresh_context); - target_db->renameTable( - refresh_context, fresh_table.table_name, *db, stale_table_id.table_name, /*exchange=*/true, /*dictionary=*/false); + auto rename_query = std::make_shared(); + rename_query->exchange = true; + rename_query->addElement(fresh_table.database_name, fresh_table.table_name, stale_table_id.database_name, stale_table_id.table_name); - std::swap(stale_table_id.database_name, fresh_table.database_name); - std::swap(stale_table_id.table_name, fresh_table.table_name); - setTargetTableId(std::move(fresh_table)); - return stale_table_id; + InterpreterRenameQuery(rename_query, refresh_context).execute(); + + return fresh_table; +} + +void StorageMaterializedView::dropTempTable(StorageID table_id, ContextMutablePtr refresh_context) +{ + CurrentThread::QueryScope query_scope(refresh_context); + + try + { + auto drop_query = std::make_shared(); + drop_query->setDatabase(table_id.database_name); + drop_query->setTable(table_id.table_name); + drop_query->kind = ASTDropQuery::Kind::Drop; + drop_query->if_exists = true; + drop_query->sync = false; + + InterpreterDropQuery(drop_query, refresh_context).execute(); + } + catch (...) + { + tryLogCurrentException(&Poco::Logger::get("StorageMaterializedView"), "Failed to drop temporary table after refresh"); + } } void StorageMaterializedView::alter( @@ -543,20 +574,7 @@ void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) ASTRenameQuery::Elements rename_elements; assert(inner_table_id.database_name == old_table_id.database_name); - ASTRenameQuery::Element elem - { - ASTRenameQuery::Table - { - inner_table_id.database_name.empty() ? nullptr : std::make_shared(inner_table_id.database_name), - std::make_shared(inner_table_id.table_name) - }, - ASTRenameQuery::Table - { - new_table_id.database_name.empty() ? 
nullptr : std::make_shared(new_table_id.database_name), - std::make_shared(new_target_table_name) - } - }; - rename_elements.emplace_back(std::move(elem)); + rename->addElement(inner_table_id.database_name, inner_table_id.table_name, new_table_id.database_name, new_table_id.table_name); auto rename = std::make_shared(std::move(rename_elements)); InterpreterRenameQuery(rename, getContext()).execute(); @@ -715,12 +733,6 @@ StorageID StorageMaterializedView::getTargetTableId() const return id; } -void StorageMaterializedView::setTargetTableId(DB::StorageID id) -{ - std::lock_guard guard(target_table_id_mutex); - target_table_id = std::move(id); -} - void StorageMaterializedView::updateTargetTableId(std::optional database_name, std::optional table_name) { std::lock_guard guard(target_table_id_mutex); diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 12c0dc651e9..70487c1fad3 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -5,7 +5,7 @@ #include #include -#include +#include namespace DB { @@ -106,7 +106,7 @@ private: /// Will be initialized in constructor StorageID target_table_id = StorageID::createEmpty(); - RefreshTaskHolder refresher; + OwnedRefreshTask refresher; bool refresh_on_start = false; bool has_inner_table = false; @@ -119,14 +119,15 @@ private: void checkStatementCanBeForwarded() const; - /// Prepare to refresh a refreshable materialized view: create query context, create temporary - /// table, form the insert-select query. - /// The output arguments may be assigned before throwing an exception, in which case the caller + ContextMutablePtr createRefreshContext() const; + /// Prepare to refresh a refreshable materialized view: create temporary table and form the + /// insert-select query. + /// out_temp_table_id may be assigned before throwing an exception, in which case the caller /// must drop the temp table before rethrowing. - std::shared_ptr prepareRefresh(bool append, ContextMutablePtr & out_context, std::optional & out_temp_table_id) const; + std::shared_ptr prepareRefresh(bool append, ContextMutablePtr refresh_context, std::optional & out_temp_table_id) const; StorageID exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context); + void dropTempTable(StorageID table, ContextMutablePtr refresh_context); - void setTargetTableId(StorageID id); void updateTargetTableId(std::optional database_name, std::optional table_name); }; diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index ad6fa3043a1..528a97535cc 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -257,7 +257,7 @@ $CLICKHOUSE_CLIENT -nq " # Try OFFSET and RANDOMIZE FOR. $CLICKHOUSE_CLIENT -nq " - create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42; + create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x; show create g; system test view g set fake time '2050-02-03 15:30:13';" while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] @@ -359,5 +359,12 @@ $CLICKHOUSE_CLIENT -nq " drop table m; drop table mid;" +# Failing to create inner table. 
+$CLICKHOUSE_CLIENT -nq " + create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected" +$CLICKHOUSE_CLIENT -nq " + create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2); + drop table n;" + $CLICKHOUSE_CLIENT -nq " drop table refreshes;" From 09f07838af87cdbf3d1c2a829009ce81f7ddd178 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 2 Feb 2024 00:46:35 +0000 Subject: [PATCH 0197/2361] Add 'ness' (as in 'APPEND-ness') to aspell-dict.txt --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index f1fcd19ea4a..b1e5c208b1c 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -2083,6 +2083,7 @@ namequota namespaces natively nats +ness nestjs netloc ngram From be04b4439c8d7cf9b313cc6e1d750820653d95ef Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 2 Feb 2024 05:49:14 +0000 Subject: [PATCH 0198/2361] Unbreak --- src/Storages/StorageMaterializedView.cpp | 12 +++++++++++- src/Storages/StorageMaterializedView.h | 1 + .../02932_refreshable_materialized_views.reference | 3 ++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 9f793e06c09..459fc300159 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -448,7 +448,11 @@ StorageID StorageMaterializedView::exchangeTargetTable(StorageID fresh_table, Co InterpreterRenameQuery(rename_query, refresh_context).execute(); - return fresh_table; + std::swap(stale_table_id.database_name, fresh_table.database_name); + std::swap(stale_table_id.table_name, fresh_table.table_name); + + setTargetTableId(std::move(fresh_table)); + return stale_table_id; } void StorageMaterializedView::dropTempTable(StorageID table_id, ContextMutablePtr refresh_context) @@ -733,6 +737,12 @@ StorageID StorageMaterializedView::getTargetTableId() const return id; } +void StorageMaterializedView::setTargetTableId(DB::StorageID id) +{ + std::lock_guard guard(target_table_id_mutex); + target_table_id = std::move(id); +} + void StorageMaterializedView::updateTargetTableId(std::optional database_name, std::optional table_name) { std::lock_guard guard(target_table_id_mutex); diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 70487c1fad3..a09ee07b3f6 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -128,6 +128,7 @@ private: StorageID exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context); void dropTempTable(StorageID table, ContextMutablePtr refresh_context); + void setTargetTableId(StorageID id); void updateTargetTableId(std::optional database_name, std::optional table_name); }; diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference index 8d4878f4e3f..3bc0d59a608 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference @@ -39,7 +39,7 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 
YEAR\n(\n `y` Int32\n)\nE <25: rename during refresh> f Running <27: cancelled> f Scheduled <28: drop during refresh> 0 0 -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT 42 +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT 42 AS x <29: randomize> 1 1 CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src <30: to existing table> 10 @@ -59,3 +59,4 @@ CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n <38: append chain> 100 <38: append chain> 100 <38: append chain> 200 +creating MergeTree without ORDER BY failed, as expected From 285936fe3fa2fd7bd850b2b10588ad7216938ed4 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 8 Feb 2024 02:55:03 +0000 Subject: [PATCH 0199/2361] Fix renaming --- src/Storages/StorageMaterializedView.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 459fc300159..3691fd5e212 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -578,7 +578,7 @@ void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) ASTRenameQuery::Elements rename_elements; assert(inner_table_id.database_name == old_table_id.database_name); - rename->addElement(inner_table_id.database_name, inner_table_id.table_name, new_table_id.database_name, new_table_id.table_name); + rename->addElement(inner_table_id.database_name, inner_table_id.table_name, new_table_id.database_name, new_target_table_name); auto rename = std::make_shared(std::move(rename_elements)); InterpreterRenameQuery(rename, getContext()).execute(); From f1ea73624956e80d63ce59b98e0bbf19bfbacb08 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 8 Feb 2024 03:03:12 +0000 Subject: [PATCH 0200/2361] Fix missing column aliases in other tests --- .../queries/0_stateless/00982_low_cardinality_setting_in_mv.sql | 2 +- tests/queries/0_stateless/01160_table_dependencies.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql index 7192642bcde..e545dec90b7 100644 --- a/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql +++ b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS mat_view; CREATE TABLE test1 (a LowCardinality(String)) ENGINE=MergeTree() ORDER BY a; CREATE TABLE test2 (a UInt64) engine=MergeTree() ORDER BY a; -CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') FROM test1; +CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') AS a FROM test1; DROP TABLE test_mv; DROP TABLE test1; diff --git a/tests/queries/0_stateless/01160_table_dependencies.sh b/tests/queries/0_stateless/01160_table_dependencies.sh index acb6522e9e2..b72acf62610 100755 --- a/tests/queries/0_stateless/01160_table_dependencies.sh +++ b/tests/queries/0_stateless/01160_table_dependencies.sh @@ -35,7 +35,7 @@ arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from s $CLICKHOUSE_CLIENT -q "select '====='" $CLICKHOUSE_CLIENT 
-q "alter table t add column x int default in(1, $CLICKHOUSE_DATABASE.s), drop column y" -$CLICKHOUSE_CLIENT -q "create materialized view mv to s as select n from t where n in (select n from join)" +$CLICKHOUSE_CLIENT -q "create materialized view mv to s as select n as x from t where n in (select n from join)" $CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database=currentDatabase() order by table" From 7575115069b0a0072b0e4af7c849c3eabf55d7ea Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Sat, 10 Feb 2024 01:57:28 +0000 Subject: [PATCH 0201/2361] Fix race conditions in the test, more small fixes to other tests --- tests/queries/0_stateless/02345_implicit_transaction.sql | 2 +- .../0_stateless/02932_refreshable_materialized_views.sh | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02345_implicit_transaction.sql b/tests/queries/0_stateless/02345_implicit_transaction.sql index ee2e0a07c3e..9496de71e13 100644 --- a/tests/queries/0_stateless/02345_implicit_transaction.sql +++ b/tests/queries/0_stateless/02345_implicit_transaction.sql @@ -3,7 +3,7 @@ CREATE TABLE landing (n Int64) engine=MergeTree order by n; CREATE TABLE target (n Int64) engine=MergeTree order by n; CREATE MATERIALIZED VIEW landing_to_target TO target AS - SELECT n + throwIf(n == 3333) + SELECT n + throwIf(n == 3333) AS n FROM landing; INSERT INTO landing SELECT * FROM numbers(10000); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 528a97535cc..2324041728e 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -298,8 +298,10 @@ $CLICKHOUSE_CLIENT -nq " $CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" $CLICKHOUSE_CLIENT -nq " select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes; - truncate table src; - insert into src values (1);" + create table src2 empty as src; + insert into src2 values (1) + exchange tables src and src2; + drop table src2;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] do sleep 0.1 @@ -336,6 +338,7 @@ $CLICKHOUSE_CLIENT -nq " # ALTER to non-APPEND $CLICKHOUSE_CLIENT -nq " alter table k modify refresh every 10 year; + system wait view k; system refresh view k; system wait view k; select '<36: not append>', * from k order by x; From 279d9c78ee30eafec6a3d2dc711d2c4e0496944a Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 19 Feb 2024 14:52:19 +0000 Subject: [PATCH 0202/2361] Fix race in wait --- src/Storages/MaterializedView/RefreshTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index e593023caee..fdcf358cc20 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -160,7 +160,7 @@ void RefreshTask::cancel() void RefreshTask::wait() { std::unique_lock lock(mutex); - refresh_cv.wait(lock, [&] { return info.state != RefreshState::Running; }); + refresh_cv.wait(lock, [&] { return info.state != RefreshState::Running && !refresh_immediately; 
}); if (info.last_refresh_result == LastRefreshResult::Error) throw Exception(ErrorCodes::REFRESH_FAILED, "Refresh failed: {}", info.exception_message); } From 78a3d0d4f71554c7f07e92f3996b56a28d5acbdf Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 21 Feb 2024 12:51:19 +0000 Subject: [PATCH 0203/2361] Fix parallel_mv perf test --- tests/performance/parallel_mv.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/performance/parallel_mv.xml b/tests/performance/parallel_mv.xml index 5b856740a19..0bf5ed1be09 100644 --- a/tests/performance/parallel_mv.xml +++ b/tests/performance/parallel_mv.xml @@ -11,13 +11,13 @@ create table mt_4 (n UInt64, s String) engine = MergeTree order by tuple() create materialized view mv_1 to mt_1 as - select number, toString(number) from main_table where number % 13 != 0 + select number as n, toString(number) as s from main_table where number % 13 != 0 create materialized view mv_2 to mt_2 as - select number, toString(number) from main_table where number % 13 != 1 + select number as n, toString(number) as s from main_table where number % 13 != 1 create materialized view mv_3 to mt_3 as - select number, toString(number) from main_table where number % 13 != 3 + select number as n, toString(number) as s from main_table where number % 13 != 3 create materialized view mv_4 to mt_4 as - select number, toString(number) from main_table where number % 13 != 4 + select number as n, toString(number) as s from main_table where number % 13 != 4 SYSTEM STOP MERGES main_table SYSTEM STOP MERGES mt_1 From ef5070c1e66e41686eef983272b9a434257c34bf Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Sat, 9 Mar 2024 02:12:53 +0000 Subject: [PATCH 0204/2361] Fix conflict --- src/Parsers/ASTSystemQuery.cpp | 1 + .../02932_refreshable_materialized_views.reference | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index a730ea0ba3d..a183eb4c0f6 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -375,6 +375,7 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s case Type::START_VIEW: case Type::STOP_VIEW: case Type::CANCEL_VIEW: + case Type::WAIT_VIEW: { settings.ostr << ' '; print_database_table(); diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference index 3bc0d59a608..3c6c8b2d778 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference @@ -3,13 +3,13 @@ CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 2 SECOND\n(\n `x` UInt64\n) <2: refreshed> 3 1 1 <3: time difference at least> 1000 <4: next refresh in> 2 -<4.1: fake clock> Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:02 +<4.1: fake clock> Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:03 <4.5: altered> Scheduled Finished 2052-01-01 00:00:00 CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT x * 2 AS x\nFROM default.src <5: no refresh> 3 <6: refreshed> 2 <7: refreshed> Scheduled Finished 2054-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT x * 10 AS y\nFROM default.a +CREATE MATERIALIZED VIEW 
default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a <7.5: created dependent> 2052-11-11 11:11:11 <8: refreshed> 20 <9: refreshed> a Scheduled Finished 2054-01-01 00:00:00 @@ -39,7 +39,7 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nE <25: rename during refresh> f Running <27: cancelled> f Scheduled <28: drop during refresh> 0 0 -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nDEFINER = default SQL SECURITY DEFINER\nAS SELECT 42 AS x +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 AS x <29: randomize> 1 1 CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src <30: to existing table> 10 From 5ce3aaf49ef947e4481e8d7934ee84190ef04542 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Sat, 23 Mar 2024 01:29:24 +0000 Subject: [PATCH 0205/2361] Fix conflicts --- src/Parsers/ParserRefreshStrategy.cpp | 2 +- src/Storages/StorageMaterializedView.cpp | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Parsers/ParserRefreshStrategy.cpp b/src/Parsers/ParserRefreshStrategy.cpp index de7eea93614..4f3b7c66558 100644 --- a/src/Parsers/ParserRefreshStrategy.cpp +++ b/src/Parsers/ParserRefreshStrategy.cpp @@ -97,7 +97,7 @@ bool ParserRefreshStrategy::parseImpl(Pos & pos, ASTPtr & node, Expected & expec refresh->set(refresh->settings, settings); } - if (ParserKeyword{"APPEND"}.ignore(pos, expected)) + if (ParserKeyword{Keyword::APPEND}.ignore(pos, expected)) refresh->append = true; node = refresh; diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 3691fd5e212..a78bb44eb66 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -575,12 +575,11 @@ void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) { auto new_target_table_name = generateInnerTableName(new_table_id); - ASTRenameQuery::Elements rename_elements; assert(inner_table_id.database_name == old_table_id.database_name); + auto rename = std::make_shared(); rename->addElement(inner_table_id.database_name, inner_table_id.table_name, new_table_id.database_name, new_target_table_name); - auto rename = std::make_shared(std::move(rename_elements)); InterpreterRenameQuery(rename, getContext()).execute(); updateTargetTableId(new_table_id.database_name, new_target_table_name); } From 0549d154058169e9f4831923c2c2d2e8fdc295b5 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 26 Mar 2024 04:10:33 +0000 Subject: [PATCH 0206/2361] tidy --- src/Storages/MaterializedView/RefreshTask.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index 411eb6fa7b4..e8e3e45ace3 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -169,7 +169,7 @@ struct OwnedRefreshTask RefreshTaskHolder ptr; OwnedRefreshTask() = default; - OwnedRefreshTask(RefreshTaskHolder p) : ptr(std::move(p)) {} + explicit OwnedRefreshTask(RefreshTaskHolder p) : ptr(std::move(p)) {} OwnedRefreshTask(OwnedRefreshTask &&) = default; OwnedRefreshTask & 
operator=(OwnedRefreshTask &&) = default; @@ -177,7 +177,7 @@ struct OwnedRefreshTask RefreshTask* operator->() const { return ptr.get(); } RefreshTask& operator*() const { return *ptr; } - operator bool() const { return ptr != nullptr; } + explicit operator bool() const { return ptr != nullptr; } }; } From 90c2b29ea28afa99f4a2dd7919ffc3274cf04764 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 11 Jun 2024 01:44:32 +0000 Subject: [PATCH 0207/2361] Log retry information, remove outdated comment --- src/Storages/MaterializedView/RefreshTask.cpp | 24 +++++++++---------- src/Storages/MaterializedView/RefreshTask.h | 3 ++- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index fdcf358cc20..71feece13e6 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -1,7 +1,6 @@ #include -#include - +#include #include #include #include @@ -10,6 +9,7 @@ #include #include #include +#include namespace CurrentMetrics { @@ -312,13 +312,7 @@ void RefreshTask::refreshTask() catch (...) { if (!interrupt_execution.load()) - { - PreformattedMessage message = getCurrentExceptionMessageAndPattern(true); - auto text = message.text; - message.text = fmt::format("Refresh failed: {}", message.text); - LOG_ERROR(log, message); - exception = text; - } + exception = getCurrentExceptionMessage(true); } lock.lock(); @@ -332,7 +326,10 @@ void RefreshTask::refreshTask() { info.last_refresh_result = LastRefreshResult::Error; info.exception_message = *exception; - scheduleRetryOrSkipToNextRefresh(now); + + String retry_info = scheduleRetryOrSkipToNextRefresh(now); + + LOG_ERROR(log, "Refresh failed ({}): {}", retry_info, *exception); } else if (!refreshed) { @@ -445,7 +442,6 @@ void RefreshTask::executeRefreshUnlocked(bool append) throw; } - /// Drop the old table (outside the try-catch so we don't try to drop the other table if this fails). 
if (table_to_drop.has_value()) view->dropTempTable(table_to_drop.value(), refresh_context); } @@ -463,12 +459,13 @@ void RefreshTask::advanceNextRefreshTime(std::chrono::system_clock::time_point n info.next_refresh_time = UInt32(secs.time_since_epoch().count()); } -void RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now) +String RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now) { + Int64 attempt_number = num_retries + 1; if (refresh_settings.refresh_retries >= 0 && num_retries >= refresh_settings.refresh_retries) { advanceNextRefreshTime(now); - return; + return fmt::format("attempt {}/{}, next refresh at {:%Y.%m.%d %T}", attempt_number, refresh_settings.refresh_retries + 1, next_refresh_actual); } num_retries += 1; @@ -483,6 +480,7 @@ void RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::ti delay_ms = refresh_settings.refresh_retry_max_backoff_ms; next_refresh_actual = now + std::chrono::milliseconds(delay_ms); + return fmt::format("attempt {}/{}, retry in {} seconds", attempt_number, refresh_settings.refresh_retries, (delay_ms + 500) / 1000); } bool RefreshTask::arriveDependency(const StorageID & parent) diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index e8e3e45ace3..54253548795 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -151,7 +151,8 @@ private: void advanceNextRefreshTime(std::chrono::system_clock::time_point now); /// Either advances next_refresh_actual using exponential backoff or does advanceNextRefreshTime(). - void scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now); + /// Returns human-readable information for logging. + String scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now); /// Returns true if all dependencies are fulfilled now. Refills remaining_dependencies in this case. 
bool arriveDependency(const StorageID & parent); From f40b45b78cb5838af70df8eb9611370a14a7e7b9 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 11 Jun 2024 02:20:29 +0000 Subject: [PATCH 0208/2361] Avoid fmt/chrono.h because clang refuses to compile it --- src/Storages/MaterializedView/RefreshTask.cpp | 15 ++++++--------- src/Storages/MaterializedView/RefreshTask.h | 3 +-- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 71feece13e6..781cefbf449 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -1,6 +1,5 @@ #include -#include #include #include #include @@ -326,10 +325,9 @@ void RefreshTask::refreshTask() { info.last_refresh_result = LastRefreshResult::Error; info.exception_message = *exception; - - String retry_info = scheduleRetryOrSkipToNextRefresh(now); - - LOG_ERROR(log, "Refresh failed ({}): {}", retry_info, *exception); + Int64 attempt_number = num_retries + 1; + scheduleRetryOrSkipToNextRefresh(now); + LOG_ERROR(log, "Refresh failed (attempt {}/{}): {}", attempt_number, refresh_settings.refresh_retries + 1, *exception); } else if (!refreshed) { @@ -459,13 +457,12 @@ void RefreshTask::advanceNextRefreshTime(std::chrono::system_clock::time_point n info.next_refresh_time = UInt32(secs.time_since_epoch().count()); } -String RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now) +void RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now) { - Int64 attempt_number = num_retries + 1; if (refresh_settings.refresh_retries >= 0 && num_retries >= refresh_settings.refresh_retries) { advanceNextRefreshTime(now); - return fmt::format("attempt {}/{}, next refresh at {:%Y.%m.%d %T}", attempt_number, refresh_settings.refresh_retries + 1, next_refresh_actual); + return; } num_retries += 1; @@ -480,7 +477,7 @@ String RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock:: delay_ms = refresh_settings.refresh_retry_max_backoff_ms; next_refresh_actual = now + std::chrono::milliseconds(delay_ms); - return fmt::format("attempt {}/{}, retry in {} seconds", attempt_number, refresh_settings.refresh_retries, (delay_ms + 500) / 1000); + return; } bool RefreshTask::arriveDependency(const StorageID & parent) diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index 54253548795..e8e3e45ace3 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -151,8 +151,7 @@ private: void advanceNextRefreshTime(std::chrono::system_clock::time_point now); /// Either advances next_refresh_actual using exponential backoff or does advanceNextRefreshTime(). - /// Returns human-readable information for logging. - String scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now); + void scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now); /// Returns true if all dependencies are fulfilled now. Refills remaining_dependencies in this case. 
bool arriveDependency(const StorageID & parent); From e1b9844086f7cd31b1d0bd2fa7de80d521325e5f Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 11 Jun 2024 05:29:44 +0000 Subject: [PATCH 0209/2361] Lint --- src/Storages/MaterializedView/RefreshTask.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 781cefbf449..6cc85cc0f82 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -477,7 +477,6 @@ void RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::ti delay_ms = refresh_settings.refresh_retry_max_backoff_ms; next_refresh_actual = now + std::chrono::milliseconds(delay_ms); - return; } bool RefreshTask::arriveDependency(const StorageID & parent) From 2e8a7ecc57a103997c5c659960c91d28fab1aa86 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 11 Jun 2024 17:43:02 +0000 Subject: [PATCH 0210/2361] Remove unused variable --- src/Interpreters/InterpreterCreateQuery.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ee57c376b31..6e5b500cd69 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -946,10 +946,6 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC /// This is not strict validation, just catches common errors that would make the view not work. /// It's possible to circumvent these checks by ALTERing the view or target table after creation. - String table_engine; - if (create.storage && create.storage->engine) - table_engine = create.storage->engine->name; - NamesAndTypesList all_output_columns; bool check_columns = false; if (create.to_table_id) @@ -961,7 +957,6 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC { all_output_columns = to_table->getInMemoryMetadataPtr()->getSampleBlock().getNamesAndTypesList(); check_columns = true; - table_engine = to_table->getName(); } } else if (!properties.columns_inferred_from_select_query) From f4c13a1d9ad82dbc4382729490e3226c81073c1c Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 11 Jun 2024 19:13:29 +0000 Subject: [PATCH 0211/2361] Add compatibility setting allow_materialized_view_with_bad_select --- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.h | 1 + src/Interpreters/InterpreterCreateQuery.cpp | 44 +++++++++---------- .../test_replicated_database/test.py | 1 + .../02932_refreshable_materialized_views.sh | 8 ++++ 5 files changed, 33 insertions(+), 22 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index fbab72446a0..954e21dd988 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -635,6 +635,7 @@ class IColumn; M(Bool, update_insert_deduplication_token_in_dependent_materialized_views, false, "Should update insert deduplication token with table identifier during insert in dependent materialized views.", 0) \ M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \ M(Bool, ignore_materialized_views_with_dropped_target_table, false, "Ignore MVs with dropped target table during pushing to views", 0) \ + M(Bool, allow_materialized_view_with_bad_select, false, "Allow CREATE MATERIALIZED VIEW with SELECT query that references nonexistent tables or columns, or outputs columns with 
unexpected names (which will be ignored). It must still be syntactically valid. Doesn't apply to refreshable MVs. Doesn't apply if the MV schema needs to be inferred from the SELECT query (i.e. if the CREATE has no column list and no TO table). Can be used for creating MV before its source table.", 0) \ M(Bool, allow_experimental_refreshable_materialized_view, false, "Allow refreshable materialized views (CREATE MATERIALIZED VIEW REFRESH ...).", 0) \ M(Bool, stop_refreshable_materialized_views_on_startup, false, "On server startup, prevent scheduling of refreshable materialized views, as if with SYSTEM STOP VIEWS. You can manually start them with SYSTEM START VIEWS or SYSTEM START VIEW afterwards. Also applies to newly created views. Has no effect on non-refreshable materialized views.", 0) \ M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index fbc414d4f2f..9d3832e451d 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -115,6 +115,7 @@ static const std::mapgetEngineName()); } + Block input_block; + bool relaxed = getContext()->getSettingsRef().allow_materialized_view_with_bad_select; + if (check_columns) { - Block input_block; - - if (getContext()->getSettingsRef().allow_experimental_analyzer) + try { - input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); + if (getContext()->getSettingsRef().allow_experimental_analyzer) + { + input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); + } + else + { + input_block = InterpreterSelectWithUnionQuery(create.select->clone(), + getContext(), + SelectQueryOptions().analyze()).getSampleBlock(); + } } - else + catch (Exception &) { - input_block = InterpreterSelectWithUnionQuery(create.select->clone(), - getContext(), - SelectQueryOptions().analyze()).getSampleBlock(); + if (!relaxed) + throw; + check_columns = false; } + } + if (check_columns) + { std::unordered_map output_types; for (const NameAndTypePair & nt : all_output_columns) output_types[nt.name] = nt.type; @@ -1001,21 +1014,8 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC input_columns.push_back(input_column.cloneEmpty()); output_columns.push_back(ColumnWithTypeAndName(it->second->createColumn(), it->second, input_column.name)); } - else if (create.refresh_strategy) - { - /// Unrecognized columns produced by SELECT query are allowed for regular materialized - /// views, but not for refreshable ones. This is in part because it was easier to - /// implement, in part because refreshable views have less concern about ALTERing target - /// tables. - /// - /// The motivating scenario for allowing this in regular MV is ALTERing the table+query. - /// Suppose the user removes a column from target table, then a minute later - /// correspondingly updates the view's query to not produce that column. - /// If MV didn't allow unrecognized columns then during that minute all INSERTs into the - /// source table would fail - unacceptable. - /// For refreshable views, during that minute refreshes will fail - acceptable. + else if (!relaxed) throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "SELECT query outputs column with name '{}', which is not found in the target table. 
Use 'AS' to assign alias that matches a column name.", input_column.name); - } } if (input_columns.empty()) diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index f23384b5c04..4ecb1aefba7 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -708,6 +708,7 @@ def create_some_tables(db): "distributed_ddl_task_timeout": 0, "allow_experimental_object_type": 1, "allow_suspicious_codecs": 1, + "allow_materialized_view_with_bad_select": 1, } main_node.query(f"CREATE TABLE {db}.t1 (n int) ENGINE=Memory", settings=settings) dummy_node.query( diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 2324041728e..580d1bdc190 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -369,5 +369,13 @@ $CLICKHOUSE_CLIENT -nq " create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2); drop table n;" +# Reading from table that doesn't exist yet. +$CLICKHOUSE_CLIENT -nq " + create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } + create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } + create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE } + create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1; + drop table o;" + $CLICKHOUSE_CLIENT -nq " drop table refreshes;" From b3b70b037ff1b7f317ac5fe0b5219e1308971581 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 12 Jun 2024 00:25:31 +0000 Subject: [PATCH 0212/2361] Revert some of the strictness, because of comment in 01019_materialized_view_select_extra_columns --- src/Core/Settings.h | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 954e21dd988..93e60541352 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -635,7 +635,7 @@ class IColumn; M(Bool, update_insert_deduplication_token_in_dependent_materialized_views, false, "Should update insert deduplication token with table identifier during insert in dependent materialized views.", 0) \ M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \ M(Bool, ignore_materialized_views_with_dropped_target_table, false, "Ignore MVs with dropped target table during pushing to views", 0) \ - M(Bool, allow_materialized_view_with_bad_select, false, "Allow CREATE MATERIALIZED VIEW with SELECT query that references nonexistent tables or columns, or outputs columns with unexpected names (which will be ignored). It must still be syntactically valid. Doesn't apply to refreshable MVs. Doesn't apply if the MV schema needs to be inferred from the SELECT query (i.e. if the CREATE has no column list and no TO table). 
Can be used for creating MV before its source table.", 0) \ + M(Bool, allow_materialized_view_with_bad_select, false, "Allow CREATE MATERIALIZED VIEW with SELECT query that references nonexistent tables or columns. It must still be syntactically valid. Doesn't apply to refreshable MVs. Doesn't apply if the MV schema needs to be inferred from the SELECT query (i.e. if the CREATE has no column list and no TO table). Can be used for creating MV before its source table.", 0) \ M(Bool, allow_experimental_refreshable_materialized_view, false, "Allow refreshable materialized views (CREATE MATERIALIZED VIEW REFRESH ...).", 0) \ M(Bool, stop_refreshable_materialized_views_on_startup, false, "On server startup, prevent scheduling of refreshable materialized views, as if with SYSTEM STOP VIEWS. You can manually start them with SYSTEM START VIEWS or SYSTEM START VIEW afterwards. Also applies to newly created views. Has no effect on non-refreshable materialized views.", 0) \ M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \ diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f0c450f2ff3..1f6cf31e3d3 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -973,7 +973,6 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC } Block input_block; - bool relaxed = getContext()->getSettingsRef().allow_materialized_view_with_bad_select; if (check_columns) { @@ -992,7 +991,7 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC } catch (Exception &) { - if (!relaxed) + if (!getContext()->getSettingsRef().allow_materialized_view_with_bad_select) throw; check_columns = false; } @@ -1014,8 +1013,21 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC input_columns.push_back(input_column.cloneEmpty()); output_columns.push_back(ColumnWithTypeAndName(it->second->createColumn(), it->second, input_column.name)); } - else if (!relaxed) + else if (create.refresh_strategy) + { + /// Unrecognized columns produced by SELECT query are allowed by regular materialized + /// views, but not by refreshable ones. This is in part because it was easier to + /// implement, in part because refreshable views have less concern about ALTERing target + /// tables. + /// + /// The motivating scenario for allowing this in regular MV is ALTERing the table+query. + /// Suppose the user removes a column from target table, then a minute later + /// correspondingly updates the view's query to not produce that column. + /// If MV didn't allow unrecognized columns then during that minute all INSERTs into the + /// source table would fail - unacceptable. + /// For refreshable views, during that minute refreshes will fail - acceptable. throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "SELECT query outputs column with name '{}', which is not found in the target table. 
Use 'AS' to assign alias that matches a column name.", input_column.name); + } } if (input_columns.empty()) From 9957fe1734260cecd383227abc9ab75283e95d9b Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 20 Jun 2024 18:38:12 +0000 Subject: [PATCH 0213/2361] Comment fixes --- docs/en/sql-reference/statements/create/view.md | 16 ++++++++-------- src/Interpreters/InterpreterCreateQuery.cpp | 3 ++- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 3e0766794f1..950d8207028 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -13,8 +13,8 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia Syntax: ``` sql -CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] -[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... ``` @@ -54,8 +54,8 @@ SELECT * FROM view(column1=value1, column2=value2 ...) ## Materialized View ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] -[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... ``` @@ -90,7 +90,7 @@ Given that `POPULATE` works like `CREATE TABLE ... AS SELECT ...` it has limitat - It is not supported with Replicated database - It is not supported in ClickHouse cloud -Instead a separate `INSERT ... SELECT` can be used. +Instead a separate `INSERT ... SELECT` can be used. ::: A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`. @@ -108,7 +108,7 @@ To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop `DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query. `SQL SECURITY` has three legal values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause. -The following table will explain which rights are required for which user in order to select from view. +The following table will explain which rights are required for which user in order to select from view. Note that regardless of the SQL security option, in every case it is still required to have `GRANT SELECT ON ` in order to read from it. | SQL security option | View | Materialized View | @@ -128,7 +128,7 @@ If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used: If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view. 
-To change SQL security for an existing view, use 
+To change SQL security for an existing view, use
 ```sql
 ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
 ```
@@ -178,7 +178,7 @@ Differences from regular non-refreshable materialized views:
 * No restrictions on the SELECT query. Table functions (e.g. `url()`), views, UNION, JOIN, are all allowed.
 
 :::note
-The settings in the The settings in `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
+The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
 :::
 
 :::note
diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
index 1f6cf31e3d3..186df8f2108 100644
--- a/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/src/Interpreters/InterpreterCreateQuery.cpp
@@ -944,7 +944,8 @@ void validateVirtualColumns(const IStorage & storage)
 void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTCreateQuery & create, const TableProperties & properties, const DatabasePtr & database)
 {
     /// This is not strict validation, just catches common errors that would make the view not work.
-    /// It's possible to circumvent these checks by ALTERing the view or target table after creation.
+    /// It's possible to circumvent these checks by ALTERing the view or target table after creation;
+    /// we should probably do some of these checks on ALTER as well.

From f72b0335cdc1d76b5aeb6d130900a68a8f4c8e3b Mon Sep 17 00:00:00 2001
From: Michael Kolupaev
Date: Sat, 22 Jun 2024 02:27:06 +0000
Subject: [PATCH 0214/2361] Test workaround

---
 .../queries/0_stateless/02932_refreshable_materialized_views.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh
index 580d1bdc190..d5b07287936 100755
--- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh
+++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh
@@ -298,7 +298,7 @@ $CLICKHOUSE_CLIENT -nq "
 $CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO"
 $CLICKHOUSE_CLIENT -nq "
     select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes;
-    create table src2 empty as src;
+    create table src2 (x Int8) engine Memory;
     insert into src2 values (1)
     exchange tables src and src2;
     drop table src2;"

From b0ac0327d4d38a918e9be8fc499038cc43cafb2c Mon Sep 17 00:00:00 2001
From: serxa
Date: Tue, 25 Jun 2024 11:48:29 +0000
Subject: [PATCH 0215/2361] Fix bug in EventRateMeter

It was relying on ExponentiallySmoothedCounter::get() which is designed for a specific 1-second time interval between points. Now the sum of weights is computed separately in `duration` field, giving very accurate measurements independent of interval.
--- src/Common/EventRateMeter.h | 52 ++++++---------- src/Common/ProgressIndication.h | 2 +- src/Common/tests/gtest_event_rate_meter.cpp | 68 +++++++++++++++++++++ 3 files changed, 86 insertions(+), 36 deletions(-) create mode 100644 src/Common/tests/gtest_event_rate_meter.cpp diff --git a/src/Common/EventRateMeter.h b/src/Common/EventRateMeter.h index 4c38d1d9371..b8a9112428f 100644 --- a/src/Common/EventRateMeter.h +++ b/src/Common/EventRateMeter.h @@ -4,8 +4,6 @@ #include -#include - namespace DB { @@ -14,10 +12,10 @@ namespace DB class EventRateMeter { public: - explicit EventRateMeter(double now, double period_, double step_ = 0.0) + explicit EventRateMeter(double now, double period_, size_t heating_ = 0) : period(period_) - , step(step_) - , half_decay_time(period * std::numbers::ln2) // for `ExponentiallySmoothedAverage::sumWeights()` to be equal to `1/period` + , max_interval(period * 10) + , heating(heating_) { reset(now); } @@ -30,25 +28,11 @@ public: { // Remove data for initial heating stage that can present at the beginning of a query. // Otherwise it leads to wrong gradual increase of average value, turning algorithm into not very reactive. - if (count != 0.0 && ++data_points < 5) - { - start = events.time; - events = ExponentiallySmoothedAverage(); - } + if (count != 0.0 && data_points++ <= heating) + reset(events.time, data_points); - if (now - period <= start) // precise counting mode - events = ExponentiallySmoothedAverage(events.value + count, now); - else // exponential smoothing mode - { - // Adding events too often lead to low precision due to smoothing too often, so we buffer new events and add them in steps - step_count += count; - if (step_start + step <= now) - { - events.add(step_count, now, half_decay_time); - step_start = now; - step_count = 0; - } - } + duration.add(std::min(max_interval, now - duration.time), now, period); + events.add(count, now, period); } /// Compute average event rate throughout `[now - period, now]` period. 
@@ -59,29 +43,27 @@ public: add(now, 0); if (unlikely(now <= start)) return 0; - if (now - period <= start) // precise counting mode - return events.value / (now - start); - else // exponential smoothing mode - return events.get(half_decay_time); // equals to `events.value / period` + + // We do not use .get() because sum of weights will anyway be canceled out (optimization) + return events.value / duration.value; } - void reset(double now) + void reset(double now, size_t data_points_ = 0) { start = now; - step_start = now; events = ExponentiallySmoothedAverage(); - data_points = 0; + duration = ExponentiallySmoothedAverage(); + data_points = data_points_; } private: const double period; - const double step; // duration of a step - const double half_decay_time; + const double max_interval; + const size_t heating; double start; // Instant in past without events before it; when measurement started or reset - ExponentiallySmoothedAverage events; // Estimated number of events in the last `period` + ExponentiallySmoothedAverage duration; // Current duration of a period + ExponentiallySmoothedAverage events; // Estimated number of events in last `duration` seconds size_t data_points = 0; - double step_start; // start instant of the last step - double step_count = 0.0; // number of events accumulated since step start }; } diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h index a9965785889..d925077a072 100644 --- a/src/Common/ProgressIndication.h +++ b/src/Common/ProgressIndication.h @@ -91,7 +91,7 @@ private: bool write_progress_on_update = false; - EventRateMeter cpu_usage_meter{static_cast(clock_gettime_ns()), 2'000'000'000 /*ns*/}; // average cpu utilization last 2 second + EventRateMeter cpu_usage_meter{static_cast(clock_gettime_ns()), 2'000'000'000 /*ns*/, 4}; // average cpu utilization last 2 second, skip first 4 points HostToTimesMap hosts_data; /// In case of all of the above: /// - clickhouse-local diff --git a/src/Common/tests/gtest_event_rate_meter.cpp b/src/Common/tests/gtest_event_rate_meter.cpp new file mode 100644 index 00000000000..91ceec5eef7 --- /dev/null +++ b/src/Common/tests/gtest_event_rate_meter.cpp @@ -0,0 +1,68 @@ +#include + +#include + +#include + + +TEST(EventRateMeter, ExponentiallySmoothedAverage) +{ + double target = 100.0; + + // The test is only correct for timestep of 1 second because of + // how sum of weights is implemented inside `ExponentiallySmoothedAverage` + double time_step = 1.0; + + for (double half_decay_time : { 0.1, 1.0, 10.0, 100.0}) + { + DB::ExponentiallySmoothedAverage esa; + + int steps = static_cast(half_decay_time * 30 / time_step); + for (int i = 1; i <= steps; ++i) + esa.add(target * time_step, i * time_step, half_decay_time); + double measured = esa.get(half_decay_time); + ASSERT_LE(std::fabs(measured - target), 1e-5 * target); + } +} + +TEST(EventRateMeter, ConstantRate) +{ + double target = 100.0; + + for (double period : {0.1, 1.0, 10.0}) + { + for (double time_step : {0.001, 0.01, 0.1, 1.0}) + { + DB::EventRateMeter erm(0.0, period); + + int steps = static_cast(period * 30 / time_step); + for (int i = 1; i <= steps; ++i) + erm.add(i * time_step, target * time_step); + double measured = erm.rate(steps * time_step); + // std::cout << "T=" << period << " dt=" << time_step << " measured=" << measured << std::endl; + ASSERT_LE(std::fabs(measured - target), 1e-5 * target); + } + } +} + +TEST(EventRateMeter, PreciseStart) +{ + double target = 100.0; + + for (double period : {0.1, 1.0, 10.0}) + { + for (double 
time_step : {0.001, 0.01, 0.1, 1.0}) + { + DB::EventRateMeter erm(0.0, period); + + int steps = static_cast(period / time_step); + for (int i = 1; i <= steps; ++i) + { + erm.add(i * time_step, target * time_step); + double measured = erm.rate(i * time_step); + // std::cout << "T=" << period << " dt=" << time_step << " measured=" << measured << std::endl; + ASSERT_LE(std::fabs(measured - target), 1e-5 * target); + } + } + } +} From e29c8f9aeefb188efc2052d08f123e45b5b3309b Mon Sep 17 00:00:00 2001 From: AntiTopQuark Date: Mon, 24 Jun 2024 23:39:47 +0800 Subject: [PATCH 0216/2361] support MinMax hyperrectangle for MergeTreeIndexSet --- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 67 ++++++++++++++++++-- src/Storages/MergeTree/MergeTreeIndexSet.h | 11 +++- 2 files changed, 72 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index b11cbf1e034..14ab266ded6 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -44,10 +44,12 @@ MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet( const String & index_name_, const Block & index_sample_block_, size_t max_rows_, - MutableColumns && mutable_columns_) + MutableColumns && mutable_columns_, + std::vector && set_hyperrectangle_) : index_name(index_name_) , max_rows(max_rows_) , block(index_sample_block_.cloneWithColumns(std::move(mutable_columns_))) + , set_hyperrectangle(std::move(set_hyperrectangle_)) { } @@ -85,6 +87,15 @@ void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const serialization->serializeBinaryBulkWithMultipleStreams(column, 0, size(), settings, state); serialization->serializeBinaryBulkStateSuffix(settings, state); } + + for (size_t i = 0; i < num_columns; ++i) + { + const DataTypePtr & type = block.getByPosition(i).type; + auto serialization = type->getDefaultSerialization(); + + serialization->serializeBinary(set_hyperrectangle[i].left, ostr, {}); + serialization->serializeBinary(set_hyperrectangle[i].right, ostr, {}); + } } void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) @@ -117,6 +128,25 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); serialization->deserializeBinaryBulkWithMultipleStreams(elem.column, rows_to_read, settings, state, nullptr); } + + set_hyperrectangle.clear(); + Field min_val; + Field max_val; + for (size_t i = 0; i < num_columns; ++i) + { + const DataTypePtr & type = block.getByPosition(i).type; + auto serialization = type->getDefaultSerialization(); + + serialization->deserializeBinary(min_val, istr, {}); + serialization->deserializeBinary(max_val, istr, {}); + + // NULL_LAST + if (min_val.isNull()) + min_val = POSITIVE_INFINITY; + if (max_val.isNull()) + max_val = POSITIVE_INFINITY; + set_hyperrectangle.emplace_back(min_val, true, max_val, true); + } } @@ -182,10 +212,29 @@ void MergeTreeIndexAggregatorSet::update(const Block & block, size_t * pos, size if (has_new_data) { + FieldRef field_min; + FieldRef field_max; for (size_t i = 0; i < columns.size(); ++i) { auto filtered_column = block.getByName(index_columns[i]).column->filter(filter, block.rows()); columns[i]->insertRangeFrom(*filtered_column, 0, filtered_column->size()); + + if (const auto * column_nullable = typeid_cast(filtered_column.get())) + column_nullable->getExtremesNullLast(field_min, field_max); + else + 
filtered_column->getExtremes(field_min, field_max); + + if (set_hyperrectangle.size() <= i) + { + set_hyperrectangle.emplace_back(field_min, true, field_max, true); + } + else + { + set_hyperrectangle[i].left + = applyVisitor(FieldVisitorAccurateLess(), set_hyperrectangle[i].left, field_min) ? set_hyperrectangle[i].left : field_min; + set_hyperrectangle[i].right + = applyVisitor(FieldVisitorAccurateLess(), set_hyperrectangle[i].right, field_max) ? field_max : set_hyperrectangle[i].right; + } } } @@ -221,7 +270,7 @@ bool MergeTreeIndexAggregatorSet::buildFilter( MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() { - auto granule = std::make_shared(index_name, index_sample_block, max_rows, std::move(columns)); + auto granule = std::make_shared(index_name, index_sample_block, max_rows, std::move(columns), std::move(set_hyperrectangle)); switch (data.type) { @@ -240,15 +289,22 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() return granule; } +KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr & filter_actions_dag, ContextPtr context) +{ + return KeyCondition{filter_actions_dag, context, index.column_names, index.expression}; +} MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( const String & index_name_, const Block & index_sample_block, size_t max_rows_, const ActionsDAGPtr & filter_dag, - ContextPtr context) + ContextPtr context, + const IndexDescription & index_description) : index_name(index_name_) , max_rows(max_rows_) + , index_data_types(index_description.data_types) + , condition(buildCondition(index_description, filter_dag, context)) { for (const auto & name : index_sample_block.getNames()) if (!key_columns.contains(name)) @@ -292,6 +348,9 @@ bool MergeTreeIndexConditionSet::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx size_t size = granule.size(); if (size == 0 || (max_rows != 0 && size > max_rows)) return true; + + if (!condition.checkInHyperrectangle(granule.set_hyperrectangle, index_data_types).can_be_true) + return false; Block result = granule.block; actions->execute(result); @@ -546,7 +605,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexSet::createIndexAggregator(const Merge MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const { - return std::make_shared(index.name, index.sample_block, max_rows, filter_actions_dag, context); + return std::make_shared(index.name, index.sample_block, max_rows, filter_actions_dag, context, index); } MergeTreeIndexPtr setIndexCreator(const IndexDescription & index) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h index 6efc2effafd..4fe79cb03c5 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -22,7 +22,8 @@ struct MergeTreeIndexGranuleSet final : public IMergeTreeIndexGranule const String & index_name_, const Block & index_sample_block_, size_t max_rows_, - MutableColumns && columns_); + MutableColumns && columns_, + std::vector && set_hyperrectangle_); void serializeBinary(WriteBuffer & ostr) const override; void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; @@ -36,6 +37,7 @@ struct MergeTreeIndexGranuleSet final : public IMergeTreeIndexGranule const size_t max_rows; Block block; + std::vector set_hyperrectangle; }; @@ -73,6 +75,7 @@ private: ClearableSetVariants data; Sizes key_sizes; MutableColumns columns; + std::vector 
set_hyperrectangle; }; @@ -84,7 +87,8 @@ public: const Block & index_sample_block, size_t max_rows_, const ActionsDAGPtr & filter_dag, - ContextPtr context); + ContextPtr context, + const IndexDescription & index_description); bool alwaysUnknownOrTrue() const override; @@ -119,6 +123,9 @@ private: std::unordered_set key_columns; ExpressionActionsPtr actions; String actions_output_column_name; + + DataTypes index_data_types; + KeyCondition condition; }; From 5d9d29e37086e19dba852097e7820aff83072a8d Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 25 Jun 2024 15:00:07 +0000 Subject: [PATCH 0217/2361] Fixing some crashes --- src/Interpreters/ActionsDAG.cpp | 15 ++++++++----- src/Interpreters/ActionsDAG.h | 5 +++-- src/Interpreters/ExpressionActions.cpp | 8 ++++--- src/Interpreters/ExpressionAnalyzer.cpp | 4 ++-- src/Interpreters/InterpreterSelectQuery.cpp | 14 ++++++------ src/Interpreters/MutationsInterpreter.cpp | 4 ++-- src/Planner/Planner.cpp | 18 ++++++++------- src/Planner/PlannerJoinTree.cpp | 22 +++++++++---------- src/Planner/PlannerJoins.cpp | 4 ++-- src/Planner/PlannerWindowFunctions.cpp | 4 +++- src/Processors/QueryPlan/ExpressionStep.cpp | 8 +++---- src/Processors/QueryPlan/FilterStep.cpp | 6 ++--- .../Optimizations/distinctReadInOrder.cpp | 4 ++-- .../Optimizations/filterPushDown.cpp | 4 ++-- .../QueryPlan/Optimizations/liftUpUnion.cpp | 2 +- .../Optimizations/optimizePrewhere.cpp | 2 +- .../optimizePrimaryKeyCondition.cpp | 6 ++--- .../Optimizations/optimizeReadInOrder.cpp | 8 +++---- .../optimizeUseAggregateProjection.cpp | 2 +- .../Optimizations/projectionsCommon.cpp | 6 ++--- .../Optimizations/removeRedundantDistinct.cpp | 4 ++-- .../QueryPlan/ReadFromMergeTree.cpp | 16 +++++++------- .../QueryPlan/SourceStepWithFilter.cpp | 8 +++---- src/Processors/QueryPlan/TotalsHavingStep.cpp | 8 +++---- .../Transforms/FillingTransform.cpp | 2 +- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 4 ++-- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 ++-- src/Storages/SelectQueryInfo.h | 4 ++-- src/Storages/StorageBuffer.cpp | 8 +++---- src/Storages/StorageMerge.cpp | 6 ++--- src/Storages/VirtualColumnUtils.cpp | 4 ++-- 31 files changed, 112 insertions(+), 102 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 23e1e5ee152..c2626285235 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1246,17 +1246,20 @@ bool ActionsDAG::removeUnusedResult(const std::string & column_name) return true; } -ActionsDAGPtr ActionsDAG::clone() const +ActionsDAGPtr ActionsDAG::clone(const ActionsDAG * from) { std::unordered_map old_to_new_nodes; - return clone(old_to_new_nodes); + return ActionsDAG::clone(from, old_to_new_nodes); } -ActionsDAGPtr ActionsDAG::clone(std::unordered_map & old_to_new_nodes) const +ActionsDAGPtr ActionsDAG::clone(const ActionsDAG * from, std::unordered_map & old_to_new_nodes) { + if (!from) + return nullptr; + auto actions = std::make_unique(); - for (const auto & node : nodes) + for (const auto & node : from->nodes) { auto & copy_node = actions->nodes.emplace_back(node); old_to_new_nodes[&node] = ©_node; @@ -1266,10 +1269,10 @@ ActionsDAGPtr ActionsDAG::clone(std::unordered_map & old_t for (auto & child : node.children) child = old_to_new_nodes[child]; - for (const auto & output_node : outputs) + for (const auto & output_node : from->outputs) actions->outputs.push_back(old_to_new_nodes[output_node]); - for (const auto & input_node : inputs) + for (const auto & input_node : from->inputs) 
actions->inputs.push_back(old_to_new_nodes[input_node]); return actions; diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 4a840885b6a..7ca3d1c1b0d 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -261,8 +261,9 @@ public: void compileExpressions(size_t min_count_to_compile_expression, const std::unordered_set & lazy_executed_nodes = {}); #endif - ActionsDAGPtr clone() const; - ActionsDAGPtr clone(std::unordered_map & old_to_new_nodes) const; + static ActionsDAGPtr clone(const ActionsDAGPtr & from) { return clone(from.get()); } + static ActionsDAGPtr clone(const ActionsDAG * from); + static ActionsDAGPtr clone(const ActionsDAG * from, std::unordered_map & old_to_new_nodes); static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 7cbf5afd763..2eca31fc75e 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -53,7 +53,7 @@ ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const Expressio : project_inputs(project_inputs_) , settings(settings_) { - actions_dag = actions_dag_->clone(); + actions_dag = ActionsDAG::clone(actions_dag_); /// It's important to determine lazy executed nodes before compiling expressions. std::unordered_set lazy_executed_nodes = processShortCircuitFunctions(*actions_dag, settings.short_circuit_function_evaluation); @@ -76,15 +76,17 @@ ExpressionActionsPtr ExpressionActions::clone() const auto copy = std::make_shared(ExpressionActions()); std::unordered_map copy_map; - copy->actions_dag = actions_dag->clone(copy_map); + copy->actions_dag = ActionsDAG::clone(actions_dag.get(), copy_map); copy->actions = actions; for (auto & action : copy->actions) action.node = copy_map[action.node]; + for (const auto * input : copy->actions_dag->getInputs()) + copy->input_positions.emplace(input->result_name, input_positions.at(input->result_name)); + copy->num_columns = num_columns; copy->required_columns = required_columns; - copy->input_positions = input_positions; copy->result_positions = result_positions; copy->sample_block = sample_block; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index be00e37c751..6b49365b492 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1944,7 +1944,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( Block before_prewhere_sample = source_header; if (sanitizeBlock(before_prewhere_sample)) { - auto dag = prewhere_dag_and_flags->dag.clone(); + auto dag = ActionsDAG::clone(&prewhere_dag_and_flags->dag); ExpressionActions( std::move(dag), ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); @@ -1980,7 +1980,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (sanitizeBlock(before_where_sample)) { ExpressionActions( - before_where->dag.clone(), + ActionsDAG::clone(&before_where->dag), ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); auto & column_elem diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 64a17a7ba87..71eb7dc64f8 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1300,7 +1300,7 @@ static InterpolateDescriptionPtr getInterpolateDescription( ActionsDAGPtr actions = 
analyzer.getActionsDAG(true); ActionsDAGPtr conv_dag = ActionsDAG::makeConvertingActions(actions->getResultColumns(), result_columns, ActionsDAG::MatchColumnsMode::Position, true); - ActionsDAGPtr merge_dag = ActionsDAG::merge(std::move(*actions->clone()), std::move(*conv_dag)); + ActionsDAGPtr merge_dag = ActionsDAG::merge(std::move(* ActionsDAG::clone(actions)), std::move(*conv_dag)); interpolate_descr = std::make_shared(std::move(merge_dag), aliases); } @@ -2042,7 +2042,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, - std::make_shared(prewhere_info.row_level_filter->clone()), + std::make_shared(ActionsDAG::clone(prewhere_info.row_level_filter)), prewhere_info.row_level_column_name, true); }); } @@ -2050,7 +2050,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( - header, std::make_shared(prewhere_info.prewhere_actions->clone()), + header, std::make_shared(ActionsDAG::clone(prewhere_info.prewhere_actions)), prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column); }); } @@ -2578,7 +2578,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter) { - auto dag = expression->dag.clone(); + auto dag = ActionsDAG::clone(&expression->dag); if (expression->project_input) dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); @@ -2752,7 +2752,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter) { - auto dag = expression->dag.clone(); + auto dag = ActionsDAG::clone(&expression->dag); if (expression->project_input) dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); @@ -2770,7 +2770,7 @@ void InterpreterSelectQuery::executeTotalsAndHaving( ActionsDAGPtr dag; if (expression) { - dag = expression->dag.clone(); + dag = ActionsDAG::clone(&expression->dag); if (expression->project_input) dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); } @@ -2819,7 +2819,7 @@ void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const Act if (!expression) return; - auto dag = expression->dag.clone(); + auto dag = ActionsDAG::clone(&expression->dag); if (expression->project_input) dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 1bb770bf561..704c5ce7d8b 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1278,7 +1278,7 @@ QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::v if (i < stage.filter_column_names.size()) { - auto dag = step->actions()->dag.clone(); + auto dag = ActionsDAG::clone(&step->actions()->dag); if (step->actions()->project_input) dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute DELETEs. 
@@ -1286,7 +1286,7 @@ QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::v } else { - auto dag = step->actions()->dag.clone(); + auto dag = ActionsDAG::clone(&step->actions()->dag); if (step->actions()->project_input) dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute UPDATE or final projection. diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 681ae7e6ac4..dddb7531519 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -333,12 +333,12 @@ void addExpressionStep(QueryPlan & query_plan, const std::string & step_description, std::vector & result_actions_to_execute) { - auto actions = expression_actions->dag.clone(); + auto actions = ActionsDAG::clone(&expression_actions->dag); if (expression_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions.get()); auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), actions); + result_actions_to_execute.push_back(expression_step->getExpression().get()); expression_step->setStepDescription(step_description); query_plan.addStep(std::move(expression_step)); } @@ -348,15 +348,15 @@ void addFilterStep(QueryPlan & query_plan, const std::string & step_description, std::vector & result_actions_to_execute) { - auto actions = filter_analysis_result.filter_actions->dag.clone(); + auto actions = ActionsDAG::clone(&filter_analysis_result.filter_actions->dag); if (filter_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - result_actions_to_execute.push_back(actions.get()); auto where_step = std::make_unique(query_plan.getCurrentDataStream(), actions, filter_analysis_result.filter_column_name, filter_analysis_result.remove_filter_column); + result_actions_to_execute.push_back(where_step->getExpression().get()); where_step->setStepDescription(step_description); query_plan.addStep(std::move(where_step)); } @@ -556,11 +556,9 @@ void addTotalsHavingStep(QueryPlan & query_plan, ActionsDAGPtr actions; if (having_analysis_result.filter_actions) { - actions = having_analysis_result.filter_actions->dag.clone(); + actions = ActionsDAG::clone(&having_analysis_result.filter_actions->dag); if (having_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - - result_actions_to_execute.push_back(actions.get()); } auto totals_having_step = std::make_unique( @@ -573,6 +571,10 @@ void addTotalsHavingStep(QueryPlan & query_plan, settings.totals_mode, settings.totals_auto_threshold, need_finalize); + + if (having_analysis_result.filter_actions) + result_actions_to_execute.push_back(totals_having_step->getActions().get()); + query_plan.addStep(std::move(totals_having_step)); } @@ -1449,7 +1451,7 @@ void Planner::buildPlanForQueryNode() if (it != table_filters.end()) { const auto & filters = it->second; - table_expression_data.setFilterActions(filters.filter_actions->clone()); + table_expression_data.setFilterActions(ActionsDAG::clone(filters.filter_actions)); table_expression_data.setPrewhereInfo(filters.prewhere_info); } } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 918cfad703e..16b5e363bfd 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -646,7 +646,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto 
table_expression_query_info = select_query_info; table_expression_query_info.table_expression = table_expression; - table_expression_query_info.filter_actions_dag = table_expression_data.getFilterActions()->clone(); + table_expression_query_info.filter_actions_dag = ActionsDAG::clone(table_expression_data.getFilterActions()); table_expression_query_info.analyzer_can_use_parallel_replicas_on_follower = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; size_t max_streams = settings.max_threads; @@ -776,7 +776,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (prewhere_actions) { prewhere_info = std::make_shared(); - prewhere_info->prewhere_actions = prewhere_actions->clone(); + prewhere_info->prewhere_actions = ActionsDAG::clone(prewhere_actions); prewhere_info->prewhere_column_name = prewhere_actions->getOutputs().at(0)->result_name; prewhere_info->remove_prewhere_column = true; prewhere_info->need_filter = true; @@ -831,7 +831,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto row_policy_filter_info = buildRowPolicyFilterIfNeeded(storage, table_expression_query_info, planner_context, used_row_policies); if (row_policy_filter_info.actions) - table_expression_data.setRowLevelFilterActions(row_policy_filter_info.actions->clone()); + table_expression_data.setRowLevelFilterActions(ActionsDAG::clone(row_policy_filter_info.actions)); add_filter(row_policy_filter_info, "Row-level security filter"); if (query_context->getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY) @@ -1178,17 +1178,16 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ join_table_expression, planner_context); - left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.left_join_expressions_actions.get()); - left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.right_join_expressions_actions.get()); - join_clauses_and_actions.left_join_expressions_actions->appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), join_clauses_and_actions.left_join_expressions_actions); left_join_expressions_actions_step->setStepDescription("JOIN actions"); + left_join_tree_query_plan.actions_dags.emplace_back(left_join_expressions_actions_step->getExpression().get()); left_plan.addStep(std::move(left_join_expressions_actions_step)); join_clauses_and_actions.right_join_expressions_actions->appendInputsForUnusedColumns(right_plan.getCurrentDataStream().header); auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), join_clauses_and_actions.right_join_expressions_actions); right_join_expressions_actions_step->setStepDescription("JOIN actions"); + right_join_tree_query_plan.actions_dags.emplace_back(right_join_expressions_actions_step->getExpression().get()); right_plan.addStep(std::move(right_join_expressions_actions_step)); } @@ -1434,7 +1433,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ auto result_plan = QueryPlan(); - if (join_algorithm->isFilled()) + bool is_filled_join = join_algorithm->isFilled(); + if (is_filled_join) { auto filled_join_step = std::make_unique( left_plan.getCurrentDataStream(), @@ -1586,8 +1586,9 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ 
left_join_tree_query_plan.used_row_policies.insert(right_join_tree_query_plan_row_policy); /// Collect all required actions dags in `left_join_tree_query_plan.actions_dags` - for (const auto * action_dag : right_join_tree_query_plan.actions_dags) - left_join_tree_query_plan.actions_dags.emplace_back(action_dag); + if (!is_filled_join) + for (const auto * action_dag : right_join_tree_query_plan.actions_dags) + left_join_tree_query_plan.actions_dags.emplace_back(action_dag); // if (join_clauses_and_actions.left_join_expressions_actions) // left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.left_join_expressions_actions.get()); // if (join_clauses_and_actions.right_join_expressions_actions) @@ -1646,10 +1647,9 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ array_join_action_dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); - join_tree_query_plan.actions_dags.push_back(array_join_action_dag.get()); - auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), std::move(array_join_action_dag)); array_join_actions->setStepDescription("ARRAY JOIN actions"); + join_tree_query_plan.actions_dags.push_back(array_join_actions->getExpression().get()); plan.addStep(std::move(array_join_actions)); auto drop_unused_columns_before_array_join_actions_dag = std::make_unique(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index 45842c0d705..23b6a805ab9 100644 --- a/src/Planner/PlannerJoins.cpp +++ b/src/Planner/PlannerJoins.cpp @@ -588,10 +588,10 @@ JoinClausesAndActions buildJoinClausesAndActions( } } - result.left_join_expressions_actions = left_join_actions->clone(); + result.left_join_expressions_actions = ActionsDAG::clone(left_join_actions); result.left_join_tmp_expression_actions = std::move(left_join_actions); result.left_join_expressions_actions->removeUnusedActions(join_left_actions_names); - result.right_join_expressions_actions = right_join_actions->clone(); + result.right_join_expressions_actions = ActionsDAG::clone(right_join_actions); result.right_join_tmp_expression_actions = std::move(right_join_actions); result.right_join_expressions_actions->removeUnusedActions(join_right_actions_names); diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index 9deceeef9a3..b9e11578dbc 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -157,7 +157,9 @@ std::vector sortWindowDescriptions(const std::vector }; std::vector perm(window_descriptions.size()); - std::iota(perm.begin(), perm.end(), 0U); + for (size_t i = 0; i < perm.size(); ++i) + perm[i] = i; + ::sort(perm.begin(), perm.end(), comparator); return perm; diff --git a/src/Processors/QueryPlan/ExpressionStep.cpp b/src/Processors/QueryPlan/ExpressionStep.cpp index 90ac94a1ace..50bc2e1533e 100644 --- a/src/Processors/QueryPlan/ExpressionStep.cpp +++ b/src/Processors/QueryPlan/ExpressionStep.cpp @@ -30,13 +30,13 @@ ExpressionStep::ExpressionStep(const DataStream & input_stream_, const ActionsDA input_stream_, ExpressionTransform::transformHeader(input_stream_.header, *actions_dag_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description)) - , actions_dag(actions_dag_->clone()) + , actions_dag(ActionsDAG::clone(actions_dag_)) { } void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression 
= std::make_shared(actions_dag->clone(), settings.getActionsSettings()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -61,13 +61,13 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu void ExpressionStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, settings.indent_char); - auto expression = std::make_shared(actions_dag->clone()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); expression->describeActions(settings.out, prefix); } void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const { - auto expression = std::make_shared(actions_dag->clone()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); map.add("Expression", expression->toTree()); } diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index ef9f1d17822..7883461f45a 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -49,7 +49,7 @@ FilterStep::FilterStep( , filter_column_name(std::move(filter_column_name_)) , remove_filter_column(remove_filter_column_) { - actions_dag = actions_dag->clone(); + actions_dag = ActionsDAG::clone(actions_dag_); actions_dag->removeAliasesForFilter(filter_column_name); } @@ -87,7 +87,7 @@ void FilterStep::describeActions(FormatSettings & settings) const settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(actions_dag->clone()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); expression->describeActions(settings.out, prefix); } @@ -96,7 +96,7 @@ void FilterStep::describeActions(JSONBuilder::JSONMap & map) const map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); - auto expression = std::make_shared(actions_dag->clone()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); map.add("Expression", expression->toTree()); } diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp index 87e16b5a244..6cdc3cb4eb0 100644 --- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp @@ -15,11 +15,11 @@ static ActionsDAGPtr buildActionsForPlanPath(std::vector & d if (dag_stack.empty()) return nullptr; - ActionsDAGPtr path_actions = dag_stack.back()->clone(); + ActionsDAGPtr path_actions = ActionsDAG::clone(dag_stack.back()); dag_stack.pop_back(); while (!dag_stack.empty()) { - ActionsDAGPtr clone = dag_stack.back()->clone(); + ActionsDAGPtr clone = ActionsDAG::clone(dag_stack.back()); dag_stack.pop_back(); path_actions->mergeInplace(std::move(*clone)); } diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index ff1cefff09a..f26cd79dd97 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -597,7 +597,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes filter_node.step = std::make_unique( filter_node.children.front()->step->getOutputStream(), - filter->getExpression()->clone(), + ActionsDAG::clone(filter->getExpression()), filter->getFilterColumnName(), filter->removesFilterColumn()); } @@ -611,7 +611,7 @@ size_t 
tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (auto * read_from_merge = typeid_cast(child.get())) { - FilterDAGInfo info{filter->getExpression()->clone(), filter->getFilterColumnName(), filter->removesFilterColumn()}; + FilterDAGInfo info{ActionsDAG::clone(filter->getExpression()), filter->getFilterColumnName(), filter->removesFilterColumn()}; read_from_merge->addFilter(std::move(info)); std::swap(*parent_node, *child_node); return 1; diff --git a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp index 35d8b1a35e4..4629bc0af53 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp @@ -49,7 +49,7 @@ size_t tryLiftUpUnion(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) expr_node.step = std::make_unique( expr_node.children.front()->step->getOutputStream(), - expression->getExpression()->clone()); + ActionsDAG::clone(expression->getExpression())); } /// - Expression - Something diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index 13b691da888..afe1406b65f 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -108,7 +108,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) prewhere_info->need_filter = true; prewhere_info->remove_prewhere_column = optimize_result.fully_moved_to_prewhere && filter_step->removesFilterColumn(); - auto filter_expression = filter_step->getExpression()->clone(); + auto filter_expression = ActionsDAG::clone(filter_step->getExpression()); const auto & filter_column_name = filter_step->getFilterColumnName(); if (prewhere_info->remove_prewhere_column) diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp index e57d3319076..e5ded92b105 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp @@ -18,15 +18,15 @@ void optimizePrimaryKeyCondition(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions->clone(), storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(ActionsDAG::clone(storage_prewhere_info->prewhere_actions), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) - source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter->clone(), storage_prewhere_info->row_level_column_name); + source_step_with_filter->addFilter(ActionsDAG::clone(storage_prewhere_info->row_level_filter), storage_prewhere_info->row_level_column_name); } for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) - source_step_with_filter->addFilter(filter_step->getExpression()->clone(), filter_step->getFilterColumnName()); + source_step_with_filter->addFilter(ActionsDAG::clone(filter_step->getExpression()), filter_step->getFilterColumnName()); /// Note: actually, plan optimizations merge Filter and Expression steps. /// Ideally, chain should look like (Expression -> ...) -> (Filter -> ...) 
-> ReadFromStorage, diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 8e782e68db8..a8bd98d7460 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -173,9 +173,9 @@ static void appendFixedColumnsFromFilterExpression(const ActionsDAG::Node & filt static void appendExpression(ActionsDAGPtr & dag, const ActionsDAGPtr & expression) { if (dag) - dag->mergeInplace(std::move(*expression->clone())); + dag->mergeInplace(std::move(*ActionsDAG::clone(expression))); else - dag = expression->clone(); + dag = ActionsDAG::clone(expression); } /// This function builds a common DAG which is a merge of DAGs from Filter and Expression steps chain. @@ -1066,13 +1066,13 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, for (const auto & actions_dag : window_desc.partition_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(ActionsDAG::clone(actions_dag.get()), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } for (const auto & actions_dag : window_desc.order_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(ActionsDAG::clone(actions_dag.get()), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } auto order_optimizer = std::make_shared( diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 7c45ef48252..da057bd25c2 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -77,7 +77,7 @@ static AggregateProjectionInfo getAggregatingProjectionInfo( AggregateProjectionInfo info; info.context = interpreter.getContext(); - info.before_aggregation = analysis_result.before_aggregation->dag.clone(); + info.before_aggregation = ActionsDAG::clone(&analysis_result.before_aggregation->dag); info.keys = query_analyzer->aggregationKeys().getNames(); info.aggregates = query_analyzer->aggregates(); diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index d8b40b22904..0e2ad96a419 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -67,9 +67,9 @@ std::shared_ptr getMaxAddedBlocks(ReadFromMergeTree * rea void QueryDAG::appendExpression(const ActionsDAGPtr & expression) { if (dag) - dag->mergeInplace(std::move(*expression->clone())); + dag->mergeInplace(std::move(*ActionsDAG::clone(expression))); else - dag = expression->clone(); + dag = ActionsDAG::clone(expression); } const ActionsDAG::Node * findInOutputs(ActionsDAG & dag, const std::string & name, bool remove) @@ -238,7 +238,7 @@ bool analyzeProjectionCandidate( auto projection_query_info = query_info; projection_query_info.prewhere_info = nullptr; - projection_query_info.filter_actions_dag = dag->clone(); + projection_query_info.filter_actions_dag = ActionsDAG::clone(dag); auto projection_result_ptr = reader.estimateNumMarksToRead( 
std::move(projection_parts), diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index d3c75c988e7..81a8a537830 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -70,11 +70,11 @@ namespace if (dag_stack.empty()) return nullptr; - ActionsDAGPtr path_actions = dag_stack.back()->clone(); + ActionsDAGPtr path_actions = ActionsDAG::clone(dag_stack.back()); dag_stack.pop_back(); while (!dag_stack.empty()) { - ActionsDAGPtr clone = dag_stack.back()->clone(); + ActionsDAGPtr clone = ActionsDAG::clone(dag_stack.back()); logActionsDAG("DAG to merge", clone); dag_stack.pop_back(); path_actions->mergeInplace(std::move(*clone)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index e5370c1c130..d711b3e8472 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -798,7 +798,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ info.use_uncompressed_cache); }; - auto sorting_expr = std::make_shared(metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); + auto sorting_expr = std::make_shared(ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG())); SplitPartsWithRangesByPrimaryKeyResult split_ranges_result = splitPartsWithRangesByPrimaryKey( metadata_for_reading->getPrimaryKey(), @@ -1211,7 +1211,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( /// we will store lonely parts with level > 0 to use parallel select on them. RangesInDataParts non_intersecting_parts_by_primary_key; - auto sorting_expr = std::make_shared(metadata_for_reading->getSortingKey().expression->getActionsDAG().clone()); + auto sorting_expr = std::make_shared(ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG())); if (prewhere_info) { @@ -1993,7 +1993,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result.sampling.use_sampling) { - auto sampling_actions = std::make_shared(result.sampling.filter_expression->clone()); + auto sampling_actions = std::make_shared(ActionsDAG::clone(result.sampling.filter_expression.get())); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( @@ -2031,7 +2031,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result_projection) { - auto projection_actions = std::make_shared(result_projection->clone()); + auto projection_actions = std::make_shared(ActionsDAG::clone(result_projection)); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, projection_actions); @@ -2126,7 +2126,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); expression->describeActions(format_settings.out, prefix); } @@ -2135,7 +2135,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto 
expression = std::make_shared(prewhere_info->row_level_filter->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); expression->describeActions(format_settings.out, prefix); } } @@ -2161,7 +2161,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -2171,7 +2171,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.cpp b/src/Processors/QueryPlan/SourceStepWithFilter.cpp index b91debc8239..79b225e7f93 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.cpp +++ b/src/Processors/QueryPlan/SourceStepWithFilter.cpp @@ -110,7 +110,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); expression->describeActions(format_settings.out, prefix); } @@ -119,7 +119,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); expression->describeActions(format_settings.out, prefix); } } @@ -137,7 +137,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -147,7 +147,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", 
prewhere_info->row_level_column_name); - auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); + auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index 45de6c31d24..19632b1862f 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -46,7 +46,7 @@ TotalsHavingStep::TotalsHavingStep( getTraits(!filter_column_.empty())) , aggregates(aggregates_) , overflow_row(overflow_row_) - , actions_dag(actions_dag_->clone()) + , actions_dag(ActionsDAG::clone(actions_dag_)) , filter_column_name(filter_column_) , remove_filter(remove_filter_) , totals_mode(totals_mode_) @@ -57,7 +57,7 @@ TotalsHavingStep::TotalsHavingStep( void TotalsHavingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression_actions = actions_dag ? std::make_shared(actions_dag->clone(), settings.getActionsSettings()) : nullptr; + auto expression_actions = actions_dag ? std::make_shared(ActionsDAG::clone(actions_dag), settings.getActionsSettings()) : nullptr; auto totals_having = std::make_shared( pipeline.getHeader(), @@ -100,7 +100,7 @@ void TotalsHavingStep::describeActions(FormatSettings & settings) const if (actions_dag) { bool first = true; - auto expression = std::make_shared(actions_dag->clone()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); for (const auto & action : expression->getActions()) { settings.out << prefix << (first ? 
"Actions: " @@ -117,7 +117,7 @@ void TotalsHavingStep::describeActions(JSONBuilder::JSONMap & map) const if (actions_dag) { map.add("Filter column", filter_column_name); - auto expression = std::make_shared(actions_dag->clone()); + auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); map.add("Expression", expression->toTree()); } } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 95267bc24e0..bbe57fc6441 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -203,7 +203,7 @@ FillingTransform::FillingTransform( , use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_) { if (interpolate_description) - interpolate_actions = std::make_shared(interpolate_description->actions->clone()); + interpolate_actions = std::make_shared(ActionsDAG::clone(interpolate_description->actions)); std::vector is_fill_column(header_.columns()); for (size_t i = 0, size = fill_description.size(); i < size; ++i) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 7c65381b05b..8d4ef69b1b9 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -265,7 +265,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( if (!set->buildOrderedSetInplace(context)) return; - auto filter_actions_dag = filter_dag->clone(); + auto filter_actions_dag = ActionsDAG::clone(filter_dag); const auto * filter_actions_dag_node = filter_actions_dag->getOutputs().at(0); std::unordered_map node_to_result_node; @@ -319,7 +319,7 @@ static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node return index_hint.getActions()->getOutputs(); /// Import the DAG and map argument pointers. 
- ActionsDAGPtr actions_clone = index_hint.getActions()->clone(); + ActionsDAGPtr actions_clone = ActionsDAG::clone(index_hint.getActions()); chassert(storage); result_dag_or_null->mergeNodes(std::move(*actions_clone), storage); return *storage; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 8fa5b2cc955..e924f853524 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -80,7 +80,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep row_level_filter_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->row_level_filter->clone(), actions_settings), + .actions = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter), actions_settings), .filter_column_name = prewhere_info->row_level_column_name, .remove_filter_column = true, .need_filter = true, @@ -96,7 +96,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep prewhere_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->prewhere_actions->clone(), actions_settings), + .actions = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions), actions_settings), .filter_column_name = prewhere_info->prewhere_column_name, .remove_filter_column = prewhere_info->remove_prewhere_column, .need_filter = prewhere_info->need_filter, diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 654b8b788fe..0b7035504ae 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -66,10 +66,10 @@ struct PrewhereInfo PrewhereInfoPtr prewhere_info = std::make_shared(); if (row_level_filter) - prewhere_info->row_level_filter = row_level_filter->clone(); + prewhere_info->row_level_filter = ActionsDAG::clone(row_level_filter); if (prewhere_actions) - prewhere_info->prewhere_actions = prewhere_actions->clone(); + prewhere_info->prewhere_actions = ActionsDAG::clone(prewhere_actions); prewhere_info->row_level_column_name = row_level_column_name; prewhere_info->prewhere_column_name = prewhere_column_name; diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 9bddf4f0230..695b31d0c80 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -312,7 +312,7 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->row_level_filter) { src_table_query_info.prewhere_info->row_level_filter = ActionsDAG::merge( - std::move(*actions_dag->clone()), + std::move(*ActionsDAG::clone(actions_dag)), std::move(*src_table_query_info.prewhere_info->row_level_filter)); src_table_query_info.prewhere_info->row_level_filter->removeUnusedActions(); @@ -321,7 +321,7 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->prewhere_actions) { src_table_query_info.prewhere_info->prewhere_actions = ActionsDAG::merge( - std::move(*actions_dag->clone()), + std::move(*ActionsDAG::clone(actions_dag)), std::move(*src_table_query_info.prewhere_info->prewhere_actions)); src_table_query_info.prewhere_info->prewhere_actions->removeUnusedActions(); @@ -432,7 +432,7 @@ void StorageBuffer::read( { return std::make_shared( header, - std::make_shared(query_info.prewhere_info->row_level_filter->clone(), actions_settings), + std::make_shared(ActionsDAG::clone(query_info.prewhere_info->row_level_filter), actions_settings), 
query_info.prewhere_info->row_level_column_name, false); }); @@ -442,7 +442,7 @@ void StorageBuffer::read( { return std::make_shared( header, - std::make_shared(query_info.prewhere_info->prewhere_actions->clone(), actions_settings), + std::make_shared(ActionsDAG::clone(query_info.prewhere_info->prewhere_actions), actions_settings), query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column); }); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index c42e3058347..d21a6dc20dd 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -662,7 +662,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ { auto filter_step = std::make_unique( child.plan.getCurrentDataStream(), - filter_info.actions->clone(), + ActionsDAG::clone(filter_info.actions), filter_info.column_name, filter_info.do_remove_column); @@ -1273,12 +1273,12 @@ void ReadFromMerge::RowPolicyData::extendNames(Names & names) const void ReadFromMerge::RowPolicyData::addStorageFilter(SourceStepWithFilter * step) const { - step->addFilter(actions_dag->clone(), filter_column_name); + step->addFilter(ActionsDAG::clone(actions_dag), filter_column_name); } void ReadFromMerge::RowPolicyData::addFilterTransform(QueryPlan & plan) const { - auto filter_step = std::make_unique(plan.getCurrentDataStream(), actions_dag->clone(), filter_column_name, true /* remove filter column */); + auto filter_step = std::make_unique(plan.getCurrentDataStream(), ActionsDAG::clone(actions_dag), filter_column_name, true /* remove filter column */); plan.addStep(std::move(filter_step)); } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 6f7d1d4c39f..1bd5e80a4f9 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -80,7 +80,7 @@ void buildSetsForDAG(const ActionsDAGPtr & dag, const ContextPtr & context) void filterBlockWithDAG(const ActionsDAGPtr & dag, Block & block, ContextPtr context) { buildSetsForDAG(dag, context); - auto actions = std::make_shared(dag->clone()); + auto actions = std::make_shared(ActionsDAG::clone(dag)); Block block_with_filter = block; actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); @@ -318,7 +318,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - auto index_hint_dag = index_hint->getActions()->clone(); + auto index_hint_dag = ActionsDAG::clone(index_hint->getActions()); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag->getOutputs()) if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) From 068e1c55545dfc4e004b8b5970b3f237033444cd Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 25 Jun 2024 15:59:00 +0000 Subject: [PATCH 0218/2361] Fixing build. 
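
This build fix adapts mergeExpressions.cpp to the static ActionsDAG::clone
overloads introduced earlier in this series (ActionsDAG.h now declares
clone(const ActionsDAGPtr &), clone(const ActionsDAG *) and a variant that
also fills an old-to-new node mapping). A minimal standalone sketch of the
call-site pattern follows; the ActionsDAG below is an illustrative stand-in
that mirrors only the clone overloads, not the real class from
src/Interpreters/ActionsDAG.h:

    #include <memory>

    struct ActionsDAG
    {
        using Ptr = std::shared_ptr<ActionsDAG>;

        /// Static overloads replace the former member clone(): call sites can
        /// pass a smart pointer or the address of a DAG held by value.
        static Ptr clone(const Ptr & from) { return clone(from.get()); }
        static Ptr clone(const ActionsDAG * from)
        {
            /// The stand-in simply copy-constructs; the real implementation
            /// rebuilds nodes, inputs and outputs.
            return from ? std::make_shared<ActionsDAG>(*from) : nullptr;
        }
    };

    int main()
    {
        ActionsDAG dag_member;                                    /// e.g. expression->dag in this series
        ActionsDAG::Ptr dag_ptr = std::make_shared<ActionsDAG>();

        auto a = ActionsDAG::clone(dag_ptr);        /// was: dag_ptr->clone()
        auto b = ActionsDAG::clone(&dag_member);    /// was: dag_member.clone()
        auto c = ActionsDAG::clone(dag_ptr.get());  /// same overload, explicit raw pointer
        return (a && b && c) ? 0 : 1;
    }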
--- src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp b/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp index 6ace1b3b5ce..97de69b1134 100644 --- a/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp +++ b/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp @@ -84,12 +84,12 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &) if (child_actions->hasArrayJoin()) return 0; - auto actions = child_actions->clone(); + auto actions = ActionsDAG::clone(child_actions); const auto & child_filter_node = actions->findInOutputs(child_filter->getFilterColumnName()); if (child_filter->removesFilterColumn()) removeFromOutputs(*actions, child_filter_node); - actions->mergeInplace(std::move(*parent_actions->clone())); + actions->mergeInplace(std::move(*ActionsDAG::clone(parent_actions))); const auto & parent_filter_node = actions->findInOutputs(parent_filter->getFilterColumnName()); if (parent_filter->removesFilterColumn()) From 3149e51e9254b268c41ade796d3652d0c2dec8f7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 25 Jun 2024 17:36:07 +0000 Subject: [PATCH 0219/2361] Fix other crashes. --- src/Storages/WindowView/StorageWindowView.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 5822e46f9f8..8f39f0da5af 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -563,11 +563,11 @@ std::pair StorageWindowView::getNewBlocks(UInt32 watermark) auto syntax_result = TreeRewriter(getContext()).analyze(filter_function, builder.getHeader().getNamesAndTypesList()); auto filter_expression = ExpressionAnalyzer(filter_function, syntax_result, getContext()).getActionsDAG(false); + auto filter_actions = std::make_shared(std::move(filter_expression)); builder.addSimpleTransform([&](const Block & header) { - return std::make_shared( - header, std::make_shared(std::move(filter_expression)), filter_function->getColumnName(), true); + return std::make_shared(header, filter_actions, filter_function->getColumnName(), true); }); /// Adding window column From aadf1536a40bd53c6a1b6359cf652854f134599b Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 25 Jun 2024 22:28:01 +0000 Subject: [PATCH 0220/2361] fix protocol --- src/IO/WriteBufferFromPocoSocketChunked.cpp | 30 ++++++++------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp index 324f8ae3a02..a83b976ae09 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.cpp +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -24,6 +24,8 @@ void WriteBufferFromPocoSocketChunked::enableChunked() /// Initialize next chunk chunk_size_ptr = reinterpret_cast(pos); pos += std::min(available(), sizeof(*chunk_size_ptr)); + /// Pretend finishChunk() was just called to prevent sending empty chunk if finishChunk() called immediately + last_finish_chunk = chunk_size_ptr; } void WriteBufferFromPocoSocketChunked::finishChunk() @@ -33,7 +35,8 @@ void WriteBufferFromPocoSocketChunked::finishChunk() if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) { - if (chunk_size_ptr == last_finish_chunk) // prevent duplicate finish chunk + /// Prevent duplicate finish chunk (and finish chunk right 
after enableChunked()) + if (chunk_size_ptr == last_finish_chunk) return; /// If current chunk is empty it means we are finishing a chunk previously sent by next(), @@ -85,7 +88,7 @@ void WriteBufferFromPocoSocketChunked::finishChunk() } /// Buffer end-of-chunk - *reinterpret_cast(pos) = 0; + setValue(reinterpret_cast(pos), 0); pos += sizeof(*chunk_size_ptr); /// Initialize next chunk chunk_size_ptr = reinterpret_cast(pos); @@ -114,7 +117,7 @@ void WriteBufferFromPocoSocketChunked::nextImpl() return; } - /// next() after finishChunk ar the end of the buffer + /// next() after finishChunk at the end of the buffer if (finishing < sizeof(*chunk_size_ptr)) { pos -= finishing; @@ -135,21 +138,6 @@ void WriteBufferFromPocoSocketChunked::nextImpl() return; } - /// Send end-of-chunk buffered by finishChunk - if (offset() == 2 * sizeof(*chunk_size_ptr) && last_finish_chunk == chunk_size_ptr) - { - pos -= sizeof(*chunk_size_ptr); - /// Send end-of-chunk - WriteBufferFromPocoSocket::nextImpl(); - /// Initialize next chunk - chunk_size_ptr = reinterpret_cast(working_buffer.begin()); - nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - - last_finish_chunk = chunk_size_ptr; - - return; - } - /// Prevent sending empty chunk if (offset() == sizeof(*chunk_size_ptr)) { @@ -172,8 +160,12 @@ void WriteBufferFromPocoSocketChunked::nextImpl() return; } + bool initialize_last_finish_chunk = false; if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk + { pos -= sizeof(*chunk_size_ptr); + initialize_last_finish_chunk = true; + } else // fill up current chunk size { setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); @@ -194,7 +186,7 @@ void WriteBufferFromPocoSocketChunked::nextImpl() chunk_size_ptr = reinterpret_cast(working_buffer.begin()); nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); - last_finish_chunk = nullptr; + last_finish_chunk = initialize_last_finish_chunk ? chunk_size_ptr : nullptr; } void WriteBufferFromPocoSocketChunked::finalizeImpl() From 9eec8344279082a3d02583c092f3c90b85a76fa3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 26 Jun 2024 03:19:16 +0000 Subject: [PATCH 0221/2361] better chunked protocol negotiation, comments, review suggestions --- src/Client/Connection.cpp | 37 +++++++++++++++------- src/IO/ReadBufferFromPocoSocketChunked.cpp | 11 ++++--- src/IO/ReadBufferFromPocoSocketChunked.h | 4 +-- src/IO/WriteBufferFromPocoSocketChunked.h | 10 ++++-- src/Server/TCPHandler.cpp | 17 +++++++--- src/Server/TCPHandler.h | 4 +-- 6 files changed, 56 insertions(+), 27 deletions(-) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 9f727b974ee..c41229c7226 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -208,11 +208,20 @@ void Connection::connect(const ConnectionTimeouts & timeouts) sendHello(); receiveHello(timeouts.handshake_timeout); - bool out_chunked = false; - bool in_chunked = false; - if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { + /// Client side of chunked protocol negotiation. + /// Server advertises its protocol capabilities (separate for send and recieve channels) by sending + /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional. + /// Not optional types are strict meaning that server only supports this type, optional means that + /// server prefer this type but capable to work in opposite. 
+ /// Client selects which type it is going to communicate based on the settings from config or arguments, + /// and sends either "chunked" or "notchunked" protocol request in addendum section of handshake. + /// Client can detect if server's protocol capabilities are not compatible with client's settings (for example + /// server strictly requires chunked protocol but client's settings only allowes notchunked protocol) - in such case + /// client should interrup this connection. However if client continues with incompatible protocol type request, server + /// will send appropriate exception and disconnect client. + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) { bool chunked_srv = chunked_srv_str.starts_with("chunked"); @@ -235,20 +244,24 @@ void Connection::connect(const ConnectionTimeouts & timeouts) return chunked_srv; }; - out_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send"); - in_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv"); + proto_send_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send") ? "chunked" : "notchunked"; + proto_recv_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv") ? "chunked" : "notchunked"; + } + else + { + if (proto_send_chunked == "chunked" || proto_recv_chunked == "chunked") + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: server's version is too old and doesn't support chunked protocol while client settings require it."); } if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) sendAddendum(); - if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) - { - if (out_chunked) - out->enableChunked(); - if (in_chunked) - in->enableChunked(); - } + if (proto_send_chunked == "chunked") + out->enableChunked(); + if (proto_recv_chunked == "chunked") + in->enableChunked(); LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.", server_name, server_version_major, server_version_minor, server_version_patch); diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 798be547e99..07598f2adf4 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -28,6 +28,7 @@ void ReadBufferFromPocoSocketChunked::enableChunked() return; chunked = 1; data_end = buffer().end(); + /// Resize working buffer so any next read will call nextImpl working_buffer.resize(offset()); chunk_left = 0; next_chunk = 0; @@ -51,7 +52,7 @@ bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) const } -bool ReadBufferFromPocoSocketChunked::load_next_chunk(Position c_pos, bool cont) +bool ReadBufferFromPocoSocketChunked::loadNextChunk(Position c_pos, bool cont) { auto buffered = std::min(static_cast(data_end - c_pos), sizeof(next_chunk)); @@ -73,7 +74,7 @@ bool ReadBufferFromPocoSocketChunked::load_next_chunk(Position c_pos, bool cont) return true; } -bool ReadBufferFromPocoSocketChunked::process_chunk_left(Position c_pos) +bool ReadBufferFromPocoSocketChunked::processChunkLeft(Position c_pos) { if (data_end - c_pos < chunk_left) { @@ -88,7 +89,7 @@ bool ReadBufferFromPocoSocketChunked::process_chunk_left(Position c_pos) c_pos += chunk_left; - if (!load_next_chunk(c_pos, true)) + if (!loadNextChunk(c_pos, true)) return false; chunk_left = 0; @@ -115,7 +116,7 @@ bool ReadBufferFromPocoSocketChunked::nextImpl() if (c_pos > data_end) c_pos = data_end; - if 
(!load_next_chunk(c_pos)) + if (!loadNextChunk(c_pos)) return false; chunk_left = next_chunk; @@ -159,7 +160,7 @@ bool ReadBufferFromPocoSocketChunked::nextImpl() c_pos = buffer().begin(); } - return process_chunk_left(c_pos); + return processChunkLeft(c_pos); } } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index acf0edafe0a..943a50f5d08 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -92,8 +92,8 @@ public: Poco::Net::SocketAddress ourAddress() { return our_address; } protected: - bool load_next_chunk(Position c_pos, bool cont = false); - bool process_chunk_left(Position c_pos); + bool loadNextChunk(Position c_pos, bool cont = false); + bool processChunkLeft(Position c_pos); bool nextImpl() override; Poco::Net::SocketAddress our_address; diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 269c6d66dda..8270ca445c9 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -10,8 +10,14 @@ namespace DB class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket { public: - explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, buf_size), log(getLogger("Protocol")) {} - explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, write_event_, buf_size), log(getLogger("Protocol")) {} + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, buf_size), log(getLogger("Protocol")) + { + chassert(buf_size <= std::numeric_limits>::max() && buf_size > sizeof(*chunk_size_ptr)); + } + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, write_event_, buf_size), log(getLogger("Protocol")) + { + chassert(buf_size <= std::numeric_limits>::max() && buf_size > sizeof(*chunk_size_ptr)); + } void enableChunked(); void finishChunk(); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 960860a3c13..3093c508c22 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -277,19 +277,28 @@ void TCPHandler::runImpl() if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) receiveAddendum(); - if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { + /// Server side of chunked protocol negotiation. + /// Server advertises its protocol capabilities (separate for send and recieve channels) by sending + /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional. + /// Not optional types are strict meaning that server only supports this type, optional means that + /// server prefer this type but capable to work in opposite. + /// Client selects which type it is going to communicate based on the settings from config or arguments, + /// and sends either "chunked" or "notchunked" protocol request in addendum section of handshake. 
+ /// Client can detect if server's protocol capabilities are not compatible with client's settings (for example + /// server strictly requires chunked protocol but client's settings only allowes notchunked protocol) - in such case + /// client should interrup this connection. However if client continues with incompatible protocol type request, server + /// will send appropriate exception and disconnect client. + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) { bool chunked_srv = chunked_srv_str.starts_with("chunked"); bool optional_srv = chunked_srv_str.ends_with("_optional"); bool chunked_cl = chunked_cl_str.starts_with("chunked"); - bool optional_cl = chunked_cl_str.ends_with("_optional"); if (optional_srv) return chunked_cl; - if (optional_cl) - return chunked_srv; + if (chunked_cl != chunked_srv) throw NetException( ErrorCodes::NETWORK_ERROR, diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 88c8fc6d52c..f6400161041 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -187,8 +187,8 @@ private: UInt64 client_version_minor = 0; UInt64 client_version_patch = 0; UInt32 client_tcp_protocol_version = 0; - String proto_send_chunked_cl; - String proto_recv_chunked_cl; + String proto_send_chunked_cl = "notchunked"; + String proto_recv_chunked_cl = "notchunked"; String quota_key; /// Connection settings, which are extracted from a context. From 6112ef710c2d949c3c8824fcf0e7c148f5deaea4 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 26 Jun 2024 03:43:28 +0000 Subject: [PATCH 0222/2361] fix style --- src/Client/Connection.cpp | 8 ++++---- src/Server/TCPHandler.cpp | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index c41229c7226..14ffff10081 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -211,17 +211,17 @@ void Connection::connect(const ConnectionTimeouts & timeouts) if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { /// Client side of chunked protocol negotiation. - /// Server advertises its protocol capabilities (separate for send and recieve channels) by sending + /// Server advertises its protocol capabilities (separate for send and receive channels) by sending /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional. /// Not optional types are strict meaning that server only supports this type, optional means that /// server prefer this type but capable to work in opposite. /// Client selects which type it is going to communicate based on the settings from config or arguments, /// and sends either "chunked" or "notchunked" protocol request in addendum section of handshake. /// Client can detect if server's protocol capabilities are not compatible with client's settings (for example - /// server strictly requires chunked protocol but client's settings only allowes notchunked protocol) - in such case - /// client should interrup this connection. However if client continues with incompatible protocol type request, server + /// server strictly requires chunked protocol but client's settings only allows notchunked protocol) - in such case + /// client should interrupt this connection. However if client continues with incompatible protocol type request, server /// will send appropriate exception and disconnect client. 
- + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) { bool chunked_srv = chunked_srv_str.starts_with("chunked"); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 3093c508c22..d5afb624e77 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -279,15 +279,15 @@ void TCPHandler::runImpl() { /// Server side of chunked protocol negotiation. - /// Server advertises its protocol capabilities (separate for send and recieve channels) by sending + /// Server advertises its protocol capabilities (separate for send and receive channels) by sending /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional. /// Not optional types are strict meaning that server only supports this type, optional means that /// server prefer this type but capable to work in opposite. /// Client selects which type it is going to communicate based on the settings from config or arguments, /// and sends either "chunked" or "notchunked" protocol request in addendum section of handshake. /// Client can detect if server's protocol capabilities are not compatible with client's settings (for example - /// server strictly requires chunked protocol but client's settings only allowes notchunked protocol) - in such case - /// client should interrup this connection. However if client continues with incompatible protocol type request, server + /// server strictly requires chunked protocol but client's settings only allows notchunked protocol) - in such case + /// client should interrupt this connection. However if client continues with incompatible protocol type request, server /// will send appropriate exception and disconnect client. auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) From 3f3305a63a1218dc944ac7b3a8540f084a57a039 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 26 Jun 2024 04:33:52 +0000 Subject: [PATCH 0223/2361] fix server settings --- src/Server/TCPHandler.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index d5afb624e77..40fd3848455 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -310,8 +310,8 @@ void TCPHandler::runImpl() return chunked_srv; }; - bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "chunked"), proto_recv_chunked_cl, "send"); - bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "chunked"), proto_send_chunked_cl, "recv"); + bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "chunked_optional"), proto_recv_chunked_cl, "send"); + bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "chunked_optional"), proto_send_chunked_cl, "recv"); if (out_chunked) out->enableChunked(); From 6c0c6fd9fe452631467fcb4da4bfd764bb487bea Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 25 Jun 2024 14:54:50 +0800 Subject: [PATCH 0224/2361] Fix error caused by type miss match --- src/Storages/Statistics/Statistics.cpp | 16 ++++++++++++++++ src/Storages/Statistics/Statistics.h | 2 ++ .../03174_statistics_countminsketch.reference | 14 +++++++------- .../03174_statistics_countminsketch.sql | 14 +++++++------- 4 files changed, 32 insertions(+), 14 deletions(-) diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 2404d234d7a..7afd8b8cd12 100644 --- 
a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -49,6 +49,20 @@ std::optional getString(const Field & f) return {}; } +bool checkType(const Field & f) +{ + switch (f.getType()) + { + case Field::Types::Int64: + case Field::Types::UInt64: + case Field::Types::Float64: + case Field::Types::String: + return true; + default: + return false; + } +} + IStatistics::IStatistics(const SingleStatisticsDescription & stat_) : stat(stat_) {} ColumnStatistics::ColumnStatistics(const ColumnStatisticsDescription & stats_desc_) @@ -94,6 +108,8 @@ Float64 ColumnStatistics::estimateEqual(Field val) const #if USE_DATASKETCHES if (stats.contains(StatisticsType::CountMinSketch)) { + if (!checkType(val)) + return rows * ConditionSelectivityEstimator::default_normal_cond_factor; auto count_min_sketch_static = std::static_pointer_cast(stats.at(StatisticsType::CountMinSketch)); return count_min_sketch_static->estimateEqual(val); } diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index f6121d72256..b56a1959ea0 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -103,4 +103,6 @@ private: std::optional getFloat64(const Field & f); std::optional getString(const Field & f); +bool checkType(const Field & f); + } diff --git a/tests/queries/0_stateless/03174_statistics_countminsketch.reference b/tests/queries/0_stateless/03174_statistics_countminsketch.reference index 3cec7dd7168..b5a35c22835 100644 --- a/tests/queries/0_stateless/03174_statistics_countminsketch.reference +++ b/tests/queries/0_stateless/03174_statistics_countminsketch.reference @@ -1,26 +1,26 @@ CREATE TABLE default.t1\n(\n `a` String STATISTICS(countminsketch),\n `b` Int64 STATISTICS(countminsketch),\n `c` UInt64 STATISTICS(countminsketch),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 Prewhere info Prewhere filter - Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) Prewhere info Prewhere filter - Prewhere filter column: and(equals(a, \'0\'_String), equals(c, 0), greater(b, 0)) (removed) + Prewhere filter column: and(equals(a, \'0\'), equals(c, 0), greater(b, 0)) (removed) After drop statistics for a Prewhere info Prewhere filter - Prewhere filter column: and(equals(b, 0), equals(c, 0), equals(a, \'0\'_String)) (removed) + Prewhere filter column: and(equals(b, 0), equals(c, 0), equals(a, \'0\')) (removed) Prewhere info Prewhere filter - Prewhere filter column: and(equals(c, 0), equals(a, \'0\'_String), greater(b, 0)) (removed) + Prewhere filter column: and(equals(c, 0), equals(a, \'0\'), greater(b, 0)) (removed) LowCardinality Prewhere info Prewhere filter - Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) Nullable Prewhere info Prewhere filter - Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) LowCardinality(Nullable) Prewhere info Prewhere filter - Prewhere filter column: and(equals(a, \'0\'_String), equals(b, 0), equals(c, 0)) (removed) + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) diff --git 
a/tests/queries/0_stateless/03174_statistics_countminsketch.sql b/tests/queries/0_stateless/03174_statistics_countminsketch.sql index 0d9673309b4..200c2a4a531 100644 --- a/tests/queries/0_stateless/03174_statistics_countminsketch.sql +++ b/tests/queries/0_stateless/03174_statistics_countminsketch.sql @@ -17,14 +17,14 @@ SHOW CREATE TABLE t1; INSERT INTO t1 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; ALTER TABLE t1 DROP STATISTICS a; SELECT 'After drop statistics for a'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; @@ -40,7 +40,7 @@ SETTINGS min_bytes_for_wide_part = 0; INSERT INTO t2 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; SELECT 'LowCardinality'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t2 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t2 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; DROP TABLE IF EXISTS t2; @@ -57,7 +57,7 @@ SETTINGS min_bytes_for_wide_part = 0; INSERT INTO t3 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; SELECT 'Nullable'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t3 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t3 WHERE c = 0 and b = 0 
and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; DROP TABLE IF EXISTS t3; DROP TABLE IF EXISTS t4; @@ -73,6 +73,6 @@ SETTINGS min_bytes_for_wide_part = 0; INSERT INTO t4 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; SELECT 'LowCardinality(Nullable)'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t4 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t4 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; DROP TABLE IF EXISTS t4; From 7b2868a1ef6ea32b4bdaac8c13186868a9aec4fe Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 26 Jun 2024 10:09:04 +0800 Subject: [PATCH 0225/2361] A little code refactoring --- .../ConditionSelectivityEstimator.cpp | 4 +-- .../Statistics/CountMinSketchStatistics.cpp | 31 ++++++++++++++----- .../Statistics/CountMinSketchStatistics.h | 2 ++ src/Storages/Statistics/Statistics.cpp | 27 +++++----------- src/Storages/Statistics/Statistics.h | 7 ++--- 5 files changed, 37 insertions(+), 34 deletions(-) diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp index 73c5c549a5d..437b39cb537 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp @@ -37,7 +37,7 @@ Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateGreat Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(Field val, Float64 rows) const { - auto float_val = getFloat64(val); + auto float_val = IStatistics::getFloat64(val); if (part_statistics.empty()) { if (!float_val) @@ -148,7 +148,7 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode else dummy = true; auto [op, val] = extractBinaryOp(node, col); - auto float_val = getFloat64(val); + auto float_val = IStatistics::getFloat64(val); if (op == "equals") { if (dummy) diff --git a/src/Storages/Statistics/CountMinSketchStatistics.cpp b/src/Storages/Statistics/CountMinSketchStatistics.cpp index d0f52e3f6df..53415ff3946 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.cpp +++ b/src/Storages/Statistics/CountMinSketchStatistics.cpp @@ -22,13 +22,27 @@ CountMinSketchStatistics::CountMinSketchStatistics(const SingleStatisticsDescrip Float64 CountMinSketchStatistics::estimateEqual(const Field & value) const { - if (auto float_val = getFloat64(value)) + if (auto float_val = IStatistics::getFloat64(value)) return data.get_estimate(&float_val.value(), 8); - if (auto string_val = getString(value)) + if (auto string_val = IStatistics::getString(value)) return data.get_estimate(string_val->data(), string_val->size()); UNREACHABLE(); } +bool CountMinSketchStatistics::checkType(const Field & f) +{ + switch (f.getType()) + { + case Field::Types::Int64: + case Field::Types::UInt64: + case Field::Types::Float64: + case Field::Types::String: + return true; + default: + return false; + } +} + void CountMinSketchStatistics::serialize(WriteBuffer & buf) { auto bytes = data.serialize(); @@ -55,12 +69,13 @@ void CountMinSketchStatistics::update(const ColumnPtr & column) { Field f; column->get(i, f); - if (f.isNull()) - continue; - if (auto float_val = getFloat64(f)) - 
data.update(&float_val.value(), 8, 1.0); - else if (auto string_val = getString(f)) - data.update(*string_val, 1.0); + if (checkType(f)) + { + if (auto float_val = IStatistics::getFloat64(f)) + data.update(&float_val.value(), 8, 1.0); + else if (auto string_val = IStatistics::getString(f)) + data.update(*string_val, 1.0); + } } } diff --git a/src/Storages/Statistics/CountMinSketchStatistics.h b/src/Storages/Statistics/CountMinSketchStatistics.h index 23ea2cf25c9..37f0aa48749 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.h +++ b/src/Storages/Statistics/CountMinSketchStatistics.h @@ -23,6 +23,8 @@ public: void update(const ColumnPtr & column) override; + bool checkType(const Field & f); + private: static constexpr size_t HASH_COUNT = 8; static constexpr size_t BUCKET_COUNT = 2048; diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 7afd8b8cd12..9b184f14bfb 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -27,7 +27,7 @@ enum StatisticsFileVersion : UInt16 V0 = 0, }; -std::optional getFloat64(const Field & f) +std::optional IStatistics::getFloat64(const Field & f) { const auto type = f.getType(); Float64 value; @@ -42,27 +42,13 @@ std::optional getFloat64(const Field & f) return value; } -std::optional getString(const Field & f) +std::optional IStatistics::getString(const Field & f) { if (f.getType() == Field::Types::String) return f.get(); return {}; } -bool checkType(const Field & f) -{ - switch (f.getType()) - { - case Field::Types::Int64: - case Field::Types::UInt64: - case Field::Types::Float64: - case Field::Types::String: - return true; - default: - return false; - } -} - IStatistics::IStatistics(const SingleStatisticsDescription & stat_) : stat(stat_) {} ColumnStatistics::ColumnStatistics(const ColumnStatisticsDescription & stats_desc_) @@ -93,7 +79,7 @@ Float64 ColumnStatistics::estimateGreater(Float64 val) const Float64 ColumnStatistics::estimateEqual(Field val) const { - auto float_val = getFloat64(val); + auto float_val = IStatistics::getFloat64(val); if (float_val && stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) { auto uniq_static = std::static_pointer_cast(stats.at(StatisticsType::Uniq)); @@ -108,10 +94,11 @@ Float64 ColumnStatistics::estimateEqual(Field val) const #if USE_DATASKETCHES if (stats.contains(StatisticsType::CountMinSketch)) { - if (!checkType(val)) - return rows * ConditionSelectivityEstimator::default_normal_cond_factor; auto count_min_sketch_static = std::static_pointer_cast(stats.at(StatisticsType::CountMinSketch)); - return count_min_sketch_static->estimateEqual(val); + if (!count_min_sketch_static->checkType(val)) + return rows * ConditionSelectivityEstimator::default_normal_cond_factor; + else + return count_min_sketch_static->estimateEqual(val); } #endif if (val < - ConditionSelectivityEstimator::threshold || val > ConditionSelectivityEstimator::threshold) diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index b56a1959ea0..b6df6e45b70 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -35,6 +35,9 @@ public: virtual void update(const ColumnPtr & column) = 0; + static std::optional getFloat64(const Field & f); + static std::optional getString(const Field & f); + protected: SingleStatisticsDescription stat; }; @@ -101,8 +104,4 @@ private: Validators validators; }; -std::optional getFloat64(const Field & f); -std::optional 
getString(const Field & f); -bool checkType(const Field & f); - } From cc67efd7898483fe165eb0e72e8e61818027aab0 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 26 Jun 2024 12:39:50 +0000 Subject: [PATCH 0226/2361] Some fixups --- .../mergetree-family/mergetree.md | 11 +++++++--- .../statements/alter/statistics.md | 4 ++-- src/CMakeLists.txt | 3 +-- .../Statistics/CountMinSketchStatistics.cpp | 20 ++++++++++--------- .../Statistics/CountMinSketchStatistics.h | 16 +++++++-------- src/Storages/Statistics/Statistics.cpp | 2 ++ src/Storages/Statistics/Statistics.h | 1 - src/Storages/Statistics/TDigestStatistics.h | 1 - src/Storages/Statistics/UniqStatistics.h | 1 - src/Storages/StatisticsDescription.cpp | 8 ++++---- 10 files changed, 36 insertions(+), 31 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index f0c4e1b0e34..bb681faaf1e 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -991,13 +991,18 @@ They can be used for prewhere optimization only if we enable `set allow_statisti #### Available Types of Column Statistics {#available-types-of-column-statistics} + - `TDigest` - Stores distribution of values from numeric columns in [TDigest](https://github.com/tdunning/t-digest) sketch. + [TDigest](https://github.com/tdunning/t-digest) sketches which allow to compute approximate percentiles (e.g. the 90th percentile) for numeric columns. - `Uniq` - - Estimate the number of distinct values of a column by HyperLogLog. + + [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation how many distinct values a column contains. + +- `CountMin` + + [Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column. ## Column-level Settings {#column-level-settings} diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 0d1fa59cf86..6880cef0e5c 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -25,9 +25,9 @@ Also, they are replicated, syncing statistics metadata via ZooKeeper. There is an example adding two statistics types to two columns: ``` -ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq, CountMinSketch; +ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq; ``` -:::note +:::note Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). 
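For readers skimming the docs hunk above, the `CountMin` statistics type boils down to a very small data structure. The sketch below is only an illustration of the idea under toy parameters; the patch itself links against the DataSketches implementation, whose hashing, sizing and serialization differ:

```cpp
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <string_view>

/// Toy illustration of the count-min idea: D hash rows with W buckets each, every value
/// increments one bucket per row, and the estimate is the minimum over the rows.
class ToyCountMinSketch
{
public:
    void update(std::string_view value)
    {
        for (size_t i = 0; i < D; ++i)
            ++counters[i][hash(value, i) % W];
    }

    uint64_t estimate(std::string_view value) const
    {
        uint64_t result = UINT64_MAX;
        for (size_t i = 0; i < D; ++i)
            result = std::min(result, counters[i][hash(value, i) % W]);
        return result;
    }

private:
    static constexpr size_t D = 4;     /// number of hash rows
    static constexpr size_t W = 2048;  /// buckets per row

    static uint64_t hash(std::string_view value, uint64_t seed)
    {
        uint64_t h = 1469598103934665603ULL ^ ((seed + 1) * 0x9E3779B97F4A7C15ULL);  /// seeded FNV-1a
        for (unsigned char c : value)
        {
            h ^= c;
            h *= 1099511628211ULL;
        }
        return h;
    }

    std::array<std::array<uint64_t, W>, D> counters{};
};
```

Estimates never undercount; with `D` rows and `W` buckets the overcount stays roughly proportional to `n / W` with high probability, which is what makes the sketch usable for equality-selectivity estimates like the `a = '0'` predicates exercised by the tests above.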
::: diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4efb6004172..b7138d0700e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -545,8 +545,7 @@ if (TARGET ch_contrib::libpqxx) endif() if (TARGET ch_contrib::datasketches) - target_link_libraries (clickhouse_aggregate_functions PRIVATE ch_contrib::datasketches) - dbms_target_link_libraries(PRIVATE ch_contrib::datasketches) + dbms_target_link_libraries(PUBLIC ch_contrib::datasketches) endif () target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4) diff --git a/src/Storages/Statistics/CountMinSketchStatistics.cpp b/src/Storages/Statistics/CountMinSketchStatistics.cpp index 53415ff3946..03c76c9709b 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.cpp +++ b/src/Storages/Statistics/CountMinSketchStatistics.cpp @@ -1,8 +1,8 @@ +#include #include #include #include #include -#include #if USE_DATASKETCHES @@ -16,16 +16,18 @@ extern const int ILLEGAL_STATISTICS; CountMinSketchStatistics::CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) - : IStatistics(stat_), data(HASH_COUNT, BUCKET_COUNT), data_type(data_type_) + : IStatistics(stat_) + , sketch(HASH_COUNT, BUCKET_COUNT) + , data_type(data_type_) { } Float64 CountMinSketchStatistics::estimateEqual(const Field & value) const { if (auto float_val = IStatistics::getFloat64(value)) - return data.get_estimate(&float_val.value(), 8); + return sketch.get_estimate(&float_val.value(), 8); if (auto string_val = IStatistics::getString(value)) - return data.get_estimate(string_val->data(), string_val->size()); + return sketch.get_estimate(string_val->data(), string_val->size()); UNREACHABLE(); } @@ -45,7 +47,7 @@ bool CountMinSketchStatistics::checkType(const Field & f) void CountMinSketchStatistics::serialize(WriteBuffer & buf) { - auto bytes = data.serialize(); + auto bytes = sketch.serialize(); writeIntBinary(static_cast(bytes.size()), buf); buf.write(reinterpret_cast(bytes.data()), bytes.size()); } @@ -58,7 +60,7 @@ void CountMinSketchStatistics::deserialize(ReadBuffer & buf) s.reserve(size); buf.readStrict(s.data(), size); /// Extra copy can be avoided by implementing count_min_sketch::deserialize with ReadBuffer auto read_sketch = datasketches::count_min_sketch::deserialize(s.data(), size, datasketches::DEFAULT_SEED); - data.merge(read_sketch); + sketch.merge(read_sketch); } void CountMinSketchStatistics::update(const ColumnPtr & column) @@ -72,9 +74,9 @@ void CountMinSketchStatistics::update(const ColumnPtr & column) if (checkType(f)) { if (auto float_val = IStatistics::getFloat64(f)) - data.update(&float_val.value(), 8, 1.0); + sketch.update(&float_val.value(), 8, 1.0); else if (auto string_val = IStatistics::getString(f)) - data.update(*string_val, 1.0); + sketch.update(*string_val, 1.0); } } } @@ -84,7 +86,7 @@ void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr da data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) - throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countminsketch' does not support type {}", data_type->getName()); + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countmin' does not support type {}", data_type->getName()); } StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) diff --git a/src/Storages/Statistics/CountMinSketchStatistics.h 
b/src/Storages/Statistics/CountMinSketchStatistics.h index 37f0aa48749..a112de4b3aa 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.h +++ b/src/Storages/Statistics/CountMinSketchStatistics.h @@ -1,24 +1,24 @@ #pragma once +#include + +#include "config.h" + #if USE_DATASKETCHES -#include #include -#include namespace DB { -/// CountMinSketchStatistics is used to estimate expression like col = 'value' or col in ('v1', 'v2'). class CountMinSketchStatistics : public IStatistics { public: - explicit CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); + CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); Float64 estimateEqual(const Field & value) const; void serialize(WriteBuffer & buf) override; - void deserialize(ReadBuffer & buf) override; void update(const ColumnPtr & column) override; @@ -26,10 +26,10 @@ public: bool checkType(const Field & f); private: - static constexpr size_t HASH_COUNT = 8; - static constexpr size_t BUCKET_COUNT = 2048; + static constexpr auto HASH_COUNT = 8uz; + static constexpr auto BUCKET_COUNT = 2048uz; - datasketches::count_min_sketch data; + datasketches::count_min_sketch sketch; DataTypePtr data_type; }; diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 9b184f14bfb..36339d7456f 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -12,6 +12,8 @@ #include #include +#include "config.h" /// USE_DATASKETCHES + namespace DB { diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index b6df6e45b70..9cc50a8ad4d 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -30,7 +30,6 @@ public: virtual ~IStatistics() = default; virtual void serialize(WriteBuffer & buf) = 0; - virtual void deserialize(ReadBuffer & buf) = 0; virtual void update(const ColumnPtr & column) = 0; diff --git a/src/Storages/Statistics/TDigestStatistics.h b/src/Storages/Statistics/TDigestStatistics.h index 7c361b8751f..396d371aae7 100644 --- a/src/Storages/Statistics/TDigestStatistics.h +++ b/src/Storages/Statistics/TDigestStatistics.h @@ -18,7 +18,6 @@ public: Float64 estimateEqual(Float64 val) const; void serialize(WriteBuffer & buf) override; - void deserialize(ReadBuffer & buf) override; void update(const ColumnPtr & column) override; diff --git a/src/Storages/Statistics/UniqStatistics.h b/src/Storages/Statistics/UniqStatistics.h index 0d86a6e458a..16bf5858ce1 100644 --- a/src/Storages/Statistics/UniqStatistics.h +++ b/src/Storages/Statistics/UniqStatistics.h @@ -17,7 +17,6 @@ public: UInt64 getCardinality(); void serialize(WriteBuffer & buf) override; - void deserialize(ReadBuffer & buf) override; void update(const ColumnPtr & column) override; diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 23339ca3cfe..8fa3400d3a6 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -54,9 +54,9 @@ static StatisticsType stringToStatisticsType(String type) return StatisticsType::TDigest; if (type == "uniq") return StatisticsType::Uniq; - if (type == "countminsketch") + if (type == "count_min") return StatisticsType::CountMinSketch; - throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are `tdigest` and `uniq`.", type); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. 
Supported statistics types are 'tdigest', 'uniq', and 'count_min'.", type); } String SingleStatisticsDescription::getTypeName() const @@ -68,9 +68,9 @@ String SingleStatisticsDescription::getTypeName() const case StatisticsType::Uniq: return "Uniq"; case StatisticsType::CountMinSketch: - return "CountMinSketch"; + return "count_min"; default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are `tdigest` and `uniq`.", type); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'tdigest', 'uniq', and 'count_min'.", type); } } From 32e6bed4ee8aecf97ddd289ca869f8da096d58af Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 26 Jun 2024 14:04:33 +0000 Subject: [PATCH 0227/2361] bug fix, ubsan paranoia fix --- src/IO/WriteBufferFromPocoSocketChunked.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp index a83b976ae09..b6d9efda815 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.cpp +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -7,10 +7,9 @@ namespace { template -const T & setValue(T * typed_ptr, std::type_identity_t val) +void setValue(T * typed_ptr, std::type_identity_t val) { - memcpy(typed_ptr, &val, sizeof(T)); - return *typed_ptr; + memcpy(static_cast(typed_ptr), &val, sizeof(T)); } } @@ -84,6 +83,7 @@ void WriteBufferFromPocoSocketChunked::finishChunk() finishing = available(); pos += available(); chunk_size_ptr = reinterpret_cast(pos); + last_finish_chunk = chunk_size_ptr; return; } From a2ff2a35f0ebdf0cc3b7341b1f798b4d74d780bf Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 26 Jun 2024 14:15:54 +0000 Subject: [PATCH 0228/2361] More fixups --- .../Statistics/CountMinSketchStatistics.cpp | 13 +++++++------ src/Storages/Statistics/CountMinSketchStatistics.h | 6 ++++-- src/Storages/Statistics/TDigestStatistics.cpp | 10 +++++----- src/Storages/Statistics/TDigestStatistics.h | 2 +- src/Storages/Statistics/UniqStatistics.h | 2 +- 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/Storages/Statistics/CountMinSketchStatistics.cpp b/src/Storages/Statistics/CountMinSketchStatistics.cpp index 03c76c9709b..c22aa966a15 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.cpp +++ b/src/Storages/Statistics/CountMinSketchStatistics.cpp @@ -47,7 +47,7 @@ bool CountMinSketchStatistics::checkType(const Field & f) void CountMinSketchStatistics::serialize(WriteBuffer & buf) { - auto bytes = sketch.serialize(); + Sketch::vector_bytes bytes = sketch.serialize(); writeIntBinary(static_cast(bytes.size()), buf); buf.write(reinterpret_cast(bytes.data()), bytes.size()); } @@ -56,11 +56,12 @@ void CountMinSketchStatistics::deserialize(ReadBuffer & buf) { UInt64 size; readIntBinary(size, buf); - String s; - s.reserve(size); - buf.readStrict(s.data(), size); /// Extra copy can be avoided by implementing count_min_sketch::deserialize with ReadBuffer - auto read_sketch = datasketches::count_min_sketch::deserialize(s.data(), size, datasketches::DEFAULT_SEED); - sketch.merge(read_sketch); + + Sketch::vector_bytes bytes; + bytes.reserve(size); + buf.readStrict(reinterpret_cast(bytes.data()), size); + + sketch = datasketches::count_min_sketch::deserialize(bytes.data(), size, datasketches::DEFAULT_SEED); } void CountMinSketchStatistics::update(const ColumnPtr & column) diff --git a/src/Storages/Statistics/CountMinSketchStatistics.h 
b/src/Storages/Statistics/CountMinSketchStatistics.h index a112de4b3aa..2a4f65196a2 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.h +++ b/src/Storages/Statistics/CountMinSketchStatistics.h @@ -29,12 +29,14 @@ private: static constexpr auto HASH_COUNT = 8uz; static constexpr auto BUCKET_COUNT = 2048uz; - datasketches::count_min_sketch sketch; + using Sketch = datasketches::count_min_sketch; + Sketch sketch; + DataTypePtr data_type; }; -StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); } diff --git a/src/Storages/Statistics/TDigestStatistics.cpp b/src/Storages/Statistics/TDigestStatistics.cpp index aa5662c979d..d390619ea36 100644 --- a/src/Storages/Statistics/TDigestStatistics.cpp +++ b/src/Storages/Statistics/TDigestStatistics.cpp @@ -45,11 +45,6 @@ void TDigestStatistics::update(const ColumnPtr & column) } } -StatisticsPtr TDigestCreator(const SingleStatisticsDescription & stat, DataTypePtr) -{ - return std::make_shared(stat); -} - void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); @@ -57,4 +52,9 @@ void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' does not support type {}", data_type->getName()); } +StatisticsPtr TDigestCreator(const SingleStatisticsDescription & stat, DataTypePtr) +{ + return std::make_shared(stat); +} + } diff --git a/src/Storages/Statistics/TDigestStatistics.h b/src/Storages/Statistics/TDigestStatistics.h index 396d371aae7..2113572ac38 100644 --- a/src/Storages/Statistics/TDigestStatistics.h +++ b/src/Storages/Statistics/TDigestStatistics.h @@ -25,7 +25,7 @@ private: QuantileTDigest data; }; -StatisticsPtr TDigestCreator(const SingleStatisticsDescription & stat, DataTypePtr); void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr TDigestCreator(const SingleStatisticsDescription & stat, DataTypePtr); } diff --git a/src/Storages/Statistics/UniqStatistics.h b/src/Storages/Statistics/UniqStatistics.h index 16bf5858ce1..bf097620a86 100644 --- a/src/Storages/Statistics/UniqStatistics.h +++ b/src/Storages/Statistics/UniqStatistics.h @@ -29,7 +29,7 @@ private: }; -StatisticsPtr UniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); void UniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr UniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); } From 15b36c1ece293157b636a3efdb4727b73c58637f Mon Sep 17 00:00:00 2001 From: skyoct Date: Wed, 26 Jun 2024 22:28:31 +0800 Subject: [PATCH 0229/2361] add s3 tests --- src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 2 +- tests/queries/0_stateless/02245_s3_virtual_columns.reference | 4 ++-- tests/queries/0_stateless/02245_s3_virtual_columns.sql | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 31d46a93e58..46b4b34cb24 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -202,7 +202,7 @@ Chunk StorageObjectStorageSource::generate() .size = 
object_info->metadata->size_bytes, .filename = &filename, .last_modified = object_info->metadata->last_modified, - .etag = &(object_info.metadata->etag) + .etag = &(object_info->metadata->etag) }); return chunk; } diff --git a/tests/queries/0_stateless/02245_s3_virtual_columns.reference b/tests/queries/0_stateless/02245_s3_virtual_columns.reference index 09383c51888..b0af2e54dfd 100644 --- a/tests/queries/0_stateless/02245_s3_virtual_columns.reference +++ b/tests/queries/0_stateless/02245_s3_virtual_columns.reference @@ -11,5 +11,5 @@ create table test_02245_2 (a UInt64, _path Int32) engine = S3(s3_conn, filename= insert into test_02245_2 select 1, 2 settings s3_truncate_on_insert=1; select * from test_02245_2; 1 2 -select _path from test_02245_2; -2 +select _path, isNotNull(_etag) from test_02245_2; +2 1 diff --git a/tests/queries/0_stateless/02245_s3_virtual_columns.sql b/tests/queries/0_stateless/02245_s3_virtual_columns.sql index e86344d2094..a66b212e5c7 100644 --- a/tests/queries/0_stateless/02245_s3_virtual_columns.sql +++ b/tests/queries/0_stateless/02245_s3_virtual_columns.sql @@ -12,4 +12,4 @@ drop table if exists test_02245_2; create table test_02245_2 (a UInt64, _path Int32) engine = S3(s3_conn, filename='test_02245_2', format=Parquet); insert into test_02245_2 select 1, 2 settings s3_truncate_on_insert=1; select * from test_02245_2; -select _path from test_02245_2; +select _path, isNotNull(_etag) from test_02245_2; From 30a9c38c9596b40555c8ec041257b53cd10b9abc Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 26 Jun 2024 20:43:13 +0000 Subject: [PATCH 0230/2361] fix buffer size check --- src/IO/ReadBufferFromPocoSocketChunked.cpp | 10 +++++----- src/IO/WriteBufferFromPocoSocketChunked.cpp | 11 +++++++++++ src/IO/WriteBufferFromPocoSocketChunked.h | 11 +++-------- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 07598f2adf4..93afeadba60 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -16,11 +16,11 @@ ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Sock {} ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) - : ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size), our_address(socket_.address()), log(getLogger("Protocol")) - -{ - chassert(buf_size <= std::numeric_limits::max()); -} + : ReadBufferFromPocoSocketBase( + socket_, read_event_, + std::min(buf_size, static_cast(std::numeric_limits::max()))), + our_address(socket_.address()), log(getLogger("Protocol")) +{} void ReadBufferFromPocoSocketChunked::enableChunked() { diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp index b6d9efda815..98c5126c24b 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.cpp +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -17,6 +17,17 @@ void setValue(T * typed_ptr, std::type_identity_t val) namespace DB { +WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) + : WriteBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) +{} + +WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size) + : WriteBufferFromPocoSocket( + socket_, write_event_, + std::clamp(buf_size, 
sizeof(*chunk_size_ptr) + 1, static_cast(std::numeric_limits>::max()))), + log(getLogger("Protocol")) +{} + void WriteBufferFromPocoSocketChunked::enableChunked() { chunked = true; diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h index 8270ca445c9..13a277e3bfb 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.h +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -10,14 +11,8 @@ namespace DB class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket { public: - explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, buf_size), log(getLogger("Protocol")) - { - chassert(buf_size <= std::numeric_limits>::max() && buf_size > sizeof(*chunk_size_ptr)); - } - explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) : WriteBufferFromPocoSocket(socket_, write_event_, buf_size), log(getLogger("Protocol")) - { - chassert(buf_size <= std::numeric_limits>::max() && buf_size > sizeof(*chunk_size_ptr)); - } + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); void enableChunked(); void finishChunk(); From 91598d10e9ab6f9d7054ed7e4204665e85b7636c Mon Sep 17 00:00:00 2001 From: Tobias Florek Date: Thu, 27 Jun 2024 09:29:06 +0200 Subject: [PATCH 0231/2361] document declarative ssh-keys authentication --- docs/en/operations/settings/settings-users.md | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/docs/en/operations/settings/settings-users.md b/docs/en/operations/settings/settings-users.md index 96477f777a9..ef1e58fd18e 100644 --- a/docs/en/operations/settings/settings-users.md +++ b/docs/en/operations/settings/settings-users.md @@ -22,6 +22,21 @@ Structure of the `users` section: + + + ssh-ed25519 + AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj + + + ecdsa-sha2-nistp256 + AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNxeV2uN5UY6CUbCzTA1rXfYimKQA5ivNIqxdax4bcMXz4D0nSk2l5E1TkR5mG8EBWtmExSPbcEPJ8V7lyWWbA8= + + + ssh-rsa + AAAAB3NzaC1yc2EAAAADAQABAAABgQCpgqL1SHhPVBOTFlOm0pu+cYBbADzC2jL41sPMawYCJHDyHuq7t+htaVVh2fRgpAPmSEnLEC2d4BEIKMtPK3bfR8plJqVXlLt6Q8t4b1oUlnjb3VPA9P6iGcW7CV1FBkZQEVx8ckOfJ3F+kI5VsrRlEDgiecm/C1VPl0/9M2llW/mPUMaD65cM9nlZgM/hUeBrfxOEqM11gDYxEZm1aRSbZoY4dfdm3vzvpSQ6lrCrkjn3X2aSmaCLcOWJhfBWMovNDB8uiPuw54g3ioZ++qEQMlfxVsqXDGYhXCrsArOVuW/5RbReO79BvXqdssiYShfwo+GhQ0+aLWMIW/jgBkkqx/n7uKLzCMX7b2F+aebRYFh+/QXEj7SnihdVfr9ud6NN3MWzZ1ltfIczlEcFLrLJ1Yq57wW6wXtviWh59WvTWFiPejGjeSjjJyqqB49tKdFVFuBnIU5u/bch2DXVgiAEdQwUrIp1ACoYPq22HFFAYUJrL32y7RxX3PGzuAv3LOc= + + + 0|1 @@ -79,6 +94,24 @@ Password can be specified in plaintext or in SHA256 (hex format). The first line of the result is the password. The second line is the corresponding double SHA1 hash. +### username/ssh-key {#user-sshkey} + +This setting allows authenticating with SSH keys. 
+ +Given a SSH key (as generated by `ssh-keygen`) like +``` +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj john@example.com +``` +The `ssh_key` element is expected to be +``` + + ssh-ed25519 + AAAAC3NzaC1lZDI1NTE5AAAAIDNf0r6vRl24Ix3tv2IgPmNPO2ATa2krvt80DdcTatLj + +``` + +Substitute `ssh-ed25519` with `ssh-rsa` or `ecdsa-sha2-nistp256` for the other supported algorithms. + ### access_management {#access_management-user-setting} This setting enables or disables using of SQL-driven [access control and account management](../../guides/sre/user-management/index.md#access-control) for the user. From 7cfbd1427011a34b07ac65a39d5ae4a0bfc34141 Mon Sep 17 00:00:00 2001 From: skyoct Date: Thu, 27 Jun 2024 18:05:19 +0800 Subject: [PATCH 0232/2361] fix tests --- tests/queries/0_stateless/02245_s3_virtual_columns.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02245_s3_virtual_columns.reference b/tests/queries/0_stateless/02245_s3_virtual_columns.reference index b0af2e54dfd..3822f6ffa0f 100644 --- a/tests/queries/0_stateless/02245_s3_virtual_columns.reference +++ b/tests/queries/0_stateless/02245_s3_virtual_columns.reference @@ -12,4 +12,4 @@ insert into test_02245_2 select 1, 2 settings s3_truncate_on_insert=1; select * from test_02245_2; 1 2 select _path, isNotNull(_etag) from test_02245_2; -2 1 +2 1 From 71d71bd5fe8884b5f2b11e06302dce8e511f5b7c Mon Sep 17 00:00:00 2001 From: morning-color Date: Thu, 27 Jun 2024 20:06:14 +0800 Subject: [PATCH 0233/2361] Add rows_before_group_by_counter --- src/Client/ClientBase.cpp | 2 + src/Core/Settings.h | 1 + src/Processors/Formats/IOutputFormat.cpp | 3 +- src/Processors/Formats/IOutputFormat.h | 8 ++++ src/Processors/Formats/LazyOutputFormat.cpp | 4 ++ src/Processors/Formats/LazyOutputFormat.h | 1 + .../Formats/PullingOutputFormat.cpp | 5 ++- src/Processors/Formats/PullingOutputFormat.h | 1 + src/Processors/IProcessor.h | 6 +++ src/Processors/Sources/DelayedSource.h | 2 + src/Processors/Sources/RemoteSource.cpp | 30 ++++++++++----- src/Processors/Sources/RemoteSource.h | 3 ++ .../Transforms/AggregatingTransform.cpp | 4 +- .../Transforms/AggregatingTransform.h | 11 ++++-- src/QueryPipeline/ProfileInfo.cpp | 17 +++++++++ src/QueryPipeline/ProfileInfo.h | 13 +++++++ src/QueryPipeline/QueryPipeline.cpp | 37 +++++++++++++------ src/Server/GRPCServer.cpp | 1 + src/Server/grpc_protos/clickhouse_grpc.proto | 2 + 19 files changed, 123 insertions(+), 28 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 15a4836ef7a..dbb67d230d5 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -522,6 +522,8 @@ void ClientBase::onProfileInfo(const ProfileInfo & profile_info) { if (profile_info.hasAppliedLimit() && output_format) output_format->setRowsBeforeLimit(profile_info.getRowsBeforeLimit()); + if (profile_info.hasAppliedGroupBy() && output_format) + output_format->setRowsBeforeGroupBy(profile_info.getRowsBeforeGroupBy()); } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index e3c122467bd..09291d4300d 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1200,6 +1200,7 @@ class IColumn; M(Bool, insert_distributed_one_random_shard, false, "If setting is enabled, inserting into distributed table will choose a random shard to write when there is no sharding key", 0) \ \ M(Bool, exact_rows_before_limit, false, "When enabled, ClickHouse will provide exact value for rows_before_limit_at_least statistic, but with the cost that the 
data before limit will have to be read completely", 0) \ + M(Bool, exact_rows_before_group_by, false, "When enabled, ClickHouse will provide exact value for rows_before_group_by_at_least statistic, but with the cost that the data before group by will have to be read completely", 0) \ M(UInt64, cross_to_inner_join_rewrite, 1, "Use inner join instead of comma/cross join if there are joining expressions in the WHERE section. Values: 0 - no rewrite, 1 - apply if possible for comma/cross, 2 - force rewrite all comma joins, cross - if possible", 0) \ \ M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Enable output LowCardinality type as Dictionary Arrow type", 0) \ diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp index 88a6fb1e92f..4191bf9f0fe 100644 --- a/src/Processors/Formats/IOutputFormat.cpp +++ b/src/Processors/Formats/IOutputFormat.cpp @@ -71,7 +71,8 @@ void IOutputFormat::work() { if (rows_before_limit_counter && rows_before_limit_counter->hasAppliedLimit()) setRowsBeforeLimit(rows_before_limit_counter->get()); - + if (rows_before_group_by_counter && rows_before_group_by_counter->hasAppliedLimit()) + setRowsBeforeGroupBy(rows_before_group_by_counter->get()); finalize(); if (auto_flush) flush(); diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h index cae2ab7691e..9bb7cccb612 100644 --- a/src/Processors/Formats/IOutputFormat.h +++ b/src/Processors/Formats/IOutputFormat.h @@ -41,6 +41,12 @@ public: /// Counter to calculate rows_before_limit_at_least in processors pipeline. void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit_counter.swap(counter); } + /// Value for rows_before_group_by_at_least field. + virtual void setRowsBeforeGroupBy(size_t /*rows_before_limit*/) { } + + /// Counter to calculate rows_before_group_by_at_least in processors pipeline. + void setRowsBeforeGroupByCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_group_by_counter.swap(counter); } + /// Notify about progress. Method could be called from different threads. /// Passed value are delta, that must be summarized. 
virtual void onProgress(const Progress & /*progress*/) {} @@ -151,6 +157,7 @@ protected: Progress progress; bool applied_limit = false; size_t rows_before_limit = 0; + size_t rows_before_group_by = 0; Chunk totals; Chunk extremes; }; @@ -185,6 +192,7 @@ protected: bool need_write_suffix = true; RowsBeforeLimitCounterPtr rows_before_limit_counter; + RowsBeforeGroupByCounterPtr rows_before_group_by_counter; Statistics statistics; private: diff --git a/src/Processors/Formats/LazyOutputFormat.cpp b/src/Processors/Formats/LazyOutputFormat.cpp index 4f6b10dd068..63423628e57 100644 --- a/src/Processors/Formats/LazyOutputFormat.cpp +++ b/src/Processors/Formats/LazyOutputFormat.cpp @@ -45,4 +45,8 @@ void LazyOutputFormat::setRowsBeforeLimit(size_t rows_before_limit) info.setRowsBeforeLimit(rows_before_limit); } +void LazyOutputFormat::setRowsBeforeGroupBy(size_t rows_before_group_by) +{ + info.setRowsBeforeGroupBy(rows_before_group_by); +} } diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index 9cf609ed2d7..83abb2ff1a1 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -28,6 +28,7 @@ public: ProfileInfo & getProfileInfo() { return info; } void setRowsBeforeLimit(size_t rows_before_limit) override; + void setRowsBeforeGroupBy(size_t rows_before_group_by) override; void onCancel() override { diff --git a/src/Processors/Formats/PullingOutputFormat.cpp b/src/Processors/Formats/PullingOutputFormat.cpp index b2378e62d34..646755deb6b 100644 --- a/src/Processors/Formats/PullingOutputFormat.cpp +++ b/src/Processors/Formats/PullingOutputFormat.cpp @@ -42,5 +42,8 @@ void PullingOutputFormat::setRowsBeforeLimit(size_t rows_before_limit) { info.setRowsBeforeLimit(rows_before_limit); } - +void PullingOutputFormat::setRowsBeforeGroupBy(size_t rows_before_group_by) +{ + info.setRowsBeforeGroupBy(rows_before_group_by); +} } diff --git a/src/Processors/Formats/PullingOutputFormat.h b/src/Processors/Formats/PullingOutputFormat.h index a8efb8dd962..c4d8cf4aab2 100644 --- a/src/Processors/Formats/PullingOutputFormat.h +++ b/src/Processors/Formats/PullingOutputFormat.h @@ -22,6 +22,7 @@ public: ProfileInfo & getProfileInfo() { return info; } void setRowsBeforeLimit(size_t rows_before_limit) override; + void setRowsBeforeGroupBy(size_t rows_before_group_by) override; bool expectMaterializedColumns() const override { return false; } diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 63f32d8deb7..0df4b3168e3 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -24,6 +24,8 @@ using StorageLimitsList = std::list; class RowsBeforeLimitCounter; using RowsBeforeLimitCounterPtr = std::shared_ptr; +using RowsBeforeGroupByCounterPtr = std::shared_ptr; + class IProcessor; using ProcessorPtr = std::shared_ptr; using Processors = std::vector; @@ -366,6 +368,10 @@ public: /// This counter is used to calculate the number of rows right before any filtration of LimitTransform. virtual void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr /* counter */) {} + /// Set rows_before_group_by counter for current processor. + /// This counter is used to calculate the number of rows right before AggregatingTransform. 
+ virtual void setRowsBeforeGroupByCounter(RowsBeforeGroupByCounterPtr /* counter */) { } + protected: virtual void onCancel() {} diff --git a/src/Processors/Sources/DelayedSource.h b/src/Processors/Sources/DelayedSource.h index 0b2751e18a6..bd100f29a47 100644 --- a/src/Processors/Sources/DelayedSource.h +++ b/src/Processors/Sources/DelayedSource.h @@ -31,12 +31,14 @@ public: OutputPort * getExtremesPort() { return extremes; } void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit.swap(counter); } + void setRowsBeforeGroupByCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_group_by.swap(counter); } private: QueryPlanResourceHolder resources; Creator creator; Processors processors; RowsBeforeLimitCounterPtr rows_before_limit; + RowsBeforeLimitCounterPtr rows_before_group_by; /// Outputs for DelayedSource. OutputPort * main = nullptr; diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 3d7dd3f76b8..a78db630786 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -35,16 +35,25 @@ RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation progress(value.read_rows, value.read_bytes); }); - query_executor->setProfileInfoCallback([this](const ProfileInfo & info) - { - if (rows_before_limit) + query_executor->setProfileInfoCallback( + [this](const ProfileInfo & info) { - if (info.hasAppliedLimit()) - rows_before_limit->add(info.getRowsBeforeLimit()); - else - manually_add_rows_before_limit_counter = true; /// Remote subquery doesn't contain a limit - } - }); + if (rows_before_limit) + { + if (info.hasAppliedLimit()) + rows_before_limit->add(info.getRowsBeforeLimit()); + else + manually_add_rows_before_limit_counter = true; /// Remote subquery doesn't contain a limit + } + + if (rows_before_group_by) + { + if (info.hasAppliedGroupBy()) + rows_before_group_by->add(info.getRowsBeforeGroupBy()); + else + manually_add_rows_before_group_by_counter = true; /// Remote subquery doesn't contain a group by + } + }); } RemoteSource::~RemoteSource() = default; @@ -162,7 +171,8 @@ std::optional RemoteSource::tryGenerate() { if (manually_add_rows_before_limit_counter) rows_before_limit->add(rows); - + if (manually_add_rows_before_group_by_counter) + rows_before_group_by->add(rows); query_executor->finish(); return {}; } diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index 052567bc261..b2ea6d50e01 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -26,6 +26,7 @@ public: String getName() const override { return "Remote"; } void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit.swap(counter); } + void setRowsBeforeGroupByCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_group_by.swap(counter); } /// Stop reading from stream if output port is finished. void onUpdatePorts() override; @@ -45,6 +46,7 @@ private: bool add_aggregation_info = false; RemoteQueryExecutorPtr query_executor; RowsBeforeLimitCounterPtr rows_before_limit; + RowsBeforeLimitCounterPtr rows_before_group_by; const bool async_read; const bool async_query_sending; @@ -52,6 +54,7 @@ private: int fd = -1; size_t rows = 0; bool manually_add_rows_before_limit_counter = false; + bool manually_add_rows_before_group_by_counter = false; }; /// Totals source from RemoteQueryExecutor. 
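The `RowsBeforeLimitCounterPtr` / `RowsBeforeGroupByCounterPtr` objects threaded through the hunks above are only touched via `add()`, `get()`, `hasAppliedLimit()` and a `shared_ptr` swap; the real counter class lives elsewhere in the codebase and is not shown in this patch. As a rough mental model, the pattern is a single shared atomic counter handed to every producer and read once by the output format, much like the `initRowsBeforeGroupBy()` helper added later in this patch does:

```cpp
#include <atomic>
#include <cstddef>
#include <memory>
#include <vector>

/// Illustrative stand-in for the shared counter; only the members used in the diffs are sketched.
class ToyRowsBeforeStepCounter
{
public:
    void add(size_t rows)
    {
        rows_before_step.fetch_add(rows, std::memory_order_relaxed);
        applied.store(true, std::memory_order_release);
    }

    size_t get() const { return rows_before_step.load(std::memory_order_relaxed); }
    bool hasAppliedLimit() const { return applied.load(std::memory_order_acquire); }

private:
    std::atomic<size_t> rows_before_step{0};
    std::atomic<bool> applied{false};
};

using ToyCounterPtr = std::shared_ptr<ToyRowsBeforeStepCounter>;

struct ToyAggregatingStep { ToyCounterPtr rows_before_group_by; };   /// adds consumed rows in consume()
struct ToyOutputFormat    { ToyCounterPtr rows_before_group_by; };   /// reads the total when finalizing

/// One counter instance is shared by all aggregating steps and the output format.
void wireRowsBeforeGroupBy(std::vector<ToyAggregatingStep> & steps, ToyOutputFormat & format)
{
    auto counter = std::make_shared<ToyRowsBeforeStepCounter>();
    for (auto & step : steps)
        step.rows_before_group_by = counter;
    format.rows_before_group_by = counter;
}
```

Because every aggregating step adds into the same counter before any aggregation state is produced, the format can report the total as "rows before GROUP BY" regardless of how many pipeline threads fed it.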
diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 65f0612d738..7c0e222f89b 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -8,7 +8,6 @@ #include #include #include - #include @@ -684,7 +683,8 @@ void AggregatingTransform::consume(Chunk chunk) LOG_TRACE(log, "Aggregating"); is_consume_started = true; } - + if (rows_before_group_by_at_least) + rows_before_group_by_at_least->add(num_rows); src_rows += num_rows; src_bytes += chunk.bytes(); diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index e167acde067..6e7b04f9191 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -3,11 +3,13 @@ #include #include #include -#include -#include -#include +#include #include #include +#include +#include +#include + namespace CurrentMetrics { @@ -167,6 +169,7 @@ public: Status prepare() override; void work() override; Processors expandPipeline() override; + void setRowsBeforeGroupByCounter(RowsBeforeGroupByCounterPtr counter) override { rows_before_group_by_at_least.swap(counter); } protected: void consume(Chunk chunk); @@ -210,6 +213,8 @@ private: bool is_consume_started = false; + RowsBeforeGroupByCounterPtr rows_before_group_by_at_least; + void initGenerate(); }; diff --git a/src/QueryPipeline/ProfileInfo.cpp b/src/QueryPipeline/ProfileInfo.cpp index ee0ff8c69bf..cec179ecfad 100644 --- a/src/QueryPipeline/ProfileInfo.cpp +++ b/src/QueryPipeline/ProfileInfo.cpp @@ -16,6 +16,8 @@ void ProfileInfo::read(ReadBuffer & in) readBinary(applied_limit, in); readVarUInt(rows_before_limit, in); readBinary(calculated_rows_before_limit, in); + readBinary(applied_group_by, in); + readVarUInt(rows_before_group_by, in); } @@ -27,6 +29,8 @@ void ProfileInfo::write(WriteBuffer & out) const writeBinary(hasAppliedLimit(), out); writeVarUInt(getRowsBeforeLimit(), out); writeBinary(calculated_rows_before_limit, out); + writeBinary(hasAppliedGroupBy(), out); + writeVarUInt(getRowsBeforeGroupBy(), out); } @@ -41,6 +45,8 @@ void ProfileInfo::setFrom(const ProfileInfo & rhs, bool skip_block_size_info) applied_limit = rhs.applied_limit; rows_before_limit = rhs.rows_before_limit; calculated_rows_before_limit = rhs.calculated_rows_before_limit; + applied_group_by = rhs.applied_group_by; + rows_before_group_by = rhs.rows_before_group_by; } @@ -57,6 +63,17 @@ bool ProfileInfo::hasAppliedLimit() const return applied_limit; } +size_t ProfileInfo::getRowsBeforeGroupBy() const +{ + return rows_before_group_by; +} + + +bool ProfileInfo::hasAppliedGroupBy() const +{ + return applied_group_by; +} + void ProfileInfo::update(Block & block) { diff --git a/src/QueryPipeline/ProfileInfo.h b/src/QueryPipeline/ProfileInfo.h index 7a0a0c304e2..141adc7430d 100644 --- a/src/QueryPipeline/ProfileInfo.h +++ b/src/QueryPipeline/ProfileInfo.h @@ -32,6 +32,9 @@ struct ProfileInfo size_t getRowsBeforeLimit() const; bool hasAppliedLimit() const; + size_t getRowsBeforeGroupBy() const; + bool hasAppliedGroupBy() const; + void update(Block & block); void update(size_t num_rows, size_t num_bytes); @@ -51,11 +54,21 @@ struct ProfileInfo rows_before_limit = rows_before_limit_; } + /// Only for Processors. 
+ void setRowsBeforeGroupBy(size_t rows_before_group_by_) + { + applied_group_by = true; + rows_before_group_by = rows_before_group_by_; + } + private: /// For these fields we make accessors, because they must be calculated beforehand. mutable bool applied_limit = false; /// Whether LIMIT was applied mutable size_t rows_before_limit = 0; mutable bool calculated_rows_before_limit = false; /// Whether the field rows_before_limit was calculated + + mutable bool applied_group_by = false; /// Whether GROUP BY was applied + mutable size_t rows_before_group_by = 0; }; } diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 935c006c217..5e0885ed4e8 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -1,15 +1,13 @@ #include #include -#include -#include -#include -#include #include #include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include @@ -17,15 +15,18 @@ #include #include #include -#include +#include #include +#include #include #include #include #include -#include #include -#include +#include +#include +#include +#include namespace DB @@ -273,7 +274,20 @@ static void initRowsBeforeLimit(IOutputFormat * output_format) output_format->setRowsBeforeLimitCounter(rows_before_limit_at_least); } } - +static void initRowsBeforeGroupBy(std::shared_ptr processors, IOutputFormat * output_format) +{ + if (!processors->empty()) + { + RowsBeforeGroupByCounterPtr rows_before_group_by_at_least = std::make_shared(); + for (auto & processor : *processors) + { + if (auto transform = std::dynamic_pointer_cast(processor)) + transform->setRowsBeforeGroupByCounter(rows_before_group_by_at_least); + } + rows_before_group_by_at_least->add(0); + output_format->setRowsBeforeLimitCounter(rows_before_group_by_at_least); + } +} QueryPipeline::QueryPipeline( QueryPlanResourceHolder resources_, @@ -521,6 +535,7 @@ void QueryPipeline::complete(std::shared_ptr format) extremes = nullptr; initRowsBeforeLimit(format.get()); + initRowsBeforeGroupBy(processors, format.get()); output_format = format.get(); processors->emplace_back(std::move(format)); diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 10b59751b22..37e4342f3b0 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1577,6 +1577,7 @@ namespace stats.set_allocated_bytes(info.bytes); stats.set_applied_limit(info.hasAppliedLimit()); stats.set_rows_before_limit(info.getRowsBeforeLimit()); + stats.set_rows_before_group_by(info.getRowsBeforeGroupBy()); } void Call::addLogsToResult() diff --git a/src/Server/grpc_protos/clickhouse_grpc.proto b/src/Server/grpc_protos/clickhouse_grpc.proto index c9ba6f28506..02b6988b8c0 100644 --- a/src/Server/grpc_protos/clickhouse_grpc.proto +++ b/src/Server/grpc_protos/clickhouse_grpc.proto @@ -179,6 +179,8 @@ message Stats { uint64 allocated_bytes = 3; bool applied_limit = 4; uint64 rows_before_limit = 5; + bool applied_group_by = 6; + uint64 rows_before_group_by = 7; } message Exception { From 3f192248e86749fd23ab3a0fa8da36ab4ba0543c Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 27 Jun 2024 22:10:57 +0800 Subject: [PATCH 0234/2361] Some fixups from code review. Fix null data_type in ColumnStatisticsDescription. 
Update total_rows even if part has no statistics --- src/Storages/MergeTree/MergeTreeData.cpp | 14 ++- .../ConditionSelectivityEstimator.h | 1 + .../Statistics/CountMinSketchStatistics.cpp | 38 +++----- .../Statistics/CountMinSketchStatistics.h | 8 +- src/Storages/Statistics/Statistics.cpp | 49 +++++++--- src/Storages/Statistics/Statistics.h | 3 + src/Storages/Statistics/TDigestStatistics.cpp | 14 ++- src/Storages/Statistics/UniqStatistics.cpp | 2 + src/Storages/StatisticsDescription.cpp | 7 +- ...64_statistics_estimate_predicate.reference | 25 +++++ .../02864_statistics_estimate_predicate.sql | 94 +++++++++++++++++++ .../02864_statistics_uniq.reference | 35 ------- .../0_stateless/02864_statistics_uniq.sql | 71 -------------- .../03174_statistics_countminsketch.reference | 26 ----- .../03174_statistics_countminsketch.sql | 78 --------------- 15 files changed, 198 insertions(+), 267 deletions(-) create mode 100644 tests/queries/0_stateless/02864_statistics_estimate_predicate.reference create mode 100644 tests/queries/0_stateless/02864_statistics_estimate_predicate.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_uniq.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_uniq.sql delete mode 100644 tests/queries/0_stateless/03174_statistics_countminsketch.reference delete mode 100644 tests/queries/0_stateless/03174_statistics_countminsketch.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 2e0ea4cdbcd..364076bd802 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -497,8 +497,11 @@ ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByP { auto stats = part->loadStatistics(); /// TODO: We only have one stats file for every part. - for (const auto & stat : stats) - result.merge(part->info.getPartNameV1(), part->rows_count, stat); + if (stats.empty()) /// Even if the part has no statistics, we still need to add its rows count. + result.addRows(part->rows_count); + else + for (const auto & stat : stats) + result.merge(part->info.getPartNameV1(), part->rows_count, stat); } catch (...) { @@ -513,8 +516,11 @@ ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByP { if (!partition_pruner.canBePruned(*part)) { auto stats = part->loadStatistics(); - for (const auto & stat : stats) - result.merge(part->info.getPartNameV1(), part->rows_count, stat); + if (stats.empty()) + result.addRows(part->rows_count); + else + for (const auto & stat : stats) + result.merge(part->info.getPartNameV1(), part->rows_count, stat); } } catch (...) 
diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.h b/src/Storages/Statistics/ConditionSelectivityEstimator.h index 9bf4940e563..ff6218e7ef1 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.h +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.h @@ -46,6 +46,7 @@ public: Float64 estimateRowCount(const RPNBuilderTreeNode & node) const; void merge(String part_name, UInt64 part_rows, ColumnStatisticsPtr column_stat); + void addRows(UInt64 part_rows) { total_rows += part_rows; } }; } diff --git a/src/Storages/Statistics/CountMinSketchStatistics.cpp b/src/Storages/Statistics/CountMinSketchStatistics.cpp index c22aa966a15..497570bd2d1 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.cpp +++ b/src/Storages/Statistics/CountMinSketchStatistics.cpp @@ -1,6 +1,6 @@ #include -#include #include +#include #include #include @@ -17,7 +17,7 @@ extern const int ILLEGAL_STATISTICS; CountMinSketchStatistics::CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) : IStatistics(stat_) - , sketch(HASH_COUNT, BUCKET_COUNT) + , sketch(num_hashes, num_buckets) , data_type(data_type_) { } @@ -31,20 +31,6 @@ Float64 CountMinSketchStatistics::estimateEqual(const Field & value) const UNREACHABLE(); } -bool CountMinSketchStatistics::checkType(const Field & f) -{ - switch (f.getType()) - { - case Field::Types::Int64: - case Field::Types::UInt64: - case Field::Types::Float64: - case Field::Types::String: - return true; - default: - return false; - } -} - void CountMinSketchStatistics::serialize(WriteBuffer & buf) { Sketch::vector_bytes bytes = sketch.serialize(); @@ -61,24 +47,24 @@ void CountMinSketchStatistics::deserialize(ReadBuffer & buf) bytes.reserve(size); buf.readStrict(reinterpret_cast(bytes.data()), size); - sketch = datasketches::count_min_sketch::deserialize(bytes.data(), size, datasketches::DEFAULT_SEED); + sketch = datasketches::count_min_sketch::deserialize(bytes.data(), size); } void CountMinSketchStatistics::update(const ColumnPtr & column) { size_t size = column->size(); - for (size_t i = 0; i < size; ++i) { Field f; column->get(i, f); - if (checkType(f)) - { - if (auto float_val = IStatistics::getFloat64(f)) - sketch.update(&float_val.value(), 8, 1.0); - else if (auto string_val = IStatistics::getString(f)) - sketch.update(*string_val, 1.0); - } + + if (f.isNull()) + continue; + + if (auto float_val = IStatistics::getFloat64(f)) + sketch.update(&float_val, 8, 1); + else + sketch.update(f.get(), 1); } } @@ -87,7 +73,7 @@ void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr da data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) - throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countmin' does not support type {}", data_type->getName()); + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) diff --git a/src/Storages/Statistics/CountMinSketchStatistics.h b/src/Storages/Statistics/CountMinSketchStatistics.h index 2a4f65196a2..52046a19b64 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.h +++ b/src/Storages/Statistics/CountMinSketchStatistics.h @@ -23,13 +23,11 @@ public: void update(const ColumnPtr & column) override; - bool checkType(const Field & f); - 
private: - static constexpr auto HASH_COUNT = 8uz; - static constexpr auto BUCKET_COUNT = 2048uz; + static constexpr auto num_hashes = 8uz; + static constexpr auto num_buckets = 2048uz; - using Sketch = datasketches::count_min_sketch; + using Sketch = datasketches::count_min_sketch; Sketch sketch; DataTypePtr data_type; diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 36339d7456f..9db0ccf1023 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -31,17 +31,39 @@ enum StatisticsFileVersion : UInt16 std::optional IStatistics::getFloat64(const Field & f) { - const auto type = f.getType(); - Float64 value; - if (type == Field::Types::Int64) - value = f.get(); - else if (type == Field::Types::UInt64) - value = f.get(); - else if (type == Field::Types::Float64) - value = f.get(); - else - return {}; - return value; + switch (f.getType()) + { + case Field::Types::Bool: + return f.get(); + case Field::Types::Int64: + return f.get(); + case Field::Types::UInt64: + return f.get(); + case Field::Types::Float64: + return f.get(); + case Field::Types::Int128: + return f.get(); + case Field::Types::UInt128: + return f.get(); + case Field::Types::Int256: + return f.get(); + case Field::Types::UInt256: + return f.get(); + case Field::Types::Decimal32: + return f.get().getValue().value; + case Field::Types::Decimal64: + return f.get().getValue().value; + case Field::Types::Decimal128: + return f.get().getValue().value; + case Field::Types::Decimal256: + return f.get().getValue().value; + case Field::Types::IPv4: + return f.get().toUnderType(); + case Field::Types::IPv6: + return f.get().toUnderType(); + default: + return {}; + } } std::optional IStatistics::getString(const Field & f) @@ -97,10 +119,7 @@ Float64 ColumnStatistics::estimateEqual(Field val) const if (stats.contains(StatisticsType::CountMinSketch)) { auto count_min_sketch_static = std::static_pointer_cast(stats.at(StatisticsType::CountMinSketch)); - if (!count_min_sketch_static->checkType(val)) - return rows * ConditionSelectivityEstimator::default_normal_cond_factor; - else - return count_min_sketch_static->estimateEqual(val); + return count_min_sketch_static->estimateEqual(val); } #endif if (val < - ConditionSelectivityEstimator::threshold || val > ConditionSelectivityEstimator::threshold) diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index 9cc50a8ad4d..acea6d3435d 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -34,6 +34,9 @@ public: virtual void update(const ColumnPtr & column) = 0; + /// Convert Field to Float64, used when estimating the number of rows. + /// Return a Float64 value if f can be represented by a number, otherwise return null. + /// See IDataType::isValueRepresentedByNumber static std::optional getFloat64(const Field & f); static std::optional getString(const Field & f); diff --git a/src/Storages/Statistics/TDigestStatistics.cpp b/src/Storages/Statistics/TDigestStatistics.cpp index d390619ea36..7156ca8e84c 100644 --- a/src/Storages/Statistics/TDigestStatistics.cpp +++ b/src/Storages/Statistics/TDigestStatistics.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB { @@ -36,18 +37,23 @@ void TDigestStatistics::deserialize(ReadBuffer & buf) void TDigestStatistics::update(const ColumnPtr & column) { size_t size = column->size(); - for (size_t i = 0; i < size; ++i) { - /// TODO: support more types. 
- Float64 value = column->getFloat64(i); - data.add(value, 1); + Field f; + column->get(i, f); + + if (f.isNull()) + continue; + + if (auto float_val = IStatistics::getFloat64(f)) + data.add(*float_val, 1); } } void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); + data_type = removeLowCardinalityAndNullable(data_type); if (!data_type->isValueRepresentedByNumber()) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' does not support type {}", data_type->getName()); } diff --git a/src/Storages/Statistics/UniqStatistics.cpp b/src/Storages/Statistics/UniqStatistics.cpp index fc748e769ca..9e65a2fee15 100644 --- a/src/Storages/Statistics/UniqStatistics.cpp +++ b/src/Storages/Statistics/UniqStatistics.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -54,6 +55,7 @@ void UniqStatistics::update(const ColumnPtr & column) void UniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); + data_type = removeLowCardinalityAndNullable(data_type); if (!data_type->isValueRepresentedByNumber()) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'uniq' does not support type {}", data_type->getName()); } diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 8fa3400d3a6..d59ffb5c8e9 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -103,10 +103,9 @@ void ColumnStatisticsDescription::merge(const ColumnStatisticsDescription & othe chassert(merging_column_type); if (column_name.empty()) - { column_name = merging_column_name; - data_type = merging_column_type; - } + + data_type = merging_column_type; for (const auto & [stats_type, stats_desc]: other.types_to_desc) { @@ -125,6 +124,7 @@ void ColumnStatisticsDescription::assign(const ColumnStatisticsDescription & oth throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot assign statistics from column {} to {}", column_name, other.column_name); types_to_desc = other.types_to_desc; + data_type = other.data_type; } void ColumnStatisticsDescription::clear() @@ -163,6 +163,7 @@ std::vector ColumnStatisticsDescription::fromAST(co const auto & column = columns.getPhysical(physical_column_name); stats.column_name = column.name; + stats.data_type = column.type; stats.types_to_desc = statistics_types; result.push_back(stats); } diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference new file mode 100644 index 00000000000..7215d5fef58 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference @@ -0,0 +1,25 @@ +CREATE TABLE default.t1\n(\n `a` String,\n `b` UInt64,\n `c` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +Test statistics TDigest: + Prewhere info + Prewhere filter + Prewhere filter column: and(less(c, -98), greater(b, 0)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(c, -98), equals(b, 0)) (removed) +Test statistics Uniq: + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(b, 0), equals(c, 0)) (removed) +Test statistics count_min: + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) +Test statistics multi-types: + Prewhere info + Prewhere filter + Prewhere filter 
column: and(equals(a, \'0\'), less(c, -90), greater(b, 900_UInt16)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) +Test LowCardinality and Nullable data type: +t2 diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql new file mode 100644 index 00000000000..a608f18a354 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql @@ -0,0 +1,94 @@ +-- Tags: no-fasttest +DROP TABLE IF EXISTS t1 SYNC; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET allow_suspicious_low_cardinality_types=1; +SET mutations_sync = 2; + +CREATE TABLE t1 +( + a String, + b UInt64, + c Int64, + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE t1; + +INSERT INTO t1 select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'Test statistics TDigest:'; + +ALTER TABLE t1 ADD STATISTICS b, c TYPE tdigest; +ALTER TABLE t1 MATERIALIZE STATISTICS b, c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b > 0/*9990*/ and c < -98/*100*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b = 0/*1000*/ and c < -98/*100*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE t1 DROP STATISTICS b, c; + + +SELECT 'Test statistics Uniq:'; + +ALTER TABLE t1 ADD STATISTICS b TYPE uniq, tdigest; +ALTER TABLE t1 MATERIALIZE STATISTICS b; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0/*1000*/ and b = 0/*10*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE t1 DROP STATISTICS b; + + +SELECT 'Test statistics count_min:'; + +ALTER TABLE t1 ADD STATISTICS a TYPE count_min; +ALTER TABLE t1 ADD STATISTICS b TYPE count_min; +ALTER TABLE t1 ADD STATISTICS c TYPE count_min; +ALTER TABLE t1 MATERIALIZE STATISTICS a, b, c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE t1 DROP STATISTICS a, b, c; + + +SELECT 'Test statistics multi-types:'; + +ALTER TABLE t1 ADD STATISTICS a TYPE count_min; +ALTER TABLE t1 ADD STATISTICS b TYPE count_min, uniq, tdigest; +ALTER TABLE t1 ADD STATISTICS c TYPE count_min, uniq, tdigest; +ALTER TABLE t1 MATERIALIZE STATISTICS a, b, c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE t1 DROP STATISTICS a, b, c; + +DROP TABLE IF EXISTS t1 SYNC; + + +SELECT 'Test LowCardinality and Nullable data type:'; +DROP TABLE IF EXISTS t2 SYNC; 
+SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE t2 +( + a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), + b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), + c LowCardinality(Nullable(Int64)) STATISTICS(uniq, tdigest, count_min), + pk String, +) Engine = MergeTree() ORDER BY pk; + +select table from system.tables where name = 't2'; + +DROP TABLE IF EXISTS t2 SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference deleted file mode 100644 index 77786dbdd8c..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.reference +++ /dev/null @@ -1,35 +0,0 @@ -CREATE TABLE default.t1\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest, uniq),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After modify TDigest - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(c, -1), less(a, 10), less(b, 10)) (removed) -After drop - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql deleted file mode 100644 index c6b51d2a377..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.sql +++ /dev/null @@ -1,71 +0,0 @@ -DROP TABLE IF EXISTS t1; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; - -CREATE TABLE t1 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Int64 STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE t1; - -INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; -INSERT INTO t1 select 0, 0, 11, generateUUIDv4(); - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -OPTIMIZE TABLE t1 FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE 
explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT 'After modify TDigest'; -ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; -ALTER TABLE t1 MATERIALIZE STATISTICS c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - - -ALTER TABLE t1 DROP STATISTICS c; - -SELECT 'After drop'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t1; -DROP TABLE IF EXISTS t2; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c LowCardinality(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t2; -DROP TABLE IF EXISTS t3; - -CREATE TABLE t3 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Nullable(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t3; diff --git a/tests/queries/0_stateless/03174_statistics_countminsketch.reference b/tests/queries/0_stateless/03174_statistics_countminsketch.reference deleted file mode 100644 index b5a35c22835..00000000000 --- a/tests/queries/0_stateless/03174_statistics_countminsketch.reference +++ /dev/null @@ -1,26 +0,0 @@ -CREATE TABLE default.t1\n(\n `a` String STATISTICS(countminsketch),\n `b` Int64 STATISTICS(countminsketch),\n `c` UInt64 STATISTICS(countminsketch),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(c, 0), greater(b, 0)) (removed) -After drop statistics for a - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(b, 0), equals(c, 0), equals(a, \'0\')) 
(removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 0), equals(a, \'0\'), greater(b, 0)) (removed) -LowCardinality - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) -Nullable - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) -LowCardinality(Nullable) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) diff --git a/tests/queries/0_stateless/03174_statistics_countminsketch.sql b/tests/queries/0_stateless/03174_statistics_countminsketch.sql deleted file mode 100644 index 200c2a4a531..00000000000 --- a/tests/queries/0_stateless/03174_statistics_countminsketch.sql +++ /dev/null @@ -1,78 +0,0 @@ --- Tags: no-fasttest -DROP TABLE IF EXISTS t1; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; - -CREATE TABLE t1 -( - a String STATISTICS(countminsketch), - b Int64 STATISTICS(countminsketch), - c UInt64 STATISTICS(countminsketch), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE t1; - -INSERT INTO t1 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE t1 DROP STATISTICS a; - -SELECT 'After drop statistics for a'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0 and b > 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t1; -DROP TABLE IF EXISTS t2; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 -( - a LowCardinality(String) STATISTICS(countminsketch), - b Int64 STATISTICS(countminsketch), - c UInt64 STATISTICS(countminsketch), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t2 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'LowCardinality'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t2 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - - -DROP TABLE IF EXISTS t2; -DROP TABLE IF EXISTS t3; - -CREATE TABLE t3 -( - a Nullable(String) STATISTICS(countminsketch), - b Int64 STATISTICS(countminsketch), - c UInt64 STATISTICS(countminsketch), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t3 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'Nullable'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN 
actions=1 SELECT count(*) FROM t3 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t3; -DROP TABLE IF EXISTS t4; - -CREATE TABLE t4 -( - a LowCardinality(Nullable(String)) STATISTICS(countminsketch), - b Int64 STATISTICS(countminsketch), - c UInt64 STATISTICS(countminsketch), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t4 select toString(number % 1000), number % 100, number % 10, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'LowCardinality(Nullable)'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t4 WHERE c = 0 and b = 0 and a = '0') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t4; From 1c9df94a0a0b6fc9eb7ef302b47ac909c029e345 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 27 Jun 2024 21:26:39 +0200 Subject: [PATCH 0235/2361] Replace fix with LOGICAL_ERROR --- src/Functions/IFunction.cpp | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/src/Functions/IFunction.cpp b/src/Functions/IFunction.cpp index 9217071ca11..76ae8f33fbd 100644 --- a/src/Functions/IFunction.cpp +++ b/src/Functions/IFunction.cpp @@ -47,6 +47,9 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args) return true; } +/// Replaces single low cardinality column in a function call by its dictionary +/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType +/// as it's only possible if there is one low cardinality column and, optionally, const columns ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count) { @@ -77,21 +80,8 @@ ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( if (number_full_columns > 0 || number_low_cardinality_columns > 1) { - /// If there is a single full column, we can't replace the LC column with its dictionary, as it won't match - /// the size or order of the full columns. Same if there are 2 or more low cardinality columns - for (auto & arg : args) - { - if (const auto * column_lc = checkAndGetColumn(arg.column.get())) - { - arg.column = recursiveRemoveLowCardinality(arg.column); - chassert(arg.column->size() == input_rows_count); - - const auto * low_cardinality_type = checkAndGetDataType(arg.type.get()); - if (!low_cardinality_type) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", arg.type->getName()); - arg.type = recursiveRemoveLowCardinality(arg.type); - } - } + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}", + number_low_cardinality_columns, number_full_columns, number_const_columns); } else if (number_low_cardinality_columns == 1) { @@ -124,7 +114,7 @@ ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( lc_arg.type = low_cardinality_type->getDictionaryType(); } - /// Change size of constants. 
+ /// Change size of constants for (auto & column : args) { if (const auto * column_const = checkAndGetColumn(column.column.get())) @@ -305,6 +295,8 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments(); const auto & dictionary_type = res_low_cardinality_type->getDictionaryType(); + /// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType + /// So there is only one low cardinality column (and optionally some const columns) and no full column ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count); From c185d60375dd0fbc18d062197875ba463326f44a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 27 Jun 2024 21:59:14 +0200 Subject: [PATCH 0236/2361] Try an ugly fix --- src/Planner/PlannerActionsVisitor.cpp | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 7a12d5d690d..13e96dc7016 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -485,16 +485,28 @@ public: return node; } - const ActionsDAG::Node * addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column) + [[nodiscard]] String addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column) { auto it = node_name_to_node.find(node_name); + if (it != node_name_to_node.end() && it->second->column) + return {node_name}; + if (it != node_name_to_node.end()) - return it->second; + { + /// There is a node with this name, but it doesn't have a column + /// This likely happens because we executed the query until WithMergeableState with a const node in the + /// WHERE clause. 
As the results of headers are materialized, the column was removed + /// Let's add a new column and keep this + String dupped_name{node_name + "_dupped"}; + const auto * node = &actions_dag.addColumn(column); + node_name_to_node[dupped_name] = node; + return dupped_name; + } const auto * node = &actions_dag.addColumn(column); node_name_to_node[node->result_name] = node; - return node; + return {node_name}; } template @@ -723,7 +735,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi column.type = constant_type; column.column = column.type->createColumnConst(1, constant_literal); - actions_stack[0].addConstantIfNecessary(constant_node_name, column); + String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) @@ -732,8 +744,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column); } - return {constant_node_name, Levels(0)}; - + return {final_name, Levels(0)}; } PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitLambda(const QueryTreeNodePtr & node) @@ -862,7 +873,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma else column.column = std::move(column_set); - actions_stack[0].addConstantIfNecessary(column.name, column); + String final_name = actions_stack[0].addConstantIfNecessary(column.name, column); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) @@ -871,7 +882,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma actions_stack_node.addInputConstantColumnIfNecessary(column.name, column); } - return {column.name, Levels(0)}; + return {final_name, Levels(0)}; } PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitIndexHintFunction(const QueryTreeNodePtr & node) From 1e54b213850bdd34fc1c251a531da04ae9cb03aa Mon Sep 17 00:00:00 2001 From: AntiTopQuark Date: Fri, 28 Jun 2024 22:43:26 +0800 Subject: [PATCH 0237/2361] fix fast test for MergeTreeIndexSet --- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index c2a574beb90..36844648ac7 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -361,7 +361,7 @@ bool MergeTreeIndexConditionSet::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx if (!column->isNullAt(i) && (column->get64(i) & 1)) return true; - return condition.checkInHyperrectangle(granule.set_hyperrectangle, index_data_types).can_be_true; + return false; } From fed573ffee1bb57f0b9cff7f6f1c7d5a542af51c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 28 Jun 2024 17:12:10 +0200 Subject: [PATCH 0238/2361] Extend fix --- src/Planner/PlannerActionsVisitor.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 13e96dc7016..1c9553032c2 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -535,7 +535,7 @@ public: } private: - std::unordered_map node_name_to_node; + std::unordered_map node_name_to_node; ActionsDAG & actions_dag; QueryTreeNodePtr scope_node; }; @@ -741,7 
+741,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi for (size_t i = 1; i < actions_stack_size; ++i) { auto & actions_stack_node = actions_stack[i]; - actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column); + actions_stack_node.addInputConstantColumnIfNecessary(final_name, column); } return {final_name, Levels(0)}; @@ -879,7 +879,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma for (size_t i = 1; i < actions_stack_size; ++i) { auto & actions_stack_node = actions_stack[i]; - actions_stack_node.addInputConstantColumnIfNecessary(column.name, column); + actions_stack_node.addInputConstantColumnIfNecessary(final_name, column); } return {final_name, Levels(0)}; From c998ec1e4f1b91f8ca20c2bd5a7acb6ac8d2e1b1 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 29 Jun 2024 02:40:22 +0000 Subject: [PATCH 0239/2361] add test and better naming --- src/Access/Common/AccessRightsElement.cpp | 4 +- src/Access/Common/AccessRightsElement.h | 2 +- src/Access/ContextAccess.cpp | 10 +-- ...xml => config_with_table_engine_grant.xml} | 0 .../config_without_table_engine_grant.xml | 5 ++ ...est.py => test_with_table_engine_grant.py} | 2 +- .../test_without_table_engine_grant.py | 81 +++++++++++++++++++ 7 files changed, 95 insertions(+), 9 deletions(-) rename tests/integration/test_grant_and_revoke/configs/{config.xml => config_with_table_engine_grant.xml} (100%) create mode 100644 tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml rename tests/integration/test_grant_and_revoke/{test.py => test_with_table_engine_grant.py} (99%) create mode 100644 tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py diff --git a/src/Access/Common/AccessRightsElement.cpp b/src/Access/Common/AccessRightsElement.cpp index 2ee13d6b94f..63bda09a51b 100644 --- a/src/Access/Common/AccessRightsElement.cpp +++ b/src/Access/Common/AccessRightsElement.cpp @@ -224,10 +224,10 @@ void AccessRightsElement::replaceEmptyDatabase(const String & current_database) String AccessRightsElement::toString() const { return toStringImpl(*this, true); } String AccessRightsElement::toStringWithoutOptions() const { return toStringImpl(*this, false); } -String AccessRightsElement::toStringWithoutONClause() const +String AccessRightsElement::toStringForAccessTypeSource() const { String result{access_flags.toKeywords().front()}; - return result + " ON {db.table}"; + return result + " ON *.*"; } bool AccessRightsElements::empty() const { return std::all_of(begin(), end(), [](const AccessRightsElement & e) { return e.empty(); }); } diff --git a/src/Access/Common/AccessRightsElement.h b/src/Access/Common/AccessRightsElement.h index 49764fc727f..78e94e6f2e4 100644 --- a/src/Access/Common/AccessRightsElement.h +++ b/src/Access/Common/AccessRightsElement.h @@ -89,7 +89,7 @@ struct AccessRightsElement /// Returns a human-readable representation like "GRANT SELECT, UPDATE(x, y) ON db.table". String toString() const; String toStringWithoutOptions() const; - String toStringWithoutONClause() const; + String toStringForAccessTypeSource() const; }; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 4620561053b..8ff1fc8ed21 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -622,7 +622,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg /// since SOURCES is not granted actually. 
In order to solve this, turn the prompt logic back to Sources. if (flags & AccessType::TABLE_ENGINE && !access_control->doesTableEnginesRequireGrant()) { - AccessFlags newFlags; + AccessFlags new_flags; String table_engine_name{getTableEngine(args...)}; for (const auto & source_and_table_engine : source_and_table_engines) @@ -631,11 +631,11 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg if (table_engine != table_engine_name) continue; const auto & source = std::get<0>(source_and_table_engine); /// Set the flags from Table Engine to SOURCES so that prompts can be meaningful. - newFlags = source; + new_flags = source; break; } - if (newFlags.isEmpty()) + if (new_flags.isEmpty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Didn't find the target Source from the Table Engine"); if (grant_option && acs->isGranted(flags, args...)) @@ -644,12 +644,12 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg "{}: Not enough privileges. " "The required privileges have been granted, but without grant option. " "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", - AccessRightsElement{newFlags}.toStringWithoutONClause()); + AccessRightsElement{new_flags}.toStringForAccessTypeSource()); } return access_denied(ErrorCodes::ACCESS_DENIED, "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", - AccessRightsElement{newFlags}.toStringWithoutONClause() + (grant_option ? " WITH GRANT OPTION" : "")); + AccessRightsElement{new_flags}.toStringForAccessTypeSource() + (grant_option ? " WITH GRANT OPTION" : "")); } if (grant_option && acs->isGranted(flags, args...)) diff --git a/tests/integration/test_grant_and_revoke/configs/config.xml b/tests/integration/test_grant_and_revoke/configs/config_with_table_engine_grant.xml similarity index 100% rename from tests/integration/test_grant_and_revoke/configs/config.xml rename to tests/integration/test_grant_and_revoke/configs/config_with_table_engine_grant.xml diff --git a/tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml b/tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml new file mode 100644 index 00000000000..d3571f281f5 --- /dev/null +++ b/tests/integration/test_grant_and_revoke/configs/config_without_table_engine_grant.xml @@ -0,0 +1,5 @@ + + + false + + diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test_with_table_engine_grant.py similarity index 99% rename from tests/integration/test_grant_and_revoke/test.py rename to tests/integration/test_grant_and_revoke/test_with_table_engine_grant.py index e533cced1e4..25ca7913e4e 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test_with_table_engine_grant.py @@ -5,7 +5,7 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( "instance", - main_configs=["configs/config.xml"], + main_configs=["configs/config_with_table_engine_grant.xml"], user_configs=["configs/users.d/users.xml"], ) diff --git a/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py b/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py new file mode 100644 index 00000000000..210bb8ec465 --- /dev/null +++ b/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py @@ -0,0 +1,81 @@ +import pytest +from helpers.cluster import ClickHouseCluster 
+from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance( + "instance", + main_configs=["configs/config_without_table_engine_grant.xml"], + user_configs=["configs/users.d/users.xml"], +) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + + instance.query("CREATE DATABASE test") + + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_after_test(): + try: + yield + finally: + instance.query("DROP USER IF EXISTS A") + instance.query("DROP TABLE IF EXISTS test.table1") + + +def test_table_engine_and_source_grant(): + instance.query("DROP USER IF EXISTS A") + instance.query("CREATE USER A") + instance.query("GRANT CREATE TABLE ON test.table1 TO A") + + instance.query("GRANT POSTGRES ON *.* TO A") + + instance.query( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + instance.query("DROP TABLE test.table1") + + instance.query("REVOKE POSTGRES ON *.* FROM A") + + assert "Not enough privileges" in instance.query_and_get_error( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + # expecting grant POSTGRES instead of grant PostgreSQL due to discrepancy between source access type and table engine + assert "grant POSTGRES ON *.*" in instance.query_and_get_error( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + instance.query("GRANT SOURCES ON *.* TO A") + + instance.query( + """ + CREATE TABLE test.table1(a Integer) + engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy'); + """, + user="A", + ) + + instance.query("DROP TABLE test.table1") From d7b3c3e8a97835c9f7987a5852ef9469770f8560 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 29 Jun 2024 21:59:04 +0200 Subject: [PATCH 0240/2361] Add review suggestion --- src/Interpreters/InterpreterSetQuery.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index 15d4ba56d8d..2ae35c4313b 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -46,7 +46,7 @@ static void applySettingsFromSelectWithUnion(const ASTSelectWithUnionQuery & sel // It is flattened later, when we process UNION ALL/DISTINCT. 
const auto * last_select = children.back()->as(); if (last_select && last_select->settings()) - InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(false); + InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(/* ignore_setting_constraints= */ false); } void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMutablePtr context_) @@ -58,7 +58,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta if (const auto * query_with_output = dynamic_cast(ast.get())) { if (query_with_output->settings_ast) - InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(false); + InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false); if (const auto * create_query = ast->as(); create_query && create_query->select) applySettingsFromSelectWithUnion(create_query->select->as(), context_); @@ -67,7 +67,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta if (const auto * select_query = ast->as()) { if (auto new_settings = select_query->settings()) - InterpreterSetQuery(new_settings, context_).executeForCurrentContext(false); + InterpreterSetQuery(new_settings, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false); } else if (const auto * select_with_union_query = ast->as()) { @@ -76,7 +76,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta else if (const auto * explain_query = ast->as()) { if (explain_query->settings_ast) - InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext(false); + InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false); applySettingsFromQuery(explain_query->getExplainedQuery(), context_); } @@ -84,7 +84,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta { context_->setInsertFormat(insert_query->format); if (insert_query->settings_ast) - InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(false); + InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false); } } From 9e33e3dd4dd3c1aad3de32c382da15cfe1bb4917 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 30 Jun 2024 04:44:52 +0200 Subject: [PATCH 0241/2361] Update TablesLoader.h --- src/Databases/TablesLoader.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Databases/TablesLoader.h b/src/Databases/TablesLoader.h index 5df52be4e97..bf469d83245 100644 --- a/src/Databases/TablesLoader.h +++ b/src/Databases/TablesLoader.h @@ -14,7 +14,6 @@ #include - namespace Poco { class Logger; // NOLINT(cppcoreguidelines-virtual-class-destructor) From 02928bb207348019a0968454e0f7d6baa6e511af Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 30 Jun 2024 05:09:53 +0200 Subject: [PATCH 0242/2361] Use Atomic database by default in clickhouse-local --- programs/local/LocalServer.cpp | 12 ++++++------ src/Databases/DatabaseAtomic.cpp | 14 +++++++++++--- src/Databases/DatabaseAtomic.h | 1 + src/Databases/DatabaseOnDisk.cpp | 12 ++++++++++-- src/Databases/DatabaseOnDisk.h | 4 +++- src/Databases/DatabaseOrdinary.cpp | 2 +- 6 files changed, 32 insertions(+), 13 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 670dc378b97..27fe90f68cc 100644 --- a/programs/local/LocalServer.cpp 
+++ b/programs/local/LocalServer.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -48,7 +49,6 @@ #include #include #include -#include #include #include #include @@ -192,11 +192,11 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str return system_database; } -static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) +static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context) { - auto databaseCombiner = std::make_shared(name_, context_); - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); - databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); + auto databaseCombiner = std::make_shared(name_, context); + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context)); + databaseCombiner->registerNextDatabase(std::make_shared(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context)); return databaseCombiner; } @@ -341,7 +341,7 @@ std::string LocalServer::getInitialCreateTableQuery() else table_structure = "(" + table_structure + ")"; - return fmt::format("CREATE TABLE {} {} ENGINE = File({}, {});", + return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});", table_name, table_structure, data_format, table_file); } diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index ccab72cfbae..985d79773aa 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -49,9 +49,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c , db_uuid(uuid) { assert(db_uuid != UUIDHelpers::Nil); - fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); - fs::create_directories(path_to_table_symlinks); - tryCreateMetadataSymlink(); } DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_) @@ -59,6 +56,13 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C { } +void DatabaseAtomic::createDirectories() +{ + fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); + fs::create_directories(path_to_table_symlinks); + tryCreateMetadataSymlink(); +} + String DatabaseAtomic::getTableDataPath(const String & table_name) const { std::lock_guard lock(mutex); @@ -95,6 +99,7 @@ void DatabaseAtomic::drop(ContextPtr) void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); + createDirectories(); DetachedTables not_in_use; std::lock_guard lock(mutex); not_in_use = cleanupDetachedTables(); @@ -309,6 +314,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora const String & table_metadata_tmp_path, const String & table_metadata_path, ContextPtr query_context) { + createDirectories(); DetachedTables not_in_use; auto table_data_path = getTableDataPath(query); try @@ -572,6 +578,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new { /// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard + createDirectories(); waitDatabaseStarted(); bool check_ref_deps = query_context->getSettingsRef().check_referential_table_dependencies; @@ -663,4 +670,5 @@ void registerDatabaseAtomic(DatabaseFactory & factory) }; factory.registerDatabase("Atomic", create_fn); } + } diff 
--git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index b59edd479ba..26ab7657354 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -75,6 +75,7 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); + void createDirectories(); void tryCreateMetadataSymlink(); virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index b8154372116..3a56c124726 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -170,7 +170,12 @@ DatabaseOnDisk::DatabaseOnDisk( , metadata_path(metadata_path_) , data_path(data_path_) { - fs::create_directories(local_context->getPath() + data_path); +} + + +void DatabaseOnDisk::createDirectories() +{ + fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); fs::create_directories(metadata_path); } @@ -188,6 +193,8 @@ void DatabaseOnDisk::createTable( const StoragePtr & table, const ASTPtr & query) { + createDirectories(); + const auto & settings = local_context->getSettingsRef(); const auto & create = query->as(); assert(table_name == create.getTable()); @@ -255,7 +262,6 @@ void DatabaseOnDisk::createTable( } commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context); - removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false); } @@ -283,6 +289,8 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora { try { + createDirectories(); + /// Add a table to the map of known tables. attachTable(query_context, query.getTable(), table, getTableDataPath(query)); diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 12656068643..00e7a2850b8 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -64,7 +64,7 @@ public: time_t getObjectMetadataModificationTime(const String & object_name) const override; String getDataPath() const override { return data_path; } - String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } + String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; } String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } @@ -99,6 +99,8 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} + void createDirectories(); + const String metadata_path; const String data_path; }; diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 7d4bb07e8ef..5b5c09b039d 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -52,7 +52,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated"; DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_) - : DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", 
context_) + : DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_) { } From eea2d51cfe908767d6c5602a5c8f3137c757065b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jul 2024 03:07:10 +0200 Subject: [PATCH 0243/2361] Fix fast test --- src/Databases/DatabaseAtomic.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 985d79773aa..c3e93a8a324 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -451,6 +451,9 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin if (mode < LoadingStrictnessLevel::FORCE_RESTORE) return; + if (!fs::exists(path_to_table_symlinks)) + return; + /// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks)) { From 79fc80a3ea25309ca7f1fe7cf61d1be56526cdbc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jul 2024 03:57:48 +0200 Subject: [PATCH 0244/2361] Fix error --- src/Databases/DatabaseOnDisk.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 3a56c124726..832769fd043 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -611,6 +611,10 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const { + fs::path metadata_path = getMetadataPath(); + if (!fs::exists(metadata_path)) + return; + auto process_tmp_drop_metadata_file = [&](const String & file_name) { assert(getUUID() == UUIDHelpers::Nil); @@ -634,7 +638,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat std::vector> metadata_files; fs::directory_iterator dir_end; - for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it) + for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it) { String file_name = dir_it->path().filename(); /// For '.svn', '.gitignore' directory and similar. 
From 4c9238a1dd9433bac7de920b6c0955ecd53a3df9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jul 2024 03:58:27 +0200 Subject: [PATCH 0245/2361] Fix error --- src/Databases/DatabaseOnDisk.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 832769fd043..c6b4c38a656 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -611,7 +611,6 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const { - fs::path metadata_path = getMetadataPath(); if (!fs::exists(metadata_path)) return; From 2b6a47c13ee054ddf63db41445aab8a0b7187340 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jul 2024 06:10:13 +0200 Subject: [PATCH 0246/2361] Fix errors --- src/Databases/DatabaseAtomic.cpp | 1 + src/Databases/DatabaseOnDisk.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index c3e93a8a324..bebb645670b 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -189,6 +189,7 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_ if (exchange && !supportsAtomicRename()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported"); + createDirectories(); waitDatabaseStarted(); auto & other_db = dynamic_cast(to_database); diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index c6b4c38a656..faac4b23701 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -416,6 +416,7 @@ void DatabaseOnDisk::renameTable( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported"); } + createDirectories(); waitDatabaseStarted(); auto table_data_relative_path = getTableDataPath(table_name); From ed68a29c8b3e64fa42ff8ea4f445789f72d39d46 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jul 2024 06:34:33 +0200 Subject: [PATCH 0247/2361] Fix error --- src/Databases/DatabasesOverlay.cpp | 12 ++++++++++++ src/Databases/DatabasesOverlay.h | 1 + 2 files changed, 13 insertions(+) diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 801356b3dd7..02a0aab8230 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -178,6 +178,18 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const return result; } +UUID DatabasesOverlay::getUUID() const +{ + UUID result = UUIDHelpers::Nil; + for (const auto & db : databases) + { + result = db->getUUID(); + if (result != UUIDHelpers::Nil) + break; + } + return result; +} + UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const { UUID result = UUIDHelpers::Nil; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index b0c7e7e4032..5f6d4e601d3 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -41,6 +41,7 @@ public: String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; + UUID getUUID() const override; UUID tryGetTableUUID(const String & table_name) const override; void drop(ContextPtr context) override; From 229ff9af6e1c89b8d6c3e0f29945d0baccde273a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov 
Date: Mon, 1 Jul 2024 06:34:54 +0200 Subject: [PATCH 0248/2361] Remove old comment --- src/Interpreters/StorageID.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h index f9afbc7b98d..ad55d16e284 100644 --- a/src/Interpreters/StorageID.h +++ b/src/Interpreters/StorageID.h @@ -27,7 +27,6 @@ class ASTQueryWithTableAndOutput; class ASTTableIdentifier; class Context; -// TODO(ilezhankin): refactor and merge |ASTTableIdentifier| struct StorageID { String database_name; From f58f6cfa737b1f18bab76ac8869f769b1908ea09 Mon Sep 17 00:00:00 2001 From: skyoct Date: Mon, 1 Jul 2024 18:26:54 +0800 Subject: [PATCH 0249/2361] remove _last_modified field --- src/Storages/VirtualColumnUtils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 960fff371a7..a557848698a 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -112,7 +112,7 @@ void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context) NameSet getVirtualNamesForFileLikeStorage() { - return {"_path", "_file", "_size", "_time", "_etag", "_last_modified"}; + return {"_path", "_file", "_size", "_time", "_etag"}; } VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns) From 85baa91ba4774c449ef72d7f42278397598205b9 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 1 Jul 2024 15:26:19 +0200 Subject: [PATCH 0250/2361] Added spell exception --- .../aspell-ignore/en/aspell-dict.txt | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 2bd949f102d..d100b1bc2d9 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -48,7 +48,6 @@ AutoML Autocompletion AvroConfluent BIGINT -bigrams BIGSERIAL BORO BSON @@ -223,7 +222,6 @@ DatabaseOrdinaryThreadsActive DateTime DateTimes DbCL -deallocated Decrypted Deduplicate Deduplication @@ -295,7 +293,6 @@ FilesystemMainPathUsedBytes FilesystemMainPathUsedINodes FixedString FlameGraph -flameGraph Flink ForEach FreeBSD @@ -1009,7 +1006,6 @@ UncompressedCacheBytes UncompressedCacheCells UnidirectionalEdgeIsValid UniqThetaSketch -unigrams Updatable Uppercased Uptime @@ -1221,6 +1217,7 @@ basename bcrypt benchmarking bfloat +bigrams binlog bitAnd bitCount @@ -1470,6 +1467,7 @@ dbeaver dbgen dbms ddl +deallocated deallocation deallocations debian @@ -1509,11 +1507,11 @@ deserializing destructor destructors detectCharset -detectTonality detectLanguage detectLanguageMixed detectLanguageUnknown detectProgrammingLanguage +detectTonality determinator deterministically dictGet @@ -1529,8 +1527,8 @@ dictIsIn disableProtocols disjunction disjunctions -displaySecretsInShowAndSelect displayName +displaySecretsInShowAndSelect distro divideDecimal dmesg @@ -1580,11 +1578,11 @@ evalMLMethod exFAT expiryMsec exponentialMovingAverage -exponentialmovingaverage exponentialTimeDecayedAvg exponentialTimeDecayedCount exponentialTimeDecayedMax exponentialTimeDecayedSum +exponentialmovingaverage expr exprN extendedVerification @@ -1621,6 +1619,7 @@ firstSignificantSubdomainCustom firstSignificantSubdomainCustomRFC firstSignificantSubdomainRFC fixedstring +flameGraph flamegraph flatbuffers flattenTuple @@ -1803,8 +1802,8 @@ incrementing indexHint indexOf infi -infty inflight 
+infty initcap initcapUTF initialQueryID @@ -1952,9 +1951,9 @@ loghouse london lookups loongarch -lowcardinality lowCardinalityIndices lowCardinalityKeys +lowcardinality lowerUTF lowercased lttb @@ -2262,9 +2261,9 @@ proleptic prometheus proportionsZTest proto -protocol protobuf protobufsingle +protocol proxied pseudorandom pseudorandomize @@ -2516,6 +2515,7 @@ sqlite sqrt src srcReplicas +sshkey stacktrace stacktraces startsWith @@ -2808,6 +2808,7 @@ unescaping unhex unicode unidimensional +unigrams unintuitive uniq uniqCombined From 74a5d56f1a404a7271b5a177e4902d0c319195d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 1 Jul 2024 19:32:22 +0200 Subject: [PATCH 0251/2361] Fix interpolate and add tests --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 4 +- src/Functions/IFunction.cpp | 6 ++ src/Planner/Planner.cpp | 7 +- src/Planner/PlannerActionsVisitor.cpp | 36 ++++++--- src/Planner/PlannerActionsVisitor.h | 9 ++- ..._no_aggregates_and_constant_keys.reference | 4 +- ...nality_group_by_distributed_plan.reference | 55 +++++++++++++ ..._cardinality_group_by_distributed_plan.sql | 80 +++++++++++++++++++ 8 files changed, 185 insertions(+), 16 deletions(-) create mode 100644 tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference create mode 100644 tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 8860050c5b9..165256479ce 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -4100,7 +4100,9 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo auto * column_to_interpolate = interpolate_node_typed.getExpression()->as(); if (!column_to_interpolate) - throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found", + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "INTERPOLATE can work only for identifiers, but {} is found", interpolate_node_typed.getExpression()->formatASTForErrorMessage()); auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName(); diff --git a/src/Functions/IFunction.cpp b/src/Functions/IFunction.cpp index 76ae8f33fbd..8b092ba9b6e 100644 --- a/src/Functions/IFunction.cpp +++ b/src/Functions/IFunction.cpp @@ -80,8 +80,14 @@ ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( if (number_full_columns > 0 || number_low_cardinality_columns > 1) { + /// This should not be possible but currently there are multiple tests in CI failing because of it + /// TODO: Fix those cases, then enable this exception +#if 0 throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. 
Const {}", number_low_cardinality_columns, number_full_columns, number_const_columns); +#else + return nullptr; +#endif } else if (number_low_cardinality_columns == 1) { diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 2d42ed73223..93a4ea01ff0 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -742,7 +742,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, { auto & interpolate_node_typed = interpolate_node->as(); - PlannerActionsVisitor planner_actions_visitor(planner_context); + PlannerActionsVisitor planner_actions_visitor( + planner_context, + /* use_column_identifier_as_action_node_name_, (default value)*/ true, + /// Prefer the INPUT to CONSTANT nodes (actions must be non constant) + /* prefer_const_column_to_input */ false); + auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, interpolate_node_typed.getExpression()); if (expression_to_interpolate_expression_nodes.size() != 1) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 1c9553032c2..a199420c9bd 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -485,19 +485,24 @@ public: return node; } - [[nodiscard]] String addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column) + [[nodiscard]] String + addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column, bool prefer_const_column_to_input) { + chassert(column.column != nullptr); auto it = node_name_to_node.find(node_name); - if (it != node_name_to_node.end() && it->second->column) + if (it != node_name_to_node.end() && (!prefer_const_column_to_input || it->second->column)) return {node_name}; if (it != node_name_to_node.end()) { /// There is a node with this name, but it doesn't have a column /// This likely happens because we executed the query until WithMergeableState with a const node in the - /// WHERE clause. 
As the results of headers are materialized, the column was removed + /// WHERE clause and, as the results of headers are materialized, the column was removed /// Let's add a new column and keep this String dupped_name{node_name + "_dupped"}; + if (node_name_to_node.find(dupped_name) != node_name_to_node.end()) + return dupped_name; + const auto * node = &actions_dag.addColumn(column); node_name_to_node[dupped_name] = node; return dupped_name; @@ -543,9 +548,11 @@ private: class PlannerActionsVisitorImpl { public: - PlannerActionsVisitorImpl(ActionsDAG & actions_dag, + PlannerActionsVisitorImpl( + ActionsDAG & actions_dag, const PlannerContextPtr & planner_context_, - bool use_column_identifier_as_action_node_name_); + bool use_column_identifier_as_action_node_name_, + bool prefer_const_column_to_input_); ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node); @@ -605,14 +612,18 @@ private: const PlannerContextPtr planner_context; ActionNodeNameHelper action_node_name_helper; bool use_column_identifier_as_action_node_name; + bool prefer_const_column_to_input; }; -PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAG & actions_dag, +PlannerActionsVisitorImpl::PlannerActionsVisitorImpl( + ActionsDAG & actions_dag, const PlannerContextPtr & planner_context_, - bool use_column_identifier_as_action_node_name_) + bool use_column_identifier_as_action_node_name_, + bool prefer_const_column_to_input_) : planner_context(planner_context_) , action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_) , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_) + , prefer_const_column_to_input(prefer_const_column_to_input_) { actions_stack.emplace_back(actions_dag, nullptr); } @@ -735,7 +746,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi column.type = constant_type; column.column = column.type->createColumnConst(1, constant_literal); - String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column); + String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, prefer_const_column_to_input); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) @@ -873,7 +884,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma else column.column = std::move(column_set); - String final_name = actions_stack[0].addConstantIfNecessary(column.name, column); + String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, prefer_const_column_to_input); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) @@ -1019,14 +1030,17 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi } -PlannerActionsVisitor::PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_) +PlannerActionsVisitor::PlannerActionsVisitor( + const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_, bool prefer_const_column_to_input_) : planner_context(planner_context_) , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_) + , prefer_const_column_to_input(prefer_const_column_to_input_) {} ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAG & actions_dag, QueryTreeNodePtr expression_node) { - PlannerActionsVisitorImpl actions_visitor_impl(actions_dag, planner_context, 
use_column_identifier_as_action_node_name); + PlannerActionsVisitorImpl actions_visitor_impl( + actions_dag, planner_context, use_column_identifier_as_action_node_name, prefer_const_column_to_input); return actions_visitor_impl.visit(expression_node); } diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 6bb32047327..4bec2d2bb8a 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -27,11 +27,17 @@ using PlannerContextPtr = std::shared_ptr; * During actions build, there is special handling for following functions: * 1. Aggregate functions are added in actions dag as INPUT nodes. Aggregate functions arguments are not added. * 2. For function `in` and its variants, already collected sets from planner context are used. + * 3. When building actions that use CONSTANT nodes, by default we ignore pre-existing INPUTs if those don't have + * a column (a const column always has a column). This is for compatibility with previous headers. We disable this + * behaviour when we explicitly want to override CONSTANT nodes with the input (resolving InterpolateNode for example) */ class PlannerActionsVisitor { public: - explicit PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_ = true); + explicit PlannerActionsVisitor( + const PlannerContextPtr & planner_context_, + bool use_column_identifier_as_action_node_name_ = true, + bool prefer_const_column_to_input_ = true); /** Add actions necessary to calculate expression node into expression dag. * Necessary actions are not added in actions dag output. @@ -42,6 +48,7 @@ public: private: const PlannerContextPtr planner_context; bool use_column_identifier_as_action_node_name = true; + bool prefer_const_column_to_input = true; }; /** Calculate query tree expression node action dag name and add them into node to name map. 
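The interpolate-specific part of this patch is the new flag: when resolving an InterpolateNode, the PlannerActionsVisitor is constructed with the preference for constant columns disabled, so the interpolate expression binds to the input column rather than a pre-registered constant. A minimal sketch of the kind of query that goes through this path; it is an illustration of the feature, not one of the new tests added below:

    -- ORDER BY ... WITH FILL with an INTERPOLATE expression over column x
    SELECT n, x
    FROM (SELECT toInt64(number) AS n, number * 10 AS x FROM numbers(3))
    ORDER BY n WITH FILL FROM 0 TO 6
    INTERPOLATE (x AS x + 1);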
diff --git a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference index 63b8a9d14fc..fc77ed8a241 100644 --- a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference +++ b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference @@ -8,13 +8,13 @@ 40 41 -0 +41 2 42 2 42 43 -0 +43 11 11 diff --git a/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference b/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference new file mode 100644 index 00000000000..1508c24f410 --- /dev/null +++ b/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference @@ -0,0 +1,55 @@ +-- { echoOn } +SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) +FROM system.one +GROUP BY '666'; +6.666.8 +SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) +FROM remote('127.0.0.{1,1}', 'system.one') +GROUP BY '666'; +6.666.8 +-- https://github.com/ClickHouse/ClickHouse/issues/63006 +SELECT + 6, + concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, + concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b +FROM system.one +GROUP BY toNullable(6) + WITH ROLLUP +WITH TOTALS; +6 World666666 \N +6 World666666 \N + +6 World666666 \N +SELECT + 6, + concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, + concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b +FROM remote('127.0.0.1') +GROUP BY toNullable(6) + WITH ROLLUP + WITH TOTALS; +6 World666666 \N +6 World666666 \N + +6 World666666 \N +-- { echoOn } +SELECT + '%', + tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)), + (toDecimal128(99.67, 6), 36, 61, 14) +FROM dist_03174 +WHERE dummy IN (0, '255') +GROUP BY + toNullable(13), + (99.67, 61, toLowCardinality(14)); +% ('%11default10113%AS%id_02%10101010') (99.67,36,61,14) +-- { echoOn } +SELECT + 38, + concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3))) +FROM set_index_not__fuzz_0 +GROUP BY + toNullable(3), + concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3))) +FORMAT Null +SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1; diff --git a/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql b/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql new file mode 100644 index 00000000000..d397d30e285 --- /dev/null +++ b/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql @@ -0,0 +1,80 @@ +-- There are various tests that check that group by keys don't propagate into 
functions replacing const arguments +-- by full (empty) columns + +DROP TABLE IF EXISTS dist_03174; +DROP TABLE IF EXISTS set_index_not__fuzz_0; + +-- https://github.com/ClickHouse/ClickHouse/issues/63006 + +SET allow_experimental_analyzer=1; + +-- { echoOn } +SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) +FROM system.one +GROUP BY '666'; + +SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) +FROM remote('127.0.0.{1,1}', 'system.one') +GROUP BY '666'; + +-- https://github.com/ClickHouse/ClickHouse/issues/63006 +SELECT + 6, + concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, + concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b +FROM system.one +GROUP BY toNullable(6) + WITH ROLLUP +WITH TOTALS; + +SELECT + 6, + concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, + concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b +FROM remote('127.0.0.1') +GROUP BY toNullable(6) + WITH ROLLUP + WITH TOTALS; + +-- https://github.com/ClickHouse/ClickHouse/issues/64945 +-- { echoOff } +CREATE TABLE dist_03174 AS system.one ENGINE = Distributed(test_cluster_two_shards, system, one, dummy); + +-- { echoOn } +SELECT + '%', + tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)), + (toDecimal128(99.67, 6), 36, 61, 14) +FROM dist_03174 +WHERE dummy IN (0, '255') +GROUP BY + toNullable(13), + (99.67, 61, toLowCardinality(14)); + +-- Parallel replicas +-- { echoOff } +CREATE TABLE set_index_not__fuzz_0 +( + `name` String, + `status` Enum8('alive' = 0, 'rip' = 1), + INDEX idx_status status TYPE set(2) GRANULARITY 1 +) +ENGINE = MergeTree() +ORDER BY name; + +INSERT INTO set_index_not__fuzz_0 SELECT * FROM generateRandom() LIMIT 10; + +-- { echoOn } +SELECT + 38, + concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3))) +FROM set_index_not__fuzz_0 +GROUP BY + toNullable(3), + concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3))) +FORMAT Null +SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1; + +-- { echoOff } +DROP TABLE IF EXISTS dist_03174; +DROP TABLE IF EXISTS set_index_not__fuzz_0; From f8a117affc055fc15396374ed8b258b13b5adc3f Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 1 Jul 2024 20:23:39 +0000 Subject: [PATCH 0252/2361] Split the test --- ...efreshable_materialized_views_1.reference} | 30 --- .../02932_refreshable_materialized_views_1.sh | 179 +++++++++++++++++ ...refreshable_materialized_views_2.reference | 30 +++ ...02932_refreshable_materialized_views_2.sh} | 184 ++---------------- 4 files changed, 222 insertions(+), 201 deletions(-) rename 
tests/queries/0_stateless/{02932_refreshable_materialized_views.reference => 02932_refreshable_materialized_views_1.reference} (66%) create mode 100755 tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh create mode 100644 tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference rename tests/queries/0_stateless/{02932_refreshable_materialized_views.sh => 02932_refreshable_materialized_views_2.sh} (54%) diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference similarity index 66% rename from tests/queries/0_stateless/02932_refreshable_materialized_views.reference rename to tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference index 3c6c8b2d778..bfc6add90a7 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference @@ -30,33 +30,3 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n( <17: chain-refreshed> b Scheduled 2062-01-01 00:00:00 <18: removed dependency> b Scheduled [] 2062-03-03 03:03:03 2064-01-01 00:00:00 5 CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a -<19: exception> 1 -<20: unexception> 1 -<21: rename> 1 -<22: rename> d Finished -<23: simple refresh> 1 -<24: rename during refresh> 1 -<25: rename during refresh> f Running -<27: cancelled> f Scheduled -<28: drop during refresh> 0 0 -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 AS x -<29: randomize> 1 1 -CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src -<30: to existing table> 10 -<31: to existing table> 10 -<31: to existing table> 20 -<31.5: will retry> Error 1 -<31.6: did retry> 10 -<32: empty> i Scheduled Unknown 0 -<32: empty> j Scheduled Finished 0 -<34: append> 10 -<35: append> 10 -<35: append> 20 -<35: append> 30 -<36: not append> 20 -<36: not append> 30 -<37: append chain> 100 -<38: append chain> 100 -<38: append chain> 100 -<38: append chain> 200 -creating MergeTree without ORDER BY failed, as expected diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh new file mode 100755 index 00000000000..03d752b995d --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash +# Tags: atomic-database + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Set session timezone to UTC to make all DateTime formatting and parsing use UTC, because refresh +# scheduling is done in UTC. +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" + +$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" + + +# Basic refreshing. 
+$CLICKHOUSE_CLIENT -nq " + create materialized view a + refresh after 2 second + engine Memory + empty + as select number as x from numbers(2) union all select rand64() as x; + select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; + show create a;" +# Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] +do + sleep 0.5 +done +start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" +# Check table contents. +$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" +# Wait for table contents to change. +res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" +while : +do + res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + [ "$res2" == "$res1" ] || break + sleep 0.5 +done +# Wait for another change. +while : +do + res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + [ "$res3" == "$res2" ] || break + sleep 0.5 +done +# Check that the two changes were at least 1 second apart, in particular that we're not refreshing +# like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer +# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above. +$CLICKHOUSE_CLIENT -nq " + select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); + select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" + +# Create a source table from which views will read. +$CLICKHOUSE_CLIENT -nq " + create table src (x Int8) engine Memory as select 1;" + +# Switch to fake clock, change refresh schedule, change query. +$CLICKHOUSE_CLIENT -nq " + system test view a set fake time '2050-01-01 00:00:01'; + system wait view a; + system refresh view a; + system wait view a; + select '<4.1: fake clock>', status, last_refresh_time, next_refresh_time from refreshes; + alter table a modify refresh every 2 year; + alter table a modify query select x*2 as x from src; + select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; + show create a;" +# Advance time to trigger the refresh. +$CLICKHOUSE_CLIENT -nq " + select '<5: no refresh>', count() from a; + system test view a set fake time '2052-02-03 04:05:06';" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<6: refreshed>', * from a; + select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" + +# Create a dependent view, refresh it once. +$CLICKHOUSE_CLIENT -nq " + create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; + show create b; + system test view b set fake time '2052-11-11 11:11:11'; + system refresh view b; + system wait view b; + select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" +# Next refresh shouldn't start until the dependency refreshes. 
+$CLICKHOUSE_CLIENT -nq " + select '<8: refreshed>', * from b; + select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; + system test view b set fake time '2054-01-24 23:22:21';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] +do + sleep 0.5 +done + +# Drop the source table, check that refresh fails and doesn't leave a temp table behind. +$CLICKHOUSE_CLIENT -nq " + select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); + drop table src; + system refresh view a;" +$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " + select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" + +# Create the source table again, check that refresh succeeds (in particular that tables are looked +# up by name rather than uuid). +$CLICKHOUSE_CLIENT -nq " + select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; + create table src (x Int16) engine Memory as select 2; + system test view a set fake time '2054-01-01 00:00:01';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] +do + sleep 0.5 +done +# Both tables should've refreshed. +$CLICKHOUSE_CLIENT -nq " + select '<11: chain-refreshed a>', * from a; + select '<12: chain-refreshed b>', * from b; + select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" + +# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to +# catch up to the same cycle. +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2059-01-01 00:00:00'; + system refresh view b;" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2061-01-01 00:00:00'; + system test view a set fake time '2057-01-01 00:00:00';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] +do + sleep 0.5 +done +sleep 1 +$CLICKHOUSE_CLIENT -nq " + select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; + truncate src; + insert into src values (3); + system test view a set fake time '2060-02-02 02:02:02';" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<15: chain-refreshed a>', * from a; + select '<16: chain-refreshed b>', * from b; + select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" + +# Get to WaitingForDependencies state and remove the depencency. 
+$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2062-03-03 03:03:03'" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + alter table b modify refresh every 2 year" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; + show create b;" + +$CLICKHOUSE_CLIENT -nq " + drop table src; + drop table a; + drop table b; + drop table refreshes;" diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference new file mode 100644 index 00000000000..eb4a0498260 --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference @@ -0,0 +1,30 @@ +<19: exception> 1 +<20: unexception> 1 +<21: rename> 1 +<22: rename> d Finished +<23: simple refresh> 1 +<24: rename during refresh> 1 +<25: rename during refresh> f Running +<27: cancelled> f Scheduled +<28: drop during refresh> 0 0 +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 AS x +<29: randomize> 1 1 +CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src +<30: to existing table> 10 +<31: to existing table> 10 +<31: to existing table> 20 +<31.5: will retry> Error 1 +<31.6: did retry> 10 +<32: empty> i Scheduled Unknown 0 +<32: empty> j Scheduled Finished 0 +<34: append> 10 +<35: append> 10 +<35: append> 20 +<35: append> 30 +<36: not append> 20 +<36: not append> 30 +<37: append chain> 100 +<38: append chain> 100 +<38: append chain> 100 +<38: append chain> 200 +creating MergeTree without ORDER BY failed, as expected diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh similarity index 54% rename from tests/queries/0_stateless/02932_refreshable_materialized_views.sh rename to tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh index d5b07287936..b62ea9fb638 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh @@ -15,172 +15,14 @@ CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_ma $CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" -# Basic refreshing. -$CLICKHOUSE_CLIENT -nq " - create materialized view a - refresh after 2 second - engine Memory - empty - as select number as x from numbers(2) union all select rand64() as x; - select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; - show create a;" -# Wait for any refresh. 
(xargs trims the string and turns \t and \n into spaces) -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] -do - sleep 0.1 -done -start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" -# Check table contents. -$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" -# Wait for table contents to change. -res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" -while : -do - res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" - [ "$res2" == "$res1" ] || break - sleep 0.1 -done -# Wait for another change. -while : -do - res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" - [ "$res3" == "$res2" ] || break - sleep 0.1 -done -# Check that the two changes were at least 1 second apart, in particular that we're not refreshing -# like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer -# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above. -$CLICKHOUSE_CLIENT -nq " - select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); - select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" - -# Create a source table from which views will read. -$CLICKHOUSE_CLIENT -nq " - create table src (x Int8) engine Memory as select 1;" - -# Switch to fake clock, change refresh schedule, change query. -$CLICKHOUSE_CLIENT -nq " - system test view a set fake time '2050-01-01 00:00:01'; - system wait view a; - system refresh view a; - system wait view a; - select '<4.1: fake clock>', status, last_refresh_time, next_refresh_time from refreshes; - alter table a modify refresh every 2 year; - alter table a modify query select x*2 as x from src; - select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; - show create a;" -# Advance time to trigger the refresh. -$CLICKHOUSE_CLIENT -nq " - select '<5: no refresh>', count() from a; - system test view a set fake time '2052-02-03 04:05:06';" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<6: refreshed>', * from a; - select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" - -# Create a dependent view, refresh it once. -$CLICKHOUSE_CLIENT -nq " - create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; - show create b; - system test view b set fake time '2052-11-11 11:11:11'; - system refresh view b; - system wait view b; - select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" -# Next refresh shouldn't start until the dependency refreshes. -$CLICKHOUSE_CLIENT -nq " - select '<8: refreshed>', * from b; - select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; - system test view b set fake time '2054-01-24 23:22:21';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] -do - sleep 0.1 -done - -# Drop the source table, check that refresh fails and doesn't leave a temp table behind. 
-$CLICKHOUSE_CLIENT -nq " - select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); - drop table src; - system refresh view a;" -$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" -$CLICKHOUSE_CLIENT -nq " - select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" - -# Create the source table again, check that refresh succeeds (in particular that tables are looked -# up by name rather than uuid). -$CLICKHOUSE_CLIENT -nq " - select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; - create table src (x Int16) engine Memory as select 2; - system test view a set fake time '2054-01-01 00:00:01';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] -do - sleep 0.1 -done -# Both tables should've refreshed. -$CLICKHOUSE_CLIENT -nq " - select '<11: chain-refreshed a>', * from a; - select '<12: chain-refreshed b>', * from b; - select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" - -# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to -# catch up to the same cycle. -$CLICKHOUSE_CLIENT -nq " - system test view b set fake time '2059-01-01 00:00:00'; - system refresh view b;" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - system test view b set fake time '2061-01-01 00:00:00'; - system test view a set fake time '2057-01-01 00:00:00';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] -do - sleep 0.1 -done -sleep 1 -$CLICKHOUSE_CLIENT -nq " - select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; - truncate src; - insert into src values (3); - system test view a set fake time '2060-02-02 02:02:02';" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<15: chain-refreshed a>', * from a; - select '<16: chain-refreshed b>', * from b; - select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" - -# Get to WaitingForDependencies state and remove the depencency. -$CLICKHOUSE_CLIENT -nq " - system test view b set fake time '2062-03-03 03:03:03'" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - alter table b modify refresh every 2 year" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; - show create b;" - # Select from a table that doesn't exist, get an exception. 
$CLICKHOUSE_CLIENT -nq " - drop table a; - drop table b; + create table src (x Int8) engine Memory as select 1; create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; drop table src;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Error' ] do - sleep 0.1 + sleep 0.5 done # Check exception, create src, expect successful refresh. $CLICKHOUSE_CLIENT -nq " @@ -189,7 +31,7 @@ $CLICKHOUSE_CLIENT -nq " system refresh view c;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do - sleep 0.1 + sleep 0.5 done # Rename table. $CLICKHOUSE_CLIENT -nq " @@ -207,7 +49,7 @@ $CLICKHOUSE_CLIENT -nq " create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do - sleep 0.1 + sleep 0.5 done # Stop refreshes. $CLICKHOUSE_CLIENT -nq " @@ -215,7 +57,7 @@ $CLICKHOUSE_CLIENT -nq " system stop view e;" while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] do - sleep 0.1 + sleep 0.5 done # Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure # we wait for a slow refresh, not a previous fast one.) @@ -224,7 +66,7 @@ $CLICKHOUSE_CLIENT -nq " system start view e;" while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] do - sleep 0.1 + sleep 0.5 done # Rename. $CLICKHOUSE_CLIENT -nq " @@ -238,7 +80,7 @@ $CLICKHOUSE_CLIENT -nq " system cancel view f;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Cancelled' ] do - sleep 0.1 + sleep 0.5 done # Check that another refresh doesn't immediately start after the cancelled one. sleep 1 @@ -247,7 +89,7 @@ $CLICKHOUSE_CLIENT -nq " system refresh view f;" while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] do - sleep 0.1 + sleep 0.5 done # Drop. 
$CLICKHOUSE_CLIENT -nq " @@ -262,7 +104,7 @@ $CLICKHOUSE_CLIENT -nq " system test view g set fake time '2050-02-03 15:30:13';" while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] do - sleep 0.1 + sleep 0.5 done $CLICKHOUSE_CLIENT -nq " with '2050-02-10 04:00:00'::DateTime as expected @@ -278,14 +120,14 @@ $CLICKHOUSE_CLIENT -nq " show create h;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] do - sleep 0.1 + sleep 0.5 done $CLICKHOUSE_CLIENT -nq " select '<30: to existing table>', * from dest; insert into src values (2);" while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] do - sleep 0.1 + sleep 0.5 done $CLICKHOUSE_CLIENT -nq " select '<31: to existing table>', * from dest; @@ -304,7 +146,7 @@ $CLICKHOUSE_CLIENT -nq " drop table src2;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] do - sleep 0.1 + sleep 0.5 done $CLICKHOUSE_CLIENT -nq " select '<31.6: did retry>', x from h2; @@ -316,7 +158,7 @@ $CLICKHOUSE_CLIENT -nq " create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);" while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] do - sleep 0.1 + sleep 0.5 done $CLICKHOUSE_CLIENT -nq " select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view; From fcb01277e143011154c659b9461891e6a28017c7 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 1 Jul 2024 20:25:16 +0000 Subject: [PATCH 0253/2361] July --- src/Core/SettingsChangesHistory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index b0c4f256e45..c20a1dee3c2 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -87,6 +87,7 @@ namespace SettingsChangesHistory static const std::map settings_changes_history = { {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, + {"allow_materialized_view_with_bad_select", true, false, "Stricter validation in CREATE MATERIALIZED VIEW"}, }}, {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, @@ -117,7 +118,6 @@ static const std::map Date: Tue, 2 Jul 2024 03:10:26 +0000 Subject: [PATCH 0254/2361] Fix unrelated setting --- src/Core/SettingsChangesHistory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 2806ccf98dc..c17e6642ac2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -60,6 +60,7 @@ static std::initializer_list Date: Tue, 2 Jul 2024 05:32:12 +0200 Subject: [PATCH 0255/2361] Fix tests --- src/Databases/DatabaseAtomic.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index bebb645670b..c06fc98d0b9 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -195,6 +195,9 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & 
table_ auto & other_db = dynamic_cast(to_database); bool inside_database = this == &other_db; + if (!inside_database) + other_db.createDirectories(); + String old_metadata_path = getObjectMetadataPath(table_name); String new_metadata_path = to_database.getObjectMetadataPath(to_table_name); From 0e559ff7b94c55fe35d0db7174e523180f46e998 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 2 Jul 2024 11:50:43 +0200 Subject: [PATCH 0256/2361] Better name for flag --- src/Planner/Planner.cpp | 2 +- src/Planner/PlannerActionsVisitor.cpp | 26 ++++++++++++++------------ src/Planner/PlannerActionsVisitor.h | 4 ++-- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 93a4ea01ff0..260fbabf26a 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -746,7 +746,7 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, planner_context, /* use_column_identifier_as_action_node_name_, (default value)*/ true, /// Prefer the INPUT to CONSTANT nodes (actions must be non constant) - /* prefer_const_column_to_input */ false); + /* always_use_const_column_for_constant_nodes */ false); auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, interpolate_node_typed.getExpression()); diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index a199420c9bd..e9e1f4edcc2 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -485,12 +485,12 @@ public: return node; } - [[nodiscard]] String - addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column, bool prefer_const_column_to_input) + [[nodiscard]] String addConstantIfNecessary( + const std::string & node_name, const ColumnWithTypeAndName & column, bool always_use_const_column_for_constant_nodes) { chassert(column.column != nullptr); auto it = node_name_to_node.find(node_name); - if (it != node_name_to_node.end() && (!prefer_const_column_to_input || it->second->column)) + if (it != node_name_to_node.end() && (!always_use_const_column_for_constant_nodes || it->second->column)) return {node_name}; if (it != node_name_to_node.end()) @@ -552,7 +552,7 @@ public: ActionsDAG & actions_dag, const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_, - bool prefer_const_column_to_input_); + bool always_use_const_column_for_constant_nodes_); ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node); @@ -612,18 +612,18 @@ private: const PlannerContextPtr planner_context; ActionNodeNameHelper action_node_name_helper; bool use_column_identifier_as_action_node_name; - bool prefer_const_column_to_input; + bool always_use_const_column_for_constant_nodes; }; PlannerActionsVisitorImpl::PlannerActionsVisitorImpl( ActionsDAG & actions_dag, const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_, - bool prefer_const_column_to_input_) + bool always_use_const_column_for_constant_nodes_) : planner_context(planner_context_) , action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_) , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_) - , prefer_const_column_to_input(prefer_const_column_to_input_) + , always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_) { actions_stack.emplace_back(actions_dag, nullptr); } @@ -746,7 +746,7 @@ 
PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi column.type = constant_type; column.column = column.type->createColumnConst(1, constant_literal); - String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, prefer_const_column_to_input); + String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, always_use_const_column_for_constant_nodes); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) @@ -884,7 +884,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma else column.column = std::move(column_set); - String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, prefer_const_column_to_input); + String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, always_use_const_column_for_constant_nodes); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) @@ -1031,16 +1031,18 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi } PlannerActionsVisitor::PlannerActionsVisitor( - const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_, bool prefer_const_column_to_input_) + const PlannerContextPtr & planner_context_, + bool use_column_identifier_as_action_node_name_, + bool always_use_const_column_for_constant_nodes_) : planner_context(planner_context_) , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_) - , prefer_const_column_to_input(prefer_const_column_to_input_) + , always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_) {} ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAG & actions_dag, QueryTreeNodePtr expression_node) { PlannerActionsVisitorImpl actions_visitor_impl( - actions_dag, planner_context, use_column_identifier_as_action_node_name, prefer_const_column_to_input); + actions_dag, planner_context, use_column_identifier_as_action_node_name, always_use_const_column_for_constant_nodes); return actions_visitor_impl.visit(expression_node); } diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 4bec2d2bb8a..1dbd149bc4b 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -37,7 +37,7 @@ public: explicit PlannerActionsVisitor( const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_ = true, - bool prefer_const_column_to_input_ = true); + bool always_use_const_column_for_constant_nodes_ = true); /** Add actions necessary to calculate expression node into expression dag. * Necessary actions are not added in actions dag output. @@ -48,7 +48,7 @@ public: private: const PlannerContextPtr planner_context; bool use_column_identifier_as_action_node_name = true; - bool prefer_const_column_to_input = true; + bool always_use_const_column_for_constant_nodes = true; }; /** Calculate query tree expression node action dag name and add them into node to name map. 
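The next patch starts the Hive-style partitioning work: per-storage settings (file_hive_partitioning, url_hive_partitioning, s3_hive_partitioning, azure_blob_storage_hive_partitioning, hdfs_hive_partitioning) plus path-aware virtual columns in getVirtualsForFileLikeStorage. A minimal sketch of the intended usage, assuming the key=value path segments from the new test data become readable columns, which is what the directory layout under data_hive/partitioning suggests:

    -- assumption: with the setting enabled, column0/column1 are derived from the path
    SET file_hive_partitioning = 1;
    SELECT column0, column1, count()
    FROM file('data_hive/partitioning/column0=Elizabeth/column1=*/sample.parquet', 'Parquet')
    GROUP BY column0, column1;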
From 04b8b1e76c467ae527202a75141ac8981a1c4ac5 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Tue, 2 Jul 2024 14:01:19 +0000 Subject: [PATCH 0257/2361] initial commit for Hive-style partitioning --- src/Core/Settings.h | 5 + src/Core/SettingsChangesHistory.h | 5 + .../ObjectStorage/StorageObjectStorage.cpp | 32 ++- .../StorageObjectStorageSource.cpp | 14 +- src/Storages/StorageFile.cpp | 39 +++- src/Storages/StorageURL.cpp | 16 +- src/Storages/VirtualColumnUtils.cpp | 52 ++++- src/Storages/VirtualColumnUtils.h | 7 +- .../__init__.py | 0 .../configs/cluster_azure.xml | 39 ++++ .../configs/cluster_hdfs.xml | 33 +++ .../configs/disable_profilers_azure.xml | 9 + .../configs/macro_hdfs.xml | 5 + .../configs/named_collections_azure.xml | 14 ++ .../configs/schema_cache_azure.xml | 3 + .../configs/schema_cache_hdfs.xml | 3 + .../configs/users_azure.xml | 9 + .../test_azure.py | 204 ++++++++++++++++++ .../test_hdfs.py | 81 +++++++ .../03203_hive_style_partitioning.reference | 96 +++++++++ .../03203_hive_style_partitioning.sh | 93 ++++++++ .../column1=Gordon/sample.parquet | Bin 0 -> 1308 bytes .../column1=Schmidt/sample.parquet | Bin 0 -> 1308 bytes .../column0=Elizabeth/sample.parquet | Bin 0 -> 1308 bytes .../sample.parquet | Bin 0 -> 1308 bytes .../column1=Gordon/sample.parquet | Bin 0 -> 1308 bytes .../column1=Schmidt/sample.parquet | Bin 0 -> 1308 bytes .../coumn0=Elizabeth/sample.parquet | Bin 0 -> 1308 bytes .../sample.parquet | Bin 0 -> 1308 bytes 29 files changed, 749 insertions(+), 10 deletions(-) create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/__init__.py create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py create mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py create mode 100644 tests/queries/0_stateless/03203_hive_style_partitioning.reference create mode 100755 tests/queries/0_stateless/03203_hive_style_partitioning.sh create mode 100644 tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet create mode 100644 tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet create mode 100644 tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/sample.parquet create mode 100644 tests/queries/0_stateless/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet create mode 100644 tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Gordon/sample.parquet create mode 100644 tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Schmidt/sample.parquet create mode 100644 
tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/sample.parquet create mode 100644 tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 14fe0924b40..738c0129d2d 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1106,6 +1106,11 @@ class IColumn; M(Bool, input_format_tsv_skip_trailing_empty_lines, false, "Skip trailing empty lines in TSV format", 0) \ M(Bool, input_format_custom_skip_trailing_empty_lines, false, "Skip trailing empty lines in CustomSeparated format", 0) \ M(Bool, input_format_tsv_crlf_end_of_line, false, "If it is set true, file function will read TSV format with \\r\\n instead of \\n.", 0) \ + M(Bool, file_hive_partitioning, false, "Allows to use hive partitioning for file format", 0)\ + M(Bool, url_hive_partitioning, false, "Allows to use hive partitioning for url format", 0)\ + M(Bool, s3_hive_partitioning, false, "Allows to use hive partitioning for s3 format", 0)\ + M(Bool, azure_blob_storage_hive_partitioning, false, "Allows to use hive partitioning for AzureBlobStorage format", 0)\ + M(Bool, hdfs_hive_partitioning, false, "Allows to use hive partitioning for hdfs format", 0)\ \ M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \ \ diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index 4ac25a649b7..dd778149674 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -118,6 +118,11 @@ static const std::map +#include #include #include @@ -32,6 +33,19 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } + +bool checkIfHiveSettingEnabled(const ContextPtr & context, const std::string & storage_type_name) +{ + if (storage_type_name == "s3") + return context->getSettings().s3_hive_partitioning; + else if (storage_type_name == "hdfs") + return context->getSettings().hdfs_hive_partitioning; + else if (storage_type_name == "azure") + return context->getSettings().azure_blob_storage_hive_partitioning; + else + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported storage type: {}", storage_type_name); +} + StorageObjectStorage::StorageObjectStorage( ConfigurationPtr configuration_, ObjectStoragePtr object_storage_, @@ -60,7 +74,23 @@ StorageObjectStorage::StorageObjectStorage( metadata.setConstraints(constraints_); metadata.setComment(comment); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns())); + auto file_iterator = StorageObjectStorageSource::createFileIterator( + configuration, + object_storage, + distributed_processing_, + context, + {}, // predicate + metadata.getColumns().getAll(), // virtual_columns + nullptr, // read_keys + {} // file_progress_callback + ); + + Strings paths; + + if (checkIfHiveSettingEnabled(context, configuration->getTypeName())) + if (auto file = file_iterator->next(0)) + paths = {file->getPath()}; + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), paths)); setInMemoryMetadata(metadata); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index aef783fc3c4..2741cfecf6b 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -13,6 +13,7 @@ #include #include #include +#include namespace fs = std::filesystem; @@ -195,13 +196,24 @@ Chunk 
StorageObjectStorageSource::generate() const auto & object_info = reader.getObjectInfo(); const auto & filename = object_info->getFileName(); chassert(object_info->metadata); + + auto hive_map = VirtualColumnUtils::parsePartitionMapFromPath(object_info->getPath()); + bool contains_virtual_column = std::any_of(hive_map.begin(), hive_map.end(), + [&](const auto& pair) { + return read_from_format_info.requested_virtual_columns.contains(pair.first); + }); + + if (!contains_virtual_column) + hive_map.clear(); // If we cannot find any virual column in requested, we don't add any of them to chunk + VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, read_from_format_info.requested_virtual_columns, { .path = getUniqueStoragePathIdentifier(*configuration, *object_info, false), .size = object_info->metadata->size_bytes, .filename = &filename, - .last_modified = object_info->metadata->last_modified + .last_modified = object_info->metadata->last_modified, + .hive_partitioning_map = hive_map }); return chunk; } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 7f39ff615f0..0c32f29cb34 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -1095,7 +1096,11 @@ void StorageFile::setStorageMetadata(CommonArguments args) storage_metadata.setConstraints(args.constraints); storage_metadata.setComment(args.comment); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + + Strings paths_for_virtuals; + if (args.getContext()->getSettingsRef().file_hive_partitioning) + paths_for_virtuals = paths; + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), paths_for_virtuals)); } @@ -1437,6 +1442,15 @@ Chunk StorageFileSource::generate() chunk_size = input_format->getApproxBytesReadForChunk(); progress(num_rows, chunk_size ? chunk_size : chunk.bytes()); + std::map hive_map; + if (getContext()->getSettingsRef().file_hive_partitioning) + { + hive_map = VirtualColumnUtils::parsePartitionMapFromPath(current_path); + + for (const auto& item : hive_map) + requested_virtual_columns.push_back(NameAndTypePair(item.first, std::make_shared())); + } + /// Enrich with virtual columns. VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, requested_virtual_columns, @@ -1444,7 +1458,8 @@ Chunk StorageFileSource::generate() .path = current_path, .size = current_file_size, .filename = (filename_override.has_value() ? 
&filename_override.value() : nullptr), - .last_modified = current_file_last_modified + .last_modified = current_file_last_modified, + .hive_partitioning_map = hive_map }); return chunk; @@ -1621,6 +1636,16 @@ void ReadFromFile::createIterator(const ActionsDAG::Node * predicate) storage->distributed_processing); } +void addPartitionColumnsToInfoHeader(Strings paths, ReadFromFormatInfo & info) +{ + for (const auto& path : paths) + { + auto map = VirtualColumnUtils::parsePartitionMapFromPath(path); + for (const auto& item : map) + info.source_header.insertUnique(ColumnWithTypeAndName(std::make_shared(), item.first)); + } +} + void ReadFromFile::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { createIterator(nullptr); @@ -1628,10 +1653,20 @@ void ReadFromFile::initializePipeline(QueryPipelineBuilder & pipeline, const Bui size_t num_streams = max_num_streams; size_t files_to_read = 0; + Strings paths; if (storage->archive_info) + { files_to_read = storage->archive_info->paths_to_archives.size(); + paths = storage->archive_info->paths_to_archives; + } else + { files_to_read = storage->paths.size(); + paths = storage->paths; + } + + if (getContext()->getSettingsRef().file_hive_partitioning) + addPartitionColumnsToInfoHeader(paths, info); if (max_num_streams > files_to_read) num_streams = files_to_read; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 895da028fc2..f6374701fc2 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -151,7 +152,11 @@ IStorageURLBase::IStorageURLBase( storage_metadata.setConstraints(constraints_); storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + + Strings uri_for_partitioning; + if (context_->getSettingsRef().url_hive_partitioning) + uri_for_partitioning = {uri}; + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), uri_for_partitioning)); } @@ -410,12 +415,17 @@ Chunk StorageURLSource::generate() size_t chunk_size = 0; if (input_format) chunk_size = input_format->getApproxBytesReadForChunk(); + std::map hive_map; + if (getContext()->getSettingsRef().url_hive_partitioning) + hive_map = VirtualColumnUtils::parsePartitionMapFromPath(curr_uri.getPath()); + progress(num_rows, chunk_size ? chunk_size : chunk.bytes()); VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, requested_virtual_columns, { .path = curr_uri.getPath(), - .size = current_file_size + .size = current_file_size, + .hive_partitioning_map = hive_map }); return chunk; } @@ -1170,6 +1180,7 @@ void ReadFromURL::createIterator(const ActionsDAG::Node * predicate) void ReadFromURL::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { createIterator(nullptr); + const auto & settings = context->getSettingsRef(); if (is_empty_glob) { @@ -1180,7 +1191,6 @@ void ReadFromURL::initializePipeline(QueryPipelineBuilder & pipeline, const Buil Pipes pipes; pipes.reserve(num_streams); - const auto & settings = context->getSettingsRef(); const size_t max_parsing_threads = num_streams >= settings.max_parsing_threads ? 
1 : (settings.max_parsing_threads / num_streams); for (size_t i = 0; i < num_streams; ++i) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 778c9e13adb..0b79e3b7a16 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -1,4 +1,3 @@ -#include #include #include #include @@ -37,6 +36,7 @@ #include #include +#include #include #include "Functions/FunctionsLogical.h" #include "Functions/IFunction.h" @@ -115,7 +115,22 @@ NameSet getVirtualNamesForFileLikeStorage() return {"_path", "_file", "_size", "_time"}; } -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns) +Strings parseVirtualColumnNameFromPath(const std::string & path) +{ + std::string pattern = "/([^/]+)=([^/]+)"; + // Map to store the key-value pairs + std::map key_values; + + re2::StringPiece input_piece(path); + std::string key; + Strings result; + while (RE2::FindAndConsume(&input_piece, pattern, &key)) + result.push_back(key); + + return result; +} + +VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, Strings paths) { VirtualColumnsDescription desc; @@ -132,6 +147,13 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription add_virtual("_size", makeNullable(std::make_shared())); add_virtual("_time", makeNullable(std::make_shared())); + for (const auto& path : paths) + { + auto names = parseVirtualColumnNameFromPath(path); + for (const auto& name : names) + add_virtual("_" + name, std::make_shared(std::make_shared())); + } + return desc; } @@ -178,6 +200,8 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const { if (column.name == "_file" || column.name == "_path") block.insert({column.type->createColumn(), column.type, column.name}); + if (!getVirtualNamesForFileLikeStorage().contains(column.name)) + block.insert({column.type->createColumn(), column.type, column.name}); } block.insert({ColumnUInt64::create(), std::make_shared(), "_idx"}); @@ -189,6 +213,21 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const return block.getByName("_idx").column; } +std::map parsePartitionMapFromPath(const std::string & path) +{ + std::string pattern = "/([^/]+)=([^/]+)"; // Regex to capture key=value pairs + // Map to store the key-value pairs + std::map key_values; + + re2::StringPiece input_piece(path); + std::string key; + std::string value; + while (RE2::FindAndConsume(&input_piece, pattern, &key, &value)) + key_values["_" + key] = value; + + return key_values; +} + void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, VirtualsForFileLikeStorage virtual_values) @@ -226,6 +265,15 @@ void addRequestedFileLikeStorageVirtualsToChunk( else chunk.addColumn(virtual_column.type->createColumnConstWithDefaultValue(chunk.getNumRows())->convertToFullColumnIfConst()); } + else + { + auto it = virtual_values.hive_partitioning_map.find(virtual_column.getNameInStorage()); + if (it != virtual_values.hive_partitioning_map.end()) + { + chunk.addColumn(virtual_column.getTypeInStorage()->createColumnConst(chunk.getNumRows(), it->second)->convertToFullColumnIfConst()); + virtual_values.hive_partitioning_map.erase(it); + } + } } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index fbfbdd6c6cc..a03d4c7447f 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -6,6 +6,8 @@ #include 
#include +#include +#include #include @@ -47,7 +49,7 @@ auto extractSingleValueFromBlock(const Block & block, const String & name) } NameSet getVirtualNamesForFileLikeStorage(); -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns); +VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, Strings paths = {}); ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); @@ -74,9 +76,12 @@ struct VirtualsForFileLikeStorage std::optional size { std::nullopt }; const String * filename { nullptr }; std::optional last_modified { std::nullopt }; + std::map hive_partitioning_map; }; +std::map parsePartitionMapFromPath(const std::string & path); + void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, VirtualsForFileLikeStorage virtual_values); diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/__init__.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml new file mode 100644 index 00000000000..ffa4673c9ee --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml @@ -0,0 +1,39 @@ + + + + + + node_0 + 9000 + + + node_1 + 9000 + + + node_2 + 9000 + + + + + + + + node_0 + 9000 + + + + + node_1 + 19000 + + + + + + + simple_cluster + + \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml new file mode 100644 index 00000000000..b99b21ea40b --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml @@ -0,0 +1,33 @@ + + + + + + node1 + 9000 + + + + + node1 + 19000 + + + + + + + + 127.0.0.1 + 9000 + + + + + 127.0.0.2 + 9000 + + + + + diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml new file mode 100644 index 00000000000..a39badbf8ec --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml @@ -0,0 +1,9 @@ + + + + + 0 + 0 + + + diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml new file mode 100644 index 00000000000..c2e11b47a5e --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml @@ -0,0 +1,5 @@ + + + test_cluster_two_shards + + \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml new file mode 100644 index 00000000000..bd7f9ff97f1 --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml @@ -0,0 +1,14 @@ + + + + cont + test_simple_write_named.csv + key UInt64, data String + CSV + + + devstoreaccount1 + 
Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== + + + diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml new file mode 100644 index 00000000000..e2168ecd06d --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml @@ -0,0 +1,3 @@ + + 2 + \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml new file mode 100644 index 00000000000..37639649b5f --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml @@ -0,0 +1,3 @@ + + 2 + \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml new file mode 100644 index 00000000000..4b6ba057ecb --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml @@ -0,0 +1,9 @@ + + + + + default + 1 + + + diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py new file mode 100644 index 00000000000..c9b2c9fec2e --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 + +import pytest +import time + +from helpers.cluster import ClickHouseCluster, is_arm +import re + +from azure.storage.blob import BlobServiceClient +from helpers.cluster import ClickHouseCluster, ClickHouseInstance + +if is_arm(): + pytestmark = pytest.mark.skip + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node", + main_configs=["configs/named_collections_azure.xml", "configs/schema_cache_azure.xml"], + user_configs=["configs/disable_profilers_azure.xml", "configs/users_azure.xml"], + with_azurite=True, + ) + cluster.start() + container_client = cluster.blob_service_client.get_container_client("cont") + container_client.create_container() + yield cluster + finally: + cluster.shutdown() + + +def azure_query( + node, query, expect_error=False, try_num=10, settings={}, query_on_retry=None +): + for i in range(try_num): + try: + if expect_error: + return node.query_and_get_error(query, settings=settings) + else: + return node.query(query, settings=settings) + except Exception as ex: + retriable_errors = [ + "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response", + "DB::Exception: Azure::Core::Http::TransportException: Connection closed before getting full response or response is less than expected", + "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response", + "DB::Exception: Azure::Core::Http::TransportException: Error while polling for socket ready read", + "Azure::Core::Http::TransportException, e.what() = Connection was closed by the server while trying to read a response", + "Azure::Core::Http::TransportException, e.what() = Connection closed before getting full response or response is less than expected", + "Azure::Core::Http::TransportException, e.what() = Connection was closed 
by the server while trying to read a response", + "Azure::Core::Http::TransportException, e.what() = Error while polling for socket ready read", + ] + retry = False + for error in retriable_errors: + if error in str(ex): + retry = True + print(f"Try num: {i}. Having retriable error: {ex}") + time.sleep(i) + break + if not retry or i == try_num - 1: + raise Exception(ex) + if query_on_retry is not None: + node.query(query_on_retry) + continue + + +def get_azure_file_content(filename, port): + container_name = "cont" + connection_string = ( + f"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;" + f"AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;" + f"BlobEndpoint=http://127.0.0.1:{port}/devstoreaccount1;" + ) + blob_service_client = BlobServiceClient.from_connection_string( + str(connection_string) + ) + container_client = blob_service_client.get_container_client(container_name) + blob_client = container_client.get_blob_client(filename) + download_stream = blob_client.download_blob() + return download_stream.readall().decode("utf-8") + + +@pytest.fixture(autouse=True, scope="function") +def delete_all_files(cluster): + port = cluster.env_variables["AZURITE_PORT"] + connection_string = ( + f"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;" + f"AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;" + f"BlobEndpoint=http://127.0.0.1:{port}/devstoreaccount1;" + ) + blob_service_client = BlobServiceClient.from_connection_string(connection_string) + containers = blob_service_client.list_containers() + for container in containers: + container_client = blob_service_client.get_container_client(container) + blob_list = container_client.list_blobs() + for blob in blob_list: + print(blob) + blob_client = container_client.get_blob_client(blob) + blob_client.delete_blob() + + assert len(list(container_client.list_blobs())) == 0 + + yield + + +def test_azure_partitioning_with_one_parameter(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values = f"('Elizabeth', 'Gordon')" + path = "a/column1=Elizabeth/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}')" + ) + assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format( + bucket="cont", max_path=path + ) + ] + + query = ( + f"SELECT column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + "Gordon" + ] + +def test_azure_partitioning_with_two_parameters(cluster): + # type: (ClickHouseCluster) -> None + node 
= cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values_1 = f"('Elizabeth', 'Gordon')" + values_2 = f"('Emilia', 'Gregor')" + path = "a/column1=Elizabeth/column2=Gordon/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format( + bucket="cont", max_path=path + ) + ] + + query = ( + f"SELECT column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;" + ) + assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + "Elizabeth" + ] + + query = ( + f"SELECT column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND column1=_column1;" + ) + assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + "Elizabeth" + ] + +def test_azure_partitioning_without_setting(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values_1 = f"('Elizabeth', 'Gordon')" + values_2 = f"('Emilia', 'Gregor')" + path = "a/column1=Elizabeth/column2=Gordon/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + pattern = re.compile(r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL) + + with pytest.raises(Exception, match=pattern): + azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 0}) diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py new file mode 100644 index 00000000000..38641b63960 --- /dev/null +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 + +import pytest + +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster, is_arm 
+import re + +from helpers.cluster import ClickHouseCluster + +if is_arm(): + pytestmark = pytest.mark.skip + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/macro_hdfs.xml", + "configs/schema_cache_hdfs.xml", + "configs/cluster_hdfs.xml", + ], + with_hdfs=True, +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + +def test_hdfs_partitioning_with_one_parameter(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") + == f"Elizabeth\tGordon\n" + ) + + r = node1.query( + "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", settings={"hdfs_hive_partitioning": 1} + ) + assert (r == f"Elizabeth\n") + +def test_hdfs_partitioning_with_two_parameters(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") + == f"Elizabeth\tGordon\n" + ) + + r = node1.query( + "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", settings={"hdfs_hive_partitioning": 1} + ) + assert (r == f"Gordon\n") + +def test_hdfs_partitioning_without_setting(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") + == f"Elizabeth\tGordon\n" + ) + pattern = re.compile(r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL) + + with pytest.raises(QueryRuntimeException, match=pattern): + node1.query(f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", settings={"hdfs_hive_partitioning": 0}) + +if __name__ == "__main__": + cluster.start() + input("Cluster created, press any key to destroy...") + cluster.shutdown() diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference new file mode 100644 index 00000000000..6ef1fcdf652 --- /dev/null +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -0,0 +1,96 @@ +TESTING THE FILE HIVE PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +1 +TESTING THE URL PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia 
Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +1 +TESTING THE S3 PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +1 diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh new file mode 100755 index 00000000000..a5d4c85a33b --- /dev/null +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" + + +$CLICKHOUSE_LOCAL -n -q """set file_hive_partitioning = 1; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" + +$CLICKHOUSE_LOCAL -n -q """set file_hive_partitioning = 0; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" + + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" + + +$CLICKHOUSE_LOCAL -n -q """set url_hive_partitioning = 1; + +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM 
url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _non_existing_column FROM url('http://localhost:11111/test/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" + +$CLICKHOUSE_LOCAL -n -q """set url_hive_partitioning = 0; + +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" + + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" + + +$CLICKHOUSE_LOCAL -n -q """set s3_hive_partitioning = 1; + +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _non_existing_column FROM s3('http://localhost:11111/test/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" + +$CLICKHOUSE_LOCAL -n -q """set s3_hive_partitioning = 0; + +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" + diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ 
zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? 
literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Gordon/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Gordon/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? 
literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Schmidt/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Schmidt/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? 
literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? literal 0 HcmV?d00001 From 83462d743e76dcfa8fd35b8b30335682f86d9374 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:13:44 +0200 Subject: [PATCH 0258/2361] enhance SettingsChangesHistory.cpp --- src/Core/SettingsChangesHistory.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index b0725340f46..607f9b6d858 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -59,6 +59,11 @@ static std::initializer_list Date: Tue, 2 Jul 2024 14:43:45 +0000 Subject: [PATCH 0259/2361] style check --- .../ObjectStorage/StorageObjectStorage.cpp | 2 +- .../StorageObjectStorageSource.cpp | 7 +-- .../test_azure.py | 43 +++++++++++++------ .../test_hdfs.py | 32 ++++++++------ .../03203_hive_style_partitioning.sh | 24 +++++++---- 5 files changed, 69 insertions(+), 39 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index b169f02940e..ae7c211330c 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -86,7 +86,7 @@ StorageObjectStorage::StorageObjectStorage( ); Strings paths; - + if (checkIfHiveSettingEnabled(context, configuration->getTypeName())) if (auto file = file_iterator->next(0)) paths = {file->getPath()}; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 2741cfecf6b..afb23961312 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -198,13 +198,14 @@ Chunk StorageObjectStorageSource::generate() chassert(object_info->metadata); auto hive_map = VirtualColumnUtils::parsePartitionMapFromPath(object_info->getPath()); - bool contains_virtual_column = std::any_of(hive_map.begin(), hive_map.end(), - [&](const auto& pair) { + bool contains_virtual_column = std::any_of(hive_map.begin(), hive_map.end(), + [&](const auto& pair) + { return 
read_from_format_info.requested_virtual_columns.contains(pair.first); }); if (!contains_virtual_column) - hive_map.clear(); // If we cannot find any virual column in requested, we don't add any of them to chunk + hive_map.clear(); // If we cannot find any virtual column in requested, we don't add any of them to chunk VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, read_from_format_info.requested_virtual_columns, diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py index c9b2c9fec2e..0be697821f0 100644 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py @@ -12,14 +12,21 @@ from helpers.cluster import ClickHouseCluster, ClickHouseInstance if is_arm(): pytestmark = pytest.mark.skip + @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) cluster.add_instance( "node", - main_configs=["configs/named_collections_azure.xml", "configs/schema_cache_azure.xml"], - user_configs=["configs/disable_profilers_azure.xml", "configs/users_azure.xml"], + main_configs=[ + "configs/named_collections_azure.xml", + "configs/schema_cache_azure.xml", + ], + user_configs=[ + "configs/disable_profilers_azure.xml", + "configs/users_azure.xml", + ], with_azurite=True, ) cluster.start() @@ -121,7 +128,9 @@ def test_azure_partitioning_with_one_parameter(cluster): f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " f"blob_path='{path}', format='CSV', structure='{table_format}')" ) - assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == [ "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format( bucket="cont", max_path=path ) @@ -132,9 +141,10 @@ def test_azure_partitioning_with_one_parameter(cluster): f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" ) - assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ - "Gordon" - ] + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == ["Gordon"] + def test_azure_partitioning_with_two_parameters(cluster): # type: (ClickHouseCluster) -> None @@ -155,7 +165,9 @@ def test_azure_partitioning_with_two_parameters(cluster): f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" ) - assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == [ "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format( bucket="cont", max_path=path ) @@ -166,18 +178,19 @@ def test_azure_partitioning_with_two_parameters(cluster): f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;" ) - assert azure_query(node, query, 
settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ - "Elizabeth" - ] + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == ["Elizabeth"] query = ( f"SELECT column1 FROM azureBlobStorage(azure_conf2, " f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND column1=_column1;" ) - assert azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 1}).splitlines() == [ - "Elizabeth" - ] + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == ["Elizabeth"] + def test_azure_partitioning_without_setting(cluster): # type: (ClickHouseCluster) -> None @@ -198,7 +211,9 @@ def test_azure_partitioning_without_setting(cluster): f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" ) - pattern = re.compile(r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL) + pattern = re.compile( + r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL + ) with pytest.raises(Exception, match=pattern): azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 0}) diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py index 38641b63960..4667d18688a 100644 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py +++ b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py @@ -31,20 +31,18 @@ def started_cluster(): finally: cluster.shutdown() + def test_hdfs_partitioning_with_one_parameter(started_cluster): hdfs_api = started_cluster.hdfs_api - hdfs_api.write_data( - f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n" - ) - assert ( - hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") - == f"Elizabeth\tGordon\n" - ) + hdfs_api.write_data(f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n") + assert hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") == f"Elizabeth\tGordon\n" r = node1.query( - "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", settings={"hdfs_hive_partitioning": 1} + "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", + settings={"hdfs_hive_partitioning": 1}, ) - assert (r == f"Elizabeth\n") + assert r == f"Elizabeth\n" + def test_hdfs_partitioning_with_two_parameters(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -57,9 +55,11 @@ def test_hdfs_partitioning_with_two_parameters(started_cluster): ) r = node1.query( - "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", settings={"hdfs_hive_partitioning": 1} + "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", + settings={"hdfs_hive_partitioning": 1}, ) - assert (r == f"Gordon\n") + assert r == f"Gordon\n" + def test_hdfs_partitioning_without_setting(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -70,10 +70,16 @@ def test_hdfs_partitioning_without_setting(started_cluster): hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") == f"Elizabeth\tGordon\n" ) - pattern = re.compile(r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL) 
+ pattern = re.compile( + r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL + ) with pytest.raises(QueryRuntimeException, match=pattern): - node1.query(f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", settings={"hdfs_hive_partitioning": 0}) + node1.query( + f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", + settings={"hdfs_hive_partitioning": 0}, + ) + if __name__ == "__main__": cluster.start() diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index a5d4c85a33b..83a8f87a813 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -8,7 +8,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """set file_hive_partitioning = 1; +$CLICKHOUSE_LOCAL -n -q """ +set file_hive_partitioning = 1; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -31,13 +32,15 @@ SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.pa $CLICKHOUSE_LOCAL -n -q """set file_hive_partitioning = 0; -SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """set url_hive_partitioning = 1; +$CLICKHOUSE_LOCAL -n -q """ +set url_hive_partitioning = 1; SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -60,13 +63,15 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=*/ $CLICKHOUSE_LOCAL -n -q """set url_hive_partitioning = 0; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" +SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """set s3_hive_partitioning = 1; +$CLICKHOUSE_LOCAL -n -q """ +set s3_hive_partitioning = 1; SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -85,9 +90,12 @@ SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/c SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; SELECT *, _non_existing_column FROM s3('http://localhost:11111/test/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0; +""" -$CLICKHOUSE_LOCAL -n -q """set s3_hive_partitioning = 0; +$CLICKHOUSE_LOCAL -n -q """ +set s3_hive_partitioning = 0; -SELECT *, _column0 FROM 
s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10;""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" +SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" From cd5cdcc124f95204a6f63e8a1ce4d7148d8fec7f Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 2 Jul 2024 17:00:59 +0200 Subject: [PATCH 0260/2361] Shellcheck fix --- tests/queries/0_stateless/03203_hive_style_partitioning.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 83a8f87a813..98c039f3454 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -30,7 +30,8 @@ SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/c SELECT *, _non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" -$CLICKHOUSE_LOCAL -n -q """set file_hive_partitioning = 0; +$CLICKHOUSE_LOCAL -n -q """ +set file_hive_partitioning = 0; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" @@ -61,7 +62,8 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=El SELECT *, _non_existing_column FROM url('http://localhost:11111/test/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" -$CLICKHOUSE_LOCAL -n -q """set url_hive_partitioning = 0; +$CLICKHOUSE_LOCAL -n -q """ +set url_hive_partitioning = 0; SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" @@ -98,4 +100,3 @@ set s3_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" - From dc9dc1676d8f8af74c20173927c6027623cc788c Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 2 Jul 2024 17:37:47 +0200 Subject: [PATCH 0261/2361] add default for map in VirtualsForFileLikeStorage --- src/Storages/VirtualColumnUtils.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index a03d4c7447f..f9b49cc48ed 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -76,7 +76,7 @@ struct VirtualsForFileLikeStorage std::optional size { std::nullopt }; const String * filename { nullptr }; std::optional last_modified { std::nullopt }; - std::map hive_partitioning_map; + std::map hive_partitioning_map {}; }; From 9be404c9c7b1e14410928a4aef8396664d1e364e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 2 Jul 2024 18:02:57 +0000 Subject: [PATCH 0262/2361] Fix another case. 
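"Fix another case." is terse; judging from the PlannerJoinTree hunk below, the point is ordering: the ActionsDAG for the mixed JOIN expression is moved into an ExpressionActions object, so the raw DAG pointer collected for later set building has to be taken from that ExpressionActions afterwards, not from the about-to-be-moved DAG. A minimal illustration of the concern, with hypothetical `Dag`/`CompiledExpression` types standing in for the real planner classes:

    #include <memory>
    #include <vector>

    struct Dag {};

    struct CompiledExpression
    {
        explicit CompiledExpression(std::unique_ptr<Dag> dag_) : dag(std::move(dag_)) {}   // may also rewrite the DAG here
        const Dag & getDag() const { return *dag; }
    private:
        std::unique_ptr<Dag> dag;
    };

    int main()
    {
        std::vector<const Dag *> collected;
        auto dag = std::make_unique<Dag>();

        // Risky ordering (what the old code resembled): remember dag.get(),
        // then hand the DAG over; `collected` may then no longer describe the
        // DAG the expression actually ends up using.
        //
        // Fixed ordering: construct the consumer first, then record the DAG
        // it actually owns.
        CompiledExpression expr(std::move(dag));
        collected.push_back(&expr.getDag());
    }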
--- src/Planner/PlannerJoinTree.cpp | 2 +- src/Storages/StorageMerge.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 16b5e363bfd..604d3366484 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -1383,11 +1383,11 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ if (join_clauses_and_actions.mixed_join_expressions_actions) { - left_join_tree_query_plan.actions_dags.push_back(join_clauses_and_actions.mixed_join_expressions_actions.get()); ExpressionActionsPtr & mixed_join_expression = table_join->getMixedJoinExpression(); mixed_join_expression = std::make_shared( std::move(join_clauses_and_actions.mixed_join_expressions_actions), ExpressionActionsSettings::fromContext(planner_context->getQueryContext())); + left_join_tree_query_plan.actions_dags.push_back(&mixed_join_expression->getActionsDAG()); } } else if (join_node.isUsingJoinExpression()) diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 5c5ce4ecc2e..c3fdad3a8f2 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1240,7 +1240,7 @@ ReadFromMerge::RowPolicyData::RowPolicyData(RowPolicyFilterPtr row_policy_filter auto expression_analyzer = ExpressionAnalyzer{expr, syntax_result, local_context}; actions_dag = expression_analyzer.getActionsDAG(false /* add_aliases */, false /* project_result */); - filter_actions = std::make_shared(std::move(actions_dag), + filter_actions = std::make_shared(ActionsDAG::clone(actions_dag), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); const auto & required_columns = filter_actions->getRequiredColumnsWithTypes(); const auto & sample_block_columns = filter_actions->getSampleBlock().getNamesAndTypesList(); From 57818990f201562d0b6938c1b8de78d16bac471f Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 2 Jul 2024 20:41:08 +0000 Subject: [PATCH 0263/2361] fix the test --- src/Access/ContextAccess.cpp | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 8ff1fc8ed21..f534c334318 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -618,6 +618,22 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg if (!granted) { + auto access_denied_no_grant = [&](AccessFlags access_flags, FmtArgs && ...fmt_args) + { + if (grant_option && acs->isGranted(access_flags, fmt_args...)) + { + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. " + "The required privileges have been granted, but without grant option. " + "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", + AccessRightsElement{access_flags, fmt_args...}.toStringWithoutOptions()); + } + + return access_denied(ErrorCodes::ACCESS_DENIED, + "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", + AccessRightsElement{access_flags, fmt_args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : "")); + }; + /// As we check the SOURCES from the Table Engine logic, direct prompt about Table Engine would be misleading /// since SOURCES is not granted actually. In order to solve this, turn the prompt logic back to Sources. 
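The `access_denied_no_grant` helper introduced in this ContextAccess hunk folds the two previously duplicated "not enough privileges" branches into a single callable, which the code below can also reuse for the SOURCES-rewritten message and for the empty `new_flags` fallback. A compressed sketch of that shape, with plain strings standing in for `AccessFlags` and the real message formatting:

    #include <iostream>
    #include <string>

    int main()
    {
        bool grant_option = true;
        auto is_granted = [](const std::string & what) { return what == "SELECT ON db.tbl"; };

        // One exit path for every "access denied" case instead of two copies of
        // the same if/else; the real lambda is variadic and takes AccessFlags
        // plus database/table arguments.
        auto access_denied_no_grant = [&](const std::string & element)
        {
            if (grant_option && is_granted(element))
                std::cout << "Not enough privileges: " << element << " is granted, but without GRANT OPTION\n";
            else
                std::cout << "Not enough privileges: need grant " << element
                          << (grant_option ? " WITH GRANT OPTION" : "") << "\n";
            return false;
        };

        access_denied_no_grant("SELECT ON db.tbl");   // granted, but without GRANT OPTION
        access_denied_no_grant("S3 ON *.*");          // plain "need grant ..."
    }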
if (flags & AccessType::TABLE_ENGINE && !access_control->doesTableEnginesRequireGrant()) @@ -635,8 +651,9 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg break; } + /// Might happen in the case of grant Table Engine on A (but not source), then revoke A. if (new_flags.isEmpty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Didn't find the target Source from the Table Engine"); + return access_denied_no_grant(flags, args...); if (grant_option && acs->isGranted(flags, args...)) { @@ -652,18 +669,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg AccessRightsElement{new_flags}.toStringForAccessTypeSource() + (grant_option ? " WITH GRANT OPTION" : "")); } - if (grant_option && acs->isGranted(flags, args...)) - { - return access_denied(ErrorCodes::ACCESS_DENIED, - "{}: Not enough privileges. " - "The required privileges have been granted, but without grant option. " - "To execute this query, it's necessary to have the grant {} WITH GRANT OPTION", - AccessRightsElement{flags, args...}.toStringWithoutOptions()); - } - - return access_denied(ErrorCodes::ACCESS_DENIED, - "{}: Not enough privileges. To execute this query, it's necessary to have the grant {}", - AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : "")); + return access_denied_no_grant(flags, args...); } struct PrecalculatedFlags From 9df30e4a0fe501c8e5eb1815e7a01c73d0c37ad2 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 3 Jul 2024 02:02:24 +0000 Subject: [PATCH 0264/2361] Fix 'Not-ready Set is passed' in system tables --- src/Interpreters/PreparedSets.h | 15 ++++++++++--- src/Planner/Planner.cpp | 2 +- src/Storages/RocksDB/StorageSystemRocksDB.cpp | 8 +++++++ src/Storages/RocksDB/StorageSystemRocksDB.h | 1 + src/Storages/StorageMergeTreeIndex.cpp | 22 ++++++++++++++----- src/Storages/StorageMergeTreeIndex.h | 2 +- .../System/IStorageSystemOneBlock.cpp | 18 ++++++++++++--- src/Storages/System/IStorageSystemOneBlock.h | 8 +++++++ src/Storages/System/StorageSystemColumns.cpp | 20 +++++++++++++---- .../StorageSystemDataSkippingIndices.cpp | 17 +++++++++++--- .../System/StorageSystemDatabases.cpp | 8 +++++++ src/Storages/System/StorageSystemDatabases.h | 1 + .../System/StorageSystemDistributionQueue.cpp | 7 ++++++ .../System/StorageSystemDistributionQueue.h | 1 + .../System/StorageSystemMutations.cpp | 7 ++++++ src/Storages/System/StorageSystemMutations.h | 1 + .../StorageSystemPartMovesBetweenShards.cpp | 8 +++++++ .../StorageSystemPartMovesBetweenShards.h | 1 + src/Storages/System/StorageSystemReplicas.cpp | 19 +++++++++++++--- .../System/StorageSystemReplicationQueue.cpp | 8 +++++++ .../System/StorageSystemReplicationQueue.h | 1 + src/Storages/VirtualColumnUtils.h | 9 ++++++++ .../0_stateless/02841_not_ready_set_bug.sh | 17 ++++++++++++++ 23 files changed, 177 insertions(+), 24 deletions(-) diff --git a/src/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h index bf99a8ece3c..a6aee974d0e 100644 --- a/src/Interpreters/PreparedSets.h +++ b/src/Interpreters/PreparedSets.h @@ -90,9 +90,18 @@ private: using FutureSetFromTuplePtr = std::shared_ptr; -/// Set from subquery can be built inplace for PK or in CreatingSet step. -/// If use_index_for_in_with_subqueries_max_values is reached, set for PK won't be created, -/// but ordinary set would be created instead. +/// Set from subquery can be filled (by running the subquery) in one of two ways: +/// 1. During query analysis. 
Specifically, inside `SourceStepWithFilter::applyFilters()`. +/// Useful if the query plan depends on the set contents, e.g. to determine which files to read. +/// 2. During query execution. This is the preferred way. +/// Sets are created by CreatingSetStep, which runs before other steps. +/// Be careful: to build the set during query analysis, the `buildSetInplace()` call must happen +/// inside `SourceStepWithFilter::applyFilters()`. Calling it later, e.g. from `initializePipeline()` +/// will result in LOGICAL_ERROR "Not-ready Set is passed" (because a CreatingSetStep was already +/// added to pipeline but hasn't executed yet). +/// +/// If use_index_for_in_with_subqueries_max_values is reached, the built set won't be suitable for +/// key analysis, but will work with function IN (the set will contain only hashes of elements). class FutureSetFromSubquery final : public FutureSet { public: diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 2d42ed73223..8d855b8e619 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1103,7 +1103,7 @@ void addBuildSubqueriesForSetsStepIfNeeded( auto query_tree = subquery->detachQueryTree(); auto subquery_options = select_query_options.subquery(); /// I don't know if this is a good decision, - /// But for now it is done in the same way as in old analyzer. + /// but for now it is done in the same way as in old analyzer. /// This would not ignore limits for subqueries (affects mutations only). /// See test_build_sets_from_multiple_threads-analyzer. subquery_options.ignore_limits = false; diff --git a/src/Storages/RocksDB/StorageSystemRocksDB.cpp b/src/Storages/RocksDB/StorageSystemRocksDB.cpp index 5105b190fd9..b6cd58f58cc 100644 --- a/src/Storages/RocksDB/StorageSystemRocksDB.cpp +++ b/src/Storages/RocksDB/StorageSystemRocksDB.cpp @@ -40,6 +40,14 @@ ColumnsDescription StorageSystemRocksDB::getColumnsDescription() } +Block StorageSystemRocksDB::getFilterSampleBlock() const +{ + return { + { {}, std::make_shared(), "database" }, + { {}, std::make_shared(), "table" }, + }; +} + void StorageSystemRocksDB::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const { const auto access = context->getAccess(); diff --git a/src/Storages/RocksDB/StorageSystemRocksDB.h b/src/Storages/RocksDB/StorageSystemRocksDB.h index ec351c75446..be3bfaa860c 100644 --- a/src/Storages/RocksDB/StorageSystemRocksDB.h +++ b/src/Storages/RocksDB/StorageSystemRocksDB.h @@ -22,6 +22,7 @@ protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const override; + Block getFilterSampleBlock() const override; }; } diff --git a/src/Storages/StorageMergeTreeIndex.cpp b/src/Storages/StorageMergeTreeIndex.cpp index 0b1ad02f8c9..90d01d356e9 100644 --- a/src/Storages/StorageMergeTreeIndex.cpp +++ b/src/Storages/StorageMergeTreeIndex.cpp @@ -275,7 +275,7 @@ public: private: std::shared_ptr storage; Poco::Logger * log; - const ActionsDAG::Node * predicate = nullptr; + ActionsDAGPtr virtual_columns_filter; }; void ReadFromMergeTreeIndex::applyFilters(ActionDAGNodes added_filter_nodes) @@ -283,7 +283,17 @@ void ReadFromMergeTreeIndex::applyFilters(ActionDAGNodes added_filter_nodes) SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); + { + Block block_to_filter + { + { {}, std::make_shared(), 
StorageMergeTreeIndex::part_name_column.name }, + }; + + virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + + if (virtual_columns_filter) + VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + } } void StorageMergeTreeIndex::read( @@ -335,7 +345,7 @@ void StorageMergeTreeIndex::read( void ReadFromMergeTreeIndex::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - auto filtered_parts = storage->getFilteredDataParts(predicate, context); + auto filtered_parts = storage->getFilteredDataParts(virtual_columns_filter, context); LOG_DEBUG(log, "Reading index{}from {} parts of table {}", storage->with_marks ? " with marks " : " ", @@ -345,9 +355,9 @@ void ReadFromMergeTreeIndex::initializePipeline(QueryPipelineBuilder & pipeline, pipeline.init(Pipe(std::make_shared(getOutputStream().header, storage->key_sample_block, std::move(filtered_parts), context, storage->with_marks))); } -MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(const ActionsDAG::Node * predicate, const ContextPtr & context) const +MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(ActionsDAGPtr virtual_columns_filter, const ContextPtr & context) const { - if (!predicate) + if (!virtual_columns_filter) return data_parts; auto all_part_names = ColumnString::create(); @@ -355,7 +365,7 @@ MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(const all_part_names->insert(part->name); Block filtered_block{{std::move(all_part_names), std::make_shared(), part_name_column.name}}; - VirtualColumnUtils::filterBlockWithPredicate(predicate, filtered_block, context); + VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, filtered_block, context); if (!filtered_block.rows()) return {}; diff --git a/src/Storages/StorageMergeTreeIndex.h b/src/Storages/StorageMergeTreeIndex.h index a1fb61d5a56..652a2d6eeaf 100644 --- a/src/Storages/StorageMergeTreeIndex.h +++ b/src/Storages/StorageMergeTreeIndex.h @@ -36,7 +36,7 @@ public: private: friend class ReadFromMergeTreeIndex; - MergeTreeData::DataPartsVector getFilteredDataParts(const ActionsDAG::Node * predicate, const ContextPtr & context) const; + MergeTreeData::DataPartsVector getFilteredDataParts(ActionsDAGPtr virtual_columns_filter, const ContextPtr & context) const; StoragePtr source_table; bool with_marks; diff --git a/src/Storages/System/IStorageSystemOneBlock.cpp b/src/Storages/System/IStorageSystemOneBlock.cpp index 456b7c4f90b..7cde31905aa 100644 --- a/src/Storages/System/IStorageSystemOneBlock.cpp +++ b/src/Storages/System/IStorageSystemOneBlock.cpp @@ -5,6 +5,7 @@ // #include #include #include +#include #include #include #include @@ -44,7 +45,7 @@ public: private: std::shared_ptr storage; std::vector columns_mask; - const ActionsDAG::Node * predicate = nullptr; + ActionsDAGPtr filter; }; void IStorageSystemOneBlock::read( @@ -81,6 +82,7 @@ void ReadFromSystemOneBlock::initializePipeline(QueryPipelineBuilder & pipeline, { const auto & sample_block = getOutputStream().header; MutableColumns res_columns = sample_block.cloneEmptyColumns(); + auto predicate = filter ? 
filter->getOutputs().at(0) : nullptr; storage->fillData(res_columns, context, predicate, std::move(columns_mask)); UInt64 num_rows = res_columns.at(0)->size(); @@ -93,8 +95,18 @@ void ReadFromSystemOneBlock::applyFilters(ActionDAGNodes added_filter_nodes) { SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); - if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); + if (!filter_actions_dag) + return; + + Block sample = storage->getFilterSampleBlock(); + if (sample.columns() == 0) + return; + + filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &sample); + + /// Must prepare sets here, initializePipeline() would be too late, see comment on FutureSetFromSubquery. + if (filter) + VirtualColumnUtils::buildSetsForDAG(filter, context); } } diff --git a/src/Storages/System/IStorageSystemOneBlock.h b/src/Storages/System/IStorageSystemOneBlock.h index a20434fd97e..a47875c2537 100644 --- a/src/Storages/System/IStorageSystemOneBlock.h +++ b/src/Storages/System/IStorageSystemOneBlock.h @@ -22,8 +22,16 @@ class Context; class IStorageSystemOneBlock : public IStorage { protected: + /// If this method uses `predicate`, getFilterSampleBlock() must list all columns to which + /// it's applied. (Otherwise there'll be a LOGICAL_ERROR "Not-ready Set is passed" on subqueries.) virtual void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector columns_mask) const = 0; + /// Columns to which fillData() applies the `predicate`. + virtual Block getFilterSampleBlock() const + { + return {}; + } + virtual bool supportsColumnsMask() const { return false; } friend class ReadFromSystemOneBlock; diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 8dd8d3b6154..9502a7ee2a3 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -337,7 +337,7 @@ private: std::shared_ptr storage; std::vector columns_mask; const size_t max_block_size; - const ActionsDAG::Node * predicate = nullptr; + ActionsDAGPtr virtual_columns_filter; }; void ReadFromSystemColumns::applyFilters(ActionDAGNodes added_filter_nodes) @@ -345,7 +345,17 @@ void ReadFromSystemColumns::applyFilters(ActionDAGNodes added_filter_nodes) SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); + { + Block block_to_filter; + block_to_filter.insert(ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "database")); + block_to_filter.insert(ColumnWithTypeAndName(ColumnString::create(), std::make_shared(), "table")); + + virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + + /// Must prepare sets here, initializePipeline() would be too late, see comment on FutureSetFromSubquery. + if (virtual_columns_filter) + VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + } } void StorageSystemColumns::read( @@ -407,7 +417,8 @@ void ReadFromSystemColumns::initializePipeline(QueryPipelineBuilder & pipeline, block_to_filter.insert(ColumnWithTypeAndName(std::move(database_column_mut), std::make_shared(), "database")); /// Filter block with `database` column. 
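The contract added to IStorageSystemOneBlock above is easy to miss: any system table whose fillData() consults `predicate` must advertise the filtered columns through getFilterSampleBlock(), otherwise sets coming from subqueries in the WHERE clause are never built and the query fails with "Not-ready Set is passed". A hypothetical minimal subclass following that contract (the table and its column are invented for illustration, and includes/template arguments are abbreviated to the essentials):

    #include <Storages/System/IStorageSystemOneBlock.h>
    #include <Storages/VirtualColumnUtils.h>
    #include <Columns/ColumnString.h>
    #include <DataTypes/DataTypeString.h>

    namespace DB
    {

    /// Hypothetical table, only to illustrate the new contract.
    class StorageSystemExample final : public IStorageSystemOneBlock
    {
    public:
        String getName() const override { return "SystemExample"; }

    protected:
        using IStorageSystemOneBlock::IStorageSystemOneBlock;

        /// fillData() filters on "database", so "database" has to be listed here;
        /// the base class uses this sample to cut the pushed-down filter to these
        /// columns and to build any subquery sets while still inside applyFilters().
        Block getFilterSampleBlock() const override
        {
            return { { {}, std::make_shared<DataTypeString>(), "database" } };
        }

        void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector<UInt8>) const override
        {
            Block block_to_filter { { ColumnString::create(), std::make_shared<DataTypeString>(), "database" } };
            /// ... append candidate database names to block_to_filter ...
            if (predicate)
                VirtualColumnUtils::filterBlockWithPredicate(predicate, block_to_filter, context);
            /// ... fill res_columns only for the databases that survived ...
        }
    };

    }

The patch applies this same pattern to system.rocksdb, system.databases, system.mutations, system.distribution_queue, system.part_moves_between_shards and system.replication_queue.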
- VirtualColumnUtils::filterBlockWithPredicate(predicate, block_to_filter, context); + if (virtual_columns_filter) + VirtualColumnUtils::filterBlockWithPredicate(virtual_columns_filter->getOutputs().at(0), block_to_filter, context); if (!block_to_filter.rows()) { @@ -455,7 +466,8 @@ void ReadFromSystemColumns::initializePipeline(QueryPipelineBuilder & pipeline, } /// Filter block with `database` and `table` columns. - VirtualColumnUtils::filterBlockWithPredicate(predicate, block_to_filter, context); + if (virtual_columns_filter) + VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, block_to_filter, context); if (!block_to_filter.rows()) { diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index 093adc59cc6..a6bba44e257 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -214,7 +214,7 @@ private: std::shared_ptr storage; std::vector columns_mask; const size_t max_block_size; - const ActionsDAG::Node * predicate = nullptr; + ActionsDAGPtr virtual_columns_filter; }; void ReadFromSystemDataSkippingIndices::applyFilters(ActionDAGNodes added_filter_nodes) @@ -222,7 +222,17 @@ void ReadFromSystemDataSkippingIndices::applyFilters(ActionDAGNodes added_filter SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); + { + Block block_to_filter + { + { ColumnString::create(), std::make_shared(), "database" }, + }; + + virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + + if (virtual_columns_filter) + VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + } } void StorageSystemDataSkippingIndices::read( @@ -268,7 +278,8 @@ void ReadFromSystemDataSkippingIndices::initializePipeline(QueryPipelineBuilder /// Condition on "database" in a query acts like an index. 
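For read steps that are not IStorageSystemOneBlock subclasses (system.columns, system.data_skipping_indices, system.replicas, mergeTreeIndex), the hunks here repeat a two-phase pattern. Condensed, it looks roughly like this (illustrative `ReadFromSystemSomething` step; members such as `virtual_columns_filter` and `context`, and the `collectCandidateRows()` helper, are assumed rather than taken from the real classes):

    void ReadFromSystemSomething::applyFilters(ActionDAGNodes added_filter_nodes)
    {
        SourceStepWithFilter::applyFilters(std::move(added_filter_nodes));

        if (!filter_actions_dag)
            return;

        /// Keep only the part of the pushed-down filter that touches columns
        /// we can evaluate at this level.
        Block sample { { ColumnString::create(), std::make_shared<DataTypeString>(), "database" } };
        virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(
            filter_actions_dag->getOutputs().at(0), &sample);

        /// Subquery sets (e.g. `WHERE database IN (SELECT ...)`) must be built
        /// here; initializePipeline() is too late and would fail with
        /// "Not-ready Set is passed".
        if (virtual_columns_filter)
            VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context);
    }

    void ReadFromSystemSomething::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
    {
        Block block_to_filter = collectCandidateRows();   /// hypothetical helper, one row per database
        if (virtual_columns_filter)
            VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, block_to_filter, context);
        /// ... create sources only for the rows that survived the filter ...
    }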
Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; - VirtualColumnUtils::filterBlockWithPredicate(predicate, block, context); + if (virtual_columns_filter) + VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, block, context); ColumnPtr & filtered_databases = block.getByPosition(0).column; pipeline.init(Pipe(std::make_shared( diff --git a/src/Storages/System/StorageSystemDatabases.cpp b/src/Storages/System/StorageSystemDatabases.cpp index 1dbb187c418..0585506a661 100644 --- a/src/Storages/System/StorageSystemDatabases.cpp +++ b/src/Storages/System/StorageSystemDatabases.cpp @@ -73,6 +73,14 @@ static String getEngineFull(const ContextPtr & ctx, const DatabasePtr & database return engine_full; } +Block StorageSystemDatabases::getFilterSampleBlock() const +{ + return { + { {}, std::make_shared(), "engine" }, + { {}, std::make_shared(), "uuid" }, + }; +} + static ColumnPtr getFilteredDatabases(const Databases & databases, const ActionsDAG::Node * predicate, ContextPtr context) { MutableColumnPtr name_column = ColumnString::create(); diff --git a/src/Storages/System/StorageSystemDatabases.h b/src/Storages/System/StorageSystemDatabases.h index fa55f0aea32..d10b350435b 100644 --- a/src/Storages/System/StorageSystemDatabases.h +++ b/src/Storages/System/StorageSystemDatabases.h @@ -27,6 +27,7 @@ protected: bool supportsColumnsMask() const override { return true; } void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector columns_mask) const override; + Block getFilterSampleBlock() const override; }; } diff --git a/src/Storages/System/StorageSystemDistributionQueue.cpp b/src/Storages/System/StorageSystemDistributionQueue.cpp index e2058448904..dab318a9c1c 100644 --- a/src/Storages/System/StorageSystemDistributionQueue.cpp +++ b/src/Storages/System/StorageSystemDistributionQueue.cpp @@ -107,6 +107,13 @@ ColumnsDescription StorageSystemDistributionQueue::getColumnsDescription() }; } +Block StorageSystemDistributionQueue::getFilterSampleBlock() const +{ + return { + { {}, std::make_shared(), "database" }, + { {}, std::make_shared(), "table" }, + }; +} void StorageSystemDistributionQueue::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const { diff --git a/src/Storages/System/StorageSystemDistributionQueue.h b/src/Storages/System/StorageSystemDistributionQueue.h index 159a86bf082..27d777a4762 100644 --- a/src/Storages/System/StorageSystemDistributionQueue.h +++ b/src/Storages/System/StorageSystemDistributionQueue.h @@ -22,6 +22,7 @@ protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const override; + Block getFilterSampleBlock() const override; }; } diff --git a/src/Storages/System/StorageSystemMutations.cpp b/src/Storages/System/StorageSystemMutations.cpp index 94656008029..df9a71310e5 100644 --- a/src/Storages/System/StorageSystemMutations.cpp +++ b/src/Storages/System/StorageSystemMutations.cpp @@ -46,6 +46,13 @@ ColumnsDescription StorageSystemMutations::getColumnsDescription() }; } +Block StorageSystemMutations::getFilterSampleBlock() const +{ + return { + { {}, std::make_shared(), "database" }, + { {}, std::make_shared(), "table" }, + }; +} void StorageSystemMutations::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const { diff --git 
a/src/Storages/System/StorageSystemMutations.h b/src/Storages/System/StorageSystemMutations.h index c60157cd853..5341838a65e 100644 --- a/src/Storages/System/StorageSystemMutations.h +++ b/src/Storages/System/StorageSystemMutations.h @@ -22,6 +22,7 @@ protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const override; + Block getFilterSampleBlock() const override; }; } diff --git a/src/Storages/System/StorageSystemPartMovesBetweenShards.cpp b/src/Storages/System/StorageSystemPartMovesBetweenShards.cpp index 9cba92bca12..ab74b205a96 100644 --- a/src/Storages/System/StorageSystemPartMovesBetweenShards.cpp +++ b/src/Storages/System/StorageSystemPartMovesBetweenShards.cpp @@ -43,6 +43,14 @@ ColumnsDescription StorageSystemPartMovesBetweenShards::getColumnsDescription() } +Block StorageSystemPartMovesBetweenShards::getFilterSampleBlock() const +{ + return { + { {}, std::make_shared(), "database" }, + { {}, std::make_shared(), "table" }, + }; +} + void StorageSystemPartMovesBetweenShards::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const { const auto access = context->getAccess(); diff --git a/src/Storages/System/StorageSystemPartMovesBetweenShards.h b/src/Storages/System/StorageSystemPartMovesBetweenShards.h index 6a859d4de80..bc6133fcaaa 100644 --- a/src/Storages/System/StorageSystemPartMovesBetweenShards.h +++ b/src/Storages/System/StorageSystemPartMovesBetweenShards.h @@ -20,6 +20,7 @@ protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const override; + Block getFilterSampleBlock() const override; }; } diff --git a/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp index 3bd5fd290db..9fb4dc5ed6f 100644 --- a/src/Storages/System/StorageSystemReplicas.cpp +++ b/src/Storages/System/StorageSystemReplicas.cpp @@ -285,7 +285,7 @@ private: const bool with_zk_fields; const size_t max_block_size; std::shared_ptr impl; - const ActionsDAG::Node * predicate = nullptr; + ActionsDAGPtr virtual_columns_filter; }; void ReadFromSystemReplicas::applyFilters(ActionDAGNodes added_filter_nodes) @@ -293,7 +293,19 @@ void ReadFromSystemReplicas::applyFilters(ActionDAGNodes added_filter_nodes) SourceStepWithFilter::applyFilters(std::move(added_filter_nodes)); if (filter_actions_dag) - predicate = filter_actions_dag->getOutputs().at(0); + { + Block block_to_filter + { + { ColumnString::create(), std::make_shared(), "database" }, + { ColumnString::create(), std::make_shared(), "table" }, + { ColumnString::create(), std::make_shared(), "engine" }, + }; + + virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + + if (virtual_columns_filter) + VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + } } void StorageSystemReplicas::read( @@ -430,7 +442,8 @@ void ReadFromSystemReplicas::initializePipeline(QueryPipelineBuilder & pipeline, { col_engine, std::make_shared(), "engine" }, }; - VirtualColumnUtils::filterBlockWithPredicate(predicate, filtered_block, context); + if (virtual_columns_filter) + VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, filtered_block, context); if (!filtered_block.rows()) { diff --git 
a/src/Storages/System/StorageSystemReplicationQueue.cpp b/src/Storages/System/StorageSystemReplicationQueue.cpp index 14b641f46c7..a50982de5f0 100644 --- a/src/Storages/System/StorageSystemReplicationQueue.cpp +++ b/src/Storages/System/StorageSystemReplicationQueue.cpp @@ -62,6 +62,14 @@ ColumnsDescription StorageSystemReplicationQueue::getColumnsDescription() } +Block StorageSystemReplicationQueue::getFilterSampleBlock() const +{ + return { + { {}, std::make_shared(), "database" }, + { {}, std::make_shared(), "table" }, + }; +} + void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const { const auto access = context->getAccess(); diff --git a/src/Storages/System/StorageSystemReplicationQueue.h b/src/Storages/System/StorageSystemReplicationQueue.h index a9e57851be1..bcf351381ee 100644 --- a/src/Storages/System/StorageSystemReplicationQueue.h +++ b/src/Storages/System/StorageSystemReplicationQueue.h @@ -21,6 +21,7 @@ public: protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const override; + Block getFilterSampleBlock() const override; }; } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index fbfbdd6c6cc..ebec807189e 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -18,6 +18,15 @@ class NamesAndTypesList; namespace VirtualColumnUtils { +/// The filtering functions are tricky to use correctly. +/// There are 2 ways: +/// 1. Call filterBlockWithPredicate() or filterBlockWithDAG() inside SourceStepWithFilter::applyFilters(). +/// 2. Call splitFilterDagForAllowedInputs() and buildSetsForDAG() inside SourceStepWithFilter::applyFilters(). +/// Then call filterBlockWithPredicate() or filterBlockWithDAG() in initializePipeline(). +/// +/// Otherwise calling filter*() outside applyFilters() will throw "Not-ready Set is passed" +/// if there are subqueries. + /// Similar to filterBlockWithQuery, but uses ActionsDAG as a predicate. /// Basically it is filterBlockWithDAG(splitFilterDagForAllowedInputs). void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); diff --git a/tests/queries/0_stateless/02841_not_ready_set_bug.sh b/tests/queries/0_stateless/02841_not_ready_set_bug.sh index 9b2f3b0698e..556e2f52de2 100755 --- a/tests/queries/0_stateless/02841_not_ready_set_bug.sh +++ b/tests/queries/0_stateless/02841_not_ready_set_bug.sh @@ -11,3 +11,20 @@ $CLICKHOUSE_CLIENT --max_threads=2 --max_result_rows=1 --result_overflow_mode=br $CLICKHOUSE_CLIENT -q "SELECT * FROM system.tables WHERE 1 in (SELECT number from numbers(2)) AND database = currentDatabase() format Null" $CLICKHOUSE_CLIENT -q "SELECT xor(1, 0) FROM system.parts WHERE 1 IN (SELECT 1) FORMAT Null" + +# (Not all of these tests are effective because some of these tables are empty.) 
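The new comment in VirtualColumnUtils.h above describes two valid usages; the system-table changes in this patch all take the second one (split, build sets, filter later in initializePipeline()). The first one keeps everything inside applyFilters(); a sketch under the same assumptions as the previous example, with `listCandidates()` and `selected_names` as invented placeholders:

    void ReadFromSomethingSmall::applyFilters(ActionDAGNodes added_filter_nodes)
    {
        SourceStepWithFilter::applyFilters(std::move(added_filter_nodes));
        if (!filter_actions_dag)
            return;

        /// Filtering happens right here, inside applyFilters(), where subquery
        /// sets may still be built; this is the first way the comment describes.
        Block candidates = listCandidates();   /// hypothetical: one row per candidate object
        VirtualColumnUtils::filterBlockWithPredicate(filter_actions_dag->getOutputs().at(0), candidates, context);
        selected_names = candidates.getByName("name").column;   /// consumed later by initializePipeline()
    }

The queries added to 02841_not_ready_set_bug.sh below then poke each touched system table with an `IN (SELECT ...)` filter, which only succeeds when one of these two patterns is followed.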
+$CLICKHOUSE_CLIENT -nq " + select * from system.columns where table in (select '123'); + select * from system.replicas where database in (select '123'); + select * from system.data_skipping_indices where database in (select '123'); + select * from system.databases where name in (select '123'); + select * from system.mutations where table in (select '123'); + select * from system.part_moves_between_shards where database in (select '123'); + select * from system.replication_queue where database in (select '123'); + select * from system.distribution_queue where database in (select '123'); +" +$CLICKHOUSE_CLIENT -nq " + create table a (x Int8) engine MergeTree order by x; + insert into a values (1); + select * from mergeTreeIndex(currentDatabase(), 'a') where part_name in (select '123'); +" From 8a8170d00c90545bea3e8e4881feee7b8a7fb4b7 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 3 Jul 2024 03:19:31 +0000 Subject: [PATCH 0265/2361] Style --- src/Storages/System/StorageSystemReplicationQueue.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemReplicationQueue.h b/src/Storages/System/StorageSystemReplicationQueue.h index bcf351381ee..82a4d68f300 100644 --- a/src/Storages/System/StorageSystemReplicationQueue.h +++ b/src/Storages/System/StorageSystemReplicationQueue.h @@ -21,7 +21,7 @@ public: protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node * predicate, std::vector) const override; - Block getFilterSampleBlock() const override; + Block getFilterSampleBlock() const override; }; } From 6a06024983a78aaab9b7cbe6e9533255debebdb3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 3 Jul 2024 10:25:40 +0000 Subject: [PATCH 0266/2361] Fix for prewhere optimization. --- src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index afe1406b65f..f203d831750 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -59,7 +59,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) /// TODO: We can also check for UnionStep, such as StorageBuffer and local distributed plans. QueryPlan::Node * filter_node = (stack.rbegin() + 1)->node; - const auto * filter_step = typeid_cast(filter_node->step.get()); + auto * filter_step = typeid_cast(filter_node->step.get()); if (!filter_step) return; @@ -108,7 +108,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) prewhere_info->need_filter = true; prewhere_info->remove_prewhere_column = optimize_result.fully_moved_to_prewhere && filter_step->removesFilterColumn(); - auto filter_expression = ActionsDAG::clone(filter_step->getExpression()); + auto filter_expression = std::move(filter_step->getExpression()); const auto & filter_column_name = filter_step->getFilterColumnName(); if (prewhere_info->remove_prewhere_column) @@ -121,7 +121,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) outputs.resize(size); } - auto split_result = filter_step->getExpression()->split(optimize_result.prewhere_nodes, true, true); + auto split_result = filter_expression->split(optimize_result.prewhere_nodes, true, true); /// This is the leak of abstraction. 
/// Splited actions may have inputs which are needed only for PREWHERE. From 61f863c4e1f1d99483af78824d1c5792059dc400 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 3 Jul 2024 13:47:18 +0000 Subject: [PATCH 0267/2361] fix ambiguous override of non-virtual --- src/Client/Connection.cpp | 2 +- src/IO/ReadBufferFromPocoSocketChunked.cpp | 8 ++++---- src/IO/ReadBufferFromPocoSocketChunked.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 803f68c69d6..198518d6314 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -1122,7 +1122,7 @@ bool Connection::poll(size_t timeout_microseconds) bool Connection::hasReadPendingData() const { - return last_input_packet_type.has_value() || in->hasPendingData(); + return last_input_packet_type.has_value() || in->hasBufferedData(); } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp index 93afeadba60..4a1e3732a55 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.cpp +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -34,12 +34,12 @@ void ReadBufferFromPocoSocketChunked::enableChunked() next_chunk = 0; } -bool ReadBufferFromPocoSocketChunked::hasPendingData() const +bool ReadBufferFromPocoSocketChunked::hasBufferedData() const { - if (chunked) - return available() || static_cast(data_end - working_buffer.end()) > sizeof(next_chunk); + if (available()) + return true; - return ReadBufferFromPocoSocketBase::hasPendingData(); + return chunked && (static_cast(data_end - working_buffer.end()) > sizeof(next_chunk)); } bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) const diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h index 943a50f5d08..8bc4024b978 100644 --- a/src/IO/ReadBufferFromPocoSocketChunked.h +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -84,7 +84,7 @@ public: void enableChunked(); - bool hasPendingData() const; + bool hasBufferedData() const; bool poll(size_t timeout_microseconds) const; From 41c62ca6636572b4d7654dc0dc329740bb5c7425 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 3 Jul 2024 14:44:47 +0000 Subject: [PATCH 0268/2361] better --- src/Planner/Planner.cpp | 91 +++++++++++---------------------- src/Planner/PlannerJoinTree.cpp | 25 ++++----- src/Planner/PlannerJoinTree.h | 4 +- src/Planner/Utils.cpp | 30 +++++++++++ src/Planner/Utils.h | 3 ++ 5 files changed, 75 insertions(+), 78 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index dddb7531519..16ee6de73c4 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -10,7 +10,6 @@ #include #include -#include #include #include @@ -331,14 +330,14 @@ public: void addExpressionStep(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression_actions, const std::string & step_description, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { auto actions = ActionsDAG::clone(&expression_actions->dag); if (expression_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), actions); - result_actions_to_execute.push_back(expression_step->getExpression().get()); + appendSetsFromActionsDAG(*expression_step->getExpression(), useful_sets); expression_step->setStepDescription(step_description); query_plan.addStep(std::move(expression_step)); } @@ -346,7 +345,7 @@ void 
addExpressionStep(QueryPlan & query_plan, void addFilterStep(QueryPlan & query_plan, const FilterAnalysisResult & filter_analysis_result, const std::string & step_description, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { auto actions = ActionsDAG::clone(&filter_analysis_result.filter_actions->dag); if (filter_analysis_result.filter_actions->project_input) @@ -356,7 +355,7 @@ void addFilterStep(QueryPlan & query_plan, actions, filter_analysis_result.filter_column_name, filter_analysis_result.remove_filter_column); - result_actions_to_execute.push_back(where_step->getExpression().get()); + appendSetsFromActionsDAG(*where_step->getExpression(), useful_sets); where_step->setStepDescription(step_description); query_plan.addStep(std::move(where_step)); } @@ -544,7 +543,7 @@ void addTotalsHavingStep(QueryPlan & query_plan, const QueryAnalysisResult & query_analysis_result, const PlannerContextPtr & planner_context, const QueryNode & query_node, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); @@ -573,7 +572,7 @@ void addTotalsHavingStep(QueryPlan & query_plan, need_finalize); if (having_analysis_result.filter_actions) - result_actions_to_execute.push_back(totals_having_step->getActions().get()); + appendSetsFromActionsDAG(*totals_having_step->getActions(), useful_sets); query_plan.addStep(std::move(totals_having_step)); } @@ -887,7 +886,7 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, const PlannerContextPtr & planner_context, const PlannerQueryProcessingInfo & query_processing_info, const QueryTreeNodePtr & query_tree, - std::vector & result_actions_to_execute) + UsefulSets & useful_sets) { const auto & query_node = query_tree->as(); @@ -919,7 +918,7 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, if (expressions_analysis_result.hasLimitBy()) { const auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy(); - addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", result_actions_to_execute); + addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets); addLimitByStep(query_plan, limit_by_analysis_result, query_node); } @@ -1057,47 +1056,15 @@ void addOffsetStep(QueryPlan & query_plan, const QueryAnalysisResult & query_ana } } -void collectSetsFromActionsDAG(const ActionsDAG & dag, std::unordered_set & useful_sets) -{ - for (const auto & node : dag.getNodes()) - { - if (node.column) - { - const IColumn * column = node.column.get(); - if (const auto * column_const = typeid_cast(column)) - column = &column_const->getDataColumn(); - - if (const auto * column_set = typeid_cast(column)) - useful_sets.insert(column_set->getData().get()); - } - - if (node.type == ActionsDAG::ActionType::FUNCTION && node.function_base->getName() == "indexHint") - { - ActionsDAG::NodeRawConstPtrs children; - if (const auto * adaptor = typeid_cast(node.function_base.get())) - { - if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) - { - collectSetsFromActionsDAG(*index_hint->getActions(), useful_sets); - } - } - } - } -} - void addBuildSubqueriesForSetsStepIfNeeded( QueryPlan & query_plan, const SelectQueryOptions & select_query_options, const PlannerContextPtr & planner_context, - const std::vector & result_actions_to_execute) + const UsefulSets & 
useful_sets) { auto subqueries = planner_context->getPreparedSets().getSubqueries(); - std::unordered_set useful_sets; - for (const auto * actions_to_execute : result_actions_to_execute) - collectSetsFromActionsDAG(*actions_to_execute, useful_sets); - - auto predicate = [&useful_sets](const auto & set) { return !useful_sets.contains(set.get()); }; + auto predicate = [&useful_sets](const auto & set) { return !useful_sets.contains(set); }; auto it = std::remove_if(subqueries.begin(), subqueries.end(), std::move(predicate)); subqueries.erase(it, subqueries.end()); @@ -1542,15 +1509,15 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info); - std::vector result_actions_to_execute = std::move(join_tree_query_plan.actions_dags); + auto useful_sets = std::move(join_tree_query_plan.useful_sets); for (auto & [_, table_expression_data] : planner_context->getTableExpressionNodeToData()) { if (table_expression_data.getPrewhereFilterActions()) - result_actions_to_execute.push_back(table_expression_data.getPrewhereFilterActions().get()); + appendSetsFromActionsDAG(*table_expression_data.getPrewhereFilterActions(), useful_sets); if (table_expression_data.getRowLevelFilterActions()) - result_actions_to_execute.push_back(table_expression_data.getRowLevelFilterActions().get()); + appendSetsFromActionsDAG(*table_expression_data.getRowLevelFilterActions(), useful_sets); } if (query_processing_info.isIntermediateStage()) @@ -1561,7 +1528,7 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info, query_tree, - result_actions_to_execute); + useful_sets); if (expression_analysis_result.hasAggregation()) { @@ -1573,13 +1540,13 @@ void Planner::buildPlanForQueryNode() if (query_processing_info.isFirstStage()) { if (expression_analysis_result.hasWhere()) - addFilterStep(query_plan, expression_analysis_result.getWhere(), "WHERE", result_actions_to_execute); + addFilterStep(query_plan, expression_analysis_result.getWhere(), "WHERE", useful_sets); if (expression_analysis_result.hasAggregation()) { const auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); if (aggregation_analysis_result.before_aggregation_actions) - addExpressionStep(query_plan, aggregation_analysis_result.before_aggregation_actions, "Before GROUP BY", result_actions_to_execute); + addExpressionStep(query_plan, aggregation_analysis_result.before_aggregation_actions, "Before GROUP BY", useful_sets); addAggregationStep(query_plan, aggregation_analysis_result, query_analysis_result, planner_context, select_query_info); } @@ -1598,7 +1565,7 @@ void Planner::buildPlanForQueryNode() */ const auto & window_analysis_result = expression_analysis_result.getWindow(); if (window_analysis_result.before_window_actions) - addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before WINDOW", result_actions_to_execute); + addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before WINDOW", useful_sets); } else { @@ -1607,7 +1574,7 @@ void Planner::buildPlanForQueryNode() * now, on shards (first_stage). 
*/ const auto & projection_analysis_result = expression_analysis_result.getProjection(); - addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", result_actions_to_execute); + addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", useful_sets); if (query_node.isDistinct()) { @@ -1623,7 +1590,7 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasSort()) { const auto & sort_analysis_result = expression_analysis_result.getSort(); - addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", result_actions_to_execute); + addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", useful_sets); } } } @@ -1634,7 +1601,7 @@ void Planner::buildPlanForQueryNode() planner_context, query_processing_info, query_tree, - result_actions_to_execute); + useful_sets); } if (query_processing_info.isSecondStage() || query_processing_info.isFromAggregationState()) @@ -1656,14 +1623,14 @@ void Planner::buildPlanForQueryNode() if (query_node.isGroupByWithTotals()) { - addTotalsHavingStep(query_plan, expression_analysis_result, query_analysis_result, planner_context, query_node, result_actions_to_execute); + addTotalsHavingStep(query_plan, expression_analysis_result, query_analysis_result, planner_context, query_node, useful_sets); having_executed = true; } addCubeOrRollupStepIfNeeded(query_plan, aggregation_analysis_result, query_analysis_result, planner_context, select_query_info, query_node); if (!having_executed && expression_analysis_result.hasHaving()) - addFilterStep(query_plan, expression_analysis_result.getHaving(), "HAVING", result_actions_to_execute); + addFilterStep(query_plan, expression_analysis_result.getHaving(), "HAVING", useful_sets); } if (query_processing_info.isFromAggregationState()) @@ -1678,16 +1645,16 @@ void Planner::buildPlanForQueryNode() { const auto & window_analysis_result = expression_analysis_result.getWindow(); if (expression_analysis_result.hasAggregation()) - addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before window functions", result_actions_to_execute); + addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before window functions", useful_sets); addWindowSteps(query_plan, planner_context, window_analysis_result); } if (expression_analysis_result.hasQualify()) - addFilterStep(query_plan, expression_analysis_result.getQualify(), "QUALIFY", result_actions_to_execute); + addFilterStep(query_plan, expression_analysis_result.getQualify(), "QUALIFY", useful_sets); const auto & projection_analysis_result = expression_analysis_result.getProjection(); - addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", result_actions_to_execute); + addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", useful_sets); if (query_node.isDistinct()) { @@ -1703,7 +1670,7 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasSort()) { const auto & sort_analysis_result = expression_analysis_result.getSort(); - addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", result_actions_to_execute); + addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", useful_sets); } } else @@ -1756,7 +1723,7 @@ void Planner::buildPlanForQueryNode() if (!query_processing_info.isFromAggregationState() && 
expression_analysis_result.hasLimitBy()) { const auto & limit_by_analysis_result = expression_analysis_result.getLimitBy(); - addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", result_actions_to_execute); + addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets); addLimitByStep(query_plan, limit_by_analysis_result, query_node); } @@ -1788,7 +1755,7 @@ void Planner::buildPlanForQueryNode() if (!query_processing_info.isToAggregationState()) { const auto & projection_analysis_result = expression_analysis_result.getProjection(); - addExpressionStep(query_plan, projection_analysis_result.project_names_actions, "Project names", result_actions_to_execute); + addExpressionStep(query_plan, projection_analysis_result.project_names_actions, "Project names", useful_sets); } // For additional_result_filter setting @@ -1796,7 +1763,7 @@ void Planner::buildPlanForQueryNode() } if (!select_query_options.only_analyze) - addBuildSubqueriesForSetsStepIfNeeded(query_plan, select_query_options, planner_context, result_actions_to_execute); + addBuildSubqueriesForSetsStepIfNeeded(query_plan, select_query_options, planner_context, useful_sets); query_node_to_plan_step_mapping[&query_node] = query_plan.getRootNode(); } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 604d3366484..94054588d40 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -1181,13 +1181,13 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ join_clauses_and_actions.left_join_expressions_actions->appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), join_clauses_and_actions.left_join_expressions_actions); left_join_expressions_actions_step->setStepDescription("JOIN actions"); - left_join_tree_query_plan.actions_dags.emplace_back(left_join_expressions_actions_step->getExpression().get()); + appendSetsFromActionsDAG(*left_join_expressions_actions_step->getExpression(), left_join_tree_query_plan.useful_sets); left_plan.addStep(std::move(left_join_expressions_actions_step)); join_clauses_and_actions.right_join_expressions_actions->appendInputsForUnusedColumns(right_plan.getCurrentDataStream().header); auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), join_clauses_and_actions.right_join_expressions_actions); right_join_expressions_actions_step->setStepDescription("JOIN actions"); - right_join_tree_query_plan.actions_dags.emplace_back(right_join_expressions_actions_step->getExpression().get()); + appendSetsFromActionsDAG(*right_join_expressions_actions_step->getExpression(), right_join_tree_query_plan.useful_sets); right_plan.addStep(std::move(right_join_expressions_actions_step)); } @@ -1387,7 +1387,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ mixed_join_expression = std::make_shared( std::move(join_clauses_and_actions.mixed_join_expressions_actions), ExpressionActionsSettings::fromContext(planner_context->getQueryContext())); - left_join_tree_query_plan.actions_dags.push_back(&mixed_join_expression->getActionsDAG()); + + appendSetsFromActionsDAG(mixed_join_expression->getActionsDAG(), left_join_tree_query_plan.useful_sets); } } else if (join_node.isUsingJoinExpression()) @@ -1585,16 +1586,10 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const 
QueryTreeNodePtr & join_table_ for (const auto & right_join_tree_query_plan_row_policy : right_join_tree_query_plan.used_row_policies) left_join_tree_query_plan.used_row_policies.insert(right_join_tree_query_plan_row_policy); - /// Collect all required actions dags in `left_join_tree_query_plan.actions_dags` + /// Collect all required actions sets in `left_join_tree_query_plan.useful_sets` if (!is_filled_join) - for (const auto * action_dag : right_join_tree_query_plan.actions_dags) - left_join_tree_query_plan.actions_dags.emplace_back(action_dag); - // if (join_clauses_and_actions.left_join_expressions_actions) - // left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.left_join_expressions_actions.get()); - // if (join_clauses_and_actions.right_join_expressions_actions) - // left_join_tree_query_plan.actions_dags.emplace_back(join_clauses_and_actions.right_join_expressions_actions.get()); - // if (join_clauses_and_actions.mixed_join_expressions_actions) - // left_join_tree_query_plan.actions_dags.push_back(join_clauses_and_actions.mixed_join_expressions_actions.get()); + for (const auto & useful_set : right_join_tree_query_plan.useful_sets) + left_join_tree_query_plan.useful_sets.insert(useful_set); auto mapping = std::move(left_join_tree_query_plan.query_node_to_plan_step_mapping); auto & r_mapping = right_join_tree_query_plan.query_node_to_plan_step_mapping; @@ -1604,7 +1599,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ .query_plan = std::move(result_plan), .from_stage = QueryProcessingStage::FetchColumns, .used_row_policies = std::move(left_join_tree_query_plan.used_row_policies), - .actions_dags = std::move(left_join_tree_query_plan.actions_dags), + .useful_sets = std::move(left_join_tree_query_plan.useful_sets), .query_node_to_plan_step_mapping = std::move(mapping), }; } @@ -1649,7 +1644,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), std::move(array_join_action_dag)); array_join_actions->setStepDescription("ARRAY JOIN actions"); - join_tree_query_plan.actions_dags.push_back(array_join_actions->getExpression().get()); + appendSetsFromActionsDAG(*array_join_actions->getExpression(), join_tree_query_plan.useful_sets); plan.addStep(std::move(array_join_actions)); auto drop_unused_columns_before_array_join_actions_dag = std::make_unique(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); @@ -1690,7 +1685,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ .query_plan = std::move(plan), .from_stage = QueryProcessingStage::FetchColumns, .used_row_policies = std::move(join_tree_query_plan.used_row_policies), - .actions_dags = std::move(join_tree_query_plan.actions_dags), + .useful_sets = std::move(join_tree_query_plan.useful_sets), .query_node_to_plan_step_mapping = std::move(join_tree_query_plan.query_node_to_plan_step_mapping), }; } diff --git a/src/Planner/PlannerJoinTree.h b/src/Planner/PlannerJoinTree.h index 675079427eb..bc58e802a09 100644 --- a/src/Planner/PlannerJoinTree.h +++ b/src/Planner/PlannerJoinTree.h @@ -11,12 +11,14 @@ namespace DB { +using UsefulSets = std::unordered_set; + struct JoinTreeQueryPlan { QueryPlan query_plan; QueryProcessingStage::Enum from_stage; std::set used_row_policies{}; - std::vector actions_dags{}; + UsefulSets useful_sets; std::unordered_map query_node_to_plan_step_mapping{}; }; diff --git a/src/Planner/Utils.cpp 
b/src/Planner/Utils.cpp index 493ecf5ef53..7ac53e0f8c1 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp @@ -11,10 +11,12 @@ #include #include +#include #include #include +#include #include @@ -475,4 +477,32 @@ ASTPtr parseAdditionalResultFilter(const Settings & settings) return additional_result_filter_ast; } +void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets) +{ + for (const auto & node : dag.getNodes()) + { + if (node.column) + { + const IColumn * column = node.column.get(); + if (const auto * column_const = typeid_cast(column)) + column = &column_const->getDataColumn(); + + if (const auto * column_set = typeid_cast(column)) + useful_sets.insert(column_set->getData()); + } + + if (node.type == ActionsDAG::ActionType::FUNCTION && node.function_base->getName() == "indexHint") + { + ActionsDAG::NodeRawConstPtrs children; + if (const auto * adaptor = typeid_cast(node.function_base.get())) + { + if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) + { + appendSetsFromActionsDAG(*index_hint->getActions(), useful_sets); + } + } + } + } +} + } diff --git a/src/Planner/Utils.h b/src/Planner/Utils.h index 3172847f053..ae60976a8d6 100644 --- a/src/Planner/Utils.h +++ b/src/Planner/Utils.h @@ -88,4 +88,7 @@ FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree, ASTPtr parseAdditionalResultFilter(const Settings & settings); +using UsefulSets = std::unordered_set; +void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets); + } From 5d16ba57aa84ef82ccf7e34a4635ad1d14e7859d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 3 Jul 2024 14:46:49 +0000 Subject: [PATCH 0269/2361] Update version_date.tsv and changelogs after v24.3.5.46-lts --- docs/changelogs/v24.3.5.46-lts.md | 40 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 41 insertions(+) create mode 100644 docs/changelogs/v24.3.5.46-lts.md diff --git a/docs/changelogs/v24.3.5.46-lts.md b/docs/changelogs/v24.3.5.46-lts.md new file mode 100644 index 00000000000..1f2b7c8b0b7 --- /dev/null +++ b/docs/changelogs/v24.3.5.46-lts.md @@ -0,0 +1,40 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.5.46-lts (fe54cead6b6) FIXME as compared to v24.3.4.147-lts (31a7bdc346d) + +#### Improvement +* Backported in [#65463](https://github.com/ClickHouse/ClickHouse/issues/65463): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). +* Backported in [#65882](https://github.com/ClickHouse/ClickHouse/issues/65882): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#65302](https://github.com/ClickHouse/ClickHouse/issues/65302): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). 
+* Backported in [#65892](https://github.com/ClickHouse/ClickHouse/issues/65892): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#65283](https://github.com/ClickHouse/ClickHouse/issues/65283): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#65370](https://github.com/ClickHouse/ClickHouse/issues/65370): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#65446](https://github.com/ClickHouse/ClickHouse/issues/65446): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#65708](https://github.com/ClickHouse/ClickHouse/issues/65708): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#65352](https://github.com/ClickHouse/ClickHouse/issues/65352): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#65327](https://github.com/ClickHouse/ClickHouse/issues/65327): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)). +* Backported in [#65538](https://github.com/ClickHouse/ClickHouse/issues/65538): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). +* Backported in [#65576](https://github.com/ClickHouse/ClickHouse/issues/65576): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). +* Backported in [#65159](https://github.com/ClickHouse/ClickHouse/issues/65159): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65615](https://github.com/ClickHouse/ClickHouse/issues/65615): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65728](https://github.com/ClickHouse/ClickHouse/issues/65728): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)). 
+* Backported in [#65261](https://github.com/ClickHouse/ClickHouse/issues/65261): Fix the bug in Hashed and Hashed_Array dictionary short circuit evaluation, which may read uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)). +* Backported in [#65667](https://github.com/ClickHouse/ClickHouse/issues/65667): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65784](https://github.com/ClickHouse/ClickHouse/issues/65784): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#65929](https://github.com/ClickHouse/ClickHouse/issues/65929): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#65824](https://github.com/ClickHouse/ClickHouse/issues/65824): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#65223](https://github.com/ClickHouse/ClickHouse/issues/65223): Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#65901](https://github.com/ClickHouse/ClickHouse/issues/65901): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 8112ed9083b..8e748a2c2ca 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -6,6 +6,7 @@ v24.5.1.1763-stable 2024-06-01 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 +v24.3.5.46-lts 2024-07-03 v24.3.4.147-lts 2024-06-13 v24.3.3.102-lts 2024-05-01 v24.3.2.23-lts 2024-04-03 From d0f36e09a964c1e8a3040d6cfd8b3edfec47474d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Jul 2024 04:58:27 +0200 Subject: [PATCH 0270/2361] Fix error --- src/Databases/DatabaseAtomic.cpp | 3 +++ src/Databases/DatabaseAtomic.h | 2 ++ src/Databases/DatabaseOnDisk.cpp | 2 ++ src/Databases/DatabaseOnDisk.h | 1 + 4 files changed, 8 insertions(+) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index c06fc98d0b9..7f198042e44 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -58,6 +58,9 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C void DatabaseAtomic::createDirectories() { + if (database_atomic_directories_created.test_and_set()) + return; + DatabaseOnDisk::createDirectories(); fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); fs::create_directories(path_to_table_symlinks); tryCreateMetadataSymlink(); diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 26ab7657354..9df300daa20 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -75,7 +75,9 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); + std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT; void createDirectories(); + void tryCreateMetadataSymlink(); virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index faac4b23701..0a0ad589a9f 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -175,6 +175,8 @@ DatabaseOnDisk::DatabaseOnDisk( void DatabaseOnDisk::createDirectories() { + if (directories_created.test_and_set()) + return; fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); fs::create_directories(metadata_path); } diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 00e7a2850b8..a8be674a4e2 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -99,6 +99,7 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} + std::atomic_flag directories_created = ATOMIC_FLAG_INIT; void createDirectories(); const String metadata_path; From b52937e8580055a126f3d263cad893f212ce07b9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Jul 2024 05:00:18 +0200 Subject: [PATCH 0271/2361] Better test --- tests/queries/0_stateless/01191_rename_dictionary.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01191_rename_dictionary.sql b/tests/queries/0_stateless/01191_rename_dictionary.sql index c5012dabc81..be95e5a7d4b 100644 --- a/tests/queries/0_stateless/01191_rename_dictionary.sql +++ b/tests/queries/0_stateless/01191_rename_dictionary.sql @@ -27,6 
+27,7 @@ RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QU DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} DROP TABLE test_01191.t; +DROP DATABASE IF EXISTS dummy_db; CREATE DATABASE dummy_db ENGINE=Atomic; RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; From ee0985c5b4dbb15d28c9a034b60e438099b5c5bc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Jul 2024 05:01:35 +0200 Subject: [PATCH 0272/2361] Fix test --- .../02141_clickhouse_local_interactive_table.reference | 4 ++-- .../0_stateless/02141_clickhouse_local_interactive_table.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 0bb8966cbe4..0e74c0a083e 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') -CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh index 934d87616ac..3a95e59416a 100755 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' -$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' +$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' +$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' From bf312f200cb69267741778b2af04bf7a6854a2f6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Jul 2024 05:24:42 +0200 Subject: [PATCH 0273/2361] DatabaseOverlay: add support for rename --- programs/local/LocalServer.cpp | 8 ++++---- src/Databases/DatabasesOverlay.cpp | 33 ++++++++++++++++++++++++++++++ src/Databases/DatabasesOverlay.h | 8 ++++++++ 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index cda276c8407..41bb5604a52 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -201,10 +201,10 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context) { - auto databaseCombiner = std::make_shared(name_, context); - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context)); - databaseCombiner->registerNextDatabase(std::make_shared(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context)); - return databaseCombiner; + auto overlay = std::make_shared(name_, context); + overlay->registerNextDatabase(std::make_shared(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context)); + overlay->registerNextDatabase(std::make_shared(name_, "", context)); + return overlay; } /// If path is specified and not empty, will try to setup server environment and load existing metadata diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 02a0aab8230..e1a457920cf 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -124,6 +124,39 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab getEngineName()); } +void DatabasesOverlay::renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) +{ + for (auto & db : databases) + { + if (db->isTableExist(name, current_context)) + { + if (DatabasesOverlay * to_overlay_database = typeid_cast(&to_database)) + { + /// Renaming from Overlay database inside itself or into another Overlay database. + /// Just use the first database in the overlay as a destination. + if (to_overlay_database->databases.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName()); + + db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary); + } + else + { + /// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database. 
+ db->renameTable(current_context, name, to_database, to_name, exchange, dictionary); + } + + return; + } + } + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name)); +} + ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const { ASTPtr result = nullptr; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index 5f6d4e601d3..40c653e5cb5 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -35,6 +35,14 @@ public: StoragePtr detachTable(ContextPtr context, const String & table_name) override; + void renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) override; + ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; ASTPtr getCreateDatabaseQuery() const override; From a70710e3f19aea4434aebbd07233b8e681e4e9e4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Jul 2024 05:27:56 +0200 Subject: [PATCH 0274/2361] Add a test --- .../03199_atomic_clickhouse_local.reference | 6 +++++ .../03199_atomic_clickhouse_local.sh | 24 +++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 tests/queries/0_stateless/03199_atomic_clickhouse_local.reference create mode 100755 tests/queries/0_stateless/03199_atomic_clickhouse_local.sh diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference new file mode 100644 index 00000000000..1975397394b --- /dev/null +++ b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference @@ -0,0 +1,6 @@ +123 +Hello +['Hello','world'] +Hello +Hello +['Hello','world'] diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh new file mode 100755 index 00000000000..edaa83b8f95 --- /dev/null +++ b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_LOCAL} -n " +CREATE TABLE test (x UInt8) ORDER BY x; +INSERT INTO test VALUES (123); +SELECT * FROM test; +CREATE OR REPLACE TABLE test (s String) ORDER BY s; +INSERT INTO test VALUES ('Hello'); +SELECT * FROM test; +RENAME TABLE test TO test2; +CREATE OR REPLACE TABLE test (s Array(String)) ORDER BY s; +INSERT INTO test VALUES (['Hello', 'world']); +SELECT * FROM test; +SELECT * FROM test2; +EXCHANGE TABLES test AND test2; +SELECT * FROM test; +SELECT * FROM test2; +DROP TABLE test; +DROP TABLE test2; +" From cb5d5863467a14cebdbc2dc1c6e4d72afe256515 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 4 Jul 2024 05:49:55 +0200 Subject: [PATCH 0275/2361] Fix style --- src/Databases/DatabasesOverlay.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index e1a457920cf..495733e15fd 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,6 +14,8 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_TABLE; } DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) From 9036ce9725c6d273e84d44b862cdecf31fe36d9c Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 4 Jul 2024 11:09:47 +0800 Subject: [PATCH 0276/2361] Some fixups after merging --- .../mergetree-family/mergetree.md | 2 +- src/Storages/MergeTree/MergeTreeData.cpp | 16 ++++------ .../ConditionSelectivityEstimator.cpp | 11 ++----- .../ConditionSelectivityEstimator.h | 22 +++++++------- src/Storages/Statistics/Statistics.cpp | 29 +++++++++---------- src/Storages/Statistics/Statistics.h | 5 ++-- ...stics.cpp => StatisticsCountMinSketch.cpp} | 20 +++++++------ ...tatistics.h => StatisticsCountMinSketch.h} | 9 ++---- src/Storages/Statistics/StatisticsTDigest.cpp | 18 ++++++++---- src/Storages/Statistics/StatisticsTDigest.h | 2 +- src/Storages/Statistics/TDigestStatistics.cpp | 0 src/Storages/StatisticsDescription.cpp | 5 ---- .../02864_statistics_estimate_predicate.sql | 2 ++ 13 files changed, 65 insertions(+), 76 deletions(-) rename src/Storages/Statistics/{CountMinSketchStatistics.cpp => StatisticsCountMinSketch.cpp} (77%) rename src/Storages/Statistics/{CountMinSketchStatistics.h => StatisticsCountMinSketch.h} (71%) delete mode 100644 src/Storages/Statistics/TDigestStatistics.cpp diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 6466aebfaa3..7ffbd9a5bae 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1001,7 +1001,7 @@ They can be used for prewhere optimization only if we enable `set allow_statisti - `count_min` - [count min sketch](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column. + [Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column. 
## Column-level Settings {#column-level-settings} diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 6f65f65a4a0..edcd443910e 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -496,11 +496,9 @@ ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByP { auto stats = part->loadStatistics(); /// TODO: We only have one stats file for every part. - if (stats.empty()) /// No statistics still need add rows count. - result.addRows(part->rows_count); - else - for (const auto & stat : stats) - result.merge(part->info.getPartNameV1(), part->rows_count, stat); + result.addRows(part->rows_count); + for (const auto & stat : stats) + result.merge(part->info.getPartNameV1(), stat); } catch (...) { @@ -515,11 +513,9 @@ ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByP if (!partition_pruner.canBePruned(*part)) { auto stats = part->loadStatistics(); - if (stats.empty()) - result.addRows(part->rows_count); - else - for (const auto & stat : stats) - result.merge(part->info.getPartNameV1(), part->rows_count, stat); + result.addRows(part->rows_count); + for (const auto & stat : stats) + result.merge(part->info.getPartNameV1(), stat); } } catch (...) diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp index 437b39cb537..1755f0eb4df 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp @@ -35,7 +35,7 @@ Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateGreat return rows - estimateLess(val, rows); } -Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(Field val, Float64 rows) const +Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(const Field & val, Float64 rows) const { auto float_val = IStatistics::getFloat64(val); if (part_statistics.empty()) @@ -141,7 +141,7 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode /// If there the estimator of the column is not found or there are no data at all, /// we use dummy estimation. - bool dummy = total_rows == 0; + bool dummy = false; ColumnSelectivityEstimator estimator; if (it != column_estimators.end()) estimator = it->second; @@ -176,13 +176,8 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode return default_unknown_cond_factor * total_rows; } -void ConditionSelectivityEstimator::merge(String part_name, UInt64 part_rows, ColumnStatisticsPtr column_stat) +void ConditionSelectivityEstimator::merge(String part_name, ColumnStatisticsPtr column_stat) { - if (!part_names.contains(part_name)) - { - total_rows += part_rows; - part_names.insert(part_name); - } if (column_stat != nullptr) column_estimators[column_stat->columnName()].merge(part_name, column_stat); } diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.h b/src/Storages/Statistics/ConditionSelectivityEstimator.h index ff6218e7ef1..b9127fcd5bf 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.h +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.h @@ -11,6 +11,14 @@ class RPNBuilderTreeNode; /// It estimates the selectivity of a condition. class ConditionSelectivityEstimator { +public: + /// TODO: Support the condition consists of CNF/DNF like (cond1 and cond2) or (cond3) ... 
+ /// Right now we only support simple condition like col = val / col < val + Float64 estimateRowCount(const RPNBuilderTreeNode & node) const; + + void merge(String part_name, ColumnStatisticsPtr column_stat); + void addRows(UInt64 part_rows) { total_rows += part_rows; } + private: friend class ColumnStatistics; struct ColumnSelectivityEstimator @@ -25,9 +33,11 @@ private: Float64 estimateGreater(Float64 val, Float64 rows) const; - Float64 estimateEqual(Field val, Float64 rows) const; + Float64 estimateEqual(const Field & val, Float64 rows) const; }; + std::pair extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const; + static constexpr auto default_good_cond_factor = 0.1; static constexpr auto default_normal_cond_factor = 0.5; static constexpr auto default_unknown_cond_factor = 1.0; @@ -36,17 +46,7 @@ private: static constexpr auto threshold = 2; UInt64 total_rows = 0; - std::set part_names; std::map column_estimators; - std::pair extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const; - -public: - /// TODO: Support the condition consists of CNF/DNF like (cond1 and cond2) or (cond3) ... - /// Right now we only support simple condition like col = val / col < val - Float64 estimateRowCount(const RPNBuilderTreeNode & node) const; - - void merge(String part_name, UInt64 part_rows, ColumnStatisticsPtr column_stat); - void addRows(UInt64 part_rows) { total_rows += part_rows; } }; } diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index d3776a2d38c..cd94ed716cd 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -1,12 +1,12 @@ -#include -#include -#include -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -93,7 +93,7 @@ UInt64 IStatistics::estimateCardinality() const throw Exception(ErrorCodes::LOGICAL_ERROR, "Cardinality estimation is not implemented for this type of statistics"); } -Float64 IStatistics::estimateEqual(Float64 /*val*/) const +Float64 IStatistics::estimateEqual(const Field & /*val*/) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Equality estimation is not implemented for this type of statistics"); } @@ -125,23 +125,20 @@ Float64 ColumnStatistics::estimateGreater(Float64 val) const return rows - estimateLess(val); } -Float64 ColumnStatistics::estimateEqual(Field val) const +Float64 ColumnStatistics::estimateEqual(const Field & val) const { auto float_val = IStatistics::getFloat64(val); - if (stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) + if (float_val.has_value() && stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) { /// 2048 is the default number of buckets in TDigest. In this case, TDigest stores exactly one value (with many rows) for every bucket. 
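        /// Hence, when the Uniq statistic reports fewer distinct values than the TDigest has buckets, each distinct value
        /// effectively occupies its own bucket, so the equality estimate taken from the TDigest is close to exact.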
if (stats.at(StatisticsType::Uniq)->estimateCardinality() < 2048) - return stats.at(StatisticsType::TDigest)->estimateEqual(float_val); + return stats.at(StatisticsType::TDigest)->estimateEqual(val); } #if USE_DATASKETCHES if (stats.contains(StatisticsType::CountMinSketch)) - { - auto count_min_sketch_static = std::static_pointer_cast(stats.at(StatisticsType::CountMinSketch)); - return count_min_sketch_static->estimateEqual(val); - } + return stats.at(StatisticsType::CountMinSketch)->estimateEqual(val); #endif - if (float_val < - ConditionSelectivityEstimator::threshold || float_val > ConditionSelectivityEstimator::threshold) + if (!float_val.has_value() && (float_val < - ConditionSelectivityEstimator::threshold || float_val > ConditionSelectivityEstimator::threshold)) return rows * ConditionSelectivityEstimator::default_normal_cond_factor; else return rows * ConditionSelectivityEstimator::default_good_cond_factor; diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index 4b953ddb364..c9bf3ca4847 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -35,7 +35,7 @@ public: /// Per-value estimations. /// Throws if the statistics object is not able to do a meaningful estimation. - virtual Float64 estimateEqual(Float64 val) const; /// cardinality of val in the column + virtual Float64 estimateEqual(const Field & val) const; /// cardinality of val in the column virtual Float64 estimateLess(Float64 val) const; /// summarized cardinality of values < val in the column /// Convert filed to Float64, used when estimating the number of rows. @@ -67,8 +67,7 @@ public: Float64 estimateLess(Float64 val) const; Float64 estimateGreater(Float64 val) const; - Float64 estimateEqual(Float64 val) const; - Float64 estimateEqual(Field val) const; + Float64 estimateEqual(const Field & val) const; private: friend class MergeTreeStatisticsFactory; diff --git a/src/Storages/Statistics/CountMinSketchStatistics.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp similarity index 77% rename from src/Storages/Statistics/CountMinSketchStatistics.cpp rename to src/Storages/Statistics/StatisticsCountMinSketch.cpp index 497570bd2d1..dd8ceef4e2d 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -1,8 +1,8 @@ -#include -#include #include +#include #include #include +#include #if USE_DATASKETCHES @@ -14,15 +14,17 @@ namespace ErrorCodes extern const int ILLEGAL_STATISTICS; } +static constexpr auto num_hashes = 8uz; +static constexpr auto num_buckets = 2048uz; -CountMinSketchStatistics::CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) +StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) : IStatistics(stat_) , sketch(num_hashes, num_buckets) , data_type(data_type_) { } -Float64 CountMinSketchStatistics::estimateEqual(const Field & value) const +Float64 StatisticsCountMinSketch::estimateEqual(const Field & value) const { if (auto float_val = IStatistics::getFloat64(value)) return sketch.get_estimate(&float_val.value(), 8); @@ -31,14 +33,14 @@ Float64 CountMinSketchStatistics::estimateEqual(const Field & value) const UNREACHABLE(); } -void CountMinSketchStatistics::serialize(WriteBuffer & buf) +void StatisticsCountMinSketch::serialize(WriteBuffer & buf) { Sketch::vector_bytes bytes = sketch.serialize(); writeIntBinary(static_cast(bytes.size()), buf); 
buf.write(reinterpret_cast(bytes.data()), bytes.size()); } -void CountMinSketchStatistics::deserialize(ReadBuffer & buf) +void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) { UInt64 size; readIntBinary(size, buf); @@ -47,10 +49,10 @@ void CountMinSketchStatistics::deserialize(ReadBuffer & buf) bytes.reserve(size); buf.readStrict(reinterpret_cast(bytes.data()), size); - sketch = datasketches::count_min_sketch::deserialize(bytes.data(), size); + sketch = Sketch::deserialize(bytes.data(), size); } -void CountMinSketchStatistics::update(const ColumnPtr & column) +void StatisticsCountMinSketch::update(const ColumnPtr & column) { size_t size = column->size(); for (size_t i = 0; i < size; ++i) @@ -78,7 +80,7 @@ void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr da StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { - return std::make_shared(stat, data_type); + return std::make_shared(stat, data_type); } } diff --git a/src/Storages/Statistics/CountMinSketchStatistics.h b/src/Storages/Statistics/StatisticsCountMinSketch.h similarity index 71% rename from src/Storages/Statistics/CountMinSketchStatistics.h rename to src/Storages/Statistics/StatisticsCountMinSketch.h index 52046a19b64..3ea3f2dbd3b 100644 --- a/src/Storages/Statistics/CountMinSketchStatistics.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -11,12 +11,12 @@ namespace DB { -class CountMinSketchStatistics : public IStatistics +class StatisticsCountMinSketch : public IStatistics { public: - CountMinSketchStatistics(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); + StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); - Float64 estimateEqual(const Field & value) const; + Float64 estimateEqual(const Field & value) const override; void serialize(WriteBuffer & buf) override; void deserialize(ReadBuffer & buf) override; @@ -24,9 +24,6 @@ public: void update(const ColumnPtr & column) override; private: - static constexpr auto num_hashes = 8uz; - static constexpr auto num_buckets = 2048uz; - using Sketch = datasketches::count_min_sketch; Sketch sketch; diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index 0747197370c..306338b4ba2 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB { @@ -16,12 +17,16 @@ StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & stat_) void StatisticsTDigest::update(const ColumnPtr & column) { size_t rows = column->size(); - for (size_t row = 0; row < rows; ++row) { - /// TODO: support more types. 
- Float64 value = column->getFloat64(row); - t_digest.add(value, 1); + Field f; + column->get(row, f); + + if (f.isNull()) + continue; + + if (auto float_val = IStatistics::getFloat64(f)) + t_digest.add(*float_val, 1); } } @@ -40,14 +45,15 @@ Float64 StatisticsTDigest::estimateLess(Float64 val) const return t_digest.getCountLessThan(val); } -Float64 StatisticsTDigest::estimateEqual(Float64 val) const +Float64 StatisticsTDigest::estimateEqual(const Field & val) const { - return t_digest.getCountEqual(val); + return t_digest.getCountEqual(IStatistics::getFloat64(val).value()); } void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); + data_type = removeLowCardinalityAndNullable(data_type); if (!data_type->isValueRepresentedByNumber()) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' do not support type {}", data_type->getName()); } diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index d3a3bf115ee..8016faac7c6 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -17,7 +17,7 @@ public: void deserialize(ReadBuffer & buf) override; Float64 estimateLess(Float64 val) const override; - Float64 estimateEqual(Float64 val) const override; + Float64 estimateEqual(const Field & val) const override; private: QuantileTDigest t_digest; diff --git a/src/Storages/Statistics/TDigestStatistics.cpp b/src/Storages/Statistics/TDigestStatistics.cpp deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index d59ffb5c8e9..8aa954f5eb5 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -1,19 +1,14 @@ #include -#include #include #include #include #include -#include -#include #include #include #include -#include #include -#include namespace DB { diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql index a608f18a354..61bae842631 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql @@ -27,6 +27,7 @@ ALTER TABLE t1 MATERIALIZE STATISTICS b, c; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b > 0/*9990*/ and c < -98/*100*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b = 0/*1000*/ and c < -98/*100*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; @@ -69,6 +70,7 @@ ALTER TABLE t1 MATERIALIZE STATISTICS a, b, c; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; From 84853f6d4b6e37160888f46ba61a8a1e70d7583a Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 4 Jul 2024 16:18:11 +0800 Subject: [PATCH 0277/2361] Fix 
style checking --- ...64_statistics_estimate_predicate.reference | 2 +- .../02864_statistics_estimate_predicate.sql | 64 ++++++++++--------- .../0_stateless/02864_statistics_uniq.sql | 0 3 files changed, 34 insertions(+), 32 deletions(-) delete mode 100644 tests/queries/0_stateless/02864_statistics_uniq.sql diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference index 7215d5fef58..7c22f308ab9 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference @@ -22,4 +22,4 @@ Test statistics multi-types: Prewhere filter Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) Test LowCardinality and Nullable data type: -t2 +tab2 diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql index 61bae842631..7fcb85d80f5 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql @@ -1,12 +1,14 @@ -- Tags: no-fasttest -DROP TABLE IF EXISTS t1 SYNC; +-- Tests statistics usages in prewhere optimization. + +DROP TABLE IF EXISTS tab SYNC; SET allow_experimental_statistics = 1; SET allow_statistics_optimize = 1; SET allow_suspicious_low_cardinality_types=1; SET mutations_sync = 2; -CREATE TABLE t1 +CREATE TABLE tab ( a String, b UInt64, @@ -15,75 +17,75 @@ CREATE TABLE t1 ) Engine = MergeTree() ORDER BY pk SETTINGS min_bytes_for_wide_part = 0; -SHOW CREATE TABLE t1; +SHOW CREATE TABLE tab; -INSERT INTO t1 select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; +INSERT INTO tab select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; SELECT 'Test statistics TDigest:'; -ALTER TABLE t1 ADD STATISTICS b, c TYPE tdigest; -ALTER TABLE t1 MATERIALIZE STATISTICS b, c; +ALTER TABLE tab ADD STATISTICS b, c TYPE tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS b, c; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b > 0/*9990*/ and c < -98/*100*/) +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b > 0/*9990*/ and c < -98/*100*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b = 0/*1000*/ and c < -98/*100*/) +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b = 0/*1000*/ and c < -98/*100*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -ALTER TABLE t1 DROP STATISTICS b, c; +ALTER TABLE tab DROP STATISTICS b, c; SELECT 'Test statistics Uniq:'; -ALTER TABLE t1 ADD STATISTICS b TYPE uniq, tdigest; -ALTER TABLE t1 MATERIALIZE STATISTICS b; +ALTER TABLE tab ADD STATISTICS b TYPE uniq, tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS b; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0/*1000*/ and b = 0/*10*/) +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*1000*/ and b = 0/*10*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -ALTER TABLE t1 DROP STATISTICS b; +ALTER TABLE tab DROP STATISTICS b; SELECT 'Test statistics count_min:'; 
-ALTER TABLE t1 ADD STATISTICS a TYPE count_min; -ALTER TABLE t1 ADD STATISTICS b TYPE count_min; -ALTER TABLE t1 ADD STATISTICS c TYPE count_min; -ALTER TABLE t1 MATERIALIZE STATISTICS a, b, c; +ALTER TABLE tab ADD STATISTICS a TYPE count_min; +ALTER TABLE tab ADD STATISTICS b TYPE count_min; +ALTER TABLE tab ADD STATISTICS c TYPE count_min; +ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -ALTER TABLE t1 DROP STATISTICS a, b, c; +ALTER TABLE tab DROP STATISTICS a, b, c; SELECT 'Test statistics multi-types:'; -ALTER TABLE t1 ADD STATISTICS a TYPE count_min; -ALTER TABLE t1 ADD STATISTICS b TYPE count_min, uniq, tdigest; -ALTER TABLE t1 ADD STATISTICS c TYPE count_min, uniq, tdigest; -ALTER TABLE t1 MATERIALIZE STATISTICS a, b, c; +ALTER TABLE tab ADD STATISTICS a TYPE count_min; +ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; +ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -ALTER TABLE t1 DROP STATISTICS a, b, c; +ALTER TABLE tab DROP STATISTICS a, b, c; -DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS tab SYNC; SELECT 'Test LowCardinality and Nullable data type:'; -DROP TABLE IF EXISTS t2 SYNC; +DROP TABLE IF EXISTS tab2 SYNC; SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 +CREATE TABLE tab2 ( a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), @@ -91,6 +93,6 @@ CREATE TABLE t2 pk String, ) Engine = MergeTree() ORDER BY pk; -select table from system.tables where name = 't2'; +select name from system.tables where name = 'tab2' and database = currentDatabase(); -DROP TABLE IF EXISTS t2 SYNC; +DROP TABLE IF EXISTS tab2 SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql deleted file mode 100644 index e69de29bb2d..00000000000 From a99c803ddf137e91f43c8f26f549f74f71eab102 Mon Sep 17 00:00:00 2001 From: morning-color Date: Thu, 4 Jul 2024 17:26:06 +0800 Subject: [PATCH 0278/2361] Rename rows_before_group_by_at_least to rows_before_aggregation_at_least --- src/Client/ClientBase.cpp | 4 +- src/Core/Settings.h | 2 +- src/Formats/JSONUtils.cpp | 9 +- src/Formats/JSONUtils.h | 2 + src/Processors/Formats/IOutputFormat.cpp | 4 +- src/Processors/Formats/IOutputFormat.h | 15 +- ...ONColumnsWithMetadataBlockOutputFormat.cpp | 2 + ...JSONColumnsWithMetadataBlockOutputFormat.h | 5 + .../Formats/Impl/JSONRowOutputFormat.cpp | 2 + 
.../Formats/Impl/JSONRowOutputFormat.h | 5 + .../Impl/ParallelFormattingOutputFormat.h | 6 + .../Impl/TemplateBlockOutputFormat.cpp | 17 +- .../Formats/Impl/TemplateBlockOutputFormat.h | 8 +- .../Formats/Impl/XMLRowOutputFormat.cpp | 11 + .../Formats/Impl/XMLRowOutputFormat.h | 6 + src/Processors/Formats/LazyOutputFormat.cpp | 4 +- src/Processors/Formats/LazyOutputFormat.h | 2 +- .../Formats/PullingOutputFormat.cpp | 4 +- src/Processors/Formats/PullingOutputFormat.h | 2 +- src/Processors/IProcessor.h | 6 +- src/Processors/RowsBeforeLimitCounter.h | 2 +- src/Processors/Sources/DelayedSource.cpp | 6 + src/Processors/Sources/DelayedSource.h | 4 +- src/Processors/Sources/RemoteSource.cpp | 12 +- src/Processors/Sources/RemoteSource.h | 8 +- .../Transforms/AggregatingTransform.cpp | 4 +- .../Transforms/AggregatingTransform.h | 7 +- src/QueryPipeline/ProfileInfo.cpp | 20 +- src/QueryPipeline/ProfileInfo.h | 16 +- src/QueryPipeline/QueryPipeline.cpp | 21 +- src/Server/GRPCServer.cpp | 3 +- src/Server/grpc_protos/clickhouse_grpc.proto | 4 +- ...74_exact_rows_before_aggregation.reference | 355 ++++++++++++++++++ .../03174_exact_rows_before_aggregation.sql | 31 ++ 34 files changed, 538 insertions(+), 71 deletions(-) create mode 100644 tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference create mode 100644 tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index dbb67d230d5..4b82f30776f 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -522,8 +522,8 @@ void ClientBase::onProfileInfo(const ProfileInfo & profile_info) { if (profile_info.hasAppliedLimit() && output_format) output_format->setRowsBeforeLimit(profile_info.getRowsBeforeLimit()); - if (profile_info.hasAppliedGroupBy() && output_format) - output_format->setRowsBeforeGroupBy(profile_info.getRowsBeforeGroupBy()); + if (profile_info.hasAppliedAggregation() && output_format) + output_format->setRowsBeforeAggregation(profile_info.getRowsBeforeAggregation()); } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 09291d4300d..2296a880bd6 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1200,7 +1200,7 @@ class IColumn; M(Bool, insert_distributed_one_random_shard, false, "If setting is enabled, inserting into distributed table will choose a random shard to write when there is no sharding key", 0) \ \ M(Bool, exact_rows_before_limit, false, "When enabled, ClickHouse will provide exact value for rows_before_limit_at_least statistic, but with the cost that the data before limit will have to be read completely", 0) \ - M(Bool, exact_rows_before_group_by, false, "When enabled, ClickHouse will provide exact value for rows_before_group_by_at_least statistic, but with the cost that the data before group by will have to be read completely", 0) \ + M(Bool, rows_before_aggregation, false, "When enabled, ClickHouse will provide exact value for rows_before_aggregation_at_least statistic, represents the number of rows read before aggregation", 0) \ M(UInt64, cross_to_inner_join_rewrite, 1, "Use inner join instead of comma/cross join if there are joining expressions in the WHERE section. 
Values: 0 - no rewrite, 1 - apply if possible for comma/cross, 2 - force rewrite all comma joins, cross - if possible", 0) \ \ M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Enable output LowCardinality type as Dictionary Arrow type", 0) \ diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index f0985f4a6b7..363e9344770 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -487,6 +487,8 @@ namespace JSONUtils size_t rows, size_t rows_before_limit, bool applied_limit, + size_t rows_before_aggregation, + bool applied_aggregation, const Stopwatch & watch, const Progress & progress, bool write_statistics, @@ -502,7 +504,12 @@ namespace JSONUtils writeTitle("rows_before_limit_at_least", out, 1, " "); writeIntText(rows_before_limit, out); } - + if (applied_aggregation) + { + writeFieldDelimiter(out, 2); + writeTitle("rows_before_aggregation_at_least", out, 1, " "); + writeIntText(rows_before_aggregation, out); + } if (write_statistics) { writeFieldDelimiter(out, 2); diff --git a/src/Formats/JSONUtils.h b/src/Formats/JSONUtils.h index 7ee111c1285..e2ac3467971 100644 --- a/src/Formats/JSONUtils.h +++ b/src/Formats/JSONUtils.h @@ -104,6 +104,8 @@ namespace JSONUtils size_t rows, size_t rows_before_limit, bool applied_limit, + size_t rows_before_aggregation, + bool applied_aggregation, const Stopwatch & watch, const Progress & progress, bool write_statistics, diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp index 4191bf9f0fe..7eaecfab3dc 100644 --- a/src/Processors/Formats/IOutputFormat.cpp +++ b/src/Processors/Formats/IOutputFormat.cpp @@ -71,8 +71,8 @@ void IOutputFormat::work() { if (rows_before_limit_counter && rows_before_limit_counter->hasAppliedLimit()) setRowsBeforeLimit(rows_before_limit_counter->get()); - if (rows_before_group_by_counter && rows_before_group_by_counter->hasAppliedLimit()) - setRowsBeforeGroupBy(rows_before_group_by_counter->get()); + if (rows_before_aggregation_counter && rows_before_aggregation_counter->hasAppliedLimit()) + setRowsBeforeAggregation(rows_before_aggregation_counter->get()); finalize(); if (auto_flush) flush(); diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h index 9bb7cccb612..1119797e7ff 100644 --- a/src/Processors/Formats/IOutputFormat.h +++ b/src/Processors/Formats/IOutputFormat.h @@ -36,16 +36,16 @@ public: void setAutoFlush() { auto_flush = true; } /// Value for rows_before_limit_at_least field. - virtual void setRowsBeforeLimit(size_t /*rows_before_limit*/) {} + virtual void setRowsBeforeLimit(size_t /*rows*/) { } /// Counter to calculate rows_before_limit_at_least in processors pipeline. void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit_counter.swap(counter); } - /// Value for rows_before_group_by_at_least field. - virtual void setRowsBeforeGroupBy(size_t /*rows_before_limit*/) { } + /// Value for rows_before_aggregation_at_least field. + virtual void setRowsBeforeAggregation(size_t /*rows*/) { } - /// Counter to calculate rows_before_group_by_at_least in processors pipeline. - void setRowsBeforeGroupByCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_group_by_counter.swap(counter); } + /// Counter to calculate rows_before_aggregation_at_least in processors pipeline. + void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_aggregation_counter.swap(counter); } /// Notify about progress. 
Method could be called from different threads. /// Passed value are delta, that must be summarized. @@ -157,7 +157,8 @@ protected: Progress progress; bool applied_limit = false; size_t rows_before_limit = 0; - size_t rows_before_group_by = 0; + bool applied_aggregation = false; + size_t rows_before_aggregation = 0; Chunk totals; Chunk extremes; }; @@ -192,7 +193,7 @@ protected: bool need_write_suffix = true; RowsBeforeLimitCounterPtr rows_before_limit_counter; - RowsBeforeGroupByCounterPtr rows_before_group_by_counter; + RowsBeforeAggregationCounterPtr rows_before_aggregation_counter; Statistics statistics; private: diff --git a/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.cpp b/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.cpp index 1e8f57aa9a6..2f285e3d202 100644 --- a/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.cpp @@ -81,6 +81,8 @@ void JSONColumnsWithMetadataBlockOutputFormat::finalizeImpl() rows, statistics.rows_before_limit, statistics.applied_limit, + statistics.rows_before_aggregation, + statistics.applied_aggregation, statistics.watch, statistics.progress, format_settings.write_statistics, diff --git a/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.h b/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.h index c72b4d87234..e5208440483 100644 --- a/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONColumnsWithMetadataBlockOutputFormat.h @@ -44,6 +44,11 @@ public: String getName() const override { return "JSONCompactColumnsBlockOutputFormat"; } void setRowsBeforeLimit(size_t rows_before_limit_) override { statistics.rows_before_limit = rows_before_limit_; statistics.applied_limit = true; } + void setRowsBeforeAggregation(size_t rows_before_aggregation_) override + { + statistics.rows_before_aggregation = rows_before_aggregation_; + statistics.applied_aggregation = true; + } void onProgress(const Progress & progress_) override { statistics.progress.incrementPiecewiseAtomically(progress_); } protected: diff --git a/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp index 20182d84917..fec24b10c11 100644 --- a/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONRowOutputFormat.cpp @@ -116,6 +116,8 @@ void JSONRowOutputFormat::finalizeImpl() row_count, statistics.rows_before_limit, statistics.applied_limit, + statistics.rows_before_aggregation, + statistics.applied_aggregation, statistics.watch, statistics.progress, settings.write_statistics && exception_message.empty(), diff --git a/src/Processors/Formats/Impl/JSONRowOutputFormat.h b/src/Processors/Formats/Impl/JSONRowOutputFormat.h index a38cd0e8db9..c36adb5ee3e 100644 --- a/src/Processors/Formats/Impl/JSONRowOutputFormat.h +++ b/src/Processors/Formats/Impl/JSONRowOutputFormat.h @@ -35,6 +35,11 @@ public: statistics.applied_limit = true; statistics.rows_before_limit = rows_before_limit_; } + void setRowsBeforeAggregation(size_t rows_before_aggregation_) override + { + statistics.applied_aggregation = true; + statistics.rows_before_aggregation = rows_before_aggregation_; + } protected: void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h 
b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 341141dd633..66f8701161e 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -313,6 +313,12 @@ private: statistics.rows_before_limit = rows_before_limit; statistics.applied_limit = true; } + void setRowsBeforeAggregation(size_t rows_before_aggregation) override + { + std::lock_guard lock(statistics_mutex); + statistics.rows_before_aggregation = rows_before_aggregation; + statistics.applied_aggregation = true; + } }; } diff --git a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp index 1c43a0fa331..4bd6684dff4 100644 --- a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp @@ -42,9 +42,11 @@ TemplateBlockOutputFormat::TemplateBlockOutputFormat(const Block & header_, Writ case static_cast(ResultsetPart::TimeElapsed): case static_cast(ResultsetPart::RowsRead): case static_cast(ResultsetPart::BytesRead): + case static_cast(ResultsetPart::RowsBeforeAggregation): if (format.escaping_rules[i] == EscapingRule::None) - format.throwInvalidFormat("Serialization type for output part rows, rows_before_limit, time, " - "rows_read or bytes_read is not specified", i); + format.throwInvalidFormat( + "Serialization type for output part rows, rows, time, " + "rows_read or bytes_read is not specified", i); break; default: format.throwInvalidFormat("Invalid output part", i); @@ -80,7 +82,7 @@ TemplateBlockOutputFormat::ResultsetPart TemplateBlockOutputFormat::stringToResu return ResultsetPart::ExtremesMax; else if (part == "rows") return ResultsetPart::Rows; - else if (part == "rows_before_limit") + else if (part == "rows") return ResultsetPart::RowsBeforeLimit; else if (part == "time") return ResultsetPart::TimeElapsed; @@ -88,6 +90,8 @@ TemplateBlockOutputFormat::ResultsetPart TemplateBlockOutputFormat::stringToResu return ResultsetPart::RowsRead; else if (part == "bytes_read") return ResultsetPart::BytesRead; + else if (part == "rows_before_aggregation") + return ResultsetPart::RowsBeforeAggregation; else throw Exception(ErrorCodes::SYNTAX_ERROR, "Unknown output part {}", part); } @@ -161,7 +165,7 @@ void TemplateBlockOutputFormat::finalizeImpl() break; case ResultsetPart::RowsBeforeLimit: if (!statistics.applied_limit) - format.throwInvalidFormat("Cannot print rows_before_limit for this request", i); + format.throwInvalidFormat("Cannot print rows for this request", i); writeValue(statistics.rows_before_limit, format.escaping_rules[i]); break; case ResultsetPart::TimeElapsed: @@ -173,6 +177,11 @@ void TemplateBlockOutputFormat::finalizeImpl() case ResultsetPart::BytesRead: writeValue(statistics.progress.read_bytes.load(), format.escaping_rules[i]); break; + case ResultsetPart::RowsBeforeAggregation: + if (!statistics.applied_aggregation) + format.throwInvalidFormat("Cannot print rows_before_aggregation for this request", i); + writeValue(statistics.rows_before_aggregation, format.escaping_rules[i]); + break; default: break; } diff --git a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h index 53d98849482..5e88d79b4a8 100644 --- a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.h @@ -21,6 +21,11 @@ public: String getName() const override { return "TemplateBlockOutputFormat"; } void 
setRowsBeforeLimit(size_t rows_before_limit_) override { statistics.rows_before_limit = rows_before_limit_; statistics.applied_limit = true; } + void setRowsBeforeAggregation(size_t rows_before_aggregation_) override + { + statistics.rows_before_aggregation = rows_before_aggregation_; + statistics.applied_aggregation = true; + } void onProgress(const Progress & progress_) override { statistics.progress.incrementPiecewiseAtomically(progress_); } enum class ResultsetPart : size_t @@ -33,7 +38,8 @@ public: RowsBeforeLimit, TimeElapsed, RowsRead, - BytesRead + BytesRead, + RowsBeforeAggregation }; static ResultsetPart stringToResultsetPart(const String & part); diff --git a/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp index 52c161c3208..2fd0536ed02 100644 --- a/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp @@ -191,6 +191,7 @@ void XMLRowOutputFormat::finalizeImpl() writeRowsBeforeLimitAtLeast(); + writeRowsBeforeAggregationAtLeast(); if (!exception_message.empty()) writeException(); @@ -219,6 +220,16 @@ void XMLRowOutputFormat::writeRowsBeforeLimitAtLeast() } } +void XMLRowOutputFormat::writeRowsBeforeAggregationAtLeast() +{ + if (statistics.applied_aggregation) + { + writeCString("\t", *ostr); + writeIntText(statistics.rows_before_aggregation, *ostr); + writeCString("\n", *ostr); + } +} + void XMLRowOutputFormat::writeStatistics() { writeCString("\t\n", *ostr); diff --git a/src/Processors/Formats/Impl/XMLRowOutputFormat.h b/src/Processors/Formats/Impl/XMLRowOutputFormat.h index daf03539d0b..792acd118c8 100644 --- a/src/Processors/Formats/Impl/XMLRowOutputFormat.h +++ b/src/Processors/Formats/Impl/XMLRowOutputFormat.h @@ -48,6 +48,11 @@ private: statistics.rows_before_limit = rows_before_limit_; } + void setRowsBeforeAggregation(size_t rows_before_aggregation_) override + { + statistics.applied_aggregation = true; + statistics.rows_before_aggregation = rows_before_aggregation_; + } void onRowsReadBeforeUpdate() override { row_count = getRowsReadBefore(); } void onProgress(const Progress & value) override; @@ -56,6 +61,7 @@ private: void writeExtremesElement(const char * title, const Columns & columns, size_t row_num); void writeRowsBeforeLimitAtLeast(); + void writeRowsBeforeAggregationAtLeast(); void writeStatistics(); void writeException(); diff --git a/src/Processors/Formats/LazyOutputFormat.cpp b/src/Processors/Formats/LazyOutputFormat.cpp index 63423628e57..dc099765870 100644 --- a/src/Processors/Formats/LazyOutputFormat.cpp +++ b/src/Processors/Formats/LazyOutputFormat.cpp @@ -45,8 +45,8 @@ void LazyOutputFormat::setRowsBeforeLimit(size_t rows_before_limit) info.setRowsBeforeLimit(rows_before_limit); } -void LazyOutputFormat::setRowsBeforeGroupBy(size_t rows_before_group_by) +void LazyOutputFormat::setRowsBeforeAggregation(size_t rows_before_aggregation) { - info.setRowsBeforeGroupBy(rows_before_group_by); + info.setRowsBeforeAggregation(rows_before_aggregation); } } diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index 83abb2ff1a1..b0936e3d258 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -28,7 +28,7 @@ public: ProfileInfo & getProfileInfo() { return info; } void setRowsBeforeLimit(size_t rows_before_limit) override; - void setRowsBeforeGroupBy(size_t rows_before_group_by) override; + void setRowsBeforeAggregation(size_t rows_before_aggregation) override; 
void onCancel() override { diff --git a/src/Processors/Formats/PullingOutputFormat.cpp b/src/Processors/Formats/PullingOutputFormat.cpp index 646755deb6b..37050fb9675 100644 --- a/src/Processors/Formats/PullingOutputFormat.cpp +++ b/src/Processors/Formats/PullingOutputFormat.cpp @@ -42,8 +42,8 @@ void PullingOutputFormat::setRowsBeforeLimit(size_t rows_before_limit) { info.setRowsBeforeLimit(rows_before_limit); } -void PullingOutputFormat::setRowsBeforeGroupBy(size_t rows_before_group_by) +void PullingOutputFormat::setRowsBeforeAggregation(size_t rows_before_aggregation) { - info.setRowsBeforeGroupBy(rows_before_group_by); + info.setRowsBeforeAggregation(rows_before_aggregation); } } diff --git a/src/Processors/Formats/PullingOutputFormat.h b/src/Processors/Formats/PullingOutputFormat.h index c4d8cf4aab2..f2546cca180 100644 --- a/src/Processors/Formats/PullingOutputFormat.h +++ b/src/Processors/Formats/PullingOutputFormat.h @@ -22,7 +22,7 @@ public: ProfileInfo & getProfileInfo() { return info; } void setRowsBeforeLimit(size_t rows_before_limit) override; - void setRowsBeforeGroupBy(size_t rows_before_group_by) override; + void setRowsBeforeAggregation(size_t rows_before_aggregation) override; bool expectMaterializedColumns() const override { return false; } diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 0df4b3168e3..a06958fed73 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -24,7 +24,7 @@ using StorageLimitsList = std::list; class RowsBeforeLimitCounter; using RowsBeforeLimitCounterPtr = std::shared_ptr; -using RowsBeforeGroupByCounterPtr = std::shared_ptr; +using RowsBeforeAggregationCounterPtr = std::shared_ptr; class IProcessor; using ProcessorPtr = std::shared_ptr; @@ -368,9 +368,9 @@ public: /// This counter is used to calculate the number of rows right before any filtration of LimitTransform. virtual void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr /* counter */) {} - /// Set rows_before_group_by counter for current processor. + /// Set rows_before_aggregation counter for current processor. /// This counter is used to calculate the number of rows right before AggregatingTransform. - virtual void setRowsBeforeGroupByCounter(RowsBeforeGroupByCounterPtr /* counter */) { } + virtual void setRowsBeforeAggregationCounter(RowsBeforeAggregationCounterPtr /* counter */) { } protected: virtual void onCancel() {} diff --git a/src/Processors/RowsBeforeLimitCounter.h b/src/Processors/RowsBeforeLimitCounter.h index f5eb40ff84a..5aa867ffc43 100644 --- a/src/Processors/RowsBeforeLimitCounter.h +++ b/src/Processors/RowsBeforeLimitCounter.h @@ -5,7 +5,7 @@ namespace DB { -/// This class helps to calculate rows_before_limit_at_least. +/// This class helps to calculate rows_before_limit_at_least and rows_before_aggregation_at_least. 
class RowsBeforeLimitCounter { public: diff --git a/src/Processors/Sources/DelayedSource.cpp b/src/Processors/Sources/DelayedSource.cpp index f7928f89015..788017e3df0 100644 --- a/src/Processors/Sources/DelayedSource.cpp +++ b/src/Processors/Sources/DelayedSource.cpp @@ -139,6 +139,12 @@ void DelayedSource::work() processor->setRowsBeforeLimitCounter(rows_before_limit); } + if (rows_before_aggregation) + { + for (auto & processor : processors) + processor->setRowsBeforeAggregationCounter(rows_before_aggregation); + } + synchronizePorts(totals_output, totals, header, processors); synchronizePorts(extremes_output, extremes, header, processors); } diff --git a/src/Processors/Sources/DelayedSource.h b/src/Processors/Sources/DelayedSource.h index bd100f29a47..3138a1ab42a 100644 --- a/src/Processors/Sources/DelayedSource.h +++ b/src/Processors/Sources/DelayedSource.h @@ -31,14 +31,14 @@ public: OutputPort * getExtremesPort() { return extremes; } void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit.swap(counter); } - void setRowsBeforeGroupByCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_group_by.swap(counter); } + void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_aggregation.swap(counter); } private: QueryPlanResourceHolder resources; Creator creator; Processors processors; RowsBeforeLimitCounterPtr rows_before_limit; - RowsBeforeLimitCounterPtr rows_before_group_by; + RowsBeforeLimitCounterPtr rows_before_aggregation; /// Outputs for DelayedSource. OutputPort * main = nullptr; diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index a78db630786..9a6fe239ee6 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -46,12 +46,12 @@ RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation manually_add_rows_before_limit_counter = true; /// Remote subquery doesn't contain a limit } - if (rows_before_group_by) + if (rows_before_aggregation) { - if (info.hasAppliedGroupBy()) - rows_before_group_by->add(info.getRowsBeforeGroupBy()); + if (info.hasAppliedAggregation()) + rows_before_aggregation->add(info.getRowsBeforeAggregation()); else - manually_add_rows_before_group_by_counter = true; /// Remote subquery doesn't contain a group by + manually_add_rows_before_aggregation_counter = true; /// Remote subquery doesn't contain a group by } }); } @@ -171,8 +171,8 @@ std::optional RemoteSource::tryGenerate() { if (manually_add_rows_before_limit_counter) rows_before_limit->add(rows); - if (manually_add_rows_before_group_by_counter) - rows_before_group_by->add(rows); + if (manually_add_rows_before_aggregation_counter) + rows_before_aggregation->add(rows); query_executor->finish(); return {}; } diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index b2ea6d50e01..c2f4d6842bb 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -3,8 +3,8 @@ #include #include #include -#include +#include namespace DB { @@ -26,7 +26,7 @@ public: String getName() const override { return "Remote"; } void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit.swap(counter); } - void setRowsBeforeGroupByCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_group_by.swap(counter); } + void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { 
rows_before_aggregation.swap(counter); } /// Stop reading from stream if output port is finished. void onUpdatePorts() override; @@ -46,7 +46,7 @@ private: bool add_aggregation_info = false; RemoteQueryExecutorPtr query_executor; RowsBeforeLimitCounterPtr rows_before_limit; - RowsBeforeLimitCounterPtr rows_before_group_by; + RowsBeforeLimitCounterPtr rows_before_aggregation; const bool async_read; const bool async_query_sending; @@ -54,7 +54,7 @@ private: int fd = -1; size_t rows = 0; bool manually_add_rows_before_limit_counter = false; - bool manually_add_rows_before_group_by_counter = false; + bool manually_add_rows_before_aggregation_counter = false; }; /// Totals source from RemoteQueryExecutor. diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 7c0e222f89b..684de0a3e8c 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -683,8 +683,8 @@ void AggregatingTransform::consume(Chunk chunk) LOG_TRACE(log, "Aggregating"); is_consume_started = true; } - if (rows_before_group_by_at_least) - rows_before_group_by_at_least->add(num_rows); + if (rows_before_aggregation_at_least) + rows_before_aggregation_at_least->add(num_rows); src_rows += num_rows; src_bytes += chunk.bytes(); diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index 6e7b04f9191..53939ea6a99 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -169,7 +169,10 @@ public: Status prepare() override; void work() override; Processors expandPipeline() override; - void setRowsBeforeGroupByCounter(RowsBeforeGroupByCounterPtr counter) override { rows_before_group_by_at_least.swap(counter); } + void setRowsBeforeAggregationCounter(RowsBeforeAggregationCounterPtr counter) override + { + rows_before_aggregation_at_least.swap(counter); + } protected: void consume(Chunk chunk); @@ -213,7 +216,7 @@ private: bool is_consume_started = false; - RowsBeforeGroupByCounterPtr rows_before_group_by_at_least; + RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least; void initGenerate(); }; diff --git a/src/QueryPipeline/ProfileInfo.cpp b/src/QueryPipeline/ProfileInfo.cpp index cec179ecfad..87729b7c90e 100644 --- a/src/QueryPipeline/ProfileInfo.cpp +++ b/src/QueryPipeline/ProfileInfo.cpp @@ -16,8 +16,8 @@ void ProfileInfo::read(ReadBuffer & in) readBinary(applied_limit, in); readVarUInt(rows_before_limit, in); readBinary(calculated_rows_before_limit, in); - readBinary(applied_group_by, in); - readVarUInt(rows_before_group_by, in); + readBinary(applied_aggregation, in); + readVarUInt(rows_before_aggregation, in); } @@ -29,8 +29,8 @@ void ProfileInfo::write(WriteBuffer & out) const writeBinary(hasAppliedLimit(), out); writeVarUInt(getRowsBeforeLimit(), out); writeBinary(calculated_rows_before_limit, out); - writeBinary(hasAppliedGroupBy(), out); - writeVarUInt(getRowsBeforeGroupBy(), out); + writeBinary(hasAppliedAggregation(), out); + writeVarUInt(getRowsBeforeAggregation(), out); } @@ -45,8 +45,8 @@ void ProfileInfo::setFrom(const ProfileInfo & rhs, bool skip_block_size_info) applied_limit = rhs.applied_limit; rows_before_limit = rhs.rows_before_limit; calculated_rows_before_limit = rhs.calculated_rows_before_limit; - applied_group_by = rhs.applied_group_by; - rows_before_group_by = rhs.rows_before_group_by; + applied_aggregation = rhs.applied_aggregation; + 
rows_before_aggregation = rhs.rows_before_aggregation; } @@ -63,15 +63,15 @@ bool ProfileInfo::hasAppliedLimit() const return applied_limit; } -size_t ProfileInfo::getRowsBeforeGroupBy() const +size_t ProfileInfo::getRowsBeforeAggregation() const { - return rows_before_group_by; + return rows_before_aggregation; } -bool ProfileInfo::hasAppliedGroupBy() const +bool ProfileInfo::hasAppliedAggregation() const { - return applied_group_by; + return applied_aggregation; } diff --git a/src/QueryPipeline/ProfileInfo.h b/src/QueryPipeline/ProfileInfo.h index 141adc7430d..e2467afd6f4 100644 --- a/src/QueryPipeline/ProfileInfo.h +++ b/src/QueryPipeline/ProfileInfo.h @@ -32,8 +32,8 @@ struct ProfileInfo size_t getRowsBeforeLimit() const; bool hasAppliedLimit() const; - size_t getRowsBeforeGroupBy() const; - bool hasAppliedGroupBy() const; + size_t getRowsBeforeAggregation() const; + bool hasAppliedAggregation() const; void update(Block & block); void update(size_t num_rows, size_t num_bytes); @@ -55,20 +55,20 @@ struct ProfileInfo } /// Only for Processors. - void setRowsBeforeGroupBy(size_t rows_before_group_by_) + void setRowsBeforeAggregation(size_t rows_before_aggregation_) { - applied_group_by = true; - rows_before_group_by = rows_before_group_by_; + applied_aggregation = true; + rows_before_aggregation = rows_before_aggregation_; } private: /// For these fields we make accessors, because they must be calculated beforehand. mutable bool applied_limit = false; /// Whether LIMIT was applied mutable size_t rows_before_limit = 0; - mutable bool calculated_rows_before_limit = false; /// Whether the field rows_before_limit was calculated + mutable bool calculated_rows_before_limit = false; /// Whether the field rows was calculated - mutable bool applied_group_by = false; /// Whether GROUP BY was applied - mutable size_t rows_before_group_by = 0; + mutable bool applied_aggregation = false; /// Whether GROUP BY was applied + mutable size_t rows_before_aggregation = 0; }; } diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 5e0885ed4e8..0e3eec21d50 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -274,18 +274,20 @@ static void initRowsBeforeLimit(IOutputFormat * output_format) output_format->setRowsBeforeLimitCounter(rows_before_limit_at_least); } } -static void initRowsBeforeGroupBy(std::shared_ptr processors, IOutputFormat * output_format) +static void initRowsBeforeAggregation(std::shared_ptr processors, IOutputFormat * output_format) { if (!processors->empty()) { - RowsBeforeGroupByCounterPtr rows_before_group_by_at_least = std::make_shared(); + RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least = std::make_shared(); for (auto & processor : *processors) { if (auto transform = std::dynamic_pointer_cast(processor)) - transform->setRowsBeforeGroupByCounter(rows_before_group_by_at_least); + transform->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); + if (auto remote = std::dynamic_pointer_cast(processor)) + remote->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); } - rows_before_group_by_at_least->add(0); - output_format->setRowsBeforeLimitCounter(rows_before_group_by_at_least); + rows_before_aggregation_at_least->add(0); + output_format->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); } } @@ -535,7 +537,14 @@ void QueryPipeline::complete(std::shared_ptr format) extremes = nullptr; initRowsBeforeLimit(format.get()); - initRowsBeforeGroupBy(processors, 
format.get()); + for (const auto context : resources.interpreter_context) + { + if (context->getSettingsRef().rows_before_aggregation) + { + initRowsBeforeAggregation(processors, format.get()); + break; + } + } output_format = format.get(); processors->emplace_back(std::move(format)); diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 37e4342f3b0..9651ce8f660 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1577,7 +1577,8 @@ namespace stats.set_allocated_bytes(info.bytes); stats.set_applied_limit(info.hasAppliedLimit()); stats.set_rows_before_limit(info.getRowsBeforeLimit()); - stats.set_rows_before_group_by(info.getRowsBeforeGroupBy()); + stats.set_applied_aggregation(info.hasAppliedAggregation()); + stats.set_rows_before_aggregation(info.getRowsBeforeAggregation()); } void Call::addLogsToResult() diff --git a/src/Server/grpc_protos/clickhouse_grpc.proto b/src/Server/grpc_protos/clickhouse_grpc.proto index 02b6988b8c0..2ada4e8a641 100644 --- a/src/Server/grpc_protos/clickhouse_grpc.proto +++ b/src/Server/grpc_protos/clickhouse_grpc.proto @@ -179,8 +179,8 @@ message Stats { uint64 allocated_bytes = 3; bool applied_limit = 4; uint64 rows_before_limit = 5; - bool applied_group_by = 6; - uint64 rows_before_group_by = 7; + bool applied_aggregation = 6; + uint64 rows_before_aggregation = 7; } message Exception { diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference new file mode 100644 index 00000000000..8ad00273bbf --- /dev/null +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference @@ -0,0 +1,355 @@ +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9] + ], + + "rows": 10, + + "rows_before_aggregation_at_least": 10 +} + + + + + + i + Int32 + + + + + + 0 + + + 1 + + + 2 + + + 3 + + + 4 + + + 5 + + + 6 + + + 7 + + + 8 + + + 9 + + + 10 + 10 + +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [10], + [11], + [12] + ], + + "rows": 3, + + "rows_before_aggregation_at_least": 3 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0] + ], + + "rows": 1, + + "rows_before_limit_at_least": 20, + + "rows_before_aggregation_at_least": 20 +} +{ + "meta": + [ + { + "name": "max(i)", + "type": "Int32" + } + ], + + "data": + [ + [19] + ], + + "rows": 1, + + "rows_before_aggregation_at_least": 20 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19], + [20], + [21], + [22], + [23], + [24], + [25], + [26], + [27], + [28], + [29] + ], + + "rows": 30, + + "rows_before_limit_at_least": 60, + + "rows_before_aggregation_at_least": 60 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19] + ], + + "rows": 20, + + "rows_before_limit_at_least": 40, + + "rows_before_aggregation_at_least": 40 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19], + [20], + [21], + [22], + 
[23], + [24], + [25], + [26], + [27], + [28], + [29] + ], + + "rows": 30, + + "rows_before_limit_at_least": 30, + + "rows_before_aggregation_at_least": 60 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19] + ], + + "rows": 20, + + "rows_before_limit_at_least": 20, + + "rows_before_aggregation_at_least": 40 +} +{ + "meta": + [ + { + "name": "max(i)", + "type": "Int32" + } + ], + + "data": + [ + [19] + ], + + "rows": 1, + + "rows_before_limit_at_least": 1, + + "rows_before_aggregation_at_least": 40 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [4], + [3], + [2], + [5], + [1], + [6], + [7], + [9], + [8] + ], + + "rows": 10, + + "rows_before_limit_at_least": 10, + + "rows_before_aggregation_at_least": 20 +} diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql new file mode 100644 index 00000000000..6a3759c11e8 --- /dev/null +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel, no-random-merge-tree-settings + +drop table if exists test; + +create table test (i int) engine MergeTree order by tuple(); + +insert into test select arrayJoin(range(10000)); + +set rows_before_aggregation = 1, output_format_write_statistics = 0, max_block_size = 100; + +select * from test where i < 10 group by i order by i FORMAT JSONCompact; +select * from test where i < 10 group by i order by i FORMAT XML; + +select * from test group by i having i in (10, 11, 12) order by i FORMAT JSONCompact; + +select * from test where i < 20 group by i order by i limit 1 FORMAT JSONCompact; + +select max(i) from test where i < 20 FORMAT JSONCompact; + +set prefer_localhost_replica = 0; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 group by i order by i FORMAT JSONCompact; + +set prefer_localhost_replica = 1; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 group by i order by i FORMAT JSONCompact; +select max(i) from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 FORMAT JSONCompact; + +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i FORMAT JSONCompact; + +drop table if exists test; \ No newline at end of file From 92ce1368b3aa2056e05a89ad013d7fa818f9888d Mon Sep 17 00:00:00 2001 From: morning-color Date: Thu, 4 Jul 2024 18:03:40 +0800 Subject: [PATCH 0279/2361] Rename RowsBeforeLimitCounter to RowsBeforeStepCounter --- src/Processors/Formats/IOutputFormat.cpp | 4 +-- src/Processors/Formats/IOutputFormat.h | 4 +-- src/Processors/IProcessor.h | 6 ++-- src/Processors/LimitTransform.h | 4 +-- src/Processors/OffsetTransform.h | 4 +-- src/Processors/RowsBeforeLimitCounter.h | 36 ------------------- src/Processors/RowsBeforeStepCounter.h | 36 +++++++++++++++++++ src/Processors/Sources/RemoteSource.h | 2 +- .../Transforms/AggregatingTransform.h | 2 +- .../Transforms/PartialSortingTransform.h | 4 +-- src/QueryPipeline/QueryPipeline.cpp | 4 +-- 
src/QueryPipeline/QueryPipelineBuilder.cpp | 2 +- ...74_exact_rows_before_aggregation.reference | 27 +++++++++++--- .../03174_exact_rows_before_aggregation.sql | 9 +++-- 14 files changed, 81 insertions(+), 63 deletions(-) delete mode 100644 src/Processors/RowsBeforeLimitCounter.h create mode 100644 src/Processors/RowsBeforeStepCounter.h diff --git a/src/Processors/Formats/IOutputFormat.cpp b/src/Processors/Formats/IOutputFormat.cpp index 7eaecfab3dc..97628778adb 100644 --- a/src/Processors/Formats/IOutputFormat.cpp +++ b/src/Processors/Formats/IOutputFormat.cpp @@ -69,9 +69,9 @@ void IOutputFormat::work() if (finished && !finalized) { - if (rows_before_limit_counter && rows_before_limit_counter->hasAppliedLimit()) + if (rows_before_limit_counter && rows_before_limit_counter->hasAppliedStep()) setRowsBeforeLimit(rows_before_limit_counter->get()); - if (rows_before_aggregation_counter && rows_before_aggregation_counter->hasAppliedLimit()) + if (rows_before_aggregation_counter && rows_before_aggregation_counter->hasAppliedStep()) setRowsBeforeAggregation(rows_before_aggregation_counter->get()); finalize(); if (auto_flush) diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h index 1119797e7ff..721e02f1c1d 100644 --- a/src/Processors/Formats/IOutputFormat.h +++ b/src/Processors/Formats/IOutputFormat.h @@ -1,9 +1,9 @@ #pragma once #include -#include -#include #include +#include +#include #include namespace DB diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index a06958fed73..ccdd6308de5 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -21,10 +21,10 @@ class IQueryPlanStep; struct StorageLimits; using StorageLimitsList = std::list; -class RowsBeforeLimitCounter; -using RowsBeforeLimitCounterPtr = std::shared_ptr; +class RowsBeforeStepCounter; +using RowsBeforeLimitCounterPtr = std::shared_ptr; -using RowsBeforeAggregationCounterPtr = std::shared_ptr; +using RowsBeforeAggregationCounterPtr = std::shared_ptr; class IProcessor; using ProcessorPtr = std::shared_ptr; diff --git a/src/Processors/LimitTransform.h b/src/Processors/LimitTransform.h index 33ff968985f..515203f6829 100644 --- a/src/Processors/LimitTransform.h +++ b/src/Processors/LimitTransform.h @@ -1,8 +1,8 @@ #pragma once -#include -#include #include +#include +#include namespace DB { diff --git a/src/Processors/OffsetTransform.h b/src/Processors/OffsetTransform.h index 79a7d15fe0b..7ef16518540 100644 --- a/src/Processors/OffsetTransform.h +++ b/src/Processors/OffsetTransform.h @@ -1,8 +1,8 @@ #pragma once -#include -#include #include +#include +#include namespace DB { diff --git a/src/Processors/RowsBeforeLimitCounter.h b/src/Processors/RowsBeforeLimitCounter.h deleted file mode 100644 index 5aa867ffc43..00000000000 --- a/src/Processors/RowsBeforeLimitCounter.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once -#include -#include - -namespace DB -{ - -/// This class helps to calculate rows_before_limit_at_least and rows_before_aggregation_at_least. 
-class RowsBeforeLimitCounter -{ -public: - void add(uint64_t rows) - { - setAppliedLimit(); - rows_before_limit.fetch_add(rows, std::memory_order_release); - } - - void set(uint64_t rows) - { - setAppliedLimit(); - rows_before_limit.store(rows, std::memory_order_release); - } - - uint64_t get() const { return rows_before_limit.load(std::memory_order_acquire); } - - void setAppliedLimit() { has_applied_limit.store(true, std::memory_order_release); } - bool hasAppliedLimit() const { return has_applied_limit.load(std::memory_order_acquire); } - -private: - std::atomic rows_before_limit = 0; - std::atomic_bool has_applied_limit = false; -}; - -using RowsBeforeLimitCounterPtr = std::shared_ptr; - -} diff --git a/src/Processors/RowsBeforeStepCounter.h b/src/Processors/RowsBeforeStepCounter.h new file mode 100644 index 00000000000..d9912bfa076 --- /dev/null +++ b/src/Processors/RowsBeforeStepCounter.h @@ -0,0 +1,36 @@ +#pragma once +#include +#include + +namespace DB +{ + +/// This class helps to calculate rows_before_limit_at_least and rows_before_aggregation_at_least. +class RowsBeforeStepCounter +{ +public: + void add(uint64_t rows) + { + setAppliedStep(); + rows_before_step.fetch_add(rows, std::memory_order_release); + } + + void set(uint64_t rows) + { + setAppliedStep(); + rows_before_step.store(rows, std::memory_order_release); + } + + uint64_t get() const { return rows_before_step.load(std::memory_order_acquire); } + + void setAppliedStep() { has_applied_step.store(true, std::memory_order_release); } + bool hasAppliedStep() const { return has_applied_step.load(std::memory_order_acquire); } + +private: + std::atomic rows_before_step = 0; + std::atomic_bool has_applied_step = false; +}; + +using RowsBeforeLimitCounterPtr = std::shared_ptr; + +} diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index c2f4d6842bb..bbc563ec5fe 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index 53939ea6a99..9f9638175f0 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/Processors/Transforms/PartialSortingTransform.h b/src/Processors/Transforms/PartialSortingTransform.h index 8f25c93037f..abb4b290322 100644 --- a/src/Processors/Transforms/PartialSortingTransform.h +++ b/src/Processors/Transforms/PartialSortingTransform.h @@ -1,7 +1,7 @@ #pragma once -#include -#include #include +#include +#include #include namespace DB diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 0e3eec21d50..acccbed2585 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -262,7 +262,7 @@ static void initRowsBeforeLimit(IOutputFormat * output_format) if (!processors.empty()) { - rows_before_limit_at_least = std::make_shared(); + rows_before_limit_at_least = std::make_shared(); for (auto & processor : processors) processor->setRowsBeforeLimitCounter(rows_before_limit_at_least); @@ -278,7 +278,7 @@ static void initRowsBeforeAggregation(std::shared_ptr processors, IO { if (!processors->empty()) { - RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least = std::make_shared(); + 
RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least = std::make_shared(); for (auto & processor : *processors) { if (auto transform = std::dynamic_pointer_cast(processor)) diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 803d1686ad7..d276fed60a2 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference index 8ad00273bbf..36db9721599 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference @@ -101,12 +101,29 @@ "data": [ - [0] + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19] ], - "rows": 1, - - "rows_before_limit_at_least": 20, + "rows": 20, "rows_before_aggregation_at_least": 20 } @@ -126,6 +143,8 @@ "rows": 1, + "rows_before_limit_at_least": 1, + "rows_before_aggregation_at_least": 20 } { diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql index 6a3759c11e8..8ccbce42706 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql @@ -6,16 +6,15 @@ create table test (i int) engine MergeTree order by tuple(); insert into test select arrayJoin(range(10000)); -set rows_before_aggregation = 1, output_format_write_statistics = 0, max_block_size = 100; +set rows_before_aggregation = 1, exact_rows_before_limit = 1, output_format_write_statistics = 0, max_block_size = 100; select * from test where i < 10 group by i order by i FORMAT JSONCompact; select * from test where i < 10 group by i order by i FORMAT XML; select * from test group by i having i in (10, 11, 12) order by i FORMAT JSONCompact; -select * from test where i < 20 group by i order by i limit 1 FORMAT JSONCompact; - -select max(i) from test where i < 20 FORMAT JSONCompact; +select * from test where i < 20 group by i order by i FORMAT JSONCompact; +select max(i) from test where i < 20 limit 1 FORMAT JSONCompact; set prefer_localhost_replica = 0; select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; @@ -26,6 +25,6 @@ select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 group by i order by i FORMAT JSONCompact; select max(i) from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 FORMAT JSONCompact; -select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i FORMAT JSONCompact; +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i limit 10 FORMAT JSONCompact; drop table if exists test; \ No newline at end of file From 355f144cda1838acce8f89f54e3ae84300263ea3 Mon Sep 17 00:00:00 2001 From: morning-color Date: Thu, 4 Jul 2024 18:11:26 +0800 Subject: [PATCH 0280/2361] Fix rename bug --- src/Processors/Formats/IOutputFormat.h | 4 ++-- 
src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h index 721e02f1c1d..40ac1317618 100644 --- a/src/Processors/Formats/IOutputFormat.h +++ b/src/Processors/Formats/IOutputFormat.h @@ -36,13 +36,13 @@ public: void setAutoFlush() { auto_flush = true; } /// Value for rows_before_limit_at_least field. - virtual void setRowsBeforeLimit(size_t /*rows*/) { } + virtual void setRowsBeforeLimit(size_t /*rows_before_limit*/) { } /// Counter to calculate rows_before_limit_at_least in processors pipeline. void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit_counter.swap(counter); } /// Value for rows_before_aggregation_at_least field. - virtual void setRowsBeforeAggregation(size_t /*rows*/) { } + virtual void setRowsBeforeAggregation(size_t /*rows_before_limit*/) { } /// Counter to calculate rows_before_aggregation_at_least in processors pipeline. void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_aggregation_counter.swap(counter); } diff --git a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp index 4bd6684dff4..5d6db17aaa2 100644 --- a/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/TemplateBlockOutputFormat.cpp @@ -82,7 +82,7 @@ TemplateBlockOutputFormat::ResultsetPart TemplateBlockOutputFormat::stringToResu return ResultsetPart::ExtremesMax; else if (part == "rows") return ResultsetPart::Rows; - else if (part == "rows") + else if (part == "rows_before_limit") return ResultsetPart::RowsBeforeLimit; else if (part == "time") return ResultsetPart::TimeElapsed; @@ -165,7 +165,7 @@ void TemplateBlockOutputFormat::finalizeImpl() break; case ResultsetPart::RowsBeforeLimit: if (!statistics.applied_limit) - format.throwInvalidFormat("Cannot print rows for this request", i); + format.throwInvalidFormat("Cannot print rows_before_limit for this request", i); writeValue(statistics.rows_before_limit, format.escaping_rules[i]); break; case ResultsetPart::TimeElapsed: From 5f42e1518277e54c34fce274b5cf46cd5069cb2a Mon Sep 17 00:00:00 2001 From: morning-color Date: Thu, 4 Jul 2024 19:58:22 +0800 Subject: [PATCH 0281/2361] Fix Bug --- src/QueryPipeline/QueryPipeline.cpp | 14 ++++++++++---- .../03174_exact_rows_before_aggregation.sql | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index acccbed2585..311c8a60531 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -276,17 +276,23 @@ static void initRowsBeforeLimit(IOutputFormat * output_format) } static void initRowsBeforeAggregation(std::shared_ptr processors, IOutputFormat * output_format) { + bool has_aggregation = false; + if (!processors->empty()) { RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least = std::make_shared(); - for (auto & processor : *processors) + for (auto processor : *processors) { if (auto transform = std::dynamic_pointer_cast(processor)) + { transform->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); - if (auto remote = std::dynamic_pointer_cast(processor)) - remote->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); + has_aggregation = true; + } + if (typeid_cast(processor.get()) || typeid_cast(processor.get())) + 
processor->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); } - rows_before_aggregation_at_least->add(0); + if (has_aggregation) + rows_before_aggregation_at_least->add(0); output_format->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); } } diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql index 8ccbce42706..31b817e8a65 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql @@ -27,4 +27,4 @@ select max(i) from cluster(test_cluster_two_shards, currentDatabase(), test) whe select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i limit 10 FORMAT JSONCompact; -drop table if exists test; \ No newline at end of file +drop table if exists test; From 77408c9fed1e67e6abd1e2ff1c2054a59438b729 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 4 Jul 2024 18:22:58 +0000 Subject: [PATCH 0282/2361] Conflict --- .../02933_replicated_database_forbid_create_as_select.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh index 15f169d880f..34df0544e13 100755 --- a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh +++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh @@ -12,14 +12,14 @@ ${CLICKHOUSE_CLIENT} --query "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = # Non-replicated engines are allowed ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test (id UInt64) ENGINE = MergeTree() ORDER BY id AS SELECT 1" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv (id UInt64) ENGINE = MergeTree() ORDER BY id POPULATE AS SELECT 1" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv (id UInt64) ENGINE = MergeTree() ORDER BY id POPULATE AS SELECT 1 AS id" # Replicated storafes are forbidden ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1 AS id" |& grep -cm1 "SUPPORT_IS_DISABLED" # But it is allowed with the special setting ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --database_replicated_allow_heavy_create=1 -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --database_replicated_allow_heavy_create=1 +${CLICKHOUSE_CLIENT} 
--distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1 AS id" --database_replicated_allow_heavy_create=1 ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" From 07c0a72e5f06283d0d70f076eb8f33ee1339e9c8 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 4 Jul 2024 18:30:55 +0000 Subject: [PATCH 0283/2361] Style --- src/Storages/System/IStorageSystemOneBlock.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/System/IStorageSystemOneBlock.cpp b/src/Storages/System/IStorageSystemOneBlock.cpp index 7cde31905aa..308b34510ea 100644 --- a/src/Storages/System/IStorageSystemOneBlock.cpp +++ b/src/Storages/System/IStorageSystemOneBlock.cpp @@ -80,9 +80,9 @@ void IStorageSystemOneBlock::read( void ReadFromSystemOneBlock::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - const auto & sample_block = getOutputStream().header; + const Block & sample_block = getOutputStream().header; MutableColumns res_columns = sample_block.cloneEmptyColumns(); - auto predicate = filter ? filter->getOutputs().at(0) : nullptr; + const ActionsDAG::Node * predicate = filter ? filter->getOutputs().at(0) : nullptr; storage->fillData(res_columns, context, predicate, std::move(columns_mask)); UInt64 num_rows = res_columns.at(0)->size(); From 90df83438f9866363690239ee9ec386a303dc3ba Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 5 Jul 2024 04:42:58 +0000 Subject: [PATCH 0284/2361] more shellcheck --- docker/test/stateless/attach_gdb.lib | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/attach_gdb.lib b/docker/test/stateless/attach_gdb.lib index a3616ac1a04..a0d1b8af6d8 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/docker/test/stateless/attach_gdb.lib @@ -11,7 +11,8 @@ function attach_gdb_to_clickhouse() # explicitly ignore non-fatal signals that are used by server. # Number of SIGRTMIN can be determined only in runtime. 
RTMIN=$(kill -l SIGRTMIN) - echo " + # shellcheck disable=SC2016 + echo " set follow-fork-mode parent handle SIGHUP nostop noprint pass handle SIGINT nostop noprint pass From 942f7d7532059cf931242ce5c94a39ea0344b50b Mon Sep 17 00:00:00 2001 From: yariks5s Date: Fri, 5 Jul 2024 12:44:31 +0000 Subject: [PATCH 0285/2361] fixes after review --- programs/obfuscator/Obfuscator.cpp | 4 +- src/Core/Settings.h | 6 +- src/Core/SettingsChangesHistory.cpp | 6 +- src/Formats/ReadSchemaUtils.cpp | 23 +- src/Formats/ReadSchemaUtils.h | 6 +- .../DataLakes/IStorageDataLake.h | 3 +- .../ObjectStorage/ReadBufferIterator.cpp | 2 +- .../ObjectStorage/ReadBufferIterator.h | 2 +- .../ObjectStorage/StorageObjectStorage.cpp | 60 ++--- .../ObjectStorage/StorageObjectStorage.h | 6 + .../StorageObjectStorageCluster.cpp | 3 +- .../StorageObjectStorageSource.cpp | 13 +- src/Storages/ObjectStorage/Utils.cpp | 7 +- src/Storages/ObjectStorage/Utils.h | 1 + .../StorageObjectStorageQueue.cpp | 3 +- src/Storages/StorageFile.cpp | 53 ++--- src/Storages/StorageURL.cpp | 24 +- src/Storages/VirtualColumnUtils.cpp | 63 +++-- src/Storages/VirtualColumnUtils.h | 9 +- src/TableFunctions/TableFunctionFormat.cpp | 10 +- .../TableFunctionObjectStorage.cpp | 3 +- .../__init__.py | 0 .../configs/cluster_azure.xml | 39 ---- .../configs/cluster_hdfs.xml | 33 --- .../configs/disable_profilers_azure.xml | 9 - .../configs/macro_hdfs.xml | 5 - .../configs/named_collections_azure.xml | 14 -- .../configs/schema_cache_azure.xml | 3 - .../configs/schema_cache_hdfs.xml | 3 - .../configs/users_azure.xml | 9 - .../test_azure.py | 219 ------------------ .../test_hdfs.py | 87 ------- .../test_storage_azure_blob_storage/test.py | 110 +++++++++ tests/integration/test_storage_hdfs/test.py | 49 ++++ 34 files changed, 304 insertions(+), 583 deletions(-) delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/__init__.py delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py delete mode 100644 tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 688ae1a1143..11e85bc1302 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1307,7 +1307,9 @@ try throw ErrnoException(ErrorCodes::CANNOT_SEEK_THROUGH_FILE, "Input must be seekable file (it will be read twice)"); SingleReadBufferIterator read_buffer_iterator(std::move(file)); - schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, context_const); + + std::string sample_string; + schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, 
sample_string, context_const); } else { diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 8399d3925db..65b93b893b6 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -935,6 +935,7 @@ class IColumn; M(Bool, iceberg_engine_ignore_schema_evolution, false, "Ignore schema evolution in Iceberg table engine and read all data using latest schema saved on table creation. Note that it can lead to incorrect result", 0) \ M(Bool, allow_deprecated_error_prone_window_functions, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)", 0) \ M(Bool, allow_deprecated_snowflake_conversion_functions, false, "Enables deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake.", 0) \ + M(Bool, use_hive_partitioning, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines.", 0)\ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS. @@ -1106,11 +1107,6 @@ class IColumn; M(Bool, input_format_tsv_skip_trailing_empty_lines, false, "Skip trailing empty lines in TSV format", 0) \ M(Bool, input_format_custom_skip_trailing_empty_lines, false, "Skip trailing empty lines in CustomSeparated format", 0) \ M(Bool, input_format_tsv_crlf_end_of_line, false, "If it is set true, file function will read TSV format with \\r\\n instead of \\n.", 0) \ - M(Bool, file_hive_partitioning, false, "Allows to use hive partitioning for file format", 0)\ - M(Bool, url_hive_partitioning, false, "Allows to use hive partitioning for url format", 0)\ - M(Bool, s3_hive_partitioning, false, "Allows to use hive partitioning for s3 format", 0)\ - M(Bool, azure_blob_storage_hive_partitioning, false, "Allows to use hive partitioning for AzureBlobStorage format", 0)\ - M(Bool, hdfs_hive_partitioning, false, "Allows to use hive partitioning for hdfs format", 0)\ \ M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \ \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 607f9b6d858..b676cd85ce6 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -59,11 +59,7 @@ static std::initializer_list readSchemaFromFormatImpl( std::optional format_name, const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, + std::string & sample_path, const ContextPtr & context) try { @@ -143,6 +144,10 @@ try { iterator_data = read_buffer_iterator.next(); + /// Extracting the File path for hive-style partitioning + if (sample_path.empty()) + sample_path = read_buffer_iterator.getLastFilePath(); + /// Read buffer iterator can determine the data format if it's unknown. /// For example by scanning schema cache or by finding new file with format extension. 
if (!format_name && iterator_data.format_name) @@ -163,7 +168,7 @@ try return {*iterator_data.cached_columns, *format_name}; } - schemas_for_union_mode.emplace_back(iterator_data.cached_columns->getAll(), read_buffer_iterator.getLastFileName()); + schemas_for_union_mode.emplace_back(iterator_data.cached_columns->getAll(), read_buffer_iterator.getLastFilePath()); continue; } @@ -249,7 +254,7 @@ try if (!names_and_types.empty()) read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types)); - schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName()); + schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFilePath()); } catch (...) { @@ -410,7 +415,7 @@ try throw Exception(ErrorCodes::CANNOT_DETECT_FORMAT, "The data format cannot be detected by the contents of the files. You can specify the format manually"); read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types)); - schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName()); + schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFilePath()); } if (format_name && mode == SchemaInferenceMode::DEFAULT) @@ -526,9 +531,9 @@ try } catch (Exception & e) { - auto file_name = read_buffer_iterator.getLastFileName(); - if (!file_name.empty()) - e.addMessage(fmt::format("(in file/uri {})", file_name)); + auto file_path = read_buffer_iterator.getLastFilePath(); + if (!file_path.empty()) + e.addMessage(fmt::format("(in file/uri {})", file_path)); throw; } @@ -536,17 +541,19 @@ ColumnsDescription readSchemaFromFormat( const String & format_name, const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, + std::string & sample_path, const ContextPtr & context) { - return readSchemaFromFormatImpl(format_name, format_settings, read_buffer_iterator, context).first; + return readSchemaFromFormatImpl(format_name, format_settings, read_buffer_iterator, sample_path, context).first; } std::pair detectFormatAndReadSchema( const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, + std::string & sample_path, const ContextPtr & context) { - return readSchemaFromFormatImpl(std::nullopt, format_settings, read_buffer_iterator, context); + return readSchemaFromFormatImpl(std::nullopt, format_settings, read_buffer_iterator, sample_path, context); } SchemaCache::Key getKeyForSchemaCache( diff --git a/src/Formats/ReadSchemaUtils.h b/src/Formats/ReadSchemaUtils.h index bb5e068f696..6c562a06bf0 100644 --- a/src/Formats/ReadSchemaUtils.h +++ b/src/Formats/ReadSchemaUtils.h @@ -56,8 +56,8 @@ struct IReadBufferIterator /// Set auto detected format name. virtual void setFormatName(const String & /*format_name*/) {} - /// Get last processed file name for better exception messages. - virtual String getLastFileName() const { return ""; } + /// Get last processed file path for better exception messages. + virtual String getLastFilePath() const { return ""; } /// Return true if method recreateLastReadBuffer is implemented. virtual bool supportsLastReadBufferRecreation() const { return false; } @@ -122,6 +122,7 @@ ColumnsDescription readSchemaFromFormat( const String & format_name, const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, + std::string & sample_path, const ContextPtr & context); /// Try to detect the format of the data and it's schema. 
@@ -131,6 +132,7 @@ ColumnsDescription readSchemaFromFormat( std::pair detectFormatAndReadSchema( const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, + std::string & sample_path, const ContextPtr & context); SchemaCache::Key getKeyForSchemaCache(const String & source, const String & format, const std::optional & format_settings, const ContextPtr & context); diff --git a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h index 83865c47eb8..5c40cda442b 100644 --- a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h +++ b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h @@ -89,8 +89,9 @@ public: { ConfigurationPtr configuration = base_configuration->clone(); configuration->setPaths(metadata->getDataFiles()); + std::string sample_string; return Storage::resolveSchemaFromData( - object_storage_, configuration, format_settings_, local_context); + object_storage_, configuration, format_settings_, sample_string, local_context); } } diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.cpp b/src/Storages/ObjectStorage/ReadBufferIterator.cpp index 78cdc442f64..a47049791ae 100644 --- a/src/Storages/ObjectStorage/ReadBufferIterator.cpp +++ b/src/Storages/ObjectStorage/ReadBufferIterator.cpp @@ -131,7 +131,7 @@ void ReadBufferIterator::setFormatName(const String & format_name) format = format_name; } -String ReadBufferIterator::getLastFileName() const +String ReadBufferIterator::getLastFilePath() const { if (current_object_info) return current_object_info->getPath(); diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.h b/src/Storages/ObjectStorage/ReadBufferIterator.h index 6eeb52ec2ed..b81aebb7b07 100644 --- a/src/Storages/ObjectStorage/ReadBufferIterator.h +++ b/src/Storages/ObjectStorage/ReadBufferIterator.h @@ -33,7 +33,7 @@ public: void setResultingSchema(const ColumnsDescription & columns) override; - String getLastFileName() const override; + String getLastFilePath() const override; void setFormatName(const String & format_name) override; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index ae7c211330c..717f48983f3 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -33,17 +33,22 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } - -bool checkIfHiveSettingEnabled(const ContextPtr & context, const std::string & storage_type_name) +std::string StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) { - if (storage_type_name == "s3") - return context->getSettings().s3_hive_partitioning; - else if (storage_type_name == "hdfs") - return context->getSettings().hdfs_hive_partitioning; - else if (storage_type_name == "azure") - return context->getSettings().azure_blob_storage_hive_partitioning; - else - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported storage type: {}", storage_type_name); + auto file_iterator = StorageObjectStorageSource::createFileIterator( + configuration, + object_storage, + distributed_processing, + context, + {}, // predicate + metadata.getColumns().getAll(), // virtual_columns + nullptr, // read_keys + {} // file_progress_callback + ); + + if (auto file = file_iterator->next(0)) + return file->getPath(); + return ""; } StorageObjectStorage::StorageObjectStorage( @@ -66,7 +71,9 @@ StorageObjectStorage::StorageObjectStorage( , log(getLogger(fmt::format("Storage{}({})", 
configuration->getEngineName(), table_id_.getFullTableName()))) { ColumnsDescription columns{columns_}; - resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, context); + + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context); configuration->check(context); StorageInMemoryMetadata metadata; @@ -74,23 +81,13 @@ StorageObjectStorage::StorageObjectStorage( metadata.setConstraints(constraints_); metadata.setComment(comment); - auto file_iterator = StorageObjectStorageSource::createFileIterator( - configuration, - object_storage, - distributed_processing_, - context, - {}, // predicate - metadata.getColumns().getAll(), // virtual_columns - nullptr, // read_keys - {} // file_progress_callback - ); + + if (sample_path.empty() && context->getSettings().use_hive_partitioning) + sample_path = getPathSample(metadata, context); + else if (!context->getSettings().use_hive_partitioning) + sample_path = ""; - Strings paths; - - if (checkIfHiveSettingEnabled(context, configuration->getTypeName())) - if (auto file = file_iterator->next(0)) - paths = {file->getPath()}; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), paths)); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), sample_path)); setInMemoryMetadata(metadata); } @@ -386,33 +383,36 @@ ColumnsDescription StorageObjectStorage::resolveSchemaFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context) { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - return readSchemaFromFormat(configuration->format, format_settings, *iterator, context); + return readSchemaFromFormat(configuration->format, format_settings, *iterator, sample_path, context); } std::string StorageObjectStorage::resolveFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context) { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - return detectFormatAndReadSchema(format_settings, *iterator, context).second; + return detectFormatAndReadSchema(format_settings, *iterator, sample_path, context).second; } std::pair StorageObjectStorage::resolveSchemaAndFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context) { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - auto [columns, format] = detectFormatAndReadSchema(format_settings, *iterator, context); + auto [columns, format] = detectFormatAndReadSchema(format_settings, *iterator, sample_path, context); configuration->format = format; return std::pair(columns, format); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index cf8ec113653..dd7ec31c970 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -42,6 +42,7 @@ public: size_t list_object_keys_size; bool 
throw_on_zero_files_match; bool ignore_non_existent_file; + bool use_hive_partitioning; }; StorageObjectStorage( @@ -100,23 +101,28 @@ public: const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context); static std::string resolveFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context); static std::pair resolveSchemaAndFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context); protected: virtual void updateConfiguration(ContextPtr local_context); + std::string getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); + static std::unique_ptr createReadBufferIterator( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp index 78f568d8ae2..0dc4b845a47 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp @@ -33,7 +33,8 @@ StorageObjectStorageCluster::StorageObjectStorageCluster( , object_storage(object_storage_) { ColumnsDescription columns{columns_}; - resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, {}, context_); + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, {}, sample_path, context_); configuration->check(context_); StorageInMemoryMetadata metadata; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index afb23961312..ecb3ff9d856 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -197,16 +197,6 @@ Chunk StorageObjectStorageSource::generate() const auto & filename = object_info->getFileName(); chassert(object_info->metadata); - auto hive_map = VirtualColumnUtils::parsePartitionMapFromPath(object_info->getPath()); - bool contains_virtual_column = std::any_of(hive_map.begin(), hive_map.end(), - [&](const auto& pair) - { - return read_from_format_info.requested_virtual_columns.contains(pair.first); - }); - - if (!contains_virtual_column) - hive_map.clear(); // If we cannot find any virtual column in requested, we don't add any of them to chunk - VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, read_from_format_info.requested_virtual_columns, { @@ -214,8 +204,7 @@ Chunk StorageObjectStorageSource::generate() .size = object_info->metadata->size_bytes, .filename = &filename, .last_modified = object_info->metadata->last_modified, - .hive_partitioning_map = hive_map - }); + }, object_info->getPath()); return chunk; } diff --git a/src/Storages/ObjectStorage/Utils.cpp b/src/Storages/ObjectStorage/Utils.cpp index e49e14d2a0c..73410d959e0 100644 --- a/src/Storages/ObjectStorage/Utils.cpp +++ b/src/Storages/ObjectStorage/Utils.cpp @@ -49,19 +49,20 @@ void resolveSchemaAndFormat( ObjectStoragePtr object_storage, const StorageObjectStorage::ConfigurationPtr & configuration, std::optional format_settings, + std::string & sample_path, const ContextPtr & context) { if (columns.empty()) 
{ if (format == "auto") std::tie(columns, format) = - StorageObjectStorage::resolveSchemaAndFormatFromData(object_storage, configuration, format_settings, context); + StorageObjectStorage::resolveSchemaAndFormatFromData(object_storage, configuration, format_settings, sample_path, context); else - columns = StorageObjectStorage::resolveSchemaFromData(object_storage, configuration, format_settings, context); + columns = StorageObjectStorage::resolveSchemaFromData(object_storage, configuration, format_settings, sample_path, context); } else if (format == "auto") { - format = StorageObjectStorage::resolveFormatFromData(object_storage, configuration, format_settings, context); + format = StorageObjectStorage::resolveFormatFromData(object_storage, configuration, format_settings, sample_path, context); } if (!columns.hasOnlyOrdinary()) diff --git a/src/Storages/ObjectStorage/Utils.h b/src/Storages/ObjectStorage/Utils.h index 2077999df41..7ee14f50979 100644 --- a/src/Storages/ObjectStorage/Utils.h +++ b/src/Storages/ObjectStorage/Utils.h @@ -19,6 +19,7 @@ void resolveSchemaAndFormat( ObjectStoragePtr object_storage, const StorageObjectStorage::ConfigurationPtr & configuration, std::optional format_settings, + std::string & sample_path, const ContextPtr & context); } diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index 95265cde9ea..c12cdddeec7 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -160,7 +160,8 @@ StorageObjectStorageQueue::StorageObjectStorageQueue( configuration->check(context_); ColumnsDescription columns{columns_}; - resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, context_); + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context_); configuration->check(context_); StorageInMemoryMetadata storage_metadata; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 0c32f29cb34..9751d596fff 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -502,7 +502,7 @@ namespace StorageFile::getSchemaCache(getContext()).addManyColumns(cache_keys, columns); } - String getLastFileName() const override + String getLastFilePath() const override { if (current_index != 0) return paths[current_index - 1]; @@ -777,7 +777,7 @@ namespace format = format_name; } - String getLastFileName() const override + String getLastFilePath() const override { return last_read_file_path; } @@ -880,10 +880,11 @@ std::pair StorageFile::getTableStructureAndFormatFro auto read_buffer_iterator = SingleReadBufferIterator(std::move(read_buf)); ColumnsDescription columns; + std::string sample_path; if (format) - columns = readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context); + columns = readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context); else - std::tie(columns, format) = detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); + std::tie(columns, format) = detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); peekable_read_buffer_from_fd = read_buffer_iterator.releaseBuffer(); if (peekable_read_buffer_from_fd) @@ -928,20 +929,21 @@ std::pair StorageFile::getTableStructureAndFormatFro } + std::string sample_path; if (archive_info) { 
ReadBufferFromArchiveIterator read_buffer_iterator(*archive_info, format, format_settings, context); if (format) - return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context), *format}; + return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context), *format}; - return detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); + return detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); } ReadBufferFromFileIterator read_buffer_iterator(paths, format, compression_method, format_settings, context); if (format) - return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context), *format}; + return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context), *format}; - return detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); + return detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); } ColumnsDescription StorageFile::getTableStructureFromFile( @@ -1097,10 +1099,10 @@ void StorageFile::setStorageMetadata(CommonArguments args) storage_metadata.setComment(args.comment); setInMemoryMetadata(storage_metadata); - Strings paths_for_virtuals; - if (args.getContext()->getSettingsRef().file_hive_partitioning) - paths_for_virtuals = paths; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), paths_for_virtuals)); + std::string path_for_virtuals; + if (args.getContext()->getSettingsRef().use_hive_partitioning && !paths.empty()) + path_for_virtuals = paths[0]; + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), path_for_virtuals, format_settings.value_or(FormatSettings{}))); } @@ -1442,14 +1444,9 @@ Chunk StorageFileSource::generate() chunk_size = input_format->getApproxBytesReadForChunk(); progress(num_rows, chunk_size ? chunk_size : chunk.bytes()); - std::map hive_map; - if (getContext()->getSettingsRef().file_hive_partitioning) - { - hive_map = VirtualColumnUtils::parsePartitionMapFromPath(current_path); - - for (const auto& item : hive_map) - requested_virtual_columns.push_back(NameAndTypePair(item.first, std::make_shared())); - } + std::string hive_partitioning_path; + if (getContext()->getSettingsRef().use_hive_partitioning) + hive_partitioning_path = current_path; /// Enrich with virtual columns. VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( @@ -1459,8 +1456,7 @@ Chunk StorageFileSource::generate() .size = current_file_size, .filename = (filename_override.has_value() ? 
&filename_override.value() : nullptr), .last_modified = current_file_last_modified, - .hive_partitioning_map = hive_map - }); + }, hive_partitioning_path); return chunk; } @@ -1636,16 +1632,6 @@ void ReadFromFile::createIterator(const ActionsDAG::Node * predicate) storage->distributed_processing); } -void addPartitionColumnsToInfoHeader(Strings paths, ReadFromFormatInfo & info) -{ - for (const auto& path : paths) - { - auto map = VirtualColumnUtils::parsePartitionMapFromPath(path); - for (const auto& item : map) - info.source_header.insertUnique(ColumnWithTypeAndName(std::make_shared(), item.first)); - } -} - void ReadFromFile::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { createIterator(nullptr); @@ -1665,9 +1651,6 @@ void ReadFromFile::initializePipeline(QueryPipelineBuilder & pipeline, const Bui paths = storage->paths; } - if (getContext()->getSettingsRef().file_hive_partitioning) - addPartitionColumnsToInfoHeader(paths, info); - if (max_num_streams > files_to_read) num_streams = files_to_read; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index f6374701fc2..59c5465a381 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -153,10 +153,10 @@ IStorageURLBase::IStorageURLBase( storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); - Strings uri_for_partitioning; - if (context_->getSettingsRef().url_hive_partitioning) - uri_for_partitioning = {uri}; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), uri_for_partitioning)); + std::string uri_for_partitioning; + if (context_->getSettingsRef().use_hive_partitioning) + uri_for_partitioning = uri; + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), uri_for_partitioning, format_settings.value_or(FormatSettings{}))); } @@ -415,9 +415,9 @@ Chunk StorageURLSource::generate() size_t chunk_size = 0; if (input_format) chunk_size = input_format->getApproxBytesReadForChunk(); - std::map hive_map; - if (getContext()->getSettingsRef().url_hive_partitioning) - hive_map = VirtualColumnUtils::parsePartitionMapFromPath(curr_uri.getPath()); + std::string hive_partitioning_path; + if (getContext()->getSettingsRef().use_hive_partitioning) + hive_partitioning_path = curr_uri.getPath(); progress(num_rows, chunk_size ? 
chunk_size : chunk.bytes());
     VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk(
@@ -425,8 +425,7 @@ Chunk StorageURLSource::generate()
         {
             .path = curr_uri.getPath(),
             .size = current_file_size,
-            .hive_partitioning_map = hive_map
-        });
+        }, hive_partitioning_path);
 
         return chunk;
     }
@@ -859,7 +858,7 @@ namespace
             format = format_name;
         }
 
-        String getLastFileName() const override { return current_url_option; }
+        String getLastFilePath() const override { return current_url_option; }
 
         bool supportsLastReadBufferRecreation() const override { return true; }
 
@@ -960,9 +959,10 @@ std::pair IStorageURLBase::getTableStructureAndForma
         urls_to_check = {uri};
 
     ReadBufferIterator read_buffer_iterator(urls_to_check, format, compression_method, headers, format_settings, context);
+    std::string sample_path;
     if (format)
-        return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context), *format};
-    return detectFormatAndReadSchema(format_settings, read_buffer_iterator, context);
+        return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context), *format};
+    return detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context);
 }
 
 ColumnsDescription IStorageURLBase::getTableStructureFromData(
diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp
index 0b79e3b7a16..379b14d8e51 100644
--- a/src/Storages/VirtualColumnUtils.cpp
+++ b/src/Storages/VirtualColumnUtils.cpp
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include
 #include "Functions/FunctionsLogical.h"
 #include "Functions/IFunction.h"
 #include "Functions/IFunctionAdaptors.h"
@@ -115,22 +116,19 @@ NameSet getVirtualNamesForFileLikeStorage()
     return {"_path", "_file", "_size", "_time"};
 }
 
-Strings parseVirtualColumnNameFromPath(const std::string & path)
+std::map<std::string, std::string> parseFromPath(const std::string& path)
 {
     std::string pattern = "/([^/]+)=([^/]+)";
-    // Map to store the key-value pairs
-    std::map<std::string, std::string> key_values;
-
     re2::StringPiece input_piece(path);
-    std::string key;
-    Strings result;
-    while (RE2::FindAndConsume(&input_piece, pattern, &key))
-        result.push_back(key);
-    return result;
+    std::map<std::string, std::string> key_values;
+    std::string key, value;
+    while (RE2::FindAndConsume(&input_piece, pattern, &key, &value))
+        key_values["_" + key] = value;
+    return key_values;
 }
 
-VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, Strings paths)
+VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, std::string path, FormatSettings settings)
 {
     VirtualColumnsDescription desc;
 
@@ -147,11 +145,13 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription
     add_virtual("_size", makeNullable(std::make_shared<DataTypeUInt64>()));
     add_virtual("_time", makeNullable(std::make_shared<DataTypeDateTime>()));
 
-    for (const auto& path : paths)
+    auto map = parseFromPath(path);
+    for (const auto& item : map)
     {
-        auto names = parseVirtualColumnNameFromPath(path);
-        for (const auto& name : names)
-            add_virtual("_" + name, std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()));
+        auto type = tryInferDataTypeForSingleField(item.second, settings);
+        if (type == nullptr)
+            type = std::make_shared<DataTypeString>();
+        add_virtual(item.first, std::make_shared<DataTypeLowCardinality>(type));
     }
 
     return desc;
@@ -213,25 +213,11 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector<String> & paths, const
     return block.getByName("_idx").column;
 }
 
-std::map<std::string, std::string> parsePartitionMapFromPath(const std::string & path)
-{
-    std::string pattern = "/([^/]+)=([^/]+)"; // Regex to capture key=value pairs
-    // Map to store the key-value pairs
-    std::map<std::string, std::string> key_values;
-
-    re2::StringPiece input_piece(path);
-    std::string key;
-    std::string value;
-    while (RE2::FindAndConsume(&input_piece, pattern, &key, &value))
-        key_values["_" + key] = value;
-
-    return key_values;
-}
-
 void addRequestedFileLikeStorageVirtualsToChunk(
     Chunk & chunk, const NamesAndTypesList & requested_virtual_columns,
-    VirtualsForFileLikeStorage virtual_values)
+    VirtualsForFileLikeStorage virtual_values, const std::string & hive_partitioning_path)
 {
+    auto hive_map = parseFromPath(hive_partitioning_path);
     for (const auto & virtual_column : requested_virtual_columns)
     {
         if (virtual_column.name == "_path")
@@ -265,13 +251,22 @@ void addRequestedFileLikeStorageVirtualsToChunk(
             else
                 chunk.addColumn(virtual_column.type->createColumnConstWithDefaultValue(chunk.getNumRows())->convertToFullColumnIfConst());
         }
-        else
+        else if (!hive_map.empty())
         {
-            auto it = virtual_values.hive_partitioning_map.find(virtual_column.getNameInStorage());
-            if (it != virtual_values.hive_partitioning_map.end())
+            bool contains_virtual_column = std::any_of(hive_map.begin(), hive_map.end(),
+                [&](const auto& pair)
+                {
+                    return requested_virtual_columns.contains(pair.first);
+                });
+
+            if (!contains_virtual_column)
+                hive_map.clear(); // If we cannot find any virtual column in requested, we don't add any of them to chunk
+
+            auto it = hive_map.find(virtual_column.getNameInStorage());
+            if (it != hive_map.end())
             {
                 chunk.addColumn(virtual_column.getTypeInStorage()->createColumnConst(chunk.getNumRows(), it->second)->convertToFullColumnIfConst());
-                virtual_values.hive_partitioning_map.erase(it);
+                hive_map.erase(it);
             }
         }
     }
diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h
index f9b49cc48ed..72922be60bd 100644
--- a/src/Storages/VirtualColumnUtils.h
+++ b/src/Storages/VirtualColumnUtils.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -49,7 +50,7 @@ auto extractSingleValueFromBlock(const Block & block, const String & name)
 }
 
 NameSet getVirtualNamesForFileLikeStorage();
-VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, Strings paths = {});
+VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, std::string path = "", FormatSettings settings = FormatSettings());
 
 ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns);
 
@@ -76,15 +77,13 @@ struct VirtualsForFileLikeStorage
     std::optional<size_t> size { std::nullopt };
     const String * filename { nullptr };
     std::optional<Poco::Timestamp> last_modified { std::nullopt };
-    std::map<std::string, std::string> hive_partitioning_map {};
-
 };
 
-std::map<std::string, std::string> parsePartitionMapFromPath(const std::string & path);
+std::map<std::string, std::string> parseFromPath(const std::string& path);
 
 void addRequestedFileLikeStorageVirtualsToChunk(
     Chunk & chunk, const NamesAndTypesList & requested_virtual_columns,
-    VirtualsForFileLikeStorage virtual_values);
+    VirtualsForFileLikeStorage virtual_values, const std::string & hive_partitioning_path = "");
 
 }
 
 }
diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp
index ad2a142a140..66152cb0c91 100644
--- a/src/TableFunctions/TableFunctionFormat.cpp
+++ b/src/TableFunctions/TableFunctionFormat.cpp
@@ -85,9 +85,10 @@ ColumnsDescription TableFunctionFormat::getActualTableStructure(ContextPtr conte
     if (structure == "auto")
     {
         SingleReadBufferIterator read_buffer_iterator(std::make_unique<ReadBufferFromString>(data));
+
std::string sample_path; if (format == "auto") - return detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, context).first; - return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, context); + return detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, sample_path, context).first; + return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, sample_path, context); } return parseColumnsListFromString(structure, context); } @@ -131,11 +132,12 @@ StoragePtr TableFunctionFormat::executeImpl(const ASTPtr & /*ast_function*/, Con String format_name = format; if (structure == "auto") { + std::string sample_path; SingleReadBufferIterator read_buffer_iterator(std::make_unique(data)); if (format_name == "auto") - std::tie(columns, format_name) = detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, context); + std::tie(columns, format_name) = detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, sample_path, context); else - columns = readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, context); + columns = readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, sample_path, context); } else { diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 550d9cc799b..39392a4c44c 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -84,7 +84,8 @@ ColumnsDescription TableFunctionObjectStorage< context->checkAccess(getSourceAccessType()); ColumnsDescription columns; auto storage = getObjectStorage(context, !is_insert_query); - resolveSchemaAndFormat(columns, configuration->format, storage, configuration, std::nullopt, context); + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, storage, configuration, std::nullopt, sample_path, context); return columns; } else diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/__init__.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml deleted file mode 100644 index ffa4673c9ee..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_azure.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - node_0 - 9000 - - - node_1 - 9000 - - - node_2 - 9000 - - - - - - - - node_0 - 9000 - - - - - node_1 - 19000 - - - - - - - simple_cluster - - \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml deleted file mode 100644 index b99b21ea40b..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/cluster_hdfs.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - node1 - 9000 - - - - - node1 - 19000 - - - - - - - - 127.0.0.1 - 9000 - - - - - 127.0.0.2 - 9000 - - - - - diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml deleted file mode 100644 index a39badbf8ec..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/disable_profilers_azure.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - 0 - 
0 - - - diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml deleted file mode 100644 index c2e11b47a5e..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/macro_hdfs.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - test_cluster_two_shards - - \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml deleted file mode 100644 index bd7f9ff97f1..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/named_collections_azure.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - cont - test_simple_write_named.csv - key UInt64, data String - CSV - - - devstoreaccount1 - Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== - - - diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml deleted file mode 100644 index e2168ecd06d..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_azure.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 2 - \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml deleted file mode 100644 index 37639649b5f..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/schema_cache_hdfs.xml +++ /dev/null @@ -1,3 +0,0 @@ - - 2 - \ No newline at end of file diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml b/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml deleted file mode 100644 index 4b6ba057ecb..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/configs/users_azure.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - default - 1 - - - diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py deleted file mode 100644 index 0be697821f0..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_azure.py +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import time - -from helpers.cluster import ClickHouseCluster, is_arm -import re - -from azure.storage.blob import BlobServiceClient -from helpers.cluster import ClickHouseCluster, ClickHouseInstance - -if is_arm(): - pytestmark = pytest.mark.skip - - -@pytest.fixture(scope="module") -def cluster(): - try: - cluster = ClickHouseCluster(__file__) - cluster.add_instance( - "node", - main_configs=[ - "configs/named_collections_azure.xml", - "configs/schema_cache_azure.xml", - ], - user_configs=[ - "configs/disable_profilers_azure.xml", - "configs/users_azure.xml", - ], - with_azurite=True, - ) - cluster.start() - container_client = cluster.blob_service_client.get_container_client("cont") - container_client.create_container() - yield cluster - finally: - cluster.shutdown() - - -def azure_query( - node, query, expect_error=False, try_num=10, settings={}, query_on_retry=None -): - for i in range(try_num): - try: - if expect_error: - return node.query_and_get_error(query, 
settings=settings) - else: - return node.query(query, settings=settings) - except Exception as ex: - retriable_errors = [ - "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response", - "DB::Exception: Azure::Core::Http::TransportException: Connection closed before getting full response or response is less than expected", - "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response", - "DB::Exception: Azure::Core::Http::TransportException: Error while polling for socket ready read", - "Azure::Core::Http::TransportException, e.what() = Connection was closed by the server while trying to read a response", - "Azure::Core::Http::TransportException, e.what() = Connection closed before getting full response or response is less than expected", - "Azure::Core::Http::TransportException, e.what() = Connection was closed by the server while trying to read a response", - "Azure::Core::Http::TransportException, e.what() = Error while polling for socket ready read", - ] - retry = False - for error in retriable_errors: - if error in str(ex): - retry = True - print(f"Try num: {i}. Having retriable error: {ex}") - time.sleep(i) - break - if not retry or i == try_num - 1: - raise Exception(ex) - if query_on_retry is not None: - node.query(query_on_retry) - continue - - -def get_azure_file_content(filename, port): - container_name = "cont" - connection_string = ( - f"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;" - f"AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;" - f"BlobEndpoint=http://127.0.0.1:{port}/devstoreaccount1;" - ) - blob_service_client = BlobServiceClient.from_connection_string( - str(connection_string) - ) - container_client = blob_service_client.get_container_client(container_name) - blob_client = container_client.get_blob_client(filename) - download_stream = blob_client.download_blob() - return download_stream.readall().decode("utf-8") - - -@pytest.fixture(autouse=True, scope="function") -def delete_all_files(cluster): - port = cluster.env_variables["AZURITE_PORT"] - connection_string = ( - f"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;" - f"AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;" - f"BlobEndpoint=http://127.0.0.1:{port}/devstoreaccount1;" - ) - blob_service_client = BlobServiceClient.from_connection_string(connection_string) - containers = blob_service_client.list_containers() - for container in containers: - container_client = blob_service_client.get_container_client(container) - blob_list = container_client.list_blobs() - for blob in blob_list: - print(blob) - blob_client = container_client.get_blob_client(blob) - blob_client.delete_blob() - - assert len(list(container_client.list_blobs())) == 0 - - yield - - -def test_azure_partitioning_with_one_parameter(cluster): - # type: (ClickHouseCluster) -> None - node = cluster.instances["node"] # type: ClickHouseInstance - table_format = "column1 String, column2 String" - values = f"('Elizabeth', 'Gordon')" - path = "a/column1=Elizabeth/sample.csv" - - azure_query( - node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", - ) - - query = ( - f"SELECT column1, 
column2, _file, _path, _column1 FROM azureBlobStorage(azure_conf2, " - f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " - f"blob_path='{path}', format='CSV', structure='{table_format}')" - ) - assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} - ).splitlines() == [ - "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format( - bucket="cont", max_path=path - ) - ] - - query = ( - f"SELECT column2 FROM azureBlobStorage(azure_conf2, " - f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " - f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" - ) - assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} - ).splitlines() == ["Gordon"] - - -def test_azure_partitioning_with_two_parameters(cluster): - # type: (ClickHouseCluster) -> None - node = cluster.instances["node"] # type: ClickHouseInstance - table_format = "column1 String, column2 String" - values_1 = f"('Elizabeth', 'Gordon')" - values_2 = f"('Emilia', 'Gregor')" - path = "a/column1=Elizabeth/column2=Gordon/sample.csv" - - azure_query( - node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", - ) - - query = ( - f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " - f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " - f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" - ) - assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} - ).splitlines() == [ - "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format( - bucket="cont", max_path=path - ) - ] - - query = ( - f"SELECT column1 FROM azureBlobStorage(azure_conf2, " - f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " - f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;" - ) - assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} - ).splitlines() == ["Elizabeth"] - - query = ( - f"SELECT column1 FROM azureBlobStorage(azure_conf2, " - f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " - f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND column1=_column1;" - ) - assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} - ).splitlines() == ["Elizabeth"] - - -def test_azure_partitioning_without_setting(cluster): - # type: (ClickHouseCluster) -> None - node = cluster.instances["node"] # type: ClickHouseInstance - table_format = "column1 String, column2 String" - values_1 = f"('Elizabeth', 'Gordon')" - values_2 = f"('Emilia', 'Gregor')" - path = "a/column1=Elizabeth/column2=Gordon/sample.csv" - - azure_query( - node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", - ) - - query = ( - f"SELECT column1, 
column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " - f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " - f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" - ) - pattern = re.compile( - r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL - ) - - with pytest.raises(Exception, match=pattern): - azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 0}) diff --git a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py b/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py deleted file mode 100644 index 4667d18688a..00000000000 --- a/tests/integration/test_hive_style_partitioning_hdfs_azure/test_hdfs.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 - -import pytest - -from helpers.client import QueryRuntimeException -from helpers.cluster import ClickHouseCluster, is_arm -import re - -from helpers.cluster import ClickHouseCluster - -if is_arm(): - pytestmark = pytest.mark.skip - -cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance( - "node1", - main_configs=[ - "configs/macro_hdfs.xml", - "configs/schema_cache_hdfs.xml", - "configs/cluster_hdfs.xml", - ], - with_hdfs=True, -) - - -@pytest.fixture(scope="module") -def started_cluster(): - try: - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -def test_hdfs_partitioning_with_one_parameter(started_cluster): - hdfs_api = started_cluster.hdfs_api - hdfs_api.write_data(f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n") - assert hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") == f"Elizabeth\tGordon\n" - - r = node1.query( - "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", - settings={"hdfs_hive_partitioning": 1}, - ) - assert r == f"Elizabeth\n" - - -def test_hdfs_partitioning_with_two_parameters(started_cluster): - hdfs_api = started_cluster.hdfs_api - hdfs_api.write_data( - f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" - ) - assert ( - hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") - == f"Elizabeth\tGordon\n" - ) - - r = node1.query( - "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", - settings={"hdfs_hive_partitioning": 1}, - ) - assert r == f"Gordon\n" - - -def test_hdfs_partitioning_without_setting(started_cluster): - hdfs_api = started_cluster.hdfs_api - hdfs_api.write_data( - f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" - ) - assert ( - hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") - == f"Elizabeth\tGordon\n" - ) - pattern = re.compile( - r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL - ) - - with pytest.raises(QueryRuntimeException, match=pattern): - node1.query( - f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", - settings={"hdfs_hive_partitioning": 0}, - ) - - -if __name__ == "__main__": - cluster.start() - input("Cluster created, press any key to destroy...") - cluster.shutdown() diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 20b004a7605..893df6d23aa 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -5,6 +5,7 @@ import json import logging import os import io 
+import re import random import threading import time @@ -1462,3 +1463,112 @@ def test_insert_create_new_file(cluster): assert TSV(res) == TSV( "test_create_new_file.csv\t1\ntest_create_new_file.1.csv\t2\n" ) + + +def test_hive_partitioning_with_one_parameter(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values = f"('Elizabeth', 'Gordon')" + path = "a/column1=Elizabeth/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}')" + ) + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == [ + "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format( + bucket="cont", max_path=path + ) + ] + + query = ( + f"SELECT column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == ["Gordon"] + + +def test_hive_partitioning_with_two_parameters(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values_1 = f"('Elizabeth', 'Gordon')" + values_2 = f"('Emilia', 'Gregor')" + path = "a/column1=Elizabeth/column2=Gordon/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == [ + "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format( + bucket="cont", max_path=path + ) + ] + + query = ( + f"SELECT column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;" + ) + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == ["Elizabeth"] + + query = ( + f"SELECT column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND 
column1=_column1;" + ) + assert azure_query( + node, query, settings={"azure_blob_storage_hive_partitioning": 1} + ).splitlines() == ["Elizabeth"] + + +def test_hive_partitioning_without_setting(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values_1 = f"('Elizabeth', 'Gordon')" + values_2 = f"('Emilia', 'Gregor')" + path = "a/column1=Elizabeth/column2=Gordon/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + pattern = re.compile( + r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL + ) + + with pytest.raises(Exception, match=pattern): + azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 0}) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 47d8f44c0b7..8071b520a4f 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -3,6 +3,7 @@ import os import pytest import time from helpers.cluster import ClickHouseCluster, is_arm +from helpers.client import QueryRuntimeException from helpers.test_tools import TSV from pyhdfs import HdfsClient @@ -1180,6 +1181,54 @@ def test_respect_object_existence_on_partitioned_write(started_cluster): assert int(result) == 44 +def test_hive_partitioning_with_one_parameter(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data(f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n") + assert hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") == f"Elizabeth\tGordon\n" + + r = node1.query( + "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", + settings={"hdfs_hive_partitioning": 1}, + ) + assert r == f"Elizabeth\n" + + +def test_hive_partitioning_with_two_parameters(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") + == f"Elizabeth\tGordon\n" + ) + + r = node1.query( + "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", + settings={"hdfs_hive_partitioning": 1}, + ) + assert r == f"Gordon\n" + + +def test_hive_partitioning_without_setting(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") + == f"Elizabeth\tGordon\n" + ) + pattern = re.compile( + r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL + ) + + with pytest.raises(QueryRuntimeException, match=pattern): + node1.query( + f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", + settings={"hdfs_hive_partitioning": 0}, + ) + if 
__name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") From 3fb50aa5d8560f3cd5f4fc9b35bfa47a60d2ca80 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Fri, 5 Jul 2024 13:28:47 +0000 Subject: [PATCH 0286/2361] style fix --- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 1 - tests/integration/test_storage_hdfs/test.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 717f48983f3..4b5b514e67d 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -81,7 +81,6 @@ StorageObjectStorage::StorageObjectStorage( metadata.setConstraints(constraints_); metadata.setComment(comment); - if (sample_path.empty() && context->getSettings().use_hive_partitioning) sample_path = getPathSample(metadata, context); else if (!context->getSettings().use_hive_partitioning) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 8071b520a4f..da46756841d 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -1229,6 +1229,7 @@ def test_hive_partitioning_without_setting(started_cluster): settings={"hdfs_hive_partitioning": 0}, ) + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") From 6c4c17f119fb95d66f894a92ce8c91fa6664ff5b Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Fri, 5 Jul 2024 16:44:26 +0200 Subject: [PATCH 0287/2361] remove use_hive_partitioning from query settings --- src/Storages/ObjectStorage/StorageObjectStorage.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 3fbfc3aacd7..f97d2620fe5 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -43,7 +43,6 @@ public: size_t list_object_keys_size; bool throw_on_zero_files_match; bool ignore_non_existent_file; - bool use_hive_partitioning; }; StorageObjectStorage( From 7843313f8e09eb018a481b4ba70fcf5fc147105e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 5 Jul 2024 18:20:50 +0200 Subject: [PATCH 0288/2361] Update PlannerJoinTree.h --- src/Planner/PlannerJoinTree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Planner/PlannerJoinTree.h b/src/Planner/PlannerJoinTree.h index bc58e802a09..259622b1d50 100644 --- a/src/Planner/PlannerJoinTree.h +++ b/src/Planner/PlannerJoinTree.h @@ -18,7 +18,7 @@ struct JoinTreeQueryPlan QueryPlan query_plan; QueryProcessingStage::Enum from_stage; std::set used_row_policies{}; - UsefulSets useful_sets; + UsefulSets useful_sets{}; std::unordered_map query_node_to_plan_step_mapping{}; }; From 9064bb1b8389e301f45bc78a5365665292f51c6e Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Fri, 5 Jul 2024 19:36:59 +0200 Subject: [PATCH 0289/2361] fix settings in tests --- .../test_storage_azure_blob_storage/test.py | 12 ++++++------ tests/integration/test_storage_hdfs/test.py | 6 +++--- .../0_stateless/03203_hive_style_partitioning.sh | 12 ++++++------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 
f2a1f9e35a9..6966abfee4f 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -1484,7 +1484,7 @@ def test_hive_partitioning_with_one_parameter(cluster): f"blob_path='{path}', format='CSV', structure='{table_format}')" ) assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} + node, query, settings={"use_hive_partitioning": 1} ).splitlines() == [ "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format( bucket="cont", max_path=path @@ -1497,7 +1497,7 @@ def test_hive_partitioning_with_one_parameter(cluster): f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" ) assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} + node, query, settings={"use_hive_partitioning": 1} ).splitlines() == ["Gordon"] @@ -1521,7 +1521,7 @@ def test_hive_partitioning_with_two_parameters(cluster): f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" ) assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} + node, query, settings={"use_hive_partitioning": 1} ).splitlines() == [ "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format( bucket="cont", max_path=path @@ -1534,7 +1534,7 @@ def test_hive_partitioning_with_two_parameters(cluster): f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;" ) assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} + node, query, settings={"use_hive_partitioning": 1} ).splitlines() == ["Elizabeth"] query = ( @@ -1543,7 +1543,7 @@ def test_hive_partitioning_with_two_parameters(cluster): f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND column1=_column1;" ) assert azure_query( - node, query, settings={"azure_blob_storage_hive_partitioning": 1} + node, query, settings={"use_hive_partitioning": 1} ).splitlines() == ["Elizabeth"] @@ -1571,4 +1571,4 @@ def test_hive_partitioning_without_setting(cluster): ) with pytest.raises(Exception, match=pattern): - azure_query(node, query, settings={"azure_blob_storage_hive_partitioning": 0}) + azure_query(node, query, settings={"use_hive_partitioning": 0}) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index da46756841d..aa3efb8ba4a 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -1188,7 +1188,7 @@ def test_hive_partitioning_with_one_parameter(started_cluster): r = node1.query( "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", - settings={"hdfs_hive_partitioning": 1}, + settings={"use_hive_partitioning": 1}, ) assert r == f"Elizabeth\n" @@ -1205,7 +1205,7 @@ def test_hive_partitioning_with_two_parameters(started_cluster): r = node1.query( "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", - settings={"hdfs_hive_partitioning": 1}, + settings={"use_hive_partitioning": 1}, ) assert r == f"Gordon\n" @@ -1226,7 +1226,7 @@ def test_hive_partitioning_without_setting(started_cluster): with pytest.raises(QueryRuntimeException, match=pattern): node1.query( f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", - settings={"hdfs_hive_partitioning": 0}, + settings={"use_hive_partitioning": 0}, ) diff --git 
a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 98c039f3454..544fd17ffff 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" $CLICKHOUSE_LOCAL -n -q """ -set file_hive_partitioning = 1; +set use_hive_partitioning = 1; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -31,7 +31,7 @@ SELECT *, _non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_exi SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" $CLICKHOUSE_LOCAL -n -q """ -set file_hive_partitioning = 0; +set use_hive_partitioning = 0; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" @@ -41,7 +41,7 @@ $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" $CLICKHOUSE_LOCAL -n -q """ -set url_hive_partitioning = 1; +set use_hive_partitioning = 1; SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -63,7 +63,7 @@ SELECT *, _non_existing_column FROM url('http://localhost:11111/test/partitionin SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" $CLICKHOUSE_LOCAL -n -q """ -set url_hive_partitioning = 0; +set use_hive_partitioning = 0; SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" @@ -73,7 +73,7 @@ $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" $CLICKHOUSE_LOCAL -n -q """ -set s3_hive_partitioning = 1; +set use_hive_partitioning = 1; SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -96,7 +96,7 @@ SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=*/s """ $CLICKHOUSE_LOCAL -n -q """ -set s3_hive_partitioning = 0; +set use_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" From 2257f9a2aee5e8a5c5e178e5f7ccaf269018756a Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 5 Jul 2024 17:49:50 +0000 Subject: [PATCH 0290/2361] Change ActionsDAGPtr to ActionsDAG where possible. 
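Since the subject line is the only description, here is a short note on the idiom this patch applies: actions DAGs are held by value, or in std::optional<ActionsDAG> where "no DAG" is a meaningful result, instead of behind heap-allocated ActionsDAGPtr, and they are std::move'd where ownership actually transfers. The standalone sketch below illustrates that ownership pattern only; Dag, Step, makeConvertingDag and tryMakeFilterDag are hypothetical names for illustration, not ClickHouse's real ActionsDAG or query-plan API.

    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-in for a DAG of actions: owns its nodes, cheap to move.
    struct Dag
    {
        std::vector<std::string> nodes;
    };

    // When a result always exists, return the DAG by value (moved or elided)
    // rather than returning a pointer the caller must dereference.
    Dag makeConvertingDag()
    {
        Dag dag;
        dag.nodes = {"input", "cast", "alias"};
        return dag;
    }

    // When "nothing to build" is a valid outcome, use std::optional instead of
    // signalling absence with a null pointer.
    std::optional<Dag> tryMakeFilterDag(bool has_filter)
    {
        if (!has_filter)
            return {};
        Dag dag;
        dag.nodes = {"input", "greater", "filter"};
        return dag;
    }

    // Consumers take the DAG by value and move it into place.
    struct Step
    {
        explicit Step(Dag dag_) : dag(std::move(dag_)) {}
        Dag dag;
    };

    int main()
    {
        Step converting_step(makeConvertingDag());
        std::cout << "converting step has " << converting_step.dag.nodes.size() << " nodes\n";

        if (auto filter_dag = tryMakeFilterDag(/*has_filter=*/ true))
        {
            // Dereference the optional and move out of it, mirroring the
            // std::move(*dag) call sites introduced below.
            Step filter_step(std::move(*filter_dag));
            std::cout << "filter step has " << filter_step.dag.nodes.size() << " nodes\n";
        }
        return 0;
    }

The diff that follows applies the same idiom to the real code: helpers such as makeConvertingActions return ActionsDAG (or std::optional<ActionsDAG>) by value, and call sites move the result into the consuming plan step instead of passing ActionsDAGPtr around.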
--- src/Core/InterpolateDescription.cpp | 6 +- src/Core/InterpolateDescription.h | 7 +- src/Functions/indexHint.h | 6 +- src/Interpreters/ActionsDAG.cpp | 148 +++++++++--------- src/Interpreters/ActionsDAG.h | 46 +++--- src/Interpreters/ActionsVisitor.cpp | 4 +- src/Interpreters/ExpressionActions.cpp | 19 ++- src/Interpreters/ExpressionActions.h | 8 +- src/Interpreters/ExpressionAnalyzer.cpp | 51 +++--- src/Interpreters/ExpressionAnalyzer.h | 6 +- src/Interpreters/GlobalSubqueriesVisitor.h | 2 +- .../IInterpreterUnionOrSelectQuery.cpp | 12 +- src/Interpreters/InterpreterSelectQuery.cpp | 49 +++--- src/Interpreters/InterpreterSelectQuery.h | 2 +- src/Interpreters/MutationsInterpreter.cpp | 14 +- .../MySQL/InterpretersMySQLDDLQuery.cpp | 2 +- src/Interpreters/TableJoin.cpp | 49 +++--- src/Interpreters/TableJoin.h | 10 +- src/Interpreters/addMissingDefaults.cpp | 24 +-- src/Interpreters/addMissingDefaults.h | 2 +- .../evaluateConstantExpression.cpp | 2 +- src/Interpreters/inplaceBlockConversions.cpp | 16 +- src/Interpreters/inplaceBlockConversions.h | 6 +- src/Planner/Planner.cpp | 46 +++--- src/Planner/PlannerActionsVisitor.cpp | 14 +- src/Planner/PlannerContext.h | 2 +- src/Planner/PlannerJoinTree.cpp | 81 +++++----- src/Planner/PlannerJoins.cpp | 56 +++---- src/Planner/PlannerJoins.h | 10 +- src/Planner/Utils.cpp | 10 +- src/Processors/QueryPlan/AggregatingStep.cpp | 16 +- src/Processors/QueryPlan/CubeStep.cpp | 12 +- .../QueryPlan/DistributedCreateLocalPlan.cpp | 2 +- src/Processors/QueryPlan/ExpressionStep.cpp | 22 +-- src/Processors/QueryPlan/ExpressionStep.h | 11 +- src/Processors/QueryPlan/FilterStep.cpp | 20 +-- src/Processors/QueryPlan/FilterStep.h | 12 +- .../convertOuterJoinToInnerJoin.cpp | 4 +- .../Optimizations/distinctReadInOrder.cpp | 4 +- .../Optimizations/filterPushDown.cpp | 50 +++--- .../Optimizations/liftUpArrayJoin.cpp | 8 +- .../Optimizations/liftUpFunctions.cpp | 6 +- .../QueryPlan/Optimizations/liftUpUnion.cpp | 2 +- .../Optimizations/mergeExpressions.cpp | 45 +++--- .../Optimizations/optimizePrewhere.cpp | 19 +-- .../optimizePrimaryKeyConditionAndLimit.cpp | 6 +- .../Optimizations/optimizeReadInOrder.cpp | 14 +- .../optimizeUseAggregateProjection.cpp | 30 ++-- .../optimizeUseNormalProjection.cpp | 22 +-- .../Optimizations/projectionsCommon.cpp | 14 +- .../Optimizations/projectionsCommon.h | 4 +- .../Optimizations/removeRedundantDistinct.cpp | 12 +- .../Optimizations/removeRedundantSorting.cpp | 4 +- .../QueryPlan/Optimizations/splitFilter.cpp | 14 +- .../useDataParallelAggregation.cpp | 6 +- src/Processors/QueryPlan/PartsSplitter.cpp | 2 +- .../QueryPlan/ReadFromMergeTree.cpp | 39 ++--- src/Processors/QueryPlan/ReadFromMergeTree.h | 8 +- .../QueryPlan/ReadFromSystemNumbersStep.cpp | 2 +- .../QueryPlan/SourceStepWithFilter.cpp | 8 +- .../QueryPlan/SourceStepWithFilter.h | 6 +- src/Processors/QueryPlan/TotalsHavingStep.cpp | 14 +- src/Processors/QueryPlan/TotalsHavingStep.h | 7 +- src/Processors/SourceWithKeyCondition.h | 8 +- .../Transforms/AddingDefaultsTransform.cpp | 2 +- .../Transforms/FillingTransform.cpp | 2 +- src/Storages/Hive/StorageHive.cpp | 24 +-- src/Storages/Hive/StorageHive.h | 10 +- src/Storages/IStorage.cpp | 2 +- src/Storages/IStorage.h | 4 +- src/Storages/KVStorageUtils.cpp | 2 +- src/Storages/KVStorageUtils.h | 2 +- src/Storages/KeyDescription.cpp | 2 +- src/Storages/MergeTree/IMergeTreeReader.cpp | 4 +- src/Storages/MergeTree/KeyCondition.cpp | 4 +- src/Storages/MergeTree/MergeTreeData.cpp | 10 +- src/Storages/MergeTree/MergeTreeData.h | 4 +- 
.../MergeTree/MergeTreeDataSelectExecutor.cpp | 4 +- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 38 ++--- src/Storages/MergeTree/MergeTreeIndexSet.h | 6 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +- .../MergeTree/MergeTreeSequentialSource.cpp | 8 +- .../MergeTree/MergeTreeSequentialSource.h | 2 +- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 2 +- .../MergeTree/MergeTreeWhereOptimizer.cpp | 4 +- .../MergeTree/MergeTreeWhereOptimizer.h | 2 +- src/Storages/MergeTree/RPNBuilder.cpp | 4 +- .../StorageObjectStorageSource.cpp | 4 +- .../StorageObjectStorageSource.h | 2 +- .../ReadFinalForExternalReplicaStorage.cpp | 2 +- src/Storages/SelectQueryInfo.h | 12 +- src/Storages/StorageBuffer.cpp | 12 +- src/Storages/StorageDistributed.cpp | 4 +- src/Storages/StorageFile.cpp | 4 +- src/Storages/StorageFile.h | 2 +- src/Storages/StorageMaterializedView.cpp | 4 +- src/Storages/StorageMerge.cpp | 30 ++-- src/Storages/StorageMerge.h | 2 +- src/Storages/StorageMergeTree.cpp | 2 +- src/Storages/StorageMergeTree.h | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.h | 2 +- src/Storages/StorageTableFunction.h | 2 +- src/Storages/StorageURL.h | 4 +- src/Storages/StorageValues.cpp | 6 +- src/Storages/StorageView.cpp | 6 +- .../System/StorageSystemStackTrace.cpp | 4 +- src/Storages/TTLDescription.cpp | 8 +- src/Storages/VirtualColumnUtils.cpp | 12 +- src/Storages/WindowView/StorageWindowView.cpp | 2 +- 110 files changed, 718 insertions(+), 721 deletions(-) diff --git a/src/Core/InterpolateDescription.cpp b/src/Core/InterpolateDescription.cpp index 76bbefdcfd7..86681fdb591 100644 --- a/src/Core/InterpolateDescription.cpp +++ b/src/Core/InterpolateDescription.cpp @@ -13,10 +13,10 @@ namespace DB { - InterpolateDescription::InterpolateDescription(ActionsDAGPtr actions_, const Aliases & aliases) + InterpolateDescription::InterpolateDescription(ActionsDAG actions_, const Aliases & aliases) : actions(std::move(actions_)) { - for (const auto & name_type : actions->getRequiredColumns()) + for (const auto & name_type : actions.getRequiredColumns()) { if (const auto & p = aliases.find(name_type.name); p != aliases.end()) required_columns_map[p->second->getColumnName()] = name_type; @@ -24,7 +24,7 @@ namespace DB required_columns_map[name_type.name] = name_type; } - for (const ColumnWithTypeAndName & column : actions->getResultColumns()) + for (const ColumnWithTypeAndName & column : actions.getResultColumns()) { std::string name = column.name; if (const auto & p = aliases.find(name); p != aliases.end()) diff --git a/src/Core/InterpolateDescription.h b/src/Core/InterpolateDescription.h index 73579aebee4..eeead71d780 100644 --- a/src/Core/InterpolateDescription.h +++ b/src/Core/InterpolateDescription.h @@ -5,21 +5,20 @@ #include #include #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; using Aliases = std::unordered_map; /// Interpolate description struct InterpolateDescription { - explicit InterpolateDescription(ActionsDAGPtr actions, const Aliases & aliases); + explicit InterpolateDescription(ActionsDAG actions, const Aliases & aliases); - ActionsDAGPtr actions; + ActionsDAG actions; std::unordered_map required_columns_map; /// input column name -> {alias, type} std::unordered_set result_columns_set; /// result block columns diff --git a/src/Functions/indexHint.h b/src/Functions/indexHint.h index 8fd7b751760..3ab8a021ae1 100644 --- a/src/Functions/indexHint.h +++ b/src/Functions/indexHint.h @@ -58,11 +58,11 @@ public: return 
DataTypeUInt8().createColumnConst(input_rows_count, 1u); } - void setActions(ActionsDAGPtr actions_) { actions = std::move(actions_); } - const ActionsDAGPtr & getActions() const { return actions; } + void setActions(ActionsDAG actions_) { actions = std::move(actions_); } + const ActionsDAG & getActions() const { return actions; } private: - ActionsDAGPtr actions; + ActionsDAG actions; }; } diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index c2626285235..04be9d23c32 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -961,9 +961,9 @@ NameSet ActionsDAG::foldActionsByProjection( } -ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_map & new_inputs, const NodeRawConstPtrs & required_outputs) +ActionsDAG ActionsDAG::foldActionsByProjection(const std::unordered_map & new_inputs, const NodeRawConstPtrs & required_outputs) { - auto dag = std::make_unique(); + ActionsDAG dag; std::unordered_map inputs_mapping; std::unordered_map mapping; struct Frame @@ -1003,9 +1003,9 @@ ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_mapresult_name != rename->result_name; const auto & input_name = should_rename ? rename->result_name : new_input->result_name; - mapped_input = &dag->addInput(input_name, new_input->result_type); + mapped_input = &dag.addInput(input_name, new_input->result_type); if (should_rename) - mapped_input = &dag->addAlias(*mapped_input, new_input->result_name); + mapped_input = &dag.addAlias(*mapped_input, new_input->result_name); } node = mapped_input; @@ -1034,7 +1034,7 @@ ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_mapresult_name, frame.node->result_name); - auto & node = dag->nodes.emplace_back(*frame.node); + auto & node = dag.nodes.emplace_back(*frame.node); for (auto & child : node.children) child = mapping[child]; @@ -1049,8 +1049,8 @@ ActionsDAGPtr ActionsDAG::foldActionsByProjection(const std::unordered_mapresult_name != mapped_output->result_name) - mapped_output = &dag->addAlias(*mapped_output, output->result_name); - dag->outputs.push_back(mapped_output); + mapped_output = &dag.addAlias(*mapped_output, output->result_name); + dag.outputs.push_back(mapped_output); } return dag; @@ -1411,7 +1411,7 @@ const ActionsDAG::Node & ActionsDAG::materializeNode(const Node & node) return addAlias(*func, name); } -ActionsDAGPtr ActionsDAG::makeConvertingActions( +ActionsDAG ActionsDAG::makeConvertingActions( const ColumnsWithTypeAndName & source, const ColumnsWithTypeAndName & result, MatchColumnsMode mode, @@ -1428,7 +1428,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( if (add_casted_columns && mode != MatchColumnsMode::Name) throw Exception(ErrorCodes::LOGICAL_ERROR, "Converting with add_casted_columns supported only for MatchColumnsMode::Name"); - auto actions_dag = std::make_unique(source); + ActionsDAG actions_dag(source); NodeRawConstPtrs projection(num_result_columns); FunctionOverloadResolverPtr func_builder_materialize = std::make_unique(std::make_shared()); @@ -1436,9 +1436,9 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( std::unordered_map> inputs; if (mode == MatchColumnsMode::Name) { - size_t input_nodes_size = actions_dag->inputs.size(); + size_t input_nodes_size = actions_dag.inputs.size(); for (size_t pos = 0; pos < input_nodes_size; ++pos) - inputs[actions_dag->inputs[pos]->result_name].push_back(pos); + inputs[actions_dag.inputs[pos]->result_name].push_back(pos); } for (size_t result_col_num = 0; result_col_num < 
num_result_columns; ++result_col_num) @@ -1451,7 +1451,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( { case MatchColumnsMode::Position: { - src_node = dst_node = actions_dag->inputs[result_col_num]; + src_node = dst_node = actions_dag.inputs[result_col_num]; break; } @@ -1462,7 +1462,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( { const auto * res_const = typeid_cast(res_elem.column.get()); if (ignore_constant_values && res_const) - src_node = dst_node = &actions_dag->addColumn(res_elem); + src_node = dst_node = &actions_dag.addColumn(res_elem); else throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Cannot find column `{}` in source stream, there are only columns: [{}]", @@ -1470,7 +1470,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( } else { - src_node = dst_node = actions_dag->inputs[input.front()]; + src_node = dst_node = actions_dag.inputs[input.front()]; input.pop_front(); } break; @@ -1483,7 +1483,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( if (const auto * src_const = typeid_cast(dst_node->column.get())) { if (ignore_constant_values) - dst_node = &actions_dag->addColumn(res_elem); + dst_node = &actions_dag.addColumn(res_elem); else if (res_const->getField() != src_const->getField()) throw Exception( ErrorCodes::ILLEGAL_COLUMN, @@ -1505,7 +1505,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( column.column = DataTypeString().createColumnConst(0, column.name); column.type = std::make_shared(); - const auto * right_arg = &actions_dag->addColumn(std::move(column)); + const auto * right_arg = &actions_dag.addColumn(std::move(column)); const auto * left_arg = dst_node; CastDiagnostic diagnostic = {dst_node->result_name, res_elem.name}; @@ -1513,13 +1513,13 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( = createInternalCastOverloadResolver(CastType::nonAccurate, std::move(diagnostic)); NodeRawConstPtrs children = { left_arg, right_arg }; - dst_node = &actions_dag->addFunction(func_builder_cast, std::move(children), {}); + dst_node = &actions_dag.addFunction(func_builder_cast, std::move(children), {}); } if (dst_node->column && isColumnConst(*dst_node->column) && !(res_elem.column && isColumnConst(*res_elem.column))) { NodeRawConstPtrs children = {dst_node}; - dst_node = &actions_dag->addFunction(func_builder_materialize, std::move(children), {}); + dst_node = &actions_dag.addFunction(func_builder_materialize, std::move(children), {}); } if (dst_node->result_name != res_elem.name) @@ -1538,7 +1538,7 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( } else { - dst_node = &actions_dag->addAlias(*dst_node, res_elem.name); + dst_node = &actions_dag.addAlias(*dst_node, res_elem.name); projection[result_col_num] = dst_node; } } @@ -1548,36 +1548,36 @@ ActionsDAGPtr ActionsDAG::makeConvertingActions( } } - actions_dag->outputs.swap(projection); - actions_dag->removeUnusedActions(false); + actions_dag.outputs.swap(projection); + actions_dag.removeUnusedActions(false); return actions_dag; } -ActionsDAGPtr ActionsDAG::makeAddingColumnActions(ColumnWithTypeAndName column) +ActionsDAG ActionsDAG::makeAddingColumnActions(ColumnWithTypeAndName column) { - auto adding_column_action = std::make_unique(); + ActionsDAG adding_column_action; FunctionOverloadResolverPtr func_builder_materialize = std::make_unique(std::make_shared()); auto column_name = column.name; - const auto * column_node = &adding_column_action->addColumn(std::move(column)); + const auto * column_node = &adding_column_action.addColumn(std::move(column)); NodeRawConstPtrs inputs = {column_node}; - 
const auto & function_node = adding_column_action->addFunction(func_builder_materialize, std::move(inputs), {}); - const auto & alias_node = adding_column_action->addAlias(function_node, std::move(column_name)); + const auto & function_node = adding_column_action.addFunction(func_builder_materialize, std::move(inputs), {}); + const auto & alias_node = adding_column_action.addAlias(function_node, std::move(column_name)); - adding_column_action->outputs.push_back(&alias_node); + adding_column_action.outputs.push_back(&alias_node); return adding_column_action; } -ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) +ActionsDAG ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second) { first.mergeInplace(std::move(second)); /// Some actions could become unused. Do not drop inputs to preserve the header. first.removeUnusedActions(false); - return std::make_unique(std::move(first)); + return std::move(first); } void ActionsDAG::mergeInplace(ActionsDAG && second) @@ -1970,15 +1970,15 @@ ActionsDAG::SplitResult ActionsDAG::split(std::unordered_set split second_inputs.push_back(cur.to_second); } - auto first_actions = std::make_unique(); - first_actions->nodes.swap(first_nodes); - first_actions->outputs.swap(first_outputs); - first_actions->inputs.swap(first_inputs); + ActionsDAG first_actions; + first_actions.nodes.swap(first_nodes); + first_actions.outputs.swap(first_outputs); + first_actions.inputs.swap(first_inputs); - auto second_actions = std::make_unique(); - second_actions->nodes.swap(second_nodes); - second_actions->outputs.swap(second_outputs); - second_actions->inputs.swap(second_inputs); + ActionsDAG second_actions; + second_actions.nodes.swap(second_nodes); + second_actions.outputs.swap(second_outputs); + second_actions.inputs.swap(second_inputs); std::unordered_map split_nodes_mapping; if (create_split_nodes_mapping) @@ -2098,7 +2098,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBySortingDescription(const NameS return res; } -bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header) +bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header) const { const auto * filter_node = tryFindInOutputs(filter_name); if (!filter_node) @@ -2122,7 +2122,7 @@ bool ActionsDAG::isFilterAlwaysFalseForDefaultValueInputs(const std::string & fi input_node_name_to_default_input_column.emplace(input->result_name, std::move(constant_column_with_type_and_name)); } - ActionsDAGPtr filter_with_default_value_inputs; + std::optional filter_with_default_value_inputs; try { @@ -2304,12 +2304,12 @@ ColumnsWithTypeAndName prepareFunctionArguments(const ActionsDAG::NodeRawConstPt /// /// Result actions add single column with conjunction result (it is always first in outputs). /// No other columns are added or removed. 
-ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs) +std::optional ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs) { if (conjunction.empty()) - return nullptr; + return {}; - auto actions = std::make_unique(); + ActionsDAG actions; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); @@ -2350,7 +2350,7 @@ ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjuncti if (cur.next_child_to_visit == cur.node->children.size()) { - auto & node = actions->nodes.emplace_back(*cur.node); + auto & node = actions.nodes.emplace_back(*cur.node); nodes_mapping[cur.node] = &node; for (auto & child : node.children) @@ -2373,33 +2373,33 @@ ActionsDAGPtr ActionsDAG::createActionsForConjunction(NodeRawConstPtrs conjuncti for (const auto * predicate : conjunction) args.emplace_back(nodes_mapping[predicate]); - result_predicate = &actions->addFunction(func_builder_and, std::move(args), {}); + result_predicate = &actions.addFunction(func_builder_and, std::move(args), {}); } - actions->outputs.push_back(result_predicate); + actions.outputs.push_back(result_predicate); for (const auto & col : all_inputs) { const Node * input; auto & list = required_inputs[col.name]; if (list.empty()) - input = &actions->addInput(col); + input = &actions.addInput(col); else { input = list.front(); list.pop_front(); - actions->inputs.push_back(input); + actions.inputs.push_back(input); } /// We should not add result_predicate into the outputs for the second time. if (input->result_name != result_predicate->result_name) - actions->outputs.push_back(input); + actions.outputs.push_back(input); } return actions; } -ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( +std::optional ActionsDAG::splitActionsForFilterPushDown( const std::string & filter_name, bool removes_filter, const Names & available_inputs, @@ -2415,7 +2415,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( /// If condition is constant let's do nothing. /// It means there is nothing to push down or optimization was already applied. if (predicate->type == ActionType::COLUMN) - return nullptr; + return {}; std::unordered_set allowed_nodes; @@ -2439,7 +2439,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( auto conjunction = getConjunctionNodes(predicate, allowed_nodes); if (conjunction.allowed.empty()) - return nullptr; + return {}; chassert(predicate->result_type); @@ -2451,13 +2451,13 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilterPushDown( && !conjunction.rejected.front()->result_type->equals(*predicate->result_type)) { /// No further optimization can be done - return nullptr; + return {}; } } auto actions = createActionsForConjunction(conjunction.allowed, all_inputs); if (!actions) - return nullptr; + return {}; /// Now, when actions are created, update the current DAG. 
removeUnusedConjunctions(std::move(conjunction.rejected), predicate, removes_filter); @@ -2562,11 +2562,11 @@ ActionsDAG::ActionsForJOINFilterPushDown ActionsDAG::splitActionsForJOINFilterPu auto left_stream_filter_to_push_down = createActionsForConjunction(left_stream_allowed_conjunctions, left_stream_header.getColumnsWithTypeAndName()); auto right_stream_filter_to_push_down = createActionsForConjunction(right_stream_allowed_conjunctions, right_stream_header.getColumnsWithTypeAndName()); - auto replace_equivalent_columns_in_filter = [](const ActionsDAGPtr & filter, + auto replace_equivalent_columns_in_filter = [](const ActionsDAG & filter, const Block & stream_header, const std::unordered_map & columns_to_replace) { - auto updated_filter = ActionsDAG::buildFilterActionsDAG({filter->getOutputs()[0]}, columns_to_replace); + auto updated_filter = ActionsDAG::buildFilterActionsDAG({filter.getOutputs()[0]}, columns_to_replace); chassert(updated_filter->getOutputs().size() == 1); /** If result filter to left or right stream has column that is one of the stream inputs, we need distinguish filter column from @@ -2587,7 +2587,7 @@ ActionsDAG::ActionsForJOINFilterPushDown ActionsDAG::splitActionsForJOINFilterPu for (const auto & input : updated_filter->getInputs()) updated_filter_inputs[input->result_name].push_back(input); - for (const auto & input : filter->getInputs()) + for (const auto & input : filter.getInputs()) { if (updated_filter_inputs.contains(input->result_name)) continue; @@ -2625,12 +2625,12 @@ ActionsDAG::ActionsForJOINFilterPushDown ActionsDAG::splitActionsForJOINFilterPu }; if (left_stream_filter_to_push_down) - left_stream_filter_to_push_down = replace_equivalent_columns_in_filter(left_stream_filter_to_push_down, + left_stream_filter_to_push_down = replace_equivalent_columns_in_filter(*left_stream_filter_to_push_down, left_stream_header, equivalent_right_stream_column_to_left_stream_column); if (right_stream_filter_to_push_down) - right_stream_filter_to_push_down = replace_equivalent_columns_in_filter(right_stream_filter_to_push_down, + right_stream_filter_to_push_down = replace_equivalent_columns_in_filter(*right_stream_filter_to_push_down, right_stream_header, equivalent_left_stream_column_to_right_stream_column); @@ -2859,13 +2859,13 @@ bool ActionsDAG::isSortingPreserved( return true; } -ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( +std::optional ActionsDAG::buildFilterActionsDAG( const NodeRawConstPtrs & filter_nodes, const std::unordered_map & node_name_to_input_node_column, bool single_output_condition_node) { if (filter_nodes.empty()) - return nullptr; + return {}; struct Frame { @@ -2873,7 +2873,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( bool visited_children = false; }; - auto result_dag = std::make_unique(); + ActionsDAG result_dag; std::unordered_map result_inputs; std::unordered_map node_to_result_node; @@ -2904,7 +2904,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( { auto & result_input = result_inputs[input_node_it->second.name]; if (!result_input) - result_input = &result_dag->addInput(input_node_it->second); + result_input = &result_dag.addInput(input_node_it->second); node_to_result_node.emplace(node, result_input); nodes_to_process.pop_back(); @@ -2931,25 +2931,25 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( { auto & result_input = result_inputs[node->result_name]; if (!result_input) - result_input = &result_dag->addInput({node->column, node->result_type, node->result_name}); + result_input = &result_dag.addInput({node->column, 
node->result_type, node->result_name}); result_node = result_input; break; } case ActionsDAG::ActionType::COLUMN: { - result_node = &result_dag->addColumn({node->column, node->result_type, node->result_name}); + result_node = &result_dag.addColumn({node->column, node->result_type, node->result_name}); break; } case ActionsDAG::ActionType::ALIAS: { const auto * child = node->children.front(); - result_node = &result_dag->addAlias(*(node_to_result_node.find(child)->second), node->result_name); + result_node = &result_dag.addAlias(*(node_to_result_node.find(child)->second), node->result_name); break; } case ActionsDAG::ActionType::ARRAY_JOIN: { const auto * child = node->children.front(); - result_node = &result_dag->addArrayJoin(*(node_to_result_node.find(child)->second), {}); + result_node = &result_dag.addArrayJoin(*(node_to_result_node.find(child)->second), {}); break; } case ActionsDAG::ActionType::FUNCTION: @@ -2967,13 +2967,11 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - ActionsDAGPtr index_hint_filter_dag; - const auto & index_hint_args = index_hint->getActions()->getOutputs(); + ActionsDAG index_hint_filter_dag; + const auto & index_hint_args = index_hint->getActions().getOutputs(); - if (index_hint_args.empty()) - index_hint_filter_dag = std::make_unique(); - else - index_hint_filter_dag = buildFilterActionsDAG(index_hint_args, + if (!index_hint_args.empty()) + index_hint_filter_dag = *buildFilterActionsDAG(index_hint_args, node_name_to_input_node_column, false /*single_output_condition_node*/); @@ -2995,7 +2993,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( auto [arguments, all_const] = getFunctionArguments(function_children); auto function_base = function_overload_resolver ? function_overload_resolver->build(arguments) : node->function_base; - result_node = &result_dag->addFunctionImpl( + result_node = &result_dag.addFunctionImpl( function_base, std::move(function_children), std::move(arguments), @@ -3010,7 +3008,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( nodes_to_process.pop_back(); } - auto & result_dag_outputs = result_dag->getOutputs(); + auto & result_dag_outputs = result_dag.getOutputs(); result_dag_outputs.reserve(filter_nodes_size); for (const auto & node : filter_nodes) @@ -3019,7 +3017,7 @@ ActionsDAGPtr ActionsDAG::buildFilterActionsDAG( if (result_dag_outputs.size() > 1 && single_output_condition_node) { FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - result_dag_outputs = { &result_dag->addFunction(func_builder_and, result_dag_outputs, {}) }; + result_dag_outputs = { &result_dag.addFunction(func_builder_and, result_dag_outputs, {}) }; } return result_dag; diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 7ca3d1c1b0d..cf6a91b9fe7 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -247,7 +247,7 @@ public: /// c * d e /// \ / /// c * d - e - static ActionsDAGPtr foldActionsByProjection( + static ActionsDAG foldActionsByProjection( const std::unordered_map & new_inputs, const NodeRawConstPtrs & required_outputs); @@ -303,7 +303,7 @@ public: /// @param ignore_constant_values - Do not check that constants are same. Use value from result_header. /// @param add_casted_columns - Create new columns with converted values instead of replacing original. /// @param new_names - Output parameter for new column names when add_casted_columns is used. 
- static ActionsDAGPtr makeConvertingActions( + static ActionsDAG makeConvertingActions( const ColumnsWithTypeAndName & source, const ColumnsWithTypeAndName & result, MatchColumnsMode mode, @@ -312,13 +312,13 @@ public: NameToNameMap * new_names = nullptr); /// Create expression which add const column and then materialize it. - static ActionsDAGPtr makeAddingColumnActions(ColumnWithTypeAndName column); + static ActionsDAG makeAddingColumnActions(ColumnWithTypeAndName column); /// Create ActionsDAG which represents expression equivalent to applying first and second actions consequently. /// Is used to replace `(first -> second)` expression chain to single `merge(first, second)` expression. /// If first.settings.project_input is set, then outputs of `first` must include inputs of `second`. /// Otherwise, any two actions may be combined. - static ActionsDAGPtr merge(ActionsDAG && first, ActionsDAG && second); + static ActionsDAG merge(ActionsDAG && first, ActionsDAG && second); /// The result is similar to merge(*this, second); /// Invariant : no nodes are removed from the first (this) DAG. @@ -329,12 +329,7 @@ public: /// *out_outputs is filled with pointers to the nodes corresponding to second.getOutputs(). void mergeNodes(ActionsDAG && second, NodeRawConstPtrs * out_outputs = nullptr); - struct SplitResult - { - ActionsDAGPtr first; - ActionsDAGPtr second; - std::unordered_map split_nodes_mapping; - }; + struct SplitResult; /// Split ActionsDAG into two DAGs, where first part contains all nodes from split_nodes and their children. /// Execution of first then second parts on block is equivalent to execution of initial DAG. @@ -362,7 +357,7 @@ public: * @param filter_name - name of filter node in current DAG. * @param input_stream_header - input stream header. */ - bool isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header); + bool isFilterAlwaysFalseForDefaultValueInputs(const std::string & filter_name, const Block & input_stream_header) const; /// Create actions which may calculate part of filter using only available_inputs. /// If nothing may be calculated, returns nullptr. @@ -381,19 +376,13 @@ public: /// columns will be transformed like `x, y, z` -> `z > 0, z, x, y` -(remove filter)-> `z, x, y`. /// To avoid it, add inputs from `all_inputs` list, /// so actions `x, y, z -> z > 0, x, y, z` -(remove filter)-> `x, y, z` will not change columns order. - ActionsDAGPtr splitActionsForFilterPushDown( + std::optional splitActionsForFilterPushDown( const std::string & filter_name, bool removes_filter, const Names & available_inputs, const ColumnsWithTypeAndName & all_inputs); - struct ActionsForJOINFilterPushDown - { - ActionsDAGPtr left_stream_filter_to_push_down; - bool left_stream_filter_removes_filter; - ActionsDAGPtr right_stream_filter_to_push_down; - bool right_stream_filter_removes_filter; - }; + struct ActionsForJOINFilterPushDown; /** Split actions for JOIN filter push down. * @@ -440,7 +429,7 @@ public: * * If single_output_condition_node = false, result dag has multiple output nodes. 
*/ - static ActionsDAGPtr buildFilterActionsDAG( + static std::optional buildFilterActionsDAG( const NodeRawConstPtrs & filter_nodes, const std::unordered_map & node_name_to_input_node_column = {}, bool single_output_condition_node = true); @@ -472,11 +461,26 @@ private: void compileFunctions(size_t min_count_to_compile_expression, const std::unordered_set & lazy_executed_nodes = {}); #endif - static ActionsDAGPtr createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs); + static std::optional createActionsForConjunction(NodeRawConstPtrs conjunction, const ColumnsWithTypeAndName & all_inputs); void removeUnusedConjunctions(NodeRawConstPtrs rejected_conjunctions, Node * predicate, bool removes_filter); }; +struct ActionsDAG::SplitResult +{ + ActionsDAG first; + ActionsDAG second; + std::unordered_map split_nodes_mapping; +}; + +struct ActionsDAG::ActionsForJOINFilterPushDown +{ + std::optional left_stream_filter_to_push_down; + bool left_stream_filter_removes_filter; + std::optional right_stream_filter_to_push_down; + bool right_stream_filter_removes_filter; +}; + class FindOriginalNodeForOutputName { using NameToNodeIndex = std::unordered_map; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 036b5ba9be0..c2dcdcd34e7 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1022,7 +1022,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & dag.project(args); auto index_hint = std::make_shared(); - index_hint->setActions(std::make_unique(std::move(dag))); + index_hint->setActions(std::move(dag)); // Arguments are removed. We add function instead of constant column to avoid constant folding. data.addFunction(std::make_unique(index_hint), {}, column_name); @@ -1285,7 +1285,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & lambda_dag.removeUnusedActions(Names(1, result_name)); auto lambda_actions = std::make_shared( - std::make_unique(std::move(lambda_dag)), + std::move(lambda_dag), ExpressionActionsSettings::fromContext(data.getContext(), CompileExpressions::yes)); DataTypePtr result_type = lambda_actions->getSampleBlock().getByName(result_name).type; diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 2eca31fc75e..399f4f2ff4f 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -49,14 +49,13 @@ namespace ErrorCodes static std::unordered_set processShortCircuitFunctions(const ActionsDAG & actions_dag, ShortCircuitFunctionEvaluation short_circuit_function_evaluation); -ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const ExpressionActionsSettings & settings_, bool project_inputs_) - : project_inputs(project_inputs_) +ExpressionActions::ExpressionActions(ActionsDAG actions_dag_, const ExpressionActionsSettings & settings_, bool project_inputs_) + : actions_dag(std::move(actions_dag_)) + , project_inputs(project_inputs_) , settings(settings_) { - actions_dag = ActionsDAG::clone(actions_dag_); - /// It's important to determine lazy executed nodes before compiling expressions. 
- std::unordered_set lazy_executed_nodes = processShortCircuitFunctions(*actions_dag, settings.short_circuit_function_evaluation); + std::unordered_set lazy_executed_nodes = processShortCircuitFunctions(actions_dag, settings.short_circuit_function_evaluation); #if USE_EMBEDDED_COMPILER if (settings.can_compile_expressions && settings.compile_expressions == CompileExpressions::yes) @@ -68,7 +67,7 @@ ExpressionActions::ExpressionActions(ActionsDAGPtr actions_dag_, const Expressio if (settings.max_temporary_columns && num_columns > settings.max_temporary_columns) throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS, "Too many temporary columns: {}. Maximum: {}", - actions_dag->dumpNames(), settings.max_temporary_columns); + actions_dag.dumpNames(), settings.max_temporary_columns); } ExpressionActionsPtr ExpressionActions::clone() const @@ -76,12 +75,12 @@ ExpressionActionsPtr ExpressionActions::clone() const auto copy = std::make_shared(ExpressionActions()); std::unordered_map copy_map; - copy->actions_dag = ActionsDAG::clone(actions_dag.get(), copy_map); + copy->actions_dag = std::move(*ActionsDAG::clone(&actions_dag, copy_map)); copy->actions = actions; for (auto & action : copy->actions) action.node = copy_map[action.node]; - for (const auto * input : copy->actions_dag->getInputs()) + for (const auto * input : copy->actions_dag.getInputs()) copy->input_positions.emplace(input->result_name, input_positions.at(input->result_name)); copy->num_columns = num_columns; @@ -357,8 +356,8 @@ void ExpressionActions::linearizeActions(const std::unordered_setgetOutputs(); - const auto & inputs = actions_dag->getInputs(); + const auto & outputs = actions_dag.getOutputs(); + const auto & inputs = actions_dag.getInputs(); auto reverse_info = getActionsDAGReverseInfo(nodes, outputs); std::vector data; diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index 63ea989bd5e..6ff39ee07f7 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -70,7 +70,7 @@ public: using NameToInputMap = std::unordered_map>; private: - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; Actions actions; size_t num_columns = 0; @@ -84,13 +84,13 @@ private: ExpressionActionsSettings settings; public: - explicit ExpressionActions(ActionsDAGPtr actions_dag_, const ExpressionActionsSettings & settings_ = {}, bool project_inputs_ = false); + explicit ExpressionActions(ActionsDAG actions_dag_, const ExpressionActionsSettings & settings_ = {}, bool project_inputs_ = false); ExpressionActions(ExpressionActions &&) = default; ExpressionActions & operator=(ExpressionActions &&) = default; const Actions & getActions() const { return actions; } - const std::list & getNodes() const { return actions_dag->getNodes(); } - const ActionsDAG & getActionsDAG() const { return *actions_dag; } + const std::list & getNodes() const { return actions_dag.getNodes(); } + const ActionsDAG & getActionsDAG() const { return actions_dag; } const ColumnNumbers & getResultPositions() const { return result_positions; } const ExpressionActionsSettings & getSettings() const { return settings; } diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 6b49365b492..068b6f290fa 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -928,7 +928,7 @@ JoinPtr SelectQueryExpressionAnalyzer::appendJoin( { const ColumnsWithTypeAndName & left_sample_columns = chain.getLastStep().getResultColumns(); - 
ActionsDAGPtr converting_actions; + std::optional converting_actions; JoinPtr join = makeJoin(*syntax->ast_join, left_sample_columns, converting_actions); if (converting_actions) @@ -1039,7 +1039,7 @@ static std::unique_ptr buildJoinedPlan( /// Actions which need to be calculated on joined block. auto joined_block_actions = analyzed_join.createJoinedBlockActions(context); NamesWithAliases required_columns_with_aliases = analyzed_join.getRequiredColumns( - Block(joined_block_actions->getResultColumns()), joined_block_actions->getRequiredColumns().getNames()); + Block(joined_block_actions.getResultColumns()), joined_block_actions.getRequiredColumns().getNames()); Names original_right_column_names; for (auto & pr : required_columns_with_aliases) @@ -1060,17 +1060,17 @@ static std::unique_ptr buildJoinedPlan( interpreter->buildQueryPlan(*joined_plan); { Block original_right_columns = interpreter->getSampleBlock(); - auto rename_dag = std::make_unique(original_right_columns.getColumnsWithTypeAndName()); + ActionsDAG rename_dag(original_right_columns.getColumnsWithTypeAndName()); for (const auto & name_with_alias : required_columns_with_aliases) { if (name_with_alias.first != name_with_alias.second && original_right_columns.has(name_with_alias.first)) { auto pos = original_right_columns.getPositionByName(name_with_alias.first); - const auto & alias = rename_dag->addAlias(*rename_dag->getInputs()[pos], name_with_alias.second); - rename_dag->getOutputs()[pos] = &alias; + const auto & alias = rename_dag.addAlias(*rename_dag.getInputs()[pos], name_with_alias.second); + rename_dag.getOutputs()[pos] = &alias; } } - rename_dag->appendInputsForUnusedColumns(joined_plan->getCurrentDataStream().header); + rename_dag.appendInputsForUnusedColumns(joined_plan->getCurrentDataStream().header); auto rename_step = std::make_unique(joined_plan->getCurrentDataStream(), std::move(rename_dag)); rename_step->setStepDescription("Rename joined columns"); joined_plan->addStep(std::move(rename_step)); @@ -1130,14 +1130,14 @@ std::shared_ptr tryKeyValueJoin(std::shared_ptr a JoinPtr SelectQueryExpressionAnalyzer::makeJoin( const ASTTablesInSelectQueryElement & join_element, const ColumnsWithTypeAndName & left_columns, - ActionsDAGPtr & left_convert_actions) + std::optional & left_convert_actions) { /// Two JOINs are not supported with the same subquery, but different USINGs. 
if (joined_plan) throw Exception(ErrorCodes::LOGICAL_ERROR, "Table join was already created for query"); - ActionsDAGPtr right_convert_actions = nullptr; + std::optional right_convert_actions; const auto & analyzed_join = syntax->analyzed_join; @@ -1145,7 +1145,7 @@ JoinPtr SelectQueryExpressionAnalyzer::makeJoin( { auto joined_block_actions = analyzed_join->createJoinedBlockActions(getContext()); NamesWithAliases required_columns_with_aliases = analyzed_join->getRequiredColumns( - Block(joined_block_actions->getResultColumns()), joined_block_actions->getRequiredColumns().getNames()); + Block(joined_block_actions.getResultColumns()), joined_block_actions.getRequiredColumns().getNames()); Names original_right_column_names; for (auto & pr : required_columns_with_aliases) @@ -1162,7 +1162,7 @@ JoinPtr SelectQueryExpressionAnalyzer::makeJoin( std::tie(left_convert_actions, right_convert_actions) = analyzed_join->createConvertingActions(left_columns, right_columns); if (right_convert_actions) { - auto converting_step = std::make_unique(joined_plan->getCurrentDataStream(), right_convert_actions); + auto converting_step = std::make_unique(joined_plan->getCurrentDataStream(), std::move(*right_convert_actions)); converting_step->setStepDescription("Convert joined columns"); joined_plan->addStep(std::move(converting_step)); } @@ -1354,8 +1354,8 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain { for (auto & child : asts) { - auto actions_dag = std::make_unique(columns_after_join); - getRootActions(child, only_types, *actions_dag); + ActionsDAG actions_dag(columns_after_join); + getRootActions(child, only_types, actions_dag); group_by_elements_actions.emplace_back( std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); } @@ -1471,7 +1471,7 @@ void SelectQueryExpressionAnalyzer::appendGroupByModifiers(ActionsDAG & before_a ExpressionActionsChain::Step & step = chain.addStep(before_aggregation.getNamesAndTypesList()); step.required_output = std::move(required_output); - step.actions()->dag = std::move(*ActionsDAG::makeConvertingActions(source_columns, result_columns, ActionsDAG::MatchColumnsMode::Position)); + step.actions()->dag = ActionsDAG::makeConvertingActions(source_columns, result_columns, ActionsDAG::MatchColumnsMode::Position); } void SelectQueryExpressionAnalyzer::appendSelectSkipWindowExpressions(ExpressionActionsChain::Step & step, ASTPtr const & node) @@ -1607,8 +1607,8 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr { for (const auto & child : select_query->orderBy()->children) { - auto actions_dag = std::make_unique(columns_after_join); - getRootActions(child, only_types, *actions_dag); + ActionsDAG actions_dag(columns_after_join); + getRootActions(child, only_types, actions_dag); order_by_elements_actions.emplace_back( std::make_shared(std::move(actions_dag), ExpressionActionsSettings::fromContext(getContext(), CompileExpressions::yes))); } @@ -1737,7 +1737,7 @@ void ExpressionAnalyzer::appendExpression(ExpressionActionsChain & chain, const step.addRequiredOutput(expr->getColumnName()); } -ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool remove_unused_result) +ActionsDAG ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool remove_unused_result) { ActionsDAG actions_dag(aggregated_columns); NamesWithAliases result_columns; @@ -1789,7 +1789,7 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool remove_un 
actions_dag.removeUnusedActions(name_set); } - return std::make_unique(std::move(actions_dag)); + return actions_dag; } ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool remove_unused_result, CompileExpressions compile_expressions) @@ -1798,10 +1798,10 @@ ExpressionActionsPtr ExpressionAnalyzer::getActions(bool add_aliases, bool remov getActionsDAG(add_aliases, remove_unused_result), ExpressionActionsSettings::fromContext(getContext(), compile_expressions), add_aliases && remove_unused_result); } -ActionsDAGPtr ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs) +ActionsDAG ExpressionAnalyzer::getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs) { - auto actions = std::make_unique(constant_inputs); - getRootActions(query, true /* no_makeset_for_subqueries */, *actions, true /* only_consts */); + ActionsDAG actions(constant_inputs); + getRootActions(query, true /* no_makeset_for_subqueries */, actions, true /* only_consts */); return actions; } @@ -1879,8 +1879,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (prewhere_dag_and_flags) { - auto dag = std::make_unique(std::move(prewhere_dag_and_flags->dag)); - prewhere_info = std::make_shared(std::move(dag), query.prewhere()->getColumnName()); + prewhere_info = std::make_shared(std::move(prewhere_dag_and_flags->dag), query.prewhere()->getColumnName()); prewhere_dag_and_flags.reset(); } @@ -1944,7 +1943,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( Block before_prewhere_sample = source_header; if (sanitizeBlock(before_prewhere_sample)) { - auto dag = ActionsDAG::clone(&prewhere_dag_and_flags->dag); + ActionsDAG dag = std::move(*ActionsDAG::clone(&prewhere_dag_and_flags->dag)); ExpressionActions( std::move(dag), ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); @@ -1980,7 +1979,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (sanitizeBlock(before_where_sample)) { ExpressionActions( - ActionsDAG::clone(&before_where->dag), + std::move(*ActionsDAG::clone(&before_where->dag)), ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); auto & column_elem @@ -2054,7 +2053,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( auto & step = chain.lastStep(query_analyzer.aggregated_columns); auto & actions = step.actions()->dag; - actions = std::move(*ActionsDAG::merge(std::move(actions), std::move(*converting))); + actions = ActionsDAG::merge(std::move(actions), std::move(converting)); } } @@ -2235,7 +2234,7 @@ void ExpressionAnalysisResult::checkActions() const /// Check that PREWHERE doesn't contain unusual actions. Unusual actions are that can change number of rows. if (hasPrewhere()) { - auto check_actions = [](const ActionsDAGPtr & actions) + auto check_actions = [](const std::optional & actions) { if (actions) for (const auto & node : actions->getNodes()) diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index e44a5891e77..737d36eb504 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -117,12 +117,12 @@ public: /// If add_aliases, only the calculated values in the desired order and add aliases. /// If also remove_unused_result, than only aliases remain in the output block. /// Otherwise, only temporary columns will be deleted from the block. 
- ActionsDAGPtr getActionsDAG(bool add_aliases, bool remove_unused_result = true); + ActionsDAG getActionsDAG(bool add_aliases, bool remove_unused_result = true); ExpressionActionsPtr getActions(bool add_aliases, bool remove_unused_result = true, CompileExpressions compile_expressions = CompileExpressions::no); /// Get actions to evaluate a constant expression. The function adds constants and applies functions that depend only on constants. /// Does not execute subqueries. - ActionsDAGPtr getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs = {}); + ActionsDAG getConstActionsDAG(const ColumnsWithTypeAndName & constant_inputs = {}); ExpressionActionsPtr getConstActions(const ColumnsWithTypeAndName & constant_inputs = {}); /** Sets that require a subquery to be create. @@ -367,7 +367,7 @@ private: JoinPtr makeJoin( const ASTTablesInSelectQueryElement & join_element, const ColumnsWithTypeAndName & left_columns, - ActionsDAGPtr & left_convert_actions); + std::optional & left_convert_actions); const ASTSelectQuery * getAggregatingQuery() const; diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index 64b6eb5dce9..fcf0d591918 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -295,7 +295,7 @@ private: { auto joined_block_actions = data.table_join->createJoinedBlockActions(data.getContext()); NamesWithAliases required_columns_with_aliases = data.table_join->getRequiredColumns( - Block(joined_block_actions->getResultColumns()), joined_block_actions->getRequiredColumns().getNames()); + Block(joined_block_actions.getResultColumns()), joined_block_actions.getRequiredColumns().getNames()); for (auto & pr : required_columns_with_aliases) required_columns.push_back(pr.first); diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index fed29b410db..288d06d2220 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -99,16 +99,16 @@ static ASTPtr parseAdditionalPostFilter(const Context & context) "additional filter", settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks); } -static ActionsDAGPtr makeAdditionalPostFilter(ASTPtr & ast, ContextPtr context, const Block & header) +static ActionsDAG makeAdditionalPostFilter(ASTPtr & ast, ContextPtr context, const Block & header) { auto syntax_result = TreeRewriter(context).analyze(ast, header.getNamesAndTypesList()); String result_column_name = ast->getColumnName(); auto dag = ExpressionAnalyzer(ast, syntax_result, context).getActionsDAG(false, false); - const ActionsDAG::Node * result_node = &dag->findInOutputs(result_column_name); - auto & outputs = dag->getOutputs(); + const ActionsDAG::Node * result_node = &dag.findInOutputs(result_column_name); + auto & outputs = dag.getOutputs(); outputs.clear(); - outputs.reserve(dag->getInputs().size() + 1); - for (const auto * node : dag->getInputs()) + outputs.reserve(dag.getInputs().size() + 1); + for (const auto * node : dag.getInputs()) outputs.push_back(node); outputs.push_back(result_node); @@ -126,7 +126,7 @@ void IInterpreterUnionOrSelectQuery::addAdditionalPostFilter(QueryPlan & plan) c return; auto dag = makeAdditionalPostFilter(ast, context, plan.getCurrentDataStream().header); - std::string filter_name = dag->getOutputs().back()->result_name; + std::string filter_name = dag.getOutputs().back()->result_name; auto filter_step = 
std::make_unique( plan.getCurrentDataStream(), std::move(dag), std::move(filter_name), true); filter_step->setStepDescription("Additional result filter"); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 09a7e440f31..cde6e305005 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -175,7 +175,7 @@ FilterDAGInfoPtr generateFilterActions( /// Using separate expression analyzer to prevent any possible alias injection auto syntax_result = TreeRewriter(context).analyzeSelect(query_ast, TreeRewriterResult({}, storage, storage_snapshot)); SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot, {}, false, {}, prepared_sets); - filter_info->actions = std::make_unique(std::move(analyzer.simpleSelectActions()->dag)); + filter_info->actions = std::move(analyzer.simpleSelectActions()->dag); filter_info->column_name = expr_list->children.at(0)->getColumnName(); filter_info->actions->removeUnusedActions(NameSet{filter_info->column_name}); @@ -938,7 +938,8 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() } } - query_info_copy.filter_actions_dag = ActionsDAG::buildFilterActionsDAG(added_filter_nodes.nodes); + if (auto filter_actions_dag = ActionsDAG::buildFilterActionsDAG(added_filter_nodes.nodes)) + query_info_copy.filter_actions_dag = std::make_shared(std::move(*filter_actions_dag)); UInt64 rows_to_read = storage_merge_tree->estimateNumberOfRowsToRead(context, storage_snapshot, query_info_copy); /// Note that we treat an estimation of 0 rows as a real estimation size_t number_of_replicas_to_use = rows_to_read / settings.parallel_replicas_min_number_of_rows_per_replica; @@ -973,7 +974,7 @@ void InterpreterSelectQuery::buildQueryPlan(QueryPlan & query_plan) ActionsDAG::MatchColumnsMode::Name, true); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(convert_actions_dag)); query_plan.addStep(std::move(converting)); } @@ -1297,10 +1298,10 @@ static InterpolateDescriptionPtr getInterpolateDescription( auto syntax_result = TreeRewriter(context).analyze(exprs, source_columns); ExpressionAnalyzer analyzer(exprs, syntax_result, context); - ActionsDAGPtr actions = analyzer.getActionsDAG(true); - ActionsDAGPtr conv_dag = ActionsDAG::makeConvertingActions(actions->getResultColumns(), + ActionsDAG actions = analyzer.getActionsDAG(true); + ActionsDAG conv_dag = ActionsDAG::makeConvertingActions(actions.getResultColumns(), result_columns, ActionsDAG::MatchColumnsMode::Position, true); - ActionsDAGPtr merge_dag = ActionsDAG::merge(std::move(* ActionsDAG::clone(actions)), std::move(*conv_dag)); + ActionsDAG merge_dag = ActionsDAG::merge(std::move(actions), std::move(conv_dag)); interpolate_descr = std::make_shared(std::move(merge_dag), aliases); } @@ -1485,7 +1486,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

            auto row_level_security_step = std::make_unique<FilterStep>(
                query_plan.getCurrentDataStream(),
-               expressions.filter_info->actions,
+               std::move(*ActionsDAG::clone(&*expressions.filter_info->actions)),
                expressions.filter_info->column_name,
                expressions.filter_info->do_remove_column);

@@ -1499,7 +1500,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

            auto row_level_filter_step = std::make_unique<FilterStep>(
                query_plan.getCurrentDataStream(),
-               expressions.prewhere_info->row_level_filter,
+               std::move(*ActionsDAG::clone(&*expressions.prewhere_info->row_level_filter)),
                expressions.prewhere_info->row_level_column_name,
                true);

@@ -1509,7 +1510,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

            auto prewhere_step = std::make_unique<FilterStep>(
                query_plan.getCurrentDataStream(),
-               expressions.prewhere_info->prewhere_actions,
+               std::move(*ActionsDAG::clone(&*expressions.prewhere_info->prewhere_actions)),
                expressions.prewhere_info->prewhere_column_name,
                expressions.prewhere_info->remove_prewhere_column);

@@ -1611,7 +1612,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

            auto row_level_security_step = std::make_unique<FilterStep>(
                query_plan.getCurrentDataStream(),
-               expressions.filter_info->actions,
+               std::move(*ActionsDAG::clone(&*expressions.filter_info->actions)),
                expressions.filter_info->column_name,
                expressions.filter_info->do_remove_column);

@@ -1623,7 +1624,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

            auto filter_step = std::make_unique<FilterStep>(
                query_plan.getCurrentDataStream(),
-               new_filter_info->actions,
+               std::move(*ActionsDAG::clone(&*new_filter_info->actions)),
                new_filter_info->column_name,
                new_filter_info->do_remove_column);

@@ -2045,7 +2046,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c
        pipe.addSimpleTransform([&](const Block & header)
        {
            return std::make_shared<FilterTransform>(header,
-               std::make_shared<ExpressionActions>(ActionsDAG::clone(prewhere_info.row_level_filter)),
+               std::make_shared<ExpressionActions>(std::move(*ActionsDAG::clone(&*prewhere_info.row_level_filter))),
                prewhere_info.row_level_column_name, true);
        });
    }

@@ -2053,7 +2054,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c
        pipe.addSimpleTransform([&](const Block & header)
        {
            return std::make_shared<FilterTransform>(
-               header, std::make_shared<ExpressionActions>(ActionsDAG::clone(prewhere_info.prewhere_actions)),
+               header, std::make_shared<ExpressionActions>(std::move(*ActionsDAG::clone(&*prewhere_info.prewhere_actions))),
                prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column);
        });
    }

@@ -2106,7 +2107,7 @@ void InterpreterSelectQuery::applyFiltersToPrewhereInAnalysis(ExpressionAnalysis
    else
    {
        /// Add row level security actions to prewhere.
-       analysis.prewhere_info->row_level_filter = std::move(analysis.filter_info->actions);
+       analysis.prewhere_info->row_level_filter = std::move(*analysis.filter_info->actions);
        analysis.prewhere_info->row_level_column_name = std::move(analysis.filter_info->column_name);
        analysis.filter_info = nullptr;
    }

@@ -2323,7 +2324,7 @@ std::optional<UInt64> InterpreterSelectQuery::getTrivialCount(UInt64 max_paralle
    if (!filter_actions_dag)
        return {};

-   return storage->totalRowsByPartitionPredicate(filter_actions_dag, context);
+   return storage->totalRowsByPartitionPredicate(*filter_actions_dag, context);
}

}

@@ -2573,7 +2574,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
    /// Aliases in table declaration.
if (processing_stage == QueryProcessingStage::FetchColumns && alias_actions) { - auto table_aliases = std::make_unique(query_plan.getCurrentDataStream(), alias_actions); + auto table_aliases = std::make_unique(query_plan.getCurrentDataStream(), std::move(*ActionsDAG::clone(&*alias_actions))); table_aliases->setStepDescription("Add table aliases"); query_plan.addStep(std::move(table_aliases)); } @@ -2581,9 +2582,9 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter) { - auto dag = ActionsDAG::clone(&expression->dag); + auto dag = std::move(*ActionsDAG::clone(&expression->dag)); if (expression->project_input) - dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto where_step = std::make_unique( query_plan.getCurrentDataStream(), std::move(dag), getSelectQuery().where()->getColumnName(), remove_filter); @@ -2755,9 +2756,9 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter) { - auto dag = ActionsDAG::clone(&expression->dag); + auto dag = std::move(*ActionsDAG::clone(&expression->dag)); if (expression->project_input) - dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto having_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(dag), getSelectQuery().having()->getColumnName(), remove_filter); @@ -2770,10 +2771,10 @@ void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const Actions void InterpreterSelectQuery::executeTotalsAndHaving( QueryPlan & query_plan, bool has_having, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter, bool overflow_row, bool final) { - ActionsDAGPtr dag; + std::optional dag; if (expression) { - dag = ActionsDAG::clone(&expression->dag); + dag = std::move(*ActionsDAG::clone(&expression->dag)); if (expression->project_input) dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); } @@ -2822,9 +2823,9 @@ void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const Act if (!expression) return; - auto dag = ActionsDAG::clone(&expression->dag); + ActionsDAG dag = std::move(*ActionsDAG::clone(&expression->dag)); if (expression->project_input) - dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(dag)); diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index d4ed19d45ea..ed6dd8af3b2 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -240,7 +240,7 @@ private: Block source_header; /// Actions to calculate ALIAS if required. 
- ActionsDAGPtr alias_actions; + std::optional alias_actions; /// The subquery interpreter, if the subquery std::unique_ptr interpreter_subquery; diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 704c5ce7d8b..2372d26e83f 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1197,7 +1197,7 @@ void MutationsInterpreter::Source::read( const auto & names = first_stage.filter_column_names; size_t num_filters = names.size(); - ActionsDAGPtr filter; + std::optional filter; if (!first_stage.filter_column_names.empty()) { ActionsDAG::NodeRawConstPtrs nodes(num_filters); @@ -1278,19 +1278,19 @@ QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::v if (i < stage.filter_column_names.size()) { - auto dag = ActionsDAG::clone(&step->actions()->dag); + auto dag = std::move(*ActionsDAG::clone(&step->actions()->dag)); if (step->actions()->project_input) - dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute DELETEs. - plan.addStep(std::make_unique(plan.getCurrentDataStream(), dag, stage.filter_column_names[i], false)); + plan.addStep(std::make_unique(plan.getCurrentDataStream(), std::move(dag), stage.filter_column_names[i], false)); } else { - auto dag = ActionsDAG::clone(&step->actions()->dag); + auto dag = std::move(*ActionsDAG::clone(&step->actions()->dag)); if (step->actions()->project_input) - dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); + dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute UPDATE or final projection. - plan.addStep(std::make_unique(plan.getCurrentDataStream(), dag)); + plan.addStep(std::make_unique(plan.getCurrentDataStream(), std::move(dag))); } } diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 4821d607d0e..c1d7acf0775 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -182,7 +182,7 @@ static NamesAndTypesList getNames(const ASTFunction & expr, ContextPtr context, ASTPtr temp_ast = expr.clone(); auto syntax = TreeRewriter(context).analyze(temp_ast, columns); - auto required_columns = ExpressionAnalyzer(temp_ast, syntax, context).getActionsDAG(false)->getRequiredColumns(); + auto required_columns = ExpressionAnalyzer(temp_ast, syntax, context).getActionsDAG(false).getRequiredColumns(); return required_columns; } diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index baf3a743f40..c8c926db13c 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -462,19 +462,19 @@ static void makeColumnNameUnique(const ColumnsWithTypeAndName & source_columns, } } -static ActionsDAGPtr createWrapWithTupleActions( +static std::optional createWrapWithTupleActions( const ColumnsWithTypeAndName & source_columns, std::unordered_set && column_names_to_wrap, NameToNameMap & new_names) { if (column_names_to_wrap.empty()) - return nullptr; + return {}; - auto actions_dag = std::make_unique(source_columns); + ActionsDAG actions_dag(source_columns); FunctionOverloadResolverPtr func_builder = std::make_unique(std::make_shared()); - for (const auto * input_node : actions_dag->getInputs()) + for (const auto * input_node : actions_dag.getInputs()) { const auto & column_name = input_node->result_name; auto it = 
column_names_to_wrap.find(column_name); @@ -485,9 +485,9 @@ static ActionsDAGPtr createWrapWithTupleActions( String node_name = "__wrapNullsafe(" + column_name + ")"; makeColumnNameUnique(source_columns, node_name); - const auto & dst_node = actions_dag->addFunction(func_builder, {input_node}, node_name); + const auto & dst_node = actions_dag.addFunction(func_builder, {input_node}, node_name); new_names[column_name] = dst_node.result_name; - actions_dag->addOrReplaceInOutputs(dst_node); + actions_dag.addOrReplaceInOutputs(dst_node); } if (!column_names_to_wrap.empty()) @@ -537,21 +537,23 @@ std::pair TableJoin::getKeysForNullSafeComparion(const Columns return {left_keys_to_wrap, right_keys_to_wrap}; } -static void mergeDags(ActionsDAGPtr & result_dag, ActionsDAGPtr && new_dag) +static void mergeDags(std::optional & result_dag, std::optional && new_dag) { + if (!new_dag) + return; if (result_dag) result_dag->mergeInplace(std::move(*new_dag)); else result_dag = std::move(new_dag); } -std::pair +std::pair, std::optional> TableJoin::createConvertingActions( const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns) { - ActionsDAGPtr left_dag = nullptr; - ActionsDAGPtr right_dag = nullptr; + std::optional left_dag; + std::optional right_dag; /** If the types are not equal, we need to convert them to a common type. * Example: * SELECT * FROM t1 JOIN t2 ON t1.a = t2.b @@ -693,7 +695,7 @@ void TableJoin::inferJoinKeyCommonType(const LeftNamesAndTypes & left, const Rig } } -static ActionsDAGPtr changeKeyTypes(const ColumnsWithTypeAndName & cols_src, +static std::optional changeKeyTypes(const ColumnsWithTypeAndName & cols_src, const TableJoin::NameToTypeMap & type_mapping, bool add_new_cols, NameToNameMap & key_column_rename) @@ -710,7 +712,7 @@ static ActionsDAGPtr changeKeyTypes(const ColumnsWithTypeAndName & cols_src, } } if (!has_some_to_do) - return nullptr; + return {}; return ActionsDAG::makeConvertingActions( /* source= */ cols_src, @@ -721,7 +723,7 @@ static ActionsDAGPtr changeKeyTypes(const ColumnsWithTypeAndName & cols_src, /* new_names= */ &key_column_rename); } -static ActionsDAGPtr changeTypesToNullable( +static std::optional changeTypesToNullable( const ColumnsWithTypeAndName & cols_src, const NameSet & exception_cols) { @@ -737,7 +739,7 @@ static ActionsDAGPtr changeTypesToNullable( } if (!has_some_to_do) - return nullptr; + return {}; return ActionsDAG::makeConvertingActions( /* source= */ cols_src, @@ -748,29 +750,29 @@ static ActionsDAGPtr changeTypesToNullable( /* new_names= */ nullptr); } -ActionsDAGPtr TableJoin::applyKeyConvertToTable( +std::optional TableJoin::applyKeyConvertToTable( const ColumnsWithTypeAndName & cols_src, const NameToTypeMap & type_mapping, JoinTableSide table_side, NameToNameMap & key_column_rename) { if (type_mapping.empty()) - return nullptr; + return {}; /// Create DAG to convert key columns - ActionsDAGPtr convert_dag = changeKeyTypes(cols_src, type_mapping, !hasUsing(), key_column_rename); + auto convert_dag = changeKeyTypes(cols_src, type_mapping, !hasUsing(), key_column_rename); applyRename(table_side, key_column_rename); return convert_dag; } -ActionsDAGPtr TableJoin::applyNullsafeWrapper( +std::optional TableJoin::applyNullsafeWrapper( const ColumnsWithTypeAndName & cols_src, const NameSet & columns_for_nullsafe_comparison, JoinTableSide table_side, NameToNameMap & key_column_rename) { if (columns_for_nullsafe_comparison.empty()) - return nullptr; + return {}; std::unordered_set column_names_to_wrap; 
for (const auto & name : columns_for_nullsafe_comparison) @@ -784,7 +786,7 @@ ActionsDAGPtr TableJoin::applyNullsafeWrapper( } /// Create DAG to wrap keys with tuple for null-safe comparison - ActionsDAGPtr null_safe_wrap_dag = createWrapWithTupleActions(cols_src, std::move(column_names_to_wrap), key_column_rename); + auto null_safe_wrap_dag = createWrapWithTupleActions(cols_src, std::move(column_names_to_wrap), key_column_rename); for (auto & clause : clauses) { for (size_t i : clause.nullsafe_compare_key_indexes) @@ -799,7 +801,7 @@ ActionsDAGPtr TableJoin::applyNullsafeWrapper( return null_safe_wrap_dag; } -ActionsDAGPtr TableJoin::applyJoinUseNullsConversion( +std::optional TableJoin::applyJoinUseNullsConversion( const ColumnsWithTypeAndName & cols_src, const NameToNameMap & key_column_rename) { @@ -809,8 +811,7 @@ ActionsDAGPtr TableJoin::applyJoinUseNullsConversion( exclude_columns.insert(it.second); /// Create DAG to make columns nullable if needed - ActionsDAGPtr add_nullable_dag = changeTypesToNullable(cols_src, exclude_columns); - return add_nullable_dag; + return changeTypesToNullable(cols_src, exclude_columns); } void TableJoin::setStorageJoin(std::shared_ptr storage) @@ -957,7 +958,7 @@ bool TableJoin::allowParallelHashJoin() const return true; } -ActionsDAGPtr TableJoin::createJoinedBlockActions(ContextPtr context) const +ActionsDAG TableJoin::createJoinedBlockActions(ContextPtr context) const { ASTPtr expression_list = rightKeysList(); auto syntax_result = TreeRewriter(context).analyze(expression_list, columnsFromJoinedTable()); diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index 8e83233e54c..a057d46b94d 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -202,19 +202,19 @@ private: Names requiredJoinedNames() const; /// Create converting actions and change key column names if required - ActionsDAGPtr applyKeyConvertToTable( + std::optional applyKeyConvertToTable( const ColumnsWithTypeAndName & cols_src, const NameToTypeMap & type_mapping, JoinTableSide table_side, NameToNameMap & key_column_rename); - ActionsDAGPtr applyNullsafeWrapper( + std::optional applyNullsafeWrapper( const ColumnsWithTypeAndName & cols_src, const NameSet & columns_for_nullsafe_comparison, JoinTableSide table_side, NameToNameMap & key_column_rename); - ActionsDAGPtr applyJoinUseNullsConversion( + std::optional applyJoinUseNullsConversion( const ColumnsWithTypeAndName & cols_src, const NameToNameMap & key_column_rename); @@ -264,7 +264,7 @@ public: TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; } - ActionsDAGPtr createJoinedBlockActions(ContextPtr context) const; + ActionsDAG createJoinedBlockActions(ContextPtr context) const; const std::vector & getEnabledJoinAlgorithms() const { return join_algorithm; } @@ -379,7 +379,7 @@ public: /// Calculate converting actions, rename key columns in required /// For `USING` join we will convert key columns inplace and affect into types in the result table /// For `JOIN ON` we will create new columns with converted keys to join by. 
- std::pair + std::pair, std::optional> createConvertingActions( const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns); diff --git a/src/Interpreters/addMissingDefaults.cpp b/src/Interpreters/addMissingDefaults.cpp index 929999c8c37..27d79e86622 100644 --- a/src/Interpreters/addMissingDefaults.cpp +++ b/src/Interpreters/addMissingDefaults.cpp @@ -14,15 +14,15 @@ namespace DB { -ActionsDAGPtr addMissingDefaults( +ActionsDAG addMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, ContextPtr context, bool null_as_default) { - auto actions = std::make_unique(header.getColumnsWithTypeAndName()); - auto & index = actions->getOutputs(); + ActionsDAG actions(header.getColumnsWithTypeAndName()); + auto & index = actions.getOutputs(); /// For missing columns of nested structure, you need to create not a column of empty arrays, but a column of arrays of correct lengths. /// First, remember the offset columns for all arrays in the block. @@ -40,7 +40,7 @@ ActionsDAGPtr addMissingDefaults( if (group.empty()) group.push_back(nullptr); - group.push_back(actions->getInputs()[i]); + group.push_back(actions.getInputs()[i]); } } @@ -62,11 +62,11 @@ ActionsDAGPtr addMissingDefaults( { const auto & nested_type = array_type->getNestedType(); ColumnPtr nested_column = nested_type->createColumnConstWithDefaultValue(0); - const auto & constant = actions->addColumn({nested_column, nested_type, column.name}); + const auto & constant = actions.addColumn({nested_column, nested_type, column.name}); auto & group = nested_groups[offsets_name]; group[0] = &constant; - index.push_back(&actions->addFunction(func_builder_replicate, group, constant.result_name)); + index.push_back(&actions.addFunction(func_builder_replicate, group, constant.result_name)); continue; } @@ -75,17 +75,17 @@ ActionsDAGPtr addMissingDefaults( * it can be full (or the interpreter may decide that it is constant everywhere). */ auto new_column = column.type->createColumnConstWithDefaultValue(0); - const auto * col = &actions->addColumn({new_column, column.type, column.name}); - index.push_back(&actions->materializeNode(*col)); + const auto * col = &actions.addColumn({new_column, column.type, column.name}); + index.push_back(&actions.materializeNode(*col)); } /// Computes explicitly specified values by default and materialized columns. - if (auto dag = evaluateMissingDefaults(actions->getResultColumns(), required_columns, columns, context, true, null_as_default)) - actions = ActionsDAG::merge(std::move(*actions), std::move(*dag)); + if (auto dag = evaluateMissingDefaults(actions.getResultColumns(), required_columns, columns, context, true, null_as_default)) + actions = ActionsDAG::merge(std::move(actions), std::move(*dag)); /// Removes unused columns and reorders result. - actions->removeUnusedActions(required_columns.getNames(), false); - actions->addMaterializingOutputActions(); + actions.removeUnusedActions(required_columns.getNames(), false); + actions.addMaterializingOutputActions(); return actions; } diff --git a/src/Interpreters/addMissingDefaults.h b/src/Interpreters/addMissingDefaults.h index 94afd806dfd..5299bae9745 100644 --- a/src/Interpreters/addMissingDefaults.h +++ b/src/Interpreters/addMissingDefaults.h @@ -24,7 +24,7 @@ using ActionsDAGPtr = std::unique_ptr; * Also can substitute NULL with DEFAULT value in case of INSERT SELECT query (null_as_default) if according setting is 1. 
* All three types of columns are materialized (not constants). */ -ActionsDAGPtr addMissingDefaults( +ActionsDAG addMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, ContextPtr context, bool null_as_default = false); } diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 4e1a2bcf5ee..d5d9fce0dbd 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -89,7 +89,7 @@ std::optional evaluateConstantExpressionImpl(c ColumnPtr result_column; DataTypePtr result_type; String result_name = ast->getColumnName(); - for (const auto & action_node : actions->getOutputs()) + for (const auto & action_node : actions.getOutputs()) { if ((action_node->result_name == result_name) && action_node->column) { diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index b000264ae33..62f8aea86d1 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -152,22 +152,20 @@ ASTPtr convertRequiredExpressions(Block & block, const NamesAndTypesList & requi return conversion_expr_list; } -ActionsDAGPtr createExpressions( +std::optional createExpressions( const Block & header, ASTPtr expr_list, bool save_unneeded_columns, ContextPtr context) { if (!expr_list) - return nullptr; + return {}; auto syntax_result = TreeRewriter(context).analyze(expr_list, header.getNamesAndTypesList()); auto expression_analyzer = ExpressionAnalyzer{expr_list, syntax_result, context}; - auto dag = std::make_unique(header.getNamesAndTypesList()); + ActionsDAG dag(header.getNamesAndTypesList()); auto actions = expression_analyzer.getActionsDAG(true, !save_unneeded_columns); - dag = ActionsDAG::merge(std::move(*dag), std::move(*actions)); - - return dag; + return ActionsDAG::merge(std::move(dag), std::move(actions)); } } @@ -180,7 +178,7 @@ void performRequiredConversions(Block & block, const NamesAndTypesList & require if (auto dag = createExpressions(block, conversion_expr_list, true, context)) { - auto expression = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context)); + auto expression = std::make_shared(std::move(*dag), ExpressionActionsSettings::fromContext(context)); expression->execute(block); } } @@ -195,7 +193,7 @@ bool needConvertAnyNullToDefault(const Block & header, const NamesAndTypesList & return false; } -ActionsDAGPtr evaluateMissingDefaults( +std::optional evaluateMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, @@ -204,7 +202,7 @@ ActionsDAGPtr evaluateMissingDefaults( bool null_as_default) { if (!columns.hasDefaults() && (!null_as_default || !needConvertAnyNullToDefault(header, required_columns, columns))) - return nullptr; + return {}; ASTPtr expr_list = defaultRequiredExpressions(header, required_columns, columns, null_as_default); return createExpressions(header, expr_list, save_unneeded_columns, context); diff --git a/src/Interpreters/inplaceBlockConversions.h b/src/Interpreters/inplaceBlockConversions.h index ffc77561e79..570eb75dd4a 100644 --- a/src/Interpreters/inplaceBlockConversions.h +++ b/src/Interpreters/inplaceBlockConversions.h @@ -5,9 +5,6 @@ #include #include -#include -#include - namespace DB { @@ -24,12 +21,11 @@ struct StorageInMemoryMetadata; using StorageMetadataPtr = std::shared_ptr; class ActionsDAG; -using 
ActionsDAGPtr = std::unique_ptr; /// Create actions which adds missing defaults to block according to required_columns using columns description /// or substitute NULL into DEFAULT value in case of INSERT SELECT query (null_as_default) if according setting is 1. /// Return nullptr if no actions required. -ActionsDAGPtr evaluateMissingDefaults( +std::optional evaluateMissingDefaults( const Block & header, const NamesAndTypesList & required_columns, const ColumnsDescription & columns, diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 16ee6de73c4..48e42099ce8 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -332,12 +332,12 @@ void addExpressionStep(QueryPlan & query_plan, const std::string & step_description, UsefulSets & useful_sets) { - auto actions = ActionsDAG::clone(&expression_actions->dag); + auto actions = std::move(*ActionsDAG::clone(&expression_actions->dag)); if (expression_actions->project_input) - actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + actions.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); - auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), actions); - appendSetsFromActionsDAG(*expression_step->getExpression(), useful_sets); + auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(actions)); + appendSetsFromActionsDAG(expression_step->getExpression(), useful_sets); expression_step->setStepDescription(step_description); query_plan.addStep(std::move(expression_step)); } @@ -347,15 +347,15 @@ void addFilterStep(QueryPlan & query_plan, const std::string & step_description, UsefulSets & useful_sets) { - auto actions = ActionsDAG::clone(&filter_analysis_result.filter_actions->dag); + auto actions = std::move(*ActionsDAG::clone(&filter_analysis_result.filter_actions->dag)); if (filter_analysis_result.filter_actions->project_input) - actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); + actions.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); auto where_step = std::make_unique(query_plan.getCurrentDataStream(), - actions, + std::move(actions), filter_analysis_result.filter_column_name, filter_analysis_result.remove_filter_column); - appendSetsFromActionsDAG(*where_step->getExpression(), useful_sets); + appendSetsFromActionsDAG(where_step->getExpression(), useful_sets); where_step->setStepDescription(step_description); query_plan.addStep(std::move(where_step)); } @@ -552,10 +552,10 @@ void addTotalsHavingStep(QueryPlan & query_plan, const auto & having_analysis_result = expression_analysis_result.getHaving(); bool need_finalize = !query_node.isGroupByWithRollup() && !query_node.isGroupByWithCube(); - ActionsDAGPtr actions; + std::optional actions; if (having_analysis_result.filter_actions) { - actions = ActionsDAG::clone(&having_analysis_result.filter_actions->dag); + actions = std::move(*ActionsDAG::clone(&having_analysis_result.filter_actions->dag)); if (having_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); } @@ -564,7 +564,7 @@ void addTotalsHavingStep(QueryPlan & query_plan, query_plan.getCurrentDataStream(), aggregation_analysis_result.aggregate_descriptions, query_analysis_result.aggregate_overflow_row, - actions, + std::move(actions), having_analysis_result.filter_column_name, having_analysis_result.remove_filter_column, settings.totals_mode, @@ -715,13 +715,13 @@ void 
addWithFillStepIfNeeded(QueryPlan & query_plan, if (query_node.hasInterpolate()) { - auto interpolate_actions_dag = std::make_unique(); + ActionsDAG interpolate_actions_dag; auto query_plan_columns = query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); for (auto & query_plan_column : query_plan_columns) { /// INTERPOLATE actions dag input columns must be non constant query_plan_column.column = nullptr; - interpolate_actions_dag->addInput(query_plan_column); + interpolate_actions_dag.addInput(query_plan_column); } auto & interpolate_list_node = query_node.getInterpolate()->as(); @@ -729,12 +729,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, if (interpolate_list_nodes.empty()) { - for (const auto * input_node : interpolate_actions_dag->getInputs()) + for (const auto * input_node : interpolate_actions_dag.getInputs()) { if (column_names_with_fill.contains(input_node->result_name)) continue; - interpolate_actions_dag->getOutputs().push_back(input_node); + interpolate_actions_dag.getOutputs().push_back(input_node); } } else @@ -744,12 +744,12 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, auto & interpolate_node_typed = interpolate_node->as(); PlannerActionsVisitor planner_actions_visitor(planner_context); - auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, + auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getExpression()); if (expression_to_interpolate_expression_nodes.size() != 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expression to interpolate expected to have single action node"); - auto interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, + auto interpolate_expression_nodes = planner_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getInterpolateExpression()); if (interpolate_expression_nodes.size() != 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Interpolate expression expected to have single action node"); @@ -760,16 +760,16 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, const auto * interpolate_expression = interpolate_expression_nodes[0]; if (!interpolate_expression->result_type->equals(*expression_to_interpolate->result_type)) { - interpolate_expression = &interpolate_actions_dag->addCast(*interpolate_expression, + interpolate_expression = &interpolate_actions_dag.addCast(*interpolate_expression, expression_to_interpolate->result_type, interpolate_expression->result_name); } - const auto * alias_node = &interpolate_actions_dag->addAlias(*interpolate_expression, expression_to_interpolate_name); - interpolate_actions_dag->getOutputs().push_back(alias_node); + const auto * alias_node = &interpolate_actions_dag.addAlias(*interpolate_expression, expression_to_interpolate_name); + interpolate_actions_dag.getOutputs().push_back(alias_node); } - interpolate_actions_dag->removeUnusedActions(); + interpolate_actions_dag.removeUnusedActions(); } Aliases empty_aliases; @@ -1130,7 +1130,7 @@ void addAdditionalFilterStepIfNeeded(QueryPlan & query_plan, return; auto filter_step = std::make_unique(query_plan.getCurrentDataStream(), - filter_info.actions, + std::move(*filter_info.actions), filter_info.column_name, filter_info.do_remove_column); filter_step->setStepDescription("additional result filter"); @@ -1418,7 +1418,7 @@ void Planner::buildPlanForQueryNode() if (it != table_filters.end()) { const auto & filters = it->second; - 
table_expression_data.setFilterActions(ActionsDAG::clone(filters.filter_actions)); + table_expression_data.setFilterActions(ActionsDAG::clone(&*filters.filter_actions)); table_expression_data.setPrewhereInfo(filters.prewhere_info); } } diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 59ec7778e21..4c0c9bc7937 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -757,12 +757,12 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi lambda_arguments_names_and_types.emplace_back(lambda_argument_name, std::move(lambda_argument_type)); } - auto lambda_actions_dag = std::make_unique(); - actions_stack.emplace_back(*lambda_actions_dag, node); + ActionsDAG lambda_actions_dag; + actions_stack.emplace_back(lambda_actions_dag, node); auto [lambda_expression_node_name, levels] = visitImpl(lambda_node.getExpression()); - lambda_actions_dag->getOutputs().push_back(actions_stack.back().getNodeOrThrow(lambda_expression_node_name)); - lambda_actions_dag->removeUnusedActions(Names(1, lambda_expression_node_name)); + lambda_actions_dag.getOutputs().push_back(actions_stack.back().getNodeOrThrow(lambda_expression_node_name)); + lambda_actions_dag.removeUnusedActions(Names(1, lambda_expression_node_name)); auto expression_actions_settings = ExpressionActionsSettings::fromContext(planner_context->getQueryContext(), CompileExpressions::yes); auto lambda_actions = std::make_shared(std::move(lambda_actions_dag), expression_actions_settings); @@ -879,14 +879,14 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi const auto & function_node = node->as(); auto function_node_name = action_node_name_helper.calculateActionNodeName(node); - auto index_hint_actions_dag = std::make_unique(); - auto & index_hint_actions_dag_outputs = index_hint_actions_dag->getOutputs(); + ActionsDAG index_hint_actions_dag; + auto & index_hint_actions_dag_outputs = index_hint_actions_dag.getOutputs(); std::unordered_set index_hint_actions_dag_output_node_names; PlannerActionsVisitor actions_visitor(planner_context); for (const auto & argument : function_node.getArguments()) { - auto index_hint_argument_expression_dag_nodes = actions_visitor.visit(*index_hint_actions_dag, argument); + auto index_hint_argument_expression_dag_nodes = actions_visitor.visit(index_hint_actions_dag, argument); for (auto & expression_dag_node : index_hint_argument_expression_dag_nodes) { diff --git a/src/Planner/PlannerContext.h b/src/Planner/PlannerContext.h index 418240fa34e..f35772ef7c0 100644 --- a/src/Planner/PlannerContext.h +++ b/src/Planner/PlannerContext.h @@ -25,7 +25,7 @@ class TableNode; struct FiltersForTableExpression { - ActionsDAGPtr filter_actions; + std::optional filter_actions; PrewhereInfoPtr prewhere_info; }; diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 94054588d40..fa3a3483a8e 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -591,19 +591,19 @@ UInt64 mainQueryNodeBlockSizeByLimit(const SelectQueryInfo & select_query_info) std::unique_ptr createComputeAliasColumnsStep( const std::unordered_map & alias_column_expressions, const DataStream & current_data_stream) { - ActionsDAGPtr merged_alias_columns_actions_dag = std::make_unique(current_data_stream.header.getColumnsWithTypeAndName()); - ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag->getInputs(); + ActionsDAG 
merged_alias_columns_actions_dag(current_data_stream.header.getColumnsWithTypeAndName()); + ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag.getInputs(); for (const auto & [column_name, alias_column_actions_dag] : alias_column_expressions) { const auto & current_outputs = alias_column_actions_dag->getOutputs(); action_dag_outputs.insert(action_dag_outputs.end(), current_outputs.begin(), current_outputs.end()); - merged_alias_columns_actions_dag->mergeNodes(std::move(*alias_column_actions_dag)); + merged_alias_columns_actions_dag.mergeNodes(std::move(*alias_column_actions_dag)); } for (const auto * output_node : action_dag_outputs) - merged_alias_columns_actions_dag->addOrReplaceInOutputs(*output_node); - merged_alias_columns_actions_dag->removeUnusedActions(false); + merged_alias_columns_actions_dag.addOrReplaceInOutputs(*output_node); + merged_alias_columns_actions_dag.removeUnusedActions(false); auto alias_column_step = std::make_unique(current_data_stream, std::move(merged_alias_columns_actions_dag)); alias_column_step->setStepDescription("Compute alias columns"); @@ -776,7 +776,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (prewhere_actions) { prewhere_info = std::make_shared(); - prewhere_info->prewhere_actions = ActionsDAG::clone(prewhere_actions); + prewhere_info->prewhere_actions = std::move(*ActionsDAG::clone(prewhere_actions)); prewhere_info->prewhere_column_name = prewhere_actions->getOutputs().at(0)->result_name; prewhere_info->remove_prewhere_column = true; prewhere_info->need_filter = true; @@ -805,14 +805,14 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (!prewhere_info->prewhere_actions) { - prewhere_info->prewhere_actions = std::move(filter_info.actions); + prewhere_info->prewhere_actions = std::move(*filter_info.actions); prewhere_info->prewhere_column_name = filter_info.column_name; prewhere_info->remove_prewhere_column = filter_info.do_remove_column; prewhere_info->need_filter = true; } else if (!prewhere_info->row_level_filter) { - prewhere_info->row_level_filter = std::move(filter_info.actions); + prewhere_info->row_level_filter = std::move(*filter_info.actions); prewhere_info->row_level_column_name = filter_info.column_name; prewhere_info->need_filter = true; } @@ -831,7 +831,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto row_policy_filter_info = buildRowPolicyFilterIfNeeded(storage, table_expression_query_info, planner_context, used_row_policies); if (row_policy_filter_info.actions) - table_expression_data.setRowLevelFilterActions(ActionsDAG::clone(row_policy_filter_info.actions)); + table_expression_data.setRowLevelFilterActions(ActionsDAG::clone(&*row_policy_filter_info.actions)); add_filter(row_policy_filter_info, "Row-level security filter"); if (query_context->getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY) @@ -964,15 +964,14 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan.addStep(std::move(alias_column_step)); } - for (const auto & filter_info_and_description : where_filters) + for (auto && [filter_info, description] : where_filters) { - const auto & [filter_info, description] = filter_info_and_description; if (query_plan.isInitialized() && from_stage == QueryProcessingStage::FetchColumns && filter_info.actions) { auto filter_step = std::make_unique(query_plan.getCurrentDataStream(), - filter_info.actions, + 
std::move(*filter_info.actions), filter_info.column_name, filter_info.do_remove_column); filter_step->setStepDescription(description); @@ -1063,19 +1062,19 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (from_stage == QueryProcessingStage::FetchColumns) { - auto rename_actions_dag = std::make_unique(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG rename_actions_dag(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs updated_actions_dag_outputs; - for (auto & output_node : rename_actions_dag->getOutputs()) + for (auto & output_node : rename_actions_dag.getOutputs()) { const auto * column_identifier = table_expression_data.getColumnIdentifierOrNull(output_node->result_name); if (!column_identifier) continue; - updated_actions_dag_outputs.push_back(&rename_actions_dag->addAlias(*output_node, *column_identifier)); + updated_actions_dag_outputs.push_back(&rename_actions_dag.addAlias(*output_node, *column_identifier)); } - rename_actions_dag->getOutputs() = std::move(updated_actions_dag_outputs); + rename_actions_dag.getOutputs() = std::move(updated_actions_dag_outputs); auto rename_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(rename_actions_dag)); rename_step->setStepDescription("Change column names to column identifiers"); @@ -1117,9 +1116,9 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextPtr & planner_context, const FunctionOverloadResolverPtr & to_nullable_function) { - auto cast_actions_dag = std::make_unique(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG cast_actions_dag(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); - for (auto & output_node : cast_actions_dag->getOutputs()) + for (auto & output_node : cast_actions_dag.getOutputs()) { if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(output_node->result_name)) { @@ -1128,11 +1127,11 @@ void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextP type_to_check = type_to_check_low_cardinality->getDictionaryType(); if (type_to_check->canBeInsideNullable()) - output_node = &cast_actions_dag->addFunction(to_nullable_function, {output_node}, output_node->result_name); + output_node = &cast_actions_dag.addFunction(to_nullable_function, {output_node}, output_node->result_name); } } - cast_actions_dag->appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); + cast_actions_dag.appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); auto cast_join_columns_step = std::make_unique(plan_to_add_cast.getCurrentDataStream(), std::move(cast_actions_dag)); cast_join_columns_step->setStepDescription("Cast JOIN columns to Nullable"); plan_to_add_cast.addStep(std::move(cast_join_columns_step)); @@ -1178,16 +1177,16 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ join_table_expression, planner_context); - join_clauses_and_actions.left_join_expressions_actions->appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); - auto left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), join_clauses_and_actions.left_join_expressions_actions); + join_clauses_and_actions.left_join_expressions_actions.appendInputsForUnusedColumns(left_plan.getCurrentDataStream().header); + auto 
left_join_expressions_actions_step = std::make_unique(left_plan.getCurrentDataStream(), std::move(join_clauses_and_actions.left_join_expressions_actions)); left_join_expressions_actions_step->setStepDescription("JOIN actions"); - appendSetsFromActionsDAG(*left_join_expressions_actions_step->getExpression(), left_join_tree_query_plan.useful_sets); + appendSetsFromActionsDAG(left_join_expressions_actions_step->getExpression(), left_join_tree_query_plan.useful_sets); left_plan.addStep(std::move(left_join_expressions_actions_step)); - join_clauses_and_actions.right_join_expressions_actions->appendInputsForUnusedColumns(right_plan.getCurrentDataStream().header); - auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), join_clauses_and_actions.right_join_expressions_actions); + join_clauses_and_actions.right_join_expressions_actions.appendInputsForUnusedColumns(right_plan.getCurrentDataStream().header); + auto right_join_expressions_actions_step = std::make_unique(right_plan.getCurrentDataStream(), std::move(join_clauses_and_actions.right_join_expressions_actions)); right_join_expressions_actions_step->setStepDescription("JOIN actions"); - appendSetsFromActionsDAG(*right_join_expressions_actions_step->getExpression(), right_join_tree_query_plan.useful_sets); + appendSetsFromActionsDAG(right_join_expressions_actions_step->getExpression(), right_join_tree_query_plan.useful_sets); right_plan.addStep(std::move(right_join_expressions_actions_step)); } @@ -1225,19 +1224,19 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map & plan_column_name_to_cast_type) { - auto cast_actions_dag = std::make_unique(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG cast_actions_dag(plan_to_add_cast.getCurrentDataStream().header.getColumnsWithTypeAndName()); - for (auto & output_node : cast_actions_dag->getOutputs()) + for (auto & output_node : cast_actions_dag.getOutputs()) { auto it = plan_column_name_to_cast_type.find(output_node->result_name); if (it == plan_column_name_to_cast_type.end()) continue; const auto & cast_type = it->second; - output_node = &cast_actions_dag->addCast(*output_node, cast_type, output_node->result_name); + output_node = &cast_actions_dag.addCast(*output_node, cast_type, output_node->result_name); } - cast_actions_dag->appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); + cast_actions_dag.appendInputsForUnusedColumns(plan_to_add_cast.getCurrentDataStream().header); auto cast_join_columns_step = std::make_unique(plan_to_add_cast.getCurrentDataStream(), std::move(cast_actions_dag)); cast_join_columns_step->setStepDescription("Cast JOIN USING columns"); @@ -1385,7 +1384,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ { ExpressionActionsPtr & mixed_join_expression = table_join->getMixedJoinExpression(); mixed_join_expression = std::make_shared( - std::move(join_clauses_and_actions.mixed_join_expressions_actions), + std::move(*join_clauses_and_actions.mixed_join_expressions_actions), ExpressionActionsSettings::fromContext(planner_context->getQueryContext())); appendSetsFromActionsDAG(mixed_join_expression->getActionsDAG(), left_join_tree_query_plan.useful_sets); @@ -1542,12 +1541,12 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ result_plan.unitePlans(std::move(join_step), {std::move(plans)}); } - 
auto drop_unused_columns_after_join_actions_dag = std::make_unique(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG drop_unused_columns_after_join_actions_dag(result_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs drop_unused_columns_after_join_actions_dag_updated_outputs; std::unordered_set drop_unused_columns_after_join_actions_dag_updated_outputs_names; std::optional first_skipped_column_node_index; - auto & drop_unused_columns_after_join_actions_dag_outputs = drop_unused_columns_after_join_actions_dag->getOutputs(); + auto & drop_unused_columns_after_join_actions_dag_outputs = drop_unused_columns_after_join_actions_dag.getOutputs(); size_t drop_unused_columns_after_join_actions_dag_outputs_size = drop_unused_columns_after_join_actions_dag_outputs.size(); for (size_t i = 0; i < drop_unused_columns_after_join_actions_dag_outputs_size; ++i) @@ -1619,7 +1618,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ auto plan = std::move(join_tree_query_plan.query_plan); auto plan_output_columns = plan.getCurrentDataStream().header.getColumnsWithTypeAndName(); - ActionsDAGPtr array_join_action_dag = std::make_unique(plan_output_columns); + ActionsDAG array_join_action_dag(plan_output_columns); PlannerActionsVisitor actions_visitor(planner_context); std::unordered_set array_join_expressions_output_nodes; @@ -1630,28 +1629,28 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_ array_join_column_names.insert(array_join_column_identifier); auto & array_join_expression_column = array_join_expression->as(); - auto expression_dag_index_nodes = actions_visitor.visit(*array_join_action_dag, array_join_expression_column.getExpressionOrThrow()); + auto expression_dag_index_nodes = actions_visitor.visit(array_join_action_dag, array_join_expression_column.getExpressionOrThrow()); for (auto & expression_dag_index_node : expression_dag_index_nodes) { - const auto * array_join_column_node = &array_join_action_dag->addAlias(*expression_dag_index_node, array_join_column_identifier); - array_join_action_dag->getOutputs().push_back(array_join_column_node); + const auto * array_join_column_node = &array_join_action_dag.addAlias(*expression_dag_index_node, array_join_column_identifier); + array_join_action_dag.getOutputs().push_back(array_join_column_node); array_join_expressions_output_nodes.insert(array_join_column_node->result_name); } } - array_join_action_dag->appendInputsForUnusedColumns(plan.getCurrentDataStream().header); + array_join_action_dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); auto array_join_actions = std::make_unique(plan.getCurrentDataStream(), std::move(array_join_action_dag)); array_join_actions->setStepDescription("ARRAY JOIN actions"); - appendSetsFromActionsDAG(*array_join_actions->getExpression(), join_tree_query_plan.useful_sets); + appendSetsFromActionsDAG(array_join_actions->getExpression(), join_tree_query_plan.useful_sets); plan.addStep(std::move(array_join_actions)); - auto drop_unused_columns_before_array_join_actions_dag = std::make_unique(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + ActionsDAG drop_unused_columns_before_array_join_actions_dag(plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs drop_unused_columns_before_array_join_actions_dag_updated_outputs; std::unordered_set drop_unused_columns_before_array_join_actions_dag_updated_outputs_names; - 
auto & drop_unused_columns_before_array_join_actions_dag_outputs = drop_unused_columns_before_array_join_actions_dag->getOutputs(); + auto & drop_unused_columns_before_array_join_actions_dag_outputs = drop_unused_columns_before_array_join_actions_dag.getOutputs(); size_t drop_unused_columns_before_array_join_actions_dag_outputs_size = drop_unused_columns_before_array_join_actions_dag_outputs.size(); for (size_t i = 0; i < drop_unused_columns_before_array_join_actions_dag_outputs_size; ++i) diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index 23b6a805ab9..db9678d91a6 100644 --- a/src/Planner/PlannerJoins.cpp +++ b/src/Planner/PlannerJoins.cpp @@ -177,13 +177,13 @@ std::set extractJoinTableSidesFromExpression(//const ActionsDAG:: } const ActionsDAG::Node * appendExpression( - ActionsDAGPtr & dag, + ActionsDAG & dag, const QueryTreeNodePtr & expression, const PlannerContextPtr & planner_context, const JoinNode & join_node) { PlannerActionsVisitor join_expression_visitor(planner_context); - auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(*dag, expression); + auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(dag, expression); if (join_expression_dag_node_raw_pointers.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "JOIN {} ON clause contains multiple expressions", @@ -193,9 +193,9 @@ const ActionsDAG::Node * appendExpression( } void buildJoinClause( - ActionsDAGPtr & left_dag, - ActionsDAGPtr & right_dag, - ActionsDAGPtr & mixed_dag, + ActionsDAG & left_dag, + ActionsDAG & right_dag, + ActionsDAG & mixed_dag, const PlannerContextPtr & planner_context, const QueryTreeNodePtr & join_expression, const TableExpressionSet & left_table_expressions, @@ -376,8 +376,8 @@ JoinClausesAndActions buildJoinClausesAndActions( const JoinNode & join_node, const PlannerContextPtr & planner_context) { - ActionsDAGPtr left_join_actions = std::make_unique(left_table_expression_columns); - ActionsDAGPtr right_join_actions = std::make_unique(right_table_expression_columns); + ActionsDAG left_join_actions(left_table_expression_columns); + ActionsDAG right_join_actions(right_table_expression_columns); ColumnsWithTypeAndName mixed_table_expression_columns; for (const auto & left_column : left_table_expression_columns) { @@ -387,7 +387,7 @@ JoinClausesAndActions buildJoinClausesAndActions( { mixed_table_expression_columns.push_back(right_column); } - ActionsDAGPtr mixed_join_actions = std::make_unique(mixed_table_expression_columns); + ActionsDAG mixed_join_actions(mixed_table_expression_columns); /** It is possible to have constant value in JOIN ON section, that we need to ignore during DAG construction. * If we do not ignore it, this function will be replaced by underlying constant. 
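The null-safe key handling touched by the next hunk can be summarized in isolation: each key used in a null-safe comparison is wrapped into tuple(...), because tuple comparison treats NULL elements as equal, so t1.a = t2.b effectively becomes tuple(t1.a) = tuple(t2.b) on both sides. A minimal sketch of that idea, assuming an already-built per-side DAG and using only the ActionsDAG calls that appear in this patch; the helper name and parameters are illustrative, not part of the source:

#include <Interpreters/ActionsDAG.h>
#include <Functions/FunctionFactory.h>

/// Sketch only: `side_dag` is a DAG built over one side of the join,
/// `key_node` is the key column's node, `context` is the query context.
/// Both sides of the comparison must get the same treatment so that
/// NULL keys compare equal.
const ActionsDAG::Node * wrapKeyForNullSafeComparison(
    ActionsDAG & side_dag, const ActionsDAG::Node * key_node, ContextPtr context)
{
    auto tuple_function = FunctionFactory::instance().get("tuple", context);
    /// Replace the key output with tuple(key); the original node stays in the DAG.
    const auto & wrapped = side_dag.addFunction(tuple_function, {key_node}, {});
    side_dag.addOrReplaceInOutputs(wrapped);
    return &wrapped;
}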
@@ -498,12 +498,12 @@ JoinClausesAndActions buildJoinClausesAndActions( { const ActionsDAG::Node * dag_filter_condition_node = nullptr; if (left_filter_condition_nodes.size() > 1) - dag_filter_condition_node = &left_join_actions->addFunction(and_function, left_filter_condition_nodes, {}); + dag_filter_condition_node = &left_join_actions.addFunction(and_function, left_filter_condition_nodes, {}); else dag_filter_condition_node = left_filter_condition_nodes[0]; join_clause.getLeftFilterConditionNodes() = {dag_filter_condition_node}; - left_join_actions->addOrReplaceInOutputs(*dag_filter_condition_node); + left_join_actions.addOrReplaceInOutputs(*dag_filter_condition_node); add_necessary_name_if_needed(JoinTableSide::Left, dag_filter_condition_node->result_name); } @@ -514,12 +514,12 @@ JoinClausesAndActions buildJoinClausesAndActions( const ActionsDAG::Node * dag_filter_condition_node = nullptr; if (right_filter_condition_nodes.size() > 1) - dag_filter_condition_node = &right_join_actions->addFunction(and_function, right_filter_condition_nodes, {}); + dag_filter_condition_node = &right_join_actions.addFunction(and_function, right_filter_condition_nodes, {}); else dag_filter_condition_node = right_filter_condition_nodes[0]; join_clause.getRightFilterConditionNodes() = {dag_filter_condition_node}; - right_join_actions->addOrReplaceInOutputs(*dag_filter_condition_node); + right_join_actions.addOrReplaceInOutputs(*dag_filter_condition_node); add_necessary_name_if_needed(JoinTableSide::Right, dag_filter_condition_node->result_name); } @@ -556,10 +556,10 @@ JoinClausesAndActions buildJoinClausesAndActions( } if (!left_key_node->result_type->equals(*common_type)) - left_key_node = &left_join_actions->addCast(*left_key_node, common_type, {}); + left_key_node = &left_join_actions.addCast(*left_key_node, common_type, {}); if (!right_key_node->result_type->equals(*common_type)) - right_key_node = &right_join_actions->addCast(*right_key_node, common_type, {}); + right_key_node = &right_join_actions.addCast(*right_key_node, common_type, {}); } if (join_clause.isNullsafeCompareKey(i) && left_key_node->result_type->isNullable() && right_key_node->result_type->isNullable()) @@ -576,24 +576,24 @@ JoinClausesAndActions buildJoinClausesAndActions( * SELECT * FROM t1 JOIN t2 ON tuple(t1.a) == tuple(t2.b) */ auto wrap_nullsafe_function = FunctionFactory::instance().get("tuple", planner_context->getQueryContext()); - left_key_node = &left_join_actions->addFunction(wrap_nullsafe_function, {left_key_node}, {}); - right_key_node = &right_join_actions->addFunction(wrap_nullsafe_function, {right_key_node}, {}); + left_key_node = &left_join_actions.addFunction(wrap_nullsafe_function, {left_key_node}, {}); + right_key_node = &right_join_actions.addFunction(wrap_nullsafe_function, {right_key_node}, {}); } - left_join_actions->addOrReplaceInOutputs(*left_key_node); - right_join_actions->addOrReplaceInOutputs(*right_key_node); + left_join_actions.addOrReplaceInOutputs(*left_key_node); + right_join_actions.addOrReplaceInOutputs(*right_key_node); add_necessary_name_if_needed(JoinTableSide::Left, left_key_node->result_name); add_necessary_name_if_needed(JoinTableSide::Right, right_key_node->result_name); } } - result.left_join_expressions_actions = ActionsDAG::clone(left_join_actions); - result.left_join_tmp_expression_actions = std::move(left_join_actions); - result.left_join_expressions_actions->removeUnusedActions(join_left_actions_names); - result.right_join_expressions_actions = ActionsDAG::clone(right_join_actions); - 
result.right_join_tmp_expression_actions = std::move(right_join_actions); - result.right_join_expressions_actions->removeUnusedActions(join_right_actions_names); + result.left_join_expressions_actions = std::move(left_join_actions); + //result.left_join_tmp_expression_actions = std::move(left_join_actions); + result.left_join_expressions_actions.removeUnusedActions(join_left_actions_names); + result.right_join_expressions_actions = std::move(right_join_actions); + //result.right_join_tmp_expression_actions = std::move(right_join_actions); + result.right_join_expressions_actions.removeUnusedActions(join_right_actions_names); if (is_inequal_join) { @@ -601,16 +601,16 @@ JoinClausesAndActions buildJoinClausesAndActions( /// So, for each column, we recalculate the value of the whole expression from JOIN ON to check if rows should be joined. if (result.join_clauses.size() > 1) { - auto mixed_join_expressions_actions = std::make_unique(mixed_table_expression_columns); + ActionsDAG mixed_join_expressions_actions(mixed_table_expression_columns); PlannerActionsVisitor join_expression_visitor(planner_context); - auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(*mixed_join_expressions_actions, join_expression); + auto join_expression_dag_node_raw_pointers = join_expression_visitor.visit(mixed_join_expressions_actions, join_expression); if (join_expression_dag_node_raw_pointers.size() != 1) throw Exception( ErrorCodes::LOGICAL_ERROR, "JOIN {} ON clause contains multiple expressions", join_node.formatASTForErrorMessage()); - mixed_join_expressions_actions->addOrReplaceInOutputs(*join_expression_dag_node_raw_pointers[0]); + mixed_join_expressions_actions.addOrReplaceInOutputs(*join_expression_dag_node_raw_pointers[0]); Names required_names{join_expression_dag_node_raw_pointers[0]->result_name}; - mixed_join_expressions_actions->removeUnusedActions(required_names); + mixed_join_expressions_actions.removeUnusedActions(required_names); result.mixed_join_expressions_actions = std::move(mixed_join_expressions_actions); } else diff --git a/src/Planner/PlannerJoins.h b/src/Planner/PlannerJoins.h index 8adf6edd7ea..3735c373acc 100644 --- a/src/Planner/PlannerJoins.h +++ b/src/Planner/PlannerJoins.h @@ -182,15 +182,15 @@ struct JoinClausesAndActions /// Join clauses. Actions dag nodes point into join_expression_actions. JoinClauses join_clauses; /// Whole JOIN ON section expressions - ActionsDAGPtr left_join_tmp_expression_actions; - ActionsDAGPtr right_join_tmp_expression_actions; + // ActionsDAGPtr left_join_tmp_expression_actions; + // ActionsDAGPtr right_join_tmp_expression_actions; /// Left join expressions actions - ActionsDAGPtr left_join_expressions_actions; + ActionsDAG left_join_expressions_actions; /// Right join expressions actions - ActionsDAGPtr right_join_expressions_actions; + ActionsDAG right_join_expressions_actions; /// Originally used for inequal join. it's the total join expression. /// If there is no inequal join conditions, it's null. - ActionsDAGPtr mixed_join_expressions_actions; + std::optional mixed_join_expressions_actions; }; /** Calculate join clauses and actions for JOIN ON section. 
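With JoinClausesAndActions now holding the per-side expression actions by value (and the mixed JOIN ON condition in a std::optional), callers move the DAGs straight into the plan steps instead of passing shared pointers around. A rough usage sketch under those assumptions, mirroring the PlannerJoinTree.cpp changes above; the function name is invented for illustration:

#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/ExpressionStep.h>
#include <Planner/PlannerJoins.h>

/// Illustrative only: `plan` is the left-side QueryPlan and `clauses_and_actions`
/// the result of buildJoinClausesAndActions. The DAG member is moved into the step
/// and must not be used afterwards; mixed_join_expressions_actions, being optional,
/// is consumed the same way behind a has-value check.
void addLeftJoinActions(QueryPlan & plan, JoinClausesAndActions & clauses_and_actions)
{
    auto & dag = clauses_and_actions.left_join_expressions_actions;
    dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header);

    auto step = std::make_unique<ExpressionStep>(plan.getCurrentDataStream(), std::move(dag));
    step->setStepDescription("JOIN actions");
    plan.addStep(std::move(step));
}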
diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp index 7ac53e0f8c1..e9f9c51d338 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp @@ -442,22 +442,22 @@ FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree, collectSourceColumns(filter_query_tree, planner_context, false /*keep_alias_columns*/); collectSets(filter_query_tree, *planner_context); - auto filter_actions_dag = std::make_unique(); + ActionsDAG filter_actions_dag; PlannerActionsVisitor actions_visitor(planner_context, false /*use_column_identifier_as_action_node_name*/); - auto expression_nodes = actions_visitor.visit(*filter_actions_dag, filter_query_tree); + auto expression_nodes = actions_visitor.visit(filter_actions_dag, filter_query_tree); if (expression_nodes.size() != 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filter actions must return single output node. Actual {}", expression_nodes.size()); - auto & filter_actions_outputs = filter_actions_dag->getOutputs(); + auto & filter_actions_outputs = filter_actions_dag.getOutputs(); filter_actions_outputs = std::move(expression_nodes); std::string filter_node_name = filter_actions_outputs[0]->result_name; bool remove_filter_column = true; - for (const auto & filter_input_node : filter_actions_dag->getInputs()) + for (const auto & filter_input_node : filter_actions_dag.getInputs()) if (table_expression_required_names_without_filter.contains(filter_input_node->result_name)) filter_actions_outputs.push_back(filter_input_node); @@ -498,7 +498,7 @@ void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets) { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - appendSetsFromActionsDAG(*index_hint->getActions(), useful_sets); + appendSetsFromActionsDAG(index_hint->getActions(), useful_sets); } } } diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index 64ba7f7cd2a..f31de80b22d 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -303,15 +303,15 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B const auto & header = ports[set_counter]->getHeader(); /// Here we create a DAG which fills missing keys and adds `__grouping_set` column - auto dag = std::make_unique(header.getColumnsWithTypeAndName()); + ActionsDAG dag(header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs outputs; outputs.reserve(output_header.columns() + 1); auto grouping_col = ColumnConst::create(ColumnUInt64::create(1, set_counter), 0); - const auto * grouping_node = &dag->addColumn( + const auto * grouping_node = &dag.addColumn( {ColumnPtr(std::move(grouping_col)), std::make_shared(), "__grouping_set"}); - grouping_node = &dag->materializeNode(*grouping_node); + grouping_node = &dag.materializeNode(*grouping_node); outputs.push_back(grouping_node); const auto & missing_columns = grouping_sets_params[set_counter].missing_keys; @@ -332,21 +332,21 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B column_with_default->finalize(); auto column = ColumnConst::create(std::move(column_with_default), 0); - const auto * node = &dag->addColumn({ColumnPtr(std::move(column)), col.type, col.name}); - node = &dag->materializeNode(*node); + const auto * node = &dag.addColumn({ColumnPtr(std::move(column)), col.type, col.name}); + node = &dag.materializeNode(*node); outputs.push_back(node); } else { - const auto * column_node = 
dag->getOutputs()[header.getPositionByName(col.name)]; + const auto * column_node = dag.getOutputs()[header.getPositionByName(col.name)]; if (used_it != used_keys.end() && group_by_use_nulls && column_node->result_type->canBeInsideNullable()) - outputs.push_back(&dag->addFunction(to_nullable_function, { column_node }, col.name)); + outputs.push_back(&dag.addFunction(to_nullable_function, { column_node }, col.name)); else outputs.push_back(column_node); } } - dag->getOutputs().swap(outputs); + dag.getOutputs().swap(outputs); auto expression = std::make_shared(std::move(dag), settings.getActionsSettings()); auto transform = std::make_shared(header, expression); diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index b6c70061987..3a98f8e4612 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ -36,27 +36,27 @@ CubeStep::CubeStep(const DataStream & input_stream_, Aggregator::Params params_, ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number) { - auto dag = std::make_unique(header.getColumnsWithTypeAndName()); - auto & outputs = dag->getOutputs(); + ActionsDAG dag(header.getColumnsWithTypeAndName()); + auto & outputs = dag.getOutputs(); if (use_nulls) { auto to_nullable = FunctionFactory::instance().get("toNullable", nullptr); for (const auto & key : keys) { - const auto * node = dag->getOutputs()[header.getPositionByName(key)]; + const auto * node = dag.getOutputs()[header.getPositionByName(key)]; if (node->result_type->canBeInsideNullable()) { - dag->addOrReplaceInOutputs(dag->addFunction(to_nullable, { node }, node->result_name)); + dag.addOrReplaceInOutputs(dag.addFunction(to_nullable, { node }, node->result_name)); } } } auto grouping_col = ColumnUInt64::create(1, grouping_set_number); - const auto * grouping_node = &dag->addColumn( + const auto * grouping_node = &dag.addColumn( {ColumnPtr(std::move(grouping_col)), std::make_shared(), "__grouping_set"}); - grouping_node = &dag->materializeNode(*grouping_node); + grouping_node = &dag.materializeNode(*grouping_node); outputs.insert(outputs.begin(), grouping_node); auto expression = std::make_shared(std::move(dag), settings.getActionsSettings()); diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp index 1f4f271fa6e..1c199ebedb3 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp @@ -32,7 +32,7 @@ void addConvertingActions(QueryPlan & plan, const Block & header, bool has_missi }; auto convert_actions_dag = get_converting_dag(plan.getCurrentDataStream().header, header); - auto converting = std::make_unique(plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(plan.getCurrentDataStream(), std::move(convert_actions_dag)); plan.addStep(std::move(converting)); } diff --git a/src/Processors/QueryPlan/ExpressionStep.cpp b/src/Processors/QueryPlan/ExpressionStep.cpp index 50bc2e1533e..94098f443d9 100644 --- a/src/Processors/QueryPlan/ExpressionStep.cpp +++ b/src/Processors/QueryPlan/ExpressionStep.cpp @@ -10,33 +10,33 @@ namespace DB { -static ITransformingStep::Traits getTraits(const ActionsDAGPtr & actions, const Block & header, const SortDescription & sort_description) +static ITransformingStep::Traits getTraits(const ActionsDAG & actions, const Block & header, 
const SortDescription & sort_description) { return ITransformingStep::Traits { { .returns_single_stream = false, .preserves_number_of_streams = true, - .preserves_sorting = actions->isSortingPreserved(header, sort_description), + .preserves_sorting = actions.isSortingPreserved(header, sort_description), }, { - .preserves_number_of_rows = !actions->hasArrayJoin(), + .preserves_number_of_rows = !actions.hasArrayJoin(), } }; } -ExpressionStep::ExpressionStep(const DataStream & input_stream_, const ActionsDAGPtr & actions_dag_) +ExpressionStep::ExpressionStep(const DataStream & input_stream_, ActionsDAG actions_dag_) : ITransformingStep( input_stream_, - ExpressionTransform::transformHeader(input_stream_.header, *actions_dag_), + ExpressionTransform::transformHeader(input_stream_.header, actions_dag_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description)) - , actions_dag(ActionsDAG::clone(actions_dag_)) + , actions_dag(std::move(actions_dag_)) { } void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression = std::make_shared(ActionsDAG::clone(actions_dag), settings.getActionsSettings()); + auto expression = std::make_shared(std::move(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header) { @@ -61,25 +61,25 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu void ExpressionStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, settings.indent_char); - auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); expression->describeActions(settings.out, prefix); } void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const { - auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); map.add("Expression", expression->toTree()); } void ExpressionStep::updateOutputStream() { output_stream = createOutputStream( - input_streams.front(), ExpressionTransform::transformHeader(input_streams.front().header, *actions_dag), getDataStreamTraits()); + input_streams.front(), ExpressionTransform::transformHeader(input_streams.front().header, actions_dag), getDataStreamTraits()); if (!getDataStreamTraits().preserves_sorting) return; - FindAliasForInputName alias_finder(*actions_dag); + FindAliasForInputName alias_finder(actions_dag); const auto & input_sort_description = getInputStreams().front().sort_description; for (size_t i = 0, s = input_sort_description.size(); i < s; ++i) { diff --git a/src/Processors/QueryPlan/ExpressionStep.h b/src/Processors/QueryPlan/ExpressionStep.h index ebbac8217cb..f2926318cbc 100644 --- a/src/Processors/QueryPlan/ExpressionStep.h +++ b/src/Processors/QueryPlan/ExpressionStep.h @@ -1,12 +1,10 @@ #pragma once #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - class ExpressionTransform; class JoiningTransform; @@ -15,21 +13,22 @@ class ExpressionStep : public ITransformingStep { public: - explicit ExpressionStep(const DataStream & input_stream_, const ActionsDAGPtr & actions_dag_); + explicit ExpressionStep(const DataStream & input_stream_, ActionsDAG actions_dag_); String getName() const override { return "Expression"; } void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) override; void 
describeActions(FormatSettings & settings) const override; - const ActionsDAGPtr & getExpression() const { return actions_dag; } + ActionsDAG & getExpression() { return actions_dag; } + const ActionsDAG & getExpression() const { return actions_dag; } void describeActions(JSONBuilder::JSONMap & map) const override; private: void updateOutputStream() override; - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; }; } diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 7883461f45a..5f15c5defac 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -9,9 +9,9 @@ namespace DB { -static ITransformingStep::Traits getTraits(const ActionsDAGPtr & expression, const Block & header, const SortDescription & sort_description, bool remove_filter_column, const String & filter_column_name) +static ITransformingStep::Traits getTraits(const ActionsDAG & expression, const Block & header, const SortDescription & sort_description, bool remove_filter_column, const String & filter_column_name) { - bool preserves_sorting = expression->isSortingPreserved(header, sort_description, remove_filter_column ? filter_column_name : ""); + bool preserves_sorting = expression.isSortingPreserved(header, sort_description, remove_filter_column ? filter_column_name : ""); if (remove_filter_column) { preserves_sorting &= std::find_if( @@ -35,22 +35,22 @@ static ITransformingStep::Traits getTraits(const ActionsDAGPtr & expression, con FilterStep::FilterStep( const DataStream & input_stream_, - const ActionsDAGPtr & actions_dag_, + ActionsDAG actions_dag_, String filter_column_name_, bool remove_filter_column_) : ITransformingStep( input_stream_, FilterTransform::transformHeader( input_stream_.header, - actions_dag_.get(), + &actions_dag_, filter_column_name_, remove_filter_column_), getTraits(actions_dag_, input_stream_.header, input_stream_.sort_description, remove_filter_column_, filter_column_name_)) + , actions_dag(std::move(actions_dag_)) , filter_column_name(std::move(filter_column_name_)) , remove_filter_column(remove_filter_column_) { - actions_dag = ActionsDAG::clone(actions_dag_); - actions_dag->removeAliasesForFilter(filter_column_name); + actions_dag.removeAliasesForFilter(filter_column_name); } void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) @@ -87,7 +87,7 @@ void FilterStep::describeActions(FormatSettings & settings) const settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); expression->describeActions(settings.out, prefix); } @@ -96,7 +96,7 @@ void FilterStep::describeActions(JSONBuilder::JSONMap & map) const map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); - auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); map.add("Expression", expression->toTree()); } @@ -104,13 +104,13 @@ void FilterStep::updateOutputStream() { output_stream = createOutputStream( input_streams.front(), - FilterTransform::transformHeader(input_streams.front().header, actions_dag.get(), filter_column_name, remove_filter_column), + FilterTransform::transformHeader(input_streams.front().header, &actions_dag, filter_column_name, remove_filter_column), getDataStreamTraits()); if 
(!getDataStreamTraits().preserves_sorting) return; - FindAliasForInputName alias_finder(*actions_dag); + FindAliasForInputName alias_finder(actions_dag); const auto & input_sort_description = getInputStreams().front().sort_description; for (size_t i = 0, s = input_sort_description.size(); i < s; ++i) { diff --git a/src/Processors/QueryPlan/FilterStep.h b/src/Processors/QueryPlan/FilterStep.h index 0f894a570b7..b5a31bef5fc 100644 --- a/src/Processors/QueryPlan/FilterStep.h +++ b/src/Processors/QueryPlan/FilterStep.h @@ -1,19 +1,17 @@ #pragma once #include +#include namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - /// Implements WHERE, HAVING operations. See FilterTransform. class FilterStep : public ITransformingStep { public: FilterStep( const DataStream & input_stream_, - const ActionsDAGPtr & actions_dag_, + ActionsDAG actions_dag_, String filter_column_name_, bool remove_filter_column_); @@ -23,15 +21,15 @@ public: void describeActions(JSONBuilder::JSONMap & map) const override; void describeActions(FormatSettings & settings) const override; - const ActionsDAGPtr & getExpression() const { return actions_dag; } - ActionsDAGPtr & getExpression() { return actions_dag; } + const ActionsDAG & getExpression() const { return actions_dag; } + ActionsDAG & getExpression() { return actions_dag; } const String & getFilterColumnName() const { return filter_column_name; } bool removesFilterColumn() const { return remove_filter_column; } private: void updateOutputStream() override; - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; String filter_column_name; bool remove_filter_column; }; diff --git a/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp b/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp index d90f0e152e7..be468419cfb 100644 --- a/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp @@ -45,10 +45,10 @@ size_t tryConvertOuterJoinToInnerJoin(QueryPlan::Node * parent_node, QueryPlan:: bool right_stream_safe = true; if (check_left_stream) - left_stream_safe = filter_dag->isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, left_stream_input_header); + left_stream_safe = filter_dag.isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, left_stream_input_header); if (check_right_stream) - right_stream_safe = filter_dag->isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, right_stream_input_header); + right_stream_safe = filter_dag.isFilterAlwaysFalseForDefaultValueInputs(filter_column_name, right_stream_input_header); if (!left_stream_safe || !right_stream_safe) return 0; diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp index 6cdc3cb4eb0..8666912514e 100644 --- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp @@ -79,9 +79,9 @@ size_t tryDistinctReadInOrder(QueryPlan::Node * parent_node) steps_to_update.push_back(step); if (const auto * const expr = typeid_cast(step); expr) - dag_stack.push_back(expr->getExpression().get()); + dag_stack.push_back(&expr->getExpression()); else if (const auto * const filter = typeid_cast(step); filter) - dag_stack.push_back(filter->getExpression().get()); + dag_stack.push_back(&filter->getExpression()); node = node->children.front(); } diff --git 
a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index f26cd79dd97..411b20b1a32 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -101,7 +101,7 @@ static NameSet findIdentifiersOfNode(const ActionsDAG::Node * node) return res; } -static ActionsDAGPtr splitFilter(QueryPlan::Node * parent_node, const Names & available_inputs, size_t child_idx = 0) +static std::optional splitFilter(QueryPlan::Node * parent_node, const Names & available_inputs, size_t child_idx = 0) { QueryPlan::Node * child_node = parent_node->children.front(); checkChildrenSize(child_node, child_idx + 1); @@ -110,16 +110,16 @@ static ActionsDAGPtr splitFilter(QueryPlan::Node * parent_node, const Names & av auto & child = child_node->step; auto * filter = assert_cast(parent.get()); - const auto & expression = filter->getExpression(); + auto & expression = filter->getExpression(); const auto & filter_column_name = filter->getFilterColumnName(); bool removes_filter = filter->removesFilterColumn(); const auto & all_inputs = child->getInputStreams()[child_idx].header.getColumnsWithTypeAndName(); - return expression->splitActionsForFilterPushDown(filter_column_name, removes_filter, available_inputs, all_inputs); + return expression.splitActionsForFilterPushDown(filter_column_name, removes_filter, available_inputs, all_inputs); } static size_t -addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, const ActionsDAGPtr & split_filter, +addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, ActionsDAG split_filter, bool can_remove_filter = true, size_t child_idx = 0, bool update_parent_filter = true) { QueryPlan::Node * child_node = parent_node->children.front(); @@ -129,14 +129,14 @@ addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, auto & child = child_node->step; auto * filter = assert_cast(parent.get()); - const auto & expression = filter->getExpression(); + auto & expression = filter->getExpression(); const auto & filter_column_name = filter->getFilterColumnName(); - const auto * filter_node = expression->tryFindInOutputs(filter_column_name); + const auto * filter_node = expression.tryFindInOutputs(filter_column_name); if (update_parent_filter && !filter_node && !filter->removesFilterColumn()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}", - filter_column_name, expression->dumpDAG()); + filter_column_name, expression.dumpDAG()); /// Add new Filter step before Child. /// Expression/Filter -> Child -> Something @@ -147,10 +147,10 @@ addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, /// Expression/Filter -> Child -> Filter -> Something /// New filter column is the first one. 
- String split_filter_column_name = split_filter->getOutputs().front()->result_name; + String split_filter_column_name = split_filter.getOutputs().front()->result_name; node.step = std::make_unique( - node.children.at(0)->step->getOutputStream(), split_filter, std::move(split_filter_column_name), can_remove_filter); + node.children.at(0)->step->getOutputStream(), std::move(split_filter), std::move(split_filter_column_name), can_remove_filter); if (auto * transforming_step = dynamic_cast(child.get())) { @@ -176,7 +176,7 @@ addNewFilterStepOrThrow(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, { /// This means that all predicates of filter were pushed down. /// Replace current actions to expression, as we don't need to filter anything. - parent = std::make_unique(child->getOutputStream(), expression); + parent = std::make_unique(child->getOutputStream(), std::move(expression)); } else { @@ -192,7 +192,7 @@ tryAddNewFilterStep(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, con bool can_remove_filter = true, size_t child_idx = 0) { if (auto split_filter = splitFilter(parent_node, allowed_inputs, child_idx)) - return addNewFilterStepOrThrow(parent_node, nodes, split_filter, can_remove_filter, child_idx); + return addNewFilterStepOrThrow(parent_node, nodes, std::move(*split_filter), can_remove_filter, child_idx); return 0; } @@ -332,7 +332,7 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan:: Names left_stream_available_columns_to_push_down = get_available_columns_for_filter(true /*push_to_left_stream*/, left_stream_filter_push_down_input_columns_available); Names right_stream_available_columns_to_push_down = get_available_columns_for_filter(false /*push_to_left_stream*/, right_stream_filter_push_down_input_columns_available); - auto join_filter_push_down_actions = filter->getExpression()->splitActionsForJOINFilterPushDown(filter->getFilterColumnName(), + auto join_filter_push_down_actions = filter->getExpression().splitActionsForJOINFilterPushDown(filter->getFilterColumnName(), filter->removesFilterColumn(), left_stream_available_columns_to_push_down, left_stream_input_header, @@ -346,42 +346,44 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan:: if (join_filter_push_down_actions.left_stream_filter_to_push_down) { + const auto & result_name = join_filter_push_down_actions.left_stream_filter_to_push_down->getOutputs()[0]->result_name; updated_steps += addNewFilterStepOrThrow(parent_node, nodes, - join_filter_push_down_actions.left_stream_filter_to_push_down, + std::move(*join_filter_push_down_actions.left_stream_filter_to_push_down), join_filter_push_down_actions.left_stream_filter_removes_filter, 0 /*child_idx*/, false /*update_parent_filter*/); LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", - join_filter_push_down_actions.left_stream_filter_to_push_down->getOutputs()[0]->result_name, + result_name, JoinKind::Left); } if (join_filter_push_down_actions.right_stream_filter_to_push_down && allow_push_down_to_right) { + const auto & result_name = join_filter_push_down_actions.right_stream_filter_to_push_down->getOutputs()[0]->result_name; updated_steps += addNewFilterStepOrThrow(parent_node, nodes, - join_filter_push_down_actions.right_stream_filter_to_push_down, + std::move(*join_filter_push_down_actions.right_stream_filter_to_push_down), join_filter_push_down_actions.right_stream_filter_removes_filter, 1 /*child_idx*/, false /*update_parent_filter*/); 
LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", - join_filter_push_down_actions.right_stream_filter_to_push_down->getOutputs()[0]->result_name, + result_name, JoinKind::Right); } if (updated_steps > 0) { const auto & filter_column_name = filter->getFilterColumnName(); - const auto & filter_expression = filter->getExpression(); + auto & filter_expression = filter->getExpression(); - const auto * filter_node = filter_expression->tryFindInOutputs(filter_column_name); + const auto * filter_node = filter_expression.tryFindInOutputs(filter_column_name); if (!filter_node && !filter->removesFilterColumn()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}", - filter_column_name, filter_expression->dumpDAG()); + filter_column_name, filter_expression.dumpDAG()); /// Filter column was replaced to constant. @@ -391,7 +393,7 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan:: { /// This means that all predicates of filter were pushed down. /// Replace current actions to expression, as we don't need to filter anything. - parent = std::make_unique(child->getOutputStream(), filter_expression); + parent = std::make_unique(child->getOutputStream(), std::move(filter_expression)); } else { @@ -416,7 +418,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (!filter) return 0; - if (filter->getExpression()->hasStatefulFunctions()) + if (filter->getExpression().hasStatefulFunctions()) return 0; if (auto * aggregating = typeid_cast(child.get())) @@ -430,7 +432,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes return 0; const auto & actions = filter->getExpression(); - const auto & filter_node = actions->findInOutputs(filter->getFilterColumnName()); + const auto & filter_node = actions.findInOutputs(filter->getFilterColumnName()); auto identifiers_in_predicate = findIdentifiersOfNode(&filter_node); @@ -597,7 +599,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes filter_node.step = std::make_unique( filter_node.children.front()->step->getOutputStream(), - ActionsDAG::clone(filter->getExpression()), + std::move(*ActionsDAG::clone(&filter->getExpression())), filter->getFilterColumnName(), filter->removesFilterColumn()); } @@ -611,7 +613,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (auto * read_from_merge = typeid_cast(child.get())) { - FilterDAGInfo info{ActionsDAG::clone(filter->getExpression()), filter->getFilterColumnName(), filter->removesFilterColumn()}; + FilterDAGInfo info{std::move(*ActionsDAG::clone(&filter->getExpression())), filter->getFilterColumnName(), filter->removesFilterColumn()}; read_from_merge->addFilter(std::move(info)); std::swap(*parent_node, *child_node); return 1; diff --git a/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp b/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp index 36aab41df49..0d4f2330119 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpArrayJoin.cpp @@ -28,10 +28,10 @@ size_t tryLiftUpArrayJoin(QueryPlan::Node * parent_node, QueryPlan::Nodes & node const auto & expression = expression_step ? 
expression_step->getExpression() : filter_step->getExpression(); - auto split_actions = expression->splitActionsBeforeArrayJoin(array_join->columns); + auto split_actions = expression.splitActionsBeforeArrayJoin(array_join->columns); /// No actions can be moved before ARRAY JOIN. - if (split_actions.first->trivial()) + if (split_actions.first.trivial()) return 0; auto description = parent->getStepDescription(); @@ -49,9 +49,9 @@ size_t tryLiftUpArrayJoin(QueryPlan::Node * parent_node, QueryPlan::Nodes & node array_join_step->updateInputStream(node.step->getOutputStream()); if (expression_step) - parent = std::make_unique(array_join_step->getOutputStream(), split_actions.second); + parent = std::make_unique(array_join_step->getOutputStream(), std::move(split_actions.second)); else - parent = std::make_unique(array_join_step->getOutputStream(), split_actions.second, + parent = std::make_unique(array_join_step->getOutputStream(), std::move(split_actions.second), filter_step->getFilterColumnName(), filter_step->removesFilterColumn()); parent->setStepDescription(description + " [split]"); diff --git a/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp b/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp index b280e2d3cc6..7794ddae8fa 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpFunctions.cpp @@ -66,13 +66,13 @@ size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan: NameSet sort_columns; for (const auto & col : sorting_step->getSortDescription()) sort_columns.insert(col.column_name); - auto [needed_for_sorting, unneeded_for_sorting, _] = expression_step->getExpression()->splitActionsBySortingDescription(sort_columns); + auto [needed_for_sorting, unneeded_for_sorting, _] = expression_step->getExpression().splitActionsBySortingDescription(sort_columns); // No calculations can be postponed. 
- if (unneeded_for_sorting->trivial()) + if (unneeded_for_sorting.trivial()) return 0; - if (!areNodesConvertableToBlock(needed_for_sorting->getOutputs()) || !areNodesConvertableToBlock(unneeded_for_sorting->getInputs())) + if (!areNodesConvertableToBlock(needed_for_sorting.getOutputs()) || !areNodesConvertableToBlock(unneeded_for_sorting.getInputs())) return 0; // Sorting (parent_node) -> Expression (child_node) diff --git a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp index 4629bc0af53..53f59198d0f 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp @@ -49,7 +49,7 @@ size_t tryLiftUpUnion(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) expr_node.step = std::make_unique( expr_node.children.front()->step->getOutputStream(), - ActionsDAG::clone(expression->getExpression())); + std::move(*ActionsDAG::clone(&expression->getExpression()))); } /// - Expression - Something diff --git a/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp b/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp index 97de69b1134..d7ca96e4c64 100644 --- a/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp +++ b/src/Processors/QueryPlan/Optimizations/mergeExpressions.cpp @@ -38,18 +38,18 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &) if (parent_expr && child_expr) { - const auto & child_actions = child_expr->getExpression(); - const auto & parent_actions = parent_expr->getExpression(); + auto & child_actions = child_expr->getExpression(); + auto & parent_actions = parent_expr->getExpression(); /// We cannot combine actions with arrayJoin and stateful function because we not always can reorder them. /// Example: select rowNumberInBlock() from (select arrayJoin([1, 2])) /// Such a query will return two zeroes if we combine actions together. 
- if (child_actions->hasArrayJoin() && parent_actions->hasStatefulFunctions()) + if (child_actions.hasArrayJoin() && parent_actions.hasStatefulFunctions()) return 0; - auto merged = ActionsDAG::merge(std::move(*child_actions), std::move(*parent_actions)); + auto merged = ActionsDAG::merge(std::move(child_actions), std::move(parent_actions)); - auto expr = std::make_unique(child_expr->getInputStreams().front(), merged); + auto expr = std::make_unique(child_expr->getInputStreams().front(), std::move(merged)); expr->setStepDescription("(" + parent_expr->getStepDescription() + " + " + child_expr->getStepDescription() + ")"); parent_node->step = std::move(expr); @@ -58,16 +58,16 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &) } else if (parent_filter && child_expr) { - const auto & child_actions = child_expr->getExpression(); - const auto & parent_actions = parent_filter->getExpression(); + auto & child_actions = child_expr->getExpression(); + auto & parent_actions = parent_filter->getExpression(); - if (child_actions->hasArrayJoin() && parent_actions->hasStatefulFunctions()) + if (child_actions.hasArrayJoin() && parent_actions.hasStatefulFunctions()) return 0; - auto merged = ActionsDAG::merge(std::move(*child_actions), std::move(*parent_actions)); + auto merged = ActionsDAG::merge(std::move(child_actions), std::move(parent_actions)); auto filter = std::make_unique(child_expr->getInputStreams().front(), - merged, + std::move(merged), parent_filter->getFilterColumnName(), parent_filter->removesFilterColumn()); filter->setStepDescription("(" + parent_filter->getStepDescription() + " + " + child_expr->getStepDescription() + ")"); @@ -78,32 +78,31 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &) } else if (parent_filter && child_filter) { - const auto & child_actions = child_filter->getExpression(); - const auto & parent_actions = parent_filter->getExpression(); + auto & child_actions = child_filter->getExpression(); + auto & parent_actions = parent_filter->getExpression(); - if (child_actions->hasArrayJoin()) + if (child_actions.hasArrayJoin()) return 0; - auto actions = ActionsDAG::clone(child_actions); - const auto & child_filter_node = actions->findInOutputs(child_filter->getFilterColumnName()); + const auto & child_filter_node = child_actions.findInOutputs(child_filter->getFilterColumnName()); if (child_filter->removesFilterColumn()) - removeFromOutputs(*actions, child_filter_node); + removeFromOutputs(child_actions, child_filter_node); - actions->mergeInplace(std::move(*ActionsDAG::clone(parent_actions))); + child_actions.mergeInplace(std::move(parent_actions)); - const auto & parent_filter_node = actions->findInOutputs(parent_filter->getFilterColumnName()); + const auto & parent_filter_node = child_actions.findInOutputs(parent_filter->getFilterColumnName()); if (parent_filter->removesFilterColumn()) - removeFromOutputs(*actions, parent_filter_node); + removeFromOutputs(child_actions, parent_filter_node); FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto & condition = actions->addFunction(func_builder_and, {&child_filter_node, &parent_filter_node}, {}); - auto & outputs = actions->getOutputs(); + const auto & condition = child_actions.addFunction(func_builder_and, {&child_filter_node, &parent_filter_node}, {}); + auto & outputs = child_actions.getOutputs(); outputs.insert(outputs.begin(), &condition); - actions->removeUnusedActions(false); + 
child_actions.removeUnusedActions(false); auto filter = std::make_unique(child_filter->getInputStreams().front(), - actions, + std::move(child_actions), condition.result_name, true); filter->setStepDescription("(" + parent_filter->getStepDescription() + " + " + child_filter->getStepDescription() + ")"); diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index f203d831750..0d9e050d6cb 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -83,10 +83,11 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) Names queried_columns = source_step_with_filter->requiredSourceColumns(); + const auto & source_filter_actions_dag = source_step_with_filter->getFilterActionsDAG(); MergeTreeWhereOptimizer where_optimizer{ std::move(column_compressed_sizes), storage_metadata, - storage.getConditionSelectivityEstimatorByPredicate(storage_snapshot, source_step_with_filter->getFilterActionsDAG(), context), + storage.getConditionSelectivityEstimatorByPredicate(storage_snapshot, source_filter_actions_dag ? &*source_filter_actions_dag : nullptr, context), queried_columns, storage.supportedPrewhereColumns(), getLogger("QueryPlanOptimizePrewhere")}; @@ -113,15 +114,15 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) if (prewhere_info->remove_prewhere_column) { - removeFromOutput(*filter_expression, filter_column_name); - auto & outputs = filter_expression->getOutputs(); + removeFromOutput(filter_expression, filter_column_name); + auto & outputs = filter_expression.getOutputs(); size_t size = outputs.size(); outputs.insert(outputs.end(), optimize_result.prewhere_nodes.begin(), optimize_result.prewhere_nodes.end()); - filter_expression->removeUnusedActions(false); + filter_expression.removeUnusedActions(false); outputs.resize(size); } - auto split_result = filter_expression->split(optimize_result.prewhere_nodes, true, true); + auto split_result = filter_expression.split(optimize_result.prewhere_nodes, true, true); /// This is the leak of abstraction. /// Splited actions may have inputs which are needed only for PREWHERE. @@ -137,15 +138,15 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) /// So, here we restore removed inputs for PREWHERE actions { std::unordered_set first_outputs( - split_result.first->getOutputs().begin(), split_result.first->getOutputs().end()); - for (const auto * input : split_result.first->getInputs()) + split_result.first.getOutputs().begin(), split_result.first.getOutputs().end()); + for (const auto * input : split_result.first.getInputs()) { if (!first_outputs.contains(input)) { - split_result.first->getOutputs().push_back(input); + split_result.first.getOutputs().push_back(input); /// Add column to second actions as input. /// Do not add it to result, so it would be removed. 
- split_result.second->addInput(input->result_name, input->result_type); + split_result.second.addInput(input->result_name, input->result_type); } } } diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp index 0afddede708..71a7ca327b1 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp @@ -18,16 +18,16 @@ void optimizePrimaryKeyConditionAndLimit(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(ActionsDAG::clone(storage_prewhere_info->prewhere_actions), storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(ActionsDAG::clone(&*storage_prewhere_info->prewhere_actions), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) - source_step_with_filter->addFilter(ActionsDAG::clone(storage_prewhere_info->row_level_filter), storage_prewhere_info->row_level_column_name); + source_step_with_filter->addFilter(ActionsDAG::clone(&*storage_prewhere_info->row_level_filter), storage_prewhere_info->row_level_column_name); } for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) { - source_step_with_filter->addFilter(ActionsDAG::clone(filter_step->getExpression()), filter_step->getFilterColumnName()); + source_step_with_filter->addFilter(ActionsDAG::clone(&filter_step->getExpression()), filter_step->getFilterColumnName()); } else if (auto * limit_step = typeid_cast(iter->node->step.get())) { diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index a8bd98d7460..b3747b81215 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -170,12 +170,12 @@ static void appendFixedColumnsFromFilterExpression(const ActionsDAG::Node & filt } } -static void appendExpression(ActionsDAGPtr & dag, const ActionsDAGPtr & expression) +static void appendExpression(ActionsDAGPtr & dag, const ActionsDAG & expression) { if (dag) - dag->mergeInplace(std::move(*ActionsDAG::clone(expression))); + dag->mergeInplace(std::move(*ActionsDAG::clone(&expression))); else - dag = ActionsDAG::clone(expression); + dag = ActionsDAG::clone(&expression); } /// This function builds a common DAG which is a merge of DAGs from Filter and Expression steps chain. @@ -193,7 +193,7 @@ void buildSortingDAG(QueryPlan::Node & node, ActionsDAGPtr & dag, FixedColumns & if (prewhere_info->prewhere_actions) { //std::cerr << "====== Adding prewhere " << std::endl; - appendExpression(dag, prewhere_info->prewhere_actions); + appendExpression(dag, *prewhere_info->prewhere_actions); if (const auto * filter_expression = dag->tryFindInOutputs(prewhere_info->prewhere_column_name)) appendFixedColumnsFromFilterExpression(*filter_expression, fixed_columns); } @@ -211,7 +211,7 @@ void buildSortingDAG(QueryPlan::Node & node, ActionsDAGPtr & dag, FixedColumns & const auto & actions = expression->getExpression(); /// Should ignore limit because arrayJoin() can reduce the number of rows in case of empty array. 
- if (actions->hasArrayJoin()) + if (actions.hasArrayJoin()) limit = 0; appendExpression(dag, actions); @@ -1066,13 +1066,13 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, for (const auto & actions_dag : window_desc.partition_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(ActionsDAG::clone(actions_dag.get()), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(std::move(*ActionsDAG::clone(actions_dag.get())), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } for (const auto & actions_dag : window_desc.order_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(ActionsDAG::clone(actions_dag.get()), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(std::move(*ActionsDAG::clone(actions_dag.get())), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } auto order_optimizer = std::make_shared( diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index da057bd25c2..34e9c8aac0e 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -273,7 +273,7 @@ static void appendAggregateFunctions( } } -ActionsDAGPtr analyzeAggregateProjection( +std::optional analyzeAggregateProjection( const AggregateProjectionInfo & info, const QueryDAG & query, const DAGIndex & query_index, @@ -393,7 +393,7 @@ ActionsDAGPtr analyzeAggregateProjection( // LOG_TRACE(getLogger("optimizeUseProjections"), "Folding actions by projection"); auto proj_dag = query.dag->foldActionsByProjection(new_inputs, query_key_nodes); - appendAggregateFunctions(*proj_dag, aggregates, *matched_aggregates); + appendAggregateFunctions(proj_dag, aggregates, *matched_aggregates); return proj_dag; } @@ -405,7 +405,7 @@ struct AggregateProjectionCandidate : public ProjectionCandidate /// Actions which need to be applied to columns from projection /// in order to get all the columns required for aggregation. - ActionsDAGPtr dag; + ActionsDAG dag; }; struct MinMaxProjectionCandidate @@ -480,13 +480,13 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (auto proj_dag = analyzeAggregateProjection(info, dag, query_index, keys, aggregates)) { // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); - AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; + AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(*proj_dag)}; // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection sample block {}", sample_block.dumpStructure()); auto block = reading.getMergeTreeData().getMinMaxCountProjectionBlock( metadata, - candidate.dag->getRequiredColumnsNames(), - (dag.filter_node ? dag.dag.get() : nullptr), + candidate.dag.getRequiredColumnsNames(), + (dag.filter_node ? 
&*dag.dag : nullptr), parts, max_added_blocks.get(), context); @@ -536,7 +536,7 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (auto proj_dag = analyzeAggregateProjection(info, dag, query_index, keys, aggregates)) { // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); - AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; + AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(*proj_dag)}; candidate.projection = projection; candidates.real.emplace_back(std::move(candidate)); } @@ -664,7 +664,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu /// Selecting best candidate. for (auto & candidate : candidates.real) { - auto required_column_names = candidate.dag->getRequiredColumnsNames(); + auto required_column_names = candidate.dag.getRequiredColumnsNames(); bool analyzed = analyzeProjectionCandidate( candidate, @@ -675,7 +675,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu query_info, context, max_added_blocks, - candidate.dag.get()); + &candidate.dag); if (!analyzed) continue; @@ -765,7 +765,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu projection_reading = reader.readFromParts( /* parts = */ {}, /* alter_conversions = */ {}, - best_candidate->dag->getRequiredColumnsNames(), + best_candidate->dag.getRequiredColumnsNames(), proj_snapshot, projection_query_info, context, @@ -777,7 +777,7 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu if (!projection_reading) { - auto header = proj_snapshot->getSampleBlockForColumns(best_candidate->dag->getRequiredColumnsNames()); + auto header = proj_snapshot->getSampleBlockForColumns(best_candidate->dag.getRequiredColumnsNames()); Pipe pipe(std::make_shared(std::move(header))); projection_reading = std::make_unique(std::move(pipe)); } @@ -808,17 +808,19 @@ std::optional optimizeUseAggregateProjections(QueryPlan::Node & node, Qu if (best_candidate) { aggregate_projection_node = &nodes.emplace_back(); + if (candidates.has_filter) { + const auto & result_name = best_candidate->dag.getOutputs().front()->result_name; aggregate_projection_node->step = std::make_unique( projection_reading_node.step->getOutputStream(), - best_candidate->dag, - best_candidate->dag->getOutputs().front()->result_name, + std::move(best_candidate->dag), + result_name, true); } else aggregate_projection_node->step - = std::make_unique(projection_reading_node.step->getOutputStream(), best_candidate->dag); + = std::make_unique(projection_reading_node.step->getOutputStream(), std::move(best_candidate->dag)); aggregate_projection_node->children.push_back(&projection_reading_node); } diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index c7e96d66817..c0af178f08e 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -23,7 +23,7 @@ struct NormalProjectionCandidate : public ProjectionCandidate { }; -static ActionsDAGPtr makeMaterializingDAG(const Block & proj_header, const Block main_header) +static std::optional makeMaterializingDAG(const Block & proj_header, const Block main_header) { /// Materialize constants in case we don't have it in output header. /// This may happen e.g. if we have PREWHERE. 
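The hunk above changes makeMaterializingDAG from returning a nullable ActionsDAGPtr to returning an optional value, so "nothing to do" is reported with an empty optional instead of nullptr (splitFilter in filterPushDown.cpp gets the same treatment). A small sketch of that return style, using a hypothetical factory name and a toy Dag type rather than the real signature:

// Hedged sketch: an optional-returning factory replacing a nullable unique_ptr result.
#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

struct Dag
{
    std::vector<std::string> outputs;
};

/// Returns an empty optional when there is nothing to materialize,
/// where the previous version would have returned nullptr.
std::optional<Dag> makeMaterializingDagSketch(const std::vector<std::string> & constant_columns)
{
    if (constant_columns.empty())
        return {};

    Dag dag;
    dag.outputs = constant_columns;
    return dag;
}

int main()
{
    /// Call-site shape used throughout the patch: test the optional,
    /// then move out of it into a step that takes the DAG by value.
    if (auto materializing = makeMaterializingDagSketch({"_materialized_const"}))
    {
        Dag converting = std::move(*materializing);
        std::cout << "materializing " << converting.outputs.size() << " column(s)\n";
    }

    if (!makeMaterializingDagSketch({}))
        std::cout << "nothing to materialize\n";

    return 0;
}

The caller keeps the same test-then-use shape it had with the pointer, but consuming the result becomes std::move(*optional) into the value-taking step, which matches the std::move(*materializing) call in the following hunk.
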
@@ -31,7 +31,7 @@ static ActionsDAGPtr makeMaterializingDAG(const Block & proj_header, const Block size_t num_columns = main_header.columns(); /// This is a error; will have block structure mismatch later. if (proj_header.columns() != num_columns) - return nullptr; + return {}; std::vector const_positions; for (size_t i = 0; i < num_columns; ++i) @@ -45,17 +45,17 @@ static ActionsDAGPtr makeMaterializingDAG(const Block & proj_header, const Block } if (const_positions.empty()) - return nullptr; + return {}; - ActionsDAGPtr dag = std::make_unique(); - auto & outputs = dag->getOutputs(); + ActionsDAG dag; + auto & outputs = dag.getOutputs(); for (const auto & col : proj_header.getColumnsWithTypeAndName()) - outputs.push_back(&dag->addInput(col)); + outputs.push_back(&dag.addInput(col)); for (auto pos : const_positions) { auto & output = outputs[pos]; - output = &dag->materializeNode(*output); + output = &dag.materializeNode(*output); } return dag; @@ -172,7 +172,7 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod query_info, context, max_added_blocks, - query.filter_node ? query.dag.get() : nullptr); + query.filter_node ? &*query.dag : nullptr); if (!analyzed) continue; @@ -242,14 +242,14 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod { expr_or_filter_node.step = std::make_unique( projection_reading_node.step->getOutputStream(), - query.dag, + std::move(*query.dag), query.filter_node->result_name, true); } else expr_or_filter_node.step = std::make_unique( projection_reading_node.step->getOutputStream(), - query.dag); + std::move(*query.dag)); expr_or_filter_node.children.push_back(&projection_reading_node); next_node = &expr_or_filter_node; @@ -267,7 +267,7 @@ std::optional optimizeUseNormalProjections(Stack & stack, QueryPlan::Nod if (auto materializing = makeMaterializingDAG(proj_stream->header, main_stream.header)) { - auto converting = std::make_unique(*proj_stream, materializing); + auto converting = std::make_unique(*proj_stream, std::move(*materializing)); proj_stream = &converting->getOutputStream(); auto & expr_node = nodes.emplace_back(); expr_node.step = std::move(converting); diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index 0e2ad96a419..fb2e6c2096e 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -64,12 +64,12 @@ std::shared_ptr getMaxAddedBlocks(ReadFromMergeTree * rea return {}; } -void QueryDAG::appendExpression(const ActionsDAGPtr & expression) +void QueryDAG::appendExpression(const ActionsDAG & expression) { if (dag) - dag->mergeInplace(std::move(*ActionsDAG::clone(expression))); + dag->mergeInplace(std::move(*ActionsDAG::clone(&expression))); else - dag = ActionsDAG::clone(expression); + dag = std::move(*ActionsDAG::clone(&expression)); } const ActionsDAG::Node * findInOutputs(ActionsDAG & dag, const std::string & name, bool remove) @@ -120,7 +120,7 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & { if (prewhere_info->row_level_filter) { - appendExpression(prewhere_info->row_level_filter); + appendExpression(*prewhere_info->row_level_filter); if (const auto * filter_expression = findInOutputs(*dag, prewhere_info->row_level_column_name, false)) filter_nodes.push_back(filter_expression); else @@ -129,7 +129,7 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & if 
(prewhere_info->prewhere_actions) { - appendExpression(prewhere_info->prewhere_actions); + appendExpression(*prewhere_info->prewhere_actions); if (const auto * filter_expression = findInOutputs(*dag, prewhere_info->prewhere_column_name, prewhere_info->remove_prewhere_column)) filter_nodes.push_back(filter_expression); @@ -149,7 +149,7 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & if (auto * expression = typeid_cast(step)) { const auto & actions = expression->getExpression(); - if (actions->hasArrayJoin()) + if (actions.hasArrayJoin()) return false; appendExpression(actions); @@ -159,7 +159,7 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & if (auto * filter = typeid_cast(step)) { const auto & actions = filter->getExpression(); - if (actions->hasArrayJoin()) + if (actions.hasArrayJoin()) return false; appendExpression(actions); diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h index 59ad3a43b97..ee0dfddc326 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h @@ -25,14 +25,14 @@ std::shared_ptr getMaxAddedBlocks(ReadFromMergeTree * rea /// Additionally, for all the Filter steps, we collect filter conditions into filter_nodes. struct QueryDAG { - ActionsDAGPtr dag; + std::optional dag; const ActionsDAG::Node * filter_node = nullptr; bool build(QueryPlan::Node & node); private: bool buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & filter_nodes); - void appendExpression(const ActionsDAGPtr & expression); + void appendExpression(const ActionsDAG & expression); }; struct ProjectionCandidate diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index 81a8a537830..d0acd8221d4 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -132,10 +132,10 @@ namespace return true; if (const auto * const expr = typeid_cast(step); expr) - return !expr->getExpression()->hasArrayJoin(); + return !expr->getExpression().hasArrayJoin(); if (const auto * const filter = typeid_cast(step); filter) - return !filter->getExpression()->hasArrayJoin(); + return !filter->getExpression().hasArrayJoin(); if (typeid_cast(step) || typeid_cast(step) || typeid_cast(step) || typeid_cast(step)) @@ -183,9 +183,9 @@ namespace } if (const auto * const expr = typeid_cast(current_step); expr) - dag_stack.push_back(expr->getExpression().get()); + dag_stack.push_back(&expr->getExpression()); else if (const auto * const filter = typeid_cast(current_step); filter) - dag_stack.push_back(filter->getExpression().get()); + dag_stack.push_back(&filter->getExpression()); node = node->children.front(); if (inner_distinct_step = typeid_cast(node->step.get()); inner_distinct_step) @@ -236,9 +236,9 @@ namespace } if (const auto * const expr = typeid_cast(current_step); expr) - dag_stack.push_back(expr->getExpression().get()); + dag_stack.push_back(&expr->getExpression()); else if (const auto * const filter = typeid_cast(current_step); filter) - dag_stack.push_back(filter->getExpression().get()); + dag_stack.push_back(&filter->getExpression()); node = node->children.front(); inner_distinct_step = typeid_cast(node->step.get()); diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp 
b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp index 632eba6ab5f..7cac7bee6ec 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp @@ -213,12 +213,12 @@ private: logStep("checking for stateful function", node); if (const auto * expr = typeid_cast(step); expr) { - if (expr->getExpression()->hasStatefulFunctions()) + if (expr->getExpression().hasStatefulFunctions()) return false; } else if (const auto * filter = typeid_cast(step); filter) { - if (filter->getExpression()->hasStatefulFunctions()) + if (filter->getExpression().hasStatefulFunctions()) return false; } else diff --git a/src/Processors/QueryPlan/Optimizations/splitFilter.cpp b/src/Processors/QueryPlan/Optimizations/splitFilter.cpp index 561ad7302c6..6aed57634b0 100644 --- a/src/Processors/QueryPlan/Optimizations/splitFilter.cpp +++ b/src/Processors/QueryPlan/Optimizations/splitFilter.cpp @@ -17,13 +17,13 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes) const std::string & filter_column_name = filter_step->getFilterColumnName(); /// Do not split if there are function like runningDifference. - if (expr->hasStatefulFunctions()) + if (expr.hasStatefulFunctions()) return 0; bool filter_name_clashs_with_input = false; if (filter_step->removesFilterColumn()) { - for (const auto * input : expr->getInputs()) + for (const auto * input : expr.getInputs()) { if (input->result_name == filter_column_name) { @@ -33,14 +33,14 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes) } } - auto split = expr->splitActionsForFilter(filter_column_name); + auto split = expr.splitActionsForFilter(filter_column_name); - if (split.second->trivial()) + if (split.second.trivial()) return 0; bool remove_filter = false; if (filter_step->removesFilterColumn()) - remove_filter = split.second->removeUnusedResult(filter_column_name); + remove_filter = split.second.removeUnusedResult(filter_column_name); auto description = filter_step->getStepDescription(); @@ -53,11 +53,11 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes) { split_filter_name = "__split_filter"; - for (auto & filter_output : split.first->getOutputs()) + for (auto & filter_output : split.first.getOutputs()) { if (filter_output->result_name == filter_column_name) { - filter_output = &split.first->addAlias(*filter_output, split_filter_name); + filter_output = &split.first.addAlias(*filter_output, split_filter_name); break; } } diff --git a/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp b/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp index 124cb735d5a..7e0260c0040 100644 --- a/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp +++ b/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp @@ -146,16 +146,16 @@ bool allOutputsDependsOnlyOnAllowedNodes( /// 3. We match partition key actions with group by key actions to find col1', ..., coln' in partition key actions. /// 4. We check that partition key is indeed a deterministic function of col1', ..., coln'. 
bool isPartitionKeySuitsGroupByKey( - const ReadFromMergeTree & reading, const ActionsDAGPtr & group_by_actions, const AggregatingStep & aggregating) + const ReadFromMergeTree & reading, const ActionsDAG & group_by_actions, const AggregatingStep & aggregating) { if (aggregating.isGroupingSets()) return false; - if (group_by_actions->hasArrayJoin() || group_by_actions->hasStatefulFunctions() || group_by_actions->hasNonDeterministic()) + if (group_by_actions.hasArrayJoin() || group_by_actions.hasStatefulFunctions() || group_by_actions.hasNonDeterministic()) return false; /// We are interested only in calculations required to obtain group by keys (and not aggregate function arguments for example). - auto key_nodes = group_by_actions->findInOutpus(aggregating.getParams().keys); + auto key_nodes = group_by_actions.findInOutpus(aggregating.getParams().keys); auto group_by_key_actions = ActionsDAG::cloneSubDAG(key_nodes, /*remove_aliases=*/ true); const auto & gb_key_required_columns = group_by_key_actions->getRequiredColumnsNames(); diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index ed4b1906635..a12fce95b10 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -943,7 +943,7 @@ SplitPartsWithRangesByPrimaryKeyResult splitPartsWithRangesByPrimaryKey( auto syntax_result = TreeRewriter(context).analyze(filter_function, primary_key.expression->getRequiredColumnsWithTypes()); auto actions = ExpressionAnalyzer(filter_function, syntax_result, context).getActionsDAG(false); - reorderColumns(*actions, result.merging_pipes[i].getHeader(), filter_function->getColumnName()); + reorderColumns(actions, result.merging_pipes[i].getHeader(), filter_function->getColumnName()); ExpressionActionsPtr expression_actions = std::make_shared(std::move(actions)); auto description = fmt::format( "filter values in ({}, {}]", i ? ::toString(borders[i - 1]) : "-inf", i < borders.size() ? 
::toString(borders[i]) : "+inf"); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index eca3cc54ce9..bc878e7ee49 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -799,7 +799,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ info.use_uncompressed_cache); }; - auto sorting_expr = std::make_shared(ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG())); + auto sorting_expr = std::make_shared(std::move(*ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG()))); SplitPartsWithRangesByPrimaryKeyResult split_ranges_result = splitPartsWithRangesByPrimaryKey( metadata_for_reading->getPrimaryKey(), @@ -848,16 +848,16 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ info.use_uncompressed_cache); } -static ActionsDAGPtr createProjection(const Block & header) +static ActionsDAG createProjection(const Block & header) { - return std::make_unique(header.getNamesAndTypesList()); + return ActionsDAG(header.getNamesAndTypesList()); } Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & column_names, - ActionsDAGPtr & out_projection, + std::optional & out_projection, const InputOrderInfoPtr & input_order_info) { const auto & settings = context->getSettingsRef(); @@ -1171,7 +1171,7 @@ bool ReadFromMergeTree::doNotMergePartsAcrossPartitionsFinal() const } Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( - RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & origin_column_names, const Names & column_names, ActionsDAGPtr & out_projection) + RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & origin_column_names, const Names & column_names, std::optional & out_projection) { const auto & settings = context->getSettingsRef(); const auto & data_settings = data.getSettings(); @@ -1212,7 +1212,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( /// we will store lonely parts with level > 0 to use parallel select on them. RangesInDataParts non_intersecting_parts_by_primary_key; - auto sorting_expr = std::make_shared(ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG())); + auto sorting_expr = std::make_shared(std::move(*ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG()))); if (prewhere_info) { @@ -1333,7 +1333,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( if (!merging_pipes.empty() && !no_merging_pipes.empty()) { - out_projection = nullptr; /// We do projection here + out_projection = {}; /// We do projection here Pipes pipes; pipes.resize(2); pipes[0] = Pipe::unitePipes(std::move(merging_pipes)); @@ -1519,7 +1519,8 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes) /// (1) SourceStepWithFilter::filter_nodes, (2) query_info.filter_actions_dag. Make sure there are consistent. /// TODO: Get rid of filter_actions_dag in query_info after we move analysis of /// parallel replicas and unused shards into optimization, similar to projection analysis. 
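Two idioms recur in the surrounding ReadFromMergeTree hunks: a deep copy is obtained as a value by moving out of the owning pointer that ActionsDAG::clone returns, and an optional DAG is handed to code that still expects a shared pointer by wrapping it only when it actually holds a value. A small self-contained sketch of both, again with a hypothetical Dag type whose clone() merely mimics a static clone(const Dag *) helper:

#include <memory>
#include <optional>
#include <string>
#include <iostream>

/// Hypothetical stand-in; clone() returns an owning pointer to a fresh copy,
/// or null when given null, which is all the example needs.
struct Dag
{
    std::string name;
    static std::unique_ptr<Dag> clone(const Dag * src)
    {
        return src ? std::make_unique<Dag>(*src) : nullptr;
    }
};

int main()
{
    Dag original{"sorting key"};

    /// Deep copy as a value: clone() yields a unique_ptr, and moving out of the
    /// pointee produces the by-value Dag that the refactored interfaces take.
    Dag copy = std::move(*Dag::clone(&original));

    /// Bridging an optional into a field that is still a shared pointer:
    /// wrap it only when the optional is engaged, otherwise leave it null.
    std::optional<Dag> filter = Dag{"filter"};
    std::shared_ptr<const Dag> shared_filter;
    if (filter)
        shared_filter = std::make_shared<const Dag>(std::move(*filter));

    std::cout << copy.name << ", " << (shared_filter ? shared_filter->name : "none") << '\n';
}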
- query_info.filter_actions_dag = std::move(filter_actions_dag); + if (filter_actions_dag) + query_info.filter_actions_dag = std::make_shared(std::move(*filter_actions_dag)); buildIndexes( indexes, @@ -1833,7 +1834,7 @@ bool ReadFromMergeTree::isQueryWithSampling() const } Pipe ReadFromMergeTree::spreadMarkRanges( - RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, ActionsDAGPtr & result_projection) + RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection) { const bool final = isQueryWithFinal(); Names column_names_to_read = result.column_names_to_read; @@ -1894,7 +1895,7 @@ Pipe ReadFromMergeTree::spreadMarkRanges( } } -Pipe ReadFromMergeTree::groupStreamsByPartition(AnalysisResult & result, ActionsDAGPtr & result_projection) +Pipe ReadFromMergeTree::groupStreamsByPartition(AnalysisResult & result, std::optional & result_projection) { auto && parts_with_ranges = std::move(result.parts_with_ranges); @@ -1983,7 +1984,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons /// Projection, that needed to drop columns, which have appeared by execution /// of some extra expressions, and to allow execute the same expressions later. /// NOTE: It may lead to double computation of expressions. - ActionsDAGPtr result_projection; + std::optional result_projection; Pipe pipe = output_each_partition_through_separate_port ? groupStreamsByPartition(result, result_projection) @@ -2000,7 +2001,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result.sampling.use_sampling) { - auto sampling_actions = std::make_shared(ActionsDAG::clone(result.sampling.filter_expression.get())); + auto sampling_actions = std::make_shared(std::move(*ActionsDAG::clone(result.sampling.filter_expression.get()))); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( @@ -2013,12 +2014,12 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons Block cur_header = pipe.getHeader(); - auto append_actions = [&result_projection](ActionsDAGPtr actions) + auto append_actions = [&result_projection](ActionsDAG actions) { if (!result_projection) result_projection = std::move(actions); else - result_projection = ActionsDAG::merge(std::move(*result_projection), std::move(*actions)); + result_projection = ActionsDAG::merge(std::move(*result_projection), std::move(actions)); }; if (result_projection) @@ -2038,7 +2039,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result_projection) { - auto projection_actions = std::make_shared(ActionsDAG::clone(result_projection)); + auto projection_actions = std::make_shared(std::move(*result_projection)); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, projection_actions); @@ -2133,7 +2134,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); expression->describeActions(format_settings.out, prefix); } @@ -2142,7 +2143,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << 
prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); expression->describeActions(format_settings.out, prefix); } } @@ -2168,7 +2169,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -2178,7 +2179,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index e32507e1f22..a12f53924c3 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -243,9 +243,9 @@ private: Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); - Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, ActionsDAGPtr & result_projection); + Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); - Pipe groupStreamsByPartition(AnalysisResult & result, ActionsDAGPtr & result_projection); + Pipe groupStreamsByPartition(AnalysisResult & result, std::optional & result_projection); Pipe spreadMarkRangesAmongStreams(RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & column_names); @@ -253,13 +253,13 @@ private: RangesInDataParts && parts_with_ranges, size_t num_streams, const Names & column_names, - ActionsDAGPtr & out_projection, + std::optional & out_projection, const InputOrderInfoPtr & input_order_info); bool doNotMergePartsAcrossPartitionsFinal() const; Pipe spreadMarkRangesAmongStreamsFinal( - RangesInDataParts && parts, size_t num_streams, const Names & origin_column_names, const Names & column_names, ActionsDAGPtr & out_projection); + RangesInDataParts && parts, size_t num_streams, const Names & origin_column_names, const Names & column_names, std::optional & out_projection); ReadFromMergeTree::AnalysisResult getAnalysisResult() const; diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 
90fe609a17d..ca98f7c2110 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -441,7 +441,7 @@ Pipe ReadFromSystemNumbersStep::makePipe() chassert(numbers_storage.step != UInt64{0}); /// Build rpn of query filters - KeyCondition condition(filter_actions_dag.get(), context, column_names, key_expression); + KeyCondition condition(filter_actions_dag ? &*filter_actions_dag : nullptr, context, column_names, key_expression); if (condition.extractPlainRanges(ranges)) { diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.cpp b/src/Processors/QueryPlan/SourceStepWithFilter.cpp index 79b225e7f93..55c9b5e442e 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.cpp +++ b/src/Processors/QueryPlan/SourceStepWithFilter.cpp @@ -110,7 +110,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); expression->describeActions(format_settings.out, prefix); } @@ -119,7 +119,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); expression->describeActions(format_settings.out, prefix); } } @@ -137,7 +137,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -147,7 +147,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.h b/src/Processors/QueryPlan/SourceStepWithFilter.h index 91b62efa860..f7a030c0628 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.h +++ b/src/Processors/QueryPlan/SourceStepWithFilter.h @@ -33,8 +33,8 @@ public: { } - const ActionsDAGPtr & getFilterActionsDAG() const { return filter_actions_dag; } - ActionsDAGPtr detachFilterActionsDAG() { return std::move(filter_actions_dag); } + const std::optional & 
getFilterActionsDAG() const { return filter_actions_dag; } + std::optional detachFilterActionsDAG() { return std::move(filter_actions_dag); } const SelectQueryInfo & getQueryInfo() const { return query_info; } const PrewhereInfoPtr & getPrewhereInfo() const { return prewhere_info; } @@ -81,7 +81,7 @@ protected: ContextPtr context; std::optional limit; - ActionsDAGPtr filter_actions_dag; + std::optional filter_actions_dag; private: /// Will be cleared after applyFilters() is called. diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index 19632b1862f..4aa4f10ac86 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -28,7 +28,7 @@ TotalsHavingStep::TotalsHavingStep( const DataStream & input_stream_, const AggregateDescriptions & aggregates_, bool overflow_row_, - const ActionsDAGPtr & actions_dag_, + std::optional actions_dag_, const std::string & filter_column_, bool remove_filter_, TotalsMode totals_mode_, @@ -38,7 +38,7 @@ TotalsHavingStep::TotalsHavingStep( input_stream_, TotalsHavingTransform::transformHeader( input_stream_.header, - actions_dag_.get(), + actions_dag_ ? &*actions_dag_ : nullptr, filter_column_, remove_filter_, final_, @@ -46,7 +46,7 @@ TotalsHavingStep::TotalsHavingStep( getTraits(!filter_column_.empty())) , aggregates(aggregates_) , overflow_row(overflow_row_) - , actions_dag(ActionsDAG::clone(actions_dag_)) + , actions_dag(std::move(actions_dag_)) , filter_column_name(filter_column_) , remove_filter(remove_filter_) , totals_mode(totals_mode_) @@ -57,7 +57,7 @@ TotalsHavingStep::TotalsHavingStep( void TotalsHavingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto expression_actions = actions_dag ? std::make_shared(ActionsDAG::clone(actions_dag), settings.getActionsSettings()) : nullptr; + auto expression_actions = actions_dag ? std::make_shared(std::move(*actions_dag), settings.getActionsSettings()) : nullptr; auto totals_having = std::make_shared( pipeline.getHeader(), @@ -100,7 +100,7 @@ void TotalsHavingStep::describeActions(FormatSettings & settings) const if (actions_dag) { bool first = true; - auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(getActions()))); for (const auto & action : expression->getActions()) { settings.out << prefix << (first ? 
"Actions: " @@ -117,7 +117,7 @@ void TotalsHavingStep::describeActions(JSONBuilder::JSONMap & map) const if (actions_dag) { map.add("Filter column", filter_column_name); - auto expression = std::make_shared(ActionsDAG::clone(actions_dag)); + auto expression = std::make_shared(std::move(*ActionsDAG::clone(getActions()))); map.add("Expression", expression->toTree()); } } @@ -128,7 +128,7 @@ void TotalsHavingStep::updateOutputStream() input_streams.front(), TotalsHavingTransform::transformHeader( input_streams.front().header, - actions_dag.get(), + getActions(), filter_column_name, remove_filter, final, diff --git a/src/Processors/QueryPlan/TotalsHavingStep.h b/src/Processors/QueryPlan/TotalsHavingStep.h index 52ef5437701..927b8d99de3 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.h +++ b/src/Processors/QueryPlan/TotalsHavingStep.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include namespace DB { @@ -18,7 +19,7 @@ public: const DataStream & input_stream_, const AggregateDescriptions & aggregates_, bool overflow_row_, - const ActionsDAGPtr & actions_dag_, + std::optional actions_dag_, const std::string & filter_column_, bool remove_filter_, TotalsMode totals_mode_, @@ -32,7 +33,7 @@ public: void describeActions(JSONBuilder::JSONMap & map) const override; void describeActions(FormatSettings & settings) const override; - const ActionsDAGPtr & getActions() const { return actions_dag; } + const ActionsDAG * getActions() const { return actions_dag ? &*actions_dag : nullptr; } private: void updateOutputStream() override; @@ -40,7 +41,7 @@ private: const AggregateDescriptions aggregates; bool overflow_row; - ActionsDAGPtr actions_dag; + std::optional actions_dag; String filter_column_name; bool remove_filter; TotalsMode totals_mode; diff --git a/src/Processors/SourceWithKeyCondition.h b/src/Processors/SourceWithKeyCondition.h index fcf576637ff..cfd3eb236b7 100644 --- a/src/Processors/SourceWithKeyCondition.h +++ b/src/Processors/SourceWithKeyCondition.h @@ -16,13 +16,13 @@ protected: /// Represents pushed down filters in source std::shared_ptr key_condition; - void setKeyConditionImpl(const ActionsDAG * filter_actions_dag, ContextPtr context, const Block & keys) + void setKeyConditionImpl(const std::optional & filter_actions_dag, ContextPtr context, const Block & keys) { key_condition = std::make_shared( - filter_actions_dag, + filter_actions_dag ? &*filter_actions_dag : nullptr, context, keys.getNames(), - std::make_shared(std::make_unique(keys.getColumnsWithTypeAndName()))); + std::make_shared(ActionsDAG(keys.getColumnsWithTypeAndName()))); } public: @@ -33,6 +33,6 @@ public: virtual void setKeyCondition(const std::shared_ptr & key_condition_) { key_condition = key_condition_; } /// Set key_condition created by filter_actions_dag and context. 
- virtual void setKeyCondition(const ActionsDAGPtr & /*filter_actions_dag*/, ContextPtr /*context*/) { } + virtual void setKeyCondition(const std::optional & /*filter_actions_dag*/, ContextPtr /*context*/) { } }; } diff --git a/src/Processors/Transforms/AddingDefaultsTransform.cpp b/src/Processors/Transforms/AddingDefaultsTransform.cpp index 7945b3999c1..da4d3a0041b 100644 --- a/src/Processors/Transforms/AddingDefaultsTransform.cpp +++ b/src/Processors/Transforms/AddingDefaultsTransform.cpp @@ -178,7 +178,7 @@ void AddingDefaultsTransform::transform(Chunk & chunk) auto dag = evaluateMissingDefaults(evaluate_block, header.getNamesAndTypesList(), columns, context, false); if (dag) { - auto actions = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes), true); + auto actions = std::make_shared(std::move(*dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes), true); actions->execute(evaluate_block); } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index bbe57fc6441..36ffc515f43 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -203,7 +203,7 @@ FillingTransform::FillingTransform( , use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_) { if (interpolate_description) - interpolate_actions = std::make_shared(ActionsDAG::clone(interpolate_description->actions)); + interpolate_actions = std::make_shared(std::move(*ActionsDAG::clone(&interpolate_description->actions))); std::vector is_fill_column(header_.columns()); for (size_t i = 0, size = fill_description.size(); i < size; ++i) diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 2cd51259549..da5a45f36d5 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -516,7 +516,7 @@ void StorageHive::initMinMaxIndexExpression() partition_names = partition_name_types.getNames(); partition_types = partition_name_types.getTypes(); partition_minmax_idx_expr = std::make_shared( - std::make_unique(partition_name_types), ExpressionActionsSettings::fromContext(getContext())); + ActionsDAG(partition_name_types), ExpressionActionsSettings::fromContext(getContext())); } NamesAndTypesList all_name_types = metadata_snapshot->getColumns().getAllPhysical(); @@ -526,7 +526,7 @@ void StorageHive::initMinMaxIndexExpression() hivefile_name_types.push_back(column); } hivefile_minmax_idx_expr = std::make_shared( - std::make_unique(hivefile_name_types), ExpressionActionsSettings::fromContext(getContext())); + ActionsDAG(hivefile_name_types), ExpressionActionsSettings::fromContext(getContext())); } ASTPtr StorageHive::extractKeyExpressionList(const ASTPtr & node) @@ -583,7 +583,7 @@ static HiveFilePtr createHiveFile( HiveFiles StorageHive::collectHiveFilesFromPartition( const Apache::Hadoop::Hive::Partition & partition, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -647,7 +647,7 @@ HiveFiles StorageHive::collectHiveFilesFromPartition( for (size_t i = 0; i < partition_names.size(); ++i) ranges.emplace_back(fields[i]); - const KeyCondition partition_key_condition(filter_actions_dag.get(), getContext(), partition_names, partition_minmax_idx_expr); + const KeyCondition partition_key_condition(filter_actions_dag, getContext(), partition_names, 
partition_minmax_idx_expr); if (!partition_key_condition.checkInHyperrectangle(ranges, partition_types).can_be_true) return {}; } @@ -681,7 +681,7 @@ StorageHive::listDirectory(const String & path, const HiveTableMetadataPtr & hiv HiveFilePtr StorageHive::getHiveFileIfNeeded( const FileInfo & file_info, const FieldVector & fields, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const ContextPtr & context_, PruneLevel prune_level) const @@ -715,7 +715,7 @@ HiveFilePtr StorageHive::getHiveFileIfNeeded( if (prune_level >= PruneLevel::File) { - const KeyCondition hivefile_key_condition(filter_actions_dag.get(), getContext(), hivefile_name_types.getNames(), hivefile_minmax_idx_expr); + const KeyCondition hivefile_key_condition(filter_actions_dag, getContext(), hivefile_name_types.getNames(), hivefile_minmax_idx_expr); if (hive_file->useFileMinMaxIndex()) { /// Load file level minmax index and apply @@ -828,7 +828,7 @@ void ReadFromHive::createFiles() if (hive_files) return; - hive_files = storage->collectHiveFiles(num_streams, filter_actions_dag, hive_table_metadata, fs, context); + hive_files = storage->collectHiveFiles(num_streams, filter_actions_dag ? &*filter_actions_dag : nullptr, hive_table_metadata, fs, context); LOG_INFO(log, "Collect {} hive files to read", hive_files->size()); } @@ -950,7 +950,7 @@ void ReadFromHive::initializePipeline(QueryPipelineBuilder & pipeline, const Bui HiveFiles StorageHive::collectHiveFiles( size_t max_threads, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -1023,12 +1023,12 @@ SinkToStoragePtr StorageHive::write(const ASTPtr & /*query*/, const StorageMetad std::optional StorageHive::totalRows(const Settings & settings) const { /// query_info is not used when prune_level == PruneLevel::None - return totalRowsImpl(settings, nullptr, getContext(), PruneLevel::None); + return totalRowsImpl(settings, {}, getContext(), PruneLevel::None); } -std::optional StorageHive::totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) const +std::optional StorageHive::totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr context_) const { - return totalRowsImpl(context_->getSettingsRef(), filter_actions_dag, context_, PruneLevel::Partition); + return totalRowsImpl(context_->getSettingsRef(), &filter_actions_dag, context_, PruneLevel::Partition); } void StorageHive::checkAlterIsPossible(const AlterCommands & commands, ContextPtr /*local_context*/) const @@ -1043,7 +1043,7 @@ void StorageHive::checkAlterIsPossible(const AlterCommands & commands, ContextPt } std::optional -StorageHive::totalRowsImpl(const Settings & settings, const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const +StorageHive::totalRowsImpl(const Settings & settings, const ActionsDAG * filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const { /// Row-based format like Text doesn't support totalRowsByPartitionPredicate if (!supportsSubsetOfColumns()) diff --git a/src/Storages/Hive/StorageHive.h b/src/Storages/Hive/StorageHive.h index 8a457dd6e01..e16df22e138 100644 --- a/src/Storages/Hive/StorageHive.h +++ b/src/Storages/Hive/StorageHive.h @@ -57,7 +57,7 @@ public: bool supportsSubsetOfColumns() const; std::optional totalRows(const Settings & settings) const override; - 
std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) const override; + std::optional totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr context_) const override; void checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const override; protected: @@ -90,7 +90,7 @@ private: HiveFiles collectHiveFiles( size_t max_threads, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -98,7 +98,7 @@ private: HiveFiles collectHiveFilesFromPartition( const Apache::Hadoop::Hive::Partition & partition, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const HDFSFSPtr & fs, const ContextPtr & context_, @@ -107,7 +107,7 @@ private: HiveFilePtr getHiveFileIfNeeded( const FileInfo & file_info, const FieldVector & fields, - const ActionsDAGPtr & filter_actions_dag, + const ActionsDAG * filter_actions_dag, const HiveTableMetadataPtr & hive_table_metadata, const ContextPtr & context_, PruneLevel prune_level = PruneLevel::Max) const; @@ -115,7 +115,7 @@ private: void lazyInitialize(); std::optional - totalRowsImpl(const Settings & settings, const ActionsDAGPtr & filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const; + totalRowsImpl(const Settings & settings, const ActionsDAG * filter_actions_dag, ContextPtr context_, PruneLevel prune_level) const; String hive_metastore_url; diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 1f7ac23eb82..57f79a2cd7f 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -236,7 +236,7 @@ StorageID IStorage::getStorageID() const return storage_id; } -ConditionSelectivityEstimator IStorage::getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const +ConditionSelectivityEstimator IStorage::getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAG *, ContextPtr) const { return {}; } diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 98afd844046..c86f18d5d3b 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -135,7 +135,7 @@ public: /// Returns true if the storage supports queries with the PREWHERE section. virtual bool supportsPrewhere() const { return false; } - virtual ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const; + virtual ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAG *, ContextPtr) const; /// Returns which columns supports PREWHERE, or empty std::nullopt if all columns is supported. /// This is needed for engines whose aggregates data from multiple tables, like Merge. @@ -682,7 +682,7 @@ public: virtual std::optional totalRows(const Settings &) const { return {}; } /// Same as above but also take partition predicate into account. 
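In the Hive and IStorage hunks around here, a filter that may be absent is passed as const ActionsDAG *, and callers collapse their std::optional into that pointer with filter ? &*filter : nullptr, null meaning "no filter" (a predicate that is always present becomes const ActionsDAG & instead). A minimal sketch of that conversion, with Dag, Condition and makeCondition as hypothetical stand-ins:

#include <optional>
#include <iostream>

/// Hypothetical stand-ins: Dag for ActionsDAG, Condition for a consumer that,
/// like KeyCondition, treats a null filter as "match everything".
struct Dag
{
    int filter_id = 0;
};

struct Condition
{
    explicit Condition(const Dag * filter) : has_filter(filter != nullptr) {}
    bool has_filter;
};

/// New-style entry point: take the optional by const reference and convert it
/// to a nullable pointer only at the boundary of the pointer-based API.
Condition makeCondition(const std::optional<Dag> & filter_dag)
{
    return Condition(filter_dag ? &*filter_dag : nullptr);
}

int main()
{
    std::cout << makeCondition(Dag{42}).has_filter << ' '
              << makeCondition(std::nullopt).has_filter << '\n';    /// prints "1 0"
}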
- virtual std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr &, ContextPtr) const { return {}; } + virtual std::optional totalRowsByPartitionPredicate(const ActionsDAG &, ContextPtr) const { return {}; } /// If it is possible to quickly determine exact number of bytes for the table on storage: /// - memory (approximated, resident) diff --git a/src/Storages/KVStorageUtils.cpp b/src/Storages/KVStorageUtils.cpp index 94319aef3b8..88783246e10 100644 --- a/src/Storages/KVStorageUtils.cpp +++ b/src/Storages/KVStorageUtils.cpp @@ -231,7 +231,7 @@ bool traverseDAGFilter( } std::pair getFilterKeys( - const String & primary_key, const DataTypePtr & primary_key_type, const ActionsDAGPtr & filter_actions_dag, const ContextPtr & context) + const String & primary_key, const DataTypePtr & primary_key_type, const std::optional & filter_actions_dag, const ContextPtr & context) { if (!filter_actions_dag) return {{}, true}; diff --git a/src/Storages/KVStorageUtils.h b/src/Storages/KVStorageUtils.h index e20a1ce4f37..64108290270 100644 --- a/src/Storages/KVStorageUtils.h +++ b/src/Storages/KVStorageUtils.h @@ -22,7 +22,7 @@ std::pair getFilterKeys( const std::string & primary_key, const DataTypePtr & primary_key_type, const SelectQueryInfo & query_info, const ContextPtr & context); std::pair getFilterKeys( - const String & primary_key, const DataTypePtr & primary_key_type, const ActionsDAGPtr & filter_actions_dag, const ContextPtr & context); + const String & primary_key, const DataTypePtr & primary_key_type, const std::optional & filter_actions_dag, const ContextPtr & context); template void fillColumns(const K & key, const V & value, size_t key_pos, const Block & header, MutableColumns & columns) diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index e03ecc05064..7e43966556e 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -160,7 +160,7 @@ KeyDescription KeyDescription::buildEmptyKey() { KeyDescription result; result.expression_list_ast = std::make_shared(); - result.expression = std::make_shared(std::make_unique(), ExpressionActionsSettings{}); + result.expression = std::make_shared(ActionsDAG(), ExpressionActionsSettings{}); return result; } diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index 4ad7f6ef991..264b2b397f4 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -163,8 +163,8 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns if (dag) { dag->addMaterializingOutputActions(); - auto actions = std::make_shared< - ExpressionActions>(std::move(dag), + auto actions = std::make_shared( + std::move(*dag), ExpressionActionsSettings::fromSettings(data_part_info_for_read->getContext()->getSettingsRef())); actions->execute(additional_columns); } diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 1efded3b064..d781345d834 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -628,7 +628,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { const auto & index_hint_dag = index_hint->getActions(); - children = index_hint_dag->getOutputs(); + children = index_hint_dag.getOutputs(); for (auto & arg : children) arg = &cloneASTWithInversionPushDown(*arg, inverted_dag, to_inverted, context, need_inversion); @@ 
-729,7 +729,7 @@ Block KeyCondition::getBlockWithConstants( if (syntax_analyzer_result) { auto actions = ExpressionAnalyzer(query, syntax_analyzer_result, context).getConstActionsDAG(); - for (const auto & action_node : actions->getOutputs()) + for (const auto & action_node : actions.getOutputs()) { if (action_node->column) result.insert(ColumnWithTypeAndName{action_node->column, action_node->result_type, action_node->result_name}); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 7b642c34f37..334c8c9c5ac 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -472,7 +472,7 @@ StoragePolicyPtr MergeTreeData::getStoragePolicy() const } ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByPredicate( - const StorageSnapshotPtr & storage_snapshot, const ActionsDAGPtr & filter_dag, ContextPtr local_context) const + const StorageSnapshotPtr & storage_snapshot, const ActionsDAG * filter_dag, ContextPtr local_context) const { if (!local_context->getSettings().allow_statistics_optimize) return {}; @@ -487,7 +487,7 @@ ConditionSelectivityEstimator MergeTreeData::getConditionSelectivityEstimatorByP ASTPtr expression_ast; ConditionSelectivityEstimator result; - PartitionPruner partition_pruner(storage_snapshot->metadata, filter_dag.get(), local_context); + PartitionPruner partition_pruner(storage_snapshot->metadata, filter_dag, local_context); if (partition_pruner.isUseless()) { @@ -746,7 +746,7 @@ ExpressionActionsPtr MergeTreeData::getMinMaxExpr(const KeyDescription & partiti if (!partition_key.column_names.empty()) partition_key_columns = partition_key.expression->getRequiredColumnsWithTypes(); - return std::make_shared(std::make_unique(partition_key_columns), settings); + return std::make_shared(ActionsDAG(partition_key_columns), settings); } Names MergeTreeData::getMinMaxColumnsNames(const KeyDescription & partition_key) @@ -1134,7 +1134,7 @@ Block MergeTreeData::getBlockWithVirtualsForFilter( std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( - const ActionsDAGPtr & filter_actions_dag, ContextPtr local_context, const DataPartsVector & parts) const + const ActionsDAG & filter_actions_dag, ContextPtr local_context, const DataPartsVector & parts) const { if (parts.empty()) return 0; @@ -1142,7 +1142,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr); if (!filter_dag) return {}; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 52916d85fef..e490e4b0bf9 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -426,7 +426,7 @@ public: bool supportsPrewhere() const override { return true; } - ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAGPtr &, ContextPtr) const override; + ConditionSelectivityEstimator getConditionSelectivityEstimatorByPredicate(const StorageSnapshotPtr &, const ActionsDAG *, ContextPtr) const override; bool supportsFinal() const override; @@ -1227,7 +1227,7 @@ protected: boost::iterator_range 
range, const ColumnsDescription & storage_columns); std::optional totalRowsByPartitionPredicateImpl( - const ActionsDAGPtr & filter_actions_dag, ContextPtr context, const DataPartsVector & parts) const; + const ActionsDAG & filter_actions_dag, ContextPtr context, const DataPartsVector & parts) const; static decltype(auto) getStateModifier(DataPartState state) { diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 61b8b6fdaa8..5a5b6d4a6e1 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -428,7 +428,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( ASTPtr query = sampling.filter_function; auto syntax_result = TreeRewriter(context).analyze(query, available_real_columns); - sampling.filter_expression = ExpressionAnalyzer(sampling.filter_function, syntax_result, context).getActionsDAG(false); + sampling.filter_expression = std::make_shared(ExpressionAnalyzer(sampling.filter_function, syntax_result, context).getActionsDAG(false)); } } @@ -466,7 +466,7 @@ void MergeTreeDataSelectExecutor::buildKeyConditionFromPartOffset( dag.get(), context, sample.getNames(), - std::make_shared(std::make_unique(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), + std::make_shared(ActionsDAG(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), {}}); } diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 8d4ef69b1b9..ca31ffc9de5 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -265,15 +265,15 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( if (!set->buildOrderedSetInplace(context)) return; - auto filter_actions_dag = ActionsDAG::clone(filter_dag); - const auto * filter_actions_dag_node = filter_actions_dag->getOutputs().at(0); + auto filter_actions_dag = std::move(*ActionsDAG::clone(filter_dag)); + const auto * filter_actions_dag_node = filter_actions_dag.getOutputs().at(0); std::unordered_map node_to_result_node; - filter_actions_dag->getOutputs()[0] = &traverseDAG(*filter_actions_dag_node, filter_actions_dag, context, node_to_result_node); + filter_actions_dag.getOutputs()[0] = &traverseDAG(*filter_actions_dag_node, filter_actions_dag, context, node_to_result_node); - filter_actions_dag->removeUnusedActions(); + filter_actions_dag.removeUnusedActions(); - actions_output_column_name = filter_actions_dag->getOutputs().at(0)->result_name; + actions_output_column_name = filter_actions_dag.getOutputs().at(0)->result_name; actions = std::make_shared(std::move(filter_actions_dag)); } @@ -306,7 +306,7 @@ bool MergeTreeIndexConditionSet::mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx } -static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node & node, const ActionsDAGPtr & result_dag_or_null, ActionsDAG::NodeRawConstPtrs * storage) +static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node & node, ActionsDAG * result_dag_or_null, ActionsDAG::NodeRawConstPtrs * storage) { chassert(node.type == ActionsDAG::ActionType::FUNCTION); if (node.function_base->getName() != "indexHint") @@ -316,17 +316,17 @@ static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node const auto & adaptor = typeid_cast(*node.function_base); const auto & index_hint = typeid_cast(*adaptor.getFunction()); if (!result_dag_or_null) - return 
index_hint.getActions()->getOutputs(); + return index_hint.getActions().getOutputs(); /// Import the DAG and map argument pointers. - ActionsDAGPtr actions_clone = ActionsDAG::clone(index_hint.getActions()); + auto actions_clone = std::move(*ActionsDAG::clone(&index_hint.getActions())); chassert(storage); - result_dag_or_null->mergeNodes(std::move(*actions_clone), storage); + result_dag_or_null->mergeNodes(std::move(actions_clone), storage); return *storage; } const ActionsDAG::Node & MergeTreeIndexConditionSet::traverseDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const { @@ -348,7 +348,7 @@ const ActionsDAG::Node & MergeTreeIndexConditionSet::traverseDAG(const ActionsDA atom_node_ptr->type == ActionsDAG::ActionType::FUNCTION) { auto bit_wrapper_function = FunctionFactory::instance().get("__bitWrapperFunc", context); - result_node = &result_dag->addFunction(bit_wrapper_function, {atom_node_ptr}, {}); + result_node = &result_dag.addFunction(bit_wrapper_function, {atom_node_ptr}, {}); } } else @@ -359,14 +359,14 @@ const ActionsDAG::Node & MergeTreeIndexConditionSet::traverseDAG(const ActionsDA unknown_field_column_with_type.type = std::make_shared(); unknown_field_column_with_type.column = unknown_field_column_with_type.type->createColumnConst(1, UNKNOWN_FIELD); - result_node = &result_dag->addColumn(unknown_field_column_with_type); + result_node = &result_dag.addColumn(unknown_field_column_with_type); } node_to_result_node.emplace(&node, result_node); return *result_node; } -const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDAG::Node & node, ActionsDAGPtr & result_dag, const ContextPtr & context) const +const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDAG::Node & node, ActionsDAG & result_dag, const ContextPtr & context) const { /// Function, literal or column @@ -386,7 +386,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDA const auto * result_node = node_to_check; if (node.type != ActionsDAG::ActionType::INPUT) - result_node = &result_dag->addInput(column_name, node.result_type); + result_node = &result_dag.addInput(column_name, node.result_type); return result_node; } @@ -407,11 +407,11 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::atomFromDAG(const ActionsDA return nullptr; } - return &result_dag->addFunction(node.function_base, children, {}); + return &result_dag.addFunction(node.function_base, children, {}); } const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const { @@ -429,7 +429,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const Actio auto function_name = node_to_check->function->getName(); ActionsDAG::NodeRawConstPtrs temp_ptrs_to_argument; - const auto & arguments = getArguments(*node_to_check, result_dag, &temp_ptrs_to_argument); + const auto & arguments = getArguments(*node_to_check, &result_dag, &temp_ptrs_to_argument); size_t arguments_size = arguments.size(); if (function_name == "not") @@ -440,7 +440,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const Actio const ActionsDAG::Node * argument = &traverseDAG(*arguments[0], result_dag, context, node_to_result_node); auto bit_swap_last_two_function = 
FunctionFactory::instance().get("__bitSwapLastTwo", context); - return &result_dag->addFunction(bit_swap_last_two_function, {argument}, {}); + return &result_dag.addFunction(bit_swap_last_two_function, {argument}, {}); } else if (function_name == "and" || function_name == "indexHint" || function_name == "or") { @@ -468,7 +468,7 @@ const ActionsDAG::Node * MergeTreeIndexConditionSet::operatorFromDAG(const Actio const auto * before_last_argument = children.back(); children.pop_back(); - last_argument = &result_dag->addFunction(function, {before_last_argument, last_argument}, {}); + last_argument = &result_dag.addFunction(function, {before_last_argument, last_argument}, {}); } return last_argument; diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h index abd40b3cf9d..03b02515e47 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -93,16 +93,16 @@ public: ~MergeTreeIndexConditionSet() override = default; private: const ActionsDAG::Node & traverseDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const; const ActionsDAG::Node * atomFromDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context) const; const ActionsDAG::Node * operatorFromDAG(const ActionsDAG::Node & node, - ActionsDAGPtr & result_dag, + ActionsDAG & result_dag, const ContextPtr & context, std::unordered_map & node_to_result_node) const; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index e924f853524..aec2f988e8d 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -80,7 +80,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep row_level_filter_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(ActionsDAG::clone(prewhere_info->row_level_filter), actions_settings), + .actions = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter)), actions_settings), .filter_column_name = prewhere_info->row_level_column_name, .remove_filter_column = true, .need_filter = true, @@ -96,7 +96,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep prewhere_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(ActionsDAG::clone(prewhere_info->prewhere_actions), actions_settings), + .actions = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions)), actions_settings), .filter_column_name = prewhere_info->prewhere_column_name, .remove_filter_column = prewhere_info->remove_prewhere_column, .need_filter = prewhere_info->need_filter, diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 98b35a3ca2c..15917d59c9f 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -349,7 +349,7 @@ public: MergeTreeData::DataPartPtr data_part_, Names columns_to_read_, bool apply_deleted_mask_, - ActionsDAGPtr filter_, + std::optional filter_, ContextPtr context_, LoggerPtr log_) : ISourceStep(DataStream{.header = storage_snapshot_->getSampleBlockForColumns(columns_to_read_)}) @@ -376,7 +376,7 @@ public: { const auto & primary_key = 
storage_snapshot->metadata->getPrimaryKey(); const Names & primary_key_column_names = primary_key.column_names; - KeyCondition key_condition(filter.get(), context, primary_key_column_names, primary_key.expression); + KeyCondition key_condition(&*filter, context, primary_key_column_names, primary_key.expression); LOG_DEBUG(log, "Key condition: {}", key_condition.toString()); if (!key_condition.alwaysFalse()) @@ -417,7 +417,7 @@ private: MergeTreeData::DataPartPtr data_part; Names columns_to_read; bool apply_deleted_mask; - ActionsDAGPtr filter; + std::optional filter; ContextPtr context; LoggerPtr log; }; @@ -430,7 +430,7 @@ void createReadFromPartStep( MergeTreeData::DataPartPtr data_part, Names columns_to_read, bool apply_deleted_mask, - ActionsDAGPtr filter, + std::optional filter, ContextPtr context, LoggerPtr log) { diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h index e6f055f776c..1b05512b9a3 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.h +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h @@ -38,7 +38,7 @@ void createReadFromPartStep( MergeTreeData::DataPartPtr data_part, Names columns_to_read, bool apply_deleted_mask, - ActionsDAGPtr filter, + std::optional filter, ContextPtr context, LoggerPtr log); diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 25596b42951..116edf5b9cb 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -349,7 +349,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction PrewhereExprStep new_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(std::move(step.actions), actions_settings), + .actions = std::make_shared(std::move(*step.actions), actions_settings), .filter_column_name = step.column_name, /// Don't remove if it's in the list of original outputs .remove_filter_column = diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index a9a5fddace4..8c389f00780 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -112,7 +112,7 @@ void MergeTreeWhereOptimizer::optimize(SelectQueryInfo & select_query_info, cons LOG_DEBUG(log, "MergeTreeWhereOptimizer: condition \"{}\" moved to PREWHERE", select.prewhere()->formatForLogging(log_queries_cut_to_length)); } -MergeTreeWhereOptimizer::FilterActionsOptimizeResult MergeTreeWhereOptimizer::optimize(const ActionsDAGPtr & filter_dag, +MergeTreeWhereOptimizer::FilterActionsOptimizeResult MergeTreeWhereOptimizer::optimize(const ActionsDAG & filter_dag, const std::string & filter_column_name, const ContextPtr & context, bool is_final) @@ -126,7 +126,7 @@ MergeTreeWhereOptimizer::FilterActionsOptimizeResult MergeTreeWhereOptimizer::op where_optimizer_context.use_statistics = context->getSettingsRef().allow_statistics_optimize; RPNBuilderTreeContext tree_context(context); - RPNBuilderTreeNode node(&filter_dag->findInOutputs(filter_column_name), tree_context); + RPNBuilderTreeNode node(&filter_dag.findInOutputs(filter_column_name), tree_context); auto optimize_result = optimizeImpl(node, where_optimizer_context); if (!optimize_result) diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h index 
ba6b4660924..a3d035675c6 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h @@ -52,7 +52,7 @@ public: bool fully_moved_to_prewhere = false; }; - FilterActionsOptimizeResult optimize(const ActionsDAGPtr & filter_dag, + FilterActionsOptimizeResult optimize(const ActionsDAG & filter_dag, const std::string & filter_column_name, const ContextPtr & context, bool is_final); diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp index 4a18d606bb7..915a0e84902 100644 --- a/src/Storages/MergeTree/RPNBuilder.cpp +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -398,7 +398,7 @@ size_t RPNBuilderFunctionTreeNode::getArgumentsSize() const { const auto * adaptor = typeid_cast(dag_node->function_base.get()); const auto * index_hint = typeid_cast(adaptor->getFunction().get()); - return index_hint->getActions()->getOutputs().size(); + return index_hint->getActions().getOutputs().size(); } return dag_node->children.size(); @@ -426,7 +426,7 @@ RPNBuilderTreeNode RPNBuilderFunctionTreeNode::getArgumentAt(size_t index) const { const auto & adaptor = typeid_cast(*dag_node->function_base); const auto & index_hint = typeid_cast(*adaptor.getFunction()); - return RPNBuilderTreeNode(index_hint.getActions()->getOutputs()[index], tree_context); + return RPNBuilderTreeNode(index_hint.getActions().getOutputs()[index], tree_context); } return RPNBuilderTreeNode(dag_node->children[index], tree_context); diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 503f542f2bd..d114608d8f1 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -77,9 +77,9 @@ StorageObjectStorageSource::~StorageObjectStorageSource() create_reader_pool->wait(); } -void StorageObjectStorageSource::setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) +void StorageObjectStorageSource::setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) { - setKeyConditionImpl(filter_actions_dag.get(), context_, read_from_format_info.format_header); + setKeyConditionImpl(filter_actions_dag, context_, read_from_format_info.format_header); } std::string StorageObjectStorageSource::getUniqueStoragePathIdentifier( diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.h b/src/Storages/ObjectStorage/StorageObjectStorageSource.h index fd7c7aa7102..01ce980feaa 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.h @@ -45,7 +45,7 @@ public: String getName() const override { return name; } - void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override; + void setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) override; Chunk generate() override; diff --git a/src/Storages/ReadFinalForExternalReplicaStorage.cpp b/src/Storages/ReadFinalForExternalReplicaStorage.cpp index e1d52eefc20..393d3f3fbb9 100644 --- a/src/Storages/ReadFinalForExternalReplicaStorage.cpp +++ b/src/Storages/ReadFinalForExternalReplicaStorage.cpp @@ -79,7 +79,7 @@ void readFinalFromNestedStorage( auto step = std::make_unique( query_plan.getCurrentDataStream(), - actions, + std::move(actions), filter_column_name, false); diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 5f48d5e795e..5276870c037 100644 --- 
a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -46,9 +46,9 @@ struct PrewhereInfo { /// Actions for row level security filter. Applied separately before prewhere_actions. /// This actions are separate because prewhere condition should not be executed over filtered rows. - ActionsDAGPtr row_level_filter; + std::optional row_level_filter; /// Actions which are executed on block in order to get filter column for prewhere step. - ActionsDAGPtr prewhere_actions; + std::optional prewhere_actions; String row_level_column_name; String prewhere_column_name; bool remove_prewhere_column = false; @@ -56,7 +56,7 @@ struct PrewhereInfo bool generated_by_optimizer = false; PrewhereInfo() = default; - explicit PrewhereInfo(ActionsDAGPtr prewhere_actions_, String prewhere_column_name_) + explicit PrewhereInfo(std::optional prewhere_actions_, String prewhere_column_name_) : prewhere_actions(std::move(prewhere_actions_)), prewhere_column_name(std::move(prewhere_column_name_)) {} std::string dump() const; @@ -66,10 +66,10 @@ struct PrewhereInfo PrewhereInfoPtr prewhere_info = std::make_shared(); if (row_level_filter) - prewhere_info->row_level_filter = ActionsDAG::clone(row_level_filter); + prewhere_info->row_level_filter = std::move(*ActionsDAG::clone(&*row_level_filter)); if (prewhere_actions) - prewhere_info->prewhere_actions = ActionsDAG::clone(prewhere_actions); + prewhere_info->prewhere_actions = std::move(*ActionsDAG::clone(&*prewhere_actions)); prewhere_info->row_level_column_name = row_level_column_name; prewhere_info->prewhere_column_name = prewhere_column_name; @@ -93,7 +93,7 @@ struct FilterInfo /// Same as FilterInfo, but with ActionsDAG. struct FilterDAGInfo { - ActionsDAGPtr actions; + std::optional actions; String column_name; bool do_remove_column = false; diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 695b31d0c80..fdddd84ab59 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -312,7 +312,7 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->row_level_filter) { src_table_query_info.prewhere_info->row_level_filter = ActionsDAG::merge( - std::move(*ActionsDAG::clone(actions_dag)), + std::move(*ActionsDAG::clone(&actions_dag)), std::move(*src_table_query_info.prewhere_info->row_level_filter)); src_table_query_info.prewhere_info->row_level_filter->removeUnusedActions(); @@ -321,7 +321,7 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->prewhere_actions) { src_table_query_info.prewhere_info->prewhere_actions = ActionsDAG::merge( - std::move(*ActionsDAG::clone(actions_dag)), + std::move(*ActionsDAG::clone(&actions_dag)), std::move(*src_table_query_info.prewhere_info->prewhere_actions)); src_table_query_info.prewhere_info->prewhere_actions->removeUnusedActions(); @@ -353,7 +353,7 @@ void StorageBuffer::read( header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(actions_dag)); converting->setStepDescription("Convert destination table columns to Buffer table structure"); query_plan.addStep(std::move(converting)); @@ -432,7 +432,7 @@ void StorageBuffer::read( { return std::make_shared( header, - std::make_shared(ActionsDAG::clone(query_info.prewhere_info->row_level_filter), actions_settings), + std::make_shared(std::move(*ActionsDAG::clone(&*query_info.prewhere_info->row_level_filter)), 
actions_settings), query_info.prewhere_info->row_level_column_name, false); }); @@ -442,7 +442,7 @@ void StorageBuffer::read( { return std::make_shared( header, - std::make_shared(ActionsDAG::clone(query_info.prewhere_info->prewhere_actions), actions_settings), + std::make_shared(std::move(*ActionsDAG::clone(&*query_info.prewhere_info->prewhere_actions)), actions_settings), query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column); }); @@ -472,7 +472,7 @@ void StorageBuffer::read( result_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(convert_actions_dag)); query_plan.addStep(std::move(converting)); } diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 3d91da240cc..6f8a9189941 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1074,7 +1074,7 @@ std::optional StorageDistributed::distributedWriteBetweenDistribu return pipeline; } -static ActionsDAGPtr getFilterFromQuery(const ASTPtr & ast, ContextPtr context) +static std::optional getFilterFromQuery(const ASTPtr & ast, ContextPtr context) { QueryPlan plan; SelectQueryOptions options; @@ -1118,7 +1118,7 @@ static ActionsDAGPtr getFilterFromQuery(const ASTPtr & ast, ContextPtr context) } if (!source) - return nullptr; + return {}; return source->detachFilterActionsDAG(); } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 2422bcd700b..4611371a471 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1233,9 +1233,9 @@ StorageFileSource::~StorageFileSource() beforeDestroy(); } -void StorageFileSource::setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) +void StorageFileSource::setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) { - setKeyConditionImpl(filter_actions_dag.get(), context_, block_for_format); + setKeyConditionImpl(filter_actions_dag, context_, block_for_format); } diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index ac094aeb489..e9424527997 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -265,7 +265,7 @@ private: return storage->getName(); } - void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override; + void setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) override; bool tryGetCountFromCache(const struct stat & file_stat); diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 316f398b476..8b6a9a4d4bb 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -273,8 +273,8 @@ void StorageMaterializedView::read( * They may be added in case of distributed query with JOIN. * In that case underlying table returns joined columns as well. 
*/ - converting_actions->removeUnusedActions(); - auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), converting_actions); + converting_actions.removeUnusedActions(); + auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(converting_actions)); converting_step->setStepDescription("Convert target table structure to MaterializedView structure"); query_plan.addStep(std::move(converting_step)); } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index c3fdad3a8f2..374abd0b0a5 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -407,7 +407,7 @@ void ReadFromMerge::addFilter(FilterDAGInfo filter) { output_stream->header = FilterTransform::transformHeader( output_stream->header, - filter.actions.get(), + filter.actions ? &*filter.actions : nullptr, filter.column_name, filter.do_remove_column); pushed_down_filters.push_back(std::move(filter)); @@ -628,7 +628,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ auto alias_actions = ExpressionAnalyzer(required_columns_expr_list, syntax_result, context).getActionsDAG(true); - column_names_as_aliases = alias_actions->getRequiredColumns().getNames(); + column_names_as_aliases = alias_actions.getRequiredColumns().getNames(); if (column_names_as_aliases.empty()) column_names_as_aliases.push_back(ExpressionActions::getSmallestColumn(storage_metadata_snapshot->getColumns().getAllPhysical()).name); } @@ -662,7 +662,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ { auto filter_step = std::make_unique( child.plan.getCurrentDataStream(), - ActionsDAG::clone(filter_info.actions), + std::move(*ActionsDAG::clone(&*filter_info.actions)), filter_info.column_name, filter_info.do_remove_column); @@ -1060,7 +1060,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(database_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1074,7 +1074,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(table_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1089,7 +1089,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(database_name)); auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1102,7 +1102,7 @@ void ReadFromMerge::addVirtualColumns( column.column = column.type->createColumnConst(0, Field(table_name)); auto adding_column_dag = 
ActionsDAG::makeAddingColumnActions(std::move(column)); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), adding_column_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(adding_column_dag)); child.plan.addStep(std::move(expression_step)); plan_header = child.plan.getCurrentDataStream().header; } @@ -1240,7 +1240,7 @@ ReadFromMerge::RowPolicyData::RowPolicyData(RowPolicyFilterPtr row_policy_filter auto expression_analyzer = ExpressionAnalyzer{expr, syntax_result, local_context}; actions_dag = expression_analyzer.getActionsDAG(false /* add_aliases */, false /* project_result */); - filter_actions = std::make_shared(ActionsDAG::clone(actions_dag), + filter_actions = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag)), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); const auto & required_columns = filter_actions->getRequiredColumnsWithTypes(); const auto & sample_block_columns = filter_actions->getSampleBlock().getNamesAndTypesList(); @@ -1278,12 +1278,12 @@ void ReadFromMerge::RowPolicyData::extendNames(Names & names) const void ReadFromMerge::RowPolicyData::addStorageFilter(SourceStepWithFilter * step) const { - step->addFilter(ActionsDAG::clone(actions_dag), filter_column_name); + step->addFilter(ActionsDAG::clone(&actions_dag), filter_column_name); } void ReadFromMerge::RowPolicyData::addFilterTransform(QueryPlan & plan) const { - auto filter_step = std::make_unique(plan.getCurrentDataStream(), ActionsDAG::clone(actions_dag), filter_column_name, true /* remove filter column */); + auto filter_step = std::make_unique(plan.getCurrentDataStream(), std::move(*ActionsDAG::clone(&actions_dag)), filter_column_name, true /* remove filter column */); plan.addStep(std::move(filter_step)); } @@ -1476,7 +1476,7 @@ void ReadFromMerge::convertAndFilterSourceStream( { pipe_columns.emplace_back(NameAndTypePair(alias.name, alias.type)); - auto actions_dag = std::make_unique(pipe_columns); + ActionsDAG actions_dag(pipe_columns); QueryTreeNodePtr query_tree = buildQueryTree(alias.expression, local_context); query_tree->setAlias(alias.name); @@ -1485,12 +1485,12 @@ void ReadFromMerge::convertAndFilterSourceStream( query_analysis_pass.run(query_tree, local_context); PlannerActionsVisitor actions_visitor(modified_query_info.planner_context, false /*use_column_identifier_as_action_node_name*/); - const auto & nodes = actions_visitor.visit(*actions_dag, query_tree); + const auto & nodes = actions_visitor.visit(actions_dag, query_tree); if (nodes.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected to have 1 output but got {}", nodes.size()); - actions_dag->addOrReplaceInOutputs(actions_dag->addAlias(*nodes.front(), alias.name)); + actions_dag.addOrReplaceInOutputs(actions_dag.addAlias(*nodes.front(), alias.name)); auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(actions_dag)); child.plan.addStep(std::move(expression_step)); } @@ -1506,7 +1506,7 @@ void ReadFromMerge::convertAndFilterSourceStream( auto dag = std::make_shared(pipe_columns); auto actions_dag = expression_analyzer.getActionsDAG(true, false); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), actions_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(actions_dag)); child.plan.addStep(std::move(expression_step)); } } @@ -1524,7 +1524,7 @@ void ReadFromMerge::convertAndFilterSourceStream( header.getColumnsWithTypeAndName(), 
convert_actions_match_columns_mode); - auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), convert_actions_dag); + auto expression_step = std::make_unique(child.plan.getCurrentDataStream(), std::move(convert_actions_dag)); child.plan.addStep(std::move(expression_step)); } diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index 94b34256d02..d6f2deca7fd 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -225,7 +225,7 @@ private: private: std::string filter_column_name; // complex filter, may contain logic operations - ActionsDAGPtr actions_dag; + ActionsDAG actions_dag; ExpressionActionsPtr filter_actions; StorageMetadataPtr storage_metadata_snapshot; }; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 9352f772ce1..b1a8a81914c 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -245,7 +245,7 @@ std::optional StorageMergeTree::totalRows(const Settings &) const return getTotalActiveSizeInRows(); } -std::optional StorageMergeTree::totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr local_context) const +std::optional StorageMergeTree::totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr local_context) const { auto parts = getVisibleDataPartsVector(local_context); return totalRowsByPartitionPredicateImpl(filter_actions_dag, local_context, parts); diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 4d819508934..56324449b34 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -65,7 +65,7 @@ public: size_t num_streams) override; std::optional totalRows(const Settings &) const override; - std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr) const override; + std::optional totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr) const override; std::optional totalBytes(const Settings &) const override; std::optional totalBytesUncompressed(const Settings &) const override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index db58d0081c6..b472710b6d8 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5556,7 +5556,7 @@ std::optional StorageReplicatedMergeTree::totalRows(const Settings & set return res; } -std::optional StorageReplicatedMergeTree::totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr local_context) const +std::optional StorageReplicatedMergeTree::totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr local_context) const { DataPartsVector parts; foreachActiveParts([&](auto & part) { parts.push_back(part); }, local_context->getSettingsRef().select_sequential_consistency); diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index f96206ce657..2e54f17d5d5 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -159,7 +159,7 @@ public: size_t num_streams) override; std::optional totalRows(const Settings & settings) const override; - std::optional totalRowsByPartitionPredicate(const ActionsDAGPtr & filter_actions_dag, ContextPtr context) const override; + std::optional totalRowsByPartitionPredicate(const ActionsDAG & filter_actions_dag, ContextPtr context) const override; std::optional 
totalBytes(const Settings & settings) const override; std::optional totalBytesUncompressed(const Settings & settings) const override; diff --git a/src/Storages/StorageTableFunction.h b/src/Storages/StorageTableFunction.h index 9507eb6ed8a..345dd62c687 100644 --- a/src/Storages/StorageTableFunction.h +++ b/src/Storages/StorageTableFunction.h @@ -112,7 +112,7 @@ public: auto step = std::make_unique( query_plan.getCurrentDataStream(), - convert_actions_dag); + std::move(convert_actions_dag)); step->setStepDescription("Converting columns"); query_plan.addStep(std::move(step)); diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index c336f597f41..ec1f803750e 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -185,9 +185,9 @@ public: String getName() const override { return name; } - void setKeyCondition(const ActionsDAGPtr & filter_actions_dag, ContextPtr context_) override + void setKeyCondition(const std::optional & filter_actions_dag, ContextPtr context_) override { - setKeyConditionImpl(filter_actions_dag.get(), context_, block_for_format); + setKeyConditionImpl(filter_actions_dag, context_, block_for_format); } Chunk generate() override; diff --git a/src/Storages/StorageValues.cpp b/src/Storages/StorageValues.cpp index 4d73f8e5c87..c1ca6244866 100644 --- a/src/Storages/StorageValues.cpp +++ b/src/Storages/StorageValues.cpp @@ -48,13 +48,13 @@ Pipe StorageValues::read( if (!prepared_pipe.empty()) { - auto dag = std::make_unique(prepared_pipe.getHeader().getColumnsWithTypeAndName()); + ActionsDAG dag(prepared_pipe.getHeader().getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs outputs; outputs.reserve(column_names.size()); for (const auto & name : column_names) - outputs.push_back(dag->getOutputs()[prepared_pipe.getHeader().getPositionByName(name)]); + outputs.push_back(dag.getOutputs()[prepared_pipe.getHeader().getPositionByName(name)]); - dag->getOutputs().swap(outputs); + dag.getOutputs().swap(outputs); auto expression = std::make_shared(std::move(dag)); prepared_pipe.addSimpleTransform([&](const Block & header) diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 2c0d5c5ca85..e2c4d67c8d1 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -177,8 +177,8 @@ void StorageView::read( /// It's expected that the columns read from storage are not constant. /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery. 
- auto materializing_actions = std::make_unique(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); - materializing_actions->addMaterializingOutputActions(); + ActionsDAG materializing_actions(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + materializing_actions.addMaterializingOutputActions(); auto materializing = std::make_unique(query_plan.getCurrentDataStream(), std::move(materializing_actions)); materializing->setStepDescription("Materialize constants after VIEW subquery"); @@ -203,7 +203,7 @@ void StorageView::read( expected_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting = std::make_unique(query_plan.getCurrentDataStream(), convert_actions_dag); + auto converting = std::make_unique(query_plan.getCurrentDataStream(), std::move(convert_actions_dag)); converting->setStepDescription("Convert VIEW subquery result to VIEW table structure"); query_plan.addStep(std::move(converting)); } diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index ba7433fb9ae..f4e6fe3df5f 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -276,7 +276,7 @@ public: StackTraceSource( const Names & column_names, Block header_, - ActionsDAGPtr && filter_dag_, + std::optional filter_dag_, ContextPtr context_, UInt64 max_block_size_, LoggerPtr log_) @@ -422,7 +422,7 @@ protected: private: ContextPtr context; Block header; - const ActionsDAGPtr filter_dag; + const std::optional filter_dag; const ActionsDAG::Node * predicate; const size_t max_block_size; diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index 56f65b57367..a32eef20aed 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -176,12 +176,12 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy); auto dag = analyzer.getActionsDAG(false); - const auto * col = &dag->findInOutputs(ast->getColumnName()); + const auto * col = &dag.findInOutputs(ast->getColumnName()); if (col->result_name != ttl_string) - col = &dag->addAlias(*col, ttl_string); + col = &dag.addAlias(*col, ttl_string); - dag->getOutputs() = {col}; - dag->removeUnusedActions(); + dag.getOutputs() = {col}; + dag.removeUnusedActions(); result.expression = std::make_shared(std::move(dag), ExpressionActionsSettings::fromContext(context_copy)); result.sets = analyzer.getPreparedSets(); diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 1bd5e80a4f9..7f54c6a6ee3 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -80,7 +80,7 @@ void buildSetsForDAG(const ActionsDAGPtr & dag, const ContextPtr & context) void filterBlockWithDAG(const ActionsDAGPtr & dag, Block & block, ContextPtr context) { buildSetsForDAG(dag, context); - auto actions = std::make_shared(ActionsDAG::clone(dag)); + auto actions = std::make_shared(std::move(*ActionsDAG::clone(dag))); Block block_with_filter = block; actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); @@ -318,9 +318,9 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - auto index_hint_dag = ActionsDAG::clone(index_hint->getActions()); + auto index_hint_dag = 
std::move(*ActionsDAG::clone(&index_hint->getActions())); ActionsDAG::NodeRawConstPtrs atoms; - for (const auto & output : index_hint_dag->getOutputs()) + for (const auto & output : index_hint_dag.getOutputs()) if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) atoms.push_back(child_copy); @@ -331,13 +331,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( if (atoms.size() > 1) { FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - res = &index_hint_dag->addFunction(func_builder_and, atoms, {}); + res = &index_hint_dag.addFunction(func_builder_and, atoms, {}); } if (!res->result_type->equals(*node->result_type)) - res = &index_hint_dag->addCast(*res, node->result_type, {}); + res = &index_hint_dag.addCast(*res, node->result_type, {}); - additional_nodes.splice(additional_nodes.end(), ActionsDAG::detachNodes(std::move(*index_hint_dag))); + additional_nodes.splice(additional_nodes.end(), ActionsDAG::detachNodes(std::move(index_hint_dag))); return res; } } diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 8f39f0da5af..30ae1f95593 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1130,7 +1130,7 @@ void StorageWindowView::read( { auto converting_actions = ActionsDAG::makeConvertingActions( target_header.getColumnsWithTypeAndName(), wv_header.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Name); - auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), converting_actions); + auto converting_step = std::make_unique(query_plan.getCurrentDataStream(), std::move(converting_actions)); converting_step->setStepDescription("Convert Target table structure to WindowView structure"); query_plan.addStep(std::move(converting_step)); } From 17c6b97cbcb90e19c236708116f20ef3f88cc9c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 6 Jul 2024 03:16:13 +0200 Subject: [PATCH 0291/2361] Fix error --- programs/local/LocalServer.cpp | 7 ++++++- src/Databases/IDatabase.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 41bb5604a52..269bffc2d56 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -737,7 +737,12 @@ void LocalServer::processConfig() DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); std::string default_database = server_settings.default_database; - DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context)); + { + DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context); + if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil) + DatabaseCatalog::instance().addUUIDMapping(uuid); + DatabaseCatalog::instance().attachDatabase(default_database, std::move(database)); + } global_context->setCurrentDatabase(default_database); if (getClientConfiguration().has("path")) diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index b00f2fe4baf..3065c8ae6b5 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -358,6 +358,7 @@ public: std::lock_guard lock{mutex}; return database_name; } + /// Get UUID of database. 
virtual UUID getUUID() const { return UUIDHelpers::Nil; } From ebb10d7f8fe16e533593178a1778632c00a3c1b7 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 6 Jul 2024 02:12:01 +0000 Subject: [PATCH 0292/2361] add rebuild option in projection and LWD --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Core/SettingsEnums.cpp | 3 +- src/Core/SettingsEnums.h | 1 + src/Interpreters/InterpreterDeleteQuery.cpp | 56 +++++++++++++++---- ...61_lightweight_delete_projection.reference | 3 + .../03161_lightweight_delete_projection.sql | 27 +++++++++ 7 files changed, 80 insertions(+), 14 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4a343d864db..bd691fe0dee 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -612,7 +612,7 @@ class IColumn; M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \ M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \ M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete.", 0) \ + M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection of this table then do lightweight delete, or do lightweight delete then rebuild projections.", 0) \ M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. This is useful for debugging and \"undelete\" scenarios", 0) \ M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \ M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 6af6b4b15aa..951dd4d74f3 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -61,7 +61,7 @@ static std::initializer_listgetSettingsRef().lightweight_mutation_projection_mode; - if (mode == LightweightMutationProjectionMode::THROW) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DELETE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", - table->getStorageID().getFullTableName()); - } - else if (mode == LightweightMutationProjectionMode::DROP) + + auto dropOrClearProjections = [&](bool isDrop) { std::vector all_projections = metadata_snapshot->projections.getAllRegisteredNames(); - context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync)); - /// Drop projections first so that lightweight delete can be performed. 
for (const auto & projection : all_projections) { String alter_query = "ALTER TABLE " + table->getStorageID().getFullTableName() + (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) - + " DROP PROJECTION IF EXISTS " + projection; + + (isDrop ? " DROP" : " CLEAR") +" PROJECTION " + projection; ParserAlterQuery parser; ASTPtr alter_ast = parseQuery( @@ -151,6 +143,48 @@ BlockIO InterpreterDeleteQuery::execute() InterpreterAlterQuery alter_interpreter(alter_ast, context); alter_interpreter.execute(); } + + return all_projections; + }; + + if (mode == LightweightMutationProjectionMode::THROW) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DELETE query is not supported for table {} as it has projections. " + "User should drop all the projections manually before running the query", + table->getStorageID().getFullTableName()); + } + else if (mode == LightweightMutationProjectionMode::DROP) + { + dropOrClearProjections(true); + } + else if (mode == LightweightMutationProjectionMode::REBUILD) + { + std::vector all_projections{dropOrClearProjections(false)}; + BlockIO res = lightweightDelete(); + + for (const auto & projection : all_projections) + { + String alter_query = + "ALTER TABLE " + table->getStorageID().getFullTableName() + + (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) + + " MATERIALIZE PROJECTION " + projection; + + ParserAlterQuery parser; + ASTPtr alter_ast = parseQuery( + parser, + alter_query.data(), + alter_query.data() + alter_query.size(), + "ALTER query", + 0, + DBMS_DEFAULT_MAX_PARSER_DEPTH, + DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); + + InterpreterAlterQuery alter_interpreter(alter_ast, context); + alter_interpreter.execute(); + } + + return res; } else { diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index c5a6cbab0bc..307d3cb53fc 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,2 +1,5 @@ 1231 John 33 8888 Alice 50 +6666 Ksenia 48 +8888 Alice 50 +p users 3 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index b189388e356..fb32646b46a 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -19,6 +19,8 @@ DELETE FROM users WHERE uid = 8888 SETTINGS lightweight_mutation_projection_mode DELETE FROM users WHERE uid = 6666 SETTINGS lightweight_mutation_projection_mode = 'drop'; +SYSTEM FLUSH LOGS; + -- expecting no projection SELECT name, @@ -29,3 +31,28 @@ WHERE (database = currentDatabase()) AND (`table` = 'users'); SELECT * FROM users ORDER BY uid; DROP TABLE users; + +CREATE TABLE users ( + uid Int16, + name String, + age Int16, + projection p (select * order by age) +) ENGINE = MergeTree order by uid; + +INSERT INTO users VALUES (1231, 'John', 33), (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'rebuild'; + +SELECT * FROM users ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- expecting projection p with 3 rows is active +SELECT + name, + `table`, + rows, +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND active = 1; + +DROP TABLE users; \ No newline 
at end of file From 6e5e680797f5b2147455826e4e223c27be5039a6 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 6 Jul 2024 05:42:21 +0000 Subject: [PATCH 0293/2361] bump libprotobuf-mutator, fix build --- contrib/libprotobuf-mutator | 2 +- src/AggregateFunctions/fuzzers/CMakeLists.txt | 2 +- ..._function_state_deserialization_fuzzer.cpp | 24 +------------------ src/Core/fuzzers/CMakeLists.txt | 2 +- src/Core/fuzzers/names_and_types_fuzzer.cpp | 22 ----------------- src/DataTypes/fuzzers/CMakeLists.txt | 2 +- .../data_type_deserialization_fuzzer.cpp | 22 ----------------- src/Formats/fuzzers/CMakeLists.txt | 2 +- src/Formats/fuzzers/format_fuzzer.cpp | 21 ---------------- src/Storages/fuzzers/CMakeLists.txt | 2 +- .../fuzzers/columns_description_fuzzer.cpp | 21 ---------------- 11 files changed, 7 insertions(+), 115 deletions(-) diff --git a/contrib/libprotobuf-mutator b/contrib/libprotobuf-mutator index a304ec48dcf..b922c8ab900 160000 --- a/contrib/libprotobuf-mutator +++ b/contrib/libprotobuf-mutator @@ -1 +1 @@ -Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf +Subproject commit b922c8ab9004ef9944982e4f165e2747b13223fa diff --git a/src/AggregateFunctions/fuzzers/CMakeLists.txt b/src/AggregateFunctions/fuzzers/CMakeLists.txt index 907a275b4b3..1ce0c52feb7 100644 --- a/src/AggregateFunctions/fuzzers/CMakeLists.txt +++ b/src/AggregateFunctions/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions) +target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE clickhouse_functions clickhouse_aggregate_functions) diff --git a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp index a956d9906bc..31fc93e4288 100644 --- a/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp +++ b/src/AggregateFunctions/fuzzers/aggregate_function_state_deserialization_fuzzer.cpp @@ -12,33 +12,11 @@ #include -#include - +#include #include #include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -class IFunctionBase; -using FunctionBasePtr = std::shared_ptr; - -FunctionBasePtr createFunctionBaseCast( - ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for aggregate_function_state_deserialization_fuzzer"); -} - -} - - extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/Core/fuzzers/CMakeLists.txt b/src/Core/fuzzers/CMakeLists.txt index 51db6fa0b53..61d6b9629eb 100644 --- a/src/Core/fuzzers/CMakeLists.txt +++ b/src/Core/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp) -target_link_libraries (names_and_types_fuzzer PRIVATE dbms) +target_link_libraries (names_and_types_fuzzer PRIVATE clickhouse_functions) diff --git a/src/Core/fuzzers/names_and_types_fuzzer.cpp b/src/Core/fuzzers/names_and_types_fuzzer.cpp index 74debedf2a3..6fdd8703014 100644 --- a/src/Core/fuzzers/names_and_types_fuzzer.cpp +++ b/src/Core/fuzzers/names_and_types_fuzzer.cpp @@ -1,29 +1,7 @@ -#include -#include #include #include -namespace DB -{ - 
-namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -class IFunctionBase; -using FunctionBasePtr = std::shared_ptr; - -FunctionBasePtr createFunctionBaseCast( - ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for names_and_types_fuzzer"); -} - -} - - extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index 939bf5f5e3f..e54ef0a860c 100644 --- a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions) +target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_functions clickhouse_aggregate_functions) diff --git a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp index 7d9a0513d18..0ae325871fb 100644 --- a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp +++ b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp @@ -8,33 +8,11 @@ #include #include -#include - #include #include -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -class IFunctionBase; -using FunctionBasePtr = std::shared_ptr; - -FunctionBasePtr createFunctionBaseCast( - ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for data_type_deserialization_fuzzer"); -} - -} - - extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/Formats/fuzzers/CMakeLists.txt b/src/Formats/fuzzers/CMakeLists.txt index 38009aeec1d..b8a7e78b6e2 100644 --- a/src/Formats/fuzzers/CMakeLists.txt +++ b/src/Formats/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS}) -target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions) +target_link_libraries(format_fuzzer PRIVATE clickhouse_functions clickhouse_aggregate_functions) diff --git a/src/Formats/fuzzers/format_fuzzer.cpp b/src/Formats/fuzzers/format_fuzzer.cpp index 2c1ec65e54d..27f7d7b292f 100644 --- a/src/Formats/fuzzers/format_fuzzer.cpp +++ b/src/Formats/fuzzers/format_fuzzer.cpp @@ -3,7 +3,6 @@ #include #include -#include #include #include @@ -21,26 +20,6 @@ #include -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -class IFunctionBase; -using FunctionBasePtr = std::shared_ptr; - -FunctionBasePtr createFunctionBaseCast( - ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for format_fuzzer"); -} - -} - - extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try diff --git a/src/Storages/fuzzers/CMakeLists.txt b/src/Storages/fuzzers/CMakeLists.txt index 719b9b77cd9..7bee2da2e26 100644 --- a/src/Storages/fuzzers/CMakeLists.txt +++ b/src/Storages/fuzzers/CMakeLists.txt @@ -4,4 +4,4 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.c target_link_libraries (mergetree_checksum_fuzzer 
PRIVATE dbms) clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp) -target_link_libraries (columns_description_fuzzer PRIVATE dbms) +target_link_libraries (columns_description_fuzzer PRIVATE clickhouse_functions) diff --git a/src/Storages/fuzzers/columns_description_fuzzer.cpp b/src/Storages/fuzzers/columns_description_fuzzer.cpp index ac285ea50f7..e10e0cc52f5 100644 --- a/src/Storages/fuzzers/columns_description_fuzzer.cpp +++ b/src/Storages/fuzzers/columns_description_fuzzer.cpp @@ -1,28 +1,7 @@ -#include #include #include -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -class IFunctionBase; -using FunctionBasePtr = std::shared_ptr; - -FunctionBasePtr createFunctionBaseCast( - ContextPtr, const char *, const ColumnsWithTypeAndName &, const DataTypePtr &, std::optional, CastType) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Type conversions are not implemented for columns_description_fuzzer"); -} - -} - - extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) { try From c13371166c9a9545155c9274d36c3b91a1ade5cb Mon Sep 17 00:00:00 2001 From: yariks5s Date: Sun, 7 Jul 2024 22:25:15 +0000 Subject: [PATCH 0294/2361] add import re to tests --- tests/integration/test_storage_hdfs/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index da46756841d..9a166cba2ab 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -2,6 +2,7 @@ import os import pytest import time +import re from helpers.cluster import ClickHouseCluster, is_arm from helpers.client import QueryRuntimeException from helpers.test_tools import TSV From a3c4cbfce257f171e66e04598ac9eae548c3836f Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 8 Jul 2024 13:57:54 +0000 Subject: [PATCH 0295/2361] clang-tidy, fix cp with minio --- docker/test/stateless/setup_minio.sh | 2 +- .../03203_hive_style_partitioning.sh | 52 +++++++++---------- utils/keeper-bench/Runner.cpp | 3 +- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index 2b9433edd20..aacb9d88a45 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -101,7 +101,7 @@ upload_data() { # shellcheck disable=SC2045 for file in $(ls "${data_path}"); do echo "${file}"; - ./mc cp "${data_path}"/"${file}" clickminio/test/"${file}"; + ./mc cp --recursive "${data_path}"/"${file}" clickminio/test/"${file}"; done } diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 544fd17ffff..334bfef4f02 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -43,29 +43,29 @@ $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE 
column0 = _column0; -SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _non_existing_column FROM url('http://localhost:11111/test/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" +SELECT *, _non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; -SELECT *, _column0 FROM url('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c 
"UNKNOWN_IDENTIFIER" @@ -75,28 +75,28 @@ $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; -SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _non_existing_column FROM s3('http://localhost:11111/test/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=*/sample.parquet') WHERE 
column0 = _column0; +SELECT *, _non_existing_column FROM s3('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = _column0; """ $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; -SELECT *, _column0 FROM s3('http://localhost:11111/test/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" diff --git a/utils/keeper-bench/Runner.cpp b/utils/keeper-bench/Runner.cpp index 587e015b340..f8a0e37d1a9 100644 --- a/utils/keeper-bench/Runner.cpp +++ b/utils/keeper-bench/Runner.cpp @@ -544,7 +544,8 @@ struct ZooKeeperRequestFromLogReader file_read_buf = DB::wrapReadBufferWithCompressionMethod(std::move(file_read_buf), compression_method); DB::SingleReadBufferIterator read_buffer_iterator(std::move(file_read_buf)); - auto [columns_description, format] = DB::detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); + std::string sample_path; + auto [columns_description, format] = DB::detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); DB::ColumnsWithTypeAndName columns; columns.reserve(columns_description.size()); From 1237f93182db21f00df9ca7913619ee63d75850b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 8 Jul 2024 15:06:52 +0000 Subject: [PATCH 0296/2361] Fixing some crashes. --- src/Interpreters/ExpressionActions.cpp | 2 +- src/Planner/Planner.cpp | 8 +++++--- src/Planner/PlannerJoins.cpp | 8 ++++---- src/Planner/PlannerJoins.h | 4 ++-- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- 5 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 399f4f2ff4f..1c6c3f2556b 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -59,7 +59,7 @@ ExpressionActions::ExpressionActions(ActionsDAG actions_dag_, const ExpressionAc #if USE_EMBEDDED_COMPILER if (settings.can_compile_expressions && settings.compile_expressions == CompileExpressions::yes) - actions_dag->compileExpressions(settings.min_count_to_compile_expression, lazy_executed_nodes); + actions_dag.compileExpressions(settings.min_count_to_compile_expression, lazy_executed_nodes); #endif linearizeActions(lazy_executed_nodes); diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 48e42099ce8..0b10cef82ce 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -212,9 +212,11 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & if (!read_from_dummy) continue; - auto filter_actions = read_from_dummy->detachFilterActionsDAG(); - const auto & table_node = dummy_storage_to_table.at(&read_from_dummy->getStorage()); - res[table_node] = FiltersForTableExpression{std::move(filter_actions), read_from_dummy->getPrewhereInfo()}; + if (auto filter_actions = read_from_dummy->detachFilterActionsDAG()) + { + const auto & table_node = dummy_storage_to_table.at(&read_from_dummy->getStorage()); + res[table_node] = FiltersForTableExpression{std::move(filter_actions), read_from_dummy->getPrewhereInfo()}; + } } return res; diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index db9678d91a6..7772336f7c0 100644 --- a/src/Planner/PlannerJoins.cpp +++ 
b/src/Planner/PlannerJoins.cpp
@@ -588,11 +588,11 @@ JoinClausesAndActions buildJoinClausesAndActions(
         }
     }
 
-    result.left_join_expressions_actions = std::move(left_join_actions);
-    //result.left_join_tmp_expression_actions = std::move(left_join_actions);
+    result.left_join_expressions_actions = std::move(*ActionsDAG::clone(&left_join_actions));
+    result.left_join_tmp_expression_actions = std::move(left_join_actions);
     result.left_join_expressions_actions.removeUnusedActions(join_left_actions_names);
-    result.right_join_expressions_actions = std::move(right_join_actions);
-    //result.right_join_tmp_expression_actions = std::move(right_join_actions);
+    result.right_join_expressions_actions = std::move(*ActionsDAG::clone(&right_join_actions));
+    result.right_join_tmp_expression_actions = std::move(right_join_actions);
     result.right_join_expressions_actions.removeUnusedActions(join_right_actions_names);
 
     if (is_inequal_join)
diff --git a/src/Planner/PlannerJoins.h b/src/Planner/PlannerJoins.h
index 3735c373acc..d8665ab7739 100644
--- a/src/Planner/PlannerJoins.h
+++ b/src/Planner/PlannerJoins.h
@@ -182,8 +182,8 @@ struct JoinClausesAndActions
     /// Join clauses. Actions dag nodes point into join_expression_actions.
     JoinClauses join_clauses;
     /// Whole JOIN ON section expressions
-    // ActionsDAGPtr left_join_tmp_expression_actions;
-    // ActionsDAGPtr right_join_tmp_expression_actions;
+    ActionsDAG left_join_tmp_expression_actions;
+    ActionsDAG right_join_tmp_expression_actions;
     /// Left join expressions actions
     ActionsDAG left_join_expressions_actions;
     /// Right join expressions actions
diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
index bc878e7ee49..9ca79fde26f 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
@@ -1520,7 +1520,7 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes)
     /// TODO: Get rid of filter_actions_dag in query_info after we move analysis of
     /// parallel replicas and unused shards into optimization, similar to projection analysis.
if (filter_actions_dag) - query_info.filter_actions_dag = std::make_shared(std::move(*filter_actions_dag)); + query_info.filter_actions_dag = std::make_shared(std::move(*ActionsDAG::clone(&*filter_actions_dag))); buildIndexes( indexes, From a2b17b01f9561fd1853851932a2ae77513c49e26 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 8 Jul 2024 17:47:56 +0000 Subject: [PATCH 0297/2361] fix filling of missed subcolumns --- src/Interpreters/inplaceBlockConversions.cpp | 38 ++++++++---- src/Storages/MergeTree/IMergeTreeReader.cpp | 48 +++++++++++---- .../MergeTree/MergeTreeReaderCompact.cpp | 60 ++++++++++++------- .../MergeTree/MergeTreeReaderCompact.h | 1 + .../03203_fill_missed_subcolumns.reference | 31 ++++++++++ .../03203_fill_missed_subcolumns.sql | 47 +++++++++++++++ 6 files changed, 181 insertions(+), 44 deletions(-) create mode 100644 tests/queries/0_stateless/03203_fill_missed_subcolumns.reference create mode 100644 tests/queries/0_stateless/03203_fill_missed_subcolumns.sql diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index 239cce5b427..f7d8a2a2daf 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -35,8 +36,13 @@ namespace /// Add all required expressions for missing columns calculation void addDefaultRequiredExpressionsRecursively( - const Block & block, const String & required_column_name, DataTypePtr required_column_type, - const ColumnsDescription & columns, ASTPtr default_expr_list_accum, NameSet & added_columns, bool null_as_default) + const Block & block, + const String & required_column_name, + DataTypePtr required_column_type, + const ColumnsDescription & columns, + ASTPtr default_expr_list_accum, + NameSet & added_columns, + bool null_as_default) { checkStackSize(); @@ -273,6 +279,20 @@ static std::unordered_map collectOffsetsColumns( return offsets_columns; } +static ColumnPtr createColumnWithDefaultValue(const IDataType & data_type, const String & subcolumn_name, size_t num_rows) +{ + auto column = data_type.createColumnConstWithDefaultValue(num_rows); + + if (subcolumn_name.empty()) + return column->convertToFullColumnIfConst(); + + /// Firstly get subcolumn from const column and then replicate. 
+ column = assert_cast(*column).getDataColumnPtr(); + column = data_type.getSubcolumn(subcolumn_name, column); + + return ColumnConst::create(std::move(column), num_rows)->convertToFullColumnIfConst(); +} + void fillMissingColumns( Columns & res_columns, size_t num_rows, @@ -298,21 +318,19 @@ void fillMissingColumns( auto requested_column = requested_columns.begin(); for (size_t i = 0; i < num_columns; ++i, ++requested_column) { - const auto & [name, type] = *requested_column; - - if (res_columns[i] && partially_read_columns.contains(name)) + if (res_columns[i] && partially_read_columns.contains(requested_column->name)) res_columns[i] = nullptr; if (res_columns[i]) continue; - if (metadata_snapshot && metadata_snapshot->getColumns().hasDefault(name)) + if (metadata_snapshot && metadata_snapshot->getColumns().hasDefault(requested_column->getNameInStorage())) continue; std::vector current_offsets; size_t num_dimensions = 0; - const auto * array_type = typeid_cast(type.get()); + const auto * array_type = typeid_cast(requested_column->type.get()); if (array_type && !offsets_columns.empty()) { num_dimensions = getNumberOfDimensions(*array_type); @@ -348,10 +366,10 @@ void fillMissingColumns( if (!current_offsets.empty()) { size_t num_empty_dimensions = num_dimensions - current_offsets.size(); - auto scalar_type = createArrayOfType(getBaseTypeOfArray(type), num_empty_dimensions); + auto scalar_type = createArrayOfType(getBaseTypeOfArray(requested_column->getTypeInStorage()), num_empty_dimensions); size_t data_size = assert_cast(*current_offsets.back()).getData().back(); - res_columns[i] = scalar_type->createColumnConstWithDefaultValue(data_size)->convertToFullColumnIfConst(); + res_columns[i] = createColumnWithDefaultValue(*scalar_type, requested_column->getSubcolumnName(), data_size); for (auto it = current_offsets.rbegin(); it != current_offsets.rend(); ++it) res_columns[i] = ColumnArray::create(res_columns[i], *it); @@ -360,7 +378,7 @@ void fillMissingColumns( { /// We must turn a constant column into a full column because the interpreter could infer /// that it is constant everywhere but in some blocks (from other parts) it can be a full column. - res_columns[i] = type->createColumnConstWithDefaultValue(num_rows)->convertToFullColumnIfConst(); + res_columns[i] = createColumnWithDefaultValue(*requested_column->getTypeInStorage(), requested_column->getSubcolumnName(), num_rows); } } } diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index 4ad7f6ef991..aff1001163e 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -144,19 +145,26 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns throw Exception(ErrorCodes::LOGICAL_ERROR, "invalid number of columns passed to MergeTreeReader::fillMissingColumns. " "Expected {}, got {}", num_columns, res_columns.size()); - /// Convert columns list to block. - /// TODO: rewrite with columns interface. It will be possible after changes in ExpressionActions. - auto name_and_type = requested_columns.begin(); - for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type) - { - if (res_columns[pos] == nullptr) - continue; + NameSet full_requested_columns_set; + NamesAndTypesList full_requested_columns; - additional_columns.insert({res_columns[pos], name_and_type->type, name_and_type->name}); + /// Convert columns list to block. 
And convert subcolumns to full columns. + /// TODO: rewrite with columns interface. It will be possible after changes in ExpressionActions. + + auto it = requested_columns.begin(); + for (size_t pos = 0; pos < num_columns; ++pos, ++it) + { + auto name_in_storage = it->getNameInStorage(); + + if (full_requested_columns_set.emplace(name_in_storage).second) + full_requested_columns.emplace_back(name_in_storage, it->getTypeInStorage()); + + if (res_columns[pos]) + additional_columns.insert({res_columns[pos], it->type, it->name}); } auto dag = DB::evaluateMissingDefaults( - additional_columns, requested_columns, + additional_columns, full_requested_columns, storage_snapshot->metadata->getColumns(), data_part_info_for_read->getContext()); @@ -170,9 +178,18 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns } /// Move columns from block. - name_and_type = requested_columns.begin(); - for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type) - res_columns[pos] = std::move(additional_columns.getByName(name_and_type->name).column); + it = requested_columns.begin(); + for (size_t pos = 0; pos < num_columns; ++pos, ++it) + { + auto name_in_storage = it->getNameInStorage(); + res_columns[pos] = additional_columns.getByName(name_in_storage).column; + + if (it->isSubcolumn()) + { + const auto & type_in_storage = it->getTypeInStorage(); + res_columns[pos] = type_in_storage->getSubcolumn(it->getSubcolumnName(), res_columns[pos]); + } + } } catch (Exception & e) { @@ -192,7 +209,12 @@ bool IMergeTreeReader::isSubcolumnOffsetsOfNested(const String & name_in_storage if (!data_part_info_for_read->isWidePart() || subcolumn_name != "size0") return false; - return Nested::isSubcolumnOfNested(name_in_storage, part_columns); + auto split = Nested::splitName(name_in_storage); + if (split.second.empty()) + return false; + + auto nested_column = part_columns.tryGetColumn(GetColumnsOptions::All, split.first); + return nested_column && isNested(nested_column->type); } String IMergeTreeReader::getColumnNameInPart(const NameAndTypePair & required_column) const diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index a2b8f0ad96f..fde9dafffb8 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -60,39 +60,25 @@ void MergeTreeReaderCompact::fillColumnPositions() for (size_t i = 0; i < columns_num; ++i) { - const auto & column_to_read = columns_to_read[i]; - + auto & column_to_read = columns_to_read[i]; auto position = data_part_info_for_read->getColumnPosition(column_to_read.getNameInStorage()); - bool is_array = isArray(column_to_read.type); if (column_to_read.isSubcolumn()) { - auto storage_column_from_part = getColumnInPart( - {column_to_read.getNameInStorage(), column_to_read.getTypeInStorage()}); + NameAndTypePair column_in_storage{column_to_read.getNameInStorage(), column_to_read.getTypeInStorage()}; + auto storage_column_from_part = getColumnInPart(column_in_storage); auto subcolumn_name = column_to_read.getSubcolumnName(); if (!storage_column_from_part.type->hasSubcolumn(subcolumn_name)) position.reset(); } + column_positions[i] = std::move(position); + /// If array of Nested column is missing in part, /// we have to read its offsets if they exist. 
- if (!position && is_array) - { - auto column_to_read_with_subcolumns = getColumnConvertedToSubcolumnOfNested(column_to_read); - auto name_level_for_offsets = findColumnForOffsets(column_to_read_with_subcolumns); - - if (name_level_for_offsets.has_value()) - { - column_positions[i] = data_part_info_for_read->getColumnPosition(name_level_for_offsets->first); - columns_for_offsets[i] = name_level_for_offsets; - partially_read_columns.insert(column_to_read.name); - } - } - else - { - column_positions[i] = std::move(position); - } + if (!column_positions[i]) + findPositionForMissedNested(i); } } @@ -125,6 +111,38 @@ NameAndTypePair MergeTreeReaderCompact::getColumnConvertedToSubcolumnOfNested(co Nested::concatenateName(name_in_storage, subcolumn_name)); } +void MergeTreeReaderCompact::findPositionForMissedNested(size_t pos) +{ + auto & column = columns_to_read[pos]; + + bool is_array = isArray(column.type); + bool is_offsets_subcolumn = isArray(column.getTypeInStorage()) && column.getSubcolumnName() == "size0"; + + if (!is_array && !is_offsets_subcolumn) + return; + + NameAndTypePair column_in_storage{column.getNameInStorage(), column.getTypeInStorage()}; + + auto column_to_read_with_subcolumns = getColumnConvertedToSubcolumnOfNested(column_in_storage); + auto name_level_for_offsets = findColumnForOffsets(column_to_read_with_subcolumns); + + if (!name_level_for_offsets) + return; + + column_positions[pos] = data_part_info_for_read->getColumnPosition(name_level_for_offsets->first); + + if (is_offsets_subcolumn) + { + /// Read offsets from antoher array from the same Nested column. + column = {name_level_for_offsets->first, column.getSubcolumnName(), column.getTypeInStorage(), column.type}; + } + else + { + columns_for_offsets[pos] = std::move(name_level_for_offsets); + partially_read_columns.insert(column.name); + } +} + void MergeTreeReaderCompact::readData( const NameAndTypePair & name_and_type, ColumnPtr & column, diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.h b/src/Storages/MergeTree/MergeTreeReaderCompact.h index a783e595af5..22eabd47930 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.h +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.h @@ -36,6 +36,7 @@ public: protected: void fillColumnPositions(); NameAndTypePair getColumnConvertedToSubcolumnOfNested(const NameAndTypePair & column); + void findPositionForMissedNested(size_t pos); using InputStreamGetter = ISerialization::InputStreamGetter; diff --git a/tests/queries/0_stateless/03203_fill_missed_subcolumns.reference b/tests/queries/0_stateless/03203_fill_missed_subcolumns.reference new file mode 100644 index 00000000000..872eb7da3c8 --- /dev/null +++ b/tests/queries/0_stateless/03203_fill_missed_subcolumns.reference @@ -0,0 +1,31 @@ +0 +2 +4 +6 +8 +0 +2 +4 +6 +8 +1 ['aaa',NULL] [NULL,NULL] +2 ['ccc'] [NULL] +3 [NULL] [NULL] +4 [NULL,'bbb'] ['ddd',NULL] +5 [NULL] [NULL] +1 2 2 +2 1 1 +3 1 1 +4 2 2 +5 1 1 +1 [0,1] [1,1] +2 [0] [1] +3 [1] [1] +4 [1,0] [0,1] +5 [1] [1] +1 ('foo','bar') [1,NULL,3] +2 ('aaa','bbb') [1,NULL,3] +3 ('ccc','ddd') [4,5,6] +1 foo bar 3 [0,1,0] +2 foo bar 3 [0,1,0] +3 ccc ddd 3 [0,0,0] diff --git a/tests/queries/0_stateless/03203_fill_missed_subcolumns.sql b/tests/queries/0_stateless/03203_fill_missed_subcolumns.sql new file mode 100644 index 00000000000..2789c9de35c --- /dev/null +++ b/tests/queries/0_stateless/03203_fill_missed_subcolumns.sql @@ -0,0 +1,47 @@ +DROP TABLE IF EXISTS t_missed_subcolumns; + +-- .null subcolumn + +CREATE TABLE t_missed_subcolumns (x UInt32) ENGINE = 
MergeTree ORDER BY tuple(); +INSERT INTO t_missed_subcolumns SELECT * FROM numbers(10); + +ALTER TABLE t_missed_subcolumns ADD COLUMN `y` Nullable(UInt32); + +INSERT INTO t_missed_subcolumns SELECT number, if(number % 2, NULL, number) FROM numbers(10); + +SELECT x FROM t_missed_subcolumns WHERE y IS NOT NULL SETTINGS optimize_functions_to_subcolumns = 1; +SELECT x FROM t_missed_subcolumns WHERE y IS NOT NULL SETTINGS optimize_functions_to_subcolumns = 0; + +DROP TABLE IF EXISTS t_missed_subcolumns; + +-- .null and .size0 subcolumn in array + +CREATE TABLE t_missed_subcolumns (id UInt64, `n.a` Array(Nullable(String))) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_missed_subcolumns VALUES (1, ['aaa', NULL]) (2, ['ccc']) (3, [NULL]); +ALTER TABLE t_missed_subcolumns ADD COLUMN `n.b` Array(Nullable(String)); +INSERT INTO t_missed_subcolumns VALUES (4, [NULL, 'bbb'], ['ddd', NULL]), (5, [NULL], [NULL]); + +SELECT id, n.a, n.b FROM t_missed_subcolumns ORDER BY id; +SELECT id, n.a.size0, n.b.size0 FROM t_missed_subcolumns ORDER BY id; +SELECT id, n.a.null, n.b.null FROM t_missed_subcolumns ORDER BY id; + +DROP TABLE IF EXISTS t_missed_subcolumns; + +-- subcolumns and custom defaults + +CREATE TABLE t_missed_subcolumns (id UInt64) ENGINE = MergeTree ORDER BY id; +SYSTEM STOP MERGES t_missed_subcolumns; + +INSERT INTO t_missed_subcolumns VALUES (1); + +ALTER TABLE t_missed_subcolumns ADD COLUMN t Tuple(a String, b String) DEFAULT ('foo', 'bar'); +INSERT INTO t_missed_subcolumns VALUES (2, ('aaa', 'bbb')); + +ALTER TABLE t_missed_subcolumns ADD COLUMN arr Array(Nullable(UInt64)) DEFAULT [1, NULL, 3]; +INSERT INTO t_missed_subcolumns VALUES (3, ('ccc', 'ddd'), [4, 5, 6]); + +SELECT id, t, arr FROM t_missed_subcolumns ORDER BY id; +SELECT id, t.a, t.b, arr.size0, arr.null FROM t_missed_subcolumns ORDER BY id; + +DROP TABLE t_missed_subcolumns; From 1505cb2b788b1e7d1f5721117f33e061e7ee21e1 Mon Sep 17 00:00:00 2001 From: skyoct Date: Tue, 9 Jul 2024 13:33:00 +0800 Subject: [PATCH 0298/2361] fix etag init --- src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index b3377e902ec..4e41e32fd32 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -201,8 +201,9 @@ Chunk StorageObjectStorageSource::generate() {.path = getUniqueStoragePathIdentifier(*configuration, *object_info, false), .size = object_info->isArchive() ? 
object_info->fileSizeInArchive() : object_info->metadata->size_bytes, .filename = &filename, - .etag = &(object_info->metadata->etag), - .last_modified = object_info->metadata->last_modified}); + .last_modified = object_info->metadata->last_modified, + .etag = &(object_info->metadata->etag) + }); const auto & partition_columns = configuration->getPartitionColumns(); if (!partition_columns.empty() && chunk_size && chunk.hasColumns()) From 943a8bce304c5610ea69a6aea7fdf354c5f71944 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Fri, 5 Jul 2024 15:41:21 +0800 Subject: [PATCH 0299/2361] Estimate should be based on column data type not only constant value --- .../ConditionSelectivityEstimator.cpp | 12 +-- .../ConditionSelectivityEstimator.h | 4 +- src/Storages/Statistics/Statistics.cpp | 38 ++-------- src/Storages/Statistics/Statistics.h | 20 ++--- .../Statistics/StatisticsCountMinSketch.cpp | 76 ++++++++++++------- .../Statistics/StatisticsCountMinSketch.h | 6 +- src/Storages/Statistics/StatisticsTDigest.cpp | 25 +++--- src/Storages/Statistics/StatisticsTDigest.h | 2 +- src/Storages/StatisticsDescription.cpp | 4 +- ...64_statistics_estimate_predicate.reference | 2 +- 10 files changed, 100 insertions(+), 89 deletions(-) diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp index 1755f0eb4df..19e3157d99a 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp @@ -16,7 +16,7 @@ void ConditionSelectivityEstimator::ColumnSelectivityEstimator::merge(String par part_statistics[part_name] = stats; } -Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateLess(Float64 val, Float64 rows) const +Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateLess(const Field & val, Float64 rows) const { if (part_statistics.empty()) return default_normal_cond_factor * rows; @@ -30,14 +30,14 @@ Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateLess( return result * rows / part_rows; } -Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateGreater(Float64 val, Float64 rows) const +Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateGreater(const Field & val, Float64 rows) const { return rows - estimateLess(val, rows); } Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(const Field & val, Float64 rows) const { - auto float_val = IStatistics::getFloat64(val); + auto float_val = StatisticsUtils::tryConvertToFloat64(val); if (part_statistics.empty()) { if (!float_val) @@ -148,7 +148,7 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode else dummy = true; auto [op, val] = extractBinaryOp(node, col); - auto float_val = IStatistics::getFloat64(val); + auto float_val = StatisticsUtils::tryConvertToFloat64(val); if (op == "equals") { if (dummy) @@ -164,13 +164,13 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode { if (dummy) return default_normal_cond_factor * total_rows; - return estimator.estimateLess(float_val.value(), total_rows); + return estimator.estimateLess(val, total_rows); } else if (op == "greater" || op == "greaterOrEquals") { if (dummy) return default_normal_cond_factor * total_rows; - return estimator.estimateGreater(float_val.value(), total_rows); + return estimator.estimateGreater(val, total_rows); } else return default_unknown_cond_factor * total_rows; 
diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.h b/src/Storages/Statistics/ConditionSelectivityEstimator.h index b9127fcd5bf..ce7fdd12e92 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.h +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.h @@ -29,9 +29,9 @@ private: void merge(String part_name, ColumnStatisticsPtr stats); - Float64 estimateLess(Float64 val, Float64 rows) const; + Float64 estimateLess(const Field & val, Float64 rows) const; - Float64 estimateGreater(Float64 val, Float64 rows) const; + Float64 estimateGreater(const Field & val, Float64 rows) const; Float64 estimateEqual(const Field & val, Float64 rows) const; }; diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index cd94ed716cd..5663c55b263 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -1,8 +1,8 @@ +#include #include #include #include #include -#include #include #include #include @@ -27,44 +27,22 @@ enum StatisticsFileVersion : UInt16 V0 = 0, }; -std::optional IStatistics::getFloat64(const Field & f) +std::optional StatisticsUtils::tryConvertToFloat64(const Field & f) { switch (f.getType()) { - case Field::Types::Bool: - return f.get(); case Field::Types::Int64: return f.get(); case Field::Types::UInt64: return f.get(); case Field::Types::Float64: return f.get(); - case Field::Types::Int128: - return f.get(); - case Field::Types::UInt128: - return f.get(); - case Field::Types::Int256: - return f.get(); - case Field::Types::UInt256: - return f.get(); - case Field::Types::Decimal32: - return f.get().getValue().value; - case Field::Types::Decimal64: - return f.get().getValue().value; - case Field::Types::Decimal128: - return f.get().getValue().value; - case Field::Types::Decimal256: - return f.get().getValue().value; - case Field::Types::IPv4: - return f.get().toUnderType(); - case Field::Types::IPv6: - return f.get().toUnderType(); default: return {}; } } -std::optional IStatistics::getString(const Field & f) +std::optional StatisticsUtils::tryConvertToString(const DB::Field & f) { if (f.getType() == Field::Types::String) return f.get(); @@ -98,7 +76,7 @@ Float64 IStatistics::estimateEqual(const Field & /*val*/) const throw Exception(ErrorCodes::LOGICAL_ERROR, "Equality estimation is not implemented for this type of statistics"); } -Float64 IStatistics::estimateLess(Float64 /*val*/) const +Float64 IStatistics::estimateLess(const Field & /*val*/) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Less-than estimation is not implemented for this type of statistics"); } @@ -113,21 +91,21 @@ Float64 IStatistics::estimateLess(Float64 /*val*/) const /// For that reason, all estimation are performed in a central place (here), and we don't simply pass the predicate to the first statistics /// object that supports it natively. 
-Float64 ColumnStatistics::estimateLess(Float64 val) const +Float64 ColumnStatistics::estimateLess(const Field & val) const { if (stats.contains(StatisticsType::TDigest)) return stats.at(StatisticsType::TDigest)->estimateLess(val); return rows * ConditionSelectivityEstimator::default_normal_cond_factor; } -Float64 ColumnStatistics::estimateGreater(Float64 val) const +Float64 ColumnStatistics::estimateGreater(const Field & val) const { return rows - estimateLess(val); } Float64 ColumnStatistics::estimateEqual(const Field & val) const { - auto float_val = IStatistics::getFloat64(val); + auto float_val = StatisticsUtils::tryConvertToFloat64(val); if (float_val.has_value() && stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) { /// 2048 is the default number of buckets in TDigest. In this case, TDigest stores exactly one value (with many rows) for every bucket. @@ -254,7 +232,7 @@ ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnStatisticsDescri { auto it = creators.find(type); if (it == creators.end()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'tdigest' 'uniq' 'count_min'", type); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'tdigest' 'uniq' and 'count_min'", type); auto stat_ptr = (it->second)(desc, stats.data_type); column_stat->stats[type] = stat_ptr; } diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index c9bf3ca4847..0df5359adfc 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -14,6 +14,14 @@ namespace DB constexpr auto STATS_FILE_PREFIX = "statistics_"; constexpr auto STATS_FILE_SUFFIX = ".stats"; + +///Returns std::nullopt if input Field cannot be converted to a concrete value +struct StatisticsUtils +{ + static std::optional tryConvertToFloat64(const Field & f); + static std::optional tryConvertToString(const Field & f); +}; + /// Statistics describe properties of the values in the column, /// e.g. how many unique values exist, /// what are the N most frequent values, @@ -36,13 +44,7 @@ public: /// Per-value estimations. /// Throws if the statistics object is not able to do a meaningful estimation. virtual Float64 estimateEqual(const Field & val) const; /// cardinality of val in the column - virtual Float64 estimateLess(Float64 val) const; /// summarized cardinality of values < val in the column - - /// Convert filed to Float64, used when estimating the number of rows. - /// Return a Float64 value if f can be represented by number, otherwise return null. 
- /// See IDataType::isValueRepresentedByNumber - static std::optional getFloat64(const Field & f); - static std::optional getString(const Field & f); + virtual Float64 estimateLess(const Field & val) const; /// summarized cardinality of values < val in the column protected: SingleStatisticsDescription stat; @@ -65,8 +67,8 @@ public: void update(const ColumnPtr & column); - Float64 estimateLess(Float64 val) const; - Float64 estimateGreater(Float64 val) const; + Float64 estimateLess(const Field & val) const; + Float64 estimateGreater(const Field & val) const; Float64 estimateEqual(const Field & val) const; private: diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index dd8ceef4e2d..abd5cf17946 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -3,6 +3,8 @@ #include #include #include +#include +#include #if USE_DATASKETCHES @@ -12,10 +14,13 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_STATISTICS; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -static constexpr auto num_hashes = 8uz; -static constexpr auto num_buckets = 2048uz; +/// Constants chosen based on rolling dices, which provides an error tolerance of 0.1% (ε = 0.001) and a confidence level of 99.9% (δ = 0.001). +/// And sketch the size is 152kb. +static constexpr auto num_hashes = 7uz; +static constexpr auto num_buckets = 2718uz; StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) : IStatistics(stat_) @@ -24,13 +29,49 @@ StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescrip { } -Float64 StatisticsCountMinSketch::estimateEqual(const Field & value) const +Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const { - if (auto float_val = IStatistics::getFloat64(value)) - return sketch.get_estimate(&float_val.value(), 8); - if (auto string_val = IStatistics::getString(value)) - return sketch.get_estimate(string_val->data(), string_val->size()); - UNREACHABLE(); + if (data_type->isValueRepresentedByNumber()) + { + /// convertFieldToType will + /// 1. convert string to number, date, datetime, IPv4, Decimal etc + /// 2. do other conversion + /// 3. return null if val larger than the range of data_type + auto val_converted = convertFieldToType(val, *data_type); + if (val_converted.isNull()) + return 0; + + /// We will get the proper data type of val_converted, for example, UInt8 for 1, UInt16 for 257. + auto data_type_converted = applyVisitor(FieldToDataType(), val_converted); + DataTypes data_types = {data_type, data_type_converted}; + auto super_type = tryGetLeastSupertype(data_types); + + /// If data_type is UInt8 but val_typed is UInt16, we should return 0. 
+ if (!super_type->equals(*data_type)) + return 0; + + return sketch.get_estimate(&val_converted, data_type->getSizeOfValueInMemory()); + } + + if (isStringOrFixedString(data_type)) + { + return sketch.get_estimate(val.get()); + } + + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Statistics 'count_min' does not support estimate constant value of type {}", val.getTypeName()); +} + +void StatisticsCountMinSketch::update(const ColumnPtr & column) +{ + size_t size = column->size(); + for (size_t row = 0; row < size; ++row) + { + if (column->isNullAt(row)) + continue; + auto data = column->getDataAt(row); + sketch.update(data.data, data.size, 1); + } } void StatisticsCountMinSketch::serialize(WriteBuffer & buf) @@ -52,28 +93,11 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) sketch = Sketch::deserialize(bytes.data(), size); } -void StatisticsCountMinSketch::update(const ColumnPtr & column) -{ - size_t size = column->size(); - for (size_t i = 0; i < size; ++i) - { - Field f; - column->get(i, f); - - if (f.isNull()) - continue; - - if (auto float_val = IStatistics::getFloat64(f)) - sketch.update(&float_val, 8, 1); - else - sketch.update(f.get(), 1); - } -} - void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); + /// Numeric, String family, IPv4, IPv6, Date family, Enum family are supported. if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index 3ea3f2dbd3b..2cc93d6e75a 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -16,13 +16,13 @@ class StatisticsCountMinSketch : public IStatistics public: StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); - Float64 estimateEqual(const Field & value) const override; + Float64 estimateEqual(const Field & val) const override; + + void update(const ColumnPtr & column) override; void serialize(WriteBuffer & buf) override; void deserialize(ReadBuffer & buf) override; - void update(const ColumnPtr & column) override; - private: using Sketch = datasketches::count_min_sketch; Sketch sketch; diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index 306338b4ba2..c19d0a0328c 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -6,7 +6,8 @@ namespace DB { namespace ErrorCodes { - extern const int ILLEGAL_STATISTICS; +extern const int ILLEGAL_STATISTICS; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; } StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & stat_) @@ -19,14 +20,14 @@ void StatisticsTDigest::update(const ColumnPtr & column) size_t rows = column->size(); for (size_t row = 0; row < rows; ++row) { - Field f; - column->get(row, f); + Field field; + column->get(row, field); - if (f.isNull()) + if (field.isNull()) continue; - if (auto float_val = IStatistics::getFloat64(f)) - t_digest.add(*float_val, 1); + if (auto field_as_float = StatisticsUtils::tryConvertToFloat64(field)) + t_digest.add(*field_as_float, 1); } } @@ -40,14 +41,20 @@ void StatisticsTDigest::deserialize(ReadBuffer & 
buf) t_digest.deserialize(buf); } -Float64 StatisticsTDigest::estimateLess(Float64 val) const +Float64 StatisticsTDigest::estimateLess(const Field & val) const { - return t_digest.getCountLessThan(val); + auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); + if (val_as_float) + return t_digest.getCountLessThan(*val_as_float); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Statistics 'tdigest' does not support estimate constant value of type {}", val.getTypeName()); } Float64 StatisticsTDigest::estimateEqual(const Field & val) const { - return t_digest.getCountEqual(IStatistics::getFloat64(val).value()); + auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); + if (val_as_float) + return t_digest.getCountEqual(*val_as_float); + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Statistics 'tdigest' does not support estimate constant value of type {}", val.getTypeName()); } void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index 8016faac7c6..801d0787eaf 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -16,7 +16,7 @@ public: void serialize(WriteBuffer & buf) override; void deserialize(ReadBuffer & buf) override; - Float64 estimateLess(Float64 val) const override; + Float64 estimateLess(const Field & val) const override; Float64 estimateEqual(const Field & val) const override; private: diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 8aa954f5eb5..9c5fd3604b2 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -51,7 +51,7 @@ static StatisticsType stringToStatisticsType(String type) return StatisticsType::Uniq; if (type == "count_min") return StatisticsType::CountMinSketch; - throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'tdigest', 'uniq', and 'count_min'.", type); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'tdigest', 'uniq' and 'count_min'.", type); } String SingleStatisticsDescription::getTypeName() const @@ -65,7 +65,7 @@ String SingleStatisticsDescription::getTypeName() const case StatisticsType::CountMinSketch: return "count_min"; default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'tdigest', 'uniq', and 'count_min'.", type); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. 
Supported statistics types are 'tdigest', 'uniq' and 'count_min'.", type);
     }
 }

diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference
index 7c22f308ab9..83b921af511 100644
--- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference
+++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference
@@ -1,4 +1,4 @@
-CREATE TABLE default.t1\n(\n    `a` String,\n    `b` UInt64,\n    `c` Int64,\n    `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
+CREATE TABLE default.tab\n(\n    `a` String,\n    `b` UInt64,\n    `c` Int64,\n    `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
 Test statistics TDigest:
     Prewhere info
         Prewhere filter

From 849fb83c9770fedb937dc59df73c0cc172e115bf Mon Sep 17 00:00:00 2001
From: taiyang-li <654010905@qq.com>
Date: Tue, 9 Jul 2024 17:37:17 +0800
Subject: [PATCH 0300/2361] add function printf

---
 .../functions/string-replace-functions.md     |  25 ++
 src/Functions/printf.cpp                      | 308 ++++++++++++++++++
 .../0_stateless/032010_printf.reference       |  16 +
 tests/queries/0_stateless/032010_printf.sql   |  24 ++
 4 files changed, 373 insertions(+)
 create mode 100644 src/Functions/printf.cpp
 create mode 100644 tests/queries/0_stateless/032010_printf.reference
 create mode 100644 tests/queries/0_stateless/032010_printf.sql

diff --git a/docs/en/sql-reference/functions/string-replace-functions.md b/docs/en/sql-reference/functions/string-replace-functions.md
index 8793ebdd1a3..177790c983e 100644
--- a/docs/en/sql-reference/functions/string-replace-functions.md
+++ b/docs/en/sql-reference/functions/string-replace-functions.md
@@ -223,3 +223,28 @@ SELECT translateUTF8('Münchener Straße', 'üß', 'us') AS res;
 │ Munchener Strase │
 └──────────────────┘
 ```
+
+## printf
+
+The `printf` function formats the given string with the values (strings, integers, floating-point numbers, etc.) listed in its arguments, similar to the printf function in C++. The format string can contain format specifiers that start with the `%` character. Anything that is not part of a format specifier is treated as literal text and copied verbatim to the output. A literal `%` character can be escaped as `%%`.
+
+**Syntax**
+
+``` sql
+printf(format, arg1, arg2, ...)
+``` + +**Example** + +Query: + +``` sql +select printf('%%%s %s %d', 'Hello', 'World', 2024); +``` + + +``` response +┌─printf('%%%s %s %d', 'Hello', 'World', 2024)─┠+│ %Hello World 2024 │ +└──────────────────────────────────────────────┘ +``` diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp new file mode 100644 index 00000000000..cb21d5e39ad --- /dev/null +++ b/src/Functions/printf.cpp @@ -0,0 +1,308 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + +namespace +{ + +class FunctionPrintf : public IFunction +{ +private: + ContextPtr context; + FunctionOverloadResolverPtr function_concat; + + struct Instruction + { + std::string_view format; + size_t rows; + bool is_literal; /// format is literal string without any argument + ColumnWithTypeAndName input; /// Only used when is_literal is false + + ColumnWithTypeAndName execute() + { + if (is_literal) + return executeLiteral(format); + else if (isColumnConst(*input.column)) + return executeConstant(input); + else + return executeNonconstant(input); + } + + String toString() const + { + std::ostringstream oss; + oss << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() + << std::endl; + return oss.str(); + } + + private: + ColumnWithTypeAndName executeLiteral(std::string_view literal) + { + ColumnWithTypeAndName res; + auto str_col = ColumnString::create(); + str_col->insert(fmt::sprintf(literal)); + res.column = ColumnConst::create(std::move(str_col), rows); + res.type = std::make_shared(); + return res; + } + + ColumnWithTypeAndName executeConstant(const ColumnWithTypeAndName & arg) + { + ColumnWithTypeAndName tmp_arg = arg; + const auto & const_col = static_cast(*arg.column); + tmp_arg.column = const_col.getDataColumnPtr(); + + ColumnWithTypeAndName tmp_res = executeNonconstant(tmp_arg); + return ColumnWithTypeAndName{ColumnConst::create(tmp_res.column, arg.column->size()), tmp_res.type, tmp_res.name}; + } + + ColumnWithTypeAndName executeNonconstant(const ColumnWithTypeAndName & arg) + { + size_t size = arg.column->size(); + auto res_col = ColumnString::create(); + auto & res_str = static_cast(*res_col); + auto & res_offsets = res_str.getOffsets(); + auto & res_chars = res_str.getChars(); + res_offsets.reserve_exact(size); + res_chars.reserve(format.size() * size * 2); + + String s; + WhichDataType which(arg.type); + +#define EXECUTE_BY_TYPE(IS_TYPE, GET_TYPE) \ + else if (which.IS_TYPE()) \ + { \ + for (size_t i = 0; i < size; ++i) \ + { \ + auto a = arg.column->GET_TYPE(i); \ + s = fmt::sprintf(format, a); \ + res_str.insertData(s.data(), s.size()); \ + } \ + } + + if (false) + ; + EXECUTE_BY_TYPE(isNativeInt, getInt) + EXECUTE_BY_TYPE(isNativeUInt, getUInt) + EXECUTE_BY_TYPE(isFloat32, getFloat32) + EXECUTE_BY_TYPE(isFloat64, getFloat64) + else if (which.isStringOrFixedString()) + { + for (size_t i = 0; i < size; ++i) + { + auto a = arg.column->getDataAt(i).toView(); + s = fmt::sprintf(format, a); + res_str.insertData(s.data(), s.size()); + } + } + else throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "The argument type of function {} is {}, but native numeric or string type is expected", + FunctionPrintf::name, + arg.type->getName()); +#undef EXECUTE_BY_TYPE + + 
ColumnWithTypeAndName res; + res.name = arg.name; + res.type = std::make_shared(); + res.column = std::move(res_col); + return res; + } + }; + +public: + static constexpr auto name = "printf"; + + static FunctionPtr create(ContextPtr context) { return std::make_shared(context); } + + explicit FunctionPrintf(ContextPtr context_) + : context(context_), function_concat(FunctionFactory::instance().get("concat", context)) { } + + String getName() const override { return name; } + + bool isVariadic() const override { return true; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + + size_t getNumberOfArguments() const override { return 0; } + + bool useDefaultImplementationForConstants() const override { return false; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (arguments.empty()) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be at least 1", + getName(), + arguments.size()); + + /// First pattern argument must have string type + if (!isString(arguments[0])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "The first argument type of function {} is {}, but String type is expected", + getName(), + arguments[0]->getName()); + + for (size_t i = 1; i < arguments.size(); ++i) + { + if (!isNativeNumber(arguments[i]) && !isStringOrFixedString(arguments[i])) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "The {}-th argument type of function {} is {}, but native numeric or string type is expected", + i + 1, + getName(), + arguments[i]->getName()); + } + return std::make_shared(); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + const ColumnPtr & c0 = arguments[0].column; + const ColumnConst * c0_const_string = typeid_cast(&*c0); + if (!c0_const_string) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "First argument of function {} must be constant string", getName()); + + String format = c0_const_string->getValue(); + auto instructions = buildInstructions(format, arguments, input_rows_count); + + ColumnsWithTypeAndName concat_args(instructions.size()); + for (size_t i = 0; i < instructions.size(); ++i) + { + std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl; + concat_args[i] = instructions[i].execute(); + std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl; + } + + auto res = function_concat->build(concat_args)->execute(concat_args, std::make_shared(), input_rows_count); + return res; + } + +private: + std::vector buildInstructions(const String & format , const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const + { + std::vector instructions; + instructions.reserve(arguments.size()); + + auto append_instruction = [&](const char * begin, const char * end, const ColumnWithTypeAndName & arg) + { + Instruction instr; + instr.rows = input_rows_count; + instr.format = std::string_view(begin, end - begin); + + size_t size = end - begin; + if (size > 1 && begin[0] == '%' and begin[1] != '%') + { + instr.is_literal = false; + instr.input = arg; + } + else + { + instr.is_literal = true; + } + instructions.emplace_back(std::move(instr)); + }; + + auto check_index_range = [&](size_t idx) + { + if (idx >= arguments.size()) + 
throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, but format is {}", + getName(), + arguments.size(), + format); + }; + + const char * begin = format.data(); + const char * end = format.data() + format.size(); + const char * curr = begin; + size_t idx = 0; + while (curr < end) + { + const char * tmp = curr; + bool is_first = curr == begin; /// If current instruction is the first one + bool is_literal = false; /// If current instruction is literal string without any argument + if (is_first) + { + if (*curr != '%') + is_literal = true; + else if (curr + 1 < end && *(curr + 1) == '%') + is_literal = true; + else + ++idx; /// Skip first argument if first instruction is not literal + } + + if (!is_literal) + ++curr; + + while (curr < end) + { + if (*curr != '%') + ++curr; + else if (curr + 1 < end && *(curr + 1) == '%') + curr += 2; + else + { + check_index_range(idx); + append_instruction(tmp, curr, arguments[idx]); + ++idx; + break; + } + } + + if (curr == end) + { + check_index_range(idx); + append_instruction(tmp, curr, arguments[idx]); + ++idx; + } + } + + /// Check if all arguments are used + if (idx != arguments.size()) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, but format is {}", + getName(), + arguments.size(), + format); + + return instructions; + } +}; + +} + +REGISTER_FUNCTION(Printf) +{ + factory.registerFunction(); +} + +} diff --git a/tests/queries/0_stateless/032010_printf.reference b/tests/queries/0_stateless/032010_printf.reference new file mode 100644 index 00000000000..58501cbd0fc --- /dev/null +++ b/tests/queries/0_stateless/032010_printf.reference @@ -0,0 +1,16 @@ +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/032010_printf.sql b/tests/queries/0_stateless/032010_printf.sql new file mode 100644 index 00000000000..58fe081e499 --- /dev/null +++ b/tests/queries/0_stateless/032010_printf.sql @@ -0,0 +1,24 @@ +-- Testing integer formats +select printf('%%d: %d', 123) = '%d: 123'; +select printf('%%i: %i', 123) = '%i: 123'; +select printf('%%u: %u', 123) = '%u: 123'; +select printf('%%o: %o', 123) = '%o: 173'; +select printf('%%x: %x', 123) = '%x: 7b'; +select printf('%%X: %X', 123) = '%X: 7B'; + +-- Testing floating point formats +select printf('%%f: %f', 123.456) = '%f: 123.456000'; +select printf('%%F: %F', 123.456) = '%F: 123.456000'; +select printf('%%e: %e', 123.456) = '%e: 1.234560e+02'; +select printf('%%E: %E', 123.456) = '%E: 1.234560E+02'; +select printf('%%g: %g', 123.456) = '%g: 123.456'; +select printf('%%G: %G', 123.456) = '%G: 123.456'; +select printf('%%a: %a', 123.456) = '%a: 0x1.edd2f1a9fbe77p+6'; +select printf('%%A: %A', 123.456) = '%A: 0X1.EDD2F1A9FBE77P+6'; + +-- Testing character formats +select printf('%%s: %s', 'abc') = '%s: abc'; + + +-- Testing the %% specifier +select printf('%%%%: %%') = '%%: %'; \ No newline at end of file From 64de996c03a6d0d685da6d6aa6c401a9ce710d8f Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 9 Jul 2024 17:46:44 +0800 Subject: [PATCH 0301/2361] Fix address sanitizer container-overflow --- src/Storages/Statistics/StatisticsCountMinSketch.cpp | 7 +++---- .../02864_statistics_estimate_predicate.reference | 2 +- .../02864_statistics_estimate_predicate.sql | 12 ++++++------ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp 
b/src/Storages/Statistics/StatisticsCountMinSketch.cpp
index abd5cf17946..3e91b4052c4 100644
--- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp
+++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp
@@ -33,10 +33,9 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const
 {
     if (data_type->isValueRepresentedByNumber())
     {
-        /// convertFieldToType will
+        /// 'val' may be a number or a string; the method 'convertFieldToType' will
         /// 1. convert string to number, date, datetime, IPv4, Decimal etc
-        /// 2. do other conversion
-        /// 3. return null if val larger than the range of data_type
+        /// 2. return null if val is larger than the range of data_type
         auto val_converted = convertFieldToType(val, *data_type);
         if (val_converted.isNull())
             return 0;
@@ -87,7 +86,7 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf)
     readIntBinary(size, buf);

     Sketch::vector_bytes bytes;
-    bytes.reserve(size);
+    bytes.resize(size); /// To avoid 'container-overflow' in AddressSanitizer checking
     buf.readStrict(reinterpret_cast(bytes.data()), size);

     sketch = Sketch::deserialize(bytes.data(), size);
diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference
index 83b921af511..4e41c32750f 100644
--- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference
+++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference
@@ -17,7 +17,7 @@ Test statistics count_min:
 Test statistics multi-types:
     Prewhere info
         Prewhere filter
-            Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900_UInt16)) (removed)
+            Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed)
     Prewhere info
         Prewhere filter
             Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed)
diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql
index 7fcb85d80f5..91b4f2d05cb 100644
--- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql
+++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql
@@ -26,11 +26,11 @@ SELECT 'Test statistics TDigest:';
 ALTER TABLE tab ADD STATISTICS b, c TYPE tdigest;
 ALTER TABLE tab MATERIALIZE STATISTICS b, c;

-SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '')
+SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '')
 FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b > 0/*9990*/ and c < -98/*100*/)
 WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';

-SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '')
+SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '')
 FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b = 0/*1000*/ and c < -98/*100*/)
 WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';

@@ -41,7 +41,7 @@ SELECT 'Test statistics Uniq:';
 ALTER TABLE tab ADD STATISTICS b TYPE uniq, tdigest;
 ALTER TABLE tab MATERIALIZE STATISTICS b;

-SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '')
+SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '')
 FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*1000*/ and b = 0/*10*/)
 WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';

@@ -55,7 +55,7 @@ ALTER TABLE tab ADD STATISTICS b TYPE count_min;
 ALTER TABLE tab ADD STATISTICS c TYPE count_min;
 ALTER TABLE tab 
MATERIALIZE STATISTICS a, b, c; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; @@ -69,11 +69,11 @@ ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_String', '') +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; From 3cc6a133c64861f4493849905950abb5cc1fbaac Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 9 Jul 2024 12:38:16 +0200 Subject: [PATCH 0302/2361] Update setup_minio.sh --- docker/test/stateless/setup_minio.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index aacb9d88a45..0e344cbb9c4 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -101,7 +101,7 @@ upload_data() { # shellcheck disable=SC2045 for file in $(ls "${data_path}"); do echo "${file}"; - ./mc cp --recursive "${data_path}"/"${file}" clickminio/test/"${file}"; + ./mc cp --recursive "${data_path}"/ clickminio/test/; done } @@ -148,4 +148,4 @@ main() { setup_aws_credentials } -main "$@" \ No newline at end of file +main "$@" From 362bf4befcd55de5f49e2665c7c7f9483a700dc8 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 9 Jul 2024 17:30:08 +0200 Subject: [PATCH 0303/2361] Update setup_minio.sh --- docker/test/stateless/setup_minio.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index 0e344cbb9c4..8bd75f16321 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -101,7 +101,7 @@ upload_data() { # shellcheck disable=SC2045 for file in $(ls "${data_path}"); do echo "${file}"; - ./mc cp --recursive "${data_path}"/ clickminio/test/; + ./mc cp "${data_path}"/"${file}" clickminio/test/"${file}"; done } From 1761102b3a071143b40039ee1e83666ebffa88fb Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 9 Jul 2024 17:31:41 +0200 Subject: [PATCH 0304/2361] fix path --- .../column1=Gordon/sample.parquet | Bin .../column1=Schmidt/sample.parquet | Bin .../sample.parquet | Bin 3 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/data_minio/hive_partitioning/{coumn0=Elizabeth => column0=Elizabeth}/column1=Gordon/sample.parquet (100%) rename tests/queries/0_stateless/data_minio/hive_partitioning/{coumn0=Elizabeth => column0=Elizabeth}/column1=Schmidt/sample.parquet (100%) rename 
tests/queries/0_stateless/data_minio/hive_partitioning/{coumn0=Elizabeth => column0=Elizabeth}/sample.parquet (100%) diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Gordon/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet similarity index 100% rename from tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Gordon/sample.parquet rename to tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Schmidt/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet similarity index 100% rename from tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/column1=Schmidt/sample.parquet rename to tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/sample.parquet similarity index 100% rename from tests/queries/0_stateless/data_minio/hive_partitioning/coumn0=Elizabeth/sample.parquet rename to tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/sample.parquet From fbaf6119eaad06c7ceb4eaa54cc346fbad20f50a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 9 Jul 2024 17:08:19 +0000 Subject: [PATCH 0305/2361] fix indexHint with fuzzer --- src/Functions/indexHint.h | 4 ++++ tests/queries/0_stateless/03204_index_hint_fuzzer.reference | 1 + tests/queries/0_stateless/03204_index_hint_fuzzer.sql | 1 + 3 files changed, 6 insertions(+) create mode 100644 tests/queries/0_stateless/03204_index_hint_fuzzer.reference create mode 100644 tests/queries/0_stateless/03204_index_hint_fuzzer.sql diff --git a/src/Functions/indexHint.h b/src/Functions/indexHint.h index 3b71c7a5585..a71e1555c78 100644 --- a/src/Functions/indexHint.h +++ b/src/Functions/indexHint.h @@ -42,6 +42,10 @@ public: bool useDefaultImplementationForNulls() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + + bool useDefaultImplementationForSparseColumns() const override { return false; } + bool isSuitableForConstantFolding() const override { return false; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } diff --git a/tests/queries/0_stateless/03204_index_hint_fuzzer.reference b/tests/queries/0_stateless/03204_index_hint_fuzzer.reference new file mode 100644 index 00000000000..f750ba35102 --- /dev/null +++ b/tests/queries/0_stateless/03204_index_hint_fuzzer.reference @@ -0,0 +1 @@ +(1,1) diff --git a/tests/queries/0_stateless/03204_index_hint_fuzzer.sql b/tests/queries/0_stateless/03204_index_hint_fuzzer.sql new file mode 100644 index 00000000000..5794f3eee53 --- /dev/null +++ b/tests/queries/0_stateless/03204_index_hint_fuzzer.sql @@ -0,0 +1 @@ +SELECT tuple(indexHint(toLowCardinality('aaa')), 1); From 9db80a6e2d14c6341c7afc66aeaf6998c98f9f8a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 9 Jul 2024 17:47:05 +0000 Subject: [PATCH 0306/2361] more testing with chunked --- programs/benchmark/Benchmark.cpp | 4 ++-- src/Client/ConnectionParameters.cpp | 4 ++-- src/Client/ConnectionParameters.h 
| 4 ++-- src/Dictionaries/ClickHouseDictionarySource.cpp | 8 ++++---- src/Interpreters/Cluster.cpp | 4 ++-- src/Interpreters/Cluster.h | 4 ++-- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index 251761e0bad..0a7faf5ec01 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -666,8 +666,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) Strings hosts = options.count("host") ? options["host"].as() : Strings({"localhost"}); - String proto_send_chunked {"notchunked_optional"}; - String proto_recv_chunked {"notchunked_optional"}; + String proto_send_chunked {"chunked"}; + String proto_recv_chunked {"chunked"}; if (options.count("proto_caps")) { diff --git a/src/Client/ConnectionParameters.cpp b/src/Client/ConnectionParameters.cpp index 4bca65083c4..50af589dba3 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -107,8 +107,8 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati } } - proto_send_chunked = config.getString("proto_caps.send", "notchunked_optional"); - proto_recv_chunked = config.getString("proto_caps.recv", "notchunked_optional"); + proto_send_chunked = config.getString("proto_caps.send", "chunked"); + proto_recv_chunked = config.getString("proto_caps.recv", "chunked"); quota_key = config.getString("quota_key", ""); diff --git a/src/Client/ConnectionParameters.h b/src/Client/ConnectionParameters.h index 71057a2b543..ef4df17143e 100644 --- a/src/Client/ConnectionParameters.h +++ b/src/Client/ConnectionParameters.h @@ -20,8 +20,8 @@ struct ConnectionParameters std::string default_database; std::string user; std::string password; - std::string proto_send_chunked = "notchunked_optional"; - std::string proto_recv_chunked = "notchunked_optional"; + std::string proto_send_chunked = "chunked"; + std::string proto_recv_chunked = "chunked"; std::string quota_key; SSHKey ssh_private_key; std::string jwt; diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index 3b096da92c6..14c6aac24f6 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -236,8 +236,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = named_collection->getAnyOrDefault({"user", "username"}, "default"), .password = named_collection->getOrDefault("password", ""), - .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "notchunked_optional"), - .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "notchunked_optional"), + .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "chunked"), + .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "chunked"), .quota_key = named_collection->getOrDefault("quota_key", ""), .db = named_collection->getAnyOrDefault({"db", "database"}, default_database), .table = named_collection->getOrDefault("table", ""), @@ -262,8 +262,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = config.getString(settings_config_prefix + ".user", "default"), .password = config.getString(settings_config_prefix + ".password", ""), - .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "notchunked_optional"), - .proto_recv_chunked = config.getString(settings_config_prefix + 
".proto_caps.recv", "notchunked_optional"), + .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "chunked"), + .proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "chunked"), .quota_key = config.getString(settings_config_prefix + ".quota_key", ""), .db = config.getString(settings_config_prefix + ".db", default_database), .table = config.getString(settings_config_prefix + ".table", ""), diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 1d7ccd484d0..9b227fcc1fc 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -113,8 +113,8 @@ Cluster::Address::Address( secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable; priority = Priority{config.getInt(config_prefix + ".priority", 1)}; - proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "notchunked_optional"); - proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "notchunked_optional"); + proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "chunked"); + proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "chunked"); const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port"; auto default_port = config.getInt(port_type, 0); diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index f3146ac0134..009ef15df6c 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -114,8 +114,8 @@ public: UInt16 port{0}; String user; String password; - String proto_send_chunked = "notchunked_optional"; - String proto_recv_chunked = "notchunked_optional"; + String proto_send_chunked = "chunked"; + String proto_recv_chunked = "chunked"; String quota_key; /// For inter-server authorization From d7f08ffdb74b4fce89eff3133e36a5f50fc4ef0b Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 9 Jul 2024 21:01:37 +0200 Subject: [PATCH 0307/2361] Update setup_minio.sh --- docker/test/stateless/setup_minio.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index 8bd75f16321..49837fdb1ac 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -99,10 +99,7 @@ upload_data() { # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 - for file in $(ls "${data_path}"); do - echo "${file}"; - ./mc cp "${data_path}"/"${file}" clickminio/test/"${file}"; - done + ./mc cp --recursive "${data_path}"/ clickminio/test/ } setup_aws_credentials() { From 246f421f2402799fd11b22a608b4d0d497cb8438 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 9 Jul 2024 16:33:57 +0200 Subject: [PATCH 0308/2361] merge tree sink cancel delayed_chunk --- .../Transforms/ExceptionKeepingTransform.h | 2 +- src/Storages/MergeTree/MergeTreeSink.cpp | 9 +++++++++ .../MergeTree/ReplicatedMergeTreeSink.cpp | 15 +++++++++++++++ src/Storages/MergeTree/ReplicatedMergeTreeSink.h | 2 ++ 4 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/Processors/Transforms/ExceptionKeepingTransform.h b/src/Processors/Transforms/ExceptionKeepingTransform.h index 000b5da798a..9aa33a8cbe5 100644 --- a/src/Processors/Transforms/ExceptionKeepingTransform.h +++ 
b/src/Processors/Transforms/ExceptionKeepingTransform.h @@ -52,7 +52,7 @@ protected: virtual void onConsume(Chunk chunk) = 0; virtual GenerateResult onGenerate() = 0; virtual void onFinish() {} - virtual void onException(std::exception_ptr /* exception */) {} + virtual void onException(std::exception_ptr /* exception */) { } public: ExceptionKeepingTransform(const Block & in_header, const Block & out_header, bool ignore_on_start_and_finish_ = true); diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 05751e0fa6f..2e8f6db6868 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -56,6 +56,15 @@ void MergeTreeSink::onFinish() void MergeTreeSink::onCancel() { + if (!delayed_chunk) + return; + + for (auto & partition : delayed_chunk->partitions) + { + partition.temp_part.cancel(); + } + + delayed_chunk.reset(); } void MergeTreeSink::consume(Chunk chunk) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 4b4f4c33e7d..93f82a5a789 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -1155,6 +1155,21 @@ void ReplicatedMergeTreeSinkImpl::onFinish() finishDelayedChunk(std::make_shared(zookeeper)); } + +template +void ReplicatedMergeTreeSinkImpl::onCancel() +{ + if (!delayed_chunk) + return; + + for (auto & partition : delayed_chunk->partitions) + { + partition.temp_part.cancel(); + } + + delayed_chunk.reset(); +} + template void ReplicatedMergeTreeSinkImpl::waitForQuorum( const ZooKeeperWithFaultInjectionPtr & zookeeper, diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index 39623c20584..b1796a35ed2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -54,6 +54,8 @@ public: void consume(Chunk chunk) override; void onFinish() override; + void onCancel() override; + String getName() const override { return "ReplicatedMergeTreeSink"; } /// For ATTACHing existing data on filesystem. From 2794b7bf84faf91cfb92d4a8fb76bb3a8183de44 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 10 Jul 2024 00:20:11 +0000 Subject: [PATCH 0309/2361] defaults to notchunked, add docs to server's and client's configs --- programs/benchmark/Benchmark.cpp | 4 ++-- programs/client/clickhouse-client.xml | 15 +++++++++++++++ programs/server/config.xml | 15 +++++++++++++++ src/Client/ConnectionParameters.cpp | 4 ++-- src/Client/ConnectionParameters.h | 4 ++-- src/Dictionaries/ClickHouseDictionarySource.cpp | 8 ++++---- src/Interpreters/Cluster.cpp | 4 ++-- src/Interpreters/Cluster.h | 4 ++-- src/Server/TCPHandler.cpp | 8 ++++---- 9 files changed, 48 insertions(+), 18 deletions(-) diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index 0a7faf5ec01..36f774a3c12 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -666,8 +666,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) Strings hosts = options.count("host") ? 
options["host"].as() : Strings({"localhost"}); - String proto_send_chunked {"chunked"}; - String proto_recv_chunked {"chunked"}; + String proto_send_chunked {"notchunked"}; + String proto_recv_chunked {"notchunked"}; if (options.count("proto_caps")) { diff --git a/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml index d0deb818c1e..376e64906e2 100644 --- a/programs/client/clickhouse-client.xml +++ b/programs/client/clickhouse-client.xml @@ -37,6 +37,21 @@ {display_name} \e[1;31m:)\e[0m + + + 9000 + + + diff --git a/src/Client/ConnectionParameters.cpp b/src/Client/ConnectionParameters.cpp index 50af589dba3..4d0a9ffa08c 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -107,8 +107,8 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati } } - proto_send_chunked = config.getString("proto_caps.send", "chunked"); - proto_recv_chunked = config.getString("proto_caps.recv", "chunked"); + proto_send_chunked = config.getString("proto_caps.send", "notchunked"); + proto_recv_chunked = config.getString("proto_caps.recv", "notchunked"); quota_key = config.getString("quota_key", ""); diff --git a/src/Client/ConnectionParameters.h b/src/Client/ConnectionParameters.h index ef4df17143e..382bfe34a3d 100644 --- a/src/Client/ConnectionParameters.h +++ b/src/Client/ConnectionParameters.h @@ -20,8 +20,8 @@ struct ConnectionParameters std::string default_database; std::string user; std::string password; - std::string proto_send_chunked = "chunked"; - std::string proto_recv_chunked = "chunked"; + std::string proto_send_chunked = "notchunked"; + std::string proto_recv_chunked = "notchunked"; std::string quota_key; SSHKey ssh_private_key; std::string jwt; diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index 14c6aac24f6..b36d53a6159 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -236,8 +236,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = named_collection->getAnyOrDefault({"user", "username"}, "default"), .password = named_collection->getOrDefault("password", ""), - .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "chunked"), - .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "chunked"), + .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "notchunked"), + .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "notchunked"), .quota_key = named_collection->getOrDefault("quota_key", ""), .db = named_collection->getAnyOrDefault({"db", "database"}, default_database), .table = named_collection->getOrDefault("table", ""), @@ -262,8 +262,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = config.getString(settings_config_prefix + ".user", "default"), .password = config.getString(settings_config_prefix + ".password", ""), - .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "chunked"), - .proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "chunked"), + .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "notchunked"), + .proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "notchunked"), .quota_key = config.getString(settings_config_prefix + ".quota_key", ""), .db = 
config.getString(settings_config_prefix + ".db", default_database), .table = config.getString(settings_config_prefix + ".table", ""), diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 9b227fcc1fc..dd9e35834eb 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -113,8 +113,8 @@ Cluster::Address::Address( secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable; priority = Priority{config.getInt(config_prefix + ".priority", 1)}; - proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "chunked"); - proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "chunked"); + proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "notchunked"); + proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "notchunked"); const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port"; auto default_port = config.getInt(port_type, 0); diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index 009ef15df6c..c69d77668ab 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -114,8 +114,8 @@ public: UInt16 port{0}; String user; String password; - String proto_send_chunked = "chunked"; - String proto_recv_chunked = "chunked"; + String proto_send_chunked = "notchunked"; + String proto_recv_chunked = "notchunked"; String quota_key; /// For inter-server authorization diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 40fd3848455..9c5e5e9c572 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -310,8 +310,8 @@ void TCPHandler::runImpl() return chunked_srv; }; - bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "chunked_optional"), proto_recv_chunked_cl, "send"); - bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "chunked_optional"), proto_send_chunked_cl, "recv"); + bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "notchunked"), proto_recv_chunked_cl, "send"); + bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "notchunked"), proto_send_chunked_cl, "recv"); if (out_chunked) out->enableChunked(); @@ -1660,8 +1660,8 @@ void TCPHandler::sendHello() writeVarUInt(VERSION_PATCH, *out); if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) { - writeStringBinary(server.config().getString("proto_caps.send", "chunked"), *out); - writeStringBinary(server.config().getString("proto_caps.recv", "chunked"), *out); + writeStringBinary(server.config().getString("proto_caps.send", "notchunked"), *out); + writeStringBinary(server.config().getString("proto_caps.recv", "notchunked"), *out); } if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { From e0e3842772ead940f53346cc087ea54e5e6aa8fa Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 10 Jul 2024 10:15:33 +0800 Subject: [PATCH 0310/2361] support printf --- src/Functions/printf.cpp | 6 +++--- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp index cb21d5e39ad..b2a0143a3f8 100644 --- a/src/Functions/printf.cpp +++ b/src/Functions/printf.cpp @@ -52,7 +52,7 @@ private: String toString() const { - std::ostringstream oss; + std::ostringstream oss; // 
STYLE_CHECK_ALLOW_STD_STRING_STREAM oss << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() << std::endl; return oss.str(); @@ -195,9 +195,9 @@ public: ColumnsWithTypeAndName concat_args(instructions.size()); for (size_t i = 0; i < instructions.size(); ++i) { - std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl; + // std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl; concat_args[i] = instructions[i].execute(); - std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl; + // std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl; } auto res = function_concat->build(concat_args)->execute(concat_args, std::make_shared(), input_rows_count); diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 943caf918d6..21a9b759466 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -2264,6 +2264,7 @@ prettyspacemonoblock prettyspacenoescapes prettyspacenoescapesmonoblock prewhere +printf privateKeyFile privateKeyPassphraseHandler prlimit From 8b1bc00e9a6462a6dce2946510d83a85bc69a139 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 10 Jul 2024 10:52:53 +0800 Subject: [PATCH 0311/2361] fix style --- src/Functions/printf.cpp | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp index b2a0143a3f8..c7c6bd228a7 100644 --- a/src/Functions/printf.cpp +++ b/src/Functions/printf.cpp @@ -9,19 +9,19 @@ #include #include +#include #include #include #include #include -#include namespace DB { namespace ErrorCodes { - extern const int ILLEGAL_COLUMN; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int ILLEGAL_COLUMN; +extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; } namespace @@ -54,7 +54,7 @@ private: { std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM oss << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() - << std::endl; + << std::endl; return oss.str(); } @@ -93,15 +93,15 @@ private: WhichDataType which(arg.type); #define EXECUTE_BY_TYPE(IS_TYPE, GET_TYPE) \ - else if (which.IS_TYPE()) \ - { \ - for (size_t i = 0; i < size; ++i) \ - { \ - auto a = arg.column->GET_TYPE(i); \ - s = fmt::sprintf(format, a); \ - res_str.insertData(s.data(), s.size()); \ - } \ - } + else if (which.IS_TYPE()) \ + { \ + for (size_t i = 0; i < size; ++i) \ + { \ + auto a = arg.column->GET_TYPE(i); \ + s = fmt::sprintf(format, a); \ + res_str.insertData(s.data(), s.size()); \ + } \ + } if (false) ; @@ -205,7 +205,8 @@ public: } private: - std::vector buildInstructions(const String & format , const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const + std::vector + buildInstructions(const String & format, const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const { std::vector instructions; instructions.reserve(arguments.size()); @@ -248,7 +249,7 @@ private: { const char * tmp = curr; bool is_first = curr == begin; /// If current instruction is the first one - bool is_literal = false; /// If current instruction is literal string without any argument + bool is_literal = false; /// 
If current instruction is literal string without any argument if (is_first) { if (*curr != '%') From 6cd6319ba70945e7ae50447772c57d61e488e72e Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 9 Jul 2024 15:32:28 +0000 Subject: [PATCH 0312/2361] Properly convert boolean literals in query tree --- src/Interpreters/convertFieldToType.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 184c263dbdb..d87d4a73e37 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -214,6 +214,10 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } else if (type.isValueRepresentedByNumber() && src.getType() != Field::Types::String) { + /// Bool is not represented in which_type, so we need to type it separately + if (isInt64OrUInt64orBoolFieldType(src.getType()) && type.getName() == "Bool") + return bool(src.safeGet()); + if (which_type.isUInt8()) return convertNumericType(src, type); if (which_type.isUInt16()) return convertNumericType(src, type); if (which_type.isUInt32()) return convertNumericType(src, type); From 294eaaeabd60140d4f4f6ae892b015d6fe4f551c Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 9 Jul 2024 15:43:06 +0000 Subject: [PATCH 0313/2361] Test bool in gtest_transform_query_for_external_database --- .../gtest_transform_query_for_external_database.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/src/Storages/tests/gtest_transform_query_for_external_database.cpp index 6765e112bb9..5a63c118e2d 100644 --- a/src/Storages/tests/gtest_transform_query_for_external_database.cpp +++ b/src/Storages/tests/gtest_transform_query_for_external_database.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -74,6 +75,7 @@ private: {"a", std::make_shared()}, {"b", std::make_shared()}, {"foo", std::make_shared()}, + {"is_value", DataTypeFactory::instance().get("Bool")}, }), TableWithColumnNamesAndTypes( createDBAndTable("table2"), @@ -411,6 +413,14 @@ TEST(TransformQueryForExternalDatabase, Analyzer) R"(SELECT "column" FROM "test"."table")"); check(state, 1, {"column", "apply_id", "apply_type", "apply_status", "create_time", "field", "value", "a", "b", "foo"}, - "SELECT * FROM table WHERE (column) IN (1)", + "SELECT * EXCEPT (is_value) FROM table WHERE (column) IN (1)", R"(SELECT "column", "apply_id", "apply_type", "apply_status", "create_time", "field", "value", "a", "b", "foo" FROM "test"."table" WHERE "column" IN (1))"); + + check(state, 1, {"is_value"}, + "SELECT is_value FROM table WHERE is_value = true", + R"(SELECT "is_value" FROM "test"."table" WHERE "is_value" = true)"); + + check(state, 1, {"is_value"}, + "SELECT is_value FROM table WHERE is_value = 1", + R"(SELECT "is_value" FROM "test"."table" WHERE "is_value" = 1)"); } From 46fbc23e093a85f431ac8afdc33f52fe267506c0 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 9 Jul 2024 17:11:21 +0000 Subject: [PATCH 0314/2361] update 02952_conjunction_optimization.reference --- .../0_stateless/02952_conjunction_optimization.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02952_conjunction_optimization.reference b/tests/queries/0_stateless/02952_conjunction_optimization.reference index eeadfaae21d..8af0abefd3a 100644 --- a/tests/queries/0_stateless/02952_conjunction_optimization.reference +++ 
b/tests/queries/0_stateless/02952_conjunction_optimization.reference @@ -32,7 +32,7 @@ QUERY id: 0 FUNCTION id: 5, function_name: and, function_type: ordinary, result_type: Bool ARGUMENTS LIST id: 6, nodes: 2 - CONSTANT id: 7, constant_value: UInt64_1, constant_value_type: Bool + CONSTANT id: 7, constant_value: Bool_1, constant_value_type: Bool FUNCTION id: 8, function_name: notIn, function_type: ordinary, result_type: UInt8 ARGUMENTS LIST id: 9, nodes: 2 From 11a8de50a6283277c585fa2bad74aad1712fb1f2 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 10 Jul 2024 08:56:35 +0000 Subject: [PATCH 0315/2361] Revert "Disable broken cases from 02911_join_on_nullsafe_optimization" This reverts commit 513ce9fa2f3bb0d2cc1774a07272a249b40f475f. --- ...2911_join_on_nullsafe_optimization.reference | 17 +++++++++++++---- .../02911_join_on_nullsafe_optimization.sql | 5 ++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference index 4eb7e74446d..f0463509b80 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference @@ -36,10 +36,19 @@ SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS N 3 3 3 33 \N \N \N \N -- aliases defined in the join condition are valid --- FIXME(@vdimir) broken query formatting for the following queries: --- SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; --- SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; - +SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; +1 42 \N \N \N 0 +2 2 2 2 1 1 +3 3 3 33 1 1 +\N \N 4 42 \N 0 +\N \N \N \N \N 1 +SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; +1 42 \N \N \N 0 +2 2 2 2 1 1 +3 3 3 33 1 1 +\N \N 4 42 \N 0 +\N \N \N \N \N 0 +\N \N \N \N \N 0 -- check for non-nullable columns for which `is null` is replaced with constant SELECT * FROM t1n as t1 JOIN t2n as t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; 2 2 2 2 diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql index f7813e2a1b4..67918f4302f 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql @@ -36,9 +36,8 @@ SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; -- aliases defined in the join condition are valid --- FIXME(@vdimir) broken query formatting for the following queries: --- SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS 
NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; --- SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; +SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; +SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; -- check for non-nullable columns for which `is null` is replaced with constant SELECT * FROM t1n as t1 JOIN t2n as t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; From 6e762d404456debca4ee2d5ccce94deb32c3fbad Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 10 Jul 2024 08:57:33 +0000 Subject: [PATCH 0316/2361] Fix aliased JOIN ON expression formatting --- src/Parsers/ASTTablesInSelectQuery.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Parsers/ASTTablesInSelectQuery.cpp b/src/Parsers/ASTTablesInSelectQuery.cpp index d22a4eca0fc..dbb2a008bae 100644 --- a/src/Parsers/ASTTablesInSelectQuery.cpp +++ b/src/Parsers/ASTTablesInSelectQuery.cpp @@ -235,7 +235,12 @@ void ASTTableJoin::formatImplAfterTable(const FormatSettings & settings, FormatS else if (on_expression) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : ""); + bool on_has_alias = !on_expression->tryGetAlias().empty(); + if (on_has_alias) + settings.ostr << "("; on_expression->formatImpl(settings, state, frame); + if (on_has_alias) + settings.ostr << ")"; } } From b8944abe0ec73ca386e851ce96d5f7ddaf3d254e Mon Sep 17 00:00:00 2001 From: Ruihang Xia Date: Wed, 10 Jul 2024 19:01:52 +0800 Subject: [PATCH 0317/2361] refactor: avoid unneed calculation in SeriesPeriodDetect Signed-off-by: Ruihang Xia --- src/Functions/seriesPeriodDetectFFT.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/Functions/seriesPeriodDetectFFT.cpp b/src/Functions/seriesPeriodDetectFFT.cpp index 471354235d5..ecf8398bbd5 100644 --- a/src/Functions/seriesPeriodDetectFFT.cpp +++ b/src/Functions/seriesPeriodDetectFFT.cpp @@ -153,12 +153,8 @@ public: return true; } - std::vector xfreq(spec_len); double step = 0.5 / (spec_len - 1); - for (size_t i = 0; i < spec_len; ++i) - xfreq[i] = i * step; - - auto freq = xfreq[idx]; + auto freq = idx * step; period = std::round(1 / freq); return true; From 7b58722c07e8feb6acca5f0762411a55b8c58915 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 10 Jul 2024 13:15:44 +0200 Subject: [PATCH 0318/2361] Update setup_minio.sh --- docker/test/stateless/setup_minio.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index 49837fdb1ac..02e3d117de2 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -99,7 +99,9 @@ upload_data() { # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 - ./mc cp --recursive "${data_path}"/ clickminio/test/ + if [ -d "${data_path}" ]; then + ./mc cp --recursive "${data_path}"/ clickminio/test/ + fi } setup_aws_credentials() { From 
3ea555524dd08af3181a5d1896cbe518c0d10736 Mon Sep 17 00:00:00 2001 From: AntiTopQuark Date: Thu, 11 Jul 2024 00:20:40 +0800 Subject: [PATCH 0319/2361] resolve comments --- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 41 ++++++-------------- src/Storages/MergeTree/MergeTreeIndexSet.h | 2 - 2 files changed, 11 insertions(+), 32 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 36844648ac7..284d47ef9e7 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -87,15 +87,6 @@ void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const serialization->serializeBinaryBulkWithMultipleStreams(column, 0, size(), settings, state); serialization->serializeBinaryBulkStateSuffix(settings, state); } - - for (size_t i = 0; i < num_columns; ++i) - { - const DataTypePtr & type = block.getByPosition(i).type; - auto serialization = type->getDefaultSerialization(); - - serialization->serializeBinary(set_hyperrectangle[i].left, ostr, {}); - serialization->serializeBinary(set_hyperrectangle[i].right, ostr, {}); - } } void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) @@ -117,6 +108,10 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd settings.getter = [&](ISerialization::SubstreamPath) -> ReadBuffer * { return &istr; }; settings.position_independent_encoding = false; + set_hyperrectangle.clear(); + Field min_val; + Field max_val; + for (size_t i = 0; i < num_columns; ++i) { auto & elem = block.getByPosition(i); @@ -127,24 +122,12 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); serialization->deserializeBinaryBulkWithMultipleStreams(elem.column, rows_to_read, settings, state, nullptr); - } - set_hyperrectangle.clear(); - Field min_val; - Field max_val; - for (size_t i = 0; i < num_columns; ++i) - { - const DataTypePtr & type = block.getByPosition(i).type; - auto serialization = type->getDefaultSerialization(); + if (const auto * column_nullable = typeid_cast(elem.column.get())) + column_nullable->getExtremesNullLast(min_val, max_val); + else + elem.column->getExtremes(min_val, max_val); - serialization->deserializeBinary(min_val, istr, {}); - serialization->deserializeBinary(max_val, istr, {}); - - // NULL_LAST - if (min_val.isNull()) - min_val = POSITIVE_INFINITY; - if (max_val.isNull()) - max_val = POSITIVE_INFINITY; set_hyperrectangle.emplace_back(min_val, true, max_val, true); } } @@ -295,18 +278,16 @@ KeyCondition buildCondition(const IndexDescription & index, const ActionsDAGPtr } MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( - const String & index_name_, - const Block & index_sample_block, size_t max_rows_, const ActionsDAGPtr & filter_dag, ContextPtr context, const IndexDescription & index_description) - : index_name(index_name_) + : index_name(index_description.name) , max_rows(max_rows_) , index_data_types(index_description.data_types) , condition(buildCondition(index_description, filter_dag, context)) { - for (const auto & name : index_sample_block.getNames()) + for (const auto & name : index_description.sample_block.getNames()) if (!key_columns.contains(name)) key_columns.insert(name); @@ -605,7 +586,7 @@ MergeTreeIndexAggregatorPtr MergeTreeIndexSet::createIndexAggregator(const Merge MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( const 
ActionsDAGPtr & filter_actions_dag, ContextPtr context) const { - return std::make_shared(index.name, index.sample_block, max_rows, filter_actions_dag, context, index); + return std::make_shared(max_rows, filter_actions_dag, context, index); } MergeTreeIndexPtr setIndexCreator(const IndexDescription & index) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.h b/src/Storages/MergeTree/MergeTreeIndexSet.h index 85f6c73149b..168262360fc 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -83,8 +83,6 @@ class MergeTreeIndexConditionSet final : public IMergeTreeIndexCondition { public: MergeTreeIndexConditionSet( - const String & index_name_, - const Block & index_sample_block, size_t max_rows_, const ActionsDAGPtr & filter_dag, ContextPtr context, From 614e899e8d286b5b25f34f0cd94163905dceffeb Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 10 Jul 2024 18:52:09 +0200 Subject: [PATCH 0320/2361] onCancel is concurrent call --- .../Executors/CompletedPipelineExecutor.cpp | 7 +++ .../Formats/Impl/ArrowBlockInputFormat.h | 2 +- .../Formats/Impl/DWARFBlockInputFormat.h | 2 +- .../Formats/Impl/NativeORCBlockInputFormat.h | 2 +- .../Formats/Impl/ORCBlockInputFormat.h | 2 +- .../Impl/ParallelFormattingOutputFormat.h | 2 +- .../Impl/ParallelParsingInputFormat.cpp | 2 +- .../Formats/Impl/ParallelParsingInputFormat.h | 2 +- .../Formats/Impl/ParquetBlockInputFormat.h | 2 +- .../Formats/Impl/ParquetBlockOutputFormat.cpp | 2 +- .../Formats/Impl/ParquetBlockOutputFormat.h | 2 +- .../Formats/Impl/ParquetMetadataInputFormat.h | 2 +- src/Processors/Formats/LazyOutputFormat.h | 2 +- src/Processors/IProcessor.cpp | 2 +- src/Processors/IProcessor.h | 2 +- src/Processors/Sources/RemoteSource.cpp | 2 +- src/Processors/Sources/RemoteSource.h | 2 +- .../Transforms/AggregatingTransform.cpp | 2 +- src/Server/TCPHandler.cpp | 3 ++ src/Storages/Distributed/DistributedSink.cpp | 2 +- src/Storages/Distributed/DistributedSink.h | 2 +- src/Storages/LiveView/LiveViewEventsSource.h | 2 +- src/Storages/LiveView/LiveViewSource.h | 2 +- .../MergeTree/MergeTreeDataWriter.cpp | 4 ++ src/Storages/MergeTree/MergeTreeSink.cpp | 32 +++++++------ src/Storages/MergeTree/MergeTreeSink.h | 1 - src/Storages/MergeTree/MergeTreeSource.cpp | 2 +- src/Storages/MergeTree/MergeTreeSource.h | 2 +- .../MergeTree/ReplicatedMergeTreeSink.cpp | 48 ++++++++++++------- .../MergeTree/ReplicatedMergeTreeSink.h | 2 - src/Storages/MessageQueueSink.h | 6 ++- .../StorageObjectStorageSink.cpp | 28 ++++------- .../ObjectStorage/StorageObjectStorageSink.h | 8 +--- src/Storages/StorageFile.cpp | 37 ++++---------- src/Storages/StorageURL.cpp | 26 ++++------ src/Storages/StorageURL.h | 6 +-- 36 files changed, 123 insertions(+), 131 deletions(-) diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp index 598a51bf0c7..909e742ffbf 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.cpp +++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp @@ -3,6 +3,7 @@ #include #include #include +#include "Common/Logger.h" #include #include #include @@ -97,7 +98,10 @@ void CompletedPipelineExecutor::execute() break; if (is_cancelled_callback()) + { + LOG_INFO(getLogger("CompletedPipelineExecutor"), "execute CancelCallback FULLY_CANCELLED"); data->executor->cancel(); + } } if (data->has_exception) @@ -116,7 +120,10 @@ CompletedPipelineExecutor::~CompletedPipelineExecutor() try { if (data && data->executor) + { + 
LOG_INFO(getLogger("CompletedPipelineExecutor"), "~CompletedPipelineExecutor"); data->executor->cancel(); + } } catch (...) { diff --git a/src/Processors/Formats/Impl/ArrowBlockInputFormat.h b/src/Processors/Formats/Impl/ArrowBlockInputFormat.h index cdbc5e57e4e..4fe01d0be12 100644 --- a/src/Processors/Formats/Impl/ArrowBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ArrowBlockInputFormat.h @@ -32,7 +32,7 @@ public: private: Chunk read() override; - void onCancel() override + void onCancelX() override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/DWARFBlockInputFormat.h b/src/Processors/Formats/Impl/DWARFBlockInputFormat.h index d8f5fc3d896..6cab5d34994 100644 --- a/src/Processors/Formats/Impl/DWARFBlockInputFormat.h +++ b/src/Processors/Formats/Impl/DWARFBlockInputFormat.h @@ -32,7 +32,7 @@ public: protected: Chunk read() override; - void onCancel() override + void onCancelX() override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h index a3ef9ed4b8f..de9925e3737 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h @@ -64,7 +64,7 @@ public: protected: Chunk read() override; - void onCancel() override { is_stopped = 1; } + void onCancelX() override { is_stopped = 1; } private: void prepareFileReader(); diff --git a/src/Processors/Formats/Impl/ORCBlockInputFormat.h b/src/Processors/Formats/Impl/ORCBlockInputFormat.h index 34630345849..167436ad4b9 100644 --- a/src/Processors/Formats/Impl/ORCBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ORCBlockInputFormat.h @@ -34,7 +34,7 @@ public: protected: Chunk read() override; - void onCancel() override + void onCancelX() override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 341141dd633..40774fcfbfa 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -122,7 +122,7 @@ public: started_prefix = true; } - void onCancel() override + void onCancelX() override { finishAndWait(); } diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index 447adb1ed48..d38a299cb6e 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -176,7 +176,7 @@ Chunk ParallelParsingInputFormat::read() if (background_exception) { lock.unlock(); - onCancel(); + onCancelX(); std::rethrow_exception(background_exception); } diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index 963ccd88def..eed40dc43e5 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -137,7 +137,7 @@ private: Chunk read() final; - void onCancel() final + void onCancelX() final { /* * The format parsers themselves are not being cancelled here, so we'll diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index 24735ee4371..0123329f026 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -68,7 +68,7 @@ public: private: Chunk read() override; - void 
onCancel() override + void onCancelX() override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp index a5d334f4f1d..d08c91d286b 100644 --- a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp @@ -268,7 +268,7 @@ void ParquetBlockOutputFormat::resetFormatterImpl() staging_bytes = 0; } -void ParquetBlockOutputFormat::onCancel() +void ParquetBlockOutputFormat::onCancelX() { is_stopped = true; } diff --git a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h index 422bae5c315..0704ba0ed90 100644 --- a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h @@ -112,7 +112,7 @@ private: void consume(Chunk) override; void finalizeImpl() override; void resetFormatterImpl() override; - void onCancel() override; + void onCancelX() override; void writeRowGroup(std::vector chunks); void writeUsingArrow(std::vector chunks); diff --git a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h index ff63d78fa44..35180d202d8 100644 --- a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h @@ -65,7 +65,7 @@ public: private: Chunk read() override; - void onCancel() override + void onCancelX() override { is_stopped = 1; } diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index 9cf609ed2d7..a245df8234d 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -29,7 +29,7 @@ public: void setRowsBeforeLimit(size_t rows_before_limit) override; - void onCancel() override + void onCancelX() override { queue.clearAndFinish(); } diff --git a/src/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp index f403aca2280..4d95bb5f3e0 100644 --- a/src/Processors/IProcessor.cpp +++ b/src/Processors/IProcessor.cpp @@ -16,7 +16,7 @@ void IProcessor::cancel() if (already_cancelled) return; - onCancel(); + onCancelX(); } String IProcessor::debug() const diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 6f779e7a8d4..9ef7d83eefa 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -363,7 +363,7 @@ public: virtual void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr /* counter */) {} protected: - virtual void onCancel() {} + virtual void onCancelX() {} std::atomic is_cancelled{false}; diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 3d7dd3f76b8..44cf26e0b01 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -182,7 +182,7 @@ std::optional RemoteSource::tryGenerate() return chunk; } -void RemoteSource::onCancel() +void RemoteSource::onCancelX() { query_executor->cancel(); } diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index 052567bc261..880eb234bfb 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -36,7 +36,7 @@ public: protected: std::optional tryGenerate() override; - void onCancel() override; + void onCancelX() override; private: bool was_query_sent = false; diff --git a/src/Processors/Transforms/AggregatingTransform.cpp 
b/src/Processors/Transforms/AggregatingTransform.cpp index cdbe194cfac..e42c1fd3a8d 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -375,7 +375,7 @@ public: return prepareTwoLevel(); } - void onCancel() override + void onCancelX() override { shared_data->is_cancelled.store(true, std::memory_order_seq_cst); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index ac1423f87c1..9fffad26a72 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -556,7 +556,10 @@ void TCPHandler::runImpl() std::scoped_lock lock(out_mutex, task_callback_mutex); if (getQueryCancellationStatus() == CancellationStatus::FULLY_CANCELLED) + { + LOG_INFO(log, "CancelCallback FULLY_CANCELLED"); return true; + } sendProgress(); sendSelectProfileEvents(); diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index e556bda2561..6283594e0d2 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -596,7 +596,7 @@ void DistributedSink::onFinish() } } -void DistributedSink::onCancel() +void DistributedSink::onCancelX() { std::lock_guard lock(execution_mutex); if (pool && !pool->finished()) diff --git a/src/Storages/Distributed/DistributedSink.h b/src/Storages/Distributed/DistributedSink.h index a4c95633595..1dac3eeba6d 100644 --- a/src/Storages/Distributed/DistributedSink.h +++ b/src/Storages/Distributed/DistributedSink.h @@ -53,7 +53,7 @@ public: void onFinish() override; private: - void onCancel() override; + void onCancelX() override; IColumn::Selector createSelector(const Block & source_block) const; diff --git a/src/Storages/LiveView/LiveViewEventsSource.h b/src/Storages/LiveView/LiveViewEventsSource.h index de10a98e1a2..d1ed222c185 100644 --- a/src/Storages/LiveView/LiveViewEventsSource.h +++ b/src/Storages/LiveView/LiveViewEventsSource.h @@ -54,7 +54,7 @@ public: String getName() const override { return "LiveViewEventsSource"; } - void onCancel() override + void onCancelX() override { if (storage->shutdown_called) return; diff --git a/src/Storages/LiveView/LiveViewSource.h b/src/Storages/LiveView/LiveViewSource.h index f8b428fc04d..83589067cf5 100644 --- a/src/Storages/LiveView/LiveViewSource.h +++ b/src/Storages/LiveView/LiveViewSource.h @@ -36,7 +36,7 @@ public: String getName() const override { return "LiveViewSource"; } - void onCancel() override + void onCancelX() override { if (storage->shutdown_called) return; diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 5c8aa32949d..0aaa7909a0f 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -13,6 +13,8 @@ #include #include #include +#include "Common/Logger.h" +#include "Common/logger_useful.h" #include #include #include @@ -182,6 +184,8 @@ void updateTTL( void MergeTreeDataWriter::TemporaryPart::cancel() { + LOG_INFO(getLogger("MergeTreeDataWriter"), "TemporaryPart cancel"); + try { /// An exception context is needed to proper delete write buffers without finalization diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 2e8f6db6868..d2e34665962 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -26,7 +26,23 @@ struct MergeTreeSink::DelayedChunk }; -MergeTreeSink::~MergeTreeSink() = default; +MergeTreeSink::~MergeTreeSink() +{ 
+ size_t addr = delayed_chunk ? size_t(delayed_chunk.get()) : 0; + LOG_INFO(storage.log, "~ReplicatedMergeTreeSinkImpl, delayed_chunk {}, called from {}", addr, StackTrace().toString()); + + if (!delayed_chunk) + return; + + for (auto & partition : delayed_chunk->partitions) + { + partition.temp_part.cancel(); + } + + delayed_chunk.reset(); + + LOG_INFO(storage.log, "~ReplicatedMergeTreeSinkImpl end"); +} MergeTreeSink::MergeTreeSink( StorageMergeTree & storage_, @@ -51,22 +67,10 @@ void MergeTreeSink::onStart() void MergeTreeSink::onFinish() { + chassert(!isCancelled()); finishDelayedChunk(); } -void MergeTreeSink::onCancel() -{ - if (!delayed_chunk) - return; - - for (auto & partition : delayed_chunk->partitions) - { - partition.temp_part.cancel(); - } - - delayed_chunk.reset(); -} - void MergeTreeSink::consume(Chunk chunk) { if (num_blocks_processed > 0) diff --git a/src/Storages/MergeTree/MergeTreeSink.h b/src/Storages/MergeTree/MergeTreeSink.h index cf6715a3415..07ab3850df2 100644 --- a/src/Storages/MergeTree/MergeTreeSink.h +++ b/src/Storages/MergeTree/MergeTreeSink.h @@ -28,7 +28,6 @@ public: void consume(Chunk chunk) override; void onStart() override; void onFinish() override; - void onCancel() override; private: StorageMergeTree & storage; diff --git a/src/Storages/MergeTree/MergeTreeSource.cpp b/src/Storages/MergeTree/MergeTreeSource.cpp index e323b9f9ee7..4070ccf4433 100644 --- a/src/Storages/MergeTree/MergeTreeSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSource.cpp @@ -149,7 +149,7 @@ std::string MergeTreeSource::getName() const return processor->getName(); } -void MergeTreeSource::onCancel() +void MergeTreeSource::onCancelX() { processor->cancel(); } diff --git a/src/Storages/MergeTree/MergeTreeSource.h b/src/Storages/MergeTree/MergeTreeSource.h index fc39b4f9b09..c7092aa26b1 100644 --- a/src/Storages/MergeTree/MergeTreeSource.h +++ b/src/Storages/MergeTree/MergeTreeSource.h @@ -26,7 +26,7 @@ public: protected: std::optional tryGenerate() override; - void onCancel() override; + void onCancelX() override; private: MergeTreeSelectProcessorPtr processor; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 93f82a5a789..6c7ed9bdae0 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -4,11 +4,13 @@ #include #include #include "Common/Exception.h" +#include "Common/StackTrace.h" #include #include #include #include #include +#include "base/defines.h" #include #include #include @@ -151,7 +153,23 @@ ReplicatedMergeTreeSinkImpl::ReplicatedMergeTreeSinkImpl( } template -ReplicatedMergeTreeSinkImpl::~ReplicatedMergeTreeSinkImpl() = default; +ReplicatedMergeTreeSinkImpl::~ReplicatedMergeTreeSinkImpl() +{ + size_t addr = delayed_chunk ? 
size_t(delayed_chunk.get()) : 0; + LOG_INFO(log, "~ReplicatedMergeTreeSinkImpl, delayed_chunk {}, called from {}", addr, StackTrace().toString()); + + if (!delayed_chunk) + return; + + for (auto & partition : delayed_chunk->partitions) + { + partition.temp_part.cancel(); + } + + delayed_chunk.reset(); + + LOG_INFO(log, "~ReplicatedMergeTreeSinkImpl end"); +} template size_t ReplicatedMergeTreeSinkImpl::checkQuorumPrecondition(const ZooKeeperWithFaultInjectionPtr & zookeeper) @@ -255,6 +273,8 @@ size_t ReplicatedMergeTreeSinkImpl::checkQuorumPrecondition(const template void ReplicatedMergeTreeSinkImpl::consume(Chunk chunk) { + LOG_INFO(log, "consume"); + if (num_blocks_processed > 0) storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event, context, false); @@ -428,6 +448,9 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk chunk) template<> void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithFaultInjectionPtr & zookeeper) { + size_t addr = delayed_chunk ? size_t(delayed_chunk.get()) : 0; + LOG_INFO(log, "finishDelayedChunk {}", addr); + if (!delayed_chunk) return; @@ -457,16 +480,22 @@ void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithF { auto counters_snapshot = std::make_shared(partition.part_counters.getPartiallyAtomicSnapshot()); PartLog::addNewPart(storage.getContext(), PartLog::PartLogEntry(part, partition.elapsed_ns, counters_snapshot), ExecutionStatus::fromCurrentException("", true)); + + size_t addr1 = delayed_chunk ? size_t(delayed_chunk.get()) : 0; + LOG_INFO(log, "finishDelayedChunk exception, delayed_chunk {}", addr1); throw; } } delayed_chunk.reset(); + + LOG_INFO(log, "finishDelayedChunk end, delayed_chunk {}", bool(delayed_chunk)); } template<> void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithFaultInjectionPtr & zookeeper) { + if (!delayed_chunk) return; @@ -1151,25 +1180,12 @@ void ReplicatedMergeTreeSinkImpl::onStart() template void ReplicatedMergeTreeSinkImpl::onFinish() { + chassert(!isCancelled()); + auto zookeeper = storage.getZooKeeper(); finishDelayedChunk(std::make_shared(zookeeper)); } - -template -void ReplicatedMergeTreeSinkImpl::onCancel() -{ - if (!delayed_chunk) - return; - - for (auto & partition : delayed_chunk->partitions) - { - partition.temp_part.cancel(); - } - - delayed_chunk.reset(); -} - template void ReplicatedMergeTreeSinkImpl::waitForQuorum( const ZooKeeperWithFaultInjectionPtr & zookeeper, diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index b1796a35ed2..39623c20584 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -54,8 +54,6 @@ public: void consume(Chunk chunk) override; void onFinish() override; - void onCancel() override; - String getName() const override { return "ReplicatedMergeTreeSink"; } /// For ATTACHing existing data on filesystem. 
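/// The hunks above and below replace the sinks' onCancel() overrides with cleanup performed
/// in their destructors, guarded by isCancelled(). A minimal sketch of that pattern follows;
/// the class and member names are simplified illustrations (assumptions), not the actual
/// ClickHouse sink classes.
#include <atomic>
#include <optional>

struct PendingPartSketch
{
    /// Drop temporary files and write buffers without finalizing them.
    void cancel() noexcept {}
};

class SinkSketch
{
public:
    ~SinkSketch()
    {
        /// On cancellation the pipeline never calls onFinish(), so any delayed,
        /// half-written part is released here rather than in a concurrently invoked onCancel().
        if (cancelled.load() && pending)
            pending->cancel();
    }

    void consume() { pending.emplace(); /* buffer rows into the pending part */ }
    void onFinish() { /* commit the pending part */ pending.reset(); }
    void cancel() noexcept { cancelled.store(true); }

private:
    std::atomic<bool> cancelled{false};
    std::optional<PendingPartSketch> pending;
};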
diff --git a/src/Storages/MessageQueueSink.h b/src/Storages/MessageQueueSink.h index b3c1e61734f..38754e9475e 100644 --- a/src/Storages/MessageQueueSink.h +++ b/src/Storages/MessageQueueSink.h @@ -33,13 +33,17 @@ public: const String & storage_name_, const ContextPtr & context_); + ~MessageQueueSink() override + { + onFinish(); + } + String getName() const override { return storage_name + "Sink"; } void consume(Chunk chunk) override; void onStart() override; void onFinish() override; - void onCancel() override { onFinish(); } void onException(std::exception_ptr /* exception */) override { onFinish(); } protected: diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp index f2f6eac333c..3bd0e88ecdb 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp @@ -2,6 +2,7 @@ #include #include #include +#include "base/defines.h" #include namespace DB @@ -41,31 +42,16 @@ StorageObjectStorageSink::StorageObjectStorageSink( void StorageObjectStorageSink::consume(Chunk chunk) { - std::lock_guard lock(cancel_mutex); - if (cancelled) + if (isCancelled()) return; writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); } -void StorageObjectStorageSink::onCancel() -{ - std::lock_guard lock(cancel_mutex); - cancelBuffers(); - releaseBuffers(); - cancelled = true; -} - -void StorageObjectStorageSink::onException(std::exception_ptr) -{ - std::lock_guard lock(cancel_mutex); - cancelBuffers(); - releaseBuffers(); -} - void StorageObjectStorageSink::onFinish() { - std::lock_guard lock(cancel_mutex); + chassert(!isCancelled()); finalizeBuffers(); + releaseBuffers(); } void StorageObjectStorageSink::finalizeBuffers() @@ -119,6 +105,12 @@ PartitionedStorageObjectStorageSink::PartitionedStorageObjectStorageSink( { } +StorageObjectStorageSink::~StorageObjectStorageSink() +{ + if (isCancelled()) + cancelBuffers(); +} + SinkPtr PartitionedStorageObjectStorageSink::createSinkForPartition(const String & partition_id) { auto partition_bucket = replaceWildcards(configuration->getNamespace(), partition_id); diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSink.h b/src/Storages/ObjectStorage/StorageObjectStorageSink.h index e0081193686..578290a92a5 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSink.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSink.h @@ -18,22 +18,18 @@ public: ContextPtr context, const std::string & blob_path = ""); + ~StorageObjectStorageSink() override; + String getName() const override { return "StorageObjectStorageSink"; } void consume(Chunk chunk) override; - void onCancel() override; - - void onException(std::exception_ptr exception) override; - void onFinish() override; private: const Block sample_block; std::unique_ptr write_buf; OutputFormatPtr writer; - bool cancelled = false; - std::mutex cancel_mutex; void finalizeBuffers(); void releaseBuffers(); diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 7f39ff615f0..855667b1cc6 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -52,6 +52,7 @@ #include #include #include +#include "base/defines.h" #include #include @@ -1747,6 +1748,12 @@ public: initialize(); } + ~StorageFileSink() override + { + if (isCancelled()) + cancelBuffers(); + } + void initialize() { std::unique_ptr naked_buffer; @@ -1780,37 +1787,14 @@ public: void consume(Chunk chunk) override { - std::lock_guard cancel_lock(cancel_mutex); - if 
(cancelled) + if (isCancelled()) return; writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); } - void onCancel() override - { - std::lock_guard cancel_lock(cancel_mutex); - cancelBuffers(); - releaseBuffers(); - cancelled = true; - } - - void onException(std::exception_ptr exception) override - { - std::lock_guard cancel_lock(cancel_mutex); - try - { - std::rethrow_exception(exception); - } - catch (...) - { - /// An exception context is needed to proper delete write buffers without finalization - releaseBuffers(); - } - } - void onFinish() override { - std::lock_guard cancel_lock(cancel_mutex); + chassert(!isCancelled()); finalizeBuffers(); } @@ -1865,9 +1849,6 @@ private: int flags; std::unique_lock lock; - - std::mutex cancel_mutex; - bool cancelled = false; }; class PartitionedStorageFileSink : public PartitionedSink diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 895da028fc2..6f600393263 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -567,31 +567,15 @@ StorageURLSink::StorageURLSink( void StorageURLSink::consume(Chunk chunk) { - std::lock_guard lock(cancel_mutex); - if (cancelled) + if (isCancelled()) return; writer->write(getHeader().cloneWithColumns(chunk.detachColumns())); } -void StorageURLSink::onCancel() -{ - std::lock_guard lock(cancel_mutex); - cancelBuffers(); - releaseBuffers(); - cancelled = true; -} - -void StorageURLSink::onException(std::exception_ptr) -{ - std::lock_guard lock(cancel_mutex); - cancelBuffers(); - releaseBuffers(); -} - void StorageURLSink::onFinish() { - std::lock_guard lock(cancel_mutex); finalizeBuffers(); + releaseBuffers(); } void StorageURLSink::finalizeBuffers() @@ -1396,6 +1380,11 @@ StorageURLWithFailover::StorageURLWithFailover( } } +StorageURLSink::~StorageURLSink() +{ + if (isCancelled()) + cancelBuffers(); +} FormatSettings StorageURL::getFormatSettingsFromArgs(const StorageFactory::Arguments & args) { @@ -1586,4 +1575,5 @@ void registerStorageURL(StorageFactory & factory) .source_access_type = AccessType::URL, }); } + } diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index fa7cc6eeeef..12a49d3dff5 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -250,10 +250,10 @@ public: const HTTPHeaderEntries & headers = {}, const String & method = Poco::Net::HTTPRequest::HTTP_POST); + ~StorageURLSink() override; + std::string getName() const override { return "StorageURLSink"; } void consume(Chunk chunk) override; - void onCancel() override; - void onException(std::exception_ptr exception) override; void onFinish() override; private: @@ -263,8 +263,6 @@ private: std::unique_ptr write_buf; OutputFormatPtr writer; - std::mutex cancel_mutex; - bool cancelled = false; }; class StorageURL : public IStorageURLBase From a751719a33e2691426bdb057eaf509a74e84753d Mon Sep 17 00:00:00 2001 From: yariks5s Date: Wed, 10 Jul 2024 17:44:06 +0000 Subject: [PATCH 0321/2361] fixes due to review --- docs/en/operations/settings/settings.md | 6 +++++ docs/en/sql-reference/table-functions/file.md | 17 ++++++++++++ docs/en/sql-reference/table-functions/hdfs.md | 17 ++++++++++++ docs/en/sql-reference/table-functions/s3.md | 17 ++++++++++++ programs/obfuscator/Obfuscator.cpp | 3 +-- src/Formats/ReadSchemaUtils.cpp | 11 ++------ src/Formats/ReadSchemaUtils.h | 2 -- src/Storages/Hive/StorageHive.cpp | 2 +- .../DataLakes/IStorageDataLake.h | 4 +-- .../ObjectStorage/StorageObjectStorage.cpp | 14 ++++++---- .../StorageObjectStorageCluster.cpp | 2 +- 
.../StorageObjectStorageSource.cpp | 11 +++++--- .../StorageObjectStorageSource.h | 3 ++- .../StorageObjectStorageQueue.cpp | 2 +- src/Storages/StorageFile.cpp | 20 +++++++------- src/Storages/StorageFileCluster.cpp | 2 +- src/Storages/StorageURL.cpp | 23 +++++++++++----- src/Storages/StorageURLCluster.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 25 +++++++++++------- src/Storages/VirtualColumnUtils.h | 9 +++++-- src/TableFunctions/TableFunctionFormat.cpp | 10 +++---- .../03203_hive_style_partitioning.reference | 5 +++- .../03203_hive_style_partitioning.sh | 11 +++++--- .../array=[1,2,3]/float=42.42/sample.parquet | Bin 0 -> 1308 bytes .../number=42/date=2020-01-01/sample.parquet | Bin 0 -> 1308 bytes 25 files changed, 152 insertions(+), 66 deletions(-) create mode 100644 tests/queries/0_stateless/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet create mode 100644 tests/queries/0_stateless/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 1d74a63b972..e100e0f27f7 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5591,3 +5591,9 @@ Default value: `10000000`. Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached. Default value: `1GiB`. + +## use_hive_partitioning + +Allows the usage of Hive-style partitioning in queries. When enabled, ClickHouse interprets and maintains table partitions in a way that is consistent with the Hive partitioning scheme, which is commonly used in Hadoop ecosystems. + +Default value: `0`. diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 3a3162dad9a..88af3663552 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -198,6 +198,23 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-patitioning} + +When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. + +**Example** + +Use virtual column, created with Hive-style partitioning + +``` sql +SET use_hive_patitioning = 1; +SELECT _specified_column from file('/specified_column=specified_data/file.txt'); +``` + +``` reference +specified_data +``` + ## Settings {#settings} - [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 28cba5ccc6a..beb1ad12532 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -99,6 +99,23 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. 
If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When the setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns based on Hive-style partitioning if the path follows the corresponding `name=value` structure. + +**Example** + +Use a virtual column created from Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT _specified_column from hdfs('hdfs://hdfs1:9000/specified_column=specified_data/file.txt'); +``` + +``` reference +specified_data +``` + ## Storage Settings {#storage-settings} - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 35e5d86034c..45c4caa1a13 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -274,6 +274,23 @@ FROM s3( - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. In case of archive shows uncompressed file size of the file inside the archive. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When the setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns based on Hive-style partitioning if the path follows the corresponding `name=value` structure. + +**Example** + +Use a virtual column created from Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT _specified_column from s3('https://bucket.s3.amazonaws.com/specified_column=specified_data/file.txt'); +``` + +``` reference +specified_data +``` + ## Storage Settings {#storage-settings} - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 11e85bc1302..4c3981c1d01 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1308,8 +1308,7 @@ try SingleReadBufferIterator read_buffer_iterator(std::move(file)); - std::string sample_string; - schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, sample_string, context_const); + schema_columns = readSchemaFromFormat(input_format, {}, read_buffer_iterator, context_const); } else { diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index 1e70840f91f..1920459c378 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -94,7 +94,6 @@ std::pair readSchemaFromFormatImpl( std::optional format_name, const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, - std::string & sample_path, const ContextPtr & context) try { @@ -144,10 +143,6 @@ try { iterator_data = read_buffer_iterator.next(); - /// Extracting the File path for hive-style partitioning - if (sample_path.empty()) - sample_path = read_buffer_iterator.getLastFilePath(); - /// Read buffer iterator can determine the data format if it's unknown. /// For example by scanning schema cache or by finding new file with format extension.
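The documentation changes above describe the new `use_hive_partitioning` behavior only briefly, so here is a hedged usage sketch. The directory layout, partition keys (`city`, `year`) and file names are invented for illustration and are not part of this patch; the underscore-prefixed virtual column names follow the convention used in this patch's own docs and tests.

``` sql
-- Assumed layout on disk (not part of the patch):
--   data/city=London/year=2024/part-0.parquet
--   data/city=Paris/year=2023/part-0.parquet
SET use_hive_partitioning = 1;

-- Each key=value path segment becomes a virtual column (_city, _year here),
-- which can be selected, filtered and grouped on like ordinary columns.
SELECT _city, _year, count()
FROM file('data/city=*/year=*/*.parquet')
GROUP BY _city, _year;
```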
if (!format_name && iterator_data.format_name) @@ -541,19 +536,17 @@ ColumnsDescription readSchemaFromFormat( const String & format_name, const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, - std::string & sample_path, const ContextPtr & context) { - return readSchemaFromFormatImpl(format_name, format_settings, read_buffer_iterator, sample_path, context).first; + return readSchemaFromFormatImpl(format_name, format_settings, read_buffer_iterator, context).first; } std::pair detectFormatAndReadSchema( const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, - std::string & sample_path, const ContextPtr & context) { - return readSchemaFromFormatImpl(std::nullopt, format_settings, read_buffer_iterator, sample_path, context); + return readSchemaFromFormatImpl(std::nullopt, format_settings, read_buffer_iterator, context); } SchemaCache::Key getKeyForSchemaCache( diff --git a/src/Formats/ReadSchemaUtils.h b/src/Formats/ReadSchemaUtils.h index 6c562a06bf0..7168e7f0817 100644 --- a/src/Formats/ReadSchemaUtils.h +++ b/src/Formats/ReadSchemaUtils.h @@ -122,7 +122,6 @@ ColumnsDescription readSchemaFromFormat( const String & format_name, const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, - std::string & sample_path, const ContextPtr & context); /// Try to detect the format of the data and it's schema. @@ -132,7 +131,6 @@ ColumnsDescription readSchemaFromFormat( std::pair detectFormatAndReadSchema( const std::optional & format_settings, IReadBufferIterator & read_buffer_iterator, - std::string & sample_path, const ContextPtr & context); SchemaCache::Key getKeyForSchemaCache(const String & source, const String & format, const std::optional & format_settings, const ContextPtr & context); diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 28d8128e052..255dadea387 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -445,7 +445,7 @@ StorageHive::StorageHive( storage_metadata.partition_key = KeyDescription::getKeyFromAST(partition_by_ast, storage_metadata.columns, getContext()); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), getContext())); } void StorageHive::lazyInitialize() diff --git a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h index a651be6017f..ec8e740b1c9 100644 --- a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h +++ b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h @@ -89,9 +89,9 @@ public: { ConfigurationPtr configuration = base_configuration->clone(); configuration->setPaths(metadata->getDataFiles()); - std::string sample_string; + std::string sample_path; return Storage::resolveSchemaFromData( - object_storage_, configuration, format_settings_, sample_string, local_context); + object_storage_, configuration, format_settings_, sample_path, local_context); } } diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 8ab8b6b6881..48e9118e321 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -43,7 +43,8 @@ std::string StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata {}, // predicate 
metadata.getColumns().getAll(), // virtual_columns nullptr, // read_keys - {} // file_progress_callback + {}, // file_progress_callback + true // override_settings_for_hive_partitioning ); if (auto file = file_iterator->next(0)) @@ -86,7 +87,7 @@ StorageObjectStorage::StorageObjectStorage( else if (!context->getSettings().use_hive_partitioning) sample_path = ""; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), sample_path)); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context, sample_path)); setInMemoryMetadata(metadata); } @@ -396,7 +397,8 @@ ColumnsDescription StorageObjectStorage::resolveSchemaFromData( { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - return readSchemaFromFormat(configuration->format, format_settings, *iterator, sample_path, context); + sample_path = iterator->getLastFilePath(); + return readSchemaFromFormat(configuration->format, format_settings, *iterator, context); } std::string StorageObjectStorage::resolveFormatFromData( @@ -408,7 +410,8 @@ std::string StorageObjectStorage::resolveFormatFromData( { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - return detectFormatAndReadSchema(format_settings, *iterator, sample_path, context).second; + sample_path = iterator->getLastFilePath(); + return detectFormatAndReadSchema(format_settings, *iterator, context).second; } std::pair StorageObjectStorage::resolveSchemaAndFormatFromData( @@ -420,7 +423,8 @@ std::pair StorageObjectStorage::resolveSchemaAn { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - auto [columns, format] = detectFormatAndReadSchema(format_settings, *iterator, sample_path, context); + sample_path = iterator->getLastFilePath(); + auto [columns, format] = detectFormatAndReadSchema(format_settings, *iterator, context); configuration->format = format; return std::pair(columns, format); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp index 0dc4b845a47..92327b4cde0 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp @@ -41,7 +41,7 @@ StorageObjectStorageCluster::StorageObjectStorageCluster( metadata.setColumns(columns); metadata.setConstraints(constraints_); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_)); setInMemoryMetadata(metadata); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 88ae0a2319c..e5c9318de5d 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -105,7 +105,8 @@ std::shared_ptr StorageObjectStorageSourc const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, ObjectInfos * read_keys, - std::function file_progress_callback) + std::function file_progress_callback, + bool override_settings_for_hive_partitioning) { if (distributed_processing) return std::make_shared( @@ -122,11 +123,14 @@ std::shared_ptr StorageObjectStorageSourc std::unique_ptr iterator; if 
(configuration->isPathWithGlobs()) { + bool throw_on_zero_files_match = settings.throw_on_zero_files_match; + if (override_settings_for_hive_partitioning) + throw_on_zero_files_match = false; /// Iterate through disclosed globs and make a source for each file iterator = std::make_unique( object_storage, configuration, predicate, virtual_columns, local_context, is_archive ? nullptr : read_keys, settings.list_object_keys_size, - settings.throw_on_zero_files_match, file_progress_callback); + throw_on_zero_files_match, file_progress_callback); } else { @@ -204,7 +208,8 @@ Chunk StorageObjectStorageSource::generate() .size = object_info->isArchive() ? object_info->fileSizeInArchive() : object_info->metadata->size_bytes, .filename = &filename, .last_modified = object_info->metadata->last_modified, - }, object_info->getPath()); + .hive_partitioning_path = object_info->getPath(), + }); const auto & partition_columns = configuration->getPartitionColumns(); if (!partition_columns.empty() && chunk_size && chunk.hasColumns()) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.h b/src/Storages/ObjectStorage/StorageObjectStorageSource.h index 271b38fa75c..a99bb068372 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.h @@ -58,7 +58,8 @@ public: const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, ObjectInfos * read_keys, - std::function file_progress_callback = {}); + std::function file_progress_callback = {}, + bool override_settings_for_hive_partitioning = false); static std::string getUniqueStoragePathIdentifier( const Configuration & configuration, diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index 98f8cdc7e7a..a9239e3ad06 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -168,7 +168,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue( storage_metadata.setColumns(columns); storage_metadata.setConstraints(constraints_); storage_metadata.setComment(comment); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_)); setInMemoryMetadata(storage_metadata); LOG_INFO(log, "Using zookeeper path: {}", zk_path.string()); diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 9751d596fff..e6b9137444e 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -52,6 +52,7 @@ #include #include #include +#include "Formats/FormatSettings.h" #include #include @@ -880,11 +881,10 @@ std::pair StorageFile::getTableStructureAndFormatFro auto read_buffer_iterator = SingleReadBufferIterator(std::move(read_buf)); ColumnsDescription columns; - std::string sample_path; if (format) - columns = readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context); + columns = readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context); else - std::tie(columns, format) = detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); + std::tie(columns, format) = detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); peekable_read_buffer_from_fd = read_buffer_iterator.releaseBuffer(); if (peekable_read_buffer_from_fd) @@ -929,21 +929,20 @@ std::pair 
StorageFile::getTableStructureAndFormatFro } - std::string sample_path; if (archive_info) { ReadBufferFromArchiveIterator read_buffer_iterator(*archive_info, format, format_settings, context); if (format) - return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context), *format}; + return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context), *format}; - return detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); + return detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); } ReadBufferFromFileIterator read_buffer_iterator(paths, format, compression_method, format_settings, context); if (format) - return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context), *format}; + return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context), *format}; - return detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); + return detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); } ColumnsDescription StorageFile::getTableStructureFromFile( @@ -1102,7 +1101,7 @@ void StorageFile::setStorageMetadata(CommonArguments args) std::string path_for_virtuals; if (args.getContext()->getSettingsRef().use_hive_partitioning && !paths.empty()) path_for_virtuals = paths[0]; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), path_for_virtuals, format_settings.value_or(FormatSettings{}))); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), path_for_virtuals, format_settings.value_or(FormatSettings{}))); } @@ -1456,7 +1455,8 @@ Chunk StorageFileSource::generate() .size = current_file_size, .filename = (filename_override.has_value() ? 
&filename_override.value() : nullptr), .last_modified = current_file_last_modified, - }, hive_partitioning_path); + .hive_partitioning_path = hive_partitioning_path, + }); return chunk; } diff --git a/src/Storages/StorageFileCluster.cpp b/src/Storages/StorageFileCluster.cpp index d43e242f70c..f7684182e79 100644 --- a/src/Storages/StorageFileCluster.cpp +++ b/src/Storages/StorageFileCluster.cpp @@ -61,7 +61,7 @@ StorageFileCluster::StorageFileCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context)); } void StorageFileCluster::updateQueryToSendIfNeeded(DB::ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const DB::ContextPtr & context) diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 59c5465a381..5da42638b87 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -99,6 +99,17 @@ static ConnectionTimeouts getHTTPTimeouts(ContextPtr context) return ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), context->getServerSettings().keep_alive_timeout); } +String getSampleURI(String uri, ContextPtr context) +{ + if (urlWithGlobs(uri)) + { + auto uris = parseRemoteDescription(uri, 0, uri.size(), ',', context->getSettingsRef().glob_expansion_max_elements); + if (!uris.empty()) + return uris[0]; + } + return uri; +} + IStorageURLBase::IStorageURLBase( const String & uri_, const ContextPtr & context_, @@ -155,8 +166,8 @@ IStorageURLBase::IStorageURLBase( std::string uri_for_partitioning; if (context_->getSettingsRef().use_hive_partitioning) - uri_for_partitioning = uri; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), uri_for_partitioning, format_settings.value_or(FormatSettings{}))); + uri_for_partitioning = getSampleURI(uri, context_); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_, uri_for_partitioning, format_settings.value_or(FormatSettings{}))); } @@ -425,7 +436,8 @@ Chunk StorageURLSource::generate() { .path = curr_uri.getPath(), .size = current_file_size, - }, hive_partitioning_path); + .hive_partitioning_path = hive_partitioning_path, + }); return chunk; } @@ -959,10 +971,9 @@ std::pair IStorageURLBase::getTableStructureAndForma urls_to_check = {uri}; ReadBufferIterator read_buffer_iterator(urls_to_check, format, compression_method, headers, format_settings, context); - std::string sample_path; if (format) - return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, sample_path, context), *format}; - return detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); + return {readSchemaFromFormat(*format, format_settings, read_buffer_iterator, context), *format}; + return detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); } ColumnsDescription IStorageURLBase::getTableStructureFromData( diff --git a/src/Storages/StorageURLCluster.cpp b/src/Storages/StorageURLCluster.cpp index 2e7c63d0097..592bd71f546 100644 --- a/src/Storages/StorageURLCluster.cpp +++ b/src/Storages/StorageURLCluster.cpp @@ -75,7 +75,7 @@ StorageURLCluster::StorageURLCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - 
setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context)); } void StorageURLCluster::updateQueryToSendIfNeeded(ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const ContextPtr & context) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 379b14d8e51..b7669c65992 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -39,10 +39,13 @@ #include #include #include +#include +#include #include "Functions/FunctionsLogical.h" #include "Functions/IFunction.h" #include "Functions/IFunctionAdaptors.h" #include "Functions/indexHint.h" +#include #include #include #include @@ -116,7 +119,7 @@ NameSet getVirtualNamesForFileLikeStorage() return {"_path", "_file", "_size", "_time"}; } -std::map parseFromPath(const std::string& path) +std::map parseHivePartitioningKeysAndValues(const std::string& path) { std::string pattern = "/([^/]+)=([^/]+)"; re2::StringPiece input_piece(path); @@ -128,7 +131,7 @@ std::map parseFromPath(const std::string& path) return key_values; } -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, std::string path, FormatSettings settings) +VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, const ContextPtr & context, std::string path, std::optional format_settings_) { VirtualColumnsDescription desc; @@ -145,13 +148,17 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription add_virtual("_size", makeNullable(std::make_shared())); add_virtual("_time", makeNullable(std::make_shared())); - auto map = parseFromPath(path); - for (const auto& item : map) + auto map = parseHivePartitioningKeysAndValues(path); + for (auto& item : map) { - auto type = tryInferDataTypeForSingleField(item.second, settings); + auto format_settings = format_settings_ ? 
*format_settings_ : getFormatSettings(context); + auto type = tryInferDataTypeByEscapingRule(item.second, format_settings, FormatSettings::EscapingRule::Raw); if (type == nullptr) type = std::make_shared(); - add_virtual(item.first, std::make_shared(type)); + if (type->canBeInsideLowCardinality()) + add_virtual(item.first, std::make_shared(type)); + else + add_virtual(item.first, type); } return desc; @@ -215,9 +222,9 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values, const std::string & hive_partitioning_path) + VirtualsForFileLikeStorage virtual_values) { - auto hive_map = parseFromPath(hive_partitioning_path); + auto hive_map = parseHivePartitioningKeysAndValues(virtual_values.hive_partitioning_path); for (const auto & virtual_column : requested_virtual_columns) { if (virtual_column.name == "_path") @@ -265,7 +272,7 @@ void addRequestedFileLikeStorageVirtualsToChunk( auto it = hive_map.find(virtual_column.getNameInStorage()); if (it != hive_map.end()) { - chunk.addColumn(virtual_column.getTypeInStorage()->createColumnConst(chunk.getNumRows(), it->second)->convertToFullColumnIfConst()); + chunk.addColumn(virtual_column.type->createColumnConst(chunk.getNumRows(), convertFieldToType(Field(it->second), *virtual_column.type))->convertToFullColumnIfConst()); hive_map.erase(it); } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 72922be60bd..594253a32c1 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -50,7 +50,11 @@ auto extractSingleValueFromBlock(const Block & block, const String & name) } NameSet getVirtualNamesForFileLikeStorage(); -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, std::string path = "", FormatSettings settings = FormatSettings()); +VirtualColumnsDescription getVirtualsForFileLikeStorage( + const ColumnsDescription & storage_columns, + const ContextPtr & context, + std::string sample_path = "", + std::optional format_settings_ = std::nullopt); ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); @@ -77,13 +81,14 @@ struct VirtualsForFileLikeStorage std::optional size { std::nullopt }; const String * filename { nullptr }; std::optional last_modified { std::nullopt }; + const String & hive_partitioning_path = ""; }; std::map parseFromPath(const std::string& path); void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values, const std::string & hive_partitioning_path = ""); + VirtualsForFileLikeStorage virtual_values); } } diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp index 66152cb0c91..ad2a142a140 100644 --- a/src/TableFunctions/TableFunctionFormat.cpp +++ b/src/TableFunctions/TableFunctionFormat.cpp @@ -85,10 +85,9 @@ ColumnsDescription TableFunctionFormat::getActualTableStructure(ContextPtr conte if (structure == "auto") { SingleReadBufferIterator read_buffer_iterator(std::make_unique(data)); - std::string sample_path; if (format == "auto") - return detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, sample_path, context).first; - return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, sample_path, context); + 
return detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, context).first; + return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, context); } return parseColumnsListFromString(structure, context); } @@ -132,12 +131,11 @@ StoragePtr TableFunctionFormat::executeImpl(const ASTPtr & /*ast_function*/, Con String format_name = format; if (structure == "auto") { - std::string sample_path; SingleReadBufferIterator read_buffer_iterator(std::make_unique(data)); if (format_name == "auto") - std::tie(columns, format_name) = detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, sample_path, context); + std::tie(columns, format_name) = detectFormatAndReadSchema(std::nullopt, read_buffer_iterator, context); else - columns = readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, sample_path, context); + columns = readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, context); } else { diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index 6ef1fcdf652..e0f46caf1c8 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -60,7 +60,10 @@ Stanley Gibson Elizabeth Eugenia Greer Elizabeth Jeffery Delgado Elizabeth Clara Cross Elizabeth -Elizabeth Gordon Elizabeth +42 2020-01-01 +[1,2,3] 42.42 +Array(Int64) LowCardinality(Float64) +101 1 TESTING THE S3 PARTITIONING first last Elizabeth diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 334bfef4f02..9d805b39b8a 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -28,7 +28,13 @@ SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=E SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; SELECT *, _non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.parquet') WHERE column0 = _column0; + +SELECT _number, _date FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') LIMIT 1; +SELECT _array, _float FROM file('$CURDIR/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet') LIMIT 1; +SELECT toTypeName(_array), toTypeName(_float) FROM file('$CURDIR/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet') LIMIT 1; +SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE _number = 42; +""" $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; @@ -59,8 +65,7 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/colum SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _non_existing_column FROM 
url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = _column0;""" +SELECT *, _non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;""" $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; diff --git a/tests/queries/0_stateless/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? 
literal 0 HcmV?d00001 From c41424d197fe5e583100944bf4a2e47216e664c9 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 11 Jul 2024 10:52:55 +0800 Subject: [PATCH 0322/2361] Throw exception when datasketches is not enabled --- .../Statistics/StatisticsCountMinSketch.cpp | 37 ++++++--- .../Statistics/StatisticsCountMinSketch.h | 4 +- ...64_statistics_estimate_predicate.reference | 12 --- .../02864_statistics_estimate_predicate.sql | 82 +++++++++---------- 4 files changed, 69 insertions(+), 66 deletions(-) diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index 3e91b4052c4..c25372c69ee 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -13,7 +13,6 @@ namespace DB namespace ErrorCodes { -extern const int ILLEGAL_STATISTICS; extern const int ILLEGAL_TYPE_OF_ARGUMENT; } @@ -23,9 +22,7 @@ static constexpr auto num_hashes = 7uz; static constexpr auto num_buckets = 2718uz; StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) - : IStatistics(stat_) - , sketch(num_hashes, num_buckets) - , data_type(data_type_) + : IStatistics(stat_), sketch(num_hashes, num_buckets), data_type(data_type_) { } @@ -40,7 +37,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const if (val_converted.isNull()) return 0; - /// We will get the proper data type of val_converted, for example, UInt8 for 1, UInt16 for 257. + /// We will get the proper data type of val_converted, for example, Int8 for 1, Int16 for 257. auto data_type_converted = applyVisitor(FieldToDataType(), val_converted); DataTypes data_types = {data_type, data_type_converted}; auto super_type = tryGetLeastSupertype(data_types); @@ -58,7 +55,9 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const } throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Statistics 'count_min' does not support estimate constant value of type {}", val.getTypeName()); + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Statistics 'count_min' does not support estimate constant value of type {}", + val.getTypeName()); } void StatisticsCountMinSketch::update(const ColumnPtr & column) @@ -92,20 +91,36 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) sketch = Sketch::deserialize(bytes.data(), size); } +} + +#endif + + +namespace DB +{ + +namespace ErrorCodes +{ +extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME; +extern const int ILLEGAL_STATISTICS; +} + void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); - /// Numeric, String family, IPv4, IPv6, Date family, Enum family are supported. + /// Data types of Numeric, String family, IPv4, IPv6, Date family, Enum family are supported. 
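Since the validator above defines which column types may carry 'count_min' statistics, a short SQL sketch of how this statistics type is exercised may help. The table and column names are invented, and the `allow_experimental_statistics` setting is assumed to be required here; only the STATISTICS syntax itself is taken from this commit's test.

``` sql
SET allow_experimental_statistics = 1;

-- 'count_min' builds a Count-Min sketch per column for estimating equality
-- predicates; the validator restricts it to numeric, string family,
-- IPv4/IPv6, Date family and Enum columns.
CREATE TABLE hits
(
    user_id UInt64 STATISTICS(count_min),
    url String
)
ENGINE = MergeTree ORDER BY user_id;

-- Statistics can also be added and materialized later, mirroring the test in this commit:
ALTER TABLE hits ADD STATISTICS url TYPE count_min;
ALTER TABLE hits MATERIALIZE STATISTICS url;
```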
if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { +#if USE_DATASKETCHES return std::make_shared(stat, data_type); -} - -} - +#else + throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "Statistics of type 'count_min' is not supported in this build, to enable it turn on USE_DATASKETCHES when building."); #endif +} + +} diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index 2cc93d6e75a..f4d49d37bca 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -30,9 +30,9 @@ private: DataTypePtr data_type; }; +#endif + void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); } - -#endif diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference index 4e41c32750f..4027aeefe7b 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference @@ -11,15 +11,3 @@ Test statistics Uniq: Prewhere filter Prewhere filter column: and(equals(b, 0), equals(c, 0)) (removed) Test statistics count_min: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) -Test statistics multi-types: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) -Test LowCardinality and Nullable data type: -tab2 diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql index 91b4f2d05cb..6647182ab05 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql @@ -55,44 +55,44 @@ ALTER TABLE tab ADD STATISTICS b TYPE count_min; ALTER TABLE tab ADD STATISTICS c TYPE count_min; ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - - -SELECT 'Test statistics multi-types:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; -ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ 
and b = 0/*10*/ and a = '10000'/*0*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - -DROP TABLE IF EXISTS tab SYNC; - - -SELECT 'Test LowCardinality and Nullable data type:'; -DROP TABLE IF EXISTS tab2 SYNC; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE tab2 -( - a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), - b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), - c LowCardinality(Nullable(Int64)) STATISTICS(uniq, tdigest, count_min), - pk String, -) Engine = MergeTree() ORDER BY pk; - -select name from system.tables where name = 'tab2' and database = currentDatabase(); - -DROP TABLE IF EXISTS tab2 SYNC; +-- SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +-- FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx +-- WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +-- +-- ALTER TABLE tab DROP STATISTICS a, b, c; +-- +-- +-- SELECT 'Test statistics multi-types:'; +-- +-- ALTER TABLE tab ADD STATISTICS a TYPE count_min; +-- ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; +-- ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; +-- ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; +-- +-- SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +-- FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) +-- WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +-- +-- SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +-- FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) +-- WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +-- +-- ALTER TABLE tab DROP STATISTICS a, b, c; +-- +-- DROP TABLE IF EXISTS tab SYNC; +-- +-- +-- SELECT 'Test LowCardinality and Nullable data type:'; +-- DROP TABLE IF EXISTS tab2 SYNC; +-- SET allow_suspicious_low_cardinality_types=1; +-- CREATE TABLE tab2 +-- ( +-- a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), +-- b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), +-- c LowCardinality(Nullable(Int64)) STATISTICS(uniq, tdigest, count_min), +-- pk String, +-- ) Engine = MergeTree() ORDER BY pk; +-- +-- select name from system.tables where name = 'tab2' and database = currentDatabase(); +-- +-- DROP TABLE IF EXISTS tab2 SYNC; From 1f33eb32b0c80b9dde27a8d7aa9ad26c271aceae Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 11 Jul 2024 03:02:15 +0000 Subject: [PATCH 0323/2361] try to drop projection correctly --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Core/SettingsEnums.cpp | 3 +- src/Core/SettingsEnums.h | 3 +- src/Interpreters/InterpreterDeleteQuery.cpp | 61 +------------------ src/Interpreters/MutationsInterpreter.cpp | 6 +- src/Storages/MergeTree/MutateTask.cpp | 7 ++- ...61_lightweight_delete_projection.reference | 5 -- .../03161_lightweight_delete_projection.sql | 33 +--------- 9 files changed, 18 insertions(+), 104 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index c884f8f80c4..f7b44ea775c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -588,7 +588,7 @@ class IColumn; M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 
2 - wait all replicas if they exist.", 0) \ M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \ M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection of this table then do lightweight delete, or do lightweight delete then rebuild projections.", 0) \ + M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts.", 0) \ M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. This is useful for debugging and \"undelete\" scenarios", 0) \ M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \ M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 5174cf82c2e..194292a467e 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -70,7 +70,7 @@ static std::initializer_listgetSettingsRef().lightweight_mutation_projection_mode; - auto dropOrClearProjections = [&](bool isDrop) - { - std::vector all_projections = metadata_snapshot->projections.getAllRegisteredNames(); - - /// Drop projections first so that lightweight delete can be performed. - for (const auto & projection : all_projections) - { - String alter_query = - "ALTER TABLE " + table->getStorageID().getFullTableName() - + (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) - + (isDrop ? " DROP" : " CLEAR") +" PROJECTION " + projection; - - ParserAlterQuery parser; - ASTPtr alter_ast = parseQuery( - parser, - alter_query.data(), - alter_query.data() + alter_query.size(), - "ALTER query", - 0, - DBMS_DEFAULT_MAX_PARSER_DEPTH, - DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); - - InterpreterAlterQuery alter_interpreter(alter_ast, context); - alter_interpreter.execute(); - } - - return all_projections; - }; - if (mode == LightweightMutationProjectionMode::THROW) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, @@ -156,43 +127,13 @@ BlockIO InterpreterDeleteQuery::execute() } else if (mode == LightweightMutationProjectionMode::DROP) { - dropOrClearProjections(true); - } - else if (mode == LightweightMutationProjectionMode::REBUILD) - { - std::vector all_projections{dropOrClearProjections(false)}; - BlockIO res = lightweightDelete(); - - for (const auto & projection : all_projections) - { - String alter_query = - "ALTER TABLE " + table->getStorageID().getFullTableName() - + (delete_query.cluster.empty() ? 
"" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) - + " MATERIALIZE PROJECTION " + projection; - - ParserAlterQuery parser; - ASTPtr alter_ast = parseQuery( - parser, - alter_query.data(), - alter_query.data() + alter_query.size(), - "ALTER query", - 0, - DBMS_DEFAULT_MAX_PARSER_DEPTH, - DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); - - InterpreterAlterQuery alter_interpreter(alter_ast, context); - alter_interpreter.execute(); - } - - return res; + return lightweightDelete(); } else { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unrecognized lightweight_mutation_projection_mode, only throw and drop are allowed."); } - - return lightweightDelete(); } throw Exception(ErrorCodes::BAD_ARGUMENTS, diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 6d3a4f30b34..ace285bcfc9 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -487,7 +487,11 @@ static void validateUpdateColumns( if (column_name == RowExistsColumn::name) { if (!source.supportsLightweightDelete()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); + { + // if (!source.getStorage()->isMergeTree() + // || context->getSettingsRef().lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW) + // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); + } } else if (virtual_columns.tryGet(column_name)) { diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index a552ee89aee..8ca987eb1f8 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1042,6 +1042,8 @@ struct MutationContext /// Whether we need to count lightweight delete rows in this mutation bool count_lightweight_deleted_rows; + + bool lightweight_mutation_mode; }; using MutationContextPtr = std::shared_ptr; @@ -1571,7 +1573,7 @@ private: } else { - if (ctx->source_part->checksums.has(projection.getDirectoryName())) + if (!ctx->lightweight_mutation_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) entries_to_hardlink.insert(projection.getDirectoryName()); } } @@ -2255,7 +2257,8 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->updated_header.has(RowExistsColumn::name)) + ctx->lightweight_mutation_mode = ctx->updated_header.has(RowExistsColumn::name); + if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->lightweight_mutation_mode) { /// This mutation contains lightweight delete and we need to count the deleted rows, /// Reset existing_rows_count of new data part to 0 and it will be updated while writing _row_exists column diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index 307d3cb53fc..e69de29bb2d 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,5 +0,0 @@ -1231 John 33 -8888 Alice 50 -6666 Ksenia 48 -8888 Alice 50 -p users 3 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql 
index fb32646b46a..4e674fa0cfd 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -10,14 +10,12 @@ CREATE TABLE users ( ) ENGINE = MergeTree order by uid; INSERT INTO users VALUES (1231, 'John', 33); -INSERT INTO users VALUES (6666, 'Ksenia', 48); -INSERT INTO users VALUES (8888, 'Alice', 50); DELETE FROM users WHERE 1; -- { serverError NOT_IMPLEMENTED } -DELETE FROM users WHERE uid = 8888 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } +DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } -DELETE FROM users WHERE uid = 6666 SETTINGS lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'drop'; SYSTEM FLUSH LOGS; @@ -26,33 +24,8 @@ SELECT name, `table` FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users'); +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); SELECT * FROM users ORDER BY uid; -DROP TABLE users; - -CREATE TABLE users ( - uid Int16, - name String, - age Int16, - projection p (select * order by age) -) ENGINE = MergeTree order by uid; - -INSERT INTO users VALUES (1231, 'John', 33), (6666, 'Ksenia', 48), (8888, 'Alice', 50); - -DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'rebuild'; - -SELECT * FROM users ORDER BY uid; - -SYSTEM FLUSH LOGS; - --- expecting projection p with 3 rows is active -SELECT - name, - `table`, - rows, -FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND active = 1; - DROP TABLE users; \ No newline at end of file From 96d063bcc39712c5a21a8e51244a9e216af8536a Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 11 Jul 2024 12:10:44 +0800 Subject: [PATCH 0324/2361] renmae ut files --- src/Functions/printf.cpp | 118 +++++++++++------- ...erence => 03203_function_printf.reference} | 0 ...0_printf.sql => 03203_function_printf.sql} | 0 3 files changed, 73 insertions(+), 45 deletions(-) rename tests/queries/0_stateless/{032010_printf.reference => 03203_function_printf.reference} (100%) rename tests/queries/0_stateless/{032010_printf.sql => 03203_function_printf.sql} (100%) diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp index c7c6bd228a7..247c4a65daf 100644 --- a/src/Functions/printf.cpp +++ b/src/Functions/printf.cpp @@ -40,7 +40,7 @@ private: bool is_literal; /// format is literal string without any argument ColumnWithTypeAndName input; /// Only used when is_literal is false - ColumnWithTypeAndName execute() + ColumnWithTypeAndName execute() const { if (is_literal) return executeLiteral(format); @@ -50,7 +50,7 @@ private: return executeNonconstant(input); } - String toString() const + [[maybe_unused]] String toString() const { std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM oss << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() @@ -59,7 +59,7 @@ private: } private: - ColumnWithTypeAndName executeLiteral(std::string_view literal) + ColumnWithTypeAndName executeLiteral(std::string_view literal) const { ColumnWithTypeAndName res; auto str_col = ColumnString::create(); @@ -69,7 +69,7 @@ private: return res; } - ColumnWithTypeAndName executeConstant(const ColumnWithTypeAndName & arg) + ColumnWithTypeAndName 
executeConstant(const ColumnWithTypeAndName & arg) const { ColumnWithTypeAndName tmp_arg = arg; const auto & const_col = static_cast(*arg.column); @@ -79,57 +79,85 @@ private: return ColumnWithTypeAndName{ColumnConst::create(tmp_res.column, arg.column->size()), tmp_res.type, tmp_res.name}; } - ColumnWithTypeAndName executeNonconstant(const ColumnWithTypeAndName & arg) + template + bool executeNumber(const IColumn & column, ColumnString::Chars & res_chars, ColumnString::Offsets & res_offsets) const + { + const ColumnVector * concrete_column = checkAndGetColumn>(&column); + if (!concrete_column) + return false; + + String s; + size_t curr_offset = 0; + const auto & data = concrete_column->getData(); + for (size_t i = 0; i < data.size(); ++i) + { + T a = data[i]; + s = fmt::sprintf(format, static_cast>(a)); + memcpy(&res_chars[curr_offset], s.data(), s.size()); + res_chars[curr_offset + s.size()] = 0; + + curr_offset += s.size() + 1; + res_offsets[i] = curr_offset; + } + return true; + } + + template + bool executeString(const IColumn & column, ColumnString::Chars & res_chars, ColumnString::Offsets & res_offsets) const + { + const COLUMN * concrete_column = checkAndGetColumn(&column); + if (!concrete_column) + return false; + + String s; + size_t curr_offset = 0; + for (size_t i = 0; i < concrete_column->size(); ++i) + { + auto a = concrete_column->getDataAt(i).toView(); + s = fmt::sprintf(format, a); + memcpy(&res_chars[curr_offset], s.data(), s.size()); + res_chars[curr_offset + s.size()] = 0; + + curr_offset += s.size() + 1; + res_offsets[i] = curr_offset; + } + return true; + } + + ColumnWithTypeAndName executeNonconstant(const ColumnWithTypeAndName & arg) const { size_t size = arg.column->size(); auto res_col = ColumnString::create(); auto & res_str = static_cast(*res_col); auto & res_offsets = res_str.getOffsets(); auto & res_chars = res_str.getChars(); - res_offsets.reserve_exact(size); - res_chars.reserve(format.size() * size * 2); + res_offsets.resize_exact(size); + res_chars.reserve(format.size() * size); - String s; WhichDataType which(arg.type); - -#define EXECUTE_BY_TYPE(IS_TYPE, GET_TYPE) \ - else if (which.IS_TYPE()) \ - { \ - for (size_t i = 0; i < size; ++i) \ - { \ - auto a = arg.column->GET_TYPE(i); \ - s = fmt::sprintf(format, a); \ - res_str.insertData(s.data(), s.size()); \ - } \ - } - - if (false) - ; - EXECUTE_BY_TYPE(isNativeInt, getInt) - EXECUTE_BY_TYPE(isNativeUInt, getUInt) - EXECUTE_BY_TYPE(isFloat32, getFloat32) - EXECUTE_BY_TYPE(isFloat64, getFloat64) - else if (which.isStringOrFixedString()) + if (which.isNativeNumber() + && (executeNumber(*arg.column, res_chars, res_offsets) || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets) || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets))) { - for (size_t i = 0; i < size; ++i) - { - auto a = arg.column->getDataAt(i).toView(); - s = fmt::sprintf(format, a); - res_str.insertData(s.data(), s.size()); - } + return {std::move(res_col), std::make_shared(), arg.name}; } - else throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "The argument type of function {} is {}, but native numeric or string type is expected", - FunctionPrintf::name, - arg.type->getName()); -#undef EXECUTE_BY_TYPE - - ColumnWithTypeAndName res; - res.name = arg.name; - 
res.type = std::make_shared(); - res.column = std::move(res_col); - return res; + else if ( + which.isStringOrFixedString() + && (executeString(*arg.column, res_chars, res_offsets) + || executeString(*arg.column, res_chars, res_offsets))) + { + return {std::move(res_col), std::make_shared(), arg.name}; + } + else + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "The argument type of function {} is {}, but native numeric or string type is expected", + FunctionPrintf::name, + arg.type->getName()); } }; diff --git a/tests/queries/0_stateless/032010_printf.reference b/tests/queries/0_stateless/03203_function_printf.reference similarity index 100% rename from tests/queries/0_stateless/032010_printf.reference rename to tests/queries/0_stateless/03203_function_printf.reference diff --git a/tests/queries/0_stateless/032010_printf.sql b/tests/queries/0_stateless/03203_function_printf.sql similarity index 100% rename from tests/queries/0_stateless/032010_printf.sql rename to tests/queries/0_stateless/03203_function_printf.sql From 88851ddb569f9ae8c61420bde99d2ad5f3d76889 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 11 Jul 2024 12:15:44 +0800 Subject: [PATCH 0325/2361] improve uts --- .../0_stateless/03203_function_printf.reference | 5 +++++ .../queries/0_stateless/03203_function_printf.sql | 14 ++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03203_function_printf.reference b/tests/queries/0_stateless/03203_function_printf.reference index 58501cbd0fc..338ecb0183d 100644 --- a/tests/queries/0_stateless/03203_function_printf.reference +++ b/tests/queries/0_stateless/03203_function_printf.reference @@ -14,3 +14,8 @@ 1 1 1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/03203_function_printf.sql b/tests/queries/0_stateless/03203_function_printf.sql index 58fe081e499..c41cbf0b5e9 100644 --- a/tests/queries/0_stateless/03203_function_printf.sql +++ b/tests/queries/0_stateless/03203_function_printf.sql @@ -19,6 +19,16 @@ select printf('%%A: %A', 123.456) = '%A: 0X1.EDD2F1A9FBE77P+6'; -- Testing character formats select printf('%%s: %s', 'abc') = '%s: abc'; - -- Testing the %% specifier -select printf('%%%%: %%') = '%%: %'; \ No newline at end of file +select printf('%%%%: %%') = '%%: %'; + +-- Testing integer formats with precision +select printf('%%.5d: %.5d', 123) = '%.5d: 00123'; + +-- Testing floating point formats with precision +select printf('%%.2f: %.2f', 123.456) = '%.2f: 123.46'; +select printf('%%.2e: %.2e', 123.456) = '%.2e: 1.23e+02'; +select printf('%%.2g: %.2g', 123.456) = '%.2g: 1.2e+02'; + +-- Testing character formats with precision +select printf('%%.2s: %.2s', 'abc') = '%.2s: ab'; \ No newline at end of file From 465442ff7b9e7c7897c8dc754d1f6e4052303257 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 11 Jul 2024 11:19:44 +0800 Subject: [PATCH 0326/2361] Fix build error --- .../Statistics/StatisticsCountMinSketch.cpp | 14 ++-- .../Statistics/StatisticsCountMinSketch.h | 5 +- ...64_statistics_estimate_predicate.reference | 12 +++ .../02864_statistics_estimate_predicate.sql | 82 +++++++++---------- 4 files changed, 66 insertions(+), 47 deletions(-) diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index c25372c69ee..ff985b06ee3 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -113,14 +113,18 @@ void CountMinSketchValidator(const 
SingleStatisticsDescription &, DataTypePtr da if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } - +#if USE_DATASKETCHES StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { -#if USE_DATASKETCHES return std::make_shared(stat, data_type); -#else - throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "Statistics of type 'count_min' is not supported in this build, to enable it turn on USE_DATASKETCHES when building."); -#endif } +#else +StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription &, DataTypePtr) +{ + throw Exception( + ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, + "Statistics of type 'count_min' is not supported in this build, to enable it turn on USE_DATASKETCHES when building."); +} +#endif } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index f4d49d37bca..d6141f9f73a 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -30,9 +30,12 @@ private: DataTypePtr data_type; }; +} + #endif +namespace DB +{ void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); - } diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference index 4027aeefe7b..4e41c32750f 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference @@ -11,3 +11,15 @@ Test statistics Uniq: Prewhere filter Prewhere filter column: and(equals(b, 0), equals(c, 0)) (removed) Test statistics count_min: + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) +Test statistics multi-types: + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) +Test LowCardinality and Nullable data type: +tab2 diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql index 6647182ab05..91b4f2d05cb 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql +++ b/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql @@ -55,44 +55,44 @@ ALTER TABLE tab ADD STATISTICS b TYPE count_min; ALTER TABLE tab ADD STATISTICS c TYPE count_min; ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; --- SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') --- FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx --- WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; --- --- ALTER TABLE tab DROP STATISTICS a, b, c; --- --- --- SELECT 'Test statistics multi-types:'; --- --- ALTER TABLE tab ADD STATISTICS a TYPE count_min; --- ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; --- ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; --- ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; --- 
--- SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') --- FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) --- WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; --- --- SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') --- FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) --- WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; --- --- ALTER TABLE tab DROP STATISTICS a, b, c; --- --- DROP TABLE IF EXISTS tab SYNC; --- --- --- SELECT 'Test LowCardinality and Nullable data type:'; --- DROP TABLE IF EXISTS tab2 SYNC; --- SET allow_suspicious_low_cardinality_types=1; --- CREATE TABLE tab2 --- ( --- a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), --- b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), --- c LowCardinality(Nullable(Int64)) STATISTICS(uniq, tdigest, count_min), --- pk String, --- ) Engine = MergeTree() ORDER BY pk; --- --- select name from system.tables where name = 'tab2' and database = currentDatabase(); --- --- DROP TABLE IF EXISTS tab2 SYNC; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE tab DROP STATISTICS a, b, c; + + +SELECT 'Test statistics multi-types:'; + +ALTER TABLE tab ADD STATISTICS a TYPE count_min; +ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; +ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE tab DROP STATISTICS a, b, c; + +DROP TABLE IF EXISTS tab SYNC; + + +SELECT 'Test LowCardinality and Nullable data type:'; +DROP TABLE IF EXISTS tab2 SYNC; +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE tab2 +( + a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), + b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), + c LowCardinality(Nullable(Int64)) STATISTICS(uniq, tdigest, count_min), + pk String, +) Engine = MergeTree() ORDER BY pk; + +select name from system.tables where name = 'tab2' and database = currentDatabase(); + +DROP TABLE IF EXISTS tab2 SYNC; From d988399aa68ad8a6bc412bfc48d9fdefe63c1657 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 11 Jul 2024 14:32:24 +0800 Subject: [PATCH 0327/2361] fix failed uts --- src/Functions/printf.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp index 247c4a65daf..a890b886338 100644 --- a/src/Functions/printf.cpp +++ b/src/Functions/printf.cpp @@ -141,7 +141,9 @@ private: || executeNumber(*arg.column, res_chars, res_offsets) || executeNumber(*arg.column, res_chars, res_offsets) || executeNumber(*arg.column, res_chars, res_offsets) || executeNumber(*arg.column, 
res_chars, res_offsets) - || executeNumber(*arg.column, res_chars, res_offsets))) + || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets) + || executeNumber(*arg.column, res_chars, res_offsets))) { return {std::move(res_col), std::make_shared(), arg.name}; } From 21ca5f2d65c936a2c5b5fbc8f3f0c40d0ce60a6a Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 11 Jul 2024 15:32:05 +0800 Subject: [PATCH 0328/2361] fix failed ut --- .../02415_all_new_functions_must_be_documented.reference | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index a152066a460..873b6bbb660 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -562,6 +562,7 @@ positionCaseInsensitive positionCaseInsensitiveUTF8 positionUTF8 pow +printf proportionsZTest protocol queryID From fa65e374dcb66c2e927f52ea521e9a8586feef65 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 11 Jul 2024 11:57:33 +0200 Subject: [PATCH 0329/2361] fix docs --- docs/en/sql-reference/table-functions/file.md | 4 ++-- docs/en/sql-reference/table-functions/hdfs.md | 4 ++-- docs/en/sql-reference/table-functions/s3.md | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index d21f523ab8e..838a7ab61de 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -206,7 +206,7 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. -## Hive-style partitioning {#hive-style-patitioning} +## Hive-style partitioning {#hive-style-partitioning} When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. @@ -215,7 +215,7 @@ When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtu Use virtual column, created with Hive-style partitioning ``` sql -SET use_hive_patitioning = 1; +SET use_hive_partitioning = 1; SELECT _specified_column from file('/specified_column=specified_data/file.txt'); ``` diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index beb1ad12532..fc84c431066 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -99,7 +99,7 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. 
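These virtual columns can be selected like ordinary columns. For example, reusing the sample path from the query above (the address, glob and schema are placeholders):

``` sql
SELECT _path, _file, _size, _time
FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
LIMIT 5;
```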
-## Hive-style partitioning {#hive-style-patitioning} +## Hive-style partitioning {#hive-style-partitioning} When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. @@ -108,7 +108,7 @@ When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtu Use virtual column, created with Hive-style partitioning ``` sql -SET use_hive_patitioning = 1; +SET use_hive_partitioning = 1; SELECT _specified_column from HDFS('hdfs://hdfs1:9000/specified_column=specified_data/file.txt'); ``` diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 45c4caa1a13..15074a77475 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -274,7 +274,7 @@ FROM s3( - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. In case of archive shows uncompressed file size of the file inside the archive. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. -## Hive-style partitioning {#hive-style-patitioning} +## Hive-style partitioning {#hive-style-partitioning} When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. @@ -283,7 +283,7 @@ When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtu Use virtual column, created with Hive-style partitioning ``` sql -SET use_hive_patitioning = 1; +SET use_hive_partitioning = 1; SELECT _specified_column from HDFS('hdfs://hdfs1:9000/specified_column=specified_data/file.txt'); ``` From a84dd6b7710da54e89722318755d4e4e70984e2b Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 11 Jul 2024 10:37:00 +0000 Subject: [PATCH 0330/2361] Fix some review comments --- .../Transforms/MergeJoinTransform.cpp | 2 +- .../tests/gtest_full_sorting_join.cpp | 48 +++++++++++++------ .../00927_asof_join_correct_bt.reference | 8 ++++ .../00927_asof_join_correct_bt.sql | 3 ++ .../02276_full_sort_join_unsupported.sql | 2 - ...03145_asof_join_ddb_inequalities.reference | 5 ++ .../03145_asof_join_ddb_inequalities.sql | 12 +++-- .../03146_asof_join_ddb_merge_long.sql.j2 | 2 +- .../03149_asof_join_ddb_timestamps.sql | 2 +- 9 files changed, 59 insertions(+), 25 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 38a59cd6d9a..3b69ddaec06 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -43,7 +43,7 @@ FullMergeJoinCursorPtr createCursor(const Block & block, const Names & columns, return std::make_unique(block, desc, strictness == JoinStrictness::Asof); } -bool isNullAt(const IColumn & column, size_t row) +bool ALWAYS_INLINE isNullAt(const IColumn & column, size_t row) { if (const auto * nullable_column = checkAndGetColumn(&column)) return nullable_column->isNullAt(row); diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index 7294a1b381a..f678d7984e8 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -484,7 +484,7 @@ try right_source.addRow({"AAPL", 2, 98}); right_source.addRow({"AAPL", 3, 99}); right_source.addRow({"AMZN", 1, 100}); - right_source.addRow({"AMZN", 2, 
0}); + right_source.addRow({"AMZN", 2, 110}); right_source.addChunk(); right_source.addRow({"AMZN", 2, 110}); right_source.addChunk(); @@ -574,12 +574,15 @@ catch (Exception & e) TEST_F(FullSortingJoinTest, AsofLessGeneratedTestData) try { - auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); + /// Generate data random and build expected result at the same time. + /// Test specific combinations of join kind and inequality per each run + auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); auto asof_inequality = getRandomFrom(rng, { ASOFJoinInequality::Less, ASOFJoinInequality::LessOrEquals }); SCOPED_TRACE(fmt::format("{} {}", join_kind, asof_inequality)); + /// Key is complex, `k1, k2` for equality and `t` for asof SourceChunksBuilder left_source_builder({ {std::make_shared(), "k1"}, {std::make_shared(), "k2"}, @@ -594,9 +597,11 @@ try {std::make_shared(), "attr"}, }); + /// How small generated block should be left_source_builder.setBreakProbability(rng); right_source_builder.setBreakProbability(rng); + /// We are going to generate sorted data and remember expected result ColumnInt64::Container expected; UInt64 k1 = 1; @@ -604,29 +609,34 @@ try auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { + /// Generate new key greater than previous generateNextKey(rng, k1, k2); Int64 left_t = 0; + /// Generate several rows for the key size_t num_left_rows = std::uniform_int_distribution<>(1, 100)(rng); for (size_t i = 0; i < num_left_rows; ++i) { + /// t is strictly greater than previous left_t += std::uniform_int_distribution<>(1, 10)(rng); - left_source_builder.addRow({k1, k2, left_t, 10 * left_t}); - expected.push_back(10 * left_t); + auto left_arrtibute_value = 10 * left_t; + left_source_builder.addRow({k1, k2, left_t, left_arrtibute_value}); + expected.push_back(left_arrtibute_value); auto num_matches = 1 + std::poisson_distribution<>(4)(rng); - + /// Generate several matches in the right table auto right_t = left_t; for (size_t j = 0; j < num_matches; ++j) { int min_step = isStrict(asof_inequality) ? 1 : 0; right_t += std::uniform_int_distribution<>(min_step, 3)(rng); + /// First row should match bool is_match = j == 0; - right_source_builder.addRow({k1, k2, right_t, is_match ? 100 * left_t : -1}); + right_source_builder.addRow({k1, k2, right_t, is_match ? 10 * left_arrtibute_value : -1}); } - /// next left_t should be greater than right_t not to match with previous rows + /// Next left_t should be greater than right_t not to match with previous rows left_t = right_t; } @@ -650,7 +660,9 @@ try assertColumnVectorEq(expected, result_block, "t1.attr"); for (auto & e : expected) - e = e < 0 ? 0 : 10 * e; /// non matched rows from left table have negative attr + /// Non matched rows from left table have negative attr + /// Value if attribute in right table is 10 times greater than in left table + e = e < 0 ? 0 : 10 * e; assertColumnVectorEq(expected, result_block, "t2.attr"); } @@ -663,8 +675,10 @@ catch (Exception & e) TEST_F(FullSortingJoinTest, AsofGreaterGeneratedTestData) try { - auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); + /// Generate data random and build expected result at the same time. 
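    /// (Editorial illustration, not taken from this patch: in SQL terms the generated scenario
    /// roughly matches a query of the following shape, with hypothetical table and column names;
    /// only the key layout `k1, k2`, the asof column `t` and the `attr` payload mirror the test:
    ///
    ///     SET join_algorithm = 'full_sorting_merge';
    ///     SELECT t1.attr, t2.attr
    ///     FROM t1 ASOF LEFT JOIN t2
    ///         ON t1.k1 = t2.k1 AND t1.k2 = t2.k2 AND t1.t >= t2.t;
    ///
    /// Each left row picks the right row with equal keys and the greatest `t2.t` satisfying the
    /// inequality; unmatched left rows get default values under the LEFT kind.)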
+ /// Test specific combinations of join kind and inequality per each run + auto join_kind = getRandomFrom(rng, { JoinKind::Inner, JoinKind::Left }); auto asof_inequality = getRandomFrom(rng, { ASOFJoinInequality::Greater, ASOFJoinInequality::GreaterOrEquals }); SCOPED_TRACE(fmt::format("{} {}", join_kind, asof_inequality)); @@ -695,9 +709,10 @@ try auto key_num_total = std::uniform_int_distribution<>(1, 1000)(rng); for (size_t key_num = 0; key_num < key_num_total; ++key_num) { + /// Generate new key greater than previous generateNextKey(rng, k1, k2); - /// generate some rows with smaller left_t to check that they are not matched + /// Generate some rows with smaller left_t to check that they are not matched size_t num_left_rows = std::bernoulli_distribution(0.5)(rng) ? std::uniform_int_distribution<>(1, 100)(rng) : 0; for (size_t i = 0; i < num_left_rows; ++i) { @@ -713,21 +728,22 @@ try size_t num_right_matches = std::uniform_int_distribution<>(1, 10)(rng); auto right_t = left_t + std::uniform_int_distribution<>(isStrict(asof_inequality) ? 0 : 1, 10)(rng); + auto attribute_value = 10 * right_t; for (size_t j = 0; j < num_right_matches; ++j) { right_t += std::uniform_int_distribution<>(0, 3)(rng); bool is_match = j == num_right_matches - 1; - right_source_builder.addRow({k1, k2, right_t, is_match ? 100 * right_t : -1}); + right_source_builder.addRow({k1, k2, right_t, is_match ? 10 * attribute_value : -1}); } - /// next left_t should be greater than (or equals) right_t to match with previous rows + /// Next left_t should be greater than (or equals) right_t to match with previous rows left_t = right_t + std::uniform_int_distribution<>(isStrict(asof_inequality) ? 1 : 0, 100)(rng); size_t num_left_matches = std::uniform_int_distribution<>(1, 100)(rng); for (size_t j = 0; j < num_left_matches; ++j) { left_t += std::uniform_int_distribution<>(0, 3)(rng); - left_source_builder.addRow({k1, k2, left_t, 10 * right_t}); - expected.push_back(10 * right_t); + left_source_builder.addRow({k1, k2, left_t, attribute_value}); + expected.push_back(attribute_value); } } @@ -739,7 +755,9 @@ try assertColumnVectorEq(expected, result_block, "t1.attr"); for (auto & e : expected) - e = e < 0 ? 0 : 10 * e; /// non matched rows from left table have negative attr + /// Non matched rows from left table have negative attr + /// Value if attribute in right table is 10 times greater than in left table + e = e < 0 ? 
0 : 10 * e; assertColumnVectorEq(expected, result_block, "t2.attr"); } diff --git a/tests/queries/0_stateless/00927_asof_join_correct_bt.reference b/tests/queries/0_stateless/00927_asof_join_correct_bt.reference index a398f9604fd..28c48d2e290 100644 --- a/tests/queries/0_stateless/00927_asof_join_correct_bt.reference +++ b/tests/queries/0_stateless/00927_asof_join_correct_bt.reference @@ -1,28 +1,36 @@ +-- { echoOn } +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); 1 101 1 0 0 0 1 102 2 2 102 1 1 103 3 2 102 1 1 104 4 4 104 1 1 105 5 4 104 1 +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); 1 101 1 0 0 0 1 102 2 2 102 1 1 103 3 2 102 1 1 104 4 4 104 1 1 105 5 4 104 1 +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); 1 101 1 0 0 0 1 102 2 2 102 1 1 103 3 2 102 1 1 104 4 4 104 1 1 105 5 4 104 1 +SET join_algorithm = 'full_sorting_merge'; +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); 1 101 1 0 0 0 1 102 2 2 102 1 1 103 3 2 102 1 1 104 4 4 104 1 1 105 5 4 104 1 +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); 1 101 1 0 0 0 1 102 2 2 102 1 1 103 3 2 102 1 1 104 4 4 104 1 1 105 5 4 104 1 +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); 1 101 1 0 0 0 1 102 2 2 102 1 1 103 3 2 102 1 diff --git a/tests/queries/0_stateless/00927_asof_join_correct_bt.sql b/tests/queries/0_stateless/00927_asof_join_correct_bt.sql index 761d6bacde6..d796b62d3b3 100644 --- a/tests/queries/0_stateless/00927_asof_join_correct_bt.sql +++ b/tests/queries/0_stateless/00927_asof_join_correct_bt.sql @@ -13,6 +13,7 @@ INSERT INTO B2(k,t,b) VALUES (1,102,2), (1,104,4); CREATE TABLE B3(k UInt32, b UInt64, t UInt32) ENGINE = MergeTree() ORDER BY (k, t); INSERT INTO B3(k,t,b) VALUES (1,102,2), (1,104,4); +-- { echoOn } SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); @@ -22,6 +23,8 @@ SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); +-- { echoOff } + DROP TABLE B1; DROP TABLE B2; DROP TABLE B3; diff --git a/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql b/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql index 03936107563..0b10101d8f2 100644 --- a/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql +++ b/tests/queries/0_stateless/02276_full_sort_join_unsupported.sql @@ -19,8 +19,6 @@ SELECT * FROM t1 ANTI JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENT SELECT * FROM t1 SEMI JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } --- SELECT * FROM t1 ASOF JOIN t2 ON t1.key = t2.key AND t1.val > t2.val; -- { serverError NOT_IMPLEMENTED } - SELECT * FROM t1 ANY JOIN t2 ON t1.key = t2.key SETTINGS any_join_distinct_right_table_keys = 1; -- { serverError NOT_IMPLEMENTED } SELECT * FROM t1 JOIN t2 USING (key) SETTINGS join_use_nulls = 1; -- { serverError NOT_IMPLEMENTED } diff --git a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference 
b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference index 73c4f7dfe25..4aac918c98c 100644 --- a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference +++ b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.reference @@ -1,3 +1,4 @@ +- 2023-03-21 12:00:00 1970-01-01 00:00:00 -1 2023-03-21 13:00:00 1970-01-01 00:00:00 -1 2023-03-21 14:00:00 2023-03-21 13:00:00 0 @@ -9,6 +10,7 @@ 2023-03-21 20:00:00 2023-03-21 16:00:00 3 2023-03-21 21:00:00 2023-03-21 16:00:00 3 2027-10-18 11:03:27 2023-03-21 16:00:00 3 +- 2023-03-21 12:00:00 1970-01-01 00:00:00 -1 2023-03-21 13:00:00 1970-01-01 00:00:00 -1 2023-03-21 14:00:00 2023-03-21 13:00:00 0 @@ -32,6 +34,7 @@ 2023-03-21 20:00:00 2027-10-18 11:03:27 9 2023-03-21 21:00:00 2027-10-18 11:03:27 9 2027-10-18 11:03:27 2027-10-18 11:03:27 9 +- 2023-03-21 12:00:00 2023-03-21 13:00:00 0 2023-03-21 13:00:00 2023-03-21 13:00:00 0 2023-03-21 14:00:00 2023-03-21 14:00:00 1 @@ -44,6 +47,7 @@ 2023-03-21 21:00:00 2027-10-18 11:03:27 9 2027-10-18 11:03:27 2027-10-18 11:03:27 9 \N \N \N +- 2023-03-21 12:00:00 2023-03-21 13:00:00 0 2023-03-21 13:00:00 2023-03-21 14:00:00 1 2023-03-21 14:00:00 2023-03-21 15:00:00 2 @@ -54,6 +58,7 @@ 2023-03-21 19:00:00 2027-10-18 11:03:27 9 2023-03-21 20:00:00 2027-10-18 11:03:27 9 2023-03-21 21:00:00 2027-10-18 11:03:27 9 +- 2023-03-21 12:00:00 2023-03-21 13:00:00 0 2023-03-21 13:00:00 2023-03-21 14:00:00 1 2023-03-21 14:00:00 2023-03-21 15:00:00 2 diff --git a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql index ce4badbd597..d67aa254bd6 100644 --- a/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql +++ b/tests/queries/0_stateless/03145_asof_join_ddb_inequalities.sql @@ -21,18 +21,19 @@ INSERT INTO probe0 VALUES (NULL),('9999-12-31 23:59:59'); SET join_use_nulls = 1; +SELECT '-'; SELECT p.begin, e.begin, e.value FROM probe0 p ASOF JOIN events0 e ON p.begin > e.begin ORDER BY p.begin ASC; +SELECT '-'; SELECT p.begin, e.begin, e.value FROM probe0 p ASOF LEFT JOIN events0 e ON p.begin > e.begin -ORDER BY p.begin ASC -; +ORDER BY p.begin ASC; SELECT p.begin, e.begin, e.value FROM probe0 p @@ -40,25 +41,26 @@ ASOF JOIN events0 e ON p.begin <= e.begin ORDER BY p.begin ASC; +SELECT '-'; SELECT p.begin, e.begin, e.value FROM probe0 p ASOF LEFT JOIN events0 e ON p.begin <= e.begin ORDER BY p.begin ASC; +SELECT '-'; SELECT p.begin, e.begin, e.value FROM probe0 p ASOF JOIN events0 e ON p.begin < e.begin -ORDER BY p.begin ASC -; +ORDER BY p.begin ASC; +SELECT '-'; SELECT p.begin, e.begin, e.value FROM probe0 p ASOF LEFT JOIN events0 e ON p.begin < e.begin ORDER BY p.begin ASC; - DROP TABLE IF EXISTS events0; DROP TABLE IF EXISTS probe0; diff --git a/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 index 44c54ae2a39..49ba70c471e 100644 --- a/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 +++ b/tests/queries/0_stateless/03146_asof_join_ddb_merge_long.sql.j2 @@ -8,7 +8,7 @@ SET session_timezone = 'UTC'; SET join_algorithm = '{{ join_algorithm }}'; --- TODO: support enable for full_sorting_merge +-- TODO: enable once USING and `join_use_nulls` is supported by `full_sorting_merge` -- SET join_use_nulls = 1; WITH build AS ( diff --git a/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql b/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql index ff4518a3775..cd83d62dc70 100644 --- 
a/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql +++ b/tests/queries/0_stateless/03149_asof_join_ddb_timestamps.sql @@ -87,7 +87,7 @@ FROM probe0 p ASOF LEFT JOIN ( SELECT * FROM events0 WHERE log(value + 5) > 10 - ) e ON p.begin >= e.begin + ) e ON p.begin + INTERVAL 2 HOUR >= e.begin + INTERVAL 1 HOUR ORDER BY p.begin ASC; From eb085ea585d10f077d1ce66ee3f663ca016d24e8 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 11 Jul 2024 13:06:29 +0000 Subject: [PATCH 0331/2361] fix --- src/Interpreters/MutationsInterpreter.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index ace285bcfc9..c2341463041 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -488,7 +488,7 @@ static void validateUpdateColumns( { if (!source.supportsLightweightDelete()) { - // if (!source.getStorage()->isMergeTree() + // if (!source.getStorage()->isMergeTree() // || context->getSettingsRef().lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW) // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); } From df104abcc60366df3946a23c52c2e67a92dcb545 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 11 Jul 2024 17:20:19 +0200 Subject: [PATCH 0332/2361] try to fix tests --- .../03203_hive_style_partitioning.reference | 8 ++++---- .../03203_hive_style_partitioning.sh | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index e0f46caf1c8..0e6b6052946 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -29,6 +29,10 @@ Eugenia Greer Elizabeth Jeffery Delgado Elizabeth Clara Cross Elizabeth Elizabeth Gordon Elizabeth +42 2020-01-01 +[1,2,3] 42.42 +Array(Int64) LowCardinality(Float64) +101 1 TESTING THE URL PARTITIONING first last Elizabeth @@ -60,10 +64,6 @@ Stanley Gibson Elizabeth Eugenia Greer Elizabeth Jeffery Delgado Elizabeth Clara Cross Elizabeth -42 2020-01-01 -[1,2,3] 42.42 -Array(Int64) LowCardinality(Float64) -101 1 TESTING THE S3 PARTITIONING first last Elizabeth diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 9d805b39b8a..e74f24bfd80 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -5,10 +5,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" +$CLICKHOUSE_CLIENT -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 1; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -36,17 +36,17 @@ SELECT toTypeName(_array), toTypeName(_float) FROM file('$CURDIR/data_hive/parti SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE _number = 42; """ -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" -$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" +$CLICKHOUSE_CLIENT -q "SELECT 'TESTING THE URL PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 1; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -67,17 +67,17 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/colum SELECT *, _non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;""" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" -$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" +$CLICKHOUSE_CLIENT -q "SELECT 'TESTING THE S3 PARTITIONING'" -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 1; SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -100,7 +100,7 @@ SELECT *, _non_existing_column FROM s3('http://localhost:11111/test/hive_partiti SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = _column0; """ -$CLICKHOUSE_LOCAL -n -q """ +$CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; From 9257c4aac299836dc3b1e215c8fd8ba9b190d3b4 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 11 Jul 2024 15:31:51 +0000 Subject: [PATCH 0333/2361] change support lightweight delete condition --- src/Interpreters/InterpreterDeleteQuery.cpp | 5 +++-- src/Interpreters/MutationsInterpreter.cpp | 6 +----- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 4 +--- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 0f081c522dd..a7d0264f0b0 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -60,6 +60,7 @@ BlockIO InterpreterDeleteQuery::execute() auto table_lock = table->lockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = table->getInMemoryMetadataPtr(); + bool hasProjection = table->hasProjection(); auto lightweightDelete = [&]() { @@ -107,13 +108,13 @@ BlockIO InterpreterDeleteQuery::execute() table->mutate(mutation_commands, getContext()); return {}; } - else if 
(table->supportsLightweightDelete()) + else if (!hasProjection && table->supportsLightweightDelete()) { return lightweightDelete(); } else { - if (table->hasProjection()) + if (hasProjection) { auto context = Context::createCopy(getContext()); auto mode = context->getSettingsRef().lightweight_mutation_projection_mode; diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index c2341463041..6d3a4f30b34 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -487,11 +487,7 @@ static void validateUpdateColumns( if (column_name == RowExistsColumn::name) { if (!source.supportsLightweightDelete()) - { - // if (!source.getStorage()->isMergeTree() - // || context->getSettingsRef().lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW) - // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); - } + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); } else if (virtual_columns.tryGet(column_name)) { diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index c2e0e778220..0ef8bcfc681 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1641,11 +1641,9 @@ void IMergeTreeDataPart::loadColumns(bool require) } -/// Project part / part with project parts / compact part doesn't support LWD. bool IMergeTreeDataPart::supportLightweightDeleteMutate() const { - return (part_type == MergeTreeDataPartType::Wide || part_type == MergeTreeDataPartType::Compact) && - parent_part == nullptr && projection_parts.empty(); + return (part_type == MergeTreeDataPartType::Wide || part_type == MergeTreeDataPartType::Compact); } bool IMergeTreeDataPart::hasLightweightDelete() const From 4f11dbc7f372d46769da4ab3af6db83b7967faa0 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 11 Jul 2024 18:25:33 +0000 Subject: [PATCH 0334/2361] fix with wide part --- src/Storages/MergeTree/MutateTask.cpp | 11 +++--- .../03161_lightweight_delete_projection.sql | 36 ++++++++++++++++++- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 8ca987eb1f8..57784067720 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1043,7 +1043,7 @@ struct MutationContext /// Whether we need to count lightweight delete rows in this mutation bool count_lightweight_deleted_rows; - bool lightweight_mutation_mode; + bool lightweight_delete_mode; }; using MutationContextPtr = std::shared_ptr; @@ -1573,7 +1573,7 @@ private: } else { - if (!ctx->lightweight_mutation_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) + if (!ctx->lightweight_delete_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) entries_to_hardlink.insert(projection.getDirectoryName()); } } @@ -1843,7 +1843,8 @@ private: hardlinked_files.insert(it->name()); } } - else if (!endsWith(it->name(), ".tmp_proj")) // ignore projection tmp merge dir + /// Ignore projection tmp merge dir, and under lightweight delete mode ignore projection files. 
+ else if (!endsWith(it->name(), ".tmp_proj") && !ctx->lightweight_delete_mode) { // it's a projection part directory ctx->new_data_part->getDataPartStorage().createProjection(destination); @@ -2257,8 +2258,8 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - ctx->lightweight_mutation_mode = ctx->updated_header.has(RowExistsColumn::name); - if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->lightweight_mutation_mode) + ctx->lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->lightweight_delete_mode) { /// This mutation contains lightweight delete and we need to count the deleted rows, /// Reset existing_rows_count of new data part to 0 and it will be updated while writing _row_exists column diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 4e674fa0cfd..bfeb0127fa4 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,13 +1,47 @@ DROP TABLE IF EXISTS users; +-- compact part CREATE TABLE users ( uid Int16, name String, age Int16, projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) -) ENGINE = MergeTree order by uid; +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 10485760; + +INSERT INTO users VALUES (1231, 'John', 33); + +DELETE FROM users WHERE 1; -- { serverError NOT_IMPLEMENTED } + +DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } + +DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'drop'; + +SYSTEM FLUSH LOGS; + +-- expecting no projection +SELECT + name, + `table` +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); + +SELECT * FROM users ORDER BY uid; + +DROP TABLE users; + + +-- wide part +CREATE TABLE users ( + uid Int16, + name String, + age Int16, + projection p1 (select count(), age group by age), + projection p2 (select age, name group by age, name) +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 0; INSERT INTO users VALUES (1231, 'John', 33); From df9211c345e8bcfc53ed392a351e6320991240d1 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 11 Jul 2024 18:32:38 +0000 Subject: [PATCH 0335/2361] fix --- src/Storages/MergeTree/MutateTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 57784067720..2adcb49d6a3 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1844,7 +1844,7 @@ private: } } /// Ignore projection tmp merge dir, and under lightweight delete mode ignore projection files. 
- else if (!endsWith(it->name(), ".tmp_proj") && !ctx->lightweight_delete_mode) + else if (!endsWith(it->name(), ".tmp_proj") && !ctx->lightweight_delete_mode) { // it's a projection part directory ctx->new_data_part->getDataPartStorage().createProjection(destination); From 10dd4a9fe66914899ec5e5c89e5b9cc24096f64c Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 11 Jul 2024 21:16:47 +0200 Subject: [PATCH 0336/2361] debugging tests --- .../0_stateless/03203_hive_style_partitioning.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index e74f24bfd80..14b4a116596 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -5,10 +5,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -36,17 +36,17 @@ SELECT toTypeName(_array), toTypeName(_float) FROM file('$CURDIR/data_hive/parti SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE _number = 42; """ -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" -$CLICKHOUSE_CLIENT -q "SELECT 'TESTING THE URL PARTITIONING'" +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; @@ -67,14 +67,14 @@ SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/colum SELECT *, _non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;""" -$CLICKHOUSE_CLIENT -n -q """ +$CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -c "UNKNOWN_IDENTIFIER" -$CLICKHOUSE_CLIENT -q "SELECT 'TESTING THE S3 PARTITIONING'" +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" $CLICKHOUSE_CLIENT -n -q """ From 243edcc8aa622d47a90cb4fba33f5079269e39b8 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 12 Jul 2024 01:01:52 +0200 Subject: [PATCH 0337/2361] add logs and metrics about rejected connections in Poco --- .../Foundation/include/Poco/ErrorHandler.h | 8 + base/poco/Foundation/src/ErrorHandler.cpp | 92 ++++--- base/poco/Net/src/SocketImpl.cpp | 1 + base/poco/Net/src/TCPServer.cpp | 212 +++++++-------- base/poco/Net/src/TCPServerDispatcher.cpp | 247 ++++++++++-------- programs/keeper/Keeper.cpp | 2 +- programs/server/Server.cpp | 4 +- src/Common/AsynchronousMetrics.cpp | 31 ++- src/Common/AsynchronousMetrics.h | 1 + src/Common/ErrorHandlers.h | 25 ++ src/Server/ProtocolServerAdapter.cpp | 2 + 
src/Server/ProtocolServerAdapter.h | 3 + 12 files changed, 367 insertions(+), 261 deletions(-) diff --git a/base/poco/Foundation/include/Poco/ErrorHandler.h b/base/poco/Foundation/include/Poco/ErrorHandler.h index c0b5bf9621e..961fec2bc3b 100644 --- a/base/poco/Foundation/include/Poco/ErrorHandler.h +++ b/base/poco/Foundation/include/Poco/ErrorHandler.h @@ -21,6 +21,7 @@ #include "Poco/Exception.h" #include "Poco/Foundation.h" #include "Poco/Mutex.h" +#include "Poco/Message.h" namespace Poco @@ -78,6 +79,10 @@ public: /// /// The default implementation just breaks into the debugger. + virtual void logMessageImpl(Message::Priority priority, const std::string & msg) {} + /// Write a messages to the log + /// Useful for logging from Poco + static void handle(const Exception & exc); /// Invokes the currently registered ErrorHandler. @@ -87,6 +92,9 @@ public: static void handle(); /// Invokes the currently registered ErrorHandler. + static void logMessage(Message::Priority priority, const std::string & msg); + /// Invokes the currently registered ErrorHandler. + static ErrorHandler * set(ErrorHandler * pHandler); /// Registers the given handler as the current error handler. /// diff --git a/base/poco/Foundation/src/ErrorHandler.cpp b/base/poco/Foundation/src/ErrorHandler.cpp index d0af8ea8a12..1d0a41b77ec 100644 --- a/base/poco/Foundation/src/ErrorHandler.cpp +++ b/base/poco/Foundation/src/ErrorHandler.cpp @@ -8,7 +8,7 @@ // Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH. // and Contributors. // -// SPDX-License-Identifier: BSL-1.0 +// SPDX-License-Identifier: BSL-1.0 // @@ -35,79 +35,91 @@ ErrorHandler::~ErrorHandler() void ErrorHandler::exception(const Exception& exc) { - poco_debugger_msg(exc.what()); + poco_debugger_msg(exc.what()); } - + void ErrorHandler::exception(const std::exception& exc) { - poco_debugger_msg(exc.what()); + poco_debugger_msg(exc.what()); } void ErrorHandler::exception() { - poco_debugger_msg("unknown exception"); + poco_debugger_msg("unknown exception"); } void ErrorHandler::handle(const Exception& exc) { - FastMutex::ScopedLock lock(_mutex); - try - { - _pHandler->exception(exc); - } - catch (...) - { - } + FastMutex::ScopedLock lock(_mutex); + try + { + _pHandler->exception(exc); + } + catch (...) + { + } } - + void ErrorHandler::handle(const std::exception& exc) { - FastMutex::ScopedLock lock(_mutex); - try - { - _pHandler->exception(exc); - } - catch (...) - { - } + FastMutex::ScopedLock lock(_mutex); + try + { + _pHandler->exception(exc); + } + catch (...) + { + } } void ErrorHandler::handle() { - FastMutex::ScopedLock lock(_mutex); - try - { - _pHandler->exception(); - } - catch (...) - { - } + FastMutex::ScopedLock lock(_mutex); + try + { + _pHandler->exception(); + } + catch (...) + { + } +} + +void ErrorHandler::logMessage(Message::Priority priority, const std::string & msg) +{ + FastMutex::ScopedLock lock(_mutex); + try + { + _pHandler->logMessageImpl(priority, msg); + } + catch (...) 
+ { + } } ErrorHandler* ErrorHandler::set(ErrorHandler* pHandler) { - poco_check_ptr(pHandler); + poco_check_ptr(pHandler); - FastMutex::ScopedLock lock(_mutex); - ErrorHandler* pOld = _pHandler; - _pHandler = pHandler; - return pOld; + FastMutex::ScopedLock lock(_mutex); + ErrorHandler* pOld = _pHandler; + _pHandler = pHandler; + return pOld; } ErrorHandler* ErrorHandler::defaultHandler() { - // NOTE: Since this is called to initialize the static _pHandler - // variable, sh has to be a local static, otherwise we run - // into static initialization order issues. - static SingletonHolder sh; - return sh.get(); + // NOTE: Since this is called to initialize the static _pHandler + // variable, sh has to be a local static, otherwise we run + // into static initialization order issues. + static SingletonHolder sh; + return sh.get(); } diff --git a/base/poco/Net/src/SocketImpl.cpp b/base/poco/Net/src/SocketImpl.cpp index 484b8cfeec3..13a655d153d 100644 --- a/base/poco/Net/src/SocketImpl.cpp +++ b/base/poco/Net/src/SocketImpl.cpp @@ -17,6 +17,7 @@ #include "Poco/Net/StreamSocketImpl.h" #include "Poco/NumberFormatter.h" #include "Poco/Timestamp.h" +#include "Poco/ErrorHandler.h" #include // FD_SET needs memset on some platforms, so we can't use diff --git a/base/poco/Net/src/TCPServer.cpp b/base/poco/Net/src/TCPServer.cpp index 9bdae900bd6..b957829fb7d 100644 --- a/base/poco/Net/src/TCPServer.cpp +++ b/base/poco/Net/src/TCPServer.cpp @@ -8,7 +8,7 @@ // Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH. // and Contributors. // -// SPDX-License-Identifier: BSL-1.0 +// SPDX-License-Identifier: BSL-1.0 // @@ -44,190 +44,194 @@ TCPServerConnectionFilter::~TCPServerConnectionFilter() TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::UInt16 portNumber, TCPServerParams::Ptr pParams): - _socket(ServerSocket(portNumber)), - _thread(threadName(_socket)), - _stopped(true) -{ - Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool(); - if (pParams) - { - int toAdd = pParams->getMaxThreads() - pool.capacity(); - if (toAdd > 0) pool.addCapacity(toAdd); - } - _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams); - + _socket(ServerSocket(portNumber)), + _thread(threadName(_socket)), + _stopped(true) +{ + Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool(); + if (pParams) + { + int toAdd = pParams->getMaxThreads() - pool.capacity(); + if (toAdd > 0) pool.addCapacity(toAdd); + } + _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams); + } TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, const ServerSocket& socket, TCPServerParams::Ptr pParams): - _socket(socket), - _thread(threadName(socket)), - _stopped(true) + _socket(socket), + _thread(threadName(socket)), + _stopped(true) { - Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool(); - if (pParams) - { - int toAdd = pParams->getMaxThreads() - pool.capacity(); - if (toAdd > 0) pool.addCapacity(toAdd); - } - _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams); + Poco::ThreadPool& pool = Poco::ThreadPool::defaultPool(); + if (pParams) + { + int toAdd = pParams->getMaxThreads() - pool.capacity(); + if (toAdd > 0) pool.addCapacity(toAdd); + } + _pDispatcher = new TCPServerDispatcher(pFactory, pool, pParams); } TCPServer::TCPServer(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, const ServerSocket& socket, TCPServerParams::Ptr pParams): - _socket(socket), - _pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)), - 
_thread(threadName(socket)), - _stopped(true) + _socket(socket), + _pDispatcher(new TCPServerDispatcher(pFactory, threadPool, pParams)), + _thread(threadName(socket)), + _stopped(true) { } TCPServer::~TCPServer() { - try - { - stop(); - _pDispatcher->release(); - } - catch (...) - { - poco_unexpected(); - } + try + { + stop(); + _pDispatcher->release(); + } + catch (...) + { + poco_unexpected(); + } } const TCPServerParams& TCPServer::params() const { - return _pDispatcher->params(); + return _pDispatcher->params(); } void TCPServer::start() { - poco_assert (_stopped); + poco_assert (_stopped); - _stopped = false; - _thread.start(*this); + _stopped = false; + _thread.start(*this); } - + void TCPServer::stop() { - if (!_stopped) - { - _stopped = true; - _thread.join(); - _pDispatcher->stop(); - } + if (!_stopped) + { + _stopped = true; + _thread.join(); + _pDispatcher->stop(); + } } void TCPServer::run() { - while (!_stopped) - { - Poco::Timespan timeout(250000); - try - { - if (_socket.poll(timeout, Socket::SELECT_READ)) - { - try - { - StreamSocket ss = _socket.acceptConnection(); - - if (!_pConnectionFilter || _pConnectionFilter->accept(ss)) - { - // enable nodelay per default: OSX really needs that + while (!_stopped) + { + Poco::Timespan timeout(250000); + try + { + if (_socket.poll(timeout, Socket::SELECT_READ)) + { + try + { + StreamSocket ss = _socket.acceptConnection(); + + if (!_pConnectionFilter || _pConnectionFilter->accept(ss)) + { + // enable nodelay per default: OSX really needs that #if defined(POCO_OS_FAMILY_UNIX) - if (ss.address().family() != AddressFamily::UNIX_LOCAL) + if (ss.address().family() != AddressFamily::UNIX_LOCAL) #endif - { - ss.setNoDelay(true); - } - _pDispatcher->enqueue(ss); - } - } - catch (Poco::Exception& exc) - { - ErrorHandler::handle(exc); - } - catch (std::exception& exc) - { - ErrorHandler::handle(exc); - } - catch (...) - { - ErrorHandler::handle(); - } - } - } - catch (Poco::Exception& exc) - { - ErrorHandler::handle(exc); - // possibly a resource issue since poll() failed; - // give some time to recover before trying again - Poco::Thread::sleep(50); - } - } + { + ss.setNoDelay(true); + } + _pDispatcher->enqueue(ss); + } + else + { + ErrorHandler::logMessage(Message::PRIO_WARNING, "Filtered out connection from " + ss.peerAddress().toString()); + } + } + catch (Poco::Exception& exc) + { + ErrorHandler::handle(exc); + } + catch (std::exception& exc) + { + ErrorHandler::handle(exc); + } + catch (...) 
+ { + ErrorHandler::handle(); + } + } + } + catch (Poco::Exception& exc) + { + ErrorHandler::handle(exc); + // possibly a resource issue since poll() failed; + // give some time to recover before trying again + Poco::Thread::sleep(50); + } + } } int TCPServer::currentThreads() const { - return _pDispatcher->currentThreads(); + return _pDispatcher->currentThreads(); } int TCPServer::maxThreads() const { - return _pDispatcher->maxThreads(); + return _pDispatcher->maxThreads(); } - + int TCPServer::totalConnections() const { - return _pDispatcher->totalConnections(); + return _pDispatcher->totalConnections(); } int TCPServer::currentConnections() const { - return _pDispatcher->currentConnections(); + return _pDispatcher->currentConnections(); } int TCPServer::maxConcurrentConnections() const { - return _pDispatcher->maxConcurrentConnections(); + return _pDispatcher->maxConcurrentConnections(); } - + int TCPServer::queuedConnections() const { - return _pDispatcher->queuedConnections(); + return _pDispatcher->queuedConnections(); } int TCPServer::refusedConnections() const { - return _pDispatcher->refusedConnections(); + return _pDispatcher->refusedConnections(); } void TCPServer::setConnectionFilter(const TCPServerConnectionFilter::Ptr& pConnectionFilter) { - poco_assert (_stopped); + poco_assert (_stopped); - _pConnectionFilter = pConnectionFilter; + _pConnectionFilter = pConnectionFilter; } std::string TCPServer::threadName(const ServerSocket& socket) { - std::string name("TCPServer: "); - name.append(socket.address().toString()); - return name; + std::string name("TCPServer: "); + name.append(socket.address().toString()); + return name; } diff --git a/base/poco/Net/src/TCPServerDispatcher.cpp b/base/poco/Net/src/TCPServerDispatcher.cpp index 7f9f9a20ee7..9ca7c271e63 100644 --- a/base/poco/Net/src/TCPServerDispatcher.cpp +++ b/base/poco/Net/src/TCPServerDispatcher.cpp @@ -8,7 +8,7 @@ // Copyright (c) 2005-2007, Applied Informatics Software Engineering GmbH. // and Contributors. 
// -// SPDX-License-Identifier: BSL-1.0 +// SPDX-License-Identifier: BSL-1.0 // @@ -33,44 +33,44 @@ namespace Net { class TCPConnectionNotification: public Notification { public: - TCPConnectionNotification(const StreamSocket& socket): - _socket(socket) - { - } - - ~TCPConnectionNotification() - { - } - - const StreamSocket& socket() const - { - return _socket; - } + TCPConnectionNotification(const StreamSocket& socket): + _socket(socket) + { + } + + ~TCPConnectionNotification() + { + } + + const StreamSocket& socket() const + { + return _socket; + } private: - StreamSocket _socket; + StreamSocket _socket; }; TCPServerDispatcher::TCPServerDispatcher(TCPServerConnectionFactory::Ptr pFactory, Poco::ThreadPool& threadPool, TCPServerParams::Ptr pParams): - _rc(1), - _pParams(pParams), - _currentThreads(0), - _totalConnections(0), - _currentConnections(0), - _maxConcurrentConnections(0), - _refusedConnections(0), - _stopped(false), - _pConnectionFactory(pFactory), - _threadPool(threadPool) + _rc(1), + _pParams(pParams), + _currentThreads(0), + _totalConnections(0), + _currentConnections(0), + _maxConcurrentConnections(0), + _refusedConnections(0), + _stopped(false), + _pConnectionFactory(pFactory), + _threadPool(threadPool) { - poco_check_ptr (pFactory); + poco_check_ptr (pFactory); - if (!_pParams) - _pParams = new TCPServerParams; - - if (_pParams->getMaxThreads() == 0) - _pParams->setMaxThreads(threadPool.capacity()); + if (!_pParams) + _pParams = new TCPServerParams; + + if (_pParams->getMaxThreads() == 0) + _pParams->setMaxThreads(threadPool.capacity()); } @@ -81,161 +81,184 @@ TCPServerDispatcher::~TCPServerDispatcher() void TCPServerDispatcher::duplicate() { - ++_rc; + ++_rc; } void TCPServerDispatcher::release() { - if (--_rc == 0) delete this; + if (--_rc == 0) delete this; } void TCPServerDispatcher::run() { - AutoPtr guard(this); // ensure object stays alive + AutoPtr guard(this); // ensure object stays alive - int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds(); + int idleTime = (int) _pParams->getThreadIdleTime().totalMilliseconds(); - for (;;) - { - try - { - AutoPtr pNf = _queue.waitDequeueNotification(idleTime); - if (pNf && !_stopped) - { - TCPConnectionNotification* pCNf = dynamic_cast(pNf.get()); - if (pCNf) - { - beginConnection(); - if (!_stopped) - { - std::unique_ptr pConnection(_pConnectionFactory->createConnection(pCNf->socket())); - poco_check_ptr(pConnection.get()); - pConnection->start(); - } - /// endConnection() should be called after destroying TCPServerConnection, - /// otherwise currentConnections() could become zero while some connections are yet still alive. - endConnection(); - } - } - } - catch (Poco::Exception &exc) { ErrorHandler::handle(exc); } - catch (std::exception &exc) { ErrorHandler::handle(exc); } - catch (...) { ErrorHandler::handle(); } - FastMutex::ScopedLock lock(_mutex); - if (_stopped || (_currentThreads > 1 && _queue.empty())) - { - --_currentThreads; - break; - } - } + for (;;) + { + try + { + AutoPtr pNf = _queue.waitDequeueNotification(idleTime); + if (pNf && !_stopped) + { + TCPConnectionNotification* pCNf = dynamic_cast(pNf.get()); + if (pCNf) + { + beginConnection(); + if (!_stopped) + { + std::unique_ptr pConnection(_pConnectionFactory->createConnection(pCNf->socket())); + poco_check_ptr(pConnection.get()); + pConnection->start(); + } + /// endConnection() should be called after destroying TCPServerConnection, + /// otherwise currentConnections() could become zero while some connections are yet still alive. 
+ endConnection(); + } + } + } + catch (Poco::Exception &exc) { ErrorHandler::handle(exc); } + catch (std::exception &exc) { ErrorHandler::handle(exc); } + catch (...) { ErrorHandler::handle(); } + FastMutex::ScopedLock lock(_mutex); + if (_stopped || (_currentThreads > 1 && _queue.empty())) + { + --_currentThreads; + break; + } + } } namespace { - static const std::string threadName("TCPServerConnection"); + static const std::string threadName("TCPServerConnection"); } - + void TCPServerDispatcher::enqueue(const StreamSocket& socket) { - FastMutex::ScopedLock lock(_mutex); + FastMutex::ScopedLock lock(_mutex); - if (_queue.size() < _pParams->getMaxQueued()) - { - if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads()) - { - try - { + ErrorHandler::logMessage(Message::PRIO_TEST, "Queue size: " + std::to_string(_queue.size()) + + ", current threads: " + std::to_string(_currentThreads) + + ", threads in pool: " + std::to_string(_threadPool.allocated()) + + ", current connections: " + std::to_string(_currentConnections)); + + + if (_queue.size() < _pParams->getMaxQueued()) + { + /// NOTE: the condition below is wrong. + /// Since the thread pool is shared between multiple servers/TCPServerDispatchers, + /// _currentThreads < _pParams->getMaxThreads() will be true when the pool is actually saturated. + /// As a result, queue is useless and connections never wait in queue. + /// Instead, we (mistakenly) think that we can create a thread for this connection, but we fail to create it + /// and the connection get rejected. + /// We could check _currentThreads < _threadPool.allocated() to make it work, + /// but it's not clear if we want to make it work + /// because it may be better to reject connection immediately if we don't have resources to handle it. + if (!_queue.hasIdleThreads() && _currentThreads < _pParams->getMaxThreads()) + { + try + { this->duplicate(); - _threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName); - ++_currentThreads; - } - catch (Poco::Exception& exc) - { + _threadPool.startWithPriority(_pParams->getThreadPriority(), *this, threadName); + ++_currentThreads; + } + catch (Poco::Exception& exc) + { + ErrorHandler::logMessage(Message::PRIO_WARNING, "Got an exception while starting thread for connection from " + + socket.peerAddress().toString()); + ErrorHandler::handle(exc); this->release(); - ++_refusedConnections; - std::cerr << "Got exception while starting thread for connection. 
Error code: " - << exc.code() << ", message: '" << exc.displayText() << "'" << std::endl; - return; - } - } - _queue.enqueueNotification(new TCPConnectionNotification(socket)); - } - else - { - ++_refusedConnections; - } + ++_refusedConnections; + return; + } + } + else if (!_queue.hasIdleThreads()) + { + ErrorHandler::logMessage(Message::PRIO_TRACE, "Don't have idle threads, adding connection from " + + socket.peerAddress().toString() + " to the queue, size: " + std::to_string(_queue.size())); + } + _queue.enqueueNotification(new TCPConnectionNotification(socket)); + } + else + { + ErrorHandler::logMessage(Message::PRIO_WARNING, "Refusing connection from " + socket.peerAddress().toString() + + ", reached max queue size " + std::to_string(_pParams->getMaxQueued())); + ++_refusedConnections; + } } void TCPServerDispatcher::stop() { - _stopped = true; - _queue.clear(); - _queue.wakeUpAll(); + _stopped = true; + _queue.clear(); + _queue.wakeUpAll(); } int TCPServerDispatcher::currentThreads() const { - return _currentThreads; + return _currentThreads; } int TCPServerDispatcher::maxThreads() const { - FastMutex::ScopedLock lock(_mutex); - - return _threadPool.capacity(); + FastMutex::ScopedLock lock(_mutex); + + return _threadPool.capacity(); } int TCPServerDispatcher::totalConnections() const { - return _totalConnections; + return _totalConnections; } int TCPServerDispatcher::currentConnections() const { - return _currentConnections; + return _currentConnections; } int TCPServerDispatcher::maxConcurrentConnections() const { - return _maxConcurrentConnections; + return _maxConcurrentConnections; } int TCPServerDispatcher::queuedConnections() const { - return _queue.size(); + return _queue.size(); } int TCPServerDispatcher::refusedConnections() const { - return _refusedConnections; + return _refusedConnections; } void TCPServerDispatcher::beginConnection() { - FastMutex::ScopedLock lock(_mutex); + FastMutex::ScopedLock lock(_mutex); - ++_totalConnections; - ++_currentConnections; - if (_currentConnections > _maxConcurrentConnections) - _maxConcurrentConnections.store(_currentConnections); + ++_totalConnections; + ++_currentConnections; + if (_currentConnections > _maxConcurrentConnections) + _maxConcurrentConnections.store(_currentConnections); } void TCPServerDispatcher::endConnection() { - --_currentConnections; + --_currentConnections; } diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 44c2daa33ad..7ecfc513705 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -410,7 +410,7 @@ try std::lock_guard lock(servers_lock); metrics.reserve(servers->size()); for (const auto & server : *servers) - metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()}); + metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()}); return metrics; } ); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 3b88bb36954..27d9e4f1394 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -909,10 +909,10 @@ try metrics.reserve(servers_to_start_before_tables.size() + servers.size()); for (const auto & server : servers_to_start_before_tables) - metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()}); + metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()}); for (const auto & server : servers) - 
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
+ metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
 return metrics;
 }
 );
diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp
index 6309f6079f6..56e7c4f3405 100644
--- a/src/Common/AsynchronousMetrics.cpp
+++ b/src/Common/AsynchronousMetrics.cpp
@@ -1613,7 +1613,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
 #endif
 {
- auto get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
+ auto threads_get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
 {
 static std::map<String, std::pair<const char *, const char *>> metric_map =
 {
@@ -1637,11 +1637,38 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
 return it->second;
 };
+ auto rejected_connections_get_metric_name_doc = [](const String & name) -> std::pair<const char *, const char *>
+ {
+ static std::map<String, std::pair<const char *, const char *>> metric_map =
+ {
+ {"tcp_port", {"TCPRejectedConnections", "Number of rejected connections for the TCP protocol (without TLS)."}},
+ {"tcp_port_secure", {"TCPSecureRejectedConnections", "Number of rejected connections for the TCP protocol (with TLS)."}},
+ {"http_port", {"HTTPRejectedConnections", "Number of rejected connections for the HTTP interface (without TLS)."}},
+ {"https_port", {"HTTPSecureRejectedConnections", "Number of rejected connections for the HTTPS interface."}},
+ {"interserver_http_port", {"InterserverRejectedConnections", "Number of rejected connections for the replicas communication protocol (without TLS)."}},
+ {"interserver_https_port", {"InterserverSecureRejectedConnections", "Number of rejected connections for the replicas communication protocol (with TLS)."}},
+ {"mysql_port", {"MySQLRejectedConnections", "Number of rejected connections for the MySQL compatibility protocol."}},
+ {"postgresql_port", {"PostgreSQLRejectedConnections", "Number of rejected connections for the PostgreSQL compatibility protocol."}},
+ {"grpc_port", {"GRPCRejectedConnections", "Number of rejected connections for the GRPC protocol."}},
+ {"prometheus.port", {"PrometheusRejectedConnections", "Number of rejected connections for the Prometheus endpoint.
Note: prometheus endpoints can be also used via the usual HTTP/HTTPs ports."}}, + {"keeper_server.tcp_port", {"KeeperTCPRejectedConnections", "Number of rejected connections for the Keeper TCP protocol (without TLS)."}}, + {"keeper_server.tcp_port_secure", {"KeeperTCPSecureRejectedConnections", "Number of rejected connections for the Keeper TCP protocol (with TLS)."}} + }; + auto it = metric_map.find(name); + if (it == metric_map.end()) + return { nullptr, nullptr }; + else + return it->second; + }; + const auto server_metrics = protocol_server_metrics_func(); for (const auto & server_metric : server_metrics) { - if (auto name_doc = get_metric_name_doc(server_metric.port_name); name_doc.first != nullptr) + if (auto name_doc = threads_get_metric_name_doc(server_metric.port_name); name_doc.first != nullptr) new_values[name_doc.first] = { server_metric.current_threads, name_doc.second }; + + if (auto name_doc = rejected_connections_get_metric_name_doc(server_metric.port_name); name_doc.first != nullptr) + new_values[name_doc.first] = { server_metric.rejected_connections, name_doc.second }; } } diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 10a972d2458..04d0319e35b 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -42,6 +42,7 @@ struct ProtocolServerMetrics { String port_name; size_t current_threads; + size_t rejected_connections; }; /** Periodically (by default, each second) diff --git a/src/Common/ErrorHandlers.h b/src/Common/ErrorHandlers.h index a4a7c4683aa..4e7d391e66f 100644 --- a/src/Common/ErrorHandlers.h +++ b/src/Common/ErrorHandlers.h @@ -2,6 +2,7 @@ #include #include +#include /** ErrorHandler for Poco::Thread, @@ -26,8 +27,32 @@ public: void exception(const std::exception &) override { logException(); } void exception() override { logException(); } + void logMessageImpl(Poco::Message::Priority priority, const std::string & msg) override + { + switch (priority) + { + case Poco::Message::PRIO_FATAL: [[fallthrough]]; + case Poco::Message::PRIO_CRITICAL: + LOG_FATAL(trace_log, fmt::runtime(msg)); break; + case Poco::Message::PRIO_ERROR: + LOG_ERROR(trace_log, fmt::runtime(msg)); break; + case Poco::Message::PRIO_WARNING: + LOG_WARNING(trace_log, fmt::runtime(msg)); break; + case Poco::Message::PRIO_NOTICE: [[fallthrough]]; + case Poco::Message::PRIO_INFORMATION: + LOG_INFO(trace_log, fmt::runtime(msg)); break; + case Poco::Message::PRIO_DEBUG: + LOG_DEBUG(trace_log, fmt::runtime(msg)); break; + case Poco::Message::PRIO_TRACE: + LOG_TRACE(trace_log, fmt::runtime(msg)); break; + case Poco::Message::PRIO_TEST: + LOG_TEST(trace_log, fmt::runtime(msg)); break; + } + } + private: LoggerPtr log = getLogger("ServerErrorHandler"); + LoggerPtr trace_log = getLogger("Poco"); void logException() { diff --git a/src/Server/ProtocolServerAdapter.cpp b/src/Server/ProtocolServerAdapter.cpp index b41ad2376f1..6b723bc8d87 100644 --- a/src/Server/ProtocolServerAdapter.cpp +++ b/src/Server/ProtocolServerAdapter.cpp @@ -20,6 +20,7 @@ public: UInt16 portNumber() const override { return tcp_server->portNumber(); } size_t currentConnections() const override { return tcp_server->currentConnections(); } size_t currentThreads() const override { return tcp_server->currentThreads(); } + size_t refusedConnections() const override { return tcp_server->refusedConnections(); } private: std::unique_ptr tcp_server; @@ -54,6 +55,7 @@ public: UInt16 portNumber() const override { return grpc_server->portNumber(); } size_t currentConnections() 
const override { return grpc_server->currentConnections(); }
 size_t currentThreads() const override { return grpc_server->currentThreads(); }
+ size_t refusedConnections() const override { return 0; }
 private:
 std::unique_ptr<GRPCServer> grpc_server;
diff --git a/src/Server/ProtocolServerAdapter.h b/src/Server/ProtocolServerAdapter.h
index 76a6776ed9c..4a0b0cae8e7 100644
--- a/src/Server/ProtocolServerAdapter.h
+++ b/src/Server/ProtocolServerAdapter.h
@@ -38,6 +38,8 @@ public:
 /// Returns the number of currently handled connections.
 size_t currentConnections() const { return impl->currentConnections(); }
+ size_t refusedConnections() const { return impl->refusedConnections(); }
+
 /// Returns the number of current threads.
 size_t currentThreads() const { return impl->currentThreads(); }
@@ -61,6 +63,7 @@ private:
 virtual UInt16 portNumber() const = 0;
 virtual size_t currentConnections() const = 0;
 virtual size_t currentThreads() const = 0;
+ virtual size_t refusedConnections() const = 0;
 };
 class TCPServerAdapterImpl;
 class GRPCServerAdapterImpl;
From f0ac0eccb16146303a4b520291c5039d86d700d2 Mon Sep 17 00:00:00 2001
From: JackyWoo
Date: Fri, 12 Jul 2024 16:37:52 +0800
Subject: [PATCH 0338/2361] Add unit test
---
 src/Interpreters/convertFieldToType.cpp | 2 +-
 src/Storages/Statistics/Statistics.cpp | 16 +++---
 src/Storages/Statistics/Statistics.h | 6 +--
 .../Statistics/StatisticsCountMinSketch.cpp | 44 +++++++--------
 src/Storages/Statistics/tests/gtest_stats.cpp | 53 ++++++++++++++++++-
 5 files changed, 83 insertions(+), 38 deletions(-)
diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp
index 184c263dbdb..1a40b780e9a 100644
--- a/src/Interpreters/convertFieldToType.cpp
+++ b/src/Interpreters/convertFieldToType.cpp
@@ -545,7 +545,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
 catch (Exception & e)
 {
 if (e.code() == ErrorCodes::UNEXPECTED_DATA_AFTER_PARSED_VALUE)
- throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert string {} to type {}", src.get<String>(), type.getName());
+ throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert string '{}' to type {}", src.get<String>(), type.getName());
 e.addMessage(fmt::format("while converting '{}' to {}", src.get<String>(), type.getName()));
 throw;
diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp
index 5663c55b263..5c0e5f178e1 100644
--- a/src/Storages/Statistics/Statistics.cpp
+++ b/src/Storages/Statistics/Statistics.cpp
@@ -27,25 +27,25 @@ enum StatisticsFileVersion : UInt16
 V0 = 0,
 };
-std::optional<Float64> StatisticsUtils::tryConvertToFloat64(const Field & f)
+std::optional<Float64> StatisticsUtils::tryConvertToFloat64(const Field & field)
 {
- switch (f.getType())
+ switch (field.getType())
 {
 case Field::Types::Int64:
- return f.get<Int64>();
+ return field.get<Int64>();
 case Field::Types::UInt64:
- return f.get<UInt64>();
+ return field.get<UInt64>();
 case Field::Types::Float64:
- return f.get<Float64>();
+ return field.get<Float64>();
 default:
 return {};
 }
 }
-std::optional<String> StatisticsUtils::tryConvertToString(const DB::Field & f)
+std::optional<String> StatisticsUtils::tryConvertToString(const DB::Field & field)
 {
- if (f.getType() == Field::Types::String)
- return f.get<String>();
+ if (field.getType() == Field::Types::String)
+ return field.get<String>();
 return {};
 }
diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h
index 0df5359adfc..33a5cbac4de 100644
--- a/src/Storages/Statistics/Statistics.h
+++ b/src/Storages/Statistics/Statistics.h
@@ -15,11 +15,11 @@ constexpr auto STATS_FILE_PREFIX =
"statistics_"; constexpr auto STATS_FILE_SUFFIX = ".stats"; -///Returns std::nullopt if input Field cannot be converted to a concrete value struct StatisticsUtils { - static std::optional tryConvertToFloat64(const Field & f); - static std::optional tryConvertToString(const Field & f); + /// Returns std::nullopt if input Field cannot be converted to a concrete value + static std::optional tryConvertToFloat64(const Field & field); + static std::optional tryConvertToString(const Field & field); }; /// Statistics describe properties of the values in the column, diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index ff985b06ee3..a3c6ee8a819 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -4,7 +4,7 @@ #include #include #include -#include + #if USE_DATASKETCHES @@ -13,7 +13,7 @@ namespace DB namespace ErrorCodes { -extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int LOGICAL_ERROR; } /// Constants chosen based on rolling dices, which provides an error tolerance of 0.1% (ε = 0.001) and a confidence level of 99.9% (δ = 0.001). @@ -28,36 +28,32 @@ StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescrip Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const { - if (data_type->isValueRepresentedByNumber()) + /// Try to convert field to data_type. Converting string to proper data types such as: number, date, datetime, IPv4, Decimal etc. + /// Return null if val larger than the range of data_type + /// + /// For example: if data_type is Int32: + /// 1. For 1.0, 1, '1', return Field(1) + /// 2. For 1.1, max_value_int64, return null + Field val_converted; + try { - /// 'val' maybe number or string, method 'convertFieldToType' will - /// 1. convert string to number, date, datetime, IPv4, Decimal etc - /// 2. return null if val larger than the range of data_type - auto val_converted = convertFieldToType(val, *data_type); + val_converted = convertFieldToType(val, *data_type); if (val_converted.isNull()) return 0; - - /// We will get the proper data type of val_converted, for example, Int8 for 1, Int16 for 257. - auto data_type_converted = applyVisitor(FieldToDataType(), val_converted); - DataTypes data_types = {data_type, data_type_converted}; - auto super_type = tryGetLeastSupertype(data_types); - - /// If data_type is UInt8 but val_typed is UInt16, we should return 0. - if (!super_type->equals(*data_type)) - return 0; - - return sketch.get_estimate(&val_converted, data_type->getSizeOfValueInMemory()); } + catch (...) 
+ { + /// If the conversion fails for example, when converting 'not a number' to Int32, return 0 + return 0; + } + + if (data_type->isValueRepresentedByNumber()) + return sketch.get_estimate(&val_converted, data_type->getSizeOfValueInMemory()); if (isStringOrFixedString(data_type)) - { return sketch.get_estimate(val.get()); - } - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Statistics 'count_min' does not support estimate constant value of type {}", - val.getTypeName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName()); } void StatisticsCountMinSketch::update(const ColumnPtr & column) diff --git a/src/Storages/Statistics/tests/gtest_stats.cpp b/src/Storages/Statistics/tests/gtest_stats.cpp index c3c14632ba1..d80f64b8b6b 100644 --- a/src/Storages/Statistics/tests/gtest_stats.cpp +++ b/src/Storages/Statistics/tests/gtest_stats.cpp @@ -1,6 +1,10 @@ #include #include +#include +#include + +using namespace DB; TEST(Statistics, TDigestLessThan) { @@ -39,6 +43,51 @@ TEST(Statistics, TDigestLessThan) std::reverse(data.begin(), data.end()); test_less_than(data, {-1, 1e9, 50000.0, 3000.0, 30.0}, {0, 100000, 50000, 3000, 30}, {0, 0, 0.001, 0.001, 0.001}); - - +} + +using Fields = std::vector; + +template +void testConvertFieldToDataType(const DataTypePtr & data_type, const Fields & fields, const T & expected_value, bool convert_failed = false) +{ + for (const auto & field : fields) + { + Field converted_value; + try + { + converted_value = convertFieldToType(field, *data_type); + } + catch(...) + { + /// Just ignore exceptions + } + if (convert_failed) + ASSERT_TRUE(converted_value.isNull()); + else + ASSERT_EQ(converted_value.template get(), expected_value); + } +} + +TEST(Statistics, convertFieldToType) +{ + Fields fields; + + auto data_type_int8 = DataTypeFactory::instance().get("Int8"); + fields = {1, 1.0, "1"}; + testConvertFieldToDataType(data_type_int8, fields, static_cast(1)); + + fields = {256, 1.1, "not a number"}; + testConvertFieldToDataType(data_type_int8, fields, static_cast(1), true); + + auto data_type_float64 = DataTypeFactory::instance().get("Float64"); + fields = {1, 1.0, "1.0"}; + testConvertFieldToDataType(data_type_float64, fields, static_cast(1.0)); + + auto data_type_string = DataTypeFactory::instance().get("String"); + fields = {1, "1"}; + testConvertFieldToDataType(data_type_string, fields, static_cast("1")); + + auto data_type_date = DataTypeFactory::instance().get("Date"); + fields = {"2024-07-12", 19916}; + testConvertFieldToDataType(data_type_date, fields, static_cast(19916)); } From 119777cd7346a32f45588d069bf6a89efe091867 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Fri, 12 Jul 2024 12:03:25 +0200 Subject: [PATCH 0339/2361] update reference --- .../0_stateless/03203_hive_style_partitioning.reference | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index 0e6b6052946..d187f4cdd2c 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -29,9 +29,9 @@ Eugenia Greer Elizabeth Jeffery Delgado Elizabeth Clara Cross Elizabeth Elizabeth Gordon Elizabeth -42 2020-01-01 -[1,2,3] 42.42 -Array(Int64) LowCardinality(Float64) +42 2020-01-01 +[1,2,3] 42.42 +Array(Int64) 
LowCardinality(Float64) 101 1 TESTING THE URL PARTITIONING From 34e54fd089b5c4b4892e80bd426a9409b31f29c9 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 12 Jul 2024 13:58:41 +0200 Subject: [PATCH 0340/2361] fix test test_upload_s3_fail_upload_part_when_multi_part_upload --- tests/integration/test_checking_s3_blobs_paranoid/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index 1ed70e20b79..22a5cd525f3 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -197,7 +197,7 @@ def test_upload_s3_fail_upload_part_when_multi_part_upload( ) assert create_multipart == 1 assert upload_parts >= 2 - assert s3_errors >= 2 + assert s3_errors == 1 @pytest.mark.parametrize( From 0988e1deadf34736f126e9eb3a2162d3abbe1314 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Fri, 12 Jul 2024 14:59:43 +0200 Subject: [PATCH 0341/2361] update tests --- .../queries/0_stateless/03203_hive_style_partitioning.reference | 2 +- tests/queries/0_stateless/03203_hive_style_partitioning.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index d187f4cdd2c..be43048dd01 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -96,4 +96,4 @@ Eugenia Greer Elizabeth Jeffery Delgado Elizabeth Clara Cross Elizabeth Elizabeth Gordon Elizabeth -1 +OK diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 14b4a116596..58a74a3ca8f 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -104,4 +104,4 @@ $CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; -""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" +""" 2>&1 | grep -F -q "UNKNOWN_IDENTIFIER" && echo "OK" || echo "FAIL"; From 9c6a49b6d474836ee894ddaaa02ebb982370d25c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 12 Jul 2024 14:30:46 +0000 Subject: [PATCH 0342/2361] fix WriteBufferFromPocoSocketChunked::finalizeImpl() --- src/IO/WriteBufferFromPocoSocketChunked.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp index 98c5126c24b..9da46ee2d10 100644 --- a/src/IO/WriteBufferFromPocoSocketChunked.cpp +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -202,7 +202,7 @@ void WriteBufferFromPocoSocketChunked::nextImpl() void WriteBufferFromPocoSocketChunked::finalizeImpl() { - if (offset() == sizeof(*chunk_size_ptr)) + if (chunked && offset() == sizeof(*chunk_size_ptr)) pos -= sizeof(*chunk_size_ptr); WriteBufferFromPocoSocket::finalizeImpl(); } From dfc3db23b1d263ed0fcfce36e2df0b925592d7c3 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 14:38:14 +0000 Subject: [PATCH 0343/2361] Implement new JSON data type --- .../Passes/FunctionToSubcolumnsPass.cpp | 21 +- src/Analyzer/Resolve/IdentifierResolver.cpp | 44 +- 
src/Analyzer/Resolve/QueryAnalyzer.cpp | 12 +- src/Columns/ColumnDynamic.h | 3 +- src/Columns/ColumnObject.cpp | 2008 +++++++++-------- src/Columns/ColumnObject.h | 385 ++-- src/Columns/ColumnObjectDeprecated.cpp | 1096 +++++++++ src/Columns/ColumnObjectDeprecated.h | 265 +++ src/Columns/IColumn.cpp | 6 +- src/Columns/tests/gtest_column_object.cpp | 351 +++ src/Core/Settings.h | 5 +- src/Core/SettingsChangesHistory.cpp | 5 +- src/Core/TypeId.h | 1 + src/DataTypes/DataTypeDynamic.cpp | 51 +- src/DataTypes/DataTypeDynamic.h | 5 +- src/DataTypes/DataTypeFactory.cpp | 3 +- src/DataTypes/DataTypeFactory.h | 3 +- src/DataTypes/DataTypeObject.cpp | 481 +++- src/DataTypes/DataTypeObject.h | 62 +- src/DataTypes/DataTypeObjectDeprecated.cpp | 85 + src/DataTypes/DataTypeObjectDeprecated.h | 48 + src/DataTypes/DataTypeTuple.cpp | 15 +- src/DataTypes/DataTypesBinaryEncoding.cpp | 95 +- src/DataTypes/DataTypesBinaryEncoding.h | 105 +- src/DataTypes/FieldToDataType.cpp | 3 +- src/DataTypes/IDataType.cpp | 3 +- src/DataTypes/IDataType.h | 6 +- src/DataTypes/ObjectUtils.cpp | 58 +- src/DataTypes/ObjectUtils.h | 4 +- .../Serializations/ISerialization.cpp | 18 +- src/DataTypes/Serializations/ISerialization.h | 85 +- .../Serializations/SerializationDynamic.cpp | 45 +- .../Serializations/SerializationDynamic.h | 12 +- .../SerializationDynamicElement.cpp | 1 + .../Serializations/SerializationJSON.cpp | 409 ++++ .../Serializations/SerializationJSON.h | 50 + .../SerializationJSONElement.cpp | 3 + .../Serializations/SerializationJSONElement.h | 8 + .../SerializationLowCardinality.cpp | 9 +- .../Serializations/SerializationObject.cpp | 1045 +++++---- .../Serializations/SerializationObject.h | 129 +- .../SerializationObjectDeprecated.cpp | 586 +++++ .../SerializationObjectDeprecated.h | 121 + .../SerializationObjectDynamicPath.cpp | 180 ++ .../SerializationObjectDynamicPath.h | 58 + .../SerializationObjectElement.cpp | 3 + .../SerializationObjectElement.h | 8 + .../SerializationObjectTypedPath.cpp | 78 + .../SerializationObjectTypedPath.h | 57 + .../Serializations/SerializationSubObject.cpp | 259 +++ .../Serializations/SerializationSubObject.h | 72 + .../Serializations/SerializationVariant.cpp | 22 +- .../Serializations/SerializationVariant.h | 4 +- .../gtest_deprecated_object_serialization.cpp | 80 + .../tests/gtest_object_serialization.cpp | 160 +- src/DataTypes/Utils.cpp | 1 + .../gtest_data_types_binary_encoding.cpp | 5 +- src/Databases/DatabaseReplicated.cpp | 1 + src/Formats/EscapingRuleUtils.cpp | 2 +- src/Formats/FormatFactory.cpp | 4 +- src/Formats/FormatSettings.h | 4 +- src/Formats/JSONExtractTree.cpp | 303 ++- src/Formats/JSONUtils.cpp | 12 +- src/Formats/SchemaInferenceUtils.cpp | 46 +- src/Functions/FunctionsConversion.cpp | 40 +- src/Functions/JSONEmpty.cpp | 126 ++ src/Functions/JSONPaths.cpp | 339 +++ src/IO/ReadHelpers.cpp | 87 + src/IO/ReadHelpers.h | 7 + src/IO/WriteHelpers.h | 6 + src/Interpreters/TreeRewriter.cpp | 10 +- src/Interpreters/convertFieldToType.cpp | 2 +- .../parseColumnsListForTableFunction.cpp | 16 +- .../parseColumnsListForTableFunction.h | 2 + src/Parsers/ASTObjectTypeArgument.cpp | 75 + src/Parsers/ASTObjectTypeArgument.h | 35 + src/Parsers/CommonParsers.h | 28 + src/Parsers/ExpressionElementParsers.cpp | 37 +- src/Parsers/ExpressionElementParsers.h | 12 +- src/Parsers/Lexer.cpp | 2 + src/Parsers/Lexer.h | 1 + src/Parsers/ParserCreateQuery.cpp | 2 +- src/Parsers/ParserDataType.cpp | 103 +- .../Impl/JSONAsStringRowInputFormat.cpp | 15 +- 
.../Formats/Impl/JSONAsStringRowInputFormat.h | 18 +- src/Processors/QueryPlan/PartsSplitter.cpp | 2 +- src/Storages/ColumnsDescription.cpp | 17 +- .../MergeTreeDataPartWriterCompact.cpp | 2 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 14 +- .../MergeTree/MergeTreeReaderCompact.cpp | 2 +- .../MergeTree/MergeTreeReaderWide.cpp | 2 +- 91 files changed, 8000 insertions(+), 2111 deletions(-) create mode 100644 src/Columns/ColumnObjectDeprecated.cpp create mode 100644 src/Columns/ColumnObjectDeprecated.h create mode 100644 src/Columns/tests/gtest_column_object.cpp create mode 100644 src/DataTypes/DataTypeObjectDeprecated.cpp create mode 100644 src/DataTypes/DataTypeObjectDeprecated.h create mode 100644 src/DataTypes/Serializations/SerializationJSON.cpp create mode 100644 src/DataTypes/Serializations/SerializationJSON.h create mode 100644 src/DataTypes/Serializations/SerializationJSONElement.cpp create mode 100644 src/DataTypes/Serializations/SerializationJSONElement.h create mode 100644 src/DataTypes/Serializations/SerializationObjectDeprecated.cpp create mode 100644 src/DataTypes/Serializations/SerializationObjectDeprecated.h create mode 100644 src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp create mode 100644 src/DataTypes/Serializations/SerializationObjectDynamicPath.h create mode 100644 src/DataTypes/Serializations/SerializationObjectElement.cpp create mode 100644 src/DataTypes/Serializations/SerializationObjectElement.h create mode 100644 src/DataTypes/Serializations/SerializationObjectTypedPath.cpp create mode 100644 src/DataTypes/Serializations/SerializationObjectTypedPath.h create mode 100644 src/DataTypes/Serializations/SerializationSubObject.cpp create mode 100644 src/DataTypes/Serializations/SerializationSubObject.h create mode 100644 src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp create mode 100644 src/Functions/JSONEmpty.cpp create mode 100644 src/Functions/JSONPaths.cpp create mode 100644 src/Parsers/ASTObjectTypeArgument.cpp create mode 100644 src/Parsers/ASTObjectTypeArgument.h diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp index 90051779a26..395ebbfa408 100644 --- a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -52,6 +52,8 @@ void optimizeFunctionEmpty(QueryTreeNodePtr &, FunctionNode & function_node, Col /// Replace `notEmpty(argument)` with `notEquals(argument.size0, 0)` if not positive /// `argument` may be Array or Map. 
+ std::cerr << "optimizeFunctionEmpty " << ctx.column.name << "\n"; + NameAndTypePair column{ctx.column.name + ".size0", std::make_shared()}; auto & function_arguments_nodes = function_node.getArguments().getNodes(); @@ -234,9 +236,18 @@ std::tuple getTypedNodesForOptimizati return {}; auto column_in_table = storage_snapshot->tryGetColumn(GetColumnsOptions::All, column.name); + std::cerr << "getTypedNodesForOptimization " << column.name << "\n"; if (!column_in_table || !column_in_table->type->equals(*column.type)) + { + std::cerr << "getTypedNodesForOptimization FAIL\n"; + if (column_in_table) + std::cerr << column_in_table->type->getName() << "/" << column.type->getName() << "\n"; + else + std::cerr << "null\n"; return {}; + } + std::cerr << "getTypedNodesForOptimization OK\n"; return std::make_tuple(function_node, first_argument_column_node, table_node); } @@ -422,9 +433,15 @@ public: auto table_name = table_node->getStorage()->getStorageID().getFullTableName(); Identifier qualified_name({table_name, column.name}); - if (!identifiers_to_optimize.contains(qualified_name)) - return; + std::cerr << "FunctionToSubcolumnsVisitorSecondPass " << column.name << "\n"; + if (!identifiers_to_optimize.contains(qualified_name)) + { + std::cerr << "FunctionToSubcolumnsVisitorSecondPass FAIL\n"; + return; + } + + std::cerr << "FunctionToSubcolumnsVisitorSecondPass OK\n"; auto transformer_it = node_transformers.find({column.type->getTypeId(), function_node->getFunctionName()}); if (transformer_it != node_transformers.end()) { diff --git a/src/Analyzer/Resolve/IdentifierResolver.cpp b/src/Analyzer/Resolve/IdentifierResolver.cpp index 692a31b66ba..a3c4d0aad65 100644 --- a/src/Analyzer/Resolve/IdentifierResolver.cpp +++ b/src/Analyzer/Resolve/IdentifierResolver.cpp @@ -1,6 +1,7 @@ #include -#include +#include #include +#include #include #include @@ -449,10 +450,10 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromCompoundExpression( if (auto * column = compound_expression->as()) { const DataTypePtr & column_type = column->getColumn().getTypeInStorage(); - if (column_type->getTypeId() == TypeIndex::Object) + if (column_type->getTypeId() == TypeIndex::ObjectDeprecated) { - const auto * object_type = checkAndGetDataType(column_type.get()); - if (object_type->getSchemaFormat() == "json" && object_type->hasNullableSubcolumns()) + const auto & object_type = checkAndGetDataType(*column_type); + if (object_type.getSchemaFormat() == "json" && object_type.hasNullableSubcolumns()) { QueryTreeNodePtr constant_node_null = std::make_shared(Field()); return constant_node_null; @@ -678,9 +679,33 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage( bool match_full_identifier = false; const auto & identifier_full_name = identifier_without_column_qualifier.getFullName(); - auto it = table_expression_data.column_name_to_column_node.find(identifier_full_name); - bool can_resolve_directly_from_storage = it != table_expression_data.column_name_to_column_node.end(); - if (can_resolve_directly_from_storage && table_expression_data.subcolumn_names.contains(identifier_full_name)) + + ColumnNodePtr result_column_node; + bool can_resolve_directly_from_storage = false; + bool is_subcolumn = false; + if (auto it = table_expression_data.column_name_to_column_node.find(identifier_full_name); it != table_expression_data.column_name_to_column_node.end()) + { + can_resolve_directly_from_storage = true; + is_subcolumn = table_expression_data.subcolumn_names.contains(identifier_full_name); + result_column_node 
= it->second; + } + /// Check if it's a dynamic subcolumn + else + { + auto [column_name, dynamic_subcolumn_name] = Nested::splitName(identifier_full_name); + auto jt = table_expression_data.column_name_to_column_node.find(column_name); + if (jt != table_expression_data.column_name_to_column_node.end() && jt->second->getColumnType()->hasDynamicSubcolumns()) + { + if (auto dynamic_subcolumn_type = jt->second->getColumnType()->tryGetSubcolumnType(dynamic_subcolumn_name)) + { + result_column_node = std::make_shared(NameAndTypePair{identifier_full_name, dynamic_subcolumn_type}, jt->second->getColumnSource()); + can_resolve_directly_from_storage = true; + is_subcolumn = true; + } + } + } + + if (can_resolve_directly_from_storage && is_subcolumn) { /** In the case when we have an ARRAY JOIN, we should not resolve subcolumns directly from storage. * For example, consider the following SQL query: @@ -696,11 +721,11 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage( if (can_resolve_directly_from_storage) { match_full_identifier = true; - result_expression = it->second; + result_expression = result_column_node; } else { - it = table_expression_data.column_name_to_column_node.find(identifier_without_column_qualifier.at(0)); + auto it = table_expression_data.column_name_to_column_node.find(identifier_without_column_qualifier.at(0)); if (it != table_expression_data.column_name_to_column_node.end()) result_expression = it->second; } @@ -973,7 +998,6 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromJoin(const Identifi if (!join_node_in_resolve_process && from_join_node.isUsingJoinExpression()) { auto & join_using_list = from_join_node.getJoinExpression()->as(); - for (auto & join_using_node : join_using_list.getNodes()) { auto & column_node = join_using_node->as(); diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 576c4943ccb..b8d1e44fca7 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -1,14 +1,14 @@ #include -#include -#include -#include -#include -#include #include -#include #include +#include +#include +#include #include +#include +#include +#include #include #include diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 27ad0dd583f..eaf4f85c9a7 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -22,7 +22,6 @@ namespace DB class ColumnDynamic final : public COWHelper, ColumnDynamic> { public: - /// struct Statistics { enum class Source @@ -77,7 +76,7 @@ public: return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, statistics_); } - static MutablePtr create(size_t max_dynamic_types_) + static MutablePtr create(size_t max_dynamic_types_ = ColumnVariant::MAX_NESTED_COLUMNS) { return Base::create(max_dynamic_types_); } diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 90ef974010c..3d1ab63d219 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1,1096 +1,1186 @@ -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - +#include +#include +#include +#include +#include +#include namespace DB { namespace ErrorCodes { - extern const int ARGUMENT_OUT_OF_BOUND; - extern const int DUPLICATE_COLUMN; - extern const int EXPERIMENTAL_FEATURE_ERROR; - extern const int ILLEGAL_COLUMN; - extern const int NUMBER_OF_DIMENSIONS_MISMATCHED; - extern const int 
SIZES_OF_COLUMNS_DOESNT_MATCH; + extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; } namespace { -/// Recreates column with default scalar values and keeps sizes of arrays. -ColumnPtr recreateColumnWithDefaultValues( - const ColumnPtr & column, const DataTypePtr & scalar_type, size_t num_dimensions) +static const FormatSettings & getFormatSettings() { - const auto * column_array = checkAndGetColumn(column.get()); - if (column_array && num_dimensions) + static const FormatSettings settings; + return settings; +} + +static const std::shared_ptr & getDynamicSerialization() +{ + static const std::shared_ptr dynamic_serialization = std::make_shared(); + return dynamic_serialization; +} + +} + +ColumnObject::ColumnObject( + std::unordered_map typed_paths_, + std::unordered_map dynamic_paths_, + MutableColumnPtr shared_data_, + size_t max_dynamic_paths_, + size_t max_dynamic_types_, + const Statistics & statistics_) + : shared_data(std::move(shared_data_)) + , max_dynamic_paths(max_dynamic_paths_) + , max_dynamic_types(max_dynamic_types_) + , statistics(statistics_) +{ + typed_paths.reserve(typed_paths_.size()); + for (auto & [path, column] : typed_paths_) + typed_paths[path] = std::move(column); + + dynamic_paths.reserve(dynamic_paths_.size()); + for (auto & [path, column] : dynamic_paths_) + dynamic_paths[path] = std::move(column); +} + +ColumnObject::ColumnObject( + std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_) + : max_dynamic_paths(max_dynamic_paths_), max_dynamic_types(max_dynamic_types_) +{ + typed_paths.reserve(typed_paths_.size()); + for (auto & [path, column] : typed_paths_) { - return ColumnArray::create( - recreateColumnWithDefaultValues( - column_array->getDataPtr(), scalar_type, num_dimensions - 1), - IColumn::mutate(column_array->getOffsetsPtr())); + if (!column->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected non-empty typed path column in ColumnObject constructor"); + typed_paths[path] = std::move(column); } - return createArrayOfType(scalar_type, num_dimensions)->createColumn()->cloneResized(column->size()); + MutableColumns paths_and_values; + paths_and_values.emplace_back(ColumnString::create()); + paths_and_values.emplace_back(ColumnString::create()); + shared_data = ColumnArray::create(ColumnTuple::create(std::move(paths_and_values))); } -/// Replaces NULL fields to given field or empty array. -class FieldVisitorReplaceNull : public StaticVisitor +ColumnObject::Ptr ColumnObject::create( + const std::unordered_map & typed_paths_, + const std::unordered_map & dynamic_paths_, + const ColumnPtr & shared_data_, + size_t max_dynamic_paths_, + size_t max_dynamic_types_, + const ColumnObject::Statistics & statistics_) { -public: - explicit FieldVisitorReplaceNull( - const Field & replacement_, size_t num_dimensions_) - : replacement(replacement_) - , num_dimensions(num_dimensions_) - { - } + std::unordered_map mutable_typed_paths; + mutable_typed_paths.reserve(typed_paths_.size()); + for (const auto & [path, column] : typed_paths_) + mutable_typed_paths[path] = typed_paths_.at(path)->assumeMutable(); - Field operator()(const Null &) const - { - return num_dimensions ? 
Array() : replacement; - } - - Field operator()(const Array & x) const - { - assert(num_dimensions > 0); - const size_t size = x.size(); - Array res(size); - for (size_t i = 0; i < size; ++i) - res[i] = applyVisitor(FieldVisitorReplaceNull(replacement, num_dimensions - 1), x[i]); - return res; - } - - template - Field operator()(const T & x) const { return x; } - -private: - const Field & replacement; - size_t num_dimensions; -}; - -/// Visitor that allows to get type of scalar field -/// or least common type of scalars in array. -/// More optimized version of FieldToDataType. -class FieldVisitorToScalarType : public StaticVisitor<> -{ -public: - using FieldType = Field::Types::Which; - - void operator()(const Array & x) - { - size_t size = x.size(); - for (size_t i = 0; i < size; ++i) - applyVisitor(*this, x[i]); - } - - void operator()(const UInt64 & x) - { - field_types.insert(FieldType::UInt64); - if (x <= std::numeric_limits::max()) - type_indexes.insert(TypeIndex::UInt8); - else if (x <= std::numeric_limits::max()) - type_indexes.insert(TypeIndex::UInt16); - else if (x <= std::numeric_limits::max()) - type_indexes.insert(TypeIndex::UInt32); - else - type_indexes.insert(TypeIndex::UInt64); - } - - void operator()(const Int64 & x) - { - field_types.insert(FieldType::Int64); - if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) - type_indexes.insert(TypeIndex::Int8); - else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) - type_indexes.insert(TypeIndex::Int16); - else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) - type_indexes.insert(TypeIndex::Int32); - else - type_indexes.insert(TypeIndex::Int64); - } - - void operator()(const bool &) - { - field_types.insert(FieldType::UInt64); - type_indexes.insert(TypeIndex::UInt8); - } - - void operator()(const Null &) - { - have_nulls = true; - } - - template - void operator()(const T &) - { - field_types.insert(Field::TypeToEnum>::value); - type_indexes.insert(TypeToTypeIndex>); - } - - DataTypePtr getScalarType() const { return getLeastSupertypeOrString(type_indexes); } - bool haveNulls() const { return have_nulls; } - bool needConvertField() const { return field_types.size() > 1; } - -private: - TypeIndexSet type_indexes; - std::unordered_set field_types; - bool have_nulls = false; -}; + std::unordered_map mutable_dynamic_paths; + mutable_dynamic_paths.reserve(dynamic_paths_.size()); + for (const auto & [path, column] : dynamic_paths_) + mutable_dynamic_paths[path] = dynamic_paths_.at(path)->assumeMutable(); + return ColumnObject::create(std::move(mutable_typed_paths), std::move(mutable_dynamic_paths), shared_data_->assumeMutable(), max_dynamic_paths_, max_dynamic_types_, statistics_); } -FieldInfo getFieldInfo(const Field & field) +ColumnObject::MutablePtr ColumnObject::create( + std::unordered_map typed_paths_, + std::unordered_map dynamic_paths_, + MutableColumnPtr shared_data_, + size_t max_dynamic_paths_, + size_t max_dynamic_types_, + const ColumnObject::Statistics & statistics_) { - FieldVisitorToScalarType to_scalar_type_visitor; - applyVisitor(to_scalar_type_visitor, field); - FieldVisitorToNumberOfDimensions to_number_dimension_visitor; - - return - { - to_scalar_type_visitor.getScalarType(), - to_scalar_type_visitor.haveNulls(), - to_scalar_type_visitor.needConvertField(), - applyVisitor(to_number_dimension_visitor, field), - to_number_dimension_visitor.need_fold_dimension - }; + return Base::create(std::move(typed_paths_), std::move(dynamic_paths_), 
std::move(shared_data_), max_dynamic_paths_, max_dynamic_types_, statistics_); } -ColumnObject::Subcolumn::Subcolumn(MutableColumnPtr && data_, bool is_nullable_) - : least_common_type(getDataTypeByColumn(*data_)) - , is_nullable(is_nullable_) - , num_rows(data_->size()) +ColumnObject::MutablePtr ColumnObject::create(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_) { - data.push_back(std::move(data_)); + return Base::create(std::move(typed_paths_), max_dynamic_paths_, max_dynamic_types_); } -ColumnObject::Subcolumn::Subcolumn( - size_t size_, bool is_nullable_) - : least_common_type(std::make_shared()) - , is_nullable(is_nullable_) - , num_of_defaults_in_prefix(size_) - , num_rows(size_) +std::string ColumnObject::getName() const { + WriteBufferFromOwnString ss; + ss << "Object("; + ss << "max_dynamic_paths=" << max_dynamic_paths; + ss << ", max_dynamic_types=" << max_dynamic_types; + std::vector sorted_typed_paths; + sorted_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + sorted_typed_paths.push_back(path); + std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end()); + for (const auto & path : sorted_typed_paths) + ss << ", " << path << " " << typed_paths.at(path)->getName(); + ss << ")"; + return ss.str(); } -size_t ColumnObject::Subcolumn::size() const +MutableColumnPtr ColumnObject::cloneEmpty() const { - return num_rows; + std::unordered_map empty_typed_paths; + empty_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + empty_typed_paths[path] = column->cloneEmpty(); + + std::unordered_map empty_dynamic_paths; + empty_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + empty_dynamic_paths[path] = column->cloneEmpty(); + + return ColumnObject::create(std::move(empty_typed_paths), std::move(empty_dynamic_paths), shared_data->cloneEmpty(), max_dynamic_paths, max_dynamic_types, statistics); } -size_t ColumnObject::Subcolumn::byteSize() const +MutableColumnPtr ColumnObject::cloneResized(size_t size) const { - size_t res = 0; - for (const auto & part : data) - res += part->byteSize(); - return res; -} + std::unordered_map resized_typed_paths; + resized_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + resized_typed_paths[path] = column->cloneResized(size); -size_t ColumnObject::Subcolumn::allocatedBytes() const -{ - size_t res = 0; - for (const auto & part : data) - res += part->allocatedBytes(); - return res; -} + std::unordered_map resized_dynamic_paths; + resized_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + resized_dynamic_paths[path] = column->cloneResized(size); -void ColumnObject::Subcolumn::get(size_t n, Field & res) const -{ - if (isFinalized()) - { - getFinalizedColumn().get(n, res); - return; - } - - size_t ind = n; - if (ind < num_of_defaults_in_prefix) - { - res = least_common_type.get()->getDefault(); - return; - } - - ind -= num_of_defaults_in_prefix; - for (const auto & part : data) - { - if (ind < part->size()) - { - part->get(ind, res); - res = convertFieldToTypeOrThrow(res, *least_common_type.get()); - return; - } - - ind -= part->size(); - } - - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for getting field is out of range", n); -} - -void ColumnObject::Subcolumn::checkTypes() const -{ - DataTypes prefix_types; - prefix_types.reserve(data.size()); - for (size_t i = 0; i < data.size(); 
++i) - { - auto current_type = getDataTypeByColumn(*data[i]); - prefix_types.push_back(current_type); - auto prefix_common_type = getLeastSupertype(prefix_types); - if (!prefix_common_type->equals(*current_type)) - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Data type {} of column at position {} cannot represent all columns from i-th prefix", - current_type->getName(), i); - } -} - -void ColumnObject::Subcolumn::insert(Field field) -{ - auto info = DB::getFieldInfo(field); - insert(std::move(field), std::move(info)); -} - -void ColumnObject::Subcolumn::addNewColumnPart(DataTypePtr type) -{ - auto serialization = type->getSerialization(ISerialization::Kind::SPARSE); - data.push_back(type->createColumn(*serialization)); - least_common_type = LeastCommonType{std::move(type)}; -} - -static bool isConversionRequiredBetweenIntegers(const IDataType & lhs, const IDataType & rhs) -{ - /// If both of types are signed/unsigned integers and size of left field type - /// is less than right type, we don't need to convert field, - /// because all integer fields are stored in Int64/UInt64. - - WhichDataType which_lhs(lhs); - WhichDataType which_rhs(rhs); - - bool is_native_int = which_lhs.isNativeInt() && which_rhs.isNativeInt(); - bool is_native_uint = which_lhs.isNativeUInt() && which_rhs.isNativeUInt(); - - return (!is_native_int && !is_native_uint) - || lhs.getSizeOfValueInMemory() > rhs.getSizeOfValueInMemory(); -} - -void ColumnObject::Subcolumn::insert(Field field, FieldInfo info) -{ - auto base_type = std::move(info.scalar_type); - - if (isNothing(base_type) && info.num_dimensions == 0) - { - insertDefault(); - return; - } - - auto column_dim = least_common_type.getNumberOfDimensions(); - auto value_dim = info.num_dimensions; - - if (isNothing(least_common_type.get())) - column_dim = value_dim; - - if (isNothing(base_type)) - value_dim = column_dim; - - if (value_dim != column_dim) - throw Exception(ErrorCodes::NUMBER_OF_DIMENSIONS_MISMATCHED, - "Dimension of types mismatched between inserted value and column. " - "Dimension of value: {}. Dimension of column: {}", - value_dim, column_dim); - - if (is_nullable) - base_type = makeNullable(base_type); - - if (!is_nullable && info.have_nulls) - field = applyVisitor(FieldVisitorReplaceNull(base_type->getDefault(), value_dim), std::move(field)); - - bool type_changed = false; - const auto & least_common_base_type = least_common_type.getBase(); - - if (data.empty()) - { - addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); - } - else if (!least_common_base_type->equals(*base_type) && !isNothing(base_type)) - { - if (isConversionRequiredBetweenIntegers(*base_type, *least_common_base_type)) - { - base_type = getLeastSupertypeOrString(DataTypes{std::move(base_type), least_common_base_type}); - type_changed = true; - if (!least_common_base_type->equals(*base_type)) - addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); - } - } - - if (type_changed || info.need_convert) - field = convertFieldToTypeOrThrow(field, *least_common_type.get()); - - if (!data.back()->tryInsert(field)) - { - /** Normalization of the field above is pretty complicated (it uses several FieldVisitors), - * so in the case of a bug, we may get mismatched types. - * The `IColumn::insert` method does not check the type of the inserted field, and it can lead to a segmentation fault. - * Therefore, we use the safer `tryInsert` method to get an exception instead of a segmentation fault. 
- */ - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Cannot insert field {} to column {}", - field.dump(), data.back()->dumpStructure()); - } - - ++num_rows; -} - -void ColumnObject::Subcolumn::insertRangeFrom(const Subcolumn & src, size_t start, size_t length) -{ - assert(start + length <= src.size()); - size_t end = start + length; - num_rows += length; - - if (data.empty()) - { - addNewColumnPart(src.getLeastCommonType()); - } - else if (!least_common_type.get()->equals(*src.getLeastCommonType())) - { - auto new_least_common_type = getLeastSupertypeOrString(DataTypes{least_common_type.get(), src.getLeastCommonType()}); - if (!new_least_common_type->equals(*least_common_type.get())) - addNewColumnPart(std::move(new_least_common_type)); - } - - if (end <= src.num_of_defaults_in_prefix) - { - data.back()->insertManyDefaults(length); - return; - } - - if (start < src.num_of_defaults_in_prefix) - data.back()->insertManyDefaults(src.num_of_defaults_in_prefix - start); - - auto insert_from_part = [&](const auto & column, size_t from, size_t n) - { - assert(from + n <= column->size()); - auto column_type = getDataTypeByColumn(*column); - - if (column_type->equals(*least_common_type.get())) - { - data.back()->insertRangeFrom(*column, from, n); - return; - } - - /// If we need to insert large range, there is no sense to cut part of column and cast it. - /// Casting of all column and inserting from it can be faster. - /// Threshold is just a guess. - - if (n * 3 >= column->size()) - { - auto casted_column = castColumn({column, column_type, ""}, least_common_type.get()); - data.back()->insertRangeFrom(*casted_column, from, n); - return; - } - - auto casted_column = column->cut(from, n); - casted_column = castColumn({casted_column, column_type, ""}, least_common_type.get()); - data.back()->insertRangeFrom(*casted_column, 0, n); - }; - - size_t pos = 0; - size_t processed_rows = src.num_of_defaults_in_prefix; - - /// Find the first part of the column that intersects the range. - while (pos < src.data.size() && processed_rows + src.data[pos]->size() < start) - { - processed_rows += src.data[pos]->size(); - ++pos; - } - - /// Insert from the first part of column. - if (pos < src.data.size() && processed_rows < start) - { - size_t part_start = start - processed_rows; - size_t part_length = std::min(src.data[pos]->size() - part_start, end - start); - insert_from_part(src.data[pos], part_start, part_length); - processed_rows += src.data[pos]->size(); - ++pos; - } - - /// Insert from the parts of column in the middle of range. - while (pos < src.data.size() && processed_rows + src.data[pos]->size() < end) - { - insert_from_part(src.data[pos], 0, src.data[pos]->size()); - processed_rows += src.data[pos]->size(); - ++pos; - } - - /// Insert from the last part of column if needed. 
- if (pos < src.data.size() && processed_rows < end) - { - size_t part_end = end - processed_rows; - insert_from_part(src.data[pos], 0, part_end); - } -} - -bool ColumnObject::Subcolumn::isFinalized() const -{ - return num_of_defaults_in_prefix == 0 && - (data.empty() || (data.size() == 1 && !data[0]->isSparse())); -} - -void ColumnObject::Subcolumn::finalize() -{ - if (isFinalized()) - return; - - if (data.size() == 1 && num_of_defaults_in_prefix == 0) - { - data[0] = data[0]->convertToFullColumnIfSparse(); - return; - } - - const auto & to_type = least_common_type.get(); - auto result_column = to_type->createColumn(); - - if (num_of_defaults_in_prefix) - result_column->insertManyDefaults(num_of_defaults_in_prefix); - - for (auto & part : data) - { - part = part->convertToFullColumnIfSparse(); - auto from_type = getDataTypeByColumn(*part); - size_t part_size = part->size(); - - if (!from_type->equals(*to_type)) - { - auto offsets = ColumnUInt64::create(); - auto & offsets_data = offsets->getData(); - - /// We need to convert only non-default values and then recreate column - /// with default value of new type, because default values (which represents misses in data) - /// may be inconsistent between types (e.g "0" in UInt64 and empty string in String). - - part->getIndicesOfNonDefaultRows(offsets_data, 0, part_size); - - if (offsets->size() == part_size) - { - part = castColumn({part, from_type, ""}, to_type); - } - else - { - auto values = part->index(*offsets, offsets->size()); - values = castColumn({values, from_type, ""}, to_type); - part = values->createWithOffsets(offsets_data, *createColumnConstWithDefaultValue(result_column->getPtr()), part_size, /*shift=*/ 0); - } - } - - result_column->insertRangeFrom(*part, 0, part_size); - } - - data = { std::move(result_column) }; - num_of_defaults_in_prefix = 0; -} - -void ColumnObject::Subcolumn::insertDefault() -{ - if (data.empty()) - ++num_of_defaults_in_prefix; - else - data.back()->insertDefault(); - - ++num_rows; -} - -void ColumnObject::Subcolumn::insertManyDefaults(size_t length) -{ - if (data.empty()) - num_of_defaults_in_prefix += length; - else - data.back()->insertManyDefaults(length); - - num_rows += length; -} - -void ColumnObject::Subcolumn::popBack(size_t n) -{ - assert(n <= size()); - - num_rows -= n; - size_t num_removed = 0; - for (auto it = data.rbegin(); it != data.rend(); ++it) - { - if (n == 0) - break; - - auto & column = *it; - if (n < column->size()) - { - column->popBack(n); - n = 0; - } - else - { - ++num_removed; - n -= column->size(); - } - } - - data.resize(data.size() - num_removed); - num_of_defaults_in_prefix -= n; -} - -ColumnObject::Subcolumn ColumnObject::Subcolumn::cut(size_t start, size_t length) const -{ - Subcolumn new_subcolumn(0, is_nullable); - new_subcolumn.insertRangeFrom(*this, start, length); - return new_subcolumn; -} - -Field ColumnObject::Subcolumn::getLastField() const -{ - if (data.empty()) - return Field(); - - const auto & last_part = data.back(); - assert(!last_part->empty()); - return (*last_part)[last_part->size() - 1]; -} - -FieldInfo ColumnObject::Subcolumn::getFieldInfo() const -{ - const auto & base_type = least_common_type.getBase(); - return FieldInfo - { - .scalar_type = base_type, - .have_nulls = base_type->isNullable(), - .need_convert = false, - .num_dimensions = least_common_type.getNumberOfDimensions(), - .need_fold_dimension = false, - }; -} - -ColumnObject::Subcolumn ColumnObject::Subcolumn::recreateWithDefaultValues(const FieldInfo & field_info) const -{ - auto 
scalar_type = field_info.scalar_type; - if (is_nullable) - scalar_type = makeNullable(scalar_type); - - Subcolumn new_subcolumn(*this); - new_subcolumn.least_common_type = LeastCommonType{createArrayOfType(scalar_type, field_info.num_dimensions)}; - - for (auto & part : new_subcolumn.data) - part = recreateColumnWithDefaultValues(part, scalar_type, field_info.num_dimensions); - - return new_subcolumn; -} - -IColumn & ColumnObject::Subcolumn::getFinalizedColumn() -{ - assert(isFinalized()); - return *data[0]; -} - -const IColumn & ColumnObject::Subcolumn::getFinalizedColumn() const -{ - assert(isFinalized()); - return *data[0]; -} - -const ColumnPtr & ColumnObject::Subcolumn::getFinalizedColumnPtr() const -{ - assert(isFinalized()); - return data[0]; -} - -ColumnObject::Subcolumn::LeastCommonType::LeastCommonType() - : type(std::make_shared()) - , base_type(type) - , num_dimensions(0) -{ -} - -ColumnObject::Subcolumn::LeastCommonType::LeastCommonType(DataTypePtr type_) - : type(std::move(type_)) - , base_type(getBaseTypeOfArray(type)) - , num_dimensions(DB::getNumberOfDimensions(*type)) -{ -} - -ColumnObject::ColumnObject(bool is_nullable_) - : is_nullable(is_nullable_) - , num_rows(0) -{ -} - -ColumnObject::ColumnObject(Subcolumns && subcolumns_, bool is_nullable_) - : is_nullable(is_nullable_) - , subcolumns(std::move(subcolumns_)) - , num_rows(subcolumns.empty() ? 0 : (*subcolumns.begin())->data.size()) - -{ - checkConsistency(); -} - -void ColumnObject::checkConsistency() const -{ - if (subcolumns.empty()) - return; - - for (const auto & leaf : subcolumns) - { - if (num_rows != leaf->data.size()) - { - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Sizes of subcolumns are inconsistent in ColumnObject." - " Subcolumn '{}' has {} rows, but expected size is {}", - leaf->path.getPath(), leaf->data.size(), num_rows); - } - } -} - -size_t ColumnObject::size() const -{ -#ifndef NDEBUG - checkConsistency(); -#endif - return num_rows; -} - -size_t ColumnObject::byteSize() const -{ - size_t res = 0; - for (const auto & entry : subcolumns) - res += entry->data.byteSize(); - return res; -} - -size_t ColumnObject::allocatedBytes() const -{ - size_t res = 0; - for (const auto & entry : subcolumns) - res += entry->data.allocatedBytes(); - return res; -} - -void ColumnObject::forEachSubcolumn(MutableColumnCallback callback) -{ - for (auto & entry : subcolumns) - for (auto & part : entry->data.data) - callback(part); -} - -void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) -{ - for (auto & entry : subcolumns) - { - for (auto & part : entry->data.data) - { - callback(*part); - part->forEachSubcolumnRecursively(callback); - } - } -} - -void ColumnObject::insert(const Field & field) -{ - const auto & object = field.get(); - - HashSet inserted_paths; - size_t old_size = size(); - for (const auto & [key_str, value] : object) - { - PathInData key(key_str); - inserted_paths.insert(key_str); - if (!hasSubcolumn(key)) - addSubcolumn(key, old_size); - - auto & subcolumn = getSubcolumn(key); - subcolumn.insert(value); - } - - for (auto & entry : subcolumns) - { - if (!inserted_paths.has(entry->path.getPath())) - { - bool inserted = tryInsertDefaultFromNested(entry); - if (!inserted) - entry->data.insertDefault(); - } - } - - ++num_rows; -} - -bool ColumnObject::tryInsert(const Field & field) -{ - if (field.getType() != Field::Types::Which::Object) - return false; - - insert(field); - return true; -} - -void ColumnObject::insertDefault() -{ - for (auto & entry : 
subcolumns) - entry->data.insertDefault(); - - ++num_rows; + return ColumnObject::create(std::move(resized_typed_paths), std::move(resized_dynamic_paths), shared_data->cloneResized(size), max_dynamic_paths, max_dynamic_types, statistics); } Field ColumnObject::operator[](size_t n) const { - Field object; - get(n, object); + Object object; + + for (const auto & [path, column] : typed_paths) + object[path] = (*column)[n]; + for (const auto & [path, column] : dynamic_paths) + { + /// Output only non-null values from dynamic paths. We cannot distinguish cases when + /// dynamic path has Null value and when it's absent in the row and consider them equivalent. + if (!column->isNullAt(n)) + object[path] = (*column)[n]; + } + + const auto & shared_data_offsets = getSharedDataOffsets(); + const auto [shared_paths, shared_values] = getSharedDataPathsAndValues(); + size_t start = shared_data_offsets[ssize_t(n) - 1]; + size_t end = shared_data_offsets[n]; + for (size_t i = start; i != end; ++i) + { + String path = shared_paths->getDataAt(i).toString(); + auto value_data = shared_values->getDataAt(i); + ReadBufferFromMemory buf(value_data.data, value_data.size); + Field value; + getDynamicSerialization()->deserializeBinary(value, buf, getFormatSettings()); + object[path] = value; + } + return object; } void ColumnObject::get(size_t n, Field & res) const { - assert(n < size()); - res = Object(); - auto & object = res.get(); + res = (*this)[n]; +} - for (const auto & entry : subcolumns) +bool ColumnObject::isDefaultAt(size_t n) const +{ + for (const auto & [path, column] : typed_paths) { - auto it = object.try_emplace(entry->path.getPath()).first; - entry->data.get(n, it->second); + if (!column->isDefaultAt(n)) + return false; } + + for (const auto & [path, column] : dynamic_paths) + { + if (!column->isDefaultAt(n)) + return false; + } + + if (!shared_data->isDefaultAt(n)) + return false; + + return true; +} + +StringRef ColumnObject::getDataAt(size_t) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDataAt is not supported for {}", getName()); +} + +void ColumnObject::insertData(const char *, size_t) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertData is not supported for {}", getName()); +} + +IColumn * ColumnObject::tryToAddNewDynamicPath(const String & path) +{ + if (dynamic_paths.size() == max_dynamic_paths) + return nullptr; + + auto new_dynamic_column = ColumnDynamic::create(max_dynamic_types); + new_dynamic_column->insertManyDefaults(size()); + auto it = dynamic_paths.emplace(path, std::move(new_dynamic_column)).first; + return it->second.get(); +} + +void ColumnObject::setDynamicPaths(const std::vector & paths) +{ + if (paths.size() > max_dynamic_paths) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot set dynamic paths to Object column, the number of paths ({}) exceeds the limit ({})", paths.size(), max_dynamic_paths); + + size_t size = this->size(); + for (const auto & path : paths) + { + auto new_dynamic_column = ColumnDynamic::create(max_dynamic_types); + if (size) + new_dynamic_column->insertManyDefaults(size); + dynamic_paths[path] = std::move(new_dynamic_column); + } +} + +void ColumnObject::insert(const Field & x) +{ + const auto & object = x.get(); + auto & shared_data_offsets = getSharedDataOffsets(); + auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + size_t current_size = size(); + for (const auto & [path, value_field] : object) + { + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + 
typed_it->second->insert(value_field); + } + else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + { + dynamic_it->second->insert(value_field); + } + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + { + dynamic_path_column->insert(value_field); + } + /// We reached the limit on dynamic paths. Add this path to the common data if the value is not Null. + /// (we cannot distinguish cases when path has Null value or is absent in the row and consider them equivalent). + /// Object is actually std::map, so all paths are already sorted and we can add it right now. + else if (!value_field.isNull()) + { + shared_data_paths->insertData(path.data(), path.size()); + auto & shared_data_values_chars = shared_data_values->getChars(); + WriteBufferFromVector value_buf(shared_data_values_chars, AppendModeTag()); + getDynamicSerialization()->serializeBinary(value_field, value_buf, getFormatSettings()); + value_buf.finalize(); + shared_data_values_chars.push_back(0); + shared_data_values->getOffsets().push_back(shared_data_values_chars.size()); + } + } + + shared_data_offsets.push_back(shared_data_paths->size()); + + /// Fill all remaining typed and dynamic paths with default values. + for (auto & [_, column] : typed_paths) + { + if (column->size() == current_size) + column->insertDefault(); + } + + for (auto & [_, column] : dynamic_paths) + { + if (column->size() == current_size) + column->insertDefault(); + } +} + +bool ColumnObject::tryInsert(const Field & x) +{ + if (x.getType() != Field::Types::Which::Object) + return false; + + const auto & object = x.get(); + auto & shared_data_offsets = getSharedDataOffsets(); + auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + size_t prev_size = size(); + size_t prev_paths_size = shared_data_paths->size(); + size_t prev_values_size = shared_data_values->size(); + auto restore_sizes = [&]() + { + for (auto & [_, column] : typed_paths) + { + if (column->size() != prev_size) + column->popBack(column->size() - prev_size); + } + + for (auto & [_, column] : dynamic_paths) + { + if (column->size() != prev_size) + column->popBack(column->size() - prev_size); + } + + if (shared_data_paths->size() != prev_paths_size) + shared_data_paths->popBack(shared_data_paths->size() - prev_paths_size); + if (shared_data_values->size() != prev_values_size) + shared_data_values->popBack(shared_data_values->size() - prev_values_size); + }; + + for (const auto & [path, value_field] : object) + { + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + if (!typed_it->second->tryInsert(value_field)) + { + restore_sizes(); + return false; + } + } + else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + { + if (!dynamic_it->second->tryInsert(value_field)) + { + restore_sizes(); + return false; + } + } + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + { + if (!dynamic_path_column->tryInsert(value_field)) + { + restore_sizes(); + return false; + } + } + /// We reached the limit on dynamic paths. Add this path to the common data if the value is not Null. + /// (we cannot distinguish cases when path has Null value or is absent in the row and consider them equivalent). + /// Object is actually std::map, so all paths are already sorted and we can add it right now. 
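The branch order used by insert() and tryInsert() above is: a typed path takes the value directly, then an already known dynamic path, then a new dynamic path while the max_dynamic_paths limit allows it, and otherwise the value goes to shared data with Nulls dropped. A small standalone model of that routing, with std::map and std::string standing in for the real column types (illustrative only, not the ColumnObject API):

    #include <cstddef>
    #include <map>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    /// Simplified model of where one (path, value) pair of an inserted object ends up.
    struct RowSketch
    {
        std::map<std::string, std::string> typed;                    /// fixed, declared paths
        std::map<std::string, std::string> dynamic;                  /// grows up to max_dynamic_paths
        std::vector<std::pair<std::string, std::string>> shared;     /// overflow, kept sorted by path
        size_t max_dynamic_paths = 2;

        void insertPath(const std::string & path, const std::optional<std::string> & value)
        {
            if (auto typed_it = typed.find(path); typed_it != typed.end())
                typed_it->second = value.value_or("");               /// declared paths always store the value
            else if (auto dynamic_it = dynamic.find(path); dynamic_it != dynamic.end())
                dynamic_it->second = value.value_or("");
            else if (dynamic.size() < max_dynamic_paths)
                dynamic[path] = value.value_or("");                  /// still room for a new dynamic path
            else if (value)                                          /// limit reached: only non-null values reach shared data
                shared.emplace_back(path, *value);                   /// callers supply paths in sorted order (std::map)
        }
    };

    int main()
    {
        RowSketch row;
        row.typed.emplace("id", "");
        row.insertPath("id", std::string("42"));                     /// typed
        row.insertPath("user.name", std::string("alice"));           /// new dynamic path
        row.insertPath("user.age", std::string("30"));               /// new dynamic path
        row.insertPath("extra.tag", std::string("x"));               /// limit of 2 reached, goes to shared data
    }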
+ else if (!value_field.isNull()) + { + WriteBufferFromOwnString value_buf; + getDynamicSerialization()->serializeBinary(value_field, value_buf, getFormatSettings()); + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value_buf.str().data(), value_buf.str().size()); + } + } + + shared_data_offsets.push_back(shared_data_paths->size()); + + /// Fill all remaining typed and dynamic paths with default values. + for (auto & [_, column] : typed_paths) + { + if (column->size() == prev_size) + column->insertDefault(); + } + + for (auto & [_, column] : dynamic_paths) + { + if (column->size() == prev_size) + column->insertDefault(); + } + + return true; } void ColumnObject::insertFrom(const IColumn & src, size_t n) { - insert(src[n]); + const auto & src_object_column = assert_cast(src); + + /// First, insert typed paths, they must be the same for both columns. + for (auto & [path, column] : src_object_column.typed_paths) + typed_paths[path]->insertFrom(*column, n); + + /// Second, insert dynamic paths and extend them if needed. + /// We can reach the limit of dynamic paths, and in this case + /// the rest of dynamic paths will be inserted into shared data. + std::vector src_dynamic_paths_for_shared_data; + for (const auto & [path, column] : src_object_column.dynamic_paths) + { + /// Check if we already have such dynamic path. + if (auto it = dynamic_paths.find(path); it != dynamic_paths.end()) + it->second->insertFrom(*column, n); + /// Try to add a new dynamic path. + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + dynamic_path_column->insertFrom(*column, n); + /// Limit on dynamic paths is reached, add path to shared data later. + else + src_dynamic_paths_for_shared_data.push_back(path); + } + + /// Finally, insert paths from shared data. + insertFromSharedDataAndFillRemainingDynamicPaths(src_object_column, src_dynamic_paths_for_shared_data, n, 1); } void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t length) { - const auto & src_object = assert_cast(src); + const auto & src_object_column = assert_cast(src); - for (const auto & entry : src_object.subcolumns) + /// First, insert typed paths, they must be the same for both columns. + for (auto & [path, column] : src_object_column.typed_paths) + typed_paths[path]->insertRangeFrom(*column, start, length); + + /// Second, insert dynamic paths and extend them if needed. + /// We can reach the limit of dynamic paths, and in this case + /// the rest of dynamic paths will be inserted into shared data. + std::vector src_dynamic_paths_for_shared_data; + for (const auto & [path, column] : src_object_column.dynamic_paths) { - if (!hasSubcolumn(entry->path)) + /// Check if we already have such dynamic path. + if (auto it = dynamic_paths.find(path); it != dynamic_paths.end()) + it->second->insertRangeFrom(*column, start, length); + /// Try to add a new dynamic path. + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + dynamic_path_column->insertRangeFrom(*column, start, length); + /// Limit on dynamic paths is reached, add path to shared data later. + else + src_dynamic_paths_for_shared_data.push_back(path); + } + + /// Finally, insert paths from shared data. 
+ insertFromSharedDataAndFillRemainingDynamicPaths(src_object_column, src_dynamic_paths_for_shared_data, start, length); +} + +void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::ColumnObject & src_object_column, std::vector & src_dynamic_paths_for_shared_data, size_t start, size_t length) +{ + /// Paths in shared data are sorted, so paths from src_dynamic_paths_for_shared_data should be inserted properly + /// to keep paths sorted. Let's sort them in advance. + std::sort(src_dynamic_paths_for_shared_data.begin(), src_dynamic_paths_for_shared_data.end()); + + /// Check if src object doesn't have any paths in shared data in specified range. + const auto & src_shared_data_offsets = src_object_column.getSharedDataOffsets(); + if (src_shared_data_offsets[ssize_t(start) - 1] == src_shared_data_offsets[ssize_t(start) + length - 1]) + { + size_t current_size = size(); + + /// If no src dynamic columns should be inserted into shared data, insert defaults. + if (src_dynamic_paths_for_shared_data.empty()) { - if (entry->path.hasNested()) - addNestedSubcolumn(entry->path, entry->data.getFieldInfo(), num_rows); + shared_data->insertManyDefaults(length); + } + /// Otherwise insert required src dynamic columns into shared data. + else + { + const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + auto & shared_data_offsets = getSharedDataOffsets(); + for (size_t i = start; i != start + length; ++i) + { + /// Paths in src_dynamic_paths_for_shared_data are already sorted. + for (const auto & path : src_dynamic_paths_for_shared_data) + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, path, *src_object_column.dynamic_paths.at(path), i); + shared_data_offsets.push_back(shared_data_paths->size()); + } + } + + /// Insert default values in all remaining dynamic paths. + for (auto & [_, column] : dynamic_paths) + { + if (column->size() == current_size) + column->insertManyDefaults(length); + } + return; + } + + /// Src object column contains some paths in shared data in specified range. + /// Iterate over this range and insert all required paths into shared data or dynamic paths. + const auto [src_shared_data_paths, src_shared_data_values] = src_object_column.getSharedDataPathsAndValues(); + const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + auto & shared_data_offsets = getSharedDataOffsets(); + for (size_t row = start; row != start + length; ++row) + { + size_t current_size = shared_data_offsets.size(); + /// Use separate index to iterate over sorted src_dynamic_paths_for_shared_data. + size_t src_dynamic_paths_for_shared_data_index = 0; + size_t offset = src_shared_data_offsets[ssize_t(row) - 1]; + size_t end = src_shared_data_offsets[row]; + for (size_t i = offset; i != end; ++i) + { + auto path = src_shared_data_paths->getDataAt(i); + /// Check if we have this path in dynamic paths. + if (auto it = dynamic_paths.find(path.toString()); it != dynamic_paths.end()) + { + /// Deserialize binary value into dynamic column from shared data. + deserializeValueFromSharedData(src_shared_data_values, i, *it->second); + } else - addSubcolumn(entry->path, num_rows); + { + /// Before inserting this path into shared data check if we need to + /// insert dynamic paths from src_dynamic_paths_for_shared_data before. 
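The while loop that follows interleaves two already sorted sequences, the paths stored in the source row's shared data and the overflow dynamic paths collected above, so that the destination shared data stays sorted by path. A compact standalone illustration of that merge over plain std::vector<std::string> (illustrative names, not the column machinery):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    /// Merge two sorted path lists into one sorted list, as the shared-data insertion does.
    std::vector<std::string> mergeSortedPaths(const std::vector<std::string> & shared_paths,
                                              const std::vector<std::string> & overflow_paths)
    {
        std::vector<std::string> result;
        result.reserve(shared_paths.size() + overflow_paths.size());
        size_t overflow_index = 0;
        for (const auto & path : shared_paths)
        {
            /// Emit every overflow path that sorts before the current shared path.
            while (overflow_index < overflow_paths.size() && overflow_paths[overflow_index] < path)
                result.push_back(overflow_paths[overflow_index++]);
            result.push_back(path);
        }
        /// Emit the overflow paths that sort after everything already in shared data.
        while (overflow_index < overflow_paths.size())
            result.push_back(overflow_paths[overflow_index++]);
        return result;
    }

    int main()
    {
        for (const auto & p : mergeSortedPaths({"a.b", "k.x"}, {"c.d", "z.z"}))
            std::cout << p << '\n';   /// prints a.b, c.d, k.x, z.z
    }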
+ while (src_dynamic_paths_for_shared_data_index < src_dynamic_paths_for_shared_data.size() + && src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index] < path) + { + auto dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.at(dynamic_path), row); + ++src_dynamic_paths_for_shared_data_index; + } + + /// Insert path and value from src shared data to our shared data. + shared_data_paths->insertFrom(*src_shared_data_paths, i); + shared_data_values->insertFrom(*src_shared_data_values, i); + } } - auto & subcolumn = getSubcolumn(entry->path); - subcolumn.insertRangeFrom(entry->data, start, length); - } - - for (auto & entry : subcolumns) - { - if (!src_object.hasSubcolumn(entry->path)) + /// Insert remaining dynamic paths from src_dynamic_paths_for_shared_data. + for (; src_dynamic_paths_for_shared_data_index != src_dynamic_paths_for_shared_data.size(); ++src_dynamic_paths_for_shared_data_index) { - bool inserted = tryInsertManyDefaultsFromNested(entry); - if (!inserted) - entry->data.insertManyDefaults(length); + auto dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.at(dynamic_path), row); + } + + shared_data_offsets.push_back(shared_data_paths->size()); + + /// Insert default value in all remaining dynamic paths. + for (auto & [_, column] : dynamic_paths) + { + if (column->size() == current_size) + column->insertDefault(); + } } - - num_rows += length; - finalize(); } -void ColumnObject::popBack(size_t length) +void ColumnObject::serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const String & path, const IColumn & column, size_t n) { - for (auto & entry : subcolumns) - entry->data.popBack(length); + /// Don't store Null values in shared data. We consider Null value equivalent to the absence
+ if (column.isNullAt(n)) + return; - num_rows -= length; + shared_data_paths->insertData(path.data(), path.size()); + auto & shared_data_values_chars = shared_data_values->getChars(); + WriteBufferFromVector value_buf(shared_data_values_chars, AppendModeTag()); + getDynamicSerialization()->serializeBinary(column, n, value_buf, getFormatSettings()); + value_buf.finalize(); + shared_data_values_chars.push_back(0); + shared_data_values->getOffsets().push_back(shared_data_values_chars.size()); } -template -MutableColumnPtr ColumnObject::applyForSubcolumns(Func && func) const +void ColumnObject::deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const { - if (!isFinalized()) + auto value_data = shared_data_values->getDataAt(n); + ReadBufferFromMemory buf(value_data.data, value_data.size); + getDynamicSerialization()->deserializeBinary(column, buf, getFormatSettings()); +} + +void ColumnObject::insertDefault() +{ + for (auto & [_, column] : typed_paths) + column->insertDefault(); + for (auto & [_, column] : dynamic_paths) + column->insertDefault(); + shared_data->insertDefault(); +} + +void ColumnObject::insertManyDefaults(size_t length) +{ + for (auto & [_, column] : typed_paths) + column->insertManyDefaults(length); + for (auto & [_, column] : dynamic_paths) + column->insertManyDefaults(length); + shared_data->insertManyDefaults(length); +} + +void ColumnObject::popBack(size_t n) +{ + for (auto & [_, column] : typed_paths) + column->popBack(n); + for (auto & [_, column] : dynamic_paths) + column->popBack(n); + shared_data->popBack(n); +} + +StringRef ColumnObject::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const +{ + StringRef res(begin, 0); + // Serialize all paths and values in binary format. + const auto & shared_data_offsets = getSharedDataOffsets(); + size_t offset = shared_data_offsets[ssize_t(n) - 1]; + size_t end = shared_data_offsets[ssize_t(n)]; + size_t num_paths = typed_paths.size() + dynamic_paths.size() + (end - offset); + char * pos = arena.allocContinue(sizeof(size_t), begin); + memcpy(pos, &num_paths, sizeof(size_t)); + res.data = pos - res.size; + res.size += sizeof(size_t); + /// Serialize paths and values from typed paths. + for (const auto & [path, column] : typed_paths) { - auto finalized = cloneFinalized(); - auto & finalized_object = assert_cast(*finalized); - return finalized_object.applyForSubcolumns(std::forward(func)); + size_t path_size = path.size(); + pos = arena.allocContinue(sizeof(size_t) + path_size, begin); + memcpy(pos, &path_size, sizeof(size_t)); + memcpy(pos + sizeof(size_t), path.data(), path_size); + auto data_ref = column->serializeValueIntoArena(n, arena, begin); + res.data = data_ref.data - res.size - sizeof(size_t) - path_size; + res.size += data_ref.size + sizeof(size_t) + path_size; } - auto res = ColumnObject::create(is_nullable); - for (const auto & subcolumn : subcolumns) + /// Serialize paths and values from dynamic paths. + for (const auto & [path, column] : dynamic_paths) { - auto new_subcolumn = func(subcolumn->data.getFinalizedColumn()); - res->addSubcolumn(subcolumn->path, new_subcolumn->assumeMutable()); + WriteBufferFromOwnString buf; + getDynamicSerialization()->serializeBinary(*column, n, buf, getFormatSettings()); + serializePathAndValueIntoArena(arena, begin, path, buf.str(), res); } + /// Serialize paths and values from shared data. 
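For reference, the arena format written above is a flat, length-prefixed byte sequence: the number of paths, then for each dynamic or shared path its size and bytes followed by the size and bytes of the binary-serialized value (typed paths instead delegate to their own serializeValueIntoArena). A rough standalone encoder for the generic path/value part of that shape, writing into a std::string rather than an Arena (a sketch, not the real API):

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    /// Append a size_t followed by the raw bytes, mimicking the length-prefixed layout.
    void appendSized(std::string & out, const std::string & bytes)
    {
        size_t size = bytes.size();
        out.append(reinterpret_cast<const char *>(&size), sizeof(size));
        out.append(bytes);
    }

    /// Encode one row given its (path, serialized value) pairs.
    std::string encodeRow(const std::vector<std::pair<std::string, std::string>> & paths_and_values)
    {
        std::string out;
        size_t num_paths = paths_and_values.size();
        out.append(reinterpret_cast<const char *>(&num_paths), sizeof(num_paths));
        for (const auto & [path, value] : paths_and_values)
        {
            appendSized(out, path);    /// [path size][path bytes]
            appendSized(out, value);   /// [value size][value bytes]
        }
        return out;
    }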
+ auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + for (size_t i = offset; i != end; ++i) + serializePathAndValueIntoArena(arena, begin, shared_data_paths->getDataAt(i), shared_data_values->getDataAt(i), res); + return res; } -ColumnPtr ColumnObject::permute(const Permutation & perm, size_t limit) const +void ColumnObject::serializePathAndValueIntoArena(DB::Arena & arena, const char *& begin, StringRef path, StringRef value, StringRef & res) const { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.permute(perm, limit); }); + size_t value_size = value.size; + size_t path_size = path.size; + char * pos = arena.allocContinue(sizeof(size_t) + path_size + sizeof(size_t) + value_size, begin); + memcpy(pos, &path_size, sizeof(size_t)); + memcpy(pos + sizeof(size_t), path.data, path_size); + memcpy(pos + sizeof(size_t) + path_size, &value_size, sizeof(size_t)); + memcpy(pos + sizeof(size_t) + path_size + sizeof(size_t), value.data, value_size); + res.data = pos - res.size; + res.size += sizeof(size_t) + path_size + sizeof(size_t) + value_size; } -ColumnPtr ColumnObject::filter(const Filter & filter, ssize_t result_size_hint) const +const char * ColumnObject::deserializeAndInsertFromArena(const char * pos) { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.filter(filter, result_size_hint); }); + /// Deserialize paths and values and insert them into typed paths, dynamic paths or shared data. + /// Serialized paths could be unsorted, so we will have to sort all paths that will be inserted into shared data. + std::vector> paths_and_values_for_shared_data; + auto num_paths = unalignedLoad(pos); + pos += sizeof(size_t); + for (size_t i = 0; i != num_paths; ++i) + { + auto path_size = unalignedLoad(pos); + pos += sizeof(size_t); + StringRef path(pos, path_size); + String path_str = path.toString(); + pos += path_size; + /// Check if it's a typed path. In this case we should use + /// deserializeAndInsertFromArena of corresponding column. + if (auto typed_it = typed_paths.find(path_str); typed_it != typed_paths.end()) + { + pos = typed_it->second->deserializeAndInsertFromArena(pos); + } + /// If it's not a typed path, deserialize binary value and try to insert it + /// to dynamic paths or shared data. + else + { + + auto value_size = unalignedLoad(pos); + pos += sizeof(size_t); + StringRef value(pos, value_size); + pos += value_size; + /// Check if we have this path in dynamic paths. + if (auto dynamic_it = dynamic_paths.find(path_str); dynamic_it != dynamic_paths.end()) + { + ReadBufferFromMemory buf(value.data, value.size); + getDynamicSerialization()->deserializeBinary(*dynamic_it->second, buf, getFormatSettings()); + } + /// Try to add a new dynamic path. + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path_str)) + { + ReadBufferFromMemory buf(value.data, value.size); + getDynamicSerialization()->deserializeBinary(*dynamic_path_column, buf, getFormatSettings()); + } + /// Limit on dynamic paths is reached, add this path to shared data later. + else + { + paths_and_values_for_shared_data.emplace_back(path, value); + } + } + } + + /// Sort and insert all paths from paths_and_values_for_shared_data into shared data. 
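deserializeAndInsertFromArena above walks that same layout back: read the path count, then alternating size-prefixed path and value chunks, with typed paths again handed to their own columns. A matching standalone decoder for the generic case, over plain pointers and std::string (hypothetical helpers; it ignores the typed-path special case):

    #include <cstddef>
    #include <cstring>
    #include <string>
    #include <utility>
    #include <vector>

    /// Read a size_t length prefix, then that many bytes.
    static std::string readSized(const char *& pos)
    {
        size_t size;
        std::memcpy(&size, pos, sizeof(size));
        pos += sizeof(size);
        std::string bytes(pos, size);
        pos += size;
        return bytes;
    }

    /// Decode one row of (path, serialized value) pairs from a buffer produced by the encoder sketched earlier.
    std::vector<std::pair<std::string, std::string>> decodeRow(const char * pos)
    {
        size_t num_paths;
        std::memcpy(&num_paths, pos, sizeof(num_paths));
        pos += sizeof(num_paths);

        std::vector<std::pair<std::string, std::string>> result;
        result.reserve(num_paths);
        for (size_t i = 0; i != num_paths; ++i)
        {
            auto path = readSized(pos);
            auto value = readSized(pos);
            result.emplace_back(std::move(path), std::move(value));
        }
        return result;
    }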
+ std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end()); + const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + for (const auto & [path, value] : paths_and_values_for_shared_data) + { + shared_data_paths->insertData(path.data, path.size); + shared_data_values->insertData(value.data, value.size); + } + + getSharedDataOffsets().push_back(shared_data_paths->size()); + return pos; +} + +const char * ColumnObject::skipSerializedInArena(const char * pos) const +{ + auto num_paths = unalignedLoad(pos); + pos += sizeof(size_t); + for (size_t i = 0; i != num_paths; ++i) + { + auto path_size = unalignedLoad(pos); + pos += sizeof(size_t); + String path(pos, path_size); + pos += path_size; + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + pos = typed_it->second->skipSerializedInArena(pos); + } + else + { + auto value_size = unalignedLoad(pos); + pos += sizeof(size_t) + value_size; + } + } + + return pos; +} + +void ColumnObject::updateHashWithValue(size_t n, SipHash & hash) const +{ + for (const auto & [_, column] : typed_paths) + column->updateHashWithValue(n, hash); + for (const auto & [_, column] : dynamic_paths) + column->updateHashWithValue(n, hash); + shared_data->updateHashWithValue(n, hash); +} + +void ColumnObject::updateWeakHash32(WeakHash32 & hash) const +{ + for (const auto & [_, column] : typed_paths) + column->updateWeakHash32(hash); + for (const auto & [_, column] : dynamic_paths) + column->updateWeakHash32(hash); + shared_data->updateWeakHash32(hash); +} + +void ColumnObject::updateHashFast(SipHash & hash) const +{ + for (const auto & [_, column] : typed_paths) + column->updateHashFast(hash); + for (const auto & [_, column] : dynamic_paths) + column->updateHashFast(hash); + shared_data->updateHashFast(hash); +} + +ColumnPtr ColumnObject::filter(const Filter & filt, ssize_t result_size_hint) const +{ + std::unordered_map filtered_typed_paths; + filtered_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + filtered_typed_paths[path] = column->filter(filt, result_size_hint); + + std::unordered_map filtered_dynamic_paths; + filtered_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + filtered_dynamic_paths[path] = column->filter(filt, result_size_hint); + + auto filtered_shared_data = shared_data->filter(filt, result_size_hint); + return ColumnObject::create(filtered_typed_paths, filtered_dynamic_paths, filtered_shared_data, max_dynamic_paths, max_dynamic_types); +} + +void ColumnObject::expand(const Filter & mask, bool inverted) +{ + for (auto & [_, column] : typed_paths) + column->expand(mask, inverted); + for (auto & [_, column] : dynamic_paths) + column->expand(mask, inverted); + shared_data->expand(mask, inverted); +} + +ColumnPtr ColumnObject::permute(const Permutation & perm, size_t limit) const +{ + std::unordered_map permuted_typed_paths; + permuted_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + permuted_typed_paths[path] = column->permute(perm, limit); + + std::unordered_map permuted_dynamic_paths; + permuted_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + permuted_dynamic_paths[path] = column->permute(perm, limit); + + auto permuted_shared_data = shared_data->permute(perm, limit); + return ColumnObject::create(permuted_typed_paths, permuted_dynamic_paths, permuted_shared_data, max_dynamic_paths, 
max_dynamic_types); } ColumnPtr ColumnObject::index(const IColumn & indexes, size_t limit) const { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.index(indexes, limit); }); + std::unordered_map indexed_typed_paths; + indexed_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + indexed_typed_paths[path] = column->index(indexes, limit); + + std::unordered_map indexed_dynamic_paths; + indexed_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + indexed_dynamic_paths[path] = column->index(indexes, limit); + + auto indexed_shared_data = shared_data->index(indexes, limit); + return ColumnObject::create(indexed_typed_paths, indexed_dynamic_paths, indexed_shared_data, max_dynamic_paths, max_dynamic_types); } -ColumnPtr ColumnObject::replicate(const Offsets & offsets) const +ColumnPtr ColumnObject::replicate(const Offsets & replicate_offsets) const { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.replicate(offsets); }); + std::unordered_map replicated_typed_paths; + replicated_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + replicated_typed_paths[path] = column->replicate(replicate_offsets); + + std::unordered_map replicated_dynamic_paths; + replicated_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + replicated_dynamic_paths[path] = column->replicate(replicate_offsets); + + auto replicated_shared_data = shared_data->replicate(replicate_offsets); + return ColumnObject::create(replicated_typed_paths, replicated_dynamic_paths, replicated_shared_data, max_dynamic_paths, max_dynamic_types); } -MutableColumnPtr ColumnObject::cloneResized(size_t new_size) const +MutableColumns ColumnObject::scatter(ColumnIndex num_columns, const Selector & selector) const { - if (new_size == 0) - return ColumnObject::create(is_nullable); + std::vector> scattered_typed_paths(num_columns); + for (auto & typed_paths_ : scattered_typed_paths) + typed_paths_.reserve(typed_paths.size()); - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.cloneResized(new_size); }); + for (const auto & [path, column] : typed_paths) + { + auto scattered_columns = column->scatter(num_columns, selector); + for (size_t i = 0; i != num_columns; ++i) + scattered_typed_paths[i][path] = std::move(scattered_columns[i]); + } + + std::vector> scattered_dynamic_paths(num_columns); + for (auto & dynamic_paths_ : scattered_dynamic_paths) + dynamic_paths_.reserve(dynamic_paths.size()); + + for (const auto & [path, column] : dynamic_paths) + { + auto scattered_columns = column->scatter(num_columns, selector); + for (size_t i = 0; i != num_columns; ++i) + scattered_dynamic_paths[i][path] = std::move(scattered_columns[i]); + } + + auto scattered_shared_data_columns = shared_data->scatter(num_columns, selector); + MutableColumns result_columns; + result_columns.reserve(num_columns); + for (size_t i = 0; i != num_columns; ++i) + result_columns.emplace_back(ColumnObject::create(std::move(scattered_typed_paths[i]), std::move(scattered_dynamic_paths[i]), std::move(scattered_shared_data_columns[i]), max_dynamic_paths, max_dynamic_types)); + return result_columns; } void ColumnObject::getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const { - res.resize(num_rows); + /// Values in ColumnObject are not comparable. 
+ res.resize(size()); iota(res.data(), res.size(), size_t(0)); } -void ColumnObject::getExtremes(Field & min, Field & max) const +void ColumnObject::reserve(size_t n) { - if (num_rows == 0) + for (auto & [_, column] : typed_paths) + column->reserve(n); + for (auto & [_, column] : dynamic_paths) + column->reserve(n); + shared_data->reserve(n); +} + +void ColumnObject::ensureOwnership() +{ + for (auto & [_, column] : typed_paths) + column->ensureOwnership(); + for (auto & [_, column] : dynamic_paths) + column->ensureOwnership(); + shared_data->ensureOwnership(); +} + +size_t ColumnObject::byteSize() const +{ + size_t size = 0; + for (auto & [_, column] : typed_paths) + size += column->byteSize(); + for (auto & [_, column] : dynamic_paths) + size += column->byteSize(); + size += shared_data->byteSize(); + return size; +} + +size_t ColumnObject::byteSizeAt(size_t n) const +{ + size_t size = 0; + for (auto & [_, column] : typed_paths) + size += column->byteSizeAt(n); + for (auto & [_, column] : dynamic_paths) + size += column->byteSizeAt(n); + size += shared_data->byteSizeAt(n); + return size; +} + +size_t ColumnObject::allocatedBytes() const +{ + size_t size = 0; + for (auto & [_, column] : typed_paths) + size += column->allocatedBytes(); + for (auto & [_, column] : dynamic_paths) + size += column->allocatedBytes(); + size += shared_data->allocatedBytes(); + return size; +} + +void ColumnObject::protect() +{ + for (auto & [_, column] : typed_paths) + column->protect(); + for (auto & [_, column] : dynamic_paths) + column->protect(); + shared_data->protect(); +} + +void ColumnObject::forEachSubcolumn(DB::IColumn::MutableColumnCallback callback) +{ + for (auto & [_, column] : typed_paths) + callback(column); + for (auto & [_, column] : dynamic_paths) + callback(column); + callback(shared_data); +} + +void ColumnObject::forEachSubcolumnRecursively(DB::IColumn::RecursiveMutableColumnCallback callback) +{ + for (auto & [_, column] : typed_paths) { - min = Object(); - max = Object(); + callback(*column); + column->forEachSubcolumnRecursively(callback); } - else + for (auto & [_, column] : dynamic_paths) { - get(0, min); - get(0, max); + callback(*column); + column->forEachSubcolumnRecursively(callback); } + callback(*shared_data); + shared_data->forEachSubcolumnRecursively(callback); } -const ColumnObject::Subcolumn & ColumnObject::getSubcolumn(const PathInData & key) const +bool ColumnObject::structureEquals(const IColumn & rhs) const { - if (const auto * node = subcolumns.findLeaf(key)) - return node->data; - - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObject", key.getPath()); -} - -ColumnObject::Subcolumn & ColumnObject::getSubcolumn(const PathInData & key) -{ - if (const auto * node = subcolumns.findLeaf(key)) - return const_cast(node)->data; - - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObject", key.getPath()); -} - -bool ColumnObject::hasSubcolumn(const PathInData & key) const -{ - return subcolumns.findLeaf(key) != nullptr; -} - -void ColumnObject::addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn) -{ - size_t new_size = subcolumn->size(); - bool inserted = subcolumns.add(key, Subcolumn(std::move(subcolumn), is_nullable)); - - if (!inserted) - throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); - - if (num_rows == 0) - num_rows = new_size; - else if (new_size != num_rows) - throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, - "Size of subcolumn {} ({}) 
is inconsistent with column size ({})", - key.getPath(), new_size, num_rows); -} - -void ColumnObject::addSubcolumn(const PathInData & key, size_t new_size) -{ - bool inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); - if (!inserted) - throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); - - if (num_rows == 0) - num_rows = new_size; - else if (new_size != num_rows) - throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, - "Required size of subcolumn {} ({}) is inconsistent with column size ({})", - key.getPath(), new_size, num_rows); -} - -void ColumnObject::addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size) -{ - if (!key.hasNested()) - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Cannot add Nested subcolumn, because path doesn't contain Nested"); - - bool inserted = false; - /// We find node that represents the same Nested type as @key. - const auto * nested_node = subcolumns.findBestMatch(key); - - if (nested_node) - { - /// Find any leaf of Nested subcolumn. - const auto * leaf = Subcolumns::findLeaf(nested_node, [&](const auto &) { return true; }); - assert(leaf); - - /// Recreate subcolumn with default values and the same sizes of arrays. - auto new_subcolumn = leaf->data.recreateWithDefaultValues(field_info); - - /// It's possible that we have already inserted value from current row - /// to this subcolumn. So, adjust size to expected. - if (new_subcolumn.size() > new_size) - new_subcolumn.popBack(new_subcolumn.size() - new_size); - - assert(new_subcolumn.size() == new_size); - inserted = subcolumns.add(key, new_subcolumn); - } - else - { - /// If node was not found just add subcolumn with empty arrays. - inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); - } - - if (!inserted) - throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); - - if (num_rows == 0) - num_rows = new_size; - else if (new_size != num_rows) - throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, - "Required size of subcolumn {} ({}) is inconsistent with column size ({})", - key.getPath(), new_size, num_rows); -} - -const ColumnObject::Subcolumns::Node * ColumnObject::getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const -{ - if (!entry->path.hasNested()) - return nullptr; - - size_t old_size = entry->data.size(); - const auto * current_node = subcolumns.findLeaf(entry->path); - const Subcolumns::Node * leaf = nullptr; - - while (current_node) - { - /// Try to find the first Nested up to the current node. - const auto * node_nested = Subcolumns::findParent(current_node, - [](const auto & candidate) { return candidate.isNested(); }); - - if (!node_nested) - break; - - /// Find the leaf with subcolumn that contains values - /// for the last rows. - /// If there are no leaves, skip current node and find - /// the next node up to the current. - leaf = Subcolumns::findLeaf(node_nested, - [&](const auto & candidate) - { - return candidate.data.size() > old_size; - }); - - if (leaf) - break; - - current_node = node_nested->parent; - } - - if (leaf && isNothing(leaf->data.getLeastCommonTypeBase())) - return nullptr; - - return leaf; -} - -bool ColumnObject::tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const -{ - const auto * leaf = getLeafOfTheSameNested(entry); - if (!leaf) + /// 2 Object columns have equal structure if they have the same typed paths and max_dynamic_paths/max_dynamic_types. 
+ const auto * rhs_object = typeid_cast(&rhs); + if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() || max_dynamic_paths != rhs_object->max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types) return false; - size_t old_size = entry->data.size(); - auto field_info = entry->data.getFieldInfo(); + for (const auto & [path, column] : typed_paths) + { + auto it = rhs_object->typed_paths.find(path); + if (it == rhs_object->typed_paths.end() || !it->second->structureEquals(*column)) + return false; + } - /// Cut the needed range from the found leaf - /// and replace scalar values to the correct - /// default values for given entry. - auto new_subcolumn = leaf->data - .cut(old_size, leaf->data.size() - old_size) - .recreateWithDefaultValues(field_info); - - entry->data.insertRangeFrom(new_subcolumn, 0, new_subcolumn.size()); return true; } -bool ColumnObject::tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const +ColumnPtr ColumnObject::compress() const { - const auto * leaf = getLeafOfTheSameNested(entry); - if (!leaf) - return false; + std::unordered_map compressed_typed_paths; + compressed_typed_paths.reserve(typed_paths.size()); + size_t byte_size = 0; + for (const auto & [path, column] : typed_paths) + { + auto compressed_column = column->compress(); + byte_size += compressed_column->byteSize(); + compressed_typed_paths[path] = std::move(compressed_column); + } - auto last_field = leaf->data.getLastField(); - if (last_field.isNull()) - return false; + std::unordered_map compressed_dynamic_paths; + compressed_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + { + auto compressed_column = column->compress(); + byte_size += compressed_column->byteSize(); + compressed_dynamic_paths[path] = std::move(compressed_column); + } - size_t leaf_num_dimensions = leaf->data.getNumberOfDimensions(); - size_t entry_num_dimensions = entry->data.getNumberOfDimensions(); + auto compressed_shared_data = shared_data->compress(); + byte_size += compressed_shared_data->byteSize(); - auto default_scalar = entry_num_dimensions > leaf_num_dimensions - ? 
createEmptyArrayField(entry_num_dimensions - leaf_num_dimensions) - : entry->data.getLeastCommonTypeBase()->getDefault(); + auto decompress = + [my_compressed_typed_paths = std::move(compressed_typed_paths), + my_compressed_dynamic_paths = std::move(compressed_dynamic_paths), + my_compressed_shared_data = std::move(compressed_shared_data), + my_max_dynamic_paths = max_dynamic_paths, + my_max_dynamic_types = max_dynamic_types, + my_statistics = statistics]() mutable + { + std::unordered_map decompressed_typed_paths; + decompressed_typed_paths.reserve(my_compressed_typed_paths.size()); + for (const auto & [path, column] : my_compressed_typed_paths) + decompressed_typed_paths[path] = column->decompress(); - auto default_field = applyVisitor(FieldVisitorReplaceScalars(default_scalar, leaf_num_dimensions), last_field); - entry->data.insert(std::move(default_field)); - return true; -} + std::unordered_map decompressed_dynamic_paths; + decompressed_dynamic_paths.reserve(my_compressed_dynamic_paths.size()); + for (const auto & [path, column] : my_compressed_dynamic_paths) + decompressed_dynamic_paths[path] = column->decompress(); -PathsInData ColumnObject::getKeys() const -{ - PathsInData keys; - keys.reserve(subcolumns.size()); - for (const auto & entry : subcolumns) - keys.emplace_back(entry->path); - return keys; -} + auto decompressed_shared_data = my_compressed_shared_data->decompress(); + return ColumnObject::create(decompressed_typed_paths, decompressed_dynamic_paths, decompressed_shared_data, my_max_dynamic_paths, my_max_dynamic_types, my_statistics); + }; -bool ColumnObject::isFinalized() const -{ - return std::all_of(subcolumns.begin(), subcolumns.end(), - [](const auto & entry) { return entry->data.isFinalized(); }); + return ColumnCompressed::create(size(), byte_size, decompress); } void ColumnObject::finalize() { - size_t old_size = size(); - Subcolumns new_subcolumns; - for (auto && entry : subcolumns) + for (auto & [_, column] : typed_paths) + column->finalize(); + for (auto & [_, column] : dynamic_paths) + column->finalize(); + shared_data->finalize(); +} + +bool ColumnObject::isFinalized() const +{ + bool finalized = true; + for (auto & [_, column] : typed_paths) + finalized &= column->isFinalized(); + for (auto & [_, column] : dynamic_paths) + finalized &= column->isFinalized(); + finalized &= shared_data->isFinalized(); + return finalized; +} + +void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) +{ + if (!empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "takeDynamicStructureFromSourceColumns should be called only on empty Object column"); + + /// During serialization of Object column in MergeTree all Object columns + /// in single part must have the same structure (the same dynamic paths). During merge + /// resulting column is constructed by inserting from source columns, + /// but it may happen that resulting column doesn't have rows from all source parts + /// but only from subset of them, and as a result some dynamic paths could be missing + /// and structures of resulting column may differ. + /// To solve this problem, before merge we create empty resulting column and use this method + /// to take dynamic structure from all source columns even if we won't insert + /// rows from some of them. + + /// We want to construct resulting set of dynamic paths with paths that have least number of null values in source columns + /// and insert the rest paths into shared data if we exceed the limit of dynamic paths. 
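Taken on its own, the selection described above is a top-K-by-count problem: sum the number of non-null values per path over all source parts (from the per-part statistics when available) and keep the max_dynamic_paths most populated paths as dynamic paths, reading everything else through shared data. A small standalone sketch of that selection over plain maps (illustrative names, not the real statistics structures):

    #include <algorithm>
    #include <cstddef>
    #include <functional>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    /// Pick at most max_dynamic_paths paths, preferring those with the most non-null values
    /// across all source parts; the remaining paths would be served from shared data.
    std::unordered_set<std::string> chooseDynamicPaths(
        const std::unordered_map<std::string, size_t> & non_null_counts, size_t max_dynamic_paths)
    {
        std::vector<std::pair<size_t, std::string>> sorted_paths;
        sorted_paths.reserve(non_null_counts.size());
        for (const auto & [path, count] : non_null_counts)
            sorted_paths.emplace_back(count, path);

        /// Largest counts first, mirroring the descending sort used below.
        std::sort(sorted_paths.begin(), sorted_paths.end(), std::greater<>());

        std::unordered_set<std::string> chosen;
        for (size_t i = 0; i != std::min(max_dynamic_paths, sorted_paths.size()); ++i)
            chosen.insert(sorted_paths[i].second);
        return chosen;
    }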
+ /// First, collect all dynamic paths from all source columns and calculate total number of non-null values. + std::unordered_map path_to_total_number_of_non_null_values; + for (const auto & source_column : source_columns) { - const auto & least_common_type = entry->data.getLeastCommonType(); - - /// Do not add subcolumns, which consist only from NULLs. - if (isNothing(getBaseTypeOfArray(least_common_type))) - continue; - - entry->data.finalize(); - new_subcolumns.add(entry->path, entry->data); + const auto & source_object = assert_cast(*source_column); + /// During deserialization from MergeTree we will have statistics from the whole + /// data part with number of non null values for each dynamic path. + const auto & source_statistics = source_object.getStatistics(); + for (const auto & [path, column] : source_object.dynamic_paths) + { + auto it = path_to_total_number_of_non_null_values.find(path); + if (it == path_to_total_number_of_non_null_values.end()) + it = path_to_total_number_of_non_null_values.emplace(path, 0).first; + auto statistics_it = source_statistics.data.find(path); + size_t size = statistics_it == source_statistics.data.end() ? (column->size() - column->getNumberOfDefaultRows()) : statistics_it->second; + it->second += size; + } } - /// If all subcolumns were skipped add a dummy subcolumn, - /// because Tuple type must have at least one element. - if (new_subcolumns.empty()) - new_subcolumns.add(PathInData{COLUMN_NAME_DUMMY}, Subcolumn{ColumnUInt8::create(old_size, 0), is_nullable}); + dynamic_paths.clear(); - std::swap(subcolumns, new_subcolumns); - checkObjectHasNoAmbiguosPaths(getKeys()); + /// Check if the number of all dynamic paths exceeds the limit. + if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) + { + /// Sort paths by total_number_of_non_null_values. + std::vector> paths_with_sizes; + paths_with_sizes.reserve(path_to_total_number_of_non_null_values.size()); + for (const auto & [path, size] : path_to_total_number_of_non_null_values) + paths_with_sizes.emplace_back(size, path); + std::sort(paths_with_sizes.begin(), paths_with_sizes.end(), std::greater()); + + /// Fill dynamic_paths with first max_dynamic_paths paths in sorted list. + for (size_t i = 0; i != max_dynamic_paths; ++i) + dynamic_paths[paths_with_sizes[i].second] = ColumnDynamic::create(max_dynamic_types); + } + /// Use all dynamic paths from all source columns. + else + { + for (const auto & [path, _] : path_to_total_number_of_non_null_values) + dynamic_paths[path] = ColumnDynamic::create(max_dynamic_types); + } + + /// Fill statistics for the merged part. + statistics.data.clear(); + statistics.source = Statistics::Source::MERGE; + for (const auto & [path, _] : dynamic_paths) + statistics.data[path] = path_to_total_number_of_non_null_values[path]; + + /// Now we have the resulting set of dynamic paths that will be used in all merged columns. + /// As we use Dynamic column for dynamic paths, we should call takeDynamicStructureFromSourceColumns + /// on all resulting dynamic columns. 
+ for (auto & [path, column] : dynamic_paths) + { + Columns dynamic_path_source_columns; + for (const auto & source_column : source_columns) + { + const auto & source_object = assert_cast(*source_column); + auto it = source_object.dynamic_paths.find(path); + if (it != source_object.dynamic_paths.end()) + dynamic_path_source_columns.push_back(it->second); + } + column->takeDynamicStructureFromSourceColumns(dynamic_path_source_columns); + } +} + +size_t ColumnObject::findPathLowerBoundInSharedData(StringRef path, const ColumnString & shared_data_paths, size_t start, size_t end) +{ + /// Simple random access iterator over values in ColumnString in specified range. + class Iterator : public std::iterator + { + public: + using difference_type = size_t; + Iterator() = delete; + Iterator(const ColumnString * data_, size_t index_) : data(data_), index(index_) {} + Iterator(const Iterator & rhs) : data(rhs.data), index(rhs.index) {} + Iterator & operator=(const Iterator & rhs) { data = rhs.data; index = rhs.index; return *this; } + inline Iterator& operator+=(difference_type rhs) { index += rhs; return *this;} + inline StringRef operator*() const {return data->getDataAt(index);} + + inline Iterator& operator++() { ++index; return *this; } + inline difference_type operator-(const Iterator & rhs) const {return index - rhs.index; } + + const ColumnString * data; + size_t index; + }; + + Iterator start_it(&shared_data_paths, start); + Iterator end_it(&shared_data_paths, end); + auto it = std::lower_bound(start_it, end_it, path); + return it.index; +} + +void ColumnObject::fillPathColumnFromSharedData(IColumn & path_column, StringRef path, const ColumnPtr & shared_data_column, size_t start, size_t end) +{ + const auto & shared_data_array = assert_cast(*shared_data_column); + const auto & shared_data_offsets = shared_data_array.getOffsets(); + size_t first_offset = shared_data_offsets[ssize_t(start) - 1]; + size_t last_offset = shared_data_offsets[ssize_t(end) - 1]; + /// Check if we have at least one row with data. 
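findPathLowerBoundInSharedData above relies on the paths of each row being stored sorted inside shared data, so one path can be located with std::lower_bound over that row's slice; the real code builds a random-access iterator over ColumnString for this. A standalone sketch of the same lookup over plain vectors (illustrative only):

    #include <algorithm>
    #include <cstddef>
    #include <optional>
    #include <string>
    #include <vector>

    /// Find the value of `path` inside one row's slice [row_start, row_end) of the flattened,
    /// per-row-sorted shared data, or std::nullopt if the row does not contain the path.
    std::optional<std::string> findPathInRow(
        const std::vector<std::string> & all_paths,
        const std::vector<std::string> & all_values,
        const std::string & path,
        size_t row_start,
        size_t row_end)
    {
        auto first = all_paths.begin() + row_start;
        auto last = all_paths.begin() + row_end;
        auto it = std::lower_bound(first, last, path);
        if (it != last && *it == path)
            return all_values[size_t(it - all_paths.begin())];
        return std::nullopt;
    }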
+ if (first_offset == last_offset) + { + path_column.insertManyDefaults(end - start); + return; + } + + const auto & shared_data_tuple = assert_cast(shared_data_array.getData()); + const auto & shared_data_paths = assert_cast(shared_data_tuple.getColumn(0)); + const auto & shared_data_values = assert_cast(shared_data_tuple.getColumn(1)); + const auto & dynamic_serialization = getDynamicSerialization(); + for (size_t i = start; i != end; ++i) + { + size_t paths_start = shared_data_offsets[ssize_t(i) - 1]; + size_t paths_end = shared_data_offsets[ssize_t(i)]; + auto lower_bound_path_index = ColumnObject::findPathLowerBoundInSharedData(path, shared_data_paths, paths_start, paths_end); + if (lower_bound_path_index != paths_end && shared_data_paths.getDataAt(lower_bound_path_index) == path) + { + auto value_data = shared_data_values.getDataAt(lower_bound_path_index); + ReadBufferFromMemory buf(value_data.data, value_data.size); + dynamic_serialization->deserializeBinary(path_column, buf, getFormatSettings()); + } + else + { + path_column.insertDefault(); + } + } } } diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index e2936b27994..901af6a353d 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -1,265 +1,206 @@ #pragma once #include -#include -#include -#include -#include - +#include +#include +#include +#include #include +#include +#include namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -/// Info that represents a scalar or array field in a decomposed view. -/// It allows to recreate field with different number -/// of dimensions or nullability. -struct FieldInfo -{ - /// The common type of of all scalars in field. - DataTypePtr scalar_type; - - /// Do we have NULL scalar in field. - bool have_nulls; - - /// If true then we have scalars with different types in array and - /// we need to convert scalars to the common type. - bool need_convert; - - /// Number of dimension in array. 0 if field is scalar. - size_t num_dimensions; - - /// If true then this field is an array of variadic dimension field - /// and we need to normalize the dimension - bool need_fold_dimension; -}; - -FieldInfo getFieldInfo(const Field & field); - -/** A column that represents object with dynamic set of subcolumns. - * Subcolumns are identified by paths in document and are stored in - * a trie-like structure. ColumnObject is not suitable for writing into tables - * and it should be converted to Tuple with fixed set of subcolumns before that. - */ class ColumnObject final : public COWHelper, ColumnObject> { public: - /** Class that represents one subcolumn. - * It stores values in several parts of column - * and keeps current common type of all parts. - * We add a new column part with a new type, when we insert a field, - * which can't be converted to the current common type. - * After insertion of all values subcolumn should be finalized - * for writing and other operations. 
- */ - class Subcolumn + struct Statistics { - public: - Subcolumn() = default; - Subcolumn(size_t size_, bool is_nullable_); - Subcolumn(MutableColumnPtr && data_, bool is_nullable_); - - size_t size() const; - size_t byteSize() const; - size_t allocatedBytes() const; - void get(size_t n, Field & res) const; - - bool isFinalized() const; - const DataTypePtr & getLeastCommonType() const { return least_common_type.get(); } - const DataTypePtr & getLeastCommonTypeBase() const { return least_common_type.getBase(); } - size_t getNumberOfDimensions() const { return least_common_type.getNumberOfDimensions(); } - - /// Checks the consistency of column's parts stored in @data. - void checkTypes() const; - - /// Inserts a field, which scalars can be arbitrary, but number of - /// dimensions should be consistent with current common type. - void insert(Field field); - void insert(Field field, FieldInfo info); - - void insertDefault(); - void insertManyDefaults(size_t length); - void insertRangeFrom(const Subcolumn & src, size_t start, size_t length); - void popBack(size_t n); - - Subcolumn cut(size_t start, size_t length) const; - - /// Converts all column's parts to the common type and - /// creates a single column that stores all values. - void finalize(); - - /// Returns last inserted field. - Field getLastField() const; - - FieldInfo getFieldInfo() const; - - /// Recreates subcolumn with default scalar values and keeps sizes of arrays. - /// Used to create columns of type Nested with consistent array sizes. - Subcolumn recreateWithDefaultValues(const FieldInfo & field_info) const; - - /// Returns single column if subcolumn in finalizes. - /// Otherwise -- undefined behaviour. - IColumn & getFinalizedColumn(); - const IColumn & getFinalizedColumn() const; - const ColumnPtr & getFinalizedColumnPtr() const; - - const std::vector & getData() const { return data; } - size_t getNumberOfDefaultsInPrefix() const { return num_of_defaults_in_prefix; } - - friend class ColumnObject; - - private: - class LeastCommonType + enum class Source { - public: - LeastCommonType(); - explicit LeastCommonType(DataTypePtr type_); - - const DataTypePtr & get() const { return type; } - const DataTypePtr & getBase() const { return base_type; } - size_t getNumberOfDimensions() const { return num_dimensions; } - - private: - DataTypePtr type; - DataTypePtr base_type; - size_t num_dimensions = 0; + READ, /// Statistics were loaded into column during reading from MergeTree. + MERGE, /// Statistics were calculated during merge of several MergeTree parts. }; - void addNewColumnPart(DataTypePtr type); - - /// Current least common type of all values inserted to this subcolumn. - LeastCommonType least_common_type; - - /// If true then common type type of subcolumn is Nullable - /// and default values are NULLs. - bool is_nullable = false; - - /// Parts of column. Parts should be in increasing order in terms of subtypes/supertypes. - /// That means that the least common type for i-th prefix is the type of i-th part - /// and it's the supertype for all type of column from 0 to i-1. - std::vector data; - - /// Until we insert any non-default field we don't know further - /// least common type and we count number of defaults in prefix, - /// which will be converted to the default type of final common type. - size_t num_of_defaults_in_prefix = 0; - - size_t num_rows = 0; + /// Source of the statistics. + Source source; + /// Statistics data: (path) -> (total number of not-null values). 
+ std::unordered_map data; }; - using Subcolumns = SubcolumnsTree; - private: - /// If true then all subcolumns are nullable. - const bool is_nullable; + friend class COWHelper, ColumnObject>; - Subcolumns subcolumns; - size_t num_rows; + ColumnObject(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_); + ColumnObject(std::unordered_map typed_paths_, std::unordered_map dynamic_paths_, MutableColumnPtr shared_data_, size_t max_dynamic_paths_, size_t max_dynamic_types_, const Statistics & statistics_ = {}); public: - static constexpr auto COLUMN_NAME_DUMMY = "_dummy"; + /** Create immutable column using immutable arguments. This arguments may be shared with other columns. + * Use mutate in order to make mutable column and mutate shared nested columns. + */ + using Base = COWHelper, ColumnObject>; + static Ptr create(const std::unordered_map & typed_paths_, const std::unordered_map & dynamic_paths_, const ColumnPtr & shared_data_, size_t max_dynamic_paths_, size_t max_dynamic_types_, const Statistics & statistics_ = {}); + static MutablePtr create(std::unordered_map typed_paths_, std::unordered_map dynamic_paths_, MutableColumnPtr shared_data_, size_t max_dynamic_paths_, size_t max_dynamic_types_, const Statistics & statistics_ = {}); + static MutablePtr create(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_); - explicit ColumnObject(bool is_nullable_); - ColumnObject(Subcolumns && subcolumns_, bool is_nullable_); + std::string getName() const override; - /// Checks that all subcolumns have consistent sizes. - void checkConsistency() const; + const char * getFamilyName() const override + { + return "Object"; + } - bool hasSubcolumn(const PathInData & key) const; + TypeIndex getDataType() const override + { + return TypeIndex::Object; + } - const Subcolumn & getSubcolumn(const PathInData & key) const; - Subcolumn & getSubcolumn(const PathInData & key); + MutableColumnPtr cloneEmpty() const override; + MutableColumnPtr cloneResized(size_t size) const override; - void incrementNumRows() { ++num_rows; } + size_t size() const override + { + return shared_data->size(); + } - /// Adds a subcolumn from existing IColumn. - void addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn); - - /// Adds a subcolumn of specific size with default values. - void addSubcolumn(const PathInData & key, size_t new_size); - - /// Adds a subcolumn of type Nested of specific size with default values. - /// It cares about consistency of sizes of Nested arrays. - void addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size); - - /// Finds a subcolumn from the same Nested type as @entry and inserts - /// an array with default values with consistent sizes as in Nested type. 
- bool tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const; - bool tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const; - - const Subcolumns & getSubcolumns() const { return subcolumns; } - Subcolumns & getSubcolumns() { return subcolumns; } - PathsInData getKeys() const; - - /// Part of interface - - const char * getFamilyName() const override { return "Object"; } - TypeIndex getDataType() const override { return TypeIndex::Object; } - - size_t size() const override; - size_t byteSize() const override; - size_t allocatedBytes() const override; - void forEachSubcolumn(MutableColumnCallback callback) override; - void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override; - void insert(const Field & field) override; - bool tryInsert(const Field & field) override; - void insertDefault() override; - void insertFrom(const IColumn & src, size_t n) override; - void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; - void popBack(size_t length) override; Field operator[](size_t n) const override; void get(size_t n, Field & res) const override; - ColumnPtr permute(const Permutation & perm, size_t limit) const override; - ColumnPtr filter(const Filter & filter, ssize_t result_size_hint) const override; - ColumnPtr index(const IColumn & indexes, size_t limit) const override; - ColumnPtr replicate(const Offsets & offsets) const override; - MutableColumnPtr cloneResized(size_t new_size) const override; + bool isDefaultAt(size_t n) const override; + StringRef getDataAt(size_t n) const override; + void insertData(const char * pos, size_t length) override; + + void insert(const Field & x) override; + bool tryInsert(const Field & x) override; + void insertFrom(const IColumn & src_, size_t n) override; + void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; + /// TODO: implement more optimal insertManyFrom + void insertDefault() override; + void insertManyDefaults(size_t length) override; + + void popBack(size_t n) override; + + StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override; + const char * deserializeAndInsertFromArena(const char * pos) override; + const char * skipSerializedInArena(const char * pos) const override; + + void updateHashWithValue(size_t n, SipHash & hash) const override; + void updateWeakHash32(WeakHash32 & hash) const override; + void updateHashFast(SipHash & hash) const override; + + ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override; + void expand(const Filter & mask, bool inverted) override; + ColumnPtr permute(const Permutation & perm, size_t limit) const override; + ColumnPtr index(const IColumn & indexes, size_t limit) const override; + ColumnPtr replicate(const Offsets & replicate_offsets) const override; + MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; + + /// Values of ColumnObject are not comparable. 
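+    /// As a consequence, compareAt() below always returns 0 and updatePermutation() is a no-op.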
+ int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } + void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &) const override; + void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} + void getExtremes(Field & min, Field & max) const override + { + min = Object(); + max = Object(); + } + + void reserve(size_t n) override; + void ensureOwnership() override; + size_t byteSize() const override; + size_t byteSizeAt(size_t n) const override; + size_t allocatedBytes() const override; + void protect() override; + + void forEachSubcolumn(MutableColumnCallback callback) override; + + void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override; + + bool structureEquals(const IColumn & rhs) const override; + + ColumnPtr compress() const override; - /// Finalizes all subcolumns. void finalize() override; bool isFinalized() const override; - /// Order of rows in ColumnObject is undefined. - void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override; - void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} - int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } - void getExtremes(Field & min, Field & max) const override; + bool hasDynamicStructure() const override { return true; } + void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; - /// All other methods throw exception. + const std::unordered_map & getTypedPaths() const { return typed_paths; } + std::unordered_map & getTypedPaths() { return typed_paths; } - StringRef getDataAt(size_t) const override { throwMustBeConcrete(); } - bool isDefaultAt(size_t) const override { throwMustBeConcrete(); } - void insertData(const char *, size_t) override { throwMustBeConcrete(); } - StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override { throwMustBeConcrete(); } - char * serializeValueIntoMemory(size_t, char *) const override { throwMustBeConcrete(); } - const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); } - const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); } - void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); } - void updateWeakHash32(WeakHash32 &) const override { throwMustBeConcrete(); } - void updateHashFast(SipHash &) const override { throwMustBeConcrete(); } - void expand(const Filter &, bool) override { throwMustBeConcrete(); } - bool hasEqualValues() const override { throwMustBeConcrete(); } - size_t byteSizeAt(size_t) const override { throwMustBeConcrete(); } - double getRatioOfDefaultRows(double) const override { throwMustBeConcrete(); } - UInt64 getNumberOfDefaultRows() const override { throwMustBeConcrete(); } - void getIndicesOfNonDefaultRows(Offsets &, size_t, size_t) const override { throwMustBeConcrete(); } + const std::unordered_map & getDynamicPaths() const { return dynamic_paths; } + std::unordered_map & getDynamicPaths() { return dynamic_paths; } -private: - [[noreturn]] static void throwMustBeConcrete() + const Statistics & getStatistics() const { return statistics; } + + const ColumnPtr & getSharedDataPtr() const { return shared_data; } + ColumnPtr & getSharedDataPtr() { return shared_data; } + IColumn & getSharedDataColumn() { 
return *shared_data; } + + const ColumnArray & getSharedDataNestedColumn() const { return assert_cast(*shared_data); } + ColumnArray & getSharedDataNestedColumn() { return assert_cast(*shared_data); } + + ColumnArray::Offsets & getSharedDataOffsets() { return assert_cast(*shared_data).getOffsets(); } + const ColumnArray::Offsets & getSharedDataOffsets() const { return assert_cast(*shared_data).getOffsets(); } + + std::pair getSharedDataPathsAndValues() { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ColumnObject must be converted to ColumnTuple before use"); + auto & column_array = assert_cast(*shared_data); + auto & column_tuple = assert_cast(column_array.getData()); + return {assert_cast(&column_tuple.getColumn(0)), assert_cast(&column_tuple.getColumn(1))}; } - template - MutableColumnPtr applyForSubcolumns(Func && func) const; + std::pair getSharedDataPathsAndValues() const + { + const auto & column_array = assert_cast(*shared_data); + const auto & column_tuple = assert_cast(column_array.getData()); + return {assert_cast(&column_tuple.getColumn(0)), assert_cast(&column_tuple.getColumn(1))}; + } - /// It's used to get shared sized of Nested to insert correct default values. - const Subcolumns::Node * getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const; + size_t getMaxDynamicTypes() const { return max_dynamic_types; } + size_t getMaxDynamicPaths() const { return max_dynamic_paths; } + + /// Try to add new dynamic path. Returns pointer to the new dynamic + /// path column or nullptr if limit on dynamic paths is reached. + IColumn * tryToAddNewDynamicPath(const String & path); + + void setDynamicPaths(const std::vector & paths); + void setStatistics(const Statistics & statistics_) { statistics = statistics_; } + + void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const String & path, const IColumn & column, size_t n); + void deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const; + + /// Paths in shared data are sorted in each row. Use this method to find the lower bound for specific path in the row. + static size_t findPathLowerBoundInSharedData(StringRef path, const ColumnString & shared_data_paths, size_t start, size_t end); + /// Insert all the data from shared data with specified path to dynamic column. + static void fillPathColumnFromSharedData(IColumn & path_column, StringRef path, const ColumnPtr & shared_data_column, size_t start, size_t end); + +private: + void insertFromSharedDataAndFillRemainingDynamicPaths(const ColumnObject & src_object_column, std::vector & dynamic_paths_to_shared_data, size_t start, size_t length); + void serializePathAndValueIntoArena(Arena & arena, const char *& begin, StringRef path, StringRef value, StringRef & res) const; + + /// Map path -> column for paths with explicitly specified types. + /// This set of paths is constant and cannot be changed. + std::unordered_map typed_paths; + /// Map path -> column for dynamically added paths. All columns + /// here are Dynamic columns. This set of paths can be extended + /// during inerts into the column. + std::unordered_map dynamic_paths; + /// Shared storage for all other paths and values. It's filled + /// when the number of dynamic paths reaches the limit. + /// It has type Array(Tuple(String, String)) and stores + /// an array of pairs (path, binary serialized dynamic value) for each row. + WrappedPtr shared_data; + + /// Maximum number of dynamic paths. 
If this limit is reached, all new paths will be inserted into shared data. + size_t max_dynamic_paths; + /// Maximum number of dynamic types for each dynamic path. Used while creating Dynamic columns for new dynamic paths. + size_t max_dynamic_types; + /// Statistics on the number of non-null values for each dynamic path in the MergeTree data part. + /// Calculated during merges or reading from MergeTree. Used to determine the set of dynamic paths for the merged part. + Statistics statistics; }; + } diff --git a/src/Columns/ColumnObjectDeprecated.cpp b/src/Columns/ColumnObjectDeprecated.cpp new file mode 100644 index 00000000000..d56f2a10309 --- /dev/null +++ b/src/Columns/ColumnObjectDeprecated.cpp @@ -0,0 +1,1096 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int DUPLICATE_COLUMN; + extern const int EXPERIMENTAL_FEATURE_ERROR; + extern const int ILLEGAL_COLUMN; + extern const int NUMBER_OF_DIMENSIONS_MISMATCHED; + extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; +} + +namespace +{ + +/// Recreates column with default scalar values and keeps sizes of arrays. +ColumnPtr recreateColumnWithDefaultValues( + const ColumnPtr & column, const DataTypePtr & scalar_type, size_t num_dimensions) +{ + const auto * column_array = checkAndGetColumn(column.get()); + if (column_array && num_dimensions) + { + return ColumnArray::create( + recreateColumnWithDefaultValues( + column_array->getDataPtr(), scalar_type, num_dimensions - 1), + IColumn::mutate(column_array->getOffsetsPtr())); + } + + return createArrayOfType(scalar_type, num_dimensions)->createColumn()->cloneResized(column->size()); +} + +/// Replaces NULL fields to given field or empty array. +class FieldVisitorReplaceNull : public StaticVisitor +{ +public: + explicit FieldVisitorReplaceNull( + const Field & replacement_, size_t num_dimensions_) + : replacement(replacement_) + , num_dimensions(num_dimensions_) + { + } + + Field operator()(const Null &) const + { + return num_dimensions ? Array() : replacement; + } + + Field operator()(const Array & x) const + { + assert(num_dimensions > 0); + const size_t size = x.size(); + Array res(size); + for (size_t i = 0; i < size; ++i) + res[i] = applyVisitor(FieldVisitorReplaceNull(replacement, num_dimensions - 1), x[i]); + return res; + } + + template + Field operator()(const T & x) const { return x; } + +private: + const Field & replacement; + size_t num_dimensions; +}; + +/// Visitor that allows to get type of scalar field +/// or least common type of scalars in array. +/// More optimized version of FieldToDataType. 
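+/// For example, for the array field [1, 2, 1000000] the scalar type is inferred as UInt32,
+/// and for a mixed array like [1, 'str'] it falls back to String via getLeastSupertypeOrString.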
+class FieldVisitorToScalarType : public StaticVisitor<> +{ +public: + using FieldType = Field::Types::Which; + + void operator()(const Array & x) + { + size_t size = x.size(); + for (size_t i = 0; i < size; ++i) + applyVisitor(*this, x[i]); + } + + void operator()(const UInt64 & x) + { + field_types.insert(FieldType::UInt64); + if (x <= std::numeric_limits::max()) + type_indexes.insert(TypeIndex::UInt8); + else if (x <= std::numeric_limits::max()) + type_indexes.insert(TypeIndex::UInt16); + else if (x <= std::numeric_limits::max()) + type_indexes.insert(TypeIndex::UInt32); + else + type_indexes.insert(TypeIndex::UInt64); + } + + void operator()(const Int64 & x) + { + field_types.insert(FieldType::Int64); + if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) + type_indexes.insert(TypeIndex::Int8); + else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) + type_indexes.insert(TypeIndex::Int16); + else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) + type_indexes.insert(TypeIndex::Int32); + else + type_indexes.insert(TypeIndex::Int64); + } + + void operator()(const bool &) + { + field_types.insert(FieldType::UInt64); + type_indexes.insert(TypeIndex::UInt8); + } + + void operator()(const Null &) + { + have_nulls = true; + } + + template + void operator()(const T &) + { + field_types.insert(Field::TypeToEnum>::value); + type_indexes.insert(TypeToTypeIndex>); + } + + DataTypePtr getScalarType() const { return getLeastSupertypeOrString(type_indexes); } + bool haveNulls() const { return have_nulls; } + bool needConvertField() const { return field_types.size() > 1; } + +private: + TypeIndexSet type_indexes; + std::unordered_set field_types; + bool have_nulls = false; +}; + +} + +FieldInfo getFieldInfo(const Field & field) +{ + FieldVisitorToScalarType to_scalar_type_visitor; + applyVisitor(to_scalar_type_visitor, field); + FieldVisitorToNumberOfDimensions to_number_dimension_visitor; + + return + { + to_scalar_type_visitor.getScalarType(), + to_scalar_type_visitor.haveNulls(), + to_scalar_type_visitor.needConvertField(), + applyVisitor(to_number_dimension_visitor, field), + to_number_dimension_visitor.need_fold_dimension + }; +} + +ColumnObjectDeprecated::Subcolumn::Subcolumn(MutableColumnPtr && data_, bool is_nullable_) + : least_common_type(getDataTypeByColumn(*data_)) + , is_nullable(is_nullable_) + , num_rows(data_->size()) +{ + data.push_back(std::move(data_)); +} + +ColumnObjectDeprecated::Subcolumn::Subcolumn( + size_t size_, bool is_nullable_) + : least_common_type(std::make_shared()) + , is_nullable(is_nullable_) + , num_of_defaults_in_prefix(size_) + , num_rows(size_) +{ +} + +size_t ColumnObjectDeprecated::Subcolumn::size() const +{ + return num_rows; +} + +size_t ColumnObjectDeprecated::Subcolumn::byteSize() const +{ + size_t res = 0; + for (const auto & part : data) + res += part->byteSize(); + return res; +} + +size_t ColumnObjectDeprecated::Subcolumn::allocatedBytes() const +{ + size_t res = 0; + for (const auto & part : data) + res += part->allocatedBytes(); + return res; +} + +void ColumnObjectDeprecated::Subcolumn::get(size_t n, Field & res) const +{ + if (isFinalized()) + { + getFinalizedColumn().get(n, res); + return; + } + + size_t ind = n; + if (ind < num_of_defaults_in_prefix) + { + res = least_common_type.get()->getDefault(); + return; + } + + ind -= num_of_defaults_in_prefix; + for (const auto & part : data) + { + if (ind < part->size()) + { + part->get(ind, res); + res = convertFieldToTypeOrThrow(res, 
*least_common_type.get()); + return; + } + + ind -= part->size(); + } + + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for getting field is out of range", n); +} + +void ColumnObjectDeprecated::Subcolumn::checkTypes() const +{ + DataTypes prefix_types; + prefix_types.reserve(data.size()); + for (size_t i = 0; i < data.size(); ++i) + { + auto current_type = getDataTypeByColumn(*data[i]); + prefix_types.push_back(current_type); + auto prefix_common_type = getLeastSupertype(prefix_types); + if (!prefix_common_type->equals(*current_type)) + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Data type {} of column at position {} cannot represent all columns from i-th prefix", + current_type->getName(), i); + } +} + +void ColumnObjectDeprecated::Subcolumn::insert(Field field) +{ + auto info = DB::getFieldInfo(field); + insert(std::move(field), std::move(info)); +} + +void ColumnObjectDeprecated::Subcolumn::addNewColumnPart(DataTypePtr type) +{ + auto serialization = type->getSerialization(ISerialization::Kind::SPARSE); + data.push_back(type->createColumn(*serialization)); + least_common_type = LeastCommonType{std::move(type)}; +} + +static bool isConversionRequiredBetweenIntegers(const IDataType & lhs, const IDataType & rhs) +{ + /// If both of types are signed/unsigned integers and size of left field type + /// is less than right type, we don't need to convert field, + /// because all integer fields are stored in Int64/UInt64. + + WhichDataType which_lhs(lhs); + WhichDataType which_rhs(rhs); + + bool is_native_int = which_lhs.isNativeInt() && which_rhs.isNativeInt(); + bool is_native_uint = which_lhs.isNativeUInt() && which_rhs.isNativeUInt(); + + return (!is_native_int && !is_native_uint) + || lhs.getSizeOfValueInMemory() > rhs.getSizeOfValueInMemory(); +} + +void ColumnObjectDeprecated::Subcolumn::insert(Field field, FieldInfo info) +{ + auto base_type = std::move(info.scalar_type); + + if (isNothing(base_type) && info.num_dimensions == 0) + { + insertDefault(); + return; + } + + auto column_dim = least_common_type.getNumberOfDimensions(); + auto value_dim = info.num_dimensions; + + if (isNothing(least_common_type.get())) + column_dim = value_dim; + + if (isNothing(base_type)) + value_dim = column_dim; + + if (value_dim != column_dim) + throw Exception(ErrorCodes::NUMBER_OF_DIMENSIONS_MISMATCHED, + "Dimension of types mismatched between inserted value and column. " + "Dimension of value: {}. 
Dimension of column: {}", + value_dim, column_dim); + + if (is_nullable) + base_type = makeNullable(base_type); + + if (!is_nullable && info.have_nulls) + field = applyVisitor(FieldVisitorReplaceNull(base_type->getDefault(), value_dim), std::move(field)); + + bool type_changed = false; + const auto & least_common_base_type = least_common_type.getBase(); + + if (data.empty()) + { + addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); + } + else if (!least_common_base_type->equals(*base_type) && !isNothing(base_type)) + { + if (isConversionRequiredBetweenIntegers(*base_type, *least_common_base_type)) + { + base_type = getLeastSupertypeOrString(DataTypes{std::move(base_type), least_common_base_type}); + type_changed = true; + if (!least_common_base_type->equals(*base_type)) + addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); + } + } + + if (type_changed || info.need_convert) + field = convertFieldToTypeOrThrow(field, *least_common_type.get()); + + if (!data.back()->tryInsert(field)) + { + /** Normalization of the field above is pretty complicated (it uses several FieldVisitors), + * so in the case of a bug, we may get mismatched types. + * The `IColumn::insert` method does not check the type of the inserted field, and it can lead to a segmentation fault. + * Therefore, we use the safer `tryInsert` method to get an exception instead of a segmentation fault. + */ + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Cannot insert field {} to column {}", + field.dump(), data.back()->dumpStructure()); + } + + ++num_rows; +} + +void ColumnObjectDeprecated::Subcolumn::insertRangeFrom(const Subcolumn & src, size_t start, size_t length) +{ + assert(start + length <= src.size()); + size_t end = start + length; + num_rows += length; + + if (data.empty()) + { + addNewColumnPart(src.getLeastCommonType()); + } + else if (!least_common_type.get()->equals(*src.getLeastCommonType())) + { + auto new_least_common_type = getLeastSupertypeOrString(DataTypes{least_common_type.get(), src.getLeastCommonType()}); + if (!new_least_common_type->equals(*least_common_type.get())) + addNewColumnPart(std::move(new_least_common_type)); + } + + if (end <= src.num_of_defaults_in_prefix) + { + data.back()->insertManyDefaults(length); + return; + } + + if (start < src.num_of_defaults_in_prefix) + data.back()->insertManyDefaults(src.num_of_defaults_in_prefix - start); + + auto insert_from_part = [&](const auto & column, size_t from, size_t n) + { + assert(from + n <= column->size()); + auto column_type = getDataTypeByColumn(*column); + + if (column_type->equals(*least_common_type.get())) + { + data.back()->insertRangeFrom(*column, from, n); + return; + } + + /// If we need to insert large range, there is no sense to cut part of column and cast it. + /// Casting of all column and inserting from it can be faster. + /// Threshold is just a guess. + + if (n * 3 >= column->size()) + { + auto casted_column = castColumn({column, column_type, ""}, least_common_type.get()); + data.back()->insertRangeFrom(*casted_column, from, n); + return; + } + + auto casted_column = column->cut(from, n); + casted_column = castColumn({casted_column, column_type, ""}, least_common_type.get()); + data.back()->insertRangeFrom(*casted_column, 0, n); + }; + + size_t pos = 0; + size_t processed_rows = src.num_of_defaults_in_prefix; + + /// Find the first part of the column that intersects the range. 
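+    /// (At this point processed_rows already accounts for the defaults in the prefix handled above.)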
+    while (pos < src.data.size() && processed_rows + src.data[pos]->size() < start)
+    {
+        processed_rows += src.data[pos]->size();
+        ++pos;
+    }
+
+    /// Insert from the first part of the column.
+    if (pos < src.data.size() && processed_rows < start)
+    {
+        size_t part_start = start - processed_rows;
+        size_t part_length = std::min(src.data[pos]->size() - part_start, end - start);
+        insert_from_part(src.data[pos], part_start, part_length);
+        processed_rows += src.data[pos]->size();
+        ++pos;
+    }
+
+    /// Insert from the parts of the column in the middle of the range.
+    while (pos < src.data.size() && processed_rows + src.data[pos]->size() < end)
+    {
+        insert_from_part(src.data[pos], 0, src.data[pos]->size());
+        processed_rows += src.data[pos]->size();
+        ++pos;
+    }
+
+    /// Insert from the last part of the column if needed.
+    if (pos < src.data.size() && processed_rows < end)
+    {
+        size_t part_end = end - processed_rows;
+        insert_from_part(src.data[pos], 0, part_end);
+    }
+}
+
+bool ColumnObjectDeprecated::Subcolumn::isFinalized() const
+{
+    return num_of_defaults_in_prefix == 0 &&
+        (data.empty() || (data.size() == 1 && !data[0]->isSparse()));
+}
+
+void ColumnObjectDeprecated::Subcolumn::finalize()
+{
+    if (isFinalized())
+        return;
+
+    if (data.size() == 1 && num_of_defaults_in_prefix == 0)
+    {
+        data[0] = data[0]->convertToFullColumnIfSparse();
+        return;
+    }
+
+    const auto & to_type = least_common_type.get();
+    auto result_column = to_type->createColumn();
+
+    if (num_of_defaults_in_prefix)
+        result_column->insertManyDefaults(num_of_defaults_in_prefix);
+
+    for (auto & part : data)
+    {
+        part = part->convertToFullColumnIfSparse();
+        auto from_type = getDataTypeByColumn(*part);
+        size_t part_size = part->size();
+
+        if (!from_type->equals(*to_type))
+        {
+            auto offsets = ColumnUInt64::create();
+            auto & offsets_data = offsets->getData();
+
+            /// We need to convert only non-default values and then recreate the column
+            /// with the default value of the new type, because default values (which represent missing values in the data)
+            /// may be inconsistent between types (e.g "0" in UInt64 and empty string in String). 
+ + part->getIndicesOfNonDefaultRows(offsets_data, 0, part_size); + + if (offsets->size() == part_size) + { + part = castColumn({part, from_type, ""}, to_type); + } + else + { + auto values = part->index(*offsets, offsets->size()); + values = castColumn({values, from_type, ""}, to_type); + part = values->createWithOffsets(offsets_data, *createColumnConstWithDefaultValue(result_column->getPtr()), part_size, /*shift=*/ 0); + } + } + + result_column->insertRangeFrom(*part, 0, part_size); + } + + data = { std::move(result_column) }; + num_of_defaults_in_prefix = 0; +} + +void ColumnObjectDeprecated::Subcolumn::insertDefault() +{ + if (data.empty()) + ++num_of_defaults_in_prefix; + else + data.back()->insertDefault(); + + ++num_rows; +} + +void ColumnObjectDeprecated::Subcolumn::insertManyDefaults(size_t length) +{ + if (data.empty()) + num_of_defaults_in_prefix += length; + else + data.back()->insertManyDefaults(length); + + num_rows += length; +} + +void ColumnObjectDeprecated::Subcolumn::popBack(size_t n) +{ + assert(n <= size()); + + num_rows -= n; + size_t num_removed = 0; + for (auto it = data.rbegin(); it != data.rend(); ++it) + { + if (n == 0) + break; + + auto & column = *it; + if (n < column->size()) + { + column->popBack(n); + n = 0; + } + else + { + ++num_removed; + n -= column->size(); + } + } + + data.resize(data.size() - num_removed); + num_of_defaults_in_prefix -= n; +} + +ColumnObjectDeprecated::Subcolumn ColumnObjectDeprecated::Subcolumn::cut(size_t start, size_t length) const +{ + Subcolumn new_subcolumn(0, is_nullable); + new_subcolumn.insertRangeFrom(*this, start, length); + return new_subcolumn; +} + +Field ColumnObjectDeprecated::Subcolumn::getLastField() const +{ + if (data.empty()) + return Field(); + + const auto & last_part = data.back(); + assert(!last_part->empty()); + return (*last_part)[last_part->size() - 1]; +} + +FieldInfo ColumnObjectDeprecated::Subcolumn::getFieldInfo() const +{ + const auto & base_type = least_common_type.getBase(); + return FieldInfo + { + .scalar_type = base_type, + .have_nulls = base_type->isNullable(), + .need_convert = false, + .num_dimensions = least_common_type.getNumberOfDimensions(), + .need_fold_dimension = false, + }; +} + +ColumnObjectDeprecated::Subcolumn ColumnObjectDeprecated::Subcolumn::recreateWithDefaultValues(const FieldInfo & field_info) const +{ + auto scalar_type = field_info.scalar_type; + if (is_nullable) + scalar_type = makeNullable(scalar_type); + + Subcolumn new_subcolumn(*this); + new_subcolumn.least_common_type = LeastCommonType{createArrayOfType(scalar_type, field_info.num_dimensions)}; + + for (auto & part : new_subcolumn.data) + part = recreateColumnWithDefaultValues(part, scalar_type, field_info.num_dimensions); + + return new_subcolumn; +} + +IColumn & ColumnObjectDeprecated::Subcolumn::getFinalizedColumn() +{ + assert(isFinalized()); + return *data[0]; +} + +const IColumn & ColumnObjectDeprecated::Subcolumn::getFinalizedColumn() const +{ + assert(isFinalized()); + return *data[0]; +} + +const ColumnPtr & ColumnObjectDeprecated::Subcolumn::getFinalizedColumnPtr() const +{ + assert(isFinalized()); + return data[0]; +} + +ColumnObjectDeprecated::Subcolumn::LeastCommonType::LeastCommonType() + : type(std::make_shared()) + , base_type(type) + , num_dimensions(0) +{ +} + +ColumnObjectDeprecated::Subcolumn::LeastCommonType::LeastCommonType(DataTypePtr type_) + : type(std::move(type_)) + , base_type(getBaseTypeOfArray(type)) + , num_dimensions(DB::getNumberOfDimensions(*type)) +{ +} + 
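+/// Example of how Subcolumn (see Subcolumn::insert and Subcolumn::finalize above) accumulates parts:
+/// after insert(42) it holds a single UInt8 part; a later insert("foo") widens the least common type
+/// to String via getLeastSupertypeOrString and starts a new String part; finalize() then casts all
+/// parts to the common type and merges them into one column.
+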
+ColumnObjectDeprecated::ColumnObjectDeprecated(bool is_nullable_) + : is_nullable(is_nullable_) + , num_rows(0) +{ +} + +ColumnObjectDeprecated::ColumnObjectDeprecated(Subcolumns && subcolumns_, bool is_nullable_) + : is_nullable(is_nullable_) + , subcolumns(std::move(subcolumns_)) + , num_rows(subcolumns.empty() ? 0 : (*subcolumns.begin())->data.size()) + +{ + checkConsistency(); +} + +void ColumnObjectDeprecated::checkConsistency() const +{ + if (subcolumns.empty()) + return; + + for (const auto & leaf : subcolumns) + { + if (num_rows != leaf->data.size()) + { + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Sizes of subcolumns are inconsistent in ColumnObjectDeprecated." + " Subcolumn '{}' has {} rows, but expected size is {}", + leaf->path.getPath(), leaf->data.size(), num_rows); + } + } +} + +size_t ColumnObjectDeprecated::size() const +{ +#ifndef NDEBUG + checkConsistency(); +#endif + return num_rows; +} + +size_t ColumnObjectDeprecated::byteSize() const +{ + size_t res = 0; + for (const auto & entry : subcolumns) + res += entry->data.byteSize(); + return res; +} + +size_t ColumnObjectDeprecated::allocatedBytes() const +{ + size_t res = 0; + for (const auto & entry : subcolumns) + res += entry->data.allocatedBytes(); + return res; +} + +void ColumnObjectDeprecated::forEachSubcolumn(MutableColumnCallback callback) +{ + for (auto & entry : subcolumns) + for (auto & part : entry->data.data) + callback(part); +} + +void ColumnObjectDeprecated::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) +{ + for (auto & entry : subcolumns) + { + for (auto & part : entry->data.data) + { + callback(*part); + part->forEachSubcolumnRecursively(callback); + } + } +} + +void ColumnObjectDeprecated::insert(const Field & field) +{ + const auto & object = field.get(); + + HashSet inserted_paths; + size_t old_size = size(); + for (const auto & [key_str, value] : object) + { + PathInData key(key_str); + inserted_paths.insert(key_str); + if (!hasSubcolumn(key)) + addSubcolumn(key, old_size); + + auto & subcolumn = getSubcolumn(key); + subcolumn.insert(value); + } + + for (auto & entry : subcolumns) + { + if (!inserted_paths.has(entry->path.getPath())) + { + bool inserted = tryInsertDefaultFromNested(entry); + if (!inserted) + entry->data.insertDefault(); + } + } + + ++num_rows; +} + +bool ColumnObjectDeprecated::tryInsert(const Field & field) +{ + if (field.getType() != Field::Types::Which::Object) + return false; + + insert(field); + return true; +} + +void ColumnObjectDeprecated::insertDefault() +{ + for (auto & entry : subcolumns) + entry->data.insertDefault(); + + ++num_rows; +} + +Field ColumnObjectDeprecated::operator[](size_t n) const +{ + Field object; + get(n, object); + return object; +} + +void ColumnObjectDeprecated::get(size_t n, Field & res) const +{ + assert(n < size()); + res = Object(); + auto & object = res.get(); + + for (const auto & entry : subcolumns) + { + auto it = object.try_emplace(entry->path.getPath()).first; + entry->data.get(n, it->second); + } +} + +void ColumnObjectDeprecated::insertFrom(const IColumn & src, size_t n) +{ + insert(src[n]); +} + +void ColumnObjectDeprecated::insertRangeFrom(const IColumn & src, size_t start, size_t length) +{ + const auto & src_object = assert_cast(src); + + for (const auto & entry : src_object.subcolumns) + { + if (!hasSubcolumn(entry->path)) + { + if (entry->path.hasNested()) + addNestedSubcolumn(entry->path, entry->data.getFieldInfo(), num_rows); + else + addSubcolumn(entry->path, num_rows); + } + + auto & 
subcolumn = getSubcolumn(entry->path); + subcolumn.insertRangeFrom(entry->data, start, length); + } + + for (auto & entry : subcolumns) + { + if (!src_object.hasSubcolumn(entry->path)) + { + bool inserted = tryInsertManyDefaultsFromNested(entry); + if (!inserted) + entry->data.insertManyDefaults(length); + } + } + + num_rows += length; + finalize(); +} + +void ColumnObjectDeprecated::popBack(size_t length) +{ + for (auto & entry : subcolumns) + entry->data.popBack(length); + + num_rows -= length; +} + +template +MutableColumnPtr ColumnObjectDeprecated::applyForSubcolumns(Func && func) const +{ + if (!isFinalized()) + { + auto finalized = cloneFinalized(); + auto & finalized_object = assert_cast(*finalized); + return finalized_object.applyForSubcolumns(std::forward(func)); + } + + auto res = ColumnObjectDeprecated::create(is_nullable); + for (const auto & subcolumn : subcolumns) + { + auto new_subcolumn = func(subcolumn->data.getFinalizedColumn()); + res->addSubcolumn(subcolumn->path, new_subcolumn->assumeMutable()); + } + + return res; +} + +ColumnPtr ColumnObjectDeprecated::permute(const Permutation & perm, size_t limit) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.permute(perm, limit); }); +} + +ColumnPtr ColumnObjectDeprecated::filter(const Filter & filter, ssize_t result_size_hint) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.filter(filter, result_size_hint); }); +} + +ColumnPtr ColumnObjectDeprecated::index(const IColumn & indexes, size_t limit) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.index(indexes, limit); }); +} + +ColumnPtr ColumnObjectDeprecated::replicate(const Offsets & offsets) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.replicate(offsets); }); +} + +MutableColumnPtr ColumnObjectDeprecated::cloneResized(size_t new_size) const +{ + if (new_size == 0) + return ColumnObjectDeprecated::create(is_nullable); + + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.cloneResized(new_size); }); +} + +void ColumnObjectDeprecated::getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const +{ + res.resize(num_rows); + iota(res.data(), res.size(), size_t(0)); +} + +void ColumnObjectDeprecated::getExtremes(Field & min, Field & max) const +{ + if (num_rows == 0) + { + min = Object(); + max = Object(); + } + else + { + get(0, min); + get(0, max); + } +} + +const ColumnObjectDeprecated::Subcolumn & ColumnObjectDeprecated::getSubcolumn(const PathInData & key) const +{ + if (const auto * node = subcolumns.findLeaf(key)) + return node->data; + + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObjectDeprecated", key.getPath()); +} + +ColumnObjectDeprecated::Subcolumn & ColumnObjectDeprecated::getSubcolumn(const PathInData & key) +{ + if (const auto * node = subcolumns.findLeaf(key)) + return const_cast(node)->data; + + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObjectDeprecated", key.getPath()); +} + +bool ColumnObjectDeprecated::hasSubcolumn(const PathInData & key) const +{ + return subcolumns.findLeaf(key) != nullptr; +} + +void ColumnObjectDeprecated::addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn) +{ + size_t new_size = subcolumn->size(); + bool inserted = subcolumns.add(key, Subcolumn(std::move(subcolumn), is_nullable)); + + if (!inserted) + throw 
Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + + if (num_rows == 0) + num_rows = new_size; + else if (new_size != num_rows) + throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, + "Size of subcolumn {} ({}) is inconsistent with column size ({})", + key.getPath(), new_size, num_rows); +} + +void ColumnObjectDeprecated::addSubcolumn(const PathInData & key, size_t new_size) +{ + bool inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); + if (!inserted) + throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + + if (num_rows == 0) + num_rows = new_size; + else if (new_size != num_rows) + throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, + "Required size of subcolumn {} ({}) is inconsistent with column size ({})", + key.getPath(), new_size, num_rows); +} + +void ColumnObjectDeprecated::addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size) +{ + if (!key.hasNested()) + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Cannot add Nested subcolumn, because path doesn't contain Nested"); + + bool inserted = false; + /// We find node that represents the same Nested type as @key. + const auto * nested_node = subcolumns.findBestMatch(key); + + if (nested_node) + { + /// Find any leaf of Nested subcolumn. + const auto * leaf = Subcolumns::findLeaf(nested_node, [&](const auto &) { return true; }); + assert(leaf); + + /// Recreate subcolumn with default values and the same sizes of arrays. + auto new_subcolumn = leaf->data.recreateWithDefaultValues(field_info); + + /// It's possible that we have already inserted value from current row + /// to this subcolumn. So, adjust size to expected. + if (new_subcolumn.size() > new_size) + new_subcolumn.popBack(new_subcolumn.size() - new_size); + + assert(new_subcolumn.size() == new_size); + inserted = subcolumns.add(key, new_subcolumn); + } + else + { + /// If node was not found just add subcolumn with empty arrays. + inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); + } + + if (!inserted) + throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + + if (num_rows == 0) + num_rows = new_size; + else if (new_size != num_rows) + throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, + "Required size of subcolumn {} ({}) is inconsistent with column size ({})", + key.getPath(), new_size, num_rows); +} + +const ColumnObjectDeprecated::Subcolumns::Node * ColumnObjectDeprecated::getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const +{ + if (!entry->path.hasNested()) + return nullptr; + + size_t old_size = entry->data.size(); + const auto * current_node = subcolumns.findLeaf(entry->path); + const Subcolumns::Node * leaf = nullptr; + + while (current_node) + { + /// Try to find the first Nested up to the current node. + const auto * node_nested = Subcolumns::findParent(current_node, + [](const auto & candidate) { return candidate.isNested(); }); + + if (!node_nested) + break; + + /// Find the leaf with subcolumn that contains values + /// for the last rows. + /// If there are no leaves, skip current node and find + /// the next node up to the current. 
+        leaf = Subcolumns::findLeaf(node_nested,
+            [&](const auto & candidate)
+            {
+                return candidate.data.size() > old_size;
+            });
+
+        if (leaf)
+            break;
+
+        current_node = node_nested->parent;
+    }
+
+    if (leaf && isNothing(leaf->data.getLeastCommonTypeBase()))
+        return nullptr;
+
+    return leaf;
+}
+
+bool ColumnObjectDeprecated::tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const
+{
+    const auto * leaf = getLeafOfTheSameNested(entry);
+    if (!leaf)
+        return false;
+
+    size_t old_size = entry->data.size();
+    auto field_info = entry->data.getFieldInfo();
+
+    /// Cut the needed range from the found leaf
+    /// and replace scalar values with the correct
+    /// default values for the given entry.
+    auto new_subcolumn = leaf->data
+        .cut(old_size, leaf->data.size() - old_size)
+        .recreateWithDefaultValues(field_info);
+
+    entry->data.insertRangeFrom(new_subcolumn, 0, new_subcolumn.size());
+    return true;
+}
+
+bool ColumnObjectDeprecated::tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const
+{
+    const auto * leaf = getLeafOfTheSameNested(entry);
+    if (!leaf)
+        return false;
+
+    auto last_field = leaf->data.getLastField();
+    if (last_field.isNull())
+        return false;
+
+    size_t leaf_num_dimensions = leaf->data.getNumberOfDimensions();
+    size_t entry_num_dimensions = entry->data.getNumberOfDimensions();
+
+    auto default_scalar = entry_num_dimensions > leaf_num_dimensions
+        ? createEmptyArrayField(entry_num_dimensions - leaf_num_dimensions)
+        : entry->data.getLeastCommonTypeBase()->getDefault();
+
+    auto default_field = applyVisitor(FieldVisitorReplaceScalars(default_scalar, leaf_num_dimensions), last_field);
+    entry->data.insert(std::move(default_field));
+    return true;
+}
+
+PathsInData ColumnObjectDeprecated::getKeys() const
+{
+    PathsInData keys;
+    keys.reserve(subcolumns.size());
+    for (const auto & entry : subcolumns)
+        keys.emplace_back(entry->path);
+    return keys;
+}
+
+bool ColumnObjectDeprecated::isFinalized() const
+{
+    return std::all_of(subcolumns.begin(), subcolumns.end(),
+        [](const auto & entry) { return entry->data.isFinalized(); });
+}
+
+void ColumnObjectDeprecated::finalize()
+{
+    size_t old_size = size();
+    Subcolumns new_subcolumns;
+    for (auto && entry : subcolumns)
+    {
+        const auto & least_common_type = entry->data.getLeastCommonType();
+
+        /// Do not add subcolumns that consist only of NULLs.
+        if (isNothing(getBaseTypeOfArray(least_common_type)))
+            continue;
+
+        entry->data.finalize();
+        new_subcolumns.add(entry->path, entry->data);
+    }
+
+    /// If all subcolumns were skipped, add a dummy subcolumn,
+    /// because the Tuple type must have at least one element.
+    if (new_subcolumns.empty())
+        new_subcolumns.add(PathInData{COLUMN_NAME_DUMMY}, Subcolumn{ColumnUInt8::create(old_size, 0), is_nullable});
+
+    std::swap(subcolumns, new_subcolumns);
+    checkObjectHasNoAmbiguosPaths(getKeys());
+}
+
+}
diff --git a/src/Columns/ColumnObjectDeprecated.h b/src/Columns/ColumnObjectDeprecated.h
new file mode 100644
index 00000000000..b8981e91f44
--- /dev/null
+++ b/src/Columns/ColumnObjectDeprecated.h
@@ -0,0 +1,265 @@
+#pragma once
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NOT_IMPLEMENTED;
+}
+
+/// Info that represents a scalar or array field in a decomposed view.
+/// It allows recreating the field with a different number
+/// of dimensions or nullability.
+struct FieldInfo
+{
+    /// The common type of all scalars in the field.
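+    /// For example, for the field [[1, 2], [3]] the common scalar type is UInt8.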
+    DataTypePtr scalar_type;
+
+    /// Do we have a NULL scalar in the field.
+    bool have_nulls;
+
+    /// If true then we have scalars with different types in the array and
+    /// we need to convert the scalars to the common type.
+    bool need_convert;
+
+    /// Number of dimensions in the array. 0 if the field is a scalar.
+    size_t num_dimensions;
+
+    /// If true then this field is an array of fields with varying numbers of dimensions
+    /// and we need to normalize the dimensions.
+    bool need_fold_dimension;
+};
+
+FieldInfo getFieldInfo(const Field & field);
+
+/** A column that represents an object with a dynamic set of subcolumns.
+ * Subcolumns are identified by paths in the document and are stored in
+ * a trie-like structure. ColumnObjectDeprecated is not suitable for writing into tables
+ * and it should be converted to a Tuple with a fixed set of subcolumns before that.
+ */
+class ColumnObjectDeprecated final : public COWHelper, ColumnObjectDeprecated>
+{
+public:
+    /** Class that represents one subcolumn.
+     * It stores values in several parts of the column
+     * and keeps the current common type of all parts.
+     * We add a new column part with a new type when we insert a field
+     * that can't be converted to the current common type.
+     * After insertion of all values the subcolumn should be finalized
+     * for writing and other operations.
+     */
+    class Subcolumn
+    {
+    public:
+        Subcolumn() = default;
+        Subcolumn(size_t size_, bool is_nullable_);
+        Subcolumn(MutableColumnPtr && data_, bool is_nullable_);
+
+        size_t size() const;
+        size_t byteSize() const;
+        size_t allocatedBytes() const;
+        void get(size_t n, Field & res) const;
+
+        bool isFinalized() const;
+        const DataTypePtr & getLeastCommonType() const { return least_common_type.get(); }
+        const DataTypePtr & getLeastCommonTypeBase() const { return least_common_type.getBase(); }
+        size_t getNumberOfDimensions() const { return least_common_type.getNumberOfDimensions(); }
+
+        /// Checks the consistency of column's parts stored in @data.
+        void checkTypes() const;
+
+        /// Inserts a field whose scalars can be arbitrary, but the number of
+        /// dimensions should be consistent with the current common type.
+        void insert(Field field);
+        void insert(Field field, FieldInfo info);
+
+        void insertDefault();
+        void insertManyDefaults(size_t length);
+        void insertRangeFrom(const Subcolumn & src, size_t start, size_t length);
+        void popBack(size_t n);
+
+        Subcolumn cut(size_t start, size_t length) const;
+
+        /// Converts all column's parts to the common type and
+        /// creates a single column that stores all values.
+        void finalize();
+
+        /// Returns the last inserted field.
+        Field getLastField() const;
+
+        FieldInfo getFieldInfo() const;
+
+        /// Recreates the subcolumn with default scalar values and keeps the sizes of arrays.
+        /// Used to create columns of type Nested with consistent array sizes.
+        Subcolumn recreateWithDefaultValues(const FieldInfo & field_info) const;
+
+        /// Returns the single column if the subcolumn is finalized.
+        /// Otherwise -- undefined behaviour.
+        IColumn & getFinalizedColumn();
+        const IColumn & getFinalizedColumn() const;
+        const ColumnPtr & getFinalizedColumnPtr() const;
+
+        const std::vector & getData() const { return data; }
+        size_t getNumberOfDefaultsInPrefix() const { return num_of_defaults_in_prefix; }
+
+        friend class ColumnObjectDeprecated;
+
+    private:
+        class LeastCommonType
+        {
+        public:
+            LeastCommonType();
+            explicit LeastCommonType(DataTypePtr type_);
+
+            const DataTypePtr & get() const { return type; }
+            const DataTypePtr & getBase() const { return base_type; }
+            size_t getNumberOfDimensions() const { return num_dimensions; }
+
+        private:
+            DataTypePtr type;
+            DataTypePtr base_type;
+            size_t num_dimensions = 0;
+        };
+
+        void addNewColumnPart(DataTypePtr type);
+
+        /// Current least common type of all values inserted into this subcolumn.
+        LeastCommonType least_common_type;
+
+        /// If true then the common type of the subcolumn is Nullable
+        /// and default values are NULLs.
+        bool is_nullable = false;
+
+        /// Parts of the column. Parts should be in increasing order in terms of subtypes/supertypes.
+        /// That means that the least common type for the i-th prefix is the type of the i-th part
+        /// and it's the supertype for all parts from 0 to i-1.
+        std::vector data;
+
+        /// Until we insert any non-default field we don't know the final
+        /// least common type, so we count the number of defaults in the prefix,
+        /// which will be converted to the default value of the final common type.
+        size_t num_of_defaults_in_prefix = 0;
+
+        size_t num_rows = 0;
+    };
+
+    using Subcolumns = SubcolumnsTree;
+
+private:
+    /// If true then all subcolumns are nullable.
+    const bool is_nullable;
+
+    Subcolumns subcolumns;
+    size_t num_rows;
+
+public:
+    static constexpr auto COLUMN_NAME_DUMMY = "_dummy";
+
+    explicit ColumnObjectDeprecated(bool is_nullable_);
+    ColumnObjectDeprecated(Subcolumns && subcolumns_, bool is_nullable_);
+
+    /// Checks that all subcolumns have consistent sizes.
+    void checkConsistency() const;
+
+    bool hasSubcolumn(const PathInData & key) const;
+
+    const Subcolumn & getSubcolumn(const PathInData & key) const;
+    Subcolumn & getSubcolumn(const PathInData & key);
+
+    void incrementNumRows() { ++num_rows; }
+
+    /// Adds a subcolumn from an existing IColumn.
+    void addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn);
+
+    /// Adds a subcolumn of a specific size with default values.
+    void addSubcolumn(const PathInData & key, size_t new_size);
+
+    /// Adds a subcolumn of type Nested of a specific size with default values.
+    /// It takes care of the consistency of sizes of Nested arrays.
+    void addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size);
+
+    /// Finds a subcolumn of the same Nested type as @entry and inserts
+    /// an array with default values whose sizes are consistent with the Nested type.
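+    /// For example, if another subcolumn of the same Nested already has an array of 3 elements
+    /// in the current row, the inserted default is an array of 3 default values.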
+ bool tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const; + bool tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const; + + const Subcolumns & getSubcolumns() const { return subcolumns; } + Subcolumns & getSubcolumns() { return subcolumns; } + PathsInData getKeys() const; + + /// Part of interface + + const char * getFamilyName() const override { return "Object"; } + TypeIndex getDataType() const override { return TypeIndex::ObjectDeprecated; } + + size_t size() const override; + size_t byteSize() const override; + size_t allocatedBytes() const override; + void forEachSubcolumn(MutableColumnCallback callback) override; + void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override; + void insert(const Field & field) override; + bool tryInsert(const Field & field) override; + void insertDefault() override; + void insertFrom(const IColumn & src, size_t n) override; + void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; + void popBack(size_t length) override; + Field operator[](size_t n) const override; + void get(size_t n, Field & res) const override; + + ColumnPtr permute(const Permutation & perm, size_t limit) const override; + ColumnPtr filter(const Filter & filter, ssize_t result_size_hint) const override; + ColumnPtr index(const IColumn & indexes, size_t limit) const override; + ColumnPtr replicate(const Offsets & offsets) const override; + MutableColumnPtr cloneResized(size_t new_size) const override; + + /// Finalizes all subcolumns. + void finalize() override; + bool isFinalized() const override; + + /// Order of rows in ColumnObjectDeprecated is undefined. + void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override; + void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} + int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } + void getExtremes(Field & min, Field & max) const override; + + /// All other methods throw exception. 
+ + StringRef getDataAt(size_t) const override { throwMustBeConcrete(); } + bool isDefaultAt(size_t) const override { throwMustBeConcrete(); } + void insertData(const char *, size_t) override { throwMustBeConcrete(); } + StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override { throwMustBeConcrete(); } + char * serializeValueIntoMemory(size_t, char *) const override { throwMustBeConcrete(); } + const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); } + const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); } + void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); } + void updateWeakHash32(WeakHash32 &) const override { throwMustBeConcrete(); } + void updateHashFast(SipHash &) const override { throwMustBeConcrete(); } + void expand(const Filter &, bool) override { throwMustBeConcrete(); } + bool hasEqualValues() const override { throwMustBeConcrete(); } + size_t byteSizeAt(size_t) const override { throwMustBeConcrete(); } + double getRatioOfDefaultRows(double) const override { throwMustBeConcrete(); } + UInt64 getNumberOfDefaultRows() const override { throwMustBeConcrete(); } + void getIndicesOfNonDefaultRows(Offsets &, size_t, size_t) const override { throwMustBeConcrete(); } + +private: + [[noreturn]] static void throwMustBeConcrete() + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ColumnObjectDeprecated must be converted to ColumnTuple before use"); + } + + template + MutableColumnPtr applyForSubcolumns(Func && func) const; + + /// It's used to get shared sized of Nested to insert correct default values. + const Subcolumns::Node * getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const; +}; +} diff --git a/src/Columns/IColumn.cpp b/src/Columns/IColumn.cpp index 90cccef2b03..bccc52d4bb2 100644 --- a/src/Columns/IColumn.cpp +++ b/src/Columns/IColumn.cpp @@ -11,12 +11,13 @@ #include #include #include -#include +#include #include #include #include #include #include +#include #include #include #include @@ -462,12 +463,13 @@ template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; -template class IColumnHelper; +template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; +template class IColumnHelper; template class IColumnHelper; diff --git a/src/Columns/tests/gtest_column_object.cpp b/src/Columns/tests/gtest_column_object.cpp new file mode 100644 index 00000000000..f6a1da64ba3 --- /dev/null +++ b/src/Columns/tests/gtest_column_object.cpp @@ -0,0 +1,351 @@ +#include +#include +#include +#include +#include + +#include +#include + +using namespace DB; + +TEST(ColumnObject, CreateEmpty) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=20, a.b UInt32, a.c Array(String))"); + auto col = type->createColumn(); + const auto & col_object = assert_cast(*col); + const auto & typed_paths = col_object.getTypedPaths(); + ASSERT_TRUE(typed_paths.contains("a.b")); + ASSERT_EQ(typed_paths.at("a.b")->getName(), "UInt32"); + ASSERT_TRUE(typed_paths.contains("a.c")); + ASSERT_EQ(typed_paths.at("a.c")->getName(), "Array(String)"); + ASSERT_TRUE(col_object.getDynamicPaths().empty()); + ASSERT_TRUE(col_object.getSharedDataOffsets().empty()); + ASSERT_TRUE(col_object.getSharedDataPathsAndValues().first->empty()); + 
ASSERT_TRUE(col_object.getSharedDataPathsAndValues().second->empty()); + ASSERT_EQ(col_object.getMaxDynamicTypes(), 10); + ASSERT_EQ(col_object.getMaxDynamicPaths(), 20); +} + +TEST(ColumnObject, GetName) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=20, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + ASSERT_EQ(col->getName(), "Object(max_dynamic_paths=20, max_dynamic_types=10, a.b Array(String), b.d UInt32)"); +} + +Field deserializeFieldFromSharedData(ColumnString * values, size_t n) +{ + auto data = values->getDataAt(n); + ReadBufferFromMemory buf(data.data, data.size); + Field res; + std::make_shared()->deserializeBinary(res, buf, FormatSettings()); + return res; +} + +TEST(ColumnObject, InsertField) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + const auto & typed_paths = col_object.getTypedPaths(); + const auto & dynamic_paths = col_object.getDynamicPaths(); + const auto & shared_data_nested_column = col_object.getSharedDataNestedColumn(); + const auto & shared_data_offsets = col_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = col_object.getSharedDataPathsAndValues(); + Object empty_object; + col_object.insert(empty_object); + ASSERT_EQ(col_object[0], (Object{{"a.b", Array{}}, {"b.d", Field(0u)}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 1); + ASSERT_TRUE(typed_paths.at("a.b")->isDefaultAt(0)); + ASSERT_EQ(typed_paths.at("b.d")->size(), 1); + ASSERT_TRUE(typed_paths.at("b.d")->isDefaultAt(0)); + ASSERT_TRUE(dynamic_paths.empty()); + ASSERT_EQ(shared_data_nested_column.size(), 1); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(0)); + + Object object1 = {{"a.b", Array{String("Hello"), String("World")}}, {"a.c", Field(42)}}; + col_object.insert(object1); + ASSERT_EQ(col_object[1], (Object{{"a.b", Array{String("Hello"), String("World")}}, {"b.d", Field(0u)}, {"a.c", Field(42)}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 2); + ASSERT_EQ((*typed_paths.at("a.b"))[1], (Array{String("Hello"), String("World")})); + ASSERT_EQ(typed_paths.at("b.d")->size(), 2); + ASSERT_TRUE(typed_paths.at("b.d")->isDefaultAt(1)); + ASSERT_EQ(dynamic_paths.size(), 1); + ASSERT_TRUE(dynamic_paths.contains("a.c")); + ASSERT_EQ(dynamic_paths.at("a.c")->size(), 2); + ASSERT_TRUE(dynamic_paths.at("a.c")->isDefaultAt(0)); + ASSERT_EQ((*dynamic_paths.at("a.c"))[1], Field(42)); + ASSERT_EQ(shared_data_nested_column.size(), 2); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(1)); + + Object object2 = {{"b.d", Field(142u)}, {"a.c", Field(43)}, {"a.d", Field("str")}, {"a.e", Field(242)}, {"a.f", Array{Field(42), Field(43)}}}; + col_object.insert(object2); + ASSERT_EQ(col_object[2], (Object{{"a.b", Array{}}, {"b.d", Field(142u)}, {"a.c", Field(43)}, {"a.d", Field("str")}, {"a.e", Field(242)}, {"a.f", Array{Field(42), Field(43)}}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 3); + ASSERT_TRUE(typed_paths.at("a.b")->isDefaultAt(2)); + ASSERT_EQ(typed_paths.at("b.d")->size(), 3); + ASSERT_EQ((*typed_paths.at("b.d"))[2], Field(142u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_TRUE(dynamic_paths.contains("a.c")); + ASSERT_EQ(dynamic_paths.at("a.c")->size(), 3); + ASSERT_EQ((*dynamic_paths.at("a.c"))[2], Field(43)); + ASSERT_TRUE(dynamic_paths.contains("a.d")); + ASSERT_EQ(dynamic_paths.at("a.d")->size(), 3); + 
ASSERT_EQ((*dynamic_paths.at("a.d"))[2], Field("str")); + + ASSERT_EQ(shared_data_nested_column.size(), 3); + ASSERT_EQ(shared_data_offsets[2] - shared_data_offsets[1], 2); + ASSERT_EQ((*shared_data_paths)[0], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 0), Field(242)); + ASSERT_EQ((*shared_data_paths)[1], "a.f"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 1), (Array({Field(42), Field(43)}))); + + Object object3 = {{"b.a", Field("Str")}, {"b.b", Field(2)}, {"b.c", Field(Tuple{Field(42), Field("Str")})}}; + col_object.insert(object3); + ASSERT_EQ(col_object[3], (Object{{"a.b", Array{}}, {"b.d", Field(0u)}, {"b.a", Field("Str")}, {"b.b", Field(2)}, {"b.c", Field(Tuple{Field(42), Field("Str")})}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 4); + ASSERT_TRUE(typed_paths.at("a.b")->isDefaultAt(3)); + ASSERT_EQ(typed_paths.at("b.d")->size(), 4); + ASSERT_TRUE(typed_paths.at("b.d")->isDefaultAt(3)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ(dynamic_paths.at("a.c")->size(), 4); + ASSERT_TRUE(dynamic_paths.at("a.c")->isDefaultAt(3)); + ASSERT_EQ(dynamic_paths.at("a.d")->size(), 4); + ASSERT_TRUE(dynamic_paths.at("a.d")->isDefaultAt(3)); + + ASSERT_EQ(shared_data_nested_column.size(), 4); + ASSERT_EQ(shared_data_offsets[3] - shared_data_offsets[2], 3); + ASSERT_EQ((*shared_data_paths)[2], "b.a"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 2), Field("Str")); + ASSERT_EQ((*shared_data_paths)[3], "b.b"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 3), Field(2)); + ASSERT_EQ((*shared_data_paths)[4], "b.c"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 4), Field(Tuple{Field(42), Field("Str")})); + + Object object4 = {{"c.c", Field(Null())}, {"c.d", Field(Null())}}; + col_object.insert(object4); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(4)); +} + +TEST(ColumnObject, InsertFrom) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.a", Field(42)}}); + + const auto & typed_paths = col_object.getTypedPaths(); + const auto & dynamic_paths = col_object.getDynamicPaths(); + const auto & shared_data_nested_column = col_object.getSharedDataNestedColumn(); + const auto & shared_data_offsets = col_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = col_object.getSharedDataPathsAndValues(); + + auto src_col1 = type->createColumn(); + auto & src_col_object1 = assert_cast(*src_col1); + src_col_object1.insert(Object{{"b.d", Field(43u)}, {"a.c", Field("Str1")}}); + col_object.insertFrom(src_col_object1, 0); + ASSERT_EQ((*typed_paths.at("a.b"))[1], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[1], Field(43u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[1], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[1], Field("Str1")); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(1)); + + auto src_col2 = type->createColumn(); + auto & src_col_object2 = assert_cast(*src_col2); + src_col_object2.insert(Object{{"a.b", Array{"Str4", "Str5"}}, {"b.d", Field(44u)}, {"a.d", Field("Str2")}, {"a.e", Field("Str3")}}); + col_object.insertFrom(src_col_object2, 0); + ASSERT_EQ((*typed_paths.at("a.b"))[2], Field(Array{"Str4", "Str5"})); + ASSERT_EQ((*typed_paths.at("b.d"))[2], Field(44u)); + ASSERT_EQ(dynamic_paths.size(), 2); + 
ASSERT_EQ((*dynamic_paths.at("a.a"))[2], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[2], Field(Null())); + ASSERT_EQ(shared_data_offsets[2] - shared_data_offsets[1], 2); + ASSERT_EQ((*shared_data_paths)[0], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 0), Field("Str2")); + ASSERT_EQ((*shared_data_paths)[1], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 1), Field("Str3")); + + auto src_col3 = type->createColumn(); + auto & src_col_object3 = assert_cast(*src_col3); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"h.h", Field("Str7")}}); + src_col_object3.insert(Object{{"a.a", Field("Str10")}, {"a.c", Field(45u)}, {"a.h", Field("Str6")}, {"h.h", Field("Str7")}, {"a.f", Field("Str8")}, {"a.g", Field("Str9")}, {"a.i", Field("Str11")}, {"a.u", Field(Null())}}); + col_object.insertFrom(src_col_object3, 1); + ASSERT_EQ((*typed_paths.at("a.b"))[3], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[3], Field(0u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[3], Field("Str10")); + ASSERT_EQ((*dynamic_paths.at("a.c"))[3], Field(45u)); + ASSERT_EQ(shared_data_offsets[3] - shared_data_offsets[2], 5); + ASSERT_EQ((*shared_data_paths)[2], "a.f"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 2), Field("Str8")); + ASSERT_EQ((*shared_data_paths)[3], "a.g"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 3), Field("Str9")); + ASSERT_EQ((*shared_data_paths)[4], "a.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 4), Field("Str6")); + ASSERT_EQ((*shared_data_paths)[5], "a.i"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 5), Field("Str11")); + ASSERT_EQ((*shared_data_paths)[6], "h.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 6), Field("Str7")); +} + + +TEST(ColumnObject, InsertRangeFrom) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.a", Field(42)}}); + + const auto & typed_paths = col_object.getTypedPaths(); + const auto & dynamic_paths = col_object.getDynamicPaths(); + const auto & shared_data_nested_column = col_object.getSharedDataNestedColumn(); + const auto & shared_data_offsets = col_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = col_object.getSharedDataPathsAndValues(); + + auto src_col1 = type->createColumn(); + auto & src_col_object1 = assert_cast(*src_col1); + src_col_object1.insert(Object{{"b.d", Field(43u)}, {"a.c", Field("Str1")}}); + src_col_object1.insert(Object{{"a.b", Field(Array{"Str1", "Str2"})}, {"a.a", Field("Str1")}}); + src_col_object1.insert(Object{{"b.d", Field(45u)}, {"a.c", Field("Str2")}}); + col_object.insertRangeFrom(src_col_object1, 0, 3); + ASSERT_EQ((*typed_paths.at("a.b"))[1], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[2], Field(Array{"Str1", "Str2"})); + ASSERT_EQ((*typed_paths.at("a.b"))[3], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[1], Field(43u)); + ASSERT_EQ((*typed_paths.at("b.d"))[2], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[3], Field(45u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[1], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[2], Field("Str1")); + ASSERT_EQ((*dynamic_paths.at("a.a"))[3], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[1], Field("Str1")); + 
ASSERT_EQ((*dynamic_paths.at("a.c"))[2], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[3], Field("Str2")); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(1)); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(2)); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(3)); + + auto src_col2 = type->createColumn(); + auto & src_col_object2 = assert_cast(*src_col2); + src_col_object2.insert(Object{{"a.b", Array{"Str4", "Str5"}}, {"a.d", Field("Str2")}, {"a.e", Field("Str3")}}); + src_col_object2.insert(Object{{"b.d", Field(44u)}, {"a.d", Field("Str22")}, {"a.e", Field("Str33")}}); + src_col_object2.insert(Object{{"a.b", Array{"Str44", "Str55"}}, {"a.d", Field("Str222")}, {"a.e", Field("Str333")}}); + col_object.insertRangeFrom(src_col_object2, 0, 3); + ASSERT_EQ((*typed_paths.at("a.b"))[4], Field(Array{"Str4", "Str5"})); + ASSERT_EQ((*typed_paths.at("a.b"))[5], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[6], Field(Array{"Str44", "Str55"})); + ASSERT_EQ((*typed_paths.at("b.d"))[4], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[5], Field(44u)); + ASSERT_EQ((*typed_paths.at("b.d"))[6], Field(0u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[4], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[5], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[6], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[4], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[5], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[6], Field(Null())); + ASSERT_EQ(shared_data_offsets[4] - shared_data_offsets[3], 2); + ASSERT_EQ((*shared_data_paths)[0], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 0), Field("Str2")); + ASSERT_EQ((*shared_data_paths)[1], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 1), Field("Str3")); + ASSERT_EQ(shared_data_offsets[5] - shared_data_offsets[4], 2); + ASSERT_EQ((*shared_data_paths)[2], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 2), Field("Str22")); + ASSERT_EQ((*shared_data_paths)[3], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 3), Field("Str33")); + ASSERT_EQ(shared_data_offsets[6] - shared_data_offsets[5], 2); + ASSERT_EQ((*shared_data_paths)[4], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 4), Field("Str222")); + ASSERT_EQ((*shared_data_paths)[5], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 5), Field("Str333")); + + auto src_col3 = type->createColumn(); + auto & src_col_object3 = assert_cast(*src_col3); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"h.h", Field("Str7")}}); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"h.h", Field("Str7")}, {"a.f", Field("Str8")}, {"a.g", Field("Str9")}, {"a.i", Field("Str11")}}); + src_col_object3.insert(Object{{"a.a", Field("Str10")}}); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"a.c", Field(45u)}, {"h.h", Field("Str7")}, {"a.i", Field("Str11")}}); + col_object.insertRangeFrom(src_col_object3, 1, 3); + ASSERT_EQ((*typed_paths.at("a.b"))[7], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[8], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[9], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[7], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[8], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[9], Field(0u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[7], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[8], 
Field("Str10")); + ASSERT_EQ((*dynamic_paths.at("a.a"))[9], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[7], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[8], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[9], Field(45u)); + ASSERT_EQ(shared_data_offsets[7] - shared_data_offsets[6], 5); + ASSERT_EQ((*shared_data_paths)[6], "a.f"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 6), Field("Str8")); + ASSERT_EQ((*shared_data_paths)[7], "a.g"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 7), Field("Str9")); + ASSERT_EQ((*shared_data_paths)[8], "a.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 8), Field("Str6")); + ASSERT_EQ((*shared_data_paths)[9], "a.i"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 9), Field("Str11")); + ASSERT_EQ((*shared_data_paths)[10], "h.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 10), Field("Str7")); + ASSERT_EQ(shared_data_offsets[8] - shared_data_offsets[7], 0); + ASSERT_EQ(shared_data_offsets[9] - shared_data_offsets[8], 3); + ASSERT_EQ((*shared_data_paths)[11], "a.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 11), Field("Str6")); + ASSERT_EQ((*shared_data_paths)[12], "a.i"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 12), Field("Str11")); +} + +TEST(ColumnObject, SerializeDeserializerFromArena) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"b.d", Field(42u)}, {"a.b", Array{"Str1", "Str2"}}, {"a.a", Tuple{"Str3", 441u}}, {"a.c", Field("Str4")}, {"a.d", Array{Field(45), Field(46)}}, {"a.e", Field(47)}}); + col_object.insert(Object{{"b.a", Field(48)}, {"b.b", Array{Field(49), Field(50)}}}); + col_object.insert(Object{{"b.d", Field(442u)}, {"a.b", Array{"Str11", "Str22"}}, {"a.a", Tuple{"Str33", 444u}}, {"a.c", Field("Str44")}, {"a.d", Array{Field(445), Field(446)}}, {"a.e", Field(447)}}); + + Arena arena; + const char * pos = nullptr; + auto ref1 = col_object.serializeValueIntoArena(0, arena, pos); + col_object.serializeValueIntoArena(1, arena, pos); + col_object.serializeValueIntoArena(2, arena, pos); + + auto col2 = type->createColumn(); + auto & col_object2 = assert_cast(*col); + pos = col_object2.deserializeAndInsertFromArena(ref1.data); + pos = col_object2.deserializeAndInsertFromArena(pos); + col_object2.deserializeAndInsertFromArena(pos); + + ASSERT_EQ(col_object2[0], (Object{{"b.d", Field(42u)}, {"a.b", Array{"Str1", "Str2"}}, {"a.a", Tuple{"Str3", 441u}}, {"a.c", Field("Str4")}, {"a.d", Array{Field(45), Field(46)}}, {"a.e", Field(47)}})); + ASSERT_EQ(col_object2[1], (Object{{"b.d", Field{0u}}, {"a.b", Array{}}, {"b.a", Field(48)}, {"b.b", Array{Field(49), Field(50)}}})); + ASSERT_EQ(col_object2[2], (Object{{"b.d", Field(442u)}, {"a.b", Array{"Str11", "Str22"}}, {"a.a", Tuple{"Str33", 444u}}, {"a.c", Field("Str44")}, {"a.d", Array{Field(445), Field(446)}}, {"a.e", Field(447)}})); +} + +TEST(ColumnObject, SkipSerializedInArena) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"b.d", Field(42u)}, {"a.b", Array{"Str1", "Str2"}}, {"a.a", Tuple{"Str3", 441u}}, {"a.c", Field("Str4")}, {"a.d", Array{Field(45), Field(46)}}, {"a.e", 
Field(47)}}); + col_object.insert(Object{{"b.a", Field(48)}, {"b.b", Array{Field(49), Field(50)}}}); + col_object.insert(Object{{"b.d", Field(442u)}, {"a.b", Array{"Str11", "Str22"}}, {"a.a", Tuple{"Str33", 444u}}, {"a.c", Field("Str44")}, {"a.d", Array{Field(445), Field(446)}}, {"a.e", Field(447)}}); + + Arena arena; + const char * pos = nullptr; + auto ref1 = col_object.serializeValueIntoArena(0, arena, pos); + col_object.serializeValueIntoArena(1, arena, pos); + auto ref3 = col_object.serializeValueIntoArena(2, arena, pos); + + const char * end = ref3.data + ref3.size; + auto col2 = type->createColumn(); + pos = col2->skipSerializedInArena(ref1.data); + pos = col2->skipSerializedInArena(pos); + pos = col2->skipSerializedInArena(pos); + ASSERT_EQ(pos, end); +} diff --git a/src/Core/Settings.h b/src/Core/Settings.h index e835aa3a4f2..2e808159926 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -871,6 +871,7 @@ class IColumn; M(Bool, allow_get_client_http_header, false, "Allow to use the function `getClientHTTPHeader` which lets to obtain a value of an the current HTTP request's header. It is not enabled by default for security reasons, because some headers, such as `Cookie`, could contain sensitive info. Note that the `X-ClickHouse-*` and `Authentication` headers are always restricted and cannot be obtained with this function.", 0) \ M(Bool, cast_string_to_dynamic_use_inference, false, "Use types inference during String to Dynamic conversion", 0) \ M(Bool, enable_blob_storage_log, true, "Write information about blob storage operations to system.blob_storage_log table", 0) \ + M(Bool, use_json_alias_for_old_object_type, false, "When enabled, JSON type alias will create old experimental Object type instead of a new JSON type", 0) \ M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0) \ M(Bool, create_index_ignore_unique, false, "Ignore UNIQUE keyword in CREATE UNIQUE INDEX. Made for SQL compatibility tests.", 0) \ M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \ @@ -900,6 +901,7 @@ class IColumn; M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \ M(Bool, allow_experimental_variant_type, false, "Allow Variant data type", 0) \ M(Bool, allow_experimental_dynamic_type, false, "Allow Dynamic data type", 0) \ + M(Bool, allow_experimental_json_type, false, "Allow JSON data type", 0) \ M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \ M(Bool, allow_experimental_usearch_index, false, "Allows to use USearch index. 
Disabled by default because this feature is experimental", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ @@ -1105,7 +1107,7 @@ class IColumn; M(Bool, schema_inference_make_columns_nullable, true, "If set to true, all inferred types will be Nullable in schema inference for formats without information about nullability.", 0) \ M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \ M(Bool, input_format_json_read_bools_as_strings, true, "Allow to parse bools as strings in JSON input formats", 0) \ - M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \ + M(Bool, input_format_json_try_infer_numbers_from_strings, true, "Try to infer numbers from string fields while schema inference", 0) \ M(Bool, input_format_json_validate_types_from_metadata, true, "For JSON/JSONCompact/JSONColumnsWithMetadata input formats this controls whether format parser should check if data types from input metadata match data types of the corresponding columns from the table", 0) \ M(Bool, input_format_json_read_numbers_as_strings, true, "Allow to parse numbers as strings in JSON input formats", 0) \ M(Bool, input_format_json_read_objects_as_strings, true, "Allow to parse JSON objects as strings in JSON input formats", 0) \ @@ -1119,6 +1121,7 @@ class IColumn; M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \ M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \ M(Bool, input_format_json_ignore_key_case, false, "Ignore json key case while read json field from string", 0) \ + M(Bool, type_json_skip_duplicated_paths, false, "When enabled, during parsing JSON object into JSON type duplicated paths will be ignored and only the first one will be inserted instead of an exception", 0) \ M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index df681425355..7abec285b37 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,7 +75,10 @@ static std::initializer_list #include #include +#include namespace DB { @@ -65,7 +66,11 @@ static DataTypePtr create(const ASTPtr & arguments) if (!argument || argument->name != "equals") throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Dynamic data type argument should be in a form 'max_types=N'"); - auto identifier_name = argument->arguments->children[0]->as()->name(); + const auto * identifier = argument->arguments->children[0]->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected Dynamic type argument: {}. 
Expected expression 'max_types=N'", identifier->formatForErrorMessage()); + + auto identifier_name = identifier->name(); if (identifier_name != "max_types") throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected identifier: {}. Dynamic data type argument should be in a form 'max_types=N'", identifier_name); @@ -82,9 +87,51 @@ void registerDataTypeDynamic(DataTypeFactory & factory) factory.registerDataType("Dynamic", create); } +namespace +{ + +std::pair splitSubcolumnName(std::string_view subcolumn_name) +{ + bool inside_quotes = false; + const char * pos = subcolumn_name.data(); + const char * end = subcolumn_name.data() + subcolumn_name.size(); + while (true) + { + pos = find_first_symbols<'`', '.', '\\'>(pos, end); + if (pos == end) + break; + + if (*pos == '`') + { + inside_quotes = !inside_quotes; + ++pos; + } + else if (*pos == '\\') + { + ++pos; + } + else if (*pos == '.') + { + if (inside_quotes) + ++pos; + else + break; + } + } + + if (pos == end) + return {subcolumn_name, {}}; + + return {std::string_view(subcolumn_name.data(), pos), std::string_view(pos + 1, end)}; +} + +} + std::unique_ptr DataTypeDynamic::getDynamicSubcolumnData(std::string_view subcolumn_name, const DB::IDataType::SubstreamData & data, bool throw_if_null) const { - auto [subcolumn_type_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name); + auto [subcolumn_type_name, subcolumn_nested_name] = splitSubcolumnName(subcolumn_name); +// std::cerr << "Dynamic subcolumn: " << subcolumn_name << ", " << subcolumn_nested_name << "\n"; + /// Check if requested subcolumn is a valid data type. auto subcolumn_type = DataTypeFactory::instance().tryGet(String(subcolumn_type_name)); if (!subcolumn_type) diff --git a/src/DataTypes/DataTypeDynamic.h b/src/DataTypes/DataTypeDynamic.h index d5e4c5261ce..2e7a23d314d 100644 --- a/src/DataTypes/DataTypeDynamic.h +++ b/src/DataTypes/DataTypeDynamic.h @@ -12,6 +12,9 @@ class DataTypeDynamic final : public IDataType public: static constexpr bool is_parametric = true; + /// Don't change this constant, it can break backward compatibility. 
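+    /// (Type names omit the value when it equals this default, e.g. in DataTypeObject::doGetName, so changing it would change how stored type names are parsed back.)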
+ static constexpr size_t DEFAULT_MAX_DYNAMIC_TYPES = 32; + explicit DataTypeDynamic(size_t max_dynamic_types_ = DEFAULT_MAX_DYNAMIC_TYPES); TypeIndex getTypeId() const override { return TypeIndex::Dynamic; } @@ -43,8 +46,6 @@ public: size_t getMaxDynamicTypes() const { return max_dynamic_types; } private: - static constexpr size_t DEFAULT_MAX_DYNAMIC_TYPES = 32; - SerializationPtr doGetDefaultSerialization() const override; String doGetName() const override; diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index 8c8f9999ada..6488e2d022b 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -290,9 +290,10 @@ DataTypeFactory::DataTypeFactory() registerDataTypeDomainSimpleAggregateFunction(*this); registerDataTypeDomainGeo(*this); registerDataTypeMap(*this); - registerDataTypeObject(*this); + registerDataTypeObjectDeprecated(*this); registerDataTypeVariant(*this); registerDataTypeDynamic(*this); + registerDataTypeJSON(*this); } DataTypeFactory & DataTypeFactory::instance() diff --git a/src/DataTypes/DataTypeFactory.h b/src/DataTypes/DataTypeFactory.h index 86e0203358d..ec0586f2f3c 100644 --- a/src/DataTypes/DataTypeFactory.h +++ b/src/DataTypes/DataTypeFactory.h @@ -98,8 +98,9 @@ void registerDataTypeLowCardinality(DataTypeFactory & factory); void registerDataTypeDomainBool(DataTypeFactory & factory); void registerDataTypeDomainSimpleAggregateFunction(DataTypeFactory & factory); void registerDataTypeDomainGeo(DataTypeFactory & factory); -void registerDataTypeObject(DataTypeFactory & factory); +void registerDataTypeObjectDeprecated(DataTypeFactory & factory); void registerDataTypeVariant(DataTypeFactory & factory); void registerDataTypeDynamic(DataTypeFactory & factory); +void registerDataTypeJSON(DataTypeFactory & factory); } diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 720436d0e0d..338883c3ae4 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -1,11 +1,30 @@ -#include #include -#include +#include +#include +#include +#include +#include +#include #include #include #include +#include +#include +#include +#include +#include + +#if USE_SIMDJSON +#include +#endif +#if USE_RAPIDJSON +#include +#endif +#include + #include +#include namespace DB { @@ -14,69 +33,465 @@ namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int UNEXPECTED_AST_STRUCTURE; + extern const int BAD_ARGUMENTS; } -DataTypeObject::DataTypeObject(const String & schema_format_, bool is_nullable_) - : schema_format(Poco::toLower(schema_format_)) - , is_nullable(is_nullable_) +DataTypeObject::DataTypeObject( + const SchemaFormat & schema_format_, + const std::unordered_map & typed_paths_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_prefixes_to_skip_, + const std::vector & path_regexps_to_skip_, + size_t max_dynamic_paths_, + size_t max_dynamic_types_) + : schema_format(schema_format_) + , typed_paths(typed_paths_) + , paths_to_skip(paths_to_skip_) + , path_prefixes_to_skip(path_prefixes_to_skip_) + , path_regexps_to_skip(path_regexps_to_skip_) + , max_dynamic_paths(max_dynamic_paths_) + , max_dynamic_types(max_dynamic_types_) +{ + for (const auto & path : paths_to_skip) + { + if (typed_paths.contains(path)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified both with the data type ('{}') and in the SKIP section", path, typed_paths[path]->getName()); + } +} + +DataTypeObject::DataTypeObject(const 
DB::DataTypeObject::SchemaFormat & schema_format_, size_t max_dynamic_paths_, size_t max_dynamic_types_) + : schema_format(schema_format_) + , max_dynamic_paths(max_dynamic_paths_) + , max_dynamic_types(max_dynamic_types_) { } bool DataTypeObject::equals(const IDataType & rhs) const { if (const auto * object = typeid_cast(&rhs)) - return schema_format == object->schema_format && is_nullable == object->is_nullable; + { + if (typed_paths.size() != object->typed_paths.size()) + return false; + + for (const auto & [path, type] : typed_paths) + { + auto it = object->typed_paths.find(path); + if (it == object->typed_paths.end()) + return false; + if (!type->equals(*it->second)) + return false; + } + + return schema_format == object->schema_format && paths_to_skip == object->paths_to_skip && path_prefixes_to_skip == object->path_prefixes_to_skip && path_regexps_to_skip == object->path_regexps_to_skip + && max_dynamic_types == object->max_dynamic_types && max_dynamic_paths == object->max_dynamic_paths; + } + return false; } SerializationPtr DataTypeObject::doGetDefaultSerialization() const { - return getObjectSerialization(schema_format); + std::unordered_map typed_path_serializations; + typed_path_serializations.reserve(typed_paths.size()); + for (const auto & [path, type] : typed_paths) + typed_path_serializations[path] = type->getDefaultSerialization(); + + switch (schema_format) + { + case SchemaFormat::JSON: +#ifdef USE_SIMDJSON + return std::make_shared>( + std::move(typed_path_serializations), + paths_to_skip, + path_prefixes_to_skip, + path_regexps_to_skip, + buildJSONExtractTree(getPtr(), "JSON serialization")); +#elif USE_RAPIDJSON + return std::make_shared>(std::move(typed_path_serializations), paths_to_skip, buildJSONExtractTree(getPtr(), "JSON serialization")); +#else + return std::make_shared>(std::move(typed_path_serializations), paths_to_skip, buildJSONExtractTree(getPtr(), "JSON serialization")); +#endif + } } String DataTypeObject::doGetName() const { WriteBufferFromOwnString out; - if (is_nullable) - out << "Object(Nullable(" << quote << schema_format << "))"; - else - out << "Object(" << quote << schema_format << ")"; + out << magic_enum::enum_name(schema_format); + bool first = true; + auto write_separator = [&]() + { + if (!first) + { + out << ", "; + } + else + { + out << "("; + first = false; + } + }; + + if (max_dynamic_types != DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) + { + write_separator(); + out << "max_dynamic_types=" << max_dynamic_types; + } + + if (max_dynamic_paths != DEFAULT_MAX_SEPARATELY_STORED_PATHS) + { + write_separator(); + out << "max_dynamic_paths=" << max_dynamic_paths; + } + + std::vector sorted_typed_paths; + sorted_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, _] : typed_paths) + sorted_typed_paths.push_back(path); + std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end()); + for (const auto & path : sorted_typed_paths) + { + write_separator(); + out << path << " " << typed_paths.at(path)->getName(); + } + + std::vector sorted_skip_paths; + sorted_skip_paths.reserve(paths_to_skip.size()); + for (const auto & skip_path : paths_to_skip) + sorted_skip_paths.push_back(skip_path); + std::sort(sorted_skip_paths.begin(), sorted_skip_paths.end()); + for (const auto & skip_path : sorted_skip_paths) + { + write_separator(); + out << "SKIP " << skip_path; + } + + for (const auto & skip_prefix : path_prefixes_to_skip) + { + write_separator(); + out << "SKIP PREFIX " << skip_prefix; + } + + for (const auto & skip_regexp : 
path_regexps_to_skip) + { + write_separator(); + out << "SKIP REGEXP " << skip_regexp; + } + + if (!first) + out << ")"; + return out.str(); } -static DataTypePtr create(const ASTPtr & arguments) +MutableColumnPtr DataTypeObject::createColumn() const { - if (!arguments || arguments->children.size() != 1) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Object data type family must have one argument - name of schema format"); + std::unordered_map typed_path_columns; + typed_path_columns.reserve(typed_paths.size()); + for (const auto & [path, type] : typed_paths) + typed_path_columns[path] = type->createColumn(); - ASTPtr schema_argument = arguments->children[0]; - bool is_nullable = false; + return ColumnObject::create(std::move(typed_path_columns), max_dynamic_paths, max_dynamic_types); +} - if (const auto * func = schema_argument->as()) +namespace +{ + +/// JSON subcolumn name with Dynamic type subcolumn looks like this: +/// "json.some.path.:`Type_name`.some.subcolumn". +/// We back quoted type name during identifier parsing so we can distinguish type subcolumn and path element ":TypeName". +std::pair splitPathAndDynamicTypeSubcolumn(std::string_view subcolumn_name) +{ + /// Try to find dynamic type subcolumn in a form .:`Type`. + auto pos = subcolumn_name.find(".:`"); + if (pos == std::string_view::npos) + return {String(subcolumn_name), ""}; + + ReadBufferFromMemory buf(subcolumn_name.substr(pos + 2)); + String dynamic_subcolumn; + /// Try to read back quoted type name. + if (!tryReadBackQuotedString(dynamic_subcolumn, buf)) + return {String(subcolumn_name), ""}; + + /// If there is more data in the buffer - it's subcolumn of a type, append it to the type name. + if (!buf.eof()) + dynamic_subcolumn += String(buf.position(), buf.available()); + + return {String(subcolumn_name.substr(0, pos)), dynamic_subcolumn}; +} + +/// Sub-object subcolumn in JSON path always looks like "^`some`.path.path". +/// We back quote first path element after `^` so we can distinguish sub-object subcolumn and path element "^path". +std::optional tryGetSubObjectSubcolumn(std::string_view subcolumn_name) +{ + if (!subcolumn_name.starts_with("^`")) + return std::nullopt; + + ReadBufferFromMemory buf(subcolumn_name.data() + 1); + String path; + /// Try to read back-quoted first path element. + if (!tryReadBackQuotedString(path, buf)) + return std::nullopt; + + /// Add remaining path elements if any. + return path + String(buf.position(), buf.available()); +} + +/// Return sub-path by specified prefix. +/// For example, for prefix a.b: +/// a.b.c.d -> c.d, a.b.c -> c +String getSubPath(const String & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +std::string_view getSubPath(const std::string_view & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +} + +std::unique_ptr DataTypeObject::getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const +{ + /// Check if it's sub-object subcolumn. + /// In this case we should return JSON column with all paths that are inside specified object prefix. 
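+    /// The returned type keeps the same schema format, limits and SKIP settings as this type.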
+ /// For example, if we have {"a" : {"b" : {"c" : {"d" : 10, "e" : "Hello"}, "f" : [1, 2, 3]}}} and subcolumn ^a.b + /// we should return JSON column with data {"c" : {"d" : 10, "e" : Hello}, "f" : [1, 2, 3]} + if (auto sub_object_subcolumn = tryGetSubObjectSubcolumn(subcolumn_name)) { - if (func->name != "Nullable" || func->arguments->children.size() != 1) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, - "Expected 'Nullable()' as parameter for type Object (function: {})", func->name); + const String & prefix = *sub_object_subcolumn; + /// Collect new typed paths. + std::unordered_map typed_sub_paths; + /// Collect serializations for typed paths. They will be needed for sub-object subcolumn deserialization. + std::unordered_map typed_paths_serializations; + for (const auto & [path, type] : typed_paths) + { + if (path.starts_with(prefix) && path.size() != prefix.size()) + { + typed_sub_paths[getSubPath(path, prefix)] = type; + typed_paths_serializations[path] = type->getDefaultSerialization(); + } + } - schema_argument = func->arguments->children[0]; - is_nullable = true; + std::unique_ptr res = std::make_unique(std::make_shared(prefix, typed_paths_serializations)); + /// Keep all current constrains like limits and skip paths/prefixes/regexps. + res->type = std::make_shared(schema_format, typed_sub_paths, paths_to_skip, path_prefixes_to_skip, path_prefixes_to_skip, max_dynamic_paths, max_dynamic_types); + /// If column was provided, we should create a column for the requested subcolumn. + if (data.column) + { + const auto & object_column = assert_cast(*data.column); + + auto result_column = res->type->createColumn(); + auto & result_object_column = assert_cast(*result_column); + + /// Iterate over all typed/dynamic/shared data paths and collect all paths with specified prefix. + auto & result_typed_columns = result_object_column.getTypedPaths(); + for (const auto & [path, column] : object_column.getTypedPaths()) + { + if (path.starts_with(prefix) && path.size() != prefix.size()) + result_typed_columns[getSubPath(path, prefix)] = column; + } + + auto & result_dynamic_columns = result_object_column.getDynamicPaths(); + for (const auto & [path, column] : object_column.getDynamicPaths()) + { + if (path.starts_with(prefix) && path.size() != prefix.size()) + result_dynamic_columns[getSubPath(path, prefix)] = column; + } + + const auto & shared_data_offsets = object_column.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = object_column.getSharedDataPathsAndValues(); + auto & result_shared_data_offsets = result_object_column.getSharedDataOffsets(); + result_shared_data_offsets.reserve(shared_data_offsets.size()); + auto [result_shared_data_paths, result_shared_data_values] = result_object_column.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + size_t lower_bound_index = ColumnObject::findPathLowerBoundInSharedData(prefix, *shared_data_paths, start, end); + for (; lower_bound_index != end; ++lower_bound_index) + { + auto path = shared_data_paths->getDataAt(lower_bound_index).toView(); + if (!path.starts_with(prefix)) + break; + + /// Don't include path that is equal to the prefix. 
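+                    /// (a value stored exactly at the prefix has no sub-path inside the requested sub-object).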
+ if (path.size() != prefix.size()) + { + auto sub_path = getSubPath(path, prefix); + result_shared_data_paths->insertData(sub_path.data(), sub_path.size()); + result_shared_data_values->insertFrom(*shared_data_values, lower_bound_index); + } + } + result_shared_data_offsets.push_back(result_shared_data_paths->size()); + } + + res->column = std::move(result_column); + } + + return res; } - const auto * literal = schema_argument->as(); - if (!literal || literal->value.getType() != Field::Types::String) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, - "Object data type family must have a const string as its schema name parameter"); + /// Split requested subcolumn to the JSON path and Dynamic type subcolumn. + auto [path, path_subcolumn] = splitPathAndDynamicTypeSubcolumn(subcolumn_name); + std::unique_ptr res; + if (auto it = typed_paths.find(path); it != typed_paths.end()) + { + res = std::make_unique(it->second->getDefaultSerialization()); + res->type = it->second; + } + else + { + res = std::make_unique(std::make_shared()); + res->type = std::make_shared(); + } - return std::make_shared(literal->value.get(), is_nullable); + /// If column was provided, we should create a column for requested subcolumn. + if (data.column) + { + const auto & object_column = assert_cast(*data.column); + /// Try to find requested path in typed paths. + if (auto typed_it = object_column.getTypedPaths().find(path); typed_it != object_column.getTypedPaths().end()) + { + res->column = typed_it->second; + } + /// Try to find requested path in dynamic paths. + else if (auto dynamic_it = object_column.getDynamicPaths().find(path); dynamic_it != object_column.getDynamicPaths().end()) + { + res->column = dynamic_it->second; + } + /// Extract values of requested path from shared data. + else + { + auto dynamic_column = ColumnDynamic::create(max_dynamic_types); + dynamic_column->reserve(object_column.size()); + ColumnObject::fillPathColumnFromSharedData(*dynamic_column, path, object_column.getSharedDataPtr(), 0, object_column.size()); + res->column = std::move(dynamic_column); + } + } + + /// Get subcolumn for Dynamic type if needed. + if (!path_subcolumn.empty()) + { + /// It is possible to have nested JSON object inside Dynamic. For example when we have an array of JSON objects. + /// During parsing in case of creating nested JSON objects, we reduce max_dynamic_paths/max_dynamic_types by NESTED_OBJECT_REDUCE_FACTOR factor. + /// So the type name will actually be JSON(max_dynamic_paths=N, max_dynamic_types=M). But we want the user to be able to query it + /// using json.array.:`Array(JSON)`.some.path without specifying max_dynamic_paths/max_dynamic_types. + /// To support it, we do a trick - we replace JSON name in subcolumn to JSON(max_dynamic_paths=N, max_dynamic_types=M), because we know + /// the exact values of max_dynamic_paths/max_dynamic_types for it. + auto pos = path_subcolumn.find("JSON"); + /// We want to replace JSON keyword only in the first subcolumn part before the first dot. 
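+        /// For example, if this type has the default limits (1000 paths, 32 types), "Array(JSON)" becomes "Array(JSON(max_dynamic_paths=250, max_dynamic_types=16))" here.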
+ auto first_dot_pos = path_subcolumn.find('.'); + if (pos != path_subcolumn.npos && (first_dot_pos == path_subcolumn.npos || pos < first_dot_pos)) + path_subcolumn.replace(pos, 4, fmt::format("JSON(max_dynamic_paths={}, max_dynamic_types={})", max_dynamic_paths / NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, std::max(max_dynamic_types / NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))); + + res = res->type->getSubcolumnData(path_subcolumn, *res, throw_if_null); + if (!res) + return nullptr; + } + + if (typed_paths.contains(path)) + res->serialization = std::make_shared(res->serialization, path); + else + res->serialization = std::make_shared(res->serialization, path, path_subcolumn, max_dynamic_types); + + return res; } -void registerDataTypeObject(DataTypeFactory & factory) +static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject::SchemaFormat & schema_format) { - factory.registerDataType("Object", create); - factory.registerSimpleDataType("JSON", - [] { return std::make_shared("JSON", false); }, - DataTypeFactory::CaseInsensitive); + if (!arguments || arguments->children.empty()) + return std::make_shared(schema_format); + + std::unordered_map typed_paths; + std::unordered_set paths_to_skip; + /// Collect prefixes in unordered_set to avoid duplicate prefixes + std::unordered_set path_prefixes_to_skip_set; + std::vector path_regexps_to_skip; + + size_t max_dynamic_types = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES; + size_t max_dynamic_paths = DataTypeObject::DEFAULT_MAX_SEPARATELY_STORED_PATHS; + + for (const auto & argument : arguments->children) + { + const auto * object_type_argument = argument->as(); + if (object_type_argument->parameter) + { + const auto * function = object_type_argument->parameter->as(); + + if (!function || function->name != "equals") + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected parameter in {} type arguments: {}", magic_enum::enum_name(schema_format), function->formatForErrorMessage()); + + const auto * identifier = function->arguments->children[0]->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected {} type argument: {}. Expected expression 'max_dynamic_types=N' or 'max_dynamic_paths=N'", magic_enum::enum_name(schema_format), function->formatForErrorMessage()); + + auto identifier_name = identifier->name(); + if (identifier_name != "max_dynamic_types" && identifier_name != "max_dynamic_paths") + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected parameter in {} type arguments: {}. Expected 'max_dynamic_types' or `max_dynamic_paths`", magic_enum::enum_name(schema_format), identifier_name); + + auto * literal = function->arguments->children[1]->as(); + /// Is 1000000 a good maximum for max paths? + size_t min_value = identifier_name == "max_dynamic_types" ? 1 : 0; + size_t max_value = identifier_name == "max_dynamic_types" ? 255 : 1000000; + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() < min_value || literal->value.get() > max_value) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'{}' parameter for {} type should be a positive integer between {} and {}. 
Got {}", identifier_name, magic_enum::enum_name(schema_format), min_value, max_value, function->arguments->children[1]->formatForErrorMessage()); + + if (identifier_name == "max_dynamic_types") + max_dynamic_types = literal->value.get(); + else + max_dynamic_paths = literal->value.get(); + } + else if (object_type_argument->path_with_type) + { + const auto * path_with_type = object_type_argument->path_with_type->as(); + auto data_type = DataTypeFactory::instance().get(path_with_type->type); + if (typed_paths.contains(path_with_type->name)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Found duplicated path with type: {}", path_with_type->name); + typed_paths.emplace(path_with_type->name, data_type); + } + else if (object_type_argument->skip_path) + { + const auto * identifier = object_type_argument->skip_path->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST in SKIP section of {} type arguments: {}. Expected identifier with path name", magic_enum::enum_name(schema_format), object_type_argument->skip_path->formatForErrorMessage()); + + paths_to_skip.insert(identifier->name()); + } + else if (object_type_argument->skip_path_prefix) + { + const auto * identifier = object_type_argument->skip_path_prefix->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST in SKIP section of {} type arguments: {}. Expected identifier with path name", magic_enum::enum_name(schema_format), object_type_argument->skip_path->formatForErrorMessage()); + + path_prefixes_to_skip_set.insert(identifier->name()); + } + else if (object_type_argument->skip_path_regexp) + { + const auto * literal = object_type_argument->skip_path_regexp->as(); + if (!literal || literal->value.getType() != Field::Types::String) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST in SKIP section of {} type arguments: {}. Expected identifier with path name", magic_enum::enum_name(schema_format), object_type_argument->skip_path->formatForErrorMessage()); + + path_regexps_to_skip.push_back(literal->value.get()); + } + } + + std::vector path_prefixes_to_skip(path_prefixes_to_skip_set.begin(), path_prefixes_to_skip_set.end()); + std::sort(path_prefixes_to_skip.begin(), path_prefixes_to_skip.end()); + std::sort(path_regexps_to_skip.begin(), path_regexps_to_skip.end()); + return std::make_shared(schema_format, typed_paths, paths_to_skip, path_prefixes_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); +} + +static DataTypePtr createJSON(const ASTPtr & arguments) +{ + return createObject(arguments, DataTypeObject::SchemaFormat::JSON); +} + +void registerDataTypeJSON(DataTypeFactory & factory) +{ + if (!Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) + factory.registerDataType("JSON", createJSON); } } diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index c610a1a8ba4..1a66222cc98 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -1,8 +1,10 @@ #pragma once #include +#include #include -#include +#include +#include namespace DB @@ -15,34 +17,70 @@ namespace ErrorCodes class DataTypeObject : public IDataType { -private: - String schema_format; - bool is_nullable; - public: - DataTypeObject(const String & schema_format_, bool is_nullable_); + enum class SchemaFormat + { + JSON = 0, + }; + + /// Don't change these constants, it can break backward compatibility. 
+ static constexpr size_t DEFAULT_MAX_SEPARATELY_STORED_PATHS = 1000; + static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR = 4; + static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR = 2; + + DataTypeObject( + const SchemaFormat & schema_format_, + const std::unordered_map & typed_paths_ = {}, + const std::unordered_set & paths_to_skip_ = {}, + const std::vector & path_prefixes_to_skip_ = {}, + const std::vector & path_regexps_to_skip_ = {}, + size_t max_dynamic_paths_ = DEFAULT_MAX_SEPARATELY_STORED_PATHS, + size_t max_dynamic_types_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES); + + DataTypeObject(const SchemaFormat & schema_format_, size_t max_dynamic_paths_, size_t max_dynamic_types_); const char * getFamilyName() const override { return "Object"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::Object; } - MutableColumnPtr createColumn() const override { return ColumnObject::create(is_nullable); } + MutableColumnPtr createColumn() const override; Field getDefault() const override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDefault() is not implemented for data type {}", getName()); } - bool haveSubtypes() const override { return false; } - bool equals(const IDataType & rhs) const override; bool isParametric() const override { return true; } - bool hasDynamicSubcolumnsDeprecated() const override { return true; } + bool canBeInsideNullable() const override { return false; } + bool supportsSparseSerialization() const override { return false; } + bool canBeInsideSparseColumns() const override { return false; } + bool isComparable() const override { return false; } + bool haveSubtypes() const override { return false; } + + bool equals(const IDataType & rhs) const override; + + bool hasDynamicSubcolumnsData() const override { return true; } + std::unique_ptr getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const override; SerializationPtr doGetDefaultSerialization() const override; - bool hasNullableSubcolumns() const { return is_nullable; } + const SchemaFormat & getSchemaFormat() const { return schema_format; } + const std::unordered_map & getTypedPaths() const { return typed_paths; } + const std::unordered_set & getPathsToSkip() const { return paths_to_skip; } + const std::vector & getPathPrefixesToSkip() const { return path_prefixes_to_skip; } + const std::vector & getPathRegexpsToSkip() const { return path_regexps_to_skip; } - const String & getSchemaFormat() const { return schema_format; } + size_t getMaxDynamicTypes() const { return max_dynamic_types; } + size_t getMaxDynamicPaths() const { return max_dynamic_paths; } + +private: + SchemaFormat schema_format; + std::unordered_map typed_paths; + std::unordered_set paths_to_skip; + std::vector path_prefixes_to_skip; + std::vector path_regexps_to_skip; + size_t max_dynamic_paths; + size_t max_dynamic_types; }; } diff --git a/src/DataTypes/DataTypeObjectDeprecated.cpp b/src/DataTypes/DataTypeObjectDeprecated.cpp new file mode 100644 index 00000000000..8b2b5e72f6c --- /dev/null +++ b/src/DataTypes/DataTypeObjectDeprecated.cpp @@ -0,0 +1,85 @@ +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int UNEXPECTED_AST_STRUCTURE; +} + +DataTypeObjectDeprecated::DataTypeObjectDeprecated(const String & schema_format_, bool is_nullable_) + : 
schema_format(Poco::toLower(schema_format_)) + , is_nullable(is_nullable_) +{ +} + +bool DataTypeObjectDeprecated::equals(const IDataType & rhs) const +{ + if (const auto * object = typeid_cast(&rhs)) + return schema_format == object->schema_format && is_nullable == object->is_nullable; + return false; +} + +SerializationPtr DataTypeObjectDeprecated::doGetDefaultSerialization() const +{ + return getObjectSerialization(schema_format); +} + +String DataTypeObjectDeprecated::doGetName() const +{ + WriteBufferFromOwnString out; + if (is_nullable) + out << "Object(Nullable(" << quote << schema_format << "))"; + else + out << "Object(" << quote << schema_format << ")"; + return out.str(); +} + +static DataTypePtr create(const ASTPtr & arguments) +{ + if (!arguments || arguments->children.size() != 1) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Object data type family must have one argument - name of schema format"); + + ASTPtr schema_argument = arguments->children[0]; + bool is_nullable = false; + + if (const auto * func = schema_argument->as()) + { + if (func->name != "Nullable" || func->arguments->children.size() != 1) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, + "Expected 'Nullable()' as parameter for type Object (function: {})", func->name); + + schema_argument = func->arguments->children[0]; + is_nullable = true; + } + + const auto * literal = schema_argument->as(); + if (!literal || literal->value.getType() != Field::Types::String) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, + "Object data type family must have a const string as its schema name parameter"); + + return std::make_shared(literal->value.get(), is_nullable); +} + +void registerDataTypeObjectDeprecated(DataTypeFactory & factory) +{ + factory.registerDataType("Object", create); + if (Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) + factory.registerSimpleDataType("JSON", + [] { return std::make_shared("JSON", false); }, + DataTypeFactory::CaseInsensitive); +} + +} diff --git a/src/DataTypes/DataTypeObjectDeprecated.h b/src/DataTypes/DataTypeObjectDeprecated.h new file mode 100644 index 00000000000..e1f81caaa4f --- /dev/null +++ b/src/DataTypes/DataTypeObjectDeprecated.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +class DataTypeObjectDeprecated : public IDataType +{ +private: + String schema_format; + bool is_nullable; + +public: + DataTypeObjectDeprecated(const String & schema_format_, bool is_nullable_); + + const char * getFamilyName() const override { return "Object"; } + String doGetName() const override; + TypeIndex getTypeId() const override { return TypeIndex::ObjectDeprecated; } + + MutableColumnPtr createColumn() const override { return ColumnObjectDeprecated::create(is_nullable); } + + Field getDefault() const override + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDefault() is not implemented for data type {}", getName()); + } + + bool haveSubtypes() const override { return false; } + bool equals(const IDataType & rhs) const override; + bool isParametric() const override { return true; } + bool hasDynamicSubcolumnsDeprecated() const override { return true; } + + SerializationPtr doGetDefaultSerialization() const override; + + bool hasNullableSubcolumns() const { return is_nullable; } + + const String & getSchemaFormat() const { return schema_format; } +}; + +} diff --git a/src/DataTypes/DataTypeTuple.cpp 
b/src/DataTypes/DataTypeTuple.cpp index e96937d522d..75556ed4090 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -192,17 +192,12 @@ MutableColumnPtr DataTypeTuple::createColumn() const MutableColumnPtr DataTypeTuple::createColumn(const ISerialization & serialization) const { - /// If we read Tuple as Variant subcolumn, it may be wrapped to SerializationVariantElement. - /// Here we don't need it, so we drop this wrapper. - const auto * current_serialization = &serialization; - while (const auto * serialization_variant_element = typeid_cast(current_serialization)) - current_serialization = serialization_variant_element->getNested().get(); - - /// If we read subcolumn of nested Tuple, it may be wrapped to SerializationNamed + /// If we read subcolumn of nested Tuple or this Tuple is a subcolumn, it may be wrapped to SerializationWrapper /// several times to allow to reconstruct the substream path name. /// Here we don't need substream path name, so we drop first several wrapper serializations. - while (const auto * serialization_named = typeid_cast(current_serialization)) - current_serialization = serialization_named->getNested().get(); + const auto * current_serialization = &serialization; + while (const auto * serialization_wrapper = dynamic_cast(current_serialization)) + current_serialization = serialization_wrapper->getNested().get(); const auto * serialization_tuple = typeid_cast(current_serialization); if (!serialization_tuple) diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index bd994e313ba..102e5eaede0 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -94,6 +95,7 @@ enum class BinaryTypeIndex : uint8_t Bool = 0x2D, SimpleAggregateFunction = 0x2E, Nested = 0x2F, + JSON = 0x30, }; BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) @@ -202,7 +204,7 @@ BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) return BinaryTypeIndex::LowCardinality; case TypeIndex::Map: return BinaryTypeIndex::Map; - case TypeIndex::Object: + case TypeIndex::ObjectDeprecated: /// Object type will be deprecated and replaced by new implementation. No need to support it here. throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type Object is not supported"); case TypeIndex::IPv4: @@ -216,6 +218,15 @@ BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) /// JSONPaths is used only during schema inference and cannot be used anywhere else. 
case TypeIndex::JSONPaths: throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type JSONPaths is not supported"); + case TypeIndex::Object: + { + const auto & object_type = assert_cast(*type); + switch (object_type.getSchemaFormat()) + { + case DataTypeObject::SchemaFormat::JSON: + return BinaryTypeIndex::JSON; + } + } } } @@ -480,6 +491,32 @@ void encodeDataType(const DataTypePtr & type, WriteBuffer & buf) writeStringBinary(type_name, buf); break; } + case BinaryTypeIndex::JSON: + { + const auto & object_type = assert_cast(*type); + writeVarUInt(object_type.getMaxDynamicPaths(), buf); + writeBinary(UInt8(object_type.getMaxDynamicTypes()), buf); + const auto & typed_paths = object_type.getTypedPaths(); + writeVarUInt(typed_paths.size(), buf); + for (const auto & [path, path_type] : typed_paths) + { + writeStringBinary(path, buf); + encodeDataType(path_type, buf); + } + const auto & paths_to_skip = object_type.getPathsToSkip(); + writeVarUInt(paths_to_skip.size(), buf); + for (const auto & path : paths_to_skip) + writeStringBinary(path, buf); + const auto & path_prefixes_to_skip = object_type.getPathPrefixesToSkip(); + writeVarUInt(path_prefixes_to_skip.size(), buf); + for (const auto & prefix : path_prefixes_to_skip) + writeStringBinary(prefix, buf); + const auto & path_regexps_to_skip = object_type.getPathRegexpsToSkip(); + writeVarUInt(path_regexps_to_skip.size(), buf); + for (const auto & regexp : path_regexps_to_skip) + writeStringBinary(regexp, buf); + break; + } default: break; } @@ -691,6 +728,62 @@ DataTypePtr decodeDataType(ReadBuffer & buf) readStringBinary(type_name, buf); return DataTypeFactory::instance().get(type_name); } + case BinaryTypeIndex::JSON: + { + size_t max_dynamic_paths; + readVarUInt(max_dynamic_paths, buf); + UInt8 max_dynamic_types; + readBinary(max_dynamic_types, buf); + size_t typed_paths_size; + readVarUInt(typed_paths_size, buf); + std::unordered_map typed_paths; + for (size_t i = 0; i != typed_paths_size; ++i) + { + String path; + readStringBinary(path, buf); + typed_paths[path] = decodeDataType(buf); + } + size_t paths_to_skip_size; + readVarUInt(paths_to_skip_size, buf); + std::unordered_set paths_to_skip; + paths_to_skip.reserve(paths_to_skip_size); + for (size_t i = 0; i != paths_to_skip_size; ++i) + { + String path; + readStringBinary(path, buf); + paths_to_skip.insert(path); + } + + size_t path_prefixes_to_skip_size; + readVarUInt(path_prefixes_to_skip_size, buf); + std::vector path_prefixes_to_skip; + path_prefixes_to_skip.reserve(path_prefixes_to_skip_size); + for (size_t i = 0; i != path_prefixes_to_skip_size; ++i) + { + String prefix; + readStringBinary(prefix, buf); + path_prefixes_to_skip.push_back(prefix); + } + + size_t path_regexps_to_skip_size; + readVarUInt(path_regexps_to_skip_size, buf); + std::vector path_regexps_to_skip; + path_regexps_to_skip.reserve(path_regexps_to_skip_size); + for (size_t i = 0; i != path_regexps_to_skip_size; ++i) + { + String regexp; + readStringBinary(regexp, buf); + path_regexps_to_skip.push_back(regexp); + } + return std::make_shared( + DataTypeObject::SchemaFormat::JSON, + typed_paths, + paths_to_skip, + path_prefixes_to_skip, + path_regexps_to_skip, + max_dynamic_paths, + max_dynamic_types); + } } throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown type code: {0:#04x}", UInt64(type)); diff --git a/src/DataTypes/DataTypesBinaryEncoding.h b/src/DataTypes/DataTypesBinaryEncoding.h index d02e7f85942..de001966aee 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.h +++ 
b/src/DataTypes/DataTypesBinaryEncoding.h @@ -8,58 +8,59 @@ namespace DB /** Binary encoding for ClickHouse data types: -|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ClickHouse data type | Binary encoding | -|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Nothing | 0x00 | -| UInt8 | 0x01 | -| UInt16 | 0x02 | -| UInt32 | 0x03 | -| UInt64 | 0x04 | -| UInt128 | 0x05 | -| UInt256 | 0x06 | -| Int8 | 0x07 | -| Int16 | 0x08 | -| Int32 | 0x09 | -| Int64 | 0x0A | -| Int128 | 0x0B | -| Int256 | 0x0C | -| Float32 | 0x0D | -| Float64 | 0x0E | -| Date | 0x0F | -| Date32 | 0x10 | -| DateTime | 0x11 | -| DateTime(time_zone) | 0x12 | -| DateTime64(P) | 0x13 | -| DateTime64(P, time_zone) | 0x14 | -| String | 0x15 | -| FixedString(N) | 0x16 | -| Enum8 | 0x17... | -| Enum16 | 0x18...> | -| Decimal32(P, S) | 0x19 | -| Decimal64(P, S) | 0x1A | -| Decimal128(P, S) | 0x1B | -| Decimal256(P, S) | 0x1C | -| UUID | 0x1D | -| Array(T) | 0x1E | -| Tuple(T1, ..., TN) | 0x1F... | -| Tuple(name1 T1, ..., nameN TN) | 0x20... | -| Set | 0x21 | -| Interval | 0x22 | -| Nullable(T) | 0x23 | -| Function | 0x24... | -| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | -| LowCardinality(T) | 0x26 | -| Map(K, V) | 0x27 | -| IPv4 | 0x28 | -| IPv6 | 0x29 | -| Variant(T1, ..., TN) | 0x2A... | -| Dynamic(max_types=N) | 0x2B | -| Custom type (Ring, Polygon, etc) | 0x2C | -| Bool | 0x2D | -| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | -| Nested(name1 T1, ..., nameN TN) | 0x2F... 
| -|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|---------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ClickHouse data type | Binary encoding | +|---------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Nothing | 0x00 | +| UInt8 | 0x01 | +| UInt16 | 0x02 | +| UInt32 | 0x03 | +| UInt64 | 0x04 | +| UInt128 | 0x05 | +| UInt256 | 0x06 | +| Int8 | 0x07 | +| Int16 | 0x08 | +| Int32 | 0x09 | +| Int64 | 0x0A | +| Int128 | 0x0B | +| Int256 | 0x0C | +| Float32 | 0x0D | +| Float64 | 0x0E | +| Date | 0x0F | +| Date32 | 0x10 | +| DateTime | 0x11 | +| DateTime(time_zone) | 0x12 | +| DateTime64(P) | 0x13 | +| DateTime64(P, time_zone) | 0x14 | +| String | 0x15 | +| FixedString(N) | 0x16 | +| Enum8 | 0x17... | +| Enum16 | 0x18...> | +| Decimal32(P, S) | 0x19 | +| Decimal64(P, S) | 0x1A | +| Decimal128(P, S) | 0x1B | +| Decimal256(P, S) | 0x1C | +| UUID | 0x1D | +| Array(T) | 0x1E | +| Tuple(T1, ..., TN) | 0x1F... | +| Tuple(name1 T1, ..., nameN TN) | 0x20... | +| Set | 0x21 | +| Interval | 0x22 | +| Nullable(T) | 0x23 | +| Function | 0x24... | +| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | +| LowCardinality(T) | 0x26 | +| Map(K, V) | 0x27 | +| IPv4 | 0x28 | +| IPv6 | 0x29 | +| Variant(T1, ..., TN) | 0x2A... | +| Dynamic(max_types=N) | 0x2B | +| Custom type (Ring, Polygon, etc) | 0x2C | +| Bool | 0x2D | +| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | +| Nested(name1 T1, ..., nameN TN) | 0x2F... | +| JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP PREFIX skip_path_prefix, SKIP REGEXP skip_path_regexp) | 0x30............ 
| +|---------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| Interval kind binary encoding: |---------------|-----------------| diff --git a/src/DataTypes/FieldToDataType.cpp b/src/DataTypes/FieldToDataType.cpp index 03874279a0b..536d2656021 100644 --- a/src/DataTypes/FieldToDataType.cpp +++ b/src/DataTypes/FieldToDataType.cpp @@ -178,8 +178,7 @@ DataTypePtr FieldToDataType::operator() (const Map & map) const template DataTypePtr FieldToDataType::operator() (const Object &) const { - /// TODO: Do we need different parameters for type Object? - return std::make_shared("json", false); + return std::make_shared(DataTypeObject::SchemaFormat::JSON); } template diff --git a/src/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp index 1cb64b65d3a..9938bb8849a 100644 --- a/src/DataTypes/IDataType.cpp +++ b/src/DataTypes/IDataType.cpp @@ -361,9 +361,10 @@ bool isArray(TYPE data_type) { return WhichDataType(data_type).isArray(); } \ bool isTuple(TYPE data_type) { return WhichDataType(data_type).isTuple(); } \ bool isMap(TYPE data_type) {return WhichDataType(data_type).isMap(); } \ bool isInterval(TYPE data_type) {return WhichDataType(data_type).isInterval(); } \ -bool isObject(TYPE data_type) { return WhichDataType(data_type).isObject(); } \ +bool isObjectDeprecated(TYPE data_type) { return WhichDataType(data_type).isObjectDeprecated(); } \ bool isVariant(TYPE data_type) { return WhichDataType(data_type).isVariant(); } \ bool isDynamic(TYPE data_type) { return WhichDataType(data_type).isDynamic(); } \ +bool isObject(TYPE data_type) { return WhichDataType(data_type).isObject(); } \ bool isNothing(TYPE data_type) { return WhichDataType(data_type).isNothing(); } \ \ bool isColumnedAsNumber(TYPE data_type) \ diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 397ae3d8be9..a7665e610ab 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -432,7 +432,7 @@ struct WhichDataType constexpr bool isMap() const {return idx == TypeIndex::Map; } constexpr bool isSet() const { return idx == TypeIndex::Set; } constexpr bool isInterval() const { return idx == TypeIndex::Interval; } - constexpr bool isObject() const { return idx == TypeIndex::Object; } + constexpr bool isObjectDeprecated() const { return idx == TypeIndex::ObjectDeprecated; } constexpr bool isNothing() const { return idx == TypeIndex::Nothing; } constexpr bool isNullable() const { return idx == TypeIndex::Nullable; } @@ -444,6 +444,7 @@ struct WhichDataType constexpr bool isVariant() const { return idx == TypeIndex::Variant; } constexpr bool isDynamic() const { return idx == TypeIndex::Dynamic; } + constexpr bool isObject() const { return idx == TypeIndex::Object; } }; /// IDataType helpers (alternative for IDataType virtual methods with single point of truth) @@ -502,9 +503,10 @@ bool isArray(TYPE data_type); \ bool isTuple(TYPE data_type); \ bool isMap(TYPE data_type); \ bool isInterval(TYPE data_type); \ -bool isObject(TYPE data_type); \ +bool isObjectDeprecated(TYPE data_type); \ bool isVariant(TYPE 
data_type); \ bool isDynamic(TYPE data_type); \ +bool isObject(TYPE data_type); \ bool isNothing(TYPE data_type); \ \ bool isColumnedAsNumber(TYPE data_type); \ diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 1d525e5987f..4df240daa8f 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -3,28 +3,28 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include namespace DB @@ -149,12 +149,12 @@ static DataTypePtr recreateTupleWithElements(const DataTypeTuple & type_tuple, c } static std::pair convertObjectColumnToTuple( - const ColumnObject & column_object, const DataTypeObject & type_object) + const ColumnObjectDeprecated & column_object, const DataTypeObjectDeprecated & type_object) { if (!column_object.isFinalized()) { auto finalized = column_object.cloneFinalized(); - const auto & finalized_object = assert_cast(*finalized); + const auto & finalized_object = assert_cast(*finalized); return convertObjectColumnToTuple(finalized_object, type_object); } @@ -180,9 +180,9 @@ static std::pair recursivlyConvertDynamicColumnToTuple( if (!type->hasDynamicSubcolumnsDeprecated()) return {column, type}; - if (const auto * type_object = typeid_cast(type.get())) + if (const auto * type_object = typeid_cast(type.get())) { - const auto & column_object = assert_cast(*column); + const auto & column_object = assert_cast(*column); return convertObjectColumnToTuple(column_object, *type_object); } @@ -338,7 +338,7 @@ static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool che for (const auto & [key, subtypes] : subcolumns_types) { assert(!subtypes.empty()); - if (key.getPath() == ColumnObject::COLUMN_NAME_DUMMY) + if (key.getPath() == ColumnObjectDeprecated::COLUMN_NAME_DUMMY) continue; size_t first_dim = getNumberOfDimensions(*subtypes[0]); @@ -354,7 +354,7 @@ static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool che if (tuple_paths.empty()) { - tuple_paths.emplace_back(ColumnObject::COLUMN_NAME_DUMMY); + tuple_paths.emplace_back(ColumnObjectDeprecated::COLUMN_NAME_DUMMY); tuple_types.emplace_back(std::make_shared()); } @@ -421,7 +421,7 @@ static DataTypePtr getLeastCommonTypeForDynamicColumnsImpl( if (!type_in_storage->hasDynamicSubcolumnsDeprecated()) return type_in_storage; - if (isObject(type_in_storage)) + if (isObjectDeprecated(type_in_storage)) return getLeastCommonTypeForObject(concrete_types, check_ambiguos_paths); if (const auto * type_array = typeid_cast(type_in_storage.get())) @@ -463,9 +463,9 @@ DataTypePtr createConcreteEmptyDynamicColumn(const DataTypePtr & type_in_storage if (!type_in_storage->hasDynamicSubcolumnsDeprecated()) return type_in_storage; - if (isObject(type_in_storage)) + if (isObjectDeprecated(type_in_storage)) return std::make_shared( - DataTypes{std::make_shared()}, Names{ColumnObject::COLUMN_NAME_DUMMY}); + DataTypes{std::make_shared()}, Names{ColumnObjectDeprecated::COLUMN_NAME_DUMMY}); if (const auto * type_array = typeid_cast(type_in_storage.get())) return std::make_shared( @@ -807,7 +807,7 @@ DataTypePtr unflattenTuple(const PathsInData & paths, const DataTypes & tuple_ty return unflattenTuple(paths, tuple_types, 
tuple_columns).second; } -std::pair unflattenObjectToTuple(const ColumnObject & column) +std::pair unflattenObjectToTuple(const ColumnObjectDeprecated & column) { const auto & subcolumns = column.getSubcolumns(); @@ -815,7 +815,7 @@ std::pair unflattenObjectToTuple(const ColumnObject & co { auto type = std::make_shared( DataTypes{std::make_shared()}, - Names{ColumnObject::COLUMN_NAME_DUMMY}); + Names{ColumnObjectDeprecated::COLUMN_NAME_DUMMY}); return {type->createColumn()->cloneResized(column.size()), type}; } diff --git a/src/DataTypes/ObjectUtils.h b/src/DataTypes/ObjectUtils.h index 6599d8adef1..80c3c4a14c3 100644 --- a/src/DataTypes/ObjectUtils.h +++ b/src/DataTypes/ObjectUtils.h @@ -6,7 +6,7 @@ #include #include #include -#include +#include namespace DB { @@ -85,7 +85,7 @@ DataTypePtr unflattenTuple( const PathsInData & paths, const DataTypes & tuple_types); -std::pair unflattenObjectToTuple(const ColumnObject & column); +std::pair unflattenObjectToTuple(const ColumnObjectDeprecated & column); std::pair unflattenTuple( const PathsInData & paths, diff --git a/src/DataTypes/Serializations/ISerialization.cpp b/src/DataTypes/Serializations/ISerialization.cpp index 7642a6619b3..9d0d9f7c6eb 100644 --- a/src/DataTypes/Serializations/ISerialization.cpp +++ b/src/DataTypes/Serializations/ISerialization.cpp @@ -202,6 +202,12 @@ String getNameForSubstreamPath( stream_name += "." + it->variant_element_name + ".null"; else if (it->type == SubstreamType::DynamicStructure) stream_name += ".dynamic_structure"; + else if (it->type == SubstreamType::ObjectStructure) + stream_name += ".object_structure"; + else if (it->type == SubstreamType::ObjectSharedData) + stream_name += ".object_shared_data"; + else if (it->type == SubstreamType::ObjectTypedPath || it->type == SubstreamType::ObjectDynamicPath) + stream_name += "." + it->object_path_name; } return stream_name; @@ -401,7 +407,17 @@ bool ISerialization::hasSubcolumnForPath(const SubstreamPath & path, size_t pref || path[last_elem].type == Substream::TupleElement || path[last_elem].type == Substream::ArraySizes || path[last_elem].type == Substream::VariantElement - || path[last_elem].type == Substream::VariantElementNullMap; + || path[last_elem].type == Substream::VariantElementNullMap + || path[last_elem].type == Substream::ObjectTypedPath; +} + +bool ISerialization::isFictitiousSubcolumn(const DB::ISerialization::SubstreamPath & path, size_t prefix_len) +{ + if (prefix_len == 0 || prefix_len > path.size()) + return false; + + size_t last_elem = prefix_len - 1; + return path[last_elem].type == Substream::VariantElementNullMap; } ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len) diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 255dbbfadd2..51fcdaec6ab 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -176,8 +176,8 @@ public: SparseElements, SparseOffsets, - ObjectStructure, - ObjectData, + DeprecatedObjectStructure, + DeprecatedObjectData, VariantDiscriminators, NamedVariantDiscriminators, @@ -189,6 +189,12 @@ public: DynamicData, DynamicStructure, + ObjectData, + ObjectTypedPath, + ObjectDynamicPath, + ObjectSharedData, + ObjectStructure, + Regular, }; @@ -203,6 +209,9 @@ public: /// Name of substream for type from 'named_types'. String name_of_substream; + /// Path name for Object type elements. + String object_path_name; + /// Data for current substream. 
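The new ObjectStructure / ObjectSharedData / ObjectTypedPath / ObjectDynamicPath substream types added to getNameForSubstreamPath() above translate into per-column stream name suffixes. A rough, assumed illustration of what those names could look like (the column name "json", typed path "a.b" and dynamic path "c.d" are made up, not taken from the patch):

#include <iostream>
#include <string>
#include <vector>

int main()
{
    // Assumed column: json JSON(a.b UInt32) with one dynamic path "c.d".
    // Expected stream name suffixes (illustrative only):
    std::vector<std::string> streams =
    {
        "json.object_structure",    // Substream::ObjectStructure
        "json.a.b",                 // Substream::ObjectTypedPath   -> "." + object_path_name
        "json.c.d",                 // Substream::ObjectDynamicPath -> "." + object_path_name
        "json.object_shared_data",  // Substream::ObjectSharedData
    };
    for (const auto & s : streams)
        std::cout << s << '\n';
}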
SubstreamData data; @@ -262,13 +271,13 @@ public: bool use_compact_variant_discriminators_serialization = false; - enum class DynamicStatisticsMode + enum class ObjectAndDynamicStatisticsMode { NONE, /// Don't write statistics. PREFIX, /// Write statistics in prefix. SUFFIX, /// Write statistics in suffix. }; - DynamicStatisticsMode dynamic_write_statistics = DynamicStatisticsMode::NONE; + ObjectAndDynamicStatisticsMode object_and_dynamic_write_statistics = ObjectAndDynamicStatisticsMode::NONE; }; struct DeserializeBinaryBulkSettings @@ -289,7 +298,7 @@ public: /// If not zero, may be used to avoid reallocations while reading column of String type. double avg_value_size_hint = 0; - bool dynamic_read_statistics = false; + bool object_and_dynamic_read_statistics = false; }; /// Call before serializeBinaryBulkWithMultipleStreams chain to write something before first mark. @@ -438,6 +447,7 @@ public: static size_t getArrayLevel(const SubstreamPath & path); static bool hasSubcolumnForPath(const SubstreamPath & path, size_t prefix_len); static SubstreamData createFromPath(const SubstreamPath & path, size_t prefix_len); + static bool isFictitiousSubcolumn(const SubstreamPath & path, size_t prefix_len); protected: template
diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 7609ffc91ca..6cc7a7e2e74 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -117,7 +117,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( writeStringBinary(dynamic_state->variant_type->getName(), *stream); /// Write statistics in prefix if needed. - if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX) + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX) { const auto & statistics = column_dynamic.getStatistics(); for (size_t i = 0; i != variant_info.variant_names.size(); ++i) @@ -156,8 +156,8 @@ void SerializationDynamic::deserializeBinaryBulkStatePrefix( return; auto dynamic_state = std::make_shared(); - dynamic_state->structure_state = structure_state; - dynamic_state->variant_serialization = checkAndGetState(structure_state)->variant_type->getDefaultSerialization(); + dynamic_state->structure_state = std::move(structure_state); + dynamic_state->variant_serialization = checkAndGetState(dynamic_state->structure_state)->variant_type->getDefaultSerialization(); settings.path.push_back(Substream::DynamicData); dynamic_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_state->variant_state, cache); @@ -174,7 +174,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD DeserializeBinaryBulkStatePtr state = nullptr; if (auto cached_state = getFromSubstreamsDeserializeStatesCache(cache, settings.path)) { - state = cached_state; + state = std::move(cached_state); } else if (auto * structure_stream = settings.getter(settings.path)) { @@ -198,18 +198,13 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type of Dynamic nested column, expected Variant, got {}", structure_state->variant_type->getName()); /// Read statistics. - if (settings.dynamic_read_statistics) + if (settings.object_and_dynamic_read_statistics) { - const auto & variants = variant_type->getVariants(); - size_t variant_size; - for (const auto & variant : variants) - { - readVarUInt(variant_size, *structure_stream); - structure_state->statistics.data[variant->getName()] = variant_size; - } + for (const auto & variant : variant_type->getVariants()) + readVarUInt(structure_state->statistics.data[variant->getName()], *structure_stream); } - state = structure_state; + state = std::move(structure_state); addToSubstreamsDeserializeStatesCache(cache, settings.path, state); } @@ -226,10 +221,10 @@ void SerializationDynamic::serializeBinaryBulkStateSuffix( settings.path.pop_back(); if (!stream) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state prefix"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state suffix"); /// Write statistics in suffix if needed.
- if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX) + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX) { for (const auto & variant_name : dynamic_state->variant_names) writeVarUInt(dynamic_state->statistics.data[variant_name], *stream); @@ -246,6 +241,18 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const +{ + size_t tmp_size; + serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants(column, offset, limit, settings, state, tmp_size); +} + +void SerializationDynamic::serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state, + size_t & total_size_of_variants) const { const auto & column_dynamic = assert_cast(column); auto * dynamic_state = checkAndGetState(state); @@ -257,7 +264,7 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( settings.path.push_back(Substream::DynamicData); assert_cast(*dynamic_state->variant_serialization) - .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.data); + .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.data, total_size_of_variants); settings.path.pop_back(); } @@ -538,6 +545,12 @@ void SerializationDynamic::serializeTextJSON(const IColumn & column, size_t row_ dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextJSON(dynamic_column.getVariantColumn(), row_num, ostr, settings); } +void SerializationDynamic::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const auto & dynamic_column = assert_cast(column); + dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextJSONPretty(dynamic_column.getVariantColumn(), row_num, ostr, settings, indent); +} + void SerializationDynamic::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { auto read_field = [&settings](ReadBuffer & buf) diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index 001a3cf87ce..50593c79b0f 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace DB @@ -11,7 +12,7 @@ class SerializationDynamicElement; class SerializationDynamic : public ISerialization { public: - explicit SerializationDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_dynamic_types_) + explicit SerializationDynamic(size_t max_dynamic_types_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) : max_dynamic_types(max_dynamic_types_) { } @@ -59,6 +60,14 @@ public: SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; + void serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state, + size_t & total_size_of_variants) 
const; + void deserializeBinaryBulkWithMultipleStreams( ColumnPtr & column, size_t limit, @@ -89,6 +98,7 @@ public: bool tryDeserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; bool tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.cpp b/src/DataTypes/Serializations/SerializationDynamicElement.cpp index 211f0ac9377..9c698f96888 100644 --- a/src/DataTypes/Serializations/SerializationDynamicElement.cpp +++ b/src/DataTypes/Serializations/SerializationDynamicElement.cpp @@ -48,6 +48,7 @@ void SerializationDynamicElement::enumerateStreams( .withColumn(data.column) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state->variant_element_state); + settings.path.back().data = variant_data; deserialize_state->variant_serialization->enumerateStreams(settings, callback, variant_data); settings.path.pop_back(); } diff --git a/src/DataTypes/Serializations/SerializationJSON.cpp b/src/DataTypes/Serializations/SerializationJSON.cpp new file mode 100644 index 00000000000..b5bf8d1421b --- /dev/null +++ b/src/DataTypes/Serializations/SerializationJSON.cpp @@ -0,0 +1,409 @@ +#include +#include +#include + +#if USE_SIMDJSON +#include +#endif +#if USE_RAPIDJSON +#include +#endif +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INCORRECT_DATA; +} + +template +SerializationJSON::SerializationJSON( + std::unordered_map typed_paths_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_prefixes_to_skip_, + const std::vector & path_regexps_to_skip_, + std::unique_ptr> json_extract_tree_) + : SerializationObject(std::move(typed_paths_serializations_), paths_to_skip_, path_prefixes_to_skip_, path_regexps_to_skip_) + , json_extract_tree(std::move(json_extract_tree_)) +{ +} + +namespace +{ + +/// Struct that represents elements of the JSON path. +/// "a.b.c" -> ["a", "b", "c"] +struct PathElements +{ + explicit PathElements(const String & path) + { + const char * start = path.data(); + const char * end = start + path.size(); + const char * pos = start; + const char * last_dot_pos = pos - 1; + for (pos = start; pos != end; ++pos) + { + if (*pos == '.') + { + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); + last_dot_pos = pos; + } + } + + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); + } + + size_t size() const { return elements.size(); } + + std::vector elements; +}; + +/// Struct that represents a prefix of a JSON path. Used during output of the JSON object. +struct Prefix +{ + /// Shrink current prefix to the common prefix of current prefix and specified path. + /// For example, if current prefix is a.b.c.d and path is a.b.e, then shrink the prefix to a.b. + void shrinkToCommonPrefix(const PathElements & path_elements) + { + /// Don't include last element in path_elements in the prefix. 
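PathElements above splits a dotted JSON path into views over the original string without copying. A simplified standalone version of the same splitting, for illustration only (splitPath is a hypothetical helper, not part of the patch):

#include <iostream>
#include <string>
#include <string_view>
#include <vector>

// Split "a.b.c" into {"a", "b", "c"} as views into the original string.
std::vector<std::string_view> splitPath(const std::string & path)
{
    std::vector<std::string_view> elements;
    size_t start = 0;
    while (true)
    {
        size_t dot = path.find('.', start);
        if (dot == std::string::npos)
        {
            elements.emplace_back(path.data() + start, path.size() - start);
            return elements;
        }
        elements.emplace_back(path.data() + start, dot - start);
        start = dot + 1;
    }
}

int main()
{
    std::string path = "a.b.c";
    for (auto element : splitPath(path))
        std::cout << element << '\n'; // prints a, b, c on separate lines
}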
+ size_t i = 0; + while (i != elements.size() && i != (path_elements.elements.size() - 1) && elements[i].first == path_elements.elements[i]) + ++i; + elements.resize(i); + } + + /// Check is_first flag in current object. + bool isFirstInCurrentObject() const + { + if (elements.empty()) + return root_is_first_flag; + return elements.back().second; + } + + /// Set flag is_first = false in current object. + void setNotFirstInCurrentObject() + { + if (elements.empty()) + root_is_first_flag = false; + else + elements.back().second = false; + } + + size_t size() const { return elements.size(); } + + /// Elements of the prefix: (path element, is_first flag in this prefix). + /// is_first flag indicates if we already serialized some key in the object with such prefix. + std::vector> elements; + bool root_is_first_flag = true; +}; + +} + +template +void SerializationJSON::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, bool pretty, size_t indent) const +{ + const auto & column_object = assert_cast(column); + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + size_t shared_data_offset = shared_data_offsets[ssize_t(row_num) - 1]; + size_t shared_data_end = shared_data_offsets[ssize_t(row_num)]; + + /// We need to convert the set of paths in this row to a JSON object. + /// To do it, we first collect all the paths from current row, then we sort them + /// and construct the resulting JSON object by iterating over the sorted list of paths. + /// For example: + /// b.c, a.b, a.a, b.e, g, h.u.t -> a.a, a.b, b.c, b.e, g, h.u.t -> {"a" : {"a" : ..., "b" : ...}, "b" : {"c" : ..., "e" : ...}, "g" : ..., "h" : {"u" : {"t" : ...}}}. + std::vector sorted_paths; + sorted_paths.reserve(typed_paths.size() + dynamic_paths.size() + (shared_data_end - shared_data_offset)); + for (const auto & [path, _] : typed_paths) + sorted_paths.emplace_back(path); + for (const auto & [path, dynamic_column] : dynamic_paths) + { + /// We consider null value and absence of the path in a row as equivalent cases, because we cannot actually distinguish them. + /// So, we don't output null values at all. + if (!dynamic_column->isNullAt(row_num)) + sorted_paths.emplace_back(path); + } + for (size_t i = shared_data_offset; i != shared_data_end; ++i) + { + auto path = shared_data_paths->getDataAt(i).toString(); + sorted_paths.emplace_back(path); + } + + std::sort(sorted_paths.begin(), sorted_paths.end()); + + if (pretty) + writeCString("{\n", ostr); + else + writeChar('{', ostr); + size_t index_in_shared_data_values = shared_data_offset; + /// current_prefix represents the path of the object we are currently serializing keys in. + Prefix current_prefix; + for (const auto & path : sorted_paths) + { + PathElements path_elements(path); + /// Change prefix to common prefix between current prefix and current path. + /// If prefix changed (it can only decrease), close all finished objects. + /// For example: + /// Current prefix: a.b.c.d + /// Current path: a.b.e.f + /// It means now we have : {..., "a" : {"b" : {"c" : {"d" : ...
+ /// Common prefix will be a.b, so it means we should close objects a.b.c.d and a.b.c: {..., "a" : {"b" : {"c" : {"d" : ...}} + /// and continue serializing keys in object a.b + size_t prev_prefix_size = current_prefix.size(); + current_prefix.shrinkToCommonPrefix(path_elements); + size_t prefix_size = current_prefix.size(); + if (prefix_size != prev_prefix_size) + { + size_t objects_to_close = prev_prefix_size - prefix_size; + if (pretty) + { + writeChar('\n', ostr); + for (size_t i = 0; i != objects_to_close; ++i) + { + writeChar(' ', (indent + prefix_size + objects_to_close - i) * 4, ostr); + if (i != objects_to_close - 1) + writeCString("}\n", ostr); + else + writeChar('}', ostr); + } + } + else + { + for (size_t i = 0; i != objects_to_close; ++i) + writeChar('}', ostr); + } + } + + /// Now we are inside object that has common prefix with current path. + /// We should go inside all objects in current path. + /// From the example above we should open object a.b.e: + /// {..., "a" : {"b" : {"c" : {"d" : ...}}, "e" : { + if (prefix_size + 1 < path_elements.size()) + { + for (size_t i = prefix_size; i != path_elements.size() - 1; ++i) + { + /// Write comma before the key if it's not the first key in this prefix. + if (!current_prefix.isFirstInCurrentObject()) + { + if (pretty) + writeCString(",\n", ostr); + else + writeChar(',', ostr); + } + else + { + current_prefix.setNotFirstInCurrentObject(); + } + + if (pretty) + { + writeChar(' ', (indent + i + 1) * 4, ostr); + writeJSONString(path_elements.elements[i], ostr, settings); + writeCString(" : {\n", ostr); + } + else + { + writeJSONString(path_elements.elements[i], ostr, settings); + writeCString(":{", ostr); + } + + /// Update current prefix. + current_prefix.elements.emplace_back(path_elements.elements[i], true); + } + } + + /// Write comma before the key if it's not the first key in this prefix. + if (!current_prefix.isFirstInCurrentObject()) + { + if (pretty) + writeCString(",\n", ostr); + else + writeChar(',', ostr); + } + else + { + current_prefix.setNotFirstInCurrentObject(); + } + + if (pretty) + { + writeChar(' ', (indent + current_prefix.size() + 1) * 4, ostr); + writeJSONString(path_elements.elements.back(), ostr, settings); + writeCString(" : ", ostr); + } + else + { + writeJSONString(path_elements.elements.back(), ostr, settings); + writeCString(":", ostr); + } + + /// Serialize value of current path. + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + if (pretty) + typed_path_serializations.at(path)->serializeTextJSONPretty(*typed_it->second, row_num, ostr, settings, indent + current_prefix.size() + 1); + else + typed_path_serializations.at(path)->serializeTextJSON(*typed_it->second, row_num, ostr, settings); + } + else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + { + if (pretty) + dynamic_serialization->serializeTextJSONPretty(*dynamic_it->second, row_num, ostr, settings, indent + current_prefix.size() + 1); + else + dynamic_serialization->serializeTextJSON(*dynamic_it->second, row_num, ostr, settings); + } + else + { + /// To serialize value stored in shared data we should first deserialize it from binary format. 
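The comments above describe the whole text-serialization walk: sort the flat paths, then emit nested objects while tracking the common prefix with the previously written path. A self-contained sketch of that idea, stripped of pretty-printing and of the typed/dynamic/shared-data distinction (purely illustrative, not the patch's code):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

static std::vector<std::string> split(const std::string & path)
{
    std::vector<std::string> parts;
    size_t start = 0;
    for (size_t dot; (dot = path.find('.', start)) != std::string::npos; start = dot + 1)
        parts.push_back(path.substr(start, dot - start));
    parts.push_back(path.substr(start));
    return parts;
}

int main()
{
    // Paths must already be sorted, exactly as serializeTextImpl() sorts them.
    std::vector<std::pair<std::string, std::string>> paths =
        {{"a.a", "1"}, {"a.b", "2"}, {"b.c", "3"}, {"g", "4"}, {"h.u.t", "5"}};

    std::string out = "{";
    std::vector<std::string> prefix;   // currently open objects
    bool first_in_current_object = true;

    for (const auto & [path, value] : paths)
    {
        auto parts = split(path);

        // Shrink to the common prefix (excluding the last path element) and close finished objects.
        size_t common = 0;
        while (common < prefix.size() && common + 1 < parts.size() && prefix[common] == parts[common])
            ++common;
        for (size_t i = prefix.size(); i > common; --i)
            out += "}";
        if (prefix.size() > common)
            first_in_current_object = false;
        prefix.resize(common);

        // Open intermediate objects, then write the final key and its value.
        for (size_t i = common; i + 1 < parts.size(); ++i)
        {
            out += first_in_current_object ? "" : ",";
            out += "\"" + parts[i] + "\":{";
            prefix.push_back(parts[i]);
            first_in_current_object = true;
        }
        out += first_in_current_object ? "" : ",";
        out += "\"" + parts.back() + "\":" + value;
        first_in_current_object = false;
    }

    out += std::string(prefix.size(), '}') + "}";
    std::cout << out << '\n';
    // {"a":{"a":1,"b":2},"b":{"c":3},"g":4,"h":{"u":{"t":5}}}
}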
+ auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + column_object.deserializeValueFromSharedData(shared_data_values, index_in_shared_data_values++, *tmp_dynamic_column); + + if (pretty) + dynamic_serialization->serializeTextJSONPretty(*tmp_dynamic_column, 0, ostr, settings, indent + current_prefix.size() + 1); + else + dynamic_serialization->serializeTextJSON(*tmp_dynamic_column, 0, ostr, settings); + } + } + + /// Close all remaining open objects. + if (pretty) + { + writeChar('\n', ostr); + for (size_t i = 0; i != current_prefix.elements.size(); ++i) + { + writeChar(' ', (indent + current_prefix.size() - i) * 4, ostr); + writeCString("}\n", ostr); + } + writeChar(' ', indent * 4, ostr); + writeChar('}', ostr); + } + else + { + for (size_t i = 0; i != current_prefix.elements.size(); ++i) + writeChar('}', ostr); + writeChar('}', ostr); + } +} + +template +void SerializationJSON::deserializeTextImpl(IColumn & column, std::string_view object, const FormatSettings & settings) const +{ + typename Parser::Element document; + auto parser = parsers_pool.get([] { return new Parser; }); + if (!parser->parse(object, document)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot parse JSON object here: {}", object); + + String error; + if (!json_extract_tree->insertResultToColumn(column, document, insert_settings, settings, error)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot insert data into JSON column: {}", error); +} + +template +void SerializationJSON::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationJSON::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readStringUntilEOF(object, istr); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeEscapedString(buf.str(), ostr); +} + +template +void SerializationJSON::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readEscapedString(object, istr); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeQuotedString(buf.str(), ostr); +} + +template +void SerializationJSON::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readQuotedString(object, istr); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeCSVString(buf.str(), ostr); +} + +template +void SerializationJSON::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readCSVString(object, istr, settings.csv); + deserializeTextImpl(column, object, settings); +} + +template +void 
SerializationJSON::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeXMLStringForTextElement(buf.str(), ostr); +} + +template +void SerializationJSON::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationJSON::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + serializeTextImpl(column, row_num, ostr, settings, true, indent); +} + +template +void SerializationJSON::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object_buffer; + auto object_view = readJSONObjectAsViewPossiblyInvalid(istr, object_buffer); + deserializeTextImpl(column, object_view, settings); +} + +#if USE_SIMDJSON +template class SerializationJSON; +#endif +#if USE_RAPIDJSON +template class SerializationJSON; +#else +template class SerializationJSON; +#endif + + + + +} diff --git a/src/DataTypes/Serializations/SerializationJSON.h b/src/DataTypes/Serializations/SerializationJSON.h new file mode 100644 index 00000000000..0c6d2bd164b --- /dev/null +++ b/src/DataTypes/Serializations/SerializationJSON.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/// Class for text serialization/deserialization of the JSON data type. +template +class SerializationJSON : public SerializationObject +{ +public: + SerializationJSON( + std::unordered_map typed_paths_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_prefixes_to_skip_, + const std::vector & path_regexps_to_skip_, + std::unique_ptr> json_extract_tree_); + + void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + virtual void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; + void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + +private: + void serializeTextImpl(const IColumn & column, size_t row_num, 
WriteBuffer & ostr, const FormatSettings & settings, bool pretty = false, size_t indent = 0) const; + void deserializeTextImpl(IColumn & column, std::string_view object, const FormatSettings & settings) const; + + std::unique_ptr> json_extract_tree; + JSONExtractInsertSettings insert_settings; + /// Pool of parser objects to make SerializationJSON thread safe. + mutable SimpleObjectPool parsers_pool; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationJSONElement.cpp b/src/DataTypes/Serializations/SerializationJSONElement.cpp new file mode 100644 index 00000000000..35119cd0764 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationJSONElement.cpp @@ -0,0 +1,3 @@ +// +// Created by Павел Круглов on 05/07/2024. +// diff --git a/src/DataTypes/Serializations/SerializationJSONElement.h b/src/DataTypes/Serializations/SerializationJSONElement.h new file mode 100644 index 00000000000..791ef2eecc5 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationJSONElement.h @@ -0,0 +1,8 @@ +// +// Created by Павел Круглов on 05/07/2024. +// + +#ifndef CLICKHOUSE_SERIALIZATIONJSONELEMENT_H +#define CLICKHOUSE_SERIALIZATIONJSONELEMENT_H + +#endif //CLICKHOUSE_SERIALIZATIONJSONELEMENT_H diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.cpp b/src/DataTypes/Serializations/SerializationLowCardinality.cpp index 40071c4607a..3195a04d348 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.cpp +++ b/src/DataTypes/Serializations/SerializationLowCardinality.cpp @@ -268,9 +268,16 @@ void SerializationLowCardinality::serializeBinaryBulkStateSuffix( void SerializationLowCardinality::deserializeBinaryBulkStatePrefix( DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, - SubstreamsDeserializeStatesCache * /*cache*/) const + SubstreamsDeserializeStatesCache * cache) const { settings.path.push_back(Substream::DictionaryKeys); + + if (auto cached_state = getFromSubstreamsDeserializeStatesCache(cache, settings.path)) + { + state = std::move(cached_state); + return; + } + auto * stream = settings.getter(settings.path); settings.path.pop_back(); diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index c6c87b5aa7b..d4483703c6b 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -1,586 +1,673 @@ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include namespace DB { namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; extern const int INCORRECT_DATA; - extern const int CANNOT_READ_ALL_DATA; - extern const int ARGUMENT_OUT_OF_BOUND; - extern const int CANNOT_PARSE_TEXT; - extern const int EXPERIMENTAL_FEATURE_ERROR; + extern const int LOGICAL_ERROR; } -template -template -void SerializationObject::deserializeTextImpl(IColumn & column, Reader && reader) const +SerializationObject::SerializationObject( + std::unordered_map typed_path_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_prefixes_to_skip_, + const std::vector & path_regexps_to_skip_) + : typed_path_serializations(std::move(typed_path_serializations_)) + , paths_to_skip(paths_to_skip_) + , path_prefixes_to_skip(path_prefixes_to_skip_) + , dynamic_serialization(std::make_shared()) + , 
shared_data_serialization(getTypeOfSharedData()->getDefaultSerialization()) { - auto & column_object = assert_cast(column); + /// We will need sorted order of typed paths to serialize them in order for consistency. + sorted_typed_paths.reserve(typed_path_serializations.size()); + for (const auto & [path, _] : typed_path_serializations) + sorted_typed_paths.emplace_back(path); + std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end()); + for (const auto & regexp_str : path_regexps_to_skip_) + path_regexps_to_skip.emplace_back(regexp_str); +} - String buf; - reader(buf); - std::optional result; +const DataTypePtr & SerializationObject::getTypeOfSharedData() +{ + /// Array(Tuple(String, String)) + static const DataTypePtr type = std::make_shared(std::make_shared(DataTypes{std::make_shared(), std::make_shared()}, Names{"paths", "values"})); + return type; +} - /// Treat empty string as an empty object - /// for better CAST from String to Object. - if (!buf.empty()) +bool SerializationObject::shouldSkipPath(const String & path) const +{ + if (paths_to_skip.contains(path)) + return true; + + for (const auto & prefix : path_prefixes_to_skip) { - auto parser = parsers_pool.get([] { return new Parser; }); - result = parser->parse(buf.data(), buf.size()); - } - else - { - result = ParseResult{}; + if (path.starts_with(prefix)) + return true; } - if (!result) - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot parse object"); - - auto & [paths, values] = *result; - assert(paths.size() == values.size()); - - size_t old_column_size = column_object.size(); - for (size_t i = 0; i < paths.size(); ++i) + for (const auto & regexp : path_regexps_to_skip) { - auto field_info = getFieldInfo(values[i]); - if (field_info.need_fold_dimension) - values[i] = applyVisitor(FieldVisitorFoldDimension(field_info.num_dimensions), std::move(values[i])); - if (isNothing(field_info.scalar_type)) - continue; + if (re2::RE2::FullMatch(path, regexp)) + return true; + } - if (!column_object.hasSubcolumn(paths[i])) + return false; +} + +SerializationObject::ObjectSerializationVersion::ObjectSerializationVersion(UInt64 version) : value(static_cast(version)) +{ + checkVersion(version); +} + +void SerializationObject::ObjectSerializationVersion::checkVersion(UInt64 version) +{ + if (version != BASIC) + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Object structure serialization."); +} + +struct SerializeBinaryBulkStateObject: public ISerialization::SerializeBinaryBulkState +{ + SerializationObject::ObjectSerializationVersion serialization_version; + std::vector sorted_dynamic_paths; + std::unordered_map typed_path_states; + std::unordered_map dynamic_path_states; + ISerialization::SerializeBinaryBulkStatePtr shared_data_state; + /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). 
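The SKIP / SKIP PREFIX / SKIP REGEXP parameters handled by shouldSkipPath() above reduce to three checks per path: exact match, prefix match, and full regexp match. A standalone approximation follows; it uses std::regex purely to stay self-contained, whereas the real code keeps compiled re2 patterns:

#include <iostream>
#include <regex>
#include <string>
#include <unordered_set>
#include <vector>

// Simplified stand-in for SerializationObject::shouldSkipPath().
bool shouldSkipPath(
    const std::string & path,
    const std::unordered_set<std::string> & paths_to_skip,
    const std::vector<std::string> & path_prefixes_to_skip,
    const std::vector<std::regex> & path_regexps_to_skip)
{
    if (paths_to_skip.contains(path))
        return true;
    for (const auto & prefix : path_prefixes_to_skip)
        if (path.starts_with(prefix))
            return true;
    for (const auto & regexp : path_regexps_to_skip)
        if (std::regex_match(path, regexp)) // full match, like re2::RE2::FullMatch
            return true;
    return false;
}

int main()
{
    std::unordered_set<std::string> exact = {"a.b"};
    std::vector<std::string> prefixes = {"tmp."};
    std::vector<std::regex> regexps = {std::regex("trace\\..*")};

    for (const std::string path : {"a.b", "tmp.x", "trace.span.id", "a.c"})
        std::cout << path << " -> " << (shouldSkipPath(path, exact, prefixes, regexps) ? "skip" : "keep") << '\n';
}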
+ ColumnObject::Statistics statistics = { .source = ColumnObject::Statistics::Source::READ, .data = {} }; + + explicit SerializeBinaryBulkStateObject(UInt64 serialization_version_) : serialization_version(serialization_version_) {} +}; + +struct DeserializeBinaryBulkStateObject : public ISerialization::DeserializeBinaryBulkState +{ + std::unordered_map typed_path_states; + std::unordered_map dynamic_path_states; + ISerialization::DeserializeBinaryBulkStatePtr shared_data_state; + ISerialization::DeserializeBinaryBulkStatePtr structure_state; +}; + +void SerializationObject::enumerateStreams(EnumerateStreamsSettings & settings, const StreamCallback & callback, const SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectStructure); + callback(settings.path); + settings.path.pop_back(); + + const auto * column_object = data.column ? &assert_cast(*data.column) : nullptr; + const auto * type_object = data.type ? &assert_cast(*data.type) : nullptr; + const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; + const auto * structure_state = deserialize_state ? checkAndGetState(deserialize_state->structure_state) : nullptr; + settings.path.push_back(Substream::ObjectData); + + /// First, iterate over typed paths in sorted order, we will always serialize them. + for (const auto & path : sorted_typed_paths) + { + settings.path.back().creator = std::make_shared(path); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + const auto & serialization = typed_path_serializations.at(path); + auto path_data = SubstreamData(serialization) + .withType(type_object ? type_object->getTypedPaths().at(path) : nullptr) + .withColumn(column_object ? column_object->getTypedPaths().at(path) : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->typed_path_states.at(path) : nullptr); + settings.path.back().data = path_data; + serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + settings.path.back().creator.reset(); + } + + /// If column or deserialization state was provided, iterate over dynamic paths, + if (column_object || structure_state) + { + /// Enumerate dynamic paths in sorted order for consistency. + const auto * dynamic_paths = column_object ? &column_object->getDynamicPaths() : nullptr; + std::vector sorted_dynamic_paths; + /// If we have deserialize_state we can take sorted dynamic paths list from it. + if (structure_state) { - if (paths[i].hasNested()) - column_object.addNestedSubcolumn(paths[i], field_info, old_column_size); - else - column_object.addSubcolumn(paths[i], old_column_size); + sorted_dynamic_paths = structure_state->sorted_dynamic_paths; + } + else + { + sorted_dynamic_paths.reserve(dynamic_paths->size()); + for (const auto & [path, _] : *dynamic_paths) + sorted_dynamic_paths.push_back(path); + std::sort(sorted_dynamic_paths.begin(), sorted_dynamic_paths.end()); } - auto & subcolumn = column_object.getSubcolumn(paths[i]); - assert(subcolumn.size() == old_column_size); - - subcolumn.insert(std::move(values[i]), std::move(field_info)); - } - - /// Insert default values to missed subcolumns. 
- const auto & subcolumns = column_object.getSubcolumns(); - for (const auto & entry : subcolumns) - { - if (entry->data.size() == old_column_size) + DataTypePtr dynamic_type = std::make_shared(); + for (const auto & path : sorted_dynamic_paths) { - bool inserted = column_object.tryInsertDefaultFromNested(entry); - if (!inserted) - entry->data.insertDefault(); + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(dynamic_serialization) + .withType(dynamic_type) + .withColumn(dynamic_paths ? dynamic_paths->at(path) : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->dynamic_path_states.at(path) : nullptr); + settings.path.back().data = path_data; + dynamic_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); } } - column_object.incrementNumRows(); + settings.path.push_back(Substream::ObjectSharedData); + auto shared_data_substream_data = SubstreamData(shared_data_serialization) + .withType(getTypeOfSharedData()) + .withColumn(column_object ? column_object->getSharedDataPtr() : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); + shared_data_serialization->enumerateStreams(settings, callback, shared_data_substream_data); + settings.path.pop_back(); + settings.path.pop_back(); } -template -void SerializationObject::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const -{ - deserializeTextImpl(column, [&](String & s) { readStringInto(s, istr); }); -} - -template -void SerializationObject::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const -{ - deserializeTextImpl(column, [&](String & s) { settings.tsv.crlf_end_of_line_input ? 
readEscapedStringCRLF(s, istr) : readEscapedString(s, istr); }); -} - -template -void SerializationObject::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings &) const -{ - deserializeTextImpl(column, [&](String & s) { readQuotedStringInto(s, istr); }); -} - -template -void SerializationObject::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const -{ - deserializeTextImpl(column, [&](String & s) { Parser::readJSON(s, istr); }); -} - -template -void SerializationObject::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const -{ - deserializeTextImpl(column, [&](String & s) { readCSVStringInto(s, istr, settings.csv); }); -} - -template -template -void SerializationObject::checkSerializationIsSupported(const TSettings & settings) const -{ - if (settings.position_independent_encoding) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with position independent encoding"); -} - -template -struct SerializationObject::SerializeStateObject : public ISerialization::SerializeBinaryBulkState -{ - DataTypePtr nested_type; - SerializationPtr nested_serialization; - SerializeBinaryBulkStatePtr nested_state; -}; - -template -struct SerializationObject::DeserializeStateObject : public ISerialization::DeserializeBinaryBulkState -{ - BinarySerializationKind kind; - DataTypePtr nested_type; - SerializationPtr nested_serialization; - DeserializeBinaryBulkStatePtr nested_state; -}; - -template -void SerializationObject::serializeBinaryBulkStatePrefix( +void SerializationObject::serializeBinaryBulkStatePrefix( const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings); - if (state) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with non-trivial state"); - const auto & column_object = assert_cast(column); - if (!column_object.isFinalized()) - { - auto finalized = column_object.cloneFinalized(); - serializeBinaryBulkStatePrefix(*finalized, settings, state); - return; - } + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data = column_object.getSharedDataPtr(); settings.path.push_back(Substream::ObjectStructure); auto * stream = settings.getter(settings.path); + settings.path.pop_back(); if (!stream) - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Missing stream for kind of binary serialization"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state prefix"); - auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + /// Write serialization version. + UInt64 serialization_version = ObjectSerializationVersion::Value::BASIC; + writeBinaryLittleEndian(serialization_version, *stream); - writeIntBinary(static_cast(BinarySerializationKind::TUPLE), *stream); - writeStringBinary(tuple_type->getName(), *stream); + /// Write all dynamic paths in sorted order. 
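For orientation before the path-writing code that follows: the structure prefix this function builds is, roughly, the serialization version (UInt64, little endian), the number of dynamic paths (varint), each dynamic path as a length-prefixed string and, in PREFIX statistics mode, one varint per dynamic path with its number of non-null values. The sketch below assembles such a byte prefix with toy helpers; the helper implementations and the example paths are assumptions for illustration, not the patch's code:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-ins for ClickHouse's writeVarUInt / writeStringBinary.
void writeVarUInt(uint64_t x, std::string & out)
{
    while (x >= 0x80)
    {
        out.push_back(char(0x80 | (x & 0x7F)));
        x >>= 7;
    }
    out.push_back(char(x));
}

void writeStringBinary(const std::string & s, std::string & out)
{
    writeVarUInt(s.size(), out);
    out += s;
}

int main()
{
    // Assumed Object column with two dynamic paths (already in sorted order) and PREFIX statistics mode.
    std::vector<std::string> sorted_dynamic_paths = {"a", "b"};
    std::vector<uint64_t> non_null_counts = {42, 7}; // made-up per-path statistics

    std::string prefix;

    uint64_t serialization_version = 0; // ObjectSerializationVersion::BASIC
    for (size_t i = 0; i != 8; ++i)     // UInt64 written little-endian
        prefix.push_back(char((serialization_version >> (8 * i)) & 0xFF));

    writeVarUInt(sorted_dynamic_paths.size(), prefix);
    for (const auto & path : sorted_dynamic_paths)
        writeStringBinary(path, prefix);

    // PREFIX statistics mode: one varint of non-null values per dynamic path.
    for (uint64_t count : non_null_counts)
        writeVarUInt(count, prefix);

    std::cout << "structure prefix size: " << prefix.size() << " bytes\n";
}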
+    auto object_state = std::make_shared(serialization_version);
+    object_state->sorted_dynamic_paths.reserve(dynamic_paths.size());
+    for (const auto & [path, _] : dynamic_paths)
+        object_state->sorted_dynamic_paths.push_back(path);
+    std::sort(object_state->sorted_dynamic_paths.begin(), object_state->sorted_dynamic_paths.end());
+    writeVarUInt(object_state->sorted_dynamic_paths.size(), *stream);
+    for (const auto & path : object_state->sorted_dynamic_paths)
+        writeStringBinary(path, *stream);
-    auto state_object = std::make_shared();
-    state_object->nested_type = tuple_type;
-    state_object->nested_serialization = tuple_type->getDefaultSerialization();
-
-    settings.path.back() = Substream::ObjectData;
-    state_object->nested_serialization->serializeBinaryBulkStatePrefix(*tuple_column, settings, state_object->nested_state);
-
-    state = std::move(state_object);
-    settings.path.pop_back();
-}
-
-template 
-void SerializationObject::serializeBinaryBulkStateSuffix(
-    SerializeBinaryBulkSettings & settings,
-    SerializeBinaryBulkStatePtr & state) const
-{
-    checkSerializationIsSupported(settings);
-    auto * state_object = checkAndGetState(state);
+    /// Write statistics in prefix if needed.
+    if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX)
+    {
+        const auto & statistics = column_object.getStatistics();
+        for (const auto & path : object_state->sorted_dynamic_paths)
+        {
+            size_t number_of_non_null_values = 0;
+            /// Check if we can use statistics stored in the column. There are two possible sources
+            /// of these statistics:
+            /// - statistics calculated during merge of some data parts (Statistics::Source::MERGE)
+            /// - statistics read from the data part during deserialization of Object column (Statistics::Source::READ).
+            /// We can rely only on statistics calculated during the merge, because a column whose statistics were read
+            /// during deserialization from some data part could have been filtered/limited/transformed/etc, so those statistics can be outdated.
+            if (!statistics.data.empty() && statistics.source == ColumnObject::Statistics::Source::MERGE)
+                number_of_non_null_values = statistics.data.at(path);
+            /// Otherwise we can only use the path column from the current object column.
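+            /// (i.e. count the rows of the path column that are not default/NULL values).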
+ else + number_of_non_null_values = (dynamic_paths.at(path)->size() - dynamic_paths.at(path)->getNumberOfDefaultRows()); + writeVarUInt(number_of_non_null_values, *stream); + } + } settings.path.push_back(Substream::ObjectData); - state_object->nested_serialization->serializeBinaryBulkStateSuffix(settings, state_object->nested_state); + + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->serializeBinaryBulkStatePrefix(*typed_paths.at(path), settings, object_state->typed_path_states[path]); + settings.path.pop_back(); + } + + for (const auto & path : object_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->serializeBinaryBulkStatePrefix(*dynamic_paths.at(path), settings, object_state->dynamic_path_states[path]); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->serializeBinaryBulkStatePrefix(*shared_data, settings, object_state->shared_data_state); settings.path.pop_back(); + settings.path.pop_back(); + + state = std::move(object_state); } -template -void SerializationObject::deserializeBinaryBulkStatePrefix( +void SerializationObject::deserializeBinaryBulkStatePrefix( DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const { - checkSerializationIsSupported(settings); - if (state) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with non-trivial state"); + auto structure_state = deserializeObjectStructureStatePrefix(settings, cache); + if (!structure_state) + return; - settings.path.push_back(Substream::ObjectStructure); - auto * stream = settings.getter(settings.path); - settings.path.pop_back(); - - if (!stream) - throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, - "Cannot read kind of binary serialization of DataTypeObject, because its stream is missing"); - - UInt8 kind_raw; - readIntBinary(kind_raw, *stream); - auto kind = magic_enum::enum_cast(kind_raw); - if (!kind) - throw Exception(ErrorCodes::INCORRECT_DATA, - "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); - - auto state_object = std::make_shared(); - state_object->kind = *kind; - - if (state_object->kind == BinarySerializationKind::TUPLE) - { - String data_type_name; - readStringBinary(data_type_name, *stream); - state_object->nested_type = DataTypeFactory::instance().get(data_type_name); - state_object->nested_serialization = state_object->nested_type->getDefaultSerialization(); - - if (!isTuple(state_object->nested_type)) - throw Exception(ErrorCodes::INCORRECT_DATA, - "Data of type Object should be written as Tuple, got: {}", data_type_name); - } - else if (state_object->kind == BinarySerializationKind::STRING) - { - state_object->nested_type = std::make_shared(); - state_object->nested_serialization = std::make_shared(); - } - else - { - throw Exception(ErrorCodes::INCORRECT_DATA, - "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); - } + auto object_state = std::make_shared(); + object_state->structure_state = std::move(structure_state); settings.path.push_back(Substream::ObjectData); - state_object->nested_serialization->deserializeBinaryBulkStatePrefix(settings, state_object->nested_state, cache); - settings.path.pop_back(); - state 
= std::move(state_object); + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->deserializeBinaryBulkStatePrefix(settings, object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + + const auto & sorted_dynamic_paths = checkAndGetState(object_state->structure_state)->sorted_dynamic_paths; + for (const auto & path : sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->deserializeBinaryBulkStatePrefix(settings, object_state->dynamic_path_states[path], cache); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, object_state->shared_data_state, cache); + settings.path.pop_back(); + settings.path.pop_back(); + + state = std::move(object_state); } -template -void SerializationObject::serializeBinaryBulkWithMultipleStreams( +ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeObjectStructureStatePrefix( + DeserializeBinaryBulkSettings & settings, SubstreamsDeserializeStatesCache * cache) +{ + settings.path.push_back(Substream::ObjectStructure); + + DeserializeBinaryBulkStatePtr state = nullptr; + /// Check if we already deserialized this state. It can happen when we read both object column and its subcolumns. + if (auto cached_state = getFromSubstreamsDeserializeStatesCache(cache, settings.path)) + { + state = cached_state; + } + else if (auto * structure_stream = settings.getter(settings.path)) + { + /// Read structure serialization version. + UInt64 serialization_version; + readBinaryLittleEndian(serialization_version, *structure_stream); + auto structure_state = std::make_shared(serialization_version); + /// Read the sorted list of dynamic paths. + size_t dynamic_paths_size; + readVarUInt(dynamic_paths_size, *structure_stream); + structure_state->sorted_dynamic_paths.reserve(dynamic_paths_size); + structure_state->dynamic_paths.reserve(dynamic_paths_size); + for (size_t i = 0; i != dynamic_paths_size; ++i) + { + structure_state->sorted_dynamic_paths.emplace_back(); + readStringBinary(structure_state->sorted_dynamic_paths.back(), *structure_stream); + structure_state->dynamic_paths.insert(structure_state->sorted_dynamic_paths.back()); + } + + /// Read statistics if needed. 
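+        /// Statistics are stored as one VarUInt per dynamic path, in the same sorted order in which the paths were written.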
+ if (settings.object_and_dynamic_read_statistics) + { + for (const auto & path : structure_state->sorted_dynamic_paths) + readVarUInt(structure_state->statistics.data[path], *structure_stream); + } + + state = std::move(structure_state); + addToSubstreamsDeserializeStatesCache(cache, settings.path, state); + } + + settings.path.pop_back(); + return state; +} + +void SerializationObject::serializeBinaryBulkWithMultipleStreams( const IColumn & column, size_t offset, size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings); const auto & column_object = assert_cast(column); - auto * state_object = checkAndGetState(state); - - if (!column_object.isFinalized()) - { - auto finalized = column_object.cloneFinalized(); - serializeBinaryBulkWithMultipleStreams(*finalized, offset, limit, settings, state); - return; - } - - auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); - - if (!state_object->nested_type->equals(*tuple_type)) - { - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Types of internal column of Object mismatched. Expected: {}, Got: {}", - state_object->nested_type->getName(), tuple_type->getName()); - } + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data = column_object.getSharedDataPtr(); + auto * object_state = checkAndGetState(state); settings.path.push_back(Substream::ObjectData); - if (auto * stream = settings.getter(settings.path)) + + for (const auto & path : sorted_typed_paths) { - state_object->nested_serialization->serializeBinaryBulkWithMultipleStreams( - *tuple_column, offset, limit, settings, state_object->nested_state); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->serializeBinaryBulkWithMultipleStreams(*typed_paths.at(path), offset, limit, settings, object_state->typed_path_states[path]); + settings.path.pop_back(); } + const auto * dynamic_serialization_typed = assert_cast(dynamic_serialization.get()); + for (const auto & path : object_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto it = dynamic_paths.find(path); + if (it == dynamic_paths.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Dynamic structure mismatch for Object column: dynamic path '{}' is not found in the column", path); + size_t number_of_non_null_values = 0; + dynamic_serialization_typed->serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants(*it->second, offset, limit, settings, object_state->dynamic_path_states[path], number_of_non_null_values); + object_state->statistics.data[path] += number_of_non_null_values; + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->serializeBinaryBulkWithMultipleStreams(*shared_data, offset, limit, settings, object_state->shared_data_state); + settings.path.pop_back(); settings.path.pop_back(); } -template -void SerializationObject::deserializeBinaryBulkWithMultipleStreams( +void SerializationObject::serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const +{ + auto * object_state = checkAndGetState(state); + settings.path.push_back(Substream::ObjectStructure); + auto * stream = settings.getter(settings.path); + 
settings.path.pop_back(); + + if (!stream) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state suffix"); + + /// Write statistics in suffix if needed. + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX) + { + for (const auto & path : object_state->sorted_dynamic_paths) + writeVarUInt(object_state->statistics.data[path], *stream); + } + + settings.path.push_back(Substream::ObjectData); + + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->serializeBinaryBulkStateSuffix(settings, object_state->typed_path_states[path]); + settings.path.pop_back(); + } + + for (const auto & path : object_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->serializeBinaryBulkStateSuffix(settings, object_state->dynamic_path_states[path]); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->serializeBinaryBulkStateSuffix(settings, object_state->shared_data_state); + settings.path.pop_back(); + settings.path.pop_back(); +} + +void SerializationObject::deserializeBinaryBulkWithMultipleStreams( ColumnPtr & column, size_t limit, DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsCache * cache) const { - checkSerializationIsSupported(settings); - if (!column->empty()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject cannot be deserialized to non-empty column"); - + auto * object_state = checkAndGetState(state); + auto * structure_state = checkAndGetState(object_state->structure_state); auto mutable_column = column->assumeMutable(); auto & column_object = assert_cast(*mutable_column); - auto * state_object = checkAndGetState(state); + /// If it's a new object column, set dynamic paths and statistics. 
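+    /// Both are taken from the structure state filled in deserializeBinaryBulkStatePrefix.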
+ if (column_object.empty()) + { + column_object.setDynamicPaths(structure_state->sorted_dynamic_paths); + column_object.setStatistics(structure_state->statistics); + } + + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + auto & shared_data = column_object.getSharedDataPtr(); settings.path.push_back(Substream::ObjectData); - if (state_object->kind == BinarySerializationKind::STRING) - deserializeBinaryBulkFromString(column_object, limit, settings, *state_object, cache); - else - deserializeBinaryBulkFromTuple(column_object, limit, settings, *state_object, cache); + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->deserializeBinaryBulkWithMultipleStreams(typed_paths[path], limit, settings, object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + for (const auto & path : structure_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_paths[path], limit, settings, object_state->dynamic_path_states[path], cache); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(shared_data, limit, settings, object_state->shared_data_state, cache); + settings.path.pop_back(); settings.path.pop_back(); - column_object.checkConsistency(); - column_object.finalize(); - column = std::move(mutable_column); } -template -void SerializationObject::deserializeBinaryBulkFromString( - ColumnObject & column_object, - size_t limit, - DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const +void SerializationObject::serializeBinary(const Field & field, WriteBuffer & ostr, const DB::FormatSettings & settings) const { - ColumnPtr column_string = state.nested_type->createColumn(); - state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( - column_string, limit, settings, state.nested_state, cache); - - size_t input_rows_count = column_string->size(); - column_object.reserve(input_rows_count); - - FormatSettings format_settings; - for (size_t i = 0; i < input_rows_count; ++i) + auto & object = field.get(); + /// Serialize number of paths and then pairs (path, value). + writeVarUInt(object.size(), ostr); + for (const auto & [path, value] : object) { - const auto & val = column_string->getDataAt(i); - ReadBufferFromMemory read_buffer(val.data, val.size); - deserializeWholeText(column_object, read_buffer, format_settings); - - if (!read_buffer.eof()) - throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, - "Cannot parse string to column Object. 
Expected eof"); - } -} - -template -void SerializationObject::deserializeBinaryBulkFromTuple( - ColumnObject & column_object, - size_t limit, - DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const -{ - ColumnPtr column_tuple = state.nested_type->createColumn(); - state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( - column_tuple, limit, settings, state.nested_state, cache); - - auto [tuple_paths, tuple_types] = flattenTuple(state.nested_type); - auto flattened_tuple = flattenTuple(column_tuple); - const auto & tuple_columns = assert_cast(*flattened_tuple).getColumns(); - - assert(tuple_paths.size() == tuple_types.size()); - size_t num_subcolumns = tuple_paths.size(); - - if (tuple_columns.size() != num_subcolumns) - throw Exception(ErrorCodes::INCORRECT_DATA, - "Inconsistent type ({}) and column ({}) while reading column of type Object", - state.nested_type->getName(), column_tuple->getName()); - - for (size_t i = 0; i < num_subcolumns; ++i) - column_object.addSubcolumn(tuple_paths[i], tuple_columns[i]->assumeMutable()); -} - -template -void SerializationObject::serializeBinary(const Field &, WriteBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -template -void SerializationObject::deserializeBinary(Field &, ReadBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -template -void SerializationObject::serializeBinary(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -template -void SerializationObject::deserializeBinary(IColumn &, ReadBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -/// TODO: use format different of JSON in serializations. 
- -template -void SerializationObject::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - const auto & column_object = assert_cast(column); - const auto & subcolumns = column_object.getSubcolumns(); - - writeChar('{', ostr); - for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) - { - const auto & entry = *it; - if (it != subcolumns.begin()) - writeCString(",", ostr); - - writeDoubleQuoted(entry->path.getPath(), ostr); - writeChar(':', ostr); - serializeTextFromSubcolumn(entry->data, row_num, ostr, settings); - } - writeChar('}', ostr); -} - -template -template -void SerializationObject::serializeTextFromSubcolumn( - const ColumnObject::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const -{ - const auto & least_common_type = subcolumn.getLeastCommonType(); - - if (subcolumn.isFinalized()) - { - const auto & finalized_column = subcolumn.getFinalizedColumn(); - auto info = least_common_type->getSerializationInfo(finalized_column); - auto serialization = least_common_type->getSerialization(*info); - if constexpr (pretty_json) - serialization->serializeTextJSONPretty(finalized_column, row_num, ostr, settings, indent); + writeStringBinary(path, ostr); + if (auto it = typed_path_serializations.find(path); it != typed_path_serializations.end()) + it->second->serializeBinary(value, ostr, settings); else - serialization->serializeTextJSON(finalized_column, row_num, ostr, settings); - return; + dynamic_serialization->serializeBinary(value, ostr, settings); + } +} + +void SerializationObject::serializeBinary(const IColumn & col, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + const auto & column_object = assert_cast(col); + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + size_t offset = shared_data_offsets[ssize_t(row_num) - 1]; + size_t end = shared_data_offsets[ssize_t(row_num)]; + + /// Serialize number of paths and then pairs (path, value). + writeVarUInt(typed_paths.size() + dynamic_paths.size() + (end - offset), ostr); + + for (const auto & [path, column] : typed_paths) + { + writeStringBinary(path, ostr); + typed_path_serializations.at(path)->serializeBinary(*column, row_num, ostr, settings); } - size_t ind = row_num; - if (ind < subcolumn.getNumberOfDefaultsInPrefix()) + for (const auto & [path, column] : dynamic_paths) { - /// Suboptimal, but it should happen rarely. 
- auto tmp_column = subcolumn.getLeastCommonType()->createColumn(); - tmp_column->insertDefault(); - - auto info = least_common_type->getSerializationInfo(*tmp_column); - auto serialization = least_common_type->getSerialization(*info); - if constexpr (pretty_json) - serialization->serializeTextJSONPretty(*tmp_column, 0, ostr, settings, indent); - else - serialization->serializeTextJSON(*tmp_column, 0, ostr, settings); - return; + writeStringBinary(path, ostr); + dynamic_serialization->serializeBinary(*column, row_num, ostr, settings); } - ind -= subcolumn.getNumberOfDefaultsInPrefix(); - for (const auto & part : subcolumn.getData()) + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + for (size_t i = offset; i != end; ++i) { - if (ind < part->size()) + writeStringBinary(shared_data_paths->getDataAt(i), ostr); + auto value = shared_data_values->getDataAt(i); + ostr.write(value.data, value.size); + } +} + +void SerializationObject::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const +{ + Object object; + size_t number_of_paths; + readVarUInt(number_of_paths, istr); + /// Read pairs (path, value). + for (size_t i = 0; i != number_of_paths; ++i) + { + String path; + readStringBinary(path, istr); + if (!shouldSkipPath(path)) { - auto part_type = getDataTypeByColumn(*part); - auto info = part_type->getSerializationInfo(*part); - auto serialization = part_type->getSerialization(*info); - if constexpr (pretty_json) - serialization->serializeTextJSONPretty(*part, ind, ostr, settings, indent); + if (auto it = typed_path_serializations.find(path); it != typed_path_serializations.end()) + it->second->deserializeBinary(object[path], istr, settings); else - serialization->serializeTextJSON(*part, ind, ostr, settings); - return; + dynamic_serialization->deserializeBinary(object[path], istr, settings); + } + else + { + /// Skip value of this path. + Field tmp; + dynamic_serialization->deserializeBinary(tmp, istr, settings); + } + } + + field = std::move(object); +} + +/// Restore column object to the state with previous size. +/// We can use it in case of an exception during deserialization. 
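+/// All typed path columns, dynamic path columns and the shared data are truncated back to prev_size rows.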
+void SerializationObject::restoreColumnObject(ColumnObject & column_object, size_t prev_size)
+{
+    auto & typed_paths = column_object.getTypedPaths();
+    auto & dynamic_paths = column_object.getDynamicPaths();
+    auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues();
+    auto & shared_data_offsets = column_object.getSharedDataOffsets();
+
+    for (auto & [_, column] : typed_paths)
+    {
+        if (column->size() > prev_size)
+            column->popBack(column->size() - prev_size);
+    }
+
+    for (auto & [_, column] : dynamic_paths)
+    {
+        if (column->size() > prev_size)
+            column->popBack(column->size() - prev_size);
+    }
+
+    if (shared_data_offsets.size() > prev_size)
+        shared_data_offsets.resize(prev_size);
+    size_t prev_shared_data_offset = shared_data_offsets.back();
+    if (shared_data_paths->size() > prev_shared_data_offset)
+        shared_data_paths->popBack(shared_data_paths->size() - prev_shared_data_offset);
+    if (shared_data_values->size() > prev_shared_data_offset)
+        shared_data_values->popBack(shared_data_values->size() - prev_shared_data_offset);
+}
+
+void SerializationObject::deserializeBinary(IColumn & col, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    auto & column_object = assert_cast(col);
+    auto & typed_paths = column_object.getTypedPaths();
+    auto & dynamic_paths = column_object.getDynamicPaths();
+    auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues();
+    auto & shared_data_offsets = column_object.getSharedDataOffsets();
+
+    size_t number_of_paths;
+    readVarUInt(number_of_paths, istr);
+    std::vector> paths_and_values_for_shared_data;
+    size_t prev_size = column_object.size();
+    try
+    {
+        /// Read pairs (path, value).
+        for (size_t i = 0; i != number_of_paths; ++i)
+        {
+            String path;
+            readStringBinary(path, istr);
+            if (!shouldSkipPath(path))
+            {
+                /// Check if we have this path in typed paths.
+                if (auto typed_it = typed_path_serializations.find(path); typed_it != typed_path_serializations.end())
+                {
+                    auto & typed_column = typed_paths[path];
+                    /// Check if we already had this path.
+                    if (typed_column->size() > prev_size)
+                    {
+                        if (!settings.json.type_json_skip_duplicated_paths)
+                            throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of Object type: {}", path);
+                    }
+                    else
+                    {
+                        typed_it->second->deserializeBinary(*typed_column, istr, settings);
+                    }
+                }
+                /// Check if we have this path in dynamic paths.
+                else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end())
+                {
+                    /// Check if we already had this path.
+                    if (dynamic_it->second->size() > prev_size)
+                    {
+                        if (!settings.json.type_json_skip_duplicated_paths)
+                            throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of Object type: {}", path);
+                    }
+
+                    dynamic_serialization->deserializeBinary(*dynamic_it->second, istr, settings);
+                }
+                /// Try to add a new dynamic path.
+                else if (auto * dynamic_column = column_object.tryToAddNewDynamicPath(path))
+                {
+                    dynamic_serialization->deserializeBinary(*dynamic_column, istr, settings);
+                }
+                /// Otherwise this path should go to shared data.
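+                /// The raw bytes of the value are captured into a string (a temporary Dynamic column only drives the parsing),
+                /// and the collected (path, value) pairs are sorted and inserted into the shared data after the loop.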
+ else + { + auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + String value; + readParsedValueIntoString(value, istr, [&](ReadBuffer & buf){ dynamic_serialization->deserializeBinary(*tmp_dynamic_column, buf, settings); }); + paths_and_values_for_shared_data.emplace_back(std::move(path), std::move(value)); + } + } + else + { + /// Skip value of this path. + Field tmp; + dynamic_serialization->deserializeBinary(tmp, istr, settings); + } } - ind -= part->size(); + std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end()); + for (size_t i = 0; i != paths_and_values_for_shared_data.size(); ++i) + { + const auto & [path, value] = paths_and_values_for_shared_data[i]; + if (i != 0 && path == paths_and_values_for_shared_data[i - 1].first) + { + if (!settings.json.type_json_skip_duplicated_paths) + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of Object type: {}", path); + } + else + { + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value.data(), value.size()); + } + } + shared_data_offsets.push_back(shared_data_paths->size()); } - - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for text serialization is out of range", row_num); -} - -template -void SerializationObject::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - serializeTextImpl(column, row_num, ostr, settings); -} - -template -void SerializationObject::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeEscapedString(ostr_str.str(), ostr); -} - -template -void SerializationObject::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeQuotedString(ostr_str.str(), ostr); -} - -template -void SerializationObject::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - serializeTextImpl(column, row_num, ostr, settings); -} - -template -void SerializationObject::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeCSVString(ostr_str.str(), ostr); -} - -template -void SerializationObject::serializeTextMarkdown( - const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - if (settings.markdown.escape_special_characters) + catch (...) 
{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeMarkdownEscapedString(ostr_str.str(), ostr); - } - else - { - serializeTextEscaped(column, row_num, ostr, settings); + restoreColumnObject(column_object, prev_size); + throw; } } -template -void SerializationObject::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +SerializationPtr SerializationObject::TypedPathSubcolumnCreator::create(const DB::SerializationPtr & prev) const { - const auto & column_object = assert_cast(column); - const auto & subcolumns = column_object.getSubcolumns(); - - writeCString("{\n", ostr); - for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) - { - const auto & entry = *it; - if (it != subcolumns.begin()) - writeCString(",\n", ostr); - - writeChar(' ', (indent + 1) * 4, ostr); - writeDoubleQuoted(entry->path.getPath(), ostr); - writeCString(": ", ostr); - serializeTextFromSubcolumn(entry->data, row_num, ostr, settings, indent + 1); - } - writeChar('\n', ostr); - writeChar(' ', indent * 4, ostr); - writeChar('}', ostr); -} - - -SerializationPtr getObjectSerialization(const String & schema_format) -{ - if (schema_format == "json") - { -#if USE_SIMDJSON - return std::make_shared>>(); -#elif USE_RAPIDJSON - return std::make_shared>>(); -#else - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "To use data type Object with JSON format ClickHouse should be built with Simdjson or Rapidjson"); -#endif - } - - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unknown schema format '{}'", schema_format); + return std::make_shared(prev, path); } } + diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 4cb7d0ab6a8..8702b0c7f64 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -1,34 +1,44 @@ #pragma once #include -#include -#include +#include +#include namespace DB { -/** Serialization for data type Object. - * Supported only text serialization/deserialization. - * and binary bulk serialization/deserialization without position independent - * encoding, i.e. serialization/deserialization into Native format. - */ -template +class SerializationObjectDynamicPath; +class SerializationSubObject; + +/// Class for binary serialization/deserialization of an Object type (currently only JSON). class SerializationObject : public ISerialization { public: - /** In Native format ColumnObject can be serialized - * in two formats: as Tuple or as String. - * The format is the following: - * - * 1 byte -- 0 if Tuple, 1 if String. - * [type_name] -- Only for tuple serialization. - * ... data of internal column ... - * - * ClickHouse client serializazes objects as tuples. - * String serialization exists for clients, which cannot - * do parsing by themselves and they can send raw data as - * string. It will be parsed on the server side. - */ + /// Serialization can change in future. Let's introduce serialization version. 
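+    /// The version is written first into the ObjectStructure substream; BASIC is currently the only value.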
+ struct ObjectSerializationVersion + { + enum Value + { + BASIC = 0, + }; + + Value value; + + static void checkVersion(UInt64 version); + + explicit ObjectSerializationVersion(UInt64 version); + }; + + SerializationObject( + std::unordered_map typed_path_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_prefixes_to_skip_, + const std::vector & path_regexps_to_skip_); + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( const IColumn & column, @@ -63,59 +73,54 @@ public: void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; - void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; - void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextMarkdown(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - - void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + static void restoreColumnObject(ColumnObject & column_object, size_t prev_size); private: - enum class BinarySerializationKind : UInt8 + friend SerializationObjectDynamicPath; + friend SerializationSubObject; + + /// State of an Object structure. Can be also used during deserializing of Object subcolumns. + struct DeserializeBinaryBulkStateObjectStructure : public ISerialization::DeserializeBinaryBulkState { - TUPLE = 0, - STRING = 1, + ObjectSerializationVersion structure_version; + std::vector sorted_dynamic_paths; + std::unordered_set dynamic_paths; + /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). 
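+        /// Filled from the structure prefix when object_and_dynamic_read_statistics is set.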
+ ColumnObject::Statistics statistics = {.source = ColumnObject::Statistics::Source::READ, .data = {}}; + + explicit DeserializeBinaryBulkStateObjectStructure(UInt64 structure_version_) : structure_version(structure_version_) {} }; - struct SerializeStateObject; - struct DeserializeStateObject; - - void deserializeBinaryBulkFromString( - ColumnObject & column_object, - size_t limit, + static DeserializeBinaryBulkStatePtr deserializeObjectStructureStatePrefix( DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const; + SubstreamsDeserializeStatesCache * cache); - void deserializeBinaryBulkFromTuple( - ColumnObject & column_object, - size_t limit, - DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const; + /// Shared data has type Array(Tuple(String, String)). + static const DataTypePtr & getTypeOfSharedData(); - template - void checkSerializationIsSupported(const TSettings & settings) const; + struct TypedPathSubcolumnCreator : public ISubcolumnCreator + { + String path; - template - void deserializeTextImpl(IColumn & column, Reader && reader) const; + TypedPathSubcolumnCreator(const String & path_) : path(path_) {} - void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const; + DataTypePtr create(const DataTypePtr & prev) const override { return prev; } + ColumnPtr create(const ColumnPtr & prev) const override { return prev; } + SerializationPtr create(const SerializationPtr & prev) const override; + }; - template - void serializeTextFromSubcolumn(const ColumnObject::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent = 0) const; +protected: + bool shouldSkipPath(const String & path) const; - /// Pool of parser objects to make SerializationObject thread safe. - mutable SimpleObjectPool parsers_pool; + std::unordered_map typed_path_serializations; + std::unordered_set paths_to_skip; + std::vector path_prefixes_to_skip; + std::list path_regexps_to_skip; + SerializationPtr dynamic_serialization; + +private: + std::vector sorted_typed_paths; + SerializationPtr shared_data_serialization; }; -SerializationPtr getObjectSerialization(const String & schema_format); - } diff --git a/src/DataTypes/Serializations/SerializationObjectDeprecated.cpp b/src/DataTypes/Serializations/SerializationObjectDeprecated.cpp new file mode 100644 index 00000000000..4e9ebf6c03d --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDeprecated.cpp @@ -0,0 +1,586 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int INCORRECT_DATA; + extern const int CANNOT_READ_ALL_DATA; + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int CANNOT_PARSE_TEXT; + extern const int EXPERIMENTAL_FEATURE_ERROR; +} + +template +template +void SerializationObjectDeprecated::deserializeTextImpl(IColumn & column, Reader && reader) const +{ + auto & column_object = assert_cast(column); + + String buf; + reader(buf); + std::optional result; + + /// Treat empty string as an empty object + /// for better CAST from String to Object. 
+ if (!buf.empty()) + { + auto parser = parsers_pool.get([] { return new Parser; }); + result = parser->parse(buf.data(), buf.size()); + } + else + { + result = ParseResult{}; + } + + if (!result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot parse object"); + + auto & [paths, values] = *result; + assert(paths.size() == values.size()); + + size_t old_column_size = column_object.size(); + for (size_t i = 0; i < paths.size(); ++i) + { + auto field_info = getFieldInfo(values[i]); + if (field_info.need_fold_dimension) + values[i] = applyVisitor(FieldVisitorFoldDimension(field_info.num_dimensions), std::move(values[i])); + if (isNothing(field_info.scalar_type)) + continue; + + if (!column_object.hasSubcolumn(paths[i])) + { + if (paths[i].hasNested()) + column_object.addNestedSubcolumn(paths[i], field_info, old_column_size); + else + column_object.addSubcolumn(paths[i], old_column_size); + } + + auto & subcolumn = column_object.getSubcolumn(paths[i]); + assert(subcolumn.size() == old_column_size); + + subcolumn.insert(std::move(values[i]), std::move(field_info)); + } + + /// Insert default values to missed subcolumns. + const auto & subcolumns = column_object.getSubcolumns(); + for (const auto & entry : subcolumns) + { + if (entry->data.size() == old_column_size) + { + bool inserted = column_object.tryInsertDefaultFromNested(entry); + if (!inserted) + entry->data.insertDefault(); + } + } + + column_object.incrementNumRows(); +} + +template +void SerializationObjectDeprecated::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const +{ + deserializeTextImpl(column, [&](String & s) { readStringInto(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + deserializeTextImpl(column, [&](String & s) { settings.tsv.crlf_end_of_line_input ? 
readEscapedStringCRLF(s, istr) : readEscapedString(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings &) const +{ + deserializeTextImpl(column, [&](String & s) { readQuotedStringInto(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const +{ + deserializeTextImpl(column, [&](String & s) { Parser::readJSON(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + deserializeTextImpl(column, [&](String & s) { readCSVStringInto(s, istr, settings.csv); }); +} + +template +template +void SerializationObjectDeprecated::checkSerializationIsSupported(const TSettings & settings) const +{ + if (settings.position_independent_encoding) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with position independent encoding"); +} + +template +struct SerializationObjectDeprecated::SerializeStateObject : public ISerialization::SerializeBinaryBulkState +{ + DataTypePtr nested_type; + SerializationPtr nested_serialization; + SerializeBinaryBulkStatePtr nested_state; +}; + +template +struct SerializationObjectDeprecated::DeserializeStateObject : public ISerialization::DeserializeBinaryBulkState +{ + BinarySerializationKind kind; + DataTypePtr nested_type; + SerializationPtr nested_serialization; + DeserializeBinaryBulkStatePtr nested_state; +}; + +template +void SerializationObjectDeprecated::serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const +{ + checkSerializationIsSupported(settings); + if (state) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with non-trivial state"); + + const auto & column_object = assert_cast(column); + if (!column_object.isFinalized()) + { + auto finalized = column_object.cloneFinalized(); + serializeBinaryBulkStatePrefix(*finalized, settings, state); + return; + } + + settings.path.push_back(Substream::DeprecatedObjectStructure); + auto * stream = settings.getter(settings.path); + + if (!stream) + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Missing stream for kind of binary serialization"); + + auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + + writeIntBinary(static_cast(BinarySerializationKind::TUPLE), *stream); + writeStringBinary(tuple_type->getName(), *stream); + + auto state_object = std::make_shared(); + state_object->nested_type = tuple_type; + state_object->nested_serialization = tuple_type->getDefaultSerialization(); + + settings.path.back() = Substream::DeprecatedObjectData; + state_object->nested_serialization->serializeBinaryBulkStatePrefix(*tuple_column, settings, state_object->nested_state); + + state = std::move(state_object); + settings.path.pop_back(); +} + +template +void SerializationObjectDeprecated::serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const +{ + checkSerializationIsSupported(settings); + auto * state_object = checkAndGetState(state); + + settings.path.push_back(Substream::DeprecatedObjectData); + state_object->nested_serialization->serializeBinaryBulkStateSuffix(settings, state_object->nested_state); + settings.path.pop_back(); +} + +template +void 
SerializationObjectDeprecated::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const +{ + checkSerializationIsSupported(settings); + if (state) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with non-trivial state"); + + settings.path.push_back(Substream::DeprecatedObjectStructure); + auto * stream = settings.getter(settings.path); + settings.path.pop_back(); + + if (!stream) + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, + "Cannot read kind of binary serialization of DataTypeObject, because its stream is missing"); + + UInt8 kind_raw; + readIntBinary(kind_raw, *stream); + auto kind = magic_enum::enum_cast(kind_raw); + if (!kind) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); + + auto state_object = std::make_shared(); + state_object->kind = *kind; + + if (state_object->kind == BinarySerializationKind::TUPLE) + { + String data_type_name; + readStringBinary(data_type_name, *stream); + state_object->nested_type = DataTypeFactory::instance().get(data_type_name); + state_object->nested_serialization = state_object->nested_type->getDefaultSerialization(); + + if (!isTuple(state_object->nested_type)) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Data of type Object should be written as Tuple, got: {}", data_type_name); + } + else if (state_object->kind == BinarySerializationKind::STRING) + { + state_object->nested_type = std::make_shared(); + state_object->nested_serialization = std::make_shared(); + } + else + { + throw Exception(ErrorCodes::INCORRECT_DATA, + "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); + } + + settings.path.push_back(Substream::DeprecatedObjectData); + state_object->nested_serialization->deserializeBinaryBulkStatePrefix(settings, state_object->nested_state, cache); + settings.path.pop_back(); + + state = std::move(state_object); +} + +template +void SerializationObjectDeprecated::serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const +{ + checkSerializationIsSupported(settings); + const auto & column_object = assert_cast(column); + auto * state_object = checkAndGetState(state); + + if (!column_object.isFinalized()) + { + auto finalized = column_object.cloneFinalized(); + serializeBinaryBulkWithMultipleStreams(*finalized, offset, limit, settings, state); + return; + } + + auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + + if (!state_object->nested_type->equals(*tuple_type)) + { + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Types of internal column of Object mismatched. 
Expected: {}, Got: {}", + state_object->nested_type->getName(), tuple_type->getName()); + } + + settings.path.push_back(Substream::DeprecatedObjectData); + if (auto * stream = settings.getter(settings.path)) + { + state_object->nested_serialization->serializeBinaryBulkWithMultipleStreams( + *tuple_column, offset, limit, settings, state_object->nested_state); + } + + settings.path.pop_back(); +} + +template +void SerializationObjectDeprecated::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + checkSerializationIsSupported(settings); + if (!column->empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject cannot be deserialized to non-empty column"); + + auto mutable_column = column->assumeMutable(); + auto & column_object = assert_cast(*mutable_column); + auto * state_object = checkAndGetState(state); + + settings.path.push_back(Substream::DeprecatedObjectData); + if (state_object->kind == BinarySerializationKind::STRING) + deserializeBinaryBulkFromString(column_object, limit, settings, *state_object, cache); + else + deserializeBinaryBulkFromTuple(column_object, limit, settings, *state_object, cache); + + settings.path.pop_back(); + column_object.checkConsistency(); + column_object.finalize(); + column = std::move(mutable_column); +} + +template +void SerializationObjectDeprecated::deserializeBinaryBulkFromString( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const +{ + ColumnPtr column_string = state.nested_type->createColumn(); + state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( + column_string, limit, settings, state.nested_state, cache); + + size_t input_rows_count = column_string->size(); + column_object.reserve(input_rows_count); + + FormatSettings format_settings; + for (size_t i = 0; i < input_rows_count; ++i) + { + const auto & val = column_string->getDataAt(i); + ReadBufferFromMemory read_buffer(val.data, val.size); + deserializeWholeText(column_object, read_buffer, format_settings); + + if (!read_buffer.eof()) + throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, + "Cannot parse string to column Object. 
Expected eof"); + } +} + +template +void SerializationObjectDeprecated::deserializeBinaryBulkFromTuple( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const +{ + ColumnPtr column_tuple = state.nested_type->createColumn(); + state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( + column_tuple, limit, settings, state.nested_state, cache); + + auto [tuple_paths, tuple_types] = flattenTuple(state.nested_type); + auto flattened_tuple = flattenTuple(column_tuple); + const auto & tuple_columns = assert_cast(*flattened_tuple).getColumns(); + + assert(tuple_paths.size() == tuple_types.size()); + size_t num_subcolumns = tuple_paths.size(); + + if (tuple_columns.size() != num_subcolumns) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Inconsistent type ({}) and column ({}) while reading column of type Object", + state.nested_type->getName(), column_tuple->getName()); + + for (size_t i = 0; i < num_subcolumns; ++i) + column_object.addSubcolumn(tuple_paths[i], tuple_columns[i]->assumeMutable()); +} + +template +void SerializationObjectDeprecated::serializeBinary(const Field &, WriteBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +template +void SerializationObjectDeprecated::deserializeBinary(Field &, ReadBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +template +void SerializationObjectDeprecated::serializeBinary(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +template +void SerializationObjectDeprecated::deserializeBinary(IColumn &, ReadBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +/// TODO: use format different of JSON in serializations. 
+ +template +void SerializationObjectDeprecated::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + const auto & column_object = assert_cast(column); + const auto & subcolumns = column_object.getSubcolumns(); + + writeChar('{', ostr); + for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) + { + const auto & entry = *it; + if (it != subcolumns.begin()) + writeCString(",", ostr); + + writeDoubleQuoted(entry->path.getPath(), ostr); + writeChar(':', ostr); + serializeTextFromSubcolumn(entry->data, row_num, ostr, settings); + } + writeChar('}', ostr); +} + +template +template +void SerializationObjectDeprecated::serializeTextFromSubcolumn( + const ColumnObjectDeprecated::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const auto & least_common_type = subcolumn.getLeastCommonType(); + + if (subcolumn.isFinalized()) + { + const auto & finalized_column = subcolumn.getFinalizedColumn(); + auto info = least_common_type->getSerializationInfo(finalized_column); + auto serialization = least_common_type->getSerialization(*info); + if constexpr (pretty_json) + serialization->serializeTextJSONPretty(finalized_column, row_num, ostr, settings, indent); + else + serialization->serializeTextJSON(finalized_column, row_num, ostr, settings); + return; + } + + size_t ind = row_num; + if (ind < subcolumn.getNumberOfDefaultsInPrefix()) + { + /// Suboptimal, but it should happen rarely. + auto tmp_column = subcolumn.getLeastCommonType()->createColumn(); + tmp_column->insertDefault(); + + auto info = least_common_type->getSerializationInfo(*tmp_column); + auto serialization = least_common_type->getSerialization(*info); + if constexpr (pretty_json) + serialization->serializeTextJSONPretty(*tmp_column, 0, ostr, settings, indent); + else + serialization->serializeTextJSON(*tmp_column, 0, ostr, settings); + return; + } + + ind -= subcolumn.getNumberOfDefaultsInPrefix(); + for (const auto & part : subcolumn.getData()) + { + if (ind < part->size()) + { + auto part_type = getDataTypeByColumn(*part); + auto info = part_type->getSerializationInfo(*part); + auto serialization = part_type->getSerialization(*info); + if constexpr (pretty_json) + serialization->serializeTextJSONPretty(*part, ind, ostr, settings, indent); + else + serialization->serializeTextJSON(*part, ind, ostr, settings); + return; + } + + ind -= part->size(); + } + + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for text serialization is out of range", row_num); +} + +template +void SerializationObjectDeprecated::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationObjectDeprecated::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeEscapedString(ostr_str.str(), ostr); +} + +template +void SerializationObjectDeprecated::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeQuotedString(ostr_str.str(), ostr); +} + +template +void SerializationObjectDeprecated::serializeTextJSON(const IColumn & column, size_t 
row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationObjectDeprecated::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeCSVString(ostr_str.str(), ostr); +} + +template +void SerializationObjectDeprecated::serializeTextMarkdown( + const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + if (settings.markdown.escape_special_characters) + { + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeMarkdownEscapedString(ostr_str.str(), ostr); + } + else + { + serializeTextEscaped(column, row_num, ostr, settings); + } +} + +template +void SerializationObjectDeprecated::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const auto & column_object = assert_cast(column); + const auto & subcolumns = column_object.getSubcolumns(); + + writeCString("{\n", ostr); + for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) + { + const auto & entry = *it; + if (it != subcolumns.begin()) + writeCString(",\n", ostr); + + writeChar(' ', (indent + 1) * 4, ostr); + writeDoubleQuoted(entry->path.getPath(), ostr); + writeCString(": ", ostr); + serializeTextFromSubcolumn(entry->data, row_num, ostr, settings, indent + 1); + } + writeChar('\n', ostr); + writeChar(' ', indent * 4, ostr); + writeChar('}', ostr); +} + + +SerializationPtr getObjectSerialization(const String & schema_format) +{ + if (schema_format == "json") + { +#if USE_SIMDJSON + return std::make_shared>>(); +#elif USE_RAPIDJSON + return std::make_shared>>(); +#else + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "To use data type Object with JSON format ClickHouse should be built with Simdjson or Rapidjson"); +#endif + } + + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unknown schema format '{}'", schema_format); +} + +} diff --git a/src/DataTypes/Serializations/SerializationObjectDeprecated.h b/src/DataTypes/Serializations/SerializationObjectDeprecated.h new file mode 100644 index 00000000000..c209f946850 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDeprecated.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/** Serialization for data type Object (deprecated). + * Supported only text serialization/deserialization. + * and binary bulk serialization/deserialization without position independent + * encoding, i.e. serialization/deserialization into Native format. + */ +template +class SerializationObjectDeprecated : public ISerialization +{ +public: + /** In Native format ColumnObjectDeprecated can be serialized + * in two formats: as Tuple or as String. + * The format is the following: + * + * 1 byte -- 0 if Tuple, 1 if String. + * [type_name] -- Only for tuple serialization. + * ... data of internal column ... + * + * ClickHouse client serializazes objects as tuples. + * String serialization exists for clients, which cannot + * do parsing by themselves and they can send raw data as + * string. It will be parsed on the server side. 
+ */ + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + + void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const override; + void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const override; + void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; + void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; + + void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; + void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextMarkdown(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + + void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + +private: + enum class BinarySerializationKind : UInt8 + { + TUPLE = 0, + STRING = 1, + }; + + struct SerializeStateObject; + struct DeserializeStateObject; + + void deserializeBinaryBulkFromString( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const; + + void deserializeBinaryBulkFromTuple( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const; + + template + void checkSerializationIsSupported(const TSettings & settings) const; + + 
template + void deserializeTextImpl(IColumn & column, Reader && reader) const; + + void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const; + + template + void serializeTextFromSubcolumn(const ColumnObjectDeprecated::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent = 0) const; + + /// Pool of parser objects to make SerializationObjectDeprecated thread safe. + mutable SimpleObjectPool parsers_pool; +}; + +SerializationPtr getObjectSerialization(const String & schema_format); + +} diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp new file mode 100644 index 00000000000..3a635671978 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp @@ -0,0 +1,180 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +SerializationObjectDynamicPath::SerializationObjectDynamicPath( + const DB::SerializationPtr & nested_, const String & path_, const String & path_subcolumn_, size_t max_dynamic_types_) + : SerializationWrapper(nested_) + , path(path_) + , path_subcolumn(path_subcolumn_) + , dynamic_serialization(std::make_shared()) + , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) + , max_dynamic_types(max_dynamic_types_) +{ +} + +struct DeserializeBinaryBulkStateObjectDynamicPath : public ISerialization::DeserializeBinaryBulkState +{ + ISerialization::DeserializeBinaryBulkStatePtr structure_state; + ISerialization::DeserializeBinaryBulkStatePtr nested_state; + bool read_from_shared_data; + ColumnPtr shared_data; +}; + +void SerializationObjectDynamicPath::enumerateStreams( + DB::ISerialization::EnumerateStreamsSettings & settings, + const DB::ISerialization::StreamCallback & callback, + const DB::ISerialization::SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectStructure); + callback(settings.path); + settings.path.pop_back(); + + const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; + + /// We cannot enumerate anything if we don't have deserialization state, as we don't know the dynamic structure. + if (!deserialize_state) + return; + + settings.path.push_back(Substream::ObjectData); + const auto * structure_state = checkAndGetState(deserialize_state->structure_state); + /// Check if we have our path in dynamic paths. + if (structure_state->dynamic_paths.contains(path)) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(nested_serialization) + .withType(data.type) + .withColumn(data.column) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state->nested_state); + settings.path.back().data = path_data; + nested_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + } + /// Otherwise we will have to read all shared data and try to find our path there. + else + { + settings.path.push_back(Substream::ObjectSharedData); + auto shared_data_substream_data = SubstreamData(shared_data_serialization) + .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? 
SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state->nested_state); + settings.path.back().data = shared_data_substream_data; + shared_data_serialization->enumerateStreams(settings, callback, shared_data_substream_data); + settings.path.pop_back(); + } + + settings.path.pop_back(); +} + +void SerializationObjectDynamicPath::serializeBinaryBulkStatePrefix(const IColumn &, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStatePrefix is not implemented for SerializationObjectDynamicPath"); +} + +void SerializationObjectDynamicPath::serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStateSuffix is not implemented for SerializationObjectDynamicPath"); +} + +void SerializationObjectDynamicPath::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const +{ + auto structure_state = SerializationObject::deserializeObjectStructureStatePrefix(settings, cache); + if (!structure_state) + return; + + auto dynamic_path_state = std::make_shared(); + dynamic_path_state->structure_state = std::move(structure_state); + /// Remember if we need to read from shared data or we have this path in dynamic paths. + dynamic_path_state->read_from_shared_data = !checkAndGetState(dynamic_path_state->structure_state)->dynamic_paths.contains(path); + settings.path.push_back(Substream::ObjectData); + if (dynamic_path_state->read_from_shared_data) + { + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_path_state->nested_state, cache); + settings.path.pop_back(); + } + else + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_path_state->nested_state, cache); + settings.path.pop_back(); + } + + settings.path.pop_back(); + state = std::move(dynamic_path_state); +} + +void SerializationObjectDynamicPath::serializeBinaryBulkWithMultipleStreams(const IColumn &, size_t, size_t, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkWithMultipleStreams is not implemented for SerializationObjectDynamicPath"); +} + +void SerializationObjectDynamicPath::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & result_column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + if (!state) + return; + + auto * dynamic_path_state = checkAndGetState(state); + settings.path.push_back(Substream::ObjectData); + /// Check if we don't need to read shared data. In this case just read data from dynamic path. + if (!dynamic_path_state->read_from_shared_data) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkWithMultipleStreams(result_column, limit, settings, dynamic_path_state->nested_state, cache); + settings.path.pop_back(); + } + /// Otherwise, read the whole shared data column and extract requested path from it. 
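The else branch below is the fallback: when the requested path was not written as its own substream, the whole shared data column is read and the path is extracted from it. As a rough standalone illustration of that idea (a simplified layout where each row keeps its paths and values as a sorted list of pairs, not the actual ColumnObject representation), the values of one path can be pulled out with a binary search per row, inserting a default for rows that do not contain the path:

// Simplified sketch: each "shared data" row stores (path, value) pairs sorted by path.
// Extracting one path is a lower_bound per row plus a default for missing rows.
// The layout and names are assumptions for illustration only.
#include <algorithm>
#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using SharedDataRow = std::vector<std::pair<std::string, std::string>>; // sorted by path

std::vector<std::optional<std::string>>
extractPath(const std::vector<SharedDataRow> & rows, const std::string & path)
{
    std::vector<std::optional<std::string>> result;
    result.reserve(rows.size());
    for (const auto & row : rows)
    {
        auto it = std::lower_bound(
            row.begin(), row.end(), path,
            [](const auto & entry, const std::string & p) { return entry.first < p; });
        if (it != row.end() && it->first == path)
            result.emplace_back(it->second);
        else
            result.emplace_back(std::nullopt); // path absent in this row -> default value
    }
    return result;
}

int main()
{
    std::vector<SharedDataRow> rows = {
        {{"c.d", "[1,2,3]"}, {"d", "42"}},
        {{"d", "7"}},
        {},
    };
    for (const auto & value : extractPath(rows, "d"))
        std::cout << (value ? *value : std::string("<default>")) << '\n';
}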
+    else
+    {
+        settings.path.push_back(Substream::ObjectSharedData);
+        /// Initialize shared_data column if needed.
+        if (result_column->empty())
+            dynamic_path_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn();
+        size_t prev_size = result_column->size();
+        shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_path_state->shared_data, limit, settings, dynamic_path_state->nested_state, cache);
+        /// If we need to read a subcolumn from the Dynamic column, create an empty Dynamic column, fill it and extract the subcolumn.
+        MutableColumnPtr dynamic_column = path_subcolumn.empty() ? result_column->assumeMutable() : ColumnDynamic::create(max_dynamic_types)->getPtr();
+        /// Check if there are no paths in the shared data in the current range.
+        const auto & offsets = assert_cast(*dynamic_path_state->shared_data).getOffsets();
+        if (offsets.back() == offsets[ssize_t(prev_size) - 1])
+            dynamic_column->insertManyDefaults(limit);
+        else
+            ColumnObject::fillPathColumnFromSharedData(*dynamic_column, path, dynamic_path_state->shared_data, prev_size, dynamic_path_state->shared_data->size());
+
+        /// Extract the subcolumn from the Dynamic column if needed.
+        if (!path_subcolumn.empty())
+        {
+            auto subcolumn = std::make_shared(max_dynamic_types)->getSubcolumn(path_subcolumn, dynamic_column->getPtr());
+            result_column->assumeMutable()->insertRangeFrom(*subcolumn, 0, subcolumn->size());
+        }
+
+        settings.path.pop_back();
+    }
+
+    settings.path.pop_back();
+}
+
+}
diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.h b/src/DataTypes/Serializations/SerializationObjectDynamicPath.h
new file mode 100644
index 00000000000..e11d0cded73
--- /dev/null
+++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.h
@@ -0,0 +1,58 @@
+#pragma once
+
+#include 
+
+namespace DB
+{
+
+/// Serialization of dynamic Object paths.
+/// For example, if we have type JSON(a.b UInt32, b.c String) and data {"a" : {"b" : 42}, "b" : {"c" : "Hello"}, "c" : {"d" : [1, 2, 3]}, "d" : 42}
+/// this class will be responsible for reading dynamic paths 'c.d' and 'd' as subcolumns.
+/// Typed paths 'a.b' and 'b.c' are serialized in SerializationObjectTypedPath.
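To make the division of labour described above concrete, here is a minimal routing sketch: given the typed paths fixed by the type declaration and the dynamic paths discovered from the structure prefix, a requested path is served either from its dedicated substream or from the shared data fallback. The enum and function names are illustrative assumptions, not the actual dispatch code:

// Assumed model of subcolumn routing for the JSON/Object type: typed paths and
// dynamic paths have their own substreams, everything else is looked up in shared data.
#include <iostream>
#include <string>
#include <unordered_set>

enum class PathSource { TypedStream, DynamicStream, SharedData };

PathSource resolvePathSource(
    const std::string & path,
    const std::unordered_set<std::string> & typed_paths,
    const std::unordered_set<std::string> & dynamic_paths)
{
    if (typed_paths.contains(path))
        return PathSource::TypedStream;   // dedicated, statically typed substream
    if (dynamic_paths.contains(path))
        return PathSource::DynamicStream; // dedicated Dynamic substream
    return PathSource::SharedData;        // must be searched in the shared data fallback
}

int main()
{
    std::unordered_set<std::string> typed = {"a.b", "b.c"};
    std::unordered_set<std::string> dynamic = {"c.d"};
    for (const std::string & p : {"a.b", "c.d", "d"})
        std::cout << p << " -> source " << static_cast<int>(resolvePathSource(p, typed, dynamic)) << '\n';
}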
+class SerializationObjectDynamicPath final : public SerializationWrapper +{ +public: + SerializationObjectDynamicPath(const SerializationPtr & nested_, const String & path_, const String & path_subcolumn_, size_t max_dynamic_types_); + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + +private: + String path; + String path_subcolumn; + SerializationPtr dynamic_serialization; + SerializationPtr shared_data_serialization; + size_t max_dynamic_types; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationObjectElement.cpp b/src/DataTypes/Serializations/SerializationObjectElement.cpp new file mode 100644 index 00000000000..35119cd0764 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectElement.cpp @@ -0,0 +1,3 @@ +// +// Created by Павел Круглов on 05/07/2024. +// diff --git a/src/DataTypes/Serializations/SerializationObjectElement.h b/src/DataTypes/Serializations/SerializationObjectElement.h new file mode 100644 index 00000000000..791ef2eecc5 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectElement.h @@ -0,0 +1,8 @@ +// +// Created by Павел Круглов on 05/07/2024. 
+// + +#ifndef CLICKHOUSE_SERIALIZATIONJSONELEMENT_H +#define CLICKHOUSE_SERIALIZATIONJSONELEMENT_H + +#endif //CLICKHOUSE_SERIALIZATIONJSONELEMENT_H diff --git a/src/DataTypes/Serializations/SerializationObjectTypedPath.cpp b/src/DataTypes/Serializations/SerializationObjectTypedPath.cpp new file mode 100644 index 00000000000..ef086d486f7 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectTypedPath.cpp @@ -0,0 +1,78 @@ +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + + +void SerializationObjectTypedPath::enumerateStreams( + DB::ISerialization::EnumerateStreamsSettings & settings, + const DB::ISerialization::StreamCallback & callback, + const DB::ISerialization::SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectData); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(nested_serialization) + .withType(data.type) + .withColumn(data.column) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(data.deserialize_state); + nested_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + settings.path.pop_back(); +} + +void SerializationObjectTypedPath::serializeBinaryBulkStatePrefix(const IColumn &, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStatePrefix is not implemented for SerializationObjectTypedPath"); +} + +void SerializationObjectTypedPath::serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStateSuffix is not implemented for SerializationObjectTypedPath"); +} + +void SerializationObjectTypedPath::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const +{ + settings.path.push_back(Substream::ObjectData); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkStatePrefix(settings, state, cache); + settings.path.pop_back(); + settings.path.pop_back(); +} + +void SerializationObjectTypedPath::serializeBinaryBulkWithMultipleStreams(const IColumn &, size_t, size_t, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkWithMultipleStreams is not implemented for SerializationObjectTypedPath"); +} + +void SerializationObjectTypedPath::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & result_column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + settings.path.push_back(Substream::ObjectData); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkWithMultipleStreams(result_column, limit, settings, state, cache); + settings.path.pop_back(); + settings.path.pop_back(); +} + +} diff --git a/src/DataTypes/Serializations/SerializationObjectTypedPath.h b/src/DataTypes/Serializations/SerializationObjectTypedPath.h new file mode 100644 index 00000000000..a893e07f798 --- /dev/null +++ 
b/src/DataTypes/Serializations/SerializationObjectTypedPath.h @@ -0,0 +1,57 @@ +#pragma once + +#include + +namespace DB +{ + +/// Serialization of typed Object paths. +/// For example, for type JSON(a.b UInt32, b.c String) this serialization +/// will be used to read paths 'a.b' and 'b.c' as subcolumns. +class SerializationObjectTypedPath final : public SerializationWrapper +{ +public: + SerializationObjectTypedPath(const SerializationPtr & nested_, const String & path_) + : SerializationWrapper(nested_) + , path(path_) + { + } + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + +private: + String path; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationSubObject.cpp b/src/DataTypes/Serializations/SerializationSubObject.cpp new file mode 100644 index 00000000000..62ebd4669e1 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationSubObject.cpp @@ -0,0 +1,259 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +SerializationSubObject::SerializationSubObject( + const String & path_prefix_, const std::unordered_map & typed_paths_serializations_) + : path_prefix(path_prefix_) + , typed_paths_serializations(typed_paths_serializations_) + , dynamic_serialization(std::make_shared()) + , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) +{ +} + +struct DeserializeBinaryBulkStateSubObject : public ISerialization::DeserializeBinaryBulkState +{ + std::unordered_map typed_path_states; + std::unordered_map dynamic_path_states; + std::vector dynamic_paths; + std::vector dynamic_sub_paths; + ISerialization::DeserializeBinaryBulkStatePtr shared_data_state; + ColumnPtr shared_data; +}; + +void SerializationSubObject::enumerateStreams( + DB::ISerialization::EnumerateStreamsSettings & settings, + const DB::ISerialization::StreamCallback & callback, + const DB::ISerialization::SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectStructure); + callback(settings.path); + settings.path.pop_back(); + + const auto * column_object = data.column ? &assert_cast(*data.column) : nullptr; + const auto * type_object = data.type ? &assert_cast(*data.type) : nullptr; + const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; + + settings.path.push_back(Substream::ObjectData); + + /// typed_paths_serializations contains only typed paths with requested prefix from original Object column. 
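The typed-path handling here relies on turning a full path into a sub-path relative to the requested prefix (path.substr(path_prefix.size() + 1)). The following standalone sketch of that convention assumes, as the surrounding code guarantees, that the caller only passes paths that really start with "<prefix>.":

// Prefix stripping for sub-object subcolumns: "a.b.c.d" with prefix "a.b" -> "c.d".
// Illustration only; not the library helper itself.
#include <cassert>
#include <iostream>
#include <string_view>

std::string_view subPath(std::string_view path, std::string_view prefix)
{
    assert(path.size() > prefix.size() + 1);
    assert(path.substr(0, prefix.size()) == prefix && path[prefix.size()] == '.');
    return path.substr(prefix.size() + 1); // drop "<prefix>."
}

int main()
{
    std::cout << subPath("a.b.c.d", "a.b") << '\n'; // c.d
    std::cout << subPath("a.b.c", "a.b") << '\n';   // c
}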
+ for (const auto & [path, serialization] : typed_paths_serializations) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(serialization) + .withType(type_object ? type_object->getTypedPaths().at(path.substr(path_prefix.size() + 1)) : nullptr) + .withColumn(column_object ? column_object->getTypedPaths().at(path.substr(path_prefix.size() + 1)) : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->typed_path_states.at(path) : nullptr); + settings.path.back().data = path_data; + serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + } + + /// We will need to read shared data to find all paths with requested prefix. + settings.path.push_back(Substream::ObjectSharedData); + auto shared_data_substream_data = SubstreamData(shared_data_serialization) + .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); + settings.path.back().data = shared_data_substream_data; + shared_data_serialization->enumerateStreams(settings, callback, shared_data_substream_data); + settings.path.pop_back(); + + /// If deserialize state is provided, enumerate streams for dynamic paths. + if (deserialize_state) + { + DataTypePtr type = std::make_shared(); + for (const auto & [path, state] : deserialize_state->dynamic_path_states) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(dynamic_serialization) + .withType(type_object ? type : nullptr) + .withColumn(nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(state); + settings.path.back().data = path_data; + dynamic_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + } + } + + settings.path.pop_back(); +} + +void SerializationSubObject::serializeBinaryBulkStatePrefix(const IColumn &, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStatePrefix is not implemented for SerializationSubObject"); +} + +void SerializationSubObject::serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStateSuffix is not implemented for SerializationSubObject"); +} + +namespace +{ + +/// Return sub-path by specified prefix. 
+/// For example, for prefix a.b: +/// a.b.c.d -> c.d, a.b.c -> c +String getSubPath(const String & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +std::string_view getSubPath(const std::string_view & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +} + +void SerializationSubObject::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const +{ + auto structure_state = SerializationObject::deserializeObjectStructureStatePrefix(settings, cache); + if (!structure_state) + return; + + auto sub_object_state = std::make_shared(); + settings.path.push_back(Substream::ObjectData); + for (const auto & [path, serialization] : typed_paths_serializations) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + serialization->deserializeBinaryBulkStatePrefix(settings, sub_object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + + for (const auto & dynamic_path : checkAndGetState(structure_state)->sorted_dynamic_paths) + { + /// Save only dynamic paths with requested prefix. + if (dynamic_path.starts_with(path_prefix) && dynamic_path.size() != path_prefix.size()) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = dynamic_path; + dynamic_serialization->deserializeBinaryBulkStatePrefix(settings, sub_object_state->dynamic_path_states[dynamic_path], cache); + settings.path.pop_back(); + sub_object_state->dynamic_paths.push_back(dynamic_path); + sub_object_state->dynamic_sub_paths.push_back(getSubPath(dynamic_path, path_prefix)); + } + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, sub_object_state->shared_data_state, cache); + settings.path.pop_back(); + + settings.path.pop_back(); + state = std::move(sub_object_state); +} + +void SerializationSubObject::serializeBinaryBulkWithMultipleStreams(const IColumn &, size_t, size_t, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkWithMultipleStreams is not implemented for SerializationSubObject"); +} + +void SerializationSubObject::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & result_column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + if (!state) + return; + + auto * sub_object_state = checkAndGetState(state); + auto mutable_column = result_column->assumeMutable(); + auto & column_object = assert_cast(*mutable_column); + /// If it's a new object column, set dynamic paths and statistics. 
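Further down, the paths belonging to the sub-object are located inside each row's shared data by a lower-bound search over the sorted path list followed by a prefix scan. A simplified standalone version of that scan is sketched below (a plain vector of pairs stands in for the real shared data columns; the explicit separator check is an extra safeguard added in this sketch):

// Per-row prefix scan over sorted (path, value) pairs: binary-search to the first
// path >= prefix, then consume consecutive entries that are children of the prefix.
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using Entry = std::pair<std::string, std::string>; // (path, value), sorted by path

std::vector<Entry> pathsWithPrefix(const std::vector<Entry> & row, const std::string & prefix)
{
    std::vector<Entry> result;
    auto it = std::lower_bound(
        row.begin(), row.end(), prefix,
        [](const Entry & e, const std::string & p) { return e.first < p; });
    for (; it != row.end() && it->first.starts_with(prefix); ++it)
    {
        if (it->first.size() == prefix.size())
            continue; // don't include a path equal to the prefix itself
        if (it->first[prefix.size()] != '.')
            continue; // require a separator so that e.g. "a.bc" is not treated as a child of "a.b"
        result.emplace_back(it->first.substr(prefix.size() + 1), it->second);
    }
    return result;
}

int main()
{
    std::vector<Entry> row = {{"a.b.c", "42"}, {"a.b.d", "\"Hello\""}, {"a.bc", "1"}, {"c", "[1,2,3]"}};
    for (const auto & [sub_path, value] : pathsWithPrefix(row, "a.b"))
        std::cout << sub_path << " = " << value << '\n';
}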
+ if (column_object.empty()) + column_object.setDynamicPaths(sub_object_state->dynamic_sub_paths); + + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + + settings.path.push_back(Substream::ObjectData); + for (const auto & [path, serialization] : typed_paths_serializations) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + serialization->deserializeBinaryBulkWithMultipleStreams(typed_paths[getSubPath(path, path_prefix)], limit, settings, sub_object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + + for (const auto & path : sub_object_state->dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_paths[getSubPath(path, path_prefix)], limit, settings, sub_object_state->dynamic_path_states[path], cache); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + /// If it's a new object column, reinitialize column for shared data. + if (result_column->empty()) + sub_object_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + size_t prev_size = column_object.size(); + shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(sub_object_state->shared_data, limit, settings, sub_object_state->shared_data_state, cache); + settings.path.pop_back(); + + auto & sub_object_shared_data = column_object.getSharedDataColumn(); + const auto & offsets = assert_cast(*sub_object_state->shared_data).getOffsets(); + /// Check if there is no data in shared data in current range. + if (offsets.back() == offsets[ssize_t(prev_size) - 1]) + { + sub_object_shared_data.insertManyDefaults(limit); + } + else + { + const auto & shared_data_array = assert_cast(*sub_object_state->shared_data); + const auto & shared_data_offsets = shared_data_array.getOffsets(); + const auto & shared_data_tuple = assert_cast(shared_data_array.getData()); + const auto & shared_data_paths = assert_cast(shared_data_tuple.getColumn(0)); + const auto & shared_data_values = assert_cast(shared_data_tuple.getColumn(1)); + + auto & sub_object_data_offsets = column_object.getSharedDataOffsets(); + auto [sub_object_shared_data_paths, sub_object_shared_data_values] = column_object.getSharedDataPathsAndValues(); + StringRef prefix_ref(path_prefix); + for (size_t i = prev_size; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + size_t lower_bound_index = ColumnObject::findPathLowerBoundInSharedData(prefix_ref, shared_data_paths, start, end); + for (; lower_bound_index != end; ++lower_bound_index) + { + auto path = shared_data_paths.getDataAt(lower_bound_index).toView(); + if (!path.starts_with(path_prefix)) + break; + + /// Don't include path that is equal to the prefix. 
+ if (path.size() != path_prefix.size()) + { + auto sub_path = getSubPath(path, path_prefix); + sub_object_shared_data_paths->insertData(sub_path.data(), sub_path.size()); + sub_object_shared_data_values->insertFrom(shared_data_values, lower_bound_index); + } + } + sub_object_data_offsets.push_back(sub_object_shared_data_paths->size()); + } + } + settings.path.pop_back(); +} + +} diff --git a/src/DataTypes/Serializations/SerializationSubObject.h b/src/DataTypes/Serializations/SerializationSubObject.h new file mode 100644 index 00000000000..8d037720f1f --- /dev/null +++ b/src/DataTypes/Serializations/SerializationSubObject.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/// Serialization of a sub-object Object subcolumns. +/// For example, if we have type JSON and data {"a" : {"b" : {"c" : 42, "d" : "Hello"}}, "c" : [1, 2, 3], "d" : 42} +/// this class will be responsible for reading sub-object a.b and will read JSON column with data {"c" : 43, "d" : "Hello"}. +class SerializationSubObject final : public SimpleTextSerialization +{ +public: + SerializationSubObject(const String & path_prefix_, const std::unordered_map & typed_paths_serializations_); + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + + void serializeBinary(const Field &, WriteBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void deserializeBinary(Field &, ReadBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void serializeBinary(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void deserializeBinary(IColumn &, ReadBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void serializeText(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void deserializeText(IColumn &, ReadBuffer &, const FormatSettings &, bool) const override { throwNoSerialization(); } + bool tryDeserializeText(IColumn &, ReadBuffer &, const FormatSettings &, bool) const override { throwNoSerialization(); } + +private: + [[noreturn]] static void throwNoSerialization() + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Text/binary serialization is not implemented for object sub-object subcolumn"); + } + +private: + String path_prefix; + std::unordered_map typed_paths_serializations; + SerializationPtr dynamic_serialization; + SerializationPtr shared_data_serialization; +}; + +} diff --git 
a/src/DataTypes/Serializations/SerializationVariant.cpp b/src/DataTypes/Serializations/SerializationVariant.cpp index e4d71e84cc7..0f6a17ef167 100644 --- a/src/DataTypes/Serializations/SerializationVariant.cpp +++ b/src/DataTypes/Serializations/SerializationVariant.cpp @@ -218,7 +218,8 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state, - std::unordered_map & variants_statistics) const + std::unordered_map & variants_statistics, + size_t & total_size_of_variants) const { const ColumnVariant & col = assert_cast(column); if (const size_t size = col.size(); limit == 0 || offset + limit > size) @@ -265,6 +266,7 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian /// We can use the same offset/limit as for whole Variant column variants[non_empty_global_discr]->serializeBinaryBulkWithMultipleStreams(col.getVariantByGlobalDiscriminator(non_empty_global_discr), offset, limit, settings, variant_state->variant_states[non_empty_global_discr]); variants_statistics[variant_names[non_empty_global_discr]] += limit; + total_size_of_variants += limit; settings.path.pop_back(); settings.path.pop_back(); return; @@ -315,7 +317,9 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian { addVariantElementToPath(settings.path, i); variants[i]->serializeBinaryBulkWithMultipleStreams(col.getVariantByGlobalDiscriminator(i), 0, 0, settings, variant_state->variant_states[i]); - variants_statistics[variant_names[i]] += col.getVariantByGlobalDiscriminator(i).size(); + size_t variant_size = col.getVariantByGlobalDiscriminator(i).size(); + variants_statistics[variant_names[i]] += variant_size; + total_size_of_variants += variant_size; settings.path.pop_back(); } settings.path.pop_back(); @@ -386,6 +390,7 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian settings, variant_state->variant_states[i]); variants_statistics[variant_names[i]] += variant_offsets_and_limits[i].second; + total_size_of_variants += variant_offsets_and_limits[i].second; settings.path.pop_back(); } } @@ -400,7 +405,8 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreams( DB::ISerialization::SerializeBinaryBulkStatePtr & state) const { std::unordered_map tmp_statistics; - serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(column, offset, limit, settings, state, tmp_statistics); + size_t tmp_size; + serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(column, offset, limit, settings, state, tmp_statistics, tmp_size); } void SerializationVariant::deserializeBinaryBulkWithMultipleStreams( @@ -1068,6 +1074,16 @@ void SerializationVariant::serializeTextJSON(const IColumn & column, size_t row_ variants[global_discr]->serializeTextJSON(col.getVariantByGlobalDiscriminator(global_discr), col.offsetAt(row_num), ostr, settings); } +void SerializationVariant::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const ColumnVariant & col = assert_cast(column); + auto global_discr = col.globalDiscriminatorAt(row_num); + if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) + SerializationNullable::serializeNullJSON(ostr); + else + variants[global_discr]->serializeTextJSONPretty(col.getVariantByGlobalDiscriminator(global_discr), col.offsetAt(row_num), ostr, settings, indent); +} + bool 
SerializationVariant::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { String field; diff --git a/src/DataTypes/Serializations/SerializationVariant.h b/src/DataTypes/Serializations/SerializationVariant.h index af89632cf81..a76a211e897 100644 --- a/src/DataTypes/Serializations/SerializationVariant.h +++ b/src/DataTypes/Serializations/SerializationVariant.h @@ -113,7 +113,8 @@ public: size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state, - std::unordered_map & variants_statistics) const; + std::unordered_map & variants_statistics, + size_t & total_size_of_variants) const; void deserializeBinaryBulkWithMultipleStreams( ColumnPtr & column, @@ -145,6 +146,7 @@ public: bool tryDeserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; bool tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; diff --git a/src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp new file mode 100644 index 00000000000..ec53df18297 --- /dev/null +++ b/src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp @@ -0,0 +1,80 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if USE_SIMDJSON + +using namespace DB; + +TEST(SerializationObjectDeprecated, FromString) +{ + WriteBufferFromOwnString out; + + auto column_string = ColumnString::create(); + column_string->insert(R"({"k1" : 1, "k2" : [{"k3" : "aa", "k4" : 2}, {"k3": "bb", "k4": 3}]})"); + column_string->insert(R"({"k1" : 2, "k2" : [{"k3" : "cc", "k5" : 4}, {"k4": 5}, {"k4": 6}]})"); + + { + auto serialization = std::make_shared(); + + ISerialization::SerializeBinaryBulkSettings settings; + ISerialization::SerializeBinaryBulkStatePtr state; + settings.position_independent_encoding = false; + settings.getter = [&out](const auto &) { return &out; }; + + writeIntBinary(static_cast(1), out); + serialization->serializeBinaryBulkStatePrefix(*column_string, settings, state); + serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state); + serialization->serializeBinaryBulkStateSuffix(settings, state); + } + + auto type_object = std::make_shared("json", false); + ColumnPtr result_column = type_object->createColumn(); + + ReadBufferFromOwnString in(out.str()); + + { + auto serialization = type_object->getDefaultSerialization(); + + ISerialization::DeserializeBinaryBulkSettings settings; + ISerialization::DeserializeBinaryBulkStatePtr state; + settings.position_independent_encoding = false; + settings.getter = [&in](const auto &) { return ∈ }; + + serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); + serialization->deserializeBinaryBulkWithMultipleStreams(result_column, column_string->size(), settings, state, nullptr); + } + + auto & column_object = assert_cast(*result_column->assumeMutable()); + column_object.finalize(); + + 
ASSERT_TRUE(column_object.size() == 2); + ASSERT_TRUE(column_object.getSubcolumns().size() == 4); + + auto check_subcolumn = [&](const auto & name, const auto & type_name, const std::vector & expected) + { + const auto & subcolumn = column_object.getSubcolumn(PathInData{name}); + ASSERT_EQ(subcolumn.getLeastCommonType()->getName(), type_name); + + const auto & data = subcolumn.getFinalizedColumn(); + for (size_t i = 0; i < expected.size(); ++i) + ASSERT_EQ( + applyVisitor(FieldVisitorToString(), data[i]), + applyVisitor(FieldVisitorToString(), expected[i])); + }; + + check_subcolumn("k1", "Int8", {1, 2}); + check_subcolumn("k2.k3", "Array(String)", {Array{"aa", "bb"}, Array{"cc", "", ""}}); + check_subcolumn("k2.k4", "Array(Int8)", {Array{2, 3}, Array{0, 5, 6}}); + check_subcolumn("k2.k5", "Array(Int8)", {Array{0, 0}, Array{4, 0, 0}}); +} + +#endif diff --git a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp index c6337a31fce..8a0c712fa86 100644 --- a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp +++ b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp @@ -1,80 +1,98 @@ -#include -#include -#include -#include -#include -#include #include -#include -#include +#include +#include +#include #include -#if USE_SIMDJSON - using namespace DB; -TEST(SerializationObject, FromString) +TEST(ObjectSerialization, FieldBinarySerialization) { - WriteBufferFromOwnString out; - - auto column_string = ColumnString::create(); - column_string->insert(R"({"k1" : 1, "k2" : [{"k3" : "aa", "k4" : 2}, {"k3": "bb", "k4": 3}]})"); - column_string->insert(R"({"k1" : 2, "k2" : [{"k3" : "cc", "k5" : 4}, {"k4": 5}, {"k4": 6}]})"); - - { - auto serialization = std::make_shared(); - - ISerialization::SerializeBinaryBulkSettings settings; - ISerialization::SerializeBinaryBulkStatePtr state; - settings.position_independent_encoding = false; - settings.getter = [&out](const auto &) { return &out; }; - - writeIntBinary(static_cast(1), out); - serialization->serializeBinaryBulkStatePrefix(*column_string, settings, state); - serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state); - serialization->serializeBinaryBulkStateSuffix(settings, state); - } - - auto type_object = std::make_shared("json", false); - ColumnPtr result_column = type_object->createColumn(); - - ReadBufferFromOwnString in(out.str()); - - { - auto serialization = type_object->getDefaultSerialization(); - - ISerialization::DeserializeBinaryBulkSettings settings; - ISerialization::DeserializeBinaryBulkStatePtr state; - settings.position_independent_encoding = false; - settings.getter = [&in](const auto &) { return ∈ }; - - serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); - serialization->deserializeBinaryBulkWithMultipleStreams(result_column, column_string->size(), settings, state, nullptr); - } - - auto & column_object = assert_cast(*result_column->assumeMutable()); - column_object.finalize(); - - ASSERT_TRUE(column_object.size() == 2); - ASSERT_TRUE(column_object.getSubcolumns().size() == 4); - - auto check_subcolumn = [&](const auto & name, const auto & type_name, const std::vector & expected) - { - const auto & subcolumn = column_object.getSubcolumn(PathInData{name}); - ASSERT_EQ(subcolumn.getLeastCommonType()->getName(), type_name); - - const auto & data = subcolumn.getFinalizedColumn(); - for (size_t i = 0; i < expected.size(); ++i) - ASSERT_EQ( - 
applyVisitor(FieldVisitorToString(), data[i]), - applyVisitor(FieldVisitorToString(), expected[i])); - }; - - check_subcolumn("k1", "Int8", {1, 2}); - check_subcolumn("k2.k3", "Array(String)", {Array{"aa", "bb"}, Array{"cc", "", ""}}); - check_subcolumn("k2.k4", "Array(Int8)", {Array{2, 3}, Array{0, 5, 6}}); - check_subcolumn("k2.k5", "Array(Int8)", {Array{0, 0}, Array{4, 0, 0}}); + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, a.b UInt32, a.c Array(String))"); + auto serialization = type->getDefaultSerialization(); + Object object1 = Object{{"a.c", Array{"Str1", "Str2"}}, {"a.d", Field(42)}, {"a.e", Tuple{Field(43), "Str3"}}}; + WriteBufferFromOwnString ostr; + serialization->serializeBinary(object1, ostr, FormatSettings()); + ReadBufferFromString istr(ostr.str()); + Field object2; + serialization->deserializeBinary(object2, istr, FormatSettings()); + ASSERT_EQ(object1, object2.get()); } -#endif + +TEST(ObjectSerialization, ColumnBinarySerialization) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, a.b UInt32, a.c Array(String))"); + auto serialization = type->getDefaultSerialization(); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a.d", Field(42)}, {"a.e", Tuple{Field(43), "Str3"}}}); + WriteBufferFromOwnString ostr1; + serialization->serializeBinary(col_object, 0, ostr1, FormatSettings()); + ReadBufferFromString istr1(ostr1.str()); + serialization->deserializeBinary(col_object, istr1, FormatSettings()); + ASSERT_EQ(col_object[0], col_object[1]); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a.e", Field(42)}, {"b.d", Field(42)}, {"b.e", Tuple{Field(43), "Str3"}}, {"b.g", Field("Str4")}}); + WriteBufferFromOwnString ostr2; + serialization->serializeBinary(col_object, 2, ostr2, FormatSettings()); + ReadBufferFromString istr2(ostr2.str()); + serialization->deserializeBinary(col_object, istr2, FormatSettings()); + ASSERT_EQ(col_object[2], col_object[3]); +} + +TEST(ObjectSerialization, JSONSerialization) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, a.b UInt32, a.c Array(String))"); + auto serialization = type->getDefaultSerialization(); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a.d", Field(42)}, {"a.e", Tuple{Field(43), "Str3"}}}); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a", Tuple{Field(43), "Str3"}}, {"a.b.c", Field(42)}, {"a.b.e", Field(43)}, {"b.c.d.e", Field(42)}, {"b.c.d.g", Field(43)}, {"b.c.h.r", Field(44)}, {"c.g.h.t", Array{Field("Str"), Field("Str2")}}, {"h", Field("Str")}, {"j", Field("Str")}}); + WriteBufferFromOwnString buf1; + serialization->serializeTextJSON(col_object, 1, buf1, FormatSettings()); + ASSERT_EQ(buf1.str(), R"({"a":[43,"Str3"],"a":{"b":0,"b":{"c":42,"e":43},"c":["Str1","Str2"]},"b":{"c":{"d":{"e":42,"g":43},"h":{"r":44}}},"c":{"g":{"h":{"t":["Str","Str2"]}}},"h":"Str","j":"Str"})"); + WriteBufferFromOwnString buf2; + serialization->serializeTextJSONPretty(col_object, 1, buf2, FormatSettings(), 0); + ASSERT_EQ(buf2.str(), R"({ + "a" : [ + 43, + "Str3" + ], + "a" : { + "b" : 0, + "b" : { + "c" : 42, + "e" : 43 + }, + "c" : [ + "Str1", + "Str2" + ] + }, + "b" : { + "c" : { + "d" : { + "e" : 42, + "g" : 43 + }, + "h" : { + "r" : 44 + } + } + }, + "c" : { + "g" : { + "h" : { + "t" : [ + "Str", + "Str2" + 
] + } + } + }, + "h" : "Str", + "j" : "Str" +})"); + +} diff --git a/src/DataTypes/Utils.cpp b/src/DataTypes/Utils.cpp index e7e69e379af..a6e9452d7ef 100644 --- a/src/DataTypes/Utils.cpp +++ b/src/DataTypes/Utils.cpp @@ -216,6 +216,7 @@ bool canBeSafelyCasted(const DataTypePtr & from_type, const DataTypePtr & to_typ return false; } case TypeIndex::String: + case TypeIndex::ObjectDeprecated: case TypeIndex::Object: case TypeIndex::Set: case TypeIndex::Interval: diff --git a/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp b/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp index 4d0bfc67183..2e646d1e62a 100644 --- a/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp +++ b/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp @@ -32,7 +32,7 @@ extern const int UNSUPPORTED_METHOD; void check(const DataTypePtr & type) { -// std::cerr << "Check " << type->getName() << "\n"; + std::cerr << "Check " << type->getName() << "\n"; WriteBufferFromOwnString ostr; encodeDataType(type, ostr); ReadBufferFromString istr(ostr.str()); @@ -126,4 +126,7 @@ GTEST_TEST(DataTypesBinaryEncoding, EncodeAndDecode) check(DataTypeFactory::instance().get("Polygon")); check(DataTypeFactory::instance().get("MultiPolygon")); check(DataTypeFactory::instance().get("Tuple(Map(LowCardinality(String), Array(AggregateFunction(2, quantiles(0.1, 0.2), Float32))), Array(Array(Tuple(UInt32, Tuple(a Map(String, String), b Nullable(Date), c Variant(Tuple(g String, d Array(UInt32)), Date, Map(String, String)))))))")); + check(DataTypeFactory::instance().get("JSON")); + check(DataTypeFactory::instance().get("JSON(max_dynamic_paths=10)")); + check(DataTypeFactory::instance().get("JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, SKIP a.c, b.g String, SKIP l.d.f)")); } diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 4ca9afc49eb..133834180c0 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -996,6 +996,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep query_context->setSetting("allow_experimental_object_type", 1); query_context->setSetting("allow_experimental_variant_type", 1); query_context->setSetting("allow_experimental_dynamic_type", 1); + query_context->setSetting("allow_experimental_json_type", 1); query_context->setSetting("allow_experimental_annoy_index", 1); query_context->setSetting("allow_experimental_usearch_index", 1); query_context->setSetting("allow_experimental_bigint_types", 1); diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp index 58407a810c5..5ccc09c7e8f 100644 --- a/src/Formats/EscapingRuleUtils.cpp +++ b/src/Formats/EscapingRuleUtils.cpp @@ -463,7 +463,7 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo settings.json.read_arrays_as_strings, settings.json.try_infer_objects_as_tuples, settings.json.infer_incomplete_types_as_strings, - settings.json.allow_object_type, + settings.json.allow_deprecated_object_type, settings.json.use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects); break; default: diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 3d92023318e..325e3bbb25a 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -144,12 +144,14 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.json.validate_types_from_metadata = 
settings.input_format_json_validate_types_from_metadata; format_settings.json.validate_utf8 = settings.output_format_json_validate_utf8; format_settings.json_object_each_row.column_for_object_name = settings.format_json_object_each_row_column_for_object_name; - format_settings.json.allow_object_type = context->getSettingsRef().allow_experimental_object_type; + format_settings.json.allow_deprecated_object_type = context->getSettingsRef().allow_experimental_object_type; + format_settings.json.allow_json_type = context->getSettingsRef().allow_experimental_json_type; format_settings.json.compact_allow_variable_number_of_columns = settings.input_format_json_compact_allow_variable_number_of_columns; format_settings.json.try_infer_objects_as_tuples = settings.input_format_json_try_infer_named_tuples_from_objects; format_settings.json.throw_on_bad_escape_sequence = settings.input_format_json_throw_on_bad_escape_sequence; format_settings.json.ignore_unnecessary_fields = settings.input_format_json_ignore_unnecessary_fields; format_settings.json.ignore_key_case = settings.input_format_json_ignore_key_case; + format_settings.json.type_json_skip_duplicated_paths = settings.type_json_skip_duplicated_paths; format_settings.null_as_default = settings.input_format_null_as_default; format_settings.force_null_for_omitted_fields = settings.input_format_force_null_for_omitted_fields; format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 8b38ffba524..c2bc1c113eb 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -227,7 +227,8 @@ struct FormatSettings bool try_infer_numbers_from_strings = false; bool validate_types_from_metadata = true; bool validate_utf8 = false; - bool allow_object_type = false; + bool allow_deprecated_object_type = false; + bool allow_json_type = false; bool valid_output_on_exception = false; bool compact_allow_variable_number_of_columns = false; bool try_infer_objects_as_tuples = false; @@ -235,6 +236,7 @@ struct FormatSettings bool throw_on_bad_escape_sequence = true; bool ignore_unnecessary_fields = true; bool ignore_key_case = false; + bool type_json_skip_duplicated_paths = false; } json{}; struct diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 242d2dc9f80..db984d67819 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -8,7 +8,6 @@ #if USE_RAPIDJSON #include #endif - #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #include #include @@ -38,8 +38,10 @@ #include #include #include +#include #include #include +#include #include @@ -53,6 +55,7 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int INCORRECT_DATA; } template @@ -172,7 +175,7 @@ bool tryGetNumericValueFromJSONElement( { if (!tryReadFloatText(value, rb) || !rb.eof()) { - error = fmt::format("cannot parse {} value here: {}", TypeName, element.getString()); + error = fmt::format("cannot parse {} value here: \"{}\"", TypeName, element.getString()); return false; } } @@ -186,13 +189,13 @@ bool tryGetNumericValueFromJSONElement( rb.position() = rb.buffer().begin(); if (!tryReadFloatText(tmp_float, rb) || !rb.eof()) { - error = fmt::format("cannot parse {} value here: {}", TypeName, element.getString()); + error = fmt::format("cannot parse {} value here: \"{}\"", TypeName, element.getString()); return false; } if (!accurate::convertNumeric(tmp_float, 
value)) { - error = fmt::format("cannot parse {} value here: {}", TypeName, element.getString()); + error = fmt::format("cannot parse {} value here: \"{}\"", TypeName, element.getString()); return false; } } @@ -1104,7 +1107,7 @@ public: } } - if (!were_valid_elements) + if (data.size() != old_size && !were_valid_elements) { data.popBack(data.size() - old_size); return false; @@ -1174,7 +1177,7 @@ public: else { set_size(old_size); - error += fmt::format("(during reading tuple {} element)", index); + error += fmt::format(" (during reading tuple {} element)", index); return false; } } @@ -1202,7 +1205,7 @@ public: else { set_size(old_size); - error += fmt::format("(during reading tuple {} element)", index); + error += fmt::format(" (during reading tuple {} element)", index); return false; } } @@ -1221,7 +1224,7 @@ public: else if (!insert_settings.insert_default_on_invalid_elements_in_complex_types) { set_size(old_size); - error += fmt::format("(during reading tuple element \"{}\")", key); + error += fmt::format(" (during reading tuple element \"{}\")", key); return false; } } @@ -1288,7 +1291,7 @@ public: { key_col.popBack(key_col.size() - offsets.back()); value_col.popBack(value_col.size() - offsets.back()); - error += fmt::format("(during reading value of key \"{}\")", pair.first); + error += fmt::format(" (during reading value of key \"{}\")", pair.first); return false; } } @@ -1346,6 +1349,13 @@ template class DynamicNode : public JSONExtractTreeNode { public: + DynamicNode( + size_t max_dynamic_paths_for_object_ = DataTypeObject::DEFAULT_MAX_SEPARATELY_STORED_PATHS, + size_t max_dynamic_types_for_object_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) + : max_dynamic_paths_for_object(max_dynamic_paths_for_object_), max_dynamic_types_for_object(max_dynamic_types_for_object_) + { + } + bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, @@ -1362,13 +1372,22 @@ public: } auto & variant_column = column_dynamic.getVariantColumn(); - auto variant_info = column_dynamic.getVariantInfo(); - /// Second, infer ClickHouse type for this element and add it as a new variant. - auto element_type = elementToDataType(element, format_settings); + auto & variant_info = column_dynamic.getVariantInfo(); + /// First, infer ClickHouse type for this element and add it as a new variant. + auto element_type = removeNullable(elementToDataType(element, format_settings)); + if (!checkIfTypeIsComplete(element_type)) + { + throw Exception( + ErrorCodes::INCORRECT_DATA, + "Cannot infer the type of JSON element {}, because it contains only nulls. To use String type for elements with incomplete " + "type, enable setting input_format_json_infer_incomplete_types_as_strings", + jsonElementToString(element, format_settings)); + } + if (column_dynamic.addNewVariant(element_type)) { auto node = buildJSONExtractTree(element_type, "Dynamic inference"); - auto global_discriminator = variant_info.variant_name_to_discriminator[element_type->getName()]; + auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type->getName()); auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discriminator); if (!node->insertResultToColumn(variant, element, insert_settings, format_settings, error)) return false; @@ -1384,7 +1403,7 @@ public: /// We couldn't insert element into any existing variant, add String variant and read value as String. 
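The logic above first tries to insert the element into a newly inferred variant and only then falls back to storing the raw value as a String. A loose standalone analogue of that fallback, using std::variant over a few scalar kinds instead of the Dynamic/Variant column machinery:

// Try progressively more specific interpretations of a JSON scalar token and
// fall back to keeping the raw string. Illustrative only.
#include <charconv>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <string_view>
#include <variant>

using Inferred = std::variant<int64_t, double, bool, std::string>;

Inferred inferScalar(std::string_view token)
{
    if (token == "true") return true;
    if (token == "false") return false;

    int64_t i = 0;
    auto [p, ec] = std::from_chars(token.data(), token.data() + token.size(), i);
    if (ec == std::errc() && p == token.data() + token.size())
        return i;

    std::string buf(token);
    char * end = nullptr;
    double d = std::strtod(buf.c_str(), &end);
    if (!buf.empty() && end == buf.c_str() + buf.size())
        return d;

    return std::string(token); // nothing more specific fits: keep the raw text
}

int main()
{
    for (std::string_view t : {"42", "3.14", "true", "\"Hello\""})
        std::cout << t << " -> variant index " << inferScalar(t).index() << '\n';
}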
column_dynamic.addStringVariant(); - auto string_global_discriminator = variant_info.variant_name_to_discriminator["String"]; + auto string_global_discriminator = variant_info.variant_name_to_discriminator.at("String"); auto & string_column = variant_column.getVariantByGlobalDiscriminator(string_global_discriminator); if (!getStringNode()->insertResultToColumn(string_column, element, insert_settings, format_settings, error)) return false; @@ -1400,16 +1419,18 @@ public: return string_node; } - static DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings) + DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings) const { JSONInferenceInfo json_inference_info; auto type = elementToDataTypeImpl(element, format_settings, json_inference_info); transformFinalInferredJSONTypeIfNeeded(type, format_settings, &json_inference_info); + if (format_settings.schema_inference_make_columns_nullable) + type = makeNullableRecursively(type); return type; } private: - static DataTypePtr elementToDataTypeImpl(const typename JSONParser::Element & element, const FormatSettings & format_settings, JSONInferenceInfo & json_inference_info) + DataTypePtr elementToDataTypeImpl(const typename JSONParser::Element & element, const FormatSettings & format_settings, JSONInferenceInfo & json_inference_info) const { switch (element.type()) { @@ -1452,10 +1473,10 @@ private: DataTypes types; types.reserve(array.size()); for (auto value : array) - types.push_back(makeNullableSafe(elementToDataTypeImpl(value, format_settings, json_inference_info))); + types.push_back(elementToDataTypeImpl(value, format_settings, json_inference_info)); if (types.empty()) - return std::make_shared(makeNullable(std::make_shared())); + return std::make_shared(std::make_shared()); if (checkIfTypesAreEqual(types)) return std::make_shared(types.back()); @@ -1482,12 +1503,231 @@ private: return std::make_shared(types); } - case ElementType::OBJECT: { - /// TODO: Use new JSON type here when it's ready. 
- return std::make_shared(std::make_shared(), makeNullable(std::make_shared())); + case ElementType::OBJECT: + { + return std::make_shared(DataTypeObject::SchemaFormat::JSON, max_dynamic_paths_for_object, max_dynamic_types_for_object); } } } + + size_t max_dynamic_paths_for_object; + size_t max_dynamic_types_for_object; +}; + +template +class ObjectJSONNode : public JSONExtractTreeNode +{ +public: + ObjectJSONNode( + std::unordered_map>> typed_path_nodes_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_prefixes_to_skip_, + const std::vector & path_regexps_to_skip_, + size_t max_dynamic_paths_, + size_t max_dynamic_types) + : typed_path_nodes(std::move(typed_path_nodes_)) + , paths_to_skip(paths_to_skip_) + , path_prefixes_to_skip(path_prefixes_to_skip_) + , dynamic_node(std::make_unique>( + max_dynamic_paths_ / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, + std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))) + , dynamic_serialization(std::make_shared()) + { + for (const auto & regexp : path_regexps_to_skip_) + path_regexps_to_skip.emplace_back(regexp); + } + + bool insertResultToColumn(IColumn & column, const typename JSONParser::Element & element, const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override + { + if (element.isNull() && format_settings.null_as_default) + { + column.insertDefault(); + return true; + } + + if (!element.isObject()) + { + error = fmt::format("Cannot read JSON object from JSON element: {}", jsonElementToString(element, format_settings)); + return false; + } + + auto & column_object = assert_cast(column); + size_t prev_size = column_object.size(); + + /// Paths in shared data should be sorted, so we cannot insert paths there during traverse. + /// Instead we collect all paths and values that should go to shared data, sort them and insert later. + /// It's not optimal, but it's a price we pay for faster reading of subcolumns. + std::vector> paths_and_values_for_shared_data; + if (!traverseAndInsert(column_object, element, "", insert_settings, format_settings, paths_and_values_for_shared_data, prev_size, error)) + { + /// If there was an error, restore previous state. + SerializationObject::restoreColumnObject(column_object, prev_size); + return false; + } + + /// Fill shared data. + auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end()); + for (size_t i = 0; i != paths_and_values_for_shared_data.size(); ++i) + { + const auto & [path, value] = paths_and_values_for_shared_data[i]; + /// Check if we duplicated paths. + if (i != 0 && path == paths_and_values_for_shared_data[i - 1].first) + { + if (!format_settings.json.type_json_skip_duplicated_paths) + { + error = fmt::format("Duplicate path found during parsing JSON object: {}", path); + SerializationObject::restoreColumnObject(column_object, prev_size); + return false; + } + } + else + { + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value.data(), value.size()); + } + } + column_object.getSharedDataOffsets().push_back(shared_data_paths->size()); + + /// Fill remaining typed and dynamic paths. 
+ for (auto & [_, typed_column] : column_object.getTypedPaths()) + { + if (typed_column->size() == prev_size) + typed_column->insertDefault(); + } + + for (auto & [_, dynamic_column] : column_object.getDynamicPaths()) + { + if (dynamic_column->size() == prev_size) + dynamic_column->insertDefault(); + } + + return true; + } + +private: + bool traverseAndInsert( + ColumnObject & column_object, + const typename JSONParser::Element & element, + const String & current_path, + const JSONExtractInsertSettings & insert_settings, + const FormatSettings & format_settings, + std::vector> & paths_and_values_for_shared_data, + size_t current_size, + String & error) const + { + if (element.isObject() && !typed_path_nodes.contains(current_path)) + { + for (auto [key, value] : element.getObject()) + { + String path = current_path; + if (!path.empty()) + path.append("."); + path += key; + if (!traverseAndInsert(column_object, value, path, insert_settings, format_settings, paths_and_values_for_shared_data, current_size, error)) + return false; + } + + return true; + } + + if (shouldSkipPath(current_path)) + return true; + + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + /// Check if we have this path in typed paths. + if (auto typed_it = typed_paths.find(current_path); typed_it != typed_paths.end()) + { + /// Check if we already had this path. + if (typed_it->second->size() > current_size) + { + if (!format_settings.json.type_json_skip_duplicated_paths) + { + error = fmt::format("Duplicate path found during parsing JSON object: {}", current_path); + return false; + } + } + else if (!typed_path_nodes.at(current_path)->insertResultToColumn(*typed_it->second, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + } + /// Check if we have this path in dynamic paths. + else if (auto dynamic_it = dynamic_paths.find(current_path); dynamic_it != dynamic_paths.end()) + { + /// Check if we already had this path. + if (dynamic_it->second->size() > current_size) + { + if (!format_settings.json.type_json_skip_duplicated_paths) + { + error = fmt::format("Duplicate path found during parsing JSON object: {}", current_path); + return false; + } + } + else if (!dynamic_node->insertResultToColumn(*dynamic_it->second, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + } + /// Try to add a new dynamic path. + else if (auto dynamic_column = column_object.tryToAddNewDynamicPath(current_path)) + { + if (!dynamic_node->insertResultToColumn(*dynamic_column, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + } + /// Otherwise this path should go to the shared data. + /// Don't insert null values into shared data. + /// We consider null equivalent to the absence of this path. 
+ else if (!element.isNull()) + { + auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + if (!dynamic_node->insertResultToColumn(*tmp_dynamic_column, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + + paths_and_values_for_shared_data.emplace_back(current_path, ""); + WriteBufferFromString buf(paths_and_values_for_shared_data.back().second); + dynamic_serialization->serializeBinary(*tmp_dynamic_column, 0, buf, format_settings); + } + + return true; + } + + bool shouldSkipPath(const String & path) const + { + if (paths_to_skip.contains(path)) + return true; + + for (const auto & prefix : path_prefixes_to_skip) + { + if (path.starts_with(prefix)) + return true; + } + + for (const auto & regexp : path_regexps_to_skip) + { + if (re2::RE2::FullMatch(path, regexp)) + return true; + } + + return false; + } + + std::unordered_map>> typed_path_nodes; + std::unordered_set paths_to_skip; + std::vector path_prefixes_to_skip; + std::list path_regexps_to_skip; + std::unique_ptr> dynamic_node; + std::shared_ptr dynamic_serialization; }; } @@ -1634,6 +1874,27 @@ std::unique_ptr> buildJSONExtractTree(const Data } case TypeIndex::Dynamic: return std::make_unique>(); + case TypeIndex::Object: + { + const auto & object_type = assert_cast(*type); + const auto & typed_paths = object_type.getTypedPaths(); + std::unordered_map>> typed_path_nodes; + typed_path_nodes.reserve(typed_paths.size()); + for (const auto & [path, path_type] : typed_paths) + typed_path_nodes[path] = buildJSONExtractTree(path_type, source_for_exception_message); + + switch (object_type.getSchemaFormat()) + { + case DataTypeObject::SchemaFormat::JSON: + return std::make_unique>( + std::move(typed_path_nodes), + object_type.getPathsToSkip(), + object_type.getPathPrefixesToSkip(), + object_type.getPathRegexpsToSkip(), + object_type.getMaxDynamicPaths(), + object_type.getMaxDynamicTypes()); + } + } default: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index f0985f4a6b7..f859ad73ea6 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -1,13 +1,13 @@ -#include +#include +#include +#include +#include +#include #include #include -#include #include +#include #include -#include -#include -#include -#include #include diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 3c374ada9e6..0f8dc83bd52 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -1,22 +1,22 @@ -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include -#include #include #include @@ -1176,8 +1176,8 @@ namespace { if constexpr (is_json) { - if (settings.json.allow_object_type) - return std::make_shared("json", true); + if (settings.json.allow_deprecated_object_type) + return std::make_shared("json", true); } /// Empty Map is Map(Nothing, Nothing) @@ -1186,8 +1186,8 @@ namespace if constexpr (is_json) { - if (settings.json.allow_object_type) - return std::make_shared("json", true); + if (settings.json.allow_deprecated_object_type) + return std::make_shared("json", true); if (settings.json.read_objects_as_strings) return 
std::make_shared(); @@ -1242,7 +1242,7 @@ namespace { if constexpr (is_json) { - if (!settings.json.allow_object_type && settings.json.try_infer_objects_as_tuples) + if (!settings.json.allow_deprecated_object_type && settings.json.try_infer_objects_as_tuples) return tryInferJSONPaths(buf, settings, json_info, depth); } @@ -1525,15 +1525,15 @@ DataTypePtr makeNullableRecursively(DataTypePtr type) return nested_type ? std::make_shared(nested_type) : nullptr; } - if (which.isObject()) + if (which.isObjectDeprecated()) { - const auto * object_type = assert_cast(type.get()); + const auto * object_type = assert_cast(type.get()); if (object_type->hasNullableSubcolumns()) return type; - return std::make_shared(object_type->getSchemaFormat(), true); + return std::make_shared(object_type->getSchemaFormat(), true); } - return makeNullable(type); + return makeNullableSafe(type); } NamesAndTypesList getNamesAndRecursivelyNullableTypes(const Block & header) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index f3e54d2fbd9..0b2cfcab165 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4,17 +4,18 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include #include #include -#include #include #include #include @@ -24,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -34,18 +36,18 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include #include #include -#include #include +#include #include #include #include @@ -3878,7 +3880,7 @@ private: "Expected tuple with {} subcolumn, but got {} subcolumns", tuple_size, column_tuple.getColumns().size()); - auto res = ColumnObject::create(has_nullable_subcolumns); + auto res = ColumnObjectDeprecated::create(has_nullable_subcolumns); for (size_t i = 0; i < tuple_size; ++i) { ColumnsWithTypeAndName element = {{column_tuple.getColumns()[i], from_types[i], "" }}; @@ -3955,7 +3957,7 @@ private: subcolumn->insertDefault(); } - auto column_object = ColumnObject::create(has_nullable_subcolumns); + auto column_object = ColumnObjectDeprecated::create(has_nullable_subcolumns); for (auto && [key, subcolumn] : subcolumns) { PathInData path(key.toView()); @@ -3966,7 +3968,7 @@ private: }; } - WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_type) const + WrapperType createObjectDeprecatedWrapper(const DataTypePtr & from_type, const DataTypeObjectDeprecated * to_type) const { if (const auto * from_tuple = checkAndGetDataType(from_type.get())) { @@ -3985,12 +3987,12 @@ private: return res; }; } - else if (checkAndGetDataType(from_type.get())) + else if (checkAndGetDataType(from_type.get())) { return [is_nullable = to_type->hasNullableSubcolumns()] (ColumnsWithTypeAndName & arguments, const DataTypePtr & , const ColumnNullable * , size_t) -> ColumnPtr { - const auto & column_object = assert_cast(*arguments.front().column); - auto res = ColumnObject::create(is_nullable); + const auto & column_object = assert_cast(*arguments.front().column); + auto res = ColumnObjectDeprecated::create(is_nullable); for (size_t i = 0; i < column_object.size(); i++) res->insert(column_object[i]); @@ -4002,6 +4004,24 @@ private: throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to Object can be performed only from flatten named Tuple, Map or String. 
Got: {}", from_type->getName()); } + + WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_object) const + { + if (checkAndGetDataType(from_type.get())) + { + return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) + { + auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); + res->finalize(); + return res; + }; + } + + /// TODO: support CAST between JSON types with different parameters + /// support CAST from Map to JSON + /// support CAST from Tuple to JSON + throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String. Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); + } WrapperType createVariantToVariantWrapper(const DataTypeVariant & from_variant, const DataTypeVariant & to_variant) const { @@ -5136,6 +5156,8 @@ private: return createTupleWrapper(from_type, checkAndGetDataType(to_type.get())); case TypeIndex::Map: return createMapWrapper(from_type, checkAndGetDataType(to_type.get())); + case TypeIndex::ObjectDeprecated: + return createObjectDeprecatedWrapper(from_type, checkAndGetDataType(to_type.get())); case TypeIndex::Object: return createObjectWrapper(from_type, checkAndGetDataType(to_type.get())); case TypeIndex::AggregateFunction: diff --git a/src/Functions/JSONEmpty.cpp b/src/Functions/JSONEmpty.cpp new file mode 100644 index 00000000000..e932074ccb8 --- /dev/null +++ b/src/Functions/JSONEmpty.cpp @@ -0,0 +1,126 @@ +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; +} + + +namespace +{ + +/// Implements the function JSONEmpty which returns true if provided JSON object is empty and false otherwise. +class FunctionJSONEmpty : public IFunction +{ +public: + static constexpr auto name = "JSONEmpty"; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + std::string getName() const override + { + return name; + } + + size_t getNumberOfArguments() const override { return 1; } + bool useDefaultImplementationForConstants() const override { return true; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + DataTypePtr getReturnTypeImpl(const DataTypes & data_types) const override + { + if (data_types.size() != 1 || data_types[0]->getTypeId() != TypeIndex::Object) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires single argument with type JSON", getName()); + return std::make_shared(); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + { + const ColumnWithTypeAndName & elem = arguments[0]; + const auto * object_column = typeid_cast(elem.column.get()); + if (!object_column) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected column type in function {}. Expected Object column, got {}", getName(), elem.column->getName()); + + auto res = DataTypeUInt8().createColumn(); + auto & data = typeid_cast(*res).getData(); + const auto & typed_paths = object_column->getTypedPaths(); + size_t size = object_column->size(); + /// If object column has at least 1 typed path, it will never be empty, because these paths always have values. 
+ if (!typed_paths.empty()) + { + data.resize_fill(size, 0); + return res; + } + + const auto & dynamic_paths = object_column->getDynamicPaths(); + const auto & shared_data = object_column->getSharedDataPtr(); + data.reserve(size); + for (size_t i = 0; i != size; ++i) + { + bool empty = true; + /// Check if there are no paths in shared data. + if (!shared_data->isDefaultAt(i)) + { + empty = false; + } + /// Check that all dynamic paths have NULL value in this row. + else + { + for (const auto & [path, column] : dynamic_paths) + { + if (!column->isNullAt(i)) + { + empty = false; + break; + } + } + } + + data.push_back(empty); + } + + return res; + } +}; + +} + +REGISTER_FUNCTION(JSONEmpty) +{ + factory.registerFunction(FunctionDocumentation{ + .description = R"( +Checks whether the input JSON object is empty. +)", + .syntax = {"JSONEmpty(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {}}, {"json" : {"b" : "Hello"}}, {"json" : {}} +SELECT json, JSONEmpty(json) FROM test; +)", + R"( +┌─json──────────┬─JSONEmpty(json)─┐ +│ {"a":"42"} │ 0 │ +│ {} │ 1 │ +│ {"b":"Hello"} │ 0 │ +│ {} │ 1 │ +└───────────────┴─────────────────┘ + +)"}}}, + .categories{"JSON"}, + }); +} + +} diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp new file mode 100644 index 00000000000..bbdf1385cf2 --- /dev/null +++ b/src/Functions/JSONPaths.cpp @@ -0,0 +1,339 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + +enum class PathsMode +{ + ALL_PATHS, + DYNAMIC_PATHS, + SHARED_DATA_PATHS, +}; + +struct JSONAllPathsImpl +{ + static constexpr auto name = "JSONAllPaths"; + static constexpr auto paths_mode = PathsMode::ALL_PATHS; + static constexpr auto with_types = false; +}; + +struct JSONAllPathsWithTypesImpl +{ + static constexpr auto name = "JSONAllPathsWithTypes"; + static constexpr auto paths_mode = PathsMode::ALL_PATHS; + static constexpr auto with_types = true; +}; + +struct JSONDynamicPathsImpl +{ + static constexpr auto name = "JSONDynamicPaths"; + static constexpr auto paths_mode = PathsMode::DYNAMIC_PATHS; + static constexpr auto with_types = false; +}; + +struct JSONDynamicPathsWithTypesImpl +{ + static constexpr auto name = "JSONDynamicPathsWithTypes"; + static constexpr auto paths_mode = PathsMode::DYNAMIC_PATHS; + static constexpr auto with_types = true; +}; + +struct JSONSharedDataPathsImpl +{ + static constexpr auto name = "JSONSharedDataPaths"; + static constexpr auto paths_mode = PathsMode::SHARED_DATA_PATHS; + static constexpr auto with_types = false; +}; + +struct JSONSharedDataPathsWithTypesImpl +{ + static constexpr auto name = "JSONSharedDataPathsWithTypes"; + static constexpr auto paths_mode = PathsMode::SHARED_DATA_PATHS; + static constexpr auto with_types = true; +}; + +/// Implements functions that extract paths and types from JSON object column. 
+template +class FunctionJSONPaths : public IFunction +{ +public: + static constexpr auto name = Impl::name; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + std::string getName() const override + { + return name; + } + + size_t getNumberOfArguments() const override { return 1; } + bool useDefaultImplementationForConstants() const override { return true; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + DataTypePtr getReturnTypeImpl(const DataTypes & data_types) const override + { + if (data_types.size() != 1 || data_types[0]->getTypeId() != TypeIndex::Object) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires single argument with type JSON", getName()); + + if constexpr (Impl::with_types) + return std::make_shared(std::make_shared(), std::make_shared()); + return std::make_shared(std::make_shared()); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + { + const ColumnWithTypeAndName & elem = arguments[0]; + const auto * column_object = typeid_cast(elem.column.get()); + if (!column_object) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected column type in function {}. Expected Object column, got {}", getName(), elem.column->getName()); + + const auto & type_object = assert_cast(*elem.type); + if constexpr (Impl::with_types) + return executeWithTypes(*column_object, type_object); + return executeWithoutTypes(*column_object); + } + +private: + ColumnPtr executeWithoutTypes(const ColumnObject & column_object) const + { + if constexpr (Impl::paths_mode == PathsMode::SHARED_DATA_PATHS) + { + /// No need to do anything, we already have a column with all sorted paths in shared data. + const auto & shared_data_array = column_object.getSharedDataNestedColumn(); + const auto & shared_data_paths = assert_cast(shared_data_array.getData()).getColumnPtr(0); + return ColumnArray::create(shared_data_paths, shared_data_array.getOffsetsPtr()); + } + + auto res = ColumnArray::create(ColumnString::create()); + auto & offsets = res->getOffsets(); + ColumnString & data = assert_cast(res->getData()); + + if constexpr (Impl::paths_mode == PathsMode::DYNAMIC_PATHS) + { + /// Collect all dynamic paths. + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + std::vector dynamic_paths; + dynamic_paths.reserve(dynamic_path_columns.size()); + for (const auto & [path, _] : dynamic_path_columns) + dynamic_paths.push_back(path); + /// We want the resulting arrays of paths to be sorted for consistency. + std::sort(dynamic_paths.begin(), dynamic_paths.end()); + + for (const auto & path : dynamic_paths) + data.insertData(path.data(), path.size()); + offsets.push_back(data.size()); + return res->replicate(IColumn::Offsets(1, column_object.size())); + } + + /// Collect all paths: typed, dynamic and paths from shared data. + std::vector sorted_dynamic_and_typed_paths; + const auto & typed_path_columns = column_object.getTypedPaths(); + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + for (const auto & [path, _] : typed_path_columns) + sorted_dynamic_and_typed_paths.push_back(path); + for (const auto & [path, _] : dynamic_path_columns) + sorted_dynamic_and_typed_paths.push_back(path); + + /// We want the resulting arrays of paths to be sorted for consistency. 
+ std::sort(sorted_dynamic_and_typed_paths.begin(), sorted_dynamic_and_typed_paths.end()); + + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + /// Merge sorted list of paths from shared data and sorted_dynamic_and_typed_paths + size_t sorted_paths_index = 0; + for (size_t j = start; j != end; ++j) + { + auto shared_data_path = shared_data_paths->getDataAt(j); + while (sorted_paths_index != sorted_dynamic_and_typed_paths.size() && sorted_dynamic_and_typed_paths[sorted_paths_index] < shared_data_path) + { + auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + data.insertData(path.data, path.size); + ++sorted_paths_index; + } + + data.insertData(shared_data_path.data, shared_data_path.size); + } + + for (; sorted_paths_index != sorted_dynamic_and_typed_paths.size(); ++sorted_paths_index) + { + auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + data.insertData(path.data, path.size); + } + + offsets.push_back(data.size()); + } + + return res; + } + + ColumnPtr executeWithTypes(const ColumnObject & column_object, const DataTypeObject & type_object) const + { + auto offsets_column = ColumnArray::ColumnOffsets::create(); + auto & offsets = offsets_column->getData(); + auto paths_column = ColumnString::create(); + auto types_column = ColumnString::create(); + + if constexpr (Impl::paths_mode == PathsMode::DYNAMIC_PATHS) + { + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + std::vector sorted_dynamic_paths; + sorted_dynamic_paths.reserve(dynamic_path_columns.size()); + for (const auto & [path, _] : dynamic_path_columns) + sorted_dynamic_paths.push_back(path); + /// We want the resulting arrays of paths and values to be sorted for consistency. + std::sort(sorted_dynamic_paths.begin(), sorted_dynamic_paths.end()); + + /// Iterate over all rows and extract types from dynamic columns. + for (size_t i = 0; i != column_object.size(); ++i) + { + for (auto & path : sorted_dynamic_paths) + { + auto type = getDynamicValueType(dynamic_path_columns.at(path), i); + paths_column->insertData(path.data(), path.size()); + types_column->insertData(type.data(), type.size()); + } + + offsets.push_back(types_column->size()); + } + + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); + } + + if constexpr (Impl::paths_mode == PathsMode::SHARED_DATA_PATHS) + { + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + /// Iterate over all rows and extract types from dynamic values in shared data. 
+ for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + for (size_t j = start; j != end; ++j) + { + paths_column->insertFrom(*shared_data_paths, j); + auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j)); + types_column->insertData(type_name.data(), type_name.size()); + } + + offsets.push_back(paths_column->size()); + } + + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); + } + + /// Iterate over all rows and extract types from dynamic columns from dynamic paths and from values in shared data. + std::vector> sorted_typed_and_dynamic_paths_with_types; + const auto & typed_path_types = type_object.getTypedPaths(); + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + sorted_typed_and_dynamic_paths_with_types.reserve(typed_path_types.size() + dynamic_path_columns.size()); + for (const auto & [path, type] : typed_path_types) + sorted_typed_and_dynamic_paths_with_types.emplace_back(path, type->getName()); + for (const auto & [path, _] : dynamic_path_columns) + sorted_typed_and_dynamic_paths_with_types.emplace_back(path, ""); + + /// We want the resulting arrays of paths and values to be sorted for consistency. + std::sort(sorted_typed_and_dynamic_paths_with_types.begin(), sorted_typed_and_dynamic_paths_with_types.end()); + + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + /// Merge sorted list of paths and values from shared data and sorted_typed_and_dynamic_paths_with_types + size_t sorted_paths_index = 0; + for (size_t j = start; j != end; ++j) + { + auto shared_data_path = shared_data_paths->getDataAt(j); + auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j)); + + while (sorted_paths_index != sorted_typed_and_dynamic_paths_with_types.size() && sorted_typed_and_dynamic_paths_with_types[sorted_paths_index].first < shared_data_path) + { + auto & [path, type] = sorted_typed_and_dynamic_paths_with_types[sorted_paths_index]; + paths_column->insertData(path.data(), path.size()); + /// Update type for path from dynamic paths. 
+ if (auto it = dynamic_path_columns.find(path); it != dynamic_path_columns.end()) + type = getDynamicValueType(it->second, i); + types_column->insertData(type.data(), type.size()); + ++sorted_paths_index; + } + + paths_column->insertData(shared_data_path.data, shared_data_path.size); + types_column->insertData(type_name.data(), type_name.size()); + } + + for (; sorted_paths_index != sorted_typed_and_dynamic_paths_with_types.size(); ++sorted_paths_index) + { + auto & [path, type] = sorted_typed_and_dynamic_paths_with_types[sorted_paths_index]; + paths_column->insertData(path.data(), path.size()); + if (auto it = dynamic_path_columns.find(path); it != dynamic_path_columns.end()) + type = getDynamicValueType(it->second, i); + types_column->insertData(type.data(), type.size()); + } + + offsets.push_back(paths_column->size()); + } + + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); + } + + String getDynamicValueType(const ColumnPtr & column, size_t i) const + { + const ColumnDynamic * dynamic_column = checkAndGetColumn(column.get()); + const auto & variant_info = dynamic_column->getVariantInfo(); + const auto & variant_column = dynamic_column->getVariantColumn(); + auto global_discr = variant_column.globalDiscriminatorAt(i); + if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) + return "None"; + + return variant_info.variant_names[global_discr]; + } + + String getDynamicValueTypeFromSharedData(StringRef value) const + { + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + return isNothing(type) ? "None" : type->getName(); + } +}; + +} + +REGISTER_FUNCTION(JSONPaths) +{ + factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); +} + +} diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index c771fced73a..cbaf2b4c1d0 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -854,6 +855,12 @@ void readBackQuotedString(String & s, ReadBuffer & buf) readBackQuotedStringInto(s, buf); } +bool tryReadBackQuotedString(String & s, ReadBuffer & buf) +{ + s.clear(); + return readAnyQuotedStringInto<'`', false, String, bool>(s, buf); +} + void readBackQuotedStringWithSQLStyle(String & s, ReadBuffer & buf) { s.clear(); @@ -1269,6 +1276,81 @@ ReturnType readJSONArrayInto(Vector & s, ReadBuffer & buf) template void readJSONArrayInto, void>(PaddedPODArray & s, ReadBuffer & buf); template bool readJSONArrayInto, bool>(PaddedPODArray & s, ReadBuffer & buf); +std::string_view readJSONObjectAsViewPossiblyInvalid(ReadBuffer & buf, String & object_buffer) +{ + if (buf.eof() || *buf.position() != '{') + throw Exception(ErrorCodes::INCORRECT_DATA, "JSON object should start with '{{'"); + + char * start = buf.position(); + bool use_object_buffer = false; + object_buffer.clear(); + + ++buf.position(); + Int64 balance = 1; + bool quotes = false; + + while (true) + { + if (!buf.hasPendingData() && !use_object_buffer) + { + use_object_buffer = true; + object_buffer.append(start, buf.position() - start); + } + + if (buf.eof()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected EOF while reading JSON object"); + + char * next_pos = find_first_symbols<'\\', '{', '}', '"'>(buf.position(), buf.buffer().end()); + if (use_object_buffer) + object_buffer.append(buf.position(), next_pos - 
buf.position()); + buf.position() = next_pos; + + if (!buf.hasPendingData()) + continue; + + if (use_object_buffer) + object_buffer.push_back(*buf.position()); + + if (*buf.position() == '\\') + { + ++buf.position(); + if (!buf.hasPendingData() && !use_object_buffer) + { + use_object_buffer = true; + object_buffer.append(start, buf.position() - start); + } + + if (buf.eof()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected EOF while reading JSON object"); + + if (use_object_buffer) + object_buffer.push_back(*buf.position()); + ++buf.position(); + + continue; + } + + if (*buf.position() == '"') + quotes = !quotes; + else if (!quotes) // can be only opening_bracket or closing_bracket + balance += *buf.position() == '{' ? 1 : -1; + + ++buf.position(); + + if (balance == 0) + { + if (use_object_buffer) + return object_buffer; + return {start, buf.position()}; + } + + if (balance < 0) + break; + } + + throw Exception(ErrorCodes::INCORRECT_DATA, "JSON object should have equal number of opening and closing brackets"); +} + template ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) { @@ -1894,6 +1976,11 @@ static ReturnType readParsedValueInto(Vector & s, ReadBuffer & buf, ParseFunc pa return ReturnType(true); } +void readParsedValueIntoString(String & s, ReadBuffer & buf, std::function parse_func) +{ + readParsedValueInto(s, buf, std::move(parse_func)); +} + template static ReturnType readQuotedStringFieldInto(Vector & s, ReadBuffer & buf) { diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index ffba4fafb5c..236c71b1bd9 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -600,6 +600,7 @@ bool tryReadDoubleQuotedStringWithSQLStyle(String & s, ReadBuffer & buf); void readJSONString(String & s, ReadBuffer & buf, const FormatSettings::JSON & settings); void readBackQuotedString(String & s, ReadBuffer & buf); +bool tryReadBackQuotedString(String & s, ReadBuffer & buf); void readBackQuotedStringWithSQLStyle(String & s, ReadBuffer & buf); void readStringUntilEOF(String & s, ReadBuffer & buf); @@ -687,6 +688,10 @@ ReturnType readJSONObjectPossiblyInvalid(Vector & s, ReadBuffer & buf); template ReturnType readJSONArrayInto(Vector & s, ReadBuffer & buf); +/// Similar to readJSONObjectPossiblyInvalid but avoids copying the data if JSON object fits into current read buffer +/// If copying is unavoidable, it copies data into provided object_buffer and returns string_view to it. 
+std::string_view readJSONObjectAsViewPossiblyInvalid(ReadBuffer & buf, String & object_buffer); + template void readStringUntilWhitespaceInto(Vector & s, ReadBuffer & buf); @@ -1893,6 +1898,8 @@ struct PcgDeserializer } }; +void readParsedValueIntoString(String & s, ReadBuffer & buf, std::function parse_func); + template ReturnType readQuotedFieldInto(Vector & s, ReadBuffer & buf); diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 6b0de441e94..306635272f5 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -90,6 +90,12 @@ inline void writeUUIDBinary(const UUID & x, WriteBuffer & buf) writePODBinary(uuid.items[1], buf); } +template +void setValue(char * data, const T & value) +{ + memcpy(data, reinterpret_cast(&value), sizeof(T)); +} + template inline void writeIntBinary(const T & x, WriteBuffer & buf) { diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index a3c5a7ed3ed..858d441a92f 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -47,10 +47,10 @@ #include #include -#include -#include -#include #include +#include +#include +#include #include #include @@ -1172,9 +1172,9 @@ bool TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select if (object_pos != std::string::npos) { String object_name = it->substr(0, object_pos); - if (pair.name == object_name && pair.type->getTypeId() == TypeIndex::Object) + if (pair.name == object_name && pair.type->getTypeId() == TypeIndex::ObjectDeprecated) { - const auto * object_type = typeid_cast(pair.type.get()); + const auto * object_type = typeid_cast(pair.type.get()); if (object_type->getSchemaFormat() == "json" && object_type->hasNullableSubcolumns()) { missed_subcolumns.insert(*it); diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 184c263dbdb..665f1a6ecd2 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -459,7 +459,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID return src; } - else if (isObject(type)) + else if (isObjectDeprecated(type)) { if (src.getType() == Field::Types::Object) return src; /// Already in needed type. diff --git a/src/Interpreters/parseColumnsListForTableFunction.cpp b/src/Interpreters/parseColumnsListForTableFunction.cpp index 3529863a623..ed17a83e45e 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.cpp +++ b/src/Interpreters/parseColumnsListForTableFunction.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -110,7 +111,7 @@ void validateDataType(const DataTypePtr & type_to_check, const DataTypeValidatio if (!settings.allow_experimental_dynamic_type) { - if (data_type.hasDynamicSubcolumns()) + if (isDynamic(data_type)) { throw Exception( ErrorCodes::ILLEGAL_COLUMN, @@ -119,6 +120,19 @@ void validateDataType(const DataTypePtr & type_to_check, const DataTypeValidatio data_type.getName()); } } + + if (!settings.allow_experimental_json_type) + { + const auto * object_type = typeid_cast(&data_type); + if (object_type && object_type->getSchemaFormat() == DataTypeObject::SchemaFormat::JSON) + { + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Cannot create column with type '{}' because experimental JSON type is not allowed. 
" + "Set setting allow_experimental_json_type = 1 in order to allow it", + data_type.getName()); + } + } }; validate_callback(*type_to_check); diff --git a/src/Interpreters/parseColumnsListForTableFunction.h b/src/Interpreters/parseColumnsListForTableFunction.h index e2d2bc97ff7..ae5d574229f 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.h +++ b/src/Interpreters/parseColumnsListForTableFunction.h @@ -22,6 +22,7 @@ struct DataTypeValidationSettings , allow_suspicious_variant_types(settings.allow_suspicious_variant_types) , validate_nested_types(settings.validate_experimental_and_suspicious_types_inside_nested_types) , allow_experimental_dynamic_type(settings.allow_experimental_dynamic_type) + , allow_experimental_json_type(settings.allow_experimental_json_type) { } @@ -32,6 +33,7 @@ struct DataTypeValidationSettings bool allow_suspicious_variant_types = true; bool validate_nested_types = true; bool allow_experimental_dynamic_type = true; + bool allow_experimental_json_type = true; }; void validateDataType(const DataTypePtr & type, const DataTypeValidationSettings & settings); diff --git a/src/Parsers/ASTObjectTypeArgument.cpp b/src/Parsers/ASTObjectTypeArgument.cpp new file mode 100644 index 00000000000..afe875586ad --- /dev/null +++ b/src/Parsers/ASTObjectTypeArgument.cpp @@ -0,0 +1,75 @@ +#include +#include +#include + + +namespace DB +{ + +ASTPtr ASTObjectTypeArgument::clone() const +{ + auto res = std::make_shared(*this); + res->children.clear(); + + if (path_with_type) + { + res->path_with_type = path_with_type->clone(); + res->children.push_back(res->path_with_type); + } + else if (skip_path) + { + res->skip_path = skip_path->clone(); + res->children.push_back(res->skip_path); + } + else if (skip_path_prefix) + { + res->skip_path_prefix = skip_path_prefix->clone(); + res->children.push_back(res->skip_path_prefix); + } + else if (skip_path_regexp) + { + res->skip_path_regexp = skip_path_regexp->clone(); + res->children.push_back(res->skip_path_regexp); + } + else if (parameter) + { + res->parameter = parameter->clone(); + res->children.push_back(res->parameter); + } + + return res; +} + +void ASTObjectTypeArgument::formatImpl(const FormatSettings & parameters, FormatState & state, FormatStateStacked frame) const +{ + if (path_with_type) + { + path_with_type->formatImpl(parameters, state, frame); + } + else if (parameter) + { + parameter->formatImpl(parameters, state, frame); + } + else if (skip_path) + { + std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); + parameters.ostr << indent_str << "SKIP" << ' '; + skip_path->formatImpl(parameters, state, frame); + } + else if (skip_path_prefix) + { + std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); + parameters.ostr << indent_str << "SKIP PREFIX" << ' '; + skip_path_prefix->formatImpl(parameters, state, frame); + } + else if (skip_path_regexp) + { + std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); + parameters.ostr << indent_str << "SKIP REGEXP" << ' '; + skip_path_regexp->formatImpl(parameters, state, frame); + } +} + +} + + diff --git a/src/Parsers/ASTObjectTypeArgument.h b/src/Parsers/ASTObjectTypeArgument.h new file mode 100644 index 00000000000..3a6b2bcdd98 --- /dev/null +++ b/src/Parsers/ASTObjectTypeArgument.h @@ -0,0 +1,35 @@ +#pragma once + +#include + + +namespace DB +{ + +/** An argument of Object data type declaration (for example for JSON). 
Can contain one of: + * - pair (path, data type) + * - path that should be skipped + * - path prefix for paths that should be skipped + * - path regexp for paths that should be skipped + * - setting in a form of `setting=N` + */ +class ASTObjectTypeArgument : public IAST +{ +public: + ASTPtr path_with_type; + ASTPtr skip_path; + ASTPtr skip_path_prefix; + ASTPtr skip_path_regexp; + ASTPtr parameter; + + /** Get the text that identifies this element. */ + String getID(char) const override { return "ASTObjectTypeArgument"; } + ASTPtr clone() const override; + +protected: + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; +}; + + +} + diff --git a/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h index 0ae9ee4833c..73e633d6e6e 100644 --- a/src/Parsers/CommonParsers.h +++ b/src/Parsers/CommonParsers.h @@ -367,6 +367,7 @@ namespace DB MR_MACROS(POPULATE, "POPULATE") \ MR_MACROS(PRECEDING, "PRECEDING") \ MR_MACROS(PRECISION, "PRECISION") \ + MR_MACROS(PREFIX, "PREFIX") \ MR_MACROS(PREWHERE, "PREWHERE") \ MR_MACROS(PRIMARY_KEY, "PRIMARY KEY") \ MR_MACROS(PRIMARY, "PRIMARY") \ @@ -445,6 +446,7 @@ namespace DB MR_MACROS(SHOW, "SHOW") \ MR_MACROS(SIGNED, "SIGNED") \ MR_MACROS(SIMPLE, "SIMPLE") \ + MR_MACROS(SKIP, "SKIP") \ MR_MACROS(SOURCE, "SOURCE") \ MR_MACROS(SPATIAL, "SPATIAL") \ MR_MACROS(SQL_SECURITY, "SQL SECURITY") \ @@ -635,6 +637,32 @@ protected: } }; +class ParserTokenSequence : public IParserBase +{ +private: + std::vector token_types; +public: + ParserTokenSequence(const std::vector & token_types_) : token_types(token_types_) {} /// NOLINT + +protected: + const char * getName() const override { return "token sequence"; } + + bool parseImpl(Pos & pos, ASTPtr & /*node*/, Expected & expected) override + { + for (auto token_type : token_types) + { + if (pos->type != token_type) + { + expected.add(pos, getTokenName(token_type)); + return false; + } + + ++pos; + } + + return true; + } +}; // Parser always returns true and do nothing. 
class ParserNothing : public IParserBase diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index d4fc9a4bc4d..d3175110db9 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -285,19 +285,40 @@ bool ParserTableAsStringLiteralIdentifier::parseImpl(Pos & pos, ASTPtr & node, E bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - ASTPtr id_list; - if (!ParserList(std::make_unique(allow_query_parameter, highlight_type), std::make_unique(TokenType::Dot), false) - .parse(pos, id_list, expected)) - return false; + auto element_parser = std::make_unique(allow_query_parameter, highlight_type); + std::vector> delimiter_parsers; + delimiter_parsers.emplace_back(std::make_unique(std::vector{TokenType::Dot, TokenType::Colon}), SpecialDelimiter::JSON_PATH_DYNAMIC_TYPE); + delimiter_parsers.emplace_back(std::make_unique(std::vector{TokenType::Dot, TokenType::Caret}), SpecialDelimiter::JSON_PATH_PREFIX); + delimiter_parsers.emplace_back(std::make_unique(TokenType::Dot), SpecialDelimiter::NONE); std::vector parts; + SpecialDelimiter last_special_delimiter = SpecialDelimiter::NONE; ASTs params; - const auto & list = id_list->as(); - for (const auto & child : list.children) + + bool parsed_delimiter = true; + while (parsed_delimiter) { - parts.emplace_back(getIdentifierName(child)); + ASTPtr element; + if (!element_parser->parse(pos, element, expected)) + return false; + if (last_special_delimiter != SpecialDelimiter::NONE) + parts.push_back(static_cast(last_special_delimiter) + backQuote(getIdentifierName(element))); + else + parts.push_back(getIdentifierName(element)); + if (parts.back().empty()) - params.push_back(child->as()->getParam()); + params.push_back(element->as()->getParam()); + + parsed_delimiter = false; + for (const auto & [parser, special_delimiter] : delimiter_parsers) + { + if (parser->check(pos, expected)) + { + parsed_delimiter = true; + last_special_delimiter = special_delimiter; + break; + } + } } ParserKeyword s_uuid(Keyword::UUID); diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index 4e3f29bfe0c..39608bc583e 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -52,11 +52,21 @@ protected: /** An identifier, possibly containing a dot, for example, x_yz123 or `something special` or Hits.EventTime, - * possibly with UUID clause like `db name`.`table name` UUID 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' + * possibly with UUID clause like `db name`.`table name` UUID 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'. + * There is also special delimiters `.:` and `.^` for JSON type subcolumns. In case of special delimiter + * the next identifier part after it will include special delimiter and be back quoted always: json.a.b.:UInt32 -> ['json', 'a', 'b', ':`UInt32`']. + * It's needed to distinguish identifiers json.a.b.:UInt32 and json.a.b.`:UInt32`. 
*/ class ParserCompoundIdentifier : public IParserBase { public: + enum class SpecialDelimiter : char + { + NONE = '\0', + JSON_PATH_DYNAMIC_TYPE = ':', + JSON_PATH_PREFIX = '^', + }; + explicit ParserCompoundIdentifier(bool table_name_with_optional_uuid_ = false, bool allow_query_parameter_ = false, Highlight highlight_type_ = Highlight::identifier) : table_name_with_optional_uuid(table_name_with_optional_uuid_), allow_query_parameter(allow_query_parameter_), highlight_type(highlight_type_) { diff --git a/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp index b4601389696..43c4ab867d1 100644 --- a/src/Parsers/Lexer.cpp +++ b/src/Parsers/Lexer.cpp @@ -423,6 +423,8 @@ Token Lexer::nextTokenImpl() } case '?': return Token(TokenType::QuestionMark, token_begin, ++pos); + case '^': + return Token(TokenType::Caret, token_begin, ++pos); case ':': { ++pos; diff --git a/src/Parsers/Lexer.h b/src/Parsers/Lexer.h index 6f31d56292d..9dc0850abfd 100644 --- a/src/Parsers/Lexer.h +++ b/src/Parsers/Lexer.h @@ -45,6 +45,7 @@ namespace DB M(Arrow) /** ->. Should be distinguished from minus operator. */ \ M(QuestionMark) \ M(Colon) \ + M(Caret) \ M(DoubleColon) \ M(Equals) \ M(NotEquals) \ diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 014dc7bd3bf..9774a3a65cd 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -62,7 +62,7 @@ bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ASTPtr name; ASTPtr columns; - /// For now `name == 'Nested'`, probably alternative nested data structures will appear + /// For now `name == 'Nested'` or `name == 'Tuple'`, probably alternative nested data structures will appear if (!name_p.parse(pos, name, expected)) return false; diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index ad33c7e4558..2a492e8eaed 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -4,11 +4,13 @@ #include #include #include +#include #include #include #include #include +#include namespace DB { @@ -46,6 +48,89 @@ private: } }; +/// Parser of Object type argument. For example: JSON(some_parameter=N, some.path SomeType, SKIP skip.path, ...) 
+class ObjectArgumentParser : public IParserBase +{ +private: + const char * getName() const override { return "JSON data type optional argument"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override + { + auto argument = std::make_shared(); + + /// SKIP arguments + if (ParserKeyword(Keyword::SKIP).ignore(pos)) + { + /// SKIP REGEXP '' + if (ParserKeyword(Keyword::REGEXP).ignore(pos)) + { + ParserStringLiteral literal_parser; + ASTPtr literal; + if (!literal_parser.parse(pos, literal, expected)) + return false; + argument->skip_path_regexp = literal; + argument->children.push_back(argument->skip_path_regexp); + } + /// SKIP PREFIX some.path.prefix or SKIP some.path + else + { + bool is_prefix = ParserKeyword(Keyword::PREFIX).ignore(pos); + ParserCompoundIdentifier compound_identifier_parser; + ASTPtr compound_identifier; + if (!compound_identifier_parser.parse(pos, compound_identifier, expected)) + return false; + + if (is_prefix) + { + argument->skip_path_prefix = compound_identifier; + argument->children.push_back(argument->skip_path_prefix); + } + else + { + argument->skip_path = compound_identifier; + argument->children.push_back(argument->skip_path); + } + } + + node = argument; + return true; + } + + ParserCompoundIdentifier compound_identifier_parser; + ASTPtr identifier; + if (!compound_identifier_parser.parse(pos, identifier, expected)) + return false; + + /// some_parameter=N + if (pos->type == TokenType::Equals) + { + ++pos; + ASTPtr number; + ParserNumber number_parser; + if (!number_parser.parse(pos, number, expected)) + return false; + + argument->parameter = makeASTFunction("equals", identifier, number); + argument->children.push_back(argument->parameter); + node = argument; + return true; + } + + ParserDataType type_parser; + ASTPtr type; + if (!type_parser.parse(pos, type, expected)) + return false; + + auto name_and_type = std::make_shared(); + name_and_type->name = getIdentifierName(identifier); + name_and_type->type = type; + name_and_type->children.push_back(name_and_type->type); + argument->path_with_type = name_and_type; + argument->children.push_back(argument->path_with_type); + node = argument; + return true; + } +}; + /// Wrapper to allow mixed lists of nested and normal types. 
/// Parameters are either: /// - Nested table elements; @@ -70,17 +155,21 @@ private: return parser.parse(pos, node, expected); } - ParserNestedTable nested_parser; + if (type_name == "JSON") + { + ObjectArgumentParser parser; + return parser.parse(pos, node, expected); + } + + ParserNameTypePair name_and_type_parser; ParserDataType data_type_parser; ParserAllCollectionsOfLiterals literal_parser(false); const char * operators[] = {"=", "equals", nullptr}; ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); - if (pos->type == TokenType::BareWord && std::string_view(pos->begin, pos->size()) == "Nested") - return nested_parser.parse(pos, node, expected); - - return enum_parser.parse(pos, node, expected) + return name_and_type_parser.parse(pos, node, expected) + || enum_parser.parse(pos, node, expected) || literal_parser.parse(pos, node, expected) || data_type_parser.parse(pos, node, expected); } @@ -92,10 +181,6 @@ private: bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - ParserNestedTable nested; - if (nested.parse(pos, node, expected)) - return true; - String type_name; ParserIdentifier name_parser; diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp index f57e62adba9..c5e36046c62 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp @@ -166,10 +166,11 @@ JSONAsObjectRowInputFormat::JSONAsObjectRowInputFormat( const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) : JSONAsRowInputFormat(header_, in_, params_, format_settings_) { - if (!isObject(header_.getByPosition(0).type)) + const auto & type = header_.getByPosition(0).type; + if (!isObject(type) && !isObjectDeprecated(type)) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Input format JSONAsObject is only suitable for tables with a single column of type Object but the column type is {}", - header_.getByPosition(0).type->getName()); + "Input format JSONAsObject is only suitable for tables with a single column of type Object/JSON but the column type is {}", + type->getName()); } void JSONAsObjectRowInputFormat::readJSONObject(IColumn & column) @@ -184,13 +185,13 @@ Chunk JSONAsObjectRowInputFormat::getChunkForCount(size_t rows) return Chunk({std::move(column)}, rows); } -JSONAsObjectExternalSchemaReader::JSONAsObjectExternalSchemaReader(const FormatSettings & settings) +JSONAsObjectExternalSchemaReader::JSONAsObjectExternalSchemaReader(const FormatSettings & settings_) : settings(settings_) { - if (!settings.json.allow_object_type) + if (!settings.json.allow_deprecated_object_type && !settings.json.allow_json_type) throw Exception( ErrorCodes::ILLEGAL_COLUMN, - "Cannot infer the data structure in JSONAsObject format because experimental Object type is not allowed. Set setting " - "allow_experimental_object_type = 1 in order to allow it"); + "Cannot infer the data structure in JSONAsObject format because experimental Object/JSON type is not allowed. 
Set setting " + "allow_experimental_object_type = 1 or allow_experimental_json_type=1 in order to allow it"); } void registerInputFormatJSONAsString(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index 16112325a97..a5a436260ba 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -1,11 +1,12 @@ #pragma once -#include -#include +#include +#include +#include #include #include -#include -#include +#include +#include namespace DB { @@ -69,12 +70,17 @@ public: class JSONAsObjectExternalSchemaReader : public IExternalSchemaReader { public: - explicit JSONAsObjectExternalSchemaReader(const FormatSettings & settings); + explicit JSONAsObjectExternalSchemaReader(const FormatSettings & settings_); NamesAndTypesList readSchema() override { - return {{"json", std::make_shared("json", false)}}; + if (settings.json.allow_json_type) + return {{"json", std::make_shared(DataTypeObject::SchemaFormat::JSON)}}; + return {{"json", std::make_shared("json", false)}}; } + +private: + FormatSettings settings; }; } diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index ed4b1906635..42e545828cc 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -49,7 +49,7 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type) case TypeIndex::Float32: case TypeIndex::Float64: case TypeIndex::Nullable: - case TypeIndex::Object: + case TypeIndex::ObjectDeprecated: return false; case TypeIndex::Array: { diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index c07583cd39d..68c11060d88 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -701,17 +701,16 @@ std::optional ColumnsDescription::tryGetColumn(const GetColumns auto jt = subcolumns.get<0>().find(column_name); if (jt != subcolumns.get<0>().end()) return *jt; - } - /// Check for dynamic subcolumns. - auto [ordinary_column_name, dynamic_subcolumn_name] = Nested::splitName(column_name); - it = columns.get<1>().find(ordinary_column_name); - if (it != columns.get<1>().end() && it->type->hasDynamicSubcolumns()) - { - if (auto dynamic_subcolumn_type = it->type->tryGetSubcolumnType(dynamic_subcolumn_name)) - return NameAndTypePair(ordinary_column_name, dynamic_subcolumn_name, it->type, dynamic_subcolumn_type); + /// Check for dynamic subcolumns. 
+ auto [ordinary_column_name, dynamic_subcolumn_name] = Nested::splitName(column_name); + it = columns.get<1>().find(ordinary_column_name); + if (it != columns.get<1>().end() && it->type->hasDynamicSubcolumns()) + { + if (auto dynamic_subcolumn_type = it->type->tryGetSubcolumnType(dynamic_subcolumn_name)) + return NameAndTypePair(ordinary_column_name, dynamic_subcolumn_name, it->type, dynamic_subcolumn_type); + } } - return {}; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 52d12c9db7d..f4be7619fc8 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -164,7 +164,7 @@ void writeColumnSingleGranule( serialize_settings.position_independent_encoding = true; serialize_settings.low_cardinality_max_dictionary_size = 0; serialize_settings.use_compact_variant_discriminators_serialization = settings.use_compact_variant_discriminators_serialization; - serialize_settings.dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX; + serialize_settings.object_and_dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX; serialization->serializeBinaryBulkStatePrefix(*column.column, serialize_settings, state); serialization->serializeBinaryBulkWithMultipleStreams(*column.column, from_row, number_of_rows, serialize_settings, state); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 74ea89a8864..b3413437a31 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -131,6 +131,9 @@ void MergeTreeDataPartWriterWide::addStreams( { assert(!substream_path.empty()); + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return; + auto full_stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); String stream_name; @@ -204,6 +207,9 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett { return [&, this] (const ISerialization::SubstreamPath & substream_path) -> WriteBuffer * { + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return nullptr; + bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; auto stream_name = getStreamName(column, substream_path); @@ -366,6 +372,9 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( min_compress_block_size = settings.min_compress_block_size; getSerialization(name_and_type.name)->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return; + bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; auto stream_name = getStreamName(name_and_type, substream_path); @@ -404,6 +413,9 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( /// So that instead of the marks pointing to the end of the compressed block, there were marks pointing to the beginning of the next one. 
serialization->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return; + bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; auto stream_name = getStreamName(name_and_type, substream_path); @@ -655,7 +667,7 @@ void MergeTreeDataPartWriterWide::fillDataChecksums(MergeTreeDataPartChecksums & if (!serialization_states.empty()) { serialize_settings.getter = createStreamGetter(*it, written_offset_columns ? *written_offset_columns : offset_columns); - serialize_settings.dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX; + serialize_settings.object_and_dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX; getSerialization(it->name)->serializeBinaryBulkStateSuffix(serialize_settings, serialization_states[it->name]); } diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index a2b8f0ad96f..192866260de 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -206,7 +206,7 @@ void MergeTreeReaderCompact::readPrefix( serialization = getSerializationInPart(name_and_type); deserialize_settings.getter = buffer_getter; - deserialize_settings.dynamic_read_statistics = true; + deserialize_settings.object_and_dynamic_read_statistics = true; serialization->deserializeBinaryBulkStatePrefix(deserialize_settings, deserialize_binary_bulk_state_map[name_and_type.name], nullptr); } catch (Exception & e) diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index b6882fdced9..c017d7db786 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -326,7 +326,7 @@ void MergeTreeReaderWide::deserializePrefix( if (!deserialize_binary_bulk_state_map.contains(name)) { ISerialization::DeserializeBinaryBulkSettings deserialize_settings; - deserialize_settings.dynamic_read_statistics = true; + deserialize_settings.object_and_dynamic_read_statistics = true; deserialize_settings.getter = [&](const ISerialization::SubstreamPath & substream_path) { return getStream(/* seek_to_start = */true, substream_path, data_part_info_for_read->getChecksums(), name_and_type, 0, /* seek_to_mark = */false, current_task_last_mark, cache); From fb4317f12c355f3de7c633b5545952c77b48d583 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 14:46:33 +0000 Subject: [PATCH 0344/2361] Small clean up --- .../Passes/FunctionToSubcolumnsPass.cpp | 17 ----- src/Analyzer/Resolve/QueryAnalyzer.cpp | 12 ++-- src/DataTypes/DataTypeDynamic.cpp | 1 - src/DataTypes/ObjectUtils.cpp | 36 +++++----- src/DataTypes/Serializations/ISerialization.h | 65 ------------------- .../SerializationJSONElement.cpp | 3 - .../Serializations/SerializationJSONElement.h | 8 --- .../SerializationObjectElement.cpp | 3 - .../SerializationObjectElement.h | 8 --- .../gtest_data_types_binary_encoding.cpp | 2 +- src/Formats/JSONUtils.cpp | 12 ++-- src/Formats/SchemaInferenceUtils.cpp | 30 ++++----- src/Functions/FunctionsConversion.cpp | 7 +- src/Parsers/ParserDataType.cpp | 1 - 14 files changed, 50 insertions(+), 155 deletions(-) delete mode 100644 src/DataTypes/Serializations/SerializationJSONElement.cpp delete mode 100644 
src/DataTypes/Serializations/SerializationJSONElement.h delete mode 100644 src/DataTypes/Serializations/SerializationObjectElement.cpp delete mode 100644 src/DataTypes/Serializations/SerializationObjectElement.h diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp index 395ebbfa408..90051779a26 100644 --- a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -52,8 +52,6 @@ void optimizeFunctionEmpty(QueryTreeNodePtr &, FunctionNode & function_node, Col /// Replace `notEmpty(argument)` with `notEquals(argument.size0, 0)` if not positive /// `argument` may be Array or Map. - std::cerr << "optimizeFunctionEmpty " << ctx.column.name << "\n"; - NameAndTypePair column{ctx.column.name + ".size0", std::make_shared()}; auto & function_arguments_nodes = function_node.getArguments().getNodes(); @@ -236,18 +234,9 @@ std::tuple getTypedNodesForOptimizati return {}; auto column_in_table = storage_snapshot->tryGetColumn(GetColumnsOptions::All, column.name); - std::cerr << "getTypedNodesForOptimization " << column.name << "\n"; if (!column_in_table || !column_in_table->type->equals(*column.type)) - { - std::cerr << "getTypedNodesForOptimization FAIL\n"; - if (column_in_table) - std::cerr << column_in_table->type->getName() << "/" << column.type->getName() << "\n"; - else - std::cerr << "null\n"; return {}; - } - std::cerr << "getTypedNodesForOptimization OK\n"; return std::make_tuple(function_node, first_argument_column_node, table_node); } @@ -433,15 +422,9 @@ public: auto table_name = table_node->getStorage()->getStorageID().getFullTableName(); Identifier qualified_name({table_name, column.name}); - std::cerr << "FunctionToSubcolumnsVisitorSecondPass " << column.name << "\n"; - if (!identifiers_to_optimize.contains(qualified_name)) - { - std::cerr << "FunctionToSubcolumnsVisitorSecondPass FAIL\n"; return; - } - std::cerr << "FunctionToSubcolumnsVisitorSecondPass OK\n"; auto transformer_it = node_transformers.find({column.type->getTypeId(), function_node->getFunctionName()}); if (transformer_it != node_transformers.end()) { diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index b8d1e44fca7..911b8bc4606 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -1,14 +1,14 @@ #include -#include -#include -#include +#include +#include #include #include -#include -#include #include -#include +#include +#include +#include +#include #include #include diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index bb7768e16ad..88871f28f06 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -130,7 +130,6 @@ std::pair splitSubcolumnName(std::string_vie std::unique_ptr DataTypeDynamic::getDynamicSubcolumnData(std::string_view subcolumn_name, const DB::IDataType::SubstreamData & data, bool throw_if_null) const { auto [subcolumn_type_name, subcolumn_nested_name] = splitSubcolumnName(subcolumn_name); -// std::cerr << "Dynamic subcolumn: " << subcolumn_name << ", " << subcolumn_nested_name << "\n"; /// Check if requested subcolumn is a valid data type. 
auto subcolumn_type = DataTypeFactory::instance().tryGet(String(subcolumn_type_name)); diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 4df240daa8f..51588ce2ba3 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -3,28 +3,28 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include +#include +#include +#include namespace DB diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 51fcdaec6ab..22bbdb25c43 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -491,69 +491,4 @@ State * ISerialization::checkAndGetState(const StatePtr & state, const ISerializ return state_concrete; } - -WITH snaps AS ( - SELECT - rs.brokerfk brokerfk, - rs.brokerinstancefk brokerinstancefk, - rs.tradedt tradedt, - min(rs.snapdttm) snapdttmmin, - max(rs.snapdttm) snapdttmmax - FROM risksnapshothistory rs - WHERE rs.snapdttm >= now() - interval '10 minutes' - AND rs.snapdttm <= now() - GROUP BY rs.brokerfk, rs.brokerinstancefk, rs.tradedt - ORDER BY rs.brokerfk, rs.brokerinstancefk, rs.tradedt - ), - start_snaps_by_trade_dt AS ( - SELECT - rs.* - FROM risksnapshothistory rs - WHERE (rs.brokerfk, rs.brokerinstancefk, rs.tradedt, rs.snapdttm) IN ( - SELECT - s.brokerfk s_brokerfk, - s.brokerinstancefk s_brokerinstancefk, - s.tradedt s_tradedt, - s.snapdttmmin s_snapdttmmin - FROM snaps s) - ORDER BY rs.brokerfk, rs.brokerinstancefk, rs.snapdttm - ), - end_snaps_by_trade_dt AS ( - SELECT - rs.* - FROM risksnapshothistory rs - WHERE (rs.brokerfk, rs.brokerinstancefk, rs.tradedt, rs.snapdttm) IN ( - SELECT - s.brokerfk s_brokerfk, - s.brokerinstancefk s_brokerinstancefk, - s.tradedt s_tradedt, - s.snapdttmmax s_snapdttmmax - FROM snaps s) - ORDER BY rs.brokerfk, rs.brokerinstancefk, rs.snapdttm - ), - data as ( - SELECT - ssbtd.snapdttm startdttm, - esbtd.snapdttm enddttm, - case when(ssbtd.snapdttm = '1970-01-01 00:00:00.000000') then 0 else 1 end hasstart, - case when(esbtd.snapdttm = '1970-01-01 00:00:00.000000') then 0 else 1 end hasend, - IF(esbtd.brokerfk = 0, ssbtd.brokerfk, esbtd.brokerfk) brokerfk, - IF(esbtd.brokerinstancefk = 0, ssbtd.brokerinstancefk, esbtd.brokerinstancefk) brokerinstancefk - FROM start_snaps_by_trade_dt ssbtd - FULL OUTER JOIN end_snaps_by_trade_dt esbtd ON ssbtd.brokerfk = esbtd.brokerfk AND ssbtd.brokerinstancefk = esbtd.brokerinstancefk AND ssbtd.tradedt = esbtd.tradedt AND ssbtd.traderfk = esbtd.traderfk AND ssbtd.brokersymbolfk = esbtd.brokersymbolfk - ) - SELECT - d.brokerfk, d.brokerinstancefk, - max(startdttm) startdttm, - max(enddttm) enddttm, - countIf(d.hasstart =1 and d.hasend=1) matches, - countIf(d.hasstart =1 and d.hasend=0) beforeonly, - countIf(d.hasstart =0 and d.hasend=1) afteronly, - (select array_agg(toJSONString(map( - 'snapdttmmax', toString(snapdttmmax), 'snapdttmmin', toString(snapdttmmin), - 'tradedt', toString(tradedt), 'brokerfk', toString(brokerfk), 'brokerinstancefk', toString(brokerinstancefk)))) - from snaps) snaps - FROM data d - GROUP BY d.brokerfk, d.brokerinstancefk - } diff --git a/src/DataTypes/Serializations/SerializationJSONElement.cpp 
b/src/DataTypes/Serializations/SerializationJSONElement.cpp deleted file mode 100644 index 35119cd0764..00000000000 --- a/src/DataTypes/Serializations/SerializationJSONElement.cpp +++ /dev/null @@ -1,3 +0,0 @@ -// -// Created by Павел Круглов on 05/07/2024. -// diff --git a/src/DataTypes/Serializations/SerializationJSONElement.h b/src/DataTypes/Serializations/SerializationJSONElement.h deleted file mode 100644 index 791ef2eecc5..00000000000 --- a/src/DataTypes/Serializations/SerializationJSONElement.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by Павел Круглов on 05/07/2024. -// - -#ifndef CLICKHOUSE_SERIALIZATIONJSONELEMENT_H -#define CLICKHOUSE_SERIALIZATIONJSONELEMENT_H - -#endif //CLICKHOUSE_SERIALIZATIONJSONELEMENT_H diff --git a/src/DataTypes/Serializations/SerializationObjectElement.cpp b/src/DataTypes/Serializations/SerializationObjectElement.cpp deleted file mode 100644 index 35119cd0764..00000000000 --- a/src/DataTypes/Serializations/SerializationObjectElement.cpp +++ /dev/null @@ -1,3 +0,0 @@ -// -// Created by Павел Круглов on 05/07/2024. -// diff --git a/src/DataTypes/Serializations/SerializationObjectElement.h b/src/DataTypes/Serializations/SerializationObjectElement.h deleted file mode 100644 index 791ef2eecc5..00000000000 --- a/src/DataTypes/Serializations/SerializationObjectElement.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by Павел Круглов on 05/07/2024. -// - -#ifndef CLICKHOUSE_SERIALIZATIONJSONELEMENT_H -#define CLICKHOUSE_SERIALIZATIONJSONELEMENT_H - -#endif //CLICKHOUSE_SERIALIZATIONJSONELEMENT_H diff --git a/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp b/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp index 2e646d1e62a..789aeac566f 100644 --- a/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp +++ b/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp @@ -32,7 +32,7 @@ extern const int UNSUPPORTED_METHOD; void check(const DataTypePtr & type) { - std::cerr << "Check " << type->getName() << "\n"; +// std::cerr << "Check " << type->getName() << "\n"; WriteBufferFromOwnString ostr; encodeDataType(type, ostr); ReadBufferFromString istr(ostr.str()); diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index f859ad73ea6..f41c3be71f4 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -1,13 +1,13 @@ -#include -#include -#include -#include -#include +#include #include #include +#include #include -#include #include +#include +#include +#include +#include #include diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 0f8dc83bd52..15dc75c0f3a 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -1,22 +1,22 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include +#include #include #include diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0b2cfcab165..3b0179dff2b 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -16,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -42,12 
+41,13 @@ #include #include #include +#include #include #include #include #include -#include #include +#include #include #include #include @@ -4020,6 +4020,7 @@ private: /// TODO: support CAST between JSON types with different parameters /// support CAST from Map to JSON /// support CAST from Tuple to JSON + /// support CAST from Object('json') to JSON throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String. Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); } diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index 2a492e8eaed..f4793c7fc27 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -10,7 +10,6 @@ #include #include -#include namespace DB { From 2b28323e65a7ba8f9a1be1c3c6a5953f9332709a Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 14:48:38 +0000 Subject: [PATCH 0345/2361] Add documentation --- docs/en/sql-reference/data-types/dynamic.md | 3 +- docs/en/sql-reference/data-types/json.md | 519 ++++++++++++++++-- .../sql-reference/data-types/object-json.md | 83 +++ .../sql-reference/data-types/object_json.md | 86 +++ 4 files changed, 629 insertions(+), 62 deletions(-) create mode 100644 docs/en/sql-reference/data-types/object-json.md create mode 100644 docs/en/sql-reference/data-types/object_json.md diff --git a/docs/en/sql-reference/data-types/dynamic.md b/docs/en/sql-reference/data-types/dynamic.md index 84a724e121e..44421f3b1f8 100644 --- a/docs/en/sql-reference/data-types/dynamic.md +++ b/docs/en/sql-reference/data-types/dynamic.md @@ -533,5 +533,4 @@ In RowBinary format values of `Dynamic` type are serialized in the following for ```text -``` - +``` \ No newline at end of file diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index c29be2cff58..d35a3f90ef3 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -1,83 +1,482 @@ --- -slug: /en/sql-reference/data-types/object-data-type -sidebar_position: 26 -sidebar_label: Object Data Type -keywords: [object, data type] +slug: /en/sql-reference/data-types/json +sidebar_position: 63 +sidebar_label: JSON --- -# Object Data Type - -:::note -This feature is not production-ready and is now deprecated. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864) -::: +# JSON` Stores JavaScript Object Notation (JSON) documents in a single column. -`JSON` is an alias for `Object('json')`. +:::note +This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. +If you want to use JSON type, set `allow_experimental_json_type = 1`. 
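For example, the setting can be enabled for the current session before creating a table with a `JSON` column (a minimal illustration using only the setting named above):

```sql
SET allow_experimental_json_type = 1;
```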
+::: -## Example +To declare a column of `JSON` type, use the following syntax: -**Example 1** - -Creating a table with a `JSON` column and inserting data into it: - -```sql -CREATE TABLE json -( - o JSON -) -ENGINE = Memory +``` sql + JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP PREFIX path.prefix.to.skip, SKIP REGEXP 'paths_regexp') ``` +Where: +- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1000`. +- `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`. +- `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type. +- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. +- `SKIP PREFIX path.prefix.to.skip` is an optional hint for particular path prefix that should be skipped during JSON parsing. All paths with such prefix will never be stored in the JSON column. +- `SKIP REGEXP 'path_regexp'` is an optional hint with a regular expression that is used to skip paths during JSON parsing. All paths that match this regular expression will never be stored in the JSON column. + +## Creating JSON + +Using `JSON` type in table column definition: ```sql -INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}') -``` - -```sql -SELECT o.a, o.b.c, o.b.d[3] FROM json +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); +SELECT json FROM test; ``` ```text -┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┠-│ 1 │ 2 │ 3 │ -└─────┴───────┴────────────────────────┘ -``` - -**Example 2** - -To be able to create an ordered `MergeTree` family table, the sorting key has to be extracted into its column. For example, to insert a file of compressed HTTP access logs in JSON format: - -```sql -CREATE TABLE logs -( - timestamp DateTime, - message JSON -) -ENGINE = MergeTree -ORDER BY timestamp +┌─json────────────────────────────────────────┠+│ {"a":{"b":"42"},"c":["1","2","3"]} │ +│ {"f":"Hello, World!"} │ +│ {"a":{"b":"43","e":"10"},"c":["4","5","6"]} │ +└─────────────────────────────────────────────┘ ``` ```sql -INSERT INTO logs -SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json -FROM file('access.json.gz', JSONAsString) -``` - -## Displaying JSON columns - -When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally, it is represented as a tuple). 
You can also display the field names by setting `output_format_json_named_tuples_as_objects = 1`: - -```sql -SET output_format_json_named_tuples_as_objects = 1 - -SELECT * FROM json FORMAT JSONEachRow +CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); +SELECT json FROM test; ``` ```text -{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}} +┌─json──────────────────────────────┠+│ {"a":{"b":42},"c":[1,2,3]} │ +│ {"a":{"b":0},"f":"Hello, World!"} │ +│ {"a":{"b":43},"c":[4,5,6]} │ +└───────────────────────────────────┘ ``` -## Related Content +Using CAST from 'String': -- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) -- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) +```sql +SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json; +``` + +```text +┌─json───────────────────────────────────────────┠+│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later. + +## Reading JSON paths as subcolumns + +JSON type supports reading every path as a separate subcolumn. If type of the requested path was not specified in the JSON type declaration, the subcolumn of the path will always have type [Dynamic](/docs/en/sql-reference/data-types/dynamic.md). + +For example: + +```sql +CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42, "g" : 42.42}, "c" : [1, 2, 3], "d" : "2020-01-01"}'), ('{"f" : "Hello, World!", "d" : "2020-01-02"}'), ('{"a" : {"b" : 43, "e" : 10, "g" : 43.43}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json──────────────────────────────────────────────────┠+│ {"a":{"b":42,"g":42.42},"c":[1,2,3],"d":"2020-01-01"} │ +│ {"a":{"b":0},"d":"2020-01-02","f":"Hello, World!"} │ +│ {"a":{"b":43,"g":43.43},"c":[4,5,6]} │ +└───────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.a.b, json.a.g, json.c, json.d FROM test; +``` + +```text +┌─json.a.b─┬─json.a.g─┬─json.c──┬─json.d─────┠+│ 42 │ 42.42 │ [1,2,3] │ 2020-01-01 │ +│ 0 │ á´ºáµá´¸á´¸ │ á´ºáµá´¸á´¸ │ 2020-01-02 │ +│ 43 │ 43.43 │ [4,5,6] │ á´ºáµá´¸á´¸ │ +└──────────┴──────────┴─────────┴────────────┘ +``` + +If the requested path wasn't found in the data, it will be filled with `NULL` values: + +```sql +SELECT json.non.existing.path FROM test; +``` + +```text +┌─json.non.existing.path─┠+│ á´ºáµá´¸á´¸ │ +│ á´ºáµá´¸á´¸ │ +│ á´ºáµá´¸á´¸ │ +└────────────────────────┘ +``` + +Let's check the data types of returned subcolumns: +```sql +SELECT toTypeName(json.a.b), toTypeName(json.a.g), toTypeName(json.c), toTypeName(json.d) FROM test; +``` + +```text +┌─toTypeName(json.a.b)─┬─toTypeName(json.a.g)─┬─toTypeName(json.c)─┬─toTypeName(json.d)─┠+│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +└──────────────────────┴──────────────────────┴────────────────────┴────────────────────┘ +``` + +As we can see, for `a.b` the type is `UInt32` as we specified in the JSON type declaration, and for all other subcolumns the type is `Dynamic`. 
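Because the path `a.b` has a fixed type, it behaves like an ordinary `UInt32` column and can be used directly in expressions and filters without any cast. Here is a minimal sketch against the same `test` table created above; only the row whose `a.b` value exceeds 42 should match:

```sql
-- a.b is a typed path (UInt32), d is a dynamic path (Dynamic)
SELECT json.a.b, json.d FROM test WHERE json.a.b > 42;
```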
+ +It is also possible to read subcolumns of a `Dynamic` type using special syntax `json.some.path.:TypeName`: + +```sql +select json.a.g.:Float64, dynamicType(json.a.g), json.d.:Date, dynamicType(json.d) FROM test; +``` + +```text +┌─json.a.g.:`Float64`─┬─dynamicType(json.a.g)─┬─json.d.:`Date`─┬─dynamicType(json.d)─┠+│ 42.42 │ Float64 │ 2020-01-01 │ Date │ +│ á´ºáµá´¸á´¸ │ None │ 2020-01-02 │ Date │ +│ 43.43 │ Float64 │ á´ºáµá´¸á´¸ │ None │ +└─────────────────────┴───────────────────────┴────────────────┴─────────────────────┘ +``` + +`Dynamic` subcolumns can be casted to any data type. In this case the exception will be thrown if internal type inside `Dynamic` cannot be casted to the requested type: + +```sql +select json.a.g::UInt64 as uint FROM test; +``` + +```text +┌─uint─┠+│ 42 │ +│ 0 │ +│ 43 │ +└──────┘ +``` + +```sql +select json.a.g::UUID as float FROM test; +``` + +```text +Received exception: +Code: 48. DB::Exception: Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted: while executing 'FUNCTION CAST(__table1.json.a.g :: 2, 'UUID'_String :: 1) -> CAST(__table1.json.a.g, 'UUID'_String) UUID : 0'. (NOT_IMPLEMENTED) +``` + +## Reading JSON sub-objects as subcolumns + +JSON type supports reading nested objects as subcolumns with type `JSON` using special syntax `json.^some.path`: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : {"c" : 42, "g" : 42.42}}, "c" : [1, 2, 3], "d" : {"e" : {"f" : {"g" : "Hello, World", "h" : [1, 2, 3]}}}}'), ('{"f" : "Hello, World!", "d" : {"e" : {"f" : {"h" : [4, 5, 6]}}}}'), ('{"a" : {"b" : {"c" : 43, "e" : 10, "g" : 43.43}}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json────────────────────────────────────────────────────────────────────────────────────────┠+│ {"a":{"b":{"c":42,"g":42.42}},"c":[1,2,3],"d":{"e":{"f":{"g":"Hello, World","h":[1,2,3]}}}} │ +│ {"d":{"e":{"f":{"h":[4,5,6]}}},"f":"Hello, World!"} │ +│ {"a":{"b":{"c":43,"e":10,"g":43.43}},"c":[4,5,6]} │ +└─────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.^a.b, json.^d.e.f FROM test; +``` + +```text +┌─json.^`a`.b───────────────┬─json.^`d`.e.f────────────────────┠+│ {"c":42,"g":42.42} │ {"g":"Hello, World","h":[1,2,3]} │ +│ {} │ {"h":[4,5,6]} │ +│ {"c":43,"e":10,"g":43.43} │ {} │ +└───────────────────────────┴──────────────────────────────────┘ +``` + +:::note +Reading sub-objects as subcolumns may be inefficient, as this may require almost full scan of the JSON data. +::: + +## Types inference for paths + +During JSON parsing ClickHouse tries to detect the most appropriate data type for each JSON path. 
It works similar to [automatic schema inference from input data](/docs/en/interfaces/schema-inference.md) and controlled by the same settings: + +- [input_format_try_infer_integers](/docs/en/interfaces/schema-inference.md#inputformattryinferintegers) +- [input_format_try_infer_dates](/docs/en/interfaces/schema-inference.md#inputformattryinferdates) +- [input_format_try_infer_datetimes](/docs/en/interfaces/schema-inference.md#inputformattryinferdatetimes) +- [schema_inference_make_columns_nullable](/docs/en/interfaces/schema-inference.md#schemainferencemakecolumnsnullable) +- [input_format_json_try_infer_numbers_from_strings](/docs/en/interfaces/schema-inference.md#inputformatjsontryinfernumbersfromstrings) +- [input_format_json_infer_incomplete_types_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsoninferincompletetypesasstrings) +- [input_format_json_read_numbers_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadnumbersasstrings) +- [input_format_json_read_bools_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasstrings) +- [input_format_json_read_bools_as_numbers](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasnumbers) +- [input_format_json_read_arrays_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadarraysasstrings) + +Let's see some examples: + +```sql +SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=1, input_format_try_infer_datetimes=1; +``` + +```text +┌─paths_with_types─────────────────┠+│ {'a':'Date','b':'DateTime64(9)'} │ +└──────────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=0, input_format_try_infer_datetimes=0; +``` + +```text +┌─paths_with_types────────────┠+│ {'a':'String','b':'String'} │ +└─────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=1; +``` + +```text +┌─paths_with_types───────────────┠+│ {'a':'Array(Nullable(Int64))'} │ +└────────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=0; +``` + +```text +┌─paths_with_types─────┠+│ {'a':'Array(Int64)'} │ +└──────────────────────┘ +``` + +## Handling arrays of JSON objects + +JSON paths that contains an array of objects are parsed as type `Array(JSON)` and inserted into `Dynamic` column for this path. 
To read an array of objects you can extract it from `Dynamic` column as a subcolumn: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES +('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : {"g" : 42.42}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : {"g" : 43.43, "h" : "2020-01-01"}}]}}'), +('{"a" : {"b" : [1, 2, 3]}}'), +('{"a" : {"b" : [{"c" : 44, "f" : {"h" : "2020-01-02"}}, {"e" : [4, 5, 6], "d" : "World", "f" : {"g" : 44.44}}]}}'); +SELECT json FROM test; +``` + +```text +┌─json──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┠+│ {"a":{"b":[{"c":"42","d":"Hello","f":{"g":42.42}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":{"g":43.43,"h":"2020-01-01"}}]}} │ +│ {"a":{"b":["1","2","3"]}} │ +│ {"a":{"b":[{"c":"44","f":{"h":"2020-01-02"}},{"d":"World","e":["4","5","6"],"f":{"g":44.44}}]}} │ +└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.a.b, dynamicType(json.a.b) FROM test; +``` + +```text +┌─json.a.b────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)───────────────────────────────────┠+│ ['{"c":"42","d":"Hello","f":{"g":42.42}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":{"g":43.43,"h":"2020-01-01"}}'] │ Array(JSON(max_dynamic_types=8, max_dynamic_paths=125)) │ +│ [1,2,3] │ Array(Nullable(Int64)) │ +│ ['{"c":"44","f":{"h":"2020-01-02"}}','{"d":"World","e":["4","5","6"],"f":{"g":44.44}}'] │ Array(JSON(max_dynamic_types=8, max_dynamic_paths=125)) │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────┘ +``` + +As you can notice, the `max_dynamic_types/max_dynamic_paths` parameters of the nested `JSON` type were reduced compared to the default values. It's needed to avoid number of subcolumns to grow uncontrolled on nested arrays of JSON objects. + +Let's try to read subcolumns of this nested `JSON` column: + +```sql +SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f.g, json.a.b.:`Array(JSON)`.f.g FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f.g─┬─json.a.b.:`Array(JSON)`.f.g─┠+│ [42,43,NULL] │ [42.42,NULL,43.43] │ [42.42,NULL,43.43] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [NULL,44.44] │ [NULL,44.44] │ +└───────────────────────────┴─────────────────────────────┴─────────────────────────────┘ +``` + +We can also read subcolumns of `Dynamic` columns: + +```sql +SELECT json.a.b.:`Array(JSON)`.f.h.:Date FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.f.h.:`Date`─┠+│ [NULL,NULL,'2020-01-01'] │ +│ [] │ +│ ['2020-01-02',NULL] │ +└─────────────────────────────────────┘ +``` + +We can also read sub-object subcolumns from nested `JSON` column: + +```sql +SELECT json.a.b.:`Array(JSON)`.^f FROM test +``` + +```text +┌─json.a.b.:`Array(JSON)`.^`f`────────────────────────┠+│ ['{"g":42.42}','{}','{"g":43.43,"h":"2020-01-01"}'] │ +│ [] │ +│ ['{"h":"2020-01-02"}','{"g":44.44}'] │ +└─────────────────────────────────────────────────────┘ +``` + +## Reading JSON type from the data + +All text formats (JSONEachRow, TSV, CSV, CustomSeparated, Values, etc) supports reading `JSON` type. 
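In particular, a table with a single `JSON` column accepts such rows on `INSERT` directly (a small sketch; `test` here is assumed to be a table with one column `json JSON`, as in the earlier examples):

```sql
INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : {"b" : 42}, "c" : [1, 2, 3]}}
```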
+ +Examples: + +```sql +SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP PREFIX d.e, SKIP REGEXP \'b.*\')', ' +{"json" : {"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}} +{"json" : {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}} +{"json" : {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}} +{"json" : {"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}} +{"json" : {"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}} +') +``` + +```text +┌─json──────────────────────────────────────────────────────────┠+│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │ +│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │ +│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │ +│ {"a":{"b":{"c":4}},"c":"43"} │ +│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │ +└───────────────────────────────────────────────────────────────┘ +``` + +For text formats like CSV/TSV/etc `JSON` is parsed from a string containing JSON object + +```sql +SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP PREFIX d.e, SKIP REGEXP \'b.*\')', +'{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}} +{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}} +{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"} +{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43} +{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}') +``` + +```text +┌─json──────────────────────────────────────────────────────────┠+│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │ +│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │ +│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │ +│ {"a":{"b":{"c":4}},"c":"43"} │ +│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │ +└───────────────────────────────────────────────────────────────┘ +``` + +## Reaching the limit of dynamic paths inside JSON + +`JSON` data type can store only limited number of paths as separate subcolumns inside. By default, this limit is 1000, but you can change it in type declaration using parameter `max_dynamic_paths`. +When the limit is reached, all new paths inserted to `JSON` column will be stored in a single shared data structure. It's still possible to read such paths as subcolumns, but it will require reading the whole +shared data structure to extract the values of this path. This limit is needed to avoid the enormous number of different subcolumns that can make the table unusable. + +Let's see what happens when the limit is reached in different scenarios. + +### Reaching the limit during data parsing + +During parsing of `JSON` object from the data, when the limit is reached for current block of data, all new paths will be stored in a shared data structure. 
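For experiments, the limit can be lowered in the type declaration, either in a table definition or, as in the example below, in the schema passed to the `format` table function (a hypothetical table shown only for illustration; `3` is a deliberately tiny value):

```sql
-- test_limit is a hypothetical name; the small max_dynamic_paths value makes the overflow easy to observe
CREATE TABLE test_limit (json JSON(max_dynamic_paths=3)) ENGINE = Memory;
```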
We can check it using introspection functions `JSONDynamicPaths, JSONSharedDataPaths`: + +```sql +SELECT json, JSONDynamicPaths(json), JSONSharedDataPaths(json) FROM format(JSONEachRow, 'json JSON(max_dynamic_paths=3)', ' +{"json" : {"a" : {"b" : 42}, "c" : [1, 2, 3]}} +{"json" : {"a" : {"b" : 43}, "d" : "2020-01-01"}} +{"json" : {"a" : {"b" : 44}, "c" : [4, 5, 6]}} +{"json" : {"a" : {"b" : 43}, "d" : "2020-01-02", "e" : "Hello", "f" : {"g" : 42.42}}} +{"json" : {"a" : {"b" : 43}, "c" : [7, 8, 9], "f" : {"g" : 43.43}, "h" : "World"}} +') +``` + +```text +┌─json───────────────────────────────────────────────────────────┬─JSONDynamicPaths(json)─┬─JSONSharedDataPaths(json)─┠+│ {"a":{"b":"42"},"c":["1","2","3"]} │ ['a.b','c','d'] │ [] │ +│ {"a":{"b":"43"},"d":"2020-01-01"} │ ['a.b','c','d'] │ [] │ +│ {"a":{"b":"44"},"c":["4","5","6"]} │ ['a.b','c','d'] │ [] │ +│ {"a":{"b":"43"},"d":"2020-01-02","e":"Hello","f":{"g":42.42}} │ ['a.b','c','d'] │ ['e','f.g'] │ +│ {"a":{"b":"43"},"c":["7","8","9"],"f":{"g":43.43},"h":"World"} │ ['a.b','c','d'] │ ['f.g','h'] │ +└────────────────────────────────────────────────────────────────┴────────────────────────┴───────────────────────────┘ +``` + +As we can see, after inserting paths `e` and `f.g` the limit was reached and we inserted them into shared data structure. + +### During merges of data parts in MergeTree table engines + +During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns. +In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains +the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation. + +Let's see an example of such merge. 
First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths: + +```sql +CREATE TABLE test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree ORDER BY id; +SYSTEM STOP MERGES test; +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as a) FROM numbers(5); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as b) FROM numbers(4); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as c) FROM numbers(3); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as d) FROM numbers(2); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as e) FROM numbers(1); +``` + +Each insert will create a separate data pert with `JSON` column containing single path: +```sql +SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part ASC +``` + +```text +┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┠+│ 5 │ ['a'] │ [] │ all_1_1_0 │ +│ 4 │ ['b'] │ [] │ all_2_2_0 │ +│ 3 │ ['c'] │ [] │ all_3_3_0 │ +│ 2 │ ['d'] │ [] │ all_4_4_0 │ +│ 1 │ ['e'] │ [] │ all_5_5_0 │ +└─────────┴───────────────┴───────────────────┴───────────┘ + +``` + +Now, let's merge all parts into one and see what will happen: + +```sql +SYSTEM START MERGES test; +OPTIMIZE TABLE test FINAL; +SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; +``` + +```text +┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┠+│ 1 │ ['a','b','c'] │ ['e'] │ all_1_5_2 │ +│ 2 │ ['a','b','c'] │ ['d'] │ all_1_5_2 │ +│ 12 │ ['a','b','c'] │ [] │ all_1_5_2 │ +└─────────┴───────────────┴───────────────────┴───────────┘ +``` + +As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and moved paths `e` and `d` to shared data structure. + + + +## Tips for better usage of the JSON type + +Before creating `JSON` column and loading data into it, consider the following tips: + +- Investigate your data and specify as many path hints with types as you can. It will make the storage and the reading much more efficient. +- Think about what paths you will need and what paths you will never need. Specify paths that you won't need in the SKIP section, use SKIP PREFIX and SKIP REGEXP if needed. It will improve the storage. +- Don't set `max_dynamic_paths` parameter to very high values, it can make the storage and reading less efficient. diff --git a/docs/en/sql-reference/data-types/object-json.md b/docs/en/sql-reference/data-types/object-json.md new file mode 100644 index 00000000000..36835a42db8 --- /dev/null +++ b/docs/en/sql-reference/data-types/object-json.md @@ -0,0 +1,83 @@ +--- +slug: /en/sql-reference/data-types/object-data-type +sidebar_position: 26 +sidebar_label: Object Data Type +keywords: [object, data type] +--- + +# Object Data Type (deprecated) + +**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). + +
+ +Stores JavaScript Object Notation (JSON) documents in a single column. + +`JSON` is an alias for `Object('json')`. + +## Example + +**Example 1** + +Creating a table with a `JSON` column and inserting data into it: + +```sql +CREATE TABLE json +( + o JSON +) +ENGINE = Memory +``` + +```sql +INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}') +``` + +```sql +SELECT o.a, o.b.c, o.b.d[3] FROM json +``` + +```text +┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┠+│ 1 │ 2 │ 3 │ +└─────┴───────┴────────────────────────┘ +``` + +**Example 2** + +To be able to create an ordered `MergeTree` family table, the sorting key has to be extracted into its column. For example, to insert a file of compressed HTTP access logs in JSON format: + +```sql +CREATE TABLE logs +( + timestamp DateTime, + message JSON +) +ENGINE = MergeTree +ORDER BY timestamp +``` + +```sql +INSERT INTO logs +SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json +FROM file('access.json.gz', JSONAsString) +``` + +## Displaying JSON columns + +When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can also display the field names by setting `output_format_json_named_tuples_as_objects = 1`: + +```sql +SET output_format_json_named_tuples_as_objects = 1 + +SELECT * FROM json FORMAT JSONEachRow +``` + +```text +{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}} +``` + +## Related Content + +- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) +- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) \ No newline at end of file diff --git a/docs/en/sql-reference/data-types/object_json.md b/docs/en/sql-reference/data-types/object_json.md new file mode 100644 index 00000000000..39e37abad82 --- /dev/null +++ b/docs/en/sql-reference/data-types/object_json.md @@ -0,0 +1,86 @@ +--- +slug: /en/sql-reference/data-types/json +sidebar_position: 26 +sidebar_label: JSON +--- + +# JSON + +:::note +This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. +::: + +Stores JavaScript Object Notation (JSON) documents in a single column. + +`JSON` is an alias for `Object('json')`. + +:::note +The JSON data type is an obsolete feature. Do not use it. +If you want to use it, set `allow_experimental_object_type = 1`. +::: + +## Example + +**Example 1** + +Creating a table with a `JSON` column and inserting data into it: + +```sql +CREATE TABLE json +( + o JSON +) +ENGINE = Memory +``` + +```sql +INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}') +``` + +```sql +SELECT o.a, o.b.c, o.b.d[3] FROM json +``` + +```text +┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┠+│ 1 │ 2 │ 3 │ +└─────┴───────┴────────────────────────┘ +``` + +**Example 2** + +To be able to create an ordered `MergeTree` family table the sorting key has to be extracted into its column. 
For example, to insert a file of compressed HTTP access logs in JSON format: + +```sql +CREATE TABLE logs +( + timestamp DateTime, + message JSON +) +ENGINE = MergeTree +ORDER BY timestamp +``` + +```sql +INSERT INTO logs +SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json +FROM file('access.json.gz', JSONAsString) +``` + +## Displaying JSON columns + +When displaying a `JSON` column ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can display the field names as well by setting `output_format_json_named_tuples_as_objects = 1`: + +```sql +SET output_format_json_named_tuples_as_objects = 1 + +SELECT * FROM json FORMAT JSONEachRow +``` + +```text +{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}} +``` + +## Related Content + +- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) From c6b558c7915b070167649d4e88eafb2613570bd3 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 4 Jul 2024 22:30:18 +0200 Subject: [PATCH 0346/2361] Done --- .../02814_currentDatabase_for_table_functions.sql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.sql b/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.sql index 74b5cf5f432..8b1e3ba1e10 100644 --- a/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.sql +++ b/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.sql @@ -13,7 +13,13 @@ CREATE MATERIALIZED VIEW null_mv Engine = Log AS SELECT * FROM null_table LEFT J CREATE TABLE null_table_buffer (number UInt64) ENGINE = Buffer(currentDatabase(), null_table, 1, 1, 1, 100, 200, 10000, 20000); INSERT INTO null_table_buffer VALUES (1); -SELECT sleep(3) FORMAT Null; + +-- OPTIMIZE query should flush Buffer table, but still it is not guaranteed +-- (see the comment StorageBuffer::optimize) +-- But the combination of OPTIMIZE + sleep + OPTIMIZE should be enough. +OPTIMIZE TABLE null_table_buffer; +SELECT sleep(1) FORMAT Null; +OPTIMIZE TABLE null_table_buffer; -- Insert about should've landed into `null_mv` SELECT count() FROM null_mv; From 4ad6597ab61fda8f62d9b78713503d1e5c5517d7 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 15:15:39 +0000 Subject: [PATCH 0347/2361] Add more comments --- src/DataTypes/DataTypeObject.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 1a66222cc98..8f580f03b57 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -75,11 +75,17 @@ public: private: SchemaFormat schema_format; + /// Set of paths with types that were specified in type declaration. std::unordered_map typed_paths; + /// Set of paths that should be skipped during data parsing. std::unordered_set paths_to_skip; + /// List of path prefixes that should be skipped during data parsing. std::vector path_prefixes_to_skip; + /// List of regular expressions that should be used to skip paths during data parsing. std::vector path_regexps_to_skip; + /// Limit on the number of paths that can be stored as subcolumn. size_t max_dynamic_paths; + /// Limit of dynamic types that should be used for Dynamic columns. 
size_t max_dynamic_types; }; From ca7e003c6d7af6bf0676bba7cb61ab560c202bf3 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 12 Jul 2024 15:27:03 +0000 Subject: [PATCH 0348/2361] Fixed test --- .../02814_currentDatabase_for_table_functions.reference | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.reference b/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.reference index 7ff95106d3d..20b14d9a67b 100644 --- a/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.reference +++ b/tests/queries/0_stateless/02814_currentDatabase_for_table_functions.reference @@ -11,7 +11,12 @@ CREATE VIEW number_view as SELECT * FROM numbers(10) as tb; CREATE MATERIALIZED VIEW null_mv Engine = Log AS SELECT * FROM null_table LEFT JOIN number_view as tb USING number; CREATE TABLE null_table_buffer (number UInt64) ENGINE = Buffer(currentDatabase(), null_table, 1, 1, 1, 100, 200, 10000, 20000); INSERT INTO null_table_buffer VALUES (1); -SELECT sleep(3) FORMAT Null; +-- OPTIMIZE query should flush Buffer table, but still it is not guaranteed +-- (see the comment StorageBuffer::optimize) +-- But the combination of OPTIMIZE + sleep + OPTIMIZE should be enough. +OPTIMIZE TABLE null_table_buffer; +SELECT sleep(1) FORMAT Null; +OPTIMIZE TABLE null_table_buffer; -- Insert about should've landed into `null_mv` SELECT count() FROM null_mv; 1 From 83037f534e3075e9ebaa7e2457c6c96b98d44363 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 16:07:38 +0000 Subject: [PATCH 0349/2361] Fix style --- src/Columns/ColumnObject.cpp | 4 ++-- src/DataTypes/DataTypeObject.cpp | 3 +-- src/DataTypes/Serializations/SerializationJSON.cpp | 5 +---- src/DataTypes/Serializations/SerializationObject.cpp | 4 ++-- src/DataTypes/Serializations/SerializationObjectTypedPath.h | 2 +- src/DataTypes/Serializations/SerializationSubObject.cpp | 2 +- src/DataTypes/Serializations/SerializationSubObject.h | 5 +++++ src/Formats/JSONExtractTree.cpp | 4 ++-- src/Functions/FunctionsConversion.cpp | 2 +- src/Functions/JSONPaths.cpp | 2 +- 10 files changed, 17 insertions(+), 16 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 50b2bcf90b8..4c4fa931fbc 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -481,7 +481,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co shared_data_offsets.push_back(shared_data_paths->size()); } } - + /// Insert default values in all remaining dynamic paths. for (auto & [_, column] : dynamic_paths) { @@ -550,7 +550,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co void ColumnObject::serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const String & path, const IColumn & column, size_t n) { - /// Don't store Null values in shared data. We consider Null value equivalent to the absense + /// Don't store Null values in shared data. We consider Null value equivalent to the absence /// of this path in the row because we cannot distinguish these 2 cases for dynamic paths. 
if (column.isNullAt(n)) return; diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 338883c3ae4..8f3baa05925 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -31,7 +31,6 @@ namespace DB namespace ErrorCodes { - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int UNEXPECTED_AST_STRUCTURE; extern const int BAD_ARGUMENTS; } @@ -403,7 +402,7 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: { if (!arguments || arguments->children.empty()) return std::make_shared(schema_format); - + std::unordered_map typed_paths; std::unordered_set paths_to_skip; /// Collect prefixes in unordered_set to avoid duplicate prefixes diff --git a/src/DataTypes/Serializations/SerializationJSON.cpp b/src/DataTypes/Serializations/SerializationJSON.cpp index b5bf8d1421b..51b12568981 100644 --- a/src/DataTypes/Serializations/SerializationJSON.cpp +++ b/src/DataTypes/Serializations/SerializationJSON.cpp @@ -123,7 +123,7 @@ void SerializationJSON::serializeTextImpl(const IColumn & column, size_t sorted_paths.emplace_back(path); for (const auto & [path, dynamic_column] : dynamic_paths) { - /// We consider null value and absense of the path in a row as equivalent cases, because we cannot actually distinguish them. + /// We consider null value and absence of the path in a row as equivalent cases, because we cannot actually distinguish them. /// So, we don't output null values at all. if (!dynamic_column->isNullAt(row_num)) sorted_paths.emplace_back(path); @@ -403,7 +403,4 @@ template class SerializationJSON; template class SerializationJSON; #endif - - - } diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index d4483703c6b..8e6f6f105ea 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -285,7 +285,7 @@ void SerializationObject::deserializeBinaryBulkStatePrefix( shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, object_state->shared_data_state, cache); settings.path.pop_back(); settings.path.pop_back(); - + state = std::move(object_state); } @@ -605,7 +605,7 @@ void SerializationObject::deserializeBinary(IColumn & col, ReadBuffer & istr, co typed_it->second->deserializeBinary(*typed_column, istr, settings); } } - /// Check if we have this path in dynamc paths. + /// Check if we have this path in dynamic paths. else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) { /// Check if we already had this path. 
diff --git a/src/DataTypes/Serializations/SerializationObjectTypedPath.h b/src/DataTypes/Serializations/SerializationObjectTypedPath.h index a893e07f798..997e14bd145 100644 --- a/src/DataTypes/Serializations/SerializationObjectTypedPath.h +++ b/src/DataTypes/Serializations/SerializationObjectTypedPath.h @@ -49,7 +49,7 @@ public: DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsCache * cache) const override; - + private: String path; }; diff --git a/src/DataTypes/Serializations/SerializationSubObject.cpp b/src/DataTypes/Serializations/SerializationSubObject.cpp index 62ebd4669e1..9084d46f9b2 100644 --- a/src/DataTypes/Serializations/SerializationSubObject.cpp +++ b/src/DataTypes/Serializations/SerializationSubObject.cpp @@ -39,7 +39,7 @@ void SerializationSubObject::enumerateStreams( settings.path.push_back(Substream::ObjectStructure); callback(settings.path); settings.path.pop_back(); - + const auto * column_object = data.column ? &assert_cast(*data.column) : nullptr; const auto * type_object = data.type ? &assert_cast(*data.type) : nullptr; const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; diff --git a/src/DataTypes/Serializations/SerializationSubObject.h b/src/DataTypes/Serializations/SerializationSubObject.h index 8d037720f1f..8a5582536e7 100644 --- a/src/DataTypes/Serializations/SerializationSubObject.h +++ b/src/DataTypes/Serializations/SerializationSubObject.h @@ -7,6 +7,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + /// Serialization of a sub-object Object subcolumns. /// For example, if we have type JSON and data {"a" : {"b" : {"c" : 42, "d" : "Hello"}}, "c" : [1, 2, 3], "d" : 42} /// this class will be responsible for reading sub-object a.b and will read JSON column with data {"c" : 43, "d" : "Hello"}. diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index db984d67819..e68e98a6a06 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1636,7 +1636,7 @@ private: auto & typed_paths = column_object.getTypedPaths(); auto & dynamic_paths = column_object.getDynamicPaths(); - /// Check if we have this path in typed pahts. + /// Check if we have this path in typed paths. if (auto typed_it = typed_paths.find(current_path); typed_it != typed_paths.end()) { /// Check if we already had this path. @@ -1683,7 +1683,7 @@ private: } /// Otherwise this path should go to the shared data. /// Don't insert null values into shared data. - /// We consider null equivalent to the absense of this path. + /// We consider null equivalent to the absence of this path. else if (!element.isNull()) { auto tmp_dynamic_column = ColumnDynamic::create(); diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 3b0179dff2b..88cd2e382f2 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4004,7 +4004,7 @@ private: throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to Object can be performed only from flatten named Tuple, Map or String. 
Got: {}", from_type->getName()); } - + WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_object) const { if (checkAndGetDataType(from_type.get())) diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index bbdf1385cf2..b7a161901f0 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -300,7 +300,7 @@ private: offsets.push_back(paths_column->size()); } - + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); } From 8394c8ee67d41009dbc72348b64faae98cdfa165 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 16:43:08 +0000 Subject: [PATCH 0350/2361] Fix build --- src/Columns/ColumnObject.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 4c4fa931fbc..dce6b006ff2 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1132,10 +1132,15 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou size_t ColumnObject::findPathLowerBoundInSharedData(StringRef path, const ColumnString & shared_data_paths, size_t start, size_t end) { /// Simple random access iterator over values in ColumnString in specified range. - class Iterator : public std::iterator + class Iterator { public: using difference_type = size_t; + using value_type = StringRef; + using iterator_category = std::random_access_iterator_tag; + using pointer = StringRef*; + using reference = StringRef&; + Iterator() = delete; Iterator(const ColumnString * data_, size_t index_) : data(data_), index(index_) {} Iterator(const Iterator & rhs) : data(rhs.data), index(rhs.index) {} From a6e737ef2afc7fb18d661295e6f84cc3e0478ae1 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Jul 2024 17:19:30 +0000 Subject: [PATCH 0351/2361] Cleaner FilterDAGInfo. 
--- src/Interpreters/ActionsDAG.cpp | 25 +++++++------- src/Interpreters/ActionsDAG.h | 2 +- src/Interpreters/ExpressionActions.cpp | 2 +- src/Interpreters/ExpressionAnalyzer.cpp | 2 +- src/Interpreters/InterpreterSelectQuery.cpp | 18 +++++------ src/Planner/Planner.cpp | 4 +-- src/Planner/PlannerJoinTree.cpp | 36 +++++++++------------ src/Storages/IStorage.cpp | 6 ++-- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/SelectQueryInfo.h | 2 +- src/Storages/StorageMerge.cpp | 6 ++-- 11 files changed, 49 insertions(+), 56 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 04be9d23c32..4401c83549f 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1249,31 +1249,30 @@ bool ActionsDAG::removeUnusedResult(const std::string & column_name) ActionsDAGPtr ActionsDAG::clone(const ActionsDAG * from) { std::unordered_map old_to_new_nodes; - return ActionsDAG::clone(from, old_to_new_nodes); + if (from == nullptr) + return nullptr; + return std::make_unique(ActionsDAG::clone(*from, old_to_new_nodes)); } -ActionsDAGPtr ActionsDAG::clone(const ActionsDAG * from, std::unordered_map & old_to_new_nodes) +ActionsDAG ActionsDAG::clone(const ActionsDAG & from, std::unordered_map & old_to_new_nodes) { - if (!from) - return nullptr; + ActionsDAG actions; - auto actions = std::make_unique(); - - for (const auto & node : from->nodes) + for (const auto & node : from.nodes) { - auto & copy_node = actions->nodes.emplace_back(node); + auto & copy_node = actions.nodes.emplace_back(node); old_to_new_nodes[&node] = ©_node; } - for (auto & node : actions->nodes) + for (auto & node : actions.nodes) for (auto & child : node.children) child = old_to_new_nodes[child]; - for (const auto & output_node : from->outputs) - actions->outputs.push_back(old_to_new_nodes[output_node]); + for (const auto & output_node : from.outputs) + actions.outputs.push_back(old_to_new_nodes[output_node]); - for (const auto & input_node : from->inputs) - actions->inputs.push_back(old_to_new_nodes[input_node]); + for (const auto & input_node : from.inputs) + actions.inputs.push_back(old_to_new_nodes[input_node]); return actions; } diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index cf6a91b9fe7..f428ca2f01c 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -263,7 +263,7 @@ public: static ActionsDAGPtr clone(const ActionsDAGPtr & from) { return clone(from.get()); } static ActionsDAGPtr clone(const ActionsDAG * from); - static ActionsDAGPtr clone(const ActionsDAG * from, std::unordered_map & old_to_new_nodes); + static ActionsDAG clone(const ActionsDAG & from, std::unordered_map & old_to_new_nodes); static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 1c6c3f2556b..dd1d2eb703e 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -75,7 +75,7 @@ ExpressionActionsPtr ExpressionActions::clone() const auto copy = std::make_shared(ExpressionActions()); std::unordered_map copy_map; - copy->actions_dag = std::move(*ActionsDAG::clone(&actions_dag, copy_map)); + copy->actions_dag = ActionsDAG::clone(actions_dag, copy_map); copy->actions = actions; for (auto & action : copy->actions) action.node = copy_map[action.node]; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 
068b6f290fa..286eda14b3f 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1922,7 +1922,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (storage && additional_filter) { - Names columns_for_additional_filter = additional_filter->actions->getRequiredColumnsNames(); + Names columns_for_additional_filter = additional_filter->actions.getRequiredColumnsNames(); additional_required_columns_after_prewhere.insert(additional_required_columns_after_prewhere.end(), columns_for_additional_filter.begin(), columns_for_additional_filter.end()); } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index cde6e305005..e723e5f7982 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -178,12 +178,12 @@ FilterDAGInfoPtr generateFilterActions( filter_info->actions = std::move(analyzer.simpleSelectActions()->dag); filter_info->column_name = expr_list->children.at(0)->getColumnName(); - filter_info->actions->removeUnusedActions(NameSet{filter_info->column_name}); + filter_info->actions.removeUnusedActions(NameSet{filter_info->column_name}); - for (const auto * node : filter_info->actions->getInputs()) - filter_info->actions->getOutputs().push_back(node); + for (const auto * node : filter_info->actions.getInputs()) + filter_info->actions.getOutputs().push_back(node); - auto required_columns_from_filter = filter_info->actions->getRequiredColumns(); + auto required_columns_from_filter = filter_info->actions.getRequiredColumns(); for (const auto & column : required_columns_from_filter) { @@ -1486,7 +1486,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional
( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&*expressions.filter_info->actions)), + std::move(*ActionsDAG::clone(&expressions.filter_info->actions)), expressions.filter_info->column_name, expressions.filter_info->do_remove_column); @@ -1612,7 +1612,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional
( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&*expressions.filter_info->actions)), + std::move(*ActionsDAG::clone(&expressions.filter_info->actions)), expressions.filter_info->column_name, expressions.filter_info->do_remove_column); @@ -1620,11 +1620,11 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional
( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&*new_filter_info->actions)), + std::move(new_filter_info->actions), new_filter_info->column_name, new_filter_info->do_remove_column); @@ -2107,7 +2107,7 @@ void InterpreterSelectQuery::applyFiltersToPrewhereInAnalysis(ExpressionAnalysis else { /// Add row level security actions to prewhere. - analysis.prewhere_info->row_level_filter = std::move(*analysis.filter_info->actions); + analysis.prewhere_info->row_level_filter = std::move(analysis.filter_info->actions); analysis.prewhere_info->row_level_column_name = std::move(analysis.filter_info->column_name); analysis.filter_info = nullptr; } diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 0b10cef82ce..ffed19185d3 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1128,11 +1128,11 @@ void addAdditionalFilterStepIfNeeded(QueryPlan & query_plan, auto fake_table_expression = std::make_shared(std::move(storage), query_context); auto filter_info = buildFilterInfo(additional_result_filter_ast, fake_table_expression, planner_context, std::move(fake_name_set)); - if (!filter_info.actions || !query_plan.isInitialized()) + if (!query_plan.isInitialized()) return; auto filter_step = std::make_unique(query_plan.getCurrentDataStream(), - std::move(*filter_info.actions), + std::move(filter_info.actions), filter_info.column_name, filter_info.do_remove_column); filter_step->setStepDescription("additional result filter"); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index fa3a3483a8e..3217d3461d3 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -458,7 +458,7 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info prewhere_outputs.insert(prewhere_outputs.end(), required_output_nodes.begin(), required_output_nodes.end()); } -FilterDAGInfo buildRowPolicyFilterIfNeeded(const StoragePtr & storage, +std::optional buildRowPolicyFilterIfNeeded(const StoragePtr & storage, SelectQueryInfo & table_expression_query_info, PlannerContextPtr & planner_context, std::set & used_row_policies) @@ -479,7 +479,7 @@ FilterDAGInfo buildRowPolicyFilterIfNeeded(const StoragePtr & storage, return buildFilterInfo(row_policy_filter->expression, table_expression_query_info.table_expression, planner_context); } -FilterDAGInfo buildCustomKeyFilterIfNeeded(const StoragePtr & storage, +std::optional buildCustomKeyFilterIfNeeded(const StoragePtr & storage, SelectQueryInfo & table_expression_query_info, PlannerContextPtr & planner_context) { @@ -513,7 +513,7 @@ FilterDAGInfo buildCustomKeyFilterIfNeeded(const StoragePtr & storage, } /// Apply filters from additional_table_filters setting -FilterDAGInfo buildAdditionalFiltersIfNeeded(const StoragePtr & storage, +std::optional buildAdditionalFiltersIfNeeded(const StoragePtr & storage, const String & table_expression_alias, SelectQueryInfo & table_expression_query_info, PlannerContextPtr & planner_context) @@ -789,9 +789,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres std::vector> where_filters; const auto add_filter = [&](FilterDAGInfo & filter_info, std::string description) { - if (!filter_info.actions) - return; - bool is_final = table_expression_query_info.table_expression_modifiers && table_expression_query_info.table_expression_modifiers->hasFinal(); bool optimize_move_to_prewhere @@ -805,14 +802,14 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if 
(!prewhere_info->prewhere_actions) { - prewhere_info->prewhere_actions = std::move(*filter_info.actions); + prewhere_info->prewhere_actions = std::move(filter_info.actions); prewhere_info->prewhere_column_name = filter_info.column_name; prewhere_info->remove_prewhere_column = filter_info.do_remove_column; prewhere_info->need_filter = true; } else if (!prewhere_info->row_level_filter) { - prewhere_info->row_level_filter = std::move(*filter_info.actions); + prewhere_info->row_level_filter = std::move(filter_info.actions); prewhere_info->row_level_column_name = filter_info.column_name; prewhere_info->need_filter = true; } @@ -830,17 +827,18 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto row_policy_filter_info = buildRowPolicyFilterIfNeeded(storage, table_expression_query_info, planner_context, used_row_policies); - if (row_policy_filter_info.actions) - table_expression_data.setRowLevelFilterActions(ActionsDAG::clone(&*row_policy_filter_info.actions)); - add_filter(row_policy_filter_info, "Row-level security filter"); + if (row_policy_filter_info) + { + table_expression_data.setRowLevelFilterActions(ActionsDAG::clone(&row_policy_filter_info->actions)); + add_filter(*row_policy_filter_info, "Row-level security filter"); + } if (query_context->getParallelReplicasMode() == Context::ParallelReplicasMode::CUSTOM_KEY) { if (settings.parallel_replicas_count > 1) { - auto parallel_replicas_custom_key_filter_info - = buildCustomKeyFilterIfNeeded(storage, table_expression_query_info, planner_context); - add_filter(parallel_replicas_custom_key_filter_info, "Parallel replicas custom key filter"); + if (auto parallel_replicas_custom_key_filter_info= buildCustomKeyFilterIfNeeded(storage, table_expression_query_info, planner_context)) + add_filter(*parallel_replicas_custom_key_filter_info, "Parallel replicas custom key filter"); } else if (auto * distributed = typeid_cast(storage.get()); distributed && query_context->canUseParallelReplicasCustomKey(*distributed->getCluster())) @@ -850,9 +848,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } const auto & table_expression_alias = table_expression->getOriginalAlias(); - auto additional_filters_info - = buildAdditionalFiltersIfNeeded(storage, table_expression_alias, table_expression_query_info, planner_context); - add_filter(additional_filters_info, "additional filter"); + if (auto additional_filters_info = buildAdditionalFiltersIfNeeded(storage, table_expression_alias, table_expression_query_info, planner_context)) + add_filter(*additional_filters_info, "additional filter"); from_stage = storage->getQueryProcessingStage( query_context, select_query_options.to_stage, storage_snapshot, table_expression_query_info); @@ -967,11 +964,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres for (auto && [filter_info, description] : where_filters) { if (query_plan.isInitialized() && - from_stage == QueryProcessingStage::FetchColumns && - filter_info.actions) + from_stage == QueryProcessingStage::FetchColumns) { auto filter_step = std::make_unique(query_plan.getCurrentDataStream(), - std::move(*filter_info.actions), + std::move(filter_info.actions), filter_info.column_name, filter_info.do_remove_column); filter_step->setStepDescription(description); diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 57f79a2cd7f..4164608b4b5 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -340,10 +340,8 @@ std::string 
FilterDAGInfo::dump() const WriteBufferFromOwnString ss; ss << "FilterDAGInfo for column '" << column_name <<"', do_remove_column " << do_remove_column << "\n"; - if (actions) - { - ss << "actions " << actions->dumpDAG() << "\n"; - } + + ss << "actions " << actions.dumpDAG() << "\n"; return ss.str(); } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 334c8c9c5ac..88fb52a94f2 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -7058,7 +7058,7 @@ ActionDAGNodes MergeTreeData::getFiltersForPrimaryKeyAnalysis(const InterpreterS ActionDAGNodes filter_nodes; if (auto additional_filter_info = select.getAdditionalQueryInfo()) - filter_nodes.nodes.push_back(&additional_filter_info->actions->findInOutputs(additional_filter_info->column_name)); + filter_nodes.nodes.push_back(&additional_filter_info->actions.findInOutputs(additional_filter_info->column_name)); if (before_where) filter_nodes.nodes.push_back(&before_where->dag.findInOutputs(where_column_name)); diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 5276870c037..97b36115dfd 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -93,7 +93,7 @@ struct FilterInfo /// Same as FilterInfo, but with ActionsDAG. struct FilterDAGInfo { - std::optional actions; + ActionsDAG actions; String column_name; bool do_remove_column = false; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 374abd0b0a5..18e194491b8 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -407,7 +407,7 @@ void ReadFromMerge::addFilter(FilterDAGInfo filter) { output_stream->header = FilterTransform::transformHeader( output_stream->header, - filter.actions ? &*filter.actions : nullptr, + &filter.actions, filter.column_name, filter.do_remove_column); pushed_down_filters.push_back(std::move(filter)); @@ -662,7 +662,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ { auto filter_step = std::make_unique( child.plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&*filter_info.actions)), + std::move(*ActionsDAG::clone(&filter_info.actions)), filter_info.column_name, filter_info.do_remove_column); @@ -1565,7 +1565,7 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_) void ReadFromMerge::applyFilters(ActionDAGNodes added_filter_nodes) { for (const auto & filter_info : pushed_down_filters) - added_filter_nodes.nodes.push_back(&filter_info.actions->findInOutputs(filter_info.column_name)); + added_filter_nodes.nodes.push_back(&filter_info.actions.findInOutputs(filter_info.column_name)); SourceStepWithFilter::applyFilters(added_filter_nodes); From fb7cf4ab93c991b3e2cd8a3e3e1c6cecf574b936 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Jul 2024 17:46:03 +0000 Subject: [PATCH 0352/2361] Better. 
--- src/Interpreters/ActionsDAG.cpp | 16 +++++++++++----- src/Interpreters/ActionsDAG.h | 4 +++- src/Interpreters/ExpressionActions.cpp | 2 +- src/Planner/CollectTableExpressionData.cpp | 10 +++++----- src/Planner/Planner.cpp | 4 ++-- src/Planner/PlannerJoinTree.cpp | 7 ++++--- src/Planner/TableExpressionData.h | 18 +++++++++--------- 7 files changed, 35 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 4401c83549f..4f03a9e1602 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1251,14 +1251,20 @@ ActionsDAGPtr ActionsDAG::clone(const ActionsDAG * from) std::unordered_map old_to_new_nodes; if (from == nullptr) return nullptr; - return std::make_unique(ActionsDAG::clone(*from, old_to_new_nodes)); + return std::make_unique(from->clone(old_to_new_nodes)); } -ActionsDAG ActionsDAG::clone(const ActionsDAG & from, std::unordered_map & old_to_new_nodes) +ActionsDAG ActionsDAG::clone() const +{ + std::unordered_map old_to_new_nodes; + return clone(old_to_new_nodes); +} + +ActionsDAG ActionsDAG::clone(std::unordered_map & old_to_new_nodes) const { ActionsDAG actions; - for (const auto & node : from.nodes) + for (const auto & node : nodes) { auto & copy_node = actions.nodes.emplace_back(node); old_to_new_nodes[&node] = ©_node; @@ -1268,10 +1274,10 @@ ActionsDAG ActionsDAG::clone(const ActionsDAG & from, std::unordered_map & old_to_new_nodes); + + ActionsDAG clone(std::unordered_map & old_to_new_nodes) const; + ActionsDAG clone() const; static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index dd1d2eb703e..113410b1480 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -75,7 +75,7 @@ ExpressionActionsPtr ExpressionActions::clone() const auto copy = std::make_shared(ExpressionActions()); std::unordered_map copy_map; - copy->actions_dag = ActionsDAG::clone(actions_dag, copy_map); + copy->actions_dag = actions_dag.clone(copy_map); copy->actions = actions; for (auto & action : copy->actions) action.node = copy_map[action.node]; diff --git a/src/Planner/CollectTableExpressionData.cpp b/src/Planner/CollectTableExpressionData.cpp index 162d3fe8d11..1d85476636c 100644 --- a/src/Planner/CollectTableExpressionData.cpp +++ b/src/Planner/CollectTableExpressionData.cpp @@ -335,22 +335,22 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr collect_source_columns_visitor.setKeepAliasColumns(false); collect_source_columns_visitor.visit(query_node_typed.getPrewhere()); - auto prewhere_actions_dag = std::make_unique(); + ActionsDAG prewhere_actions_dag; QueryTreeNodePtr query_tree_node = query_node_typed.getPrewhere(); PlannerActionsVisitor visitor(planner_context, false /*use_column_identifier_as_action_node_name*/); - auto expression_nodes = visitor.visit(*prewhere_actions_dag, query_tree_node); + auto expression_nodes = visitor.visit(prewhere_actions_dag, query_tree_node); if (expression_nodes.size() != 1) throw Exception(ErrorCodes::ILLEGAL_PREWHERE, "Invalid PREWHERE. Expected single boolean expression. 
In query {}", query_node->formatASTForErrorMessage()); - prewhere_actions_dag->getOutputs().push_back(expression_nodes.back()); + prewhere_actions_dag.getOutputs().push_back(expression_nodes.back()); - for (const auto & prewhere_input_node : prewhere_actions_dag->getInputs()) + for (const auto & prewhere_input_node : prewhere_actions_dag.getInputs()) if (required_column_names_without_prewhere.contains(prewhere_input_node->result_name)) - prewhere_actions_dag->getOutputs().push_back(prewhere_input_node); + prewhere_actions_dag.getOutputs().push_back(prewhere_input_node); table_expression_data.setPrewhereFilterActions(std::move(prewhere_actions_dag)); } diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index ffed19185d3..9042303d0e4 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1412,7 +1412,7 @@ void Planner::buildPlanForQueryNode() checkStoragesSupportTransactions(planner_context); const auto & table_filters = planner_context->getGlobalPlannerContext()->filters_for_table_expressions; - if (!select_query_options.only_analyze && !table_filters.empty()) // && top_level) + if (!select_query_options.only_analyze && !table_filters.empty()) { for (auto & [table_node, table_expression_data] : planner_context->getTableExpressionNodeToData()) { @@ -1420,7 +1420,7 @@ void Planner::buildPlanForQueryNode() if (it != table_filters.end()) { const auto & filters = it->second; - table_expression_data.setFilterActions(ActionsDAG::clone(&*filters.filter_actions)); + table_expression_data.setFilterActions(filters.filter_actions->clone()); table_expression_data.setPrewhereInfo(filters.prewhere_info); } } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 3217d3461d3..d55e5e99f71 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -646,7 +646,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto table_expression_query_info = select_query_info; table_expression_query_info.table_expression = table_expression; - table_expression_query_info.filter_actions_dag = ActionsDAG::clone(table_expression_data.getFilterActions()); + if (const auto & filter_actions = table_expression_data.getFilterActions()) + table_expression_query_info.filter_actions_dag = std::make_shared(filter_actions->clone()); table_expression_query_info.analyzer_can_use_parallel_replicas_on_follower = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; size_t max_streams = settings.max_threads; @@ -776,7 +777,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (prewhere_actions) { prewhere_info = std::make_shared(); - prewhere_info->prewhere_actions = std::move(*ActionsDAG::clone(prewhere_actions)); + prewhere_info->prewhere_actions = prewhere_actions->clone(); prewhere_info->prewhere_column_name = prewhere_actions->getOutputs().at(0)->result_name; prewhere_info->remove_prewhere_column = true; prewhere_info->need_filter = true; @@ -829,7 +830,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres = buildRowPolicyFilterIfNeeded(storage, table_expression_query_info, planner_context, used_row_policies); if (row_policy_filter_info) { - table_expression_data.setRowLevelFilterActions(ActionsDAG::clone(&row_policy_filter_info->actions)); + table_expression_data.setRowLevelFilterActions(row_policy_filter_info->actions.clone()); add_filter(*row_policy_filter_info, "Row-level security filter"); } diff --git 
a/src/Planner/TableExpressionData.h b/src/Planner/TableExpressionData.h index 9723a00a356..1d04fac3dc3 100644 --- a/src/Planner/TableExpressionData.h +++ b/src/Planner/TableExpressionData.h @@ -211,32 +211,32 @@ public: is_merge_tree = is_merge_tree_value; } - const ActionsDAGPtr & getPrewhereFilterActions() const + const std::optional & getPrewhereFilterActions() const { return prewhere_filter_actions; } - void setRowLevelFilterActions(ActionsDAGPtr row_level_filter_actions_value) + void setRowLevelFilterActions(ActionsDAG row_level_filter_actions_value) { row_level_filter_actions = std::move(row_level_filter_actions_value); } - const ActionsDAGPtr & getRowLevelFilterActions() const + const std::optional & getRowLevelFilterActions() const { return row_level_filter_actions; } - void setPrewhereFilterActions(ActionsDAGPtr prewhere_filter_actions_value) + void setPrewhereFilterActions(ActionsDAG prewhere_filter_actions_value) { prewhere_filter_actions = std::move(prewhere_filter_actions_value); } - const ActionsDAGPtr & getFilterActions() const + const std::optional & getFilterActions() const { return filter_actions; } - void setFilterActions(ActionsDAGPtr filter_actions_value) + void setFilterActions(ActionsDAG filter_actions_value) { filter_actions = std::move(filter_actions_value); } @@ -289,16 +289,16 @@ private: ColumnIdentifierToColumnName column_identifier_to_column_name; /// Valid for table, table function - ActionsDAGPtr filter_actions; + std::optional filter_actions; /// Valid for table, table function PrewhereInfoPtr prewhere_info; /// Valid for table, table function - ActionsDAGPtr prewhere_filter_actions; + std::optional prewhere_filter_actions; /// Valid for table, table function - ActionsDAGPtr row_level_filter_actions; + std::optional row_level_filter_actions; /// Is storage remote bool is_remote = false; From b77cfc2724f551cc359e7fbccf3a074c7510cdd0 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 18:47:24 +0000 Subject: [PATCH 0353/2361] Fix compound identifier parser --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 1 - src/Parsers/ExpressionElementParsers.cpp | 24 ++++++++++++++++++++---- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4bbe034cdc0..db2c8572266 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1111,7 +1111,7 @@ class IColumn; M(Bool, schema_inference_make_columns_nullable, true, "If set to true, all inferred types will be Nullable in schema inference for formats without information about nullability.", 0) \ M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \ M(Bool, input_format_json_read_bools_as_strings, true, "Allow to parse bools as strings in JSON input formats", 0) \ - M(Bool, input_format_json_try_infer_numbers_from_strings, true, "Try to infer numbers from string fields while schema inference", 0) \ + M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \ M(Bool, input_format_json_validate_types_from_metadata, true, "For JSON/JSONCompact/JSONColumnsWithMetadata input formats this controls whether format parser should check if data types from input metadata match data types of the corresponding columns from the table", 0) \ M(Bool, input_format_json_read_numbers_as_strings, true, "Allow to parse numbers as strings in JSON input formats", 0) \ M(Bool, input_format_json_read_objects_as_strings, true, 
"Allow to parse JSON objects as strings in JSON input formats", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index b5a48c3bf7e..8e42c3a3ea8 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -81,7 +81,6 @@ static std::initializer_listparse(pos, element, expected)) - return false; + { + if (is_first) + return false; + pos = begin; + break; + } + + is_first = false; + if (last_special_delimiter != SpecialDelimiter::NONE) parts.push_back(static_cast(last_special_delimiter) + backQuote(getIdentifierName(element))); else @@ -309,7 +318,8 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex if (parts.back().empty()) params.push_back(element->as()->getParam()); - parsed_delimiter = false; + begin = pos; + bool parsed_delimiter = false; for (const auto & [parser, special_delimiter] : delimiter_parsers) { if (parser->check(pos, expected)) @@ -319,6 +329,12 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex break; } } + + if (!parsed_delimiter) + { + pos = begin; + break; + } } ParserKeyword s_uuid(Keyword::UUID); From 11d56a08cd82aae5b37f30fdbf5d218ef8446a63 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 12 Jul 2024 20:16:47 +0000 Subject: [PATCH 0354/2361] Fix clickhouse-format --- docs/en/sql-reference/data-types/json.md | 2 +- programs/format/Format.cpp | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index d35a3f90ef3..23bb57e5a6c 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -4,7 +4,7 @@ sidebar_position: 63 sidebar_label: JSON --- -# JSON` +# JSON Stores JavaScript Object Notation (JSON) documents in a single column. 
diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index 1b91e7ceaf3..a434c9171e9 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -175,6 +175,11 @@ int mainEntryClickHouseFormat(int argc, char ** argv) hash_func.update(options["seed"].as()); } + SharedContextHolder shared_context = Context::createShared(); + auto context = Context::createGlobal(shared_context.get()); + auto context_const = WithContext(context).getContext(); + context->makeGlobalContext(); + registerInterpreters(); registerFunctions(); registerAggregateFunctions(); From 201f813516e1283a4d0528bf71753e8291526ccf Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 13 Jul 2024 02:37:09 +0000 Subject: [PATCH 0355/2361] add prep for rebuild --- .../MergeTree/MergeMutateSelectedEntry.h | 5 +- .../MergeTree/MergeTreeMutationEntry.cpp | 4 +- .../MergeTree/MergeTreeMutationEntry.h | 6 ++- .../MergeTree/MutatePlainMergeTreeTask.cpp | 2 + src/Storages/MergeTree/MutateTask.cpp | 52 +++++++++++++------ src/Storages/StorageMergeTree.cpp | 11 +++- 6 files changed, 60 insertions(+), 20 deletions(-) diff --git a/src/Storages/MergeTree/MergeMutateSelectedEntry.h b/src/Storages/MergeTree/MergeMutateSelectedEntry.h index c420cbca12b..116c7d26552 100644 --- a/src/Storages/MergeTree/MergeMutateSelectedEntry.h +++ b/src/Storages/MergeTree/MergeMutateSelectedEntry.h @@ -40,12 +40,15 @@ struct MergeMutateSelectedEntry CurrentlyMergingPartsTaggerPtr tagger; MutationCommandsConstPtr commands; MergeTreeTransactionPtr txn; + Field lightweight_delete_projection_mode; MergeMutateSelectedEntry(FutureMergedMutatedPartPtr future_part_, CurrentlyMergingPartsTaggerPtr tagger_, - MutationCommandsConstPtr commands_, const MergeTreeTransactionPtr & txn_ = NO_TRANSACTION_PTR) + MutationCommandsConstPtr commands_, const MergeTreeTransactionPtr & txn_ = NO_TRANSACTION_PTR, + const Field & lightweight_delete_projection_mode_ = LightweightMutationProjectionMode::THROW) : future_part(future_part_) , tagger(std::move(tagger_)) , commands(commands_) , txn(txn_) + , lightweight_delete_projection_mode(lightweight_delete_projection_mode_) {} }; diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index 4dbccb91620..06f4875d120 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -48,7 +48,8 @@ UInt64 MergeTreeMutationEntry::parseFileName(const String & file_name_) } MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk_, const String & path_prefix_, UInt64 tmp_number, - const TransactionID & tid_, const WriteSettings & settings) + const TransactionID & tid_, const WriteSettings & settings, + const Field & lightweight_delete_projection_mode_) : create_time(time(nullptr)) , commands(std::move(commands_)) , disk(std::move(disk_)) @@ -56,6 +57,7 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskP , file_name("tmp_mutation_" + toString(tmp_number) + ".txt") , is_temp(true) , tid(tid_) + , lightweight_delete_projection_mode(lightweight_delete_projection_mode_) { try { diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h index 04297f2852a..cbc7e2d4274 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -36,9 +36,13 @@ struct MergeTreeMutationEntry /// or UnknownCSN if it's not committed (yet) or RolledBackCSN if it's 
rolled back or PrehistoricCSN if there is no transaction. CSN csn = Tx::UnknownCSN; + /// From query context. + Field lightweight_delete_projection_mode; + /// Create a new entry and write it to a temporary file. MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, UInt64 tmp_number, - const TransactionID & tid_, const WriteSettings & settings); + const TransactionID & tid_, const WriteSettings & settings, + const Field & lightweight_delete_projection_mode_); MergeTreeMutationEntry(const MergeTreeMutationEntry &) = delete; MergeTreeMutationEntry(MergeTreeMutationEntry &&) = default; diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 20f387137e7..1bf337973ff 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -140,6 +140,8 @@ ContextMutablePtr MutatePlainMergeTreeTask::createTaskContext() const auto queryId = getQueryId(); context->setCurrentQueryId(queryId); context->setBackgroundOperationTypeForContext(ClientInfo::BackgroundOperationType::MUTATION); + if (merge_mutate_entry) + context->setSetting("lightweight_mutation_projection_mode", merge_mutate_entry->lightweight_delete_projection_mode); return context; } diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 2adcb49d6a3..ed603abd9c3 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -656,7 +656,9 @@ static NameSet collectFilesToSkip( const std::set & indices_to_recalc, const String & mrk_extension, const std::set & projections_to_recalc, - const std::set & stats_to_recalc) + const std::set & stats_to_recalc, + const StorageMetadataPtr & metadata_snapshot, + bool lightweight_delete_mode) { NameSet files_to_skip = source_part->getFileNamesWithoutChecksums(); @@ -680,8 +682,16 @@ static NameSet collectFilesToSkip( } } - for (const auto & projection : projections_to_recalc) - files_to_skip.insert(projection->getDirectoryName()); + if (lightweight_delete_mode) + { + for (const auto & projection : metadata_snapshot->getProjections()) + files_to_skip.insert(projection.getDirectoryName()); + } + else + { + for (const auto & projection : projections_to_recalc) + files_to_skip.insert(projection->getDirectoryName()); + } for (const auto & stat : stats_to_recalc) files_to_skip.insert(stat->getFileName() + STATS_FILE_SUFFIX); @@ -1042,8 +1052,6 @@ struct MutationContext /// Whether we need to count lightweight delete rows in this mutation bool count_lightweight_deleted_rows; - - bool lightweight_delete_mode; }; using MutationContextPtr = std::shared_ptr; @@ -1573,7 +1581,7 @@ private: } else { - if (!ctx->lightweight_delete_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) + if (!ctx->updated_header.has(RowExistsColumn::name) && ctx->source_part->checksums.has(projection.getDirectoryName())) entries_to_hardlink.insert(projection.getDirectoryName()); } } @@ -1843,8 +1851,7 @@ private: hardlinked_files.insert(it->name()); } } - /// Ignore projection tmp merge dir, and under lightweight delete mode ignore projection files. 
- else if (!endsWith(it->name(), ".tmp_proj") && !ctx->lightweight_delete_mode) + else if (!endsWith(it->name(), ".tmp_proj")) // ignore projection tmp merge dir { // it's a projection part directory ctx->new_data_part->getDataPartStorage().createProjection(destination); @@ -2193,6 +2200,7 @@ bool MutateTask::prepare() context_for_reading->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false); context_for_reading->setSetting("max_streams_for_merge_tree_reading", Field(0)); context_for_reading->setSetting("read_from_filesystem_cache_if_exists_otherwise_bypass_cache", 1); + context_for_reading->setSetting("lightweight_mutation_projection_mode", Field(ctx->context->getSettingsRef().lightweight_mutation_projection_mode)); MutationHelpers::splitAndModifyMutationCommands( ctx->source_part, ctx->metadata_snapshot, @@ -2217,6 +2225,15 @@ bool MutateTask::prepare() ctx->mutating_pipeline_builder = ctx->interpreter->execute(); ctx->updated_header = ctx->interpreter->getUpdatedHeader(); ctx->progress_callback = MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress); + + // ctx->updated_header.has(RowExistsColumn::name); + // for (const auto & projection : ctx->metadata_snapshot->getProjections()) + // { + // if (!ctx->source_part->hasProjection(projection.name)) + // continue; + + // ctx->materialized_projections.insert(projection.name); + // } } auto single_disk_volume = std::make_shared("volume_" + ctx->future_part->name, ctx->space_reservation->getDisk(), 0); @@ -2258,8 +2275,8 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - ctx->lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); - if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->lightweight_delete_mode) + bool lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && lightweight_delete_mode) { /// This mutation contains lightweight delete and we need to count the deleted rows, /// Reset existing_rows_count of new data part to 0 and it will be updated while writing _row_exists column @@ -2296,10 +2313,13 @@ bool MutateTask::prepare() ctx->context, ctx->materialized_indices); - ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( - ctx->source_part, - ctx->metadata_snapshot, - ctx->materialized_projections); + if (!lightweight_delete_mode) + { + ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( + ctx->source_part, + ctx->metadata_snapshot, + ctx->materialized_projections); + } ctx->stats_to_recalc = MutationHelpers::getStatisticsToRecalculate(ctx->metadata_snapshot, ctx->materialized_statistics); @@ -2310,7 +2330,9 @@ bool MutateTask::prepare() ctx->indices_to_recalc, ctx->mrk_extension, ctx->projections_to_recalc, - ctx->stats_to_recalc); + ctx->stats_to_recalc, + ctx->metadata_snapshot, + lightweight_delete_mode); ctx->files_to_rename = MutationHelpers::collectFilesForRenames( ctx->source_part, diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 611289ffd78..063e3b7f064 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -517,7 +517,8 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context { std::lock_guard 
lock(currently_processing_in_background_mutex); - MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings()); + MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings(), + Field(query_context->getSettingsRef().lightweight_mutation_projection_mode)); version = increment.get(); entry.commit(version); String mutation_id = entry.file_name; @@ -1282,12 +1283,18 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( auto commands = std::make_shared(); size_t current_ast_elements = 0; auto last_mutation_to_apply = mutations_end_it; + + /// Trying to grab it from query context. + Field lightweight_delete_projection_mode = LightweightMutationProjectionMode::THROW; + for (auto it = mutations_begin_it; it != mutations_end_it; ++it) { /// Do not squash mutations from different transactions to be able to commit/rollback them independently. if (first_mutation_tid != it->second.tid) break; + lightweight_delete_projection_mode = it->second.lightweight_delete_projection_mode; + size_t commands_size = 0; MutationCommands commands_for_size_validation; for (const auto & command : it->second.commands) @@ -1364,7 +1371,7 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( future_part->part_format = part->getFormat(); tagger = std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace({part}, false), *this, metadata_snapshot, true); - return std::make_shared(future_part, std::move(tagger), commands, txn); + return std::make_shared(future_part, std::move(tagger), commands, txn, lightweight_delete_projection_mode); } } From 264d7d760fedd3fc3c900d13ee1f7976887efaa7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 13 Jul 2024 15:52:23 +0000 Subject: [PATCH 0356/2361] Bump rocksdb to 7.0.4 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index be366233921..4fc59e24001 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit be366233921293bd07a84dc4ea6991858665f202 +Subproject commit 4fc59e240016a62180b09703e2938c3d7e928de0 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 3a14407166c..f6479346063 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -59,10 +59,8 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64") # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") endif() -set (HAVE_THREAD_LOCAL 1) -if(HAVE_THREAD_LOCAL) - add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL) -endif() +# thread_local is part of C++11 and later (TODO: clean up this define) +add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL) if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_definitions(-DOS_MACOSX) @@ -182,7 +180,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/env/env.cc ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc - ${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc ${ROCKSDB_SOURCE_DIR}/env/file_system.cc ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc @@ -311,7 +308,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc ${ROCKSDB_SOURCE_DIR}/util/random.cc ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc - ${ROCKSDB_SOURCE_DIR}/util/regex.cc ${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc 
${ROCKSDB_SOURCE_DIR}/util/slice.cc ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc @@ -335,6 +331,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc + ${ROCKSDB_SOURCE_DIR}/utilities/counted_fs.cc ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc From 0fc14520c821f22b493d32657fede6be10832d60 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 13 Jul 2024 23:06:37 +0000 Subject: [PATCH 0357/2361] add server termination on exit --- programs/server/fuzzers/tcp_protocol_fuzzer.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/programs/server/fuzzers/tcp_protocol_fuzzer.cpp b/programs/server/fuzzers/tcp_protocol_fuzzer.cpp index 950ea09669a..7cebdc2ad65 100644 --- a/programs/server/fuzzers/tcp_protocol_fuzzer.cpp +++ b/programs/server/fuzzers/tcp_protocol_fuzzer.cpp @@ -10,6 +10,7 @@ #include #include +#include #include @@ -25,6 +26,12 @@ static int64_t port = 9000; using namespace std::chrono_literals; +void on_exit() +{ + BaseDaemon::terminate(); + main_app.wait(); +} + extern "C" int LLVMFuzzerInitialize(int * argc, char ***argv) { @@ -60,6 +67,8 @@ int LLVMFuzzerInitialize(int * argc, char ***argv) exit(-1); } + atexit(on_exit); + return 0; } From 3ccc2aed4c76eba20e0fc88768412bbfacafbb95 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 13 Jul 2024 23:44:13 +0000 Subject: [PATCH 0358/2361] add fuzzer_arguments to fuzzer runner --- docker/test/libfuzzer/run_libfuzzer.py | 7 +++++++ tests/fuzz/tcp_protocol_fuzzer.options | 4 ++++ 2 files changed, 11 insertions(+) create mode 100644 tests/fuzz/tcp_protocol_fuzzer.options diff --git a/docker/test/libfuzzer/run_libfuzzer.py b/docker/test/libfuzzer/run_libfuzzer.py index 5ed019490d5..cdd09dfa3be 100755 --- a/docker/test/libfuzzer/run_libfuzzer.py +++ b/docker/test/libfuzzer/run_libfuzzer.py @@ -20,6 +20,7 @@ def run_fuzzer(fuzzer: str): options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" + fuzzer_arguments = "" with Path(options_file) as path: if path.exists() and path.is_file(): @@ -47,6 +48,12 @@ def run_fuzzer(fuzzer: str): for key, value in parser["libfuzzer"].items() ) + if parser.has_section("fuzzer_arguments"): + fuzzer_arguments = " ".join( + ("%s" % key) if value == "" else ("%s=%s" % (key, value)) + for key, value in parser["fuzzer_arguments"].items() + ) + cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {corpus_dir}" if custom_libfuzzer_options: cmd_line += f" {custom_libfuzzer_options}" diff --git a/tests/fuzz/tcp_protocol_fuzzer.options b/tests/fuzz/tcp_protocol_fuzzer.options new file mode 100644 index 00000000000..4885669d91d --- /dev/null +++ b/tests/fuzz/tcp_protocol_fuzzer.options @@ -0,0 +1,4 @@ +[fuzzer_arguments] +--log-file=tcp_protocol_fuzzer.log +--= +--logging.terminal=0 From fee7e22c1f60feb0a4c176355453caad18cd5bc1 Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Sun, 14 Jul 2024 15:23:34 +0330 Subject: [PATCH 0359/2361] Changed the error code --- src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index 4388864434e..87a44db573d 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ 
b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -36,7 +36,7 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; - extern const int QUERY_NOT_ALLOWED; + extern const int BAD_QUERY_PARAMETER; } namespace @@ -150,7 +150,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue( } else if (!configuration->isPathWithGlobs()) { - throw Exception(ErrorCodes::QUERY_NOT_ALLOWED, "ObjectStorageQueue url must either end with '/' or contain globs"); + throw Exception(ErrorCodes::BAD_QUERY_PARAMETER, "ObjectStorageQueue url must either end with '/' or contain globs"); } checkAndAdjustSettings(*queue_settings, engine_args, mode > LoadingStrictnessLevel::CREATE, log); From b5cb264b017e965037dbb0bd4623df5f5a65ec0b Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 13 Jul 2024 15:41:30 +0000 Subject: [PATCH 0360/2361] Bump ICU to 71 --- contrib/icu | 2 +- contrib/icu-cmake/CMakeLists.txt | 10 +++++----- contrib/icudata | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/contrib/icu b/contrib/icu index a56dde820dc..c205e7ee49a 160000 --- a/contrib/icu +++ b/contrib/icu @@ -1 +1 @@ -Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668 +Subproject commit c205e7ee49a7086a28b9c275fcfdac9ca3dc815d diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index 0a650f2bcc0..f23b0002b8d 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -4,7 +4,9 @@ else () option(ENABLE_ICU "Enable ICU" 0) endif () -if (NOT ENABLE_ICU) +# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they re-generated +# the blob on s390x: https://github.com/ClickHouse/icudata/pull/2#issuecomment-2226957255 +if (NOT ENABLE_ICU OR ARCH_S390X) message(STATUS "Not using ICU") return() endif() @@ -12,8 +14,6 @@ endif() set(ICU_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icu/icu4c/source") set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/") -set (CMAKE_CXX_STANDARD 17) - # These lists of sources were generated from build log of the original ICU build system (configure + make). 
set(ICUUC_SOURCES @@ -462,9 +462,9 @@ file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) if (ARCH_S390X) - set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70b_dat.S" ) + set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt71b_dat.S" ) else() - set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S" ) + set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt71l_dat.S" ) endif() set(ICUDATA_SOURCES diff --git a/contrib/icudata b/contrib/icudata index c8e717892a5..e7488edd1f1 160000 --- a/contrib/icudata +++ b/contrib/icudata @@ -1 +1 @@ -Subproject commit c8e717892a557b4d2852317c7d628aacc0a0e5ab +Subproject commit e7488edd1f141b0664553a985a6fcd0125279527 From 5b6956ea234962cca5414a3fb0a6191407b4305a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 13 Jul 2024 16:48:09 +0000 Subject: [PATCH 0361/2361] Bump rocksdb to v7.10.2 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 53 +++++++++++++------------- contrib/rocksdb-cmake/build_version.cc | 31 +++++++++++---- 3 files changed, 51 insertions(+), 35 deletions(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 4fc59e24001..01e43568fa9 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 4fc59e240016a62180b09703e2938c3d7e928de0 +Subproject commit 01e43568fa9f3f7bf107b2b66c00b286b456f33e diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index f6479346063..98790158baa 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -5,6 +5,9 @@ if (NOT ENABLE_ROCKSDB) return() endif() +# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default +set (CMAKE_CXX_STANDARD 20) + # Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs option(WITH_JEMALLOC "build with JeMalloc" OFF) @@ -16,14 +19,6 @@ option(WITH_LZ4 "build with lz4" ON) option(WITH_ZLIB "build with zlib" ON) option(WITH_ZSTD "build with zstd" ON) -# third-party/folly is only validated to work on Linux and Windows for now. -# So only turn it on there by default. 
-if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows") - option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON) -else() - option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF) -endif() - if(WITH_SNAPPY) add_definitions(-DSNAPPY) list(APPEND THIRDPARTY_LIBS ch_contrib::snappy) @@ -44,7 +39,7 @@ if(WITH_ZSTD) list(APPEND THIRDPARTY_LIBS ch_contrib::zstd) endif() -option(PORTABLE "build a portable binary" ON) +add_definitions(-DROCKSDB_PORTABLE) if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ) add_definitions(-DHAVE_SSE42) @@ -59,9 +54,6 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64") # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function") endif() -# thread_local is part of C++11 and later (TODO: clean up this define) -add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL) - if(CMAKE_SYSTEM_NAME MATCHES "Darwin") add_definitions(-DOS_MACOSX) elseif(CMAKE_SYSTEM_NAME MATCHES "Linux") @@ -87,19 +79,21 @@ set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb") include_directories(${ROCKSDB_SOURCE_DIR}) include_directories("${ROCKSDB_SOURCE_DIR}/include") -if(WITH_FOLLY_DISTRIBUTED_MUTEX) - include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly") -endif() set(SOURCES ${ROCKSDB_SOURCE_DIR}/cache/cache.cc ${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc ${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc + ${ROCKSDB_SOURCE_DIR}/cache/cache_helpers.cc ${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc + ${ROCKSDB_SOURCE_DIR}/cache/charged_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc + ${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc + ${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc + ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc @@ -111,6 +105,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc + ${ROCKSDB_SOURCE_DIR}/db/blob/blob_source.cc ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc ${ROCKSDB_SOURCE_DIR}/db/builder.cc ${ROCKSDB_SOURCE_DIR}/db/c.cc @@ -122,7 +117,11 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc + ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_service_job.cc + ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_state.cc + ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_outputs.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc + ${ROCKSDB_SOURCE_DIR}/db/compaction/subcompaction_state.cc ${ROCKSDB_SOURCE_DIR}/db/convenience.cc ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc @@ -157,10 +156,11 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc - ${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc + ${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc ${ROCKSDB_SOURCE_DIR}/db/repair.cc + ${ROCKSDB_SOURCE_DIR}/db/seqno_to_time_mapping.cc 
${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc @@ -172,6 +172,8 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/version_set.cc ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc + ${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc + ${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc @@ -230,16 +232,17 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/options/options.cc ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc + ${ROCKSDB_SOURCE_DIR}/port/mmap.cc ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc - ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc + ${ROCKSDB_SOURCE_DIR}/table/block_based/block_cache.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc @@ -297,9 +300,12 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc + ${ROCKSDB_SOURCE_DIR}/util/async_file_reader.cc + ${ROCKSDB_SOURCE_DIR}/util/cleanable.cc ${ROCKSDB_SOURCE_DIR}/util/coding.cc ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc ${ROCKSDB_SOURCE_DIR}/util/comparator.cc + ${ROCKSDB_SOURCE_DIR}/util/compression.cc ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc @@ -312,11 +318,13 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/util/slice.cc ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc ${ROCKSDB_SOURCE_DIR}/util/status.cc + ${ROCKSDB_SOURCE_DIR}/util/stderr_logger.cc ${ROCKSDB_SOURCE_DIR}/util/string_util.cc ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc - ${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc + ${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc + ${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc @@ -419,15 +427,6 @@ list(APPEND SOURCES "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc" "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc") -if(WITH_FOLLY_DISTRIBUTED_MUTEX) - list(APPEND SOURCES - "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp" - "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp" - "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp" - "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp" - "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp") -endif() - 
add_library(_rocksdb ${SOURCES}) add_library(ch_contrib::rocksdb ALIAS _rocksdb) target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) diff --git a/contrib/rocksdb-cmake/build_version.cc b/contrib/rocksdb-cmake/build_version.cc index f9639da516f..d5ea56673e0 100644 --- a/contrib/rocksdb-cmake/build_version.cc +++ b/contrib/rocksdb-cmake/build_version.cc @@ -1,16 +1,33 @@ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -/// This file was edited for ClickHouse. #include #include "rocksdb/version.h" +#include "rocksdb/utilities/object_registry.h" #include "util/string_util.h" // The build script may replace these values with real values based // on whether or not GIT is available and the platform settings -static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0"; -static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master"; -static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01"; +static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:72438a678872544809393b831c7273794c074215"; +static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:main"; +#define HAS_GIT_CHANGES 0 +#if HAS_GIT_CHANGES == 0 +// If HAS_GIT_CHANGES is 0, the GIT date is used. +// Use the time the branch/tag was last modified +static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-12 16:01:57"; +#else +// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications. +// Use the time the build was created. +static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-13 17:15:50"; +#endif + +extern "C" { + +} // extern "C" + +std::unordered_map ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = { + +}; namespace ROCKSDB_NAMESPACE { static void AddProperty(std::unordered_map *props, const std::string& name) { @@ -39,12 +56,12 @@ const std::unordered_map& GetRocksBuildProperties() { } std::string GetRocksVersionAsString(bool with_patch) { - std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR); + std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR); if (with_patch) { - return version + "." + ToString(ROCKSDB_PATCH); + return version + "." + std::to_string(ROCKSDB_PATCH); } else { return version; - } + } } std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) { From a61304508f07694354c1f698ad8c8c6ba5b65edc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Jul 2024 21:47:09 +0200 Subject: [PATCH 0362/2361] Remove noise from unit tests --- src/Common/Exception.h | 9 +++++++++ src/Common/tests/gtest_shell_command.cpp | 13 ------------- .../MergeTree/MergeTreeBackgroundExecutor.cpp | 4 ++++ src/Storages/MergeTree/tests/gtest_executor.cpp | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/Common/Exception.h b/src/Common/Exception.h index a4774a89f6a..4e54c411bf1 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -244,6 +244,15 @@ private: const char * className() const noexcept override { return "DB::ErrnoException"; } }; +/// An exception to use in unit tests to test interfaces. +/// It is distinguished from others, so it does not have to be logged. +class TestException : public Exception +{ +public: + using Exception::Exception; +}; + + using Exceptions = std::vector; /** Try to write an exception to the log (and forget about it). 
diff --git a/src/Common/tests/gtest_shell_command.cpp b/src/Common/tests/gtest_shell_command.cpp index d6d0a544e9b..0ea96da9da2 100644 --- a/src/Common/tests/gtest_shell_command.cpp +++ b/src/Common/tests/gtest_shell_command.cpp @@ -54,16 +54,3 @@ TEST(ShellCommand, ExecuteWithInput) EXPECT_EQ(res, "Hello, world!\n"); } - -TEST(ShellCommand, AutoWait) -{ - // hunting: - for (int i = 0; i < 1000; ++i) - { - auto command = ShellCommand::execute("echo " + std::to_string(i)); - //command->wait(); // now automatic - } - - // std::cerr << "inspect me: ps auxwwf\n"; - // std::this_thread::sleep_for(std::chrono::seconds(100)); -} diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp index 8cb0badc19b..4e62c503d65 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp @@ -155,6 +155,10 @@ void printExceptionWithRespectToAbort(LoggerPtr log, const String & query_id) { std::rethrow_exception(ex); } + catch (const TestException &) + { + /// Exception from a unit test, ignore it. + } catch (const Exception & e) { NOEXCEPT_SCOPE({ diff --git a/src/Storages/MergeTree/tests/gtest_executor.cpp b/src/Storages/MergeTree/tests/gtest_executor.cpp index 6f34eb4dfbd..c7057ce87c6 100644 --- a/src/Storages/MergeTree/tests/gtest_executor.cpp +++ b/src/Storages/MergeTree/tests/gtest_executor.cpp @@ -34,7 +34,7 @@ public: auto choice = distribution(generator); if (choice == 0) - throw std::runtime_error("Unlucky..."); + throw TestException(); return false; } @@ -48,7 +48,7 @@ public: { auto choice = distribution(generator); if (choice == 0) - throw std::runtime_error("Unlucky..."); + throw TestException(); } Priority getPriority() const override { return {}; } From 4e56b66a9f71b9e339e03f941b5016868ccdd337 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 14 Jul 2024 23:16:53 +0200 Subject: [PATCH 0363/2361] Remove unused local variables --- src/Parsers/ExpressionElementParsers.h | 2 +- src/Parsers/ParserDescribeTableQuery.cpp | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index 4e3f29bfe0c..0209e785bff 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -9,7 +9,7 @@ namespace DB { -/** The SELECT subquery is in parenthesis. +/** The SELECT subquery, in parentheses. 
*/ class ParserSubquery : public IParserBase { diff --git a/src/Parsers/ParserDescribeTableQuery.cpp b/src/Parsers/ParserDescribeTableQuery.cpp index 92c0cfacd9b..22bbfdb03e1 100644 --- a/src/Parsers/ParserDescribeTableQuery.cpp +++ b/src/Parsers/ParserDescribeTableQuery.cpp @@ -11,15 +11,12 @@ namespace DB { - bool ParserDescribeTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserKeyword s_describe(Keyword::DESCRIBE); ParserKeyword s_desc(Keyword::DESC); ParserKeyword s_table(Keyword::TABLE); ParserKeyword s_settings(Keyword::SETTINGS); - ParserToken s_dot(TokenType::Dot); - ParserIdentifier name_p; ParserSetQuery parser_settings(true); ASTPtr database; @@ -53,5 +50,4 @@ bool ParserDescribeTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & ex return true; } - } From d4116aeaeaeec3b17cd813d686a815476a794bed Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Jul 2024 01:31:40 +0000 Subject: [PATCH 0364/2361] fix --- src/Core/SettingsEnums.h | 2 +- src/Storages/MergeTree/MergeMutateSelectedEntry.h | 4 ++-- src/Storages/MergeTree/MergeTreeMutationEntry.cpp | 2 +- src/Storages/MergeTree/MergeTreeMutationEntry.h | 4 ++-- src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp | 3 +-- src/Storages/StorageMergeTree.cpp | 4 ++-- 6 files changed, 9 insertions(+), 10 deletions(-) diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index 3611dfa72be..67fbce31be8 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -342,7 +342,7 @@ DECLARE_SETTING_ENUM(ParallelReplicasCustomKeyFilterType) enum class LightweightMutationProjectionMode : uint8_t { THROW, - DROP + DROP, }; DECLARE_SETTING_ENUM(LightweightMutationProjectionMode) diff --git a/src/Storages/MergeTree/MergeMutateSelectedEntry.h b/src/Storages/MergeTree/MergeMutateSelectedEntry.h index 116c7d26552..bf2d1a7f677 100644 --- a/src/Storages/MergeTree/MergeMutateSelectedEntry.h +++ b/src/Storages/MergeTree/MergeMutateSelectedEntry.h @@ -40,10 +40,10 @@ struct MergeMutateSelectedEntry CurrentlyMergingPartsTaggerPtr tagger; MutationCommandsConstPtr commands; MergeTreeTransactionPtr txn; - Field lightweight_delete_projection_mode; + LightweightMutationProjectionMode lightweight_delete_projection_mode; MergeMutateSelectedEntry(FutureMergedMutatedPartPtr future_part_, CurrentlyMergingPartsTaggerPtr tagger_, MutationCommandsConstPtr commands_, const MergeTreeTransactionPtr & txn_ = NO_TRANSACTION_PTR, - const Field & lightweight_delete_projection_mode_ = LightweightMutationProjectionMode::THROW) + const LightweightMutationProjectionMode & lightweight_delete_projection_mode_ = LightweightMutationProjectionMode::THROW) : future_part(future_part_) , tagger(std::move(tagger_)) , commands(commands_) diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index 06f4875d120..d1bd8efa7a5 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -49,7 +49,7 @@ UInt64 MergeTreeMutationEntry::parseFileName(const String & file_name_) MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk_, const String & path_prefix_, UInt64 tmp_number, const TransactionID & tid_, const WriteSettings & settings, - const Field & lightweight_delete_projection_mode_) + const LightweightMutationProjectionMode & lightweight_delete_projection_mode_) : create_time(time(nullptr)) , commands(std::move(commands_)) , disk(std::move(disk_)) diff --git 
a/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h index cbc7e2d4274..3aca744aa15 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -37,12 +37,12 @@ struct MergeTreeMutationEntry CSN csn = Tx::UnknownCSN; /// From query context. - Field lightweight_delete_projection_mode; + LightweightMutationProjectionMode lightweight_delete_projection_mode; /// Create a new entry and write it to a temporary file. MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, UInt64 tmp_number, const TransactionID & tid_, const WriteSettings & settings, - const Field & lightweight_delete_projection_mode_); + const LightweightMutationProjectionMode & lightweight_delete_projection_mode_); MergeTreeMutationEntry(const MergeTreeMutationEntry &) = delete; MergeTreeMutationEntry(MergeTreeMutationEntry &&) = default; diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 1bf337973ff..666dbe7e61e 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -140,8 +140,7 @@ ContextMutablePtr MutatePlainMergeTreeTask::createTaskContext() const auto queryId = getQueryId(); context->setCurrentQueryId(queryId); context->setBackgroundOperationTypeForContext(ClientInfo::BackgroundOperationType::MUTATION); - if (merge_mutate_entry) - context->setSetting("lightweight_mutation_projection_mode", merge_mutate_entry->lightweight_delete_projection_mode); + context->setSetting("lightweight_mutation_projection_mode", merge_mutate_entry->lightweight_delete_projection_mode); return context; } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 063e3b7f064..7f210779916 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -518,7 +518,7 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context std::lock_guard lock(currently_processing_in_background_mutex); MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings(), - Field(query_context->getSettingsRef().lightweight_mutation_projection_mode)); + query_context->getSettingsRef().lightweight_mutation_projection_mode); version = increment.get(); entry.commit(version); String mutation_id = entry.file_name; @@ -1285,7 +1285,7 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( auto last_mutation_to_apply = mutations_end_it; /// Trying to grab it from query context. 
- Field lightweight_delete_projection_mode = LightweightMutationProjectionMode::THROW; + LightweightMutationProjectionMode lightweight_delete_projection_mode = LightweightMutationProjectionMode::THROW; for (auto it = mutations_begin_it; it != mutations_end_it; ++it) { From 3c09d585cde8068e1f57a1b2adfcdf8b126a8574 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Jul 2024 02:14:58 +0000 Subject: [PATCH 0365/2361] fix --- src/Storages/MergeTree/MergeMutateSelectedEntry.h | 1 + src/Storages/MergeTree/MergeTreeMutationEntry.h | 1 + 2 files changed, 2 insertions(+) diff --git a/src/Storages/MergeTree/MergeMutateSelectedEntry.h b/src/Storages/MergeTree/MergeMutateSelectedEntry.h index bf2d1a7f677..f75d10d9ecb 100644 --- a/src/Storages/MergeTree/MergeMutateSelectedEntry.h +++ b/src/Storages/MergeTree/MergeMutateSelectedEntry.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB { diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h index 3aca744aa15..dbb17654ddd 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB From c72045710ff22405ff5a6d7441d33d64c598e9e8 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Mon, 15 Jul 2024 03:07:23 +0000 Subject: [PATCH 0366/2361] fix wrong count result when there is non-deterministic function in predicate Signed-off-by: Duc Canh Le --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 23 +++++++++++++------ src/Storages/VirtualColumnUtils.h | 2 +- ..._with_non_deterministic_function.reference | 2 ++ ..._count_with_non_deterministic_function.sql | 4 ++++ 5 files changed, 24 insertions(+), 9 deletions(-) create mode 100644 tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference create mode 100644 tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 467a5c82141..fe993f1a435 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1141,7 +1141,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr, /*strict=*/ true); if (!filter_dag) return {}; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 778c9e13adb..3f16f582d63 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -271,7 +271,8 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( const ActionsDAG::Node * node, const Block * allowed_inputs, - ActionsDAG::Nodes & additional_nodes) + ActionsDAG::Nodes & additional_nodes, + bool strict) { if (node->type == ActionsDAG::ActionType::FUNCTION) { @@ -280,8 +281,16 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto & node_copy = additional_nodes.emplace_back(*node); node_copy.children.clear(); for (const auto * child : node->children) - if (const auto * child_copy = 
splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, strict)) + { + /// Expression like (now_allowed AND allowed) is not allowed if strict = true. This is important for + /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is + /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply + /// trivial count. + if (strict) + return nullptr; node_copy.children.push_back(child_copy); + } if (node_copy.children.empty()) return nullptr; @@ -307,7 +316,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { auto & node_copy = additional_nodes.emplace_back(*node); for (auto & child : node_copy.children) - if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes); !child) + if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, strict); !child) return nullptr; return &node_copy; @@ -321,7 +330,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto index_hint_dag = index_hint->getActions()->clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag->getOutputs()) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, strict)) atoms.push_back(child_copy); if (!atoms.empty()) @@ -355,13 +364,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs) +ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool strict) { if (!predicate) return nullptr; ActionsDAG::Nodes additional_nodes; - const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes); + const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, strict); if (!res) return nullptr; @@ -370,7 +379,7 @@ ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context) { - auto dag = splitFilterDagForAllowedInputs(predicate, &block); + auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*strict=*/ false); if (dag) filterBlockWithDAG(dag, block, context); } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index fbfbdd6c6cc..48a0e4fc97f 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -32,7 +32,7 @@ void buildSetsForDAG(const ActionsDAGPtr & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. 
-ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); +ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool strict = false); /// Extract from the input stream a set of `name` column values template diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql new file mode 100644 index 00000000000..d4ffa4d07ac --- /dev/null +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql @@ -0,0 +1,4 @@ +CREATE TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x; +INSERT INTO t SELECT 0, number FROM numbers(10); +SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 0; +SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 1; From f10cb5ffb750fb96996bcfdef0f90e396e32fb31 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 15 Jul 2024 12:04:44 +0800 Subject: [PATCH 0367/2361] Some fixups --- .../ConditionSelectivityEstimator.cpp | 9 ++-- src/Storages/Statistics/Statistics.h | 2 +- .../Statistics/StatisticsCountMinSketch.cpp | 43 ++++--------------- .../Statistics/StatisticsCountMinSketch.h | 10 ++--- src/Storages/Statistics/StatisticsTDigest.cpp | 4 +- src/Storages/Statistics/tests/gtest_stats.cpp | 2 +- 6 files changed, 21 insertions(+), 49 deletions(-) diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp index 19e3157d99a..57dff958b9a 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp @@ -37,9 +37,9 @@ Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateGreat Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual(const Field & val, Float64 rows) const { - auto float_val = StatisticsUtils::tryConvertToFloat64(val); if (part_statistics.empty()) { + auto float_val = StatisticsUtils::tryConvertToFloat64(val); if (!float_val) return default_unknown_cond_factor * rows; else if (float_val.value() < - threshold || float_val.value() > threshold) @@ -133,9 +133,8 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode { auto result = tryToExtractSingleColumn(node); if (result.second != 1) - { return default_unknown_cond_factor * total_rows; - } + String col = result.first; auto it = column_estimators.find(col); @@ -147,12 +146,14 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode estimator = it->second; else dummy = true; + auto [op, val] = extractBinaryOp(node, col); - auto float_val = StatisticsUtils::tryConvertToFloat64(val); + if (op == "equals") { if (dummy) { + auto float_val = StatisticsUtils::tryConvertToFloat64(val); if (!float_val || (float_val < - threshold || float_val > threshold)) return default_normal_cond_factor * total_rows; else diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h 
index 33a5cbac4de..16f0c67eabd 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -1,9 +1,9 @@ #pragma once #include +#include #include #include -#include #include #include diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index a3c6ee8a819..95a8ceda8c8 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -1,11 +1,11 @@ + +#include #include #include #include #include -#include #include - #if USE_DATASKETCHES namespace DB @@ -14,6 +14,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; +extern const int ILLEGAL_STATISTICS; } /// Constants chosen based on rolling dices, which provides an error tolerance of 0.1% (ε = 0.001) and a confidence level of 99.9% (δ = 0.001). @@ -34,18 +35,9 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const /// For example: if data_type is Int32: /// 1. For 1.0, 1, '1', return Field(1) /// 2. For 1.1, max_value_int64, return null - Field val_converted; - try - { - val_converted = convertFieldToType(val, *data_type); - if (val_converted.isNull()) - return 0; - } - catch (...) - { - /// If the conversion fails for example, when converting 'not a number' to Int32, return 0 + Field val_converted = convertFieldToType(val, *data_type); + if (val_converted.isNull()) return 0; - } if (data_type->isValueRepresentedByNumber()) return sketch.get_estimate(&val_converted, data_type->getSizeOfValueInMemory()); @@ -87,19 +79,6 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) sketch = Sketch::deserialize(bytes.data(), size); } -} - -#endif - - -namespace DB -{ - -namespace ErrorCodes -{ -extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME; -extern const int ILLEGAL_STATISTICS; -} void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { @@ -109,18 +88,12 @@ void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr da if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } -#if USE_DATASKETCHES + StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { return std::make_shared(stat, data_type); } -#else -StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription &, DataTypePtr) -{ - throw Exception( - ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, - "Statistics of type 'count_min' is not supported in this build, to enable it turn on USE_DATASKETCHES when building."); -} -#endif } + +#endif diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index d6141f9f73a..aa71c643c05 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -30,12 +30,10 @@ private: DataTypePtr data_type; }; + +void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); + } #endif - -namespace DB -{ -void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); -} diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp 
b/src/Storages/Statistics/StatisticsTDigest.cpp index c19d0a0328c..f3c6ce1566b 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -7,7 +7,7 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_STATISTICS; -extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int LOGICAL_ERROR; } StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & stat_) @@ -54,7 +54,7 @@ Float64 StatisticsTDigest::estimateEqual(const Field & val) const auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); if (val_as_float) return t_digest.getCountEqual(*val_as_float); - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Statistics 'tdigest' does not support estimate constant value of type {}", val.getTypeName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); } void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) diff --git a/src/Storages/Statistics/tests/gtest_stats.cpp b/src/Storages/Statistics/tests/gtest_stats.cpp index d80f64b8b6b..f82a535bebc 100644 --- a/src/Storages/Statistics/tests/gtest_stats.cpp +++ b/src/Storages/Statistics/tests/gtest_stats.cpp @@ -59,7 +59,7 @@ void testConvertFieldToDataType(const DataTypePtr & data_type, const Fields & fi } catch(...) { - /// Just ignore exceptions + ASSERT_FALSE(convert_failed); } if (convert_failed) ASSERT_TRUE(converted_value.isNull()); From e8aa157c56e035449b90e3c428f27db0c40263b5 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 15 Jul 2024 14:25:39 +0800 Subject: [PATCH 0368/2361] Fix code style --- src/Storages/Statistics/StatisticsTDigest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index f3c6ce1566b..e3a59f3251a 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -46,7 +46,7 @@ Float64 StatisticsTDigest::estimateLess(const Field & val) const auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); if (val_as_float) return t_digest.getCountLessThan(*val_as_float); - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Statistics 'tdigest' does not support estimate constant value of type {}", val.getTypeName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimate value of type {}", val.getTypeName()); } Float64 StatisticsTDigest::estimateEqual(const Field & val) const From 68aebce89f30eb7766c420d25e852e21e27dfe7d Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 15 Jul 2024 14:41:37 +0800 Subject: [PATCH 0369/2361] fix failed uts --- src/Functions/printf.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp index a890b886338..3efe854a53b 100644 --- a/src/Functions/printf.cpp +++ b/src/Functions/printf.cpp @@ -93,6 +93,8 @@ private: { T a = data[i]; s = fmt::sprintf(format, static_cast>(a)); + + res_chars.resize(curr_offset + s.size() + 1); memcpy(&res_chars[curr_offset], s.data(), s.size()); res_chars[curr_offset + s.size()] = 0; @@ -115,6 +117,8 @@ private: { auto a = concrete_column->getDataAt(i).toView(); s = fmt::sprintf(format, a); + + res_chars.resize(curr_offset + s.size() + 1); memcpy(&res_chars[curr_offset], s.data(), s.size()); res_chars[curr_offset + s.size()] = 0; From 2132ce52e0f72afe90e72e756d5ef494ad081ea9 Mon Sep 17 00:00:00 
2001 From: Robert Schulze Date: Mon, 15 Jul 2024 06:48:39 +0000 Subject: [PATCH 0370/2361] Bump ICU to 75 --- contrib/icu | 2 +- contrib/icu-cmake/CMakeLists.txt | 6 +++--- contrib/icudata | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contrib/icu b/contrib/icu index c205e7ee49a..7750081bda4 160000 --- a/contrib/icu +++ b/contrib/icu @@ -1 +1 @@ -Subproject commit c205e7ee49a7086a28b9c275fcfdac9ca3dc815d +Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625 diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index f23b0002b8d..f9d05f7fe97 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -4,7 +4,7 @@ else () option(ENABLE_ICU "Enable ICU" 0) endif () -# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they re-generated +# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they generated # the blob on s390x: https://github.com/ClickHouse/icudata/pull/2#issuecomment-2226957255 if (NOT ENABLE_ICU OR ARCH_S390X) message(STATUS "Not using ICU") @@ -462,9 +462,9 @@ file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) if (ARCH_S390X) - set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt71b_dat.S" ) + set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75b_dat.S" ) else() - set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt71l_dat.S" ) + set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" ) endif() set(ICUDATA_SOURCES diff --git a/contrib/icudata b/contrib/icudata index e7488edd1f1..d345d6ac22f 160000 --- a/contrib/icudata +++ b/contrib/icudata @@ -1 +1 @@ -Subproject commit e7488edd1f141b0664553a985a6fcd0125279527 +Subproject commit d345d6ac22f381c882420de9053d30ae1ff38d75 From 77272c925dc15acc5fdd0260a1c0aab35b1df3c3 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Mon, 15 Jul 2024 15:10:39 +0800 Subject: [PATCH 0371/2361] disable insertion and mutation --- docs/en/operations/settings/settings.md | 6 ++ src/Core/ServerSettings.h | 1 + src/Interpreters/InterpreterAlterQuery.cpp | 7 ++ src/Interpreters/InterpreterDeleteQuery.cpp | 4 + src/Interpreters/InterpreterInsertQuery.cpp | 5 ++ .../__init__.py | 0 .../config/cluster.xml | 16 ++++ .../config/reading_node.xml | 3 + .../config/storage_policy.xml | 21 +++++ .../config/writing_node.xml | 3 + .../test.py | 84 +++++++++++++++++++ 11 files changed, 150 insertions(+) create mode 100644 tests/integration/test_disable_insertion_and_mutation/__init__.py create mode 100644 tests/integration/test_disable_insertion_and_mutation/config/cluster.xml create mode 100644 tests/integration/test_disable_insertion_and_mutation/config/reading_node.xml create mode 100644 tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml create mode 100644 tests/integration/test_disable_insertion_and_mutation/config/writing_node.xml create mode 100644 tests/integration/test_disable_insertion_and_mutation/test.py diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index c3f697c3bdc..143ce836beb 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5608,3 +5608,9 @@ Default value: `10000000`. Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached. 
Default value: `1GiB`. + +## disable_insertion_and_mutation + +Disable all insert and mutations (alter table update / alter table delete / alter table drop partition). Set to true, can make this node focus on reading queries. + +Default value: `false`. diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 28b32a6e6a5..cf09874125d 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -157,6 +157,7 @@ namespace DB M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \ M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \ M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \ + M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0) /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index c70a3397f4e..b9dd59909e6 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -46,6 +46,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int UNKNOWN_TABLE; extern const int UNKNOWN_DATABASE; + extern const int QUERY_IS_PROHIBITED; } @@ -191,6 +192,12 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) "to execute ALTERs of different types (replicated and non replicated) in single query"); } + if (mutation_commands.hasNonEmptyMutationCommands() || !partition_commands.empty()) + { + if (getContext()->getServerSettings().disable_insertion_and_mutation) + throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Mutations are prohibited"); + } + if (!alter_commands.empty()) { auto alter_lock = table->lockForAlter(getContext()->getSettingsRef().lock_acquire_timeout); diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 39d5d9e9cef..5f3e3385148 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -26,6 +26,7 @@ namespace ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int BAD_ARGUMENTS; extern const int NOT_IMPLEMENTED; + extern const int QUERY_IS_PROHIBITED; } @@ -50,6 +51,9 @@ BlockIO InterpreterDeleteQuery::execute() if (table->isStaticStorage()) throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only"); + if (getContext()->getGlobalContext()->getServerSettings().disable_insertion_and_mutation) + throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Delete queries are prohibited"); + DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name); if (database->shouldReplicateQuery(getContext(), query_ptr)) { diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index f396db70d21..b62a71de884 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -44,6 +44,7 @@ namespace ProfileEvents { extern const Event InsertQueriesWithSubqueries; extern const Event 
QueriesWithSubqueries; + extern const int QUERY_IS_PROHIBITED; } namespace DB @@ -406,6 +407,10 @@ BlockIO InterpreterInsertQuery::execute() StoragePtr table = getTable(query); checkStorageSupportsTransactionsIfNeeded(table, getContext()); + if (getContext()->getServerSettings().disable_insertion_and_mutation + && query.table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE) + throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Insert queries are prohibited"); + StoragePtr inner_table; if (const auto * mv = dynamic_cast(table.get())) inner_table = mv->getTargetTable(); diff --git a/tests/integration/test_disable_insertion_and_mutation/__init__.py b/tests/integration/test_disable_insertion_and_mutation/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_disable_insertion_and_mutation/config/cluster.xml b/tests/integration/test_disable_insertion_and_mutation/config/cluster.xml new file mode 100644 index 00000000000..17782a77679 --- /dev/null +++ b/tests/integration/test_disable_insertion_and_mutation/config/cluster.xml @@ -0,0 +1,16 @@ + + + + + + writing_node + 9000 + + + reading_node + 9000 + + + + + \ No newline at end of file diff --git a/tests/integration/test_disable_insertion_and_mutation/config/reading_node.xml b/tests/integration/test_disable_insertion_and_mutation/config/reading_node.xml new file mode 100644 index 00000000000..becabce8a44 --- /dev/null +++ b/tests/integration/test_disable_insertion_and_mutation/config/reading_node.xml @@ -0,0 +1,3 @@ + + true + \ No newline at end of file diff --git a/tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml b/tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml new file mode 100644 index 00000000000..cec96cfcc1a --- /dev/null +++ b/tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml @@ -0,0 +1,21 @@ + + + + + s3_with_keeper + http://minio1:9001/root/data/ + minio + minio123 + + + + + +

+                        <disk>s3_with_keeper</disk>
+ + + + + \ No newline at end of file diff --git a/tests/integration/test_disable_insertion_and_mutation/config/writing_node.xml b/tests/integration/test_disable_insertion_and_mutation/config/writing_node.xml new file mode 100644 index 00000000000..0737af7afc7 --- /dev/null +++ b/tests/integration/test_disable_insertion_and_mutation/config/writing_node.xml @@ -0,0 +1,3 @@ + + false + \ No newline at end of file diff --git a/tests/integration/test_disable_insertion_and_mutation/test.py b/tests/integration/test_disable_insertion_and_mutation/test.py new file mode 100644 index 00000000000..5234ae9c57c --- /dev/null +++ b/tests/integration/test_disable_insertion_and_mutation/test.py @@ -0,0 +1,84 @@ +import pytest +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster +import time + +cluster = ClickHouseCluster(__file__) + +writing_node = cluster.add_instance( + "writing_node", + main_configs=["config/writing_node.xml", "config/storage_policy.xml", "config/cluster.xml"], + with_zookeeper=True, + with_minio=True, + stay_alive=True, + macros={"shard": 1, "replica": 1}, +) +reading_node = cluster.add_instance( + "reading_node", + main_configs=["config/reading_node.xml", "config/storage_policy.xml", "config/cluster.xml"], + with_zookeeper=True, + with_minio=True, + stay_alive=True, + macros={"shard": 1, "replica": 2}, +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + yield cluster + + finally: + cluster.shutdown() + + +def test_disable_insertion_and_mutation(started_cluster): + writing_node.query("""CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) SETTINGS storage_policy='s3_with_keeper' """) + + assert ( + "QUERY_IS_PROHIBITED" + in reading_node.query_and_get_error("INSERT INTO my_table VALUES (1, 'hello')") + ) + + assert ( + "QUERY_IS_PROHIBITED" + in reading_node.query_and_get_error("INSERT INTO my_table SETTINGS async_insert = 1 VALUES (1, 'hello')") + ) + + assert ( + "QUERY_IS_PROHIBITED" + in reading_node.query_and_get_error("ALTER TABLE my_table delete where 1") + ) + + assert ( + "QUERY_IS_PROHIBITED" + in reading_node.query_and_get_error("ALTER table my_table update key = 1 where 1") + ) + + assert ( + "QUERY_IS_PROHIBITED" + in reading_node.query_and_get_error("ALTER TABLE my_table drop partition 0") + ) + + reading_node.query("SELECT * from my_table"); + writing_node.query("INSERT INTO my_table VALUES (1, 'hello')") + writing_node.query("ALTER TABLE my_table delete where 1") + writing_node.query("ALTER table my_table update value = 'no hello' where 1") + + reading_node.query("ALTER TABLE my_table ADD COLUMN new_column UInt64") + writing_node.query("SELECT new_column from my_table") + reading_node.query("SELECT new_column from my_table") + + reading_node.query("ALter Table my_table MODIFY COLUMN new_column String") + + assert( + "new_column\tString" + in reading_node.query("DESC my_table") + ) + + assert( + "new_column\tString" + in writing_node.query("DESC my_table") + ) From b5b944b4e6c5c7227c45e428888f1711c14d2f4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 10:15:32 +0200 Subject: [PATCH 0372/2361] Improve wording of docs based on review comments Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- docs/en/engines/table-engines/integrations/kafka.md | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 8c9cd18d117..2f3c3bf62cd 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -257,7 +257,7 @@ If `allow_experimental_kafka_store_offsets_in_keeper` is enabled, then two more - `kafka_keeper_path` specifies the path to the table in ClickHouse Keeper - `kafka_replica_name` specifies the replica name in ClickHouse Keeper -Either both of the settings must be specified or neither of them. When both of them is specified, then a new, experimental Kafka engine will be used. The new engine doesn't depend on storing the committed offsets in Kafka,but stores them in ClickHouse Keeper. It still tries to commit the offsets to Kafka, but it only depends on those offsets when the table is created. In any other circumstances (table is restarted, or recovered after some error) the offsets stored in ClickHouse Keeper will be used to consume messages from. Apart from the committed offset, it also stores how many messages were consumed in the last batch, so if the insert fails, the same amount of messages will be consumed, thus enabling deduplication if necessary. +Either both of the settings must be specified or neither of them. When both of them are specified, then a new, experimental Kafka engine will be used. The new engine doesn't depend on storing the committed offsets in Kafka,but stores them in ClickHouse Keeper. It still tries to commit the offsets to Kafka, but it only depends on those offsets when the table is created. In any other circumstances (table is restarted, or recovered after some error) the offsets stored in ClickHouse Keeper will be used to consume messages from. Apart from the committed offset, it also stores how many messages were consumed in the last batch, so if the insert fails, the same amount of messages will be consumed, thus enabling deduplication if necessary. Example: @@ -285,8 +285,8 @@ SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1; As the new engine is experimental, it is not production ready yet. There are few known limitations of the implementation: - The biggest limitation is the engine doesn't support direct reading from Kafka topic (insertion works, but reading doesn't), thus the direct `SELECT` queries will fail. - - Rapidly dropping and recreating the table or specifying the same ClickHouse Keeper path to different engines might cause issues. As best practice you can use the `{uuid}` to avoid clashing paths. - - To make repeatable reads possible messages cannot be consumed from multiple partitions on a single thread. On the other hand the Kafka consumers has to be polled regularly to keep them alive. As a result of these two we decided to only allow creating multiple consumer if `kafka_thread_per_consumer` is enabled, otherwise it is too complicated to avoid issues regarding polling consumers regularly. + - Rapidly dropping and recreating the table or specifying the same ClickHouse Keeper path to different engines might cause issues. As best practice you can use the `{uuid}` in `kafka_keeper_path` to avoid clashing paths. + - To make repeatable reads, messages cannot be consumed from multiple partitions on a single thread. On the other hand, the Kafka consumers have to be polled regularly to keep them alive. 
As a result of these two objectives, we decided to only allow creating multiple consumers if `kafka_thread_per_consumer` is enabled, otherwise it is too complicated to avoid issues regarding polling consumers regularly. - Consumers created by the new storage engine do not show up in [`system.kafka_consumers`](../../../operations/system-tables/kafka_consumers.md) table. **See Also** From 1ecfba837e41c865eb77e6e94eadb0440d7ba2c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 09:03:05 +0000 Subject: [PATCH 0373/2361] Rename experimental flag to `allow_experimental_kafka_offsets_storage_in_keeper` --- docs/en/engines/table-engines/integrations/kafka.md | 6 +++--- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Storages/Kafka/StorageKafkaCommon.cpp | 4 ++-- tests/integration/test_storage_kafka/test.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 2f3c3bf62cd..389bb6c9029 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -253,7 +253,7 @@ The number of rows in one Kafka message depends on whether the format is row-bas ## Experimental engine to store committed offsets in ClickHouse Keeper -If `allow_experimental_kafka_store_offsets_in_keeper` is enabled, then two more settings can be specified to the Kafka table engine: +If `allow_experimental_kafka_offsets_storage_in_keeper` is enabled, then two more settings can be specified to the Kafka table engine: - `kafka_keeper_path` specifies the path to the table in ClickHouse Keeper - `kafka_replica_name` specifies the replica name in ClickHouse Keeper @@ -267,7 +267,7 @@ ENGINE = Kafka('localhost:19092', 'my-topic', 'my-consumer', 'JSONEachRow') SETTINGS kafka_keeper_path = '/clickhouse/{database}/experimental_kafka', kafka_replica_name = 'r1' -SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1; +SETTINGS allow_experimental_kafka_offsets_storage_in_keeper=1; ``` Or to utilize the `uuid` and `replica` macros similarly to ReplicatedMergeTree: @@ -278,7 +278,7 @@ ENGINE = Kafka('localhost:19092', 'my-topic', 'my-consumer', 'JSONEachRow') SETTINGS kafka_keeper_path = '/clickhouse/{database}/{uuid}', kafka_replica_name = '{replica}' -SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1; +SETTINGS allow_experimental_kafka_offsets_storage_in_keeper=1; ``` ### Known limitations diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 57a5216b870..d24ea494c73 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -687,7 +687,7 @@ class IColumn; M(UInt64, max_size_to_preallocate_for_joins, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before join", 0) \ \ M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit on kafka_num_consumers that depends on the number of available CPU cores", 0) \ - M(Bool, allow_experimental_kafka_store_offsets_in_keeper, false, "Allow experimental feature to store Kafka related offsets in ClickHouse Keeper. When enabled a ClickHouse Keeper path and replica name can be specified to the Kafka table engine. 
As a result instead of the regular Kafka engine, a new type of storage engine will be used that stores the committed offsets primarily in ClickHouse Keeper", 0) \ + M(Bool, allow_experimental_kafka_offsets_storage_in_keeper, false, "Allow experimental feature to store Kafka related offsets in ClickHouse Keeper. When enabled a ClickHouse Keeper path and replica name can be specified to the Kafka table engine. As a result instead of the regular Kafka engine, a new type of storage engine will be used that stores the committed offsets primarily in ClickHouse Keeper", 0) \ M(Bool, enable_software_prefetch_in_aggregation, true, "Enable use of software prefetch in aggregation", 0) \ M(Bool, allow_aggregate_partitions_independently, false, "Enable independent aggregation of partitions on separate threads when partition key suits group by key. Beneficial when number of partitions close to number of cores and partitions have roughly the same size", 0) \ M(Bool, force_aggregate_partitions_independently, false, "Force the use of optimization when it is applicable, but heuristics decided not to use it", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 6f96f693681..d612a4f9e08 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -69,7 +69,7 @@ static std::initializer_list( args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); - if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_store_offsets_in_keeper && !args.query.attach) + if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_offsets_storage_in_keeper && !args.query.attach) throw Exception( ErrorCodes::SUPPORT_IS_DISABLED, - "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_store_offsets_in_keeper` setting " + "Storing the Kafka offsets in Keeper is experimental. 
Set `allow_experimental_kafka_offsets_storage_in_keeper` setting " "to enable it"); if (!has_keeper_path || !has_replica_name) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 81132a9a60f..ea8f6671d20 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -422,7 +422,7 @@ def generate_new_create_table_query( settings_string = create_settings_string(settings) query = f"""CREATE TABLE {database}.{table_name} ({columns_def}) ENGINE = Kafka('{brokers}', '{topic_list}', '{consumer_group}', '{format}', '{row_delimiter}') {settings_string} -SETTINGS allow_experimental_kafka_store_offsets_in_keeper=1""" +SETTINGS allow_experimental_kafka_offsets_storage_in_keeper=1""" logging.debug(f"Generated new create query: {query}") return query From fc29ac7891eddd3a714f5af574c71040f91f451d Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Mon, 15 Jul 2024 17:06:37 +0800 Subject: [PATCH 0374/2361] add error extern to fix compile error --- src/Interpreters/InterpreterInsertQuery.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index b62a71de884..c01b2196ac9 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -56,6 +56,7 @@ namespace ErrorCodes extern const int NO_SUCH_COLUMN_IN_TABLE; extern const int ILLEGAL_COLUMN; extern const int DUPLICATE_COLUMN; + extern const int QUERY_IS_PROHIBITED; } InterpreterInsertQuery::InterpreterInsertQuery( From ccba078da10bed8d42e821f8bcdd47f448d198a0 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Mon, 15 Jul 2024 09:46:31 +0000 Subject: [PATCH 0375/2361] change storage policy to default --- .../config/storage_policy.xml | 21 ------------------- .../test.py | 6 +++--- 2 files changed, 3 insertions(+), 24 deletions(-) delete mode 100644 tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml diff --git a/tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml b/tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml deleted file mode 100644 index cec96cfcc1a..00000000000 --- a/tests/integration/test_disable_insertion_and_mutation/config/storage_policy.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - s3_with_keeper - http://minio1:9001/root/data/ - minio - minio123 - - - - - -
-                        <disk>s3_with_keeper</disk>
-                    </main>
-                </volumes>
-            </s3_with_keeper>
-        </policies>
-    </storage_configuration>
\ No newline at end of file diff --git a/tests/integration/test_disable_insertion_and_mutation/test.py b/tests/integration/test_disable_insertion_and_mutation/test.py index 5234ae9c57c..f098f130d2b 100644 --- a/tests/integration/test_disable_insertion_and_mutation/test.py +++ b/tests/integration/test_disable_insertion_and_mutation/test.py @@ -7,7 +7,7 @@ cluster = ClickHouseCluster(__file__) writing_node = cluster.add_instance( "writing_node", - main_configs=["config/writing_node.xml", "config/storage_policy.xml", "config/cluster.xml"], + main_configs=["config/writing_node.xml", "config/cluster.xml"], with_zookeeper=True, with_minio=True, stay_alive=True, @@ -15,7 +15,7 @@ writing_node = cluster.add_instance( ) reading_node = cluster.add_instance( "reading_node", - main_configs=["config/reading_node.xml", "config/storage_policy.xml", "config/cluster.xml"], + main_configs=["config/reading_node.xml", "config/cluster.xml"], with_zookeeper=True, with_minio=True, stay_alive=True, @@ -35,7 +35,7 @@ def started_cluster(): def test_disable_insertion_and_mutation(started_cluster): - writing_node.query("""CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) SETTINGS storage_policy='s3_with_keeper' """) + writing_node.query("""CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) """) assert ( "QUERY_IS_PROHIBITED" From 76e1dea7ccdabc27129fdf911845f2622bbbc371 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Mon, 15 Jul 2024 18:17:09 +0800 Subject: [PATCH 0376/2361] Fix unit test --- src/Storages/Statistics/tests/gtest_stats.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/Statistics/tests/gtest_stats.cpp b/src/Storages/Statistics/tests/gtest_stats.cpp index f82a535bebc..9b9fae83109 100644 --- a/src/Storages/Statistics/tests/gtest_stats.cpp +++ b/src/Storages/Statistics/tests/gtest_stats.cpp @@ -59,7 +59,7 @@ void testConvertFieldToDataType(const DataTypePtr & data_type, const Fields & fi } catch(...) 
{ - ASSERT_FALSE(convert_failed); + ASSERT_TRUE(convert_failed); } if (convert_failed) ASSERT_TRUE(converted_value.isNull()); From 16c82501c6292835079822df7aa1f341a644f299 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 11:54:04 +0000 Subject: [PATCH 0377/2361] Small review fixes --- src/Storages/Kafka/KafkaConsumer2.cpp | 4 +- src/Storages/Kafka/KafkaConsumer2.h | 7 +-- src/Storages/Kafka/StorageKafka.cpp | 36 ++++-------- src/Storages/Kafka/StorageKafka.h | 3 - src/Storages/Kafka/StorageKafka2.cpp | 72 ++++++++--------------- src/Storages/Kafka/StorageKafka2.h | 5 +- src/Storages/Kafka/StorageKafkaCommon.cpp | 21 ++++++- src/Storages/Kafka/StorageKafkaCommon.h | 8 ++- 8 files changed, 66 insertions(+), 90 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index d471c263653..51fc9bbe968 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -55,14 +55,14 @@ KafkaConsumer2::KafkaConsumer2( size_t max_batch_size, size_t poll_timeout_, const std::atomic & stopped_, - const Names & _topics) + const Names & topics_) : consumer(consumer_) , log(log_) , batch_size(max_batch_size) , poll_timeout(poll_timeout_) , stopped(stopped_) , current(messages.begin()) - , topics(_topics) + , topics(topics_) { // called (synchronously, during poll) when we enter the consumer group consumer->set_assignment_callback( diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index d7ec227d0bd..0df37434caf 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -71,18 +71,13 @@ public: using TopicPartitionCounts = std::vector; - // struct AssignmentChanges { - // TopicPartitions revoked_partitions; - // TopicPartitions new_partitions; - // }; - KafkaConsumer2( ConsumerPtr consumer_, LoggerPtr log_, size_t max_batch_size, size_t poll_timeout_, const std::atomic & stopped_, - const Names & _topics); + const Names & topics_); ~KafkaConsumer2(); diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 688b78ef78d..0e907187e11 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -21,19 +21,18 @@ #include #include #include +#include #include -#include #include #include #include #include #include +#include #include #include -#include #include #include -#include #include #include #include @@ -42,10 +41,10 @@ #include #include #include +#include #include #include #include -#include #include #include #include @@ -148,19 +147,22 @@ private: }; StorageKafka::StorageKafka( - const StorageID & table_id_, ContextPtr context_, - const ColumnsDescription & columns_, std::unique_ptr kafka_settings_, + const StorageID & table_id_, + ContextPtr context_, + const ColumnsDescription & columns_, + std::unique_ptr kafka_settings_, const String & collection_name_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) , kafka_settings(std::move(kafka_settings_)) , macros_info{.table_id = table_id_} - , topics(parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) + , topics(StorageKafkaUtils::parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) , brokers(getContext()->getMacros()->expand(kafka_settings->kafka_broker_list.value, macros_info)) , group(getContext()->getMacros()->expand(kafka_settings->kafka_group_name.value, macros_info)) , client_id( - 
kafka_settings->kafka_client_id.value.empty() ? getDefaultClientId(table_id_) - : getContext()->getMacros()->expand(kafka_settings->kafka_client_id.value, macros_info)) + kafka_settings->kafka_client_id.value.empty() + ? StorageKafkaUtils::getDefaultClientId(table_id_) + : getContext()->getMacros()->expand(kafka_settings->kafka_client_id.value, macros_info)) , format_name(getContext()->getMacros()->expand(kafka_settings->kafka_format.value)) , max_rows_per_message(kafka_settings->kafka_max_rows_per_message.value) , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value, macros_info)) @@ -261,22 +263,6 @@ SettingsChanges StorageKafka::createSettingsAdjustments() return result; } -Names StorageKafka::parseTopics(String topic_list) -{ - Names result; - boost::split(result,topic_list,[](char c){ return c == ','; }); - for (String & topic : result) - { - boost::trim(topic); - } - return result; -} - -String StorageKafka::getDefaultClientId(const StorageID & table_id_) -{ - return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id_.database_name, table_id_.table_name); -} - void StorageKafka::read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index ef1781d5054..31a5744ee2a 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -167,9 +167,6 @@ private: size_t getMaxBlockSize() const; size_t getPollTimeoutMillisecond() const; - static Names parseTopics(String topic_list); - static String getDefaultClientId(const StorageID & table_id_); - bool streamToViews(); bool checkDependencies(const StorageID & table_id); diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 754c2dfa926..652ded635e4 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -33,10 +34,8 @@ #include #include #include -#include #include #include -#include "Common/config_version.h" #include #include #include @@ -45,12 +44,12 @@ #include #include #include +#include #include #include #include #include #include -#include "Storages/Kafka/KafkaConsumer2.h" #if USE_KRB5 # include @@ -115,18 +114,18 @@ StorageKafka2::StorageKafka2( , replica_path(keeper_path + "/replicas/" + kafka_settings_->kafka_replica_name.value) , kafka_settings(std::move(kafka_settings_)) , macros_info{.table_id = table_id_} - , topics(parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) + , topics(StorageKafkaUtils::parseTopics(getContext()->getMacros()->expand(kafka_settings->kafka_topic_list.value, macros_info))) , brokers(getContext()->getMacros()->expand(kafka_settings->kafka_broker_list.value, macros_info)) , group(getContext()->getMacros()->expand(kafka_settings->kafka_group_name.value, macros_info)) , client_id( kafka_settings->kafka_client_id.value.empty() - ? getDefaultClientId(table_id_) + ? 
StorageKafkaUtils::getDefaultClientId(table_id_) : getContext()->getMacros()->expand(kafka_settings->kafka_client_id.value, macros_info)) , format_name(getContext()->getMacros()->expand(kafka_settings->kafka_format.value)) , max_rows_per_message(kafka_settings->kafka_max_rows_per_message.value) , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value, macros_info)) , num_consumers(kafka_settings->kafka_num_consumers.value) - , log(getLogger(String("StorageKafka2 ") + table_id_.getNameForLogs())) + , log(getLogger("StorageKafka2 (" + table_id_.getNameForLogs() + ")")) , semaphore(0, static_cast(num_consumers)) , settings_adjustments(createSettingsAdjustments()) , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) @@ -134,7 +133,7 @@ StorageKafka2::StorageKafka2( , active_node_identifier(toString(ServerUUID::get())) { if (kafka_settings->kafka_num_consumers > 1 && !thread_per_consumer) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "With multiple consumer you have to use thread per consumer!"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "With multiple consumers, it is required to use `kafka_thread_per_consumer` setting"); if (kafka_settings->kafka_handle_error_mode == StreamingHandleErrorMode::STREAM) { @@ -186,15 +185,15 @@ VirtualColumnsDescription StorageKafka2::createVirtuals(StreamingHandleErrorMode } void StorageKafka2::partialShutdown() { + LOG_TRACE(log, "Cancelling streams"); for (auto & task : tasks) { - LOG_TRACE(log, "Cancelling streams"); task->stream_cancelled = true; } + LOG_TRACE(log, "Waiting for cleanup"); for (auto & task : tasks) { - LOG_TRACE(log, "Waiting for cleanup"); task->holder->deactivate(); } is_active = false; @@ -202,14 +201,14 @@ void StorageKafka2::partialShutdown() bool StorageKafka2::activate() { - LOG_TEST(log, "activate task"); + LOG_TEST(log, "Activate task"); if (is_active && !getZooKeeper()->expired()) { LOG_TEST(log, "No need to activate"); return true; } - if (first_time) + if (first_activation_time) { LOG_DEBUG(log, "Activating replica"); assert(!is_active); @@ -252,12 +251,9 @@ bool StorageKafka2::activate() String is_active_path = fs::path(replica_path) / "is_active"; zookeeper->deleteEphemeralNodeIfContentMatches(is_active_path, active_node_identifier); - /// Simultaneously declare that this replica is active, and update the host. - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCreateRequest(is_active_path, active_node_identifier, zkutil::CreateMode::Ephemeral)); - try { + /// Simultaneously declare that this replica is active, and update the host. zookeeper->create(is_active_path, active_node_identifier, zkutil::CreateMode::Ephemeral); } catch (const Coordination::Exception & e) @@ -276,27 +272,21 @@ bool StorageKafka2::activate() return true; } - catch (...) + catch (const Coordination::Exception & e) { replica_is_active_node = nullptr; + LOG_ERROR(log, "Couldn't start replica: {}. {}", e.what(), DB::getCurrentExceptionMessage(true)); + return false; - try - { + } + catch (const Exception & e) + { + replica_is_active_node = nullptr; + if (e.code() != ErrorCodes::REPLICA_IS_ALREADY_ACTIVE) throw; - } - catch (const Coordination::Exception & e) - { - LOG_ERROR(log, "Couldn't start replica: {}. {}", e.what(), DB::getCurrentExceptionMessage(true)); - return false; - } - catch (const Exception & e) - { - if (e.code() != ErrorCodes::REPLICA_IS_ALREADY_ACTIVE) - throw; - LOG_ERROR(log, "Couldn't start replica: {}. 
{}", e.what(), DB::getCurrentExceptionMessage(true)); - return false; - } + LOG_ERROR(log, "Couldn't start replica: {}. {}", e.what(), DB::getCurrentExceptionMessage(true)); + return false; } }; @@ -315,8 +305,8 @@ bool StorageKafka2::activate() task->holder->activateAndSchedule(); } - if (first_time) - first_time = false; + if (first_activation_time) + first_activation_time = false; LOG_DEBUG(log, "Table activated successfully"); return true; @@ -357,20 +347,6 @@ SettingsChanges StorageKafka2::createSettingsAdjustments() return result; } -Names StorageKafka2::parseTopics(String topic_list) -{ - Names result; - boost::split(result, topic_list, [](char c) { return c == ','; }); - for (String & topic : result) - boost::trim(topic); - return result; -} - -String StorageKafka2::getDefaultClientId(const StorageID & table_id_) -{ - return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id_.database_name, table_id_.table_name); -} - Pipe StorageKafka2::read( const Names & /*column_names */, @@ -381,7 +357,7 @@ Pipe StorageKafka2::read( size_t /* max_block_size */, size_t /* num_streams */) { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "You cannot read from the new Kafka storage!"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Direct read from the new Kafka storage is not implemented"); } diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 0d6734ac0eb..e7c192bc3b0 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -162,7 +162,7 @@ private: zkutil::EphemeralNodeHolderPtr replica_is_active_node; BackgroundSchedulePool::TaskHolder activating_task; String active_node_identifier; - bool first_time = true; + bool first_activation_time = true; bool activate(); void partialShutdown(); @@ -194,9 +194,6 @@ private: size_t getMaxBlockSize() const; size_t getPollTimeoutMillisecond() const; - static Names parseTopics(String topic_list); - static String getDefaultClientId(const StorageID & table_id_); - enum class StallReason { NoAssignment, diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index d570d9993b6..470e3445d03 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -11,11 +11,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -505,7 +507,7 @@ void registerStorageKafka(StorageFactory & factory) auto * settings_query = args.storage_def->settings; - chassert(settings_query != nullptr && "Unexpected settings query in StorageKafka"); + chassert(has_settings && "Unexpected settings query in StorageKafka"); settings_query->changes.setSetting("kafka_keeper_path", kafka_settings->kafka_keeper_path.value); settings_query->changes.setSetting("kafka_replica_name", kafka_settings->kafka_replica_name.value); @@ -539,6 +541,23 @@ void registerStorageKafka(StorageFactory & factory) }); } +namespace StorageKafkaUtils +{ +Names parseTopics(String topic_list) +{ + Names result; + boost::split(result, topic_list, [](char c) { return c == ','; }); + for (String & topic : result) + boost::trim(topic); + return result; +} + +String getDefaultClientId(const StorageID & table_id) +{ + return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id.database_name, table_id.table_name); +} +} + template struct StorageKafkaInterceptors; template struct StorageKafkaInterceptors; diff --git a/src/Storages/Kafka/StorageKafkaCommon.h 
b/src/Storages/Kafka/StorageKafkaCommon.h index bed09e9a9cd..f0bae7c9c9c 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.h +++ b/src/Storages/Kafka/StorageKafkaCommon.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -60,8 +61,13 @@ struct KafkaConfigLoader const String & config_prefix, const Names & topics); }; -} +namespace StorageKafkaUtils +{ +Names parseTopics(String topic_list); +String getDefaultClientId(const StorageID & table_id); +} +} template <> struct fmt::formatter : fmt::ostream_formatter From 7c1a181469949f3ddb09380c60bf5f97021e71df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 12:04:46 +0000 Subject: [PATCH 0378/2361] Refactor how we handle activation of new Kafka storage --- src/Storages/Kafka/StorageKafka2.cpp | 51 ++++++++++++++++++++++------ src/Storages/Kafka/StorageKafka2.h | 3 +- 2 files changed, 43 insertions(+), 11 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 652ded635e4..428426d9e6a 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -158,7 +158,7 @@ StorageKafka2::StorageKafka2( if (!first_replica) createReplica(); - activating_task = getContext()->getSchedulePool().createTask(log->name() + "(activating task)", [this]() { activate(); }); + activating_task = getContext()->getSchedulePool().createTask(log->name() + "(activating task)", [this]() { activateAndReschedule(); }); activating_task->deactivate(); } @@ -185,6 +185,7 @@ VirtualColumnsDescription StorageKafka2::createVirtuals(StreamingHandleErrorMode } void StorageKafka2::partialShutdown() { + // This is called in a background task within a catch block, thus this function shouldn't throw LOG_TRACE(log, "Cancelling streams"); for (auto & task : tasks) { @@ -208,12 +209,7 @@ bool StorageKafka2::activate() return true; } - if (first_activation_time) - { - LOG_DEBUG(log, "Activating replica"); - assert(!is_active); - } - else if (!is_active) + if (!is_active) { LOG_WARNING(log, "Table was not active. Will try to activate it"); } @@ -305,13 +301,48 @@ bool StorageKafka2::activate() task->holder->activateAndSchedule(); } - if (first_activation_time) - first_activation_time = false; - LOG_DEBUG(log, "Table activated successfully"); return true; } +void StorageKafka2::activateAndReschedule() +{ + if (shutdown_called) + return; + + /// It would be ideal to introduce a setting for this + constexpr static size_t check_period_ms = 60000; + /// In case of any exceptions we want to rerun the this task as fast as possible but we also don't want to keep retrying immediately + /// in a close loop (as fast as tasks can be processed), so we'll retry in between 100 and 10000 ms + const size_t backoff_ms = 100 * ((consecutive_activate_failures + 1) * (consecutive_activate_failures + 2)) / 2; + const size_t next_failure_retry_ms = std::min(size_t{10000}, backoff_ms); + + try + { + bool replica_is_active = activate(); + if (replica_is_active) + { + consecutive_activate_failures = 0; + activating_task->scheduleAfter(check_period_ms); + } + else + { + consecutive_activate_failures++; + activating_task->scheduleAfter(next_failure_retry_ms); + } + } + catch (...) 
+ { + consecutive_activate_failures++; + activating_task->scheduleAfter(next_failure_retry_ms); + + /// We couldn't activate table let's set it into readonly mode if necessary + /// We do this after scheduling the task in case it throws + partialShutdown(); + tryLogCurrentException(log, "Failed to restart the table. Will try again"); + } +} + void StorageKafka2::assertActive() const { // TODO(antaljanosbenjamin): change LOGICAL_ERROR to something sensible diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index e7c192bc3b0..d7909df1b2c 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -162,8 +162,9 @@ private: zkutil::EphemeralNodeHolderPtr replica_is_active_node; BackgroundSchedulePool::TaskHolder activating_task; String active_node_identifier; - bool first_activation_time = true; + UInt64 consecutive_activate_failures = 0; bool activate(); + void activateAndReschedule(); void partialShutdown(); void assertActive() const; From b2466466d46ab1828b67693edf520d52ff45bc65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 15 Jul 2024 14:28:12 +0200 Subject: [PATCH 0379/2361] Rename Context::getSettings() --- programs/local/LocalServer.cpp | 2 +- src/Analyzer/QueryTreeBuilder.cpp | 2 +- src/Analyzer/Resolve/QueryAnalyzer.cpp | 2 +- src/Analyzer/Utils.cpp | 2 +- src/Bridge/IBridge.cpp | 2 +- src/Client/ClientBase.cpp | 6 +++--- src/Databases/DatabaseDictionary.cpp | 4 ++-- src/Databases/DatabaseOnDisk.cpp | 4 ++-- src/Databases/MySQL/MaterializedMySQLSyncThread.cpp | 2 +- src/Functions/formatQuery.cpp | 2 +- src/Functions/hasColumnInTable.cpp | 2 +- src/Interpreters/Context.cpp | 2 +- src/Interpreters/Context.h | 5 ++--- src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp | 2 +- src/Interpreters/ExpressionAnalyzer.cpp | 4 ++-- src/Interpreters/InterpreterInsertQuery.cpp | 2 +- src/Interpreters/InterpreterSelectQuery.cpp | 2 +- src/Interpreters/JoinedTables.cpp | 2 +- src/Interpreters/ProcessList.cpp | 2 +- src/Interpreters/interpretSubquery.cpp | 2 +- src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp | 2 +- .../QueryPlan/Optimizations/optimizeReadInOrder.cpp | 2 +- src/Server/MySQLHandler.cpp | 2 +- .../Distributed/DistributedAsyncInsertDirectoryQueue.cpp | 2 +- src/Storages/MergeTree/MergedBlockOutputStream.cpp | 2 +- src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp | 2 +- src/Storages/StorageURL.cpp | 4 ++-- src/Storages/StorageView.cpp | 2 +- src/Storages/getStructureOfRemoteTable.cpp | 2 +- src/TableFunctions/Hive/TableFunctionHive.cpp | 2 +- 30 files changed, 37 insertions(+), 38 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 46b543e49e9..a7265ef0de4 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -80,7 +80,7 @@ namespace ErrorCodes void applySettingsOverridesForLocal(ContextMutablePtr context) { - Settings settings = context->getSettings(); + Settings settings = context->getSettingsCopy(); settings.allow_introspection_functions = true; settings.storage_file_read_method = LocalFSReadMethod::mmap; diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index a62b6e56ac5..0a732a3b3b3 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -237,7 +237,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q /// Remove global settings limit and offset if (const auto & settings_ref = 
updated_context->getSettingsRef(); settings_ref.limit || settings_ref.offset) { - Settings settings = updated_context->getSettings(); + Settings settings = updated_context->getSettingsCopy(); limit = settings.limit; offset = settings.offset; settings.limit = 0; diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 5f7b06231d9..92618dfe346 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -503,7 +503,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden ProfileEvents::increment(ProfileEvents::ScalarSubqueriesCacheMiss); auto subquery_context = Context::createCopy(context); - Settings subquery_settings = context->getSettings(); + Settings subquery_settings = context->getSettingsCopy(); subquery_settings.max_result_rows = 1; subquery_settings.extremes = false; subquery_context->setSettings(subquery_settings); diff --git a/src/Analyzer/Utils.cpp b/src/Analyzer/Utils.cpp index d10bbd9bd23..e5f372b7368 100644 --- a/src/Analyzer/Utils.cpp +++ b/src/Analyzer/Utils.cpp @@ -867,7 +867,7 @@ void updateContextForSubqueryExecution(ContextMutablePtr & mutable_context) * max_rows_in_join, max_bytes_in_join, join_overflow_mode, * which are checked separately (in the Set, Join objects). */ - Settings subquery_settings = mutable_context->getSettings(); + Settings subquery_settings = mutable_context->getSettingsCopy(); subquery_settings.max_result_rows = 0; subquery_settings.max_result_bytes = 0; /// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query). diff --git a/src/Bridge/IBridge.cpp b/src/Bridge/IBridge.cpp index de48a4f2b84..5682a28f899 100644 --- a/src/Bridge/IBridge.cpp +++ b/src/Bridge/IBridge.cpp @@ -232,7 +232,7 @@ int IBridge::main(const std::vector & /*args*/) auto context = Context::createGlobal(shared_context.get()); context->makeGlobalContext(); - auto settings = context->getSettings(); + auto settings = context->getSettingsCopy(); settings.set("http_max_field_value_size", http_max_field_value_size); context->setSettings(settings); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 5d472ba99b9..2dc603a307f 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -723,7 +723,7 @@ void ClientBase::initLogsOutputStream() void ClientBase::adjustSettings() { - Settings settings = global_context->getSettings(); + Settings settings = global_context->getSettingsCopy(); /// NOTE: Do not forget to set changed=false to avoid sending it to the server (to avoid breakage read only profiles) @@ -931,7 +931,7 @@ bool ClientBase::isSyncInsertWithData(const ASTInsertQuery & insert_query, const if (!insert_query.data) return false; - auto settings = context->getSettings(); + auto settings = context->getSettingsCopy(); if (insert_query.settings_ast) settings.applyChanges(insert_query.settings_ast->as()->changes); @@ -2696,7 +2696,7 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name) if (!getClientConfiguration().has("log_comment")) { - Settings settings = global_context->getSettings(); + Settings settings = global_context->getSettingsCopy(); /// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]" settings.log_comment = fs::absolute(fs::path(file_name)); global_context->setSettings(settings); diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 
52196e75c4a..a9569408814 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -111,7 +111,7 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const String & table_name, Co buffer << ") Engine = Dictionary(" << backQuoteIfNeed(table_name) << ")"; } - auto settings = getContext()->getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ParserCreateQuery parser; const char * pos = query.data(); std::string error_message; @@ -133,7 +133,7 @@ ASTPtr DatabaseDictionary::getCreateDatabaseQuery() const if (const auto comment_value = getDatabaseComment(); !comment_value.empty()) buffer << " COMMENT " << backQuote(comment_value); } - auto settings = getContext()->getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); ParserCreateQuery parser; return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks); } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 07a250e72c7..261a917c595 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -523,7 +523,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const { ASTPtr ast; - auto settings = getContext()->getSettingsRef(); + const auto & settings = getContext()->getSettingsRef(); { std::lock_guard lock(mutex); auto database_metadata_path = getContext()->getPath() + "metadata/" + escapeForFileName(database_name) + ".sql"; @@ -722,7 +722,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata( return nullptr; } - auto settings = local_context->getSettingsRef(); + const auto & settings = local_context->getSettingsRef(); ParserCreateQuery parser; const char * pos = query.data(); std::string error_message; diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index 2c342755337..04b4070d5af 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -89,7 +89,7 @@ static constexpr auto MYSQL_BACKGROUND_THREAD_NAME = "MySQLDBSync"; static ContextMutablePtr createQueryContext(ContextPtr context) { - Settings new_query_settings = context->getSettings(); + Settings new_query_settings = context->getSettingsCopy(); new_query_settings.insert_allow_materialized_columns = true; /// To avoid call AST::format diff --git a/src/Functions/formatQuery.cpp b/src/Functions/formatQuery.cpp index d10b3f9a5b7..f7f7e4b5bcb 100644 --- a/src/Functions/formatQuery.cpp +++ b/src/Functions/formatQuery.cpp @@ -39,7 +39,7 @@ public: FunctionFormatQuery(ContextPtr context, String name_, OutputFormatting output_formatting_, ErrorHandling error_handling_) : name(name_), output_formatting(output_formatting_), error_handling(error_handling_) { - const Settings & settings = context->getSettings(); + const Settings & settings = context->getSettingsRef(); max_query_size = settings.max_query_size; max_parser_depth = settings.max_parser_depth; max_parser_backtracks = settings.max_parser_backtracks; diff --git a/src/Functions/hasColumnInTable.cpp b/src/Functions/hasColumnInTable.cpp index 00714997b4a..cc496270b01 100644 --- a/src/Functions/hasColumnInTable.cpp +++ b/src/Functions/hasColumnInTable.cpp @@ -143,7 +143,7 @@ ColumnPtr FunctionHasColumnInTable::executeImpl(const ColumnsWithTypeAndName & a /* cluster_name= */ "", /* password= */ "" }; - auto cluster = std::make_shared(getContext()->getSettings(), host_names, params); + auto cluster = 
std::make_shared(getContext()->getSettingsRef(), host_names, params); // FIXME this (probably) needs a non-constant access to query context, // because it might initialized a storage. Ideally, the tables required diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index fc1e87e7b7e..3a88e0ccfe1 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -2267,7 +2267,7 @@ bool Context::displaySecretsInShowAndSelect() const return shared->server_settings.display_secrets_in_show_and_select; } -Settings Context::getSettings() const +Settings Context::getSettingsCopy() const { SharedLockGuard lock(mutex); return *settings; diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 284cac50769..61095e53a17 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -830,7 +830,8 @@ public: void setMacros(std::unique_ptr && macros); bool displaySecretsInShowAndSelect() const; - Settings getSettings() const; + Settings getSettingsCopy() const; + const Settings & getSettingsRef() const { return *settings; } void setSettings(const Settings & settings_); /// Set settings by name. @@ -955,8 +956,6 @@ public: void makeSessionContext(); void makeGlobalContext(); - const Settings & getSettingsRef() const { return *settings; } - void setProgressCallback(ProgressCallback callback); /// Used in executeQuery() to pass it to the QueryPipeline. ProgressCallback getProgressCallback() const; diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index 4bd1c47d5a0..1ca8c40460c 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -74,7 +74,7 @@ void ExecuteScalarSubqueriesMatcher::visit(ASTPtr & ast, Data & data) static auto getQueryInterpreter(const ASTSubquery & subquery, ExecuteScalarSubqueriesMatcher::Data & data) { auto subquery_context = Context::createCopy(data.getContext()); - Settings subquery_settings = data.getContext()->getSettings(); + Settings subquery_settings = data.getContext()->getSettingsCopy(); subquery_settings.max_result_rows = 1; subquery_settings.extremes = false; subquery_context->setSettings(subquery_settings); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 16d0eb71278..1a4c02bdebb 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -171,7 +171,7 @@ ExpressionAnalyzer::ExpressionAnalyzer( PreparedSetsPtr prepared_sets_, bool is_create_parameterized_view_) : WithContext(context_) - , query(query_), settings(getContext()->getSettings()) + , query(query_), settings(getContext()->getSettingsRef()) , subquery_depth(subquery_depth_) , syntax(syntax_analyzer_result_) , is_create_parameterized_view(is_create_parameterized_view_) @@ -983,7 +983,7 @@ static std::shared_ptr tryCreateJoin( algorithm == JoinAlgorithm::PARALLEL_HASH || algorithm == JoinAlgorithm::DEFAULT) { - const auto & settings = context->getSettings(); + const auto & settings = context->getSettingsRef(); if (analyzed_join->allowParallelHashJoin()) return std::make_shared( diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index ef298d4d45a..dffa0cbaa5b 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -469,7 +469,7 @@ BlockIO InterpreterInsertQuery::execute() * to avoid unnecessary squashing. 
*/ - Settings new_settings = getContext()->getSettings(); + Settings new_settings = getContext()->getSettingsCopy(); new_settings.max_threads = std::max(1, settings.max_insert_threads); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index cd91f9532b9..cb3c478dbb1 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -253,7 +253,7 @@ namespace ContextPtr getSubqueryContext(const ContextPtr & context) { auto subquery_context = Context::createCopy(context); - Settings subquery_settings = context->getSettings(); + Settings subquery_settings = context->getSettingsCopy(); subquery_settings.max_result_rows = 0; subquery_settings.max_result_bytes = 0; /// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query). diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 0de2bf9cb1f..c5226107f8d 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -308,7 +308,7 @@ std::shared_ptr JoinedTables::makeTableJoin(const ASTSelectQuery & se if (tables_with_columns.size() < 2) return {}; - auto settings = context->getSettingsRef(); + const auto & settings = context->getSettingsRef(); MultiEnum join_algorithm = settings.join_algorithm; bool try_use_direct_join = join_algorithm.isSet(JoinAlgorithm::DIRECT) || join_algorithm.isSet(JoinAlgorithm::DEFAULT); auto table_join = std::make_shared(settings, context->getGlobalTemporaryVolume(), context->getTempDataOnDisk()); diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 5b07852d9e3..271e23a7288 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -657,7 +657,7 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even { if (auto ctx = context.lock()) { - res.query_settings = std::make_shared(ctx->getSettings()); + res.query_settings = std::make_shared(ctx->getSettingsRef()); res.current_database = ctx->getCurrentDatabase(); } } diff --git a/src/Interpreters/interpretSubquery.cpp b/src/Interpreters/interpretSubquery.cpp index 340f6d1d805..909875b99a0 100644 --- a/src/Interpreters/interpretSubquery.cpp +++ b/src/Interpreters/interpretSubquery.cpp @@ -62,7 +62,7 @@ std::shared_ptr interpretSubquery( * which are checked separately (in the Set, Join objects). */ auto subquery_context = Context::createCopy(context); - Settings subquery_settings = context->getSettings(); + Settings subquery_settings = context->getSettingsCopy(); subquery_settings.max_result_rows = 0; subquery_settings.max_result_bytes = 0; /// The calculation of `extremes` does not make sense and is not necessary (if you do it, then the `extremes` of the subquery can be taken instead of the whole query). 
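Illustrative aside (not from the patches themselves): a minimal sketch of the convention this rename establishes, assuming a `ContextPtr context` and a mutable `subquery_context` as in the hunks above; only the accessor and setting names taken from the diff are real, the surrounding scaffolding is hypothetical.

    // Read-only access binds a const reference; the (large) Settings struct is not copied.
    const Settings & settings = context->getSettingsRef();
    if (settings.max_result_rows || settings.max_result_bytes)
    {
        // Mutation takes an explicit copy, changes it, and applies it back,
        // mirroring the subquery hunks above that lift result limits for internal queries.
        Settings subquery_settings = context->getSettingsCopy();
        subquery_settings.max_result_rows = 0;
        subquery_settings.max_result_bytes = 0;
        subquery_context->setSettings(subquery_settings);
    }

After the rename, the remaining getSettingsCopy() calls mark exactly the places that need an independent, mutable copy, while everything else reads through the reference.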
diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index c23d717d52f..e467c358d1d 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -406,7 +406,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx { const Block & header = getPort().getHeader(); const IDataType & type = *header.getByPosition(column_idx).type; - auto settings = context->getSettingsRef(); + const auto & settings = context->getSettingsRef(); /// Advance the token iterator until the start of the column expression readUntilTheEndOfRowAndReTokenize(column_idx); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 28eb4da2e17..415a6a11999 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -1057,7 +1057,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, } auto context = read_from_merge_tree->getContext(); - const auto & settings = context->getSettings(); + const auto & settings = context->getSettingsRef(); if (!settings.optimize_read_in_window_order || (settings.optimize_read_in_order && settings.query_plan_read_in_order) || context->getSettingsRef().allow_experimental_analyzer) { return 0; diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index b6d795b1e69..3deb09bae88 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -473,7 +473,7 @@ void MySQLHandler::comQuery(ReadBuffer & payload, bool binary_protocol) query_context->setCurrentQueryId(fmt::format("mysql:{}:{}", connection_id, toString(UUIDHelpers::generateV4()))); /// --- Workaround for Bug 56173. Can be removed when the analyzer is on by default. 
- auto settings = query_context->getSettings(); + auto settings = query_context->getSettingsCopy(); settings.prefer_column_name_to_alias = true; query_context->setSettings(settings); diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index d471c67553d..c287fc817eb 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -283,7 +283,7 @@ ConnectionPoolWithFailoverPtr DistributedAsyncInsertDirectoryQueue::createPool(c auto pools = createPoolsForAddresses(addresses, pool_factory, storage.log); - const auto settings = storage.getContext()->getSettings(); + const auto & settings = storage.getContext()->getSettingsRef(); return std::make_shared(std::move(pools), settings.load_balancing, settings.distributed_replica_error_half_life.totalSeconds(), diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 38869aebaa5..4ee68580d3f 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -34,7 +34,7 @@ MergedBlockOutputStream::MergedBlockOutputStream( , write_settings(write_settings_) { MergeTreeWriterSettings writer_settings( - data_part->storage.getContext()->getSettings(), + data_part->storage.getContext()->getSettingsRef(), write_settings, storage_settings, data_part->index_granularity_info.mark_type.adaptive, diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index c167ac87317..05cd77dcd40 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -23,7 +23,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( const MergeTreeIndexGranularityInfo * index_granularity_info) : IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, columns_list_, /*reset_columns=*/ true) { - const auto & global_settings = data_part->storage.getContext()->getSettings(); + const auto & global_settings = data_part->storage.getContext()->getSettingsRef(); MergeTreeWriterSettings writer_settings( global_settings, diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 9cec8c75ebe..731bd7ec3d3 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -462,7 +462,7 @@ std::pair> StorageURLSource: setCredentials(credentials, request_uri); - const auto settings = context_->getSettings(); + const auto & settings = context_->getSettingsRef(); auto proxy_config = getProxyConfiguration(request_uri.getScheme()); @@ -1324,7 +1324,7 @@ std::optional IStorageURLBase::tryGetLastModificationTime( const Poco::Net::HTTPBasicCredentials & credentials, const ContextPtr & context) { - auto settings = context->getSettingsRef(); + const auto & settings = context->getSettingsRef(); auto uri = Poco::URI(url); diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 5f768bce978..929896e3246 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -97,7 +97,7 @@ bool hasJoin(const ASTSelectWithUnionQuery & ast) ContextPtr getViewContext(ContextPtr context, const StorageSnapshotPtr & storage_snapshot) { auto view_context = storage_snapshot->metadata->getSQLSecurityOverriddenContext(context); - Settings view_settings = 
view_context->getSettings(); + Settings view_settings = view_context->getSettingsCopy(); view_settings.max_result_rows = 0; view_settings.max_result_bytes = 0; view_settings.extremes = false; diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 56071abaa95..9d23f132759 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -65,7 +65,7 @@ ColumnsDescription getStructureOfRemoteTableInShard( /// Ignore limit for result number of rows (that could be set during handling CSE/CTE), /// since this is a service query and should not lead to query failure. { - Settings new_settings = new_context->getSettings(); + Settings new_settings = new_context->getSettingsCopy(); new_settings.max_result_rows = 0; new_settings.max_result_bytes = 0; new_context->setSettings(new_settings); diff --git a/src/TableFunctions/Hive/TableFunctionHive.cpp b/src/TableFunctions/Hive/TableFunctionHive.cpp index 80494dbe5a8..759807d7a4f 100644 --- a/src/TableFunctions/Hive/TableFunctionHive.cpp +++ b/src/TableFunctions/Hive/TableFunctionHive.cpp @@ -93,7 +93,7 @@ StoragePtr TableFunctionHive::executeImpl( ColumnsDescription /*cached_columns_*/, bool /*is_insert_query*/) const { - const Settings & settings = context_->getSettings(); + const Settings & settings = context_->getSettingsRef(); ParserExpression partition_by_parser; ASTPtr partition_by_ast = parseQuery( partition_by_parser, From 123fd6b7503256b95991b098a3ddf875e5bcdcda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 12:40:44 +0000 Subject: [PATCH 0380/2361] Remove seemingly unnecessary `nullptr` check --- src/Storages/Kafka/KafkaConsumer2.cpp | 11 ++-- tests/integration/test_storage_kafka/test.py | 60 ++++++++++++++++++++ 2 files changed, 67 insertions(+), 4 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 51fc9bbe968..52a4cadd60b 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -401,15 +401,14 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) ReadBufferPtr KafkaConsumer2::getNextMessage() { - while (current != messages.end()) + if (current != messages.end()) { const auto * data = current->get_payload().get_data(); size_t size = current->get_payload().get_size(); ++current; - // TODO(antaljanosbenjamin): When this can be nullptr? 
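Illustrative aside (not from the patch): besides replacing this TODO with a chassert, the filterMessageErrors() hunk below re-anchors `current` to messages.begin() after erasing, because erasing elements invalidates iterators into the vector. A standalone sketch of that hazard, with hypothetical names, using C++20 std::erase_if:

    #include <vector>

    size_t dropNegative(std::vector<int> & messages, std::vector<int>::iterator & current)
    {
        const size_t skipped = std::erase_if(messages, [](int m) { return m < 0; });
        if (skipped)
            current = messages.begin();   // any previous iterator into `messages` is now invalid
        return skipped;
    }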
- if (data) - return std::make_shared(data, size); + chassert(data != nullptr); + return std::make_shared(data, size); } return nullptr; @@ -433,7 +432,11 @@ size_t KafkaConsumer2::filterMessageErrors() }); if (skipped) + { LOG_ERROR(log, "There were {} messages with an error", skipped); + // Technically current is invalidated as soon as we erased a single message + current = messages.begin(); + } return skipped; } diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index ea8f6671d20..710e05b5669 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -5444,6 +5444,66 @@ def test_multiple_read_in_materialized_views(kafka_cluster, create_query_generat ) + + +@pytest.mark.parametrize( + "create_query_generator", + [generate_old_create_table_query, generate_new_create_table_query], +) +def test_kafka_null_message(kafka_cluster, create_query_generator): + topic_name = "null_message" + + instance.query( + f""" + DROP TABLE IF EXISTS test.null_message_view; + DROP TABLE IF EXISTS test.null_message_consumer; + DROP TABLE IF EXISTS test.null_message_kafka; + + {create_query_generator("null_message_kafka", "value UInt64", topic_list=topic_name, consumer_group="mv")}; + CREATE TABLE test.null_message_view (value UInt64) + ENGINE = MergeTree() + ORDER BY value; + CREATE MATERIALIZED VIEW test.null_message_consumer TO test.null_message_view AS + SELECT * FROM test.null_message_kafka; + """ + ) + + message_key_values = [] + for i in range(5): + # Here the key is key for Kafka message + message = json.dumps({"value": i}) if i != 3 else None + message_key_values.append({"key": f"{i}".encode(), "message": message}) + + producer = get_kafka_producer( + kafka_cluster.kafka_port, producer_serializer, 15 + ) + for message_kv in message_key_values: + producer.send(topic=topic_name, key = message_kv["key"], value=message_kv["message"]) + producer.flush() + + expected = TSV( + """ +0 +1 +2 +4 +""" + ) + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): + result = instance.query_with_retry( + "SELECT * FROM test.null_message_view", check_callback=lambda res: TSV(res) == expected + ) + + assert expected == TSV(result) + + instance.query( + """ + DROP TABLE test.null_message_consumer; + DROP TABLE test.null_message_view; + DROP TABLE test.null_message_kafka; + """ + ) + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") From 87e9b7c5bbd2799e2ed7216b0416d137537fba17 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 15 Jul 2024 13:13:47 +0000 Subject: [PATCH 0381/2361] Automatic style fix --- tests/integration/test_storage_kafka/test.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 710e05b5669..85b420cc10e 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -5444,8 +5444,6 @@ def test_multiple_read_in_materialized_views(kafka_cluster, create_query_generat ) - - @pytest.mark.parametrize( "create_query_generator", [generate_old_create_table_query, generate_new_create_table_query], @@ -5474,24 +5472,25 @@ def test_kafka_null_message(kafka_cluster, create_query_generator): message = json.dumps({"value": i}) if i != 3 else None message_key_values.append({"key": f"{i}".encode(), "message": message}) - producer = get_kafka_producer( - 
kafka_cluster.kafka_port, producer_serializer, 15 - ) + producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, 15) for message_kv in message_key_values: - producer.send(topic=topic_name, key = message_kv["key"], value=message_kv["message"]) + producer.send( + topic=topic_name, key=message_kv["key"], value=message_kv["message"] + ) producer.flush() expected = TSV( - """ + """ 0 1 2 4 """ - ) + ) with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): result = instance.query_with_retry( - "SELECT * FROM test.null_message_view", check_callback=lambda res: TSV(res) == expected + "SELECT * FROM test.null_message_view", + check_callback=lambda res: TSV(res) == expected, ) assert expected == TSV(result) @@ -5504,6 +5503,7 @@ def test_kafka_null_message(kafka_cluster, create_query_generator): """ ) + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") From 36fb1cc3e79c9570ba43f81e4e47041100a63d0d Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Jul 2024 13:15:14 +0000 Subject: [PATCH 0382/2361] temporarily disable the setting in taskcontext --- src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 666dbe7e61e..19aa63d90a2 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -140,7 +140,7 @@ ContextMutablePtr MutatePlainMergeTreeTask::createTaskContext() const auto queryId = getQueryId(); context->setCurrentQueryId(queryId); context->setBackgroundOperationTypeForContext(ClientInfo::BackgroundOperationType::MUTATION); - context->setSetting("lightweight_mutation_projection_mode", merge_mutate_entry->lightweight_delete_projection_mode); + // context->setSetting("lightweight_mutation_projection_mode", merge_mutate_entry->lightweight_delete_projection_mode); return context; } From 43d86f9066ec75d3747e85b297a155690a974e45 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 15 Jul 2024 14:46:08 +0000 Subject: [PATCH 0383/2361] Unify SKIP and SKIP PREFIX hints, improve JSON parsing --- src/Columns/ColumnObject.cpp | 1 + src/DataTypes/DataTypeObject.cpp | 42 ++--- src/DataTypes/DataTypeObject.h | 4 - src/DataTypes/DataTypesBinaryEncoding.cpp | 16 -- src/DataTypes/DataTypesBinaryEncoding.h | 106 ++++++------- .../Serializations/SerializationDynamic.cpp | 2 + .../Serializations/SerializationJSON.cpp | 3 +- .../Serializations/SerializationJSON.h | 1 - .../Serializations/SerializationObject.cpp | 12 +- .../Serializations/SerializationObject.h | 3 +- src/Formats/JSONExtractTree.cpp | 108 +++++++++---- src/Formats/SchemaInferenceUtils.cpp | 145 ++++++++++++------ src/Parsers/ASTObjectTypeArgument.cpp | 11 -- src/Parsers/ASTObjectTypeArgument.h | 2 - src/Parsers/ParserDataType.cpp | 15 +- src/Storages/VirtualColumnUtils.cpp | 1 + 16 files changed, 265 insertions(+), 207 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index dce6b006ff2..bd6f759ee30 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -424,6 +424,7 @@ void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t len void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) #endif { + /// TODO: try to parallelize doInsertRangeFrom over typed/dynamic paths. 
const auto & src_object_column = assert_cast(src); /// First, insert typed paths, they must be the same for both columns. diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 8f3baa05925..751a19beca5 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -39,22 +39,29 @@ DataTypeObject::DataTypeObject( const SchemaFormat & schema_format_, const std::unordered_map & typed_paths_, const std::unordered_set & paths_to_skip_, - const std::vector & path_prefixes_to_skip_, const std::vector & path_regexps_to_skip_, size_t max_dynamic_paths_, size_t max_dynamic_types_) : schema_format(schema_format_) , typed_paths(typed_paths_) , paths_to_skip(paths_to_skip_) - , path_prefixes_to_skip(path_prefixes_to_skip_) , path_regexps_to_skip(path_regexps_to_skip_) , max_dynamic_paths(max_dynamic_paths_) , max_dynamic_types(max_dynamic_types_) { - for (const auto & path : paths_to_skip) + for (const auto & [typed_path, type] : typed_paths) { - if (typed_paths.contains(path)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified both with the data type ('{}') and in the SKIP section", path, typed_paths[path]->getName()); + for (const auto & path_to_skip : paths_to_skip) + { + if (typed_path.starts_with(path_to_skip)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP path prefix '{}'", typed_path, type->getName(), path_to_skip); + } + + for (const auto & path_regext_to_skip : paths_to_skip) + { + if (re2::RE2::FullMatch(typed_path, re2::RE2(path_regext_to_skip))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP REGEXP '{}'", typed_path, type->getName(), path_regext_to_skip); + } } } @@ -81,7 +88,7 @@ bool DataTypeObject::equals(const IDataType & rhs) const return false; } - return schema_format == object->schema_format && paths_to_skip == object->paths_to_skip && path_prefixes_to_skip == object->path_prefixes_to_skip && path_regexps_to_skip == object->path_regexps_to_skip + return schema_format == object->schema_format && paths_to_skip == object->paths_to_skip && path_regexps_to_skip == object->path_regexps_to_skip && max_dynamic_types == object->max_dynamic_types && max_dynamic_paths == object->max_dynamic_paths; } @@ -102,7 +109,6 @@ SerializationPtr DataTypeObject::doGetDefaultSerialization() const return std::make_shared>( std::move(typed_path_serializations), paths_to_skip, - path_prefixes_to_skip, path_regexps_to_skip, buildJSONExtractTree(getPtr(), "JSON serialization")); #elif USE_RAPIDJSON @@ -165,12 +171,6 @@ String DataTypeObject::doGetName() const out << "SKIP " << skip_path; } - for (const auto & skip_prefix : path_prefixes_to_skip) - { - write_separator(); - out << "SKIP PREFIX " << skip_prefix; - } - for (const auto & skip_regexp : path_regexps_to_skip) { write_separator(); @@ -275,7 +275,7 @@ std::unique_ptr DataTypeObject::getDynamicSubcolu std::unique_ptr res = std::make_unique(std::make_shared(prefix, typed_paths_serializations)); /// Keep all current constrains like limits and skip paths/prefixes/regexps. 
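Illustrative aside (not from the patch): with SKIP and SKIP PREFIX unified, every entry in `paths_to_skip` also acts as a path prefix (note the `starts_with` check in the constructor above), and typed paths are additionally validated against the SKIP REGEXP patterns via re2. A minimal construction sketch, assuming the constructor shown above with its default trailing max_dynamic_paths/max_dynamic_types arguments and the usual ClickHouse type aliases:

    std::unordered_map<String, DataTypePtr> typed_paths{{"a.b", std::make_shared<DataTypeUInt64>()}};
    std::unordered_set<String> paths_to_skip{"a.c"};            // skips a.c and everything under it
    std::vector<String> path_regexps_to_skip{"tmp\\..*"};

    auto json_type = std::make_shared<DataTypeObject>(
        DataTypeObject::SchemaFormat::JSON, typed_paths, paths_to_skip, path_regexps_to_skip);

    // A typed path that falls under a skipped prefix or matches a skip regexp is rejected
    // with BAD_ARGUMENTS at type-creation time instead of being silently inconsistent.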
- res->type = std::make_shared(schema_format, typed_sub_paths, paths_to_skip, path_prefixes_to_skip, path_prefixes_to_skip, max_dynamic_paths, max_dynamic_types); + res->type = std::make_shared(schema_format, typed_sub_paths, paths_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); /// If column was provided, we should create a column for the requested subcolumn. if (data.column) { @@ -405,8 +405,6 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: std::unordered_map typed_paths; std::unordered_set paths_to_skip; - /// Collect prefixes in unordered_set to avoid duplicate prefixes - std::unordered_set path_prefixes_to_skip_set; std::vector path_regexps_to_skip; size_t max_dynamic_types = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES; @@ -458,14 +456,6 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: paths_to_skip.insert(identifier->name()); } - else if (object_type_argument->skip_path_prefix) - { - const auto * identifier = object_type_argument->skip_path_prefix->as(); - if (!identifier) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST in SKIP section of {} type arguments: {}. Expected identifier with path name", magic_enum::enum_name(schema_format), object_type_argument->skip_path->formatForErrorMessage()); - - path_prefixes_to_skip_set.insert(identifier->name()); - } else if (object_type_argument->skip_path_regexp) { const auto * literal = object_type_argument->skip_path_regexp->as(); @@ -476,10 +466,8 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: } } - std::vector path_prefixes_to_skip(path_prefixes_to_skip_set.begin(), path_prefixes_to_skip_set.end()); - std::sort(path_prefixes_to_skip.begin(), path_prefixes_to_skip.end()); std::sort(path_regexps_to_skip.begin(), path_regexps_to_skip.end()); - return std::make_shared(schema_format, typed_paths, paths_to_skip, path_prefixes_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); + return std::make_shared(schema_format, typed_paths, paths_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); } static DataTypePtr createJSON(const ASTPtr & arguments) diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 8f580f03b57..61364dfe5ed 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -32,7 +32,6 @@ public: const SchemaFormat & schema_format_, const std::unordered_map & typed_paths_ = {}, const std::unordered_set & paths_to_skip_ = {}, - const std::vector & path_prefixes_to_skip_ = {}, const std::vector & path_regexps_to_skip_ = {}, size_t max_dynamic_paths_ = DEFAULT_MAX_SEPARATELY_STORED_PATHS, size_t max_dynamic_types_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES); @@ -67,7 +66,6 @@ public: const SchemaFormat & getSchemaFormat() const { return schema_format; } const std::unordered_map & getTypedPaths() const { return typed_paths; } const std::unordered_set & getPathsToSkip() const { return paths_to_skip; } - const std::vector & getPathPrefixesToSkip() const { return path_prefixes_to_skip; } const std::vector & getPathRegexpsToSkip() const { return path_regexps_to_skip; } size_t getMaxDynamicTypes() const { return max_dynamic_types; } @@ -79,8 +77,6 @@ private: std::unordered_map typed_paths; /// Set of paths that should be skipped during data parsing. std::unordered_set paths_to_skip; - /// List of path prefixes that should be skipped during data parsing. 
- std::vector path_prefixes_to_skip; /// List of regular expressions that should be used to skip paths during data parsing. std::vector path_regexps_to_skip; /// Limit on the number of paths that can be stored as subcolumn. diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index 102e5eaede0..bd97bceff4c 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -507,10 +507,6 @@ void encodeDataType(const DataTypePtr & type, WriteBuffer & buf) writeVarUInt(paths_to_skip.size(), buf); for (const auto & path : paths_to_skip) writeStringBinary(path, buf); - const auto & path_prefixes_to_skip = object_type.getPathPrefixesToSkip(); - writeVarUInt(path_prefixes_to_skip.size(), buf); - for (const auto & prefix : path_prefixes_to_skip) - writeStringBinary(prefix, buf); const auto & path_regexps_to_skip = object_type.getPathRegexpsToSkip(); writeVarUInt(path_regexps_to_skip.size(), buf); for (const auto & regexp : path_regexps_to_skip) @@ -754,17 +750,6 @@ DataTypePtr decodeDataType(ReadBuffer & buf) paths_to_skip.insert(path); } - size_t path_prefixes_to_skip_size; - readVarUInt(path_prefixes_to_skip_size, buf); - std::vector path_prefixes_to_skip; - path_prefixes_to_skip.reserve(path_prefixes_to_skip_size); - for (size_t i = 0; i != path_prefixes_to_skip_size; ++i) - { - String prefix; - readStringBinary(prefix, buf); - path_prefixes_to_skip.push_back(prefix); - } - size_t path_regexps_to_skip_size; readVarUInt(path_regexps_to_skip_size, buf); std::vector path_regexps_to_skip; @@ -779,7 +764,6 @@ DataTypePtr decodeDataType(ReadBuffer & buf) DataTypeObject::SchemaFormat::JSON, typed_paths, paths_to_skip, - path_prefixes_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); diff --git a/src/DataTypes/DataTypesBinaryEncoding.h b/src/DataTypes/DataTypesBinaryEncoding.h index de001966aee..5ac21471a29 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.h +++ b/src/DataTypes/DataTypesBinaryEncoding.h @@ -8,59 +8,59 @@ namespace DB /** Binary encoding for ClickHouse data types: -|---------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ClickHouse data type | Binary encoding | -|---------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Nothing | 0x00 | -| UInt8 | 0x01 | -| UInt16 | 0x02 | -| UInt32 | 0x03 | -| UInt64 | 0x04 | -| UInt128 | 0x05 | -| UInt256 | 0x06 | -| Int8 | 0x07 | -| Int16 | 0x08 | -| Int32 | 0x09 | -| Int64 | 0x0A | -| Int128 | 0x0B | -| Int256 | 0x0C | -| Float32 
| 0x0D | -| Float64 | 0x0E | -| Date | 0x0F | -| Date32 | 0x10 | -| DateTime | 0x11 | -| DateTime(time_zone) | 0x12 | -| DateTime64(P) | 0x13 | -| DateTime64(P, time_zone) | 0x14 | -| String | 0x15 | -| FixedString(N) | 0x16 | -| Enum8 | 0x17... | -| Enum16 | 0x18...> | -| Decimal32(P, S) | 0x19 | -| Decimal64(P, S) | 0x1A | -| Decimal128(P, S) | 0x1B | -| Decimal256(P, S) | 0x1C | -| UUID | 0x1D | -| Array(T) | 0x1E | -| Tuple(T1, ..., TN) | 0x1F... | -| Tuple(name1 T1, ..., nameN TN) | 0x20... | -| Set | 0x21 | -| Interval | 0x22 | -| Nullable(T) | 0x23 | -| Function | 0x24... | -| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | -| LowCardinality(T) | 0x26 | -| Map(K, V) | 0x27 | -| IPv4 | 0x28 | -| IPv6 | 0x29 | -| Variant(T1, ..., TN) | 0x2A... | -| Dynamic(max_types=N) | 0x2B | -| Custom type (Ring, Polygon, etc) | 0x2C | -| Bool | 0x2D | -| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | -| Nested(name1 T1, ..., nameN TN) | 0x2F... | -| JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP PREFIX skip_path_prefix, SKIP REGEXP skip_path_regexp) | 0x30............ | -|---------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ClickHouse data type | Binary encoding | +|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Nothing | 0x00 | +| UInt8 | 0x01 | +| UInt16 | 0x02 | +| UInt32 | 0x03 | +| UInt64 | 0x04 | +| UInt128 | 0x05 | +| UInt256 | 0x06 | +| Int8 | 0x07 | +| Int16 | 0x08 | +| Int32 | 0x09 | +| Int64 | 0x0A | +| Int128 | 0x0B | +| Int256 | 0x0C | +| Float32 | 0x0D | +| Float64 | 0x0E | +| Date | 0x0F | +| Date32 | 0x10 | +| DateTime | 0x11 | +| DateTime(time_zone) | 0x12 | +| DateTime64(P) | 0x13 | +| DateTime64(P, time_zone) | 0x14 | +| String | 0x15 | +| FixedString(N) | 0x16 | +| Enum8 | 0x17... | +| Enum16 | 0x18...> | +| Decimal32(P, S) | 0x19 | +| Decimal64(P, S) | 0x1A | +| Decimal128(P, S) | 0x1B | +| Decimal256(P, S) | 0x1C | +| UUID | 0x1D | +| Array(T) | 0x1E | +| Tuple(T1, ..., TN) | 0x1F... | +| Tuple(name1 T1, ..., nameN TN) | 0x20... | +| Set| 0x21 | +| Interval | 0x22 | +| Nullable(T) | 0x23 | +| Function | 0x24... 
| +| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | +| LowCardinality(T) | 0x26 | +| Map(K, V) | 0x27 | +| IPv4 | 0x28 | +| IPv6 | 0x29 | +| Variant(T1, ..., TN) | 0x2A... | +| Dynamic(max_types=N) | 0x2B | +| Custom type (Ring, Polygon, etc) | 0x2C | +| Bool | 0x2D | +| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | +| Nested(name1 T1, ..., nameN TN) | 0x2F... | +| JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp) | 0x30......... | +|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| Interval kind binary encoding: |---------------|-----------------| diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 6cc7a7e2e74..c1ef67b2aaa 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -338,6 +338,8 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu } const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(global_discr); + if (!variant_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Nullptr type in variant {}", variant_info.variant_name); encodeDataType(variant_type, ostr); variant_type->getDefaultSerialization()->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings); } diff --git a/src/DataTypes/Serializations/SerializationJSON.cpp b/src/DataTypes/Serializations/SerializationJSON.cpp index 51b12568981..346e6785abc 100644 --- a/src/DataTypes/Serializations/SerializationJSON.cpp +++ b/src/DataTypes/Serializations/SerializationJSON.cpp @@ -22,10 +22,9 @@ template SerializationJSON::SerializationJSON( std::unordered_map typed_paths_serializations_, const std::unordered_set & paths_to_skip_, - const std::vector & path_prefixes_to_skip_, const std::vector & path_regexps_to_skip_, std::unique_ptr> json_extract_tree_) - : SerializationObject(std::move(typed_paths_serializations_), paths_to_skip_, path_prefixes_to_skip_, path_regexps_to_skip_) + : SerializationObject(std::move(typed_paths_serializations_), paths_to_skip_, path_regexps_to_skip_) , json_extract_tree(std::move(json_extract_tree_)) { } diff --git a/src/DataTypes/Serializations/SerializationJSON.h b/src/DataTypes/Serializations/SerializationJSON.h index 0c6d2bd164b..aee1413bdd3 100644 --- a/src/DataTypes/Serializations/SerializationJSON.h +++ b/src/DataTypes/Serializations/SerializationJSON.h @@ -15,7 +15,6 @@ public: SerializationJSON( std::unordered_map typed_paths_serializations_, const std::unordered_set & paths_to_skip_, - const std::vector & path_prefixes_to_skip_, const std::vector & path_regexps_to_skip_, std::unique_ptr> json_extract_tree_); diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 8e6f6f105ea..0fa1ef7d2ca 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -19,11 
+19,9 @@ namespace ErrorCodes
 SerializationObject::SerializationObject(
     std::unordered_map typed_path_serializations_,
     const std::unordered_set & paths_to_skip_,
-    const std::vector & path_prefixes_to_skip_,
     const std::vector & path_regexps_to_skip_)
     : typed_path_serializations(std::move(typed_path_serializations_))
     , paths_to_skip(paths_to_skip_)
-    , path_prefixes_to_skip(path_prefixes_to_skip_)
     , dynamic_serialization(std::make_shared())
     , shared_data_serialization(getTypeOfSharedData()->getDefaultSerialization())
 {
@@ -32,6 +30,8 @@ SerializationObject::SerializationObject(
     for (const auto & [path, _] : typed_path_serializations)
         sorted_typed_paths.emplace_back(path);
     std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end());
+    sorted_paths_to_skip.assign(paths_to_skip.begin(), paths_to_skip.end());
+    std::sort(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end());
     for (const auto & regexp_str : path_regexps_to_skip_)
         path_regexps_to_skip.emplace_back(regexp_str);
 }
@@ -48,11 +48,9 @@ bool SerializationObject::shouldSkipPath(const String & path) const
     if (paths_to_skip.contains(path))
         return true;
-    for (const auto & prefix : path_prefixes_to_skip)
-    {
-        if (path.starts_with(prefix))
-            return true;
-    }
+    auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path);
+    if (it != sorted_paths_to_skip.end() && it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it)))
+        return true;
     for (const auto & regexp : path_regexps_to_skip)
     {
diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h
index 8702b0c7f64..b279fab2603 100644
--- a/src/DataTypes/Serializations/SerializationObject.h
+++ b/src/DataTypes/Serializations/SerializationObject.h
@@ -32,7 +32,6 @@ public:
     SerializationObject(
         std::unordered_map typed_path_serializations_,
         const std::unordered_set & paths_to_skip_,
-        const std::vector & path_prefixes_to_skip_,
         const std::vector & path_regexps_to_skip_);

     void enumerateStreams(
@@ -114,7 +113,7 @@ protected:
     std::unordered_map typed_path_serializations;
     std::unordered_set paths_to_skip;
-    std::vector path_prefixes_to_skip;
+    std::vector sorted_paths_to_skip;
     std::list path_regexps_to_skip;

     SerializationPtr dynamic_serialization;
diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp
index e68e98a6a06..2eaddddb567 100644
--- a/src/Formats/JSONExtractTree.cpp
+++ b/src/Formats/JSONExtractTree.cpp
@@ -1386,10 +1386,13 @@ public:
         if (column_dynamic.addNewVariant(element_type))
         {
-            auto node = buildJSONExtractTree(element_type, "Dynamic inference");
-            auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type->getName());
+            auto element_type_name = element_type->getName();
+            auto it = json_extract_nodes_cache.find(element_type_name);
+            if (it == json_extract_nodes_cache.end())
+                it = json_extract_nodes_cache.emplace(element_type_name, buildJSONExtractTree(element_type, "Dynamic inference")).first;
+            auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type_name);
             auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discriminator);
-            if (!node->insertResultToColumn(variant, element, insert_settings, format_settings, error))
+            if (!it->second->insertResultToColumn(variant, element, insert_settings, format_settings, error))
                 return false;
             variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(global_discriminator));
variant_column.getOffsets().push_back(variant.size() - 1); @@ -1397,8 +1400,10 @@ public: } /// We couldn't add new variant. Try to insert element into current variants. - auto variant_node = buildJSONExtractTree(variant_info.variant_type, "Dynamic inference"); - if (variant_node->insertResultToColumn(variant_column, element, insert_settings, format_settings, error)) + auto it = json_extract_nodes_cache.find(variant_info.variant_name); + if (it == json_extract_nodes_cache.end()) + it = json_extract_nodes_cache.emplace(variant_info.variant_name, buildJSONExtractTree(variant_info.variant_type, "Dynamic inference")).first; + if (it->second->insertResultToColumn(variant_column, element, insert_settings, format_settings, error)) return true; /// We couldn't insert element into any existing variant, add String variant and read value as String. @@ -1424,7 +1429,7 @@ public: JSONInferenceInfo json_inference_info; auto type = elementToDataTypeImpl(element, format_settings, json_inference_info); transformFinalInferredJSONTypeIfNeeded(type, format_settings, &json_inference_info); - if (format_settings.schema_inference_make_columns_nullable) + if (format_settings.schema_inference_make_columns_nullable && type->haveSubtypes()) type = makeNullableRecursively(type); return type; } @@ -1435,20 +1440,20 @@ private: switch (element.type()) { case ElementType::NULL_VALUE: - return makeNullable(std::make_shared()); + return getNullType(); case ElementType::BOOL: - return DataTypeFactory::instance().get("Bool"); + return getBoolType(); case ElementType::INT64: { - auto type = std::make_shared(); + auto type = getInt64Type(); if (element.getInt64() < 0) json_inference_info.negative_integers.insert(type.get()); return type; } case ElementType::UINT64: - return std::make_shared(); + return getUInt64Type(); case ElementType::DOUBLE: - return std::make_shared(); + return getFloat64Type(); case ElementType::STRING: { auto data = element.getString(); @@ -1465,7 +1470,7 @@ private: } } - return std::make_shared(); + return getStringType(); } case ElementType::ARRAY: { @@ -1476,7 +1481,7 @@ private: types.push_back(elementToDataTypeImpl(value, format_settings, json_inference_info)); if (types.empty()) - return std::make_shared(std::make_shared()); + return getEmptyArrayType(); if (checkIfTypesAreEqual(types)) return std::make_shared(types.back()); @@ -1505,13 +1510,67 @@ private: } case ElementType::OBJECT: { - return std::make_shared(DataTypeObject::SchemaFormat::JSON, max_dynamic_paths_for_object, max_dynamic_types_for_object); + return getObjectType(); } } } + /// During schema inference we create shared_ptr to the some data types quite a lot. + /// Single creating of such shared_ptr is not expensive, but when it happens on each + /// column on each row, it can be noticeable. 
+ const DataTypePtr & getBoolType() const + { + static const DataTypePtr bool_type = DataTypeFactory::instance().get("Bool"); + return bool_type; + } + + const DataTypePtr & getStringType() const + { + static const DataTypePtr string_type = std::make_shared(); + return string_type; + } + + const DataTypePtr & getInt64Type() const + { + static const DataTypePtr int64_type = std::make_shared(); + return int64_type; + } + + const DataTypePtr & getUInt64Type() const + { + static const DataTypePtr uint64_type = std::make_shared(); + return uint64_type; + } + + const DataTypePtr & getFloat64Type() const + { + static const DataTypePtr float64_type = std::make_shared(); + return float64_type; + } + + const DataTypePtr & getObjectType() const + { + static const DataTypePtr object_type = std::make_shared(DataTypeObject::SchemaFormat::JSON, max_dynamic_paths_for_object, max_dynamic_types_for_object); + return object_type; + } + + const DataTypePtr & getNullType() const + { + static const DataTypePtr null_type = std::make_shared(std::make_shared()); + return null_type; + } + + const DataTypePtr & getEmptyArrayType() const + { + static const DataTypePtr empty_array_type = std::make_shared(std::make_shared()); + return empty_array_type; + } + size_t max_dynamic_paths_for_object; size_t max_dynamic_types_for_object; + + /// Avoid building JSONExtractTreeNode for the same data types on each row by using cache. + mutable std::unordered_map>> json_extract_nodes_cache; }; template @@ -1521,18 +1580,18 @@ public: ObjectJSONNode( std::unordered_map>> typed_path_nodes_, const std::unordered_set & paths_to_skip_, - const std::vector & path_prefixes_to_skip_, const std::vector & path_regexps_to_skip_, size_t max_dynamic_paths_, size_t max_dynamic_types) : typed_path_nodes(std::move(typed_path_nodes_)) , paths_to_skip(paths_to_skip_) - , path_prefixes_to_skip(path_prefixes_to_skip_) , dynamic_node(std::make_unique>( max_dynamic_paths_ / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))) , dynamic_serialization(std::make_shared()) { + sorted_paths_to_skip.assign(paths_to_skip.begin(), paths_to_skip.end()); + std::sort(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end()); for (const auto & regexp : path_regexps_to_skip_) path_regexps_to_skip.emplace_back(regexp); } @@ -1616,6 +1675,9 @@ private: size_t current_size, String & error) const { + if (shouldSkipPath(current_path)) + return true; + if (element.isObject() && !typed_path_nodes.contains(current_path)) { for (auto [key, value] : element.getObject()) @@ -1631,9 +1693,6 @@ private: return true; } - if (shouldSkipPath(current_path)) - return true; - auto & typed_paths = column_object.getTypedPaths(); auto & dynamic_paths = column_object.getDynamicPaths(); /// Check if we have this path in typed paths. 
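The hunks above replace repeated per-row work with two kinds of caching: function-local `static const` pointers for frequently created data types, and a map from type name to an already-built extract-tree node. Below is a minimal standalone sketch of both patterns; the names `Type`, `NodePtr`, `buildNode` and `NodeCache` are illustrative stand-ins for the ClickHouse types used in the patch, not the real interfaces.

```cpp
#include <map>
#include <memory>
#include <string>

// Stand-ins for DataTypePtr and the JSON extract-tree node used in the patch.
struct Type { std::string name; };
using TypePtr = std::shared_ptr<const Type>;
struct Node {};
using NodePtr = std::shared_ptr<Node>;

// Placeholder for the expensive builder (buildJSONExtractTree in the patch).
NodePtr buildNode(const TypePtr &) { return std::make_shared<Node>(); }

// 1. Function-local static: the shared_ptr is constructed once per process
//    instead of once per row/column, and callers receive a const reference.
const TypePtr & getStringType()
{
    static const TypePtr string_type = std::make_shared<Type>(Type{"String"});
    return string_type;
}

// 2. Memoization keyed by type name: build a node the first time a type is
//    seen and reuse it for every following row.
class NodeCache
{
public:
    const NodePtr & getOrBuild(const TypePtr & type)
    {
        auto [it, inserted] = cache.try_emplace(type->name);
        if (inserted)
            it->second = buildNode(type);
        return it->second;
    }

private:
    std::map<std::string, NodePtr> cache; // the patch uses an unordered_map
};
```

Both tricks trade a little bookkeeping for not repeating `make_shared` and tree building on the per-row hot path.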
@@ -1707,11 +1766,9 @@ private: if (paths_to_skip.contains(path)) return true; - for (const auto & prefix : path_prefixes_to_skip) - { - if (path.starts_with(prefix)) - return true; - } + auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path); + if (it != sorted_paths_to_skip.end() && it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it))) + return true; for (const auto & regexp : path_regexps_to_skip) { @@ -1724,7 +1781,7 @@ private: std::unordered_map>> typed_path_nodes; std::unordered_set paths_to_skip; - std::vector path_prefixes_to_skip; + std::vector sorted_paths_to_skip; std::list path_regexps_to_skip; std::unique_ptr> dynamic_node; std::shared_ptr dynamic_serialization; @@ -1889,7 +1946,6 @@ std::unique_ptr> buildJSONExtractTree(const Data return std::make_unique>( std::move(typed_path_nodes), object_type.getPathsToSkip(), - object_type.getPathPrefixesToSkip(), object_type.getPathRegexpsToSkip(), object_type.getMaxDynamicPaths(), object_type.getMaxDynamicTypes()); diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 15dc75c0f3a..d245200eefa 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -36,6 +36,63 @@ namespace ErrorCodes namespace { + /// During schema inference we create shared_ptr to the some data types quite a lot. + /// Single creating of such shared_ptr is not expensive, but when it happens on each + /// column on each row, it can be noticeable. + const DataTypePtr & getBoolType() + { + static const DataTypePtr bool_type = DataTypeFactory::instance().get("Bool"); + return bool_type; + } + + const DataTypePtr & getStringType() + { + static const DataTypePtr string_type = std::make_shared(); + return string_type; + } + + const DataTypePtr & getInt64Type() + { + static const DataTypePtr int64_type = std::make_shared(); + return int64_type; + } + + const DataTypePtr & getUInt64Type() + { + static const DataTypePtr uint64_type = std::make_shared(); + return uint64_type; + } + + const DataTypePtr & getFloat64Type() + { + static const DataTypePtr float64_type = std::make_shared(); + return float64_type; + } + + const DataTypePtr & getDateType() + { + static const DataTypePtr date_type = std::make_shared(); + return date_type; + } + + const DataTypePtr & getDateTime64Type() + { + static const DataTypePtr date_type = std::make_shared(9); + return date_type; + } + + const DataTypePtr & getNullType() + { + static const DataTypePtr null_type = std::make_shared(std::make_shared()); + return null_type; + } + + const DataTypePtr & getEmptyArrayType() + { + static const DataTypePtr empty_array_type = std::make_shared(std::make_shared()); + return empty_array_type; + } + /// Special data type that represents JSON object as a set of paths and their types. /// It supports merging two JSON objects and creating Named Tuple from itself. /// It's used only for schema inference of Named Tuples from JSON objects. 
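The `shouldSkipPath` changes above swap a linear scan over path prefixes for a binary search over a sorted copy of the skip paths: after `std::lower_bound`, only the exact match and the immediate predecessor need to be examined. A small self-contained sketch of that check follows; the function name and the stated assumption are mine, not part of the patch.

```cpp
#include <algorithm>
#include <string>
#include <vector>

/// `sorted_skips` must be sorted ascending (built once up front, as in the constructors above).
/// Returns true if `path` equals a skip entry or starts with one.
/// Checking only the predecessor is sufficient as long as no skip entry is
/// itself a prefix of another skip entry.
bool shouldSkipByPrefix(const std::vector<std::string> & sorted_skips, const std::string & path)
{
    auto it = std::lower_bound(sorted_skips.begin(), sorted_skips.end(), path);
    if (it != sorted_skips.end() && *it == path)
        return true;                                   // exact match
    if (it != sorted_skips.begin() && path.starts_with(*std::prev(it)))
        return true;                                   // nested under a skipped path
    return false;
}
```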
@@ -208,7 +265,7 @@ namespace if (leaf_type && !isNothing(removeNullable(leaf_type)) && !nodes.empty()) { if (use_string_type_for_ambiguous_paths) - return std::make_shared(); + return getStringType(); throw Exception( ErrorCodes::INCORRECT_DATA, @@ -274,7 +331,7 @@ namespace bool is_negative = json_info && json_info->negative_integers.contains(type.get()); have_negative_integers |= is_negative; if (!is_negative) - type = std::make_shared(); + type = getUInt64Type(); } } @@ -295,7 +352,7 @@ namespace WhichDataType which(type); if (which.isInt64() || which.isUInt64()) { - auto new_type = std::make_shared(); + auto new_type = getFloat64Type(); if (json_info && json_info->numbers_parsed_from_json_strings.erase(type.get())) json_info->numbers_parsed_from_json_strings.insert(new_type.get()); type = new_type; @@ -319,7 +376,7 @@ namespace for (auto & type : data_types) { if (isDate(type) || isDateTime64(type)) - type = std::make_shared(); + type = getStringType(); } type_indexes.erase(TypeIndex::Date); @@ -333,7 +390,7 @@ namespace for (auto & type : data_types) { if (isDate(type)) - type = std::make_shared(9); + type = getDateTime64Type(); } type_indexes.erase(TypeIndex::Date); @@ -355,7 +412,7 @@ namespace if (isNumber(type) && (settings.json.read_numbers_as_strings || !json_info || json_info->numbers_parsed_from_json_strings.contains(type.get()))) - type = std::make_shared(); + type = getStringType(); } updateTypeIndexes(data_types, type_indexes); @@ -378,11 +435,11 @@ namespace if (isBool(type)) { if (have_signed_integers) - type = std::make_shared(); + type = getInt64Type(); else if (have_unsigned_integers) - type = std::make_shared(); + type = getUInt64Type(); else - type = std::make_shared(); + type = getFloat64Type(); } } @@ -399,7 +456,7 @@ namespace for (auto & type : data_types) { if (isBool(type)) - type = std::make_shared(); + type = getStringType(); } type_indexes.erase(TypeIndex::UInt8); @@ -549,7 +606,7 @@ namespace for (auto & type : data_types) { if (isMap(type)) - type = std::make_shared(); + type = getStringType(); } type_indexes.erase(TypeIndex::Map); @@ -697,37 +754,37 @@ namespace bool tryInferDate(std::string_view field) { - if (field.empty()) + /// Minimum length of Date text representation is 8 (YYYY-M-D) and maximum is 10 (YYYY-MM-DD) + if (field.size() < 8 || field.size() > 10) return false; - ReadBufferFromString buf(field); - Float64 tmp_float; /// Check if it's just a number, and if so, don't try to infer Date from it, /// because we can interpret this number as a Date (for example 20000101 will be 2000-01-01) /// and it will lead to inferring Date instead of simple Int64/UInt64 in some cases. - if (tryReadFloatText(tmp_float, buf) && buf.eof()) + if (std::all_of(field.begin(), field.end(), isNumericASCII)) return false; - buf.seek(0, SEEK_SET); /// Return position to the beginning - + ReadBufferFromString buf(field); DayNum tmp; return tryReadDateText(tmp, buf) && buf.eof(); } bool tryInferDateTime(std::string_view field, const FormatSettings & settings) { - if (field.empty()) + /// Don't try to infer DateTime if string is too long. + /// It's difficult to say what is the real maximum length of + /// DateTime we can parse using BestEffort approach. + /// 40 symbols is more or less valid limit. 
+ if (field.empty() || field.size() > 40) return false; - ReadBufferFromString buf(field); - Float64 tmp_float; /// Check if it's just a number, and if so, don't try to infer DateTime from it, /// because we can interpret this number as a timestamp and it will lead to /// inferring DateTime instead of simple Int64/Float64 in some cases. - if (tryReadFloatText(tmp_float, buf) && buf.eof()) + if (std::all_of(field.begin(), field.end(), isNumericASCII)) return false; - buf.seek(0, SEEK_SET); /// Return position to the beginning + ReadBufferFromString buf(field); DateTime64 tmp; switch (settings.date_time_input_format) { @@ -792,7 +849,7 @@ namespace /// Empty array has type Array(Nothing) if (nested_types.empty()) - return std::make_shared(std::make_shared()); + return getEmptyArrayType(); if (checkIfTypesAreEqual(nested_types)) return std::make_shared(std::move(nested_types.back())); @@ -905,13 +962,13 @@ namespace /// NOTE: it may break parsing of tryReadFloat() != tryReadIntText() + parsing of '.'/'e' /// But, for now it is true if (tryReadFloat(tmp_float, buf, settings, has_fractional) && has_fractional) - return std::make_shared(); + return getFloat64Type(); Int64 tmp_int; buf.position() = number_start; if (tryReadIntText(tmp_int, buf)) { - auto type = std::make_shared(); + auto type = getInt64Type(); if (json_info && tmp_int < 0) json_info->negative_integers.insert(type.get()); return type; @@ -921,7 +978,7 @@ namespace UInt64 tmp_uint; buf.position() = number_start; if (tryReadIntText(tmp_uint, buf)) - return std::make_shared(); + return getUInt64Type(); return nullptr; } @@ -933,13 +990,13 @@ namespace PeekableReadBufferCheckpoint checkpoint(peekable_buf); if (tryReadFloat(tmp_float, peekable_buf, settings, has_fractional) && has_fractional) - return std::make_shared(); + return getFloat64Type(); peekable_buf.rollbackToCheckpoint(/* drop= */ false); Int64 tmp_int; if (tryReadIntText(tmp_int, peekable_buf)) { - auto type = std::make_shared(); + auto type = getInt64Type(); if (json_info && tmp_int < 0) json_info->negative_integers.insert(type.get()); return type; @@ -949,11 +1006,11 @@ namespace /// In case of Int64 overflow we can try to infer UInt64. UInt64 tmp_uint; if (tryReadIntText(tmp_uint, peekable_buf)) - return std::make_shared(); + return getUInt64Type(); } else if (tryReadFloat(tmp_float, buf, settings, has_fractional)) { - return std::make_shared(); + return getFloat64Type(); } /// This is not a number. @@ -970,7 +1027,7 @@ namespace Int64 tmp_int; if (tryReadIntText(tmp_int, buf) && buf.eof()) { - auto type = std::make_shared(); + auto type = getInt64Type(); if (json_inference_info && tmp_int < 0) json_inference_info->negative_integers.insert(type.get()); return type; @@ -982,7 +1039,7 @@ namespace /// In case of Int64 overflow, try to infer UInt64 UInt64 tmp_uint; if (tryReadIntText(tmp_uint, buf) && buf.eof()) - return std::make_shared(); + return getUInt64Type(); } /// We can safely get back to the start of buffer, because we read from a string and we didn't reach eof. 
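The date and datetime inference hunks above replace a float-parsing round trip with two cheap rejections before the real parser runs: a length window and an "all digits" check. Here is a standalone sketch of that shape for the Date case; the 8–10 character bounds come from the patch, while `tryParseDate` is only a stub standing in for the actual text parser.

```cpp
#include <algorithm>
#include <cctype>
#include <string_view>

// Stub; the real code calls the Date text parser (tryReadDateText).
bool tryParseDate(std::string_view) { return true; }

bool looksLikeDate(std::string_view field)
{
    // Date text is between 8 (YYYY-M-D) and 10 (YYYY-MM-DD) characters long.
    if (field.size() < 8 || field.size() > 10)
        return false;

    // A pure number such as 20000101 should stay Int64/UInt64, not become a Date.
    if (std::all_of(field.begin(), field.end(),
                    [](unsigned char c) { return std::isdigit(c) != 0; }))
        return false;

    return tryParseDate(field);
}
```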
@@ -991,7 +1048,7 @@ namespace Float64 tmp; bool has_fractional; if (tryReadFloat(tmp, buf, settings, has_fractional) && buf.eof()) - return std::make_shared(); + return getFloat64Type(); return nullptr; } @@ -1015,7 +1072,7 @@ namespace if constexpr (is_json) { if (json_info->is_object_key) - return std::make_shared(); + return getStringType(); } if (auto type = tryInferDateOrDateTimeFromString(field, settings)) @@ -1033,7 +1090,7 @@ namespace } } - return std::make_shared(); + return getStringType(); } bool tryReadJSONObject(ReadBuffer & buf, const FormatSettings & settings, DataTypeJSONPaths::Paths & paths, const std::vector & path, JSONInferenceInfo * json_info, size_t depth) @@ -1190,7 +1247,7 @@ namespace return std::make_shared("json", true); if (settings.json.read_objects_as_strings) - return std::make_shared(); + return getStringType(); transformInferredTypesIfNeededImpl(value_types, settings, json_info); if (!checkIfTypesAreEqual(value_types)) @@ -1256,15 +1313,15 @@ namespace /// Bool if (checkStringCaseInsensitive("true", buf) || checkStringCaseInsensitive("false", buf)) - return DataTypeFactory::instance().get("Bool"); + return getBoolType(); /// Null or NaN if (checkCharCaseInsensitive('n', buf)) { if (checkStringCaseInsensitive("ull", buf)) - return makeNullable(std::make_shared()); + return getNullType(); else if (checkStringCaseInsensitive("an", buf)) - return std::make_shared(); + return getFloat64Type(); } /// Number @@ -1321,7 +1378,7 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F if (!remain_nothing_types && isNothing(data_type) && settings.json.infer_incomplete_types_as_strings) { - data_type = std::make_shared(); + data_type = getStringType(); return; } @@ -1338,7 +1395,7 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F /// If all objects were empty, use type String, so these JSON objects will be read as Strings. if (json_paths->empty() && settings.json.infer_incomplete_types_as_strings) { - data_type = std::make_shared(); + data_type = getStringType(); return; } @@ -1360,7 +1417,7 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F auto key_type = map_type->getKeyType(); /// If all inferred Maps are empty, use type String, so these JSON objects will be read as Strings. 
if (isNothing(key_type) && settings.json.infer_incomplete_types_as_strings) - key_type = std::make_shared(); + key_type = getStringType(); auto value_type = map_type->getValueType(); @@ -1437,10 +1494,10 @@ DataTypePtr tryInferJSONNumberFromString(std::string_view field, const FormatSet DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const FormatSettings & settings) { if (settings.try_infer_dates && tryInferDate(field)) - return std::make_shared(); + return getDateType(); if (settings.try_infer_datetimes && tryInferDateTime(field, settings)) - return std::make_shared(9); + return getDateTime64Type(); return nullptr; } diff --git a/src/Parsers/ASTObjectTypeArgument.cpp b/src/Parsers/ASTObjectTypeArgument.cpp index afe875586ad..aa5d23d7881 100644 --- a/src/Parsers/ASTObjectTypeArgument.cpp +++ b/src/Parsers/ASTObjectTypeArgument.cpp @@ -21,11 +21,6 @@ ASTPtr ASTObjectTypeArgument::clone() const res->skip_path = skip_path->clone(); res->children.push_back(res->skip_path); } - else if (skip_path_prefix) - { - res->skip_path_prefix = skip_path_prefix->clone(); - res->children.push_back(res->skip_path_prefix); - } else if (skip_path_regexp) { res->skip_path_regexp = skip_path_regexp->clone(); @@ -56,12 +51,6 @@ void ASTObjectTypeArgument::formatImpl(const FormatSettings & parameters, Format parameters.ostr << indent_str << "SKIP" << ' '; skip_path->formatImpl(parameters, state, frame); } - else if (skip_path_prefix) - { - std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); - parameters.ostr << indent_str << "SKIP PREFIX" << ' '; - skip_path_prefix->formatImpl(parameters, state, frame); - } else if (skip_path_regexp) { std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); diff --git a/src/Parsers/ASTObjectTypeArgument.h b/src/Parsers/ASTObjectTypeArgument.h index 3a6b2bcdd98..ab18d00d770 100644 --- a/src/Parsers/ASTObjectTypeArgument.h +++ b/src/Parsers/ASTObjectTypeArgument.h @@ -9,7 +9,6 @@ namespace DB /** An argument of Object data type declaration (for example for JSON). 
Can contain one of: * - pair (path, data type) * - path that should be skipped - * - path prefix for paths that should be skipped * - path regexp for paths that should be skipped * - setting in a form of `setting=N` */ @@ -18,7 +17,6 @@ class ASTObjectTypeArgument : public IAST public: ASTPtr path_with_type; ASTPtr skip_path; - ASTPtr skip_path_prefix; ASTPtr skip_path_regexp; ASTPtr parameter; diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index f4793c7fc27..0b40ace3ca8 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -69,25 +69,16 @@ private: argument->skip_path_regexp = literal; argument->children.push_back(argument->skip_path_regexp); } - /// SKIP PREFIX some.path.prefix or SKIP some.path + /// SKIP some.path else { - bool is_prefix = ParserKeyword(Keyword::PREFIX).ignore(pos); ParserCompoundIdentifier compound_identifier_parser; ASTPtr compound_identifier; if (!compound_identifier_parser.parse(pos, compound_identifier, expected)) return false; - if (is_prefix) - { - argument->skip_path_prefix = compound_identifier; - argument->children.push_back(argument->skip_path_prefix); - } - else - { - argument->skip_path = compound_identifier; - argument->children.push_back(argument->skip_path); - } + argument->skip_path = compound_identifier; + argument->children.push_back(argument->skip_path); } node = argument; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 27c52124e9c..eb251c75499 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -283,6 +283,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes)) node_copy.children.push_back(child_copy); + if (node_copy.children.empty()) return nullptr; From 4df94a0ef3f8af73328d0a8f45bb217cc70b2e45 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Jul 2024 14:47:52 +0000 Subject: [PATCH 0384/2361] cleanup for setting in mergetree --- src/Core/Settings.h | 1 - src/Core/SettingsChangesHistory.cpp | 1 - src/Interpreters/InterpreterDeleteQuery.cpp | 66 +++++-------------- src/Storages/IStorage.h | 3 - .../MergeTree/MergeMutateSelectedEntry.h | 7 +- src/Storages/MergeTree/MergeTreeData.cpp | 15 ----- src/Storages/MergeTree/MergeTreeData.h | 2 - .../MergeTree/MergeTreeMutationEntry.cpp | 4 +- .../MergeTree/MergeTreeMutationEntry.h | 7 +- src/Storages/MergeTree/MergeTreeSettings.h | 1 + .../MergeTree/MutatePlainMergeTreeTask.cpp | 1 - src/Storages/MergeTree/MutateTask.cpp | 10 --- src/Storages/StorageMergeTree.cpp | 10 +-- 13 files changed, 25 insertions(+), 103 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index f7b44ea775c..bafc3f93846 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -588,7 +588,6 @@ class IColumn; M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 
2 - wait all replicas if they exist.", 0) \ M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \ M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts.", 0) \ M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. This is useful for debugging and \"undelete\" scenarios", 0) \ M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \ M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 194292a467e..d6cc0112e0a 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -70,7 +70,6 @@ static std::initializer_listlockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = table->getInMemoryMetadataPtr(); - bool hasProjection = table->hasProjection(); - auto lightweightDelete = [&]() + if (table->supportsDelete()) + { + /// Convert to MutationCommand + MutationCommands mutation_commands; + MutationCommand mut_command; + + mut_command.type = MutationCommand::Type::DELETE; + mut_command.predicate = delete_query.predicate; + + mutation_commands.emplace_back(mut_command); + + table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); + MutationsInterpreter::Settings settings(false); + MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate(); + table->mutate(mutation_commands, getContext()); + return {}; + } + else if (table->supportsLightweightDelete()) { if (!getContext()->getSettingsRef().enable_lightweight_delete) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, @@ -89,54 +104,9 @@ BlockIO InterpreterDeleteQuery::execute() context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync)); InterpreterAlterQuery alter_interpreter(alter_ast, context); return alter_interpreter.execute(); - }; - - if (table->supportsDelete()) - { - /// Convert to MutationCommand - MutationCommands mutation_commands; - MutationCommand mut_command; - - mut_command.type = MutationCommand::Type::DELETE; - mut_command.predicate = delete_query.predicate; - - mutation_commands.emplace_back(mut_command); - - table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); - MutationsInterpreter::Settings settings(false); - MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate(); - table->mutate(mutation_commands, getContext()); - return {}; - } - else if (!hasProjection && table->supportsLightweightDelete()) - { - return lightweightDelete(); } else { - if (hasProjection) - { - auto context = Context::createCopy(getContext()); - auto mode = context->getSettingsRef().lightweight_mutation_projection_mode; - - if (mode == LightweightMutationProjectionMode::THROW) 
- { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DELETE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", - table->getStorageID().getFullTableName()); - } - else if (mode == LightweightMutationProjectionMode::DROP) - { - return lightweightDelete(); - } - else - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Unrecognized lightweight_mutation_projection_mode, only throw and drop are allowed."); - } - } - throw Exception(ErrorCodes::BAD_ARGUMENTS, "DELETE query is not supported for table {}", table->getStorageID().getFullTableName()); diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 6217470780d..991c8ff64af 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -262,9 +262,6 @@ public: /// Return true if storage can execute lightweight delete mutations. virtual bool supportsLightweightDelete() const { return false; } - /// Return true if storage has any projection. - virtual bool hasProjection() const { return false; } - /// Return true if storage can execute 'DELETE FROM' mutations. This is different from lightweight delete /// because those are internally translated into 'ALTER UDPATE' mutations. virtual bool supportsDelete() const { return false; } diff --git a/src/Storages/MergeTree/MergeMutateSelectedEntry.h b/src/Storages/MergeTree/MergeMutateSelectedEntry.h index f75d10d9ecb..e7efe00741c 100644 --- a/src/Storages/MergeTree/MergeMutateSelectedEntry.h +++ b/src/Storages/MergeTree/MergeMutateSelectedEntry.h @@ -2,7 +2,7 @@ #include #include -#include + namespace DB { @@ -41,15 +41,12 @@ struct MergeMutateSelectedEntry CurrentlyMergingPartsTaggerPtr tagger; MutationCommandsConstPtr commands; MergeTreeTransactionPtr txn; - LightweightMutationProjectionMode lightweight_delete_projection_mode; MergeMutateSelectedEntry(FutureMergedMutatedPartPtr future_part_, CurrentlyMergingPartsTaggerPtr tagger_, - MutationCommandsConstPtr commands_, const MergeTreeTransactionPtr & txn_ = NO_TRANSACTION_PTR, - const LightweightMutationProjectionMode & lightweight_delete_projection_mode_ = LightweightMutationProjectionMode::THROW) + MutationCommandsConstPtr commands_, const MergeTreeTransactionPtr & txn_ = NO_TRANSACTION_PTR) : future_part(future_part_) , tagger(std::move(tagger_)) , commands(commands_) , txn(txn_) - , lightweight_delete_projection_mode(lightweight_delete_projection_mode_) {} }; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index e31f6db5409..5182147350e 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6158,21 +6158,6 @@ bool MergeTreeData::supportsLightweightDelete() const return true; } -bool MergeTreeData::hasProjection() const -{ - auto lock = lockParts(); - for (const auto & part : data_parts_by_info) - { - if (part->getState() == MergeTreeDataPartState::Outdated - || part->getState() == MergeTreeDataPartState::Deleting) - continue; - - if (part->hasProjection()) - return true; - } - return false; -} - MergeTreeData::ProjectionPartsVector MergeTreeData::getAllProjectionPartsVector(MergeTreeData::DataPartStateVector * out_states) const { ProjectionPartsVector res; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index c8b721038c6..7d216f989c1 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -439,8 +439,6 @@ public: bool supportsLightweightDelete() const override; 
- bool hasProjection() const override; - bool areAsynchronousInsertsEnabled() const override { return getSettings()->async_insert; } bool supportsTrivialCountOptimization(const StorageSnapshotPtr &, ContextPtr) const override; diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index d1bd8efa7a5..4dbccb91620 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -48,8 +48,7 @@ UInt64 MergeTreeMutationEntry::parseFileName(const String & file_name_) } MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk_, const String & path_prefix_, UInt64 tmp_number, - const TransactionID & tid_, const WriteSettings & settings, - const LightweightMutationProjectionMode & lightweight_delete_projection_mode_) + const TransactionID & tid_, const WriteSettings & settings) : create_time(time(nullptr)) , commands(std::move(commands_)) , disk(std::move(disk_)) @@ -57,7 +56,6 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskP , file_name("tmp_mutation_" + toString(tmp_number) + ".txt") , is_temp(true) , tid(tid_) - , lightweight_delete_projection_mode(lightweight_delete_projection_mode_) { try { diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h index dbb17654ddd..04297f2852a 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -5,7 +5,6 @@ #include #include #include -#include namespace DB @@ -37,13 +36,9 @@ struct MergeTreeMutationEntry /// or UnknownCSN if it's not committed (yet) or RolledBackCSN if it's rolled back or PrehistoricCSN if there is no transaction. CSN csn = Tx::UnknownCSN; - /// From query context. - LightweightMutationProjectionMode lightweight_delete_projection_mode; - /// Create a new entry and write it to a temporary file. MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, UInt64 tmp_number, - const TransactionID & tid_, const WriteSettings & settings, - const LightweightMutationProjectionMode & lightweight_delete_projection_mode_); + const TransactionID & tid_, const WriteSettings & settings); MergeTreeMutationEntry(const MergeTreeMutationEntry &) = delete; MergeTreeMutationEntry(MergeTreeMutationEntry &&) = default; diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index c0afd781c7e..a458a21ca1b 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -215,6 +215,7 @@ struct Settings; M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ /** Projection settings. 
*/ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ + M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE) diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 19aa63d90a2..20f387137e7 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -140,7 +140,6 @@ ContextMutablePtr MutatePlainMergeTreeTask::createTaskContext() const auto queryId = getQueryId(); context->setCurrentQueryId(queryId); context->setBackgroundOperationTypeForContext(ClientInfo::BackgroundOperationType::MUTATION); - // context->setSetting("lightweight_mutation_projection_mode", merge_mutate_entry->lightweight_delete_projection_mode); return context; } diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index ed603abd9c3..0734174dbef 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2200,7 +2200,6 @@ bool MutateTask::prepare() context_for_reading->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false); context_for_reading->setSetting("max_streams_for_merge_tree_reading", Field(0)); context_for_reading->setSetting("read_from_filesystem_cache_if_exists_otherwise_bypass_cache", 1); - context_for_reading->setSetting("lightweight_mutation_projection_mode", Field(ctx->context->getSettingsRef().lightweight_mutation_projection_mode)); MutationHelpers::splitAndModifyMutationCommands( ctx->source_part, ctx->metadata_snapshot, @@ -2225,15 +2224,6 @@ bool MutateTask::prepare() ctx->mutating_pipeline_builder = ctx->interpreter->execute(); ctx->updated_header = ctx->interpreter->getUpdatedHeader(); ctx->progress_callback = MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress); - - // ctx->updated_header.has(RowExistsColumn::name); - // for (const auto & projection : ctx->metadata_snapshot->getProjections()) - // { - // if (!ctx->source_part->hasProjection(projection.name)) - // continue; - - // ctx->materialized_projections.insert(projection.name); - // } } auto single_disk_volume = std::make_shared("volume_" + ctx->future_part->name, ctx->space_reservation->getDisk(), 0); diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 7f210779916..8404e5c9cd9 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -517,8 +517,7 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context { std::lock_guard lock(currently_processing_in_background_mutex); - MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings(), - query_context->getSettingsRef().lightweight_mutation_projection_mode); + MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings()); version = increment.get(); entry.commit(version); String mutation_id = entry.file_name; @@ -1284,17 +1283,12 @@ 
MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( size_t current_ast_elements = 0; auto last_mutation_to_apply = mutations_end_it; - /// Trying to grab it from query context. - LightweightMutationProjectionMode lightweight_delete_projection_mode = LightweightMutationProjectionMode::THROW; - for (auto it = mutations_begin_it; it != mutations_end_it; ++it) { /// Do not squash mutations from different transactions to be able to commit/rollback them independently. if (first_mutation_tid != it->second.tid) break; - lightweight_delete_projection_mode = it->second.lightweight_delete_projection_mode; - size_t commands_size = 0; MutationCommands commands_for_size_validation; for (const auto & command : it->second.commands) @@ -1371,7 +1365,7 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( future_part->part_format = part->getFormat(); tagger = std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace({part}, false), *this, metadata_snapshot, true); - return std::make_shared(future_part, std::move(tagger), commands, txn, lightweight_delete_projection_mode); + return std::make_shared(future_part, std::move(tagger), commands, txn); } } From 22b37d526bacfa281372211a81a3daf1518ca5d6 Mon Sep 17 00:00:00 2001 From: Blargian Date: Mon, 15 Jul 2024 17:00:47 +0200 Subject: [PATCH 0385/2361] update joingGet and add joinGetOrNull --- .../functions/other-functions.md | 138 +++++++++++++++--- 1 file changed, 121 insertions(+), 17 deletions(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 260457b3be1..12d082fe0f3 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -2449,11 +2449,11 @@ As you can see, `runningAccumulate` merges states for each group of rows separat ## joinGet -The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md). - -Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key. +The function lets you extract data from the table the same way as from a [dictionary](../../sql-reference/dictionaries/index.md). Gets the data from [Join](../../engines/table-engines/special/join.md#creating-a-table) tables using the specified join key. +:::note Only supports tables created with the `ENGINE = Join(ANY, LEFT, )` statement. +::: **Syntax** @@ -2463,26 +2463,32 @@ joinGet(join_storage_table_name, `value_column`, join_keys) **Arguments** -- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. The identifier is searched in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example. +- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. - `value_column` — name of the column of the table that contains required data. - `join_keys` — list of keys. +:::note +The identifier is searched for in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example. 
+::: + **Returned value** -Returns a list of values corresponded to list of keys. - -If certain does not exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting. +- Returns a list of values corresponded to the list of keys. +:::note +If a certain key does not exist in source table then `0` or `null` will be returned based on [join_use_nulls](../../operations/settings/settings.md#join_use_nulls) setting during table creation. More info about `join_use_nulls` in [Join operation](../../engines/table-engines/special/join.md). +::: **Example** Input table: ```sql -CREATE DATABASE db_test -CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1 -INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) +CREATE DATABASE db_test; +CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id); +INSERT INTO db_test.id_val VALUES (1, 11)(2, 12)(4, 13); +SELECT * FROM db_test.id_val; ``` ```text @@ -2496,18 +2502,116 @@ INSERT INTO db_test.id_val VALUES (1,11)(2,12)(4,13) Query: ```sql -SELECT joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4) SETTINGS join_use_nulls = 1 +SELECT number, joinGet(db_test.id_val, 'val', toUInt32(number)) from numbers(4); ``` Result: ```text -┌─joinGet(db_test.id_val, 'val', toUInt32(number))─┠-│ 0 │ -│ 11 │ -│ 12 │ -│ 0 │ -└──────────────────────────────────────────────────┘ + ┌─number─┬─joinGet('db_test.id_val', 'val', toUInt32(number))─┠+1. │ 0 │ 0 │ +2. │ 1 │ 11 │ +3. │ 2 │ 12 │ +4. │ 3 │ 0 │ + └────────┴────────────────────────────────────────────────────┘ +``` + +Setting `join_use_nulls` can be used during table creation to change the behaviour of what gets returned if no key exists in the source table. + +```sql +CREATE DATABASE db_test; +CREATE TABLE db_test.id_val_nulls(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls=1; +INSERT INTO db_test.id_val_nulls VALUES (1, 11)(2, 12)(4, 13); +SELECT * FROM db_test.id_val_nulls; +``` + +```text +┌─id─┬─val─┠+│ 4 │ 13 │ +│ 2 │ 12 │ +│ 1 │ 11 │ +└────┴─────┘ +``` + +Query: + +```sql +SELECT number, joinGet(db_test.id_val_nulls, 'val', toUInt32(number)) from numbers(4); +``` + +Result: + +```text + ┌─number─┬─joinGet('db_test.id_val_nulls', 'val', toUInt32(number))─┠+1. │ 0 │ á´ºáµá´¸á´¸ │ +2. │ 1 │ 11 │ +3. │ 2 │ 12 │ +4. │ 3 │ á´ºáµá´¸á´¸ │ + └────────┴──────────────────────────────────────────────────────────┘ +``` + +## joinGetOrNull + +Like [joinGet](#joinget) but returns `NULL` when the key is missing instead of returning the default value. + +**Syntax** + +```sql +joinGetOrNull(join_storage_table_name, `value_column`, join_keys) +``` + +**Arguments** + +- `join_storage_table_name` — an [identifier](../../sql-reference/syntax.md#syntax-identifiers) indicating where the search is performed. +- `value_column` — name of the column of the table that contains required data. +- `join_keys` — list of keys. + +:::note +The identifier is searched for in the default database (see setting `default_database` in the config file). To override the default database, use `USE db_name` or specify the database and the table through the separator `db_name.db_table` as in the example. +::: + +**Returned value** + +- Returns a list of values corresponded to the list of keys. + +:::note +If a certain key does not exist in source table then `NULL` is returned for that key. 
+::: + +**Example** + +Input table: + +```sql +CREATE DATABASE db_test; +CREATE TABLE db_test.id_val(`id` UInt32, `val` UInt32) ENGINE = Join(ANY, LEFT, id); +INSERT INTO db_test.id_val VALUES (1, 11)(2, 12)(4, 13); +SELECT * FROM db_test.id_val; +``` + +```text +┌─id─┬─val─┠+│ 4 │ 13 │ +│ 2 │ 12 │ +│ 1 │ 11 │ +└────┴─────┘ +``` + +Query: + +```sql +SELECT number, joinGetOrNull(db_test.id_val, 'val', toUInt32(number)) from numbers(4); +``` + +Result: + +```text + ┌─number─┬─joinGetOrNull('db_test.id_val', 'val', toUInt32(number))─┠+1. │ 0 │ á´ºáµá´¸á´¸ │ +2. │ 1 │ 11 │ +3. │ 2 │ 12 │ +4. │ 3 │ á´ºáµá´¸á´¸ │ + └────────┴──────────────────────────────────────────────────────────┘ ``` ## catboostEvaluate From 283de9dc4aca62d18d299bff80cf421c22d02ada Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 15 Jul 2024 15:00:52 +0000 Subject: [PATCH 0386/2361] Fix JSON type name creation --- src/DataTypes/DataTypeObject.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 751a19beca5..02065bfcc97 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -168,13 +168,13 @@ String DataTypeObject::doGetName() const for (const auto & skip_path : sorted_skip_paths) { write_separator(); - out << "SKIP " << skip_path; + out << "SKIP " << backQuoteIfNeed(skip_path); } for (const auto & skip_regexp : path_regexps_to_skip) { write_separator(); - out << "SKIP REGEXP " << skip_regexp; + out << "SKIP REGEXP " << quoteString(skip_regexp); } if (!first) From 941898a6d9a44487ecdb07eb2e0eb189b79e42e4 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 15 Jul 2024 15:02:12 +0000 Subject: [PATCH 0387/2361] Update documentation --- .../data-types/data-types-binary-encoding.md | 101 +++++++++--------- docs/en/sql-reference/data-types/json.md | 11 +- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/docs/en/sql-reference/data-types/data-types-binary-encoding.md b/docs/en/sql-reference/data-types/data-types-binary-encoding.md index 812e946e43e..bbd47d6f620 100644 --- a/docs/en/sql-reference/data-types/data-types-binary-encoding.md +++ b/docs/en/sql-reference/data-types/data-types-binary-encoding.md @@ -12,56 +12,57 @@ This specification describes the binary format that can be used for binary encod The table below describes how each data type is represented in binary format. Each data type encoding consist of 1 byte that indicates the type and some optional additional information. `var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression. 
-| ClickHouse data type | Binary encoding | -|--------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Nothing` | `0x00` | -| `UInt8` | `0x01` | -| `UInt16` | `0x02` | -| `UInt32` | `0x03` | -| `UInt64` | `0x04` | -| `UInt128` | `0x05` | -| `UInt256` | `0x06` | -| `Int8` | `0x07` | -| `Int16` | `0x08` | -| `Int32` | `0x09` | -| `Int64` | `0x0A` | -| `Int128` | `0x0B` | -| `Int256` | `0x0C` | -| `Float32` | `0x0D` | -| `Float64` | `0x0E` | -| `Date` | `0x0F` | -| `Date32` | `0x10` | -| `DateTime` | `0x11` | -| `DateTime(time_zone)` | `0x12` | -| `DateTime64(P)` | `0x13` | -| `DateTime64(P, time_zone)` | `0x14` | -| `String` | `0x15` | -| `FixedString(N)` | `0x16` | -| `Enum8` | `0x17...` | -| `Enum16` | `0x18...>` | -| `Decimal32(P, S)` | `0x19` | -| `Decimal64(P, S)` | `0x1A` | -| `Decimal128(P, S)` | `0x1B` | -| `Decimal256(P, S)` | `0x1C` | -| `UUID` | `0x1D` | -| `Array(T)` | `0x1E` | -| `Tuple(T1, ..., TN)` | `0x1F...` | -| `Tuple(name1 T1, ..., nameN TN)` | `0x20...` | -| `Set` | `0x21` | -| `Interval` | `0x22` (see [interval kind binary encoding](#interval-kind-binary-encoding)) | -| `Nullable(T)` | `0x23` | -| `Function` | `0x24...` | -| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | -| `LowCardinality(T)` | `0x26` | -| `Map(K, V)` | `0x27` | -| `IPv4` | `0x28` | -| `IPv6` | `0x29` | -| `Variant(T1, ..., TN)` | `0x2A...` | -| `Dynamic(max_types=N)` | `0x2B` | -| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C` | -| `Bool` | `0x2D` | -| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | -| `Nested(name1 T1, ..., nameN TN)` | `0x2F...` | +| ClickHouse data type | Binary encoding | +|-----------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Nothing` | `0x00` | +| `UInt8` | `0x01` | +| `UInt16` | `0x02` | +| `UInt32` | `0x03` | +| `UInt64` | `0x04` | +| `UInt128` | `0x05` | +| `UInt256` | `0x06` | +| `Int8` | `0x07` | +| `Int16` | `0x08` | +| `Int32` | `0x09` | +| `Int64` | `0x0A` | +| `Int128` | `0x0B` | +| `Int256` | `0x0C` | +| `Float32` | `0x0D` | +| `Float64` | `0x0E` | +| `Date` | `0x0F` | +| `Date32` | `0x10` | +| `DateTime` | `0x11` | +| `DateTime(time_zone)` | `0x12` | +| `DateTime64(P)` | `0x13` | +| `DateTime64(P, time_zone)` | `0x14` | +| `String` | `0x15` | +| `FixedString(N)` | `0x16` | +| `Enum8` | `0x17...` | +| `Enum16` | `0x18...>` | +| `Decimal32(P, S)` | `0x19` | +| `Decimal64(P, S)` | `0x1A` | +| `Decimal128(P, S)` | `0x1B` | +| `Decimal256(P, S)` | `0x1C` | +| `UUID` | `0x1D` | +| `Array(T)` | `0x1E` | +| `Tuple(T1, ..., TN)` | `0x1F...` | +| 
`Tuple(name1 T1, ..., nameN TN)` | `0x20...` | +| `Set` | `0x21` | +| `Interval` | `0x22` (see [interval kind binary encoding](#interval-kind-binary-encoding)) | +| `Nullable(T)` | `0x23` | +| `Function` | `0x24...` | +| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | +| `LowCardinality(T)` | `0x26` | +| `Map(K, V)` | `0x27` | +| `IPv4` | `0x28` | +| `IPv6` | `0x29` | +| `Variant(T1, ..., TN)` | `0x2A...` | +| `Dynamic(max_types=N)` | `0x2B` | +| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C` | +| `Bool` | `0x2D` | +| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | +| `Nested(name1 T1, ..., nameN TN)` | `0x2F...` | +| `JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp)` | `0x30.........` | ### Interval kind binary encoding diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index 23bb57e5a6c..be75f909684 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -16,14 +16,13 @@ If you want to use JSON type, set `allow_experimental_json_type = 1`. To declare a column of `JSON` type, use the following syntax: ``` sql - JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP PREFIX path.prefix.to.skip, SKIP REGEXP 'paths_regexp') + JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP REGEXP 'paths_regexp') ``` Where: - `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1000`. - `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`. - `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type. -- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. -- `SKIP PREFIX path.prefix.to.skip` is an optional hint for particular path prefix that should be skipped during JSON parsing. All paths with such prefix will never be stored in the JSON column. +- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. If specified path is a nested JSON object, the whole nested object will be skipped. - `SKIP REGEXP 'path_regexp'` is an optional hint with a regular expression that is used to skip paths during JSON parsing. All paths that match this regular expression will never be stored in the JSON column. 
## Creating JSON @@ -345,7 +344,7 @@ All text formats (JSONEachRow, TSV, CSV, CustomSeparated, Values, etc) supports Examples: ```sql -SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP PREFIX d.e, SKIP REGEXP \'b.*\')', ' +SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP d.e, SKIP REGEXP \'b.*\')', ' {"json" : {"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}} {"json" : {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}} {"json" : {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}} @@ -367,7 +366,7 @@ SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP P For text formats like CSV/TSV/etc `JSON` is parsed from a string containing JSON object ```sql -SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP PREFIX d.e, SKIP REGEXP \'b.*\')', +SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP REGEXP \'b.*\')', '{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}} {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}} {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"} @@ -478,5 +477,5 @@ As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and move Before creating `JSON` column and loading data into it, consider the following tips: - Investigate your data and specify as many path hints with types as you can. It will make the storage and the reading much more efficient. -- Think about what paths you will need and what paths you will never need. Specify paths that you won't need in the SKIP section, use SKIP PREFIX and SKIP REGEXP if needed. It will improve the storage. +- Think about what paths you will need and what paths you will never need. Specify paths that you won't need in the SKIP section and SKIP REGEXP if needed. It will improve the storage. - Don't set `max_dynamic_paths` parameter to very high values, it can make the storage and reading less efficient. From 908f5899ddfdc701df5e9e6189760431e88b6695 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:28:33 +0200 Subject: [PATCH 0388/2361] Add settings to replace external engines to Null --- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.cpp | 3 +- src/Interpreters/InterpreterCreateQuery.cpp | 40 +++++++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 52fa28a4481..7bf97896357 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -891,6 +891,7 @@ class IColumn; M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \ M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \ + M(Bool, restore_replace_external_engine_to_null, false, "Replace all the External table engines to Null on restore. Useful for testing purposes", 0) \ \ \ /* ###################################### */ \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index b9b72209103..a23d9d17da2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -78,7 +78,8 @@ static std::initializer_listno_empty_args = true; storage.set(storage.engine, engine_ast); } + + void setNullTableEngine(ASTStorage &storage) + { + auto engine_ast = std::make_shared(); + engine_ast->name = "Null"; + engine_ast->no_empty_args = true; + storage.set(storage.engine, engine_ast); + } } void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const @@ -1000,6 +1008,38 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. if (!create.storage->engine) setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); + /// For exrternal tables with restore_replace_external_engine_to_null setting we replace external engines to + /// Null table engine. + else (create.storage->engine == "AzureBlobStorage" || + create.storage->engine == "AzureQueue" || + create.storage->engine == "COSN" || + create.storage->engine == "DeltaLake" || + create.storage->engine == "Dictionary" || + create.storage->engine == "Executable" || + create.storage->engine == "ExecutablePool" || + create.storage->engine == "ExternalDistributed" || + create.storage->engine == "File" || + create.storage->engine == "Hudi" || + create.storage->engine == "Iceberg" || + create.storage->engine == "JDBC" || + create.storage->engine == "Kafka" || + create.storage->engine == "MaterializedPostgreSQL" || + create.storage->engine == "MongoDB" || + create.storage->engine == "MySQL" || + create.storage->engine == "NATS" || + create.storage->engine == "ODBC" || + create.storage->engine == "OSS" || + create.storage->engine == "PostgreSQL" || + create.storage->engine == "RabbitMQ" || + create.storage->engine == "Redis" || + create.storage->engine == "S3" || + create.storage->engine == "S3Queue" || + create.storage->engine == "TinyLog" || + create.storage->engine == "URL") + { + if (getContext()->getSettingsRef().restore_replace_external_engine_to_null) + setNullTableEngine(*create.storage) + } return; } From 68ed5767d795e7b5792fed839198f53d43581c47 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Jul 2024 15:31:17 +0000 Subject: [PATCH 0389/2361] fix merge problem --- src/Storages/MergeTree/MergeTreeData.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 125b2c8c513..38ca0aed9da 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6160,6 +6160,11 @@ bool MergeTreeData::supportsLightweightDelete() const return true; } +bool MergeTreeData::areAsynchronousInsertsEnabled() const +{ + return getSettings()->async_insert; +} + MergeTreeData::ProjectionPartsVector MergeTreeData::getAllProjectionPartsVector(MergeTreeData::DataPartStateVector * out_states) const { ProjectionPartsVector res; From 344e5b716d49eda59783b6fff85757e6a5b6e98f Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Mon, 15 Jul 
2024 17:41:32 +0200 Subject: [PATCH 0390/2361] cancel method is noexcept --- src/Common/ConcurrentBoundedQueue.h | 2 +- .../Executors/CompletedPipelineExecutor.cpp | 2 -- .../Formats/Impl/ArrowBlockInputFormat.h | 2 +- .../Formats/Impl/DWARFBlockInputFormat.h | 2 +- .../Formats/Impl/NativeORCBlockInputFormat.h | 2 +- .../Formats/Impl/ORCBlockInputFormat.h | 2 +- .../Impl/ParallelFormattingOutputFormat.cpp | 2 +- .../Impl/ParallelFormattingOutputFormat.h | 4 ++-- .../Impl/ParallelParsingInputFormat.cpp | 2 +- .../Formats/Impl/ParallelParsingInputFormat.h | 4 ++-- .../Formats/Impl/ParquetBlockInputFormat.h | 2 +- .../Formats/Impl/ParquetBlockOutputFormat.cpp | 2 +- .../Formats/Impl/ParquetBlockOutputFormat.h | 2 +- .../Formats/Impl/ParquetMetadataInputFormat.h | 2 +- src/Processors/Formats/LazyOutputFormat.h | 2 +- src/Processors/IProcessor.cpp | 4 ++-- src/Processors/IProcessor.h | 4 ++-- src/Processors/Sources/RemoteSource.cpp | 13 ++++++++++-- src/Processors/Sources/RemoteSource.h | 2 +- .../Transforms/AggregatingTransform.cpp | 2 +- src/Server/TCPHandler.cpp | 1 - src/Storages/Distributed/DistributedSink.cpp | 20 +++++++++++++++---- src/Storages/Distributed/DistributedSink.h | 2 +- src/Storages/LiveView/LiveViewEventsSource.h | 2 +- src/Storages/LiveView/LiveViewSource.h | 2 +- .../MergeTree/MergeTreeSelectProcessor.h | 2 +- src/Storages/MergeTree/MergeTreeSink.cpp | 5 ----- src/Storages/MergeTree/MergeTreeSource.cpp | 2 +- src/Storages/MergeTree/MergeTreeSource.h | 2 +- .../MergeTree/ReplicatedMergeTreeSink.cpp | 16 --------------- src/Storages/MessageQueueSink.cpp | 13 ++++++++++++ src/Storages/MessageQueueSink.h | 6 +----- .../StorageObjectStorageSink.cpp | 2 +- 33 files changed, 70 insertions(+), 64 deletions(-) diff --git a/src/Common/ConcurrentBoundedQueue.h b/src/Common/ConcurrentBoundedQueue.h index 16b9488c98d..a830ae157a5 100644 --- a/src/Common/ConcurrentBoundedQueue.h +++ b/src/Common/ConcurrentBoundedQueue.h @@ -243,7 +243,7 @@ public: } /// Clear and finish queue - void clearAndFinish() + void clearAndFinish() noexcept { { std::lock_guard lock(queue_mutex); diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp index 909e742ffbf..1eeee896ede 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.cpp +++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp @@ -99,7 +99,6 @@ void CompletedPipelineExecutor::execute() if (is_cancelled_callback()) { - LOG_INFO(getLogger("CompletedPipelineExecutor"), "execute CancelCallback FULLY_CANCELLED"); data->executor->cancel(); } } @@ -121,7 +120,6 @@ CompletedPipelineExecutor::~CompletedPipelineExecutor() { if (data && data->executor) { - LOG_INFO(getLogger("CompletedPipelineExecutor"), "~CompletedPipelineExecutor"); data->executor->cancel(); } } diff --git a/src/Processors/Formats/Impl/ArrowBlockInputFormat.h b/src/Processors/Formats/Impl/ArrowBlockInputFormat.h index 4fe01d0be12..cb74a9dd93e 100644 --- a/src/Processors/Formats/Impl/ArrowBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ArrowBlockInputFormat.h @@ -32,7 +32,7 @@ public: private: Chunk read() override; - void onCancelX() override + void onCancel() noexcept override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/DWARFBlockInputFormat.h b/src/Processors/Formats/Impl/DWARFBlockInputFormat.h index 6cab5d34994..2d94d166708 100644 --- a/src/Processors/Formats/Impl/DWARFBlockInputFormat.h +++ b/src/Processors/Formats/Impl/DWARFBlockInputFormat.h @@ -32,7 +32,7 @@ public: protected: 
Chunk read() override; - void onCancelX() override + void onCancel() noexcept override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h index de9925e3737..e4f2ef9ebe3 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.h @@ -64,7 +64,7 @@ public: protected: Chunk read() override; - void onCancelX() override { is_stopped = 1; } + void onCancel() noexcept override { is_stopped = 1; } private: void prepareFileReader(); diff --git a/src/Processors/Formats/Impl/ORCBlockInputFormat.h b/src/Processors/Formats/Impl/ORCBlockInputFormat.h index 167436ad4b9..85f1636d3dc 100644 --- a/src/Processors/Formats/Impl/ORCBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ORCBlockInputFormat.h @@ -34,7 +34,7 @@ public: protected: Chunk read() override; - void onCancelX() override + void onCancel() noexcept override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp index b2871310be5..5d404d493a6 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.cpp @@ -96,7 +96,7 @@ namespace DB } - void ParallelFormattingOutputFormat::finishAndWait() + void ParallelFormattingOutputFormat::finishAndWait() noexcept { emergency_stop = true; diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 40774fcfbfa..54617c77f86 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -122,7 +122,7 @@ public: started_prefix = true; } - void onCancelX() override + void onCancel() noexcept override { finishAndWait(); } @@ -268,7 +268,7 @@ private: bool collected_suffix = false; bool collected_finalize = false; - void finishAndWait(); + void finishAndWait() noexcept; void onBackgroundException() { diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp index d38a299cb6e..447adb1ed48 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.cpp @@ -176,7 +176,7 @@ Chunk ParallelParsingInputFormat::read() if (background_exception) { lock.unlock(); - onCancelX(); + onCancel(); std::rethrow_exception(background_exception); } diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index eed40dc43e5..b97bf5213e6 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -137,7 +137,7 @@ private: Chunk read() final; - void onCancelX() final + void onCancel() noexcept final { /* * The format parsers themselves are not being cancelled here, so we'll @@ -292,7 +292,7 @@ private: first_parser_finished.wait(); } - void finishAndWait() + void finishAndWait() noexcept { /// Defending concurrent segmentator thread join std::lock_guard finish_and_wait_lock(finish_and_wait_mutex); diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index 0123329f026..ed528cc077c 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ 
b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -68,7 +68,7 @@ public: private: Chunk read() override; - void onCancelX() override + void onCancel() noexcept override { is_stopped = 1; } diff --git a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp index d08c91d286b..01fb97223f1 100644 --- a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp @@ -268,7 +268,7 @@ void ParquetBlockOutputFormat::resetFormatterImpl() staging_bytes = 0; } -void ParquetBlockOutputFormat::onCancelX() +void ParquetBlockOutputFormat::onCancel() noexcept { is_stopped = true; } diff --git a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h index 0704ba0ed90..f8f5d2556a5 100644 --- a/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h @@ -112,7 +112,7 @@ private: void consume(Chunk) override; void finalizeImpl() override; void resetFormatterImpl() override; - void onCancelX() override; + void onCancel() noexcept override; void writeRowGroup(std::vector chunks); void writeUsingArrow(std::vector chunks); diff --git a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h index 35180d202d8..5d2d8989859 100644 --- a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.h @@ -65,7 +65,7 @@ public: private: Chunk read() override; - void onCancelX() override + void onCancel() noexcept override { is_stopped = 1; } diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index a245df8234d..c803ed5dc61 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -29,7 +29,7 @@ public: void setRowsBeforeLimit(size_t rows_before_limit) override; - void onCancelX() override + void onCancel() noexcept override { queue.clearAndFinish(); } diff --git a/src/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp index 4d95bb5f3e0..edb4d662d8b 100644 --- a/src/Processors/IProcessor.cpp +++ b/src/Processors/IProcessor.cpp @@ -9,14 +9,14 @@ namespace DB { -void IProcessor::cancel() +void IProcessor::cancel() noexcept { bool already_cancelled = is_cancelled.exchange(true, std::memory_order_acq_rel); if (already_cancelled) return; - onCancelX(); + onCancel(); } String IProcessor::debug() const diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 5f4d71fbf54..680a446173e 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -238,7 +238,7 @@ public: /// In case if query was cancelled executor will wait till all processors finish their jobs. /// Generally, there is no reason to check this flag. However, it may be reasonable for long operations (e.g. i/o). bool isCancelled() const { return is_cancelled.load(std::memory_order_acquire); } - void cancel(); + void cancel() noexcept; /// Additional method which is called in case if ports were updated while work() method. /// May be used to stop execution in rare cases. 
@@ -363,7 +363,7 @@ public: virtual void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr /* counter */) {} protected: - virtual void onCancelX() {} + virtual void onCancel() noexcept {} std::atomic is_cancelled{false}; diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 44cf26e0b01..42696f9c3ce 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -4,6 +4,8 @@ #include #include #include +#include +#include namespace DB { @@ -182,9 +184,16 @@ std::optional RemoteSource::tryGenerate() return chunk; } -void RemoteSource::onCancelX() +void RemoteSource::onCancel() noexcept { - query_executor->cancel(); + try + { + query_executor->cancel(); + } + catch (...) + { + tryLogCurrentException(getLogger("RemoteSource"), "Error occurs on cancelation."); + } } void RemoteSource::onUpdatePorts() diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index 880eb234bfb..adf1b8e9fac 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -36,7 +36,7 @@ public: protected: std::optional tryGenerate() override; - void onCancelX() override; + void onCancel() noexcept override; private: bool was_query_sent = false; diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index e42c1fd3a8d..870ba84722d 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -375,7 +375,7 @@ public: return prepareTwoLevel(); } - void onCancelX() override + void onCancel() noexcept override { shared_data->is_cancelled.store(true, std::memory_order_seq_cst); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index ba12cad7771..e7342ff9b6f 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -557,7 +557,6 @@ void TCPHandler::runImpl() if (getQueryCancellationStatus() == CancellationStatus::FULLY_CANCELLED) { - LOG_INFO(log, "CancelCallback FULLY_CANCELLED"); return true; } diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index 6283594e0d2..d67910a141f 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -596,7 +596,7 @@ void DistributedSink::onFinish() } } -void DistributedSink::onCancelX() +void DistributedSink::onCancel() noexcept { std::lock_guard lock(execution_mutex); if (pool && !pool->finished()) @@ -607,14 +607,26 @@ void DistributedSink::onCancelX() } catch (...) { - tryLogCurrentException(storage.log); + tryLogCurrentException(storage.log, "Error occurs on cancelation."); } } for (auto & shard_jobs : per_shard_jobs) + { for (JobReplica & job : shard_jobs.replicas_jobs) - if (job.executor) - job.executor->cancel(); + { + try + { + if (job.executor) + job.executor->cancel(); + } + catch (...) 
+ { + tryLogCurrentException(storage.log, "Error occurs on cancelation."); + } + } + } + } diff --git a/src/Storages/Distributed/DistributedSink.h b/src/Storages/Distributed/DistributedSink.h index 1dac3eeba6d..65a5eb52787 100644 --- a/src/Storages/Distributed/DistributedSink.h +++ b/src/Storages/Distributed/DistributedSink.h @@ -53,7 +53,7 @@ public: void onFinish() override; private: - void onCancelX() override; + void onCancel() noexcept override; IColumn::Selector createSelector(const Block & source_block) const; diff --git a/src/Storages/LiveView/LiveViewEventsSource.h b/src/Storages/LiveView/LiveViewEventsSource.h index d1ed222c185..4210acbc5bc 100644 --- a/src/Storages/LiveView/LiveViewEventsSource.h +++ b/src/Storages/LiveView/LiveViewEventsSource.h @@ -54,7 +54,7 @@ public: String getName() const override { return "LiveViewEventsSource"; } - void onCancelX() override + void onCancel() noexcept override { if (storage->shutdown_called) return; diff --git a/src/Storages/LiveView/LiveViewSource.h b/src/Storages/LiveView/LiveViewSource.h index 83589067cf5..81dd5620e57 100644 --- a/src/Storages/LiveView/LiveViewSource.h +++ b/src/Storages/LiveView/LiveViewSource.h @@ -36,7 +36,7 @@ public: String getName() const override { return "LiveViewSource"; } - void onCancelX() override + void onCancel() noexcept override { if (storage->shutdown_called) return; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 03ca30dd5b3..7a9cebbcb2e 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -48,7 +48,7 @@ public: ChunkAndProgress read(); - void cancel() { is_cancelled = true; } + void cancel() noexcept { is_cancelled = true; } const MergeTreeReaderSettings & getSettings() const { return reader_settings; } diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index d2e34665962..210a7057f94 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -28,9 +28,6 @@ struct MergeTreeSink::DelayedChunk MergeTreeSink::~MergeTreeSink() { - size_t addr = delayed_chunk ? 
size_t(delayed_chunk.get()) : 0; - LOG_INFO(storage.log, "~ReplicatedMergeTreeSinkImpl, delayed_chunk {}, called from {}", addr, StackTrace().toString()); - if (!delayed_chunk) return; @@ -40,8 +37,6 @@ MergeTreeSink::~MergeTreeSink() } delayed_chunk.reset(); - - LOG_INFO(storage.log, "~ReplicatedMergeTreeSinkImpl end"); } MergeTreeSink::MergeTreeSink( diff --git a/src/Storages/MergeTree/MergeTreeSource.cpp b/src/Storages/MergeTree/MergeTreeSource.cpp index 4070ccf4433..380c47723bc 100644 --- a/src/Storages/MergeTree/MergeTreeSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSource.cpp @@ -149,7 +149,7 @@ std::string MergeTreeSource::getName() const return processor->getName(); } -void MergeTreeSource::onCancelX() +void MergeTreeSource::onCancel() noexcept { processor->cancel(); } diff --git a/src/Storages/MergeTree/MergeTreeSource.h b/src/Storages/MergeTree/MergeTreeSource.h index c7092aa26b1..7506af4f9b8 100644 --- a/src/Storages/MergeTree/MergeTreeSource.h +++ b/src/Storages/MergeTree/MergeTreeSource.h @@ -26,7 +26,7 @@ public: protected: std::optional tryGenerate() override; - void onCancelX() override; + void onCancel() noexcept override; private: MergeTreeSelectProcessorPtr processor; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 6c7ed9bdae0..7bfe647fa7f 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -155,9 +155,6 @@ ReplicatedMergeTreeSinkImpl::ReplicatedMergeTreeSinkImpl( template ReplicatedMergeTreeSinkImpl::~ReplicatedMergeTreeSinkImpl() { - size_t addr = delayed_chunk ? size_t(delayed_chunk.get()) : 0; - LOG_INFO(log, "~ReplicatedMergeTreeSinkImpl, delayed_chunk {}, called from {}", addr, StackTrace().toString()); - if (!delayed_chunk) return; @@ -167,8 +164,6 @@ ReplicatedMergeTreeSinkImpl::~ReplicatedMergeTreeSinkImpl() } delayed_chunk.reset(); - - LOG_INFO(log, "~ReplicatedMergeTreeSinkImpl end"); } template @@ -273,8 +268,6 @@ size_t ReplicatedMergeTreeSinkImpl::checkQuorumPrecondition(const template void ReplicatedMergeTreeSinkImpl::consume(Chunk chunk) { - LOG_INFO(log, "consume"); - if (num_blocks_processed > 0) storage.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event, context, false); @@ -448,9 +441,6 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk chunk) template<> void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithFaultInjectionPtr & zookeeper) { - size_t addr = delayed_chunk ? size_t(delayed_chunk.get()) : 0; - LOG_INFO(log, "finishDelayedChunk {}", addr); - if (!delayed_chunk) return; @@ -480,22 +470,16 @@ void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithF { auto counters_snapshot = std::make_shared(partition.part_counters.getPartiallyAtomicSnapshot()); PartLog::addNewPart(storage.getContext(), PartLog::PartLogEntry(part, partition.elapsed_ns, counters_snapshot), ExecutionStatus::fromCurrentException("", true)); - - size_t addr1 = delayed_chunk ? 
size_t(delayed_chunk.get()) : 0; - LOG_INFO(log, "finishDelayedChunk exception, delayed_chunk {}", addr1); throw; } } delayed_chunk.reset(); - - LOG_INFO(log, "finishDelayedChunk end, delayed_chunk {}", bool(delayed_chunk)); } template<> void ReplicatedMergeTreeSinkImpl::finishDelayedChunk(const ZooKeeperWithFaultInjectionPtr & zookeeper) { - if (!delayed_chunk) return; diff --git a/src/Storages/MessageQueueSink.cpp b/src/Storages/MessageQueueSink.cpp index 4fb81d69070..10617422f40 100644 --- a/src/Storages/MessageQueueSink.cpp +++ b/src/Storages/MessageQueueSink.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include namespace DB @@ -79,4 +80,16 @@ void MessageQueueSink::consume(Chunk chunk) } +void MessageQueueSink::onCancel() noexcept +{ + try + { + onFinish(); + } + catch (...) + { + tryLogCurrentException(getLogger("MessageQueueSink"), "Error occurs on cancelation."); + } +} + } diff --git a/src/Storages/MessageQueueSink.h b/src/Storages/MessageQueueSink.h index 38754e9475e..6964af8cb4d 100644 --- a/src/Storages/MessageQueueSink.h +++ b/src/Storages/MessageQueueSink.h @@ -33,17 +33,13 @@ public: const String & storage_name_, const ContextPtr & context_); - ~MessageQueueSink() override - { - onFinish(); - } - String getName() const override { return storage_name + "Sink"; } void consume(Chunk chunk) override; void onStart() override; void onFinish() override; + void onCancel() noexcept override; void onException(std::exception_ptr /* exception */) override { onFinish(); } protected: diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp index 3bd0e88ecdb..d5813015e13 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSink.cpp @@ -2,8 +2,8 @@ #include #include #include -#include "base/defines.h" #include +#include namespace DB { From 12101f455b06a3df1bd00b8b070b4c7862d087bf Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Mon, 15 Jul 2024 17:55:02 +0200 Subject: [PATCH 0391/2361] fix typo --- src/Processors/Sources/RemoteSource.cpp | 2 +- src/Storages/Distributed/DistributedSink.cpp | 4 ++-- src/Storages/MessageQueueSink.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 42696f9c3ce..48a6804de9a 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -192,7 +192,7 @@ void RemoteSource::onCancel() noexcept } catch (...) { - tryLogCurrentException(getLogger("RemoteSource"), "Error occurs on cancelation."); + tryLogCurrentException(getLogger("RemoteSource"), "Error occurs on cancellation."); } } diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index fa4ba01a37c..197905c0849 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -608,7 +608,7 @@ void DistributedSink::onCancel() noexcept } catch (...) { - tryLogCurrentException(storage.log, "Error occurs on cancelation."); + tryLogCurrentException(storage.log, "Error occurs on cancellation."); } } @@ -623,7 +623,7 @@ void DistributedSink::onCancel() noexcept } catch (...) 
{ - tryLogCurrentException(storage.log, "Error occurs on cancelation."); + tryLogCurrentException(storage.log, "Error occurs on cancellation."); } } } diff --git a/src/Storages/MessageQueueSink.cpp b/src/Storages/MessageQueueSink.cpp index 10617422f40..9cddb2e7ce8 100644 --- a/src/Storages/MessageQueueSink.cpp +++ b/src/Storages/MessageQueueSink.cpp @@ -88,7 +88,7 @@ void MessageQueueSink::onCancel() noexcept } catch (...) { - tryLogCurrentException(getLogger("MessageQueueSink"), "Error occurs on cancelation."); + tryLogCurrentException(getLogger("MessageQueueSink"), "Error occurs on cancellation."); } } From 7841a8b401d0ddb3f9bce9e5dc03049a65a1067a Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 16:27:38 +0000 Subject: [PATCH 0392/2361] fixes after review --- docs/en/operations/settings/settings.md | 2 +- .../table-functions/azureBlobStorage.md | 13 ++++ docs/en/sql-reference/table-functions/file.md | 8 +-- docs/en/sql-reference/table-functions/hdfs.md | 4 +- docs/en/sql-reference/table-functions/s3.md | 8 +-- docs/en/sql-reference/table-functions/url.md | 13 ++++ .../ObjectStorage/StorageObjectStorage.cpp | 16 +++-- .../StorageObjectStorageCluster.cpp | 4 +- .../StorageObjectStorageSource.cpp | 17 ++--- .../StorageObjectStorageSource.h | 4 +- src/Storages/StorageFile.cpp | 20 ++---- src/Storages/StorageURL.cpp | 11 +-- src/Storages/VirtualColumnUtils.cpp | 64 +++++++++--------- src/Storages/VirtualColumnUtils.h | 7 +- .../03203_hive_style_partitioning.reference | 5 ++ .../03203_hive_style_partitioning.sh | 13 ++++ .../column0=Elizabeth/sample.parquet | Bin 0 -> 1308 bytes .../partitioning/identifier=2070/email.csv | 5 ++ 18 files changed, 114 insertions(+), 100 deletions(-) create mode 100644 tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet create mode 100644 tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 84912b4574f..fb076e76bdd 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5611,6 +5611,6 @@ Default value: `1GiB`. ## use_hive_partitioning -Allows the usage of Hive-style partitioning in queries. When enabled, ClickHouse interprets and maintains table partitions in a way that is consistent with the Hive partitioning scheme, which is commonly used in Hadoop ecosystems. +When enabled, ClickHouse will detect Hive-style partitioning in path (`/name=value/`) in file-like table engines [File](../../engines/table-engines/special/file.md#hive-style-partitioning)/[S3](../../engines/table-engines/integrations/s3.md#hive-style-partitioning)/[URL](../../engines/table-engines/special/url.md#hive-style-partitioning)/[HDFS](../../engines/table-engines/integrations/hdfs.md#hive-style-partitioning)/[AzureBlobStorage](../../engines/table-engines/integrations/azureBlobStorage.md#hive-style-partitioning) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. Default value: `0`. 
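The naming rule is easiest to see with a small sketch (the directory layout and column name below are made up for illustration, not taken from this patch):

```sql
SET use_hive_partitioning = 1;

-- For files stored under paths such as data/city=London/part-0.parquet,
-- the partition key `city` is exposed as the virtual column `_city`.
SELECT _city, count()
FROM file('data/city=*/*.parquet')
GROUP BY _city;
```

The table-function documentation updated below shows the same pattern for `azureBlobStorage`, `file`, `hdfs`, `s3`, and `url`.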
diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index f59fedeb3a2..104ac4e26df 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -77,3 +77,16 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam **See Also** - [AzureBlobStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) + +## Hive-style partitioning {#hive-style-partitioning} + +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. + +**Example** + +Use virtual column, created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet', format='Parquet', structure='Date DateTime64, Country String, Code UInt64') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +``` diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 838a7ab61de..0669609a22a 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -208,7 +208,7 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 ## Hive-style partitioning {#hive-style-partitioning} -When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. **Example** @@ -216,11 +216,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT _specified_column from file('/specified_column=specified_data/file.txt'); -``` - -``` reference -specified_data +SELECT * from file('data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; ``` ## Settings {#settings} diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index fc84c431066..6963d4e4b79 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -101,7 +101,7 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin ## Hive-style partitioning {#hive-style-partitioning} -When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. 
**Example** @@ -109,7 +109,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT _specified_column from HDFS('hdfs://hdfs1:9000/specified_column=specified_data/file.txt'); +SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; ``` ``` reference diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 15074a77475..f3ee83afef4 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -276,7 +276,7 @@ FROM s3( ## Hive-style partitioning {#hive-style-partitioning} -When setting `use_hive_partitioning` is set to 1, ClickHouse can introduce virtual columns due to Hive partitioning style if the path has the specific structure. +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. **Example** @@ -284,11 +284,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT _specified_column from HDFS('hdfs://hdfs1:9000/specified_column=specified_data/file.txt'); -``` - -``` reference -specified_data +SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; ``` ## Storage Settings {#storage-settings} diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 3bb7aff53a7..596355e2577 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -55,6 +55,19 @@ Character `|` inside patterns is used to specify failover addresses. They are it - `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. + +**Example** + +Use virtual column, created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from url('http://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +``` + ## Storage Settings {#storage-settings} - [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default. 
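The collision handling added in this patch (see the `parseHivePartitioningKeysAndValues` hunk and the `identifier=2070/email.csv` test further down) is easiest to follow with a sketch; the path below only mirrors that test and is otherwise illustrative:

```sql
SET use_hive_partitioning = 1;

-- email.csv already contains a column of its own named _identifier, so the
-- value taken from the identifier=2070 path segment is exposed as
-- __identifier (one extra leading underscore) rather than _identifier.
SELECT _identifier, __identifier
FROM file('data/identifier=*/email.csv')
LIMIT 1;
```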
diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 48e9118e321..35cd1492642 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -35,16 +35,19 @@ namespace ErrorCodes std::string StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) { + auto query_settings = configuration->getQuerySettings(context); + /// We don't want to throw an exception if there are no files with specified path. + query_settings.throw_on_zero_files_match = false; auto file_iterator = StorageObjectStorageSource::createFileIterator( configuration, + query_settings, object_storage, distributed_processing, context, {}, // predicate metadata.getColumns().getAll(), // virtual_columns nullptr, // read_keys - {}, // file_progress_callback - true // override_settings_for_hive_partitioning + {} // file_progress_callback ); if (auto file = file_iterator->next(0)) @@ -82,12 +85,10 @@ StorageObjectStorage::StorageObjectStorage( metadata.setConstraints(constraints_); metadata.setComment(comment); - if (sample_path.empty() && context->getSettings().use_hive_partitioning) + if (sample_path.empty()) sample_path = getPathSample(metadata, context); - else if (!context->getSettings().use_hive_partitioning) - sample_path = ""; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context, sample_path)); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context, sample_path, format_settings)); setInMemoryMetadata(metadata); } @@ -224,7 +225,7 @@ private: return; auto context = getContext(); iterator_wrapper = StorageObjectStorageSource::createFileIterator( - configuration, object_storage, distributed_processing, + configuration, configuration->getQuerySettings(context), object_storage, distributed_processing, context, predicate, virtual_columns, nullptr, context->getFileProgressCallback()); } }; @@ -376,6 +377,7 @@ std::unique_ptr StorageObjectStorage::createReadBufferIterat { auto file_iterator = StorageObjectStorageSource::createFileIterator( configuration, + configuration->getQuerySettings(context), object_storage, false/* distributed_processing */, context, diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp index 92327b4cde0..a88532e1ea9 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp @@ -84,8 +84,8 @@ RemoteQueryExecutor::Extension StorageObjectStorageCluster::getTaskIteratorExten const ActionsDAG::Node * predicate, const ContextPtr & local_context) const { auto iterator = StorageObjectStorageSource::createFileIterator( - configuration, object_storage, /* distributed_processing */false, local_context, - predicate, virtual_columns, nullptr, local_context->getFileProgressCallback()); + configuration, configuration->getQuerySettings(local_context), object_storage, /* distributed_processing */false, + local_context, predicate, virtual_columns, nullptr, local_context->getFileProgressCallback()); auto callback = std::make_shared>([iterator]() mutable -> String { diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 2e5416d1ffd..707e7603368 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ 
b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -99,14 +99,14 @@ std::string StorageObjectStorageSource::getUniqueStoragePathIdentifier( std::shared_ptr StorageObjectStorageSource::createFileIterator( ConfigurationPtr configuration, + const StorageObjectStorage::QuerySettings & query_settings, ObjectStoragePtr object_storage, bool distributed_processing, const ContextPtr & local_context, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, ObjectInfos * read_keys, - std::function file_progress_callback, - bool override_settings_for_hive_partitioning) + std::function file_progress_callback) { if (distributed_processing) return std::make_shared( @@ -117,20 +117,16 @@ std::shared_ptr StorageObjectStorageSourc throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expression can not have wildcards inside {} name", configuration->getNamespaceType()); - auto settings = configuration->getQuerySettings(local_context); const bool is_archive = configuration->isArchive(); std::unique_ptr iterator; if (configuration->isPathWithGlobs()) { - bool throw_on_zero_files_match = settings.throw_on_zero_files_match; - if (override_settings_for_hive_partitioning) - throw_on_zero_files_match = false; /// Iterate through disclosed globs and make a source for each file iterator = std::make_unique( object_storage, configuration, predicate, virtual_columns, - local_context, is_archive ? nullptr : read_keys, settings.list_object_keys_size, - throw_on_zero_files_match, file_progress_callback); + local_context, is_archive ? nullptr : read_keys, query_settings.list_object_keys_size, + query_settings.throw_on_zero_files_match, file_progress_callback); } else { @@ -149,7 +145,7 @@ std::shared_ptr StorageObjectStorageSourc iterator = std::make_unique( object_storage, copy_configuration, virtual_columns, is_archive ? nullptr : read_keys, - settings.ignore_non_existent_file, file_progress_callback); + query_settings.ignore_non_existent_file, file_progress_callback); } if (is_archive) @@ -208,8 +204,7 @@ Chunk StorageObjectStorageSource::generate() .size = object_info->isArchive() ? 
object_info->fileSizeInArchive() : object_info->metadata->size_bytes, .filename = &filename, .last_modified = object_info->metadata->last_modified, - .hive_partitioning_path = object_info->getPath(), - }); + }, read_from_format_info.columns_description, getContext()); const auto & partition_columns = configuration->getPartitionColumns(); if (!partition_columns.empty() && chunk_size && chunk.hasColumns()) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.h b/src/Storages/ObjectStorage/StorageObjectStorageSource.h index a99bb068372..ff6d588b364 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.h @@ -52,14 +52,14 @@ public: static std::shared_ptr createFileIterator( ConfigurationPtr configuration, + const StorageObjectStorage::QuerySettings & query_settings, ObjectStoragePtr object_storage, bool distributed_processing, const ContextPtr & local_context, const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns, ObjectInfos * read_keys, - std::function file_progress_callback = {}, - bool override_settings_for_hive_partitioning = false); + std::function file_progress_callback = {}); static std::string getUniqueStoragePathIdentifier( const Configuration & configuration, diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 047631cbc54..42e27a13ca9 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1108,9 +1108,9 @@ void StorageFile::setStorageMetadata(CommonArguments args) setInMemoryMetadata(storage_metadata); std::string path_for_virtuals; - if (args.getContext()->getSettingsRef().use_hive_partitioning && !paths.empty()) + if (!paths.empty()) path_for_virtuals = paths[0]; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), path_for_virtuals, format_settings.value_or(FormatSettings{}))); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), path_for_virtuals, format_settings)); } @@ -1452,10 +1452,6 @@ Chunk StorageFileSource::generate() chunk_size = input_format->getApproxBytesReadForChunk(); progress(num_rows, chunk_size ? chunk_size : chunk.bytes()); - std::string hive_partitioning_path; - if (getContext()->getSettingsRef().use_hive_partitioning) - hive_partitioning_path = current_path; - /// Enrich with virtual columns. VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, requested_virtual_columns, @@ -1463,9 +1459,8 @@ Chunk StorageFileSource::generate() .path = current_path, .size = current_file_size, .filename = (filename_override.has_value() ? 
&filename_override.value() : nullptr), - .last_modified = current_file_last_modified, - .hive_partitioning_path = hive_partitioning_path, - }); + .last_modified = current_file_last_modified + }, columns_description, getContext()); return chunk; } @@ -1648,17 +1643,10 @@ void ReadFromFile::initializePipeline(QueryPipelineBuilder & pipeline, const Bui size_t num_streams = max_num_streams; size_t files_to_read = 0; - Strings paths; if (storage->archive_info) - { files_to_read = storage->archive_info->paths_to_archives.size(); - paths = storage->archive_info->paths_to_archives; - } else - { files_to_read = storage->paths.size(); - paths = storage->paths; - } if (max_num_streams > files_to_read) num_streams = files_to_read; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 5da42638b87..f7560fa7910 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -164,10 +164,7 @@ IStorageURLBase::IStorageURLBase( storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); - std::string uri_for_partitioning; - if (context_->getSettingsRef().use_hive_partitioning) - uri_for_partitioning = getSampleURI(uri, context_); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_, uri_for_partitioning, format_settings.value_or(FormatSettings{}))); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_, getSampleURI(uri, context_), format_settings)); } @@ -426,9 +423,6 @@ Chunk StorageURLSource::generate() size_t chunk_size = 0; if (input_format) chunk_size = input_format->getApproxBytesReadForChunk(); - std::string hive_partitioning_path; - if (getContext()->getSettingsRef().use_hive_partitioning) - hive_partitioning_path = curr_uri.getPath(); progress(num_rows, chunk_size ? 
chunk_size : chunk.bytes()); VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( @@ -436,8 +430,7 @@ Chunk StorageURLSource::generate() { .path = curr_uri.getPath(), .size = current_file_size, - .hive_partitioning_path = hive_partitioning_path, - }); + }, columns_description, getContext()); return chunk; } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 0efa9522ac6..fb5a345f424 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -119,15 +119,25 @@ NameSet getVirtualNamesForFileLikeStorage() return {"_path", "_file", "_size", "_time"}; } -std::map parseHivePartitioningKeysAndValues(const std::string& path) +std::unordered_map parseHivePartitioningKeysAndValues(const std::string& path, const ColumnsDescription & storage_columns) { std::string pattern = "/([^/]+)=([^/]+)"; re2::StringPiece input_piece(path); - std::map key_values; + std::unordered_map key_values; std::string key, value; + std::set used_keys; while (RE2::FindAndConsume(&input_piece, pattern, &key, &value)) - key_values["_" + key] = value; + { + if (used_keys.contains(key)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Link to file with enabled hive-style partitioning contains duplicated key {}, only unique keys required", key); + used_keys.insert(key); + + auto col_name = "_" + key; + while (storage_columns.has(col_name)) + col_name = "_" + col_name; + key_values[col_name] = value; + } return key_values; } @@ -148,17 +158,20 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription add_virtual("_size", makeNullable(std::make_shared())); add_virtual("_time", makeNullable(std::make_shared())); - auto map = parseHivePartitioningKeysAndValues(path); - for (auto& item : map) + if (context->getSettingsRef().use_hive_partitioning) { + auto map = parseHivePartitioningKeysAndValues(path, storage_columns); auto format_settings = format_settings_ ? 
*format_settings_ : getFormatSettings(context); - auto type = tryInferDataTypeByEscapingRule(item.second, format_settings, FormatSettings::EscapingRule::Raw); - if (type == nullptr) - type = std::make_shared(); - if (type->canBeInsideLowCardinality()) - add_virtual(item.first, std::make_shared(type)); - else - add_virtual(item.first, type); + for (auto & item : map) + { + auto type = tryInferDataTypeByEscapingRule(item.second, format_settings, FormatSettings::EscapingRule::Raw); + if (type == nullptr) + type = std::make_shared(); + if (type->canBeInsideLowCardinality()) + add_virtual(item.first, std::make_shared(type)); + else + add_virtual(item.first, type); + } } return desc; @@ -207,8 +220,6 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const { if (column.name == "_file" || column.name == "_path") block.insert({column.type->createColumn(), column.type, column.name}); - if (!getVirtualNamesForFileLikeStorage().contains(column.name)) - block.insert({column.type->createColumn(), column.type, column.name}); } block.insert({ColumnUInt64::create(), std::make_shared(), "_idx"}); @@ -222,9 +233,12 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values) + VirtualsForFileLikeStorage virtual_values, ColumnsDescription columns, ContextPtr context) { - auto hive_map = parseHivePartitioningKeysAndValues(virtual_values.hive_partitioning_path); + std::unordered_map hive_map; + if (context->getSettingsRef().use_hive_partitioning) + hive_map = parseHivePartitioningKeysAndValues(virtual_values.path, columns); + for (const auto & virtual_column : requested_virtual_columns) { if (virtual_column.name == "_path") @@ -258,23 +272,9 @@ void addRequestedFileLikeStorageVirtualsToChunk( else chunk.addColumn(virtual_column.type->createColumnConstWithDefaultValue(chunk.getNumRows())->convertToFullColumnIfConst()); } - else if (!hive_map.empty()) + else if (auto it = hive_map.find(virtual_column.getNameInStorage()); it != hive_map.end()) { - bool contains_virtual_column = std::any_of(hive_map.begin(), hive_map.end(), - [&](const auto& pair) - { - return requested_virtual_columns.contains(pair.first); - }); - - if (!contains_virtual_column) - hive_map.clear(); // If we cannot find any virtual column in requested, we don't add any of them to chunk - - auto it = hive_map.find(virtual_column.getNameInStorage()); - if (it != hive_map.end()) - { - chunk.addColumn(virtual_column.type->createColumnConst(chunk.getNumRows(), convertFieldToType(Field(it->second), *virtual_column.type))->convertToFullColumnIfConst()); - hive_map.erase(it); - } + chunk.addColumn(virtual_column.type->createColumnConst(chunk.getNumRows(), convertFieldToType(Field(it->second), *virtual_column.type))->convertToFullColumnIfConst()); } } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index aa7d4c4605b..29ec32ab375 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -7,8 +7,6 @@ #include #include -#include -#include #include @@ -81,14 +79,11 @@ struct VirtualsForFileLikeStorage std::optional size { std::nullopt }; const String * filename { nullptr }; std::optional last_modified { std::nullopt }; - const String & hive_partitioning_path = ""; }; -std::map parseFromPath(const std::string& path); - void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const 
NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values); + VirtualsForFileLikeStorage virtual_values, ColumnsDescription columns = {}, ContextPtr context = {}); } } diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index be43048dd01..fc6da3a55c1 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -33,6 +33,11 @@ Elizabeth Gordon Elizabeth [1,2,3] 42.42 Array(Int64) LowCardinality(Float64) 101 +2070 +4081 +2070 +2070 +1 1 TESTING THE URL PARTITIONING first last Elizabeth diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 58a74a3ca8f..a5f3e36763d 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -36,6 +36,19 @@ SELECT toTypeName(_array), toTypeName(_float) FROM file('$CURDIR/data_hive/parti SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE _number = 42; """ +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 1; + +SELECT _identifier FROM file('$CURDIR/data_hive/partitioning/identitier=*/email.csv') LIMIT 2; +SELECT __identifier FROM file('$CURDIR/data_hive/partitioning/identitier=*/email.csv') LIMIT 2; +""" + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "INCORRECT_DATA" + $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 0; diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9b6a78cf8cc7cd3ece15e13c9b2f222c8f09b81e GIT binary patch literal 1308 zcmWG=3^EjD5Z%Hr`iosh^b{kI%_hpmz#!kv!2kyTLxU6Z9~tnZHa*@opEc(Au?K1g zOD4aYo#~scS*oJ`_R8gD$~^!1^Jl8v?6d#uZT@&1Z&h*OEsK+iS@vM( z^NvMD|`UJH2qG^xTWJ-dT6$7G6DVTky7Woy5#*nvWVEJpR{CJ{Fy0- zE8ux@_5^8x!?dEIRau&2MyW=j!5h*xtj<|H$T%nI_ zrsjz?W}YW@dt8{DRBI|`*(jU(m2ZmM@u#NQ!s{)z%{yLgtZF$)cAddC?xOT5D^_mz z-x7J9sr1v$v$K{(^`5h;Sz-1gc2*AGUh7}8F0R?}-B&E(IrH;G`GUhY z?q@1K*wQW0otd;iYI&}N?~AIE{%tkCroWN7t$#4bGw~KP0|PJ-eBc+|z=1tM#0JF{ zU3TEfTh6OHr)jl`=8?k_CV5&tiR=x1?{{sI`|Af*?oUEIqS_tiuleY8e||}EY3bMB zzp9qaKhIf|e>9xYs^&t{(WWC|y8X+=Uc{}=?T>Xh_5JxVk(1Vsywf&)T&i$tu2}yJ zsTDW>>9!Q_yZT7oEaCof4t43QdkFv1JFG`q9?h6g zxTpBgk6%&qwlli6{)!hkc#l_C=)}P;-Ys+NvjP>bYG~cCGCw}YQ1x-0z@w1)u@}^n zTV#|>Z7-{GtbTT=rr=<)~?``+iTxh4l+3|MS-tdVRHm+9w`h0!z=3knV zrSnX_{WmK}KJ?@4(a#30zmF(AmC{eNN7s8Lx}H>x1pMHFk2oys;%$ zvXN_R)m$dd8M|y^7q?Bh-x;&%icdYm3!CL}KR{`PNz%rYL4r4>G&wsZDZV&4BQ-Zs zl!ZZ*N0mu}Jvl$8G&j!xn4o|vkwidc4g-VODMm>dNgXu?8BrcdQ3gqbdKRFR7=zd% z4mA!N3D&gCqT&(>R>!2I%v3Q34HQ1GkiyV!C<@hogF|f<&;XY3{QMLNR)w6z;u4^K eWG+xU(4JF_Y8(t2Y%V}QxHvIf1_}lM%S8a*|2_@? 
literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv b/tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv new file mode 100644 index 00000000000..936d995cc64 --- /dev/null +++ b/tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv @@ -0,0 +1,5 @@ +_login_email,_identifier,_first_name,_last_name +laura@example.com,2070,Laura,Grey +craig@example.com,4081,Craig,Johnson +mary@example.com,9346,Mary,Jenkins +jamie@example.com,5079,Jamie,Smith From 7d70968db3527d894bc6c02d51dc70f932f7eacd Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 15 Jul 2024 18:47:04 +0200 Subject: [PATCH 0393/2361] try fix --- src/Interpreters/InterpreterCreateQuery.cpp | 52 ++++++++++----------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f8696caebe7..9eb13a29af7 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1010,32 +1010,32 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); /// For exrternal tables with restore_replace_external_engine_to_null setting we replace external engines to /// Null table engine. - else (create.storage->engine == "AzureBlobStorage" || - create.storage->engine == "AzureQueue" || - create.storage->engine == "COSN" || - create.storage->engine == "DeltaLake" || - create.storage->engine == "Dictionary" || - create.storage->engine == "Executable" || - create.storage->engine == "ExecutablePool" || - create.storage->engine == "ExternalDistributed" || - create.storage->engine == "File" || - create.storage->engine == "Hudi" || - create.storage->engine == "Iceberg" || - create.storage->engine == "JDBC" || - create.storage->engine == "Kafka" || - create.storage->engine == "MaterializedPostgreSQL" || - create.storage->engine == "MongoDB" || - create.storage->engine == "MySQL" || - create.storage->engine == "NATS" || - create.storage->engine == "ODBC" || - create.storage->engine == "OSS" || - create.storage->engine == "PostgreSQL" || - create.storage->engine == "RabbitMQ" || - create.storage->engine == "Redis" || - create.storage->engine == "S3" || - create.storage->engine == "S3Queue" || - create.storage->engine == "TinyLog" || - create.storage->engine == "URL") + else (create.storage->engine->name == "AzureBlobStorage" || + create.storage->engine->name == "AzureQueue" || + create.storage->engine->name == "COSN" || + create.storage->engine->name == "DeltaLake" || + create.storage->engine->name == "Dictionary" || + create.storage->engine->name == "Executable" || + create.storage->engine->name == "ExecutablePool" || + create.storage->engine->name == "ExternalDistributed" || + create.storage->engine->name == "File" || + create.storage->engine->name == "Hudi" || + create.storage->engine->name == "Iceberg" || + create.storage->engine->name == "JDBC" || + create.storage->engine->name == "Kafka" || + create.storage->engine->name == "MaterializedPostgreSQL" || + create.storage->engine->name == "MongoDB" || + create.storage->engine->name == "MySQL" || + create.storage->engine->name == "NATS" || + create.storage->engine->name == "ODBC" || + create.storage->engine->name == "OSS" || + create.storage->engine->name == "PostgreSQL" || + create.storage->engine->name == "RabbitMQ" 
|| + create.storage->engine->name == "Redis" || + create.storage->engine->name == "S3" || + create.storage->engine->name == "S3Queue" || + create.storage->engine->name == "TinyLog" || + create.storage->engine->name == "URL") { if (getContext()->getSettingsRef().restore_replace_external_engine_to_null) setNullTableEngine(*create.storage) From fc49b1b75f9b075f28cdc4b7eeb768339bb1ebd5 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 15 Jul 2024 19:02:21 +0200 Subject: [PATCH 0394/2361] semicolon --- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 9eb13a29af7..94230f0e7d1 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1038,7 +1038,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const create.storage->engine->name == "URL") { if (getContext()->getSettingsRef().restore_replace_external_engine_to_null) - setNullTableEngine(*create.storage) + setNullTableEngine(*create.storage); } return; } From 899c5a64e078820caf2e68cbaf892d5d39e0af06 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 17:14:11 +0000 Subject: [PATCH 0395/2361] some more fixes (docs + storageObjectStorage) --- docs/en/sql-reference/table-functions/azureBlobStorage.md | 2 +- docs/en/sql-reference/table-functions/file.md | 2 +- docs/en/sql-reference/table-functions/hdfs.md | 2 +- docs/en/sql-reference/table-functions/s3.md | 2 +- docs/en/sql-reference/table-functions/url.md | 2 +- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 7 ++++--- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index 104ac4e26df..6936c807f96 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -88,5 +88,5 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT * from azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet', format='Parquet', structure='Date DateTime64, Country String, Code UInt64') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +SELECT * from azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; ``` diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 0669609a22a..7908a3cb934 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -216,7 +216,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT * from file('data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +SELECT * from file('data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; ``` ## Settings {#settings} diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 6963d4e4b79..73fdc263d68 100644 --- 
a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -109,7 +109,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; ``` ``` reference diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index f3ee83afef4..1bd9f38517e 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -284,7 +284,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; ``` ## Storage Settings {#storage-settings} diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 596355e2577..b4027594e7c 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -65,7 +65,7 @@ Use virtual column, created with Hive-style partitioning ``` sql SET use_hive_partitioning = 1; -SELECT * from url('http://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and code = 42; +SELECT * from url('http://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; ``` ## Storage Settings {#storage-settings} diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 35cd1492642..d2cc73f14d7 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -85,7 +85,7 @@ StorageObjectStorage::StorageObjectStorage( metadata.setConstraints(constraints_); metadata.setComment(comment); - if (sample_path.empty()) + if (sample_path.empty() && context->getSettingsRef().use_hive_partitioning) sample_path = getPathSample(metadata, context); setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context, sample_path, format_settings)); @@ -412,8 +412,9 @@ std::string StorageObjectStorage::resolveFormatFromData( { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); + auto format_and_schema = detectFormatAndReadSchema(format_settings, *iterator, context).second; sample_path = iterator->getLastFilePath(); - return detectFormatAndReadSchema(format_settings, *iterator, context).second; + return format_and_schema; } std::pair StorageObjectStorage::resolveSchemaAndFormatFromData( @@ -425,8 +426,8 @@ std::pair StorageObjectStorage::resolveSchemaAn { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - sample_path = iterator->getLastFilePath(); auto [columns, format] = detectFormatAndReadSchema(format_settings, *iterator, context); + sample_path = iterator->getLastFilePath(); configuration->format = format; return 
std::pair(columns, format); } From 7d39535c989e16d818370cbbd9cbea891b21d07a Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 17:20:46 +0000 Subject: [PATCH 0396/2361] storageObjectStorage small fix --- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 8291327992c..ee1169d2c5c 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -400,8 +400,9 @@ ColumnsDescription StorageObjectStorage::resolveSchemaFromData( { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); + auto schema = readSchemaFromFormat(configuration->format, format_settings, *iterator, context); sample_path = iterator->getLastFilePath(); - return readSchemaFromFormat(configuration->format, format_settings, *iterator, context); + return schema; } std::string StorageObjectStorage::resolveFormatFromData( From 86570eef595c56400c3e04e44b74d4abdbfdcb52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 16:24:02 +0000 Subject: [PATCH 0397/2361] Extract shared logic to common place from consumers --- src/Storages/Kafka/KafkaConsumer.cpp | 74 ++++------------------- src/Storages/Kafka/KafkaConsumer.h | 3 +- src/Storages/Kafka/KafkaConsumer2.cpp | 61 ++----------------- src/Storages/Kafka/KafkaConsumer2.h | 3 +- src/Storages/Kafka/StorageKafkaCommon.cpp | 69 +++++++++++++++++++++ src/Storages/Kafka/StorageKafkaCommon.h | 12 ++++ 6 files changed, 99 insertions(+), 123 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer.cpp b/src/Storages/Kafka/KafkaConsumer.cpp index f4385163323..1affbbaf8fd 100644 --- a/src/Storages/Kafka/KafkaConsumer.cpp +++ b/src/Storages/Kafka/KafkaConsumer.cpp @@ -21,13 +21,12 @@ namespace CurrentMetrics namespace ProfileEvents { - extern const Event KafkaRebalanceRevocations; - extern const Event KafkaRebalanceAssignments; - extern const Event KafkaRebalanceErrors; - extern const Event KafkaMessagesPolled; - extern const Event KafkaCommitFailures; - extern const Event KafkaCommits; - extern const Event KafkaConsumerErrors; +extern const Event KafkaRebalanceRevocations; +extern const Event KafkaRebalanceAssignments; +extern const Event KafkaRebalanceErrors; +extern const Event KafkaMessagesPolled; +extern const Event KafkaCommitFailures; +extern const Event KafkaCommits; } namespace DB @@ -200,44 +199,9 @@ KafkaConsumer::~KafkaConsumer() // https://github.com/confluentinc/confluent-kafka-go/issues/189 etc. 
void KafkaConsumer::drain() { - auto start_time = std::chrono::steady_clock::now(); - cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); - - while (true) - { - auto msg = consumer->poll(100ms); - if (!msg) - break; - - auto error = msg.get_error(); - - if (error) - { - if (msg.is_eof() || error == last_error) - { - break; - } - else - { - LOG_ERROR(log, "Error during draining: {}", error); - setExceptionInfo(error); - } - } - - // i don't stop draining on first error, - // only if it repeats once again sequentially - last_error = error; - - auto ts = std::chrono::steady_clock::now(); - if (std::chrono::duration_cast(ts-start_time) > DRAIN_TIMEOUT_MS) - { - LOG_ERROR(log, "Timeout during draining."); - break; - } - } + StorageKafkaUtils::drainConsumer(*consumer, DRAIN_TIMEOUT_MS, log, [this](const cppkafka::Error & err) { setExceptionInfo(err); }); } - void KafkaConsumer::commit() { auto print_offsets = [this] (const char * prefix, const cppkafka::TopicPartitionList & offsets) @@ -410,7 +374,7 @@ void KafkaConsumer::resetToLastCommitted(const char * msg) { if (!assignment.has_value() || assignment->empty()) { - LOG_TRACE(log, "Not assignned. Can't reset to last committed position."); + LOG_TRACE(log, "Not assigned. Can't reset to last committed position."); return; } auto committed_offset = consumer->get_offsets_committed(consumer->get_assignment()); @@ -474,7 +438,7 @@ ReadBufferPtr KafkaConsumer::consume() // If we're doing a manual select then it's better to get something after a wait, then immediate nothing. if (!assignment.has_value()) { - waited_for_assignment += poll_timeout; // slightly innaccurate, but rough calculation is ok. + waited_for_assignment += poll_timeout; // slightly inaccurate, but rough calculation is ok. if (waited_for_assignment < MAX_TIME_TO_WAIT_FOR_ASSIGNMENT_MS) { continue; @@ -536,26 +500,12 @@ ReadBufferPtr KafkaConsumer::getNextMessage() return getNextMessage(); } -size_t KafkaConsumer::filterMessageErrors() +void KafkaConsumer::filterMessageErrors() { assert(current == messages.begin()); - size_t skipped = std::erase_if(messages, [this](auto & message) - { - if (auto error = message.get_error()) - { - ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); - LOG_ERROR(log, "Consumer error: {}", error); - setExceptionInfo(error); - return true; - } - return false; - }); - - if (skipped) - LOG_ERROR(log, "There were {} messages with an error", skipped); - - return skipped; + StorageKafkaUtils::eraseMessageErrors(messages, log, [this](const cppkafka::Error & err) { setExceptionInfo(err); }); + current = messages.begin(); } void KafkaConsumer::resetIfStopped() diff --git a/src/Storages/Kafka/KafkaConsumer.h b/src/Storages/Kafka/KafkaConsumer.h index a3bc97779b3..285f3680213 100644 --- a/src/Storages/Kafka/KafkaConsumer.h +++ b/src/Storages/Kafka/KafkaConsumer.h @@ -191,8 +191,7 @@ private: void drain(); void cleanUnprocessed(); void resetIfStopped(); - /// Return number of messages with an error. 
- size_t filterMessageErrors(); + void filterMessageErrors(); ReadBufferPtr getNextMessage(); }; diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 52a4cadd60b..aaefd6fd6f5 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -32,7 +32,6 @@ extern const Event KafkaRebalanceErrors; extern const Event KafkaMessagesPolled; extern const Event KafkaCommitFailures; extern const Event KafkaCommits; -extern const Event KafkaConsumerErrors; } namespace DB @@ -156,40 +155,7 @@ KafkaConsumer2::~KafkaConsumer2() // https://github.com/confluentinc/confluent-kafka-go/issues/189 etc. void KafkaConsumer2::drainConsumerQueue() { - auto start_time = std::chrono::steady_clock::now(); - cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); - - while (true) - { - auto msg = consumer->poll(100ms); - if (!msg) - break; - - auto error = msg.get_error(); - - if (error) - { - if (msg.is_eof() || error == last_error) - { - break; - } - else - { - LOG_ERROR(log, "Error during draining: {}", error); - } - } - - // i don't stop draining on first error, - // only if it repeats once again sequentially - last_error = error; - - auto ts = std::chrono::steady_clock::now(); - if (std::chrono::duration_cast(ts - start_time) > DRAIN_TIMEOUT_MS) - { - LOG_ERROR(log, "Timeout during draining."); - break; - } - } + StorageKafkaUtils::drainConsumer(*consumer, DRAIN_TIMEOUT_MS, log); } void KafkaConsumer2::pollEvents() @@ -414,31 +380,12 @@ ReadBufferPtr KafkaConsumer2::getNextMessage() return nullptr; } -size_t KafkaConsumer2::filterMessageErrors() +void KafkaConsumer2::filterMessageErrors() { assert(current == messages.begin()); - size_t skipped = std::erase_if( - messages, - [this](auto & message) - { - if (auto error = message.get_error()) - { - ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); - LOG_ERROR(log, "Consumer error: {}", error); - return true; - } - return false; - }); - - if (skipped) - { - LOG_ERROR(log, "There were {} messages with an error", skipped); - // Technically current is invalidated as soon as we erased a single message - current = messages.begin(); - } - - return skipped; + StorageKafkaUtils::eraseMessageErrors(messages, log); + current = messages.begin(); } void KafkaConsumer2::resetIfStopped() diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 0df37434caf..57b157416c6 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -151,8 +151,7 @@ private: bool polledDataUnusable(const TopicPartition & topic_partition) const; void drainConsumerQueue(); void resetIfStopped(); - /// Return number of messages with an error. 
- size_t filterMessageErrors(); + void filterMessageErrors(); ReadBufferPtr getNextMessage(); void initializeQueues(const cppkafka::TopicPartitionList & topic_partitions); diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 470e3445d03..35118f88e53 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -32,9 +32,16 @@ namespace CurrentMetrics extern const Metric KafkaLibrdkafkaThreads; } +namespace ProfileEvents +{ +extern const Event KafkaConsumerErrors; +} + namespace DB { +using namespace std::chrono_literals; + namespace ErrorCodes { extern const int LOGICAL_ERROR; @@ -556,6 +563,68 @@ String getDefaultClientId(const StorageID & table_id) { return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id.database_name, table_id.table_name); } + +void drainConsumer( + cppkafka::Consumer & consumer, const std::chrono::milliseconds drain_timeout, const LoggerPtr & log, ErrorHandler error_handler) +{ + auto start_time = std::chrono::steady_clock::now(); + cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); + + while (true) + { + auto msg = consumer.poll(100ms); + if (!msg) + break; + + auto error = msg.get_error(); + + if (error) + { + if (msg.is_eof() || error == last_error) + { + break; + } + else + { + LOG_ERROR(log, "Error during draining: {}", error); + error_handler(error); + } + } + + // i don't stop draining on first error, + // only if it repeats once again sequentially + last_error = error; + + auto ts = std::chrono::steady_clock::now(); + if (std::chrono::duration_cast(ts - start_time) > drain_timeout) + { + LOG_ERROR(log, "Timeout during draining."); + break; + } + } +} + +void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler) +{ + assert(current == messages.begin()); + + size_t skipped = std::erase_if( + messages, + [&](auto & message) + { + if (auto error = message.get_error()) + { + ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); + LOG_ERROR(log, "Consumer error: {}", error); + error_handler(error); + return true; + } + return false; + }); + + if (skipped) + LOG_ERROR(log, "There were {} messages with an error", skipped); +} } template struct StorageKafkaInterceptors; diff --git a/src/Storages/Kafka/StorageKafkaCommon.h b/src/Storages/Kafka/StorageKafkaCommon.h index f0bae7c9c9c..d51d9d03208 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.h +++ b/src/Storages/Kafka/StorageKafkaCommon.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -66,6 +67,17 @@ namespace StorageKafkaUtils { Names parseTopics(String topic_list); String getDefaultClientId(const StorageID & table_id); + +using ErrorHandler = std::function; + +void drainConsumer( + cppkafka::Consumer & consumer, + std::chrono::milliseconds drain_timeout, + const LoggerPtr & log, + ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); + +using Messages = std::vector; +void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); } } From 137c8b3f64976df0b20294176c09b3d991ff7202 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 17:43:00 +0000 Subject: [PATCH 0398/2361] Extract config loading to `StorageKafkaCommon` --- src/Storages/Kafka/StorageKafka.cpp | 164 ++---------------- src/Storages/Kafka/StorageKafka.h | 12 -- src/Storages/Kafka/StorageKafka2.cpp | 165 ++----------------- 
src/Storages/Kafka/StorageKafka2.h | 12 -- src/Storages/Kafka/StorageKafkaCommon.cpp | 192 +++++++++++++++++++--- src/Storages/Kafka/StorageKafkaCommon.h | 48 ++++-- src/Storages/Kafka/parseSyslogLevel.cpp | 3 +- 7 files changed, 236 insertions(+), 360 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 0e907187e11..8bcf59d6db8 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -1,5 +1,4 @@ #include -#include #include #include @@ -55,10 +54,6 @@ #include #include -#if USE_KRB5 -#include -#endif // USE_KRB5 - namespace CurrentMetrics { extern const Metric KafkaBackgroundReads; @@ -463,65 +458,26 @@ KafkaConsumerPtr StorageKafka::createKafkaConsumer(size_t consumer_number) topics); return kafka_consumer_ptr; } - cppkafka::Configuration StorageKafka::getConsumerConfiguration(size_t consumer_number) { - cppkafka::Configuration conf; - - conf.set("metadata.broker.list", brokers); - conf.set("group.id", group); - if (num_consumers > 1) - { - conf.set("client.id", fmt::format("{}-{}", client_id, consumer_number)); - } - else - { - conf.set("client.id", client_id); - } - conf.set("client.software.name", VERSION_NAME); - conf.set("client.software.version", VERSION_DESCRIBE); - conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start - - // that allows to prevent fast draining of the librdkafka queue - // during building of single insert block. Improves performance - // significantly, but may lead to bigger memory consumption. - size_t default_queued_min_messages = 100000; // must be greater than or equal to default - size_t max_allowed_queued_min_messages = 10000000; // must be less than or equal to max allowed value - conf.set("queued.min.messages", std::min(std::max(getMaxBlockSize(), default_queued_min_messages), max_allowed_queued_min_messages)); - - updateGlobalConfiguration(conf); - updateConsumerConfiguration(conf); - - // those settings should not be changed by users. - conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished - conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once. 
- conf.set("enable.partition.eof", "false"); // Ignore EOF messages - - for (auto & property : conf.get_all()) - { - LOG_TRACE(log, "Consumer set property {}:{}", property.first, property.second); - } - - return conf; + KafkaConfigLoader::ConsumerConfigParams params{ + {getContext()->getConfigRef(), collection_name, topics, log}, + brokers, + group, + num_consumers > 1, + consumer_number, + client_id, + getMaxBlockSize()}; + return KafkaConfigLoader::getConsumerConfiguration(*this, params); } cppkafka::Configuration StorageKafka::getProducerConfiguration() { - cppkafka::Configuration conf; - conf.set("metadata.broker.list", brokers); - conf.set("client.id", client_id); - conf.set("client.software.name", VERSION_NAME); - conf.set("client.software.version", VERSION_DESCRIBE); - - updateGlobalConfiguration(conf); - updateProducerConfiguration(conf); - - for (auto & property : conf.get_all()) - { - LOG_TRACE(log, "Producer set property {}:{}", property.first, property.second); - } - - return conf; + KafkaConfigLoader::ProducerConfigParams params{ + {getContext()->getConfigRef(), collection_name, topics, log}, + brokers, + client_id}; + return KafkaConfigLoader::getProducerConfiguration(*this, params); } void StorageKafka::cleanConsumers() @@ -599,98 +555,6 @@ size_t StorageKafka::getPollTimeoutMillisecond() const : getContext()->getSettingsRef().stream_poll_timeout_ms.totalMilliseconds(); } -void StorageKafka::updateGlobalConfiguration(cppkafka::Configuration & kafka_config) -{ - const auto & config = getContext()->getConfigRef(); - KafkaConfigLoader::loadFromConfig(kafka_config, config, collection_name, KafkaConfigLoader::CONFIG_KAFKA_TAG, topics); - -#if USE_KRB5 - if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) - LOG_WARNING(log, "sasl.kerberos.kinit.cmd configuration parameter is ignored."); - - kafka_config.set("sasl.kerberos.kinit.cmd",""); - kafka_config.set("sasl.kerberos.min.time.before.relogin","0"); - - if (kafka_config.has_property("sasl.kerberos.keytab") && kafka_config.has_property("sasl.kerberos.principal")) - { - String keytab = kafka_config.get("sasl.kerberos.keytab"); - String principal = kafka_config.get("sasl.kerberos.principal"); - LOG_DEBUG(log, "Running KerberosInit"); - try - { - kerberosInit(keytab,principal); - } - catch (const Exception & e) - { - LOG_ERROR(log, "KerberosInit failure: {}", getExceptionMessage(e, false)); - } - LOG_DEBUG(log, "Finished KerberosInit"); - } -#else // USE_KRB5 - if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) - LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); -#endif // USE_KRB5 - // No need to add any prefix, messages can be distinguished - kafka_config.set_log_callback( - [this](cppkafka::KafkaHandleBase & handle, int level, const std::string & facility, const std::string & message) - { - auto [poco_level, client_logs_level] = parseSyslogLevel(level); - const auto & kafka_object_config = handle.get_configuration(); - const std::string client_id_key{"client.id"}; - chassert(kafka_object_config.has_property(client_id_key) && "Kafka configuration doesn't have expected client.id set"); - LOG_IMPL( - log, - client_logs_level, - poco_level, - "[client.id:{}] [rdk:{}] {}", - kafka_object_config.get(client_id_key), - facility, - message); - }); - - /// NOTE: statistics should be consumed, otherwise it creates too much - /// entries in the queue, that leads to memory leak and slow shutdown. 
- if (!kafka_config.has_property("statistics.interval.ms")) - { - // every 3 seconds by default. set to 0 to disable. - kafka_config.set("statistics.interval.ms", "3000"); - } - - // Configure interceptor to change thread name - // - // TODO: add interceptors support into the cppkafka. - // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibliity overrides it to noop. - { - // This should be safe, since we wait the rdkafka object anyway. - void * self = static_cast(this); - - int status; - - status = rd_kafka_conf_interceptor_add_on_new(kafka_config.get_handle(), - "init", StorageKafkaInterceptors::rdKafkaOnNew, self); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(log, "Cannot set new interceptor due to {} error", status); - - // cppkafka always copy the configuration - status = rd_kafka_conf_interceptor_add_on_conf_dup(kafka_config.get_handle(), - "init", StorageKafkaInterceptors::rdKafkaOnConfDup, self); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(log, "Cannot set dup conf interceptor due to {} error", status); - } -} - -void StorageKafka::updateConsumerConfiguration(cppkafka::Configuration & kafka_config) -{ - const auto & config = getContext()->getConfigRef(); - KafkaConfigLoader::loadConsumerConfig(kafka_config, config, collection_name, KafkaConfigLoader::CONFIG_KAFKA_TAG, topics); -} - -void StorageKafka::updateProducerConfiguration(cppkafka::Configuration & kafka_config) -{ - const auto & config = getContext()->getConfigRef(); - KafkaConfigLoader::loadProducerConfig(kafka_config, config, collection_name, KafkaConfigLoader::CONFIG_KAFKA_TAG, topics); -} - bool StorageKafka::checkDependencies(const StorageID & table_id) { // Check if all dependencies are attached diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index 31a5744ee2a..74793292224 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -149,18 +149,6 @@ private: std::atomic shutdown_called = false; - // Load Kafka global configuration - // https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md#global-configuration-properties - void updateGlobalConfiguration(cppkafka::Configuration & kafka_config); - // Load Kafka properties from consumer configuration - // NOTE: librdkafka allow to set a consumer property to a producer and vice versa, - // but a warning will be generated e.g: - // "Configuration property session.timeout.ms is a consumer property and - // will be ignored by this producer instance" - void updateConsumerConfiguration(cppkafka::Configuration & kafka_config); - // Load Kafka properties from producer configuration - void updateProducerConfiguration(cppkafka::Configuration & kafka_config); - void threadFunc(size_t idx); size_t getPollMaxBatchSize() const; diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 428426d9e6a..d980df9d278 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -51,10 +50,6 @@ #include #include -#if USE_KRB5 -# include -#endif // USE_KRB5 - #include #include #include @@ -478,54 +473,27 @@ KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) cppkafka::Configuration StorageKafka2::getConsumerConfiguration(size_t consumer_number) { - cppkafka::Configuration conf; - - conf.set("metadata.broker.list", brokers); - conf.set("group.id", group); - if (num_consumers > 1) - conf.set("client.id", 
fmt::format("{}-{}", client_id, consumer_number)); - else - conf.set("client.id", client_id); - conf.set("client.software.name", VERSION_NAME); - conf.set("client.software.version", VERSION_DESCRIBE); - conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start - - // that allows to prevent fast draining of the librdkafka queue - // during building of single insert block. Improves performance - // significantly, but may lead to bigger memory consumption. - size_t default_queued_min_messages = 100000; // must be greater than or equal to default - size_t max_allowed_queued_min_messages = 10000000; // must be less than or equal to max allowed value - conf.set("queued.min.messages", std::min(std::max(getMaxBlockSize(), default_queued_min_messages), max_allowed_queued_min_messages)); - - updateGlobalConfiguration(conf); - updateConsumerConfiguration(conf); - - // those settings should not be changed by users. - conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished - conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once. - conf.set("enable.partition.eof", "false"); // Ignore EOF messages - - for (auto & property : conf.get_all()) - LOG_TEST(log, "Consumer set property {}:{}", property.first, property.second); - - return conf; + KafkaConfigLoader::ConsumerConfigParams params{ + {getContext()->getConfigRef(), collection_name, topics, log}, + brokers, + group, + num_consumers > 1, + consumer_number, + client_id, + getMaxBlockSize()}; + auto kafka_config = KafkaConfigLoader::getConsumerConfiguration(*this, params); + // It is disabled, because in case of no materialized views are attached, it can cause live memory leak. To enable it, a similar cleanup mechanism must be introduced as for StorageKafka. 
+ kafka_config.set("statistics.interval.ms", "0"); + return kafka_config; } cppkafka::Configuration StorageKafka2::getProducerConfiguration() { - cppkafka::Configuration conf; - conf.set("metadata.broker.list", brokers); - conf.set("client.id", client_id); - conf.set("client.software.name", VERSION_NAME); - conf.set("client.software.version", VERSION_DESCRIBE); - - updateGlobalConfiguration(conf); - updateProducerConfiguration(conf); - - for (auto & property : conf.get_all()) - LOG_TEST(log, "Producer set property {}:{}", property.first, property.second); - - return conf; + KafkaConfigLoader::ProducerConfigParams params{ + {getContext()->getConfigRef(), collection_name, topics, log}, + brokers, + client_id}; + return KafkaConfigLoader::getProducerConfiguration(*this, params); } size_t StorageKafka2::getMaxBlockSize() const @@ -548,105 +516,6 @@ size_t StorageKafka2::getPollTimeoutMillisecond() const : getContext()->getSettingsRef().stream_poll_timeout_ms.totalMilliseconds(); } -void StorageKafka2::updateGlobalConfiguration(cppkafka::Configuration & kafka_config) -{ - const auto & config = getContext()->getConfigRef(); - KafkaConfigLoader::loadFromConfig(kafka_config, config, collection_name, KafkaConfigLoader::CONFIG_KAFKA_TAG, topics); - -#if USE_KRB5 - if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) - LOG_WARNING(log, "sasl.kerberos.kinit.cmd configuration parameter is ignored."); - - kafka_config.set("sasl.kerberos.kinit.cmd", ""); - kafka_config.set("sasl.kerberos.min.time.before.relogin", "0"); - - if (kafka_config.has_property("sasl.kerberos.keytab") && kafka_config.has_property("sasl.kerberos.principal")) - { - String keytab = kafka_config.get("sasl.kerberos.keytab"); - String principal = kafka_config.get("sasl.kerberos.principal"); - LOG_DEBUG(log, "Running KerberosInit"); - try - { - kerberosInit(keytab, principal); - } - catch (const Exception & e) - { - LOG_ERROR(log, "KerberosInit failure: {}", getExceptionMessage(e, false)); - } - LOG_DEBUG(log, "Finished KerberosInit"); - } -#else // USE_KRB5 - if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) - LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); -#endif // USE_KRB5 - // No need to add any prefix, messages can be distinguished - kafka_config.set_log_callback( - [this](cppkafka::KafkaHandleBase & handle, int level, const std::string & facility, const std::string & message) - { - auto [poco_level, client_logs_level] = parseSyslogLevel(level); - const auto & kafka_object_config = handle.get_configuration(); - const std::string client_id_key{"client.id"}; - chassert(kafka_object_config.has_property(client_id_key) && "Kafka configuration doesn't have expected client.id set"); - LOG_IMPL( - log, - client_logs_level, - poco_level, - "[client.id:{}] [rdk:{}] {}", - kafka_object_config.get(client_id_key), - facility, - message); - }); - - /// NOTE: statistics should be consumed, otherwise it creates too much - /// entries in the queue, that leads to memory leak and slow shutdown. - if (!kafka_config.has_property("statistics.interval.ms")) - { - // every 3 seconds by default. set to 0 to disable. - kafka_config.set("statistics.interval.ms", "3000"); - } - - // Configure interceptor to change thread name - // - // TODO: add interceptors support into the cppkafka. - // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibility overrides it to noop. 
- { - // This should be safe, since we wait the rdkafka object anyway. - void * self = static_cast(this); - - int status; - - status = rd_kafka_conf_interceptor_add_on_new(kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnNew, self); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(log, "Cannot set new interceptor due to {} error", status); - - // cppkafka always copy the configuration - status = rd_kafka_conf_interceptor_add_on_conf_dup( - kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnConfDup, self); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(log, "Cannot set dup conf interceptor due to {} error", status); - } -} - -void StorageKafka2::updateConsumerConfiguration(cppkafka::Configuration & kafka_config) -{ - const auto & config = getContext()->getConfigRef(); - KafkaConfigLoader::loadConsumerConfig(kafka_config, config, collection_name, KafkaConfigLoader::CONFIG_KAFKA_TAG, topics); -} - -void StorageKafka2::updateProducerConfiguration(cppkafka::Configuration & kafka_config) -{ - const auto & config = getContext()->getConfigRef(); - KafkaConfigLoader::loadProducerConfig(kafka_config, config, collection_name, KafkaConfigLoader::CONFIG_KAFKA_TAG, topics); -} - -String StorageKafka2::getConfigPrefix() const -{ - if (!collection_name.empty()) - return "named_collections." + collection_name + "." - + String{KafkaConfigLoader::CONFIG_KAFKA_TAG}; /// Add one more level to separate librdkafka configuration. - return String{KafkaConfigLoader::CONFIG_KAFKA_TAG}; -} - bool StorageKafka2::checkDependencies(const StorageID & table_id) { // Check if all dependencies are attached diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index d7909df1b2c..2daae8e30d4 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -176,19 +176,7 @@ private: // Returns full producer related configuration, also the configuration // contains global kafka properties. 
cppkafka::Configuration getProducerConfiguration(); - // Load Kafka global configuration - // https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md#global-configuration-properties - void updateGlobalConfiguration(cppkafka::Configuration & kafka_config); - // Load Kafka properties from consumer configuration - // NOTE: librdkafka allow to set a consumer property to a producer and vice versa, - // but a warning will be generated e.g: - // "Configuration property session.timeout.ms is a consumer property and - // will be ignored by this producer instance" - void updateConsumerConfiguration(cppkafka::Configuration & kafka_config); - // Load Kafka properties from producer configuration - void updateProducerConfiguration(cppkafka::Configuration & kafka_config); - String getConfigPrefix() const; void threadFunc(size_t idx); size_t getPollMaxBatchSize() const; diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 35118f88e53..a2cb397f777 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -1,3 +1,4 @@ +#include #include @@ -9,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -26,6 +28,9 @@ #include #include +#if USE_KRB5 +# include +#endif // USE_KRB5 namespace CurrentMetrics { @@ -228,17 +233,18 @@ void loadTopicConfig(cppkafka::Configuration & kafka_config, const Poco::Util::A } /// Read server configuration into cppkafka configuration, used by global configuration and by legacy per-topic configuration -void KafkaConfigLoader::loadFromConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & collection_name, const String & config_prefix, const Names & topics) +static void +loadFromConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params, const String & config_prefix) { - if (!collection_name.empty()) + if (!params.collection_name.empty()) { - loadNamedCollectionConfig(kafka_config, collection_name, config_prefix); + loadNamedCollectionConfig(kafka_config, params.collection_name, config_prefix); return; } /// Read all tags one level below Poco::Util::AbstractConfiguration::Keys tags; - config.keys(config_prefix, tags); + params.config.keys(config_prefix, tags); for (const auto & tag : tags) { @@ -263,16 +269,16 @@ void KafkaConfigLoader::loadFromConfig(cppkafka::Configuration & kafka_config, c // // Advantages: The period restriction no longer applies (e.g. sports.football will work), everything // Kafka-related is below . - for (const auto & topic : topics) + for (const auto & topic : params.topics) { /// Read topic name between ... const String kafka_topic_path = config_prefix + "." + tag; const String kafka_topic_name_path = kafka_topic_path + "." 
+ KafkaConfigLoader::CONFIG_NAME_TAG; - const String topic_name = config.getString(kafka_topic_name_path); + const String topic_name = params.config.getString(kafka_topic_name_path); if (topic_name != topic) continue; - loadTopicConfig(kafka_config, config, collection_name, kafka_topic_path, topic); + loadTopicConfig(kafka_config, params.config, params.collection_name, kafka_topic_path, topic); } continue; } @@ -285,36 +291,184 @@ void KafkaConfigLoader::loadFromConfig(cppkafka::Configuration & kafka_config, c // 250 // 100000 // - loadConfigProperty(kafka_config, config, config_prefix, tag); + loadConfigProperty(kafka_config, params.config, config_prefix, tag); } } -void loadLegacyConfigSyntax(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & collection_name, const String & prefix, const Names & topics) +void loadLegacyConfigSyntax( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & collection_name, + const Names & topics) { for (const auto & topic : topics) { - const String kafka_topic_path = prefix + "." + KafkaConfigLoader::CONFIG_KAFKA_TAG + "_" + topic; + const String kafka_topic_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." + KafkaConfigLoader::CONFIG_KAFKA_TAG + "_" + topic; loadLegacyTopicConfig(kafka_config, config, collection_name, kafka_topic_path); } } -void KafkaConfigLoader::loadConsumerConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & collection_name, const String & prefix, const Names & topics) +static void loadConsumerConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params) { - const String consumer_path = prefix + "." + CONFIG_KAFKA_CONSUMER_TAG; - loadLegacyConfigSyntax(kafka_config, config, collection_name, prefix, topics); + const String consumer_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." + KafkaConfigLoader::CONFIG_KAFKA_CONSUMER_TAG; + loadLegacyConfigSyntax(kafka_config, params.config, params.collection_name, params.topics); // A new syntax has higher priority - loadFromConfig(kafka_config, config, collection_name, consumer_path, topics); + loadFromConfig(kafka_config, params, consumer_path); } -void KafkaConfigLoader::loadProducerConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & collection_name, const String & prefix, const Names & topics) +static void loadProducerConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params) { - const String producer_path = prefix + "." + CONFIG_KAFKA_PRODUCER_TAG; - loadLegacyConfigSyntax(kafka_config, config, collection_name, prefix, topics); + const String producer_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." 
+ KafkaConfigLoader::CONFIG_KAFKA_PRODUCER_TAG; + loadLegacyConfigSyntax(kafka_config, params.config, params.collection_name, params.topics); // A new syntax has higher priority - loadFromConfig(kafka_config, config, collection_name, producer_path, topics); - + loadFromConfig(kafka_config, params, producer_path); } +template +static void updateGlobalConfiguration( + cppkafka::Configuration & kafka_config, TKafkaStorage & storage, const KafkaConfigLoader::LoadConfigParams & params) +{ + loadFromConfig(kafka_config, params, KafkaConfigLoader::CONFIG_KAFKA_TAG); + +#if USE_KRB5 + if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) + LOG_WARNING(params.log, "sasl.kerberos.kinit.cmd configuration parameter is ignored."); + + kafka_config.set("sasl.kerberos.kinit.cmd", ""); + kafka_config.set("sasl.kerberos.min.time.before.relogin", "0"); + + if (kafka_config.has_property("sasl.kerberos.keytab") && kafka_config.has_property("sasl.kerberos.principal")) + { + String keytab = kafka_config.get("sasl.kerberos.keytab"); + String principal = kafka_config.get("sasl.kerberos.principal"); + LOG_DEBUG(params.log, "Running KerberosInit"); + try + { + kerberosInit(keytab, principal); + } + catch (const Exception & e) + { + LOG_ERROR(params.log, "KerberosInit failure: {}", getExceptionMessage(e, false)); + } + LOG_DEBUG(params.log, "Finished KerberosInit"); + } +#else // USE_KRB5 + if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) + LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); +#endif // USE_KRB5 + // No need to add any prefix, messages can be distinguished + kafka_config.set_log_callback( + [log = params.log](cppkafka::KafkaHandleBase & handle, int level, const std::string & facility, const std::string & message) + { + auto [poco_level, client_logs_level] = parseSyslogLevel(level); + const auto & kafka_object_config = handle.get_configuration(); + const std::string client_id_key{"client.id"}; + chassert(kafka_object_config.has_property(client_id_key) && "Kafka configuration doesn't have expected client.id set"); + LOG_IMPL( + log, + client_logs_level, + poco_level, + "[client.id:{}] [rdk:{}] {}", + kafka_object_config.get(client_id_key), + facility, + message); + }); + + /// NOTE: statistics should be consumed, otherwise it creates too much + /// entries in the queue, that leads to memory leak and slow shutdown. + if (!kafka_config.has_property("statistics.interval.ms")) + { + // every 3 seconds by default. set to 0 to disable. + kafka_config.set("statistics.interval.ms", "3000"); + } + // Configure interceptor to change thread name + // + // TODO: add interceptors support into the cppkafka. + // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibliity overrides it to noop. + { + // This should be safe, since we wait the rdkafka object anyway. 
+ void * self = static_cast(&storage); + + int status; + + status = rd_kafka_conf_interceptor_add_on_new( + kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnNew, self); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(params.log, "Cannot set new interceptor due to {} error", status); + + // cppkafka always copy the configuration + status = rd_kafka_conf_interceptor_add_on_conf_dup( + kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnConfDup, self); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(params.log, "Cannot set dup conf interceptor due to {} error", status); + } +} + +template +cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(TKafkaStorage & storage, const ConsumerConfigParams & params) +{ + cppkafka::Configuration conf; + + conf.set("metadata.broker.list", params.brokers); + conf.set("group.id", params.group); + if (params.multiple_consumers) + conf.set("client.id", fmt::format("{}-{}", params.client_id, params.consumer_number)); + else + conf.set("client.id", params.client_id); + conf.set("client.software.name", VERSION_NAME); + conf.set("client.software.version", VERSION_DESCRIBE); + conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start + + // that allows to prevent fast draining of the librdkafka queue + // during building of single insert block. Improves performance + // significantly, but may lead to bigger memory consumption. + size_t default_queued_min_messages = 100000; // must be greater than or equal to default + size_t max_allowed_queued_min_messages = 10000000; // must be less than or equal to max allowed value + conf.set( + "queued.min.messages", std::min(std::max(params.max_block_size, default_queued_min_messages), max_allowed_queued_min_messages)); + + updateGlobalConfiguration(conf, storage, params); + loadConsumerConfig(conf, params); + + // those settings should not be changed by users. + conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished + conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once. 
+ conf.set("enable.partition.eof", "false"); // Ignore EOF messages + + for (auto & property : conf.get_all()) + { + LOG_TRACE(params.log, "Consumer set property {}:{}", property.first, property.second); + } + + return conf; +} + +template cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(StorageKafka & storage, const ConsumerConfigParams & params); +template cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(StorageKafka2 & storage, const ConsumerConfigParams & params); + +template +cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(TKafkaStorage & storage, const ProducerConfigParams & params) +{ + cppkafka::Configuration conf; + conf.set("metadata.broker.list", params.brokers); + conf.set("client.id", params.client_id); + conf.set("client.software.name", VERSION_NAME); + conf.set("client.software.version", VERSION_DESCRIBE); + + updateGlobalConfiguration(conf, storage, params); + loadProducerConfig(conf, params); + + for (auto & property : conf.get_all()) + { + LOG_TRACE(params.log, "Producer set property {}:{}", property.first, property.second); + } + + return conf; +} + +template cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(StorageKafka & storage, const ProducerConfigParams & params); +template cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(StorageKafka2 & storage, const ProducerConfigParams & params); + void registerStorageKafka(StorageFactory & factory) { diff --git a/src/Storages/Kafka/StorageKafkaCommon.h b/src/Storages/Kafka/StorageKafkaCommon.h index d51d9d03208..457247539e0 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.h +++ b/src/Storages/Kafka/StorageKafkaCommon.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -40,27 +41,38 @@ struct KafkaConfigLoader static inline const String CONFIG_NAME_TAG = "name"; static inline const String CONFIG_KAFKA_CONSUMER_TAG = "consumer"; static inline const String CONFIG_KAFKA_PRODUCER_TAG = "producer"; + using LogCallback = cppkafka::Configuration::LogCallback; - static void loadConsumerConfig( - cppkafka::Configuration & kafka_config, - const Poco::Util::AbstractConfiguration & config, - const String & collection_name, - const String & prefix, - const Names & topics); - static void loadProducerConfig( - cppkafka::Configuration & kafka_config, - const Poco::Util::AbstractConfiguration & config, - const String & collection_name, - const String & prefix, - const Names & topics); + struct LoadConfigParams + { + const Poco::Util::AbstractConfiguration & config; + String & collection_name; + const Names & topics; + LoggerPtr & log; + }; - static void loadFromConfig( - cppkafka::Configuration & kafka_config, - const Poco::Util::AbstractConfiguration & config, - const String & collection_name, - const String & config_prefix, - const Names & topics); + struct ConsumerConfigParams : public LoadConfigParams + { + String brokers; + String group; + bool multiple_consumers; + size_t consumer_number; + String client_id; + size_t max_block_size; + }; + + struct ProducerConfigParams : public LoadConfigParams + { + String brokers; + String client_id; + }; + + template + static cppkafka::Configuration getConsumerConfiguration(TKafkaStorage & storage, const ConsumerConfigParams & params); + + template + static cppkafka::Configuration getProducerConfiguration(TKafkaStorage & storage, const ProducerConfigParams & params); }; namespace StorageKafkaUtils diff --git a/src/Storages/Kafka/parseSyslogLevel.cpp 
b/src/Storages/Kafka/parseSyslogLevel.cpp index 43630a5001f..828cffc311b 100644 --- a/src/Storages/Kafka/parseSyslogLevel.cpp +++ b/src/Storages/Kafka/parseSyslogLevel.cpp @@ -1,4 +1,5 @@ -#include "parseSyslogLevel.h" +#include + #include /// Must be in a separate compilation unit due to macros overlaps: From 47c24c7dbe4394d0176eb91954223e40086b2c63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 18:11:28 +0000 Subject: [PATCH 0399/2361] Fix typos --- src/Storages/Kafka/KafkaSource.cpp | 2 +- src/Storages/Kafka/StorageKafka2.cpp | 4 ++-- src/Storages/Kafka/StorageKafkaCommon.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/Kafka/KafkaSource.cpp b/src/Storages/Kafka/KafkaSource.cpp index 9c68107872e..3ddd0d1be8c 100644 --- a/src/Storages/Kafka/KafkaSource.cpp +++ b/src/Storages/Kafka/KafkaSource.cpp @@ -262,7 +262,7 @@ Chunk KafkaSource::generateImpl() // they are not needed here: // and it's misleading to use them here, // as columns 'materialized' that way stays 'ephemeral' - // i.e. will not be stored anythere + // i.e. will not be stored anywhere // If needed any extra columns can be added using DEFAULT they can be added at MV level if needed. auto result_block = non_virtual_header.cloneWithColumns(executor.getResultColumns()); diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index d980df9d278..3e75f269ec0 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -818,7 +818,7 @@ StorageKafka2::lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const Topi if (code != Coordination::Error::ZNODEEXISTS) zkutil::KeeperMultiException::check(code, ops, responses); - // Possible optimization: check the content of logfiles, if we locked them, then we can clean them up and retry to lock them. + // Possible optimization: check the content of lock files, if we locked them, then we can clean them up and retry to lock them. return std::nullopt; } @@ -1064,7 +1064,7 @@ StorageKafka2::PolledBatchInfo StorageKafka2::pollConsumer( // they are not needed here: // and it's misleading to use them here, // as columns 'materialized' that way stays 'ephemeral' - // i.e. will not be stored anythere + // i.e. will not be stored anywhere // If needed any extra columns can be added using DEFAULT they can be added at MV level if needed. auto result_block = non_virtual_header.cloneWithColumns(executor.getResultColumns()); diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index a2cb397f777..2ad3c014b43 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -384,7 +384,7 @@ static void updateGlobalConfiguration( // Configure interceptor to change thread name // // TODO: add interceptors support into the cppkafka. - // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibliity overrides it to noop. + // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibility overrides it to noop. { // This should be safe, since we wait the rdkafka object anyway. 
void * self = static_cast(&storage); From b3e6383341848a3a3dca94ede24f07e6103a15d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 15 Jul 2024 18:51:53 +0000 Subject: [PATCH 0400/2361] Extract more common logic --- src/Storages/Kafka/StorageKafka.cpp | 94 +---------------------- src/Storages/Kafka/StorageKafka.h | 4 - src/Storages/Kafka/StorageKafka2.cpp | 61 +-------------- src/Storages/Kafka/StorageKafka2.h | 4 - src/Storages/Kafka/StorageKafkaCommon.cpp | 93 +++++++++++++++++++++- src/Storages/Kafka/StorageKafkaCommon.h | 13 ++++ 6 files changed, 110 insertions(+), 159 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 8bcf59d6db8..a5b709851d7 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -1,12 +1,5 @@ #include -#include -#include -#include -#include -#include -#include -#include #include #include #include @@ -164,7 +157,7 @@ StorageKafka::StorageKafka( , num_consumers(kafka_settings->kafka_num_consumers.value) , log(getLogger("StorageKafka (" + table_id_.table_name + ")")) , intermediate_commit(kafka_settings->kafka_commit_every_batch.value) - , settings_adjustments(createSettingsAdjustments()) + , settings_adjustments(StorageKafkaUtils::createSettingsAdjustments(*kafka_settings, schema_name)) , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) , collection_name(collection_name_) { @@ -179,7 +172,7 @@ StorageKafka::StorageKafka( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); setInMemoryMetadata(storage_metadata); - setVirtuals(createVirtuals(kafka_settings->kafka_handle_error_mode)); + setVirtuals(StorageKafkaUtils::createVirtuals(kafka_settings->kafka_handle_error_mode)); auto task_count = thread_per_consumer ? 
num_consumers : 1; for (size_t i = 0; i < task_count; ++i) @@ -204,60 +197,6 @@ StorageKafka::StorageKafka( StorageKafka::~StorageKafka() = default; -VirtualColumnsDescription StorageKafka::createVirtuals(StreamingHandleErrorMode handle_error_mode) -{ - VirtualColumnsDescription desc; - - desc.addEphemeral("_topic", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_key", std::make_shared(), ""); - desc.addEphemeral("_offset", std::make_shared(), ""); - desc.addEphemeral("_partition", std::make_shared(), ""); - desc.addEphemeral("_timestamp", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_timestamp_ms", std::make_shared(std::make_shared(3)), ""); - desc.addEphemeral("_headers.name", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_headers.value", std::make_shared(std::make_shared()), ""); - - if (handle_error_mode == StreamingHandleErrorMode::STREAM) - { - desc.addEphemeral("_raw_message", std::make_shared(), ""); - desc.addEphemeral("_error", std::make_shared(), ""); - } - - return desc; -} - -SettingsChanges StorageKafka::createSettingsAdjustments() -{ - SettingsChanges result; - // Needed for backward compatibility - if (!kafka_settings->input_format_skip_unknown_fields.changed) - { - // Always skip unknown fields regardless of the context (JSON or TSKV) - kafka_settings->input_format_skip_unknown_fields = true; - } - - if (!kafka_settings->input_format_allow_errors_ratio.changed) - { - kafka_settings->input_format_allow_errors_ratio = 0.; - } - - if (!kafka_settings->input_format_allow_errors_num.changed) - { - kafka_settings->input_format_allow_errors_num = kafka_settings->kafka_skip_broken_messages.value; - } - - if (!schema_name.empty()) - result.emplace_back("format_schema", schema_name); - - for (const auto & setting : *kafka_settings) - { - const auto & name = setting.getName(); - if (name.find("kafka_") == std::string::npos) - result.emplace_back(name, setting.getValue()); - } - return result; -} - void StorageKafka::read( QueryPlan & query_plan, const Names & column_names, @@ -555,33 +494,6 @@ size_t StorageKafka::getPollTimeoutMillisecond() const : getContext()->getSettingsRef().stream_poll_timeout_ms.totalMilliseconds(); } -bool StorageKafka::checkDependencies(const StorageID & table_id) -{ - // Check if all dependencies are attached - auto view_ids = DatabaseCatalog::instance().getDependentViews(table_id); - if (view_ids.empty()) - return true; - - // Check the dependencies are ready? 
- for (const auto & view_id : view_ids) - { - auto view = DatabaseCatalog::instance().tryGetTable(view_id, getContext()); - if (!view) - return false; - - // If it materialized view, check it's target table - auto * materialized_view = dynamic_cast(view.get()); - if (materialized_view && !materialized_view->tryGetTargetTable()) - return false; - - // Check all its dependencies - if (!checkDependencies(view_id)) - return false; - } - - return true; -} - void StorageKafka::threadFunc(size_t idx) { assert(idx < tasks.size()); @@ -602,7 +514,7 @@ void StorageKafka::threadFunc(size_t idx) // Keep streaming as long as there are attached views and streaming is not cancelled while (!task->stream_cancelled) { - if (!checkDependencies(table_id)) + if (!StorageKafkaUtils::checkDependencies(table_id, getContext())) break; LOG_DEBUG(log, "Started streaming to {} attached views", num_views); diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index 74793292224..54d178331bc 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -134,7 +134,6 @@ private: std::mutex thread_statuses_mutex; std::list> thread_statuses; - SettingsChanges createSettingsAdjustments(); /// Creates KafkaConsumer object without real consumer (cppkafka::Consumer) KafkaConsumerPtr createKafkaConsumer(size_t consumer_number); /// Returns full consumer related configuration, also the configuration @@ -156,11 +155,8 @@ private: size_t getPollTimeoutMillisecond() const; bool streamToViews(); - bool checkDependencies(const StorageID & table_id); void cleanConsumers(); - - static VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode); }; } diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 3e75f269ec0..d9fdfd7410f 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -1,13 +1,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#include #include #include #include @@ -122,7 +115,7 @@ StorageKafka2::StorageKafka2( , num_consumers(kafka_settings->kafka_num_consumers.value) , log(getLogger("StorageKafka2 (" + table_id_.getNameForLogs() + ")")) , semaphore(0, static_cast(num_consumers)) - , settings_adjustments(createSettingsAdjustments()) + , settings_adjustments(StorageKafkaUtils::createSettingsAdjustments(*kafka_settings, schema_name)) , thread_per_consumer(kafka_settings->kafka_thread_per_consumer.value) , collection_name(collection_name_) , active_node_identifier(toString(ServerUUID::get())) @@ -138,7 +131,7 @@ StorageKafka2::StorageKafka2( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); setInMemoryMetadata(storage_metadata); - setVirtuals(createVirtuals(kafka_settings->kafka_handle_error_mode)); + setVirtuals(StorageKafkaUtils::createVirtuals(kafka_settings->kafka_handle_error_mode)); auto task_count = thread_per_consumer ? 
num_consumers : 1; for (size_t i = 0; i < task_count; ++i) @@ -157,27 +150,6 @@ StorageKafka2::StorageKafka2( activating_task->deactivate(); } -VirtualColumnsDescription StorageKafka2::createVirtuals(StreamingHandleErrorMode handle_error_mode) -{ - VirtualColumnsDescription desc; - - desc.addEphemeral("_topic", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_key", std::make_shared(), ""); - desc.addEphemeral("_offset", std::make_shared(), ""); - desc.addEphemeral("_partition", std::make_shared(), ""); - desc.addEphemeral("_timestamp", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_timestamp_ms", std::make_shared(std::make_shared(3)), ""); - desc.addEphemeral("_headers.name", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_headers.value", std::make_shared(std::make_shared()), ""); - - if (handle_error_mode == StreamingHandleErrorMode::STREAM) - { - desc.addEphemeral("_raw_message", std::make_shared(), ""); - desc.addEphemeral("_error", std::make_shared(), ""); - } - - return desc; -} void StorageKafka2::partialShutdown() { // This is called in a background task within a catch block, thus this function shouldn't throw @@ -516,33 +488,6 @@ size_t StorageKafka2::getPollTimeoutMillisecond() const : getContext()->getSettingsRef().stream_poll_timeout_ms.totalMilliseconds(); } -bool StorageKafka2::checkDependencies(const StorageID & table_id) -{ - // Check if all dependencies are attached - auto view_ids = DatabaseCatalog::instance().getDependentViews(table_id); - if (view_ids.empty()) - return true; - - // Check the dependencies are ready? - for (const auto & view_id : view_ids) - { - auto view = DatabaseCatalog::instance().tryGetTable(view_id, getContext()); - if (!view) - return false; - - // If it materialized view, check it's target table - auto * materialized_view = dynamic_cast(view.get()); - if (materialized_view && !materialized_view->tryGetTargetTable()) - return false; - - // Check all its dependencies - if (!checkDependencies(view_id)) - return false; - } - - return true; -} - namespace { const std::string lock_file_name{"lock"}; @@ -1095,7 +1040,7 @@ void StorageKafka2::threadFunc(size_t idx) while (!task->stream_cancelled && num_created_consumers > 0) { maybe_stall_reason.reset(); - if (!checkDependencies(table_id)) + if (!StorageKafkaUtils::checkDependencies(table_id, getContext())) break; LOG_DEBUG(log, "Started streaming to {} attached views", num_views); diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 2daae8e30d4..d6e564b76f5 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -196,8 +196,6 @@ private: std::optional streamFromConsumer(ConsumerAndAssignmentInfo & consumer_info); - bool checkDependencies(const StorageID & table_id); - // Returns true if this is the first replica bool createTableIfNotExists(); // Returns true if all of the nodes were cleaned up @@ -226,8 +224,6 @@ private: std::filesystem::path getTopicPartitionPath(const TopicPartition & topic_partition); - - static VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode); }; } diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 2ad3c014b43..0c893407014 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -1,4 +1,3 @@ -#include #include @@ -11,8 +10,16 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include 
#include #include +#include #include #include #include @@ -469,7 +476,6 @@ cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(TKafkaStorag template cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(StorageKafka & storage, const ProducerConfigParams & params); template cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(StorageKafka2 & storage, const ProducerConfigParams & params); - void registerStorageKafka(StorageFactory & factory) { auto creator_fn = [](const StorageFactory::Arguments & args) -> std::shared_ptr @@ -779,6 +785,89 @@ void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler if (skipped) LOG_ERROR(log, "There were {} messages with an error", skipped); } + +SettingsChanges createSettingsAdjustments(KafkaSettings & kafka_settings, const String & schema_name) +{ + SettingsChanges result; + // Needed for backward compatibility + if (!kafka_settings.input_format_skip_unknown_fields.changed) + { + // Always skip unknown fields regardless of the context (JSON or TSKV) + kafka_settings.input_format_skip_unknown_fields = true; + } + + if (!kafka_settings.input_format_allow_errors_ratio.changed) + { + kafka_settings.input_format_allow_errors_ratio = 0.; + } + + if (!kafka_settings.input_format_allow_errors_num.changed) + { + kafka_settings.input_format_allow_errors_num = kafka_settings.kafka_skip_broken_messages.value; + } + + if (!schema_name.empty()) + result.emplace_back("format_schema", schema_name); + + for (const auto & setting : kafka_settings) + { + const auto & name = setting.getName(); + if (name.find("kafka_") == std::string::npos) + result.emplace_back(name, setting.getValue()); + } + return result; +} + + +bool checkDependencies(const StorageID & table_id, const ContextPtr& context) +{ + // Check if all dependencies are attached + auto view_ids = DatabaseCatalog::instance().getDependentViews(table_id); + if (view_ids.empty()) + return true; + + // Check the dependencies are ready? 
+ for (const auto & view_id : view_ids) + { + auto view = DatabaseCatalog::instance().tryGetTable(view_id, context); + if (!view) + return false; + + // If it materialized view, check it's target table + auto * materialized_view = dynamic_cast(view.get()); + if (materialized_view && !materialized_view->tryGetTargetTable()) + return false; + + // Check all its dependencies + if (!checkDependencies(view_id, context)) + return false; + } + + return true; +} + + +VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode) +{ + VirtualColumnsDescription desc; + + desc.addEphemeral("_topic", std::make_shared(std::make_shared()), ""); + desc.addEphemeral("_key", std::make_shared(), ""); + desc.addEphemeral("_offset", std::make_shared(), ""); + desc.addEphemeral("_partition", std::make_shared(), ""); + desc.addEphemeral("_timestamp", std::make_shared(std::make_shared()), ""); + desc.addEphemeral("_timestamp_ms", std::make_shared(std::make_shared(3)), ""); + desc.addEphemeral("_headers.name", std::make_shared(std::make_shared()), ""); + desc.addEphemeral("_headers.value", std::make_shared(std::make_shared()), ""); + + if (handle_error_mode == StreamingHandleErrorMode::STREAM) + { + desc.addEphemeral("_raw_message", std::make_shared(), ""); + desc.addEphemeral("_error", std::make_shared(), ""); + } + + return desc; +} } template struct StorageKafkaInterceptors; diff --git a/src/Storages/Kafka/StorageKafkaCommon.h b/src/Storages/Kafka/StorageKafkaCommon.h index 457247539e0..dd38ee69675 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.h +++ b/src/Storages/Kafka/StorageKafkaCommon.h @@ -2,13 +2,16 @@ #include #include +#include #include #include #include #include #include #include +#include #include +#include namespace Poco { @@ -20,6 +23,10 @@ namespace Util namespace DB { + +struct KafkaSettings; +class VirtualColumnsDescription; + template struct StorageKafkaInterceptors { @@ -90,6 +97,12 @@ void drainConsumer( using Messages = std::vector; void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); + +SettingsChanges createSettingsAdjustments(KafkaSettings & kafka_settings, const String & schema_name); + +bool checkDependencies(const StorageID & table_id, const ContextPtr& context); + +VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode); } } From 1bd9a1623f246dbf2a3098a4a022b6764aa3094d Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Jul 2024 19:23:11 +0000 Subject: [PATCH 0401/2361] add throw option in low level --- src/Interpreters/MutationsInterpreter.cpp | 15 +++++++++++++++ src/Interpreters/MutationsInterpreter.h | 1 + src/Storages/IStorage.h | 3 +++ src/Storages/MergeTree/MergeTreeData.cpp | 15 +++++++++++++++ src/Storages/MergeTree/MergeTreeData.h | 2 ++ .../03161_lightweight_delete_projection.sql | 16 ++++++++++------ 6 files changed, 46 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 480c6736bc5..b61f7f78885 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -344,6 +344,11 @@ bool MutationsInterpreter::Source::hasProjection(const String & name) const return part && part->hasProjection(name); } +bool MutationsInterpreter::Source::hasProjection() const +{ + return part && part->hasProjection(); +} + bool MutationsInterpreter::Source::hasBrokenProjection(const String & name) const { return part && 
part->hasBrokenProjection(name); @@ -491,6 +496,16 @@ static void validateUpdateColumns( { if (!source.supportsLightweightDelete()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); + + if (const MergeTreeData * merge_tree_data = source.getMergeTreeData(); merge_tree_data != nullptr) + { + if (merge_tree_data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW + && merge_tree_data->hasProjection()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DELETE query is not supported for table {} as it has projections. " + "User should drop all the projections manually before running the query", + source.getStorage()->getStorageID().getFullTableName()); + } } else if (virtual_columns.tryGet(column_name)) { diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 6aaa233cda3..b792a33f904 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -126,6 +126,7 @@ public: bool materializeTTLRecalculateOnly() const; bool hasSecondaryIndex(const String & name) const; bool hasProjection(const String & name) const; + bool hasProjection() const; bool hasBrokenProjection(const String & name) const; bool isCompactPart() const; diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 991c8ff64af..d302fcb26a7 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -259,6 +259,9 @@ public: /// Return true if there is at least one part containing lightweight deleted mask. virtual bool hasLightweightDeletedMask() const { return false; } + /// Return true if storage has any projection. + virtual bool hasProjection() const { return false; } + /// Return true if storage can execute lightweight delete mutations. 
virtual bool supportsLightweightDelete() const { return false; } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 38ca0aed9da..78a551591a6 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6160,6 +6160,21 @@ bool MergeTreeData::supportsLightweightDelete() const return true; } +bool MergeTreeData::hasProjection() const +{ + auto lock = lockParts(); + for (const auto & part : data_parts_by_info) + { + if (part->getState() == MergeTreeDataPartState::Outdated + || part->getState() == MergeTreeDataPartState::Deleting) + continue; + + if (part->hasProjection()) + return true; + } + return false; +} + bool MergeTreeData::areAsynchronousInsertsEnabled() const { return getSettings()->async_insert; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index d880928098b..7076b680521 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -439,6 +439,8 @@ public: bool supportsLightweightDelete() const override; + bool hasProjection() const override; + bool areAsynchronousInsertsEnabled() const override; bool supportsTrivialCountOptimization(const StorageSnapshotPtr &, ContextPtr) const override; diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index bfeb0127fa4..16a7468234b 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -13,11 +13,13 @@ SETTINGS min_bytes_for_wide_part = 10485760; INSERT INTO users VALUES (1231, 'John', 33); -DELETE FROM users WHERE 1; -- { serverError NOT_IMPLEMENTED } +ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; -DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } +DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } -DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'drop'; +ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users WHERE uid = 1231; SYSTEM FLUSH LOGS; @@ -45,11 +47,13 @@ SETTINGS min_bytes_for_wide_part = 0; INSERT INTO users VALUES (1231, 'John', 33); -DELETE FROM users WHERE 1; -- { serverError NOT_IMPLEMENTED } +ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; -DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } +DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } -DELETE FROM users WHERE uid = 1231 SETTINGS lightweight_mutation_projection_mode = 'drop'; +ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users WHERE uid = 1231; SYSTEM FLUSH LOGS; From 12794601921e2d465b27e665b072267b658b8e4c Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 22:09:30 +0000 Subject: [PATCH 0402/2361] fix after review --- .../ObjectStorage/StorageObjectStorage.cpp | 2 +- .../ObjectStorage/StorageObjectStorage.h | 2 +- .../StorageObjectStorageCluster.cpp | 29 ++++++++++++++++++- .../StorageObjectStorageCluster.h | 2 ++ src/Storages/StorageFile.cpp | 5 +--- src/Storages/VirtualColumnUtils.cpp | 6 ++-- src/Storages/VirtualColumnUtils.h | 2 +- .../03203_hive_style_partitioning.reference | 17 +++++++++++ 
.../03203_hive_style_partitioning.sh | 16 ++++++++++ 9 files changed, 70 insertions(+), 11 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index ee1169d2c5c..ca0ced8dcd3 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -34,7 +34,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -std::string StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) +String StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) { auto query_settings = configuration->getQuerySettings(context); /// We don't want to throw an exception if there are no files with specified path. diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 6ee4ce0c16f..cae0db48f31 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -122,7 +122,7 @@ public: protected: virtual void updateConfiguration(ContextPtr local_context); - std::string getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); + String getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); virtual ReadFromFormatInfo prepareReadingFromFormat( const Strings & requested_columns, diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp index a88532e1ea9..7f6b3338f9b 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp @@ -1,6 +1,8 @@ #include "Storages/ObjectStorage/StorageObjectStorageCluster.h" #include +#include +#include #include #include #include @@ -19,6 +21,28 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +String StorageObjectStorageCluster::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) +{ + auto query_settings = configuration->getQuerySettings(context); + /// We don't want to throw an exception if there are no files with specified path. 
+ query_settings.throw_on_zero_files_match = false; + auto file_iterator = StorageObjectStorageSource::createFileIterator( + configuration, + query_settings, + object_storage, + false, // distributed_processing + context, + {}, // predicate + metadata.getColumns().getAll(), // virtual_columns + nullptr, // read_keys + {} // file_progress_callback + ); + + if (auto file = file_iterator->next(0)) + return file->getPath(); + return ""; +} + StorageObjectStorageCluster::StorageObjectStorageCluster( const String & cluster_name_, ConfigurationPtr configuration_, @@ -41,7 +65,10 @@ StorageObjectStorageCluster::StorageObjectStorageCluster( metadata.setColumns(columns); metadata.setConstraints(constraints_); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_)); + if (sample_path.empty() && context_->getSettingsRef().use_hive_partitioning) + sample_path = getPathSample(metadata, context_); + + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_, sample_path, getFormatSettings(context_))); setInMemoryMetadata(metadata); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.h b/src/Storages/ObjectStorage/StorageObjectStorageCluster.h index 108aa109616..0088ff28fc2 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.h @@ -27,6 +27,8 @@ public: RemoteQueryExecutor::Extension getTaskIteratorExtension( const ActionsDAG::Node * predicate, const ContextPtr & context) const override; + String getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); + private: void updateQueryToSendIfNeeded( ASTPtr & query, diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index e2b010bf48f..b43fce370a1 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1109,10 +1109,7 @@ void StorageFile::setStorageMetadata(CommonArguments args) storage_metadata.setComment(args.comment); setInMemoryMetadata(storage_metadata); - std::string path_for_virtuals; - if (!paths.empty()) - path_for_virtuals = paths[0]; - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), path_for_virtuals, format_settings)); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), paths.empty() ? 
"" : paths[0], format_settings)); } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index fb5a345f424..352fd0d7a76 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -119,18 +119,18 @@ NameSet getVirtualNamesForFileLikeStorage() return {"_path", "_file", "_size", "_time"}; } -std::unordered_map parseHivePartitioningKeysAndValues(const std::string& path, const ColumnsDescription & storage_columns) +std::unordered_map parseHivePartitioningKeysAndValues(const String & path, const ColumnsDescription & storage_columns) { std::string pattern = "/([^/]+)=([^/]+)"; re2::StringPiece input_piece(path); std::unordered_map key_values; std::string key, value; - std::set used_keys; + std::unordered_set used_keys; while (RE2::FindAndConsume(&input_piece, pattern, &key, &value)) { if (used_keys.contains(key)) - throw Exception(ErrorCodes::INCORRECT_DATA, "Link to file with enabled hive-style partitioning contains duplicated key {}, only unique keys required", key); + throw Exception(ErrorCodes::INCORRECT_DATA, "Path '{}' to file with enabled hive-style partitioning contains duplicated partition key {}, only unique keys are allowed", path, key); used_keys.insert(key); auto col_name = "_" + key; diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 29ec32ab375..fef32b149ec 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -51,7 +51,7 @@ NameSet getVirtualNamesForFileLikeStorage(); VirtualColumnsDescription getVirtualsForFileLikeStorage( const ColumnsDescription & storage_columns, const ContextPtr & context, - std::string sample_path = "", + const std::string & sample_path = "", std::optional format_settings_ = std::nullopt); ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index fc6da3a55c1..430a3582f65 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -102,3 +102,20 @@ Jeffery Delgado Elizabeth Clara Cross Elizabeth Elizabeth Gordon Elizabeth OK +TESTING THE S3CLUSTER PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index a5f3e36763d..d2b1f31c85f 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -118,3 +118,19 @@ set use_hive_partitioning = 0; SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; """ 2>&1 | grep -F -q "UNKNOWN_IDENTIFIER" && echo "OK" || echo "FAIL"; + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3CLUSTER PARTITIONING'" + +$CLICKHOUSE_CLIENT -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM 
s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +""" From 4305a38e7394d7bbf3c3455c3b52b1dc9b86f3c9 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 22:11:08 +0000 Subject: [PATCH 0403/2361] add include --- src/Storages/VirtualColumnUtils.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 352fd0d7a76..938972cffca 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -41,6 +41,7 @@ #include #include #include +#include #include "Functions/FunctionsLogical.h" #include "Functions/IFunction.h" #include "Functions/IFunctionAdaptors.h" From dcf14e68afbc741fdbf830fc7d01ba84817c0760 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 22:45:21 +0000 Subject: [PATCH 0404/2361] small fix --- src/Storages/VirtualColumnUtils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 938972cffca..e84979833ab 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -142,7 +142,7 @@ std::unordered_map parseHivePartitioningKeysAndValues( return key_values; } -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, const ContextPtr & context, std::string path, std::optional format_settings_) +VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, const ContextPtr & context, const std::string & path, std::optional format_settings_) { VirtualColumnsDescription desc; From 177d006307515e62bbe60082c293c03de4d4cc7c Mon Sep 17 00:00:00 2001 From: yariks5s Date: Mon, 15 Jul 2024 23:02:24 +0000 Subject: [PATCH 0405/2361] add errorcodes --- src/Storages/VirtualColumnUtils.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index e84979833ab..24d0b7160b2 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -56,6 +56,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int INCORRECT_DATA; +} + namespace VirtualColumnUtils { From 82d283357755e3b667074596ec254ca54598ee5d Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 15 Jul 2024 23:29:25 +0000 Subject: [PATCH 0406/2361] clang tidy fix --- src/Interpreters/Context.cpp | 1 
- 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index fc1e87e7b7e..2602afd8b78 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include From 083e4b17db62121d6905c35480c3a462dc26477b Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Tue, 16 Jul 2024 09:34:52 +0800 Subject: [PATCH 0407/2361] trigger CI From 8aaf9c1d9824c136bbfc0532b040b2dca7564253 Mon Sep 17 00:00:00 2001 From: morning-color Date: Tue, 16 Jul 2024 11:00:55 +0800 Subject: [PATCH 0408/2361] Fix tests --- src/Client/Connection.cpp | 2 +- src/Core/ProtocolDefines.h | 4 +- src/Core/Settings.h | 2 +- src/Formats/JSONUtils.cpp | 2 +- src/Processors/Formats/IOutputFormat.h | 16 +- .../Formats/Impl/XMLRowOutputFormat.cpp | 4 +- src/Processors/IProcessor.h | 8 +- src/Processors/LimitTransform.h | 4 +- src/Processors/OffsetTransform.h | 4 +- src/Processors/RowsBeforeStepCounter.h | 4 +- src/Processors/Sources/DelayedSource.h | 8 +- src/Processors/Sources/RemoteSource.cpp | 4 - src/Processors/Sources/RemoteSource.h | 9 +- .../AggregatingInOrderTransform.cpp | 2 + .../Transforms/AggregatingInOrderTransform.h | 4 + .../Transforms/AggregatingTransform.cpp | 4 +- .../Transforms/AggregatingTransform.h | 7 +- .../Transforms/PartialSortingTransform.h | 4 +- src/QueryPipeline/ProfileInfo.cpp | 22 +- src/QueryPipeline/ProfileInfo.h | 4 +- src/QueryPipeline/QueryPipeline.cpp | 15 +- src/Server/TCPHandler.cpp | 2 +- ...74_exact_rows_before_aggregation.reference | 196 +++++++++++++++++- .../03174_exact_rows_before_aggregation.sql | 26 ++- 24 files changed, 276 insertions(+), 81 deletions(-) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 19cd8cc4ee5..d545278220a 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -1279,7 +1279,7 @@ Progress Connection::receiveProgress() const ProfileInfo Connection::receiveProfileInfo() const { ProfileInfo profile_info; - profile_info.read(*in); + profile_info.read(*in, server_revision); return profile_info; } diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 7e6893c6d85..02d54221ed3 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -81,6 +81,8 @@ static constexpr auto DBMS_MIN_REVISION_WITH_TABLE_READ_ONLY_CHECK = 54467; static constexpr auto DBMS_MIN_REVISION_WITH_SYSTEM_KEYWORDS_TABLE = 54468; +static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469; + /// Version of ClickHouse TCP protocol. /// /// Should be incremented manually on protocol changes. @@ -88,6 +90,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_SYSTEM_KEYWORDS_TABLE = 54468; /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). 
-static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54468; +static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54469; } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 2296a880bd6..842aa54f620 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1200,7 +1200,7 @@ class IColumn; M(Bool, insert_distributed_one_random_shard, false, "If setting is enabled, inserting into distributed table will choose a random shard to write when there is no sharding key", 0) \ \ M(Bool, exact_rows_before_limit, false, "When enabled, ClickHouse will provide exact value for rows_before_limit_at_least statistic, but with the cost that the data before limit will have to be read completely", 0) \ - M(Bool, rows_before_aggregation, false, "When enabled, ClickHouse will provide exact value for rows_before_aggregation_at_least statistic, represents the number of rows read before aggregation", 0) \ + M(Bool, rows_before_aggregation, false, "When enabled, ClickHouse will provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation", 0) \ M(UInt64, cross_to_inner_join_rewrite, 1, "Use inner join instead of comma/cross join if there are joining expressions in the WHERE section. Values: 0 - no rewrite, 1 - apply if possible for comma/cross, 2 - force rewrite all comma joins, cross - if possible", 0) \ \ M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Enable output LowCardinality type as Dictionary Arrow type", 0) \ diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index 363e9344770..017befe5b0e 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -507,7 +507,7 @@ namespace JSONUtils if (applied_aggregation) { writeFieldDelimiter(out, 2); - writeTitle("rows_before_aggregation_at_least", out, 1, " "); + writeTitle("rows_before_aggregation", out, 1, " "); writeIntText(rows_before_aggregation, out); } if (write_statistics) diff --git a/src/Processors/Formats/IOutputFormat.h b/src/Processors/Formats/IOutputFormat.h index 40ac1317618..e9af4ca7cf5 100644 --- a/src/Processors/Formats/IOutputFormat.h +++ b/src/Processors/Formats/IOutputFormat.h @@ -39,17 +39,17 @@ public: virtual void setRowsBeforeLimit(size_t /*rows_before_limit*/) { } /// Counter to calculate rows_before_limit_at_least in processors pipeline. - void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit_counter.swap(counter); } + void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr counter) override { rows_before_limit_counter.swap(counter); } - /// Value for rows_before_aggregation_at_least field. - virtual void setRowsBeforeAggregation(size_t /*rows_before_limit*/) { } + /// Value for rows_before_aggregation field. + virtual void setRowsBeforeAggregation(size_t /*rows_before_aggregation*/) { } - /// Counter to calculate rows_before_aggregation_at_least in processors pipeline. - void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_aggregation_counter.swap(counter); } + /// Counter to calculate rows_before_aggregation in processors pipeline. + void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr counter) override { rows_before_aggregation_counter.swap(counter); } /// Notify about progress. Method could be called from different threads. /// Passed value are delta, that must be summarized. 
- virtual void onProgress(const Progress & /*progress*/) {} + virtual void onProgress(const Progress & /*progress*/) { } /// Content-Type to set when sending HTTP response. virtual std::string getContentType() const { return "text/plain; charset=UTF-8"; } @@ -192,8 +192,8 @@ protected: bool need_write_prefix = true; bool need_write_suffix = true; - RowsBeforeLimitCounterPtr rows_before_limit_counter; - RowsBeforeAggregationCounterPtr rows_before_aggregation_counter; + RowsBeforeStepCounterPtr rows_before_limit_counter; + RowsBeforeStepCounterPtr rows_before_aggregation_counter; Statistics statistics; private: diff --git a/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp index 2fd0536ed02..b19fcfd4a4a 100644 --- a/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp @@ -224,9 +224,9 @@ void XMLRowOutputFormat::writeRowsBeforeAggregationAtLeast() { if (statistics.applied_aggregation) { - writeCString("\t", *ostr); + writeCString("\t", *ostr); writeIntText(statistics.rows_before_aggregation, *ostr); - writeCString("\n", *ostr); + writeCString("\n", *ostr); } } diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index ccdd6308de5..28050691d9e 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -22,9 +22,7 @@ struct StorageLimits; using StorageLimitsList = std::list; class RowsBeforeStepCounter; -using RowsBeforeLimitCounterPtr = std::shared_ptr; - -using RowsBeforeAggregationCounterPtr = std::shared_ptr; +using RowsBeforeStepCounterPtr = std::shared_ptr; class IProcessor; using ProcessorPtr = std::shared_ptr; @@ -366,11 +364,11 @@ public: /// Set rows_before_limit counter for current processor. /// This counter is used to calculate the number of rows right before any filtration of LimitTransform. - virtual void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr /* counter */) {} + virtual void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr /* counter */) { } /// Set rows_before_aggregation counter for current processor. /// This counter is used to calculate the number of rows right before AggregatingTransform. - virtual void setRowsBeforeAggregationCounter(RowsBeforeAggregationCounterPtr /* counter */) { } + virtual void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr /* counter */) { } protected: virtual void onCancel() {} diff --git a/src/Processors/LimitTransform.h b/src/Processors/LimitTransform.h index 515203f6829..45ae5b0ce81 100644 --- a/src/Processors/LimitTransform.h +++ b/src/Processors/LimitTransform.h @@ -30,7 +30,7 @@ private: std::vector sort_column_positions; UInt64 rows_read = 0; /// including the last read block - RowsBeforeLimitCounterPtr rows_before_limit_at_least; + RowsBeforeStepCounterPtr rows_before_limit_at_least; /// State of port's pair. /// Chunks from different port pairs are not mixed for better cache locality. 
@@ -71,7 +71,7 @@ public: InputPort & getInputPort() { return inputs.front(); } OutputPort & getOutputPort() { return outputs.front(); } - void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit_at_least.swap(counter); } + void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr counter) override { rows_before_limit_at_least.swap(counter); } void setInputPortHasCounter(size_t pos) { ports_data[pos].input_port_has_counter = true; } }; diff --git a/src/Processors/OffsetTransform.h b/src/Processors/OffsetTransform.h index 7ef16518540..04486a6c940 100644 --- a/src/Processors/OffsetTransform.h +++ b/src/Processors/OffsetTransform.h @@ -16,7 +16,7 @@ private: UInt64 offset; UInt64 rows_read = 0; /// including the last read block - RowsBeforeLimitCounterPtr rows_before_limit_at_least; + RowsBeforeStepCounterPtr rows_before_limit_at_least; /// State of port's pair. /// Chunks from different port pairs are not mixed for better cache locality. @@ -45,7 +45,7 @@ public: InputPort & getInputPort() { return inputs.front(); } OutputPort & getOutputPort() { return outputs.front(); } - void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit_at_least.swap(counter); } + void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr counter) override { rows_before_limit_at_least.swap(counter); } }; } diff --git a/src/Processors/RowsBeforeStepCounter.h b/src/Processors/RowsBeforeStepCounter.h index d9912bfa076..789731f82bd 100644 --- a/src/Processors/RowsBeforeStepCounter.h +++ b/src/Processors/RowsBeforeStepCounter.h @@ -5,7 +5,7 @@ namespace DB { -/// This class helps to calculate rows_before_limit_at_least and rows_before_aggregation_at_least. +/// This class helps to calculate rows_before_limit_at_least and rows_before_aggregation. class RowsBeforeStepCounter { public: @@ -31,6 +31,6 @@ private: std::atomic_bool has_applied_step = false; }; -using RowsBeforeLimitCounterPtr = std::shared_ptr; +using RowsBeforeStepCounterPtr = std::shared_ptr; } diff --git a/src/Processors/Sources/DelayedSource.h b/src/Processors/Sources/DelayedSource.h index 3138a1ab42a..4ee90e34599 100644 --- a/src/Processors/Sources/DelayedSource.h +++ b/src/Processors/Sources/DelayedSource.h @@ -30,15 +30,15 @@ public: OutputPort * getTotalsPort() { return totals; } OutputPort * getExtremesPort() { return extremes; } - void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit.swap(counter); } - void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_aggregation.swap(counter); } + void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr counter) override { rows_before_limit.swap(counter); } + void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr counter) override { rows_before_aggregation.swap(counter); } private: QueryPlanResourceHolder resources; Creator creator; Processors processors; - RowsBeforeLimitCounterPtr rows_before_limit; - RowsBeforeLimitCounterPtr rows_before_aggregation; + RowsBeforeStepCounterPtr rows_before_limit; + RowsBeforeStepCounterPtr rows_before_aggregation; /// Outputs for DelayedSource. 
OutputPort * main = nullptr; diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 9a6fe239ee6..683db2c1c2c 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -50,8 +50,6 @@ RemoteSource::RemoteSource(RemoteQueryExecutorPtr executor, bool add_aggregation { if (info.hasAppliedAggregation()) rows_before_aggregation->add(info.getRowsBeforeAggregation()); - else - manually_add_rows_before_aggregation_counter = true; /// Remote subquery doesn't contain a group by } }); } @@ -171,8 +169,6 @@ std::optional RemoteSource::tryGenerate() { if (manually_add_rows_before_limit_counter) rows_before_limit->add(rows); - if (manually_add_rows_before_aggregation_counter) - rows_before_aggregation->add(rows); query_executor->finish(); return {}; } diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index bbc563ec5fe..94dfb46e5f6 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -25,8 +25,8 @@ public: void work() override; String getName() const override { return "Remote"; } - void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_limit.swap(counter); } - void setRowsBeforeAggregationCounter(RowsBeforeLimitCounterPtr counter) override { rows_before_aggregation.swap(counter); } + void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr counter) override { rows_before_limit.swap(counter); } + void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr counter) override { rows_before_aggregation.swap(counter); } /// Stop reading from stream if output port is finished. void onUpdatePorts() override; @@ -45,8 +45,8 @@ private: bool executor_finished = false; bool add_aggregation_info = false; RemoteQueryExecutorPtr query_executor; - RowsBeforeLimitCounterPtr rows_before_limit; - RowsBeforeLimitCounterPtr rows_before_aggregation; + RowsBeforeStepCounterPtr rows_before_limit; + RowsBeforeStepCounterPtr rows_before_aggregation; const bool async_read; const bool async_query_sending; @@ -54,7 +54,6 @@ private: int fd = -1; size_t rows = 0; bool manually_add_rows_before_limit_counter = false; - bool manually_add_rows_before_aggregation_counter = false; }; /// Totals source from RemoteQueryExecutor. 
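(Editor's aside, not part of any patch above.) The RowsBeforeStepCounter that these commits rename and rewire is, at its core, a shared atomic counter: every processor sitting in front of a step (a LIMIT or an aggregation) holds the same pointer and adds the rows it has consumed, and the output format reads the accumulated total when it writes statistics. A minimal standalone sketch of that pattern, using simplified names and only the standard library:

    #include <atomic>
    #include <cstdint>
    #include <iostream>
    #include <memory>

    // Simplified stand-in for RowsBeforeStepCounter: one shared, thread-safe counter
    // that several pipeline steps can update concurrently.
    class RowsBeforeStepCounterSketch
    {
    public:
        void add(uint64_t rows)
        {
            applied_step.store(true, std::memory_order_release);
            rows_before_step.fetch_add(rows, std::memory_order_relaxed);
        }

        bool hasAppliedStep() const { return applied_step.load(std::memory_order_acquire); }
        uint64_t get() const { return rows_before_step.load(std::memory_order_relaxed); }

    private:
        std::atomic<uint64_t> rows_before_step{0};
        std::atomic<bool> applied_step{false};
    };

    int main()
    {
        auto counter = std::make_shared<RowsBeforeStepCounterSketch>();

        // Pretend three aggregating transforms each report the rows they consumed.
        for (uint64_t rows : {100u, 250u, 50u})
            counter->add(rows);

        if (counter->hasAppliedStep())
            std::cout << "rows_before_aggregation = " << counter->get() << '\n'; // prints 400
        return 0;
    }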
diff --git a/src/Processors/Transforms/AggregatingInOrderTransform.cpp b/src/Processors/Transforms/AggregatingInOrderTransform.cpp index 9ffe15d0f85..7e7bf815832 100644 --- a/src/Processors/Transforms/AggregatingInOrderTransform.cpp +++ b/src/Processors/Transforms/AggregatingInOrderTransform.cpp @@ -81,6 +81,8 @@ void AggregatingInOrderTransform::consume(Chunk chunk) is_consume_started = true; } + if (rows_before_aggregation) + rows_before_aggregation->add(rows); src_rows += rows; src_bytes += chunk.bytes(); diff --git a/src/Processors/Transforms/AggregatingInOrderTransform.h b/src/Processors/Transforms/AggregatingInOrderTransform.h index 5d50e97f552..1fea9bad131 100644 --- a/src/Processors/Transforms/AggregatingInOrderTransform.h +++ b/src/Processors/Transforms/AggregatingInOrderTransform.h @@ -42,6 +42,8 @@ public: void work() override; void consume(Chunk chunk); + + void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr counter) override { rows_before_aggregation.swap(counter); } private: void generate(); @@ -83,6 +85,8 @@ private: Chunk current_chunk; Chunk to_push_chunk; + RowsBeforeStepCounterPtr rows_before_aggregation; + LoggerPtr log = getLogger("AggregatingInOrderTransform"); }; diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 684de0a3e8c..1a0395f0c9a 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -683,8 +683,8 @@ void AggregatingTransform::consume(Chunk chunk) LOG_TRACE(log, "Aggregating"); is_consume_started = true; } - if (rows_before_aggregation_at_least) - rows_before_aggregation_at_least->add(num_rows); + if (rows_before_aggregation) + rows_before_aggregation->add(num_rows); src_rows += num_rows; src_bytes += chunk.bytes(); diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index 9f9638175f0..3f7a698d0e0 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -169,10 +169,7 @@ public: Status prepare() override; void work() override; Processors expandPipeline() override; - void setRowsBeforeAggregationCounter(RowsBeforeAggregationCounterPtr counter) override - { - rows_before_aggregation_at_least.swap(counter); - } + void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr counter) override { rows_before_aggregation.swap(counter); } protected: void consume(Chunk chunk); @@ -216,7 +213,7 @@ private: bool is_consume_started = false; - RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least; + RowsBeforeStepCounterPtr rows_before_aggregation; void initGenerate(); }; diff --git a/src/Processors/Transforms/PartialSortingTransform.h b/src/Processors/Transforms/PartialSortingTransform.h index abb4b290322..73c490d5b92 100644 --- a/src/Processors/Transforms/PartialSortingTransform.h +++ b/src/Processors/Transforms/PartialSortingTransform.h @@ -20,7 +20,7 @@ public: String getName() const override { return "PartialSortingTransform"; } - void setRowsBeforeLimitCounter(RowsBeforeLimitCounterPtr counter) override { read_rows.swap(counter); } + void setRowsBeforeLimitCounter(RowsBeforeStepCounterPtr counter) override { read_rows.swap(counter); } protected: void transform(Chunk & chunk) override; @@ -29,7 +29,7 @@ private: const SortDescription description; SortDescriptionWithPositions description_with_positions; const UInt64 limit; - RowsBeforeLimitCounterPtr read_rows; + RowsBeforeStepCounterPtr 
read_rows; Columns sort_description_threshold_columns; diff --git a/src/QueryPipeline/ProfileInfo.cpp b/src/QueryPipeline/ProfileInfo.cpp index 87729b7c90e..69575939edc 100644 --- a/src/QueryPipeline/ProfileInfo.cpp +++ b/src/QueryPipeline/ProfileInfo.cpp @@ -1,14 +1,14 @@ #include +#include +#include #include #include -#include - namespace DB { -void ProfileInfo::read(ReadBuffer & in) +void ProfileInfo::read(ReadBuffer & in, UInt64 server_revision) { readVarUInt(rows, in); readVarUInt(blocks, in); @@ -16,12 +16,15 @@ void ProfileInfo::read(ReadBuffer & in) readBinary(applied_limit, in); readVarUInt(rows_before_limit, in); readBinary(calculated_rows_before_limit, in); - readBinary(applied_aggregation, in); - readVarUInt(rows_before_aggregation, in); + if (server_revision >= DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION) + { + readBinary(applied_aggregation, in); + readVarUInt(rows_before_aggregation, in); + } } -void ProfileInfo::write(WriteBuffer & out) const +void ProfileInfo::write(WriteBuffer & out, UInt64 client_revision) const { writeVarUInt(rows, out); writeVarUInt(blocks, out); @@ -29,8 +32,11 @@ void ProfileInfo::write(WriteBuffer & out) const writeBinary(hasAppliedLimit(), out); writeVarUInt(getRowsBeforeLimit(), out); writeBinary(calculated_rows_before_limit, out); - writeBinary(hasAppliedAggregation(), out); - writeVarUInt(getRowsBeforeAggregation(), out); + if (client_revision >= DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION) + { + writeBinary(hasAppliedAggregation(), out); + writeVarUInt(getRowsBeforeAggregation(), out); + } } diff --git a/src/QueryPipeline/ProfileInfo.h b/src/QueryPipeline/ProfileInfo.h index e2467afd6f4..92c83c8c3be 100644 --- a/src/QueryPipeline/ProfileInfo.h +++ b/src/QueryPipeline/ProfileInfo.h @@ -40,8 +40,8 @@ struct ProfileInfo /// Binary serialization and deserialization of main fields. /// Writes only main fields i.e. fields that required by internal transmission protocol. - void read(ReadBuffer & in); - void write(WriteBuffer & out) const; + void read(ReadBuffer & in, UInt64 server_revision); + void write(WriteBuffer & out, UInt64 client_revision) const; /// Sets main fields from other object (see methods above). /// If skip_block_size_info if true, then rows, bytes and block fields are ignored. 
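(Editor's aside, not part of the patch.) The revision checks added to ProfileInfo::read/write above follow the usual recipe for extending a versioned wire format without breaking older peers: a field introduced in a newer protocol revision is written and read only when the other side has advertised a revision that already knows about it, so both sides always agree on the stream layout. A self-contained sketch of the idea, with made-up names and a text stream standing in for the real read/write buffers:

    #include <cstdint>
    #include <iostream>
    #include <sstream>

    // Assumed constant for the sketch, mirroring "minimum revision that knows the new field".
    static constexpr uint64_t REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469;

    struct ProfileInfoSketch
    {
        uint64_t rows = 0;
        bool applied_aggregation = false;
        uint64_t rows_before_aggregation = 0; // only newer peers understand this field

        void write(std::ostream & out, uint64_t peer_revision) const
        {
            out << rows << ' ';
            if (peer_revision >= REVISION_WITH_ROWS_BEFORE_AGGREGATION)
                out << applied_aggregation << ' ' << rows_before_aggregation << ' ';
        }

        void read(std::istream & in, uint64_t peer_revision)
        {
            in >> rows;
            if (peer_revision >= REVISION_WITH_ROWS_BEFORE_AGGREGATION)
                in >> applied_aggregation >> rows_before_aggregation;
        }
    };

    int main()
    {
        ProfileInfoSketch sent{1000, true, 400};
        ProfileInfoSketch received;

        std::stringstream wire;
        sent.write(wire, 54469);     // peer is new enough: the extra fields go over the wire
        received.read(wire, 54469);

        std::cout << received.rows_before_aggregation << '\n'; // prints 400

        // With an old peer (revision < 54469) both sides skip the extra fields,
        // so the stream layouts still match.
        return 0;
    }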
diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 311c8a60531..4ec5cca4dc5 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -140,7 +141,7 @@ static void checkCompleted(Processors & processors) static void initRowsBeforeLimit(IOutputFormat * output_format) { - RowsBeforeLimitCounterPtr rows_before_limit_at_least; + RowsBeforeStepCounterPtr rows_before_limit_at_least; std::vector processors; std::map> limit_candidates; std::unordered_set visited; @@ -280,20 +281,20 @@ static void initRowsBeforeAggregation(std::shared_ptr processors, IO if (!processors->empty()) { - RowsBeforeAggregationCounterPtr rows_before_aggregation_at_least = std::make_shared(); + RowsBeforeStepCounterPtr rows_before_aggregation = std::make_shared(); for (auto processor : *processors) { - if (auto transform = std::dynamic_pointer_cast(processor)) + if (typeid_cast(processor.get()) || typeid_cast(processor.get())) { - transform->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); + processor->setRowsBeforeAggregationCounter(rows_before_aggregation); has_aggregation = true; } if (typeid_cast(processor.get()) || typeid_cast(processor.get())) - processor->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); + processor->setRowsBeforeAggregationCounter(rows_before_aggregation); } if (has_aggregation) - rows_before_aggregation_at_least->add(0); - output_format->setRowsBeforeAggregationCounter(rows_before_aggregation_at_least); + rows_before_aggregation->add(0); + output_format->setRowsBeforeAggregationCounter(rows_before_aggregation); } } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 14a2bceebf1..833f84bb866 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1212,7 +1212,7 @@ void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest re void TCPHandler::sendProfileInfo(const ProfileInfo & info) { writeVarUInt(Protocol::Server::ProfileInfo, *out); - info.write(*out); + info.write(*out, client_tcp_protocol_version); out->next(); } diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference index 36db9721599..2fbdf325760 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference @@ -23,7 +23,7 @@ "rows": 10, - "rows_before_aggregation_at_least": 10 + "rows_before_aggregation": 10 } @@ -68,7 +68,7 @@ 10 - 10 + 10 { "meta": @@ -88,7 +88,7 @@ "rows": 3, - "rows_before_aggregation_at_least": 3 + "rows_before_aggregation": 3 } { "meta": @@ -125,7 +125,7 @@ "rows": 20, - "rows_before_aggregation_at_least": 20 + "rows_before_aggregation": 20 } { "meta": @@ -145,7 +145,7 @@ "rows_before_limit_at_least": 1, - "rows_before_aggregation_at_least": 20 + "rows_before_aggregation": 20 } { "meta": @@ -194,7 +194,7 @@ "rows_before_limit_at_least": 60, - "rows_before_aggregation_at_least": 60 + "rows_before_aggregation": 60 } { "meta": @@ -233,7 +233,7 @@ "rows_before_limit_at_least": 40, - "rows_before_aggregation_at_least": 40 + "rows_before_aggregation": 40 } { "meta": @@ -282,7 +282,7 @@ "rows_before_limit_at_least": 30, - "rows_before_aggregation_at_least": 60 + "rows_before_aggregation": 60 } { "meta": @@ -321,7 +321,7 @@ "rows_before_limit_at_least": 20, - 
"rows_before_aggregation_at_least": 40 + "rows_before_aggregation": 40 } { "meta": @@ -341,7 +341,7 @@ "rows_before_limit_at_least": 1, - "rows_before_aggregation_at_least": 40 + "rows_before_aggregation": 40 } { "meta": @@ -370,5 +370,179 @@ "rows_before_limit_at_least": 10, - "rows_before_aggregation_at_least": 20 + "rows_before_aggregation": 20 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [4], + [3], + [2], + [5], + [1], + [6], + [7], + [9], + [8] + ], + + "rows": 10, + + "rows_before_limit_at_least": 20, + + "rows_before_aggregation": 20 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9] + ], + + "rows": 10, + + "rows_before_aggregation": 10 +} +{ + "meta": + [ + { + "name": "max(i)", + "type": "Int32" + } + ], + + "data": + [ + [19] + ], + + "rows": 1, + + "rows_before_limit_at_least": 1, + + "rows_before_aggregation": 20 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19], + [20], + [21], + [22], + [23], + [24], + [25], + [26], + [27], + [28], + [29] + ], + + "rows": 30, + + "rows_before_limit_at_least": 60, + + "rows_before_aggregation": 60 +} +{ + "meta": + [ + { + "name": "i", + "type": "Int32" + } + ], + + "data": + [ + [0], + [1], + [2], + [3], + [4], + [5], + [6], + [7], + [8], + [9], + [10], + [11], + [12], + [13], + [14], + [15], + [16], + [17], + [18], + [19], + [20], + [21], + [22], + [23], + [24], + [25], + [26], + [27], + [28], + [29] + ], + + "rows": 30, + + "rows_before_limit_at_least": 30, + + "rows_before_aggregation": 60 } diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql index 31b817e8a65..17e3f3c2cef 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql @@ -1,18 +1,15 @@ -- Tags: no-parallel, no-random-merge-tree-settings +set rows_before_aggregation = 1, exact_rows_before_limit = 1, output_format_write_statistics = 0, max_block_size = 100; + drop table if exists test; create table test (i int) engine MergeTree order by tuple(); - insert into test select arrayJoin(range(10000)); -set rows_before_aggregation = 1, exact_rows_before_limit = 1, output_format_write_statistics = 0, max_block_size = 100; - select * from test where i < 10 group by i order by i FORMAT JSONCompact; select * from test where i < 10 group by i order by i FORMAT XML; - select * from test group by i having i in (10, 11, 12) order by i FORMAT JSONCompact; - select * from test where i < 20 group by i order by i FORMAT JSONCompact; select max(i) from test where i < 20 limit 1 FORMAT JSONCompact; @@ -23,8 +20,27 @@ select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i set prefer_localhost_replica = 1; select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 group by i order by i FORMAT JSONCompact; + select max(i) from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 FORMAT JSONCompact; +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group 
by i limit 10 FORMAT JSONCompact; +set prefer_localhost_replica = 0; select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i limit 10 FORMAT JSONCompact; drop table if exists test; + +create table test (i int) engine MergeTree order by i; + +insert into test select arrayJoin(range(10000)); + +set optimize_aggregation_in_order=1; +select * from test where i < 10 group by i order by i FORMAT JSONCompact; +select max(i) from test where i < 20 limit 1 FORMAT JSONCompact; + +set prefer_localhost_replica = 0; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; + +set prefer_localhost_replica = 1; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; + +drop table if exists test; From 77d6e781f593fdcfe50506aba855eda747c05c01 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Tue, 16 Jul 2024 11:25:18 +0800 Subject: [PATCH 0409/2361] Update src/Storages/VirtualColumnUtils.cpp Co-authored-by: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> --- src/Storages/VirtualColumnUtils.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index f7ffd47f9e8..df833fa6a66 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -282,15 +282,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( node_copy.children.clear(); for (const auto * child : node->children) if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, strict)) - { - /// Expression like (now_allowed AND allowed) is not allowed if strict = true. This is important for - /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is - /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply - /// trivial count. - if (strict) - return nullptr; node_copy.children.push_back(child_copy); - } + /// Expression like (now_allowed AND allowed) is not allowed if strict = true. This is important for + /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is + /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply + /// trivial count. 
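// A standalone sketch of the splitting rule described in the comment above, assuming a
// deliberately simplified filter model (a list of conjuncts, each depending on a single
// column) instead of ActionsDAG; the names below are illustrative, not ClickHouse API.
#include <optional>
#include <set>
#include <string>
#include <vector>

struct Conjunct { std::string required_column; };   /// e.g. "_partition_id" or "rowNumberInBlock()"
using Filter = std::vector<Conjunct>;

/// Keep only the conjuncts computable from `allowed_inputs`. With strict = true (the mode used
/// for the trivial count optimization) the whole filter is rejected as soon as one conjunct has
/// to be dropped: answering count() from the surviving "_partition_id = '0'" part alone would
/// overcount the rows that "rowNumberInBlock() = 1" was supposed to filter out.
std::optional<Filter> splitFilterForAllowedInputs(
    const Filter & filter, const std::set<std::string> & allowed_inputs, bool strict)
{
    Filter kept;
    for (const auto & conjunct : filter)
    {
        if (allowed_inputs.contains(conjunct.required_column))
            kept.push_back(conjunct);
        else if (strict)
            return std::nullopt;    /// mirrors `return nullptr` in the hunk above
    }
    if (kept.empty())
        return std::nullopt;
    return kept;
}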
+ else if (strict) + return nullptr; if (node_copy.children.empty()) return nullptr; From b8b076fd5bedc80cf53781a0190aabb60212a7ef Mon Sep 17 00:00:00 2001 From: morning-color Date: Tue, 16 Jul 2024 11:37:17 +0800 Subject: [PATCH 0410/2361] Fix style --- src/Processors/Transforms/AggregatingInOrderTransform.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Processors/Transforms/AggregatingInOrderTransform.h b/src/Processors/Transforms/AggregatingInOrderTransform.h index 1fea9bad131..9a1ba513f43 100644 --- a/src/Processors/Transforms/AggregatingInOrderTransform.h +++ b/src/Processors/Transforms/AggregatingInOrderTransform.h @@ -42,7 +42,6 @@ public: void work() override; void consume(Chunk chunk); - void setRowsBeforeAggregationCounter(RowsBeforeStepCounterPtr counter) override { rows_before_aggregation.swap(counter); } private: From da5c92e83ef5b485a119ad5a79098bfc9c216d3e Mon Sep 17 00:00:00 2001 From: morning-color Date: Tue, 16 Jul 2024 12:00:27 +0800 Subject: [PATCH 0411/2361] FIx bug --- src/QueryPipeline/QueryPipeline.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 4ec5cca4dc5..ff2b5aba13d 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -544,7 +544,7 @@ void QueryPipeline::complete(std::shared_ptr format) extremes = nullptr; initRowsBeforeLimit(format.get()); - for (const auto context : resources.interpreter_context) + for (const auto & context : resources.interpreter_context) { if (context->getSettingsRef().rows_before_aggregation) { From b863ef83c351bd87007b4cacc66cff3c5276666f Mon Sep 17 00:00:00 2001 From: morning-color Date: Tue, 16 Jul 2024 13:00:01 +0800 Subject: [PATCH 0412/2361] Fix compile problem --- src/QueryPipeline/QueryPipeline.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index ff2b5aba13d..844b9e3b039 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -544,7 +545,7 @@ void QueryPipeline::complete(std::shared_ptr format) extremes = nullptr; initRowsBeforeLimit(format.get()); - for (const auto & context : resources.interpreter_context) + for (const auto context : resources.interpreter_context) { if (context->getSettingsRef().rows_before_aggregation) { From 403afbc77057125560d88239f6db182056303657 Mon Sep 17 00:00:00 2001 From: morning-color Date: Tue, 16 Jul 2024 16:05:08 +0800 Subject: [PATCH 0413/2361] Add settings rows_before_aggregation to src/Core/SettingsChangesHistory.h --- src/Core/SettingsChangesHistory.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h index cdc955b38bc..37c91561b0a 100644 --- a/src/Core/SettingsChangesHistory.h +++ b/src/Core/SettingsChangesHistory.h @@ -85,7 +85,10 @@ namespace SettingsChangesHistory /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972) static const std::map settings_changes_history = { - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, + {"24.6", + { + {"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"}, + 
{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, From 2bc65fe2080260d16e27df965610197a38052705 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 09:44:05 +0000 Subject: [PATCH 0414/2361] Make ColumnSparse::updateWeakHash32 consistent with internal column. --- src/Columns/ColumnSparse.cpp | 38 +++++++++++++++++++++++++++++++++--- src/Columns/ColumnSparse.h | 3 +++ 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp index 809586d8810..ea4d23c1678 100644 --- a/src/Columns/ColumnSparse.cpp +++ b/src/Columns/ColumnSparse.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -684,13 +685,26 @@ void ColumnSparse::updateWeakHash32(WeakHash32 & hash) const throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " "column size is {}, hash size is {}", _size, hash.getData().size()); - auto offset_it = begin(); + size_t values_size = values->size(); + WeakHash32 values_hash(values_size); + auto & hash_data = hash.getData(); + auto & values_hash_data = values_hash.getData(); + const auto & offsets_data = getOffsetsData(); + + if (getNumberOfDefaultRows() > 0) + values_hash_data[0] = hash_data[getFirstDefaultValueIndex()]; + + for (size_t i = 0; i < values_size; ++i) + values_hash_data[i + 1] = hash_data[offsets_data[i]]; + + values->updateWeakHash32(values_hash); + + auto offset_it = begin(); for (size_t i = 0; i < _size; ++i, ++offset_it) { size_t value_index = offset_it.getValueIndex(); - auto data_ref = values->getDataAt(value_index); - hash_data[i] = ::updateWeakHash32(reinterpret_cast(data_ref.data), data_ref.size, hash_data[i]); + hash_data[i] = values_hash_data[value_index]; } } @@ -807,6 +821,24 @@ size_t ColumnSparse::getValueIndex(size_t n) const return it - offsets_data.begin() + 1; } +size_t ColumnSparse::getFirstDefaultValueIndex() const +{ + if (getNumberOfDefaultRows() == 0) + return size(); + + const auto & offsets_data = getOffsetsData(); + size_t off_size = offsets_data.size(); + + if (off_size == 0 || offsets_data[0] > 0) + return 0; + + size_t idx = 0; + while (idx + 1 < off_size && offsets_data[idx] + 1 == offsets_data[idx + 1]) + ++idx; + + return offsets_data[idx] + 1; +} + ColumnSparse::Iterator ColumnSparse::getIterator(size_t n) const { const auto & offsets_data = getOffsetsData(); diff --git a/src/Columns/ColumnSparse.h b/src/Columns/ColumnSparse.h index 3e34d1de94a..4860f5171f7 100644 --- a/src/Columns/ColumnSparse.h +++ b/src/Columns/ColumnSparse.h @@ -173,6 +173,9 @@ public: /// O(log(offsets.size())) complexity, size_t getValueIndex(size_t n) const; + /// Returns an index of the first default value, or size() if there is no defaults. 
+ size_t getFirstDefaultValueIndex() const; + const IColumn & getValuesColumn() const { return *values; } IColumn & getValuesColumn() { return *values; } From 24b9e1885216a62294e68a977437b4d4c62a12f9 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 09:50:27 +0000 Subject: [PATCH 0415/2361] Add a test. --- ...l_window_finctions_and_column_sparce_bug.reference | 8 ++++++++ ...arallel_window_finctions_and_column_sparce_bug.sql | 11 +++++++++++ 2 files changed, 19 insertions(+) create mode 100644 tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.reference create mode 100644 tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.sql diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.reference b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.reference new file mode 100644 index 00000000000..f11ec57a425 --- /dev/null +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.reference @@ -0,0 +1,8 @@ +false 1 1 +true 1 1 +--- +false 1 1 +false 1 2 +false 1 3 +true 1 1 +true 1 2 diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.sql b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.sql new file mode 100644 index 00000000000..a4c0200813c --- /dev/null +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.sql @@ -0,0 +1,11 @@ +create table t(c Int32, d Bool) Engine=MergeTree order by c; +system stop merges t; + +insert into t values (1, 0); +insert into t values (1, 0); +insert into t values (1, 1); +insert into t values (1, 0)(1, 1); + +SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t qualify c8=1 order by d settings max_threads=2; +SELECT '---'; +SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t order by d, c8 settings max_threads=2; From 902e548a2daf087bdb4363694a91bf665a7f7f9b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 09:52:00 +0000 Subject: [PATCH 0416/2361] Fix typo. --- ...205_parallel_window_finctions_and_column_sparse_bug.reference} | 0 ... 
=> 03205_parallel_window_finctions_and_column_sparse_bug.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{03205_parallel_window_finctions_and_column_sparce_bug.reference => 03205_parallel_window_finctions_and_column_sparse_bug.reference} (100%) rename tests/queries/0_stateless/{03205_parallel_window_finctions_and_column_sparce_bug.sql => 03205_parallel_window_finctions_and_column_sparse_bug.sql} (100%) diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.reference b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference similarity index 100% rename from tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.reference rename to tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.sql b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql similarity index 100% rename from tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparce_bug.sql rename to tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql From 47e3e999241c8084a2b0389d0e854d705fd6e510 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 16 Jul 2024 10:06:21 +0000 Subject: [PATCH 0417/2361] Avoid calling type->getName --- src/Columns/ColumnDynamic.cpp | 6 +++--- src/Columns/ColumnDynamic.h | 3 ++- src/DataTypes/Serializations/ISerialization.h | 3 +++ src/Formats/JSONExtractTree.cpp | 4 ++-- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index c735238f515..b9ee867001a 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -57,10 +57,10 @@ ColumnDynamic::MutablePtr ColumnDynamic::create(MutableColumnPtr variant_column, return create(std::move(variant_column), variant_info, max_dynamic_types_, statistics_); } -bool ColumnDynamic::addNewVariant(const DB::DataTypePtr & new_variant) +bool ColumnDynamic::addNewVariant(const DataTypePtr & new_variant, const String & variant_name) { /// Check if we already have such variant. - if (variant_info.variant_name_to_discriminator.contains(new_variant->getName())) + if (variant_info.variant_name_to_discriminator.contains(variant_name)) return true; /// Check if we reached maximum number of variants. @@ -75,7 +75,7 @@ bool ColumnDynamic::addNewVariant(const DB::DataTypePtr & new_variant) } /// If we have (max_dynamic_types - 1) number of variants and don't have String variant, we can add only String variant. 
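// A standalone model of the admission rule restated in the comment above, assuming
// std::set<std::string> in place of variant_info; the caller is now expected to compute the
// type name once and pass it in, which is the point of this change. Names are illustrative only.
#include <cstddef>
#include <set>
#include <string>

bool canAddVariant(const std::set<std::string> & variant_names, std::size_t max_dynamic_types, const std::string & new_variant_name)
{
    if (variant_names.contains(new_variant_name))
        return true;                                /// already present, nothing to add
    if (variant_names.size() >= max_dynamic_types)
        return false;                               /// no free slots left
    /// The last free slot is reserved for the fallback "String" variant: once
    /// (max_dynamic_types - 1) variants exist, only "String" may still be added.
    if (variant_names.size() == max_dynamic_types - 1
        && new_variant_name != "String"
        && !variant_names.contains("String"))
        return false;
    return true;
}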
- if (variant_info.variant_names.size() == max_dynamic_types - 1 && new_variant->getName() != "String" && !variant_info.variant_name_to_discriminator.contains("String")) + if (variant_info.variant_names.size() == max_dynamic_types - 1 && variant_name != "String" && !variant_info.variant_name_to_discriminator.contains("String")) return false; const DataTypes & current_variants = assert_cast(*variant_info.variant_type).getVariants(); diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 630eb8ad7a8..0edd90b29db 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -334,7 +334,8 @@ public: const ColumnVariant & getVariantColumn() const { return assert_cast(*variant_column); } ColumnVariant & getVariantColumn() { return assert_cast(*variant_column); } - bool addNewVariant(const DataTypePtr & new_variant); + bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } + bool addNewVariant(const DataTypePtr & new_variant, const String & variant_name); void addStringVariant(); bool hasDynamicStructure() const override { return true; } diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 22bbdb25c43..5d6ac2707ed 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -447,6 +447,9 @@ public: static size_t getArrayLevel(const SubstreamPath & path); static bool hasSubcolumnForPath(const SubstreamPath & path, size_t prefix_len); static SubstreamData createFromPath(const SubstreamPath & path, size_t prefix_len); + + /// Returns true if subcolumn doesn't actually stores any data in column and doen'st require a separate stream + /// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators);. 
static bool isFictitiousSubcolumn(const SubstreamPath & path, size_t prefix_len); protected: diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 2eaddddb567..b19cf61e207 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1384,9 +1384,9 @@ public: jsonElementToString(element, format_settings)); } - if (column_dynamic.addNewVariant(element_type)) + auto element_type_name = element_type->getName(); + if (column_dynamic.addNewVariant(element_type, element_type_name)) { - auto element_type_name = element_type->getName(); auto it = json_extract_nodes_cache.find(element_type_name); if (it == json_extract_nodes_cache.end()) it = json_extract_nodes_cache.emplace(element_type_name, buildJSONExtractTree(element_type, "Dynamic inference")).first; From 478616de3d03495cf8c324da9464a9807b51ba41 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 16 Jul 2024 10:54:39 +0000 Subject: [PATCH 0418/2361] forgot --- src/Interpreters/InterpreterCreateQuery.cpp | 52 ++++++++++----------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 94230f0e7d1..3b23c6899e9 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1010,32 +1010,32 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); /// For exrternal tables with restore_replace_external_engine_to_null setting we replace external engines to /// Null table engine. - else (create.storage->engine->name == "AzureBlobStorage" || - create.storage->engine->name == "AzureQueue" || - create.storage->engine->name == "COSN" || - create.storage->engine->name == "DeltaLake" || - create.storage->engine->name == "Dictionary" || - create.storage->engine->name == "Executable" || - create.storage->engine->name == "ExecutablePool" || - create.storage->engine->name == "ExternalDistributed" || - create.storage->engine->name == "File" || - create.storage->engine->name == "Hudi" || - create.storage->engine->name == "Iceberg" || - create.storage->engine->name == "JDBC" || - create.storage->engine->name == "Kafka" || - create.storage->engine->name == "MaterializedPostgreSQL" || - create.storage->engine->name == "MongoDB" || - create.storage->engine->name == "MySQL" || - create.storage->engine->name == "NATS" || - create.storage->engine->name == "ODBC" || - create.storage->engine->name == "OSS" || - create.storage->engine->name == "PostgreSQL" || - create.storage->engine->name == "RabbitMQ" || - create.storage->engine->name == "Redis" || - create.storage->engine->name == "S3" || - create.storage->engine->name == "S3Queue" || - create.storage->engine->name == "TinyLog" || - create.storage->engine->name == "URL") + else if (create.storage->engine->name == "AzureBlobStorage" || + create.storage->engine->name == "AzureQueue" || + create.storage->engine->name == "COSN" || + create.storage->engine->name == "DeltaLake" || + create.storage->engine->name == "Dictionary" || + create.storage->engine->name == "Executable" || + create.storage->engine->name == "ExecutablePool" || + create.storage->engine->name == "ExternalDistributed" || + create.storage->engine->name == "File" || + create.storage->engine->name == "Hudi" || + create.storage->engine->name == "Iceberg" || + create.storage->engine->name == "JDBC" 
|| + create.storage->engine->name == "Kafka" || + create.storage->engine->name == "MaterializedPostgreSQL" || + create.storage->engine->name == "MongoDB" || + create.storage->engine->name == "MySQL" || + create.storage->engine->name == "NATS" || + create.storage->engine->name == "ODBC" || + create.storage->engine->name == "OSS" || + create.storage->engine->name == "PostgreSQL" || + create.storage->engine->name == "RabbitMQ" || + create.storage->engine->name == "Redis" || + create.storage->engine->name == "S3" || + create.storage->engine->name == "S3Queue" || + create.storage->engine->name == "TinyLog" || + create.storage->engine->name == "URL") { if (getContext()->getSettingsRef().restore_replace_external_engine_to_null) setNullTableEngine(*create.storage); From 85c42348bcb47325be61505a7d908b46be9fe3b3 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Tue, 16 Jul 2024 11:28:59 +0000 Subject: [PATCH 0419/2361] address reviews Signed-off-by: Duc Canh Le --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 18 +++++++++--------- src/Storages/VirtualColumnUtils.h | 10 +++++++++- ...3_count_with_non_deterministic_function.sql | 2 +- 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index eb07df8f012..faf2741a456 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1144,7 +1144,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr, /*strict=*/ true); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false); if (!filter_dag) return {}; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index df833fa6a66..151079154b1 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -272,7 +272,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( const ActionsDAG::Node * node, const Block * allowed_inputs, ActionsDAG::Nodes & additional_nodes, - bool strict) + bool allow_non_deterministic_functions) { if (node->type == ActionsDAG::ActionType::FUNCTION) { @@ -281,13 +281,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto & node_copy = additional_nodes.emplace_back(*node); node_copy.children.clear(); for (const auto * child : node->children) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, strict)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) node_copy.children.push_back(child_copy); - /// Expression like (now_allowed AND allowed) is not allowed if strict = true. This is important for + /// Expression like (now_allowed AND allowed) is not allowed if allow_non_deterministic_functions = true. This is important for /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply /// trivial count. 
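// A toy illustration of the consumer side of this flag, assuming made-up names and data:
// MergeTreeData::totalRowsByPartitionPredicateImpl (changed above) calls the splitter with
// allow_non_deterministic_functions = false, so a predicate that still contains a
// non-deterministic conjunct disables the metadata-only count below instead of being
// silently truncated to its partition part.
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

struct Part { std::string partition_id; std::uint64_t rows; };

/// Answers count() from part metadata only when the whole predicate is a partition predicate.
std::optional<std::uint64_t> trivialCount(
    const std::vector<Part> & parts, const std::string & partition_id, bool predicate_fully_extracted)
{
    if (!predicate_fully_extracted)
        return std::nullopt;            /// fall back to reading and filtering actual rows
    std::uint64_t total = 0;
    for (const auto & part : parts)
        if (part.partition_id == partition_id)
            total += part.rows;
    return total;
}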
- else if (strict) + else if (!allow_non_deterministic_functions) return nullptr; if (node_copy.children.empty()) @@ -314,7 +314,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { auto & node_copy = additional_nodes.emplace_back(*node); for (auto & child : node_copy.children) - if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, strict); !child) + if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions); !child) return nullptr; return &node_copy; @@ -328,7 +328,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto index_hint_dag = index_hint->getActions()->clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag->getOutputs()) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, strict)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) atoms.push_back(child_copy); if (!atoms.empty()) @@ -362,13 +362,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool strict) +ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions) { if (!predicate) return nullptr; ActionsDAG::Nodes additional_nodes; - const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, strict); + const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_non_deterministic_functions); if (!res) return nullptr; @@ -377,7 +377,7 @@ ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context) { - auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*strict=*/ false); + auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false); if (dag) filterBlockWithDAG(dag, block, context); } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 5b41e56c0ef..e5cfa47c8f6 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -32,7 +32,15 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. -ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool strict = false); +/// When allow_non_deterministic_functions is true then even if the predicate contains non-deterministic +/// functions, we still allow to extract a part of the predicate, otherwise we return nullptr. +/// allow_non_deterministic_functions must be false when we are going to use the result to filter parts in +/// MergeTreeData::totalRowsByPartitionPredicateImp. For example, if the query is +/// `SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1` +/// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is +/// non-deterministic. 
If we still extract the part `_partition_id = '0'` for filtering parts, then trivial +/// count optimization will be mistakenly applied to the query. +ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true); /// Extract from the input stream a set of `name` column values template diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql index d4ffa4d07ac..bb3269da597 100644 --- a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql @@ -1,4 +1,4 @@ CREATE TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x; -INSERT INTO t SELECT 0, number FROM numbers(10); +INSERT INTO t SELECT 0, number FROM numbers(10) SETTINGS max_block_size = 100; SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 0; SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 1; From 808acea1b6677c1b6bdbc28ddeb01c28e95e43d8 Mon Sep 17 00:00:00 2001 From: morning-color Date: Tue, 16 Jul 2024 19:45:39 +0800 Subject: [PATCH 0420/2361] Fix compile problem --- src/Core/SettingsChangesHistory.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index b9b72209103..922946b192e 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,7 +57,8 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. static std::initializer_list> settings_changes_history_initializer = { - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, + {"24.7", {{"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"}, + {"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, From ff7f5fe80873aad2f0f7b6f4e1e73c0178a69503 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 1 May 2024 03:09:13 +0200 Subject: [PATCH 0421/2361] Move view targets to separate AST class "ASTViewTargets" in order to allow extending it to support more kinds of view targets. 
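Roughly, each view target bundles everything a view can point at under a single "kind": the external TO table, the UUID of an implicitly created inner table, and the inner table's ENGINE definition. The sketch below is a simplified standalone model of that shape (plain std:: types instead of IAST/StorageID, illustrative names only), meant to make the accessor pattern used throughout the diff easier to follow; it is not the actual ASTViewTargets implementation.

#include <memory>
#include <string>
#include <vector>

enum class TargetKind { To, Inner };        /// the TO table of a materialized view, or the inner
                                            /// table of a window view, with room for more kinds

struct Target
{
    TargetKind kind;
    std::string table_id;                       /// stands in for StorageID ("db.table"), empty if unset
    std::string inner_uuid;                     /// stands in for UUID, empty if unset
    std::shared_ptr<std::string> inner_engine;  /// stands in for the inner ASTStorage, may be null
};

struct ViewTargets
{
    std::vector<Target> targets;

    /// Accessors return a neutral "empty" value when the kind is absent, so callers never
    /// have to null-check the container first -- the convention the real getTableID /
    /// getInnerUUID / getInnerEngine helpers follow.
    std::string getTableID(TargetKind kind) const
    {
        for (const auto & t : targets)
            if (t.kind == kind)
                return t.table_id;
        return {};
    }

    std::shared_ptr<std::string> getInnerEngine(TargetKind kind) const
    {
        for (const auto & t : targets)
            if (t.kind == kind)
                return t.inner_engine;
        return nullptr;
    }
};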
--- src/Backups/BackupUtils.cpp | 2 +- src/Backups/RestoreCoordinationLocal.cpp | 6 +- src/Backups/RestoreCoordinationLocal.h | 9 +- src/Backups/RestoreCoordinationRemote.cpp | 9 +- src/Databases/DDLDependencyVisitor.cpp | 19 +- src/Databases/DDLRenamingVisitor.cpp | 17 +- src/Databases/DatabaseReplicated.cpp | 160 +++++----- src/Databases/DatabaseReplicated.h | 1 + src/Interpreters/InterpreterCreateQuery.cpp | 126 ++++++-- .../InterpreterShowCreateQuery.cpp | 3 +- src/Parsers/ASTCreateQuery.cpp | 95 +++--- src/Parsers/ASTCreateQuery.h | 40 ++- src/Parsers/ASTViewTargets.cpp | 300 ++++++++++++++++++ src/Parsers/ASTViewTargets.h | 102 ++++++ src/Parsers/CreateQueryUUIDs.cpp | 168 ++++++++++ src/Parsers/CreateQueryUUIDs.h | 40 +++ src/Parsers/ParserCreateQuery.cpp | 92 ++++-- src/Parsers/ParserViewTargets.cpp | 88 +++++ src/Parsers/ParserViewTargets.h | 24 ++ src/Storages/StorageMaterializedView.cpp | 40 ++- src/Storages/System/StorageSystemTables.cpp | 3 +- src/Storages/WindowView/StorageWindowView.cpp | 15 +- 22 files changed, 1116 insertions(+), 243 deletions(-) create mode 100644 src/Parsers/ASTViewTargets.cpp create mode 100644 src/Parsers/ASTViewTargets.h create mode 100644 src/Parsers/CreateQueryUUIDs.cpp create mode 100644 src/Parsers/CreateQueryUUIDs.h create mode 100644 src/Parsers/ParserViewTargets.cpp create mode 100644 src/Parsers/ParserViewTargets.h diff --git a/src/Backups/BackupUtils.cpp b/src/Backups/BackupUtils.cpp index fa8ed5855dd..cd3f963b15d 100644 --- a/src/Backups/BackupUtils.cpp +++ b/src/Backups/BackupUtils.cpp @@ -105,7 +105,7 @@ bool compareRestoredTableDef(const IAST & restored_table_create_query, const IAS auto new_query = query.clone(); adjustCreateQueryForBackup(new_query, global_context); ASTCreateQuery & create = typeid_cast(*new_query); - create.setUUID({}); + create.resetUUIDs(); create.if_not_exists = false; return new_query; }; diff --git a/src/Backups/RestoreCoordinationLocal.cpp b/src/Backups/RestoreCoordinationLocal.cpp index f51d6c0c1d8..9fe22f874b4 100644 --- a/src/Backups/RestoreCoordinationLocal.cpp +++ b/src/Backups/RestoreCoordinationLocal.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -67,7 +68,7 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer auto it = create_query_uuids.find(query_str); if (it != create_query_uuids.end()) { - create_query.setUUID(it->second); + it->second.copyToQuery(create_query); return true; } return false; @@ -79,7 +80,8 @@ void RestoreCoordinationLocal::generateUUIDForTable(ASTCreateQuery & create_quer return; } - auto new_uuids = create_query.generateRandomUUID(/* always_generate_new_uuid= */ true); + CreateQueryUUIDs new_uuids{create_query, /* generate_random= */ true, /* force_random= */ true}; + new_uuids.copyToQuery(create_query); { std::lock_guard lock{mutex}; diff --git a/src/Backups/RestoreCoordinationLocal.h b/src/Backups/RestoreCoordinationLocal.h index 5e51b719d63..35f93574b68 100644 --- a/src/Backups/RestoreCoordinationLocal.h +++ b/src/Backups/RestoreCoordinationLocal.h @@ -1,16 +1,17 @@ #pragma once #include -#include +#include +#include #include #include #include -namespace Poco { class Logger; } - namespace DB { +class ASTCreateQuery; + /// Implementation of the IRestoreCoordination interface performing coordination in memory. 
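// A standalone sketch of the coordination idea used by generateUUIDForTable() above: every host
// restoring the same table must end up with the same UUIDs, so the first caller records the
// UUIDs it generated under the query text and every later caller adopts them instead of
// generating its own. std::map and std::string stand in for the real CreateQueryUUIDs and
// ZooKeeper machinery; the class and method names are illustrative only.
#include <map>
#include <mutex>
#include <string>
#include <utility>

class UuidCoordinator
{
public:
    /// Returns the UUIDs already stored for `query_text` if someone registered them first,
    /// otherwise stores `generated` and returns it (first writer wins).
    std::string adoptOrRegister(const std::string & query_text, std::string generated)
    {
        std::lock_guard lock(mutex);
        auto it = uuids_by_query.emplace(query_text, std::move(generated)).first;
        return it->second;
    }

private:
    std::mutex mutex;
    std::map<std::string, std::string> uuids_by_query;
};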
class RestoreCoordinationLocal : public IRestoreCoordination @@ -55,7 +56,7 @@ private: std::set> acquired_tables_in_replicated_databases; std::unordered_set acquired_data_in_replicated_tables; - std::unordered_map create_query_uuids; + std::unordered_map create_query_uuids; std::unordered_set acquired_data_in_keeper_map_tables; mutable std::mutex mutex; diff --git a/src/Backups/RestoreCoordinationRemote.cpp b/src/Backups/RestoreCoordinationRemote.cpp index 84106737fc9..44214d00be5 100644 --- a/src/Backups/RestoreCoordinationRemote.cpp +++ b/src/Backups/RestoreCoordinationRemote.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -269,7 +270,8 @@ bool RestoreCoordinationRemote::acquireInsertingDataForKeeperMap(const String & void RestoreCoordinationRemote::generateUUIDForTable(ASTCreateQuery & create_query) { String query_str = serializeAST(create_query); - String new_uuids_str = create_query.generateRandomUUID(/* always_generate_new_uuid= */ true).toString(); + CreateQueryUUIDs new_uuids{create_query, /* generate_random= */ true, /* force_random= */ true}; + String new_uuids_str = new_uuids.toString(); auto holder = with_retries.createRetriesControlHolder("generateUUIDForTable"); holder.retries_ctl.retryLoop( @@ -281,11 +283,14 @@ void RestoreCoordinationRemote::generateUUIDForTable(ASTCreateQuery & create_que Coordination::Error res = zk->tryCreate(path, new_uuids_str, zkutil::CreateMode::Persistent); if (res == Coordination::Error::ZOK) + { + new_uuids.copyToQuery(create_query); return; + } if (res == Coordination::Error::ZNODEEXISTS) { - create_query.setUUID(ASTCreateQuery::UUIDs::fromString(zk->get(path))); + CreateQueryUUIDs::fromString(zk->get(path)).copyToQuery(create_query); return; } diff --git a/src/Databases/DDLDependencyVisitor.cpp b/src/Databases/DDLDependencyVisitor.cpp index d81dc7a76d8..d149b49d465 100644 --- a/src/Databases/DDLDependencyVisitor.cpp +++ b/src/Databases/DDLDependencyVisitor.cpp @@ -80,13 +80,20 @@ namespace /// CREATE TABLE or CREATE DICTIONARY or CREATE VIEW or CREATE TEMPORARY TABLE or CREATE DATABASE query. 
void visitCreateQuery(const ASTCreateQuery & create) { - QualifiedTableName to_table{create.to_table_id.database_name, create.to_table_id.table_name}; - if (!to_table.table.empty()) + if (create.targets) { - /// TO target_table (for materialized views) - if (to_table.database.empty()) - to_table.database = current_database; - dependencies.emplace(to_table); + for (const auto & target : create.targets->targets) + { + const auto & table_id = target.table_id; + if (!table_id.table_name.empty()) + { + /// TO target_table (for materialized views) + QualifiedTableName target_name{table_id.database_name, table_id.table_name}; + if (target_name.database.empty()) + target_name.database = current_database; + dependencies.emplace(target_name); + } + } } QualifiedTableName as_table{create.as_database, create.as_table}; diff --git a/src/Databases/DDLRenamingVisitor.cpp b/src/Databases/DDLRenamingVisitor.cpp index 6cd414635a0..38e100e2470 100644 --- a/src/Databases/DDLRenamingVisitor.cpp +++ b/src/Databases/DDLRenamingVisitor.cpp @@ -86,12 +86,19 @@ namespace create.as_table = as_table_new.table; } - QualifiedTableName to_table{create.to_table_id.database_name, create.to_table_id.table_name}; - if (!to_table.table.empty() && !to_table.database.empty()) + if (create.targets) { - auto to_table_new = data.renaming_map.getNewTableName(to_table); - if (to_table_new != to_table) - create.to_table_id = StorageID{to_table_new.database, to_table_new.table}; + for (auto & target : create.targets->targets) + { + auto & table_id = target.table_id; + if (!table_id.database_name.empty() && !table_id.table_name.empty()) + { + QualifiedTableName target_name{table_id.database_name, table_id.table_name}; + auto new_target_name = data.renaming_map.getNewTableName(target_name); + if (new_target_name != target_name) + table_id = StorageID{new_target_name.database, new_target_name.table}; + } + } } } diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 7ce2859e962..25d1ad90a3c 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -729,81 +729,14 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_ if (auto * create = query->as()) { - bool replicated_table = create->storage && create->storage->engine && - (startsWith(create->storage->engine->name, "Replicated") || startsWith(create->storage->engine->name, "Shared")); - if (!replicated_table || !create->storage->engine->arguments) - return; + if (create->storage) + checkTableEngine(*create, *create->storage, query_context); - ASTs & args_ref = create->storage->engine->arguments->children; - ASTs args = args_ref; - if (args.size() < 2) - return; - - /// It can be a constant expression. Try to evaluate it, ignore exception if we cannot. - bool has_expression_argument = args_ref[0]->as() || args_ref[1]->as(); - if (has_expression_argument) + if (create->targets) { - try - { - args[0] = evaluateConstantExpressionAsLiteral(args_ref[0]->clone(), query_context); - args[1] = evaluateConstantExpressionAsLiteral(args_ref[1]->clone(), query_context); - } - catch (...) 
// NOLINT(bugprone-empty-catch) - { - } + for (auto inner_table_engine : create->targets->getInnerEngines()) + checkTableEngine(*create, *inner_table_engine, query_context); } - - ASTLiteral * arg1 = args[0]->as(); - ASTLiteral * arg2 = args[1]->as(); - if (!arg1 || !arg2 || arg1->value.getType() != Field::Types::String || arg2->value.getType() != Field::Types::String) - return; - - String maybe_path = arg1->value.get(); - String maybe_replica = arg2->value.get(); - - /// Looks like it's ReplicatedMergeTree with explicit zookeeper_path and replica_name arguments. - /// Let's ensure that some macros are used. - /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas. - - Macros::MacroExpansionInfo info; - info.table_id = {getDatabaseName(), create->getTable(), create->uuid}; - info.shard = getShardName(); - info.replica = getReplicaName(); - query_context->getMacros()->expand(maybe_path, info); - bool maybe_shard_macros = info.expanded_other; - info.expanded_other = false; - query_context->getMacros()->expand(maybe_replica, info); - bool maybe_replica_macros = info.expanded_other; - bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros"); - - if (!enable_functional_tests_helper) - { - if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments) - LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments"); - else - throw Exception(ErrorCodes::INCORRECT_QUERY, - "It's not allowed to specify explicit zookeeper_path and replica_name " - "for ReplicatedMergeTree arguments in Replicated database. If you really want to " - "specify them explicitly, enable setting " - "database_replicated_allow_replicated_engine_arguments."); - } - - if (maybe_shard_macros && maybe_replica_macros) - return; - - if (enable_functional_tests_helper && !has_expression_argument) - { - if (maybe_path.empty() || maybe_path.back() != '/') - maybe_path += '/'; - args_ref[0]->as()->value = maybe_path + "auto_{shard}"; - args_ref[1]->as()->value = maybe_replica + "auto_{replica}"; - return; - } - - throw Exception(ErrorCodes::INCORRECT_QUERY, - "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. " - "If you really want to specify it explicitly, then you should use some macros " - "to distinguish different shards and replicas"); } } @@ -827,6 +760,85 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_ } } +void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStorage & storage, ContextPtr query_context) const +{ + bool replicated_table = storage.engine && + (startsWith(storage.engine->name, "Replicated") || startsWith(storage.engine->name, "Shared")); + if (!replicated_table || !storage.engine->arguments) + return; + + ASTs & args_ref = storage.engine->arguments->children; + ASTs args = args_ref; + if (args.size() < 2) + return; + + /// It can be a constant expression. Try to evaluate it, ignore exception if we cannot. + bool has_expression_argument = args_ref[0]->as() || args_ref[1]->as(); + if (has_expression_argument) + { + try + { + args[0] = evaluateConstantExpressionAsLiteral(args_ref[0]->clone(), query_context); + args[1] = evaluateConstantExpressionAsLiteral(args_ref[1]->clone(), query_context); + } + catch (...) 
// NOLINT(bugprone-empty-catch) + { + } + } + + ASTLiteral * arg1 = args[0]->as(); + ASTLiteral * arg2 = args[1]->as(); + if (!arg1 || !arg2 || arg1->value.getType() != Field::Types::String || arg2->value.getType() != Field::Types::String) + return; + + String maybe_path = arg1->value.get(); + String maybe_replica = arg2->value.get(); + + /// Looks like it's ReplicatedMergeTree with explicit zookeeper_path and replica_name arguments. + /// Let's ensure that some macros are used. + /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas. + + Macros::MacroExpansionInfo info; + info.table_id = {getDatabaseName(), query.getTable(), query.uuid}; + info.shard = getShardName(); + info.replica = getReplicaName(); + query_context->getMacros()->expand(maybe_path, info); + bool maybe_shard_macros = info.expanded_other; + info.expanded_other = false; + query_context->getMacros()->expand(maybe_replica, info); + bool maybe_replica_macros = info.expanded_other; + bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros"); + + if (!enable_functional_tests_helper) + { + if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments) + LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments"); + else + throw Exception(ErrorCodes::INCORRECT_QUERY, + "It's not allowed to specify explicit zookeeper_path and replica_name " + "for ReplicatedMergeTree arguments in Replicated database. If you really want to " + "specify them explicitly, enable setting " + "database_replicated_allow_replicated_engine_arguments."); + } + + if (maybe_shard_macros && maybe_replica_macros) + return; + + if (enable_functional_tests_helper && !has_expression_argument) + { + if (maybe_path.empty() || maybe_path.back() != '/') + maybe_path += '/'; + args_ref[0]->as()->value = maybe_path + "auto_{shard}"; + args_ref[1]->as()->value = maybe_replica + "auto_{replica}"; + return; + } + + throw Exception(ErrorCodes::INCORRECT_QUERY, + "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. 
" + "If you really want to specify it explicitly, then you should use some macros " + "to distinguish different shards and replicas"); +} + BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context, QueryFlags flags) { waitDatabaseStarted(); @@ -1312,11 +1324,9 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node if (create.uuid == UUIDHelpers::Nil || create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected query from {}: {}", node_name, query); - bool is_materialized_view_with_inner_table = create.is_materialized_view && create.to_table_id.empty(); - create.setDatabase(getDatabaseName()); create.setTable(unescapeForFileName(node_name)); - create.attach = is_materialized_view_with_inner_table; + create.attach = create.is_materialized_view_with_inner_table(); return ast; } diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index eab5b2ff931..8c3fa7c87f6 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -107,6 +107,7 @@ private: void fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config); void checkQueryValid(const ASTPtr & query, ContextPtr query_context) const; + void checkTableEngine(const ASTCreateQuery & query, ASTStorage & storage, ContextPtr query_context) const; void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 & max_log_ptr); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 84d7f0a587c..45e2881ae5c 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -950,7 +950,7 @@ namespace throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with Replicated, Shared or KeeperMap table engines"); } - void setDefaultTableEngine(ASTStorage &storage, DefaultTableEngine engine) + void setDefaultTableEngine(ASTStorage & storage, DefaultTableEngine engine) { if (engine == DefaultTableEngine::None) throw Exception(ErrorCodes::ENGINE_REQUIRED, "Table engine is not specified in CREATE query"); @@ -970,9 +970,6 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const if (create.is_dictionary || create.is_ordinary_view || create.is_live_view || create.is_window_view) return; - if (create.is_materialized_view && create.to_table_id) - return; - if (create.temporary) { /// Some part of storage definition is specified, but ENGINE is not: just set the one from default_temporary_table_engine setting. @@ -987,22 +984,44 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const } if (!create.storage->engine) - { setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_temporary_table_engine.value); - } checkTemporaryTableEngineName(create.storage->engine->name); return; } + if (create.is_materialized_view) + { + /// A materialized view with an external target doesn't need a table engine. + if (create.is_materialized_view_with_external_target()) + return; + + if (auto to_engine = create.getTargetInnerEngine(ViewTarget::To)) + { + /// This materialized view already has a storage definition. + if (!to_engine->engine) + { + /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. 
+ setDefaultTableEngine(*to_engine, getContext()->getSettingsRef().default_table_engine.value); + } + return; + } + } + if (create.storage) { - /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. + /// This table already has a storage definition. if (!create.storage->engine) + { + /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); + } return; } + /// We'll try to extract a storage definition from clause `AS`: + /// CREATE TABLE table_name AS other_table_name + std::shared_ptr storage_def; if (!create.as_table.empty()) { /// NOTE Getting the structure from the table specified in the AS is done not atomically with the creation of the table. @@ -1018,12 +1037,14 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const if (as_create.is_ordinary_view) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot CREATE a table AS {}, it is a View", qualified_name); - if (as_create.is_materialized_view && as_create.to_table_id) + if (as_create.is_materialized_view_with_external_target()) + { throw Exception( ErrorCodes::INCORRECT_QUERY, - "Cannot CREATE a table AS {}, it is a Materialized View without storage. Use \"AS `{}`\" instead", + "Cannot CREATE a table AS {}, it is a Materialized View without storage. Use \"AS {}\" instead", qualified_name, - as_create.to_table_id.getQualifiedName()); + as_create.getTargetTableID(ViewTarget::To).getFullTableName()); + } if (as_create.is_live_view) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot CREATE a table AS {}, it is a Live View", qualified_name); @@ -1034,18 +1055,37 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const if (as_create.is_dictionary) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot CREATE a table AS {}, it is a Dictionary", qualified_name); - if (as_create.storage) - create.set(create.storage, as_create.storage->ptr()); + if (as_create.is_materialized_view) + { + storage_def = as_create.getTargetInnerEngine(ViewTarget::To); + } else if (as_create.as_table_function) + { create.set(create.as_table_function, as_create.as_table_function->ptr()); + return; + } + else if (as_create.storage) + { + storage_def = typeid_cast>(as_create.storage->ptr()); + } else + { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot set engine, it's a bug."); - - return; + } } - create.set(create.storage, std::make_shared()); - setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); + if (!storage_def) + { + /// Set ENGINE by default. + storage_def = std::make_shared(); + setDefaultTableEngine(*storage_def, getContext()->getSettingsRef().default_table_engine.value); + } + + /// Use the found table engine to modify the create query. 
+ if (create.is_materialized_view) + create.setTargetInnerEngine(ViewTarget::To, storage_def); + else + create.set(create.storage, storage_def); } void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const DatabasePtr & database) const @@ -1087,11 +1127,11 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data kind_upper, create.table); } - create.generateRandomUUID(); + create.generateRandomUUIDs(); } else { - bool has_uuid = create.uuid != UUIDHelpers::Nil || create.to_inner_uuid != UUIDHelpers::Nil; + bool has_uuid = (create.uuid != UUIDHelpers::Nil) || (create.targets && create.targets->hasInnerUUIDs()); if (has_uuid && !is_on_cluster && !internal) { /// We don't show the following error message either @@ -1106,8 +1146,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data /// The database doesn't support UUID so we'll ignore it. The UUID could be set here because of either /// a) the initiator of `ON CLUSTER` query generated it to ensure the same UUIDs are used on different hosts; or /// b) `RESTORE from backup` query generated it to ensure the same UUIDs are used on different hosts. - create.uuid = UUIDHelpers::Nil; - create.to_inner_uuid = UUIDHelpers::Nil; + create.resetUUIDs(); } } @@ -1131,6 +1170,14 @@ void checkTableCanBeAddedWithNoCyclicDependencies(const ASTCreateQuery & create, DatabaseCatalog::instance().checkTableCanBeAddedWithNoCyclicDependencies(qualified_name, ref_dependencies, loading_dependencies); } +bool isReplicated(const ASTStorage & storage) +{ + if (!storage.engine) + return false; + const auto & storage_name = storage.engine->name; + return storage_name.starts_with("Replicated") || storage_name.starts_with("Shared"); +} + } BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) @@ -1247,8 +1294,9 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (!create.temporary && !create.database) create.setDatabase(current_database); - if (create.to_table_id && create.to_table_id.database_name.empty()) - create.to_table_id.database_name = current_database; + + if (create.targets) + create.targets->setCurrentDatabase(current_database); if (create.select && create.isView()) { @@ -1282,12 +1330,9 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) TableProperties properties = getTablePropertiesAndNormalizeCreateQuery(create, mode); /// Check type compatible for materialized dest table and select columns - if (create.select && create.is_materialized_view && create.to_table_id && mode <= LoadingStrictnessLevel::CREATE) + if (create.is_materialized_view_with_external_target() && create.select && mode <= LoadingStrictnessLevel::CREATE) { - if (StoragePtr to_table = DatabaseCatalog::instance().tryGetTable( - {create.to_table_id.database_name, create.to_table_id.table_name, create.to_table_id.uuid}, - getContext() - )) + if (StoragePtr to_table = DatabaseCatalog::instance().tryGetTable(create.getTargetTableID(ViewTarget::To), getContext())) { Block input_block; @@ -1333,11 +1378,17 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (!allow_heavy_create && database && database->getEngineName() == "Replicated" && (create.select || create.is_populate)) { bool is_storage_replicated = false; - if (create.storage && create.storage->engine) + + if (create.storage && isReplicated(*create.storage)) + is_storage_replicated = true; + + if (create.targets) { - const auto & storage_name = create.storage->engine->name; - if 
(storage_name.starts_with("Replicated") || storage_name.starts_with("Shared")) - is_storage_replicated = true; + for (auto inner_table_engine : create.targets->getInnerEngines()) + { + if (isReplicated(*inner_table_engine)) + is_storage_replicated = true; + } } const bool allow_create_select_for_replicated = (create.isView() && !create.is_populate) || create.is_create_empty || !is_storage_replicated; @@ -1791,7 +1842,7 @@ void InterpreterCreateQuery::prepareOnClusterQuery(ASTCreateQuery & create, Cont /// For CREATE query generate UUID on initiator, so it will be the same on all hosts. /// It will be ignored if database does not support UUIDs. - create.generateRandomUUID(); + create.generateRandomUUIDs(); /// For cross-replication cluster we cannot use UUID in replica path. String cluster_name_expanded = local_context->getMacros()->expand(cluster_name); @@ -1913,8 +1964,15 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const } } - if (create.to_table_id) - required_access.emplace_back(AccessType::SELECT | AccessType::INSERT, create.to_table_id.database_name, create.to_table_id.table_name); + if (create.targets) + { + for (const auto & target : create.targets->targets) + { + const auto & target_id = target.table_id; + if (target_id) + required_access.emplace_back(AccessType::SELECT | AccessType::INSERT, target_id.database_name, target_id.table_name); + } + } if (create.storage && create.storage->engine) required_access.emplace_back(AccessType::TABLE_ENGINE, create.storage->engine->name); diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index 0fca7b64d5a..16add79d226 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -94,7 +94,8 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl() { auto & create = create_query->as(); create.uuid = UUIDHelpers::Nil; - create.to_inner_uuid = UUIDHelpers::Nil; + if (create.targets) + create.targets->resetInnerUUIDs(); } MutableColumnPtr column = ColumnString::create(); diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index d56a2724914..770a63c6e75 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -2,6 +2,8 @@ #include #include #include +#include +#include #include #include #include @@ -240,12 +242,12 @@ ASTPtr ASTCreateQuery::clone() const res->set(res->columns_list, columns_list->clone()); if (storage) res->set(res->storage, storage->clone()); - if (inner_storage) - res->set(res->inner_storage, inner_storage->clone()); if (select) res->set(res->select, select->clone()); if (table_overrides) res->set(res->table_overrides, table_overrides->clone()); + if (targets) + res->set(res->targets, targets->clone()); if (dictionary) { @@ -388,20 +390,18 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat refresh_strategy->formatImpl(settings, state, frame); } - if (to_table_id) + if (auto to_table_id = getTargetTableID(ViewTarget::To)) { - assert((is_materialized_view || is_window_view) && to_inner_uuid == UUIDHelpers::Nil); - settings.ostr - << (settings.hilite ? hilite_keyword : "") << " TO " << (settings.hilite ? hilite_none : "") - << (!to_table_id.database_name.empty() ? backQuoteIfNeed(to_table_id.database_name) + "." : "") - << backQuoteIfNeed(to_table_id.table_name); + settings.ostr << " " << (settings.hilite ? hilite_keyword : "") << toStringView(Keyword::TO) + << (settings.hilite ? 
hilite_none : "") << " " + << (!to_table_id.database_name.empty() ? backQuoteIfNeed(to_table_id.database_name) + "." : "") + << backQuoteIfNeed(to_table_id.table_name); } - if (to_inner_uuid != UUIDHelpers::Nil) + if (auto to_inner_uuid = getTargetInnerUUID(ViewTarget::To); to_inner_uuid != UUIDHelpers::Nil) { - assert(is_materialized_view && !to_table_id); - settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO INNER UUID " << (settings.hilite ? hilite_none : "") - << quoteString(toString(to_inner_uuid)); + settings.ostr << " " << (settings.hilite ? hilite_keyword : "") << toStringView(Keyword::TO_INNER_UUID) + << (settings.hilite ? hilite_none : "") << " " << quoteString(toString(to_inner_uuid)); } bool should_add_empty = is_create_empty; @@ -461,14 +461,17 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat frame.expression_list_always_start_on_new_line = false; - if (inner_storage) + if (storage) + storage->formatImpl(settings, state, frame); + + if (auto inner_storage = getTargetInnerEngine(ViewTarget::Inner)) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << " INNER" << (settings.hilite ? hilite_none : ""); + settings.ostr << " " << (settings.hilite ? hilite_keyword : "") << toStringView(Keyword::INNER) << (settings.hilite ? hilite_none : ""); inner_storage->formatImpl(settings, state, frame); } - if (storage) - storage->formatImpl(settings, state, frame); + if (auto to_storage = getTargetInnerEngine(ViewTarget::To)) + to_storage->formatImpl(settings, state, frame); if (dictionary) dictionary->formatImpl(settings, state, frame); @@ -528,48 +531,50 @@ bool ASTCreateQuery::isParameterizedView() const } -ASTCreateQuery::UUIDs::UUIDs(const ASTCreateQuery & query) - : uuid(query.uuid) - , to_inner_uuid(query.to_inner_uuid) +void ASTCreateQuery::generateRandomUUIDs() { + CreateQueryUUIDs{*this, /* generate_random= */ true}.copyToQuery(*this); } -String ASTCreateQuery::UUIDs::toString() const +void ASTCreateQuery::resetUUIDs() { - WriteBufferFromOwnString out; - out << "{" << uuid << "," << to_inner_uuid << "}"; - return out.str(); + CreateQueryUUIDs{}.copyToQuery(*this); } -ASTCreateQuery::UUIDs ASTCreateQuery::UUIDs::fromString(const String & str) + +StorageID ASTCreateQuery::getTargetTableID(ViewTarget::Kind target_kind) const { - ReadBufferFromString in{str}; - ASTCreateQuery::UUIDs res; - in >> "{" >> res.uuid >> "," >> res.to_inner_uuid >> "}"; - return res; + if (targets) + return targets->getTableID(target_kind); + return StorageID::createEmpty(); } -ASTCreateQuery::UUIDs ASTCreateQuery::generateRandomUUID(bool always_generate_new_uuid) +bool ASTCreateQuery::hasTargetTableID(ViewTarget::Kind target_kind) const { - if (always_generate_new_uuid) - setUUID({}); - - if (uuid == UUIDHelpers::Nil) - uuid = UUIDHelpers::generateV4(); - - /// If destination table (to_table_id) is not specified for materialized view, - /// then MV will create inner table. We should generate UUID of inner table here. 
- bool need_uuid_for_inner_table = !attach && is_materialized_view && !to_table_id; - if (need_uuid_for_inner_table && (to_inner_uuid == UUIDHelpers::Nil)) - to_inner_uuid = UUIDHelpers::generateV4(); - - return UUIDs{*this}; + if (targets) + return targets->hasTableID(target_kind); + return false; } -void ASTCreateQuery::setUUID(const UUIDs & uuids) +UUID ASTCreateQuery::getTargetInnerUUID(ViewTarget::Kind target_kind) const { - uuid = uuids.uuid; - to_inner_uuid = uuids.to_inner_uuid; + if (targets) + return targets->getInnerUUID(target_kind); + return UUIDHelpers::Nil; +} + +std::shared_ptr ASTCreateQuery::getTargetInnerEngine(ViewTarget::Kind target_kind) const +{ + if (targets) + return targets->getInnerEngine(target_kind); + return nullptr; +} + +void ASTCreateQuery::setTargetInnerEngine(ViewTarget::Kind target_kind, ASTPtr storage_def) +{ + if (!targets) + set(targets, std::make_shared()); + targets->setInnerEngine(target_kind, storage_def); } } diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index 6fbf045915b..f751a09169c 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -15,6 +16,7 @@ namespace DB class ASTFunction; class ASTSetQuery; class ASTSelectWithUnionQuery; +struct CreateQueryUUIDs; class ASTStorage : public IAST @@ -101,17 +103,15 @@ public: bool has_uuid{false}; // CREATE TABLE x UUID '...' ASTColumns * columns_list = nullptr; - - StorageID to_table_id = StorageID::createEmpty(); /// For CREATE MATERIALIZED VIEW mv TO table. - UUID to_inner_uuid = UUIDHelpers::Nil; /// For materialized view with inner table - ASTStorage * inner_storage = nullptr; /// For window view with inner table ASTStorage * storage = nullptr; + ASTPtr watermark_function; ASTPtr lateness_function; String as_database; String as_table; IAST * as_table_function = nullptr; ASTSelectWithUnionQuery * select = nullptr; + ASTViewTargets * targets = nullptr; IAST * comment = nullptr; ASTPtr sql_security = nullptr; @@ -153,17 +153,25 @@ public: QueryKind getQueryKind() const override { return QueryKind::Create; } - struct UUIDs - { - UUID uuid = UUIDHelpers::Nil; - UUID to_inner_uuid = UUIDHelpers::Nil; - UUIDs() = default; - explicit UUIDs(const ASTCreateQuery & query); - String toString() const; - static UUIDs fromString(const String & str); - }; - UUIDs generateRandomUUID(bool always_generate_new_uuid = false); - void setUUID(const UUIDs & uuids); + /// Generates a random UUID for this create query if it's not specified already. + /// The function also generates random UUIDs for inner target tables if this create query implies that + /// (for example, if it's a `CREATE MATERIALIZED VIEW` query with an inner storage). + void generateRandomUUIDs(); + + /// Removes UUID from this create query. + /// The function also removes UUIDs for inner target tables from this create query (see also generateRandomUUID()). + void resetUUIDs(); + + /// Returns information about a target table. + /// If that information isn't specified in this create query (or even not allowed) then the function returns an empty value. 
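+    /// Here "empty" means StorageID::createEmpty() for getTargetTableID(), false for hasTargetTableID(),
+    /// UUIDHelpers::Nil for getTargetInnerUUID() and nullptr for getTargetInnerEngine().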
+ StorageID getTargetTableID(ViewTarget::Kind target_kind) const; + bool hasTargetTableID(ViewTarget::Kind target_kind) const; + UUID getTargetInnerUUID(ViewTarget::Kind target_kind) const; + std::shared_ptr getTargetInnerEngine(ViewTarget::Kind target_kind) const; + void setTargetInnerEngine(ViewTarget::Kind target_kind, ASTPtr storage_def); + + bool is_materialized_view_with_external_target() const { return is_materialized_view && hasTargetTableID(ViewTarget::To); } + bool is_materialized_view_with_inner_table() const { return is_materialized_view && !hasTargetTableID(ViewTarget::To); } protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; @@ -171,8 +179,8 @@ protected: void forEachPointerToChild(std::function f) override { f(reinterpret_cast(&columns_list)); - f(reinterpret_cast(&inner_storage)); f(reinterpret_cast(&storage)); + f(reinterpret_cast(&targets)); f(reinterpret_cast(&as_table_function)); f(reinterpret_cast(&select)); f(reinterpret_cast(&comment)); diff --git a/src/Parsers/ASTViewTargets.cpp b/src/Parsers/ASTViewTargets.cpp new file mode 100644 index 00000000000..38f103b6e55 --- /dev/null +++ b/src/Parsers/ASTViewTargets.cpp @@ -0,0 +1,300 @@ +#include + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; +} + + +std::string_view toString(ViewTarget::Kind kind) +{ + switch (kind) + { + case ViewTarget::To: return "to"; + case ViewTarget::Inner: return "inner"; + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "{} doesn't support kind {}", __FUNCTION__, kind); +} + +void parseFromString(ViewTarget::Kind & out, std::string_view str) +{ + for (auto kind : magic_enum::enum_values()) + { + if (toString(kind) == str) + { + out = kind; + return; + } + } + throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}: Unexpected string {}", __FUNCTION__, str); +} + + +std::vector ASTViewTargets::getKinds() const +{ + std::vector kinds; + kinds.reserve(targets.size()); + for (auto & target : targets) + kinds.push_back(target.kind); + return kinds; +} + + +void ASTViewTargets::setTableID(ViewTarget::Kind kind, const StorageID & table_id_) +{ + for (auto & target : targets) + { + if (target.kind == kind) + { + target.table_id = table_id_; + return; + } + } + if (table_id_) + targets.emplace_back(kind).table_id = table_id_; +} + +StorageID ASTViewTargets::getTableID(ViewTarget::Kind kind) const +{ + if (const auto * target = tryGetTarget(kind)) + return target->table_id; + return StorageID::createEmpty(); +} + +bool ASTViewTargets::hasTableID(ViewTarget::Kind kind) const +{ + if (const auto * target = tryGetTarget(kind)) + return !target->table_id.empty(); + return false; +} + +void ASTViewTargets::setCurrentDatabase(const String & current_database) +{ + for (auto & target : targets) + { + auto & table_id = target.table_id; + if (!table_id.table_name.empty() && table_id.database_name.empty()) + table_id.database_name = current_database; + } +} + +void ASTViewTargets::setInnerUUID(ViewTarget::Kind kind, const UUID & inner_uuid_) +{ + for (auto & target : targets) + { + if (target.kind == kind) + { + target.inner_uuid = inner_uuid_; + return; + } + } + if (inner_uuid_ != UUIDHelpers::Nil) + targets.emplace_back(kind).inner_uuid = inner_uuid_; +} + +UUID ASTViewTargets::getInnerUUID(ViewTarget::Kind kind) const +{ + if (const auto * target = tryGetTarget(kind)) + return target->inner_uuid; + return UUIDHelpers::Nil; +} + +bool 
ASTViewTargets::hasInnerUUID(ViewTarget::Kind kind) const
+{
+    return getInnerUUID(kind) != UUIDHelpers::Nil;
+}
+
+void ASTViewTargets::resetInnerUUIDs()
+{
+    for (auto & target : targets)
+        target.inner_uuid = UUIDHelpers::Nil;
+}
+
+bool ASTViewTargets::hasInnerUUIDs() const
+{
+    for (auto & target : targets)
+    {
+        if (target.inner_uuid != UUIDHelpers::Nil)
+            return true;
+    }
+    return false;
+}
+
+void ASTViewTargets::setInnerEngine(ViewTarget::Kind kind, ASTPtr storage_def)
+{
+    auto new_inner_engine = typeid_cast<std::shared_ptr<ASTStorage>>(storage_def);
+    if (!new_inner_engine && storage_def)
+        throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Bad cast from type {} to ASTStorage", storage_def->getID());
+
+    for (auto & target : targets)
+    {
+        if (target.kind == kind)
+        {
+            if (target.inner_engine == new_inner_engine)
+                return;
+            if (new_inner_engine)
+                children.push_back(new_inner_engine);
+            if (target.inner_engine)
+                std::erase(children, target.inner_engine);
+            target.inner_engine = new_inner_engine;
+            return;
+        }
+    }
+
+    if (new_inner_engine)
+    {
+        targets.emplace_back(kind).inner_engine = new_inner_engine;
+        children.push_back(new_inner_engine);
+    }
+}
+
+std::shared_ptr<ASTStorage> ASTViewTargets::getInnerEngine(ViewTarget::Kind kind) const
+{
+    if (const auto * target = tryGetTarget(kind))
+        return target->inner_engine;
+    return nullptr;
+}
+
+std::vector<std::shared_ptr<ASTStorage>> ASTViewTargets::getInnerEngines() const
+{
+    std::vector<std::shared_ptr<ASTStorage>> res;
+    res.reserve(targets.size());
+    for (const auto & target : targets)
+    {
+        if (target.inner_engine)
+            res.push_back(target.inner_engine);
+    }
+    return res;
+}
+
+const ViewTarget * ASTViewTargets::tryGetTarget(ViewTarget::Kind kind) const
+{
+    for (const auto & target : targets)
+    {
+        if (target.kind == kind)
+            return &target;
+    }
+    return nullptr;
+}
+
+ASTPtr ASTViewTargets::clone() const
+{
+    auto res = std::make_shared<ASTViewTargets>(*this);
+    res->children.clear();
+    for (auto & target : res->targets)
+    {
+        if (target.inner_engine)
+        {
+            target.inner_engine = typeid_cast<std::shared_ptr<ASTStorage>>(target.inner_engine->clone());
+            res->children.push_back(target.inner_engine);
+        }
+    }
+    return res;
+}
+
+void ASTViewTargets::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
+{
+    for (const auto & target : targets)
+        formatTarget(target, s, state, frame);
+}
+
+void ASTViewTargets::formatTarget(ViewTarget::Kind kind, const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
+{
+    for (const auto & target : targets)
+    {
+        if (target.kind == kind)
+            formatTarget(target, s, state, frame);
+    }
+}
+
+void ASTViewTargets::formatTarget(const ViewTarget & target, const FormatSettings & s, FormatState & state, FormatStateStacked frame)
+{
+    if (target.table_id)
+    {
+        auto keyword = getKeywordForTableID(target.kind);
+        if (!keyword)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "No keyword for table name of kind {}", toString(target.kind));
+        s.ostr << " " << (s.hilite ? hilite_keyword : "") << toStringView(*keyword)
+               << (s.hilite ? hilite_none : "") << " "
+               << (!target.table_id.database_name.empty() ? backQuoteIfNeed(target.table_id.database_name) + "." : "")
+               << backQuoteIfNeed(target.table_id.table_name);
+    }
+
+    if (target.inner_uuid != UUIDHelpers::Nil)
+    {
+        auto keyword = getKeywordForInnerUUID(target.kind);
+        if (!keyword)
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "No prefix keyword for inner UUID of kind {}", toString(target.kind));
+        s.ostr << " " << (s.hilite ? hilite_keyword : "") << toStringView(*keyword)
+               << (s.hilite ?
hilite_none : "") << " " << quoteString(toString(target.inner_uuid)); + } + + if (target.inner_engine) + { + auto keyword = getKeywordForInnerStorage(target.kind); + if (!keyword) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No prefix keyword for table engine of kind {}", toString(target.kind)); + s.ostr << " " << (s.hilite ? hilite_keyword : "") << toStringView(*keyword) << (s.hilite ? hilite_none : ""); + target.inner_engine->formatImpl(s, state, frame); + } +} + +std::optional ASTViewTargets::getKeywordForTableID(ViewTarget::Kind kind) +{ + switch (kind) + { + case ViewTarget::To: return Keyword::TO; /// TO mydb.mydata + case ViewTarget::Inner: return std::nullopt; + } + UNREACHABLE(); +} + +std::optional ASTViewTargets::getKeywordForInnerStorage(ViewTarget::Kind kind) +{ + switch (kind) + { + case ViewTarget::To: return std::nullopt; /// ENGINE = MergeTree() + case ViewTarget::Inner: return Keyword::INNER; /// INNER ENGINE = MergeTree() + } + UNREACHABLE(); +} + +std::optional ASTViewTargets::getKeywordForInnerUUID(ViewTarget::Kind kind) +{ + switch (kind) + { + case ViewTarget::To: return Keyword::TO_INNER_UUID; /// TO INNER UUID 'XXX' + case ViewTarget::Inner: return std::nullopt; + } + UNREACHABLE(); +} + +void ASTViewTargets::forEachPointerToChild(std::function f) +{ + for (auto & target : targets) + { + if (target.inner_engine) + { + ASTStorage * new_inner_engine = target.inner_engine.get(); + f(reinterpret_cast(&new_inner_engine)); + if (new_inner_engine != target.inner_engine.get()) + { + if (new_inner_engine) + target.inner_engine = typeid_cast>(new_inner_engine->ptr()); + else + target.inner_engine.reset(); + } + } + } +} + +} diff --git a/src/Parsers/ASTViewTargets.h b/src/Parsers/ASTViewTargets.h new file mode 100644 index 00000000000..33a7bc5fcb1 --- /dev/null +++ b/src/Parsers/ASTViewTargets.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ASTStorage; +enum class Keyword : size_t; + +/// Information about the target table for a materialized view or a window view. +struct ViewTarget +{ + enum Kind + { + /// Target table for a materialized view or a window view. + To, + + /// Table with intermediate results for a window view. + Inner, + }; + + Kind kind = To; + + /// StorageID of the target table, if it's not inner. + /// That storage ID can be seen for example after "TO" in a statement like CREATE MATERIALIZED VIEW ... TO ... + StorageID table_id = StorageID::createEmpty(); + + /// UUID of the target table, if it's inner. + /// The UUID is calculated automatically and can be seen for example after "TO INNER UUID" in a statement like + /// CREATE MATERIALIZED VIEW ... TO INNER UUID ... + UUID inner_uuid = UUIDHelpers::Nil; + + /// Table engine of the target table, if it's inner. + /// That engine can be seen for example after "ENGINE" in a statement like CREATE MATERIALIZED VIEW ... ENGINE ... + std::shared_ptr inner_engine; +}; + +/// Converts ViewTarget::Kind to a string. +std::string_view toString(ViewTarget::Kind kind); +void parseFromString(ViewTarget::Kind & out, std::string_view str); + + +/// Information about all the target tables for a view. +class ASTViewTargets : public IAST +{ +public: + std::vector targets; + + /// Sets the StorageID of the target table, if it's not inner. + /// That storage ID can be seen for example after "TO" in a statement like CREATE MATERIALIZED VIEW ... TO ... 
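+    /// For example, `CREATE MATERIALIZED VIEW mv TO mydb.mydata AS SELECT ...` (an illustrative statement)
+    /// stores `mydb.mydata` as the table_id of the `To` target.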
+ void setTableID(ViewTarget::Kind kind, const StorageID & table_id_); + StorageID getTableID(ViewTarget::Kind kind) const; + bool hasTableID(ViewTarget::Kind kind) const; + + /// Replaces an empty database in the StorageID of the target table with a specified database. + void setCurrentDatabase(const String & current_database); + + /// Sets the UUID of the target table, if it's inner. + /// The UUID is calculated automatically and can be seen for example after "TO INNER UUID" in a statement like + /// CREATE MATERIALIZED VIEW ... TO INNER UUID ... + void setInnerUUID(ViewTarget::Kind kind, const UUID & inner_uuid_); + UUID getInnerUUID(ViewTarget::Kind kind) const; + bool hasInnerUUID(ViewTarget::Kind kind) const; + + void resetInnerUUIDs(); + bool hasInnerUUIDs() const; + + /// Sets the table engine of the target table, if it's inner. + /// That engine can be seen for example after "ENGINE" in a statement like CREATE MATERIALIZED VIEW ... ENGINE ... + void setInnerEngine(ViewTarget::Kind kind, ASTPtr storage_def); + std::shared_ptr getInnerEngine(ViewTarget::Kind kind) const; + std::vector> getInnerEngines() const; + + /// Returns a list of all kinds of views in this ASTViewTargets. + std::vector getKinds() const; + + /// Returns information about a target table. + /// The function returns null if such target doesn't exist. + const ViewTarget * tryGetTarget(ViewTarget::Kind kind) const; + + String getID(char) const override { return "ViewTargets"; } + + ASTPtr clone() const override; + + void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; + + /// Formats information only about a specific target table. + void formatTarget(ViewTarget::Kind kind, const FormatSettings & s, FormatState & state, FormatStateStacked frame) const; + static void formatTarget(const ViewTarget & target, const FormatSettings & s, FormatState & state, FormatStateStacked frame); + + /// Helper functions for class ParserViewTargets. Returns a prefix keyword matching a specified target kind. + static std::optional getKeywordForTableID(ViewTarget::Kind kind); + static std::optional getKeywordForInnerUUID(ViewTarget::Kind kind); + static std::optional getKeywordForInnerStorage(ViewTarget::Kind kind); + +protected: + void forEachPointerToChild(std::function f) override; +}; + +} diff --git a/src/Parsers/CreateQueryUUIDs.cpp b/src/Parsers/CreateQueryUUIDs.cpp new file mode 100644 index 00000000000..4dfee67b537 --- /dev/null +++ b/src/Parsers/CreateQueryUUIDs.cpp @@ -0,0 +1,168 @@ +#include + +#include +#include +#include +#include + + +namespace DB +{ + +CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_random, bool force_random) +{ + if (!generate_random || !force_random) + { + uuid = query.uuid; + if (query.targets) + { + for (const auto & target : query.targets->targets) + setTargetInnerUUID(target.kind, target.inner_uuid); + } + } + + if (generate_random) + { + if (uuid == UUIDHelpers::Nil) + uuid = UUIDHelpers::generateV4(); + + /// For an ATTACH query we should never generate UUIDs for its inner target tables + /// because for an ATTACH query those inner target tables probably already exist and can be accessible by names. + /// If we generate random UUIDs for already existing tables then those UUIDs will not be correct making those inner target table inaccessible. 
+ /// Thus it's not safe for example to replace + /// "ATTACH MATERIALIZED VIEW mv AS SELECT a FROM b" with + /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "XXXX" AS SELECT a FROM b" + /// This replacement is safe only for CREATE queries when inner target tables don't exist yet. + if (!query.attach) + { + auto generate_target_uuid = [&](ViewTarget::Kind target_kind) + { + if ((query.getTargetInnerUUID(target_kind) == UUIDHelpers::Nil) && query.getTargetTableID(target_kind).empty()) + setTargetInnerUUID(target_kind, UUIDHelpers::generateV4()); + }; + + /// If destination table (to_table_id) is not specified for materialized view, + /// then MV will create inner table. We should generate UUID of inner table here. + if (query.is_materialized_view) + generate_target_uuid(ViewTarget::To); + } + } +} + +bool CreateQueryUUIDs::empty() const +{ + if (uuid != UUIDHelpers::Nil) + return false; + for (const auto & [_, inner_uuid] : targets_inner_uuids) + { + if (inner_uuid != UUIDHelpers::Nil) + return false; + } + return true; +} + +String CreateQueryUUIDs::toString() const +{ + WriteBufferFromOwnString out; + out << "{"; + bool need_comma = false; + auto add_name_and_uuid_to_string = [&](std::string_view name_, const UUID & uuid_) + { + if (std::exchange(need_comma, true)) + out << ", "; + out << "\"" << name_ << "\": \"" << uuid_ << "\""; + }; + if (uuid != UUIDHelpers::Nil) + add_name_and_uuid_to_string("uuid", uuid); + for (const auto & [kind, inner_uuid] : targets_inner_uuids) + { + if (inner_uuid != UUIDHelpers::Nil) + add_name_and_uuid_to_string(::DB::toString(kind), inner_uuid); + } + out << "}"; + return out.str(); +} + +CreateQueryUUIDs CreateQueryUUIDs::fromString(const String & str) +{ + ReadBufferFromString in{str}; + CreateQueryUUIDs res; + skipWhitespaceIfAny(in); + in >> "{"; + skipWhitespaceIfAny(in); + char c; + while (in.peek(c) && c != '}') + { + String name; + String value; + readDoubleQuotedString(name, in); + skipWhitespaceIfAny(in); + in >> ":"; + skipWhitespaceIfAny(in); + readDoubleQuotedString(value, in); + skipWhitespaceIfAny(in); + if (name == "uuid") + { + res.uuid = parse(value); + } + else + { + ViewTarget::Kind kind; + parseFromString(kind, name); + res.setTargetInnerUUID(kind, parse(value)); + } + if (in.peek(c) && c == ',') + { + in.ignore(1); + skipWhitespaceIfAny(in); + } + } + in >> "}"; + return res; +} + +void CreateQueryUUIDs::setTargetInnerUUID(ViewTarget::Kind kind, const UUID & new_inner_uuid) +{ + for (auto & pair : targets_inner_uuids) + { + if (pair.first == kind) + { + pair.second = new_inner_uuid; + return; + } + } + if (new_inner_uuid != UUIDHelpers::Nil) + targets_inner_uuids.emplace_back(kind, new_inner_uuid); +} + +UUID CreateQueryUUIDs::getTargetInnerUUID(ViewTarget::Kind kind) const +{ + for (const auto & pair : targets_inner_uuids) + { + if (pair.first == kind) + return pair.second; + } + return UUIDHelpers::Nil; +} + +void CreateQueryUUIDs::copyToQuery(ASTCreateQuery & query) const +{ + query.uuid = uuid; + + if (query.targets) + query.targets->resetInnerUUIDs(); + + if (!targets_inner_uuids.empty()) + { + if (!query.targets) + query.set(query.targets, std::make_shared()); + + for (const auto & [kind, inner_uuid] : targets_inner_uuids) + { + if (inner_uuid != UUIDHelpers::Nil) + query.targets->setInnerUUID(kind, inner_uuid); + } + } +} + +} diff --git a/src/Parsers/CreateQueryUUIDs.h b/src/Parsers/CreateQueryUUIDs.h new file mode 100644 index 00000000000..419dad24b35 --- /dev/null +++ b/src/Parsers/CreateQueryUUIDs.h @@ -0,0 +1,40 @@ +#pragma 
once + +#include + + +namespace DB +{ +class ASTCreateQuery; + +/// The UUID of a table or a database defined with a CREATE QUERY along with the UUIDs of its inner targets. +struct CreateQueryUUIDs +{ + CreateQueryUUIDs() = default; + + /// Collect UUIDs from ASTCreateQuery. + /// Parameters: + /// `generate_random` - if it's true then unspecified in the query UUIDs will be generated randomly; + /// `force_random` - if it's true then all UUIDs (even specified in the query) will be (re)generated randomly. + explicit CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_random = false, bool force_random = false); + + bool empty() const; + explicit operator bool() const { return !empty(); } + + String toString() const; + static CreateQueryUUIDs fromString(const String & str); + + void setTargetInnerUUID(ViewTarget::Kind kind, const UUID & new_inner_uuid); + UUID getTargetInnerUUID(ViewTarget::Kind kind) const; + + /// Copies UUIDs to ASTCreateQuery. + void copyToQuery(ASTCreateQuery & query) const; + + /// UUID of the table. + UUID uuid = UUIDHelpers::Nil; + + /// UUIDs of its target table (or tables). + std::vector> targets_inner_uuids; +}; + +} diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 014dc7bd3bf..41379a845e7 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -693,7 +694,8 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ASTPtr table; ASTPtr columns_list; - ASTPtr storage; + std::shared_ptr storage; + ASTPtr targets; ASTPtr as_database; ASTPtr as_table; ASTPtr as_table_function; @@ -773,6 +775,17 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return true; } + auto parse_storage = [&] + { + chassert(!storage); + ASTPtr ast; + if (!storage_p.parse(pos, ast, expected)) + return false; + + storage = typeid_cast>(ast); + return true; + }; + auto need_parse_as_select = [&is_create_empty, &pos, &expected]() { if (ParserKeyword{Keyword::EMPTY_AS}.ignore(pos, expected)) @@ -798,7 +811,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!s_rparen.ignore(pos, expected)) return false; - auto storage_parse_result = storage_p.parse(pos, storage, expected); + auto storage_parse_result = parse_storage(); if ((storage_parse_result || is_temporary) && need_parse_as_select()) { @@ -820,7 +833,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe */ else { - storage_p.parse(pos, storage, expected); + parse_storage(); /// CREATE|ATTACH TABLE ... AS ... if (need_parse_as_select()) @@ -843,7 +856,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe /// Optional - ENGINE can be specified. 
if (!storage) - storage_p.parse(pos, storage, expected); + parse_storage(); } } } @@ -904,6 +917,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe tryGetIdentifierNameInto(as_database, query->as_database); tryGetIdentifierNameInto(as_table, query->as_table); query->set(query->select, select); + query->set(query->targets, targets); query->is_create_empty = is_create_empty; if (from_path) @@ -977,6 +991,13 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e return false; } + std::shared_ptr targets; + if (to_table) + { + targets = std::make_shared(); + targets->setTableID(ViewTarget::To, to_table->as()->getTableId()); + } + /// Optional - a list of columns can be specified. It must fully comply with SELECT. if (s_lparen.ignore(pos, expected)) { @@ -1017,14 +1038,12 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e if (query->table) query->children.push_back(query->table); - if (to_table) - query->to_table_id = to_table->as()->getTableId(); - query->set(query->columns_list, columns_list); tryGetIdentifierNameInto(as_database, query->as_database); tryGetIdentifierNameInto(as_table, query->as_table); query->set(query->select, select); + query->set(query->targets, targets); if (comment) query->set(query->comment, comment); @@ -1139,6 +1158,18 @@ bool ParserCreateWindowViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & storage_p.parse(pos, storage, expected); } + std::shared_ptr targets; + if (to_table || storage || inner_storage) + { + targets = std::make_shared(); + if (to_table) + targets->setTableID(ViewTarget::To, to_table->as()->getTableId()); + if (storage) + targets->setInnerEngine(ViewTarget::To, storage); + if (inner_storage) + targets->setInnerEngine(ViewTarget::Inner, inner_storage); + } + // WATERMARK if (s_watermark.ignore(pos, expected)) { @@ -1195,12 +1226,8 @@ bool ParserCreateWindowViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & if (query->table) query->children.push_back(query->table); - if (to_table) - query->to_table_id = to_table->as()->getTableId(); - query->set(query->columns_list, columns_list); - query->set(query->storage, storage); - query->set(query->inner_storage, inner_storage); + query->is_watermark_strictly_ascending = is_watermark_strictly_ascending; query->is_watermark_ascending = is_watermark_ascending; query->is_watermark_bounded = is_watermark_bounded; @@ -1213,6 +1240,7 @@ bool ParserCreateWindowViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & tryGetIdentifierNameInto(as_database, query->as_database); tryGetIdentifierNameInto(as_table, query->as_table); query->set(query->select, select); + query->set(query->targets, targets); return true; } @@ -1436,6 +1464,7 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e return true; } + bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserKeyword s_create(Keyword::CREATE); @@ -1622,13 +1651,8 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (query->table) query->children.push_back(query->table); - if (to_table) - query->to_table_id = to_table->as()->getTableId(); - if (to_inner_uuid) - query->to_inner_uuid = parseFromString(to_inner_uuid->as()->value.get()); - query->set(query->columns_list, columns_list); - query->set(query->storage, storage); + if (refresh_strategy) query->set(query->refresh_strategy, refresh_strategy); if (comment) @@ -1639,29 +1663,41 @@ bool 
ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (query->columns_list && query->columns_list->primary_key) { /// If engine is not set will use default one - if (!query->storage) - query->set(query->storage, std::make_shared()); - else if (query->storage->primary_key) + if (!storage) + storage = std::make_shared(); + auto & storage_ref = typeid_cast(*storage); + if (storage_ref.primary_key) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple primary keys are not allowed."); - - query->storage->primary_key = query->columns_list->primary_key; - + storage_ref.primary_key = query->columns_list->primary_key; } if (query->columns_list && (query->columns_list->primary_key_from_columns)) { /// If engine is not set will use default one - if (!query->storage) - query->set(query->storage, std::make_shared()); - else if (query->storage->primary_key) + if (!storage) + storage = std::make_shared(); + auto & storage_ref = typeid_cast(*storage); + if (storage_ref.primary_key) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple primary keys are not allowed."); + storage_ref.primary_key = query->columns_list->primary_key_from_columns; + } - query->storage->primary_key = query->columns_list->primary_key_from_columns; + std::shared_ptr targets; + if (to_table || to_inner_uuid || storage) + { + targets = std::make_shared(); + if (to_table) + targets->setTableID(ViewTarget::To, to_table->as()->getTableId()); + if (to_inner_uuid) + targets->setInnerUUID(ViewTarget::To, parseFromString(to_inner_uuid->as()->value.safeGet())); + if (storage) + targets->setInnerEngine(ViewTarget::To, storage); } tryGetIdentifierNameInto(as_database, query->as_database); tryGetIdentifierNameInto(as_table, query->as_table); query->set(query->select, select); + query->set(query->targets, targets); return true; } diff --git a/src/Parsers/ParserViewTargets.cpp b/src/Parsers/ParserViewTargets.cpp new file mode 100644 index 00000000000..8f010882cdd --- /dev/null +++ b/src/Parsers/ParserViewTargets.cpp @@ -0,0 +1,88 @@ +#include + +#include +#include +#include +#include +#include + + +namespace DB +{ + +ParserViewTargets::ParserViewTargets() +{ + for (auto kind : magic_enum::enum_values()) + accept_kinds.push_back(kind); +} + +bool ParserViewTargets::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + ParserStringLiteral literal_p; + ParserStorage storage_p{ParserStorage::TABLE_ENGINE}; + ParserCompoundIdentifier table_name_p(/*table_name_with_optional_uuid*/ true, /*allow_query_parameter*/ true); + + std::shared_ptr res; + + auto result = [&] -> ASTViewTargets & + { + if (!res) + res = std::make_shared(); + return *res; + }; + + for (;;) + { + auto start = pos; + for (auto kind : accept_kinds) + { + auto current = pos; + + auto keyword = ASTViewTargets::getKeywordForInnerUUID(kind); + if (keyword && ParserKeyword{*keyword}.ignore(pos, expected)) + { + ASTPtr ast; + if (literal_p.parse(pos, ast, expected)) + { + result().setInnerUUID(kind, parseFromString(ast->as()->value.safeGet())); + break; + } + } + pos = current; + + keyword = ASTViewTargets::getKeywordForInnerStorage(kind); + if (keyword && ParserKeyword{*keyword}.ignore(pos, expected)) + { + ASTPtr ast; + if (storage_p.parse(pos, ast, expected)) + { + result().setInnerEngine(kind, ast); + break; + } + } + pos = current; + + keyword = ASTViewTargets::getKeywordForTableID(kind); + if (keyword && ParserKeyword{*keyword}.ignore(pos, expected)) + { + ASTPtr ast; + if (table_name_p.parse(pos, ast, expected)) + { + result().setTableID(kind, 
ast->as()->getTableId()); + break; + } + } + pos = current; + } + if (pos == start) + break; + } + + if (!res || res->targets.empty()) + return false; + + node = res; + return true; +} + +} diff --git a/src/Parsers/ParserViewTargets.h b/src/Parsers/ParserViewTargets.h new file mode 100644 index 00000000000..f5d1850e974 --- /dev/null +++ b/src/Parsers/ParserViewTargets.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/// Parses information about target views of a table. +class ParserViewTargets : public IParserBase +{ +public: + ParserViewTargets(); + explicit ParserViewTargets(const std::vector & accept_kinds_) : accept_kinds(accept_kinds_) { } + +protected: + const char * getName() const override { return "ViewTargets"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + + std::vector accept_kinds; +}; + +} diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 57d95a98f11..b603d0ecf87 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -93,11 +93,6 @@ StorageMaterializedView::StorageMaterializedView( { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); - auto * storage_def = query.storage; - if (storage_def && storage_def->primary_key) - storage_metadata.primary_key = KeyDescription::getKeyFromAST(storage_def->primary_key->ptr(), - storage_metadata.columns, - local_context->getGlobalContext()); if (query.sql_security) storage_metadata.setSQLSecurity(query.sql_security->as()); @@ -110,12 +105,21 @@ StorageMaterializedView::StorageMaterializedView( throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName()); /// If the destination table is not set, use inner table - has_inner_table = query.to_table_id.empty(); - if (has_inner_table && !query.storage) + auto to_table_id = query.getTargetTableID(ViewTarget::To); + has_inner_table = to_table_id.empty(); + auto to_inner_uuid = query.getTargetInnerUUID(ViewTarget::To); + auto to_table_engine = query.getTargetInnerEngine(ViewTarget::To); + + if (has_inner_table && !to_table_engine) throw Exception(ErrorCodes::INCORRECT_QUERY, "You must specify where to save results of a MaterializedView query: " "either ENGINE or an existing table in a TO clause"); + if (to_table_engine && to_table_engine->primary_key) + storage_metadata.primary_key = KeyDescription::getKeyFromAST(to_table_engine->primary_key->ptr(), + storage_metadata.columns, + local_context->getGlobalContext()); + auto select = SelectQueryDescription::getSelectQueryFromASTForMatView(query.select->clone(), query.refresh_strategy != nullptr, local_context); if (select.select_table_id) { @@ -135,25 +139,25 @@ StorageMaterializedView::StorageMaterializedView( setInMemoryMetadata(storage_metadata); - bool point_to_itself_by_uuid = has_inner_table && query.to_inner_uuid != UUIDHelpers::Nil - && query.to_inner_uuid == table_id_.uuid; - bool point_to_itself_by_name = !has_inner_table && query.to_table_id.database_name == table_id_.database_name - && query.to_table_id.table_name == table_id_.table_name; + bool point_to_itself_by_uuid = has_inner_table && to_inner_uuid != UUIDHelpers::Nil + && to_inner_uuid == table_id_.uuid; + bool point_to_itself_by_name = !has_inner_table && to_table_id.database_name == table_id_.database_name + && to_table_id.table_name == table_id_.table_name; if (point_to_itself_by_uuid || point_to_itself_by_name) throw Exception(ErrorCodes::BAD_ARGUMENTS, 
"Materialized view {} cannot point to itself", table_id_.getFullTableName()); if (!has_inner_table) { - target_table_id = query.to_table_id; + target_table_id = to_table_id; } else if (LoadingStrictnessLevel::ATTACH <= mode) { /// If there is an ATTACH request, then the internal table must already be created. - target_table_id = StorageID(getStorageID().database_name, generateInnerTableName(getStorageID()), query.to_inner_uuid); + target_table_id = StorageID(getStorageID().database_name, generateInnerTableName(getStorageID()), to_inner_uuid); } else { - const String & engine = query.storage->engine->name; + const String & engine = to_table_engine->engine->name; const auto & storage_features = StorageFactory::instance().getStorageFeatures(engine); /// We will create a query to create an internal table. @@ -161,8 +165,8 @@ StorageMaterializedView::StorageMaterializedView( auto manual_create_query = std::make_shared(); manual_create_query->setDatabase(getStorageID().database_name); manual_create_query->setTable(generateInnerTableName(getStorageID())); - manual_create_query->uuid = query.to_inner_uuid; - manual_create_query->has_uuid = query.to_inner_uuid != UUIDHelpers::Nil; + manual_create_query->uuid = to_inner_uuid; + manual_create_query->has_uuid = to_inner_uuid != UUIDHelpers::Nil; auto new_columns_list = std::make_shared(); new_columns_list->set(new_columns_list->columns, query.columns_list->columns->ptr()); @@ -184,7 +188,9 @@ StorageMaterializedView::StorageMaterializedView( } manual_create_query->set(manual_create_query->columns_list, new_columns_list); - manual_create_query->set(manual_create_query->storage, query.storage->ptr()); + + if (to_table_engine) + manual_create_query->set(manual_create_query->storage, to_table_engine); InterpreterCreateQuery create_interpreter(manual_create_query, create_context); create_interpreter.setInternal(true); diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 783b899c978..14af3bad700 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -470,7 +470,8 @@ protected: if (ast_create && !context->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil) { ast_create->uuid = UUIDHelpers::Nil; - ast_create->to_inner_uuid = UUIDHelpers::Nil; + if (ast_create->targets) + ast_create->targets->resetInnerUUIDs(); } if (columns_mask[src_index++]) diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index b842cdda022..2b1d39fd3b6 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1202,8 +1202,11 @@ StorageWindowView::StorageWindowView( setInMemoryMetadata(storage_metadata); /// If the target table is not set, use inner target table - has_inner_target_table = query.to_table_id.empty(); - if (has_inner_target_table && !query.storage) + auto to_table_id = query.getTargetTableID(ViewTarget::To); + has_inner_target_table = to_table_id.empty(); + auto to_table_engine = query.getTargetInnerEngine(ViewTarget::To); + + if (has_inner_target_table && !to_table_engine) throw Exception(ErrorCodes::INCORRECT_QUERY, "You must specify where to save results of a WindowView query: " "either ENGINE or an existing table in a TO clause"); @@ -1218,12 +1221,12 @@ StorageWindowView::StorageWindowView( auto inner_query = initInnerQuery(query.select->list_of_selects->children.at(0)->as(), context_); - if (query.inner_storage) - inner_table_engine = 
query.inner_storage->clone(); + if (auto inner_storage = query.getTargetInnerEngine(ViewTarget::Inner)) + inner_table_engine = inner_storage->clone(); inner_table_id = StorageID(getStorageID().database_name, generateInnerTableName(getStorageID())); inner_fetch_query = generateInnerFetchQuery(inner_table_id); - target_table_id = has_inner_target_table ? StorageID(table_id_.database_name, generateTargetTableName(table_id_)) : query.to_table_id; + target_table_id = has_inner_target_table ? StorageID(table_id_.database_name, generateTargetTableName(table_id_)) : to_table_id; if (is_proctime) next_fire_signal = getWindowUpperBound(now()); @@ -1248,7 +1251,7 @@ StorageWindowView::StorageWindowView( new_columns_list->set(new_columns_list->columns, query.columns_list->columns->ptr()); target_create_query->set(target_create_query->columns_list, new_columns_list); - target_create_query->set(target_create_query->storage, query.storage->ptr()); + target_create_query->set(target_create_query->storage, to_table_engine); InterpreterCreateQuery create_interpreter_(target_create_query, create_context_); create_interpreter_.setInternal(true); From 0df6448e0f4b4b6d2acbb38466fbd34d979b4d90 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 16 Jul 2024 14:38:02 +0200 Subject: [PATCH 0422/2361] Update base/poco/Foundation/include/Poco/ErrorHandler.h Co-authored-by: Sergei Trifonov --- base/poco/Foundation/include/Poco/ErrorHandler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/poco/Foundation/include/Poco/ErrorHandler.h b/base/poco/Foundation/include/Poco/ErrorHandler.h index 961fec2bc3b..f774f2ccf5e 100644 --- a/base/poco/Foundation/include/Poco/ErrorHandler.h +++ b/base/poco/Foundation/include/Poco/ErrorHandler.h @@ -93,7 +93,7 @@ public: /// Invokes the currently registered ErrorHandler. static void logMessage(Message::Priority priority, const std::string & msg); - /// Invokes the currently registered ErrorHandler. + /// Invokes the currently registered ErrorHandler to log a message. static ErrorHandler * set(ErrorHandler * pHandler); /// Registers the given handler as the current error handler. 
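Note on the refactoring in the patches above: they replace ASTCreateQuery::to_table_id / to_inner_uuid / inner_storage with the per-kind accessors of ASTViewTargets. A minimal sketch of the resulting caller-side pattern — not code from any of these commits; the function name and the `inner_table_id` parameter are invented for illustration:

#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTViewTargets.h>

namespace DB
{

/// Sketch only: how a caller is expected to read view-target information after this refactoring,
/// instead of the removed to_table_id / to_inner_uuid / inner_storage fields.
static StorageID chooseWriteTarget(const ASTCreateQuery & create, const StorageID & inner_table_id)
{
    /// "CREATE MATERIALIZED VIEW mv TO db.dest AS SELECT ..." — the view writes into an existing external table.
    if (create.hasTargetTableID(ViewTarget::To))
        return create.getTargetTableID(ViewTarget::To);

    /// Otherwise the view owns an inner table. Its UUID may be pinned with "TO INNER UUID '...'", and its engine
    /// comes from "ENGINE = ..." (materialized view) or "INNER ENGINE = ..." (window view's intermediate table),
    /// both reachable through create.getTargetInnerEngine(ViewTarget::To / ViewTarget::Inner).
    StorageID result = inner_table_id;
    result.uuid = create.getTargetInnerUUID(ViewTarget::To);
    return result;
}

}
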
From 9b6bdee5f3f80661577e0204f622dd6e41571806 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 16 Jul 2024 13:44:50 +0100 Subject: [PATCH 0423/2361] one more change after merging with master --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8c096c13634..b78fed7e1c1 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -66,6 +66,7 @@ static std::initializer_list Date: Tue, 16 Jul 2024 13:37:59 +0000 Subject: [PATCH 0424/2361] fix --- tests/ci/libfuzzer_test_check.py | 2 +- {utils/libfuzzer => tests/fuzz}/runner.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename {utils/libfuzzer => tests/fuzz}/runner.py (100%) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index d9e33229932..8f19dd7d023 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -75,7 +75,7 @@ def get_run_command( f"--volume={result_path}:/test_output " "--security-opt seccomp=unconfined " # required to issue io_uring sys-calls f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image} " - "python3 ./utils/runner.py" + "python3 /usr/share/clickhouse-test/fuzz/runner.py" ) diff --git a/utils/libfuzzer/runner.py b/tests/fuzz/runner.py similarity index 100% rename from utils/libfuzzer/runner.py rename to tests/fuzz/runner.py From 04f3c29b60658938a93aa6de6f15f8c50121e53e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 13:47:38 +0000 Subject: [PATCH 0425/2361] Fix crash. --- src/Columns/ColumnSparse.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp index ea4d23c1678..0922eb5ea2d 100644 --- a/src/Columns/ColumnSparse.cpp +++ b/src/Columns/ColumnSparse.cpp @@ -695,7 +695,7 @@ void ColumnSparse::updateWeakHash32(WeakHash32 & hash) const if (getNumberOfDefaultRows() > 0) values_hash_data[0] = hash_data[getFirstDefaultValueIndex()]; - for (size_t i = 0; i < values_size; ++i) + for (size_t i = 0; i + 1 < values_size; ++i) values_hash_data[i + 1] = hash_data[offsets_data[i]]; values->updateWeakHash32(values_hash); From 8acc5d90ca295c2ff6b574da15dec70268a19015 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 13:47:58 +0000 Subject: [PATCH 0426/2361] Add more test cases. 
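The one-line loop-bound change in ColumnSparse::updateWeakHash32 above — which the sparse-column window-function tests added below exercise — tightens the loop so it no longer steps past the end of the arrays: slot 0 of the values hash is filled separately from the default value, so offsets has one entry fewer than values, and with the old bound `i < values_size` the last iteration read offsets_data[values_size - 1] and wrote values_hash_data[values_size], both one past the end (assuming exactly that layout, which is what the hunk's indexing implies). A standalone toy model of the indexing, invented for illustration and not ClickHouse code:

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    /// Hashes of the full (dense) column, one per row.
    std::vector<unsigned> hash_of_full_column = {11, 22, 33, 44, 55};
    /// Rows that hold non-default values.
    std::vector<size_t> offsets = {1, 3};
    /// One hash slot per stored value; slot 0 is reserved for the default value.
    std::vector<unsigned> values_hash(offsets.size() + 1, 0);

    const size_t values_size = values_hash.size();
    /// The fixed bound: every offsets[i] read and values_hash[i + 1] write stays in range.
    for (size_t i = 0; i + 1 < values_size; ++i)
        values_hash[i + 1] = hash_of_full_column[offsets[i]];

    assert(values_hash[1] == 22 && values_hash[2] == 44);
    return 0;
}
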
--- ..._finctions_and_column_sparse_bug.reference | 75 ++++++++++++++++++ ...window_finctions_and_column_sparse_bug.sql | 76 +++++++++++++++++++ 2 files changed, 151 insertions(+) diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference index f11ec57a425..356329a392d 100644 --- a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference @@ -6,3 +6,78 @@ false 1 2 false 1 3 true 1 1 true 1 2 +--- +-755809149 0 +--- +1 -2081147898 +1 -1981899149 +2 -2051538534 +2 -1650266905 +3 -1975508531 +3 -1646738223 +4 -1700730666 +4 -1618912877 +5 -1465484835 +5 -1317193174 +6 -1458338029 +6 -1219769753 +7 -1450619195 +7 -1154269118 +8 -1365934326 +8 -1150980622 +9 -1203382363 +9 -1098155311 +10 -1197430632 +10 -841067875 +11 -1176267855 +11 -816935497 +12 -1020892864 +12 -599948807 +13 -991301833 +13 -526570556 +14 -685902265 +14 -504713125 +15 -653505826 +15 -411038390 +16 -451392958 +16 -331834394 +17 -262516786 +17 -176934810 +18 -222873194 +18 -2 +19 -153185515 +19 6 +20 -74234560 +20 255 +21 -41 +21 406615258 +22 -6 +22 541758331 +23 -5 +23 720985423 +24 -3 +24 745669725 +25 15 +25 897064234 +26 65535 +26 1116921321 +27 77089559 +27 1207796283 +28 100663045 +28 1603772265 +29 561061873 +29 1664059402 +30 643897141 +30 1688303275 +31 914629990 +31 1913361922 +32 1159852204 +32 1929066636 +33 1258218855 +33 1968095908 +34 1459407556 +34 2054878592 +35 1936334332 +35 2125075305 +36 1962246186 +37 2030467062 diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql index a4c0200813c..6e326d0a67f 100644 --- a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql @@ -9,3 +9,79 @@ insert into t values (1, 0)(1, 1); SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t qualify c8=1 order by d settings max_threads=2; SELECT '---'; SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t order by d, c8 settings max_threads=2; +SELECT '---'; + +drop table t; + +create table t ( + c Int32 primary key , + s Bool , + w Float64 + ); + +system stop merges t; + +insert into t values(439499072,true,0),(1393290072,true,0); +insert into t values(-1317193174,false,0),(1929066636,false,0); +insert into t values(-2,false,0),(1962246186,true,0),(2054878592,false,0); +insert into t values(-1893563136,true,41.55); +insert into t values(-1338380855,true,-0.7),(-991301833,true,0),(-755809149,false,43.18),(-41,true,0),(3,false,0),(255,false,0),(255,false,0),(189195893,false,0),(195550885,false,9223372036854776000); + +SELECT * FROM ( +SELECT c, min(w) OVER (PARTITION BY s ORDER BY c ASC, s ASC, w ASC) +FROM t limit toUInt64(-1)) +WHERE c = -755809149; + +SELECT '---'; + +create table t_vkx4cc ( + c_ylzjpt Int32, + c_hqfr9 Bool , + ) engine = MergeTree order by c_ylzjpt; + +system stop merges t_vkx4cc; + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-2081147898, coalesce((NOT NOT(cast( (53 < 539704722) as Nullable(Bool)))), true)), (-1219769753, coalesce((true) and (false), false)), (-1981899149, coalesce(false, false)), (-1646738223, coalesce((NOT NOT(cast( (23.5 <= -26) as 
Nullable(Bool)))), false)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (255, coalesce(false, false)), (-1317193174, coalesce(false, false)), (-41, coalesce(true, false)), (1929066636, coalesce(false, true)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-1700730666, coalesce((NOT NOT(cast( (-2022515471055597472 AND -29) as Nullable(Bool)))), false)), (1664059402, coalesce((NOT NOT(cast( (-19643 >= -122) as Nullable(Bool)))), false)), (1688303275, coalesce((NOT NOT(cast( (737275892 < 105) as Nullable(Bool)))), true)), (406615258, coalesce((NOT NOT(cast( (-657730213 = 82.86) as Nullable(Bool)))), false)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-2, coalesce(false, false)), (1962246186, coalesce(true, false)), (-991301833, coalesce(true, true)), (2054878592, coalesce(false, false)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (643897141, coalesce((NOT NOT(cast( (-60 AND cast(null as Nullable(Int64))) as Nullable(Bool)))), true)), (-2051538534, coalesce(((-1616816511 between 332225780 and -1883087387)) or ((-573375170 between -1427445977 and 615586748)), false)), (77089559, coalesce((NOT NOT(cast( ((true) and (true) != 925456787) as Nullable(Bool)))), false)), (1116921321, coalesce((0 is NULL), true)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-816935497, coalesce(false, false)), (1207796283, coalesce((-129 between -5 and -5), false)), (-1365934326, coalesce(true, false)), (-1618912877, coalesce((NOT NOT(cast( (false >= 31833) as Nullable(Bool)))), false)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-331834394, coalesce((NOT NOT(cast( (-63 <= -1822810052) as Nullable(Bool)))), true)), (-1020892864, coalesce((NOT NOT(cast( (40.31 <= 8146037365746019777) as Nullable(Bool)))), true)), (-1150980622, coalesce(((94019304 between -730556489 and 32)) and ((-956354236 is not NULL)), true)), (-1203382363, coalesce(true, true)); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-653505826, coalesce((true) or (true), false)), (-1975508531, coalesce(((-796885845 between 65536 and cast(null as Nullable(Int32)))) or ((NOT NOT(cast( (-7467729336434250795 < 100.20) as Nullable(Bool))))), false)), (-1465484835, coalesce(((NOT NOT(cast( (19209 <= 75.96) as Nullable(Bool))))) or (true), false)), (1968095908, coalesce((NOT NOT(cast( (-1309960412156062327 > 13102) as Nullable(Bool)))), true)); + +alter table t_vkx4cc add column c_zosphq2t1 Float64; + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-153185515, coalesce((NOT NOT(cast( (1291639145 >= 30.22) as Nullable(Bool)))), false), -1.8), (-411038390, coalesce(((-762326135 between 16 and 177530758)) or (false), true), 26.34), (914629990, coalesce((-1125832977 is not NULL), true), 59.2), (541758331, coalesce(false, true), -255.1); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (2125075305, coalesce(false, false), 55.36), (-1176267855, coalesce(true, true), 55.45), (1459407556, coalesce((true) and ((NOT NOT(cast( (95.96 != 65) as Nullable(Bool))))), true), 85.80), (-1098155311, coalesce(false, false), 2147483649.9); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (6, coalesce((NOT NOT(cast( (1546334968 < -4) as Nullable(Bool)))), true), 57.42), (-5, coalesce((NOT NOT(cast( (59 AND 13) as Nullable(Bool)))), false), 65536.3), (100663045, coalesce((-1190355242 is not NULL), true), 73.80), (-451392958, coalesce((NOT NOT(cast( (false != -443845933) as Nullable(Bool)))), false), -4294967294.0); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (561061873, 
coalesce(true, false), 12.17), (-526570556, coalesce(false, false), 64.73), (-1450619195, coalesce(true, true), 54.33), (-3, coalesce(true, true), 52.9); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-504713125, coalesce(false, true), 27.58), (897064234, coalesce((836516994 between cast(null as Nullable(Int32)) and -1832647080), true), 9223372036854775809.2), (65535, coalesce(true, true), 4294967297.5), (-599948807, coalesce((false) or ((NOT NOT(cast( (6.52 = 65.49) as Nullable(Bool))))), false), 256.5); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-1650266905, coalesce((NOT NOT(cast( (-83 = -218055084) as Nullable(Bool)))), true), 1.9), (-841067875, coalesce(false, true), -126.5), (15, coalesce(((NOT NOT(cast( (cast(null as Nullable(Decimal)) = cast(null as Nullable(Int32))) as Nullable(Bool))))) or (true), true), 33.65), (1913361922, coalesce((NOT NOT(cast( (false AND 0) as Nullable(Bool)))), false), 6.4); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (1159852204, coalesce((-2057115045 is not NULL), false), 20.61), (-6, coalesce(true, true), 66.33), (-1154269118, coalesce(false, true), 8.89), (1258218855, coalesce(true, false), 19.80); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (1603772265, coalesce(false, true), 57.87), (-176934810, coalesce(false, true), 128.8), (-1458338029, coalesce((NOT NOT(cast( (20908 != (NOT NOT(cast( (cast(null as Nullable(Decimal)) <= (true) or ((NOT NOT(cast( (973511022 <= -112) as Nullable(Bool)))))) as Nullable(Bool))))) as Nullable(Bool)))), true), 76.54), (-262516786, coalesce((cast(null as Nullable(Int32)) is NULL), false), 21.49); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-1197430632, coalesce(true, false), 45.40), (-685902265, coalesce((NOT NOT(cast( (cast(null as Nullable(Decimal)) < cast(null as Nullable(Decimal))) as Nullable(Bool)))), true), 5.55), (1936334332, coalesce((-1565552735 is not NULL), false), 26.28), (2030467062, coalesce((NOT NOT(cast( (127.3 != cast(null as Nullable(Int32))) as Nullable(Bool)))), true), 89.50); + +insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (720985423, coalesce((NOT NOT(cast( (-451448940 = cast(null as Nullable(Decimal))) as Nullable(Bool)))), false), 52.65), (-222873194, coalesce(((-20 between -1419620477 and 1616455043)) or ((25624502 between 1312431316 and 1757361651)), false), 127.2), (745669725, coalesce((NOT NOT(cast( ((NOT NOT(cast( (cast(null as Nullable(UInt64)) <= 42) as Nullable(Bool)))) >= 3233811255032796928) as Nullable(Bool)))), false), 7.74), (-74234560, coalesce((NOT NOT(cast( (cast(null as Nullable(Decimal)) >= cast(null as Nullable(Decimal))) as Nullable(Bool)))), true), 19.25); + +SELECT DISTINCT + count(ref_0.c_zosphq2t1) over (partition by ref_0.c_hqfr9 order by ref_0.c_ylzjpt, ref_0.c_hqfr9, ref_0.c_zosphq2t1) as c0, + ref_0.c_ylzjpt as c1 +FROM + t_vkx4cc as ref_0 + order by c0, c1; From c9e02eee7a1d7b2e7aa85bfc87d6a54a3bcaedfa Mon Sep 17 00:00:00 2001 From: yariks5s Date: Tue, 16 Jul 2024 13:48:52 +0000 Subject: [PATCH 0427/2361] fix after review --- docs/en/sql-reference/table-functions/hdfs.md | 4 ---- .../StorageObjectStorageSource.cpp | 2 +- .../ObjectStorageQueueSource.cpp | 2 +- src/Storages/StorageFile.cpp | 2 +- src/Storages/StorageFileCluster.cpp | 2 +- src/Storages/StorageURL.cpp | 19 +------------------ src/Storages/StorageURL.h | 18 ++++++++++++++++++ src/Storages/StorageURLCluster.cpp | 3 +-- src/Storages/VirtualColumnUtils.cpp | 2 +- 
src/Storages/VirtualColumnUtils.h | 2 +- 10 files changed, 26 insertions(+), 30 deletions(-) diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 73fdc263d68..60c2fd40e6a 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -112,10 +112,6 @@ SET use_hive_partitioning = 1; SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; ``` -``` reference -specified_data -``` - ## Storage Settings {#storage-settings} - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 93f8eaacbc0..d29e33444b0 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -204,7 +204,7 @@ Chunk StorageObjectStorageSource::generate() .size = object_info->isArchive() ? object_info->fileSizeInArchive() : object_info->metadata->size_bytes, .filename = &filename, .last_modified = object_info->metadata->last_modified, - }, read_from_format_info.columns_description, getContext()); + }, getContext(), read_from_format_info.columns_description); const auto & partition_columns = configuration->getPartitionColumns(); if (!partition_columns.empty() && chunk_size && chunk.hasColumns()) diff --git a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp index 4d921003e04..2634a7b2f1e 100644 --- a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp +++ b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp @@ -524,7 +524,7 @@ Chunk ObjectStorageQueueSource::generateImpl() { .path = path, .size = reader.getObjectInfo()->metadata->size_bytes - }); + }, getContext(), read_from_format_info.columns_description); return chunk; } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index b43fce370a1..5cbc2b38887 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1459,7 +1459,7 @@ Chunk StorageFileSource::generate() .size = current_file_size, .filename = (filename_override.has_value() ? &filename_override.value() : nullptr), .last_modified = current_file_last_modified - }, columns_description, getContext()); + }, getContext(), columns_description); return chunk; } diff --git a/src/Storages/StorageFileCluster.cpp b/src/Storages/StorageFileCluster.cpp index f7684182e79..82ae0b761ae 100644 --- a/src/Storages/StorageFileCluster.cpp +++ b/src/Storages/StorageFileCluster.cpp @@ -61,7 +61,7 @@ StorageFileCluster::StorageFileCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context)); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, paths.empty() ? 
"" : paths[0])); } void StorageFileCluster::updateQueryToSendIfNeeded(DB::ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const DB::ContextPtr & context) diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 87911230819..6e7788cfc1d 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -39,7 +39,6 @@ #include #include -#include #include #include @@ -92,27 +91,11 @@ static const std::vector> optional_regex_keys = { std::make_shared(R"(headers.header\[[0-9]*\].value)"), }; -static bool urlWithGlobs(const String & uri) -{ - return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos; -} - static ConnectionTimeouts getHTTPTimeouts(ContextPtr context) { return ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), context->getServerSettings().keep_alive_timeout); } -String getSampleURI(String uri, ContextPtr context) -{ - if (urlWithGlobs(uri)) - { - auto uris = parseRemoteDescription(uri, 0, uri.size(), ',', context->getSettingsRef().glob_expansion_max_elements); - if (!uris.empty()) - return uris[0]; - } - return uri; -} - IStorageURLBase::IStorageURLBase( const String & uri_, const ContextPtr & context_, @@ -433,7 +416,7 @@ Chunk StorageURLSource::generate() { .path = curr_uri.getPath(), .size = current_file_size, - }, columns_description, getContext()); + }, getContext(), columns_description); return chunk; } diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index fa7cc6eeeef..a874ca9147c 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include namespace DB @@ -267,6 +269,22 @@ private: bool cancelled = false; }; +static bool urlWithGlobs(const String & uri) +{ + return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos; +} + +inline String getSampleURI(String uri, ContextPtr context) +{ + if (urlWithGlobs(uri)) + { + auto uris = parseRemoteDescription(uri, 0, uri.size(), ',', context->getSettingsRef().glob_expansion_max_elements); + if (!uris.empty()) + return uris[0]; + } + return uri; +} + class StorageURL : public IStorageURLBase { public: diff --git a/src/Storages/StorageURLCluster.cpp b/src/Storages/StorageURLCluster.cpp index 664f170c17e..e80f4ebcd06 100644 --- a/src/Storages/StorageURLCluster.cpp +++ b/src/Storages/StorageURLCluster.cpp @@ -3,7 +3,6 @@ #include #include -#include #include #include #include @@ -76,7 +75,7 @@ StorageURLCluster::StorageURLCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context)); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, getSampleURI(uri, context), getFormatSettings(context))); } void StorageURLCluster::updateQueryToSendIfNeeded(ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const ContextPtr & context) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 24d0b7160b2..31cee485dde 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -239,7 +239,7 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values, 
ColumnsDescription columns, ContextPtr context) + VirtualsForFileLikeStorage virtual_values, ContextPtr context, const ColumnsDescription & columns) { std::unordered_map hive_map; if (context->getSettingsRef().use_hive_partitioning) diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index fef32b149ec..1bd74189559 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -83,7 +83,7 @@ struct VirtualsForFileLikeStorage void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values, ColumnsDescription columns = {}, ContextPtr context = {}); + VirtualsForFileLikeStorage virtual_values, ContextPtr context, const ColumnsDescription & columns); } } From 4dea89df763fb9504cb681379d46830f4ec98db3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 13:50:58 +0000 Subject: [PATCH 0428/2361] Cleanup. --- src/Columns/ColumnSparse.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp index 0922eb5ea2d..98a66e87387 100644 --- a/src/Columns/ColumnSparse.cpp +++ b/src/Columns/ColumnSparse.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include From e7e62b358360083eda6d2ec983fb5a1b733d1eba Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 16 Jul 2024 14:17:51 +0000 Subject: [PATCH 0429/2361] fix style --- tests/fuzz/runner.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index bbe648dbbc2..0862ea29e42 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -11,7 +11,7 @@ FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") def run_fuzzer(fuzzer: str): - logging.info(f"Running fuzzer {fuzzer}...") + logging.info("Running fuzzer %s...", fuzzer) corpus_dir = f"{fuzzer}.in" with Path(corpus_dir) as path: @@ -29,28 +29,28 @@ def run_fuzzer(fuzzer: str): if parser.has_section("asan"): os.environ["ASAN_OPTIONS"] = ( - f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}" + f"{os.environ['ASAN_OPTIONS']}:{':'.join(f"{key}={value}" for key, value in parser['asan'].items())}" ) if parser.has_section("msan"): os.environ["MSAN_OPTIONS"] = ( - f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}" + f"{os.environ['MSAN_OPTIONS']}:{':'.join(f"{key}={value}" for key, value in parser['msan'].items())}" ) if parser.has_section("ubsan"): os.environ["UBSAN_OPTIONS"] = ( - f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}" + f"{os.environ['UBSAN_OPTIONS']}:{':'.join(f"{key}={value}" for key, value in parser['ubsan'].items())}" ) if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( - "-%s=%s" % (key, value) + f"-{key}={value}" for key, value in parser["libfuzzer"].items() ) if parser.has_section("fuzzer_arguments"): fuzzer_arguments = " ".join( - ("%s" % key) if value == "" else ("%s=%s" % (key, value)) + (f"{key}") if value == "" else (f"{key}={value}") for key, value in parser["fuzzer_arguments"].items() ) @@ -65,7 +65,7 @@ def run_fuzzer(fuzzer: str): cmd_line += " < /dev/null" - logging.info(f"...will execute: {cmd_line}") + logging.info("...will execute: %s", cmd_line) subprocess.check_call(cmd_line, shell=True) From f88e825b33273bc369be246b0dbbe7cfaa80855d Mon Sep 17 00:00:00 2001 From: 
Nikita Taranov Date: Tue, 16 Jul 2024 15:14:56 +0100 Subject: [PATCH 0430/2361] small fix --- src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index 94213e2ba61..26595fbb36d 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -379,7 +379,6 @@ void MergeTreePrefetchedReadPool::fillPerThreadTasks(size_t threads, size_t sum_ for (const auto & part : per_part_statistics) total_size_approx += part.sum_marks * part.approx_size_of_mark; - size_t min_prefetch_step_marks = pool_settings.min_marks_for_concurrent_read; for (size_t i = 0; i < per_part_infos.size(); ++i) { auto & part_stat = per_part_statistics[i]; @@ -411,11 +410,10 @@ void MergeTreePrefetchedReadPool::fillPerThreadTasks(size_t threads, size_t sum_ LOG_DEBUG( log, - "Sum marks: {}, threads: {}, min_marks_per_thread: {}, min prefetch step marks: {}, prefetches limit: {}, total_size_approx: {}", + "Sum marks: {}, threads: {}, min_marks_per_thread: {}, prefetches limit: {}, total_size_approx: {}", sum_marks, threads, min_marks_per_thread, - min_prefetch_step_marks, settings.filesystem_prefetches_limit, total_size_approx); From 288b0aaeb097c714e57d4cfd71a606b6942bd57c Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 16 Jul 2024 15:16:48 +0100 Subject: [PATCH 0431/2361] fix build --- src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 79eaae14f59..0e713150625 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -1,7 +1,10 @@ -#include +#include + +#include #include #include -#include + +#include namespace DB From c974430e68bff97986379410e9a94c1ea641d1bd Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 16 Jul 2024 15:01:43 +0000 Subject: [PATCH 0432/2361] fix --- tests/fuzz/runner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 0862ea29e42..047a2245bfa 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -29,17 +29,17 @@ def run_fuzzer(fuzzer: str): if parser.has_section("asan"): os.environ["ASAN_OPTIONS"] = ( - f"{os.environ['ASAN_OPTIONS']}:{':'.join(f"{key}={value}" for key, value in parser['asan'].items())}" + f"{os.environ['ASAN_OPTIONS']}:{':'.join(f'{key}={value}' for key, value in parser['asan'].items())}" ) if parser.has_section("msan"): os.environ["MSAN_OPTIONS"] = ( - f"{os.environ['MSAN_OPTIONS']}:{':'.join(f"{key}={value}" for key, value in parser['msan'].items())}" + f"{os.environ['MSAN_OPTIONS']}:{':'.join(f'{key}={value}' for key, value in parser['msan'].items())}" ) if parser.has_section("ubsan"): os.environ["UBSAN_OPTIONS"] = ( - f"{os.environ['UBSAN_OPTIONS']}:{':'.join(f"{key}={value}" for key, value in parser['ubsan'].items())}" + f"{os.environ['UBSAN_OPTIONS']}:{':'.join(f'{key}={value}' for key, value in parser['ubsan'].items())}" ) if parser.has_section("libfuzzer"): From 8660aec5d79f7a16ab3bcac2aaab291e4bcf0c2d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 16 Jul 2024 15:16:11 +0000 Subject: [PATCH 0433/2361] Automatic style fix --- tests/fuzz/runner.py | 3 +-- 1 file changed, 1 insertion(+), 2 
deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 047a2245bfa..44259228f60 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -44,8 +44,7 @@ def run_fuzzer(fuzzer: str): if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( - f"-{key}={value}" - for key, value in parser["libfuzzer"].items() + f"-{key}={value}" for key, value in parser["libfuzzer"].items() ) if parser.has_section("fuzzer_arguments"): From b6672b9952caeff523b2836a710dd3be3d6ed4e8 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 16 Jul 2024 15:20:01 +0000 Subject: [PATCH 0434/2361] add rebuild for compact part --- src/Core/SettingsEnums.cpp | 3 +- src/Core/SettingsEnums.h | 1 + src/Interpreters/MutationsInterpreter.cpp | 5 -- src/Interpreters/MutationsInterpreter.h | 1 - .../MergeTree/MergeMutateSelectedEntry.h | 1 - src/Storages/MergeTree/MergeTreeSettings.h | 2 +- src/Storages/MergeTree/MutateTask.cpp | 24 ++++++++-- src/Storages/StorageMergeTree.cpp | 1 - ...61_lightweight_delete_projection.reference | 5 ++ .../03161_lightweight_delete_projection.sql | 46 +++++++++++++++++-- 10 files changed, 69 insertions(+), 20 deletions(-) diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index 82e7d6db410..6c000d83254 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -175,7 +175,8 @@ IMPLEMENT_SETTING_ENUM(ParallelReplicasCustomKeyFilterType, ErrorCodes::BAD_ARGU IMPLEMENT_SETTING_ENUM(LightweightMutationProjectionMode, ErrorCodes::BAD_ARGUMENTS, {{"throw", LightweightMutationProjectionMode::THROW}, - {"drop", LightweightMutationProjectionMode::DROP}}) + {"drop", LightweightMutationProjectionMode::DROP}, + {"rebuild", LightweightMutationProjectionMode::REBUILD}}) IMPLEMENT_SETTING_AUTO_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS) diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index f6d9593ca56..0281176417a 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -311,6 +311,7 @@ enum class LightweightMutationProjectionMode : uint8_t { THROW, DROP, + REBUILD, }; DECLARE_SETTING_ENUM(LightweightMutationProjectionMode) diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index b61f7f78885..db4ea9c0754 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -344,11 +344,6 @@ bool MutationsInterpreter::Source::hasProjection(const String & name) const return part && part->hasProjection(name); } -bool MutationsInterpreter::Source::hasProjection() const -{ - return part && part->hasProjection(); -} - bool MutationsInterpreter::Source::hasBrokenProjection(const String & name) const { return part && part->hasBrokenProjection(name); diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index b792a33f904..6aaa233cda3 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -126,7 +126,6 @@ public: bool materializeTTLRecalculateOnly() const; bool hasSecondaryIndex(const String & name) const; bool hasProjection(const String & name) const; - bool hasProjection() const; bool hasBrokenProjection(const String & name) const; bool isCompactPart() const; diff --git a/src/Storages/MergeTree/MergeMutateSelectedEntry.h b/src/Storages/MergeTree/MergeMutateSelectedEntry.h index e7efe00741c..c420cbca12b 100644 --- a/src/Storages/MergeTree/MergeMutateSelectedEntry.h +++ b/src/Storages/MergeTree/MergeMutateSelectedEntry.h @@ -3,7 +3,6 @@ 
#include #include - namespace DB { diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index c84ca9956fc..74e7a7f43bc 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -215,7 +215,7 @@ struct Settings; M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ /** Projection settings. */ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts.", 0) \ + M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts, or rebuild the projections.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 8790ce6628e..092a6d0d6ed 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -662,7 +662,7 @@ static NameSet collectFilesToSkip( const std::set & projections_to_recalc, const std::set & stats_to_recalc, const StorageMetadataPtr & metadata_snapshot, - bool lightweight_delete_mode) + bool skip_all_projections) { NameSet files_to_skip = source_part->getFileNamesWithoutChecksums(); @@ -686,7 +686,7 @@ static NameSet collectFilesToSkip( } } - if (lightweight_delete_mode) + if (skip_all_projections) { for (const auto & projection : metadata_snapshot->getProjections()) files_to_skip.insert(projection.getDirectoryName()); @@ -2211,6 +2211,8 @@ bool MutateTask::prepare() ctx->stage_progress = std::make_unique(1.0); + bool lightweight_delete_mode = false; + if (!ctx->for_interpreter.empty()) { /// Always disable filtering in mutations: we want to read and write all rows because for updates we rewrite only some of the @@ -2228,6 +2230,16 @@ bool MutateTask::prepare() ctx->mutating_pipeline_builder = ctx->interpreter->execute(); ctx->updated_header = ctx->interpreter->getUpdatedHeader(); ctx->progress_callback = MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress); + + lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + /// If under the condition of lightweight delete mode with rebuild option, add projections again here as we can only know + /// the condition as early as from here. 
+ if (lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::REBUILD) + { + for (const auto & projection : ctx->metadata_snapshot->getProjections()) + ctx->materialized_projections.insert(projection.name); + } } auto single_disk_volume = std::make_shared("volume_" + ctx->future_part->name, ctx->space_reservation->getDisk(), 0); @@ -2269,7 +2281,6 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - bool lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && lightweight_delete_mode) { /// This mutation contains lightweight delete and we need to count the deleted rows, @@ -2307,7 +2318,10 @@ bool MutateTask::prepare() ctx->context, ctx->materialized_indices); - if (!lightweight_delete_mode) + bool lightweight_delete_projection_drop = lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP; + /// Under lightweight delete mode, if option is drop, projections_to_recalc should be empty. + if (!lightweight_delete_projection_drop) { ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( ctx->source_part, @@ -2326,7 +2340,7 @@ bool MutateTask::prepare() ctx->projections_to_recalc, ctx->stats_to_recalc, ctx->metadata_snapshot, - lightweight_delete_mode); + lightweight_delete_projection_drop); ctx->files_to_rename = MutationHelpers::collectFilesForRenames( ctx->source_part, diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index d8c61da2a98..40b3a12297b 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1285,7 +1285,6 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( auto commands = std::make_shared(); size_t current_ast_elements = 0; auto last_mutation_to_apply = mutations_end_it; - for (auto it = mutations_begin_it; it != mutations_end_it; ++it) { /// Do not squash mutations from different transactions to be able to commit/rollback them independently. 
diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index e69de29bb2d..bc7e1faecff 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -0,0 +1,5 @@ +8888 Alice 50 +p1 +p2 +p1 +p2 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 16a7468234b..b63341f5371 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -13,25 +13,43 @@ SETTINGS min_bytes_for_wide_part = 10485760; INSERT INTO users VALUES (1231, 'John', 33); +-- testing throw default mode ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } +-- testing drop mode ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; DELETE FROM users WHERE uid = 1231; +SELECT * FROM users ORDER BY uid; + SYSTEM FLUSH LOGS; -- expecting no projection SELECT - name, - `table` + name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +-- testing rebuild mode +INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users WHERE uid = 6666; + SELECT * FROM users ORDER BY uid; +SYSTEM FLUSH LOGS; + +-- expecting projection p1, p2 in 2 parts +SELECT + name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); + DROP TABLE users; @@ -47,23 +65,41 @@ SETTINGS min_bytes_for_wide_part = 0; INSERT INTO users VALUES (1231, 'John', 33); +-- testing throw default mode ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } +-- testing drop mode ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; DELETE FROM users WHERE uid = 1231; +SELECT * FROM users ORDER BY uid; + SYSTEM FLUSH LOGS; -- expecting no projection SELECT - name, - `table` + name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); -SELECT * FROM users ORDER BY uid; +-- -- testing rebuild mode +-- INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +-- ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +-- DELETE FROM users WHERE uid = 6666; + +-- SELECT * FROM users ORDER BY uid; + +-- SYSTEM FLUSH LOGS; + +-- -- expecting projection p1, p2 in 2 parts +-- SELECT +-- name +-- FROM system.projection_parts +-- WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); DROP TABLE users; \ No newline at end of file From c3507979cfc0359ab38762525ab0306904a387b8 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 16 Jul 2024 15:41:54 +0000 Subject: [PATCH 0435/2361] fix --- src/Storages/MergeTree/MutateTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 092a6d0d6ed..489c8863a8a 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2234,7 +2234,7 @@ bool 
MutateTask::prepare() lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); /// If under the condition of lightweight delete mode with rebuild option, add projections again here as we can only know /// the condition as early as from here. - if (lightweight_delete_mode + if (lightweight_delete_mode && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::REBUILD) { for (const auto & projection : ctx->metadata_snapshot->getProjections()) From 3ae4211b3af575cf8d7186a4cc915f9ecb6b4182 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 16 Jul 2024 17:59:57 +0200 Subject: [PATCH 0436/2361] fix tests --- tests/queries/0_stateless/03203_hive_style_partitioning.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index d2b1f31c85f..0f687d532b0 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -39,8 +39,8 @@ SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01 $CLICKHOUSE_LOCAL -n -q """ set use_hive_partitioning = 1; -SELECT _identifier FROM file('$CURDIR/data_hive/partitioning/identitier=*/email.csv') LIMIT 2; -SELECT __identifier FROM file('$CURDIR/data_hive/partitioning/identitier=*/email.csv') LIMIT 2; +SELECT _identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2; +SELECT __identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2; """ $CLICKHOUSE_LOCAL -n -q """ From d91cb40bbdbd18a2bef811002033d0c99fe693d3 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Tue, 16 Jul 2024 16:15:24 +0000 Subject: [PATCH 0437/2361] fix include and remove unused getFormatSettings --- src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp | 2 +- src/Storages/StorageFile.cpp | 1 - src/Storages/StorageURLCluster.cpp | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp index 7f6b3338f9b..c214665f7e0 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp @@ -68,7 +68,7 @@ StorageObjectStorageCluster::StorageObjectStorageCluster( if (sample_path.empty() && context_->getSettingsRef().use_hive_partitioning) sample_path = getPathSample(metadata, context_); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_, sample_path, getFormatSettings(context_))); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_, sample_path)); setInMemoryMetadata(metadata); } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 5cbc2b38887..ed05f57b418 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -52,7 +52,6 @@ #include #include #include -#include "Formats/FormatSettings.h" #include #include diff --git a/src/Storages/StorageURLCluster.cpp b/src/Storages/StorageURLCluster.cpp index e80f4ebcd06..1522a18a083 100644 --- a/src/Storages/StorageURLCluster.cpp +++ b/src/Storages/StorageURLCluster.cpp @@ -75,7 +75,7 @@ StorageURLCluster::StorageURLCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - 
setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, getSampleURI(uri, context), getFormatSettings(context))); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, getSampleURI(uri, context))); } void StorageURLCluster::updateQueryToSendIfNeeded(ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const ContextPtr & context) From 7ea3324776bd4cb8cc886822a9b30d3dfcaff5a2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 16:31:34 +0000 Subject: [PATCH 0438/2361] Refactor in VirtualColumnUtils --- src/Interpreters/ActionsDAG.cpp | 12 ++++----- src/Interpreters/ActionsDAG.h | 3 +-- .../useDataParallelAggregation.cpp | 8 +++--- src/Processors/QueryPlan/SortingStep.cpp | 10 +++++++ src/Storages/MergeTree/MergeTreeData.cpp | 4 +-- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 4 +-- .../StorageObjectStorageSource.cpp | 15 ++++++++--- .../StorageObjectStorageSource.h | 2 +- src/Storages/StorageFile.cpp | 8 ++++-- src/Storages/StorageURL.cpp | 6 +++-- .../System/StorageSystemDetachedParts.cpp | 4 +-- .../StorageSystemDroppedTablesParts.cpp | 4 +-- .../System/StorageSystemDroppedTablesParts.h | 6 ++--- .../System/StorageSystemPartsBase.cpp | 12 ++++----- src/Storages/System/StorageSystemPartsBase.h | 6 ++--- src/Storages/System/StorageSystemTables.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 27 +++++++++++-------- src/Storages/VirtualColumnUtils.h | 13 ++++----- 18 files changed, 87 insertions(+), 59 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 4f03a9e1602..e001406408f 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -624,9 +624,9 @@ void ActionsDAG::removeAliasesForFilter(const std::string & filter_name) } } -ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases) +ActionsDAG ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases) { - auto actions = std::make_unique(); + ActionsDAG actions; std::unordered_map copy_map; struct Frame @@ -661,21 +661,21 @@ ActionsDAGPtr ActionsDAG::cloneSubDAG(const NodeRawConstPtrs & outputs, bool rem if (remove_aliases && frame.node->type == ActionType::ALIAS) copy_node = copy_map[frame.node->children.front()]; else - copy_node = &actions->nodes.emplace_back(*frame.node); + copy_node = &actions.nodes.emplace_back(*frame.node); if (frame.node->type == ActionType::INPUT) - actions->inputs.push_back(copy_node); + actions.inputs.push_back(copy_node); stack.pop(); } } - for (auto & node : actions->nodes) + for (auto & node : actions.nodes) for (auto & child : node.children) child = copy_map[child]; for (const auto * output : outputs) - actions->outputs.push_back(copy_map[output]); + actions.outputs.push_back(copy_map[output]); return actions; } diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 05948ccf928..6f5c3d3b0df 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -261,13 +261,12 @@ public: void compileExpressions(size_t min_count_to_compile_expression, const std::unordered_set & lazy_executed_nodes = {}); #endif - static ActionsDAGPtr clone(const ActionsDAGPtr & from) { return clone(from.get()); } static ActionsDAGPtr clone(const ActionsDAG * from); ActionsDAG clone(std::unordered_map & old_to_new_nodes) const; ActionsDAG clone() const; - static ActionsDAGPtr cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); 
+ static ActionsDAG cloneSubDAG(const NodeRawConstPtrs & outputs, bool remove_aliases); /// Execute actions for header. Input block must have empty columns. /// Result should be equal to the execution of ExpressionActions built from this DAG. diff --git a/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp b/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp index 7e0260c0040..0eeaec9bde7 100644 --- a/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp +++ b/src/Processors/QueryPlan/Optimizations/useDataParallelAggregation.cpp @@ -74,11 +74,11 @@ void removeInjectiveFunctionsFromResultsRecursively(const ActionsDAG::Node * nod /// Our objective is to replace injective function nodes in `actions` results with its children /// until only the irreducible subset of nodes remains. Against these set of nodes we will match partition key expression /// to determine if it maps all rows with the same value of group by key to the same partition. -NodeSet removeInjectiveFunctionsFromResultsRecursively(const ActionsDAGPtr & actions) +NodeSet removeInjectiveFunctionsFromResultsRecursively(const ActionsDAG & actions) { NodeSet irreducible; NodeSet visited; - for (const auto & node : actions->getOutputs()) + for (const auto & node : actions.getOutputs()) removeInjectiveFunctionsFromResultsRecursively(node, irreducible, visited); return irreducible; } @@ -158,7 +158,7 @@ bool isPartitionKeySuitsGroupByKey( auto key_nodes = group_by_actions.findInOutpus(aggregating.getParams().keys); auto group_by_key_actions = ActionsDAG::cloneSubDAG(key_nodes, /*remove_aliases=*/ true); - const auto & gb_key_required_columns = group_by_key_actions->getRequiredColumnsNames(); + const auto & gb_key_required_columns = group_by_key_actions.getRequiredColumnsNames(); const auto & partition_actions = reading.getStorageMetadata()->getPartitionKey().expression->getActionsDAG(); @@ -169,7 +169,7 @@ bool isPartitionKeySuitsGroupByKey( const auto irreducibe_nodes = removeInjectiveFunctionsFromResultsRecursively(group_by_key_actions); - const auto matches = matchTrees(group_by_key_actions->getOutputs(), partition_actions); + const auto matches = matchTrees(group_by_key_actions.getOutputs(), partition_actions); return allOutputsDependsOnlyOnAllowedNodes(partition_actions, irreducibe_nodes, matches); } diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 48fad9f5fdb..e8e761e7ab0 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -447,6 +447,13 @@ void SortingStep::describeActions(FormatSettings & settings) const settings.out << '\n'; } + if (!partition_by_description.empty()) + { + settings.out << prefix << "Partition by description: "; + dumpSortDescription(partition_by_description, settings.out); + settings.out << '\n'; + } + if (limit) settings.out << prefix << "Limit " << limit << '\n'; } @@ -461,6 +468,9 @@ void SortingStep::describeActions(JSONBuilder::JSONMap & map) const else map.add("Sort Description", explainSortDescription(result_description)); + if (!partition_by_description.empty()) + map.add("Partition By Description", explainSortDescription(partition_by_description)); + if (limit) map.add("Limit", limit); } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 878e0420665..9aa9490198a 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1154,7 +1154,7 @@ std::optional 
MergeTreeData::totalRowsByPartitionPredicateImpl( if (!virtual_columns_block.has(input->result_name)) valid = false; - PartitionPruner partition_pruner(metadata_snapshot, filter_dag.get(), local_context, true /* strict */); + PartitionPruner partition_pruner(metadata_snapshot, &*filter_dag, local_context, true /* strict */); if (partition_pruner.isUseless() && !valid) return {}; @@ -1162,7 +1162,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( if (valid) { virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, parts); - VirtualColumnUtils::filterBlockWithDAG(filter_dag, virtual_columns_block, local_context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*filter_dag), virtual_columns_block, local_context); part_values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part"); if (part_values.empty()) return 0; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index a6a40a808e5..a37dbfa554c 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -465,7 +465,7 @@ void MergeTreeDataSelectExecutor::buildKeyConditionFromPartOffset( return; part_offset_condition.emplace(KeyCondition{ - dag.get(), + &*dag, context, sample.getNames(), std::make_shared(ActionsDAG(sample.getColumnsWithTypeAndName()), ExpressionActionsSettings{}), @@ -488,7 +488,7 @@ std::optional> MergeTreeDataSelectExecutor::filterPar return {}; auto virtual_columns_block = data.getBlockWithVirtualsForFilter(metadata_snapshot, parts); - VirtualColumnUtils::filterBlockWithDAG(dag, virtual_columns_block, context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*dag), virtual_columns_block, context); return VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part"); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index c86b56d3f1b..e760098f10f 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -139,7 +139,10 @@ std::shared_ptr StorageObjectStorageSourc paths.reserve(keys.size()); for (const auto & key : keys) paths.push_back(fs::path(configuration->getNamespace()) / key); - VirtualColumnUtils::filterByPathOrFile(keys, paths, filter_dag, virtual_columns, local_context); + + VirtualColumnUtils::buildSetsForDAG(*filter_dag, local_context); + auto actions = std::make_shared(std::move(*filter_dag)); + VirtualColumnUtils::filterByPathOrFile(keys, paths, actions, virtual_columns); copy_configuration->setPaths(keys); } @@ -506,7 +509,11 @@ StorageObjectStorageSource::GlobIterator::GlobIterator( } recursive = key_with_globs == "/**"; - filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns); + if (auto filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns)) + { + VirtualColumnUtils::buildSetsForDAG(*filter_dag, getContext()); + filter_expr = std::make_shared(std::move(*filter_dag)); + } } else { @@ -570,14 +577,14 @@ StorageObjectStorage::ObjectInfoPtr StorageObjectStorageSource::GlobIterator::ne ++it; } - if (filter_dag) + if (filter_expr) { std::vector paths; paths.reserve(new_batch.size()); for (const auto & object_info : new_batch) paths.push_back(getUniqueStoragePathIdentifier(*configuration, *object_info, false)); - VirtualColumnUtils::filterByPathOrFile(new_batch, 
paths, filter_dag, virtual_columns, getContext()); + VirtualColumnUtils::filterByPathOrFile(new_batch, paths, filter_expr, virtual_columns); LOG_TEST(logger, "Filtered files: {} -> {}", paths.size(), new_batch.size()); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.h b/src/Storages/ObjectStorage/StorageObjectStorageSource.h index b8418ddd07c..e466621e1e1 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.h @@ -208,7 +208,7 @@ private: ObjectInfos object_infos; ObjectInfos * read_keys; - ActionsDAGPtr filter_dag; + ExpressionActionsPtr filter_expr; ObjectStorageIteratorPtr object_storage_iterator; bool recursive{false}; std::vector expanded_keys; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index c6acb358d89..fe6f494db00 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -1130,12 +1130,16 @@ StorageFileSource::FilesIterator::FilesIterator( bool distributed_processing_) : WithContext(context_), files(files_), archive_info(std::move(archive_info_)), distributed_processing(distributed_processing_) { - ActionsDAGPtr filter_dag; + std::optional filter_dag; if (!distributed_processing && !archive_info && !files.empty()) filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns); if (filter_dag) - VirtualColumnUtils::filterByPathOrFile(files, files, filter_dag, virtual_columns, context_); + { + VirtualColumnUtils::buildSetsForDAG(*filter_dag, context_); + auto actions = std::make_shared(std::move(*filter_dag)); + VirtualColumnUtils::filterByPathOrFile(files, files, actions, virtual_columns); + } } String StorageFileSource::FilesIterator::next() diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 9cec8c75ebe..c61bb8ac980 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -198,7 +198,7 @@ public: { uris = parseRemoteDescription(uri_, 0, uri_.size(), ',', max_addresses); - ActionsDAGPtr filter_dag; + std::optional filter_dag; if (!uris.empty()) filter_dag = VirtualColumnUtils::createPathAndFileFilterDAG(predicate, virtual_columns); @@ -209,7 +209,9 @@ public: for (const auto & uri : uris) paths.push_back(Poco::URI(uri).getPath()); - VirtualColumnUtils::filterByPathOrFile(uris, paths, filter_dag, virtual_columns, context); + VirtualColumnUtils::buildSetsForDAG(*filter_dag, context); + auto actions = std::make_shared(std::move(*filter_dag)); + VirtualColumnUtils::filterByPathOrFile(uris, paths, actions, virtual_columns); } } diff --git a/src/Storages/System/StorageSystemDetachedParts.cpp b/src/Storages/System/StorageSystemDetachedParts.cpp index 7e4c1de1c65..0d0ae666c10 100644 --- a/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/src/Storages/System/StorageSystemDetachedParts.cpp @@ -307,7 +307,7 @@ protected: std::shared_ptr storage; std::vector columns_mask; - ActionsDAGPtr filter; + std::optional filter; const size_t max_block_size; const size_t num_streams; }; @@ -359,7 +359,7 @@ void StorageSystemDetachedParts::read( void ReadFromSystemDetachedParts::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - auto state = std::make_shared(StoragesInfoStream(nullptr, filter, context)); + auto state = std::make_shared(StoragesInfoStream({}, std::move(filter), context)); Pipe pipe; diff --git a/src/Storages/System/StorageSystemDroppedTablesParts.cpp b/src/Storages/System/StorageSystemDroppedTablesParts.cpp index 
c17d6402d88..defc4ec2d2a 100644 --- a/src/Storages/System/StorageSystemDroppedTablesParts.cpp +++ b/src/Storages/System/StorageSystemDroppedTablesParts.cpp @@ -11,7 +11,7 @@ namespace DB { -StoragesDroppedInfoStream::StoragesDroppedInfoStream(const ActionsDAGPtr & filter, ContextPtr context) +StoragesDroppedInfoStream::StoragesDroppedInfoStream(std::optional filter, ContextPtr context) : StoragesInfoStreamBase(context) { /// Will apply WHERE to subset of columns and then add more columns. @@ -75,7 +75,7 @@ StoragesDroppedInfoStream::StoragesDroppedInfoStream(const ActionsDAGPtr & filte { /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'. if (filter) - VirtualColumnUtils::filterBlockWithDAG(filter, block_to_filter, context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*filter), block_to_filter, context); rows = block_to_filter.rows(); } diff --git a/src/Storages/System/StorageSystemDroppedTablesParts.h b/src/Storages/System/StorageSystemDroppedTablesParts.h index dff9e41cce3..32468fc31b2 100644 --- a/src/Storages/System/StorageSystemDroppedTablesParts.h +++ b/src/Storages/System/StorageSystemDroppedTablesParts.h @@ -9,7 +9,7 @@ namespace DB class StoragesDroppedInfoStream : public StoragesInfoStreamBase { public: - StoragesDroppedInfoStream(const ActionsDAGPtr & filter, ContextPtr context); + StoragesDroppedInfoStream(std::optional filter, ContextPtr context); protected: bool tryLockTable(StoragesInfo &) override { @@ -30,9 +30,9 @@ public: std::string getName() const override { return "SystemDroppedTablesParts"; } protected: - std::unique_ptr getStoragesInfoStream(const ActionsDAGPtr &, const ActionsDAGPtr & filter, ContextPtr context) override + std::unique_ptr getStoragesInfoStream(std::optional, std::optional filter, ContextPtr context) override { - return std::make_unique(filter, context); + return std::make_unique(std::move(filter), context); } }; diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index f7d1c1b3eb8..a0c9a5c61bd 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -91,7 +91,7 @@ StoragesInfo::getProjectionParts(MergeTreeData::DataPartStateVector & state, boo return data->getProjectionPartsVectorForInternalUsage({State::Active}, &state); } -StoragesInfoStream::StoragesInfoStream(const ActionsDAGPtr & filter_by_database, const ActionsDAGPtr & filter_by_other_columns, ContextPtr context) +StoragesInfoStream::StoragesInfoStream(std::optional filter_by_database, std::optional filter_by_other_columns, ContextPtr context) : StoragesInfoStreamBase(context) { /// Will apply WHERE to subset of columns and then add more columns. @@ -124,7 +124,7 @@ StoragesInfoStream::StoragesInfoStream(const ActionsDAGPtr & filter_by_database, /// Filter block_to_filter with column 'database'. if (filter_by_database) - VirtualColumnUtils::filterBlockWithDAG(filter_by_database, block_to_filter, context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*filter_by_database), block_to_filter, context); rows = block_to_filter.rows(); /// Block contains new columns, update database_column. @@ -204,7 +204,7 @@ StoragesInfoStream::StoragesInfoStream(const ActionsDAGPtr & filter_by_database, { /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'. 
if (filter_by_other_columns) - VirtualColumnUtils::filterBlockWithDAG(filter_by_other_columns, block_to_filter, context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*filter_by_other_columns), block_to_filter, context); rows = block_to_filter.rows(); } @@ -236,8 +236,8 @@ protected: std::shared_ptr storage; std::vector columns_mask; const bool has_state_column; - ActionsDAGPtr filter_by_database; - ActionsDAGPtr filter_by_other_columns; + std::optional filter_by_database; + std::optional filter_by_other_columns; }; ReadFromSystemPartsBase::ReadFromSystemPartsBase( @@ -318,7 +318,7 @@ void StorageSystemPartsBase::read( void ReadFromSystemPartsBase::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - auto stream = storage->getStoragesInfoStream(filter_by_database, filter_by_other_columns, context); + auto stream = storage->getStoragesInfoStream(std::move(filter_by_database), std::move(filter_by_other_columns), context); auto header = getOutputStream().header; MutableColumns res_columns = header.cloneEmptyColumns(); diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index 8671fd850f8..806af4a7bf8 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -116,7 +116,7 @@ protected: class StoragesInfoStream : public StoragesInfoStreamBase { public: - StoragesInfoStream(const ActionsDAGPtr & filter_by_database, const ActionsDAGPtr & filter_by_other_columns, ContextPtr context); + StoragesInfoStream(std::optional filter_by_database, std::optional filter_by_other_columns, ContextPtr context); }; /** Implements system table 'parts' which allows to get information about data parts for tables of MergeTree family. 
@@ -146,9 +146,9 @@ protected: StorageSystemPartsBase(const StorageID & table_id_, ColumnsDescription && columns); - virtual std::unique_ptr getStoragesInfoStream(const ActionsDAGPtr & filter_by_database, const ActionsDAGPtr & filter_by_other_columns, ContextPtr context) + virtual std::unique_ptr getStoragesInfoStream(std::optional filter_by_database, std::optional filter_by_other_columns, ContextPtr context) { - return std::make_unique(filter_by_database, filter_by_other_columns, context); + return std::make_unique(std::move(filter_by_database), std::move(filter_by_other_columns), context); } virtual void diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 783b899c978..85aaf4ad186 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -169,7 +169,7 @@ ColumnPtr getFilteredTables(const ActionsDAG::Node * predicate, const ColumnPtr block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*dag), block, context); return block.getByPosition(0).column; } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 1630d9fd9c4..32c6a558340 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -77,15 +77,20 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context) } } -void filterBlockWithDAG(const ActionsDAGPtr & dag, Block & block, ContextPtr context) +void filterBlockWithDAG(ActionsDAG dag, Block & block, ContextPtr context) +{ + buildSetsForDAG(dag, context); + auto actions = std::make_shared(std::move(dag)); + filterBlockWithExpression(actions, block); +} + +void filterBlockWithExpression(const ExpressionActionsPtr & actions, Block & block) { - buildSetsForDAG(*dag, context); - auto actions = std::make_shared(std::move(*ActionsDAG::clone(dag))); Block block_with_filter = block; actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); /// Filter the block. 
- String filter_column_name = dag->getOutputs().at(0)->result_name; + String filter_column_name = actions->getActionsDAG().getOutputs().at(0)->result_name; ColumnPtr filter_column = block_with_filter.getByName(filter_column_name).column->convertToFullColumnIfConst(); ConstantFilterDescription constant_filter(*filter_column); @@ -155,7 +160,7 @@ static void addPathAndFileToVirtualColumns(Block & block, const String & path, s block.getByName("_idx").column->assumeMutableRef().insert(idx); } -ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns) +std::optional createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns) { if (!predicate || virtual_columns.empty()) return {}; @@ -171,7 +176,7 @@ ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, con return splitFilterDagForAllowedInputs(predicate, &block); } -ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ActionsDAGPtr & dag, const NamesAndTypesList & virtual_columns, const ContextPtr & context) +ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ExpressionActionsPtr & actions, const NamesAndTypesList & virtual_columns) { Block block; for (const auto & column : virtual_columns) @@ -184,7 +189,7 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const for (size_t i = 0; i != paths.size(); ++i) addPathAndFileToVirtualColumns(block, paths[i], i); - filterBlockWithDAG(dag, block, context); + filterBlockWithExpression(actions, block); return block.getByName("_idx").column; } @@ -355,15 +360,15 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs) +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs) { if (!predicate) - return nullptr; + return {}; ActionsDAG::Nodes additional_nodes; const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes); if (!res) - return nullptr; + return {}; return ActionsDAG::cloneSubDAG({res}, true); } @@ -372,7 +377,7 @@ void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, { auto dag = splitFilterDagForAllowedInputs(predicate, &block); if (dag) - filterBlockWithDAG(dag, block, context); + filterBlockWithDAG(std::move(*dag), block, context); } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 208aa7a8100..72c45964ff4 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -23,7 +23,8 @@ namespace VirtualColumnUtils void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. Block should contain all the required columns. -void filterBlockWithDAG(const ActionsDAGPtr & dag, Block & block, ContextPtr context); +void filterBlockWithDAG(ActionsDAG dag, Block & block, ContextPtr context); +void filterBlockWithExpression(const ExpressionActionsPtr & actions, Block & block); /// Builds sets used by ActionsDAG inplace. 
void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); @@ -32,7 +33,7 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. -ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); /// Extract from the input stream a set of `name` column values template @@ -49,14 +50,14 @@ auto extractSingleValueFromBlock(const Block & block, const String & name) NameSet getVirtualNamesForFileLikeStorage(); VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns); -ActionsDAGPtr createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); +std::optional createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); -ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ActionsDAGPtr & dag, const NamesAndTypesList & virtual_columns, const ContextPtr & context); +ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const ExpressionActionsPtr & actions, const NamesAndTypesList & virtual_columns); template -void filterByPathOrFile(std::vector & sources, const std::vector & paths, const ActionsDAGPtr & dag, const NamesAndTypesList & virtual_columns, const ContextPtr & context) +void filterByPathOrFile(std::vector & sources, const std::vector & paths, const ExpressionActionsPtr & actions, const NamesAndTypesList & virtual_columns) { - auto indexes_column = getFilterByPathAndFileIndexes(paths, dag, virtual_columns, context); + auto indexes_column = getFilterByPathAndFileIndexes(paths, actions, virtual_columns); const auto & indexes = typeid_cast(*indexes_column).getData(); if (indexes.size() == sources.size()) return; From d11d1a42bc7101993e5c85f1c1c3298e6334dbf9 Mon Sep 17 00:00:00 2001 From: yariks5s Date: Tue, 16 Jul 2024 16:49:33 +0000 Subject: [PATCH 0439/2361] fix for storageURL functions --- src/Storages/StorageURL.cpp | 16 ++++++++++++++++ src/Storages/StorageURL.h | 19 +++---------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 6e7788cfc1d..1d1deebf9f5 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -91,6 +91,22 @@ static const std::vector> optional_regex_keys = { std::make_shared(R"(headers.header\[[0-9]*\].value)"), }; +bool urlWithGlobs(const String & uri) +{ + return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos; +} + +String getSampleURI(String uri, ContextPtr context) +{ + if (urlWithGlobs(uri)) + { + auto uris = parseRemoteDescription(uri, 0, uri.size(), ',', context->getSettingsRef().glob_expansion_max_elements); + if (!uris.empty()) + return uris[0]; + } + return uri; +} + static ConnectionTimeouts getHTTPTimeouts(ContextPtr context) { return ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), context->getServerSettings().keep_alive_timeout); diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index a874ca9147c..cd48ecb767b 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -143,6 +143,9 @@ private: virtual Block getHeaderBlock(const Names & 
column_names, const StorageSnapshotPtr & storage_snapshot) const = 0; }; +bool urlWithGlobs(const String & uri); + +String getSampleURI(String uri, ContextPtr context); class StorageURLSource : public SourceWithKeyCondition, WithContext { @@ -269,22 +272,6 @@ private: bool cancelled = false; }; -static bool urlWithGlobs(const String & uri) -{ - return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos; -} - -inline String getSampleURI(String uri, ContextPtr context) -{ - if (urlWithGlobs(uri)) - { - auto uris = parseRemoteDescription(uri, 0, uri.size(), ',', context->getSettingsRef().glob_expansion_max_elements); - if (!uris.empty()) - return uris[0]; - } - return uri; -} - class StorageURL : public IStorageURLBase { public: From 0954eefb076d36ec5804b46e594005cd7f4030bf Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Jul 2024 17:01:35 +0000 Subject: [PATCH 0440/2361] Revert SortingStep changes. --- src/Processors/QueryPlan/SortingStep.cpp | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index e8e761e7ab0..48fad9f5fdb 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -447,13 +447,6 @@ void SortingStep::describeActions(FormatSettings & settings) const settings.out << '\n'; } - if (!partition_by_description.empty()) - { - settings.out << prefix << "Partition by description: "; - dumpSortDescription(partition_by_description, settings.out); - settings.out << '\n'; - } - if (limit) settings.out << prefix << "Limit " << limit << '\n'; } @@ -468,9 +461,6 @@ void SortingStep::describeActions(JSONBuilder::JSONMap & map) const else map.add("Sort Description", explainSortDescription(result_description)); - if (!partition_by_description.empty()) - map.add("Partition By Description", explainSortDescription(partition_by_description)); - if (limit) map.add("Limit", limit); } From 14dcb97e353fb4739c7f7d37b9c3c11c9ad40923 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 16 Jul 2024 19:09:18 +0200 Subject: [PATCH 0441/2361] Update src/Storages/StorageURL.h Co-authored-by: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> --- src/Storages/StorageURL.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index cd48ecb767b..1f3d63b4c85 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -12,8 +12,6 @@ #include #include #include -#include -#include namespace DB From 771b39fa2179a9a548580c41859bbecf0165000d Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 16 Jul 2024 19:44:10 +0200 Subject: [PATCH 0442/2361] Update StorageURLCluster.cpp --- src/Storages/StorageURLCluster.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/StorageURLCluster.cpp b/src/Storages/StorageURLCluster.cpp index 1522a18a083..7c7a299c64e 100644 --- a/src/Storages/StorageURLCluster.cpp +++ b/src/Storages/StorageURLCluster.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include From 9d332911fb0f4b25bbdb67a9c10c5f3b42db4ea6 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 16 Jul 2024 20:10:19 +0200 Subject: [PATCH 0443/2361] Update StorageURL.cpp --- src/Storages/StorageURL.cpp | 1 + 1 file changed, 1 insertion(+) diff --git 
a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 1d1deebf9f5..4cf191f7e8a 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -39,6 +39,7 @@ #include #include +#include #include #include From 094a1aa970e371a71446ea93bbd4292864257854 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 16 Jul 2024 19:07:15 +0000 Subject: [PATCH 0444/2361] Fix build and JSON getName() method --- src/DataTypes/DataTypeObject.cpp | 4 +++- src/DataTypes/DataTypeObjectDeprecated.cpp | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 02065bfcc97..0e7b46ffbd2 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -15,6 +15,8 @@ #include #include +#include + #if USE_SIMDJSON #include #endif @@ -157,7 +159,7 @@ String DataTypeObject::doGetName() const for (const auto & path : sorted_typed_paths) { write_separator(); - out << path << " " << typed_paths.at(path)->getName(); + out << backQuoteIfNeed(path) << " " << typed_paths.at(path)->getName(); } std::vector sorted_skip_paths; diff --git a/src/DataTypes/DataTypeObjectDeprecated.cpp b/src/DataTypes/DataTypeObjectDeprecated.cpp index 8b2b5e72f6c..e3c1a458eda 100644 --- a/src/DataTypes/DataTypeObjectDeprecated.cpp +++ b/src/DataTypes/DataTypeObjectDeprecated.cpp @@ -8,6 +8,7 @@ #include #include +#include namespace DB { From 949e69c0573354417e21bd83d446e1ea085db04d Mon Sep 17 00:00:00 2001 From: Blargian Date: Tue, 16 Jul 2024 21:58:48 +0200 Subject: [PATCH 0445/2361] add documentation for getSubcolumn and getTypeSerializationStreams --- .../functions/other-functions.md | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 260457b3be1..40f1b82562d 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -4055,3 +4055,94 @@ Result: │ 32 │ └─────────────────────────────┘ ``` + +## getSubcolumn + +Takes a table expression or identifier and constant string with the name of the sub-column, and returns the requested sub-column extracted from the expression. + +**Syntax** + +```sql +getSubcolumn(col_name, subcol_name) +``` + +**Arguments** + +- `col_name` — Table expression or identifier. [Expression](../syntax.md/#expressions), [Identifier](../syntax.md/#identifiers). +- `subcol_name` — The name of the sub-column. [String](../data-types/string.md). + +**Returned value** + +- Returns the extracted sub-colum. + +**Example** + +Query: + +```sql +CREATE TABLE t_arr (arr Array(Tuple(subcolumn1 UInt32, subcolumn2 String))) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]); +SELECT getSubcolumn(arr, 'subcolumn1'), getSubcolumn(arr, 'subcolumn2') FROM t_arr; +``` + +Result: + +```response + ┌─getSubcolumn(arr, 'subcolumn1')─┬─getSubcolumn(arr, 'subcolumn2')─┠+1. │ [1,2] │ ['Hello','World'] │ +2. │ [3,4,5] │ ['This','is','subcolumn'] │ + └─────────────────────────────────┴─────────────────────────────────┘ +``` + +## getTypeSerializationStreams + +Enumerates stream paths of a data type. + +:::note +This function is intended for use by developers. +::: + +**Syntax** + +```sql +getTypeSerializationStreams(col) +``` + +**Arguments** + +- `col` — Column or string representation of a data-type from which the data type will be detected. 
+ +**Returned value** + +- Returns an array with all the serialization sub-stream paths.[Array](../data-types/array.md)([String](../data-types/string.md)). + +**Examples** + +Query: + +```sql +SELECT getTypeSerializationStreams(tuple('a', 1, 'b', 2)); +``` + +Result: + +```response + ┌─getTypeSerializationStreams(('a', 1, 'b', 2))─────────────────────────────────────────────────────────────────────────┠+1. │ ['{TupleElement(1), Regular}','{TupleElement(2), Regular}','{TupleElement(3), Regular}','{TupleElement(4), Regular}'] │ + └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +Query: + +```sql +SELECT getTypeSerializationStreams('Map(String, Int64)'); +``` + +Result: + +```response + ┌─getTypeSerializationStreams('Map(String, Int64)')────────────────────────────────────────────────────────────────┠+1. │ ['{ArraySizes}','{ArrayElements, TupleElement(keys), Regular}','{ArrayElements, TupleElement(values), Regular}'] │ + └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + From b6a790124cd670749b4c504f58a4854307bf7d83 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 16 Jul 2024 20:16:47 +0000 Subject: [PATCH 0446/2361] Handling parallel replicas protocol with priority for async communication --- src/Processors/IProcessor.h | 2 + src/Processors/Sources/RemoteSource.cpp | 23 +++++++++++ src/Processors/Sources/RemoteSource.h | 3 ++ src/QueryPipeline/RemoteQueryExecutor.cpp | 38 +++++++++++++++++-- src/QueryPipeline/RemoteQueryExecutor.h | 4 +- .../RemoteQueryExecutorReadContext.h | 2 + 6 files changed, 67 insertions(+), 5 deletions(-) diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 02f7b6b3d12..358983a2179 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -221,6 +221,8 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'schedule' is not implemented for {} processor", getName()); } + virtual void asyncJobReady() {} + /** You must call this method if 'prepare' returned ExpandPipeline. * This method cannot access any port, but it can create new ports for current processor. * diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 3d7dd3f76b8..f1d47f69782 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -89,6 +89,12 @@ ISource::Status RemoteSource::prepare() void RemoteSource::work() { + if (async_immediate_work.exchange(false)) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "async_immediate_work was true"); + return; + } + /// Connection drain is a heavy operation that may take a long time. /// Therefore we move connection drain from prepare() to work(), and drain multiple connections in parallel. 
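/// (prepare() is called by the pipeline executor and must stay cheap and non-blocking, while work() may run on any
/// pipeline thread, which is what lets several RemoteSource processors drain their connections at the same time.)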
/// See issue: https://github.com/ClickHouse/ClickHouse/issues/60844 @@ -101,6 +107,23 @@ void RemoteSource::work() ISource::work(); } +void RemoteSource::asyncJobReady() +{ + chassert(async_read); + + if (!was_query_sent) + return; + + auto res = query_executor->readAsync(/*probe=*/true); + if (res.type == RemoteQueryExecutor::ReadResult::Type::ParallelReplicasToken) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "async_immediate_work is {}", async_immediate_work); + work(); + async_immediate_work = true; + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "async_immediate_work is true"); + } +} + std::optional RemoteSource::tryGenerate() { /// onCancel() will do the cancel if the query was sent. diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index 052567bc261..fa04985f101 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -32,6 +32,8 @@ public: int schedule() override { return fd; } + void asyncJobReady() override; + void setStorageLimits(const std::shared_ptr & storage_limits_) override; protected: @@ -52,6 +54,7 @@ private: int fd = -1; size_t rows = 0; bool manually_add_rows_before_limit_counter = false; + std::atomic_bool async_immediate_work{false}; }; /// Totals source from RemoteQueryExecutor. diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index b08f2002f64..3ca05b53417 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -469,7 +469,7 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::read() return restartQueryWithoutDuplicatedUUIDs(); } -RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync() +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync(bool check_packet_type_only) { #if defined(OS_LINUX) if (!read_context || (resent_query && recreate_read_context)) @@ -486,7 +486,21 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync() { std::lock_guard lock(was_cancelled_mutex); if (was_cancelled) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "was_cancelled"); return ReadResult(Block()); + } + + if (has_postponed_packet) + { + has_postponed_packet = false; + auto read_result = processPacket(read_context->getPacket()); + if (read_result.getType() == ReadResult::Type::Data || read_result.getType() == ReadResult::Type::ParallelReplicasToken) + return read_result; + + if (got_duplicated_part_uuids) + break; + } read_context->resume(); @@ -506,12 +520,28 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync() /// Check if packet is not ready yet. 
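/// (If it is not ready, the read context's file descriptor is handed back to the caller, so the executor can poll it
/// and re-enter readAsync() later instead of blocking this thread until the packet arrives.)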
if (read_context->isInProgress()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "read_context still in progress"); return ReadResult(read_context->getFileDescriptor()); + } - auto anything = processPacket(read_context->getPacket()); + const auto packet_type = read_context->getPacketType(); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Packet type: {}", packet_type); - if (anything.getType() == ReadResult::Type::Data || anything.getType() == ReadResult::Type::ParallelReplicasToken) - return anything; + if (check_packet_type_only) + { + has_postponed_packet = true; + if (packet_type == Protocol::Server::MergeTreeReadTaskRequest + || packet_type == Protocol::Server::MergeTreeAllRangesAnnouncement) + { + return ReadResult(ReadResult::Type::ParallelReplicasToken); + } + return ReadResult(ReadResult::Type::Nothing); + } + + auto read_result = processPacket(read_context->getPacket()); + if (read_result.getType() == ReadResult::Type::Data || read_result.getType() == ReadResult::Type::ParallelReplicasToken) + return read_result; if (got_duplicated_part_uuids) break; diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 04a59cc3b7e..6849c3e0a07 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -183,7 +183,7 @@ public: ReadResult read(); /// Async variant of read. Returns ready block or file descriptor which may be used for polling. - ReadResult readAsync(); + ReadResult readAsync(bool check_packet_type_only = false); /// Receive all remain packets and finish query. /// It should be cancelled after read returned empty block. @@ -303,6 +303,8 @@ private: */ bool got_duplicated_part_uuids = false; + bool has_postponed_packet = false; + /// Parts uuids, collected from remote replicas std::vector duplicated_part_uuids; diff --git a/src/QueryPipeline/RemoteQueryExecutorReadContext.h b/src/QueryPipeline/RemoteQueryExecutorReadContext.h index b8aa8bb9111..c054e75f6f1 100644 --- a/src/QueryPipeline/RemoteQueryExecutorReadContext.h +++ b/src/QueryPipeline/RemoteQueryExecutorReadContext.h @@ -39,6 +39,8 @@ public: Packet getPacket() { return std::move(packet); } + UInt64 getPacketType() const { return packet.type; } + private: bool checkTimeout(bool blocking = false); From 67f5ffc5920892b34b1c8b7176337c39cafe7141 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 16 Jul 2024 20:02:53 +0000 Subject: [PATCH 0447/2361] Fixups --- tests/performance/final_big_column.xml | 4 ++-- tests/performance/function_tokens.xml | 4 ++-- tests/performance/polymorphic_parts_s.xml | 8 ++++---- tests/performance/scripts/report.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/performance/final_big_column.xml b/tests/performance/final_big_column.xml index 1fd586d2d90..5225b3d7ad4 100644 --- a/tests/performance/final_big_column.xml +++ b/tests/performance/final_big_column.xml @@ -10,8 +10,8 @@ PARTITION BY toYYYYMM(d) ORDER BY key - INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(5000000) - INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(5000000) + INSERT INTO optimized_select_final SELECT toDate('2000-01-01'), 2*number, randomPrintableASCII(1000) FROM numbers(2500000) + INSERT INTO optimized_select_final SELECT toDate('2020-01-01'), 2*number+1, randomPrintableASCII(1000) FROM numbers(2500000) SELECT * FROM optimized_select_final FINAL FORMAT Null SETTINGS max_threads = 
8 SELECT * FROM optimized_select_final FINAL WHERE key % 10 = 0 FORMAT Null diff --git a/tests/performance/function_tokens.xml b/tests/performance/function_tokens.xml index 1ff56323d62..bc2bc71a933 100644 --- a/tests/performance/function_tokens.xml +++ b/tests/performance/function_tokens.xml @@ -1,5 +1,5 @@ with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByChar(' ', materialize(s)) as w from numbers(1000000) - with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp(' ', materialize(s)) as w from numbers(1000000) - with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp('\s+', materialize(s)) as w from numbers(100000) + with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp(' ', materialize(s)) as w from numbers(200000) + with 'Many years later as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.' as s select splitByRegexp('\s+', materialize(s)) as w from numbers(20000) diff --git a/tests/performance/polymorphic_parts_s.xml b/tests/performance/polymorphic_parts_s.xml index b4dd87a7ae3..5fe1ffffe1d 100644 --- a/tests/performance/polymorphic_parts_s.xml +++ b/tests/performance/polymorphic_parts_s.xml @@ -24,10 +24,10 @@ 1 - - INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(100) - INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(1000) - INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(100) + + INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(50) + INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(500) + INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(50) DROP TABLE IF EXISTS hits_wide DROP TABLE IF EXISTS hits_compact diff --git a/tests/performance/scripts/report.py b/tests/performance/scripts/report.py index c2bc773bd54..e45d709ca2c 100755 --- a/tests/performance/scripts/report.py +++ b/tests/performance/scripts/report.py @@ -555,7 +555,7 @@ if args.report == "main": "Total client time for measured query runs, s", # 2 "Queries", # 3 "Longest query, total for measured runs, s", # 4 - "Wall clock time per query, s", # 5 + "Average query wall clock time, s", # 5 "Shortest query, total for measured runs, s", # 6 "", # Runs #7 ] From a7310e51939ad6053d6ab94b07f0171457e5d779 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Jul 2024 19:32:27 +0200 Subject: [PATCH 0448/2361] Ignore async_load_databases for ATTACH query It is quite odd that when ATTACH finishes the tables may not be exists, due to async_load_databases. For server startup it makes total sense, but not for queries. Plus, you can execute queries in parallel if you want to make it faster. Note, that server startup does not uses this code, see loadMetadata.cpp. 
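In other words, ATTACH now waits in the foreground for both phases. Below is a minimal, self-contained sketch of that two-phase wait; it is only an illustration with toy futures, not the real TablesLoader / waitLoad / foreground-pool machinery shown in the diff that follows:

```cpp
#include <cstddef>
#include <future>
#include <iostream>
#include <vector>

/// Toy stand-ins for the loader's task lists; the names are illustrative only.
static std::vector<std::future<void>> makeTasks(const char * what, size_t n)
{
    std::vector<std::future<void>> tasks;
    for (size_t i = 0; i < n; ++i)
        tasks.push_back(std::async(std::launch::async, [what, i] { std::cout << what << " task " << i << " done\n"; }));
    return tasks;
}

int main()
{
    auto load_tasks = makeTasks("load", 3);
    auto startup_tasks = makeTasks("startup", 3);

    /// First wait for all the table-load tasks...
    for (auto & task : load_tasks)
        task.get();

    /// ...and only then for the startup tasks, so that by the time ATTACH returns its tables really exist.
    for (auto & task : startup_tasks)
        task.get();
}
```

The previous behaviour under async_load_databases was the fire-and-forget variant: schedule both task lists and return immediately, which makes sense for server startup but is surprising for an interactive ATTACH.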
Signed-off-by: Azat Khuzhin --- src/Interpreters/InterpreterCreateQuery.cpp | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 84d7f0a587c..1d8d885b216 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -362,18 +362,10 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) TablesLoader loader{getContext()->getGlobalContext(), {{database_name, database}}, mode}; auto load_tasks = loader.loadTablesAsync(); auto startup_tasks = loader.startupTablesAsync(); - if (getContext()->getGlobalContext()->getServerSettings().async_load_databases) - { - scheduleLoad(load_tasks); - scheduleLoad(startup_tasks); - } - else - { - /// First prioritize, schedule and wait all the load table tasks - waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), load_tasks); - /// Only then prioritize, schedule and wait all the startup tasks - waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks); - } + /// First prioritize, schedule and wait all the load table tasks + waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), load_tasks); + /// Only then prioritize, schedule and wait all the startup tasks + waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks); } } catch (...) From f30d35ae2926948f1e6a268917113e757df4e2df Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 8 Jul 2024 19:34:18 +0200 Subject: [PATCH 0449/2361] Revert "Merge pull request #65571 from ClickHouse/fix-flaky-test-4" Reverts: https://github.com/ClickHouse/ClickHouse/pull/65571 This reverts commit da9a34ea46b504881ffe5aa605c933106862ba25, reversing changes made to cbdb9833f207d4b0e35ad09cf4757f5d5b506b77. 
Signed-off-by: Azat Khuzhin --- .../0_stateless/01254_dict_load_after_detach_attach.reference | 2 +- .../queries/0_stateless/01254_dict_load_after_detach_attach.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01254_dict_load_after_detach_attach.reference b/tests/queries/0_stateless/01254_dict_load_after_detach_attach.reference index 9c2c59f6379..2f2d638a294 100644 --- a/tests/queries/0_stateless/01254_dict_load_after_detach_attach.reference +++ b/tests/queries/0_stateless/01254_dict_load_after_detach_attach.reference @@ -1,4 +1,4 @@ -NOT_LOADED +0 NOT_LOADED 0 LOADED 10 1 LOADED diff --git a/tests/queries/0_stateless/01254_dict_load_after_detach_attach.sql b/tests/queries/0_stateless/01254_dict_load_after_detach_attach.sql index 11473c6ce32..ef9e940df8b 100644 --- a/tests/queries/0_stateless/01254_dict_load_after_detach_attach.sql +++ b/tests/queries/0_stateless/01254_dict_load_after_detach_attach.sql @@ -12,7 +12,7 @@ LAYOUT(FLAT()); DETACH DATABASE {CLICKHOUSE_DATABASE:Identifier}; ATTACH DATABASE {CLICKHOUSE_DATABASE:Identifier}; -SELECT COALESCE((SELECT status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict')::Nullable(String), 'NOT_LOADED'); +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; SYSTEM RELOAD DICTIONARY dict; SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; SELECT dictGetUInt64('dict', 'val', toUInt64(0)); From b05e02625844930892d7ceaa77f2655765c51b88 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Wed, 17 Jul 2024 09:58:57 +0800 Subject: [PATCH 0450/2361] Fix logical error --- src/Storages/Statistics/Statistics.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 5c0e5f178e1..588e20e801f 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -37,6 +37,14 @@ std::optional StatisticsUtils::tryConvertToFloat64(const Field & field) return field.get(); case Field::Types::Float64: return field.get(); + case Field::Types::Int128: + return field.get(); + case Field::Types::UInt128: + return field.get(); + case Field::Types::Int256: + return field.get(); + case Field::Types::UInt256: + return field.get(); default: return {}; } From 122673592b21a2a0e60d1cedc9f9337b471ebcb8 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 17 Jul 2024 02:37:48 +0000 Subject: [PATCH 0451/2361] add rebuild for wide part --- src/Storages/MergeTree/MutateTask.cpp | 5 ++++ src/Storages/StorageInMemoryMetadata.cpp | 12 +++++++-- ...61_lightweight_delete_projection.reference | 1 + .../03161_lightweight_delete_projection.sql | 25 ++++++++++--------- 4 files changed, 29 insertions(+), 14 deletions(-) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 489c8863a8a..fe14c5a4f05 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2238,7 +2238,12 @@ bool MutateTask::prepare() && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::REBUILD) { for (const auto & projection : ctx->metadata_snapshot->getProjections()) + { + if (!ctx->source_part->hasProjection(projection.name)) + continue; + ctx->materialized_projections.insert(projection.name); + } } } diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 
2226de3e64f..4a655cac566 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -16,6 +16,7 @@ #include #include #include +#include namespace DB @@ -334,10 +335,17 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( NameSet required_ttl_columns; NameSet updated_ttl_columns; - auto add_dependent_columns = [&updated_columns](const Names & required_columns, auto & to_set) + auto add_dependent_columns = [&updated_columns](const Names & required_columns, auto & to_set, bool is_projection = false) { for (const auto & dependency : required_columns) { + /// useful in the case of lightweight delete with wide part and option of rebuild projection + if (is_projection && updated_columns.contains(RowExistsColumn::name)) + { + to_set.insert(required_columns.begin(), required_columns.end()); + return true; + } + if (updated_columns.contains(dependency)) { to_set.insert(required_columns.begin(), required_columns.end()); @@ -357,7 +365,7 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( for (const auto & projection : getProjections()) { if (has_dependency(projection.name, ColumnDependency::PROJECTION)) - add_dependent_columns(projection.getRequiredColumns(), projections_columns); + add_dependent_columns(projection.getRequiredColumns(), projections_columns, true); } auto add_for_rows_ttl = [&](const auto & expression, auto & to_set) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index bc7e1faecff..3401eaf6162 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,5 +1,6 @@ 8888 Alice 50 p1 p2 +8888 Alice 50 p1 p2 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index b63341f5371..2c60d83d74d 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -44,7 +44,7 @@ SELECT * FROM users ORDER BY uid; SYSTEM FLUSH LOGS; --- expecting projection p1, p2 in 2 parts +-- expecting projection p1, p2 SELECT name FROM system.projection_parts @@ -85,21 +85,22 @@ SELECT FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); --- -- testing rebuild mode --- INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +-- testing rebuild mode +INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); --- ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; --- DELETE FROM users WHERE uid = 6666; +DELETE FROM users WHERE uid = 6666; --- SELECT * FROM users ORDER BY uid; +SELECT * FROM users ORDER BY uid; --- SYSTEM FLUSH LOGS; +SYSTEM FLUSH LOGS; + +-- expecting projection p1, p2 +SELECT + name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); --- -- expecting projection p1, p2 in 2 parts --- SELECT --- name --- FROM system.projection_parts --- WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); DROP TABLE users; \ No newline at end of file From 4a69bd78819c1bb97988fce96d17be99bbffe00e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 
07:37:45 +0200 Subject: [PATCH 0452/2361] Fix terrible test @arthurpassos --- ...tiple_batches_array_inconsistent_offsets.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.sh b/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.sh index 83196458a84..c96531ffea9 100755 --- a/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.sh +++ b/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-ubsan, no-fasttest +# Tags: long, no-ubsan, no-fasttest, no-parallel, no-asan, no-msan, no-tsan +# This test requires around 10 GB of memory and it is just too much. CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -121,9 +122,12 @@ echo "Parquet" #} DATA_FILE=$CUR_DIR/data_parquet/string_int_list_inconsistent_offset_multiple_batches.parquet -${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (ints Array(Int64), strings Nullable(String)) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load" -${CLICKHOUSE_CLIENT} --query="drop table parquet_load" \ No newline at end of file + +${CLICKHOUSE_LOCAL} --multiquery " +DROP TABLE IF EXISTS parquet_load; +CREATE TABLE parquet_load (ints Array(Int64), strings Nullable(String)) ENGINE = Memory; +INSERT INTO parquet_load FROM INFILE '$DATA_FILE'; +SELECT sum(cityHash64(*)) FROM parquet_load; +SELECT count() FROM parquet_load; +DROP TABLE parquet_load; +" From df636100d3643031ee8d2102b737e992a6e44a40 Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Mon, 8 Jul 2024 12:00:53 +0200 Subject: [PATCH 0453/2361] add entry in documentation for use_same_password_for_base_backup --- docs/en/operations/backup.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index fc861e25e9f..248fdbc156f 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -85,6 +85,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des - `password` for the file on disk - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')` - `use_same_s3_credentials_for_base_backup`: whether base backup to S3 should inherit credentials from the query. Only works with `S3`. + - `use_same_password_for_base_backup`: whether base backup archive should inherit the password from the query. - `structure_only`: if enabled, allows to only backup or restore the CREATE statements without the data of tables - `storage_policy`: storage policy for the tables being restored. See [Using Multiple Block Devices for Data Storage](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). This setting is only applicable to the `RESTORE` command. The specified storage policy applies only to tables with an engine from the `MergeTree` family. - `s3_storage_class`: the storage class used for S3 backup. 
For example, `STANDARD` From d8c68a27744dca3f68e4de08036148c2ecfd2ed6 Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Mon, 8 Jul 2024 12:00:53 +0200 Subject: [PATCH 0454/2361] working incremental password protected backups --- src/Backups/BackupFactory.h | 1 + src/Backups/BackupImpl.cpp | 13 +++++++++++-- src/Backups/BackupImpl.h | 7 +++++-- src/Backups/BackupSettings.cpp | 1 + src/Backups/BackupSettings.h | 3 +++ src/Backups/BackupsWorker.cpp | 2 ++ src/Backups/RestoreSettings.cpp | 1 + src/Backups/RestoreSettings.h | 3 +++ .../registerBackupEngineAzureBlobStorage.cpp | 6 ++++-- src/Backups/registerBackupEngineS3.cpp | 6 ++++-- src/Backups/registerBackupEnginesFileAndDisk.cpp | 6 ++++-- 11 files changed, 39 insertions(+), 10 deletions(-) diff --git a/src/Backups/BackupFactory.h b/src/Backups/BackupFactory.h index e13a9a12ca2..807b8516d49 100644 --- a/src/Backups/BackupFactory.h +++ b/src/Backups/BackupFactory.h @@ -41,6 +41,7 @@ public: bool allow_s3_native_copy = true; bool allow_azure_native_copy = true; bool use_same_s3_credentials_for_base_backup = false; + bool use_same_password_for_base_backup = false; bool azure_attempt_to_create_container = true; ReadSettings read_settings; WriteSettings write_settings; diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 3f972c36e47..23f067a62f5 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -92,7 +92,8 @@ BackupImpl::BackupImpl( std::shared_ptr reader_, const ContextPtr & context_, bool is_internal_backup_, - bool use_same_s3_credentials_for_base_backup_) + bool use_same_s3_credentials_for_base_backup_, + bool use_same_password_for_base_backup_) : backup_info(backup_info_) , backup_name_for_logging(backup_info.toStringForLogging()) , use_archive(!archive_params_.archive_name.empty()) @@ -104,6 +105,7 @@ BackupImpl::BackupImpl( , version(INITIAL_BACKUP_VERSION) , base_backup_info(base_backup_info_) , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_) + , use_same_password_for_base_backup(use_same_password_for_base_backup_) , log(getLogger("BackupImpl")) { open(); @@ -120,7 +122,8 @@ BackupImpl::BackupImpl( const std::shared_ptr & coordination_, const std::optional & backup_uuid_, bool deduplicate_files_, - bool use_same_s3_credentials_for_base_backup_) + bool use_same_s3_credentials_for_base_backup_, + bool use_same_password_for_base_backup_) : backup_info(backup_info_) , backup_name_for_logging(backup_info.toStringForLogging()) , use_archive(!archive_params_.archive_name.empty()) @@ -135,6 +138,7 @@ BackupImpl::BackupImpl( , base_backup_info(base_backup_info_) , deduplicate_files(deduplicate_files_) , use_same_s3_credentials_for_base_backup(use_same_s3_credentials_for_base_backup_) + , use_same_password_for_base_backup(use_same_password_for_base_backup_) , log(getLogger("BackupImpl")) { open(); @@ -258,6 +262,11 @@ std::shared_ptr BackupImpl::getBaseBackupUnlocked() const params.is_internal_backup = is_internal_backup; /// use_same_s3_credentials_for_base_backup should be inherited for base backups params.use_same_s3_credentials_for_base_backup = use_same_s3_credentials_for_base_backup; + /// use_same_password_for_base_backup should be inherited for base backups + params.use_same_password_for_base_backup = use_same_password_for_base_backup; + + if (params.use_same_password_for_base_backup) + params.password = archive_params.password; base_backup = BackupFactory::instance().createBackup(params); diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h 
index 2b27e2ab090..d7846104c4c 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -41,7 +41,8 @@ public: std::shared_ptr reader_, const ContextPtr & context_, bool is_internal_backup_, - bool use_same_s3_credentials_for_base_backup_); + bool use_same_s3_credentials_for_base_backup_, + bool use_same_password_for_base_backup_); BackupImpl( const BackupInfo & backup_info_, @@ -53,7 +54,8 @@ public: const std::shared_ptr & coordination_, const std::optional & backup_uuid_, bool deduplicate_files_, - bool use_same_s3_credentials_for_base_backup_); + bool use_same_s3_credentials_for_base_backup_, + bool use_same_password_for_base_backup_); ~BackupImpl() override; @@ -153,6 +155,7 @@ private: bool writing_finalized = false; bool deduplicate_files = true; bool use_same_s3_credentials_for_base_backup = false; + bool use_same_password_for_base_backup = false; const LoggerPtr log; }; diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index e33880f88e3..37ddd344001 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -29,6 +29,7 @@ namespace ErrorCodes M(Bool, allow_s3_native_copy) \ M(Bool, allow_azure_native_copy) \ M(Bool, use_same_s3_credentials_for_base_backup) \ + M(Bool, use_same_password_for_base_backup) \ M(Bool, azure_attempt_to_create_container) \ M(Bool, read_from_filesystem_cache) \ M(UInt64, shard_num) \ diff --git a/src/Backups/BackupSettings.h b/src/Backups/BackupSettings.h index a6c4d5d7181..d8f48f6e1ac 100644 --- a/src/Backups/BackupSettings.h +++ b/src/Backups/BackupSettings.h @@ -50,6 +50,9 @@ struct BackupSettings /// Whether base backup to S3 should inherit credentials from the BACKUP query. bool use_same_s3_credentials_for_base_backup = false; + /// Wheter base backup archive should be unlocked using the same password as the incremental archive + bool use_same_password_for_base_backup = false; + /// Whether a new Azure container should be created if it does not exist (requires permissions at storage account level) bool azure_attempt_to_create_container = true; diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 15a7d7c1eca..0614fb2da01 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -601,6 +601,7 @@ void BackupsWorker::doBackup( backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy; backup_create_params.allow_azure_native_copy = backup_settings.allow_azure_native_copy; backup_create_params.use_same_s3_credentials_for_base_backup = backup_settings.use_same_s3_credentials_for_base_backup; + backup_create_params.use_same_password_for_base_backup = backup_settings.use_same_password_for_base_backup; backup_create_params.azure_attempt_to_create_container = backup_settings.azure_attempt_to_create_container; backup_create_params.read_settings = getReadSettingsForBackup(context, backup_settings); backup_create_params.write_settings = getWriteSettingsForBackup(context); @@ -912,6 +913,7 @@ void BackupsWorker::doRestore( backup_open_params.password = restore_settings.password; backup_open_params.allow_s3_native_copy = restore_settings.allow_s3_native_copy; backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup; + backup_open_params.use_same_password_for_base_backup = restore_settings.use_same_password_for_base_backup; backup_open_params.read_settings = getReadSettingsForRestore(context); backup_open_params.write_settings = getWriteSettingsForRestore(context); 
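Both doBackup() and doRestore() only forward the flag; the real decision happens when BackupImpl opens its base backup. A condensed sketch of that decision follows, using stand-alone toy types rather than the real BackupFactory::CreateParams:

```cpp
#include <iostream>
#include <string>

/// Illustrative stand-ins for the archive/create parameter structs.
struct ArchiveParams { std::string password; };
struct BaseBackupParams
{
    std::string password;                            // empty means "open without a password"
    bool use_same_password_for_base_backup = false;
};

BaseBackupParams makeBaseBackupParams(const ArchiveParams & archive_params, bool use_same_password_for_base_backup)
{
    BaseBackupParams params;
    /// The flag itself is inherited, so every archive in a chain of incremental backups behaves the same way.
    params.use_same_password_for_base_backup = use_same_password_for_base_backup;
    /// Reuse the password of the current archive only when explicitly asked to; otherwise the base archive
    /// is opened without one, and an encrypted base fails with "Password is required" (CANNOT_UNPACK_ARCHIVE).
    if (use_same_password_for_base_backup)
        params.password = archive_params.password;
    return params;
}

int main()
{
    ArchiveParams current{"secret"};
    std::cout << makeBaseBackupParams(current, /*use_same_password_for_base_backup=*/ true).password << '\n';   // "secret"
    std::cout << makeBaseBackupParams(current, /*use_same_password_for_base_backup=*/ false).password << '\n';  // ""
}
```

That failure mode is exactly what the 02843 test added later in this series checks with its inc_2_bad / restore_inc_2_bad cases.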
backup_open_params.is_internal_backup = restore_settings.internal; diff --git a/src/Backups/RestoreSettings.cpp b/src/Backups/RestoreSettings.cpp index 7bbfd9ed751..a974fc11d00 100644 --- a/src/Backups/RestoreSettings.cpp +++ b/src/Backups/RestoreSettings.cpp @@ -164,6 +164,7 @@ namespace M(RestoreUDFCreationMode, create_function) \ M(Bool, allow_s3_native_copy) \ M(Bool, use_same_s3_credentials_for_base_backup) \ + M(Bool, use_same_password_for_base_backup) \ M(Bool, restore_broken_parts_as_detached) \ M(Bool, internal) \ M(String, host_id) \ diff --git a/src/Backups/RestoreSettings.h b/src/Backups/RestoreSettings.h index 06ecbc80aef..0fe5ee1a4bf 100644 --- a/src/Backups/RestoreSettings.h +++ b/src/Backups/RestoreSettings.h @@ -113,6 +113,9 @@ struct RestoreSettings /// Whether base backup from S3 should inherit credentials from the RESTORE query. bool use_same_s3_credentials_for_base_backup = false; + /// Wheter base backup archive should be unlocked using the same password as the incremental archive + bool use_same_password_for_base_backup = false; + /// If it's true RESTORE won't stop on broken parts while restoring, instead they will be restored as detached parts /// to the `detached` folder with names starting with `broken-from-backup'. bool restore_broken_parts_as_detached = false; diff --git a/src/Backups/registerBackupEngineAzureBlobStorage.cpp b/src/Backups/registerBackupEngineAzureBlobStorage.cpp index 626df99b00c..45f0386375a 100644 --- a/src/Backups/registerBackupEngineAzureBlobStorage.cpp +++ b/src/Backups/registerBackupEngineAzureBlobStorage.cpp @@ -141,7 +141,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory) reader, params.context, params.is_internal_backup, - /* use_same_s3_credentials_for_base_backup*/ false); + /* use_same_s3_credentials_for_base_backup*/ false, + params.use_same_password_for_base_backup); } else { @@ -164,7 +165,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory) params.backup_coordination, params.backup_uuid, params.deduplicate_files, - /* use_same_s3_credentials_for_base_backup */ false); + /* use_same_s3_credentials_for_base_backup */ false, + params.use_same_password_for_base_backup); } #else throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "AzureBlobStorage support is disabled"); diff --git a/src/Backups/registerBackupEngineS3.cpp b/src/Backups/registerBackupEngineS3.cpp index 59ed9506af0..79e3e945557 100644 --- a/src/Backups/registerBackupEngineS3.cpp +++ b/src/Backups/registerBackupEngineS3.cpp @@ -120,7 +120,8 @@ void registerBackupEngineS3(BackupFactory & factory) reader, params.context, params.is_internal_backup, - params.use_same_s3_credentials_for_base_backup); + params.use_same_s3_credentials_for_base_backup, + params.use_same_password_for_base_backup); } else { @@ -144,7 +145,8 @@ void registerBackupEngineS3(BackupFactory & factory) params.backup_coordination, params.backup_uuid, params.deduplicate_files, - params.use_same_s3_credentials_for_base_backup); + params.use_same_s3_credentials_for_base_backup, + params.use_same_password_for_base_backup); } #else throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "S3 support is disabled"); diff --git a/src/Backups/registerBackupEnginesFileAndDisk.cpp b/src/Backups/registerBackupEnginesFileAndDisk.cpp index 35263d39cba..c486f79a77a 100644 --- a/src/Backups/registerBackupEnginesFileAndDisk.cpp +++ b/src/Backups/registerBackupEnginesFileAndDisk.cpp @@ -178,7 +178,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory) reader, params.context, 
params.is_internal_backup, - params.use_same_s3_credentials_for_base_backup); + params.use_same_s3_credentials_for_base_backup, + params.use_same_password_for_base_backup); } else { @@ -197,7 +198,8 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory) params.backup_coordination, params.backup_uuid, params.deduplicate_files, - params.use_same_s3_credentials_for_base_backup); + params.use_same_s3_credentials_for_base_backup, + params.use_same_password_for_base_backup); } }; From 74de7833b8d429edcac9c4735143fc92349abdaf Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Mon, 8 Jul 2024 12:34:53 +0200 Subject: [PATCH 0455/2361] fix typo --- src/Backups/BackupSettings.h | 2 +- src/Backups/RestoreSettings.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Backups/BackupSettings.h b/src/Backups/BackupSettings.h index d8f48f6e1ac..0abeb897db4 100644 --- a/src/Backups/BackupSettings.h +++ b/src/Backups/BackupSettings.h @@ -50,7 +50,7 @@ struct BackupSettings /// Whether base backup to S3 should inherit credentials from the BACKUP query. bool use_same_s3_credentials_for_base_backup = false; - /// Wheter base backup archive should be unlocked using the same password as the incremental archive + /// Whether base backup archive should be unlocked using the same password as the incremental archive bool use_same_password_for_base_backup = false; /// Whether a new Azure container should be created if it does not exist (requires permissions at storage account level) diff --git a/src/Backups/RestoreSettings.h b/src/Backups/RestoreSettings.h index 0fe5ee1a4bf..fe07a0a7208 100644 --- a/src/Backups/RestoreSettings.h +++ b/src/Backups/RestoreSettings.h @@ -113,7 +113,7 @@ struct RestoreSettings /// Whether base backup from S3 should inherit credentials from the RESTORE query. bool use_same_s3_credentials_for_base_backup = false; - /// Wheter base backup archive should be unlocked using the same password as the incremental archive + /// Whether base backup archive should be unlocked using the same password as the incremental archive bool use_same_password_for_base_backup = false; /// If it's true RESTORE won't stop on broken parts while restoring, instead they will be restored as detached parts From 12fb08648670dcf39659596ccb552f3462004fd8 Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Mon, 8 Jul 2024 16:25:07 +0200 Subject: [PATCH 0456/2361] added test for use_same_password_for_base_backup --- ...se_same_password_for_base_backup.reference | 21 ++++++++ ...ackup_use_same_password_for_base_backup.sh | 50 +++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference create mode 100755 tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference new file mode 100644 index 00000000000..7354d50a7c0 --- /dev/null +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference @@ -0,0 +1,21 @@ +use_same_password_for_base_backup +base +BACKUP_CREATED +add_more_data_1 +inc_1 +BACKUP_CREATED +add_more_data_2 +inc_2 +BACKUP_CREATED +inc_2_bad +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE) +restore_inc_1 +RESTORED +restore_inc_2 +RESTORED +restore_inc_2_bad +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +count_inc_1 +20 +count_inc_2 +30 diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh new file mode 100755 index 00000000000..a2b1a953e24 --- /dev/null +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -nm -q " + DROP TABLE IF EXISTS data; + DROP TABLE IF EXISTS data_1; + DROP TABLE IF EXISTS data_2; + CREATE TABLE data (key Int) ENGINE=MergeTree() ORDER BY tuple(); + INSERT INTO data SELECT * from numbers(10); +" + +echo 'use_same_password_for_base_backup' +echo "base" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_base.zip') SETTINGS password='password';" | cut -f2 + +echo 'add_more_data_1' +$CLICKHOUSE_CLIENT -q "INSERT INTO data SELECT * FROM numbers(10,10);" + +echo "inc_1" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_base.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 + +echo 'add_more_data_2' +$CLICKHOUSE_CLIENT -q "INSERT INTO data SELECT * FROM numbers(20,10);" + +echo "inc_2" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 + +echo "inc_2_bad" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" + +echo "restore_inc_1" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 + +echo "restore_inc_2" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 + +echo "restore_inc_2_bad" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE)" + +echo "count_inc_1" +$CLICKHOUSE_CLIENT -q "SELECT COUNT(*) FROM data_1" | cut -f2 + +echo "count_inc_2" +$CLICKHOUSE_CLIENT -q "SELECT COUNT(*) FROM data_2" | cut -f2 + +exit 0 From 6f25aacd71948b682630889e75774eb4f2469aaa Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Tue, 9 Jul 2024 16:36:59 +0200 Subject: [PATCH 0457/2361] use CLICKHOUSE_TEST_NAME instead of CLICKHOUSE_TEST_UNIQUE_NAME for backup filename --- ...kup_use_same_password_for_base_backup.reference | 4 ++-- ...843_backup_use_same_password_for_base_backup.sh | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference index 7354d50a7c0..1a331cca46b 100644 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference @@ -8,13 +8,13 @@ add_more_data_2 inc_2 BACKUP_CREATED inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) restore_inc_1 RESTORED restore_inc_2 RESTORED restore_inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) count_inc_1 20 count_inc_2 diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh index a2b1a953e24..4c5bec3775c 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -15,31 +15,31 @@ $CLICKHOUSE_CLIENT -nm -q " echo 'use_same_password_for_base_backup' echo "base" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_base.zip') SETTINGS password='password';" | cut -f2 +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_base.zip') SETTINGS password='password';" | cut -f2 echo 'add_more_data_1' $CLICKHOUSE_CLIENT -q "INSERT INTO data SELECT * FROM numbers(10,10);" echo "inc_1" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_base.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_NAME}_base.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 echo 'add_more_data_2' $CLICKHOUSE_CLIENT -q "INSERT INTO data SELECT * FROM numbers(20,10);" echo "inc_2" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2.zip') SETTINGS 
base_backup=Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 echo "inc_2_bad" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" echo "restore_inc_1" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 echo "restore_inc_2" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 echo "restore_inc_2_bad" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" echo "count_inc_1" $CLICKHOUSE_CLIENT -q "SELECT COUNT(*) FROM data_1" | cut -f2 From cdd955f421e442cdb776e95e8e7ae8608bc8636e Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Wed, 10 Jul 2024 09:20:39 +0200 Subject: [PATCH 0458/2361] Revert "use CLICKHOUSE_TEST_NAME instead of CLICKHOUSE_TEST_UNIQUE_NAME for backup filename" This reverts commit 72f6368a2ede6a01390db770f1b9ddfa00d3f1fe. --- ...kup_use_same_password_for_base_backup.reference | 4 ++-- ...843_backup_use_same_password_for_base_backup.sh | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference index 1a331cca46b..7354d50a7c0 100644 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference @@ -8,13 +8,13 @@ add_more_data_2 inc_2 BACKUP_CREATED inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE) restore_inc_1 RESTORED restore_inc_2 RESTORED restore_inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) count_inc_1 20 count_inc_2 diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh index 4c5bec3775c..a2b1a953e24 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -15,31 +15,31 @@ $CLICKHOUSE_CLIENT -nm -q " echo 'use_same_password_for_base_backup' echo "base" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_base.zip') SETTINGS password='password';" | cut -f2 +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_base.zip') SETTINGS password='password';" | cut -f2 echo 'add_more_data_1' $CLICKHOUSE_CLIENT -q "INSERT INTO data SELECT * FROM numbers(10,10);" echo "inc_1" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_NAME}_base.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_base.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 echo 'add_more_data_2' $CLICKHOUSE_CLIENT -q "INSERT INTO data SELECT * FROM numbers(20,10);" echo "inc_2" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 echo "inc_2_bad" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE)" echo "restore_inc_1" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 echo "restore_inc_2" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 echo "restore_inc_2_bad" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" echo "count_inc_1" $CLICKHOUSE_CLIENT -q "SELECT COUNT(*) FROM data_1" | cut -f2 From ba1c6fe3ef3a7f263939b2ae9e6c249d9c6b38c0 Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Wed, 10 Jul 2024 09:23:19 +0200 Subject: [PATCH 0459/2361] shorter grep output --- .../02843_backup_use_same_password_for_base_backup.reference | 4 ++-- .../02843_backup_use_same_password_for_base_backup.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference index 7354d50a7c0..cbcb6b4cb7c 100644 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference @@ -8,13 +8,13 @@ add_more_data_2 inc_2 BACKUP_CREATED inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) restore_inc_1 RESTORED restore_inc_2 RESTORED restore_inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE) count_inc_1 20 count_inc_2 diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh index a2b1a953e24..f2f1265c1a0 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -30,7 +30,7 @@ echo "inc_2" $CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 echo "inc_2_bad" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" echo "restore_inc_1" $CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 @@ -39,7 +39,7 @@ echo "restore_inc_2" $CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 echo "restore_inc_2_bad" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" echo "count_inc_1" $CLICKHOUSE_CLIENT -q "SELECT COUNT(*) FROM data_1" | cut -f2 From 01c8faf190462d508d9cb1b7d342d6ccfbeeff88 Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Wed, 10 Jul 2024 11:15:33 +0200 Subject: [PATCH 0460/2361] Revert "shorter grep output" This reverts commit b0cbf1495dd0bcaba828706e895347e4ec550e29. --- .../02843_backup_use_same_password_for_base_backup.reference | 4 ++-- .../02843_backup_use_same_password_for_base_backup.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference index cbcb6b4cb7c..7354d50a7c0 100644 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference @@ -8,13 +8,13 @@ add_more_data_2 inc_2 BACKUP_CREATED inc_2_bad -_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE) restore_inc_1 RESTORED restore_inc_2 RESTORED restore_inc_2_bad -_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) count_inc_1 20 count_inc_2 diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh index f2f1265c1a0..a2b1a953e24 100755 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.sh @@ -30,7 +30,7 @@ echo "inc_2" $CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password',use_same_password_for_base_backup=1" | cut -f2 echo "inc_2_bad" -$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "BACKUP TABLE data TO Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2_bad.zip') SETTINGS base_backup=Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip'),password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" echo "restore_inc_1" $CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_1 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 @@ -39,7 +39,7 @@ echo "restore_inc_2" $CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password',use_same_password_for_base_backup=1" | cut -f2 echo "restore_inc_2_bad" -$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE)" +$CLICKHOUSE_CLIENT -q "RESTORE TABLE data AS data_2 FROM Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_2.zip') SETTINGS password='password'" |& grep -m1 -o "Couldn't unpack zip archive '${CLICKHOUSE_TEST_UNIQUE_NAME}_inc_1.zip': Password is required. 
(CANNOT_UNPACK_ARCHIVE)" echo "count_inc_1" $CLICKHOUSE_CLIENT -q "SELECT COUNT(*) FROM data_1" | cut -f2 From 9dae370569054b4933b374c12dee6461c938fea8 Mon Sep 17 00:00:00 2001 From: Samuele Guerrini Date: Wed, 10 Jul 2024 11:23:36 +0200 Subject: [PATCH 0461/2361] fix db name in backup_use_same_password_for_base_backup test reference file --- .../02843_backup_use_same_password_for_base_backup.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference index 7354d50a7c0..cdcf0532cd9 100644 --- a/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference +++ b/tests/queries/0_stateless/02843_backup_use_same_password_for_base_backup.reference @@ -8,13 +8,13 @@ add_more_data_2 inc_2 BACKUP_CREATED inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_default_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) restore_inc_1 RESTORED restore_inc_2 RESTORED restore_inc_2_bad -Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_test_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) +Couldn't unpack zip archive '02843_backup_use_same_password_for_base_backup_default_inc_1.zip': Password is required. (CANNOT_UNPACK_ARCHIVE) count_inc_1 20 count_inc_2 From b8d6c68f5fdc691f18000878203971689d2f812c Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 17 Jul 2024 10:30:03 +0200 Subject: [PATCH 0462/2361] Update 02874_parquet_multiple_batches_array_inconsistent_offsets.reference --- ...arquet_multiple_batches_array_inconsistent_offsets.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.reference b/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.reference index ba63f2f7e9c..a9eae234dba 100644 --- a/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.reference +++ b/tests/queries/0_stateless/02874_parquet_multiple_batches_array_inconsistent_offsets.reference @@ -1,3 +1,3 @@ Parquet -e76a749f346078a6a43e0cbd25f0d18a - +3249508141921544766 400 From f29700e04d3ca4d455908ed354472945597da4f5 Mon Sep 17 00:00:00 2001 From: morning-color Date: Wed, 17 Jul 2024 20:01:01 +0800 Subject: [PATCH 0463/2361] Fix flaky tests --- ...74_exact_rows_before_aggregation.reference | 26 +++++++++---------- .../03174_exact_rows_before_aggregation.sql | 4 +-- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference index 2fbdf325760..5f7fedbbcb3 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.reference @@ -355,15 +355,15 @@ "data": [ [0], - [4], - [3], - [2], - [5], [1], + [2], + [3], + [4], + [5], [6], [7], - [9], - [8] + [8], + [9] ], "rows": 10, @@ -384,20 +384,20 @@ "data": [ [0], - [4], - [3], - [2], - [5], [1], + [2], + [3], + [4], + [5], [6], [7], - [9], - [8] + [8], + [9] ], "rows": 10, - "rows_before_limit_at_least": 20, + "rows_before_limit_at_least": 10, "rows_before_aggregation": 
20 } diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql index 17e3f3c2cef..0afc0be4370 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql @@ -23,9 +23,9 @@ select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i select max(i) from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 FORMAT JSONCompact; -select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i limit 10 FORMAT JSONCompact; +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i order by i limit 10 FORMAT JSONCompact; set prefer_localhost_replica = 0; -select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i limit 10 FORMAT JSONCompact; +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i order by i limit 10 FORMAT JSONCompact; drop table if exists test; From 523e0abb4ec329c0535602c43c17991f4ef043a3 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 17 Jul 2024 13:15:14 +0000 Subject: [PATCH 0464/2361] Remove debug logs --- src/Processors/Sources/RemoteSource.cpp | 7 +------ src/QueryPipeline/RemoteQueryExecutor.cpp | 10 +--------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index f1d47f69782..e33613564a2 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -90,10 +90,7 @@ ISource::Status RemoteSource::prepare() void RemoteSource::work() { if (async_immediate_work.exchange(false)) - { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "async_immediate_work was true"); return; - } /// Connection drain is a heavy operation that may take a long time. /// Therefore we move connection drain from prepare() to work(), and drain multiple connections in parallel. @@ -114,13 +111,11 @@ void RemoteSource::asyncJobReady() if (!was_query_sent) return; - auto res = query_executor->readAsync(/*probe=*/true); + auto res = query_executor->readAsync(/*check_packet_type_only=*/true); if (res.type == RemoteQueryExecutor::ReadResult::Type::ParallelReplicasToken) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "async_immediate_work is {}", async_immediate_work); work(); async_immediate_work = true; - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "async_immediate_work is true"); } } diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 3ca05b53417..87f634b8334 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -486,10 +486,7 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync(bool check_packet { std::lock_guard lock(was_cancelled_mutex); if (was_cancelled) - { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "was_cancelled"); return ReadResult(Block()); - } if (has_postponed_packet) { @@ -520,17 +517,12 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync(bool check_packet /// Check if packet is not ready yet. 
if (read_context->isInProgress()) - { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "read_context still in progress"); return ReadResult(read_context->getFileDescriptor()); - } - - const auto packet_type = read_context->getPacketType(); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Packet type: {}", packet_type); if (check_packet_type_only) { has_postponed_packet = true; + const auto packet_type = read_context->getPacketType(); if (packet_type == Protocol::Server::MergeTreeReadTaskRequest || packet_type == Protocol::Server::MergeTreeAllRangesAnnouncement) { From baade8baf45c7919f106160b8b8633d2c59e3ae8 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Jul 2024 13:22:50 +0000 Subject: [PATCH 0465/2361] Replace updateWeakHash to getWeakHash --- src/Columns/ColumnAggregateFunction.cpp | 9 +-- src/Columns/ColumnAggregateFunction.h | 2 +- src/Columns/ColumnArray.cpp | 11 ++- src/Columns/ColumnArray.h | 2 +- src/Columns/ColumnCompressed.h | 3 +- src/Columns/ColumnConst.cpp | 14 +--- src/Columns/ColumnConst.h | 2 +- src/Columns/ColumnDecimal.cpp | 9 +-- src/Columns/ColumnDecimal.h | 2 +- src/Columns/ColumnDynamic.h | 5 +- src/Columns/ColumnFixedString.cpp | 10 +-- src/Columns/ColumnFixedString.h | 2 +- src/Columns/ColumnFunction.h | 5 +- src/Columns/ColumnLowCardinality.cpp | 26 +++--- src/Columns/ColumnLowCardinality.h | 4 +- src/Columns/ColumnMap.cpp | 4 +- src/Columns/ColumnMap.h | 2 +- src/Columns/ColumnNullable.cpp | 16 ++-- src/Columns/ColumnNullable.h | 2 +- src/Columns/ColumnObject.h | 3 +- src/Columns/ColumnSparse.cpp | 21 ++--- src/Columns/ColumnSparse.h | 2 +- src/Columns/ColumnString.cpp | 9 +-- src/Columns/ColumnString.h | 2 +- src/Columns/ColumnTuple.cpp | 11 ++- src/Columns/ColumnTuple.h | 2 +- src/Columns/ColumnVariant.cpp | 27 ++----- src/Columns/ColumnVariant.h | 2 +- src/Columns/ColumnVector.cpp | 9 +-- src/Columns/ColumnVector.h | 2 +- src/Columns/IColumn.h | 4 +- src/Columns/IColumnDummy.h | 4 +- src/Columns/IColumnUnique.h | 5 +- src/Columns/tests/gtest_weak_hash_32.cpp | 81 +++++++------------ src/Common/WeakHash.cpp | 22 +++++ src/Common/WeakHash.h | 5 +- src/Interpreters/ConcurrentHashJoin.cpp | 2 +- src/Interpreters/JoinUtils.cpp | 2 +- .../ScatterByPartitionTransform.cpp | 2 +- 39 files changed, 150 insertions(+), 197 deletions(-) diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp index cfd07c27765..33bd1266c90 100644 --- a/src/Columns/ColumnAggregateFunction.cpp +++ b/src/Columns/ColumnAggregateFunction.cpp @@ -366,13 +366,10 @@ void ColumnAggregateFunction::updateHashWithValue(size_t n, SipHash & hash) cons hash.update(wbuf.str().c_str(), wbuf.str().size()); } -void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnAggregateFunction::getWeakHash32() const { auto s = data.size(); - if (hash.getData().size() != data.size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), hash.getData().size()); - + WeakHash32 hash(s); auto & hash_data = hash.getData(); std::vector v; @@ -383,6 +380,8 @@ void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const wbuf.finalize(); hash_data[i] = ::updateWeakHash32(v.data(), v.size(), hash_data[i]); } + + return hash; } void ColumnAggregateFunction::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnAggregateFunction.h b/src/Columns/ColumnAggregateFunction.h index 1be7a862438..330a707b75c 100644 --- 
a/src/Columns/ColumnAggregateFunction.h +++ b/src/Columns/ColumnAggregateFunction.h @@ -177,7 +177,7 @@ public: void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 5d7350f3a79..9203fb8042f 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -271,15 +271,12 @@ void ColumnArray::updateHashWithValue(size_t n, SipHash & hash) const getData().updateHashWithValue(offset + i, hash); } -void ColumnArray::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnArray::getWeakHash32() const { auto s = offsets->size(); - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", s, hash.getData().size()); + WeakHash32 hash(s); - WeakHash32 internal_hash(data->size()); - data->updateWeakHash32(internal_hash); + WeakHash32 internal_hash = data->getWeakHash32(); Offset prev_offset = 0; const auto & offsets_data = getOffsets(); @@ -300,6 +297,8 @@ void ColumnArray::updateWeakHash32(WeakHash32 & hash) const prev_offset = offsets_data[i]; } + + return hash; } void ColumnArray::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index 6cd3e2f6c3b..5e01b9144d7 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -82,7 +82,7 @@ public: const char * deserializeAndInsertFromArena(const char * pos) override; const char * skipSerializedInArena(const char * pos) const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; #if !defined(ABORT_ON_LOGICAL_ERROR) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; diff --git a/src/Columns/ColumnCompressed.h b/src/Columns/ColumnCompressed.h index 5e455709fec..19470113394 100644 --- a/src/Columns/ColumnCompressed.h +++ b/src/Columns/ColumnCompressed.h @@ -3,6 +3,7 @@ #include #include #include +#include #include @@ -98,7 +99,7 @@ public: const char * deserializeAndInsertFromArena(const char *) override { throwMustBeDecompressed(); } const char * skipSerializedInArena(const char *) const override { throwMustBeDecompressed(); } void updateHashWithValue(size_t, SipHash &) const override { throwMustBeDecompressed(); } - void updateWeakHash32(WeakHash32 &) const override { throwMustBeDecompressed(); } + WeakHash32 getWeakHash32() const override { throwMustBeDecompressed(); } void updateHashFast(SipHash &) const override { throwMustBeDecompressed(); } ColumnPtr filter(const Filter &, ssize_t) const override { throwMustBeDecompressed(); } void expand(const Filter &, bool) override { throwMustBeDecompressed(); } diff --git a/src/Columns/ColumnConst.cpp b/src/Columns/ColumnConst.cpp index f2cea83db0e..84427e7be2b 100644 --- a/src/Columns/ColumnConst.cpp +++ b/src/Columns/ColumnConst.cpp @@ -137,18 +137,10 @@ void ColumnConst::updatePermutation(PermutationSortDirection /*direction*/, Perm { } -void ColumnConst::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnConst::getWeakHash32() const { - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of 
column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); - - WeakHash32 element_hash(1); - data->updateWeakHash32(element_hash); - size_t data_hash = element_hash.getData()[0]; - - for (auto & value : hash.getData()) - value = static_cast(intHashCRC32(data_hash, value)); + WeakHash32 element_hash = data->getWeakHash32(); + return WeakHash32(s, element_hash.getData()[0]); } void ColumnConst::compareColumn( diff --git a/src/Columns/ColumnConst.h b/src/Columns/ColumnConst.h index b55a1f42037..65ce53687b9 100644 --- a/src/Columns/ColumnConst.h +++ b/src/Columns/ColumnConst.h @@ -204,7 +204,7 @@ public: data->updateHashWithValue(0, hash); } - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override { diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp index cf413f790a7..ed9c699a841 100644 --- a/src/Columns/ColumnDecimal.cpp +++ b/src/Columns/ColumnDecimal.cpp @@ -76,13 +76,10 @@ void ColumnDecimal::updateHashWithValue(size_t n, SipHash & hash) const } template -void ColumnDecimal::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnDecimal::getWeakHash32() const { auto s = data.size(); - - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); + WeakHash32 hash(s); const T * begin = data.data(); const T * end = begin + s; @@ -94,6 +91,8 @@ void ColumnDecimal::updateWeakHash32(WeakHash32 & hash) const ++begin; ++hash_data; } + + return hash; } template diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 32efeb643a6..eb8a305a822 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -102,7 +102,7 @@ public: const char * deserializeAndInsertFromArena(const char * pos) override; const char * skipSerializedInArena(const char * pos) const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; #if !defined(ABORT_ON_LOGICAL_ERROR) int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 9abddc7a26d..6f09abb945a 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -174,9 +175,9 @@ public: void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override + WeakHash32 getWeakHash32() const override { - variant_column->updateWeakHash32(hash); + return variant_column->getWeakHash32(); } void updateHashFast(SipHash & hash) const override diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index 1c2de203a94..4d17eb0bebd 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -137,14 +137,10 @@ void ColumnFixedString::updateHashWithValue(size_t index, SipHash & hash) const hash.update(reinterpret_cast(&chars[n * index]), n); } -void ColumnFixedString::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnFixedString::getWeakHash32() const { auto s = size(); - - if (hash.getData().size() != s) - throw 
Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, " - "hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); + WeakHash32 hash(s); const UInt8 * pos = chars.data(); UInt32 * hash_data = hash.getData().data(); @@ -156,6 +152,8 @@ void ColumnFixedString::updateWeakHash32(WeakHash32 & hash) const pos += n; ++hash_data; } + + return hash; } void ColumnFixedString::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h index 6e88136fc50..630c6c1c0a6 100644 --- a/src/Columns/ColumnFixedString.h +++ b/src/Columns/ColumnFixedString.h @@ -133,7 +133,7 @@ public: void updateHashWithValue(size_t index, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; diff --git a/src/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h index ba924c49a82..dcd67aecad7 100644 --- a/src/Columns/ColumnFunction.h +++ b/src/Columns/ColumnFunction.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -130,9 +131,9 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "updateHashWithValue is not implemented for {}", getName()); } - void updateWeakHash32(WeakHash32 &) const override + WeakHash32 getWeakHash32() const override { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "updateWeakHash32 is not implemented for {}", getName()); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getWeakHash32 is not implemented for {}", getName()); } void updateHashFast(SipHash &) const override diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp index eb694a10b0f..49ce948bf78 100644 --- a/src/Columns/ColumnLowCardinality.cpp +++ b/src/Columns/ColumnLowCardinality.cpp @@ -7,8 +7,7 @@ #include #include #include -#include "Storages/IndicesDescription.h" -#include "base/types.h" +#include #include #include @@ -320,19 +319,10 @@ const char * ColumnLowCardinality::skipSerializedInArena(const char * pos) const return getDictionary().skipSerializedInArena(pos); } -void ColumnLowCardinality::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnLowCardinality::getWeakHash32() const { - auto s = size(); - - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); - - const auto & dict = getDictionary().getNestedColumn(); - WeakHash32 dict_hash(dict->size()); - dict->updateWeakHash32(dict_hash); - - idx.updateWeakHash(hash, dict_hash); + WeakHash32 dict_hash = getDictionary().getNestedColumn()->getWeakHash32(); + return idx.getWeakHash(dict_hash); } void ColumnLowCardinality::updateHashFast(SipHash & hash) const @@ -832,10 +822,11 @@ bool ColumnLowCardinality::Index::containsDefault() const return contains; } -void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 & dict_hash) const +WeakHash32 ColumnLowCardinality::Index::getWeakHash(const WeakHash32 & dict_hash) const { + WeakHash32 hash(positions->size()); auto & hash_data = hash.getData(); - auto & dict_hash_data = dict_hash.getData(); + const auto & dict_hash_data = dict_hash.getData(); auto update_weak_hash = [&](auto x) { @@ -844,10 +835,11 @@ void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 & auto size = data.size(); for (size_t i 
= 0; i < size; ++i) - hash_data[i] = static_cast(intHashCRC32(dict_hash_data[data[i]], hash_data[i])); + hash_data[i] = dict_hash_data[data[i]]; }; callForType(std::move(update_weak_hash), size_of_type); + return hash; } void ColumnLowCardinality::Index::collectSerializedValueSizes( diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h index e99be07cd8d..fb0c1237fcf 100644 --- a/src/Columns/ColumnLowCardinality.h +++ b/src/Columns/ColumnLowCardinality.h @@ -111,7 +111,7 @@ public: getDictionary().updateHashWithValue(getIndexes().getUInt(n), hash); } - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash &) const override; @@ -325,7 +325,7 @@ public: bool containsDefault() const; - void updateWeakHash(WeakHash32 & hash, WeakHash32 & dict_hash) const; + WeakHash32 getWeakHash(const WeakHash32 & dict_hash) const; void collectSerializedValueSizes(PaddedPODArray & sizes, const PaddedPODArray & dict_sizes) const; diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 2dffddb2dc9..08d7734ac6b 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -143,9 +143,9 @@ void ColumnMap::updateHashWithValue(size_t n, SipHash & hash) const nested->updateHashWithValue(n, hash); } -void ColumnMap::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnMap::getWeakHash32() const { - nested->updateWeakHash32(hash); + return nested->getWeakHash32(); } void ColumnMap::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index a54071a2974..29275e1b5f7 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -64,7 +64,7 @@ public: const char * deserializeAndInsertFromArena(const char * pos) override; const char * skipSerializedInArena(const char * pos) const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; #if !defined(ABORT_ON_LOGICAL_ERROR) diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index f060e74b315..64e99a3bbe8 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -56,25 +56,21 @@ void ColumnNullable::updateHashWithValue(size_t n, SipHash & hash) const getNestedColumn().updateHashWithValue(n, hash); } -void ColumnNullable::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnNullable::getWeakHash32() const { auto s = size(); - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); - - WeakHash32 old_hash = hash; - nested_column->updateWeakHash32(hash); + WeakHash32 hash = nested_column->getWeakHash32(); const auto & null_map_data = getNullMapData(); auto & hash_data = hash.getData(); - auto & old_hash_data = old_hash.getData(); - /// Use old data for nulls. + /// Use defualt for nulls. 
for (size_t row = 0; row < s; ++row) if (null_map_data[row]) - hash_data[row] = old_hash_data[row]; + hash_data[row] = WeakHash32::kDefaultInitialValue; + + return hash; } void ColumnNullable::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index a6d0483e527..15bbd8c3b57 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -133,7 +133,7 @@ public: void protect() override; ColumnPtr replicate(const Offsets & replicate_offsets) const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; void getExtremes(Field & min, Field & max) const override; // Special function for nullable minmax index diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index fadf2e18779..21607e003f2 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -252,7 +253,7 @@ public: const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); } const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); } void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); } - void updateWeakHash32(WeakHash32 &) const override { throwMustBeConcrete(); } + WeakHash32 getWeakHash32() const override { throwMustBeConcrete(); } void updateHashFast(SipHash &) const override { throwMustBeConcrete(); } void expand(const Filter &, bool) override { throwMustBeConcrete(); } bool hasEqualValues() const override { throwMustBeConcrete(); } diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp index 98a66e87387..0d103a263dd 100644 --- a/src/Columns/ColumnSparse.cpp +++ b/src/Columns/ColumnSparse.cpp @@ -678,26 +678,13 @@ void ColumnSparse::updateHashWithValue(size_t n, SipHash & hash) const values->updateHashWithValue(getValueIndex(n), hash); } -void ColumnSparse::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnSparse::getWeakHash32() const { - if (hash.getData().size() != _size) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", _size, hash.getData().size()); - - size_t values_size = values->size(); - WeakHash32 values_hash(values_size); + WeakHash32 values_hash = values->getWeakHash32(); + WeakHash32 hash(size()); auto & hash_data = hash.getData(); auto & values_hash_data = values_hash.getData(); - const auto & offsets_data = getOffsetsData(); - - if (getNumberOfDefaultRows() > 0) - values_hash_data[0] = hash_data[getFirstDefaultValueIndex()]; - - for (size_t i = 0; i + 1 < values_size; ++i) - values_hash_data[i + 1] = hash_data[offsets_data[i]]; - - values->updateWeakHash32(values_hash); auto offset_it = begin(); for (size_t i = 0; i < _size; ++i, ++offset_it) @@ -705,6 +692,8 @@ void ColumnSparse::updateWeakHash32(WeakHash32 & hash) const size_t value_index = offset_it.getValueIndex(); hash_data[i] = values_hash_data[value_index]; } + + return hash; } void ColumnSparse::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnSparse.h b/src/Columns/ColumnSparse.h index 4860f5171f7..a5d4d788b17 100644 --- a/src/Columns/ColumnSparse.h +++ b/src/Columns/ColumnSparse.h @@ -139,7 +139,7 @@ public: void protect() override; ColumnPtr replicate(const Offsets & replicate_offsets) 
const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; void getExtremes(Field & min, Field & max) const override; diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 1eda9714d62..4accfbe8f41 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -108,13 +108,10 @@ MutableColumnPtr ColumnString::cloneResized(size_t to_size) const return res; } -void ColumnString::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnString::getWeakHash32() const { auto s = offsets.size(); - - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); + WeakHash32 hash(s); const UInt8 * pos = chars.data(); UInt32 * hash_data = hash.getData().data(); @@ -130,6 +127,8 @@ void ColumnString::updateWeakHash32(WeakHash32 & hash) const prev_offset = offset; ++hash_data; } + + return hash; } diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h index 602ffac65e8..faaaa8848ca 100644 --- a/src/Columns/ColumnString.h +++ b/src/Columns/ColumnString.h @@ -212,7 +212,7 @@ public: hash.update(reinterpret_cast(&chars[offset]), string_size); } - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override { diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 9b822d7f570..cb0b05d2154 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -308,16 +308,15 @@ void ColumnTuple::updateHashWithValue(size_t n, SipHash & hash) const column->updateHashWithValue(n, hash); } -void ColumnTuple::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnTuple::getWeakHash32() const { auto s = size(); - - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); + WeakHash32 hash(s); for (const auto & column : columns) - column->updateWeakHash32(hash); + hash.update(column->getWeakHash32()); + + return hash; } void ColumnTuple::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 38e479791d4..2fafd93f776 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -81,7 +81,7 @@ public: const char * deserializeAndInsertFromArena(const char * pos) override; const char * skipSerializedInArena(const char * pos) const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; #if !defined(ABORT_ON_LOGICAL_ERROR) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index ee5de4c2dde..8fd6e1bbac1 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -789,36 +789,26 @@ void ColumnVariant::updateHashWithValue(size_t n, SipHash & hash) const variants[localDiscriminatorByGlobal(global_discr)]->updateHashWithValue(offsetAt(n), hash); } -void 
ColumnVariant::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnVariant::getWeakHash32() const { auto s = size(); - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); - /// If we have only NULLs, keep hash unchanged. if (hasOnlyNulls()) - return; + return WeakHash32(s); /// Optimization for case when there is only 1 non-empty variant and no NULLs. /// In this case we can just calculate weak hash for this variant. if (auto non_empty_local_discr = getLocalDiscriminatorOfOneNoneEmptyVariantNoNulls()) - { - variants[*non_empty_local_discr]->updateWeakHash32(hash); - return; - } + return variants[*non_empty_local_discr]->getWeakHash32(); /// Calculate weak hash for all variants. std::vector nested_hashes; for (const auto & variant : variants) - { - WeakHash32 nested_hash(variant->size()); - variant->updateWeakHash32(nested_hash); - nested_hashes.emplace_back(std::move(nested_hash)); - } + nested_hashes.emplace_back(variant->getWeakHash32()); /// For each row hash is a hash of corresponding row from corresponding variant. + WeakHash32 hash(s); auto & hash_data = hash.getData(); const auto & local_discriminators_data = getLocalDiscriminators(); const auto & offsets_data = getOffsets(); @@ -827,11 +817,10 @@ void ColumnVariant::updateWeakHash32(WeakHash32 & hash) const Discriminator discr = local_discriminators_data[i]; /// Update hash only for non-NULL values if (discr != NULL_DISCRIMINATOR) - { - auto nested_hash = nested_hashes[local_discriminators_data[i]].getData()[offsets_data[i]]; - hash_data[i] = static_cast(hashCRC32(nested_hash, hash_data[i])); - } + hash_data[i] = nested_hashes[discr].getData()[offsets_data[i]]; } + + return hash; } void ColumnVariant::updateHashFast(SipHash & hash) const diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index d91b8e93a7d..94f3066e676 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -213,7 +213,7 @@ public: const char * deserializeVariantAndInsertFromArena(Discriminator global_discr, const char * pos); const char * skipSerializedInArena(const char * pos) const override; void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override; void expand(const Filter & mask, bool inverted) override; diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index 19d1b800961..185a1e0f615 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -73,13 +73,10 @@ void ColumnVector::updateHashWithValue(size_t n, SipHash & hash) const } template -void ColumnVector::updateWeakHash32(WeakHash32 & hash) const +WeakHash32 ColumnVector::getWeakHash32() const { auto s = data.size(); - - if (hash.getData().size() != s) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match size of column: " - "column size is {}, hash size is {}", std::to_string(s), std::to_string(hash.getData().size())); + WeakHash32 hash(s); const T * begin = data.data(); const T * end = begin + s; @@ -91,6 +88,8 @@ void ColumnVector::updateWeakHash32(WeakHash32 & hash) const ++begin; ++hash_data; } + + return hash; } template diff --git a/src/Columns/ColumnVector.h 
b/src/Columns/ColumnVector.h index 3a0acf5e312..c01778ecf32 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -114,7 +114,7 @@ public: void updateHashWithValue(size_t n, SipHash & hash) const override; - void updateWeakHash32(WeakHash32 & hash) const override; + WeakHash32 getWeakHash32() const override; void updateHashFast(SipHash & hash) const override; diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index 4b6f34e5aa2..3798d3b7466 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -300,10 +300,10 @@ public: /// passed bytes to hash must identify sequence of values unambiguously. virtual void updateHashWithValue(size_t n, SipHash & hash) const = 0; - /// Update hash function value. Hash is calculated for each element. + /// Get hash function value. Hash is calculated for each element. /// It's a fast weak hash function. Mainly need to scatter data between threads. /// WeakHash32 must have the same size as column. - virtual void updateWeakHash32(WeakHash32 & hash) const = 0; + virtual WeakHash32 getWeakHash32() const = 0; /// Update state of hash with all column. virtual void updateHashFast(SipHash & hash) const = 0; diff --git a/src/Columns/IColumnDummy.h b/src/Columns/IColumnDummy.h index c19fb704d9b..b18f4fdb302 100644 --- a/src/Columns/IColumnDummy.h +++ b/src/Columns/IColumnDummy.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -63,8 +64,9 @@ public: { } - void updateWeakHash32(WeakHash32 & /*hash*/) const override + WeakHash32 getWeakHash32() const override { + return WeakHash32(s); } void updateHashFast(SipHash & /*hash*/) const override diff --git a/src/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h index 3398452b7ee..1b86204f5b1 100644 --- a/src/Columns/IColumnUnique.h +++ b/src/Columns/IColumnUnique.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include namespace DB { @@ -166,9 +167,9 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method scatter is not supported for ColumnUnique."); } - void updateWeakHash32(WeakHash32 &) const override + WeakHash32 getWeakHash32() const override { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method updateWeakHash32 is not supported for ColumnUnique."); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getWeakHash32 is not supported for ColumnUnique."); } void updateHashFast(SipHash &) const override diff --git a/src/Columns/tests/gtest_weak_hash_32.cpp b/src/Columns/tests/gtest_weak_hash_32.cpp index 2c95998761b..3143d0ff83c 100644 --- a/src/Columns/tests/gtest_weak_hash_32.cpp +++ b/src/Columns/tests/gtest_weak_hash_32.cpp @@ -60,8 +60,7 @@ TEST(WeakHash32, ColumnVectorU8) data.push_back(i); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -77,8 +76,7 @@ TEST(WeakHash32, ColumnVectorI8) data.push_back(i); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -94,8 +92,7 @@ TEST(WeakHash32, ColumnVectorU16) data.push_back(i); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -111,8 +108,7 @@ TEST(WeakHash32, ColumnVectorI16) data.push_back(i); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -128,8 +124,7 @@ TEST(WeakHash32, 
ColumnVectorU32) data.push_back(i << 16u); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -145,8 +140,7 @@ TEST(WeakHash32, ColumnVectorI32) data.push_back(i << 16); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -162,8 +156,7 @@ TEST(WeakHash32, ColumnVectorU64) data.push_back(i << 32u); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -179,8 +172,7 @@ TEST(WeakHash32, ColumnVectorI64) data.push_back(i << 32); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -204,8 +196,7 @@ TEST(WeakHash32, ColumnVectorU128) } } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), eq_data); } @@ -221,8 +212,7 @@ TEST(WeakHash32, ColumnVectorI128) data.push_back(i << 32); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -238,8 +228,7 @@ TEST(WeakHash32, ColumnDecimal32) data.push_back(i << 16); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -255,8 +244,7 @@ TEST(WeakHash32, ColumnDecimal64) data.push_back(i << 32); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -272,8 +260,7 @@ TEST(WeakHash32, ColumnDecimal128) data.push_back(i << 32); } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), col->getData()); } @@ -294,8 +281,7 @@ TEST(WeakHash32, ColumnString1) } } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), data); } @@ -331,8 +317,7 @@ TEST(WeakHash32, ColumnString2) } } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), data); } @@ -369,8 +354,7 @@ TEST(WeakHash32, ColumnString3) } } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), data); } @@ -397,8 +381,7 @@ TEST(WeakHash32, ColumnFixedString) } } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), data); } @@ -444,8 +427,7 @@ TEST(WeakHash32, ColumnArray) auto col_arr = ColumnArray::create(std::move(val), std::move(off)); - WeakHash32 hash(col_arr->size()); - col_arr->updateWeakHash32(hash); + WeakHash32 hash = col_arr->getWeakHash32(); checkColumn(hash.getData(), eq_data); } @@ -479,8 +461,7 @@ TEST(WeakHash32, ColumnArray2) auto col_arr = ColumnArray::create(std::move(val), std::move(off)); - WeakHash32 hash(col_arr->size()); - col_arr->updateWeakHash32(hash); + WeakHash32 hash = col_arr->getWeakHash32(); checkColumn(hash.getData(), eq_data); } @@ -536,8 +517,7 @@ TEST(WeakHash32, ColumnArrayArray) auto col_arr = ColumnArray::create(std::move(val), std::move(off)); auto col_arr_arr = 
ColumnArray::create(std::move(col_arr), std::move(off2)); - WeakHash32 hash(col_arr_arr->size()); - col_arr_arr->updateWeakHash32(hash); + WeakHash32 hash = col_arr_arr->getWeakHash32(); checkColumn(hash.getData(), eq_data); } @@ -555,8 +535,7 @@ TEST(WeakHash32, ColumnConst) auto col_const = ColumnConst::create(std::move(inner_col), 256); - WeakHash32 hash(col_const->size()); - col_const->updateWeakHash32(hash); + WeakHash32 hash = col_const->getWeakHash32(); checkColumn(hash.getData(), data); } @@ -576,8 +555,7 @@ TEST(WeakHash32, ColumnLowcardinality) } } - WeakHash32 hash(col->size()); - col->updateWeakHash32(hash); + WeakHash32 hash = col->getWeakHash32(); checkColumn(hash.getData(), data); } @@ -602,8 +580,7 @@ TEST(WeakHash32, ColumnNullable) auto col_null = ColumnNullable::create(std::move(col), std::move(mask)); - WeakHash32 hash(col_null->size()); - col_null->updateWeakHash32(hash); + WeakHash32 hash = col_null->getWeakHash32(); checkColumn(hash.getData(), eq); } @@ -633,8 +610,7 @@ TEST(WeakHash32, ColumnTupleUInt64UInt64) columns.emplace_back(std::move(col2)); auto col_tuple = ColumnTuple::create(std::move(columns)); - WeakHash32 hash(col_tuple->size()); - col_tuple->updateWeakHash32(hash); + WeakHash32 hash = col_tuple->getWeakHash32(); checkColumn(hash.getData(), eq); } @@ -671,8 +647,7 @@ TEST(WeakHash32, ColumnTupleUInt64String) columns.emplace_back(std::move(col2)); auto col_tuple = ColumnTuple::create(std::move(columns)); - WeakHash32 hash(col_tuple->size()); - col_tuple->updateWeakHash32(hash); + WeakHash32 hash = col_tuple->getWeakHash32(); checkColumn(hash.getData(), eq); } @@ -709,8 +684,7 @@ TEST(WeakHash32, ColumnTupleUInt64FixedString) columns.emplace_back(std::move(col2)); auto col_tuple = ColumnTuple::create(std::move(columns)); - WeakHash32 hash(col_tuple->size()); - col_tuple->updateWeakHash32(hash); + WeakHash32 hash = col_tuple->getWeakHash32(); checkColumn(hash.getData(), eq); } @@ -756,8 +730,7 @@ TEST(WeakHash32, ColumnTupleUInt64Array) columns.emplace_back(ColumnArray::create(std::move(val), std::move(off))); auto col_tuple = ColumnTuple::create(std::move(columns)); - WeakHash32 hash(col_tuple->size()); - col_tuple->updateWeakHash32(hash); + WeakHash32 hash = col_tuple->getWeakHash32(); checkColumn(hash.getData(), eq_data); } diff --git a/src/Common/WeakHash.cpp b/src/Common/WeakHash.cpp index 54d973b6296..cb12df84db1 100644 --- a/src/Common/WeakHash.cpp +++ b/src/Common/WeakHash.cpp @@ -1,2 +1,24 @@ #include +#include +#include +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +void WeakHash32::update(const WeakHash32 & other) +{ + size_t size = data.size(); + if (size != other.data.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of WeakHash32 does not match:" + "left size is {}, right size is {}", size, other.data.size()); + + for (size_t i = 0; i < size; ++i) + data[i] = static_cast(intHashCRC32(other.data[i], data[i])); +} + +} diff --git a/src/Common/WeakHash.h b/src/Common/WeakHash.h index b59624e64f2..d4a8d63868c 100644 --- a/src/Common/WeakHash.h +++ b/src/Common/WeakHash.h @@ -11,9 +11,8 @@ namespace DB /// The main purpose why this class needed is to support data initialization. Initially, every bit is 1. 
class WeakHash32 { - static constexpr UInt32 kDefaultInitialValue = ~UInt32(0); - public: + static constexpr UInt32 kDefaultInitialValue = ~UInt32(0); using Container = PaddedPODArray; @@ -22,6 +21,8 @@ public: void reset(size_t size, UInt32 initial_value = kDefaultInitialValue) { data.assign(size, initial_value); } + void update(const WeakHash32 & other); + const Container & getData() const { return data; } Container & getData() { return data; } diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 4493a9f4dbd..ac940c62a1a 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -310,7 +310,7 @@ IColumn::Selector ConcurrentHashJoin::selectDispatchBlock(const Strings & key_co { const auto & key_col = from_block.getByName(key_name).column->convertToFullColumnIfConst(); const auto & key_col_no_lc = recursiveRemoveLowCardinality(recursiveRemoveSparse(key_col)); - key_col_no_lc->updateWeakHash32(hash); + hash.update(key_col_no_lc->getWeakHash32()); } return hashToSelector(hash, num_shards); } diff --git a/src/Interpreters/JoinUtils.cpp b/src/Interpreters/JoinUtils.cpp index 1788c9aca48..180a45d4295 100644 --- a/src/Interpreters/JoinUtils.cpp +++ b/src/Interpreters/JoinUtils.cpp @@ -554,7 +554,7 @@ static Blocks scatterBlockByHashImpl(const Strings & key_columns_names, const Bl for (const auto & key_name : key_columns_names) { ColumnPtr key_col = materializeColumn(block, key_name); - key_col->updateWeakHash32(hash); + hash.update(key_col->getWeakHash32()); } auto selector = hashToSelector(hash, sharder); diff --git a/src/Processors/Transforms/ScatterByPartitionTransform.cpp b/src/Processors/Transforms/ScatterByPartitionTransform.cpp index 6e3cdc0fda1..16d265c9bcb 100644 --- a/src/Processors/Transforms/ScatterByPartitionTransform.cpp +++ b/src/Processors/Transforms/ScatterByPartitionTransform.cpp @@ -109,7 +109,7 @@ void ScatterByPartitionTransform::generateOutputChunks() hash.reset(num_rows); for (const auto & column_number : key_columns) - columns[column_number]->updateWeakHash32(hash); + hash.update(columns[column_number]->getWeakHash32()); const auto & hash_data = hash.getData(); IColumn::Selector selector(num_rows); From 63936364b1abf345349403e656c4cf58c44715bc Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 17 Jul 2024 16:53:35 +0200 Subject: [PATCH 0466/2361] fixes of tests --- src/Storages/VirtualColumnUtils.cpp | 2 +- .../0_stateless/03203_hive_style_partitioning.sh | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 31cee485dde..87c1aecc3a7 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -127,7 +127,7 @@ NameSet getVirtualNamesForFileLikeStorage() std::unordered_map parseHivePartitioningKeysAndValues(const String & path, const ColumnsDescription & storage_columns) { - std::string pattern = "/([^/]+)=([^/]+)"; + std::string pattern = "([^/]+)=([^/]+)/"; re2::StringPiece input_piece(path); std::unordered_map key_values; diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh index 0f687d532b0..db1f073d736 100755 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.sh +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -124,13 +124,13 @@ $CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE 
S3CLUSTER PARTITIONING'" $CLICKHOUSE_CLIENT -n -q """ set use_hive_partitioning = 1; -SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; -SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; -SELECT *, _column0, _column1 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0, _column1 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; -SELECT *, _column0, _column1 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; -SELECT *, _column0 FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0, _column1 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; """ From 938071cd55913c3bb2b8781750ef37bf6307acab Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 17 Jul 2024 14:56:00 +0000 Subject: [PATCH 0467/2361] add ci_include_fuzzer to PR body template --- .github/PULL_REQUEST_TEMPLATE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e045170561d..146542e980c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -59,6 +59,8 @@ At a minimum, the following information should be added (but add more as needed) - [ ] Exclude: All with TSAN, MSAN, UBSAN, Coverage - [ ] Exclude: All with aarch64, release, debug --- +- [ ] Run only libFuzzer related jobs +--- - [ ] Do not test - [ ] Woolen Wolfdog - [ ] Upload binaries for special builds From 542542b44d4688bc125887f811843249a4024379 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 17 Jul 2024 14:58:58 +0000 Subject: [PATCH 0468/2361] fix test --- 
.../queries/0_stateless/03161_lightweight_delete_projection.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 2c60d83d74d..3bf459cc32d 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,4 +1,6 @@ +SET lightweight_deletes_sync = 2; + DROP TABLE IF EXISTS users; -- compact part From 2dbd04c8a77bf262f0965ddef9a2c166c22fcf55 Mon Sep 17 00:00:00 2001 From: Blargian Date: Wed, 17 Jul 2024 17:01:22 +0200 Subject: [PATCH 0469/2361] add toIntXYZ documentation --- .../functions/type-conversion-functions.md | 1275 ++++++++++++++++- 1 file changed, 1239 insertions(+), 36 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 61e84ca72d1..057083d317f 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -49,105 +49,1308 @@ SETTINGS cast_keep_nullable = 1 └──────────────────┴─────────────────────┴──────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256) +## toInt8 -Converts an input value to a value the [Int](../data-types/int-uint.md) data type. This function family includes: +Converts an input value to a value of type `Int8`. -- `toInt8(expr)` — Converts to a value of data type `Int8`. -- `toInt16(expr)` — Converts to a value of data type `Int16`. -- `toInt32(expr)` — Converts to a value of data type `Int32`. -- `toInt64(expr)` — Converts to a value of data type `Int64`. -- `toInt128(expr)` — Converts to a value of data type `Int128`. -- `toInt256(expr)` — Converts to a value of data type `Int256`. +**Syntax** + +```sql +toInt8(expr) +``` **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions) returning a number or a string with the decimal representation of a number. Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: **Returned value** -Integer value in the `Int8`, `Int16`, `Int32`, `Int64`, `Int128` or `Int256` data type. +- 8-bit integer value. [Int8](../data-types/int-uint.md). -Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers. +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: -The behavior of functions for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. Remember about [numeric conversions issues](#common-issues-with-data-conversion), when using the functions. +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +```sql +SELECT + toInt8(-8), + toInt8(-8.8), + toInt8('-8'); +``` + +Result: + +```response + ┌─toInt8(-8)─┬─toInt8(-8.8)─┬─toInt8('-8')─┠+1. 
│ -8 │ -8 │ -8 │ + └────────────┴──────────────┴──────────────┘ +``` + +**See also** + +- [`toInt8OrZero`](#toint8orzero). +- [`toInt8OrNull`](#toint8ornull). +- [`toInt8OrDefault`](#toint8ordefault). + +## toInt8OrZero + +Like [`toInt8`](#toint8), it takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int8`. If unsuccessful, returns `0`. + +**Syntax** + +```sql +toInt8OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 8-bit integer value if successful, otherwise `0`. [Int8](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: **Example** Query: ``` sql -SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8); +SELECT + toInt8OrZero('-8'), + toInt8OrZero('abc'); ``` Result: ```response -┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┠-│ -9223372036854775808 │ 32 │ 16 │ 8 │ -└──────────────────────┴─────────────┴───────────────┴─────────────┘ + ┌─toInt8OrZero('-8')─┬─toInt8OrZero('abc')─┠+1. │ -8 │ 0 │ + └────────────────────┴─────────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256)OrZero +**See also** -Takes an argument of type [String](../data-types/string.md) and tries to parse it into an Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If unsuccessful, returns `0`. +- [`toInt8`](#toint8). +- [`toInt8OrNull`](#toint8ornull). +- [`toInt8OrDefault`](#toint8ordefault). + +## toInt8OrNull + +Like [`toInt8`](#toint8), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int8`. If unsuccessful, returns `NULL`. + +**Syntax** + +```sql +toInt8OrNull(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 8-bit integer value if successful, otherwise `NULL`. [Int8](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: **Example** Query: ``` sql -SELECT toInt64OrZero('123123'), toInt8OrZero('123qwe123'); +SELECT toInt8OrNull('-8'), toInt8OrNull('abc'); ``` Result: ```response -┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┠-│ 123123 │ 0 │ -└─────────────────────────┴───────────────────────────┘ + ┌─toInt8OrNull('-8')─┬─toInt8OrNull('abc')─┠+1. 
│ -8 │ á´ºáµá´¸á´¸ │ + └────────────────────┴─────────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256)OrNull +**See also** -It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If unsuccessful, returns `NULL`. +- [`toInt8`](#toint8). +- [`toInt8OrZero`](#toint8orzero). +- [`toInt8OrDefault`](#toint8ordefault). + +## toInt8OrDefault + +Like [`toInt8`](#toint8), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int8`. If unsuccessful, returns the default type value. + +**Syntax** + +```sql +toInt8OrDefault(expr, def) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `def` — The default value to return if parsing to type `Int8` is unsuccessful. [Int8](../data-types/int-uint.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 8-bit integer value if successful, otherwise returns the default value. [Int8](../data-types/int-uint.md). + +:::note +- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: **Example** Query: ``` sql -SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123'); +SELECT + toInt8OrDefault('-8', CAST('-1', 'Int8')), + toInt8OrDefault('abc', CAST('-1', 'Int8')); ``` Result: ```response -┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┠-│ 123123 │ á´ºáµá´¸á´¸ │ -└─────────────────────────┴───────────────────────────┘ + ┌─toInt8OrDefault('-8', CAST('-1', 'Int8'))─┬─toInt8OrDefault('abc', CAST('-1', 'Int8'))─┠+1. │ -8 │ -1 │ + └───────────────────────────────────────────┴────────────────────────────────────────────┘ ``` -## toInt(8\|16\|32\|64\|128\|256)OrDefault +**See also** -It takes an argument of type String and tries to parse it into Int (8 \| 16 \| 32 \| 64 \| 128 \| 256). If unsuccessful, returns the default type value. +- [`toInt8`](#toint8). +- [`toInt8OrZero`](#toint8orzero). +- [`toInt8OrNull`](#toint8orNull). + +## toInt16 + +Converts an input value to a value of type `Int16`. + +**Syntax** + +```sql +toInt16(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 16-bit integer value. [Int16](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. 
+::: + +**Example** + +Query: + +```sql +SELECT + toInt16(-16), + toInt16(-16.16), + toInt16('-16'); +``` + +Result: + +```response + ┌─toInt16(-16)─┬─toInt16(-16.16)─┬─toInt16('-16')─┠+1. │ -16 │ -16 │ -16 │ + └──────────────┴─────────────────┴────────────────┘ +``` + +**See also** + +- [`toInt16OrZero`](#toint16orzero). +- [`toInt16OrNull`](#toint16ornull). +- [`toInt16OrDefault`](#toint16ordefault). + +## toInt16OrZero + +Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int16`. If unsuccessful, returns `0`. + +**Syntax** + +```sql +toInt16OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 16-bit integer value if successful, otherwise `0`. [Int16](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: **Example** Query: ``` sql -SELECT toInt64OrDefault('123123', cast('-1' as Int64)), toInt8OrDefault('123qwe123', cast('-1' as Int8)); +SELECT + toInt16OrZero('-16'), + toInt16OrZero('abc'); ``` Result: ```response -┌─toInt64OrDefault('123123', CAST('-1', 'Int64'))─┬─toInt8OrDefault('123qwe123', CAST('-1', 'Int8'))─┠-│ 123123 │ -1 │ -└─────────────────────────────────────────────────┴──────────────────────────────────────────────────┘ + ┌─toInt16OrZero('-16')─┬─toInt16OrZero('abc')─┠+1. │ -16 │ 0 │ + └──────────────────────┴──────────────────────┘ ``` +**See also** + +- [`toInt16`](#toint16). +- [`toInt16OrNull`](#toint16ornull). +- [`toInt16OrDefault`](#toint16ordefault). + +## toInt16OrNull + +Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int16`. If unsuccessful, returns `NULL`. + +**Syntax** + +```sql +toInt16OrNull(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 16-bit integer value if successful, otherwise `NULL`. [Int16](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt16OrNull('-16'), + toInt16OrNull('abc'); +``` + +Result: + +```response + ┌─toInt16OrNull('-16')─┬─toInt16OrNull('abc')─┠+1. │ -16 │ á´ºáµá´¸á´¸ │ + └──────────────────────┴──────────────────────┘ +``` + +**See also** + +- [`toInt16`](#toint16). 
+- [`toInt16OrZero`](#toint16orzero). +- [`toInt16OrDefault`](#toint16ordefault). + +## toInt16OrDefault + +Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int16`. If unsuccessful, returns the default type value. + +**Syntax** + +```sql +toInt16OrDefault(expr, def) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `def` — The default value to return if parsing to type `Int16` is unsuccessful. [Int8](../data-types/int-uint.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 16-bit integer value if successful, otherwise returns the default value. [Int16](../data-types/int-uint.md). + +:::note +- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT toInt16OrDefault('-16', cast('-1' as Int16)), toInt16OrDefault('abc', cast('-1' as Int16)); +``` + +Result: + +```response + ┌─toInt16OrDefault('-16', CAST('-1', 'Int16'))─┬─toInt16OrDefault('abc', CAST('-1', 'Int16'))─┠+1. │ -16 │ -1 │ + └──────────────────────────────────────────────┴──────────────────────────────────────────────┘ +``` + +**See also** + +- [`toInt16`](#toint16). +- [`toInt16OrZero`](#toint16orzero). +- [`toInt16OrNull`](#toint16ornull). + +## toInt32 + +Converts an input value to a value of type `Int32`. + +**Syntax** + +```sql +toInt32(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 32-bit integer value. [Int32](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +```sql +SELECT + toInt32(-32), + toInt32(-32.32), + toInt32('-32') +``` + +Result: + +```response + ┌─toInt32(-32)─┬─toInt32(-32.32)─┬─toInt32('-32')─┠+1. │ -32 │ -32 │ -32 │ + └──────────────┴─────────────────┴────────────────┘ +``` + +**See also** + +- [`toInt32OrZero`](#toint32orzero). +- [`toInt32OrNull`](#toint32ornull). +- [`toInt32OrDefault`](#toint32ordefault). + +## toInt32OrZero + +Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int32`. If unsuccessful, returns `0`. + +**Syntax** + +```sql +toInt32OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. 
[Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 32-bit integer value if successful, otherwise `0`. [Int32](../data-types/int-uint.md) + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncate fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT toInt32OrZero('-32'), toInt32OrZero('abc'); +``` + +Result: + +```response + ┌─toInt32OrZero('-32')─┬─toInt32OrZero('abc')─┠+1. │ -32 │ 0 │ + └──────────────────────┴──────────────────────┘ +``` +**See also** + +- [`toInt32`](#toint32). +- [`toInt32OrNull`](#toint32ornull). +- [`toInt32OrDefault`](#toint32ordefault). +- +## toInt32OrNull + +Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int32`. If unsuccessful, returns `NULL`. + +**Syntax** + +```sql +toInt32OrNull(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 32-bit integer value if successful, otherwise `NULL`. [Int32](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT toInt32OrNull('-32'), toInt32OrNull('abc'); +``` + +Result: + +```response + ┌─toInt32OrNull('-32')─┬─toInt32OrNull('abc')─┠+1. │ -32 │ á´ºáµá´¸á´¸ │ + └──────────────────────┴──────────────────────┘ +``` + +**See also** + +- [`toInt32`](#toint32). +- [`toInt32OrZero`](#toint32orzero). +- [`toInt32OrDefault`](#toint32ordefault). + +## toInt32OrDefault + +Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int32`. If unsuccessful, returns the default type value. + +**Syntax** + +```sql +toInt32OrDefault(expr, def) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `def` — The default value to return if parsing to type `Int32` is unsuccessful. [Int32](../data-types/int-uint.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 32-bit integer value if successful, otherwise returns the default value. [Int32](../data-types/int-uint.md). 
+ +:::note +- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. + ::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT toInt32OrDefault('-32', cast('-1' as Int32)), toInt32OrDefault('abc', cast('-1' as Int32)); +``` + +Result: + +```response + ┌─toInt32OrDefault('-32', CAST('-1', 'Int32'))─┬─toInt32OrDefault('abc', CAST('-1', 'Int32'))─┠+1. │ -32 │ -1 │ + └──────────────────────────────────────────────┴──────────────────────────────────────────────┘ +``` + +**See also** + +- [`toInt32`](#toint32). +- [`toInt32OrZero`](#toint32orzero). +- [`toInt32OrNull`](#toint32ornull). + +## toInt64 + +Converts an input value to a value of type `Int64`. + +**Syntax** + +```sql +toInt64(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 64-bit integer value. [Int64](../data-types/int-uint.md). [Int64](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +```sql +SELECT + toInt64(-64), + toInt64(-64.64), + toInt64('-64'); +``` + +Result: + +```response + ┌─toInt64(-64)─┬─toInt64(-64.64)─┬─toInt64('-64')─┠+1. │ -64 │ -64 │ -64 │ + └──────────────┴─────────────────┴────────────────┘ +``` + +**See also** + +- [`toInt64OrZero`](#toint64orzero). +- [`toInt64OrNull`](#toint64ornull). +- [`toInt64OrDefault`](#toint64ordefault). + +## toInt64OrZero + +Like [`toInt64`](#toint64), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int64`. If unsuccessful, returns `0`. + +**Syntax** + +```sql +toInt64OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 64-bit integer value if successful, otherwise `0`. [Int64](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt64OrZero('-64'), + toInt64OrZero('abc'); +``` + +Result: + +```response + ┌─toInt64OrZero('-64')─┬─toInt64OrZero('abc')─┠+1. 
│ -64 │ 0 │ + └──────────────────────┴──────────────────────┘ +``` + +**See also** + +- [`toInt64`](#toint64). +- [`toInt64OrNull`](#toint64ornull). +- [`toInt64OrDefault`](#toint64ordefault). + +## toInt64OrNull + +Like [`toInt64`], takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int64`. If unsuccessful, returns `NULL`. + +**Syntax** + +```sql +toInt64OrNull(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- Integer value of type `Int64` if successful, otherwise `NULL`. [Int64](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt64OrNull('-64'), + toInt64OrNull('abc'); +``` + +Result: + +```response + ┌─toInt64OrNull('-64')─┬─toInt64OrNull('abc')─┠+1. │ -64 │ á´ºáµá´¸á´¸ │ + └──────────────────────┴──────────────────────┘ +``` + +**See also** + +- [`toInt64`](#toint64). +- [`toInt64OrZero`](#toint64orzero). +- [`toInt64OrDefault`](#toint64ordefault). + +## toInt64OrDefault + +Like [`toInt64`](#toint64), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int64`. If unsuccessful, returns the default type value. + +**Syntax** + +```sql +toInt64OrDefault(expr, def) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `def` — The default value to return if parsing to type `Int64` is unsuccessful. [Int64](../data-types/int-uint.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- Integer value of type `Int64` if successful, otherwise returns the default value. [Int64](../data-types/int-uint.md). + +:::note +- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. + ::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt64OrDefault('-64', CAST('-1', 'Int64')), + toInt64OrDefault('abc', CAST('-1', 'Int64')); +``` + +Result: + +```response + ┌─toInt64OrDefault('-64', CAST('-1', 'Int64'))─┬─toInt64OrDefault('abc', CAST('-1', 'Int64'))─┠+1. │ -64 │ -1 │ + └──────────────────────────────────────────────┴──────────────────────────────────────────────┘ +``` + +**See also** + +- [`toInt64`](#toint64). +- [`toInt64OrZero`](#toint64orzero). +- [`toInt64OrNull`](#toint64ornull). 
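The three fallback variants above differ only in what they return when parsing fails. A minimal side-by-side sketch (the column aliases are illustrative and not part of the documented examples):

```sql
-- The same unparsable input handled by each fallback flavour of toInt64:
-- the *OrZero variant yields 0, *OrNull yields NULL, *OrDefault yields the supplied default.
SELECT
    toInt64OrZero('abc')                          AS with_zero,
    toInt64OrNull('abc')                          AS with_null,
    toInt64OrDefault('abc', CAST('-1', 'Int64'))  AS with_default;
```

The expected result is `0`, `NULL` and `-1` respectively, matching the individual examples documented above.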
+ +## toInt128 + +Converts an input value to a value of type `Int128`. + +**Syntax** + +```sql +toInt128(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 128-bit integer value. [Int128](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +```sql +SELECT + toInt128(-128), + toInt128(-128.8), + toInt128('-128'), +``` + +Result: + +```response + ┌─toInt128(-128)─┬─toInt128(-128.8)─┬─toInt128('-128')─┠+1. │ -128 │ -128 │ -128 │ + └────────────────┴──────────────────┴──────────────────┘ +``` + +**See also** + +- [`toInt128OrZero`](#toint128orzero). +- [`toInt128OrNull`](#toint128ornull). +- [`toInt128OrDefault`](#toint128ordefault). + +## toInt128OrZero + +Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int128`. If unsuccessful, returns `0`. + +**Syntax** + +```sql +toInt128OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 128-bit integer value if successful, otherwise `0`. [Int128](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt128OrZero('-128'), + toInt128OrZero('abc'); +``` + +Result: + +```response + ┌─toInt128OrZero('-128')─┬─toInt128OrZero('abc')─┠+1. │ -128 │ 0 │ + └────────────────────────┴───────────────────────┘ +``` + +**See also** + +- [`toInt128`](#toint128). +- [`toInt128OrNull`](#toint128ornull). +- [`toInt128OrDefault`](#toint128ordefault). + +## toInt128OrNull + +Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int128`. If unsuccessful, returns `NULL`. + +**Syntax** + +```sql +toInt128OrNull(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 128-bit integer value if successful, otherwise `NULL`. [Int128](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). 
+ +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt128OrNull('-128'), + toInt128OrNull('abc'); +``` + +Result: + +```response + ┌─toInt128OrNull('-128')─┬─toInt128OrNull('abc')─┠+1. │ -128 │ á´ºáµá´¸á´¸ │ + └────────────────────────┴───────────────────────┘ +``` + +**See also** + +- [`toInt128`](#toint128). +- [`toInt128OrZero`](#toint128orzero). +- [`toInt128OrDefault`](#toint128ordefault). + +## toInt128OrDefault + +Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int128`. If unsuccessful, returns the default type value. + +**Syntax** + +```sql +toInt128OrDefault(expr, def) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `def` — The default value to return if parsing to type `Int128` is unsuccessful. [Int128](../data-types/int-uint.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 128-bit integer value if successful, otherwise returns the default value. [Int128](../data-types/int-uint.md). + +:::note +- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt128OrDefault('-128', CAST('-1', 'Int128')), + toInt128OrDefault('abc', CAST('-1', 'Int128')); +``` + +Result: + +```response + ┌─toInt128OrDefault('-128', CAST('-1', 'Int128'))─┬─toInt128OrDefault('abc', CAST('-1', 'Int128'))─┠+1. │ -128 │ -1 │ + └─────────────────────────────────────────────────┴────────────────────────────────────────────────┘ +``` + +**See also** + +- [`toInt128`](#toint128). +- [`toInt128OrZero`](#toint128orzero). +- [`toInt128OrNull`](#toint128ornull). + +## toInt256 + +Converts an input value to a value of type `Int256`. + +**Syntax** + +```sql +toInt256(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 256-bit integer value. [Int256](../data-types/int-uint.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. 
+::: + +**Example** + +Query: + +```sql +SELECT + toInt256(-256), + toInt256(-256.256), + toInt256('-256'); +``` + +Result: + +```response + ┌─toInt256(-256)─┬─toInt256(-256.256)─┬─toInt256('-256')─┠+1. │ -256 │ -256 │ -256 │ + └────────────────┴────────────────────┴──────────────────┘ +``` + +**See also** + +- [`toInt256OrZero`](#toint256orzero). +- [`toInt256OrNull`](#toint256ornull). +- [`toInt256OrDefault`](#toint256ordefault). + +## toInt256OrZero + +Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int256`. If unsuccessful, returns `0`. + +**Syntax** + +```sql +toInt256OrZero(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 256-bit integer value if successful, otherwise `0`. [Int256](../data-types/int-uint.md). + +:::note +Functions uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt256OrZero('-256'), + toInt256OrZero('abc'); +``` + +Result: + +```response + ┌─toInt256OrZero('-256')─┬─toInt256OrZero('abc')─┠+1. │ -256 │ 0 │ + └────────────────────────┴───────────────────────┘ +``` + +**See also** + +- [`toInt256`](#toint256). +- [`toInt256OrNull`](#toint256ornull). +- [`toInt256OrDefault`](#toint256ordefault). + +## toInt256OrNull + +Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int256`. If unsuccessful, returns `NULL`. + +**Syntax** + +```sql +toInt256OrNull(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 256-bit integer value if successful, otherwise `NULL`. [Int256](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). + +:::note +Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt256OrNull('-256'), + toInt256OrNull('abc'); +``` + +Result: + +```response + ┌─toInt256OrNull('-256')─┬─toInt256OrNull('abc')─┠+1. │ -256 │ á´ºáµá´¸á´¸ │ + └────────────────────────┴───────────────────────┘ +``` + +**See also** + +- [`toInt256`](#toint256). +- [`toInt256OrZero`](#toint256orzero). +- [`toInt256OrDefault`](#toint256ordefault). 
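The main reason to reach for the 256-bit variants is input that overflows the narrower integer types. A small sketch, assuming `2^127` (which does not fit into `Int64` or `Int128`) as sample input:

```sql
-- 2^127 is outside the Int64 and Int128 ranges but is well within Int256;
-- for unparsable strings the fallback variants behave as documented above.
SELECT
    toInt256('170141183460469231731687303715884105728') AS two_pow_127,
    toInt256OrZero('abc')                                AS with_zero,
    toInt256OrNull('abc')                                AS with_null;
```

Here `two_pow_127` is returned unchanged, while the fallback columns give `0` and `NULL`.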
+ +## toInt256OrDefault + +Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int256`. If unsuccessful, returns the default type value. + +**Syntax** + +```sql +toInt256OrDefault(expr, def) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `def` — The default value to return if parsing to type `Int256` is unsuccessful. [Int256](../data-types/int-uint.md). + +:::note +Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +::: + +**Returned value** + +- 256-bit integer value if successful, otherwise returns the default value. [Int256](../data-types/int-uint.md). + +:::note +- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The default value type should be the same as the cast type. +::: + +:::danger +An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +::: + +**Example** + +Query: + +``` sql +SELECT + toInt256OrDefault('-256', CAST('-1', 'Int256')), + toInt256OrDefault('abc', CAST('-1', 'Int256')); +``` + +Result: + +```response + ┌─toInt256OrDefault('-256', CAST('-1', 'Int256'))─┬─toInt256OrDefault('abc', CAST('-1', 'Int256'))─┠+1. │ -256 │ -1 │ + └─────────────────────────────────────────────────┴────────────────────────────────────────────────┘ +``` + +**See also** + +- [`toInt256`](#toint256). +- [`toInt256OrZero`](#toint256orzero). +- [`toInt256OrNull`](#toint256ornull). ## toUInt(8\|16\|32\|64\|256) @@ -167,7 +1370,7 @@ Converts an input value to the [UInt](../data-types/int-uint.md) data type. This - Integer value in the `UInt8`, `UInt16`, `UInt32`, `UInt64` or `UInt256` data type. -Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning they truncate fractional digits of numbers. +Functions use [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. The behavior of functions for negative arguments and for the [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments is undefined. If you pass a string with a negative number, for example `'-32'`, ClickHouse raises an exception. Remember about [numeric conversions issues](#common-issues-with-data-conversion), when using the functions. @@ -2289,7 +3492,7 @@ Result: └─────────────────────┴─────────────────┴─────────────────────────────────────┘ ``` -**See Also** +**See also** - [RFC 1123](https://datatracker.ietf.org/doc/html/rfc1123) - [toDate](#todate) From 05874d0b85d78c1067c1db5332c7cc74b94d88cd Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Jul 2024 15:37:55 +0000 Subject: [PATCH 0470/2361] Fixing style. 
--- src/Columns/ColumnDecimal.cpp | 1 - src/Columns/ColumnNullable.cpp | 2 +- src/Columns/ColumnSparse.cpp | 18 ------------------ src/Columns/ColumnSparse.h | 3 --- 4 files changed, 1 insertion(+), 23 deletions(-) diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp index ed9c699a841..e27807950ae 100644 --- a/src/Columns/ColumnDecimal.cpp +++ b/src/Columns/ColumnDecimal.cpp @@ -28,7 +28,6 @@ namespace ErrorCodes extern const int PARAMETER_OUT_OF_BOUND; extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; extern const int NOT_IMPLEMENTED; - extern const int LOGICAL_ERROR; } template diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 64e99a3bbe8..867c9149242 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -65,7 +65,7 @@ WeakHash32 ColumnNullable::getWeakHash32() const const auto & null_map_data = getNullMapData(); auto & hash_data = hash.getData(); - /// Use defualt for nulls. + /// Use default for nulls. for (size_t row = 0; row < s; ++row) if (null_map_data[row]) hash_data[row] = WeakHash32::kDefaultInitialValue; diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp index 0d103a263dd..8f98a4433d3 100644 --- a/src/Columns/ColumnSparse.cpp +++ b/src/Columns/ColumnSparse.cpp @@ -809,24 +809,6 @@ size_t ColumnSparse::getValueIndex(size_t n) const return it - offsets_data.begin() + 1; } -size_t ColumnSparse::getFirstDefaultValueIndex() const -{ - if (getNumberOfDefaultRows() == 0) - return size(); - - const auto & offsets_data = getOffsetsData(); - size_t off_size = offsets_data.size(); - - if (off_size == 0 || offsets_data[0] > 0) - return 0; - - size_t idx = 0; - while (idx + 1 < off_size && offsets_data[idx] + 1 == offsets_data[idx + 1]) - ++idx; - - return offsets_data[idx] + 1; -} - ColumnSparse::Iterator ColumnSparse::getIterator(size_t n) const { const auto & offsets_data = getOffsetsData(); diff --git a/src/Columns/ColumnSparse.h b/src/Columns/ColumnSparse.h index a5d4d788b17..392a6910956 100644 --- a/src/Columns/ColumnSparse.h +++ b/src/Columns/ColumnSparse.h @@ -173,9 +173,6 @@ public: /// O(log(offsets.size())) complexity, size_t getValueIndex(size_t n) const; - /// Returns an index of the first default value, or size() if there is no defaults. 
- size_t getFirstDefaultValueIndex() const; - const IColumn & getValuesColumn() const { return *values; } IColumn & getValuesColumn() { return *values; } From 5608914bca8b36920f8012fa48b6617512629cfe Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 17 Jul 2024 16:59:31 +0100 Subject: [PATCH 0471/2361] impl --- base/base/defines.h | 15 +++++++++++---- src/Analyzer/QueryTreePassManager.cpp | 6 +++--- src/Columns/ColumnAggregateFunction.cpp | 4 ++-- src/Columns/ColumnAggregateFunction.h | 6 +++--- src/Columns/ColumnArray.cpp | 6 +++--- src/Columns/ColumnArray.h | 6 +++--- src/Columns/ColumnCompressed.h | 4 ++-- src/Columns/ColumnConst.h | 8 ++++---- src/Columns/ColumnDecimal.cpp | 4 ++-- src/Columns/ColumnDecimal.h | 8 ++++---- src/Columns/ColumnDynamic.cpp | 8 ++++---- src/Columns/ColumnDynamic.h | 4 ++-- src/Columns/ColumnFixedString.cpp | 6 +++--- src/Columns/ColumnFixedString.h | 8 ++++---- src/Columns/ColumnFunction.cpp | 4 ++-- src/Columns/ColumnFunction.h | 6 +++--- src/Columns/ColumnLowCardinality.cpp | 6 +++--- src/Columns/ColumnLowCardinality.h | 6 +++--- src/Columns/ColumnMap.cpp | 8 ++++---- src/Columns/ColumnMap.h | 4 ++-- src/Columns/ColumnNullable.cpp | 8 ++++---- src/Columns/ColumnNullable.h | 6 +++--- src/Columns/ColumnObject.cpp | 4 ++-- src/Columns/ColumnObject.h | 4 ++-- src/Columns/ColumnSparse.cpp | 6 +++--- src/Columns/ColumnSparse.h | 6 +++--- src/Columns/ColumnString.cpp | 4 ++-- src/Columns/ColumnString.h | 8 ++++---- src/Columns/ColumnTuple.cpp | 8 ++++---- src/Columns/ColumnTuple.h | 6 +++--- src/Columns/ColumnUnique.h | 4 ++-- src/Columns/ColumnVariant.cpp | 8 ++++---- src/Columns/ColumnVariant.h | 4 ++-- src/Columns/ColumnVector.cpp | 2 +- src/Columns/ColumnVector.h | 8 ++++---- src/Columns/IColumn.cpp | 2 +- src/Columns/IColumn.h | 10 +++++----- src/Columns/IColumnDummy.h | 6 +++--- src/Columns/IColumnUnique.h | 2 +- .../benchmark_column_insert_many_from.cpp | 2 +- .../Config/AbstractConfigurationComparison.cpp | 2 +- src/Common/MemoryTracker.cpp | 2 +- src/Common/PageCache.cpp | 2 +- src/Common/assert_cast.h | 2 +- src/Common/tests/gtest_rw_lock.cpp | 2 +- src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 6 +++--- src/IO/tests/gtest_memory_resize.cpp | 6 +++--- src/IO/tests/gtest_writebuffer_s3.cpp | 4 ++-- src/Interpreters/Cache/FileCache.cpp | 8 ++++---- src/Interpreters/Cache/FileSegment.cpp | 6 +++--- src/Interpreters/Cache/Metadata.cpp | 2 +- src/Interpreters/executeDDLQueryOnCluster.cpp | 2 +- .../gtest_exception_on_incorrect_pipeline.cpp | 2 +- .../tests/gtest_check_sorted_stream.cpp | 8 ++++---- src/Server/TCPHandler.cpp | 2 +- src/Storages/MaterializedView/RefreshTask.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 4 ++-- 57 files changed, 152 insertions(+), 145 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 2fc54c37bde..cf3d357da18 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -87,10 +87,17 @@ # define ASAN_POISON_MEMORY_REGION(a, b) #endif -#if !defined(ABORT_ON_LOGICAL_ERROR) - #if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || defined(UNDEFINED_BEHAVIOR_SANITIZER) - #define ABORT_ON_LOGICAL_ERROR - #endif +/// We used to have only ABORT_ON_LOGICAL_ERROR macro, but most of its uses were actually in places where we didn't care about logical errors +/// but wanted to check exactly if the current build type is debug or with sanitizer. This new macro is introduced to fix those places. 
+#if !defined(DEBUG_OR_SANITIZER_BUILD) +# if !defined(NDEBUG) || defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) \ + || defined(UNDEFINED_BEHAVIOR_SANITIZER) +# define DEBUG_OR_SANITIZER_BUILD +# endif +#endif + +#if !defined(ABORT_ON_LOGICAL_ERROR) && defined(DEBUG_OR_SANITIZER_BUILD) +# define ABORT_ON_LOGICAL_ERROR #endif /// chassert(x) is similar to assert(x), but: diff --git a/src/Analyzer/QueryTreePassManager.cpp b/src/Analyzer/QueryTreePassManager.cpp index f7919b6422c..4443f83596f 100644 --- a/src/Analyzer/QueryTreePassManager.cpp +++ b/src/Analyzer/QueryTreePassManager.cpp @@ -62,7 +62,7 @@ namespace ErrorCodes namespace { -#if defined(ABORT_ON_LOGICAL_ERROR) +#if defined(DEBUG_OR_SANITIZER_BUILD) /** This visitor checks if Query Tree structure is valid after each pass * in debug build. @@ -183,7 +183,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node) for (size_t i = 0; i < passes_size; ++i) { passes[i]->run(query_tree_node, current_context); -#if defined(ABORT_ON_LOGICAL_ERROR) +#if defined(DEBUG_OR_SANITIZER_BUILD) ValidationChecker(passes[i]->getName()).visit(query_tree_node); #endif } @@ -208,7 +208,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node, size_t up_to_pa for (size_t i = 0; i < up_to_pass_index; ++i) { passes[i]->run(query_tree_node, current_context); -#if defined(ABORT_ON_LOGICAL_ERROR) +#if defined(DEBUG_OR_SANITIZER_BUILD) ValidationChecker(passes[i]->getName()).visit(query_tree_node); #endif } diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp index cfd07c27765..955981a972d 100644 --- a/src/Columns/ColumnAggregateFunction.cpp +++ b/src/Columns/ColumnAggregateFunction.cpp @@ -267,7 +267,7 @@ bool ColumnAggregateFunction::structureEquals(const IColumn & to) const } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnAggregateFunction::insertRangeFrom(const IColumn & from, size_t start, size_t length) #else void ColumnAggregateFunction::doInsertRangeFrom(const IColumn & from, size_t start, size_t length) @@ -466,7 +466,7 @@ void ColumnAggregateFunction::insertFromWithOwnership(const IColumn & from, size insertMergeFrom(from, n); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n) #else void ColumnAggregateFunction::doInsertFrom(const IColumn & from, size_t n) diff --git a/src/Columns/ColumnAggregateFunction.h b/src/Columns/ColumnAggregateFunction.h index 1be7a862438..fe678fc1eaa 100644 --- a/src/Columns/ColumnAggregateFunction.h +++ b/src/Columns/ColumnAggregateFunction.h @@ -145,7 +145,7 @@ public: void insertData(const char * pos, size_t length) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & from, size_t n) override; #else using IColumn::insertFrom; @@ -189,7 +189,7 @@ public: void protect() override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & from, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & from, size_t start, size_t length) override; @@ -212,7 +212,7 @@ public: MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn &, int) const override #else int doCompareAt(size_t, 
size_t, const IColumn &, int) const override diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 5d7350f3a79..598d501a2b8 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -337,7 +337,7 @@ bool ColumnArray::tryInsert(const Field & x) return true; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnArray::insertFrom(const IColumn & src_, size_t n) #else void ColumnArray::doInsertFrom(const IColumn & src_, size_t n) @@ -396,7 +396,7 @@ int ColumnArray::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int nan : 1); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const #else int ColumnArray::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const @@ -543,7 +543,7 @@ void ColumnArray::getExtremes(Field & min, Field & max) const } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnArray::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index 6cd3e2f6c3b..6f735fe9dc3 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -84,14 +84,14 @@ public: void updateHashWithValue(size_t n, SipHash & hash) const override; void updateWeakHash32(WeakHash32 & hash) const override; void updateHashFast(SipHash & hash) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; #endif void insert(const Field & x) override; bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t n) override; #else void doInsertFrom(const IColumn & src_, size_t n) override; @@ -103,7 +103,7 @@ public: ColumnPtr permute(const Permutation & perm, size_t limit) const override; ColumnPtr index(const IColumn & indexes, size_t limit) const override; template ColumnPtr indexImpl(const PaddedPODArray & indexes, size_t limit) const; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnCompressed.h b/src/Columns/ColumnCompressed.h index 5e455709fec..10649602920 100644 --- a/src/Columns/ColumnCompressed.h +++ b/src/Columns/ColumnCompressed.h @@ -85,7 +85,7 @@ public: bool isDefaultAt(size_t) const override { throwMustBeDecompressed(); } void insert(const Field &) override { throwMustBeDecompressed(); } bool tryInsert(const Field &) override { throwMustBeDecompressed(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); } #else void doInsertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); } @@ -104,7 +104,7 @@ public: void expand(const Filter &, bool) override { throwMustBeDecompressed(); } ColumnPtr permute(const Permutation &, size_t) const override { 
throwMustBeDecompressed(); } ColumnPtr index(const IColumn &, size_t) const override { throwMustBeDecompressed(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); } #else int doCompareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); } diff --git a/src/Columns/ColumnConst.h b/src/Columns/ColumnConst.h index b55a1f42037..e419dbd2c4c 100644 --- a/src/Columns/ColumnConst.h +++ b/src/Columns/ColumnConst.h @@ -123,7 +123,7 @@ public: return data->isNullAt(0); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override #else void doInsertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override @@ -151,7 +151,7 @@ public: ++s; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn &, size_t) override #else void doInsertFrom(const IColumn &, size_t) override @@ -160,7 +160,7 @@ public: ++s; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; } #else void doInsertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; } @@ -237,7 +237,7 @@ public: return data->allocatedBytes() + sizeof(s); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override #else int doCompareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp index cf413f790a7..8e1a96b6ed2 100644 --- a/src/Columns/ColumnDecimal.cpp +++ b/src/Columns/ColumnDecimal.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes } template -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnDecimal::compareAt(size_t n, size_t m, const IColumn & rhs_, int) const #else int ColumnDecimal::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int) const @@ -335,7 +335,7 @@ void ColumnDecimal::insertData(const char * src, size_t /*length*/) } template -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnDecimal::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnDecimal::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 32efeb643a6..3985a667135 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -55,13 +55,13 @@ public: void reserve(size_t n) override { data.reserve_exact(n); } void shrinkToFit() override { data.shrink_to_fit(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast(src).getData()[n]); } #else void doInsertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast(src).getData()[n]); } #endif -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertManyFrom(const IColumn & src, size_t position, size_t length) override #else void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override @@ -76,7 +76,7 @@ public: void insertManyDefaults(size_t length) 
override { data.resize_fill(data.size() + length); } void insert(const Field & x) override { data.push_back(x.get()); } bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -104,7 +104,7 @@ public: void updateHashWithValue(size_t n, SipHash & hash) const override; void updateWeakHash32(WeakHash32 & hash) const override; void updateHashFast(SipHash & hash) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index c735238f515..a92d54dd675 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -215,7 +215,7 @@ bool ColumnDynamic::tryInsert(const DB::Field & x) } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n) #else void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) @@ -269,7 +269,7 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size_t length) #else void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, size_t length) @@ -439,7 +439,7 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si } } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnDynamic::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length) #else void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length) @@ -603,7 +603,7 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const #else int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 9abddc7a26d..fa8ec55c60a 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -143,7 +143,7 @@ public: void insert(const Field & x) override; bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t n) override; void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; void insertManyFrom(const IColumn & src, size_t position, size_t length) override; @@ -220,7 +220,7 @@ public: return scattered_columns; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs, 
int nan_direction_hint) const override; diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index 1c2de203a94..db697f6372b 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -74,7 +74,7 @@ bool ColumnFixedString::tryInsert(const Field & x) return true; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnFixedString::insertFrom(const IColumn & src_, size_t index) #else void ColumnFixedString::doInsertFrom(const IColumn & src_, size_t index) @@ -90,7 +90,7 @@ void ColumnFixedString::doInsertFrom(const IColumn & src_, size_t index) memcpySmallAllowReadWriteOverflow15(chars.data() + old_size, &src.chars[n * index], n); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnFixedString::insertManyFrom(const IColumn & src, size_t position, size_t length) #else void ColumnFixedString::doInsertManyFrom(const IColumn & src, size_t position, size_t length) @@ -227,7 +227,7 @@ size_t ColumnFixedString::estimateCardinalityInPermutedRange(const Permutation & return elements.size(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnFixedString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h index 6e88136fc50..6e7ff488f9a 100644 --- a/src/Columns/ColumnFixedString.h +++ b/src/Columns/ColumnFixedString.h @@ -98,13 +98,13 @@ public: bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t index) override; #else void doInsertFrom(const IColumn & src_, size_t index) override; #endif -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertManyFrom(const IColumn & src, size_t position, size_t length) override; #else void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override; @@ -137,7 +137,7 @@ public: void updateHashFast(SipHash & hash) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override #else int doCompareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override @@ -156,7 +156,7 @@ public: size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; diff --git a/src/Columns/ColumnFunction.cpp b/src/Columns/ColumnFunction.cpp index fa57f35a823..fc81efaac0c 100644 --- a/src/Columns/ColumnFunction.cpp +++ b/src/Columns/ColumnFunction.cpp @@ -72,7 +72,7 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnFunction::insertFrom(const IColumn & src, size_t n) #else void ColumnFunction::doInsertFrom(const IColumn & src, size_t n) @@ -93,7 +93,7 @@ void ColumnFunction::doInsertFrom(const IColumn 
& src, size_t n) ++elements_size; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnFunction::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnFunction::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnFunction.h b/src/Columns/ColumnFunction.h index ba924c49a82..dfc592ab281 100644 --- a/src/Columns/ColumnFunction.h +++ b/src/Columns/ColumnFunction.h @@ -94,12 +94,12 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot insert into {}", getName()); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override; #else void doInsertFrom(const IColumn & src, size_t n) override; #endif -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn &, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn &, size_t start, size_t length) override; @@ -145,7 +145,7 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "popBack is not implemented for {}", getName()); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn &, int) const override #else int doCompareAt(size_t, size_t, const IColumn &, int) const override diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp index eb694a10b0f..237e157f420 100644 --- a/src/Columns/ColumnLowCardinality.cpp +++ b/src/Columns/ColumnLowCardinality.cpp @@ -159,7 +159,7 @@ void ColumnLowCardinality::insertDefault() idx.insertPosition(getDictionary().getDefaultValueIndex()); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n) #else void ColumnLowCardinality::doInsertFrom(const IColumn & src, size_t n) @@ -191,7 +191,7 @@ void ColumnLowCardinality::insertFromFullColumn(const IColumn & src, size_t n) idx.insertPosition(getDictionary().uniqueInsertFrom(src, n)); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnLowCardinality::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -372,7 +372,7 @@ int ColumnLowCardinality::compareAtImpl(size_t n, size_t m, const IColumn & rhs, return getDictionary().compareAt(n_index, m_index, low_cardinality_column.getDictionary(), nan_direction_hint); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnLowCardinality::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else int ColumnLowCardinality::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h index e99be07cd8d..1b74518037f 100644 --- a/src/Columns/ColumnLowCardinality.h +++ b/src/Columns/ColumnLowCardinality.h @@ -78,14 +78,14 @@ public: bool tryInsert(const Field & x) override; void insertDefault() override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override; #else void doInsertFrom(const IColumn & src, size_t n) override; #endif void insertFromFullColumn(const IColumn & src, size_t n); -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void 
insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -135,7 +135,7 @@ public: return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().index(indexes_, limit)); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 2dffddb2dc9..77bf8802ab4 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -153,7 +153,7 @@ void ColumnMap::updateHashFast(SipHash & hash) const nested->updateHashFast(hash); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnMap::insertFrom(const IColumn & src, size_t n) #else void ColumnMap::doInsertFrom(const IColumn & src, size_t n) @@ -162,7 +162,7 @@ void ColumnMap::doInsertFrom(const IColumn & src, size_t n) nested->insertFrom(assert_cast(src).getNestedColumn(), n); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnMap::insertManyFrom(const IColumn & src, size_t position, size_t length) #else void ColumnMap::doInsertManyFrom(const IColumn & src, size_t position, size_t length) @@ -171,7 +171,7 @@ void ColumnMap::doInsertManyFrom(const IColumn & src, size_t position, size_t le assert_cast(*nested).insertManyFrom(assert_cast(src).getNestedColumn(), position, length); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnMap::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnMap::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -222,7 +222,7 @@ MutableColumns ColumnMap::scatter(ColumnIndex num_columns, const Selector & sele return res; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnMap::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else int ColumnMap::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index a54071a2974..592f83732df 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -67,7 +67,7 @@ public: void updateWeakHash32(WeakHash32 & hash) const override; void updateHashFast(SipHash & hash) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t n) override; void insertManyFrom(const IColumn & src, size_t position, size_t length) override; void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -83,7 +83,7 @@ public: ColumnPtr index(const IColumn & indexes, size_t limit) const override; ColumnPtr replicate(const Offsets & offsets) const override; MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index f060e74b315..11447a7966e 100644 --- 
a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -221,7 +221,7 @@ const char * ColumnNullable::skipSerializedInArena(const char * pos) const return pos; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnNullable::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnNullable::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -262,7 +262,7 @@ bool ColumnNullable::tryInsert(const Field & x) return true; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnNullable::insertFrom(const IColumn & src, size_t n) #else void ColumnNullable::doInsertFrom(const IColumn & src, size_t n) @@ -274,7 +274,7 @@ void ColumnNullable::doInsertFrom(const IColumn & src, size_t n) } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnNullable::insertManyFrom(const IColumn & src, size_t position, size_t length) #else void ColumnNullable::doInsertManyFrom(const IColumn & src, size_t position, size_t length) @@ -414,7 +414,7 @@ int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int return getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const #else int ColumnNullable::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index a6d0483e527..cf7efba6235 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -69,7 +69,7 @@ public: char * serializeValueIntoMemory(size_t n, char * memory) const override; const char * deserializeAndInsertFromArena(const char * pos) override; const char * skipSerializedInArena(const char * pos) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -77,7 +77,7 @@ public: void insert(const Field & x) override; bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override; void insertManyFrom(const IColumn & src, size_t position, size_t length) override; #else @@ -100,7 +100,7 @@ public: void expand(const Filter & mask, bool inverted) override; ColumnPtr permute(const Permutation & perm, size_t limit) const override; ColumnPtr index(const IColumn & indexes, size_t limit) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index adcd42b16e9..39e587368fe 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -763,7 +763,7 @@ void ColumnObject::get(size_t n, Field & res) const } } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnObject::insertFrom(const IColumn & src, size_t n) #else void ColumnObject::doInsertFrom(const IColumn & src, size_t n) @@ -772,7 +772,7 @@ void 
ColumnObject::doInsertFrom(const IColumn & src, size_t n) insert(src[n]); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index fadf2e18779..09c28d460e4 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -210,7 +210,7 @@ public: bool tryInsert(const Field & field) override; void insertDefault() override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override; void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else @@ -235,7 +235,7 @@ public: /// Order of rows in ColumnObject is undefined. void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override; void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } #else int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; } diff --git a/src/Columns/ColumnSparse.cpp b/src/Columns/ColumnSparse.cpp index 809586d8810..49c953c5aab 100644 --- a/src/Columns/ColumnSparse.cpp +++ b/src/Columns/ColumnSparse.cpp @@ -174,7 +174,7 @@ const char * ColumnSparse::skipSerializedInArena(const char * pos) const return values->skipSerializedInArena(pos); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnSparse::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnSparse::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -252,7 +252,7 @@ bool ColumnSparse::tryInsert(const Field & x) return true; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnSparse::insertFrom(const IColumn & src, size_t n) #else void ColumnSparse::doInsertFrom(const IColumn & src, size_t n) @@ -454,7 +454,7 @@ ColumnPtr ColumnSparse::indexImpl(const PaddedPODArray & indexes, size_t l return ColumnSparse::create(std::move(res_values), std::move(res_offsets), limit); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnSparse::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const #else int ColumnSparse::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const diff --git a/src/Columns/ColumnSparse.h b/src/Columns/ColumnSparse.h index 3e34d1de94a..732fad292af 100644 --- a/src/Columns/ColumnSparse.h +++ b/src/Columns/ColumnSparse.h @@ -81,14 +81,14 @@ public: char * serializeValueIntoMemory(size_t n, char * memory) const override; const char * deserializeAndInsertFromArena(const char * pos) override; const char * skipSerializedInArena(const char *) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; #endif void insert(const Field & x) override; bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if 
!defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override; #else void doInsertFrom(const IColumn & src, size_t n) override; @@ -106,7 +106,7 @@ public: template ColumnPtr indexImpl(const PaddedPODArray & indexes, size_t limit) const; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override; diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 1eda9714d62..37a0d6b31a2 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -39,7 +39,7 @@ ColumnString::ColumnString(const ColumnString & src) last_offset, chars.size()); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnString::insertManyFrom(const IColumn & src, size_t position, size_t length) #else void ColumnString::doInsertManyFrom(const IColumn & src, size_t position, size_t length) @@ -133,7 +133,7 @@ void ColumnString::updateWeakHash32(WeakHash32 & hash) const } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h index 602ffac65e8..157ca9fc9cd 100644 --- a/src/Columns/ColumnString.h +++ b/src/Columns/ColumnString.h @@ -142,7 +142,7 @@ public: return true; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t n) override #else void doInsertFrom(const IColumn & src_, size_t n) override @@ -169,7 +169,7 @@ public: } } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertManyFrom(const IColumn & src, size_t position, size_t length) override; #else void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override; @@ -220,7 +220,7 @@ public: hash.update(reinterpret_cast(chars.data()), chars.size() * sizeof(chars[0])); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -250,7 +250,7 @@ public: offsets.push_back(offsets.back() + 1); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override #else int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 9b822d7f570..50e698f57c2 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -205,7 +205,7 @@ bool ColumnTuple::tryInsert(const Field & x) return true; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnTuple::insertFrom(const IColumn & src_, size_t n) #else void ColumnTuple::doInsertFrom(const IColumn & src_, size_t n) @@ -222,7 +222,7 @@ void ColumnTuple::doInsertFrom(const IColumn & src_, size_t n) columns[i]->insertFrom(*src.columns[i], n); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnTuple::insertManyFrom(const 
IColumn & src, size_t position, size_t length) #else void ColumnTuple::doInsertManyFrom(const IColumn & src, size_t position, size_t length) @@ -326,7 +326,7 @@ void ColumnTuple::updateHashFast(SipHash & hash) const column->updateHashFast(hash); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnTuple::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -482,7 +482,7 @@ int ColumnTuple::compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_ return 0; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else int ColumnTuple::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 38e479791d4..71a47e58401 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -66,7 +66,7 @@ public: void insert(const Field & x) override; bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t n) override; void insertManyFrom(const IColumn & src, size_t position, size_t length) override; #else @@ -83,7 +83,7 @@ public: void updateHashWithValue(size_t n, SipHash & hash) const override; void updateWeakHash32(WeakHash32 & hash) const override; void updateHashFast(SipHash & hash) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -94,7 +94,7 @@ public: ColumnPtr index(const IColumn & indexes, size_t limit) const override; ColumnPtr replicate(const Offsets & offsets) const override; MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index ec1f8e0a4d5..d6cb75679be 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -90,7 +90,7 @@ public: return getNestedColumn()->updateHashWithValue(n, hash_func); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; @@ -492,7 +492,7 @@ const char * ColumnUnique::skipSerializedInArena(const char *) const } template -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnUnique::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else int ColumnUnique::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index ee5de4c2dde..18e9cffd61a 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -595,7 +595,7 @@ void ColumnVariant::insertManyFromImpl(const 
DB::IColumn & src_, size_t position } } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnVariant::insertFrom(const IColumn & src_, size_t n) #else void ColumnVariant::doInsertFrom(const IColumn & src_, size_t n) @@ -604,7 +604,7 @@ void ColumnVariant::doInsertFrom(const IColumn & src_, size_t n) insertFromImpl(src_, n, nullptr); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length) #else void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) @@ -613,7 +613,7 @@ void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t insertRangeFromImpl(src_, start, length, nullptr); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length) #else void ColumnVariant::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length) @@ -1186,7 +1186,7 @@ bool ColumnVariant::hasEqualValues() const return local_discriminators->hasEqualValues() && variants[localDiscriminatorAt(0)]->hasEqualValues(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int ColumnVariant::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else int ColumnVariant::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index d91b8e93a7d..2dbe1494823 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -180,7 +180,7 @@ public: void insert(const Field & x) override; bool tryInsert(const Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src_, size_t n) override; void insertRangeFrom(const IColumn & src_, size_t start, size_t length) override; void insertManyFrom(const IColumn & src_, size_t position, size_t length) override; @@ -223,7 +223,7 @@ public: ColumnPtr indexImpl(const PaddedPODArray & indexes, size_t limit) const; ColumnPtr replicate(const Offsets & replicate_offsets) const override; MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; #else int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override; diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp index 19d1b800961..d5b7786a702 100644 --- a/src/Columns/ColumnVector.cpp +++ b/src/Columns/ColumnVector.cpp @@ -503,7 +503,7 @@ bool ColumnVector::tryInsert(const DB::Field & x) } template -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnVector::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnVector::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 3a0acf5e312..57c108fff23 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -64,7 +64,7 @@ public: return data.size(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override #else void doInsertFrom(const IColumn & 
src, size_t n) override @@ -73,7 +73,7 @@ public: data.push_back(assert_cast(src).getData()[n]); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertManyFrom(const IColumn & src, size_t position, size_t length) override #else void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override @@ -150,7 +150,7 @@ public: } /// This method implemented in header because it could be possibly devirtualized. -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override #else int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override @@ -240,7 +240,7 @@ public: bool tryInsert(const DB::Field & x) override; -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; #else void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; diff --git a/src/Columns/IColumn.cpp b/src/Columns/IColumn.cpp index 552e52cf51c..a189903b617 100644 --- a/src/Columns/IColumn.cpp +++ b/src/Columns/IColumn.cpp @@ -46,7 +46,7 @@ String IColumn::dumpStructure() const return res.str(); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void IColumn::insertFrom(const IColumn & src, size_t n) #else void IColumn::doInsertFrom(const IColumn & src, size_t n) diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index 4b6f34e5aa2..005cb314201 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -179,7 +179,7 @@ public: /// Appends n-th element from other column with the same type. /// Is used in merge-sort and merges. It could be implemented in inherited classes more optimally than default implementation. -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) virtual void insertFrom(const IColumn & src, size_t n); #else void insertFrom(const IColumn & src, size_t n) @@ -191,7 +191,7 @@ public: /// Appends range of elements from other column with the same type. /// Could be used to concatenate columns. -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) virtual void insertRangeFrom(const IColumn & src, size_t start, size_t length) = 0; #else void insertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -202,7 +202,7 @@ public: #endif /// Appends one element from other column with the same type multiple times. -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) virtual void insertManyFrom(const IColumn & src, size_t position, size_t length) { for (size_t i = 0; i < length; ++i) @@ -345,7 +345,7 @@ public: * * For non Nullable and non floating point types, nan_direction_hint is ignored. 
*/ -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) [[nodiscard]] virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0; #else [[nodiscard]] int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const @@ -667,7 +667,7 @@ protected: Sort full_sort, PartialSort partial_sort) const; -#if defined(ABORT_ON_LOGICAL_ERROR) +#if defined(DEBUG_OR_SANITIZER_BUILD) virtual void doInsertFrom(const IColumn & src, size_t n); virtual void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) = 0; diff --git a/src/Columns/IColumnDummy.h b/src/Columns/IColumnDummy.h index c19fb704d9b..db21ec05aae 100644 --- a/src/Columns/IColumnDummy.h +++ b/src/Columns/IColumnDummy.h @@ -26,7 +26,7 @@ public: size_t byteSize() const override { return 0; } size_t byteSizeAt(size_t) const override { return 0; } size_t allocatedBytes() const override { return 0; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } #else int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; } @@ -71,7 +71,7 @@ public: { } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn &, size_t) override #else void doInsertFrom(const IColumn &, size_t) override @@ -80,7 +80,7 @@ public: ++s; } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override #else void doInsertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override diff --git a/src/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h index 3398452b7ee..1faabe55772 100644 --- a/src/Columns/IColumnUnique.h +++ b/src/Columns/IColumnUnique.h @@ -85,7 +85,7 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method tryInsert is not supported for ColumnUnique."); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn &, size_t, size_t) override #else void doInsertRangeFrom(const IColumn &, size_t, size_t) override diff --git a/src/Columns/benchmarks/benchmark_column_insert_many_from.cpp b/src/Columns/benchmarks/benchmark_column_insert_many_from.cpp index 645f6ed79f3..240099f0ae5 100644 --- a/src/Columns/benchmarks/benchmark_column_insert_many_from.cpp +++ b/src/Columns/benchmarks/benchmark_column_insert_many_from.cpp @@ -52,7 +52,7 @@ static ColumnPtr mockColumn(const DataTypePtr & type, size_t rows) } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) static NO_INLINE void insertManyFrom(IColumn & dst, const IColumn & src) #else static NO_INLINE void doInsertManyFrom(IColumn & dst, const IColumn & src) diff --git a/src/Common/Config/AbstractConfigurationComparison.cpp b/src/Common/Config/AbstractConfigurationComparison.cpp index 607b583cf31..80c837ed43b 100644 --- a/src/Common/Config/AbstractConfigurationComparison.cpp +++ b/src/Common/Config/AbstractConfigurationComparison.cpp @@ -38,7 +38,7 @@ namespace std::erase_if(left_subkeys, [&](const String & key) { return ignore_keys->contains(key); }); std::erase_if(right_subkeys, [&](const String & key) { return ignore_keys->contains(key); }); -#if defined(ABORT_ON_LOGICAL_ERROR) +#if defined(DEBUG_OR_SANITIZER_BUILD) /// Compound `ignore_keys` are not yet implemented. 
for (const auto & ignore_key : *ignore_keys) chassert(ignore_key.find('.') == std::string_view::npos); diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp index 28cfa98666a..ac412684cf7 100644 --- a/src/Common/MemoryTracker.cpp +++ b/src/Common/MemoryTracker.cpp @@ -192,7 +192,7 @@ void MemoryTracker::debugLogBigAllocationWithoutCheck(Int64 size [[maybe_unused] { /// Big allocations through allocNoThrow (without checking memory limits) may easily lead to OOM (and it's hard to debug). /// Let's find them. -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD if (size < 0) return; diff --git a/src/Common/PageCache.cpp b/src/Common/PageCache.cpp index 56bd8c1a339..d719a387e14 100644 --- a/src/Common/PageCache.cpp +++ b/src/Common/PageCache.cpp @@ -424,7 +424,7 @@ static void logUnexpectedSyscallError(std::string name) { std::string message = fmt::format("{} failed: {}", name, errnoToString()); LOG_WARNING(&Poco::Logger::get("PageCache"), "{}", message); -#if defined(ABORT_ON_LOGICAL_ERROR) +#if defined(DEBUG_OR_SANITIZER_BUILD) volatile bool true_ = true; if (true_) // suppress warning about missing [[noreturn]] abortOnFailedAssertion(message); diff --git a/src/Common/assert_cast.h b/src/Common/assert_cast.h index f9d0bf0e595..7a04372ffad 100644 --- a/src/Common/assert_cast.h +++ b/src/Common/assert_cast.h @@ -25,7 +25,7 @@ namespace DB template inline To assert_cast(From && from) { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD try { if constexpr (std::is_pointer_v) diff --git a/src/Common/tests/gtest_rw_lock.cpp b/src/Common/tests/gtest_rw_lock.cpp index d8c6e9cb99d..9b0c9aeafbe 100644 --- a/src/Common/tests/gtest_rw_lock.cpp +++ b/src/Common/tests/gtest_rw_lock.cpp @@ -166,7 +166,7 @@ TEST(Common, RWLockRecursive) auto lock2 = fifo_lock->getLock(RWLockImpl::Read, "q2"); -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD /// It throws LOGICAL_ERROR EXPECT_ANY_THROW({fifo_lock->getLock(RWLockImpl::Write, "q2");}); #endif diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index e9c642666d3..198f6c0ea04 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -59,7 +59,7 @@ CachedOnDiskReadBufferFromFile::CachedOnDiskReadBufferFromFile( std::optional read_until_position_, std::shared_ptr cache_log_) : ReadBufferFromFileBase(use_external_buffer_ ? 
0 : settings_.remote_fs_buffer_size, nullptr, 0, file_size_) -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD , log(getLogger(fmt::format("CachedOnDiskReadBufferFromFile({})", cache_key_))) #else , log(getLogger("CachedOnDiskReadBufferFromFile")) @@ -452,7 +452,7 @@ CachedOnDiskReadBufferFromFile::getImplementationBuffer(FileSegment & file_segme { case ReadType::CACHED: { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD size_t file_size = getFileSizeFromReadBuffer(*read_buffer_for_file_segment); if (file_size == 0 || range.left + file_size <= file_offset_of_buffer_end) throw Exception( @@ -937,7 +937,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() if (!result) { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD if (read_type == ReadType::CACHED) { size_t cache_file_size = getFileSizeFromReadBuffer(*implementation_buffer); diff --git a/src/IO/tests/gtest_memory_resize.cpp b/src/IO/tests/gtest_memory_resize.cpp index d760a948075..c3b34c352b2 100644 --- a/src/IO/tests/gtest_memory_resize.cpp +++ b/src/IO/tests/gtest_memory_resize.cpp @@ -134,7 +134,7 @@ TEST(MemoryResizeTest, SmallInitAndBigResizeOverflowWhenPadding) ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); ASSERT_EQ(memory.m_size, 0x8000000000000000ULL - PADDING_FOR_SIMD); -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL - (PADDING_FOR_SIMD - 1)), Exception, ErrorCodes::LOGICAL_ERROR); ASSERT_TRUE(memory.m_data); // state is intact after exception ASSERT_EQ(memory.m_capacity, 0x8000000000000000ULL - 1); @@ -158,7 +158,7 @@ TEST(MemoryResizeTest, SmallInitAndBigResizeOverflowWhenPadding) ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); ASSERT_EQ(memory.m_size, 1); -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD EXPECT_THROW_ERROR_CODE(memory.resize(0x8000000000000000ULL - (PADDING_FOR_SIMD - 1)), Exception, ErrorCodes::LOGICAL_ERROR); ASSERT_TRUE(memory.m_data); // state is intact after exception ASSERT_EQ(memory.m_capacity, PADDING_FOR_SIMD); @@ -197,7 +197,7 @@ TEST(MemoryResizeTest, BigInitAndSmallResizeOverflowWhenPadding) , ErrorCodes::ARGUMENT_OUT_OF_BOUND); } -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD { EXPECT_THROW_ERROR_CODE( { diff --git a/src/IO/tests/gtest_writebuffer_s3.cpp b/src/IO/tests/gtest_writebuffer_s3.cpp index 3c1af6538ad..b53a8b58023 100644 --- a/src/IO/tests/gtest_writebuffer_s3.cpp +++ b/src/IO/tests/gtest_writebuffer_s3.cpp @@ -917,8 +917,8 @@ TEST_P(SyncAsync, ExceptionOnUploadPart) { TEST_F(WBS3Test, PrefinalizeCalledMultipleTimes) { -#ifdef ABORT_ON_LOGICAL_ERROR - GTEST_SKIP() << "this test trigger LOGICAL_ERROR, runs only if ABORT_ON_LOGICAL_ERROR is not defined"; +#ifdef DEBUG_OR_SANITIZER_BUILD + GTEST_SKIP() << "this test trigger LOGICAL_ERROR, runs only if DEBUG_OR_SANITIZER_BUILD is not defined"; #else EXPECT_THROW({ try { diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index a3848fa3a75..a88c0de2cfe 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -1007,7 +1007,7 @@ void FileCache::freeSpaceRatioKeepingThreadFunc() limits_satisfied = main_priority->collectCandidatesForEviction( desired_size, desired_elements_num, keep_up_free_space_remove_batch, stat, eviction_candidates, lock); -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD /// Let's make sure that we correctly processed the limits. 
if (limits_satisfied && eviction_candidates.size() < keep_up_free_space_remove_batch) { @@ -1110,7 +1110,7 @@ void FileCache::removeAllReleasable(const UserID & user_id) { assertInitialized(); -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD assertCacheCorrectness(); #endif @@ -1226,7 +1226,7 @@ void FileCache::loadMetadataImpl() if (first_exception) std::rethrow_exception(first_exception); -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD assertCacheCorrectness(); #endif } @@ -1393,7 +1393,7 @@ void FileCache::loadMetadataForKeys(const fs::path & keys_dir) FileCache::~FileCache() { deactivateBackgroundOperations(); -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD assertCacheCorrectness(); #endif } diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 838ca0b491e..1664a91b694 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -67,7 +67,7 @@ FileSegment::FileSegment( , key_metadata(key_metadata_) , queue_iterator(queue_iterator_) , cache(cache_) -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD , log(getLogger(fmt::format("FileSegment({}) : {}", key_.toString(), range().toString()))) #else , log(getLogger("FileSegment")) @@ -385,9 +385,9 @@ void FileSegment::write(char * from, size_t size, size_t offset_in_file) try { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD /// This mutex is only needed to have a valid assertion in assertCacheCorrectness(), - /// which is only executed in debug/sanitizer builds (under ABORT_ON_LOGICAL_ERROR). + /// which is only executed in debug/sanitizer builds (under DEBUG_OR_SANITIZER_BUILD). std::lock_guard lock(write_mutex); #endif diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 1d23278a255..7e4b76d3cc6 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -963,7 +963,7 @@ KeyMetadata::iterator LockedKey::removeFileSegmentImpl( } else if (!can_be_broken) { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected path {} to exist", path); #else LOG_WARNING(key_metadata->logger(), "Expected path {} to exist, while removing {}:{}", diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index 5d237d28089..1b57ad2b622 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -538,7 +538,7 @@ Chunk DDLQueryStatusSource::generate() ExecutionStatus status(-1, "Cannot obtain error message"); /// Replicated database retries in case of error, it should not write error status. 
-#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD bool need_check_status = true; #else bool need_check_status = !is_replicated_database; diff --git a/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp b/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp index ce5992c2548..364d7c69071 100644 --- a/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp +++ b/src/Processors/tests/gtest_exception_on_incorrect_pipeline.cpp @@ -50,7 +50,7 @@ TEST(Processors, PortsNotConnected) processors->emplace_back(std::move(source)); processors->emplace_back(std::move(sink)); -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD try { QueryStatusPtr element; diff --git a/src/QueryPipeline/tests/gtest_check_sorted_stream.cpp b/src/QueryPipeline/tests/gtest_check_sorted_stream.cpp index c8ab2e3a973..34bc2eb2b5e 100644 --- a/src/QueryPipeline/tests/gtest_check_sorted_stream.cpp +++ b/src/QueryPipeline/tests/gtest_check_sorted_stream.cpp @@ -133,7 +133,7 @@ TEST(CheckSortedTransform, CheckBadLastRow) EXPECT_NO_THROW(executor.pull(chunk)); EXPECT_NO_THROW(executor.pull(chunk)); -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD EXPECT_THROW(executor.pull(chunk), DB::Exception); #endif } @@ -158,7 +158,7 @@ TEST(CheckSortedTransform, CheckUnsortedBlock1) Chunk chunk; -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD EXPECT_THROW(executor.pull(chunk), DB::Exception); #endif } @@ -181,7 +181,7 @@ TEST(CheckSortedTransform, CheckUnsortedBlock2) PullingPipelineExecutor executor(pipeline); Chunk chunk; -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD EXPECT_THROW(executor.pull(chunk), DB::Exception); #endif } @@ -204,7 +204,7 @@ TEST(CheckSortedTransform, CheckUnsortedBlock3) PullingPipelineExecutor executor(pipeline); Chunk chunk; -#ifndef ABORT_ON_LOGICAL_ERROR +#ifndef DEBUG_OR_SANITIZER_BUILD EXPECT_THROW(executor.pull(chunk), DB::Exception); #endif } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 8d69df8de76..1306a3869c7 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -666,7 +666,7 @@ void TCPHandler::runImpl() // Server should die on std logic errors in debug, like with assert() // or ErrorCodes::LOGICAL_ERROR. This helps catch these errors in // tests. -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD catch (const std::logic_error & e) { state.io.onException(); diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 29631b95542..41e90aafd42 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -357,7 +357,7 @@ void RefreshTask::refreshTask() stop_requested = true; tryLogCurrentException(log, "Unexpected exception in refresh scheduling, please investigate. 
The view will be stopped."); -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD abortOnFailedAssertion("Unexpected exception in refresh scheduling"); #endif } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 72f725965e0..3f02486ed15 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1516,7 +1516,7 @@ static time_t tryGetPartCreateTime(zkutil::ZooKeeperPtr & zookeeper, const Strin void StorageReplicatedMergeTree::paranoidCheckForCoveredPartsInZooKeeperOnStart(const Strings & parts_in_zk, const Strings & parts_to_fetch) const { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD constexpr bool paranoid_check_for_covered_parts_default = true; #else constexpr bool paranoid_check_for_covered_parts_default = false; @@ -2383,7 +2383,7 @@ static void paranoidCheckForCoveredPartsInZooKeeper( const String & covering_part_name, const StorageReplicatedMergeTree & storage) { -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD constexpr bool paranoid_check_for_covered_parts_default = true; #else constexpr bool paranoid_check_for_covered_parts_default = false; From bb01920370e1dd5faa7b17694f74175190537445 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 17 Jul 2024 16:19:02 +0000 Subject: [PATCH 0472/2361] add ci_exclude_ast to PR body template --- .github/PULL_REQUEST_TEMPLATE.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 146542e980c..8b6e957e1d8 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -59,7 +59,8 @@ At a minimum, the following information should be added (but add more as needed) - [ ] Exclude: All with TSAN, MSAN, UBSAN, Coverage - [ ] Exclude: All with aarch64, release, debug --- -- [ ] Run only libFuzzer related jobs +- [ ] Run only fuzzers related jobs (libFuzzer fuzzers, AST fuzzers, etc.) +- [ ] Exclude AST fuzzers --- - [ ] Do not test - [ ] Woolen Wolfdog From 80e1377e5d2223176274c319938187f0da799280 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Jul 2024 17:30:28 +0000 Subject: [PATCH 0473/2361] Fixing build. 
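filterBlockWithDAG appears to take the DAG by value now (judging from the updated call site below), so the optional is dereferenced and the DAG is moved into the argument instead of passing the handle itself. A minimal sketch of that idiom, with stand-in types only (Dag, Block, Context and the by-value signature are illustrative assumptions, not the real ClickHouse interfaces):

    #include <optional>
    #include <utility>
    #include <vector>

    struct Dag { std::vector<int> nodes; };   // stand-in for the real DAG type
    struct Block {};
    struct Context {};

    // Assumed new-style helper: consumes the DAG by value, so the caller hands over ownership.
    void filterBlockWithDAG(Dag /*dag*/, Block & /*block*/, const Context & /*context*/) {}

    void caller(std::optional<Dag> dag, Block & block, const Context & context)
    {
        if (dag)
            filterBlockWithDAG(std::move(*dag), block, context);   // move the DAG out of the optional
    }

The same move-out-of-the-handle pattern applies if the call site holds a smart pointer rather than an optional.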
--- src/Storages/System/StorageSystemTables.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 43b761d84b1..d6b577bf6c8 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -125,7 +125,7 @@ ColumnPtr getFilteredTables( block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); if (dag) - VirtualColumnUtils::filterBlockWithDAG(dag, block, context); + VirtualColumnUtils::filterBlockWithDAG(std::move(*dag), block, context); return block.getByPosition(0).column; } From 47573f1ae158faabd6ac0a62598c907c85fbd4c1 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 17 Jul 2024 20:44:44 +0000 Subject: [PATCH 0474/2361] add options --- src/Core/SettingsEnums.h | 9 +++++++++ src/Storages/MergeTree/MergeTreeSettings.h | 1 + 2 files changed, 10 insertions(+) diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index f6d9593ca56..ac3264fe041 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -315,6 +315,15 @@ enum class LightweightMutationProjectionMode : uint8_t DECLARE_SETTING_ENUM(LightweightMutationProjectionMode) +enum class DeduplicateMergeProjectionMode : uint8_t +{ + THROW, + DROP, + REBUILD, +}; + +DECLARE_SETTING_ENUM(DeduplicateMergeProjectionMode) + DECLARE_SETTING_ENUM(LocalFSReadMethod) enum class ObjectStorageQueueMode : uint8_t diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index f5ada81cf55..166a18a7bab 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -215,6 +215,7 @@ struct Settings; M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ /** Projection settings. */ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ + M(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree, if allowed, what is the next action.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE) From 3985a4012d70762480d50083eb0d30a3b70e0981 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 18 Jul 2024 00:06:25 +0200 Subject: [PATCH 0475/2361] Fix tidy --- src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp index 4e62c503d65..4a4deb07eee 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp @@ -155,7 +155,7 @@ void printExceptionWithRespectToAbort(LoggerPtr log, const String & query_id) { std::rethrow_exception(ex); } - catch (const TestException &) + catch (const TestException &) // NOLINT { /// Exception from a unit test, ignore it. 
} From b8cf8829927dedc701b97893b3a1e7651193b586 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 17 Jul 2024 23:22:24 +0100 Subject: [PATCH 0476/2361] impl --- src/Common/CgroupsMemoryUsageObserver.cpp | 49 ++++++++++++++++++----- src/Common/CgroupsMemoryUsageObserver.h | 2 + 2 files changed, 40 insertions(+), 11 deletions(-) diff --git a/src/Common/CgroupsMemoryUsageObserver.cpp b/src/Common/CgroupsMemoryUsageObserver.cpp index 02bde0d80b7..cf661174789 100644 --- a/src/Common/CgroupsMemoryUsageObserver.cpp +++ b/src/Common/CgroupsMemoryUsageObserver.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -45,26 +46,33 @@ namespace /// kernel 5 /// rss 15 /// [...] -uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key) +using Metrics = std::map; + +Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf) { + Metrics metrics; while (!buf.eof()) { std::string current_key; readStringUntilWhitespace(current_key, buf); - if (current_key != key) - { - std::string dummy; - readStringUntilNewlineInto(dummy, buf); - buf.ignore(); - continue; - } assertChar(' ', buf); + uint64_t value = 0; readIntText(value, buf); - return value; - } + assertChar('\n', buf); + auto [_, inserted] = metrics.emplace(std::move(current_key), value); + chassert(inserted, "Duplicate keys in stat file"); + } + return metrics; +} + +uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key) +{ + const auto all_metrics = readAllMetricsFromStatFile(buf); + if (const auto it = all_metrics.find(key); it != all_metrics.end()) + return it->second; throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName()); } @@ -79,6 +87,13 @@ struct CgroupsV1Reader : ICgroupsReader return readMetricFromStatFile(buf, "rss"); } + std::string dumpAllStats() override + { + std::lock_guard lock(mutex); + buf.rewind(); + return fmt::format("{}", readAllMetricsFromStatFile(buf)); + } + private: std::mutex mutex; ReadBufferFromFile buf TSA_GUARDED_BY(mutex); @@ -106,6 +121,13 @@ struct CgroupsV2Reader : ICgroupsReader return mem_usage; } + std::string dumpAllStats() override + { + std::lock_guard lock(mutex); + stat_buf.rewind(); + return fmt::format("{}", readAllMetricsFromStatFile(stat_buf)); + } + private: std::mutex mutex; ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex); @@ -234,7 +256,12 @@ void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint # endif /// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them. uint64_t memory_usage = cgroup_reader->readMemoryUsage(); - LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage)); + LOG_TRACE( + log, + "Read current memory usage {} bytes ({}) from cgroups, full available stats: {}", + memory_usage, + ReadableSize(memory_usage), + cgroup_reader->dumpAllStats()); MemoryTracker::setRSS(memory_usage, 0); LOG_INFO(log, "Purged jemalloc arenas. 
Current memory usage is {}", ReadableSize(memory_usage)); diff --git a/src/Common/CgroupsMemoryUsageObserver.h b/src/Common/CgroupsMemoryUsageObserver.h index b848a2bff3c..0d5d07597c8 100644 --- a/src/Common/CgroupsMemoryUsageObserver.h +++ b/src/Common/CgroupsMemoryUsageObserver.h @@ -14,6 +14,8 @@ struct ICgroupsReader virtual ~ICgroupsReader() = default; virtual uint64_t readMemoryUsage() = 0; + + virtual std::string dumpAllStats() = 0; }; /// Does two things: From 275b3666dadece731e368dd672e8d6e83ec22d8f Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 18 Jul 2024 01:18:34 +0000 Subject: [PATCH 0477/2361] try to fix the test --- .../03161_lightweight_delete_projection.sql | 114 +++++++++++++++++- 1 file changed, 111 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 3bf459cc32d..9d577f8a701 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,5 +1,73 @@ -SET lightweight_deletes_sync = 2; +SET lightweight_deletes_sync = 2, alter_sync = 2; + +Set max_insert_threads = 2, +group_by_two_level_threshold = 704642, +group_by_two_level_threshold_bytes = 49659607, +distributed_aggregation_memory_efficient = 0, +fsync_metadata = 0, +output_format_parallel_formatting = 0, +input_format_parallel_parsing = 1, +min_chunk_bytes_for_parallel_parsing = 14437539, +max_read_buffer_size = 887507, +prefer_localhost_replica = 0, +max_block_size = 73908, +max_joined_block_size_rows = 21162, +max_threads = 2, +optimize_append_index = 0, +optimize_if_chain_to_multiif = 1, +optimize_if_transform_strings_to_enum = 0, +optimize_read_in_order = 0, +optimize_or_like_chain = 1, +optimize_substitute_columns = 1, +enable_multiple_prewhere_read_steps = 1, +read_in_order_two_level_merge_threshold = 13, +optimize_aggregation_in_order = 1, +aggregation_in_order_max_block_bytes = 37110261, +use_uncompressed_cache = 0, +min_bytes_to_use_direct_io = 10737418240, +min_bytes_to_use_mmap_io = 1, +local_filesystem_read_method ='pread', +remote_filesystem_read_method ='threadpool', +local_filesystem_read_prefetch = 0, +filesystem_cache_segments_batch_size = 3, +read_from_filesystem_cache_if_exists_otherwise_bypass_cache = 1, +throw_on_error_from_cache_on_write_operations = 0, +remote_filesystem_read_prefetch = 1, +allow_prefetched_read_pool_for_remote_filesystem = 0, +filesystem_prefetch_max_memory_usage = '32Mi', +filesystem_prefetches_limit = 0, +filesystem_prefetch_min_bytes_for_single_read_task ='16Mi', +filesystem_prefetch_step_marks = 50, +filesystem_prefetch_step_bytes = 0, +compile_aggregate_expressions = 0, +compile_sort_description = 1, +merge_tree_coarse_index_granularity = 16, +optimize_distinct_in_order = 0, +max_bytes_before_external_sort = 0, +max_bytes_before_external_group_by = 0, +max_bytes_before_remerge_sort = 820113150, +min_compress_block_size = 1262249, +max_compress_block_size = 1472188, +merge_tree_compact_parts_min_granules_to_multibuffer_read = 56, +optimize_sorting_by_input_stream_properties = 1, +http_response_buffer_size = 1883022, +http_wait_end_of_query = False, +enable_memory_bound_merging_of_aggregation_results = 1, +min_count_to_compile_expression = 0, +min_count_to_compile_aggregate_expression = 0, +min_count_to_compile_sort_description = 0, +session_timezone ='Africa/Khartoum', +prefer_warmed_unmerged_parts_seconds = 10, 
+use_page_cache_for_disks_without_file_cache = True, +page_cache_inject_eviction = False, +merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.02, +prefer_external_sort_block_bytes = 100000000, +cross_join_min_rows_to_compress = 100000000, +cross_join_min_bytes_to_compress = 1, +min_external_table_block_size_bytes = 100000000, +max_parsing_threads = 0; + DROP TABLE IF EXISTS users; @@ -11,7 +79,27 @@ CREATE TABLE users ( projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) ) ENGINE = MergeTree order by uid -SETTINGS min_bytes_for_wide_part = 10485760; +SETTINGS min_bytes_for_wide_part = 10485760, +ratio_of_defaults_for_sparse_serialization = 1.0, +prefer_fetch_merged_part_size_threshold = 1, +vertical_merge_algorithm_min_rows_to_activate = 1, +vertical_merge_algorithm_min_columns_to_activate = 100, +allow_vertical_merges_from_compact_to_wide_parts = 0, +min_merge_bytes_to_use_direct_io = 114145183, +index_granularity_bytes = 2660363, +merge_max_block_size = 13460, +index_granularity = 51768, +marks_compress_block_size = 59418, +primary_key_compress_block_size = 88795, +replace_long_file_name_to_hash = 0, +max_file_name_length = 0, +min_bytes_for_full_part_storage = 536870912, +compact_parts_max_bytes_to_buffer = 378557913, +compact_parts_max_granules_to_buffer = 254, +compact_parts_merge_max_bytes_to_prefetch_part = 26969686, +cache_populated_by_fetch = 0, +concurrent_part_removal_threshold = 38, +old_parts_lifetime = 480; INSERT INTO users VALUES (1231, 'John', 33); @@ -63,7 +151,27 @@ CREATE TABLE users ( projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) ) ENGINE = MergeTree order by uid -SETTINGS min_bytes_for_wide_part = 0; +SETTINGS min_bytes_for_wide_part = 0, +ratio_of_defaults_for_sparse_serialization = 1.0, +prefer_fetch_merged_part_size_threshold = 1, +vertical_merge_algorithm_min_rows_to_activate = 1, +vertical_merge_algorithm_min_columns_to_activate = 100, +allow_vertical_merges_from_compact_to_wide_parts = 0, +min_merge_bytes_to_use_direct_io = 114145183, +index_granularity_bytes = 2660363, +merge_max_block_size = 13460, +index_granularity = 51768, +marks_compress_block_size = 59418, +primary_key_compress_block_size = 88795, +replace_long_file_name_to_hash = 0, +max_file_name_length = 0, +min_bytes_for_full_part_storage = 536870912, +compact_parts_max_bytes_to_buffer = 378557913, +compact_parts_max_granules_to_buffer = 254, +compact_parts_merge_max_bytes_to_prefetch_part = 26969686, +cache_populated_by_fetch = 0, +concurrent_part_removal_threshold = 38, +old_parts_lifetime = 480; INSERT INTO users VALUES (1231, 'John', 33); From 55355f43ad420456467121ce43072a10791c5cc8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 05:19:58 +0200 Subject: [PATCH 0478/2361] Fix bad code: it was catching exceptions --- src/IO/WithFileSize.cpp | 48 +++++++++---------- ...ry_and_native_with_binary_encoded_types.sh | 4 +- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/src/IO/WithFileSize.cpp b/src/IO/WithFileSize.cpp index 3660d962c08..8cea12fa200 100644 --- a/src/IO/WithFileSize.cpp +++ b/src/IO/WithFileSize.cpp @@ -14,40 +14,38 @@ namespace ErrorCodes } template -static size_t getFileSize(T & in) +static std::optional tryGetFileSize(T & in) { if (auto * with_file_size = dynamic_cast(&in)) - { return with_file_size->getFileSize(); - } + + return std::nullopt; +} + +template +static size_t getFileSize(T & in) +{ + if 
(auto maybe_size = tryGetFileSize(in)) + return *maybe_size; throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } -size_t getFileSizeFromReadBuffer(ReadBuffer & in) -{ - if (auto * delegate = dynamic_cast(&in)) - { - return getFileSize(delegate->getWrappedReadBuffer()); - } - else if (auto * compressed = dynamic_cast(&in)) - { - return getFileSize(compressed->getWrappedReadBuffer()); - } - - return getFileSize(in); -} - std::optional tryGetFileSizeFromReadBuffer(ReadBuffer & in) { - try - { - return getFileSizeFromReadBuffer(in); - } - catch (...) - { - return std::nullopt; - } + if (auto * delegate = dynamic_cast(&in)) + return tryGetFileSize(delegate->getWrappedReadBuffer()); + else if (auto * compressed = dynamic_cast(&in)) + return tryGetFileSize(compressed->getWrappedReadBuffer()); + return tryGetFileSize(in); +} + +size_t getFileSizeFromReadBuffer(ReadBuffer & in) +{ + if (auto maybe_size = tryGetFileSizeFromReadBuffer(in)) + return *maybe_size; + + throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } bool isBufferWithFileSize(const ReadBuffer & in) diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh index 723b11ad620..0c585d36348 100755 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh @@ -6,8 +6,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test { - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" } test "materialize(42)::UInt8" From e0aedb992f647a8dcd226bc8775795ecad91a551 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 05:34:04 +0200 Subject: [PATCH 0479/2361] Add a test --- .../03206_no_exceptions_clickhouse_local.reference | 1 + .../0_stateless/03206_no_exceptions_clickhouse_local.sh | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference create mode 100755 tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh diff 
--git a/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference new file mode 100644 index 00000000000..11277a62b06 --- /dev/null +++ b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference @@ -0,0 +1 @@ +Hello world diff --git a/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh new file mode 100755 index 00000000000..86839a228dc --- /dev/null +++ b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so the grpc library is not built + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +CLICKHOUSE_TERMINATE_ON_ANY_EXCEPTION=1 ${CLICKHOUSE_LOCAL} --query "SELECT * FROM table" --input-format CSV <<<"Hello, world" From 89f6f74418fa361c65261447ebbd0d79d88cc748 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 18 Jul 2024 10:31:11 +0800 Subject: [PATCH 0480/2361] Revert 02864_statistics_uniq --- .../02864_statistics_uniq.reference | 113 ++++++++++++++++++ .../0_stateless/02864_statistics_uniq.sql | 73 +++++++++++ 2 files changed, 186 insertions(+) create mode 100644 tests/queries/0_stateless/02864_statistics_uniq.reference create mode 100644 tests/queries/0_stateless/02864_statistics_uniq.sql diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference new file mode 100644 index 00000000000..861e35ddd48 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_uniq.reference @@ -0,0 +1,113 @@ +CREATE TABLE default.t1\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest, uniq),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) +After modify TDigest + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(c, -1), less(a, 10), less(b, 10)) (removed) +After drop + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) + 72 changes: 0 additions & 72 deletions72 +tests/queries/0_stateless/02864_statistics_uniq.sql +Viewed +Original file line number Diff line number Diff line change +@@ -1,72 +0,0 @@ +DROP TABLE IF EXISTS t1; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; + +CREATE 
TABLE t1 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c Int64 STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE t1; + +INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; +INSERT INTO t1 select 0, 0, 11, generateUUIDv4(); + +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +OPTIMIZE TABLE t1 FINAL; + +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +SELECT 'After modify TDigest'; +ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; +ALTER TABLE t1 MATERIALIZE STATISTICS c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + + +ALTER TABLE t1 DROP STATISTICS c; + +SELECT 'After drop'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE t2 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c LowCardinality(Int64) STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; + +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t3 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c Nullable(Int64) STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t3 select number, -number, number/1000, 
generateUUIDv4() FROM system.numbers LIMIT 10000; + +DROP TABLE IF EXISTS t3; + diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql new file mode 100644 index 00000000000..0f5f353c045 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_uniq.sql @@ -0,0 +1,73 @@ +DROP TABLE IF EXISTS t1; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; + +CREATE TABLE t1 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c Int64 STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE t1; + +INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; +INSERT INTO t1 select 0, 0, 11, generateUUIDv4(); + +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +OPTIMIZE TABLE t1 FINAL; + +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +SELECT 'After modify TDigest'; +ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; +ALTER TABLE t1 MATERIALIZE STATISTICS c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + + +ALTER TABLE t1 DROP STATISTICS c; + +SELECT 'After drop'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE t2 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c LowCardinality(Int64) STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() 
ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; + +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t3 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c Nullable(Int64) STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; + +DROP TABLE IF EXISTS t3; + From 1c883b802db839220a8e0a81892f653152a59cc1 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Thu, 18 Jul 2024 10:33:05 +0800 Subject: [PATCH 0481/2361] Rename test 02864_statistics_count_min_sketch --- ...864_statistics_count_min_sketch.reference} | 11 --- ... => 02864_statistics_count_min_sketch.sql} | 34 +------- .../02864_statistics_uniq.reference | 78 ------------------- 3 files changed, 3 insertions(+), 120 deletions(-) rename tests/queries/0_stateless/{02864_statistics_estimate_predicate.reference => 02864_statistics_count_min_sketch.reference} (63%) rename tests/queries/0_stateless/{02864_statistics_estimate_predicate.sql => 02864_statistics_count_min_sketch.sql} (62%) diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference similarity index 63% rename from tests/queries/0_stateless/02864_statistics_estimate_predicate.reference rename to tests/queries/0_stateless/02864_statistics_count_min_sketch.reference index 4e41c32750f..02c41656a36 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.reference +++ b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference @@ -1,15 +1,4 @@ CREATE TABLE default.tab\n(\n `a` String,\n `b` UInt64,\n `c` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -Test statistics TDigest: - Prewhere info - Prewhere filter - Prewhere filter column: and(less(c, -98), greater(b, 0)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(c, -98), equals(b, 0)) (removed) -Test statistics Uniq: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(b, 0), equals(c, 0)) (removed) Test statistics count_min: Prewhere info Prewhere filter diff --git a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql similarity index 62% rename from tests/queries/0_stateless/02864_statistics_estimate_predicate.sql rename to tests/queries/0_stateless/02864_statistics_count_min_sketch.sql index 91b4f2d05cb..c730aa7b4a7 100644 --- a/tests/queries/0_stateless/02864_statistics_estimate_predicate.sql +++ b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql @@ -1,5 +1,4 @@ -- Tags: no-fasttest --- Tests statistics usages in prewhere optimization. 
DROP TABLE IF EXISTS tab SYNC; @@ -21,33 +20,6 @@ SHOW CREATE TABLE tab; INSERT INTO tab select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; -SELECT 'Test statistics TDigest:'; - -ALTER TABLE tab ADD STATISTICS b, c TYPE tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b > 0/*9990*/ and c < -98/*100*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b = 0/*1000*/ and c < -98/*100*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS b, c; - - -SELECT 'Test statistics Uniq:'; - -ALTER TABLE tab ADD STATISTICS b TYPE uniq, tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS b; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*1000*/ and b = 0/*10*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS b; - - SELECT 'Test statistics count_min:'; ALTER TABLE tab ADD STATISTICS a TYPE count_min; @@ -87,9 +59,9 @@ DROP TABLE IF EXISTS tab2 SYNC; SET allow_suspicious_low_cardinality_types=1; CREATE TABLE tab2 ( - a LowCardinality(Int64) STATISTICS(uniq, tdigest, count_min), - b Nullable(Int64) STATISTICS(uniq, tdigest, count_min), - c LowCardinality(Nullable(Int64)) STATISTICS(uniq, tdigest, count_min), + a LowCardinality(Int64) STATISTICS(count_min), + b Nullable(Int64) STATISTICS(count_min), + c LowCardinality(Nullable(Int64)) STATISTICS(count_min), pk String, ) Engine = MergeTree() ORDER BY pk; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference index 861e35ddd48..77786dbdd8c 100644 --- a/tests/queries/0_stateless/02864_statistics_uniq.reference +++ b/tests/queries/0_stateless/02864_statistics_uniq.reference @@ -33,81 +33,3 @@ After drop Prewhere info Prewhere filter Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) - 72 changes: 0 additions & 72 deletions72 -tests/queries/0_stateless/02864_statistics_uniq.sql -Viewed -Original file line number Diff line number Diff line change -@@ -1,72 +0,0 @@ -DROP TABLE IF EXISTS t1; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET mutations_sync = 1; - -CREATE TABLE t1 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Int64 STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE t1; - -INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; -INSERT INTO t1 select 0, 0, 11, generateUUIDv4(); - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -OPTIMIZE TABLE t1 FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, 
'__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT 'After modify TDigest'; -ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; -ALTER TABLE t1 MATERIALIZE STATISTICS c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - - -ALTER TABLE t1 DROP STATISTICS c; - -SELECT 'After drop'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t1; -DROP TABLE IF EXISTS t2; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c LowCardinality(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t2; -DROP TABLE IF EXISTS t3; - -CREATE TABLE t3 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Nullable(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t3; - From c7be25f0a167c2c5ab6944b47779be2f90af443d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 18 Jul 2024 04:54:36 +0200 Subject: [PATCH 0482/2361] Fix everything --- src/Disks/IO/AsynchronousBoundedReadBuffer.h | 2 +- src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp | 2 +- src/Disks/IO/ReadBufferFromAzureBlobStorage.h | 2 +- src/Disks/IO/ReadBufferFromRemoteFSGather.h | 2 +- src/IO/Archives/LibArchiveReader.cpp | 2 +- src/IO/Archives/ZipArchiveReader.cpp | 2 +- src/IO/AsynchronousReadBufferFromFileDescriptor.cpp | 2 +- src/IO/AsynchronousReadBufferFromFileDescriptor.h | 2 +- src/IO/ConcatSeekableReadBuffer.h | 2 +- src/IO/MMapReadBufferFromFileDescriptor.cpp | 2 +- src/IO/MMapReadBufferFromFileDescriptor.h | 2 +- src/IO/ParallelReadBuffer.cpp | 2 +- src/IO/ParallelReadBuffer.h | 2 +- src/IO/ReadBufferFromEmptyFile.h | 2 
+- src/IO/ReadBufferFromEncryptedFile.h | 2 +- src/IO/ReadBufferFromFileBase.cpp | 6 ++---- src/IO/ReadBufferFromFileBase.h | 2 +- src/IO/ReadBufferFromFileDecorator.cpp | 4 ++-- src/IO/ReadBufferFromFileDecorator.h | 2 +- src/IO/ReadBufferFromFileDescriptor.cpp | 2 +- src/IO/ReadBufferFromFileDescriptor.h | 2 +- src/IO/ReadBufferFromS3.cpp | 6 +++--- src/IO/ReadBufferFromS3.h | 2 +- src/IO/ReadWriteBufferFromHTTP.cpp | 7 ++----- src/IO/ReadWriteBufferFromHTTP.h | 2 +- src/IO/WithFileSize.cpp | 10 +++++++++- src/IO/WithFileSize.h | 7 ++++--- src/Storages/Cache/ExternalDataSourceCache.h | 2 +- .../HDFS/AsynchronousReadBufferFromHDFS.cpp | 4 ++-- .../HDFS/AsynchronousReadBufferFromHDFS.h | 2 +- src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp | 8 ++++---- src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h | 2 +- 32 files changed, 52 insertions(+), 48 deletions(-) diff --git a/src/Disks/IO/AsynchronousBoundedReadBuffer.h b/src/Disks/IO/AsynchronousBoundedReadBuffer.h index 9a802348998..3dc8fcc39cb 100644 --- a/src/Disks/IO/AsynchronousBoundedReadBuffer.h +++ b/src/Disks/IO/AsynchronousBoundedReadBuffer.h @@ -34,7 +34,7 @@ public: String getFileName() const override { return impl->getFileName(); } - size_t getFileSize() override { return impl->getFileSize(); } + std::optional tryGetFileSize() override { return impl->tryGetFileSize(); } String getInfoForLog() override { return impl->getInfoForLog(); } diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp index da1ea65f2ea..a36a8b031b4 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp @@ -253,7 +253,7 @@ void ReadBufferFromAzureBlobStorage::initialize() initialized = true; } -size_t ReadBufferFromAzureBlobStorage::getFileSize() +std::optional ReadBufferFromAzureBlobStorage::tryGetFileSize() { if (!blob_client) blob_client = std::make_unique(blob_container_client->GetBlobClient(path)); diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.h b/src/Disks/IO/ReadBufferFromAzureBlobStorage.h index d328195cc26..f407f27e099 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.h +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.h @@ -42,7 +42,7 @@ public: bool supportsRightBoundedReads() const override { return true; } - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t readBigAt(char * to, size_t n, size_t range_begin, const std::function & progress_callback) const override; diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index e36365a8174..9f1cb681f1a 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -41,7 +41,7 @@ public: void setReadUntilEnd() override { setReadUntilPosition(getFileSize()); } - size_t getFileSize() override { return getTotalSize(blobs_to_read); } + std::optional tryGetFileSize() override { return getTotalSize(blobs_to_read); } size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; } diff --git a/src/IO/Archives/LibArchiveReader.cpp b/src/IO/Archives/LibArchiveReader.cpp index e3fe63fa40d..31bad4d6638 100644 --- a/src/IO/Archives/LibArchiveReader.cpp +++ b/src/IO/Archives/LibArchiveReader.cpp @@ -321,7 +321,7 @@ public: off_t getPosition() override { throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "getPosition not supported when reading from archive"); } String getFileName() const override { return handle.getFileName(); } - 
size_t getFileSize() override { return handle.getFileInfo().uncompressed_size; } + std::optional tryGetFileSize() override { return handle.getFileInfo().uncompressed_size; } Handle releaseHandle() && { return std::move(handle); } diff --git a/src/IO/Archives/ZipArchiveReader.cpp b/src/IO/Archives/ZipArchiveReader.cpp index 2a9b7a43519..12b07d550c2 100644 --- a/src/IO/Archives/ZipArchiveReader.cpp +++ b/src/IO/Archives/ZipArchiveReader.cpp @@ -317,7 +317,7 @@ public: String getFileName() const override { return handle.getFileName(); } - size_t getFileSize() override { return handle.getFileInfo().uncompressed_size; } + std::optional tryGetFileSize() override { return handle.getFileInfo().uncompressed_size; } /// Releases owned handle to pass it to an enumerator. HandleHolder releaseHandle() && diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp index f8c00d62732..6c4bd09b76f 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp @@ -244,7 +244,7 @@ void AsynchronousReadBufferFromFileDescriptor::rewind() file_offset_of_buffer_end = 0; } -size_t AsynchronousReadBufferFromFileDescriptor::getFileSize() +std::optional AsynchronousReadBufferFromFileDescriptor::tryGetFileSize() { return getSizeFromFileDescriptor(fd, getFileName()); } diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.h b/src/IO/AsynchronousReadBufferFromFileDescriptor.h index 82659b1aca7..097979fbe00 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.h +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.h @@ -68,7 +68,7 @@ public: /// Seek to the beginning, discarding already read data if any. Useful to reread file that changes on every read. 
void rewind(); - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; } diff --git a/src/IO/ConcatSeekableReadBuffer.h b/src/IO/ConcatSeekableReadBuffer.h index c8c16c5d887..609f0dc25b8 100644 --- a/src/IO/ConcatSeekableReadBuffer.h +++ b/src/IO/ConcatSeekableReadBuffer.h @@ -21,7 +21,7 @@ public: off_t seek(off_t off, int whence) override; off_t getPosition() override; - size_t getFileSize() override { return total_size; } + std::optional tryGetFileSize() override { return total_size; } private: bool nextImpl() override; diff --git a/src/IO/MMapReadBufferFromFileDescriptor.cpp b/src/IO/MMapReadBufferFromFileDescriptor.cpp index f27828f71b2..83dd192de54 100644 --- a/src/IO/MMapReadBufferFromFileDescriptor.cpp +++ b/src/IO/MMapReadBufferFromFileDescriptor.cpp @@ -87,7 +87,7 @@ off_t MMapReadBufferFromFileDescriptor::seek(off_t offset, int whence) return new_pos; } -size_t MMapReadBufferFromFileDescriptor::getFileSize() +std::optional MMapReadBufferFromFileDescriptor::tryGetFileSize() { return getSizeFromFileDescriptor(getFD(), getFileName()); } diff --git a/src/IO/MMapReadBufferFromFileDescriptor.h b/src/IO/MMapReadBufferFromFileDescriptor.h index f774538374a..de44ec3f9d8 100644 --- a/src/IO/MMapReadBufferFromFileDescriptor.h +++ b/src/IO/MMapReadBufferFromFileDescriptor.h @@ -38,7 +38,7 @@ public: int getFD() const; - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t readBigAt(char * to, size_t n, size_t offset, const std::function &) const override; bool supportsReadAt() override { return true; } diff --git a/src/IO/ParallelReadBuffer.cpp b/src/IO/ParallelReadBuffer.cpp index e6771235a8e..89cff670e37 100644 --- a/src/IO/ParallelReadBuffer.cpp +++ b/src/IO/ParallelReadBuffer.cpp @@ -152,7 +152,7 @@ off_t ParallelReadBuffer::seek(off_t offset, int whence) return offset; } -size_t ParallelReadBuffer::getFileSize() +std::optional ParallelReadBuffer::tryGetFileSize() { return file_size; } diff --git a/src/IO/ParallelReadBuffer.h b/src/IO/ParallelReadBuffer.h index cfeec2b3677..8852472a8bc 100644 --- a/src/IO/ParallelReadBuffer.h +++ b/src/IO/ParallelReadBuffer.h @@ -33,7 +33,7 @@ public: ~ParallelReadBuffer() override { finishAndWait(); } off_t seek(off_t off, int whence) override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; off_t getPosition() override; const SeekableReadBuffer & getReadBuffer() const { return input; } diff --git a/src/IO/ReadBufferFromEmptyFile.h b/src/IO/ReadBufferFromEmptyFile.h index f21f2f507dc..b15299dafee 100644 --- a/src/IO/ReadBufferFromEmptyFile.h +++ b/src/IO/ReadBufferFromEmptyFile.h @@ -19,7 +19,7 @@ private: std::string getFileName() const override { return ""; } off_t seek(off_t /*off*/, int /*whence*/) override { return 0; } off_t getPosition() override { return 0; } - size_t getFileSize() override { return 0; } + std::optional tryGetFileSize() override { return 0; } }; } diff --git a/src/IO/ReadBufferFromEncryptedFile.h b/src/IO/ReadBufferFromEncryptedFile.h index 3626daccb3e..213d242bb91 100644 --- a/src/IO/ReadBufferFromEncryptedFile.h +++ b/src/IO/ReadBufferFromEncryptedFile.h @@ -30,7 +30,7 @@ public: void setReadUntilEnd() override { in->setReadUntilEnd(); } - size_t getFileSize() override { return in->getFileSize(); } + std::optional tryGetFileSize() override { return in->tryGetFileSize(); } private: bool nextImpl() override; diff --git a/src/IO/ReadBufferFromFileBase.cpp 
b/src/IO/ReadBufferFromFileBase.cpp index 4ac3f984f78..d42b12ba49b 100644 --- a/src/IO/ReadBufferFromFileBase.cpp +++ b/src/IO/ReadBufferFromFileBase.cpp @@ -26,11 +26,9 @@ ReadBufferFromFileBase::ReadBufferFromFileBase( ReadBufferFromFileBase::~ReadBufferFromFileBase() = default; -size_t ReadBufferFromFileBase::getFileSize() +std::optional ReadBufferFromFileBase::tryGetFileSize() { - if (file_size) - return *file_size; - throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size for read buffer"); + return file_size; } void ReadBufferFromFileBase::setProgressCallback(ContextPtr context) diff --git a/src/IO/ReadBufferFromFileBase.h b/src/IO/ReadBufferFromFileBase.h index 9870d8bbe43..c98dcd5a93e 100644 --- a/src/IO/ReadBufferFromFileBase.h +++ b/src/IO/ReadBufferFromFileBase.h @@ -50,7 +50,7 @@ public: clock_type = clock_type_; } - size_t getFileSize() override; + std::optional tryGetFileSize() override; void setProgressCallback(ContextPtr context); diff --git a/src/IO/ReadBufferFromFileDecorator.cpp b/src/IO/ReadBufferFromFileDecorator.cpp index 9ac0fb4e475..8a6468b9bd0 100644 --- a/src/IO/ReadBufferFromFileDecorator.cpp +++ b/src/IO/ReadBufferFromFileDecorator.cpp @@ -52,9 +52,9 @@ bool ReadBufferFromFileDecorator::nextImpl() return result; } -size_t ReadBufferFromFileDecorator::getFileSize() +std::optional ReadBufferFromFileDecorator::tryGetFileSize() { - return getFileSizeFromReadBuffer(*impl); + return tryGetFileSizeFromReadBuffer(*impl); } } diff --git a/src/IO/ReadBufferFromFileDecorator.h b/src/IO/ReadBufferFromFileDecorator.h index 6e62c7f741b..69f029c5cf7 100644 --- a/src/IO/ReadBufferFromFileDecorator.h +++ b/src/IO/ReadBufferFromFileDecorator.h @@ -27,7 +27,7 @@ public: ReadBuffer & getWrappedReadBuffer() { return *impl; } - size_t getFileSize() override; + std::optional tryGetFileSize() override; protected: std::unique_ptr impl; diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 76a80f145e7..51a1a5d8d93 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -253,7 +253,7 @@ void ReadBufferFromFileDescriptor::rewind() file_offset_of_buffer_end = 0; } -size_t ReadBufferFromFileDescriptor::getFileSize() +std::optional ReadBufferFromFileDescriptor::tryGetFileSize() { return getSizeFromFileDescriptor(fd, getFileName()); } diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index db256ef91c7..6083e744c95 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -69,7 +69,7 @@ public: /// Seek to the beginning, discarding already read data if any. Useful to reread file that changes on every read. 
void rewind(); - size_t getFileSize() override; + std::optional tryGetFileSize() override; bool checkIfActuallySeekable() override; diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index 9e001232e65..94f317802e3 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -313,15 +313,15 @@ off_t ReadBufferFromS3::seek(off_t offset_, int whence) return offset; } -size_t ReadBufferFromS3::getFileSize() +std::optional ReadBufferFromS3::tryGetFileSize() { if (file_size) - return *file_size; + return file_size; auto object_size = S3::getObjectSize(*client_ptr, bucket, key, version_id); file_size = object_size; - return *file_size; + return file_size; } off_t ReadBufferFromS3::getPosition() diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index c6625c2d632..ff04f78ce7b 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -63,7 +63,7 @@ public: off_t getPosition() override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; void setReadUntilPosition(size_t position) override; void setReadUntilEnd() override; diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index b753e66da48..2a62b11aa44 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -121,15 +121,12 @@ void ReadWriteBufferFromHTTP::prepareRequest(Poco::Net::HTTPRequest & request, s credentials.authenticate(request); } -size_t ReadWriteBufferFromHTTP::getFileSize() +std::optional ReadWriteBufferFromHTTP::tryGetFileSize() { if (!file_info) file_info = getFileInfo(); - if (file_info->file_size) - return *file_info->file_size; - - throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size for: {}", initial_uri.toString()); + return file_info->file_size; } bool ReadWriteBufferFromHTTP::supportsReadAt() diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index f496fe3ddcd..1c9bda53008 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -118,7 +118,7 @@ private: std::unique_ptr initialize(); - size_t getFileSize() override; + std::optional tryGetFileSize() override; bool supportsReadAt() override; diff --git a/src/IO/WithFileSize.cpp b/src/IO/WithFileSize.cpp index 8cea12fa200..cbbcab83de2 100644 --- a/src/IO/WithFileSize.cpp +++ b/src/IO/WithFileSize.cpp @@ -13,11 +13,19 @@ namespace ErrorCodes extern const int UNKNOWN_FILE_SIZE; } +size_t WithFileSize::getFileSize() +{ + if (auto maybe_size = tryGetFileSize()) + return *maybe_size; + + throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); +} + template static std::optional tryGetFileSize(T & in) { if (auto * with_file_size = dynamic_cast(&in)) - return with_file_size->getFileSize(); + return with_file_size->tryGetFileSize(); return std::nullopt; } diff --git a/src/IO/WithFileSize.h b/src/IO/WithFileSize.h index 0ae3af98ea0..e5dc383fab0 100644 --- a/src/IO/WithFileSize.h +++ b/src/IO/WithFileSize.h @@ -10,15 +10,16 @@ class ReadBuffer; class WithFileSize { public: - virtual size_t getFileSize() = 0; + /// Returns nullopt if couldn't find out file size; + virtual std::optional tryGetFileSize() = 0; virtual ~WithFileSize() = default; + + size_t getFileSize(); }; bool isBufferWithFileSize(const ReadBuffer & in); size_t getFileSizeFromReadBuffer(ReadBuffer & in); - -/// Return nullopt if couldn't find out file size; std::optional tryGetFileSizeFromReadBuffer(ReadBuffer & in); size_t getDataOffsetMaybeCompressed(const 
ReadBuffer & in); diff --git a/src/Storages/Cache/ExternalDataSourceCache.h b/src/Storages/Cache/ExternalDataSourceCache.h index 4c8c7974005..3b4eff28307 100644 --- a/src/Storages/Cache/ExternalDataSourceCache.h +++ b/src/Storages/Cache/ExternalDataSourceCache.h @@ -53,7 +53,7 @@ public: bool nextImpl() override; off_t seek(off_t off, int whence) override; off_t getPosition() override; - size_t getFileSize() override { return remote_file_size; } + std::optional tryGetFileSize() override { return remote_file_size; } private: std::unique_ptr local_file_holder; diff --git a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp index 21df7e35284..3bbc4e8a2ea 100644 --- a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp +++ b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp @@ -91,9 +91,9 @@ void AsynchronousReadBufferFromHDFS::prefetch(Priority priority) } -size_t AsynchronousReadBufferFromHDFS::getFileSize() +std::optional AsynchronousReadBufferFromHDFS::tryGetFileSize() { - return impl->getFileSize(); + return impl->tryGetFileSize(); } String AsynchronousReadBufferFromHDFS::getFileName() const diff --git a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h index 5aef92315a4..9846d74453b 100644 --- a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h +++ b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h @@ -35,7 +35,7 @@ public: void prefetch(Priority priority) override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; String getFileName() const override; diff --git a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp index be339d021dc..bf6f9db722c 100644 --- a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp +++ b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp @@ -31,7 +31,7 @@ namespace ErrorCodes } -struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory +struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory, public WithFileSize { String hdfs_uri; String hdfs_file_path; @@ -90,7 +90,7 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory tryGetFileSize() override { return file_size; } @@ -191,9 +191,9 @@ ReadBufferFromHDFS::ReadBufferFromHDFS( ReadBufferFromHDFS::~ReadBufferFromHDFS() = default; -size_t ReadBufferFromHDFS::getFileSize() +std::optional ReadBufferFromHDFS::tryGetFileSize() { - return impl->getFileSize(); + return impl->tryGetFileSize(); } bool ReadBufferFromHDFS::nextImpl() diff --git a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h index d9671e7e445..5363f07967b 100644 --- a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h +++ b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h @@ -40,7 +40,7 @@ public: off_t getPosition() override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t getFileOffsetOfBufferEnd() const override; From b8fbfd227fb60e0f244bda716ef5a9bb89376986 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Thu, 18 Jul 2024 15:41:08 +0800 Subject: [PATCH 0483/2361] format --- src/Interpreters/InterpreterInsertQuery.cpp | 1 - .../test.py | 40 ++++++++----------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/Interpreters/InterpreterInsertQuery.cpp 
b/src/Interpreters/InterpreterInsertQuery.cpp index 4064cd82b67..181fb064b54 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -49,7 +49,6 @@ namespace ProfileEvents { extern const Event InsertQueriesWithSubqueries; extern const Event QueriesWithSubqueries; - extern const int QUERY_IS_PROHIBITED; } namespace DB diff --git a/tests/integration/test_disable_insertion_and_mutation/test.py b/tests/integration/test_disable_insertion_and_mutation/test.py index f098f130d2b..f25964d27b8 100644 --- a/tests/integration/test_disable_insertion_and_mutation/test.py +++ b/tests/integration/test_disable_insertion_and_mutation/test.py @@ -37,29 +37,25 @@ def started_cluster(): def test_disable_insertion_and_mutation(started_cluster): writing_node.query("""CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) """) - assert ( - "QUERY_IS_PROHIBITED" - in reading_node.query_and_get_error("INSERT INTO my_table VALUES (1, 'hello')") + assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( + "INSERT INTO my_table VALUES (1, 'hello')" ) - assert ( - "QUERY_IS_PROHIBITED" - in reading_node.query_and_get_error("INSERT INTO my_table SETTINGS async_insert = 1 VALUES (1, 'hello')") + assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( + "INSERT INTO my_table SETTINGS async_insert = 1 VALUES (1, 'hello')" ) - assert ( - "QUERY_IS_PROHIBITED" - in reading_node.query_and_get_error("ALTER TABLE my_table delete where 1") + assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( + "ALTER TABLE my_table delete where 1" + ) + + + assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( + "ALTER table my_table update key = 1 where 1" ) - assert ( - "QUERY_IS_PROHIBITED" - in reading_node.query_and_get_error("ALTER table my_table update key = 1 where 1") - ) - - assert ( - "QUERY_IS_PROHIBITED" - in reading_node.query_and_get_error("ALTER TABLE my_table drop partition 0") + assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( + "ALTER TABLE my_table drop partition 0" ) reading_node.query("SELECT * from my_table"); @@ -73,12 +69,10 @@ def test_disable_insertion_and_mutation(started_cluster): reading_node.query("ALter Table my_table MODIFY COLUMN new_column String") - assert( - "new_column\tString" - in reading_node.query("DESC my_table") + assert "new_column\tString" in reading_node.query( + "DESC my_table" ) - assert( - "new_column\tString" - in writing_node.query("DESC my_table") + assert "new_column\tString" in writing_node.query( + "DESC my_table" ) From 2e1f679ceb05afe4d5d813eb4048555c6311c3e1 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Tue, 16 Jul 2024 18:35:33 +0200 Subject: [PATCH 0484/2361] add S3DiskNoKeyErrors metric --- src/Common/CurrentMetrics.cpp | 2 ++ src/IO/S3/Client.cpp | 21 +++++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 7c97e73f278..2fedba0175b 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -306,6 +306,8 @@ \ M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \ M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \ + \ + M(S3DiskNoKeyErrors, "Number of no-key S3 disk errors") \ #ifdef 
APPLY_FOR_EXTERNAL_METRICS #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 3b958dea046..db20420db9f 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -43,6 +44,11 @@ namespace ProfileEvents extern const Event TinyS3Clients; } +namespace CurrentMetrics +{ + extern const Metric S3DiskNoKeyErrors; +} + namespace DB { @@ -379,10 +385,10 @@ Model::HeadObjectOutcome Client::HeadObject(HeadObjectRequest & request) const request.overrideURI(std::move(*bucket_uri)); - /// The next call is NOT a recurcive call - /// This is a virtuall call Aws::S3::S3Client::HeadObject(const Model::HeadObjectRequest&) - return enrichErrorMessage( - HeadObject(static_cast(request))); + if (isClientForDisk()) + CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); + + return enrichErrorMessage(std::move(result)); } /// For each request, we wrap the request functions from Aws::S3::Client with doRequest @@ -402,8 +408,11 @@ Model::ListObjectsOutcome Client::ListObjects(ListObjectsRequest & request) cons Model::GetObjectOutcome Client::GetObject(GetObjectRequest & request) const { - return enrichErrorMessage( - doRequest(request, [this](const Model::GetObjectRequest & req) { return GetObject(req); })); + auto resp = doRequest(request, [this](const Model::GetObjectRequest & req) { return GetObject(req); }); + if (!resp.IsSuccess() && isClientForDisk()) + CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); + + return enrichErrorMessage(std::move(resp)); } Model::AbortMultipartUploadOutcome Client::AbortMultipartUpload(AbortMultipartUploadRequest & request) const From 87fafaa9f5f8406228440631acf597c782a3ecdd Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Jul 2024 11:00:50 +0000 Subject: [PATCH 0485/2361] Remove flaky case from the test. 
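A hedged illustration, not part of this patch: before trimming a flaky case, it can be confirmed locally by stress-running the single test and counting failures. The runner path below is an assumption about the local checkout; the test name is the one touched by this patch.
#!/usr/bin/env bash
# Hypothetical stress loop: rerun one stateless test many times and report how often it fails.
# The ./tests/clickhouse-test invocation is an assumption; adjust to the local checkout.
set -u
fails=0
runs=100
for _ in $(seq 1 "$runs"); do
    if ! ./tests/clickhouse-test 03205_parallel_window_finctions_and_column_sparse_bug > /dev/null 2>&1; then
        fails=$((fails + 1))
    fi
done
echo "failed ${fails}/${runs} runs"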
--- ..._finctions_and_column_sparse_bug.reference | 73 ------------------- ...window_finctions_and_column_sparse_bug.sql | 54 -------------- 2 files changed, 127 deletions(-) diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference index 356329a392d..13e229432ae 100644 --- a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.reference @@ -8,76 +8,3 @@ true 1 1 true 1 2 --- -755809149 0 ---- -1 -2081147898 -1 -1981899149 -2 -2051538534 -2 -1650266905 -3 -1975508531 -3 -1646738223 -4 -1700730666 -4 -1618912877 -5 -1465484835 -5 -1317193174 -6 -1458338029 -6 -1219769753 -7 -1450619195 -7 -1154269118 -8 -1365934326 -8 -1150980622 -9 -1203382363 -9 -1098155311 -10 -1197430632 -10 -841067875 -11 -1176267855 -11 -816935497 -12 -1020892864 -12 -599948807 -13 -991301833 -13 -526570556 -14 -685902265 -14 -504713125 -15 -653505826 -15 -411038390 -16 -451392958 -16 -331834394 -17 -262516786 -17 -176934810 -18 -222873194 -18 -2 -19 -153185515 -19 6 -20 -74234560 -20 255 -21 -41 -21 406615258 -22 -6 -22 541758331 -23 -5 -23 720985423 -24 -3 -24 745669725 -25 15 -25 897064234 -26 65535 -26 1116921321 -27 77089559 -27 1207796283 -28 100663045 -28 1603772265 -29 561061873 -29 1664059402 -30 643897141 -30 1688303275 -31 914629990 -31 1913361922 -32 1159852204 -32 1929066636 -33 1258218855 -33 1968095908 -34 1459407556 -34 2054878592 -35 1936334332 -35 2125075305 -36 1962246186 -37 2030467062 diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql index 6e326d0a67f..f2391e0d165 100644 --- a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql @@ -31,57 +31,3 @@ SELECT * FROM ( SELECT c, min(w) OVER (PARTITION BY s ORDER BY c ASC, s ASC, w ASC) FROM t limit toUInt64(-1)) WHERE c = -755809149; - -SELECT '---'; - -create table t_vkx4cc ( - c_ylzjpt Int32, - c_hqfr9 Bool , - ) engine = MergeTree order by c_ylzjpt; - -system stop merges t_vkx4cc; - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-2081147898, coalesce((NOT NOT(cast( (53 < 539704722) as Nullable(Bool)))), true)), (-1219769753, coalesce((true) and (false), false)), (-1981899149, coalesce(false, false)), (-1646738223, coalesce((NOT NOT(cast( (23.5 <= -26) as Nullable(Bool)))), false)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (255, coalesce(false, false)), (-1317193174, coalesce(false, false)), (-41, coalesce(true, false)), (1929066636, coalesce(false, true)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-1700730666, coalesce((NOT NOT(cast( (-2022515471055597472 AND -29) as Nullable(Bool)))), false)), (1664059402, coalesce((NOT NOT(cast( (-19643 >= -122) as Nullable(Bool)))), false)), (1688303275, coalesce((NOT NOT(cast( (737275892 < 105) as Nullable(Bool)))), true)), (406615258, coalesce((NOT NOT(cast( (-657730213 = 82.86) as Nullable(Bool)))), false)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-2, coalesce(false, false)), (1962246186, coalesce(true, false)), (-991301833, coalesce(true, true)), (2054878592, coalesce(false, false)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (643897141, coalesce((NOT NOT(cast( 
(-60 AND cast(null as Nullable(Int64))) as Nullable(Bool)))), true)), (-2051538534, coalesce(((-1616816511 between 332225780 and -1883087387)) or ((-573375170 between -1427445977 and 615586748)), false)), (77089559, coalesce((NOT NOT(cast( ((true) and (true) != 925456787) as Nullable(Bool)))), false)), (1116921321, coalesce((0 is NULL), true)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-816935497, coalesce(false, false)), (1207796283, coalesce((-129 between -5 and -5), false)), (-1365934326, coalesce(true, false)), (-1618912877, coalesce((NOT NOT(cast( (false >= 31833) as Nullable(Bool)))), false)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-331834394, coalesce((NOT NOT(cast( (-63 <= -1822810052) as Nullable(Bool)))), true)), (-1020892864, coalesce((NOT NOT(cast( (40.31 <= 8146037365746019777) as Nullable(Bool)))), true)), (-1150980622, coalesce(((94019304 between -730556489 and 32)) and ((-956354236 is not NULL)), true)), (-1203382363, coalesce(true, true)); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9) values (-653505826, coalesce((true) or (true), false)), (-1975508531, coalesce(((-796885845 between 65536 and cast(null as Nullable(Int32)))) or ((NOT NOT(cast( (-7467729336434250795 < 100.20) as Nullable(Bool))))), false)), (-1465484835, coalesce(((NOT NOT(cast( (19209 <= 75.96) as Nullable(Bool))))) or (true), false)), (1968095908, coalesce((NOT NOT(cast( (-1309960412156062327 > 13102) as Nullable(Bool)))), true)); - -alter table t_vkx4cc add column c_zosphq2t1 Float64; - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-153185515, coalesce((NOT NOT(cast( (1291639145 >= 30.22) as Nullable(Bool)))), false), -1.8), (-411038390, coalesce(((-762326135 between 16 and 177530758)) or (false), true), 26.34), (914629990, coalesce((-1125832977 is not NULL), true), 59.2), (541758331, coalesce(false, true), -255.1); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (2125075305, coalesce(false, false), 55.36), (-1176267855, coalesce(true, true), 55.45), (1459407556, coalesce((true) and ((NOT NOT(cast( (95.96 != 65) as Nullable(Bool))))), true), 85.80), (-1098155311, coalesce(false, false), 2147483649.9); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (6, coalesce((NOT NOT(cast( (1546334968 < -4) as Nullable(Bool)))), true), 57.42), (-5, coalesce((NOT NOT(cast( (59 AND 13) as Nullable(Bool)))), false), 65536.3), (100663045, coalesce((-1190355242 is not NULL), true), 73.80), (-451392958, coalesce((NOT NOT(cast( (false != -443845933) as Nullable(Bool)))), false), -4294967294.0); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (561061873, coalesce(true, false), 12.17), (-526570556, coalesce(false, false), 64.73), (-1450619195, coalesce(true, true), 54.33), (-3, coalesce(true, true), 52.9); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-504713125, coalesce(false, true), 27.58), (897064234, coalesce((836516994 between cast(null as Nullable(Int32)) and -1832647080), true), 9223372036854775809.2), (65535, coalesce(true, true), 4294967297.5), (-599948807, coalesce((false) or ((NOT NOT(cast( (6.52 = 65.49) as Nullable(Bool))))), false), 256.5); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-1650266905, coalesce((NOT NOT(cast( (-83 = -218055084) as Nullable(Bool)))), true), 1.9), (-841067875, coalesce(false, true), -126.5), (15, coalesce(((NOT NOT(cast( (cast(null as Nullable(Decimal)) = cast(null as Nullable(Int32))) as Nullable(Bool))))) or (true), true), 33.65), (1913361922, coalesce((NOT 
NOT(cast( (false AND 0) as Nullable(Bool)))), false), 6.4); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (1159852204, coalesce((-2057115045 is not NULL), false), 20.61), (-6, coalesce(true, true), 66.33), (-1154269118, coalesce(false, true), 8.89), (1258218855, coalesce(true, false), 19.80); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (1603772265, coalesce(false, true), 57.87), (-176934810, coalesce(false, true), 128.8), (-1458338029, coalesce((NOT NOT(cast( (20908 != (NOT NOT(cast( (cast(null as Nullable(Decimal)) <= (true) or ((NOT NOT(cast( (973511022 <= -112) as Nullable(Bool)))))) as Nullable(Bool))))) as Nullable(Bool)))), true), 76.54), (-262516786, coalesce((cast(null as Nullable(Int32)) is NULL), false), 21.49); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (-1197430632, coalesce(true, false), 45.40), (-685902265, coalesce((NOT NOT(cast( (cast(null as Nullable(Decimal)) < cast(null as Nullable(Decimal))) as Nullable(Bool)))), true), 5.55), (1936334332, coalesce((-1565552735 is not NULL), false), 26.28), (2030467062, coalesce((NOT NOT(cast( (127.3 != cast(null as Nullable(Int32))) as Nullable(Bool)))), true), 89.50); - -insert into t_vkx4cc (c_ylzjpt, c_hqfr9, c_zosphq2t1) values (720985423, coalesce((NOT NOT(cast( (-451448940 = cast(null as Nullable(Decimal))) as Nullable(Bool)))), false), 52.65), (-222873194, coalesce(((-20 between -1419620477 and 1616455043)) or ((25624502 between 1312431316 and 1757361651)), false), 127.2), (745669725, coalesce((NOT NOT(cast( ((NOT NOT(cast( (cast(null as Nullable(UInt64)) <= 42) as Nullable(Bool)))) >= 3233811255032796928) as Nullable(Bool)))), false), 7.74), (-74234560, coalesce((NOT NOT(cast( (cast(null as Nullable(Decimal)) >= cast(null as Nullable(Decimal))) as Nullable(Bool)))), true), 19.25); - -SELECT DISTINCT - count(ref_0.c_zosphq2t1) over (partition by ref_0.c_hqfr9 order by ref_0.c_ylzjpt, ref_0.c_hqfr9, ref_0.c_zosphq2t1) as c0, - ref_0.c_ylzjpt as c1 -FROM - t_vkx4cc as ref_0 - order by c0, c1; From 959ac9a768530cb9e2c7b013df37bfee000a1644 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 18 Jul 2024 13:16:35 +0200 Subject: [PATCH 0486/2361] ci: dump dmesg in case of OOM Without additional info it is unclear how to tune parallelism or maybe split some modules.
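For illustration only, and not part of this patch: a minimal sketch of how an OOM kill can be recognized from "dmesg -T" output. The helper name and the exact message patterns below are assumptions for the sketch, not the CI's real implementation.

    # Illustrative sketch: scan kernel messages for evidence of the OOM killer.
    import re
    import subprocess

    def looks_killed_with_oom() -> bool:
        # "sudo dmesg -T" prints kernel messages with human-readable timestamps.
        result = subprocess.run(
            ["sudo", "dmesg", "-T"], capture_output=True, text=True, check=False
        )
        # Typical traces: "Out of memory: Killed process ..." or "... invoked oom-killer".
        return bool(re.search(r"Out of memory: Killed process|oom-killer", result.stdout))

In the CI itself this decision is made by Utils.is_killed_with_oom(); the sketch only shows the kind of dmesg evidence the dumped log is meant to expose.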
Signed-off-by: Azat Khuzhin --- tests/ci/ci.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index cf285f4b97d..c5ad97088aa 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1281,7 +1281,8 @@ def main() -> int: except ValueError: pass if Utils.is_killed_with_oom(): - print("WARNING: OOM while job execution") + print("WARNING: OOM while job execution:") + print(subprocess.run("sudo dmesg -T", shell=True, check=False)) error = f"Out Of Memory, exit_code {job_report.exit_code}, after {int(job_report.duration)}s" else: error = f"Unknown, exit_code {job_report.exit_code}, after {int(job_report.duration)}s" From 85714e7d7e18a9d91dfe385b658e1ae1ddedb2ea Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 18 Jul 2024 11:31:23 +0000 Subject: [PATCH 0487/2361] fix asof join on nulls --- .../Transforms/MergeJoinTransform.cpp | 10 ++- .../0_stateless/00976_asof_join_on.reference | 78 ++++++++++++++++++- .../0_stateless/00976_asof_join_on.sql.j2 | 7 +- 3 files changed, 88 insertions(+), 7 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 3b69ddaec06..7bdb3d4f30f 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -257,8 +257,14 @@ JoinKeyRow::JoinKeyRow(const FullMergeJoinCursor & cursor, size_t pos) new_col->insertFrom(*col, pos); row.push_back(std::move(new_col)); } - if (const auto * asof_column = cursor.getAsofColumn()) + if (const IColumn * asof_column = cursor.getAsofColumn()) { + if (const auto * nullable_asof_column = checkAndGetColumn(asof_column)) + { + /// We save the matched column; since NULL does not match anything, we can't use it as a key + chassert(!nullable_asof_column->isNullAt(pos)); + asof_column = nullable_asof_column->getNestedColumnPtr().get(); + } auto new_col = asof_column->cloneEmpty(); new_col->insertFrom(*asof_column, pos); row.push_back(std::move(new_col)); @@ -1174,7 +1180,6 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() if (!cursors[1]->cursor.isValid() && !cursors[1]->fullyCompleted()) return Status(1); - if (auto result = handleAllJoinState()) return std::move(*result); @@ -1183,7 +1188,6 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge() if (cursors[0]->fullyCompleted() || cursors[1]->fullyCompleted()) { - if (!cursors[0]->fullyCompleted() && isLeftOrFull(kind)) return Status(createBlockWithDefaults(0)); diff --git a/tests/queries/0_stateless/00976_asof_join_on.reference b/tests/queries/0_stateless/00976_asof_join_on.reference index e13745bed9d..80af4287d3c 100644 --- a/tests/queries/0_stateless/00976_asof_join_on.reference +++ b/tests/queries/0_stateless/00976_asof_join_on.reference @@ -1,4 +1,4 @@ -- default - +- default / join_use_nulls = 0 -
3 1 4 +2 1 2 3 +2 2 2 3 +2 3 2 3 +- +1 1 1 2 +1 2 1 2 +1 3 1 4 +2 1 2 3 +2 2 2 3 +2 3 2 3 +- +1 3 1 2 +- +1 1 1 2 +1 2 1 4 +1 3 1 4 +2 1 2 3 +2 2 2 3 +1 2 1 2 diff --git a/tests/queries/0_stateless/00976_asof_join_on.sql.j2 b/tests/queries/0_stateless/00976_asof_join_on.sql.j2 index aecf472a36c..ea642366a05 100644 --- a/tests/queries/0_stateless/00976_asof_join_on.sql.j2 +++ b/tests/queries/0_stateless/00976_asof_join_on.sql.j2 @@ -7,11 +7,13 @@ CREATE TABLE B(b UInt32, t UInt32) ENGINE = Memory; INSERT INTO A (a,t) VALUES (1,1),(1,2),(1,3), (2,1),(2,2),(2,3), (3,1),(3,2),(3,3); INSERT INTO B (b,t) VALUES (1,2),(1,4),(2,3); +{% for join_use_nulls in [0, 1] -%} {% for join_algorithm in ['default', 'full_sorting_merge'] -%} SET join_algorithm = '{{ join_algorithm }}'; -SELECT '- {{ join_algorithm }} -'; +SELECT '- {{ join_algorithm }} / join_use_nulls = {{ join_use_nulls }} -'; +set join_use_nulls = {{ join_use_nulls }}; SELECT A.a, A.t, B.b, B.t FROM A ASOF LEFT JOIN B ON A.a == B.b AND A.t >= B.t ORDER BY (A.a, A.t); SELECT count() FROM A ASOF LEFT JOIN B ON A.a == B.b AND B.t <= A.t; @@ -34,7 +36,8 @@ ASOF INNER JOIN (SELECT * FROM B UNION ALL SELECT 1, 3) AS B ON B.t <= A.t AND A WHERE B.t != 3 ORDER BY (A.a, A.t) ; -{% endfor %} +{% endfor -%} +{% endfor -%} DROP TABLE A; DROP TABLE B; From 884dc496a0a978074d3e0bd70f4df8d0225e69c1 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Thu, 18 Jul 2024 20:58:37 +0800 Subject: [PATCH 0488/2361] format --- .../test_disable_insertion_and_mutation/test.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tests/integration/test_disable_insertion_and_mutation/test.py b/tests/integration/test_disable_insertion_and_mutation/test.py index f25964d27b8..b6431690245 100644 --- a/tests/integration/test_disable_insertion_and_mutation/test.py +++ b/tests/integration/test_disable_insertion_and_mutation/test.py @@ -35,7 +35,9 @@ def started_cluster(): def test_disable_insertion_and_mutation(started_cluster): - writing_node.query("""CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) """) + writing_node.query( + """CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) """ + ) assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( "INSERT INTO my_table VALUES (1, 'hello')" @@ -58,7 +60,7 @@ def test_disable_insertion_and_mutation(started_cluster): "ALTER TABLE my_table drop partition 0" ) - reading_node.query("SELECT * from my_table"); + reading_node.query("SELECT * from my_table") writing_node.query("INSERT INTO my_table VALUES (1, 'hello')") writing_node.query("ALTER TABLE my_table delete where 1") writing_node.query("ALTER table my_table update value = 'no hello' where 1") @@ -69,10 +71,6 @@ def test_disable_insertion_and_mutation(started_cluster): reading_node.query("ALter Table my_table MODIFY COLUMN new_column String") - assert "new_column\tString" in reading_node.query( - "DESC my_table" - ) + assert "new_column\tString" in reading_node.query("DESC my_table") - assert "new_column\tString" in writing_node.query( - "DESC my_table" - ) + assert "new_column\tString" in writing_node.query("DESC my_table") From ab2522b17020f1fb31b760f0594784cd58468797 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 18 Jul 2024 13:17:34 +0000 Subject: [PATCH 0489/2361] 
fix asof join on nulls --- src/Processors/Transforms/MergeJoinTransform.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 7bdb3d4f30f..e96a75d277b 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -301,7 +301,11 @@ bool JoinKeyRow::asofMatch(const FullMergeJoinCursor & cursor, ASOFJoinInequalit if (isNullAt(*asof_row, 0) || isNullAt(*cursor.getAsofColumn(), cursor->getRow())) return false; - int cmp = cursor.getAsofColumn()->compareAt(cursor->getRow(), 0, *asof_row, 1); + int cmp = 0; + if (const auto * nullable_column = checkAndGetColumn(cursor.getAsofColumn())) + cmp = nullable_column->getNestedColumn().compareAt(cursor->getRow(), 0, *asof_row, 1); + else + cmp = cursor.getAsofColumn()->compareAt(cursor->getRow(), 0, *asof_row, 1); return (asof_inequality == ASOFJoinInequality::Less && cmp < 0) || (asof_inequality == ASOFJoinInequality::LessOrEquals && cmp <= 0) From c01e2cbeea02ebecfc4dea4692baffff3087b043 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Thu, 18 Jul 2024 22:58:30 +0800 Subject: [PATCH 0490/2361] format --- tests/integration/test_disable_insertion_and_mutation/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_disable_insertion_and_mutation/test.py b/tests/integration/test_disable_insertion_and_mutation/test.py index b6431690245..b6da7ed548f 100644 --- a/tests/integration/test_disable_insertion_and_mutation/test.py +++ b/tests/integration/test_disable_insertion_and_mutation/test.py @@ -50,7 +50,6 @@ def test_disable_insertion_and_mutation(started_cluster): assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( "ALTER TABLE my_table delete where 1" ) - assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error( "ALTER table my_table update key = 1 where 1" From cda846339be22c66cd0d35d49273a314fa3bdf69 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Jul 2024 15:12:32 +0000 Subject: [PATCH 0491/2361] Remove ActionsDAG::clone --- src/Interpreters/ActionsDAG.cpp | 8 ---- src/Interpreters/ActionsDAG.h | 3 -- src/Interpreters/ExpressionAnalyzer.cpp | 9 +---- src/Interpreters/InterpreterSelectQuery.cpp | 24 ++++++------ src/Interpreters/MutationsInterpreter.cpp | 4 +- src/Planner/Planner.cpp | 38 +++++++++---------- src/Planner/PlannerExpressionAnalysis.h | 16 ++++---- src/Planner/PlannerJoins.cpp | 4 +- src/Processors/QueryPlan/ExpressionStep.cpp | 4 +- src/Processors/QueryPlan/FilterStep.cpp | 4 +- .../Optimizations/distinctReadInOrder.cpp | 10 ++--- .../Optimizations/filterPushDown.cpp | 4 +- .../QueryPlan/Optimizations/liftUpUnion.cpp | 2 +- .../optimizePrimaryKeyConditionAndLimit.cpp | 6 +-- .../Optimizations/optimizeReadInOrder.cpp | 28 +++++++------- .../optimizeUseAggregateProjection.cpp | 4 +- .../Optimizations/projectionsCommon.cpp | 7 ++-- .../Optimizations/removeRedundantDistinct.cpp | 20 +++++----- .../QueryPlan/ReadFromMergeTree.cpp | 16 ++++---- .../QueryPlan/SourceStepWithFilter.cpp | 8 ++-- src/Processors/QueryPlan/TotalsHavingStep.cpp | 22 +++++++---- .../Transforms/FillingTransform.cpp | 2 +- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 4 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +- src/Storages/SelectQueryInfo.h | 4 +- src/Storages/StorageBuffer.cpp | 10 +++-- src/Storages/StorageMerge.cpp | 8 ++-- src/Storages/VirtualColumnUtils.cpp | 2 +- 28 files changed, 135 insertions(+), 140 deletions(-) diff 
--git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index e001406408f..53e04f24829 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1246,14 +1246,6 @@ bool ActionsDAG::removeUnusedResult(const std::string & column_name) return true; } -ActionsDAGPtr ActionsDAG::clone(const ActionsDAG * from) -{ - std::unordered_map old_to_new_nodes; - if (from == nullptr) - return nullptr; - return std::make_unique(from->clone(old_to_new_nodes)); -} - ActionsDAG ActionsDAG::clone() const { std::unordered_map old_to_new_nodes; diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 6f5c3d3b0df..6f6c3f9bccb 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -261,8 +261,6 @@ public: void compileExpressions(size_t min_count_to_compile_expression, const std::unordered_set & lazy_executed_nodes = {}); #endif - static ActionsDAGPtr clone(const ActionsDAG * from); - ActionsDAG clone(std::unordered_map & old_to_new_nodes) const; ActionsDAG clone() const; @@ -491,7 +489,6 @@ public: const ActionsDAG::Node * find(const String & output_name); private: - //const ActionsDAG & actions; NameToNodeIndex index; }; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 0f350602777..6b5b129085d 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1944,10 +1944,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( Block before_prewhere_sample = source_header; if (sanitizeBlock(before_prewhere_sample)) { - ActionsDAG dag = std::move(*ActionsDAG::clone(&prewhere_dag_and_flags->dag)); - ExpressionActions( - std::move(dag), - ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); + prewhere_dag_and_flags->dag.updateHeader(before_prewhere_sample); auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); /// If the filter column is a constant, record it. if (column_elem.column) @@ -1979,9 +1976,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( before_where_sample = source_header; if (sanitizeBlock(before_where_sample)) { - ExpressionActions( - std::move(*ActionsDAG::clone(&before_where->dag)), - ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); + before_where->dag.updateHeader(before_where_sample); auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index c85eb8310dc..e0073a6af5d 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1501,7 +1501,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&expressions.filter_info->actions)), + expressions.filter_info->actions.clone(), expressions.filter_info->column_name, expressions.filter_info->do_remove_column); @@ -1515,7 +1515,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&*expressions.prewhere_info->row_level_filter)), + expressions.prewhere_info->row_level_filter->clone(), expressions.prewhere_info->row_level_column_name, true); @@ -1525,7 +1525,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&*expressions.prewhere_info->prewhere_actions)), + expressions.prewhere_info->prewhere_actions->clone(), expressions.prewhere_info->prewhere_column_name, expressions.prewhere_info->remove_prewhere_column); @@ -1627,7 +1627,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&expressions.filter_info->actions)), + expressions.filter_info->actions.clone(), expressions.filter_info->column_name, expressions.filter_info->do_remove_column); @@ -2056,20 +2056,22 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c { auto & prewhere_info = *query_info.prewhere_info; + auto row_level_actions = std::make_shared(prewhere_info.row_level_filter->clone()); if (prewhere_info.row_level_filter) { pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, - std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info.row_level_filter))), + row_level_actions, prewhere_info.row_level_column_name, true); }); } + auto filter_actions = std::make_shared(prewhere_info.prewhere_actions->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( - header, std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info.prewhere_actions))), + header, filter_actions, prewhere_info.prewhere_column_name, prewhere_info.remove_prewhere_column); }); } @@ -2589,7 +2591,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc /// Aliases in table declaration. if (processing_stage == QueryProcessingStage::FetchColumns && alias_actions) { - auto table_aliases = std::make_unique(query_plan.getCurrentDataStream(), std::move(*ActionsDAG::clone(&*alias_actions))); + auto table_aliases = std::make_unique(query_plan.getCurrentDataStream(), alias_actions->clone()); table_aliases->setStepDescription("Add table aliases"); query_plan.addStep(std::move(table_aliases)); } @@ -2597,7 +2599,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter) { - auto dag = std::move(*ActionsDAG::clone(&expression->dag)); + auto dag = expression->dag.clone(); if (expression->project_input) dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); @@ -2771,7 +2773,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool remove_filter) { - auto dag = std::move(*ActionsDAG::clone(&expression->dag)); + auto dag = expression->dag.clone(); if (expression->project_input) dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); @@ -2789,7 +2791,7 @@ void InterpreterSelectQuery::executeTotalsAndHaving( std::optional dag; if (expression) { - dag = std::move(*ActionsDAG::clone(&expression->dag)); + dag = expression->dag.clone(); if (expression->project_input) dag->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); } @@ -2838,7 +2840,7 @@ void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const Act if (!expression) return; - ActionsDAG dag = std::move(*ActionsDAG::clone(&expression->dag)); + auto dag = expression->dag.clone(); if (expression->project_input) dag.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 5b710149d85..57ad5caa4c7 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1281,7 +1281,7 @@ QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::v if (i < 
stage.filter_column_names.size()) { - auto dag = std::move(*ActionsDAG::clone(&step->actions()->dag)); + auto dag = step->actions()->dag.clone(); if (step->actions()->project_input) dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute DELETEs. @@ -1289,7 +1289,7 @@ QueryPipelineBuilder MutationsInterpreter::addStreamsForLaterStages(const std::v } else { - auto dag = std::move(*ActionsDAG::clone(&step->actions()->dag)); + auto dag = step->actions()->dag.clone(); if (step->actions()->project_input) dag.appendInputsForUnusedColumns(plan.getCurrentDataStream().header); /// Execute UPDATE or final projection. diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index e087c3691b4..fb721069e6e 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -333,11 +333,11 @@ public: }; void addExpressionStep(QueryPlan & query_plan, - const ActionsAndProjectInputsFlagPtr & expression_actions, + ActionsAndProjectInputsFlagPtr & expression_actions, const std::string & step_description, UsefulSets & useful_sets) { - auto actions = std::move(*ActionsDAG::clone(&expression_actions->dag)); + auto actions = std::move(expression_actions->dag); if (expression_actions->project_input) actions.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); @@ -348,11 +348,11 @@ void addExpressionStep(QueryPlan & query_plan, } void addFilterStep(QueryPlan & query_plan, - const FilterAnalysisResult & filter_analysis_result, + FilterAnalysisResult & filter_analysis_result, const std::string & step_description, UsefulSets & useful_sets) { - auto actions = std::move(*ActionsDAG::clone(&filter_analysis_result.filter_actions->dag)); + auto actions = std::move(filter_analysis_result.filter_actions->dag); if (filter_analysis_result.filter_actions->project_input) actions.appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); @@ -544,7 +544,7 @@ void addMergingAggregatedStep(QueryPlan & query_plan, } void addTotalsHavingStep(QueryPlan & query_plan, - const PlannerExpressionsAnalysisResult & expression_analysis_result, + PlannerExpressionsAnalysisResult & expression_analysis_result, const QueryAnalysisResult & query_analysis_result, const PlannerContextPtr & planner_context, const QueryNode & query_node, @@ -553,14 +553,14 @@ void addTotalsHavingStep(QueryPlan & query_plan, const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - const auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); - const auto & having_analysis_result = expression_analysis_result.getHaving(); + auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); + auto & having_analysis_result = expression_analysis_result.getHaving(); bool need_finalize = !query_node.isGroupByWithRollup() && !query_node.isGroupByWithCube(); std::optional actions; if (having_analysis_result.filter_actions) { - actions = std::move(*ActionsDAG::clone(&having_analysis_result.filter_actions->dag)); + actions = std::move(having_analysis_result.filter_actions->dag); if (having_analysis_result.filter_actions->project_input) actions->appendInputsForUnusedColumns(query_plan.getCurrentDataStream().header); } @@ -886,7 +886,7 @@ bool addPreliminaryLimitOptimizationStepIfNeeded(QueryPlan & query_plan, * WINDOW functions. 
*/ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, - const PlannerExpressionsAnalysisResult & expressions_analysis_result, + PlannerExpressionsAnalysisResult & expressions_analysis_result, const QueryAnalysisResult & query_analysis_result, const PlannerContextPtr & planner_context, const PlannerQueryProcessingInfo & query_processing_info, @@ -922,7 +922,7 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, if (expressions_analysis_result.hasLimitBy()) { - const auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy(); + auto & limit_by_analysis_result = expressions_analysis_result.getLimitBy(); addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets); addLimitByStep(query_plan, limit_by_analysis_result, query_node); } @@ -1549,7 +1549,7 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasAggregation()) { - const auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); + auto & aggregation_analysis_result = expression_analysis_result.getAggregation(); if (aggregation_analysis_result.before_aggregation_actions) addExpressionStep(query_plan, aggregation_analysis_result.before_aggregation_actions, "Before GROUP BY", useful_sets); @@ -1568,7 +1568,7 @@ void Planner::buildPlanForQueryNode() * window functions, we can't execute ORDER BY and DISTINCT * now, on shard (first_stage). */ - const auto & window_analysis_result = expression_analysis_result.getWindow(); + auto & window_analysis_result = expression_analysis_result.getWindow(); if (window_analysis_result.before_window_actions) addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before WINDOW", useful_sets); } @@ -1578,7 +1578,7 @@ void Planner::buildPlanForQueryNode() * Projection expressions, preliminary DISTINCT and before ORDER BY expressions * now, on shards (first_stage). 
*/ - const auto & projection_analysis_result = expression_analysis_result.getProjection(); + auto & projection_analysis_result = expression_analysis_result.getProjection(); addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", useful_sets); if (query_node.isDistinct()) @@ -1594,7 +1594,7 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasSort()) { - const auto & sort_analysis_result = expression_analysis_result.getSort(); + auto & sort_analysis_result = expression_analysis_result.getSort(); addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", useful_sets); } } @@ -1648,7 +1648,7 @@ void Planner::buildPlanForQueryNode() { if (expression_analysis_result.hasWindow()) { - const auto & window_analysis_result = expression_analysis_result.getWindow(); + auto & window_analysis_result = expression_analysis_result.getWindow(); if (expression_analysis_result.hasAggregation()) addExpressionStep(query_plan, window_analysis_result.before_window_actions, "Before window functions", useful_sets); @@ -1658,7 +1658,7 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasQualify()) addFilterStep(query_plan, expression_analysis_result.getQualify(), "QUALIFY", useful_sets); - const auto & projection_analysis_result = expression_analysis_result.getProjection(); + auto & projection_analysis_result = expression_analysis_result.getProjection(); addExpressionStep(query_plan, projection_analysis_result.projection_actions, "Projection", useful_sets); if (query_node.isDistinct()) @@ -1674,7 +1674,7 @@ void Planner::buildPlanForQueryNode() if (expression_analysis_result.hasSort()) { - const auto & sort_analysis_result = expression_analysis_result.getSort(); + auto & sort_analysis_result = expression_analysis_result.getSort(); addExpressionStep(query_plan, sort_analysis_result.before_order_by_actions, "Before ORDER BY", useful_sets); } } @@ -1727,7 +1727,7 @@ void Planner::buildPlanForQueryNode() if (!query_processing_info.isFromAggregationState() && expression_analysis_result.hasLimitBy()) { - const auto & limit_by_analysis_result = expression_analysis_result.getLimitBy(); + auto & limit_by_analysis_result = expression_analysis_result.getLimitBy(); addExpressionStep(query_plan, limit_by_analysis_result.before_limit_by_actions, "Before LIMIT BY", useful_sets); addLimitByStep(query_plan, limit_by_analysis_result, query_node); } @@ -1759,7 +1759,7 @@ void Planner::buildPlanForQueryNode() /// Project names is not done on shards, because initiator will not find columns in blocks if (!query_processing_info.isToAggregationState()) { - const auto & projection_analysis_result = expression_analysis_result.getProjection(); + auto & projection_analysis_result = expression_analysis_result.getProjection(); addExpressionStep(query_plan, projection_analysis_result.project_names_actions, "Project names", useful_sets); } diff --git a/src/Planner/PlannerExpressionAnalysis.h b/src/Planner/PlannerExpressionAnalysis.h index 820df7131a7..283fcac7aba 100644 --- a/src/Planner/PlannerExpressionAnalysis.h +++ b/src/Planner/PlannerExpressionAnalysis.h @@ -64,7 +64,7 @@ public: : projection_analysis_result(std::move(projection_analysis_result_)) {} - const ProjectionAnalysisResult & getProjection() const + ProjectionAnalysisResult & getProjection() { return projection_analysis_result; } @@ -74,7 +74,7 @@ public: return where_analysis_result.filter_actions != nullptr; } - const FilterAnalysisResult & getWhere() const + 
FilterAnalysisResult & getWhere() { return where_analysis_result; } @@ -89,7 +89,7 @@ public: return !aggregation_analysis_result.aggregation_keys.empty() || !aggregation_analysis_result.aggregate_descriptions.empty(); } - const AggregationAnalysisResult & getAggregation() const + AggregationAnalysisResult & getAggregation() { return aggregation_analysis_result; } @@ -104,7 +104,7 @@ public: return having_analysis_result.filter_actions != nullptr; } - const FilterAnalysisResult & getHaving() const + FilterAnalysisResult & getHaving() { return having_analysis_result; } @@ -119,7 +119,7 @@ public: return !window_analysis_result.window_descriptions.empty(); } - const WindowAnalysisResult & getWindow() const + WindowAnalysisResult & getWindow() { return window_analysis_result; } @@ -134,7 +134,7 @@ public: return qualify_analysis_result.filter_actions != nullptr; } - const FilterAnalysisResult & getQualify() const + FilterAnalysisResult & getQualify() { return qualify_analysis_result; } @@ -149,7 +149,7 @@ public: return sort_analysis_result.before_order_by_actions != nullptr; } - const SortAnalysisResult & getSort() const + SortAnalysisResult & getSort() { return sort_analysis_result; } @@ -164,7 +164,7 @@ public: return limit_by_analysis_result.before_limit_by_actions != nullptr; } - const LimitByAnalysisResult & getLimitBy() const + LimitByAnalysisResult & getLimitBy() { return limit_by_analysis_result; } diff --git a/src/Planner/PlannerJoins.cpp b/src/Planner/PlannerJoins.cpp index 4cf1b138bed..5acff9dac82 100644 --- a/src/Planner/PlannerJoins.cpp +++ b/src/Planner/PlannerJoins.cpp @@ -591,10 +591,10 @@ JoinClausesAndActions buildJoinClausesAndActions( } } - result.left_join_expressions_actions = std::move(*ActionsDAG::clone(&left_join_actions)); + result.left_join_expressions_actions = left_join_actions.clone(); result.left_join_tmp_expression_actions = std::move(left_join_actions); result.left_join_expressions_actions.removeUnusedActions(join_left_actions_names); - result.right_join_expressions_actions = std::move(*ActionsDAG::clone(&right_join_actions)); + result.right_join_expressions_actions = right_join_actions.clone(); result.right_join_tmp_expression_actions = std::move(right_join_actions); result.right_join_expressions_actions.removeUnusedActions(join_right_actions_names); diff --git a/src/Processors/QueryPlan/ExpressionStep.cpp b/src/Processors/QueryPlan/ExpressionStep.cpp index 94098f443d9..6f88c4527a4 100644 --- a/src/Processors/QueryPlan/ExpressionStep.cpp +++ b/src/Processors/QueryPlan/ExpressionStep.cpp @@ -61,13 +61,13 @@ void ExpressionStep::transformPipeline(QueryPipelineBuilder & pipeline, const Bu void ExpressionStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, settings.indent_char); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); + auto expression = std::make_shared(actions_dag.clone()); expression->describeActions(settings.out, prefix); } void ExpressionStep::describeActions(JSONBuilder::JSONMap & map) const { - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); + auto expression = std::make_shared(actions_dag.clone()); map.add("Expression", expression->toTree()); } diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 5f15c5defac..0c6b71387b7 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -87,7 +87,7 @@ void FilterStep::describeActions(FormatSettings & settings) 
const settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); + auto expression = std::make_shared(actions_dag.clone()); expression->describeActions(settings.out, prefix); } @@ -96,7 +96,7 @@ void FilterStep::describeActions(JSONBuilder::JSONMap & map) const map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag))); + auto expression = std::make_shared(actions_dag.clone()); map.add("Expression", expression->toTree()); } diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp index 8666912514e..37e61a6c388 100644 --- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp @@ -10,18 +10,18 @@ namespace DB::QueryPlanOptimizations { /// build actions DAG from stack of steps -static ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) +static std::optional buildActionsForPlanPath(std::vector & dag_stack) { if (dag_stack.empty()) - return nullptr; + return {}; - ActionsDAGPtr path_actions = ActionsDAG::clone(dag_stack.back()); + ActionsDAG path_actions = dag_stack.back()->clone(); dag_stack.pop_back(); while (!dag_stack.empty()) { - ActionsDAGPtr clone = ActionsDAG::clone(dag_stack.back()); + ActionsDAG clone = dag_stack.back()->clone(); dag_stack.pop_back(); - path_actions->mergeInplace(std::move(*clone)); + path_actions.mergeInplace(std::move(clone)); } return path_actions; } diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 411b20b1a32..73314f005b6 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -599,7 +599,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes filter_node.step = std::make_unique( filter_node.children.front()->step->getOutputStream(), - std::move(*ActionsDAG::clone(&filter->getExpression())), + filter->getExpression().clone(), filter->getFilterColumnName(), filter->removesFilterColumn()); } @@ -613,7 +613,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (auto * read_from_merge = typeid_cast(child.get())) { - FilterDAGInfo info{std::move(*ActionsDAG::clone(&filter->getExpression())), filter->getFilterColumnName(), filter->removesFilterColumn()}; + FilterDAGInfo info{filter->getExpression().clone(), filter->getFilterColumnName(), filter->removesFilterColumn()}; read_from_merge->addFilter(std::move(info)); std::swap(*parent_node, *child_node); return 1; diff --git a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp index 53f59198d0f..c48551732c9 100644 --- a/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp +++ b/src/Processors/QueryPlan/Optimizations/liftUpUnion.cpp @@ -49,7 +49,7 @@ size_t tryLiftUpUnion(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) expr_node.step = std::make_unique( expr_node.children.front()->step->getOutputStream(), - std::move(*ActionsDAG::clone(&expression->getExpression()))); + expression->getExpression().clone()); } /// - Expression - Something diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp 
b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp index 71a7ca327b1..63b4e019066 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp @@ -18,16 +18,16 @@ void optimizePrimaryKeyConditionAndLimit(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(ActionsDAG::clone(&*storage_prewhere_info->prewhere_actions), storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(std::make_unique(storage_prewhere_info->prewhere_actions->clone()), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) - source_step_with_filter->addFilter(ActionsDAG::clone(&*storage_prewhere_info->row_level_filter), storage_prewhere_info->row_level_column_name); + source_step_with_filter->addFilter(std::make_unique(storage_prewhere_info->row_level_filter->clone()), storage_prewhere_info->row_level_column_name); } for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) { - source_step_with_filter->addFilter(ActionsDAG::clone(&filter_step->getExpression()), filter_step->getFilterColumnName()); + source_step_with_filter->addFilter(std::make_unique(filter_step->getExpression().clone()), filter_step->getFilterColumnName()); } else if (auto * limit_step = typeid_cast(iter->node->step.get())) { diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 99aaef6d054..252420e19fe 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -171,17 +171,17 @@ static void appendFixedColumnsFromFilterExpression(const ActionsDAG::Node & filt } } -static void appendExpression(ActionsDAGPtr & dag, const ActionsDAG & expression) +static void appendExpression(std::optional & dag, const ActionsDAG & expression) { if (dag) - dag->mergeInplace(std::move(*ActionsDAG::clone(&expression))); + dag->mergeInplace(expression.clone()); else - dag = ActionsDAG::clone(&expression); + dag = expression.clone(); } /// This function builds a common DAG which is a merge of DAGs from Filter and Expression steps chain. /// Additionally, build a set of fixed columns. 
-void buildSortingDAG(QueryPlan::Node & node, ActionsDAGPtr & dag, FixedColumns & fixed_columns, size_t & limit) +void buildSortingDAG(QueryPlan::Node & node, std::optional & dag, FixedColumns & fixed_columns, size_t & limit) { IQueryPlanStep * step = node.step.get(); if (auto * reading = typeid_cast(step)) @@ -330,7 +330,7 @@ void enreachFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns) InputOrderInfoPtr buildInputOrderInfo( const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const SortDescription & description, const KeyDescription & sorting_key, size_t limit) @@ -507,7 +507,7 @@ struct AggregationInputOrder AggregationInputOrder buildInputOrderInfo( const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const Names & group_by_keys, const ActionsDAG & sorting_key_dag, const Names & sorting_key_columns) @@ -693,7 +693,7 @@ AggregationInputOrder buildInputOrderInfo( InputOrderInfoPtr buildInputOrderInfo( const ReadFromMergeTree * reading, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const SortDescription & description, size_t limit) { @@ -709,7 +709,7 @@ InputOrderInfoPtr buildInputOrderInfo( InputOrderInfoPtr buildInputOrderInfo( ReadFromMerge * merge, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const SortDescription & description, size_t limit) { @@ -745,7 +745,7 @@ InputOrderInfoPtr buildInputOrderInfo( AggregationInputOrder buildInputOrderInfo( ReadFromMergeTree * reading, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const Names & group_by_keys) { const auto & sorting_key = reading->getStorageMetadata()->getSortingKey(); @@ -760,7 +760,7 @@ AggregationInputOrder buildInputOrderInfo( AggregationInputOrder buildInputOrderInfo( ReadFromMerge * merge, const FixedColumns & fixed_columns, - const ActionsDAGPtr & dag, + const std::optional & dag, const Names & group_by_keys) { const auto & tables = merge->getSelectedTables(); @@ -801,7 +801,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n const auto & description = sorting.getSortDescription(); size_t limit = sorting.getLimit(); - ActionsDAGPtr dag; + std::optional dag; FixedColumns fixed_columns; buildSortingDAG(node, dag, fixed_columns, limit); @@ -855,7 +855,7 @@ AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPl const auto & keys = aggregating.getParams().keys; size_t limit = 0; - ActionsDAGPtr dag; + std::optional dag; FixedColumns fixed_columns; buildSortingDAG(node, dag, fixed_columns, limit); @@ -1076,13 +1076,13 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, for (const auto & actions_dag : window_desc.partition_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(std::move(*ActionsDAG::clone(actions_dag.get())), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } for (const auto & actions_dag : window_desc.order_by_actions) { order_by_elements_actions.emplace_back( - std::make_shared(std::move(*ActionsDAG::clone(actions_dag.get())), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); + std::make_shared(actions_dag->clone(), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes))); } auto 
order_optimizer = std::make_shared( diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 4448d4b7869..ad89cec5f79 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -43,7 +43,7 @@ static DAGIndex buildDAGIndex(const ActionsDAG & dag) /// Required analysis info from aggregate projection. struct AggregateProjectionInfo { - ActionsDAGPtr before_aggregation; + std::optional before_aggregation; Names keys; AggregateDescriptions aggregates; @@ -78,7 +78,7 @@ static AggregateProjectionInfo getAggregatingProjectionInfo( AggregateProjectionInfo info; info.context = interpreter.getContext(); - info.before_aggregation = ActionsDAG::clone(&analysis_result.before_aggregation->dag); + info.before_aggregation = analysis_result.before_aggregation->dag.clone(); info.keys = query_analyzer->aggregationKeys().getNames(); info.aggregates = query_analyzer->aggregates(); diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index ad76976becc..571d1dd0cc1 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -68,9 +68,9 @@ std::shared_ptr getMaxAddedBlocks(ReadFromMergeTree * rea void QueryDAG::appendExpression(const ActionsDAG & expression) { if (dag) - dag->mergeInplace(std::move(*ActionsDAG::clone(&expression))); + dag->mergeInplace(expression.clone()); else - dag = std::move(*ActionsDAG::clone(&expression)); + dag = expression.clone(); } const ActionsDAG::Node * findInOutputs(ActionsDAG & dag, const std::string & name, bool remove) @@ -239,7 +239,8 @@ bool analyzeProjectionCandidate( auto projection_query_info = query_info; projection_query_info.prewhere_info = nullptr; - projection_query_info.filter_actions_dag = ActionsDAG::clone(dag); + if (dag) + projection_query_info.filter_actions_dag = std::make_unique(dag->clone()); auto projection_result_ptr = reader.estimateNumMarksToRead( std::move(projection_parts), diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index d0acd8221d4..7664822cc7e 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -43,10 +43,10 @@ namespace } } - void logActionsDAG(const String & prefix, const ActionsDAGPtr & actions) + void logActionsDAG(const String & prefix, const ActionsDAG & actions) { if constexpr (debug_logging_enabled) - LOG_DEBUG(getLogger("redundantDistinct"), "{} :\n{}", prefix, actions->dumpDAG()); + LOG_DEBUG(getLogger("redundantDistinct"), "{} :\n{}", prefix, actions.dumpDAG()); } using DistinctColumns = std::set; @@ -65,19 +65,19 @@ namespace } /// build actions DAG from stack of steps - ActionsDAGPtr buildActionsForPlanPath(std::vector & dag_stack) + std::optional buildActionsForPlanPath(std::vector & dag_stack) { if (dag_stack.empty()) - return nullptr; + return {}; - ActionsDAGPtr path_actions = ActionsDAG::clone(dag_stack.back()); + ActionsDAG path_actions = dag_stack.back()->clone(); dag_stack.pop_back(); while (!dag_stack.empty()) { - ActionsDAGPtr clone = ActionsDAG::clone(dag_stack.back()); + ActionsDAG clone = dag_stack.back()->clone(); logActionsDAG("DAG to 
merge", clone); dag_stack.pop_back(); - path_actions->mergeInplace(std::move(*clone)); + path_actions.mergeInplace(std::move(clone)); } return path_actions; } @@ -260,15 +260,15 @@ namespace if (distinct_columns.size() != inner_distinct_columns.size()) return false; - ActionsDAGPtr path_actions; + ActionsDAG path_actions; if (!dag_stack.empty()) { /// build actions DAG to find original column names - path_actions = buildActionsForPlanPath(dag_stack); + path_actions = std::move(*buildActionsForPlanPath(dag_stack)); logActionsDAG("distinct pass: merged DAG", path_actions); /// compare columns of two DISTINCTs - FindOriginalNodeForOutputName original_node_finder(*path_actions); + FindOriginalNodeForOutputName original_node_finder(path_actions); for (const auto & column : distinct_columns) { const auto * alias_node = original_node_finder.find(String(column)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index add53f9d6b3..5dda4ddc18b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -802,7 +802,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreams(RangesInDataParts && parts_ info.use_uncompressed_cache); }; - auto sorting_expr = std::make_shared(std::move(*ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG()))); + auto sorting_expr = metadata_for_reading->getSortingKey().expression; SplitPartsWithRangesByPrimaryKeyResult split_ranges_result = splitPartsWithRangesByPrimaryKey( metadata_for_reading->getPrimaryKey(), @@ -1215,7 +1215,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsFinal( /// we will store lonely parts with level > 0 to use parallel select on them. RangesInDataParts non_intersecting_parts_by_primary_key; - auto sorting_expr = std::make_shared(std::move(*ActionsDAG::clone(&metadata_for_reading->getSortingKey().expression->getActionsDAG()))); + auto sorting_expr = metadata_for_reading->getSortingKey().expression; if (prewhere_info) { @@ -1523,7 +1523,7 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes) /// TODO: Get rid of filter_actions_dag in query_info after we move analysis of /// parallel replicas and unused shards into optimization, similar to projection analysis. 
if (filter_actions_dag) - query_info.filter_actions_dag = std::make_shared(std::move(*ActionsDAG::clone(&*filter_actions_dag))); + query_info.filter_actions_dag = std::make_shared(std::move(*filter_actions_dag)); buildIndexes( indexes, @@ -2004,7 +2004,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons if (result.sampling.use_sampling) { - auto sampling_actions = std::make_shared(std::move(*ActionsDAG::clone(result.sampling.filter_expression.get()))); + auto sampling_actions = std::make_shared(result.sampling.filter_expression->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( @@ -2137,7 +2137,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); expression->describeActions(format_settings.out, prefix); } @@ -2146,7 +2146,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); expression->describeActions(format_settings.out, prefix); } } @@ -2172,7 +2172,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -2182,7 +2182,7 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.cpp b/src/Processors/QueryPlan/SourceStepWithFilter.cpp index 55c9b5e442e..b91debc8239 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.cpp +++ b/src/Processors/QueryPlan/SourceStepWithFilter.cpp @@ -110,7 +110,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); expression->describeActions(format_settings.out, prefix); } @@ -119,7 +119,7 
@@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << prefix << "Row level filter" << '\n'; format_settings.out << prefix << "Row level filter column: " << prewhere_info->row_level_column_name << '\n'; - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); expression->describeActions(format_settings.out, prefix); } } @@ -137,7 +137,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions))); + auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); @@ -147,7 +147,7 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const { std::unique_ptr row_level_filter_map = std::make_unique(); row_level_filter_map->add("Row level filter column", prewhere_info->row_level_column_name); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter))); + auto expression = std::make_shared(prewhere_info->row_level_filter->clone()); row_level_filter_map->add("Row level filter expression", expression->toTree()); prewhere_info_map->add("Row level filter", std::move(row_level_filter_map)); diff --git a/src/Processors/QueryPlan/TotalsHavingStep.cpp b/src/Processors/QueryPlan/TotalsHavingStep.cpp index 70457918de1..2554053064f 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.cpp +++ b/src/Processors/QueryPlan/TotalsHavingStep.cpp @@ -101,13 +101,16 @@ void TotalsHavingStep::describeActions(FormatSettings & settings) const if (actions_dag) { bool first = true; - auto expression = std::make_shared(std::move(*ActionsDAG::clone(getActions()))); - for (const auto & action : expression->getActions()) + if (actions_dag) { - settings.out << prefix << (first ? "Actions: " - : " "); - first = false; - settings.out << action.toString() << '\n'; + auto expression = std::make_shared(actions_dag->clone()); + for (const auto & action : expression->getActions()) + { + settings.out << prefix << (first ? 
"Actions: " + : " "); + first = false; + settings.out << action.toString() << '\n'; + } } } } @@ -118,8 +121,11 @@ void TotalsHavingStep::describeActions(JSONBuilder::JSONMap & map) const if (actions_dag) { map.add("Filter column", filter_column_name); - auto expression = std::make_shared(std::move(*ActionsDAG::clone(getActions()))); - map.add("Expression", expression->toTree()); + if (actions_dag) + { + auto expression = std::make_shared(actions_dag->clone()); + map.add("Expression", expression->toTree()); + } } } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 36ffc515f43..9601f821cc8 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -203,7 +203,7 @@ FillingTransform::FillingTransform( , use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_) { if (interpolate_description) - interpolate_actions = std::make_shared(std::move(*ActionsDAG::clone(&interpolate_description->actions))); + interpolate_actions = std::make_shared(interpolate_description->actions.clone()); std::vector is_fill_column(header_.columns()); for (size_t i = 0, size = fill_description.size(); i < size; ++i) diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index ca31ffc9de5..c0875ed184d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -265,7 +265,7 @@ MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( if (!set->buildOrderedSetInplace(context)) return; - auto filter_actions_dag = std::move(*ActionsDAG::clone(filter_dag)); + auto filter_actions_dag = filter_dag->clone(); const auto * filter_actions_dag_node = filter_actions_dag.getOutputs().at(0); std::unordered_map node_to_result_node; @@ -319,7 +319,7 @@ static const ActionsDAG::NodeRawConstPtrs & getArguments(const ActionsDAG::Node return index_hint.getActions().getOutputs(); /// Import the DAG and map argument pointers. 
- auto actions_clone = std::move(*ActionsDAG::clone(&index_hint.getActions())); + auto actions_clone = index_hint.getActions().clone(); chassert(storage); result_dag_or_null->mergeNodes(std::move(actions_clone), storage); return *storage; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 22289187cfa..f1df9e231c4 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -80,7 +80,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep row_level_filter_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->row_level_filter)), actions_settings), + .actions = std::make_shared(prewhere_info->row_level_filter->clone(), actions_settings), .filter_column_name = prewhere_info->row_level_column_name, .remove_filter_column = true, .need_filter = true, @@ -96,7 +96,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep prewhere_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(std::move(*ActionsDAG::clone(&*prewhere_info->prewhere_actions)), actions_settings), + .actions = std::make_shared(prewhere_info->prewhere_actions->clone(), actions_settings), .filter_column_name = prewhere_info->prewhere_column_name, .remove_filter_column = prewhere_info->remove_prewhere_column, .need_filter = prewhere_info->need_filter, diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 589698fcc30..60f103fdb70 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -66,10 +66,10 @@ struct PrewhereInfo PrewhereInfoPtr prewhere_info = std::make_shared(); if (row_level_filter) - prewhere_info->row_level_filter = std::move(*ActionsDAG::clone(&*row_level_filter)); + prewhere_info->row_level_filter = row_level_filter->clone(); if (prewhere_actions) - prewhere_info->prewhere_actions = std::move(*ActionsDAG::clone(&*prewhere_actions)); + prewhere_info->prewhere_actions = prewhere_actions->clone(); prewhere_info->row_level_column_name = row_level_column_name; prewhere_info->prewhere_column_name = prewhere_column_name; diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index c096504170e..aee4e4683ad 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -313,7 +313,7 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->row_level_filter) { src_table_query_info.prewhere_info->row_level_filter = ActionsDAG::merge( - std::move(*ActionsDAG::clone(&actions_dag)), + actions_dag.clone(), std::move(*src_table_query_info.prewhere_info->row_level_filter)); src_table_query_info.prewhere_info->row_level_filter->removeUnusedActions(); @@ -322,7 +322,7 @@ void StorageBuffer::read( if (src_table_query_info.prewhere_info->prewhere_actions) { src_table_query_info.prewhere_info->prewhere_actions = ActionsDAG::merge( - std::move(*ActionsDAG::clone(&actions_dag)), + actions_dag.clone(), std::move(*src_table_query_info.prewhere_info->prewhere_actions)); src_table_query_info.prewhere_info->prewhere_actions->removeUnusedActions(); @@ -429,21 +429,23 @@ void StorageBuffer::read( if (query_info.prewhere_info->row_level_filter) { + auto actions = std::make_shared(query_info.prewhere_info->row_level_filter->clone(), actions_settings); pipe_from_buffers.addSimpleTransform([&](const Block & header) { return 
std::make_shared( header, - std::make_shared(std::move(*ActionsDAG::clone(&*query_info.prewhere_info->row_level_filter)), actions_settings), + actions, query_info.prewhere_info->row_level_column_name, false); }); } + auto actions = std::make_shared(query_info.prewhere_info->prewhere_actions->clone(), actions_settings); pipe_from_buffers.addSimpleTransform([&](const Block & header) { return std::make_shared( header, - std::make_shared(std::move(*ActionsDAG::clone(&*query_info.prewhere_info->prewhere_actions)), actions_settings), + actions, query_info.prewhere_info->prewhere_column_name, query_info.prewhere_info->remove_prewhere_column); }); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index e16e2a07685..0e1568c8e79 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -663,7 +663,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ { auto filter_step = std::make_unique( child.plan.getCurrentDataStream(), - std::move(*ActionsDAG::clone(&filter_info.actions)), + filter_info.actions.clone(), filter_info.column_name, filter_info.do_remove_column); @@ -1241,7 +1241,7 @@ ReadFromMerge::RowPolicyData::RowPolicyData(RowPolicyFilterPtr row_policy_filter auto expression_analyzer = ExpressionAnalyzer{expr, syntax_result, local_context}; actions_dag = expression_analyzer.getActionsDAG(false /* add_aliases */, false /* project_result */); - filter_actions = std::make_shared(std::move(*ActionsDAG::clone(&actions_dag)), + filter_actions = std::make_shared(actions_dag.clone(), ExpressionActionsSettings::fromContext(local_context, CompileExpressions::yes)); const auto & required_columns = filter_actions->getRequiredColumnsWithTypes(); const auto & sample_block_columns = filter_actions->getSampleBlock().getNamesAndTypesList(); @@ -1279,12 +1279,12 @@ void ReadFromMerge::RowPolicyData::extendNames(Names & names) const void ReadFromMerge::RowPolicyData::addStorageFilter(SourceStepWithFilter * step) const { - step->addFilter(ActionsDAG::clone(&actions_dag), filter_column_name); + step->addFilter(std::make_unique(actions_dag.clone()), filter_column_name); } void ReadFromMerge::RowPolicyData::addFilterTransform(QueryPlan & plan) const { - auto filter_step = std::make_unique(plan.getCurrentDataStream(), std::move(*ActionsDAG::clone(&actions_dag)), filter_column_name, true /* remove filter column */); + auto filter_step = std::make_unique(plan.getCurrentDataStream(), actions_dag.clone(), filter_column_name, true /* remove filter column */); plan.addStep(std::move(filter_step)); } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 32c6a558340..146a54eda78 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -323,7 +323,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) { - auto index_hint_dag = std::move(*ActionsDAG::clone(&index_hint->getActions())); + auto index_hint_dag = index_hint->getActions().clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag.getOutputs()) if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) From 093b13329c4206b4ecd604fb373d8d60cf345a1f Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 18 Jul 2024 15:46:05 +0000 Subject: [PATCH 0492/2361] One more check in JOIN ON ... 
IS NULL --- .../Passes/LogicalExpressionOptimizerPass.cpp | 46 +++++++++++++++++-- ...11_join_on_nullsafe_optimization.reference | 4 ++ .../02911_join_on_nullsafe_optimization.sql | 1 + 3 files changed, 47 insertions(+), 4 deletions(-) diff --git a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp index 854697bca9f..7e54b5a4b42 100644 --- a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp +++ b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp @@ -67,6 +67,41 @@ QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes) return nullptr; } +/// Checks if the node is combination of isNull and notEquals functions of two the same arguments +bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, QueryTreeNodePtr & rhs) +{ + QueryTreeNodePtrWithHashSet all_arguments; + for (const auto & node : nodes) + { + const auto * func_node = node->as(); + if (!func_node) + return false; + + const auto & arguments = func_node->getArguments().getNodes(); + if (func_node->getFunctionName() == "isNull" && arguments.size() == 1) + all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0])); + else if (func_node->getFunctionName() == "notEquals" && arguments.size() == 2) + { + if (arguments[0]->isEqual(*arguments[1])) + return false; + all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0])); + all_arguments.insert(QueryTreeNodePtrWithHash(arguments[1])); + } + else + return false; + + if (all_arguments.size() > 2) + return false; + } + + if (all_arguments.size() != 2) + return false; + + lhs = all_arguments.begin()->node; + rhs = std::next(all_arguments.begin())->node; + return true; +} + bool isBooleanConstant(const QueryTreeNodePtr & node, bool expected_value) { const auto * constant_node = node->as(); @@ -212,11 +247,14 @@ private: else if (func_name == "and") { const auto & and_arguments = argument_function->getArguments().getNodes(); - bool all_are_is_null = and_arguments.size() == 2 && isNodeFunction(and_arguments[0], "isNull") && isNodeFunction(and_arguments[1], "isNull"); - if (all_are_is_null) + + QueryTreeNodePtr is_null_lhs_arg; + QueryTreeNodePtr is_null_rhs_arg; + if (matchIsNullOfTwoArgs(and_arguments, is_null_lhs_arg, is_null_rhs_arg)) { - is_null_argument_to_indices[getFunctionArgument(and_arguments.front(), 0)].push_back(or_operands.size() - 1); - is_null_argument_to_indices[getFunctionArgument(and_arguments.back(), 0)].push_back(or_operands.size() - 1); + is_null_argument_to_indices[is_null_lhs_arg].push_back(or_operands.size() - 1); + is_null_argument_to_indices[is_null_rhs_arg].push_back(or_operands.size() - 1); + continue; } /// Expression `a = b AND (a IS NOT NULL) AND true AND (b IS NOT NULL)` we can be replaced with `a = b` diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference index 4eb7e74446d..31a1cda18e7 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference @@ -35,6 +35,10 @@ SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS N 2 2 2 2 3 3 3 33 \N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; +2 2 2 2 +3 3 3 33 +\N \N \N \N -- aliases defined in the join condition are valid -- FIXME(@vdimir) broken 
query formatting for the following queries: -- SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql index f7813e2a1b4..f739259caf9 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql @@ -34,6 +34,7 @@ SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) A SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; -- aliases defined in the join condition are valid -- FIXME(@vdimir) broken query formatting for the following queries: From 7fc8ee726e3ef2dfb7d778fbb1a70fb147a33067 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Thu, 18 Jul 2024 12:32:16 +0200 Subject: [PATCH 0493/2361] add replication lag and recovery time metrics --- src/Databases/DatabaseReplicated.cpp | 44 +++++++++---------- src/Databases/DatabaseReplicated.h | 10 ++++- src/Databases/DatabaseReplicatedWorker.cpp | 4 ++ src/Databases/DatabaseReplicatedWorker.h | 4 ++ src/Storages/System/StorageSystemClusters.cpp | 33 +++++++++----- src/Storages/System/StorageSystemClusters.h | 2 +- .../test_recovery_time_metric/__init__.py | 0 .../configs/config.xml | 41 +++++++++++++++++ .../test_recovery_time_metric/test.py | 26 +++++++++++ 9 files changed, 129 insertions(+), 35 deletions(-) create mode 100644 tests/integration/test_recovery_time_metric/__init__.py create mode 100644 tests/integration/test_recovery_time_metric/configs/config.xml create mode 100644 tests/integration/test_recovery_time_metric/test.py diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 7ce2859e962..b11b9382732 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -338,42 +338,40 @@ ClusterPtr DatabaseReplicated::getClusterImpl(bool all_groups) const return std::make_shared(getContext()->getSettingsRef(), shards, params); } -std::vector DatabaseReplicated::tryGetAreReplicasActive(const ClusterPtr & cluster_) const +ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) const { - Strings paths; + ReplicasInfo res; + + auto zookeeper = getZooKeeper(); const auto & addresses_with_failover = cluster_->getShardsAddresses(); const auto & shards_info = cluster_->getShardsInfo(); - for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) - { - for (const auto & replica : addresses_with_failover[shard_index]) - { - String full_name = getFullReplicaName(replica.database_shard_name, replica.database_replica_name); - paths.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "active"); - } - } try { - auto current_zookeeper = getZooKeeper(); - auto res = current_zookeeper->exists(paths); + UInt32 max_log_ptr = parse(zookeeper->get(zookeeper_path + "/max_log_ptr")); - std::vector statuses; - statuses.resize(paths.size()); - - for (size_t i = 0; i < res.size(); 
++i) - if (res[i].error == Coordination::Error::ZOK) - statuses[i] = 1; - - return statuses; - } - catch (...) + for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) + { + for (const auto & replica : addresses_with_failover[shard_index]) + { + String full_name = getFullReplicaName(replica.database_shard_name, replica.database_replica_name); + UInt32 log_ptr = parse(zookeeper->get(fs::path(zookeeper_path) / "replicas" / full_name / "log_ptr")); + bool is_active = zookeeper->exists(fs::path(zookeeper_path) / "replicas" / full_name / "active"); + res.push_back(ReplicaInfo{ + .is_active = is_active, + .replication_lag = max_log_ptr - log_ptr, + .recovery_time = replica.is_local ? ddl_worker->getCurrentInitializationDurationMs() : 0, + }); + } + } + return res; + } catch (...) { tryLogCurrentException(log); return {}; } } - void DatabaseReplicated::fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config_ref) { const auto & config_prefix = fmt::format("named_collections.{}", collection_name); diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index eab5b2ff931..db02b5ef30f 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -17,6 +17,14 @@ using ZooKeeperPtr = std::shared_ptr; class Cluster; using ClusterPtr = std::shared_ptr; +struct ReplicaInfo +{ + bool is_active; + UInt32 replication_lag; + UInt64 recovery_time; +}; +using ReplicasInfo = std::vector; + class DatabaseReplicated : public DatabaseAtomic { public: @@ -84,7 +92,7 @@ public: static void dropReplica(DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica, bool throw_if_noop); - std::vector tryGetAreReplicasActive(const ClusterPtr & cluster_) const; + ReplicasInfo tryGetReplicasInfo(const ClusterPtr & cluster_) const; void renameDatabase(ContextPtr query_context, const String & new_name) override; diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 1ef88dc03bc..cea2d123f87 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -32,6 +32,8 @@ DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db bool DatabaseReplicatedDDLWorker::initializeMainThread() { + initialization_duration_timer.emplace(); + while (!stop_flag) { try @@ -69,6 +71,7 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() initializeReplication(); initialized = true; + initialization_duration_timer.reset(); return true; } catch (...) @@ -78,6 +81,7 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() } } + initialization_duration_timer.reset(); return false; } diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index 41edf2221b8..aea3b71173d 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -36,6 +36,8 @@ public: DatabaseReplicated * const database, bool committed = false); /// NOLINT UInt32 getLogPointer() const; + + UInt64 getCurrentInitializationDurationMs() const { return initialization_duration_timer ? 
initialization_duration_timer->elapsedMilliseconds() : 0; } private: bool initializeMainThread() override; void initializeReplication(); @@ -56,6 +58,8 @@ private: ZooKeeperPtr active_node_holder_zookeeper; /// It will remove "active" node when database is detached zkutil::EphemeralNodeHolderPtr active_node_holder; + + std::optional initialization_duration_timer; }; } diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index 160c8d6270e..0da4bd70cbd 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -31,6 +31,8 @@ ColumnsDescription StorageSystemClusters::getColumnsDescription() {"database_shard_name", std::make_shared(), "The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database)."}, {"database_replica_name", std::make_shared(), "The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database)."}, {"is_active", std::make_shared(std::make_shared()), "The status of the Replicated database replica (for clusters that belong to a Replicated database): 1 means 'replica is online', 0 means 'replica is offline', NULL means 'unknown'."}, + {"replication_lag", std::make_shared(std::make_shared()), "The replication lag of the `Replicated` database replica (for clusters that belong to a Replicated database)."}, + {"recovery_time", std::make_shared(std::make_shared()), "The recovery time of the `Replicated` database replica (for clusters that belong to a Replicated database), in milliseconds."}, }; description.setAliases({ @@ -46,24 +48,23 @@ void StorageSystemClusters::fillData(MutableColumns & res_columns, ContextPtr co writeCluster(res_columns, name_and_cluster, {}); const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & name_and_database : databases) + for (const auto & [database_name, database] : databases) { - if (const auto * replicated = typeid_cast(name_and_database.second.get())) + if (const auto * replicated = typeid_cast(database.get())) { - if (auto database_cluster = replicated->tryGetCluster()) - writeCluster(res_columns, {name_and_database.first, database_cluster}, - replicated->tryGetAreReplicasActive(database_cluster)); + writeCluster(res_columns, {database_name, database_cluster}, + replicated->tryGetReplicasInfo(database_cluster)); if (auto database_cluster = replicated->tryGetAllGroupsCluster()) - writeCluster(res_columns, {DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX + name_and_database.first, database_cluster}, - replicated->tryGetAreReplicasActive(database_cluster)); + writeCluster(res_columns, {DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX + database_name, database_cluster}, + replicated->tryGetReplicasInfo(database_cluster)); } } } void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, - const std::vector & is_active) + const ReplicasInfo & replicas_info) { const String & cluster_name = name_and_cluster.first; const ClusterPtr & cluster = name_and_cluster.second; @@ -99,10 +100,22 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam res_columns[i++]->insert(pool_status[replica_index].estimated_recovery_time.count()); res_columns[i++]->insert(address.database_shard_name); res_columns[i++]->insert(address.database_replica_name); - if (is_active.empty()) + if (replicas_info.empty()) + { res_columns[i++]->insertDefault(); + res_columns[i++]->insertDefault(); + 
res_columns[i++]->insertDefault(); + } else - res_columns[i++]->insert(is_active[replica_idx++]); + { + const auto & replica_info = replicas_info[replica_idx++]; + res_columns[i++]->insert(replica_info.is_active); + res_columns[i++]->insert(replica_info.replication_lag); + if (replica_info.recovery_time != 0) + res_columns[i++]->insert(replica_info.recovery_time); + else + res_columns[i++]->insertDefault(); + } } } } diff --git a/src/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h index 0f7c792261d..ead123aa79e 100644 --- a/src/Storages/System/StorageSystemClusters.h +++ b/src/Storages/System/StorageSystemClusters.h @@ -27,7 +27,7 @@ protected: using NameAndCluster = std::pair>; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node *, std::vector) const override; - static void writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, const std::vector & is_active); + static void writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, const ReplicasInfo & replicas_info); }; } diff --git a/tests/integration/test_recovery_time_metric/__init__.py b/tests/integration/test_recovery_time_metric/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_recovery_time_metric/configs/config.xml b/tests/integration/test_recovery_time_metric/configs/config.xml new file mode 100644 index 00000000000..bad9b1fa9ea --- /dev/null +++ b/tests/integration/test_recovery_time_metric/configs/config.xml @@ -0,0 +1,41 @@ + + 9000 + + + + + + + + + default + + + + + + 2181 + 1 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + 20000 + + + + 1 + localhost + 9444 + + + + + + + localhost + 2181 + + 20000 + + + diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py new file mode 100644 index 00000000000..9ceb0cce288 --- /dev/null +++ b/tests/integration/test_recovery_time_metric/test.py @@ -0,0 +1,26 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node", main_configs=["configs/config.xml"], with_zookeeper=True) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_recovery_time_metric(start_cluster): + node.query("CREATE DATABASE rdb ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1');") + node.query("CREATE TABLE rdb.t (x UInt32) ENGINE = MergeTree ORDER BY x;") + node.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"]) + node.restart_clickhouse() + assert ( + node.query("SELECT any(recovery_time) FROM system.clusters;") + != "0\n" + ) + From 392183832834bf7041a0cae4fd27fc1112f51bc2 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 18 Jul 2024 16:45:26 +0000 Subject: [PATCH 0494/2361] Fix runtime error while converting [Null] field to Array(Variant) --- src/Interpreters/convertFieldToType.cpp | 10 +++++----- .../03203_variant_convert_field_to_type_bug.reference | 0 .../03203_variant_convert_field_to_type_bug.sql | 5 +++++ 3 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.reference create mode 100644 tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.sql diff --git a/src/Interpreters/convertFieldToType.cpp 
b/src/Interpreters/convertFieldToType.cpp index 184c263dbdb..9ee214f4415 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -356,7 +356,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID for (size_t i = 0; i < src_arr_size; ++i) { res[i] = convertFieldToType(src_arr[i], element_type); - if (res[i].isNull() && !element_type.isNullable()) + if (res[i].isNull() && !canContainNull(element_type)) { // See the comment for Tuples below. have_unconvertible_element = true; @@ -384,7 +384,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID { const auto & element_type = *(type_tuple->getElements()[i]); res[i] = convertFieldToType(src_tuple[i], element_type); - if (!res[i].isNull() || element_type.isNullable()) + if (!res[i].isNull() || canContainNull(element_type)) continue; /* @@ -433,11 +433,11 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID updated_entry[0] = convertFieldToType(key, key_type); - if (updated_entry[0].isNull() && !key_type.isNullable()) + if (updated_entry[0].isNull() && !canContainNull(key_type)) have_unconvertible_element = true; updated_entry[1] = convertFieldToType(value, value_type); - if (updated_entry[1].isNull() && !value_type.isNullable()) + if (updated_entry[1].isNull() && !canContainNull(value_type)) have_unconvertible_element = true; res[i] = updated_entry; @@ -592,7 +592,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint) { bool is_null = from_value.isNull(); - if (is_null && !to_type.isNullable() && !to_type.isLowCardinalityNullable()) + if (is_null && !canContainNull(to_type)) throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName()); Field converted = convertFieldToType(from_value, to_type, from_type_hint); diff --git a/tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.reference b/tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.sql b/tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.sql new file mode 100644 index 00000000000..b73bb8ffa6d --- /dev/null +++ b/tests/queries/0_stateless/03203_variant_convert_field_to_type_bug.sql @@ -0,0 +1,5 @@ +set allow_experimental_variant_type=1; +set use_variant_as_common_type=1; + +SELECT * FROM numbers([tuple(1, 2), NULL], 2); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + From a6d4db342b2fc83e385d549ba5ce9ebf9e63064e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 18 Jul 2024 16:45:40 +0000 Subject: [PATCH 0495/2361] Automatic style fix --- .../integration/test_recovery_time_metric/test.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py index 9ceb0cce288..90155f81ba2 100644 --- a/tests/integration/test_recovery_time_metric/test.py +++ b/tests/integration/test_recovery_time_metric/test.py @@ -2,7 +2,9 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/config.xml"], with_zookeeper=True) +node = cluster.add_instance( + "node", 
main_configs=["configs/config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -15,12 +17,10 @@ def start_cluster(): def test_recovery_time_metric(start_cluster): - node.query("CREATE DATABASE rdb ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1');") + node.query( + "CREATE DATABASE rdb ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1');" + ) node.query("CREATE TABLE rdb.t (x UInt32) ENGINE = MergeTree ORDER BY x;") node.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"]) node.restart_clickhouse() - assert ( - node.query("SELECT any(recovery_time) FROM system.clusters;") - != "0\n" - ) - + assert node.query("SELECT any(recovery_time) FROM system.clusters;") != "0\n" From 225af356c7ca3fd3c401f0fa8273d3dd751297fa Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Jul 2024 17:06:03 +0000 Subject: [PATCH 0496/2361] Better. --- src/Interpreters/ActionsDAG.cpp | 1 - src/Interpreters/ActionsDAG.h | 1 - src/Interpreters/ActionsVisitor.h | 8 +------- src/Interpreters/ExpressionAnalyzer.h | 3 --- src/Interpreters/WindowDescription.h | 1 - src/Interpreters/addMissingDefaults.h | 6 ------ 6 files changed, 1 insertion(+), 19 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 53e04f24829..85b2b38da17 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -3150,7 +3150,6 @@ const ActionsDAG::Node * FindOriginalNodeForOutputName::find(const String & outp } FindAliasForInputName::FindAliasForInputName(const ActionsDAG & actions_) - //: actions(actions_) { const auto & actions_outputs = actions_.getOutputs(); for (const auto * output_node : actions_outputs) diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 6f6c3f9bccb..76cc9327530 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -501,7 +501,6 @@ public: const ActionsDAG::Node * find(const String & name); private: - //const ActionsDAG & actions; NameToNodeIndex index; }; diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index 496d9b9b587..5b638fc14c8 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -18,12 +18,6 @@ namespace DB class ASTExpressionList; class ASTFunction; -class ExpressionActions; -using ExpressionActionsPtr = std::shared_ptr; - -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - class IFunctionOverloadResolver; using FunctionOverloadResolverPtr = std::shared_ptr; @@ -32,7 +26,7 @@ FutureSetPtr makeExplicitSet( const ASTFunction * node, const ActionsDAG & actions, ContextPtr context, PreparedSets & prepared_sets); /** For ActionsVisitor - * A stack of ExpressionActions corresponding to nested lambda expressions. + * A stack of ActionsDAG corresponding to nested lambda expressions. * The new action should be added to the highest possible level. 
* For example, in the expression "select arrayMap(x -> x + column1 * column2, array1)" * calculation of the product must be done outside the lambda expression (it does not depend on x), diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 737d36eb504..0c00247df85 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -38,9 +38,6 @@ using StorageMetadataPtr = std::shared_ptr; class ArrayJoinAction; using ArrayJoinActionPtr = std::shared_ptr; -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - /// Create columns in block or return false if not possible bool sanitizeBlock(Block & block, bool throw_if_cannot_create_column = false); diff --git a/src/Interpreters/WindowDescription.h b/src/Interpreters/WindowDescription.h index 17bfe619c30..d51d9ca94d8 100644 --- a/src/Interpreters/WindowDescription.h +++ b/src/Interpreters/WindowDescription.h @@ -14,7 +14,6 @@ namespace DB class ASTFunction; class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; struct WindowFunctionDescription { diff --git a/src/Interpreters/addMissingDefaults.h b/src/Interpreters/addMissingDefaults.h index 5299bae9745..551583a0006 100644 --- a/src/Interpreters/addMissingDefaults.h +++ b/src/Interpreters/addMissingDefaults.h @@ -2,11 +2,6 @@ #include -#include -#include -#include - - namespace DB { @@ -15,7 +10,6 @@ class NamesAndTypesList; class ColumnsDescription; class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; /** Adds three types of columns into block * 1. Columns, that are missed inside request, but present in table without defaults (missed columns) From 4ebb189691c2d553887e9d49b52f9e0a45eaf004 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Jul 2024 17:59:59 +0000 Subject: [PATCH 0497/2361] Better. --- src/Planner/CollectTableExpressionData.cpp | 8 ++++---- src/Planner/PlannerJoinTree.cpp | 12 ++++++------ src/Planner/TableExpressionData.h | 6 +++--- src/Storages/SelectQueryInfo.cpp | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/Planner/CollectTableExpressionData.cpp b/src/Planner/CollectTableExpressionData.cpp index 1d85476636c..2fe62aa9be0 100644 --- a/src/Planner/CollectTableExpressionData.cpp +++ b/src/Planner/CollectTableExpressionData.cpp @@ -88,15 +88,15 @@ public: auto column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(node); - ActionsDAGPtr alias_column_actions_dag = std::make_unique(); + ActionsDAG alias_column_actions_dag; PlannerActionsVisitor actions_visitor(planner_context, false); - auto outputs = actions_visitor.visit(*alias_column_actions_dag, column_node->getExpression()); + auto outputs = actions_visitor.visit(alias_column_actions_dag, column_node->getExpression()); if (outputs.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single output in actions dag for alias column {}. 
Actual {}", column_node->dumpTree(), outputs.size()); const auto & column_name = column_node->getColumnName(); - const auto & alias_node = alias_column_actions_dag->addAlias(*outputs[0], column_name); - alias_column_actions_dag->addOrReplaceInOutputs(alias_node); + const auto & alias_node = alias_column_actions_dag.addAlias(*outputs[0], column_name); + alias_column_actions_dag.addOrReplaceInOutputs(alias_node); table_expression_data.addAliasColumn(column_node->getColumn(), column_identifier, std::move(alias_column_actions_dag), select_added_columns); } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 10b5a761d58..048bfa4b577 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -591,16 +591,16 @@ UInt64 mainQueryNodeBlockSizeByLimit(const SelectQueryInfo & select_query_info) } std::unique_ptr createComputeAliasColumnsStep( - const std::unordered_map & alias_column_expressions, const DataStream & current_data_stream) + std::unordered_map & alias_column_expressions, const DataStream & current_data_stream) { ActionsDAG merged_alias_columns_actions_dag(current_data_stream.header.getColumnsWithTypeAndName()); ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag.getInputs(); - for (const auto & [column_name, alias_column_actions_dag] : alias_column_expressions) + for (auto & [column_name, alias_column_actions_dag] : alias_column_expressions) { - const auto & current_outputs = alias_column_actions_dag->getOutputs(); + const auto & current_outputs = alias_column_actions_dag.getOutputs(); action_dag_outputs.insert(action_dag_outputs.end(), current_outputs.begin(), current_outputs.end()); - merged_alias_columns_actions_dag.mergeNodes(std::move(*alias_column_actions_dag)); + merged_alias_columns_actions_dag.mergeNodes(std::move(alias_column_actions_dag)); } for (const auto * output_node : action_dag_outputs) @@ -996,7 +996,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } } - const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); + auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); if (!alias_column_expressions.empty() && query_plan.isInitialized() && from_stage == QueryProcessingStage::FetchColumns) { auto alias_column_step = createComputeAliasColumnsStep(alias_column_expressions, query_plan.getCurrentDataStream()); @@ -1085,7 +1085,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(subquery_planner).extractQueryPlan(); } - const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); + auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); if (!alias_column_expressions.empty() && query_plan.isInitialized() && from_stage == QueryProcessingStage::FetchColumns) { auto alias_column_step = createComputeAliasColumnsStep(alias_column_expressions, query_plan.getCurrentDataStream()); diff --git a/src/Planner/TableExpressionData.h b/src/Planner/TableExpressionData.h index 1d04fac3dc3..72412a869e4 100644 --- a/src/Planner/TableExpressionData.h +++ b/src/Planner/TableExpressionData.h @@ -73,7 +73,7 @@ public: } /// Add alias column - void addAliasColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, ActionsDAGPtr actions_dag, bool is_selected_column = true) + void addAliasColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, ActionsDAG 
actions_dag, bool is_selected_column = true) { alias_column_expressions.emplace(column.name, std::move(actions_dag)); addColumnImpl(column, column_identifier, is_selected_column); @@ -94,7 +94,7 @@ public: } /// Get ALIAS columns names mapped to expressions - const std::unordered_map & getAliasColumnExpressions() const + std::unordered_map & getAliasColumnExpressions() { return alias_column_expressions; } @@ -277,7 +277,7 @@ private: NameSet selected_column_names_set; /// Expression to calculate ALIAS columns - std::unordered_map alias_column_expressions; + std::unordered_map alias_column_expressions; /// Valid for table, table function, array join, query, union nodes ColumnNameToColumn column_name_to_column; diff --git a/src/Storages/SelectQueryInfo.cpp b/src/Storages/SelectQueryInfo.cpp index d59ccf0dfaf..c9c96ed5837 100644 --- a/src/Storages/SelectQueryInfo.cpp +++ b/src/Storages/SelectQueryInfo.cpp @@ -18,7 +18,7 @@ std::unordered_map SelectQueryInfo::buildNod std::unordered_map node_name_to_input_node_column; if (planner_context) { - const auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); + auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions(); for (const auto & [column_identifier, column_name] : table_expression_data.getColumnIdentifierToColumnName()) { From 4eb5a404430f69785fd756d1706621fe5410f43c Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 18 Jul 2024 19:04:42 +0100 Subject: [PATCH 0498/2361] add unit tests --- src/Common/CgroupsMemoryUsageObserver.cpp | 12 +- src/Common/CgroupsMemoryUsageObserver.h | 3 + src/Common/tests/gtest_cgroups_reader.cpp | 175 ++++++++++++++++++++++ 3 files changed, 186 insertions(+), 4 deletions(-) create mode 100644 src/Common/tests/gtest_cgroups_reader.cpp diff --git a/src/Common/CgroupsMemoryUsageObserver.cpp b/src/Common/CgroupsMemoryUsageObserver.cpp index cf661174789..ef8bdfc1823 100644 --- a/src/Common/CgroupsMemoryUsageObserver.cpp +++ b/src/Common/CgroupsMemoryUsageObserver.cpp @@ -200,10 +200,7 @@ CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait { const auto [cgroup_path, version] = getCgroupsPath(); - if (version == CgroupsVersion::V2) - cgroup_reader = std::make_unique(cgroup_path); - else - cgroup_reader = std::make_unique(cgroup_path); + cgroup_reader = createCgroupsReader(version, cgroup_path); LOG_INFO( log, @@ -365,6 +362,13 @@ void CgroupsMemoryUsageObserver::runThread() } } +std::unique_ptr createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path) +{ + if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2) + return std::make_unique(cgroup_path); + else + return std::make_unique(cgroup_path); +} } #endif diff --git a/src/Common/CgroupsMemoryUsageObserver.h b/src/Common/CgroupsMemoryUsageObserver.h index 0d5d07597c8..7f888fe631b 100644 --- a/src/Common/CgroupsMemoryUsageObserver.h +++ b/src/Common/CgroupsMemoryUsageObserver.h @@ -83,6 +83,9 @@ private: bool quit = false; }; +std::unique_ptr +createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path); + #else class CgroupsMemoryUsageObserver { diff --git a/src/Common/tests/gtest_cgroups_reader.cpp b/src/Common/tests/gtest_cgroups_reader.cpp new file mode 100644 index 00000000000..38e56401401 --- /dev/null +++ b/src/Common/tests/gtest_cgroups_reader.cpp @@ -0,0 
+1,175 @@ +#include +#include +#include +#include + +#include +#include +#include +#include "IO/WriteBufferFromFileBase.h" + +using namespace DB; + + +const std::string SAMPLE_FILE[2] = { + R"(cache 4673703936 +rss 2232029184 +rss_huge 0 +shmem 0 +mapped_file 344678400 +dirty 4730880 +writeback 135168 +swap 0 +pgpgin 2038569918 +pgpgout 2036883790 +pgfault 2055373287 +pgmajfault 0 +inactive_anon 2156335104 +active_anon 0 +inactive_file 2841305088 +active_file 1653915648 +unevictable 256008192 +hierarchical_memory_limit 8589934592 +hierarchical_memsw_limit 8589934592 +total_cache 4673703936 +total_rss 2232029184 +total_rss_huge 0 +total_shmem 0 +total_mapped_file 344678400 +total_dirty 4730880 +total_writeback 135168 +total_swap 0 +total_pgpgin 2038569918 +total_pgpgout 2036883790 +total_pgfault 2055373287 +total_pgmajfault 0 +total_inactive_anon 2156335104 +total_active_anon 0 +total_inactive_file 2841305088 +total_active_file 1653915648 +total_unevictable 256008192 +)", + R"(anon 10429399040 +file 17410793472 +kernel 1537789952 +kernel_stack 3833856 +pagetables 65441792 +sec_pagetables 0 +percpu 15232 +sock 0 +vmalloc 0 +shmem 0 +zswap 0 +zswapped 0 +file_mapped 344010752 +file_dirty 2060857344 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 0 +active_anon 10429370368 +inactive_file 8693084160 +active_file 8717561856 +unevictable 0 +slab_reclaimable 1460982504 +slab_unreclaimable 5152864 +slab 1466135368 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgscan 0 +pgsteal 0 +pgscan_kswapd 0 +pgscan_direct 0 +pgscan_khugepaged 0 +pgsteal_kswapd 0 +pgsteal_direct 0 +pgsteal_khugepaged 0 +pgfault 43026352 +pgmajfault 36762 +pgrefill 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 259 +pglazyfreed 0 +zswpin 0 +zswpout 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +)"}; + +const std::string EXPECTED[2] + = {"{\"active_anon\": 0, \"active_file\": 1653915648, \"cache\": 4673703936, \"dirty\": 4730880, \"hierarchical_memory_limit\": " + "8589934592, \"hierarchical_memsw_limit\": 8589934592, \"inactive_anon\": 2156335104, \"inactive_file\": 2841305088, " + "\"mapped_file\": 344678400, \"pgfault\": 2055373287, \"pgmajfault\": 0, \"pgpgin\": 2038569918, \"pgpgout\": 2036883790, \"rss\": " + "2232029184, \"rss_huge\": 0, \"shmem\": 0, \"swap\": 0, \"total_active_anon\": 0, \"total_active_file\": 1653915648, " + "\"total_cache\": 4673703936, \"total_dirty\": 4730880, \"total_inactive_anon\": 2156335104, \"total_inactive_file\": 2841305088, " + "\"total_mapped_file\": 344678400, \"total_pgfault\": 2055373287, \"total_pgmajfault\": 0, \"total_pgpgin\": 2038569918, " + "\"total_pgpgout\": 2036883790, \"total_rss\": 2232029184, \"total_rss_huge\": 0, \"total_shmem\": 0, \"total_swap\": 0, " + "\"total_unevictable\": 256008192, \"total_writeback\": 135168, \"unevictable\": 256008192, \"writeback\": 135168}", + "{\"active_anon\": 10429370368, \"active_file\": 8717561856, \"anon\": 10429399040, \"anon_thp\": 0, \"file\": 17410793472, " + "\"file_dirty\": 2060857344, \"file_mapped\": 344010752, \"file_thp\": 0, \"file_writeback\": 0, \"inactive_anon\": 0, " + "\"inactive_file\": 8693084160, \"kernel\": 1537789952, \"kernel_stack\": 3833856, \"pagetables\": 65441792, \"percpu\": 15232, " + "\"pgactivate\": 0, \"pgdeactivate\": 0, \"pgfault\": 43026352, \"pglazyfree\": 259, \"pglazyfreed\": 0, \"pgmajfault\": 36762, " + "\"pgrefill\": 0, 
\"pgscan\": 0, \"pgscan_direct\": 0, \"pgscan_khugepaged\": 0, \"pgscan_kswapd\": 0, \"pgsteal\": 0, " + "\"pgsteal_direct\": 0, \"pgsteal_khugepaged\": 0, \"pgsteal_kswapd\": 0, \"sec_pagetables\": 0, \"shmem\": 0, \"shmem_thp\": 0, " + "\"slab\": 1466135368, \"slab_reclaimable\": 1460982504, \"slab_unreclaimable\": 5152864, \"sock\": 0, \"swapcached\": 0, " + "\"thp_collapse_alloc\": 0, \"thp_fault_alloc\": 0, \"unevictable\": 0, \"vmalloc\": 0, \"workingset_activate_anon\": 0, " + "\"workingset_activate_file\": 0, \"workingset_nodereclaim\": 0, \"workingset_refault_anon\": 0, \"workingset_refault_file\": 0, " + "\"workingset_restore_anon\": 0, \"workingset_restore_file\": 0, \"zswap\": 0, \"zswapped\": 0, \"zswpin\": 0, \"zswpout\": 0}"}; + + +class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam +{ + void SetUp() override + { + const uint8_t version = static_cast(GetParam()); + tmp_dir = fmt::format("./test_cgroups_{}", magic_enum::enum_name(GetParam())); + fs::create_directories(tmp_dir); + + auto stat_file = WriteBufferFromFile(tmp_dir + "/memory.stat"); + stat_file.write(SAMPLE_FILE[version].data(), SAMPLE_FILE[version].size()); + stat_file.sync(); + + if (GetParam() == CgroupsMemoryUsageObserver::CgroupsVersion::V2) + { + auto current_file = WriteBufferFromFile(tmp_dir + "/memory.current"); + current_file.write("29645422592", 11); + current_file.sync(); + } + } + +protected: + std::string tmp_dir; +}; + + +TEST_P(CgroupsMemoryUsageObserverFixture, ReadMemoryUsageTest) +{ + const auto version = GetParam(); + auto reader = createCgroupsReader(version, tmp_dir); + ASSERT_EQ( + reader->readMemoryUsage(), + version == CgroupsMemoryUsageObserver::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184 + : /* value from memory.current - inactive_file */ 20952338432); +} + + +TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest) +{ + const auto version = GetParam(); + auto reader = createCgroupsReader(version, tmp_dir); + ASSERT_EQ(reader->dumpAllStats(), EXPECTED[static_cast(version)]); +} + + +INSTANTIATE_TEST_SUITE_P( + CgroupsMemoryUsageObserverTests, + CgroupsMemoryUsageObserverFixture, + ::testing::Values(CgroupsMemoryUsageObserver::CgroupsVersion::V1, CgroupsMemoryUsageObserver::CgroupsVersion::V2)); From cdadef78471b47d05d6d1c437a823b17f8867991 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 18 Jul 2024 21:26:33 +0200 Subject: [PATCH 0499/2361] Add more comments. --- src/Parsers/ASTViewTargets.h | 21 +++++++++++++++++---- src/Parsers/ParserViewTargets.h | 7 ++++++- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/Parsers/ASTViewTargets.h b/src/Parsers/ASTViewTargets.h index 33a7bc5fcb1..12182919f0e 100644 --- a/src/Parsers/ASTViewTargets.h +++ b/src/Parsers/ASTViewTargets.h @@ -9,15 +9,20 @@ namespace DB class ASTStorage; enum class Keyword : size_t; -/// Information about the target table for a materialized view or a window view. +/// Information about target tables (external or inner) of a materialized view or a window view. +/// See ASTViewTargets for more details. struct ViewTarget { enum Kind { - /// Target table for a materialized view or a window view. + /// If `kind == ViewTarget::To` then `ViewTarget` contains information about the "TO" table of a materialized view or a window view: + /// CREATE MATERIALIZED VIEW db.mv_name {TO [db.]to_target | ENGINE to_engine} AS SELECT ... + /// or + /// CREATE WINDOW VIEW db.wv_name {TO [db.]to_target | ENGINE to_engine} AS SELECT ... 
To, - /// Table with intermediate results for a window view. + /// If `kind == ViewTarget::Inner` then `ViewTarget` contains information about the "INNER" table of a window view: + /// CREATE WINDOW VIEW db.wv_name {INNER ENGINE inner_engine} AS SELECT ... Inner, }; @@ -42,7 +47,15 @@ std::string_view toString(ViewTarget::Kind kind); void parseFromString(ViewTarget::Kind & out, std::string_view str); -/// Information about all the target tables for a view. +/// Information about all target tables (external or inner) of a view. +/// +/// For example, for a materialized view: +/// CREATE MATERIALIZED VIEW db.mv_name [TO [db.]to_target | ENGINE to_engine] AS SELECT ... +/// this class contains information about the "TO" table: its name and database (if it's external), its UUID and engine (if it's inner). +/// +/// For a window view: +/// CREATE WINDOW VIEW db.wv_name [TO [db.]to_target | ENGINE to_engine] [INNER ENGINE inner_engine] AS SELECT ... +/// this class contains information about both the "TO" table and the "INNER" table. class ASTViewTargets : public IAST { public: diff --git a/src/Parsers/ParserViewTargets.h b/src/Parsers/ParserViewTargets.h index f5d1850e974..3af3c0b8df3 100644 --- a/src/Parsers/ParserViewTargets.h +++ b/src/Parsers/ParserViewTargets.h @@ -7,7 +7,12 @@ namespace DB { -/// Parses information about target views of a table. +/// Parses information about target tables (external or inner) of a materialized view or a window view. +/// The function parses one or multiple parts of a CREATE query looking like this: +/// TO db.table_name +/// TO INNER UUID 'XXX' +/// {ENGINE / INNER ENGINE} TableEngine(arguments) [ORDER BY ...] [SETTINGS ...] +/// Returns ASTViewTargets if succeeded. class ParserViewTargets : public IParserBase { public: From d7250c1d63c561c14f41f5f1a18f79fe0efc5972 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 18 Jul 2024 21:27:04 +0200 Subject: [PATCH 0500/2361] Add function ASTCreateQuery::hasInnerUUIDs(). 
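
The null check on view targets previously had to be spelled out at every call site as
`create.targets && create.targets->hasInnerUUIDs()`; this patch moves that check into
ASTCreateQuery itself. A minimal sketch of the new helper and its call site, for
illustration only (the authoritative code is in the diff below):

    /// ASTCreateQuery.cpp: null-safe wrapper — a query without view targets has no inner UUIDs.
    bool ASTCreateQuery::hasInnerUUIDs() const
    {
        if (targets)
            return targets->hasInnerUUIDs();
        return false;
    }

    /// InterpreterCreateQuery::assertOrSetUUID: the call site simplifies to
    bool has_uuid = (create.uuid != UUIDHelpers::Nil) || create.hasInnerUUIDs();
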
--- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- src/Parsers/ASTCreateQuery.cpp | 7 +++++++ src/Parsers/ASTCreateQuery.h | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 45e2881ae5c..faa91341a7c 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1131,7 +1131,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data } else { - bool has_uuid = (create.uuid != UUIDHelpers::Nil) || (create.targets && create.targets->hasInnerUUIDs()); + bool has_uuid = (create.uuid != UUIDHelpers::Nil) || create.hasInnerUUIDs(); if (has_uuid && !is_on_cluster && !internal) { /// We don't show the following error message either diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 770a63c6e75..348b54203fc 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -563,6 +563,13 @@ UUID ASTCreateQuery::getTargetInnerUUID(ViewTarget::Kind target_kind) const return UUIDHelpers::Nil; } +bool ASTCreateQuery::hasInnerUUIDs() const +{ + if (targets) + return targets->hasInnerUUIDs(); + return false; +} + std::shared_ptr ASTCreateQuery::getTargetInnerEngine(ViewTarget::Kind target_kind) const { if (targets) diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index f751a09169c..08d26f28efa 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -167,6 +167,7 @@ public: StorageID getTargetTableID(ViewTarget::Kind target_kind) const; bool hasTargetTableID(ViewTarget::Kind target_kind) const; UUID getTargetInnerUUID(ViewTarget::Kind target_kind) const; + bool hasInnerUUIDs() const; std::shared_ptr getTargetInnerEngine(ViewTarget::Kind target_kind) const; void setTargetInnerEngine(ViewTarget::Kind target_kind, ASTPtr storage_def); From e806123856f5ded0f2e92f4f4b42c38132276c15 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 18 Jul 2024 20:30:56 +0000 Subject: [PATCH 0501/2361] Fix non x86 build --- src/QueryPipeline/RemoteQueryExecutor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 87f634b8334..d7edbc9ed35 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -469,7 +469,7 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::read() return restartQueryWithoutDuplicatedUUIDs(); } -RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync(bool check_packet_type_only) +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync([[maybe_unused]] bool check_packet_type_only) { #if defined(OS_LINUX) if (!read_context || (resent_query && recreate_read_context)) From c021f9dcd363d01d46108a9084b90c9463c9116a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 18 Jul 2024 22:56:15 +0200 Subject: [PATCH 0502/2361] Fix SHOW MERGES --- src/Interpreters/InterpreterShowTablesQuery.cpp | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/InterpreterShowTablesQuery.cpp b/src/Interpreters/InterpreterShowTablesQuery.cpp index 51038aaca46..78580f2ec02 100644 --- a/src/Interpreters/InterpreterShowTablesQuery.cpp +++ b/src/Interpreters/InterpreterShowTablesQuery.cpp @@ -121,9 +121,18 @@ String InterpreterShowTablesQuery::getRewrittenQuery() if (query.merges) { WriteBufferFromOwnString rewritten_query; - 
rewritten_query << "SELECT table, database, round((elapsed * (1 / merges.progress)) - merges.elapsed, 2) AS estimate_complete, round(elapsed,2) elapsed, " - "round(progress*100, 2) AS progress, is_mutation, formatReadableSize(total_size_bytes_compressed) AS size_compressed, " - "formatReadableSize(memory_usage) AS memory_usage FROM system.merges"; + rewritten_query << R"( + SELECT + table, + database, + merges.progress > 0 ? round(merges.elapsed * (1 - merges.progress) / merges.progress, 2) : NULL AS estimate_complete, + round(elapsed, 2) AS elapsed, + round(progress * 100, 2) AS progress, + is_mutation, + formatReadableSize(total_size_bytes_compressed) AS size_compressed, + formatReadableSize(memory_usage) AS memory_usage + FROM system.merges + )"; if (!query.like.empty()) { From 76904d4ae645aaf9a4cfce938be23a0c60a03fb6 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 18 Jul 2024 21:08:35 +0000 Subject: [PATCH 0503/2361] support throw option --- src/Core/SettingsEnums.cpp | 5 +++ src/Interpreters/InterpreterCreateQuery.cpp | 21 ++++++++++++ ...ojection_merge_special_mergetree.reference | 0 ...206_projection_merge_special_mergetree.sql | 32 +++++++++++++++++++ 4 files changed, 58 insertions(+) create mode 100644 tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference create mode 100644 tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index 82e7d6db410..74b6c793849 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -177,6 +177,11 @@ IMPLEMENT_SETTING_ENUM(LightweightMutationProjectionMode, ErrorCodes::BAD_ARGUME {{"throw", LightweightMutationProjectionMode::THROW}, {"drop", LightweightMutationProjectionMode::DROP}}) +IMPLEMENT_SETTING_ENUM(DeduplicateMergeProjectionMode, ErrorCodes::BAD_ARGUMENTS, + {{"throw", DeduplicateMergeProjectionMode::THROW}, + {"drop", DeduplicateMergeProjectionMode::DROP}, + {"rebuild", DeduplicateMergeProjectionMode::THROW}}) + IMPLEMENT_SETTING_AUTO_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS) IMPLEMENT_SETTING_ENUM(ParquetVersion, ErrorCodes::BAD_ARGUMENTS, diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ea10ad59db4..df5ec4525eb 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1281,6 +1281,27 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) /// Set and retrieve list of columns, indices and constraints. Set table engine if needed. Rewrite query in canonical way. TableProperties properties = getTablePropertiesAndNormalizeCreateQuery(create, mode); + /// Projection is only supported in (Replictaed)MergeTree. + if (std::string_view engine_name(create.storage->engine->name); + !properties.projections.empty() && engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree") + { + bool projection_support = false; + if (auto * setting = create.storage->settings; setting != nullptr) + { + for (const auto & change : setting->changes) + { + if (change.name == "deduplicate_merge_projection_mode" && change.value != Field("throw")) + { + projection_support = true; + break; + } + } + } + if (!projection_support) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "Projection is only supported in (Replictaed)MergeTree. 
Consider drop or rebuild option of deduplicate_merge_projection_mode."); + } + /// Check type compatible for materialized dest table and select columns if (create.select && create.is_materialized_view && create.to_table_id && mode <= LoadingStrictnessLevel::CREATE) { diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql new file mode 100644 index 00000000000..6b5e516ad21 --- /dev/null +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type; -- { serverError SUPPORT_IS_DISABLED } + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'throw'; -- { serverError SUPPORT_IS_DISABLED } + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'drop'; + +DROP TABLE tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'rebuild'; + +DROP TABLE tp; From 4ef9cb6d7aa32aeb56c26bfa6ecad94beacba540 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 18 Jul 2024 23:13:32 +0200 Subject: [PATCH 0504/2361] Fix style --- src/IO/ReadBufferFromFileBase.cpp | 5 ----- src/IO/ReadWriteBufferFromHTTP.cpp | 1 - 2 files changed, 6 deletions(-) diff --git a/src/IO/ReadBufferFromFileBase.cpp b/src/IO/ReadBufferFromFileBase.cpp index d42b12ba49b..b7a1438cff8 100644 --- a/src/IO/ReadBufferFromFileBase.cpp +++ b/src/IO/ReadBufferFromFileBase.cpp @@ -5,11 +5,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int UNKNOWN_FILE_SIZE; -} - ReadBufferFromFileBase::ReadBufferFromFileBase() : BufferWithOwnMemory(0) { } diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 2a62b11aa44..4d27a78c8dc 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -72,7 +72,6 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int CANNOT_SEEK_THROUGH_FILE; extern const int SEEK_POSITION_OUT_OF_BOUND; - extern const int UNKNOWN_FILE_SIZE; } std::unique_ptr ReadWriteBufferFromHTTP::CallResult::transformToReadBuffer(size_t buf_size) && From 0bf9346b07dc6fb07180a4221477512ba4eae024 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 19 Jul 2024 00:08:36 +0200 Subject: [PATCH 0505/2361] Update 03206_no_exceptions_clickhouse_local.sh --- .../queries/0_stateless/03206_no_exceptions_clickhouse_local.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh index 86839a228dc..00efd1f4591 100755 --- a/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh +++ b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh @@ 
-1,6 +1,4 @@ #!/usr/bin/env bash -# Tags: no-fasttest -# Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so the grpc library is not built CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 7d9b7cc79611751adc6d22aa47c5e179228a2840 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jul 2024 02:54:29 +0200 Subject: [PATCH 0506/2361] Introduce ASTDataType --- src/DataTypes/DataTypeFactory.cpp | 14 ++--- src/Databases/DatabasesCommon.cpp | 2 +- .../PostgreSQL/DatabasePostgreSQL.cpp | 15 +++-- .../InterpreterShowCreateQuery.cpp | 2 - .../MySQL/InterpretersMySQLDDLQuery.cpp | 28 ++++----- .../formatWithPossiblyHidingSecrets.h | 8 ++- src/Parsers/ASTColumnDeclaration.cpp | 4 -- src/Parsers/ASTDataType.cpp | 57 +++++++++++++++++++ src/Parsers/ASTDataType.h | 36 ++++++++++++ src/Parsers/ASTFunction.cpp | 4 -- src/Parsers/ASTFunction.h | 2 +- src/Parsers/IAST.h | 15 +++-- src/Parsers/ParserCreateQuery.cpp | 7 ++- src/Parsers/ParserCreateQuery.h | 8 +-- src/Parsers/ParserDataType.cpp | 14 ++--- .../StorageMaterializedPostgreSQL.cpp | 13 ++--- src/Storages/WindowView/StorageWindowView.cpp | 3 +- 17 files changed, 156 insertions(+), 76 deletions(-) create mode 100644 src/Parsers/ASTDataType.cpp create mode 100644 src/Parsers/ASTDataType.h diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index af37cde2846..45552e506cd 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include #include @@ -83,15 +83,9 @@ DataTypePtr DataTypeFactory::tryGet(const ASTPtr & ast) const template DataTypePtr DataTypeFactory::getImpl(const ASTPtr & ast) const { - if (const auto * func = ast->as()) + if (const auto * type = ast->as()) { - if (func->parameters) - { - if constexpr (nullptr_on_error) - return nullptr; - throw Exception(ErrorCodes::ILLEGAL_SYNTAX_FOR_DATA_TYPE, "Data type cannot have multiple parenthesized parameters."); - } - return getImpl(func->name, func->arguments); + return getImpl(type->name, type->arguments); } if (const auto * ident = ast->as()) @@ -107,7 +101,7 @@ DataTypePtr DataTypeFactory::getImpl(const ASTPtr & ast) const if constexpr (nullptr_on_error) return nullptr; - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST element for data type."); + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST element for data type: {}.", ast->getID()); } DataTypePtr DataTypeFactory::get(const String & family_name_param, const ASTPtr & parameters) const diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index fe0baf30e57..cacba581745 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -149,7 +149,7 @@ ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_ columns = metadata_ptr->columns.getAll(); for (const auto & column_name_and_type: columns) { - const auto & ast_column_declaration = std::make_shared(); + const auto ast_column_declaration = std::make_shared(); ast_column_declaration->name = column_name_and_type.name; /// parser typename { diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index a846e23cd4f..032fc33ea16 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -12,9 +12,9 @@ #include #include #include +#include #include #include -#include #include 
#include #include @@ -25,6 +25,7 @@ #include #include + namespace fs = std::filesystem; namespace DB @@ -432,7 +433,7 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co auto metadata_snapshot = storage->getInMemoryMetadataPtr(); for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) { - const auto & column_declaration = std::make_shared(); + const auto column_declaration = std::make_shared(); column_declaration->name = column_type_and_name.name; column_declaration->type = getColumnDeclaration(column_type_and_name.type); columns_expression_list->children.emplace_back(column_declaration); @@ -470,17 +471,15 @@ ASTPtr DatabasePostgreSQL::getColumnDeclaration(const DataTypePtr & data_type) c WhichDataType which(data_type); if (which.isNullable()) - return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); if (which.isArray()) - return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); if (which.isDateTime64()) - { - return makeASTFunction("DateTime64", std::make_shared(static_cast(6))); - } + return makeASTDataType("DateTime64", std::make_shared(static_cast(6))); - return std::make_shared(data_type->getName()); + return makeASTDataType(data_type->getName()); } void registerDatabasePostgreSQL(DatabaseFactory & factory) diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index 0fca7b64d5a..ca5b7a3b5c1 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -1,9 +1,7 @@ #include #include -#include #include #include -#include #include #include #include diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 4821d607d0e..f73965cfcc8 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include + namespace DB { @@ -95,22 +97,22 @@ NamesAndTypesList getColumnsList(const ASTExpressionList * columns_definition) } ASTPtr data_type = declare_column->data_type; - auto * data_type_function = data_type->as(); + auto * data_type_node = data_type->as(); - if (data_type_function) + if (data_type_node) { - String type_name_upper = Poco::toUpper(data_type_function->name); + String type_name_upper = Poco::toUpper(data_type_node->name); if (is_unsigned) { /// For example(in MySQL): CREATE TABLE test(column_name INT NOT NULL ... 
UNSIGNED) if (type_name_upper.find("INT") != String::npos && !endsWith(type_name_upper, "SIGNED") && !endsWith(type_name_upper, "UNSIGNED")) - data_type_function->name = type_name_upper + " UNSIGNED"; + data_type_node->name = type_name_upper + " UNSIGNED"; } if (type_name_upper == "SET") - data_type_function->arguments.reset(); + data_type_node->arguments.reset(); /// Transforms MySQL ENUM's list of strings to ClickHouse string-integer pairs /// For example ENUM('a', 'b', 'c') -> ENUM('a'=1, 'b'=2, 'c'=3) @@ -119,7 +121,7 @@ NamesAndTypesList getColumnsList(const ASTExpressionList * columns_definition) if (type_name_upper.find("ENUM") != String::npos) { UInt16 i = 0; - for (ASTPtr & child : data_type_function->arguments->children) + for (ASTPtr & child : data_type_node->arguments->children) { auto new_child = std::make_shared(); new_child->name = "equals"; @@ -133,10 +135,10 @@ NamesAndTypesList getColumnsList(const ASTExpressionList * columns_definition) } if (type_name_upper == "DATE") - data_type_function->name = "Date32"; + data_type_node->name = "Date32"; } if (is_nullable) - data_type = makeASTFunction("Nullable", data_type); + data_type = makeASTDataType("Nullable", data_type); columns_name_and_type.emplace_back(declare_column->name, DataTypeFactory::instance().get(data_type)); } @@ -175,7 +177,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col return columns_description; } -static NamesAndTypesList getNames(const ASTFunction & expr, ContextPtr context, const NamesAndTypesList & columns) +static NamesAndTypesList getNames(const ASTDataType & expr, ContextPtr context, const NamesAndTypesList & columns) { if (expr.arguments->children.empty()) return NamesAndTypesList{}; @@ -219,9 +221,9 @@ static std::tuplechildren.empty()) { @@ -482,7 +484,7 @@ ASTs InterpreterCreateImpl::getRewrittenQueries( { auto column_declaration = std::make_shared(); column_declaration->name = name; - column_declaration->type = makeASTFunction(type); + column_declaration->type = makeASTDataType(type); column_declaration->default_specifier = "MATERIALIZED"; column_declaration->default_expression = std::make_shared(default_value); column_declaration->children.emplace_back(column_declaration->type); diff --git a/src/Interpreters/formatWithPossiblyHidingSecrets.h b/src/Interpreters/formatWithPossiblyHidingSecrets.h index 039bcbc2bca..ea8c295b169 100644 --- a/src/Interpreters/formatWithPossiblyHidingSecrets.h +++ b/src/Interpreters/formatWithPossiblyHidingSecrets.h @@ -1,11 +1,14 @@ #pragma once -#include "Access/ContextAccess.h" -#include "Interpreters/Context.h" + +#include +#include + #include namespace DB { + struct SecretHidingFormatSettings { // We can't store const Context& as there's a dangerous usage {.ctx = *getContext()} @@ -24,4 +27,5 @@ inline String format(const SecretHidingFormatSettings & settings) return settings.query.formatWithPossiblyHidingSensitiveData(settings.max_length, settings.one_line, show_secrets); } + } diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index 4a8a3d2967d..c96499095d5 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -1,8 +1,6 @@ #include #include #include -#include -#include namespace DB @@ -15,8 +13,6 @@ ASTPtr ASTColumnDeclaration::clone() const if (type) { - // Type may be an ASTFunction (e.g. `create table t (a Decimal(9,0))`), - // so we have to clone it properly as well. 
res->type = type->clone(); res->children.push_back(res->type); } diff --git a/src/Parsers/ASTDataType.cpp b/src/Parsers/ASTDataType.cpp new file mode 100644 index 00000000000..3c17ae8c380 --- /dev/null +++ b/src/Parsers/ASTDataType.cpp @@ -0,0 +1,57 @@ +#include +#include +#include + + +namespace DB +{ + +String ASTDataType::getID(char delim) const +{ + return "DataType" + (delim + name); +} + +ASTPtr ASTDataType::clone() const +{ + auto res = std::make_shared(*this); + res->children.clear(); + + if (arguments) + { + res->arguments = arguments->clone(); + res->children.push_back(res->arguments); + } + + return res; +} + +void ASTDataType::updateTreeHashImpl(SipHash & hash_state, bool) const +{ + hash_state.update(name.size()); + hash_state.update(name); + /// Children are hashed automatically. +} + +void ASTDataType::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + settings.ostr << (settings.hilite ? hilite_function : "") << name; + + if (arguments && !arguments->children.empty()) + { + settings.ostr << '(' << (settings.hilite ? hilite_none : ""); + + for (size_t i = 0, size = arguments->children.size(); i < size; ++i) + { + if (i != 0) + settings.ostr << ", "; + + arguments->children[i]->formatImpl(settings, state, frame); + } + + settings.ostr << (settings.hilite ? hilite_function : "") << ')'; + } + + settings.ostr << (settings.hilite ? hilite_none : ""); +} + +} diff --git a/src/Parsers/ASTDataType.h b/src/Parsers/ASTDataType.h new file mode 100644 index 00000000000..c8f3c6e2e9d --- /dev/null +++ b/src/Parsers/ASTDataType.h @@ -0,0 +1,36 @@ +#pragma once + +#include + + +namespace DB +{ + +/// AST for data types, e.g. UInt8 or Tuple(x UInt8, y Enum(a = 1)) +class ASTDataType : public IAST +{ +public: + String name; + ASTPtr arguments; + + String getID(char delim) const override; + ASTPtr clone() const override; + void updateTreeHashImpl(SipHash & hash_state, bool ignore_aliases) const override; + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; +}; + +template +std::shared_ptr makeASTDataType(const String & name, Args &&... args) +{ + auto function = std::make_shared(); + + function->name = name; + function->arguments = std::make_shared(); + function->children.push_back(function->arguments); + + function->arguments->children = { std::forward(args)... }; + + return function; +} + +} diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index f39229d7566..7d42b6d1e9c 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -19,9 +18,6 @@ #include #include #include -#include - -#include using namespace std::literals; diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 3a94691f25d..be2b6beae54 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -46,7 +46,7 @@ public: NullsAction nulls_action = NullsAction::EMPTY; - /// do not print empty parentheses if there are no args - compatibility with new AST for data types and engine names. + /// do not print empty parentheses if there are no args - compatibility with engine names. bool no_empty_args = false; /// Specifies where this function-like expression is used. 
diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index ee70fed0f07..e7e2ff2ec4a 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -271,16 +271,15 @@ public: throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown element in AST: {}", getID()); } - // Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied. - // You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience. + /// Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied. + /// You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience. String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets) const; - /* - * formatForLogging and formatForErrorMessage always hide secrets. This inconsistent - * behaviour is due to the fact such functions are called from Client which knows nothing about - * access rights and settings. Moreover, the only use case for displaying secrets are backups, - * and backup tools use only direct input and ignore logs and error messages. - */ + /** formatForLogging and formatForErrorMessage always hide secrets. This inconsistent + * behaviour is due to the fact such functions are called from Client which knows nothing about + * access rights and settings. Moreover, the only use case for displaying secrets are backups, + * and backup tools use only direct input and ignore logs and error messages. + */ String formatForLogging(size_t max_length = 0) const { return formatWithPossiblyHidingSensitiveData(max_length, true, false); diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 014dc7bd3bf..fa232954cd6 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -75,9 +76,9 @@ bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!close.ignore(pos, expected)) return false; - auto func = std::make_shared(); + auto func = std::make_shared(); tryGetIdentifierNameInto(name, func->name); - // FIXME(ilezhankin): func->no_empty_args = true; ? + func->arguments = columns; func->children.push_back(columns); node = func; @@ -749,7 +750,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe auto * table_id = table->as(); - // Shortcut for ATTACH a previously detached table + /// A shortcut for ATTACH a previously detached table. 
bool short_attach = attach && !from_path; if (short_attach && (!pos.isValid() || pos.get().type == TokenType::Semicolon)) { diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index bb37491a366..7bd1d1bf588 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -101,17 +101,15 @@ class IParserColumnDeclaration : public IParserBase { public: explicit IParserColumnDeclaration(bool require_type_ = true, bool allow_null_modifiers_ = false, bool check_keywords_after_name_ = false) - : require_type(require_type_) - , allow_null_modifiers(allow_null_modifiers_) - , check_keywords_after_name(check_keywords_after_name_) + : require_type(require_type_) + , allow_null_modifiers(allow_null_modifiers_) + , check_keywords_after_name(check_keywords_after_name_) { } void enableCheckTypeKeyword() { check_type_keyword = true; } protected: - using ASTDeclarePtr = std::shared_ptr; - const char * getName() const override{ return "column declaration"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index ad33c7e4558..63800819899 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -198,13 +199,12 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } } - auto function_node = std::make_shared(); - function_node->name = type_name; - function_node->no_empty_args = true; + auto data_type_node = std::make_shared(); + data_type_node->name = type_name; if (pos->type != TokenType::OpeningRoundBracket) { - node = function_node; + node = data_type_node; return true; } ++pos; @@ -222,10 +222,10 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; ++pos; - function_node->arguments = expr_list_args; - function_node->children.push_back(function_node->arguments); + data_type_node->arguments = expr_list_args; + data_type_node->children.push_back(data_type_node->arguments); - node = function_node; + node = data_type_node; return true; } diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index a904b29e12f..f7dde509d4e 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -7,10 +7,8 @@ #include #include -#include #include -#include #include #include #include @@ -22,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -295,7 +294,7 @@ std::shared_ptr StorageMaterializedPostgreSQL::getMaterial auto column_declaration = std::make_shared(); column_declaration->name = std::move(name); - column_declaration->type = makeASTFunction(type); + column_declaration->type = makeASTDataType(type); column_declaration->default_specifier = "MATERIALIZED"; column_declaration->default_expression = std::make_shared(default_value); @@ -312,17 +311,17 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d WhichDataType which(data_type); if (which.isNullable()) - return makeASTFunction("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return makeASTDataType("Nullable", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); if (which.isArray()) - return makeASTFunction("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); + return 
makeASTDataType("Array", getColumnDeclaration(typeid_cast(data_type.get())->getNestedType())); /// getName() for decimal returns 'Decimal(precision, scale)', will get an error with it if (which.isDecimal()) { auto make_decimal_expression = [&](std::string type_name) { - auto ast_expression = std::make_shared(); + auto ast_expression = std::make_shared(); ast_expression->name = type_name; ast_expression->arguments = std::make_shared(); @@ -354,7 +353,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d return ast_expression; } - return std::make_shared(data_type->getName()); + return makeASTDataType(data_type->getName()); } diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index e15da0074d5..65a30b18e96 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -805,7 +806,7 @@ ASTPtr StorageWindowView::getInnerTableCreateQuery(const ASTPtr & inner_query, c { auto column_window = std::make_shared(); column_window->name = window_id_name; - column_window->type = std::make_shared("UInt32"); + column_window->type = makeASTDataType("UInt32"); columns_list->children.push_back(column_window); } From 2860aa514d0d8837bd72a87390686b20e05ecae7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jul 2024 03:50:38 +0200 Subject: [PATCH 0507/2361] Fix style --- src/DataTypes/DataTypeFactory.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index 45552e506cd..db6e1738d22 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -22,7 +22,6 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int UNKNOWN_TYPE; - extern const int ILLEGAL_SYNTAX_FOR_DATA_TYPE; extern const int UNEXPECTED_AST_STRUCTURE; extern const int DATA_TYPE_CANNOT_HAVE_ARGUMENTS; } From 985f398925266c5867cd25fcdf655a5a306928fd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jul 2024 05:33:44 +0200 Subject: [PATCH 0508/2361] Fix error --- src/Client/ClientBase.cpp | 1 - src/DataTypes/DataTypeAggregateFunction.cpp | 4 +-- src/Parsers/ParserDataType.cpp | 36 ++++++++++++++------- src/TableFunctions/ITableFunction.cpp | 1 - 4 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 13dce05cabc..95d2dff54e6 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -70,7 +70,6 @@ #include #include -#include #include #include #include diff --git a/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp index ef7d86d2a81..09175617bf1 100644 --- a/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/src/DataTypes/DataTypeAggregateFunction.cpp @@ -257,8 +257,8 @@ static DataTypePtr create(const ASTPtr & arguments) } else throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Unexpected AST element passed as aggregate function name for data type AggregateFunction. " - "Must be identifier or function."); + "Unexpected AST element {} passed as aggregate function name for data type AggregateFunction. 
" + "Must be identifier or function", data_type_ast->getID()); for (size_t i = argument_types_start_idx; i < arguments->children.size(); ++i) argument_types.push_back(DataTypeFactory::instance().get(arguments->children[i])); diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index 63800819899..78b5aaa93a6 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -70,20 +70,32 @@ private: DynamicArgumentsParser parser; return parser.parse(pos, node, expected); } - - ParserNestedTable nested_parser; - ParserDataType data_type_parser; - ParserAllCollectionsOfLiterals literal_parser(false); - - const char * operators[] = {"=", "equals", nullptr}; - ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); - - if (pos->type == TokenType::BareWord && std::string_view(pos->begin, pos->size()) == "Nested") + else if (type_name == "Nested") + { + ParserNestedTable nested_parser; return nested_parser.parse(pos, node, expected); + } + else if (type_name == "AggregateFunction") + { + ParserFunction function_parser; + ParserIdentifier identifier_parser; + ParserAllCollectionsOfLiterals literal_parser(false); + return literal_parser.parse(pos, node, expected) + || identifier_parser.parse(pos, node, expected) + || function_parser.parse(pos, node, expected); + } + else + { + ParserDataType data_type_parser; + ParserAllCollectionsOfLiterals literal_parser(false); - return enum_parser.parse(pos, node, expected) - || literal_parser.parse(pos, node, expected) - || data_type_parser.parse(pos, node, expected); + const char * operators[] = {"=", "equals", nullptr}; + ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); + + return enum_parser.parse(pos, node, expected) + || literal_parser.parse(pos, node, expected) + || data_type_parser.parse(pos, node, expected); + } } std::string_view type_name; diff --git a/src/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp index e5676c5c25d..916ff7ec022 100644 --- a/src/TableFunctions/ITableFunction.cpp +++ b/src/TableFunctions/ITableFunction.cpp @@ -1,5 +1,4 @@ #include -#include #include #include #include From 15be94ee14a7affe6643dd4c3ac1b104e3c69eeb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:47:27 +0200 Subject: [PATCH 0509/2361] Update src/Parsers/ASTDataType.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: János Benjamin Antal --- src/Parsers/ASTDataType.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Parsers/ASTDataType.h b/src/Parsers/ASTDataType.h index c8f3c6e2e9d..abe5cbb8626 100644 --- a/src/Parsers/ASTDataType.h +++ b/src/Parsers/ASTDataType.h @@ -22,13 +22,13 @@ public: template std::shared_ptr makeASTDataType(const String & name, Args &&... args) { - auto function = std::make_shared(); + auto data_type = std::make_shared(); - function->name = name; - function->arguments = std::make_shared(); - function->children.push_back(function->arguments); + data_type->name = name; + data_type->arguments = std::make_shared(); + data_type->children.push_back(function->arguments); - function->arguments->children = { std::forward(args)... }; + data_type->arguments->children = { std::forward(args)... 
}; return function; } From 87fa2c64e96c6bac67275207bd708ac231fa9fb6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:48:00 +0200 Subject: [PATCH 0510/2361] Apply review comments --- src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index f7dde509d4e..e795cd9c6c6 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -345,7 +345,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d if (which.isDateTime64()) { - auto ast_expression = std::make_shared(); + auto ast_expression = std::make_shared(); ast_expression->name = "DateTime64"; ast_expression->arguments = std::make_shared(); From 88dce34be907863b6f3cf6890be87b0d7278a101 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:48:31 +0200 Subject: [PATCH 0511/2361] Fix error --- src/Parsers/ASTDataType.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ASTDataType.h b/src/Parsers/ASTDataType.h index abe5cbb8626..d9427c2fd9e 100644 --- a/src/Parsers/ASTDataType.h +++ b/src/Parsers/ASTDataType.h @@ -26,11 +26,11 @@ std::shared_ptr makeASTDataType(const String & name, Args &&... arg data_type->name = name; data_type->arguments = std::make_shared(); - data_type->children.push_back(function->arguments); + data_type->children.push_back(data_type->arguments); data_type->arguments->children = { std::forward(args)... }; - return function; + return data_type; } } From 4d4e0901881221b39cce0e0527f530fe90eb7ad3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jul 2024 05:58:06 +0200 Subject: [PATCH 0512/2361] Fix build --- src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index e795cd9c6c6..e7b58841c4e 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -7,6 +7,7 @@ #include #include +#include #include #include From 2832f8c57e37a1fc7d0c91b9ad621785c6d2a5a5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 04:59:04 +0200 Subject: [PATCH 0513/2361] Fix a typo --- src/Databases/DatabasesCommon.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index cacba581745..fdbdb610275 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -164,7 +164,7 @@ ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_ if (!parser.parse(pos, ast_type, expected)) { if (throw_on_error) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parser metadata of {}.{}", + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parse metadata of {}.{}", backQuote(table_id.database_name), backQuote(table_id.table_name)); else return nullptr; From 6a7a4df1eca0cda4fd2efdcc8aaf2e8741f4cbcc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 05:02:26 +0200 Subject: [PATCH 0514/2361] Fix error --- src/Parsers/ParserDataType.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index 
78b5aaa93a6..af1a299a887 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -75,7 +75,7 @@ private: ParserNestedTable nested_parser; return nested_parser.parse(pos, node, expected); } - else if (type_name == "AggregateFunction") + else if (type_name == "AggregateFunction" || type_name == "SimpleAggregateFunction") { ParserFunction function_parser; ParserIdentifier identifier_parser; From cc201745620490c7d885a9e45d9f4b92f7492c10 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 05:19:58 +0200 Subject: [PATCH 0515/2361] Fix bad code: it was catching exceptions --- src/IO/WithFileSize.cpp | 48 +++++++++---------- ...ry_and_native_with_binary_encoded_types.sh | 4 +- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/src/IO/WithFileSize.cpp b/src/IO/WithFileSize.cpp index 3660d962c08..8cea12fa200 100644 --- a/src/IO/WithFileSize.cpp +++ b/src/IO/WithFileSize.cpp @@ -14,40 +14,38 @@ namespace ErrorCodes } template -static size_t getFileSize(T & in) +static std::optional tryGetFileSize(T & in) { if (auto * with_file_size = dynamic_cast(&in)) - { return with_file_size->getFileSize(); - } + + return std::nullopt; +} + +template +static size_t getFileSize(T & in) +{ + if (auto maybe_size = tryGetFileSize(in)) + return *maybe_size; throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } -size_t getFileSizeFromReadBuffer(ReadBuffer & in) -{ - if (auto * delegate = dynamic_cast(&in)) - { - return getFileSize(delegate->getWrappedReadBuffer()); - } - else if (auto * compressed = dynamic_cast(&in)) - { - return getFileSize(compressed->getWrappedReadBuffer()); - } - - return getFileSize(in); -} - std::optional tryGetFileSizeFromReadBuffer(ReadBuffer & in) { - try - { - return getFileSizeFromReadBuffer(in); - } - catch (...) 
- { - return std::nullopt; - } + if (auto * delegate = dynamic_cast(&in)) + return tryGetFileSize(delegate->getWrappedReadBuffer()); + else if (auto * compressed = dynamic_cast(&in)) + return tryGetFileSize(compressed->getWrappedReadBuffer()); + return tryGetFileSize(in); +} + +size_t getFileSizeFromReadBuffer(ReadBuffer & in) +{ + if (auto maybe_size = tryGetFileSizeFromReadBuffer(in)) + return *maybe_size; + + throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } bool isBufferWithFileSize(const ReadBuffer & in) diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh index 723b11ad620..0c585d36348 100755 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh @@ -6,8 +6,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test { - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" } test "materialize(42)::UInt8" From e1a24c9dd6f6320ce02714265e91e83f8dbf43f6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 17 Jul 2024 05:45:57 +0200 Subject: [PATCH 0516/2361] Fix error --- src/Parsers/ParserDataType.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index af1a299a887..c5da4a32e92 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -80,9 +79,9 @@ private: ParserFunction function_parser; ParserIdentifier identifier_parser; ParserAllCollectionsOfLiterals literal_parser(false); - return literal_parser.parse(pos, node, expected) - || identifier_parser.parse(pos, node, expected) - || function_parser.parse(pos, node, expected); + return function_parser.parse(pos, node, expected) + || literal_parser.parse(pos, node, expected) + || identifier_parser.parse(pos, node, expected); } else { From bc1146389617f9e9198d0dd25eea89a9206421ba Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 19 
Jul 2024 03:16:23 +0200 Subject: [PATCH 0517/2361] Fix error --- src/Parsers/ParserDataType.cpp | 142 +++++++++++++++++++-------------- 1 file changed, 84 insertions(+), 58 deletions(-) diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index c5da4a32e92..2edb0141e12 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -46,60 +46,6 @@ private: } }; -/// Wrapper to allow mixed lists of nested and normal types. -/// Parameters are either: -/// - Nested table elements; -/// - Enum element in form of 'a' = 1; -/// - literal; -/// - Dynamic type arguments; -/// - another data type (or identifier); -class ParserDataTypeArgument : public IParserBase -{ -public: - explicit ParserDataTypeArgument(std::string_view type_name_) : type_name(type_name_) - { - } - -private: - const char * getName() const override { return "data type argument"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override - { - if (type_name == "Dynamic") - { - DynamicArgumentsParser parser; - return parser.parse(pos, node, expected); - } - else if (type_name == "Nested") - { - ParserNestedTable nested_parser; - return nested_parser.parse(pos, node, expected); - } - else if (type_name == "AggregateFunction" || type_name == "SimpleAggregateFunction") - { - ParserFunction function_parser; - ParserIdentifier identifier_parser; - ParserAllCollectionsOfLiterals literal_parser(false); - return function_parser.parse(pos, node, expected) - || literal_parser.parse(pos, node, expected) - || identifier_parser.parse(pos, node, expected); - } - else - { - ParserDataType data_type_parser; - ParserAllCollectionsOfLiterals literal_parser(false); - - const char * operators[] = {"=", "equals", nullptr}; - ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); - - return enum_parser.parse(pos, node, expected) - || literal_parser.parse(pos, node, expected) - || data_type_parser.parse(pos, node, expected); - } - } - - std::string_view type_name; -}; - } bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) @@ -221,11 +167,91 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ++pos; /// Parse optional parameters - ParserList args_parser(std::make_unique(type_name), std::make_unique(TokenType::Comma)); - ASTPtr expr_list_args; + ASTPtr expr_list_args = std::make_shared(); + + /// Allow mixed lists of nested and normal types. + /// Parameters are either: + /// - Nested table elements; + /// - Enum element in form of 'a' = 1; + /// - literal; + /// - Dynamic type arguments; + /// - another data type (or identifier); + + size_t arg_num = 0; + bool have_version_of_aggregate_function = false; + while (true) + { + if (arg_num > 0) + { + if (pos->type == TokenType::Comma) + ++pos; + else + break; + } + + ASTPtr arg; + if (type_name == "Dynamic") + { + DynamicArgumentsParser parser; + parser.parse(pos, arg, expected); + } + else if (type_name == "Nested") + { + ParserNestedTable nested_parser; + nested_parser.parse(pos, arg, expected); + } + else if (type_name == "AggregateFunction" || type_name == "SimpleAggregateFunction") + { + /// This is less trivial. + /// The first optional argument for AggregateFunction is a numeric literal, defining the version. + /// The next argument is the function name, optionally with parameters. + /// Subsequent arguments are data types. 
+ + if (arg_num == 0 && type_name == "AggregateFunction") + { + ParserUnsignedInteger version_parser; + if (version_parser.parse(pos, arg, expected)) + { + have_version_of_aggregate_function = true; + expr_list_args->children.emplace_back(std::move(arg)); + ++arg_num; + continue; + } + } + + if (arg_num == (have_version_of_aggregate_function ? 1 : 0)) + { + ParserFunction function_parser; + ParserIdentifier identifier_parser; + function_parser.parse(pos, arg, expected) + || identifier_parser.parse(pos, arg, expected); + } + else + { + ParserDataType data_type_parser; + data_type_parser.parse(pos, arg, expected); + } + } + else + { + ParserDataType data_type_parser; + ParserAllCollectionsOfLiterals literal_parser(false); + + const char * operators[] = {"=", "equals", nullptr}; + ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique()); + + enum_parser.parse(pos, arg, expected) + || literal_parser.parse(pos, arg, expected) + || data_type_parser.parse(pos, arg, expected); + } + + if (!arg) + break; + + expr_list_args->children.emplace_back(std::move(arg)); + ++arg_num; + } - if (!args_parser.parse(pos, expr_list_args, expected)) - return false; if (pos->type == TokenType::Comma) // ignore trailing comma inside Nested structures like Tuple(Int, Tuple(Int, String),) ++pos; From 06594935f0e2dd9fc61882c7e643677de474fd7c Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 19 Jul 2024 01:34:03 +0000 Subject: [PATCH 0518/2361] forbid optimize deduplicate --- src/Interpreters/InterpreterOptimizeQuery.cpp | 5 +++++ .../03206_projection_merge_special_mergetree.sql | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/src/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp index 907a01b0432..8d1ac3455b7 100644 --- a/src/Interpreters/InterpreterOptimizeQuery.cpp +++ b/src/Interpreters/InterpreterOptimizeQuery.cpp @@ -20,6 +20,7 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; + extern const int NOT_IMPLEMENTED; } @@ -42,6 +43,10 @@ BlockIO InterpreterOptimizeQuery::execute() auto metadata_snapshot = table->getInMemoryMetadataPtr(); auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot, getContext()); + /// Don't allow OPTIMIZE DEDUPLICATE for all engines with projections. + if (ast.deduplicate && !metadata_snapshot->projections.empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE with projections are not supported yet"); + // Empty list of names means we deduplicate by all columns, but user can explicitly state which columns to use. 
Names column_names; if (ast.deduplicate_by_columns) diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index 6b5e516ad21..06fb9a30aca 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -30,3 +30,12 @@ CREATE TABLE tp ( SETTINGS deduplicate_merge_projection_mode = 'rebuild'; DROP TABLE tp; + + +-- don't allow OPTIMIZE DEDUPLICATE for all engines with projections +CREATE TABLE test ( + a INT PRIMARY KEY, + PROJECTION p (SELECT * ORDER BY a) +) engine = MergeTree; + +OPTIMIZE TABLE test DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } From f19de4effdf1e9d15acab69cecd882d7fd9b156b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 19 Jul 2024 03:38:05 +0200 Subject: [PATCH 0519/2361] Fix bad tests long_select_and_alter, CC @alesapin --- .../0_stateless/01338_long_select_and_alter.reference | 2 +- tests/queries/0_stateless/01338_long_select_and_alter.sh | 6 +++--- .../01338_long_select_and_alter_zookeeper.reference | 2 +- .../0_stateless/01338_long_select_and_alter_zookeeper.sh | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.reference b/tests/queries/0_stateless/01338_long_select_and_alter.reference index c2678e7052e..921730b17ce 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter.reference @@ -1,3 +1,3 @@ -10 5 +10 CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = MergeTree\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.sh b/tests/queries/0_stateless/01338_long_select_and_alter.sh index 2b0709162a3..2659e5c16cf 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter.sh @@ -13,15 +13,15 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number, toString(number) $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 10000000 --query "SELECT count(distinct concat(value, '_')) FROM alter_mt WHERE not sleepEachRow(2)" & -# to be sure that select took all required locks +# To be sure that select took all required locks for better test sensitivity, although it isn't guaranteed (then the test will also succeed). 
sleep 2 $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value UInt64" -$CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" - wait +$CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" + $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE alter_mt" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference index b4ed8efab63..9c5ad0fa468 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference @@ -1,3 +1,3 @@ -10 5 +10 CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01338_long_select_and_alter_zookeeper_default/alter_mt\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh index 41e0a12f369..6eb795408f4 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh @@ -13,15 +13,15 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number, toString(number) $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 10000000 --query "SELECT count(distinct concat(value, '_')) FROM alter_mt WHERE not sleepEachRow(2)" & -# to be sure that select took all required locks +# To be sure that select took all required locks for better test sensitivity, although it isn't guaranteed (then the test will also succeed). sleep 2 $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value UInt64" -$CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" - wait +$CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" + $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE alter_mt" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" From 69ad57a2a52d50510b4ec5beea08c0c599846859 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 19 Jul 2024 03:58:07 +0200 Subject: [PATCH 0520/2361] Update 03205_parallel_window_finctions_and_column_sparse_bug.sql --- .../03205_parallel_window_finctions_and_column_sparse_bug.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql index f2391e0d165..4cc54bb5ac2 100644 --- a/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql +++ b/tests/queries/0_stateless/03205_parallel_window_finctions_and_column_sparse_bug.sql @@ -6,7 +6,7 @@ insert into t values (1, 0); insert into t values (1, 1); insert into t values (1, 0)(1, 1); -SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t qualify c8=1 order by d settings max_threads=2; +SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t qualify c8=1 order by d settings max_threads=2, allow_experimental_analyzer = 1; SELECT '---'; SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t order by d, c8 settings max_threads=2; SELECT '---'; From 48e7708d7bcf575123ea20cee9455e0a4cf26791 Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Fri, 19 Jul 2024 10:29:13 +0800 Subject: [PATCH 0521/2361] fix compile error --- 
src/Interpreters/InterpreterAlterQuery.cpp | 1 + src/Interpreters/InterpreterDeleteQuery.cpp | 1 + src/Interpreters/InterpreterInsertQuery.cpp | 8 ++++---- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index 398fe31f1a9..9b5b5dfc20a 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index b37ec4de4ab..291c8e19db0 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 181fb064b54..aef6c1249d5 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -411,10 +412,6 @@ std::pair, std::vector> InterpreterInsertQuery::buildP if (!running_group) running_group = std::make_shared(getContext()); - if (getContext()->getServerSettings().disable_insertion_and_mutation - && query.table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE) - throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Insert queries are prohibited"); - std::vector sink_chains; std::vector presink_chains; @@ -737,6 +734,9 @@ BlockIO InterpreterInsertQuery::execute() const Settings & settings = getContext()->getSettingsRef(); auto & query = query_ptr->as(); + if (getContext()->getServerSettings().disable_insertion_and_mutation + && query.table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE) + throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Insert queries are prohibited"); StoragePtr table = getTable(query); checkStorageSupportsTransactionsIfNeeded(table, getContext()); From 4cb862432c50848e3406899f5c7079b4cf1d62a8 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 19 Jul 2024 09:34:20 +0000 Subject: [PATCH 0522/2361] Rename method --- src/Processors/IProcessor.h | 2 +- src/Processors/Sources/RemoteSource.cpp | 2 +- src/Processors/Sources/RemoteSource.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 358983a2179..0776921a814 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -221,7 +221,7 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'schedule' is not implemented for {} processor", getName()); } - virtual void asyncJobReady() {} + virtual void onAsyncJobReady() {} /** You must call this method if 'prepare' returned ExpandPipeline. * This method cannot access any port, but it can create new ports for current processor. 
diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 3ec2356a121..587f6e2001b 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -104,7 +104,7 @@ void RemoteSource::work() ISource::work(); } -void RemoteSource::asyncJobReady() +void RemoteSource::onAsyncJobReady() { chassert(async_read); diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index fa04985f101..2247c781584 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -32,7 +32,7 @@ public: int schedule() override { return fd; } - void asyncJobReady() override; + void onAsyncJobReady() override; void setStorageLimits(const std::shared_ptr & storage_limits_) override; From 8349d260952a6daeb84c653c37ac000cf5302cfd Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 19 Jul 2024 11:25:34 +0000 Subject: [PATCH 0523/2361] Simplified implementation --- src/Processors/Sources/RemoteSource.cpp | 17 +++++------ src/Processors/Sources/RemoteSource.h | 2 +- src/QueryPipeline/RemoteQueryExecutor.cpp | 37 +++++++++++++++++++++++ src/QueryPipeline/RemoteQueryExecutor.h | 3 ++ 4 files changed, 49 insertions(+), 10 deletions(-) diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 587f6e2001b..46c27676e12 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -89,9 +89,6 @@ ISource::Status RemoteSource::prepare() void RemoteSource::work() { - if (async_immediate_work.exchange(false)) - return; - /// Connection drain is a heavy operation that may take a long time. /// Therefore we move connection drain from prepare() to work(), and drain multiple connections in parallel. /// See issue: https://github.com/ClickHouse/ClickHouse/issues/60844 @@ -101,6 +98,13 @@ void RemoteSource::work() executor_finished = true; return; } + + if (preprocessed_packet) + { + preprocessed_packet = false; + return; + } + ISource::work(); } @@ -111,12 +115,7 @@ void RemoteSource::onAsyncJobReady() if (!was_query_sent) return; - auto res = query_executor->readAsync(/*check_packet_type_only=*/true); - if (res.type == RemoteQueryExecutor::ReadResult::Type::ParallelReplicasToken) - { - work(); - async_immediate_work = true; - } + preprocessed_packet = query_executor->processParallelReplicaPacketIfAny(); } std::optional RemoteSource::tryGenerate() diff --git a/src/Processors/Sources/RemoteSource.h b/src/Processors/Sources/RemoteSource.h index 2247c781584..22d3921708b 100644 --- a/src/Processors/Sources/RemoteSource.h +++ b/src/Processors/Sources/RemoteSource.h @@ -54,7 +54,7 @@ private: int fd = -1; size_t rows = 0; bool manually_add_rows_before_limit_counter = false; - std::atomic_bool async_immediate_work{false}; + bool preprocessed_packet = false; }; /// Totals source from RemoteQueryExecutor. 
diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index d7edbc9ed35..b15e31a120f 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -936,4 +936,41 @@ bool RemoteQueryExecutor::needToSkipUnavailableShard() const return context->getSettingsRef().skip_unavailable_shards && (0 == connections->size()); } +bool RemoteQueryExecutor::processParallelReplicaPacketIfAny() +{ +#if defined(OS_LINUX) + if (!read_context || (resent_query && recreate_read_context)) + { + std::lock_guard lock(was_cancelled_mutex); + if (was_cancelled) + return false; + + read_context = std::make_unique(*this); + recreate_read_context = false; + } + + { + std::lock_guard lock(was_cancelled_mutex); + if (was_cancelled) + return false; + + chassert(!has_postponed_packet); + + read_context->resume(); + if (read_context->isInProgress()) // <- nothing to process + return false; + + const auto packet_type = read_context->getPacketType(); + if (packet_type == Protocol::Server::MergeTreeReadTaskRequest || packet_type == Protocol::Server::MergeTreeAllRangesAnnouncement) + { + processPacket(read_context->getPacket()); + return true; + } + + has_postponed_packet = true; + return false; + } +#endif +} + } diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 6849c3e0a07..6f56df71f1d 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -222,6 +222,9 @@ public: bool isReplicaUnavailable() const { return extension && extension->parallel_reading_coordinator && connections->size() == 0; } + /// return true if parallel replica packet was processed + bool processParallelReplicaPacketIfAny(); + private: RemoteQueryExecutor( const String & query_, From 4e3fdfc2d6482d42b8e152911e24ee38b1bafc89 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Fri, 19 Jul 2024 13:26:35 +0200 Subject: [PATCH 0524/2361] Save writer thread id for debugging --- src/Common/SharedMutex.cpp | 10 +++++++++- src/Common/SharedMutex.h | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/Common/SharedMutex.cpp b/src/Common/SharedMutex.cpp index 1df09ca998a..7b00ef0b28b 100644 --- a/src/Common/SharedMutex.cpp +++ b/src/Common/SharedMutex.cpp @@ -1,4 +1,5 @@ #include +#include #ifdef OS_LINUX /// Because of futex @@ -12,6 +13,7 @@ namespace DB SharedMutex::SharedMutex() : state(0) , waiters(0) + , writer_thread_id(0) {} void SharedMutex::lock() @@ -32,16 +34,22 @@ void SharedMutex::lock() value |= writers; while (value & readers) futexWaitLowerFetch(state, value); + + writer_thread_id.store(getThreadId()); } bool SharedMutex::try_lock() { UInt64 value = 0; - return state.compare_exchange_strong(value, writers); + bool success = state.compare_exchange_strong(value, writers); + if (success) + writer_thread_id.store(getThreadId()); + return success; } void SharedMutex::unlock() { + writer_thread_id.store(0); state.store(0); if (waiters) futexWakeUpperAll(state); diff --git a/src/Common/SharedMutex.h b/src/Common/SharedMutex.h index 9215ff62af3..a53e2984239 100644 --- a/src/Common/SharedMutex.h +++ b/src/Common/SharedMutex.h @@ -36,6 +36,8 @@ private: alignas(64) std::atomic state; std::atomic waiters; + /// Is set while the lock is held in exclusive mode only to facilitate debugging + std::atomic writer_thread_id; }; } From 55d1656f4d0da2f23b2df719dabeed7999645349 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Fri, 19 Jul 2024 13:27:41 +0200 
Subject: [PATCH 0525/2361] Moving is not safe, prohibit it --- src/Common/SharedMutex.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Common/SharedMutex.h b/src/Common/SharedMutex.h index a53e2984239..c77c8765885 100644 --- a/src/Common/SharedMutex.h +++ b/src/Common/SharedMutex.h @@ -19,6 +19,8 @@ public: ~SharedMutex() = default; SharedMutex(const SharedMutex &) = delete; SharedMutex & operator=(const SharedMutex &) = delete; + SharedMutex(SharedMutex &&) = delete; + SharedMutex & operator=(SharedMutex &&) = delete; // Exclusive ownership void lock() TSA_ACQUIRE(); From 53ea5510143ded0862fd51922077a7cdc1344fe2 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 19 Jul 2024 11:30:55 +0000 Subject: [PATCH 0526/2361] Remove unused code --- src/QueryPipeline/RemoteQueryExecutor.cpp | 14 +------------- src/QueryPipeline/RemoteQueryExecutor.h | 2 +- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index b15e31a120f..b78c38a4134 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -469,7 +469,7 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::read() return restartQueryWithoutDuplicatedUUIDs(); } -RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync([[maybe_unused]] bool check_packet_type_only) +RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync() { #if defined(OS_LINUX) if (!read_context || (resent_query && recreate_read_context)) @@ -519,18 +519,6 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::readAsync([[maybe_unused]] if (read_context->isInProgress()) return ReadResult(read_context->getFileDescriptor()); - if (check_packet_type_only) - { - has_postponed_packet = true; - const auto packet_type = read_context->getPacketType(); - if (packet_type == Protocol::Server::MergeTreeReadTaskRequest - || packet_type == Protocol::Server::MergeTreeAllRangesAnnouncement) - { - return ReadResult(ReadResult::Type::ParallelReplicasToken); - } - return ReadResult(ReadResult::Type::Nothing); - } - auto read_result = processPacket(read_context->getPacket()); if (read_result.getType() == ReadResult::Type::Data || read_result.getType() == ReadResult::Type::ParallelReplicasToken) return read_result; diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 6f56df71f1d..7289e2a2243 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -183,7 +183,7 @@ public: ReadResult read(); /// Async variant of read. Returns ready block or file descriptor which may be used for polling. - ReadResult readAsync(bool check_packet_type_only = false); + ReadResult readAsync(); /// Receive all remain packets and finish query. /// It should be cancelled after read returned empty block. 
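The two SharedMutex changes a couple of patches above (recording the exclusive owner's thread id and deleting the move operations) can be illustrated with a simplified stand-in built on std::shared_mutex and std::thread::id instead of the futex-based state word and getThreadId(). DebuggableSharedMutex and lastWriter() are invented names; this is only a sketch of the idea, not the actual implementation.

#include <atomic>
#include <cassert>
#include <shared_mutex>
#include <thread>

class DebuggableSharedMutex
{
public:
    DebuggableSharedMutex() = default;
    DebuggableSharedMutex(const DebuggableSharedMutex &) = delete;
    DebuggableSharedMutex & operator=(const DebuggableSharedMutex &) = delete;
    /// Moving a mutex that other threads may already be waiting on is never safe.
    DebuggableSharedMutex(DebuggableSharedMutex &&) = delete;
    DebuggableSharedMutex & operator=(DebuggableSharedMutex &&) = delete;

    void lock()
    {
        impl.lock();
        writer_thread_id.store(std::this_thread::get_id()); /// set only while held exclusively
    }

    bool try_lock()
    {
        bool success = impl.try_lock();
        if (success)
            writer_thread_id.store(std::this_thread::get_id());
        return success;
    }

    void unlock()
    {
        writer_thread_id.store(std::thread::id{}); /// clear before releasing
        impl.unlock();
    }

    void lock_shared() { impl.lock_shared(); }
    void unlock_shared() { impl.unlock_shared(); }

    /// For use from a debugger or assertions only.
    std::thread::id lastWriter() const { return writer_thread_id.load(); }

private:
    std::shared_mutex impl;
    std::atomic<std::thread::id> writer_thread_id{std::thread::id{}};
};

int main()
{
    DebuggableSharedMutex mutex;
    mutex.lock();
    assert(mutex.lastWriter() == std::this_thread::get_id());
    mutex.unlock();
    assert(mutex.lastWriter() == std::thread::id{});
}

The cost is one extra atomic store per exclusive lock and unlock, and in exchange a debugger or crash dump shows which thread currently owns the mutex.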
From 3fb01ed2c9154d79cd9d23e3ae2e8708e86d0a34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 19 Jul 2024 12:10:28 +0000 Subject: [PATCH 0527/2361] Use nonexistent address to check connection error at table creation --- tests/integration/test_storage_rabbitmq/test.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 3240039ee81..c163f3f7aed 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -2220,13 +2220,11 @@ def test_rabbitmq_commit_on_block_write(rabbitmq_cluster): def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster): - # no connection when table is initialized - rabbitmq_cluster.pause_container("rabbitmq1") - instance.query_and_get_error( + error = instance.query_and_get_error( """ CREATE TABLE test.cs (key UInt64, value UInt64) ENGINE = RabbitMQ - SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', + SETTINGS rabbitmq_host_port = 'no_connection_at_startup:5672', rabbitmq_exchange_name = 'cs', rabbitmq_format = 'JSONEachRow', rabbitmq_flush_interval_ms=1000, @@ -2234,7 +2232,7 @@ def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; """ ) - rabbitmq_cluster.unpause_container("rabbitmq1") + assert "CANNOT_CONNECT_RABBITMQ" in error def test_rabbitmq_no_connection_at_startup_2(rabbitmq_cluster): From 064bd643dad9153e7c35b6a235585a962d51df2d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 19 Jul 2024 12:23:56 +0000 Subject: [PATCH 0528/2361] Cosmetics --- src/Functions/dateDiff.cpp | 196 +++++++++++++++---------------------- 1 file changed, 79 insertions(+), 117 deletions(-) diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index 8e8865db7ed..5c46ad40daa 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -45,26 +45,26 @@ public: template void dispatchForColumns( - const IColumn & x, const IColumn & y, + const IColumn & col_x, const IColumn & col_y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, ColumnInt64::Container & result) const { - if (const auto * x_vec_16 = checkAndGetColumn(&x)) - dispatchForSecondColumn(*x_vec_16, y, timezone_x, timezone_y, result); - else if (const auto * x_vec_32 = checkAndGetColumn(&x)) - dispatchForSecondColumn(*x_vec_32, y, timezone_x, timezone_y, result); - else if (const auto * x_vec_32_s = checkAndGetColumn(&x)) - dispatchForSecondColumn(*x_vec_32_s, y, timezone_x, timezone_y, result); - else if (const auto * x_vec_64 = checkAndGetColumn(&x)) - dispatchForSecondColumn(*x_vec_64, y, timezone_x, timezone_y, result); - else if (const auto * x_const_16 = checkAndGetColumnConst(&x)) - dispatchConstForSecondColumn(x_const_16->getValue(), y, timezone_x, timezone_y, result); - else if (const auto * x_const_32 = checkAndGetColumnConst(&x)) - dispatchConstForSecondColumn(x_const_32->getValue(), y, timezone_x, timezone_y, result); - else if (const auto * x_const_32_s = checkAndGetColumnConst(&x)) - dispatchConstForSecondColumn(x_const_32_s->getValue(), y, timezone_x, timezone_y, result); - else if (const auto * x_const_64 = checkAndGetColumnConst(&x)) - dispatchConstForSecondColumn(x_const_64->getValue>(), y, timezone_x, timezone_y, result); + if (const auto * x_vec_16 = checkAndGetColumn(&col_x)) + dispatchForSecondColumn(*x_vec_16, col_y, timezone_x, timezone_y, result); + else if (const auto * x_vec_32 = 
checkAndGetColumn(&col_x)) + dispatchForSecondColumn(*x_vec_32, col_y, timezone_x, timezone_y, result); + else if (const auto * x_vec_32_s = checkAndGetColumn(&col_x)) + dispatchForSecondColumn(*x_vec_32_s, col_y, timezone_x, timezone_y, result); + else if (const auto * x_vec_64 = checkAndGetColumn(&col_x)) + dispatchForSecondColumn(*x_vec_64, col_y, timezone_x, timezone_y, result); + else if (const auto * x_const_16 = checkAndGetColumnConst(&col_x)) + dispatchConstForSecondColumn(x_const_16->getValue(), col_y, timezone_x, timezone_y, result); + else if (const auto * x_const_32 = checkAndGetColumnConst(&col_x)) + dispatchConstForSecondColumn(x_const_32->getValue(), col_y, timezone_x, timezone_y, result); + else if (const auto * x_const_32_s = checkAndGetColumnConst(&col_x)) + dispatchConstForSecondColumn(x_const_32_s->getValue(), col_y, timezone_x, timezone_y, result); + else if (const auto * x_const_64 = checkAndGetColumnConst(&col_x)) + dispatchConstForSecondColumn(x_const_64->getValue>(), col_y, timezone_x, timezone_y, result); else throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for first argument of function {}, must be Date, Date32, DateTime or DateTime64", @@ -73,25 +73,25 @@ public: template void dispatchForSecondColumn( - const LeftColumnType & x, const IColumn & y, + const LeftColumnType & x, const IColumn & col_y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, ColumnInt64::Container & result) const { - if (const auto * y_vec_16 = checkAndGetColumn(&y)) + if (const auto * y_vec_16 = checkAndGetColumn(&col_y)) vectorVector(x, *y_vec_16, timezone_x, timezone_y, result); - else if (const auto * y_vec_32 = checkAndGetColumn(&y)) + else if (const auto * y_vec_32 = checkAndGetColumn(&col_y)) vectorVector(x, *y_vec_32, timezone_x, timezone_y, result); - else if (const auto * y_vec_32_s = checkAndGetColumn(&y)) + else if (const auto * y_vec_32_s = checkAndGetColumn(&col_y)) vectorVector(x, *y_vec_32_s, timezone_x, timezone_y, result); - else if (const auto * y_vec_64 = checkAndGetColumn(&y)) + else if (const auto * y_vec_64 = checkAndGetColumn(&col_y)) vectorVector(x, *y_vec_64, timezone_x, timezone_y, result); - else if (const auto * y_const_16 = checkAndGetColumnConst(&y)) + else if (const auto * y_const_16 = checkAndGetColumnConst(&col_y)) vectorConstant(x, y_const_16->getValue(), timezone_x, timezone_y, result); - else if (const auto * y_const_32 = checkAndGetColumnConst(&y)) + else if (const auto * y_const_32 = checkAndGetColumnConst(&col_y)) vectorConstant(x, y_const_32->getValue(), timezone_x, timezone_y, result); - else if (const auto * y_const_32_s = checkAndGetColumnConst(&y)) + else if (const auto * y_const_32_s = checkAndGetColumnConst(&col_y)) vectorConstant(x, y_const_32_s->getValue(), timezone_x, timezone_y, result); - else if (const auto * y_const_64 = checkAndGetColumnConst(&y)) + else if (const auto * y_const_64 = checkAndGetColumnConst(&col_y)) vectorConstant(x, y_const_64->getValue>(), timezone_x, timezone_y, result); else throw Exception(ErrorCodes::ILLEGAL_COLUMN, @@ -101,17 +101,17 @@ public: template void dispatchConstForSecondColumn( - T1 x, const IColumn & y, + T1 x, const IColumn & col_y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, ColumnInt64::Container & result) const { - if (const auto * y_vec_16 = checkAndGetColumn(&y)) + if (const auto * y_vec_16 = checkAndGetColumn(&col_y)) constantVector(x, *y_vec_16, timezone_x, timezone_y, result); - else if (const auto * y_vec_32 = checkAndGetColumn(&y)) + else if 
(const auto * y_vec_32 = checkAndGetColumn(&col_y)) constantVector(x, *y_vec_32, timezone_x, timezone_y, result); - else if (const auto * y_vec_32_s = checkAndGetColumn(&y)) + else if (const auto * y_vec_32_s = checkAndGetColumn(&col_y)) constantVector(x, *y_vec_32_s, timezone_x, timezone_y, result); - else if (const auto * y_vec_64 = checkAndGetColumn(&y)) + else if (const auto * y_vec_64 = checkAndGetColumn(&col_y)) constantVector(x, *y_vec_64, timezone_x, timezone_y, result); else throw Exception(ErrorCodes::ILLEGAL_COLUMN, @@ -168,8 +168,7 @@ public: Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const { if constexpr (is_diff) - return static_cast(transform_y.execute(y, timezone_y)) - - static_cast(transform_x.execute(x, timezone_x)); + return static_cast(transform_y.execute(y, timezone_y)) - static_cast(transform_x.execute(x, timezone_x)); else { auto res = static_cast(transform_y.execute(y, timezone_y)) @@ -332,95 +331,73 @@ public: static constexpr auto name = is_relative ? "dateDiff" : "age"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } + String getName() const override { return name; } bool isVariadic() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } size_t getNumberOfArguments() const override { return 0; } + bool useDefaultImplementationForConstants() const override { return true; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0, 3}; } - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - if (arguments.size() != 3 && arguments.size() != 4) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 3 or 4", - getName(), arguments.size()); + FunctionArgumentDescriptors mandatory_args{ + {"unit", static_cast(&isString), nullptr, "String"}, + {"startdate", static_cast(&isDateOrDate32OrDateTimeOrDateTime64), nullptr, "Date[32] or DateTime[64]"}, + {"enddate", static_cast(&isDateOrDate32OrDateTimeOrDateTime64), nullptr, "Date[32] or DateTime[64]"}, + }; - if (!isString(arguments[0])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "First argument for function {} (unit) must be String", - getName()); + FunctionArgumentDescriptors optional_args{ + {"timezone", static_cast(&isString), nullptr, "String"}, + }; - if (!isDate(arguments[1]) && !isDate32(arguments[1]) && !isDateTime(arguments[1]) && !isDateTime64(arguments[1])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Second argument for function {} must be Date, Date32, DateTime or DateTime64", - getName()); - - if (!isDate(arguments[2]) && !isDate32(arguments[2]) && !isDateTime(arguments[2]) && !isDateTime64(arguments[2])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Third argument for function {} must be Date, Date32, DateTime or DateTime64", - getName() - ); - - if (arguments.size() == 4 && !isString(arguments[3])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Fourth argument for function {} (timezone) must be String", - getName()); + validateFunctionArguments(*this, arguments, mandatory_args, optional_args); return std::make_shared(); } - bool 
useDefaultImplementationForConstants() const override { return true; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0, 3}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - const auto * unit_column = checkAndGetColumnConst(arguments[0].column.get()); - if (!unit_column) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "First argument for function {} must be constant String", - getName()); + const auto * col_unit = checkAndGetColumnConst(arguments[0].column.get()); + if (!col_unit) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "First argument for function {} must be constant String", getName()); - String unit = Poco::toLower(unit_column->getValue()); + String unit = Poco::toLower(col_unit->getValue()); - const IColumn & x = *arguments[1].column; - const IColumn & y = *arguments[2].column; + const IColumn & col_x = *arguments[1].column; + const IColumn & col_y = *arguments[2].column; - size_t rows = input_rows_count; - auto res = ColumnInt64::create(rows); + auto col_res = ColumnInt64::create(input_rows_count); const auto & timezone_x = extractTimeZoneFromFunctionArguments(arguments, 3, 1); const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2); if (unit == "year" || unit == "years" || unit == "yy" || unit == "yyyy") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "quarter" || unit == "quarters" || unit == "qq" || unit == "q") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "month" || unit == "months" || unit == "mm" || unit == "m") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "week" || unit == "weeks" || unit == "wk" || unit == "ww") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "day" || unit == "days" || unit == "dd" || unit == "d") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "hour" || unit == "hours" || unit == "hh" || unit == "h") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "minute" || unit == "minutes" || unit == "mi" || unit == "n") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "second" || unit == "seconds" || unit == "ss" || unit == "s") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "millisecond" || unit == "milliseconds" || unit == "ms") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, 
timezone_x, timezone_y, col_res->getData()); else if (unit == "microsecond" || unit == "microseconds" || unit == "us" || unit == "u") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else if (unit == "nanosecond" || unit == "nanoseconds" || unit == "ns") - impl.template dispatchForColumns>(x, y, timezone_x, timezone_y, res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); else - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Function {} does not support '{}' unit", getName(), unit); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} does not support '{}' unit", getName(), unit); - return res; + return col_res; } private: DateDiffImpl impl{name}; @@ -437,50 +414,35 @@ public: static constexpr auto name = "timeDiff"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } - + String getName() const override { return name; } + bool useDefaultImplementationForConstants() const override { return true; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {}; } bool isVariadic() const override { return false; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } size_t getNumberOfArguments() const override { return 2; } - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - if (arguments.size() != 2) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 2", - getName(), arguments.size()); + FunctionArgumentDescriptors args{ + {"first_datetime", static_cast(&isDateOrDate32OrDateTimeOrDateTime64), nullptr, "Date[32] or DateTime[64]"}, + {"second_datetime", static_cast(&isDateOrDate32OrDateTimeOrDateTime64), nullptr, "Date[32] or DateTime[64]"}, + }; - if (!isDate(arguments[0]) && !isDate32(arguments[0]) && !isDateTime(arguments[0]) && !isDateTime64(arguments[0])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "First argument for function {} must be Date, Date32, DateTime or DateTime64", - getName()); - - if (!isDate(arguments[1]) && !isDate32(arguments[1]) && !isDateTime(arguments[1]) && !isDateTime64(arguments[1])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Second argument for function {} must be Date, Date32, DateTime or DateTime64", - getName() - ); + validateFunctionArguments(*this, arguments, args); return std::make_shared(); } - bool useDefaultImplementationForConstants() const override { return true; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - const IColumn & x = *arguments[0].column; - const IColumn & y = *arguments[1].column; + const IColumn & col_x = *arguments[0].column; + const IColumn & col_y = *arguments[1].column; - size_t rows = input_rows_count; - auto res = ColumnInt64::create(rows); + auto col_res = ColumnInt64::create(input_rows_count); - impl.dispatchForColumns>(x, y, DateLUT::instance(), DateLUT::instance(), res->getData()); + impl.dispatchForColumns>(col_x, col_y, DateLUT::instance(), DateLUT::instance(), col_res->getData()); - 
return res; + return col_res; } private: DateDiffImpl impl{name}; From 3f66b39a18a7bf271a9a9f97dfc075866e2409eb Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 19 Jul 2024 14:30:27 +0200 Subject: [PATCH 0529/2361] test replication lag metric --- .../0_stateless/03206_replication_lag_metric.reference | 4 ++++ .../queries/0_stateless/03206_replication_lag_metric.sql | 9 +++++++++ 2 files changed, 13 insertions(+) create mode 100644 tests/queries/0_stateless/03206_replication_lag_metric.reference create mode 100644 tests/queries/0_stateless/03206_replication_lag_metric.sql diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.reference b/tests/queries/0_stateless/03206_replication_lag_metric.reference new file mode 100644 index 00000000000..02f4a7264b1 --- /dev/null +++ b/tests/queries/0_stateless/03206_replication_lag_metric.reference @@ -0,0 +1,4 @@ +0 +2 +0 +2 diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.sql b/tests/queries/0_stateless/03206_replication_lag_metric.sql new file mode 100644 index 00000000000..6b86553fcaf --- /dev/null +++ b/tests/queries/0_stateless/03206_replication_lag_metric.sql @@ -0,0 +1,9 @@ +CREATE DATABASE rdb1 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica1'); +CREATE DATABASE rdb2 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica2'); + +SET distributed_ddl_task_timeout = 0; +CREATE TABLE rdb1.t (id UInt32) ENGINE = ReplicatedMergeTree ORDER BY id; +SELECT replication_lag FROM system.clusters; + +DROP DATABASE rdb1; +DROP DATABASE rdb2; From 245626e5789064fda39ccc7288b83162284a3617 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 19 Jul 2024 14:30:44 +0200 Subject: [PATCH 0530/2361] small fix --- src/Storages/System/StorageSystemClusters.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h index ead123aa79e..f6e08734896 100644 --- a/src/Storages/System/StorageSystemClusters.h +++ b/src/Storages/System/StorageSystemClusters.h @@ -1,10 +1,10 @@ #pragma once +#include #include #include #include - namespace DB { From 212b81da533d18a6b9f02c66b34a2161ff1e5d71 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 19 Jul 2024 14:33:20 +0200 Subject: [PATCH 0531/2361] rm debug prints, fix headers --- src/Processors/Executors/CompletedPipelineExecutor.cpp | 1 - src/Storages/MergeTree/MergeTreeDataWriter.cpp | 4 ---- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 1 - 3 files changed, 6 deletions(-) diff --git a/src/Processors/Executors/CompletedPipelineExecutor.cpp b/src/Processors/Executors/CompletedPipelineExecutor.cpp index 1eeee896ede..888835c9beb 100644 --- a/src/Processors/Executors/CompletedPipelineExecutor.cpp +++ b/src/Processors/Executors/CompletedPipelineExecutor.cpp @@ -3,7 +3,6 @@ #include #include #include -#include "Common/Logger.h" #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 97335b601f9..73244b714bf 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -14,8 +14,6 @@ #include #include #include -#include "Common/Logger.h" -#include "Common/logger_useful.h" #include #include #include @@ -186,8 +184,6 @@ void updateTTL( void MergeTreeDataWriter::TemporaryPart::cancel() { - LOG_INFO(getLogger("MergeTreeDataWriter"), "TemporaryPart cancel"); - try { /// An exception context is needed 
to proper delete write buffers without finalization diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index f6d6b8cb7a3..17662f92035 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include From 57c1d7a1011f96cea21ca66a3064b7481f8ce40b Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 19 Jul 2024 12:36:57 +0000 Subject: [PATCH 0532/2361] fix filling of empty Nested --- src/DataTypes/IDataType.cpp | 4 +- src/DataTypes/ObjectUtils.cpp | 31 ++++++++++ src/DataTypes/ObjectUtils.h | 3 + src/DataTypes/Serializations/ISerialization.h | 3 +- src/Interpreters/inplaceBlockConversions.cpp | 57 ++++++++++++++++--- src/Storages/MergeTree/IMergeTreeReader.cpp | 7 ++- src/Storages/MergeTree/IMergeTreeReader.h | 3 + 7 files changed, 95 insertions(+), 13 deletions(-) diff --git a/src/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp index 1cb64b65d3a..824bc6e33b0 100644 --- a/src/DataTypes/IDataType.cpp +++ b/src/DataTypes/IDataType.cpp @@ -90,7 +90,9 @@ void IDataType::forEachSubcolumn( { auto name = ISerialization::getSubcolumnNameForStream(subpath, prefix_len); auto subdata = ISerialization::createFromPath(subpath, prefix_len); - callback(subpath, name, subdata); + auto path_copy = subpath; + path_copy.resize(prefix_len); + callback(path_copy, name, subdata); } subpath[i].visited = true; } diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 1d525e5987f..356e609e77a 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -66,6 +67,36 @@ DataTypePtr getBaseTypeOfArray(const DataTypePtr & type) return last_array ? last_array->getNestedType() : type; } +DataTypePtr getBaseTypeOfArray(DataTypePtr type, const Names & tuple_elements) +{ + auto it = tuple_elements.begin(); + while (true) + { + if (const auto * type_array = typeid_cast(type.get())) + { + type = type_array->getNestedType(); + } + else if (const auto * type_tuple = typeid_cast(type.get())) + { + if (it == tuple_elements.end()) + break; + + auto pos = type_tuple->tryGetPositionByName(*it); + if (!pos) + break; + + ++it; + type = type_tuple->getElement(*pos); + } + else + { + break; + } + } + + return type; +} + ColumnPtr getBaseColumnOfArray(const ColumnPtr & column) { /// Get raw pointers to avoid extra copying of column pointers. diff --git a/src/DataTypes/ObjectUtils.h b/src/DataTypes/ObjectUtils.h index 6599d8adef1..21e5c3b2f59 100644 --- a/src/DataTypes/ObjectUtils.h +++ b/src/DataTypes/ObjectUtils.h @@ -27,6 +27,9 @@ size_t getNumberOfDimensions(const IColumn & column); /// Returns type of scalars of Array of arbitrary dimensions. DataTypePtr getBaseTypeOfArray(const DataTypePtr & type); +/// The same as above but takes into account Tuples of Nested. +DataTypePtr getBaseTypeOfArray(DataTypePtr type, const Names & tuple_elements); + /// Returns Array type with requested scalar type and number of dimensions. 
DataTypePtr createArrayOfType(DataTypePtr type, size_t num_dimensions); diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 255dbbfadd2..5d0bf60c59f 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -195,7 +195,7 @@ public: /// Types of substreams that can have arbitrary name. static const std::set named_types; - Type type; + Type type = Type::Regular; /// The name of a variant element type. String variant_element_name; @@ -212,6 +212,7 @@ public: /// Flag, that may help to traverse substream paths. mutable bool visited = false; + Substream() = default; Substream(Type type_) : type(type_) {} /// NOLINT String toString() const; }; diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index f7d8a2a2daf..ce3f25d16f8 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -283,6 +283,9 @@ static ColumnPtr createColumnWithDefaultValue(const IDataType & data_type, const { auto column = data_type.createColumnConstWithDefaultValue(num_rows); + /// We must turn a constant column into a full column because the interpreter could infer + /// that it is constant everywhere but in some blocks (from other parts) it can be a full column. + if (subcolumn_name.empty()) return column->convertToFullColumnIfConst(); @@ -293,6 +296,35 @@ static ColumnPtr createColumnWithDefaultValue(const IDataType & data_type, const return ColumnConst::create(std::move(column), num_rows)->convertToFullColumnIfConst(); } +static bool hasDefault(const StorageMetadataPtr & metadata_snapshot, const NameAndTypePair & column) +{ + if (!metadata_snapshot) + return false; + + const auto & columns = metadata_snapshot->getColumns(); + if (columns.has(column.name)) + return columns.hasDefault(column.name); + + auto name_in_storage = column.getNameInStorage(); + return columns.hasDefault(name_in_storage); +} + +static String removeTupleElementsFromSubcolumn(String subcolumn_name, const Names & tuple_elements) +{ + subcolumn_name += "."; + for (const auto & elem : tuple_elements) + { + auto pos = subcolumn_name.find(elem + "."); + if (pos != std::string::npos) + subcolumn_name.erase(pos, elem.size()); + } + + if (subcolumn_name.ends_with(".")) + subcolumn_name.pop_back(); + + return subcolumn_name; +} + void fillMissingColumns( Columns & res_columns, size_t num_rows, @@ -321,10 +353,8 @@ void fillMissingColumns( if (res_columns[i] && partially_read_columns.contains(requested_column->name)) res_columns[i] = nullptr; - if (res_columns[i]) - continue; - - if (metadata_snapshot && metadata_snapshot->getColumns().hasDefault(requested_column->getNameInStorage())) + /// Nothing to fill or default should be filled in evaluateMissingDefaults + if (res_columns[i] || hasDefault(metadata_snapshot, *requested_column)) continue; std::vector current_offsets; @@ -365,19 +395,30 @@ void fillMissingColumns( if (!current_offsets.empty()) { + + Names tuple_elements; + auto serialization = IDataType::getSerialization(*requested_column); + + IDataType::forEachSubcolumn([&](const auto & path, const auto &, const auto &) + { + if (path.back().type == ISerialization::Substream::TupleElement) + tuple_elements.push_back(path.back().name_of_substream); + }, ISerialization::SubstreamData(serialization)); + size_t num_empty_dimensions = num_dimensions - current_offsets.size(); - auto scalar_type = 
createArrayOfType(getBaseTypeOfArray(requested_column->getTypeInStorage()), num_empty_dimensions); + auto base_type = getBaseTypeOfArray(requested_column->getTypeInStorage(), tuple_elements); + auto scalar_type = createArrayOfType(base_type, num_empty_dimensions); size_t data_size = assert_cast(*current_offsets.back()).getData().back(); - res_columns[i] = createColumnWithDefaultValue(*scalar_type, requested_column->getSubcolumnName(), data_size); + auto subcolumn_name = removeTupleElementsFromSubcolumn(requested_column->getSubcolumnName(), tuple_elements); + + res_columns[i] = createColumnWithDefaultValue(*scalar_type, subcolumn_name, data_size); for (auto it = current_offsets.rbegin(); it != current_offsets.rend(); ++it) res_columns[i] = ColumnArray::create(res_columns[i], *it); } else { - /// We must turn a constant column into a full column because the interpreter could infer - /// that it is constant everywhere but in some blocks (from other parts) it can be a full column. res_columns[i] = createColumnWithDefaultValue(*requested_column->getTypeInStorage(), requested_column->getSubcolumnName(), num_rows); } } diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index aff1001163e..5f36e4c7c13 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -44,6 +44,7 @@ IMergeTreeReader::IMergeTreeReader( , alter_conversions(data_part_info_for_read->getAlterConversions()) /// For wide parts convert plain arrays of Nested to subcolumns /// to allow to use shared offset column from cache. + , original_requested_columns(columns_) , requested_columns(data_part_info_for_read->isWidePart() ? Nested::convertToSubcolumns(columns_) : columns_) @@ -139,7 +140,7 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns { try { - size_t num_columns = requested_columns.size(); + size_t num_columns = original_requested_columns.size(); if (res_columns.size() != num_columns) throw Exception(ErrorCodes::LOGICAL_ERROR, "invalid number of columns passed to MergeTreeReader::fillMissingColumns. " @@ -151,7 +152,7 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns /// Convert columns list to block. And convert subcolumns to full columns. /// TODO: rewrite with columns interface. It will be possible after changes in ExpressionActions. - auto it = requested_columns.begin(); + auto it = original_requested_columns.begin(); for (size_t pos = 0; pos < num_columns; ++pos, ++it) { auto name_in_storage = it->getNameInStorage(); @@ -178,7 +179,7 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns } /// Move columns from block. - it = requested_columns.begin(); + it = original_requested_columns.begin(); for (size_t pos = 0; pos < num_columns; ++pos, ++it) { auto name_in_storage = it->getNameInStorage(); diff --git a/src/Storages/MergeTree/IMergeTreeReader.h b/src/Storages/MergeTree/IMergeTreeReader.h index a1ec0339fd6..d799ce57b40 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.h +++ b/src/Storages/MergeTree/IMergeTreeReader.h @@ -112,6 +112,9 @@ protected: private: /// Columns that are requested to read. + NamesAndTypesList original_requested_columns; + + /// The same as above but with converted Arrays to subcolumns of Nested. NamesAndTypesList requested_columns; /// Actual columns description in part. 
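The core of the fix above is the new getBaseTypeOfArray overload that walks through both Array and Tuple layers by following the requested subcolumn's tuple-element path. A toy model of that walk, with an invented Type structure standing in for the IDataType hierarchy (none of the names below are the real ClickHouse API), looks like this:

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Type
{
    enum Kind { Scalar, Array, Tuple } kind;
    std::string name;                                      /// for Scalar
    std::shared_ptr<Type> nested;                          /// for Array
    std::map<std::string, std::shared_ptr<Type>> elements; /// for Tuple
};

using TypePtr = std::shared_ptr<Type>;

TypePtr baseTypeOfArray(TypePtr type, const std::vector<std::string> & tuple_elements)
{
    auto it = tuple_elements.begin();
    while (true)
    {
        if (type->kind == Type::Array)
        {
            type = type->nested;            /// peel one array dimension
        }
        else if (type->kind == Type::Tuple)
        {
            if (it == tuple_elements.end())
                break;                      /// no more path to follow
            auto elem = type->elements.find(*it);
            if (elem == type->elements.end())
                break;                      /// unknown element, stop here
            ++it;
            type = elem->second;
        }
        else
        {
            break;                          /// scalar reached
        }
    }
    return type;
}

int main()
{
    auto uint32 = std::make_shared<Type>(Type{Type::Scalar, "UInt32", nullptr, {}});
    auto tuple = std::make_shared<Type>(Type{Type::Tuple, "", nullptr, {{"a", uint32}, {"b", uint32}}});
    auto array_of_tuple = std::make_shared<Type>(Type{Type::Array, "", tuple, {}});

    /// Equivalent of asking for the scalar type behind a subcolumn like `n.a`
    /// of a column `n Array(Tuple(a UInt32, b UInt32)).
    std::cout << baseTypeOfArray(array_of_tuple, {"a"})->name << '\n'; /// prints UInt32
}

Peeling the Array and then descending into element a yields UInt32 rather than stopping at the Tuple, which is what the patch needs in order to fill a missing Nested subcolumn with defaults of the right scalar type.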
From a0171256fd5cb6932ceb9b2cc98c9b553dfa82fd Mon Sep 17 00:00:00 2001 From: Alexander Sapin Date: Fri, 19 Jul 2024 14:41:17 +0200 Subject: [PATCH 0533/2361] Small improvement for background pool in Keeper --- src/Coordination/KeeperServer.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index dc9658e895f..68515debe3b 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -368,7 +368,10 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co LockMemoryExceptionInThread::removeUniqueLock(); }; - asio_opts.thread_pool_size_ = getNumberOfPhysicalCPUCores(); + /// At least 16 threads for network communication in asio. + /// asio is async framework, so even with 1 thread it should be ok, but + /// still as safeguard it's better to have some redundant capacity here + asio_opts.thread_pool_size_ = std::max(16U, getNumberOfPhysicalCPUCores()); if (state_manager->isSecure()) { From 189b3d306fc0e488010564384b193412acd0358b Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 19 Jul 2024 13:17:17 +0000 Subject: [PATCH 0534/2361] fix tests --- src/Storages/MergeTree/MergeTreeReaderCompact.cpp | 2 +- .../0_stateless/02026_describe_include_subcolumns.reference | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index fde9dafffb8..ff0311dc1ca 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -101,7 +101,7 @@ NameAndTypePair MergeTreeReaderCompact::getColumnConvertedToSubcolumnOfNested(co if (!storage_columns_with_collected_nested) { - auto options = GetColumnsOptions(GetColumnsOptions::AllPhysical).withExtendedObjects(); + auto options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects(); auto storage_columns_list = Nested::collect(storage_snapshot->getColumns(options)); storage_columns_with_collected_nested = ColumnsDescription(std::move(storage_columns_list)); } diff --git a/tests/queries/0_stateless/02026_describe_include_subcolumns.reference b/tests/queries/0_stateless/02026_describe_include_subcolumns.reference index dec65f62748..62efafceaad 100644 --- a/tests/queries/0_stateless/02026_describe_include_subcolumns.reference +++ b/tests/queries/0_stateless/02026_describe_include_subcolumns.reference @@ -26,7 +26,7 @@ 10. │ t.s │ String │ │ │ │ ZSTD(1) │ │ 1 │ 11. │ t.a │ Array(Tuple( a UInt32, - b UInt32)) │ │ │ │ │ │ 1 │ + b UInt32)) │ │ │ │ ZSTD(1) │ │ 1 │ 12. │ t.a.size0 │ UInt64 │ │ │ │ │ │ 1 │ 13. │ t.a.a │ Array(UInt32) │ │ │ │ ZSTD(1) │ │ 1 │ 14. 
│ t.a.b │ Array(UInt32) │ │ │ │ ZSTD(1) │ │ 1 │ From 79ef630d85cb445a743ee2d5950197709d75325f Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 19 Jul 2024 15:25:08 +0200 Subject: [PATCH 0535/2361] fix tests --- .../0_stateless/02117_show_create_table_system.reference | 2 ++ tests/queries/0_stateless/03206_replication_lag_metric.sql | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 8f62eda9233..28356632a66 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -52,6 +52,8 @@ CREATE TABLE system.clusters `database_shard_name` String, `database_replica_name` String, `is_active` Nullable(UInt8), + `replication_lag` Nullable(UInt32), + `recovery_time` Nullable(UInt64), `name` String ALIAS cluster ) ENGINE = SystemClusters diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.sql b/tests/queries/0_stateless/03206_replication_lag_metric.sql index 6b86553fcaf..998c332a11c 100644 --- a/tests/queries/0_stateless/03206_replication_lag_metric.sql +++ b/tests/queries/0_stateless/03206_replication_lag_metric.sql @@ -1,9 +1,11 @@ +-- Tags: no-parallel + CREATE DATABASE rdb1 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica1'); CREATE DATABASE rdb2 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica2'); SET distributed_ddl_task_timeout = 0; CREATE TABLE rdb1.t (id UInt32) ENGINE = ReplicatedMergeTree ORDER BY id; -SELECT replication_lag FROM system.clusters; +SELECT replication_lag FROM system.clusters WHERE cluster IN ('rdb1', 'rdb2') ORDER BY cluster ASC, replica_num ASC; DROP DATABASE rdb1; DROP DATABASE rdb2; From eb51dc8980a4fa29e7836132158023fbba39db7f Mon Sep 17 00:00:00 2001 From: Alex Katsman Date: Mon, 15 Jul 2024 12:08:18 +0000 Subject: [PATCH 0536/2361] Don't start GWP allocations until init is finished --- programs/keeper/Keeper.cpp | 8 ++++++++ programs/server/Server.cpp | 1 + src/Client/ClientBase.cpp | 9 +++++++++ src/Common/Allocator.cpp | 4 ++-- src/Common/GWPAsan.cpp | 7 +++++++ src/Common/GWPAsan.h | 18 ++++++++++++++++++ src/Common/memory.h | 6 +++--- 7 files changed, 48 insertions(+), 5 deletions(-) diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 44c2daa33ad..8cf1a4d1999 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -52,6 +52,10 @@ # include #endif +#if USE_GWP_ASAN +# include +#endif + #include #include @@ -639,6 +643,10 @@ try tryLogCurrentException(log, "Disabling cgroup memory observer because of an error during initialization"); } +#if USE_GWP_ASAN + GWPAsan::initFinished(); +#endif + LOG_INFO(log, "Ready for connections."); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 0b695c3dde6..16888015f8b 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -2213,6 +2213,7 @@ try CannotAllocateThreadFaultInjector::setFaultProbability(server_settings.cannot_allocate_thread_fault_injection_probability); #if USE_GWP_ASAN + GWPAsan::initFinished(); GWPAsan::setForceSampleProbability(server_settings.gwp_asan_force_sample_probability); #endif diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 13dce05cabc..7af199131b6 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -81,6 +81,10 @@ #include #include "config.h" 
+#if USE_GWP_ASAN +# include +#endif + namespace fs = std::filesystem; using namespace std::literals; @@ -3264,6 +3268,11 @@ void ClientBase::init(int argc, char ** argv) fatal_log = createLogger("ClientBase", fatal_channel_ptr.get(), Poco::Message::PRIO_FATAL); signal_listener = std::make_unique(nullptr, fatal_log); signal_listener_thread.start(*signal_listener); + +#if USE_GWP_ASAN + GWPAsan::initFinished(); +#endif + } } diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index bcc5d187047..7f2241ab4c0 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -68,7 +68,7 @@ void * allocNoTrack(size_t size, size_t alignment) { void * buf; #if USE_GWP_ASAN - if (unlikely(GWPAsan::GuardedAlloc.shouldSample())) + if (unlikely(GWPAsan::shouldSample())) { if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignment)) { @@ -185,7 +185,7 @@ void * Allocator::realloc(void * buf, size_t old_size, } #if USE_GWP_ASAN - if (unlikely(GWPAsan::GuardedAlloc.shouldSample())) + if (unlikely(GWPAsan::shouldSample())) { auto trace_alloc = CurrentMemoryTracker::alloc(new_size); if (void * ptr = GWPAsan::GuardedAlloc.allocate(new_size, alignment)) diff --git a/src/Common/GWPAsan.cpp b/src/Common/GWPAsan.cpp index 48fbd07ec34..de6991191ea 100644 --- a/src/Common/GWPAsan.cpp +++ b/src/Common/GWPAsan.cpp @@ -217,6 +217,13 @@ void printReport([[maybe_unused]] uintptr_t fault_address) reinterpret_cast(trace.data()), 0, trace_length, [&](const auto line) { LOG_FATAL(logger, fmt::runtime(line)); }); } +std::atomic init_finished = false; + +void initFinished() +{ + init_finished.store(true, std::memory_order_relaxed); +} + std::atomic force_sample_probability = 0.0; void setForceSampleProbability(double value) diff --git a/src/Common/GWPAsan.h b/src/Common/GWPAsan.h index b3215c6157e..846c3417db4 100644 --- a/src/Common/GWPAsan.h +++ b/src/Common/GWPAsan.h @@ -19,12 +19,30 @@ bool isGWPAsanError(uintptr_t fault_address); void printReport(uintptr_t fault_address); +extern std::atomic init_finished; + +void initFinished(); + extern std::atomic force_sample_probability; void setForceSampleProbability(double value); +/** + * We'd like to postpone sampling allocations under the startup is finished. There are mainly + * two reasons for that: + * + * - To avoid complex issues with initialization order + * - Don't waste MaxSimultaneousAllocations on global objects as it's not useful +*/ +inline bool shouldSample() +{ + return init_finished.load(std::memory_order_relaxed) && GuardedAlloc.shouldSample(); +} + inline bool shouldForceSample() { + if (!init_finished.load(std::memory_order_relaxed)) + return false; std::bernoulli_distribution dist(force_sample_probability.load(std::memory_order_relaxed)); return dist(thread_local_rng); } diff --git a/src/Common/memory.h b/src/Common/memory.h index dbef069b408..d673f954fb2 100644 --- a/src/Common/memory.h +++ b/src/Common/memory.h @@ -37,7 +37,7 @@ requires DB::OptionalArgument inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align) { #if USE_GWP_ASAN - if (unlikely(GWPAsan::GuardedAlloc.shouldSample())) + if (unlikely(GWPAsan::shouldSample())) { if constexpr (sizeof...(TAlign) == 1) { @@ -83,7 +83,7 @@ inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... 
align) inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept { #if USE_GWP_ASAN - if (unlikely(GWPAsan::GuardedAlloc.shouldSample())) + if (unlikely(GWPAsan::shouldSample())) { if (void * ptr = GWPAsan::GuardedAlloc.allocate(size)) { @@ -102,7 +102,7 @@ inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept inline ALWAYS_INLINE void * newNoExcept(std::size_t size, std::align_val_t align) noexcept { #if USE_GWP_ASAN - if (unlikely(GWPAsan::GuardedAlloc.shouldSample())) + if (unlikely(GWPAsan::shouldSample())) { if (void * ptr = GWPAsan::GuardedAlloc.allocate(size, alignToSizeT(align))) { From 0ef2c7d486669d8c604e332594a02dfc1c38278a Mon Sep 17 00:00:00 2001 From: Alex Katsman Date: Thu, 18 Jul 2024 11:53:55 +0000 Subject: [PATCH 0537/2361] Disable parallel run for 02440_mutations_finalization test --- tests/queries/0_stateless/02440_mutations_finalization.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02440_mutations_finalization.sql b/tests/queries/0_stateless/02440_mutations_finalization.sql index c522d8ab9df..92ed6a327ad 100644 --- a/tests/queries/0_stateless/02440_mutations_finalization.sql +++ b/tests/queries/0_stateless/02440_mutations_finalization.sql @@ -1,3 +1,4 @@ +-- Tags: no-parallel create table mut (n int) engine=ReplicatedMergeTree('/test/02440/{database}/mut', '1') order by tuple(); set insert_keeper_fault_injection_probability=0; From a54a0614a464d686aff48aab583daa527f74e932 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 19 Jul 2024 12:27:18 +0000 Subject: [PATCH 0538/2361] Iterate over input_rows_count instead of a size of a random input column --- src/Functions/dateDiff.cpp | 99 ++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 48 deletions(-) diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index 5c46ad40daa..a39cbae4e30 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -47,82 +47,80 @@ public: void dispatchForColumns( const IColumn & col_x, const IColumn & col_y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, + size_t input_rows_count, ColumnInt64::Container & result) const { if (const auto * x_vec_16 = checkAndGetColumn(&col_x)) - dispatchForSecondColumn(*x_vec_16, col_y, timezone_x, timezone_y, result); + dispatchForSecondColumn(*x_vec_16, col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_vec_32 = checkAndGetColumn(&col_x)) - dispatchForSecondColumn(*x_vec_32, col_y, timezone_x, timezone_y, result); + dispatchForSecondColumn(*x_vec_32, col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_vec_32_s = checkAndGetColumn(&col_x)) - dispatchForSecondColumn(*x_vec_32_s, col_y, timezone_x, timezone_y, result); + dispatchForSecondColumn(*x_vec_32_s, col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_vec_64 = checkAndGetColumn(&col_x)) - dispatchForSecondColumn(*x_vec_64, col_y, timezone_x, timezone_y, result); + dispatchForSecondColumn(*x_vec_64, col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_const_16 = checkAndGetColumnConst(&col_x)) - dispatchConstForSecondColumn(x_const_16->getValue(), col_y, timezone_x, timezone_y, result); + dispatchConstForSecondColumn(x_const_16->getValue(), col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_const_32 = checkAndGetColumnConst(&col_x)) - dispatchConstForSecondColumn(x_const_32->getValue(), col_y, timezone_x, timezone_y, result); + 
dispatchConstForSecondColumn(x_const_32->getValue(), col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_const_32_s = checkAndGetColumnConst(&col_x)) - dispatchConstForSecondColumn(x_const_32_s->getValue(), col_y, timezone_x, timezone_y, result); + dispatchConstForSecondColumn(x_const_32_s->getValue(), col_y, timezone_x, timezone_y, input_rows_count, result); else if (const auto * x_const_64 = checkAndGetColumnConst(&col_x)) - dispatchConstForSecondColumn(x_const_64->getValue>(), col_y, timezone_x, timezone_y, result); + dispatchConstForSecondColumn(x_const_64->getValue>(), col_y, timezone_x, timezone_y, input_rows_count, result); else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "Illegal column for first argument of function {}, must be Date, Date32, DateTime or DateTime64", - name); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for first argument of function {}, must be Date, Date32, DateTime or DateTime64", name); } template void dispatchForSecondColumn( const LeftColumnType & x, const IColumn & col_y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, + size_t input_rows_count, ColumnInt64::Container & result) const { if (const auto * y_vec_16 = checkAndGetColumn(&col_y)) - vectorVector(x, *y_vec_16, timezone_x, timezone_y, result); + vectorVector(x, *y_vec_16, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_vec_32 = checkAndGetColumn(&col_y)) - vectorVector(x, *y_vec_32, timezone_x, timezone_y, result); + vectorVector(x, *y_vec_32, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_vec_32_s = checkAndGetColumn(&col_y)) - vectorVector(x, *y_vec_32_s, timezone_x, timezone_y, result); + vectorVector(x, *y_vec_32_s, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_vec_64 = checkAndGetColumn(&col_y)) - vectorVector(x, *y_vec_64, timezone_x, timezone_y, result); + vectorVector(x, *y_vec_64, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_const_16 = checkAndGetColumnConst(&col_y)) - vectorConstant(x, y_const_16->getValue(), timezone_x, timezone_y, result); + vectorConstant(x, y_const_16->getValue(), timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_const_32 = checkAndGetColumnConst(&col_y)) - vectorConstant(x, y_const_32->getValue(), timezone_x, timezone_y, result); + vectorConstant(x, y_const_32->getValue(), timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_const_32_s = checkAndGetColumnConst(&col_y)) - vectorConstant(x, y_const_32_s->getValue(), timezone_x, timezone_y, result); + vectorConstant(x, y_const_32_s->getValue(), timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_const_64 = checkAndGetColumnConst(&col_y)) - vectorConstant(x, y_const_64->getValue>(), timezone_x, timezone_y, result); + vectorConstant(x, y_const_64->getValue>(), timezone_x, timezone_y, input_rows_count, result); else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "Illegal column for second argument of function {}, must be Date, Date32, DateTime or DateTime64", - name); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for second argument of function {}, must be Date, Date32, DateTime or DateTime64", name); } template void dispatchConstForSecondColumn( T1 x, const IColumn & col_y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, + size_t input_rows_count, ColumnInt64::Container & result) const { if (const auto * y_vec_16 = checkAndGetColumn(&col_y)) - 
constantVector(x, *y_vec_16, timezone_x, timezone_y, result); + constantVector(x, *y_vec_16, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_vec_32 = checkAndGetColumn(&col_y)) - constantVector(x, *y_vec_32, timezone_x, timezone_y, result); + constantVector(x, *y_vec_32, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_vec_32_s = checkAndGetColumn(&col_y)) - constantVector(x, *y_vec_32_s, timezone_x, timezone_y, result); + constantVector(x, *y_vec_32_s, timezone_x, timezone_y, input_rows_count, result); else if (const auto * y_vec_64 = checkAndGetColumn(&col_y)) - constantVector(x, *y_vec_64, timezone_x, timezone_y, result); + constantVector(x, *y_vec_64, timezone_x, timezone_y, input_rows_count, result); else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, - "Illegal column for second argument of function {}, must be Date, Date32, DateTime or DateTime64", - name); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for second argument of function {}, must be Date, Date32, DateTime or DateTime64", name); } template void vectorVector( const LeftColumnType & x, const RightColumnType & y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, + size_t input_rows_count, ColumnInt64::Container & result) const { const auto & x_data = x.getData(); @@ -130,14 +128,15 @@ public: const auto transform_x = TransformDateTime64(getScale(x)); const auto transform_y = TransformDateTime64(getScale(y)); - for (size_t i = 0, size = x.size(); i < size; ++i) - result[i] = calculate(transform_x, transform_y, x_data[i], y_data[i], timezone_x, timezone_y); + for (size_t i = 0; i < input_rows_count; ++i) + result[i] = calculate(transform_x, transform_y, x_data[i], y_data[i], timezone_x, timezone_y); } template void vectorConstant( const LeftColumnType & x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, + size_t input_rows_count, ColumnInt64::Container & result) const { const auto & x_data = x.getData(); @@ -145,7 +144,7 @@ public: const auto transform_y = TransformDateTime64(getScale(y)); const auto y_value = stripDecimalFieldValue(y); - for (size_t i = 0, size = x.size(); i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) result[i] = calculate(transform_x, transform_y, x_data[i], y_value, timezone_x, timezone_y); } @@ -153,6 +152,7 @@ public: void constantVector( T1 x, const RightColumnType & y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y, + size_t input_rows_count, ColumnInt64::Container & result) const { const auto & y_data = y.getData(); @@ -160,19 +160,22 @@ public: const auto transform_y = TransformDateTime64(getScale(y)); const auto x_value = stripDecimalFieldValue(x); - for (size_t i = 0, size = y.size(); i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) result[i] = calculate(transform_x, transform_y, x_value, y_data[i], timezone_x, timezone_y); } template Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const { + auto res = static_cast(transform_y.execute(y, timezone_y)) - static_cast(transform_x.execute(x, timezone_x)); + if constexpr (is_diff) - return static_cast(transform_y.execute(y, timezone_y)) - static_cast(transform_x.execute(x, timezone_x)); + { + return res; + } else { - auto res = static_cast(transform_y.execute(y, timezone_y)) - - static_cast(transform_x.execute(x, timezone_x)); + /// Adjust res: DateTimeComponentsWithFractionalPart a_comp; 
DateTimeComponentsWithFractionalPart b_comp; Int64 adjust_value; @@ -373,27 +376,27 @@ public: const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2); if (unit == "year" || unit == "years" || unit == "yy" || unit == "yyyy") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "quarter" || unit == "quarters" || unit == "qq" || unit == "q") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "month" || unit == "months" || unit == "mm" || unit == "m") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "week" || unit == "weeks" || unit == "wk" || unit == "ww") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "day" || unit == "days" || unit == "dd" || unit == "d") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "hour" || unit == "hours" || unit == "hh" || unit == "h") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "minute" || unit == "minutes" || unit == "mi" || unit == "n") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "second" || unit == "seconds" || unit == "ss" || unit == "s") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "millisecond" || unit == "milliseconds" || unit == "ms") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "microsecond" || unit == "microseconds" || unit == "us" || unit == "u") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else if (unit == "nanosecond" || unit == "nanoseconds" || unit == "ns") - impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, col_res->getData()); + impl.template dispatchForColumns>(col_x, col_y, timezone_x, timezone_y, input_rows_count, col_res->getData()); else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} does not support '{}' unit", getName(), unit); @@ -440,7 +443,7 @@ public: auto col_res = ColumnInt64::create(input_rows_count); - impl.dispatchForColumns>(col_x, col_y, 
DateLUT::instance(), DateLUT::instance(), col_res->getData()); + impl.dispatchForColumns>(col_x, col_y, DateLUT::instance(), DateLUT::instance(), input_rows_count, col_res->getData()); return col_res; } From 0cab22fd16caf260306e1b1feb77d2ddcced5205 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 19 Jul 2024 16:44:56 +0200 Subject: [PATCH 0539/2361] Attempt to fix flakiness of 01194_http_query_id --- tests/queries/0_stateless/01194_http_query_id.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01194_http_query_id.sh b/tests/queries/0_stateless/01194_http_query_id.sh index 5aebdc10dfc..fac17cca3c6 100755 --- a/tests/queries/0_stateless/01194_http_query_id.sh +++ b/tests/queries/0_stateless/01194_http_query_id.sh @@ -4,14 +4,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -url="http://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/?session_id=test_01194" rnd=$RANDOM +url="${CLICKHOUSE_URL}&session_id=test_01194_$RANDOM" ${CLICKHOUSE_CURL} -sS "$url&query=SELECT+'test_01194',$rnd,1" > /dev/null ${CLICKHOUSE_CURL} -sS "$url&query=SELECT+'test_01194',$rnd,2" > /dev/null ${CLICKHOUSE_CURL} -sS "$url" --data "SELECT 'test_01194',$rnd,3" > /dev/null ${CLICKHOUSE_CURL} -sS "$url" --data "SELECT 'test_01194',$rnd,4" > /dev/null -${CLICKHOUSE_CURL} -sS "$url" --data "SYSTEM FLUSH LOGS" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CURL} -sS "$url&query=SELECT+count(DISTINCT+query_id)+FROM+system.query_log+WHERE+current_database+LIKE+currentDatabase()+AND+query+LIKE+'SELECT+''test_01194'',$rnd%25'" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data "SELECT count(DISTINCT query_id) FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE 'SELECT ''test_01194'',$rnd%'" From 2504a6c36016b41e33ee5323fca79f5d511fb3ce Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 19 Jul 2024 14:59:38 +0000 Subject: [PATCH 0540/2361] make test output a bit clear --- ...61_lightweight_delete_projection.reference | 8 ++ .../03161_lightweight_delete_projection.sql | 131 ++---------------- 2 files changed, 21 insertions(+), 118 deletions(-) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index 3401eaf6162..cb623ea2b50 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,6 +1,14 @@ +compact part +testing throw default mode +testing drop mode +testing rebuild mode 8888 Alice 50 p1 p2 +wide part +testing throw default mode +testing drop mode +testing rebuild mode 8888 Alice 50 p1 p2 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 9d577f8a701..f2d6dcb164f 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,77 +1,11 @@ SET lightweight_deletes_sync = 2, alter_sync = 2; -Set max_insert_threads = 2, -group_by_two_level_threshold = 704642, -group_by_two_level_threshold_bytes = 49659607, -distributed_aggregation_memory_efficient = 0, -fsync_metadata = 0, -output_format_parallel_formatting = 0, -input_format_parallel_parsing = 1, 
-min_chunk_bytes_for_parallel_parsing = 14437539, -max_read_buffer_size = 887507, -prefer_localhost_replica = 0, -max_block_size = 73908, -max_joined_block_size_rows = 21162, -max_threads = 2, -optimize_append_index = 0, -optimize_if_chain_to_multiif = 1, -optimize_if_transform_strings_to_enum = 0, -optimize_read_in_order = 0, -optimize_or_like_chain = 1, -optimize_substitute_columns = 1, -enable_multiple_prewhere_read_steps = 1, -read_in_order_two_level_merge_threshold = 13, -optimize_aggregation_in_order = 1, -aggregation_in_order_max_block_bytes = 37110261, -use_uncompressed_cache = 0, -min_bytes_to_use_direct_io = 10737418240, -min_bytes_to_use_mmap_io = 1, -local_filesystem_read_method ='pread', -remote_filesystem_read_method ='threadpool', -local_filesystem_read_prefetch = 0, -filesystem_cache_segments_batch_size = 3, -read_from_filesystem_cache_if_exists_otherwise_bypass_cache = 1, -throw_on_error_from_cache_on_write_operations = 0, -remote_filesystem_read_prefetch = 1, -allow_prefetched_read_pool_for_remote_filesystem = 0, -filesystem_prefetch_max_memory_usage = '32Mi', -filesystem_prefetches_limit = 0, -filesystem_prefetch_min_bytes_for_single_read_task ='16Mi', -filesystem_prefetch_step_marks = 50, -filesystem_prefetch_step_bytes = 0, -compile_aggregate_expressions = 0, -compile_sort_description = 1, -merge_tree_coarse_index_granularity = 16, -optimize_distinct_in_order = 0, -max_bytes_before_external_sort = 0, -max_bytes_before_external_group_by = 0, -max_bytes_before_remerge_sort = 820113150, -min_compress_block_size = 1262249, -max_compress_block_size = 1472188, -merge_tree_compact_parts_min_granules_to_multibuffer_read = 56, -optimize_sorting_by_input_stream_properties = 1, -http_response_buffer_size = 1883022, -http_wait_end_of_query = False, -enable_memory_bound_merging_of_aggregation_results = 1, -min_count_to_compile_expression = 0, -min_count_to_compile_aggregate_expression = 0, -min_count_to_compile_sort_description = 0, -session_timezone ='Africa/Khartoum', -prefer_warmed_unmerged_parts_seconds = 10, -use_page_cache_for_disks_without_file_cache = True, -page_cache_inject_eviction = False, -merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.02, -prefer_external_sort_block_bytes = 100000000, -cross_join_min_rows_to_compress = 100000000, -cross_join_min_bytes_to_compress = 1, -min_external_table_block_size_bytes = 100000000, -max_parsing_threads = 0; - - DROP TABLE IF EXISTS users; --- compact part + +SELECT 'compact part'; + CREATE TABLE users ( uid Int16, name String, @@ -79,36 +13,17 @@ CREATE TABLE users ( projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) ) ENGINE = MergeTree order by uid -SETTINGS min_bytes_for_wide_part = 10485760, -ratio_of_defaults_for_sparse_serialization = 1.0, -prefer_fetch_merged_part_size_threshold = 1, -vertical_merge_algorithm_min_rows_to_activate = 1, -vertical_merge_algorithm_min_columns_to_activate = 100, -allow_vertical_merges_from_compact_to_wide_parts = 0, -min_merge_bytes_to_use_direct_io = 114145183, -index_granularity_bytes = 2660363, -merge_max_block_size = 13460, -index_granularity = 51768, -marks_compress_block_size = 59418, -primary_key_compress_block_size = 88795, -replace_long_file_name_to_hash = 0, -max_file_name_length = 0, -min_bytes_for_full_part_storage = 536870912, -compact_parts_max_bytes_to_buffer = 378557913, -compact_parts_max_granules_to_buffer = 254, -compact_parts_merge_max_bytes_to_prefetch_part = 26969686, 
-cache_populated_by_fetch = 0, -concurrent_part_removal_threshold = 38, -old_parts_lifetime = 480; +SETTINGS min_bytes_for_wide_part = 10485760; INSERT INTO users VALUES (1231, 'John', 33); --- testing throw default mode +SELECT 'testing throw default mode'; + ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } --- testing drop mode +SELECT 'testing drop mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; DELETE FROM users WHERE uid = 1231; @@ -123,7 +38,7 @@ SELECT FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); --- testing rebuild mode +SELECT 'testing rebuild mode'; INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; @@ -143,7 +58,7 @@ WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); DROP TABLE users; --- wide part +SELECT 'wide part'; CREATE TABLE users ( uid Int16, name String, @@ -151,36 +66,16 @@ CREATE TABLE users ( projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) ) ENGINE = MergeTree order by uid -SETTINGS min_bytes_for_wide_part = 0, -ratio_of_defaults_for_sparse_serialization = 1.0, -prefer_fetch_merged_part_size_threshold = 1, -vertical_merge_algorithm_min_rows_to_activate = 1, -vertical_merge_algorithm_min_columns_to_activate = 100, -allow_vertical_merges_from_compact_to_wide_parts = 0, -min_merge_bytes_to_use_direct_io = 114145183, -index_granularity_bytes = 2660363, -merge_max_block_size = 13460, -index_granularity = 51768, -marks_compress_block_size = 59418, -primary_key_compress_block_size = 88795, -replace_long_file_name_to_hash = 0, -max_file_name_length = 0, -min_bytes_for_full_part_storage = 536870912, -compact_parts_max_bytes_to_buffer = 378557913, -compact_parts_max_granules_to_buffer = 254, -compact_parts_merge_max_bytes_to_prefetch_part = 26969686, -cache_populated_by_fetch = 0, -concurrent_part_removal_threshold = 38, -old_parts_lifetime = 480; +SETTINGS min_bytes_for_wide_part = 0; INSERT INTO users VALUES (1231, 'John', 33); --- testing throw default mode +SELECT 'testing throw default mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } --- testing drop mode +SELECT 'testing drop mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; DELETE FROM users WHERE uid = 1231; @@ -195,7 +90,7 @@ SELECT FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); --- testing rebuild mode +SELECT 'testing rebuild mode'; INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; From 7d379388d24fba77de9eab9d99e69e7dc23763c2 Mon Sep 17 00:00:00 2001 From: Alexander Sapin Date: Fri, 19 Jul 2024 17:12:34 +0200 Subject: [PATCH 0541/2361] Turn off randomization of harmful setting --- tests/clickhouse-test | 5 ++++- tests/queries/0_stateless/replication.lib | 8 ++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 60160e71e81..f166f8287d5 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -821,7 +821,10 @@ class SettingsRandomizer: get_localzone(), ] ), 
- "prefer_warmed_unmerged_parts_seconds": lambda: random.randint(0, 10), + # This setting affect part names and their content which can be read from tables in tests. + # We have a lot of tests which relies on part names, so it's very unsafe to enable randomization + # of this setting + # "prefer_warmed_unmerged_parts_seconds": lambda: random.randint(0, 10), "use_page_cache_for_disks_without_file_cache": lambda: random.random() < 0.7, "page_cache_inject_eviction": lambda: random.random() < 0.5, "merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability": lambda: round( diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 2e21f351d2a..1a86cd9f8db 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -79,7 +79,9 @@ function check_replication_consistency() fi # Touch all data to check that it's readable (and trigger PartCheckThread if needed) - while ! $CLICKHOUSE_CLIENT -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do + # it's important to disable prefer warmed unmerged parts because + # otherwise it can read non-syncrhonized state of replicas + while ! $CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do sleep 1; num_tries=$((num_tries+1)) if [ $num_tries -eq 250 ]; then @@ -102,7 +104,9 @@ function check_replication_consistency() try_sync_replicas "$table_name_prefix" "$time_left" || exit 1 - res=$($CLICKHOUSE_CLIENT -q \ + # it's important to disable prefer warmed unmerged parts because + # otherwise it can read non-syncrhonized state of replicas + res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q \ "SELECT if((countDistinct(data) as c) == 0, 1, c) FROM From e57a10189c5e2444beae08f6e55bab9959cde063 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 19 Jul 2024 19:09:51 +0200 Subject: [PATCH 0542/2361] Update ci_config.py --- tests/ci/ci_config.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 98c7e99a495..d18faaf8f1b 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -317,42 +317,42 @@ class CI: random_bucket="parrepl_with_sanitizer", ), JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2 + required_builds=[BuildNames.PACKAGE_ASAN], num_batches=4 ), JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_TSAN], num_batches=2 + required_builds=[BuildNames.PACKAGE_TSAN], num_batches=5 ), JobNames.STATELESS_TEST_MSAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_MSAN], num_batches=3 + required_builds=[BuildNames.PACKAGE_MSAN], num_batches=6 ), JobNames.STATELESS_TEST_UBSAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=1 + required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=2 ), JobNames.STATELESS_TEST_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2 + required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=5 ), JobNames.STATELESS_TEST_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], ), 
JobNames.STATELESS_TEST_RELEASE_COVERAGE: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_RELEASE_COVERAGE], num_batches=5 + required_builds=[BuildNames.PACKAGE_RELEASE_COVERAGE], num_batches=6 ), JobNames.STATELESS_TEST_AARCH64: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], runner_type=Runners.FUNC_TESTER_ARM, ), JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=3 + required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 ), JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2 + required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=6 ), JobNames.STATELESS_TEST_AZURE_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2, release_only=True + required_builds=[BuildNames.PACKAGE_ASAN], num_batches=4, release_only=True ), JobNames.STATELESS_TEST_S3_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], - num_batches=3, + num_batches=5, ), JobNames.STRESS_TEST_DEBUG: CommonJobConfigs.STRESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_DEBUG], From 38126bb436c3f46cfdc321abca421f7fae969f5b Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 19 Jul 2024 19:09:55 +0200 Subject: [PATCH 0543/2361] Add test. --- .../configs/disk_s3.xml | 22 +++++++++++++++ .../test_backup_restore_s3/test.py | 28 ++++++------------- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/tests/integration/test_backup_restore_s3/configs/disk_s3.xml b/tests/integration/test_backup_restore_s3/configs/disk_s3.xml index 45a1e17b039..7ac1a052c30 100644 --- a/tests/integration/test_backup_restore_s3/configs/disk_s3.xml +++ b/tests/integration/test_backup_restore_s3/configs/disk_s3.xml @@ -21,6 +21,13 @@ minio123 33554432 + + s3_plain_rewritable + http://minio1:9001/root/data/disks/disk_s3_plain_rewritable/ + minio + minio123 + 33554432 + cache disk_s3 @@ -37,6 +44,20 @@ + + +

+ disk_s3_plain +
+ + + + +
+ disk_s3_plain_rewritable +
+
+
@@ -57,6 +78,7 @@ default disk_s3 disk_s3_plain + disk_s3_plain_rewritable disk_s3_cache disk_s3_other_bucket diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index d53335000a6..4840f5afc66 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -419,25 +419,15 @@ def test_backup_to_s3_multipart(): assert "ReadBufferFromS3RequestsErrors" not in restore_events -def test_backup_to_s3_native_copy(): - storage_policy = "policy_s3" - backup_name = new_backup_name() - backup_destination = ( - f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" - ) - (backup_events, restore_events) = check_backup_and_restore( - storage_policy, backup_destination - ) - # single part upload - assert backup_events["S3CopyObject"] > 0 - assert restore_events["S3CopyObject"] > 0 - assert node.contains_in_log( - f"copyS3File: Single operation copy has completed. Bucket: root, Key: data/backups/{backup_name}" - ) - - -def test_backup_to_s3_native_copy_other_bucket(): - storage_policy = "policy_s3_other_bucket" +@pytest.mark.parametrize( + "storage_policy", + [ + "policy_s3", + "policy_s3_other_bucket", + "policy_s3_plain_rewritable", + ], +) +def test_backup_to_s3_native_copy(storage_policy): backup_name = new_backup_name() backup_destination = ( f"S3('http://minio1:9001/root/data/backups/{backup_name}', 'minio', 'minio123')" From 5bea6751e07ef9753db0e532d476056679a4393a Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 19 Jul 2024 19:11:07 +0200 Subject: [PATCH 0544/2361] Make the error message about broken parts more useful. --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 17 ++++++++++++++++- src/Storages/MergeTree/MergeTreeData.cpp | 4 +++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index c2f87018872..3a44359b537 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -739,10 +739,25 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks } catch (...) { - /// Don't scare people with broken part error + /// Don't scare people with broken part error if it's retryable. if (!isRetryableException(std::current_exception())) + { LOG_ERROR(storage.log, "Part {} is broken and needs manual correction", getDataPartStorage().getFullPath()); + if (Exception * e = exception_cast(std::current_exception())) + { + /// Probably there is something wrong with files of this part. + /// So it can be helpful to add to the error message some information about those files. + String files_in_part; + for (auto it = getDataPartStorage().iterate(); it->isValid(); it->next()) + files_in_part += fmt::format("{}{} ({} bytes)", (files_in_part.empty() ? "" : ", "), it->name(), getDataPartStorage().getFileSize(it->name())); + if (!files_in_part.empty()) + e->addMessage("Part contains files: {}", files_in_part); + if (isEmpty()) + e->addMessage("Part is empty"); + } + } + // There could be conditions that data part to be loaded is broken, but some of meta infos are already written // into metadata before exception, need to clean them all. 
metadata_manager->deleteAll(/*include_projection*/ true); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 78a551591a6..12a4effe33c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -5634,9 +5634,11 @@ void MergeTreeData::restorePartFromBackup(std::shared_ptr r String part_name = part_info.getPartNameAndCheckFormat(format_version); auto backup = restored_parts_holder->getBackup(); + /// Find all files of this part in the backup. + Strings filenames = backup->listFiles(part_path_in_backup, /* recursive= */ true); + /// Calculate the total size of the part. UInt64 total_size_of_part = 0; - Strings filenames = backup->listFiles(part_path_in_backup, /* recursive= */ true); fs::path part_path_in_backup_fs = part_path_in_backup; for (const String & filename : filenames) total_size_of_part += backup->getFileSize(part_path_in_backup_fs / filename); From 3abf636853e755d0a5264eb568ce895e55749920 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 19 Jul 2024 19:16:53 +0200 Subject: [PATCH 0545/2361] fix for 992 and friends --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- .../00992_system_parts_race_condition_zookeeper_long.sh | 2 +- tests/queries/0_stateless/replication.lib | 5 ++++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 72f725965e0..416100def4c 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3940,7 +3940,7 @@ void StorageReplicatedMergeTree::mergeSelectingTask() merge_selecting_task->schedule(); else { - LOG_TRACE(log, "Scheduling next merge selecting task after {}ms", merge_selecting_sleep_ms); + LOG_TRACE(log, "Scheduling next merge selecting task after {}ms, current attempt status: {}", merge_selecting_sleep_ms, result); merge_selecting_task->scheduleAfter(merge_selecting_sleep_ms); } } diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 4887c409844..02a739ece4a 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -41,7 +41,7 @@ function thread3() function thread4() { - while true; do $CLICKHOUSE_CLIENT --receive_timeout=3 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done + while true; do $CLICKHOUSE_CLIENT --receive_timeout=1 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done } function thread5() diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 2e21f351d2a..6331a5eb406 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -51,6 +51,9 @@ function check_replication_consistency() table_name_prefix=$1 check_query_part=$2 + # Try to kill some mutations because sometimes tests run too much (it's not guarenteed to kill all mutations, see below) + ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null + # Wait for all queries to finish (query may still be running if thread is killed by timeout) num_tries=0 while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM 
system.processes WHERE current_database=currentDatabase() AND query LIKE '%$table_name_prefix%'") -ne 1 ]]; do @@ -94,7 +97,7 @@ function check_replication_consistency() some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1") $CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA $some_table PULL" 1>/dev/null 2>/dev/null ||: - # Forcefully cancel mutations to avoid waiting for them to finish + # Forcefully cancel mutations to avoid waiting for them to finish. Kills the remaining mutations ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null # SYNC REPLICA is not enough if some MUTATE_PARTs are not assigned yet From e1a659f83b96e33b9482157f0a18dd3c3efb3926 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 19 Jul 2024 19:24:22 +0200 Subject: [PATCH 0546/2361] Remove obsolete code from CMakeLists --- src/CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d985595154c..dac20aaca48 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -419,9 +419,6 @@ dbms_target_link_libraries ( boost::circular_buffer boost::heap) -target_include_directories(clickhouse_common_io PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include") # uses some includes from core -dbms_target_include_directories(PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/Core/include") - target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::miniselect ch_contrib::pdqsort) From a11e89f4f70b101553a138d367d7f6dcd8318554 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 19 Jul 2024 19:00:45 +0100 Subject: [PATCH 0547/2361] impl --- base/base/defines.h | 6 +----- src/Common/Exception.cpp | 4 ++-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index cf3d357da18..5685a6d9833 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -96,10 +96,6 @@ # endif #endif -#if !defined(ABORT_ON_LOGICAL_ERROR) && defined(DEBUG_OR_SANITIZER_BUILD) -# define ABORT_ON_LOGICAL_ERROR -#endif - /// chassert(x) is similar to assert(x), but: /// - works in builds with sanitizers, not only in debug builds /// - tries to print failed assertion into server log @@ -108,7 +104,7 @@ /// Also it makes sense to call abort() instead of __builtin_unreachable() in debug builds, /// because SIGABRT is easier to debug than SIGTRAP (the second one makes gdb crazy) #if !defined(chassert) - #if defined(ABORT_ON_LOGICAL_ERROR) +# if defined(DEBUG_OR_SANITIZER_BUILD) // clang-format off #include namespace DB diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 111280074dd..33befa64946 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -64,7 +64,7 @@ void handle_error_code(const std::string & msg, int code, bool remote, const Exc { // In debug builds and builds with sanitizers, treat LOGICAL_ERROR as an assertion failure. // Log the message before we fail. -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD if (code == ErrorCodes::LOGICAL_ERROR) { abortOnFailedAssertion(msg, trace.data(), 0, trace.size()); @@ -443,7 +443,7 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b } catch (...) 
{} // NOLINT(bugprone-empty-catch) -#ifdef ABORT_ON_LOGICAL_ERROR +#ifdef DEBUG_OR_SANITIZER_BUILD try { throw; From 00e14bde80c343919bc237bdcdeb15f22f153eab Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 19 Jul 2024 19:52:30 +0200 Subject: [PATCH 0548/2361] Update ci_config.py --- tests/ci/ci_config.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index d18faaf8f1b..00b545f9f33 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -317,19 +317,19 @@ class CI: random_bucket="parrepl_with_sanitizer", ), JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=4 + required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2 ), JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_TSAN], num_batches=5 + required_builds=[BuildNames.PACKAGE_TSAN], num_batches=4 ), JobNames.STATELESS_TEST_MSAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_MSAN], num_batches=6 + required_builds=[BuildNames.PACKAGE_MSAN], num_batches=4 ), JobNames.STATELESS_TEST_UBSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_UBSAN], num_batches=2 ), JobNames.STATELESS_TEST_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=5 + required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2 ), JobNames.STATELESS_TEST_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -345,14 +345,14 @@ class CI: required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 ), JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=6 + required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2 ), JobNames.STATELESS_TEST_AZURE_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=4, release_only=True + required_builds=[BuildNames.PACKAGE_ASAN], num_batches=3, release_only=True ), JobNames.STATELESS_TEST_S3_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], - num_batches=5, + num_batches=4, ), JobNames.STRESS_TEST_DEBUG: CommonJobConfigs.STRESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_DEBUG], From fa2659a87cde713011b2a2452269397216c54d6f Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 19 Jul 2024 20:10:08 +0200 Subject: [PATCH 0549/2361] Increase CI timeout for stateless tests --- docker/test/stateless/run.sh | 4 ++-- tests/ci/ci_definitions.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 0647ed02839..264dcd09da3 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -6,8 +6,8 @@ source /setup_export_logs.sh # fail on errors, verbose and export all env variables set -e -x -a -MAX_RUN_TIME=${MAX_RUN_TIME:-7200} -MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME)) +MAX_RUN_TIME=${MAX_RUN_TIME:-9000} +MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 
9000 : MAX_RUN_TIME)) USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0} USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0} diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index d41a621bc2e..8addc0de077 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -432,7 +432,7 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=7200, + timeout=9000, ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", From 1e160189f0652f476b3efc0ea1ebeb0a1eb5e162 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 19 Jul 2024 20:10:41 +0200 Subject: [PATCH 0550/2361] Increase parallel jobs for stateless tests --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 264dcd09da3..670a2420ddf 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -224,7 +224,7 @@ function run_tests() else # All other configurations are OK. ADDITIONAL_OPTIONS+=('--jobs') - ADDITIONAL_OPTIONS+=('7') + ADDITIONAL_OPTIONS+=('8') fi if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then From 4ae0daf5d3149a2e9e4e8494e52164c91c27af0e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 19 Jul 2024 18:46:37 +0000 Subject: [PATCH 0551/2361] output more info --- ...61_lightweight_delete_projection.reference | 14 +++++--- .../03161_lightweight_delete_projection.sql | 32 ++++++++++++++++--- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index cb623ea2b50..960fa1dcc33 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,14 +1,20 @@ compact part testing throw default mode testing drop mode +all_1_1_0_2 testing rebuild mode 8888 Alice 50 -p1 -p2 +all_1_1_0_4 +all_3_3_0_4 +p1 all_3_3_0_4 +p2 all_3_3_0_4 wide part testing throw default mode testing drop mode +all_1_1_0_2 testing rebuild mode 8888 Alice 50 -p1 -p2 +all_1_1_0_4 +all_3_3_0_4 +p1 all_3_3_0_4 +p2 all_3_3_0_4 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index f2d6dcb164f..f33653fc652 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -32,9 +32,15 @@ SELECT * FROM users ORDER BY uid; SYSTEM FLUSH LOGS; --- expecting no projection +-- all_1_1_0_2 SELECT name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); + +-- expecting no projection +SELECT + name, parent_name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); @@ -49,9 +55,15 @@ SELECT * FROM users ORDER BY uid; SYSTEM FLUSH LOGS; --- expecting projection p1, p2 +-- all_1_1_0_4, all_3_3_0_4 SELECT name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); @@ -84,9 +96,15 @@ SELECT * FROM users ORDER BY uid; SYSTEM FLUSH LOGS; --- expecting no projection +-- all_1_1_0_2 SELECT name 
+FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); + +-- expecting no projection +SELECT + name, parent_name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); @@ -101,9 +119,15 @@ SELECT * FROM users ORDER BY uid; SYSTEM FLUSH LOGS; --- expecting projection p1, p2 +-- all_1_1_0_4, all_3_3_0_4 SELECT name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); From 7fd779cc6f6b271366a27b2cbcfe6bad8cb15913 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 19 Jul 2024 20:41:52 +0200 Subject: [PATCH 0552/2361] Stateless tests: split parallel tests more evenly --- tests/clickhouse-test | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a8a6ce68b8f..c543e181c4f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2168,7 +2168,10 @@ def run_tests_array(all_tests_with_params: Tuple[List[str], int, TestSuite, bool while True: if all_tests: - case = all_tests.pop(0) + try: + case = all_tests.pop(0) + except IndexError: + break else: break @@ -2474,18 +2477,16 @@ def do_run_tests(jobs, test_suite: TestSuite): # of failures will be nearly the same for all tests from the group. random.shuffle(test_suite.parallel_tests) - batch_size = max(1, (len(test_suite.parallel_tests) // jobs) + 1) - parallel_tests_array = [] - for job in range(jobs): - range_ = job * batch_size, job * batch_size + batch_size - batch = test_suite.parallel_tests[range_[0] : range_[1]] - parallel_tests_array.append((batch, batch_size, test_suite, True)) + batch_size = len(test_suite.parallel_tests) // jobs + manager = multiprocessing.Manager() + parallel_tests = manager.list() + parallel_tests.extend(test_suite.parallel_tests) processes = [] - - for test_batch in parallel_tests_array: + for job in range(jobs): process = multiprocessing.Process( - target=run_tests_process, args=(test_batch,) + target=run_tests_process, + args=((parallel_tests, batch_size, test_suite, True),), ) processes.append(process) process.start() From 565ee9ad66ba1198fdbec9ad6c1b19b76e9cf185 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 19 Jul 2024 21:05:21 +0200 Subject: [PATCH 0553/2361] Update 02724_limit_num_mutations.sh --- tests/queries/0_stateless/02724_limit_num_mutations.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02724_limit_num_mutations.sh b/tests/queries/0_stateless/02724_limit_num_mutations.sh index a9d69b2ed48..60888db0e2e 100755 --- a/tests/queries/0_stateless/02724_limit_num_mutations.sh +++ b/tests/queries/0_stateless/02724_limit_num_mutations.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 14a0c7e300e1f451ab9d46fc6cfd8c1e5f811946 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 19 Jul 2024 21:13:51 +0200 Subject: [PATCH 0554/2361] Stateless tests: fix stylecheck --- tests/clickhouse-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index c543e181c4f..6edd3159c7f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2483,7 +2483,7 @@ def do_run_tests(jobs, test_suite: TestSuite): 
parallel_tests.extend(test_suite.parallel_tests) processes = [] - for job in range(jobs): + for _ in range(jobs): process = multiprocessing.Process( target=run_tests_process, args=((parallel_tests, batch_size, test_suite, True),), From f3fb729f53860d55db1d72ccfc88f9c5d018aea1 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 19 Jul 2024 20:12:14 +0000 Subject: [PATCH 0555/2361] Call onAsyncJobReady() --- src/Processors/Executors/ExecutorTasks.cpp | 2 ++ src/Processors/Executors/ExecutorTasks.h | 2 +- src/Processors/Executors/PipelineExecutor.h | 1 - 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Processors/Executors/ExecutorTasks.cpp b/src/Processors/Executors/ExecutorTasks.cpp index 7e3bee239ef..d045f59a2e2 100644 --- a/src/Processors/Executors/ExecutorTasks.cpp +++ b/src/Processors/Executors/ExecutorTasks.cpp @@ -204,6 +204,8 @@ void ExecutorTasks::processAsyncTasks() while (auto task = async_task_queue.wait(lock)) { auto * node = static_cast(task.data); + node->processor->onAsyncJobReady(); + executor_contexts[task.thread_num]->pushAsyncTask(node); ++num_waiting_async_tasks; diff --git a/src/Processors/Executors/ExecutorTasks.h b/src/Processors/Executors/ExecutorTasks.h index 202ca253c6c..b2201873edf 100644 --- a/src/Processors/Executors/ExecutorTasks.h +++ b/src/Processors/Executors/ExecutorTasks.h @@ -28,7 +28,7 @@ class ExecutorTasks TaskQueue task_queue; /// Queue which stores tasks where processors returned Async status after prepare. - /// If multiple threads are using, main thread will wait for async tasks. + /// If multiple threads are used, main thread will wait for async tasks. /// For single thread, will wait for async tasks only when task_queue is empty. PollingQueue async_task_queue; diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index 03f0f7f1a0a..ae119355cb5 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -9,7 +9,6 @@ #include #include -#include #include From c948103dee50bf5bddeff9af485d4df2acc8b0f7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 19 Jul 2024 22:23:50 +0200 Subject: [PATCH 0556/2361] Better tests --- .../0_stateless/01338_long_select_and_alter.reference | 2 +- tests/queries/0_stateless/01338_long_select_and_alter.sh | 6 +++--- .../01338_long_select_and_alter_zookeeper.reference | 2 +- .../0_stateless/01338_long_select_and_alter_zookeeper.sh | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.reference b/tests/queries/0_stateless/01338_long_select_and_alter.reference index 921730b17ce..276d6bcc29d 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter.reference @@ -1,3 +1,3 @@ 5 -10 +5 CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = MergeTree\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.sh b/tests/queries/0_stateless/01338_long_select_and_alter.sh index 2659e5c16cf..08609546ff5 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" $CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key UInt64, value String) ENGINE=MergeTree() ORDER BY key" -$CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number, 
toString(number) FROM numbers(5)" +$CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number - 1 AS x, toString(x) FROM numbers(5)" $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 10000000 --query "SELECT count(distinct concat(value, '_')) FROM alter_mt WHERE not sleepEachRow(2)" & @@ -18,10 +18,10 @@ sleep 2 $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value UInt64" -wait - $CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" +wait + $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE alter_mt" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference index 9c5ad0fa468..aab1b93f6bd 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference @@ -1,3 +1,3 @@ 5 -10 +5 CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01338_long_select_and_alter_zookeeper_default/alter_mt\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh index 6eb795408f4..def6d2ab127 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" $CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_mt', '1') ORDER BY key" -$CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number, toString(number) FROM numbers(5)" +$CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number - 1 AS x, toString(x) FROM numbers(5)" $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 10000000 --query "SELECT count(distinct concat(value, '_')) FROM alter_mt WHERE not sleepEachRow(2)" & @@ -18,10 +18,10 @@ sleep 2 $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value UInt64" -wait - $CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" +wait + $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE alter_mt" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" From 465a34d3dfe3e313471e10d59cab8219b3e5837e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 19 Jul 2024 20:27:57 +0000 Subject: [PATCH 0557/2361] Simplify, fix build --- src/QueryPipeline/RemoteQueryExecutor.cpp | 48 +++++++++++------------ 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index b78c38a4134..61a512bcfc5 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -924,41 +924,37 @@ bool RemoteQueryExecutor::needToSkipUnavailableShard() const return context->getSettingsRef().skip_unavailable_shards && (0 == connections->size()); } -bool RemoteQueryExecutor::processParallelReplicaPacketIfAny() +bool RemoteQueryExecutor::processParallelReplicaPacketIfAny() { #if defined(OS_LINUX) + + std::lock_guard lock(was_cancelled_mutex); + if (was_cancelled) + return false; + if (!read_context || (resent_query && recreate_read_context)) { - std::lock_guard lock(was_cancelled_mutex); - if (was_cancelled) - 
return false; - read_context = std::make_unique(*this); recreate_read_context = false; } - { - std::lock_guard lock(was_cancelled_mutex); - if (was_cancelled) - return false; + chassert(!has_postponed_packet); - chassert(!has_postponed_packet); - - read_context->resume(); - if (read_context->isInProgress()) // <- nothing to process - return false; - - const auto packet_type = read_context->getPacketType(); - if (packet_type == Protocol::Server::MergeTreeReadTaskRequest || packet_type == Protocol::Server::MergeTreeAllRangesAnnouncement) - { - processPacket(read_context->getPacket()); - return true; - } - - has_postponed_packet = true; + read_context->resume(); + if (read_context->isInProgress()) // <- nothing to process return false; - } -#endif -} + const auto packet_type = read_context->getPacketType(); + if (packet_type == Protocol::Server::MergeTreeReadTaskRequest || packet_type == Protocol::Server::MergeTreeAllRangesAnnouncement) + { + processPacket(read_context->getPacket()); + return true; + } + + has_postponed_packet = true; + +#endif + + return false; +} } From 277dbfa0574b567241d169494a459e6f2b04d5e6 Mon Sep 17 00:00:00 2001 From: "Zhukova, Maria" Date: Fri, 19 Jul 2024 13:13:42 -0700 Subject: [PATCH 0558/2361] update QPL to 1.6.0 + missing header fix --- contrib/qpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/qpl b/contrib/qpl index d4715e0e798..c2ced94c53c 160000 --- a/contrib/qpl +++ b/contrib/qpl @@ -1 +1 @@ -Subproject commit d4715e0e79896b85612158e135ee1a85f3b3e04d +Subproject commit c2ced94c53c1ee22191201a59878e9280bc9b9b8 From 1347bc32187b694148657459c0b745c45cb92a8d Mon Sep 17 00:00:00 2001 From: "Zhukova, Maria" Date: Fri, 19 Jul 2024 13:32:18 -0700 Subject: [PATCH 0559/2361] update qpl-cmake to reflect changes in QPL 1.5.0-1.6.0 --- contrib/qpl-cmake/CMakeLists.txt | 92 ++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 35 deletions(-) diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt index 7a84048e16b..b2f263252c2 100644 --- a/contrib/qpl-cmake/CMakeLists.txt +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -4,7 +4,6 @@ set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl") set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources") set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl") set (EFFICIENT_WAIT OFF) -set (BLOCK_ON_FAULT ON) set (LOG_HW_INIT OFF) set (SANITIZE_MEMORY OFF) set (SANITIZE_THREADS OFF) @@ -16,16 +15,18 @@ function(GetLibraryVersion _content _outputVar) SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE) endfunction() -set (QPL_VERSION 1.2.0) +set (QPL_VERSION 1.6.0) message(STATUS "Intel QPL version: ${QPL_VERSION}") -# There are 5 source subdirectories under $QPL_SRC_DIR: isal, c_api, core-sw, middle-layer, c_api. -# Generate 8 library targets: middle_layer_lib, isal, isal_asm, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, core_iaa, middle_layer_lib. +# There are 5 source subdirectories under $QPL_SRC_DIR: c_api, core-iaa, core-sw, middle-layer and isal. +# Generate 8 library targets: qpl_c_api, core_iaa, qplcore_px, qplcore_avx512, qplcore_sw_dispatcher, middle_layer_lib, isal and isal_asm, +# which are then combined into static or shared qpl. # Output ch_contrib::qpl by linking with 8 library targets. -# The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link -# only upstream isal (ch_contrib::isal) but at this point we can't. 
+# Note, qpl submodule comes with its own version of isal that is not compatible with upstream isal (e.g., ch_contrib::isal). + +## cmake/CompileOptions.cmake and automatic wrappers generation # ========================================================================== # Copyright (C) 2022 Intel Corporation @@ -442,6 +443,7 @@ function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST) endforeach() endfunction() +# [SUBDIR]isal enable_language(ASM_NASM) @@ -479,7 +481,6 @@ set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm ${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm - ${QPL_SRC_DIR}/isal/igzip/stdmac.asm ${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm @@ -505,7 +506,6 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS # Setting external and internal interfaces for ISA-L library target_include_directories(isal PUBLIC $ - PRIVATE ${QPL_SRC_DIR}/isal/include PUBLIC ${QPL_SRC_DIR}/isal/igzip) set_target_properties(isal PROPERTIES @@ -617,12 +617,9 @@ target_compile_options(qplcore_sw_dispatcher # [SUBDIR]core-iaa file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c - ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.cpp ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c - ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.cpp ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c - ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.cpp - ${QPL_SRC_DIR}/core-iaa/sources/bit_rev.c) + ${QPL_SRC_DIR}/core-iaa/sources/*.c) # Create library add_library(core_iaa OBJECT ${HW_PATH_SRC}) @@ -634,31 +631,27 @@ target_include_directories(core_iaa PRIVATE ${UUID_DIR} PUBLIC $ PUBLIC $ - PRIVATE $ # status.h in own_checkers.h - PRIVATE $ # own_checkers.h + PRIVATE $ # status.h in own_checkers.h + PRIVATE $ # for own_checkers.h PRIVATE $) target_compile_features(core_iaa PRIVATE c_std_11) target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK - PRIVATE $<$: BLOCK_ON_FAULT_ENABLED> PRIVATE $<$:LOG_HW_INIT> PRIVATE $<$:DYNAMIC_LOADING_LIBACCEL_CONFIG>) # [SUBDIR]middle-layer file(GLOB MIDDLE_LAYER_SRC - ${QPL_SRC_DIR}/middle-layer/analytics/*.cpp - ${QPL_SRC_DIR}/middle-layer/c_wrapper/*.cpp - ${QPL_SRC_DIR}/middle-layer/checksum/*.cpp + ${QPL_SRC_DIR}/middle-layer/accelerator/*.cpp + ${QPL_SRC_DIR}/middle-layer/analytics/*.cpp ${QPL_SRC_DIR}/middle-layer/common/*.cpp ${QPL_SRC_DIR}/middle-layer/compression/*.cpp ${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp ${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp ${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp ${QPL_SRC_DIR}/middle-layer/other/*.cpp - ${QPL_SRC_DIR}/middle-layer/util/*.cpp - ${QPL_SRC_DIR}/middle-layer/inflate/*.cpp - ${QPL_SRC_DIR}/core-iaa/sources/accelerator/*.cpp) # todo + ${QPL_SRC_DIR}/middle-layer/util/*.cpp) add_library(middle_layer_lib OBJECT ${MIDDLE_LAYER_SRC}) @@ -667,6 +660,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS $) target_compile_options(middle_layer_lib + PRIVATE $<$:$<$:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>> PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) target_compile_definitions(middle_layer_lib @@ -682,6 +676,7 @@ target_include_directories(middle_layer_lib PRIVATE ${UUID_DIR} PUBLIC $ PUBLIC $ + PRIVATE $ PUBLIC $ PUBLIC $ PUBLIC $) @@ -689,31 +684,58 @@ target_include_directories(middle_layer_lib target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB) # [SUBDIR]c_api -file(GLOB_RECURSE 
QPL_C_API_SRC - ${QPL_SRC_DIR}/c_api/*.c - ${QPL_SRC_DIR}/c_api/*.cpp) +file(GLOB QPL_C_API_SRC + ${QPL_SRC_DIR}/c_api/compression_operations/*.c + ${QPL_SRC_DIR}/c_api/compression_operations/*.cpp + ${QPL_SRC_DIR}/c_api/filter_operations/*.cpp + ${QPL_SRC_DIR}/c_api/legacy_hw_path/*.c + ${QPL_SRC_DIR}/c_api/legacy_hw_path/*.cpp + ${QPL_SRC_DIR}/c_api/other_operations/*.cpp + ${QPL_SRC_DIR}/c_api/serialization/*.cpp + ${QPL_SRC_DIR}/c_api/*.cpp) + +add_library(qpl_c_api OBJECT ${QPL_C_API_SRC}) + +target_include_directories(qpl_c_api + PUBLIC $ + PUBLIC $ $ + PRIVATE $) + +set_target_properties(qpl_c_api PROPERTIES + $<$:C_STANDARD 17 + CXX_STANDARD 17) + +target_compile_options(qpl_c_api + PRIVATE $<$:$<$:-O3;-U_FORTIFY_SOURCE;-D_FORTIFY_SOURCE=2>> + PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + +target_compile_definitions(qpl_c_api + PUBLIC -DQPL_BADARG_CHECK # own_checkers.h + PUBLIC -DQPL_LIB # needed for middle_layer_lib + PUBLIC $<$:LOG_HW_INIT>) # needed for middle_layer_lib + +set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS + $) + +# Final _qpl target get_property(LIB_DEPS GLOBAL PROPERTY QPL_LIB_DEPS) -add_library(_qpl STATIC ${QPL_C_API_SRC} ${LIB_DEPS}) +add_library(_qpl STATIC ${LIB_DEPS}) target_include_directories(_qpl - PUBLIC $ $ - PRIVATE $ - PRIVATE $) + PUBLIC $ $) -target_compile_options(_qpl - PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}) target_compile_definitions(_qpl - PRIVATE -DQPL_LIB - PRIVATE -DQPL_BADARG_CHECK - PRIVATE $<$:DYNAMIC_LOADING_LIBACCEL_CONFIG> PUBLIC -DENABLE_QPL_COMPRESSION) target_link_libraries(_qpl - PRIVATE ch_contrib::accel-config - PRIVATE ch_contrib::isal) + PRIVATE ch_contrib::accel-config) + +# C++ filesystem library requires additional linking for older GNU/Clang +target_link_libraries(_qpl PRIVATE $<$,$,9.1>>:stdc++fs>) +target_link_libraries(_qpl PRIVATE $<$,$,9.0>>:c++fs>) target_include_directories(_qpl SYSTEM BEFORE PUBLIC "${QPL_PROJECT_DIR}/include" From a373b62bbf8083ffa96210fc1c959f13939526fc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 00:04:52 +0200 Subject: [PATCH 0560/2361] Better diagnostics in functional tests --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 0647ed02839..f43bb5da33d 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -264,7 +264,7 @@ if [ "$NUM_TRIES" -gt "1" ]; then # We don't run tests with Ordinary database in PRs, only in master. # So run new/changed tests with Ordinary at least once in flaky check. 
timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \ - | sed 's/All tests have finished//' | sed 's/No tests were run//' ||: + | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' ||: fi timeout_with_logging "$TIMEOUT" bash -c run_tests ||: From 134c0065407bd3f9394a720fbdfef7edf241ef84 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 00:08:25 +0200 Subject: [PATCH 0561/2361] Whitespace --- docker/test/stateless/utils.lib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/utils.lib b/docker/test/stateless/utils.lib index c3bb8ae9ea4..6b7b659296b 100644 --- a/docker/test/stateless/utils.lib +++ b/docker/test/stateless/utils.lib @@ -47,7 +47,7 @@ function timeout_with_logging() { if [[ "${exit_code}" -eq "124" ]] then - echo "The command 'timeout ${*}' has been killed by timeout" + echo "The command 'timeout ${*}' has been killed by timeout" fi return $exit_code From ba6b7b86ba3e868cd001efbce2c6cf8a5236a024 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 00:09:53 +0200 Subject: [PATCH 0562/2361] Log messages --- docker/test/stateless/utils.lib | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/utils.lib b/docker/test/stateless/utils.lib index 6b7b659296b..cb257536c36 100644 --- a/docker/test/stateless/utils.lib +++ b/docker/test/stateless/utils.lib @@ -45,9 +45,12 @@ function timeout_with_logging() { timeout -s TERM --preserve-status "${@}" || exit_code="${?}" + echo "Checking if it is a timeout. The code 124 will indicate a timeout." if [[ "${exit_code}" -eq "124" ]] then - echo "The command 'timeout ${*}' has been killed by timeout" + echo "The command 'timeout ${*}' has been killed by timeout." + else + echo "No, it isn't a timeout." fi return $exit_code From 5ae3a421e0bf90c3d1755371fe1d6ff5662207ca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 00:12:02 +0200 Subject: [PATCH 0563/2361] Copy-paste --- docker/test/fasttest/run.sh | 5 ++++- docker/test/stateful/run.sh | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 970bf12a81a..26283afc86a 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -261,9 +261,12 @@ function timeout_with_logging() { timeout -s TERM --preserve-status "${@}" || exit_code="${?}" + echo "Checking if it is a timeout. The code 124 will indicate a timeout." if [[ "${exit_code}" -eq "124" ]] then - echo "The command 'timeout ${*}' has been killed by timeout" + echo "The command 'timeout ${*}' has been killed by timeout." + else + echo "No, it isn't a timeout." fi return $exit_code diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 857385f4715..3a0e3a8be48 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -251,9 +251,12 @@ function timeout_with_logging() { timeout -s TERM --preserve-status "${@}" || exit_code="${?}" + echo "Checking if it is a timeout. The code 124 will indicate a timeout." if [[ "${exit_code}" -eq "124" ]] then - echo "The command 'timeout ${*}' has been killed by timeout" + echo "The command 'timeout ${*}' has been killed by timeout." + else + echo "No, it isn't a timeout." 
fi return $exit_code From 444303cb7117c92e578ad4ea20f7c0001edb3c8b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 00:25:57 +0200 Subject: [PATCH 0564/2361] Better diagnostics --- docker/test/stateless/run.sh | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index f43bb5da33d..b24af431ff1 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -247,12 +247,22 @@ function run_tests() try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')" + TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800)) + START_TIME=${SECONDS} set +e - timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ - --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \ + timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s \ + clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ + --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \ | ts '%Y-%m-%d %H:%M:%S' \ | tee -a test_output/test_result.txt set -e + DURATION=$((START_TIME - SECONDS)) + + echo "Elapsed ${DURATION} seconds." + if [[ $DURATION -ge $TIMEOUT ]] + then + echo "It looks like the command is terminated by the timeout, which is ${TIMEOUT} seconds." + fi } export -f run_tests From 8a67713e63bbaec8bf820bd6813affefe305d2cc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 01:09:45 +0200 Subject: [PATCH 0565/2361] Fix error --- tests/queries/0_stateless/01338_long_select_and_alter.sh | 4 ++-- .../0_stateless/01338_long_select_and_alter_zookeeper.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.sh b/tests/queries/0_stateless/01338_long_select_and_alter.sh index 08609546ff5..5d2759ac884 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" -$CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key UInt64, value String) ENGINE=MergeTree() ORDER BY key" +$CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key Int64, value String) ENGINE=MergeTree() ORDER BY key" $CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number - 1 AS x, toString(x) FROM numbers(5)" @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 10000000 --query # To be sure that select took all required locks for better test sensitivity, although it isn't guaranteed (then the test will also succeed). 
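
The TIMEOUT/DURATION change to docker/test/stateless/run.sh above clamps the test budget to at most 8400 seconds after reserving 800 seconds for setup and teardown, then compares the elapsed time with that budget to guess whether `timeout` cut the run short. A rough Python sketch of the same bookkeeping, with illustrative names and values that are not taken from the script; it subtracts the start time from the end time so the duration comes out non-negative:

    # Budget/elapsed-time bookkeeping similar to the run.sh change above.
    import subprocess
    import time

    def run_tests_with_budget(max_run_time: int, cmd: list) -> None:
        # Mirrors TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800)):
        # reserve 800 s for everything around the tests, cap the rest at 8400 s.
        timeout = min(max_run_time - 800, 8400)
        start = time.monotonic()
        subprocess.run(
            ["timeout", "--preserve-status", "--signal", "TERM",
             "--kill-after", "60m", f"{timeout}s"] + cmd,
            check=False,
        )
        duration = int(time.monotonic() - start)
        print(f"Elapsed {duration} seconds.")
        if duration >= timeout:
            print(f"It looks like the command is terminated by the timeout, which is {timeout} seconds.")

    if __name__ == "__main__":
        run_tests_with_budget(max_run_time=810, cmd=["sleep", "30"])
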
sleep 2 -$CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value UInt64" +$CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value Int64" $CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh index def6d2ab127..593a96a7cc8 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" -$CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_mt', '1') ORDER BY key" +$CLICKHOUSE_CLIENT --query "CREATE TABLE alter_mt (key Int64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_mt', '1') ORDER BY key" $CLICKHOUSE_CLIENT --query "INSERT INTO alter_mt SELECT number - 1 AS x, toString(x) FROM numbers(5)" @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 10000000 --query # To be sure that select took all required locks for better test sensitivity, although it isn't guaranteed (then the test will also succeed). sleep 2 -$CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value UInt64" +$CLICKHOUSE_CLIENT --query "ALTER TABLE alter_mt MODIFY COLUMN value Int64" $CLICKHOUSE_CLIENT --query "SELECT sum(value) FROM alter_mt" From 7caa7e20601b2500f513476dfe10819f328be3d3 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 20 Jul 2024 02:28:13 +0000 Subject: [PATCH 0566/2361] block deduplicate only in throw mode --- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- src/Interpreters/InterpreterOptimizeQuery.cpp | 4 ---- src/Storages/StorageMergeTree.cpp | 6 ++++-- src/Storages/StorageReplicatedMergeTree.cpp | 6 ++++-- ...206_projection_merge_special_mergetree.sql | 19 +++++++++---------- 5 files changed, 18 insertions(+), 19 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index df5ec4525eb..05df26b0d31 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1298,7 +1298,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) } } if (!projection_support) - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Projection is only supported in (Replictaed)MergeTree. Consider drop or rebuild option of deduplicate_merge_projection_mode."); } diff --git a/src/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp index 8d1ac3455b7..3bee235185d 100644 --- a/src/Interpreters/InterpreterOptimizeQuery.cpp +++ b/src/Interpreters/InterpreterOptimizeQuery.cpp @@ -43,10 +43,6 @@ BlockIO InterpreterOptimizeQuery::execute() auto metadata_snapshot = table->getInMemoryMetadataPtr(); auto storage_snapshot = table->getStorageSnapshot(metadata_snapshot, getContext()); - /// Don't allow OPTIMIZE DEDUPLICATE for all engines with projections. - if (ast.deduplicate && !metadata_snapshot->projections.empty()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE with projections are not supported yet"); - // Empty list of names means we deduplicate by all columns, but user can explicitly state which columns to use. 
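
The comment just above states the OPTIMIZE ... DEDUPLICATE BY rule: an empty column list means rows are compared on every column, otherwise only the listed columns form the comparison key. A toy Python illustration of that rule, showing just the semantics rather than ClickHouse code:

    # "Empty column list" deduplicates by the whole row; otherwise by the given subset.
    from typing import Dict, List, Optional

    def deduplicate(rows: List[Dict], by_columns: Optional[List[str]] = None) -> List[Dict]:
        seen = set()
        kept = []
        for row in rows:
            columns = by_columns or sorted(row)  # no list given -> use all columns
            key = tuple(row[c] for c in columns)
            if key not in seen:
                seen.add(key)
                kept.append(row)
        return kept

    rows = [{"key": 1, "value": "a"}, {"key": 1, "value": "b"}, {"key": 1, "value": "a"}]
    print(deduplicate(rows))                      # drops only the exact duplicate
    print(deduplicate(rows, by_columns=["key"]))  # keeps a single row per key
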
Names column_names; if (ast.deduplicate_by_columns) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 40b3a12297b..a5d434796ba 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1580,10 +1580,12 @@ bool StorageMergeTree::optimize( { assertNotReadonly(); - if (deduplicate && getInMemoryMetadataPtr()->hasProjections()) + if (deduplicate && getInMemoryMetadataPtr()->hasProjections() + && getSettings()->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "OPTIMIZE DEDUPLICATE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", + "User should drop all the projections manually before running the query, " + "or consider drop or rebuild option of deduplicate_merge_projection_mode", getStorageID().getTableName()); if (deduplicate) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 72f725965e0..3751883df24 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5794,10 +5794,12 @@ bool StorageReplicatedMergeTree::optimize( if (!is_leader) throw Exception(ErrorCodes::NOT_A_LEADER, "OPTIMIZE cannot be done on this replica because it is not a leader"); - if (deduplicate && getInMemoryMetadataPtr()->hasProjections()) + if (deduplicate && getInMemoryMetadataPtr()->hasProjections() + && getSettings()->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "OPTIMIZE DEDUPLICATE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", + "User should drop all the projections manually before running the query, " + "or consider drop or rebuild option of deduplicate_merge_projection_mode", getStorageID().getTableName()); if (cleanup) diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index 06fb9a30aca..c8945fd784c 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -4,14 +4,14 @@ CREATE TABLE tp ( type Int32, eventcnt UInt64, PROJECTION p (select sum(eventcnt), type group by type) -) engine = ReplacingMergeTree order by type; -- { serverError SUPPORT_IS_DISABLED } +) engine = ReplacingMergeTree order by type; -- { serverError NOT_IMPLEMENTED } CREATE TABLE tp ( type Int32, eventcnt UInt64, PROJECTION p (select sum(eventcnt), type group by type) ) engine = ReplacingMergeTree order by type -SETTINGS deduplicate_merge_projection_mode = 'throw'; -- { serverError SUPPORT_IS_DISABLED } +SETTINGS deduplicate_merge_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } CREATE TABLE tp ( type Int32, @@ -20,6 +20,10 @@ CREATE TABLE tp ( ) engine = ReplacingMergeTree order by type SETTINGS deduplicate_merge_projection_mode = 'drop'; +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } + DROP TABLE tp; CREATE TABLE tp ( @@ -29,13 +33,8 @@ CREATE TABLE tp ( ) engine = ReplacingMergeTree order by type SETTINGS deduplicate_merge_projection_mode = 'rebuild'; -DROP TABLE tp; +ALTER TABLE tp MODIFY SETTING 
deduplicate_merge_projection_mode = 'throw'; +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } --- don't allow OPTIMIZE DEDUPLICATE for all engines with projections -CREATE TABLE test ( - a INT PRIMARY KEY, - PROJECTION p (SELECT * ORDER BY a) -) engine = MergeTree; - -OPTIMIZE TABLE test DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +DROP TABLE tp; \ No newline at end of file From 7c89ca59af17576e3a4e6b42167c84b5045fb969 Mon Sep 17 00:00:00 2001 From: alesapin Date: Sat, 20 Jul 2024 11:46:24 +0200 Subject: [PATCH 0567/2361] Bump From e3b2fbf7ec1a322fa4fcbd808a3a19dd6ec3b8ee Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 16 Jul 2024 15:37:50 +0200 Subject: [PATCH 0568/2361] CI: POC for Auto Releases --- .github/workflows/auto_release.yml | 42 ++++--- .github/workflows/create_release.yml | 9 +- tests/ci/auto_release.py | 169 +++++++++++++-------------- tests/ci/ci_utils.py | 6 +- 4 files changed, 121 insertions(+), 105 deletions(-) diff --git a/.github/workflows/auto_release.yml b/.github/workflows/auto_release.yml index f1a6b307b40..e90b183b8d6 100644 --- a/.github/workflows/auto_release.yml +++ b/.github/workflows/auto_release.yml @@ -1,44 +1,58 @@ name: AutoRelease env: - # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 concurrency: - group: auto-release + group: release on: # yamllint disable-line rule:truthy # schedule: # - cron: '0 10-16 * * 1-5' workflow_dispatch: jobs: - CherryPick: - runs-on: [self-hosted, style-checker-aarch64] + AutoRelease: + runs-on: [self-hosted, release-maker] steps: + - name: DebugInfo + uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 - name: Set envs - # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings run: | cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/cherry_pick ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" + - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }} + if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] }} + uses: ./.github/workflows/create_release.yml + with: + type: patch + ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }} + dry-run: true + autorelease: true + - name: Post Slack Message + if: ${{ !cancelled() }} + run: | + echo Slack Message + - name: Clean up run: | docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index d8d27531f28..96cd46f583a 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -22,6 +22,10 @@ concurrency: required: false default: true type: boolean + autorelease: + required: false + default: false + type: boolean jobs: CreateRelease: @@ -30,8 +34,10 @@ jobs: runs-on: [self-hosted, release-maker] steps: - name: DebugInfo + if: ${{ ! inputs.autorelease }} uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 - name: Set envs + if: ${{ ! inputs.autorelease }} # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings run: | cat >> "$GITHUB_ENV" << 'EOF' @@ -41,6 +47,7 @@ jobs: RELEASE_INFO_FILE=${{ runner.temp }}/release_info.json EOF - name: Check out repository code + if: ${{ ! 
inputs.autorelease }} uses: ClickHouse/checkout@v1 with: token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} @@ -161,6 +168,6 @@ jobs: export CHECK_NAME="Docker keeper image" python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} - name: Post Slack Message - if: always() + if: ${{ !cancelled() }} run: | echo Slack Message diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index bfd19b11e6d..88e91eb8aed 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -1,13 +1,16 @@ import argparse -from datetime import timedelta, datetime +import dataclasses +import json import logging import os -from commit_status_helper import get_commit_filtered_statuses +from typing import List + from get_robot_token import get_best_robot_token from github_helper import GitHub -from release import Release, Repo as ReleaseRepo, RELEASE_READY_STATUS -from report import SUCCESS from ssh import SSHKey +from ci_utils import Shell +from env_helper import GITHUB_REPOSITORY +from report import SUCCESS LOGGER_NAME = __name__ HELPER_LOGGERS = ["github_helper", LOGGER_NAME] @@ -20,116 +23,104 @@ def parse_args(): "branches and do a release in case for green builds." ) parser.add_argument("--token", help="GitHub token, if not set, used from smm") - parser.add_argument( - "--repo", default="ClickHouse/ClickHouse", help="Repo owner/name" - ) - parser.add_argument("--dry-run", action="store_true", help="Do not create anything") - parser.add_argument( - "--release-after-days", - type=int, - default=3, - help="Do automatic release on the latest green commit after the latest " - "release if the newest release is older than the specified days", - ) - parser.add_argument( - "--debug-helpers", - action="store_true", - help="Add debug logging for this script and github_helper", - ) - parser.add_argument( - "--remote-protocol", - "-p", - default="ssh", - choices=ReleaseRepo.VALID, - help="repo protocol for git commands remote, 'origin' is a special case and " - "uses 'origin' as a remote", - ) return parser.parse_args() +MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE = 5 +AUTORELEASE_INFO_FILE = "/tmp/autorelease_info.json" + + +@dataclasses.dataclass +class ReleaseParams: + release_branch: str + commit_sha: str + + +@dataclasses.dataclass +class AutoReleaseInfo: + releases: List[ReleaseParams] + + def add_release(self, release_params: ReleaseParams): + self.releases.append(release_params) + + def dump(self): + print(f"Dump release info into [{AUTORELEASE_INFO_FILE}]") + with open(AUTORELEASE_INFO_FILE, "w", encoding="utf-8") as f: + print(json.dumps(dataclasses.asdict(self), indent=2), file=f) + + def main(): args = parse_args() - logging.basicConfig(level=logging.INFO) - if args.debug_helpers: - for logger_name in HELPER_LOGGERS: - logging.getLogger(logger_name).setLevel(logging.DEBUG) token = args.token or get_best_robot_token() - days_as_timedelta = timedelta(days=args.release_after_days) - now = datetime.now() - + assert len(token) > 10 + os.environ["GH_TOKEN"] = token + (Shell.run("gh auth status", check=True)) gh = GitHub(token) - prs = gh.get_release_pulls(args.repo) + prs = gh.get_release_pulls(GITHUB_REPOSITORY) branch_names = [pr.head.ref for pr in prs] - logger.info("Found release branches: %s\n ", " \n".join(branch_names)) - repo = gh.get_repo(args.repo) + print(f"Found release branches [{branch_names}]") + repo = gh.get_repo(GITHUB_REPOSITORY) - # In general there is no 
guarantee on which order the refs/commits are - # returned from the API, so we have to order them. + autoRelease_info = AutoReleaseInfo(releases=[]) for pr in prs: - logger.info("Checking PR %s", pr.head.ref) + print(f"Checking PR [{pr.head.ref}]") refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}")) refs.sort(key=lambda ref: ref.ref) latest_release_tag_ref = refs[-1] latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha) - logger.info("That last release was done at %s", latest_release_tag.tagger.date) - - if latest_release_tag.tagger.date + days_as_timedelta > now: - logger.info( - "Not enough days since the last release %s," - " no automatic release can be done", - latest_release_tag.tag, + commit_num = int( + Shell.run( + f"git rev-list --count {latest_release_tag.tag}..origin/{pr.head.ref}", + check=True, ) - continue - - unreleased_commits = list( - repo.get_commits(sha=pr.head.ref, since=latest_release_tag.tagger.date) ) - unreleased_commits.sort( - key=lambda commit: commit.commit.committer.date, reverse=True + print( + f"Previous release is [{latest_release_tag}] was [{commit_num}] commits before, date [{latest_release_tag.tagger.date}]" ) - - for commit in unreleased_commits: - logger.info("Checking statuses of commit %s", commit.sha) - statuses = get_commit_filtered_statuses(commit) - all_success = all(st.state == SUCCESS for st in statuses) - passed_ready_for_release_check = any( - st.context == RELEASE_READY_STATUS and st.state == SUCCESS - for st in statuses + commit_reverse_index = 0 + commit_found = False + commit_checked = False + commit_sha = "" + while ( + commit_reverse_index < commit_num - 1 + and commit_reverse_index < MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE + ): + commit_checked = True + commit_sha = Shell.run( + f"git rev-list --max-count=1 --skip={commit_reverse_index} origin/{pr.head.ref}", + check=True, ) - if not (all_success and passed_ready_for_release_check): - logger.info("Commit is not green, thus not suitable for release") - continue - - logger.info("Commit is ready for release, let's release!") - - release = Release( - ReleaseRepo(args.repo, args.remote_protocol), - commit.sha, - "patch", - args.dry_run, - True, + print( + f"Check if commit [{commit_sha}] [{pr.head.ref}~{commit_reverse_index}] is ready for release" ) - try: - release.do(True, True, True) - except: - if release.has_rollback: - logging.error( - "!!The release process finished with error, read the output carefully!!" - ) - logging.error( - "Probably, rollback finished with error. " - "If you don't see any of the following commands in the output, " - "execute them manually:" - ) - release.log_rollback() - raise - logging.info("New release is done!") + commit_reverse_index += 1 + + cmd = f"gh api -H 'Accept: application/vnd.github.v3+json' /repos/{GITHUB_REPOSITORY}/commits/{commit_sha}/status" + ci_status_json = Shell.run(cmd, check=True) + ci_status = json.loads(ci_status_json)["state"] + if ci_status == SUCCESS: + commit_found = True break + if commit_found: + print( + f"Add release ready info for commit [{commit_sha}] and release branch [{pr.head.ref}]" + ) + autoRelease_info.add_release( + ReleaseParams(release_branch=pr.head.ref, commit_sha=commit_sha) + ) + else: + print(f"WARNING: No good commits found for release branch [{pr.head.ref}]") + if commit_checked: + print( + f"ERROR: CI is failed. 
check CI status for branch [{pr.head.ref}]" + ) + + autoRelease_info.dump() if __name__ == "__main__": diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 49f0447b5ca..0653374356f 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -95,7 +95,8 @@ class Shell: return res.stdout.strip() @classmethod - def run(cls, command): + def run(cls, command, check=False): + print(f"Run command [{command}]") res = "" result = subprocess.run( command, @@ -107,6 +108,9 @@ class Shell: ) if result.returncode == 0: res = result.stdout + elif check: + print(f"ERROR: stdout {result.stdout}, stderr {result.stderr}") + assert result.returncode == 0 return res.strip() @classmethod From 3de472cedc22c1b109256507d81a491f9cf57d58 Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 16 Jul 2024 17:07:49 +0200 Subject: [PATCH 0569/2361] add buddy, move release into action.yml try callable wf fix add ci buddy --- .github/actions/release/action.yml | 165 ++++++++++++++++++++++++ .github/workflows/auto_release.yml | 55 +++++++- .github/workflows/create_release.yml | 140 +------------------- tests/ci/artifactory.py | 32 ++--- tests/ci/auto_release.py | 183 ++++++++++++++++++--------- tests/ci/ci.py | 2 +- tests/ci/ci_buddy.py | 56 +++++++- tests/ci/ci_config.py | 3 + tests/ci/ci_utils.py | 76 ++++++++++- tests/ci/create_release.py | 106 ++++++++++------ tests/ci/pr_info.py | 13 +- 11 files changed, 560 insertions(+), 271 deletions(-) create mode 100644 .github/actions/release/action.yml diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml new file mode 100644 index 00000000000..fd4436f1f16 --- /dev/null +++ b/.github/actions/release/action.yml @@ -0,0 +1,165 @@ +name: Release + +description: Makes patch releases and creates new release branch + +inputs: + ref: + description: 'Git reference (branch or commit sha) from which to create the release' + required: true + type: string + type: + description: 'The type of release: "new" for a new release or "patch" for a patch release' + required: true + type: choice + options: + - patch + - new + dry-run: + description: 'Dry run' + required: false + default: true + type: boolean + token: + required: true + type: string + +runs: + using: "composite" + steps: + - name: Prepare Release Info + shell: bash + run: | + python3 ./tests/ci/create_release.py --prepare-release-info \ + --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \ + ${{ inputs.dry-run && '--dry-run' || '' }} + echo "::group::Release Info" + python3 -m json.tool /tmp/release_info.json + echo "::endgroup::" + release_tag=$(jq -r '.release_tag' /tmp/release_info.json) + commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json) + echo "Release Tag: $release_tag" + echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV" + echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" + - name: Download All Release Artifacts + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Push Git Tag for the Release + shell: bash + run: | + python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Push New Release Branch + if: ${{ inputs.type == 'new' }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Bump CH Version and Update Contributors' List + shell: bash + run: | + python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ 
inputs.dry-run && '--dry-run' || '' }} + - name: Checkout master + shell: bash + run: | + git checkout master + - name: Bump Docker versions, Changelog, Security + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1 + echo "List versions" + ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv + echo "Update docker version" + ./utils/list-versions/update-docker-version.sh + echo "Generate ChangeLog" + export CI=1 + docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ + --volume=".:/ClickHouse" clickhouse/style-test \ + /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ + --gh-user-or-token=${{ inputs.token }} --jobs=5 \ + --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} + git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md + echo "Generate Security" + python3 ./utils/security-generator/generate_security.py > SECURITY.md + git diff HEAD + - name: Create ChangeLog PR + if: ${{ inputs.type == 'patch' && ! inputs.dry-run }} + uses: peter-evans/create-pull-request@v6 + with: + author: "robot-clickhouse " + token: ${{ inputs.token }} + committer: "robot-clickhouse " + commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} + branch: auto/${{ env.RELEASE_TAG }} + assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher + delete-branch: true + title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} + labels: do not test + body: | + Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} + ### Changelog category (leave one): + - Not for changelog (changelog entry is not required) + - name: Reset changes if Dry-run + if: ${{ inputs.dry-run }} + shell: bash + run: | + git reset --hard HEAD + - name: Checkout back to GITHUB_REF + shell: bash + run: | + git checkout "$GITHUB_REF_NAME" + - name: Create GH Release + shell: bash + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/create_release.py --create-gh-release \ + ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Export TGZ Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Test TGZ Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Export RPM Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Test RPM Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Export Debian Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Test Debian Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }} + - name: Docker clickhouse/clickhouse-server building + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + cd "./tests/ci" + export CHECK_NAME="Docker server image" + python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} 
${{ ! inputs.dry-run && '--push' || '' }} + - name: Docker clickhouse/clickhouse-keeper building + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + cd "./tests/ci" + export CHECK_NAME="Docker keeper image" + python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} + - name: Post Slack Message + if: ${{ !cancelled() }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }} diff --git a/.github/workflows/auto_release.yml b/.github/workflows/auto_release.yml index e90b183b8d6..e0a94d3bbb1 100644 --- a/.github/workflows/auto_release.yml +++ b/.github/workflows/auto_release.yml @@ -2,6 +2,7 @@ name: AutoRelease env: PYTHONUNBUFFERED: 1 + DRY_RUN: true concurrency: group: release @@ -9,6 +10,12 @@ on: # yamllint disable-line rule:truthy # schedule: # - cron: '0 10-16 * * 1-5' workflow_dispatch: + inputs: + dry-run: + description: 'Dry run' + required: false + default: true + type: boolean jobs: AutoRelease: @@ -31,7 +38,7 @@ jobs: - name: Auto Release Prepare run: | cd "$GITHUB_WORKSPACE/tests/ci" - python3 auto_release.py + python3 auto_release.py --prepare echo "::group::Auto Release Info" python3 -m json.tool /tmp/autorelease_info.json echo "::endgroup::" @@ -40,14 +47,50 @@ jobs: cat /tmp/autorelease_info.json echo 'EOF' } >> "$GITHUB_ENV" + - name: Post Release Branch statuses + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 auto_release.py --post-status - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }} - if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] }} - uses: ./.github/workflows/create_release.yml + if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }} + uses: ./.github/actions/release with: - type: patch ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }} - dry-run: true - autorelease: true + type: patch + dry-run: ${{ inputs.dry-run }} + token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} + - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }} + if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }} + uses: ./.github/actions/release + with: + ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }} + type: patch + dry-run: ${{ inputs.dry-run }} + token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} + - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }} + if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }} + uses: ./.github/actions/release + with: + ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }} + type: patch + dry-run: ${{ inputs.dry-run }} + token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} + - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }} + if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }} + uses: ./.github/actions/release + with: + ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }} + type: patch + dry-run: ${{ inputs.dry-run }} + token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} + - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }} + if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && 
fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }} + uses: ./.github/actions/release + with: + ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }} + type: patch + dry-run: ${{ inputs.dry-run }} + token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - name: Post Slack Message if: ${{ !cancelled() }} run: | diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 96cd46f583a..6246306e536 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -3,7 +3,7 @@ name: CreateRelease concurrency: group: release -'on': +on: workflow_dispatch: inputs: ref: @@ -22,10 +22,6 @@ concurrency: required: false default: true type: boolean - autorelease: - required: false - default: false - type: boolean jobs: CreateRelease: @@ -36,138 +32,16 @@ jobs: - name: DebugInfo if: ${{ ! inputs.autorelease }} uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 - - name: Set envs - if: ${{ ! inputs.autorelease }} - # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings - run: | - cat >> "$GITHUB_ENV" << 'EOF' - ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" - echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" - - name: Download All Release Artifacts - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/create_release.py --infile "$RELEASE_INFO_FILE" --download-packages ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Push Git Tag for the Release - run: | - python3 ./tests/ci/create_release.py --push-release-tag --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Push New Release Branch - if: ${{ inputs.type == 'new' }} - run: | - python3 ./tests/ci/create_release.py --push-new-release-branch --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Bump CH Version and Update Contributors' List - run: | - python3 ./tests/ci/create_release.py --create-bump-version-pr --infile "$RELEASE_INFO_FILE" ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Checkout master - run: | - git checkout master - - name: Bump Docker versions, Changelog, Security - if: ${{ inputs.type == 'patch' }} - run: | - [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1 - echo "List versions" - ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv - echo "Update docker version" - ./utils/list-versions/update-docker-version.sh - echo "Generate ChangeLog" - export CI=1 - docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ - --volume=".:/ClickHouse" clickhouse/style-test \ - /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ - --gh-user-or-token="$GH_TOKEN" --jobs=5 \ - --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} - git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md - echo "Generate Security" - python3 ./utils/security-generator/generate_security.py > SECURITY.md - git diff HEAD - - name: Create ChangeLog PR - if: ${{ inputs.type == 'patch' && ! 
inputs.dry-run }} - uses: peter-evans/create-pull-request@v6 + - name: Call Release Action + uses: ./.github/actions/release with: - author: "robot-clickhouse " - token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - committer: "robot-clickhouse " - commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} - branch: auto/${{ env.RELEASE_TAG }} - assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher - delete-branch: true - title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} - labels: do not test - body: | - Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} - ### Changelog category (leave one): - - Not for changelog (changelog entry is not required) - - name: Reset changes if Dry-run - if: ${{ inputs.dry-run }} - run: | - git reset --hard HEAD - - name: Checkout back to GITHUB_REF - run: | - git checkout "$GITHUB_REF_NAME" - - name: Create GH Release - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/create_release.py --create-gh-release \ - --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - - name: Export TGZ Packages - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/artifactory.py --export-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Test TGZ Packages - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/artifactory.py --test-tgz --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Export RPM Packages - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/artifactory.py --export-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Test RPM Packages - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/artifactory.py --test-rpm --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Export Debian Packages - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/artifactory.py --export-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Test Debian Packages - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/artifactory.py --test-debian --infile ${{ env.RELEASE_INFO_FILE }} ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Docker clickhouse/clickhouse-server building - if: ${{ inputs.type == 'patch' }} - run: | - cd "./tests/ci" - export CHECK_NAME="Docker server image" - python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} - - name: Docker clickhouse/clickhouse-keeper building - if: ${{ inputs.type == 'patch' }} - run: | - cd "./tests/ci" - export CHECK_NAME="Docker keeper image" - python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! 
inputs.dry-run && '--push' || '' }} - - name: Post Slack Message - if: ${{ !cancelled() }} - run: | - echo Slack Message + ref: ${{ inputs.ref }} + type: inputs.type + dry-run: ${{ inputs.dry-run }} + token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 1a062d05a23..2009b122a18 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -4,7 +4,7 @@ from pathlib import Path from typing import Optional from shutil import copy2 from create_release import PackageDownloader, ReleaseInfo, ShellRunner -from ci_utils import WithIter +from ci_utils import WithIter, Shell class MountPointApp(metaclass=WithIter): @@ -141,12 +141,16 @@ class DebianArtifactory: ShellRunner.run("sync") def test_packages(self): - ShellRunner.run("docker pull ubuntu:latest") + Shell.run("docker pull ubuntu:latest") print(f"Test packages installation, version [{self.version}]") - cmd = f"docker run --rm ubuntu:latest bash -c \"apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-client={self.version}\"" + debian_command = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static={self.version} clickhouse-client={self.version}" + cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"' print("Running test command:") print(f" {cmd}") - ShellRunner.run(cmd) + Shell.run(cmd, check=True) + release_info = ReleaseInfo.from_file() + release_info.debian_command = debian_command + release_info.dump() def _copy_if_not_exists(src: Path, dst: Path) -> Path: @@ -210,15 +214,19 @@ class RpmArtifactory: pub_key_path.write_text(ShellRunner.run(update_public_key)[1]) if codename == RepoCodenames.LTS: self.export_packages(RepoCodenames.STABLE) - ShellRunner.run("sync") + Shell.run("sync") def test_packages(self): - ShellRunner.run("docker pull fedora:latest") + Shell.run("docker pull fedora:latest") print(f"Test package installation, version [{self.version}]") - cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1"' + rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1" + cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"' print("Running test command:") print(f" {cmd}") - ShellRunner.run(cmd) + Shell.run(cmd, check=True) + release_info = ReleaseInfo.from_file() + release_info.rpm_command = rpm_command + release_info.dump() class TgzArtifactory: @@ -280,12 +288,6 @@ def parse_args() -> argparse.Namespace: formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Adds release packages to the repository", ) - parser.add_argument( - "--infile", - type=str, - required=True, - help="input file with release info", - ) parser.add_argument( "--export-debian", action="store_true", @@ -328,7 +330,7 @@ if __name__ == "__main__": args = parse_args() assert args.dry_run - release_info = 
ReleaseInfo.from_file(args.infile) + release_info = ReleaseInfo.from_file() """ Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve: ERROR : IO error: NotImplemented: versionId not implemented diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index 88e91eb8aed..b8f781c4d73 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -1,20 +1,17 @@ import argparse import dataclasses import json -import logging import os +import sys from typing import List from get_robot_token import get_best_robot_token from github_helper import GitHub -from ssh import SSHKey from ci_utils import Shell from env_helper import GITHUB_REPOSITORY from report import SUCCESS - -LOGGER_NAME = __name__ -HELPER_LOGGERS = ["github_helper", LOGGER_NAME] -logger = logging.getLogger(LOGGER_NAME) +from ci_buddy import CIBuddy +from ci_config import CI def parse_args(): @@ -23,8 +20,17 @@ def parse_args(): "branches and do a release in case for green builds." ) parser.add_argument("--token", help="GitHub token, if not set, used from smm") - - return parser.parse_args() + parser.add_argument( + "--post-status", + action="store_true", + help="Post release branch statuses", + ) + parser.add_argument( + "--prepare", + action="store_true", + help="Prepare autorelease info", + ) + return parser.parse_args(), parser MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE = 5 @@ -33,8 +39,16 @@ AUTORELEASE_INFO_FILE = "/tmp/autorelease_info.json" @dataclasses.dataclass class ReleaseParams: + ready: bool + ci_status: str + num_patches: int release_branch: str commit_sha: str + commits_to_branch_head: int + latest: bool + + def to_dict(self): + return dataclasses.asdict(self) @dataclasses.dataclass @@ -49,83 +63,128 @@ class AutoReleaseInfo: with open(AUTORELEASE_INFO_FILE, "w", encoding="utf-8") as f: print(json.dumps(dataclasses.asdict(self), indent=2), file=f) + @staticmethod + def from_file() -> "AutoReleaseInfo": + with open(AUTORELEASE_INFO_FILE, "r", encoding="utf-8") as json_file: + res = json.load(json_file) + releases = [ReleaseParams(**release) for release in res["releases"]] + return AutoReleaseInfo(releases=releases) -def main(): - args = parse_args() - token = args.token or get_best_robot_token() +def _prepare(token): assert len(token) > 10 os.environ["GH_TOKEN"] = token - (Shell.run("gh auth status", check=True)) + Shell.run("gh auth status", check=True) + gh = GitHub(token) prs = gh.get_release_pulls(GITHUB_REPOSITORY) + prs.sort(key=lambda x: x.head.ref) branch_names = [pr.head.ref for pr in prs] - print(f"Found release branches [{branch_names}]") - repo = gh.get_repo(GITHUB_REPOSITORY) + repo = gh.get_repo(GITHUB_REPOSITORY) autoRelease_info = AutoReleaseInfo(releases=[]) + for pr in prs: - print(f"Checking PR [{pr.head.ref}]") + print(f"\nChecking PR [{pr.head.ref}]") refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}")) - refs.sort(key=lambda ref: ref.ref) + assert refs + refs.sort(key=lambda ref: ref.ref) latest_release_tag_ref = refs[-1] latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha) - commit_num = int( - Shell.run( - f"git rev-list --count {latest_release_tag.tag}..origin/{pr.head.ref}", - check=True, - ) - ) - print( - f"Previous release is [{latest_release_tag}] was [{commit_num}] commits before, date [{latest_release_tag.tagger.date}]" - ) - commit_reverse_index = 0 - commit_found = False - commit_checked = False - commit_sha = "" - while ( - commit_reverse_index < commit_num - 1 - and commit_reverse_index < 
MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE - ): - commit_checked = True - commit_sha = Shell.run( - f"git rev-list --max-count=1 --skip={commit_reverse_index} origin/{pr.head.ref}", - check=True, - ) - print( - f"Check if commit [{commit_sha}] [{pr.head.ref}~{commit_reverse_index}] is ready for release" - ) - commit_reverse_index += 1 - cmd = f"gh api -H 'Accept: application/vnd.github.v3+json' /repos/{GITHUB_REPOSITORY}/commits/{commit_sha}/status" - ci_status_json = Shell.run(cmd, check=True) - ci_status = json.loads(ci_status_json)["state"] - if ci_status == SUCCESS: - commit_found = True - break - if commit_found: + commits = Shell.run( + f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}", + check=True, + ).split("\n") + commit_num = len(commits) + print( + f"Previous release [{latest_release_tag.tag}] was [{commit_num}] commits ago, date [{latest_release_tag.tagger.date}]" + ) + + commits_to_check = commits[:-1] # Exclude the version bump commit + commit_sha = "" + commit_ci_status = "" + commits_to_branch_head = 0 + + for idx, commit in enumerate( + commits_to_check[:MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE] + ): + print( + f"Check commit [{commit}] [{pr.head.ref}~{idx+1}] as release candidate" + ) + commit_num -= 1 + + is_completed = CI.GHActions.check_wf_completed( + token=token, commit_sha=commit + ) + if not is_completed: + print(f"CI is in progress for [{commit}] - check previous commit") + commits_to_branch_head += 1 + continue + + commit_ci_status = CI.GHActions.get_commit_status_by_name( + token=token, + commit_sha=commit, + status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"), + ) + commit_sha = commit + if commit_ci_status == SUCCESS: + break + else: + print(f"CI status [{commit_ci_status}] - skip") + commits_to_branch_head += 1 + + ready = commit_ci_status == SUCCESS and commit_sha + if ready: print( f"Add release ready info for commit [{commit_sha}] and release branch [{pr.head.ref}]" ) - autoRelease_info.add_release( - ReleaseParams(release_branch=pr.head.ref, commit_sha=commit_sha) - ) else: - print(f"WARNING: No good commits found for release branch [{pr.head.ref}]") - if commit_checked: - print( - f"ERROR: CI is failed. 
check CI status for branch [{pr.head.ref}]" - ) + print(f"WARNING: No ready commits found for release branch [{pr.head.ref}]") + + autoRelease_info.add_release( + ReleaseParams( + release_branch=pr.head.ref, + commit_sha=commit_sha, + ready=ready, + ci_status=commit_ci_status, + num_patches=commit_num, + commits_to_branch_head=commits_to_branch_head, + latest=False, + ) + ) + + if autoRelease_info.releases: + autoRelease_info.releases[-1].latest = True autoRelease_info.dump() -if __name__ == "__main__": - if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""): - with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): - main() +def main(): + args, parser = parse_args() + + if args.post_status: + info = AutoReleaseInfo.from_file() + for release_info in info.releases: + if release_info.ready: + CIBuddy(dry_run=False).post_info( + title=f"Auto Release Status for {release_info.release_branch}", + body=release_info.to_dict(), + ) + else: + CIBuddy(dry_run=False).post_warning( + title=f"Auto Release Status for {release_info.release_branch}", + body=release_info.to_dict(), + ) + elif args.prepare: + _prepare(token=args.token or get_best_robot_token()) else: - main() + parser.print_help() + sys.exit(2) + + +if __name__ == "__main__": + main() diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 110a7b2a49c..8ad358cb874 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1295,7 +1295,7 @@ def main() -> int: error_description = f"Out Of Memory, exit_code {job_report.exit_code}" else: error_description = f"Unknown, exit_code {job_report.exit_code}" - CIBuddy().post_error( + CIBuddy().post_job_error( error_description + f" after {int(job_report.duration)}s", job_name=_get_ext_check_name(args.job_name), ) diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index c650b876610..727a3d88359 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -1,5 +1,6 @@ import json import os +from typing import Union, Dict import boto3 import requests @@ -60,7 +61,55 @@ class CIBuddy: except Exception as e: print(f"ERROR: Failed to post message, ex {e}") - def post_error(self, error_description, job_name="", with_instance_info=True): + def _post_formatted( + self, title, body: Union[Dict, str], with_wf_link: bool + ) -> None: + message = title + if isinstance(body, dict): + for name, value in body.items(): + if "commit_sha" in name: + value = ( + f"" + ) + message += f" *{name}*: {value}\n" + else: + message += body + "\n" + run_id = os.getenv("GITHUB_RUN_ID", "") + if with_wf_link and run_id: + message += f" *workflow*: \n" + self.post(message) + + def post_info( + self, title, body: Union[Dict, str], with_wf_link: bool = True + ) -> None: + title_extended = f":white_circle: *{title}*\n\n" + self._post_formatted(title_extended, body, with_wf_link) + + def post_done( + self, title, body: Union[Dict, str], with_wf_link: bool = True + ) -> None: + title_extended = f":white_check_mark: *{title}*\n\n" + self._post_formatted(title_extended, body, with_wf_link) + + def post_warning( + self, title, body: Union[Dict, str], with_wf_link: bool = True + ) -> None: + title_extended = f":warning: *{title}*\n\n" + self._post_formatted(title_extended, body, with_wf_link) + + def post_critical( + self, title, body: Union[Dict, str], with_wf_link: bool = True + ) -> None: + title_extended = f":black_circle: *{title}*\n\n" + self._post_formatted(title_extended, body, with_wf_link) + + def post_job_error( + self, + error_description, + job_name="", + with_instance_info=True, + with_wf_link: bool = True, + ): instance_id, instance_type = "unknown", 
"unknown" if with_instance_info: instance_id = Shell.run("ec2metadata --instance-id") or instance_id @@ -82,10 +131,13 @@ class CIBuddy: message += line_pr_ else: message += line_br_ + run_id = os.getenv("GITHUB_RUN_ID", "") + if with_wf_link and run_id: + message += f" *workflow*: \n" self.post(message) if __name__ == "__main__": # test buddy = CIBuddy(dry_run=True) - buddy.post_error("TEst") + buddy.post_job_error("TEst") diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 0a4ab3a823b..a44b15f34c1 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -32,6 +32,9 @@ class CI: from ci_definitions import MQ_JOBS as MQ_JOBS from ci_definitions import WorkflowStages as WorkflowStages from ci_definitions import Runners as Runners + from ci_utils import Envs as Envs + from ci_utils import Utils as Utils + from ci_utils import GHActions as GHActions from ci_definitions import Labels as Labels from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS from ci_utils import CATEGORY_TO_LABEL as CATEGORY_TO_LABEL diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 0653374356f..4536d1f2b54 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -1,9 +1,16 @@ import os import re import subprocess +import time from contextlib import contextmanager from pathlib import Path -from typing import Any, Iterator, List, Union, Optional, Tuple +from typing import Any, Iterator, List, Union, Optional, Sequence + +import requests + + +class Envs: + GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse") LABEL_CATEGORIES = { @@ -80,6 +87,62 @@ class GHActions: print(line) print("::endgroup::") + @staticmethod + def get_commit_status_by_name( + token: str, commit_sha: str, status_name: Union[str, Sequence] + ) -> Optional[str]: + assert len(token) == 40 + assert len(commit_sha) == 40 + assert is_hex(commit_sha) + assert not is_hex(token) + url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/statuses?per_page={200}" + headers = { + "Authorization": f"token {token}", + "Accept": "application/vnd.github.v3+json", + } + response = requests.get(url, headers=headers, timeout=5) + + if isinstance(status_name, str): + status_name = (status_name,) + if response.status_code == 200: + assert "next" not in response.links, "Response truncated" + statuses = response.json() + for status in statuses: + if status["context"] in status_name: + return status["state"] + return None + + @staticmethod + def check_wf_completed(token: str, commit_sha: str) -> bool: + headers = { + "Authorization": f"token {token}", + "Accept": "application/vnd.github.v3+json", + } + url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/check-runs?per_page={100}" + + for i in range(3): + try: + response = requests.get(url, headers=headers, timeout=5) + response.raise_for_status() + # assert "next" not in response.links, "Response truncated" + + data = response.json() + assert data["check_runs"], "?" 
+ + for check in data["check_runs"]: + if check["status"] != "completed": + print( + f" Check workflow status: Check not completed [{check['name']}]" + ) + return False + else: + return True + except Exception as e: + print(f"ERROR: exception {e}") + time.sleep(1) + + return False + class Shell: @classmethod @@ -108,15 +171,18 @@ class Shell: ) if result.returncode == 0: res = result.stdout - elif check: - print(f"ERROR: stdout {result.stdout}, stderr {result.stderr}") - assert result.returncode == 0 + else: + print( + f"ERROR: stdout {result.stdout.strip()}, stderr {result.stderr.strip()}" + ) + if check: + assert result.returncode == 0 return res.strip() @classmethod def check(cls, command): result = subprocess.run( - command + " 2>&1", + command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 277134c3991..414ec8afd3e 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -14,6 +14,7 @@ from ssh import SSHAgent from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET from s3_helper import S3Helper from ci_utils import Shell +from ci_buddy import CIBuddy from version_helper import ( FILE_WITH_VERSION_PATH, GENERATED_CONTRIBUTORS, @@ -27,6 +28,7 @@ from ci_config import CI CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH) CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS) +RELEASE_INFO_FILE = "/tmp/release_info.json" class ShellRunner: @@ -67,17 +69,25 @@ class ReleaseInfo: codename: str previous_release_tag: str previous_release_sha: str + changelog_pr: str = "" + version_bump_pr: str = "" + release_url: str = "" + debian_command: str = "" + rpm_command: str = "" @staticmethod - def from_file(file_path: str) -> "ReleaseInfo": - with open(file_path, "r", encoding="utf-8") as json_file: + def from_file() -> "ReleaseInfo": + with open(RELEASE_INFO_FILE, "r", encoding="utf-8") as json_file: res = json.load(json_file) return ReleaseInfo(**res) + def dump(self): + print(f"Dump release info into [{RELEASE_INFO_FILE}]") + with open(RELEASE_INFO_FILE, "w", encoding="utf-8") as f: + print(json.dumps(dataclasses.asdict(self), indent=2), file=f) + @staticmethod - def prepare(commit_ref: str, release_type: str, outfile: str) -> None: - Path(outfile).parent.mkdir(parents=True, exist_ok=True) - Path(outfile).unlink(missing_ok=True) + def prepare(commit_ref: str, release_type: str) -> None: version = None release_branch = None release_tag = None @@ -91,7 +101,7 @@ class ReleaseInfo: f"git merge-base --is-ancestor origin/{commit_ref} origin/master" ) with checkout(commit_ref): - _, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}") + commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True) # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) @@ -112,7 +122,7 @@ class ReleaseInfo: assert previous_release_sha if release_type == "patch": with checkout(commit_ref): - _, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}") + commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True) # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) @@ -171,8 +181,7 @@ class ReleaseInfo: previous_release_tag=previous_release_tag, previous_release_sha=previous_release_sha, ) - with open(outfile, "w", encoding="utf-8") as f: - print(json.dumps(dataclasses.asdict(res), indent=2), file=f) + res.dump() def push_release_tag(self, dry_run: bool) -> None: if dry_run: @@ -276,21 
+285,38 @@ class ReleaseInfo: f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" ) + def update_release_info(self, dry_run: bool) -> None: + branch = f"auto/{release_info.release_tag}" + if not dry_run: + get_url_cmd = f"gh pr list --repo {GITHUB_REPOSITORY} --head {branch} --json url --jq '.[0].url'" + url = Shell.run(get_url_cmd) + if url: + print(f"Update release info with Changelog PR link [{url}]") + else: + print(f"WARNING: Changelog PR not found, branch [{branch}]") + else: + url = "dry-run" + + self.changelog_pr = url + self.dump() + def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: repo = os.getenv("GITHUB_REPOSITORY") assert repo - cmds = [] - cmds.append( + cmds = [ f"gh release create --repo {repo} --title 'Release {self.release_tag}' {self.release_tag}" - ) + ] for file in packages_files: cmds.append(f"gh release upload {self.release_tag} {file}") if not dry_run: for cmd in cmds: - ShellRunner.run(cmd) + Shell.run(cmd, check=True) + self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" else: print("Dry-run, would run commands:") print("\n * ".join(cmds)) + self.release_url = f"dry-run" + self.dump() class RepoTypes: @@ -508,6 +534,11 @@ def parse_args() -> argparse.Namespace: action="store_true", help="Create GH Release object and attach all packages", ) + parser.add_argument( + "--post-status", + action="store_true", + help="Post release status into Slack", + ) parser.add_argument( "--ref", type=str, @@ -525,18 +556,6 @@ def parse_args() -> argparse.Namespace: action="store_true", help="do not make any actual changes in the repo, just show what will be done", ) - parser.add_argument( - "--outfile", - default="", - type=str, - help="output file to write json result to, if not set - stdout", - ) - parser.add_argument( - "--infile", - default="", - type=str, - help="input file with release info", - ) return parser.parse_args() @@ -547,7 +566,7 @@ def checkout(ref: str) -> Iterator[None]: rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" assert orig_ref if ref not in (orig_ref,): - ShellRunner.run(f"{GIT_PREFIX} checkout {ref}") + Shell.run(f"{GIT_PREFIX} checkout {ref}") try: yield except (Exception, KeyboardInterrupt) as e: @@ -587,27 +606,21 @@ if __name__ == "__main__": if args.prepare_release_info: assert ( - args.ref and args.release_type and args.outfile - ), "--ref, --release-type and --outfile must be provided with --prepare-release-info" - ReleaseInfo.prepare( - commit_ref=args.ref, release_type=args.release_type, outfile=args.outfile - ) + args.ref and args.release_type + ), "--ref and --release-type must be provided with --prepare-release-info" + ReleaseInfo.prepare(commit_ref=args.ref, release_type=args.release_type) if args.push_release_tag: - assert args.infile, "--infile must be provided" - release_info = ReleaseInfo.from_file(args.infile) + release_info = ReleaseInfo.from_file() release_info.push_release_tag(dry_run=args.dry_run) if args.push_new_release_branch: - assert args.infile, "--infile must be provided" - release_info = ReleaseInfo.from_file(args.infile) + release_info = ReleaseInfo.from_file() release_info.push_new_release_branch(dry_run=args.dry_run) if args.create_bump_version_pr: # TODO: store link to PR in release info - assert args.infile, "--infile must be provided" - release_info = ReleaseInfo.from_file(args.infile) + release_info = ReleaseInfo.from_file() release_info.update_version_and_contributors_list(dry_run=args.dry_run) if args.download_packages: - assert 
args.infile, "--infile must be provided" - release_info = ReleaseInfo.from_file(args.infile) + release_info = ReleaseInfo.from_file() p = PackageDownloader( release=release_info.release_branch, commit_sha=release_info.commit_sha, @@ -615,14 +628,23 @@ if __name__ == "__main__": ) p.run() if args.create_gh_release: - assert args.infile, "--infile must be provided" - release_info = ReleaseInfo.from_file(args.infile) + release_info = ReleaseInfo.from_file() p = PackageDownloader( release=release_info.release_branch, commit_sha=release_info.commit_sha, version=release_info.version, ) - release_info.create_gh_release(p.get_all_packages_files(), args.dry_run) + if args.post_status: + release_info = ReleaseInfo.from_file() + release_info.update_release_info(dry_run=args.dry_run) + if release_info.debian_command: + CIBuddy(dry_run=args.dry_run).post_done( + f"New release issued", dataclasses.asdict(release_info) + ) + else: + CIBuddy(dry_run=args.dry_run).post_critical( + f"Failed to issue new release", dataclasses.asdict(release_info) + ) # tear down ssh if _ssh_agent and _key_pub: diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 9f3b5a586cc..59806a2a8fa 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -296,13 +296,16 @@ class PRInfo: else: if "schedule" in github_event: self.event_type = EventType.SCHEDULE - else: + elif "inputs" in github_event: # assume this is a dispatch self.event_type = EventType.DISPATCH - logging.warning( - "event.json does not match pull_request or push:\n%s", - json.dumps(github_event, sort_keys=True, indent=4), - ) + print("PR Info:") + print(self) + else: + logging.warning( + "event.json does not match pull_request or push:\n%s", + json.dumps(github_event, sort_keys=True, indent=4), + ) self.sha = os.getenv( "GITHUB_SHA", "0000000000000000000000000000000000000000" ) From 952ab302ce1fa5d9b739bbdf6acdf2fcdd208a04 Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 19 Jul 2024 09:00:11 +0200 Subject: [PATCH 0570/2361] run test auto release by schedule --- .github/workflows/auto_release.yml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/auto_release.yml b/.github/workflows/auto_release.yml index e0a94d3bbb1..7588e7998bf 100644 --- a/.github/workflows/auto_release.yml +++ b/.github/workflows/auto_release.yml @@ -7,8 +7,10 @@ env: concurrency: group: release on: # yamllint disable-line rule:truthy - # schedule: - # - cron: '0 10-16 * * 1-5' + # Workflow uses a test bucket for packages and dry run mode (no real releases) + schedule: + - cron: '0 9 * * *' + - cron: '0 15 * * *' workflow_dispatch: inputs: dry-run: @@ -19,6 +21,8 @@ on: # yamllint disable-line rule:truthy jobs: AutoRelease: + env: + DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run != '' && github.event.inputs.dry_run || true }} runs-on: [self-hosted, release-maker] steps: - name: DebugInfo @@ -57,7 +61,7 @@ jobs: with: ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }} type: patch - dry-run: ${{ inputs.dry-run }} + dry-run: ${{ env.DRY_RUN }} token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }} if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }} @@ -65,7 +69,7 @@ jobs: with: ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }} type: patch - dry-run: ${{ inputs.dry-run }} + dry-run: ${{ env.DRY_RUN }} token: 
${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }} if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }} @@ -73,7 +77,7 @@ jobs: with: ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }} type: patch - dry-run: ${{ inputs.dry-run }} + dry-run: ${{ env.DRY_RUN }} token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }} if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }} @@ -81,7 +85,7 @@ jobs: with: ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }} type: patch - dry-run: ${{ inputs.dry-run }} + dry-run: ${{ env.DRY_RUN }} token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }} if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }} @@ -89,7 +93,7 @@ jobs: with: ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }} type: patch - dry-run: ${{ inputs.dry-run }} + dry-run: ${{ env.DRY_RUN }} token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - name: Post Slack Message if: ${{ !cancelled() }} From 5d09f205e5bb5ab8aa860b14334eb5656b2c2a1b Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 19 Jul 2024 09:35:43 +0000 Subject: [PATCH 0571/2361] style fixes --- .github/workflows/auto_release.yml | 8 ++++++-- .github/workflows/create_release.yml | 5 +---- tests/ci/auto_release.py | 11 ++++++----- tests/ci/ci_buddy.py | 18 +++++++++--------- tests/ci/ci_utils.py | 11 +++++------ 5 files changed, 27 insertions(+), 26 deletions(-) diff --git a/.github/workflows/auto_release.yml b/.github/workflows/auto_release.yml index 7588e7998bf..f2cbf771190 100644 --- a/.github/workflows/auto_release.yml +++ b/.github/workflows/auto_release.yml @@ -21,8 +21,6 @@ on: # yamllint disable-line rule:truthy jobs: AutoRelease: - env: - DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run != '' && github.event.inputs.dry_run || true }} runs-on: [self-hosted, release-maker] steps: - name: DebugInfo @@ -34,6 +32,12 @@ jobs: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}} RCSK EOF + - name: Set DRY_RUN for schedule + if: ${{ github.event_name == 'schedule' }} + run: echo "DRY_RUN=true" >> "$GITHUB_ENV" + - name: Set DRY_RUN for dispatch + if: ${{ github.event_name == 'workflow_dispatch' }} + run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV" - name: Check out repository code uses: ClickHouse/checkout@v1 with: diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 6246306e536..6d914d1567e 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -2,8 +2,7 @@ name: CreateRelease concurrency: group: release - -on: +'on': workflow_dispatch: inputs: ref: @@ -30,10 +29,8 @@ jobs: runs-on: [self-hosted, release-maker] steps: - name: DebugInfo - if: ${{ ! inputs.autorelease }} uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 - name: Check out repository code - if: ${{ ! 
inputs.autorelease }} uses: ClickHouse/checkout@v1 with: token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index b8f781c4d73..5d98d8810a4 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -55,7 +55,7 @@ class ReleaseParams: class AutoReleaseInfo: releases: List[ReleaseParams] - def add_release(self, release_params: ReleaseParams): + def add_release(self, release_params: ReleaseParams) -> None: self.releases.append(release_params) def dump(self): @@ -133,15 +133,16 @@ def _prepare(token): commit_sha = commit if commit_ci_status == SUCCESS: break - else: - print(f"CI status [{commit_ci_status}] - skip") + + print(f"CI status [{commit_ci_status}] - skip") commits_to_branch_head += 1 - ready = commit_ci_status == SUCCESS and commit_sha - if ready: + ready = False + if commit_ci_status == SUCCESS and commit_sha: print( f"Add release ready info for commit [{commit_sha}] and release branch [{pr.head.ref}]" ) + ready = True else: print(f"WARNING: No ready commits found for release branch [{pr.head.ref}]") diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index 727a3d88359..ff8fdba2b6c 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -62,7 +62,7 @@ class CIBuddy: print(f"ERROR: Failed to post message, ex {e}") def _post_formatted( - self, title, body: Union[Dict, str], with_wf_link: bool + self, title: str, body: Union[Dict, str], with_wf_link: bool ) -> None: message = title if isinstance(body, dict): @@ -80,36 +80,36 @@ class CIBuddy: self.post(message) def post_info( - self, title, body: Union[Dict, str], with_wf_link: bool = True + self, title: str, body: Union[Dict, str], with_wf_link: bool = True ) -> None: title_extended = f":white_circle: *{title}*\n\n" self._post_formatted(title_extended, body, with_wf_link) def post_done( - self, title, body: Union[Dict, str], with_wf_link: bool = True + self, title: str, body: Union[Dict, str], with_wf_link: bool = True ) -> None: title_extended = f":white_check_mark: *{title}*\n\n" self._post_formatted(title_extended, body, with_wf_link) def post_warning( - self, title, body: Union[Dict, str], with_wf_link: bool = True + self, title: str, body: Union[Dict, str], with_wf_link: bool = True ) -> None: title_extended = f":warning: *{title}*\n\n" self._post_formatted(title_extended, body, with_wf_link) def post_critical( - self, title, body: Union[Dict, str], with_wf_link: bool = True + self, title: str, body: Union[Dict, str], with_wf_link: bool = True ) -> None: title_extended = f":black_circle: *{title}*\n\n" self._post_formatted(title_extended, body, with_wf_link) def post_job_error( self, - error_description, - job_name="", - with_instance_info=True, + error_description: str, + job_name: str = "", + with_instance_info: bool = True, with_wf_link: bool = True, - ): + ) -> None: instance_id, instance_type = "unknown", "unknown" if with_instance_info: instance_id = Shell.run("ec2metadata --instance-id") or instance_id diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 4536d1f2b54..eb25a53d492 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -90,7 +90,7 @@ class GHActions: @staticmethod def get_commit_status_by_name( token: str, commit_sha: str, status_name: Union[str, Sequence] - ) -> Optional[str]: + ) -> str: assert len(token) == 40 assert len(commit_sha) == 40 assert is_hex(commit_sha) @@ -109,8 +109,8 @@ class GHActions: statuses = response.json() for status in statuses: if status["context"] in status_name: - return 
status["state"] - return None + return status["state"] # type: ignore + return "" @staticmethod def check_wf_completed(token: str, commit_sha: str) -> bool: @@ -135,10 +135,9 @@ class GHActions: f" Check workflow status: Check not completed [{check['name']}]" ) return False - else: - return True + return True except Exception as e: - print(f"ERROR: exception {e}") + print(f"ERROR: exception after attempt [{i}]: {e}") time.sleep(1) return False From 3b842885779e123af1fd8aeaca4e7c131c5a33a9 Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 19 Jul 2024 12:05:19 +0200 Subject: [PATCH 0572/2361] fix create release --- .github/workflows/create_release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 6d914d1567e..55644bdd503 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -39,6 +39,6 @@ jobs: uses: ./.github/actions/release with: ref: ${{ inputs.ref }} - type: inputs.type + type: ${{ inputs.type }} dry-run: ${{ inputs.dry-run }} token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} From a3a4548d96dac4b480e3a54519cfa34fab17ce4c Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 20 Jul 2024 15:01:29 +0200 Subject: [PATCH 0573/2361] Fix removing files after restoring to s3_plain_rewritable. --- src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index e7c85bea1c6..b5805f6d23a 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -874,7 +874,9 @@ void DiskObjectStorageTransaction::writeFileUsingBlobWritingFunction( /// Create metadata (see create_metadata_callback in DiskObjectStorageTransaction::writeFile()). 
if (mode == WriteMode::Rewrite) { - if (!object_storage.isWriteOnce() && metadata_storage.exists(path)) + /// Otherwise we will produce lost blobs which nobody points to + /// WriteOnce storages are not affected by the issue + if (!object_storage.isPlain() && metadata_storage.exists(path)) object_storage.removeObjectsIfExist(metadata_storage.getStorageObjects(path)); metadata_transaction->createMetadataFile(path, std::move(object_key), object_size); From 3767f723489507dad7fe275c245253e7885aab8e Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 19 Jul 2024 20:43:14 +0200 Subject: [PATCH 0574/2361] more info for buddy --- .github/actions/release/action.yml | 20 +- .github/workflows/auto_release.yml | 3 +- tests/ci/artifactory.py | 86 ++++--- tests/ci/auto_release.py | 27 +++ tests/ci/ci_buddy.py | 4 +- tests/ci/ci_utils.py | 23 +- tests/ci/create_release.py | 348 ++++++++++++++++++----------- 7 files changed, 338 insertions(+), 173 deletions(-) diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index fd4436f1f16..99ec02662f6 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -58,14 +58,11 @@ runs: shell: bash run: | python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }} - - name: Checkout master - shell: bash - run: | - git checkout master - name: Bump Docker versions, Changelog, Security if: ${{ inputs.type == 'patch' }} shell: bash run: | + python3 ./tests/ci/create_release.py --set-progress-started --progress "update ChangeLog" [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1 echo "List versions" ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv @@ -108,12 +105,13 @@ runs: shell: bash run: | git checkout "$GITHUB_REF_NAME" + # set current progress to OK + python3 ./tests/ci/create_release.py --set-progress-completed - name: Create GH Release shell: bash if: ${{ inputs.type == 'patch' }} run: | - python3 ./tests/ci/create_release.py --create-gh-release \ - ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }} - name: Export TGZ Packages if: ${{ inputs.type == 'patch' }} shell: bash @@ -148,16 +146,26 @@ runs: if: ${{ inputs.type == 'patch' }} shell: bash run: | + python3 ./tests/ci/create_release.py --set-progress-started --progress "docker server release" cd "./tests/ci" export CHECK_NAME="Docker server image" python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} + python3 ./tests/ci/create_release.py --set-progress-completed - name: Docker clickhouse/clickhouse-keeper building if: ${{ inputs.type == 'patch' }} shell: bash run: | + python3 ./tests/ci/create_release.py --set-progress-started --progress "docker keeper release" cd "./tests/ci" export CHECK_NAME="Docker keeper image" python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! 
inputs.dry-run && '--push' || '' }} + python3 ./tests/ci/create_release.py --set-progress-completed + - name: Set Release progress completed + shell: bash + run: | + # If we here - set completed status, to post proper Slack OK or FAIL message in the next step + python3 ./tests/ci/create_release.py --set-progress-started --progress "completed" + python3 ./tests/ci/create_release.py --set-progress-completed - name: Post Slack Message if: ${{ !cancelled() }} shell: bash diff --git a/.github/workflows/auto_release.yml b/.github/workflows/auto_release.yml index f2cbf771190..457ffacc7a8 100644 --- a/.github/workflows/auto_release.yml +++ b/.github/workflows/auto_release.yml @@ -102,7 +102,8 @@ jobs: - name: Post Slack Message if: ${{ !cancelled() }} run: | - echo Slack Message + cd "$GITHUB_WORKSPACE/tests/ci" + python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }} - name: Clean up run: | docker ps --quiet | xargs --no-run-if-empty docker kill ||: diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 2009b122a18..98a0345c6bd 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -3,7 +3,12 @@ import time from pathlib import Path from typing import Optional from shutil import copy2 -from create_release import PackageDownloader, ReleaseInfo, ShellRunner +from create_release import ( + PackageDownloader, + ReleaseInfo, + ReleaseContextManager, + ReleaseProgress, +) from ci_utils import WithIter, Shell @@ -76,19 +81,20 @@ class R2MountPoint: ) _TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}" - ShellRunner.run(_CLEAN_LOG_FILE_CMD) - ShellRunner.run(_UNMOUNT_CMD) - ShellRunner.run(_MKDIR_CMD) - ShellRunner.run(_MKDIR_FOR_CACHE) - ShellRunner.run(self.mount_cmd, async_=self.async_mount) + Shell.run(_CLEAN_LOG_FILE_CMD) + Shell.run(_UNMOUNT_CMD) + Shell.run(_MKDIR_CMD) + Shell.run(_MKDIR_FOR_CACHE) + # didn't manage to use simple run() and not block or fail + Shell.run_as_daemon(self.mount_cmd) if self.async_mount: time.sleep(3) - ShellRunner.run(_TEST_MOUNT_CMD) + Shell.run(_TEST_MOUNT_CMD, check=True) @classmethod def teardown(cls): print(f"Unmount [{cls.MOUNT_POINT}]") - ShellRunner.run(f"umount {cls.MOUNT_POINT}") + Shell.run(f"umount {cls.MOUNT_POINT}") class RepoCodenames(metaclass=WithIter): @@ -124,8 +130,8 @@ class DebianArtifactory: cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}" print("Running export command:") print(f" {cmd}") - ShellRunner.run(cmd) - ShellRunner.run("sync") + Shell.run(cmd, check=True) + Shell.run("sync") if self.codename == RepoCodenames.LTS: packages_with_version = [ @@ -137,8 +143,8 @@ class DebianArtifactory: cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}" print("Running copy command:") print(f" {cmd}") - ShellRunner.run(cmd) - ShellRunner.run("sync") + Shell.run(cmd, check=True) + Shell.run("sync") def test_packages(self): Shell.run("docker pull ubuntu:latest") @@ -206,12 +212,12 @@ class RpmArtifactory: for command in commands: print("Running command:") print(f" {command}") - ShellRunner.run(command) + Shell.run(command, check=True) update_public_key = f"gpg --armor --export {self._SIGN_KEY}" pub_key_path = dest_dir / "repodata" / "repomd.xml.key" print("Updating repomd.xml.key") - pub_key_path.write_text(ShellRunner.run(update_public_key)[1]) + pub_key_path.write_text(Shell.run(update_public_key, check=True)) if codename == RepoCodenames.LTS: self.export_packages(RepoCodenames.STABLE) Shell.run("sync") @@ 
-264,23 +270,29 @@ class TgzArtifactory: if codename == RepoCodenames.LTS: self.export_packages(RepoCodenames.STABLE) - ShellRunner.run("sync") + Shell.run("sync") def test_packages(self): tgz_file = "/tmp/tmp.tgz" tgz_sha_file = "/tmp/tmp.tgz.sha512" - ShellRunner.run( - f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz" + cmd = f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz" + Shell.run( + cmd, + check=True, ) - ShellRunner.run( - f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512" + Shell.run( + f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512", + check=True, ) - expected_checksum = ShellRunner.run(f"cut -d ' ' -f 1 {tgz_sha_file}") - actual_checksum = ShellRunner.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1") + expected_checksum = Shell.run(f"cut -d ' ' -f 1 {tgz_sha_file}", check=True) + actual_checksum = Shell.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1") assert ( expected_checksum == actual_checksum ), f"[{actual_checksum} != {expected_checksum}]" - ShellRunner.run("rm /tmp/tmp.tgz*") + Shell.run("rm /tmp/tmp.tgz*") + release_info = ReleaseInfo.from_file() + release_info.tgz_command = cmd + release_info.dump() def parse_args() -> argparse.Namespace: @@ -338,20 +350,26 @@ if __name__ == "__main__": """ mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run) if args.export_debian: - mp.init() - DebianArtifactory(release_info, dry_run=args.dry_run).export_packages() - mp.teardown() + with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_DEB) as _: + mp.init() + DebianArtifactory(release_info, dry_run=args.dry_run).export_packages() + mp.teardown() if args.export_rpm: - mp.init() - RpmArtifactory(release_info, dry_run=args.dry_run).export_packages() - mp.teardown() + with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_RPM) as _: + mp.init() + RpmArtifactory(release_info, dry_run=args.dry_run).export_packages() + mp.teardown() if args.export_tgz: - mp.init() - TgzArtifactory(release_info, dry_run=args.dry_run).export_packages() - mp.teardown() + with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_TGZ) as _: + mp.init() + TgzArtifactory(release_info, dry_run=args.dry_run).export_packages() + mp.teardown() if args.test_debian: - DebianArtifactory(release_info, dry_run=args.dry_run).test_packages() + with ReleaseContextManager(release_progress=ReleaseProgress.TEST_DEB) as _: + DebianArtifactory(release_info, dry_run=args.dry_run).test_packages() if args.test_tgz: - TgzArtifactory(release_info, dry_run=args.dry_run).test_packages() + with ReleaseContextManager(release_progress=ReleaseProgress.TEST_TGZ) as _: + TgzArtifactory(release_info, dry_run=args.dry_run).test_packages() if args.test_rpm: - RpmArtifactory(release_info, dry_run=args.dry_run).test_packages() + with ReleaseContextManager(release_progress=ReleaseProgress.TEST_RPM) as _: + RpmArtifactory(release_info, dry_run=args.dry_run).test_packages() diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index 5d98d8810a4..39ab3156c80 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -25,11 +25,22 @@ def parse_args(): action="store_true", help="Post release branch statuses", ) + parser.add_argument( + "--post-auto-release-complete", + action="store_true", + help="Post autorelease completion status", + ) parser.add_argument( "--prepare", action="store_true", help="Prepare 
autorelease info", ) + parser.add_argument( + "--wf-status", + type=str, + default="", + help="overall workflow status [success|failure]", + ) return parser.parse_args(), parser @@ -180,6 +191,22 @@ def main(): title=f"Auto Release Status for {release_info.release_branch}", body=release_info.to_dict(), ) + if args.post_auto_release_complete: + assert args.wf_status, "--wf-status Required with --post-auto-release-complete" + if args.wf_status != SUCCESS: + CIBuddy(dry_run=False).post_job_error( + error_description="Autorelease workflow failed", + job_name="Autorelease", + with_instance_info=False, + with_wf_link=True, + critical=True, + ) + else: + CIBuddy(dry_run=False).post_info( + title=f"Autorelease completed", + body="", + with_wf_link=True, + ) elif args.prepare: _prepare(token=args.token or get_best_robot_token()) else: diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index ff8fdba2b6c..3eba5532e66 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -109,6 +109,7 @@ class CIBuddy: job_name: str = "", with_instance_info: bool = True, with_wf_link: bool = True, + critical: bool = False, ) -> None: instance_id, instance_type = "unknown", "unknown" if with_instance_info: @@ -116,7 +117,8 @@ class CIBuddy: instance_type = Shell.run("ec2metadata --instance-type") or instance_type if not job_name: job_name = os.getenv("CHECK_NAME", "unknown") - line_err = f":red_circle: *Error: {error_description}*\n\n" + sign = ":red_circle:" if not critical else ":black_circle:" + line_err = f"{sign} *Error: {error_description}*\n\n" line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n" line_job = f" *Job:* `{job_name}`\n" line_pr_ = f" *PR:* , <{self.commit_url}|{self.sha}>\n" diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index eb25a53d492..efbf014cd52 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -4,7 +4,7 @@ import subprocess import time from contextlib import contextmanager from pathlib import Path -from typing import Any, Iterator, List, Union, Optional, Sequence +from typing import Any, Iterator, List, Union, Optional, Sequence, Tuple import requests @@ -142,6 +142,16 @@ class GHActions: return False + @staticmethod + def get_pr_url_by_branch(repo, branch): + get_url_cmd = ( + f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url'" + ) + url = Shell.run(get_url_cmd) + if not url: + print(f"ERROR: PR nor found, branch [{branch}]") + return url + class Shell: @classmethod @@ -157,7 +167,10 @@ class Shell: return res.stdout.strip() @classmethod - def run(cls, command, check=False): + def run(cls, command, check=False, dry_run=False): + if dry_run: + print(f"Dry-ryn. 
Would run command [{command}]") + return "" print(f"Run command [{command}]") res = "" result = subprocess.run( @@ -178,6 +191,12 @@ class Shell: assert result.returncode == 0 return res.strip() + @classmethod + def run_as_daemon(cls, command): + print(f"Run daemon command [{command}]") + subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with + return 0, "" + @classmethod def check(cls, command): result = subprocess.run( diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 414ec8afd3e..4347cfebb54 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -2,7 +2,6 @@ import argparse import dataclasses import json import os -import subprocess from contextlib import contextmanager from copy import copy @@ -13,7 +12,7 @@ from git_helper import Git, GIT_PREFIX from ssh import SSHAgent from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET from s3_helper import S3Helper -from ci_utils import Shell +from ci_utils import Shell, GHActions from ci_buddy import CIBuddy from version_helper import ( FILE_WITH_VERSION_PATH, @@ -31,32 +30,62 @@ CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS) RELEASE_INFO_FILE = "/tmp/release_info.json" -class ShellRunner: +class ReleaseProgress: + STARTED = "started" + DOWNLOAD_PACKAGES = "download packages" + PUSH_RELEASE_TAG = "push release tag" + PUSH_NEW_RELEASE_BRANCH = "push new release branch" + BUMP_VERSION = "bump version" + CREATE_GH_RELEASE = "create GH release" + EXPORT_TGZ = "export TGZ packages" + EXPORT_RPM = "export RPM packages" + EXPORT_DEB = "export DEB packages" + TEST_TGZ = "test TGZ packages" + TEST_RPM = "test RPM packages" + TEST_DEB = "test DEB packages" - @classmethod - def run( - cls, command, check_retcode=True, print_output=True, async_=False, dry_run=False - ): - if dry_run: - print(f"Dry-run: Would run shell command: [{command}]") - return 0, "" - print(f"Running shell command: [{command}]") - if async_: - subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with - return 0, "" - result = subprocess.run( - command + " 2>&1", - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - check=True, - ) - if print_output: - print(result.stdout) - if check_retcode: - assert result.returncode == 0, f"Return code [{result.returncode}]" - return result.returncode, result.stdout + +class ReleaseProgressDescription: + OK = "OK" + FAILED = "FAILED" + + +class ReleaseContextManager: + def __init__(self, release_progress): + self.release_progress = release_progress + self.release_info = None + + def __enter__(self): + if self.release_progress == ReleaseProgress.STARTED: + # create initial release info + self.release_info = ReleaseInfo( + release_branch="NA", + commit_sha=args.ref, + release_tag="NA", + version="NA", + codename="NA", + previous_release_tag="NA", + previous_release_sha="NA", + release_progress=ReleaseProgress.STARTED, + ).dump() + else: + # fetch release info from fs and update + self.release_info = ReleaseInfo.from_file() + assert self.release_info + assert ( + self.release_info.progress_description == ReleaseProgressDescription.OK + ), "Must be OK on the start of new context" + self.release_info.release_progress = self.release_progress + self.release_info.dump() + return self.release_info + + def __exit__(self, exc_type, exc_value, traceback): + assert self.release_info + if exc_type is not None: + self.release_info.progress_description = ReleaseProgressDescription.FAILED + else: + self.release_info.progress_description = 
ReleaseProgressDescription.OK + self.release_info.dump() @dataclasses.dataclass @@ -74,6 +103,10 @@ class ReleaseInfo: release_url: str = "" debian_command: str = "" rpm_command: str = "" + tgz_command: str = "" + docker_command: str = "" + release_progress: str = "" + progress_description: str = "" @staticmethod def from_file() -> "ReleaseInfo": @@ -85,9 +118,9 @@ class ReleaseInfo: print(f"Dump release info into [{RELEASE_INFO_FILE}]") with open(RELEASE_INFO_FILE, "w", encoding="utf-8") as f: print(json.dumps(dataclasses.asdict(self), indent=2), file=f) + return self - @staticmethod - def prepare(commit_ref: str, release_type: str) -> None: + def prepare(self, commit_ref: str, release_type: str) -> "ReleaseInfo": version = None release_branch = None release_tag = None @@ -97,8 +130,9 @@ class ReleaseInfo: assert release_type in ("patch", "new") if release_type == "new": # check commit_ref is right and on a right branch - ShellRunner.run( - f"git merge-base --is-ancestor origin/{commit_ref} origin/master" + Shell.run( + f"git merge-base --is-ancestor origin/{commit_ref} origin/master", + check=True, ) with checkout(commit_ref): commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True) @@ -130,10 +164,11 @@ class ReleaseInfo: version.with_description(codename) release_branch = f"{version.major}.{version.minor}" release_tag = version.describe - ShellRunner.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags") + Shell.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags", check=True) # check commit is right and on a right branch - ShellRunner.run( - f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}" + Shell.run( + f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}", + check=True, ) if version.patch == 1: expected_version = copy(version) @@ -172,21 +207,22 @@ class ReleaseInfo: and version and codename in ("lts", "stable") ) - res = ReleaseInfo( - release_branch=release_branch, - commit_sha=commit_sha, - release_tag=release_tag, - version=version.string, - codename=codename, - previous_release_tag=previous_release_tag, - previous_release_sha=previous_release_sha, - ) - res.dump() + + self.release_branch = release_branch + self.commit_sha = commit_sha + self.release_tag = release_tag + self.version = version.string + self.codename = codename + self.previous_release_tag = previous_release_tag + self.previous_release_sha = previous_release_sha + self.release_progress = ReleaseProgress.STARTED + self.progress_description = ReleaseProgressDescription.OK + return self def push_release_tag(self, dry_run: bool) -> None: if dry_run: # remove locally created tag from prev run - ShellRunner.run( + Shell.run( f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:" ) # Create release tag @@ -194,16 +230,17 @@ class ReleaseInfo: f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]" ) tag_message = f"Release {self.release_tag}" - ShellRunner.run( - f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}" + Shell.run( + f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}", + check=True, ) cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}" - ShellRunner.run(cmd_push_tag, dry_run=dry_run) + Shell.run(cmd_push_tag, dry_run=dry_run, check=True) @staticmethod def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" - 
ShellRunner.run(cmd, dry_run=dry_run) + Shell.run(cmd, dry_run=dry_run, check=True) def push_new_release_branch(self, dry_run: bool) -> None: assert ( @@ -220,8 +257,8 @@ class ReleaseInfo: ), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]" if dry_run: # remove locally created branch from prev run - ShellRunner.run( - f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch} ||:" + Shell.run( + f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch}" ) print( f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]" @@ -234,7 +271,7 @@ class ReleaseInfo: cmd_push_branch = ( f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}" ) - ShellRunner.run(cmd_push_branch, dry_run=dry_run) + Shell.run(cmd_push_branch, dry_run=dry_run, check=True) print("Create and push backport tags for new release branch") ReleaseInfo._create_gh_label( @@ -243,12 +280,13 @@ class ReleaseInfo: ReleaseInfo._create_gh_label( f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run ) - ShellRunner.run( + Shell.run( f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' --head {new_release_branch} {pr_labels} --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' """, dry_run=dry_run, + check=True, ) def update_version_and_contributors_list(self, dry_run: bool) -> None: @@ -274,31 +312,34 @@ class ReleaseInfo: body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") actor = os.getenv("GITHUB_ACTOR", "") or "me" cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file} --label 'do not test' --assignee @{actor}" - ShellRunner.run(cmd_commit_version_upd, dry_run=dry_run) - ShellRunner.run(cmd_push_branch, dry_run=dry_run) - ShellRunner.run(cmd_create_pr, dry_run=dry_run) + Shell.run(cmd_commit_version_upd, check=True, dry_run=dry_run) + Shell.run(cmd_push_branch, check=True, dry_run=dry_run) + Shell.run(cmd_create_pr, check=True, dry_run=dry_run) if dry_run: - ShellRunner.run( - f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" - ) - ShellRunner.run( + Shell.run(f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'") + Shell.run( f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" ) + self.version_bump_pr = GHActions.get_pr_url_by_branch( + repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors + ) - def update_release_info(self, dry_run: bool) -> None: + def update_release_info(self, dry_run: bool) -> "ReleaseInfo": branch = f"auto/{release_info.release_tag}" if not dry_run: - get_url_cmd = f"gh pr list --repo {GITHUB_REPOSITORY} --head {branch} --json url --jq '.[0].url'" - url = Shell.run(get_url_cmd) - if url: - print(f"Update release info with Changelog PR link [{url}]") - else: - print(f"WARNING: Changelog PR not found, branch [{branch}]") + url = GHActions.get_pr_url_by_branch(repo=GITHUB_REPOSITORY, branch=branch) else: url = "dry-run" + print(f"ChangeLog PR url [{url}]") self.changelog_pr = url + print(f"Release url [{url}]") + self.release_url = ( + f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + ) + self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.release_branch} clickhouse --version" self.dump() + 
return self def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: repo = os.getenv("GITHUB_REPOSITORY") @@ -376,7 +417,7 @@ class PackageDownloader: self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"] self.file_to_type = {} - ShellRunner.run(f"mkdir -p {self.LOCAL_DIR}") + Shell.run(f"mkdir -p {self.LOCAL_DIR}") for package_type in self.PACKAGE_TYPES: for package in self.package_names: @@ -426,7 +467,7 @@ class PackageDownloader: return res def run(self): - ShellRunner.run(f"rm -rf {self.LOCAL_DIR}/*") + Shell.run(f"rm -rf {self.LOCAL_DIR}/*") for package_file in ( self.deb_package_files + self.rpm_package_files + self.tgz_package_files ): @@ -499,6 +540,37 @@ class PackageDownloader: return True +@contextmanager +def checkout(ref: str) -> Iterator[None]: + orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True) + rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" + assert orig_ref + if ref not in (orig_ref,): + Shell.run(f"{GIT_PREFIX} checkout {ref}") + try: + yield + except (Exception, KeyboardInterrupt) as e: + print(f"ERROR: Exception [{e}]") + Shell.run(rollback_cmd) + raise + Shell.run(rollback_cmd) + + +@contextmanager +def checkout_new(ref: str) -> Iterator[None]: + orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True) + rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" + assert orig_ref + Shell.run(f"{GIT_PREFIX} checkout -b {ref}", check=True) + try: + yield + except (Exception, KeyboardInterrupt) as e: + print(f"ERROR: Exception [{e}]") + Shell.run(rollback_cmd) + raise + Shell.run(rollback_cmd) + + def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, @@ -556,44 +628,26 @@ def parse_args() -> argparse.Namespace: action="store_true", help="do not make any actual changes in the repo, just show what will be done", ) - + parser.add_argument( + "--set-progress-started", + action="store_true", + help="Set new progress step, --progress must be set", + ) + parser.add_argument( + "--progress", + type=str, + help="Progress step name, see @ReleaseProgress", + ) + parser.add_argument( + "--set-progress-completed", + action="store_true", + help="Set current progress step to OK (completed)", + ) return parser.parse_args() -@contextmanager -def checkout(ref: str) -> Iterator[None]: - _, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD") - rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" - assert orig_ref - if ref not in (orig_ref,): - Shell.run(f"{GIT_PREFIX} checkout {ref}") - try: - yield - except (Exception, KeyboardInterrupt) as e: - print(f"ERROR: Exception [{e}]") - ShellRunner.run(rollback_cmd) - raise - ShellRunner.run(rollback_cmd) - - -@contextmanager -def checkout_new(ref: str) -> Iterator[None]: - _, orig_ref = ShellRunner.run(f"{GIT_PREFIX} symbolic-ref --short HEAD") - rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" - assert orig_ref - ShellRunner.run(f"{GIT_PREFIX} checkout -b {ref}") - try: - yield - except (Exception, KeyboardInterrupt) as e: - print(f"ERROR: Exception [{e}]") - ShellRunner.run(rollback_cmd) - raise - ShellRunner.run(rollback_cmd) - - if __name__ == "__main__": args = parse_args() - assert args.dry_run # prepare ssh for git if needed _ssh_agent = None @@ -605,35 +659,56 @@ if __name__ == "__main__": _ssh_agent.print_keys() if args.prepare_release_info: - assert ( - args.ref and args.release_type - ), "--ref and --release-type must be provided with 
--prepare-release-info" - ReleaseInfo.prepare(commit_ref=args.ref, release_type=args.release_type) - if args.push_release_tag: - release_info = ReleaseInfo.from_file() - release_info.push_release_tag(dry_run=args.dry_run) - if args.push_new_release_branch: - release_info = ReleaseInfo.from_file() - release_info.push_new_release_branch(dry_run=args.dry_run) - if args.create_bump_version_pr: - # TODO: store link to PR in release info - release_info = ReleaseInfo.from_file() - release_info.update_version_and_contributors_list(dry_run=args.dry_run) + with ReleaseContextManager( + release_progress=ReleaseProgress.STARTED + ) as release_info: + assert ( + args.ref and args.release_type + ), "--ref and --release-type must be provided with --prepare-release-info" + release_info.prepare(commit_ref=args.ref, release_type=args.release_type) + if args.download_packages: - release_info = ReleaseInfo.from_file() - p = PackageDownloader( - release=release_info.release_branch, - commit_sha=release_info.commit_sha, - version=release_info.version, - ) - p.run() + with ReleaseContextManager( + release_progress=ReleaseProgress.DOWNLOAD_PACKAGES + ) as release_info: + p = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + p.run() + + if args.push_release_tag: + with ReleaseContextManager( + release_progress=ReleaseProgress.PUSH_RELEASE_TAG + ) as release_info: + release_info.push_release_tag(dry_run=args.dry_run) + + if args.push_new_release_branch: + with ReleaseContextManager( + release_progress=ReleaseProgress.PUSH_NEW_RELEASE_BRANCH + ) as release_info: + release_info.push_new_release_branch(dry_run=args.dry_run) + + if args.create_bump_version_pr: + with ReleaseContextManager( + release_progress=ReleaseProgress.BUMP_VERSION + ) as release_info: + release_info.update_version_and_contributors_list(dry_run=args.dry_run) + if args.create_gh_release: - release_info = ReleaseInfo.from_file() - p = PackageDownloader( - release=release_info.release_branch, - commit_sha=release_info.commit_sha, - version=release_info.version, - ) + with ReleaseContextManager( + release_progress=ReleaseProgress.CREATE_GH_RELEASE + ) as release_info: + p = PackageDownloader( + release=release_info.release_branch, + commit_sha=release_info.commit_sha, + version=release_info.version, + ) + release_info.create_gh_release( + packages_files=p.get_all_packages_files(), dry_run=args.dry_run + ) + if args.post_status: release_info = ReleaseInfo.from_file() release_info.update_release_info(dry_run=args.dry_run) @@ -646,6 +721,21 @@ if __name__ == "__main__": f"Failed to issue new release", dataclasses.asdict(release_info) ) + if args.set_progress_started: + ri = ReleaseInfo.from_file() + ri.release_progress = args.progress + ri.progress_description = ReleaseProgressDescription.FAILED + ri.dump() + assert args.progress, "Progress step name must be provided" + + if args.set_progress_completed: + ri = ReleaseInfo.from_file() + assert ( + ri.progress_description == ReleaseProgressDescription.FAILED + ), "Must be FAILED before set to OK" + ri.progress_description = ReleaseProgressDescription.OK + ri.dump() + # tear down ssh if _ssh_agent and _key_pub: _ssh_agent.remove(_key_pub) From 519494a9d08f4a45bd995b712808b908ace94c0c Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 20 Jul 2024 18:51:20 +0000 Subject: [PATCH 0575/2361] Add tests, docs, implement new syntax sugar for Array(JSON), fix small ugs --- docs/en/operations/settings/settings.md | 7 + 
docs/en/sql-reference/data-types/json.md | 104 +- .../sql-reference/data-types/object-json.md | 2 +- .../sql-reference/data-types/object_json.md | 86 - .../sql-reference/functions/json-functions.md | 236 ++ src/Columns/ColumnObject.cpp | 2 +- src/Columns/ColumnObject.h | 5 +- src/DataTypes/DataTypeObject.cpp | 4 +- src/DataTypes/DataTypeObject.h | 7 +- .../Serializations/SerializationObject.cpp | 3 + .../SerializationObjectDynamicPath.cpp | 12 + .../SerializationVariantElement.cpp | 19 +- src/Formats/JSONExtractTree.cpp | 8 +- src/Functions/JSONEmpty.cpp | 2 +- src/Functions/JSONPaths.cpp | 228 +- src/Parsers/ExpressionElementParsers.cpp | 51 +- src/Parsers/ExpressionElementParsers.h | 1 + .../0_stateless/00727_concat.reference | 1 + tests/queries/0_stateless/00727_concat.sql | 2 + .../01825_new_type_json_10.reference | 14 + .../0_stateless/01825_new_type_json_10.sql | 16 + .../01825_new_type_json_11.reference | 13 + .../0_stateless/01825_new_type_json_11.sh | 64 + .../01825_new_type_json_12.reference | 14 + .../0_stateless/01825_new_type_json_12.sh | 54 + .../01825_new_type_json_13.reference | 4 + .../0_stateless/01825_new_type_json_13.sh | 50 + .../01825_new_type_json_18.reference | 2 + .../0_stateless/01825_new_type_json_18.sql | 16 + .../01825_new_type_json_2.reference | 24 + .../0_stateless/01825_new_type_json_2.sql | 41 + .../01825_new_type_json_3.reference.j2 | 59 + .../0_stateless/01825_new_type_json_3.sql.j2 | 64 + .../01825_new_type_json_6.reference | 10 + .../0_stateless/01825_new_type_json_6.sh | 59 + ...1825_new_type_json_add_column.reference.j2 | 8 + .../01825_new_type_json_add_column.sql.j2 | 23 + .../01825_new_type_json_btc.reference | 33 + .../0_stateless/01825_new_type_json_btc.sh | 31 + .../01825_new_type_json_ephemeral.reference | 1 + .../01825_new_type_json_ephemeral.sql | 18 + ...w_type_json_ghdata_insert_select.reference | 1 + ...1825_new_type_json_ghdata_insert_select.sh | 27 + .../01825_new_type_json_nbagames.reference | 65 + .../01825_new_type_json_nbagames.sh | 54 + ...25_new_type_json_parallel_insert.reference | 1 + .../01825_new_type_json_parallel_insert.sql | 10 + .../queries/0_stateless/01825_type_json_11.sh | 2 +- .../queries/0_stateless/01825_type_json_12.sh | 2 +- .../queries/0_stateless/01825_type_json_13.sh | 2 +- .../0_stateless/01825_type_json_14.sql | 2 +- .../queries/0_stateless/01825_type_json_15.sh | 2 +- .../queries/0_stateless/01825_type_json_16.sh | 2 +- .../0_stateless/01825_type_json_17.sql | 2 +- .../0_stateless/01825_type_json_3.sql.j2 | 2 +- .../queries/0_stateless/01825_type_json_4.sh | 2 +- .../queries/0_stateless/01825_type_json_5.sql | 2 +- .../queries/0_stateless/01825_type_json_6.sh | 2 +- .../queries/0_stateless/01825_type_json_7.sh | 2 +- .../queries/0_stateless/01825_type_json_8.sh | 2 +- .../queries/0_stateless/01825_type_json_9.sql | 2 +- .../01825_type_json_add_column.sql.j2 | 2 +- .../0_stateless/01825_type_json_bools.sql | 2 +- .../0_stateless/01825_type_json_btc.sh | 2 +- .../0_stateless/01825_type_json_describe.sql | 2 +- .../01825_type_json_distributed.sql | 2 +- .../01825_type_json_empty_string.sql | 2 +- .../0_stateless/01825_type_json_ephemeral.sql | 2 +- .../0_stateless/01825_type_json_field.sql | 2 +- .../0_stateless/01825_type_json_from_map.sql | 2 +- .../01825_type_json_ghdata_insert_select.sh | 4 +- .../0_stateless/01825_type_json_in_array.sql | 10 +- .../01825_type_json_in_other_types.sh | 2 +- .../01825_type_json_insert_select.sql | 4 +- .../01825_type_json_missed_values.sql | 2 +- 
.../01825_type_json_multiple_files.sh | 8 +- .../0_stateless/01825_type_json_mutations.sql | 2 +- .../0_stateless/01825_type_json_nbagames.sh | 2 +- .../0_stateless/01825_type_json_order_by.sql | 6 +- .../01825_type_json_parallel_insert.sql | 2 +- .../01825_type_json_partitions.sql | 2 +- .../01825_type_json_schema_race_long.sh | 2 +- .../0_stateless/01825_type_json_sparse.sql | 2 +- .../01825_type_json_wide_parts_merge.sql | 2 +- .../0_stateless/02242_subcolumns_sizes.sql | 2 +- .../0_stateless/02246_flatten_tuple.sql | 2 +- .../02286_tuple_numeric_identifier.sql | 2 +- .../02421_type_json_async_insert.sh | 2 +- .../02421_type_json_empty_parts.sh | 6 +- ...02482_json_nested_arrays_with_same_keys.sh | 2 +- .../0_stateless/02513_validate_data_types.sql | 4 +- .../02553_type_json_attach_partition.sql | 2 +- .../02553_type_object_analyzer.sql | 2 +- .../queries/0_stateless/02717_pretty_json.sql | 2 +- ...75_show_columns_called_from_clickhouse.sql | 2 +- ...2775_show_columns_called_from_mysql.expect | 2 +- .../0_stateless/02870_per_column_settings.sql | 2 +- ...2935_format_with_arbitrary_types.reference | 1 + .../02935_format_with_arbitrary_types.sql | 2 + .../02969_mysql_cast_type_aliases.reference | 4 +- .../02969_mysql_cast_type_aliases.sql | 2 +- .../03157_dynamic_type_json.reference | 8 +- .../0_stateless/03157_dynamic_type_json.sql | 4 +- .../03158_dynamic_type_from_variant.sql | 1 - .../03159_dynamic_type_all_types.sql | 1 - .../03205_json_cast_from_string.reference | 18 + .../03205_json_cast_from_string.sql | 22 + .../0_stateless/03205_json_syntax.reference | 0 .../queries/0_stateless/03205_json_syntax.sql | 23 + ...3206_json_parsing_and_formatting.reference | 195 ++ .../03206_json_parsing_and_formatting.sh | 56 + .../03207_json_read_subcolumns_1.reference | 2080 +++++++++++++++++ .../03207_json_read_subcolumns_1.sh | 125 + ..._subcolumns_2_compact_merge_tree.reference | 71 + ...on_read_subcolumns_2_compact_merge_tree.sh | 142 ++ ...07_json_read_subcolumns_2_memory.reference | 35 + .../03207_json_read_subcolumns_2_memory.sh | 137 ++ ...ead_subcolumns_2_wide_merge_tree.reference | 71 + ..._json_read_subcolumns_2_wide_merge_tree.sh | 142 ++ ..._array_of_json_read_subcolumns_1.reference | 560 +++++ .../03208_array_of_json_read_subcolumns_1.sh | 71 + ..._subcolumns_2_compact_merge_tree.reference | 65 + ...on_read_subcolumns_2_compact_merge_tree.sh | 73 + ...of_json_read_subcolumns_2_memory.reference | 32 + ..._array_of_json_read_subcolumns_2_memory.sh | 68 + ...ead_subcolumns_2_wide_merge_tree.reference | 65 + ..._json_read_subcolumns_2_wide_merge_tree.sh | 73 + ...3209_json_type_horizontal_merges.reference | 100 + .../03209_json_type_horizontal_merges.sh | 70 + .../03209_json_type_vertical_merges.reference | 100 + .../03209_json_type_vertical_merges.sh | 70 + ...03210_json_type_alter_add_column.reference | 75 + .../03210_json_type_alter_add_column.sh | 47 + .../03211_nested_json_merges.reference | 88 + .../0_stateless/03211_nested_json_merges.sh | 65 + 135 files changed, 6188 insertions(+), 258 deletions(-) delete mode 100644 docs/en/sql-reference/data-types/object_json.md create mode 100644 tests/queries/0_stateless/01825_new_type_json_10.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_10.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_11.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_11.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_12.reference create mode 100755 
tests/queries/0_stateless/01825_new_type_json_12.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_13.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_13.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_18.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_18.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_2.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_2.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_3.reference.j2 create mode 100644 tests/queries/0_stateless/01825_new_type_json_3.sql.j2 create mode 100644 tests/queries/0_stateless/01825_new_type_json_6.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_6.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_add_column.reference.j2 create mode 100644 tests/queries/0_stateless/01825_new_type_json_add_column.sql.j2 create mode 100644 tests/queries/0_stateless/01825_new_type_json_btc.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_btc.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_ephemeral.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_ephemeral.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_nbagames.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_nbagames.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql create mode 100644 tests/queries/0_stateless/03205_json_cast_from_string.reference create mode 100644 tests/queries/0_stateless/03205_json_cast_from_string.sql create mode 100644 tests/queries/0_stateless/03205_json_syntax.reference create mode 100644 tests/queries/0_stateless/03205_json_syntax.sql create mode 100644 tests/queries/0_stateless/03206_json_parsing_and_formatting.reference create mode 100755 tests/queries/0_stateless/03206_json_parsing_and_formatting.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1.reference create mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_1.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference create mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference create mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference create mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference create mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference create mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference create mode 100755 
tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference create mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh create mode 100644 tests/queries/0_stateless/03209_json_type_horizontal_merges.reference create mode 100755 tests/queries/0_stateless/03209_json_type_horizontal_merges.sh create mode 100644 tests/queries/0_stateless/03209_json_type_vertical_merges.reference create mode 100755 tests/queries/0_stateless/03209_json_type_vertical_merges.sh create mode 100644 tests/queries/0_stateless/03210_json_type_alter_add_column.reference create mode 100755 tests/queries/0_stateless/03210_json_type_alter_add_column.sh create mode 100644 tests/queries/0_stateless/03211_nested_json_merges.reference create mode 100755 tests/queries/0_stateless/03211_nested_json_merges.sh diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index c3f697c3bdc..9ddfe985cee 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5608,3 +5608,10 @@ Default value: `10000000`. Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached. Default value: `1GiB`. + +## use_json_alias_for_old_object_type + +When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/ob) type instead of the new [JSON](../../sql-reference/data-types/json.md) type. +This setting requires server restart to take effect when changed. + +Default value: `false`. diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index be75f909684..1001ad63999 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -19,7 +19,7 @@ To declare a column of `JSON` type, use the following syntax: JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP REGEXP 'paths_regexp') ``` Where: -- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1000`. +- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1024`. - `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`. - `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type. 
- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. If specified path is a nested JSON object, the whole nested object will be skipped. @@ -267,18 +267,18 @@ JSON paths that contains an array of objects are parsed as type `Array(JSON)` an ```sql CREATE TABLE test (json JSON) ENGINE = Memory; INSERT INTO test VALUES -('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : {"g" : 42.42}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : {"g" : 43.43, "h" : "2020-01-01"}}]}}'), +('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : [[{"g" : 42.42}]], "k" : {"j" : 1000}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : [[{"g" : 43.43, "h" : "2020-01-01"}]], "k" : {"j" : 2000}}]}}'), ('{"a" : {"b" : [1, 2, 3]}}'), -('{"a" : {"b" : [{"c" : 44, "f" : {"h" : "2020-01-02"}}, {"e" : [4, 5, 6], "d" : "World", "f" : {"g" : 44.44}}]}}'); +('{"a" : {"b" : [{"c" : 44, "f" : [[{"h" : "2020-01-02"}]]}, {"e" : [4, 5, 6], "d" : "World", "f" : [[{"g" : 44.44}]], "k" : {"j" : 3000}}]}}'); SELECT json FROM test; ``` -```text -┌─json──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┠-│ {"a":{"b":[{"c":"42","d":"Hello","f":{"g":42.42}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":{"g":43.43,"h":"2020-01-01"}}]}} │ -│ {"a":{"b":["1","2","3"]}} │ -│ {"a":{"b":[{"c":"44","f":{"h":"2020-01-02"}},{"d":"World","e":["4","5","6"],"f":{"g":44.44}}]}} │ -└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +```text3 +┌─json────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┠+│ {"a":{"b":[{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}]}} │ +│ {"a":{"b":["1","2","3"]}} │ +│ {"a":{"b":[{"c":"44","f":[[{"h":"2020-01-02"}]]},{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}]}} │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` ```sql @@ -286,55 +286,87 @@ SELECT json.a.b, dynamicType(json.a.b) FROM test; ``` ```text -┌─json.a.b────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)───────────────────────────────────┠-│ ['{"c":"42","d":"Hello","f":{"g":42.42}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":{"g":43.43,"h":"2020-01-01"}}'] │ Array(JSON(max_dynamic_types=8, max_dynamic_paths=125)) │ -│ [1,2,3] │ Array(Nullable(Int64)) │ -│ ['{"c":"44","f":{"h":"2020-01-02"}}','{"d":"World","e":["4","5","6"],"f":{"g":44.44}}'] │ Array(JSON(max_dynamic_types=8, max_dynamic_paths=125)) │ -└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────────────────────────────────────────────┘ +┌─json.a.b──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)────────────────────────────────────┠+│ ['{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}'] │ 
Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ +│ [1,2,3] │ Array(Nullable(Int64)) │ +│ ['{"c":"44","f":[[{"h":"2020-01-02"}]]}','{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ +└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────┘ ``` As you can notice, the `max_dynamic_types/max_dynamic_paths` parameters of the nested `JSON` type were reduced compared to the default values. It's needed to avoid number of subcolumns to grow uncontrolled on nested arrays of JSON objects. -Let's try to read subcolumns of this nested `JSON` column: +Let's try to read subcolumns from this nested `JSON` column: ```sql -SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f.g, json.a.b.:`Array(JSON)`.f.g FROM test; +SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f, json.a.b.:`Array(JSON)`.d FROM test; ``` ```text -┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f.g─┬─json.a.b.:`Array(JSON)`.f.g─┠-│ [42,43,NULL] │ [42.42,NULL,43.43] │ [42.42,NULL,43.43] │ -│ [] │ [] │ [] │ -│ [44,NULL] │ [NULL,44.44] │ [NULL,44.44] │ -└───────────────────────────┴─────────────────────────────┴─────────────────────────────┘ +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┠+│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ +└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ ``` -We can also read subcolumns of `Dynamic` columns: +We can avoid writing `Array(JSON)` subcolumn name using special syntax: ```sql -SELECT json.a.b.:`Array(JSON)`.f.h.:Date FROM test; +SELECT json.a.b[].c, json.a.b[].f, json.a.b[].d FROM test; ``` ```text -┌─json.a.b.:`Array(JSON)`.f.h.:`Date`─┠-│ [NULL,NULL,'2020-01-01'] │ -│ [] │ -│ ['2020-01-02',NULL] │ -└─────────────────────────────────────┘ +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┠+│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ +└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ +``` + +The number of `[]` after path indicates the array level. 
`json.path[][]` will be transformed to `json.path.:Array(Array(JSON))` + +Let's check the paths and types inside our `Array(JSON)`: + +```sql +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b[]))) FROM test; +``` + +```text +┌─arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b.:`Array(JSON)`)))──┠+│ ('c','Int64') │ +│ ('d','String') │ +│ ('f','Array(Array(JSON(max_dynamic_types=8, max_dynamic_paths=64)))') │ +│ ('k.j','Int64') │ +│ ('e','Array(Nullable(Int64))') │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +Let's read subcolumns from `Array(JSON)` column: + +```sql +SELECT json.a.b[].c.:Int64, json.a.b[].f[][].g.:Float64, json.a.b[].f[][].h.:Date FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c.:`Int64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.g.:`Float64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.h.:`Date`─┠+│ [42,43,NULL] │ [[[42.42]],[],[[43.43]]] │ [[[NULL]],[],[['2020-01-01']]] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[[NULL]],[[44.44]]] │ [[['2020-01-02']],[[NULL]]] │ +└────────────────────────────────────┴──────────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ ``` We can also read sub-object subcolumns from nested `JSON` column: ```sql -SELECT json.a.b.:`Array(JSON)`.^f FROM test +SELECT json.a.b[].^k FROM test ``` ```text -┌─json.a.b.:`Array(JSON)`.^`f`────────────────────────┠-│ ['{"g":42.42}','{}','{"g":43.43,"h":"2020-01-01"}'] │ -│ [] │ -│ ['{"h":"2020-01-02"}','{"g":44.44}'] │ -└─────────────────────────────────────────────────────┘ +┌─json.a.b.:`Array(JSON)`.^`k`─────────┠+│ ['{"j":"1000"}','{}','{"j":"2000"}'] │ +│ [] │ +│ ['{}','{"j":"3000"}'] │ +└──────────────────────────────────────┘ ``` ## Reading JSON type from the data @@ -386,7 +418,7 @@ SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP REGEXP \' ## Reaching the limit of dynamic paths inside JSON -`JSON` data type can store only limited number of paths as separate subcolumns inside. By default, this limit is 1000, but you can change it in type declaration using parameter `max_dynamic_paths`. +`JSON` data type can store only limited number of paths as separate subcolumns inside. By default, this limit is 1024, but you can change it in type declaration using parameter `max_dynamic_paths`. When the limit is reached, all new paths inserted to `JSON` column will be stored in a single shared data structure. It's still possible to read such paths as subcolumns, but it will require reading the whole shared data structure to extract the values of this path. This limit is needed to avoid the enormous number of different subcolumns that can make the table unusable. @@ -470,7 +502,9 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and moved paths `e` and `d` to shared data structure. +## Introspection functions +There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes). 
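For illustration only, a minimal sketch of how the introspection functions listed above can be combined with the `[]` subcolumn syntax described earlier. It assumes the `test` table from the previous examples; the output is omitted because it depends on the inserted data and the part layout:

```sql
-- Which paths are stored as separate dynamic subcolumns and which were
-- moved to the shared data structure, per row:
SELECT json, JSONDynamicPaths(json), JSONSharedDataPaths(json) FROM test;

-- Paths and their types inside the nested Array(JSON) values, using the [] syntax:
SELECT DISTINCT arrayJoin(JSONDynamicPathsWithTypes(arrayJoin(json.a.b[]))) FROM test;
```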
## Tips for better usage of the JSON type diff --git a/docs/en/sql-reference/data-types/object-json.md b/docs/en/sql-reference/data-types/object-json.md index 36835a42db8..c6fa6f0e882 100644 --- a/docs/en/sql-reference/data-types/object-json.md +++ b/docs/en/sql-reference/data-types/object-json.md @@ -13,7 +13,7 @@ keywords: [object, data type] Stores JavaScript Object Notation (JSON) documents in a single column. -`JSON` is an alias for `Object('json')`. +`JSON` can be used as an alias to `Object('json')` when setting [use_json_alias_for_old_object_type](../../operations/settings/settings.md#usejsonaliasforoldobjecttype) is enabled. ## Example diff --git a/docs/en/sql-reference/data-types/object_json.md b/docs/en/sql-reference/data-types/object_json.md deleted file mode 100644 index 39e37abad82..00000000000 --- a/docs/en/sql-reference/data-types/object_json.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -slug: /en/sql-reference/data-types/json -sidebar_position: 26 -sidebar_label: JSON ---- - -# JSON - -:::note -This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. -::: - -Stores JavaScript Object Notation (JSON) documents in a single column. - -`JSON` is an alias for `Object('json')`. - -:::note -The JSON data type is an obsolete feature. Do not use it. -If you want to use it, set `allow_experimental_object_type = 1`. -::: - -## Example - -**Example 1** - -Creating a table with a `JSON` column and inserting data into it: - -```sql -CREATE TABLE json -( - o JSON -) -ENGINE = Memory -``` - -```sql -INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}') -``` - -```sql -SELECT o.a, o.b.c, o.b.d[3] FROM json -``` - -```text -┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┠-│ 1 │ 2 │ 3 │ -└─────┴───────┴────────────────────────┘ -``` - -**Example 2** - -To be able to create an ordered `MergeTree` family table the sorting key has to be extracted into its column. For example, to insert a file of compressed HTTP access logs in JSON format: - -```sql -CREATE TABLE logs -( - timestamp DateTime, - message JSON -) -ENGINE = MergeTree -ORDER BY timestamp -``` - -```sql -INSERT INTO logs -SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json -FROM file('access.json.gz', JSONAsString) -``` - -## Displaying JSON columns - -When displaying a `JSON` column ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can display the field names as well by setting `output_format_json_named_tuples_as_objects = 1`: - -```sql -SET output_format_json_named_tuples_as_objects = 1 - -SELECT * FROM json FORMAT JSONEachRow -``` - -```text -{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}} -``` - -## Related Content - -- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index 7bff6a6cba5..b02dc3d1d8a 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -1155,3 +1155,239 @@ SELECT jsonMergePatch('{"a":1}', '{"name": "joey"}', '{"name": "tom"}', '{"name" │ {"a":1,"name":"zoey"} │ └───────────────────────┘ ``` + +### JSONAllPaths + +Returns the list of all paths stored in each row in [JSON](../data-types/json.md) column. 
+ +**Syntax** + +``` sql +JSONAllPaths(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- An array of paths. [Array(String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPaths(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONAllPaths(json)─┠+│ {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a','c'] │ +└──────────────────────────────────────┴────────────────────┘ +``` + +### JSONAllPathsWithTypes + +Returns the map of all paths and their data types stored in each row in [JSON](../data-types/json.md) column. + +**Syntax** + +``` sql +JSONAllPathsWithTypes(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- An array of paths. [Map(String, String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPathsWithTypes(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONAllPathsWithTypes(json)───────────────┠+│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {'b':'String'} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))','c':'Date'} │ +└──────────────────────────────────────┴───────────────────────────────────────────┘ +``` + +### JSONDynamicPaths + +Returns the list of dynamic paths that are stored as separate subcolumns in [JSON](../data-types/json.md) column. + +**Syntax** + +``` sql +JSONDynamicPaths(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- An array of paths. [Array(String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPaths(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONDynamicPaths(json)─┠+| {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ [] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a'] │ +└──────────────────────────────────────┴────────────────────────┘ +``` + +### JSONDynamicPathsWithTypes + +Returns the map of dynamic paths that are stored as separate subcolumns and their types in each row in [JSON](../data-types/json.md) column. + +**Syntax** + +``` sql +JSONAllPathsWithTypes(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- An array of paths. [Map(String, String)](../data-types/array.md). 
+ +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPathsWithTypes(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┠+│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │ +└──────────────────────────────────────┴─────────────────────────────────┘ +``` + +### JSONSharedDataPaths + +Returns the list of paths that are stored in shared data structure in [JSON](../data-types/json.md) column. + +**Syntax** + +``` sql +JSONSharedDataPaths(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- An array of paths. [Array(String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONSharedDataPaths(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONSharedDataPaths(json)─┠+│ {"a":"42"} │ [] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['c'] │ +└──────────────────────────────────────┴───────────────────────────┘ +``` + +### JSONSharedDataPathsWithTypes + +Returns the map of paths that are stored in shared data structure and their types in each row in [JSON](../data-types/json.md) column. + +**Syntax** + +``` sql +JSONSharedDataPathsWithTypes(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- An array of paths. [Map(String, String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONSharedDataPathsWithTypes(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONSharedDataPathsWithTypes(json)─┠+│ {"a":"42"} │ {} │ +│ {"b":"Hello"} │ {'b':'String'} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'c':'Date'} │ +└──────────────────────────────────────┴────────────────────────────────────┘ +``` + +### JSONEmpty + +Checks whether the input [JSON](../data-types/json.md) object is empty. + +``` sql +JSONEmpty(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/json.md). + +**Returned value** + +- Returns `1` for an empty JSON object or `0` for a non-empty JSON object. [UInt8](../data-types/int-uint.md). 
+ +**Example** + +``` sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {}}, {"json" : {"a" : [1, 2, 3], "b" : "2020-01-01"}}, {"json" : {}}, +SELECT json, JSONEmpty(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONEmpty(json)─┠+│ {} │ 1 │ +│ {"a":["1","2","3"],"b":"2020-01-01"} │ 0 │ +│ {} │ 1 │ +└──────────────────────────────────────┴─────────────────┘ +``` diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index bd6f759ee30..f9a3af601e9 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -424,7 +424,7 @@ void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t len void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) #endif { - /// TODO: try to parallelize doInsertRangeFrom over typed/dynamic paths. + /// TODO: try to parallelize doInsertRangeFrom over typed/dynamic paths if it makes sense. const auto & src_object_column = assert_cast(src); /// First, insert typed paths, they must be the same for both columns. diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 72ec20bf534..b130c7f4468 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -101,14 +101,15 @@ public: ColumnPtr replicate(const Offsets & replicate_offsets) const override; MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; + void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &) const override; + void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} + /// Values of ColumnObject are not comparable. #if !defined(ABORT_ON_LOGICAL_ERROR) int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } #else int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; } #endif - void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &) const override; - void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} void getExtremes(Field & min, Field & max) const override { min = Object(); diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 0e7b46ffbd2..bae1d1935b5 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -14,9 +14,7 @@ #include #include #include - #include - #if USE_SIMDJSON #include #endif @@ -480,7 +478,7 @@ static DataTypePtr createJSON(const ASTPtr & arguments) void registerDataTypeJSON(DataTypeFactory & factory) { if (!Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) - factory.registerDataType("JSON", createJSON); + factory.registerDataType("JSON", createJSON, DataTypeFactory::CaseInsensitive); } } diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 61364dfe5ed..2984078370b 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -24,7 +24,7 @@ public: }; /// Don't change these constants, it can break backward compatibility. 
- static constexpr size_t DEFAULT_MAX_SEPARATELY_STORED_PATHS = 1000; + static constexpr size_t DEFAULT_MAX_SEPARATELY_STORED_PATHS = 1024; static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR = 4; static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR = 2; @@ -44,10 +44,7 @@ public: MutableColumnPtr createColumn() const override; - Field getDefault() const override - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDefault() is not implemented for data type {}", getName()); - } + Field getDefault() const override { return Object(); } bool isParametric() const override { return true; } bool canBeInsideNullable() const override { return false; } diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 0fa1ef7d2ca..126c2ffec6a 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -423,6 +423,9 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( DeserializeBinaryBulkStatePtr & state, SubstreamsCache * cache) const { + if (!state) + return; + auto * object_state = checkAndGetState(state); auto * structure_state = checkAndGetState(object_state->structure_state); auto mutable_column = column->assumeMutable(); diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp index 3a635671978..5323079c54b 100644 --- a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp +++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp @@ -147,6 +147,18 @@ void SerializationObjectDynamicPath::deserializeBinaryBulkWithMultipleStreams( settings.path.pop_back(); } /// Otherwise, read the whole shared data column and extract requested path from it. + /// TODO: We can read several subcolumns of the same path located in the shared data + /// and right now we extract the whole path column from shared data every time + /// and then extract the requested subcolumns. We can optimize it and use substreams + /// cache here to avoid extracting the same path from shared data several times. + /// + /// TODO: We can change the serialization of shared data to optimize reading paths from it. + /// Right now we cannot know if shared data contains our path in current range or not, + /// but we can change the serialization and write the list of all paths stored in shared + /// data before each granule, and then replace the column that stores paths with column + /// with indexes in this list. It can also reduce the storage, because we will store + /// each path only once and can replace UInt64 string offset column with indexes column + /// that can have smaller type depending on the number of paths in the list. 
else { settings.path.push_back(Substream::ObjectSharedData); diff --git a/src/DataTypes/Serializations/SerializationVariantElement.cpp b/src/DataTypes/Serializations/SerializationVariantElement.cpp index 8ceab17cba4..df70c6394f4 100644 --- a/src/DataTypes/Serializations/SerializationVariantElement.cpp +++ b/src/DataTypes/Serializations/SerializationVariantElement.cpp @@ -11,6 +11,7 @@ namespace DB namespace ErrorCodes { extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; } struct SerializationVariantElement::DeserializeBinaryBulkStateVariantElement : public ISerialization::DeserializeBinaryBulkState @@ -188,27 +189,21 @@ void SerializationVariantElement::deserializeBinaryBulkWithMultipleStreams( assert_cast(*variant_element_state->variant->assumeMutable()).nestedRemoveNullable(); } - /// If nothing to deserialize, just insert defaults. - if (variant_limit == 0) - { - mutable_column->insertManyDefaults(num_new_discriminators); - return; - } - addVariantToPath(settings.path); nested_serialization->deserializeBinaryBulkWithMultipleStreams(variant_element_state->variant, *variant_limit, settings, variant_element_state->variant_element_state, cache); removeVariantFromPath(settings.path); - /// If nothing was deserialized when variant_limit > 0 - /// it means that we don't have a stream for such sub-column. - /// It may happen during ALTER MODIFY column with Variant extension. - /// In this case we should just insert default values. - if (variant_element_state->variant->empty()) + /// If there was nothing to deserialize or nothing was actually deserialized when variant_limit > 0, just insert defaults. + /// The second case means that we don't have a stream for such sub-column. It may happen during ALTER MODIFY column with Variant extension. + if (variant_limit == 0 || variant_element_state->variant->empty()) { mutable_column->insertManyDefaults(num_new_discriminators); return; } + if (variant_element_state->variant->size() < *variant_limit) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of deserialized variant column less than the limit: {} < {}", variant_element_state->variant->size(), *variant_limit); + size_t variant_offset = variant_element_state->variant->size() - *variant_limit; /// If we have only our discriminator in range, insert the whole range to result column. diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index b19cf61e207..e2c127b228d 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1510,7 +1510,7 @@ private: } case ElementType::OBJECT: { - return getObjectType(); + return std::make_shared(DataTypeObject::SchemaFormat::JSON, max_dynamic_paths_for_object, max_dynamic_types_for_object); } } } @@ -1548,12 +1548,6 @@ private: return float64_type; } - const DataTypePtr & getObjectType() const - { - static const DataTypePtr object_type = std::make_shared(DataTypeObject::SchemaFormat::JSON, max_dynamic_paths_for_object, max_dynamic_types_for_object); - return object_type; - } - const DataTypePtr & getNullType() const { static const DataTypePtr null_type = std::make_shared(std::make_shared()); diff --git a/src/Functions/JSONEmpty.cpp b/src/Functions/JSONEmpty.cpp index e932074ccb8..cdd1ac2f237 100644 --- a/src/Functions/JSONEmpty.cpp +++ b/src/Functions/JSONEmpty.cpp @@ -99,7 +99,7 @@ REGISTER_FUNCTION(JSONEmpty) { factory.registerFunction(FunctionDocumentation{ .description = R"( -Checks whether thee input JSON object is empty. +Checks whether the input JSON object is empty. 
)", .syntax = {"JSONEmpty(json)"}, .arguments = {{"json", "JSON column"}}, diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index b7a161901f0..0bf6b9ffe12 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -76,6 +76,7 @@ struct JSONSharedDataPathsWithTypesImpl }; /// Implements functions that extracts paths and types from JSON object column. +/// Used for introspection of the content of the JSON object column. template class FunctionJSONPaths : public IFunction { @@ -95,9 +96,12 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes & data_types) const override { - if (data_types.size() != 1 || data_types[0]->getTypeId() != TypeIndex::Object) + if (data_types.size() != 1 ) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires single argument with type JSON", getName()); + if (data_types[0]->getTypeId() != TypeIndex::Object) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires argument with type JSON, got: {}", getName(),data_types[0]->getName()); + if constexpr (Impl::with_types) return std::make_shared(std::make_shared(), std::make_shared()); return std::make_shared(std::make_shared()); @@ -142,14 +146,23 @@ private: /// We want the resulting arrays of paths to be sorted for consistency. std::sort(dynamic_paths.begin(), dynamic_paths.end()); - for (const auto & path : dynamic_paths) - data.insertData(path.data(), path.size()); - offsets.push_back(data.size()); - return res->replicate(IColumn::Offsets(1, column_object.size())); + size_t size = column_object.size(); + for (size_t i = 0; i != size; ++i) + { + for (const auto & path : dynamic_paths) + { + /// Don't include path if it contains NULL, because we consider + /// it to be equivalent to the absense of this path in this row. + if (!dynamic_path_columns.at(path)->isNullAt(i)) + data.insertData(path.data(), path.size()); + } + offsets.push_back(data.size()); + } + return res; } /// Collect all paths: typed, dynamic and paths from shared data. - std::vector sorted_dynamic_and_typed_paths; + std::vector sorted_dynamic_and_typed_paths; const auto & typed_path_columns = column_object.getTypedPaths(); const auto & dynamic_path_columns = column_object.getDynamicPaths(); for (const auto & [path, _] : typed_path_columns) @@ -174,7 +187,9 @@ private: while (sorted_paths_index != sorted_dynamic_and_typed_paths.size() && sorted_dynamic_and_typed_paths[sorted_paths_index] < shared_data_path) { auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; - data.insertData(path.data, path.size); + /// If it's dynamic path include it only if it's not NULL. 
+ if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) + data.insertData(path.data(), path.size()); ++sorted_paths_index; } @@ -184,7 +199,8 @@ private: for (; sorted_paths_index != sorted_dynamic_and_typed_paths.size(); ++sorted_paths_index) { auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; - data.insertData(path.data, path.size); + if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) + data.insertData(path.data(), path.size()); } offsets.push_back(data.size()); @@ -215,12 +231,16 @@ private: { for (auto & path : sorted_dynamic_paths) { - auto type = getDynamicValueType(dynamic_path_columns.at(path), i); - paths_column->insertData(path.data(), path.size()); - types_column->insertData(type.data(), type.size()); + auto column = dynamic_path_columns.at(path); + if (!column->isNullAt(i)) + { + auto type = getDynamicValueType(column, i); + paths_column->insertData(path.data(), path.size()); + types_column->insertData(type.data(), type.size()); + } } - offsets.push_back(types_column->size()); + offsets.push_back(paths_column->size()); } return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); @@ -237,9 +257,11 @@ private: size_t end = shared_data_offsets[ssize_t(i)]; for (size_t j = start; j != end; ++j) { - paths_column->insertFrom(*shared_data_paths, j); - auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j)); - types_column->insertData(type_name.data(), type_name.size()); + if (auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j))) + { + paths_column->insertFrom(*shared_data_paths, j); + types_column->insertData(type_name->data(), type_name->size()); + } } offsets.push_back(paths_column->size()); @@ -273,28 +295,44 @@ private: { auto shared_data_path = shared_data_paths->getDataAt(j); auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j)); + /// Skip NULL values. + if (!type_name) + continue; while (sorted_paths_index != sorted_typed_and_dynamic_paths_with_types.size() && sorted_typed_and_dynamic_paths_with_types[sorted_paths_index].first < shared_data_path) { auto & [path, type] = sorted_typed_and_dynamic_paths_with_types[sorted_paths_index]; - paths_column->insertData(path.data(), path.size()); /// Update type for path from dynamic paths. if (auto it = dynamic_path_columns.find(path); it != dynamic_path_columns.end()) + { + /// Skip NULL values. + if (it->second->isNullAt(i)) + { + ++sorted_paths_index; + continue; + } type = getDynamicValueType(it->second, i); + } + paths_column->insertData(path.data(), path.size()); types_column->insertData(type.data(), type.size()); ++sorted_paths_index; } paths_column->insertData(shared_data_path.data, shared_data_path.size); - types_column->insertData(type_name.data(), type_name.size()); + types_column->insertData(type_name->data(), type_name->size()); } for (; sorted_paths_index != sorted_typed_and_dynamic_paths_with_types.size(); ++sorted_paths_index) { auto & [path, type] = sorted_typed_and_dynamic_paths_with_types[sorted_paths_index]; - paths_column->insertData(path.data(), path.size()); if (auto it = dynamic_path_columns.find(path); it != dynamic_path_columns.end()) + { + /// Skip NULL values. 
+ if (it->second->isNullAt(i)) + continue; type = getDynamicValueType(it->second, i); + } + paths_column->insertData(path.data(), path.size()); types_column->insertData(type.data(), type.size()); } @@ -310,17 +348,18 @@ private: const auto & variant_info = dynamic_column->getVariantInfo(); const auto & variant_column = dynamic_column->getVariantColumn(); auto global_discr = variant_column.globalDiscriminatorAt(i); - if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) - return "None"; - + /// We don't output path with NULL values. It should be checked before calling getDynamicValueType. + chassert(global_discr != ColumnVariant::NULL_DISCRIMINATOR); return variant_info.variant_names[global_discr]; } - String getDynamicValueTypeFromSharedData(StringRef value) const + std::optional getDynamicValueTypeFromSharedData(StringRef value) const { ReadBufferFromMemory buf(value.data, value.size); auto type = decodeDataType(buf); - return isNothing(type) ? "None" : type->getName(); + if (isNothing(type)) + return std::nullopt; + return type->getName(); } }; @@ -328,12 +367,143 @@ private: REGISTER_FUNCTION(JSONPaths) { - factory.registerFunction>(); - factory.registerFunction>(); - factory.registerFunction>(); - factory.registerFunction>(); - factory.registerFunction>(); - factory.registerFunction>(); + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of all paths stored in each row in JSON column. +)", + .syntax = {"JSONAllPaths(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPaths(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONAllPaths(json)─┠+│ {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a','c'] │ +└──────────────────────────────────────┴────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of all paths and their data types stored in each row in JSON column. +)", + .syntax = {"JSONAllPathsWithTypes(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPathsWithTypes(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONAllPathsWithTypes(json)───────────────┠+│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {'b':'String'} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))','c':'Date'} │ +└──────────────────────────────────────┴───────────────────────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of dynamic paths that are stored as separate subcolumns in JSON column. 
+)", + .syntax = {"JSONDynamicPaths(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPaths(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONDynamicPaths(json)─┠+│ {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ [] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a'] │ +└──────────────────────────────────────┴────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of dynamic paths that are stored as separate subcolumns and their types in each row in JSON column. +)", + .syntax = {"JSONDynamicPathsWithTypes(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPathsWithTypes(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┠+│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │ +└──────────────────────────────────────┴─────────────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of paths that are stored in shared data structure in JSON column. +)", + .syntax = {"JSONDynamicPaths(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONSharedDataPaths(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONSharedDataPaths(json)─┠+│ {"a":"42"} │ [] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['c'] │ +└──────────────────────────────────────┴───────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of paths that are stored in shared data structure and their types in each row in JSON column. 
+)", + .syntax = {"JSONDynamicPathsWithTypes(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPathsWithTypes(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┠+│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │ +└──────────────────────────────────────┴─────────────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); } } diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 0e0b53c52d9..e0ac2ee6bed 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -282,6 +282,45 @@ bool ParserTableAsStringLiteralIdentifier::parseImpl(Pos & pos, ASTPtr & node, E return true; } +namespace +{ + +/// Parser of syntax sugar for reading JSON subcolumns of type Array(JSON): +/// json.a.b[][].c -> json.a.b.:Array(Array(JSON)).c +class ParserArrayOfJSONIdentifierAddition : public IParserBase +{ +public: + String getLastArrayOfJSONSubcolumnIdentifier() const + { + String subcolumn = ":`"; + for (size_t i = 0; i != last_array_level; ++i) + subcolumn += "Array("; + subcolumn += "JSON"; + for (size_t i = 0; i != last_array_level; ++i) + subcolumn += ")"; + return subcolumn + "`"; + } + +protected: + const char * getName() const override { return "ParserArrayOfJSONIdentifierDelimiter"; } + + bool parseImpl(Pos & pos, ASTPtr & /*node*/, Expected & expected) override + { + last_array_level = 0; + ParserTokenSequence brackets_parser(std::vector{TokenType::OpeningSquareBracket, TokenType::ClosingSquareBracket}); + if (!brackets_parser.check(pos, expected)) + return false; + ++last_array_level; + while (brackets_parser.check(pos, expected)) + ++last_array_level; + return true; + } + +private: + size_t last_array_level; +}; + +} bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { @@ -290,6 +329,7 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex delimiter_parsers.emplace_back(std::make_unique(std::vector{TokenType::Dot, TokenType::Colon}), SpecialDelimiter::JSON_PATH_DYNAMIC_TYPE); delimiter_parsers.emplace_back(std::make_unique(std::vector{TokenType::Dot, TokenType::Caret}), SpecialDelimiter::JSON_PATH_PREFIX); delimiter_parsers.emplace_back(std::make_unique(TokenType::Dot), SpecialDelimiter::NONE); + ParserArrayOfJSONIdentifierAddition array_of_json_identifier_addition; std::vector parts; SpecialDelimiter last_special_delimiter = SpecialDelimiter::NONE; @@ -308,16 +348,23 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex break; } - is_first = false; - if (last_special_delimiter != SpecialDelimiter::NONE) + { parts.push_back(static_cast(last_special_delimiter) + backQuote(getIdentifierName(element))); + } else + { parts.push_back(getIdentifierName(element)); + /// Check if we have Array of JSON subcolumn additioon after identifier + /// and replace it with corresponding type subcolumn. 
+ if (!is_first && array_of_json_identifier_addition.check(pos, expected)) + parts.push_back(array_of_json_identifier_addition.getLastArrayOfJSONSubcolumnIdentifier()); + } if (parts.back().empty()) params.push_back(element->as()->getParam()); + is_first = false; begin = pos; bool parsed_delimiter = false; for (const auto & [parser, special_delimiter] : delimiter_parsers) diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index 39608bc583e..e98920acc01 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -56,6 +56,7 @@ protected: * There is also special delimiters `.:` and `.^` for JSON type subcolumns. In case of special delimiter * the next identifier part after it will include special delimiter and be back quoted always: json.a.b.:UInt32 -> ['json', 'a', 'b', ':`UInt32`']. * It's needed to distinguish identifiers json.a.b.:UInt32 and json.a.b.`:UInt32`. + * There is also a special syntax sugar for reading JSON subcolumns of type Array(JSON): json.a.b[][].c -> json.a.b.:Array(Array(JSON)).c */ class ParserCompoundIdentifier : public IParserBase { diff --git a/tests/queries/0_stateless/00727_concat.reference b/tests/queries/0_stateless/00727_concat.reference index 329ad36ad3c..a93bf12b77a 100644 --- a/tests/queries/0_stateless/00727_concat.reference +++ b/tests/queries/0_stateless/00727_concat.reference @@ -34,6 +34,7 @@ With 2023-11-14 05:50:12.123 With hallo With [\'foo\',\'bar\'] With {"foo":"bar"} +With {"foo":"bar"} With (42,\'foo\') With {42:\'foo\'} With 122.233.64.201 diff --git a/tests/queries/0_stateless/00727_concat.sql b/tests/queries/0_stateless/00727_concat.sql index 76dae541261..65cd019cc13 100644 --- a/tests/queries/0_stateless/00727_concat.sql +++ b/tests/queries/0_stateless/00727_concat.sql @@ -2,6 +2,7 @@ -- no-fasttest: json type needs rapidjson library, geo types need s2 geometry SET allow_experimental_object_type = 1; +SET allow_experimental_json_type = 1; SET allow_suspicious_low_cardinality_types=1; SELECT '-- Const string + non-const arbitrary type'; @@ -40,6 +41,7 @@ SELECT concat('With ', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'E SELECT concat('With ', materialize('hallo' :: Enum('hallo' = 1))); SELECT concat('With ', materialize(['foo', 'bar'] :: Array(String))); SELECT concat('With ', materialize('{"foo": "bar"}' :: JSON)); +SELECT concat('With ', materialize('{"foo": "bar"}' :: Object('json'))); SELECT concat('With ', materialize((42, 'foo') :: Tuple(Int32, String))); SELECT concat('With ', materialize(map(42, 'foo') :: Map(Int32, String))); SELECT concat('With ', materialize('122.233.64.201' :: IPv4)); diff --git a/tests/queries/0_stateless/01825_new_type_json_10.reference b/tests/queries/0_stateless/01825_new_type_json_10.reference new file mode 100644 index 00000000000..0dffa3c46f9 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_10.reference @@ -0,0 +1,14 @@ +('a.b','Int64') +('a.c','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('a.c','Array(Nullable(String))') +('e','Array(Nullable(Int64))') +('f','Int64') +('d','Int64') +{"o":{"a":{"b":"1","c":[{"d":"10","e":["31"]},{"d":"20","e":["63","127"]}]}}} +{"o":{"a":{"b":"2","c":[]}}} +{"o":{"a":{"b":"3","c":[{"e":["32"],"f":"20"},{"e":["64","128"],"f":"30"}]}}} +{"o":{"a":{"b":"4","c":[]}}} +1 [10,20] [[31],[63,127]] [NULL,NULL] +2 [] [] [] +3 [NULL,NULL] [[32],[64,128]] [20,30] +4 [] [] [] diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql 
b/tests/queries/0_stateless/01825_new_type_json_10.sql new file mode 100644 index 00000000000..0e599b0ac31 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_json_10; +CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; + +INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 127]}]}} {"a": {"b": 2, "c": []}} +INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 3, "c": [{"f": 20, "e": [32]}, {"f": 30, "e": [64, 128]}]}} {"a": {"b": 4, "c": []}} + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(o)) FROM t_json_10; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(o.a.c.:`Array(JSON)`))) FROM t_json_10; +SELECT o FROM t_json_10 ORDER BY o.a.b FORMAT JSONEachRow; +SELECT o.a.b, o.a.c.:`Array(JSON)`.d, o.a.c.:`Array(JSON)`.e, o.a.c.:`Array(JSON)`.f FROM t_json_10 ORDER BY o.a.b; + +DROP TABLE t_json_10; diff --git a/tests/queries/0_stateless/01825_new_type_json_11.reference b/tests/queries/0_stateless/01825_new_type_json_11.reference new file mode 100644 index 00000000000..aa3375a23cb --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_11.reference @@ -0,0 +1,13 @@ +('id','Int64') +('key_1','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('key_2','Int64') +('key_3','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') +('key_4','Array(JSON(max_dynamic_types=4, max_dynamic_paths=16))') +('key_7','Int64') +('key_5','Int64') +{"obj":{"id":"1","key_1":[{"key_2":"100","key_3":[{"key_4":[{"key_5":"-2"}],"key_7":"257"}]},{"key_2":"65536"}]}} +{"obj":{"id":"2","key_1":[{"key_2":"101","key_3":[{"key_4":[{"key_5":"-2"}]}]},{"key_2":"102","key_3":[{"key_7":"257"}]},{"key_2":"65536"}]}} +{"obj.key_1.:`Array(JSON)`.key_3":[[{"key_4":[{"key_5":"-2"}],"key_7":"257"}],null]} +{"obj.key_1.:`Array(JSON)`.key_3":[[{"key_4":[{"key_5":"-2"}]}],[{"key_7":"257"}],null]} +[[[-2]],[]] [[257],[]] +[[[-2]],[[]],[]] [[NULL],[257],[]] diff --git a/tests/queries/0_stateless/01825_new_type_json_11.sh b/tests/queries/0_stateless/01825_new_type_json_11.sh new file mode 100755 index 00000000000..2eb6c1768fd --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_11.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_11" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_11 (obj JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat < notEmpty(x), outpoints)" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS btc" + +rm ${CLICKHOUSE_USER_FILES_UNIQUE}/btc_transactions.json diff --git a/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference b/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference new file mode 100644 index 00000000000..e8891cf6a56 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference @@ -0,0 +1 @@ +PushEvent some-repo {"actor":{"avatar_url":"https:\\/\\/avatars.githubusercontent.com\\/u\\/123213213?","display_login":"github-actions","gravatar_id":"","id":"123123123","login":"github-actions[bot]","url":"https:\\/\\/api.github.com\\/users\\/github-actions[bot]"},"created_at":"2022-01-04 07:00:00.000000000","repo":{"id":"1001001010101","name":"some-repo","url":"https:\\/\\/api.github.com\\/repos\\/some-repo"},"type":"PushEvent"} diff --git a/tests/queries/0_stateless/01825_new_type_json_ephemeral.sql b/tests/queries/0_stateless/01825_new_type_json_ephemeral.sql new file mode 100644 index 00000000000..4aaebfd326f --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ephemeral.sql @@ -0,0 +1,18 @@ + +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_github_json; + +CREATE table t_github_json +( + event_type LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'type'), + repo_name LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'repo', 'name'), + message JSON DEFAULT empty(message_raw) ? '{}' : message_raw, + message_raw String EPHEMERAL +) ENGINE = MergeTree ORDER BY (event_type, repo_name); + +INSERT INTO t_github_json (message_raw) FORMAT JSONEachRow {"message_raw": "{\"type\":\"PushEvent\", \"created_at\": \"2022-01-04 07:00:00\", \"actor\":{\"avatar_url\":\"https://avatars.githubusercontent.com/u/123213213?\",\"display_login\":\"github-actions\",\"gravatar_id\":\"\",\"id\":123123123,\"login\":\"github-actions[bot]\",\"url\":\"https://api.github.com/users/github-actions[bot]\"},\"repo\":{\"id\":1001001010101,\"name\":\"some-repo\",\"url\":\"https://api.github.com/repos/some-repo\"}}"} + +SELECT * FROM t_github_json ORDER BY event_type, repo_name; + +DROP TABLE t_github_json; diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh new file mode 100755 index 00000000000..d4a30901341 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_from_string" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2 FORMAT JSONAsObject" +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_string FORMAT JSONAsString" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_from_string SELECT data FROM ghdata_2_string" + +${CLICKHOUSE_CLIENT} -q "SELECT \ + (SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), sum(cityHash64(toString(data))) FROM ghdata_2_from_string) = \ + (SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), sum(cityHash64(toString(data))) FROM ghdata_2)" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_from_string" diff --git a/tests/queries/0_stateless/01825_new_type_json_nbagames.reference b/tests/queries/0_stateless/01825_new_type_json_nbagames.reference new file mode 100644 index 00000000000..d39672c4d9d --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_nbagames.reference @@ -0,0 +1,65 @@ +1000 +('_id.$oid','String') +('date.$date','String') +('teams','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('abbreviation','String') +('city','String') +('home','Bool') +('name','String') +('players','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') +('results.ast','Int64') +('results.blk','Int64') +('results.drb','Int64') +('results.fg','Int64') +('results.fg3','Int64') +('results.fg3_pct','DateTime64(9)') +('results.fg3a','Int64') +('results.fg_pct','DateTime64(9)') +('results.fga','Int64') +('results.ft','Int64') +('results.ft_pct','DateTime64(9)') +('results.fta','Int64') +('results.mp','Int64') +('results.orb','Int64') +('results.pf','Int64') +('results.pts','Int64') +('results.stl','Int64') +('results.tov','Int64') +('results.trb','Int64') +('score','Int64') +('won','Int64') +('results.fg3_pct','String') +Boston Celtics 70 +Los Angeles Lakers 64 +Milwaukee Bucks 61 +Philadelphia 76ers 57 +Atlanta Hawks 55 +('ast','Int64') +('blk','Int64') +('drb','Int64') +('fg','Int64') +('fg3','Int64') +('fg3_pct','String') +('fg3a','Int64') +('fg_pct','DateTime64(9)') +('fga','Int64') +('ft','Int64') +('ft_pct','DateTime64(9)') +('fta','Int64') +('mp','String') +('orb','Int64') +('pf','Int64') +('player','String') +('pts','Int64') +('stl','Int64') +('tov','Int64') +('trb','Int64') +('fg3_pct','DateTime64(9)') +('fg_pct','String') +('ft_pct','String') +Larry Bird 10 +Clyde Drexler 4 +Alvin Robertson 3 +Magic Johnson 3 +Charles Barkley 2 +1 diff --git a/tests/queries/0_stateless/01825_new_type_json_nbagames.sh b/tests/queries/0_stateless/01825_new_type_json_nbagames.sh new file mode 100755 index 00000000000..b305b16edbe 
--- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_nbagames.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_from_string" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE nbagames (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/nbagames_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nbagames FORMAT JSONAsObject" + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM nbagames WHERE NOT ignore(*)" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) from nbagames" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(data.teams[]))) from nbagames" + +${CLICKHOUSE_CLIENT} -q \ + "SELECT teams.name.:String AS name, sum(teams.won.:Int64) AS wins FROM nbagames \ + ARRAY JOIN data.teams[] AS teams GROUP BY name \ + ORDER BY wins DESC LIMIT 5;" + +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(data.teams[].players[])))) from nbagames" + +${CLICKHOUSE_CLIENT} -q \ +"SELECT player, sum(triple_double) AS triple_doubles FROM \ +( \ + SELECT \ + arrayJoin(arrayJoin(data.teams[].players[])) as players, \ + players.player.:String as player, \ + ((players.pts.:Int64 >= 10) + \ + (players.ast.:Int64 >= 10) + \ + (players.blk.:Int64 >= 10) + \ + (players.stl.:Int64 >= 10) + \ + (players.trb.:Int64 >= 10)) >= 3 AS triple_double \ + from nbagames \ +) \ +GROUP BY player ORDER BY triple_doubles DESC, player LIMIT 5" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE nbagames_string (data String) ENGINE = MergeTree ORDER BY tuple()" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE nbagames_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/nbagames_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nbagames_string FORMAT JSONAsString" +${CLICKHOUSE_CLIENT} -q "INSERT INTO nbagames_from_string SELECT data FROM nbagames_string" + +${CLICKHOUSE_CLIENT} -q "SELECT \ + (SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), sum(cityHash64(toString(data))) FROM nbagames_from_string) = \ + (SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), sum(cityHash64(toString(data))) FROM nbagames)" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_from_string" diff --git a/tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference new file mode 100644 index 00000000000..7cf3855d684 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference @@ -0,0 +1 @@ +{'k1':['Int64'],'k2':['String']} 500000 diff --git a/tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql new file mode 100644 index 00000000000..a8457ff4f15 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql @@ -0,0 +1,10 @@ +-- Tags: long +DROP TABLE IF EXISTS t_json_parallel; + +SET allow_experimental_json_type = 1, max_insert_threads = 20, max_threads = 20, min_insert_block_size_rows 
= 65536; +CREATE TABLE t_json_parallel (data JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_parallel SELECT materialize('{"k1":1, "k2": "some"}') FROM numbers_mt(500000); +SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), count() FROM t_json_parallel; + +DROP TABLE t_json_parallel; diff --git a/tests/queries/0_stateless/01825_type_json_11.sh b/tests/queries/0_stateless/01825_type_json_11.sh index dbed15c8bb9..6109dff53a6 100755 --- a/tests/queries/0_stateless/01825_type_json_11.sh +++ b/tests/queries/0_stateless/01825_type_json_11.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_11" -$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_11 (obj JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1 +$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_11 (obj Object('json')) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1 cat <&1 | grep -o -m1 "Cannot parse object" $CLICKHOUSE_CLIENT -q "SELECT count() FROM t_json_async_insert" diff --git a/tests/queries/0_stateless/02421_type_json_empty_parts.sh b/tests/queries/0_stateless/02421_type_json_empty_parts.sh index b6cf5995bfa..2ecec524e25 100755 --- a/tests/queries/0_stateless/02421_type_json_empty_parts.sh +++ b/tests/queries/0_stateless/02421_type_json_empty_parts.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT 'Collapsing';" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, s Int8, data JSON) ENGINE = CollapsingMergeTree(s) ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, s Int8, data Object('json')) ENGINE = CollapsingMergeTree(s) ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, 1, '{\"k1\": \"aaa\"}') (1, -1, '{\"k2\": \"bbb\"}');" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" @@ -19,7 +19,7 @@ ${CLICKHOUSE_CLIENT} -q "DESC TABLE t_json_empty_parts SETTINGS describe_extend_ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT 'DELETE all';" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, data JSON) ENGINE = MergeTree ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, data Object('json')) ENGINE = MergeTree ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '{\"k1\": \"aaa\"}') (1, '{\"k2\": \"bbb\"}');" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" @@ -32,7 +32,7 @@ ${CLICKHOUSE_CLIENT} -q "DESC TABLE t_json_empty_parts SETTINGS describe_extend_ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT 'TTL';" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, d Date, data JSON) ENGINE = MergeTree ORDER BY id TTL d WHERE id % 2 = 1 SETTINGS 
old_parts_lifetime=5;" --allow_experimental_object_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, d Date, data Object('json')) ENGINE = MergeTree ORDER BY id TTL d WHERE id % 2 = 1 SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '2000-01-01', '{\"k1\": \"aaa\"}') (2, '2000-01-01', '{\"k2\": \"bbb\"}');" ${CLICKHOUSE_CLIENT} -q "OPTIMIZE TABLE t_json_empty_parts FINAL;" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" diff --git a/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh b/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh index 0d0caa78ea3..99f243833f5 100755 --- a/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh +++ b/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh @@ -21,7 +21,7 @@ echo ' } }' > 02482_object_data.jsonl -$CLICKHOUSE_LOCAL --allow_experimental_object_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj JSON')" +$CLICKHOUSE_LOCAL --allow_experimental_object_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj Object('json')')" rm 02482_object_data.jsonl diff --git a/tests/queries/0_stateless/02513_validate_data_types.sql b/tests/queries/0_stateless/02513_validate_data_types.sql index 5eb91ac7879..4996f63c5bd 100644 --- a/tests/queries/0_stateless/02513_validate_data_types.sql +++ b/tests/queries/0_stateless/02513_validate_data_types.sql @@ -1,9 +1,9 @@ -- Tags: no-fasttest set allow_experimental_object_type=0; -select CAST('{"x" : 1}', 'JSON'); -- {serverError ILLEGAL_COLUMN} +select CAST('{"x" : 1}', 'Object(''json'')'); -- {serverError ILLEGAL_COLUMN} desc file(nonexist.json, JSONAsObject); -- {serverError ILLEGAL_COLUMN} -desc file(nonexist.json, JSONEachRow, 'x JSON'); -- {serverError ILLEGAL_COLUMN} +desc file(nonexist.json, JSONEachRow, 'x Object(''json'')'); -- {serverError ILLEGAL_COLUMN} set allow_suspicious_low_cardinality_types=0; select CAST(1000000, 'LowCardinality(UInt64)'); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} diff --git a/tests/queries/0_stateless/02553_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_type_json_attach_partition.sql index 9225106f767..0a9d64451c1 100644 --- a/tests/queries/0_stateless/02553_type_json_attach_partition.sql +++ b/tests/queries/0_stateless/02553_type_json_attach_partition.sql @@ -2,7 +2,7 @@ SET allow_experimental_object_type = 1; DROP TABLE IF EXISTS t_json_attach_partition; -CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t_json_attach_partition(b UInt64, c Object('json')) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); diff --git a/tests/queries/0_stateless/02553_type_object_analyzer.sql b/tests/queries/0_stateless/02553_type_object_analyzer.sql index 55482a02ed1..a8eeb57a6c6 100644 --- a/tests/queries/0_stateless/02553_type_object_analyzer.sql +++ b/tests/queries/0_stateless/02553_type_object_analyzer.sql @@ -3,7 +3,7 @@ SET allow_experimental_object_type = 1; SET allow_experimental_analyzer = 1; DROP TABLE IF EXISTS t_json_analyzer; -CREATE TABLE t_json_analyzer (a JSON) ENGINE = Memory; +CREATE TABLE t_json_analyzer (a Object('json')) ENGINE = Memory; INSERT INTO t_json_analyzer VALUES ('{"id": 2, "obj": {"k2": {"k3": "str", "k4": [{"k6": 55}]}, "some": 42}, "s": 
"bar"}'); SELECT any(a) AS data FROM t_json_analyzer FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/02717_pretty_json.sql b/tests/queries/0_stateless/02717_pretty_json.sql index 8a49eb50adf..1a5c090bcb2 100644 --- a/tests/queries/0_stateless/02717_pretty_json.sql +++ b/tests/queries/0_stateless/02717_pretty_json.sql @@ -1,3 +1,3 @@ set allow_experimental_object_type=1; -select 42 as num, [42, 42] as arr, [[[42, 42], [42, 42]], [[42, 42]]] as nested_arr, tuple(42, 42)::Tuple(a UInt32, b UInt32) as tuple, tuple(tuple(tuple(42, 42), 42), 42)::Tuple(a Tuple(b Tuple(c UInt32, d UInt32), e UInt32), f UInt32) as nested_tuple, map(42, 42, 24, 24) as map, map(42, map(42, map(42, 42))) as nested_map, [tuple(map(42, 42), [42, 42]), tuple(map(42, 42), [42, 42])]::Array(Tuple(Map(UInt32, UInt32), Array(UInt32))) as nested_types, '{"a" : {"b" : 1, "c" : 2}}'::JSON as json_object format PrettyNDJSON; +select 42 as num, [42, 42] as arr, [[[42, 42], [42, 42]], [[42, 42]]] as nested_arr, tuple(42, 42)::Tuple(a UInt32, b UInt32) as tuple, tuple(tuple(tuple(42, 42), 42), 42)::Tuple(a Tuple(b Tuple(c UInt32, d UInt32), e UInt32), f UInt32) as nested_tuple, map(42, 42, 24, 24) as map, map(42, map(42, map(42, 42))) as nested_map, [tuple(map(42, 42), [42, 42]), tuple(map(42, 42), [42, 42])]::Array(Tuple(Map(UInt32, UInt32), Array(UInt32))) as nested_types, '{"a" : {"b" : 1, "c" : 2}}'::Object('json') as json_object format PrettyNDJSON; diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql index 3bbcbb1a535..dadfa59bf87 100644 --- a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql +++ b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql @@ -11,7 +11,7 @@ DROP TABLE IF EXISTS tab; SET allow_suspicious_low_cardinality_types=1; -SET allow_experimental_object_type=1; +SET allow_experimental_json_type=1; CREATE TABLE tab ( diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect index 4798a6958c6..2079da9d34a 100755 --- a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect +++ b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect @@ -33,7 +33,7 @@ send -- "DROP TABLE IF EXISTS tab;\r" expect "Query OK, 0 rows affected" send -- "SET allow_suspicious_low_cardinality_types=1;\r" -send -- "SET allow_experimental_object_type=1;\r" +send -- "SET allow_experimental_json_type=1;\r" send -- " CREATE TABLE tab diff --git a/tests/queries/0_stateless/02870_per_column_settings.sql b/tests/queries/0_stateless/02870_per_column_settings.sql index d242ebe6c61..c3050222bc8 100644 --- a/tests/queries/0_stateless/02870_per_column_settings.sql +++ b/tests/queries/0_stateless/02870_per_column_settings.sql @@ -49,7 +49,7 @@ CREATE TABLE tab ( id UInt64, tup Tuple(UInt64, UInt64) SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840), - json JSON SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840), + json Object('json') SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840), ) ENGINE = MergeTree ORDER BY id diff --git a/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference b/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference index 3455adc8723..f100e8c48d4 100644 --- a/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference +++ 
b/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference @@ -34,6 +34,7 @@ The answer to all questions is 2023-11-14 05:50:12.123. The answer to all questions is hallo. The answer to all questions is [\'foo\',\'bar\']. The answer to all questions is {"foo":"bar"}. +The answer to all questions is {"foo":"bar"}. The answer to all questions is (42,\'foo\'). The answer to all questions is {42:\'foo\'}. The answer to all questions is 122.233.64.201. diff --git a/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql b/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql index ad1de2bec6d..dcc3964e4b0 100644 --- a/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql +++ b/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql @@ -3,6 +3,7 @@ -- no-fasttest: json type needs rapidjson library, geo types need s2 geometry SET allow_experimental_object_type = 1; +SET allow_experimental_json_type = 1; SET allow_suspicious_low_cardinality_types=1; SELECT '-- Const string + non-const arbitrary type'; @@ -40,6 +41,7 @@ SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11 SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam'))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize('hallo' :: Enum('hallo' = 1))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize(['foo', 'bar'] :: Array(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('{"foo": "bar"}' :: Object('json'))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize('{"foo": "bar"}' :: JSON)); SELECT format('The {0} to all questions is {1}.', 'answer', materialize((42, 'foo') :: Tuple(Int32, String))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize(map(42, 'foo') :: Map(Int32, String))); diff --git a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference index 5555c918500..3e63763d544 100644 --- a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference +++ b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference @@ -7,7 +7,7 @@ Decimal 45 Decimal(10, 0) Decimal(M) 46 Decimal(4, 0) Decimal(M, D) 47.21 Decimal(4, 2) Double 48.11 Float64 -JSON {"foo":"bar"} Object(\'json\') +JSON {"foo":"bar"} JSON Real 49.22 Float32 Signed 50 Int64 Unsigned 52 UInt64 @@ -21,7 +21,7 @@ Decimal 45 Decimal(10, 0) Decimal(M) 46 Decimal(4, 0) Decimal(M, D) 47.21 Decimal(4, 2) Double 48.11 Float64 -JSON {"foo":"bar"} Object(\'json\') +JSON {"foo":"bar"} JSON Real 49.22 Float32 Signed 50 Int64 Unsigned 52 UInt64 diff --git a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql index 7b5735cdebc..8cccde4b0ab 100644 --- a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql +++ b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql @@ -1,7 +1,7 @@ -- See https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast -- Tests are in order of the type appearance in the docs -SET allow_experimental_object_type = 1; +SET allow_experimental_json_type = 1; SELECT '-- Uppercase tests'; -- Not supported as it is translated to FixedString without arguments diff --git a/tests/queries/0_stateless/03157_dynamic_type_json.reference b/tests/queries/0_stateless/03157_dynamic_type_json.reference index 
38bca12bb95..14e851bdbc7 100644 --- a/tests/queries/0_stateless/03157_dynamic_type_json.reference +++ b/tests/queries/0_stateless/03157_dynamic_type_json.reference @@ -1,5 +1,5 @@ -1 (((((((((('deep_value')))))))))) -2 (((((((((('deep_array_value')))))))))) +1 {"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_value"}}}}}}}}}} +2 {"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_array_value"}}}}}}}}}} -(((((((((('deep_value')))))))))) Tuple(level1 Tuple(level2 Tuple(level3 Tuple(level4 Tuple(level5 Tuple(level6 Tuple(level7 Tuple(level8 Tuple(level9 Tuple(level10 String)))))))))) -(((((((((('deep_array_value')))))))))) Tuple(level1 Tuple(level2 Tuple(level3 Tuple(level4 Tuple(level5 Tuple(level6 Tuple(level7 Tuple(level8 Tuple(level9 Tuple(level10 String)))))))))) +{"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_value"}}}}}}}}}} JSON +{"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_array_value"}}}}}}}}}} JSON diff --git a/tests/queries/0_stateless/03157_dynamic_type_json.sql b/tests/queries/0_stateless/03157_dynamic_type_json.sql index cb1a5987104..91af7942718 100644 --- a/tests/queries/0_stateless/03157_dynamic_type_json.sql +++ b/tests/queries/0_stateless/03157_dynamic_type_json.sql @@ -1,7 +1,8 @@ SET allow_experimental_dynamic_type=1; -SET allow_experimental_object_type=1; +SET allow_experimental_json_type=1; SET allow_experimental_variant_type=1; +DROP TABLE IF EXISTS test_deep_nested_json; CREATE TABLE test_deep_nested_json (i UInt16, d JSON) ENGINE = Memory; INSERT INTO test_deep_nested_json VALUES (1, '{"level1": {"level2": {"level3": {"level4": {"level5": {"level6": {"level7": {"level8": {"level9": {"level10": "deep_value"}}}}}}}}}}'); @@ -11,3 +12,4 @@ SELECT * FROM test_deep_nested_json ORDER BY i; SELECT ''; SELECT d::Dynamic d1, dynamicType(d1) FROM test_deep_nested_json ORDER BY i; +DROP TABLE test_deep_nested_json; diff --git a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql index 20a9e17a148..a18f985f217 100644 --- a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql +++ b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql @@ -1,5 +1,4 @@ SET allow_experimental_dynamic_type=1; -SET allow_experimental_object_type=1; SET allow_experimental_variant_type=1; CREATE TABLE test_variable (v Variant(String, UInt32, IPv6, Bool, DateTime64)) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index d302205ca23..d6c293de1be 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -1,7 +1,6 @@ -- Tags: no-random-settings SET allow_experimental_dynamic_type=1; -SET allow_experimental_object_type=1; SET allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; diff --git a/tests/queries/0_stateless/03205_json_cast_from_string.reference b/tests/queries/0_stateless/03205_json_cast_from_string.reference new file mode 100644 index 00000000000..1c4d820b3b4 --- /dev/null +++ b/tests/queries/0_stateless/03205_json_cast_from_string.reference @@ -0,0 +1,18 @@ +{} +{"a":"42","b":"Hello"} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} 
+{"a":{"b":{"c":{"d":true},"e":"43"},"f":"44"},"g":"44"} +{"a":{"b":{"e":"43"},"f":"44"},"g":"44"} +{"a":{"b":{"e":"43"},"f":"44"},"g":"44"} +{"a":{"f":"44"},"g":"44"} +{"g":"44"} +{"a":{"f":"44"},"g":"44"} +{"g":"44"} +{} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64'} {'a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'String','a.b.e':'String','a.f':'Int64','g':'Int64'} {'a.b.c.d':'String','a.b.e':'String'} {'a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'String','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'String'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} diff --git a/tests/queries/0_stateless/03205_json_cast_from_string.sql b/tests/queries/0_stateless/03205_json_cast_from_string.sql new file mode 100644 index 00000000000..6ab88826c86 --- /dev/null +++ b/tests/queries/0_stateless/03205_json_cast_from_string.sql @@ -0,0 +1,22 @@ +-- Tags: no-fasttest +set allow_experimental_json_type=1; + +select materialize('{}')::JSON; +select materialize('{"a" : 42, "b" : "Hello"}')::JSON; +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON; +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(a.b.c.d Bool); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b.c.d); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b.c); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*a.*b'); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*a.*'); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*'); + +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), 
JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2, max_dynamic_types=1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1, max_dynamic_types=1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0, max_dynamic_types=1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); diff --git a/tests/queries/0_stateless/03205_json_syntax.reference b/tests/queries/0_stateless/03205_json_syntax.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03205_json_syntax.sql b/tests/queries/0_stateless/03205_json_syntax.sql new file mode 100644 index 00000000000..604bddf69d3 --- /dev/null +++ b/tests/queries/0_stateless/03205_json_syntax.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type=1; +drop table if exists test; +create table test (json JSON) engine=Memory; +replace table test (json JSON(max_dynamic_paths=10)) engine=Memory; +replace table test (json JSON(max_dynamic_types=10)) engine=Memory; +replace table test (json JSON(a UInt32)) engine=Memory; +replace table test (json JSON(aaaaa UInt32)) engine=Memory; +replace table test (json JSON(`a b c d` UInt32)) engine=Memory; +replace table test (json JSON(a.b.c UInt32)) engine=Memory; +replace table test (json JSON(aaaa.b.cccc UInt32)) engine=Memory; +replace table test (json JSON(`some path`.`path some` UInt32)) engine=Memory; +replace table test (json JSON(a.b.c Tuple(d UInt32, e UInt32))) engine=Memory; +replace table test (json JSON(SKIP a)) engine=Memory; +replace table test (json JSON(SKIP aaaa)) engine=Memory; +replace table test (json JSON(SKIP `a b c d`)) engine=Memory; +replace table test (json JSON(SKIP a.b.c)) engine=Memory; +replace table test (json JSON(SKIP aaaa.b.cccc)) engine=Memory; +replace table test (json JSON(SKIP `some path`.`path some`)) engine=Memory; +replace table test (json JSON(SKIP REGEXP '.*a.*')) engine=Memory; +replace table test (json JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, b.c.d String, SKIP g.d.a, SKIP o.g.a, SKIP REGEXP '.*a.*', SKIP REGEXP 'abc')) engine=Memory; +drop table test; diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference new file mode 100644 index 00000000000..fa6fed341ba --- /dev/null +++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference @@ -0,0 +1,195 @@ +JSON with no arguments +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} 
+{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','e':'String'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} +1 2020-01-01 {"e":{"f":["s1","s2"]}} +2 [1,2,3] {"e":{"g":"43"}} +3 \N {} +4 \N {} +5 ['b1','b2'] {"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}} +JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP '.*h.*') +{"a":{"b":{"c":1,"d":[false,true]}},"b":"2020-01-01"} +{"a":{"b":{"c":2,"d":[true,true]}},"b":["1","2","3"]} +{"a":{"b":{"c":3,"d":[true,true]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":[true,true]}}} +{"a":{"b":{"c":5,"d":[true,true]}},"b":["b1","b2"]} +{'a.b':'Tuple(c UInt32, d Array(Bool))','b':'Date'} {'b':'Date'} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))','b':'Array(Nullable(Int64))'} {'b':'Array(Nullable(Int64))'} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))','e':'String'} {'e':'String'} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))'} {} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))','b':'Array(Nullable(String))'} {'b':'Array(Nullable(String))'} {} +JSON(a.b.c UInt32, max_dynamic_paths=2) +{"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.d':'Array(Nullable(Int64))','b':'Date'} {'c':'Int64','d.e.f':'Array(Nullable(String))'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))'} {'d.e.g':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.d':'Array(Nullable(Int64))'} {'e':'String'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.d':'Array(Nullable(Int64))'} {'c':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))'} {'d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} +JSON(a.b.c UInt32, max_dynamic_paths=0) +{"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} 
+{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {} {'a.b.d':'Array(Nullable(Int64))','e':'String'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} +JSON(a.b.c UInt32, max_dynamic_types=1) +{"a":{"b":{"c":1,"d":"[0,1]"}},"b":"2020-01-01","c":"42","d":{"e":{"f":"[\\"s1\\",\\"s2\\"]"}}} +{"a":{"b":{"c":2,"d":"[2,3]"}},"b":"[1,2,3]","d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":"[4,5]"}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":"[6,7]"}},"c":"43"} +{"a":{"b":{"c":5,"d":"[8,9]"}},"b":"[\\"b1\\",\\"b2\\"]","d":{"e":{"f":"[\\"s3\\",\\"s4\\"]","g":"44","h":"2020-02-02 10:00:00"}}} +{'a.b.c':'UInt32','a.b.d':'String','b':'String','c':'String','d.e.f':'String'} {'a.b.d':'String','b':'String','c':'String','d.e.f':'String'} {} +{'a.b.c':'UInt32','a.b.d':'String','b':'String','d.e.g':'String'} {'a.b.d':'String','b':'String','d.e.g':'String'} {} +{'a.b.c':'UInt32','a.b.d':'String','e':'String'} {'a.b.d':'String','e':'String'} {} +{'a.b.c':'UInt32','a.b.d':'String','c':'String'} {'a.b.d':'String','c':'String'} {} +{'a.b.c':'UInt32','a.b.d':'String','b':'String','d.e.f':'String','d.e.g':'String','d.e.h':'String'} {'a.b.d':'String','b':'String','d.e.f':'String','d.e.g':'String','d.e.h':'String'} {} +Test small max_read_buffer_size +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} 
+{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +Test PrettyJSONEachRow +{ + "json": { + "a" : { + "b" : { + "c" : "1", + "d" : [ + "0", + "1" + ] + } + }, + "b" : "2020-01-01", + "c" : "42", + "d" : { + "e" : { + "f" : [ + "s1", + "s2" + ] + } + } + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "2", + "d" : [ + "2", + "3" + ] + } + }, + "b" : [ + "1", + "2", + "3" + ], + "d" : { + "e" : { + "g" : "43" + } + } + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "3", + "d" : [ + "4", + "5" + ] + } + }, + "e" : "Hello, World!" + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "4", + "d" : [ + "6", + "7" + ] + } + }, + "c" : "43" + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "5", + "d" : [ + "8", + "9" + ] + } + }, + "b" : [ + "b1", + "b2" + ], + "d" : { + "e" : { + "f" : [ + "s3", + "s4" + ], + "g" : "44", + "h" : "2020-02-02 10:00:00.000000000" + } + } + } +} +Test TSV +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +Test CSV +"{""a"":{""b"":{""c"":""1"",""d"":[""0"",""1""]}},""b"":""2020-01-01"",""c"":""42"",""d"":{""e"":{""f"":[""s1"",""s2""]}}}" +"{""a"":{""b"":{""c"":""2"",""d"":[""2"",""3""]}},""b"":[""1"",""2"",""3""],""d"":{""e"":{""g"":""43""}}}" +"{""a"":{""b"":{""c"":""3"",""d"":[""4"",""5""]}},""e"":""Hello, World!""}" +"{""a"":{""b"":{""c"":""4"",""d"":[""6"",""7""]}},""c"":""43""}" +"{""a"":{""b"":{""c"":""5"",""d"":[""8"",""9""]}},""b"":[""b1"",""b2""],""d"":{""e"":{""f"":[""s3"",""s4""],""g"":""44"",""h"":""2020-02-02 10:00:00.000000000""}}}" diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh b/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh new file mode 100755 index 00000000000..44bef4f0e95 --- /dev/null +++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.json + +echo '{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}}} +{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}}} +{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "e" : "Hello, World!"} +{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43} +{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : ["b1", "b2"], "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44, "h" : "2020-02-02 10:00:00"}}}' > $DATA_FILE + +echo "JSON with no arguments" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json.a.b.c, json.b, json.^d from file($DATA_FILE, JSONAsObject)" + +echo "JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP '.*h.*')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP \'.*h.*\')')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP \'.*h.*\')')" + +echo "JSON(a.b.c UInt32, max_dynamic_paths=2)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=2)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=2)')" + +echo "JSON(a.b.c UInt32, max_dynamic_paths=0)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=0)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=0)')" + +echo "JSON(a.b.c UInt32, max_dynamic_types=1)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=1)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=1)')" + +echo "Test small max_read_buffer_size" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=1 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=2 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=3 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=4 -q "select json from file($DATA_FILE, JSONAsObject)" + +echo "Test PrettyJSONEachRow" +$CLICKHOUSE_LOCAL 
--allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject) format PrettyJSONEachRow" + +echo "Test TSV" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, TSV, 'json JSON') format TSV" +echo "Test CSV" +echo '"{""a"" : {""b"" : {""c"" : 1, ""d"" : [0, 1]}}, ""b"" : ""2020-01-01"", ""c"" : 42, ""d"" : {""e"" : {""f"" : [""s1"", ""s2""]}}}" +"{""a"" : {""b"" : {""c"" : 2, ""d"" : [2, 3]}}, ""b"" : [1, 2, 3], ""c"" : null, ""d"" : {""e"" : {""g"" : 43}}}" +"{""a"" : {""b"" : {""c"" : 3, ""d"" : [4, 5]}}, ""e"" : ""Hello, World!""}" +"{""a"" : {""b"" : {""c"" : 4, ""d"" : [6, 7]}}, ""c"" : 43}" +"{""a"" : {""b"" : {""c"" : 5, ""d"" : [8, 9]}}, ""b"" : [""b1"", ""b2""], ""d"" : {""e"" : {""f"" : [""s3"", ""s4""], ""g"" : 44, ""h"" : ""2020-02-02 10:00:00""}}}"' > $DATA_FILE +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, CSV, 'json JSON') format CSV" + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1.reference new file mode 100644 index 00000000000..90a96b1bb2f --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1.reference @@ -0,0 +1,2080 @@ +Memory +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", 
"str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
"35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, 
{"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
"str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, 
{"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, 
{"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", 
"str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", 
"34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, 
null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, 
{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, 
{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, 
{"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +MergeTree compact +No merges +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, 
{"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", 
"str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, 
{"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, 
{"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, 
"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, 
null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, 
{"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + 
"json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], 
["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, 
{"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, 
{"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, 
{"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, 
null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +With merges +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", 
"14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, 
{"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", 
"str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, 
{"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 
00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 
00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, 
{"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, 
{"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +MergeTree wide +No merges +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", 
null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", 
"1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, 
{"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, 
{"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": 
[null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, 
{"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, 
{"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, 
null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, 
{"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, 
null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +With merges +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": 
[null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", 
"18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, 
{"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], 
["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, 
{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
"str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, 
{"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": 
[null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, 
{"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": 
[null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, 
{"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, 
{"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, 
{"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, 
{"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, 
null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, 
{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, 
{"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_1.sh new file mode 100755 index 00000000000..3c63b513ad6 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1.sh @@ -0,0 +1,125 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.non.existing.path from test order by id format JSONColumns" + $CH_CLIENT -q "select 
json.non.existing.path.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.a.b.c from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.a.b.c from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id 
format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" +} + +$CH_CLIENT -q "drop table if exists test;" + +echo "Memory" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory" +insert +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree compact" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" 
+insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference new file mode 100644 index 00000000000..db7180cec75 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference @@ -0,0 +1,71 @@ +No merges +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 +With merges +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh new file mode 100755 index 00000000000..128c525bf02 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, 
json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.non.existing.path is Null" + $CH_CLIENT -q "select count() from test where json.non.existing.path.:String is Null" + $CH_CLIENT -q "select json.non.existing.path from test order by id format Null" + $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.a.b.c == 0" + $CH_CLIENT -q "select json.a.b.c from test format Null" + $CH_CLIENT -q "select json.a.b.c from test order by id format Null" + $CH_CLIENT -q "select json, json.a.b.c from test format Null" + $CH_CLIENT -q "select json, json.a.b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null" + $CH_CLIENT -q "select json.b.b.e from test format Null" + $CH_CLIENT -q "select json.b.b.e from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e from test format Null" + $CH_CLIENT -q "select json, json.b.b.e from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, 
json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.a.b.d is Null " + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null" + $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`)" + $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" + $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json, 
json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.d.a is Null and json.d.b is Null" + $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" + $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.d.a is Null and json.b.b.\`_1\` is Null" + $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.b.b.\`_1\`.:Int64 is Null" + $CH_CLIENT -q "select json.d.a, json.b.b.\`_1\` from test order by id format Null" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_1\` from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0" + $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null" + $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json.^a, 
json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference new file mode 100644 index 00000000000..1bbd3926bdc --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference @@ -0,0 +1,35 @@ +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh new file mode 100755 index 00000000000..bf6360a89a4 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, 
json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.non.existing.path is Null" + $CH_CLIENT -q "select count() from test where json.non.existing.path.:String is Null" + $CH_CLIENT -q "select json.non.existing.path from test order by id format Null" + $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.a.b.c == 0" + $CH_CLIENT -q "select json.a.b.c from test format Null" + $CH_CLIENT -q "select json.a.b.c from test order by id format Null" + $CH_CLIENT -q "select json, json.a.b.c from test format Null" + $CH_CLIENT -q "select json, json.a.b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null" + $CH_CLIENT -q "select json.b.b.e from test format Null" + $CH_CLIENT -q "select json.b.b.e from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e from test format Null" + $CH_CLIENT -q "select json, json.b.b.e from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, 
json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.a.b.d is Null " + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null" + $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`)" + $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" + $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json, 
json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.d.a is Null and json.d.b is Null" + $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" + $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.d.a is Null and json.b.b.\`_1\` is Null" + $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.b.b.\`_1\`.:Int64 is Null" + $CH_CLIENT -q "select json.d.a, json.b.b.\`_1\` from test order by id format Null" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_1\` from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0" + $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null" + $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json.^a, 
json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory" +insert +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference new file mode 100644 index 00000000000..db7180cec75 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference @@ -0,0 +1,71 @@ +No merges +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 +With merges +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh new file mode 100755 index 00000000000..5e8af0c1f80 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, 
json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.non.existing.path is Null" + $CH_CLIENT -q "select count() from test where json.non.existing.path.:String is Null" + $CH_CLIENT -q "select json.non.existing.path from test order by id format Null" + $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.a.b.c == 0" + $CH_CLIENT -q "select json.a.b.c from test format Null" + $CH_CLIENT -q "select json.a.b.c from test order by id format Null" + $CH_CLIENT -q "select json, json.a.b.c from test format Null" + $CH_CLIENT -q "select json, json.a.b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null" + $CH_CLIENT -q "select json.b.b.e from test format Null" + $CH_CLIENT -q "select json.b.b.e from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e from test format Null" + $CH_CLIENT -q "select json, json.b.b.e from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, 
json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.a.b.d is Null " + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null" + $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`)" + $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null" + $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" + $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json, 
json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.d.a is Null and json.d.b is Null" + $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" + $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where json.d.a is Null and json.b.b.\`_1\` is Null" + $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.b.b.\`_1\`.:Int64 is Null" + $CH_CLIENT -q "select json.d.a, json.b.b.\`_1\` from test order by id format Null" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_1\` from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test format Null" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" + + $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0" + $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null" + $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json.^a, 
json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference new file mode 100644 index 00000000000..ec8d1407bdb --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference @@ -0,0 +1,560 @@ +Memory +insert +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, 
{"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], 
[], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, 
{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], 
[null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, 
{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], 
[{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], 
[["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + 
"json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, 
[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +MergeTree compact +No merges +insert +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') 
+('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], 
[null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, 
{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], 
[["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, 
{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], 
[["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": 
[null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], 
[], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +With merges +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, 
{"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], 
["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, 
{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], 
[null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], 
[{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], 
[{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], 
[{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + 
"json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +MergeTree wide +No merges +insert +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, 
{"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], 
[["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, 
{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], 
[null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, 
{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], 
["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], 
[["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], 
[null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +With merges +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, 
{"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], 
[null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], 
[{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], 
[["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, 
{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], 
["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], 
[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], 
[{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], 
[{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh new file mode 100755 index 00000000000..9d8d02de7aa --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(5, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(15, 5)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns" +} + +$CH_CLIENT -q "drop table if exists test;" + +echo "Memory" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b 
Array(JSON))) engine=Memory" +insert +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree compact" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference new file mode 100644 index 00000000000..9c092278ec7 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference @@ -0,0 +1,65 @@ +No merges +insert +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 +With merges +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh new file mode 100755 index 00000000000..8fa1b809390 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1)" + $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64)" + $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null" + $CH_CLIENT -q "select count() from test 
where arrayJoin(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null" + + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" + + $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0)" + $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64)" + $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null" + $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null" + + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference new file mode 100644 index 00000000000..63d10b1315f --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference @@ -0,0 +1,32 @@ 
+insert +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh new file mode 100755 index 00000000000..f7c0fdad1a6 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, 
json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1)" + $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64)" + $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null" + $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null" + + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" + + $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0)" + $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64)" + $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null" + $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null" + + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" + $CH_CLIENT -q "select json.a.r, 
json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=Memory" +insert +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference new file mode 100644 index 00000000000..9c092278ec7 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference @@ -0,0 +1,65 @@ +No merges +insert +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 +With merges +test +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh new file mode 100755 index 00000000000..3106702faf4 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" + $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" + $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" + + $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1)" + $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64)" + $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null" + $CH_CLIENT -q "select count() from test 
where arrayJoin(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null" + + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" + + $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0)" + $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64)" + $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null" + $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null" + + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" + $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference new file mode 100644 index 00000000000..203b59521f4 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference @@ -0,0 +1,100 @@ +MergeTree compact +test +Dynamic paths +100000 a +90000 
b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g +MergeTree wide +test +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sh b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sh new file mode 100755 index 00000000000..cb937eeb71a --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1" + +function test() +{ + echo "test" + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a', number)) from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b', number)) from numbers(90000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('c', number)) from numbers(80000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('d', number)) from numbers(70000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('e', number)) from numbers(60000)" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" + + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('f', number)) from numbers(200000)" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic 
paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('g', number)) from numbers(10000)" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" +} + +$CH_CLIENT -q "drop table if exists test;" + +echo "MergeTree compact" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" +test +$CH_CLIENT -q "drop table test;" + diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference new file mode 100644 index 00000000000..203b59521f4 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference @@ -0,0 +1,100 @@ +MergeTree compact +test +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g +MergeTree wide +test +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.sh 
b/tests/queries/0_stateless/03209_json_type_vertical_merges.sh new file mode 100755 index 00000000000..df0bd5bbbdd --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1" + +function test() +{ + echo "test" + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a', number)) from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b', number)) from numbers(90000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('c', number)) from numbers(80000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('d', number)) from numbers(70000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('e', number)) from numbers(60000)" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" + + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('f', number)) from numbers(200000)" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('g', number)) from numbers(10000)" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" +} + 
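+# Note (descriptive comment): the table below is declared with JSON(max_dynamic_paths=3), so after
+# each "optimize table test final" only the three most frequent paths are expected to stay listed by
+# JSONDynamicPaths, while the remaining paths should move to JSONSharedDataPaths - see the
+# accompanying .reference file for the expected distribution before and after each merge.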
+$CH_CLIENT -q "drop table if exists test;" + +echo "MergeTree compact" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" +test +$CH_CLIENT -q "drop table test;" + diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.reference b/tests/queries/0_stateless/03210_json_type_alter_add_column.reference new file mode 100644 index 00000000000..907cde709f3 --- /dev/null +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.reference @@ -0,0 +1,75 @@ +Memory +initial insert +alter add column 1 +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +insert after alter add column +3 a.b +3 b.c +3 c.d +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +3 {"a":{"b":"3"}} 3 {"b":"3"} \N \N +4 {"a":{"b":"4"}} 4 {"b":"4"} \N \N +5 {"a":{"b":"5"}} 5 {"b":"5"} \N \N +6 {"b":{"c":"6"}} \N {} 6 \N +7 {"b":{"c":"7"}} \N {} 7 \N +8 {"b":{"c":"8"}} \N {} 8 \N +9 {"c":{"d":"9"}} \N {} \N 9 +10 {"c":{"d":"10"}} \N {} \N 10 +11 {"c":{"d":"11"}} \N {} \N 11 +12 {} \N {} \N \N +13 {} \N {} \N \N +14 {} \N {} \N \N +MergeTree compact +initial insert +alter add column 1 +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +insert after alter add column +3 a.b +3 b.c +3 c.d +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +3 {"a":{"b":"3"}} 3 {"b":"3"} \N \N +4 {"a":{"b":"4"}} 4 {"b":"4"} \N \N +5 {"a":{"b":"5"}} 5 {"b":"5"} \N \N +6 {"b":{"c":"6"}} \N {} 6 \N +7 {"b":{"c":"7"}} \N {} 7 \N +8 {"b":{"c":"8"}} \N {} 8 \N +9 {"c":{"d":"9"}} \N {} \N 9 +10 {"c":{"d":"10"}} \N {} \N 10 +11 {"c":{"d":"11"}} \N {} \N 11 +12 {} \N {} \N \N +13 {} \N {} \N \N +14 {} \N {} \N \N +MergeTree wide +initial insert +alter add column 1 +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +insert after alter add column +3 a.b +3 b.c +3 c.d +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +3 {"a":{"b":"3"}} 3 {"b":"3"} \N \N +4 {"a":{"b":"4"}} 4 {"b":"4"} \N \N +5 {"a":{"b":"5"}} 5 {"b":"5"} \N \N +6 {"b":{"c":"6"}} \N {} 6 \N +7 {"b":{"c":"7"}} \N {} 7 \N +8 {"b":{"c":"8"}} \N {} 8 \N +9 {"c":{"d":"9"}} \N {} \N 9 +10 {"c":{"d":"10"}} \N {} \N 10 +11 {"c":{"d":"11"}} \N {} \N 11 +12 {} \N {} \N \N +13 {} \N {} \N \N +14 {} \N {} \N \N diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.sh b/tests/queries/0_stateless/03210_json_type_alter_add_column.sh new file mode 100755 index 00000000000..dfa5de9f091 --- /dev/null +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" + +function run() +{ + echo "initial insert" + $CH_CLIENT -q "insert into test select number from numbers(3)" + + echo "alter add column 1" + $CH_CLIENT -q "alter table test add column json JSON settings mutations_sync=1" + $CH_CLIENT -q "select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -q "select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x" + + echo "insert after alter add column" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', number::UInt32)) from numbers(3, 3)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b.c', number::UInt32)) from numbers(6, 3)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('c.d', number::UInt32)) from numbers(9, 3)" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(12, 3)" + $CH_CLIENT -q "select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path" + $CH_CLIENT -q "select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x" +} + +$CH_CLIENT -q "drop table if exists test;" + +echo "Memory" +$CH_CLIENT -q "create table test (x UInt64) engine=Memory" +run +$CH_CLIENT -q "drop table test;" + +echo "MergeTree compact" +$CH_CLIENT -q "create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" +run +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide" +$CH_CLIENT -q "create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +run +$CH_CLIENT -q "drop table test;" + diff --git a/tests/queries/0_stateless/03211_nested_json_merges.reference b/tests/queries/0_stateless/03211_nested_json_merges.reference new file mode 100644 index 00000000000..221fcefba66 --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges.reference @@ -0,0 +1,88 @@ +MergeTree compact + horizontal merge +test +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +MergeTree wide + horizontal merge +test +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +MergeTree compact + vertical merge +test +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +MergeTree wide + vertical merge +test +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e diff --git a/tests/queries/0_stateless/03211_nested_json_merges.sh b/tests/queries/0_stateless/03211_nested_json_merges.sh new file mode 100755 index 00000000000..8e6be01f3fc --- /dev/null +++ 
b/tests/queries/0_stateless/03211_nested_json_merges.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1" + + +function test() +{ + echo "test" + $CH_CLIENT -q "system stop merges test" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a', number)) from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(100000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(50000)" + + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + + $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(50000)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(200000)" + + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + $CH_CLIENT -nm -q "system start merges test; optimize table test final;" + echo "Dynamic paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" + echo "Shared data paths" + $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" +} + +$CH_CLIENT -q "drop table if exists test;" + +echo "MergeTree compact + horizontal merge" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide + horizontal merge" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree compact + vertical merge" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" 
+test +$CH_CLIENT -q "drop table test;" + +echo "MergeTree wide + vertical merge" +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" +test +$CH_CLIENT -q "drop table test;" From 184405fc7aa60a3c2626475ac4ff9fdbc0e120ae Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 20 Jul 2024 19:10:08 +0000 Subject: [PATCH 0576/2361] Fix build and style --- src/DataTypes/DataTypeObject.cpp | 2 +- src/DataTypes/DataTypeObject.h | 5 ----- src/DataTypes/DataTypeObjectDeprecated.cpp | 2 +- src/Functions/JSONPaths.cpp | 4 ++-- utils/check-style/aspell-ignore/en/aspell-dict.txt | 6 ++++++ 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index bae1d1935b5..c9378ab395c 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -478,7 +478,7 @@ static DataTypePtr createJSON(const ASTPtr & arguments) void registerDataTypeJSON(DataTypeFactory & factory) { if (!Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) - factory.registerDataType("JSON", createJSON, DataTypeFactory::CaseInsensitive); + factory.registerDataType("JSON", createJSON, DataTypeFactory::Case::Insensitive); } } diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 2984078370b..5b76d96e0de 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -10,11 +10,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - class DataTypeObject : public IDataType { public: diff --git a/src/DataTypes/DataTypeObjectDeprecated.cpp b/src/DataTypes/DataTypeObjectDeprecated.cpp index e3c1a458eda..a34fe8a628d 100644 --- a/src/DataTypes/DataTypeObjectDeprecated.cpp +++ b/src/DataTypes/DataTypeObjectDeprecated.cpp @@ -80,7 +80,7 @@ void registerDataTypeObjectDeprecated(DataTypeFactory & factory) if (Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) factory.registerSimpleDataType("JSON", [] { return std::make_shared("JSON", false); }, - DataTypeFactory::CaseInsensitive); + DataTypeFactory::Case::Insensitive); } } diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index 0bf6b9ffe12..31699ad1c9c 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -96,7 +96,7 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes & data_types) const override { - if (data_types.size() != 1 ) + if (data_types.size() != 1) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires single argument with type JSON", getName()); if (data_types[0]->getTypeId() != TypeIndex::Object) @@ -152,7 +152,7 @@ private: for (const auto & path : dynamic_paths) { /// Don't include path if it contains NULL, because we consider - /// it to be equivalent to the absense of this path in this row. + /// it to be equivalent to the absence of this path in this row. 
if (!dynamic_path_columns.at(path)->isNullAt(i)) data.insertData(path.data(), path.size()); } diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 862f38976ce..723e10ff3ed 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -390,6 +390,8 @@ IsValid JBOD JOINed JOINs +JSONAllPaths +JSONAllPathsWithTypes JSONArrayLength JSONAsObject JSONAsString @@ -404,6 +406,8 @@ JSONCompactStrings JSONCompactStringsEachRow JSONCompactStringsEachRowWithNames JSONCompactStringsEachRowWithNamesAndTypes +JSONDynamicPaths +JSONDynamicPathsWithTypes JSONEachRow JSONEachRowWithProgress JSONExtract @@ -423,6 +427,8 @@ JSONObjectEachRow JSONStrings JSONStringsEachRow JSONStringsEachRowWithProgress +JSONSharedDataPaths +JSONSharedDataPathsWithTypes JSONType JSONs Jaeger From c786e6eb49152d7992ee3ee5c76300b7e4712309 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 21:27:35 +0200 Subject: [PATCH 0577/2361] Fix tests --- tests/queries/0_stateless/01338_long_select_and_alter.reference | 2 +- .../0_stateless/01338_long_select_and_alter_zookeeper.reference | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.reference b/tests/queries/0_stateless/01338_long_select_and_alter.reference index 276d6bcc29d..027109252e1 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter.reference @@ -1,3 +1,3 @@ 5 5 -CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = MergeTree\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_mt\n(\n `key` Int64,\n `value` Int64\n)\nENGINE = MergeTree\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference index aab1b93f6bd..65e638bc3a4 100644 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.reference @@ -1,3 +1,3 @@ 5 5 -CREATE TABLE default.alter_mt\n(\n `key` UInt64,\n `value` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01338_long_select_and_alter_zookeeper_default/alter_mt\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_mt\n(\n `key` Int64,\n `value` Int64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/01338_long_select_and_alter_zookeeper_default/alter_mt\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 From ee3c0e7e1f37a4e9388866145e9cbb8f0220e42c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 21:50:53 +0200 Subject: [PATCH 0578/2361] Better diagnostics in `test_disk_configuration` --- .../test_disk_configuration/test.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_disk_configuration/test.py b/tests/integration/test_disk_configuration/test.py index c003ff85755..afc5303298c 100644 --- a/tests/integration/test_disk_configuration/test.py +++ b/tests/integration/test_disk_configuration/test.py @@ -208,13 +208,21 @@ def test_merge_tree_custom_disk_setting(start_cluster): secret_access_key='minio123'); """ ) - count = len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + + list1 = 
list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) + count1 = len(list1) + node1.query(f"INSERT INTO {TABLE_NAME}_3 SELECT number FROM numbers(100)") assert int(node1.query(f"SELECT count() FROM {TABLE_NAME}_3")) == 100 - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == count - ) + + list2 = list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)) + count2 = len(list2) + + if count1 != count2: + print("list1: ", list1) + print("list2: ", list2) + + assert count1 == count2 assert ( len(list(minio.list_objects(cluster.minio_bucket, "data2/", recursive=True))) > 0 From 6e7b5e74589350592a8ee163fc51352d12170db1 Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 20 Jul 2024 21:03:52 +0000 Subject: [PATCH 0579/2361] Fix style --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 723e10ff3ed..ec09eb2e758 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -410,6 +410,7 @@ JSONDynamicPaths JSONDynamicPathsWithTypes JSONEachRow JSONEachRowWithProgress +JSONEmpty JSONExtract JSONExtractArrayRaw JSONExtractBool From f9b97aac84d6ab5f377261e5b57c2d6e8a432cef Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 20 Jul 2024 23:19:33 +0200 Subject: [PATCH 0580/2361] Fix bad test `02950_part_log_bytes_uncompressed` --- .../0_stateless/02950_part_log_bytes_uncompressed.sql | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql index 248475ab84b..cfed02eaeeb 100644 --- a/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql +++ b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql @@ -1,3 +1,6 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings +-- Because we compare part sizes, and they could be affected by index granularity and index compression settings. + CREATE TABLE part_log_bytes_uncompressed ( key UInt8, value UInt8 @@ -17,7 +20,8 @@ ALTER TABLE part_log_bytes_uncompressed DROP PART 'all_4_4_0' SETTINGS mutations SYSTEM FLUSH LOGS; -SELECT event_type, table, part_name, bytes_uncompressed > 0, size_in_bytes < bytes_uncompressed FROM system.part_log +SELECT event_type, table, part_name, bytes_uncompressed > 0, size_in_bytes < bytes_uncompressed ? 
'1' : toString((size_in_bytes, bytes_uncompressed)) +FROM system.part_log WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'part_log_bytes_uncompressed' AND (event_type != 'RemovePart' OR part_name = 'all_4_4_0') -- ignore removal of other parts ORDER BY part_name, event_type; From 1ce13df07c1292051986e75dcb216488f7bbae35 Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 20 Jul 2024 21:47:45 +0000 Subject: [PATCH 0581/2361] Fix build --- src/Columns/ColumnObject.cpp | 6 ------ src/Columns/ColumnObjectDeprecated.cpp | 7 +++++++ src/Columns/ColumnObjectDeprecated.h | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index c4190a1b756..f9a3af601e9 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1197,10 +1197,4 @@ void ColumnObject::fillPathColumnFromSharedData(IColumn & path_column, StringRef } } -void ColumnObject::updateHashFast(SipHash & hash) const -{ - for (const auto & entry : subcolumns) - for (auto & part : entry->data.data) - part->updateHashFast(hash); -} } diff --git a/src/Columns/ColumnObjectDeprecated.cpp b/src/Columns/ColumnObjectDeprecated.cpp index 42f088503f7..6d0bfa0e05e 100644 --- a/src/Columns/ColumnObjectDeprecated.cpp +++ b/src/Columns/ColumnObjectDeprecated.cpp @@ -1101,4 +1101,11 @@ void ColumnObjectDeprecated::finalize() checkObjectHasNoAmbiguosPaths(getKeys()); } +void ColumnObjectDeprecated::updateHashFast(SipHash & hash) const +{ + for (const auto & entry : subcolumns) + for (auto & part : entry->data.data) + part->updateHashFast(hash); +} + } diff --git a/src/Columns/ColumnObjectDeprecated.h b/src/Columns/ColumnObjectDeprecated.h index bae32b82e8a..59614632972 100644 --- a/src/Columns/ColumnObjectDeprecated.h +++ b/src/Columns/ColumnObjectDeprecated.h @@ -251,7 +251,7 @@ public: const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); } void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); } void updateWeakHash32(WeakHash32 &) const override { throwMustBeConcrete(); } - void updateHashFast(SipHash &) const override { throwMustBeConcrete(); } + void updateHashFast(SipHash &) const override; void expand(const Filter &, bool) override { throwMustBeConcrete(); } bool hasEqualValues() const override { throwMustBeConcrete(); } size_t byteSizeAt(size_t) const override { throwMustBeConcrete(); } From a296717e14bf43714ab921f8f5f97105104f3055 Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 20 Jul 2024 22:57:42 +0000 Subject: [PATCH 0582/2361] Fix tests, add docs for setting type_json_skip_duplicated_paths --- docs/en/operations/settings/settings.md | 8 +++++++- src/Core/SettingsChangesHistory.cpp | 1 + .../Serializations/SerializationObject.cpp | 6 +++--- src/Formats/JSONExtractTree.cpp | 6 +++--- src/Formats/SchemaInferenceUtils.cpp | 9 ++++++++- .../02910_object-json-crash-add-column.sql | 16 ++++++++-------- 6 files changed, 30 insertions(+), 16 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 9ddfe985cee..720cf1ce959 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5611,7 +5611,13 @@ Default value: `1GiB`. ## use_json_alias_for_old_object_type -When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/ob) type instead of the new [JSON](../../sql-reference/data-types/json.md) type. 
+When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/object-json.md) type instead of the new [JSON](../../sql-reference/data-types/json.md) type. This setting requires server restart to take effect when changed. Default value: `false`. + +## type_json_skip_duplicated_paths + +When enabled, ClickHouse will skip duplicated paths during parsing of [JSON](../../sql-reference/data-types/json.md) object. Only the value of the first occurrence of each path will be inserted. + +Default value: `false` diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 50a99c51dcb..47555ec290f 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -83,6 +83,7 @@ static std::initializer_listsize() > prev_size) { if (!settings.json.type_json_skip_duplicated_paths) - throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of Object type: {}", path); + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of JSON type: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); } else { @@ -613,7 +613,7 @@ void SerializationObject::deserializeBinary(IColumn & col, ReadBuffer & istr, co if (dynamic_it->second->size() > prev_size) { if (!settings.json.type_json_skip_duplicated_paths) - throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of Object type: {}", path); + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of JSON type: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); } dynamic_serialization->deserializeBinary(*dynamic_it->second, istr, settings); @@ -648,7 +648,7 @@ void SerializationObject::deserializeBinary(IColumn & col, ReadBuffer & istr, co if (i != 0 && path == paths_and_values_for_shared_data[i - 1].first) { if (!settings.json.type_json_skip_duplicated_paths) - throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of Object type: {}", path); + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of JSON type: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); } else { diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index e2c127b228d..680588c7825 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1629,7 +1629,7 @@ public: { if (!format_settings.json.type_json_skip_duplicated_paths) { - error = fmt::format("Duplicate path found during parsing JSON object: {}", path); + error = fmt::format("Duplicate path found during parsing JSON object: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); SerializationObject::restoreColumnObject(column_object, prev_size); return false; } @@ -1697,7 +1697,7 @@ private: { if (!format_settings.json.type_json_skip_duplicated_paths) { - error = fmt::format("Duplicate path found during parsing JSON object: {}", current_path); + error = fmt::format("Duplicate path found during parsing JSON object: {}. 
You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", current_path); return false; } } @@ -1715,7 +1715,7 @@ private: { if (!format_settings.json.type_json_skip_duplicated_paths) { - error = fmt::format("Duplicate path found during parsing JSON object: {}", current_path); + error = fmt::format("Duplicate path found during parsing JSON object: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", current_path); return false; } } diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index d245200eefa..441e7d77d24 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -780,11 +780,18 @@ namespace /// Check if it's just a number, and if so, don't try to infer DateTime from it, /// because we can interpret this number as a timestamp and it will lead to - /// inferring DateTime instead of simple Int64/Float64 in some cases. + /// inferring DateTime instead of simple Int64 in some cases. if (std::all_of(field.begin(), field.end(), isNumericASCII)) return false; ReadBufferFromString buf(field); + Float64 tmp_float; + /// Check if it's a float value, and if so, don't try to infer DateTime from it, + /// because it will lead to inferring DateTime instead of simple Float64 in some cases. + if (tryReadFloatText(tmp_float, buf) && buf.eof()) + return false; + + buf.seek(0, SEEK_SET); /// Return position to the beginning DateTime64 tmp; switch (settings.date_time_input_format) { diff --git a/tests/queries/0_stateless/02910_object-json-crash-add-column.sql b/tests/queries/0_stateless/02910_object-json-crash-add-column.sql index b2d64be1676..0573ac928f8 100644 --- a/tests/queries/0_stateless/02910_object-json-crash-add-column.sql +++ b/tests/queries/0_stateless/02910_object-json-crash-add-column.sql @@ -9,10 +9,10 @@ ORDER BY i; INSERT INTO test02910 (i, jString) SELECT 1, '{"a":"123"}'; -ALTER TABLE test02910 ADD COLUMN j2 Tuple(JSON) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910 ADD COLUMN j2 Tuple(Float64, JSON); -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910 ADD COLUMN j2 Tuple(Array(Tuple(JSON))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910 ADD COLUMN j2 JSON default jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Tuple(Object('json')) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Tuple(Float64, Object('json')); -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Tuple(Array(Tuple(Object('json')))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Object('json') default jString; -- { serverError SUPPORT_IS_DISABLED } -- If we would allow adding a column with dynamic subcolumns the subsequent select would crash the server. 
-- SELECT * FROM test02910; @@ -37,10 +37,10 @@ INSERT INTO test02910_second SELECT number, number, '2023-10-28 11:11:11.11111', INSERT INTO test02910_second SELECT number, number, '2023-10-28 11:11:11.11111', ['c', 'd'] FROM numbers(10); INSERT INTO test02910_second SELECT number, number, '2023-10-28 11:11:11.11111', [] FROM numbers(10); -ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(JSON) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Float64, JSON); -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Array(Tuple(JSON))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910_second ADD COLUMN `tags_json` JSON; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Object('json')) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Float64, Object('json')); -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Array(Tuple(Object('json')))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Object('json'); -- { serverError SUPPORT_IS_DISABLED } -- If we would allow adding a column with dynamic subcolumns the subsequent select would crash the server. -- SELECT * FROM test02910; From 67f51153661f37370f4e365ed3f19c6a4b84c2f4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 04:16:31 +0200 Subject: [PATCH 0583/2361] Update 02950_part_log_bytes_uncompressed.sql --- tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql index cfed02eaeeb..24425062116 100644 --- a/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql +++ b/tests/queries/0_stateless/02950_part_log_bytes_uncompressed.sql @@ -20,7 +20,7 @@ ALTER TABLE part_log_bytes_uncompressed DROP PART 'all_4_4_0' SETTINGS mutations SYSTEM FLUSH LOGS; -SELECT event_type, table, part_name, bytes_uncompressed > 0, size_in_bytes < bytes_uncompressed ? '1' : toString((size_in_bytes, bytes_uncompressed)) +SELECT event_type, table, part_name, bytes_uncompressed > 0, (bytes_uncompressed > 0 ? (size_in_bytes < bytes_uncompressed ? 
'1' : toString((size_in_bytes, bytes_uncompressed))) : '0') FROM system.part_log WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'part_log_bytes_uncompressed' AND (event_type != 'RemovePart' OR part_name = 'all_4_4_0') -- ignore removal of other parts From f1021b70f7f79fc6f921989573435ab5df406bdc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 07:05:09 +0200 Subject: [PATCH 0584/2361] Better diagnostics for test trace_events_stress --- src/Common/TraceSender.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/Common/TraceSender.cpp b/src/Common/TraceSender.cpp index 91d07367a82..064da1b3d76 100644 --- a/src/Common/TraceSender.cpp +++ b/src/Common/TraceSender.cpp @@ -23,8 +23,15 @@ namespace DB LazyPipeFDs TraceSender::pipe; +static thread_local bool inside_send = false; void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras) { + DENY_ALLOCATIONS_IN_SCOPE; + + if (unlikely(inside_send)) + abort(); /// The method shouldn't be called recursively or throw exceptions. + inside_send = true; + constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag + sizeof(UInt8) /// String size + QUERY_ID_MAX_LEN /// Maximum query_id length @@ -80,6 +87,8 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Ext writePODBinary(extras.increment, out); out.next(); + + inside_send = false; } } From 6437088d81200909245172c441de3bf301f553b0 Mon Sep 17 00:00:00 2001 From: Mikhail Date: Sun, 21 Jul 2024 12:52:26 +0700 Subject: [PATCH 0585/2361] translate playground page to russian in ru locale --- docs/ru/getting-started/playground.md | 40 +++++++++++++-------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/ru/getting-started/playground.md b/docs/ru/getting-started/playground.md index c8c987eec9e..eb990c6071e 100644 --- a/docs/ru/getting-started/playground.md +++ b/docs/ru/getting-started/playground.md @@ -1,43 +1,43 @@ --- slug: /ru/getting-started/playground sidebar_position: 14 -sidebar_label: Playground +sidebar_label: ПеÑочница --- -# ClickHouse Playground {#clickhouse-playground} +# ПеÑочница ClickHouse {#clickhouse-playground} -[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster. -Several example datasets are available in Playground. +[ПеÑочница ClickHouse](https://play.clickhouse.com/play?user=play) позволÑет пользователÑм ÑкÑпериментировать Ñ ClickHouse, выполнÑÑ Ð·Ð°Ð¿Ñ€Ð¾ÑÑ‹ мгновенно, без необходимоÑти наÑтройки Ñервера или клаÑтера. +Ð’ ПеÑочнице доÑтупны неÑколько примеров наборов данных. -You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md). +Ð’Ñ‹ можете выполнÑÑ‚ÑŒ запроÑÑ‹ к ПеÑочнице, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð»ÑŽÐ±Ð¾Ð¹ HTTP-клиент, например [curl](https://curl.haxx.se) или [wget](https://www.gnu.org/software/wget/), или наÑтроить Ñоединение, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð´Ñ€Ð°Ð¹Ð²ÐµÑ€Ñ‹ [JDBC](../interfaces/jdbc.md) или [ODBC](../interfaces/odbc.md). Дополнительную информацию о программных продуктах, поддерживающих ClickHouse, можно найти [здеÑÑŒ](../interfaces/index.md). 
-## Credentials {#credentials} +## Учетные данные {#credentials} -| Parameter | Value | +| Параметр | Значение | |:--------------------|:-----------------------------------| -| HTTPS endpoint | `https://play.clickhouse.com:443/` | -| Native TCP endpoint | `play.clickhouse.com:9440` | -| User | `explorer` or `play` | -| Password | (empty) | +| HTTPS-Ð°Ð´Ñ€ÐµÑ | `https://play.clickhouse.com:443/` | +| TCP-Ð°Ð´Ñ€ÐµÑ | `play.clickhouse.com:9440` | +| Пользователь | `explorer` или `play` | +| Пароль | (пуÑто) | -## Limitations {#limitations} +## ÐžÐ³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ {#limitations} -The queries are executed as a read-only user. It implies some limitations: +ЗапроÑÑ‹ выполнÑÑŽÑ‚ÑÑ Ð¾Ñ‚ имени Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ñ Ð¿Ñ€Ð°Ð²Ð°Ð¼Ð¸ только на чтение. Это предполагает некоторые ограничениÑ: -- DDL queries are not allowed -- INSERT queries are not allowed +- DDL-запроÑÑ‹ не разрешены +- INSERT-запроÑÑ‹ не разрешены -The service also have quotas on its usage. +Ð¡ÐµÑ€Ð²Ð¸Ñ Ñ‚Ð°ÐºÐ¶Ðµ имеет квоты на иÑпользование. -## Examples {#examples} +## Примеры {#examples} -HTTPS endpoint example with `curl`: +Пример иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ HTTPS-адреÑа Ñ `curl`: -``` bash +```bash curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'" ``` -TCP endpoint example with [CLI](../interfaces/cli.md): +Пример иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ TCP-адреÑа Ñ [CLI](../interfaces/cli.md): ``` bash clickhouse client --secure --host play.clickhouse.com --user explorer From 4e1afb9c4d30ffb217fe3ee1ced586eefeffaa76 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 08:16:27 +0200 Subject: [PATCH 0586/2361] Fix bad test `01042_system_reload_dictionary_reloads_completely` --- ...ad_dictionary_reloads_completely.reference | 6 ++--- ...em_reload_dictionary_reloads_completely.sh | 22 ++++++++++++------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference index f12dcd8258a..10bc7981d3e 100644 --- a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference +++ b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.reference @@ -1,6 +1,6 @@ 12 -> 102 13 -> 103 14 -> -1 -12(r) -> 102 -13(r) -> 103 -14(r) -> 104 +12 (after reloading) -> 102 +13 (after reloading) -> 103 +14 (after reloading) -> 104 diff --git a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh index 2b075566ac3..42488ca946c 100755 --- a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh +++ b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash +# Tags: no-random-settings +# Dictionaries are updated using the server time. 
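+# (the dictionary is refreshed through the update field insert_time, which is compared against the
+# server clock - presumably the reason for the no-random-settings tag above)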
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -6,8 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -o pipefail -# NOTE: dictionaries TTLs works with server timezone, so session_timeout cannot be used -$CLICKHOUSE_CLIENT --session_timezone '' --multiquery < ', dictGetInt64('${CLICKHOUSE_DATABASE $CLICKHOUSE_CLIENT --query "INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (13, 103, now())" $CLICKHOUSE_CLIENT --query "INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (14, 104, now() - INTERVAL 1 DAY)" +# Wait when the dictionary will update the value for 13 on its own: while [ "$(${CLICKHOUSE_CLIENT} --query "SELECT dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))")" = -1 ] - do - sleep 0.5 - done +do + sleep 0.5 +done $CLICKHOUSE_CLIENT --query "SELECT '13 -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))" + +# By the way, the value for 14 is expected to not be updated at this moment, +# because the values were selected by the update field insert_time, and for 14 it was set as one day ago. $CLICKHOUSE_CLIENT --query "SELECT '14 -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(14))" +# SYSTEM RELOAD DICTIONARY reloads it completely, regardless of the update field, so we will see new values, even for key 14. $CLICKHOUSE_CLIENT --query "SYSTEM RELOAD DICTIONARY '${CLICKHOUSE_DATABASE}.dict'" -$CLICKHOUSE_CLIENT --query "SELECT '12(r) -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(12))" -$CLICKHOUSE_CLIENT --query "SELECT '13(r) -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))" -$CLICKHOUSE_CLIENT --query "SELECT '14(r) -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(14))" +$CLICKHOUSE_CLIENT --query "SELECT '12 (after reloading) -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(12))" +$CLICKHOUSE_CLIENT --query "SELECT '13 (after reloading) -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))" +$CLICKHOUSE_CLIENT --query "SELECT '14 (after reloading) -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(14))" From e74892bfaf4aa98654a6253a34bbeb3f1740de41 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 08:21:09 +0200 Subject: [PATCH 0587/2361] Update playground.md --- docs/ru/getting-started/playground.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/ru/getting-started/playground.md b/docs/ru/getting-started/playground.md index eb990c6071e..a2d5498fb9a 100644 --- a/docs/ru/getting-started/playground.md +++ b/docs/ru/getting-started/playground.md @@ -1,15 +1,15 @@ --- slug: /ru/getting-started/playground sidebar_position: 14 -sidebar_label: ПеÑочница +sidebar_label: Playground --- -# ПеÑочница ClickHouse {#clickhouse-playground} +# ClickHouse Playground {#clickhouse-playground} -[ПеÑочница ClickHouse](https://play.clickhouse.com/play?user=play) позволÑет пользователÑм ÑкÑпериментировать Ñ ClickHouse, выполнÑÑ Ð·Ð°Ð¿Ñ€Ð¾ÑÑ‹ мгновенно, без необходимоÑти наÑтройки Ñервера или клаÑтера. -Ð’ ПеÑочнице доÑтупны неÑколько примеров наборов данных. +[ClickHouse Playground](https://play.clickhouse.com/play?user=play) позволÑет пользователÑм ÑкÑпериментировать Ñ ClickHouse, выполнÑÑ Ð·Ð°Ð¿Ñ€Ð¾ÑÑ‹ мгновенно, без необходимоÑти наÑтройки Ñервера или клаÑтера. +Ð’ Playground доÑтупны неÑколько примеров наборов данных. 
-Ð’Ñ‹ можете выполнÑÑ‚ÑŒ запроÑÑ‹ к ПеÑочнице, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð»ÑŽÐ±Ð¾Ð¹ HTTP-клиент, например [curl](https://curl.haxx.se) или [wget](https://www.gnu.org/software/wget/), или наÑтроить Ñоединение, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð´Ñ€Ð°Ð¹Ð²ÐµÑ€Ñ‹ [JDBC](../interfaces/jdbc.md) или [ODBC](../interfaces/odbc.md). Дополнительную информацию о программных продуктах, поддерживающих ClickHouse, можно найти [здеÑÑŒ](../interfaces/index.md). +Ð’Ñ‹ можете выполнÑÑ‚ÑŒ запроÑÑ‹ к Playground, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð»ÑŽÐ±Ð¾Ð¹ HTTP-клиент, например [curl](https://curl.haxx.se) или [wget](https://www.gnu.org/software/wget/), или наÑтроить Ñоединение, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð´Ñ€Ð°Ð¹Ð²ÐµÑ€Ñ‹ [JDBC](../interfaces/jdbc.md) или [ODBC](../interfaces/odbc.md). Дополнительную информацию о программных продуктах, поддерживающих ClickHouse, можно найти [здеÑÑŒ](../interfaces/index.md). ## Учетные данные {#credentials} From f71244c85c76425bdeeb89ce017caed512f9576c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 08:53:54 +0200 Subject: [PATCH 0588/2361] Something is strange with the test about refreshable materialized views --- src/Storages/MaterializedView/RefreshTask.cpp | 2 +- .../0_stateless/02932_refreshable_materialized_views.sh | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 857cfd78910..cff0e2cf40b 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -304,7 +304,7 @@ void RefreshTask::refreshTask() { PreformattedMessage message = getCurrentExceptionMessageAndPattern(true); auto text = message.text; - message.text = fmt::format("Refresh failed: {}", message.text); + message.text = fmt::format("Refresh view {} failed: {}", view->getStorageID().getFullTableName(), message.text); LOG_ERROR(log, message); exception = text; } diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 89942e25b67..36cdc8d88e3 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -2,8 +2,6 @@ # Tags: atomic-database CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh @@ -172,13 +170,13 @@ $CLICKHOUSE_CLIENT -nq " drop table b; create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; drop table src;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Exception' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Exception' ] do sleep 0.1 done # Check exception, create src, expect successful refresh. $CLICKHOUSE_CLIENT -nq " - select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' from refreshes; + select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? 
'1' : exception from refreshes where view = 'c'; create table src (x Int64) engine Memory as select 1; system refresh view c;" while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] From fab0bb6aa41c9deccb69666ae0d22559c0784121 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 09:01:44 +0200 Subject: [PATCH 0589/2361] Remove all sleeps --- .../02932_refreshable_materialized_views.sh | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 36cdc8d88e3..9081035579d 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -132,7 +132,7 @@ while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshe do sleep 0.1 done -sleep 1 + $CLICKHOUSE_CLIENT -nq " select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; truncate src; @@ -222,22 +222,27 @@ done $CLICKHOUSE_CLIENT -nq " rename table e to f; select '<24: rename during refresh>', * from f; - select '<25: rename during refresh>', view, status from refreshes; + select '<25: rename during refresh>', view, status from refreshes where view = 'f'; alter table f modify refresh after 10 year;" -sleep 2 # make it likely that at least one row was processed + # Cancel. $CLICKHOUSE_CLIENT -nq " system cancel view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Cancelled' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Cancelled' ] do sleep 0.1 done + +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" = 'Running' ] +do + sleep 0.1 +done + # Check that another refresh doesn't immediately start after the cancelled one. 
-sleep 1 $CLICKHOUSE_CLIENT -nq " - select '<27: cancelled>', view, status from refreshes; + select '<27: cancelled>', view, status from refreshes where view = 'f'; system refresh view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] do sleep 0.1 done From a371557f07ab97b88da40e6b790a5d361f437a47 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 09:06:45 +0200 Subject: [PATCH 0590/2361] Make test `00997_set_index_array` lighter --- tests/queries/0_stateless/00997_set_index_array.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00997_set_index_array.sql b/tests/queries/0_stateless/00997_set_index_array.sql index d6d27f5a6a0..ed972d1a545 100644 --- a/tests/queries/0_stateless/00997_set_index_array.sql +++ b/tests/queries/0_stateless/00997_set_index_array.sql @@ -12,10 +12,10 @@ ORDER BY (primary_key); INSERT INTO set_array select - toString(intDiv(number, 1000000)) as primary_key, + toString(intDiv(number, 100000)) as primary_key, array(number) as index_array from system.numbers -limit 10000000; +limit 1000000; OPTIMIZE TABLE set_array FINAL; From 5ff125e37ef35ce5b451e54482e92d7663e25bdb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 10:32:54 +0200 Subject: [PATCH 0591/2361] Miscellaneous --- src/Common/CurrentMetrics.cpp | 8 ++++---- src/Databases/DatabaseLazy.cpp | 4 ++-- src/Databases/DatabasesCommon.cpp | 8 ++++---- src/Parsers/ExpressionListParsers.cpp | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 7c97e73f278..1011ab12d15 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -234,10 +234,10 @@ M(PartsCommitted, "Deprecated. 
See PartsActive.") \ M(PartsPreActive, "The part is in data_parts, but not used for SELECTs.") \ M(PartsActive, "Active data part, used by current and upcoming SELECTs.") \ - M(AttachedDatabase, "Active database, used by current and upcoming SELECTs.") \ - M(AttachedTable, "Active table, used by current and upcoming SELECTs.") \ - M(AttachedView, "Active view, used by current and upcoming SELECTs.") \ - M(AttachedDictionary, "Active dictionary, used by current and upcoming SELECTs.") \ + M(AttachedDatabase, "Active databases.") \ + M(AttachedTable, "Active tables.") \ + M(AttachedView, "Active views.") \ + M(AttachedDictionary, "Active dictionaries.") \ M(PartsOutdated, "Not active data part, but could be used by only current SELECTs, could be deleted after SELECTs finishes.") \ M(PartsDeleting, "Not active data part with identity refcounter, it is deleting right now by a cleaner.") \ M(PartsDeleteOnDestroy, "Part was moved to another disk and should be deleted in own destructor.") \ diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 5017c9b25cb..ca30ee6db15 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -195,7 +195,7 @@ void DatabaseLazy::attachTable(ContextPtr /* context_ */, const String & table_n snapshot_detached_tables.erase(table_name); } - CurrentMetrics::add(CurrentMetrics::AttachedTable, 1); + CurrentMetrics::add(CurrentMetrics::AttachedTable); } StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & table_name) @@ -221,7 +221,7 @@ StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & ta .metadata_path = getObjectMetadataPath(table_name), .is_permanently = false}); - CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1); + CurrentMetrics::sub(CurrentMetrics::AttachedTable); } return res; } diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index fe0baf30e57..6ccaf811764 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -289,8 +289,8 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n tables.erase(it); table_storage->is_detached = true; - if (table_storage->isSystemStorage() == false) - CurrentMetrics::sub(getAttachedCounterForStorage(table_storage), 1); + if (!table_storage->isSystemStorage() && database_name != DatabaseCatalog::SYSTEM_DATABASE) + CurrentMetrics::sub(getAttachedCounterForStorage(table_storage)); auto table_id = table_storage->getStorageID(); if (table_id.hasUUID()) @@ -334,8 +334,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c /// non-Atomic database the is_detached is set to true before RENAME. table->is_detached = false; - if (table->isSystemStorage() == false && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE) - CurrentMetrics::add(getAttachedCounterForStorage(table), 1); + if (!table->isSystemStorage() && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE) + CurrentMetrics::add(getAttachedCounterForStorage(table)); } void DatabaseWithOwnTablesBase::shutdown() diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index fff8383e7b3..f97c042e91e 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -2743,7 +2743,7 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po /// 'AND' can be both boolean function and part of the '... BETWEEN ... AND ...' 
operator if (op.function_name == "and" && layers.back()->between_counter) { - layers.back()->between_counter--; + --layers.back()->between_counter; op = finish_between_operator; } From ea83f89374f723d1d53dca64ec58e8fd993f013d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 10:39:03 +0200 Subject: [PATCH 0592/2361] Fix inconsistent formatting of lambda functions inside composite types --- src/Parsers/ASTFunction.cpp | 8 ++++---- ...ite_expressions_lambda_consistent_formatting.reference | 0 ...composite_expressions_lambda_consistent_formatting.sql | 6 ++++++ 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.reference create mode 100644 tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index f39229d7566..b04ec1c22b2 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -19,9 +18,6 @@ #include #include #include -#include - -#include using namespace std::literals; @@ -632,6 +628,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format settings.ostr << ", "; if (arguments->children[i]->as()) settings.ostr << "SETTINGS "; + nested_dont_need_parens.list_element_index = i; arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens); } settings.ostr << (settings.hilite ? hilite_operator : "") << ']' << (settings.hilite ? hilite_none : ""); @@ -642,12 +639,14 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format { settings.ostr << (settings.hilite ? hilite_operator : "") << ((frame.need_parens && !alias.empty()) ? "tuple" : "") << '(' << (settings.hilite ? hilite_none : ""); + for (size_t i = 0; i < arguments->children.size(); ++i) { if (i != 0) settings.ostr << ", "; if (arguments->children[i]->as()) settings.ostr << "SETTINGS "; + nested_dont_need_parens.list_element_index = i; arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens); } settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? hilite_none : ""); @@ -663,6 +662,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format settings.ostr << ", "; if (arguments->children[i]->as()) settings.ostr << "SETTINGS "; + nested_dont_need_parens.list_element_index = i; arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens); } settings.ostr << (settings.hilite ? hilite_operator : "") << ')' << (settings.hilite ? 
hilite_none : ""); diff --git a/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.reference b/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql b/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql new file mode 100644 index 00000000000..42c823cf476 --- /dev/null +++ b/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql @@ -0,0 +1,6 @@ +SELECT [1, (x -> 1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT (1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT map(1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT [1, lambda(x, 1)]; -- { serverError UNKNOWN_IDENTIFIER } +SELECT (1, lambda(x, 1)); -- { serverError UNKNOWN_IDENTIFIER } +SELECT map(1, lambda(x, 1)); -- { serverError UNKNOWN_IDENTIFIER } From 7f03b189667161de61e33523586a594694b02071 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 10:49:53 +0200 Subject: [PATCH 0593/2361] Fix two terrible bugs --- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ea10ad59db4..8c1a3cb4dff 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1606,7 +1606,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, UInt64 table_count = CurrentMetrics::get(CurrentMetrics::AttachedTable); if (table_count >= table_num_limit) throw Exception(ErrorCodes::TOO_MANY_TABLES, - "Too many tables in the Clickhouse. " + "Too many tables. " "The limit (setting 'max_table_num_to_throw') is set to {}, current number of tables is {}", table_num_limit, table_count); } From a6f1c46448a17f294cc6047dfc3fef3802b40abf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 11:08:38 +0200 Subject: [PATCH 0594/2361] Miscellaneous --- src/Interpreters/InterpreterCreateQuery.cpp | 9 ++++---- .../test_table_db_num_limit/test.py | 22 +++++++++++-------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 8c1a3cb4dff..2a4a02597fe 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -82,13 +81,13 @@ #include #include -#include #include #include #include #include + namespace CurrentMetrics { extern const Metric AttachedTable; @@ -166,8 +165,8 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) if (db_count >= db_num_limit) throw Exception(ErrorCodes::TOO_MANY_DATABASES, - "Too many databases in the Clickhouse. " - "The limit (setting 'max_database_num_to_throw') is set to {}, current number of databases is {}", + "Too many databases. " + "The limit (server configuration parameter `max_database_num_to_throw`) is set to {}, the current number of databases is {}", db_num_limit, db_count); } @@ -1607,7 +1606,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, if (table_count >= table_num_limit) throw Exception(ErrorCodes::TOO_MANY_TABLES, "Too many tables. 
" - "The limit (setting 'max_table_num_to_throw') is set to {}, current number of tables is {}", + "The limit (server configuration parameter `max_table_num_to_throw`) is set to {}, the current number of tables is {}", table_num_limit, table_count); } diff --git a/tests/integration/test_table_db_num_limit/test.py b/tests/integration/test_table_db_num_limit/test.py index aa8030b077c..56403d165b2 100644 --- a/tests/integration/test_table_db_num_limit/test.py +++ b/tests/integration/test_table_db_num_limit/test.py @@ -4,8 +4,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance( - "node1", main_configs=["config/config.xml"], with_zookeeper=True +node = cluster.add_instance( + "node", main_configs=["config/config.xml"] ) @@ -22,22 +22,26 @@ def started_cluster(): def test_table_db_limit(started_cluster): for i in range(10): - node1.query("create database db{}".format(i)) + node.query("create database db{}".format(i)) with pytest.raises(QueryRuntimeException) as exp_info: - node1.query("create database db_exp".format(i)) + node.query("create database db_exp".format(i)) assert "TOO_MANY_DATABASES" in str(exp_info) for i in range(10): - node1.query("create table t{} (a Int32) Engine = Log".format(i)) + node.query("create table t{} (a Int32) Engine = Log".format(i)) + + # This checks that system tables are not accounted in the number of tables. + node.query("system flush logs") - node1.query("system flush logs") for i in range(10): - node1.query("drop table t{}".format(i)) + node.query("drop table t{}".format(i)) + for i in range(10): - node1.query("create table t{} (a Int32) Engine = Log".format(i)) + node.query("create table t{} (a Int32) Engine = Log".format(i)) with pytest.raises(QueryRuntimeException) as exp_info: - node1.query("create table default.tx (a Int32) Engine = Log") + node.query("create table default.tx (a Int32) Engine = Log") + assert "TOO_MANY_TABLES" in str(exp_info) From efd5ff9f4c63c82d77cfda43af786ad3b7a4b8e0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 21 Jul 2024 09:15:13 +0000 Subject: [PATCH 0595/2361] Automatic style fix --- tests/integration/test_table_db_num_limit/test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/test_table_db_num_limit/test.py b/tests/integration/test_table_db_num_limit/test.py index 56403d165b2..a7bb04905f2 100644 --- a/tests/integration/test_table_db_num_limit/test.py +++ b/tests/integration/test_table_db_num_limit/test.py @@ -4,9 +4,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance( - "node", main_configs=["config/config.xml"] -) +node = cluster.add_instance("node", main_configs=["config/config.xml"]) @pytest.fixture(scope="module") From 7ed9ab2338f7de5a2495aad267cc688799c5ff18 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 11:29:12 +0200 Subject: [PATCH 0596/2361] Remove wrong logic from InterpreterCreateQuery --- src/Interpreters/InterpreterCreateQuery.cpp | 12 ++++++------ tests/integration/test_table_db_num_limit/test.py | 3 ++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 2a4a02597fe..7e0b6eb4193 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -146,21 +146,21 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) } auto db_num_limit = 
getContext()->getGlobalContext()->getServerSettings().max_database_num_to_throw; - if (db_num_limit > 0) + if (db_num_limit > 0 && !internal) { size_t db_count = DatabaseCatalog::instance().getDatabases().size(); - std::vector system_databases = { + std::initializer_list system_databases = + { DatabaseCatalog::TEMPORARY_DATABASE, DatabaseCatalog::SYSTEM_DATABASE, DatabaseCatalog::INFORMATION_SCHEMA, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE, - DatabaseCatalog::DEFAULT_DATABASE }; for (const auto & system_database : system_databases) { - if (db_count > 0 && DatabaseCatalog::instance().isDatabaseExist(system_database)) - db_count--; + if (db_count > 0 && DatabaseCatalog::instance().isDatabaseExist(std::string(system_database))) + --db_count; } if (db_count >= db_num_limit) @@ -1600,7 +1600,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, } UInt64 table_num_limit = getContext()->getGlobalContext()->getServerSettings().max_table_num_to_throw; - if (table_num_limit > 0 && create.getDatabase() != DatabaseCatalog::SYSTEM_DATABASE) + if (table_num_limit > 0 && !internal) { UInt64 table_count = CurrentMetrics::get(CurrentMetrics::AttachedTable); if (table_count >= table_num_limit) diff --git a/tests/integration/test_table_db_num_limit/test.py b/tests/integration/test_table_db_num_limit/test.py index 56403d165b2..aa6404c385c 100644 --- a/tests/integration/test_table_db_num_limit/test.py +++ b/tests/integration/test_table_db_num_limit/test.py @@ -21,7 +21,8 @@ def started_cluster(): def test_table_db_limit(started_cluster): - for i in range(10): + # By the way, default database already exists. + for i in range(9): node.query("create database db{}".format(i)) with pytest.raises(QueryRuntimeException) as exp_info: From 433ac55d13276e7a0b26ded07f27f17726703dab Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Sun, 21 Jul 2024 11:41:42 +0200 Subject: [PATCH 0597/2361] Correctly handle failure --- src/Common/Allocator.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Common/Allocator.cpp b/src/Common/Allocator.cpp index 7f2241ab4c0..1b43e746c69 100644 --- a/src/Common/Allocator.cpp +++ b/src/Common/Allocator.cpp @@ -206,7 +206,7 @@ void * Allocator::realloc(void * buf, size_t old_size, } else { - [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size); + [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(new_size); ProfileEvents::increment(ProfileEvents::GWPAsanAllocateFailed); } } @@ -239,7 +239,7 @@ void * Allocator::realloc(void * buf, size_t old_size, void * new_buf = ::realloc(buf, new_size); if (nullptr == new_buf) { - [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(old_size); + [[maybe_unused]] auto trace_free = CurrentMemoryTracker::free(new_size); throw DB::ErrnoException( DB::ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Allocator: Cannot realloc from {} to {}", From e3a0b6ab5ff21518a494ebede1aea47edda22b6c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 11:45:52 +0200 Subject: [PATCH 0598/2361] Randomize `trace_profile_events` --- tests/clickhouse-test | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 0c04d8fb2c3..e3aba5994d9 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -835,6 +835,7 @@ class SettingsRandomizer: "cross_join_min_bytes_to_compress": lambda: random.choice([0, 1, 100000000]), "min_external_table_block_size_bytes": lambda: random.choice([0, 1, 100000000]), "max_parsing_threads": lambda: 
random.choice([0, 1, 10]), + "trace_profile_events": lambda: random.randint(0, 1), } @staticmethod From 08353419d0798f209627f16f840f4d101227431d Mon Sep 17 00:00:00 2001 From: Max K Date: Sun, 21 Jul 2024 11:50:15 +0200 Subject: [PATCH 0599/2361] CI: Never await on BuildReport job, Skip BuildReport if no builds in workflow --- tests/ci/ci_cache.py | 27 ++++++++++---- tests/ci/ci_definitions.py | 3 ++ tests/ci/merge_pr.py | 5 ++- tests/ci/test_ci_config.py | 74 ++++++++++++++++++++++++++++++++++---- 4 files changed, 96 insertions(+), 13 deletions(-) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index cfefb954fcd..16b6eac1ecb 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -638,7 +638,7 @@ class CiCache: pushes pending records for all jobs that supposed to be run """ for job, job_config in self.jobs_to_do.items(): - if not job_config.has_digest(): + if not job_config.has_digest() or job_config.disable_await: continue pending_state = PendingState(time.time(), run_url=GITHUB_RUN_URL) assert job_config.batches @@ -708,7 +708,7 @@ class CiCache: Filter is to be applied in PRs to remove jobs that are not affected by the change :return: """ - remove_from_to_do = [] + remove_from_workflow = [] required_builds = [] has_test_jobs_to_skip = False for job_name, job_config in self.jobs_to_do.items(): @@ -723,26 +723,41 @@ class CiCache: job=reference_name, job_config=reference_config, ): - remove_from_to_do.append(job_name) + remove_from_workflow.append(job_name) has_test_jobs_to_skip = True else: required_builds += ( job_config.required_builds if job_config.required_builds else [] ) if has_test_jobs_to_skip: - # If there are tests to skip, it means build digest has not been changed. + # If there are tests to skip, it means builds are not affected as well. # No need to test builds. Let's keep all builds required for test jobs and skip the others for job_name, job_config in self.jobs_to_do.items(): if CI.is_build_job(job_name): if job_name not in required_builds: - remove_from_to_do.append(job_name) + remove_from_workflow.append(job_name) - for job in remove_from_to_do: + for job in remove_from_workflow: print(f"Filter job [{job}] - not affected by the change") if job in self.jobs_to_do: del self.jobs_to_do[job] if job in self.jobs_to_wait: del self.jobs_to_wait[job] + if job in self.jobs_to_skip: + self.jobs_to_skip.remove(job) + + # special handling for the special job: BUILD_CHECK + has_builds = False + for job in list(self.jobs_to_do) + self.jobs_to_skip: + if CI.is_build_job(job): + has_builds = True + break + if not has_builds: + if CI.JobNames.BUILD_CHECK in self.jobs_to_do: + print( + f"Filter job [{CI.JobNames.BUILD_CHECK}] - no builds are required in the workflow" + ) + del self.jobs_to_do[CI.JobNames.BUILD_CHECK] def await_pending_jobs(self, is_release: bool, dry_run: bool = False) -> None: """ diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 4c78efd39a2..a8d9793f1d3 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -351,6 +351,8 @@ class JobConfig: run_by_label: str = "" # to run always regardless of the job digest or/and label run_always: bool = False + # disables CI await for a given job + disable_await: bool = False # if the job needs to be run on the release branch, including master (building packages, docker server). # NOTE: Subsequent runs on the same branch with the similar digest are still considered skip-able. 
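    # A note on how `disable_await` is consumed, based on the other hunks in this patch:
    # jobs carrying the flag get no pending record pushed in CiCache (ci_cache.py above),
    # and the unit tests below keep them out of jobs_to_wait ("We must never await on
    # Builds Report"), so the workflow never blocks waiting for such a job to finish.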
required_on_release_branch: bool = False @@ -395,6 +397,7 @@ class CommonJobConfigs: ], ), runner_type=Runners.STYLE_CHECKER_ARM, + disable_await=True, ) COMPATIBILITY_TEST = JobConfig( job_name_keyword="compatibility", diff --git a/tests/ci/merge_pr.py b/tests/ci/merge_pr.py index 4d8facafb84..13c7537a84b 100644 --- a/tests/ci/merge_pr.py +++ b/tests/ci/merge_pr.py @@ -254,11 +254,14 @@ def main(): statuses = get_commit_filtered_statuses(commit) has_failed_statuses = False + has_native_failed_status = False for status in statuses: print(f"Check status [{status.context}], [{status.state}]") if CI.is_required(status.context) and status.state != SUCCESS: print(f"WARNING: Failed status [{status.context}], [{status.state}]") has_failed_statuses = True + if status.context != CI.StatusNames.SYNC: + has_native_failed_status = True if args.wf_status == SUCCESS or has_failed_statuses: # set Mergeable check if workflow is successful (green) @@ -280,7 +283,7 @@ def main(): print( "Workflow failed but no failed statuses found (died runner?) - cannot set Mergeable Check status" ) - if args.wf_status == SUCCESS and not has_failed_statuses: + if args.wf_status == SUCCESS and not has_native_failed_status: sys.exit(0) else: sys.exit(1) diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 12e863c4d8d..10867ea1444 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -587,11 +587,11 @@ class TestCIConfig(unittest.TestCase): for job, job_config in ci_cache.jobs_to_do.items(): if job in MOCK_AFFECTED_JOBS: MOCK_REQUIRED_BUILDS += job_config.required_builds - elif job not in MOCK_AFFECTED_JOBS: + elif job not in MOCK_AFFECTED_JOBS and not job_config.disable_await: ci_cache.jobs_to_wait[job] = job_config for job, job_config in ci_cache.jobs_to_do.items(): - if job_config.reference_job_name: + if job_config.reference_job_name or job_config.disable_await: # jobs with reference_job_name in config are not supposed to have records in the cache - continue continue if job in MOCK_AFFECTED_JOBS: @@ -624,11 +624,73 @@ class TestCIConfig(unittest.TestCase): + MOCK_AFFECTED_JOBS + MOCK_REQUIRED_BUILDS ) + self.assertTrue(CI.JobNames.BUILD_CHECK not in ci_cache.jobs_to_wait, "We must never await on Builds Report") self.assertCountEqual( list(ci_cache.jobs_to_wait), - [ - CI.JobNames.BUILD_CHECK, - ] - + MOCK_REQUIRED_BUILDS, + MOCK_REQUIRED_BUILDS, + ) + self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do) + + def test_ci_py_filters_not_affected_jobs_in_prs_no_builds(self): + """ + checks ci.py filters not affected jobs in PRs, no builds required + """ + settings = CiSettings() + settings.no_ci_cache = True + pr_info = PRInfo(github_event=_TEST_EVENT_JSON) + pr_info.event_type = EventType.PULL_REQUEST + pr_info.number = 123 + assert pr_info.is_pr + ci_cache = CIPY._configure_jobs( + S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True + ) + self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list") + assert not ci_cache.jobs_to_wait + assert not ci_cache.jobs_to_skip + + MOCK_AFFECTED_JOBS = [ + CI.JobNames.DOCS_CHECK, + ] + MOCK_REQUIRED_BUILDS = [] + + # pretend there are pending jobs that we need to wait + for job, job_config in ci_cache.jobs_to_do.items(): + if job in MOCK_AFFECTED_JOBS: + if job_config.required_builds: + MOCK_REQUIRED_BUILDS += job_config.required_builds + elif job not in MOCK_AFFECTED_JOBS and not job_config.disable_await: + ci_cache.jobs_to_wait[job] = job_config + + for job, job_config in ci_cache.jobs_to_do.items(): + 
if job_config.reference_job_name or job_config.disable_await: + # jobs with reference_job_name in config are not supposed to have records in the cache - continue + continue + if job in MOCK_AFFECTED_JOBS: + continue + for batch in range(job_config.num_batches): + # add any record into cache + record = CiCache.Record( + record_type=random.choice( + [ + CiCache.RecordType.FAILED, + CiCache.RecordType.PENDING, + CiCache.RecordType.SUCCESSFUL, + ] + ), + job_name=job, + job_digest=ci_cache.job_digests[job], + batch=batch, + num_batches=job_config.num_batches, + release_branch=True, + ) + for record_t_, records_ in ci_cache.records.items(): + if record_t_.value == CiCache.RecordType.FAILED.value: + records_[record.to_str_key()] = record + + ci_cache.filter_out_not_affected_jobs() + expected_to_do = MOCK_AFFECTED_JOBS + MOCK_REQUIRED_BUILDS + self.assertCountEqual( + list(ci_cache.jobs_to_wait), + MOCK_REQUIRED_BUILDS, ) self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do) From 19f8e1768f5095618c4e0cded2c8299968aaf0fb Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 20 Jul 2024 16:36:19 +0200 Subject: [PATCH 0600/2361] Add const to cycles. --- src/Databases/DatabaseReplicated.cpp | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 2 +- src/Parsers/ASTViewTargets.cpp | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 25d1ad90a3c..4c079ae5300 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -734,7 +734,7 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_ if (create->targets) { - for (auto inner_table_engine : create->targets->getInnerEngines()) + for (const auto & inner_table_engine : create->targets->getInnerEngines()) checkTableEngine(*create, *inner_table_engine, query_context); } } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index faa91341a7c..342374aa580 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1384,7 +1384,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (create.targets) { - for (auto inner_table_engine : create.targets->getInnerEngines()) + for (const auto & inner_table_engine : create.targets->getInnerEngines()) { if (isReplicated(*inner_table_engine)) is_storage_replicated = true; diff --git a/src/Parsers/ASTViewTargets.cpp b/src/Parsers/ASTViewTargets.cpp index 38f103b6e55..8ee98e704df 100644 --- a/src/Parsers/ASTViewTargets.cpp +++ b/src/Parsers/ASTViewTargets.cpp @@ -43,7 +43,7 @@ std::vector ASTViewTargets::getKinds() const { std::vector kinds; kinds.reserve(targets.size()); - for (auto & target : targets) + for (const auto & target : targets) kinds.push_back(target.kind); return kinds; } @@ -121,7 +121,7 @@ void ASTViewTargets::resetInnerUUIDs() bool ASTViewTargets::hasInnerUUIDs() const { - for (auto & target : targets) + for (const auto & target : targets) { if (target.inner_uuid != UUIDHelpers::Nil) return true; From c61581ca69757ce9c1d15b6ccc139d48d27f5b07 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 12:06:53 +0200 Subject: [PATCH 0601/2361] Fix error --- src/Common/TraceSender.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/Common/TraceSender.cpp b/src/Common/TraceSender.cpp index 064da1b3d76..f1adf7c516a 100644 --- a/src/Common/TraceSender.cpp +++ 
b/src/Common/TraceSender.cpp @@ -26,11 +26,16 @@ LazyPipeFDs TraceSender::pipe; static thread_local bool inside_send = false; void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Extras extras) { - DENY_ALLOCATIONS_IN_SCOPE; - + /** The method shouldn't be called recursively or throw exceptions. + * There are several reasons: + * - avoid infinite recursion when some of subsequent functions invoke tracing; + * - avoid inconsistent writes if the method was interrupted by a signal handler in the middle of writing, + * and then another tracing is invoked (e.g., from query profiler). + */ if (unlikely(inside_send)) - abort(); /// The method shouldn't be called recursively or throw exceptions. + return; inside_send = true; + DENY_ALLOCATIONS_IN_SCOPE; constexpr size_t buf_size = sizeof(char) /// TraceCollector stop flag + sizeof(UInt8) /// String size From 0c2c027af63fcbababffbe3a39ed2631884e1938 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 12:30:20 +0200 Subject: [PATCH 0602/2361] Remove bad tests @azat --- ...2_part_log_rmt_fetch_merge_error.reference | 10 ----- .../03002_part_log_rmt_fetch_merge_error.sql | 35 ---------------- ..._part_log_rmt_fetch_mutate_error.reference | 10 ----- .../03002_part_log_rmt_fetch_mutate_error.sql | 41 ------------------- 4 files changed, 96 deletions(-) delete mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference delete mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql delete mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference delete mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference deleted file mode 100644 index b19d389d8d0..00000000000 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference +++ /dev/null @@ -1,10 +0,0 @@ -before -rmt_master NewPart 0 1 -rmt_master MergeParts 0 1 -rmt_slave MergeParts 1 0 -rmt_slave DownloadPart 0 1 -after -rmt_master NewPart 0 1 -rmt_master MergeParts 0 1 -rmt_slave MergeParts 1 0 -rmt_slave DownloadPart 0 2 diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql deleted file mode 100644 index 548a8e5570a..00000000000 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql +++ /dev/null @@ -1,35 +0,0 @@ --- Tags: no-replicated-database, no-parallel, no-shared-merge-tree --- SMT: The merge process is completely different from RMT - -drop table if exists rmt_master; -drop table if exists rmt_slave; - -create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0; --- always_fetch_merged_part=1, consider this table as a "slave" -create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1; - -insert into rmt_master values (1); - -system sync replica rmt_master; -system sync replica rmt_slave; -system stop replicated sends rmt_master; -optimize table rmt_master final settings alter_sync=1, optimize_throw_if_noop=1; - -select sleep(3) format Null; - -system flush logs; -select 'before'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 
3 order by 1, 2, 3; - -system start replicated sends rmt_master; --- sleep few seconds to try rmt_slave to fetch the part and reflect this error --- in system.part_log -select sleep(3) format Null; -system sync replica rmt_slave; - -system flush logs; -select 'after'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -drop table rmt_master; -drop table rmt_slave; diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference deleted file mode 100644 index aac9e7527d1..00000000000 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference +++ /dev/null @@ -1,10 +0,0 @@ -before -rmt_master NewPart 0 1 -rmt_master MutatePart 0 1 -rmt_slave DownloadPart 0 1 -rmt_slave MutatePart 1 0 -after -rmt_master NewPart 0 1 -rmt_master MutatePart 0 1 -rmt_slave DownloadPart 0 2 -rmt_slave MutatePart 1 0 diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql deleted file mode 100644 index d8b5ebb3148..00000000000 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql +++ /dev/null @@ -1,41 +0,0 @@ --- Tags: no-replicated-database, no-parallel, no-shared-merge-tree --- SMT: The merge process is completely different from RMT - -drop table if exists rmt_master; -drop table if exists rmt_slave; - -create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by tuple() settings always_fetch_merged_part=0, old_parts_lifetime=600; --- prefer_fetch_merged_part_*_threshold=0, consider this table as a "slave" -create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by tuple() settings prefer_fetch_merged_part_time_threshold=0, prefer_fetch_merged_part_size_threshold=0, old_parts_lifetime=600; - -insert into rmt_master values (1); - -system sync replica rmt_master; -system sync replica rmt_slave; -system stop replicated sends rmt_master; -system stop pulling replication log rmt_slave; -alter table rmt_master update key=key+100 where 1 settings alter_sync=1; - --- first we need to make the rmt_master execute mutation so that it will have --- the part, and rmt_slave will consider it instead of performing mutation on --- it's own, otherwise prefer_fetch_merged_part_*_threshold will be simply ignored -select sleep(3) format Null; -system start pulling replication log rmt_slave; --- and sleep few more seconds to try rmt_slave to fetch the part and reflect --- this error in system.part_log -select sleep(3) format Null; - -system flush logs; -select 'before'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -system start replicated sends rmt_master; -select sleep(3) format Null; -system sync replica rmt_slave; - -system flush logs; -select 'after'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -drop table rmt_master; -drop table rmt_slave; From 02bf9e4243c329611fc0be43432d3f9c290512d5 Mon Sep 17 00:00:00 2001 From: Max K Date: Sun, 21 Jul 2024 12:46:58 +0200 Subject: [PATCH 0603/2361] push pending records before await call --- tests/ci/ci.py | 5 +- tests/ci/ci_cache.py | 93 +++++++++++++++++++++++++++++++++++++- 
tests/ci/test_ci_config.py | 5 +- 3 files changed, 99 insertions(+), 4 deletions(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index ff31d1ce489..f6bec5304a0 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1110,13 +1110,14 @@ def main() -> int: ci_cache.print_status() if IS_CI and not pr_info.is_merge_queue: - # wait for pending jobs to be finished, await_jobs is a long blocking call - ci_cache.await_pending_jobs(pr_info.is_release) if pr_info.is_release: print("Release/master: CI Cache add pending records for all todo jobs") ci_cache.push_pending_all(pr_info.is_release) + # wait for pending jobs to be finished, await_jobs is a long blocking call + ci_cache.await_pending_jobs(pr_info.is_release) + # conclude results result["git_ref"] = git_ref result["version"] = version diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 16b6eac1ecb..85eabb84f9f 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -638,7 +638,14 @@ class CiCache: pushes pending records for all jobs that supposed to be run """ for job, job_config in self.jobs_to_do.items(): - if not job_config.has_digest() or job_config.disable_await: + if ( + job in self.jobs_to_wait + or not job_config.has_digest() + or job_config.disable_await + ): + # 1. "job in self.jobs_to_wait" - this job already has a pending record in cache + # 2. "not job_config.has_digest()" - cache is not used for these jobs + # 3. "job_config.disable_await" - await is explicitly disabled continue pending_state = PendingState(time.time(), run_url=GITHUB_RUN_URL) assert job_config.batches @@ -899,3 +906,87 @@ class CiCache: self.jobs_to_wait[job] = job_config return self + + +if __name__ == "__main__": + # for testing + job_digest = { + "package_release": "bbbd3519d1", + "package_aarch64": "bbbd3519d1", + "package_asan": "bbbd3519d1", + "package_ubsan": "bbbd3519d1", + "package_tsan": "bbbd3519d1", + "package_msan": "bbbd3519d1", + "package_debug": "bbbd3519d1", + "package_release_coverage": "bbbd3519d1", + "binary_release": "bbbd3519d1", + "binary_tidy": "bbbd3519d1", + "binary_darwin": "bbbd3519d1", + "binary_aarch64": "bbbd3519d1", + "binary_aarch64_v80compat": "bbbd3519d1", + "binary_freebsd": "bbbd3519d1", + "binary_darwin_aarch64": "bbbd3519d1", + "binary_ppc64le": "bbbd3519d1", + "binary_amd64_compat": "bbbd3519d1", + "binary_amd64_musl": "bbbd3519d1", + "binary_riscv64": "bbbd3519d1", + "binary_s390x": "bbbd3519d1", + "binary_loongarch64": "bbbd3519d1", + "Builds": "f5dffeecb8", + "Install packages (release)": "ba0c89660e", + "Install packages (aarch64)": "ba0c89660e", + "Stateful tests (asan)": "32a9a1aba9", + "Stateful tests (tsan)": "32a9a1aba9", + "Stateful tests (msan)": "32a9a1aba9", + "Stateful tests (ubsan)": "32a9a1aba9", + "Stateful tests (debug)": "32a9a1aba9", + "Stateful tests (release)": "32a9a1aba9", + "Stateful tests (coverage)": "32a9a1aba9", + "Stateful tests (aarch64)": "32a9a1aba9", + "Stateful tests (release, ParallelReplicas)": "32a9a1aba9", + "Stateful tests (debug, ParallelReplicas)": "32a9a1aba9", + "Stateless tests (asan)": "deb6778b88", + "Stateless tests (tsan)": "deb6778b88", + "Stateless tests (msan)": "deb6778b88", + "Stateless tests (ubsan)": "deb6778b88", + "Stateless tests (debug)": "deb6778b88", + "Stateless tests (release)": "deb6778b88", + "Stateless tests (coverage)": "deb6778b88", + "Stateless tests (aarch64)": "deb6778b88", + "Stateless tests (release, old analyzer, s3, DatabaseReplicated)": "deb6778b88", + "Stateless tests (debug, s3 storage)": "deb6778b88", + "Stateless tests (tsan, 
s3 storage)": "deb6778b88", + "Stress test (debug)": "aa298abf10", + "Stress test (tsan)": "aa298abf10", + "Upgrade check (debug)": "5ce4d3ee02", + "Integration tests (asan, old analyzer)": "42e58be3aa", + "Integration tests (tsan)": "42e58be3aa", + "Integration tests (aarch64)": "42e58be3aa", + "Integration tests flaky check (asan)": "42e58be3aa", + "Compatibility check (release)": "ecb69d8c4b", + "Compatibility check (aarch64)": "ecb69d8c4b", + "Unit tests (release)": "09d00b702e", + "Unit tests (asan)": "09d00b702e", + "Unit tests (msan)": "09d00b702e", + "Unit tests (tsan)": "09d00b702e", + "Unit tests (ubsan)": "09d00b702e", + "AST fuzzer (debug)": "c38ebf947f", + "AST fuzzer (asan)": "c38ebf947f", + "AST fuzzer (msan)": "c38ebf947f", + "AST fuzzer (tsan)": "c38ebf947f", + "AST fuzzer (ubsan)": "c38ebf947f", + "Stateless tests flaky check (asan)": "deb6778b88", + "Performance Comparison (release)": "a8a7179258", + "ClickBench (release)": "45c07c4aa6", + "ClickBench (aarch64)": "45c07c4aa6", + "Docker server image": "6a24d5b187", + "Docker keeper image": "6a24d5b187", + "Docs check": "4764154c62", + "Fast test": "cb269133f2", + "Style check": "ffffffffff", + "Stateful tests (ubsan, ParallelReplicas)": "32a9a1aba9", + "Stress test (msan)": "aa298abf10", + "Upgrade check (asan)": "5ce4d3ee02", + } + ci_cache = CiCache(job_digests=job_digest, cache_enabled=True, s3=S3Helper()) + ci_cache.update() diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 10867ea1444..04fd44a87e9 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -624,7 +624,10 @@ class TestCIConfig(unittest.TestCase): + MOCK_AFFECTED_JOBS + MOCK_REQUIRED_BUILDS ) - self.assertTrue(CI.JobNames.BUILD_CHECK not in ci_cache.jobs_to_wait, "We must never await on Builds Report") + self.assertTrue( + CI.JobNames.BUILD_CHECK not in ci_cache.jobs_to_wait, + "We must never await on Builds Report", + ) self.assertCountEqual( list(ci_cache.jobs_to_wait), MOCK_REQUIRED_BUILDS, From d83428daafc665542023fac0a9add048603ad224 Mon Sep 17 00:00:00 2001 From: Max K Date: Sun, 21 Jul 2024 14:52:33 +0200 Subject: [PATCH 0604/2361] fix in ci unittests --- tests/ci/test_ci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 04fd44a87e9..44142050821 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -652,7 +652,7 @@ class TestCIConfig(unittest.TestCase): assert not ci_cache.jobs_to_skip MOCK_AFFECTED_JOBS = [ - CI.JobNames.DOCS_CHECK, + CI.JobNames.FAST_TEST, ] MOCK_REQUIRED_BUILDS = [] From 09e4faf2dbadfdf1eaedc0eec127098c6b9540f1 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sun, 21 Jul 2024 15:05:26 +0000 Subject: [PATCH 0605/2361] fix --- src/Interpreters/InterpreterOptimizeQuery.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp index 3bee235185d..907a01b0432 100644 --- a/src/Interpreters/InterpreterOptimizeQuery.cpp +++ b/src/Interpreters/InterpreterOptimizeQuery.cpp @@ -20,7 +20,6 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; - extern const int NOT_IMPLEMENTED; } From 4b8b9b1503800398ccbf68e6a6134838fd67dac5 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 21 Jul 2024 16:30:59 +0000 Subject: [PATCH 0606/2361] Fix UB in function "age" --- src/Functions/DateTimeTransforms.h | 5 ++++- tests/queries/0_stateless/02477_age_datetime64.reference | 5 +++++ 
tests/queries/0_stateless/02477_age_datetime64.sql | 4 ++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 34c59ecab08..5f745f3ccad 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1954,7 +1954,10 @@ struct ToRelativeSubsecondNumImpl return t.value; if (scale > scale_multiplier) return t.value / (scale / scale_multiplier); - return t.value * (scale_multiplier / scale); + return static_cast(t.value) * static_cast((scale_multiplier / scale)); + /// Casting ^^: All integers are Int64, yet if t.value is big enough the multiplication can still + /// overflow which is UB. This place is too low-level and generic to check if t.value is sane. + /// Therefore just let it overflow safely and don't bother further. } static Int64 execute(UInt32 t, const DateLUTImpl &) { diff --git a/tests/queries/0_stateless/02477_age_datetime64.reference b/tests/queries/0_stateless/02477_age_datetime64.reference index 3b4459dd26d..fb085f461c9 100644 --- a/tests/queries/0_stateless/02477_age_datetime64.reference +++ b/tests/queries/0_stateless/02477_age_datetime64.reference @@ -111,3 +111,8 @@ SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), ma 1 SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC'))); 1 +-- UBsan bug #66638 +set session_timezone = 'UTC'; +SELECT age('second', toDateTime(1157339245694594829, 6, 'UTC'), toDate('2015-08-18')) + +-8973935999 diff --git a/tests/queries/0_stateless/02477_age_datetime64.sql b/tests/queries/0_stateless/02477_age_datetime64.sql index 1bed93991ca..b5fa4da8837 100644 --- a/tests/queries/0_stateless/02477_age_datetime64.sql +++ b/tests/queries/0_stateless/02477_age_datetime64.sql @@ -75,3 +75,7 @@ SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC'))); SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC'))); SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC'))); + +-- UBsan bug #66638 +set session_timezone = 'UTC'; +SELECT age('second', toDateTime(1157339245694594829, 6, 'UTC'), toDate('2015-08-18')) From 8786d9b5dd5f93d5c1f22c4d618093d69f8d57a3 Mon Sep 17 00:00:00 2001 From: joelynch Date: Sun, 21 Jul 2024 20:00:38 +0200 Subject: [PATCH 0607/2361] Ensure COMMENT clause works for all table engines --- src/Databases/SQLite/DatabaseSQLite.cpp | 1 + src/Storages/Kafka/StorageKafka.cpp | 10 +++++++--- src/Storages/Kafka/StorageKafka.h | 1 + src/Storages/NATS/StorageNATS.cpp | 4 +++- src/Storages/NATS/StorageNATS.h | 1 + .../PostgreSQL/StorageMaterializedPostgreSQL.cpp | 1 + src/Storages/RabbitMQ/StorageRabbitMQ.cpp | 4 +++- src/Storages/RabbitMQ/StorageRabbitMQ.h | 1 + src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp | 1 + src/Storages/StorageExecutable.cpp | 7 ++++--- src/Storages/StorageExecutable.h | 3 ++- src/Storages/StorageKeeperMap.cpp | 1 + src/Storages/StorageSQLite.cpp | 4 +++- src/Storages/StorageSQLite.h | 1 + src/Storages/WindowView/StorageWindowView.cpp | 4 +++- src/Storages/WindowView/StorageWindowView.h | 1 + src/TableFunctions/TableFunctionExecutable.cpp | 9 ++++++++- src/TableFunctions/TableFunctionSQLite.cpp | 2 +- 18 files changed, 43 insertions(+), 13 
deletions(-) diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index 132a978140c..471730fce29 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -154,6 +154,7 @@ StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr loca table_name, ColumnsDescription{*columns}, ConstraintsDescription{}, + /* comment = */ "", local_context); return storage; diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 809401bb279..3aad64a0cfb 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -418,8 +418,11 @@ namespace } StorageKafka::StorageKafka( - const StorageID & table_id_, ContextPtr context_, - const ColumnsDescription & columns_, std::unique_ptr kafka_settings_, + const StorageID & table_id_, + ContextPtr context_, + const ColumnsDescription & columns_, + const String & comment, + std::unique_ptr kafka_settings_, const String & collection_name_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) @@ -451,6 +454,7 @@ StorageKafka::StorageKafka( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); + storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); setVirtuals(createVirtuals(kafka_settings->kafka_handle_error_mode)); @@ -1317,7 +1321,7 @@ void registerStorageKafka(StorageFactory & factory) "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); } - return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(kafka_settings), collection_name); + return std::make_shared(args.table_id, args.getContext(), args.columns, args.comment, std::move(kafka_settings), collection_name); }; factory.registerStorage("Kafka", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, }); diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index fa4affbda36..31e1a6076b6 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -40,6 +40,7 @@ public: const StorageID & table_id_, ContextPtr context_, const ColumnsDescription & columns_, + const String & comment, std::unique_ptr kafka_settings_, const String & collection_name_); diff --git a/src/Storages/NATS/StorageNATS.cpp b/src/Storages/NATS/StorageNATS.cpp index 8f0e2d76473..9d728c3395f 100644 --- a/src/Storages/NATS/StorageNATS.cpp +++ b/src/Storages/NATS/StorageNATS.cpp @@ -49,6 +49,7 @@ StorageNATS::StorageNATS( const StorageID & table_id_, ContextPtr context_, const ColumnsDescription & columns_, + const String & comment, std::unique_ptr nats_settings_, LoadingStrictnessLevel mode) : IStorage(table_id_) @@ -87,6 +88,7 @@ StorageNATS::StorageNATS( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); + storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); setVirtuals(createVirtuals(nats_settings->nats_handle_error_mode)); @@ -760,7 +762,7 @@ void registerStorageNATS(StorageFactory & factory) if (!nats_settings->nats_subjects.changed) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "You must specify `nats_subjects` setting"); - return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(nats_settings), args.mode); + return std::make_shared(args.table_id, args.getContext(), args.columns, args.comment, std::move(nats_settings), args.mode); }; factory.registerStorage("NATS", creator_fn, 
StorageFactory::StorageFeatures{ .supports_settings = true, }); diff --git a/src/Storages/NATS/StorageNATS.h b/src/Storages/NATS/StorageNATS.h index 41d77acfde6..5fca8cb0163 100644 --- a/src/Storages/NATS/StorageNATS.h +++ b/src/Storages/NATS/StorageNATS.h @@ -23,6 +23,7 @@ public: const StorageID & table_id_, ContextPtr context_, const ColumnsDescription & columns_, + const String & comment, std::unique_ptr nats_settings_, LoadingStrictnessLevel mode); diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index a904b29e12f..f4c38a52a3f 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -571,6 +571,7 @@ void registerStorageMaterializedPostgreSQL(StorageFactory & factory) StorageInMemoryMetadata metadata; metadata.setColumns(args.columns); metadata.setConstraints(args.constraints); + metadata.setComment(args.comment); if (args.mode <= LoadingStrictnessLevel::CREATE && !args.getLocalContext()->getSettingsRef().allow_experimental_materialized_postgresql_table) diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index f3d2aff68c8..9e3c40071b5 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -70,6 +70,7 @@ StorageRabbitMQ::StorageRabbitMQ( const StorageID & table_id_, ContextPtr context_, const ColumnsDescription & columns_, + const String & comment, std::unique_ptr rabbitmq_settings_, LoadingStrictnessLevel mode) : IStorage(table_id_) @@ -145,6 +146,7 @@ StorageRabbitMQ::StorageRabbitMQ( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); + storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); setVirtuals(createVirtuals(rabbitmq_settings->rabbitmq_handle_error_mode)); @@ -1288,7 +1290,7 @@ void registerStorageRabbitMQ(StorageFactory & factory) if (!rabbitmq_settings->rabbitmq_format.changed) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "You must specify `rabbitmq_format` setting"); - return std::make_shared(args.table_id, args.getContext(), args.columns, std::move(rabbitmq_settings), args.mode); + return std::make_shared(args.table_id, args.getContext(), args.columns, args.comment, std::move(rabbitmq_settings), args.mode); }; factory.registerStorage("RabbitMQ", creator_fn, StorageFactory::StorageFeatures{ .supports_settings = true, }); diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index b8fab5825e4..fed80a4357b 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -26,6 +26,7 @@ public: const StorageID & table_id_, ContextPtr context_, const ColumnsDescription & columns_, + const String & comment, std::unique_ptr rabbitmq_settings_, LoadingStrictnessLevel mode); diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index 409703c84c6..fafc72da04e 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -691,6 +691,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) StorageInMemoryMetadata metadata; metadata.setColumns(args.columns); metadata.setConstraints(args.constraints); + metadata.setComment(args.comment); if (!args.storage_def->primary_key) throw Exception(ErrorCodes::BAD_ARGUMENTS, "StorageEmbeddedRocksDB must require one column 
in primary key"); diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 381c20c616d..0094723e3fd 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -77,7 +77,8 @@ StorageExecutable::StorageExecutable( const ExecutableSettings & settings_, const std::vector & input_queries_, const ColumnsDescription & columns, - const ConstraintsDescription & constraints) + const ConstraintsDescription & constraints, + const String & comment) : IStorage(table_id_) , settings(settings_) , input_queries(input_queries_) @@ -86,6 +87,7 @@ StorageExecutable::StorageExecutable( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns); storage_metadata.setConstraints(constraints); + storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); ShellCommandSourceCoordinator::Configuration configuration @@ -237,7 +239,7 @@ void registerStorageExecutable(StorageFactory & factory) settings.loadFromQuery(*args.storage_def); auto global_context = args.getContext()->getGlobalContext(); - return std::make_shared(args.table_id, format, settings, input_queries, columns, constraints); + return std::make_shared(args.table_id, format, settings, input_queries, columns, constraints, args.comment); }; StorageFactory::StorageFeatures storage_features; @@ -255,4 +257,3 @@ void registerStorageExecutable(StorageFactory & factory) } } - diff --git a/src/Storages/StorageExecutable.h b/src/Storages/StorageExecutable.h index 2be2a84ab49..6748bb3223e 100644 --- a/src/Storages/StorageExecutable.h +++ b/src/Storages/StorageExecutable.h @@ -22,7 +22,8 @@ public: const ExecutableSettings & settings, const std::vector & input_queries, const ColumnsDescription & columns, - const ConstraintsDescription & constraints); + const ConstraintsDescription & constraints, + const String & comment); String getName() const override { diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 587cb621362..16caf01955e 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -1280,6 +1280,7 @@ StoragePtr create(const StorageFactory::Arguments & args) StorageInMemoryMetadata metadata; metadata.setColumns(args.columns); metadata.setConstraints(args.constraints); + metadata.setComment(args.comment); if (!args.storage_def->primary_key) throw Exception(ErrorCodes::BAD_ARGUMENTS, "StorageKeeperMap requires one column in primary key"); diff --git a/src/Storages/StorageSQLite.cpp b/src/Storages/StorageSQLite.cpp index 85417a2f2a4..b90b15f3b99 100644 --- a/src/Storages/StorageSQLite.cpp +++ b/src/Storages/StorageSQLite.cpp @@ -50,6 +50,7 @@ StorageSQLite::StorageSQLite( const String & remote_table_name_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, + const String & comment, ContextPtr context_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) @@ -71,6 +72,7 @@ StorageSQLite::StorageSQLite( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); + storage_metadata.setComment(comment); } @@ -211,7 +213,7 @@ void registerStorageSQLite(StorageFactory & factory) auto sqlite_db = openSQLiteDB(database_path, args.getContext(), /* throw_on_error */ args.mode <= LoadingStrictnessLevel::CREATE); return std::make_shared(args.table_id, sqlite_db, database_path, - table_name, args.columns, args.constraints, args.getContext()); + table_name, args.columns, args.constraints, args.comment, args.getContext()); }, { 
.supports_schema_inference = true, diff --git a/src/Storages/StorageSQLite.h b/src/Storages/StorageSQLite.h index ed673123fe0..97638ac04cb 100644 --- a/src/Storages/StorageSQLite.h +++ b/src/Storages/StorageSQLite.h @@ -27,6 +27,7 @@ public: const String & remote_table_name_, const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, + const String & comment, ContextPtr context_); std::string getName() const override { return "SQLite"; } diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index e15da0074d5..7e1bca7d0d6 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1188,6 +1188,7 @@ StorageWindowView::StorageWindowView( ContextPtr context_, const ASTCreateQuery & query, const ColumnsDescription & columns_, + const String & comment, LoadingStrictnessLevel mode) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) @@ -1206,6 +1207,7 @@ StorageWindowView::StorageWindowView( StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); + storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); /// If the target table is not set, use inner target table @@ -1761,7 +1763,7 @@ void registerStorageWindowView(StorageFactory & factory) "Experimental WINDOW VIEW feature " "is not enabled (the setting 'allow_experimental_window_view')"); - return std::make_shared(args.table_id, args.getLocalContext(), args.query, args.columns, args.mode); + return std::make_shared(args.table_id, args.getLocalContext(), args.query, args.columns, args.comment, args.mode); }); } diff --git a/src/Storages/WindowView/StorageWindowView.h b/src/Storages/WindowView/StorageWindowView.h index 14ac65091d3..38fca512ed9 100644 --- a/src/Storages/WindowView/StorageWindowView.h +++ b/src/Storages/WindowView/StorageWindowView.h @@ -111,6 +111,7 @@ public: ContextPtr context_, const ASTCreateQuery & query, const ColumnsDescription & columns_, + const String & comment, LoadingStrictnessLevel mode); String getName() const override { return "WindowView"; } diff --git a/src/TableFunctions/TableFunctionExecutable.cpp b/src/TableFunctions/TableFunctionExecutable.cpp index 2c3802e8667..cccd3587bc7 100644 --- a/src/TableFunctions/TableFunctionExecutable.cpp +++ b/src/TableFunctions/TableFunctionExecutable.cpp @@ -170,7 +170,14 @@ StoragePtr TableFunctionExecutable::executeImpl(const ASTPtr & /*ast_function*/, if (settings_query != nullptr) settings.applyChanges(settings_query->as()->changes); - auto storage = std::make_shared(storage_id, format, settings, input_queries, getActualTableStructure(context, is_insert_query), ConstraintsDescription{}); + auto storage = std::make_shared( + storage_id, + format, + settings, + input_queries, + getActualTableStructure(context, is_insert_query), + ConstraintsDescription{}, + /* comment = */ ""); storage->startup(); return storage; } diff --git a/src/TableFunctions/TableFunctionSQLite.cpp b/src/TableFunctions/TableFunctionSQLite.cpp index e367e05bf73..87353025d1d 100644 --- a/src/TableFunctions/TableFunctionSQLite.cpp +++ b/src/TableFunctions/TableFunctionSQLite.cpp @@ -57,7 +57,7 @@ StoragePtr TableFunctionSQLite::executeImpl(const ASTPtr & /*ast_function*/, sqlite_db, database_path, remote_table_name, - cached_columns, ConstraintsDescription{}, context); + cached_columns, ConstraintsDescription{}, /* comment = */ "", context); storage->startup(); return storage; From dd9fe61d1a973a7fa528259b507218cf264548fe 
Mon Sep 17 00:00:00 2001 From: Max K Date: Sun, 21 Jul 2024 17:44:32 +0200 Subject: [PATCH 0608/2361] CI: New Release workflow updates and fixes --- .github/actions/release/action.yml | 27 +++++------- tests/ci/artifactory.py | 56 ++++++++++++++----------- tests/ci/ci_utils.py | 3 +- tests/ci/create_release.py | 66 +++++++++++++++++++----------- 4 files changed, 87 insertions(+), 65 deletions(-) diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index 99ec02662f6..c3897682a33 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -62,8 +62,8 @@ runs: if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/create_release.py --set-progress-started --progress "update ChangeLog" - [ "$(git branch --show-current)" != "master" ] && echo "not on the master" && exit 1 + git checkout master + python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" echo "List versions" ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv echo "Update docker version" @@ -96,17 +96,13 @@ runs: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} ### Changelog category (leave one): - Not for changelog (changelog entry is not required) - - name: Reset changes if Dry-run - if: ${{ inputs.dry-run }} + - name: Complete previous steps and Restore git state + if: ${{ inputs.type == 'patch' }} shell: bash run: | - git reset --hard HEAD - - name: Checkout back to GITHUB_REF - shell: bash - run: | - git checkout "$GITHUB_REF_NAME" - # set current progress to OK python3 ./tests/ci/create_release.py --set-progress-completed + git reset --hard HEAD + git checkout "$GITHUB_REF_NAME" - name: Create GH Release shell: bash if: ${{ inputs.type == 'patch' }} @@ -146,24 +142,23 @@ runs: if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/create_release.py --set-progress-started --progress "docker server release" cd "./tests/ci" + python3 ./create_release.py --set-progress-started --progress "docker server release" export CHECK_NAME="Docker server image" python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} - python3 ./tests/ci/create_release.py --set-progress-completed + python3 ./create_release.py --set-progress-completed - name: Docker clickhouse/clickhouse-keeper building if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/create_release.py --set-progress-started --progress "docker keeper release" cd "./tests/ci" + python3 ./create_release.py --set-progress-started --progress "docker keeper release" export CHECK_NAME="Docker keeper image" python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! 
inputs.dry-run && '--push' || '' }} - python3 ./tests/ci/create_release.py --set-progress-completed - - name: Set Release progress completed + python3 ./create_release.py --set-progress-completed + - name: Set current Release progress to Completed with OK shell: bash run: | - # If we here - set completed status, to post proper Slack OK or FAIL message in the next step python3 ./tests/ci/create_release.py --set-progress-started --progress "completed" python3 ./tests/ci/create_release.py --set-progress-completed - name: Post Slack Message diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 98a0345c6bd..86dcaf79854 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -43,7 +43,6 @@ class R2MountPoint: self.bucket_name = self._PROD_BUCKET_NAME self.aux_mount_options = "" - self.async_mount = False if self.app == MountPointApp.S3FS: self.cache_dir = "/home/ubuntu/s3fs_cache" # self.aux_mount_options += "-o nomodtime " if self.NOMODTIME else "" not for s3fs @@ -57,7 +56,6 @@ class R2MountPoint: self.mount_cmd = f"s3fs {self.bucket_name} {self.MOUNT_POINT} -o url={self.API_ENDPOINT} -o use_path_request_style -o umask=0000 -o nomultipart -o logfile={self.LOG_FILE} {self.aux_mount_options}" elif self.app == MountPointApp.RCLONE: # run rclone mount process asynchronously, otherwise subprocess.run(daemonized command) will not return - self.async_mount = True self.cache_dir = "/home/ubuntu/rclone_cache" self.aux_mount_options += "--no-modtime " if self.NOMODTIME else "" self.aux_mount_options += "-v " if self.DEBUG else "" # -vv too verbose @@ -85,10 +83,12 @@ class R2MountPoint: Shell.run(_UNMOUNT_CMD) Shell.run(_MKDIR_CMD) Shell.run(_MKDIR_FOR_CACHE) - # didn't manage to use simple run() and not block or fail - Shell.run_as_daemon(self.mount_cmd) - if self.async_mount: - time.sleep(3) + if self.app == MountPointApp.S3FS: + Shell.run(self.mount_cmd, check=True) + else: + # didn't manage to use simple run() and without blocking or failure + Shell.run_as_daemon(self.mount_cmd) + time.sleep(3) Shell.run(_TEST_MOUNT_CMD, check=True) @classmethod @@ -107,6 +107,7 @@ class DebianArtifactory: _PROD_REPO_URL = "https://packages.clickhouse.com/deb" def __init__(self, release_info: ReleaseInfo, dry_run: bool): + self.release_info = release_info self.codename = release_info.codename self.version = release_info.version if dry_run: @@ -154,9 +155,8 @@ class DebianArtifactory: print("Running test command:") print(f" {cmd}") Shell.run(cmd, check=True) - release_info = ReleaseInfo.from_file() - release_info.debian_command = debian_command - release_info.dump() + self.release_info.debian_command = debian_command + self.release_info.dump() def _copy_if_not_exists(src: Path, dst: Path) -> Path: @@ -177,6 +177,7 @@ class RpmArtifactory: _SIGN_KEY = "885E2BDCF96B0B45ABF058453E4AD4719DDE9A38" def __init__(self, release_info: ReleaseInfo, dry_run: bool): + self.release_info = release_info self.codename = release_info.codename self.version = release_info.version if dry_run: @@ -230,9 +231,8 @@ class RpmArtifactory: print("Running test command:") print(f" {cmd}") Shell.run(cmd, check=True) - release_info = ReleaseInfo.from_file() - release_info.rpm_command = rpm_command - release_info.dump() + self.release_info.rpm_command = rpm_command + self.release_info.dump() class TgzArtifactory: @@ -240,6 +240,7 @@ class TgzArtifactory: _PROD_REPO_URL = "https://packages.clickhouse.com/tgz" def __init__(self, release_info: ReleaseInfo, dry_run: bool): + self.release_info = release_info self.codename = 
release_info.codename self.version = release_info.version if dry_run: @@ -290,9 +291,8 @@ class TgzArtifactory: expected_checksum == actual_checksum ), f"[{actual_checksum} != {expected_checksum}]" Shell.run("rm /tmp/tmp.tgz*") - release_info = ReleaseInfo.from_file() - release_info.tgz_command = cmd - release_info.dump() + self.release_info.tgz_command = cmd + self.release_info.dump() def parse_args() -> argparse.Namespace: @@ -340,9 +340,7 @@ def parse_args() -> argparse.Namespace: if __name__ == "__main__": args = parse_args() - assert args.dry_run - release_info = ReleaseInfo.from_file() """ Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve: ERROR : IO error: NotImplemented: versionId not implemented @@ -350,26 +348,38 @@ if __name__ == "__main__": """ mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run) if args.export_debian: - with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_DEB) as _: + with ReleaseContextManager( + release_progress=ReleaseProgress.EXPORT_DEB + ) as release_info: mp.init() DebianArtifactory(release_info, dry_run=args.dry_run).export_packages() mp.teardown() if args.export_rpm: - with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_RPM) as _: + with ReleaseContextManager( + release_progress=ReleaseProgress.EXPORT_RPM + ) as release_info: mp.init() RpmArtifactory(release_info, dry_run=args.dry_run).export_packages() mp.teardown() if args.export_tgz: - with ReleaseContextManager(release_progress=ReleaseProgress.EXPORT_TGZ) as _: + with ReleaseContextManager( + release_progress=ReleaseProgress.EXPORT_TGZ + ) as release_info: mp.init() TgzArtifactory(release_info, dry_run=args.dry_run).export_packages() mp.teardown() if args.test_debian: - with ReleaseContextManager(release_progress=ReleaseProgress.TEST_DEB) as _: + with ReleaseContextManager( + release_progress=ReleaseProgress.TEST_DEB + ) as release_info: DebianArtifactory(release_info, dry_run=args.dry_run).test_packages() if args.test_tgz: - with ReleaseContextManager(release_progress=ReleaseProgress.TEST_TGZ) as _: + with ReleaseContextManager( + release_progress=ReleaseProgress.TEST_TGZ + ) as release_info: TgzArtifactory(release_info, dry_run=args.dry_run).test_packages() if args.test_rpm: - with ReleaseContextManager(release_progress=ReleaseProgress.TEST_RPM) as _: + with ReleaseContextManager( + release_progress=ReleaseProgress.TEST_RPM + ) as release_info: RpmArtifactory(release_info, dry_run=args.dry_run).test_packages() diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index efbf014cd52..9a1b12af310 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -182,10 +182,11 @@ class Shell: check=False, ) if result.returncode == 0: + print(f"stdout: {result.stdout.strip()}") res = result.stdout else: print( - f"ERROR: stdout {result.stdout.strip()}, stderr {result.stderr.strip()}" + f"ERROR: stdout: {result.stdout.strip()}, stderr: {result.stderr.strip()}" ) if check: assert result.returncode == 0 diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 4347cfebb54..a0b4083b673 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -43,6 +43,7 @@ class ReleaseProgress: TEST_TGZ = "test TGZ packages" TEST_RPM = "test RPM packages" TEST_DEB = "test DEB packages" + COMPLETED = "completed" class ReleaseProgressDescription: @@ -108,6 +109,12 @@ class ReleaseInfo: release_progress: str = "" progress_description: str = "" + def is_patch(self): + return self.release_branch != "master" + + def 
is_new_release_branch(self): + return self.release_branch == "master" + @staticmethod def from_file() -> "ReleaseInfo": with open(RELEASE_INFO_FILE, "r", encoding="utf-8") as json_file: @@ -126,12 +133,12 @@ class ReleaseInfo: release_tag = None previous_release_tag = None previous_release_sha = None - codename = None + codename = "" assert release_type in ("patch", "new") if release_type == "new": # check commit_ref is right and on a right branch Shell.run( - f"git merge-base --is-ancestor origin/{commit_ref} origin/master", + f"git merge-base --is-ancestor {commit_ref} origin/master", check=True, ) with checkout(commit_ref): @@ -146,9 +153,6 @@ class ReleaseInfo: git.latest_tag == expected_prev_tag ), f"BUG: latest tag [{git.latest_tag}], expected [{expected_prev_tag}]" release_tag = version.describe - codename = ( - VersionType.STABLE - ) # dummy value (artifactory won't be updated for new release) previous_release_tag = expected_prev_tag previous_release_sha = Shell.run_strict( f"git rev-parse {previous_release_tag}" @@ -205,7 +209,7 @@ class ReleaseInfo: and commit_sha and release_tag and version - and codename in ("lts", "stable") + and (codename in ("lts", "stable") or release_type == "new") ) self.release_branch = release_branch @@ -320,24 +324,27 @@ class ReleaseInfo: Shell.run( f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" ) - self.version_bump_pr = GHActions.get_pr_url_by_branch( - repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors - ) + self.version_bump_pr = "dry-run" + else: + self.version_bump_pr = GHActions.get_pr_url_by_branch( + repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors + ) def update_release_info(self, dry_run: bool) -> "ReleaseInfo": - branch = f"auto/{release_info.release_tag}" - if not dry_run: - url = GHActions.get_pr_url_by_branch(repo=GITHUB_REPOSITORY, branch=branch) - else: - url = "dry-run" - - print(f"ChangeLog PR url [{url}]") - self.changelog_pr = url - print(f"Release url [{url}]") - self.release_url = ( - f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" - ) - self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.release_branch} clickhouse --version" + if self.release_branch != "master": + branch = f"auto/{release_info.release_tag}" + if not dry_run: + url = GHActions.get_pr_url_by_branch( + repo=GITHUB_REPOSITORY, branch=branch + ) + else: + url = "dry-run" + print(f"ChangeLog PR url [{url}]") + self.changelog_pr = url + print(f"Release url [{url}]") + self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + if self.release_progress == ReleaseProgress.COMPLETED: + self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version" self.dump() return self @@ -712,13 +719,22 @@ if __name__ == "__main__": if args.post_status: release_info = ReleaseInfo.from_file() release_info.update_release_info(dry_run=args.dry_run) - if release_info.debian_command: + if release_info.is_new_release_branch(): + title = "New release branch" + else: + title = "New release" + if ( + release_info.progress_description == ReleaseProgressDescription.OK + and release_info.release_progress == ReleaseProgress.COMPLETED + ): + title = "Completed: " + title CIBuddy(dry_run=args.dry_run).post_done( - f"New release issued", dataclasses.asdict(release_info) + title, dataclasses.asdict(release_info) ) else: + title = "Failed: " + title CIBuddy(dry_run=args.dry_run).post_critical( - f"Failed to issue new release", 
dataclasses.asdict(release_info) + title, dataclasses.asdict(release_info) ) if args.set_progress_started: From 20d4b16fc1d92632b714368c85ac6701723ef096 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 21 Jul 2024 22:19:30 +0200 Subject: [PATCH 0609/2361] Update 03207_composite_expressions_lambda_consistent_formatting.sql --- .../03207_composite_expressions_lambda_consistent_formatting.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql b/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql index 42c823cf476..2e2f5510876 100644 --- a/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql +++ b/tests/queries/0_stateless/03207_composite_expressions_lambda_consistent_formatting.sql @@ -1,3 +1,4 @@ +SET allow_experimental_analyzer = 1; SELECT [1, (x -> 1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT (1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT map(1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } From 3b45916470affeb555ae20a4d557ea9de5075f50 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 05:46:53 +0200 Subject: [PATCH 0610/2361] What if we tighten limits for functional tests? --- tests/config/install.sh | 1 + tests/config/users.d/limits.xml | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 tests/config/users.d/limits.xml diff --git a/tests/config/install.sh b/tests/config/install.sh index 1b0edc5fc16..265b9248f4a 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -93,6 +93,7 @@ ln -sf $SRC_PATH/users.d/prefetch_settings.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/nonconst_timezone.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/allow_introspection_functions.yaml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/replicated_ddl_entry.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/limits.xml $DEST_SERVER_PATH/users.d/ if [[ -n "$USE_OLD_ANALYZER" ]] && [[ "$USE_OLD_ANALYZER" -eq 1 ]]; then ln -sf $SRC_PATH/users.d/analyzer.xml $DEST_SERVER_PATH/users.d/ diff --git a/tests/config/users.d/limits.xml b/tests/config/users.d/limits.xml new file mode 100644 index 00000000000..f44c73241ab --- /dev/null +++ b/tests/config/users.d/limits.xml @@ -0,0 +1,8 @@ + + + + 5G + 20000000 + + + From 2be21fe05c6b952735fc6895c0286b177864dde5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 05:53:12 +0200 Subject: [PATCH 0611/2361] Fix RocksDB bs --- tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 b/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 index 6121db6d6a2..4ab98201eed 100644 --- a/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 +++ b/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 @@ -1,4 +1,4 @@ --- Tags: use-rocksdb, long +-- Tags: use-rocksdb, long, no-s3-storage, no-random-settings, no-random-merge-tree-settings SET join_algorithm = 'direct'; @@ -41,4 +41,3 @@ ON rdb.key == t1.k; {% for table_size in [10, 65555, 100000] -%} DROP TABLE IF EXISTS rdb_{{ table_size }}; {% endfor -%} - From 7643b99dc5a892f404d756e92c4a17cf689c720b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 06:08:00 +0200 Subject: [PATCH 0612/2361] Split a test for index --- .../0_stateless/02995_index_1.reference | 15 +++++++ 
tests/queries/0_stateless/02995_index_1.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_10.reference | 13 ++++++ tests/queries/0_stateless/02995_index_10.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_2.reference | 13 ++++++ tests/queries/0_stateless/02995_index_2.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_3.reference | 16 +++++++ tests/queries/0_stateless/02995_index_3.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_4.reference | 16 +++++++ tests/queries/0_stateless/02995_index_4.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_5.reference | 9 ++++ tests/queries/0_stateless/02995_index_5.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_6.reference | 14 ++++++ tests/queries/0_stateless/02995_index_6.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_7.reference | 9 ++++ tests/queries/0_stateless/02995_index_7.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_8.reference | 12 +++++ tests/queries/0_stateless/02995_index_8.sh | 44 +++++++++++++++++++ .../0_stateless/02995_index_9.reference | 9 ++++ tests/queries/0_stateless/02995_index_9.sh | 44 +++++++++++++++++++ 20 files changed, 566 insertions(+) create mode 100644 tests/queries/0_stateless/02995_index_1.reference create mode 100755 tests/queries/0_stateless/02995_index_1.sh create mode 100644 tests/queries/0_stateless/02995_index_10.reference create mode 100755 tests/queries/0_stateless/02995_index_10.sh create mode 100644 tests/queries/0_stateless/02995_index_2.reference create mode 100755 tests/queries/0_stateless/02995_index_2.sh create mode 100644 tests/queries/0_stateless/02995_index_3.reference create mode 100755 tests/queries/0_stateless/02995_index_3.sh create mode 100644 tests/queries/0_stateless/02995_index_4.reference create mode 100755 tests/queries/0_stateless/02995_index_4.sh create mode 100644 tests/queries/0_stateless/02995_index_5.reference create mode 100755 tests/queries/0_stateless/02995_index_5.sh create mode 100644 tests/queries/0_stateless/02995_index_6.reference create mode 100755 tests/queries/0_stateless/02995_index_6.sh create mode 100644 tests/queries/0_stateless/02995_index_7.reference create mode 100755 tests/queries/0_stateless/02995_index_7.sh create mode 100644 tests/queries/0_stateless/02995_index_8.reference create mode 100755 tests/queries/0_stateless/02995_index_8.sh create mode 100644 tests/queries/0_stateless/02995_index_9.reference create mode 100755 tests/queries/0_stateless/02995_index_9.sh diff --git a/tests/queries/0_stateless/02995_index_1.reference b/tests/queries/0_stateless/02995_index_1.reference new file mode 100644 index 00000000000..6c3b1230db6 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_1.reference @@ -0,0 +1,15 @@ +12 4 21722 2209341 4 1415 2333 4 61 64 3 +21 1134 11363 58484 1106 1458 1592 136 26 62 32 +22 210 4504 5729 196 291 767 124 47 54 8 +26 196 1327684 5221 195 4140 5661 161 28 49 19 +28 5 2034378 7102 5 325 3255 2 53 60 4 +29 53 45041 45189 45 1580 211 31 55 84 18 +38 424 1600675 4653 424 562 5944 244 60 65 6 +45 17 62743 674873 17 6239 6494 17 65 76 8 +72 1862 1210073 6200 1677 2498 528 859 51 61 11 +79 2 2255228 2255293 2 5495 7057 2 65 65 1 +85 459 1051571 1829638 459 6402 7131 334 32 61 25 +86 10 1748130 1754217 10 4374 7003 10 56 59 4 +91 165 5718 5802 75 282 7113 112 41 63 22 +94 20 1231916 2050003 20 4802 4917 19 53 59 7 +99 2 3665 36667 2 497 697 2 70 71 2 diff --git a/tests/queries/0_stateless/02995_index_1.sh b/tests/queries/0_stateless/02995_index_1.sh new file mode 100755 index 
00000000000..a5f1b30c2e8 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_1.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {1..100} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_10.reference b/tests/queries/0_stateless/02995_index_10.reference new file mode 100644 index 00000000000..bfa38d03801 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_10.reference @@ -0,0 +1,13 @@ +912 146 1322641 2238040 146 1366 6354 143 59 59 1 +913 82 5495 6870 78 350 565 67 24 43 15 +921 763 1580790 416881 763 6191 7131 509 63 64 2 +925 318 2500952 5025 309 476 6114 182 32 56 21 +931 12 4277 4809 12 238 256 9 63 83 9 +942 954 1331 2228193 952 1121 5047 788 65 70 6 +948 14 1785593 2600431 14 6550 6598 13 34 49 9 +956 5 5755 6023 5 359 411 5 43 48 4 +963 4 3812 3835 4 444 537 4 47 53 4 +978 5 51632 58212 5 1127 1556 5 24 32 5 +980 53 47201 59744 53 1537 1625 36 41 49 9 +987 6033 2020131 763444 4306 256 792 1832 60 64 5 +993 4 1615159 1718339 4 1570 3093 4 62 63 2 diff --git a/tests/queries/0_stateless/02995_index_10.sh b/tests/queries/0_stateless/02995_index_10.sh new file mode 100755 index 00000000000..d72c7c72705 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_10.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {901..1000} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_2.reference b/tests/queries/0_stateless/02995_index_2.reference new file mode 100644 index 00000000000..098292a289f --- /dev/null +++ b/tests/queries/0_stateless/02995_index_2.reference @@ -0,0 +1,13 @@ +103 1 2446615 2446615 1 2498 2498 1 58 58 1 +106 72 6149 6699 67 527 826 40 61 61 1 +111 43 2273186 5272 43 492 4923 4 54 72 15 +120 3129 45117 6735 2868 1030 1625 561 59 64 6 +138 2 49243 49374 2 1428 1519 2 47 48 2 +143 100 23321 63639 100 1115 1624 88 51 51 1 +145 1 2447976 2447976 1 6173 6173 1 44 44 1 +153 16 13748 16881 16 1506 1636 16 54 68 9 +159 19952 1525336 7131 12957 1280 6163 2668 24 66 39 +171 5 15042 16698 5 1302 1608 5 65 65 1 +179 6264 1362341 2686 6244 2554 7132 2705 61 67 7 +192 1 1639623 1639623 1 3406 3406 1 32 32 1 +193 1 1429969 1429969 1 7131 7131 1 45 45 1 diff --git a/tests/queries/0_stateless/02995_index_2.sh b/tests/queries/0_stateless/02995_index_2.sh new file mode 100755 index 00000000000..e7451c7ee4b --- /dev/null +++ b/tests/queries/0_stateless/02995_index_2.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {101..200} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_3.reference b/tests/queries/0_stateless/02995_index_3.reference new file mode 100644 index 00000000000..9c2fca9fde6 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_3.reference @@ -0,0 +1,16 @@ +207 12 23057 32500 12 1491 1726 12 32 46 7 +221 5081 1366870 6649 3432 4527 5226 687 24 69 39 +228 73 12281 17929 71 1328 2034 63 49 71 18 +229 2 1617478 1723791 2 4590 5578 2 41 42 2 +230 3916 1332729 6949 3668 1330 4703 845 62 65 4 +238 25 2624456 2625673 24 2535 6465 25 58 75 14 +241 154 2554929 2616444 154 2626 7131 148 34 57 17 +248 276 15529 30987 274 1040 1222 136 37 79 27 +254 3018 33966 6635 2837 1057 1622 539 24 60 33 +255 20 1581774 1811334 20 6068 6301 18 33 57 10 +256 5 5145 6841 5 367 376 5 58 58 1 +270 2 2195579 2262119 2 7102 7123 2 33 34 2 +281 32 2379460 616215 32 6042 6086 23 53 64 12 +282 7 1292651 24244 7 1607 2455 6 46 55 5 +286 123 1521935 5269 123 3793 3940 81 40 66 22 +291 21 2419080 3567 21 297 4731 21 54 55 2 diff --git a/tests/queries/0_stateless/02995_index_3.sh b/tests/queries/0_stateless/02995_index_3.sh new file mode 100755 index 00000000000..506429e2696 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_3.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {201..300} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_4.reference b/tests/queries/0_stateless/02995_index_4.reference new file mode 100644 index 00000000000..deff7afaed3 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_4.reference @@ -0,0 +1,16 @@ +316 4 5221 5616 4 505 558 4 32 35 3 +319 232 56480 63033 230 1599 313 50 33 64 26 +327 15 51647 51894 14 1292 1585 14 47 57 7 +332 24 23484 54948 24 1609 1726 16 32 49 11 +333 1 14189 14189 1 1550 1550 1 63 63 1 +342 49 2579220 2622432 49 4626 6933 48 34 54 14 +344 1 6486 6486 1 509 509 1 24 24 1 +346 1987 53016 6735 1823 1334 174 294 26 62 32 +358 45 59058 60844 44 6746 722 40 57 84 15 +363 1198 1260033 2568811 1196 5710 5790 82 55 80 26 +384 150 2361175 476024 150 7008 7123 81 38 64 22 +387 277 5200 6553 252 243 521 130 65 65 1 +392 1877 1607428 2030850 1875 1416 7131 1379 54 66 13 +396 8181 1380803 6186 7920 545 798 1743 24 67 39 +398 3 5183 5213 2 291 352 3 53 59 3 +399 62 51494 59203 61 7073 754 42 55 78 18 diff --git a/tests/queries/0_stateless/02995_index_4.sh b/tests/queries/0_stateless/02995_index_4.sh new file mode 100755 index 00000000000..1a0458728f9 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_4.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {301..400} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_5.reference b/tests/queries/0_stateless/02995_index_5.reference new file mode 100644 index 00000000000..c5ab4d2417c --- /dev/null +++ b/tests/queries/0_stateless/02995_index_5.reference @@ -0,0 +1,9 @@ +412 2141 1360120 2189792 2136 2491 5658 1371 71 75 5 +413 2 2036037 2064917 2 3963 4666 2 43 45 2 +431 33 2302331 2348449 33 4425 6516 32 69 69 1 +447 59 25125 33094 59 1176 1817 56 53 58 6 +456 1 53157 53157 1 1556 1556 1 26 26 1 +462 5 5456 6280 5 348 4337 5 28 40 5 +472 1 1443716 1443716 1 6122 6122 1 42 42 1 +491 34 1066102 1183673 34 6606 6822 32 46 67 15 +498 896 2230163 3054 895 537 7131 714 24 59 28 diff --git a/tests/queries/0_stateless/02995_index_5.sh b/tests/queries/0_stateless/02995_index_5.sh new file mode 100755 index 00000000000..60c12a8146d --- /dev/null +++ b/tests/queries/0_stateless/02995_index_5.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {401..500} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_6.reference b/tests/queries/0_stateless/02995_index_6.reference new file mode 100644 index 00000000000..bac19179bb6 --- /dev/null +++ b/tests/queries/0_stateless/02995_index_6.reference @@ -0,0 +1,14 @@ +504 108 12281 25180 108 1318 1784 94 55 66 12 +515 22 1588883 2640809 22 6554 6571 15 46 59 12 +518 1 37743 37743 1 1558 1558 1 72 72 1 +530 1 3033 3033 1 561 561 1 59 59 1 +532 26 5721 6355 25 549 665 14 44 50 7 +546 156 2577874 48517 156 1105 324 133 44 51 8 +554 12 1665194 2640066 12 1817 2951 12 57 57 1 +564 3865 2028049 2083433 3722 1115 985 2203 44 84 41 +566 4432 50605 57509 3217 1191 267 459 26 72 39 +567 8 5221 5893 7 333 558 8 27 35 4 +582 1172 1320619 2019743 1172 5819 7131 757 26 63 30 +584 43100 2500 5594 22561 134 4573 1660 48 84 37 +589 28 6046 6068 19 345 564 27 55 62 8 +595 139 1585165 1683606 138 2231 3598 132 54 84 28 diff --git a/tests/queries/0_stateless/02995_index_6.sh b/tests/queries/0_stateless/02995_index_6.sh new file mode 100755 index 00000000000..4936f73f36b --- /dev/null +++ b/tests/queries/0_stateless/02995_index_6.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {501..600} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_7.reference b/tests/queries/0_stateless/02995_index_7.reference new file mode 100644 index 00000000000..4f1d28ab37e --- /dev/null +++ b/tests/queries/0_stateless/02995_index_7.reference @@ -0,0 +1,9 @@ +615 3 1056081 1116230 3 5794 5796 2 59 62 3 +619 7 1543114 5241 7 2442 3105 7 41 45 3 +634 2722 1221058 4999 2686 2426 7131 1735 54 60 7 +635 237 2119333 4667 237 561 5999 176 49 60 12 +644 5 1774169 2056171 5 5591 6091 4 33 39 3 +647 8 51632 64403 8 1457 1624 8 26 34 5 +651 1325 1620565 6281 1301 528 792 815 62 63 2 +665 13 4598 4789 13 511 558 11 39 46 7 +679 1560 1613200 25940 1552 1569 3118 781 49 84 35 diff --git a/tests/queries/0_stateless/02995_index_7.sh b/tests/queries/0_stateless/02995_index_7.sh new file mode 100755 index 00000000000..26be310abce --- /dev/null +++ b/tests/queries/0_stateless/02995_index_7.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {601..700} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_8.reference b/tests/queries/0_stateless/02995_index_8.reference new file mode 100644 index 00000000000..62fbfb2af9a --- /dev/null +++ b/tests/queries/0_stateless/02995_index_8.reference @@ -0,0 +1,12 @@ +704 2 14226 15594 2 1086 1116 2 65 71 2 +715 25 1199352 3490 25 5036 5112 23 34 55 13 +716 1253 61989 6735 1050 1203 1625 397 52 65 14 +730 2584 5560 6170 634 2421 627 293 56 69 14 +736 8 1433153 4941 8 339 4594 8 28 36 5 +749 2 1326176 1339862 2 4339 6213 2 49 50 2 +753 1 53157 53157 1 1556 1556 1 26 26 1 +761 63 1443230 6881 63 3154 3204 26 56 73 14 +762 49 1449596 1968154 49 2437 3753 48 54 62 9 +775 35107 5330 769436 2471 447 6607 656 70 81 12 +789 1 1552458 1552458 1 2441 2441 1 62 62 1 +794 158 5585 6585 155 495 929 67 24 50 20 diff --git a/tests/queries/0_stateless/02995_index_8.sh b/tests/queries/0_stateless/02995_index_8.sh new file mode 100755 index 00000000000..8c2620b59fd --- /dev/null +++ b/tests/queries/0_stateless/02995_index_8.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {701..800} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_9.reference b/tests/queries/0_stateless/02995_index_9.reference new file mode 100644 index 00000000000..86c7be173bd --- /dev/null +++ b/tests/queries/0_stateless/02995_index_9.reference @@ -0,0 +1,9 @@ +839 9 29223 46530 9 1336 1465 9 52 52 1 +844 5 2377545 2377635 5 5129 6321 5 53 69 5 +846 50 2172273 2589295 50 1582 3053 48 64 68 5 +847 2577 56656 63658 1582 1444 838 474 26 63 33 +861 1333 5570 6909 839 457 489 37 33 70 34 +873 2360 1519811 50487 2248 1310 1784 316 60 68 9 +879 228 6704 6785 79 279 507 121 35 66 24 +889 5130 2070007 39692 5040 1151 6791 2606 44 66 23 +896 4 511246 859452 4 6554 6561 4 67 71 4 diff --git a/tests/queries/0_stateless/02995_index_9.sh b/tests/queries/0_stateless/02995_index_9.sh new file mode 100755 index 00000000000..76160c62aaa --- /dev/null +++ b/tests/queries/0_stateless/02995_index_9.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --multiquery " + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +INSERT INTO test +SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) +FROM numbers(100000); + +DETACH TABLE test; +ATTACH TABLE test; +" + +for i in {801..900} +do + echo " +WITH ${i} AS try +SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test +WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String + AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String + AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String + AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String + AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String + AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String +HAVING count() > 0; +" +done | ${CLICKHOUSE_CLIENT} --multiquery + +${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" From 70d6320577a483d6011872a5f642927c27d095c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 06:08:16 +0200 Subject: [PATCH 0613/2361] Split a test for index --- .../queries/0_stateless/02995_index.reference | 126 ------------------ tests/queries/0_stateless/02995_index.sh | 44 ------ 2 files changed, 170 deletions(-) delete mode 100644 tests/queries/0_stateless/02995_index.reference delete mode 100755 tests/queries/0_stateless/02995_index.sh diff --git a/tests/queries/0_stateless/02995_index.reference b/tests/queries/0_stateless/02995_index.reference deleted file mode 100644 index 1e8639caa88..00000000000 --- a/tests/queries/0_stateless/02995_index.reference +++ /dev/null @@ -1,126 +0,0 @@ -12 4 21722 2209341 4 1415 2333 4 61 64 3 -21 1134 11363 58484 1106 1458 1592 136 26 62 32 -22 210 4504 5729 196 291 767 124 47 54 8 -26 196 1327684 5221 195 4140 5661 161 28 49 19 -28 5 2034378 7102 5 325 3255 2 53 60 4 -29 53 45041 45189 45 1580 211 31 55 84 18 -38 424 1600675 4653 424 562 5944 244 60 65 6 -45 17 62743 674873 17 6239 6494 17 65 76 8 -72 1862 1210073 6200 1677 2498 528 859 51 61 11 -79 2 2255228 2255293 2 5495 7057 2 65 65 1 -85 459 1051571 1829638 459 6402 7131 334 32 61 25 -86 10 1748130 1754217 10 4374 7003 10 56 59 4 -91 165 5718 5802 75 282 7113 112 41 63 22 -94 20 1231916 2050003 20 4802 4917 19 53 59 7 -99 2 3665 36667 2 497 697 2 70 71 2 -103 1 2446615 2446615 1 2498 2498 1 58 58 1 -106 72 6149 6699 67 527 826 40 61 61 1 -111 43 2273186 5272 43 492 4923 4 54 72 15 -120 3129 45117 6735 2868 1030 1625 561 59 64 6 -138 2 49243 49374 2 1428 1519 2 47 48 2 -143 100 23321 63639 100 1115 1624 88 51 51 1 -145 1 2447976 2447976 1 6173 6173 1 44 44 1 -153 16 13748 16881 16 1506 1636 16 54 68 9 -159 19952 1525336 7131 12957 1280 6163 2668 24 66 39 -171 5 15042 16698 5 1302 1608 5 65 65 1 -179 6264 1362341 2686 6244 2554 7132 2705 61 67 7 -192 1 1639623 1639623 1 3406 3406 1 32 32 1 -193 1 
1429969 1429969 1 7131 7131 1 45 45 1 -207 12 23057 32500 12 1491 1726 12 32 46 7 -221 5081 1366870 6649 3432 4527 5226 687 24 69 39 -228 73 12281 17929 71 1328 2034 63 49 71 18 -229 2 1617478 1723791 2 4590 5578 2 41 42 2 -230 3916 1332729 6949 3668 1330 4703 845 62 65 4 -238 25 2624456 2625673 24 2535 6465 25 58 75 14 -241 154 2554929 2616444 154 2626 7131 148 34 57 17 -248 276 15529 30987 274 1040 1222 136 37 79 27 -254 3018 33966 6635 2837 1057 1622 539 24 60 33 -255 20 1581774 1811334 20 6068 6301 18 33 57 10 -256 5 5145 6841 5 367 376 5 58 58 1 -270 2 2195579 2262119 2 7102 7123 2 33 34 2 -281 32 2379460 616215 32 6042 6086 23 53 64 12 -282 7 1292651 24244 7 1607 2455 6 46 55 5 -286 123 1521935 5269 123 3793 3940 81 40 66 22 -291 21 2419080 3567 21 297 4731 21 54 55 2 -316 4 5221 5616 4 505 558 4 32 35 3 -319 232 56480 63033 230 1599 313 50 33 64 26 -327 15 51647 51894 14 1292 1585 14 47 57 7 -332 24 23484 54948 24 1609 1726 16 32 49 11 -333 1 14189 14189 1 1550 1550 1 63 63 1 -342 49 2579220 2622432 49 4626 6933 48 34 54 14 -344 1 6486 6486 1 509 509 1 24 24 1 -346 1987 53016 6735 1823 1334 174 294 26 62 32 -358 45 59058 60844 44 6746 722 40 57 84 15 -363 1198 1260033 2568811 1196 5710 5790 82 55 80 26 -384 150 2361175 476024 150 7008 7123 81 38 64 22 -387 277 5200 6553 252 243 521 130 65 65 1 -392 1877 1607428 2030850 1875 1416 7131 1379 54 66 13 -396 8181 1380803 6186 7920 545 798 1743 24 67 39 -398 3 5183 5213 2 291 352 3 53 59 3 -399 62 51494 59203 61 7073 754 42 55 78 18 -412 2141 1360120 2189792 2136 2491 5658 1371 71 75 5 -413 2 2036037 2064917 2 3963 4666 2 43 45 2 -431 33 2302331 2348449 33 4425 6516 32 69 69 1 -447 59 25125 33094 59 1176 1817 56 53 58 6 -456 1 53157 53157 1 1556 1556 1 26 26 1 -462 5 5456 6280 5 348 4337 5 28 40 5 -472 1 1443716 1443716 1 6122 6122 1 42 42 1 -491 34 1066102 1183673 34 6606 6822 32 46 67 15 -498 896 2230163 3054 895 537 7131 714 24 59 28 -504 108 12281 25180 108 1318 1784 94 55 66 12 -515 22 1588883 2640809 22 6554 6571 15 46 59 12 -518 1 37743 37743 1 1558 1558 1 72 72 1 -530 1 3033 3033 1 561 561 1 59 59 1 -532 26 5721 6355 25 549 665 14 44 50 7 -546 156 2577874 48517 156 1105 324 133 44 51 8 -554 12 1665194 2640066 12 1817 2951 12 57 57 1 -564 3865 2028049 2083433 3722 1115 985 2203 44 84 41 -566 4432 50605 57509 3217 1191 267 459 26 72 39 -567 8 5221 5893 7 333 558 8 27 35 4 -582 1172 1320619 2019743 1172 5819 7131 757 26 63 30 -584 43100 2500 5594 22561 134 4573 1660 48 84 37 -589 28 6046 6068 19 345 564 27 55 62 8 -595 139 1585165 1683606 138 2231 3598 132 54 84 28 -615 3 1056081 1116230 3 5794 5796 2 59 62 3 -619 7 1543114 5241 7 2442 3105 7 41 45 3 -634 2722 1221058 4999 2686 2426 7131 1735 54 60 7 -635 237 2119333 4667 237 561 5999 176 49 60 12 -644 5 1774169 2056171 5 5591 6091 4 33 39 3 -647 8 51632 64403 8 1457 1624 8 26 34 5 -651 1325 1620565 6281 1301 528 792 815 62 63 2 -665 13 4598 4789 13 511 558 11 39 46 7 -679 1560 1613200 25940 1552 1569 3118 781 49 84 35 -704 2 14226 15594 2 1086 1116 2 65 71 2 -715 25 1199352 3490 25 5036 5112 23 34 55 13 -716 1253 61989 6735 1050 1203 1625 397 52 65 14 -730 2584 5560 6170 634 2421 627 293 56 69 14 -736 8 1433153 4941 8 339 4594 8 28 36 5 -749 2 1326176 1339862 2 4339 6213 2 49 50 2 -753 1 53157 53157 1 1556 1556 1 26 26 1 -761 63 1443230 6881 63 3154 3204 26 56 73 14 -762 49 1449596 1968154 49 2437 3753 48 54 62 9 -775 35107 5330 769436 2471 447 6607 656 70 81 12 -789 1 1552458 1552458 1 2441 2441 1 62 62 1 -794 158 5585 6585 155 495 929 67 24 50 20 -839 9 29223 46530 9 1336 1465 9 52 
52 1 -844 5 2377545 2377635 5 5129 6321 5 53 69 5 -846 50 2172273 2589295 50 1582 3053 48 64 68 5 -847 2577 56656 63658 1582 1444 838 474 26 63 33 -861 1333 5570 6909 839 457 489 37 33 70 34 -873 2360 1519811 50487 2248 1310 1784 316 60 68 9 -879 228 6704 6785 79 279 507 121 35 66 24 -889 5130 2070007 39692 5040 1151 6791 2606 44 66 23 -896 4 511246 859452 4 6554 6561 4 67 71 4 -912 146 1322641 2238040 146 1366 6354 143 59 59 1 -913 82 5495 6870 78 350 565 67 24 43 15 -921 763 1580790 416881 763 6191 7131 509 63 64 2 -925 318 2500952 5025 309 476 6114 182 32 56 21 -931 12 4277 4809 12 238 256 9 63 83 9 -942 954 1331 2228193 952 1121 5047 788 65 70 6 -948 14 1785593 2600431 14 6550 6598 13 34 49 9 -956 5 5755 6023 5 359 411 5 43 48 4 -963 4 3812 3835 4 444 537 4 47 53 4 -978 5 51632 58212 5 1127 1556 5 24 32 5 -980 53 47201 59744 53 1537 1625 36 41 49 9 -987 6033 2020131 763444 4306 256 792 1832 60 64 5 -993 4 1615159 1718339 4 1570 3093 4 62 63 2 diff --git a/tests/queries/0_stateless/02995_index.sh b/tests/queries/0_stateless/02995_index.sh deleted file mode 100755 index 5125d03904e..00000000000 --- a/tests/queries/0_stateless/02995_index.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - -${CLICKHOUSE_CLIENT} --multiquery " - -DROP TABLE IF EXISTS test; -CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; - -INSERT INTO test -SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10)) -FROM numbers(100000); - -INSERT INTO test -SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10)) -FROM numbers(100000); - -INSERT INTO test -SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10)) -FROM numbers(100000); - -DETACH TABLE test; -ATTACH TABLE test; -" - -for i in {1..1000} -do - echo " -WITH ${i} AS try -SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test -WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String - AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String - AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String - AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String - AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String - AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String -HAVING count() > 0; -" -done | ${CLICKHOUSE_CLIENT} --multiquery - -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" From 1b606e525a9942ef69b0210760380170291d4f1b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 06:34:10 +0200 Subject: [PATCH 0614/2361] Fix inconsistent formatting of `NOT ((SELECT ...))` --- src/Parsers/ASTFunction.cpp | 12 ++++++++---- ...inconsistent_formatting_of_not_subquery.reference | 1 + .../03208_inconsistent_formatting_of_not_subquery.sh | 9 +++++++++ 3 files changed, 18 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.reference create mode 
100755 tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh
diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp
index b04ec1c22b2..230d4c778e8 100644
--- a/src/Parsers/ASTFunction.cpp
+++ b/src/Parsers/ASTFunction.cpp
@@ -329,19 +329,23 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
         const auto * literal = arguments->children[0]->as<ASTLiteral>();
         const auto * function = arguments->children[0]->as<ASTFunction>();
+        const auto * subquery = arguments->children[0]->as<ASTSubquery>();
         bool is_tuple = literal && literal->value.getType() == Field::Types::Tuple;
-        // do not add parentheses for tuple literal, otherwise extra parens will be added `-((3, 7, 3), 1)` -> `-(((3, 7, 3), 1))`
+        /// Do not add parentheses for tuple literal, otherwise extra parens will be added `-((3, 7, 3), 1)` -> `-(((3, 7, 3), 1))`
         bool literal_need_parens = literal && !is_tuple;
 
-        // negate always requires parentheses, otherwise -(-1) will be printed as --1
-        bool inside_parens = name == "negate" && (literal_need_parens || (function && function->name == "negate"));
+        /// Negate always requires parentheses, otherwise -(-1) will be printed as --1
+        /// Also extra parentheses are needed for subqueries, because NOT can be parsed as a function:
+        /// not(SELECT 1) cannot be parsed, while not((SELECT 1)) can.
+        bool inside_parens = (name == "negate" && (literal_need_parens || (function && function->name == "negate")))
+            || (subquery && name == "not");
 
         /// We DO need parentheses around a single literal
         /// For example, SELECT (NOT 0) + (NOT 0) cannot be transformed into SELECT NOT 0 + NOT 0, since
         /// this is equal to SELECT NOT (0 + NOT 0)
         bool outside_parens = frame.need_parens && !inside_parens;
 
-        // do not add extra parentheses for functions inside negate, i.e. -(-toUInt64(-(1)))
+        /// Do not add extra parentheses for functions inside negate, i.e. -(-toUInt64(-(1)))
         if (inside_parens)
             nested_need_parens.need_parens = false;
 
diff --git a/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.reference b/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.reference
new file mode 100644
index 00000000000..a1afeb1ab82
--- /dev/null
+++ b/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.reference
@@ -0,0 +1 @@
+SELECT NOT ((SELECT 1))
diff --git a/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh b/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh
new file mode 100755
index 00000000000..5f7397015fa
--- /dev/null
+++ b/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+# Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so the grpc library is not built
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_FORMAT --oneline --query "SELECT NOT((SELECT 1))" From efb6491cfdc10c2b8ba6268c29677ea298046bed Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 06:34:57 +0200 Subject: [PATCH 0615/2361] Update 03208_inconsistent_formatting_of_not_subquery.sh --- .../03208_inconsistent_formatting_of_not_subquery.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh b/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh index 5f7397015fa..594d316b621 100755 --- a/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh +++ b/tests/queries/0_stateless/03208_inconsistent_formatting_of_not_subquery.sh @@ -1,6 +1,4 @@ #!/usr/bin/env bash -# Tags: no-fasttest -# Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so the grpc library is not built CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 989476d5234bffd223988bf8aa88e2021e999574 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 06:41:52 +0200 Subject: [PATCH 0616/2361] Make test `01592_long_window_functions1` lighter --- .../0_stateless/01592_long_window_functions1.sql | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01592_long_window_functions1.sql b/tests/queries/0_stateless/01592_long_window_functions1.sql index d2d32e24eaa..671245599cc 100644 --- a/tests/queries/0_stateless/01592_long_window_functions1.sql +++ b/tests/queries/0_stateless/01592_long_window_functions1.sql @@ -8,14 +8,14 @@ drop table if exists stack; set max_insert_threads = 4; create table stack(item_id Int64, brand_id Int64, rack_id Int64, dt DateTime, expiration_dt DateTime, quantity UInt64) -Engine = MergeTree -partition by toYYYYMM(dt) +Engine = MergeTree +partition by toYYYYMM(dt) order by (brand_id, toStartOfHour(dt)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -insert into stack -select number%99991, number%11, number%1111, toDateTime('2020-01-01 00:00:00')+number/100, +insert into stack +select number%99991, number%11, number%1111, toDateTime('2020-01-01 00:00:00')+number/100, toDateTime('2020-02-01 00:00:00')+number/10, intDiv(number,100)+1 -from numbers_mt(10000000); +from numbers_mt(1000000); select '---- arrays ----'; @@ -32,8 +32,8 @@ select '---- window f ----'; select cityHash64( toString( groupArray (tuple(*) ) )) from ( select brand_id, rack_id, quantity from ( select brand_id, rack_id, quantity, row_number() over (partition by brand_id, rack_id order by quantity) rn - from stack ) as t0 - where rn <= 2 + from stack ) as t0 + where rn <= 2 order by brand_id, rack_id, quantity ) t; From 8c264230e30bf97f6bac999401cd594b31e09977 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 07:20:33 +0200 Subject: [PATCH 0617/2361] Loosen the limit --- docker/test/stateful/run.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 857385f4715..72a8f31ab71 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -118,8 +118,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1" clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1" - clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1" 
- clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1" clickhouse-client --query "DROP TABLE datasets.hits_v1" clickhouse-client --query "DROP TABLE datasets.visits_v1" @@ -191,16 +191,16 @@ else ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" - clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC" clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC" else clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" fi - clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, 
LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID 
String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0" fi clickhouse-client --query "SHOW TABLES FROM test" From db549c93a18f49540676ae53bc04e75b85705ddb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 07:34:34 +0200 Subject: [PATCH 0618/2361] Fix error --- src/IO/ReadWriteBufferFromHTTP.cpp | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 4d27a78c8dc..cea1a272401 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -123,7 +123,16 @@ void ReadWriteBufferFromHTTP::prepareRequest(Poco::Net::HTTPRequest & request, s std::optional ReadWriteBufferFromHTTP::tryGetFileSize() { if (!file_info) - file_info = getFileInfo(); + { + try + { + file_info = getFileInfo(); + } + catch (const HTTPException & e) + { + return std::nullopt; + } + } return file_info->file_size; } @@ -679,7 +688,7 @@ std::optional ReadWriteBufferFromHTTP::tryGetLastModificationTime() { file_info = getFileInfo(); } - catch (...) + catch (const HTTPException & e) { return std::nullopt; } @@ -700,7 +709,7 @@ ReadWriteBufferFromHTTP::HTTPFileInfo ReadWriteBufferFromHTTP::getFileInfo() { getHeadResponse(response); } - catch (HTTPException & e) + catch (const HTTPException & e) { /// Maybe the web server doesn't support HEAD requests. /// E.g. webhdfs reports status 400. 
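[Editorial note] The patch above illustrates the usual try*-accessor pattern: the expected failure mode (an HTTP error raised while probing the file with a HEAD request) is converted into an empty std::optional, while any other exception keeps propagating. The following is a minimal, self-contained sketch of that pattern only; it is not the actual ReadWriteBufferFromHTTP code, and HTTPError / fetchContentLength are made-up names used for illustration.

#include <cstddef>
#include <iostream>
#include <optional>
#include <stdexcept>

/// Hypothetical stand-in for the HTTP exception type thrown by the transport layer.
struct HTTPError : std::runtime_error
{
    using std::runtime_error::runtime_error;
};

/// Pretend HEAD request: may fail if the server does not support HEAD at all.
std::size_t fetchContentLength(bool head_supported)
{
    if (!head_supported)
        throw HTTPError("HEAD request rejected");
    return 12345;
}

/// try*-style accessor: the expected error becomes std::nullopt ("size unknown"),
/// everything else (logic errors, cancellation, ...) still propagates to the caller.
std::optional<std::size_t> tryGetFileSize(bool head_supported)
{
    try
    {
        return fetchContentLength(head_supported);
    }
    catch (const HTTPError &)
    {
        return std::nullopt;
    }
}

int main()
{
    std::cout << tryGetFileSize(true).value_or(0) << '\n';   /// prints 12345
    std::cout << tryGetFileSize(false).has_value() << '\n';  /// prints 0: size is simply unknown
}

Note that the follow-up commits below narrow catch (...) to the specific exception type for the same reason: swallowing every exception would also hide programming errors, whereas catching only the HTTP error keeps the "unknown size / unknown mtime" fallback limited to the case the code actually expects.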
From 32f624eebaa560f4c9d6bf9145931270098e8db1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 07:35:10 +0200 Subject: [PATCH 0619/2361] Fix error --- src/IO/ReadWriteBufferFromHTTP.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index cea1a272401..961e8dd6425 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -128,7 +128,7 @@ std::optional ReadWriteBufferFromHTTP::tryGetFileSize() { file_info = getFileInfo(); } - catch (const HTTPException & e) + catch (const HTTPException &) { return std::nullopt; } From dc601dc7455895574143f5baf345731d437bf8d3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 07:37:15 +0200 Subject: [PATCH 0620/2361] Fix error --- src/IO/ReadWriteBufferFromHTTP.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 961e8dd6425..85230957b3f 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -688,7 +688,7 @@ std::optional ReadWriteBufferFromHTTP::tryGetLastModificationTime() { file_info = getFileInfo(); } - catch (const HTTPException & e) + catch (const HTTPException &) { return std::nullopt; } From de4a97a9a732945c3b3df4cc903a1dea30fffc5a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 07:41:15 +0200 Subject: [PATCH 0621/2361] Remove support for -WithDictionary suffix for data types --- src/DataTypes/DataTypeFactory.cpp | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index af37cde2846..eb3bc973857 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -125,23 +125,6 @@ DataTypePtr DataTypeFactory::getImpl(const String & family_name_param, const AST { String family_name = getAliasToOrName(family_name_param); - if (endsWith(family_name, "WithDictionary")) - { - ASTPtr low_cardinality_params = std::make_shared(); - String param_name = family_name.substr(0, family_name.size() - strlen("WithDictionary")); - if (parameters) - { - auto func = std::make_shared(); - func->name = param_name; - func->arguments = parameters; - low_cardinality_params->children.push_back(func); - } - else - low_cardinality_params->children.push_back(std::make_shared(param_name)); - - return getImpl("LowCardinality", low_cardinality_params); - } - const auto * creator = findCreatorByName(family_name); if constexpr (nullptr_on_error) { From f0f3bcfee972ba28928a9f33afd374d369d9babf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 07:45:57 +0200 Subject: [PATCH 0622/2361] Update tests --- ...cardinality_dictionary_deserialization.sql | 2 +- .../0_stateless/00688_low_cardinality_in.sql | 2 +- .../00688_low_cardinality_prewhere.sql | 2 +- .../00688_low_cardinality_serialization.sql | 4 +-- .../00688_low_cardinality_syntax.reference | 6 ----- .../00688_low_cardinality_syntax.sql | 26 +------------------ .../00717_low_cardinaliry_group_by.sql | 2 +- .../00718_low_cardinaliry_alter.sql | 2 +- .../00752_low_cardinality_mv_1.sql | 3 +-- .../02235_remote_fs_cache_stress.sh | 2 +- 10 files changed, 10 insertions(+), 41 deletions(-) diff --git a/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql b/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql index c4613acf5f3..d359efd8d42 100644 
--- a/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql +++ b/tests/queries/0_stateless/00688_low_cardinality_dictionary_deserialization.sql @@ -1,5 +1,5 @@ drop table if exists lc_dict_reading; -create table lc_dict_reading (val UInt64, str StringWithDictionary, pat String) engine = MergeTree order by val SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +create table lc_dict_reading (val UInt64, str LowCardinality(String), pat String) engine = MergeTree order by val SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; insert into lc_dict_reading select number, if(number < 8192 * 4, number % 100, number) as s, s from system.numbers limit 1000000; select sum(toUInt64(str)), sum(toUInt64(pat)) from lc_dict_reading where val < 8129 or val > 8192 * 4; drop table if exists lc_dict_reading; diff --git a/tests/queries/0_stateless/00688_low_cardinality_in.sql b/tests/queries/0_stateless/00688_low_cardinality_in.sql index cb57fad51a4..c39fdb37160 100644 --- a/tests/queries/0_stateless/00688_low_cardinality_in.sql +++ b/tests/queries/0_stateless/00688_low_cardinality_in.sql @@ -1,6 +1,6 @@ set allow_suspicious_low_cardinality_types = 1; drop table if exists lc_00688; -create table lc_00688 (str StringWithDictionary, val UInt8WithDictionary) engine = MergeTree order by tuple(); +create table lc_00688 (str LowCardinality(String), val LowCardinality(UInt8)) engine = MergeTree order by tuple(); insert into lc_00688 values ('a', 1), ('b', 2); select str, str in ('a', 'd') from lc_00688; select val, val in (1, 3) from lc_00688; diff --git a/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql b/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql index a15b2540fe6..17c74b7ca05 100644 --- a/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql +++ b/tests/queries/0_stateless/00688_low_cardinality_prewhere.sql @@ -1,5 +1,5 @@ drop table if exists lc_prewhere; -create table lc_prewhere (key UInt64, val UInt64, str StringWithDictionary, s String) engine = MergeTree order by key settings index_granularity = 8192; +create table lc_prewhere (key UInt64, val UInt64, str LowCardinality(String), s String) engine = MergeTree order by key settings index_granularity = 8192; insert into lc_prewhere select number, if(number < 10 or number > 8192 * 9, 1, 0), toString(number) as s, s from system.numbers limit 100000; select sum(toUInt64(str)), sum(toUInt64(s)) from lc_prewhere prewhere val == 1; drop table if exists lc_prewhere; diff --git a/tests/queries/0_stateless/00688_low_cardinality_serialization.sql b/tests/queries/0_stateless/00688_low_cardinality_serialization.sql index b4fe4b29200..1e4de3f3d3e 100644 --- a/tests/queries/0_stateless/00688_low_cardinality_serialization.sql +++ b/tests/queries/0_stateless/00688_low_cardinality_serialization.sql @@ -8,8 +8,8 @@ select 'MergeTree'; drop table if exists lc_small_dict; drop table if exists lc_big_dict; -create table lc_small_dict (str StringWithDictionary) engine = MergeTree order by str SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -create table lc_big_dict (str StringWithDictionary) engine = MergeTree order by str SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +create table lc_small_dict (str LowCardinality(String)) engine = MergeTree order by str SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +create table lc_big_dict (str LowCardinality(String)) engine = MergeTree order by str SETTINGS index_granularity = 8192, 
index_granularity_bytes = '10Mi'; insert into lc_small_dict select toString(number % 1000) from system.numbers limit 1000000; insert into lc_big_dict select toString(number) from system.numbers limit 1000000; diff --git a/tests/queries/0_stateless/00688_low_cardinality_syntax.reference b/tests/queries/0_stateless/00688_low_cardinality_syntax.reference index ca27069a7df..b06beb0d6f0 100644 --- a/tests/queries/0_stateless/00688_low_cardinality_syntax.reference +++ b/tests/queries/0_stateless/00688_low_cardinality_syntax.reference @@ -1,13 +1,7 @@ a a -a -a 1 1 -1 -1 -ab -ab ab ab - diff --git a/tests/queries/0_stateless/00688_low_cardinality_syntax.sql b/tests/queries/0_stateless/00688_low_cardinality_syntax.sql index a11d9e2d9fe..dccdac1d95d 100644 --- a/tests/queries/0_stateless/00688_low_cardinality_syntax.sql +++ b/tests/queries/0_stateless/00688_low_cardinality_syntax.sql @@ -13,56 +13,32 @@ drop table if exists lc_null_fix_str_0; drop table if exists lc_null_fix_str_1; create table lc_str_0 (str LowCardinality(String)) engine = Memory; -create table lc_str_1 (str StringWithDictionary) engine = Memory; create table lc_null_str_0 (str LowCardinality(Nullable(String))) engine = Memory; -create table lc_null_str_1 (str NullableWithDictionary(String)) engine = Memory; create table lc_int8_0 (val LowCardinality(Int8)) engine = Memory; -create table lc_int8_1 (val Int8WithDictionary) engine = Memory; create table lc_null_int8_0 (val LowCardinality(Nullable(Int8))) engine = Memory; -create table lc_null_int8_1 (val NullableWithDictionary(Int8)) engine = Memory; create table lc_fix_str_0 (str LowCardinality(FixedString(2))) engine = Memory; -create table lc_fix_str_1 (str FixedStringWithDictionary(2)) engine = Memory; create table lc_null_fix_str_0 (str LowCardinality(Nullable(FixedString(2)))) engine = Memory; -create table lc_null_fix_str_1 (str NullableWithDictionary(FixedString(2))) engine = Memory; insert into lc_str_0 select 'a'; -insert into lc_str_1 select 'a'; insert into lc_null_str_0 select 'a'; -insert into lc_null_str_1 select 'a'; insert into lc_int8_0 select 1; -insert into lc_int8_1 select 1; insert into lc_null_int8_0 select 1; -insert into lc_null_int8_1 select 1; insert into lc_fix_str_0 select 'ab'; -insert into lc_fix_str_1 select 'ab'; insert into lc_null_fix_str_0 select 'ab'; -insert into lc_null_fix_str_1 select 'ab'; select str from lc_str_0; -select str from lc_str_1; select str from lc_null_str_0; -select str from lc_null_str_1; select val from lc_int8_0; -select val from lc_int8_1; select val from lc_null_int8_0; -select val from lc_null_int8_1; select str from lc_fix_str_0; -select str from lc_fix_str_1; select str from lc_null_fix_str_0; -select str from lc_null_fix_str_1; drop table if exists lc_str_0; -drop table if exists lc_str_1; drop table if exists lc_null_str_0; -drop table if exists lc_null_str_1; drop table if exists lc_int8_0; -drop table if exists lc_int8_1; drop table if exists lc_null_int8_0; -drop table if exists lc_null_int8_1; drop table if exists lc_fix_str_0; -drop table if exists lc_fix_str_1; drop table if exists lc_null_fix_str_0; -drop table if exists lc_null_fix_str_1; select '-'; SELECT toLowCardinality('a') AS s, toTypeName(s), toTypeName(length(s)) from system.one; @@ -73,7 +49,7 @@ select (toLowCardinality(z) as val) || 'b' from (select arrayJoin(['c', 'd']) a select '-'; drop table if exists lc_str_uuid; -create table lc_str_uuid(str1 String, str2 LowCardinality(String), str3 StringWithDictionary) ENGINE=Memory; +create table 
lc_str_uuid(str1 String, str2 LowCardinality(String), str3 LowCardinality(String)) ENGINE=Memory; select toUUID(str1), toUUID(str2), toUUID(str3) from lc_str_uuid; select toUUID(str1, '', NULL), toUUID(str2, '', NULL), toUUID(str3, '', NULL) from lc_str_uuid; insert into lc_str_uuid values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', '61f0c404-5cb3-11e7-907b-a6006ad3dba0', '61f0c404-5cb3-11e7-907b-a6006ad3dba0'); diff --git a/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql b/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql index 02915d4e611..3115ab508fe 100644 --- a/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql +++ b/tests/queries/0_stateless/00717_low_cardinaliry_group_by.sql @@ -1,5 +1,5 @@ drop table if exists tab_00717; -create table tab_00717 (a String, b StringWithDictionary) engine = MergeTree order by a; +create table tab_00717 (a String, b LowCardinality(String)) engine = MergeTree order by a; insert into tab_00717 values ('a_1', 'b_1'), ('a_2', 'b_2'); select count() from tab_00717; select a from tab_00717 group by a order by a; diff --git a/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql b/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql index 591ff952132..524e396bcb1 100644 --- a/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql +++ b/tests/queries/0_stateless/00718_low_cardinaliry_alter.sql @@ -7,7 +7,7 @@ alter table tab_00718 modify column b UInt32; select *, toTypeName(b) from tab_00718; alter table tab_00718 modify column b LowCardinality(UInt32); select *, toTypeName(b) from tab_00718; -alter table tab_00718 modify column b StringWithDictionary; +alter table tab_00718 modify column b LowCardinality(String); select *, toTypeName(b) from tab_00718; alter table tab_00718 modify column b LowCardinality(UInt32); select *, toTypeName(b) from tab_00718; diff --git a/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql b/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql index 60cc30ec2c3..4fdd0a2496e 100644 --- a/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql +++ b/tests/queries/0_stateless/00752_low_cardinality_mv_1.sql @@ -1,7 +1,7 @@ drop table if exists lc_00752; drop table if exists lc_mv_00752; -create table lc_00752 (str StringWithDictionary) engine = MergeTree order by tuple(); +create table lc_00752 (str LowCardinality(String)) engine = MergeTree order by tuple(); insert into lc_00752 values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde'); @@ -12,4 +12,3 @@ select * from lc_mv_00752 order by letter; drop table if exists lc_00752; drop table if exists lc_mv_00752; - diff --git a/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh index 0b6b9f461b0..ffc38c0c1bd 100755 --- a/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh +++ b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh @@ -28,7 +28,7 @@ ORDER BY tuple(); INSERT INTO t_01411_num (num) SELECT number % 1000 FROM numbers(100000); -create table lc_dict_reading (val UInt64, str StringWithDictionary, pat String) engine = MergeTree order by val; +create table lc_dict_reading (val UInt64, str LowCardinality(String), pat String) engine = MergeTree order by val; insert into lc_dict_reading select number, if(number < 8192 * 4, number % 100, number) as s, s from system.numbers limit 100000; """ From 049056e55aa974ecad8786fb1ec738c96a191118 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 22 Jul 2024 05:48:40 +0000 Subject: [PATCH 0623/2361] Fix 
style --- src/Functions/dateDiff.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Functions/dateDiff.cpp b/src/Functions/dateDiff.cpp index a39cbae4e30..faab42817ba 100644 --- a/src/Functions/dateDiff.cpp +++ b/src/Functions/dateDiff.cpp @@ -26,8 +26,6 @@ namespace DB namespace ErrorCodes { - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_COLUMN; extern const int BAD_ARGUMENTS; } From a564f70b66367ee4363d46e45eb5a9c66f131fec Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 08:09:39 +0200 Subject: [PATCH 0624/2361] Fix error --- src/Parsers/ParserAlterQuery.cpp | 2 -- src/Parsers/ParserCreateIndexQuery.cpp | 4 ++-- src/Parsers/ParserCreateQuery.cpp | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index 28dbf781011..dbefb0cb966 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -9,8 +9,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 2fa34696c58..ddefb3d37fb 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -21,7 +21,7 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected ParserToken close_p(TokenType::ClosingRoundBracket); ParserOrderByExpressionList order_list_p; - ParserDataType data_type_p; + ParserFunction type_p; ParserExpression expression_p; ParserUnsignedInteger granularity_p; @@ -68,7 +68,7 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected if (s_type.ignore(pos, expected)) { - if (!data_type_p.parse(pos, type, expected)) + if (!type_p.parse(pos, type, expected)) return false; } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index fa232954cd6..3dba58546af 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -179,7 +179,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ParserKeyword s_granularity(Keyword::GRANULARITY); ParserIdentifier name_p; - ParserDataType data_type_p; + ParserFunction type_p; ParserExpression expression_p; ParserUnsignedInteger granularity_p; @@ -197,7 +197,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!s_type.ignore(pos, expected)) return false; - if (!data_type_p.parse(pos, type, expected)) + if (!type_p.parse(pos, type, expected)) return false; if (s_granularity.ignore(pos, expected)) From 8d7471f8bd2e0c6dc242231c4358448787e6c56f Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Sat, 20 Jul 2024 00:03:40 +0200 Subject: [PATCH 0625/2361] Fix for deadlock in getDDLWorker --- src/Interpreters/Context.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 94bcb88ed53..48878733a00 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -3490,18 +3490,22 @@ DDLWorker & Context::getDDLWorker() const if (shared->ddl_worker_startup_task) waitLoad(shared->ddl_worker_startup_task); // Just wait and do not prioritize, because it depends on all load and startup tasks - SharedLockGuard lock(shared->mutex); - if (!shared->ddl_worker) { - if (!hasZooKeeper()) - throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no 
Zookeeper configuration in server config"); - - if (!hasDistributedDDL()) - throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no DistributedDDL configuration in server config"); - - throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "DDL background thread is not initialized"); + /// Only acquire the lock for reading ddl_worker field. + /// hasZooKeeper() and hasDistributedDDL() acquire the same lock as well and double acquisition of the lock in shared mode can lead + /// to a deadlock if an exclusive lock attempt is made in the meantime by another thread. + SharedLockGuard lock(shared->mutex); + if (shared->ddl_worker) + return *shared->ddl_worker; } - return *shared->ddl_worker; + + if (!hasZooKeeper()) + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no Zookeeper configuration in server config"); + + if (!hasDistributedDDL()) + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no DistributedDDL configuration in server config"); + + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "DDL background thread is not initialized"); } zkutil::ZooKeeperPtr Context::getZooKeeper() const From bbbf2fec88a331281bf51d3a02bd3f476e3bf6ab Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 08:37:54 +0200 Subject: [PATCH 0626/2361] Fix error --- src/Parsers/ParserCreateIndexQuery.cpp | 9 +++++++-- src/Parsers/ParserCreateQuery.cpp | 7 ++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index ddefb3d37fb..8a4c1c0b17a 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -7,9 +7,9 @@ #include #include #include -#include #include + namespace DB { @@ -69,7 +69,12 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected if (s_type.ignore(pos, expected)) { if (!type_p.parse(pos, type, expected)) - return false; + { + if (ParserIdentifier().parse(pos, type, expected)) + type = makeASTFunction(type->as().name()); + else + return false; + } } if (s_granularity.ignore(pos, expected)) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 3dba58546af..bff5da4a536 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -198,7 +198,12 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; if (!type_p.parse(pos, type, expected)) - return false; + { + if (name_p.parse(pos, type, expected)) + type = makeASTFunction(type->as().name()); + else + return false; + } if (s_granularity.ignore(pos, expected)) { From ed02246e0c2019f9ee661e9636f166b955a672de Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 08:46:41 +0200 Subject: [PATCH 0627/2361] What if I will change this test? 
--- tests/integration/test_ssl_cert_authentication/test.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tests/integration/test_ssl_cert_authentication/test.py b/tests/integration/test_ssl_cert_authentication/test.py index 756a1e1996c..3af88759e82 100644 --- a/tests/integration/test_ssl_cert_authentication/test.py +++ b/tests/integration/test_ssl_cert_authentication/test.py @@ -43,15 +43,10 @@ def started_cluster(): config = """ - none - + strict {certificateFile} {privateKeyFile} {caConfig} - - - AcceptCertificateHandler - """ From 3d4604a64ad01c70c80924ea11154514bab4e424 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 08:47:08 +0200 Subject: [PATCH 0628/2361] Revert "What if I will change this test?" This reverts commit ed02246e0c2019f9ee661e9636f166b955a672de. --- tests/integration/test_ssl_cert_authentication/test.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_ssl_cert_authentication/test.py b/tests/integration/test_ssl_cert_authentication/test.py index 3af88759e82..756a1e1996c 100644 --- a/tests/integration/test_ssl_cert_authentication/test.py +++ b/tests/integration/test_ssl_cert_authentication/test.py @@ -43,10 +43,15 @@ def started_cluster(): config = """ - strict + none + {certificateFile} {privateKeyFile} {caConfig} + + + AcceptCertificateHandler + """ From 1a3559fbc3c5257c4e0f5ec16eda3d09d8ebcca0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 08:46:41 +0200 Subject: [PATCH 0629/2361] What if I will change this test? --- tests/integration/test_ssl_cert_authentication/test.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tests/integration/test_ssl_cert_authentication/test.py b/tests/integration/test_ssl_cert_authentication/test.py index 756a1e1996c..3af88759e82 100644 --- a/tests/integration/test_ssl_cert_authentication/test.py +++ b/tests/integration/test_ssl_cert_authentication/test.py @@ -43,15 +43,10 @@ def started_cluster(): config = """ - none - + strict {certificateFile} {privateKeyFile} {caConfig} - - - AcceptCertificateHandler - """ From 9d2dac90ed30643bd8c4fb49280060432677905f Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 22 Jul 2024 07:27:46 +0000 Subject: [PATCH 0630/2361] Functions [a-g]: Use input_rows_count where appropriate --- src/Functions/acosh.cpp | 11 ++++--- src/Functions/addMicroseconds.cpp | 1 + src/Functions/addMilliseconds.cpp | 1 + src/Functions/addNanoseconds.cpp | 1 + src/Functions/aes_encrypt_mysql.cpp | 1 - src/Functions/appendTrailingCharIfAbsent.cpp | 9 +++--- src/Functions/ascii.cpp | 2 -- src/Functions/asinh.cpp | 11 ++++--- src/Functions/atan2.cpp | 11 ++++--- src/Functions/atanh.cpp | 11 ++++--- src/Functions/base58Encode.cpp | 2 ++ src/Functions/base64Decode.cpp | 2 ++ src/Functions/base64Encode.cpp | 2 ++ src/Functions/base64URLDecode.cpp | 2 ++ src/Functions/base64URLEncode.cpp | 2 ++ src/Functions/byteSize.cpp | 8 ++--- src/Functions/byteSwap.cpp | 1 + src/Functions/caseWithExpression.cpp | 3 +- src/Functions/convertCharset.cpp | 12 +++---- src/Functions/cosh.cpp | 11 ++++--- .../countSubstringsCaseInsensitiveUTF8.cpp | 3 +- src/Functions/dateName.cpp | 16 +++++----- src/Functions/degrees.cpp | 22 +++++++------ src/Functions/filesystem.cpp | 2 +- src/Functions/formatDateTime.cpp | 32 +++++++++---------- src/Functions/formatQuery.cpp | 10 +++--- src/Functions/formatReadable.h | 31 +++++++++--------- src/Functions/geohashDecode.cpp | 16 ++++------ src/Functions/geohashEncode.cpp | 14 
++++---- src/Functions/transform.cpp | 3 +- 30 files changed, 131 insertions(+), 122 deletions(-) diff --git a/src/Functions/acosh.cpp b/src/Functions/acosh.cpp index 5b071da9c40..2bab84c77af 100644 --- a/src/Functions/acosh.cpp +++ b/src/Functions/acosh.cpp @@ -5,11 +5,12 @@ namespace DB { namespace { - struct AcoshName - { - static constexpr auto name = "acosh"; - }; - using FunctionAcosh = FunctionMathUnary>; + +struct AcoshName +{ + static constexpr auto name = "acosh"; +}; +using FunctionAcosh = FunctionMathUnary>; } diff --git a/src/Functions/addMicroseconds.cpp b/src/Functions/addMicroseconds.cpp index 0dcd6b4452f..8c0ae06dcd0 100644 --- a/src/Functions/addMicroseconds.cpp +++ b/src/Functions/addMicroseconds.cpp @@ -6,6 +6,7 @@ namespace DB { using FunctionAddMicroseconds = FunctionDateOrDateTimeAddInterval; + REGISTER_FUNCTION(AddMicroseconds) { factory.registerFunction(); diff --git a/src/Functions/addMilliseconds.cpp b/src/Functions/addMilliseconds.cpp index 0e2b696d367..83e1f96ec4b 100644 --- a/src/Functions/addMilliseconds.cpp +++ b/src/Functions/addMilliseconds.cpp @@ -6,6 +6,7 @@ namespace DB { using FunctionAddMilliseconds = FunctionDateOrDateTimeAddInterval; + REGISTER_FUNCTION(AddMilliseconds) { factory.registerFunction(); diff --git a/src/Functions/addNanoseconds.cpp b/src/Functions/addNanoseconds.cpp index 93eadc814d9..8f9a54752b9 100644 --- a/src/Functions/addNanoseconds.cpp +++ b/src/Functions/addNanoseconds.cpp @@ -6,6 +6,7 @@ namespace DB { using FunctionAddNanoseconds = FunctionDateOrDateTimeAddInterval; + REGISTER_FUNCTION(AddNanoseconds) { factory.registerFunction(); diff --git a/src/Functions/aes_encrypt_mysql.cpp b/src/Functions/aes_encrypt_mysql.cpp index fb120151c25..33733f92b27 100644 --- a/src/Functions/aes_encrypt_mysql.cpp +++ b/src/Functions/aes_encrypt_mysql.cpp @@ -7,7 +7,6 @@ namespace DB { - namespace { diff --git a/src/Functions/appendTrailingCharIfAbsent.cpp b/src/Functions/appendTrailingCharIfAbsent.cpp index a5554171aaa..0e57d5c55ce 100644 --- a/src/Functions/appendTrailingCharIfAbsent.cpp +++ b/src/Functions/appendTrailingCharIfAbsent.cpp @@ -57,7 +57,7 @@ private: bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & column = arguments[0].column; const auto & column_char = arguments[1].column; @@ -80,14 +80,13 @@ private: auto & dst_data = col_res->getChars(); auto & dst_offsets = col_res->getOffsets(); - const auto size = src_offsets.size(); - dst_data.resize(src_data.size() + size); - dst_offsets.resize(size); + dst_data.resize(src_data.size() + input_rows_count); + dst_offsets.resize(input_rows_count); ColumnString::Offset src_offset{}; ColumnString::Offset dst_offset{}; - for (const auto i : collections::range(0, size)) + for (size_t i = 0; i < input_rows_count; ++i) { const auto src_length = src_offsets[i] - src_offset; memcpySmallAllowReadWriteOverflow15(&dst_data[dst_offset], &src_data[src_offset], src_length); diff --git a/src/Functions/ascii.cpp b/src/Functions/ascii.cpp index 7c8158b53d4..0d50e5d203b 100644 --- a/src/Functions/ascii.cpp +++ b/src/Functions/ascii.cpp @@ -45,9 +45,7 @@ struct AsciiImpl size_t size = data.size() / n; for (size_t i = 0; i < size; ++i) - 
{ res[i] = doAscii(data, i * n, n); - } } [[noreturn]] static void array(const ColumnString::Offsets & /*offsets*/, PaddedPODArray & /*res*/) diff --git a/src/Functions/asinh.cpp b/src/Functions/asinh.cpp index 6af832ae07c..b5e3626148f 100644 --- a/src/Functions/asinh.cpp +++ b/src/Functions/asinh.cpp @@ -5,11 +5,12 @@ namespace DB { namespace { - struct AsinhName - { - static constexpr auto name = "asinh"; - }; - using FunctionAsinh = FunctionMathUnary>; + +struct AsinhName +{ + static constexpr auto name = "asinh"; +}; +using FunctionAsinh = FunctionMathUnary>; } diff --git a/src/Functions/atan2.cpp b/src/Functions/atan2.cpp index 42294e11458..218f4c5406f 100644 --- a/src/Functions/atan2.cpp +++ b/src/Functions/atan2.cpp @@ -5,11 +5,12 @@ namespace DB { namespace { - struct Atan2Name - { - static constexpr auto name = "atan2"; - }; - using FunctionAtan2 = FunctionMathBinaryFloat64>; + +struct Atan2Name +{ + static constexpr auto name = "atan2"; +}; +using FunctionAtan2 = FunctionMathBinaryFloat64>; } diff --git a/src/Functions/atanh.cpp b/src/Functions/atanh.cpp index fab25414725..a36f5bcbcf0 100644 --- a/src/Functions/atanh.cpp +++ b/src/Functions/atanh.cpp @@ -5,11 +5,12 @@ namespace DB { namespace { - struct AtanhName - { - static constexpr auto name = "atanh"; - }; - using FunctionAtanh = FunctionMathUnary>; + +struct AtanhName +{ + static constexpr auto name = "atanh"; +}; +using FunctionAtanh = FunctionMathUnary>; } diff --git a/src/Functions/base58Encode.cpp b/src/Functions/base58Encode.cpp index cf790ebddab..3ae2fb12c5e 100644 --- a/src/Functions/base58Encode.cpp +++ b/src/Functions/base58Encode.cpp @@ -3,8 +3,10 @@ namespace DB { + REGISTER_FUNCTION(Base58Encode) { factory.registerFunction>(); } + } diff --git a/src/Functions/base64Decode.cpp b/src/Functions/base64Decode.cpp index 4d06ac99d6f..349475af3f0 100644 --- a/src/Functions/base64Decode.cpp +++ b/src/Functions/base64Decode.cpp @@ -5,6 +5,7 @@ namespace DB { + REGISTER_FUNCTION(Base64Decode) { FunctionDocumentation::Description description = R"(Accepts a String and decodes it from base64, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-4). Throws an exception in case of an error. Alias: FROM_BASE64.)"; @@ -19,6 +20,7 @@ REGISTER_FUNCTION(Base64Decode) /// MySQL compatibility alias. factory.registerAlias("FROM_BASE64", "base64Decode", FunctionFactory::Case::Insensitive); } + } #endif diff --git a/src/Functions/base64Encode.cpp b/src/Functions/base64Encode.cpp index 64142995552..fe0fa642599 100644 --- a/src/Functions/base64Encode.cpp +++ b/src/Functions/base64Encode.cpp @@ -5,6 +5,7 @@ namespace DB { + REGISTER_FUNCTION(Base64Encode) { FunctionDocumentation::Description description = R"(Encodes a String as base64, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-4). Alias: TO_BASE64.)"; @@ -19,6 +20,7 @@ REGISTER_FUNCTION(Base64Encode) /// MySQL compatibility alias. 
factory.registerAlias("TO_BASE64", "base64Encode", FunctionFactory::Case::Insensitive); } + } #endif diff --git a/src/Functions/base64URLDecode.cpp b/src/Functions/base64URLDecode.cpp index f5766dc60bd..f256e111619 100644 --- a/src/Functions/base64URLDecode.cpp +++ b/src/Functions/base64URLDecode.cpp @@ -5,6 +5,7 @@ namespace DB { + REGISTER_FUNCTION(Base64URLDecode) { FunctionDocumentation::Description description = R"(Accepts a base64-encoded URL and decodes it from base64 with URL-specific modifications, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-5).)"; @@ -16,6 +17,7 @@ REGISTER_FUNCTION(Base64URLDecode) factory.registerFunction>>({description, syntax, arguments, returned_value, examples, categories}); } + } #endif diff --git a/src/Functions/base64URLEncode.cpp b/src/Functions/base64URLEncode.cpp index 73a465a30c5..215712f7586 100644 --- a/src/Functions/base64URLEncode.cpp +++ b/src/Functions/base64URLEncode.cpp @@ -5,6 +5,7 @@ namespace DB { + REGISTER_FUNCTION(Base64URLEncode) { FunctionDocumentation::Description description = R"(Encodes an URL (String or FixedString) as base64 with URL-specific modifications, according to RFC 4648 (https://datatracker.ietf.org/doc/html/rfc4648#section-5).)"; @@ -16,6 +17,7 @@ REGISTER_FUNCTION(Base64URLEncode) factory.registerFunction>>({description, syntax, arguments, returned_value, examples, categories}); } + } #endif diff --git a/src/Functions/byteSize.cpp b/src/Functions/byteSize.cpp index 93a3a86641a..d366a1b2e12 100644 --- a/src/Functions/byteSize.cpp +++ b/src/Functions/byteSize.cpp @@ -67,11 +67,11 @@ public: const IColumn * column = arguments[arg_num].column.get(); if (arg_num == 0) - for (size_t row_num = 0; row_num < input_rows_count; ++row_num) - vec_res[row_num] = column->byteSizeAt(row_num); + for (size_t row = 0; row < input_rows_count; ++row) + vec_res[row] = column->byteSizeAt(row); else - for (size_t row_num = 0; row_num < input_rows_count; ++row_num) - vec_res[row_num] += column->byteSizeAt(row_num); + for (size_t row = 0; row < input_rows_count; ++row) + vec_res[row] += column->byteSizeAt(row); } return result_col; diff --git a/src/Functions/byteSwap.cpp b/src/Functions/byteSwap.cpp index 6c824b851b0..2094ec4fa1a 100644 --- a/src/Functions/byteSwap.cpp +++ b/src/Functions/byteSwap.cpp @@ -10,6 +10,7 @@ extern const int NOT_IMPLEMENTED; namespace { + template requires std::is_integral_v T byteSwap(T x) diff --git a/src/Functions/caseWithExpression.cpp b/src/Functions/caseWithExpression.cpp index 71fccc8436e..f0a620489ef 100644 --- a/src/Functions/caseWithExpression.cpp +++ b/src/Functions/caseWithExpression.cpp @@ -98,8 +98,7 @@ public: /// Execute transform. 
ColumnsWithTypeAndName transform_args{args.front(), src_array_col, dst_array_col, args.back()}; - return FunctionFactory::instance().get("transform", context)->build(transform_args) - ->execute(transform_args, result_type, input_rows_count); + return FunctionFactory::instance().get("transform", context)->build(transform_args)->execute(transform_args, result_type, input_rows_count); } private: diff --git a/src/Functions/convertCharset.cpp b/src/Functions/convertCharset.cpp index b3b7394acb9..d998e88e7c2 100644 --- a/src/Functions/convertCharset.cpp +++ b/src/Functions/convertCharset.cpp @@ -88,7 +88,8 @@ private: static void convert(const String & from_charset, const String & to_charset, const ColumnString::Chars & from_chars, const ColumnString::Offsets & from_offsets, - ColumnString::Chars & to_chars, ColumnString::Offsets & to_offsets) + ColumnString::Chars & to_chars, ColumnString::Offsets & to_offsets, + size_t input_rows_count) { auto converter_from = getConverter(from_charset); auto converter_to = getConverter(to_charset); @@ -96,12 +97,11 @@ private: ColumnString::Offset current_from_offset = 0; ColumnString::Offset current_to_offset = 0; - size_t size = from_offsets.size(); - to_offsets.resize(size); + to_offsets.resize(input_rows_count); PODArray uchars; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t from_string_size = from_offsets[i] - current_from_offset - 1; @@ -184,7 +184,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & arg_from = arguments[0]; const ColumnWithTypeAndName & arg_charset_from = arguments[1]; @@ -204,7 +204,7 @@ public: if (const ColumnString * col_from = checkAndGetColumn(arg_from.column.get())) { auto col_to = ColumnString::create(); - convert(charset_from, charset_to, col_from->getChars(), col_from->getOffsets(), col_to->getChars(), col_to->getOffsets()); + convert(charset_from, charset_to, col_from->getChars(), col_from->getOffsets(), col_to->getChars(), col_to->getOffsets(), input_rows_count); return col_to; } else diff --git a/src/Functions/cosh.cpp b/src/Functions/cosh.cpp index 54b52051aab..f4302292303 100644 --- a/src/Functions/cosh.cpp +++ b/src/Functions/cosh.cpp @@ -5,11 +5,12 @@ namespace DB { namespace { - struct CoshName - { - static constexpr auto name = "cosh"; - }; - using FunctionCosh = FunctionMathUnary>; + +struct CoshName +{ + static constexpr auto name = "cosh"; +}; +using FunctionCosh = FunctionMathUnary>; } diff --git a/src/Functions/countSubstringsCaseInsensitiveUTF8.cpp b/src/Functions/countSubstringsCaseInsensitiveUTF8.cpp index 3f71bca63d2..99ae4f1927e 100644 --- a/src/Functions/countSubstringsCaseInsensitiveUTF8.cpp +++ b/src/Functions/countSubstringsCaseInsensitiveUTF8.cpp @@ -13,8 +13,7 @@ struct NameCountSubstringsCaseInsensitiveUTF8 static constexpr auto name = "countSubstringsCaseInsensitiveUTF8"; }; -using FunctionCountSubstringsCaseInsensitiveUTF8 = FunctionsStringSearch< - CountSubstringsImpl>; +using FunctionCountSubstringsCaseInsensitiveUTF8 = FunctionsStringSearch>; } diff --git a/src/Functions/dateName.cpp b/src/Functions/dateName.cpp index 8165ea1b8d3..846cb87f1ee 
100644 --- a/src/Functions/dateName.cpp +++ b/src/Functions/dateName.cpp @@ -109,14 +109,14 @@ public: ColumnPtr executeImpl( const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, - [[maybe_unused]] size_t input_rows_count) const override + size_t input_rows_count) const override { ColumnPtr res; - if (!((res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)))) + if (!((res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)))) throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of function {}, must be Date or DateTime.", @@ -127,7 +127,7 @@ public: } template - ColumnPtr executeType(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const + ColumnPtr executeType(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const { auto * times = checkAndGetColumn(arguments[1].column.get()); if (!times) @@ -144,7 +144,7 @@ public: String date_part = date_part_column->getValue(); const DateLUTImpl * time_zone_tmp; - if (std::is_same_v || std::is_same_v) + if constexpr (std::is_same_v || std::is_same_v) time_zone_tmp = &extractTimeZoneFromFunctionArguments(arguments, 2, 1); else time_zone_tmp = &DateLUT::instance(); @@ -175,7 +175,7 @@ public: using TimeType = DateTypeToTimeType; callOnDatePartWriter(date_part, [&](const auto & writer) { - for (size_t i = 0; i < times_data.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { if constexpr (std::is_same_v) { diff --git a/src/Functions/degrees.cpp b/src/Functions/degrees.cpp index 8646eb54d9a..94b5ce3682c 100644 --- a/src/Functions/degrees.cpp +++ b/src/Functions/degrees.cpp @@ -7,18 +7,20 @@ namespace DB { namespace { - struct DegreesName - { - static constexpr auto name = "degrees"; - }; - Float64 degrees(Float64 r) - { - Float64 degrees = r * (180 / M_PI); - return degrees; - } +struct DegreesName +{ + static constexpr auto name = "degrees"; +}; + +Float64 degrees(Float64 r) +{ + Float64 degrees = r * (180 / M_PI); + return degrees; +} + +using FunctionDegrees = FunctionMathUnary>; - using FunctionDegrees = FunctionMathUnary>; } REGISTER_FUNCTION(Degrees) diff --git a/src/Functions/filesystem.cpp b/src/Functions/filesystem.cpp index 9fbf9b0cbe7..9b168f3f088 100644 --- a/src/Functions/filesystem.cpp +++ b/src/Functions/filesystem.cpp @@ -91,7 +91,7 @@ public: auto col_res = ColumnVector::create(col_str->size()); auto & data = col_res->getData(); - for (size_t i = 0; i < col_str->size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { auto disk_name = col_str->getDataAt(i).toString(); if (auto it = disk_map.find(disk_name); it != disk_map.end()) diff --git a/src/Functions/formatDateTime.cpp b/src/Functions/formatDateTime.cpp index f89afd67e78..f33b7849a43 100644 --- a/src/Functions/formatDateTime.cpp +++ b/src/Functions/formatDateTime.cpp @@ -848,7 +848,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, [[maybe_unused]] size_t input_rows_count) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { ColumnPtr res; if constexpr 
(support_integer == SupportInteger::Yes) @@ -862,17 +862,17 @@ public: if (!castType(arguments[0].type.get(), [&](const auto & type) { using FromDataType = std::decay_t; - if (!(res = executeType(arguments, result_type))) + if (!(res = executeType(arguments, result_type, input_rows_count))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64.", arguments[0].column->getName(), getName()); return true; })) { - if (!((res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)))) + if (!((res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of function {}, must be Integer or DateTime.", arguments[0].column->getName(), getName()); @@ -881,10 +881,10 @@ public: } else { - if (!((res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)) - || (res = executeType(arguments, result_type)))) + if (!((res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)) + || (res = executeType(arguments, result_type, input_rows_count)))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of function {}, must be Date or DateTime.", arguments[0].column->getName(), getName()); @@ -894,7 +894,7 @@ public: } template - ColumnPtr executeType(const ColumnsWithTypeAndName & arguments, const DataTypePtr &) const + ColumnPtr executeType(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const { auto non_const_datetime = arguments[0].column->convertToFullColumnIfConst(); auto * times = checkAndGetColumn(non_const_datetime.get()); @@ -955,13 +955,11 @@ public: else time_zone = &DateLUT::instance(); - const auto & vec = times->getData(); - auto col_res = ColumnString::create(); auto & res_data = col_res->getChars(); auto & res_offsets = col_res->getOffsets(); - res_data.resize(vec.size() * (out_template_size + 1)); - res_offsets.resize(vec.size()); + res_data.resize(input_rows_count * (out_template_size + 1)); + res_offsets.resize(input_rows_count); if constexpr (format_syntax == FormatSyntax::MySQL) { @@ -990,9 +988,11 @@ public: } } + const auto & vec = times->getData(); + auto * begin = reinterpret_cast(res_data.data()); auto * pos = begin; - for (size_t i = 0; i < vec.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { if (!const_time_zone_column && arguments.size() > 2) { diff --git a/src/Functions/formatQuery.cpp b/src/Functions/formatQuery.cpp index d10b3f9a5b7..4e3f302ce36 100644 --- a/src/Functions/formatQuery.cpp +++ b/src/Functions/formatQuery.cpp @@ -75,7 +75,7 @@ public: if (const ColumnString * col_query_string = checkAndGetColumn(col_query.get())) { auto col_res = ColumnString::create(); - formatVector(col_query_string->getChars(), col_query_string->getOffsets(), col_res->getChars(), col_res->getOffsets(), col_null_map); + formatVector(col_query_string->getChars(), col_query_string->getOffsets(), col_res->getChars(), col_res->getOffsets(), col_null_map, 
input_rows_count); if (error_handling == ErrorHandling::Null) return ColumnNullable::create(std::move(col_res), std::move(col_null_map)); @@ -92,16 +92,16 @@ private: const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, - ColumnUInt8::MutablePtr & res_null_map) const + ColumnUInt8::MutablePtr & res_null_map, + size_t input_rows_count) const { - const size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); res_data.resize(data.size()); size_t prev_offset = 0; size_t res_data_size = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * begin = reinterpret_cast(&data[prev_offset]); const char * end = begin + offsets[i] - prev_offset - 1; diff --git a/src/Functions/formatReadable.h b/src/Functions/formatReadable.h index 487ec9d79d0..9161ab43e28 100644 --- a/src/Functions/formatReadable.h +++ b/src/Functions/formatReadable.h @@ -55,19 +55,19 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { ColumnPtr res; - if (!((res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)) - || (res = executeType(arguments)))) + if (!((res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)) + || (res = executeType(arguments, input_rows_count)))) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", arguments[0].column->getName(), getName()); @@ -76,7 +76,7 @@ public: private: template - ColumnPtr executeType(const ColumnsWithTypeAndName & arguments) const + ColumnPtr executeType(const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const { if (const ColumnVector * col_from = checkAndGetColumn>(arguments[0].column.get())) { @@ -85,13 +85,12 @@ private: const typename ColumnVector::Container & vec_from = col_from->getData(); ColumnString::Chars & data_to = col_to->getChars(); ColumnString::Offsets & offsets_to = col_to->getOffsets(); - size_t size = vec_from.size(); - data_to.resize(size * 2); - offsets_to.resize(size); + data_to.resize(input_rows_count * 2); + offsets_to.resize(input_rows_count); WriteBufferFromVector buf_to(data_to); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { Impl::format(static_cast(vec_from[i]), buf_to); writeChar(0, buf_to); diff --git a/src/Functions/geohashDecode.cpp b/src/Functions/geohashDecode.cpp index 96ad7dacfc4..cace6c09fec 100644 --- a/src/Functions/geohashDecode.cpp +++ b/src/Functions/geohashDecode.cpp @@ -51,21 +51,19 @@ public: } template - bool tryExecute(const IColumn * 
encoded_column, ColumnPtr & result_column) const + bool tryExecute(const IColumn * encoded_column, ColumnPtr & result_column, size_t input_rows_count) const { const auto * encoded = checkAndGetColumn(encoded_column); if (!encoded) return false; - const size_t count = encoded->size(); - - auto latitude = ColumnFloat64::create(count); - auto longitude = ColumnFloat64::create(count); + auto latitude = ColumnFloat64::create(input_rows_count); + auto longitude = ColumnFloat64::create(input_rows_count); ColumnFloat64::Container & lon_data = longitude->getData(); ColumnFloat64::Container & lat_data = latitude->getData(); - for (size_t i = 0; i < count; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { std::string_view encoded_string = encoded->getDataAt(i).toView(); geohashDecode(encoded_string.data(), encoded_string.size(), &lon_data[i], &lat_data[i]); @@ -79,13 +77,13 @@ public: return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const IColumn * encoded = arguments[0].column.get(); ColumnPtr res_column; - if (tryExecute(encoded, res_column) || - tryExecute(encoded, res_column)) + if (tryExecute(encoded, res_column, input_rows_count) || + tryExecute(encoded, res_column, input_rows_count)) return res_column; throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unsupported argument type:{} of argument of function {}", diff --git a/src/Functions/geohashEncode.cpp b/src/Functions/geohashEncode.cpp index 034c8188b63..c49acddd81f 100644 --- a/src/Functions/geohashEncode.cpp +++ b/src/Functions/geohashEncode.cpp @@ -53,7 +53,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const IColumn * longitude = arguments[0].column.get(); const IColumn * latitude = arguments[1].column.get(); @@ -65,26 +65,24 @@ public: precision = arguments[2].column; ColumnPtr res_column; - vector(longitude, latitude, precision.get(), res_column); + vector(longitude, latitude, precision.get(), res_column, input_rows_count); return res_column; } private: - void vector(const IColumn * lon_column, const IColumn * lat_column, const IColumn * precision_column, ColumnPtr & result) const + void vector(const IColumn * lon_column, const IColumn * lat_column, const IColumn * precision_column, ColumnPtr & result, size_t input_rows_count) const { auto col_str = ColumnString::create(); ColumnString::Chars & out_vec = col_str->getChars(); ColumnString::Offsets & out_offsets = col_str->getOffsets(); - const size_t size = lat_column->size(); - - out_offsets.resize(size); - out_vec.resize(size * (GEOHASH_MAX_TEXT_LENGTH + 1)); + out_offsets.resize(input_rows_count); + out_vec.resize(input_rows_count * (GEOHASH_MAX_TEXT_LENGTH + 1)); char * begin = reinterpret_cast(out_vec.data()); char * pos = begin; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const Float64 longitude_value = lon_column->getFloat64(i); const Float64 latitude_value = lat_column->getFloat64(i); diff --git a/src/Functions/transform.cpp b/src/Functions/transform.cpp index 0dbc9946710..68500779f93 100644 --- a/src/Functions/transform.cpp +++ b/src/Functions/transform.cpp @@ -138,8 
+138,7 @@ namespace } } - ColumnPtr executeImpl( - const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { std::call_once(once, [&] { initialize(arguments, result_type); }); From 885acd3aa80d421e82f75150b4152e227ca0fba4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 09:42:20 +0200 Subject: [PATCH 0631/2361] Compatibility --- src/Parsers/ParserCreateIndexQuery.cpp | 3 +++ src/Parsers/ParserCreateQuery.cpp | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 8a4c1c0b17a..2761c99738b 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -71,7 +71,10 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected if (!type_p.parse(pos, type, expected)) { if (ParserIdentifier().parse(pos, type, expected)) + { type = makeASTFunction(type->as().name()); + type->as().no_empty_args = true; + } else return false; } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index bff5da4a536..9aaecd84f59 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -200,7 +200,10 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe if (!type_p.parse(pos, type, expected)) { if (name_p.parse(pos, type, expected)) + { type = makeASTFunction(type->as().name()); + type->as().no_empty_args = true; + } else return false; } From 8217dcccc1438ec6186e8db53e17429a7060183f Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 22 Jul 2024 09:53:56 +0200 Subject: [PATCH 0632/2361] Stop ignoring SIGSEGV in GDB --- docker/test/fuzzer/run-fuzzer.sh | 1 - docker/test/stateless/attach_gdb.lib | 1 - 2 files changed, 2 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 6191aeaf304..b8f967ed9c2 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -208,7 +208,6 @@ handle SIGPIPE nostop noprint pass handle SIGTERM nostop noprint pass handle SIGUSR1 nostop noprint pass handle SIGUSR2 nostop noprint pass -handle SIGSEGV nostop pass handle SIG$RTMIN nostop noprint pass info signals continue diff --git a/docker/test/stateless/attach_gdb.lib b/docker/test/stateless/attach_gdb.lib index eb54f920b98..d288288bb17 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/docker/test/stateless/attach_gdb.lib @@ -20,7 +20,6 @@ handle SIGPIPE nostop noprint pass handle SIGTERM nostop noprint pass handle SIGUSR1 nostop noprint pass handle SIGUSR2 nostop noprint pass -handle SIGSEGV nostop pass handle SIG$RTMIN nostop noprint pass info signals continue From c2ac13291f3bf201f7189bd36f2c9be7c06aa886 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Mon, 22 Jul 2024 09:06:13 +0100 Subject: [PATCH 0633/2361] fix tests --- tests/integration/test_recovery_time_metric/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py index 90155f81ba2..e4a44103b76 100644 --- a/tests/integration/test_recovery_time_metric/test.py +++ b/tests/integration/test_recovery_time_metric/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node = 
cluster.add_instance( - "node", main_configs=["configs/config.xml"], with_zookeeper=True + "node", main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True, ) From d040e436f3a1f8594070b04ec10cbf7391f6994a Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 22 Jul 2024 08:18:58 +0000 Subject: [PATCH 0634/2361] Automatic style fix --- tests/integration/test_recovery_time_metric/test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py index e4a44103b76..4dad844b950 100644 --- a/tests/integration/test_recovery_time_metric/test.py +++ b/tests/integration/test_recovery_time_metric/test.py @@ -3,7 +3,10 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node = cluster.add_instance( - "node", main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True, + "node", + main_configs=["configs/config.xml"], + with_zookeeper=True, + stay_alive=True, ) From 0f327869132940b3bae932730cb2ce2f9c394163 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 22 Jul 2024 10:46:42 +0200 Subject: [PATCH 0635/2361] Better random and queries --- tests/queries/0_stateless/01194_http_query_id.sh | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01194_http_query_id.sh b/tests/queries/0_stateless/01194_http_query_id.sh index fac17cca3c6..42321112185 100755 --- a/tests/queries/0_stateless/01194_http_query_id.sh +++ b/tests/queries/0_stateless/01194_http_query_id.sh @@ -4,14 +4,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -rnd=$RANDOM -url="${CLICKHOUSE_URL}&session_id=test_01194_$RANDOM" +rnd="$CLICKHOUSE_DATABASE" +url="${CLICKHOUSE_URL}&session_id=test_01194_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CURL} -sS "$url&query=SELECT+'test_01194',$rnd,1" > /dev/null ${CLICKHOUSE_CURL} -sS "$url&query=SELECT+'test_01194',$rnd,2" > /dev/null ${CLICKHOUSE_CURL} -sS "$url" --data "SELECT 'test_01194',$rnd,3" > /dev/null ${CLICKHOUSE_CURL} -sS "$url" --data "SELECT 'test_01194',$rnd,4" > /dev/null -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data "SYSTEM FLUSH LOGS" +$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data "SELECT count(DISTINCT query_id) FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE 'SELECT ''test_01194'',$rnd%'" +$CLICKHOUSE_CLIENT -q " + SELECT + count(DISTINCT query_id) + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND query LIKE 'SELECT ''test_01194'',$rnd%' + AND query_id != queryID()" From 1be54641c16569e7ee3398fe0770a9b532342643 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 22 Jul 2024 11:03:19 +0200 Subject: [PATCH 0636/2361] Revert libunwind patch --- contrib/libunwind | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libunwind b/contrib/libunwind index 8f28e64d158..fe854449e24 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2 +Subproject commit fe854449e24bedfa26e38465b84374312dbd587f From 255dcec501e7506291cee44f5c9eb30b5eec7e99 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 22 Jul 2024 11:10:34 +0200 Subject: [PATCH 0637/2361] Fix 02241_join_rocksdb_bs --- 
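Note on the fix below: the .sql.j2 test previously wrapped the direct-join queries in an outer loop over SET max_block_size values, which is what produced the 56 now-deleted reference lines; the rewrite keeps the block-splitting coverage by attaching SETTINGS max_block_size = 1 only to the queries against the small table. Roughly, the scenario the test keeps covering looks like the sketch below (simplified and partly assumed: a single small EmbeddedRocksDB table instead of the templated rdb_{{ table_size }} tables, and made-up row counts):

SET join_algorithm = 'direct';

-- Assumed shape of the rdb_* tables from the test: key/value pairs stored in EmbeddedRocksDB.
CREATE TABLE rdb (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY (key);
INSERT INTO rdb SELECT number AS key, 'val' || toString(key) AS value FROM numbers_mt(50);

-- Direct join driven from a generated left side; the tiny max_block_size forces the
-- right-hand lookups to be split across many blocks, as the removed SET loop used to do.
SELECT count()
FROM (SELECT number AS k FROM numbers_mt(25)) AS t1
INNER JOIN rdb ON rdb.key == t1.k
SETTINGS max_block_size = 1;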
.../02241_join_rocksdb_bs.reference | 56 ------------------- .../0_stateless/02241_join_rocksdb_bs.sql.j2 | 20 +++---- 2 files changed, 7 insertions(+), 69 deletions(-) diff --git a/tests/queries/0_stateless/02241_join_rocksdb_bs.reference b/tests/queries/0_stateless/02241_join_rocksdb_bs.reference index 8416a2991c1..4dff9ef38ef 100644 --- a/tests/queries/0_stateless/02241_join_rocksdb_bs.reference +++ b/tests/queries/0_stateless/02241_join_rocksdb_bs.reference @@ -10,59 +10,3 @@ 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 diff --git a/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 b/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 index 4ab98201eed..e5703f99d62 100644 --- a/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 +++ b/tests/queries/0_stateless/02241_join_rocksdb_bs.sql.j2 @@ -1,4 +1,4 @@ --- Tags: use-rocksdb, long, no-s3-storage, no-random-settings, no-random-merge-tree-settings +-- Tags: use-rocksdb, long, no-s3-storage SET join_algorithm = 'direct'; @@ -13,27 +13,21 @@ INSERT INTO rdb_{{ table_size }} SELECT (sipHash64(number) % {{ table_size }}) as key, ('val' || toString(key)) AS value FROM numbers_mt({{ table_size }}); -{% for block_size in [10, 11, 128, 129, 65505, 65506, 70000] -%} - -{% if block_size * 5000 > table_size -%} - -SET max_block_size = {{ block_size }}; - {% for right_size in [table_size // 2, table_size + table_size // 4 + 1] -%} SELECT count() == (SELECT count() FROM rdb_{{ table_size }} WHERE key < {{ right_size }}) FROM (SELECT number as k FROM numbers_mt({{ right_size }})) as t1 INNER JOIN rdb_{{ table_size }} as rdb -ON rdb.key == t1.k; +ON rdb.key == t1.k +{% if table_size < 100 %}SETTINGS max_block_size = 1{% endif -%} +; SELECT count() == {{ right_size }} and countIf(value != '') == (SELECT count() FROM rdb_{{ table_size }} WHERE key < {{ right_size }}) FROM (SELECT number as k FROM numbers_mt({{ right_size }})) as t1 LEFT JOIN rdb_{{ table_size }} as rdb -ON rdb.key == t1.k; - -{% endfor -%} - -{% endif -%} +ON rdb.key == t1.k +{% if table_size < 100 %}SETTINGS max_block_size = 1{% endif -%} +; {% endfor -%} {% endfor -%} From 2f1818b8d5d49747faa8b496eb6c86d1d78b3ec7 Mon Sep 17 00:00:00 2001 From: Yohann Jardin Date: Mon, 22 Jul 2024 11:08:07 +0200 Subject: [PATCH 0638/2361] update error codes related to number of accepted arguments --- src/Functions/FunctionBase58Conversion.h | 3 +- src/Functions/FunctionChar.cpp | 3 +- src/Functions/hilbertEncode.cpp | 4 +-- src/Functions/mortonEncode.cpp | 4 +-- src/Functions/randDistribution.cpp | 3 +- src/Processors/Transforms/WindowTransform.cpp | 31 ++++++++++--------- .../test_functions.py | 13 ++++++-- .../0_stateless/02560_window_ntile.reference | 2 ++ .../0_stateless/02560_window_ntile.sql | 2 ++ .../0_stateless/03131_hilbert_coding.sql | 1 + 10 files changed, 42 insertions(+), 24 deletions(-) diff --git a/src/Functions/FunctionBase58Conversion.h b/src/Functions/FunctionBase58Conversion.h index e519f9768cc..a34a36d8b81 100644 --- a/src/Functions/FunctionBase58Conversion.h +++ b/src/Functions/FunctionBase58Conversion.h @@ -18,6 +18,7 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } struct Base58Encode @@ -135,7 +136,7 @@ public: DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { if 
(arguments.size() != 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong number of arguments for function {}: 1 expected.", getName()); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function {}: 1 expected.", getName()); if (!isString(arguments[0].type)) throw Exception( diff --git a/src/Functions/FunctionChar.cpp b/src/Functions/FunctionChar.cpp index 0ebe1442f08..79e346a3ea4 100644 --- a/src/Functions/FunctionChar.cpp +++ b/src/Functions/FunctionChar.cpp @@ -15,6 +15,7 @@ namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_COLUMN; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; } class FunctionChar : public IFunction @@ -36,7 +37,7 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { if (arguments.empty()) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Number of arguments for function {} can't be {}, should be at least 1", getName(), arguments.size()); diff --git a/src/Functions/hilbertEncode.cpp b/src/Functions/hilbertEncode.cpp index 13512d0d36c..a4e3cc59b76 100644 --- a/src/Functions/hilbertEncode.cpp +++ b/src/Functions/hilbertEncode.cpp @@ -11,8 +11,8 @@ namespace DB namespace ErrorCodes { - extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; } @@ -87,7 +87,7 @@ public: return col_res; } - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Illegal number of UInt arguments of function {}: should be not more than 2 dimensions", getName()); } diff --git a/src/Functions/mortonEncode.cpp b/src/Functions/mortonEncode.cpp index 0c19c7c3134..8cf0f18dbfe 100644 --- a/src/Functions/mortonEncode.cpp +++ b/src/Functions/mortonEncode.cpp @@ -16,8 +16,8 @@ namespace DB namespace ErrorCodes { - extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; } #define EXTRACT_VECTOR(INDEX) \ @@ -130,7 +130,7 @@ namespace ErrorCodes MASK(8, 6, col6->getUInt(i)), \ MASK(8, 7, col7->getUInt(i))) \ \ - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, \ + throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, \ "Illegal number of UInt arguments of function {}, max: 8", \ getName()); \ diff --git a/src/Functions/randDistribution.cpp b/src/Functions/randDistribution.cpp index 4e616ada697..6a3dac748c1 100644 --- a/src/Functions/randDistribution.cpp +++ b/src/Functions/randDistribution.cpp @@ -24,6 +24,7 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; extern const int BAD_ARGUMENTS; extern const int LOGICAL_ERROR; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } namespace @@ -246,7 +247,7 @@ public: { auto desired = Distribution::getNumberOfArguments(); if (arguments.size() != desired && arguments.size() != desired + 1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Wrong number of arguments for function {}. 
Should be {} or {}", getName(), desired, desired + 1); diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 86421adf4fb..2b255c5120e 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -56,7 +56,10 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int NOT_IMPLEMENTED; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; + extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; } // Interface for true window functions. It's not much of an interface, they just @@ -1710,7 +1713,7 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); @@ -1723,7 +1726,7 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } @@ -1807,7 +1810,7 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); @@ -1820,7 +1823,7 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } @@ -1882,7 +1885,7 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); @@ -1895,7 +1898,7 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu { if (argument_types.size() != 1) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one argument", name_); } @@ -1968,7 +1971,7 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); @@ -1981,7 +1984,7 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } @@ -2116,7 +2119,7 @@ struct WindowFunctionNtile final : public StatefulWindowFunction : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) { if (argument_types.size() != 
1) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} takes exactly one argument", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one argument", name_); auto type_id = argument_types[0]->getTypeId(); if (type_id != TypeIndex::UInt8 && type_id != TypeIndex::UInt16 && type_id != TypeIndex::UInt32 && type_id != TypeIndex::UInt64) @@ -2191,7 +2194,7 @@ namespace if (!buckets) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' funtcion must be greater than zero"); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be greater than zero"); } } // new partition @@ -2404,7 +2407,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (argument_types.size() > 3) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Function '{}' accepts at most 3 arguments, {} given", name, argument_types.size()); } @@ -2414,7 +2417,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { if (argument_types_.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} takes at least one argument", name_); } @@ -2504,7 +2507,7 @@ struct WindowFunctionNthValue final : public WindowFunction { if (argument_types_.size() != 2) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } @@ -2578,7 +2581,7 @@ struct NonNegativeDerivativeParams if (argument_types.size() != 2 && argument_types.size() != 3) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes 2 or 3 arguments", name_); } diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py index fc03a77030e..3231fb87f33 100644 --- a/tests/integration/test_backward_compatibility/test_functions.py +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -75,11 +75,15 @@ def test_aggregate_states(start_cluster): except QueryRuntimeException as e: error_message = str(e) allowed_errors = [ - "NUMBER_OF_ARGUMENTS_DOESNT_MATCH", "ILLEGAL_TYPE_OF_ARGUMENT", # sequenceNextNode() and friends "UNKNOWN_AGGREGATE_FUNCTION", # Function X takes exactly one parameter: + "NUMBER_OF_ARGUMENTS_DOESNT_MATCH", + # Function X takes at least one argument + "TOO_FEW_ARGUMENTS_FOR_FUNCTION", + # Function X accepts at most 3 arguments, Y given + "TOO_MANY_ARGUMENTS_FOR_FUNCTION", # The function 'X' can only be used as a window function "BAD_ARGUMENTS", # aggThrow @@ -196,9 +200,7 @@ def test_string_functions(start_cluster): "Should start with ", # POINT/POLYGON/... 
"Cannot read input: expected a digit but got something else:", # ErrorCodes - "NUMBER_OF_ARGUMENTS_DOESNT_MATCH", "ILLEGAL_TYPE_OF_ARGUMENT", - "TOO_FEW_ARGUMENTS_FOR_FUNCTION", "DICTIONARIES_WAS_NOT_LOADED", "CANNOT_PARSE_UUID", "CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING", @@ -218,6 +220,11 @@ def test_string_functions(start_cluster): "CANNOT_PARSE_TEXT", "CANNOT_PARSE_DATETIME", # Function X takes exactly one parameter: + "NUMBER_OF_ARGUMENTS_DOESNT_MATCH", + # Function X takes at least one argument + "TOO_FEW_ARGUMENTS_FOR_FUNCTION", + # Function X accepts at most 3 arguments, Y given + "TOO_MANY_ARGUMENTS_FOR_FUNCTION", # The function 'X' can only be used as a window function "BAD_ARGUMENTS", # String foo is obviously not a valid IP address. diff --git a/tests/queries/0_stateless/02560_window_ntile.reference b/tests/queries/0_stateless/02560_window_ntile.reference index d877b2034cb..5f01832d075 100644 --- a/tests/queries/0_stateless/02560_window_ntile.reference +++ b/tests/queries/0_stateless/02560_window_ntile.reference @@ -213,6 +213,8 @@ select a, b, ntile('2') over (partition by a order by b) from(select intDiv(numb select a, b, ntile(0) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } select a, b, ntile(-2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } select a, b, ntile(b + 1) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile() over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select a, b, ntile(3, 2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- Bad window type select a, b, ntile(2) over (partition by a) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } select a, b, ntile(2) over (partition by a order by b rows between 4 preceding and unbounded following) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } diff --git a/tests/queries/0_stateless/02560_window_ntile.sql b/tests/queries/0_stateless/02560_window_ntile.sql index 44e9b865052..d0e4d557e58 100644 --- a/tests/queries/0_stateless/02560_window_ntile.sql +++ b/tests/queries/0_stateless/02560_window_ntile.sql @@ -16,6 +16,8 @@ select a, b, ntile('2') over (partition by a order by b) from(select intDiv(numb select a, b, ntile(0) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } select a, b, ntile(-2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } select a, b, ntile(b + 1) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile() over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select a, b, ntile(3, 2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- Bad 
window type select a, b, ntile(2) over (partition by a) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } diff --git a/tests/queries/0_stateless/03131_hilbert_coding.sql b/tests/queries/0_stateless/03131_hilbert_coding.sql index ed293dc6910..b16a0efad5d 100644 --- a/tests/queries/0_stateless/03131_hilbert_coding.sql +++ b/tests/queries/0_stateless/03131_hilbert_coding.sql @@ -46,6 +46,7 @@ drop table if exists hilbert_numbers_1_03131; select '----- ERRORS -----'; select hilbertEncode(); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +select hilbertEncode(1, 2, 3); -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } select hilbertDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select hilbertEncode('text'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select hilbertDecode('text', 'text'); -- { serverError ILLEGAL_COLUMN } From 41218ad01889cce72569a840461b54dbf9f4b832 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Mon, 22 Jul 2024 12:03:21 +0200 Subject: [PATCH 0639/2361] Stateless tests: add no-parallel tag for high-load tests --- .../01076_cache_dictionary_datarace_exception_ptr.sh | 2 +- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 2 +- .../0_stateless/01301_aggregate_state_exception_memory_leak.sh | 2 +- .../0_stateless/01302_aggregate_state_exception_memory_leak.sh | 2 +- tests/queries/0_stateless/02372_data_race_in_avro.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh index dcd15718416..e003d2a26da 100755 --- a/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh +++ b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: race +# Tags: race, no-parallel # This is a monkey test used to trigger sanitizers. 
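A note on the argument-count error codes introduced above: the convention is NUMBER_OF_ARGUMENTS_DOESNT_MATCH when a function expects an exact count, TOO_FEW_ARGUMENTS_FOR_FUNCTION when a minimum is not reached, and TOO_MANY_ARGUMENTS_FOR_FUNCTION when a maximum is exceeded, replacing the generic BAD_ARGUMENTS and ILLEGAL_TYPE_OF_ARGUMENT throws. A few queries that hit each branch, written in the same serverError-annotation style as the updated tests (a sketch, not an exhaustive list):

-- Exact count expected: base58Encode takes exactly one argument.
select base58Encode('foo', 'bar'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }

-- Minimum not reached: char needs at least one argument.
select char(); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION }

-- Maximum exceeded: mortonEncode accepts at most 8 UInt dimensions.
select mortonEncode(1, 2, 3, 4, 5, 6, 7, 8, 9); -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION }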
diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 2ab7f883367..8344bb6f426 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-ordinary-database, no-debug +# Tags: long, no-parallel, no-ordinary-database, no-debug # Test is too heavy, avoid parallel run in Flaky Check # shellcheck disable=SC2119 diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh index 47fe7a9c7d9..d74092d828d 100755 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh index a521accb082..bbf2fd9177a 100755 --- a/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02372_data_race_in_avro.sh b/tests/queries/0_stateless/02372_data_race_in_avro.sh index 49c34e31923..50a7ae1e3c5 100755 --- a/tests/queries/0_stateless/02372_data_race_in_avro.sh +++ b/tests/queries/0_stateless/02372_data_race_in_avro.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From bb0b29f6e50e74098fcc8a9b83150998f1bc2601 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Mon, 22 Jul 2024 12:11:56 +0200 Subject: [PATCH 0640/2361] Set writer_thread_id earlier, when new exclusive owener is waiting for existing readers to finish --- src/Common/SharedMutex.cpp | 6 ++++-- src/Common/SharedMutex.h | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Common/SharedMutex.cpp b/src/Common/SharedMutex.cpp index 7b00ef0b28b..2d63f8542b0 100644 --- a/src/Common/SharedMutex.cpp +++ b/src/Common/SharedMutex.cpp @@ -31,11 +31,13 @@ void SharedMutex::lock() break; } + /// The first step of acquiring the exclusive ownership is finished. + /// Now we just wait until all readers release the shared ownership. 
+ writer_thread_id.store(getThreadId()); + value |= writers; while (value & readers) futexWaitLowerFetch(state, value); - - writer_thread_id.store(getThreadId()); } bool SharedMutex::try_lock() diff --git a/src/Common/SharedMutex.h b/src/Common/SharedMutex.h index c77c8765885..d2947645eca 100644 --- a/src/Common/SharedMutex.h +++ b/src/Common/SharedMutex.h @@ -38,7 +38,7 @@ private: alignas(64) std::atomic state; std::atomic waiters; - /// Is set while the lock is held in exclusive mode only to facilitate debugging + /// Is set while the lock is held (or is in the process of being acquired) in exclusive mode only to facilitate debugging std::atomic writer_thread_id; }; From c36dde51031e817b20db6773df47a42092f6616b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 22 Jul 2024 12:17:29 +0200 Subject: [PATCH 0641/2361] Fix KeeperMap create after incomplete drop --- src/Common/FailPoint.cpp | 3 ++- src/Storages/StorageKeeperMap.cpp | 27 +++++++++++-------- ...03208_keepermap_failed_data_drop.reference | 0 .../03208_keepermap_failed_data_drop.sql | 7 +++++ 4 files changed, 25 insertions(+), 12 deletions(-) create mode 100644 tests/queries/0_stateless/03208_keepermap_failed_data_drop.reference create mode 100644 tests/queries/0_stateless/03208_keepermap_failed_data_drop.sql diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index 5454cba8e2e..b952e9725e3 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -57,7 +57,8 @@ static struct InitFiu PAUSEABLE_ONCE(finish_clean_quorum_failed_parts) \ PAUSEABLE(dummy_pausable_failpoint) \ ONCE(execute_query_calling_empty_set_result_func_on_exception) \ - ONCE(receive_timeout_on_table_status_response) + ONCE(receive_timeout_on_table_status_response) \ + ONCE(keepermap_fail_drop_data) \ namespace FailPoints diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 587cb621362..1c1de245d10 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -37,6 +37,7 @@ #include #include +#include #include #include #include @@ -64,6 +65,11 @@ namespace DB { +namespace FailPoints +{ + extern const char keepermap_fail_drop_data[]; +} + namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; @@ -411,18 +417,13 @@ StorageKeeperMap::StorageKeeperMap( auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent); - // tables_path was removed with drop - if (code == Coordination::Error::ZNONODE) - { - LOG_INFO(log, "Metadata nodes were removed by another server, will retry"); - continue; - } - else if (code != Coordination::Error::ZOK) - { - throw zkutil::KeeperException(code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path); - } + if (code == Coordination::Error::ZOK) + return; - return; + if (code != Coordination::Error::ZNONODE) + throw zkutil::KeeperException(code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path); + + /// ZNONODE means we dropped zk_tables_path but didn't finish drop completely } if (client->exists(zk_dropped_path)) @@ -561,6 +562,10 @@ void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, Cont bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock) { + fiu_do_on(FailPoints::keepermap_fail_drop_data, + { + throw zkutil::KeeperException(Coordination::Error::ZOPERATIONTIMEOUT, "Manually triggered operation timeout"); + }); 
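/// Annotation on the hunk above and below (not part of the patch): if this failpoint, or a
/// real Keeper error, fires here, the drop stops before the data under zk_data_path is
/// removed and the table is left half-dropped. The constructor change above handles that
/// case: tryCreate(zk_table_path) returning ZNONODE now means a previous drop removed
/// zk_tables_path without finishing, and instead of retrying in a loop the code falls
/// through to the existing zk_dropped_path branch, which finishes the interrupted cleanup
/// before the table is created again. The new 03208 test below drives exactly this sequence
/// by enabling keepermap_fail_drop_data before DROP TABLE and then creating another table
/// on the same Keeper path.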
zookeeper->removeChildrenRecursive(zk_data_path); bool completely_removed = false; diff --git a/tests/queries/0_stateless/03208_keepermap_failed_data_drop.reference b/tests/queries/0_stateless/03208_keepermap_failed_data_drop.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03208_keepermap_failed_data_drop.sql b/tests/queries/0_stateless/03208_keepermap_failed_data_drop.sql new file mode 100644 index 00000000000..ad0603f12a9 --- /dev/null +++ b/tests/queries/0_stateless/03208_keepermap_failed_data_drop.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS 03208_keepermap_test SYNC; + +CREATE TABLE 03208_keepermap_test (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test03208') PRIMARY KEY(key); +INSERT INTO 03208_keepermap_test VALUES (1, 11); +SYSTEM ENABLE FAILPOINT keepermap_fail_drop_data; +DROP TABLE 03208_keepermap_test; +CREATE TABLE 03208_keepermap_test_another (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test03208') PRIMARY KEY(key); From 1ea05e55d25e144ff2294265a4d70c420174004a Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 10:19:54 +0000 Subject: [PATCH 0642/2361] Split test 03038_nested_dynamic_merges to avoid timeouts --- .../03038_nested_dynamic_merges.reference | 92 ------------------- .../03038_nested_dynamic_merges.sh | 53 ----------- ...ynamic_merges_compact_horizontal.reference | 21 +++++ ...ested_dynamic_merges_compact_horizontal.sh | 32 +++++++ ..._dynamic_merges_compact_vertical.reference | 21 +++++ ..._nested_dynamic_merges_compact_vertical.sh | 32 +++++++ ...d_dynamic_merges_wide_horizontal.reference | 21 +++++ ...8_nested_dynamic_merges_wide_horizontal.sh | 32 +++++++ ...ted_dynamic_merges_wide_vertical.reference | 21 +++++ ...038_nested_dynamic_merges_wide_vertical.sh | 32 +++++++ 10 files changed, 212 insertions(+), 145 deletions(-) delete mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges.reference delete mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference create mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference create mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference create mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference create mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges.reference deleted file mode 100644 index 65034647775..00000000000 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges.reference +++ /dev/null @@ -1,92 +0,0 @@ -MergeTree compact + horizontal merge -test -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a 
Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None -MergeTree wide + horizontal merge -test -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None -MergeTree compact + vertical merge -test -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None -MergeTree wide + vertical merge -test -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges.sh deleted file mode 100755 index b82ddb3813e..00000000000 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" - - -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, number from numbers(100000)" - $CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" - $CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" - - $CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - - $CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" - $CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" - - $CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact + horizontal merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide + horizontal merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact + vertical merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide + vertical merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference new file mode 100644 index 00000000000..27ed336a035 --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference @@ -0,0 +1,21 @@ +16667 Tuple(a 
Dynamic(max_types=3)):Date +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):String +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 UInt64:None +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 UInt64:None +16667 Tuple(a Dynamic(max_types=3)):DateTime +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +133333 Tuple(a Dynamic(max_types=3)):None +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +116667 Tuple(a Dynamic(max_types=3)):String +133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh new file mode 100755 index 00000000000..cb8a8fad6ba --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" + +$CH_CLIENT -q "system stop merges test" +$CH_CLIENT -q "insert into test select number, number from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "drop table test;" + diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference new file 
mode 100644 index 00000000000..27ed336a035 --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference @@ -0,0 +1,21 @@ +16667 Tuple(a Dynamic(max_types=3)):Date +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):String +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 UInt64:None +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 UInt64:None +16667 Tuple(a Dynamic(max_types=3)):DateTime +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +133333 Tuple(a Dynamic(max_types=3)):None +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +116667 Tuple(a Dynamic(max_types=3)):String +133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh new file mode 100755 index 00000000000..0627244a02d --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" + +$CH_CLIENT -q "system stop merges test" +$CH_CLIENT -q "insert into test select number, number from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type 
from test group by type order by count(), type" + +$CH_CLIENT -q "drop table test;" + diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference new file mode 100644 index 00000000000..27ed336a035 --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference @@ -0,0 +1,21 @@ +16667 Tuple(a Dynamic(max_types=3)):Date +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):String +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 UInt64:None +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 UInt64:None +16667 Tuple(a Dynamic(max_types=3)):DateTime +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +133333 Tuple(a Dynamic(max_types=3)):None +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +116667 Tuple(a Dynamic(max_types=3)):String +133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh new file mode 100755 index 00000000000..f305fac071c --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" + +$CH_CLIENT -q "system stop merges test" +$CH_CLIENT -q "insert into test select number, number from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "drop table test;" + diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference new file mode 100644 index 00000000000..27ed336a035 --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference @@ -0,0 +1,21 @@ +16667 Tuple(a Dynamic(max_types=3)):Date +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):String +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 UInt64:None +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 UInt64:None +16667 Tuple(a Dynamic(max_types=3)):DateTime +33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) +50000 Tuple(a Dynamic(max_types=3)):UInt64 +66667 Tuple(a Dynamic(max_types=3)):String +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +133333 Tuple(a Dynamic(max_types=3)):None +50000 Tuple(a Dynamic(max_types=3)):UInt64 +100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 UInt64:None +116667 Tuple(a Dynamic(max_types=3)):String +133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh new file mode 100755 index 00000000000..0627244a02d --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd 
"$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" + +$CH_CLIENT -q "system stop merges test" +$CH_CLIENT -q "insert into test select number, number from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" +$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" + +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" +$CH_CLIENT -nm -q "system start merges test; optimize table test final;" +$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" + +$CH_CLIENT -q "drop table test;" + From 99026efcdcc890acc4540eea3e7c94a33d3f99fc Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 10:51:15 +0000 Subject: [PATCH 0643/2361] Improve JSON parsing by different type inference logic --- src/Formats/JSONExtractTree.cpp | 98 ++++++++++++++++++++++++--------- src/Formats/JSONExtractTree.h | 5 +- src/Functions/FunctionsJSON.cpp | 2 +- 3 files changed, 77 insertions(+), 28 deletions(-) diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 680588c7825..a8980e785de 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -126,7 +126,7 @@ void jsonElementToString(const typename JSONParser::Element & element, WriteBuff template bool tryGetNumericValueFromJSONElement( - NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, String & error) + NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, bool allow_type_conversion, String & error) { switch (element.type()) { @@ -138,7 +138,7 @@ bool tryGetNumericValueFromJSONElement( /// But it will be more convenient for user to perform conversion. 
value = static_cast(element.getDouble()); } - else if (!accurate::convertNumeric(element.getDouble(), value)) + else if (!allow_type_conversion || !accurate::convertNumeric(element.getDouble(), value)) { error = fmt::format("cannot convert double value {} to {}", element.getDouble(), TypeName); return false; @@ -161,7 +161,7 @@ bool tryGetNumericValueFromJSONElement( case ElementType::BOOL: if constexpr (is_integer) { - if (convert_bool_to_integer) + if (convert_bool_to_integer && allow_type_conversion) { value = static_cast(element.getBool()); break; @@ -169,7 +169,11 @@ bool tryGetNumericValueFromJSONElement( } error = fmt::format("cannot convert bool value to {}", TypeName); return false; - case ElementType::STRING: { + case ElementType::STRING: + { + if (!allow_type_conversion) + return false; + auto rb = ReadBufferFromMemory{element.getString()}; if constexpr (std::is_floating_point_v) { @@ -244,8 +248,16 @@ public: return false; } + if (is_bool_type && !insert_settings.allow_type_conversion) + { + if (!element.isBool()) + return false; + assert_cast &>(column).insertValue(element.getBool()); + return true; + } + NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || is_bool_type, error)) + if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || is_bool_type, insert_settings.allow_type_conversion, error)) { if (error.empty()) error = fmt::format("cannot read {} value from JSON element: {}", TypeName, jsonElementToString(element, format_settings)); @@ -292,8 +304,17 @@ public: return false; } + if (this->is_bool_type && !insert_settings.allow_type_conversion) + { + if (!element.isBool()) + return false; + UInt8 value = element.getBool(); + assert_cast(column).insertData(reinterpret_cast(&value), sizeof(value)); + return true; + } + NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || this->is_bool_type, error)) + if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || this->is_bool_type, insert_settings.allow_type_conversion, error)) { if (error.empty()) error = fmt::format("cannot read {} value from JSON element: {}", TypeName, jsonElementToString(element, format_settings)); @@ -319,7 +340,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -336,6 +357,9 @@ public: if (!element.isString()) { + if (!insert_settings.allow_type_conversion) + return false; + auto & col_str = assert_cast(column); auto & chars = col_str.getChars(); WriteBufferFromVector buf(chars, AppendModeTag()); @@ -363,7 +387,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -381,6 +405,9 @@ public: if (!element.isString()) { + if (!insert_settings.allow_type_conversion) + return false; + auto value = jsonElementToString(element, format_settings); assert_cast(column).insertData(value.data(), value.size()); } @@ -405,7 +432,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const 
JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -422,7 +449,11 @@ public: } if (!element.isString()) + { + if (!insert_settings.allow_type_conversion) + return false; return checkValueSizeAndInsert(column, jsonElementToString(element, format_settings), error); + } return checkValueSizeAndInsert(column, element.getString(), error); } @@ -453,7 +484,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -469,7 +500,11 @@ public: } if (!element.isString()) + { + if (!insert_settings.allow_type_conversion) + return false; return checkValueSizeAndInsert(column, jsonElementToString(element, format_settings), error); + } return checkValueSizeAndInsert(column, element.getString(), error); } @@ -633,7 +668,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -652,7 +687,7 @@ public: return false; } } - else if (element.isUInt64()) + else if (element.isUInt64() && insert_settings.allow_type_conversion) { value = element.getUInt64(); } @@ -715,7 +750,8 @@ public: case ElementType::INT64: value = convertToDecimal, DataTypeDecimal>(element.getInt64(), scale); break; - case ElementType::STRING: { + case ElementType::STRING: + { auto rb = ReadBufferFromMemory{element.getString()}; if (!SerializationDecimal::tryReadText(value, rb, DecimalUtils::max_precision, scale)) { @@ -724,7 +760,8 @@ public: } break; } - case ElementType::NULL_VALUE: { + case ElementType::NULL_VALUE: + { if (!format_settings.null_as_default) { error = "cannot convert null to Decimal value"; @@ -759,7 +796,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -780,6 +817,9 @@ public: } else { + if (!insert_settings.allow_type_conversion) + return false; + switch (element.type()) { case ElementType::DOUBLE: @@ -1373,7 +1413,20 @@ public: auto & variant_column = column_dynamic.getVariantColumn(); auto & variant_info = column_dynamic.getVariantInfo(); - /// First, infer ClickHouse type for this element and add it as a new variant. + + /// First, try to insert element into current variants but with no types conversion. + /// We want to avoid inferring the type on each row, so if we can insert this element into + /// any existing variant with no types conversion (like Integer -> String, Double -> Integer, etc) + /// we will do it and won't try to infer the type. 
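+        /// For example, if the current variants are (Int64, String) and the element is the JSON
+        /// number 42, it can be inserted into the Int64 variant directly, so no inference is done
+        /// for that row. An element like [1, 2, 3] matches neither variant, so we fall through to
+        /// the inference path below and add Array(Int64) as a new variant (or, if no more variants
+        /// can be added, read the value as String).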
+ auto it = json_extract_nodes_cache.find(variant_info.variant_name); + if (it == json_extract_nodes_cache.end()) + it = json_extract_nodes_cache.emplace(variant_info.variant_name, buildJSONExtractTree(variant_info.variant_type, "Dynamic inference")).first; + auto insert_settings_with_no_type_conversion = insert_settings; + insert_settings_with_no_type_conversion.allow_type_conversion = false; + if (it->second->insertResultToColumn(variant_column, element, insert_settings_with_no_type_conversion, format_settings, error)) + return true; + + /// We couldn't insert element into current variants, infer ClickHouse type for this element and add it as a new variant. auto element_type = removeNullable(elementToDataType(element, format_settings)); if (!checkIfTypeIsComplete(element_type)) { @@ -1387,7 +1440,7 @@ public: auto element_type_name = element_type->getName(); if (column_dynamic.addNewVariant(element_type, element_type_name)) { - auto it = json_extract_nodes_cache.find(element_type_name); + it = json_extract_nodes_cache.find(element_type_name); if (it == json_extract_nodes_cache.end()) it = json_extract_nodes_cache.emplace(element_type_name, buildJSONExtractTree(element_type, "Dynamic inference")).first; auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type_name); @@ -1399,14 +1452,7 @@ public: return true; } - /// We couldn't add new variant. Try to insert element into current variants. - auto it = json_extract_nodes_cache.find(variant_info.variant_name); - if (it == json_extract_nodes_cache.end()) - it = json_extract_nodes_cache.emplace(variant_info.variant_name, buildJSONExtractTree(variant_info.variant_type, "Dynamic inference")).first; - if (it->second->insertResultToColumn(variant_column, element, insert_settings, format_settings, error)) - return true; - - /// We couldn't insert element into any existing variant, add String variant and read value as String. + /// We couldn't add a new variant, add String variant and read value as String. column_dynamic.addStringVariant(); auto string_global_discriminator = variant_info.variant_name_to_discriminator.at("String"); auto & string_column = variant_column.getVariantByGlobalDiscriminator(string_global_discriminator); @@ -1962,7 +2008,7 @@ template std::unique_ptr> buildJSONExtractTr #if USE_RAPIDJSON template void jsonElementToString(const RapidJSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings); template std::unique_ptr> buildJSONExtractTree(const DataTypePtr & type, const char * source_for_exception_message); -template bool tryGetNumericValueFromJSONElement(Float64 & value, const RapidJSONParser::Element & element, bool convert_bool_to_integer, String & error); +template bool tryGetNumericValueFromJSONElement(Float64 & value, const RapidJSONParser::Element & element, bool convert_bool_to_integer, bool allow_type_conversion, String & error); #else template void jsonElementToString(const DummyJSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings); template std::unique_ptr> buildJSONExtractTree(const DataTypePtr & type, const char * source_for_exception_message); diff --git a/src/Formats/JSONExtractTree.h b/src/Formats/JSONExtractTree.h index b5e82506548..89f2d191dfb 100644 --- a/src/Formats/JSONExtractTree.h +++ b/src/Formats/JSONExtractTree.h @@ -17,6 +17,9 @@ struct JSONExtractInsertSettings /// For example, if we have [1, "hello", 2] and type Array(UInt32), /// we will insert [1, 0, 2] in the column. 
Used in all JSONExtract functions. bool insert_default_on_invalid_elements_in_complex_types = false; + /// If false, JSON value will be inserted into column only if type of the value is + /// the same as column type (no conversions like Integer -> String, Integer -> Float, etc). + bool allow_type_conversion = true; }; template @@ -36,6 +39,6 @@ template void jsonElementToString(const typename JSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings); template -bool tryGetNumericValueFromJSONElement(NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, String & error); +bool tryGetNumericValueFromJSONElement(NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, bool allow_type_conversion, String & error); } diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 47040545677..2681f278958 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -739,7 +739,7 @@ public: { NumberType value; - tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, error); + tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, true, error); auto & col_vec = assert_cast &>(dest); col_vec.insertValue(value); return true; From 378502a331c60cc023e0c965611237cb5a3cfb47 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Mon, 22 Jul 2024 12:54:05 +0200 Subject: [PATCH 0644/2361] Stateless tests: sync tests with private --- .../01651_lc_insert_tiny_log.reference | 12 +- .../0_stateless/01651_lc_insert_tiny_log.sql | 6 +- .../01753_direct_dictionary_simple_key.sql | 2 +- .../0_stateless/02372_analyzer_join.reference | 688 +++++++++--------- .../0_stateless/02372_analyzer_join.sql.j2 | 44 +- .../02373_analyzer_join_use_nulls.reference | 16 +- .../02373_analyzer_join_use_nulls.sql | 16 +- .../02992_all_columns_should_have_comment.sql | 6 +- 8 files changed, 397 insertions(+), 393 deletions(-) diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference b/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference index 3da44c57b27..5cc8909b6c8 100644 --- a/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log.reference @@ -1,12 +1,12 @@ 10000000 10000000 1274991808 -20000000 -20000000 2549983616 +30000000 +30000000 3824991808 10000000 10000000 1274991808 -20000000 -20000000 2549983616 +30000000 +30000000 3824991808 10000000 10000000 1274991808 -20000000 -20000000 2549983616 +30000000 +30000000 3824991808 diff --git a/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql b/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql index d405bb01fd9..d11c9120c61 100644 --- a/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql +++ b/tests/queries/0_stateless/01651_lc_insert_tiny_log.sql @@ -8,7 +8,7 @@ INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); select sum(length(arr)) from perf_lc_num; select sum(length(arr)), sum(num) from perf_lc_num; -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); select sum(length(arr)) from perf_lc_num; select sum(length(arr)), sum(num) from perf_lc_num; @@ -23,7 +23,7 @@ INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); select sum(length(arr)) from perf_lc_num; select sum(length(arr)), sum(num) from perf_lc_num; -INSERT INTO perf_lc_num 
(num) SELECT toUInt8(number) FROM numbers(10000000); +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); select sum(length(arr)) from perf_lc_num; select sum(length(arr)), sum(num) from perf_lc_num; @@ -38,7 +38,7 @@ INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); select sum(length(arr)) from perf_lc_num; select sum(length(arr)), sum(num) from perf_lc_num; -INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000); +INSERT INTO perf_lc_num (num) SELECT toUInt8(number) FROM numbers(10000000, 20000000); select sum(length(arr)) from perf_lc_num; select sum(length(arr)), sum(num) from perf_lc_num; diff --git a/tests/queries/0_stateless/01753_direct_dictionary_simple_key.sql b/tests/queries/0_stateless/01753_direct_dictionary_simple_key.sql index 86af09f391d..93ed3f93c4e 100644 --- a/tests/queries/0_stateless/01753_direct_dictionary_simple_key.sql +++ b/tests/queries/0_stateless/01753_direct_dictionary_simple_key.sql @@ -41,7 +41,7 @@ SELECT dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_simple SELECT 'dictHas'; SELECT dictHas('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4; SELECT 'select all values as input stream'; -SELECT * FROM 01753_dictionary_db.direct_dictionary_simple_key_simple_attributes; +SELECT * FROM 01753_dictionary_db.direct_dictionary_simple_key_simple_attributes ORDER BY ALL; DROP DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_simple_attributes; DROP TABLE 01753_dictionary_db.simple_key_simple_attributes_source_table; diff --git a/tests/queries/0_stateless/02372_analyzer_join.reference b/tests/queries/0_stateless/02372_analyzer_join.reference index eefcb1e50dc..9204dded262 100644 --- a/tests/queries/0_stateless/02372_analyzer_join.reference +++ b/tests/queries/0_stateless/02372_analyzer_join.reference @@ -5,63 +5,63 @@ JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 -SELECT id FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; 
-- { serverError AMBIGUOUS_IDENTIFIER } -SELECT value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT id FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT 'JOIN LEFT'; @@ -69,75 +69,75 @@ JOIN LEFT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = 
test_table_join_2.id; +FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 Join_1_Value_2 -SELECT id FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } -SELECT value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT id FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 
Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 @@ -146,182 +146,182 @@ JOIN RIGHT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 - Join_2_Value_3 -SELECT id FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } -SELECT value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT id FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT value FROM test_table_join_1 RIGHT JOIN 
test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 1 Join_2_Value_1 0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 1 Join_2_Value_1 0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; 0 1 Join_2_Value_1 0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT 'JOIN FULL'; JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 FULL JOIN test_table_join_2 ON 
test_table_join_1.id = test_table_join_2.id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 Join_1_Value_2 - Join_2_Value_3 -SELECT id FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } -SELECT value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT id FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY ALL; +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 -0 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 -0 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON 
toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 -0 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id ORDER BY ALL; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT 'First JOIN INNER second JOIN INNER'; First JOIN INNER second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; @@ -329,48 +329,48 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 
1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT 'First JOIN INNER second JOIN LEFT'; @@ -379,14 +379,14 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM 
test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; @@ -394,48 +394,48 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM 
test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT 'First JOIN INNER second JOIN RIGHT'; @@ -444,159 +444,159 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 
'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'First JOIN INNER second JOIN FULL'; First JOIN INNER second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 
0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'First JOIN LEFT second JOIN INNER'; First JOIN LEFT second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, 
test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -604,7 +604,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -613,7 +613,7 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 @@ -621,7 +621,7 @@ SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -629,20 +629,20 @@ SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 
AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -650,7 +650,7 @@ SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -658,7 +658,7 @@ SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -668,7 +668,7 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -676,7 +676,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -685,7 +685,7 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 @@ -693,7 +693,7 @@ SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; Join_1_Value_0 
Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -701,7 +701,7 @@ SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 @@ -709,7 +709,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 @@ -718,7 +718,7 @@ JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -726,7 +726,7 @@ SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -734,7 +734,7 @@ SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -744,184 +744,184 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON 
test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN 
test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN LEFT second JOIN FULL'; First JOIN LEFT second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON 
with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 0 4 Join_3_Value_4 -0 0 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 0 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN RIGHT second JOIN INNER'; First JOIN RIGHT second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = 
test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; @@ -929,48 +929,48 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER 
JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT 'First JOIN RIGHT second JOIN LEFT'; @@ -979,246 +979,246 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 
'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 1 Join_2_Value_1 0 0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT 'First JOIN RIGHT second JOIN RIGHT'; First JOIN RIGHT second JOIN RIGHT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 
Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'First JOIN RIGHT second JOIN FULL'; First JOIN RIGHT second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, 
test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 1 Join_2_Value_1 0 -0 0 4 Join_3_Value_4 +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 1 Join_2_Value_1 0 -0 3 Join_2_Value_3 0 +FULL JOIN 
test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN FULL second JOIN INNER'; First JOIN FULL second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -1226,7 +1226,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -1235,7 +1235,7 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; 0 0 
Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 @@ -1243,7 +1243,7 @@ SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -1251,20 +1251,20 @@ SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -1272,7 +1272,7 @@ SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -1280,7 +1280,7 @@ SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -1290,265 +1290,265 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT 
JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 3 Join_2_Value_3 0 -0 1 Join_2_Value_1 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 1 Join_2_Value_1 0 -0 3 Join_2_Value_3 0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT 
t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT 'First JOIN FULL second JOIN RIGHT'; First JOIN FULL second JOIN RIGHT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 
Join_3_Value_0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN FULL second JOIN FULL'; First JOIN FULL second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, 
test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 3 Join_2_Value_3 0 -0 1 Join_2_Value_1 0 -0 0 4 Join_3_Value_4 -0 0 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 
'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 1 Join_2_Value_1 0 -0 3 Join_2_Value_3 0 -0 0 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY ALL; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 diff --git a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 index facf4dc018b..45ae63b9a49 100644 --- a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 +++ b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 @@ -45,59 +45,59 @@ SELECT 'JOIN {{ join_type }}'; SELECT 'JOIN ON without conditions'; SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; SELECT '--'; SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ 
join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; -SELECT id FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT id FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } -SELECT value FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT value FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id ORDER BY ALL; -- { serverError AMBIGUOUS_IDENTIFIER } SELECT 'JOIN ON with conditions'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY ALL; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY ALL; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY ALL; SELECT 'JOIN multiple clauses'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id ORDER BY ALL; SELECT 'JOIN expression aliases'; -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) ORDER BY ALL; SELECT '--'; -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id ORDER BY ALL; {% endfor %} @@ -110,56 +110,56 @@ SELECT 'JOIN ON without conditions'; SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 {{ first_join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 
ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; SELECT '--'; SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY ALL; SELECT 'JOIN ON with conditions'; SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY ALL; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY ALL; SELECT 'JOIN multiple clauses'; SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY ALL; SELECT 'JOIN expression aliases'; SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY ALL; SELECT '--'; SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON 
t2_id = t3_id ORDER BY ALL; {% endfor %} {% endfor %} diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference index 3722c23e4a0..2b3671e1ea6 100644 --- a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference @@ -1,27 +1,27 @@ -- { echoOn } SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String SELECT '--'; -- SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 UInt64 Join_1_Value_0 String 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 UInt64 Join_1_Value_1 String 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 UInt64 Join_1_Value_2 String \N Nullable(UInt64) \N Nullable(String) SELECT '--'; -- SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 UInt64 Join_2_Value_0 String 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 UInt64 Join_2_Value_1 String \N Nullable(UInt64) \N Nullable(String) 3 UInt64 Join_2_Value_3 String SELECT '--'; -- SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 Nullable(UInt64) Join_1_Value_2 Nullable(String) \N Nullable(UInt64) \N Nullable(String) @@ -30,14 +30,14 @@ SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING 
(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 UInt64 1 UInt64 Join_1_Value_1 String 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 UInt64 2 UInt64 Join_1_Value_2 String \N Nullable(UInt64) \N Nullable(String) @@ -45,7 +45,7 @@ SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; 0 UInt64 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 UInt64 Join_2_Value_0 String 1 UInt64 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 UInt64 Join_2_Value_1 String 3 UInt64 \N Nullable(UInt64) \N Nullable(String) 3 UInt64 Join_2_Value_3 String @@ -53,7 +53,7 @@ SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; 0 Nullable(UInt64) 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 Nullable(UInt64) 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 Nullable(UInt64) 2 Nullable(UInt64) Join_1_Value_2 Nullable(String) \N Nullable(UInt64) \N Nullable(String) diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql index db7895084e8..bcec6d178a8 100644 --- a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql @@ -26,46 +26,46 @@ INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); -- { echoOn } SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; SELECT '--'; SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; SELECT '--'; SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; SELECT '--'; SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON 
t1.id = t2.id ORDER BY ALL; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; -- { echoOff } diff --git a/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql b/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql index 127c6fee07d..dcb7c09a973 100644 --- a/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql +++ b/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql @@ -1,4 +1,8 @@ SYSTEM FLUSH LOGS; SELECT 'Column ' || name || ' from table ' || concat(database, '.', table) || ' should have a comment' FROM system.columns -WHERE (database = 'system') AND (comment = '') AND (table NOT ILIKE '%_log_%') AND (table NOT IN ('numbers', 'numbers_mt', 'one', 'generate_series', 'generateSeries', 'coverage_log')) AND (default_kind != 'ALIAS'); +WHERE (database = 'system') AND + (comment = '') AND + (table NOT ILIKE '%_log_%') AND + (table NOT IN ('numbers', 'numbers_mt', 'one', 'generate_series', 'generateSeries', 'coverage_log', 'filesystem_read_prefetches_log')) AND + (default_kind != 'ALIAS'); From 340214a246cd1c35d96cfb21be0576d87e05fea0 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:55:50 +0200 Subject: [PATCH 0645/2361] Unpin docker-ce in integration-tests-runner --- docker/test/integration/runner/Dockerfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index d250b746e7d..ceb8a1b2b58 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -43,13 +43,11 @@ ENV TZ=Etc/UTC RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV DOCKER_CHANNEL stable -# Unpin the docker version after the release 24.0.3 is released -# https://github.com/moby/moby/issues/45770#issuecomment-1618255130 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) 
${DOCKER_CHANNEL}" \ && apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ - docker-ce='5:23.*' \ + docker-ce \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ From d0053b505b7d05379644d5d0f47ec704d7496294 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 22 Jul 2024 12:56:35 +0200 Subject: [PATCH 0646/2361] Rename test --- ...p.reference => 03208_keepermap_incomplete_data_drop.reference} | 0 ...led_data_drop.sql => 03208_keepermap_incomplete_data_drop.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{03208_keepermap_failed_data_drop.reference => 03208_keepermap_incomplete_data_drop.reference} (100%) rename tests/queries/0_stateless/{03208_keepermap_failed_data_drop.sql => 03208_keepermap_incomplete_data_drop.sql} (100%) diff --git a/tests/queries/0_stateless/03208_keepermap_failed_data_drop.reference b/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.reference similarity index 100% rename from tests/queries/0_stateless/03208_keepermap_failed_data_drop.reference rename to tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.reference diff --git a/tests/queries/0_stateless/03208_keepermap_failed_data_drop.sql b/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql similarity index 100% rename from tests/queries/0_stateless/03208_keepermap_failed_data_drop.sql rename to tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql From e708219f6aecfe9934827f453665e03142ad5112 Mon Sep 17 00:00:00 2001 From: Max K Date: Mon, 22 Jul 2024 13:01:27 +0200 Subject: [PATCH 0647/2361] CI: Print instance info in runner's init script --- tests/ci/worker/init_runner.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh index d6cdb6d9c57..1bfeeb38c15 100644 --- a/tests/ci/worker/init_runner.sh +++ b/tests/ci/worker/init_runner.sh @@ -50,7 +50,7 @@ set -uo pipefail # set accordingly to a runner role # #################################### -echo "Running init v1" +echo "Running init v1.1" export DEBIAN_FRONTEND=noninteractive export RUNNER_HOME=/home/ubuntu/actions-runner @@ -66,6 +66,11 @@ bash /usr/local/share/scripts/init-network.sh RUNNER_TYPE=$(/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='github:runner-type'].Value" --output text) LABELS="self-hosted,Linux,$(uname -m),$RUNNER_TYPE" export LABELS +echo "Instance Labels: $LABELS" + +LIFE_CYCLE=$(curl -s --fail http://169.254.169.254/latest/meta-data/instance-life-cycle) +export LIFE_CYCLE +echo "Instance lifecycle: $LIFE_CYCLE" # Refresh CloudWatch agent config aws ssm get-parameter --region us-east-1 --name AmazonCloudWatch-github-runners --query 'Parameter.Value' --output text > /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json @@ -124,10 +129,6 @@ terminate_decrease_and_exit() { declare -f terminate_and_exit >> /tmp/actions-hooks/common.sh check_spot_instance_is_old() { - # This function should be executed ONLY BETWEEN runnings. - # It's unsafe to execute while the runner is working! 
- local LIFE_CYCLE - LIFE_CYCLE=$(curl -s --fail http://169.254.169.254/latest/meta-data/instance-life-cycle) if [ "$LIFE_CYCLE" == "spot" ]; then local UPTIME UPTIME=$(< /proc/uptime) From a493e5c8e7e885cc8b66626ebf2911a6e3387b78 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 22 Jul 2024 11:05:37 +0000 Subject: [PATCH 0648/2361] Followup #66725 --- .../Passes/LogicalExpressionOptimizerPass.cpp | 11 +++++++++-- .../02911_join_on_nullsafe_optimization.reference | 2 ++ .../02911_join_on_nullsafe_optimization.sql | 2 ++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp index 5c68bca3a6e..e136440556f 100644 --- a/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp +++ b/src/Analyzer/Passes/LogicalExpressionOptimizerPass.cpp @@ -68,10 +68,13 @@ QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes) return nullptr; } -/// Checks if the node is combination of isNull and notEquals functions of two the same arguments +/// Checks if the node is combination of isNull and notEquals functions of two the same arguments: +/// [ (a <> b AND) ] (a IS NULL) AND (b IS NULL) bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, QueryTreeNodePtr & rhs) { QueryTreeNodePtrWithHashSet all_arguments; + QueryTreeNodePtrWithHashSet is_null_arguments; + for (const auto & node : nodes) { const auto * func_node = node->as(); @@ -80,7 +83,11 @@ bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, const auto & arguments = func_node->getArguments().getNodes(); if (func_node->getFunctionName() == "isNull" && arguments.size() == 1) + { all_arguments.insert(QueryTreeNodePtrWithHash(arguments[0])); + is_null_arguments.insert(QueryTreeNodePtrWithHash(arguments[0])); + } + else if (func_node->getFunctionName() == "notEquals" && arguments.size() == 2) { if (arguments[0]->isEqual(*arguments[1])) @@ -95,7 +102,7 @@ bool matchIsNullOfTwoArgs(const QueryTreeNodes & nodes, QueryTreeNodePtr & lhs, return false; } - if (all_arguments.size() != 2) + if (all_arguments.size() != 2 || is_null_arguments.size() != 2) return false; lhs = all_arguments.begin()->node; diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference index 31a1cda18e7..8f194b4ffde 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.reference @@ -39,6 +39,8 @@ SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS N 2 2 2 2 3 3 3 33 \N \N \N \N +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND t2.x <> t1.x ) ORDER BY t1.x NULLS LAST; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t2.x IS NULL) ) ORDER BY t1.x NULLS LAST; -- { serverError INVALID_JOIN_ON_EXPRESSION } -- aliases defined in the join condition are valid -- FIXME(@vdimir) broken query formatting for the following queries: -- SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; diff --git 
a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql index f739259caf9..18cb303a54a 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql @@ -35,6 +35,8 @@ SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND t2.x <> t1.x ) ORDER BY t1.x NULLS LAST; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t2.x IS NULL) ) ORDER BY t1.x NULLS LAST; -- { serverError INVALID_JOIN_ON_EXPRESSION } -- aliases defined in the join condition are valid -- FIXME(@vdimir) broken query formatting for the following queries: From cd700c59b1d0b894a7a8461a33d994732b7864f0 Mon Sep 17 00:00:00 2001 From: Max K Date: Mon, 22 Jul 2024 13:17:25 +0200 Subject: [PATCH 0649/2361] minor fix --- tests/ci/auto_release.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index 39ab3156c80..f2386fe207f 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -191,7 +191,7 @@ def main(): title=f"Auto Release Status for {release_info.release_branch}", body=release_info.to_dict(), ) - if args.post_auto_release_complete: + elif args.post_auto_release_complete: assert args.wf_status, "--wf-status Required with --post-auto-release-complete" if args.wf_status != SUCCESS: CIBuddy(dry_run=False).post_job_error( From 8246614f5e5cdec99941f048f51cb28b9eee03a5 Mon Sep 17 00:00:00 2001 From: zoomxi <419486879@qq.com> Date: Mon, 22 Jul 2024 17:57:30 +0800 Subject: [PATCH 0650/2361] throw if can't connect to any participating replicas --- src/Processors/QueryPlan/ReadFromRemote.cpp | 3 +- .../ParallelReplicasReadingCoordinator.cpp | 4 +-- .../ParallelReplicasReadingCoordinator.h | 4 +++ .../test.py | 35 +++++++++++++++++++ 4 files changed, 43 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index e27515a62a4..29e12c1e613 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -21,7 +21,7 @@ #include #include #include - +#include #include namespace DB @@ -429,6 +429,7 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder { shuffled_pool = shard.pool->getShuffledPools(current_settings); shuffled_pool.resize(max_replicas_to_use); + coordinator->adjustParticipatingReplicasCount(max_replicas_to_use); } else { diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index f46b4de10b7..2ba66256116 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -1031,7 +1031,7 @@ void ParallelReplicasReadingCoordinator::markReplicaAsUnavailable(size_t replica if (!pimpl) { 
unavailable_nodes_registered_before_initialization.push_back(replica_number); - if (unavailable_nodes_registered_before_initialization.size() == replicas_count) + if (unavailable_nodes_registered_before_initialization.size() == participating_replicas_count) throw Exception(ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Can't connect to any replica chosen for query execution"); } else @@ -1061,7 +1061,7 @@ void ParallelReplicasReadingCoordinator::initialize(CoordinationMode mode) } ParallelReplicasReadingCoordinator::ParallelReplicasReadingCoordinator(size_t replicas_count_, size_t mark_segment_size_) - : replicas_count(replicas_count_), mark_segment_size(mark_segment_size_) + : replicas_count(replicas_count_), participating_replicas_count(replicas_count_), mark_segment_size(mark_segment_size_) { } diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h index 8b463fda395..c06ef6ef01a 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h @@ -30,11 +30,15 @@ public: /// needed to report total rows to read void setProgressCallback(ProgressCallback callback); + /// Participating replicas count may be less than replicas count in a shard. + void adjustParticipatingReplicasCount(size_t count) { participating_replicas_count = count; } + private: void initialize(CoordinationMode mode); std::mutex mutex; const size_t replicas_count{0}; + size_t participating_replicas_count{0}; size_t mark_segment_size{0}; std::unique_ptr pimpl; ProgressCallback progress_callback; // store the callback only to bypass it to coordinator implementation diff --git a/tests/integration/test_parallel_replicas_no_replicas/test.py b/tests/integration/test_parallel_replicas_no_replicas/test.py index 9f716459643..b77da338554 100644 --- a/tests/integration/test_parallel_replicas_no_replicas/test.py +++ b/tests/integration/test_parallel_replicas_no_replicas/test.py @@ -48,3 +48,38 @@ def test_skip_all_replicas(start_cluster, skip_unavailable_shards): "skip_unavailable_shards": skip_unavailable_shards, }, ) + +@pytest.mark.parametrize("skip_unavailable_shards", [1, 0]) +def test_skip_all_participating_replicas1(start_cluster, skip_unavailable_shards): + cluster_name = "test_1_shard_3_unavaliable_replicas" + table_name = "tt1" + create_tables(cluster_name, table_name) + + with pytest.raises(QueryRuntimeException): + initiator.query( + f"SELECT key, count() FROM {table_name} GROUP BY key ORDER BY key", + settings={ + "allow_experimental_parallel_reading_from_replicas": 2, + "max_parallel_replicas": 3, + "cluster_for_parallel_replicas": cluster_name, + "skip_unavailable_shards": skip_unavailable_shards, + "parallel_replicas_min_number_of_rows_per_replica": 500, + }, + ) + +@pytest.mark.parametrize("skip_unavailable_shards", [1, 0]) +def test_skip_all_participating_replicas2(start_cluster, skip_unavailable_shards): + cluster_name = "test_1_shard_3_unavaliable_replicas" + table_name = "tt2" + create_tables(cluster_name, table_name) + + with pytest.raises(QueryRuntimeException): + initiator.query( + f"SELECT key, count() FROM {table_name} GROUP BY key ORDER BY key", + settings={ + "allow_experimental_parallel_reading_from_replicas": 2, + "max_parallel_replicas": 2, + "cluster_for_parallel_replicas": cluster_name, + "skip_unavailable_shards": skip_unavailable_shards, + }, + ) From 2dc264928f311e2f4d10001044d070b6a6a05471 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya 
<2159081+qoega@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:33:51 +0000 Subject: [PATCH 0651/2361] Added tests, rewritten logic which engines and table functions to allow, added replace for create table ... AS table_function() syntax. --- docs/en/operations/settings/settings.md | 12 ++ src/Core/Settings.h | 3 +- src/Core/SettingsChangesHistory.cpp | 3 +- src/Interpreters/InterpreterCreateQuery.cpp | 50 +++--- .../test_restore_external_engines/__init__.py | 0 .../configs/backups_disk.xml | 14 ++ .../configs/remote_servers.xml | 21 +++ .../test_restore_external_engines/test.py | 143 ++++++++++++++++++ 8 files changed, 217 insertions(+), 29 deletions(-) create mode 100644 tests/integration/test_restore_external_engines/__init__.py create mode 100644 tests/integration/test_restore_external_engines/configs/backups_disk.xml create mode 100644 tests/integration/test_restore_external_engines/configs/remote_servers.xml create mode 100644 tests/integration/test_restore_external_engines/test.py diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index c3f697c3bdc..65b8df7a9e2 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5608,3 +5608,15 @@ Default value: `10000000`. Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached. Default value: `1GiB`. + +## restore_replace_external_engines_to_null + +For testing purposes. Replaces all external engines to Null to not initiate external connections. + +Default value: `False` + +## restore_replace_external_table_functions_to_null + +For testing purposes. Replaces all external engines to Null to not initiate external connections. + +Default value: `False` \ No newline at end of file diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 7bf97896357..e6d2cac359b 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -891,7 +891,8 @@ class IColumn; M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \ M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \ - M(Bool, restore_replace_external_engine_to_null, false, "Replace all the External table engines to Null on restore. Useful for testing purposes", 0) \ + M(Bool, restore_replace_external_engines_to_null, false, "Replace all the External table engines to Null on restore. Useful for testing purposes", 0) \ + M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. 
Useful for testing purposes", 0) \ \ \ /* ###################################### */ \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index a23d9d17da2..0abcfb0cfb9 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -79,7 +79,8 @@ static std::initializer_listno_empty_args = true; storage.set(storage.engine, engine_ast); } + } void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const { + if (create.as_table_function) + { + if (getContext()->getSettingsRef().restore_replace_external_table_functions_to_null) + { + const auto & factory = TableFunctionFactory::instance(); + + auto properties = factory.tryGetProperties(create.as_table_function->as()->name); + if (properties && properties->allow_readonly) + return; + if (!create.storage) + { + auto storage_ast = std::make_shared(); + create.set(create.storage, storage_ast); + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage should not be created yet, it's a bug."); + create.as_table_function = nullptr; + setNullTableEngine(*create.storage); + } return; + } if (create.is_dictionary || create.is_ordinary_view || create.is_live_view || create.is_window_view) return; @@ -1010,34 +1031,9 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); /// For exrternal tables with restore_replace_external_engine_to_null setting we replace external engines to /// Null table engine. - else if (create.storage->engine->name == "AzureBlobStorage" || - create.storage->engine->name == "AzureQueue" || - create.storage->engine->name == "COSN" || - create.storage->engine->name == "DeltaLake" || - create.storage->engine->name == "Dictionary" || - create.storage->engine->name == "Executable" || - create.storage->engine->name == "ExecutablePool" || - create.storage->engine->name == "ExternalDistributed" || - create.storage->engine->name == "File" || - create.storage->engine->name == "Hudi" || - create.storage->engine->name == "Iceberg" || - create.storage->engine->name == "JDBC" || - create.storage->engine->name == "Kafka" || - create.storage->engine->name == "MaterializedPostgreSQL" || - create.storage->engine->name == "MongoDB" || - create.storage->engine->name == "MySQL" || - create.storage->engine->name == "NATS" || - create.storage->engine->name == "ODBC" || - create.storage->engine->name == "OSS" || - create.storage->engine->name == "PostgreSQL" || - create.storage->engine->name == "RabbitMQ" || - create.storage->engine->name == "Redis" || - create.storage->engine->name == "S3" || - create.storage->engine->name == "S3Queue" || - create.storage->engine->name == "TinyLog" || - create.storage->engine->name == "URL") + else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null) { - if (getContext()->getSettingsRef().restore_replace_external_engine_to_null) + if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE) setNullTableEngine(*create.storage); } return; diff --git a/tests/integration/test_restore_external_engines/__init__.py b/tests/integration/test_restore_external_engines/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_restore_external_engines/configs/backups_disk.xml b/tests/integration/test_restore_external_engines/configs/backups_disk.xml new file mode 100644 index 00000000000..f7d666c6542 --- /dev/null 
+++ b/tests/integration/test_restore_external_engines/configs/backups_disk.xml @@ -0,0 +1,14 @@ + + + + + local + /backups/ + + + + + backups + /backups/ + + diff --git a/tests/integration/test_restore_external_engines/configs/remote_servers.xml b/tests/integration/test_restore_external_engines/configs/remote_servers.xml new file mode 100644 index 00000000000..76ad3618339 --- /dev/null +++ b/tests/integration/test_restore_external_engines/configs/remote_servers.xml @@ -0,0 +1,21 @@ + + + + + true + + replica1 + 9000 + + + replica2 + 9000 + + + replica3 + 9000 + + + + + diff --git a/tests/integration/test_restore_external_engines/test.py b/tests/integration/test_restore_external_engines/test.py new file mode 100644 index 00000000000..cde4b0deb00 --- /dev/null +++ b/tests/integration/test_restore_external_engines/test.py @@ -0,0 +1,143 @@ +import pytest + +import pymysql.cursors +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +configs = ["configs/remote_servers.xml", "configs/backups_disk.xml"] + +node1 = cluster.add_instance("replica1", with_zookeeper=True, with_mysql8=True, main_configs=configs, external_dirs=["/backups/"]) +node2 = cluster.add_instance("replica2", with_zookeeper=True, with_mysql8=True, main_configs=configs, external_dirs=["/backups/"]) +node3 = cluster.add_instance("replica3", with_zookeeper=True, with_mysql8=True, main_configs=configs, external_dirs=["/backups/"]) +nodes = [node1, node2, node3] + +backup_id_counter = 0 + +def new_backup_name(): + global backup_id_counter + backup_id_counter += 1 + return f"Disk('backups', '{backup_id_counter}/')" + +def cleanup_nodes(nodes, dbname): + for node in nodes: + node.query(f"DROP DATABASE IF EXISTS {dbname} SYNC") + +def fill_nodes(nodes, dbname): + cleanup_nodes(nodes, dbname) + for node in nodes: + node.query(f"CREATE DATABASE {dbname} ENGINE = Replicated('/clickhouse/databases/{dbname}', 'default', '{node.name}')") + +def drop_mysql_table(conn, tableName): + with conn.cursor() as cursor: + cursor.execute(f"DROP TABLE IF EXISTS `clickhouse`.`{tableName}`") + +def get_mysql_conn(cluster): + conn = pymysql.connect( + user="root", password="clickhouse", host=cluster.mysql8_ip, port=cluster.mysql8_port + ) + return conn + +def fill_tables(cluster, dbname): + fill_nodes(nodes, dbname) + + conn = get_mysql_conn(cluster) + + with conn.cursor() as cursor: + cursor.execute( + "DROP DATABASE IF EXISTS clickhouse" + ) + cursor.execute( + "CREATE DATABASE clickhouse" + ) + cursor.execute( + "DROP TABLE IF EXISTS clickhouse.inference_table" + ) + cursor.execute( + "CREATE TABLE clickhouse.inference_table (id INT PRIMARY KEY, data BINARY(16) NOT NULL)" + ) + cursor.execute( + "INSERT INTO clickhouse.inference_table VALUES (100, X'9fad5e9eefdfb449')" + ) + conn.commit() + + parameters = "'mysql80:3306', 'clickhouse', 'inference_table', 'root', 'clickhouse'" + + node1.query( + f"CREATE TABLE {dbname}.mysql_schema_inference_engine ENGINE=MySQL({parameters})" + ) + node1.query(f"CREATE TABLE {dbname}.mysql_schema_inference_function AS mysql({parameters})") + + node1.query(f"CREATE TABLE {dbname}.merge_tree (id UInt64, b String) ORDER BY id") + node1.query(f"INSERT INTO {dbname}.merge_tree VALUES (100, 'abc')") + + expected = "id\tInt32\t\t\t\t\t\ndata\tFixedString(16)\t\t\t\t\t\n" + assert node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_engine") == expected + assert node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_function") == expected + assert 
node1.query(f"SELECT id FROM mysql({parameters})") == "100\n" + assert node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_engine") == "100\n" + assert node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_function") == "100\n" + assert node1.query(f"SELECT id FROM {dbname}.merge_tree") == "100\n" + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + + except Exception as ex: + print(ex) + + finally: + cluster.shutdown() + +def test_restore_table(start_cluster): + fill_tables(cluster, "replicated") + backup_name = new_backup_name() + node2.query(f"SYSTEM SYNC DATABASE REPLICA replicated;") + + node2.query(f"BACKUP DATABASE replicated TO {backup_name}") + + node2.query("DROP TABLE replicated.mysql_schema_inference_engine") + node2.query("DROP TABLE replicated.mysql_schema_inference_function") + + node3.query(f"SYSTEM SYNC DATABASE REPLICA replicated;") + + assert node3.query("EXISTS replicated.mysql_schema_inference_engine") == "0\n" + assert node3.query("EXISTS replicated.mysql_schema_inference_function") == "0\n" + + node3.query(f"RESTORE DATABASE replicated FROM {backup_name} SETTINGS allow_different_database_def=true") + node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated;") + + assert node1.query("SELECT count(), sum(id) FROM replicated.mysql_schema_inference_engine") == "1\t100\n" + assert node1.query("SELECT count(), sum(id) FROM replicated.mysql_schema_inference_function") == "1\t100\n" + assert node1.query("SELECT count(), sum(id) FROM replicated.merge_tree") == "1\t100\n" + cleanup_nodes(nodes, "replicated") + + +def test_restore_table_null(start_cluster): + fill_tables(cluster, "replicated2") + + backup_name = new_backup_name() + node2.query(f"SYSTEM SYNC DATABASE REPLICA replicated2;") + + node2.query(f"BACKUP DATABASE replicated2 TO {backup_name}") + + node2.query("DROP TABLE replicated2.mysql_schema_inference_engine") + node2.query("DROP TABLE replicated2.mysql_schema_inference_function") + + node3.query(f"SYSTEM SYNC DATABASE REPLICA replicated2;") + + assert node3.query("EXISTS replicated2.mysql_schema_inference_engine") == "0\n" + assert node3.query("EXISTS replicated2.mysql_schema_inference_function") == "0\n" + + node3.query(f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engine_to_null=1, restore_replace_external_table_functions_to_null=1") + node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated2;") + + assert node1.query("SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_engine") == "0\t0\n" + assert node1.query("SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_function") == "0\t0\n" + assert node1.query("SELECT count(), sum(id) FROM replicated2.merge_tree") == "1\t100\n" + assert node1.query("SELECT engine FROM system.tables where database = 'replicated2' and name like '%mysql%'") == "Null\nNull\n" + assert node1.query("SELECT engine FROM system.tables where database = 'replicated2' and name like '%merge_tree%'") == "MergeTree\n" + cleanup_nodes(nodes, "replicated2") From 69ad8feb9078421aca99709ffb839e2f3b923427 Mon Sep 17 00:00:00 2001 From: Max K Date: Mon, 22 Jul 2024 13:38:10 +0200 Subject: [PATCH 0652/2361] add instance type --- tests/ci/worker/init_runner.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh index 1bfeeb38c15..5177e112edd 100644 --- a/tests/ci/worker/init_runner.sh +++ 
b/tests/ci/worker/init_runner.sh @@ -72,6 +72,9 @@ LIFE_CYCLE=$(curl -s --fail http://169.254.169.254/latest/meta-data/instance-lif export LIFE_CYCLE echo "Instance lifecycle: $LIFE_CYCLE" +INSTANCE_TYPE=$(ec2metadata --instance-type) +echo "Instance type: $INSTANCE_TYPE" + # Refresh CloudWatch agent config aws ssm get-parameter --region us-east-1 --name AmazonCloudWatch-github-runners --query 'Parameter.Value' --output text > /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json systemctl restart amazon-cloudwatch-agent.service From af4c2fa8a405c53d7de6d9ed41d63988caf22b04 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:27:24 +0000 Subject: [PATCH 0653/2361] fix --- tests/integration/test_restore_external_engines/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_restore_external_engines/test.py b/tests/integration/test_restore_external_engines/test.py index cde4b0deb00..be2cae334e2 100644 --- a/tests/integration/test_restore_external_engines/test.py +++ b/tests/integration/test_restore_external_engines/test.py @@ -132,7 +132,7 @@ def test_restore_table_null(start_cluster): assert node3.query("EXISTS replicated2.mysql_schema_inference_engine") == "0\n" assert node3.query("EXISTS replicated2.mysql_schema_inference_function") == "0\n" - node3.query(f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engine_to_null=1, restore_replace_external_table_functions_to_null=1") + node3.query(f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engines_to_null=1, restore_replace_external_table_functions_to_null=1") node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated2;") assert node1.query("SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_engine") == "0\t0\n" From d296e62bf363d7dfab9a5bf6925b67b5e4188151 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:31:50 +0200 Subject: [PATCH 0654/2361] Update docker/test/integration/runner/Dockerfile --- docker/test/integration/runner/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index ceb8a1b2b58..71cf3a16967 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -47,7 +47,7 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \ && apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ - docker-ce \ + docker-ce="5:27.0.3*" \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ From 9f86b22d302c2feef5d666e75d11d09fe6ee1c6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 22 Jul 2024 14:42:30 +0200 Subject: [PATCH 0655/2361] Speed up stateful tests table setup --- docker/test/stateful/run.sh | 6 +++--- docker/test/stress/run.sh | 6 +++--- tests/queries/1_stateful/00162_mmap_compression_none.sql | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 857385f4715..df85c047649 100755 --- a/docker/test/stateful/run.sh +++ 
b/docker/test/stateful/run.sh @@ -191,8 +191,8 @@ else ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" - clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC" clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC" else @@ -200,7 +200,7 @@ else clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" fi clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, 
UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0" + clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" fi clickhouse-client --query "SHOW TABLES FROM test" diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 86467394513..b21114e456f 100644 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -209,9 +209,9 @@ clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDat ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='$TEMP_POLICY'" -clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" -clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" -clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0" +clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" +clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" +clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC" clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC" diff --git a/tests/queries/1_stateful/00162_mmap_compression_none.sql b/tests/queries/1_stateful/00162_mmap_compression_none.sql index d2cbcea8aaa..48d6ada821e 100644 --- a/tests/queries/1_stateful/00162_mmap_compression_none.sql +++ b/tests/queries/1_stateful/00162_mmap_compression_none.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS hits_none; CREATE TABLE hits_none (Title String CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -INSERT INTO hits_none SELECT Title FROM test.hits; +INSERT INTO hits_none SELECT Title FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16; SET min_bytes_to_use_mmap_io = 1; SELECT sum(length(Title)) FROM hits_none; From ae5eccbf20b7198d6a3cc908e0186a384aba038a Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 22 Jul 2024 13:39:48 +0000 Subject: [PATCH 0656/2361] just a commit to trigger CI --- .../test_grant_and_revoke/test_without_table_engine_grant.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py b/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py index 210bb8ec465..4a5dfb83f79 100644 --- a/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py +++ b/tests/integration/test_grant_and_revoke/test_without_table_engine_grant.py @@ -60,6 +60,7 @@ def test_table_engine_and_source_grant(): ) # expecting grant POSTGRES instead of grant PostgreSQL due to discrepancy between source access type and table engine + # similarly, other sources should also use their own defined name instead of the name of the table engine assert "grant POSTGRES ON *.*" in instance.query_and_get_error( """ CREATE TABLE test.table1(a Integer) From 85241b3b8e869879718d74c01ab5071a10d66c06 Mon Sep 17 00:00:00 2001 From: Aleksandr Musorin Date: Fri, 12 Jul 2024 18:35:01 +0200 Subject: [PATCH 0657/2361] Allow running the query instantly in Play Automatically execute the query after the page loads if the `play_now=true` parameter is present. By default, the query does not execute automatically. Reason: While it might be acceptable to click `Run` once or twice, it becomes tedious when using the Play service frequently as a simple frontend to generate and open multiple links. This change eliminates the need to click `Run` every time. --- programs/server/play.html | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/programs/server/play.html b/programs/server/play.html index 9590a65524c..a4c01237abd 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -522,6 +522,9 @@ const current_url = new URL(window.location); const opened_locally = location.protocol == 'file:'; + /// Run query instantly after page is loaded + const play_now = current_url.searchParams.get("play_now"); + const server_address = current_url.searchParams.get('url'); if (server_address) { document.getElementById('url').value = server_address; @@ -599,6 +602,9 @@ const title = "ClickHouse Query: " + query; let history_url = window.location.pathname + '?user=' + encodeURIComponent(user); + if (play_now) { + history_url += "&play_now=" + encodeURIComponent(play_now); + } if (server_address != location.origin) { /// Save server's address in URL if it's not identical to the address of the play UI.
history_url += '&url=' + encodeURIComponent(server_address); @@ -1160,6 +1166,10 @@ }); } + if (play_now === 'true') { + post(); + } + document.getElementById('toggle-light').onclick = function() { setColorTheme('light', true); } From 85afb1757e92b279dfc7171c409e09d5b6873e8b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 12 Jul 2024 21:21:35 +0200 Subject: [PATCH 0658/2361] Rename variables --- programs/server/play.html | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index a4c01237abd..b1da7408b58 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -523,7 +523,7 @@ const opened_locally = location.protocol == 'file:'; /// Run query instantly after page is loaded - const play_now = current_url.searchParams.get("play_now"); + const run_immediately = current_url.searchParams.get("run"); const server_address = current_url.searchParams.get('url'); if (server_address) { @@ -602,8 +602,8 @@ const title = "ClickHouse Query: " + query; let history_url = window.location.pathname + '?user=' + encodeURIComponent(user); - if (play_now) { - history_url += "&play_now=" + encodeURIComponent(play_now); + if (run_immediately) { + history_url += "&run=" + encodeURIComponent(run_immediately); } if (server_address != location.origin) { /// Save server's address in URL if it's not identical to the address of the play UI. @@ -1166,7 +1166,7 @@ }); } - if (play_now === 'true') { + if (run_immediately === 'true') { post(); } From 11aee643adc49cf14e73f3ea0a8fbdfe284dd2ef Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 12 Jul 2024 21:23:18 +0200 Subject: [PATCH 0659/2361] Convert to bool --- programs/server/play.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index b1da7408b58..d4fc1446fa0 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -523,7 +523,7 @@ const opened_locally = location.protocol == 'file:'; /// Run query instantly after page is loaded - const run_immediately = current_url.searchParams.get("run"); + const run_immediately = !!current_url.searchParams.get("run"); const server_address = current_url.searchParams.get('url'); if (server_address) { @@ -603,7 +603,7 @@ let history_url = window.location.pathname + '?user=' + encodeURIComponent(user); if (run_immediately) { - history_url += "&run=" + encodeURIComponent(run_immediately); + history_url += "&run=" + run_immediately; } if (server_address != location.origin) { /// Save server's address in URL if it's not identical to the address of the play UI. 
From eb4cddd139c5ccdc43a3a0b8168220395303a276 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 12 Jul 2024 21:28:39 +0200 Subject: [PATCH 0660/2361] Allow `run=1` to save space --- programs/server/play.html | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index d4fc1446fa0..eb861172c02 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -523,7 +523,7 @@ const opened_locally = location.protocol == 'file:'; /// Run query instantly after page is loaded - const run_immediately = !!current_url.searchParams.get("run"); + const run_immediately = current_url.searchParams.get("run") == 1; const server_address = current_url.searchParams.get('url'); if (server_address) { @@ -603,7 +603,7 @@ let history_url = window.location.pathname + '?user=' + encodeURIComponent(user); if (run_immediately) { - history_url += "&run=" + run_immediately; + history_url += "&run=" + (run_immediately ? 1 : 0); } if (server_address != location.origin) { /// Save server's address in URL if it's not identical to the address of the play UI. @@ -1166,7 +1166,7 @@ }); } - if (run_immediately === 'true') { + if (run_immediately) { post(); } From 02704bc5cc8213d6bd56183f613bda960a69b93a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 12 Jul 2024 21:32:14 +0200 Subject: [PATCH 0661/2361] Different --- programs/server/play.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/server/play.html b/programs/server/play.html index eb861172c02..e30b4ac3450 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -522,8 +522,8 @@ const current_url = new URL(window.location); const opened_locally = location.protocol == 'file:'; - /// Run query instantly after page is loaded - const run_immediately = current_url.searchParams.get("run") == 1; + /// Run query instantly after page is loaded if the run parameter is present. + const run_immediately = !!current_url.searchParams.get("run"); const server_address = current_url.searchParams.get('url'); if (server_address) { From af53fd96a7a7adbbe6a6660a37d30b4acaf9fe40 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 12 Jul 2024 21:34:38 +0200 Subject: [PATCH 0662/2361] JavaScript Programming --- programs/server/play.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/play.html b/programs/server/play.html index e30b4ac3450..8f2fab36df4 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -523,7 +523,7 @@ const opened_locally = location.protocol == 'file:'; /// Run query instantly after page is loaded if the run parameter is present. - const run_immediately = !!current_url.searchParams.get("run"); + const run_immediately = current_url.searchParams.has("run"); const server_address = current_url.searchParams.get('url'); if (server_address) { From 0d2459f0de566dfa13eecf3bd176e59ec11e0239 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 12 Jul 2024 21:35:20 +0200 Subject: [PATCH 0663/2361] JavaScript --- programs/server/play.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/play.html b/programs/server/play.html index 8f2fab36df4..0d76a01cf7e 100644 --- a/programs/server/play.html +++ b/programs/server/play.html @@ -603,7 +603,7 @@ let history_url = window.location.pathname + '?user=' + encodeURIComponent(user); if (run_immediately) { - history_url += "&run=" + (run_immediately ? 
1 : 0); + history_url += "&run=1"; } if (server_address != location.origin) { /// Save server's address in URL if it's not identical to the address of the play UI. From a3dbf87df6a7b3105fdbc79cafe0b2ec2fa547d5 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:19:16 +0200 Subject: [PATCH 0664/2361] Update convertFieldToType.cpp --- src/Interpreters/convertFieldToType.cpp | 38 ++++++++++++------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 9ee214f4415..b92cbae7b09 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -384,25 +384,25 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID { const auto & element_type = *(type_tuple->getElements()[i]); res[i] = convertFieldToType(src_tuple[i], element_type); - if (!res[i].isNull() || canContainNull(element_type)) - continue; - - /* - * Either the source element was Null, or the conversion did not - * succeed, because the source and the requested types of the - * element are compatible, but the value is not convertible - * (e.g. trying to convert -1 from Int8 to UInt8). In these - * cases, consider the whole tuple also compatible but not - * convertible. According to the specification of this function, - * we must return Null in this case. - * - * The following elements might be not even compatible, so it - * makes sense to check them to detect user errors. Remember - * that there is an unconvertible element, and try to process - * the remaining ones. The convertFieldToType for each element - * will throw if it detects incompatibility. - */ - have_unconvertible_element = true; + if (res[i].isNull() && !canContainNull(element_type)) + { + /* + * Either the source element was Null, or the conversion did not + * succeed, because the source and the requested types of the + * element are compatible, but the value is not convertible + * (e.g. trying to convert -1 from Int8 to UInt8). In these + * cases, consider the whole tuple also compatible but not + * convertible. According to the specification of this function, + * we must return Null in this case. + * + * The following elements might be not even compatible, so it + * makes sense to check them to detect user errors. Remember + * that there is an unconvertible element, and try to process + * the remaining ones. The convertFieldToType for each element + * will throw if it detects incompatibility. + */ + have_unconvertible_element = true; + } } return have_unconvertible_element ? 
Field(Null()) : Field(res); From f5710beebf9394d954a1fe5dadb9df87b2b8ebaa Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 22 Jul 2024 15:22:31 +0100 Subject: [PATCH 0665/2361] fix build --- src/Common/tests/gtest_cgroups_reader.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Common/tests/gtest_cgroups_reader.cpp b/src/Common/tests/gtest_cgroups_reader.cpp index 38e56401401..2de25bb42ce 100644 --- a/src/Common/tests/gtest_cgroups_reader.cpp +++ b/src/Common/tests/gtest_cgroups_reader.cpp @@ -1,12 +1,13 @@ +#if defined(OS_LINUX) + +#include + #include #include -#include -#include #include #include #include -#include "IO/WriteBufferFromFileBase.h" using namespace DB; @@ -173,3 +174,5 @@ INSTANTIATE_TEST_SUITE_P( CgroupsMemoryUsageObserverTests, CgroupsMemoryUsageObserverFixture, ::testing::Values(CgroupsMemoryUsageObserver::CgroupsVersion::V1, CgroupsMemoryUsageObserver::CgroupsVersion::V2)); + +#endif From 2626880f6e0ed23e043d1f5756c6ab9786993426 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 14:28:07 +0000 Subject: [PATCH 0666/2361] Fix special builds and tests --- src/Columns/ColumnObject.cpp | 28 ++-- src/Columns/ColumnObject.h | 2 +- src/DataTypes/DataTypeObject.cpp | 37 ++++-- src/DataTypes/DataTypeObject.h | 2 +- .../Serializations/SerializationJSON.h | 2 +- .../Serializations/SerializationObject.cpp | 2 +- .../Serializations/SerializationObject.h | 2 +- .../Serializations/SerializationSubObject.h | 1 - src/Formats/JSONExtractTree.cpp | 65 ++------- src/Formats/SchemaInferenceUtils.cpp | 123 +++++------------- src/Functions/JSONPaths.cpp | 1 + .../InterpreterShowColumnsQuery.cpp | 1 + src/Parsers/ASTObjectTypeArgument.cpp | 18 +-- .../MergeTree/MergeTreeReaderWide.cpp | 6 + src/Storages/MergeTree/checkDataPart.cpp | 3 + .../01825_new_type_json_10.reference | 3 +- .../0_stateless/01825_new_type_json_10.sql | 4 +- .../0_stateless/01825_new_type_json_11.sh | 8 +- .../0_stateless/01825_new_type_json_12.sh | 8 +- .../0_stateless/01825_new_type_json_13.sh | 4 +- .../0_stateless/01825_new_type_json_6.sh | 6 +- .../0_stateless/01825_new_type_json_btc.sh | 6 +- .../01825_new_type_json_ghdata.reference | 12 ++ .../0_stateless/01825_new_type_json_ghdata.sh | 26 ++++ ...1825_new_type_json_ghdata_insert_select.sh | 4 +- .../01825_new_type_json_nbagames.reference | 14 +- .../01825_new_type_json_nbagames.sh | 6 +- .../0_stateless/01825_type_json_10.reference | 9 -- .../0_stateless/01825_type_json_10.sql | 2 +- .../0_stateless/01825_type_json_ghdata.sh | 2 +- .../0_stateless/01825_type_json_nbagames.sh | 2 +- .../02421_type_json_async_insert.sh | 2 +- ...02482_json_nested_arrays_with_same_keys.sh | 2 +- ...w_columns_called_from_clickhouse.reference | 2 +- 34 files changed, 178 insertions(+), 237 deletions(-) create mode 100644 tests/queries/0_stateless/01825_new_type_json_ghdata.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_ghdata.sh diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index f9a3af601e9..2fb5831188d 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -18,13 +18,13 @@ namespace ErrorCodes namespace { -static const FormatSettings & getFormatSettings() +const FormatSettings & getFormatSettings() { static const FormatSettings settings; return settings; } -static const std::shared_ptr & getDynamicSerialization() +const std::shared_ptr & getDynamicSerialization() { static const std::shared_ptr dynamic_serialization = std::make_shared(); return 
dynamic_serialization; @@ -394,7 +394,7 @@ void ColumnObject::doInsertFrom(const IColumn & src, size_t n) const auto & src_object_column = assert_cast(src); /// First, insert typed paths, they must be the same for both columns. - for (auto & [path, column] : src_object_column.typed_paths) + for (const auto & [path, column] : src_object_column.typed_paths) typed_paths[path]->insertFrom(*column, n); /// Second, insert dynamic paths and extend them if needed. @@ -428,7 +428,7 @@ void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t l const auto & src_object_column = assert_cast(src); /// First, insert typed paths, they must be the same for both columns. - for (auto & [path, column] : src_object_column.typed_paths) + for (const auto & [path, column] : src_object_column.typed_paths) typed_paths[path]->insertRangeFrom(*column, start, length); /// Second, insert dynamic paths and extend them if needed. @@ -898,9 +898,9 @@ void ColumnObject::ensureOwnership() size_t ColumnObject::byteSize() const { size_t size = 0; - for (auto & [_, column] : typed_paths) + for (const auto & [_, column] : typed_paths) size += column->byteSize(); - for (auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths) size += column->byteSize(); size += shared_data->byteSize(); return size; @@ -909,9 +909,9 @@ size_t ColumnObject::byteSize() const size_t ColumnObject::byteSizeAt(size_t n) const { size_t size = 0; - for (auto & [_, column] : typed_paths) + for (const auto & [_, column] : typed_paths) size += column->byteSizeAt(n); - for (auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths) size += column->byteSizeAt(n); size += shared_data->byteSizeAt(n); return size; @@ -920,9 +920,9 @@ size_t ColumnObject::byteSizeAt(size_t n) const size_t ColumnObject::allocatedBytes() const { size_t size = 0; - for (auto & [_, column] : typed_paths) + for (const auto & [_, column] : typed_paths) size += column->allocatedBytes(); - for (auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths) size += column->allocatedBytes(); size += shared_data->allocatedBytes(); return size; @@ -1040,9 +1040,9 @@ void ColumnObject::finalize() bool ColumnObject::isFinalized() const { bool finalized = true; - for (auto & [_, column] : typed_paths) + for (const auto & [_, column] : typed_paths) finalized &= column->isFinalized(); - for (auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths) finalized &= column->isFinalized(); finalized &= shared_data->isFinalized(); return finalized; @@ -1144,8 +1144,8 @@ size_t ColumnObject::findPathLowerBoundInSharedData(StringRef path, const Column Iterator() = delete; Iterator(const ColumnString * data_, size_t index_) : data(data_), index(index_) {} - Iterator(const Iterator & rhs) : data(rhs.data), index(rhs.index) {} - Iterator & operator=(const Iterator & rhs) { data = rhs.data; index = rhs.index; return *this; } + Iterator(const Iterator & rhs) = default; + Iterator & operator=(const Iterator & rhs) = default; inline Iterator& operator+=(difference_type rhs) { index += rhs; return *this;} inline StringRef operator*() const {return data->getDataAt(index);} diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index b130c7f4468..41739782266 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -188,7 +188,7 @@ public: static void fillPathColumnFromSharedData(IColumn & path_column, StringRef path, const ColumnPtr & shared_data_column, 
size_t start, size_t end); private: - void insertFromSharedDataAndFillRemainingDynamicPaths(const ColumnObject & src_object_column, std::vector & dynamic_paths_to_shared_data, size_t start, size_t length); + void insertFromSharedDataAndFillRemainingDynamicPaths(const ColumnObject & src_object_column, std::vector & src_dynamic_paths_for_shared_data, size_t start, size_t length); void serializePathAndValueIntoArena(Arena & arena, const char *& begin, StringRef path, StringRef value, StringRef & res) const; /// Map path -> column for paths with explicitly specified types. diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index c9378ab395c..a6c5c201f36 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -196,10 +196,29 @@ MutableColumnPtr DataTypeObject::createColumn() const namespace { +/// It is possible to have nested JSON object inside Dynamic. For example when we have an array of JSON objects. +/// During type inference in parsing in case of creating nested JSON objects, we reduce max_dynamic_paths/max_dynamic_types by factors +/// NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR/NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR. +/// So the type name will actually be JSON(max_dynamic_paths=N, max_dynamic_types=M). But we want the user to be able to query it +/// using json.array.:`Array(JSON)`.some.path without specifying max_dynamic_paths/max_dynamic_types. +/// To support it, we do a trick - we replace JSON name in subcolumn to JSON(max_dynamic_paths=N, max_dynamic_types=M), because we know +/// the exact values of max_dynamic_paths/max_dynamic_types for it. +void replaceJSONTypeNameIfNeeded(String & type_name, size_t max_dynamic_paths, size_t max_dynamic_types) +{ + auto pos = type_name.find("JSON"); + while (pos != String::npos) + { + /// Replace only if we don't already have parameters in JSON type declaration. + if (pos + 4 == type_name.size() || type_name[pos + 4] != '(') + type_name.replace(pos, 4, fmt::format("JSON(max_dynamic_paths={}, max_dynamic_types={})", max_dynamic_paths / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))); + pos = type_name.find("JSON", pos + 4); + } +} + /// JSON subcolumn name with Dynamic type subcolumn looks like this: /// "json.some.path.:`Type_name`.some.subcolumn". /// We back quoted type name during identifier parsing so we can distinguish type subcolumn and path element ":TypeName". -std::pair splitPathAndDynamicTypeSubcolumn(std::string_view subcolumn_name) +std::pair splitPathAndDynamicTypeSubcolumn(std::string_view subcolumn_name, size_t max_dynamic_paths, size_t max_dynamic_types) { /// Try to find dynamic type subcolumn in a form .:`Type`. auto pos = subcolumn_name.find(".:`"); @@ -212,6 +231,8 @@ std::pair splitPathAndDynamicTypeSubcolumn(std::string_view subc if (!tryReadBackQuotedString(dynamic_subcolumn, buf)) return {String(subcolumn_name), ""}; + replaceJSONTypeNameIfNeeded(dynamic_subcolumn, max_dynamic_paths, max_dynamic_types); + /// If there is more data in the buffer - it's subcolumn of a type, append it to the type name. if (!buf.eof()) dynamic_subcolumn += String(buf.position(), buf.available()); @@ -333,7 +354,7 @@ std::unique_ptr DataTypeObject::getDynamicSubcolu } /// Split requested subcolumn to the JSON path and Dynamic type subcolumn. 
- auto [path, path_subcolumn] = splitPathAndDynamicTypeSubcolumn(subcolumn_name); + auto [path, path_subcolumn] = splitPathAndDynamicTypeSubcolumn(subcolumn_name, max_dynamic_paths, max_dynamic_types); std::unique_ptr res; if (auto it = typed_paths.find(path); it != typed_paths.end()) { @@ -373,18 +394,6 @@ std::unique_ptr DataTypeObject::getDynamicSubcolu /// Get subcolumn for Dynamic type if needed. if (!path_subcolumn.empty()) { - /// It is possible to have nested JSON object inside Dynamic. For example when we have an array of JSON objects. - /// During parsing in case of creating nested JSON objects, we reduce max_dynamic_paths/max_dynamic_types by NESTED_OBJECT_REDUCE_FACTOR factor. - /// So the type name will actually be JSON(max_dynamic_paths=N, max_dynamic_types=M). But we want the user to be able to query it - /// using json.array.:`Array(JSON)`.some.path without specifying max_dynamic_paths/max_dynamic_types. - /// To support it, we do a trick - we replace JSON name in subcolumn to JSON(max_dynamic_paths=N, max_dynamic_types=M), because we know - /// the exact values of max_dynamic_paths/max_dynamic_types for it. - auto pos = path_subcolumn.find("JSON"); - /// We want to replace JSON keyword only in the first subcolumn part before the first dot. - auto first_dot_pos = path_subcolumn.find('.'); - if (pos != path_subcolumn.npos && (first_dot_pos == path_subcolumn.npos || pos < first_dot_pos)) - path_subcolumn.replace(pos, 4, fmt::format("JSON(max_dynamic_paths={}, max_dynamic_types={})", max_dynamic_paths / NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, std::max(max_dynamic_types / NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))); - res = res->type->getSubcolumnData(path_subcolumn, *res, throw_if_null); if (!res) return nullptr; diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 5b76d96e0de..6eea777ed26 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -23,7 +23,7 @@ public: static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR = 4; static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR = 2; - DataTypeObject( + explicit DataTypeObject( const SchemaFormat & schema_format_, const std::unordered_map & typed_paths_ = {}, const std::unordered_set & paths_to_skip_ = {}, diff --git a/src/DataTypes/Serializations/SerializationJSON.h b/src/DataTypes/Serializations/SerializationJSON.h index aee1413bdd3..934c94527f3 100644 --- a/src/DataTypes/Serializations/SerializationJSON.h +++ b/src/DataTypes/Serializations/SerializationJSON.h @@ -31,7 +31,7 @@ public: void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - virtual void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; void serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 8a9860fd592..7c8c23e8a29 
100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -48,7 +48,7 @@ bool SerializationObject::shouldSkipPath(const String & path) const if (paths_to_skip.contains(path)) return true; - auto it = std::lower_bound(sorted_typed_paths.begin(), sorted_typed_paths.end(), path); + auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path); if (it != sorted_paths_to_skip.end() && it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it))) return true; diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index b279fab2603..faf15aa3260 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -101,7 +101,7 @@ private: { String path; - TypedPathSubcolumnCreator(const String & path_) : path(path_) {} + explicit TypedPathSubcolumnCreator(const String & path_) : path(path_) {} DataTypePtr create(const DataTypePtr & prev) const override { return prev; } ColumnPtr create(const ColumnPtr & prev) const override { return prev; } diff --git a/src/DataTypes/Serializations/SerializationSubObject.h b/src/DataTypes/Serializations/SerializationSubObject.h index 8a5582536e7..10973b48957 100644 --- a/src/DataTypes/Serializations/SerializationSubObject.h +++ b/src/DataTypes/Serializations/SerializationSubObject.h @@ -67,7 +67,6 @@ private: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Text/binary serialization is not implemented for object sub-object subcolumn"); } -private: String path_prefix; std::unordered_map typed_paths_serializations; SerializationPtr dynamic_serialization; diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index a8980e785de..640ab1b0acd 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1389,7 +1389,7 @@ template class DynamicNode : public JSONExtractTreeNode { public: - DynamicNode( + explicit DynamicNode( size_t max_dynamic_paths_for_object_ = DataTypeObject::DEFAULT_MAX_SEPARATELY_STORED_PATHS, size_t max_dynamic_types_for_object_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) : max_dynamic_paths_for_object(max_dynamic_paths_for_object_), max_dynamic_types_for_object(max_dynamic_types_for_object_) @@ -1412,7 +1412,7 @@ public: } auto & variant_column = column_dynamic.getVariantColumn(); - auto & variant_info = column_dynamic.getVariantInfo(); + const auto & variant_info = column_dynamic.getVariantInfo(); /// First, try to insert element into current variants but with no types conversion. 
/// We want to avoid inferring the type on each row, so if we can insert this element into @@ -1486,20 +1486,20 @@ private: switch (element.type()) { case ElementType::NULL_VALUE: - return getNullType(); + return std::make_shared(std::make_shared()); case ElementType::BOOL: - return getBoolType(); + return DataTypeFactory::instance().get("Bool"); case ElementType::INT64: { - auto type = getInt64Type(); + auto type = std::make_shared(); if (element.getInt64() < 0) json_inference_info.negative_integers.insert(type.get()); return type; } case ElementType::UINT64: - return getUInt64Type(); + return std::make_shared(); case ElementType::DOUBLE: - return getFloat64Type(); + return std::make_shared(); case ElementType::STRING: { auto data = element.getString(); @@ -1516,7 +1516,7 @@ private: } } - return getStringType(); + return std::make_shared(); } case ElementType::ARRAY: { @@ -1527,7 +1527,7 @@ private: types.push_back(elementToDataTypeImpl(value, format_settings, json_inference_info)); if (types.empty()) - return getEmptyArrayType(); + return std::make_shared(std::make_shared()); if (checkIfTypesAreEqual(types)) return std::make_shared(types.back()); @@ -1561,51 +1561,6 @@ private: } } - /// During schema inference we create shared_ptr to the some data types quite a lot. - /// Single creating of such shared_ptr is not expensive, but when it happens on each - /// column on each row, it can be noticeable. - const DataTypePtr & getBoolType() const - { - static const DataTypePtr bool_type = DataTypeFactory::instance().get("Bool"); - return bool_type; - } - - const DataTypePtr & getStringType() const - { - static const DataTypePtr string_type = std::make_shared(); - return string_type; - } - - const DataTypePtr & getInt64Type() const - { - static const DataTypePtr int64_type = std::make_shared(); - return int64_type; - } - - const DataTypePtr & getUInt64Type() const - { - static const DataTypePtr uint64_type = std::make_shared(); - return uint64_type; - } - - const DataTypePtr & getFloat64Type() const - { - static const DataTypePtr float64_type = std::make_shared(); - return float64_type; - } - - const DataTypePtr & getNullType() const - { - static const DataTypePtr null_type = std::make_shared(std::make_shared()); - return null_type; - } - - const DataTypePtr & getEmptyArrayType() const - { - static const DataTypePtr empty_array_type = std::make_shared(std::make_shared()); - return empty_array_type; - } - size_t max_dynamic_paths_for_object; size_t max_dynamic_types_for_object; @@ -1772,7 +1727,7 @@ private: } } /// Try to add a new dynamic path. - else if (auto dynamic_column = column_object.tryToAddNewDynamicPath(current_path)) + else if (auto * dynamic_column = column_object.tryToAddNewDynamicPath(current_path)) { if (!dynamic_node->insertResultToColumn(*dynamic_column, element, insert_settings, format_settings, error)) { diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 441e7d77d24..28d60bf078b 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -36,63 +36,6 @@ namespace ErrorCodes namespace { - /// During schema inference we create shared_ptr to the some data types quite a lot. - /// Single creating of such shared_ptr is not expensive, but when it happens on each - /// column on each row, it can be noticeable. 
- const DataTypePtr & getBoolType() - { - static const DataTypePtr bool_type = DataTypeFactory::instance().get("Bool"); - return bool_type; - } - - const DataTypePtr & getStringType() - { - static const DataTypePtr string_type = std::make_shared(); - return string_type; - } - - const DataTypePtr & getInt64Type() - { - static const DataTypePtr int64_type = std::make_shared(); - return int64_type; - } - - const DataTypePtr & getUInt64Type() - { - static const DataTypePtr uint64_type = std::make_shared(); - return uint64_type; - } - - const DataTypePtr & getFloat64Type() - { - static const DataTypePtr float64_type = std::make_shared(); - return float64_type; - } - - const DataTypePtr & getDateType() - { - static const DataTypePtr date_type = std::make_shared(); - return date_type; - } - - const DataTypePtr & getDateTime64Type() - { - static const DataTypePtr date_type = std::make_shared(9); - return date_type; - } - - const DataTypePtr & getNullType() - { - static const DataTypePtr null_type = std::make_shared(std::make_shared()); - return null_type; - } - - const DataTypePtr & getEmptyArrayType() - { - static const DataTypePtr empty_array_type = std::make_shared(std::make_shared()); - return empty_array_type; - } - /// Special data type that represents JSON object as a set of paths and their types. /// It supports merging two JSON objects and creating Named Tuple from itself. /// It's used only for schema inference of Named Tuples from JSON objects. @@ -265,7 +208,7 @@ namespace if (leaf_type && !isNothing(removeNullable(leaf_type)) && !nodes.empty()) { if (use_string_type_for_ambiguous_paths) - return getStringType(); + return std::make_shared(); throw Exception( ErrorCodes::INCORRECT_DATA, @@ -331,7 +274,7 @@ namespace bool is_negative = json_info && json_info->negative_integers.contains(type.get()); have_negative_integers |= is_negative; if (!is_negative) - type = getUInt64Type(); + type = std::make_shared(); } } @@ -352,7 +295,7 @@ namespace WhichDataType which(type); if (which.isInt64() || which.isUInt64()) { - auto new_type = getFloat64Type(); + const auto & new_type = std::make_shared(); if (json_info && json_info->numbers_parsed_from_json_strings.erase(type.get())) json_info->numbers_parsed_from_json_strings.insert(new_type.get()); type = new_type; @@ -376,7 +319,7 @@ namespace for (auto & type : data_types) { if (isDate(type) || isDateTime64(type)) - type = getStringType(); + type = std::make_shared(); } type_indexes.erase(TypeIndex::Date); @@ -390,7 +333,7 @@ namespace for (auto & type : data_types) { if (isDate(type)) - type = getDateTime64Type(); + type = std::make_shared(9); } type_indexes.erase(TypeIndex::Date); @@ -412,7 +355,7 @@ namespace if (isNumber(type) && (settings.json.read_numbers_as_strings || !json_info || json_info->numbers_parsed_from_json_strings.contains(type.get()))) - type = getStringType(); + type = std::make_shared(); } updateTypeIndexes(data_types, type_indexes); @@ -435,11 +378,11 @@ namespace if (isBool(type)) { if (have_signed_integers) - type = getInt64Type(); + type = std::make_shared(); else if (have_unsigned_integers) - type = getUInt64Type(); + type = std::make_shared(); else - type = getFloat64Type(); + type = std::make_shared(); } } @@ -456,7 +399,7 @@ namespace for (auto & type : data_types) { if (isBool(type)) - type = getStringType(); + type = std::make_shared(); } type_indexes.erase(TypeIndex::UInt8); @@ -606,7 +549,7 @@ namespace for (auto & type : data_types) { if (isMap(type)) - type = getStringType(); + type = std::make_shared(); } 
type_indexes.erase(TypeIndex::Map); @@ -856,7 +799,7 @@ namespace /// Empty array has type Array(Nothing) if (nested_types.empty()) - return getEmptyArrayType(); + return std::make_shared(std::make_shared()); if (checkIfTypesAreEqual(nested_types)) return std::make_shared(std::move(nested_types.back())); @@ -969,13 +912,13 @@ namespace /// NOTE: it may break parsing of tryReadFloat() != tryReadIntText() + parsing of '.'/'e' /// But, for now it is true if (tryReadFloat(tmp_float, buf, settings, has_fractional) && has_fractional) - return getFloat64Type(); + return std::make_shared(); Int64 tmp_int; buf.position() = number_start; if (tryReadIntText(tmp_int, buf)) { - auto type = getInt64Type(); + auto type = std::make_shared(); if (json_info && tmp_int < 0) json_info->negative_integers.insert(type.get()); return type; @@ -985,7 +928,7 @@ namespace UInt64 tmp_uint; buf.position() = number_start; if (tryReadIntText(tmp_uint, buf)) - return getUInt64Type(); + return std::make_shared(); return nullptr; } @@ -997,13 +940,13 @@ namespace PeekableReadBufferCheckpoint checkpoint(peekable_buf); if (tryReadFloat(tmp_float, peekable_buf, settings, has_fractional) && has_fractional) - return getFloat64Type(); + return std::make_shared(); peekable_buf.rollbackToCheckpoint(/* drop= */ false); Int64 tmp_int; if (tryReadIntText(tmp_int, peekable_buf)) { - auto type = getInt64Type(); + auto type = std::make_shared(); if (json_info && tmp_int < 0) json_info->negative_integers.insert(type.get()); return type; @@ -1013,11 +956,11 @@ namespace /// In case of Int64 overflow we can try to infer UInt64. UInt64 tmp_uint; if (tryReadIntText(tmp_uint, peekable_buf)) - return getUInt64Type(); + return std::make_shared(); } else if (tryReadFloat(tmp_float, buf, settings, has_fractional)) { - return getFloat64Type(); + return std::make_shared(); } /// This is not a number. @@ -1034,7 +977,7 @@ namespace Int64 tmp_int; if (tryReadIntText(tmp_int, buf) && buf.eof()) { - auto type = getInt64Type(); + auto type = std::make_shared(); if (json_inference_info && tmp_int < 0) json_inference_info->negative_integers.insert(type.get()); return type; @@ -1046,7 +989,7 @@ namespace /// In case of Int64 overflow, try to infer UInt64 UInt64 tmp_uint; if (tryReadIntText(tmp_uint, buf) && buf.eof()) - return getUInt64Type(); + return std::make_shared(); } /// We can safely get back to the start of buffer, because we read from a string and we didn't reach eof. 
@@ -1055,7 +998,7 @@ namespace Float64 tmp; bool has_fractional; if (tryReadFloat(tmp, buf, settings, has_fractional) && buf.eof()) - return getFloat64Type(); + return std::make_shared(); return nullptr; } @@ -1079,7 +1022,7 @@ namespace if constexpr (is_json) { if (json_info->is_object_key) - return getStringType(); + return std::make_shared(); } if (auto type = tryInferDateOrDateTimeFromString(field, settings)) @@ -1097,7 +1040,7 @@ namespace } } - return getStringType(); + return std::make_shared(); } bool tryReadJSONObject(ReadBuffer & buf, const FormatSettings & settings, DataTypeJSONPaths::Paths & paths, const std::vector & path, JSONInferenceInfo * json_info, size_t depth) @@ -1254,7 +1197,7 @@ namespace return std::make_shared("json", true); if (settings.json.read_objects_as_strings) - return getStringType(); + return std::make_shared(); transformInferredTypesIfNeededImpl(value_types, settings, json_info); if (!checkIfTypesAreEqual(value_types)) @@ -1320,15 +1263,15 @@ namespace /// Bool if (checkStringCaseInsensitive("true", buf) || checkStringCaseInsensitive("false", buf)) - return getBoolType(); + return DataTypeFactory::instance().get("Bool"); /// Null or NaN if (checkCharCaseInsensitive('n', buf)) { if (checkStringCaseInsensitive("ull", buf)) - return getNullType(); + return std::make_shared(std::make_shared()); else if (checkStringCaseInsensitive("an", buf)) - return getFloat64Type(); + return std::make_shared(); } /// Number @@ -1385,7 +1328,7 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F if (!remain_nothing_types && isNothing(data_type) && settings.json.infer_incomplete_types_as_strings) { - data_type = getStringType(); + data_type = std::make_shared(); return; } @@ -1402,7 +1345,7 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F /// If all objects were empty, use type String, so these JSON objects will be read as Strings. if (json_paths->empty() && settings.json.infer_incomplete_types_as_strings) { - data_type = getStringType(); + data_type = std::make_shared(); return; } @@ -1424,7 +1367,7 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F auto key_type = map_type->getKeyType(); /// If all inferred Maps are empty, use type String, so these JSON objects will be read as Strings. 
if (isNothing(key_type) && settings.json.infer_incomplete_types_as_strings) - key_type = getStringType(); + key_type = std::make_shared(); auto value_type = map_type->getValueType(); @@ -1501,10 +1444,10 @@ DataTypePtr tryInferJSONNumberFromString(std::string_view field, const FormatSet DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const FormatSettings & settings) { if (settings.try_infer_dates && tryInferDate(field)) - return getDateType(); + return std::make_shared(); if (settings.try_infer_datetimes && tryInferDateTime(field, settings)) - return getDateTime64Type(); + return std::make_shared(9); return nullptr; } diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index 31699ad1c9c..4f29846319d 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -165,6 +165,7 @@ private: std::vector sorted_dynamic_and_typed_paths; const auto & typed_path_columns = column_object.getTypedPaths(); const auto & dynamic_path_columns = column_object.getDynamicPaths(); + sorted_dynamic_and_typed_paths.reserve(typed_path_columns.size() + dynamic_path_columns.size()); for (const auto & [path, _] : typed_path_columns) sorted_dynamic_and_typed_paths.push_back(path); for (const auto & [path, _] : dynamic_path_columns) diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index d8fff4e6026..b39ce25ab57 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -68,6 +68,7 @@ WITH map( 'Map', 'JSON', 'Tuple', 'JSON', 'Object', 'JSON', + 'JSON', 'JSON', 'String', '{}', 'FixedString', '{}') AS native_to_mysql_mapping, )", diff --git a/src/Parsers/ASTObjectTypeArgument.cpp b/src/Parsers/ASTObjectTypeArgument.cpp index aa5d23d7881..975f0389505 100644 --- a/src/Parsers/ASTObjectTypeArgument.cpp +++ b/src/Parsers/ASTObjectTypeArgument.cpp @@ -35,27 +35,27 @@ ASTPtr ASTObjectTypeArgument::clone() const return res; } -void ASTObjectTypeArgument::formatImpl(const FormatSettings & parameters, FormatState & state, FormatStateStacked frame) const +void ASTObjectTypeArgument::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { if (path_with_type) { - path_with_type->formatImpl(parameters, state, frame); + path_with_type->formatImpl(settings, state, frame); } else if (parameter) { - parameter->formatImpl(parameters, state, frame); + parameter->formatImpl(settings, state, frame); } else if (skip_path) { - std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); - parameters.ostr << indent_str << "SKIP" << ' '; - skip_path->formatImpl(parameters, state, frame); + std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); + settings.ostr << indent_str << "SKIP" << ' '; + skip_path->formatImpl(settings, state, frame); } else if (skip_path_regexp) { - std::string indent_str = parameters.one_line ? "" : std::string(4 * frame.indent, ' '); - parameters.ostr << indent_str << "SKIP REGEXP" << ' '; - skip_path_regexp->formatImpl(parameters, state, frame); + std::string indent_str = settings.one_line ? 
"" : std::string(4 * frame.indent, ' '); + settings.ostr << indent_str << "SKIP REGEXP" << ' '; + skip_path_regexp->formatImpl(settings, state, frame); } } diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index c017d7db786..2c876fade74 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -213,6 +213,9 @@ void MergeTreeReaderWide::addStreams( ISerialization::StreamCallback callback = [&] (const ISerialization::SubstreamPath & substream_path) { + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return; + auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(name_and_type, substream_path, data_part_info_for_read->getChecksums()); /** If data file is missing then we will not try to open it. @@ -348,6 +351,9 @@ void MergeTreeReaderWide::prefetchForColumn( deserializePrefix(serialization, name_and_type, current_task_last_mark, cache, deserialize_states_cache); auto callback = [&](const ISerialization::SubstreamPath & substream_path) { + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return; + auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(name_and_type, substream_path, data_part_info_for_read->getChecksums()); if (stream_name && !prefetched_streams.contains(*stream_name)) diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 774fd95ebc6..dcdcb7f5800 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -211,6 +211,9 @@ static IMergeTreeDataPart::Checksums checkDataPart( { get_serialization(column)->enumerateStreams([&](const ISerialization::SubstreamPath & substream_path) { + if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + return; + auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(column, substream_path, ".bin", data_part_storage); if (!stream_name) diff --git a/tests/queries/0_stateless/01825_new_type_json_10.reference b/tests/queries/0_stateless/01825_new_type_json_10.reference index 0dffa3c46f9..d70c8210914 100644 --- a/tests/queries/0_stateless/01825_new_type_json_10.reference +++ b/tests/queries/0_stateless/01825_new_type_json_10.reference @@ -1,9 +1,8 @@ ('a.b','Int64') ('a.c','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') -('a.c','Array(Nullable(String))') +('d','Int64') ('e','Array(Nullable(Int64))') ('f','Int64') -('d','Int64') {"o":{"a":{"b":"1","c":[{"d":"10","e":["31"]},{"d":"20","e":["63","127"]}]}}} {"o":{"a":{"b":"2","c":[]}}} {"o":{"a":{"b":"3","c":[{"e":["32"],"f":"20"},{"e":["64","128"],"f":"30"}]}}} diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql b/tests/queries/0_stateless/01825_new_type_json_10.sql index 0e599b0ac31..a313adb4757 100644 --- a/tests/queries/0_stateless/01825_new_type_json_10.sql +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -8,8 +8,8 @@ CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 127]}]}} {"a": {"b": 2, "c": []}} INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 3, "c": [{"f": 20, "e": [32]}, {"f": 30, "e": [64, 128]}]}} {"a": {"b": 4, "c": []}} -SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(o)) FROM t_json_10; -SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(o.a.c.:`Array(JSON)`))) FROM t_json_10; +SELECT DISTINCT 
arrayJoin(JSONAllPathsWithTypes(o)) as path FROM t_json_10 order by path; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(o.a.c.:`Array(JSON)`))) as path FROM t_json_10 order by path; SELECT o FROM t_json_10 ORDER BY o.a.b FORMAT JSONEachRow; SELECT o.a.b, o.a.c.:`Array(JSON)`.d, o.a.c.:`Array(JSON)`.e, o.a.c.:`Array(JSON)`.f FROM t_json_10 ORDER BY o.a.b; diff --git a/tests/queries/0_stateless/01825_new_type_json_11.sh b/tests/queries/0_stateless/01825_new_type_json_11.sh index 2eb6c1768fd..f448b7433ab 100755 --- a/tests/queries/0_stateless/01825_new_type_json_11.sh +++ b/tests/queries/0_stateless/01825_new_type_json_11.sh @@ -53,10 +53,10 @@ cat <&1 | grep -o -m1 "Cannot parse object" $CLICKHOUSE_CLIENT -q "SELECT count() FROM t_json_async_insert" diff --git a/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh b/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh index 99f243833f5..e0648f4df6e 100755 --- a/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh +++ b/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh @@ -21,7 +21,7 @@ echo ' } }' > 02482_object_data.jsonl -$CLICKHOUSE_LOCAL --allow_experimental_object_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj Object('json')')" +$CLICKHOUSE_LOCAL --allow_experimental_object_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj Object(''json'')')" rm 02482_object_data.jsonl diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference index de0f151db7d..cb905d63ca5 100644 --- a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference +++ b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference @@ -44,7 +44,7 @@ nested.col1 Array(String) NO \N nested.col2 Array(UInt32) NO \N nfs Nullable(FixedString(3)) YES \N ns Nullable(String) YES \N -o Object(\'json\') NO \N +o JSON NO \N p Point NO \N pg Polygon NO \N r Ring NO \N From f7b7a22c18a39994ae4e212f8069134b7bd595a4 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 14:28:21 +0000 Subject: [PATCH 0667/2361] Update docs --- docs/en/sql-reference/data-types/json.md | 1 + docs/en/sql-reference/data-types/object-json.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index 1001ad63999..494bfba3173 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -2,6 +2,7 @@ slug: /en/sql-reference/data-types/json sidebar_position: 63 sidebar_label: JSON +keywords: [json, data type] --- # JSON diff --git a/docs/en/sql-reference/data-types/object-json.md b/docs/en/sql-reference/data-types/object-json.md index c6fa6f0e882..9609f30aac1 100644 --- a/docs/en/sql-reference/data-types/object-json.md +++ b/docs/en/sql-reference/data-types/object-json.md @@ -1,5 +1,5 @@ --- -slug: /en/sql-reference/data-types/object-data-type +slug: /en/sql-reference/data-types/object-json sidebar_position: 26 sidebar_label: Object Data Type keywords: [object, data type] From 023276b6f6296a68f0e36004c9e40696bad73742 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 14:32:04 +0000 Subject: [PATCH 0668/2361] Fix tests reference --- .../03038_nested_dynamic_merges_compact_horizontal.reference | 4 ++-- .../03038_nested_dynamic_merges_compact_vertical.reference | 4 ++-- 
.../03038_nested_dynamic_merges_wide_horizontal.reference | 4 ++-- .../03038_nested_dynamic_merges_wide_vertical.reference | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference index 27ed336a035..4be740f6050 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference @@ -11,11 +11,11 @@ 33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) 50000 Tuple(a Dynamic(max_types=3)):UInt64 66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 133333 Tuple(a Dynamic(max_types=3)):None 50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 116667 Tuple(a Dynamic(max_types=3)):String 133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference index 27ed336a035..4be740f6050 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference @@ -11,11 +11,11 @@ 33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) 50000 Tuple(a Dynamic(max_types=3)):UInt64 66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 133333 Tuple(a Dynamic(max_types=3)):None 50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 116667 Tuple(a Dynamic(max_types=3)):String 133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference index 27ed336a035..4be740f6050 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference @@ -11,11 +11,11 @@ 33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) 50000 Tuple(a Dynamic(max_types=3)):UInt64 66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 133333 Tuple(a Dynamic(max_types=3)):None 50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 116667 Tuple(a Dynamic(max_types=3)):String 133333 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference index 27ed336a035..4be740f6050 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference @@ -11,11 +11,11 @@ 33333 Tuple(a 
Dynamic(max_types=3)):Array(UInt8) 50000 Tuple(a Dynamic(max_types=3)):UInt64 66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 133333 Tuple(a Dynamic(max_types=3)):None 50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(number UInt64) +100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) 100000 UInt64:None 116667 Tuple(a Dynamic(max_types=3)):String 133333 Tuple(a Dynamic(max_types=3)):None From 660530c611000f5eb8875c640d5aed196315a187 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 22 Jul 2024 17:10:39 +0200 Subject: [PATCH 0669/2361] Fix tidy --- src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp index a36a8b031b4..377f6b36888 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp @@ -258,10 +258,9 @@ std::optional ReadBufferFromAzureBlobStorage::tryGetFileSize() if (!blob_client) blob_client = std::make_unique(blob_container_client->GetBlobClient(path)); - if (file_size.has_value()) - return *file_size; + if (!file_size) + file_size = blob_client->GetProperties().Value.BlobSize; - file_size = blob_client->GetProperties().Value.BlobSize; return *file_size; } From eb519c501622af6a6df6f3370b6209c92c2d4c20 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 20 Jul 2024 15:01:47 +0200 Subject: [PATCH 0670/2361] Change paths of temporary part directories during RESTORE use "/var/lib/clickhouse/data/test/table/tmp_restore_all_0_1_1_0-XXXXXXXX" instead of "/tmp/XXXXXXXX/data/test/table/0_1_1_0" (because directories can only be renamed in s3_plain_rewritable, and not moved to another parent directory). --- src/Disks/TemporaryFileOnDisk.cpp | 3 +- src/Disks/TemporaryFileOnDisk.h | 7 +++++ src/Storages/MergeTree/MergeTreeData.cpp | 36 ++++++++++++++---------- src/Storages/MergeTree/MergeTreeData.h | 2 +- 4 files changed, 31 insertions(+), 17 deletions(-) diff --git a/src/Disks/TemporaryFileOnDisk.cpp b/src/Disks/TemporaryFileOnDisk.cpp index 91eb214d941..88674e068d9 100644 --- a/src/Disks/TemporaryFileOnDisk.cpp +++ b/src/Disks/TemporaryFileOnDisk.cpp @@ -58,7 +58,8 @@ TemporaryFileOnDisk::~TemporaryFileOnDisk() if (!disk->exists(relative_path)) { - LOG_WARNING(getLogger("TemporaryFileOnDisk"), "Temporary path '{}' does not exist in '{}'", relative_path, disk->getPath()); + if (show_warning_if_removed) + LOG_WARNING(getLogger("TemporaryFileOnDisk"), "Temporary path '{}' does not exist in '{}'", relative_path, disk->getPath()); return; } diff --git a/src/Disks/TemporaryFileOnDisk.h b/src/Disks/TemporaryFileOnDisk.h index cccfc82cf9e..d0ff44c6f03 100644 --- a/src/Disks/TemporaryFileOnDisk.h +++ b/src/Disks/TemporaryFileOnDisk.h @@ -27,12 +27,19 @@ public: /// Return relative path (without disk) const String & getRelativePath() const { return relative_path; } + /// Sets whether the destructor should show a warning if the temporary file has been already removed. + /// By default a warning is shown. 
+ void setShowWarningIfRemoved(bool show_warning_if_removed_) { show_warning_if_removed = show_warning_if_removed_; } + private: DiskPtr disk; /// Relative path in disk to the temporary file or directory String relative_path; + /// Whether the destructor should show a warning if the temporary file has been already removed. + bool show_warning_if_removed = true; + CurrentMetrics::Increment metric_increment; /// Specified if we know what for file is used (sort/aggregate/join). diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 12a4effe33c..642edf1cfb8 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -5549,12 +5549,17 @@ public: attachIfAllPartsRestored(); } - String getTemporaryDirectory(const DiskPtr & disk) + String getTemporaryDirectory(const DiskPtr & disk, const String & part_name) { std::lock_guard lock{mutex}; - auto it = temp_dirs.find(disk); - if (it == temp_dirs.end()) - it = temp_dirs.emplace(disk, std::make_shared(disk, "tmp/")).first; + auto it = temp_part_dirs.find(part_name); + if (it == temp_part_dirs.end()) + { + auto temp_part_dir = std::make_shared(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-")); + /// Attaching parts will rename them so it's expected for a temporary part directory not to exist anymore in the end. + temp_part_dir->setShowWarningIfRemoved(false); + it = temp_part_dirs.emplace(part_name, temp_part_dir).first; + } return it->second->getRelativePath(); } @@ -5572,7 +5577,7 @@ private: storage->attachRestoredParts(std::move(parts)); parts.clear(); - temp_dirs.clear(); + temp_part_dirs.clear(); num_parts = 0; } @@ -5581,7 +5586,7 @@ private: size_t num_parts = 0; size_t num_broken_parts = 0; MutableDataPartsVector parts; - std::map> temp_dirs; + std::map> temp_part_dirs; mutable std::mutex mutex; }; @@ -5648,11 +5653,9 @@ void MergeTreeData::restorePartFromBackup(std::shared_ptr r /// Calculate paths, for example: /// part_name = 0_1_1_0 /// part_path_in_backup = /data/test/table/0_1_1_0 - /// tmp_dir = tmp/1aaaaaa - /// tmp_part_dir = tmp/1aaaaaa/data/test/table/0_1_1_0 + /// temp_part_dir = /var/lib/clickhouse/data/test/table/tmp_restore_all_0_1_1_0-XXXXXXXX auto disk = reservation->getDisk(); - fs::path temp_dir = restored_parts_holder->getTemporaryDirectory(disk); - fs::path temp_part_dir = temp_dir / part_path_in_backup_fs.relative_path(); + fs::path temp_part_dir = restored_parts_holder->getTemporaryDirectory(disk, part_name); /// Subdirectories in the part's directory. It's used to restore projections. 
std::unordered_set subdirs; @@ -5679,22 +5682,25 @@ void MergeTreeData::restorePartFromBackup(std::shared_ptr r reservation->update(reservation->getSize() - file_size); } - if (auto part = loadPartRestoredFromBackup(disk, temp_part_dir.parent_path(), part_name, detach_if_broken)) + if (auto part = loadPartRestoredFromBackup(part_name, disk, temp_part_dir, detach_if_broken)) restored_parts_holder->addPart(part); else restored_parts_holder->increaseNumBrokenParts(); } -MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(const DiskPtr & disk, const String & temp_dir, const String & part_name, bool detach_if_broken) const +MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(const String & part_name, const DiskPtr & disk, const String & temp_part_dir, bool detach_if_broken) const { MutableDataPartPtr part; auto single_disk_volume = std::make_shared(disk->getName(), disk, 0); + fs::path full_part_dir{temp_part_dir}; + String parent_part_dir = full_part_dir.parent_path(); + String part_dir_name = full_part_dir.filename(); - /// Load this part from the directory `tmp_part_dir`. + /// Load this part from the directory `temp_part_dir`. auto load_part = [&] { - MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, temp_dir, part_name); + MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name); builder.withPartFormatFromDisk(); part = std::move(builder).build(); part->version.setCreationTID(Tx::PrehistoricTID, nullptr); @@ -5709,7 +5715,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons if (!part) { /// Make a fake data part only to copy its files to /detached/. - part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, temp_dir, part_name} + part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name} .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 7076b680521..d185a5262fd 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -1473,7 +1473,7 @@ protected: /// Restores the parts of this table from backup. void restorePartsFromBackup(RestorerFromBackup & restorer, const String & data_path_in_backup, const std::optional & partitions); void restorePartFromBackup(std::shared_ptr restored_parts_holder, const MergeTreePartInfo & part_info, const String & part_path_in_backup, bool detach_if_broken) const; - MutableDataPartPtr loadPartRestoredFromBackup(const DiskPtr & disk, const String & temp_dir, const String & part_name, bool detach_if_broken) const; + MutableDataPartPtr loadPartRestoredFromBackup(const String & part_name, const DiskPtr & disk, const String & temp_part_dir, bool detach_if_broken) const; /// Attaches restored parts to the storage. 
virtual void attachRestoredParts(MutableDataPartsVector && parts) = 0; From d13bbdf5e1e0f5763045d301dbfc9fddd6013f14 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 15:49:46 +0000 Subject: [PATCH 0671/2361] Fix style --- src/Formats/JSONExtractTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 640ab1b0acd..e8a78a40749 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -314,7 +314,7 @@ public: } NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || this->is_bool_type, insert_settings.allow_type_conversion, error)) + if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || this->is_bool_type, insert_settings.allow_type_conversion, error)) { if (error.empty()) error = fmt::format("cannot read {} value from JSON element: {}", TypeName, jsonElementToString(element, format_settings)); From 5fa2db8e4828b004ac10d625df62efcc8711dc98 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Mon, 22 Jul 2024 16:59:17 +0100 Subject: [PATCH 0672/2361] fix 01293_show_clusters stateless test --- tests/queries/0_stateless/01293_show_clusters.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01293_show_clusters.reference b/tests/queries/0_stateless/01293_show_clusters.reference index e140f207022..9569fcf2e37 100644 --- a/tests/queries/0_stateless/01293_show_clusters.reference +++ b/tests/queries/0_stateless/01293_show_clusters.reference @@ -1,3 +1,3 @@ test_shard_localhost -test_cluster_one_shard_two_replicas 1 1 0 1 127.0.0.1 127.0.0.1 9000 1 default -test_cluster_one_shard_two_replicas 1 1 0 2 127.0.0.2 127.0.0.2 9000 0 default +test_cluster_one_shard_two_replicas 1 1 0 1 127.0.0.1 127.0.0.1 9000 1 default 0 NULL +test_cluster_one_shard_two_replicas 1 1 0 2 127.0.0.2 127.0.0.2 9000 0 default 0 NULL From fa0f760fd9afd652dee50b89fd8d176d4e031174 Mon Sep 17 00:00:00 2001 From: Max K Date: Mon, 22 Jul 2024 15:46:27 +0200 Subject: [PATCH 0673/2361] CI: CI Buddy to notify about fatal workflow failures --- .github/workflows/backport_branches.yml | 14 ++++++++- .github/workflows/master.yml | 38 +++++++------------------ .github/workflows/merge_queue.yml | 13 ++++++++- .github/workflows/nightly.yml | 17 +++++++++++ .github/workflows/pull_request.yml | 13 ++++++++- .github/workflows/release_branches.yml | 14 ++++++++- .yamllint | 6 ++++ tests/ci/ci_buddy.py | 37 +++++++++++++++++++++--- tests/ci/ci_utils.py | 27 ++++++++++++++++++ 9 files changed, 143 insertions(+), 36 deletions(-) diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 9645d0e46de..50f4f503f5d 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -241,8 +241,9 @@ jobs: runner_type: stress-tester data: ${{ needs.RunConfig.outputs.data }} FinishCheck: - if: ${{ !failure() && !cancelled() }} + if: ${{ !cancelled() }} needs: + - RunConfig - Builds_Report - FunctionalStatelessTestAsan - FunctionalStatefulTestDebug @@ -257,6 +258,7 @@ jobs: with: clear-repository: true - name: Finish label + if: ${{ !failure() }} run: | cd "$GITHUB_WORKSPACE/tests/ci" # update mergeable check @@ -264,3 +266,13 @@ jobs: # update overall ci report python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} python3 merge_pr.py + - name: 
Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + echo "::group::Workflow results" + python3 -m json.tool "$WORKFLOW_RESULT_FILE" + echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 09acef5eb8b..b28d87ee31f 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -121,34 +121,6 @@ jobs: runner_type: style-checker-aarch64 data: ${{ needs.RunConfig.outputs.data }} - MarkReleaseReady: - if: ${{ !failure() && !cancelled() }} - needs: [RunConfig, Builds_1, Builds_2] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Debug - run: | - echo need with different filters - cat << 'EOF' - ${{ toJSON(needs) }} - ${{ toJSON(needs.*.result) }} - no failures ${{ !contains(needs.*.result, 'failure') }} - no skips ${{ !contains(needs.*.result, 'skipped') }} - no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }} - EOF - - name: Not ready - # fail the job to be able to restart it - if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }} - run: exit 1 - - name: Check out repository code - if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }} - uses: ClickHouse/checkout@v1 - - name: Mark Commit Release Ready - if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }} - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 mark_release_ready.py - FinishCheck: if: ${{ !cancelled() }} needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] @@ -160,3 +132,13 @@ jobs: run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + - name: Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + echo "::group::Workflow results" + python3 -m json.tool "$WORKFLOW_RESULT_FILE" + echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml index 31a65ac3d15..db89825a99a 100644 --- a/.github/workflows/merge_queue.yml +++ b/.github/workflows/merge_queue.yml @@ -93,7 +93,7 @@ jobs: data: ${{ needs.RunConfig.outputs.data }} CheckReadyForMerge: - if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }} + if: ${{ !cancelled() }} # Test_2 or Test_3 must not have jobs required for Mergeable check needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1] runs-on: [self-hosted, style-checker-aarch64] @@ -101,6 +101,17 @@ jobs: - name: Check out repository code uses: ClickHouse/checkout@v1 - name: Check and set merge status + if: ${{ needs.StyleCheck.result == 'success' }} run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + - name: Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + echo "::group::Workflow results" + python3 -m json.tool "$WORKFLOW_RESULT_FILE" + echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 
bffe5b4c1bf..fd5b5eefcc4 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -44,3 +44,20 @@ jobs: with: data: "${{ needs.RunConfig.outputs.data }}" set_latest: true + CheckWorkflow: + if: ${{ !cancelled() }} + needs: [RunConfig, BuildDockers] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + - name: Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + echo "::group::Workflow results" + python3 -m json.tool "$WORKFLOW_RESULT_FILE" + echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 5124e4dba2c..9930cf6dde4 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -151,7 +151,7 @@ jobs: data: ${{ needs.RunConfig.outputs.data }} CheckReadyForMerge: - if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }} + if: ${{ !cancelled() }} # Test_2 or Test_3 must not have jobs required for Mergeable check needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1] runs-on: [self-hosted, style-checker-aarch64] @@ -161,9 +161,20 @@ jobs: with: filter: tree:0 - name: Check and set merge status + if: ${{ needs.StyleCheck.result == 'success' }} run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + - name: Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + echo "::group::Workflow results" + python3 -m json.tool "$WORKFLOW_RESULT_FILE" + echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status ################################# Stage Final ################################# # diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 6a18999d74e..50565112825 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -441,8 +441,9 @@ jobs: runner_type: stress-tester data: ${{ needs.RunConfig.outputs.data }} FinishCheck: - if: ${{ !failure() && !cancelled() }} + if: ${{ !cancelled() }} needs: + - RunConfig - DockerServerImage - DockerKeeperImage - Builds_Report @@ -478,9 +479,20 @@ jobs: with: clear-repository: true - name: Finish label + if: ${{ !failure() }} run: | cd "$GITHUB_WORKSPACE/tests/ci" # update mergeable check python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} # update overall ci report python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + - name: Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + echo "::group::Workflow results" + python3 -m json.tool "$WORKFLOW_RESULT_FILE" + echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.yamllint b/.yamllint index f144e2d47b1..7fb741ec9f4 100644 --- a/.yamllint +++ b/.yamllint @@ -14,3 +14,9 @@ rules: comments: min-spaces-from-content: 1 document-start: disable + colons: disable + indentation: disable + line-length: disable + trailing-spaces: disable + truthy: disable + 
new-line-at-end-of-file: disable diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index 3eba5532e66..688c7d59988 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -1,3 +1,4 @@ +import argparse import json import os from typing import Union, Dict @@ -7,7 +8,7 @@ import requests from botocore.exceptions import ClientError from pr_info import PRInfo -from ci_utils import Shell +from ci_utils import Shell, GHActions class CIBuddy: @@ -29,6 +30,11 @@ class CIBuddy: self.commit_url = pr_info.commit_html_url self.sha = pr_info.sha[:10] + def check_workflow(self): + res = GHActions.get_workflow_job_result(GHActions.ActionsNames.RunConfig) + if res != GHActions.ActionStatuses.SUCCESS: + self.post_job_error("Workflow Configuration Failed", critical=True) + @staticmethod def _get_webhooks(): name = "ci_buddy_web_hooks" @@ -139,7 +145,30 @@ class CIBuddy: self.post(message) +def parse_args(): + parser = argparse.ArgumentParser("CI Buddy bot notifies about CI events") + parser.add_argument( + "--check-wf-status", + action="store_true", + help="Checks workflow status", + ) + parser.add_argument( + "--test", + action="store_true", + help="for test and debug", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="dry run mode", + ) + return parser.parse_args(), parser + + if __name__ == "__main__": - # test - buddy = CIBuddy(dry_run=True) - buddy.post_job_error("TEst") + args, parser = parse_args() + + if args.test: + CIBuddy(dry_run=True).post_job_error("TEst") + elif args.check_wf_status: + CIBuddy(dry_run=args.dry_run).check_workflow() diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 9a1b12af310..1963e3f39d0 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -1,3 +1,4 @@ +import json import os import re import subprocess @@ -11,6 +12,9 @@ import requests class Envs: GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse") + WORKFLOW_RESULT_FILE = os.getenv( + "WORKFLOW_RESULT_FILE", "/tmp/workflow_results.json" + ) LABEL_CATEGORIES = { @@ -79,6 +83,29 @@ def normalize_string(string: str) -> str: class GHActions: + class ActionsNames: + RunConfig = "RunConfig" + + class ActionStatuses: + ERROR = "error" + FAILURE = "failure" + PENDING = "pending" + SUCCESS = "success" + + @staticmethod + def get_workflow_job_result(wf_job_name: str) -> Optional[str]: + if not Path(Envs.WORKFLOW_RESULT_FILE).exists(): + print( + f"ERROR: Failed to get workflow results from file [{Envs.WORKFLOW_RESULT_FILE}]" + ) + return None + with open(Envs.WORKFLOW_RESULT_FILE, "r", encoding="utf-8") as json_file: + res = json.load(json_file) + if wf_job_name in res: + return res[wf_job_name]["result"] # type: ignore + else: + return None + @staticmethod def print_in_group(group_name: str, lines: Union[Any, List[Any]]) -> None: lines = list(lines) From 2a893ed8cd10e6e3b7506f43b644b5037f96c49a Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Mon, 22 Jul 2024 17:03:41 +0100 Subject: [PATCH 0674/2361] fxs --- src/IO/S3/Client.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index db20420db9f..325c820f8bd 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -388,7 +388,8 @@ Model::HeadObjectOutcome Client::HeadObject(HeadObjectRequest & request) const if (isClientForDisk()) CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); - return enrichErrorMessage(std::move(result)); + return enrichErrorMessage( + HeadObject(static_cast(request))); } /// For each request, we wrap 
the request functions from Aws::S3::Client with doRequest From a8e34f07a8090892ea98c58937bfb4a4ed4a6661 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 16:26:40 +0000 Subject: [PATCH 0675/2361] Fix debug build --- src/Columns/ColumnObjectDeprecated.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Columns/ColumnObjectDeprecated.cpp b/src/Columns/ColumnObjectDeprecated.cpp index 6d0bfa0e05e..d3f23dc6b57 100644 --- a/src/Columns/ColumnObjectDeprecated.cpp +++ b/src/Columns/ColumnObjectDeprecated.cpp @@ -763,7 +763,7 @@ void ColumnObjectDeprecated::get(size_t n, Field & res) const } } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnObjectDeprecated::insertFrom(const IColumn & src, size_t n) #else void ColumnObjectDeprecated::doInsertFrom(const IColumn & src, size_t n) @@ -772,7 +772,7 @@ void ColumnObjectDeprecated::doInsertFrom(const IColumn & src, size_t n) insert(src[n]); } -#if !defined(ABORT_ON_LOGICAL_ERROR) +#if !defined(DEBUG_OR_SANITIZER_BUILD) void ColumnObjectDeprecated::insertRangeFrom(const IColumn & src, size_t start, size_t length) #else void ColumnObjectDeprecated::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) From 69be00cbf00b1be867760c996244093020da9034 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 22 Jul 2024 15:47:53 +0000 Subject: [PATCH 0676/2361] Improve --- tests/integration/test_storage_s3/test.py | 78 +++++++++++------------ 1 file changed, 38 insertions(+), 40 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 40cbf4b44a6..65a5cdcad29 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -154,6 +154,7 @@ def test_put(started_cluster, maybe_auth, positive, compression): def test_partition_by(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" @@ -161,26 +162,37 @@ def test_partition_by(started_cluster): values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)" filename = "test_{_partition_id}.csv" put_query = f"""INSERT INTO TABLE FUNCTION - s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}') + s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{filename}', 'CSV', '{table_format}') PARTITION BY {partition_by} VALUES {values}""" run_query(instance, put_query) - assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test_3.csv") - assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test_1.csv") - assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test_45.csv") + assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, f"{id}/test_3.csv") + assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, f"{id}/test_1.csv") + assert "78,43,45\n" == get_s3_file_content( + started_cluster, bucket, f"{id}/test_45.csv" + ) filename = "test2_{_partition_id}.csv" instance.query( - f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3" + f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{filename}', 'CSV') partition by column3" ) 
instance.query(f"insert into p values {values}") - assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test2_3.csv") - assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test2_1.csv") - assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test2_45.csv") + assert "1,2,3\n" == get_s3_file_content( + started_cluster, bucket, f"{id}/test2_3.csv" + ) + assert "3,2,1\n" == get_s3_file_content( + started_cluster, bucket, f"{id}/test2_1.csv" + ) + assert "78,43,45\n" == get_s3_file_content( + started_cluster, bucket, f"{id}/test2_45.csv" + ) + + instance.query("drop table p") def test_partition_by_string_column(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "col_num UInt32, col_str String" @@ -188,21 +200,20 @@ def test_partition_by_string_column(started_cluster): values = "(1, 'foo/bar'), (3, 'йцук'), (78, '你好')" filename = "test_{_partition_id}.csv" put_query = f"""INSERT INTO TABLE FUNCTION - s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}') + s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{filename}', 'CSV', '{table_format}') PARTITION BY {partition_by} VALUES {values}""" run_query(instance, put_query) assert '1,"foo/bar"\n' == get_s3_file_content( - started_cluster, bucket, "test_foo/bar.csv" - ) - assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv") - assert '78,"你好"\n' == get_s3_file_content( - started_cluster, bucket, "test_你好.csv" + started_cluster, bucket, f"{id}/test_foo/bar.csv" ) + assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, f"{id}/test_йцук.csv") + assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, f"{id}/test_你好.csv") def test_partition_by_const_column(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" @@ -211,12 +222,14 @@ def test_partition_by_const_column(started_cluster): values_csv = "1,2,3\n3,2,1\n78,43,45\n" filename = "test_{_partition_id}.csv" put_query = f"""INSERT INTO TABLE FUNCTION - s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}') + s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{filename}', 'CSV', '{table_format}') PARTITION BY {partition_by} VALUES {values}""" run_query(instance, put_query) - assert values_csv == get_s3_file_content(started_cluster, bucket, "test_88.csv") + assert values_csv == get_s3_file_content( + started_cluster, bucket, f"{id}/test_88.csv" + ) @pytest.mark.parametrize("special", ["space", "plus"]) @@ -276,46 +289,31 @@ def test_get_path_with_special(started_cluster, special): @pytest.mark.parametrize("auth", [pytest.param("'minio','minio123',", id="minio")]) def test_empty_put(started_cluster, auth): # type: (ClickHouseCluster, str) -> None - + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" drop_empty_table_query = "DROP TABLE IF EXISTS empty_table" - create_empty_table_query = """ - CREATE TABLE empty_table ( - {} - ) ENGINE = Null() - """.format( - table_format + create_empty_table_query = ( + f"CREATE TABLE 
empty_table ({table_format}) ENGINE = Null()" ) run_query(instance, drop_empty_table_query) run_query(instance, create_empty_table_query) filename = "empty_put_test.csv" - put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format( - started_cluster.minio_ip, - MINIO_INTERNAL_PORT, - bucket, - filename, - auth, - table_format, - ) + put_query = f"""insert into table function + s3('http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{id}/{filename}', {auth} 'CSV', '{table_format}') + select * from empty_table""" run_query(instance, put_query) assert ( run_query( instance, - "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format( - started_cluster.minio_ip, - MINIO_INTERNAL_PORT, - bucket, - filename, - auth, - table_format, - ), + f"""select count(*) from + s3('http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{id}/{filename}', {auth} 'CSV', '{table_format}')""", ) == "0\n" ) @@ -881,7 +879,7 @@ def test_storage_s3_get_unstable(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64" - get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV" + get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') SETTINGS s3_max_single_read_retries=30 FORMAT CSV" result = run_query(instance, get_query) assert result.splitlines() == ["500001,500000,0"] From ac679892ab656788d9c9a23362ad82082e8620d4 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:12:11 +0000 Subject: [PATCH 0677/2361] fix --- tests/integration/test_storage_s3/test.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 65a5cdcad29..84c887be388 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -497,6 +497,7 @@ def test_put_get_with_globs(started_cluster): def test_multipart(started_cluster, maybe_auth, positive): # type: (ClickHouseCluster, str, bool) -> None + id = uuid.uuid4() bucket = ( started_cluster.minio_bucket if not maybe_auth @@ -519,7 +520,7 @@ def test_multipart(started_cluster, maybe_auth, positive): assert len(csv_data) > min_part_size_bytes - filename = "test_multipart.csv" + filename = f"{id}/test_multipart.csv" put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format( started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, @@ -691,7 +692,7 @@ def test_s3_glob_many_objects_under_selection(started_cluster): def create_files(thread_num): for f_num in range(thread_num * 63, thread_num * 63 + 63): path = f"folder1/file{f_num}.csv" - query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') settings s3_truncate_on_insert=1 values {}".format( started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, @@ -704,7 +705,7 @@ def test_s3_glob_many_objects_under_selection(started_cluster): jobs.append(threading.Thread(target=create_files, args=(thread_num,))) jobs[-1].start() - query = "insert into table function 
s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( + query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') settings s3_truncate_on_insert=1 values {}".format( started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, From 1d7fcade729b68276dd3c58bec4d81983ca476b1 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 22 Jul 2024 17:28:12 +0200 Subject: [PATCH 0678/2361] Fixes --- src/Common/FailPoint.cpp | 2 +- src/Storages/StorageKeeperMap.cpp | 4 ++++ .../0_stateless/03208_keepermap_incomplete_data_drop.sql | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index b952e9725e3..7b8b5036af0 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -58,7 +58,7 @@ static struct InitFiu PAUSEABLE(dummy_pausable_failpoint) \ ONCE(execute_query_calling_empty_set_result_func_on_exception) \ ONCE(receive_timeout_on_table_status_response) \ - ONCE(keepermap_fail_drop_data) \ + REGULAR(keepermap_fail_drop_data) \ namespace FailPoints diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 1c1de245d10..b32a2d302a7 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -417,6 +417,9 @@ StorageKeeperMap::StorageKeeperMap( auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent); + /// A table on the same Keeper path already exists, we just appended our table id to subscribe as a new replica + /// We still don't know if the table matches the expected metadata so table_is_valid is not changed + /// It will be checked lazily on the first operation if (code == Coordination::Error::ZOK) return; @@ -474,6 +477,7 @@ StorageKeeperMap::StorageKeeperMap( table_is_valid = true; + /// we are the first table created for the specified Keeper path, i.e. 
we are the first replica return; } diff --git a/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql b/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql index ad0603f12a9..e8d4a5bc298 100644 --- a/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql +++ b/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql @@ -3,5 +3,6 @@ DROP TABLE IF EXISTS 03208_keepermap_test SYNC; CREATE TABLE 03208_keepermap_test (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test03208') PRIMARY KEY(key); INSERT INTO 03208_keepermap_test VALUES (1, 11); SYSTEM ENABLE FAILPOINT keepermap_fail_drop_data; -DROP TABLE 03208_keepermap_test; +DROP TABLE 03208_keepermap_test SYNC; -- { KEEPER_EXCEPTION } +SYSTEM DISABLE FAILPOINT keepermap_fail_drop_data; CREATE TABLE 03208_keepermap_test_another (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test03208') PRIMARY KEY(key); From 4b0b5b7f2dd66ecfd9cb0b533166a229e501cd52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 22 Jul 2024 18:53:26 +0200 Subject: [PATCH 0679/2361] groupArrayIntersect: Fix internal name --- .../AggregateFunctionGroupArrayIntersect.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp index 20acda213da..591422adc57 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp @@ -69,7 +69,7 @@ public: : IAggregateFunctionDataHelper, AggregateFunctionGroupArrayIntersect>({argument_type}, parameters_, result_type_) {} - String getName() const override { return "GroupArrayIntersect"; } + String getName() const override { return "groupArrayIntersect"; } bool allocatesMemoryInArena() const override { return false; } @@ -213,7 +213,7 @@ public: : IAggregateFunctionDataHelper>({input_data_type_}, parameters_, result_type_) , input_data_type(result_type_) {} - String getName() const override { return "GroupArrayIntersect"; } + String getName() const override { return "groupArrayIntersect"; } bool allocatesMemoryInArena() const override { return true; } From 468bd551c64057fb056ed5434e68c26ded062a84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 22 Jul 2024 19:02:20 +0200 Subject: [PATCH 0680/2361] Fix includes --- .../AggregateFunctionGroupArrayIntersect.cpp | 24 ++++++++----------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp index 591422adc57..1529cd5899a 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp @@ -1,12 +1,12 @@ -#include -#include - #include #include #include #include -#include +#include +#include +#include +#include #include #include @@ -15,18 +15,14 @@ #include #include -#include -#include - #include #include -#include #include -#include -#include -#include -#include +#include +#include + +#include namespace DB @@ -240,7 +236,7 @@ public: { const char * begin = nullptr; StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin); - assert(serialized.data != nullptr); + chassert(serialized.data != nullptr); set.emplace(SerializedKeyHolder{serialized, *arena}, it, inserted); } } @@ 
-260,7 +256,7 @@ public: { const char * begin = nullptr; StringRef serialized = data_column->serializeValueIntoArena(offset + i, *arena, begin); - assert(serialized.data != nullptr); + chassert(serialized.data != nullptr); it = set.find(serialized); if (it != nullptr) From fd2d56d58a6ebfd7c995bfc88fafde75dd12381f Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 22 Jul 2024 19:05:49 +0200 Subject: [PATCH 0681/2361] Better processing of broken parts and their projections --- src/Storages/MergeTree/DataPartsExchange.cpp | 10 ++++++-- .../MergeTree/MergeTreeSequentialSource.cpp | 3 ++- src/Storages/MergeTree/checkDataPart.cpp | 25 ++++++++++++------- 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 8e73021d3e7..14c503d0420 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -224,14 +225,19 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write } catch (const Exception & e) { - if (e.code() != ErrorCodes::ABORTED && e.code() != ErrorCodes::CANNOT_WRITE_TO_OSTREAM) + if (e.code() != ErrorCodes::ABORTED + && e.code() != ErrorCodes::CANNOT_WRITE_TO_OSTREAM + && !isRetryableException(std::current_exception())) + { report_broken_part(); + } throw; } catch (...) { - report_broken_part(); + if (!isRetryableException(std::current_exception())) + report_broken_part(); throw; } } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 07476e8b2e9..6eec03c4f18 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace DB { @@ -281,7 +282,7 @@ try catch (...) { /// Suspicion of the broken part. A part is added to the queue for verification. 
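A condensed sketch of the guard pattern this commit applies at each place that can mark a part as broken (DataPartsExchange above, the sequential source here); the try body is a placeholder for whatever part I/O the caller performs, and the retryable set is the one listed in checkDataPart.cpp below.

    try
    {
        /// ... read or send the data part ...
    }
    catch (...)
    {
        /// Out-of-memory conditions, network errors, socket timeouts and failure to schedule a task
        /// are transient (see isRetryableException) and must not get the part reported as broken.
        if (!isRetryableException(std::current_exception()))
            storage.reportBrokenPart(data_part);
        throw;
    }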
- if (getCurrentExceptionCode() != ErrorCodes::MEMORY_LIMIT_EXCEEDED) + if (!isRetryableException(std::current_exception())) storage.reportBrokenPart(data_part); throw; } diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 774fd95ebc6..922e6e8fb5d 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -36,6 +36,7 @@ namespace ErrorCodes extern const int CANNOT_ALLOCATE_MEMORY; extern const int CANNOT_MUNMAP; extern const int CANNOT_MREMAP; + extern const int CANNOT_SCHEDULE_TASK; extern const int UNEXPECTED_FILE_IN_DATA_PART; extern const int NO_FILE_IN_DATA_PART; extern const int NETWORK_ERROR; @@ -85,7 +86,8 @@ bool isRetryableException(std::exception_ptr exception_ptr) { return isNotEnoughMemoryErrorCode(e.code()) || e.code() == ErrorCodes::NETWORK_ERROR - || e.code() == ErrorCodes::SOCKET_TIMEOUT; + || e.code() == ErrorCodes::SOCKET_TIMEOUT + || e.code() == ErrorCodes::CANNOT_SCHEDULE_TASK; } catch (const Poco::Net::NetException &) { @@ -329,16 +331,21 @@ static IMergeTreeDataPart::Checksums checkDataPart( projections_on_disk.erase(projection_file); } - if (throw_on_broken_projection && !broken_projections_message.empty()) + if (throw_on_broken_projection) { - throw Exception(ErrorCodes::BROKEN_PROJECTION, "{}", broken_projections_message); - } + if (!broken_projections_message.empty()) + { + throw Exception(ErrorCodes::BROKEN_PROJECTION, "{}", broken_projections_message); + } - if (require_checksums && !projections_on_disk.empty()) - { - throw Exception(ErrorCodes::UNEXPECTED_FILE_IN_DATA_PART, - "Found unexpected projection directories: {}", - fmt::join(projections_on_disk, ",")); + /// This one is actually not broken, just redundant files on disk which + /// MergeTree will never use. + if (require_checksums && !projections_on_disk.empty()) + { + throw Exception(ErrorCodes::UNEXPECTED_FILE_IN_DATA_PART, + "Found unexpected projection directories: {}", + fmt::join(projections_on_disk, ",")); + } } if (is_cancelled()) From 79b5fdc90a21094fd9978ab1174acd733df3cf0c Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 17:09:31 +0000 Subject: [PATCH 0682/2361] More tests --- .../01825_new_type_json_7.reference | 5 +++ .../0_stateless/01825_new_type_json_7.sh | 30 +++++++++++++++ .../01825_new_type_json_8.reference | 12 ++++++ .../0_stateless/01825_new_type_json_8.sh | 38 +++++++++++++++++++ 4 files changed, 85 insertions(+) create mode 100644 tests/queries/0_stateless/01825_new_type_json_7.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_7.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_8.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_8.sh diff --git a/tests/queries/0_stateless/01825_new_type_json_7.reference b/tests/queries/0_stateless/01825_new_type_json_7.reference new file mode 100644 index 00000000000..8e26e9b2c44 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_7.reference @@ -0,0 +1,5 @@ +('categories','Array(Nullable(String))') +('key','String') +v1 \N +v2 ['foo','bar'] +v3 \N diff --git a/tests/queries/0_stateless/01825_new_type_json_7.sh b/tests/queries/0_stateless/01825_new_type_json_7.sh new file mode 100755 index 00000000000..36483175df6 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_7.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_7;" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_7 (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat < Date: Mon, 22 Jul 2024 18:55:23 +0200 Subject: [PATCH 0683/2361] Let window view take comments --- src/Parsers/ParserCreateQuery.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 014dc7bd3bf..3e908ea3602 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -1176,6 +1176,7 @@ bool ParserCreateWindowViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & if (!select_p.parse(pos, select, expected)) return false; + auto comment = parseComment(pos, expected); auto query = std::make_shared(); node = query; @@ -1194,6 +1195,8 @@ bool ParserCreateWindowViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & query->children.push_back(query->database); if (query->table) query->children.push_back(query->table); + if (comment) + query->set(query->comment, comment); if (to_table) query->to_table_id = to_table->as()->getTableId(); From 240f04561e1ebca2fb1823a6ee34d4f2c15d1e66 Mon Sep 17 00:00:00 2001 From: joelynch Date: Mon, 22 Jul 2024 19:07:46 +0200 Subject: [PATCH 0684/2361] Fix docs for COMMENT clause --- docs/en/sql-reference/statements/create/table.md | 7 +------ docs/en/sql-reference/statements/create/view.md | 8 +++++++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index b866d0b9f5f..9c8984d698f 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -21,7 +21,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [COMMENT 'comment for column'] [compression_codec] [TTL expr2], ... ) ENGINE = engine - COMMENT 'comment for table' + [COMMENT 'comment for table'] ``` Creates a table named `table_name` in the `db` database or the current database if `db` is not set, with the structure specified in brackets and the `engine` engine. @@ -626,11 +626,6 @@ SELECT * FROM base.t1; You can add a comment to the table when you creating it. -:::note -The comment clause is supported by all table engines except [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) and [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md). -::: - - **Syntax** ``` sql diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 1fabb6d8cc7..2931f7020fb 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -16,6 +16,7 @@ Syntax: CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] [DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... +[COMMENT 'comment'] ``` Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. @@ -57,6 +58,7 @@ SELECT * FROM view(column1=value1, column2=value2 ...) 
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] [DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... +[COMMENT 'comment'] ``` :::tip @@ -161,6 +163,7 @@ RANDOMIZE FOR interval DEPENDS ON [db.]name [, [db.]name [, ...]] [TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY] AS SELECT ... +[COMMENT 'comment'] ``` where `interval` is a sequence of simple intervals: ```sql @@ -267,7 +270,10 @@ This is an experimental feature that may change in backwards-incompatible ways i ::: ``` sql -CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [INNER ENGINE engine] [ENGINE engine] [WATERMARK strategy] [ALLOWED_LATENESS interval_function] [POPULATE] AS SELECT ... GROUP BY time_window_function +CREATE WINDOW VIEW [IF NOT EXISTS] [db.]table_name [TO [db.]table_name] [INNER ENGINE engine] [ENGINE engine] [WATERMARK strategy] [ALLOWED_LATENESS interval_function] [POPULATE] +AS SELECT ... +GROUP BY time_window_function +[COMMENT 'comment'] ``` Window view can aggregate data by time window and output the results when the window is ready to fire. It stores the partial aggregation results in an inner(or specified) table to reduce latency and can push the processing result to a specified table or push notifications using the WATCH query. From 4a2708658d6d9b3ab0de9eab41d4e97bf04c3523 Mon Sep 17 00:00:00 2001 From: joelynch Date: Mon, 22 Jul 2024 19:08:04 +0200 Subject: [PATCH 0685/2361] Add more tables to table comment test --- .../0_stateless/01821_table_comment.reference | 8 ++- .../0_stateless/01821_table_comment.sql | 53 +++++++++++++++++-- 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01821_table_comment.reference b/tests/queries/0_stateless/01821_table_comment.reference index 05acabae3d4..cdd87df43d2 100644 --- a/tests/queries/0_stateless/01821_table_comment.reference +++ b/tests/queries/0_stateless/01821_table_comment.reference @@ -1,4 +1,8 @@ -t1 this is a temtorary table +t1 this is a temporary table t2 this is a MergeTree table t3 this is a Log table -CREATE TABLE default.t1\n(\n `n` Int8\n)\nENGINE = Memory\nCOMMENT \'this is a temtorary table\' +t4 this is a Kafka table +t5 this is a EmbeddedRocksDB table +t6 this is a Executable table +t7 this is a WindowView table +CREATE TABLE default.t1\n(\n `n` Int8\n)\nENGINE = Memory\nCOMMENT \'this is a temporary table\' diff --git a/tests/queries/0_stateless/01821_table_comment.sql b/tests/queries/0_stateless/01821_table_comment.sql index 4bd71d3e278..32b89af0750 100644 --- a/tests/queries/0_stateless/01821_table_comment.sql +++ b/tests/queries/0_stateless/01821_table_comment.sql @@ -9,7 +9,7 @@ CREATE TABLE t1 `n` Int8 ) ENGINE = Memory -COMMENT 'this is a temtorary table'; +COMMENT 'this is a temporary table'; CREATE TABLE t2 ( @@ -26,14 +26,57 @@ CREATE TABLE t3 ENGINE = Log COMMENT 'this is a Log table'; +CREATE TABLE t4 +( + `n` Int8 +) +ENGINE = Kafka +SETTINGS + kafka_broker_list = 'localhost:10000', + kafka_topic_list = 'test', + kafka_group_name = 'test', + kafka_format = 'JSONEachRow' +COMMENT 'this is a Kafka table'; + +CREATE TABLE t5 +( + `n` Int8 +) +ENGINE = EmbeddedRocksDB +PRIMARY KEY n +COMMENT 'this is a EmbeddedRocksDB table'; + +CREATE TABLE t6 +( + `n` Int8 +) +ENGINE = Executable('script.py', TabSeparated) +COMMENT 'this is a Executable table'; + +SET allow_experimental_window_view = 1; +-- New analyzer doesn't support WindowView tables +SET allow_experimental_analyzer = 
0; + +CREATE WINDOW VIEW t7 +( + `n` Int8 +) +ENGINE MergeTree +ORDER BY n +AS SELECT 1 +GROUP BY tumble(now(), toIntervalDay('1')) +COMMENT 'this is a WindowView table'; + +SET allow_experimental_analyzer = 1; + SELECT name, comment FROM system.tables -WHERE name IN ('t1', 't2', 't3') AND database = currentDatabase() order by name; +WHERE name IN ('t1', 't2', 't3', 't4', 't5', 't6', 't7') + AND database = currentDatabase() order by name; SHOW CREATE TABLE t1; -DROP TABLE t1; -DROP TABLE t2; -DROP TABLE t3; +DROP TABLE t1, t2, t3, t4, t5, t6; +DROP VIEW t7; From 454eb1af7ed69d00617e6bfa3daece508fcb565b Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 17:15:05 +0000 Subject: [PATCH 0686/2361] Fix invalid format detection in schema inference tmp --- src/Storages/ObjectStorage/ReadBufferIterator.cpp | 4 +++- src/Storages/StorageFile.cpp | 8 ++++++-- src/Storages/StorageURL.cpp | 4 +++- .../03023_invalid_format_detection.reference | 1 + .../0_stateless/03023_invalid_format_detection.sh | 10 ++++++++++ 5 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03023_invalid_format_detection.reference create mode 100755 tests/queries/0_stateless/03023_invalid_format_detection.sh diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.cpp b/src/Storages/ObjectStorage/ReadBufferIterator.cpp index 78cdc442f64..3d428073e88 100644 --- a/src/Storages/ObjectStorage/ReadBufferIterator.cpp +++ b/src/Storages/ObjectStorage/ReadBufferIterator.cpp @@ -162,7 +162,9 @@ ReadBufferIterator::Data ReadBufferIterator::next() { for (const auto & object_info : read_keys) { - if (auto format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName(object_info->getFileName())) + auto format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName(object_info->getFileName()); + /// Use this format only if we have a schema reader for it. + if (format_from_file_name && FormatFactory::instance().checkIfFormatHasAnySchemaReader(*format_from_file_name)) { format = format_from_file_name; break; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 7f39ff615f0..b4c03fa86c6 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -416,7 +416,9 @@ namespace { for (const auto & path : paths) { - if (auto format_from_path = FormatFactory::instance().tryGetFormatFromFileName(path)) + auto format_from_path = FormatFactory::instance().tryGetFormatFromFileName(path); + /// Use this format only if we have a schema reader for it. + if (format_from_path && FormatFactory::instance().checkIfFormatHasAnySchemaReader(*format_from_path)) { format = format_from_path; break; @@ -705,7 +707,9 @@ namespace /// If format is unknown we can try to determine it by the file name. if (!format) { - if (auto format_from_file = FormatFactory::instance().tryGetFormatFromFileName(*filename)) + auto format_from_file = FormatFactory::instance().tryGetFormatFromFileName(*filename); + /// Use this format only if we have a schema reader for it. 
+ if (format_from_file && FormatFactory::instance().checkIfFormatHasAnySchemaReader(*format_from_file)) format = format_from_file; } diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 895da028fc2..4d82c2bfadb 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -735,7 +735,9 @@ namespace { for (const auto & url : options) { - if (auto format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName(url)) + auto format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName(url); + /// Use this format only if we have a schema reader for it. + if (format_from_file_name && FormatFactory::instance().checkIfFormatHasAnySchemaReader(*format_from_file_name)) { format = format_from_file_name; break; diff --git a/tests/queries/0_stateless/03023_invalid_format_detection.reference b/tests/queries/0_stateless/03023_invalid_format_detection.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03023_invalid_format_detection.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03023_invalid_format_detection.sh b/tests/queries/0_stateless/03023_invalid_format_detection.sh new file mode 100755 index 00000000000..984a42aa218 --- /dev/null +++ b/tests/queries/0_stateless/03023_invalid_format_detection.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +touch $CLICKHOUSE_TEST_UNIQUE_NAME.xml +$CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.*')" 2>&1 | grep -c "CANNOT_DETECT_FORMAT" +rm $CLICKHOUSE_TEST_UNIQUE_NAME.xml + From ef4cb8b28324f1599c082b70e9c9c516c6c519d4 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 17:18:20 +0000 Subject: [PATCH 0687/2361] Add missing check --- src/Storages/ObjectStorage/ReadBufferIterator.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.cpp b/src/Storages/ObjectStorage/ReadBufferIterator.cpp index 3d428073e88..df78f128c80 100644 --- a/src/Storages/ObjectStorage/ReadBufferIterator.cpp +++ b/src/Storages/ObjectStorage/ReadBufferIterator.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include @@ -222,7 +223,9 @@ ReadBufferIterator::Data ReadBufferIterator::next() { for (auto it = read_keys.begin() + prev_read_keys_size; it != read_keys.end(); ++it) { - if (auto format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName((*it)->getFileName())) + auto format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName((*it)->getFileName()); + /// Use this format only if we have a schema reader for it. 
+ if (format_from_file_name && FormatFactory::instance().checkIfFormatHasAnySchemaReader(*format_from_file_name)) { format = format_from_file_name; break; From ea78ae85ed38e757f8dddd99d2018f01f3996f25 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 22 Jul 2024 19:23:45 +0200 Subject: [PATCH 0688/2361] Fix style --- src/Storages/MergeTree/MergeTreeSequentialSource.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 6eec03c4f18..311720728e7 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -20,12 +20,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int MEMORY_LIMIT_EXCEEDED; -} - - /// Lightweight (in terms of logic) stream for reading single part from /// MergeTree, used for merges and mutations. /// From 406addb2dcd0e82a71db06eb8ebaaa34bf1a543d Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 22 Jul 2024 19:39:37 +0200 Subject: [PATCH 0689/2361] Add aborted to retryable errors --- src/Storages/MergeTree/DataPartsExchange.cpp | 3 +-- src/Storages/MergeTree/checkDataPart.cpp | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 14c503d0420..061ee356203 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -225,8 +225,7 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write } catch (const Exception & e) { - if (e.code() != ErrorCodes::ABORTED - && e.code() != ErrorCodes::CANNOT_WRITE_TO_OSTREAM + if (e.code() != ErrorCodes::CANNOT_WRITE_TO_OSTREAM && !isRetryableException(std::current_exception())) { report_broken_part(); diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 922e6e8fb5d..fb86d9e7603 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -42,6 +42,7 @@ namespace ErrorCodes extern const int NETWORK_ERROR; extern const int SOCKET_TIMEOUT; extern const int BROKEN_PROJECTION; + extern const int ABORTED; } @@ -87,7 +88,8 @@ bool isRetryableException(std::exception_ptr exception_ptr) return isNotEnoughMemoryErrorCode(e.code()) || e.code() == ErrorCodes::NETWORK_ERROR || e.code() == ErrorCodes::SOCKET_TIMEOUT - || e.code() == ErrorCodes::CANNOT_SCHEDULE_TASK; + || e.code() == ErrorCodes::CANNOT_SCHEDULE_TASK + || e.code() == ErrorCodes::ABORTED; } catch (const Poco::Net::NetException &) { From b1029fbd671310a4c8d48070f87d84f33f8842fd Mon Sep 17 00:00:00 2001 From: xogoodnow Date: Mon, 22 Jul 2024 22:02:54 +0330 Subject: [PATCH 0690/2361] Fixed the style issue --- src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index e217d93975d..f51a7a913b8 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -37,6 +37,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; extern const int BAD_QUERY_PARAMETER; + extern const int QUERY_NOT_ALLOWED; } namespace From 71cdf82643fb17b5b68003df314c54ebbca0842f Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 22 Jul 2024 18:41:30 
+0000 Subject: [PATCH 0691/2361] Fix: reset is_async_state flag --- src/Processors/Sources/RemoteSource.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Processors/Sources/RemoteSource.cpp b/src/Processors/Sources/RemoteSource.cpp index 46c27676e12..2f9a30296be 100644 --- a/src/Processors/Sources/RemoteSource.cpp +++ b/src/Processors/Sources/RemoteSource.cpp @@ -115,7 +115,10 @@ void RemoteSource::onAsyncJobReady() if (!was_query_sent) return; + chassert(!preprocessed_packet); preprocessed_packet = query_executor->processParallelReplicaPacketIfAny(); + if (preprocessed_packet) + is_async_state = false; } std::optional RemoteSource::tryGenerate() From 6e159791c2a4efb38784338ddcd445b008e1a3f9 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 19:26:44 +0000 Subject: [PATCH 0692/2361] Fix failing tests, add even more tests --- docs/en/sql-reference/data-types/json.md | 553 +---- docs/en/sql-reference/data-types/newjson.md | 516 ++++ .../sql-reference/data-types/object-json.md | 83 - src/Columns/ColumnObject.cpp | 14 + src/Columns/ColumnObject.h | 6 +- src/Interpreters/convertFieldToType.cpp | 7 + .../01825_new_type_json_12.reference | 7 +- .../01825_new_type_json_3.reference.j2 | 4 +- .../01825_new_type_json_6.reference | 2 +- .../01825_new_type_json_9.reference | 3 + .../0_stateless/01825_new_type_json_9.sql | 16 + .../01825_new_type_json_bools.reference | 1 + .../0_stateless/01825_new_type_json_bools.sql | 10 + .../01825_new_type_json_btc.reference | 2 +- .../01825_new_type_json_distributed.reference | 4 + .../01825_new_type_json_distributed.sql | 18 + .../01825_new_type_json_in_array.reference | 28 + .../01825_new_type_json_in_array.sql | 34 + ...825_new_type_json_in_other_types.reference | 17 + .../01825_new_type_json_in_other_types.sh | 90 + ...1825_new_type_json_insert_select.reference | 27 + .../01825_new_type_json_insert_select.sql | 67 + ...1825_new_type_json_missed_values.reference | 4 + .../01825_new_type_json_missed_values.sql | 19 + ...825_new_type_json_multiple_files.reference | 22 + .../01825_new_type_json_multiple_files.sh | 43 + .../01825_new_type_json_mutations.reference | 7 + .../01825_new_type_json_mutations.sql | 21 + .../01825_new_type_json_order_by.reference | 6 + .../01825_new_type_json_order_by.sql | 6 + .../01825_new_type_json_partitions.reference | 2 + .../01825_new_type_json_partitions.sql | 14 + .../0_stateless/01825_type_json_10.reference | 9 + .../queries/0_stateless/01825_type_json_5.sql | 4 +- ...02421_new_type_json_async_insert.reference | 5 + .../02421_new_type_json_async_insert.sh | 21 + .../02421_new_type_json_empty_parts.reference | 16 + .../02421_new_type_json_empty_parts.sh | 48 + ...son_nested_arrays_with_same_keys.reference | 1 + ...2_new_json_nested_arrays_with_same_keys.sh | 27 + ...3_new_type_json_attach_partition.reference | 2 + .../02553_new_type_json_attach_partition.sql | 14 + .../03207_json_read_subcolumns_1.reference | 2080 ----------------- ..._subcolumns_1_compact_merge_tree.reference | 831 +++++++ ...n_read_subcolumns_1_compact_merge_tree.sh} | 18 - ...07_json_read_subcolumns_1_memory.reference | 413 ++++ .../03207_json_read_subcolumns_1_memory.sql | 87 + ...ead_subcolumns_1_wide_merge_tree.reference | 831 +++++++ ..._json_read_subcolumns_1_wide_merge_tree.sh | 107 + ...on_read_subcolumns_2_compact_merge_tree.sh | 2 +- .../03207_json_read_subcolumns_2_memory.sh | 2 +- ..._json_read_subcolumns_2_wide_merge_tree.sh | 2 +- ...on_read_subcolumns_2_compact_merge_tree.sh | 2 +- ..._array_of_json_read_subcolumns_2_memory.sh 
| 2 +- ..._json_read_subcolumns_2_wide_merge_tree.sh | 2 +- 55 files changed, 3484 insertions(+), 2695 deletions(-) create mode 100644 docs/en/sql-reference/data-types/newjson.md delete mode 100644 docs/en/sql-reference/data-types/object-json.md create mode 100644 tests/queries/0_stateless/01825_new_type_json_9.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_9.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_bools.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_bools.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_distributed.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_distributed.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_in_array.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_in_array.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_in_other_types.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_in_other_types.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_insert_select.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_insert_select.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_missed_values.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_missed_values.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_multiple_files.reference create mode 100755 tests/queries/0_stateless/01825_new_type_json_multiple_files.sh create mode 100644 tests/queries/0_stateless/01825_new_type_json_mutations.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_mutations.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_order_by.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_order_by.sql create mode 100644 tests/queries/0_stateless/01825_new_type_json_partitions.reference create mode 100644 tests/queries/0_stateless/01825_new_type_json_partitions.sql create mode 100644 tests/queries/0_stateless/02421_new_type_json_async_insert.reference create mode 100755 tests/queries/0_stateless/02421_new_type_json_async_insert.sh create mode 100644 tests/queries/0_stateless/02421_new_type_json_empty_parts.reference create mode 100755 tests/queries/0_stateless/02421_new_type_json_empty_parts.sh create mode 100644 tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference create mode 100755 tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh create mode 100644 tests/queries/0_stateless/02553_new_type_json_attach_partition.reference create mode 100644 tests/queries/0_stateless/02553_new_type_json_attach_partition.sql delete mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1.reference create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference rename tests/queries/0_stateless/{03207_json_read_subcolumns_1.sh => 03207_json_read_subcolumns_1_compact_merge_tree.sh} (95%) create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference create mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference create mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index 
494bfba3173..c6fa6f0e882 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -1,516 +1,83 @@ --- -slug: /en/sql-reference/data-types/json -sidebar_position: 63 -sidebar_label: JSON -keywords: [json, data type] +slug: /en/sql-reference/data-types/object-data-type +sidebar_position: 26 +sidebar_label: Object Data Type +keywords: [object, data type] --- -# JSON +# Object Data Type (deprecated) + +**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). + +
Stores JavaScript Object Notation (JSON) documents in a single column. -:::note -This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. -If you want to use JSON type, set `allow_experimental_json_type = 1`. -::: +`JSON` can be used as an alias to `Object('json')` when setting [use_json_alias_for_old_object_type](../../operations/settings/settings.md#usejsonaliasforoldobjecttype) is enabled. -To declare a column of `JSON` type, use the following syntax: +## Example -``` sql - JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP REGEXP 'paths_regexp') -``` -Where: -- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1024`. -- `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`. -- `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type. -- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. If specified path is a nested JSON object, the whole nested object will be skipped. -- `SKIP REGEXP 'path_regexp'` is an optional hint with a regular expression that is used to skip paths during JSON parsing. All paths that match this regular expression will never be stored in the JSON column. +**Example 1** -## Creating JSON - -Using `JSON` type in table column definition: +Creating a table with a `JSON` column and inserting data into it: ```sql -CREATE TABLE test (json JSON) ENGINE = Memory; -INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); -SELECT json FROM test; +CREATE TABLE json +( + o JSON +) +ENGINE = Memory +``` + +```sql +INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}') +``` + +```sql +SELECT o.a, o.b.c, o.b.d[3] FROM json ``` ```text -┌─json────────────────────────────────────────┠-│ {"a":{"b":"42"},"c":["1","2","3"]} │ -│ {"f":"Hello, World!"} │ -│ {"a":{"b":"43","e":"10"},"c":["4","5","6"]} │ -└─────────────────────────────────────────────┘ +┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┠+│ 1 │ 2 │ 3 │ +└─────┴───────┴────────────────────────┘ +``` + +**Example 2** + +To be able to create an ordered `MergeTree` family table, the sorting key has to be extracted into its column. 
For example, to insert a file of compressed HTTP access logs in JSON format: + +```sql +CREATE TABLE logs +( + timestamp DateTime, + message JSON +) +ENGINE = MergeTree +ORDER BY timestamp ``` ```sql -CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; -INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); -SELECT json FROM test; +INSERT INTO logs +SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json +FROM file('access.json.gz', JSONAsString) +``` + +## Displaying JSON columns + +When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can also display the field names by setting `output_format_json_named_tuples_as_objects = 1`: + +```sql +SET output_format_json_named_tuples_as_objects = 1 + +SELECT * FROM json FORMAT JSONEachRow ``` ```text -┌─json──────────────────────────────┠-│ {"a":{"b":42},"c":[1,2,3]} │ -│ {"a":{"b":0},"f":"Hello, World!"} │ -│ {"a":{"b":43},"c":[4,5,6]} │ -└───────────────────────────────────┘ +{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}} ``` -Using CAST from 'String': +## Related Content -```sql -SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json; -``` - -```text -┌─json───────────────────────────────────────────┠-│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ -└────────────────────────────────────────────────┘ -``` - -CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later. - -## Reading JSON paths as subcolumns - -JSON type supports reading every path as a separate subcolumn. If type of the requested path was not specified in the JSON type declaration, the subcolumn of the path will always have type [Dynamic](/docs/en/sql-reference/data-types/dynamic.md). 
- -For example: - -```sql -CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; -INSERT INTO test VALUES ('{"a" : {"b" : 42, "g" : 42.42}, "c" : [1, 2, 3], "d" : "2020-01-01"}'), ('{"f" : "Hello, World!", "d" : "2020-01-02"}'), ('{"a" : {"b" : 43, "e" : 10, "g" : 43.43}, "c" : [4, 5, 6]}'); -SELECT json FROM test; -``` - -```text -┌─json──────────────────────────────────────────────────┠-│ {"a":{"b":42,"g":42.42},"c":[1,2,3],"d":"2020-01-01"} │ -│ {"a":{"b":0},"d":"2020-01-02","f":"Hello, World!"} │ -│ {"a":{"b":43,"g":43.43},"c":[4,5,6]} │ -└───────────────────────────────────────────────────────┘ -``` - -```sql -SELECT json.a.b, json.a.g, json.c, json.d FROM test; -``` - -```text -┌─json.a.b─┬─json.a.g─┬─json.c──┬─json.d─────┠-│ 42 │ 42.42 │ [1,2,3] │ 2020-01-01 │ -│ 0 │ á´ºáµá´¸á´¸ │ á´ºáµá´¸á´¸ │ 2020-01-02 │ -│ 43 │ 43.43 │ [4,5,6] │ á´ºáµá´¸á´¸ │ -└──────────┴──────────┴─────────┴────────────┘ -``` - -If the requested path wasn't found in the data, it will be filled with `NULL` values: - -```sql -SELECT json.non.existing.path FROM test; -``` - -```text -┌─json.non.existing.path─┠-│ á´ºáµá´¸á´¸ │ -│ á´ºáµá´¸á´¸ │ -│ á´ºáµá´¸á´¸ │ -└────────────────────────┘ -``` - -Let's check the data types of returned subcolumns: -```sql -SELECT toTypeName(json.a.b), toTypeName(json.a.g), toTypeName(json.c), toTypeName(json.d) FROM test; -``` - -```text -┌─toTypeName(json.a.b)─┬─toTypeName(json.a.g)─┬─toTypeName(json.c)─┬─toTypeName(json.d)─┠-│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ -│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ -│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ -└──────────────────────┴──────────────────────┴────────────────────┴────────────────────┘ -``` - -As we can see, for `a.b` the type is `UInt32` as we specified in the JSON type declaration, and for all other subcolumns the type is `Dynamic`. - -It is also possible to read subcolumns of a `Dynamic` type using special syntax `json.some.path.:TypeName`: - -```sql -select json.a.g.:Float64, dynamicType(json.a.g), json.d.:Date, dynamicType(json.d) FROM test; -``` - -```text -┌─json.a.g.:`Float64`─┬─dynamicType(json.a.g)─┬─json.d.:`Date`─┬─dynamicType(json.d)─┠-│ 42.42 │ Float64 │ 2020-01-01 │ Date │ -│ á´ºáµá´¸á´¸ │ None │ 2020-01-02 │ Date │ -│ 43.43 │ Float64 │ á´ºáµá´¸á´¸ │ None │ -└─────────────────────┴───────────────────────┴────────────────┴─────────────────────┘ -``` - -`Dynamic` subcolumns can be casted to any data type. In this case the exception will be thrown if internal type inside `Dynamic` cannot be casted to the requested type: - -```sql -select json.a.g::UInt64 as uint FROM test; -``` - -```text -┌─uint─┠-│ 42 │ -│ 0 │ -│ 43 │ -└──────┘ -``` - -```sql -select json.a.g::UUID as float FROM test; -``` - -```text -Received exception: -Code: 48. DB::Exception: Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted: while executing 'FUNCTION CAST(__table1.json.a.g :: 2, 'UUID'_String :: 1) -> CAST(__table1.json.a.g, 'UUID'_String) UUID : 0'. 
(NOT_IMPLEMENTED) -``` - -## Reading JSON sub-objects as subcolumns - -JSON type supports reading nested objects as subcolumns with type `JSON` using special syntax `json.^some.path`: - -```sql -CREATE TABLE test (json JSON) ENGINE = Memory; -INSERT INTO test VALUES ('{"a" : {"b" : {"c" : 42, "g" : 42.42}}, "c" : [1, 2, 3], "d" : {"e" : {"f" : {"g" : "Hello, World", "h" : [1, 2, 3]}}}}'), ('{"f" : "Hello, World!", "d" : {"e" : {"f" : {"h" : [4, 5, 6]}}}}'), ('{"a" : {"b" : {"c" : 43, "e" : 10, "g" : 43.43}}, "c" : [4, 5, 6]}'); -SELECT json FROM test; -``` - -```text -┌─json────────────────────────────────────────────────────────────────────────────────────────┠-│ {"a":{"b":{"c":42,"g":42.42}},"c":[1,2,3],"d":{"e":{"f":{"g":"Hello, World","h":[1,2,3]}}}} │ -│ {"d":{"e":{"f":{"h":[4,5,6]}}},"f":"Hello, World!"} │ -│ {"a":{"b":{"c":43,"e":10,"g":43.43}},"c":[4,5,6]} │ -└─────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -```sql -SELECT json.^a.b, json.^d.e.f FROM test; -``` - -```text -┌─json.^`a`.b───────────────┬─json.^`d`.e.f────────────────────┠-│ {"c":42,"g":42.42} │ {"g":"Hello, World","h":[1,2,3]} │ -│ {} │ {"h":[4,5,6]} │ -│ {"c":43,"e":10,"g":43.43} │ {} │ -└───────────────────────────┴──────────────────────────────────┘ -``` - -:::note -Reading sub-objects as subcolumns may be inefficient, as this may require almost full scan of the JSON data. -::: - -## Types inference for paths - -During JSON parsing ClickHouse tries to detect the most appropriate data type for each JSON path. It works similar to [automatic schema inference from input data](/docs/en/interfaces/schema-inference.md) and controlled by the same settings: - -- [input_format_try_infer_integers](/docs/en/interfaces/schema-inference.md#inputformattryinferintegers) -- [input_format_try_infer_dates](/docs/en/interfaces/schema-inference.md#inputformattryinferdates) -- [input_format_try_infer_datetimes](/docs/en/interfaces/schema-inference.md#inputformattryinferdatetimes) -- [schema_inference_make_columns_nullable](/docs/en/interfaces/schema-inference.md#schemainferencemakecolumnsnullable) -- [input_format_json_try_infer_numbers_from_strings](/docs/en/interfaces/schema-inference.md#inputformatjsontryinfernumbersfromstrings) -- [input_format_json_infer_incomplete_types_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsoninferincompletetypesasstrings) -- [input_format_json_read_numbers_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadnumbersasstrings) -- [input_format_json_read_bools_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasstrings) -- [input_format_json_read_bools_as_numbers](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasnumbers) -- [input_format_json_read_arrays_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadarraysasstrings) - -Let's see some examples: - -```sql -SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=1, input_format_try_infer_datetimes=1; -``` - -```text -┌─paths_with_types─────────────────┠-│ {'a':'Date','b':'DateTime64(9)'} │ -└──────────────────────────────────┘ -``` - -```sql -SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=0, input_format_try_infer_datetimes=0; -``` - -```text -┌─paths_with_types────────────┠-│ {'a':'String','b':'String'} │ 
-└─────────────────────────────┘ -``` - -```sql -SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=1; -``` - -```text -┌─paths_with_types───────────────┠-│ {'a':'Array(Nullable(Int64))'} │ -└────────────────────────────────┘ -``` - -```sql -SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=0; -``` - -```text -┌─paths_with_types─────┠-│ {'a':'Array(Int64)'} │ -└──────────────────────┘ -``` - -## Handling arrays of JSON objects - -JSON paths that contains an array of objects are parsed as type `Array(JSON)` and inserted into `Dynamic` column for this path. To read an array of objects you can extract it from `Dynamic` column as a subcolumn: - -```sql -CREATE TABLE test (json JSON) ENGINE = Memory; -INSERT INTO test VALUES -('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : [[{"g" : 42.42}]], "k" : {"j" : 1000}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : [[{"g" : 43.43, "h" : "2020-01-01"}]], "k" : {"j" : 2000}}]}}'), -('{"a" : {"b" : [1, 2, 3]}}'), -('{"a" : {"b" : [{"c" : 44, "f" : [[{"h" : "2020-01-02"}]]}, {"e" : [4, 5, 6], "d" : "World", "f" : [[{"g" : 44.44}]], "k" : {"j" : 3000}}]}}'); -SELECT json FROM test; -``` - -```text3 -┌─json────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┠-│ {"a":{"b":[{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}]}} │ -│ {"a":{"b":["1","2","3"]}} │ -│ {"a":{"b":[{"c":"44","f":[[{"h":"2020-01-02"}]]},{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}]}} │ -└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ -``` - -```sql -SELECT json.a.b, dynamicType(json.a.b) FROM test; -``` - -```text -┌─json.a.b──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)────────────────────────────────────┠-│ ['{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ -│ [1,2,3] │ Array(Nullable(Int64)) │ -│ ['{"c":"44","f":[[{"h":"2020-01-02"}]]}','{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ -└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────┘ -``` - -As you can notice, the `max_dynamic_types/max_dynamic_paths` parameters of the nested `JSON` type were reduced compared to the default values. It's needed to avoid number of subcolumns to grow uncontrolled on nested arrays of JSON objects. 
- -Let's try to read subcolumns from this nested `JSON` column: - -```sql -SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f, json.a.b.:`Array(JSON)`.d FROM test; -``` - -```text -┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┠-│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ -│ [] │ [] │ [] │ -│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ -└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ -``` - -We can avoid writing `Array(JSON)` subcolumn name using special syntax: - -```sql -SELECT json.a.b[].c, json.a.b[].f, json.a.b[].d FROM test; -``` - -```text -┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┠-│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ -│ [] │ [] │ [] │ -│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ -└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ -``` - -The number of `[]` after path indicates the array level. `json.path[][]` will be transformed to `json.path.:Array(Array(JSON))` - -Let's check the paths and types inside our `Array(JSON)`: - -```sql -SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b[]))) FROM test; -``` - -```text -┌─arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b.:`Array(JSON)`)))──┠-│ ('c','Int64') │ -│ ('d','String') │ -│ ('f','Array(Array(JSON(max_dynamic_types=8, max_dynamic_paths=64)))') │ -│ ('k.j','Int64') │ -│ ('e','Array(Nullable(Int64))') │ -└───────────────────────────────────────────────────────────────────────┘ -``` - -Let's read subcolumns from `Array(JSON)` column: - -```sql -SELECT json.a.b[].c.:Int64, json.a.b[].f[][].g.:Float64, json.a.b[].f[][].h.:Date FROM test; -``` - -```text -┌─json.a.b.:`Array(JSON)`.c.:`Int64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.g.:`Float64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.h.:`Date`─┠-│ [42,43,NULL] │ [[[42.42]],[],[[43.43]]] │ [[[NULL]],[],[['2020-01-01']]] │ -│ [] │ [] │ [] │ -│ [44,NULL] │ [[[NULL]],[[44.44]]] │ [[['2020-01-02']],[[NULL]]] │ -└────────────────────────────────────┴──────────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ -``` - -We can also read sub-object subcolumns from nested `JSON` column: - -```sql -SELECT json.a.b[].^k FROM test -``` - -```text -┌─json.a.b.:`Array(JSON)`.^`k`─────────┠-│ ['{"j":"1000"}','{}','{"j":"2000"}'] │ -│ [] │ -│ ['{}','{"j":"3000"}'] │ -└──────────────────────────────────────┘ -``` - -## Reading JSON type from the data - -All text formats (JSONEachRow, TSV, CSV, CustomSeparated, Values, etc) supports reading `JSON` type. 
- -Examples: - -```sql -SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP d.e, SKIP REGEXP \'b.*\')', ' -{"json" : {"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}} -{"json" : {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}} -{"json" : {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}} -{"json" : {"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}} -{"json" : {"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}} -') -``` - -```text -┌─json──────────────────────────────────────────────────────────┠-│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │ -│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │ -│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │ -│ {"a":{"b":{"c":4}},"c":"43"} │ -│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │ -└───────────────────────────────────────────────────────────────┘ -``` - -For text formats like CSV/TSV/etc `JSON` is parsed from a string containing JSON object - -```sql -SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP REGEXP \'b.*\')', -'{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}} -{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}} -{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"} -{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43} -{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}') -``` - -```text -┌─json──────────────────────────────────────────────────────────┠-│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │ -│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │ -│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │ -│ {"a":{"b":{"c":4}},"c":"43"} │ -│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │ -└───────────────────────────────────────────────────────────────┘ -``` - -## Reaching the limit of dynamic paths inside JSON - -`JSON` data type can store only limited number of paths as separate subcolumns inside. By default, this limit is 1024, but you can change it in type declaration using parameter `max_dynamic_paths`. -When the limit is reached, all new paths inserted to `JSON` column will be stored in a single shared data structure. It's still possible to read such paths as subcolumns, but it will require reading the whole -shared data structure to extract the values of this path. This limit is needed to avoid the enormous number of different subcolumns that can make the table unusable. - -Let's see what happens when the limit is reached in different scenarios. - -### Reaching the limit during data parsing - -During parsing of `JSON` object from the data, when the limit is reached for current block of data, all new paths will be stored in a shared data structure. 
We can check it using introspection functions `JSONDynamicPaths, JSONSharedDataPaths`: - -```sql -SELECT json, JSONDynamicPaths(json), JSONSharedDataPaths(json) FROM format(JSONEachRow, 'json JSON(max_dynamic_paths=3)', ' -{"json" : {"a" : {"b" : 42}, "c" : [1, 2, 3]}} -{"json" : {"a" : {"b" : 43}, "d" : "2020-01-01"}} -{"json" : {"a" : {"b" : 44}, "c" : [4, 5, 6]}} -{"json" : {"a" : {"b" : 43}, "d" : "2020-01-02", "e" : "Hello", "f" : {"g" : 42.42}}} -{"json" : {"a" : {"b" : 43}, "c" : [7, 8, 9], "f" : {"g" : 43.43}, "h" : "World"}} -') -``` - -```text -┌─json───────────────────────────────────────────────────────────┬─JSONDynamicPaths(json)─┬─JSONSharedDataPaths(json)─┠-│ {"a":{"b":"42"},"c":["1","2","3"]} │ ['a.b','c','d'] │ [] │ -│ {"a":{"b":"43"},"d":"2020-01-01"} │ ['a.b','c','d'] │ [] │ -│ {"a":{"b":"44"},"c":["4","5","6"]} │ ['a.b','c','d'] │ [] │ -│ {"a":{"b":"43"},"d":"2020-01-02","e":"Hello","f":{"g":42.42}} │ ['a.b','c','d'] │ ['e','f.g'] │ -│ {"a":{"b":"43"},"c":["7","8","9"],"f":{"g":43.43},"h":"World"} │ ['a.b','c','d'] │ ['f.g','h'] │ -└────────────────────────────────────────────────────────────────┴────────────────────────┴───────────────────────────┘ -``` - -As we can see, after inserting paths `e` and `f.g` the limit was reached and we inserted them into shared data structure. - -### During merges of data parts in MergeTree table engines - -During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns. -In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains -the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation. - -Let's see an example of such merge. 
First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths: - -```sql -CREATE TABLE test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree ORDER BY id; -SYSTEM STOP MERGES test; -INSERT INTO test SELECT number, formatRow('JSONEachRow', number as a) FROM numbers(5); -INSERT INTO test SELECT number, formatRow('JSONEachRow', number as b) FROM numbers(4); -INSERT INTO test SELECT number, formatRow('JSONEachRow', number as c) FROM numbers(3); -INSERT INTO test SELECT number, formatRow('JSONEachRow', number as d) FROM numbers(2); -INSERT INTO test SELECT number, formatRow('JSONEachRow', number as e) FROM numbers(1); -``` - -Each insert will create a separate data pert with `JSON` column containing single path: -```sql -SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part ASC -``` - -```text -┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┠-│ 5 │ ['a'] │ [] │ all_1_1_0 │ -│ 4 │ ['b'] │ [] │ all_2_2_0 │ -│ 3 │ ['c'] │ [] │ all_3_3_0 │ -│ 2 │ ['d'] │ [] │ all_4_4_0 │ -│ 1 │ ['e'] │ [] │ all_5_5_0 │ -└─────────┴───────────────┴───────────────────┴───────────┘ - -``` - -Now, let's merge all parts into one and see what will happen: - -```sql -SYSTEM START MERGES test; -OPTIMIZE TABLE test FINAL; -SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; -``` - -```text -┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┠-│ 1 │ ['a','b','c'] │ ['e'] │ all_1_5_2 │ -│ 2 │ ['a','b','c'] │ ['d'] │ all_1_5_2 │ -│ 12 │ ['a','b','c'] │ [] │ all_1_5_2 │ -└─────────┴───────────────┴───────────────────┴───────────┘ -``` - -As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and moved paths `e` and `d` to shared data structure. - -## Introspection functions - -There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes). - -## Tips for better usage of the JSON type - -Before creating `JSON` column and loading data into it, consider the following tips: - -- Investigate your data and specify as many path hints with types as you can. It will make the storage and the reading much more efficient. -- Think about what paths you will need and what paths you will never need. Specify paths that you won't need in the SKIP section and SKIP REGEXP if needed. It will improve the storage. -- Don't set `max_dynamic_paths` parameter to very high values, it can make the storage and reading less efficient. 
+- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) +- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) \ No newline at end of file diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md new file mode 100644 index 00000000000..494bfba3173 --- /dev/null +++ b/docs/en/sql-reference/data-types/newjson.md @@ -0,0 +1,516 @@ +--- +slug: /en/sql-reference/data-types/json +sidebar_position: 63 +sidebar_label: JSON +keywords: [json, data type] +--- + +# JSON + +Stores JavaScript Object Notation (JSON) documents in a single column. + +:::note +This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. +If you want to use JSON type, set `allow_experimental_json_type = 1`. +::: + +To declare a column of `JSON` type, use the following syntax: + +``` sql + JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP REGEXP 'paths_regexp') +``` +Where: +- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. Default value of `max_dynamic_paths` is `1024`. +- `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`. +- `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type. +- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. If specified path is a nested JSON object, the whole nested object will be skipped. +- `SKIP REGEXP 'path_regexp'` is an optional hint with a regular expression that is used to skip paths during JSON parsing. All paths that match this regular expression will never be stored in the JSON column. 
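+
+Putting these parameters together, a minimal sketch of a declaration (the paths and limits here are hypothetical, shown only for illustration):
+
+```sql
+SET allow_experimental_json_type = 1;
+
+CREATE TABLE events
+(
+    data JSON(max_dynamic_paths=128, max_dynamic_types=8, user.id UInt64, SKIP debug, SKIP REGEXP 'tmp.*')
+)
+ENGINE = MergeTree
+ORDER BY tuple();
+```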
+ +## Creating JSON + +Using `JSON` type in table column definition: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json────────────────────────────────────────┠+│ {"a":{"b":"42"},"c":["1","2","3"]} │ +│ {"f":"Hello, World!"} │ +│ {"a":{"b":"43","e":"10"},"c":["4","5","6"]} │ +└─────────────────────────────────────────────┘ +``` + +```sql +CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json──────────────────────────────┠+│ {"a":{"b":42},"c":[1,2,3]} │ +│ {"a":{"b":0},"f":"Hello, World!"} │ +│ {"a":{"b":43},"c":[4,5,6]} │ +└───────────────────────────────────┘ +``` + +Using CAST from 'String': + +```sql +SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json; +``` + +```text +┌─json───────────────────────────────────────────┠+│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later. + +## Reading JSON paths as subcolumns + +JSON type supports reading every path as a separate subcolumn. If type of the requested path was not specified in the JSON type declaration, the subcolumn of the path will always have type [Dynamic](/docs/en/sql-reference/data-types/dynamic.md). + +For example: + +```sql +CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42, "g" : 42.42}, "c" : [1, 2, 3], "d" : "2020-01-01"}'), ('{"f" : "Hello, World!", "d" : "2020-01-02"}'), ('{"a" : {"b" : 43, "e" : 10, "g" : 43.43}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json──────────────────────────────────────────────────┠+│ {"a":{"b":42,"g":42.42},"c":[1,2,3],"d":"2020-01-01"} │ +│ {"a":{"b":0},"d":"2020-01-02","f":"Hello, World!"} │ +│ {"a":{"b":43,"g":43.43},"c":[4,5,6]} │ +└───────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.a.b, json.a.g, json.c, json.d FROM test; +``` + +```text +┌─json.a.b─┬─json.a.g─┬─json.c──┬─json.d─────┠+│ 42 │ 42.42 │ [1,2,3] │ 2020-01-01 │ +│ 0 │ á´ºáµá´¸á´¸ │ á´ºáµá´¸á´¸ │ 2020-01-02 │ +│ 43 │ 43.43 │ [4,5,6] │ á´ºáµá´¸á´¸ │ +└──────────┴──────────┴─────────┴────────────┘ +``` + +If the requested path wasn't found in the data, it will be filled with `NULL` values: + +```sql +SELECT json.non.existing.path FROM test; +``` + +```text +┌─json.non.existing.path─┠+│ á´ºáµá´¸á´¸ │ +│ á´ºáµá´¸á´¸ │ +│ á´ºáµá´¸á´¸ │ +└────────────────────────┘ +``` + +Let's check the data types of returned subcolumns: +```sql +SELECT toTypeName(json.a.b), toTypeName(json.a.g), toTypeName(json.c), toTypeName(json.d) FROM test; +``` + +```text +┌─toTypeName(json.a.b)─┬─toTypeName(json.a.g)─┬─toTypeName(json.c)─┬─toTypeName(json.d)─┠+│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +└──────────────────────┴──────────────────────┴────────────────────┴────────────────────┘ +``` + +As we can see, for `a.b` the type is `UInt32` as we specified in the JSON type declaration, and for all other subcolumns the type is `Dynamic`. 
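+
+Typed path hints behave like ordinary columns in queries; a minimal sketch against the same `test` table (the threshold value is arbitrary):
+
+```sql
+SELECT count() FROM test WHERE json.a.b > 42;
+```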
+ +It is also possible to read subcolumns of a `Dynamic` type using special syntax `json.some.path.:TypeName`: + +```sql +select json.a.g.:Float64, dynamicType(json.a.g), json.d.:Date, dynamicType(json.d) FROM test; +``` + +```text +┌─json.a.g.:`Float64`─┬─dynamicType(json.a.g)─┬─json.d.:`Date`─┬─dynamicType(json.d)─┠+│ 42.42 │ Float64 │ 2020-01-01 │ Date │ +│ á´ºáµá´¸á´¸ │ None │ 2020-01-02 │ Date │ +│ 43.43 │ Float64 │ á´ºáµá´¸á´¸ │ None │ +└─────────────────────┴───────────────────────┴────────────────┴─────────────────────┘ +``` + +`Dynamic` subcolumns can be casted to any data type. In this case the exception will be thrown if internal type inside `Dynamic` cannot be casted to the requested type: + +```sql +select json.a.g::UInt64 as uint FROM test; +``` + +```text +┌─uint─┠+│ 42 │ +│ 0 │ +│ 43 │ +└──────┘ +``` + +```sql +select json.a.g::UUID as float FROM test; +``` + +```text +Received exception: +Code: 48. DB::Exception: Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted: while executing 'FUNCTION CAST(__table1.json.a.g :: 2, 'UUID'_String :: 1) -> CAST(__table1.json.a.g, 'UUID'_String) UUID : 0'. (NOT_IMPLEMENTED) +``` + +## Reading JSON sub-objects as subcolumns + +JSON type supports reading nested objects as subcolumns with type `JSON` using special syntax `json.^some.path`: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : {"c" : 42, "g" : 42.42}}, "c" : [1, 2, 3], "d" : {"e" : {"f" : {"g" : "Hello, World", "h" : [1, 2, 3]}}}}'), ('{"f" : "Hello, World!", "d" : {"e" : {"f" : {"h" : [4, 5, 6]}}}}'), ('{"a" : {"b" : {"c" : 43, "e" : 10, "g" : 43.43}}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json────────────────────────────────────────────────────────────────────────────────────────┠+│ {"a":{"b":{"c":42,"g":42.42}},"c":[1,2,3],"d":{"e":{"f":{"g":"Hello, World","h":[1,2,3]}}}} │ +│ {"d":{"e":{"f":{"h":[4,5,6]}}},"f":"Hello, World!"} │ +│ {"a":{"b":{"c":43,"e":10,"g":43.43}},"c":[4,5,6]} │ +└─────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.^a.b, json.^d.e.f FROM test; +``` + +```text +┌─json.^`a`.b───────────────┬─json.^`d`.e.f────────────────────┠+│ {"c":42,"g":42.42} │ {"g":"Hello, World","h":[1,2,3]} │ +│ {} │ {"h":[4,5,6]} │ +│ {"c":43,"e":10,"g":43.43} │ {} │ +└───────────────────────────┴──────────────────────────────────┘ +``` + +:::note +Reading sub-objects as subcolumns may be inefficient, as this may require almost full scan of the JSON data. +::: + +## Types inference for paths + +During JSON parsing ClickHouse tries to detect the most appropriate data type for each JSON path. 
It works similar to [automatic schema inference from input data](/docs/en/interfaces/schema-inference.md) and controlled by the same settings: + +- [input_format_try_infer_integers](/docs/en/interfaces/schema-inference.md#inputformattryinferintegers) +- [input_format_try_infer_dates](/docs/en/interfaces/schema-inference.md#inputformattryinferdates) +- [input_format_try_infer_datetimes](/docs/en/interfaces/schema-inference.md#inputformattryinferdatetimes) +- [schema_inference_make_columns_nullable](/docs/en/interfaces/schema-inference.md#schemainferencemakecolumnsnullable) +- [input_format_json_try_infer_numbers_from_strings](/docs/en/interfaces/schema-inference.md#inputformatjsontryinfernumbersfromstrings) +- [input_format_json_infer_incomplete_types_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsoninferincompletetypesasstrings) +- [input_format_json_read_numbers_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadnumbersasstrings) +- [input_format_json_read_bools_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasstrings) +- [input_format_json_read_bools_as_numbers](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasnumbers) +- [input_format_json_read_arrays_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadarraysasstrings) + +Let's see some examples: + +```sql +SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=1, input_format_try_infer_datetimes=1; +``` + +```text +┌─paths_with_types─────────────────┠+│ {'a':'Date','b':'DateTime64(9)'} │ +└──────────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=0, input_format_try_infer_datetimes=0; +``` + +```text +┌─paths_with_types────────────┠+│ {'a':'String','b':'String'} │ +└─────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=1; +``` + +```text +┌─paths_with_types───────────────┠+│ {'a':'Array(Nullable(Int64))'} │ +└────────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=0; +``` + +```text +┌─paths_with_types─────┠+│ {'a':'Array(Int64)'} │ +└──────────────────────┘ +``` + +## Handling arrays of JSON objects + +JSON paths that contains an array of objects are parsed as type `Array(JSON)` and inserted into `Dynamic` column for this path. 
To read an array of objects you can extract it from `Dynamic` column as a subcolumn: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES +('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : [[{"g" : 42.42}]], "k" : {"j" : 1000}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : [[{"g" : 43.43, "h" : "2020-01-01"}]], "k" : {"j" : 2000}}]}}'), +('{"a" : {"b" : [1, 2, 3]}}'), +('{"a" : {"b" : [{"c" : 44, "f" : [[{"h" : "2020-01-02"}]]}, {"e" : [4, 5, 6], "d" : "World", "f" : [[{"g" : 44.44}]], "k" : {"j" : 3000}}]}}'); +SELECT json FROM test; +``` + +```text3 +┌─json────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┠+│ {"a":{"b":[{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}]}} │ +│ {"a":{"b":["1","2","3"]}} │ +│ {"a":{"b":[{"c":"44","f":[[{"h":"2020-01-02"}]]},{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}]}} │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.a.b, dynamicType(json.a.b) FROM test; +``` + +```text +┌─json.a.b──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)────────────────────────────────────┠+│ ['{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ +│ [1,2,3] │ Array(Nullable(Int64)) │ +│ ['{"c":"44","f":[[{"h":"2020-01-02"}]]}','{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ +└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────┘ +``` + +As you can notice, the `max_dynamic_types/max_dynamic_paths` parameters of the nested `JSON` type were reduced compared to the default values. It's needed to avoid number of subcolumns to grow uncontrolled on nested arrays of JSON objects. 
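+
+If one row per nested object is more convenient than arrays of extracted values, the `Array(JSON)` value can also be expanded with `arrayJoin`; a short sketch on the same `test` table:
+
+```sql
+SELECT arrayJoin(json.a.b[]) AS obj FROM test;
+```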
+ +Let's try to read subcolumns from this nested `JSON` column: + +```sql +SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f, json.a.b.:`Array(JSON)`.d FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┠+│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ +└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ +``` + +We can avoid writing `Array(JSON)` subcolumn name using special syntax: + +```sql +SELECT json.a.b[].c, json.a.b[].f, json.a.b[].d FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┠+│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ +└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ +``` + +The number of `[]` after path indicates the array level. `json.path[][]` will be transformed to `json.path.:Array(Array(JSON))` + +Let's check the paths and types inside our `Array(JSON)`: + +```sql +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b[]))) FROM test; +``` + +```text +┌─arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b.:`Array(JSON)`)))──┠+│ ('c','Int64') │ +│ ('d','String') │ +│ ('f','Array(Array(JSON(max_dynamic_types=8, max_dynamic_paths=64)))') │ +│ ('k.j','Int64') │ +│ ('e','Array(Nullable(Int64))') │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +Let's read subcolumns from `Array(JSON)` column: + +```sql +SELECT json.a.b[].c.:Int64, json.a.b[].f[][].g.:Float64, json.a.b[].f[][].h.:Date FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c.:`Int64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.g.:`Float64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.h.:`Date`─┠+│ [42,43,NULL] │ [[[42.42]],[],[[43.43]]] │ [[[NULL]],[],[['2020-01-01']]] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[[NULL]],[[44.44]]] │ [[['2020-01-02']],[[NULL]]] │ +└────────────────────────────────────┴──────────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ +``` + +We can also read sub-object subcolumns from nested `JSON` column: + +```sql +SELECT json.a.b[].^k FROM test +``` + +```text +┌─json.a.b.:`Array(JSON)`.^`k`─────────┠+│ ['{"j":"1000"}','{}','{"j":"2000"}'] │ +│ [] │ +│ ['{}','{"j":"3000"}'] │ +└──────────────────────────────────────┘ +``` + +## Reading JSON type from the data + +All text formats (JSONEachRow, TSV, CSV, CustomSeparated, Values, etc) supports reading `JSON` type. 
+ +Examples: + +```sql +SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP d.e, SKIP REGEXP \'b.*\')', ' +{"json" : {"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}} +{"json" : {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}} +{"json" : {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}} +{"json" : {"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}} +{"json" : {"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}} +') +``` + +```text +┌─json──────────────────────────────────────────────────────────┠+│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │ +│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │ +│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │ +│ {"a":{"b":{"c":4}},"c":"43"} │ +│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │ +└───────────────────────────────────────────────────────────────┘ +``` + +For text formats like CSV/TSV/etc `JSON` is parsed from a string containing JSON object + +```sql +SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP REGEXP \'b.*\')', +'{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}} +{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}} +{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"} +{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43} +{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}') +``` + +```text +┌─json──────────────────────────────────────────────────────────┠+│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}} │ +│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}} │ +│ {"a":{"b":{"c":3}},"e":"Hello, World!"} │ +│ {"a":{"b":{"c":4}},"c":"43"} │ +│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │ +└───────────────────────────────────────────────────────────────┘ +``` + +## Reaching the limit of dynamic paths inside JSON + +`JSON` data type can store only limited number of paths as separate subcolumns inside. By default, this limit is 1024, but you can change it in type declaration using parameter `max_dynamic_paths`. +When the limit is reached, all new paths inserted to `JSON` column will be stored in a single shared data structure. It's still possible to read such paths as subcolumns, but it will require reading the whole +shared data structure to extract the values of this path. This limit is needed to avoid the enormous number of different subcolumns that can make the table unusable. + +Let's see what happens when the limit is reached in different scenarios. + +### Reaching the limit during data parsing + +During parsing of `JSON` object from the data, when the limit is reached for current block of data, all new paths will be stored in a shared data structure. 
We can check it using introspection functions `JSONDynamicPaths, JSONSharedDataPaths`: + +```sql +SELECT json, JSONDynamicPaths(json), JSONSharedDataPaths(json) FROM format(JSONEachRow, 'json JSON(max_dynamic_paths=3)', ' +{"json" : {"a" : {"b" : 42}, "c" : [1, 2, 3]}} +{"json" : {"a" : {"b" : 43}, "d" : "2020-01-01"}} +{"json" : {"a" : {"b" : 44}, "c" : [4, 5, 6]}} +{"json" : {"a" : {"b" : 43}, "d" : "2020-01-02", "e" : "Hello", "f" : {"g" : 42.42}}} +{"json" : {"a" : {"b" : 43}, "c" : [7, 8, 9], "f" : {"g" : 43.43}, "h" : "World"}} +') +``` + +```text +┌─json───────────────────────────────────────────────────────────┬─JSONDynamicPaths(json)─┬─JSONSharedDataPaths(json)─┠+│ {"a":{"b":"42"},"c":["1","2","3"]} │ ['a.b','c','d'] │ [] │ +│ {"a":{"b":"43"},"d":"2020-01-01"} │ ['a.b','c','d'] │ [] │ +│ {"a":{"b":"44"},"c":["4","5","6"]} │ ['a.b','c','d'] │ [] │ +│ {"a":{"b":"43"},"d":"2020-01-02","e":"Hello","f":{"g":42.42}} │ ['a.b','c','d'] │ ['e','f.g'] │ +│ {"a":{"b":"43"},"c":["7","8","9"],"f":{"g":43.43},"h":"World"} │ ['a.b','c','d'] │ ['f.g','h'] │ +└────────────────────────────────────────────────────────────────┴────────────────────────┴───────────────────────────┘ +``` + +As we can see, after inserting paths `e` and `f.g` the limit was reached and we inserted them into shared data structure. + +### During merges of data parts in MergeTree table engines + +During merge of several data parts in MergeTree table the `JSON` column in the resulting data part can reach the limit of dynamic paths won't be able to store all paths from source parts as subcolumns. +In this case ClickHouse chooses what paths will remain as subcolumns after merge and what types will be stored in the shared data structure. In most cases ClickHouse tries to keep paths that contains +the largest number of non-null values and move the rarest paths to the shared data structure, but it depends on the implementation. + +Let's see an example of such merge. 
First, let's create a table with `JSON` column, set the limit of dynamic paths to `3` and insert values with `5` different paths: + +```sql +CREATE TABLE test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree ORDER BY id; +SYSTEM STOP MERGES test; +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as a) FROM numbers(5); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as b) FROM numbers(4); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as c) FROM numbers(3); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as d) FROM numbers(2); +INSERT INTO test SELECT number, formatRow('JSONEachRow', number as e) FROM numbers(1); +``` + +Each insert will create a separate data pert with `JSON` column containing single path: +```sql +SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part ASC +``` + +```text +┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┠+│ 5 │ ['a'] │ [] │ all_1_1_0 │ +│ 4 │ ['b'] │ [] │ all_2_2_0 │ +│ 3 │ ['c'] │ [] │ all_3_3_0 │ +│ 2 │ ['d'] │ [] │ all_4_4_0 │ +│ 1 │ ['e'] │ [] │ all_5_5_0 │ +└─────────┴───────────────┴───────────────────┴───────────┘ + +``` + +Now, let's merge all parts into one and see what will happen: + +```sql +SYSTEM START MERGES test; +OPTIMIZE TABLE test FINAL; +SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; +``` + +```text +┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┠+│ 1 │ ['a','b','c'] │ ['e'] │ all_1_5_2 │ +│ 2 │ ['a','b','c'] │ ['d'] │ all_1_5_2 │ +│ 12 │ ['a','b','c'] │ [] │ all_1_5_2 │ +└─────────┴───────────────┴───────────────────┴───────────┘ +``` + +As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and moved paths `e` and `d` to shared data structure. + +## Introspection functions + +There are several functions that can help to inspect the content of the JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes). + +## Tips for better usage of the JSON type + +Before creating `JSON` column and loading data into it, consider the following tips: + +- Investigate your data and specify as many path hints with types as you can. It will make the storage and the reading much more efficient. +- Think about what paths you will need and what paths you will never need. Specify paths that you won't need in the SKIP section and SKIP REGEXP if needed. It will improve the storage. +- Don't set `max_dynamic_paths` parameter to very high values, it can make the storage and reading less efficient. 
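+
+To check whether these limits are adequate for a given dataset, the shared data structure can be inspected with the introspection functions above; a hypothetical monitoring query (assuming a table `t` with a `JSON` column `json`):
+
+```sql
+SELECT arrayJoin(JSONSharedDataPaths(json)) AS shared_path, count() AS cnt
+FROM t
+GROUP BY shared_path
+ORDER BY cnt DESC
+LIMIT 10;
+```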
diff --git a/docs/en/sql-reference/data-types/object-json.md b/docs/en/sql-reference/data-types/object-json.md deleted file mode 100644 index 9609f30aac1..00000000000 --- a/docs/en/sql-reference/data-types/object-json.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -slug: /en/sql-reference/data-types/object-json -sidebar_position: 26 -sidebar_label: Object Data Type -keywords: [object, data type] ---- - -# Object Data Type (deprecated) - -**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). - -
- -Stores JavaScript Object Notation (JSON) documents in a single column. - -`JSON` can be used as an alias to `Object('json')` when setting [use_json_alias_for_old_object_type](../../operations/settings/settings.md#usejsonaliasforoldobjecttype) is enabled. - -## Example - -**Example 1** - -Creating a table with a `JSON` column and inserting data into it: - -```sql -CREATE TABLE json -( - o JSON -) -ENGINE = Memory -``` - -```sql -INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}') -``` - -```sql -SELECT o.a, o.b.c, o.b.d[3] FROM json -``` - -```text -┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┠-│ 1 │ 2 │ 3 │ -└─────┴───────┴────────────────────────┘ -``` - -**Example 2** - -To be able to create an ordered `MergeTree` family table, the sorting key has to be extracted into its column. For example, to insert a file of compressed HTTP access logs in JSON format: - -```sql -CREATE TABLE logs -( - timestamp DateTime, - message JSON -) -ENGINE = MergeTree -ORDER BY timestamp -``` - -```sql -INSERT INTO logs -SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json -FROM file('access.json.gz', JSONAsString) -``` - -## Displaying JSON columns - -When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can also display the field names by setting `output_format_json_named_tuples_as_objects = 1`: - -```sql -SET output_format_json_named_tuples_as_objects = 1 - -SELECT * FROM json FORMAT JSONEachRow -``` - -```text -{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}} -``` - -## Related Content - -- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) -- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) \ No newline at end of file diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index aa2010579af..65aacf2b539 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1050,6 +1050,20 @@ bool ColumnObject::isFinalized() const return finalized; } +void ColumnObject::getExtremes(DB::Field & min, DB::Field & max) const +{ + if (size() == 0) + { + min = Object(); + max = Object(); + } + else + { + get(0, min); + get(0, max); + } +} + void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 61e78e727a0..0b949cbcb3a 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -113,11 +113,7 @@ public: #else int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; } #endif - void getExtremes(Field & min, Field & max) const override - { - min = Object(); - max = Object(); - } + void getExtremes(Field & min, Field & max) const override; void reserve(size_t n) override; void ensureOwnership() override; diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 665f1a6ecd2..61d34b49964 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -519,6 +519,13 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID /// We can insert any field to Dynamic column. return src; } + else if (isObject(type)) + { + if (src.getType() == Field::Types::Object) + return src; /// Already in needed type. + + /// TODO: add conversion from Map/Tuple to Object. + } /// Conversion from string by parsing. 
if (src.getType() == Field::Types::String) diff --git a/tests/queries/0_stateless/01825_new_type_json_12.reference b/tests/queries/0_stateless/01825_new_type_json_12.reference index 3fd2abcac50..36f014f0178 100644 --- a/tests/queries/0_stateless/01825_new_type_json_12.reference +++ b/tests/queries/0_stateless/01825_new_type_json_12.reference @@ -3,12 +3,11 @@ ('key_1','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') ('key_3','Array(JSON(max_dynamic_types=4, max_dynamic_paths=16))') ('key_4','Int64') +('key_4','String') ('key_5','Float64') ('key_6','Float64') -('key_7','Int64') ('key_6','String') -('key_5','Int64') ('key_7','Float64') -('key_4','String') -{"obj":{"id":"1","key_0":[{"key_1":[{"key_3":[{"key_4":"1048576","key_5":0.0001048576,"key_6":25.5,"key_7":"1025"},{"key_6":"","key_7":"2"}]}]},{},{"key_1":[{"key_3":[{"key_5":"-1","key_6":"aqbjfiruu","key_7":-922337203685477600},{"key_4":"","key_6":"","key_7":"65537"}]},{"key_3":[{"key_4":"ghdqyeiom","key_5":"1048575","key_7":21474836.48}]}]}]}} +('key_7','Int64') +{"obj":{"id":"1","key_0":[{"key_1":[{"key_3":[{"key_4":"1048576","key_5":0.0001048576,"key_6":25.5,"key_7":"1025"},{"key_6":"","key_7":"2"}]}]},{},{"key_1":[{"key_3":[{"key_5":-1,"key_6":"aqbjfiruu","key_7":-922337203685477600},{"key_4":"","key_6":"","key_7":"65537"}]},{"key_3":[{"key_4":"ghdqyeiom","key_5":1048575,"key_7":21474836.48}]}]}]}} [[[1048576,NULL]],[],[[NULL,''],['ghdqyeiom']]] [[[0.0001048576,NULL]],[],[[-1,NULL],[1048575]]] [[[25.5,'']],[],[['aqbjfiruu',''],[NULL]]] [[[1025,2]],[],[[-922337203685477600,65537],[21474836.48]]] diff --git a/tests/queries/0_stateless/01825_new_type_json_3.reference.j2 b/tests/queries/0_stateless/01825_new_type_json_3.reference.j2 index 62c00713c45..8ff87d1132b 100644 --- a/tests/queries/0_stateless/01825_new_type_json_3.reference.j2 +++ b/tests/queries/0_stateless/01825_new_type_json_3.reference.j2 @@ -25,7 +25,7 @@ data JSON ======== 1 {"k1":{"k2":"1","k3":"foo"}} {'k1.k2':'Int64','k1.k3':'String'} 2 {"k4":["1","2","3"]} {'k4':'Array(Nullable(Int64))'} -3 {"k1":{"k2":"10"},"k4":[]} {'k1.k2':'Int64','k4':'Array(Nullable(String))'} +3 {"k1":{"k2":"10"},"k4":[]} {'k1.k2':'Int64','k4':'Array(Nullable(Int64))'} 1 1 foo \N 2 \N \N [1,2,3] 3 10 \N [] @@ -53,7 +53,7 @@ data JSON ======== 1 {"k1":{"k2":"1","k3":"foo"}} {'k1.k2':'Int64','k1.k3':'String'} 2 {"k4":["1","2","3"]} {'k4':'Array(Nullable(Int64))'} -3 {"k1":{"k2":"10"},"k4":[]} {'k1.k2':'Int64','k4':'Array(Nullable(String))'} +3 {"k1":{"k2":"10"},"k4":[]} {'k1.k2':'Int64','k4':'Array(Nullable(Int64))'} 1 1 foo \N 2 \N \N [1,2,3] 3 10 \N [] diff --git a/tests/queries/0_stateless/01825_new_type_json_6.reference b/tests/queries/0_stateless/01825_new_type_json_6.reference index 0227093eab4..dacf9ec4a23 100644 --- a/tests/queries/0_stateless/01825_new_type_json_6.reference +++ b/tests/queries/0_stateless/01825_new_type_json_6.reference @@ -1,9 +1,9 @@ ('key','String') ('out','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('outputs','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') ('outputs','Array(Nullable(String))') ('type','Int64') ('value','Int64') -('outputs','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') ('index','Int64') ('n','Int64') v1 [0,0] [1,2] [[],[1960131]] [[],[0]] diff --git a/tests/queries/0_stateless/01825_new_type_json_9.reference b/tests/queries/0_stateless/01825_new_type_json_9.reference new file mode 100644 index 00000000000..9ad0b55b9c5 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_9.reference @@ -0,0 
+1,3 @@ +('foo','Int64') +('k1','Int64') +('k2','Int64') diff --git a/tests/queries/0_stateless/01825_new_type_json_9.sql b/tests/queries/0_stateless/01825_new_type_json_9.sql new file mode 100644 index 00000000000..626016a0a81 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_9.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json; + +SET allow_experimental_json_type = 1; + +CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json format JSONEachRow {"id": 1, "obj": {"foo": 1, "k1": 2}}; +INSERT INTO t_json format JSONEachRow {"id": 2, "obj": {"foo": 1, "k2": 2}}; + +OPTIMIZE TABLE t_json FINAL; + +SELECT distinct arrayJoin(JSONAllPathsWithTypes(obj)) as path from t_json order by path; + +DROP TABLE IF EXISTS t_json; diff --git a/tests/queries/0_stateless/01825_new_type_json_bools.reference b/tests/queries/0_stateless/01825_new_type_json_bools.reference new file mode 100644 index 00000000000..ebbfd28c321 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_bools.reference @@ -0,0 +1 @@ +{"k1":true,"k2":false} {'k1':'Bool','k2':'Bool'} diff --git a/tests/queries/0_stateless/01825_new_type_json_bools.sql b/tests/queries/0_stateless/01825_new_type_json_bools.sql new file mode 100644 index 00000000000..f8775d0f00a --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_bools.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_bools; +SET allow_experimental_json_type = 1; + +CREATE TABLE t_json_bools (data JSON) ENGINE = Memory; +INSERT INTO t_json_bools VALUES ('{"k1": true, "k2": false}'); +SELECT data, JSONAllPathsWithTypes(data) FROM t_json_bools; + +DROP TABLE t_json_bools; diff --git a/tests/queries/0_stateless/01825_new_type_json_btc.reference b/tests/queries/0_stateless/01825_new_type_json_btc.reference index cf8017c17c1..b7ea598d47b 100644 --- a/tests/queries/0_stateless/01825_new_type_json_btc.reference +++ b/tests/queries/0_stateless/01825_new_type_json_btc.reference @@ -5,6 +5,7 @@ ('inputs','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') ('lock_time','Int64') ('out','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('rbf','Bool') ('relayed_by','String') ('size','Int64') ('time','Int64') @@ -13,7 +14,6 @@ ('vin_sz','Int64') ('vout_sz','Int64') ('weight','Int64') -('rbf','Bool') ('index','Int64') ('prev_out.addr','String') ('prev_out.n','Int64') diff --git a/tests/queries/0_stateless/01825_new_type_json_distributed.reference b/tests/queries/0_stateless/01825_new_type_json_distributed.reference new file mode 100644 index 00000000000..b2cbe847542 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_distributed.reference @@ -0,0 +1,4 @@ +{"k1":"2","k2":{"k3":"qqq","k4":["44","55"]}} {'k1':'Int64','k2.k3':'String','k2.k4':'Array(Nullable(Int64))'} +{"k1":"2","k2":{"k3":"qqq","k4":["44","55"]}} {'k1':'Int64','k2.k3':'String','k2.k4':'Array(Nullable(Int64))'} +2 qqq [44,55] +2 qqq [44,55] diff --git a/tests/queries/0_stateless/01825_new_type_json_distributed.sql b/tests/queries/0_stateless/01825_new_type_json_distributed.sql new file mode 100644 index 00000000000..0fede046927 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_distributed.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_json_local; +DROP TABLE IF EXISTS t_json_dist; + +CREATE TABLE t_json_local(data JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t_json_dist AS t_json_local ENGINE = 
Distributed(test_cluster_two_shards, currentDatabase(), t_json_local); + +INSERT INTO t_json_local FORMAT JSONAsObject {"k1": 2, "k2": {"k3": "qqq", "k4": [44, 55]}} +; + +SELECT data, JSONAllPathsWithTypes(data) FROM t_json_dist; +SELECT data.k1, data.k2.k3, data.k2.k4 FROM t_json_dist; + +DROP TABLE IF EXISTS t_json_local; +DROP TABLE IF EXISTS t_json_dist; diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.reference b/tests/queries/0_stateless/01825_new_type_json_in_array.reference new file mode 100644 index 00000000000..0ca02385389 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.reference @@ -0,0 +1,28 @@ +{"id":1,"arr":[{"k1":"1","k2":{"k3":"2","k4":"3"}},{"k1":"2","k2":{"k5":"foo"}}]} +{"id":2,"arr":[{"k1":"3","k2":{"k3":"4","k4":"5"}}]} +1 [1,2] [2,NULL] [3,NULL] [NULL,'foo'] +2 [3] [4] [5] [NULL] +{"arr":{"k1":"1","k2":{"k3":"2","k4":"3"}}} +{"arr":{"k1":"2","k2":{"k5":"foo"}}} +{"arr":{"k1":"3","k2":{"k3":"4","k4":"5"}}} +('k1','Int64') +('k2.k3','Int64') +('k2.k4','Int64') +('k2.k5','String') +{"id":1,"arr":[{"k1":[{"k2":"aaa","k3":"bbb"},{"k2":"ccc"}]}]} +{"id":2,"arr":[{"k1":[{"k3":"ddd","k4":"10"},{"k4":"20"}],"k5":{"k6":"foo"}}]} +1 [['aaa','ccc']] [['bbb',NULL]] [[NULL,NULL]] [NULL] +2 [[NULL,NULL]] [['ddd',NULL]] [[10,20]] ['foo'] +{"k1":{"k2":"aaa","k3":"bbb"}} +{"k1":{"k2":"ccc"}} +{"k1":{"k3":"ddd","k4":"10"}} +{"k1":{"k4":"20"}} +('k2','String') +('k3','String') +('k4','Int64') +{"arr":[{"x":1}]} +{"arr":{"x":{"y":1},"t":{"y":2}}} +{"arr":[1,{"y":1}]} +{"arr":[2,{"y":2}]} +{"arr":[{"x":"aaa","y":["1","2","3"]}]} +{"arr":[{"x":1}]} diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.sql b/tests/queries/0_stateless/01825_new_type_json_in_array.sql new file mode 100644 index 00000000000..e9ba6e402ac --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.sql @@ -0,0 +1,34 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; +DROP TABLE IF EXISTS t_json_array; + +CREATE TABLE t_json_array (id UInt32, arr Array(JSON)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": 1, "k2": {"k3": 2, "k4": 3}}, {"k1": 2, "k2": {"k5": "foo"}}]} +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": 3, "k2": {"k3": 4, "k4": 5}}]} + + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1, arr.k2.k3, arr.k2.k4, arr.k2.k5 FROM t_json_array ORDER BY id; +SELECT arr FROM t_json_array ARRAY JOIN arr ORDER BY arr.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arr))) as path FROM t_json_array order by path; + +TRUNCATE TABLE t_json_array; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1[].k2, arr.k1[].k3, arr.k1[].k4, arr.k5.k6 FROM t_json_array ORDER BY id; + +SELECT arrayJoin(arrayJoin(arr.k1[])) AS k1 FROM t_json_array ORDER BY toString(k1) FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arr.k1[])))) AS path FROM t_json_array order by path; + +DROP TABLE t_json_array; + +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; +SELECT * FROM values('arr Map(String, JSON)', '{\'x\' : \'{"y" : 1}\', \'t\' : \'{"y" : 2}\'}') FORMAT 
JSONEachRow; +SELECT * FROM values('arr Tuple(Int32, JSON)', '(1, \'{"y" : 1}\')', '(2, \'{"y" : 2}\')') FORMAT JSONEachRow; +SELECT * FROM format(JSONEachRow, '{"arr" : [{"x" : "aaa", "y" : [1,2,3]}]}') FORMAT JSONEachRow; +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/01825_new_type_json_in_other_types.reference b/tests/queries/0_stateless/01825_new_type_json_in_other_types.reference new file mode 100644 index 00000000000..03913e5098e --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_other_types.reference @@ -0,0 +1,17 @@ +Tuple(String, Map(String, Array(JSON)), JSON) +============= +{"id":1,"data":["foo",{"aa":[{"k1":[{"k2":"1","k3":"2"},{"k3":"3"}]},{"k1":[{"k2":"4"},{"k3":"5"},{"k2":"6"}],"k4":"qqq"}],"bb":[{"k4":"www"},{"k1":[{"k2":"7","k3":"8"},{"k2":"9","k3":"10"},{"k2":"11","k3":"12"}]}]},{"k1":"aa","k2":{"k3":"bb","k4":"c"}}]} +{"id":2,"data":["bar",{"aa":[{"k1":[{"k2":"13","k3":"14"},{"k2":"15","k3":"16"}],"k4":"www"}]},{}]} +{"id":3,"data":["some",{"aa":[{"k1":[{"k3":"20","k5":"some"}]}]},{"k1":"eee"}]} +============= +{"aa":[{"k1":[{"k2":"1","k3":"2"},{"k3":"3"}]},{"k1":[{"k2":"4"},{"k3":"5"},{"k2":"6"}],"k4":"qqq"}],"bb":[{"k4":"www"},{"k1":[{"k2":"7","k3":"8"},{"k2":"9","k3":"10"},{"k2":"11","k3":"12"}]}]} +{"aa":[{"k1":[{"k2":"13","k3":"14"},{"k2":"15","k3":"16"}],"k4":"www"}],"bb":[]} +{"aa":[{"k1":[{"k3":"20","k5":"some"}]}],"bb":[]} +============= +{"k1":[[{"k2":"1","k3":"2"},{"k3":"3"}],[{"k2":"4"},{"k3":"5"},{"k2":"6"}]],"k4":[null,"qqq"]} +{"k1":[[{"k2":"13","k3":"14"},{"k2":"15","k3":"16"}]],"k4":["www"]} +{"k1":[[{"k3":"20","k5":"some"}]],"k4":[null]} +============= +{"obj":{"k1":"aa","k2":{"k3":"bb","k4":"c"}}} +{"obj":{}} +{"obj":{"k1":"eee"}} diff --git a/tests/queries/0_stateless/01825_new_type_json_in_other_types.sh b/tests/queries/0_stateless/01825_new_type_json_in_other_types.sh new file mode 100755 index 00000000000..cd009c3ef70 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_other_types.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_nested" + +${CLICKHOUSE_CLIENT} -q " + CREATE TABLE t_json_nested + ( + id UInt32, + data Tuple(String, Map(String, Array(JSON)), JSON) + ) + ENGINE = MergeTree ORDER BY id" --allow_experimental_json_type 1 + +cat < 1; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +INSERT INTO type_json_dst VALUES (4, '{"arr": [{"k11": 5, "k22": 6}, {"k11": 7, "k33": 8}]}'); + +INSERT INTO type_json_src VALUES (5, '{"arr": "not array"}'); +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; + +TRUNCATE TABLE type_json_src; +INSERT INTO type_json_src VALUES (6, '{"arr": [{"k22": "str1"}]}') +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +DROP TABLE type_json_src; +DROP TABLE type_json_dst; + +CREATE TABLE type_json_dst (data JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE type_json_src (data String) ENGINE = MergeTree ORDER BY tuple(); + +SYSTEM STOP MERGES type_json_src; + +SET max_threads = 1; +SET max_insert_threads = 1; +SET output_format_json_named_tuples_as_objects = 1; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; + +INSERT INTO type_json_dst SELECT data FROM type_json_src; + +SELECT * FROM type_json_dst ORDER BY data.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; + +TRUNCATE TABLE type_json_src; +TRUNCATE TABLE type_json_dst; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + +INSERT INTO type_json_dst SELECT data FROM type_json_src; + +SELECT * FROM type_json_dst ORDER BY data.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; + +DROP TABLE type_json_src; +DROP TABLE type_json_dst; diff --git a/tests/queries/0_stateless/01825_new_type_json_missed_values.reference b/tests/queries/0_stateless/01825_new_type_json_missed_values.reference new file mode 100644 index 00000000000..952b5652bc1 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_missed_values.reference @@ -0,0 +1,4 @@ +('foo','Int64') +('k1','Int64') +('k2','Int64') +1 diff --git a/tests/queries/0_stateless/01825_new_type_json_missed_values.sql b/tests/queries/0_stateless/01825_new_type_json_missed_values.sql new file mode 100644 index 00000000000..84bd8a19c18 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_missed_values.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json; + +SET allow_experimental_json_type = 1; + +CREATE TABLE t_json(id UInt64, obj JSON) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES t_json; + +INSERT INTO t_json SELECT number, '{"k1": 1, "k2": 2}' FROM numbers(1000000); +INSERT INTO t_json VALUES (1000001, '{"foo": 1}'); + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) AS path FROM t_json ORDER BY path; +SELECT count() FROM t_json WHERE obj.foo IS 
NOT NULL; + +DROP TABLE IF EXISTS t_json; diff --git a/tests/queries/0_stateless/01825_new_type_json_multiple_files.reference b/tests/queries/0_stateless/01825_new_type_json_multiple_files.reference new file mode 100644 index 00000000000..63c12792c17 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_multiple_files.reference @@ -0,0 +1,22 @@ +{"data":{"k0":"100"}} +{"data":{"k1":"100"}} +{"data":{"k2":"100"}} +{"data":{"k3":"100"}} +{"data":{"k4":"100"}} +{"data":{"k5":"100"}} +('k0','Int64') +('k1','Int64') +('k2','Int64') +('k3','Int64') +('k4','Int64') +('k5','Int64') +{"data":{"k0":"100"}} +{"data":{"k1":"100"}} +{"data":{"k2":"100"}} +('k0','Int64') +('k1','Int64') +('k2','Int64') +{"data":{"k1":"100"}} +{"data":{"k3":"100"}} +('k1','Int64') +('k3','Int64') diff --git a/tests/queries/0_stateless/01825_new_type_json_multiple_files.sh b/tests/queries/0_stateless/01825_new_type_json_multiple_files.sh new file mode 100755 index 00000000000..9cb37987628 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_multiple_files.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +for f in "${USER_FILES_PATH:?}/${CLICKHOUSE_DATABASE}"_*.json; do + [ -e $f ] && rm $f +done + +for i in {0..5}; do + echo "{\"k$i\": 100}" > "$USER_FILES_PATH/${CLICKHOUSE_DATABASE}_$i.json" +done + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_files" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_files (file String, data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_files SELECT _file, data FROM file('${CLICKHOUSE_DATABASE}_*.json', 'JSONAsObject', 'data JSON')" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT data FROM t_json_files ORDER BY file FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_files ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE IF EXISTS t_json_files" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_files \ + SELECT _file, data FROM file('${CLICKHOUSE_DATABASE}_*.json', 'JSONAsObject', 'data JSON') \ + ORDER BY _file LIMIT 3" --max_threads 1 --min_insert_block_size_rows 1 --max_insert_block_size 1 --max_block_size 1 --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT data FROM t_json_files ORDER BY file, data FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_files ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE IF EXISTS t_json_files" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_files \ + SELECT _file, data FROM file('${CLICKHOUSE_DATABASE}_*.json', 'JSONAsObject', 'data JSON') \ + WHERE _file IN ('${CLICKHOUSE_DATABASE}_1.json', '${CLICKHOUSE_DATABASE}_3.json')" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT data FROM t_json_files ORDER BY file FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_files ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_files" +rm "$USER_FILES_PATH"/${CLICKHOUSE_DATABASE}_*.json diff --git a/tests/queries/0_stateless/01825_new_type_json_mutations.reference 
b/tests/queries/0_stateless/01825_new_type_json_mutations.reference new file mode 100644 index 00000000000..c7523661a3b --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_mutations.reference @@ -0,0 +1,7 @@ +1 q {"k1":"1","k2":"2","k3":[{"k4":"aaa"},{"k4":"bbb"}]} +2 w {"k1":"3","k2":"4","k3":[{"k4":"ccc"}]} +3 e {"k1":"5","k2":"6"} +1 q {"k1":"1","k2":"2","k3":[{"k4":"aaa"},{"k4":"bbb"}]} +3 e {"k1":"5","k2":"6"} +1 foo +3 foo diff --git a/tests/queries/0_stateless/01825_new_type_json_mutations.sql b/tests/queries/0_stateless/01825_new_type_json_mutations.sql new file mode 100644 index 00000000000..77feee692d9 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_mutations.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_mutations; + +SET allow_experimental_json_type = 1; +SET output_format_json_named_tuples_as_objects = 1; +SET mutations_sync = 2; + +CREATE TABLE t_json_mutations(id UInt32, s String, obj JSON) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_mutations VALUES (1, 'q', '{"k1": 1, "k2": 2, "k3": [{"k4": "aaa"}, {"k4": "bbb"}]}'); +INSERT INTO t_json_mutations VALUES (2, 'w', '{"k1": 3, "k2": 4, "k3": [{"k4": "ccc"}]}'); +INSERT INTO t_json_mutations VALUES (3, 'e', '{"k1": 5, "k2": 6}'); + +SELECT * FROM t_json_mutations ORDER BY id; +ALTER TABLE t_json_mutations DELETE WHERE id = 2; +SELECT * FROM t_json_mutations ORDER BY id; +ALTER TABLE t_json_mutations DROP COLUMN s, DROP COLUMN obj, ADD COLUMN t String DEFAULT 'foo'; +SELECT * FROM t_json_mutations ORDER BY id; + +DROP TABLE t_json_mutations; diff --git a/tests/queries/0_stateless/01825_new_type_json_order_by.reference b/tests/queries/0_stateless/01825_new_type_json_order_by.reference new file mode 100644 index 00000000000..611d2835127 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_order_by.reference @@ -0,0 +1,6 @@ +0 +0 +{"k":"v"} + +{"k":"v"} +{"k":"v"} diff --git a/tests/queries/0_stateless/01825_new_type_json_order_by.sql b/tests/queries/0_stateless/01825_new_type_json_order_by.sql new file mode 100644 index 00000000000..6b5fb40aed4 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_order_by.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; +SELECT dummy FROM system.one ORDER BY materialize('{"k":"v"}'::JSON); +SELECT dummy FROM system.one ORDER BY materialize('{"k":"v"}'::JSON), dummy; +SELECT materialize('{"k":"v"}'::JSON) SETTINGS extremes = 1; diff --git a/tests/queries/0_stateless/01825_new_type_json_partitions.reference b/tests/queries/0_stateless/01825_new_type_json_partitions.reference new file mode 100644 index 00000000000..c5839472132 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_partitions.reference @@ -0,0 +1,2 @@ +{"id":1,"obj":{"k1":"v1"}} +{"id":2,"obj":{"k2":"v2"}} diff --git a/tests/queries/0_stateless/01825_new_type_json_partitions.sql b/tests/queries/0_stateless/01825_new_type_json_partitions.sql new file mode 100644 index 00000000000..d1f37dedded --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_partitions.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_partitions; + +SET allow_experimental_json_type = 1; + +CREATE TABLE t_json_partitions (id UInt32, obj JSON) +ENGINE MergeTree ORDER BY id PARTITION BY id; + +INSERT INTO t_json_partitions FORMAT JSONEachRow {"id": 1, "obj": {"k1": "v1"}} {"id": 2, "obj": {"k2": "v2"}}; + +SELECT * FROM t_json_partitions ORDER BY id FORMAT JSONEachRow; + +DROP TABLE 
t_json_partitions; diff --git a/tests/queries/0_stateless/01825_type_json_10.reference b/tests/queries/0_stateless/01825_type_json_10.reference index e69de29bb2d..4161fb59c93 100644 --- a/tests/queries/0_stateless/01825_type_json_10.reference +++ b/tests/queries/0_stateless/01825_type_json_10.reference @@ -0,0 +1,9 @@ +Tuple(\n a Tuple(\n b Int8,\n c Nested(d Int8, e Array(Int16), f Int8))) +{"o":{"a":{"b":1,"c":[{"d":10,"e":[31],"f":0},{"d":20,"e":[63,127],"f":0}]}}} +{"o":{"a":{"b":2,"c":[]}}} +{"o":{"a":{"b":3,"c":[{"d":0,"e":[32],"f":20},{"d":0,"e":[64,128],"f":30}]}}} +{"o":{"a":{"b":4,"c":[]}}} +1 [10,20] [[31],[63,127]] [0,0] +2 [] [] [] +3 [0,0] [[32],[64,128]] [20,30] +4 [] [] [] diff --git a/tests/queries/0_stateless/01825_type_json_5.sql b/tests/queries/0_stateless/01825_type_json_5.sql index f508042c4ce..f50eee6b8ef 100644 --- a/tests/queries/0_stateless/01825_type_json_5.sql +++ b/tests/queries/0_stateless/01825_type_json_5.sql @@ -2,8 +2,8 @@ SET allow_experimental_object_type = 1; -SELECT '{"a": {"b": 1, "c": 2}}'::JSON AS s; -SELECT '{"a": {"b": 1, "c": 2}}'::JSON AS s format JSONEachRow; +SELECT '{"a": {"b": 1, "c": 2}}'::Object('JSON') AS s; +SELECT '{"a": {"b": 1, "c": 2}}'::Object('JSON') AS s format JSONEachRow; DROP TABLE IF EXISTS t_json_5; DROP TABLE IF EXISTS t_json_str_5; diff --git a/tests/queries/0_stateless/02421_new_type_json_async_insert.reference b/tests/queries/0_stateless/02421_new_type_json_async_insert.reference new file mode 100644 index 00000000000..fdd133460c6 --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_async_insert.reference @@ -0,0 +1,5 @@ +INCORRECT_DATA +0 +0 +INCORRECT_DATA +aaa diff --git a/tests/queries/0_stateless/02421_new_type_json_async_insert.sh b/tests/queries/0_stateless/02421_new_type_json_async_insert.sh new file mode 100755 index 00000000000..b23470a4179 --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_async_insert.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_async_insert" +$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 -q "CREATE TABLE t_json_async_insert (data JSON) ENGINE = MergeTree ORDER BY tuple()" + +$CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"aaa"}' 2>&1 | grep -o -m1 "INCORRECT_DATA" +$CLICKHOUSE_CLIENT -q "SELECT count() FROM t_json_async_insert" +$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.parts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_json_async_insert'" + +$CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"aaa"}' 2>&1 | grep -o -m1 "INCORRECT_DATA" & +$CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"k1": "aaa"}' & + +wait + +$CLICKHOUSE_CLIENT -q "SELECT data.k1 FROM t_json_async_insert ORDER BY data.k1" +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_async_insert" diff --git a/tests/queries/0_stateless/02421_new_type_json_empty_parts.reference b/tests/queries/0_stateless/02421_new_type_json_empty_parts.reference new file mode 100644 index 00000000000..172ea2d3eed --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_empty_parts.reference @@ -0,0 +1,16 @@ +Collapsing +0 +0 +DELETE all +2 +1 +('k1','String') +('k2','String') +0 +0 +TTL +1 +1 +('k2','String') +0 +0 diff --git a/tests/queries/0_stateless/02421_new_type_json_empty_parts.sh b/tests/queries/0_stateless/02421_new_type_json_empty_parts.sh new file mode 100755 index 00000000000..2714b9586f8 --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_empty_parts.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +set -euo pipefail + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh +# shellcheck source=./parts.lib +. 
"$CURDIR"/parts.lib + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT 'Collapsing';" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, s Int8, data JSON) ENGINE = CollapsingMergeTree(s) ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, 1, '{\"k1\": \"aaa\"}') (1, -1, '{\"k2\": \"bbb\"}');" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT 'DELETE all';" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, data JSON) ENGINE = MergeTree ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '{\"k1\": \"aaa\"}') (1, '{\"k2\": \"bbb\"}');" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" +${CLICKHOUSE_CLIENT} -q "ALTER TABLE t_json_empty_parts DELETE WHERE 1 SETTINGS mutations_sync = 1;" +timeout 60 bash -c 'wait_for_delete_empty_parts t_json_empty_parts' +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT 'TTL';" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, d Date, data JSON) ENGINE = MergeTree ORDER BY id TTL d WHERE id % 2 = 1 SETTINGS old_parts_lifetime=5;" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '2000-01-01', '{\"k1\": \"aaa\"}') (2, '2000-01-01', '{\"k2\": \"bbb\"}');" +${CLICKHOUSE_CLIENT} -q "OPTIMIZE TABLE t_json_empty_parts FINAL;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" +${CLICKHOUSE_CLIENT} -q "ALTER TABLE t_json_empty_parts MODIFY TTL d;" +${CLICKHOUSE_CLIENT} -q "OPTIMIZE TABLE t_json_empty_parts FINAL;" +timeout 60 bash -c 'wait_for_delete_empty_parts t_json_empty_parts' +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" diff --git 
a/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference new file mode 100644 index 00000000000..3eb1f72bfd6 --- /dev/null +++ b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference @@ -0,0 +1 @@ +{"list":[{"nested":{"x":[{"r":"1"},{"r":"2"}]},"x":[{"r":"1"}]}]} diff --git a/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh new file mode 100755 index 00000000000..ae98946ad73 --- /dev/null +++ b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +echo ' +{ + "obj" : + { + "list" : + [ + { + "nested" : { + "x" : [{"r" : 1}, {"r" : 2}] + }, + "x" : [{"r" : 1}] + } + ] + } +}' > 02482_object_data.jsonl + +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj JSON')" + +rm 02482_object_data.jsonl + diff --git a/tests/queries/0_stateless/02553_new_type_json_attach_partition.reference b/tests/queries/0_stateless/02553_new_type_json_attach_partition.reference new file mode 100644 index 00000000000..1556b015503 --- /dev/null +++ b/tests/queries/0_stateless/02553_new_type_json_attach_partition.reference @@ -0,0 +1,2 @@ +{"b":"1","c":{"k1":"1"}} +{"b":"1","c":{"k1":["1","2"]}} diff --git a/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql new file mode 100644 index 00000000000..8a5abd31cb4 --- /dev/null +++ b/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql @@ -0,0 +1,14 @@ +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_json_attach_partition; + +CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; +ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": [1, 2]}}; + +ALTER TABLE t_json_attach_partition ATTACH PARTITION tuple(); +SELECT * FROM t_json_attach_partition ORDER BY toString(c) FORMAT JSONEachRow; + +DROP TABLE t_json_attach_partition; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1.reference deleted file mode 100644 index 90a96b1bb2f..00000000000 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1.reference +++ /dev/null @@ -1,2080 +0,0 @@ -Memory -insert -test -('a.b.c','UInt32') -('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') -('a.b.d','Int64') -('a.b.e','String') -('b.b._25','Int64') -('b.b._26','Int64') -('b.b._27','Int64') -('b.b._28','Int64') -('b.b._29','Int64') -('b.b.d','Int64') -('b.b.e','String') -('d.a','Array(Nullable(Int64))') -('d.a','Int64') -('d.b','Int64') -('d.c','Date') -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, 
{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], 
"1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, 
{"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, 
{"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 
0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, 
{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", 
"1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -MergeTree compact -No merges -insert -test -('a.b.c','UInt32') -('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') -('a.b.d','Int64') -('a.b.e','String') -('b.b._25','Int64') -('b.b._26','Int64') -('b.b._27','Int64') -('b.b._28','Int64') -('b.b._29','Int64') -('b.b.d','Int64') -('b.b.e','String') -('d.a','Array(Nullable(Int64))') -('d.a','Int64') -('d.b','Int64') -('d.c','Date') -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, 
null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, 
{"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - 
"json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, 
{"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - 
"json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, 
null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, 
{"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - 
"json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, 
{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, 
{"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -With merges -test -('a.b.c','UInt32') -('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') -('a.b.d','Int64') -('a.b.e','String') -('b.b._25','Int64') -('b.b._26','Int64') -('b.b._27','Int64') -('b.b._28','Int64') -('b.b._29','Int64') -('b.b.d','Int64') -('b.b.e','String') -('d.a','Array(Nullable(Int64))') -('d.a','Int64') -('d.b','Int64') -('d.c','Date') -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, 
{"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - 
"json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, 
{"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", 
"str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, 
{"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - 
"json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, 
{"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - 
"json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - 
"json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, 
{"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, 
{"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -MergeTree wide -No merges -insert -test -('a.b.c','UInt32') -('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') -('a.b.d','Int64') -('a.b.e','String') -('b.b._25','Int64') -('b.b._26','Int64') -('b.b._27','Int64') -('b.b._28','Int64') -('b.b._29','Int64') -('b.b.d','Int64') 
-('b.b.e','String') -('d.a','Array(Nullable(Int64))') -('d.a','Int64') -('d.b','Int64') -('d.c','Date') -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", 
"str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, 
null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - 
"json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, 
{"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - 
"json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, 
null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], 
["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": 
[null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", 
"32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, 
{"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -With merges -test -('a.b.c','UInt32') -('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') -('a.b.d','Int64') -('a.b.e','String') -('b.b._25','Int64') -('b.b._26','Int64') -('b.b._27','Int64') -('b.b._28','Int64') -('b.b._29','Int64') -('b.b.d','Int64') -('b.b.e','String') -('d.a','Array(Nullable(Int64))') -('d.a','Int64') -('d.b','Int64') -('d.c','Date') -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, 
null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, 
null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], - "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], - "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], - "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, 
{"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], - "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - 
"json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - 
"json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], - "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, 
null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], - "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, 
null, null, null, null, null, null, null], - "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", 
"13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", 
"13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, 
{"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, 
{"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} -{ - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], - "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], - "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] -} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference new file mode 100644 index 00000000000..16150ee7b45 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference @@ -0,0 +1,831 @@ +No merges +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 
0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", 
"str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, 
{"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, 
null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 
00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, 
{"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
"35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], 
+ "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + 
"json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, 
{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, 
"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", 
"23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +With merges +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], 
"1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, 
{"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", 
"str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", 
"str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", 
"str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, 
{"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 
00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 
00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, 
{"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, 
{"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh similarity index 95% rename from tests/queries/0_stateless/03207_json_read_subcolumns_1.sh rename to tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh index 3c63b513ad6..75b13f89a06 100755 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1.sh +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh @@ -96,13 +96,6 @@ function test() $CH_CLIENT -q "drop table if exists test;" -echo "Memory" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory" -insert -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact" $CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" echo "No merges" $CH_CLIENT -q "system stop merges test" @@ -112,14 +105,3 @@ echo "With merges" $CH_CLIENT -q "system start merges test" test $CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings 
min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference new file mode 100644 index 00000000000..a7361856bc1 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference @@ -0,0 +1,413 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
"1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, 
{"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, 
null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, 
null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, 
{"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, 
{"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + 
"json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, 
null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, 
{"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + 
"json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, 
{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, 
{"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql new file mode 100755 index 00000000000..51e6970759d --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql @@ -0,0 +1,87 @@ +-- Tags: no-fasttest, long +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type=1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory; + +truncate table test; +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', 
toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format 
JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select 
json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +drop table test; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference new file mode 100644 index 00000000000..16150ee7b45 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference @@ -0,0 +1,831 @@ +No merges +insert +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + 
"json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, 
null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, 
{"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + 
"json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, 
null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, 
null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", 
"13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", 
"13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, 
{"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, 
{"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +With merges +test +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime64(9)') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 
00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, 
{"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, 
null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", 
"str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": 
[null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, 
{"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, 
{"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, 
null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh new file mode 100755 index 00000000000..144a88d8ec6 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" + +function insert() +{ + echo "insert" + $CH_CLIENT -q "truncate table test" + $CH_CLIENT -q "insert into test select number, '{}' from numbers(5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5)" + $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5)" +} + +function test() +{ + echo "test" + $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" + + $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" + $CH_CLIENT -q "select json, 
json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.non.existing.path from test order by id format JSONColumns" + $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.a.b.c from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.a.b.c from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format JSONColumns" + $CH_CLIENT -q 
"select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format JSONColumns" + + $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json.^a, json.a.b.d, 
json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" + $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" +} + +$CH_CLIENT -q "drop table if exists test;" + +$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +echo "No merges" +$CH_CLIENT -q "system stop merges test" +insert +test +echo "With merges" +$CH_CLIENT -q "system start merges test" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh index 128c525bf02..6df95ad4ad6 100755 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, long +# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # reset --log_comment diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh index bf6360a89a4..154c1aa5f9c 100755 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, long +# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # reset --log_comment diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh index 5e8af0c1f80..d20d9a4d79d 100755 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, long +# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # reset --log_comment diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh index 8fa1b809390..c39411f7b09 100755 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, long +# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # reset --log_comment diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh index f7c0fdad1a6..b179baeadef 100755 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: 
no-fasttest, long +# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # reset --log_comment diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh index 3106702faf4..6686179e801 100755 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, long +# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # reset --log_comment From a7bf79b30833d9cb8fe9a20cab7025b3db95bef7 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 19:27:39 +0000 Subject: [PATCH 0693/2361] Update docs --- docs/en/sql-reference/data-types/json.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index c6fa6f0e882..8e16d6e4d57 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -80,4 +80,5 @@ SELECT * FROM json FORMAT JSONEachRow ## Related Content - [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) -- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) \ No newline at end of file +- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) +- \ No newline at end of file From 155b28227972fe2f33dac98d3c471c555637d246 Mon Sep 17 00:00:00 2001 From: joelynch Date: Mon, 22 Jul 2024 21:30:40 +0200 Subject: [PATCH 0694/2361] This cannot be fasttest because it uses Kafka engine --- tests/queries/0_stateless/01821_table_comment.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01821_table_comment.sql b/tests/queries/0_stateless/01821_table_comment.sql index 32b89af0750..4946e46d37a 100644 --- a/tests/queries/0_stateless/01821_table_comment.sql +++ b/tests/queries/0_stateless/01821_table_comment.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-parallel, no-fasttest DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; From 7f5c58f599d34f690c4a04e4223a2f86a433d0e9 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Mon, 22 Jul 2024 20:58:56 +0100 Subject: [PATCH 0695/2361] fxs --- tests/queries/0_stateless/01293_show_clusters.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01293_show_clusters.reference b/tests/queries/0_stateless/01293_show_clusters.reference index 9569fcf2e37..e140f207022 100644 --- a/tests/queries/0_stateless/01293_show_clusters.reference +++ b/tests/queries/0_stateless/01293_show_clusters.reference @@ -1,3 +1,3 @@ test_shard_localhost -test_cluster_one_shard_two_replicas 1 1 0 1 127.0.0.1 127.0.0.1 9000 1 default 0 NULL -test_cluster_one_shard_two_replicas 1 1 0 2 127.0.0.2 127.0.0.2 9000 0 default 0 NULL +test_cluster_one_shard_two_replicas 1 1 0 1 127.0.0.1 127.0.0.1 9000 1 default +test_cluster_one_shard_two_replicas 1 1 0 2 127.0.0.2 127.0.0.2 9000 0 default From 72f4919fdad5217f48bd83e51ce2d1f3f083087b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mi=D1=81hael=20Stetsyuk?= <59827607+mstetsyuk@users.noreply.github.com> Date: Mon, 22 Jul 2024 
21:33:47 +0100 Subject: [PATCH 0696/2361] Update src/Common/CurrentMetrics.cpp Co-authored-by: Sema Checherinda <104093494+CheSema@users.noreply.github.com> --- src/Common/CurrentMetrics.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 2fedba0175b..39198147794 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -307,7 +307,7 @@ M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \ M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \ \ - M(S3DiskNoKeyErrors, "Number of no-key S3 disk errors") \ + M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ #ifdef APPLY_FOR_EXTERNAL_METRICS #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) From 7353064d1c97231d02aab2ffade3e4c63a3ebcb1 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 22 Jul 2024 23:12:22 +0200 Subject: [PATCH 0697/2361] fix --- tests/queries/0_stateless/replication.lib | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index b2fcc422508..fe867537000 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -52,7 +52,12 @@ function check_replication_consistency() check_query_part=$2 # Try to kill some mutations because sometimes tests run too much (it's not guarenteed to kill all mutations, see below) - ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null + # Try multiple replicas, because queries are not finished yet, and "global" KILL MUTATION may fail due to another query (like DROP TABLE) + readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%'") + for t in "${tables_arr[@]}" + do + ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table='$t'" > /dev/null 2>/dev/null + done # Wait for all queries to finish (query may still be running if thread is killed by timeout) num_tries=0 From 9d3a5cb699d9933b712c970c690b4ba33cf9b133 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 22 Jul 2024 21:27:04 +0000 Subject: [PATCH 0698/2361] Fix style --- tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql old mode 100755 new mode 100644 From abb6025e727d8b825ed62b07d8684dc0f7eddb7d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 02:30:45 +0200 Subject: [PATCH 0699/2361] Fix performance test about the generateRandom table function, supposedly --- tests/performance/generate_table_function.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/generate_table_function.xml b/tests/performance/generate_table_function.xml index c219d73b6cf..d56c585188d 100644 --- a/tests/performance/generate_table_function.xml +++ b/tests/performance/generate_table_function.xml @@ -7,7 +7,7 @@ SELECT 
sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('d Date, dt DateTime, dtm DateTime(\'Asia/Istanbul\')', 0, 10, 10) LIMIT 1000000000); SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('dt64 DateTime64, dts64 DateTime64(6), dtms64 DateTime64(6 ,\'Asia/Istanbul\')', 0, 10, 10) LIMIT 100000000); SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('f32 Float32, f64 Float64', 0, 10, 10) LIMIT 1000000000); - SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('d32 Decimal32(4), d64 Decimal64(8), d128 Decimal128(16)', 0, 10, 10) LIMIT 1000000000); + SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('d32 Decimal32(4), d64 Decimal64(8), d128 Decimal128(16)', 0, 10, 10) LIMIT 100000000); SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('i Tuple(Int32, Int64)', 0, 10, 10) LIMIT 1000000000); SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('i Array(Int8)', 0, 10, 10) LIMIT 100000000); SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('i Array(Nullable(Int32))', 0, 10, 10) LIMIT 100000000); From 07f642f1cac5253fda7eaa928ca5e659159b6b16 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 03:13:52 +0200 Subject: [PATCH 0700/2361] Fix bad tests `share_big_sets`, CC @davenger --- ...en_multiple_mutations_tasks_long.reference | 1 + ..._between_multiple_mutations_tasks_long.sql | 13 +++++++---- ...sets_between_mutation_tasks_long.reference | 1 + ...e_big_sets_between_mutation_tasks_long.sql | 23 +++++++++++-------- 4 files changed, 23 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference index 3a92fcf283d..9cb32105006 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference +++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference @@ -7,3 +7,4 @@ all_4_4_0 5000 all_2_2_0_9 5000 all_3_3_0_9 5000 all_4_4_0_9 +Ok diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql index 741d0177971..631fd9cb2cc 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql @@ -18,12 +18,15 @@ SELECT name FROM system.parts WHERE database=currentDatabase() AND table = '0258 -- Start multiple mutations simultaneously SYSTEM STOP MERGES 02581_trips; -ALTER TABLE 02581_trips UPDATE description='5' WHERE id IN (SELECT (number*10 + 5)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=0; -ALTER TABLE 02581_trips UPDATE description='6' WHERE id IN (SELECT (number*10 + 6)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=0; -ALTER TABLE 02581_trips DELETE WHERE id IN (SELECT (number*10 + 7)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=0; -ALTER TABLE 02581_trips UPDATE description='8' WHERE id IN (SELECT (number*10 + 8)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=0; +ALTER TABLE 02581_trips UPDATE description='5' WHERE id IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +ALTER TABLE 02581_trips UPDATE description='6' WHERE id IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +ALTER TABLE 
02581_trips DELETE WHERE id IN (SELECT (number*10 + 7)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +ALTER TABLE 02581_trips UPDATE description='8' WHERE id IN (SELECT (number*10 + 8)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; SYSTEM START MERGES 02581_trips; -DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + 9)::UInt32 FROM numbers(200000000)); +DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + 9)::UInt32 FROM numbers(10000000)); SELECT count(), _part from 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part; +SYSTEM FLUSH LOGS; +SELECT DISTINCT peak_memory_usage < 2000000000 ? 'Ok' : toString(tuple(*)) FROM system.part_log WHERE database = currentDatabase() AND event_date >= yesterday() AND table = '02581_trips' AND event_type = 'MutatePart'; + DROP TABLE 02581_trips; diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference index 3a7410d925f..d21598bc12e 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference +++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference @@ -10,3 +10,4 @@ all_4_4_0 20000 16000 12000 +Ok diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql index b7314c8fa47..062f22357e8 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql @@ -18,42 +18,45 @@ SELECT count() from 02581_trips WHERE description = ''; SELECT name FROM system.parts WHERE database=currentDatabase() AND table = '02581_trips' AND active ORDER BY name; -- Run mutation with `id` a 'IN big subquery' -ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=2; +ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; -ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10 + 1)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=2, max_rows_in_set=1000; +ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10 + 1)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2, max_rows_in_set=1000; SELECT count() from 02581_trips WHERE description = ''; -- Run mutation with func(`id`) IN big subquery -ALTER TABLE 02581_trips UPDATE description='b' WHERE id::UInt64 IN (SELECT (number*10 + 2)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=2; +ALTER TABLE 02581_trips UPDATE description='b' WHERE id::UInt64 IN (SELECT (number*10 + 2)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; -- Run mutation with non-PK `id2` IN big subquery -ALTER TABLE 02581_trips UPDATE description='c' WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(200000000)) SETTINGS mutations_sync=2; +ALTER TABLE 02581_trips UPDATE description='c' WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; -- Run mutation with PK and non-PK IN big subquery ALTER TABLE 02581_trips UPDATE description='c' WHERE - (id IN (SELECT 
(number*10 + 4)::UInt32 FROM numbers(200000000))) OR - (id2 IN (SELECT (number*10 + 4)::UInt32 FROM numbers(200000000))) + (id IN (SELECT (number*10 + 4)::UInt32 FROM numbers(10000000))) OR + (id2 IN (SELECT (number*10 + 4)::UInt32 FROM numbers(10000000))) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; -- Run mutation with PK and non-PK IN big subquery ALTER TABLE 02581_trips UPDATE description='c' WHERE - (id::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(200000000))) OR - (id2::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(200000000))) + (id::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000))) OR + (id2::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000))) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; -- Run mutation with PK and non-PK IN big subquery ALTER TABLE 02581_trips UPDATE description='c' WHERE - (id::UInt32 IN (SELECT (number*10 + 6)::UInt32 FROM numbers(200000000))) OR - ((id2+1)::String IN (SELECT (number*10 + 6)::UInt32 FROM numbers(200000000))) + (id::UInt32 IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000))) OR + ((id2+1)::String IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000))) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; +SYSTEM FLUSH LOGS; +SELECT DISTINCT peak_memory_usage < 2000000000 ? 'Ok' : toString(tuple(*)) FROM system.part_log WHERE database = currentDatabase() AND event_date >= yesterday() AND table = '02581_trips' AND event_type = 'MutatePart'; + DROP TABLE 02581_trips; From 4cbb8bab56c288a8a2d5bd5d0bc5ec102516c621 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 04:09:06 +0200 Subject: [PATCH 0701/2361] Speed up mutations for non-replicated MergeTree a bit --- src/Storages/StorageMergeTree.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 444b3fbae4c..45573925c02 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -506,13 +506,14 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context } Int64 version; + String mutation_id; { std::lock_guard lock(currently_processing_in_background_mutex); MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings()); version = increment.get(); entry.commit(version); - String mutation_id = entry.file_name; + mutation_id = entry.file_name; if (txn) txn->addMutation(shared_from_this(), mutation_id); @@ -527,9 +528,9 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context } throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutation {} already exists, it's a bug", version); } - - LOG_INFO(log, "Added mutation: {}{}", mutation_id, additional_info); } + + LOG_INFO(log, "Added mutation: {}{}", mutation_id, additional_info); background_operations_assignee.trigger(); return version; } From 8899d4242895893c0485efb2de237bca008d1a8d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 04:29:46 +0200 Subject: [PATCH 0702/2361] Speed up mutations for non-replicated MergeTree significantly --- src/Storages/StorageMergeTree.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 45573925c02..527872d701e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ 
b/src/Storages/StorageMergeTree.cpp @@ -505,19 +505,18 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context additional_info = fmt::format(" (TID: {}; TIDH: {})", current_tid, current_tid.getHash()); } - Int64 version; - String mutation_id; + MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings()); + Int64 version = increment.get(); + entry.commit(version); + String mutation_id = entry.file_name; + if (txn) + txn->addMutation(shared_from_this(), mutation_id); + + bool alter_conversions_mutations_updated = updateAlterConversionsMutations(entry.commands, alter_conversions_mutations, /* remove= */ false); + { std::lock_guard lock(currently_processing_in_background_mutex); - MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings()); - version = increment.get(); - entry.commit(version); - mutation_id = entry.file_name; - if (txn) - txn->addMutation(shared_from_this(), mutation_id); - - bool alter_conversions_mutations_updated = updateAlterConversionsMutations(entry.commands, alter_conversions_mutations, /* remove= */ false); bool inserted = current_mutations_by_version.try_emplace(version, std::move(entry)).second; if (!inserted) { From dee5790b22d06e3916f4030937eb9384247a6baa Mon Sep 17 00:00:00 2001 From: zoomxi <419486879@qq.com> Date: Tue, 23 Jul 2024 10:49:37 +0800 Subject: [PATCH 0703/2361] =?UTF-8?q?According=20to=20the=20suggestions=20?= =?UTF-8?q?from=20cr=EF=BC=8C=20modify=20test=20code?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../test.py | 26 +++---------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/tests/integration/test_parallel_replicas_no_replicas/test.py b/tests/integration/test_parallel_replicas_no_replicas/test.py index b77da338554..e05f72316d0 100644 --- a/tests/integration/test_parallel_replicas_no_replicas/test.py +++ b/tests/integration/test_parallel_replicas_no_replicas/test.py @@ -49,8 +49,8 @@ def test_skip_all_replicas(start_cluster, skip_unavailable_shards): }, ) -@pytest.mark.parametrize("skip_unavailable_shards", [1, 0]) -def test_skip_all_participating_replicas1(start_cluster, skip_unavailable_shards): +@pytest.mark.parametrize("max_parallel_replicas", [2, 3, 100]) +def test_skip_all_participating_replicas(start_cluster, max_parallel_replicas): cluster_name = "test_1_shard_3_unavaliable_replicas" table_name = "tt1" create_tables(cluster_name, table_name) @@ -60,26 +60,8 @@ def test_skip_all_participating_replicas1(start_cluster, skip_unavailable_shards f"SELECT key, count() FROM {table_name} GROUP BY key ORDER BY key", settings={ "allow_experimental_parallel_reading_from_replicas": 2, - "max_parallel_replicas": 3, + "max_parallel_replicas": max_parallel_replicas, "cluster_for_parallel_replicas": cluster_name, - "skip_unavailable_shards": skip_unavailable_shards, - "parallel_replicas_min_number_of_rows_per_replica": 500, - }, - ) - -@pytest.mark.parametrize("skip_unavailable_shards", [1, 0]) -def test_skip_all_participating_replicas2(start_cluster, skip_unavailable_shards): - cluster_name = "test_1_shard_3_unavaliable_replicas" - table_name = "tt2" - create_tables(cluster_name, table_name) - - with pytest.raises(QueryRuntimeException): - initiator.query( - f"SELECT key, count() FROM {table_name} GROUP BY key ORDER BY key", - settings={ - "allow_experimental_parallel_reading_from_replicas": 2, - 
"max_parallel_replicas": 2, - "cluster_for_parallel_replicas": cluster_name, - "skip_unavailable_shards": skip_unavailable_shards, + "skip_unavailable_shards": 1, }, ) From feaa7ede979a624dfbe67facefe66395cfc021b4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 06:34:29 +0200 Subject: [PATCH 0704/2361] Fix views over distributed tables with Analyzer --- src/Analyzer/IQueryTreeNode.h | 2 +- src/Storages/StorageDistributed.cpp | 37 ++++++++++------------------- 2 files changed, 14 insertions(+), 25 deletions(-) diff --git a/src/Analyzer/IQueryTreeNode.h b/src/Analyzer/IQueryTreeNode.h index df3687f8fd9..b36c1401798 100644 --- a/src/Analyzer/IQueryTreeNode.h +++ b/src/Analyzer/IQueryTreeNode.h @@ -49,7 +49,7 @@ enum class QueryTreeNodeType : uint8_t /// Convert query tree node type to string const char * toString(QueryTreeNodeType type); -/** Query tree is semantical representation of query. +/** Query tree is a semantic representation of query. * Query tree node represent node in query tree. * IQueryTreeNode is base class for all query tree nodes. * diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 65323b4bb52..07892971ec2 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -43,7 +43,6 @@ #include #include -#include #include #include #include @@ -61,26 +60,20 @@ #include #include #include -#include #include #include #include #include #include -#include #include #include #include #include #include #include -#include #include -#include #include -#include -#include #include #include @@ -90,7 +83,6 @@ #include #include #include -#include #include #include #include @@ -496,7 +488,7 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage( } std::optional optimized_stage; - if (settings.allow_experimental_analyzer) + if (query_info.query_tree) optimized_stage = getOptimizedQueryProcessingStageAnalyzer(query_info, settings); else optimized_stage = getOptimizedQueryProcessingStage(query_info, settings); @@ -860,31 +852,28 @@ void StorageDistributed::read( modified_query_info.query = queryNodeToDistributedSelectQuery(query_tree_distributed); modified_query_info.query_tree = std::move(query_tree_distributed); + + /// Return directly (with correct header) if no shard to query. + if (modified_query_info.getCluster()->getShardsInfo().empty()) + return; } else { header = InterpreterSelectQuery(modified_query_info.query, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock(); - } - if (!settings.allow_experimental_analyzer) - { modified_query_info.query = ClusterProxy::rewriteSelectQuery( local_context, modified_query_info.query, remote_database, remote_table, remote_table_function_ptr); - } - /// Return directly (with correct header) if no shard to query. 
- if (modified_query_info.getCluster()->getShardsInfo().empty()) - { - if (settings.allow_experimental_analyzer) + if (modified_query_info.getCluster()->getShardsInfo().empty()) + { + Pipe pipe(std::make_shared(header)); + auto read_from_pipe = std::make_unique(std::move(pipe)); + read_from_pipe->setStepDescription("Read from NullSource (Distributed)"); + query_plan.addStep(std::move(read_from_pipe)); + return; - - Pipe pipe(std::make_shared(header)); - auto read_from_pipe = std::make_unique(std::move(pipe)); - read_from_pipe->setStepDescription("Read from NullSource (Distributed)"); - query_plan.addStep(std::move(read_from_pipe)); - - return; + } } const auto & snapshot_data = assert_cast(*storage_snapshot->data); From ac13983ebb8f2a75ed44f1e2616e1aac006ce69d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 06:52:19 +0200 Subject: [PATCH 0705/2361] Better --- .../IInterpreterUnionOrSelectQuery.cpp | 18 +++++++++++------ src/Interpreters/InterpreterSelectQuery.cpp | 20 ++++++++----------- src/Interpreters/InterpreterSelectQuery.h | 1 - src/Storages/TTLDescription.cpp | 2 +- 4 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index d8f6df05ca4..bc7b07ca18d 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -17,13 +17,19 @@ namespace DB { -IInterpreterUnionOrSelectQuery::IInterpreterUnionOrSelectQuery(const DB::ASTPtr& query_ptr_, - const DB::ContextMutablePtr& context_, const DB::SelectQueryOptions& options_) - : query_ptr(query_ptr_) - , context(context_) - , options(options_) - , max_streams(context->getSettingsRef().max_threads) +IInterpreterUnionOrSelectQuery::IInterpreterUnionOrSelectQuery(const ASTPtr & query_ptr_, + const ContextMutablePtr & context_, const SelectQueryOptions & options_) + : query_ptr(query_ptr_) + , context(context_) + , options(options_) + , max_streams(context->getSettingsRef().max_threads) { + /// FIXME All code here will work with the old analyzer, however for views over Distributed tables + /// it's possible that new analyzer will be enabled in ::getQueryProcessingStage method + /// of the underlying storage when all other parts of infrastructure are not ready for it + /// (built with old analyzer). 
+ context->setSetting("allow_experimental_analyzer", false); + if (options.shard_num) context->addSpecialScalar( "_shard_num", diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 7bee497f6da..0f24888cb79 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -75,7 +75,6 @@ #include #include -#include #include #include #include @@ -214,11 +213,11 @@ InterpreterSelectQuery::InterpreterSelectQuery( {} InterpreterSelectQuery::InterpreterSelectQuery( - const ASTPtr & query_ptr_, - const ContextPtr & context_, - Pipe input_pipe_, - const SelectQueryOptions & options_) - : InterpreterSelectQuery(query_ptr_, context_, std::move(input_pipe_), nullptr, options_.copy().noSubquery()) + const ASTPtr & query_ptr_, + const ContextPtr & context_, + Pipe input_pipe_, + const SelectQueryOptions & options_) + : InterpreterSelectQuery(query_ptr_, context_, std::move(input_pipe_), nullptr, options_.copy().noSubquery()) {} InterpreterSelectQuery::InterpreterSelectQuery( @@ -227,18 +226,15 @@ InterpreterSelectQuery::InterpreterSelectQuery( const StoragePtr & storage_, const StorageMetadataPtr & metadata_snapshot_, const SelectQueryOptions & options_) - : InterpreterSelectQuery( - query_ptr_, context_, std::nullopt, storage_, options_.copy().noSubquery(), {}, metadata_snapshot_) -{ -} + : InterpreterSelectQuery(query_ptr_, context_, std::nullopt, storage_, options_.copy().noSubquery(), {}, metadata_snapshot_) +{} InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const ContextPtr & context_, const SelectQueryOptions & options_, PreparedSetsPtr prepared_sets_) - : InterpreterSelectQuery( - query_ptr_, context_, std::nullopt, nullptr, options_, {}, {}, prepared_sets_) + : InterpreterSelectQuery(query_ptr_, context_, std::nullopt, nullptr, options_, {}, {}, prepared_sets_) {} InterpreterSelectQuery::~InterpreterSelectQuery() = default; diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index d4ed19d45ea..ac1230a6eba 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -26,7 +26,6 @@ class Logger; namespace DB { -class SubqueryForSet; class InterpreterSelectWithUnionQuery; class Context; class QueryPlan; diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index ac091e7cf3c..92f6f17583d 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -172,7 +172,7 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType /// with subqueries it's possible that new analyzer will be enabled in ::read method /// of underlying storage when all other parts of infra are not ready for it /// (built with old analyzer). 
- context_copy->setSetting("allow_experimental_analyzer", Field{0}); + context_copy->setSetting("allow_experimental_analyzer", false); auto syntax_analyzer_result = TreeRewriter(context_copy).analyze(ast, columns); ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy); auto dag = analyzer.getActionsDAG(false); From d50742eadc1234bf75fa76e7b21aec0f8763851e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 06:52:34 +0200 Subject: [PATCH 0706/2361] Better --- src/Interpreters/IInterpreterUnionOrSelectQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index bc7b07ca18d..f64b9540dbb 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -24,7 +24,7 @@ IInterpreterUnionOrSelectQuery::IInterpreterUnionOrSelectQuery(const ASTPtr & qu , options(options_) , max_streams(context->getSettingsRef().max_threads) { - /// FIXME All code here will work with the old analyzer, however for views over Distributed tables + /// FIXME All code here will work with the old analyzer, however for views over Distributed tables /// it's possible that new analyzer will be enabled in ::getQueryProcessingStage method /// of the underlying storage when all other parts of infrastructure are not ready for it /// (built with old analyzer). From 133e734d73b87707ccba17bd488cf60e1895abe5 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 22 Jul 2024 21:53:22 +0200 Subject: [PATCH 0707/2361] Move to integration test --- tests/integration/test_keeper_map/test.py | 21 +++++++++++++++++++ ...8_keepermap_incomplete_data_drop.reference | 0 .../03208_keepermap_incomplete_data_drop.sql | 8 ------- 3 files changed, 21 insertions(+), 8 deletions(-) delete mode 100644 tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.reference delete mode 100644 tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql diff --git a/tests/integration/test_keeper_map/test.py b/tests/integration/test_keeper_map/test.py index d7b4230d872..31316af7b1e 100644 --- a/tests/integration/test_keeper_map/test.py +++ b/tests/integration/test_keeper_map/test.py @@ -104,3 +104,24 @@ def test_keeper_map_without_zk(started_cluster): node.query("DETACH TABLE test_keeper_map_without_zk") client.stop() + + +def test_keeper_map_with_failed_drop(started_cluster): + run_query( + "CREATE TABLE test_keeper_map_with_failed_drop (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_with_failed_drop') PRIMARY KEY(key);" + ) + + run_query("INSERT INTO test_keeper_map_with_failed_drop VALUES (1, 11)") + run_query("SYSTEM ENABLE FAILPOINT keepermap_fail_drop_data") + node.query("DROP TABLE test_keeper_map_with_failed_drop SYNC") + + zk_client = get_genuine_zk() + assert ( + zk_client.get("/test_keeper_map/test_keeper_map_with_failed_drop/data") + is not None + ) + + run_query("SYSTEM DISABLE FAILPOINT keepermap_fail_drop_data") + run_query( + "CREATE TABLE test_keeper_map_with_failed_drop_another (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_with_failed_drop') PRIMARY KEY(key);" + ) diff --git a/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.reference b/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql 
b/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql deleted file mode 100644 index e8d4a5bc298..00000000000 --- a/tests/queries/0_stateless/03208_keepermap_incomplete_data_drop.sql +++ /dev/null @@ -1,8 +0,0 @@ -DROP TABLE IF EXISTS 03208_keepermap_test SYNC; - -CREATE TABLE 03208_keepermap_test (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test03208') PRIMARY KEY(key); -INSERT INTO 03208_keepermap_test VALUES (1, 11); -SYSTEM ENABLE FAILPOINT keepermap_fail_drop_data; -DROP TABLE 03208_keepermap_test SYNC; -- { KEEPER_EXCEPTION } -SYSTEM DISABLE FAILPOINT keepermap_fail_drop_data; -CREATE TABLE 03208_keepermap_test_another (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test03208') PRIMARY KEY(key); From 9d7bb2a430b142e0761cc30efcf78ea1bbeb6871 Mon Sep 17 00:00:00 2001 From: JackyWoo Date: Tue, 23 Jul 2024 15:07:18 +0800 Subject: [PATCH 0708/2361] Move unit tests to gtest_convertFieldToType.cpp --- .../tests/gtest_convertFieldToType.cpp | 83 ++++++++++++++++++- src/Storages/Statistics/Statistics.cpp | 12 +-- .../Statistics/StatisticsCountMinSketch.cpp | 17 ++-- .../Statistics/StatisticsCountMinSketch.h | 4 +- src/Storages/Statistics/StatisticsTDigest.cpp | 6 +- src/Storages/Statistics/StatisticsTDigest.h | 4 +- src/Storages/Statistics/StatisticsUniq.cpp | 4 +- src/Storages/Statistics/StatisticsUniq.h | 4 +- src/Storages/Statistics/tests/gtest_stats.cpp | 47 ----------- 9 files changed, 109 insertions(+), 72 deletions(-) diff --git a/src/Interpreters/tests/gtest_convertFieldToType.cpp b/src/Interpreters/tests/gtest_convertFieldToType.cpp index c8a9d5aa2c0..0cac9a3b59d 100644 --- a/src/Interpreters/tests/gtest_convertFieldToType.cpp +++ b/src/Interpreters/tests/gtest_convertFieldToType.cpp @@ -147,7 +147,7 @@ INSTANTIATE_TEST_SUITE_P( DecimalField(DateTime64(123 * Day * 1'000'000), 6) } }) - ); +); INSTANTIATE_TEST_SUITE_P( DateTimeToDateTime64, @@ -179,3 +179,84 @@ INSTANTIATE_TEST_SUITE_P( }, }) ); + +INSTANTIATE_TEST_SUITE_P( + StringToNumber, + ConvertFieldToTypeTest, + ::testing::ValuesIn(std::initializer_list{ + { + "String", + Field("1"), + "Int8", + Field(1) + }, + { + "String", + Field("256"), + "Int8", + Field() + }, + { + "String", + Field("not a number"), + "Int8", + {} + }, + { + "String", + Field("1.1"), + "Int8", + {} /// we can not convert '1.1' to Int8 + }, + { + "String", + Field("1.1"), + "Float64", + Field(1.1) + }, + }) +); + +INSTANTIATE_TEST_SUITE_P( + NumberToString, + ConvertFieldToTypeTest, + ::testing::ValuesIn(std::initializer_list{ + { + "Int8", + Field(1), + "String", + Field("1") + }, + { + "Int8", + Field(-1), + "String", + Field("-1") + }, + { + "Float64", + Field(1.1), + "String", + Field("1.1") + }, + }) +); + +INSTANTIATE_TEST_SUITE_P( + StringToDate, + ConvertFieldToTypeTest, + ::testing::ValuesIn(std::initializer_list{ + { + "String", + Field("2024-07-12"), + "Date", + Field(static_cast(19916)) + }, + { + "String", + Field("not a date"), + "Date", + {} + }, + }) +); diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 588e20e801f..ade3326288a 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -204,15 +204,15 @@ void MergeTreeStatisticsFactory::registerValidator(StatisticsType stats_type, Va MergeTreeStatisticsFactory::MergeTreeStatisticsFactory() { - registerValidator(StatisticsType::TDigest, TDigestValidator); - registerCreator(StatisticsType::TDigest, TDigestCreator); + 
registerValidator(StatisticsType::TDigest, tdigestValidator); + registerCreator(StatisticsType::TDigest, tdigestCreator); - registerValidator(StatisticsType::Uniq, UniqValidator); - registerCreator(StatisticsType::Uniq, UniqCreator); + registerValidator(StatisticsType::Uniq, uniqValidator); + registerCreator(StatisticsType::Uniq, uniqCreator); #if USE_DATASKETCHES - registerValidator(StatisticsType::CountMinSketch, CountMinSketchValidator); - registerCreator(StatisticsType::CountMinSketch, CountMinSketchCreator); + registerValidator(StatisticsType::CountMinSketch, countMinSketchValidator); + registerCreator(StatisticsType::CountMinSketch, countMinSketchCreator); #endif } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index 95a8ceda8c8..e69bbc1515b 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -17,13 +17,18 @@ extern const int LOGICAL_ERROR; extern const int ILLEGAL_STATISTICS; } -/// Constants chosen based on rolling dices, which provides an error tolerance of 0.1% (ε = 0.001) and a confidence level of 99.9% (δ = 0.001). +/// Constants chosen based on rolling dices. +/// The values provides: +/// 1. an error tolerance of 0.1% (ε = 0.001) +/// 2. a confidence level of 99.9% (δ = 0.001). /// And sketch the size is 152kb. static constexpr auto num_hashes = 7uz; static constexpr auto num_buckets = 2718uz; StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) - : IStatistics(stat_), sketch(num_hashes, num_buckets), data_type(data_type_) + : IStatistics(stat_) + , sketch(num_hashes, num_buckets) + , data_type(data_type_) { } @@ -50,8 +55,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const void StatisticsCountMinSketch::update(const ColumnPtr & column) { - size_t size = column->size(); - for (size_t row = 0; row < size; ++row) + for (size_t row = 0; row < column->size(); ++row) { if (column->isNullAt(row)) continue; @@ -80,16 +84,15 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) } -void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void countMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); - /// Data types of Numeric, String family, IPv4, IPv6, Date family, Enum family are supported. 
if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } -StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr countMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { return std::make_shared(stat, data_type); } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index aa71c643c05..6c8b74f8c35 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -31,8 +31,8 @@ private: }; -void CountMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr CountMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void countMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr countMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); } diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index e3a59f3251a..66150e00fdb 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -46,7 +46,7 @@ Float64 StatisticsTDigest::estimateLess(const Field & val) const auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); if (val_as_float) return t_digest.getCountLessThan(*val_as_float); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimate value of type {}", val.getTypeName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); } Float64 StatisticsTDigest::estimateEqual(const Field & val) const @@ -57,7 +57,7 @@ Float64 StatisticsTDigest::estimateEqual(const Field & val) const throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); } -void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void tdigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -65,7 +65,7 @@ void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' do not support type {}", data_type->getName()); } -StatisticsPtr TDigestCreator(const SingleStatisticsDescription & stat, DataTypePtr) +StatisticsPtr tdigestCreator(const SingleStatisticsDescription & stat, DataTypePtr) { return std::make_shared(stat); } diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index 801d0787eaf..614973e5d8b 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -23,7 +23,7 @@ private: QuantileTDigest t_digest; }; -void TDigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr TDigestCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void tdigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr tdigestCreator(const SingleStatisticsDescription & stat, DataTypePtr); } diff --git a/src/Storages/Statistics/StatisticsUniq.cpp 
b/src/Storages/Statistics/StatisticsUniq.cpp index 9eea1dec39b..8f60ffcf0b5 100644 --- a/src/Storages/Statistics/StatisticsUniq.cpp +++ b/src/Storages/Statistics/StatisticsUniq.cpp @@ -52,7 +52,7 @@ UInt64 StatisticsUniq::estimateCardinality() const return column->getUInt(0); } -void UniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void uniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -60,7 +60,7 @@ void UniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'uniq' do not support type {}", data_type->getName()); } -StatisticsPtr UniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr uniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { return std::make_shared(stat, data_type); } diff --git a/src/Storages/Statistics/StatisticsUniq.h b/src/Storages/Statistics/StatisticsUniq.h index 5290585bd94..faabde8d47c 100644 --- a/src/Storages/Statistics/StatisticsUniq.h +++ b/src/Storages/Statistics/StatisticsUniq.h @@ -27,7 +27,7 @@ private: }; -void UniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr UniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); +void uniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr uniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); } diff --git a/src/Storages/Statistics/tests/gtest_stats.cpp b/src/Storages/Statistics/tests/gtest_stats.cpp index 9b9fae83109..e55c52c49f3 100644 --- a/src/Storages/Statistics/tests/gtest_stats.cpp +++ b/src/Storages/Statistics/tests/gtest_stats.cpp @@ -44,50 +44,3 @@ TEST(Statistics, TDigestLessThan) std::reverse(data.begin(), data.end()); test_less_than(data, {-1, 1e9, 50000.0, 3000.0, 30.0}, {0, 100000, 50000, 3000, 30}, {0, 0, 0.001, 0.001, 0.001}); } - -using Fields = std::vector; - -template -void testConvertFieldToDataType(const DataTypePtr & data_type, const Fields & fields, const T & expected_value, bool convert_failed = false) -{ - for (const auto & field : fields) - { - Field converted_value; - try - { - converted_value = convertFieldToType(field, *data_type); - } - catch(...) 
- { - ASSERT_TRUE(convert_failed); - } - if (convert_failed) - ASSERT_TRUE(converted_value.isNull()); - else - ASSERT_EQ(converted_value.template get(), expected_value); - } -} - -TEST(Statistics, convertFieldToType) -{ - Fields fields; - - auto data_type_int8 = DataTypeFactory::instance().get("Int8"); - fields = {1, 1.0, "1"}; - testConvertFieldToDataType(data_type_int8, fields, static_cast(1)); - - fields = {256, 1.1, "not a number"}; - testConvertFieldToDataType(data_type_int8, fields, static_cast(1), true); - - auto data_type_float64 = DataTypeFactory::instance().get("Float64"); - fields = {1, 1.0, "1.0"}; - testConvertFieldToDataType(data_type_float64, fields, static_cast(1.0)); - - auto data_type_string = DataTypeFactory::instance().get("String"); - fields = {1, "1"}; - testConvertFieldToDataType(data_type_string, fields, static_cast("1")); - - auto data_type_date = DataTypeFactory::instance().get("Date"); - fields = {"2024-07-12", 19916}; - testConvertFieldToDataType(data_type_date, fields, static_cast(19916)); -} From 8fb560d2575c121b252ab1e6d8e13f9486dc2b38 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov Date: Tue, 23 Jul 2024 07:08:58 +0000 Subject: [PATCH 0709/2361] add replica sync --- tests/integration/test_broken_projections/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_broken_projections/test.py b/tests/integration/test_broken_projections/test.py index e198f98e4c5..a565f47449f 100644 --- a/tests/integration/test_broken_projections/test.py +++ b/tests/integration/test_broken_projections/test.py @@ -433,6 +433,7 @@ def test_broken_ignored_replicated(cluster): check(node, table_name, 1) create_table(node, table_name2, 2, table_name) + node.query(f"system sync replica {table_name}") check(node, table_name2, 1) break_projection(node, table_name, "proj1", "all_0_0_0", "data") From d7ffbab7c4eab820b303bd80f6b52e856f4e1d47 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 23 Jul 2024 09:23:18 +0200 Subject: [PATCH 0710/2361] Add test from #66378 Co-authored-by: Azat Khuzhin --- src/Parsers/ASTTablesInSelectQuery.cpp | 1 + .../0_stateless/03204_format_join_on.reference | 4 ++++ tests/queries/0_stateless/03204_format_join_on.sh | 15 +++++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 tests/queries/0_stateless/03204_format_join_on.reference create mode 100644 tests/queries/0_stateless/03204_format_join_on.sh diff --git a/src/Parsers/ASTTablesInSelectQuery.cpp b/src/Parsers/ASTTablesInSelectQuery.cpp index dbb2a008bae..b6d42513aa7 100644 --- a/src/Parsers/ASTTablesInSelectQuery.cpp +++ b/src/Parsers/ASTTablesInSelectQuery.cpp @@ -235,6 +235,7 @@ void ASTTableJoin::formatImplAfterTable(const FormatSettings & settings, FormatS else if (on_expression) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? 
hilite_none : ""); + /// If there is an alias for the whole expression parens should be added, otherwise it will be invalid syntax bool on_has_alias = !on_expression->tryGetAlias().empty(); if (on_has_alias) settings.ostr << "("; diff --git a/tests/queries/0_stateless/03204_format_join_on.reference b/tests/queries/0_stateless/03204_format_join_on.reference new file mode 100644 index 00000000000..846f36fcca4 --- /dev/null +++ b/tests/queries/0_stateless/03204_format_join_on.reference @@ -0,0 +1,4 @@ +SELECT * FROM t1 INNER JOIN t2 ON ((t1.x = t2.x) AND (t1.x IS NULL) AS e2) +SELECT * FROM t1 INNER JOIN t2 ON ((t1.x = t2.x) AND (t1.x IS NULL) AS e2) +SELECT * FROM t1 INNER JOIN t2 ON (t1.x = t2.x) AND ((t1.x IS NULL) AS e2) +SELECT * FROM t1 INNER JOIN t2 ON t1.x = t2.x diff --git a/tests/queries/0_stateless/03204_format_join_on.sh b/tests/queries/0_stateless/03204_format_join_on.sh new file mode 100644 index 00000000000..87b0afac042 --- /dev/null +++ b/tests/queries/0_stateless/03204_format_join_on.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# regression for the JOIN ON alias for the whole expression +phase1="$($CLICKHOUSE_FORMAT --oneline --query "SELECT * FROM t1 JOIN t2 ON ((t1.x = t2.x) AND (t1.x IS NULL) AS e2)")" +echo "$phase1" +# phase 2 +$CLICKHOUSE_FORMAT --oneline --query "$phase1" + +# other test cases +$CLICKHOUSE_FORMAT --oneline --query "SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x) AND (t1.x IS NULL AS e2)" +$CLICKHOUSE_FORMAT --oneline --query "SELECT * FROM t1 JOIN t2 ON t1.x = t2.x" From 2b56cbc1fdff42e31a9fee80f931726a5123e675 Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 23 Jul 2024 09:42:58 +0200 Subject: [PATCH 0711/2361] CI: Add ec2 instance lifecycle metadata to CIDB --- tests/ci/clickhouse_helper.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index 91ea5c6d5d3..287970cce9a 100644 --- a/tests/ci/clickhouse_helper.py +++ b/tests/ci/clickhouse_helper.py @@ -197,6 +197,10 @@ def get_instance_id(): return _query_imds("latest/meta-data/instance-id") +def get_instance_lifecycle(): + return _query_imds("latest/meta-data/instance-life-cycle") + + def prepare_tests_results_for_clickhouse( pr_info: PRInfo, test_results: TestResults, @@ -233,7 +237,7 @@ def prepare_tests_results_for_clickhouse( "head_ref": head_ref, "head_repo": head_repo, "task_url": pr_info.task_url, - "instance_type": get_instance_type(), + "instance_type": ",".join([get_instance_type(), get_instance_lifecycle()]), "instance_id": get_instance_id(), } From 223eee3f46b07c38de3223fb56575f9ecbc5bea7 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 23 Jul 2024 07:57:03 +0000 Subject: [PATCH 0712/2361] Comment to new IProcessor method --- src/Processors/IProcessor.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 0776921a814..94e93595f4e 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -221,6 +221,21 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'schedule' is not implemented for {} processor", getName()); } + /* The method is called right after asynchronous job is done + * i.e. when file descriptor returned by schedule() is readable. + * The sequence of method calls: + * ... prepare() -> schedule() -> onAsyncJobReady() -> work() ... 
+ * See also comment to schedule() method + * + * It allows doing some preprocessing immediately after asynchronous job is done. + * The implementation should return control quickly, to avoid blocking another asynchronous completed jobs + * created by the same pipeline. + * + * Example, scheduling tasks for remote workers (file descriptor in this case is a socket) + * When the remote worker asks for the next task, doing it in onAsyncJobReady() we can provide it immediately. + * Otherwise, the returning of the next task for the remote worker can be delayed by current work done in the pipeline + * i.e. processor->work(), which will create unnecessary latency in query processing by remote workers Not Committed Yet + */ virtual void onAsyncJobReady() {} /** You must call this method if 'prepare' returned ExpandPipeline. From b6ad57aa37f01ed4d101bd059b04222f361245ff Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Mon, 22 Jul 2024 19:25:34 +0200 Subject: [PATCH 0713/2361] Stateless tests: change status for failed tests in case of server crash --- .../util/process_functional_tests_result.py | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index 4442c9d7d9e..dbe50eeade0 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -12,6 +12,7 @@ UNKNOWN_SIGN = "[ UNKNOWN " SKIPPED_SIGN = "[ SKIPPED " HUNG_SIGN = "Found hung queries in processlist" SERVER_DIED_SIGN = "Server died, terminating all processes" +SERVER_DIED_SIGN2 = "Server does not respond to health check" DATABASE_SIGN = "Database: " SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"] @@ -43,7 +44,7 @@ def process_test_log(log_path, broken_tests): if HUNG_SIGN in line: hung = True break - if SERVER_DIED_SIGN in line: + if SERVER_DIED_SIGN in line or SERVER_DIED_SIGN2 in line: server_died = True if RETRIES_SIGN in line: retries = True @@ -111,12 +112,12 @@ def process_test_log(log_path, broken_tests): # Python does not support TSV, so we have to escape '\t' and '\n' manually # and hope that complex escape sequences will not break anything test_results = [ - ( + [ test[0], test[1], test[2], "".join(test[3])[:4096].replace("\t", "\\t").replace("\n", "\\n"), - ) + ] for test in test_results ] @@ -170,18 +171,24 @@ def process_result(result_path, broken_tests): if hung: description = "Some queries hung, " state = "failure" - test_results.append(("Some queries hung", "FAIL", "0", "")) + test_results.append(["Some queries hung", "FAIL", "0", ""]) elif server_died: description = "Server died, " state = "failure" - test_results.append(("Server died", "FAIL", "0", "")) + # When ClickHouse server crashes, some tests are still running + # and fail because they cannot connect to server + for result in test_results: + if result[1] == "FAIL": + result[1] = "SERVER_DIED" + + test_results.append(["Server died", "FAIL", "0", ""]) elif not success_finish: description = "Tests are not finished, " state = "failure" - test_results.append(("Tests are not finished", "FAIL", "0", "")) + test_results.append(["Tests are not finished", "FAIL", "0", ""]) elif retries: description = "Some tests restarted, " - test_results.append(("Some tests restarted", "SKIPPED", "0", "")) + test_results.append(["Some tests restarted", "SKIPPED", "0", ""]) else: description = "" @@ -233,11 +240,12 @@ if __name__ == "__main__": # sort by status then 
by check name order = { "FAIL": 0, - "Timeout": 1, - "NOT_FAILED": 2, - "BROKEN": 3, - "OK": 4, - "SKIPPED": 5, + "SERVER_DIED": 1, + "Timeout": 2, + "NOT_FAILED": 3, + "BROKEN": 4, + "OK": 5, + "SKIPPED": 6, } return order.get(item[1], 10), str(item[0]), item[1] From 492dab5e5d4fb775d8f3551e990fc078929c2bd8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 23 Jul 2024 01:52:01 +0200 Subject: [PATCH 0714/2361] Update tests/queries/0_stateless/02992_all_columns_should_have_comment.sql --- .../0_stateless/02992_all_columns_should_have_comment.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql b/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql index dcb7c09a973..0d34b033354 100644 --- a/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql +++ b/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql @@ -3,6 +3,6 @@ SELECT 'Column ' || name || ' from table ' || concat(database, '.', table) || ' FROM system.columns WHERE (database = 'system') AND (comment = '') AND - (table NOT ILIKE '%_log_%') AND + (table NOT ILIKE '%\_log\_%') AND (table NOT IN ('numbers', 'numbers_mt', 'one', 'generate_series', 'generateSeries', 'coverage_log', 'filesystem_read_prefetches_log')) AND (default_kind != 'ALIAS'); From b164014bf99278f6e1bac67d3066d77aefc229c2 Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 23 Jul 2024 11:00:05 +0200 Subject: [PATCH 0715/2361] CI: Remove ci runners scripts from oss --- tests/ci/worker/.gitignore | 1 - tests/ci/worker/deploy-runner-init.sh | 87 ------ tests/ci/worker/init_runner.sh | 406 -------------------------- 3 files changed, 494 deletions(-) delete mode 100644 tests/ci/worker/.gitignore delete mode 100755 tests/ci/worker/deploy-runner-init.sh delete mode 100644 tests/ci/worker/init_runner.sh diff --git a/tests/ci/worker/.gitignore b/tests/ci/worker/.gitignore deleted file mode 100644 index 4ed18989e78..00000000000 --- a/tests/ci/worker/.gitignore +++ /dev/null @@ -1 +0,0 @@ -generated_*init_runner.sh diff --git a/tests/ci/worker/deploy-runner-init.sh b/tests/ci/worker/deploy-runner-init.sh deleted file mode 100755 index 96fbd82a99c..00000000000 --- a/tests/ci/worker/deploy-runner-init.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash - -set -e - -usage() { - echo "Usage: $0 ENVIRONMENT" >&2 - echo "Valid values for ENVIRONMENT: staging, production" >&2 - exit 1 -} - -case "$1" in - staging|production) - ENVIRONMENT="$1" ;; - --help) - usage ;; - *) - echo "Invalid argument" >&2 - usage ;; -esac - -cd "$(dirname "$0")" || exit 1 -SOURCE_SCRIPT='init_runner.sh' - -check_response() { - # Are we even in the interactive shell? 
- [ -t 1 ] || return 1 - local request - request="$1" - read -rp "$request (y/N): " response - case "$response" in - [Yy]) - return 0 - # Your code to continue goes here - ;; - *) - return 1 - ;; - esac -} - -check_dirty() { - if [ -n "$(git status --porcelain=v2 "$SOURCE_SCRIPT")" ]; then - echo "The $SOURCE_SCRIPT has uncommited changes, won't deploy it" >&2 - exit 1 - fi -} -GIT_HASH=$(git log -1 --format=format:%H) - -header() { - cat << EOF -#!/usr/bin/env bash - -echo 'The $ENVIRONMENT script is generated from $SOURCE_SCRIPT, commit $GIT_HASH' - -EOF -} - -body() { - local first_line - first_line=$(sed -n '/^# THE SCRIPT START$/{=;q;}' "$SOURCE_SCRIPT") - if [ -z "$first_line" ]; then - echo "The pattern '# THE SCRIPT START' is not found in $SOURCE_SCRIPT" >&2 - exit 1 - fi - tail "+$first_line" "$SOURCE_SCRIPT" -} - -GENERATED_FILE="generated_${ENVIRONMENT}_${SOURCE_SCRIPT}" - -{ header && body; } > "$GENERATED_FILE" - -echo "The file $GENERATED_FILE is generated" - -if check_response "Display the content of $GENERATED_FILE?"; then - if [ -z "$PAGER" ]; then - less "$GENERATED_FILE" - else - $PAGER "$GENERATED_FILE" - fi -fi - -check_dirty - -S3_OBJECT=${S3_OBJECT:-s3://github-runners-data/cloud-init/${ENVIRONMENT}.sh} -if check_response "Deploy the generated script to $S3_OBJECT?"; then - aws s3 mv "$GENERATED_FILE" "$S3_OBJECT" -fi diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh deleted file mode 100644 index 5177e112edd..00000000000 --- a/tests/ci/worker/init_runner.sh +++ /dev/null @@ -1,406 +0,0 @@ -#!/usr/bin/env bash - -cat > /dev/null << 'EOF' -The following content is embedded into the s3 object via the script -deploy-runner-init.sh {staging,production} -with additional helping information - -In the `user data` you should define as the following text -between `### COPY BELOW` and `### COPY ABOVE` - -### COPY BELOW -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -INSTANCE_ID=$(ec2metadata --instance-id) -INIT_ENVIRONMENT=$(/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='github:init-environment'].Value" --output text) -echo "Downloading and using $INIT_ENVIRONMENT cloud-init.sh" -aws s3 cp "s3://github-runners-data/cloud-init/${INIT_ENVIRONMENT:-production}.sh" /tmp/cloud-init.sh -chmod 0700 /tmp/cloud-init.sh -exec bash /tmp/cloud-init.sh ---// -### COPY ABOVE -EOF - -# THE SCRIPT START - -set -uo pipefail - -#################################### -# IMPORTANT! 
# -# EC2 instance should have # -# `github:runner-type` tag # -# set accordingly to a runner role # -#################################### - -echo "Running init v1.1" -export DEBIAN_FRONTEND=noninteractive -export RUNNER_HOME=/home/ubuntu/actions-runner - -export RUNNER_ORG="ClickHouse" -export RUNNER_URL="https://github.com/${RUNNER_ORG}" -# Funny fact, but metadata service has fixed IP -INSTANCE_ID=$(ec2metadata --instance-id) -export INSTANCE_ID - -bash /usr/local/share/scripts/init-network.sh - -# combine labels -RUNNER_TYPE=$(/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='github:runner-type'].Value" --output text) -LABELS="self-hosted,Linux,$(uname -m),$RUNNER_TYPE" -export LABELS -echo "Instance Labels: $LABELS" - -LIFE_CYCLE=$(curl -s --fail http://169.254.169.254/latest/meta-data/instance-life-cycle) -export LIFE_CYCLE -echo "Instance lifecycle: $LIFE_CYCLE" - -INSTANCE_TYPE=$(ec2metadata --instance-type) -echo "Instance type: $INSTANCE_TYPE" - -# Refresh CloudWatch agent config -aws ssm get-parameter --region us-east-1 --name AmazonCloudWatch-github-runners --query 'Parameter.Value' --output text > /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json -systemctl restart amazon-cloudwatch-agent.service - -# Refresh teams ssh keys -TEAM_KEYS_URL=$(aws ssm get-parameter --region us-east-1 --name team-keys-url --query 'Parameter.Value' --output=text) -curl -s "${TEAM_KEYS_URL}" > /home/ubuntu/.ssh/authorized_keys2 -chown ubuntu: /home/ubuntu/.ssh -R - - -# Create a pre-run script that will provide diagnostics info -mkdir -p /tmp/actions-hooks -cat > /tmp/actions-hooks/common.sh << 'EOF' -#!/bin/bash -EOF - -terminate_delayed() { - # The function for post hook to gracefully finish the job and then tear down - # The very specific sleep time is used later to determine in the main loop if - # the instance is tearing down - # IF `sleep` IS CHANGED, CHANGE ANOTHER VALUE IN `pgrep` - sleep=13.14159265358979323846 - echo "Going to terminate the runner's instance in $sleep seconds" - # We execute it with `at` to not have it as an orphan process, but launched independently - # GH Runners kill all remain processes - echo "sleep '$sleep'; aws ec2 terminate-instances --instance-ids $INSTANCE_ID" | at now || \ - aws ec2 terminate-instances --instance-ids "$INSTANCE_ID" # workaround for complete out of space or non-installed `at` - exit 0 -} - -detect_delayed_termination() { - # The function look for very specific sleep with pi - if pgrep 'sleep 13.14159265358979323846'; then - echo 'The instance has delayed termination, sleep the same time to wait if it goes down' - sleep 14 - fi -} - -declare -f terminate_delayed >> /tmp/actions-hooks/common.sh - -terminate_and_exit() { - # Terminate instance and exit from the script instantly - echo "Going to terminate the runner's instance" - aws ec2 terminate-instances --instance-ids "$INSTANCE_ID" - exit 0 -} - -terminate_decrease_and_exit() { - # Terminate instance and exit from the script instantly - echo "Going to terminate the runner's instance and decrease asg capacity" - aws autoscaling terminate-instance-in-auto-scaling-group --instance-id "$INSTANCE_ID" --should-decrement-desired-capacity - exit 0 -} - -declare -f terminate_and_exit >> /tmp/actions-hooks/common.sh - -check_spot_instance_is_old() { - if [ "$LIFE_CYCLE" == "spot" ]; then - local UPTIME - UPTIME=$(< /proc/uptime) - UPTIME=${UPTIME%%.*} - if (( 3600 < UPTIME )); then - echo "The spot instance has uptime $UPTIME, it's 
time to shut it down" - return 0 - fi - fi - return 1 -} - -check_proceed_spot_termination() { - # The function checks and proceeds spot instance termination if exists - # The event for spot instance termination - local FORCE - FORCE=${1:-} - if TERMINATION_DATA=$(curl -s --fail http://169.254.169.254/latest/meta-data/spot/instance-action); then - # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-instance-termination-notices.html#instance-action-metadata - _action=$(jq '.action' -r <<< "$TERMINATION_DATA") - _time=$(jq '.time | fromdate' <<< "$TERMINATION_DATA") - _until_action=$((_time - $(date +%s))) - echo "Received the '$_action' event that will be effective in $_until_action seconds" - if (( _until_action <= 30 )) || [ "$FORCE" == "force" ]; then - echo "The action $_action will be done in $_until_action, killing the runner and exit" - local runner_pid - runner_pid=$(pgrep Runner.Listener) - if [ -n "$runner_pid" ]; then - # Kill the runner to not allow it cancelling the job - # shellcheck disable=SC2046 - kill -9 "$runner_pid" $(list_children "$runner_pid") - fi - sudo -u ubuntu ./config.sh remove --token "$(get_runner_token)" - terminate_and_exit - fi - fi -} - -no_terminating_metadata() { - # The function check that instance could continue work - # Returns 1 if any of termination events are received - - # The event for rebalance recommendation. Not strict, so we have some room to make a decision here - if curl -s --fail http://169.254.169.254/latest/meta-data/events/recommendations/rebalance; then - echo 'Received recommendation to rebalance, checking the uptime' - local UPTIME - UPTIME=$(< /proc/uptime) - UPTIME=${UPTIME%%.*} - # We don't shutdown the instances younger than 30m - if (( 1800 < UPTIME )); then - # To not shutdown everything at once, use the 66% to survive - if (( $((RANDOM % 3)) == 0 )); then - echo 'The instance is older than 30m and won the roulette' - return 1 - fi - echo 'The instance is older than 30m, but is not chosen for rebalance' - else - echo 'The instance is younger than 30m, do not shut it down' - fi - fi - - # Checks if the ASG in a lifecycle hook state - local ASG_STATUS - ASG_STATUS=$(curl -s http://169.254.169.254/latest/meta-data/autoscaling/target-lifecycle-state) - if [ "$ASG_STATUS" == "Terminated" ]; then - echo 'The instance in ASG status Terminating:Wait' - return 1 - fi -} - -terminate_on_event() { - # If there is a rebalance event, then the instance could die soon - # Let's don't wait for it and terminate proactively - if curl -s --fail http://169.254.169.254/latest/meta-data/events/recommendations/rebalance; then - terminate_and_exit - fi - - # Here we check if the autoscaling group marked the instance for termination, and it's wait for the job to finish - ASG_STATUS=$(curl -s http://169.254.169.254/latest/meta-data/autoscaling/target-lifecycle-state) - if [ "$ASG_STATUS" == "Terminated" ]; then - INSTANCE_ID=$(ec2metadata --instance-id) - ASG_NAME=$(aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='aws:autoscaling:groupName'].Value" --output text) - LIFECYCLE_HOOKS=$(aws autoscaling describe-lifecycle-hooks --auto-scaling-group-name "$ASG_NAME" --query "LifecycleHooks[].LifecycleHookName" --output text) - for LCH in $LIFECYCLE_HOOKS; do - aws autoscaling complete-lifecycle-action --lifecycle-action-result CONTINUE \ - --lifecycle-hook-name "$LCH" --auto-scaling-group-name "$ASG_NAME" \ - --instance-id "$INSTANCE_ID" - true # autoformat issue - done - echo 'The runner is marked as 
"Terminated" by the autoscaling group, we are terminating' - terminate_and_exit - fi -} - -cat > /tmp/actions-hooks/pre-run.sh << EOF -#!/bin/bash -set -uo pipefail - -echo "Runner's public DNS: $(ec2metadata --public-hostname)" -echo "Runner's labels: ${LABELS}" -echo "Runner's instance type: $(ec2metadata --instance-type)" -EOF - -# Create a post-run script that will restart docker daemon before the job started -cat > /tmp/actions-hooks/post-run.sh << 'EOF' -#!/bin/bash -set -xuo pipefail - -source /tmp/actions-hooks/common.sh - -# Free KiB, free percents -ROOT_STAT=($(df / | awk '/\// {print $4 " " int($4/$2 * 100)}')) -if [[ ${ROOT_STAT[0]} -lt 3000000 ]] || [[ ${ROOT_STAT[1]} -lt 5 ]]; then - echo "The runner has ${ROOT_STAT[0]}KiB and ${ROOT_STAT[1]}% of free space on /" - terminate_delayed -fi - -# shellcheck disable=SC2046 -docker ps --quiet | xargs --no-run-if-empty docker kill ||: -# shellcheck disable=SC2046 -docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - -# If we have hanged containers after the previous commands, than we have a hanged one -# and should restart the daemon -if [ "$(docker ps --all --quiet)" ]; then - # Systemd service of docker has StartLimitBurst=3 and StartLimitInterval=60s, - # that's why we try restarting it for long - for i in {1..25}; - do - sudo systemctl restart docker && break || sleep 5 - done - - for i in {1..10} - do - docker info && break || sleep 2 - done - # Last chance, otherwise we have to terminate poor instance - docker info 1>/dev/null || { echo Docker unable to start; terminate_delayed ; } -fi -EOF - -get_runner_token() { - /usr/local/bin/aws ssm get-parameter --name github_runner_registration_token --with-decryption --output text --query Parameter.Value -} - -is_job_assigned() { - local runner_pid - runner_pid=$(pgrep Runner.Listener) - if [ -z "$runner_pid" ]; then - # if runner has finished, it's fine - return 0 - fi - local log_file - log_file=$(lsof -p "$runner_pid" 2>/dev/null | grep -o "$RUNNER_HOME/_diag/Runner.*log") - if [ -z "$log_file" ]; then - # assume, the process is over or just started - return 0 - fi - # So far it's the only solid way to determine that the job is starting - grep -q 'Terminal] .* Running job:' "$log_file" \ - && return 0 \ - || return 1 -} - -list_children () { - local children - children=$(ps --ppid "$1" -o pid=) - if [ -z "$children" ]; then - return - fi - - for pid in $children; do - list_children "$pid" - done - echo "$children" -} - -# There's possibility that it fails because the runner's version is outdated, -# so after the first failure we'll try to launch it with enabled autoupdate. -# -# We'll fail and terminate after 10 consequent failures. -ATTEMPT=0 -# In `kill` 0 means "all processes in process group", -1 is "all but PID 1" -# We use `-2` to get an error -RUNNER_PID=-2 - -while true; do - # Does not send signal, but checks that the process $RUNNER_PID is running - if kill -0 -- $RUNNER_PID; then - ATTEMPT=0 - echo "Runner is working with pid $RUNNER_PID, checking the metadata in background" - check_proceed_spot_termination - - if ! 
is_job_assigned; then - RUNNER_AGE=$(( $(date +%s) - $(stat -c +%Y /proc/"$RUNNER_PID" 2>/dev/null || date +%s) )) - echo "The runner is launched $RUNNER_AGE seconds ago and still hasn't received a job" - if (( 60 < RUNNER_AGE )); then - echo "Attempt to delete the runner for a graceful shutdown" - sudo -u ubuntu ./config.sh remove --token "$(get_runner_token)" \ - || continue - echo "Runner didn't launch or have assigned jobs after ${RUNNER_AGE} seconds, shutting down" - terminate_decrease_and_exit - fi - fi - else - if [ "$RUNNER_PID" != "-2" ]; then - wait $RUNNER_PID \ - && echo "Runner with PID $RUNNER_PID successfully finished" \ - || echo "Attempt $((++ATTEMPT)) to start the runner" - fi - if (( ATTEMPT > 10 )); then - echo "The runner has failed to start after $ATTEMPT attempt. Give up and terminate it" - terminate_and_exit - fi - - cd $RUNNER_HOME || terminate_and_exit - detect_delayed_termination - # If runner is not active, check that it needs to terminate itself - echo "Checking if the instance suppose to terminate" - no_terminating_metadata || terminate_on_event - check_spot_instance_is_old && terminate_and_exit - check_proceed_spot_termination force - - echo "Going to configure runner" - token_args=(--token "$(get_runner_token)") - config_args=( - "${token_args[@]}" --url "$RUNNER_URL" - --ephemeral --unattended --replace --runnergroup Default - --labels "$LABELS" --work _work --name "$INSTANCE_ID" - ) - if (( ATTEMPT > 1 )); then - echo 'The runner failed to start at least once. Removing it and then configuring with autoupdate enabled.' - sudo -u ubuntu ./config.sh remove "${token_args[@]}" - sudo -u ubuntu ./config.sh "${config_args[@]}" - else - echo "Configure runner with disabled autoupdate" - config_args+=("--disableupdate") - sudo -u ubuntu ./config.sh "${config_args[@]}" - fi - - echo "Another one check to avoid race between runner and infrastructure" - no_terminating_metadata || terminate_on_event - check_spot_instance_is_old && terminate_and_exit - check_proceed_spot_termination force - - # There were some failures to start the Job because of trash in _work - rm -rf _work - - # https://github.com/actions/runner/issues/3266 - # We're unable to know if the runner is failed to start. - echo 'Monkey-patching run helpers to get genuine exit code of the runner' - for script in run.sh run-helper.sh.template; do - # shellcheck disable=SC2016 - grep -q 'exit 0$' "$script" && \ - sed 's/exit 0/exit $returnCode/' -i "$script" && \ - echo "Script $script is patched" - done - - echo "Run" - sudo -u ubuntu \ - ACTIONS_RUNNER_HOOK_JOB_STARTED=/tmp/actions-hooks/pre-run.sh \ - ACTIONS_RUNNER_HOOK_JOB_COMPLETED=/tmp/actions-hooks/post-run.sh \ - ./run.sh & - RUNNER_PID=$! 
- - sleep 10 - fi - - sleep 5 -done - -# vim:ts=4:sw=4 From 1c723e7fd7f85cb8bdd4b6103a158229912af164 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:10:47 +0000 Subject: [PATCH 0716/2361] black --- tests/integration/test_storage_s3/test.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 84c887be388..5453ad5a796 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -208,8 +208,12 @@ def test_partition_by_string_column(started_cluster): assert '1,"foo/bar"\n' == get_s3_file_content( started_cluster, bucket, f"{id}/test_foo/bar.csv" ) - assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, f"{id}/test_йцук.csv") - assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, f"{id}/test_你好.csv") + assert '3,"йцук"\n' == get_s3_file_content( + started_cluster, bucket, f"{id}/test_йцук.csv" + ) + assert '78,"你好"\n' == get_s3_file_content( + started_cluster, bucket, f"{id}/test_你好.csv" + ) def test_partition_by_const_column(started_cluster): @@ -895,9 +899,10 @@ def test_storage_s3_get_slow(started_cluster): def test_storage_s3_put_uncompressed(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] - filename = "test_put_uncompressed.bin" + filename = f"{id}/test_put_uncompressed.bin" name = "test_put_uncompressed" data = [ "'Gloria Thompson',99", @@ -949,6 +954,7 @@ def test_storage_s3_put_uncompressed(started_cluster): r = result.strip().split("\t") assert int(r[0]) >= 1, blob_storage_log assert all(col == r[0] for col in r), blob_storage_log + run_query(instance, f"DROP TABLE {name}") @pytest.mark.parametrize( From a000f8f8a13598d8c4dd24043d5029d7a0158ace Mon Sep 17 00:00:00 2001 From: Yohann Jardin Date: Tue, 23 Jul 2024 11:27:16 +0200 Subject: [PATCH 0717/2361] fix stateless test 02346_non_negative_derivative --- tests/queries/0_stateless/02346_non_negative_derivative.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02346_non_negative_derivative.sql b/tests/queries/0_stateless/02346_non_negative_derivative.sql index 704241da16c..ab648f2ee73 100644 --- a/tests/queries/0_stateless/02346_non_negative_derivative.sql +++ b/tests/queries/0_stateless/02346_non_negative_derivative.sql @@ -58,8 +58,8 @@ SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 13 YEAR) OVER (PAR -- test against wrong arguments/types SELECT ts, metric, nonNegativeDerivative(metric, 1, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } SELECT ts, metric, nonNegativeDerivative('string not datetime', ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } -SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError 
BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- cleanup DROP TABLE IF EXISTS nnd; From 7750914f0b85e324dbf5d49d565312f117c02a2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 23 Jul 2024 09:33:58 +0000 Subject: [PATCH 0718/2361] Decrease rate limit In slow tests (sanitizer, debug) it can take 7-8 seconds to start up `clickhouse-client`. In those cases the rate limit doesn't make the wait time longer, because the input will already be waiting in clickhouse-client's stdin by the time it starts to execute the query. --- .../0_stateless/01923_network_receive_time_metric_insert.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh index 4d7e79fae52..a6b83eba27d 100755 --- a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = Memory;" # Rate limit is chosen for the operation to spend more than one second. -seq 1 1000 | pv --quiet --rate-limit 500 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" +seq 1 1000 | pv --quiet --rate-limit 400 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" # We check that the value of NetworkReceiveElapsedMicroseconds correctly includes the time spent waiting for data from the client. 
${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; From bb28a65e9814302307c52d9e9b7ea6d052624fad Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 23 Jul 2024 11:25:19 +0200 Subject: [PATCH 0719/2361] minor fix for wf status reporting --- .github/workflows/backport_branches.yml | 3 --- .github/workflows/master.yml | 3 --- .github/workflows/merge_queue.yml | 3 --- .github/workflows/nightly.yml | 3 --- .github/workflows/pull_request.yml | 3 --- .github/workflows/release_branches.yml | 4 +--- tests/ci/ci_buddy.py | 1 + tests/ci/ci_utils.py | 23 +++++++++++++++++++---- 8 files changed, 21 insertions(+), 22 deletions(-) diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 50f4f503f5d..c602a46d23c 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -272,7 +272,4 @@ jobs: cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - echo "::group::Workflow results" - python3 -m json.tool "$WORKFLOW_RESULT_FILE" - echo "::endgroup::" python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index b28d87ee31f..7c319da6045 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -138,7 +138,4 @@ jobs: cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - echo "::group::Workflow results" - python3 -m json.tool "$WORKFLOW_RESULT_FILE" - echo "::endgroup::" python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml index db89825a99a..4b186241a0e 100644 --- a/.github/workflows/merge_queue.yml +++ b/.github/workflows/merge_queue.yml @@ -111,7 +111,4 @@ jobs: cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - echo "::group::Workflow results" - python3 -m json.tool "$WORKFLOW_RESULT_FILE" - echo "::endgroup::" python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index fd5b5eefcc4..84db3338065 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -57,7 +57,4 @@ jobs: cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - echo "::group::Workflow results" - python3 -m json.tool "$WORKFLOW_RESULT_FILE" - echo "::endgroup::" python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 9930cf6dde4..c7d7b28af38 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -171,9 +171,6 @@ jobs: cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - echo "::group::Workflow results" - python3 -m json.tool "$WORKFLOW_RESULT_FILE" - echo "::endgroup::" python3 ./tests/ci/ci_buddy.py --check-wf-status ################################# Stage Final ################################# diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 50565112825..bca9ff33cd0 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -492,7 +492,5 @@ jobs: cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - echo "::group::Workflow results" - python3 -m json.tool "$WORKFLOW_RESULT_FILE" - echo "::endgroup::" + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index 688c7d59988..dfb5885270a 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -31,6 
+31,7 @@ class CIBuddy: self.sha = pr_info.sha[:10] def check_workflow(self): + GHActions.print_workflow_results() res = GHActions.get_workflow_job_result(GHActions.ActionsNames.RunConfig) if res != GHActions.ActionStatuses.SUCCESS: self.post_job_error("Workflow Configuration Failed", critical=True) diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 1963e3f39d0..3b12fe6974f 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -92,15 +92,30 @@ class GHActions: PENDING = "pending" SUCCESS = "success" - @staticmethod - def get_workflow_job_result(wf_job_name: str) -> Optional[str]: + @classmethod + def _get_workflow_results(cls): if not Path(Envs.WORKFLOW_RESULT_FILE).exists(): print( f"ERROR: Failed to get workflow results from file [{Envs.WORKFLOW_RESULT_FILE}]" ) - return None + return {} with open(Envs.WORKFLOW_RESULT_FILE, "r", encoding="utf-8") as json_file: - res = json.load(json_file) + try: + res = json.load(json_file) + except json.JSONDecodeError as e: + print(f"ERROR: json decoder exception {e}") + return {} + return res + + @classmethod + def print_workflow_results(cls): + res = cls._get_workflow_results() + results = [f"{job}: {data['result']}" for job, data in res.items()] + cls.print_in_group("Workflow results", results) + + @classmethod + def get_workflow_job_result(cls, wf_job_name: str) -> Optional[str]: + res = cls._get_workflow_results() if wf_job_name in res: return res[wf_job_name]["result"] # type: ignore else: From 932033fca9bdacbfdb544fac5389e03fa7732eeb Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Tue, 23 Jul 2024 10:55:45 +0100 Subject: [PATCH 0720/2361] use atomic to avoid data race --- src/Databases/DatabaseReplicatedWorker.cpp | 12 +++++++++--- src/Databases/DatabaseReplicatedWorker.h | 5 +++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index cea2d123f87..a9a74c5f56a 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -32,7 +32,8 @@ DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db bool DatabaseReplicatedDDLWorker::initializeMainThread() { - initialization_duration_timer.emplace(); + initialization_duration_timer.restart(); + initializing.store(true, std::memory_order_release); while (!stop_flag) { @@ -71,7 +72,7 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() initializeReplication(); initialized = true; - initialization_duration_timer.reset(); + initializing.store(false, std::memory_order_relaxed); return true; } catch (...) @@ -81,7 +82,7 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() } } - initialization_duration_timer.reset(); + initializing.store(false, std::memory_order_relaxed); return false; } @@ -463,4 +464,9 @@ UInt32 DatabaseReplicatedDDLWorker::getLogPointer() const return max_id.load(); } +UInt64 DatabaseReplicatedDDLWorker::getCurrentInitializationDurationMs() const +{ + return initializing.load(std::memory_order_acquire) ? initialization_duration_timer.elapsedMilliseconds() : 0; +} + } diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index aea3b71173d..3e5887be825 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -37,7 +37,7 @@ public: UInt32 getLogPointer() const; - UInt64 getCurrentInitializationDurationMs() const { return initialization_duration_timer ? 
initialization_duration_timer->elapsedMilliseconds() : 0; } + UInt64 getCurrentInitializationDurationMs() const; private: bool initializeMainThread() override; void initializeReplication(); @@ -59,7 +59,8 @@ private: /// It will remove "active" node when database is detached zkutil::EphemeralNodeHolderPtr active_node_holder; - std::optional initialization_duration_timer; + Stopwatch initialization_duration_timer; + std::atomic initializing = false; }; } From bc7fd604b7d6489733ed362a9009f91428c064e7 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 10:10:15 +0000 Subject: [PATCH 0721/2361] Fix tests, builds and docs --- docs/en/operations/settings/settings.md | 4 +- docs/en/sql-reference/data-types/index.md | 3 +- .../sql-reference/functions/json-functions.md | 28 +++++----- src/Columns/ColumnObject.cpp | 2 +- .../Serializations/SerializationObject.cpp | 2 +- .../test_distributed_type_object/test.py | 2 +- .../0_stateless/01825_new_type_json_13.sh | 2 +- .../0_stateless/01825_new_type_json_ghdata.sh | 2 +- .../01825_new_type_json_in_array.sql | 1 + .../01825_new_type_json_in_other_types.sh | 2 +- .../01825_new_type_json_nbagames.sh | 2 +- .../queries/0_stateless/03205_json_syntax.sql | 51 ++++++++++++------- .../03209_json_type_horizontal_merges.sh | 2 +- .../03209_json_type_vertical_merges.sh | 2 +- .../0_stateless/03211_nested_json_merges.sh | 2 +- 15 files changed, 63 insertions(+), 44 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 720cf1ce959..e8957a24de0 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5611,13 +5611,13 @@ Default value: `1GiB`. ## use_json_alias_for_old_object_type -When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/object-json.md) type instead of the new [JSON](../../sql-reference/data-types/json.md) type. +When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/json.md) type. This setting requires server restart to take effect when changed. Default value: `false`. ## type_json_skip_duplicated_paths -When enabled, ClickHouse will skip duplicated paths during parsing of [JSON](../../sql-reference/data-types/json.md) object. Only the value of the first occurrence of each path will be inserted. +When enabled, ClickHouse will skip duplicated paths during parsing of [JSON](../../sql-reference/data-types/newjson.md) object. Only the value of the first occurrence of each path will be inserted. 
Default value: `false` diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index fcb0b60d022..2b89dd145e6 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -19,7 +19,8 @@ ClickHouse data types include: - **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md) - **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md) - **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time -- **JSON**: the [`JSON` object](./json.md) stores a JSON document in a single column +- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated) +- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column - **UUID**: a performant option for storing [`UUID` values](./uuid.md) - **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column - **Arrays**: any column can be defined as an [`Array` of values](./array.md) diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index b02dc3d1d8a..fd1f924c18f 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -1158,7 +1158,7 @@ SELECT jsonMergePatch('{"a":1}', '{"name": "joey"}', '{"name": "tom"}', '{"name" ### JSONAllPaths -Returns the list of all paths stored in each row in [JSON](../data-types/json.md) column. +Returns the list of all paths stored in each row in [JSON](../data-types/newjson.md) column. **Syntax** @@ -1168,7 +1168,7 @@ JSONAllPaths(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). **Returned value** @@ -1192,7 +1192,7 @@ SELECT json, JSONAllPaths(json) FROM test; ### JSONAllPathsWithTypes -Returns the map of all paths and their data types stored in each row in [JSON](../data-types/json.md) column. +Returns the map of all paths and their data types stored in each row in [JSON](../data-types/newjson.md) column. **Syntax** @@ -1202,7 +1202,7 @@ JSONAllPathsWithTypes(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). **Returned value** @@ -1226,7 +1226,7 @@ SELECT json, JSONAllPathsWithTypes(json) FROM test; ### JSONDynamicPaths -Returns the list of dynamic paths that are stored as separate subcolumns in [JSON](../data-types/json.md) column. +Returns the list of dynamic paths that are stored as separate subcolumns in [JSON](../data-types/newjson.md) column. **Syntax** @@ -1236,7 +1236,7 @@ JSONDynamicPaths(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). **Returned value** @@ -1260,7 +1260,7 @@ SELECT json, JSONDynamicPaths(json) FROM test; ### JSONDynamicPathsWithTypes -Returns the map of dynamic paths that are stored as separate subcolumns and their types in each row in [JSON](../data-types/json.md) column. +Returns the map of dynamic paths that are stored as separate subcolumns and their types in each row in [JSON](../data-types/newjson.md) column. **Syntax** @@ -1270,7 +1270,7 @@ JSONAllPathsWithTypes(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). 
**Returned value** @@ -1294,7 +1294,7 @@ SELECT json, JSONDynamicPathsWithTypes(json) FROM test; ### JSONSharedDataPaths -Returns the list of paths that are stored in shared data structure in [JSON](../data-types/json.md) column. +Returns the list of paths that are stored in shared data structure in [JSON](../data-types/newjson.md) column. **Syntax** @@ -1304,7 +1304,7 @@ JSONSharedDataPaths(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). **Returned value** @@ -1328,7 +1328,7 @@ SELECT json, JSONSharedDataPaths(json) FROM test; ### JSONSharedDataPathsWithTypes -Returns the map of paths that are stored in shared data structure and their types in each row in [JSON](../data-types/json.md) column. +Returns the map of paths that are stored in shared data structure and their types in each row in [JSON](../data-types/newjson.md) column. **Syntax** @@ -1338,7 +1338,7 @@ JSONSharedDataPathsWithTypes(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). **Returned value** @@ -1362,7 +1362,7 @@ SELECT json, JSONSharedDataPathsWithTypes(json) FROM test; ### JSONEmpty -Checks whether the input [JSON](../data-types/json.md) object is empty. +Checks whether the input [JSON](../data-types/newjson.md) object is empty. ``` sql JSONEmpty(json) @@ -1370,7 +1370,7 @@ JSONEmpty(json) **Arguments** -- `json` — [JSON](../data-types/json.md). +- `json` — [JSON](../data-types/newjson.md). **Returned value** diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 65aacf2b539..47eb517e81b 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1052,7 +1052,7 @@ bool ColumnObject::isFinalized() const void ColumnObject::getExtremes(DB::Field & min, DB::Field & max) const { - if (size() == 0) + if (empty()) { min = Object(); max = Object(); diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 7c8c23e8a29..305ebfa2e16 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -466,7 +466,7 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( void SerializationObject::serializeBinary(const Field & field, WriteBuffer & ostr, const DB::FormatSettings & settings) const { - auto & object = field.get(); + const auto & object = field.get(); /// Serialize number of paths and then pairs (path, value). 
writeVarUInt(object.size(), ostr); for (const auto & [path, value] : object) diff --git a/tests/integration/test_distributed_type_object/test.py b/tests/integration/test_distributed_type_object/test.py index 360087c9dda..ed27962f075 100644 --- a/tests/integration/test_distributed_type_object/test.py +++ b/tests/integration/test_distributed_type_object/test.py @@ -16,7 +16,7 @@ def started_cluster(): for node in (node1, node2): node.query( - "CREATE TABLE local_table(id UInt32, data JSON) ENGINE = MergeTree ORDER BY id", + "CREATE TABLE local_table(id UInt32, data Object('json')) ENGINE = MergeTree ORDER BY id", settings={"allow_experimental_object_type": 1}, ) node.query( diff --git a/tests/queries/0_stateless/01825_new_type_json_13.sh b/tests/queries/0_stateless/01825_new_type_json_13.sh index 765b62b1f4a..316e6890d5e 100755 --- a/tests/queries/0_stateless/01825_new_type_json_13.sh +++ b/tests/queries/0_stateless/01825_new_type_json_13.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_13" -$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_13 (obj JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 +$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_13 (obj JSON) ENGINE = MergeTree ORDER BY tuple() settings replace_long_file_name_to_hash=1" --allow_experimental_json_type 1 cat < Date: Tue, 23 Jul 2024 10:27:12 +0000 Subject: [PATCH 0722/2361] Add comments about fictitious subcolumns --- src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp | 4 ++++ src/Storages/MergeTree/MergeTreeReaderWide.cpp | 2 ++ src/Storages/MergeTree/checkDataPart.cpp | 1 + 3 files changed, 7 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 470ab977719..d507468f3ea 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -132,6 +132,7 @@ void MergeTreeDataPartWriterWide::addStreams( { assert(!substream_path.empty()); + /// Don't create streams for fictitious subcolumns that don't store any real data. if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return; @@ -208,6 +209,7 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett { return [&, this] (const ISerialization::SubstreamPath & substream_path) -> WriteBuffer * { + /// Skip fictitious subcolumns that don't store any real data. if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return nullptr; @@ -373,6 +375,7 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( min_compress_block_size = settings.min_compress_block_size; getSerialization(name_and_type.name)->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { + /// Skip fictitious subcolumns that don't store any real data. if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return; @@ -414,6 +417,7 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( /// So that instead of the marks pointing to the end of the compressed block, there were marks pointing to the beginning of the next one. serialization->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { + /// Skip fictitious subcolumns that don't store any real data. 
if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return; diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 2c876fade74..9ab45ec7b56 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -213,6 +213,7 @@ void MergeTreeReaderWide::addStreams( ISerialization::StreamCallback callback = [&] (const ISerialization::SubstreamPath & substream_path) { + /// Don't create streams for fictitious subcolumns that don't store any real data. if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return; @@ -351,6 +352,7 @@ void MergeTreeReaderWide::prefetchForColumn( deserializePrefix(serialization, name_and_type, current_task_last_mark, cache, deserialize_states_cache); auto callback = [&](const ISerialization::SubstreamPath & substream_path) { + /// Skip fictitious subcolumns that don't store any real data. if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return; diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index dcdcb7f5800..dba64edd00d 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -211,6 +211,7 @@ static IMergeTreeDataPart::Checksums checkDataPart( { get_serialization(column)->enumerateStreams([&](const ISerialization::SubstreamPath & substream_path) { + /// Skip fictitious subcolumns that don't store any real data. if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) return; From 83ef3bf32e16deb946047ddf0de6ee4defd77f93 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 10:27:56 +0000 Subject: [PATCH 0723/2361] Fix link in docs --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index e8957a24de0..8366351da59 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5611,7 +5611,7 @@ Default value: `1GiB`. ## use_json_alias_for_old_object_type -When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/json.md) type. +When enabled, `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/newjson.md) type. This setting requires server restart to take effect when changed. Default value: `false`. 
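A quick illustration of the behavior documented above, as a hedged sketch rather than part of any patch: the table names below are made up, and the experimental settings are the ones already used by the tests earlier in this series.

```sql
-- Default behavior: the JSON keyword creates the new JSON type.
SET allow_experimental_json_type = 1;
CREATE TABLE t_json_new (obj JSON) ENGINE = MergeTree ORDER BY tuple();

-- When use_json_alias_for_old_object_type is enabled (per the docs above, a server
-- restart is needed for the change to take effect), the same `obj JSON` declaration
-- creates the old Object('json') type instead, i.e. the equivalent of:
SET allow_experimental_object_type = 1;
CREATE TABLE t_json_old (obj Object('json')) ENGINE = MergeTree ORDER BY tuple();
```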
From 342efff2edd9bcbecd28f5196fb11dcabdb629e1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 23 Jul 2024 12:49:50 +0200 Subject: [PATCH 0724/2361] Bump From 86ff4e8b73d99daa5239104a0223271411949b4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 23 Jul 2024 12:39:24 +0200 Subject: [PATCH 0725/2361] groupArrayIntersect: Fix serialization bug --- .../AggregateFunctionGroupArrayIntersect.cpp | 4 +- ...roupArrayIntersect_serialization.reference | 12 ++++++ ...3208_groupArrayIntersect_serialization.sql | 41 +++++++++++++++++++ 3 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference create mode 100644 tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp index 1529cd5899a..38f2fcb9fb9 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp @@ -47,7 +47,7 @@ struct AggregateFunctionGroupArrayIntersectData }; -/// Puts all values to the hash set. Returns an array of unique values. Implemented for numeric types. +/// Puts all values to the hash set. Returns an array of unique values present in all inputs. Implemented for numeric types. template class AggregateFunctionGroupArrayIntersect : public IAggregateFunctionDataHelper, AggregateFunctionGroupArrayIntersect> @@ -154,7 +154,7 @@ public: set.reserve(size); for (size_t i = 0; i < size; ++i) { - int key; + T key; readIntBinary(key, buf); set.insert(key); } diff --git a/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference new file mode 100644 index 00000000000..c3b6e0cd5b7 --- /dev/null +++ b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference @@ -0,0 +1,12 @@ +010101 AggregateFunction(groupArrayIntersect, Array(UInt8)) +[1] +1 [2,4,6,8,10] +2 [2,4,6,8,10] +3 [2,4,6,8,10] +5 [2,6,10] +6 [10] +7 [] +a [(['2','4','6','8','10'])] +b [(['2','4','6','8','10'])] +c [(['2','4','6','8','10'])] +d [] diff --git a/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql new file mode 100644 index 00000000000..e05f78a4051 --- /dev/null +++ b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql @@ -0,0 +1,41 @@ +SELECT hex(groupArrayIntersectState([1]) AS a), toTypeName(a); +SELECT finalizeAggregation(CAST(unhex('010101'), 'AggregateFunction(groupArrayIntersect, Array(UInt8))')); + +DROP TABLE IF EXISTS grouparray; +CREATE TABLE grouparray +( + `v` AggregateFunction(groupArrayIntersect, Array(UInt8)) +) +ENGINE = Log; + +INSERT INTO grouparray Select groupArrayIntersectState([2, 4, 6, 8, 10]::Array(UInt8)); +SELECT '1', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([2, 4, 6, 8, 10]::Array(UInt8)); +SELECT '2', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]::Array(UInt8)); +SELECT '3', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([2, 6, 10]::Array(UInt8)); +SELECT '5', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray 
Select groupArrayIntersectState([10]::Array(UInt8)); +SELECT '6', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([]::Array(UInt8)); +SELECT '7', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; + +DROP TABLE IF EXISTS grouparray; + + +DROP TABLE IF EXISTS grouparray_string; +CREATE TABLE grouparray_string +( + `v` AggregateFunction(groupArrayIntersect, Array(Tuple(Array(String)))) +) +ENGINE = Log; + +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10'])]); +SELECT 'a', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10']), tuple(['2', '4', '6', '8', '10'])]); +SELECT 'b', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10']), tuple(['2', '4', '6', '8', '10', '14'])]); +SELECT 'c', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10', '20']), tuple(['2', '4', '6', '8', '10', '14'])]); +SELECT 'd', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; From 0256dba672bd23302b845f7d78f4663e3c633140 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 23 Jul 2024 13:20:41 +0200 Subject: [PATCH 0726/2361] Make 02987_group_array_intersect smaller --- .../0_stateless/02987_group_array_intersect.reference | 8 ++++---- .../0_stateless/02987_group_array_intersect.sql | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/02987_group_array_intersect.reference b/tests/queries/0_stateless/02987_group_array_intersect.reference index 7ec64a889f5..ec4d07742af 100644 --- a/tests/queries/0_stateless/02987_group_array_intersect.reference +++ b/tests/queries/0_stateless/02987_group_array_intersect.reference @@ -8,12 +8,12 @@ [1,4,5] [] [] -1000000 -999999 +100000 +99999 [9] ['a','c'] -1000000 -999999 +50000 +49999 ['1'] [] ['2023-01-01 00:00:00'] diff --git a/tests/queries/0_stateless/02987_group_array_intersect.sql b/tests/queries/0_stateless/02987_group_array_intersect.sql index 321e860b0a8..15acd0ca900 100644 --- a/tests/queries/0_stateless/02987_group_array_intersect.sql +++ b/tests/queries/0_stateless/02987_group_array_intersect.sql @@ -39,15 +39,15 @@ DROP TABLE test_numbers; DROP TABLE IF EXISTS test_big_numbers_sep; CREATE TABLE test_big_numbers_sep (a Array(Int64)) engine=MergeTree ORDER BY a; -INSERT INTO test_big_numbers_sep SELECT array(number) FROM numbers_mt(1000000); +INSERT INTO test_big_numbers_sep SELECT array(number) FROM numbers_mt(100000); SELECT groupArrayIntersect(*) FROM test_big_numbers_sep; DROP TABLE test_big_numbers_sep; DROP TABLE IF EXISTS test_big_numbers; CREATE TABLE test_big_numbers (a Array(Int64)) engine=MergeTree ORDER BY a; -INSERT INTO test_big_numbers SELECT range(1000000); +INSERT INTO test_big_numbers SELECT range(100000); SELECT length(groupArrayIntersect(*)) FROM test_big_numbers; -INSERT INTO test_big_numbers SELECT range(999999); +INSERT INTO test_big_numbers SELECT range(99999); SELECT length(groupArrayIntersect(*)) FROM test_big_numbers; INSERT INTO test_big_numbers VALUES ([9]); SELECT groupArrayIntersect(*) FROM test_big_numbers; @@ -63,9 +63,9 @@ DROP TABLE test_string; DROP TABLE IF EXISTS test_big_string; CREATE TABLE test_big_string (a 
Array(String)) engine=MergeTree ORDER BY a; -INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(1000000); +INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(50000); SELECT length(groupArrayIntersect(*)) FROM test_big_string; -INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(999999); +INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(49999); SELECT length(groupArrayIntersect(*)) FROM test_big_string; INSERT INTO test_big_string VALUES (['1']); SELECT groupArrayIntersect(*) FROM test_big_string; From 53c7bf680b401dc4948353dec2d94a428327b1f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 23 Jul 2024 11:46:48 +0000 Subject: [PATCH 0727/2361] Update autogenerated version to 24.8.1.1 and contributors --- cmake/autogenerated_versions.txt | 10 +++---- .../StorageSystemContributors.generated.cpp | 27 +++++++++++++++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index bb776fa9506..d69646d3694 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54488) +SET(VERSION_REVISION 54489) SET(VERSION_MAJOR 24) -SET(VERSION_MINOR 7) +SET(VERSION_MINOR 8) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9) -SET(VERSION_DESCRIBE v24.7.1.1-testing) -SET(VERSION_STRING 24.7.1.1) +SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af) +SET(VERSION_DESCRIBE v24.8.1.1-testing) +SET(VERSION_STRING 24.8.1.1) # end of autochange diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 9201eef185f..35b9c0008c6 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -1,5 +1,6 @@ // autogenerated by tests/ci/version_helper.py const char * auto_contributors[] { + "0x01f", "0xflotus", "13DaGGeR", "1lann", @@ -167,6 +168,7 @@ const char * auto_contributors[] { "AnneClickHouse", "Anselmo D. Adams", "Anthony N. 
Simon", + "AntiTopQuark", "Anton Ivashkin", "Anton Kobzev", "Anton Kozlov", @@ -299,6 +301,7 @@ const char * auto_contributors[] { "Dan Wu", "DanRoscigno", "Dani Pozo", + "Daniel Anugerah", "Daniel Bershatsky", "Daniel Byta", "Daniel Dao", @@ -370,6 +373,7 @@ const char * auto_contributors[] { "Elena", "Elena Baskakova", "Elena Torró", + "Elena Torró Martínez", "Elghazal Ahmed", "Eliot Hautefeuille", "Elizaveta Mironyuk", @@ -415,6 +419,7 @@ const char * auto_contributors[] { "FgoDt", "Filatenkov Artur", "Filipe Caixeta", + "Filipp Bakanov", "Filipp Ozinov", "Filippov Denis", "Fille", @@ -451,6 +456,7 @@ const char * auto_contributors[] { "Gleb Novikov", "Gleb-Tretyakov", "GoGoWen2021", + "Gosha Letov", "Gregory", "Grigorii Sokolik", "Grigory", @@ -461,6 +467,7 @@ const char * auto_contributors[] { "Guillaume Tassery", "Guo Wangyang", "Guo Wei (William)", + "Guspan Tanadi", "Haavard Kvaalen", "Habibullah Oladepo", "HaiBo Li", @@ -474,6 +481,7 @@ const char * auto_contributors[] { "HarryLeeIBM", "Hasitha Kanchana", "Hasnat", + "Haydn", "Heena Bansal", "HeenaBansal2009", "Hendrik M", @@ -606,6 +614,7 @@ const char * auto_contributors[] { "Kevin Chiang", "Kevin Michel", "Kevin Mingtarja", + "Kevin Song", "Kevin Zhang", "KevinyhZou", "KinderRiven", @@ -661,6 +670,7 @@ const char * auto_contributors[] { "Lewinma", "Li Shuai", "Li Yin", + "Linh Giang", "Lino Uruñuela", "Lirikl", "Liu Cong", @@ -690,6 +700,7 @@ const char * auto_contributors[] { "Maksim Alekseev", "Maksim Buren", "Maksim Fedotov", + "Maksim Galkin", "Maksim Kita", "Maksym Sobolyev", "Mal Curtis", @@ -724,6 +735,7 @@ const char * auto_contributors[] { "Max Akhmedov", "Max Bruce", "Max K", + "Max K.", "Max Kainov", "Max Vetrov", "MaxTheHuman", @@ -811,6 +823,7 @@ const char * auto_contributors[] { "Nataly Merezhuk", "Natalya Chizhonkova", "Natasha Murashkina", + "Nathan Clevenger", "NeZeD [Mac Pro]", "Neeke Gao", "Neng Liu", @@ -946,6 +959,7 @@ const char * auto_contributors[] { "Robert Coelho", "Robert Hodges", "Robert Schulze", + "Rodolphe Dugé de Bernonville", "RogerYK", "Rohit Agarwal", "Romain Neutron", @@ -1107,6 +1121,7 @@ const char * auto_contributors[] { "Timur Solodovnikov", "TiunovNN", "Tobias Adamson", + "Tobias Florek", "Tobias Lins", "Tom Bombadil", "Tom Risse", @@ -1231,11 +1246,13 @@ const char * auto_contributors[] { "Yingchun Lai", "Yingfan Chen", "Yinzheng-Sun", + "Yinzuo Jiang", "YiÄŸit Konur", "Yohann Jardin", "Yong Wang", "Yong-Hao Zou", "Youenn Lebras", + "Your Name", "Yu, Peng", "Yuko Takagi", "Yuntao Wu", @@ -1250,6 +1267,7 @@ const char * auto_contributors[] { "Yury Stankevich", "Yusuke Tanaka", "Zach Naimon", + "Zawa-II", "Zheng Miao", "ZhiHong Zhang", "ZhiYong Wang", @@ -1380,6 +1398,7 @@ const char * auto_contributors[] { "conicliu", "copperybean", "coraxster", + "cw5121", "cwkyaoyao", "d.v.semenov", "dalei2019", @@ -1460,12 +1479,14 @@ const char * auto_contributors[] { "fuzzERot", "fyu", "g-arslan", + "gabrielmcg44", "ggerogery", "giordyb", "glockbender", "glushkovds", "grantovsky", "gulige", + "gun9nir", "guoleiyi", "guomaolin", "guov100", @@ -1527,6 +1548,7 @@ const char * auto_contributors[] { "jferroal", "jiahui-97", "jianmei zhang", + "jiaosenvip", "jinjunzh", "jiyoungyoooo", "jktng", @@ -1541,6 +1563,7 @@ const char * auto_contributors[] { "jun won", "jus1096", "justindeguzman", + "jwoodhead", "jyz0309", "karnevil13", "kashwy", @@ -1633,10 +1656,12 @@ const char * auto_contributors[] { "mateng0915", "mateng915", "mauidude", + "max-vostrikov", "maxim", "maxim-babenko", "maxkuzn", "maxulan", + 
"maxvostrikov", "mayamika", "mehanizm", "melin", @@ -1677,6 +1702,7 @@ const char * auto_contributors[] { "nathanbegbie", "nauta", "nautaa", + "nauu", "ndchikin", "nellicus", "nemonlou", @@ -1975,6 +2001,7 @@ const char * auto_contributors[] { "å¼ å¥", "张风啸", "å¾ç‚˜", + "忒休斯~Theseus", "曲正é¹", "木木夕120", "未æ¥æ˜Ÿ___è´¹", From ba3979275b53a44d9a9c097f047188b2d44ffa8b Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Tue, 23 Jul 2024 13:54:18 +0200 Subject: [PATCH 0728/2361] Test that big set is shared between parts by checking read_rows from system.part_log --- ...en_multiple_mutations_tasks_long.reference | 5 +++- ..._between_multiple_mutations_tasks_long.sql | 26 ++++++++++++++++--- ...sets_between_mutation_tasks_long.reference | 9 ++++++- ...e_big_sets_between_mutation_tasks_long.sql | 19 +++++++++++++- 4 files changed, 53 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference index 9cb32105006..d3e7e113ffe 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference +++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.reference @@ -3,8 +3,11 @@ all_1_1_0 all_2_2_0 all_3_3_0 all_4_4_0 +40000 5000 all_1_1_0_9 5000 all_2_2_0_9 5000 all_3_3_0_9 5000 all_4_4_0_9 -Ok +mutation_version has_parts_for_which_set_was_built has_parts_that_shared_set +8 1 1 +9 1 1 diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql index 631fd9cb2cc..40f7800fee1 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql @@ -23,10 +23,30 @@ ALTER TABLE 02581_trips UPDATE description='6' WHERE id IN (SELECT (number*10 + ALTER TABLE 02581_trips DELETE WHERE id IN (SELECT (number*10 + 7)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; ALTER TABLE 02581_trips UPDATE description='8' WHERE id IN (SELECT (number*10 + 8)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; SYSTEM START MERGES 02581_trips; -DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + 9)::UInt32 FROM numbers(10000000)); -SELECT count(), _part from 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part; + +-- Wait for mutations to finish +SELECT count() FROM 02581_trips SETTINGS select_sequential_consistency = 1; + +DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + 9)::UInt32 FROM numbers(10000000)) SETTINGS lightweight_deletes_sync = 2; +SELECT count(), _part from 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part SETTINGS select_sequential_consistency=1; SYSTEM FLUSH LOGS; -SELECT DISTINCT peak_memory_usage < 2000000000 ? 
'Ok' : toString(tuple(*)) FROM system.part_log WHERE database = currentDatabase() AND event_date >= yesterday() AND table = '02581_trips' AND event_type = 'MutatePart'; + +-- Check that in every mutation there were parts for which the selected rows count is not greater than the size of the big set, which means that those parts shared the set +-- Also check that there was at least one part that read more rows than the size of the set, which means that the set was built for that part +WITH 10000000 AS rows_in_set +SELECT + mutation_version, + countIf(read_rows >= rows_in_set) >= 1 as has_parts_for_which_set_was_built, + countIf(read_rows <= rows_in_set) >= 1 as has_parts_that_shared_set +FROM +( + SELECT + CAST(splitByChar('_', part_name)[5], 'UInt64') AS mutation_version, + read_rows + FROM system.part_log + WHERE database = currentDatabase() and (event_date >= yesterday()) AND (`table` = '02581_trips') AND (event_type = 'MutatePart') +) +GROUP BY mutation_version ORDER BY mutation_version FORMAT TSVWithNames; DROP TABLE 02581_trips; diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference index d21598bc12e..eecd768eb5e 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference +++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.reference @@ -10,4 +10,11 @@ all_4_4_0 20000 16000 12000 -Ok +mutation_version has_parts_for_which_set_was_built has_parts_that_shared_set +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql index 062f22357e8..603c7cb7db0 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql @@ -29,6 +29,8 @@ ALTER TABLE 02581_trips UPDATE description='b' WHERE id::UInt64 IN (SELECT (numb SELECT count() from 02581_trips WHERE description = ''; -- Run mutation with non-PK `id2` IN big subquery +--SELECT count(), _part FROM 02581_trips WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)) GROUP BY _part ORDER BY _part; +--EXPLAIN SELECT (), _part FROM 02581_trips WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)); ALTER TABLE 02581_trips UPDATE description='c' WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; @@ -57,6 +59,21 @@ SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; SYSTEM FLUSH LOGS; -SELECT DISTINCT peak_memory_usage < 2000000000 ? 
'Ok' : toString(tuple(*)) FROM system.part_log WHERE database = currentDatabase() AND event_date >= yesterday() AND table = '02581_trips' AND event_type = 'MutatePart'; +-- Check that in every mutation there were parts where selected rows count then the size of big sets which will mean that sets were shared +-- Also check that there was at least one part that read more rows then the size of set which will mean that the +WITH 10000000 AS rows_in_set +SELECT + mutation_version, + countIf(read_rows >= rows_in_set) >= 1 as has_parts_for_which_set_was_built, + countIf(read_rows <= rows_in_set) >= 1 as has_parts_that_shared_set +FROM +( + SELECT + CAST(splitByChar('_', part_name)[5], 'UInt64') AS mutation_version, + read_rows + FROM system.part_log + WHERE database = currentDatabase() and (event_date >= yesterday()) AND (`table` = '02581_trips') AND (event_type = 'MutatePart') +) +GROUP BY mutation_version ORDER BY mutation_version FORMAT TSVWithNames; DROP TABLE 02581_trips; From aaf603035e31874d6d5bcd024d0f4040715baa72 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Tue, 23 Jul 2024 13:35:37 +0100 Subject: [PATCH 0729/2361] check error type --- src/IO/S3/Client.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 325c820f8bd..7196dfa9bdc 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -385,7 +385,7 @@ Model::HeadObjectOutcome Client::HeadObject(HeadObjectRequest & request) const request.overrideURI(std::move(*bucket_uri)); - if (isClientForDisk()) + if (isClientForDisk() && error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); return enrichErrorMessage( @@ -410,7 +410,7 @@ Model::ListObjectsOutcome Client::ListObjects(ListObjectsRequest & request) cons Model::GetObjectOutcome Client::GetObject(GetObjectRequest & request) const { auto resp = doRequest(request, [this](const Model::GetObjectRequest & req) { return GetObject(req); }); - if (!resp.IsSuccess() && isClientForDisk()) + if (!resp.IsSuccess() && isClientForDisk() && resp.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); return enrichErrorMessage(std::move(resp)); From ea61af961acd3d8fb9dd3ee2b48900e4f1ee7937 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 23 Jul 2024 00:14:19 +0100 Subject: [PATCH 0730/2361] impl --- src/Server/TCPHandler.cpp | 73 +++++++++++++++++++++++---------------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 07366d7cc07..2a2e2e22538 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1,48 +1,48 @@ -#include -#include -#include #include #include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include +#include +#include +#include #include #include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #include #include +#include +#include +#include +#include #include -#include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include #include -#include #include #include @@ -61,6 +61,8 @@ #include +#include + using namespace std::literals; using 
namespace DB; @@ -1036,6 +1038,17 @@ void TCPHandler::processOrdinaryQuery() PullingAsyncPipelineExecutor executor(pipeline); CurrentMetrics::Increment query_thread_metric_increment{CurrentMetrics::QueryThread}; + /// The following may happen: + /// * current thread is holding the lock + /// * because of the exception we unwind the stack and call the destructor of `executor` + /// * the destructor calls cancel() and waits for all query threads to finish + /// * at the same time one of the query threads is trying to acquire the lock, e.g. inside `merge_tree_read_task_callback` + /// * deadlock + SCOPE_EXIT({ + if (out_lock.owns_lock()) + out_lock.unlock(); + }); + Block block; while (executor.pull(block, interactive_delay / 1000)) { @@ -1079,8 +1092,7 @@ void TCPHandler::processOrdinaryQuery() } /// This lock wasn't acquired before and we make .lock() call here - /// so everything under this line is covered even together - /// with sendProgress() out of the scope + /// so everything under this line is covered. out_lock.lock(); /** If data has run out, we will send the profiling data and total values to @@ -1107,6 +1119,7 @@ void TCPHandler::processOrdinaryQuery() last_sent_snapshots.clear(); } + out_lock.lock(); sendProgress(); } From 496c1fbf8413db3b3a3b410750adf3b5ecddabf3 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 23 Jul 2024 12:46:09 +0000 Subject: [PATCH 0731/2361] Un-flake test_runtime_configurable_cache_size --- ..._query_cache.xml => empty_query_cache.xml} | 2 +- .../test.py | 60 +++++++++---------- ...query_cache_asynchronous_metrics.reference | 2 + ...02494_query_cache_asynchronous_metrics.sql | 13 ++++ 4 files changed, 45 insertions(+), 32 deletions(-) rename tests/integration/test_runtime_configurable_cache_size/configs/{smaller_query_cache.xml => empty_query_cache.xml} (64%) create mode 100644 tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.reference create mode 100644 tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.sql diff --git a/tests/integration/test_runtime_configurable_cache_size/configs/smaller_query_cache.xml b/tests/integration/test_runtime_configurable_cache_size/configs/empty_query_cache.xml similarity index 64% rename from tests/integration/test_runtime_configurable_cache_size/configs/smaller_query_cache.xml rename to tests/integration/test_runtime_configurable_cache_size/configs/empty_query_cache.xml index 6f2de0fa8f5..c4872a0ce41 100644 --- a/tests/integration/test_runtime_configurable_cache_size/configs/smaller_query_cache.xml +++ b/tests/integration/test_runtime_configurable_cache_size/configs/empty_query_cache.xml @@ -1,7 +1,7 @@ - 1 + 0 diff --git a/tests/integration/test_runtime_configurable_cache_size/test.py b/tests/integration/test_runtime_configurable_cache_size/test.py index f761005f297..26676009e19 100644 --- a/tests/integration/test_runtime_configurable_cache_size/test.py +++ b/tests/integration/test_runtime_configurable_cache_size/test.py @@ -94,51 +94,49 @@ CONFIG_DIR = os.path.join(SCRIPT_DIR, "configs") def test_query_cache_size_is_runtime_configurable(start_cluster): - # the initial config specifies the maximum query cache size as 2, run 3 queries, expect 2 cache entries node.query("SYSTEM DROP QUERY CACHE") + + # the initial config allows at most two query cache entries but we don't mind node.query("SELECT 1 SETTINGS use_query_cache = 1, query_cache_ttl = 1") - node.query("SELECT 2 SETTINGS use_query_cache = 1, query_cache_ttl = 1") - node.query("SELECT 3 SETTINGS use_query_cache = 1, 
query_cache_ttl = 1") time.sleep(2) - node.query("SYSTEM RELOAD ASYNCHRONOUS METRICS") - res = node.query( - "SELECT value FROM system.asynchronous_metrics WHERE metric = 'QueryCacheEntries'", - ) - assert res == "2\n" + # at this point, the query cache contains one entry and it is stale - # switch to a config with a maximum query cache size of 1 + res = node.query( + "SELECT count(*) FROM system.query_cache", + ) + assert res == "1\n" + + # switch to a config with a maximum query cache size of _0_ node.copy_file_to_container( - os.path.join(CONFIG_DIR, "smaller_query_cache.xml"), + os.path.join(CONFIG_DIR, "empty_query_cache.xml"), "/etc/clickhouse-server/config.d/default.xml", ) node.query("SYSTEM RELOAD CONFIG") - # check that eviction worked as expected - time.sleep(2) - node.query("SYSTEM RELOAD ASYNCHRONOUS METRICS") res = node.query( - "SELECT value FROM system.asynchronous_metrics WHERE metric = 'QueryCacheEntries'", - ) - assert ( - res == "2\n" - ) # "Why not 1?", you think. Reason is that QC uses the TTLCachePolicy that evicts lazily only upon insert. - # Not a real issue, can be changed later, at least there's a test now. - - # Also, you may also wonder "why query_cache_ttl = 1"? Reason is that TTLCachePolicy only removes *stale* entries. With the default TTL - # (60 sec), no entries would be removed at all. Again: not a real issue, can be changed later and there's at least a test now. - - # check that the new query cache maximum size is respected when more queries run - node.query("SELECT 4 SETTINGS use_query_cache = 1, query_cache_ttl = 1") - node.query("SELECT 5 SETTINGS use_query_cache = 1, query_cache_ttl = 1") - - time.sleep(2) - node.query("SYSTEM RELOAD ASYNCHRONOUS METRICS") - res = node.query( - "SELECT value FROM system.asynchronous_metrics WHERE metric = 'QueryCacheEntries'", + "SELECT count(*) FROM system.query_cache", ) assert res == "1\n" + # "Why not 0?", I hear you say. Reason is that QC uses the TTLCachePolicy that evicts lazily only upon insert. + # Not a real issue, can be changed later, at least there's a test now. + + # The next SELECT will find a single stale entry which is one entry too much according to the new config. + # This triggers the eviction of all stale entries, in this case the 'SELECT 1' result. + # Then, it tries to insert the 'SELECT 2' result but it also cannot be added according to the config. 
+ node.query("SELECT 2 SETTINGS use_query_cache = 1, query_cache_ttl = 1") + res = node.query( + "SELECT count(*) FROM system.query_cache", + ) + assert res == "0\n" + + # The new maximum cache size is respected when more queries run + node.query("SELECT 3 SETTINGS use_query_cache = 1, query_cache_ttl = 1") + res = node.query( + "SELECT count(*) FROM system.query_cache", + ) + assert res == "0\n" # restore the original config node.copy_file_to_container( diff --git a/tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.reference b/tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.sql b/tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.sql new file mode 100644 index 00000000000..d8de4facb38 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_asynchronous_metrics.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Create an entry in the query cache +SELECT 1 SETTINGS use_query_cache = true; + +-- Asynchronous metrics must know about the entry +SYSTEM RELOAD ASYNCHRONOUS METRICS; +SELECT value FROM system.asynchronous_metrics WHERE metric = 'QueryCacheEntries'; + +SYSTEM DROP QUERY CACHE; From fce9d607309002d0b8043856dad6905676a2c412 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 12:48:06 +0000 Subject: [PATCH 0732/2361] Try to fix docs --- docs/en/sql-reference/data-types/newjson.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index 494bfba3173..524dc7810e6 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -1,5 +1,5 @@ --- -slug: /en/sql-reference/data-types/json +slug: /en/sql-reference/data-types/newjson sidebar_position: 63 sidebar_label: JSON keywords: [json, data type] From d74dc587d7a183225b7cf0846b85e8213dcb7fc0 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 23 Jul 2024 13:06:58 +0000 Subject: [PATCH 0733/2361] Fix comment --- src/Processors/IProcessor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 94e93595f4e..4fd00d5e164 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -234,7 +234,7 @@ public: * Example, scheduling tasks for remote workers (file descriptor in this case is a socket) * When the remote worker asks for the next task, doing it in onAsyncJobReady() we can provide it immediately. * Otherwise, the returning of the next task for the remote worker can be delayed by current work done in the pipeline - * i.e. 
processor->work(), which will create unnecessary latency in query processing by remote workers Not Committed Yet + * (by other processors), which will create unnecessary latency in query processing by remote workers */ virtual void onAsyncJobReady() {} From 919005c4f70b044ecd9cc1bbce5dc5e276e11929 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 23 Jul 2024 13:09:34 +0000 Subject: [PATCH 0734/2361] Incorporate review feedback --- .../test_runtime_configurable_cache_size/test.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_runtime_configurable_cache_size/test.py b/tests/integration/test_runtime_configurable_cache_size/test.py index 26676009e19..beaf83ea754 100644 --- a/tests/integration/test_runtime_configurable_cache_size/test.py +++ b/tests/integration/test_runtime_configurable_cache_size/test.py @@ -96,11 +96,11 @@ CONFIG_DIR = os.path.join(SCRIPT_DIR, "configs") def test_query_cache_size_is_runtime_configurable(start_cluster): node.query("SYSTEM DROP QUERY CACHE") - # the initial config allows at most two query cache entries but we don't mind + # The initial config allows at most two query cache entries but we don't mind node.query("SELECT 1 SETTINGS use_query_cache = 1, query_cache_ttl = 1") time.sleep(2) - # at this point, the query cache contains one entry and it is stale + # At this point, the query cache contains one entry and it is stale res = node.query( "SELECT count(*) FROM system.query_cache", @@ -138,8 +138,17 @@ def test_query_cache_size_is_runtime_configurable(start_cluster): ) assert res == "0\n" - # restore the original config + # Restore the original config node.copy_file_to_container( os.path.join(CONFIG_DIR, "default.xml"), "/etc/clickhouse-server/config.d/default.xml", ) + + node.query("SYSTEM RELOAD CONFIG") + + # It is possible to insert entries again + node.query("SELECT 4 SETTINGS use_query_cache = 1, query_cache_ttl = 1") + res = node.query( + "SELECT count(*) FROM system.query_cache", + ) + assert res == "1\n" From 3cb35efb3149d0c0cef79ecece53b1995be0e0e9 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 13:31:59 +0000 Subject: [PATCH 0735/2361] Try to improve insert speed --- src/Columns/ColumnArray.cpp | 5 + src/Columns/ColumnArray.h | 1 + src/Columns/ColumnDecimal.h | 1 + src/Columns/ColumnDynamic.cpp | 91 +++++++++------- src/Columns/ColumnDynamic.h | 79 ++++++++------ src/Columns/ColumnFixedString.h | 5 + src/Columns/ColumnLowCardinality.h | 2 + src/Columns/ColumnMap.cpp | 5 + src/Columns/ColumnMap.h | 1 + src/Columns/ColumnNullable.cpp | 5 + src/Columns/ColumnNullable.h | 1 + src/Columns/ColumnObject.cpp | 103 +++++++++++------- src/Columns/ColumnObject.h | 7 +- src/Columns/ColumnString.cpp | 5 + src/Columns/ColumnString.h | 1 + src/Columns/ColumnTuple.cpp | 8 ++ src/Columns/ColumnTuple.h | 1 + src/Columns/ColumnVariant.cpp | 5 + src/Columns/ColumnVariant.h | 1 + src/Columns/ColumnVector.h | 5 + src/Columns/IColumn.h | 3 + src/DataTypes/DataTypeObject.cpp | 7 +- .../Serializations/SerializationDynamic.cpp | 9 +- src/Formats/JSONExtractTree.cpp | 6 +- src/Processors/Formats/IRowInputFormat.cpp | 4 + .../Impl/JSONAsStringRowInputFormat.cpp | 63 ++++++----- .../Formats/Impl/JSONAsStringRowInputFormat.h | 14 +-- 27 files changed, 278 insertions(+), 160 deletions(-) diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 19cce678cc7..299bba95eb6 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -452,6 +452,11 @@ void 
ColumnArray::reserve(size_t n) getData().reserve(n); /// The average size of arrays is not taken into account here. Or it is considered to be no more than 1. } +size_t ColumnArray::capacity() const +{ + return getOffsets().capacity(); +} + void ColumnArray::shrinkToFit() { getOffsets().shrink_to_fit(); diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index 63affb86d9d..906ff014f5b 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -118,6 +118,7 @@ public: void updatePermutationWithCollation(const Collator & collator, PermutationSortDirection direction, PermutationSortStability stability, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 59bfbd2159c..6c1b257871a 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -53,6 +53,7 @@ public: size_t allocatedBytes() const override { return data.allocated_bytes(); } void protect() override { data.protect(); } void reserve(size_t n) override { data.reserve_exact(n); } + size_t capacity() const override { return data.capacity(); } void shrinkToFit() override { data.shrink_to_fit(); } #if !defined(DEBUG_OR_SANITIZER_BUILD) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index bbaf2e669d8..652448f0220 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -29,11 +29,13 @@ ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_ variant_info.variant_type = std::make_shared(DataTypes{}); variant_info.variant_name = variant_info.variant_type->getName(); variant_column = variant_info.variant_type->createColumn(); + variant_column_ptr = assert_cast(variant_column.get()); } ColumnDynamic::ColumnDynamic( MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_) : variant_column(std::move(variant_column_)) + , variant_column_ptr(assert_cast(variant_column.get())) , variant_info(variant_info_) , max_dynamic_types(max_dynamic_types_) , statistics(statistics_) @@ -57,6 +59,25 @@ ColumnDynamic::MutablePtr ColumnDynamic::create(MutableColumnPtr variant_column, return create(std::move(variant_column), variant_info, max_dynamic_types_, statistics_); } +void ColumnDynamic::setVariantType(const DB::DataTypePtr & variant_type) +{ + if (!empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific variant type is allowed only for empty dynamic column"); + + variant_column = variant_type->createColumn(); + variant_column_ptr = assert_cast(variant_column.get()); + variant_info.variant_type = variant_type; + variant_info.variant_name = variant_type->getName(); + const auto & variants = assert_cast(*variant_type).getVariants(); + variant_info.variant_names.reserve(variants.size()); + variant_info.variant_name_to_discriminator.reserve(variants.size()); + for (ColumnVariant::Discriminator discr = 0; discr != variants.size(); ++discr) + { + const auto & variant_name = variant_info.variant_names.emplace_back(variants[discr]->getName()); + variant_info.variant_name_to_discriminator[variant_name] = discr; + } +} + bool ColumnDynamic::addNewVariant(const DataTypePtr & new_variant, const String & variant_name) { /// Check if we already have such variant. 
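The recurring change in this commit is to cache the result of the assert_cast in a raw variant_column_ptr member and refresh it whenever the owning variant_column is reassigned, so the hot insert path no longer pays for a cast on every call. Below is a minimal standalone sketch of that cached-downcast-pointer pattern; Base, Derived and Holder are invented names for illustration, not the real IColumn/ColumnVariant hierarchy.

    #include <cassert>
    #include <iostream>
    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { void work() const { std::cout << "work\n"; } };

    class Holder
    {
    public:
        Holder() { reset(std::make_unique<Derived>()); }

        /// Whenever the owning pointer changes, the cached downcast pointer
        /// must be refreshed too, otherwise it dangles.
        void reset(std::unique_ptr<Base> new_base)
        {
            base = std::move(new_base);
            derived = dynamic_cast<Derived *>(base.get());
            assert(derived != nullptr);
        }

        /// Hot path: no per-call cast.
        void use() const { derived->work(); }

    private:
        std::unique_ptr<Base> base;
        Derived * derived = nullptr;
    };

    int main()
    {
        Holder holder;
        holder.use();
        holder.reset(std::make_unique<Derived>());
        holder.use();
        return 0;
    }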
@@ -122,7 +143,7 @@ void ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DB::DataTypePt variant_info.variant_name = new_variant_type->getName(); variant_info.variant_names = new_variant_names; variant_info.variant_name_to_discriminator = new_variant_name_to_discriminator; - assert_cast(*variant_column).extend(current_to_new_discriminators, std::move(new_variant_columns_and_discriminators_to_add)); + variant_column_ptr->extend(current_to_new_discriminators, std::move(new_variant_columns_and_discriminators_to_add)); /// Clear mappings cache because now with new Variant we will have new mappings. variant_mappings_cache.clear(); } @@ -188,14 +209,14 @@ std::vector * ColumnDynamic::combineVariants(const void ColumnDynamic::insert(const DB::Field & x) { /// Check if we can insert field without Variant extension. - if (variant_column->tryInsert(x)) + if (variant_column_ptr->tryInsert(x)) return; /// If we cannot insert field into current variant column, extend it with new variant for this field from its type. if (addNewVariant(applyVisitor(FieldToDataType(), x))) { /// Now we should be able to insert this field into extended variant column. - variant_column->insert(x); + variant_column_ptr->insert(x); } else { @@ -203,7 +224,7 @@ void ColumnDynamic::insert(const DB::Field & x) /// This case should be really rare in real use cases. /// We should always be able to add String variant and cast inserted value to String. addStringVariant(); - variant_column->insert(toString(x)); + variant_column_ptr->insert(toString(x)); } } @@ -226,22 +247,20 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) /// Check if we have the same variants in both columns. if (variant_info.variant_name == dynamic_src.variant_info.variant_name) { - variant_column->insertFrom(*dynamic_src.variant_column, n); + variant_column_ptr->insertFrom(*dynamic_src.variant_column, n); return; } - auto & variant_col = assert_cast(*variant_column); - /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_col.insertFrom(*dynamic_src.variant_column, n, *global_discriminators_mapping); + variant_column_ptr->insertFrom(*dynamic_src.variant_column, n, *global_discriminators_mapping); return; } /// We cannot combine 2 Variant types as total number of variants exceeds the limit. /// We need to insert single value, try to add only corresponding variant. - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + const auto & src_variant_col = dynamic_src.getVariantColumn(); auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); /// NULL doesn't require Variant extension. 
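When the source and destination Dynamic columns carry different variant sets, insertFrom goes through the source-to-destination discriminator mapping produced by combineVariants. The following is a rough sketch of how such a mapping can be built from two lists of type names; it is a simplification (plain std::string names, no cap on the number of variants) and not the actual ClickHouse implementation.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    using Discriminator = uint8_t;

    /// Maps every source discriminator to a destination discriminator,
    /// appending source-only types to the destination list.
    std::vector<Discriminator> buildMapping(std::vector<std::string> & dst, const std::vector<std::string> & src)
    {
        std::unordered_map<std::string, Discriminator> dst_index;
        for (size_t d = 0; d != dst.size(); ++d)
            dst_index[dst[d]] = static_cast<Discriminator>(d);

        std::vector<Discriminator> src_to_dst(src.size());
        for (size_t s = 0; s != src.size(); ++s)
        {
            auto it = dst_index.find(src[s]);
            if (it == dst_index.end())
            {
                dst.push_back(src[s]);
                it = dst_index.emplace(src[s], static_cast<Discriminator>(dst.size() - 1)).first;
            }
            src_to_dst[s] = it->second;
        }
        return src_to_dst;
    }

    int main()
    {
        std::vector<std::string> dst{"Int64", "String"};
        std::vector<std::string> src{"String", "Float64"};
        for (auto d : buildMapping(dst, src))
            std::cout << static_cast<int>(d) << '\n'; /// prints 1, then 2 (Float64 was appended)
        return 0;
    }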
@@ -255,7 +274,7 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) if (addNewVariant(variant_type)) { auto discr = variant_info.variant_name_to_discriminator[dynamic_src.variant_info.variant_names[src_global_discr]]; - variant_col.insertIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); + variant_column_ptr->insertIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); return; } @@ -266,7 +285,7 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) tmp_variant_column->insertFrom(src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); auto tmp_string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); auto string_variant_discr = variant_info.variant_name_to_discriminator["String"]; - variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0); + variant_column_ptr->insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0); } #if !defined(DEBUG_OR_SANITIZER_BUILD) @@ -284,16 +303,14 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si /// Check if we have the same variants in both columns. if (variant_info.variant_names == dynamic_src.variant_info.variant_names) { - variant_column->insertRangeFrom(*dynamic_src.variant_column, start, length); + variant_column_ptr->insertRangeFrom(*dynamic_src.variant_column, start, length); return; } - auto & variant_col = assert_cast(*variant_column); - /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping); + variant_column_ptr->insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping); return; } @@ -420,7 +437,7 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si auto local_discr = src_local_discriminators[i]; if (local_discr == ColumnVariant::NULL_DISCRIMINATOR) { - variant_col.insertDefault(); + variant_column_ptr->insertDefault(); } else { @@ -429,11 +446,11 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si auto it = variants_converted_to_string.find(global_discr); if (it == variants_converted_to_string.end()) { - variant_col.insertIntoVariantFrom(to_global_discr, *src_variant_columns[local_discr], src_offsets[i]); + variant_column_ptr->insertIntoVariantFrom(to_global_discr, *src_variant_columns[local_discr], src_offsets[i]); } else { - variant_col.insertIntoVariantFrom(to_global_discr, *it->second, src_offsets[i] - variants_ranges[global_discr].first); + variant_column_ptr->insertIntoVariantFrom(to_global_discr, *it->second, src_offsets[i] - variants_ranges[global_discr].first); } } } @@ -450,16 +467,14 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, /// Check if we have the same variants in both columns. 
if (variant_info.variant_names == dynamic_src.variant_info.variant_names) { - variant_column->insertManyFrom(*dynamic_src.variant_column, position, length); + variant_column_ptr->insertManyFrom(*dynamic_src.variant_column, position, length); return; } - auto & variant_col = assert_cast(*variant_column); - /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_col.insertManyFrom(*dynamic_src.variant_column, position, length, *global_discriminators_mapping); + variant_column_ptr->insertManyFrom(*dynamic_src.variant_column, position, length, *global_discriminators_mapping); return; } @@ -477,7 +492,7 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, if (addNewVariant(variant_type)) { auto discr = variant_info.variant_name_to_discriminator[dynamic_src.variant_info.variant_names[src_global_discr]]; - variant_col.insertManyIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position), length); + variant_column_ptr->insertManyIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position), length); return; } @@ -486,7 +501,7 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, tmp_variant_column->insertFrom(src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position)); auto tmp_string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); auto string_variant_discr = variant_info.variant_name_to_discriminator["String"]; - variant_col.insertManyIntoVariantFrom(string_variant_discr, *tmp_string_column, 0, length); + variant_column_ptr->insertManyIntoVariantFrom(string_variant_discr, *tmp_string_column, 0, length); } @@ -496,8 +511,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co /// but Dynamic doesn't have fixed mapping discriminator <-> variant type /// as different Dynamic column can have different Variants. /// Instead, we serialize null bit + variant type in binary format (size + bytes) + value. 
- const auto & variant_col = assert_cast(*variant_column); - auto discr = variant_col.globalDiscriminatorAt(n); + auto discr = variant_column_ptr->globalDiscriminatorAt(n); StringRef res; UInt8 null_bit = discr == ColumnVariant::NULL_DISCRIMINATOR; if (null_bit) @@ -519,7 +533,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co res.data = pos; res.size = sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(); - auto value_ref = variant_col.getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_col.offsetAt(n), arena, begin); + auto value_ref = variant_column_ptr->getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_column_ptr->offsetAt(n), arena, begin); res.data = value_ref.data - res.size; res.size += value_ref.size; return res; @@ -527,7 +541,6 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos) { - auto & variant_col = assert_cast(*variant_column); UInt8 null_bit = unalignedLoad(pos); pos += sizeof(UInt8); if (null_bit) @@ -550,14 +563,14 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos) if (it != variant_info.variant_name_to_discriminator.end()) { auto discr = it->second; - return variant_col.deserializeVariantAndInsertFromArena(discr, pos); + return variant_column_ptr->deserializeVariantAndInsertFromArena(discr, pos); } /// If we don't have such variant, add it. if (likely(addNewVariant(variant_type))) { auto discr = variant_info.variant_name_to_discriminator[variant_name]; - return variant_col.deserializeVariantAndInsertFromArena(discr, pos); + return variant_column_ptr->deserializeVariantAndInsertFromArena(discr, pos); } /// We reached maximum number of variants and couldn't add new variant. @@ -568,7 +581,7 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos) pos = tmp_variant_column->deserializeAndInsertFromArena(pos); /// Cast temporary column to String and insert this value into String variant. 
auto str_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - variant_col.insertIntoVariantFrom(variant_info.variant_name_to_discriminator["String"], *str_column, 0); + variant_column_ptr->insertIntoVariantFrom(variant_info.variant_name_to_discriminator["String"], *str_column, 0); return pos; } @@ -591,8 +604,7 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const { - const auto & variant_col = assert_cast(*variant_column); - auto discr = variant_col.globalDiscriminatorAt(n); + auto discr = variant_column_ptr->globalDiscriminatorAt(n); if (discr == ColumnVariant::NULL_DISCRIMINATOR) { hash.update(discr); @@ -600,7 +612,7 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const } hash.update(variant_info.variant_names[discr]); - variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash); + variant_column_ptr->getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_column_ptr->offsetAt(n), hash); } #if !defined(DEBUG_OR_SANITIZER_BUILD) @@ -609,9 +621,9 @@ int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int na int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const #endif { - const auto & left_variant = assert_cast(*variant_column); + const auto & left_variant = getVariantColumn(); const auto & right_dynamic = assert_cast(rhs); - const auto & right_variant = assert_cast(*right_dynamic.variant_column); + const auto & right_variant = right_dynamic.getVariantColumn(); auto left_discr = left_variant.globalDiscriminatorAt(n); auto right_discr = right_variant.globalDiscriminatorAt(m); @@ -634,7 +646,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int ColumnPtr ColumnDynamic::compress() const { - ColumnPtr variant_compressed = variant_column->compress(); + ColumnPtr variant_compressed = variant_column_ptr->compress(); size_t byte_size = variant_compressed->byteSize(); return ColumnCompressed::create(size(), byte_size, [my_variant_compressed = std::move(variant_compressed), my_variant_info = variant_info, my_max_dynamic_types = max_dynamic_types, my_statistics = statistics]() mutable @@ -740,6 +752,7 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source } variant_column = variant_info.variant_type->createColumn(); + variant_column_ptr = assert_cast(variant_column.get()); /// Now we have the resulting Variant that will be used in all merged columns. 
/// Variants can also contain Dynamic columns inside, we should collect @@ -767,12 +780,12 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source void ColumnDynamic::applyNullMap(const ColumnVector::Container & null_map) { - assert_cast(*variant_column).applyNullMap(null_map); + variant_column_ptr->applyNullMap(null_map); } void ColumnDynamic::applyNegatedNullMap(const ColumnVector::Container & null_map) { - assert_cast(*variant_column).applyNegatedNullMap(null_map); + variant_column_ptr->applyNegatedNullMap(null_map); } } diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 3d36186213a..d384dce58d0 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -107,37 +107,37 @@ public: size_t size() const override { - return variant_column->size(); + return variant_column_ptr->size(); } Field operator[](size_t n) const override { - return (*variant_column)[n]; + return (*variant_column_ptr)[n]; } void get(size_t n, Field & res) const override { - variant_column->get(n, res); + variant_column_ptr->get(n, res); } bool isDefaultAt(size_t n) const override { - return variant_column->isDefaultAt(n); + return variant_column_ptr->isDefaultAt(n); } bool isNullAt(size_t n) const override { - return variant_column->isNullAt(n); + return variant_column_ptr->isNullAt(n); } StringRef getDataAt(size_t n) const override { - return variant_column->getDataAt(n); + return variant_column_ptr->getDataAt(n); } void insertData(const char * pos, size_t length) override { - variant_column->insertData(pos, length); + variant_column_ptr->insertData(pos, length); } void insert(const Field & x) override; @@ -155,17 +155,17 @@ public: void insertDefault() override { - variant_column->insertDefault(); + variant_column_ptr->insertDefault(); } void insertManyDefaults(size_t length) override { - variant_column->insertManyDefaults(length); + variant_column_ptr->insertManyDefaults(length); } void popBack(size_t n) override { - variant_column->popBack(n); + variant_column_ptr->popBack(n); } StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override; @@ -176,42 +176,42 @@ public: WeakHash32 getWeakHash32() const override { - return variant_column->getWeakHash32(); + return variant_column_ptr->getWeakHash32(); } void updateHashFast(SipHash & hash) const override { - variant_column->updateHashFast(hash); + variant_column_ptr->updateHashFast(hash); } ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override { - return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types); + return create(variant_column_ptr->filter(filt, result_size_hint), variant_info, max_dynamic_types); } void expand(const Filter & mask, bool inverted) override { - variant_column->expand(mask, inverted); + variant_column_ptr->expand(mask, inverted); } ColumnPtr permute(const Permutation & perm, size_t limit) const override { - return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types); + return create(variant_column_ptr->permute(perm, limit), variant_info, max_dynamic_types); } ColumnPtr index(const IColumn & indexes, size_t limit) const override { - return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types); + return create(variant_column_ptr->index(indexes, limit), variant_info, max_dynamic_types); } ColumnPtr replicate(const Offsets & replicate_offsets) const override { - return create(variant_column->replicate(replicate_offsets), variant_info, 
max_dynamic_types); + return create(variant_column_ptr->replicate(replicate_offsets), variant_info, max_dynamic_types); } MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override { - MutableColumns scattered_variant_columns = variant_column->scatter(num_columns, selector); + MutableColumns scattered_variant_columns = variant_column_ptr->scatter(num_columns, selector); MutableColumns scattered_columns; scattered_columns.reserve(num_columns); for (auto & scattered_variant_column : scattered_variant_columns) @@ -228,54 +228,59 @@ public: bool hasEqualValues() const override { - return variant_column->hasEqualValues(); + return variant_column_ptr->hasEqualValues(); } void getExtremes(Field & min, Field & max) const override { - variant_column->getExtremes(min, max); + variant_column_ptr->getExtremes(min, max); } void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override { - variant_column->getPermutation(direction, stability, limit, nan_direction_hint, res); + variant_column_ptr->getPermutation(direction, stability, limit, nan_direction_hint, res); } void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override { - variant_column->updatePermutation(direction, stability, limit, nan_direction_hint, res, equal_ranges); + variant_column_ptr->updatePermutation(direction, stability, limit, nan_direction_hint, res, equal_ranges); } void reserve(size_t n) override { - variant_column->reserve(n); + variant_column_ptr->reserve(n); + } + + size_t capacity() const override + { + return variant_column_ptr->capacity(); } void ensureOwnership() override { - variant_column->ensureOwnership(); + variant_column_ptr->ensureOwnership(); } size_t byteSize() const override { - return variant_column->byteSize(); + return variant_column_ptr->byteSize(); } size_t byteSizeAt(size_t n) const override { - return variant_column->byteSizeAt(n); + return variant_column_ptr->byteSizeAt(n); } size_t allocatedBytes() const override { - return variant_column->allocatedBytes(); + return variant_column_ptr->allocatedBytes(); } void protect() override { - variant_column->protect(); + variant_column_ptr->protect(); } void forEachSubcolumn(MutableColumnCallback callback) override @@ -300,27 +305,27 @@ public: double getRatioOfDefaultRows(double sample_ratio) const override { - return variant_column->getRatioOfDefaultRows(sample_ratio); + return variant_column_ptr->getRatioOfDefaultRows(sample_ratio); } UInt64 getNumberOfDefaultRows() const override { - return variant_column->getNumberOfDefaultRows(); + return variant_column_ptr->getNumberOfDefaultRows(); } void getIndicesOfNonDefaultRows(Offsets & indices, size_t from, size_t limit) const override { - variant_column->getIndicesOfNonDefaultRows(indices, from, limit); + variant_column_ptr->getIndicesOfNonDefaultRows(indices, from, limit); } void finalize() override { - variant_column->finalize(); + variant_column_ptr->finalize(); } bool isFinalized() const override { - return variant_column->isFinalized(); + return variant_column_ptr->isFinalized(); } /// Apply null map to a nested Variant column. 
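The new IColumn::capacity() virtual reports how many rows reserve() has already made room for, with a conservative default of size() for columns that do not override it. A toy illustration of the intended contract (IToyColumn and ToyVectorColumn are invented names, not the real interface):

    #include <cassert>
    #include <vector>

    struct IToyColumn
    {
        virtual ~IToyColumn() = default;
        virtual size_t size() const = 0;
        virtual void reserve(size_t) {}
        /// Conservative default: a column that cannot report its allocation
        /// claims it only has room for what it already stores.
        virtual size_t capacity() const { return size(); }
    };

    struct ToyVectorColumn : IToyColumn
    {
        std::vector<int> data;
        size_t size() const override { return data.size(); }
        void reserve(size_t n) override { data.reserve(n); }
        size_t capacity() const override { return data.capacity(); }
    };

    int main()
    {
        ToyVectorColumn col;
        col.reserve(65536); /// e.g. one block of rows up front
        assert(col.capacity() >= 65536);
        assert(col.size() == 0);
        return 0;
    }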
@@ -332,8 +337,8 @@ public: const ColumnPtr & getVariantColumnPtr() const { return variant_column; } ColumnPtr & getVariantColumnPtr() { return variant_column; } - const ColumnVariant & getVariantColumn() const { return assert_cast(*variant_column); } - ColumnVariant & getVariantColumn() { return assert_cast(*variant_column); } + const ColumnVariant & getVariantColumn() const { return *variant_column_ptr; } + ColumnVariant & getVariantColumn() { return *variant_column_ptr; } bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } bool addNewVariant(const DataTypePtr & new_variant, const String & variant_name); @@ -342,10 +347,13 @@ public: bool hasDynamicStructure() const override { return true; } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; + void setStatistics(const Statistics & statistics_) { statistics = statistics_; } const Statistics & getStatistics() const { return statistics; } size_t getMaxDynamicTypes() const { return max_dynamic_types; } + void setVariantType(const DataTypePtr & variant_type); + private: /// Combine current variant with the other variant and return global discriminators mapping /// from other variant to the combined one. It's used for inserting from @@ -356,6 +364,7 @@ private: void updateVariantInfoAndExpandVariantColumn(const DataTypePtr & new_variant_type); WrappedPtr variant_column; + ColumnVariant * variant_column_ptr; /// Store the type of current variant with some additional information. VariantInfo variant_info; /// The maximum number of different types that can be stored in this Dynamic column. diff --git a/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h index 676ac7712ba..8cf0a6a57da 100644 --- a/src/Columns/ColumnFixedString.h +++ b/src/Columns/ColumnFixedString.h @@ -182,6 +182,11 @@ public: chars.reserve_exact(n * size); } + size_t capacity() const override + { + return chars.capacity() / n; + } + void shrinkToFit() override { chars.shrink_to_fit(); diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h index 3766b247d60..0b12b50b97d 100644 --- a/src/Columns/ColumnLowCardinality.h +++ b/src/Columns/ColumnLowCardinality.h @@ -172,6 +172,7 @@ public: } void reserve(size_t n) override { idx.reserve(n); } + size_t capacity() const override { return idx.capacity(); } void shrinkToFit() override { idx.shrinkToFit(); } /// Don't count the dictionary size as it can be shared between different blocks. 
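For columns that keep several primitive elements per row, capacity() has to be reported in rows rather than in underlying elements, which is why ColumnFixedString divides the byte buffer's capacity by the fixed value size n. A small sketch of the same arithmetic with a made-up FixedWidthColumn:

    #include <cassert>
    #include <vector>

    /// Stores `width` bytes per row in one flat buffer.
    struct FixedWidthColumn
    {
        explicit FixedWidthColumn(size_t width_) : width(width_) {}

        void reserve(size_t rows) { chars.reserve(rows * width); }

        /// Capacity in rows, not in bytes.
        size_t capacity() const { return chars.capacity() / width; }

        size_t size() const { return chars.size() / width; }

        size_t width;
        std::vector<char> chars;
    };

    int main()
    {
        FixedWidthColumn col(16);
        col.reserve(1000);
        assert(col.capacity() >= 1000);
        assert(col.size() == 0);
        return 0;
    }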
@@ -309,6 +310,7 @@ public: void popBack(size_t n) { positions->popBack(n); } void reserve(size_t n) { positions->reserve(n); } + size_t capacity() const { return positions->capacity(); } void shrinkToFit() { positions->shrinkToFit(); } UInt64 getMaxPositionForCurrentType() const; diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 1025b4e77b9..56f5f754495 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -249,6 +249,11 @@ void ColumnMap::reserve(size_t n) nested->reserve(n); } +size_t ColumnMap::capacity() const +{ + return nested->capacity(); +} + void ColumnMap::shrinkToFit() { nested->shrinkToFit(); diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 3eaaa0ad562..246feff116e 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -94,6 +94,7 @@ public: void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 6529f0b78db..c65fed63562 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -706,6 +706,11 @@ void ColumnNullable::reserve(size_t n) getNullMapData().reserve(n); } +size_t ColumnNullable::capacity() const +{ + return getNullMapData().capacity(); +} + void ColumnNullable::shrinkToFit() { getNestedColumn().shrinkToFit(); diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index fe9f5b6dcc2..c4d2eaf253c 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -125,6 +125,7 @@ public: size_t limit, int null_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override; void reserve(size_t n) override; + size_t capacity() const override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 47eb517e81b..4181c6081c7 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -49,8 +49,12 @@ ColumnObject::ColumnObject( typed_paths[path] = std::move(column); dynamic_paths.reserve(dynamic_paths_.size()); + dynamic_paths_ptrs.reserve(dynamic_paths_.size()); for (auto & [path, column] : dynamic_paths_) + { dynamic_paths[path] = std::move(column); + dynamic_paths_ptrs[path] = assert_cast(dynamic_paths[path].get()); + } } ColumnObject::ColumnObject( @@ -161,7 +165,7 @@ Field ColumnObject::operator[](size_t n) const for (const auto & [path, column] : typed_paths) object[path] = (*column)[n]; - for (const auto & [path, column] : dynamic_paths) + for (const auto & [path, column] : dynamic_paths_ptrs) { /// Output only non-null values from dynamic paths. We cannot distinguish cases when /// dynamic path has Null value and when it's absent in the row and consider them equivalent. 
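tryToAddNewDynamicPath now reserves the freshly created Dynamic column to shared_data->capacity() and backfills defaults for all earlier rows, so a path that first appears in the middle of a block does not cause repeated reallocations. The sketch below shows that add-a-column-mid-stream pattern with plain vectors; ToyObjectColumn and its members are invented for illustration and the real padding logic is more involved.

    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct ToyObjectColumn
    {
        size_t rows = 0;
        size_t reserved = 0;
        std::unordered_map<std::string, std::vector<int>> paths;

        void reserve(size_t n)
        {
            reserved = n;
            for (auto & [_, column] : paths)
                column.reserve(n);
        }

        /// A path seen for the first time is reserved up to the known block
        /// capacity and padded with defaults for every previous row.
        std::vector<int> & addPath(const std::string & path)
        {
            auto & column = paths[path];
            column.reserve(reserved);
            column.resize(rows, 0);
            return column;
        }

        void insert(const std::string & path, int value)
        {
            auto it = paths.find(path);
            auto & column = it == paths.end() ? addPath(path) : it->second;
            column.push_back(value);
            ++rows;
            /// Real code also pads the *other* paths with defaults for this row; omitted here.
        }
    };

    int main()
    {
        ToyObjectColumn col;
        col.reserve(8);
        col.insert("a", 1);
        col.insert("b", 2); /// "b" appears late: padded with one default row first
        assert(col.paths["b"].size() == 2);
        assert(col.paths["b"][0] == 0);
        return 0;
    }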
@@ -199,7 +203,7 @@ bool ColumnObject::isDefaultAt(size_t n) const return false; } - for (const auto & [path, column] : dynamic_paths) + for (const auto & [path, column] : dynamic_paths_ptrs) { if (!column->isDefaultAt(n)) return false; @@ -221,15 +225,17 @@ void ColumnObject::insertData(const char *, size_t) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertData is not supported for {}", getName()); } -IColumn * ColumnObject::tryToAddNewDynamicPath(const String & path) +ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(const String & path) { if (dynamic_paths.size() == max_dynamic_paths) return nullptr; auto new_dynamic_column = ColumnDynamic::create(max_dynamic_types); + new_dynamic_column->reserve(shared_data->capacity()); new_dynamic_column->insertManyDefaults(size()); auto it = dynamic_paths.emplace(path, std::move(new_dynamic_column)).first; - return it->second.get(); + auto it_ptr = dynamic_paths_ptrs.emplace(path, assert_cast(it->second.get())).first; + return it_ptr->second; } void ColumnObject::setDynamicPaths(const std::vector & paths) @@ -244,6 +250,7 @@ void ColumnObject::setDynamicPaths(const std::vector & paths) if (size) new_dynamic_column->insertManyDefaults(size); dynamic_paths[path] = std::move(new_dynamic_column); + dynamic_paths_ptrs[path] = assert_cast(dynamic_paths[path].get()); } } @@ -259,7 +266,7 @@ void ColumnObject::insert(const Field & x) { typed_it->second->insert(value_field); } - else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + else if (auto dynamic_it = dynamic_paths_ptrs.find(path); dynamic_it != dynamic_paths_ptrs.end()) { dynamic_it->second->insert(value_field); } @@ -291,7 +298,7 @@ void ColumnObject::insert(const Field & x) column->insertDefault(); } - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) { if (column->size() == current_size) column->insertDefault(); @@ -317,7 +324,7 @@ bool ColumnObject::tryInsert(const Field & x) column->popBack(column->size() - prev_size); } - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) { if (column->size() != prev_size) column->popBack(column->size() - prev_size); @@ -339,7 +346,7 @@ bool ColumnObject::tryInsert(const Field & x) return false; } } - else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + else if (auto dynamic_it = dynamic_paths_ptrs.find(path); dynamic_it != dynamic_paths_ptrs.end()) { if (!dynamic_it->second->tryInsert(value_field)) { @@ -376,7 +383,7 @@ bool ColumnObject::tryInsert(const Field & x) column->insertDefault(); } - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) { if (column->size() == prev_size) column->insertDefault(); @@ -404,7 +411,7 @@ void ColumnObject::doInsertFrom(const IColumn & src, size_t n) for (const auto & [path, column] : src_object_column.dynamic_paths) { /// Check if we already have such dynamic path. - if (auto it = dynamic_paths.find(path); it != dynamic_paths.end()) + if (auto it = dynamic_paths_ptrs.find(path); it != dynamic_paths_ptrs.end()) it->second->insertFrom(*column, n); /// Try to add a new dynamic path. else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) @@ -438,7 +445,7 @@ void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t l for (const auto & [path, column] : src_object_column.dynamic_paths) { /// Check if we already have such dynamic path. 
- if (auto it = dynamic_paths.find(path); it != dynamic_paths.end()) + if (auto it = dynamic_paths_ptrs.find(path); it != dynamic_paths_ptrs.end()) it->second->insertRangeFrom(*column, start, length); /// Try to add a new dynamic path. else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) @@ -484,7 +491,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co } /// Insert default values in all remaining dynamic paths. - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) { if (column->size() == current_size) column->insertManyDefaults(length); @@ -508,7 +515,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co { auto path = src_shared_data_paths->getDataAt(i); /// Check if we have this path in dynamic paths. - if (auto it = dynamic_paths.find(path.toString()); it != dynamic_paths.end()) + if (auto it = dynamic_paths_ptrs.find(path.toString()); it != dynamic_paths_ptrs.end()) { /// Deserialize binary value into dynamic column from shared data. deserializeValueFromSharedData(src_shared_data_values, i, *it->second); @@ -541,7 +548,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co shared_data_offsets.push_back(shared_data_paths->size()); /// Insert default value in all remaining dynamic paths. - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) { if (column->size() == current_size) column->insertDefault(); @@ -576,7 +583,7 @@ void ColumnObject::insertDefault() { for (auto & [_, column] : typed_paths) column->insertDefault(); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->insertDefault(); shared_data->insertDefault(); } @@ -585,7 +592,7 @@ void ColumnObject::insertManyDefaults(size_t length) { for (auto & [_, column] : typed_paths) column->insertManyDefaults(length); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->insertManyDefaults(length); shared_data->insertManyDefaults(length); } @@ -594,7 +601,7 @@ void ColumnObject::popBack(size_t n) { for (auto & [_, column] : typed_paths) column->popBack(n); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->popBack(n); shared_data->popBack(n); } @@ -742,7 +749,7 @@ void ColumnObject::updateHashWithValue(size_t n, SipHash & hash) const { for (const auto & [_, column] : typed_paths) column->updateHashWithValue(n, hash); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) column->updateHashWithValue(n, hash); shared_data->updateHashWithValue(n, hash); } @@ -752,7 +759,7 @@ WeakHash32 ColumnObject::getWeakHash32() const WeakHash32 hash(size()); for (const auto & [_, column] : typed_paths) hash.update(column->getWeakHash32()); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) hash.update(column->getWeakHash32()); hash.update(shared_data->getWeakHash32()); return hash; @@ -762,7 +769,7 @@ void ColumnObject::updateHashFast(SipHash & hash) const { for (const auto & [_, column] : typed_paths) column->updateHashFast(hash); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) column->updateHashFast(hash); shared_data->updateHashFast(hash); } @@ -775,8 +782,8 @@ ColumnPtr ColumnObject::filter(const Filter & filt, ssize_t result_size_hint) co filtered_typed_paths[path] = 
column->filter(filt, result_size_hint); std::unordered_map filtered_dynamic_paths; - filtered_dynamic_paths.reserve(dynamic_paths.size()); - for (const auto & [path, column] : dynamic_paths) + filtered_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) filtered_dynamic_paths[path] = column->filter(filt, result_size_hint); auto filtered_shared_data = shared_data->filter(filt, result_size_hint); @@ -787,7 +794,7 @@ void ColumnObject::expand(const Filter & mask, bool inverted) { for (auto & [_, column] : typed_paths) column->expand(mask, inverted); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->expand(mask, inverted); shared_data->expand(mask, inverted); } @@ -800,8 +807,8 @@ ColumnPtr ColumnObject::permute(const Permutation & perm, size_t limit) const permuted_typed_paths[path] = column->permute(perm, limit); std::unordered_map permuted_dynamic_paths; - permuted_dynamic_paths.reserve(dynamic_paths.size()); - for (const auto & [path, column] : dynamic_paths) + permuted_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) permuted_dynamic_paths[path] = column->permute(perm, limit); auto permuted_shared_data = shared_data->permute(perm, limit); @@ -816,8 +823,8 @@ ColumnPtr ColumnObject::index(const IColumn & indexes, size_t limit) const indexed_typed_paths[path] = column->index(indexes, limit); std::unordered_map indexed_dynamic_paths; - indexed_dynamic_paths.reserve(dynamic_paths.size()); - for (const auto & [path, column] : dynamic_paths) + indexed_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) indexed_dynamic_paths[path] = column->index(indexes, limit); auto indexed_shared_data = shared_data->index(indexes, limit); @@ -832,8 +839,8 @@ ColumnPtr ColumnObject::replicate(const Offsets & replicate_offsets) const replicated_typed_paths[path] = column->replicate(replicate_offsets); std::unordered_map replicated_dynamic_paths; - replicated_dynamic_paths.reserve(dynamic_paths.size()); - for (const auto & [path, column] : dynamic_paths) + replicated_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) replicated_dynamic_paths[path] = column->replicate(replicate_offsets); auto replicated_shared_data = shared_data->replicate(replicate_offsets); @@ -855,9 +862,9 @@ MutableColumns ColumnObject::scatter(ColumnIndex num_columns, const Selector & s std::vector> scattered_dynamic_paths(num_columns); for (auto & dynamic_paths_ : scattered_dynamic_paths) - dynamic_paths_.reserve(dynamic_paths.size()); + dynamic_paths_.reserve(dynamic_paths_ptrs.size()); - for (const auto & [path, column] : dynamic_paths) + for (const auto & [path, column] : dynamic_paths_ptrs) { auto scattered_columns = column->scatter(num_columns, selector); for (size_t i = 0; i != num_columns; ++i) @@ -883,16 +890,21 @@ void ColumnObject::reserve(size_t n) { for (auto & [_, column] : typed_paths) column->reserve(n); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->reserve(n); shared_data->reserve(n); } +size_t ColumnObject::capacity() const +{ + return shared_data->capacity(); +} + void ColumnObject::ensureOwnership() { for (auto & [_, column] : typed_paths) column->ensureOwnership(); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->ensureOwnership(); 
shared_data->ensureOwnership(); } @@ -902,7 +914,7 @@ size_t ColumnObject::byteSize() const size_t size = 0; for (const auto & [_, column] : typed_paths) size += column->byteSize(); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) size += column->byteSize(); size += shared_data->byteSize(); return size; @@ -913,7 +925,7 @@ size_t ColumnObject::byteSizeAt(size_t n) const size_t size = 0; for (const auto & [_, column] : typed_paths) size += column->byteSizeAt(n); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) size += column->byteSizeAt(n); size += shared_data->byteSizeAt(n); return size; @@ -924,7 +936,7 @@ size_t ColumnObject::allocatedBytes() const size_t size = 0; for (const auto & [_, column] : typed_paths) size += column->allocatedBytes(); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) size += column->allocatedBytes(); size += shared_data->allocatedBytes(); return size; @@ -934,7 +946,7 @@ void ColumnObject::protect() { for (auto & [_, column] : typed_paths) column->protect(); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->protect(); shared_data->protect(); } @@ -955,7 +967,7 @@ void ColumnObject::forEachSubcolumnRecursively(DB::IColumn::RecursiveMutableColu callback(*column); column->forEachSubcolumnRecursively(callback); } - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) { callback(*column); column->forEachSubcolumnRecursively(callback); @@ -994,8 +1006,8 @@ ColumnPtr ColumnObject::compress() const } std::unordered_map compressed_dynamic_paths; - compressed_dynamic_paths.reserve(dynamic_paths.size()); - for (const auto & [path, column] : dynamic_paths) + compressed_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) { auto compressed_column = column->compress(); byte_size += compressed_column->byteSize(); @@ -1034,7 +1046,7 @@ void ColumnObject::finalize() { for (auto & [_, column] : typed_paths) column->finalize(); - for (auto & [_, column] : dynamic_paths) + for (auto & [_, column] : dynamic_paths_ptrs) column->finalize(); shared_data->finalize(); } @@ -1044,7 +1056,7 @@ bool ColumnObject::isFinalized() const bool finalized = true; for (const auto & [_, column] : typed_paths) finalized &= column->isFinalized(); - for (const auto & [_, column] : dynamic_paths) + for (const auto & [_, column] : dynamic_paths_ptrs) finalized &= column->isFinalized(); finalized &= shared_data->isFinalized(); return finalized; @@ -1101,6 +1113,7 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou } dynamic_paths.clear(); + dynamic_paths_ptrs.clear(); /// Check if the number of all dynamic paths exceeds the limit. if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) @@ -1114,13 +1127,19 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou /// Fill dynamic_paths with first max_dynamic_paths paths in sorted list. for (size_t i = 0; i != max_dynamic_paths; ++i) + { dynamic_paths[paths_with_sizes[i].second] = ColumnDynamic::create(max_dynamic_types); + dynamic_paths_ptrs[paths_with_sizes[i].second] = assert_cast(dynamic_paths[paths_with_sizes[i].second].get()); + } } /// Use all dynamic paths from all source columns. 
else { for (const auto & [path, _] : path_to_total_number_of_non_null_values) + { dynamic_paths[path] = ColumnDynamic::create(max_dynamic_types); + dynamic_paths_ptrs[path] = assert_cast(dynamic_paths[path].get()); + } } /// Fill statistics for the merged part. diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 0b949cbcb3a..4a27edfc295 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -116,6 +116,7 @@ public: void getExtremes(Field & min, Field & max) const override; void reserve(size_t n) override; + size_t capacity() const override; void ensureOwnership() override; size_t byteSize() const override; size_t byteSizeAt(size_t n) const override; @@ -142,6 +143,9 @@ public: const std::unordered_map & getDynamicPaths() const { return dynamic_paths; } std::unordered_map & getDynamicPaths() { return dynamic_paths; } + const std::unordered_map & getDynamicPathsPtrs() const { return dynamic_paths_ptrs; } + std::unordered_map & getDynamicPathsPtrs() { return dynamic_paths_ptrs; } + const Statistics & getStatistics() const { return statistics; } const ColumnPtr & getSharedDataPtr() const { return shared_data; } @@ -173,7 +177,7 @@ public: /// Try to add new dynamic path. Returns pointer to the new dynamic /// path column or nullptr if limit on dynamic paths is reached. - IColumn * tryToAddNewDynamicPath(const String & path); + ColumnDynamic * tryToAddNewDynamicPath(const String & path); void setDynamicPaths(const std::vector & paths); void setStatistics(const Statistics & statistics_) { statistics = statistics_; } @@ -197,6 +201,7 @@ private: /// here are Dynamic columns. This set of paths can be extended /// during inerts into the column. std::unordered_map dynamic_paths; + std::unordered_map dynamic_paths_ptrs; /// Shared storage for all other paths and values. It's filled /// when the number of dynamic paths reaches the limit. 
/// It has type Array(Tuple(String, String)) and stores diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 7cfa2571f5a..4cf0eb894e6 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -557,6 +557,11 @@ void ColumnString::reserve(size_t n) offsets.reserve_exact(n); } +size_t ColumnString::capacity() const +{ + return offsets.capacity(); +} + void ColumnString::shrinkToFit() { chars.shrink_to_fit(); diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h index c1012e1e55e..6b94de83530 100644 --- a/src/Columns/ColumnString.h +++ b/src/Columns/ColumnString.h @@ -283,6 +283,7 @@ public: ColumnPtr compress() const override; void reserve(size_t n) override; + size_t capacity() const override; void shrinkToFit() override; void getExtremes(Field & min, Field & max) const override; diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 4fc3f88a87c..8cef8b0a499 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -595,6 +595,14 @@ void ColumnTuple::reserve(size_t n) getColumn(i).reserve(n); } +size_t ColumnTuple::capacity() const +{ + if (columns.empty()) + return size(); + + return getColumn(0).capacity(); +} + void ColumnTuple::shrinkToFit() { const size_t tuple_size = columns.size(); diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 16b47a993f6..502022c9532 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -110,6 +110,7 @@ public: void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index de7efb41d19..adba4e9691f 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -1251,6 +1251,11 @@ void ColumnVariant::reserve(size_t n) offsets->reserve(n); } +size_t ColumnVariant::capacity() const +{ + return local_discriminators->capacity(); +} + void ColumnVariant::ensureOwnership() { const size_t num_variants = variants.size(); diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 34c24b5428d..b4bc63e8d4c 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -237,6 +237,7 @@ public: size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void ensureOwnership() override; size_t byteSize() const override; size_t byteSizeAt(size_t n) const override; diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 2fe5b635bd2..81d514afd4a 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -180,6 +180,11 @@ public: data.reserve_exact(n); } + size_t capacity() const override + { + return data.capacity(); + } + void shrinkToFit() override { data.shrink_to_fit(); diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index f9c1a3e7034..e75275d03c5 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -475,6 +475,9 @@ public: /// It affects performance only (not correctness). virtual void reserve(size_t /*n*/) {} + /// Returns the number of elements allocated in reserve. 
+ virtual size_t capacity() const { return size(); } + /// Requests the removal of unused capacity. /// It is a non-binding request to reduce the capacity of the underlying container to its size. virtual void shrinkToFit() {} diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index a6c5c201f36..eeda6476fd0 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -314,10 +314,15 @@ std::unique_ptr DataTypeObject::getDynamicSubcolu } auto & result_dynamic_columns = result_object_column.getDynamicPaths(); + auto & result_dynamic_columns_ptrs = result_object_column.getDynamicPathsPtrs(); for (const auto & [path, column] : object_column.getDynamicPaths()) { if (path.starts_with(prefix) && path.size() != prefix.size()) - result_dynamic_columns[getSubPath(path, prefix)] = column; + { + auto sub_path = getSubPath(path, prefix); + result_dynamic_columns[sub_path] = column; + result_dynamic_columns_ptrs[sub_path] = assert_cast(result_dynamic_columns[sub_path].get()); + } } const auto & shared_data_offsets = object_column.getSharedDataOffsets(); diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index c1ef67b2aaa..bd760648498 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -279,13 +279,16 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams( return; auto mutable_column = column->assumeMutable(); + auto & column_dynamic = assert_cast(*mutable_column); auto * dynamic_state = checkAndGetState(state); auto * structure_state = checkAndGetState(dynamic_state->structure_state); - if (mutable_column->empty()) - mutable_column = ColumnDynamic::create(structure_state->variant_type->createColumn(), structure_state->variant_type, max_dynamic_types, structure_state->statistics); + if (column_dynamic.empty()) + { + column_dynamic.setVariantType(structure_state->variant_type); + column_dynamic.setStatistics(structure_state->statistics); + } - auto & column_dynamic = assert_cast(*mutable_column); const auto & variant_info = column_dynamic.getVariantInfo(); if (!variant_info.variant_type->equals(*structure_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", structure_state->variant_type->getName(), variant_info.variant_type->getName()); diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index e8a78a40749..f5b5a7bb984 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1650,7 +1650,7 @@ public: typed_column->insertDefault(); } - for (auto & [_, dynamic_column] : column_object.getDynamicPaths()) + for (auto & [_, dynamic_column] : column_object.getDynamicPathsPtrs()) { if (dynamic_column->size() == prev_size) dynamic_column->insertDefault(); @@ -1689,7 +1689,7 @@ private: } auto & typed_paths = column_object.getTypedPaths(); - auto & dynamic_paths = column_object.getDynamicPaths(); + auto & dynamic_paths_ptrs = column_object.getDynamicPathsPtrs(); /// Check if we have this path in typed paths. if (auto typed_it = typed_paths.find(current_path); typed_it != typed_paths.end()) { @@ -1709,7 +1709,7 @@ private: } } /// Check if we have this path in dynamic paths. 
- else if (auto dynamic_it = dynamic_paths.find(current_path); dynamic_it != dynamic_paths.end()) + else if (auto dynamic_it = dynamic_paths_ptrs.find(current_path); dynamic_it != dynamic_paths_ptrs.end()) { /// Check if we already had this path. if (dynamic_it->second->size() > current_size) diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index 0b6c81923db..31d7fdbf67b 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -124,6 +124,10 @@ Chunk IRowInputFormat::read() return getChunkForCount(num_rows); } + /// Reserve params.max_block_size rows in advance. + for (auto & column : columns) + column->reserve(params.max_block_size); + RowReadExtension info; bool continue_reading = true; for (size_t rows = 0; (rows < params.max_block_size || num_rows == 0) && continue_reading; ++rows) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp index c5e36046c62..dde8a86babd 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp @@ -15,11 +15,8 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; } -JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) - : JSONAsRowInputFormat(header_, std::make_unique(in_), params_, format_settings_) {} - -JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, std::unique_ptr buf_, Params params_, const FormatSettings & format_settings_) : - JSONEachRowRowInputFormat(*buf_, header_, std::move(params_), format_settings_, false), buf(std::move(buf_)) +JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) : + JSONEachRowRowInputFormat(in_, header_, std::move(params_), format_settings_, false) { if (header_.columns() > 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, @@ -27,19 +24,6 @@ JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, std::unique_pt header_.columns()); } - -void JSONAsRowInputFormat::setReadBuffer(ReadBuffer & in_) -{ - buf = std::make_unique(in_); - JSONEachRowRowInputFormat::setReadBuffer(*buf); -} - -void JSONAsRowInputFormat::resetReadBuffer() -{ - buf.reset(); - JSONEachRowRowInputFormat::resetReadBuffer(); -} - bool JSONAsRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) { assert(columns.size() == 1); @@ -48,35 +32,44 @@ bool JSONAsRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) if (!allow_new_rows) return false; - skipWhitespaceIfAny(*buf); - if (!buf->eof()) + skipWhitespaceIfAny(*in); + if (!in->eof()) { - if (!data_in_square_brackets && *buf->position() == ';') + if (!data_in_square_brackets && *in->position() == ';') { /// ';' means the end of query, but it cannot be before ']'. return allow_new_rows = false; } - else if (data_in_square_brackets && *buf->position() == ']') + else if (data_in_square_brackets && *in->position() == ']') { /// ']' means the end of query. 
return allow_new_rows = false; } } - if (!buf->eof()) + if (!in->eof()) readJSONObject(*columns[0]); - skipWhitespaceIfAny(*buf); - if (!buf->eof() && *buf->position() == ',') - ++buf->position(); - skipWhitespaceIfAny(*buf); + skipWhitespaceIfAny(*in); + if (!in->eof() && *in->position() == ',') + ++in->position(); + skipWhitespaceIfAny(*in); - return !buf->eof(); + return !in->eof(); } JSONAsStringRowInputFormat::JSONAsStringRowInputFormat( const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) : JSONAsRowInputFormat(header_, in_, params_, format_settings_) +{ +} + +JSONAsStringRowInputFormat::JSONAsStringRowInputFormat( + const DB::Block & header_, + std::unique_ptr buf_, + DB::IRowInputFormat::Params params_, + const DB::FormatSettings & format_settings_) + : JSONAsRowInputFormat(header_, *buf_, params_, format_settings_), buf(std::move(buf_)) { if (!isString(removeNullable(removeLowCardinality(header_.getByPosition(0).type)))) throw Exception(ErrorCodes::BAD_ARGUMENTS, @@ -84,6 +77,18 @@ JSONAsStringRowInputFormat::JSONAsStringRowInputFormat( header_.getByPosition(0).type->getName()); } +void JSONAsStringRowInputFormat::setReadBuffer(ReadBuffer & in_) +{ + buf = std::make_unique(in_); + JSONAsRowInputFormat::setReadBuffer(*buf); +} + +void JSONAsStringRowInputFormat::resetReadBuffer() +{ + buf.reset(); + JSONAsRowInputFormat::resetReadBuffer(); +} + void JSONAsStringRowInputFormat::readJSONObject(IColumn & column) { PeekableReadBufferCheckpoint checkpoint{*buf}; @@ -175,7 +180,7 @@ JSONAsObjectRowInputFormat::JSONAsObjectRowInputFormat( void JSONAsObjectRowInputFormat::readJSONObject(IColumn & column) { - serializations[0]->deserializeTextJSON(column, *buf, format_settings); + serializations[0]->deserializeTextJSON(column, *in, format_settings); } Chunk JSONAsObjectRowInputFormat::getChunkForCount(size_t rows) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index a5a436260ba..7c3c1fb9fb7 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -19,17 +19,11 @@ class JSONAsRowInputFormat : public JSONEachRowRowInputFormat public: JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings); - void setReadBuffer(ReadBuffer & in_) override; - void resetReadBuffer() override; - private: - JSONAsRowInputFormat(const Block & header_, std::unique_ptr buf_, Params params_, const FormatSettings & format_settings); - bool readRow(MutableColumns & columns, RowReadExtension & ext) override; protected: virtual void readJSONObject(IColumn & column) = 0; - std::unique_ptr buf; }; /// Each JSON object is parsed as a whole to string. 
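 /// E.g. (a hypothetical table and data, in the spirit of the stateless tests):
 ///     create table t (json String) engine=Memory;
 ///     insert into t format JSONAsString {"id": 1} {"id": 2, "nested": {"x": [1, 2]}}
 /// stores the two raw object texts as two rows of the single String column.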
@@ -37,11 +31,17 @@ protected: class JSONAsStringRowInputFormat final : public JSONAsRowInputFormat { public: - JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings); + JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "JSONAsStringRowInputFormat"; } + void setReadBuffer(ReadBuffer & in_) override; + void resetReadBuffer() override; + private: + JSONAsStringRowInputFormat(const Block & header_, std::unique_ptr buf_, Params params_, const FormatSettings & format_settings); + void readJSONObject(IColumn & column) override; + std::unique_ptr buf; }; From 17b306dee522bf40b6020df5e664fcd9cc66dacf Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 23 Jul 2024 16:20:12 +0200 Subject: [PATCH 0736/2361] fix tests with timeouts --- src/Common/tests/gtest_resolve_pool.cpp | 29 +++++++++++++++---------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/Common/tests/gtest_resolve_pool.cpp b/src/Common/tests/gtest_resolve_pool.cpp index b760b9b1524..c443e961cc7 100644 --- a/src/Common/tests/gtest_resolve_pool.cpp +++ b/src/Common/tests/gtest_resolve_pool.cpp @@ -33,7 +33,7 @@ size_t toMilliseconds(auto duration) return std::chrono::duration_cast(duration).count(); } -const auto epsilon = 500us; +const auto epsilon = 1ms; class ResolvePoolMock : public DB::HostResolver { @@ -358,53 +358,59 @@ void check_no_failed_address(size_t iteration, auto & resolver, auto & addresses TEST_F(ResolvePoolTest, BannedForConsiquenceFail) { - auto history = 5ms; + auto history = 10ms; auto resolver = make_resolver(toMilliseconds(history)); auto failed_addr = resolver->resolve(); ASSERT_TRUE(addresses.contains(*failed_addr)); - auto start_at = now(); failed_addr.setFail(); + auto start_at = now(); + ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count)); ASSERT_EQ(1, CurrentMetrics::get(metrics.banned_count)); check_no_failed_address(1, resolver, addresses, failed_addr, metrics, start_at + history - epsilon); sleep_until(start_at + history + epsilon); - start_at = now(); resolver->update(); ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count)); ASSERT_EQ(0, CurrentMetrics::get(metrics.banned_count)); failed_addr.setFail(); + start_at = now(); + check_no_failed_address(2, resolver, addresses, failed_addr, metrics, start_at + history - epsilon); sleep_until(start_at + history + epsilon); - start_at = now(); resolver->update(); + + // too much time has passed + if (now() > start_at + 2*history - epsilon) + return; + ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count)); ASSERT_EQ(1, CurrentMetrics::get(metrics.banned_count)); // ip still banned adter history_ms + update, because it was his second consiquent fail - check_no_failed_address(2, resolver, addresses, failed_addr, metrics, start_at + history - epsilon); + check_no_failed_address(2, resolver, addresses, failed_addr, metrics, start_at + 2*history - epsilon); } TEST_F(ResolvePoolTest, NoAditionalBannForConcurrentFail) { - auto history = 5ms; + auto history = 10ms; auto resolver = make_resolver(toMilliseconds(history)); auto failed_addr = resolver->resolve(); ASSERT_TRUE(addresses.contains(*failed_addr)); - auto start_at = now(); + failed_addr.setFail(); + failed_addr.setFail(); + failed_addr.setFail(); - failed_addr.setFail(); - failed_addr.setFail(); - failed_addr.setFail(); + auto start_at = now(); ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count)); 
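     // Several setFail() calls on the same address with no update() in between count as a single
     // failure: only one address is banned, and (as checked below) the ban is lifted after just
     // one history interval instead of being extended.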
ASSERT_EQ(1, CurrentMetrics::get(metrics.banned_count)); @@ -413,6 +419,7 @@ TEST_F(ResolvePoolTest, NoAditionalBannForConcurrentFail) sleep_until(start_at + history + epsilon); resolver->update(); + // ip is cleared after just 1 history_ms interval. ASSERT_EQ(3, CurrentMetrics::get(metrics.active_count)); ASSERT_EQ(0, CurrentMetrics::get(metrics.banned_count)); From 094eac641b0ff72e35b735a36eb5d70b6b52f3f1 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 23 Jul 2024 15:25:23 +0100 Subject: [PATCH 0737/2361] impl --- .../03036_dynamic_read_subcolumns_1.reference | 19 +++++++++++++++++ .../03036_dynamic_read_subcolumns_1.sh | 21 +++++++++++++++++++ .../03036_dynamic_read_subcolumns_2.reference | 19 +++++++++++++++++ .../03036_dynamic_read_subcolumns_2.sh | 21 +++++++++++++++++++ .../03036_dynamic_read_subcolumns_3.reference | 19 +++++++++++++++++ .../03036_dynamic_read_subcolumns_3.sh | 21 +++++++++++++++++++ 6 files changed, 120 insertions(+) create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.reference create mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.reference create mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.reference create mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.reference new file mode 100644 index 00000000000..0d51ecfac3b --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.reference @@ -0,0 +1,19 @@ +Memory +test +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +None +String +UInt64 +200000 +200000 +200000 +200000 +0 +0 +200000 +200000 +100000 +100000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh new file mode 100755 index 00000000000..aabba731816 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +# shellcheck source=./03036_dynamic_read_subcolumns.lib +. 
"$CUR_DIR"/03036_dynamic_read_subcolumns.lib + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" + +echo "Memory" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=Memory" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.reference new file mode 100644 index 00000000000..099b7574566 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.reference @@ -0,0 +1,19 @@ +MergeTree compact +test +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +None +String +UInt64 +200000 +200000 +200000 +200000 +0 +0 +200000 +200000 +100000 +100000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh new file mode 100755 index 00000000000..872f4c20a98 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +# shellcheck source=./03036_dynamic_read_subcolumns.lib +. "$CUR_DIR"/03036_dynamic_read_subcolumns.lib + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" + +echo "MergeTree compact" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" +test +$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.reference new file mode 100644 index 00000000000..35db4a22b4c --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.reference @@ -0,0 +1,19 @@ +MergeTree wide +test +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +None +String +UInt64 +200000 +200000 +200000 +200000 +0 +0 +200000 +200000 +100000 +100000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh new file mode 100755 index 00000000000..96276c96add --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +# shellcheck source=./03036_dynamic_read_subcolumns.lib +. 
"$CUR_DIR"/03036_dynamic_read_subcolumns.lib + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" + +$CH_CLIENT -q "drop table if exists test;" + +echo "MergeTree wide" +$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" +test +$CH_CLIENT -q "drop table test;" From a7baafc0221178099ef4d683a1e4f6d81af160f2 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 23 Jul 2024 15:25:52 +0100 Subject: [PATCH 0738/2361] remove old --- .../03036_dynamic_read_subcolumns.reference | 57 ----------------- .../03036_dynamic_read_subcolumns.sh | 62 ------------------- 2 files changed, 119 deletions(-) delete mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference delete mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference deleted file mode 100644 index 36984bc8b9b..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference +++ /dev/null @@ -1,57 +0,0 @@ -Memory -test -Array(Array(Dynamic)) -Array(Variant(String, UInt64)) -None -String -UInt64 -200000 -200000 -200000 -200000 -0 -0 -200000 -200000 -100000 -100000 -200000 -0 -MergeTree compact -test -Array(Array(Dynamic)) -Array(Variant(String, UInt64)) -None -String -UInt64 -200000 -200000 -200000 -200000 -0 -0 -200000 -200000 -100000 -100000 -200000 -0 -MergeTree wide -test -Array(Array(Dynamic)) -Array(Variant(String, UInt64)) -None -String -UInt64 -200000 -200000 -200000 -200000 -0 -0 -200000 -200000 -100000 -100000 -200000 -0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh deleted file mode 100755 index 65517061b99..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" - - -function test() -{ - echo "test" - $CH_CLIENT -q "insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000" - - $CH_CLIENT -q "select distinct dynamicType(d) as type from test order by type" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'UInt64'" - $CH_CLIENT -q "select count() from test where d.UInt64 is not NULL" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'String'" - $CH_CLIENT -q "select count() from test where d.String is not NULL" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Date'" - $CH_CLIENT -q "select count() from test where d.Date is not NULL" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'" - $CH_CLIENT -q "select count() from test where not empty(d.\`Array(Variant(String, UInt64))\`)" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'" - $CH_CLIENT -q "select count() from test where not empty(d.\`Array(Array(Dynamic))\`)" - $CH_CLIENT -q "select count() from test where d is NULL" - $CH_CLIENT -q "select count() from test where not empty(d.\`Tuple(a Array(Dynamic))\`.a.String)" - - $CH_CLIENT -q "select d, d.UInt64, d.String, d.\`Array(Variant(String, UInt64))\` from test format Null" - $CH_CLIENT -q "select d.UInt64, d.String, d.\`Array(Variant(String, UInt64))\` from test format Null" - $CH_CLIENT -q "select d.Int8, d.Date, d.\`Array(String)\` from test format Null" - $CH_CLIENT -q "select d, d.UInt64, d.Date, d.\`Array(Variant(String, UInt64))\`, d.\`Array(Variant(String, UInt64))\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" - $CH_CLIENT -q "select d.UInt64, d.Date, d.\`Array(Variant(String, UInt64))\`, d.\`Array(Variant(String, UInt64))\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64, d.\`Array(Variant(String, UInt64))\`.String from test format Null" - $CH_CLIENT -q "select d, d.\`Tuple(a UInt64, b String)\`.a, d.\`Array(Dynamic)\`.\`Variant(String, UInt64)\`.UInt64, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" - $CH_CLIENT -q "select d.\`Array(Dynamic)\`.\`Variant(String, UInt64)\`.UInt64, d.\`Array(Dynamic)\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" - $CH_CLIENT -q "select 
d.\`Array(Array(Dynamic))\`.size1, d.\`Array(Array(Dynamic))\`.UInt64, d.\`Array(Array(Dynamic))\`.\`Map(String, Tuple(a UInt64))\`.values.a from test format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "Memory" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=Memory" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -test -$CH_CLIENT -q "drop table test;" From e671d4c55d520187f0fb2c5abb4ac301926d9651 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 23 Jul 2024 14:43:49 +0000 Subject: [PATCH 0739/2361] more changes --- tests/integration/test_storage_s3/test.py | 137 ++++++++++++++-------- 1 file changed, 86 insertions(+), 51 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index 5453ad5a796..d13605170ec 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -962,9 +962,10 @@ def test_storage_s3_put_uncompressed(started_cluster): [pytest.param("bin", "gzip", id="bin"), pytest.param("gz", "auto", id="gz")], ) def test_storage_s3_put_gzip(started_cluster, extension, method): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] - filename = f"test_put_gzip.{extension}" + filename = f"{id}/test_put_gzip.{extension}" name = f"test_put_gzip_{extension}" data = [ "'Joseph Tomlinson',5", @@ -1001,6 +1002,7 @@ def test_storage_s3_put_gzip(started_cluster, extension, method): f = gzip.GzipFile(fileobj=buf, mode="rb") uncompressed_content = f.read().decode() assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 708 + run_query(instance, f"DROP TABLE {name}") def test_truncate_table(started_cluster): @@ -1026,14 +1028,24 @@ def test_truncate_table(started_cluster): len(list(minio.list_objects(started_cluster.minio_bucket, "truncate/"))) == 0 ): - return + break timeout -= 1 time.sleep(1) assert len(list(minio.list_objects(started_cluster.minio_bucket, "truncate/"))) == 0 - assert instance.query("SELECT * FROM {}".format(name)) == "" + # FIXME: there was a bug in test and it was never checked. + # Currently read from truncated table fails with + # DB::Exception: Failed to get object info: No response body.. 
+ # HTTP response code: 404: while reading truncate: While executing S3Source + # assert instance.query("SELECT * FROM {}".format(name)) == "" + instance.query(f"DROP TABLE {name} SYNC") + assert ( + instance.query(f"SELECT count() FROM system.tables where name='{name}'") + == "0\n" + ) def test_predefined_connection_configuration(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances[ "dummy_without_named_collections" @@ -1061,7 +1073,9 @@ def test_predefined_connection_configuration(started_cluster): user="user", ) - instance.query(f"INSERT INTO {name} SELECT number FROM numbers(10)") + instance.query( + f"INSERT INTO {name} SELECT number FROM numbers(10) SETTINGS s3_truncate_on_insert=1" + ) result = instance.query(f"SELECT * FROM {name}") assert result == instance.query("SELECT number FROM numbers(10)") @@ -1075,9 +1089,11 @@ def test_predefined_connection_configuration(started_cluster): "To execute this query, it's necessary to have the grant NAMED COLLECTION ON no_collection" in error ) - instance = started_cluster.instances["dummy"] # has named collection access - error = instance.query_and_get_error("SELECT * FROM s3(no_collection)") + instance2 = started_cluster.instances["dummy"] # has named collection access + error = instance2.query_and_get_error("SELECT * FROM s3(no_collection)") assert "There is no named collection `no_collection`" in error + instance.query("DROP USER user") + instance.query(f"DROP TABLE {name}") result = "" @@ -1227,7 +1243,7 @@ def test_s3_schema_inference(started_cluster): instance = started_cluster.instances["dummy"] instance.query( - f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000)" + f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000) SETTINGS s3_truncate_on_insert=1" ) result = instance.query(f"desc s3(s3_native, format='Native')") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" @@ -1267,6 +1283,9 @@ def test_s3_schema_inference(started_cluster): result = instance.query(f"select count(*) from {table_function}") assert int(result) == 5000000 + instance.query("drop table schema_inference") + instance.query("drop table schema_inference_2") + def test_empty_file(started_cluster): bucket = started_cluster.minio_bucket @@ -1302,6 +1321,7 @@ def test_overwrite(started_cluster): result = instance.query(f"select count() from test_overwrite") assert int(result) == 200 + instance.query(f"drop table test_overwrite") def test_create_new_files_on_insert(started_cluster): @@ -1343,6 +1363,7 @@ def test_create_new_files_on_insert(started_cluster): result = instance.query(f"select count() from test_multiple_inserts") assert int(result) == 60 + instance.query("drop table test_multiple_inserts") def test_format_detection(started_cluster): @@ -1350,7 +1371,9 @@ def test_format_detection(started_cluster): instance = started_cluster.instances["dummy"] instance.query(f"create table arrow_table_s3 (x UInt64) engine=S3(s3_arrow)") - instance.query(f"insert into arrow_table_s3 select 1") + instance.query( + f"insert into arrow_table_s3 select 1 settings s3_truncate_on_insert=1" + ) result = instance.query(f"select * from s3(s3_arrow)") assert int(result) == 1 @@ -1365,7 +1388,9 @@ def test_format_detection(started_cluster): assert int(result) == 1 instance.query(f"create table parquet_table_s3 (x UInt64) 
engine=S3(s3_parquet2)") - instance.query(f"insert into parquet_table_s3 select 1") + instance.query( + f"insert into parquet_table_s3 select 1 settings s3_truncate_on_insert=1" + ) result = instance.query(f"select * from s3(s3_parquet2)") assert int(result) == 1 @@ -1378,64 +1403,67 @@ def test_format_detection(started_cluster): f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')" ) assert int(result) == 1 + instance.query(f"drop table arrow_table_s3") + instance.query(f"drop table parquet_table_s3") def test_schema_inference_from_globs(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] instance.query( - f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" ) instance.query( - f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" + f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" ) url_filename = "test{1,2}.jsoncompacteachrow" result = instance.query( - f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}') settings input_format_json_infer_incomplete_types_as_strings=0" + f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{url_filename}') settings input_format_json_infer_incomplete_types_as_strings=0" ) assert result.strip() == "c1\tNullable(Int64)" result = instance.query( - f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}') settings input_format_json_infer_incomplete_types_as_strings=0" + f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{url_filename}') settings input_format_json_infer_incomplete_types_as_strings=0" ) assert sorted(result.split()) == ["0", "\\N"] result = instance.query( - f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" ) assert result.strip() == "c1\tNullable(Int64)" result = instance.query( - f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" ) assert sorted(result.split()) == ["0", "\\N"] instance.query( - f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + f"insert into table 
function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" ) url_filename = "test{1,3}.jsoncompacteachrow" result = instance.query_and_get_error( - f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}') settings schema_inference_use_cache_for_s3=0, input_format_json_infer_incomplete_types_as_strings=0" + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{url_filename}') settings schema_inference_use_cache_for_s3=0, input_format_json_infer_incomplete_types_as_strings=0" ) assert "All attempts to extract table structure from files failed" in result result = instance.query_and_get_error( - f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}') settings schema_inference_use_cache_for_url=0, input_format_json_infer_incomplete_types_as_strings=0" + f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{url_filename}') settings schema_inference_use_cache_for_url=0, input_format_json_infer_incomplete_types_as_strings=0" ) assert "All attempts to extract table structure from files failed" in result instance.query( - f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'" + f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'" ) result = instance.query_and_get_error( - f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow') settings schema_inference_use_cache_for_s3=0, input_format_json_infer_incomplete_types_as_strings=0" + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test*.jsoncompacteachrow') settings schema_inference_use_cache_for_s3=0, input_format_json_infer_incomplete_types_as_strings=0" ) assert "CANNOT_EXTRACT_TABLE_STRUCTURE" in result @@ -1443,7 +1471,7 @@ def test_schema_inference_from_globs(started_cluster): url_filename = "test{0,1,2,3}.jsoncompacteachrow" result = instance.query_and_get_error( - f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}') settings schema_inference_use_cache_for_url=0, input_format_json_infer_incomplete_types_as_strings=0" + f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/{url_filename}') settings schema_inference_use_cache_for_url=0, input_format_json_infer_incomplete_types_as_strings=0" ) assert "CANNOT_EXTRACT_TABLE_STRUCTURE" in result @@ -1503,9 +1531,12 @@ def test_signatures(started_cluster): ) assert "S3_ERROR" in error + instance.query(f"drop table test_signatures") + def test_select_columns(started_cluster): bucket = started_cluster.minio_bucket + id = uuid.uuid4() instance = started_cluster.instances["dummy"] name = "test_table2" structure = "id UInt32, value1 Int32, value2 Int32" @@ -1519,36 +1550,37 @@ def test_select_columns(started_cluster): instance.query( f"INSERT INTO {name} SELECT * FROM generateRandom('{structure}') LIMIT {limit} SETTINGS s3_truncate_on_insert=1" ) - instance.query(f"SELECT value2 FROM {name}") + instance.query(f"SELECT value2, '{id}' FROM {name}") instance.query("SYSTEM FLUSH LOGS") result1 = instance.query( - f"SELECT 
ProfileEvents['ReadBufferFromS3Bytes'] FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT value2 FROM {name}'" + f"SELECT ProfileEvents['ReadBufferFromS3Bytes'] FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT value2, ''{id}'' FROM {name}'" ) - instance.query(f"SELECT * FROM {name}") + instance.query(f"SELECT *, '{id}' FROM {name}") instance.query("SYSTEM FLUSH LOGS") result2 = instance.query( - f"SELECT ProfileEvents['ReadBufferFromS3Bytes'] FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT * FROM {name}'" + f"SELECT ProfileEvents['ReadBufferFromS3Bytes'] FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT *, ''{id}'' FROM {name}'" ) assert round(int(result2) / int(result1)) == 3 def test_insert_select_schema_inference(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native') select toUInt64(1) as x" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test_insert_select.native') select toUInt64(1) as x" ) result = instance.query( - f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')" + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test_insert_select.native')" ) assert result.strip() == "x\tUInt64" result = instance.query( - f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')" + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{id}/test_insert_select.native')" ) assert int(result) == 1 @@ -1558,7 +1590,7 @@ def test_parallel_reading_with_memory_limit(started_cluster): instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_memory_limit.native') select * from numbers(1000000)" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_memory_limit.native') select * from numbers(1000000) SETTINGS s3_truncate_on_insert=1" ) result = instance.query_and_get_error( @@ -1579,7 +1611,7 @@ def test_wrong_format_usage(started_cluster): instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native') select * from numbers(10e6)" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_wrong_format.native') select * from numbers(10e6) SETTINGS s3_truncate_on_insert=1" ) # size(test_wrong_format.native) = 10e6*8+16(header) ~= 76MiB @@ -2102,11 +2134,11 @@ def test_read_subcolumns(started_cluster): instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"insert 
into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS s3_truncate_on_insert=1" ) res = instance.query( @@ -2165,7 +2197,7 @@ def test_read_subcolumn_time(started_cluster): instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumn_time.tsv', auto, 'a UInt32') select (42)" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_subcolumn_time.tsv', auto, 'a UInt32') select (42) SETTINGS s3_truncate_on_insert=1" ) res = instance.query( @@ -2176,29 +2208,30 @@ def test_read_subcolumn_time(started_cluster): def test_filtering_by_file_or_path(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter1.tsv', auto, 'x UInt64') select 1" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter1.tsv', auto, 'x UInt64') select 1 SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter2.tsv', auto, 'x UInt64') select 2" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter2.tsv', auto, 'x UInt64') select 2 SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter3.tsv', auto, 'x UInt64') select 3" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter3.tsv', auto, 'x UInt64') select 3 SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"select count() from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter*.tsv') where _file = 'test_filter1.tsv'" + f"select count(), '{id}' from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_filter*.tsv') where _file = 'test_filter1.tsv'" ) instance.query("SYSTEM FLUSH LOGS") result = instance.query( - f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query like '%select%s3%test_filter%' AND type='QueryFinish'" + f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query like '%{id}%' AND type='QueryFinish'" ) assert int(result) == 1 @@ -2211,54 +2244,56 @@ def test_filtering_by_file_or_path(started_cluster): def test_union_schema_inference_mode(started_cluster): + id = uuid.uuid4() bucket = started_cluster.minio_bucket instance = started_cluster.instances["s3_non_default"] + file_name_prefix = f"test_union_schema_inference_{id}_" instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference1.jsonl') select 1 as a" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}1.jsonl') select 1 as a 
SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference2.jsonl') select 2 as b" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}2.jsonl') select 2 as b SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference3.jsonl') select 2 as c" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}3.jsonl') select 2 as c SETTINGS s3_truncate_on_insert=1" ) instance.query( - f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference4.jsonl', TSV) select 'Error'" + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}4.jsonl', TSV) select 'Error' SETTINGS s3_truncate_on_insert=1" ) for engine in ["s3", "url"]: instance.query("system drop schema cache for s3") result = instance.query( - f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}{{1,2,3}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert result == "a\tNullable(Int64)\nb\tNullable(Int64)\nc\tNullable(Int64)\n" result = instance.query( - "select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%test_union_schema_inference%' order by file format TSV" + f"select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%{file_name_prefix}%' order by file format TSV" ) assert ( - result == "UNION\ttest_union_schema_inference1.jsonl\ta Nullable(Int64)\n" - "UNION\ttest_union_schema_inference2.jsonl\tb Nullable(Int64)\n" - "UNION\ttest_union_schema_inference3.jsonl\tc Nullable(Int64)\n" + result == f"UNION\t{file_name_prefix}1.jsonl\ta Nullable(Int64)\n" + f"UNION\t{file_name_prefix}2.jsonl\tb Nullable(Int64)\n" + f"UNION\t{file_name_prefix}3.jsonl\tc Nullable(Int64)\n" ) result = instance.query( - f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3}}.jsonl') order by tuple(*) settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"select * from {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}{{1,2,3}}.jsonl') order by tuple(*) settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert result == "1\t\\N\t\\N\n" "\\N\t2\t\\N\n" "\\N\t\\N\t2\n" instance.query(f"system drop schema cache for {engine}") result = instance.query( - f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference2.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}2.jsonl') settings schema_inference_mode='union', 
describe_compact_output=1 format TSV" ) assert result == "b\tNullable(Int64)\n" result = instance.query( - f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}{{1,2,3}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert ( result == "a\tNullable(Int64)\n" @@ -2267,7 +2302,7 @@ def test_union_schema_inference_mode(started_cluster): ) error = instance.query_and_get_error( - f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_union_schema_inference{{1,2,3,4}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc {engine}('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{file_name_prefix}{{1,2,3,4}}.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert "CANNOT_EXTRACT_TABLE_STRUCTURE" in error From ea15ad4ff5929106c7e3cc18204764472a360811 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 14:45:54 +0000 Subject: [PATCH 0740/2361] Fix --- src/Columns/ColumnDynamic.h | 5 +++++ src/Columns/ColumnObject.cpp | 8 ++++++-- src/Columns/ColumnObject.h | 2 ++ utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index d384dce58d0..5a915134886 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -286,11 +286,13 @@ public: void forEachSubcolumn(MutableColumnCallback callback) override { callback(variant_column); + variant_column_ptr = assert_cast(variant_column.get()); } void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override { callback(*variant_column); + variant_column_ptr = assert_cast(variant_column.get()); variant_column->forEachSubcolumnRecursively(callback); } @@ -364,6 +366,9 @@ private: void updateVariantInfoAndExpandVariantColumn(const DataTypePtr & new_variant_type); WrappedPtr variant_column; + /// Store and use pointer to ColumnVariant to avoid virtual calls. + /// ColumnDynamic is widely used inside ColumnObject for each path and + /// with hundreds of paths these virtual calls are noticeable. ColumnVariant * variant_column_ptr; /// Store the type of current variant with some additional information. 
VariantInfo variant_info; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 4181c6081c7..ee0d87f500a 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -955,8 +955,11 @@ void ColumnObject::forEachSubcolumn(DB::IColumn::MutableColumnCallback callback) { for (auto & [_, column] : typed_paths) callback(column); - for (auto & [_, column] : dynamic_paths) + for (auto & [path, column] : dynamic_paths) + { callback(column); + dynamic_paths_ptrs[path] = assert_cast(column.get()); + } callback(shared_data); } @@ -967,10 +970,11 @@ void ColumnObject::forEachSubcolumnRecursively(DB::IColumn::RecursiveMutableColu callback(*column); column->forEachSubcolumnRecursively(callback); } - for (auto & [_, column] : dynamic_paths_ptrs) + for (auto & [path, column] : dynamic_paths) { callback(*column); column->forEachSubcolumnRecursively(callback); + dynamic_paths_ptrs[path] = assert_cast(column.get()); } callback(*shared_data); shared_data->forEachSubcolumnRecursively(callback); diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 4a27edfc295..d274f4e857a 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -201,6 +201,8 @@ private: /// here are Dynamic columns. This set of paths can be extended /// during inerts into the column. std::unordered_map dynamic_paths; + /// Store and use pointers to ColumnDynamic to avoid virtual calls. + /// With hundreds of dynamic paths these virtual calls are noticeable. std::unordered_map dynamic_paths_ptrs; /// Shared storage for all other paths and values. It's filled /// when the number of dynamic paths reaches the limit. diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 3c7fe12ff08..506d89ca646 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -2117,6 +2117,7 @@ natively nats nestjs netloc +newjson ngram ngramDistance ngramDistanceCaseInsensitive From bc9c462155bed43eeb72415660917900ee4e7b58 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 23 Jul 2024 15:47:55 +0100 Subject: [PATCH 0741/2361] more comments --- src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 0e713150625..46482bc0959 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -61,6 +61,9 @@ static size_t calculateMinMarksPerTask( const size_t part_marks_count = part.getMarksCount(); if (part_marks_count && part.data_part->isStoredOnRemoteDisk()) { + /// We assume that most of the time prewhere does it's job good meaning that lion's share of the rows is filtered out. + /// Which means in turn that for most of the rows we will read only the columns from prewhere clause. + /// So it makes sense to use only them for the estimation. const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info ? 
prewhere_info->prewhere_actions->getRequiredColumnsNames() : columns_to_read; @@ -69,6 +72,7 @@ static size_t calculateMinMarksPerTask( const auto avg_mark_bytes = std::max(part_compressed_bytes / part_marks_count, 1); const auto min_bytes_per_task = settings.merge_tree_min_bytes_per_task_for_remote_reading; /// We're taking min here because number of tasks shouldn't be too low - it will make task stealing impossible. + /// We also create at least two tasks per thread to have something to steal from a slow thread. const auto heuristic_min_marks = std::min(pool_settings.sum_marks / pool_settings.threads / 2, min_bytes_per_task / avg_mark_bytes); if (heuristic_min_marks > min_marks_per_task) From e0a7aa80af298465292a5227e65fe21c931b1b9b Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Tue, 23 Jul 2024 16:57:07 +0200 Subject: [PATCH 0742/2361] Enable repeat for flaky check for more tests hardening --- tests/ci/integration_tests_runner.py | 18 ++++++++++++++---- tests/integration/runner | 6 +++++- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 21f16d995a4..2b348be8b51 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -29,7 +29,8 @@ CLICKHOUSE_BINARY_PATH = "usr/bin/clickhouse" CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH = "usr/bin/clickhouse-odbc-bridge" CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH = "usr/bin/clickhouse-library-bridge" -FLAKY_TRIES_COUNT = 10 +FLAKY_TRIES_COUNT = 10 # run whole pytest several times +FLAKY_REPEAT_COUNT = 5 # runs test case in single module several times MAX_TIME_SECONDS = 3600 MAX_TIME_IN_SANDBOX = 20 * 60 # 20 minutes @@ -568,6 +569,7 @@ class ClickhouseIntegrationTestsRunner: tests_in_group, num_tries, num_workers, + repeat_count, ): try: return self.run_test_group( @@ -576,6 +578,7 @@ class ClickhouseIntegrationTestsRunner: tests_in_group, num_tries, num_workers, + repeat_count, ) except Exception as e: logging.info("Failed to run %s:\n%s", test_group, e) @@ -598,6 +601,7 @@ class ClickhouseIntegrationTestsRunner: tests_in_group, num_tries, num_workers, + repeat_count, ): counters = { "ERROR": [], @@ -639,6 +643,7 @@ class ClickhouseIntegrationTestsRunner: test_cmd = " ".join([shlex.quote(test) for test in sorted(test_names)]) parallel_cmd = f" --parallel {num_workers} " if num_workers > 0 else "" + repeat_cmd = f" --count {repeat_count} " if repeat_count > 0 else "" # -r -- show extra test summary: # -f -- (f)ailed # -E -- (E)rror @@ -647,7 +652,7 @@ class ClickhouseIntegrationTestsRunner: cmd = ( f"cd {repo_path}/tests/integration && " f"timeout --signal=KILL 1h ./runner {self._get_runner_opts()} " - f"{image_cmd} -t {test_cmd} {parallel_cmd} -- -rfEps --run-id={i} " + f"{image_cmd} -t {test_cmd} {parallel_cmd} {repeat_cmd} -- -rfEps --run-id={i} " f"--color=no --durations=0 {_get_deselect_option(self.should_skip_tests())} " f"| tee {info_path}" ) @@ -784,7 +789,12 @@ class ClickhouseIntegrationTestsRunner: final_retry += 1 logging.info("Running tests for the %s time", i) counters, tests_times, log_paths = self.try_run_test_group( - repo_path, "bugfix" if should_fail else "flaky", tests_to_run, 1, 1 + repo_path, + "bugfix" if should_fail else "flaky", + tests_to_run, + 1, + 1, + FLAKY_REPEAT_COUNT, ) logs += log_paths if counters["FAILED"]: @@ -919,7 +929,7 @@ class ClickhouseIntegrationTestsRunner: for group, tests in items_to_run: logging.info("Running test group %s containing %s 
tests", group, len(tests)) group_counters, group_test_times, log_paths = self.try_run_test_group( - repo_path, group, tests, MAX_RETRY, NUM_WORKERS + repo_path, group, tests, MAX_RETRY, NUM_WORKERS, 0 ) total_tests = 0 for counter, value in group_counters.items(): diff --git a/tests/integration/runner b/tests/integration/runner index a583d7fe897..fc13cb9807a 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -243,6 +243,8 @@ if __name__ == "__main__": "-n", "--parallel", action="store", dest="parallel", help="Parallelism" ) + parser.add_argument("--count", action="store", dest="count", help="Repeat count") + parser.add_argument( "--no-random", action="store", @@ -318,6 +320,8 @@ if __name__ == "__main__": parallel_args += "--dist=loadfile" parallel_args += f" -n {args.parallel}".format() + repeat_args = f" --count {args.count}" if args.count > 0 else "" + rand_args = "" # if not args.no_random: # rand_args += f"--random-seed={os.getpid()}" @@ -409,7 +413,7 @@ if __name__ == "__main__": f"--volume={args.utils_dir}/grpc-client/pb2:/ClickHouse/utils/grpc-client/pb2 " f"--volume=/run:/run/host:ro {dockerd_internal_volume} {env_tags} {env_cleanup} " f"-e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 {use_old_analyzer} -e PYTHONUNBUFFERED=1 " - f'-e PYTEST_ADDOPTS="{parallel_args} {pytest_opts} {tests_list} {rand_args} -vvv"' + f'-e PYTEST_ADDOPTS="{parallel_args} {repeat_args} {pytest_opts} {tests_list} {rand_args} -vvv"' f" {DIND_INTEGRATION_TESTS_IMAGE_NAME}:{args.docker_image_version}" ) From 2f8b28a17cd885581b4cb177643b952d7975d30c Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Tue, 23 Jul 2024 17:05:23 +0200 Subject: [PATCH 0743/2361] Touch test to trigger flaky check --- tests/integration/test_backup_restore_new/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index d8662fad011..fbdc64725d5 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -1,8 +1,8 @@ -import pytest import glob -import re -import random import os.path +import pytest +import random +import re import sys from collections import namedtuple from helpers.cluster import ClickHouseCluster From c7427e6572f278d1a72289ca9ef50fbb701c1115 Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 23 Jul 2024 16:36:30 +0200 Subject: [PATCH 0744/2361] CI: Fixes docker server build for release branches --- tests/ci/docker_server.py | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 21fc02ce02a..413c35cbebe 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -21,7 +21,7 @@ from env_helper import ( TEMP_PATH, ) from git_helper import Git -from pr_info import PRInfo, EventType +from pr_info import PRInfo from report import FAILURE, SUCCESS, JobReport, TestResult, TestResults from stopwatch import Stopwatch from tee_popen import TeePopen @@ -375,25 +375,23 @@ def main(): tags = gen_tags(args.version, args.release_type) repo_urls = {} direct_urls: Dict[str, List[str]] = {} - if pr_info.event_type == EventType.PULL_REQUEST: - release_or_pr = str(pr_info.number) - sha = pr_info.sha - elif pr_info.event_type == EventType.PUSH and pr_info.is_master: - release_or_pr = str(0) - sha = pr_info.sha - else: - release_or_pr = 
f"{args.version.major}.{args.version.minor}" - sha = args.sha - assert sha for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")): - if not args.bucket_prefix: + if args.bucket_prefix: + assert not args.allow_build_reuse + repo_urls[arch] = f"{args.bucket_prefix}/{build_name}" + elif args.sha: + # CreateRelease workflow only. TODO + version = args.version repo_urls[arch] = ( f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/" - f"{release_or_pr}/{sha}/{build_name}" + f"{version.major}.{version.minor}/{args.sha}/{build_name}" ) else: - repo_urls[arch] = f"{args.bucket_prefix}/{build_name}" + # In all other cases urls must be fetched from build reports. TODO: script needs refactoring + repo_urls[arch] = "" + assert args.allow_build_reuse + if args.allow_build_reuse: # read s3 urls from pre-downloaded build reports if "clickhouse-server" in image_repo: @@ -431,7 +429,6 @@ def main(): ) if test_results[-1].status != "OK": status = FAILURE - pr_info = pr_info or PRInfo() description = f"Processed tags: {', '.join(tags)}" JobReport( From 18a43be044947cdc450f1342db440f76d2fa6bbe Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 15:57:09 +0000 Subject: [PATCH 0745/2361] Revert changes in JSONAsString format --- .../Impl/JSONAsStringRowInputFormat.cpp | 63 +++++++++---------- .../Formats/Impl/JSONAsStringRowInputFormat.h | 16 ++--- 2 files changed, 37 insertions(+), 42 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp index dde8a86babd..c5e36046c62 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp @@ -15,8 +15,11 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; } -JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) : - JSONEachRowRowInputFormat(in_, header_, std::move(params_), format_settings_, false) +JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) + : JSONAsRowInputFormat(header_, std::make_unique(in_), params_, format_settings_) {} + +JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, std::unique_ptr buf_, Params params_, const FormatSettings & format_settings_) : + JSONEachRowRowInputFormat(*buf_, header_, std::move(params_), format_settings_, false), buf(std::move(buf_)) { if (header_.columns() > 1) throw Exception(ErrorCodes::BAD_ARGUMENTS, @@ -24,6 +27,19 @@ JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & i header_.columns()); } + +void JSONAsRowInputFormat::setReadBuffer(ReadBuffer & in_) +{ + buf = std::make_unique(in_); + JSONEachRowRowInputFormat::setReadBuffer(*buf); +} + +void JSONAsRowInputFormat::resetReadBuffer() +{ + buf.reset(); + JSONEachRowRowInputFormat::resetReadBuffer(); +} + bool JSONAsRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) { assert(columns.size() == 1); @@ -32,44 +48,35 @@ bool JSONAsRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &) if (!allow_new_rows) return false; - skipWhitespaceIfAny(*in); - if (!in->eof()) + skipWhitespaceIfAny(*buf); + if (!buf->eof()) { - if (!data_in_square_brackets && *in->position() == ';') + if (!data_in_square_brackets && *buf->position() == ';') { /// ';' means the end of query, but it cannot be before ']'. 
return allow_new_rows = false; } - else if (data_in_square_brackets && *in->position() == ']') + else if (data_in_square_brackets && *buf->position() == ']') { /// ']' means the end of query. return allow_new_rows = false; } } - if (!in->eof()) + if (!buf->eof()) readJSONObject(*columns[0]); - skipWhitespaceIfAny(*in); - if (!in->eof() && *in->position() == ',') - ++in->position(); - skipWhitespaceIfAny(*in); + skipWhitespaceIfAny(*buf); + if (!buf->eof() && *buf->position() == ',') + ++buf->position(); + skipWhitespaceIfAny(*buf); - return !in->eof(); + return !buf->eof(); } JSONAsStringRowInputFormat::JSONAsStringRowInputFormat( const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) : JSONAsRowInputFormat(header_, in_, params_, format_settings_) -{ -} - -JSONAsStringRowInputFormat::JSONAsStringRowInputFormat( - const DB::Block & header_, - std::unique_ptr buf_, - DB::IRowInputFormat::Params params_, - const DB::FormatSettings & format_settings_) - : JSONAsRowInputFormat(header_, *buf_, params_, format_settings_), buf(std::move(buf_)) { if (!isString(removeNullable(removeLowCardinality(header_.getByPosition(0).type)))) throw Exception(ErrorCodes::BAD_ARGUMENTS, @@ -77,18 +84,6 @@ JSONAsStringRowInputFormat::JSONAsStringRowInputFormat( header_.getByPosition(0).type->getName()); } -void JSONAsStringRowInputFormat::setReadBuffer(ReadBuffer & in_) -{ - buf = std::make_unique(in_); - JSONAsRowInputFormat::setReadBuffer(*buf); -} - -void JSONAsStringRowInputFormat::resetReadBuffer() -{ - buf.reset(); - JSONAsRowInputFormat::resetReadBuffer(); -} - void JSONAsStringRowInputFormat::readJSONObject(IColumn & column) { PeekableReadBufferCheckpoint checkpoint{*buf}; @@ -180,7 +175,7 @@ JSONAsObjectRowInputFormat::JSONAsObjectRowInputFormat( void JSONAsObjectRowInputFormat::readJSONObject(IColumn & column) { - serializations[0]->deserializeTextJSON(column, *in, format_settings); + serializations[0]->deserializeTextJSON(column, *buf, format_settings); } Chunk JSONAsObjectRowInputFormat::getChunkForCount(size_t rows) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index 7c3c1fb9fb7..593c4ae6fea 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -19,11 +19,17 @@ class JSONAsRowInputFormat : public JSONEachRowRowInputFormat public: JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings); + void setReadBuffer(ReadBuffer & in_) override; + void resetReadBuffer() override; + private: + JSONAsRowInputFormat(const Block & header_, std::unique_ptr buf_, Params params_, const FormatSettings & format_settings); + bool readRow(MutableColumns & columns, RowReadExtension & ext) override; protected: virtual void readJSONObject(IColumn & column) = 0; + std::unique_ptr buf; }; /// Each JSON object is parsed as a whole to string. 
@@ -31,17 +37,11 @@ protected: class JSONAsStringRowInputFormat final : public JSONAsRowInputFormat { public: - JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_); + JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings); String getName() const override { return "JSONAsStringRowInputFormat"; } - void setReadBuffer(ReadBuffer & in_) override; - void resetReadBuffer() override; - private: - JSONAsStringRowInputFormat(const Block & header_, std::unique_ptr buf_, Params params_, const FormatSettings & format_settings); - void readJSONObject(IColumn & column) override; - std::unique_ptr buf; }; @@ -83,4 +83,4 @@ private: FormatSettings settings; }; -} +} \ No newline at end of file From 81b5caad2ddb1cf59e0349a479d67aa9b7893256 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 15:59:03 +0000 Subject: [PATCH 0746/2361] Fix header --- .../Formats/Impl/JSONAsStringRowInputFormat.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index 593c4ae6fea..61bb7f810e0 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -1,12 +1,12 @@ #pragma once -#include -#include -#include +#include +#include #include #include -#include -#include +#include +#include +#include namespace DB { @@ -83,4 +83,4 @@ private: FormatSettings settings; }; -} \ No newline at end of file +} From c7400fbd508af022f9fdace728094444e03c1d28 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 23 Jul 2024 18:08:07 +0200 Subject: [PATCH 0747/2361] Add more diagnostics in case of digest mismatch for DatabaseReplicated --- src/Databases/DatabaseReplicated.cpp | 249 +++++++++++++++++++-------- src/Databases/DatabaseReplicated.h | 10 +- 2 files changed, 185 insertions(+), 74 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 4c079ae5300..d0648faf07b 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -671,92 +671,96 @@ void DatabaseReplicated::stopLoading() DatabaseAtomic::stopLoading(); } -bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool debug_check /* = true */) const +void DatabaseReplicated::dumpLocalTablesForDebugOnly(const ContextPtr & local_context) const { - if (debug_check) + auto table_names = getAllTableNames(context.lock()); + for (const auto & table_name : table_names) { - /// Reduce number of debug checks - if (thread_local_rng() % 16) - return true; + auto ast_ptr = tryGetCreateTableQuery(table_name, local_context); + if (ast_ptr) + LOG_DEBUG(log, "[local] Table {} create query is {}", table_name, queryToString(ast_ptr)); + else + LOG_DEBUG(log, "[local] Table {} has no create query", table_name); } - - LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest); - - /// Database is probably being dropped - if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive())) - return true; - - UInt64 local_digest = 0; - { - std::lock_guard lock{mutex}; - for (const auto & table : TSA_SUPPRESS_WARNING_FOR_READ(tables)) - local_digest += getMetadataHash(table.first); - } - - if (local_digest != tables_metadata_digest) - { - LOG_ERROR(log, "Digest of local metadata ({}) 
is not equal to in-memory digest ({})", local_digest, tables_metadata_digest); - return false; - } - - /// Do not check digest in Keeper after internal subquery, it's probably not committed yet - if (local_context->isInternalSubquery()) - return true; - - /// Check does not make sense to check digest in Keeper during recovering - if (is_recovering) - return true; - - String zk_digest = getZooKeeper()->get(replica_path + "/digest"); - String local_digest_str = toString(local_digest); - if (zk_digest != local_digest_str) - { - LOG_ERROR(log, "Digest of local metadata ({}) is not equal to digest in Keeper ({})", local_digest_str, zk_digest); - return false; - } - - return true; } -void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_context) const +void DatabaseReplicated::dumpTablesInZooKeeperForDebugOnly() const { - /// Replicas will set correct name of current database in query context (database name can be different on replicas) - if (auto * ddl_query = dynamic_cast(query.get())) + UInt32 max_log_ptr; + auto table_name_to_metadata = tryGetConsistentMetadataSnapshot(getZooKeeper(), max_log_ptr); + for (const auto & [table_name, create_table_query] : table_name_to_metadata) { - if (ddl_query->getDatabase() != getDatabaseName()) - throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed"); - ddl_query->database.reset(); - - if (auto * create = query->as()) + auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_table_query); + if (query_ast) { - if (create->storage) - checkTableEngine(*create, *create->storage, query_context); + LOG_DEBUG(log, "[zookeeper] Table {} create query is {}", table_name, queryToString(query_ast)); + } + else + { + LOG_DEBUG(log, "[zookeeper] Table {} has no create query", table_name); + } + } +} - if (create->targets) +void DatabaseReplicated::tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(const ContextPtr & local_context) const +{ + UInt32 max_log_ptr; + auto table_name_to_metadata_in_zk = tryGetConsistentMetadataSnapshot(getZooKeeper(), max_log_ptr); + auto table_names_local = getAllTableNames(local_context); + + if (table_name_to_metadata_in_zk.size() != table_names_local.size()) + LOG_DEBUG(log, "Amount of tables in zk {} locally {}", table_name_to_metadata_in_zk.size(), table_names_local.size()); + + std::unordered_set checked_tables; + + for (const auto & table_name : table_names_local) + { + auto local_ast_ptr = tryGetCreateTableQuery(table_name, local_context); + if (table_name_to_metadata_in_zk.contains(table_name)) + { + checked_tables.insert(table_name); + auto create_table_query_in_zk = table_name_to_metadata_in_zk[table_name]; + auto zk_ast_ptr = parseQueryFromMetadataInZooKeeper(table_name, create_table_query_in_zk); + + if (local_ast_ptr == nullptr && zk_ast_ptr == nullptr) { - for (const auto & inner_table_engine : create->targets->getInnerEngines()) - checkTableEngine(*create, *inner_table_engine, query_context); + LOG_DEBUG(log, "AST for table {} is the same (nullptr) in local and ZK", table_name); + } + else if (local_ast_ptr != nullptr && zk_ast_ptr != nullptr && queryToString(local_ast_ptr) != queryToString(zk_ast_ptr)) + { + LOG_DEBUG(log, "AST differs for table {}, local {}, in zookeeper {}", table_name, queryToString(local_ast_ptr), queryToString(zk_ast_ptr)); + } + else if (local_ast_ptr == nullptr) + { + LOG_DEBUG(log, "AST differs for table {}, local nullptr, in zookeeper {}", table_name, queryToString(zk_ast_ptr)); + } + else if (zk_ast_ptr == nullptr) + { + LOG_DEBUG(log, 
"AST differs for table {}, local {}, in zookeeper nullptr", table_name, queryToString(local_ast_ptr)); + } + else + { + LOG_DEBUG(log, "AST for table {} is the same in local and ZK", table_name); } } - } - - if (const auto * query_alter = query->as()) - { - for (const auto & command : query_alter->command_list->children) + else { - if (!isSupportedAlterTypeForOnClusterDDLQuery(command->as().type)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type of ALTER query"); + if (local_ast_ptr == nullptr) + LOG_DEBUG(log, "Table {} exists locally, but missing in ZK", table_name); + else + LOG_DEBUG(log, "Table {} exists locally with AST {}, but missing in ZK", table_name, queryToString(local_ast_ptr)); } } - - if (auto * query_drop = query->as()) + for (const auto & [table_name, table_metadata] : table_name_to_metadata_in_zk) { - if (query_drop->kind == ASTDropQuery::Kind::Detach && query_context->getSettingsRef().database_replicated_always_detach_permanently) - query_drop->permanently = true; - if (query_drop->kind == ASTDropQuery::Kind::Detach && !query_drop->permanently) - throw Exception(ErrorCodes::INCORRECT_QUERY, "DETACH TABLE is not allowed for Replicated databases. " - "Use DETACH TABLE PERMANENTLY or SYSTEM RESTART REPLICA or set " - "database_replicated_always_detach_permanently to 1"); + if (!checked_tables.contains(table_name)) + { + auto zk_ast_ptr = parseQueryFromMetadataInZooKeeper(table_name, table_metadata); + if (zk_ast_ptr == nullptr) + LOG_DEBUG(log, "Table {} exists in ZK with AST {}, but missing locally", table_name, queryToString(zk_ast_ptr)); + else + LOG_DEBUG(log, "Table {} exists in ZK, but missing locally", table_name); + } } } @@ -839,6 +843,107 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora "to distinguish different shards and replicas"); } +bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool debug_check /* = true */) const +{ + if (debug_check) + { + /// Reduce number of debug checks + if (thread_local_rng() % 16) + return true; + } + + LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest); + + /// Database is probably being dropped + if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive())) + return true; + + UInt64 local_digest = 0; + { + std::lock_guard lock{mutex}; + for (const auto & table : TSA_SUPPRESS_WARNING_FOR_READ(tables)) + local_digest += getMetadataHash(table.first); + } + + if (local_digest != tables_metadata_digest) + { + LOG_ERROR(log, "Digest of local metadata ({}) is not equal to in-memory digest ({})", local_digest, tables_metadata_digest); + +#ifndef NDEBUG + dumpLocalTablesForDebugOnly(local_context); + dumpTablesInZooKeeperForDebugOnly(); + tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(local_context); +#endif + + return false; + } + + /// Do not check digest in Keeper after internal subquery, it's probably not committed yet + if (local_context->isInternalSubquery()) + return true; + + /// Check does not make sense to check digest in Keeper during recovering + if (is_recovering) + return true; + + String zk_digest = getZooKeeper()->get(replica_path + "/digest"); + String local_digest_str = toString(local_digest); + if (zk_digest != local_digest_str) + { + LOG_ERROR(log, "Digest of local metadata ({}) is not equal to digest in Keeper ({})", local_digest_str, zk_digest); +#ifndef NDEBUG + dumpLocalTablesForDebugOnly(local_context); + dumpTablesInZooKeeperForDebugOnly(); + 
tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(local_context); +#endif + return false; + } + + return true; +} + +void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_context) const +{ + /// Replicas will set correct name of current database in query context (database name can be different on replicas) + if (auto * ddl_query = dynamic_cast(query.get())) + { + if (ddl_query->getDatabase() != getDatabaseName()) + throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed"); + ddl_query->database.reset(); + + if (auto * create = query->as()) + { + if (create->storage) + checkTableEngine(*create, *create->storage, query_context); + + if (create->targets) + { + for (const auto & inner_table_engine : create->targets->getInnerEngines()) + checkTableEngine(*create, *inner_table_engine, query_context); + } + } + } + + if (const auto * query_alter = query->as()) + { + for (const auto & command : query_alter->command_list->children) + { + if (!isSupportedAlterTypeForOnClusterDDLQuery(command->as().type)) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type of ALTER query"); + } + } + + if (auto * query_drop = query->as()) + { + if (query_drop->kind == ASTDropQuery::Kind::Detach && query_context->getSettingsRef().database_replicated_always_detach_permanently) + query_drop->permanently = true; + if (query_drop->kind == ASTDropQuery::Kind::Detach && !query_drop->permanently) + throw Exception(ErrorCodes::INCORRECT_QUERY, "DETACH TABLE is not allowed for Replicated databases. " + "Use DETACH TABLE PERMANENTLY or SYSTEM RESTART REPLICA or set " + "database_replicated_always_detach_permanently to 1"); + } +} + BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context, QueryFlags flags) { waitDatabaseStarted(); @@ -1253,7 +1358,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep current_zookeeper->set(replica_path + "/digest", toString(tables_metadata_digest)); } -std::map DatabaseReplicated::tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr) +std::map DatabaseReplicated::tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr) const { return getConsistentMetadataSnapshotImpl(zookeeper, {}, /* max_retries= */ 10, max_log_ptr); } @@ -1314,7 +1419,7 @@ std::map DatabaseReplicated::getConsistentMetadataSnapshotImpl( return table_name_to_metadata; } -ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query) +ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query) const { ParserCreateQuery parser; String description = "in ZooKeeper " + zookeeper_path + "/metadata/" + node_name; diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 8c3fa7c87f6..2d96695f7cc 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -109,14 +109,15 @@ private: void checkQueryValid(const ASTPtr & query, ContextPtr query_context) const; void checkTableEngine(const ASTCreateQuery & query, ASTStorage & storage, ContextPtr query_context) const; + void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 & max_log_ptr); - std::map tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr); + std::map tryGetConsistentMetadataSnapshot(const ZooKeeperPtr & zookeeper, UInt32 & max_log_ptr) const; std::map 
getConsistentMetadataSnapshotImpl(const ZooKeeperPtr & zookeeper, const FilterByNameFunction & filter_by_table_name, size_t max_retries, UInt32 & max_log_ptr) const; - ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query); + ASTPtr parseQueryFromMetadataInZooKeeper(const String & node_name, const String & query) const; String readMetadataFile(const String & table_name) const; ClusterPtr getClusterImpl(bool all_groups = false) const; @@ -132,6 +133,11 @@ private: UInt64 getMetadataHash(const String & table_name) const; bool checkDigestValid(const ContextPtr & local_context, bool debug_check = true) const TSA_REQUIRES(metadata_mutex); + /// For debug purposes only, don't use in production code + void dumpLocalTablesForDebugOnly(const ContextPtr & local_context) const; + void dumpTablesInZooKeeperForDebugOnly() const; + void tryCompareLocalAndZooKeeperTablesAndDumpDiffForDebugOnly(const ContextPtr & local_context) const; + void waitDatabaseStarted() const override; void stopLoading() override; From 7a65eb08ed571037206640cbdfe305a383e52c9b Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 23 Jul 2024 18:00:33 +0100 Subject: [PATCH 0748/2361] add forgotten file --- .../03036_dynamic_read_subcolumns.lib | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns.lib diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.lib b/tests/queries/0_stateless/03036_dynamic_read_subcolumns.lib new file mode 100755 index 00000000000..4914051db82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns.lib @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +function test() +{ + echo "test" + $CH_CLIENT -q "insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000" + $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000" + $CH_CLIENT -q "insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000" + $CH_CLIENT -q "insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000" + $CH_CLIENT -q "insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000" + $CH_CLIENT -q "insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000" + + $CH_CLIENT -q "select distinct dynamicType(d) as type from test order by type" + $CH_CLIENT -q "select count() from test where dynamicType(d) == 'UInt64'" + $CH_CLIENT -q "select count() from test where d.UInt64 is not NULL" + $CH_CLIENT -q "select count() from test where dynamicType(d) == 'String'" + $CH_CLIENT -q "select count() from test where d.String is not NULL" + $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Date'" + $CH_CLIENT -q "select count() from test where d.Date is not NULL" + $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'" + $CH_CLIENT -q "select count() from 
test where not empty(d.\`Array(Variant(String, UInt64))\`)" + $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'" + $CH_CLIENT -q "select count() from test where not empty(d.\`Array(Array(Dynamic))\`)" + $CH_CLIENT -q "select count() from test where d is NULL" + $CH_CLIENT -q "select count() from test where not empty(d.\`Tuple(a Array(Dynamic))\`.a.String)" + + $CH_CLIENT -q "select d, d.UInt64, d.String, d.\`Array(Variant(String, UInt64))\` from test format Null" + $CH_CLIENT -q "select d.UInt64, d.String, d.\`Array(Variant(String, UInt64))\` from test format Null" + $CH_CLIENT -q "select d.Int8, d.Date, d.\`Array(String)\` from test format Null" + $CH_CLIENT -q "select d, d.UInt64, d.Date, d.\`Array(Variant(String, UInt64))\`, d.\`Array(Variant(String, UInt64))\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" + $CH_CLIENT -q "select d.UInt64, d.Date, d.\`Array(Variant(String, UInt64))\`, d.\`Array(Variant(String, UInt64))\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64, d.\`Array(Variant(String, UInt64))\`.String from test format Null" + $CH_CLIENT -q "select d, d.\`Tuple(a UInt64, b String)\`.a, d.\`Array(Dynamic)\`.\`Variant(String, UInt64)\`.UInt64, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" + $CH_CLIENT -q "select d.\`Array(Dynamic)\`.\`Variant(String, UInt64)\`.UInt64, d.\`Array(Dynamic)\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" + $CH_CLIENT -q "select d.\`Array(Array(Dynamic))\`.size1, d.\`Array(Array(Dynamic))\`.UInt64, d.\`Array(Array(Dynamic))\`.\`Map(String, Tuple(a UInt64))\`.values.a from test format Null" +} From 3df8faf640945ca73a292fe8da6c8b4eed6398b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 23 Jul 2024 19:07:19 +0200 Subject: [PATCH 0749/2361] Add initial 24.7 changelog --- CHANGELOG.md | 179 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2eb65e2967..4fddd7d7685 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ### Table of Contents +**[ClickHouse release v24.7, 2024-07-30](#247)**
 **[ClickHouse release v24.6, 2024-07-01](#246)**<br/>
 **[ClickHouse release v24.5, 2024-05-30](#245)**<br/>
 **[ClickHouse release v24.4, 2024-04-30](#244)**<br/>
@@ -9,6 +10,184 @@ # 2024 Changelog +### ClickHouse release 24.7, 2024-07-30 + +#### Backward Incompatible Change +* Forbid `CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)). +* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* Extend function `tuple` to construct named tuples in queries. Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). +* Add `ASOF JOIN` support for the `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). +* A new table function, `fuzzQuery`, was added. This function allows you to modify a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1');`. [#62103](https://github.com/ClickHouse/ClickHouse/pull/62103) ([pufit](https://github.com/pufit)). +* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)). +* Support JWT authentication in `clickhouse-client`. [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)). +* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)). +* Support accept_invalid_certificate in the client's config in order to allow the client to connect over secure TCP to a server running with a self-signed certificate - can be used as a shorthand for the corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)). +* Add `system.error_log`, which contains the history of error values from table `system.errors`, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)). +* Add aggregate function `groupConcat`. It is about the same as `arrayStringConcat(groupArray(column), ',')` and can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Add AzureQueue storage. 
[#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)). +* Allow system administrators to configure `logger.console_log_level`. [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)). +* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)). +* Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)). + +#### Experimental Feature +* Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)). +* Support rocksdb as backend storage of keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). +* Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)). +* Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Performance Improvement +* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)). +* Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)). +* Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)). +* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)). +* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Support minmax hyperrectangle for Set indices. 
[#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)). +* Unload primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)). +* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)). + +#### Improvement +* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)). +* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)). +* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). +* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_ignore_key_case`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). +* Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). +* In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). +* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)). +* Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). +* `StorageS3Queue` related fixes and improvements. 
Deduce a default value of `s3queue_processing_threads_num` according to the number of physical CPU cores on the server (instead of the previous default value of 1). Set default value of `s3queue_loading_retries` to 10. Fix possible vague "Uncaught exception" in exception column of `system.s3queue`. Do not increment retry count on `MEMORY_LIMIT_EXCEEDED` exception. Move files commit to a stage after insertion into table fully finished to avoid files being committed while not inserted. Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit`, to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixed broken multiple-column aggregation on s390x. [#65062](https://github.com/ClickHouse/ClickHouse/pull/65062) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Support aliases in the parametrized view function (only new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)). +* S3 requests: Reduce retry time for queries, increase retries count for backups: 8.5 minutes and 100 retries for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)). +* Updated to mask account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Partition pruning for `IN` predicates when the filter expression is a part of the `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)). +* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)). +* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with a custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). +* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)). +* `arrayMin`/`arrayMax` are now applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)). +* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)). +* Do not create format settings for each row when serializing chunks to insert into an EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)). +* Fixed out-of-range exception in parsing Dwarf5 on s390x. [#65501](https://github.com/ClickHouse/ClickHouse/pull/65501) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Reduce `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. 
[#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Disable filesystem cache background download by default. It will be enabled back when we fix the issue with possible "Memory limit exceeded" because memory deallocation is done outside of query context (while buffer is allocated inside of query context) if we use background download threads. Plus we need to add a separate setting to define max size to download for background workers (currently it is limited by max_file_segment_size, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add new option to config `` which allow to specify how often clickhouse will reload config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)). +* Implement binary encoding for ClickHouse data types and add its specification in docs. Use it in Dynamic binary serialization, allow to use it in RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)). +* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)). +* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)). +* Add support for user identification based on x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)). +* `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a script to backup your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* PostgreSQL source support cancel. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)). +* Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Respect cgroup CPU limit in Keeper. 
[#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). +* Allow using the `concat` function with empty arguments, e.g. `SELECT concat()`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([李扬](https://github.com/taiyang-li)). +* Allow controlling named collections in clickhouse-local. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)). +* Support reading ORC files using the writer time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)). +* DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)). +* Add settings to control the connection to PostgreSQL. Setting `postgresql_connection_attempt_timeout` specifies the value passed to the `connect_timeout` parameter of the connection URL. Setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)). +* Reduce inaccuracy of input_wait_elapsed_us/output_wait_elapsed_us/elapsed_us. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)). +* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)). +* Add settings to ignore the ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Function `generateSnowflakeID` now allows specifying a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)). +* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add an option for validating the primary key type in Dictionaries. Without this option, for simple layouts, any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix crash in maxIntersections. 
[#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). +* Fix the VALID UNTIL clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fixed crash while using MaterializedMySQL with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)). +* Fix logical error when PREWHERE expression read no columns and table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)). +* Fix bug with cancelation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)). +* Fix filling parts columns from metadata (when columns.txt does not exists). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)). +* Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). +* Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). +* Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)). +* Fix unexpected projection name when query with CTE. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)). +* Require `dictGet` privilege when accessing dictionaries via direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)). +* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)). +* Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix getting exception `Index out of bound for blob metadata` in case all files from list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix NOT_FOUND_COLUMN_IN_BLOCK for deduplicate merge of projection. 
[#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fixed a bug in MergeJoin: a column in sparse serialization might be treated as a column of its nested type even though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)). +* Fixed a bug where compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)). +* Fix ODBC table with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)). +* Fix data race in `TCPHandler`, which could happen on fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)). +* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix a bug in short circuit logic when the old analyzer and `dictGetOrDefault` are used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)). +* Fix a bug that led EmbeddedRocksDB tables with TTL to write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)). +* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out of bounds. [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)). +* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)). +* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). +* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)). +* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)). +* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)). +* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with zero timeout. 
[#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)). +* Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). +* Column _size in s3 engine and s3 table function denotes the size of a file inside the archive, not a size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)). +* Fix resolving dynamic subcolumns in analyzer, avoid reading the whole column on dynamic subcolumn reading. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix config merging for from_env with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)). +* Fix a possible hanging in `GRPCServer` during shutdown. [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` peremeter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). +* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)). +* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). +* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed the issue when the server failed to parse Avro files with negative block size arrays encoded, which is now allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)). +* Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). +* Fix rare case with missing data in the result of distributed query. 
[#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)). +* Fix order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)). +* Prevent watchdog from keeping descriptors of unlinked(rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Fix the bug that logicalexpressionoptimizerpass lost logical type of constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)). +* Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix possible incorrect result for queries joining and filtering table external engine (like PostgreSQL), due to too aggressive filter pushdown. Since now, conditions from where section won't be send to external database in case of outer join with external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)). +* Added missing column materialization for cross join. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)). +* Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Avoid possible logical error during import from Npy format in case of bad array nesting level, fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix wrong count() result when there is non-deterministic function in predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)). +* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix reading of uninitialized memory when hashing empty tuples. 
[#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix `column_length` not being updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)). +* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix the `Method getResultType is not supported for QUERY query node` error when a scalar subquery was used as the first argument of `IN` (with the new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix a possible `PARAMETER_OUT_OF_BOUND` error when reading a variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix a rare case of a stuck merge after dropping a column. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)). +* Fix the `isUniqTypes` assertion when running INSERT SELECT from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)). +* Fix a logical error in PrometheusRequestHandler. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix an `indexHint` function case found by the fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)). +* Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)). + +#### Build/Testing/Packaging Improvement +* Instantiate template methods ahead of time in separate .cpp files to avoid overly large translation units during compilation. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)). +* Upgraded the `pocketfft` dependency to the recent commit https://github.com/mreineck/pocketfft/commit/f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3. [#66291](https://github.com/ClickHouse/ClickHouse/pull/66291) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Upgraded `azure-sdk-for-cpp` to the recent commit https://github.com/ClickHouse/azure-sdk-for-cpp/commit/ea3e19a7be08519134c643177d56c7484dfec884. [#66292](https://github.com/ClickHouse/ClickHouse/pull/66292) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). 
+ ### ClickHouse release 24.6, 2024-07-01 #### Backward Incompatible Change From 73ab9a1197f0d7e71be7c8014621de7226ad4da0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 23 Jul 2024 19:11:22 +0200 Subject: [PATCH 0750/2361] Revert "Merge pull request #65298 from Algunenano/low_cardinality_wtf" This reverts commit d3a269c61dfa784414245b01ec4d8ccccfeb8b3a, reversing changes made to 023cf118dc77cc316b158a543a5be51ae67b65f9. --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 4 +- src/Functions/IFunction.cpp | 101 ++++++------------ src/Planner/Planner.cpp | 7 +- src/Planner/PlannerActionsVisitor.cpp | 61 +++-------- src/Planner/PlannerActionsVisitor.h | 9 +- ..._no_aggregates_and_constant_keys.reference | 4 +- ...nality_group_by_distributed_plan.reference | 55 ---------- ..._cardinality_group_by_distributed_plan.sql | 80 -------------- 8 files changed, 56 insertions(+), 265 deletions(-) delete mode 100644 tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference delete mode 100644 tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index a43e3e62ebc..b1fe2554988 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -4124,9 +4124,7 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo auto * column_to_interpolate = interpolate_node_typed.getExpression()->as(); if (!column_to_interpolate) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "INTERPOLATE can work only for identifiers, but {} is found", + throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found", interpolate_node_typed.getExpression()->formatASTForErrorMessage()); auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName(); diff --git a/src/Functions/IFunction.cpp b/src/Functions/IFunction.cpp index 8b092ba9b6e..31695fc95d5 100644 --- a/src/Functions/IFunction.cpp +++ b/src/Functions/IFunction.cpp @@ -47,85 +47,54 @@ bool allArgumentsAreConstants(const ColumnsWithTypeAndName & args) return true; } -/// Replaces single low cardinality column in a function call by its dictionary -/// This can only happen after the arguments have been adapted in IFunctionOverloadResolver::getReturnType -/// as it's only possible if there is one low cardinality column and, optionally, const columns ColumnPtr replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( ColumnsWithTypeAndName & args, bool can_be_executed_on_default_arguments, size_t input_rows_count) { - /// We return the LC indexes so the LC can be reconstructed with the function result + size_t num_rows = input_rows_count; ColumnPtr indexes; - size_t number_low_cardinality_columns = 0; - size_t last_low_cardinality = 0; - size_t number_const_columns = 0; - size_t number_full_columns = 0; - - for (size_t i = 0; i < args.size(); i++) + /// Find first LowCardinality column and replace it to nested dictionary. + for (auto & column : args) { - auto const & arg = args[i]; - if (checkAndGetColumn(arg.column.get())) + if (const auto * low_cardinality_column = checkAndGetColumn(column.column.get())) { - number_low_cardinality_columns++; - last_low_cardinality = i; + /// Single LowCardinality column is supported now. 
+ if (indexes) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single dictionary argument for function."); + + const auto * low_cardinality_type = checkAndGetDataType(column.type.get()); + + if (!low_cardinality_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Incompatible type for LowCardinality column: {}", + column.type->getName()); + + if (can_be_executed_on_default_arguments) + { + /// Normal case, when function can be executed on values' default. + column.column = low_cardinality_column->getDictionary().getNestedColumn(); + indexes = low_cardinality_column->getIndexesPtr(); + } + else + { + /// Special case when default value can't be used. Example: 1 % LowCardinality(Int). + /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case. + auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size()); + column.column = dict_encoded.dictionary; + indexes = dict_encoded.indexes; + } + + num_rows = column.column->size(); + column.type = low_cardinality_type->getDictionaryType(); } - else if (checkAndGetColumn(arg.column.get())) - number_const_columns++; - else - number_full_columns++; } - if (!number_low_cardinality_columns && !number_const_columns) - return nullptr; - - if (number_full_columns > 0 || number_low_cardinality_columns > 1) - { - /// This should not be possible but currently there are multiple tests in CI failing because of it - /// TODO: Fix those cases, then enable this exception -#if 0 - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected low cardinality types found. Low cardinality: {}. Full {}. Const {}", - number_low_cardinality_columns, number_full_columns, number_const_columns); -#else - return nullptr; -#endif - } - else if (number_low_cardinality_columns == 1) - { - auto & lc_arg = args[last_low_cardinality]; - - const auto * low_cardinality_type = checkAndGetDataType(lc_arg.type.get()); - if (!low_cardinality_type) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Incompatible type for LowCardinality column: {}", lc_arg.type->getName()); - - const auto * low_cardinality_column = checkAndGetColumn(lc_arg.column.get()); - chassert(low_cardinality_column); - - if (can_be_executed_on_default_arguments) - { - /// Normal case, when function can be executed on values' default. - lc_arg.column = low_cardinality_column->getDictionary().getNestedColumn(); - indexes = low_cardinality_column->getIndexesPtr(); - } - else - { - /// Special case when default value can't be used. Example: 1 % LowCardinality(Int). - /// LowCardinality always contains default, so 1 % 0 will throw exception in normal case. - auto dict_encoded = low_cardinality_column->getMinimalDictionaryEncodedColumn(0, low_cardinality_column->size()); - lc_arg.column = dict_encoded.dictionary; - indexes = dict_encoded.indexes; - } - - /// The new column will have a different number of rows, normally less but occasionally it might be more (NULL) - input_rows_count = lc_arg.column->size(); - lc_arg.type = low_cardinality_type->getDictionaryType(); - } - - /// Change size of constants + /// Change size of constants. 
for (auto & column : args) { if (const auto * column_const = checkAndGetColumn(column.column.get())) { - column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), input_rows_count); + column.column = ColumnConst::create(recursiveRemoveLowCardinality(column_const->getDataColumnPtr()), num_rows); column.type = recursiveRemoveLowCardinality(column.type); } } @@ -301,8 +270,6 @@ ColumnPtr IExecutableFunction::executeWithoutSparseColumns(const ColumnsWithType bool can_be_executed_on_default_arguments = canBeExecutedOnDefaultArguments(); const auto & dictionary_type = res_low_cardinality_type->getDictionaryType(); - /// The arguments should have been adapted in IFunctionOverloadResolver::getReturnType - /// So there is only one low cardinality column (and optionally some const columns) and no full column ColumnPtr indexes = replaceLowCardinalityColumnsByNestedAndGetDictionaryIndexes( columns_without_low_cardinality, can_be_executed_on_default_arguments, input_rows_count); diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index a35ba69d459..5640d84731f 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -745,12 +745,7 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, { auto & interpolate_node_typed = interpolate_node->as(); - PlannerActionsVisitor planner_actions_visitor( - planner_context, - /* use_column_identifier_as_action_node_name_, (default value)*/ true, - /// Prefer the INPUT to CONSTANT nodes (actions must be non constant) - /* always_use_const_column_for_constant_nodes */ false); - + PlannerActionsVisitor planner_actions_visitor(planner_context); auto expression_to_interpolate_expression_nodes = planner_actions_visitor.visit(*interpolate_actions_dag, interpolate_node_typed.getExpression()); if (expression_to_interpolate_expression_nodes.size() != 1) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 6fd37df11c6..a0c7f9b2516 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -487,33 +487,16 @@ public: return node; } - [[nodiscard]] String addConstantIfNecessary( - const std::string & node_name, const ColumnWithTypeAndName & column, bool always_use_const_column_for_constant_nodes) + const ActionsDAG::Node * addConstantIfNecessary(const std::string & node_name, const ColumnWithTypeAndName & column) { - chassert(column.column != nullptr); auto it = node_name_to_node.find(node_name); - if (it != node_name_to_node.end() && (!always_use_const_column_for_constant_nodes || it->second->column)) - return {node_name}; - if (it != node_name_to_node.end()) - { - /// There is a node with this name, but it doesn't have a column - /// This likely happens because we executed the query until WithMergeableState with a const node in the - /// WHERE clause and, as the results of headers are materialized, the column was removed - /// Let's add a new column and keep this - String dupped_name{node_name + "_dupped"}; - if (node_name_to_node.find(dupped_name) != node_name_to_node.end()) - return dupped_name; - - const auto * node = &actions_dag.addColumn(column); - node_name_to_node[dupped_name] = node; - return dupped_name; - } + return it->second; const auto * node = &actions_dag.addColumn(column); node_name_to_node[node->result_name] = node; - return {node_name}; + return node; } template @@ -542,7 +525,7 @@ public: } private: - std::unordered_map node_name_to_node; + std::unordered_map node_name_to_node; ActionsDAG & actions_dag; 
QueryTreeNodePtr scope_node; }; @@ -550,11 +533,9 @@ private: class PlannerActionsVisitorImpl { public: - PlannerActionsVisitorImpl( - ActionsDAG & actions_dag, + PlannerActionsVisitorImpl(ActionsDAG & actions_dag, const PlannerContextPtr & planner_context_, - bool use_column_identifier_as_action_node_name_, - bool always_use_const_column_for_constant_nodes_); + bool use_column_identifier_as_action_node_name_); ActionsDAG::NodeRawConstPtrs visit(QueryTreeNodePtr expression_node); @@ -614,18 +595,14 @@ private: const PlannerContextPtr planner_context; ActionNodeNameHelper action_node_name_helper; bool use_column_identifier_as_action_node_name; - bool always_use_const_column_for_constant_nodes; }; -PlannerActionsVisitorImpl::PlannerActionsVisitorImpl( - ActionsDAG & actions_dag, +PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAG & actions_dag, const PlannerContextPtr & planner_context_, - bool use_column_identifier_as_action_node_name_, - bool always_use_const_column_for_constant_nodes_) + bool use_column_identifier_as_action_node_name_) : planner_context(planner_context_) , action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_) , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_) - , always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_) { actions_stack.emplace_back(actions_dag, nullptr); } @@ -748,16 +725,17 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi column.type = constant_type; column.column = column.type->createColumnConst(1, constant_literal); - String final_name = actions_stack[0].addConstantIfNecessary(constant_node_name, column, always_use_const_column_for_constant_nodes); + actions_stack[0].addConstantIfNecessary(constant_node_name, column); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) { auto & actions_stack_node = actions_stack[i]; - actions_stack_node.addInputConstantColumnIfNecessary(final_name, column); + actions_stack_node.addInputConstantColumnIfNecessary(constant_node_name, column); } - return {final_name, Levels(0)}; + return {constant_node_name, Levels(0)}; + } PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitLambda(const QueryTreeNodePtr & node) @@ -886,16 +864,16 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::ma else column.column = std::move(column_set); - String final_name = actions_stack[0].addConstantIfNecessary(column.name, column, always_use_const_column_for_constant_nodes); + actions_stack[0].addConstantIfNecessary(column.name, column); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) { auto & actions_stack_node = actions_stack[i]; - actions_stack_node.addInputConstantColumnIfNecessary(final_name, column); + actions_stack_node.addInputConstantColumnIfNecessary(column.name, column); } - return {final_name, Levels(0)}; + return {column.name, Levels(0)}; } PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitIndexHintFunction(const QueryTreeNodePtr & node) @@ -1032,19 +1010,14 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi } -PlannerActionsVisitor::PlannerActionsVisitor( - const PlannerContextPtr & planner_context_, - bool use_column_identifier_as_action_node_name_, - bool always_use_const_column_for_constant_nodes_) 
+PlannerActionsVisitor::PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_) : planner_context(planner_context_) , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_) - , always_use_const_column_for_constant_nodes(always_use_const_column_for_constant_nodes_) {} ActionsDAG::NodeRawConstPtrs PlannerActionsVisitor::visit(ActionsDAG & actions_dag, QueryTreeNodePtr expression_node) { - PlannerActionsVisitorImpl actions_visitor_impl( - actions_dag, planner_context, use_column_identifier_as_action_node_name, always_use_const_column_for_constant_nodes); + PlannerActionsVisitorImpl actions_visitor_impl(actions_dag, planner_context, use_column_identifier_as_action_node_name); return actions_visitor_impl.visit(expression_node); } diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 1dbd149bc4b..6bb32047327 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -27,17 +27,11 @@ using PlannerContextPtr = std::shared_ptr; * During actions build, there is special handling for following functions: * 1. Aggregate functions are added in actions dag as INPUT nodes. Aggregate functions arguments are not added. * 2. For function `in` and its variants, already collected sets from planner context are used. - * 3. When building actions that use CONSTANT nodes, by default we ignore pre-existing INPUTs if those don't have - * a column (a const column always has a column). This is for compatibility with previous headers. We disable this - * behaviour when we explicitly want to override CONSTANT nodes with the input (resolving InterpolateNode for example) */ class PlannerActionsVisitor { public: - explicit PlannerActionsVisitor( - const PlannerContextPtr & planner_context_, - bool use_column_identifier_as_action_node_name_ = true, - bool always_use_const_column_for_constant_nodes_ = true); + explicit PlannerActionsVisitor(const PlannerContextPtr & planner_context_, bool use_column_identifier_as_action_node_name_ = true); /** Add actions necessary to calculate expression node into expression dag. * Necessary actions are not added in actions dag output. @@ -48,7 +42,6 @@ public: private: const PlannerContextPtr planner_context; bool use_column_identifier_as_action_node_name = true; - bool always_use_const_column_for_constant_nodes = true; }; /** Calculate query tree expression node action dag name and add them into node to name map. 
diff --git a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference index fc77ed8a241..63b8a9d14fc 100644 --- a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference +++ b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference @@ -8,13 +8,13 @@ 40 41 -41 +0 2 42 2 42 43 -43 +0 11 11 diff --git a/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference b/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference deleted file mode 100644 index 1508c24f410..00000000000 --- a/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.reference +++ /dev/null @@ -1,55 +0,0 @@ --- { echoOn } -SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) -FROM system.one -GROUP BY '666'; -6.666.8 -SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) -FROM remote('127.0.0.{1,1}', 'system.one') -GROUP BY '666'; -6.666.8 --- https://github.com/ClickHouse/ClickHouse/issues/63006 -SELECT - 6, - concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, - concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b -FROM system.one -GROUP BY toNullable(6) - WITH ROLLUP -WITH TOTALS; -6 World666666 \N -6 World666666 \N - -6 World666666 \N -SELECT - 6, - concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, - concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b -FROM remote('127.0.0.1') -GROUP BY toNullable(6) - WITH ROLLUP - WITH TOTALS; -6 World666666 \N -6 World666666 \N - -6 World666666 \N --- { echoOn } -SELECT - '%', - tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)), - (toDecimal128(99.67, 6), 36, 61, 14) -FROM dist_03174 -WHERE dummy IN (0, '255') -GROUP BY - toNullable(13), - (99.67, 61, toLowCardinality(14)); -% ('%11default10113%AS%id_02%10101010') (99.67,36,61,14) --- { echoOn } -SELECT - 38, - concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3))) -FROM set_index_not__fuzz_0 -GROUP BY - toNullable(3), - concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3))) -FORMAT Null -SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1; diff --git a/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql b/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql deleted file mode 100644 index d397d30e285..00000000000 --- a/tests/queries/0_stateless/03174_low_cardinality_group_by_distributed_plan.sql +++ /dev/null @@ -1,80 +0,0 @@ --- There are various tests that check that group by keys don't 
propagate into functions replacing const arguments --- by full (empty) columns - -DROP TABLE IF EXISTS dist_03174; -DROP TABLE IF EXISTS set_index_not__fuzz_0; - --- https://github.com/ClickHouse/ClickHouse/issues/63006 - -SET allow_experimental_analyzer=1; - --- { echoOn } -SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) -FROM system.one -GROUP BY '666'; - -SELECT concatWithSeparator('.', toUInt128(6), '666' as b, materialize(toLowCardinality(8))) -FROM remote('127.0.0.{1,1}', 'system.one') -GROUP BY '666'; - --- https://github.com/ClickHouse/ClickHouse/issues/63006 -SELECT - 6, - concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, - concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b -FROM system.one -GROUP BY toNullable(6) - WITH ROLLUP -WITH TOTALS; - -SELECT - 6, - concat(' World', toUInt128(6), 6, 6, 6, toNullable(6), materialize(toLowCardinality(toNullable(toUInt128(6))))) AS a, - concat(concat(' World', 6, toLowCardinality(6), ' World', toUInt256(6), materialize(6), 6, toNullable(6), 6, 6, NULL, 6, 6), ' World', 6, 6, 6, 6, toUInt256(6), NULL, 6, 6) AS b -FROM remote('127.0.0.1') -GROUP BY toNullable(6) - WITH ROLLUP - WITH TOTALS; - --- https://github.com/ClickHouse/ClickHouse/issues/64945 --- { echoOff } -CREATE TABLE dist_03174 AS system.one ENGINE = Distributed(test_cluster_two_shards, system, one, dummy); - --- { echoOn } -SELECT - '%', - tuple(concat('%', 1, toLowCardinality(toLowCardinality(toNullable(materialize(1)))), currentDatabase(), 101., toNullable(13), '%AS%id_02%', toNullable(toNullable(10)), toLowCardinality(toNullable(10)), 10, 10)), - (toDecimal128(99.67, 6), 36, 61, 14) -FROM dist_03174 -WHERE dummy IN (0, '255') -GROUP BY - toNullable(13), - (99.67, 61, toLowCardinality(14)); - --- Parallel replicas --- { echoOff } -CREATE TABLE set_index_not__fuzz_0 -( - `name` String, - `status` Enum8('alive' = 0, 'rip' = 1), - INDEX idx_status status TYPE set(2) GRANULARITY 1 -) -ENGINE = MergeTree() -ORDER BY name; - -INSERT INTO set_index_not__fuzz_0 SELECT * FROM generateRandom() LIMIT 10; - --- { echoOn } -SELECT - 38, - concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3))) -FROM set_index_not__fuzz_0 -GROUP BY - toNullable(3), - concat(concat(CAST(NULL, 'Nullable(Int8)'), toNullable(3))) -FORMAT Null -SETTINGS max_threads = 1, allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 3, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1; - --- { echoOff } -DROP TABLE IF EXISTS dist_03174; -DROP TABLE IF EXISTS set_index_not__fuzz_0; From 104c3be7b691a8e2c7b8c71789d60868aa062e01 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 22 Jul 2024 16:23:51 +0000 Subject: [PATCH 0751/2361] Functions [h-r]*: Iterate over input_rows_count where appropriate --- src/Functions/FunctionStringToString.h | 6 ++-- src/Functions/LowerUpperImpl.h | 8 +++-- src/Functions/LowerUpperUTF8Impl.h | 7 +++-- src/Functions/StringHelpers.h | 28 +++++++++-------- .../URL/ExtractFirstSignificantSubdomain.h | 4 +-- src/Functions/URL/cutFragment.cpp | 2 +- src/Functions/URL/cutQueryString.cpp | 
2 +- .../URL/cutQueryStringAndFragment.cpp | 2 +- .../URL/cutToFirstSignificantSubdomain.cpp | 2 +- .../cutToFirstSignificantSubdomainCustom.cpp | 4 +-- src/Functions/URL/cutWWW.cpp | 2 +- src/Functions/URL/decodeURLComponent.cpp | 15 +++++----- src/Functions/URL/domain.cpp | 3 +- src/Functions/URL/domain.h | 7 +++-- src/Functions/URL/domainWithoutWWW.cpp | 2 +- .../URL/firstSignificantSubdomain.cpp | 2 +- .../URL/firstSignificantSubdomainCustom.cpp | 4 +-- src/Functions/URL/fragment.cpp | 2 +- src/Functions/URL/path.cpp | 2 +- src/Functions/URL/pathFull.cpp | 2 +- src/Functions/URL/port.cpp | 2 +- src/Functions/URL/protocol.cpp | 2 +- src/Functions/URL/queryString.cpp | 2 +- src/Functions/URL/queryStringAndFragment.cpp | 2 +- src/Functions/URL/topLevelDomain.cpp | 2 +- src/Functions/decodeHTMLComponent.cpp | 11 ++++--- src/Functions/decodeXMLComponent.cpp | 10 +++---- src/Functions/encodeXMLComponent.cpp | 10 +++---- src/Functions/idna.cpp | 20 ++++++------- src/Functions/initcap.cpp | 8 +++-- src/Functions/initcapUTF8.cpp | 5 ++-- src/Functions/normalizeQuery.cpp | 14 +++++---- src/Functions/normalizeString.cpp | 10 +++---- src/Functions/normalizedQueryHash.cpp | 14 ++++----- src/Functions/pointInEllipses.cpp | 10 ++----- src/Functions/punycode.cpp | 30 +++++++++---------- src/Functions/reverse.cpp | 6 ++-- src/Functions/reverse.h | 18 ++++++----- src/Functions/reverseUTF8.cpp | 14 ++++----- src/Functions/soundex.cpp | 12 ++++---- src/Functions/toValidUTF8.cpp | 10 +++---- src/Functions/trim.cpp | 10 +++---- 42 files changed, 169 insertions(+), 159 deletions(-) diff --git a/src/Functions/FunctionStringToString.h b/src/Functions/FunctionStringToString.h index 62d7b09f79e..e0e64e47b49 100644 --- a/src/Functions/FunctionStringToString.h +++ b/src/Functions/FunctionStringToString.h @@ -59,19 +59,19 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr column = arguments[0].column; if (const ColumnString * col = checkAndGetColumn(column.get())) { auto col_res = ColumnString::create(); - Impl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets()); + Impl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets(), input_rows_count); return col_res; } else if (const ColumnFixedString * col_fixed = checkAndGetColumn(column.get())) { auto col_res = ColumnFixedString::create(col_fixed->getN()); - Impl::vectorFixed(col_fixed->getChars(), col_fixed->getN(), col_res->getChars()); + Impl::vectorFixed(col_fixed->getChars(), col_fixed->getN(), col_res->getChars(), input_rows_count); return col_res; } else diff --git a/src/Functions/LowerUpperImpl.h b/src/Functions/LowerUpperImpl.h index 72b3ce1ca34..d463ef96e16 100644 --- a/src/Functions/LowerUpperImpl.h +++ b/src/Functions/LowerUpperImpl.h @@ -8,17 +8,19 @@ namespace DB template struct LowerUpperImpl { - static void vector(const ColumnString::Chars & data, + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t /*input_rows_count*/) { res_data.resize_exact(data.size()); res_offsets.assign(offsets); array(data.data(), data.data() + 
data.size(), res_data.data()); } - static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data) + static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data, size_t /*input_rows_count*/) { res_data.resize_exact(data.size()); array(data.data(), data.data() + data.size(), res_data.data()); diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index eebba7b9d5f..eedabca5b22 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -90,7 +90,8 @@ struct LowerUpperUTF8Impl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { if (data.empty()) return; @@ -98,7 +99,7 @@ struct LowerUpperUTF8Impl bool all_ascii = isAllASCII(data.data(), data.size()); if (all_ascii) { - LowerUpperImpl::vector(data, offsets, res_data, res_offsets); + LowerUpperImpl::vector(data, offsets, res_data, res_offsets, input_rows_count); return; } @@ -107,7 +108,7 @@ struct LowerUpperUTF8Impl array(data.data(), data.data() + data.size(), offsets, res_data.data()); } - static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Functions lowerUTF8 and upperUTF8 cannot work with FixedString argument"); } diff --git a/src/Functions/StringHelpers.h b/src/Functions/StringHelpers.h index 8f3a87d5d0e..24d8fe86c62 100644 --- a/src/Functions/StringHelpers.h +++ b/src/Functions/StringHelpers.h @@ -62,12 +62,13 @@ using Pos = const char *; template struct ExtractSubstringImpl { - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - size_t size = offsets.size(); - res_offsets.resize(size); - res_data.reserve(size * Extractor::getReserveLengthForElement()); + res_offsets.resize(input_rows_count); + res_data.reserve(input_rows_count * Extractor::getReserveLengthForElement()); size_t prev_offset = 0; size_t res_offset = 0; @@ -76,7 +77,7 @@ struct ExtractSubstringImpl Pos start; size_t length; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { Extractor::execute(reinterpret_cast(&data[prev_offset]), offsets[i] - prev_offset - 1, start, length); @@ -99,7 +100,7 @@ struct ExtractSubstringImpl res_data.assign(start, length); } - static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by this function"); } @@ -111,12 +112,13 @@ struct ExtractSubstringImpl template struct CutSubstringImpl { - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t 
input_rows_count) { res_data.reserve(data.size()); - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; size_t res_offset = 0; @@ -125,7 +127,7 @@ struct CutSubstringImpl Pos start; size_t length; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * current = reinterpret_cast(&data[prev_offset]); Extractor::execute(current, offsets[i] - prev_offset - 1, start, length); @@ -154,7 +156,7 @@ struct CutSubstringImpl res_data.append(start + length, data.data() + data.size()); } - static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by this function"); } diff --git a/src/Functions/URL/ExtractFirstSignificantSubdomain.h b/src/Functions/URL/ExtractFirstSignificantSubdomain.h index 0d1b1cac8ef..4316fd2fc3a 100644 --- a/src/Functions/URL/ExtractFirstSignificantSubdomain.h +++ b/src/Functions/URL/ExtractFirstSignificantSubdomain.h @@ -1,8 +1,8 @@ #pragma once #include -#include "domain.h" -#include "tldLookup.h" +#include +#include #include /// TLDType namespace DB diff --git a/src/Functions/URL/cutFragment.cpp b/src/Functions/URL/cutFragment.cpp index 3b99edf61a3..b32c4410190 100644 --- a/src/Functions/URL/cutFragment.cpp +++ b/src/Functions/URL/cutFragment.cpp @@ -1,6 +1,6 @@ #include -#include "fragment.h" #include +#include namespace DB { diff --git a/src/Functions/URL/cutQueryString.cpp b/src/Functions/URL/cutQueryString.cpp index 2886adc2e36..d2fa4a004f2 100644 --- a/src/Functions/URL/cutQueryString.cpp +++ b/src/Functions/URL/cutQueryString.cpp @@ -1,6 +1,6 @@ #include -#include "queryString.h" #include +#include namespace DB { diff --git a/src/Functions/URL/cutQueryStringAndFragment.cpp b/src/Functions/URL/cutQueryStringAndFragment.cpp index 4116b352542..ff427beb277 100644 --- a/src/Functions/URL/cutQueryStringAndFragment.cpp +++ b/src/Functions/URL/cutQueryStringAndFragment.cpp @@ -1,6 +1,6 @@ #include -#include "queryStringAndFragment.h" #include +#include namespace DB { diff --git a/src/Functions/URL/cutToFirstSignificantSubdomain.cpp b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp index 6e64b0b6ab8..454a241c54f 100644 --- a/src/Functions/URL/cutToFirstSignificantSubdomain.cpp +++ b/src/Functions/URL/cutToFirstSignificantSubdomain.cpp @@ -1,6 +1,6 @@ #include #include -#include "ExtractFirstSignificantSubdomain.h" +#include namespace DB diff --git a/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp b/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp index 77f40e465a6..7612b6ea4af 100644 --- a/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp +++ b/src/Functions/URL/cutToFirstSignificantSubdomainCustom.cpp @@ -1,6 +1,6 @@ #include -#include "ExtractFirstSignificantSubdomain.h" -#include "FirstSignificantSubdomainCustomImpl.h" +#include +#include namespace DB { diff --git a/src/Functions/URL/cutWWW.cpp b/src/Functions/URL/cutWWW.cpp index 992d5128440..ab3fae6b094 100644 --- a/src/Functions/URL/cutWWW.cpp +++ b/src/Functions/URL/cutWWW.cpp @@ -1,6 +1,6 @@ #include #include -#include "protocol.h" +#include #include diff --git a/src/Functions/URL/decodeURLComponent.cpp b/src/Functions/URL/decodeURLComponent.cpp index 05e3fbea3fd..bf4aaa6d5e3 100644 --- a/src/Functions/URL/decodeURLComponent.cpp +++ 
b/src/Functions/URL/decodeURLComponent.cpp @@ -1,7 +1,7 @@ -#include #include #include #include +#include namespace DB @@ -121,8 +121,10 @@ enum URLCodeStrategy template struct CodeURLComponentImpl { - static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t input_rows_count) { if (code_strategy == encode) { @@ -134,13 +136,12 @@ struct CodeURLComponentImpl res_data.resize(data.size()); } - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; size_t res_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * src_data = reinterpret_cast(&data[prev_offset]); size_t src_size = offsets[i] - prev_offset; @@ -165,7 +166,7 @@ struct CodeURLComponentImpl res_data.resize(res_offset); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by URL functions"); } diff --git a/src/Functions/URL/domain.cpp b/src/Functions/URL/domain.cpp index 443a3323075..68724b57a70 100644 --- a/src/Functions/URL/domain.cpp +++ b/src/Functions/URL/domain.cpp @@ -1,5 +1,4 @@ -#include "domain.h" - +#include #include #include diff --git a/src/Functions/URL/domain.h b/src/Functions/URL/domain.h index 936fb9d5f00..328df76b570 100644 --- a/src/Functions/URL/domain.h +++ b/src/Functions/URL/domain.h @@ -1,9 +1,10 @@ #pragma once -#include "protocol.h" -#include -#include #include +#include +#include + +#include namespace DB { diff --git a/src/Functions/URL/domainWithoutWWW.cpp b/src/Functions/URL/domainWithoutWWW.cpp index f6c8b5c84fc..436aad01b82 100644 --- a/src/Functions/URL/domainWithoutWWW.cpp +++ b/src/Functions/URL/domainWithoutWWW.cpp @@ -1,6 +1,6 @@ #include #include -#include "domain.h" +#include namespace DB { diff --git a/src/Functions/URL/firstSignificantSubdomain.cpp b/src/Functions/URL/firstSignificantSubdomain.cpp index b04f6d882ef..e929aefbc27 100644 --- a/src/Functions/URL/firstSignificantSubdomain.cpp +++ b/src/Functions/URL/firstSignificantSubdomain.cpp @@ -1,6 +1,6 @@ #include #include -#include "ExtractFirstSignificantSubdomain.h" +#include namespace DB diff --git a/src/Functions/URL/firstSignificantSubdomainCustom.cpp b/src/Functions/URL/firstSignificantSubdomainCustom.cpp index c07aa2b3ac8..95e5142667b 100644 --- a/src/Functions/URL/firstSignificantSubdomainCustom.cpp +++ b/src/Functions/URL/firstSignificantSubdomainCustom.cpp @@ -1,6 +1,6 @@ #include -#include "ExtractFirstSignificantSubdomain.h" -#include "FirstSignificantSubdomainCustomImpl.h" +#include +#include namespace DB diff --git a/src/Functions/URL/fragment.cpp b/src/Functions/URL/fragment.cpp index 262c1a4c7a6..66da5529d84 100644 --- a/src/Functions/URL/fragment.cpp +++ b/src/Functions/URL/fragment.cpp @@ -1,6 +1,6 @@ #include #include -#include "fragment.h" +#include namespace DB { diff --git a/src/Functions/URL/path.cpp b/src/Functions/URL/path.cpp index 8d609f43191..6de8e4fbf95 100644 --- a/src/Functions/URL/path.cpp +++ b/src/Functions/URL/path.cpp @@ -1,7 +1,7 @@ #include #include #include 
-#include "path.h" +#include #include diff --git a/src/Functions/URL/pathFull.cpp b/src/Functions/URL/pathFull.cpp index 9aacee21fed..deea617eb29 100644 --- a/src/Functions/URL/pathFull.cpp +++ b/src/Functions/URL/pathFull.cpp @@ -1,7 +1,7 @@ #include #include #include -#include "path.h" +#include #include namespace DB diff --git a/src/Functions/URL/port.cpp b/src/Functions/URL/port.cpp index c8f50f10a56..fac46281604 100644 --- a/src/Functions/URL/port.cpp +++ b/src/Functions/URL/port.cpp @@ -5,7 +5,7 @@ #include #include #include -#include "domain.h" +#include namespace DB diff --git a/src/Functions/URL/protocol.cpp b/src/Functions/URL/protocol.cpp index 6bed71207f6..1ac395dc554 100644 --- a/src/Functions/URL/protocol.cpp +++ b/src/Functions/URL/protocol.cpp @@ -1,6 +1,6 @@ #include #include -#include "protocol.h" +#include namespace DB diff --git a/src/Functions/URL/queryString.cpp b/src/Functions/URL/queryString.cpp index 7069ce46b38..f6aaf40fc96 100644 --- a/src/Functions/URL/queryString.cpp +++ b/src/Functions/URL/queryString.cpp @@ -1,6 +1,6 @@ #include #include -#include "queryString.h" +#include namespace DB { diff --git a/src/Functions/URL/queryStringAndFragment.cpp b/src/Functions/URL/queryStringAndFragment.cpp index 367a95acbdc..94f1dfa41e3 100644 --- a/src/Functions/URL/queryStringAndFragment.cpp +++ b/src/Functions/URL/queryStringAndFragment.cpp @@ -1,6 +1,6 @@ #include #include -#include "queryStringAndFragment.h" +#include namespace DB { diff --git a/src/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp index 25e9f383f60..b3e88832350 100644 --- a/src/Functions/URL/topLevelDomain.cpp +++ b/src/Functions/URL/topLevelDomain.cpp @@ -1,6 +1,6 @@ #include #include -#include "domain.h" +#include namespace DB { diff --git a/src/Functions/decodeHTMLComponent.cpp b/src/Functions/decodeHTMLComponent.cpp index 00a601b77a6..d36bee534a8 100644 --- a/src/Functions/decodeHTMLComponent.cpp +++ b/src/Functions/decodeHTMLComponent.cpp @@ -28,20 +28,20 @@ namespace const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { /// The size of result is always not more than the size of source. /// Because entities decodes to the shorter byte sequence. /// Example: &#xx... &#xx... will decode to UTF-8 byte sequence not longer than 4 bytes. 
res_data.resize(data.size()); - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; size_t res_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * src_data = reinterpret_cast(&data[prev_offset]); size_t src_size = offsets[i] - prev_offset; @@ -55,7 +55,7 @@ namespace res_data.resize(res_offset); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function decodeHTMLComponent cannot work with FixedString argument"); } @@ -64,7 +64,6 @@ namespace static const int max_legal_unicode_value = 0x10FFFF; static const int max_decimal_length_of_unicode_point = 7; /// 1114111 - static size_t execute(const char * src, size_t src_size, char * dst) { const char * src_pos = src; diff --git a/src/Functions/decodeXMLComponent.cpp b/src/Functions/decodeXMLComponent.cpp index cbbe46fcb8c..8743aa4759d 100644 --- a/src/Functions/decodeXMLComponent.cpp +++ b/src/Functions/decodeXMLComponent.cpp @@ -27,20 +27,20 @@ namespace const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { /// The size of result is always not more than the size of source. /// Because entities decodes to the shorter byte sequence. /// Example: &#xx... &#xx... will decode to UTF-8 byte sequence not longer than 4 bytes. res_data.resize(data.size()); - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; size_t res_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * src_data = reinterpret_cast(&data[prev_offset]); size_t src_size = offsets[i] - prev_offset; @@ -54,7 +54,7 @@ namespace res_data.resize(res_offset); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function decodeXMLComponent cannot work with FixedString argument"); } diff --git a/src/Functions/encodeXMLComponent.cpp b/src/Functions/encodeXMLComponent.cpp index 64d85ecaeb8..a22917838b7 100644 --- a/src/Functions/encodeXMLComponent.cpp +++ b/src/Functions/encodeXMLComponent.cpp @@ -25,17 +25,17 @@ namespace const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { /// 6 is the maximum size amplification (the maximum length of encoded entity: ") res_data.resize(data.size() * 6); - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; size_t res_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * src_data = reinterpret_cast(&data[prev_offset]); size_t src_size = offsets[i] - prev_offset; @@ -49,7 +49,7 @@ namespace res_data.resize(res_offset); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void 
vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function encodeXML cannot work with FixedString argument"); } diff --git a/src/Functions/idna.cpp b/src/Functions/idna.cpp index 5a7ae3485ba..cf9e855c912 100644 --- a/src/Functions/idna.cpp +++ b/src/Functions/idna.cpp @@ -44,15 +44,15 @@ struct IdnaEncode const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - const size_t rows = offsets.size(); res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII - res_offsets.reserve(rows); + res_offsets.reserve(input_rows_count); size_t prev_offset = 0; std::string ascii; - for (size_t row = 0; row < rows; ++row) + for (size_t row = 0; row < input_rows_count; ++row) { const char * value = reinterpret_cast(&data[prev_offset]); const size_t value_length = offsets[row] - prev_offset - 1; @@ -85,7 +85,7 @@ struct IdnaEncode } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed"); } @@ -99,15 +99,15 @@ struct IdnaDecode const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - const size_t rows = offsets.size(); res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII - res_offsets.reserve(rows); + res_offsets.reserve(input_rows_count); size_t prev_offset = 0; std::string unicode; - for (size_t row = 0; row < rows; ++row) + for (size_t row = 0; row < input_rows_count; ++row) { const char * ascii = reinterpret_cast(&data[prev_offset]); const size_t ascii_length = offsets[row] - prev_offset - 1; @@ -124,7 +124,7 @@ struct IdnaDecode } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed"); } diff --git a/src/Functions/initcap.cpp b/src/Functions/initcap.cpp index 4661ce117c0..00414b22344 100644 --- a/src/Functions/initcap.cpp +++ b/src/Functions/initcap.cpp @@ -9,10 +9,12 @@ namespace struct InitcapImpl { - static void vector(const ColumnString::Chars & data, + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t /*input_rows_count*/) { if (data.empty()) return; @@ -21,7 +23,7 @@ struct InitcapImpl array(data.data(), data.data() + data.size(), res_data.data()); } - static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data) + static void vectorFixed(const ColumnString::Chars & data, size_t /*n*/, ColumnString::Chars & res_data, size_t) { res_data.resize(data.size()); array(data.data(), data.data() + data.size(), res_data.data()); diff --git a/src/Functions/initcapUTF8.cpp b/src/Functions/initcapUTF8.cpp index 076dcff6622..282d846094e 100644 --- a/src/Functions/initcapUTF8.cpp 
+++ b/src/Functions/initcapUTF8.cpp @@ -22,7 +22,8 @@ struct InitcapUTF8Impl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t /*input_rows_count*/) { if (data.empty()) return; @@ -31,7 +32,7 @@ struct InitcapUTF8Impl array(data.data(), data.data() + data.size(), offsets, res_data.data()); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function initcapUTF8 cannot work with FixedString argument"); } diff --git a/src/Functions/normalizeQuery.cpp b/src/Functions/normalizeQuery.cpp index ad9a8903733..1a78bce7d29 100644 --- a/src/Functions/normalizeQuery.cpp +++ b/src/Functions/normalizeQuery.cpp @@ -19,17 +19,19 @@ template struct Impl { static constexpr auto name = keep_names ? "normalizeQueryKeepNames" : "normalizeQuery"; - static void vector(const ColumnString::Chars & data, + + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); res_data.reserve(data.size()); ColumnString::Offset prev_src_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { ColumnString::Offset curr_src_offset = offsets[i]; @@ -43,7 +45,7 @@ struct Impl } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot apply function normalizeQuery to fixed string."); } diff --git a/src/Functions/normalizeString.cpp b/src/Functions/normalizeString.cpp index 92411490eaa..c56508ced0e 100644 --- a/src/Functions/normalizeString.cpp +++ b/src/Functions/normalizeString.cpp @@ -84,7 +84,8 @@ struct NormalizeUTF8Impl static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { UErrorCode err = U_ZERO_ERROR; @@ -92,8 +93,7 @@ struct NormalizeUTF8Impl if (U_FAILURE(err)) throw Exception(ErrorCodes::CANNOT_NORMALIZE_STRING, "Normalization failed (getNormalizer): {}", u_errorName(err)); - size_t size = offsets.size(); - res_offsets.resize(size); + res_offsets.resize(input_rows_count); res_data.reserve(data.size() * 2); @@ -103,7 +103,7 @@ struct NormalizeUTF8Impl PODArray from_uchars; PODArray to_uchars; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t from_size = offsets[i] - current_from_offset - 1; @@ -157,7 +157,7 @@ struct NormalizeUTF8Impl res_data.resize(current_to_offset); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot apply function normalizeUTF8 to fixed string."); } diff --git a/src/Functions/normalizedQueryHash.cpp 
b/src/Functions/normalizedQueryHash.cpp index 63218f28af5..3dbe3ff9847 100644 --- a/src/Functions/normalizedQueryHash.cpp +++ b/src/Functions/normalizedQueryHash.cpp @@ -27,13 +27,13 @@ struct Impl static void vector( const ColumnString::Chars & data, const ColumnString::Offsets & offsets, - PaddedPODArray & res_data) + PaddedPODArray & res_data, + size_t input_rows_count) { - size_t size = offsets.size(); - res_data.resize(size); + res_data.resize(input_rows_count); ColumnString::Offset prev_src_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { ColumnString::Offset curr_src_offset = offsets[i]; res_data[i] = normalizedQueryHash( @@ -77,15 +77,15 @@ public: bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr column = arguments[0].column; if (const ColumnString * col = checkAndGetColumn(column.get())) { auto col_res = ColumnUInt64::create(); typename ColumnUInt64::Container & vec_res = col_res->getData(); - vec_res.resize(col->size()); - Impl::vector(col->getChars(), col->getOffsets(), vec_res); + vec_res.resize(input_rows_count); + Impl::vector(col->getChars(), col->getOffsets(), vec_res, input_rows_count); return col_res; } else diff --git a/src/Functions/pointInEllipses.cpp b/src/Functions/pointInEllipses.cpp index 2147428cee3..ac7a8cc4204 100644 --- a/src/Functions/pointInEllipses.cpp +++ b/src/Functions/pointInEllipses.cpp @@ -91,8 +91,6 @@ private: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - const auto size = input_rows_count; - /// Prepare array of ellipses. 
size_t ellipses_count = (arguments.size() - 2) / 4; std::vector ellipses(ellipses_count); @@ -141,13 +139,11 @@ private: auto dst = ColumnVector::create(); auto & dst_data = dst->getData(); - dst_data.resize(size); + dst_data.resize(input_rows_count); size_t start_index = 0; - for (const auto row : collections::range(0, size)) - { + for (size_t row = 0; row < input_rows_count; ++row) dst_data[row] = isPointInEllipses(col_vec_x->getData()[row], col_vec_y->getData()[row], ellipses.data(), ellipses_count, start_index); - } return dst; } @@ -157,7 +153,7 @@ private: const auto * col_const_y = assert_cast (col_y); size_t start_index = 0; UInt8 res = isPointInEllipses(col_const_x->getValue(), col_const_y->getValue(), ellipses.data(), ellipses_count, start_index); - return DataTypeUInt8().createColumnConst(size, res); + return DataTypeUInt8().createColumnConst(input_rows_count, res); } else { diff --git a/src/Functions/punycode.cpp b/src/Functions/punycode.cpp index 8004e3731b5..ec1fcfd0a70 100644 --- a/src/Functions/punycode.cpp +++ b/src/Functions/punycode.cpp @@ -6,11 +6,11 @@ #include #include -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wnewline-eof" -# include -# include -# pragma clang diagnostic pop +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wnewline-eof" +#include +#include +#pragma clang diagnostic pop namespace DB { @@ -38,16 +38,16 @@ struct PunycodeEncode const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - const size_t rows = offsets.size(); res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII - res_offsets.reserve(rows); + res_offsets.reserve(input_rows_count); size_t prev_offset = 0; std::u32string value_utf32; std::string value_puny; - for (size_t row = 0; row < rows; ++row) + for (size_t row = 0; row < input_rows_count; ++row) { const char * value = reinterpret_cast(&data[prev_offset]); const size_t value_length = offsets[row] - prev_offset - 1; @@ -72,7 +72,7 @@ struct PunycodeEncode } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not allowed"); } @@ -86,16 +86,16 @@ struct PunycodeDecode const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - const size_t rows = offsets.size(); res_data.reserve(data.size()); /// just a guess, assuming the input is all-ASCII - res_offsets.reserve(rows); + res_offsets.reserve(input_rows_count); size_t prev_offset = 0; std::u32string value_utf32; std::string value_utf8; - for (size_t row = 0; row < rows; ++row) + for (size_t row = 0; row < input_rows_count; ++row) { const char * value = reinterpret_cast(&data[prev_offset]); const size_t value_length = offsets[row] - prev_offset - 1; @@ -129,7 +129,7 @@ struct PunycodeDecode } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Arguments of type FixedString are not 
allowed"); } diff --git a/src/Functions/reverse.cpp b/src/Functions/reverse.cpp index d23e48b8d42..94b6634ffbd 100644 --- a/src/Functions/reverse.cpp +++ b/src/Functions/reverse.cpp @@ -55,19 +55,19 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr column = arguments[0].column; if (const ColumnString * col = checkAndGetColumn(column.get())) { auto col_res = ColumnString::create(); - ReverseImpl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets()); + ReverseImpl::vector(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets(), input_rows_count); return col_res; } else if (const ColumnFixedString * col_fixed = checkAndGetColumn(column.get())) { auto col_res = ColumnFixedString::create(col_fixed->getN()); - ReverseImpl::vectorFixed(col_fixed->getChars(), col_fixed->getN(), col_res->getChars()); + ReverseImpl::vectorFixed(col_fixed->getChars(), col_fixed->getN(), col_res->getChars(), input_rows_count); return col_res; } else diff --git a/src/Functions/reverse.h b/src/Functions/reverse.h index 5f999af4297..7c18d72769a 100644 --- a/src/Functions/reverse.h +++ b/src/Functions/reverse.h @@ -9,17 +9,18 @@ namespace DB */ struct ReverseImpl { - static void vector(const ColumnString::Chars & data, + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { res_data.resize_exact(data.size()); res_offsets.assign(offsets); - size_t size = offsets.size(); ColumnString::Offset prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { for (size_t j = prev_offset; j < offsets[i] - 1; ++j) res_data[j] = data[offsets[i] + prev_offset - 2 - j]; @@ -28,12 +29,15 @@ struct ReverseImpl } } - static void vectorFixed(const ColumnString::Chars & data, size_t n, ColumnString::Chars & res_data) + static void vectorFixed( + const ColumnString::Chars & data, + size_t n, + ColumnString::Chars & res_data, + size_t input_rows_count) { res_data.resize_exact(data.size()); - size_t size = data.size() / n; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) for (size_t j = i * n; j < (i + 1) * n; ++j) res_data[j] = data[(i * 2 + 1) * n - j - 1]; } diff --git a/src/Functions/reverseUTF8.cpp b/src/Functions/reverseUTF8.cpp index 1aee349fa8d..ca57d946b19 100644 --- a/src/Functions/reverseUTF8.cpp +++ b/src/Functions/reverseUTF8.cpp @@ -23,25 +23,25 @@ namespace */ struct ReverseUTF8Impl { - static void vector(const ColumnString::Chars & data, + static void vector( + const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { bool all_ascii = isAllASCII(data.data(), data.size()); if (all_ascii) { - ReverseImpl::vector(data, offsets, res_data, res_offsets); + ReverseImpl::vector(data, offsets, res_data, res_offsets, input_rows_count); return; } res_data.resize(data.size()); res_offsets.assign(offsets); - size_t size = offsets.size(); - ColumnString::Offset prev_offset = 0; - for (size_t i = 
0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { ColumnString::Offset j = prev_offset; while (j < offsets[i] - 1) @@ -73,7 +73,7 @@ struct ReverseUTF8Impl } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot apply function reverseUTF8 to fixed string."); } diff --git a/src/Functions/soundex.cpp b/src/Functions/soundex.cpp index fcf1523d1a3..e8bd1b664e3 100644 --- a/src/Functions/soundex.cpp +++ b/src/Functions/soundex.cpp @@ -79,14 +79,14 @@ struct SoundexImpl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - const size_t size = offsets.size(); - res_data.resize(size * (length + 1)); - res_offsets.resize(size); + res_data.resize(input_rows_count * (length + 1)); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * value = reinterpret_cast(&data[prev_offset]); const size_t value_length = offsets[i] - prev_offset - 1; @@ -98,7 +98,7 @@ struct SoundexImpl } } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by soundex function"); } diff --git a/src/Functions/toValidUTF8.cpp b/src/Functions/toValidUTF8.cpp index 41d29d9c494..376732256b0 100644 --- a/src/Functions/toValidUTF8.cpp +++ b/src/Functions/toValidUTF8.cpp @@ -128,16 +128,16 @@ struct ToValidUTF8Impl const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - const size_t offsets_size = offsets.size(); /// It can be larger than that, but we believe it is unlikely to happen. 
res_data.resize(data.size()); - res_offsets.resize(offsets_size); + res_offsets.resize(input_rows_count); size_t prev_offset = 0; WriteBufferFromVector write_buffer(res_data); - for (size_t i = 0; i < offsets_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * haystack_data = reinterpret_cast(&data[prev_offset]); const size_t haystack_size = offsets[i] - prev_offset - 1; @@ -149,7 +149,7 @@ struct ToValidUTF8Impl write_buffer.finalize(); } - [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + [[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column of type FixedString is not supported by toValidUTF8 function"); } diff --git a/src/Functions/trim.cpp b/src/Functions/trim.cpp index 1f0011b8e99..5703e871423 100644 --- a/src/Functions/trim.cpp +++ b/src/Functions/trim.cpp @@ -43,10 +43,10 @@ public: const ColumnString::Chars & data, const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - size_t size = offsets.size(); - res_offsets.resize_exact(size); + res_offsets.resize_exact(input_rows_count); res_data.reserve_exact(data.size()); size_t prev_offset = 0; @@ -55,7 +55,7 @@ public: const UInt8 * start; size_t length; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { execute(reinterpret_cast(&data[prev_offset]), offsets[i] - prev_offset - 1, start, length); @@ -69,7 +69,7 @@ public: } } - static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &) + static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Functions trimLeft, trimRight and trimBoth cannot work with FixedString argument"); } From 54a503910299c13dac305049a98e8136ab989fbb Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Tue, 23 Jul 2024 19:25:38 +0200 Subject: [PATCH 0752/2361] Count log messages for building set and reusing set from cache --- ..._between_multiple_mutations_tasks_long.sql | 30 +++++++++---------- ...e_big_sets_between_mutation_tasks_long.sql | 29 +++++++++--------- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql index 40f7800fee1..6b0677a80ae 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_multiple_mutations_tasks_long.sql @@ -31,22 +31,22 @@ DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + SELECT count(), _part from 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part SETTINGS select_sequential_consistency=1; SYSTEM FLUSH LOGS; - --- Check that in every mutation there were parts where selected rows count then the size of big sets which will mean that sets were shared --- Also check that there was at least one part that read more rows then the size of set which will mean that the -WITH 10000000 AS rows_in_set +-- Check that in every mutation there were parts that built sets (log messages like 'Created Set with 10000000 entries from 10000000 rows in 0.388989187 sec.' 
) +-- and parts that shared sets (log messages like 'Got set from cache in 0.388930505 sec.' ) +WITH ( + SELECT uuid + FROM system.tables + WHERE (database = currentDatabase()) AND (name = '02581_trips') + ) AS table_uuid SELECT - mutation_version, - countIf(read_rows >= rows_in_set) >= 1 as has_parts_for_which_set_was_built, - countIf(read_rows <= rows_in_set) >= 1 as has_parts_that_shared_set -FROM -( - SELECT - CAST(splitByChar('_', part_name)[5], 'UInt64') AS mutation_version, - read_rows - FROM system.part_log - WHERE database = currentDatabase() and (event_date >= yesterday()) AND (`table` = '02581_trips') AND (event_type = 'MutatePart') -) + CAST(splitByChar('_', query_id)[5], 'UInt64') AS mutation_version, -- '5521485f-8a40-4aba-87a2-00342c369563::all_3_3_0_6' + sum(message LIKE 'Created Set with % entries%') >= 1 AS has_parts_for_which_set_was_built, + sum(message LIKE 'Got set from cache%') >= 1 AS has_parts_that_shared_set +FROM system.text_log +WHERE + query_id LIKE concat(CAST(table_uuid, 'String'), '::all\\_%') + AND (event_date >= yesterday()) + AND (message LIKE 'Created Set with % entries%' OR message LIKE 'Got set from cache%') GROUP BY mutation_version ORDER BY mutation_version FORMAT TSVWithNames; DROP TABLE 02581_trips; diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql index 603c7cb7db0..091a9c8171d 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks_long.sql @@ -59,21 +59,22 @@ SETTINGS mutations_sync=2; SELECT count() from 02581_trips WHERE description = ''; SYSTEM FLUSH LOGS; --- Check that in every mutation there were parts where selected rows count then the size of big sets which will mean that sets were shared --- Also check that there was at least one part that read more rows then the size of set which will mean that the -WITH 10000000 AS rows_in_set +-- Check that in every mutation there were parts that built sets (log messages like 'Created Set with 10000000 entries from 10000000 rows in 0.388989187 sec.' ) +-- and parts that shared sets (log messages like 'Got set from cache in 0.388930505 sec.' 
) +WITH ( + SELECT uuid + FROM system.tables + WHERE (database = currentDatabase()) AND (name = '02581_trips') + ) AS table_uuid SELECT - mutation_version, - countIf(read_rows >= rows_in_set) >= 1 as has_parts_for_which_set_was_built, - countIf(read_rows <= rows_in_set) >= 1 as has_parts_that_shared_set -FROM -( - SELECT - CAST(splitByChar('_', part_name)[5], 'UInt64') AS mutation_version, - read_rows - FROM system.part_log - WHERE database = currentDatabase() and (event_date >= yesterday()) AND (`table` = '02581_trips') AND (event_type = 'MutatePart') -) + CAST(splitByChar('_', query_id)[5], 'UInt64') AS mutation_version, -- '5521485f-8a40-4aba-87a2-00342c369563::all_3_3_0_6' + sum(message LIKE 'Created Set with % entries%') >= 1 AS has_parts_for_which_set_was_built, + sum(message LIKE 'Got set from cache%') >= 1 AS has_parts_that_shared_set +FROM system.text_log +WHERE + query_id LIKE concat(CAST(table_uuid, 'String'), '::all\\_%') + AND (event_date >= yesterday()) + AND (message LIKE 'Created Set with % entries%' OR message LIKE 'Got set from cache%') GROUP BY mutation_version ORDER BY mutation_version FORMAT TSVWithNames; DROP TABLE 02581_trips; From a9d7abbb928d43664f057dde88058c449a9a521c Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 23 Jul 2024 17:27:05 +0000 Subject: [PATCH 0753/2361] add drop option --- src/Core/SettingsEnums.cpp | 2 +- src/Storages/MergeTree/MergeTask.cpp | 9 +++++++++ .../03206_projection_merge_special_mergetree.sql | 12 ++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index 74b6c793849..b53a882de4e 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -180,7 +180,7 @@ IMPLEMENT_SETTING_ENUM(LightweightMutationProjectionMode, ErrorCodes::BAD_ARGUME IMPLEMENT_SETTING_ENUM(DeduplicateMergeProjectionMode, ErrorCodes::BAD_ARGUMENTS, {{"throw", DeduplicateMergeProjectionMode::THROW}, {"drop", DeduplicateMergeProjectionMode::DROP}, - {"rebuild", DeduplicateMergeProjectionMode::THROW}}) + {"rebuild", DeduplicateMergeProjectionMode::REBUILD}}) IMPLEMENT_SETTING_AUTO_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index fc64fae9a58..0b358c0fd7c 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -797,6 +797,15 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c } + const auto mode = global_ctx->data->getSettings()->deduplicate_merge_projection_mode; + /// Under throw mode, we still choose to drop projections due to backward compatibility since some + /// users might have projections before this change. 
+ if (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::DROP) + { + ctx->projections_iterator = ctx->tasks_for_projections.begin(); + return false; + } + const auto & projections = global_ctx->metadata_snapshot->getProjections(); for (const auto & projection : projections) diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index c8945fd784c..749f906569e 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -20,6 +20,18 @@ CREATE TABLE tp ( ) engine = ReplacingMergeTree order by type SETTINGS deduplicate_merge_projection_mode = 'drop'; +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +OPTIMIZE TABLE tp FINAL; + +-- expecting no projection +SYSTEM FLUSH LOGS; +SELECT + name, + part_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); + ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } From 1cbbbd107712c066bef673e5d4692a6950d9e85e Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 23 Jul 2024 17:35:10 +0000 Subject: [PATCH 0754/2361] Apply libunwind fix --- contrib/libunwind | 2 +- src/Common/QueryProfiler.cpp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/libunwind b/contrib/libunwind index 8f28e64d158..9b1f47ad8a6 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2 +Subproject commit 9b1f47ad8a6fcecbeaaead93bd87756ccf658071 diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 746010b5462..a7717a4288a 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -33,7 +33,7 @@ namespace DB namespace { #if defined(OS_LINUX) - thread_local size_t write_trace_iteration = 0; + //thread_local size_t write_trace_iteration = 0; #endif /// Even after timer_delete() the signal can be delivered, /// since it does not do anything with pending signals. @@ -57,7 +57,7 @@ namespace auto saved_errno = errno; /// We must restore previous value of errno in signal handler. 
-#if defined(OS_LINUX) +#if defined(OS_LINUX) && false //asdqwe if (info) { int overrun_count = info->si_overrun; @@ -92,7 +92,7 @@ constexpr bool sanitizer = false; #endif - asynchronous_stack_unwinding = true; + //asdqwe asynchronous_stack_unwinding = true; if (sanitizer || 0 == sigsetjmp(asynchronous_stack_unwinding_signal_jump_buffer, 1)) { stack_trace.emplace(signal_context); From 2551f141418a9625678ea6295426bc1d10517482 Mon Sep 17 00:00:00 2001 From: Max K Date: Tue, 23 Jul 2024 20:42:21 +0200 Subject: [PATCH 0755/2361] CI: Add messages for debugging --- tests/ci/ci_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 3b12fe6974f..d42091fb0da 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -104,6 +104,9 @@ class GHActions: res = json.load(json_file) except json.JSONDecodeError as e: print(f"ERROR: json decoder exception {e}") + json_file.seek(0) + print(" File content:") + print(json_file.read()) return {} return res From 9d55553225c4c5e253e32fb0de9944a2e29b7bcf Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 23 Jul 2024 18:52:50 +0000 Subject: [PATCH 0756/2361] Forbid create as select even when database_replicated_allow_heavy_create is set --- src/Interpreters/InterpreterCreateQuery.cpp | 16 ++++++++++++---- ...ed_database_forbid_create_as_select.reference | 2 ++ ...eplicated_database_forbid_create_as_select.sh | 8 ++++++-- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ea10ad59db4..2f837fe4d2b 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1329,8 +1329,8 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (need_add_to_database) database = DatabaseCatalog::instance().tryGetDatabase(database_name); - bool allow_heavy_create = getContext()->getSettingsRef().database_replicated_allow_heavy_create; - if (!allow_heavy_create && database && database->getEngineName() == "Replicated" && (create.select || create.is_populate)) + bool allow_heavy_populate = getContext()->getSettingsRef().database_replicated_allow_heavy_create && create.is_populate; + if (!allow_heavy_populate && database && database->getEngineName() == "Replicated" && (create.select || create.is_populate)) { bool is_storage_replicated = false; if (create.storage && create.storage->engine) @@ -1342,10 +1342,18 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) const bool allow_create_select_for_replicated = (create.isView() && !create.is_populate) || create.is_create_empty || !is_storage_replicated; if (!allow_create_select_for_replicated) + { + /// POPULATE can be enabled with setting, provide hint in error message + if (create.is_populate) + throw Exception( + ErrorCodes::SUPPORT_IS_DISABLED, + "CREATE with POPULATE is not supported with Replicated databases. Consider using separate CREATE and INSERT queries. " + "Alternatively, you can enable 'database_replicated_allow_heavy_create' setting to allow this operation, use with caution"); + throw Exception( ErrorCodes::SUPPORT_IS_DISABLED, - "CREATE AS SELECT and POPULATE is not supported with Replicated databases. Consider using separate CREATE and INSERT queries. " - "Alternatively, you can enable 'database_replicated_allow_heavy_create' setting to allow this operation, use with caution"); + "CREATE AS SELECT is not supported with Replicated databases. 
Consider using separate CREATE and INSERT queries."); + } } if (database && database->shouldReplicateQuery(getContext(), query_ptr)) diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference index 6ed281c757a..98fb6a68656 100644 --- a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference +++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.reference @@ -1,2 +1,4 @@ 1 1 +1 +1 diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh index 15f169d880f..b587549cb60 100755 --- a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh +++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh @@ -18,8 +18,12 @@ ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIAL ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" -# But it is allowed with the special setting -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --database_replicated_allow_heavy_create=1 +# POPULATE is allowed with the special setting ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --database_replicated_allow_heavy_create=1 +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv3 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --compatibility='24.6' + +# AS SELECT is forbidden even with the setting +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --database_replicated_allow_heavy_create=1 |& grep -cm1 "SUPPORT_IS_DISABLED" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --compatibility='24.6' |& grep -cm1 "SUPPORT_IS_DISABLED" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" From 032486951316e518aa31e2548e6a501289bb0b6e Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 19:07:56 +0000 Subject: [PATCH 0757/2361] Split dynamic tests and rewrite them from sh to sql to speed up --- .../03036_dynamic_read_subcolumns.reference | 57 -- .../03036_dynamic_read_subcolumns.sh | 62 --- ...ad_subcolumns_compact_merge_tree.reference | 17 + ...mic_read_subcolumns_compact_merge_tree.sql | 40 ++ ...6_dynamic_read_subcolumns_memory.reference | 17 + .../03036_dynamic_read_subcolumns_memory.sql | 40 ++ ..._read_subcolumns_wide_merge_tree.reference | 17 + ...ynamic_read_subcolumns_wide_merge_tree.sql | 40 ++ 
...3037_dynamic_merges_1_horizontal.reference | 60 -- .../03037_dynamic_merges_1_horizontal.sh | 52 -- ..._1_horizontal_compact_merge_tree.reference | 28 + ...merges_1_horizontal_compact_merge_tree.sql | 33 ++ ...s_1_horizontal_compact_wide_tree.reference | 28 + ..._merges_1_horizontal_compact_wide_tree.sql | 33 ++ .../03037_dynamic_merges_1_vertical.reference | 60 -- .../03037_dynamic_merges_1_vertical.sh | 51 -- ...es_1_vertical_compact_merge_tree.reference | 28 + ...c_merges_1_vertical_compact_merge_tree.sql | 33 ++ ...erges_1_vertical_wide_merge_tree.reference | 28 + ...amic_merges_1_vertical_wide_merge_tree.sql | 33 ++ .../03037_dynamic_merges_2.reference | 20 - .../0_stateless/03037_dynamic_merges_2.sh | 45 -- ..._2_horizontal_compact_merge_tree.reference | 3 + ...merges_2_horizontal_compact_merge_tree.sql | 14 + ...ges_2_horizontal_wide_merge_tree.reference | 3 + ...ic_merges_2_horizontal_wide_merge_tree.sql | 14 + ...es_2_vertical_compact_merge_tree.reference | 3 + ...c_merges_2_vertical_compact_merge_tree.sql | 14 + ...erges_2_vertical_wide_merge_tree.reference | 3 + ...amic_merges_2_vertical_wide_merge_tree.sql | 14 + ...ested_dynamic_merges_compact_horizontal.sh | 32 -- ...sted_dynamic_merges_compact_horizontal.sql | 29 + ..._nested_dynamic_merges_compact_vertical.sh | 32 -- ...nested_dynamic_merges_compact_vertical.sql | 29 + ...8_nested_dynamic_merges_wide_horizontal.sh | 32 -- ..._nested_dynamic_merges_wide_horizontal.sql | 29 + ...038_nested_dynamic_merges_wide_vertical.sh | 32 -- ...38_nested_dynamic_merges_wide_vertical.sql | 29 + .../03040_dynamic_type_alters_1.reference | 526 ------------------ .../03040_dynamic_type_alters_1.sh | 77 --- ...type_alters_1_compact_merge_tree.reference | 174 ++++++ ...namic_type_alters_1_compact_merge_tree.sql | 53 ++ ...040_dynamic_type_alters_1_memory.reference | 175 ++++++ .../03040_dynamic_type_alters_1_memory.sql | 53 ++ ...ic_type_alters_1_wide_merge_tree.reference | 174 ++++++ ..._dynamic_type_alters_1_wide_merge_tree.sql | 53 ++ .../03040_dynamic_type_alters_2.reference | 182 ------ .../03040_dynamic_type_alters_2.sh | 57 -- ...type_alters_2_compact_merge_tree.reference | 90 +++ ...namic_type_alters_2_compact_merge_tree.sql | 39 ++ ...ic_type_alters_2_wide_merge_tree.reference | 90 +++ ..._dynamic_type_alters_2_wide_merge_tree.sql | 39 ++ 52 files changed, 1539 insertions(+), 1377 deletions(-) delete mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference delete mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.reference create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.sql create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql delete mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.reference delete mode 100755 tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql create mode 100644 
tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql delete mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_vertical.reference delete mode 100755 tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql delete mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2.reference delete mode 100755 tests/queries/0_stateless/03037_dynamic_merges_2.sh create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql delete mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql delete mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql delete mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql delete mode 100755 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql delete mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1.reference delete mode 100755 tests/queries/0_stateless/03040_dynamic_type_alters_1.sh create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.sql create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.sql delete mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_2.reference delete mode 100755 tests/queries/0_stateless/03040_dynamic_type_alters_2.sh create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.sql 
create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.sql diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference deleted file mode 100644 index 36984bc8b9b..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.reference +++ /dev/null @@ -1,57 +0,0 @@ -Memory -test -Array(Array(Dynamic)) -Array(Variant(String, UInt64)) -None -String -UInt64 -200000 -200000 -200000 -200000 -0 -0 -200000 -200000 -100000 -100000 -200000 -0 -MergeTree compact -test -Array(Array(Dynamic)) -Array(Variant(String, UInt64)) -None -String -UInt64 -200000 -200000 -200000 -200000 -0 -0 -200000 -200000 -100000 -100000 -200000 -0 -MergeTree wide -test -Array(Array(Dynamic)) -Array(Variant(String, UInt64)) -None -String -UInt64 -200000 -200000 -200000 -200000 -0 -0 -200000 -200000 -100000 -100000 -200000 -0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh deleted file mode 100755 index 65517061b99..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" - - -function test() -{ - echo "test" - $CH_CLIENT -q "insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000" - $CH_CLIENT -q "insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000" - - $CH_CLIENT -q "select distinct dynamicType(d) as type from test order by type" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'UInt64'" - $CH_CLIENT -q "select count() from test where d.UInt64 is not NULL" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'String'" - $CH_CLIENT -q "select count() from test where d.String is not NULL" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Date'" - $CH_CLIENT -q "select count() from test where d.Date is not NULL" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'" - $CH_CLIENT -q "select count() from test where not 
empty(d.\`Array(Variant(String, UInt64))\`)" - $CH_CLIENT -q "select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'" - $CH_CLIENT -q "select count() from test where not empty(d.\`Array(Array(Dynamic))\`)" - $CH_CLIENT -q "select count() from test where d is NULL" - $CH_CLIENT -q "select count() from test where not empty(d.\`Tuple(a Array(Dynamic))\`.a.String)" - - $CH_CLIENT -q "select d, d.UInt64, d.String, d.\`Array(Variant(String, UInt64))\` from test format Null" - $CH_CLIENT -q "select d.UInt64, d.String, d.\`Array(Variant(String, UInt64))\` from test format Null" - $CH_CLIENT -q "select d.Int8, d.Date, d.\`Array(String)\` from test format Null" - $CH_CLIENT -q "select d, d.UInt64, d.Date, d.\`Array(Variant(String, UInt64))\`, d.\`Array(Variant(String, UInt64))\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" - $CH_CLIENT -q "select d.UInt64, d.Date, d.\`Array(Variant(String, UInt64))\`, d.\`Array(Variant(String, UInt64))\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64, d.\`Array(Variant(String, UInt64))\`.String from test format Null" - $CH_CLIENT -q "select d, d.\`Tuple(a UInt64, b String)\`.a, d.\`Array(Dynamic)\`.\`Variant(String, UInt64)\`.UInt64, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" - $CH_CLIENT -q "select d.\`Array(Dynamic)\`.\`Variant(String, UInt64)\`.UInt64, d.\`Array(Dynamic)\`.size0, d.\`Array(Variant(String, UInt64))\`.UInt64 from test format Null" - $CH_CLIENT -q "select d.\`Array(Array(Dynamic))\`.size1, d.\`Array(Array(Dynamic))\`.UInt64, d.\`Array(Array(Dynamic))\`.\`Map(String, Tuple(a UInt64))\`.values.a from test format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "Memory" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=Memory" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.reference new file mode 100644 index 00000000000..d75d75896f7 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.reference @@ -0,0 +1,17 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +None +String +UInt64 +200000 +200000 +200000 +200000 +0 +0 +200000 +200000 +100000 +100000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql new file mode 100644 index 00000000000..66fbf006a8c --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql @@ -0,0 +1,40 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, number from numbers(100000) settings 
min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.reference new file mode 100644 index 00000000000..d75d75896f7 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.reference @@ -0,0 +1,17 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +None +String +UInt64 +200000 +200000 +200000 +200000 +0 +0 +200000 +200000 +100000 +100000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.sql 
b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.sql new file mode 100644 index 00000000000..bb03bdef704 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_memory.sql @@ -0,0 +1,40 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=Memory; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git 
a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.reference b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.reference new file mode 100644 index 00000000000..d75d75896f7 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.reference @@ -0,0 +1,17 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +None +String +UInt64 +200000 +200000 +200000 +200000 +0 +0 +200000 +200000 +100000 +100000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql new file mode 100644 index 00000000000..00aba3a57b6 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql @@ -0,0 +1,40 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, 
d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.reference deleted file mode 100644 index 59297e46330..00000000000 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.reference +++ /dev/null @@ -1,60 +0,0 @@ -MergeTree compact -test -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String -MergeTree wide -test -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh deleted file mode 100755 index 887b2ed94d7..00000000000 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1" - -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, number from numbers(100000)" - $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(80000)" - $CH_CLIENT -q "insert into test select number, range(number % 10 + 1) from numbers(70000)" - $CH_CLIENT -q "insert into test select number, toDate(number) from numbers(60000)" - $CH_CLIENT -q "insert into test select number, toDateTime(number) from numbers(50000)" - $CH_CLIENT -q "insert into test select number, NULL from numbers(100000)" - - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, map(number, number) from numbers(200000)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, tuple(number, number) from numbers(10000)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference new file mode 100644 index 00000000000..d0d777a5a38 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference @@ -0,0 +1,28 @@ +50000 DateTime +60000 Date +70000 Array(UInt16) +80000 String +100000 None +100000 UInt64 +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +200000 Map(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +10000 Tuple(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 
String +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +270000 String diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql new file mode 100644 index 00000000000..b66fe5e2187 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql @@ -0,0 +1,33 @@ +-- Tags: long +set allow_experimental_dynamic_type=1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, 'str_' || toString(number) from numbers(80000); +insert into test select number, range(number % 10 + 1) from numbers(70000); +insert into test select number, toDate(number) from numbers(60000); +insert into test select number, toDateTime(number) from numbers(50000); +insert into test select number, NULL from numbers(100000); + +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; optimize table test final;; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, map(number, number) from numbers(200000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, tuple(number, number) from numbers(10000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference new file mode 100644 index 00000000000..d0d777a5a38 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference @@ -0,0 +1,28 @@ +50000 DateTime +60000 Date +70000 Array(UInt16) +80000 String +100000 None +100000 UInt64 +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +200000 Map(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +10000 Tuple(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +270000 String diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql new file mode 100644 index 00000000000..8a376b6d7d7 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql @@ -0,0 +1,33 @@ +-- Tags: long +set 
allow_experimental_dynamic_type=1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, 'str_' || toString(number) from numbers(80000); +insert into test select number, range(number % 10 + 1) from numbers(70000); +insert into test select number, toDate(number) from numbers(60000); +insert into test select number, toDateTime(number) from numbers(50000); +insert into test select number, NULL from numbers(100000); + +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; optimize table test final;; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, map(number, number) from numbers(200000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, tuple(number, number) from numbers(10000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.reference deleted file mode 100644 index 59297e46330..00000000000 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.reference +++ /dev/null @@ -1,60 +0,0 @@ -MergeTree compact -test -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String -MergeTree wide -test -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh deleted file mode 100755 index 371ae87c2ef..00000000000 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - - - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1" -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, number from numbers(100000)" - $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(80000)" - $CH_CLIENT -q "insert into test select number, range(number % 10 + 1) from numbers(70000)" - $CH_CLIENT -q "insert into test select number, toDate(number) from numbers(60000)" - $CH_CLIENT -q "insert into test select number, toDateTime(number) from numbers(50000)" - $CH_CLIENT -q "insert into test select number, NULL from numbers(100000)" - - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, map(number, number) from numbers(200000)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, tuple(number, number) from numbers(10000)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference new file mode 100644 index 00000000000..d0d777a5a38 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference @@ -0,0 +1,28 @@ +50000 DateTime +60000 Date +70000 Array(UInt16) +80000 String +100000 None +100000 UInt64 +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +70000 Array(UInt16) +100000 None +100000 UInt64 
+190000 String +200000 Map(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +10000 Tuple(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +270000 String diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql new file mode 100644 index 00000000000..127b56e727c --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql @@ -0,0 +1,33 @@ +-- Tags: long +set allow_experimental_dynamic_type=1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, 'str_' || toString(number) from numbers(80000); +insert into test select number, range(number % 10 + 1) from numbers(70000); +insert into test select number, toDate(number) from numbers(60000); +insert into test select number, toDateTime(number) from numbers(50000); +insert into test select number, NULL from numbers(100000); + +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; optimize table test final;; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, map(number, number) from numbers(200000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, tuple(number, number) from numbers(10000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference new file mode 100644 index 00000000000..d0d777a5a38 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference @@ -0,0 +1,28 @@ +50000 DateTime +60000 Date +70000 Array(UInt16) +80000 String +100000 None +100000 UInt64 +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +70000 Array(UInt16) +100000 None +100000 UInt64 +190000 String +200000 Map(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +10000 Tuple(UInt64, UInt64) +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +260000 String +100000 None +100000 UInt64 +200000 Map(UInt64, UInt64) +270000 String diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql 
b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql new file mode 100644 index 00000000000..e5c273cb592 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql @@ -0,0 +1,33 @@ +-- Tags: long +set allow_experimental_dynamic_type=1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, 'str_' || toString(number) from numbers(80000); +insert into test select number, range(number % 10 + 1) from numbers(70000); +insert into test select number, toDate(number) from numbers(60000); +insert into test select number, toDateTime(number) from numbers(50000); +insert into test select number, NULL from numbers(100000); + +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; optimize table test final;; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, map(number, number) from numbers(200000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, tuple(number, number) from numbers(10000); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2.reference b/tests/queries/0_stateless/03037_dynamic_merges_2.reference deleted file mode 100644 index 420b8185b16..00000000000 --- a/tests/queries/0_stateless/03037_dynamic_merges_2.reference +++ /dev/null @@ -1,20 +0,0 @@ -MergeTree compact + horizontal merge -test -1000000 Array(UInt16) -1000000 String -1000000 UInt64 -MergeTree wide + horizontal merge -test -1000000 Array(UInt16) -1000000 String -1000000 UInt64 -MergeTree compact + vertical merge -test -1000000 Array(UInt16) -1000000 String -1000000 UInt64 -MergeTree wide + vertical merge -test -1000000 Array(UInt16) -1000000 String -1000000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2.sh b/tests/queries/0_stateless/03037_dynamic_merges_2.sh deleted file mode 100755 index 40adbdd4262..00000000000 --- a/tests/queries/0_stateless/03037_dynamic_merges_2.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1" - - -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, number from numbers(1000000)" - $CH_CLIENT -q "insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000)" - $CH_CLIENT -q "insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000)" - - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact + horizontal merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide + horizontal merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact + vertical merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide + vertical merge" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference new file mode 100644 index 00000000000..afd392002e5 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference @@ -0,0 +1,3 @@ +1000000 Array(UInt16) +1000000 String +1000000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql new file mode 100644 index 00000000000..6d7a0dd8c18 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql @@ -0,0 +1,14 @@ +-- Tags: long + +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; +system stop merges test; +insert into test select number, number from numbers(1000000); +insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); +insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference new file mode 
100644 index 00000000000..afd392002e5 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference @@ -0,0 +1,3 @@ +1000000 Array(UInt16) +1000000 String +1000000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql new file mode 100644 index 00000000000..011d54d2360 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql @@ -0,0 +1,14 @@ +-- Tags: long + +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +system stop merges test; +insert into test select number, number from numbers(1000000); +insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); +insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference new file mode 100644 index 00000000000..afd392002e5 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference @@ -0,0 +1,3 @@ +1000000 Array(UInt16) +1000000 String +1000000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql new file mode 100644 index 00000000000..1a74f9e5417 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql @@ -0,0 +1,14 @@ +-- Tags: long + +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +system stop merges test; +insert into test select number, number from numbers(1000000); +insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); +insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference new file mode 100644 index 00000000000..afd392002e5 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference @@ -0,0 +1,3 @@ +1000000 Array(UInt16) +1000000 String +1000000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql new file mode 100644 index 00000000000..cbc834e9660 --- /dev/null +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql @@ -0,0 +1,14 @@ +-- Tags: long + +set 
allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +system stop merges test; +insert into test select number, number from numbers(1000000); +insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); +insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +system start merges test; +optimize table test final; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh deleted file mode 100755 index d4b6d1f4b63..00000000000 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0" - -$CH_CLIENT -q "drop table if exists test;" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" - -$CH_CLIENT -q "system stop merges test" -$CH_CLIENT -q "insert into test select number, number from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql new file mode 100644 index 00000000000..ff1dc5e7ded --- /dev/null +++ 
b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql @@ -0,0 +1,29 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set enable_named_columns_in_function_tuple = 0; + +drop table if exists test;; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh deleted file mode 100755 index 39671a297cf..00000000000 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0" - -$CH_CLIENT -q "drop table if exists test;" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" - -$CH_CLIENT -q "system stop merges test" -$CH_CLIENT -q "insert into test select number, number from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql new file mode 100644 index 00000000000..f9b0101cb87 --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql @@ -0,0 +1,29 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set enable_named_columns_in_function_tuple = 0; + +drop table if exists test;; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select 
count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh deleted file mode 100755 index d58545c0b13..00000000000 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0" - -$CH_CLIENT -q "drop table if exists test;" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" - -$CH_CLIENT -q "system stop merges test" -$CH_CLIENT -q "insert into test select number, number from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql new file mode 100644 index 00000000000..5f373d41c7d --- /dev/null +++ 
b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql @@ -0,0 +1,29 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set enable_named_columns_in_function_tuple = 0; + +drop table if exists test;; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh deleted file mode 100755 index 39671a297cf..00000000000 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0" - -$CH_CLIENT -q "drop table if exists test;" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" - -$CH_CLIENT -q "system stop merges test" -$CH_CLIENT -q "insert into test select number, number from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)" -$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)" - -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" -$CH_CLIENT -nm -q "system start merges test; optimize table test final;" -$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type" - -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql new file mode 100644 index 00000000000..36bbc76b8cb --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql @@ -0,0 +1,29 @@ +-- Tags: long + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set enable_named_columns_in_function_tuple = 0; + +drop table if exists test;; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; + +system stop merges test; +insert into test select number, number from numbers(100000); +insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || 
dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +drop table test; diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1.reference deleted file mode 100644 index a9c785d1e48..00000000000 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1.reference +++ /dev/null @@ -1,526 +0,0 @@ -Memory -initial insert -alter add column 1 -3 None -0 0 \N \N \N 0 -1 1 \N \N \N 0 -2 2 \N \N \N 0 -insert after alter add column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -alter modify column 1 -7 None -8 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -4 UInt64 -7 String -8 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 -1 Date -5 UInt64 -8 String -9 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -19 19 \N \N \N \N 0 -20 20 20 \N 20 \N 0 -21 21 str_21 str_21 \N \N 0 -22 22 1970-01-23 \N \N 1970-01-23 0 -alter modify column 3 -1 Date -5 UInt64 -8 String -9 None -0 0 0 \N 0 \N \N \N 0 -1 1 1 \N 0 \N \N \N 0 -2 2 2 \N 0 
\N \N \N 0 -3 3 3 \N 0 \N 3 \N 0 -4 4 4 \N 0 \N 4 \N 0 -5 5 5 \N 0 \N 5 \N 0 -6 6 6 \N 0 str_6 \N \N 0 -7 7 7 \N 0 str_7 \N \N 0 -8 8 8 \N 0 str_8 \N \N 0 -9 9 9 \N 0 \N \N \N 0 -10 10 10 \N 0 \N \N \N 0 -11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 \N 12 \N 0 -13 13 13 \N 0 str_13 \N \N 0 -14 14 14 \N 0 \N \N \N 0 -15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 -17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 -19 19 19 \N 0 \N \N \N 0 -20 20 20 \N 0 \N 20 \N 0 -21 21 21 \N 0 str_21 \N \N 0 -22 22 22 \N 0 \N \N 1970-01-23 0 -insert after alter modify column 3 -1 Date -5 UInt64 -8 String -12 None -0 0 0 \N 0 \N \N \N 0 -1 1 1 \N 0 \N \N \N 0 -2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 \N 3 \N 0 -4 4 4 \N 0 \N 4 \N 0 -5 5 5 \N 0 \N 5 \N 0 -6 6 6 \N 0 str_6 \N \N 0 -7 7 7 \N 0 str_7 \N \N 0 -8 8 8 \N 0 str_8 \N \N 0 -9 9 9 \N 0 \N \N \N 0 -10 10 10 \N 0 \N \N \N 0 -11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 \N 12 \N 0 -13 13 13 \N 0 str_13 \N \N 0 -14 14 14 \N 0 \N \N \N 0 -15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 -17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 -19 19 19 \N 0 \N \N \N 0 -20 20 20 \N 0 \N 20 \N 0 -21 21 21 \N 0 str_21 \N \N 0 -22 22 22 \N 0 \N \N 1970-01-23 0 -23 \N \N \N 0 \N \N \N 0 -24 24 24 \N 0 \N \N \N 0 -25 str_25 \N str_25 0 \N \N \N 0 -MergeTree compact -initial insert -alter add column 1 -3 None -0 0 \N \N \N 0 -1 1 \N \N \N 0 -2 2 \N \N \N 0 -insert after alter add column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -alter modify column 1 -7 None -8 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 -1 Date -1 UInt64 -9 None -12 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 
18 1970-01-19 1970-01-19 \N \N 0 -19 19 \N \N \N \N 0 -20 20 20 \N 20 \N 0 -21 21 str_21 str_21 \N \N 0 -22 22 1970-01-23 \N \N 1970-01-23 0 -alter modify column 3 -1 Date -1 UInt64 -9 None -12 String -0 0 0 \N 0 \N \N \N 0 -1 1 1 \N 0 \N \N \N 0 -2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 -6 6 6 \N 0 str_6 \N \N 0 -7 7 7 \N 0 str_7 \N \N 0 -8 8 8 \N 0 str_8 \N \N 0 -9 9 9 \N 0 \N \N \N 0 -10 10 10 \N 0 \N \N \N 0 -11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 -13 13 13 \N 0 str_13 \N \N 0 -14 14 14 \N 0 \N \N \N 0 -15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 -17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 -19 19 19 \N 0 \N \N \N 0 -20 20 20 \N 0 \N 20 \N 0 -21 21 21 \N 0 str_21 \N \N 0 -22 22 22 \N 0 \N \N 1970-01-23 0 -insert after alter modify column 3 -1 Date -1 UInt64 -12 None -12 String -0 0 0 \N 0 \N \N \N 0 -1 1 1 \N 0 \N \N \N 0 -2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 -6 6 6 \N 0 str_6 \N \N 0 -7 7 7 \N 0 str_7 \N \N 0 -8 8 8 \N 0 str_8 \N \N 0 -9 9 9 \N 0 \N \N \N 0 -10 10 10 \N 0 \N \N \N 0 -11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 -13 13 13 \N 0 str_13 \N \N 0 -14 14 14 \N 0 \N \N \N 0 -15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 -17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 -19 19 19 \N 0 \N \N \N 0 -20 20 20 \N 0 \N 20 \N 0 -21 21 21 \N 0 str_21 \N \N 0 -22 22 22 \N 0 \N \N 1970-01-23 0 -23 \N \N \N 0 \N \N \N 0 -24 24 24 \N 0 \N \N \N 0 -25 str_25 \N str_25 0 \N \N \N 0 -MergeTree wide -initial insert -alter add column 1 -3 None -0 0 \N \N \N 0 -1 1 \N \N \N 0 -2 2 \N \N \N 0 -insert after alter add column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -alter modify column 1 -7 None -8 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 -1 Date -1 UInt64 -9 None -12 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 
-7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -19 19 \N \N \N \N 0 -20 20 20 \N 20 \N 0 -21 21 str_21 str_21 \N \N 0 -22 22 1970-01-23 \N \N 1970-01-23 0 -alter modify column 3 -1 Date -1 UInt64 -9 None -12 String -0 0 0 \N 0 \N \N \N 0 -1 1 1 \N 0 \N \N \N 0 -2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 -6 6 6 \N 0 str_6 \N \N 0 -7 7 7 \N 0 str_7 \N \N 0 -8 8 8 \N 0 str_8 \N \N 0 -9 9 9 \N 0 \N \N \N 0 -10 10 10 \N 0 \N \N \N 0 -11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 -13 13 13 \N 0 str_13 \N \N 0 -14 14 14 \N 0 \N \N \N 0 -15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 -17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 -19 19 19 \N 0 \N \N \N 0 -20 20 20 \N 0 \N 20 \N 0 -21 21 21 \N 0 str_21 \N \N 0 -22 22 22 \N 0 \N \N 1970-01-23 0 -insert after alter modify column 3 -1 Date -1 UInt64 -12 None -12 String -0 0 0 \N 0 \N \N \N 0 -1 1 1 \N 0 \N \N \N 0 -2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 -6 6 6 \N 0 str_6 \N \N 0 -7 7 7 \N 0 str_7 \N \N 0 -8 8 8 \N 0 str_8 \N \N 0 -9 9 9 \N 0 \N \N \N 0 -10 10 10 \N 0 \N \N \N 0 -11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 -13 13 13 \N 0 str_13 \N \N 0 -14 14 14 \N 0 \N \N \N 0 -15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 -17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 -19 19 19 \N 0 \N \N \N 0 -20 20 20 \N 0 \N 20 \N 0 -21 21 21 \N 0 str_21 \N \N 0 -22 22 22 \N 0 \N \N 1970-01-23 0 -23 \N \N \N 0 \N \N \N 0 -24 24 24 \N 0 \N \N \N 0 -25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1.sh b/tests/queries/0_stateless/03040_dynamic_type_alters_1.sh deleted file mode 100755 index 1f2a6a31ad7..00000000000 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function run() -{ - echo "initial insert" - $CH_CLIENT -q "insert into test select number, number from numbers(3)" - - echo "alter add column 1" - $CH_CLIENT -q "alter table test add column d Dynamic(max_types=3) settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "insert after alter add column 1" - $CH_CLIENT -q "insert into test select number, number, number from numbers(3, 3)" - $CH_CLIENT -q "insert into test select number, number, 'str_' || toString(number) from numbers(6, 3)" - $CH_CLIENT -q "insert into test select number, number, NULL from numbers(9, 3)" - $CH_CLIENT -q "insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "alter modify column 1" - $CH_CLIENT -q "alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "insert after alter modify column 1" - $CH_CLIENT -q "insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "alter modify column 2" - $CH_CLIENT -q "alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "insert after alter modify column 2" - $CH_CLIENT -q "insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "alter modify column 3" - $CH_CLIENT -q "alter table test modify column y Dynamic settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, y.UInt64, y.String, y.\`Tuple(a UInt64)\`.a, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "insert after alter modify column 3" - $CH_CLIENT -q "insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3)" - 
$CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, y.UInt64, y.String, y.\`Tuple(a UInt64)\`.a, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "Memory" -$CH_CLIENT -q "create table test (x UInt64, y UInt64) engine=Memory" -run -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" -run -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (x UInt64, y UInt64 ) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -run -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference new file mode 100644 index 00000000000..2ec301b747b --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference @@ -0,0 +1,174 @@ +initial insert +alter add column 1 +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 +insert after alter add column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +alter modify column 1 +7 None +8 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +insert after alter modify column 1 +8 None +11 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +alter modify column 2 +8 None +11 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +insert after alter modify column 2 +1 Date +1 UInt64 +9 None +12 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +19 19 \N \N \N \N 0 +20 20 20 \N 20 \N 0 +21 21 str_21 
str_21 \N \N 0 +22 22 1970-01-23 \N \N 1970-01-23 0 +alter modify column 3 +1 Date +1 UInt64 +9 None +12 String +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 3 \N \N 0 +4 4 4 \N 0 4 \N \N 0 +5 5 5 \N 0 5 \N \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 12 \N \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 16 \N \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 1970-01-19 \N \N 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +insert after alter modify column 3 +1 Date +1 UInt64 +12 None +12 String +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 3 \N \N 0 +4 4 4 \N 0 4 \N \N 0 +5 5 5 \N 0 5 \N \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 12 \N \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 16 \N \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 1970-01-19 \N \N 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +23 \N \N \N 0 \N \N \N 0 +24 24 24 \N 0 \N \N \N 0 +25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql new file mode 100644 index 00000000000..4ab700306d4 --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql @@ -0,0 +1,53 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000; +select 'initial insert'; +insert into test select number, number from numbers(3); + +select 'alter add column 1'; +alter table test add column d Dynamic(max_types=3) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter add column 1'; +insert into test select number, number, number from numbers(3, 3); +insert into test select number, number, 'str_' || toString(number) from numbers(6, 3); +insert into test select number, number, NULL from numbers(9, 3); +insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 1'; +alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 1'; +insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || 
toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 2'; +alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 2'; +insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 3'; +alter table test modify column y Dynamic settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 3'; +insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference new file mode 100644 index 00000000000..c592528c3cd --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference @@ -0,0 +1,175 @@ +initial insert +alter add column 1 +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 +insert after alter add column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +alter modify column 1 +7 None +8 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +insert after alter modify column 1 +8 None +11 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +alter modify column 2 +4 UInt64 +7 String +8 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 
+5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +insert after alter modify column 2 +1 Date +5 UInt64 +8 String +9 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +19 19 \N \N \N \N 0 +20 20 20 \N 20 \N 0 +21 21 str_21 str_21 \N \N 0 +22 22 1970-01-23 \N \N 1970-01-23 0 +alter modify column 3 +1 Date +5 UInt64 +8 String +9 None +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 16 \N \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 1970-01-19 \N \N 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +insert after alter modify column 3 +1 Date +5 UInt64 +8 String +12 None +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 16 \N \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 1970-01-19 \N \N 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +23 \N \N \N 0 \N \N \N 0 +24 24 24 \N 0 \N \N \N 0 +25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.sql new file mode 100644 index 00000000000..e802fd034ce --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.sql @@ -0,0 +1,53 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (x UInt64, y UInt64) engine=Memory; +select 'initial insert'; +insert into test select number, number from numbers(3); + +select 'alter add column 1'; +alter table test add column d Dynamic(max_types=3) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter add column 1'; +insert into test select number, number, number from numbers(3, 3); +insert into test select number, number, 'str_' || toString(number) from numbers(6, 3); +insert into test select number, number, NULL from numbers(9, 3); +insert into test select number, number, 
multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 1'; +alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 1'; +insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 2'; +alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 2'; +insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 3'; +alter table test modify column y Dynamic settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 3'; +insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference new file mode 100644 index 00000000000..2ec301b747b --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference @@ -0,0 +1,174 @@ +initial insert +alter add column 1 +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 +insert after alter add column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +alter modify column 1 +7 None +8 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 
8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +insert after alter modify column 1 +8 None +11 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +alter modify column 2 +8 None +11 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +insert after alter modify column 2 +1 Date +1 UInt64 +9 None +12 String +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 3 \N \N 0 +4 4 4 4 \N \N 0 +5 5 5 5 \N \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 12 \N \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 16 \N \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 1970-01-19 \N \N 0 +19 19 \N \N \N \N 0 +20 20 20 \N 20 \N 0 +21 21 str_21 str_21 \N \N 0 +22 22 1970-01-23 \N \N 1970-01-23 0 +alter modify column 3 +1 Date +1 UInt64 +9 None +12 String +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 3 \N \N 0 +4 4 4 \N 0 4 \N \N 0 +5 5 5 \N 0 5 \N \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 12 \N \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 16 \N \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 1970-01-19 \N \N 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +insert after alter modify column 3 +1 Date +1 UInt64 +12 None +12 String +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 3 \N \N 0 +4 4 4 \N 0 4 \N \N 0 +5 5 5 \N 0 5 \N \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 12 \N \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 16 \N \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 1970-01-19 \N \N 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +23 \N \N \N 0 \N \N \N 0 +24 24 24 \N 0 \N \N \N 0 +25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.sql new file mode 100644 index 00000000000..55c4f0b5f0c --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.sql @@ -0,0 +1,53 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set 
use_variant_as_common_type = 1; + +drop table if exists test; +create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +select 'initial insert'; +insert into test select number, number from numbers(3); + +select 'alter add column 1'; +alter table test add column d Dynamic(max_types=3) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter add column 1'; +insert into test select number, number, number from numbers(3, 3); +insert into test select number, number, 'str_' || toString(number) from numbers(6, 3); +insert into test select number, number, NULL from numbers(9, 3); +insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 1'; +alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 1'; +insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 2'; +alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 2'; +insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter modify column 3'; +alter table test modify column y Dynamic settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter modify column 3'; +insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2.reference 
b/tests/queries/0_stateless/03040_dynamic_type_alters_2.reference deleted file mode 100644 index f7c00bd8c44..00000000000 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_2.reference +++ /dev/null @@ -1,182 +0,0 @@ -MergeTree compact -initial insert -alter add column -3 None -0 0 \N \N \N 0 -1 1 \N \N \N 0 -2 2 \N \N \N 0 -insert after alter add column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -alter rename column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -insert nested dynamic -3 Array(Dynamic) -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 [] [] [] -1 1 \N \N \N \N 0 [] [] [] -2 2 \N \N \N \N 0 [] [] [] -3 3 3 \N 3 \N 0 [] [] [] -4 4 4 \N 4 \N 0 [] [] [] -5 5 5 \N 5 \N 0 [] [] [] -6 6 str_6 str_6 \N \N 0 [] [] [] -7 7 str_7 str_7 \N \N 0 [] [] [] -8 8 str_8 str_8 \N \N 0 [] [] [] -9 9 \N \N \N \N 0 [] [] [] -10 10 \N \N \N \N 0 [] [] [] -11 11 \N \N \N \N 0 [] [] [] -12 12 12 \N 12 \N 0 [] [] [] -13 13 str_13 str_13 \N \N 0 [] [] [] -14 14 \N \N \N \N 0 [] [] [] -15 15 [15] \N \N \N 0 [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N 0 [17] [NULL] [NULL] -alter rename column 2 -3 Array(Dynamic) -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 [] [] [] -1 1 \N \N \N \N 0 [] [] [] -2 2 \N \N \N \N 0 [] [] [] -3 3 3 \N 3 \N 0 [] [] [] -4 4 4 \N 4 \N 0 [] [] [] -5 5 5 \N 5 \N 0 [] [] [] -6 6 str_6 str_6 \N \N 0 [] [] [] -7 7 str_7 str_7 \N \N 0 [] [] [] -8 8 str_8 str_8 \N \N 0 [] [] [] -9 9 \N \N \N \N 0 [] [] [] -10 10 \N \N \N \N 0 [] [] [] -11 11 \N \N \N \N 0 [] [] [] -12 12 12 \N 12 \N 0 [] [] [] -13 13 str_13 str_13 \N \N 0 [] [] [] -14 14 \N \N \N \N 0 [] [] [] -15 15 [15] \N \N \N 0 [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N 0 [17] [NULL] [NULL] -MergeTree wide -initial insert -alter add column -3 None -0 0 \N \N \N 0 -1 1 \N \N \N 0 -2 2 \N \N \N 0 -insert after alter add column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -alter rename column 1 -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 \N 3 \N 0 -4 4 4 \N 4 \N 0 -5 5 5 \N 5 \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 \N 12 \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -insert nested dynamic -3 Array(Dynamic) -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 [] [] [] -1 1 \N \N \N \N 0 [] [] [] -2 2 \N \N \N \N 0 [] [] [] -3 3 3 \N 3 \N 0 [] [] [] -4 4 4 \N 4 \N 0 [] [] [] -5 5 5 \N 5 \N 0 [] [] [] -6 6 str_6 str_6 \N \N 0 [] [] [] -7 7 str_7 str_7 \N \N 0 [] [] [] -8 8 str_8 str_8 
\N \N 0 [] [] [] -9 9 \N \N \N \N 0 [] [] [] -10 10 \N \N \N \N 0 [] [] [] -11 11 \N \N \N \N 0 [] [] [] -12 12 12 \N 12 \N 0 [] [] [] -13 13 str_13 str_13 \N \N 0 [] [] [] -14 14 \N \N \N \N 0 [] [] [] -15 15 [15] \N \N \N 0 [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N 0 [17] [NULL] [NULL] -alter rename column 2 -3 Array(Dynamic) -4 String -4 UInt64 -7 None -0 0 \N \N \N \N 0 [] [] [] -1 1 \N \N \N \N 0 [] [] [] -2 2 \N \N \N \N 0 [] [] [] -3 3 3 \N 3 \N 0 [] [] [] -4 4 4 \N 4 \N 0 [] [] [] -5 5 5 \N 5 \N 0 [] [] [] -6 6 str_6 str_6 \N \N 0 [] [] [] -7 7 str_7 str_7 \N \N 0 [] [] [] -8 8 str_8 str_8 \N \N 0 [] [] [] -9 9 \N \N \N \N 0 [] [] [] -10 10 \N \N \N \N 0 [] [] [] -11 11 \N \N \N \N 0 [] [] [] -12 12 12 \N 12 \N 0 [] [] [] -13 13 str_13 str_13 \N \N 0 [] [] [] -14 14 \N \N \N \N 0 [] [] [] -15 15 [15] \N \N \N 0 [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N 0 [17] [NULL] [NULL] diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2.sh b/tests/queries/0_stateless/03040_dynamic_type_alters_2.sh deleted file mode 100755 index 6491e64372f..00000000000 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_2.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function run() -{ - echo "initial insert" - $CH_CLIENT -q "insert into test select number, number from numbers(3)" - - echo "alter add column" - $CH_CLIENT -q "alter table test add column d Dynamic settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "insert after alter add column 1" - $CH_CLIENT -q "insert into test select number, number, number from numbers(3, 3)" - $CH_CLIENT -q "insert into test select number, number, 'str_' || toString(number) from numbers(6, 3)" - $CH_CLIENT -q "insert into test select number, number, NULL from numbers(9, 3)" - $CH_CLIENT -q "insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3)" - $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" - $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.Date, d.\`Tuple(a UInt64)\`.a from test order by x" - - echo "alter rename column 1" - $CH_CLIENT -q "alter table test rename column d to d1 settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1)" - $CH_CLIENT -q "select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.\`Tuple(a UInt64)\`.a from test order by x" - - echo "insert nested dynamic" - $CH_CLIENT -q "insert into test select number, number, [number % 2 ? 
number : 'str_' || toString(number)]::Array(Dynamic) from numbers(15, 3)" - $CH_CLIENT -q "select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1)" - $CH_CLIENT -q "select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.\`Tuple(a UInt64)\`.a, d1.\`Array(Dynamic)\`.UInt64, d1.\`Array(Dynamic)\`.String, d1.\`Array(Dynamic)\`.Date from test order by x" - - echo "alter rename column 2" - $CH_CLIENT -q "alter table test rename column d1 to d2 settings mutations_sync=1" - $CH_CLIENT -q "select count(), dynamicType(d2) from test group by dynamicType(d2) order by count(), dynamicType(d2)" - $CH_CLIENT -q "select x, y, d2, d2.String, d2.UInt64, d2.Date, d2.\`Tuple(a UInt64)\`.a, d2.\`Array(Dynamic)\`.UInt64, d2.\`Array(Dynamic)\`.String, d2.\`Array(Dynamic)\`.Date, from test order by x" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" -run -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (x UInt64, y UInt64 ) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -run -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference new file mode 100644 index 00000000000..a2f2a19805d --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference @@ -0,0 +1,90 @@ +initial insert +alter add column +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 +insert after alter add column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +alter rename column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +insert nested dynamic +3 Array(Dynamic) +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] +alter rename column 2 +3 Array(Dynamic) +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] 
+11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.sql new file mode 100644 index 00000000000..cead110dd7d --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.sql @@ -0,0 +1,39 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000; + +select 'initial insert'; +insert into test select number, number from numbers(3); + +select 'alter add column'; +alter table test add column d Dynamic settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter add column 1'; +insert into test select number, number, number from numbers(3, 3); +insert into test select number, number, 'str_' || toString(number) from numbers(6, 3); +insert into test select number, number, NULL from numbers(9, 3); +insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter rename column 1'; +alter table test rename column d to d1 settings mutations_sync=1; +select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1); +select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a from test order by x; + +select 'insert nested dynamic'; +insert into test select number, number, [number % 2 ? 
number : 'str_' || toString(number)]::Array(Dynamic) from numbers(15, 3); +select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1); +select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a, d1.`Array(Dynamic)`.UInt64, d1.`Array(Dynamic)`.String, d1.`Array(Dynamic)`.Date from test order by x; + +select 'alter rename column 2'; +alter table test rename column d1 to d2 settings mutations_sync=1; +select count(), dynamicType(d2) from test group by dynamicType(d2) order by count(), dynamicType(d2); +select x, y, d2, d2.String, d2.UInt64, d2.Date, d2.`Tuple(a UInt64)`.a, d2.`Array(Dynamic)`.UInt64, d2.`Array(Dynamic)`.String, d2.`Array(Dynamic)`.Date, from test order by x; + +drop table test; diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference new file mode 100644 index 00000000000..a2f2a19805d --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference @@ -0,0 +1,90 @@ +initial insert +alter add column +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 +insert after alter add column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +alter rename column 1 +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +insert nested dynamic +3 Array(Dynamic) +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] +alter rename column 2 +3 Array(Dynamic) +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.sql new file mode 100644 index 00000000000..f58599b1d61 --- /dev/null +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.sql 
@@ -0,0 +1,39 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +select 'initial insert'; +insert into test select number, number from numbers(3); + +select 'alter add column'; +alter table test add column d Dynamic settings mutations_sync=1; +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; + +select 'insert after alter add column 1'; +insert into test select number, number, number from numbers(3, 3); +insert into test select number, number, 'str_' || toString(number) from numbers(6, 3); +insert into test select number, number, NULL from numbers(9, 3); +insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); +select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; + +select 'alter rename column 1'; +alter table test rename column d to d1 settings mutations_sync=1; +select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1); +select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a from test order by x; + +select 'insert nested dynamic'; +insert into test select number, number, [number % 2 ? number : 'str_' || toString(number)]::Array(Dynamic) from numbers(15, 3); +select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1); +select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a, d1.`Array(Dynamic)`.UInt64, d1.`Array(Dynamic)`.String, d1.`Array(Dynamic)`.Date from test order by x; + +select 'alter rename column 2'; +alter table test rename column d1 to d2 settings mutations_sync=1; +select count(), dynamicType(d2) from test group by dynamicType(d2) order by count(), dynamicType(d2); +select x, y, d2, d2.String, d2.UInt64, d2.Date, d2.`Tuple(a UInt64)`.a, d2.`Array(Dynamic)`.UInt64, d2.`Array(Dynamic)`.String, d2.`Array(Dynamic)`.Date, from test order by x; + +drop table test; From 6a3f49ada4d8319edf34fdf91d5eee007df48e26 Mon Sep 17 00:00:00 2001 From: Linh Giang <165205637+linhgiang24@users.noreply.github.com> Date: Tue, 23 Jul 2024 13:23:35 -0600 Subject: [PATCH 0758/2361] Included "ON CLUSTER cluster_name" syntax for RELOAD DICTIONARIES command Included "ON CLUSTER cluster_name" syntax for RELOAD DICTIONARY and RELOAD DICTIONARIES commands to ensure execution on all replicas. Also to conform with the rest of the document. --- docs/en/sql-reference/statements/system.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index e6d3439d2b9..f4780fd41c1 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -18,6 +18,12 @@ Reloads all dictionaries that have been successfully loaded before. 
By default, dictionaries are loaded lazily (see [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through dictGet function or SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED). Always returns `Ok.` regardless of the result of the dictionary update. +**Syntax** + +```sql +SYSTEM RELOAD DICTIONARIES [ON CLUSTER cluster_name] +``` + ## RELOAD DICTIONARY Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT_LOADED / FAILED). @@ -25,6 +31,8 @@ Always returns `Ok.` regardless of the result of updating the dictionary. The status of the dictionary can be checked by querying the `system.dictionaries` table. ``` sql +SYSTEM RELOAD DICTIONARY [ON CLUSTER cluster_name] dictionary_name + SELECT name, status FROM system.dictionaries; ``` From 190421aafa83bf54c3921ae17c91cbad8c28bd1d Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 23 Jul 2024 20:38:54 +0100 Subject: [PATCH 0759/2361] impl --- .../01508_partition_pruning_long.sh | 30 ----- ... 01508_partition_pruning_long_1.reference} | 119 ------------------ .../01508_partition_pruning_long_1.sh | 79 ++++++++++++ .../01508_partition_pruning_long_2.reference | 119 ++++++++++++++++++ ...ries => 01508_partition_pruning_long_2.sh} | 80 +++++------- 5 files changed, 228 insertions(+), 199 deletions(-) delete mode 100755 tests/queries/0_stateless/01508_partition_pruning_long.sh rename tests/queries/0_stateless/{01508_partition_pruning_long.reference => 01508_partition_pruning_long_1.reference} (50%) create mode 100755 tests/queries/0_stateless/01508_partition_pruning_long_1.sh create mode 100644 tests/queries/0_stateless/01508_partition_pruning_long_2.reference rename tests/queries/0_stateless/{01508_partition_pruning_long.queries => 01508_partition_pruning_long_2.sh} (58%) mode change 100644 => 100755 diff --git a/tests/queries/0_stateless/01508_partition_pruning_long.sh b/tests/queries/0_stateless/01508_partition_pruning_long.sh deleted file mode 100755 index 7b56d8bbf03..00000000000 --- a/tests/queries/0_stateless/01508_partition_pruning_long.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Tags: long, no-polymorphic-parts, no-random-settings, no-random-merge-tree-settings, no-debug - -# Description of test result: -# Test the correctness of the partition pruning -# -# Script executes queries from a file 01508_partition_pruning_long.queries (1 line = 1 query) -# Queries are started with 'select' (but NOT with 'SELECT') are executed with log_level=debug - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - - -queries="${CURDIR}/01508_partition_pruning_long.queries" -while IFS= read -r sql -do - [ -z "$sql" ] && continue - if [[ "$sql" == select* ]] ; - then - echo "$sql" - ${CLICKHOUSE_CLIENT} --query "$sql" - CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') - ${CLICKHOUSE_CLIENT} --query "$sql" 2>&1 | grep -oh "Selected .* parts by partition key, *. 
parts by primary key, .* marks by primary key, .* marks to read from .* ranges.*$" - CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/--send_logs_level=debug/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/g') - echo "" - else - ${CLICKHOUSE_CLIENT} --query "$sql" - fi -done < "$queries" diff --git a/tests/queries/0_stateless/01508_partition_pruning_long.reference b/tests/queries/0_stateless/01508_partition_pruning_long_1.reference similarity index 50% rename from tests/queries/0_stateless/01508_partition_pruning_long.reference rename to tests/queries/0_stateless/01508_partition_pruning_long_1.reference index afdb4257505..3ea4cc4f6ee 100644 --- a/tests/queries/0_stateless/01508_partition_pruning_long.reference +++ b/tests/queries/0_stateless/01508_partition_pruning_long_1.reference @@ -123,122 +123,3 @@ select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 2 2 20000 Selected 2/3 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges ---------- tDD ---------------------------- -select uniqExact(_part), count() from tDD where toDate(d)=toDate('2020-09-24'); -1 10000 -Selected 1/4 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - -select uniqExact(_part), count() FROM tDD WHERE toDate(d) = toDate('2020-09-24'); -1 10000 -Selected 1/4 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - -select uniqExact(_part), count() FROM tDD WHERE toDate(d) = '2020-09-24'; -1 10000 -Selected 1/4 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - -select uniqExact(_part), count() FROM tDD WHERE toDate(d) >= '2020-09-23' and toDate(d) <= '2020-09-26'; -3 40000 -Selected 3/4 parts by partition key, 3 parts by primary key, 4/4 marks by primary key, 4 marks to read from 3 ranges - -select uniqExact(_part), count() FROM tDD WHERE toYYYYMMDD(d) >= 20200923 and toDate(d) <= '2020-09-26'; -3 40000 -Selected 3/4 parts by partition key, 3 parts by primary key, 4/4 marks by primary key, 4 marks to read from 3 ranges - ---------- sDD ---------------------------- -select uniqExact(_part), count() from sDD; -6 30000 -Selected 6/6 parts by partition key, 6 parts by primary key, 6/6 marks by primary key, 6 marks to read from 6 ranges - -select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1)+1 = 202010; -3 9999 -Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges - -select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1) = 202010; -2 9999 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1) = 202110; -0 0 -Selected 0/6 parts by partition key, 0 parts by primary key, 0/0 marks by primary key, 0 marks to read from 0 ranges - -select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC'))+1 > 202009 and toStartOfDay(toDateTime(intDiv(d,1000),'UTC')) < toDateTime('2020-10-02 00:00:00','UTC'); -3 11440 -Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges - -select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC'))+1 > 202009 and 
toDateTime(intDiv(d,1000),'UTC') < toDateTime('2020-10-01 00:00:00','UTC'); -2 10000 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from sDD where d >= 1598918400000; -4 20000 -Selected 4/6 parts by partition key, 4 parts by primary key, 4/4 marks by primary key, 4 marks to read from 4 ranges - -select uniqExact(_part), count() from sDD where d >= 1598918400000 and toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1) < 202010; -3 10001 -Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges - ---------- xMM ---------------------------- -select uniqExact(_part), count() from xMM where toStartOfDay(d) >= '2020-10-01 00:00:00'; -2 10000 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00'; -3 10001 -Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00'; -2 10000 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a=1; -1 1 -Selected 1/6 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a<>3; -2 5001 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00' and a<>3; -1 5000 -Selected 1/6 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-11-01 00:00:00' and a = 1; -2 10000 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where a = 1; -3 15000 -Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges - -select uniqExact(_part), count() from xMM where a = 66; -0 0 -Selected 0/6 parts by partition key, 0 parts by primary key, 0/0 marks by primary key, 0 marks to read from 0 ranges - -select uniqExact(_part), count() from xMM where a <> 66; -6 30000 -Selected 6/6 parts by partition key, 6 parts by primary key, 6/6 marks by primary key, 6 marks to read from 6 ranges - -select uniqExact(_part), count() from xMM where a = 2; -2 10000 -Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where a = 1; -2 15000 -Selected 2/5 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where toStartOfDay(d) >= '2020-10-01 00:00:00'; -1 10000 -Selected 1/5 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - -select uniqExact(_part), count() from 
xMM where a <> 66; -5 30000 -Selected 5/5 parts by partition key, 5 parts by primary key, 5/5 marks by primary key, 5 marks to read from 5 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a<>3; -2 5001 -Selected 2/5 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges - -select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00' and a<>3; -1 5000 -Selected 1/5 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges - diff --git a/tests/queries/0_stateless/01508_partition_pruning_long_1.sh b/tests/queries/0_stateless/01508_partition_pruning_long_1.sh new file mode 100755 index 00000000000..512cf8f5265 --- /dev/null +++ b/tests/queries/0_stateless/01508_partition_pruning_long_1.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# Tags: long, no-polymorphic-parts, no-random-settings, no-random-merge-tree-settings, no-debug + +# Description of test result: +# Test the correctness of the partition pruning +# +# Script executes queries from a file 01508_partition_pruning_long.queries (1 line = 1 query) +# Queries are started with 'select' (but NOT with 'SELECT') are executed with log_level=debug + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +while IFS= read -r sql +do + [ -z "$sql" ] && continue + if [[ "$sql" == select* ]] ; + then + echo "$sql" + ${CLICKHOUSE_CLIENT} --query "$sql" + CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') + ${CLICKHOUSE_CLIENT} --query "$sql" 2>&1 | grep -oh "Selected .* parts by partition key, *. 
parts by primary key, .* marks by primary key, .* marks to read from .* ranges.*$" + CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/--send_logs_level=debug/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/g') + echo "" + else + ${CLICKHOUSE_CLIENT} --query "$sql" + fi +done <<< " +DROP TABLE IF EXISTS tMM; + +CREATE TABLE tMM(d DateTime('Asia/Istanbul'), a Int64) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY tuple() SETTINGS index_granularity = 8192; +SYSTEM STOP MERGES tMM; +INSERT INTO tMM SELECT toDateTime('2020-08-16 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); +INSERT INTO tMM SELECT toDateTime('2020-08-16 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); +INSERT INTO tMM SELECT toDateTime('2020-09-01 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); +INSERT INTO tMM SELECT toDateTime('2020-09-01 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); +INSERT INTO tMM SELECT toDateTime('2020-10-01 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); +INSERT INTO tMM SELECT toDateTime('2020-10-15 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); + +SELECT '--------- tMM ----------------------------'; +select uniqExact(_part), count() from tMM where toDate(d)=toDate('2020-09-15'); +select uniqExact(_part), count() from tMM where toDate(d)=toDate('2020-09-01'); +select uniqExact(_part), count() from tMM where toDate(d)=toDate('2020-10-15'); +select uniqExact(_part), count() from tMM where toDate(d)='2020-09-15'; +select uniqExact(_part), count() from tMM where toYYYYMM(d)=202009; +select uniqExact(_part), count() from tMM where toYYYYMMDD(d)=20200816; +select uniqExact(_part), count() from tMM where toYYYYMMDD(d)=20201015; +select uniqExact(_part), count() from tMM where toDate(d)='2020-10-15'; +select uniqExact(_part), count() from tMM where d >= '2020-09-01 00:00:00' and d<'2020-10-15 00:00:00'; +select uniqExact(_part), count() from tMM where d >= '2020-01-16 00:00:00' and d < toDateTime('2021-08-17 00:00:00', 'Asia/Istanbul'); +select uniqExact(_part), count() from tMM where d >= '2020-09-16 00:00:00' and d < toDateTime('2020-10-01 00:00:00', 'Asia/Istanbul'); +select uniqExact(_part), count() from tMM where d >= '2020-09-12 00:00:00' and d < '2020-10-16 00:00:00'; +select uniqExact(_part), count() from tMM where toStartOfDay(d) >= '2020-09-12 00:00:00'; +select uniqExact(_part), count() from tMM where toStartOfDay(d) = '2020-09-01 00:00:00'; +select uniqExact(_part), count() from tMM where toStartOfDay(d) = '2020-10-01 00:00:00'; +select uniqExact(_part), count() from tMM where toStartOfDay(d) >= '2020-09-15 00:00:00' and d < '2020-10-16 00:00:00'; +select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202010; +select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202009; +select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202010 and toStartOfDay(d) = '2020-10-01 00:00:00'; +select uniqExact(_part), count() from tMM where toYYYYMM(d) >= 202009 and toStartOfDay(d) < '2020-10-02 00:00:00'; +select uniqExact(_part), count() from tMM where toYYYYMM(d) > 202009 and toStartOfDay(d) < '2020-10-02 00:00:00'; +select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202009 and toStartOfDay(d) < '2020-10-02 00:00:00'; +select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202010 and toStartOfDay(d) < '2020-10-02 00:00:00'; +select uniqExact(_part), count() from tMM where 
toYYYYMM(d)+1 > 202010; +select uniqExact(_part), count() from tMM where toYYYYMM(d-1)+1 = 202010; +select uniqExact(_part), count() from tMM where toStartOfMonth(d) >= '2020-09-15'; +select uniqExact(_part), count() from tMM where toStartOfMonth(d) >= '2020-09-01'; +select uniqExact(_part), count() from tMM where toStartOfMonth(d) >= '2020-09-01' and toStartOfMonth(d) < '2020-10-01'; + +SYSTEM START MERGES tMM; +OPTIMIZE TABLE tMM FINAL; + +select uniqExact(_part), count() from tMM where toYYYYMM(d-1)+1 = 202010; +select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202010; +select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202010; + +DROP TABLE tMM; +" diff --git a/tests/queries/0_stateless/01508_partition_pruning_long_2.reference b/tests/queries/0_stateless/01508_partition_pruning_long_2.reference new file mode 100644 index 00000000000..bc767f17031 --- /dev/null +++ b/tests/queries/0_stateless/01508_partition_pruning_long_2.reference @@ -0,0 +1,119 @@ +--------- tDD ---------------------------- +select uniqExact(_part), count() from tDD where toDate(d)=toDate('2020-09-24'); +1 10000 +Selected 1/4 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + +select uniqExact(_part), count() FROM tDD WHERE toDate(d) = toDate('2020-09-24'); +1 10000 +Selected 1/4 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + +select uniqExact(_part), count() FROM tDD WHERE toDate(d) = '2020-09-24'; +1 10000 +Selected 1/4 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + +select uniqExact(_part), count() FROM tDD WHERE toDate(d) >= '2020-09-23' and toDate(d) <= '2020-09-26'; +3 40000 +Selected 3/4 parts by partition key, 3 parts by primary key, 4/4 marks by primary key, 4 marks to read from 3 ranges + +select uniqExact(_part), count() FROM tDD WHERE toYYYYMMDD(d) >= 20200923 and toDate(d) <= '2020-09-26'; +3 40000 +Selected 3/4 parts by partition key, 3 parts by primary key, 4/4 marks by primary key, 4 marks to read from 3 ranges + +--------- sDD ---------------------------- +select uniqExact(_part), count() from sDD; +6 30000 +Selected 6/6 parts by partition key, 6 parts by primary key, 6/6 marks by primary key, 6 marks to read from 6 ranges + +select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1)+1 = 202010; +3 9999 +Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges + +select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1) = 202010; +2 9999 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1) = 202110; +0 0 +Selected 0/6 parts by partition key, 0 parts by primary key, 0/0 marks by primary key, 0 marks to read from 0 ranges + +select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC'))+1 > 202009 and toStartOfDay(toDateTime(intDiv(d,1000),'UTC')) < toDateTime('2020-10-02 00:00:00','UTC'); +3 11440 +Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges + +select uniqExact(_part), count() from sDD where toYYYYMM(toDateTime(intDiv(d,1000),'UTC'))+1 > 202009 and toDateTime(intDiv(d,1000),'UTC') < toDateTime('2020-10-01 
00:00:00','UTC'); +2 10000 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from sDD where d >= 1598918400000; +4 20000 +Selected 4/6 parts by partition key, 4 parts by primary key, 4/4 marks by primary key, 4 marks to read from 4 ranges + +select uniqExact(_part), count() from sDD where d >= 1598918400000 and toYYYYMM(toDateTime(intDiv(d,1000),'UTC')-1) < 202010; +3 10001 +Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges + +--------- xMM ---------------------------- +select uniqExact(_part), count() from xMM where toStartOfDay(d) >= '2020-10-01 00:00:00'; +2 10000 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00'; +3 10001 +Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00'; +2 10000 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a=1; +1 1 +Selected 1/6 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a<>3; +2 5001 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00' and a<>3; +1 5000 +Selected 1/6 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-11-01 00:00:00' and a = 1; +2 10000 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where a = 1; +3 15000 +Selected 3/6 parts by partition key, 3 parts by primary key, 3/3 marks by primary key, 3 marks to read from 3 ranges + +select uniqExact(_part), count() from xMM where a = 66; +0 0 +Selected 0/6 parts by partition key, 0 parts by primary key, 0/0 marks by primary key, 0 marks to read from 0 ranges + +select uniqExact(_part), count() from xMM where a <> 66; +6 30000 +Selected 6/6 parts by partition key, 6 parts by primary key, 6/6 marks by primary key, 6 marks to read from 6 ranges + +select uniqExact(_part), count() from xMM where a = 2; +2 10000 +Selected 2/6 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where a = 1; +2 15000 +Selected 2/5 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where toStartOfDay(d) >= '2020-10-01 00:00:00'; +1 10000 +Selected 1/5 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + +select uniqExact(_part), count() from xMM where a <> 66; +5 30000 +Selected 5/5 parts by 
partition key, 5 parts by primary key, 5/5 marks by primary key, 5 marks to read from 5 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a<>3; +2 5001 +Selected 2/5 parts by partition key, 2 parts by primary key, 2/2 marks by primary key, 2 marks to read from 2 ranges + +select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00' and a<>3; +1 5000 +Selected 1/5 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges + diff --git a/tests/queries/0_stateless/01508_partition_pruning_long.queries b/tests/queries/0_stateless/01508_partition_pruning_long_2.sh old mode 100644 new mode 100755 similarity index 58% rename from tests/queries/0_stateless/01508_partition_pruning_long.queries rename to tests/queries/0_stateless/01508_partition_pruning_long_2.sh index 0d64fc05f0f..3f8a89bdb20 --- a/tests/queries/0_stateless/01508_partition_pruning_long.queries +++ b/tests/queries/0_stateless/01508_partition_pruning_long_2.sh @@ -1,15 +1,35 @@ -DROP TABLE IF EXISTS tMM; +#!/usr/bin/env bash +# Tags: long, no-polymorphic-parts, no-random-settings, no-random-merge-tree-settings, no-debug + +# Description of the test: +# Tests the correctness of partition pruning. +# +# The script executes the queries listed below (1 line = 1 query) +# Queries that start with 'select' (but NOT with 'SELECT') are executed with send_logs_level=debug + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +while IFS= read -r sql +do + [ -z "$sql" ] && continue + if [[ "$sql" == select* ]] ; + then + echo "$sql" + ${CLICKHOUSE_CLIENT} --query "$sql" + CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') + ${CLICKHOUSE_CLIENT} --query "$sql" 2>&1 | grep -oh "Selected .* parts by partition key, *. 
parts by primary key, .* marks by primary key, .* marks to read from .* ranges.*$" + CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/--send_logs_level=debug/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/g') + echo "" + else + ${CLICKHOUSE_CLIENT} --query "$sql" + fi +done <<< " DROP TABLE IF EXISTS tDD; DROP TABLE IF EXISTS sDD; DROP TABLE IF EXISTS xMM; -CREATE TABLE tMM(d DateTime('Asia/Istanbul'), a Int64) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY tuple() SETTINGS index_granularity = 8192; -SYSTEM STOP MERGES tMM; -INSERT INTO tMM SELECT toDateTime('2020-08-16 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); -INSERT INTO tMM SELECT toDateTime('2020-08-16 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); -INSERT INTO tMM SELECT toDateTime('2020-09-01 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); -INSERT INTO tMM SELECT toDateTime('2020-09-01 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); -INSERT INTO tMM SELECT toDateTime('2020-10-01 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); -INSERT INTO tMM SELECT toDateTime('2020-10-15 00:00:00', 'Asia/Istanbul') + number*60, number FROM numbers(5000); CREATE TABLE tDD(d DateTime('Asia/Istanbul'),a Int) ENGINE = MergeTree PARTITION BY toYYYYMMDD(d) ORDER BY tuple() SETTINGS index_granularity = 8192; SYSTEM STOP MERGES tDD; @@ -34,44 +54,6 @@ INSERT INTO xMM SELECT toDateTime('2020-10-01 00:00:00', 'Asia/Istanbul') + numb INSERT INTO xMM SELECT toDateTime('2020-10-15 00:00:00', 'Asia/Istanbul') + number*60, 1, number FROM numbers(5000); -SELECT '--------- tMM ----------------------------'; -select uniqExact(_part), count() from tMM where toDate(d)=toDate('2020-09-15'); -select uniqExact(_part), count() from tMM where toDate(d)=toDate('2020-09-01'); -select uniqExact(_part), count() from tMM where toDate(d)=toDate('2020-10-15'); -select uniqExact(_part), count() from tMM where toDate(d)='2020-09-15'; -select uniqExact(_part), count() from tMM where toYYYYMM(d)=202009; -select uniqExact(_part), count() from tMM where toYYYYMMDD(d)=20200816; -select uniqExact(_part), count() from tMM where toYYYYMMDD(d)=20201015; -select uniqExact(_part), count() from tMM where toDate(d)='2020-10-15'; -select uniqExact(_part), count() from tMM where d >= '2020-09-01 00:00:00' and d<'2020-10-15 00:00:00'; -select uniqExact(_part), count() from tMM where d >= '2020-01-16 00:00:00' and d < toDateTime('2021-08-17 00:00:00', 'Asia/Istanbul'); -select uniqExact(_part), count() from tMM where d >= '2020-09-16 00:00:00' and d < toDateTime('2020-10-01 00:00:00', 'Asia/Istanbul'); -select uniqExact(_part), count() from tMM where d >= '2020-09-12 00:00:00' and d < '2020-10-16 00:00:00'; -select uniqExact(_part), count() from tMM where toStartOfDay(d) >= '2020-09-12 00:00:00'; -select uniqExact(_part), count() from tMM where toStartOfDay(d) = '2020-09-01 00:00:00'; -select uniqExact(_part), count() from tMM where toStartOfDay(d) = '2020-10-01 00:00:00'; -select uniqExact(_part), count() from tMM where toStartOfDay(d) >= '2020-09-15 00:00:00' and d < '2020-10-16 00:00:00'; -select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202010; -select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202009; -select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202010 and toStartOfDay(d) = '2020-10-01 00:00:00'; -select uniqExact(_part), count() from tMM where toYYYYMM(d) >= 202009 and 
toStartOfDay(d) < '2020-10-02 00:00:00'; -select uniqExact(_part), count() from tMM where toYYYYMM(d) > 202009 and toStartOfDay(d) < '2020-10-02 00:00:00'; -select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202009 and toStartOfDay(d) < '2020-10-02 00:00:00'; -select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202010 and toStartOfDay(d) < '2020-10-02 00:00:00'; -select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202010; -select uniqExact(_part), count() from tMM where toYYYYMM(d-1)+1 = 202010; -select uniqExact(_part), count() from tMM where toStartOfMonth(d) >= '2020-09-15'; -select uniqExact(_part), count() from tMM where toStartOfMonth(d) >= '2020-09-01'; -select uniqExact(_part), count() from tMM where toStartOfMonth(d) >= '2020-09-01' and toStartOfMonth(d) < '2020-10-01'; - -SYSTEM START MERGES tMM; -OPTIMIZE TABLE tMM FINAL; - -select uniqExact(_part), count() from tMM where toYYYYMM(d-1)+1 = 202010; -select uniqExact(_part), count() from tMM where toYYYYMM(d)+1 > 202010; -select uniqExact(_part), count() from tMM where toYYYYMM(d) between 202009 and 202010; - - SELECT '--------- tDD ----------------------------'; SYSTEM START MERGES tDD; OPTIMIZE TABLE tDD FINAL; @@ -116,9 +98,7 @@ select uniqExact(_part), count() from xMM where a <> 66; select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d <= '2020-10-01 00:00:00' and a<>3; select uniqExact(_part), count() from xMM where d >= '2020-09-01 00:00:00' and d < '2020-10-01 00:00:00' and a<>3; -DROP TABLE tMM; DROP TABLE tDD; DROP TABLE sDD; DROP TABLE xMM; - - +" From beed31a7e38a6484aec156754106570a34dcb5a9 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 23 Jul 2024 20:07:47 +0000 Subject: [PATCH 0760/2361] Fix use-of-uninitialized-value in JSONExtract* numeric functions --- src/Functions/FunctionsJSON.cpp | 3 ++- .../03209_functions_json_msan_fuzzer_issue.reference | 1 + .../0_stateless/03209_functions_json_msan_fuzzer_issue.sql | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.reference create mode 100644 tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.sql diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 47040545677..848856c500f 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -739,7 +739,8 @@ public: { NumberType value; - tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, error); + if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, error)) + return false; auto & col_vec = assert_cast &>(dest); col_vec.insertValue(value); return true; diff --git a/tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.reference b/tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.reference new file mode 100644 index 00000000000..e02f3666d40 --- /dev/null +++ b/tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.reference @@ -0,0 +1 @@ +0 0 0 1.1 diff --git a/tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.sql b/tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.sql new file mode 100644 index 00000000000..a05b07d5971 --- /dev/null +++ b/tests/queries/0_stateless/03209_functions_json_msan_fuzzer_issue.sql @@ -0,0 +1,2 @@ +WITH '{ "v":1.1}' AS raw SELECT JSONExtract(raw, 'float') AS float32_1, JSONExtract(concat(tuple('1970-01-05', 10, materialize(10), 10, 10, 10, toUInt256(10), 10, 
toNullable(10), 10, 10), materialize(toUInt256(0)), ', ', 2, 2, toLowCardinality(toLowCardinality(2))), raw, toLowCardinality('v'), 'Float32') AS float32_2, JSONExtractFloat(raw) AS float64_1, JSONExtract(raw, 'v', 'double') AS float64_2; + From 841012698dda51d20b3162c8a6f44fc3e870d608 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 23 Jul 2024 21:47:20 +0000 Subject: [PATCH 0761/2361] Add brief docs to `StorageKafka2` --- src/Storages/Kafka/KafkaConsumer2.cpp | 23 ----------------------- src/Storages/Kafka/KafkaConsumer2.h | 9 ++++----- src/Storages/Kafka/StorageKafka2.h | 22 +++++++++++++++++----- 3 files changed, 21 insertions(+), 33 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index aaefd6fd6f5..8659465a805 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -173,29 +173,6 @@ void KafkaConsumer2::pollEvents() } }; -KafkaConsumer2::TopicPartitionCounts KafkaConsumer2::getPartitionCounts() const -{ - TopicPartitionCounts result; - try - { - auto metadata = consumer->get_metadata(); - auto topic_metadatas = metadata.get_topics(); - - for (auto & topic_metadata : topic_metadatas) - { - if (const auto it = std::find(topics.begin(), topics.end(), topic_metadata.get_name()); it != topics.end()) - { - result.push_back({topic_metadata.get_name(), topic_metadata.get_partitions().size()}); - } - } - } - catch (cppkafka::HandleException & e) - { - chassert(e.what() != nullptr); - } - return result; -} - bool KafkaConsumer2::polledDataUnusable(const TopicPartition & topic_partition) const { const auto different_topic_partition = current == messages.end() diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 57b157416c6..3c91df8a02f 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -81,10 +81,10 @@ public: ~KafkaConsumer2(); + // Poll only the main consumer queue without any topic-partition queues. This is useful to get notified about events, such as rebalance, + // new assignment, etc.. void pollEvents(); - TopicPartitionCounts getPartitionCounts() const; - auto pollTimeout() const { return poll_timeout; } inline bool hasMorePolledMessages() const { return (stalled_status == StalledStatus::NOT_STALLED) && (current != messages.end()); } @@ -96,12 +96,11 @@ public: TopicPartitions const * getKafkaAssignment() const; // As the main source of offsets is not Kafka, the offsets needs to be pushed to the consumer from outside + // Returns true if it received new assignment and internal state should be updated by updateOffsets bool needsOffsetUpdate() const { return needs_offset_update; } - - // Returns true if it received new assignment and could update the internal state accordingly, false otherwise void updateOffsets(const TopicPartitions & topic_partitions); - /// Polls batch of messages from Kafka and returns read buffer containing the next message or + /// Polls batch of messages from the given topic-partition and returns read buffer containing the next message or /// nullptr when there are no messages to process. 
ReadBufferPtr consume(const TopicPartition & topic_partition, const std::optional & message_count); diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index d6e564b76f5..834125d52b7 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -34,9 +34,21 @@ struct StorageKafkaInterceptors; using KafkaConsumer2Ptr = std::shared_ptr; -/** Implements a Kafka queue table engine that can be used as a persistent queue / buffer, - * or as a basic building block for creating pipelines with a continuous insertion / ETL. - */ +/// Implements a Kafka queue table engine that can be used as a persistent queue / buffer, +/// or as a basic building block for creating pipelines with a continuous insertion / ETL. +/// +/// It is similar to the already existing StorageKafka, but instead of storing the offsets +/// in Kafka, its main source of information about offsets is Keeper. On top of the +/// offsets, it also stores the number of messages (intent size) it tried to insert from +/// each topic. By storing the intent sizes it is possible to retry the same batch of +/// messages in case of any errors, giving deduplication a chance to deduplicate +/// blocks. +/// +/// To not complicate things too much, the current implementation makes sure to fetch +/// messages only from a single topic-partition on a single thread at a time by +/// manipulating the queues of librdkafka. By pulling from multiple topic-partitions +/// the order of messages is not guaranteed, therefore they would have different +/// hashes for deduplication. class StorageKafka2 final : public IStorage, WithContext { using StorageKafkaInterceptors = StorageKafkaInterceptors; @@ -97,7 +109,7 @@ private: struct ConsumerAndAssignmentInfo { - KafkaConsumer2Ptr consumer; /// available consumers + KafkaConsumer2Ptr consumer; size_t consume_from_topic_partition_index{0}; TopicPartitions topic_partitions{}; zkutil::ZooKeeperPtr keeper; @@ -204,7 +216,7 @@ private: void createReplica(); void dropReplica(); - // Takes lock over topic partitions and set's the committed offset in topic_partitions + // Takes lock over topic partitions and sets the committed offset in topic_partitions. 
std::optional lockTopicPartitions(zkutil::ZooKeeper & keeper_to_use, const TopicPartitions & topic_partitions); void saveCommittedOffset(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition); void saveIntent(zkutil::ZooKeeper & keeper_to_use, const TopicPartition & topic_partition, int64_t intent); From 7a6233340e5a141a43e3a1ac7a1d33ea1c4d5a8b Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 23 Jul 2024 21:57:01 +0000 Subject: [PATCH 0762/2361] Hardening of test_backup_restore_new test --- .../test_backup_restore_new/test.py | 46 ++++++++++++++----- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index d8662fad011..5cbabe35b03 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -4,6 +4,7 @@ import re import random import os.path import sys +import uuid from collections import namedtuple from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, TSV @@ -538,7 +539,8 @@ def test_backup_not_found_or_already_exists(): def test_file_engine(): - backup_name = f"File('/backups/file/')" + id = uuid.uuid4() + backup_name = f"File('/backups/file/{id}/')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -549,6 +551,7 @@ def test_file_engine(): instance.query(f"RESTORE TABLE test.table FROM {backup_name}") assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query("DROP TABLE test.table") def test_database(): @@ -565,7 +568,8 @@ def test_database(): def test_zip_archive(): - backup_name = f"Disk('backups', 'archive.zip')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.zip')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -578,10 +582,12 @@ def test_zip_archive(): instance.query(f"RESTORE TABLE test.table FROM {backup_name}") assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query("DROP TABLE test.table") def test_zip_archive_with_settings(): - backup_name = f"Disk('backups', 'archive_with_settings.zip')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_with_settings_{id}.zip')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -596,10 +602,12 @@ def test_zip_archive_with_settings(): f"RESTORE TABLE test.table FROM {backup_name} SETTINGS password='qwerty'" ) assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query("DROP TABLE test.table") def test_zip_archive_with_bad_compression_method(): - backup_name = f"Disk('backups', 'archive_with_bad_compression_method.zip')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_with_bad_compression_method_{id}.zip')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -617,7 +625,8 @@ def test_zip_archive_with_bad_compression_method(): def test_tar_archive(): - backup_name = f"Disk('backups', 'archive.tar')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.tar')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -630,10 +639,12 @@ def test_tar_archive(): instance.query(f"RESTORE TABLE test.table FROM 
{backup_name}") assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query("DROP TABLE test.table") def test_tar_bz2_archive(): - backup_name = f"Disk('backups', 'archive.tar.bz2')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.tar.bz2')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -649,7 +660,8 @@ def test_tar_bz2_archive(): def test_tar_gz_archive(): - backup_name = f"Disk('backups', 'archive.tar.gz')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.tar.gz')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -665,7 +677,8 @@ def test_tar_gz_archive(): def test_tar_lzma_archive(): - backup_name = f"Disk('backups', 'archive.tar.lzma')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.tar.lzma')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -681,7 +694,8 @@ def test_tar_lzma_archive(): def test_tar_zst_archive(): - backup_name = f"Disk('backups', 'archive.tar.zst')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.tar.zst')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -697,7 +711,8 @@ def test_tar_zst_archive(): def test_tar_xz_archive(): - backup_name = f"Disk('backups', 'archive.tar.xz')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_{id}.tar.xz')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -713,7 +728,8 @@ def test_tar_xz_archive(): def test_tar_archive_with_password(): - backup_name = f"Disk('backups', 'archive_with_password.tar')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_with_password_{id}.tar')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -731,7 +747,8 @@ def test_tar_archive_with_password(): def test_tar_archive_with_bad_compression_method(): - backup_name = f"Disk('backups', 'archive_with_bad_compression_method.tar')" + id = uuid.uuid4() + backup_name = f"Disk('backups', 'archive_with_bad_compression_method_{id}.tar')" create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -1220,6 +1237,9 @@ def test_system_users_required_privileges(): assert instance.query("SHOW CREATE ROLE r1") == "CREATE ROLE r1\n" assert instance.query("SHOW GRANTS FOR r1") == "" + instance.query("DROP USER u1") + instance.query("DROP ROLE r1") + def test_system_users_async(): instance.query("CREATE USER u1 IDENTIFIED BY 'qwe123' SETTINGS custom_c = 3") @@ -1412,6 +1432,8 @@ def test_system_functions(): assert instance.query("SELECT number, parity_str(number) FROM numbers(3)") == TSV( [[0, "even"], [1, "odd"], [2, "even"]] ) + instance.query("DROP FUNCTION linear_equation") + instance.query("DROP FUNCTION parity_str") def test_backup_partition(): From d04ca9eea617a406be715fdf5d541e8d650b23d0 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 24 Jul 2024 00:08:32 +0200 Subject: [PATCH 0763/2361] fix --- tests/integration/runner | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/runner b/tests/integration/runner index fc13cb9807a..0667541b196 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -243,7 +243,9 @@ if __name__ == 
"__main__": "-n", "--parallel", action="store", dest="parallel", help="Parallelism" ) - parser.add_argument("--count", action="store", dest="count", help="Repeat count") + parser.add_argument( + "--count", action="store", type=int, dest="count", help="Repeat count" + ) parser.add_argument( "--no-random", @@ -320,7 +322,9 @@ if __name__ == "__main__": parallel_args += "--dist=loadfile" parallel_args += f" -n {args.parallel}".format() - repeat_args = f" --count {args.count}" if args.count > 0 else "" + repeat_args = ( + f" --count {args.count}" if args.count is not None and args.count > 0 else "" + ) rand_args = "" # if not args.no_random: From 8925060366e18e2fcb88d5d3c173097e524ce613 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 24 Jul 2024 00:18:12 +0200 Subject: [PATCH 0764/2361] Touch dockerfile to forcefully rebuild image --- docker/test/sqlancer/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/sqlancer/Dockerfile b/docker/test/sqlancer/Dockerfile index 82fc2598397..3b919ffb3e3 100644 --- a/docker/test/sqlancer/Dockerfile +++ b/docker/test/sqlancer/Dockerfile @@ -6,7 +6,7 @@ ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list RUN apt-get update --yes \ - && env DEBIAN_FRONTEND=noninteractive apt-get install wget git default-jdk maven python3 --yes --no-install-recommends \ + && env DEBIAN_FRONTEND=noninteractive apt-get install wget git python3 default-jdk maven --yes --no-install-recommends \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* From d460fa9f3618cdba64315bce579a93329463b7ad Mon Sep 17 00:00:00 2001 From: zoomxi <419486879@qq.com> Date: Wed, 24 Jul 2024 09:57:00 +0800 Subject: [PATCH 0765/2361] create coordinator after the number of replicas to use for the query is determined --- .../ClusterProxy/executeQuery.cpp | 3 --- src/Processors/QueryPlan/ReadFromRemote.cpp | 6 ++--- src/Processors/QueryPlan/ReadFromRemote.h | 1 - .../ParallelReplicasReadingCoordinator.cpp | 4 ++-- .../ParallelReplicasReadingCoordinator.h | 4 ---- .../test.py | 22 +++---------------- 6 files changed, 8 insertions(+), 32 deletions(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 5d56ef09127..59f095f7487 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -517,14 +517,11 @@ void executeQueryWithParallelReplicas( "`cluster_for_parallel_replicas` setting refers to cluster with several shards. 
Expected a cluster with one shard"); } - auto coordinator = std::make_shared( - new_cluster->getShardsInfo().begin()->getAllNodeCount(), settings.parallel_replicas_mark_segment_size); auto external_tables = new_context->getExternalTables(); auto read_from_remote = std::make_unique( query_ast, new_cluster, storage_id, - std::move(coordinator), header, processed_stage, new_context, diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index 29e12c1e613..cf11052cd59 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -362,7 +362,6 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( ASTPtr query_ast_, ClusterPtr cluster_, const StorageID & storage_id_, - ParallelReplicasReadingCoordinatorPtr coordinator_, Block header_, QueryProcessingStage::Enum stage_, ContextMutablePtr context_, @@ -375,7 +374,6 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( , cluster(cluster_) , query_ast(query_ast_) , storage_id(storage_id_) - , coordinator(std::move(coordinator_)) , stage(std::move(stage_)) , context(context_) , throttler(throttler_) @@ -429,7 +427,6 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder { shuffled_pool = shard.pool->getShuffledPools(current_settings); shuffled_pool.resize(max_replicas_to_use); - coordinator->adjustParticipatingReplicasCount(max_replicas_to_use); } else { @@ -439,6 +436,9 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder shuffled_pool = shard.pool->getShuffledPools(current_settings, priority_func); } + coordinator + = std::make_shared(max_replicas_to_use, current_settings.parallel_replicas_mark_segment_size); + for (size_t i=0; i < max_replicas_to_use; ++i) { IConnections::ReplicaInfo replica_info diff --git a/src/Processors/QueryPlan/ReadFromRemote.h b/src/Processors/QueryPlan/ReadFromRemote.h index eb15269155a..1adb26b2915 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.h +++ b/src/Processors/QueryPlan/ReadFromRemote.h @@ -70,7 +70,6 @@ public: ASTPtr query_ast_, ClusterPtr cluster_, const StorageID & storage_id_, - ParallelReplicasReadingCoordinatorPtr coordinator_, Block header_, QueryProcessingStage::Enum stage_, ContextMutablePtr context_, diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index 2ba66256116..f46b4de10b7 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -1031,7 +1031,7 @@ void ParallelReplicasReadingCoordinator::markReplicaAsUnavailable(size_t replica if (!pimpl) { unavailable_nodes_registered_before_initialization.push_back(replica_number); - if (unavailable_nodes_registered_before_initialization.size() == participating_replicas_count) + if (unavailable_nodes_registered_before_initialization.size() == replicas_count) throw Exception(ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Can't connect to any replica chosen for query execution"); } else @@ -1061,7 +1061,7 @@ void ParallelReplicasReadingCoordinator::initialize(CoordinationMode mode) } ParallelReplicasReadingCoordinator::ParallelReplicasReadingCoordinator(size_t replicas_count_, size_t mark_segment_size_) - : replicas_count(replicas_count_), participating_replicas_count(replicas_count_), mark_segment_size(mark_segment_size_) + : replicas_count(replicas_count_), 
mark_segment_size(mark_segment_size_) { } diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h index c06ef6ef01a..8b463fda395 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.h @@ -30,15 +30,11 @@ public: /// needed to report total rows to read void setProgressCallback(ProgressCallback callback); - /// Participating replicas count may be less than replicas count in a shard. - void adjustParticipatingReplicasCount(size_t count) { participating_replicas_count = count; } - private: void initialize(CoordinationMode mode); std::mutex mutex; const size_t replicas_count{0}; - size_t participating_replicas_count{0}; size_t mark_segment_size{0}; std::unique_ptr pimpl; ProgressCallback progress_callback; // store the callback only to bypass it to coordinator implementation diff --git a/tests/integration/test_parallel_replicas_no_replicas/test.py b/tests/integration/test_parallel_replicas_no_replicas/test.py index e05f72316d0..04e3a54e581 100644 --- a/tests/integration/test_parallel_replicas_no_replicas/test.py +++ b/tests/integration/test_parallel_replicas_no_replicas/test.py @@ -33,7 +33,8 @@ def create_tables(cluster, table_name): @pytest.mark.parametrize("skip_unavailable_shards", [1, 0]) -def test_skip_all_replicas(start_cluster, skip_unavailable_shards): +@pytest.mark.parametrize("max_parallel_replicas", [2, 3, 100]) +def test_skip_all_replicas(start_cluster, skip_unavailable_shards, max_parallel_replicas): cluster_name = "test_1_shard_3_unavaliable_replicas" table_name = "tt" create_tables(cluster_name, table_name) @@ -43,25 +44,8 @@ def test_skip_all_replicas(start_cluster, skip_unavailable_shards): f"SELECT key, count() FROM {table_name} GROUP BY key ORDER BY key", settings={ "allow_experimental_parallel_reading_from_replicas": 2, - "max_parallel_replicas": 3, + "max_parallel_replicas": max_parallel_replicas, "cluster_for_parallel_replicas": cluster_name, "skip_unavailable_shards": skip_unavailable_shards, }, ) - -@pytest.mark.parametrize("max_parallel_replicas", [2, 3, 100]) -def test_skip_all_participating_replicas(start_cluster, max_parallel_replicas): - cluster_name = "test_1_shard_3_unavaliable_replicas" - table_name = "tt1" - create_tables(cluster_name, table_name) - - with pytest.raises(QueryRuntimeException): - initiator.query( - f"SELECT key, count() FROM {table_name} GROUP BY key ORDER BY key", - settings={ - "allow_experimental_parallel_reading_from_replicas": 2, - "max_parallel_replicas": max_parallel_replicas, - "cluster_for_parallel_replicas": cluster_name, - "skip_unavailable_shards": 1, - }, - ) From b42069cfa80e66ab59669bb7ccb93c2944d91170 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 04:27:11 +0200 Subject: [PATCH 0766/2361] Adjust some tests --- .../0_stateless/00111_shard_external_sort_distributed.sql | 6 +++--- .../0_stateless/00463_long_sessions_in_http_interface.sh | 2 +- tests/queries/0_stateless/00601_kill_running_query.sh | 2 +- tests/queries/0_stateless/00976_max_execution_speed.sql | 2 +- .../0_stateless/01119_optimize_trivial_insert_select.sql | 5 +++-- .../queries/0_stateless/01245_limit_infinite_sources.sql | 1 + tests/queries/0_stateless/01249_flush_interactive.sh | 4 ++-- tests/queries/0_stateless/01293_show_settings.reference | 1 + .../01301_aggregate_state_exception_memory_leak.sh | 2 +- tests/queries/0_stateless/01603_read_with_backoff_bug.sql | 2 +- 
.../0_stateless/02021_exponential_sum_shard.reference | 1 - tests/queries/0_stateless/02021_exponential_sum_shard.sql | 1 - tests/queries/0_stateless/02136_kill_scalar_queries.sh | 2 +- tests/queries/0_stateless/02293_ttest_large_samples.sql | 2 ++ .../02294_floating_point_second_in_settings.sh | 6 +++--- tests/queries/0_stateless/02343_aggregation_pipeline.sql | 8 +++----- .../0_stateless/02697_stop_reading_on_first_cancel.sh | 2 +- tests/queries/0_stateless/02700_s3_part_INT_MAX.sh | 4 +++- .../02896_max_execution_time_with_break_overflow_mode.sql | 2 ++ tests/queries/0_stateless/02915_sleep_large_uint.sql | 1 + ...p_virtual_columns_with_non_deterministic_functions.sql | 1 + 21 files changed, 32 insertions(+), 25 deletions(-) diff --git a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql index 112f5edae36..88a05f59111 100644 --- a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql +++ b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql @@ -1,10 +1,10 @@ -- Tags: distributed -SET max_memory_usage = 300000000; -SET max_bytes_before_external_sort = 20000000; +SET max_memory_usage = 150000000; +SET max_bytes_before_external_sort = 10000000; DROP TABLE IF EXISTS numbers10m; -CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 10000000; +CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 5000000; SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 19999980, 20; diff --git a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh index d41d6409315..bb77a88820a 100755 --- a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh +++ b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh @@ -74,7 +74,7 @@ ${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE t" echo "A session cannot be used by concurrent connections:" -${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT count() FROM system.numbers" >/dev/null & +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9&max_rows_to_read=0" --data-binary "SELECT count() FROM system.numbers" >/dev/null & # An infinite loop is required to make the test reliable. 
We will ensure that at least once the query on the line above has started before this check while true diff --git a/tests/queries/0_stateless/00601_kill_running_query.sh b/tests/queries/0_stateless/00601_kill_running_query.sh index 3163f8146d0..be0fff49129 100755 --- a/tests/queries/0_stateless/00601_kill_running_query.sh +++ b/tests/queries/0_stateless/00601_kill_running_query.sh @@ -11,7 +11,7 @@ function wait_for_query_to_start() while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done } -${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sS "$CLICKHOUSE_URL&query_id=test_00601_$CLICKHOUSE_DATABASE" -d 'SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k)' > /dev/null & +${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sS "$CLICKHOUSE_URL&query_id=test_00601_$CLICKHOUSE_DATABASE" -d 'SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) SETTINGS max_rows_to_read = 0' > /dev/null & wait_for_query_to_start "test_00601_$CLICKHOUSE_DATABASE" $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = 'test_00601_$CLICKHOUSE_DATABASE'" wait diff --git a/tests/queries/0_stateless/00976_max_execution_speed.sql b/tests/queries/0_stateless/00976_max_execution_speed.sql index 52c3f05ff43..41374712724 100644 --- a/tests/queries/0_stateless/00976_max_execution_speed.sql +++ b/tests/queries/0_stateless/00976_max_execution_speed.sql @@ -1,2 +1,2 @@ -SET max_execution_speed = 1, max_execution_time = 3; +SET max_execution_speed = 1, max_execution_time = 3, max_rows_to_read = 0; SELECT count() FROM system.numbers; -- { serverError TIMEOUT_EXCEEDED } diff --git a/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql b/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql index a53b60a5ad3..2b301d7aced 100644 --- a/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql +++ b/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql @@ -1,8 +1,9 @@ drop table if exists t; create table t(n int, a Int64, s String) engine = MergeTree() order by a; -set enable_positional_arguments=0; -set optimize_trivial_insert_select=1; +set enable_positional_arguments = 0; +set optimize_trivial_insert_select = 1; +set max_rows_to_read = 0; -- due to aggregate functions, optimize_trivial_insert_select will not be applied insert into t select 1, sum(number) as c, getSetting('max_threads') from numbers_mt(100000000) settings max_insert_threads=4, max_threads=2; diff --git a/tests/queries/0_stateless/01245_limit_infinite_sources.sql b/tests/queries/0_stateless/01245_limit_infinite_sources.sql index 05680d86a33..69c93baf8a8 100644 --- a/tests/queries/0_stateless/01245_limit_infinite_sources.sql +++ b/tests/queries/0_stateless/01245_limit_infinite_sources.sql @@ -9,3 +9,4 @@ FROM ) WHERE number = 1 LIMIT 1 +SETTINGS max_rows_to_read = 0; diff --git a/tests/queries/0_stateless/01249_flush_interactive.sh b/tests/queries/0_stateless/01249_flush_interactive.sh index 551e11c8c8d..775b7825a16 100755 --- a/tests/queries/0_stateless/01249_flush_interactive.sh +++ b/tests/queries/0_stateless/01249_flush_interactive.sh @@ -14,10 +14,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test() { - timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --query " + timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --max_rows_to_read 0 --query " SELECT DISTINCT number % 5 FROM 
system.numbers" ||: echo -e '---' - timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary " + timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10&max_rows_to_read=0" --data-binary " SELECT DISTINCT number % 5 FROM system.numbers" ||: echo -e '---' } diff --git a/tests/queries/0_stateless/01293_show_settings.reference b/tests/queries/0_stateless/01293_show_settings.reference index 187f55697e4..9d326f16a3b 100644 --- a/tests/queries/0_stateless/01293_show_settings.reference +++ b/tests/queries/0_stateless/01293_show_settings.reference @@ -5,5 +5,6 @@ connect_timeout_with_failover_secure_ms Milliseconds 3000 external_storage_connect_timeout_sec UInt64 10 s3_connect_timeout_ms UInt64 1000 filesystem_prefetch_max_memory_usage UInt64 1073741824 +max_memory_usage UInt64 5000000000 max_untracked_memory UInt64 1048576 memory_profiler_step UInt64 1048576 diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh index 47fe7a9c7d9..9dd800ceb09 100755 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) start=$SECONDS # If the memory leak exists, it will lead to OOM fairly quickly. for _ in {1..1000}; do - $CLICKHOUSE_CLIENT --max_memory_usage 1G <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10"; + $CLICKHOUSE_CLIENT --max_memory_usage 1G --max_rows_to_read 0 <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10"; # NOTE: we cannot use timeout here since this will not guarantee that the query will be executed at least once. 
# (since graceful wait of clickhouse-client had been reverted) diff --git a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql index 1cf52c0288b..ec14f637c01 100644 --- a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql +++ b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql @@ -9,7 +9,7 @@ create table t (x UInt64, s String) engine = MergeTree order by x SETTINGS index INSERT INTO t SELECT number, if(number < (8129 * 1024), arrayStringConcat(arrayMap(x -> toString(x), range(number % 128)), ' '), '') -FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8; +FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8, max_rows_to_read=0; -- optimize table t final; diff --git a/tests/queries/0_stateless/02021_exponential_sum_shard.reference b/tests/queries/0_stateless/02021_exponential_sum_shard.reference index 8453706a05a..c28e5d7a132 100644 --- a/tests/queries/0_stateless/02021_exponential_sum_shard.reference +++ b/tests/queries/0_stateless/02021_exponential_sum_shard.reference @@ -2,4 +2,3 @@ 0.009775171065493644 0.009775171065493644 0.009775171065493644 -0.009775171065493644 diff --git a/tests/queries/0_stateless/02021_exponential_sum_shard.sql b/tests/queries/0_stateless/02021_exponential_sum_shard.sql index 49fde0fe217..8e91637e41d 100644 --- a/tests/queries/0_stateless/02021_exponential_sum_shard.sql +++ b/tests/queries/0_stateless/02021_exponential_sum_shard.sql @@ -3,4 +3,3 @@ WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1) WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000)); WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(100000)); WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(1000000)); -WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000000)); diff --git a/tests/queries/0_stateless/02136_kill_scalar_queries.sh b/tests/queries/0_stateless/02136_kill_scalar_queries.sh index c8691b62360..f8bd5a42756 100755 --- a/tests/queries/0_stateless/02136_kill_scalar_queries.sh +++ b/tests/queries/0_stateless/02136_kill_scalar_queries.sh @@ -10,7 +10,7 @@ function wait_for_query_to_start() } QUERY_1_ID="${CLICKHOUSE_DATABASE}_TEST02132KILL_QUERY1" -(${CLICKHOUSE_CLIENT} --query_id="${QUERY_1_ID}" --query='select (SELECT max(number) from system.numbers) + 1;' 2>&1 | grep -q "Code: 394." || echo 'FAIL') & +(${CLICKHOUSE_CLIENT} --max_rows_to_read 0 --query_id="${QUERY_1_ID}" --query='select (SELECT max(number) from system.numbers) + 1;' 2>&1 | grep -q "Code: 394." 
|| echo 'FAIL') & wait_for_query_to_start "${QUERY_1_ID}" ${CLICKHOUSE_CLIENT} --query="KILL QUERY WHERE query_id='${QUERY_1_ID}' SYNC" diff --git a/tests/queries/0_stateless/02293_ttest_large_samples.sql b/tests/queries/0_stateless/02293_ttest_large_samples.sql index 14baa3fddfe..826bd483fe9 100644 --- a/tests/queries/0_stateless/02293_ttest_large_samples.sql +++ b/tests/queries/0_stateless/02293_ttest_large_samples.sql @@ -15,6 +15,8 @@ SELECT FROM system.numbers limit 500000)); +SET max_rows_to_read = 0; + SELECT roundBankers(result.1, 5), roundBankers(result.2, 5 ) FROM ( SELECT studentTTest(sample, variant) as result diff --git a/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh b/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh index 7a18b8fea29..27dbd3e3de6 100755 --- a/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh +++ b/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh @@ -23,16 +23,16 @@ function check_output() { # TCP CLIENT echo "TCP CLIENT" -OUTPUT=$($CLICKHOUSE_CLIENT --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true) +OUTPUT=$($CLICKHOUSE_CLIENT --max_rows_to_read 0 --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true) check_output "${OUTPUT}" echo "TCP CLIENT WITH SETTINGS IN QUERY" -OUTPUT=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true) +OUTPUT=$($CLICKHOUSE_CLIENT --max_rows_to_read 0 -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true) check_output "${OUTPUT}" # HTTP CLIENT echo "HTTP CLIENT" -OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=$MAX_TIMEOUT" -d \ +OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=${MAX_TIMEOUT}&max_rows_to_read=0" -d \ "SELECT count() FROM system.numbers" || true) check_output "${OUTPUT}" diff --git a/tests/queries/0_stateless/02343_aggregation_pipeline.sql b/tests/queries/0_stateless/02343_aggregation_pipeline.sql index 0f9dbd0247d..24d54293313 100644 --- a/tests/queries/0_stateless/02343_aggregation_pipeline.sql +++ b/tests/queries/0_stateless/02343_aggregation_pipeline.sql @@ -13,11 +13,9 @@ set allow_prefetched_read_pool_for_local_filesystem = 0; -- { echoOn } -explain pipeline select * from (select * from numbers(1e8) group by number) group by number; - -explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number; - -explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number; +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0; explain pipeline select number from remote('127.0.0.{1,2,3}', system, numbers_mt) group by number settings distributed_aggregation_memory_efficient = 1; diff --git a/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh b/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh index 2be13588453..5a2cec08eca 100755 --- a/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh +++ b/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) QUERY_ID="${CLICKHOUSE_DATABASE}_read_with_cancel" -$CLICKHOUSE_CLIENT -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" & +$CLICKHOUSE_CLIENT --max_rows_to_read 0 -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" & pid=$! for _ in {0..60} diff --git a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh index a34a480a078..c431686b594 100755 --- a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh +++ b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh @@ -10,7 +10,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # NOTE: .sh test is used over .sql because it needs $CLICKHOUSE_DATABASE to # avoid truncation, since seems that the version of MinIO that is used on CI # too slow with this. -$CLICKHOUSE_CLIENT -nm -q " +# +# Unfortunately, the test has to buffer it in memory. +$CLICKHOUSE_CLIENT --max_memory_usage 10G -nm -q " INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV') SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024) SETTINGS s3_max_single_part_upload_size = '5Gi'; diff --git a/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql b/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql index ec86a66c7dd..3e131cad0f0 100644 --- a/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql +++ b/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql @@ -1,5 +1,7 @@ -- Tags: no-fasttest +SET max_rows_to_read = 0; + -- Query stops after timeout without an error SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='break' FORMAT Null; diff --git a/tests/queries/0_stateless/02915_sleep_large_uint.sql b/tests/queries/0_stateless/02915_sleep_large_uint.sql index f7c04ab6d1f..08b6c580a28 100644 --- a/tests/queries/0_stateless/02915_sleep_large_uint.sql +++ b/tests/queries/0_stateless/02915_sleep_large_uint.sql @@ -1,6 +1,7 @@ SELECT sleep(3.40282e+44); -- { serverError BAD_ARGUMENTS } SELECT sleep((pow(2, 64) / 1000000) - 1); -- { serverError BAD_ARGUMENTS } SELECT sleepEachRow(184467440737095516) from numbers(10000); -- { serverError BAD_ARGUMENTS } +SET max_rows_to_read = 0; SELECT sleepEachRow(pow(2, 31)) from numbers(9007199254740992) settings function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW } -- Another corner case, but it requires lots of memory to run (huge block size) diff --git a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql index 8ccc3cf61da..6ef8c5a8656 100644 --- a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql +++ b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql @@ -1,3 +1,4 @@ +SET max_rows_to_read = 0; create table test (number UInt64) engine=MergeTree order by number; insert into test select * from numbers(50000000); select ignore(number) from test where RAND() > 4292390314 limit 10; From 016888e29a828870d5cdb50a0eb5e1514bafc97c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 04:32:20 +0200 Subject: [PATCH 
0767/2361] Adjust some tests --- tests/queries/1_stateful/00067_union_all.sql | 3 ++- .../00088_global_in_one_shard_and_rows_before_limit.sql | 2 +- .../queries/1_stateful/00147_global_in_aggregate_function.sql | 1 + .../queries/1_stateful/00149_quantiles_timing_distributed.sql | 1 + tests/queries/1_stateful/00167_read_bytes_from_fs.sql | 1 + .../1_stateful/00171_grouping_aggregated_transform_bug.sql | 1 + .../1_stateful/00182_simple_squashing_transform_bug.sql | 1 + 7 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/queries/1_stateful/00067_union_all.sql b/tests/queries/1_stateful/00067_union_all.sql index 2a1d00e975d..9ee14b36b03 100644 --- a/tests/queries/1_stateful/00067_union_all.sql +++ b/tests/queries/1_stateful/00067_union_all.sql @@ -10,4 +10,5 @@ UNION ALL ORDER BY id DESC LIMIT 10 ) -ORDER BY id, event; +ORDER BY id, event +SETTINGS max_rows_to_read = 40_000_000; diff --git a/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql index 52f9c46997f..443808e7bed 100644 --- a/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql +++ b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql @@ -1,4 +1,4 @@ -- Tags: shard -SET output_format_write_statistics = 0; +SET output_format_write_statistics = 0, max_rows_to_read = 20_000_000; SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact; diff --git a/tests/queries/1_stateful/00147_global_in_aggregate_function.sql b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql index 075c01530c6..c156f073573 100644 --- a/tests/queries/1_stateful/00147_global_in_aggregate_function.sql +++ b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql @@ -1,4 +1,5 @@ -- Tags: global +SET max_rows_to_read = 40_000_000; SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); diff --git a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql index 6f910646fb7..16b565985ea 100644 --- a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql +++ b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql @@ -1,4 +1,5 @@ -- Tags: distributed +SET max_rows_to_read = 100_000_000; SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID); SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1; diff --git a/tests/queries/1_stateful/00167_read_bytes_from_fs.sql b/tests/queries/1_stateful/00167_read_bytes_from_fs.sql index 7b3f50f8141..1a98a531067 100644 --- a/tests/queries/1_stateful/00167_read_bytes_from_fs.sql +++ b/tests/queries/1_stateful/00167_read_bytes_from_fs.sql @@ -1,5 +1,6 @@ -- Tags: no-random-settings +SET max_memory_usage = '10G'; SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40; -- We had a bug which lead to additional compressed data read. 
test.hits compressed size is about 1.2Gb, but we read more then 3Gb. diff --git a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql index 7068780a1b1..b3e4d749328 100644 --- a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql +++ b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql @@ -1,4 +1,5 @@ -- Tags: distributed +SET max_rows_to_read = '100M'; SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169; SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169; diff --git a/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql b/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql index e73de4b33fb..85bad651090 100644 --- a/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql +++ b/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql @@ -1,6 +1,7 @@ -- Tags: global set allow_prefetched_read_pool_for_remote_filesystem=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0, max_threads=2, max_block_size=65387; +set max_rows_to_read = '20M'; SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); From e569a305ba07225ee641ae4af07ba9c88e4608d4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 04:46:31 +0200 Subject: [PATCH 0768/2361] Adjust some tests --- .../00002_log_and_exception_messages_formatting.sql | 2 ++ .../0_stateless/00375_shard_group_uniq_array_of_string.sql | 2 +- .../00376_shard_group_uniq_array_of_int_array.sql | 2 +- .../00377_shard_group_uniq_array_of_string_array.sql | 2 +- tests/queries/0_stateless/00600_replace_running_query.sh | 6 +++--- .../00834_cancel_http_readonly_queries_on_client_close.sh | 2 +- tests/queries/0_stateless/00906_low_cardinality_cache.sql | 2 +- tests/queries/0_stateless/01304_direct_io_long.sh | 4 ++-- tests/queries/0_stateless/02021_exponential_sum.reference | 1 - tests/queries/0_stateless/02021_exponential_sum.sql | 1 - tests/queries/0_stateless/02234_cast_to_ip_address.sql | 2 +- .../0_stateless/02450_kill_distributed_query_deadlock.sh | 2 +- tests/queries/0_stateless/02585_query_status_deadlock.sh | 3 +-- tests/queries/0_stateless/02786_max_execution_time_leaf.sql | 1 + .../0_stateless/02844_subquery_timeout_with_break.sql | 2 +- tests/queries/0_stateless/02916_glogal_in_cancel.sql | 2 +- 16 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 07c42d6d039..c158406c0da 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -7,6 +7,8 @@ system flush logs; drop table if exists logs; create view logs as select * from system.text_log where now() - toIntervalMinute(120) < event_time; +SET max_rows_to_read = 0; + -- 
Check that we don't have too many messages formatted with fmt::runtime or strings concatenation. -- 0.001 threshold should be always enough, the value was about 0.00025 WITH 0.001 AS threshold diff --git a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql index 8a310cb8fc9..f32a64cd30f 100644 --- a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql +++ b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql @@ -7,7 +7,7 @@ INSERT INTO group_uniq_str SELECT 2 as id, toString(number % 100) as v FROM syst INSERT INTO group_uniq_str SELECT 5 as id, toString(number % 100) as v FROM system.numbers LIMIT 10000000; SELECT length(groupUniqArray(v)) FROM group_uniq_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '50M'; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; SELECT length(groupUniqArray(10000)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql index abd0e6e6a45..43066880102 100644 --- a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql +++ b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql @@ -6,7 +6,7 @@ CREATE TABLE group_uniq_arr_int ENGINE = Memory AS (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); SELECT length(groupUniqArray(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '50M'; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; SELECT length(groupUniqArray(100000)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql index e9cfff211f8..1c4376ad577 100644 --- a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql +++ b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql @@ -6,6 +6,6 @@ CREATE TABLE group_uniq_arr_str ENGINE = Memory AS (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); SELECT length(groupUniqArray(v)) FROM group_uniq_arr_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '50M'; DROP TABLE IF EXISTS group_uniq_arr_str; diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index 7a71d17f19b..e7022875086 100755 --- 
a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -17,7 +17,7 @@ function wait_for_query_to_start() } -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 & +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1&max_rows_to_read=0" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 & wait_for_query_to_start 'hello' # Replace it @@ -26,7 +26,7 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d # Wait for it to be replaced wait -${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & +${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --max_rows_to_read=0 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & wait_for_query_to_start '42' # Trying to run another query with the same query_id @@ -38,7 +38,7 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=42&replace_running_query=1" -d 'S $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '42' SYNC" > /dev/null wait -${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & +${CLICKHOUSE_CLIENT} --query_id=42 --max_rows_to_read=0 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & wait_for_query_to_start '42' ${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null wait diff --git a/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh index 5c21c70e06a..dd3735f27b1 100755 --- a/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh +++ b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)' +${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&max_rows_to_read=0&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)' i=0 retries=300 while [[ $i -lt $retries ]]; do diff --git a/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/tests/queries/0_stateless/00906_low_cardinality_cache.sql index 55eacd0db44..15a53841761 100644 --- a/tests/queries/0_stateless/00906_low_cardinality_cache.sql +++ b/tests/queries/0_stateless/00906_low_cardinality_cache.sql @@ -1,5 +1,5 @@ drop table if exists lc_00906; create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -insert into lc_00906 select '0123456789' from numbers(100000000); +insert into lc_00906 select '0123456789' from numbers(100000000) SETTINGS max_rows_to_read = '100M'; select count(), b from lc_00906 group by b; drop table if exists lc_00906; diff --git a/tests/queries/0_stateless/01304_direct_io_long.sh b/tests/queries/0_stateless/01304_direct_io_long.sh index 2e27c2f7728..a66239058ab 100755 --- a/tests/queries/0_stateless/01304_direct_io_long.sh +++ b/tests/queries/0_stateless/01304_direct_io_long.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --max_rows_to_read 50M --multiquery " DROP TABLE IF EXISTS bug; CREATE TABLE bug (UserID UInt64, Date Date) ENGINE = MergeTree ORDER BY Date SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', merge_max_block_size = 8192; @@ -18,5 +18,5 @@ cat "$LOG" | grep Loaded rm "$LOG" -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --multiquery " DROP TABLE bug;" diff --git a/tests/queries/0_stateless/02021_exponential_sum.reference b/tests/queries/0_stateless/02021_exponential_sum.reference index 5bd77479cf7..c9dcee51173 100644 --- a/tests/queries/0_stateless/02021_exponential_sum.reference +++ b/tests/queries/0_stateless/02021_exponential_sum.reference @@ -5,4 +5,3 @@ 0.0009775171065493646 0.0009775171065493646 0.0009775171065493646 -0.0009775171065493646 diff --git a/tests/queries/0_stateless/02021_exponential_sum.sql b/tests/queries/0_stateless/02021_exponential_sum.sql index 8ab7638029c..62ec7dcf9f1 100644 --- a/tests/queries/0_stateless/02021_exponential_sum.sql +++ b/tests/queries/0_stateless/02021_exponential_sum.sql @@ -6,4 +6,3 @@ WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1) WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000); WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(1000000); WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10000000); -WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000000); diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql 
b/tests/queries/0_stateless/02234_cast_to_ip_address.sql index 28f1afff57f..51e953da905 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql @@ -67,7 +67,7 @@ SELECT toIPv6('::.1.2.3'); --{serverError CANNOT_PARSE_IPV6} SELECT toIPv6OrDefault('::.1.2.3'); SELECT toIPv6OrNull('::.1.2.3'); -SELECT count() FROM numbers_mt(100000000) WHERE NOT ignore(toIPv6OrZero(randomString(8))); +SELECT count() FROM numbers_mt(20000000) WHERE NOT ignore(toIPv6OrZero(randomString(8))); SELECT '--'; diff --git a/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh b/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh index 0cd520d8d5d..445f907bcc5 100755 --- a/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh +++ b/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # this can trigger a hung/deadlock in ProcessorList. for i in {1..50}; do query_id="$CLICKHOUSE_TEST_UNIQUE_NAME-$i" - $CLICKHOUSE_CLIENT --format Null --query_id "$query_id" -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null & + $CLICKHOUSE_CLIENT --format Null --query_id "$query_id" --max_rows_to_read 0 -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null & while :; do killed_queries="$($CLICKHOUSE_CLIENT -q "kill query where query_id = '$query_id' sync" | wc -l)" if [[ "$killed_queries" -ge 1 ]]; then diff --git a/tests/queries/0_stateless/02585_query_status_deadlock.sh b/tests/queries/0_stateless/02585_query_status_deadlock.sh index e3e34109cdb..6321ac0064a 100755 --- a/tests/queries/0_stateless/02585_query_status_deadlock.sh +++ b/tests/queries/0_stateless/02585_query_status_deadlock.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) QUERY_ID="${CLICKHOUSE_DATABASE}_test_02585_query_to_kill_id_1" -$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" -n -q " +$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -n -q " create temporary table tmp as select * from numbers(500000000); select * from remote('127.0.0.2', 'system.numbers_mt') where number in (select * from tmp);" &> /dev/null & @@ -23,4 +23,3 @@ do done $CLICKHOUSE_CLIENT -q "kill query where query_id = '$QUERY_ID' sync" &> /dev/null - diff --git a/tests/queries/0_stateless/02786_max_execution_time_leaf.sql b/tests/queries/0_stateless/02786_max_execution_time_leaf.sql index f678c913b46..2e4623f4ac6 100644 --- a/tests/queries/0_stateless/02786_max_execution_time_leaf.sql +++ b/tests/queries/0_stateless/02786_max_execution_time_leaf.sql @@ -1,4 +1,5 @@ -- Tags: no-fasttest +SET max_rows_to_read = 0; SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(100000000000) )) SETTINGS max_execution_time_leaf = 1; -- { serverError TIMEOUT_EXCEEDED } -- Can return partial result SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(100000000000) )) FORMAT Null SETTINGS max_execution_time_leaf = 1, timeout_overflow_mode_leaf = 'break'; diff --git a/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql b/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql index 511ed0c59de..00b527a9378 100644 --- a/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql +++ b/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql @@ -4,7 +4,7 @@ CREATE TABLE t (key UInt64, value UInt64, INDEX value_idx value TYPE bloom_filte INSERT INTO t SELECT 
number, rand()%1000 FROM numbers(10000); SET timeout_overflow_mode='break'; -SET max_execution_time=0.1; +SET max_execution_time=0.1, max_rows_to_read=0; SELECT * FROM t WHERE value IN (SELECT number FROM numbers(1000000000)); DROP TABLE t; diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.sql b/tests/queries/0_stateless/02916_glogal_in_cancel.sql index ad54f1ecdec..dd61795947a 100644 --- a/tests/queries/0_stateless/02916_glogal_in_cancel.sql +++ b/tests/queries/0_stateless/02916_glogal_in_cancel.sql @@ -1,2 +1,2 @@ -set max_execution_time = 0.5, timeout_overflow_mode = 'break'; +set max_execution_time = 0.5, timeout_overflow_mode = 'break', max_rows_to_read = 0; SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null; From 26650dcb2e39b0d28cae4029b6adde2b6c01fda2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 04:59:13 +0200 Subject: [PATCH 0769/2361] More limits --- tests/config/install.sh | 2 +- tests/config/users.d/limits.xml | 8 ----- tests/config/users.d/limits.yaml | 57 ++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 9 deletions(-) delete mode 100644 tests/config/users.d/limits.xml create mode 100644 tests/config/users.d/limits.yaml diff --git a/tests/config/install.sh b/tests/config/install.sh index 265b9248f4a..c5fb3cc92c7 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -93,7 +93,7 @@ ln -sf $SRC_PATH/users.d/prefetch_settings.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/nonconst_timezone.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/allow_introspection_functions.yaml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/replicated_ddl_entry.xml $DEST_SERVER_PATH/users.d/ -ln -sf $SRC_PATH/users.d/limits.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/limits.yaml $DEST_SERVER_PATH/users.d/ if [[ -n "$USE_OLD_ANALYZER" ]] && [[ "$USE_OLD_ANALYZER" -eq 1 ]]; then ln -sf $SRC_PATH/users.d/analyzer.xml $DEST_SERVER_PATH/users.d/ diff --git a/tests/config/users.d/limits.xml b/tests/config/users.d/limits.xml deleted file mode 100644 index f44c73241ab..00000000000 --- a/tests/config/users.d/limits.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - 5G - 20000000 - - - diff --git a/tests/config/users.d/limits.yaml b/tests/config/users.d/limits.yaml new file mode 100644 index 00000000000..4f3f439a997 --- /dev/null +++ b/tests/config/users.d/limits.yaml @@ -0,0 +1,57 @@ +profiles: + default: + max_memory_usage: 5G + max_rows_to_read: 20000000 + + # Also set every other limit to a high value, so it will not limit anything, but we will test that code around it. 
+ s3_max_get_rps: 1000000 + s3_max_get_burst: 2000000 + s3_max_put_rps: 1000000 + s3_max_put_burst: 2000000 + max_remote_read_network_bandwidth: 1T + max_remote_write_network_bandwidth: 1T + max_local_read_bandwidth: 1T + max_local_write_bandwidth: 1T + use_index_for_in_with_subqueries_max_values: 1G + max_bytes_to_read: 1T + max_bytes_to_read_leaf: 1T + max_rows_to_group_by: 10G + max_bytes_before_external_group_by: 10G + max_rows_to_sort: 10G + max_bytes_to_sort: 10G + max_bytes_before_external_sort: 10G + max_result_rows: 1G + max_result_bytes: 1G + max_execution_time: 600 + max_execution_time_leaf: 600 + max_execution_speed: 100G + max_execution_speed_bytes: 10T + max_estimated_execution_time: 600 + max_columns_to_read: 10K + max_temporary_columns: 10K + max_temporary_non_const_columns: 10K + max_sessions_for_user: 1K + max_rows_in_set: 10G + max_bytes_in_set: 10G + max_rows_in_join: 10G + max_bytes_in_join: 10G + max_rows_in_set_to_optimize_join: 1G + max_rows_to_transfer: 1G + max_bytes_to_transfer: 1G + max_rows_in_distinct: 10G + max_bytes_in_distinct: 10G + max_memory_usage_for_user: 10G + max_network_bandwidth: 100G + max_network_bytes: 1T + max_network_bandwidth_for_user: 100G + max_network_bandwidth_for_all_users: 100G + max_temporary_data_on_disk_size_for_user: 100G + max_temporary_data_on_disk_size_for_query: 100G + max_backup_bandwidth: 100G + max_hyperscan_regexp_length: 1M + max_hyperscan_regexp_total_length: 10M + query_cache_max_size_in_bytes: 10M + query_cache_max_entries: 100K + external_storage_max_read_rows: 10G + external_storage_max_read_bytes: 10G + max_streams_for_merge_tree_reading: 1000 From e855ce793783bb50f95339608f688f6c27e83c36 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 04:59:57 +0200 Subject: [PATCH 0770/2361] Revert "Fix for 992 and friends" --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- ...00992_system_parts_race_condition_zookeeper_long.sh | 2 +- tests/queries/0_stateless/replication.lib | 10 +--------- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index da379a466af..3f02486ed15 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3940,7 +3940,7 @@ void StorageReplicatedMergeTree::mergeSelectingTask() merge_selecting_task->schedule(); else { - LOG_TRACE(log, "Scheduling next merge selecting task after {}ms, current attempt status: {}", merge_selecting_sleep_ms, result); + LOG_TRACE(log, "Scheduling next merge selecting task after {}ms", merge_selecting_sleep_ms); merge_selecting_task->scheduleAfter(merge_selecting_sleep_ms); } } diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 02a739ece4a..4887c409844 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -41,7 +41,7 @@ function thread3() function thread4() { - while true; do $CLICKHOUSE_CLIENT --receive_timeout=1 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done + while true; do $CLICKHOUSE_CLIENT --receive_timeout=3 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done } function thread5() diff --git a/tests/queries/0_stateless/replication.lib 
b/tests/queries/0_stateless/replication.lib index fe867537000..1a86cd9f8db 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -51,14 +51,6 @@ function check_replication_consistency() table_name_prefix=$1 check_query_part=$2 - # Try to kill some mutations because sometimes tests run too much (it's not guarenteed to kill all mutations, see below) - # Try multiple replicas, because queries are not finished yet, and "global" KILL MUTATION may fail due to another query (like DROP TABLE) - readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%'") - for t in "${tables_arr[@]}" - do - ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table='$t'" > /dev/null 2>/dev/null - done - # Wait for all queries to finish (query may still be running if thread is killed by timeout) num_tries=0 while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE current_database=currentDatabase() AND query LIKE '%$table_name_prefix%'") -ne 1 ]]; do @@ -104,7 +96,7 @@ function check_replication_consistency() some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1") $CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA $some_table PULL" 1>/dev/null 2>/dev/null ||: - # Forcefully cancel mutations to avoid waiting for them to finish. Kills the remaining mutations + # Forcefully cancel mutations to avoid waiting for them to finish ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null # SYNC REPLICA is not enough if some MUTATE_PARTs are not assigned yet From 385dabaa19a41a703d549925aeb6c9ecc9c0a493 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 07:14:32 +0200 Subject: [PATCH 0771/2361] Fix CLion, which is one of the worst examples of software ever created --- src/Core/Settings.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 3d181e33001..8d3a11dac98 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1,5 +1,8 @@ #pragma once +/// CLion freezes for a minute on every keypress in any file including this. 
+#if !defined(__CLION_IDE__) + #include #include #include @@ -1348,3 +1351,5 @@ struct FormatFactorySettings : public BaseSettings }; } + +#endif From bc899ead26f593f2e09f6f303151539b5b615560 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 07:45:24 +0200 Subject: [PATCH 0772/2361] Fix the estimation of total rows for the numbers data source --- src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index b070bbe739b..4136e2d58b4 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -596,12 +596,12 @@ Pipe ReadFromSystemNumbersStep::makePipe() numbers_storage.step, step_between_chunks); - if (numbers_storage.limit && i == 0) + if (end && i == 0) { - auto rows_appr = itemCountInRange(numbers_storage.offset, *numbers_storage.limit, numbers_storage.step); - if (limit > 0 && limit < rows_appr) - rows_appr = query_info_limit; - source->addTotalRowsApprox(rows_appr); + UInt64 rows_approx = itemCountInRange(numbers_storage.offset, *end, numbers_storage.step); + if (limit > 0 && limit < rows_approx) + rows_approx = query_info_limit; + source->addTotalRowsApprox(rows_approx); } pipe.addSource(std::move(source)); From 0744a761cd86d43e8746bf71619987b70e34238b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 07:46:23 +0200 Subject: [PATCH 0773/2361] Add a test --- .../0_stateless/03208_numbers_total_rows_approx.reference | 1 + tests/queries/0_stateless/03208_numbers_total_rows_approx.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/queries/0_stateless/03208_numbers_total_rows_approx.reference create mode 100644 tests/queries/0_stateless/03208_numbers_total_rows_approx.sql diff --git a/tests/queries/0_stateless/03208_numbers_total_rows_approx.reference b/tests/queries/0_stateless/03208_numbers_total_rows_approx.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/03208_numbers_total_rows_approx.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/03208_numbers_total_rows_approx.sql b/tests/queries/0_stateless/03208_numbers_total_rows_approx.sql new file mode 100644 index 00000000000..7855dfb6207 --- /dev/null +++ b/tests/queries/0_stateless/03208_numbers_total_rows_approx.sql @@ -0,0 +1 @@ +SELECT number FROM numbers(2, 1) WHERE number % 2 = 0 SETTINGS max_rows_to_read = 10; From e156db49225d273abe97eef84612f564a7547be9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 07:56:11 +0200 Subject: [PATCH 0774/2361] Remove the support for Kerberized HDFS --- .../test_storage_kerberized_hdfs/__init__.py | 0 .../configs/hdfs.xml | 13 - .../hdfs_configs/bootstrap.sh | 280 ------------------ .../kerberos_image_config.sh | 140 --------- .../secrets/krb.conf | 25 -- .../secrets/krb_long.conf | 24 -- .../test_storage_kerberized_hdfs/test.py | 155 ---------- 7 files changed, 637 deletions(-) delete mode 100644 tests/integration/test_storage_kerberized_hdfs/__init__.py delete mode 100644 tests/integration/test_storage_kerberized_hdfs/configs/hdfs.xml delete mode 100755 tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh delete mode 100644 tests/integration/test_storage_kerberized_hdfs/kerberos_image_config.sh delete mode 100644 
tests/integration/test_storage_kerberized_hdfs/secrets/krb.conf delete mode 100644 tests/integration/test_storage_kerberized_hdfs/secrets/krb_long.conf delete mode 100644 tests/integration/test_storage_kerberized_hdfs/test.py diff --git a/tests/integration/test_storage_kerberized_hdfs/__init__.py b/tests/integration/test_storage_kerberized_hdfs/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_storage_kerberized_hdfs/configs/hdfs.xml b/tests/integration/test_storage_kerberized_hdfs/configs/hdfs.xml deleted file mode 100644 index 2e9dc6cc06c..00000000000 --- a/tests/integration/test_storage_kerberized_hdfs/configs/hdfs.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - /tmp/keytab/clickhouse.keytab - root@TEST.CLICKHOUSE.TECH - kerberos - - - specuser@TEST.CLICKHOUSE.TECH - - - /tmp/kerb_cache - - diff --git a/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh b/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh deleted file mode 100755 index db6921bc1c8..00000000000 --- a/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh +++ /dev/null @@ -1,280 +0,0 @@ -#!/bin/bash - -: "${HADOOP_PREFIX:=/usr/local/hadoop}" - -cat >> $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh < /usr/local/hadoop/etc/hadoop/core-site.xml - -cat >> /usr/local/hadoop/etc/hadoop/core-site.xml << EOF - - hadoop.security.authentication - kerberos - - - hadoop.security.authorization - true - - - fs.defaultFS - hdfs://kerberizedhdfs1:9010 - - - fs.default.name - hdfs://kerberizedhdfs1:9010 - - - -EOF - - -cat > /usr/local/hadoop/etc/hadoop/hdfs-site.xml << EOF - - - - dfs.replication - 1 - - - - dfs.block.access.token.enable - true - - - - dfs.namenode.keytab.file - /usr/local/hadoop/etc/hadoop/conf/hdfs.keytab - - - dfs.namenode.kerberos.principal - hdfs/_HOST@TEST.CLICKHOUSE.TECH - - - dfs.namenode.kerberos.internal.spnego.principal - HTTP/_HOST@TEST.CLICKHOUSE.TECH - - - - dfs.secondary.namenode.keytab.file - /usr/local/hadoop/etc/hadoop/conf/hdfs.keytab - - - dfs.secondary.namenode.kerberos.principal - hdfs/_HOST@TEST.CLICKHOUSE.TECH - - - dfs.secondary.namenode.kerberos.internal.spnego.principal - HTTP/_HOST@TEST.CLICKHOUSE.TECH - - - - dfs.datanode.data.dir.perm - 700 - - - dfs.datanode.address - 0.0.0.0:1004 - - - dfs.datanode.http.address - 0.0.0.0:1006 - - - - dfs.datanode.ipc.address - 0.0.0.0:0 - - - dfs.namenode.secondary.http-address - 0.0.0.0:0 - - - dfs.namenode.backup.address - 0.0.0.0:0 - - - dfs.namenode.backup.http-address - 0.0.0.0:0 - - - - - - dfs.datanode.keytab.file - /usr/local/hadoop/etc/hadoop/conf/hdfs.keytab - - - dfs.datanode.kerberos.principal - hdfs/_HOST@TEST.CLICKHOUSE.TECH - - - - - dfs.webhdfs.enabled - true - - -dfs.encrypt.data.transfer -false - - - dfs.web.authentication.kerberos.principal - HTTP/_HOST@TEST.CLICKHOUSE.TECH - - - dfs.web.authentication.kerberos.keytab - /usr/local/hadoop/etc/hadoop/conf/hdfs.keytab - - -EOF - - - -# cat > /usr/local/hadoop/etc/hadoop/ssl-server.xml << EOF -# -# -# ssl.server.truststore.location -# /usr/local/hadoop/etc/hadoop/conf/hdfs.jks -# -# -# ssl.server.truststore.password -# masterkey -# -# -# ssl.server.keystore.location -# /usr/local/hadoop/etc/hadoop/conf/hdfs.jks -# -# -# ssl.server.keystore.password -# masterkey -# -# -# ssl.server.keystore.keypassword -# masterkey -# -# -# EOF - -cat > /usr/local/hadoop/etc/hadoop/log4j.properties << EOF -# Set everything to be logged to the console -log4j.rootCategory=DEBUG, console 
-log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n - -# Set the default spark-shell log level to WARN. When running the spark-shell, the -# log level for this class is used to overwrite the root logger's log level, so that -# the user can have different defaults for the shell and regular Spark apps. -log4j.logger.org.apache.spark.repl.Main=INFO - -# Settings to quiet third party logs that are too verbose -log4j.logger.org.spark_project.jetty=DEBUG -log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR -log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO -log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO -log4j.logger.org.apache.parquet=ERROR -log4j.logger.parquet=ERROR - -# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support -log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL -log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR - -log4j.logger.org.apache.spark.deploy=DEBUG -log4j.logger.org.apache.spark.executor=DEBUG -log4j.logger.org.apache.spark.scheduler=DEBUG -EOF - -useradd -u 1098 hdfs - -# keytool -genkey -alias kerberized_hdfs1.test.clickhouse.com -keyalg rsa -keysize 1024 -dname "CN=kerberized_hdfs1.test.clickhouse.com" -keypass masterkey -keystore /usr/local/hadoop/etc/hadoop/conf/hdfs.jks -storepass masterkey -keytool -genkey -alias kerberizedhdfs1 -keyalg rsa -keysize 1024 -dname "CN=kerberizedhdfs1" -keypass masterkey -keystore /usr/local/hadoop/etc/hadoop/conf/hdfs.jks -storepass masterkey - -chmod g+r /usr/local/hadoop/etc/hadoop/conf/hdfs.jks - - -service sshd start - -# yum --quiet --assumeyes install krb5-workstation.x86_64 -# yum --quiet --assumeyes install tcpdump - -# cd /tmp -# curl http://archive.apache.org/dist/commons/daemon/source/commons-daemon-1.0.15-src.tar.gz -o commons-daemon-1.0.15-src.tar.gz -# tar xzf commons-daemon-1.0.15-src.tar.gz -# cd commons-daemon-1.0.15-src/src/native/unix -# ./configure && make -# cp ./jsvc /usr/local/hadoop/sbin - - -until kinit -kt /usr/local/hadoop/etc/hadoop/conf/hdfs.keytab hdfs/kerberizedhdfs1@TEST.CLICKHOUSE.TECH; do sleep 2; done -echo "KDC is up and ready to go... 
starting up" - -$HADOOP_PREFIX/sbin/start-dfs.sh -$HADOOP_PREFIX/sbin/start-yarn.sh -$HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver - -chmod a+r /usr/local/hadoop/etc/hadoop/conf/hdfs.keytab # create dedicated keytab for hdfsuser - -$HADOOP_PREFIX/sbin/start-secure-dns.sh -sleep 3 - -/usr/local/hadoop/bin/hdfs dfsadmin -safemode leave - -/usr/local/hadoop/bin/hdfs dfs -mkdir /user/specuser -/usr/local/hadoop/bin/hdfs dfs -chown specuser /user/specuser -echo "chown_completed" | /usr/local/hadoop/bin/hdfs dfs -appendToFile - /preparations_done_marker - -kdestroy - - - -# adduser --groups hdfs hdfsuser - -# /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop/ --script /usr/local/hadoop/sbin/hdfs start namenode -# /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop/ --script /usr/local/hadoop/sbin/hdfs start datanode - - -if [[ $1 == "-d" ]]; then - while true; do sleep 1000; done -fi - -if [[ $1 == "-bash" ]]; then - /bin/bash -fi diff --git a/tests/integration/test_storage_kerberized_hdfs/kerberos_image_config.sh b/tests/integration/test_storage_kerberized_hdfs/kerberos_image_config.sh deleted file mode 100644 index 45fb93792e0..00000000000 --- a/tests/integration/test_storage_kerberized_hdfs/kerberos_image_config.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/bash - - -set -x # trace - -: "${REALM:=TEST.CLICKHOUSE.TECH}" -: "${DOMAIN_REALM:=test.clickhouse.com}" -: "${KERB_MASTER_KEY:=masterkey}" -: "${KERB_ADMIN_USER:=admin}" -: "${KERB_ADMIN_PASS:=admin}" - -create_config() { - : "${KDC_ADDRESS:=$(hostname -f)}" - - cat>/etc/krb5.conf</var/kerberos/krb5kdc/kdc.conf< /var/kerberos/krb5kdc/kadm5.acl -} - -create_keytabs() { - rm /tmp/keytab/*.keytab - - - # kadmin.local -q "addprinc -randkey hdfs/kerberizedhdfs1.${DOMAIN_REALM}@${REALM}" - # kadmin.local -q "ktadd -norandkey -k /tmp/keytab/hdfs.keytab hdfs/kerberizedhdfs1.${DOMAIN_REALM}@${REALM}" - - # kadmin.local -q "addprinc -randkey HTTP/kerberizedhdfs1.${DOMAIN_REALM}@${REALM}" - # kadmin.local -q "ktadd -norandkey -k /tmp/keytab/hdfs.keytab HTTP/kerberizedhdfs1.${DOMAIN_REALM}@${REALM}" - kadmin.local -q "addprinc -randkey hdfs/kerberizedhdfs1@${REALM}" - kadmin.local -q "ktadd -norandkey -k /tmp/keytab/hdfs.keytab hdfs/kerberizedhdfs1@${REALM}" - - kadmin.local -q "addprinc -randkey HTTP/kerberizedhdfs1@${REALM}" - kadmin.local -q "ktadd -norandkey -k /tmp/keytab/hdfs.keytab HTTP/kerberizedhdfs1@${REALM}" - - kadmin.local -q "addprinc -randkey hdfsuser/node1@${REALM}" - kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab hdfsuser/node1@${REALM}" - kadmin.local -q "addprinc -randkey hdfsuser@${REALM}" - kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab hdfsuser@${REALM}" - kadmin.local -q "addprinc -randkey root@${REALM}" - kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab root@${REALM}" - kadmin.local -q "addprinc -randkey specuser@${REALM}" - kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab specuser@${REALM}" - - chmod g+r /tmp/keytab/clickhouse.keytab -} - -main() { - - if [ ! -f /kerberos_initialized ]; then - create_config - create_db - create_admin_user - start_kdc - - touch /kerberos_initialized - fi - - if [ ! 
-f /var/kerberos/krb5kdc/principal ]; then - while true; do sleep 1000; done - else - start_kdc - create_keytabs - tail -F /var/log/kerberos/krb5kdc.log - fi - -} - -[[ "$0" == "${BASH_SOURCE[0]}" ]] && main "$@" diff --git a/tests/integration/test_storage_kerberized_hdfs/secrets/krb.conf b/tests/integration/test_storage_kerberized_hdfs/secrets/krb.conf deleted file mode 100644 index dffdcaebe81..00000000000 --- a/tests/integration/test_storage_kerberized_hdfs/secrets/krb.conf +++ /dev/null @@ -1,25 +0,0 @@ -[logging] - default = FILE:/var/log/krb5libs.log - kdc = FILE:/var/log/krb5kdc.log - admin_server = FILE:/var/log/kadmind.log - -[libdefaults] - default_realm = TEST.CLICKHOUSE.TECH - dns_lookup_realm = false - dns_lookup_kdc = false - ticket_lifetime = 5s - forwardable = true - rdns = false - default_tgs_enctypes = des3-hmac-sha1 - default_tkt_enctypes = des3-hmac-sha1 - permitted_enctypes = des3-hmac-sha1 - -[realms] - TEST.CLICKHOUSE.TECH = { - kdc = hdfskerberos - admin_server = hdfskerberos - } - -[domain_realm] - .test.clickhouse.com = TEST.CLICKHOUSE.TECH - test.clickhouse.com = TEST.CLICKHOUSE.TECH diff --git a/tests/integration/test_storage_kerberized_hdfs/secrets/krb_long.conf b/tests/integration/test_storage_kerberized_hdfs/secrets/krb_long.conf deleted file mode 100644 index 43c009d2e98..00000000000 --- a/tests/integration/test_storage_kerberized_hdfs/secrets/krb_long.conf +++ /dev/null @@ -1,24 +0,0 @@ -[logging] - default = FILE:/var/log/krb5libs.log - kdc = FILE:/var/log/krb5kdc.log - admin_server = FILE:/var/log/kadmind.log - -[libdefaults] - default_realm = TEST.CLICKHOUSE.TECH - dns_lookup_realm = false - dns_lookup_kdc = false - ticket_lifetime = 15d - forwardable = true - default_tgs_enctypes = des3-hmac-sha1 - default_tkt_enctypes = des3-hmac-sha1 - permitted_enctypes = des3-hmac-sha1 - -[realms] - TEST.CLICKHOUSE.TECH = { - kdc = hdfskerberos - admin_server = hdfskerberos - } - -[domain_realm] - .test.clickhouse.com = TEST.CLICKHOUSE.TECH - test.clickhouse.com = TEST.CLICKHOUSE.TECH diff --git a/tests/integration/test_storage_kerberized_hdfs/test.py b/tests/integration/test_storage_kerberized_hdfs/test.py deleted file mode 100644 index ddfc1f6483d..00000000000 --- a/tests/integration/test_storage_kerberized_hdfs/test.py +++ /dev/null @@ -1,155 +0,0 @@ -import time -import pytest - -import os - -from helpers.cluster import ClickHouseCluster, is_arm -import subprocess - -if is_arm(): - pytestmark = pytest.mark.skip - -cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance( - "node1", - with_kerberized_hdfs=True, - user_configs=[], - main_configs=["configs/hdfs.xml"], -) - - -@pytest.fixture(scope="module") -def started_cluster(): - try: - cluster.start() - - yield cluster - - except Exception as ex: - print(ex) - raise ex - finally: - cluster.shutdown() - - -def test_read_table(started_cluster): - hdfs_api = started_cluster.hdfs_api - - data = "1\tSerialize\t555.222\n2\tData\t777.333\n" - hdfs_api.write_data("/simple_table_function", data) - - api_read = hdfs_api.read_data("/simple_table_function") - assert api_read == data - - select_read = node1.query( - "select * from hdfs('hdfs://kerberizedhdfs1:9010/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')" - ) - assert select_read == data - - -def test_read_write_storage(started_cluster): - hdfs_api = started_cluster.hdfs_api - - node1.query( - "create table SimpleHDFSStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/simple_storage1', 
'TSV')" - ) - node1.query("insert into SimpleHDFSStorage2 values (1, 'Mark', 72.53)") - - api_read = hdfs_api.read_data("/simple_storage1") - assert api_read == "1\tMark\t72.53\n" - - select_read = node1.query("select * from SimpleHDFSStorage2") - assert select_read == "1\tMark\t72.53\n" - - -def test_write_storage_not_expired(started_cluster): - hdfs_api = started_cluster.hdfs_api - - node1.query( - "create table SimpleHDFSStorageNotExpired (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/simple_storage_not_expired', 'TSV')" - ) - - time.sleep(15) # wait for ticket expiration - node1.query("insert into SimpleHDFSStorageNotExpired values (1, 'Mark', 72.53)") - - api_read = hdfs_api.read_data("/simple_storage_not_expired") - assert api_read == "1\tMark\t72.53\n" - - select_read = node1.query("select * from SimpleHDFSStorageNotExpired") - assert select_read == "1\tMark\t72.53\n" - - -def test_two_users(started_cluster): - hdfs_api = started_cluster.hdfs_api - - node1.query( - "create table HDFSStorOne (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/storage_user_one', 'TSV')" - ) - node1.query("insert into HDFSStorOne values (1, 'Real', 86.00)") - - node1.query( - "create table HDFSStorTwo (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://suser@kerberizedhdfs1:9010/user/specuser/storage_user_two', 'TSV')" - ) - node1.query("insert into HDFSStorTwo values (1, 'Ideal', 74.00)") - - select_read_1 = node1.query( - "select * from hdfs('hdfs://kerberizedhdfs1:9010/user/specuser/storage_user_two', 'TSV', 'id UInt64, text String, number Float64')" - ) - - select_read_2 = node1.query( - "select * from hdfs('hdfs://suser@kerberizedhdfs1:9010/storage_user_one', 'TSV', 'id UInt64, text String, number Float64')" - ) - - -def test_read_table_expired(started_cluster): - hdfs_api = started_cluster.hdfs_api - - data = "1\tSerialize\t555.222\n2\tData\t777.333\n" - hdfs_api.write_data("/simple_table_function_relogin", data) - - started_cluster.pause_container("hdfskerberos") - time.sleep(15) - - try: - select_read = node1.query( - "select * from hdfs('hdfs://reloginuser&kerberizedhdfs1:9010/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')" - ) - assert False, "Exception have to be thrown" - except Exception as ex: - assert "DB::Exception: KerberosInit failure:" in str(ex) - - started_cluster.unpause_container("hdfskerberos") - - -def test_prohibited(started_cluster): - node1.query( - "create table HDFSStorTwoProhibited (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://suser@kerberizedhdfs1:9010/storage_user_two_prohibited', 'TSV')" - ) - try: - node1.query("insert into HDFSStorTwoProhibited values (1, 'SomeOne', 74.00)") - assert False, "Exception have to be thrown" - except Exception as ex: - assert ( - "Unable to open HDFS file: /storage_user_two_prohibited (hdfs://suser@kerberizedhdfs1:9010/storage_user_two_prohibited) error: Permission denied: user=specuser, access=WRITE" - in str(ex) - ) - - -def test_cache_path(started_cluster): - node1.query( - "create table HDFSStorCachePath (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://dedicatedcachepath@kerberizedhdfs1:9010/storage_dedicated_cache_path', 'TSV')" - ) - try: - node1.query("insert into HDFSStorCachePath values (1, 'FatMark', 92.53)") - assert False, "Exception have to be thrown" - except Exception as ex: - assert ( - "DB::Exception: hadoop.security.kerberos.ticket.cache.path cannot be set per user" - in str(ex) - ) - - 
-if __name__ == "__main__": - cluster.start() - input("Cluster created, press any key to destroy...") - cluster.shutdown() From c850fac65276342e0b8694fa00d44dd3269d1abc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 09:05:06 +0200 Subject: [PATCH 0775/2361] Fix error --- src/Parsers/MySQL/tests/gtest_column_parser.cpp | 11 ++++++----- src/Parsers/ParserCreateQuery.h | 7 ++++--- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/Parsers/MySQL/tests/gtest_column_parser.cpp b/src/Parsers/MySQL/tests/gtest_column_parser.cpp index 21c37e4ee2e..3a9a0690f06 100644 --- a/src/Parsers/MySQL/tests/gtest_column_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_column_parser.cpp @@ -1,13 +1,14 @@ #include #include #include -#include +#include #include #include #include #include #include + using namespace DB; using namespace DB::MySQLParser; @@ -19,8 +20,8 @@ TEST(ParserColumn, AllNonGeneratedColumnOption) "COLUMN_FORMAT FIXED STORAGE MEMORY REFERENCES tbl_name (col_01) CHECK 1"; ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_EQ(ast->as()->name, "col_01"); - EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); - EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); + EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); + EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); ASTDeclareOptions * declare_options = ast->as()->column_options->as(); EXPECT_EQ(declare_options->changes["is_null"]->as()->value.safeGet(), 0); @@ -44,8 +45,8 @@ TEST(ParserColumn, AllGeneratedColumnOption) "REFERENCES tbl_name (col_01) CHECK 1 GENERATED ALWAYS AS (1) STORED"; ASTPtr ast = parseQuery(p_column, input.data(), input.data() + input.size(), "", 0, 0, 0); EXPECT_EQ(ast->as()->name, "col_01"); - EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); - EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); + EXPECT_EQ(ast->as()->data_type->as()->name, "VARCHAR"); + EXPECT_EQ(ast->as()->data_type->as()->arguments->children[0]->as()->value.safeGet(), 100); ASTDeclareOptions * declare_options = ast->as()->column_options->as(); EXPECT_EQ(declare_options->changes["is_null"]->as()->value.safeGet(), 1); diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 7bd1d1bf588..53a62deb22b 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -13,6 +14,7 @@ #include #include + namespace DB { @@ -268,9 +270,8 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E auto default_function = std::make_shared(); default_function->name = "defaultValueOfTypeName"; default_function->arguments = std::make_shared(); - // Ephemeral columns don't really have secrets but we need to format - // into a String, hence the strange call - default_function->arguments->children.emplace_back(std::make_shared(type->as()->formatForLogging())); + /// Ephemeral columns don't really have secrets but we need to format into a String, hence the strange call + default_function->arguments->children.emplace_back(std::make_shared(type->as()->formatForLogging())); default_expression = default_function; } From c3204fb89577e50ec7ef2c7ddd3c62f913e084f2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 09:25:22 +0200 Subject: [PATCH 0776/2361] Fix error --- src/Parsers/ExpressionElementParsers.cpp | 3 +-- 
src/Parsers/ExpressionListParsers.cpp | 17 +++++++++++++++++ src/Parsers/ExpressionListParsers.h | 10 ++++++++++ src/Parsers/ParserCreateIndexQuery.cpp | 2 +- src/Parsers/ParserCreateQuery.cpp | 4 ++-- 5 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index d4fc9a4bc4d..865d07faaa7 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -9,8 +9,8 @@ #include #include #include -#include "Parsers/CommonParsers.h" +#include #include #include #include @@ -725,7 +725,6 @@ bool ParserStatisticsType::parseImpl(Pos & pos, ASTPtr & node, Expected & expect function_node->name = "STATISTICS"; function_node->arguments = stat_type; function_node->children.push_back(function_node->arguments); - node = function_node; return true; } diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index f97c042e91e..66817fafa5e 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -2388,6 +2388,23 @@ bool ParserFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } } +bool ParserExpressionWithOptionalArguments::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) +{ + ParserIdentifier id_p; + ParserFunction func_p; + + if (ParserFunction(false, false).parse(pos, node, expected)) + return true; + + if (ParserIdentifier().parse(pos, node, expected)) + { + node = makeASTFunction(node->as()->name()); + return true; + } + + return false; +} + const std::vector> ParserExpressionImpl::operators_table { {"->", Operator("lambda", 1, 2, OperatorType::Lambda)}, diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 235d5782630..6ab38416f32 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ -144,6 +144,16 @@ protected: }; +/** Similar to ParserFunction (and yields ASTFunction), but can also parse identifiers without braces. + */ +class ParserExpressionWithOptionalArguments : public IParserBase +{ +protected: + const char * getName() const override { return "expression with optional parameters"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; +}; + + /** An expression with an infix binary left-associative operator. * For example, a + b - c + d. 
*/ diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 2761c99738b..b815ba60bab 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -21,7 +21,7 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected ParserToken close_p(TokenType::ClosingRoundBracket); ParserOrderByExpressionList order_list_p; - ParserFunction type_p; + ParserExpressionWithOptionalArguments type_p; ParserExpression expression_p; ParserUnsignedInteger granularity_p; diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 92c0e7b2558..5da6c3a2510 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -180,7 +180,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ParserKeyword s_granularity(Keyword::GRANULARITY); ParserIdentifier name_p; - ParserFunction type_p; + ParserExpressionWithOptionalArguments type_p; ParserExpression expression_p; ParserUnsignedInteger granularity_p; @@ -240,7 +240,7 @@ bool ParserStatisticsDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & ParserKeyword s_type(Keyword::TYPE); ParserList columns_p(std::make_unique(), std::make_unique(TokenType::Comma), false); - ParserList types_p(std::make_unique(), std::make_unique(TokenType::Comma), false); + ParserList types_p(std::make_unique(), std::make_unique(TokenType::Comma), false); ASTPtr columns; ASTPtr types; From 3a69e1555530f59cc83cb274c7dfa1dcaab14a2d Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 24 Jul 2024 09:32:51 +0200 Subject: [PATCH 0777/2361] CI: Fix for workflow results parsing --- .github/workflows/backport_branches.yml | 2 +- .github/workflows/master.yml | 2 +- .github/workflows/merge_queue.yml | 2 +- .github/workflows/nightly.yml | 2 +- .github/workflows/pull_request.yml | 2 +- .github/workflows/release_branches.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index c602a46d23c..322946ac77b 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -269,7 +269,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 7c319da6045..acd7511d520 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -135,7 +135,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml index 4b186241a0e..64083668719 100644 --- a/.github/workflows/merge_queue.yml +++ b/.github/workflows/merge_queue.yml @@ -108,7 +108,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 84db3338065..ea9c125db70 100644 --- 
a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -54,7 +54,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index c7d7b28af38..63b2bd87dc9 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -168,7 +168,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index bca9ff33cd0..b79208b03a6 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -489,7 +489,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF From 63e586d17af1e8a92fc4d2e8af71f1dba4996fea Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 09:57:56 +0200 Subject: [PATCH 0778/2361] Adjust tests --- tests/config/users.d/limits.yaml | 6 +-- ..._shard_external_sort_distributed.reference | 40 +++++++++---------- .../00111_shard_external_sort_distributed.sql | 2 +- ...00375_shard_group_uniq_array_of_string.sql | 2 +- ...76_shard_group_uniq_array_of_int_array.sql | 2 +- ...shard_group_uniq_array_of_string_array.sql | 2 +- .../00600_replace_running_query.sh | 4 +- .../00601_kill_running_query.reference | 2 +- .../00906_low_cardinality_cache.sql | 3 +- .../01091_query_profiler_does_not_hang.sql | 2 +- .../0_stateless/01293_show_settings.reference | 1 + .../0_stateless/01485_256_bit_multiply.sql | 2 + .../01603_read_with_backoff_bug.sql | 1 + .../01961_roaring_memory_tracking.sql | 2 +- .../02003_memory_limit_in_client.sh | 6 +-- .../02161_addressToLineWithInlines.sql | 2 +- .../02226_analyzer_or_like_combine.sql | 2 + .../02234_cast_to_ip_address.reference | 8 ++-- .../02343_aggregation_pipeline.reference | 6 +-- .../02353_simdjson_buffer_overflow.sql | 1 + .../0_stateless/02372_now_in_block.sql | 1 + .../02536_delta_gorilla_corruption.sql | 2 +- 22 files changed, 54 insertions(+), 45 deletions(-) diff --git a/tests/config/users.d/limits.yaml b/tests/config/users.d/limits.yaml index 4f3f439a997..1c9fff9f4c8 100644 --- a/tests/config/users.d/limits.yaml +++ b/tests/config/users.d/limits.yaml @@ -27,9 +27,9 @@ profiles: max_execution_speed: 100G max_execution_speed_bytes: 10T max_estimated_execution_time: 600 - max_columns_to_read: 10K - max_temporary_columns: 10K - max_temporary_non_const_columns: 10K + max_columns_to_read: 20K + max_temporary_columns: 20K + max_temporary_non_const_columns: 20K max_sessions_for_user: 1K max_rows_in_set: 10G max_bytes_in_set: 10G diff --git a/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference b/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference index df5aa77af60..7534c12a0d8 100644 --- a/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference +++ b/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference @@ -1,20 +1,20 @@ -7040546 -7040546 -4327029 
-4327029 -1613512 -1613512 -8947307 -8947307 -6233790 -6233790 -3520273 -3520273 -806756 -806756 -8140551 -8140551 -5427034 -5427034 -2713517 -2713517 +4437158 +4437158 +1723641 +1723641 +3630402 +3630402 +916885 +916885 +2823646 +2823646 +110129 +110129 +4730407 +4730407 +2016890 +2016890 +3923651 +3923651 +1210134 +1210134 diff --git a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql index 88a05f59111..ef9c0f9f9d0 100644 --- a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql +++ b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql @@ -6,6 +6,6 @@ SET max_bytes_before_external_sort = 10000000; DROP TABLE IF EXISTS numbers10m; CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 5000000; -SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 19999980, 20; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 4999980, 20; DROP TABLE numbers10m; diff --git a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql index f32a64cd30f..445ffe66f64 100644 --- a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql +++ b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql @@ -7,7 +7,7 @@ INSERT INTO group_uniq_str SELECT 2 as id, toString(number % 100) as v FROM syst INSERT INTO group_uniq_str SELECT 5 as id, toString(number % 100) as v FROM system.numbers LIMIT 10000000; SELECT length(groupUniqArray(v)) FROM group_uniq_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '50M'; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '60M'; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; SELECT length(groupUniqArray(10000)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql index 43066880102..7593e1e1580 100644 --- a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql +++ b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql @@ -6,7 +6,7 @@ CREATE TABLE group_uniq_arr_int ENGINE = Memory AS (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); SELECT length(groupUniqArray(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '50M'; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '55M'; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; SELECT length(groupUniqArray(100000)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql 
index 1c4376ad577..8b48ee673f3 100644 --- a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql +++ b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql @@ -6,6 +6,6 @@ CREATE TABLE group_uniq_arr_str ENGINE = Memory AS (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); SELECT length(groupUniqArray(v)) FROM group_uniq_arr_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '50M'; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '55M'; DROP TABLE IF EXISTS group_uniq_arr_str; diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index e7022875086..8f21443d589 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TEST_PREFIX=$RANDOM ${CLICKHOUSE_CLIENT} -q "drop user if exists u_00600${TEST_PREFIX}" -${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1" +${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1, max_rows_to_read=0" ${CLICKHOUSE_CLIENT} -q "grant select on system.numbers to u_00600${TEST_PREFIX}" function wait_for_query_to_start() @@ -26,7 +26,7 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d # Wait for it to be replaced wait -${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --max_rows_to_read=0 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & +${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & wait_for_query_to_start '42' # Trying to run another query with the same query_id diff --git a/tests/queries/0_stateless/00601_kill_running_query.reference b/tests/queries/0_stateless/00601_kill_running_query.reference index 3917ff89482..7824d5804bc 100644 --- a/tests/queries/0_stateless/00601_kill_running_query.reference +++ b/tests/queries/0_stateless/00601_kill_running_query.reference @@ -1 +1 @@ -waiting test_00601_default default SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) +waiting test_00601_default default SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) SETTINGS max_rows_to_read = 0 diff --git a/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/tests/queries/0_stateless/00906_low_cardinality_cache.sql index 15a53841761..efd96746dc4 100644 --- a/tests/queries/0_stateless/00906_low_cardinality_cache.sql +++ b/tests/queries/0_stateless/00906_low_cardinality_cache.sql @@ -1,5 +1,6 @@ +SET max_rows_to_read = '100M' drop table if exists lc_00906; create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -insert into lc_00906 select '0123456789' from numbers(100000000) SETTINGS max_rows_to_read = '100M'; +insert into lc_00906 select '0123456789' from numbers(100000000); select count(), b from 
lc_00906 group by b; drop table if exists lc_00906; diff --git a/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql b/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql index 21a84bdd691..45f1a00ae23 100644 --- a/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql +++ b/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql @@ -1,4 +1,4 @@ -- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug -SET query_profiler_cpu_time_period_ns = 1; +SET query_profiler_cpu_time_period_ns = 1, max_rows_to_read = 0; SELECT count() FROM numbers_mt(1000000000); diff --git a/tests/queries/0_stateless/01293_show_settings.reference b/tests/queries/0_stateless/01293_show_settings.reference index 9d326f16a3b..8b383813c9f 100644 --- a/tests/queries/0_stateless/01293_show_settings.reference +++ b/tests/queries/0_stateless/01293_show_settings.reference @@ -6,5 +6,6 @@ external_storage_connect_timeout_sec UInt64 10 s3_connect_timeout_ms UInt64 1000 filesystem_prefetch_max_memory_usage UInt64 1073741824 max_memory_usage UInt64 5000000000 +max_memory_usage_for_user UInt64 10000000000 max_untracked_memory UInt64 1048576 memory_profiler_step UInt64 1048576 diff --git a/tests/queries/0_stateless/01485_256_bit_multiply.sql b/tests/queries/0_stateless/01485_256_bit_multiply.sql index 5c8c47c9127..18be2b11599 100644 --- a/tests/queries/0_stateless/01485_256_bit_multiply.sql +++ b/tests/queries/0_stateless/01485_256_bit_multiply.sql @@ -1,5 +1,7 @@ -- Tags: no-random-settings, no-asan, no-msan, no-tsan, no-ubsan, no-debug +SET max_rows_to_read = '100M' + select count() from ( select toInt128(number) * number x, toInt256(number) * number y from numbers_mt(100000000) where x != y diff --git a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql index ec14f637c01..b68d15a2200 100644 --- a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql +++ b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql @@ -3,6 +3,7 @@ set enable_filesystem_cache=0; set enable_filesystem_cache_on_write_operations=0; +set max_rows_to_read = '30M'; drop table if exists t; create table t (x UInt64, s String) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql index 485c8192f69..79c722bd629 100644 --- a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql +++ b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql @@ -2,5 +2,5 @@ SET max_bytes_before_external_group_by = 0; -SET max_memory_usage = '100M'; +SET max_memory_usage = '100M', max_rows_to_read = '1B'; SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(200000000) GROUP BY n FORMAT Null; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.sh b/tests/queries/0_stateless/02003_memory_limit_in_client.sh index 96028f4847a..94eba8f25be 100755 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.sh +++ b/tests/queries/0_stateless/02003_memory_limit_in_client.sh @@ -4,11 +4,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client=1 -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" $CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" $CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" $CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" $CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" diff --git a/tests/queries/0_stateless/02161_addressToLineWithInlines.sql b/tests/queries/0_stateless/02161_addressToLineWithInlines.sql index cf400ed34c5..d7ce133f38c 100644 --- a/tests/queries/0_stateless/02161_addressToLineWithInlines.sql +++ b/tests/queries/0_stateless/02161_addressToLineWithInlines.sql @@ -6,7 +6,7 @@ SELECT addressToLineWithInlines(1); -- { serverError FUNCTION_NOT_ALLOWED } SET allow_introspection_functions = 1; SET query_profiler_real_time_period_ns = 0; SET query_profiler_cpu_time_period_ns = 1000000; -SET log_queries = 1; +SET log_queries = 1, max_rows_to_read = 0; SELECT count() FROM numbers_mt(10000000000) SETTINGS log_comment='02161_test_case'; SET log_queries = 0; SET query_profiler_cpu_time_period_ns = 0; diff --git a/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql b/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql index fbebfc6d281..1cd6b8a4e4d 100644 --- a/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql +++ b/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql @@ -1,3 +1,5 @@ +SET allow_hyperscan = 1, max_hyperscan_regexp_length = 0, max_hyperscan_regexp_total_length = 0; + EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0; EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0, allow_experimental_analyzer = 1; EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference index 
fa9c6bd0f94..3dd306477b9 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference @@ -26,9 +26,9 @@ IPv4 functions IPv6 functions \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \N -\0\0\0\0\0\0\0\0\0\0ÿÿ\0\0 -\0\0\0\0\0\0\0\0\0\0ÿÿ\0\0 -\0\0\0\0\0\0\0\0\0\0ÿÿ\0\0 +\0\0\0\0\0\0\0\0\0\0��\0\0 +\0\0\0\0\0\0\0\0\0\0��\0\0 +\0\0\0\0\0\0\0\0\0\0��\0\0 -- :: \N @@ -37,7 +37,7 @@ IPv6 functions ::ffff:127.0.0.1 :: \N -100000000 +20000000 -- ::ffff:127.0.0.1 -- diff --git a/tests/queries/0_stateless/02343_aggregation_pipeline.reference b/tests/queries/0_stateless/02343_aggregation_pipeline.reference index bf61eb6da0a..eb013200a17 100644 --- a/tests/queries/0_stateless/02343_aggregation_pipeline.reference +++ b/tests/queries/0_stateless/02343_aggregation_pipeline.reference @@ -1,6 +1,6 @@ -- { echoOn } -explain pipeline select * from (select * from numbers(1e8) group by number) group by number; +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0; (Expression) ExpressionTransform × 16 (Aggregating) @@ -16,7 +16,7 @@ ExpressionTransform × 16 ExpressionTransform (ReadFromSystemNumbers) NumbersRange 0 → 1 -explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0; (Expression) ExpressionTransform × 16 (Aggregating) @@ -32,7 +32,7 @@ ExpressionTransform × 16 ExpressionTransform × 16 (ReadFromSystemNumbers) NumbersRange × 16 0 → 1 -explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0; (Expression) ExpressionTransform (Sorting) diff --git a/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql index b324f834053..e7c6c272102 100644 --- a/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql +++ b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql @@ -2,5 +2,6 @@ SET max_execution_time = 3; SET timeout_overflow_mode = 'break'; +SET max_rows_to_read = 0, max_bytes_to_read = 0; SELECT count() FROM system.numbers_mt WHERE NOT ignore(JSONExtract('{' || repeat('"a":"b",', rand() % 10) || '"c":"d"}', 'a', 'String')) FORMAT Null; diff --git a/tests/queries/0_stateless/02372_now_in_block.sql b/tests/queries/0_stateless/02372_now_in_block.sql index aee4572ce8d..d0aec471801 100644 --- a/tests/queries/0_stateless/02372_now_in_block.sql +++ b/tests/queries/0_stateless/02372_now_in_block.sql @@ -1,3 +1,4 @@ +SET max_rows_to_read = 0, max_bytes_to_read = 0; SELECT count() FROM (SELECT DISTINCT nowInBlock(), nowInBlock('Pacific/Pitcairn') FROM system.numbers LIMIT 2); SELECT nowInBlock(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT nowInBlock(NULL) IS NULL; diff --git a/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql b/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql index a4e0965e329..3accc726d08 100644 --- a/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql +++ b/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql @@ -12,7 +12,7 @@ create table bug_delta_gorilla (value_bug UInt64 codec (Delta, Gorilla)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' -as (select 0 
from numbers(30000000)); +as (select 0 from numbers(20000000)); select count(*) from bug_delta_gorilla From f39c18d4d0e97771d919c2f4d511b8f8f87dae24 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 24 Jul 2024 10:03:36 +0200 Subject: [PATCH 0779/2361] Fix flaky 01454_storagememory_data_race_challenge --- .../01454_storagememory_data_race_challenge.sh | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh index fa9238041b1..ec9c5134059 100755 --- a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh +++ b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh @@ -11,12 +11,17 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mem" $CLICKHOUSE_CLIENT -q "CREATE TABLE mem (x UInt64) engine = Memory" function f { + local TIMELIMIT=$((SECONDS+$1)) for _ in $(seq 1 300); do $CLICKHOUSE_CLIENT -q "SELECT count() FROM (SELECT * FROM mem SETTINGS max_threads=2) FORMAT Null;" + if [ $SECONDS -ge "$TIMELIMIT" ]; then + break + fi done } function g { + local TIMELIMIT=$((SECONDS+$1)) for _ in $(seq 1 100); do $CLICKHOUSE_CLIENT -n -q " INSERT INTO mem SELECT number FROM numbers(1000000); @@ -30,14 +35,18 @@ function g { INSERT INTO mem VALUES (1); TRUNCATE TABLE mem; " + if [ $SECONDS -ge "$TIMELIMIT" ]; then + break + fi done } export -f f; export -f g; -timeout 20 bash -c f > /dev/null & -timeout 20 bash -c g > /dev/null & +TIMEOUT=20 +f $TIMEOUT & +g $TIMEOUT & wait $CLICKHOUSE_CLIENT -q "DROP TABLE mem" From 284c0204b06f9ecd21b6eb361097b7d986342bd7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 10:11:50 +0200 Subject: [PATCH 0780/2361] Fix test --- .../02845_threads_count_in_distributed_queries.sql.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 b/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 index ffdd4e3400e..2c62279169f 100644 --- a/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 +++ b/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 @@ -1,5 +1,5 @@ -- enforce some defaults to be sure that the env settings will not affect the test -SET max_threads=5, async_socket_for_remote=1, prefer_localhost_replica=1, optimize_read_in_order=1, load_marks_asynchronously=0, local_filesystem_read_method='pread', remote_filesystem_read_method='read'; +SET max_threads=5, async_socket_for_remote=1, prefer_localhost_replica=1, optimize_read_in_order=1, load_marks_asynchronously=0, local_filesystem_read_method='pread', remote_filesystem_read_method='read', trace_profile_events=0; -- we use query_thread_log to check peak thread usage -- after https://github.com/ClickHouse/ClickHouse/issues/53417 there is a simpler way to check it From 5fe78d47bc855867f6431ad06e019b3e0278d0ae Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 10:24:13 +0200 Subject: [PATCH 0781/2361] Compatibility --- src/Parsers/ExpressionListParsers.cpp | 1 + src/Parsers/ParserCreateIndexQuery.cpp | 10 +--------- src/Parsers/ParserCreateQuery.cpp | 10 +--------- 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 66817fafa5e..a9715cec81e 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ 
-2399,6 +2399,7 @@ bool ParserExpressionWithOptionalArguments::parseImpl(Pos & pos, ASTPtr & node, if (ParserIdentifier().parse(pos, node, expected)) { node = makeASTFunction(node->as()->name()); + node->as().no_empty_args = true; return true; } diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index b815ba60bab..9ebee4cc852 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -69,15 +69,7 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected if (s_type.ignore(pos, expected)) { if (!type_p.parse(pos, type, expected)) - { - if (ParserIdentifier().parse(pos, type, expected)) - { - type = makeASTFunction(type->as().name()); - type->as().no_empty_args = true; - } - else - return false; - } + return false; } if (s_granularity.ignore(pos, expected)) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 5da6c3a2510..bf5523152ac 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -199,15 +199,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; if (!type_p.parse(pos, type, expected)) - { - if (name_p.parse(pos, type, expected)) - { - type = makeASTFunction(type->as().name()); - type->as().no_empty_args = true; - } - else - return false; - } + return false; if (s_granularity.ignore(pos, expected)) { From e5bb485a006d93a9e00736dc37ad90a0a0a47673 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 10:25:23 +0200 Subject: [PATCH 0782/2361] Compatibility --- src/Parsers/ExpressionListParsers.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index a9715cec81e..d38dc6d5f37 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -2399,7 +2399,7 @@ bool ParserExpressionWithOptionalArguments::parseImpl(Pos & pos, ASTPtr & node, if (ParserIdentifier().parse(pos, node, expected)) { node = makeASTFunction(node->as()->name()); - node->as().no_empty_args = true; + node->as().no_empty_args = true; return true; } From 73fc5c266f3bc254db3882bfa2f9f42db6b2bc87 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 10:37:19 +0200 Subject: [PATCH 0783/2361] Fix error --- src/DataTypes/DataTypeObject.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 5636a46373f..91b9bfcb2a5 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -4,9 +4,10 @@ #include #include -#include +#include #include + namespace DB { @@ -53,13 +54,13 @@ static DataTypePtr create(const ASTPtr & arguments) ASTPtr schema_argument = arguments->children[0]; bool is_nullable = false; - if (const auto * func = schema_argument->as()) + if (const auto * type = schema_argument->as()) { - if (func->name != "Nullable" || func->arguments->children.size() != 1) + if (type->name != "Nullable" || type->arguments->children.size() != 1) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, - "Expected 'Nullable()' as parameter for type Object (function: {})", func->name); + "Expected 'Nullable()' as parameter for type Object (function: {})", type->name); - schema_argument = func->arguments->children[0]; + schema_argument = type->arguments->children[0]; is_nullable = true; } From 42fe58f0466cf5a74f88c1a48bee9e96540f01e5 Mon 
Sep 17 00:00:00 2001 From: Xu Jia Date: Wed, 24 Jul 2024 08:33:00 +0000 Subject: [PATCH 0784/2361] bugfix AttachedTable counting not symmetry, and adding some test logs on attaching and detaching tables --- src/Databases/DatabasesCommon.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 6ccaf811764..851ba8aef35 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -289,8 +289,12 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n tables.erase(it); table_storage->is_detached = true; - if (!table_storage->isSystemStorage() && database_name != DatabaseCatalog::SYSTEM_DATABASE) + if (!table_storage->isSystemStorage() + && database_name != DatabaseCatalog::SYSTEM_DATABASE + && database_name != DatabaseCatalog::TEMPORARY_DATABASE) { + LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name); CurrentMetrics::sub(getAttachedCounterForStorage(table_storage)); + } auto table_id = table_storage->getStorageID(); if (table_id.hasUUID()) @@ -334,8 +338,12 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c /// non-Atomic database the is_detached is set to true before RENAME. table->is_detached = false; - if (!table->isSystemStorage() && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE) + if (!table->isSystemStorage() + && database_name != DatabaseCatalog::SYSTEM_DATABASE + && database_name != DatabaseCatalog::TEMPORARY_DATABASE) { + LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name); CurrentMetrics::add(getAttachedCounterForStorage(table)); + } } void DatabaseWithOwnTablesBase::shutdown() From c2238c57231fe86883eb9b9a7042a72d28d31eae Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 10:45:33 +0200 Subject: [PATCH 0785/2361] Fix tests --- .../0_stateless/00375_shard_group_uniq_array_of_string.sql | 2 +- .../00376_shard_group_uniq_array_of_int_array.sql | 4 +++- .../00377_shard_group_uniq_array_of_string_array.sql | 3 ++- tests/queries/0_stateless/00906_low_cardinality_cache.sql | 2 +- tests/queries/0_stateless/01485_256_bit_multiply.sql | 2 +- tests/queries/0_stateless/01961_roaring_memory_tracking.sql | 2 +- .../queries/0_stateless/02234_cast_to_ip_address.reference | 6 +++--- tests/queries/0_stateless/02234_cast_to_ip_address.sql | 2 +- 8 files changed, 13 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql index 445ffe66f64..8db91904a6a 100644 --- a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql +++ b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql @@ -7,7 +7,7 @@ INSERT INTO group_uniq_str SELECT 2 as id, toString(number % 100) as v FROM syst INSERT INTO group_uniq_str SELECT 5 as id, toString(number % 100) as v FROM system.numbers LIMIT 10000000; SELECT length(groupUniqArray(v)) FROM group_uniq_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '60M'; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '100M'; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; 
SELECT length(groupUniqArray(10000)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql index 7593e1e1580..24b7f1c30a6 100644 --- a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql +++ b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql @@ -1,12 +1,14 @@ -- Tags: shard +SET max_rows_to_read = '55M'; + DROP TABLE IF EXISTS group_uniq_arr_int; CREATE TABLE group_uniq_arr_int ENGINE = Memory AS SELECT g as id, if(c == 0, [v], if(c == 1, emptyArrayInt64(), [v, v])) as v FROM (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); SELECT length(groupUniqArray(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '55M'; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; SELECT length(groupUniqArray(100000)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql index 8b48ee673f3..180a6a04861 100644 --- a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql +++ b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql @@ -1,4 +1,5 @@ -- Tags: shard +SET max_rows_to_read = '55M'; DROP TABLE IF EXISTS group_uniq_arr_str; CREATE TABLE group_uniq_arr_str ENGINE = Memory AS @@ -6,6 +7,6 @@ CREATE TABLE group_uniq_arr_str ENGINE = Memory AS (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); SELECT length(groupUniqArray(v)) FROM group_uniq_arr_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '55M'; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id; DROP TABLE IF EXISTS group_uniq_arr_str; diff --git a/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/tests/queries/0_stateless/00906_low_cardinality_cache.sql index efd96746dc4..9c1abe1b6df 100644 --- a/tests/queries/0_stateless/00906_low_cardinality_cache.sql +++ b/tests/queries/0_stateless/00906_low_cardinality_cache.sql @@ -1,4 +1,4 @@ -SET max_rows_to_read = '100M' +SET max_rows_to_read = '100M'; drop table if exists lc_00906; create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; insert into lc_00906 select '0123456789' from numbers(100000000); diff --git a/tests/queries/0_stateless/01485_256_bit_multiply.sql b/tests/queries/0_stateless/01485_256_bit_multiply.sql index 18be2b11599..a4e99d51970 100644 --- a/tests/queries/0_stateless/01485_256_bit_multiply.sql +++ b/tests/queries/0_stateless/01485_256_bit_multiply.sql @@ -1,6 +1,6 @@ -- Tags: no-random-settings, no-asan, no-msan, no-tsan, no-ubsan, no-debug -SET max_rows_to_read = '100M' +SET max_rows_to_read 
= '100M'; select count() from ( diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql index 79c722bd629..22eb8e887f2 100644 --- a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql +++ b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql @@ -2,5 +2,5 @@ SET max_bytes_before_external_group_by = 0; -SET max_memory_usage = '100M', max_rows_to_read = '1B'; +SET max_memory_usage = '100M', max_rows_to_read = '1G'; SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(200000000) GROUP BY n FORMAT Null; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference index 3dd306477b9..b9f0a49ec4d 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference @@ -26,9 +26,9 @@ IPv4 functions IPv6 functions \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \N -\0\0\0\0\0\0\0\0\0\0��\0\0 -\0\0\0\0\0\0\0\0\0\0��\0\0 -\0\0\0\0\0\0\0\0\0\0��\0\0 +\0\0\0\0\0\0\0\0\0\0ÿÿ\0\0 +\0\0\0\0\0\0\0\0\0\0ÿÿ\0\0 +\0\0\0\0\0\0\0\0\0\0ÿÿ\0\0 -- :: \N diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql b/tests/queries/0_stateless/02234_cast_to_ip_address.sql index 51e953da905..c851cfde927 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql @@ -71,7 +71,7 @@ SELECT count() FROM numbers_mt(20000000) WHERE NOT ignore(toIPv6OrZero(randomStr SELECT '--'; -SELECT cast('test' , 'IPv6'); --{serverError CANNOT_PARSE_IPV6} +SELECT cast('test' , 'IPv6'); -- { serverError CANNOT_PARSE_IPV6 } SELECT cast('::ffff:127.0.0.1', 'IPv6'); SELECT '--'; From 2106d4769ac4fca6604dec3b66831aef4b12e943 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 10:53:28 +0200 Subject: [PATCH 0786/2361] Fix tests --- tests/config/users.d/limits.yaml | 1 - tests/queries/0_stateless/02346_additional_filters.sql | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/config/users.d/limits.yaml b/tests/config/users.d/limits.yaml index 1c9fff9f4c8..23aaccf9298 100644 --- a/tests/config/users.d/limits.yaml +++ b/tests/config/users.d/limits.yaml @@ -30,7 +30,6 @@ profiles: max_columns_to_read: 20K max_temporary_columns: 20K max_temporary_non_const_columns: 20K - max_sessions_for_user: 1K max_rows_in_set: 10G max_bytes_in_set: 10G max_rows_in_join: 10G diff --git a/tests/queries/0_stateless/02346_additional_filters.sql b/tests/queries/0_stateless/02346_additional_filters.sql index f6b665713ec..5a799e1c8c1 100644 --- a/tests/queries/0_stateless/02346_additional_filters.sql +++ b/tests/queries/0_stateless/02346_additional_filters.sql @@ -4,6 +4,8 @@ drop table if exists table_2; drop table if exists v_numbers; drop table if exists mv_table; +SET max_rows_to_read = 0; + create table table_1 (x UInt32, y String) engine = MergeTree order by x; insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); From a43c4f4f7ff5e0f0f63c870a01e6c1c28ee6f267 Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 24 Jul 2024 10:54:16 +0200 Subject: [PATCH 0787/2361] CI: Automerge when required and non-required checks completed --- .github/workflows/pull_request.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 63b2bd87dc9..34bf51871d2 100644 --- 
a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -152,8 +152,9 @@ jobs: CheckReadyForMerge: if: ${{ !cancelled() }} - # Test_2 or Test_3 must not have jobs required for Mergeable check - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1] + # Test_2 or Test_3 do not have the jobs required for Mergeable check, + # however, set them as "needs" to get all checks results before the automatic merge occurs. + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code From e46d400f7beba9479dfcd0d63f025268081f625c Mon Sep 17 00:00:00 2001 From: Xu Jia Date: Wed, 24 Jul 2024 09:03:05 +0000 Subject: [PATCH 0788/2361] fix style --- src/Databases/DatabasesCommon.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index 851ba8aef35..d2926c64f29 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -291,7 +291,8 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n if (!table_storage->isSystemStorage() && database_name != DatabaseCatalog::SYSTEM_DATABASE - && database_name != DatabaseCatalog::TEMPORARY_DATABASE) { + && database_name != DatabaseCatalog::TEMPORARY_DATABASE) + { LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name); CurrentMetrics::sub(getAttachedCounterForStorage(table_storage)); } @@ -340,7 +341,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c if (!table->isSystemStorage() && database_name != DatabaseCatalog::SYSTEM_DATABASE - && database_name != DatabaseCatalog::TEMPORARY_DATABASE) { + && database_name != DatabaseCatalog::TEMPORARY_DATABASE) + { LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name); CurrentMetrics::add(getAttachedCounterForStorage(table)); } From 57a6d281000f0a49116db82e8b0b364990e61970 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 11:17:43 +0200 Subject: [PATCH 0789/2361] Fix error --- src/IO/ReadWriteBufferFromHTTP.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 85230957b3f..17a5ed385d4 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -132,6 +132,14 @@ std::optional ReadWriteBufferFromHTTP::tryGetFileSize() { return std::nullopt; } + catch (const NetException &) + { + return std::nullopt; + } + catch (const Poco::Net::NetException &) + { + return std::nullopt; + } } return file_info->file_size; From 0f9ee5c37d68f0877c0bc982c6bb59bf4803f98e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 11:25:08 +0200 Subject: [PATCH 0790/2361] Fix test "very_long_arrays" --- tests/queries/0_stateless/00186_very_long_arrays.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00186_very_long_arrays.sh b/tests/queries/0_stateless/00186_very_long_arrays.sh index 739b17ccc99..086303279fa 100755 --- a/tests/queries/0_stateless/00186_very_long_arrays.sh +++ b/tests/queries/0_stateless/00186_very_long_arrays.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-tsan CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 
878a340317863e94aec61476e105342aef997c7b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 11:47:07 +0200 Subject: [PATCH 0791/2361] Fix tests --- docker/test/stateful/run.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 304bfd7b533..5532df40fdf 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -200,7 +200,8 @@ else clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" fi clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - 
clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + # AWS S3 is very inefficient, so increase memory even further: + clickhouse-client --max_memory_usage 20G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" fi clickhouse-client --query "SHOW TABLES FROM test" From 48b30081265f85502b83c7bc5cc4beb9bfeaa3a1 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 24 Jul 2024 12:04:28 +0200 Subject: [PATCH 0792/2361] Handle better static destructors --- src/Client/ClientBase.cpp | 13 +++++++--- src/Common/FailPoint.cpp | 1 + src/Common/PipeFDs.cpp | 13 ++++++++-- src/Common/PipeFDs.h | 3 --- src/Common/SignalHandlers.cpp | 9 ++++++- src/Common/StackTrace.cpp | 23 ++++++++++++++++- src/Daemon/BaseDaemon.cpp | 17 ++++++++++--- src/Loggers/OwnSplitChannel.cpp | 10 ++++++++ src/Loggers/OwnSplitChannel.h | 2 ++ .../__init__.py | 0 .../test.py | 25 +++++++++++++++++++ 11 files changed, 102 insertions(+), 14 deletions(-) create mode 100644 tests/integration/test_shutdown_static_destructor_failure/__init__.py create mode 100644 tests/integration/test_shutdown_static_destructor_failure/test.py diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 7af199131b6..7867cf32d24 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -309,9 +309,16 @@ public: ClientBase::~ClientBase() { - writeSignalIDtoSignalPipe(SignalListener::StopThread); - signal_listener_thread.join(); - HandledSignals::instance().reset(); + try + { + writeSignalIDtoSignalPipe(SignalListener::StopThread); + signal_listener_thread.join(); + HandledSignals::instance().reset(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } } ClientBase::ClientBase( diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index 7b8b5036af0..f5ec8cf0356 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -59,6 +59,7 @@ static struct InitFiu ONCE(execute_query_calling_empty_set_result_func_on_exception) \ ONCE(receive_timeout_on_table_status_response) \ REGULAR(keepermap_fail_drop_data) \ + REGULAR(lazy_pipe_fds_fail_close) \ namespace FailPoints diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp index ceadbb2f983..50eeda1bbe2 100644 --- a/src/Common/PipeFDs.cpp +++ b/src/Common/PipeFDs.cpp @@ -1,19 +1,23 @@ #include #include #include +#include #include #include #include #include -#include #include - namespace DB { +namespace FailPoints +{ + extern const char lazy_pipe_fds_fail_close[]; +} + namespace ErrorCodes { extern const int CANNOT_PIPE; @@ -42,6 +46,11 @@ void LazyPipeFDs::open() void LazyPipeFDs::close() { + fiu_do_on(FailPoints::lazy_pipe_fds_fail_close, + { + throw Exception(ErrorCodes::CANNOT_PIPE, "Manually triggered exception on close"); + }); + for (int & fd : fds_rw) { if (fd < 0) diff --git a/src/Common/PipeFDs.h b/src/Common/PipeFDs.h index 20bd847c077..b651176ee26 100644 --- a/src/Common/PipeFDs.h +++ b/src/Common/PipeFDs.h @@ -1,8 +1,5 @@ #pragma once -#include - - namespace DB { diff --git a/src/Common/SignalHandlers.cpp b/src/Common/SignalHandlers.cpp index a4b7784df5c..52c83d80121 100644 --- a/src/Common/SignalHandlers.cpp +++ b/src/Common/SignalHandlers.cpp @@ -605,7 +605,14 @@ void HandledSignals::reset() HandledSignals::~HandledSignals() { - reset(); + try + { + reset(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } }; HandledSignals & HandledSignals::instance() diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index 34f6f0b7535..b2ece02e2f0 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -4,6 +4,7 @@ #include #include +#include "Common/PipeFDs.h" #include #include #include @@ -489,11 +490,24 @@ struct CacheEntry using CacheEntryPtr = std::shared_ptr; -using StackTraceCache = std::map>; +static constinit std::atomic can_use_cache = false; + +using StackTraceCacheBase = std::map>; + +struct StackTraceCache : public StackTraceCacheBase +{ + using StackTraceCacheBase::StackTraceCacheBase; + + ~StackTraceCache() + { + can_use_cache = false; + } +}; static StackTraceCache & cacheInstance() { static StackTraceCache cache; + can_use_cache = true; return cache; } @@ -503,6 +517,13 @@ String toStringCached(const StackTrace::FramePointers & pointers, size_t offset, { const StackTraceRefTriple key{pointers, offset, size}; + if (!can_use_cache) + { + DB::WriteBufferFromOwnString out; + toStringEveryLineImpl(false, key, [&](std::string_view str) { out << str << '\n'; }); + return out.str(); + } + /// Calculation of stack trace text is extremely slow. /// We use cache because otherwise the server could be overloaded by trash queries. /// Note that this cache can grow unconditionally, but practically it should be small. diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index d77878415db..366aad00376 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -146,10 +146,19 @@ BaseDaemon::BaseDaemon() = default; BaseDaemon::~BaseDaemon() { - writeSignalIDtoSignalPipe(SignalListener::StopThread); - signal_listener_thread.join(); - HandledSignals::instance().reset(); - SentryWriter::resetInstance(); + try + { + writeSignalIDtoSignalPipe(SignalListener::StopThread); + signal_listener_thread.join(); + HandledSignals::instance().reset(); + SentryWriter::resetInstance(); + } + catch (...) 
+ { + tryLogCurrentException(&logger()); + } + + OwnSplitChannel::disableLogging(); } diff --git a/src/Loggers/OwnSplitChannel.cpp b/src/Loggers/OwnSplitChannel.cpp index dc51a13e01f..c0e8514c62a 100644 --- a/src/Loggers/OwnSplitChannel.cpp +++ b/src/Loggers/OwnSplitChannel.cpp @@ -16,8 +16,18 @@ namespace DB { +static constinit std::atomic allow_logging{true}; + +void OwnSplitChannel::disableLogging() +{ + allow_logging = false; +} + void OwnSplitChannel::log(const Poco::Message & msg) { + if (!allow_logging) + return; + #ifndef WITHOUT_TEXT_LOG auto logs_queue = CurrentThread::getInternalTextLogsQueue(); diff --git a/src/Loggers/OwnSplitChannel.h b/src/Loggers/OwnSplitChannel.h index 88bb6b9ce76..9872a4fb558 100644 --- a/src/Loggers/OwnSplitChannel.h +++ b/src/Loggers/OwnSplitChannel.h @@ -39,6 +39,8 @@ public: void setLevel(const std::string & name, int level); + static void disableLogging(); + private: void logSplit(const Poco::Message & msg); void tryLogSplit(const Poco::Message & msg); diff --git a/tests/integration/test_shutdown_static_destructor_failure/__init__.py b/tests/integration/test_shutdown_static_destructor_failure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_shutdown_static_destructor_failure/test.py b/tests/integration/test_shutdown_static_destructor_failure/test.py new file mode 100644 index 00000000000..8f1d4423238 --- /dev/null +++ b/tests/integration/test_shutdown_static_destructor_failure/test.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=[], + stay_alive=True +) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_shutdown(): + node.query("SYSTEM ENABLE FAILPOINT lazy_pipe_fds_fail_close") + node.stop_clickhouse() From e7b5c0ea19a64157f7bc743f81cf148cf3fdb4eb Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 24 Jul 2024 10:14:25 +0000 Subject: [PATCH 0793/2361] Automatic style fix --- .../test_shutdown_static_destructor_failure/test.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/integration/test_shutdown_static_destructor_failure/test.py b/tests/integration/test_shutdown_static_destructor_failure/test.py index 8f1d4423238..b1d925cc432 100644 --- a/tests/integration/test_shutdown_static_destructor_failure/test.py +++ b/tests/integration/test_shutdown_static_destructor_failure/test.py @@ -4,11 +4,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance( - "node", - main_configs=[], - stay_alive=True -) +node = cluster.add_instance("node", main_configs=[], stay_alive=True) @pytest.fixture(scope="module", autouse=True) From 036485a657a35d764ad33f912d0d10b37d05e59b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 12:15:48 +0200 Subject: [PATCH 0794/2361] Fix error --- docker/test/stateful/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 5532df40fdf..bd2e38ff00f 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -201,7 +201,7 @@ else fi clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP 
UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" # AWS S3 is very inefficient, so increase memory even further: - clickhouse-client --max_memory_usage 20G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + clickhouse-client --max_memory_usage 20G --max_memory_usage_for_user 20G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" fi clickhouse-client --query "SHOW TABLES FROM test" From 8c19d502f861ca844501fa505481a1b64684a8ec Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 24 Jul 2024 10:44:06 +0000 Subject: [PATCH 0795/2361] Add decorator and retries for azurite --- 
tests/integration/helpers/cluster.py | 22 ++++++------ tests/integration/helpers/retry_decorator.py | 36 ++++++++++++++++++++ 2 files changed, 48 insertions(+), 10 deletions(-) create mode 100644 tests/integration/helpers/retry_decorator.py diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 548b58a17e8..3914192f9af 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -52,6 +52,7 @@ from helpers.client import QueryRuntimeException import docker from .client import Client +from .retry_decorator import retry from .config_cluster import * @@ -2690,15 +2691,12 @@ class ClickHouseCluster: images_pull_cmd = self.base_cmd + ["pull"] # sometimes dockerhub/proxy can be flaky - for i in range(5): - try: - run_and_check(images_pull_cmd) - break - except Exception as ex: - if i == 4: - raise ex - logging.info("Got exception pulling images: %s", ex) - time.sleep(i * 3) + + retry( + log_function=lambda exception: logging.info( + "Got exception pulling images: %s", exception + ), + )(run_and_check)(images_pull_cmd) if self.with_zookeeper_secure and self.base_zookeeper_cmd: logging.debug("Setup ZooKeeper Secure") @@ -2971,7 +2969,11 @@ class ClickHouseCluster: "Trying to create Azurite instance by command %s", " ".join(map(str, azurite_start_cmd)), ) - run_and_check(azurite_start_cmd) + retry( + log_function=lambda exception: logginnfo( + f"Azurite initialization failed with error: {exception}" + ), + )(run_and_check)(azurite_start_cmd) self.up_called = True logging.info("Trying to connect to Azurite") self.wait_azurite_to_start() diff --git a/tests/integration/helpers/retry_decorator.py b/tests/integration/helpers/retry_decorator.py new file mode 100644 index 00000000000..aaa040464c2 --- /dev/null +++ b/tests/integration/helpers/retry_decorator.py @@ -0,0 +1,36 @@ +import time +import random +from typing import Type, List + + +def retry( + retries: int = 5, + delay: float = 1, + backoff: float = 1.5, + jitter: float = 2, + log_function=lambda *args, **kwargs: None, + retriable_expections_list: List[Type[BaseException]] = [Exception], +): + def inner(func): + def wrapper(*args, **kwargs): + current_delay = delay + for retry in range(retries): + try: + func(*args, **kwargs) + break + except Exception as e: + should_retry = False + for retriable_exception in retriable_expections_list: + if isinstance(e, retriable_exception): + should_retry = True + break + if not should_retry or (retry == retries - 1): + raise e + log_function(retry=retry, exception=e) + sleep_time = current_delay + random.uniform(0, jitter) + time.sleep(sleep_time) + current_delay *= backoff + + return wrapper + + return inner From 1b95a68dc08266c50b2dafb5c852b84431723112 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 24 Jul 2024 10:48:18 +0000 Subject: [PATCH 0796/2361] Fix typo --- tests/integration/helpers/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 3914192f9af..dfdeddb75cf 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2969,8 +2969,8 @@ class ClickHouseCluster: "Trying to create Azurite instance by command %s", " ".join(map(str, azurite_start_cmd)), ) - retry( - log_function=lambda exception: logginnfo( + retry_decorator.retry( + log_function=lambda exception: logging.info( f"Azurite initialization failed with error: {exception}" ), )(run_and_check)(azurite_start_cmd) From 
f21a9e5d0892315ba79d8f10bd9bb6ef271a3aa9 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 24 Jul 2024 11:00:49 +0000 Subject: [PATCH 0797/2361] Fix bug --- tests/integration/helpers/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index dfdeddb75cf..7d80fbe90f8 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2969,7 +2969,7 @@ class ClickHouseCluster: "Trying to create Azurite instance by command %s", " ".join(map(str, azurite_start_cmd)), ) - retry_decorator.retry( + retry( log_function=lambda exception: logging.info( f"Azurite initialization failed with error: {exception}" ), From 2ea10d99940132828c61457d62a54b77a7a66af2 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Wed, 24 Jul 2024 13:13:58 +0200 Subject: [PATCH 0798/2361] Integration tests: fix flaky tests --- .../test_concurrency.py | 11 +++-------- tests/integration/test_manipulate_statistics/test.py | 6 ++++-- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py index c08f3c9c242..d3caadd0b46 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py @@ -276,15 +276,10 @@ def test_create_or_drop_tables_during_backup(db_engine, table_engine): for node in nodes: assert_eq_with_retry( node, - f"SELECT status from system.backups WHERE id IN {ids_list} AND (status == 'CREATING_BACKUP')", - "", - ) - - for node in nodes: - assert_eq_with_retry( - node, - f"SELECT status, error from system.backups WHERE id IN {ids_list} AND (status == 'BACKUP_FAILED')", + f"SELECT status from system.backups " + f"WHERE id IN {ids_list} AND ((status == 'CREATING_BACKUP') OR (status == 'BACKUP_FAILED'))", "", + retry_count=100, ) backup_names = {} diff --git a/tests/integration/test_manipulate_statistics/test.py b/tests/integration/test_manipulate_statistics/test.py index a602cce63df..2541c9b946f 100644 --- a/tests/integration/test_manipulate_statistics/test.py +++ b/tests/integration/test_manipulate_statistics/test.py @@ -167,7 +167,8 @@ def test_replicated_table_ddl(started_cluster): check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_1", "c", False) check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_1", "d", True) node1.query( - "ALTER TABLE test_stat CLEAR STATISTICS d", settings={"alter_sync": "2"} + "ALTER TABLE test_stat CLEAR STATISTICS d", + settings={"alter_sync": "2", "mutations_sync": 2}, ) node1.query( "ALTER TABLE test_stat ADD STATISTICS b type tdigest", @@ -177,7 +178,8 @@ def test_replicated_table_ddl(started_cluster): check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_2", "b", False) check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_2", "d", False) node1.query( - "ALTER TABLE test_stat MATERIALIZE STATISTICS b", settings={"alter_sync": "2"} + "ALTER TABLE test_stat MATERIALIZE STATISTICS b", + settings={"alter_sync": "2", "mutations_sync": 2}, ) check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_3", "a", True) check_stat_file_on_disk(node2, "test_stat", "all_0_0_0_3", "b", True) From d507adf9a7a524706b8247026acfbb29d8467e2b Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Wed, 24 Jul 2024 11:33:31 +0000 Subject: [PATCH 0799/2361] fix --- 
tests/integration/test_backup_restore_new/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index a632e18a650..4806625f3f0 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -1239,6 +1239,7 @@ def test_system_users_required_privileges(): instance.query("DROP USER u1") instance.query("DROP ROLE r1") + instance.query("DROP USER u2") def test_system_users_async(): From 595cf9a6a81f034a552d2b0e365bda87e0a7a9f5 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 24 Jul 2024 13:42:32 +0200 Subject: [PATCH 0800/2361] Revert "Revert "Fix for 992 and friends"" --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- ...00992_system_parts_race_condition_zookeeper_long.sh | 2 +- tests/queries/0_stateless/replication.lib | 10 +++++++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 3f02486ed15..da379a466af 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3940,7 +3940,7 @@ void StorageReplicatedMergeTree::mergeSelectingTask() merge_selecting_task->schedule(); else { - LOG_TRACE(log, "Scheduling next merge selecting task after {}ms", merge_selecting_sleep_ms); + LOG_TRACE(log, "Scheduling next merge selecting task after {}ms, current attempt status: {}", merge_selecting_sleep_ms, result); merge_selecting_task->scheduleAfter(merge_selecting_sleep_ms); } } diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 4887c409844..02a739ece4a 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -41,7 +41,7 @@ function thread3() function thread4() { - while true; do $CLICKHOUSE_CLIENT --receive_timeout=3 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done + while true; do $CLICKHOUSE_CLIENT --receive_timeout=1 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done } function thread5() diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 1a86cd9f8db..fe867537000 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -51,6 +51,14 @@ function check_replication_consistency() table_name_prefix=$1 check_query_part=$2 + # Try to kill some mutations because sometimes tests run too much (it's not guarenteed to kill all mutations, see below) + # Try multiple replicas, because queries are not finished yet, and "global" KILL MUTATION may fail due to another query (like DROP TABLE) + readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%'") + for t in "${tables_arr[@]}" + do + ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table='$t'" > /dev/null 2>/dev/null + done + # Wait for all queries to finish (query may still be running if thread is killed by timeout) num_tries=0 while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE current_database=currentDatabase() AND query LIKE 
'%$table_name_prefix%'") -ne 1 ]]; do @@ -96,7 +104,7 @@ function check_replication_consistency() some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1") $CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA $some_table PULL" 1>/dev/null 2>/dev/null ||: - # Forcefully cancel mutations to avoid waiting for them to finish + # Forcefully cancel mutations to avoid waiting for them to finish. Kills the remaining mutations ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null # SYNC REPLICA is not enough if some MUTATE_PARTs are not assigned yet From b25cad23ed3b90dc8c0903710dba0714bac7219c Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Wed, 24 Jul 2024 11:42:28 +0000 Subject: [PATCH 0801/2361] Use unique names for tables and files --- .../integration/test_storage_s3_queue/test.py | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index bf3c28c5429..cf24e91f36b 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -7,6 +7,7 @@ import pytest from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster, ClickHouseInstance import json +from uuid import uuid4 AVAILABLE_MODES = ["unordered", "ordered"] @@ -822,7 +823,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): def test_max_set_age(started_cluster): node = started_cluster.instances["instance"] - table_name = f"max_set_age" + table_name = f"max_set_age_{uuid4().hex}" dst_table_name = f"{table_name}_dst" keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" @@ -847,11 +848,11 @@ def test_max_set_age(started_cluster): ) create_mv(node, table_name, dst_table_name) - total_values = generate_random_files( + _ = generate_random_files( started_cluster, files_path, files_to_generate, row_num=1 ) - expected_rows = 10 + expected_rows = files_to_generate node.wait_for_log_line("Checking node limits") node.wait_for_log_line("Node limits check finished") @@ -865,11 +866,11 @@ def test_max_set_age(started_cluster): time.sleep(1) assert expected_rows == get_count() - assert 10 == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) + assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) time.sleep(max_age + 5) - expected_rows = 20 + expected_rows *= 2 for _ in range(20): if expected_rows == get_count(): @@ -877,7 +878,7 @@ def test_max_set_age(started_cluster): time.sleep(1) assert expected_rows == get_count() - assert 10 == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) + assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) paths_count = [ int(x) @@ -885,7 +886,7 @@ def test_max_set_age(started_cluster): f"SELECT count() from {dst_table_name} GROUP BY _path" ).splitlines() ] - assert 10 == len(paths_count) + assert files_to_generate == len(paths_count) for path_count in paths_count: assert 2 == path_count @@ -901,7 +902,8 @@ def test_max_set_age(started_cluster): values_csv = ( "\n".join((",".join(map(str, row)) for row in values)) + "\n" ).encode() - put_s3_file_content(started_cluster, f"{files_path}/fff.csv", values_csv) + file_with_error = f"fff_{uuid4().hex}.csv" + put_s3_file_content(started_cluster, 
f"{files_path}/{file_with_error}", values_csv) for _ in range(30): if failed_count + 1 == int( @@ -920,16 +922,17 @@ def test_max_set_age(started_cluster): node.query("SYSTEM FLUSH LOGS") assert "Cannot parse input" in node.query( - "SELECT exception FROM system.s3queue WHERE file_name ilike '%fff.csv'" + f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}'" ) + assert 1 == int( node.query( - "SELECT count() FROM system.s3queue_log WHERE file_name ilike '%fff.csv'" + f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}'" ) ) assert 1 == int( node.query( - "SELECT count() FROM system.s3queue_log WHERE file_name ilike '%fff.csv' AND notEmpty(exception)" + f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' AND notEmpty(exception)" ) ) @@ -943,11 +946,11 @@ def test_max_set_age(started_cluster): node.query("SYSTEM FLUSH LOGS") assert "Cannot parse input" in node.query( - "SELECT exception FROM system.s3queue WHERE file_name ilike '%fff.csv' ORDER BY processing_end_time DESC LIMIT 1" + f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}' ORDER BY processing_end_time DESC LIMIT 1" ) assert 1 < int( node.query( - "SELECT count() FROM system.s3queue_log WHERE file_name ilike '%fff.csv' AND notEmpty(exception)" + f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' AND notEmpty(exception)" ) ) From 3f992e4e08c326857c6dec7c5d45fb46154280b7 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 24 Jul 2024 13:43:13 +0200 Subject: [PATCH 0802/2361] Update replication.lib --- tests/queries/0_stateless/replication.lib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index fe867537000..05651531fba 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -56,7 +56,7 @@ function check_replication_consistency() readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%'") for t in "${tables_arr[@]}" do - ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table='$t'" > /dev/null 2>/dev/null + ${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table='$t'" > /dev/null 2>/dev/null ||: done # Wait for all queries to finish (query may still be running if thread is killed by timeout) From a3f7642d05a730dac2d5030e22947598da70cb72 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 14:01:02 +0200 Subject: [PATCH 0803/2361] Fix a test --- tests/queries/0_stateless/00974_query_profiler.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/00974_query_profiler.sql b/tests/queries/0_stateless/00974_query_profiler.sql index 24e4241b813..cd2f65eb94a 100644 --- a/tests/queries/0_stateless/00974_query_profiler.sql +++ b/tests/queries/0_stateless/00974_query_profiler.sql @@ -2,7 +2,9 @@ -- Tag no-fasttest: Not sure why fail even in sequential mode. Disabled for now to make some progress. SET allow_introspection_functions = 1; +SET trace_profile_events = 0; -- This can inhibit profiler from working, because it prevents sending samples from different profilers concurrently. 
+SET query_profiler_cpu_time_period_ns = 0; SET query_profiler_real_time_period_ns = 100000000; SET log_queries = 1; SELECT sleep(0.5), ignore('test real time query profiler'); From 9aaf806021873f010b5d6fda9061ad836de54e36 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Wed, 24 Jul 2024 14:03:40 +0200 Subject: [PATCH 0804/2361] Integration tests: fix flaky tests 2 --- .../test_backup_restore_on_cluster/test_concurrency.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py index d3caadd0b46..0e381d48fb1 100644 --- a/tests/integration/test_backup_restore_on_cluster/test_concurrency.py +++ b/tests/integration/test_backup_restore_on_cluster/test_concurrency.py @@ -276,7 +276,7 @@ def test_create_or_drop_tables_during_backup(db_engine, table_engine): for node in nodes: assert_eq_with_retry( node, - f"SELECT status from system.backups " + f"SELECT status, error from system.backups " f"WHERE id IN {ids_list} AND ((status == 'CREATING_BACKUP') OR (status == 'BACKUP_FAILED'))", "", retry_count=100, From 09cb2d822a0960f9917f28d36c47c1e67e861178 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Wed, 24 Jul 2024 14:05:50 +0200 Subject: [PATCH 0805/2361] Change description for setting: `throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert` --- src/Core/SettingsChangesHistory.cpp | 2 +- src/Interpreters/executeQuery.cpp | 6 +++--- src/Server/TCPHandler.cpp | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index de4725dc350..38ad88cfa1d 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -147,7 +147,7 @@ static std::initializer_list executeQueryImpl( /// In case when the client had to retry some mini-INSERTs then they will be properly deduplicated /// by the source tables. This functionality is controlled by a setting `async_insert_deduplicate`. /// But then they will be glued together into a block and pushed through a chain of Materialized Views if any. - /// The process of forming such blocks is not deteministic so each time we retry mini-INSERTs the resulting + /// The process of forming such blocks is not deterministic so each time we retry mini-INSERTs the resulting /// block may be concatenated differently. /// That's why deduplication in dependent Materialized Views doesn't make sense in presence of async INSERTs. if (settings.throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert && settings.deduplicate_blocks_in_dependent_materialized_views) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Deduplication is dependent materialized view cannot work together with async inserts. "\ - "Please disable eiher `deduplicate_blocks_in_dependent_materialized_views` or `async_insert` setting."); + "Deduplication in dependent materialized view cannot work together with async inserts. 
"\ + "Please disable either `deduplicate_blocks_in_dependent_materialized_views` or `async_insert` setting."); quota = context->getQuota(); if (quota) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 5bc2d09df35..42dc7f5a78f 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -961,8 +961,8 @@ void TCPHandler::processInsertQuery() if (settings.throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert && settings.deduplicate_blocks_in_dependent_materialized_views) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Deduplication is dependent materialized view cannot work together with async inserts. "\ - "Please disable eiher `deduplicate_blocks_in_dependent_materialized_views` or `async_insert` setting."); + "Deduplication in dependent materialized view cannot work together with async inserts. "\ + "Please disable either `deduplicate_blocks_in_dependent_materialized_views` or `async_insert` setting."); auto result = processAsyncInsertQuery(*insert_queue); if (result.status == AsynchronousInsertQueue::PushResult::OK) From 91b7001df6e827f801bd792e7bd9d96cdd947946 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Wed, 24 Jul 2024 12:08:31 +0000 Subject: [PATCH 0806/2361] Refactor test to improve it - Create wait_for_condition that checks greedily for a period of time - Remove redundant checks - Allow other tests running in parallel to have `ObjectStorageQueueFailedFiles` errors --- .../integration/test_storage_s3_queue/test.py | 56 ++++++------------- 1 file changed, 16 insertions(+), 40 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index cf24e91f36b..e178b3b6608 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -860,24 +860,21 @@ def test_max_set_age(started_cluster): def get_count(): return int(node.query(f"SELECT count() FROM {dst_table_name}")) - for _ in range(20): - if expected_rows == get_count(): - break - time.sleep(1) + def wait_for_condition(check_function, max_wait_time=30): + before = time.time() + while time.time() - before < max_wait_time: + if check_function(): + return + time.sleep(0.1) + assert False - assert expected_rows == get_count() + wait_for_condition(lambda: get_count() == expected_rows) assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) time.sleep(max_age + 5) expected_rows *= 2 - - for _ in range(20): - if expected_rows == get_count(): - break - time.sleep(1) - - assert expected_rows == get_count() + wait_for_condition(lambda: get_count() == expected_rows) assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) paths_count = [ @@ -890,11 +887,12 @@ def test_max_set_age(started_cluster): for path_count in paths_count: assert 2 == path_count - failed_count = int( - node.query( + def get_object_storage_failures(): + return int(node.query( "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" - ) - ) + )) + + failed_count = get_object_storage_failures() values = [ ["failed", 1, 1], @@ -905,31 +903,13 @@ def test_max_set_age(started_cluster): file_with_error = f"fff_{uuid4().hex}.csv" put_s3_file_content(started_cluster, f"{files_path}/{file_with_error}", values_csv) - for _ in range(30): - if failed_count + 1 == int( - node.query( - "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS 
system_events_show_zero_values=1" - ) - ): - break - time.sleep(1) - - assert failed_count + 1 == int( - node.query( - "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" - ) - ) + wait_for_condition(lambda: failed_count + 1 <= get_object_storage_failures()) node.query("SYSTEM FLUSH LOGS") assert "Cannot parse input" in node.query( f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}'" ) - assert 1 == int( - node.query( - f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}'" - ) - ) assert 1 == int( node.query( f"SELECT count() FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' AND notEmpty(exception)" @@ -938,11 +918,7 @@ def test_max_set_age(started_cluster): time.sleep(max_age + 1) - assert failed_count + 2 == int( - node.query( - "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles'" - ) - ) + assert failed_count + 2 <= get_object_storage_failures() node.query("SYSTEM FLUSH LOGS") assert "Cannot parse input" in node.query( From d5ea07be2ef2e61edf95408dec70a8e1bc66cafa Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 24 Jul 2024 14:20:03 +0200 Subject: [PATCH 0807/2361] Bump From 01ce22049a76995dc00974618c94af9ccbcc30db Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 14:29:14 +0200 Subject: [PATCH 0808/2361] Fix tests --- src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 11 +++++------ .../MySQL/tests/gtest_create_rewritten.cpp | 6 ++---- .../test_postgresql_replica_database_engine_2/test.py | 2 +- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index f73965cfcc8..3917ffb8823 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -158,7 +157,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col /// (see git blame for details). 
auto column_name_and_type = columns_name_and_type.begin(); const auto * declare_column_ast = columns_definition->children.begin(); - for (; column_name_and_type != columns_name_and_type.end(); column_name_and_type++, declare_column_ast++) + for (; column_name_and_type != columns_name_and_type.end(); ++column_name_and_type, ++declare_column_ast) { const auto & declare_column = (*declare_column_ast)->as(); String comment; @@ -177,7 +176,7 @@ static ColumnsDescription createColumnsDescription(const NamesAndTypesList & col return columns_description; } -static NamesAndTypesList getNames(const ASTDataType & expr, ContextPtr context, const NamesAndTypesList & columns) +static NamesAndTypesList getNames(const ASTFunction & expr, ContextPtr context, const NamesAndTypesList & columns) { if (expr.arguments->children.empty()) return NamesAndTypesList{}; @@ -221,9 +220,9 @@ static std::tuplechildren.empty()) { diff --git a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp index 6d6077a0295..81e6e6a8761 100644 --- a/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp +++ b/src/Interpreters/MySQL/tests/gtest_create_rewritten.cpp @@ -2,12 +2,10 @@ #include -#include #include #include #include #include -#include #include #include #include @@ -26,8 +24,8 @@ static inline ASTPtr tryRewrittenCreateQuery(const String & query, ContextPtr co context, "test_database", "test_database")[0]; } -static const char MATERIALIZEDMYSQL_TABLE_COLUMNS[] = ", `_sign` Int8() MATERIALIZED 1" - ", `_version` UInt64() MATERIALIZED 1" +static const char MATERIALIZEDMYSQL_TABLE_COLUMNS[] = ", `_sign` Int8 MATERIALIZED 1" + ", `_version` UInt64 MATERIALIZED 1" ", INDEX _version _version TYPE minmax GRANULARITY 1"; TEST(MySQLCreateRewritten, ColumnsDataType) diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 5e04c9e4d12..406b50bc486 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -654,7 +654,7 @@ def test_table_override(started_cluster): instance.query(f"SELECT count() FROM {materialized_database}.{table_name}") ) - expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` String,\\n `_sign` Int8() MATERIALIZED 1,\\n `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)" + expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` String,\\n `_sign` Int8 MATERIALIZED 1,\\n `_version` UInt64 MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)" assert ( expected == instance.query( From 81c19b02e78ea7f2980531732bbaf304622e81fb Mon Sep 17 00:00:00 2001 From: Mark Needham Date: Wed, 24 Jul 2024 13:33:32 +0100 Subject: [PATCH 0809/2361] Update json.md --- docs/en/sql-reference/data-types/json.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index f2eac12594d..f218c8d0339 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -7,7 +7,7 @@ keywords: [object, data type] # Object Data Type (deprecated) -**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this 
guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). +**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
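Stepping back to the integration-test fixes earlier in this series — the `retry_count=100` passed to `assert_eq_with_retry`, the `wait_for_condition` helper added to the S3Queue test, the `alter_sync`/`mutations_sync` settings, and the `uuid4()`-based unique table and file names — they all replace fixed sleeps and single-shot assertions with bounded polling and per-test isolation. The sketch below is a simplified, self-contained stand-in for that poll-and-assert pattern; `wait_until` and `assert_query_eq_with_retry` are illustrative names rather than the project's real helpers, and the timings are arbitrary.

```python
import time
from typing import Callable


def wait_until(condition: Callable[[], bool], max_wait_time: float = 30.0,
               interval: float = 0.1) -> None:
    """Poll `condition` until it holds or the time budget is exhausted."""
    deadline = time.monotonic() + max_wait_time
    while time.monotonic() < deadline:
        if condition():
            return
        time.sleep(interval)
    raise AssertionError(f"condition not met within {max_wait_time}s")


def assert_query_eq_with_retry(run_query: Callable[[str], str], query: str,
                               expected: str, retry_count: int = 100,
                               sleep_time: float = 0.5) -> None:
    """Re-run a query until its stripped result matches, instead of asserting once."""
    last = None
    for _ in range(retry_count):
        last = run_query(query).strip()
        if last == expected:
            return
        time.sleep(sleep_time)
    raise AssertionError(f"expected {expected!r}, last result was {last!r}")


# Example use (commented out; `node` is a stand-in for a test instance handle):
# wait_until(lambda: int(node.query("SELECT count() FROM t")) == expected_rows)
```

The important property is that every wait carries an explicit budget, so a genuinely broken test fails with a clear message instead of hanging or passing by luck, while flakiness from slow replication, merges, or background mutations is absorbed by the retries.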
From 6efd29144558ded7fb95b36c6c19ee50aee0071f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 14:37:05 +0200 Subject: [PATCH 0810/2361] Add a test --- .../03210_inconsistent_formatting_of_data_types.reference | 1 + .../03210_inconsistent_formatting_of_data_types.sh | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference create mode 100755 tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh diff --git a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference new file mode 100644 index 00000000000..ccb445a0573 --- /dev/null +++ b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference @@ -0,0 +1 @@ +ALTER TABLE columns_with_multiple_streams MODIFY COLUMN `field1` Nullable(tupleElement(x, 2), UInt8) diff --git a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh new file mode 100755 index 00000000000..6cb2d083d71 --- /dev/null +++ b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE columns_with_multiple_streams MODIFY COLUMN field1 Nullable(tupleElement(x, 2), UInt8)" | $CLICKHOUSE_FORMAT --oneline From cb7fafd1e6f04a2f29cd77036bb29042b4cfe3f6 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 24 Jul 2024 14:39:26 +0200 Subject: [PATCH 0811/2361] Fix --- src/Common/StackTrace.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index b2ece02e2f0..59a58ac027a 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -4,7 +4,6 @@ #include #include -#include "Common/PipeFDs.h" #include #include #include @@ -496,8 +495,6 @@ using StackTraceCacheBase = std::map Date: Wed, 24 Jul 2024 14:45:58 +0200 Subject: [PATCH 0812/2361] Fix tidy --- programs/local/LocalServer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 5879fd50872..b96de397e96 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -743,7 +743,7 @@ void LocalServer::processConfig() DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context); if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil) DatabaseCatalog::instance().addUUIDMapping(uuid); - DatabaseCatalog::instance().attachDatabase(default_database, std::move(database)); + DatabaseCatalog::instance().attachDatabase(default_database, database); } global_context->setCurrentDatabase(default_database); From 009da21694539f4c35983898fb34d4a492c5a692 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 24 Jul 2024 14:46:44 +0200 Subject: [PATCH 0813/2361] 24.7 changelog improvements --- CHANGELOG.md | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fddd7d7685..a0933bd6544 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,13 +17,12 @@ * `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings 
such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)). * Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). #### New Feature -* Extend function `tuple` to construct named tuples in query. Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). * Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). -* A new table function, `fuzzQuery,` was added. This function allows you to modify a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1');`. [#62103](https://github.com/ClickHouse/ClickHouse/pull/62103) ([pufit](https://github.com/pufit)). * Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)). -* Support JWT authentication in `clickhouse-client`. [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)). * Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)). * Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)). * Support accept_invalid_certificate in client's config in order to allow for client to connect over secure TCP to a server running with self-signed certificate - can be used as a shorthand for corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)). @@ -31,15 +30,19 @@ * Add aggregate function `groupConcat`. About the same as `arrayStringConcat( groupArray(column), ',')` Can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). * Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)). * Add a new setting to disable/enable writing page index into parquet files. 
[#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)). -* Allow system administrators to configure `logger.console_log_level`. [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)). +* Introduce `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)). * Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)). * Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)). +* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)). +* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)). #### Experimental Feature * Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)). * Support rocksdb as backend storage of keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). * Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)). * Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). +* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). #### Performance Improvement * Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)). @@ -51,6 +54,10 @@ * Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)). * Unload primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)). 
* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)). +* s3 requests: Reduce retry time for queries, increase retries count for backups. 8.5 minutes and 100 retires for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)). +* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)). +* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)). +* DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)). #### Improvement * The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -60,21 +67,15 @@ * Allow matching column names in a case insensitive manner when reading json files (`input_format_json_ignore_key_case`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). * Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). * In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). -* Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)). * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). * `StorageS3Queue` related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` according to the number of physical cpu cores on the server (instead of the previous default value as 1). Set default value of `s3queue_loading_retries` to 10. Fix possible vague "Uncaught exception" in exception column of `system.s3queue`. Do not increment retry count on `MEMORY_LIMIT_EXCEEDED` exception. Move files commit to a stage after insertion into table fully finished to avoid files being commited while not inserted. 
Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit`, to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fixed broken multiple columns aggregation on s390x. [#65062](https://github.com/ClickHouse/ClickHouse/pull/65062) ([Harry Lee](https://github.com/HarryLeeIBM)). * Support aliases in parametrized view function (only new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)). -* s3 requests: Reduce retry time for queries, increase retries count for backups. 8.5 minutes and 100 retires for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)). * Updated to mask account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). * Partition pruning for `IN` predicates when filter expression is a part of `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)). * Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)). -* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). -* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)). * `arrayMin`/`arrayMax` can be applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)). * Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)). * Do not create format settings for each row when serializing chunks to insert to EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)). -* Fixed out-of-range exception in parsing Dwarf5 on s390x. [#65501](https://github.com/ClickHouse/ClickHouse/pull/65501) ([Harry Lee](https://github.com/HarryLeeIBM)). * Reduce `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)). * Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Disable filesystem cache background download by default. 
It will be enabled back when we fix the issue with possible "Memory limit exceeded" because memory deallocation is done outside of query context (while buffer is allocated inside of query context) if we use background download threads. Plus we need to add a separate setting to define max size to download for background workers (currently it is limited by max_file_segment_size, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)). @@ -82,7 +83,6 @@ * Implement binary encoding for ClickHouse data types and add its specification in docs. Use it in Dynamic binary serialization, allow to use it in RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)). * Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)). * Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)). -* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)). * Add support for user identification based on x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)). * `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Add a script to backup your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -93,7 +93,6 @@ * Allow controlling named collections in clickhouse-local. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)). * Support ORC file read by writer time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)). -* DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)). * Add settings to control connection to the PostgreSQL. * Setting `postgresql_connection_attempt_timeout` specifies the value passed to `connect_timeout` parameter of connection URL. * Setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)). 
* Reduce inaccuracy of input_wait_elapsed_us/input_wait_elapsed_us/elapsed_us. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)). * Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)). @@ -102,18 +101,16 @@ * Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Add option for validating the Primary key type in Dictionaries. Without this option for simple layouts any column type will be implicitly converted to UInt64. ### Documentation entry for user-facing changes. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). -#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) -* Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). * Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)). * Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). * Fix the VALID UNTIL clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). - -#### Bug Fix (user-visible misbehavior in an official stable release) * Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). * Fixed crash while using MaterializedMySQL with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)). * Fix logical error when PREWHERE expression read no columns and table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)). -* Fix bug with cancelation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)). +* Fix bug with cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)). 
* Fix filling parts columns from metadata (when columns.txt does not exists). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)). * Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). * Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). @@ -146,7 +143,6 @@ * Fix resolving dynamic subcolumns in analyzer, avoid reading the whole column on dynamic subcolumn reading. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)). * Fix config merging for from_env with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)). * Fix a possible hanging in `GRPCServer` during shutdown. [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)). -* Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` peremeter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). * Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)). * Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). * Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). @@ -185,8 +181,6 @@ #### Build/Testing/Packaging Improvement * Instantiate template methods ahead in different .cpp files, avoid too large translation units during compiling. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)). -* Upgraded `pocketfft` dependency to the recent commit https://github.com/mreineck/pocketfft/commit/f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3. [#66291](https://github.com/ClickHouse/ClickHouse/pull/66291) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Upgraded `azure-sdk-for-cpp` to the recent commit https://github.com/ClickHouse/azure-sdk-for-cpp/commit/ea3e19a7be08519134c643177d56c7484dfec884. [#66292](https://github.com/ClickHouse/ClickHouse/pull/66292) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). 
### ClickHouse release 24.6, 2024-07-01 From ff44b2066195c8f72897794ff1051a695c56cbfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 24 Jul 2024 14:51:48 +0200 Subject: [PATCH 0814/2361] Revert "FuzzQuery table function" --- .../table-functions/fuzzQuery.md | 36 ---- programs/client/Client.h | 5 +- src/Client/ClientBase.h | 2 +- src/{Common => Client}/QueryFuzzer.cpp | 50 ++---- src/{Common => Client}/QueryFuzzer.h | 35 ++-- src/Storages/StorageFuzzQuery.cpp | 169 ------------------ src/Storages/StorageFuzzQuery.h | 88 --------- src/Storages/registerStorages.cpp | 2 - src/TableFunctions/TableFunctionFuzzQuery.cpp | 54 ------ src/TableFunctions/TableFunctionFuzzQuery.h | 42 ----- src/TableFunctions/registerTableFunctions.cpp | 1 - src/TableFunctions/registerTableFunctions.h | 1 - .../03031_table_function_fuzzquery.reference | 2 - .../03031_table_function_fuzzquery.sql | 18 -- 14 files changed, 31 insertions(+), 474 deletions(-) delete mode 100644 docs/en/sql-reference/table-functions/fuzzQuery.md rename src/{Common => Client}/QueryFuzzer.cpp (97%) rename src/{Common => Client}/QueryFuzzer.h (91%) delete mode 100644 src/Storages/StorageFuzzQuery.cpp delete mode 100644 src/Storages/StorageFuzzQuery.h delete mode 100644 src/TableFunctions/TableFunctionFuzzQuery.cpp delete mode 100644 src/TableFunctions/TableFunctionFuzzQuery.h delete mode 100644 tests/queries/0_stateless/03031_table_function_fuzzquery.reference delete mode 100644 tests/queries/0_stateless/03031_table_function_fuzzquery.sql diff --git a/docs/en/sql-reference/table-functions/fuzzQuery.md b/docs/en/sql-reference/table-functions/fuzzQuery.md deleted file mode 100644 index e15f8a40156..00000000000 --- a/docs/en/sql-reference/table-functions/fuzzQuery.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -slug: /en/sql-reference/table-functions/fuzzQuery -sidebar_position: 75 -sidebar_label: fuzzQuery ---- - -# fuzzQuery - -Perturbs the given query string with random variations. - -``` sql -fuzzQuery(query[, max_query_length[, random_seed]]) -``` - -**Arguments** - -- `query` (String) - The source query to perform the fuzzing on. -- `max_query_length` (UInt64) - A maximum length the query can get during the fuzzing process. -- `random_seed` (UInt64) - A random seed for producing stable results. - -**Returned Value** - -A table object with a single column containing perturbed query strings. - -## Usage Example - -``` sql -SELECT * FROM fuzzQuery('SELECT materialize(\'a\' AS key) GROUP BY key') LIMIT 2; -``` - -``` - ┌─query──────────────────────────────────────────────────────────┠-1. │ SELECT 'a' AS key GROUP BY key │ -2. 
│ EXPLAIN PIPELINE compact = true SELECT 'a' AS key GROUP BY key │ - └────────────────────────────────────────────────────────────────┘ -``` diff --git a/programs/client/Client.h b/programs/client/Client.h index 6d57a6ea648..229608f787d 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -9,10 +9,7 @@ namespace DB class Client : public ClientBase { public: - Client() - { - fuzzer = QueryFuzzer(randomSeed(), &std::cout, &std::cerr); - } + Client() = default; void initialize(Poco::Util::Application & self) override; diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 4f500a4c45d..986990aecaa 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -17,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Common/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp similarity index 97% rename from src/Common/QueryFuzzer.cpp rename to src/Client/QueryFuzzer.cpp index 161c38f20e0..f5b700ea529 100644 --- a/src/Common/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -68,21 +68,22 @@ Field QueryFuzzer::getRandomField(int type) { case 0: { - return bad_int64_values[fuzz_rand() % std::size(bad_int64_values)]; + return bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) + / sizeof(*bad_int64_values))]; } case 1: { static constexpr double values[] = {NAN, INFINITY, -INFINITY, 0., -0., 0.0001, 0.5, 0.9999, 1., 1.0001, 2., 10.0001, 100.0001, 1000.0001, 1e10, 1e20, - FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % std::size(values)]; + FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; } case 2: { static constexpr UInt64 scales[] = {0, 1, 2, 10}; return DecimalField( - bad_int64_values[fuzz_rand() % std::size(bad_int64_values)], - static_cast(scales[fuzz_rand() % std::size(scales)]) + bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) / sizeof(*bad_int64_values))], + static_cast(scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]) ); } default: @@ -164,8 +165,7 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.erase(arr.begin() + pos); - if (debug_stream) - *debug_stream << "erased\n"; + std::cerr << "erased\n"; } if (fuzz_rand() % 5 == 0) @@ -174,14 +174,12 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.insert(arr.begin() + pos, fuzzField(arr[pos])); - if (debug_stream) - *debug_stream << fmt::format("inserted (pos {})\n", pos); + std::cerr << fmt::format("inserted (pos {})\n", pos); } else { arr.insert(arr.begin(), getRandomField(0)); - if (debug_stream) - *debug_stream << "inserted (0)\n"; + std::cerr << "inserted (0)\n"; } } @@ -199,9 +197,7 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.erase(arr.begin() + pos); - - if (debug_stream) - *debug_stream << "erased\n"; + std::cerr << "erased\n"; } if (fuzz_rand() % 5 == 0) @@ -210,16 +206,12 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.insert(arr.begin() + pos, fuzzField(arr[pos])); - - if (debug_stream) - *debug_stream << fmt::format("inserted (pos {})\n", pos); + std::cerr << fmt::format("inserted (pos {})\n", pos); } else { arr.insert(arr.begin(), getRandomField(0)); - - if (debug_stream) - *debug_stream << "inserted (0)\n"; + std::cerr << "inserted (0)\n"; } } @@ -352,8 +344,7 @@ void 
QueryFuzzer::fuzzOrderByList(IAST * ast) } else { - if (debug_stream) - *debug_stream << "No random column.\n"; + std::cerr << "No random column.\n"; } } @@ -387,8 +378,7 @@ void QueryFuzzer::fuzzColumnLikeExpressionList(IAST * ast) if (col) impl->children.insert(pos, col); else - if (debug_stream) - *debug_stream << "No random column.\n"; + std::cerr << "No random column.\n"; } // We don't have to recurse here to fuzz the children, this is handled by @@ -1371,15 +1361,11 @@ void QueryFuzzer::fuzzMain(ASTPtr & ast) collectFuzzInfoMain(ast); fuzz(ast); - if (out_stream) - { - *out_stream << std::endl; - - WriteBufferFromOStream ast_buf(*out_stream, 4096); - formatAST(*ast, ast_buf, false /*highlight*/); - ast_buf.finalize(); - *out_stream << std::endl << std::endl; - } + std::cout << std::endl; + WriteBufferFromOStream ast_buf(std::cout, 4096); + formatAST(*ast, ast_buf, false /*highlight*/); + ast_buf.finalize(); + std::cout << std::endl << std::endl; } } diff --git a/src/Common/QueryFuzzer.h b/src/Client/QueryFuzzer.h similarity index 91% rename from src/Common/QueryFuzzer.h rename to src/Client/QueryFuzzer.h index 35d088809f2..6165e589cae 100644 --- a/src/Common/QueryFuzzer.h +++ b/src/Client/QueryFuzzer.h @@ -35,31 +35,9 @@ struct ASTWindowDefinition; * queries, so you want to feed it a lot of queries to get some interesting mix * of them. Normally we feed SQL regression tests to it. */ -class QueryFuzzer +struct QueryFuzzer { -public: - explicit QueryFuzzer(pcg64 fuzz_rand_ = randomSeed(), std::ostream * out_stream_ = nullptr, std::ostream * debug_stream_ = nullptr) - : fuzz_rand(fuzz_rand_) - , out_stream(out_stream_) - , debug_stream(debug_stream_) - { - } - - // This is the only function you have to call -- it will modify the passed - // ASTPtr to point to new AST with some random changes. - void fuzzMain(ASTPtr & ast); - - ASTs getInsertQueriesForFuzzedTables(const String & full_query); - ASTs getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query); - void notifyQueryFailed(ASTPtr ast); - - static bool isSuitableForFuzzing(const ASTCreateQuery & create); - -private: - pcg64 fuzz_rand; - - std::ostream * out_stream = nullptr; - std::ostream * debug_stream = nullptr; + pcg64 fuzz_rand{randomSeed()}; // We add elements to expression lists with fixed probability. Some elements // are so large, that the expected number of elements we add to them is @@ -88,6 +66,10 @@ private: std::unordered_map index_of_fuzzed_table; std::set created_tables_hashes; + // This is the only function you have to call -- it will modify the passed + // ASTPtr to point to new AST with some random changes. + void fuzzMain(ASTPtr & ast); + // Various helper functions follow, normally you shouldn't have to call them. 
Field getRandomField(int type); Field fuzzField(Field field); @@ -95,6 +77,9 @@ private: ASTPtr getRandomExpressionList(); DataTypePtr fuzzDataType(DataTypePtr type); DataTypePtr getRandomType(); + ASTs getInsertQueriesForFuzzedTables(const String & full_query); + ASTs getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query); + void notifyQueryFailed(ASTPtr ast); void replaceWithColumnLike(ASTPtr & ast); void replaceWithTableLike(ASTPtr & ast); void fuzzOrderByElement(ASTOrderByElement * elem); @@ -117,6 +102,8 @@ private: void addTableLike(ASTPtr ast); void addColumnLike(ASTPtr ast); void collectFuzzInfoRecurse(ASTPtr ast); + + static bool isSuitableForFuzzing(const ASTCreateQuery & create); }; } diff --git a/src/Storages/StorageFuzzQuery.cpp b/src/Storages/StorageFuzzQuery.cpp deleted file mode 100644 index 6e8f425f8dc..00000000000 --- a/src/Storages/StorageFuzzQuery.cpp +++ /dev/null @@ -1,169 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -} - -ColumnPtr FuzzQuerySource::createColumn() -{ - auto column = ColumnString::create(); - ColumnString::Chars & data_to = column->getChars(); - ColumnString::Offsets & offsets_to = column->getOffsets(); - - offsets_to.resize(block_size); - IColumn::Offset offset = 0; - - auto fuzz_base = query; - size_t row_num = 0; - - while (row_num < block_size) - { - ASTPtr new_query = fuzz_base->clone(); - - auto base_before_fuzz = fuzz_base->formatForErrorMessage(); - fuzzer.fuzzMain(new_query); - auto fuzzed_text = new_query->formatForErrorMessage(); - - if (base_before_fuzz == fuzzed_text) - continue; - - /// AST is too long, will start from the original query. 
- if (config.max_query_length > 500) - { - fuzz_base = query; - continue; - } - - IColumn::Offset next_offset = offset + fuzzed_text.size() + 1; - data_to.resize(next_offset); - - std::copy(fuzzed_text.begin(), fuzzed_text.end(), &data_to[offset]); - - data_to[offset + fuzzed_text.size()] = 0; - offsets_to[row_num] = next_offset; - - offset = next_offset; - fuzz_base = new_query; - ++row_num; - } - - return column; -} - -StorageFuzzQuery::StorageFuzzQuery( - const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment_, const Configuration & config_) - : IStorage(table_id_), config(config_) -{ - StorageInMemoryMetadata storage_metadata; - storage_metadata.setColumns(columns_); - storage_metadata.setComment(comment_); - setInMemoryMetadata(storage_metadata); -} - -Pipe StorageFuzzQuery::read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & /*query_info*/, - ContextPtr /*context*/, - QueryProcessingStage::Enum /*processed_stage*/, - size_t max_block_size, - size_t num_streams) -{ - storage_snapshot->check(column_names); - - Pipes pipes; - pipes.reserve(num_streams); - - const ColumnsDescription & our_columns = storage_snapshot->metadata->getColumns(); - Block block_header; - for (const auto & name : column_names) - { - const auto & name_type = our_columns.get(name); - MutableColumnPtr column = name_type.type->createColumn(); - block_header.insert({std::move(column), name_type.type, name_type.name}); - } - - const char * begin = config.query.data(); - const char * end = begin + config.query.size(); - - ParserQuery parser(end, false); - auto query = parseQuery(parser, begin, end, "", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); - - for (UInt64 i = 0; i < num_streams; ++i) - pipes.emplace_back(std::make_shared(max_block_size, block_header, config, query)); - - return Pipe::unitePipes(std::move(pipes)); -} - -StorageFuzzQuery::Configuration StorageFuzzQuery::getConfiguration(ASTs & engine_args, ContextPtr local_context) -{ - StorageFuzzQuery::Configuration configuration{}; - - // Supported signatures: - // - // FuzzQuery(query) - // FuzzQuery(query, max_query_length) - // FuzzQuery(query, max_query_length, random_seed) - if (engine_args.empty() || engine_args.size() > 3) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "FuzzQuery requires 1 to 3 arguments: query, max_query_length, random_seed"); - - for (auto & engine_arg : engine_args) - engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, local_context); - - auto first_arg = checkAndGetLiteralArgument(engine_args[0], "query"); - configuration.query = std::move(first_arg); - - if (engine_args.size() >= 2) - { - const auto & literal = engine_args[1]->as(); - if (!literal.value.isNull()) - configuration.max_query_length = checkAndGetLiteralArgument(literal, "max_query_length"); - } - - if (engine_args.size() == 3) - { - const auto & literal = engine_args[2]->as(); - if (!literal.value.isNull()) - configuration.random_seed = checkAndGetLiteralArgument(literal, "random_seed"); - } - - return configuration; -} - -void registerStorageFuzzQuery(StorageFactory & factory) -{ - factory.registerStorage( - "FuzzQuery", - [](const StorageFactory::Arguments & args) -> std::shared_ptr - { - ASTs & engine_args = args.engine_args; - - if (engine_args.empty()) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Storage FuzzQuery must have arguments."); - - StorageFuzzQuery::Configuration configuration = 
StorageFuzzQuery::getConfiguration(engine_args, args.getLocalContext()); - - for (const auto& col : args.columns) - if (col.type->getTypeId() != TypeIndex::String) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "'StorageFuzzQuery' supports only columns of String type, got {}.", col.type->getName()); - - return std::make_shared(args.table_id, args.columns, args.comment, configuration); - }); -} - -} diff --git a/src/Storages/StorageFuzzQuery.h b/src/Storages/StorageFuzzQuery.h deleted file mode 100644 index 125ef960e74..00000000000 --- a/src/Storages/StorageFuzzQuery.h +++ /dev/null @@ -1,88 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "config.h" - -namespace DB -{ - -class NamedCollection; - -class StorageFuzzQuery final : public IStorage -{ -public: - struct Configuration : public StatelessTableEngineConfiguration - { - String query; - UInt64 max_query_length = 500; - UInt64 random_seed = randomSeed(); - }; - - StorageFuzzQuery( - const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment_, const Configuration & config_); - - std::string getName() const override { return "FuzzQuery"; } - - Pipe read( - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - size_t num_streams) override; - - static StorageFuzzQuery::Configuration getConfiguration(ASTs & engine_args, ContextPtr local_context); - -private: - const Configuration config; -}; - - -class FuzzQuerySource : public ISource -{ -public: - FuzzQuerySource( - UInt64 block_size_, Block block_header_, const StorageFuzzQuery::Configuration & config_, ASTPtr query_) - : ISource(block_header_) - , block_size(block_size_) - , block_header(std::move(block_header_)) - , config(config_) - , query(query_) - , fuzzer(config_.random_seed) - { - } - - String getName() const override { return "FuzzQuery"; } - -protected: - Chunk generate() override - { - Columns columns; - columns.reserve(block_header.columns()); - for (const auto & col : block_header) - { - chassert(col.type->getTypeId() == TypeIndex::String); - columns.emplace_back(createColumn()); - } - - return {std::move(columns), block_size}; - } - -private: - ColumnPtr createColumn(); - - UInt64 block_size; - Block block_header; - - StorageFuzzQuery::Configuration config; - ASTPtr query; - - QueryFuzzer fuzzer; -}; - -} diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index adc1074b1fe..8f33314397c 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -26,7 +26,6 @@ void registerStorageGenerateRandom(StorageFactory & factory); void registerStorageExecutable(StorageFactory & factory); void registerStorageWindowView(StorageFactory & factory); void registerStorageLoop(StorageFactory & factory); -void registerStorageFuzzQuery(StorageFactory & factory); #if USE_RAPIDJSON || USE_SIMDJSON void registerStorageFuzzJSON(StorageFactory & factory); #endif @@ -127,7 +126,6 @@ void registerStorages() registerStorageExecutable(factory); registerStorageWindowView(factory); registerStorageLoop(factory); - registerStorageFuzzQuery(factory); #if USE_RAPIDJSON || USE_SIMDJSON registerStorageFuzzJSON(factory); #endif diff --git a/src/TableFunctions/TableFunctionFuzzQuery.cpp b/src/TableFunctions/TableFunctionFuzzQuery.cpp deleted file mode 100644 index 224f6666556..00000000000 --- a/src/TableFunctions/TableFunctionFuzzQuery.cpp +++ /dev/null 
@@ -1,54 +0,0 @@ -#include - -#include -#include -#include -#include - -namespace DB -{ - - -namespace ErrorCodes -{ - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -} - -void TableFunctionFuzzQuery::parseArguments(const ASTPtr & ast_function, ContextPtr context) -{ - ASTs & args_func = ast_function->children; - - if (args_func.size() != 1) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function '{}' must have arguments", getName()); - - auto args = args_func.at(0)->children; - configuration = StorageFuzzQuery::getConfiguration(args, context); -} - -StoragePtr TableFunctionFuzzQuery::executeImpl( - const ASTPtr & /*ast_function*/, - ContextPtr context, - const std::string & table_name, - ColumnsDescription /*cached_columns*/, - bool is_insert_query) const -{ - ColumnsDescription columns = getActualTableStructure(context, is_insert_query); - auto res = std::make_shared( - StorageID(getDatabaseName(), table_name), - columns, - /* comment */ String{}, - configuration); - res->startup(); - return res; -} - -void registerTableFunctionFuzzQuery(TableFunctionFactory & factory) -{ - factory.registerFunction( - {.documentation - = {.description = "Perturbs a query string with random variations.", - .returned_value = "A table object with a single column containing perturbed query strings."}, - .allow_readonly = true}); -} - -} diff --git a/src/TableFunctions/TableFunctionFuzzQuery.h b/src/TableFunctions/TableFunctionFuzzQuery.h deleted file mode 100644 index 22d10341c4d..00000000000 --- a/src/TableFunctions/TableFunctionFuzzQuery.h +++ /dev/null @@ -1,42 +0,0 @@ -#pragma once - -#include - -#include -#include -#include - -#include "config.h" - -namespace DB -{ - -class TableFunctionFuzzQuery : public ITableFunction -{ -public: - static constexpr auto name = "fuzzQuery"; - std::string getName() const override { return name; } - - void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; - - ColumnsDescription getActualTableStructure(ContextPtr /* context */, bool /* is_insert_query */) const override - { - return ColumnsDescription{{"query", std::make_shared()}}; - } - -private: - StoragePtr executeImpl( - const ASTPtr & ast_function, - ContextPtr context, - const std::string & table_name, - ColumnsDescription cached_columns, - bool is_insert_query) const override; - - const char * getStorageTypeName() const override { return "fuzzQuery"; } - - String source; - std::optional random_seed; - StorageFuzzQuery::Configuration configuration; -}; - -} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index a6c90872f12..ca4913898f9 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -26,7 +26,6 @@ void registerTableFunctions() registerTableFunctionMongoDB(factory); registerTableFunctionRedis(factory); registerTableFunctionMergeTreeIndex(factory); - registerTableFunctionFuzzQuery(factory); #if USE_RAPIDJSON || USE_SIMDJSON registerTableFunctionFuzzJSON(factory); #endif diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index 2a8864a9bfd..efde4d6dcdc 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -23,7 +23,6 @@ void registerTableFunctionGenerate(TableFunctionFactory & factory); void registerTableFunctionMongoDB(TableFunctionFactory & factory); void registerTableFunctionRedis(TableFunctionFactory & factory); void 
registerTableFunctionMergeTreeIndex(TableFunctionFactory & factory); -void registerTableFunctionFuzzQuery(TableFunctionFactory & factory); #if USE_RAPIDJSON || USE_SIMDJSON void registerTableFunctionFuzzJSON(TableFunctionFactory & factory); #endif diff --git a/tests/queries/0_stateless/03031_table_function_fuzzquery.reference b/tests/queries/0_stateless/03031_table_function_fuzzquery.reference deleted file mode 100644 index 202e4557a33..00000000000 --- a/tests/queries/0_stateless/03031_table_function_fuzzquery.reference +++ /dev/null @@ -1,2 +0,0 @@ -query -String diff --git a/tests/queries/0_stateless/03031_table_function_fuzzquery.sql b/tests/queries/0_stateless/03031_table_function_fuzzquery.sql deleted file mode 100644 index b26096f7f0e..00000000000 --- a/tests/queries/0_stateless/03031_table_function_fuzzquery.sql +++ /dev/null @@ -1,18 +0,0 @@ - -SELECT * FROM fuzzQuery('SELECT 1', 500, 8956) LIMIT 0 FORMAT TSVWithNamesAndTypes; - -SELECT * FROM fuzzQuery('SELECT * -FROM ( - SELECT - ([toString(number % 2)] :: Array(LowCardinality(String))) AS item_id, - count() - FROM numbers(3) - GROUP BY item_id WITH TOTALS -) AS l FULL JOIN ( - SELECT - ([toString((number % 2) * 2)] :: Array(String)) AS item_id - FROM numbers(3) -) AS r -ON l.item_id = r.item_id -ORDER BY 1,2,3; -', 500, 8956) LIMIT 10 FORMAT NULL; From e2c78844a005aa5c04e454df6ff7e0508c967d18 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 14:53:09 +0200 Subject: [PATCH 0815/2361] Fix tests --- docker/test/stateful/run.sh | 2 +- tests/queries/0_stateless/02177_issue_31009.sql | 2 ++ .../00088_global_in_one_shard_and_rows_before_limit.sql | 2 +- tests/queries/1_stateful/00147_global_in_aggregate_function.sql | 2 +- tests/queries/1_stateful/00167_read_bytes_from_fs.sql | 2 +- .../queries/1_stateful/00182_simple_squashing_transform_bug.sql | 2 +- 6 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index bd2e38ff00f..fde8b8ae529 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -201,7 +201,7 @@ else fi clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age 
UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" # AWS S3 is very inefficient, so increase memory even further: - clickhouse-client --max_memory_usage 20G --max_memory_usage_for_user 20G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + clickhouse-client --max_memory_usage 30G --max_memory_usage_for_user 30G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" fi clickhouse-client --query "SHOW TABLES FROM test" diff --git a/tests/queries/0_stateless/02177_issue_31009.sql b/tests/queries/0_stateless/02177_issue_31009.sql index f25df59f4b4..5c62b5a9c2f 100644 --- a/tests/queries/0_stateless/02177_issue_31009.sql +++ b/tests/queries/0_stateless/02177_issue_31009.sql @@ -8,6 +8,8 @@ DROP TABLE IF EXISTS right; CREATE TABLE left ( key UInt32, value String ) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; CREATE TABLE right ( key UInt32, value String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +SET max_rows_to_read = '50M'; + INSERT INTO left SELECT number, toString(number) FROM numbers(25367182); INSERT INTO right SELECT number, toString(number) FROM numbers(23124707); diff --git a/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql index 443808e7bed..8f18f3740e4 100644 --- a/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql +++ b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql @@ -1,4 +1,4 @@ -- Tags: shard -SET output_format_write_statistics = 0, max_rows_to_read = 20_000_000; +SET output_format_write_statistics = 0, max_rows_to_read = 50_000_000; SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM 
test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact; diff --git a/tests/queries/1_stateful/00147_global_in_aggregate_function.sql b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql index c156f073573..f0b249e9af4 100644 --- a/tests/queries/1_stateful/00147_global_in_aggregate_function.sql +++ b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql @@ -1,5 +1,5 @@ -- Tags: global -SET max_rows_to_read = 40_000_000; +SET max_rows_to_read = 100_000_000; SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); diff --git a/tests/queries/1_stateful/00167_read_bytes_from_fs.sql b/tests/queries/1_stateful/00167_read_bytes_from_fs.sql index 1a98a531067..184a8edcbcb 100644 --- a/tests/queries/1_stateful/00167_read_bytes_from_fs.sql +++ b/tests/queries/1_stateful/00167_read_bytes_from_fs.sql @@ -1,6 +1,6 @@ -- Tags: no-random-settings -SET max_memory_usage = '10G' +SET max_memory_usage = '10G'; SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40; -- We had a bug which lead to additional compressed data read. test.hits compressed size is about 1.2Gb, but we read more then 3Gb. diff --git a/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql b/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql index 85bad651090..26e112cff04 100644 --- a/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql +++ b/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql @@ -1,7 +1,7 @@ -- Tags: global set allow_prefetched_read_pool_for_remote_filesystem=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0, max_threads=2, max_block_size=65387; -set max_rows_to_read = '20M'; +set max_rows_to_read = '100M'; SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); From e42e04c5230560f652c75c2c3b43efc3567cb31c Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 24 Jul 2024 12:53:52 +0000 Subject: [PATCH 0816/2361] Test --- ...llel_replicas_alter_select_ubsan.reference | 0 ...5_parallel_replicas_alter_select_ubsan.sql | 35 +++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.reference create mode 100644 tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.sql diff --git a/tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.reference b/tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.sql b/tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.sql new file mode 100644 index 00000000000..2ec9368327d --- /dev/null +++ b/tests/queries/0_stateless/03205_parallel_replicas_alter_select_ubsan.sql @@ -0,0 +1,35 @@ +SET alter_sync = 2; +SET max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = true; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t1__fuzz_26; + +CREATE TABLE t1__fuzz_26 (`a` Nullable(Float64), `b` Nullable(Float32), `pk` Int64) 
ENGINE = MergeTree ORDER BY pk; +CREATE TABLE t1 ( a Float64, b Int64, pk String) Engine = MergeTree() ORDER BY pk; + +ALTER TABLE t1 + (MODIFY COLUMN `a` Float64 TTL toDateTime(b) + toIntervalMonth(viewExplain('EXPLAIN', 'actions = 1', ( + SELECT + toIntervalMonth(1), + 2 + FROM t1__fuzz_26 + GROUP BY + toFixedString('%Prewhere%', 10), + toNullable(12) + WITH ROLLUP + )), 1)) settings allow_experimental_parallel_reading_from_replicas = 1; -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } + +ALTER TABLE t1 + (MODIFY COLUMN `a` Float64 TTL toDateTime(b) + toIntervalMonth(viewExplain('EXPLAIN', 'actions = 1', ( + SELECT + toIntervalMonth(1), + 2 + FROM t1__fuzz_26 + GROUP BY + toFixedString('%Prewhere%', 10), + toNullable(12) + WITH ROLLUP + )), 1)) settings allow_experimental_parallel_reading_from_replicas = 0; -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } + +DROP TABLE t1; +DROP TABLE t1__fuzz_26; From f37f228af9d70a04166178bac24127693d8b8c35 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 24 Jul 2024 13:06:29 +0000 Subject: [PATCH 0817/2361] Improve columns squashing for String/Array/Map/Variant/Dynamic types --- src/Columns/ColumnArray.cpp | 16 +++ src/Columns/ColumnArray.h | 1 + src/Columns/ColumnDynamic.cpp | 110 ++++++++++++++++++ src/Columns/ColumnDynamic.h | 2 + src/Columns/ColumnMap.cpp | 9 ++ src/Columns/ColumnMap.h | 1 + src/Columns/ColumnNullable.cpp | 16 +++ src/Columns/ColumnNullable.h | 1 + src/Columns/ColumnString.cpp | 15 +++ src/Columns/ColumnString.h | 1 + src/Columns/ColumnTuple.cpp | 13 +++ src/Columns/ColumnTuple.h | 1 + src/Columns/ColumnVariant.cpp | 21 +++- src/Columns/ColumnVariant.h | 1 + src/Columns/IColumn.h | 9 ++ src/Interpreters/Squashing.cpp | 27 +++-- tests/performance/insert_select_squashing.xml | 23 ++++ .../03210_dynamic_squashing.reference | 8 ++ .../0_stateless/03210_dynamic_squashing.sql | 20 ++++ 19 files changed, 285 insertions(+), 10 deletions(-) create mode 100644 tests/performance/insert_select_squashing.xml create mode 100644 tests/queries/0_stateless/03210_dynamic_squashing.reference create mode 100644 tests/queries/0_stateless/03210_dynamic_squashing.sql diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 19cce678cc7..9244d75a04d 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -452,6 +452,22 @@ void ColumnArray::reserve(size_t n) getData().reserve(n); /// The average size of arrays is not taken into account here. Or it is considered to be no more than 1. 
} +void ColumnArray::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + Columns source_data_columns; + source_data_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + { + const auto & source_array_column = assert_cast(*source_column); + new_size += source_array_column.size(); + source_data_columns.push_back(source_array_column.getDataPtr()); + } + + getOffsets().reserve_exact(new_size); + data->prepareForSquashing(source_data_columns); +} + void ColumnArray::shrinkToFit() { getOffsets().shrink_to_fit(); diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index 63affb86d9d..d6f71b72940 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -118,6 +118,7 @@ public: void updatePermutationWithCollation(const Collator & collator, PermutationSortDirection direction, PermutationSortStability stability, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index a92d54dd675..74b7ef69d8d 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -643,6 +643,116 @@ ColumnPtr ColumnDynamic::compress() const }); } +void ColumnDynamic::prepareForSquashing(const Columns & source_columns) +{ + if (source_columns.empty()) + return; + + /// Internal variants of source dynamic columns may differ. + /// We want to preallocate memory for all variants we will have after squashing. + /// It may happen that the total number of variants in source columns will + /// exceed the limit, in this case we will choose the most frequent variants. + + /// First, preallocate memory for variant discriminators and offsets. + size_t new_size = size(); + for (const auto & source_column : source_columns) + new_size += source_column->size(); + auto & variant_col = getVariantColumn(); + variant_col.getLocalDiscriminators().reserve_exact(new_size); + variant_col.getOffsets().reserve_exact(new_size); + + /// Second, collect all variants and their total sizes. + std::unordered_map total_variant_sizes; + DataTypes all_variants; + + auto add_variants = [&](const ColumnDynamic & source_dynamic) + { + const auto & source_variant_column = source_dynamic.getVariantColumn(); + const auto & source_variant_info = source_dynamic.getVariantInfo(); + const auto & source_variants = assert_cast(*source_variant_info.variant_type).getVariants(); + + for (size_t i = 0; i != source_variants.size(); ++i) + { + const auto & variant_name = source_variant_info.variant_names[i]; + auto it = total_variant_sizes.find(variant_name); + /// Add this variant to the list of all variants if we didn't see it yet. + if (it == total_variant_sizes.end()) + { + all_variants.push_back(source_variants[i]); + it = total_variant_sizes.emplace(variant_name, 0).first; + } + + it->second += source_variant_column.getVariantByGlobalDiscriminator(i).size(); + } + }; + + for (const auto & source_column : source_columns) + add_variants(assert_cast(*source_column)); + + /// Add variants from this dynamic column. + add_variants(*this); + + DataTypePtr result_variant_type; + /// Check if the number of all variants exceeds the limit. 
+ if (all_variants.size() > max_dynamic_types || (all_variants.size() == max_dynamic_types && !total_variant_sizes.contains("String"))) + { + /// We want to keep the most frequent variants in the resulting dynamic column. + DataTypes result_variants; + result_variants.reserve(max_dynamic_types); + /// Add variants from current variant column as we will not rewrite it. + for (const auto & variant : assert_cast(*variant_info.variant_type).getVariants()) + result_variants.push_back(variant); + /// Add String variant in advance (if we didn't add it yet) as we must have it across variants when we reach the limit. + if (!variant_info.variant_name_to_discriminator.contains("String")) + result_variants.push_back(std::make_shared()); + + /// Create list of remaining variants with their sizes and sort it. + std::vector> variants_with_sizes; + variants_with_sizes.reserve(all_variants.size() - variant_info.variant_names.size()); + for (const auto & variant : all_variants) + { + /// Add variant to the list only of we didn't add it yet. + auto variant_name = variant->getName(); + if (variant_name != "String" && !variant_info.variant_name_to_discriminator.contains(variant_name)) + variants_with_sizes.emplace_back(total_variant_sizes[variant->getName()], variant); + } + + std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater()); + /// Add the most frequent variants until we reach max_dynamic_types. + size_t num_new_variants = max_dynamic_types - result_variants.size(); + for (size_t i = 0; i != num_new_variants; ++i) + result_variants.push_back(variants_with_sizes[i].second); + + result_variant_type = std::make_shared(result_variants); + } + else + { + result_variant_type = std::make_shared(all_variants); + } + + if (!result_variant_type->equals(*variant_info.variant_type)) + updateVariantInfoAndExpandVariantColumn(result_variant_type); + + /// Now current dynamic column has all resulting variants and we can call + /// prepareForSquashing on them to preallocate the memory. + for (size_t i = 0; i != variant_info.variant_names.size(); ++i) + { + Columns source_variant_columns; + source_variant_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + { + const auto & source_dynamic_column = assert_cast(*source_column); + const auto & source_variant_info = source_dynamic_column.getVariantInfo(); + /// Try to find this variant in the current source column. 
+ auto it = source_variant_info.variant_name_to_discriminator.find(variant_info.variant_names[i]); + if (it != source_variant_info.variant_name_to_discriminator.end()) + source_variant_columns.push_back(source_dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(it->second)); + } + + variant_col.getVariantByGlobalDiscriminator(i).prepareForSquashing(source_variant_columns); + } +} + void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index e92cabd3db9..cb3a896d2cb 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -254,6 +254,8 @@ public: variant_column->reserve(n); } + void prepareForSquashing(const Columns & source_columns) override; + void ensureOwnership() override { variant_column->ensureOwnership(); diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 1025b4e77b9..3bab20dfbf2 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -249,6 +249,15 @@ void ColumnMap::reserve(size_t n) nested->reserve(n); } +void ColumnMap::prepareForSquashing(const Columns & source_columns) +{ + Columns nested_source_columns; + nested_source_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + nested_source_columns.push_back(assert_cast(*source_column).getNestedColumnPtr()); + nested->prepareForSquashing(nested_source_columns); +} + void ColumnMap::shrinkToFit() { nested->shrinkToFit(); diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 3eaaa0ad562..191476839f1 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -94,6 +94,7 @@ public: void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 6529f0b78db..2a25cac6461 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -706,6 +706,22 @@ void ColumnNullable::reserve(size_t n) getNullMapData().reserve(n); } +void ColumnNullable::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + Columns nested_source_columns; + nested_source_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + { + const auto & source_nullable_column = assert_cast(*source_column); + new_size += source_nullable_column.size(); + nested_source_columns.push_back(source_nullable_column.getNestedColumnPtr()); + } + + nested_column->prepareForSquashing(nested_source_columns); + getNullMapData().reserve(new_size); +} + void ColumnNullable::shrinkToFit() { getNestedColumn().shrinkToFit(); diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index fe9f5b6dcc2..2c32e0fe5a0 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -125,6 +125,7 @@ public: size_t limit, int null_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override; void reserve(size_t n) override; + void prepareForSquashing(const 
Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 7cfa2571f5a..9ed2c7e3d4d 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -557,6 +557,21 @@ void ColumnString::reserve(size_t n) offsets.reserve_exact(n); } +void ColumnString::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + size_t new_chars_size = chars.size(); + for (const auto & source_column : source_columns) + { + const auto & source_string_column = assert_cast(*source_column); + new_size += source_string_column.size(); + new_chars_size += source_string_column.chars.size(); + } + + offsets.reserve_exact(new_size); + chars.reserve_exact(new_chars_size); +} + void ColumnString::shrinkToFit() { chars.shrink_to_fit(); diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h index c1012e1e55e..20cd950fe9b 100644 --- a/src/Columns/ColumnString.h +++ b/src/Columns/ColumnString.h @@ -283,6 +283,7 @@ public: ColumnPtr compress() const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void getExtremes(Field & min, Field & max) const override; diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 4fc3f88a87c..c6ee7d775ae 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -595,6 +595,19 @@ void ColumnTuple::reserve(size_t n) getColumn(i).reserve(n); } +void ColumnTuple::prepareForSquashing(const Columns & source_columns) +{ + const size_t tuple_size = columns.size(); + for (size_t i = 0; i < tuple_size; ++i) + { + Columns nested_columns; + nested_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + nested_columns.push_back(assert_cast(*source_column).getColumnPtr(i)); + getColumn(i).prepareForSquashing(nested_columns); + } +} + void ColumnTuple::shrinkToFit() { const size_t tuple_size = columns.size(); diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 16b47a993f6..ef396d6a130 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -110,6 +110,7 @@ public: void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index de7efb41d19..68e19861c38 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -1247,8 +1247,25 @@ void ColumnVariant::updatePermutation(IColumn::PermutationSortDirection directio void ColumnVariant::reserve(size_t n) { - local_discriminators->reserve(n); - offsets->reserve(n); + getLocalDiscriminators().reserve_exact(n); + getOffsets().reserve_exact(n); +} + +void ColumnVariant::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + for (const auto & source_column : source_columns) + new_size += source_column->size(); + reserve(new_size); + + for (size_t i = 0; i != variants.size(); ++i) + { + Columns source_variant_columns; + 
source_variant_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + source_variant_columns.push_back(assert_cast(*source_column).getVariantPtrByGlobalDiscriminator(i)); + getVariantByGlobalDiscriminator(i).prepareForSquashing(source_variant_columns); + } } void ColumnVariant::ensureOwnership() diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 34c24b5428d..737eb27abfe 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -237,6 +237,7 @@ public: size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void ensureOwnership() override; size_t byteSize() const override; size_t byteSizeAt(size_t n) const override; diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index f9c1a3e7034..edcb9f0bc30 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -475,6 +475,15 @@ public: /// It affects performance only (not correctness). virtual void reserve(size_t /*n*/) {} + /// Reserve memory before squashing all specified source columns into this column. + virtual void prepareForSquashing(const std::vector & source_columns) + { + size_t new_size = size(); + for (const auto & source_column : source_columns) + new_size += source_column->size(); + reserve(new_size); + } + /// Requests the removal of unused capacity. /// It is a non-binding request to reduce the capacity of the underlying container to its size. virtual void shrinkToFit() {} diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index 488177c3b4f..5cd40974c45 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -5,7 +5,6 @@ #include #include - namespace DB { @@ -114,20 +113,32 @@ Chunk Squashing::squash(std::vector && input_chunks, Chunk::ChunkInfoColl { auto & first_chunk = input_chunks[0]; Columns columns = first_chunk.detachColumns(); + mutable_columns.reserve(columns.size()); for (auto & column : columns) - { mutable_columns.push_back(IColumn::mutate(std::move(column))); - mutable_columns.back()->reserve(rows); - } } + size_t num_columns = mutable_columns.size(); + /// Collect the list of source columns for each column. + std::vector source_columns_list(num_columns, Columns{}); + for (size_t i = 0; i != num_columns; ++i) + source_columns_list[i].reserve(input_chunks.size() - 1); + for (size_t i = 1; i < input_chunks.size(); ++i) // We've already processed the first chunk above { - Columns columns = input_chunks[i].detachColumns(); - for (size_t j = 0, size = mutable_columns.size(); j < size; ++j) + auto columns = input_chunks[i].detachColumns(); + for (size_t j = 0; j != num_columns; ++j) + source_columns_list[j].emplace_back(std::move(columns[j])); + } + + for (size_t i = 0; i != num_columns; ++i) + { + /// We know all the data we will insert in advance and can make all necessary pre-allocations. 
+ mutable_columns[i]->prepareForSquashing(source_columns_list[i]); + for (auto & source_column : source_columns_list[i]) { - const auto source_column = columns[j]; - mutable_columns[j]->insertRangeFrom(*source_column, 0, source_column->size()); + auto column = std::move(source_column); + mutable_columns[i]->insertRangeFrom(*column, 0, column->size()); } } diff --git a/tests/performance/insert_select_squashing.xml b/tests/performance/insert_select_squashing.xml new file mode 100644 index 00000000000..4c2c88f3d22 --- /dev/null +++ b/tests/performance/insert_select_squashing.xml @@ -0,0 +1,23 @@ + + + 1000 + + + +CREATE TABLE squash_performance +( + s1 String, + s2 Nullable(String), + a1 Array(Array(String)), + a2 Array(Array(UInt32)), + m1 Map(String, Array(String)), + m2 Map(String, Array(UInt64)), + t Tuple(String, Array(String), Map(String, String)) +) +ENGINE = Null; + + + INSERT INTO squash_performance SELECT * FROM generateRandom(42) LIMIT 500000 + + DROP TABLE IF EXISTS squash_performance + diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.reference b/tests/queries/0_stateless/03210_dynamic_squashing.reference new file mode 100644 index 00000000000..4f5b5ba098c --- /dev/null +++ b/tests/queries/0_stateless/03210_dynamic_squashing.reference @@ -0,0 +1,8 @@ +Array(UInt8) +None +UInt64 +None +String +UInt64 +String +UInt64 diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.sql b/tests/queries/0_stateless/03210_dynamic_squashing.sql new file mode 100644 index 00000000000..23b47184e33 --- /dev/null +++ b/tests/queries/0_stateless/03210_dynamic_squashing.sql @@ -0,0 +1,20 @@ +set allow_experimental_dynamic_type = 1; +set max_block_size = 1000; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=2), number < 3000, range(number % 5)::Dynamic(max_types=2), number::Dynamic(max_types=2)) from numbers(1000000); +select distinct dynamicType(d) as type from test order by type; + +drop table test; +create table test (d Dynamic(max_types=2)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=2), number < 3000, range(number % 5)::Dynamic(max_types=2), number::Dynamic(max_types=2)) from numbers(1000000); +select distinct dynamicType(d) as type from test order by type; + +truncate table test; +insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=2), number < 3000, range(number % 5)::Dynamic(max_types=2), number::Dynamic(max_types=2)) from numbers(1000000); +select distinct dynamicType(d) as type from test order by type; + +drop table test; + From 80304b59626c2cf2d9cbe3ba39058b9dfd22ddd9 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 24 Jul 2024 15:13:10 +0200 Subject: [PATCH 0818/2361] Less memory-intensive fill queries for decimal aggregates --- tests/performance/decimal_aggregates.xml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml index ec88be0124f..724d0c5d0e6 100644 --- a/tests/performance/decimal_aggregates.xml +++ b/tests/performance/decimal_aggregates.xml @@ -4,8 +4,13 @@ CREATE TABLE t (x UInt64, d32 Decimal32(3), d64 Decimal64(4), d128 Decimal128(5)) ENGINE = Memory - - INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(500000000) SETTINGS max_threads = 8 + + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 
FROM numbers_mt(100000000) SETTINGS max_threads = 2 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000, 100000000) SETTINGS max_threads = 2 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000, 100000000) SETTINGS max_threads = 2 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(300000000, 100000000) SETTINGS max_threads = 2 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(400000000, 100000000) SETTINGS max_threads = 2 + DROP TABLE IF EXISTS t SELECT min(d32), max(d32), argMin(x, d32), argMax(x, d32) FROM t From a03a59a3bfd2dc55f1f04197cad36d147edb3ee1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 24 Jul 2024 15:14:02 +0200 Subject: [PATCH 0819/2361] Use less data overall --- tests/performance/decimal_aggregates.xml | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml index 724d0c5d0e6..9fc94f01a4d 100644 --- a/tests/performance/decimal_aggregates.xml +++ b/tests/performance/decimal_aggregates.xml @@ -8,8 +8,6 @@ INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000) SETTINGS max_threads = 2 INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000, 100000000) SETTINGS max_threads = 2 INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000, 100000000) SETTINGS max_threads = 2 - INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(300000000, 100000000) SETTINGS max_threads = 2 - INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(400000000, 100000000) SETTINGS max_threads = 2 DROP TABLE IF EXISTS t From 3c6c9303c3046d780ef01ce0fcb87f03b004162a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 24 Jul 2024 13:14:43 +0000 Subject: [PATCH 0820/2361] disable setting optimize_functions_to_subcolumns --- src/Core/Settings.h | 2 +- tests/clickhouse-test | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 3d181e33001..fbb7663b612 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -602,7 +602,7 @@ class IColumn; M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \ M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \ M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \ - M(Bool, optimize_functions_to_subcolumns, true, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ + M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 
'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \ M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \ M(Bool, optimize_append_index, false, "Use constraints in order to append index condition (indexHint)", 0) \ diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 0c04d8fb2c3..a29c786e998 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -835,6 +835,7 @@ class SettingsRandomizer: "cross_join_min_bytes_to_compress": lambda: random.choice([0, 1, 100000000]), "min_external_table_block_size_bytes": lambda: random.choice([0, 1, 100000000]), "max_parsing_threads": lambda: random.choice([0, 1, 10]), + "optimize_functions_to_subcolumns": lambda: random.randint(0, 1), } @staticmethod From 5d14d823e3882887ab8238c6ee5778f552cce835 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 24 Jul 2024 13:27:19 +0000 Subject: [PATCH 0821/2361] remove setting from changes history --- src/Core/SettingsChangesHistory.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index de4725dc350..c395bfdc815 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -63,7 +63,6 @@ static std::initializer_list Date: Wed, 24 Jul 2024 15:33:59 +0200 Subject: [PATCH 0822/2361] Bump From 535c872b39b7f593e9e63e614a78d39cde5dd4d9 Mon Sep 17 00:00:00 2001 From: Yohann Jardin Date: Wed, 24 Jul 2024 15:46:48 +0200 Subject: [PATCH 0823/2361] empty comit From c5d262c23b4ee289a374ae5d817a101bd44154c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 24 Jul 2024 13:55:59 +0000 Subject: [PATCH 0824/2361] Increase max allocation size for sanitizers --- docker/test/base/Dockerfile | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 2317f84e0cb..79ed871b822 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -23,15 +23,16 @@ RUN apt-get update \ # and MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB). # TSAN will flush shadow memory when reaching this limit. # It may cause false-negatives, but it's better than OOM. 
-RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment -RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment -RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment -RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment +# max_allocation_size_mb is set to 32GB, so we have much bigger chance to run into memory limit than the limitation of the sanitizers +RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768'" >> /etc/environment +RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment +RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment +RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run") # (but w/o verbosity for TSAN, otherwise test.reference will not match) -ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1' -ENV UBSAN_OPTIONS='print_stacktrace=1' -ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1' +ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768' +ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768' +ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768' # for external_symbolizer_path RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer From e498b86f50dbba8ac8e50f832547850c7cc29c6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 24 Jul 2024 14:00:28 +0000 Subject: [PATCH 0825/2361] Add option for LSAN in shell also --- docker/test/base/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 79ed871b822..a81826ed6b5 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -33,6 +33,7 @@ RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_supp ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768' ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768' ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768' +ENV LSAN_OPTIONS='max_allocation_size_mb=32768' # for external_symbolizer_path RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer From 37c345bb4925095da3e82e3fc3ed27072786d7e7 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 24 Jul 2024 16:01:21 +0200 Subject: [PATCH 0826/2361] rewrite 01171 test --- ..._mv_select_insert_isolation_long.reference | 4 - .../01171_mv_select_insert_isolation_long.sh | 229 ++++++++++++------ 2 files changed, 152 insertions(+), 81 deletions(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference index d8bb9e310e6..e69de29bb2d 100644 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference +++ 
b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.reference @@ -1,4 +0,0 @@ -275 0 138 136 0 -275 0 -275 0 138 136 0 -275 0 diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 2ab7f883367..f6850864be5 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-ordinary-database, no-debug +# Tags: long, no-ordinary-database # Test is too heavy, avoid parallel run in Flaky Check # shellcheck disable=SC2119 @@ -7,82 +7,125 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -set -e +set -ue $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS src"; $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS dst"; $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS mv"; $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS tmp"; -$CLICKHOUSE_CLIENT --query "CREATE TABLE src (n Int8, m Int8, CONSTRAINT c CHECK xxHash32(n+m) % 8 != 0) ENGINE=MergeTree ORDER BY n PARTITION BY 0 < n SETTINGS old_parts_lifetime=0"; -$CLICKHOUSE_CLIENT --query "CREATE TABLE dst (nm Int16, CONSTRAINT c CHECK xxHash32(nm) % 8 != 0) ENGINE=MergeTree ORDER BY nm SETTINGS old_parts_lifetime=0"; -$CLICKHOUSE_CLIENT --query "CREATE MATERIALIZED VIEW mv TO dst (nm Int16) AS SELECT n*m AS nm FROM src"; -$CLICKHOUSE_CLIENT --query "CREATE TABLE tmp (x UInt8, nm Int16) ENGINE=MergeTree ORDER BY (x, nm) SETTINGS old_parts_lifetime=0" +$CLICKHOUSE_CLIENT --query "CREATE TABLE src (n Int32, m Int32, CONSTRAINT c CHECK xxHash32(n+m) % 8 != 0) ENGINE=MergeTree ORDER BY n PARTITION BY 0 < n SETTINGS old_parts_lifetime=0"; +$CLICKHOUSE_CLIENT --query "CREATE TABLE dst (nm Int32, CONSTRAINT c CHECK xxHash32(nm) % 8 != 0) ENGINE=MergeTree ORDER BY nm SETTINGS old_parts_lifetime=0"; +$CLICKHOUSE_CLIENT --query "CREATE MATERIALIZED VIEW mv TO dst (nm Int32) AS SELECT n*m AS nm FROM src"; + +$CLICKHOUSE_CLIENT --query "CREATE TABLE tmp (x UInt32, nm Int32) ENGINE=MergeTree ORDER BY (x, nm) SETTINGS old_parts_lifetime=0" $CLICKHOUSE_CLIENT --query "INSERT INTO src VALUES (0, 0)" -# some transactions will fail due to constraint -function thread_insert_commit() +function get_now() { - set -e - for i in {1..100}; do - $CLICKHOUSE_CLIENT --multiquery --query " - BEGIN TRANSACTION; - INSERT INTO src VALUES /* ($i, $1) */ ($i, $1); - SELECT throwIf((SELECT sum(nm) FROM mv) != $(($i * $1))) FORMAT Null; - INSERT INTO src VALUES /* (-$i, $1) */ (-$i, $1); - COMMIT;" 2>&1| grep -Fv "is violated at row" | grep -Fv "Transaction is not in RUNNING state" | grep -F "Received from " ||: - done + date +%s } -function thread_insert_rollback() +is_pid_exist() +{ + local pid=$1 + ps -p $pid > /dev/null +} + +function run_until_deadline_and_at_least_times() { set -e - for _ in {1..100}; do - $CLICKHOUSE_CLIENT --multiquery --query " - BEGIN TRANSACTION; - INSERT INTO src VALUES /* (42, $1) */ (42, $1); - SELECT throwIf((SELECT count() FROM src WHERE n=42 AND m=$1) != 1) FORMAT Null; - ROLLBACK;" + + local deadline=$1; shift + local min_iterations=$1; shift + local function_to_run=$1; shift + + local started_time=$(get_now) + local i=0 + + while true + do + $function_to_run $i $@ + + [[ $(get_now) -lt $deadline ]] || break + + i=$(($i + 1)) done + + [[ $i -gt $min_iterations ]] || echo "$i/$min_iterations : not enough iterations of $function_to_run 
has been made from $started_time until $deadline" >&2 +} + +function insert_commit_action() +{ + set -e + + local i=$1; shift + local tag=$1; shift + + # some transactions will fail due to constraint + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO src VALUES /* ($i, $tag) */ ($i, $tag); + SELECT throwIf((SELECT sum(nm) FROM mv) != $(($i * $tag))) /* ($i, $tag) */ FORMAT Null; + INSERT INTO src VALUES /* (-$i, $tag) */ (-$i, $tag); + COMMIT; + " 2>&1 \ + | grep -Fv "is violated at row" | grep -Fv "Transaction is not in RUNNING state" | grep -F "Received from " ||: +} + + +function insert_rollback_action() +{ + set -e + + local i=$1; shift + local tag=$1; shift + + $CLICKHOUSE_CLIENT --multiquery --query " + BEGIN TRANSACTION; + INSERT INTO src VALUES /* (42, $tag) */ (42, $tag); + SELECT throwIf((SELECT count() FROM src WHERE n=42 AND m=$tag) != 1) FORMAT Null; + ROLLBACK;" } # make merges more aggressive -function thread_optimize() +function optimize_action() { set -e - while true; do - optimize_query="OPTIMIZE TABLE src" - partition_id=$(( RANDOM % 2 )) - if (( RANDOM % 2 )); then - optimize_query="OPTIMIZE TABLE dst" - partition_id="all" - fi - if (( RANDOM % 2 )); then - optimize_query="$optimize_query PARTITION ID '$partition_id'" - fi - if (( RANDOM % 2 )); then - optimize_query="$optimize_query FINAL" - fi - action="COMMIT" - if (( RANDOM % 4 )); then - action="ROLLBACK" - fi - $CLICKHOUSE_CLIENT --multiquery --query " + optimize_query="OPTIMIZE TABLE src" + partition_id=$(( RANDOM % 2 )) + if (( RANDOM % 2 )); then + optimize_query="OPTIMIZE TABLE dst" + partition_id="all" + fi + if (( RANDOM % 2 )); then + optimize_query="$optimize_query PARTITION ID '$partition_id'" + fi + if (( RANDOM % 2 )); then + optimize_query="$optimize_query FINAL" + fi + action="COMMIT" + if (( RANDOM % 4 )); then + action="ROLLBACK" + fi + + $CLICKHOUSE_CLIENT --multiquery --query " BEGIN TRANSACTION; - $optimize_query; + $optimize_query; $action; - " 2>&1| grep -Fv "already exists, but it will be deleted soon" | grep -F "Received from " ||: - sleep 0.$RANDOM; - done + " 2>&1 \ + | grep -Fv "already exists, but it will be deleted soon" | grep -F "Received from " ||: + + sleep 0.$RANDOM; } -function thread_select() +function select_action() { set -e - while true; do - $CLICKHOUSE_CLIENT --multiquery --query " + + $CLICKHOUSE_CLIENT --multiquery --query " BEGIN TRANSACTION; SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; @@ -90,14 +133,13 @@ function thread_select() SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(nm)) FROM dst)) FORMAT Null; SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(n*m)) FROM src)) FORMAT Null; COMMIT;" - done } -function thread_select_insert() +function select_insert_action() { set -e - while true; do - $CLICKHOUSE_CLIENT --multiquery --query " + + $CLICKHOUSE_CLIENT --multiquery --query " BEGIN TRANSACTION; SELECT throwIf((SELECT count() FROM tmp) != 0) FORMAT Null; INSERT INTO tmp SELECT 1, n*m FROM src; @@ -110,36 +152,69 @@ function thread_select_insert() SELECT throwIf(1 != (SELECT countDistinct(arr) FROM (SELECT x, arraySort(groupArray(nm)) AS arr FROM tmp WHERE x!=4 GROUP BY x))) FORMAT Null; SELECT throwIf((SELECT count(), sum(nm) FROM tmp WHERE x=4) != (SELECT count(), sum(nm) FROM tmp WHERE x!=4)) FORMAT Null; ROLLBACK;" - done } -thread_insert_commit 
1 & PID_1=$! -thread_insert_commit 2 & PID_2=$! -thread_insert_rollback 3 & PID_3=$! +MAIN_TIME_PART=400 +SECOND_TIME_PART=30 +WAIT_FINISH=60 +LAST_TIME_GAP=10 -thread_optimize & PID_4=$! -thread_select & PID_5=$! -thread_select_insert & PID_6=$! -sleep 0.$RANDOM; -thread_select & PID_7=$! -thread_select_insert & PID_8=$! +if [[ $((MAIN_TIME_PART + SECOND_TIME_PART + WAIT_FINISH + LAST_TIME_GAP)) -ge 600 ]]; then + echo "time sttings are wrong" 2>&1 + exit 1 +fi -wait $PID_1 && wait $PID_2 && wait $PID_3 -kill -TERM $PID_4 -kill -TERM $PID_5 -kill -TERM $PID_6 -kill -TERM $PID_7 -kill -TERM $PID_8 -wait -wait_for_queries_to_finish 40 +START_TIME=$(get_now) +STOP_TIME=$((START_TIME + MAIN_TIME_PART)) +SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) +MIN_ITERATIONS=50 + +run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 1 & PID_1=$! +run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 2 & PID_2=$! +run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_rollback_action 3 & PID_3=$! + +run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS optimize_action & PID_4=$! +run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_action & PID_5=$! +run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_insert_action & PID_6=$! +sleep 0.$RANDOM +run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_action & PID_7=$! +run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_insert_action & PID_8=$! + +wait $PID_1 || echo "insert_commit_action has failed with status $?" 2>&1 +wait $PID_2 || echo "second insert_commit_action has failed with status $?" 2>&1 +wait $PID_3 || echo "insert_rollback_action has failed with status $?" 2>&1 + +is_pid_exist $PID_4 || echo "optimize_action is not running" 2>&1 +is_pid_exist $PID_5 || echo "select_action is not running" 2>&1 +is_pid_exist $PID_6 || echo "select_insert_action is not running" 2>&1 +is_pid_exist $PID_7 || echo "second select_action is not running" 2>&1 +is_pid_exist $PID_8 || echo "second select_insert_action is not running" 2>&1 + +wait $PID_4 || echo "optimize_action has failed with status $?" 2>&1 +wait $PID_5 || echo "select_action has failed with status $?" 2>&1 +wait $PID_6 || echo "select_insert_action has failed with status $?" 2>&1 +wait $PID_7 || echo "second select_action has failed with status $?" 2>&1 +wait $PID_8 || echo "second select_insert_action has failed with status $?" 
2>&1 + +wait_for_queries_to_finish $WAIT_FINISH $CLICKHOUSE_CLIENT --multiquery --query " -BEGIN TRANSACTION; -SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM src; -SELECT count(), sum(nm) FROM mv"; + BEGIN TRANSACTION; + SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT (sum(nm), count() % 2) FROM dst) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(nm)) FROM dst)) FORMAT Null; + SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(n*m)) FROM src)) FORMAT Null; + COMMIT; +" -$CLICKHOUSE_CLIENT --query "SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM src" -$CLICKHOUSE_CLIENT --query "SELECT count(), sum(nm) FROM mv" +$CLICKHOUSE_CLIENT --multiquery --query " + SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT (sum(nm), count() % 2) FROM dst) != (0, 1)) FORMAT Null; + SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(nm)) FROM dst)) FORMAT Null; + SELECT throwIf((SELECT arraySort(groupArray(nm)) FROM mv) != (SELECT arraySort(groupArray(n*m)) FROM src)) FORMAT Null; +" $CLICKHOUSE_CLIENT --query "DROP TABLE src"; $CLICKHOUSE_CLIENT --query "DROP TABLE dst"; From 62d956f8a882f74a930340cce1650babc13bf7a1 Mon Sep 17 00:00:00 2001 From: zoomxi <419486879@qq.com> Date: Wed, 24 Jul 2024 22:39:30 +0800 Subject: [PATCH 0827/2361] remove unused file ParallelReplicasReadingCoordinator.h --- src/Interpreters/ClusterProxy/executeQuery.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 59f095f7487..d04a73e384e 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include From 4ab4b6729224806e7d34f1bee91a21dcd1818e8f Mon Sep 17 00:00:00 2001 From: Justin de Guzman Date: Wed, 24 Jul 2024 07:59:34 -0700 Subject: [PATCH 0828/2361] Update link for JSON schema inference --- docs/en/getting-started/example-datasets/nypd_complaint_data.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/getting-started/example-datasets/nypd_complaint_data.md b/docs/en/getting-started/example-datasets/nypd_complaint_data.md index a178fe456a6..1caa1ab6a9a 100644 --- a/docs/en/getting-started/example-datasets/nypd_complaint_data.md +++ b/docs/en/getting-started/example-datasets/nypd_complaint_data.md @@ -55,7 +55,7 @@ CMPLNT_FR_TM Nullable(String) ``` :::tip -Most of the time the above command will let you know which fields in the input data are numeric, and which are strings, and which are tuples. This is not always the case. Because ClickHouse is routineley used with datasets containing billions of records there is a default number (100) of rows examined to [infer the schema](/docs/en/integrations/data-ingestion/data-formats/json.md#relying-on-schema-inference) in order to avoid parsing billions of rows to infer the schema. The response below may not match what you see, as the dataset is updated several times each year. Looking at the Data Dictionary you can see that CMPLNT_NUM is specified as text, and not numeric. 
By overriding the default of 100 rows for inference with the setting `SETTINGS input_format_max_rows_to_read_for_schema_inference=2000` +Most of the time the above command will let you know which fields in the input data are numeric, and which are strings, and which are tuples. This is not always the case. Because ClickHouse is routineley used with datasets containing billions of records there is a default number (100) of rows examined to [infer the schema](/en/integrations/data-formats/json/inference) in order to avoid parsing billions of rows to infer the schema. The response below may not match what you see, as the dataset is updated several times each year. Looking at the Data Dictionary you can see that CMPLNT_NUM is specified as text, and not numeric. By overriding the default of 100 rows for inference with the setting `SETTINGS input_format_max_rows_to_read_for_schema_inference=2000` you can get a better idea of the content. Note: as of version 22.5 the default is now 25,000 rows for inferring the schema, so only change the setting if you are on an older version or if you need more than 25,000 rows to be sampled. From f03d4bb7d5d40203bba68c4f8958d584f27ae881 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Wed, 24 Jul 2024 15:07:53 +0000 Subject: [PATCH 0829/2361] Format with black --- .../integration/test_storage_s3_queue/test.py | 24 ++++++++++++------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index e178b3b6608..4348857acd3 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -823,7 +823,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): def test_max_set_age(started_cluster): node = started_cluster.instances["instance"] - table_name = f"max_set_age_{uuid4().hex}" + table_name = "max_set_age" dst_table_name = f"{table_name}_dst" keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" @@ -848,9 +848,7 @@ def test_max_set_age(started_cluster): ) create_mv(node, table_name, dst_table_name) - _ = generate_random_files( - started_cluster, files_path, files_to_generate, row_num=1 - ) + _ = generate_random_files(started_cluster, files_path, files_to_generate, row_num=1) expected_rows = files_to_generate @@ -869,13 +867,17 @@ def test_max_set_age(started_cluster): assert False wait_for_condition(lambda: get_count() == expected_rows) - assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) + assert files_to_generate == int( + node.query(f"SELECT uniq(_path) from {dst_table_name}") + ) time.sleep(max_age + 5) expected_rows *= 2 wait_for_condition(lambda: get_count() == expected_rows) - assert files_to_generate == int(node.query(f"SELECT uniq(_path) from {dst_table_name}")) + assert files_to_generate == int( + node.query(f"SELECT uniq(_path) from {dst_table_name}") + ) paths_count = [ int(x) @@ -888,9 +890,11 @@ def test_max_set_age(started_cluster): assert 2 == path_count def get_object_storage_failures(): - return int(node.query( - "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" - )) + return int( + node.query( + "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" + ) + ) failed_count = get_object_storage_failures() @@ -900,6 +904,8 @@ def test_max_set_age(started_cluster): 
values_csv = ( "\n".join((",".join(map(str, row)) for row in values)) + "\n" ).encode() + + # use a different filename for each test to allow running a bunch of them sequentially with --count file_with_error = f"fff_{uuid4().hex}.csv" put_s3_file_content(started_cluster, f"{files_path}/{file_with_error}", values_csv) From f990f235da10b72e1625d007563761da52067753 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 24 Jul 2024 17:20:19 +0200 Subject: [PATCH 0830/2361] Increase timeout for test_broken_part_during_merge --- tests/integration/test_broken_part_during_merge/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_broken_part_during_merge/test.py b/tests/integration/test_broken_part_during_merge/test.py index 19c22201fb0..0ba7beeb1fd 100644 --- a/tests/integration/test_broken_part_during_merge/test.py +++ b/tests/integration/test_broken_part_during_merge/test.py @@ -54,7 +54,7 @@ def test_merge_and_part_corruption(started_cluster): with Pool(1) as p: def optimize_with_delay(x): - node1.query("OPTIMIZE TABLE replicated_mt FINAL", timeout=30) + node1.query("OPTIMIZE TABLE replicated_mt FINAL", timeout=120) # corrupt part after merge already assigned, but not started res_opt = p.apply_async(optimize_with_delay, (1,)) @@ -70,7 +70,7 @@ def test_merge_and_part_corruption(started_cluster): node1.query( "ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", settings={"mutations_sync": 2}, - timeout=30, + timeout=120, ) assert node1.query("SELECT sum(value) FROM replicated_mt") == "2100000\n" From 1cca1d4ba3c9f020f21e684423ace943d863bb0f Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 24 Jul 2024 17:27:18 +0200 Subject: [PATCH 0831/2361] bump From 8d4b919bf4d02c09399296e5213525f78bd68a21 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 24 Jul 2024 17:45:40 +0200 Subject: [PATCH 0832/2361] Fix detaching broken parts from backup. --- .../MergeTree/DataPartStorageOnDiskBase.cpp | 37 ++++++++++++++++--- .../MergeTree/DataPartStorageOnDiskBase.h | 3 ++ 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp index 378a1944396..df151e8478f 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp @@ -73,12 +73,7 @@ std::optional DataPartStorageOnDiskBase::getRelativePathForPrefix(Logger for (int try_no = 0; try_no < 10; ++try_no) { - if (prefix.empty()) - res = part_dir + (try_no ? "_try" + DB::toString(try_no) : ""); - else if (prefix.ends_with("_")) - res = prefix + part_dir + (try_no ? "_try" + DB::toString(try_no) : ""); - else - res = prefix + "_" + part_dir + (try_no ? "_try" + DB::toString(try_no) : ""); + res = getPartDirForPrefix(prefix, detached, try_no); if (!volume->getDisk()->exists(full_relative_path / res)) return res; @@ -101,6 +96,36 @@ std::optional DataPartStorageOnDiskBase::getRelativePathForPrefix(Logger return res; } +String DataPartStorageOnDiskBase::getPartDirForPrefix(const String & prefix, bool detached, int try_no) const +{ + /// This function joins `prefix` and the part name and an attempt number returning something like "__". + String res = prefix; + if (!prefix.empty() && !prefix.ends_with("_")) + res += "_"; + + /// During RESTORE temporary part directories are created with names like "tmp_restore_all_2_2_0-XXXXXXXX". 
+ /// To detach such a directory we need to rename it replacing "tmp_restore_" with a specified prefix, + /// and a random suffix with an attempt number. + String part_name; + if (detached && part_dir.starts_with("tmp_restore_")) + { + part_name = part_dir.substr(strlen("tmp_restore_")); + size_t endpos = part_name.find('-'); + if (endpos != String::npos) + part_name.erase(endpos, String::npos); + } + + if (!part_name.empty()) + res += part_name; + else + res += part_dir; + + if (try_no) + res += "_try" + DB::toString(try_no); + + return res; +} + bool DataPartStorageOnDiskBase::looksLikeBrokenDetachedPartHasTheSameContent(const String & detached_part_path, std::optional & original_checksums_content, std::optional & original_files_list) const diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.h b/src/Storages/MergeTree/DataPartStorageOnDiskBase.h index 81353d4e20b..1707efc2d4d 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.h +++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.h @@ -148,6 +148,9 @@ private: /// Actual file name may be the same as expected /// or be the name of the file with packed data. virtual NameSet getActualFileNamesOnDisk(const NameSet & file_names) const = 0; + + /// Returns the destination path for the part directory while copying a detached part. + String getPartDirForPrefix(const String & prefix, bool detached, int try_no) const; }; } From c3620391b0befaf30eea0eab9001cc98fd5eeecc Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 24 Jul 2024 17:48:07 +0200 Subject: [PATCH 0833/2361] fix style --- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index f6850864be5..718017bca3d 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -41,12 +41,13 @@ function run_until_deadline_and_at_least_times() local min_iterations=$1; shift local function_to_run=$1; shift - local started_time=$(get_now) + local started_time + started_time=$(get_now) local i=0 while true do - $function_to_run $i $@ + $function_to_run $i "$@" [[ $(get_now) -lt $deadline ]] || break From fa48ff0ca1561f20eb5abb285f400a8faf664d0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 24 Jul 2024 15:49:17 +0000 Subject: [PATCH 0834/2361] Separate tests --- tests/integration/test_storage_s3/test.py | 52 ++++++++++------------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index d13605170ec..dae98daf0cf 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1148,31 +1148,27 @@ def test_url_reconnect_in_the_middle(started_cluster): assert result == "1000000\t3914219105369203805\n" -def test_seekable_formats(started_cluster): - bucket = started_cluster.minio_bucket + +# At the time of writing the actual read bytes are respectively 148 and 169, so -10% to not be flaky +@pytest.mark.parametrize("format_name,expected_bytes_read", [("Parquet", 133), ("ORC", 150)]) +def test_seekable_formats(started_cluster, format_name, expected_bytes_read): + expected_lines=1500000 instance = started_cluster.instances["dummy"] # type: ClickHouseInstance - table_function = f"s3(s3_parquet, structure='a 
Int32, b String', format='Parquet')" + table_function = f"s3(s3_{format_name.lower()}, structure='a Int32, b String', format='{format_name}')" exec_query_with_retry( instance, - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1000000) settings s3_truncate_on_insert=1", - timeout=100, + f"INSERT INTO TABLE FUNCTION {table_function} SELECT number, randomString(100) FROM numbers({expected_lines}) settings s3_truncate_on_insert=1", + timeout=300, ) result = instance.query(f"SELECT count() FROM {table_function}") - assert int(result) == 1000000 - - table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')" - exec_query_with_retry( - instance, - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1500000) settings s3_truncate_on_insert=1", - timeout=100, - ) + assert int(result) == expected_lines result = instance.query( f"SELECT count() FROM {table_function} SETTINGS max_memory_usage='60M', max_download_threads=1" ) - assert int(result) == 1500000 + assert int(result) == expected_lines instance.query(f"SELECT * FROM {table_function} FORMAT Null") @@ -1183,35 +1179,31 @@ def test_seekable_formats(started_cluster): result = result.strip() assert result.endswith("MiB") result = result[: result.index(".")] - assert int(result) > 150 + assert int(result) > 140 -def test_seekable_formats_url(started_cluster): +@pytest.mark.parametrize("format_name", ["Parquet", "ORC"]) +def test_seekable_formats_url(started_cluster, format_name): bucket = started_cluster.minio_bucket + expected_lines=1500000 instance = started_cluster.instances["dummy"] # type: ClickHouseInstance - table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')" + format_name_lower=format_name.lower() + table_function = f"s3(s3_{format_name_lower}, structure='a Int32, b String', format='{format_name}')" exec_query_with_retry( instance, - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1500000) settings s3_truncate_on_insert=1", - timeout=100, + f"INSERT INTO TABLE FUNCTION {table_function} SELECT number, randomString(100) FROM numbers({expected_lines}) settings s3_truncate_on_insert=1", + timeout=300, ) result = instance.query(f"SELECT count() FROM {table_function}") - assert int(result) == 1500000 + assert int(result) == expected_lines - table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')" - exec_query_with_retry( - instance, - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(1500000) settings s3_truncate_on_insert=1", - timeout=100, - ) - - table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_parquet', 'Parquet', 'a Int32, b String')" + url_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_{format_name_lower}', '{format_name}', 'a Int32, b String')" result = instance.query( - f"SELECT count() FROM {table_function} SETTINGS max_memory_usage='60M'" + f"SELECT count() FROM {url_function} SETTINGS max_memory_usage='60M'" ) - assert int(result) == 1500000 + assert int(result) == expected_lines def test_empty_file(started_cluster): From 68c53764f271463bf7d05639c7d31975717e8d6f Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 24 Jul 2024 17:50:42 +0200 Subject: [PATCH 0835/2361] Update tests/performance/decimal_aggregates.xml --- tests/performance/decimal_aggregates.xml | 1 + 1 file changed, 1 insertion(+) diff 
--git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml index 9fc94f01a4d..b204dddea6e 100644 --- a/tests/performance/decimal_aggregates.xml +++ b/tests/performance/decimal_aggregates.xml @@ -8,6 +8,7 @@ INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000) SETTINGS max_threads = 2 INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000, 100000000) SETTINGS max_threads = 2 INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000, 100000000) SETTINGS max_threads = 2 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(300000000, 100000000) SETTINGS max_threads = 2 DROP TABLE IF EXISTS t From 2e25808e586d46baf52029d626d02fdcc6ff53d2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 24 Jul 2024 18:03:48 +0200 Subject: [PATCH 0836/2361] Fix test test_backup_restore_s3. --- tests/integration/test_backup_restore_s3/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index 4840f5afc66..381268ce7fe 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -254,6 +254,7 @@ def check_system_tables(backup_query_id=None): ("disk_s3_cache", "ObjectStorage", "S3", "Local"), ("disk_s3_other_bucket", "ObjectStorage", "S3", "Local"), ("disk_s3_plain", "ObjectStorage", "S3", "Plain"), + ("disk_s3_plain_rewritable", "ObjectStorage", "S3", "PlainRewritable"), ("disk_s3_restricted_user", "ObjectStorage", "S3", "Local"), ) assert len(expected_disks) == len(disks) From ce153c9b54045959877e0b400971db352fbeb916 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 24 Jul 2024 16:05:10 +0000 Subject: [PATCH 0837/2361] Automatic style fix --- tests/integration/test_storage_s3/test.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index dae98daf0cf..ab327afe90b 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1148,11 +1148,12 @@ def test_url_reconnect_in_the_middle(started_cluster): assert result == "1000000\t3914219105369203805\n" - # At the time of writing the actual read bytes are respectively 148 and 169, so -10% to not be flaky -@pytest.mark.parametrize("format_name,expected_bytes_read", [("Parquet", 133), ("ORC", 150)]) +@pytest.mark.parametrize( + "format_name,expected_bytes_read", [("Parquet", 133), ("ORC", 150)] +) def test_seekable_formats(started_cluster, format_name, expected_bytes_read): - expected_lines=1500000 + expected_lines = 1500000 instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_function = f"s3(s3_{format_name.lower()}, structure='a Int32, b String', format='{format_name}')" @@ -1185,10 +1186,10 @@ def test_seekable_formats(started_cluster, format_name, expected_bytes_read): @pytest.mark.parametrize("format_name", ["Parquet", "ORC"]) def test_seekable_formats_url(started_cluster, format_name): bucket = started_cluster.minio_bucket - expected_lines=1500000 + expected_lines = 1500000 instance = started_cluster.instances["dummy"] # type: ClickHouseInstance - format_name_lower=format_name.lower() + format_name_lower = format_name.lower() table_function = f"s3(s3_{format_name_lower}, structure='a Int32, b String', 
format='{format_name}')" exec_query_with_retry( instance, From 689e31b47e1c85f1ae9721b3928de658eaf9a6ff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 24 Jul 2024 18:31:40 +0200 Subject: [PATCH 0838/2361] More tests --- ...03210_inconsistent_formatting_of_data_types.reference | 6 ++++++ .../03210_inconsistent_formatting_of_data_types.sh | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference index ccb445a0573..836b526905a 100644 --- a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference +++ b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.reference @@ -1 +1,7 @@ ALTER TABLE columns_with_multiple_streams MODIFY COLUMN `field1` Nullable(tupleElement(x, 2), UInt8) +ALTER TABLE t_update_empty_nested ADD COLUMN `nested.arr2` Array(tuple('- ON NULL -', toLowCardinality(11), 11, 11, toLowCardinality(11), 11), UInt64) +ALTER TABLE t ADD COLUMN `x` Array(tuple(1), UInt8) +ALTER TABLE enum_alter_issue MODIFY COLUMN `a` Enum8(equals('one', timeSlots(timeSlots(arrayEnumerateDense(tuple('0.2147483646', toLowCardinality(toUInt128)), NULL), 4, 12.34, materialize(73), 2)), 1)) +ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN `s` Tuple(Nullable(tupleElement(s, 1), UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(String)) +Syntax error +Syntax error diff --git a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh index 6cb2d083d71..86c7a5469ca 100755 --- a/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh +++ b/tests/queries/0_stateless/03210_inconsistent_formatting_of_data_types.sh @@ -4,4 +4,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# Ensure that these (possibly incorrect) queries can at least be parsed back after formatting. $CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE columns_with_multiple_streams MODIFY COLUMN field1 Nullable(tupleElement(x, 2), UInt8)" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t_update_empty_nested ADD COLUMN \`nested.arr2\` Array(tuple('- ON NULL -', toLowCardinality(11), 11, 11, toLowCardinality(11), 11), UInt64)" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t ADD COLUMN x Array((1), UInt8)" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE enum_alter_issue (MODIFY COLUMN a Enum8(equals('one', timeSlots(timeSlots(arrayEnumerateDense(tuple('0.2147483646', toLowCardinality(toUInt128(12))), NULL), 4, 12.34, materialize(73), 2)), 1)))" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN s Tuple(Nullable(tupleElement(s, 1), UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(String))" | $CLICKHOUSE_FORMAT --oneline + +# These invalid queries don't parse and this is normal. 
+$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE alter_compression_codec1 MODIFY COLUMN alter_column CODEC((2 + ignore(1, toUInt128(materialize(2)), 2 + toNullable(toNullable(3))), 3), NONE)" 2>&1 | grep -o -F 'Syntax error' +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE test_table ADD COLUMN \`array\` Array(('110', 3, toLowCardinality(3), 3, toNullable(3), toLowCardinality(toNullable(3)), 3), UInt8) DEFAULT [1, 2, 3]" 2>&1 | grep -o -F 'Syntax error' From 5d88f6fc8c4b8fcce4e7a5da073f5d11a86cd3cb Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 24 Jul 2024 19:32:20 +0200 Subject: [PATCH 0839/2361] fix MIN_ITERATIONS --- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 718017bca3d..d79ab27d8b2 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -168,7 +168,7 @@ fi START_TIME=$(get_now) STOP_TIME=$((START_TIME + MAIN_TIME_PART)) SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) -MIN_ITERATIONS=50 +MIN_ITERATIONS=30 run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 1 & PID_1=$! run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 2 & PID_2=$! From 67567fcff481793cf7828808c094221ad2ec7389 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 24 Jul 2024 19:51:22 +0200 Subject: [PATCH 0840/2361] Fix build --- programs/odbc-bridge/tests/CMakeLists.txt | 2 +- src/Common/examples/CMakeLists.txt | 38 ++++++++--------- src/Common/mysqlxx/tests/CMakeLists.txt | 2 +- src/Compression/examples/CMakeLists.txt | 2 +- src/Core/examples/CMakeLists.txt | 4 +- src/IO/examples/CMakeLists.txt | 50 +++++++++++------------ src/Interpreters/examples/CMakeLists.txt | 20 ++++----- src/Parsers/examples/CMakeLists.txt | 2 +- utils/corrector_utf8/CMakeLists.txt | 2 +- 9 files changed, 61 insertions(+), 61 deletions(-) diff --git a/programs/odbc-bridge/tests/CMakeLists.txt b/programs/odbc-bridge/tests/CMakeLists.txt index edf364ea192..f1411dbb554 100644 --- a/programs/odbc-bridge/tests/CMakeLists.txt +++ b/programs/odbc-bridge/tests/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp ../validateODBCConnectionString.cpp) -target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io) +target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io clickhouse_common_config) diff --git a/src/Common/examples/CMakeLists.txt b/src/Common/examples/CMakeLists.txt index c133e9f5617..69580d4ad0e 100644 --- a/src/Common/examples/CMakeLists.txt +++ b/src/Common/examples/CMakeLists.txt @@ -1,14 +1,14 @@ clickhouse_add_executable (hashes_test hashes_test.cpp) -target_link_libraries (hashes_test PRIVATE clickhouse_common_io ch_contrib::cityhash) +target_link_libraries (hashes_test PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::cityhash) if (TARGET OpenSSL::Crypto) target_link_libraries (hashes_test PRIVATE OpenSSL::Crypto) endif() clickhouse_add_executable (sip_hash_perf sip_hash_perf.cpp) -target_link_libraries (sip_hash_perf PRIVATE clickhouse_common_io) +target_link_libraries (sip_hash_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (small_table 
small_table.cpp) -target_link_libraries (small_table PRIVATE clickhouse_common_io) +target_link_libraries (small_table PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (parallel_aggregation parallel_aggregation.cpp) target_link_libraries (parallel_aggregation PRIVATE dbms clickhouse_functions) @@ -17,13 +17,13 @@ clickhouse_add_executable (parallel_aggregation2 parallel_aggregation2.cpp) target_link_libraries (parallel_aggregation2 PRIVATE dbms clickhouse_functions) clickhouse_add_executable (int_hashes_perf int_hashes_perf.cpp) -target_link_libraries (int_hashes_perf PRIVATE clickhouse_common_io) +target_link_libraries (int_hashes_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (compact_array compact_array.cpp) -target_link_libraries (compact_array PRIVATE clickhouse_common_io) +target_link_libraries (compact_array PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (radix_sort radix_sort.cpp) -target_link_libraries (radix_sort PRIVATE clickhouse_common_io ch_contrib::pdqsort) +target_link_libraries (radix_sort PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::pdqsort) clickhouse_add_executable (arena_with_free_lists arena_with_free_lists.cpp) target_link_libraries (arena_with_free_lists PRIVATE dbms) @@ -33,46 +33,46 @@ target_link_libraries (lru_hash_map_perf PRIVATE dbms) if (OS_LINUX) clickhouse_add_executable (thread_creation_latency thread_creation_latency.cpp) - target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io) + target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io clickhouse_common_config) endif() clickhouse_add_executable (array_cache array_cache.cpp) -target_link_libraries (array_cache PRIVATE clickhouse_common_io) +target_link_libraries (array_cache PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (space_saving space_saving.cpp) -target_link_libraries (space_saving PRIVATE clickhouse_common_io) +target_link_libraries (space_saving PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (integer_hash_tables_benchmark integer_hash_tables_benchmark.cpp) target_link_libraries (integer_hash_tables_benchmark PRIVATE dbms ch_contrib::abseil_swiss_tables ch_contrib::sparsehash) clickhouse_add_executable (cow_columns cow_columns.cpp) -target_link_libraries (cow_columns PRIVATE clickhouse_common_io) +target_link_libraries (cow_columns PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (cow_compositions cow_compositions.cpp) -target_link_libraries (cow_compositions PRIVATE clickhouse_common_io) +target_link_libraries (cow_compositions PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (stopwatch stopwatch.cpp) -target_link_libraries (stopwatch PRIVATE clickhouse_common_io) +target_link_libraries (stopwatch PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (symbol_index symbol_index.cpp) -target_link_libraries (symbol_index PRIVATE clickhouse_common_io) +target_link_libraries (symbol_index PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (chaos_sanitizer chaos_sanitizer.cpp) -target_link_libraries (chaos_sanitizer PRIVATE clickhouse_common_io) +target_link_libraries (chaos_sanitizer PRIVATE clickhouse_common_io clickhouse_common_config) if (OS_LINUX) clickhouse_add_executable (memory_statistics_os_perf memory_statistics_os_perf.cpp) - 
target_link_libraries (memory_statistics_os_perf PRIVATE clickhouse_common_io) + target_link_libraries (memory_statistics_os_perf PRIVATE clickhouse_common_io clickhouse_common_config) endif() clickhouse_add_executable (procfs_metrics_provider_perf procfs_metrics_provider_perf.cpp) -target_link_libraries (procfs_metrics_provider_perf PRIVATE clickhouse_common_io) +target_link_libraries (procfs_metrics_provider_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (average average.cpp) -target_link_libraries (average PRIVATE clickhouse_common_io) +target_link_libraries (average PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (shell_command_inout shell_command_inout.cpp) -target_link_libraries (shell_command_inout PRIVATE clickhouse_common_io) +target_link_libraries (shell_command_inout PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (executable_udf executable_udf.cpp) target_link_libraries (executable_udf PRIVATE dbms) @@ -91,4 +91,4 @@ if (ENABLE_SSL) endif() clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp) -target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io) +target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io clickhouse_common_config) diff --git a/src/Common/mysqlxx/tests/CMakeLists.txt b/src/Common/mysqlxx/tests/CMakeLists.txt index c560d0e3153..f62908ddcaf 100644 --- a/src/Common/mysqlxx/tests/CMakeLists.txt +++ b/src/Common/mysqlxx/tests/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp) -target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx) +target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx clickhouse_common_config) diff --git a/src/Compression/examples/CMakeLists.txt b/src/Compression/examples/CMakeLists.txt index a7cc6bebf42..fee8cf89942 100644 --- a/src/Compression/examples/CMakeLists.txt +++ b/src/Compression/examples/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (compressed_buffer compressed_buffer.cpp) -target_link_libraries (compressed_buffer PRIVATE clickhouse_common_io clickhouse_compression) +target_link_libraries (compressed_buffer PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression) diff --git a/src/Core/examples/CMakeLists.txt b/src/Core/examples/CMakeLists.txt index f30ee25491f..97e5e8e67e6 100644 --- a/src/Core/examples/CMakeLists.txt +++ b/src/Core/examples/CMakeLists.txt @@ -1,8 +1,8 @@ clickhouse_add_executable (string_pool string_pool.cpp) -target_link_libraries (string_pool PRIVATE clickhouse_common_io ch_contrib::sparsehash) +target_link_libraries (string_pool PRIVATE clickhouse_common_io clickhouse_common_config ch_contrib::sparsehash) clickhouse_add_executable (field field.cpp) target_link_libraries (field PRIVATE dbms) clickhouse_add_executable (string_ref_hash string_ref_hash.cpp) -target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io) +target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io clickhouse_common_config) diff --git a/src/IO/examples/CMakeLists.txt b/src/IO/examples/CMakeLists.txt index fc9d9c7dcd1..bfd171a3d00 100644 --- a/src/IO/examples/CMakeLists.txt +++ b/src/IO/examples/CMakeLists.txt @@ -1,77 +1,77 @@ clickhouse_add_executable (read_buffer read_buffer.cpp) -target_link_libraries (read_buffer PRIVATE clickhouse_common_io) +target_link_libraries (read_buffer PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (read_buffer_perf 
read_buffer_perf.cpp) -target_link_libraries (read_buffer_perf PRIVATE clickhouse_common_io) +target_link_libraries (read_buffer_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (read_float_perf read_float_perf.cpp) -target_link_libraries (read_float_perf PRIVATE clickhouse_common_io) +target_link_libraries (read_float_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (write_buffer write_buffer.cpp) -target_link_libraries (write_buffer PRIVATE clickhouse_common_io) +target_link_libraries (write_buffer PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (write_buffer_perf write_buffer_perf.cpp) -target_link_libraries (write_buffer_perf PRIVATE clickhouse_common_io) +target_link_libraries (write_buffer_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (valid_utf8_perf valid_utf8_perf.cpp) -target_link_libraries (valid_utf8_perf PRIVATE clickhouse_common_io) +target_link_libraries (valid_utf8_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (valid_utf8 valid_utf8.cpp) -target_link_libraries (valid_utf8 PRIVATE clickhouse_common_io) +target_link_libraries (valid_utf8 PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (var_uint var_uint.cpp) -target_link_libraries (var_uint PRIVATE clickhouse_common_io) +target_link_libraries (var_uint PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (read_escaped_string read_escaped_string.cpp) -target_link_libraries (read_escaped_string PRIVATE clickhouse_common_io) +target_link_libraries (read_escaped_string PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (parse_int_perf parse_int_perf.cpp) -target_link_libraries (parse_int_perf PRIVATE clickhouse_common_io) +target_link_libraries (parse_int_perf PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (parse_int_perf2 parse_int_perf2.cpp) -target_link_libraries (parse_int_perf2 PRIVATE clickhouse_common_io) +target_link_libraries (parse_int_perf2 PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (read_write_int read_write_int.cpp) -target_link_libraries (read_write_int PRIVATE clickhouse_common_io) +target_link_libraries (read_write_int PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (o_direct_and_dirty_pages o_direct_and_dirty_pages.cpp) -target_link_libraries (o_direct_and_dirty_pages PRIVATE clickhouse_common_io) +target_link_libraries (o_direct_and_dirty_pages PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (io_operators io_operators.cpp) -target_link_libraries (io_operators PRIVATE clickhouse_common_io) +target_link_libraries (io_operators PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (write_int write_int.cpp) -target_link_libraries (write_int PRIVATE clickhouse_common_io) +target_link_libraries (write_int PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (zlib_buffers zlib_buffers.cpp) -target_link_libraries (zlib_buffers PRIVATE clickhouse_common_io) +target_link_libraries (zlib_buffers PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (lzma_buffers lzma_buffers.cpp) -target_link_libraries (lzma_buffers PRIVATE clickhouse_common_io) +target_link_libraries (lzma_buffers PRIVATE clickhouse_common_io 
clickhouse_common_config) clickhouse_add_executable (limit_read_buffer limit_read_buffer.cpp) -target_link_libraries (limit_read_buffer PRIVATE clickhouse_common_io) +target_link_libraries (limit_read_buffer PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (limit_read_buffer2 limit_read_buffer2.cpp) -target_link_libraries (limit_read_buffer2 PRIVATE clickhouse_common_io) +target_link_libraries (limit_read_buffer2 PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (parse_date_time_best_effort parse_date_time_best_effort.cpp) -target_link_libraries (parse_date_time_best_effort PRIVATE clickhouse_common_io) +target_link_libraries (parse_date_time_best_effort PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (zlib_ng_bug zlib_ng_bug.cpp) -target_link_libraries (zlib_ng_bug PRIVATE ch_contrib::zlib clickhouse_common_io) +target_link_libraries (zlib_ng_bug PRIVATE ch_contrib::zlib clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (dragonbox_test dragonbox_test.cpp) -target_link_libraries (dragonbox_test PRIVATE ch_contrib::dragonbox_to_chars clickhouse_common_io) +target_link_libraries (dragonbox_test PRIVATE ch_contrib::dragonbox_to_chars clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (zstd_buffers zstd_buffers.cpp) -target_link_libraries (zstd_buffers PRIVATE clickhouse_common_io) +target_link_libraries (zstd_buffers PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (snappy_read_buffer snappy_read_buffer.cpp) -target_link_libraries (snappy_read_buffer PRIVATE clickhouse_common_io) +target_link_libraries (snappy_read_buffer PRIVATE clickhouse_common_io clickhouse_common_config) clickhouse_add_executable (hadoop_snappy_read_buffer hadoop_snappy_read_buffer.cpp) -target_link_libraries (hadoop_snappy_read_buffer PRIVATE clickhouse_common_io) +target_link_libraries (hadoop_snappy_read_buffer PRIVATE clickhouse_common_io clickhouse_common_config) if (TARGET ch_contrib::hdfs) clickhouse_add_executable (read_buffer_from_hdfs read_buffer_from_hdfs.cpp) diff --git a/src/Interpreters/examples/CMakeLists.txt b/src/Interpreters/examples/CMakeLists.txt index 8bb7f9eeb98..4b1ec970b26 100644 --- a/src/Interpreters/examples/CMakeLists.txt +++ b/src/Interpreters/examples/CMakeLists.txt @@ -2,34 +2,34 @@ clickhouse_add_executable (hash_map hash_map.cpp) target_link_libraries (hash_map PRIVATE dbms clickhouse_functions ch_contrib::sparsehash) clickhouse_add_executable (hash_map_lookup hash_map_lookup.cpp) -target_link_libraries (hash_map_lookup PRIVATE clickhouse_common_io clickhouse_compression) +target_link_libraries (hash_map_lookup PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression) clickhouse_add_executable (hash_map3 hash_map3.cpp) -target_link_libraries (hash_map3 PRIVATE clickhouse_common_io clickhouse_compression ch_contrib::farmhash ch_contrib::metrohash) +target_link_libraries (hash_map3 PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression ch_contrib::farmhash ch_contrib::metrohash) clickhouse_add_executable (hash_map_string hash_map_string.cpp) -target_link_libraries (hash_map_string PRIVATE clickhouse_common_io clickhouse_compression ch_contrib::sparsehash) +target_link_libraries (hash_map_string PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression ch_contrib::sparsehash) clickhouse_add_executable (hash_map_string_2 hash_map_string_2.cpp) 
-target_link_libraries (hash_map_string_2 PRIVATE clickhouse_common_io clickhouse_compression) +target_link_libraries (hash_map_string_2 PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression) clickhouse_add_executable (hash_map_string_3 hash_map_string_3.cpp) -target_link_libraries (hash_map_string_3 PRIVATE clickhouse_common_io clickhouse_compression ch_contrib::farmhash ch_contrib::metrohash) +target_link_libraries (hash_map_string_3 PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression ch_contrib::farmhash ch_contrib::metrohash) clickhouse_add_executable (hash_map_string_small hash_map_string_small.cpp) -target_link_libraries (hash_map_string_small PRIVATE clickhouse_common_io clickhouse_compression ch_contrib::sparsehash) +target_link_libraries (hash_map_string_small PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression ch_contrib::sparsehash) clickhouse_add_executable (string_hash_map string_hash_map.cpp) -target_link_libraries (string_hash_map PRIVATE clickhouse_common_io clickhouse_compression ch_contrib::sparsehash) +target_link_libraries (string_hash_map PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression ch_contrib::sparsehash) clickhouse_add_executable (string_hash_map_aggregation string_hash_map.cpp) -target_link_libraries (string_hash_map_aggregation PRIVATE clickhouse_common_io clickhouse_compression) +target_link_libraries (string_hash_map_aggregation PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression) clickhouse_add_executable (string_hash_set string_hash_set.cpp) -target_link_libraries (string_hash_set PRIVATE clickhouse_common_io clickhouse_compression) +target_link_libraries (string_hash_set PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression) clickhouse_add_executable (two_level_hash_map two_level_hash_map.cpp) -target_link_libraries (two_level_hash_map PRIVATE clickhouse_common_io clickhouse_compression ch_contrib::sparsehash) +target_link_libraries (two_level_hash_map PRIVATE clickhouse_common_io clickhouse_common_config clickhouse_compression ch_contrib::sparsehash) clickhouse_add_executable (jit_example jit_example.cpp) target_link_libraries (jit_example PRIVATE dbms) diff --git a/src/Parsers/examples/CMakeLists.txt b/src/Parsers/examples/CMakeLists.txt index 261f234081c..07f60601acd 100644 --- a/src/Parsers/examples/CMakeLists.txt +++ b/src/Parsers/examples/CMakeLists.txt @@ -1,7 +1,7 @@ set(SRCS) clickhouse_add_executable(lexer lexer.cpp ${SRCS}) -target_link_libraries(lexer PRIVATE clickhouse_parsers) +target_link_libraries(lexer PRIVATE clickhouse_parsers clickhouse_common_config) clickhouse_add_executable(select_parser select_parser.cpp ${SRCS} "../../Server/ServerType.cpp") target_link_libraries(select_parser PRIVATE dbms) diff --git a/utils/corrector_utf8/CMakeLists.txt b/utils/corrector_utf8/CMakeLists.txt index 4744dd5e0a5..17f047a8cff 100644 --- a/utils/corrector_utf8/CMakeLists.txt +++ b/utils/corrector_utf8/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(corrector_utf8 corrector_utf8.cpp) -target_link_libraries(corrector_utf8 PRIVATE clickhouse_common_io) +target_link_libraries(corrector_utf8 PRIVATE clickhouse_common_io clickhouse_common_config) From 9de43325e4771f54e29ccc43722c5f8c104f129a Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 24 Jul 2024 19:51:34 +0200 Subject: [PATCH 0841/2361] CI: Jepsen Workflow with CI buddy --- .github/workflows/jepsen.yml | 69 +++++++++++++++++++++++++++++------- 
tests/ci/ci.py | 40 +++++++++++++++------ tests/ci/ci_config.py | 26 ++++++++++++-- tests/ci/ci_definitions.py | 8 +++++ tests/ci/ci_settings.py | 7 ---- tests/ci/jepsen_check.py | 48 ++++++------------------- tests/ci/pr_info.py | 4 +++ tests/ci/test_ci_config.py | 26 ++++++++++++++ 8 files changed, 159 insertions(+), 69 deletions(-) diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index db837ac1ec7..035ba2e5b98 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -9,19 +9,64 @@ on: # yamllint disable-line rule:truthy - cron: '0 */6 * * *' workflow_dispatch: jobs: + RunConfig: + runs-on: [self-hosted, style-checker-aarch64] + outputs: + data: ${{ steps.runconfig.outputs.CI_DATA }} + steps: + - name: DebugInfo + uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true # to ensure correct digests + fetch-depth: 0 # to get version + filter: tree:0 + - name: PrepareRunConfig + id: runconfig + run: | + echo "::group::configure CI run" + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow "$GITHUB_WORKFLOW" --outfile ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + + echo "::group::CI run configure results" + python3 -m json.tool ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + { + echo 'CI_DATA<> "$GITHUB_OUTPUT" KeeperJepsenRelease: - uses: ./.github/workflows/reusable_simple_job.yml + needs: [RunConfig] + uses: ./.github/workflows/reusable_test.yml with: - test_name: Jepsen keeper check - runner_type: style-checker - report_required: true + test_name: ClickHouse Keeper Jepsen + runner_type: style-checker-aarch64 + data: ${{ needs.RunConfig.outputs.data }} run_command: | python3 jepsen_check.py keeper - # ServerJepsenRelease: - # uses: ./.github/workflows/reusable_simple_job.yml - # with: - # test_name: Jepsen server check - # runner_type: style-checker - # run_command: | - # cd "$REPO_COPY/tests/ci" - # python3 jepsen_check.py server + ServerJepsenRelease: + if: false # skip for server + needs: [RunConfig] + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse Server Jepsen + runner_type: style-checker-aarch64 + data: ${{ needs.RunConfig.outputs.data }} + run_command: | + python3 jepsen_check.py server + CheckWorkflow: + if: ${{ !cancelled() }} + needs: [RunConfig, ServerJepsenRelease, KeeperJepsenRelease] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + - name: Check Workflow results + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 171819e2632..e30062c32ff 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -95,6 +95,12 @@ def parse_args(parser: argparse.ArgumentParser) -> argparse.Namespace: action="store_true", help="Action that configures ci run. 
Calculates digests, checks job to be executed, generates json output", ) + parser.add_argument( + "--workflow", + default="", + type=str, + help="Workflow Name, to be provided with --configure for workflow-specific CI runs", + ) parser.add_argument( "--update-gh-statuses", action="store_true", @@ -287,7 +293,10 @@ def _pre_action(s3, job_name, batch, indata, pr_info): # for release/master branches reports must be from the same branch report_prefix = "" if pr_info.is_master or pr_info.is_release: - report_prefix = normalize_string(pr_info.head_ref) + # do not set report prefix for scheduled or dispatched wf (in case it started from feature branch while + # testing), otherwise reports won't be found + if not (pr_info.is_scheduled or pr_info.is_dispatched): + report_prefix = normalize_string(pr_info.head_ref) print( f"Use report prefix [{report_prefix}], pr_num [{pr_info.number}], head_ref [{pr_info.head_ref}]" ) @@ -520,6 +529,7 @@ def _configure_jobs( pr_info: PRInfo, ci_settings: CiSettings, skip_jobs: bool, + workflow_name: str = "", dry_run: bool = False, ) -> CiCache: """ @@ -537,18 +547,27 @@ def _configure_jobs( is_docs_only=pr_info.has_changes_in_documentation_only(), is_master=pr_info.is_master, is_pr=pr_info.is_pr, + workflow_name=workflow_name, ) else: job_configs = {} - # filter jobs in accordance with ci settings - job_configs = ci_settings.apply( - job_configs, - pr_info.is_release, - is_pr=pr_info.is_pr, - is_mq=pr_info.is_merge_queue, - labels=pr_info.labels, - ) + if not workflow_name: + # filter jobs in accordance with ci settings + job_configs = ci_settings.apply( + job_configs, + pr_info.is_release, + is_pr=pr_info.is_pr, + is_mq=pr_info.is_merge_queue, + labels=pr_info.labels, + ) + + # add all job batches to job's to_do batches + for _job, job_config in job_configs.items(): + batches = [] + for batch in range(job_config.num_batches): + batches.append(batch) + job_config.batches = batches # check jobs in ci cache ci_cache = CiCache.calc_digests_and_create( @@ -1102,6 +1121,7 @@ def main() -> int: pr_info, ci_settings, args.skip_jobs, + args.workflow, ) ci_cache.print_status() @@ -1111,7 +1131,7 @@ def main() -> int: if IS_CI and not pr_info.is_merge_queue: - if pr_info.is_release: + if pr_info.is_release and pr_info.is_push_event: print("Release/master: CI Cache add pending records for all todo jobs") ci_cache.push_pending_all(pr_info.is_release) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index a44b15f34c1..a9bdb639835 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -37,11 +37,22 @@ class CI: from ci_utils import GHActions as GHActions from ci_definitions import Labels as Labels from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS + from ci_definitions import WorkFlowNames as WorkFlowNames from ci_utils import CATEGORY_TO_LABEL as CATEGORY_TO_LABEL # Jobs that run for doc related updates _DOCS_CHECK_JOBS = [JobNames.DOCS_CHECK, JobNames.STYLE_CHECK] + WORKFLOW_CONFIGS = { + WorkFlowNames.JEPSEN: LabelConfig( + run_jobs=[ + BuildNames.BINARY_RELEASE, + JobNames.JEPSEN_KEEPER, + JobNames.JEPSEN_SERVER, + ] + ) + } # type: Dict[str, LabelConfig] + TAG_CONFIGS = { Tags.DO_NOT_TEST_LABEL: LabelConfig(run_jobs=[JobNames.STYLE_CHECK]), Tags.CI_SET_ARM: LabelConfig( @@ -68,7 +79,7 @@ class CI: JobNames.STATEFUL_TEST_ASAN, ] ), - } + } # type: Dict[str, LabelConfig] JOB_CONFIGS: Dict[str, JobConfig] = { BuildNames.PACKAGE_RELEASE: CommonJobConfigs.BUILD.with_properties( @@ -599,16 +610,25 @@ class CI: @classmethod def 
get_workflow_jobs_with_configs( - cls, is_mq: bool, is_docs_only: bool, is_master: bool, is_pr: bool + cls, + is_mq: bool, + is_docs_only: bool, + is_master: bool, + is_pr: bool, + workflow_name: str, ) -> Dict[str, JobConfig]: """ get a list of all jobs for a workflow with configs """ - jobs = [] if is_mq: jobs = MQ_JOBS elif is_docs_only: jobs = cls._DOCS_CHECK_JOBS + elif workflow_name: + assert ( + workflow_name in cls.WORKFLOW_CONFIGS + ), "Workflow name if provided must be configured in WORKFLOW_CONFIGS" + jobs = list(cls.WORKFLOW_CONFIGS[workflow_name].run_jobs) else: # add all jobs jobs = list(cls.JOB_CONFIGS) diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index a8d9793f1d3..149177ecba5 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -106,6 +106,14 @@ class Tags(metaclass=WithIter): libFuzzer = "libFuzzer" +class WorkFlowNames(metaclass=WithIter): + """ + CI WorkFlow Names for custom CI runs + """ + + JEPSEN = "JepsenWorkflow" + + class BuildNames(metaclass=WithIter): """ Build' job names diff --git a/tests/ci/ci_settings.py b/tests/ci/ci_settings.py index c29c5777dba..d6e9765ceb7 100644 --- a/tests/ci/ci_settings.py +++ b/tests/ci/ci_settings.py @@ -234,11 +234,4 @@ class CiSettings: for job in add_parents: res[job] = job_configs[job] - for job, job_config in res.items(): - batches = [] - for batch in range(job_config.num_batches): - if not self.job_batches or batch in self.job_batches: - batches.append(batch) - job_config.batches = batches - return res diff --git a/tests/ci/jepsen_check.py b/tests/ci/jepsen_check.py index f91a3f080c0..772467d4245 100644 --- a/tests/ci/jepsen_check.py +++ b/tests/ci/jepsen_check.py @@ -9,16 +9,13 @@ from pathlib import Path from typing import Any, List import boto3 # type: ignore -import requests from build_download_helper import ( - download_build_with_progress, read_build_urls, ) from compress_files import compress_fast -from env_helper import REPO_COPY, REPORT_PATH, S3_BUILDS_BUCKET, S3_URL, TEMP_PATH +from env_helper import REPO_COPY, REPORT_PATH, TEMP_PATH from get_robot_token import get_parameter_from_ssm -from git_helper import git_runner from pr_info import PRInfo from report import FAILURE, SUCCESS, JobReport, TestResult, TestResults from ssh import SSHKey @@ -32,11 +29,10 @@ KEEPER_DESIRED_INSTANCE_COUNT = 3 SERVER_DESIRED_INSTANCE_COUNT = 4 KEEPER_IMAGE_NAME = "clickhouse/keeper-jepsen-test" -KEEPER_CHECK_NAME = "ClickHouse Keeper Jepsen" +KEEPER_CHECK_NAME = CI.JobNames.JEPSEN_KEEPER SERVER_IMAGE_NAME = "clickhouse/server-jepsen-test" -SERVER_CHECK_NAME = "ClickHouse Server Jepsen" - +SERVER_CHECK_NAME = CI.JobNames.JEPSEN_SERVER SUCCESSFUL_TESTS_ANCHOR = "# Successful tests" INTERMINATE_TESTS_ANCHOR = "# Indeterminate tests" @@ -201,36 +197,14 @@ def main(): # always use latest docker_image = KEEPER_IMAGE_NAME if args.program == "keeper" else SERVER_IMAGE_NAME - if pr_info.is_scheduled or pr_info.is_dispatched: - # get latest clickhouse by the static link for latest master buit - get its version and provide permanent url for this version to the jepsen - build_url = f"{S3_URL}/{S3_BUILDS_BUCKET}/master/amd64/clickhouse" - download_build_with_progress(build_url, Path(TEMP_PATH) / "clickhouse") - git_runner.run(f"chmod +x {TEMP_PATH}/clickhouse") - sha = git_runner.run( - f"{TEMP_PATH}/clickhouse local -q \"select value from system.build_options where name='GIT_HASH'\"" - ) - version_full = git_runner.run( - f'{TEMP_PATH}/clickhouse local -q "select version()"' - ) - version = 
".".join(version_full.split(".")[0:2]) - assert len(sha) == 40, f"failed to fetch sha from the binary. result: {sha}" - assert ( - version - ), f"failed to fetch version from the binary. result: {version_full}" - build_url = ( - f"{S3_URL}/{S3_BUILDS_BUCKET}/{version}/{sha}/binary_release/clickhouse" - ) - print(f"Clickhouse version: [{version_full}], sha: [{sha}], url: [{build_url}]") - head = requests.head(build_url, timeout=60) - assert head.status_code == 200, f"Clickhouse binary not found: {build_url}" - else: - build_name = CI.get_required_build_name(check_name) - urls = read_build_urls(build_name, REPORT_PATH) - build_url = None - for url in urls: - if url.endswith("clickhouse"): - build_url = url - assert build_url, "No build url found in the report" + # binary_release assumed to be always ready on the master as it's part of the merge queue workflow + build_name = CI.get_required_build_name(check_name) + urls = read_build_urls(build_name, REPORT_PATH) + build_url = None + for url in urls: + if url.endswith("clickhouse"): + build_url = url + assert build_url, "No build url found in the report" extra_args = "" if args.program == "server": diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 59806a2a8fa..2c8ada7b983 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -337,6 +337,10 @@ class PRInfo: return True return False + @property + def is_push_event(self) -> bool: + return self.event_type == EventType.PUSH + @property def is_scheduled(self) -> bool: return self.event_type == EventType.SCHEDULE diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 44142050821..4a2bd606d0e 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -419,6 +419,32 @@ class TestCIConfig(unittest.TestCase): ] self.assertCountEqual(expected_jobs_to_do, actual_jobs_to_do) + def test_ci_py_for_specific_workflow(self): + """ + checks ci.py job configuration + """ + settings = CiSettings() + settings.no_ci_cache = True + pr_info = PRInfo(github_event=_TEST_EVENT_JSON) + # make it merge_queue + pr_info.event_type = EventType.SCHEDULE + assert pr_info.number == 0 and not pr_info.is_merge_queue and not pr_info.is_pr + ci_cache = CIPY._configure_jobs( + S3Helper(), + pr_info, + settings, + skip_jobs=False, + dry_run=True, + workflow_name=CI.WorkFlowNames.JEPSEN, + ) + actual_jobs_to_do = list(ci_cache.jobs_to_do) + expected_jobs_to_do = [ + CI.BuildNames.BINARY_RELEASE, + CI.JobNames.JEPSEN_KEEPER, + CI.JobNames.JEPSEN_SERVER, + ] + self.assertCountEqual(expected_jobs_to_do, actual_jobs_to_do) + def test_ci_py_await(self): """ checks ci.py job configuration From 687c99e39a3ece073239517ffbcecf4612721995 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 24 Jul 2024 18:37:25 +0000 Subject: [PATCH 0842/2361] try to fix --- .../0_stateless/02680_mysql_ast_logical_err.sql | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql b/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql index bde91df83ca..78ce1b68b0d 100644 --- a/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql +++ b/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql @@ -1,4 +1,10 @@ CREATE TABLE foo (key UInt32, a String, b Int64, c String) ENGINE = TinyLog; -SELECT count() FROM mysql(mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', ''), '127.0.0.1:9004', currentDatabase(), 'foo', '', ''); -- { serverError UNKNOWN_FUNCTION } -SELECT count() FROM mysql(mysql('127.0.0.1:9004', 
currentDatabase(), 'foo', 'default', '', SETTINGS connection_pool_size = 1), '127.0.0.1:9004', currentDatabase(), 'foo', '', ''); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD } +SELECT count() FROM mysql( + mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', ''), + '127.0.0.1:9004', currentDatabase(), 'foo', '', '', + SETTINGS connect_timeout = 100, connection_wait_timeout = 100, read_write_timeout = 300); -- { serverError UNKNOWN_FUNCTION } +SELECT count() FROM mysql( + mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', '', SETTINGS connection_pool_size = 1), + '127.0.0.1:9004', currentDatabase(), 'foo', '', '', + SETTINGS connect_timeout = 100, connection_wait_timeout = 100, read_write_timeout = 300); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD } From 2381c3dbca98ff289f71c0fdd699ed3b31f146db Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 24 Jul 2024 19:11:51 +0000 Subject: [PATCH 0843/2361] Fix and rewrite tests --- src/Columns/ColumnObject.cpp | 16 ++ src/Storages/ColumnsDescription.cpp | 1 + tests/performance/json_type.xml | 6 +- tests/performance/new_json_type.xml | 41 +++++ .../01825_new_type_json_in_array.reference | 2 + .../01825_new_type_json_in_array.sql | 2 + .../01825_new_type_json_nbagames.sh | 2 +- ...columns_1_compact_merge_tree.reference.j2} | 5 - ...on_read_subcolumns_1_compact_merge_tree.sh | 107 ------------- ...ead_subcolumns_1_compact_merge_tree.sql.j2 | 93 ++++++++++++ ...subcolumns_1_wide_merge_tree.reference.j2} | 5 - ..._json_read_subcolumns_1_wide_merge_tree.sh | 107 ------------- ...n_read_subcolumns_1_wide_merge_tree.sql.j2 | 93 ++++++++++++ ...columns_2_compact_merge_tree.reference.j2} | 5 - ...on_read_subcolumns_2_compact_merge_tree.sh | 142 ------------------ ...ead_subcolumns_2_compact_merge_tree.sql.j2 | 128 ++++++++++++++++ ...07_json_read_subcolumns_2_memory.reference | 2 - .../03207_json_read_subcolumns_2_memory.sh | 137 ----------------- .../03207_json_read_subcolumns_2_memory.sql | 123 +++++++++++++++ ...subcolumns_2_wide_merge_tree.reference.j2} | 5 - ..._json_read_subcolumns_2_wide_merge_tree.sh | 142 ------------------ ...n_read_subcolumns_2_wide_merge_tree.sql.j2 | 128 ++++++++++++++++ ...ay_of_json_read_subcolumns_1.reference.j2} | 15 -- .../03208_array_of_json_read_subcolumns_1.sh | 71 --------- ...208_array_of_json_read_subcolumns_1.sql.j2 | 41 +++++ ...columns_2_compact_merge_tree.reference.j2} | 5 - ...on_read_subcolumns_2_compact_merge_tree.sh | 73 --------- ...ead_subcolumns_2_compact_merge_tree.sql.j2 | 57 +++++++ ...of_json_read_subcolumns_2_memory.reference | 2 - ..._array_of_json_read_subcolumns_2_memory.sh | 68 --------- ...array_of_json_read_subcolumns_2_memory.sql | 52 +++++++ ...subcolumns_2_wide_merge_tree.reference.j2} | 5 - ..._json_read_subcolumns_2_wide_merge_tree.sh | 73 --------- ...n_read_subcolumns_2_wide_merge_tree.sql.j2 | 57 +++++++ ..._json_type_horizontal_merges.reference.j2} | 4 - .../03209_json_type_horizontal_merges.sh | 70 --------- .../03209_json_type_horizontal_merges.sql.j2 | 59 ++++++++ ...09_json_type_vertical_merges.reference.j2} | 4 - .../03209_json_type_vertical_merges.sh | 70 --------- .../03209_json_type_vertical_merges.sql.j2 | 59 ++++++++ ...0_json_type_alter_add_column.reference.j2} | 3 - .../03210_json_type_alter_add_column.sh | 47 ------ .../03210_json_type_alter_add_column.sql.j2 | 33 ++++ ... 
=> 03211_nested_json_merges.reference.j2} | 8 - .../0_stateless/03211_nested_json_merges.sh | 65 -------- .../03211_nested_json_merges.sql.j2 | 46 ++++++ 46 files changed, 1035 insertions(+), 1244 deletions(-) create mode 100644 tests/performance/new_json_type.xml rename tests/queries/0_stateless/{03207_json_read_subcolumns_1_compact_merge_tree.reference => 03207_json_read_subcolumns_1_compact_merge_tree.reference.j2} (99%) delete mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 rename tests/queries/0_stateless/{03207_json_read_subcolumns_1_wide_merge_tree.reference => 03207_json_read_subcolumns_1_wide_merge_tree.reference.j2} (99%) delete mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 rename tests/queries/0_stateless/{03207_json_read_subcolumns_2_compact_merge_tree.reference => 03207_json_read_subcolumns_2_compact_merge_tree.reference.j2} (95%) delete mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 delete mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql rename tests/queries/0_stateless/{03207_json_read_subcolumns_2_wide_merge_tree.reference => 03207_json_read_subcolumns_2_wide_merge_tree.reference.j2} (95%) delete mode 100755 tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh create mode 100644 tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 rename tests/queries/0_stateless/{03208_array_of_json_read_subcolumns_1.reference => 03208_array_of_json_read_subcolumns_1.reference.j2} (99%) delete mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 rename tests/queries/0_stateless/{03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference => 03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2} (96%) delete mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 delete mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql rename tests/queries/0_stateless/{03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference => 03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2} (96%) delete mode 100755 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh create mode 100644 tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 rename tests/queries/0_stateless/{03209_json_type_horizontal_merges.reference => 03209_json_type_horizontal_merges.reference.j2} (95%) delete mode 100755 tests/queries/0_stateless/03209_json_type_horizontal_merges.sh create mode 100644 tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 rename tests/queries/0_stateless/{03209_json_type_vertical_merges.reference => 
03209_json_type_vertical_merges.reference.j2} (95%) delete mode 100755 tests/queries/0_stateless/03209_json_type_vertical_merges.sh create mode 100644 tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 rename tests/queries/0_stateless/{03210_json_type_alter_add_column.reference => 03210_json_type_alter_add_column.reference.j2} (97%) delete mode 100755 tests/queries/0_stateless/03210_json_type_alter_add_column.sh create mode 100644 tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 rename tests/queries/0_stateless/{03211_nested_json_merges.reference => 03211_nested_json_merges.reference.j2} (85%) delete mode 100755 tests/queries/0_stateless/03211_nested_json_merges.sh create mode 100644 tests/queries/0_stateless/03211_nested_json_merges.sql.j2 diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index ee0d87f500a..98cffdf32bb 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -661,6 +661,7 @@ void ColumnObject::serializePathAndValueIntoArena(DB::Arena & arena, const char const char * ColumnObject::deserializeAndInsertFromArena(const char * pos) { + size_t current_size = size(); /// Deserialize paths and values and insert them into typed paths, dynamic paths or shared data. /// Serialized paths could be unsorted, so we will have to sort all paths that will be inserted into shared data. std::vector> paths_and_values_for_shared_data; @@ -718,6 +719,21 @@ const char * ColumnObject::deserializeAndInsertFromArena(const char * pos) } getSharedDataOffsets().push_back(shared_data_paths->size()); + + /// Insert default value in all remaining typed and dynamic paths. + + for (auto & [_, column] : typed_paths) + { + if (column->size() == current_size) + column->insertDefault(); + } + + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() == current_size) + column->insertDefault(); + } + return pos; } diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index 68c11060d88..da749812167 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -711,6 +711,7 @@ std::optional ColumnsDescription::tryGetColumn(const GetColumns return NameAndTypePair(ordinary_column_name, dynamic_subcolumn_name, it->type, dynamic_subcolumn_type); } } + return {}; } diff --git a/tests/performance/json_type.xml b/tests/performance/json_type.xml index b6406f52579..db3fd844f89 100644 --- a/tests/performance/json_type.xml +++ b/tests/performance/json_type.xml @@ -27,9 +27,9 @@ - CREATE TABLE t_json_1(data JSON) ENGINE = MergeTree ORDER BY tuple() - CREATE TABLE t_json_2(data JSON) ENGINE = MergeTree ORDER BY tuple() - CREATE TABLE t_json_3(data JSON) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_1(data Object('json')) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_2(data Object('json')) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_3(data Object('json')) ENGINE = MergeTree ORDER BY tuple() INSERT INTO t_json_1 SELECT materialize({json1}) FROM numbers(200000) INSERT INTO t_json_2 SELECT {json2} FROM numbers(100000) diff --git a/tests/performance/new_json_type.xml b/tests/performance/new_json_type.xml new file mode 100644 index 00000000000..1ad21850c6c --- /dev/null +++ b/tests/performance/new_json_type.xml @@ -0,0 +1,41 @@ + + + 1 + + + + + + + json1 + + '{"k1":1, "k2": "some"}' + + + + json2 + + '{"col' || toString(number % 100) || '":' || toString(number) || '}' + + + + json3 + + 
'{"k1":[{"k2":"aaa","k3":[{"k4":"bbb"},{"k4":"ccc"}]},{"k2":"ddd","k3":[{"k4":"eee"},{"k4":"fff"}]}]}' + + + + + CREATE TABLE t_json_1(data JSON) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_2(data JSON) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_3(data JSON) ENGINE = MergeTree ORDER BY tuple() + + INSERT INTO t_json_1 SELECT materialize({json1}) FROM numbers(200000) + INSERT INTO t_json_2 SELECT {json2} FROM numbers(100000) + INSERT INTO t_json_3 SELECT materialize({json3}) FROM numbers_mt(100000) + + DROP TABLE IF EXISTS t_json_1 + DROP TABLE IF EXISTS t_json_2 + DROP TABLE IF EXISTS t_json_3 + diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.reference b/tests/queries/0_stateless/01825_new_type_json_in_array.reference index 0ca02385389..aa33d9a7413 100644 --- a/tests/queries/0_stateless/01825_new_type_json_in_array.reference +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.reference @@ -20,6 +20,8 @@ ('k2','String') ('k3','String') ('k4','Int64') +[['{"k2":"aaa","k3":"bbb"}','{"k2":"ccc"}']] +[['{"k3":"ddd","k4":"10"}','{"k4":"20"}']] {"arr":[{"x":1}]} {"arr":{"x":{"y":1},"t":{"y":2}}} {"arr":[1,{"y":1}]} diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.sql b/tests/queries/0_stateless/01825_new_type_json_in_array.sql index 7a8314ee3a8..fbf47e8607d 100644 --- a/tests/queries/0_stateless/01825_new_type_json_in_array.sql +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.sql @@ -26,6 +26,8 @@ SELECT id, arr.k1[].k2, arr.k1[].k3, arr.k1[].k4, arr.k5.k6 FROM t_json_array OR SELECT arrayJoin(arrayJoin(arr.k1[])) AS k1 FROM t_json_array ORDER BY toString(k1) FORMAT JSONEachRow; SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arr.k1[])))) AS path FROM t_json_array order by path; +SELECT arr.k1 FROM t_json_array GROUP BY arr.k1 ORDER BY toString(arr.k1); + DROP TABLE t_json_array; SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/01825_new_type_json_nbagames.sh b/tests/queries/0_stateless/01825_new_type_json_nbagames.sh index 2a02aa9adc9..20eba88eda4 100755 --- a/tests/queries/0_stateless/01825_new_type_json_nbagames.sh +++ b/tests/queries/0_stateless/01825_new_type_json_nbagames.sh @@ -17,7 +17,7 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM nbagames WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) as path from nbagames order by path" ${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(data.teams[]))) as path from nbagames order by path" -${CLICKHOUSE_CLIENT} -q \ +${CLICKHOUSE_CLIENT} --allow_experimental_analyzer=1 -q \ "SELECT teams.name.:String AS name, sum(teams.won.:Int64) AS wins FROM nbagames \ ARRAY JOIN data.teams[] AS teams GROUP BY name \ ORDER BY wins DESC LIMIT 5;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 similarity index 99% rename from tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference rename to tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 index 16150ee7b45..972cfd9c37f 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 @@ -1,6 +1,3 @@ -No merges -insert -test 
('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') @@ -414,8 +411,6 @@ test "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } -With merges -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh deleted file mode 100755 index 75b13f89a06..00000000000 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, 
json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.non.existing.path from test order by id format JSONColumns" - $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.a.b.c from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.a.b.c from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, 
json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.^a, json.a.b.c 
from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 new file mode 100644 index 00000000000..0ec1a86372b --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 @@ -0,0 +1,93 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, 
json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, 
json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id 
format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 similarity index 99% rename from tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference rename to tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 index 16150ee7b45..972cfd9c37f 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 @@ -1,6 +1,3 @@ -No merges -insert -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') @@ -414,8 +411,6 @@ test "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } -With merges -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh deleted file mode 100755 index 144a88d8ec6..00000000000 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_25\`, json.b.b.\`_25\`.:Int64, json.b.b.\`_25\`.:UUID, json.b.b.\`_26\`, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:UUID, json.b.b.\`_27\`, json.b.b.\`_27\`.:Int64, json.b.b.\`_27\`.:UUID, json.b.b.\`_28\`, json.b.b.\`_28\`.:Int64, json.b.b.\`_28\`.:UUID, json.b.b.\`_29\`, json.b.b.\`_29\`.:Int64, json.b.b.\`_29\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.non.existing.path from test order by id format JSONColumns" - $CH_CLIENT -q "select 
json.non.existing.path.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.a.b.c from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.a.b.c from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id 
format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_26\` from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_26\`.:Int64, json.b.b, json.b.b.\`_26\`.:Date from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 
b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 new file mode 100644 index 00000000000..f571d2417f4 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 @@ -0,0 +1,93 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, 
json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, 
json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 similarity index 95% rename from tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference rename to tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 index db7180cec75..13343b21a8c 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 @@ -1,6 +1,3 @@ -No merges -insert -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') @@ -34,8 +31,6 @@ test 680000 0 0 -With merges -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh deleted file mode 100755 index 6df95ad4ad6..00000000000 --- 
a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, 
json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.non.existing.path is Null" - $CH_CLIENT -q "select count() from test where json.non.existing.path.:String is Null" - $CH_CLIENT -q "select json.non.existing.path from test order by id format Null" - $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.a.b.c == 0" - $CH_CLIENT -q "select json.a.b.c from test format Null" - $CH_CLIENT -q "select json.a.b.c from test order by id format Null" - $CH_CLIENT -q "select json, json.a.b.c from test format Null" - $CH_CLIENT -q "select json, json.a.b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null" - $CH_CLIENT -q "select json.b.b.e from test format Null" - $CH_CLIENT -q "select json.b.b.e from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, 
json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e from test format Null" - $CH_CLIENT -q "select json, json.b.b.e from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.a.b.d is Null " - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null" - $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`)" - $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" - $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by 
id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.d.a is Null and json.d.b is Null" - $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" - $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.d.a is Null and json.b.b.\`_1\` is Null" - $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.b.b.\`_1\`.:Int64 is Null" - $CH_CLIENT -q "select json.d.a, json.b.b.\`_1\` from test order by id format Null" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_1\` from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0" - $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format Null" - $CH_CLIENT -q 
"select json, json.^a, json.a.b.c from test format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null" - $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 new file mode 100644 index 00000000000..942489ec8fc --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 @@ -0,0 +1,128 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ 
merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select json.non.existing.path.:Int64 from test order by id format Null; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, 
json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from 
test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id 
format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference index 1bbd3926bdc..6c455b1bb0d 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference @@ -1,5 +1,3 @@ -insert -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh deleted file mode 100755 index 154c1aa5f9c..00000000000 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, 
json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.non.existing.path is Null" - $CH_CLIENT -q "select count() from test where json.non.existing.path.:String is Null" - $CH_CLIENT -q "select json.non.existing.path from test order by id format Null" - $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.a.b.c == 0" - $CH_CLIENT -q "select json.a.b.c from test format Null" - $CH_CLIENT -q "select json.a.b.c from test order by id format Null" - $CH_CLIENT -q "select json, json.a.b.c from test format Null" - $CH_CLIENT -q "select json, json.a.b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null" - $CH_CLIENT -q "select json.b.b.e from test format Null" - $CH_CLIENT -q "select json.b.b.e from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e from test format Null" - $CH_CLIENT -q "select json, json.b.b.e from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, 
json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.a.b.d is Null " - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null" - $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`)" - $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" - $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json, 
json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.d.a is Null and json.d.b is Null" - $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" - $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.d.a is Null and json.b.b.\`_1\` is Null" - $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.b.b.\`_1\`.:Int64 is Null" - $CH_CLIENT -q "select json.d.a, json.b.b.\`_1\` from test order by id format Null" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_1\` from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0" - $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null" - $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json.^a, 
json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory" -insert -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql new file mode 100644 index 00000000000..3c791118c9c --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql @@ -0,0 +1,123 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory; + +truncate table test; +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, 
json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select json.non.existing.path.:Int64 from test order by id format Null; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; 
+select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, 
json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date 
from test order by id format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 similarity index 95% rename from tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference rename to tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 index db7180cec75..13343b21a8c 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 @@ -1,6 +1,3 @@ -No merges -insert -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') @@ -34,8 +31,6 @@ test 680000 0 0 -With merges -test ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') ('a.b.d','DateTime64(9)') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh deleted file mode 100755 index d20d9a4d79d..00000000000 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --session_timezone=UTC" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, 
json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" - $CH_CLIENT -q "select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.\`_0\`, json.b.b.\`_0\`.:Int64, json.b.b.\`_0\`.:UUID, json.b.b.\`_1\`, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:UUID, json.b.b.\`_2\`, json.b.b.\`_2\`.:Int64, json.b.b.\`_2\`.:UUID, json.b.b.\`_3\`, json.b.b.\`_3\`.:Int64, json.b.b.\`_3\`.:UUID, json.b.b.\`_4\`, json.b.b.\`_4\`.:Int64, json.b.b.\`_4\`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.non.existing.path is Null" - $CH_CLIENT -q "select count() from test where json.non.existing.path.:String is Null" - $CH_CLIENT -q "select json.non.existing.path from test order by id format Null" - $CH_CLIENT -q "select json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path from test order by id format Null" - $CH_CLIENT -q 
"select json, json.non.existing.path.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null" - $CH_CLIENT -q "select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.a.b.c == 0" - $CH_CLIENT -q "select json.a.b.c from test format Null" - $CH_CLIENT -q "select json.a.b.c from test order by id format Null" - $CH_CLIENT -q "select json, json.a.b.c from test format Null" - $CH_CLIENT -q "select json, json.a.b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null" - $CH_CLIENT -q "select json.b.b.e from test format Null" - $CH_CLIENT -q "select json.b.b.e from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e from test format Null" - $CH_CLIENT -q "select json, json.b.b.e from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.a.b.d is Null " - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null" - $CH_CLIENT -q "select json.b.b.e, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`)" - $CH_CLIENT -q "select json.b.b.e, json.d.a from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test 
order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null" - $CH_CLIENT -q "select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" - $CH_CLIENT -q "select json.b.b.e, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" - $CH_CLIENT -q "select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.d.a is Null and json.d.b is Null" - $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.d.b.:Int64 is Null" - $CH_CLIENT -q "select json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.b from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where json.d.a is Null and json.b.b.\`_1\` is Null" - $CH_CLIENT -q "select count() from test where empty(json.d.a.:\`Array(Nullable(Int64))\`) and json.b.b.\`_1\`.:Int64 is Null" - $CH_CLIENT -q "select json.d.a, json.b.b.\`_1\` from test order by id format Null" - 
$CH_CLIENT -q "select json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.b.b.\`_1\` from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b.\`_1\`.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test format Null" - $CH_CLIENT -q "select json, json.d.a, json.d.a.:\`Array(Nullable(Int64))\`, json.d.a.:Date, json.b.b.\`_1\`.:Int64, json.b.b, json.b.b.\`_1\`.:Date from test order by id format Null" - - $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0" - $CH_CLIENT -q "select json.^a, json.a.b.c from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null" - $CH_CLIENT -q "select json.^a, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null" - $CH_CLIENT -q "select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 new file mode 100644 index 00000000000..325ce2dcbf5 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 @@ -0,0 +1,128 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from 
numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, 
json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select json.non.existing.path.:Int64 from test order by id format Null; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, 
json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test 
where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference.j2 similarity index 99% rename from tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference rename to tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference.j2 index ec8d1407bdb..0228ae1e7df 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference.j2 @@ -1,6 +1,3 @@ -Memory -insert -test ('a.a1','String') ('a.a2','String') ('a.a3','String') @@ -110,10 +107,6 @@ test "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] } -MergeTree compact -No merges -insert -test ('a.a1','String') ('a.a2','String') ('a.a3','String') @@ -223,8 +216,6 @@ 
test "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] } -With merges -test ('a.a1','String') ('a.a2','String') ('a.a3','String') @@ -334,10 +325,6 @@ test "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] } -MergeTree wide -No merges -insert -test ('a.a1','String') ('a.a2','String') ('a.a3','String') @@ -447,8 +434,6 @@ test "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] } -With merges -test ('a.a1','String') ('a.a2','String') ('a.a3','String') diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh deleted file mode 100755 index 9d8d02de7aa..00000000000 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(5, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10, 5)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(15, 5)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns" - - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "Memory" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b 
Array(JSON))) engine=Memory" -insert -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 new file mode 100644 index 00000000000..1353980cd35 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 @@ -0,0 +1,41 @@ +-- Tags: no-fasttest, long + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=Memory;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; system stop merges test;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; system start merges test;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; system stop merges test;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; system start merges test;'] -%} + +{{ create_command }} + +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(5, 5); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10, 5); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(15, 5); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, 
json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2 similarity index 96% rename from tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference rename to tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2 index 9c092278ec7..2fd3437e3d2 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2 @@ -1,6 +1,3 @@ -No merges -insert -test ('a.a1','String') ('a.a2','String') ('a.a3','String') @@ -31,8 +28,6 @@ test 20000 0 0 -With merges -test ('a.a1','String') ('a.a2','String') ('a.a3','String') diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh deleted file mode 100755 index c39411f7b09..00000000000 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1)" - $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64)" - $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null" - $CH_CLIENT -q "select count() from test 
where arrayJoin(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null" - - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" - - $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0)" - $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64)" - $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null" - $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null" - - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 new file mode 100644 index 00000000000..e1e0c07a5df --- /dev/null +++ 
b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 @@ -0,0 +1,57 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + +select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and 
arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +{% endfor -%} + +drop table test; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference index 63d10b1315f..34557cf60bb 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference @@ -1,5 +1,3 @@ -insert -test ('a.a1','String') ('a.a2','String') ('a.a3','String') diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh deleted file mode 100755 index b179baeadef..00000000000 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset 
--log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1)" - $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64)" - $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and 
arrayJoin(json.a.r[].b.c.d_1) is null" - $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null" - - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" - - $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0)" - $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64)" - $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null" - $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null" - - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=Memory" -insert -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql new file mode 100644 index 00000000000..1cd5e2b8d47 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql @@ -0,0 +1,52 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set 
use_variant_as_common_type = 1; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=Memory; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + +select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select
json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2 similarity index 96% rename from tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference rename to tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2 index 9c092278ec7..2fd3437e3d2 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2 @@ -1,6 +1,3 @@ -No merges -insert -test ('a.a1','String') ('a.a2','String') ('a.a3','String') @@ -31,8 +28,6 @@ test 20000 0 0 -With merges -test ('a.a1','String') ('a.a2','String') ('a.a3','String') diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh deleted file mode 100755 index 6686179e801..00000000000 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" 
&& pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function insert() -{ - echo "insert" - $CH_CLIENT -q "truncate table test" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000)" -} - -function test() -{ - echo "test" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types" - $CH_CLIENT -q "select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types" - - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" - $CH_CLIENT -q "select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null" - $CH_CLIENT -q "select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null" - - $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1)" - $CH_CLIENT -q "select count() from test where empty(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64)" - $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e) is null and 
arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null" - $CH_CLIENT -q "select count() from test where arrayJoin(json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null" - - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].c.d.e.:\`Array(Nullable(Int64))\`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null" - - $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0)" - $CH_CLIENT -q "select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64)" - $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null" - $CH_CLIENT -q "select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null" - - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null" - $CH_CLIENT -q "select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null" -} - -$CH_CLIENT -q "drop table if exists test;" - -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -echo "No merges" -$CH_CLIENT -q "system stop merges test" -insert -test -echo "With merges" -$CH_CLIENT -q "system start merges test" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 new file mode 100644 index 00000000000..89223316aa3 --- 
/dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 @@ -0,0 +1,57 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + +select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and 
arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +{% endfor -%} + +drop table test; diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 similarity index 95% rename from tests/queries/0_stateless/03209_json_type_horizontal_merges.reference rename to tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 index 203b59521f4..31ba25b7732 100644 --- a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 @@ -1,5 +1,3 @@ -MergeTree compact -test Dynamic paths 100000 a 90000 b @@ -48,8 +46,6 @@ Shared data paths 70000 d 60000 e 10000 g -MergeTree wide -test Dynamic paths 100000 a 90000 b diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sh b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sh deleted file mode 100755 index 7b30827e7a0..00000000000 --- 
a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1" - -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a', number)) from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b', number)) from numbers(90000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('c', number)) from numbers(80000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('d', number)) from numbers(70000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('e', number)) from numbers(60000)" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" - - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('f', number)) from numbers(200000)" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('g', number)) from numbers(10000)" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact" 
-$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 new file mode 100644 index 00000000000..6ae9f438432 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 @@ -0,0 +1,59 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;'] -%} + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(100000); +insert into test select number, toJSONString(map('b', number)) from numbers(90000); +insert into test select number, toJSONString(map('c', number)) from numbers(80000); +insert into test select number, toJSONString(map('d', number)) from numbers(70000); +insert into test select number, toJSONString(map('e', number)) from numbers(60000); +insert into test select number, '{}' from numbers(100000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('f', number)) from numbers(200000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start 
merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('g', number)) from numbers(10000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 similarity index 95% rename from tests/queries/0_stateless/03209_json_type_vertical_merges.reference rename to tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 index 203b59521f4..31ba25b7732 100644 --- a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 @@ -1,5 +1,3 @@ -MergeTree compact -test Dynamic paths 100000 a 90000 b @@ -48,8 +46,6 @@ Shared data paths 70000 d 60000 e 10000 g -MergeTree wide -test Dynamic paths 100000 a 90000 b diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.sh b/tests/queries/0_stateless/03209_json_type_vertical_merges.sh deleted file mode 100755 index 66b05ea0394..00000000000 --- a/tests/queries/0_stateless/03209_json_type_vertical_merges.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1" - -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a', number)) from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b', number)) from numbers(90000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('c', number)) from numbers(80000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('d', number)) from numbers(70000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('e', number)) from numbers(60000)" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(100000)" - - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('f', number)) from numbers(200000)" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('g', number)) from numbers(10000)" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, 
merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;" -test -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 new file mode 100644 index 00000000000..aef36452bb8 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 @@ -0,0 +1,59 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;'] -%} + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(100000); +insert into test select number, toJSONString(map('b', number)) from numbers(90000); +insert into test select number, toJSONString(map('c', number)) from numbers(80000); +insert into test select number, toJSONString(map('d', number)) from numbers(70000); +insert into test select number, toJSONString(map('e', number)) from numbers(60000); +insert into test select number, '{}' from numbers(100000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('f', number)) from numbers(200000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as 
path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('g', number)) from numbers(10000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.reference b/tests/queries/0_stateless/03210_json_type_alter_add_column.reference.j2 similarity index 97% rename from tests/queries/0_stateless/03210_json_type_alter_add_column.reference rename to tests/queries/0_stateless/03210_json_type_alter_add_column.reference.j2 index 907cde709f3..37b6854938a 100644 --- a/tests/queries/0_stateless/03210_json_type_alter_add_column.reference +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.reference.j2 @@ -1,4 +1,3 @@ -Memory initial insert alter add column 1 0 {} \N {} \N \N @@ -23,7 +22,6 @@ insert after alter add column 12 {} \N {} \N \N 13 {} \N {} \N \N 14 {} \N {} \N \N -MergeTree compact initial insert alter add column 1 0 {} \N {} \N \N @@ -48,7 +46,6 @@ insert after alter add column 12 {} \N {} \N \N 13 {} \N {} \N \N 14 {} \N {} \N \N -MergeTree wide initial insert alter add column 1 0 {} \N {} \N \N diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.sh b/tests/queries/0_stateless/03210_json_type_alter_add_column.sh deleted file mode 100755 index dfa5de9f091..00000000000 --- a/tests/queries/0_stateless/03210_json_type_alter_add_column.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1" - -function run() -{ - echo "initial insert" - $CH_CLIENT -q "insert into test select number from numbers(3)" - - echo "alter add column 1" - $CH_CLIENT -q "alter table test add column json JSON settings mutations_sync=1" - $CH_CLIENT -q "select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -q "select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x" - - echo "insert after alter add column" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a.b', number::UInt32)) from numbers(3, 3)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b.c', number::UInt32)) from numbers(6, 3)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('c.d', number::UInt32)) from numbers(9, 3)" - $CH_CLIENT -q "insert into test select number, '{}' from numbers(12, 3)" - $CH_CLIENT -q "select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path" - $CH_CLIENT -q "select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "Memory" -$CH_CLIENT -q "create table test (x UInt64) engine=Memory" -run -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;" -run -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -run -$CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 b/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 new file mode 100644 index 00000000000..8ab9f5181e3 --- /dev/null +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 @@ -0,0 +1,33 @@ +-- Tags: no-fasttest, long + +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (x UInt64) engine=Memory;', + 'create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;', + 'create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;'] -%} + +{{ create_command }} + +select 'initial insert'; +insert into test select number from numbers(3); + +select 'alter add column 1'; +alter table test add column json JSON settings mutations_sync=1; +select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path; +select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x; + +select 'insert after alter add column'; +insert into test select number, toJSONString(map('a.b', number::UInt32)) from numbers(3, 3); +insert into test select number, toJSONString(map('b.c', number::UInt32)) from numbers(6, 3); +insert into test select number, toJSONString(map('c.d', number::UInt32)) from numbers(9, 3); +insert into test select number, '{}' from numbers(12, 3); +select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path 
order by count() desc, path; +select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03211_nested_json_merges.reference b/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 similarity index 85% rename from tests/queries/0_stateless/03211_nested_json_merges.reference rename to tests/queries/0_stateless/03211_nested_json_merges.reference.j2 index 221fcefba66..e05e9ced0b1 100644 --- a/tests/queries/0_stateless/03211_nested_json_merges.reference +++ b/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 @@ -1,5 +1,3 @@ -MergeTree compact + horizontal merge -test Dynamic paths 300000 c 150000 d @@ -20,8 +18,6 @@ Shared data paths 300000 c 150000 d 150000 e -MergeTree wide + horizontal merge -test Dynamic paths 300000 c 150000 d @@ -42,8 +38,6 @@ Shared data paths 300000 c 150000 d 150000 e -MergeTree compact + vertical merge -test Dynamic paths 300000 c 150000 d @@ -64,8 +58,6 @@ Shared data paths 300000 c 150000 d 150000 e -MergeTree wide + vertical merge -test Dynamic paths 300000 c 150000 d diff --git a/tests/queries/0_stateless/03211_nested_json_merges.sh b/tests/queries/0_stateless/03211_nested_json_merges.sh deleted file mode 100755 index be0ee696694..00000000000 --- a/tests/queries/0_stateless/03211_nested_json_merges.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# reset --log_comment -CLICKHOUSE_LOG_COMMENT= -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_json_type=1" - - -function test() -{ - echo "test" - $CH_CLIENT -q "system stop merges test" - $CH_CLIENT -q "insert into test select number, toJSONString(map('a', number)) from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(100000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(50000)" - - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" - - $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(50000)" - $CH_CLIENT -q "insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(200000)" - - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from 
test group by path order by count() desc, path" - $CH_CLIENT -nm -q "system start merges test; optimize table test final;" - echo "Dynamic paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" - echo "Shared data paths" - $CH_CLIENT -q "select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path" -} - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact + horizontal merge" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide + horizontal merge" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree compact + vertical merge" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" -test -$CH_CLIENT -q "drop table test;" - -echo "MergeTree wide + vertical merge" -$CH_CLIENT -q "create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 b/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 new file mode 100644 index 00000000000..4ab5a5da0ef --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 @@ -0,0 +1,46 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(100000); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(100000); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(50000); + +select 'Dynamic paths'; +select count(), 
arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(50000); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(200000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} From 9c28c64adf04cd03711a846a17babd683dc2c002 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Wed, 24 Jul 2024 19:55:03 +0000 Subject: [PATCH 0844/2361] Fix deprecated HDFS image and harden test_storage_hdfs. 
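Besides replacing the deprecated sequenceiq/hadoop-docker image with prasanthj/docker-hadoop:2.6.0, the hardening follows one recurring pattern: each test that writes to HDFS derives a unique path or directory from uuid.uuid4() and drops the tables and files it created, so reruns and parallel runs cannot collide on shared names such as /simple_storage. A minimal sketch of that pattern, assuming the HdfsClient helper and the node1/started_cluster fixtures already defined in test.py (the test name and the try/finally cleanup are illustrative only, not part of the patch):

    import uuid

    # HdfsClient, node1 and started_cluster come from the existing test module;
    # this function only illustrates the per-test isolation pattern.
    def test_isolated_read_write(started_cluster):
        fs = HdfsClient(hosts=started_cluster.hdfs_ip)
        dir = f"isolated_{uuid.uuid4()}"  # unique per run, no shared state
        fs.mkdirs(f"/{dir}/", permission=777)
        try:
            node1.query(
                f"create table t (id UInt32, s String) "
                f"engine=HDFS('hdfs://hdfs1:9000/{dir}/data', 'TSV')"
            )
            node1.query("insert into t values (1, 'Mark')")
            assert node1.query("select * from t") == "1\tMark\n"
        finally:
            # Clean up even if the assertion above fails.
            node1.query("drop table if exists t")
            fs.delete(f"/{dir}", recursive=True)
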
--- .../compose/docker_compose_hdfs.yml | 2 +- tests/integration/test_storage_hdfs/test.py | 134 +++++++++++------- 2 files changed, 87 insertions(+), 49 deletions(-) diff --git a/tests/integration/compose/docker_compose_hdfs.yml b/tests/integration/compose/docker_compose_hdfs.yml index 1cae54ad9e1..40a10df01f7 100644 --- a/tests/integration/compose/docker_compose_hdfs.yml +++ b/tests/integration/compose/docker_compose_hdfs.yml @@ -1,7 +1,7 @@ version: '2.3' services: hdfs1: - image: sequenceiq/hadoop-docker:2.7.0 + image: prasanthj/docker-hadoop:2.6.0 hostname: hdfs1 restart: always expose: diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 47d8f44c0b7..aaeb472dd52 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -1,6 +1,7 @@ import os import pytest +import uuid import time from helpers.cluster import ClickHouseCluster, is_arm from helpers.test_tools import TSV @@ -31,13 +32,15 @@ def started_cluster(): def test_read_write_storage(started_cluster): + id = uuid.uuid4() hdfs_api = started_cluster.hdfs_api + filename = f"simple_storage_{id}" node1.query("drop table if exists SimpleHDFSStorage SYNC") node1.query( - "create table SimpleHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/simple_storage', 'TSV')" + f"create table SimpleHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/{filename}', 'TSV')" ) node1.query("insert into SimpleHDFSStorage values (1, 'Mark', 72.53)") - assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n" + assert hdfs_api.read_data(f"/{filename}") == "1\tMark\t72.53\n" assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n" @@ -92,6 +95,10 @@ def test_read_write_storage_with_globs(started_cluster): print(ex) assert "in readonly mode" in str(ex) + node1.query("drop table HDFSStorageWithRange") + node1.query("drop table HDFSStorageWithEnum") + node1.query("drop table HDFSStorageWithQuestionMark") + node1.query("drop table HDFSStorageWithAsterisk") def test_storage_with_multidirectory_glob(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -137,7 +144,6 @@ def test_read_write_table(started_cluster): def test_write_table(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query( "create table OtherHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')" ) @@ -148,7 +154,8 @@ def test_write_table(started_cluster): result = "10\ttomas\t55.55\n11\tjack\t32.54\n" assert hdfs_api.read_data("/other_storage") == result assert node1.query("select * from OtherHDFSStorage order by id") == result - + node1.query("truncate table OtherHDFSStorage") + node1.query("drop table OtherHDFSStorage") def test_bad_hdfs_uri(started_cluster): try: @@ -166,6 +173,7 @@ def test_bad_hdfs_uri(started_cluster): print(ex) assert "Unable to connect to HDFS" in str(ex) + node1.query("drop table BadStorage2") try: node1.query( "create table BadStorage3 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/<>', 'TSV')" @@ -173,6 +181,7 @@ def test_bad_hdfs_uri(started_cluster): except Exception as ex: print(ex) assert "Unable to open HDFS file" in str(ex) + node1.query("drop table BadStorage3") @pytest.mark.timeout(800) @@ -304,7 +313,8 @@ def test_write_gz_storage(started_cluster): node1.query("insert into GZHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_gzip_data("/storage.gz") == 
"1\tMark\t72.53\n" assert node1.query("select * from GZHDFSStorage") == "1\tMark\t72.53\n" - + node1.query("truncate table GZHDFSStorage") + node1.query("drop table GZHDFSStorage") def test_write_gzip_storage(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -315,6 +325,8 @@ def test_write_gzip_storage(started_cluster): node1.query("insert into GZIPHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_gzip_data("/gzip_storage") == "1\tMark\t72.53\n" assert node1.query("select * from GZIPHDFSStorage") == "1\tMark\t72.53\n" + node1.query("truncate table GZIPHDFSStorage") + node1.query("drop table GZIPHDFSStorage") def test_virtual_columns(started_cluster): @@ -333,7 +345,7 @@ def test_virtual_columns(started_cluster): ) == expected ) - + node1.query("drop table virtual_cols") def test_read_files_with_spaces(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -354,6 +366,7 @@ def test_read_files_with_spaces(started_cluster): ) assert node1.query("select * from test order by id") == "1\n2\n3\n" fs.delete(dir, recursive=True) + node1.query(f"drop table test") def test_truncate_table(started_cluster): @@ -375,47 +388,52 @@ def test_truncate_table(started_cluster): def test_partition_by(started_cluster): - hdfs_api = started_cluster.hdfs_api - + fs = HdfsClient(hosts=started_cluster.hdfs_ip) + id = uuid.uuid4() table_format = "column1 UInt32, column2 UInt32, column3 UInt32" + dir = f"partition_{id}" + fs.mkdirs(f"/{dir}/", permission=777) + file_name = "test_{_partition_id}" partition_by = "column3" values = "(1, 2, 3), (3, 2, 1), (1, 3, 2)" - table_function = f"hdfs('hdfs://hdfs1:9000/{file_name}', 'TSV', '{table_format}')" + table_function = f"hdfs('hdfs://hdfs1:9000/{dir}/{file_name}', 'TSV', '{table_format}')" node1.query( f"insert into table function {table_function} PARTITION BY {partition_by} values {values}" ) result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/test_1', 'TSV', '{table_format}')" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_1', 'TSV', '{table_format}')" ) assert result.strip() == "3\t2\t1" result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/test_2', 'TSV', '{table_format}')" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_2', 'TSV', '{table_format}')" ) assert result.strip() == "1\t3\t2" result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/test_3', 'TSV', '{table_format}')" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_3', 'TSV', '{table_format}')" ) assert result.strip() == "1\t2\t3" file_name = "test2_{_partition_id}" node1.query( - f"create table p(column1 UInt32, column2 UInt32, column3 UInt32) engine = HDFS('hdfs://hdfs1:9000/{file_name}', 'TSV') partition by column3" + f"create table p(column1 UInt32, column2 UInt32, column3 UInt32) engine = HDFS('hdfs://hdfs1:9000/{dir}/{file_name}', 'TSV') partition by column3" ) node1.query(f"insert into p values {values}") result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/test2_1', 'TSV', '{table_format}')" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test2_1', 'TSV', '{table_format}')" ) assert result.strip() == "3\t2\t1" result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/test2_2', 'TSV', '{table_format}')" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test2_2', 'TSV', '{table_format}')" ) assert result.strip() == "1\t3\t2" result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/test2_3', 'TSV', '{table_format}')" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test2_3', 'TSV', '{table_format}')" ) assert 
result.strip() == "1\t2\t3" + node1.query(f"drop table p") + fs.delete("/{dir}", recursive=True) def test_seekable_formats(started_cluster): @@ -425,7 +443,7 @@ def test_seekable_formats(started_cluster): f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')" ) node1.query( - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"SELECT count() FROM {table_function}") @@ -433,7 +451,7 @@ def test_seekable_formats(started_cluster): table_function = f"hdfs('hdfs://hdfs1:9000/orc', 'ORC', 'a Int32, b String')" node1.query( - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"SELECT count() FROM {table_function}") assert int(result) == 5000000 @@ -457,7 +475,7 @@ def test_read_table_with_default(started_cluster): def test_schema_inference(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000)" + f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/native', 'Native')") @@ -476,7 +494,7 @@ def test_schema_inference(started_cluster): result = node1.query(f"select count(*) from schema_inference") assert int(result) == 5000000 - + node1.query(f"drop table schema_inference") def test_hdfsCluster(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -510,6 +528,7 @@ def test_hdfs_directory_not_exist(started_cluster): assert "" == node1.query( "select * from HDFSStorageWithNotExistDir settings hdfs_ignore_file_doesnt_exist=1" ) + node1.query("drop table HDFSStorageWithNotExistDir") def test_overwrite(started_cluster): @@ -529,12 +548,16 @@ def test_overwrite(started_cluster): result = node1.query(f"select count() from test_overwrite") assert int(result) == 10 + node1.query(f"truncate table test_overwrite") + node1.query(f"drop table test_overwrite") def test_multiple_inserts(started_cluster): - hdfs_api = started_cluster.hdfs_api + fs = HdfsClient(hosts=started_cluster.hdfs_ip) + id = uuid.uuid4() + fs.mkdirs(f"/{id}/", permission=777) - table_function = f"hdfs('hdfs://hdfs1:9000/data_multiple_inserts', 'Parquet', 'a Int32, b String')" + table_function = f"hdfs('hdfs://hdfs1:9000/{id}/data_multiple_inserts', 'Parquet', 'a Int32, b String')" node1.query(f"create table test_multiple_inserts as {table_function}") node1.query( f"insert into test_multiple_inserts select number, randomString(100) from numbers(10)" @@ -551,7 +574,7 @@ def test_multiple_inserts(started_cluster): result = node1.query(f"drop table test_multiple_inserts") - table_function = f"hdfs('hdfs://hdfs1:9000/data_multiple_inserts.gz', 'Parquet', 'a Int32, b String')" + table_function = f"hdfs('hdfs://hdfs1:9000/{id}/data_multiple_inserts.gz', 'Parquet', 'a Int32, b String')" node1.query(f"create table test_multiple_inserts as {table_function}") node1.query( f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(10)" @@ -565,7 +588,7 @@ def 
test_multiple_inserts(started_cluster): result = node1.query(f"select count() from test_multiple_inserts") assert int(result) == 60 - + node1.query(f"drop table test_multiple_inserts") def test_format_detection(started_cluster): node1.query( @@ -574,6 +597,8 @@ def test_format_detection(started_cluster): node1.query(f"insert into arrow_table select 1") result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/data.arrow')") assert int(result) == 1 + node1.query(f"truncate table arrow_table") + node1.query(f"drop table arrow_table") def test_schema_inference_with_globs(started_cluster): @@ -618,6 +643,8 @@ def test_schema_inference_with_globs(started_cluster): def test_insert_select_schema_inference(started_cluster): + fs = HdfsClient(hosts=started_cluster.hdfs_ip) + node1.query( f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x" ) @@ -627,6 +654,7 @@ def test_insert_select_schema_inference(started_cluster): result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test.native.zst')") assert int(result) == 1 + fs.delete('/test.native.zst') def test_cluster_join(started_cluster): @@ -967,11 +995,11 @@ def test_read_subcolumns(started_cluster): node = started_cluster.instances["node1"] node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) settings hdfs_truncate_on_insert=1" ) node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) settings hdfs_truncate_on_insert=1" ) res = node.query( @@ -1003,7 +1031,7 @@ def test_read_subcolumn_time(started_cluster): node = started_cluster.instances["node1"] node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumn_time.tsv', auto, 'a UInt32') select (42)" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumn_time.tsv', auto, 'a UInt32') select (42) settings hdfs_truncate_on_insert=1" ) res = node.query( @@ -1014,91 +1042,101 @@ def test_read_subcolumn_time(started_cluster): def test_union_schema_inference_mode(started_cluster): + id = uuid.uuid4() + fs = HdfsClient(hosts=started_cluster.hdfs_ip) + + dir = f"union_{id}" + fs.mkdirs(f"/{dir}/", permission=777) + node = started_cluster.instances["node1"] node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a" + f"insert into function hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference1.jsonl') select 1 as a" ) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b" + f"insert into function hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference2.jsonl') select 2 as b" ) node.query("system drop schema cache for hdfs") result = node.query( - "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert result == "a\tNullable(Int64)\nb\tNullable(Int64)\n" result = node.query( - 
"select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%test_union_schema_inference%' order by file format TSV" + f"select schema_inference_mode, splitByChar('/', source)[-1] as file, schema from system.schema_inference_cache where source like '%test_union_schema_inference%' order by file format TSV" ) assert ( result == "UNION\ttest_union_schema_inference1.jsonl\ta Nullable(Int64)\n" "UNION\ttest_union_schema_inference2.jsonl\tb Nullable(Int64)\n" ) result = node.query( - "select * from hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') order by tuple(*) settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference*.jsonl') order by tuple(*) settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert result == "1\t\\N\n" "\\N\t2\n" node.query(f"system drop schema cache for hdfs") result = node.query( - "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference2.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert result == "b\tNullable(Int64)\n" result = node.query( - "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error'" + f"insert into function hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference3.jsonl', TSV) select 'Error'" ) error = node.query_and_get_error( - "desc hdfs('hdfs://hdfs1:9000/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" + f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_union_schema_inference*.jsonl') settings schema_inference_mode='union', describe_compact_output=1 format TSV" ) assert "CANNOT_EXTRACT_TABLE_STRUCTURE" in error def test_format_detection(started_cluster): node = started_cluster.instances["node1"] + fs = HdfsClient(hosts=started_cluster.hdfs_ip) + id = uuid.uuid4() + dir = f"{id}" + fs.mkdirs(f"/{dir}/", permission=777) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0)" + f"insert into function hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0)" ) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10)" + f"insert into function hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10)" ) expected_desc_result = node.query( - "desc hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow)" + f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1', JSONEachRow)" ) - desc_result = node.query("desc hdfs('hdfs://hdfs1:9000/test_format_detection1')") + desc_result = node.query(f"desc 
hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1')") assert expected_desc_result == desc_result expected_result = node.query( - "select * from hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow, 'x UInt64, y String') order by x, y" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1', JSONEachRow, 'x UInt64, y String') order by x, y" ) result = node.query( - "select * from hdfs('hdfs://hdfs1:9000/test_format_detection1') order by x, y" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1') order by x, y" ) assert expected_result == result result = node.query( - "select * from hdfs('hdfs://hdfs1:9000/test_format_detection1', auto, 'x UInt64, y String') order by x, y" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1', auto, 'x UInt64, y String') order by x, y" ) assert expected_result == result result = node.query( - "select * from hdfs('hdfs://hdfs1:9000/test_format_detection{0,1}') order by x, y" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection{{0,1}}') order by x, y" ) assert expected_result == result @@ -1106,25 +1144,25 @@ def test_format_detection(started_cluster): node.query("system drop schema cache for hdfs") result = node.query( - "select * from hdfs('hdfs://hdfs1:9000/test_format_detection{0,1}') order by x, y" + f"select * from hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection{{0,1}}') order by x, y" ) assert expected_result == result result = node.query( - "select * from hdfsCluster(test_cluster_two_shards, 'hdfs://hdfs1:9000/test_format_detection{0,1}') order by x, y" + f"select * from hdfsCluster(test_cluster_two_shards, 'hdfs://hdfs1:9000/{dir}/test_format_detection{{0,1}}') order by x, y" ) assert expected_result == result result = node.query( - "select * from hdfsCluster(test_cluster_two_shards, 'hdfs://hdfs1:9000/test_format_detection{0,1}', auto, auto) order by x, y" + f"select * from hdfsCluster(test_cluster_two_shards, 'hdfs://hdfs1:9000/{dir}/test_format_detection{{0,1}}', auto, auto) order by x, y" ) assert expected_result == result result = node.query( - "select * from hdfsCluster(test_cluster_two_shards, 'hdfs://hdfs1:9000/test_format_detection{0,1}', auto, 'x UInt64, y String') order by x, y" + f"select * from hdfsCluster(test_cluster_two_shards, 'hdfs://hdfs1:9000/{dir}/test_format_detection{{0,1}}', auto, 'x UInt64, y String') order by x, y" ) assert expected_result == result From 34c8351bce754740a18388dd623309dfb6030bb0 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Wed, 24 Jul 2024 19:55:21 +0000 Subject: [PATCH 0845/2361] black --- tests/integration/test_storage_hdfs/test.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index aaeb472dd52..4aac0142026 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -100,6 +100,7 @@ def test_read_write_storage_with_globs(started_cluster): node1.query("drop table HDFSStorageWithQuestionMark") node1.query("drop table HDFSStorageWithAsterisk") + def test_storage_with_multidirectory_glob(started_cluster): hdfs_api = started_cluster.hdfs_api for i in ["1", "2"]: @@ -157,6 +158,7 @@ def test_write_table(started_cluster): node1.query("truncate table OtherHDFSStorage") node1.query("drop table OtherHDFSStorage") + def test_bad_hdfs_uri(started_cluster): try: node1.query( @@ -316,6 +318,7 @@ def 
test_write_gz_storage(started_cluster): node1.query("truncate table GZHDFSStorage") node1.query("drop table GZHDFSStorage") + def test_write_gzip_storage(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -347,6 +350,7 @@ def test_virtual_columns(started_cluster): ) node1.query("drop table virtual_cols") + def test_read_files_with_spaces(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -397,7 +401,9 @@ def test_partition_by(started_cluster): file_name = "test_{_partition_id}" partition_by = "column3" values = "(1, 2, 3), (3, 2, 1), (1, 3, 2)" - table_function = f"hdfs('hdfs://hdfs1:9000/{dir}/{file_name}', 'TSV', '{table_format}')" + table_function = ( + f"hdfs('hdfs://hdfs1:9000/{dir}/{file_name}', 'TSV', '{table_format}')" + ) node1.query( f"insert into table function {table_function} PARTITION BY {partition_by} values {values}" @@ -496,6 +502,7 @@ def test_schema_inference(started_cluster): assert int(result) == 5000000 node1.query(f"drop table schema_inference") + def test_hdfsCluster(started_cluster): hdfs_api = started_cluster.hdfs_api fs = HdfsClient(hosts=started_cluster.hdfs_ip) @@ -590,6 +597,7 @@ def test_multiple_inserts(started_cluster): assert int(result) == 60 node1.query(f"drop table test_multiple_inserts") + def test_format_detection(started_cluster): node1.query( f"create table arrow_table (x UInt64) engine=HDFS('hdfs://hdfs1:9000/data.arrow')" @@ -654,7 +662,7 @@ def test_insert_select_schema_inference(started_cluster): result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test.native.zst')") assert int(result) == 1 - fs.delete('/test.native.zst') + fs.delete("/test.native.zst") def test_cluster_join(started_cluster): @@ -1115,7 +1123,9 @@ def test_format_detection(started_cluster): f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1', JSONEachRow)" ) - desc_result = node.query(f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1')") + desc_result = node.query( + f"desc hdfs('hdfs://hdfs1:9000/{dir}/test_format_detection1')" + ) assert expected_desc_result == desc_result From e4b50c18c2c1918905bf44a8e1183f0cddd5a811 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 24 Jul 2024 22:26:46 +0200 Subject: [PATCH 0846/2361] getauxval: Avoid crash under sanitizer re-exec due to high ASLR entropy --- base/glibc-compatibility/musl/getauxval.c | 38 +++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c index ea5cff9fc11..86f9a546ee4 100644 --- a/base/glibc-compatibility/musl/getauxval.c +++ b/base/glibc-compatibility/musl/getauxval.c @@ -75,6 +75,44 @@ unsigned long NO_SANITIZE_THREAD __getauxval_procfs(unsigned long type) } static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type) { +#if defined(__x86_64__) && defined(__has_feature) +# if __has_feature(memory_sanitizer) || __has_feature(thread_sanitizer) + /// Sanitizers are not compatible with high ASLR entropy, which is the default on modern Linux distributions, and + /// to workaround this limitation, TSAN and MSAN (couldn't see other sanitizers doing the same), re-exec the binary + /// without ASLR (see https://github.com/llvm/llvm-project/commit/0784b1eefa36d4acbb0dacd2d18796e26313b6c5) + + /// The problem we face is that, in order to re-exec, the sanitizer wants to use the original pathname in the call + /// and to get its value it uses getauxval 
(https://github.com/llvm/llvm-project/blob/20eff684203287828d6722fc860b9d3621429542/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp#L985-L988). + /// Since we provide getauxval ourselves (to minimize the version dependency on runtime glibc), we are the ones + // being called and we fail horribly: + /// + /// ==301455==ERROR: MemorySanitizer: SEGV on unknown address 0x2ffc6d721550 (pc 0x5622c1cc0073 bp 0x000000000003 sp 0x7ffc6d721530 T301455) + /// ==301455==The signal is caused by a WRITE memory access. + /// #0 0x5622c1cc0073 in __auxv_init_procfs ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:129:5 + /// #1 0x5622c1cbffe9 in getauxval ./ClickHouse/base/glibc-compatibility/musl/getauxval.c:240:12 + /// #2 0x5622c0d7bfb4 in __sanitizer::ReExec() crtstuff.c + /// #3 0x5622c0df7bfc in __msan::InitShadowWithReExec(bool) crtstuff.c + /// #4 0x5622c0d95356 in __msan_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x256356) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) + /// #5 0x5622c0dfe878 in msan.module_ctor main.cc + /// #6 0x5622c1cc156c in __libc_csu_init (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x118256c) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) + /// #7 0x73dc05dd7ea3 in __libc_start_main /usr/src/debug/glibc/glibc/csu/../csu/libc-start.c:343:6 + /// #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) + + /// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as + /// most global variables aren't initialized or available yet, so we we can't initiate the auxiliar vector. + /// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very + /// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise + // this complexity of reading "/proc/self/auxv" or using __environ would not be necessary). + + /// To avoid this crashes on the re-exec call (see above how it would fail when creating `aux`, and it we used + /// __auxv_init_environ then it would SIGSEV on READing `__environ`) we capture this call for `AT_EXECFN` and + /// unconditionally return "/proc/self/exe" without any preparation. Theoretically this should be fine in + /// our case, as we don't load any libraries. That's the theory at least. 
+ if (type == AT_EXECFN) + return (unsigned long)"/proc/self/exe"; +# endif +#endif + // For debugging: // - od -t dL /proc/self/auxv // - LD_SHOW_AUX= ls From fda11dc62d81b717b9ab06c8adc8554c827764bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 24 Jul 2024 22:51:26 +0200 Subject: [PATCH 0847/2361] Typo --- base/glibc-compatibility/musl/getauxval.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c index 86f9a546ee4..b5bd2f114c2 100644 --- a/base/glibc-compatibility/musl/getauxval.c +++ b/base/glibc-compatibility/musl/getauxval.c @@ -99,7 +99,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type) /// #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) /// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as - /// most global variables aren't initialized or available yet, so we we can't initiate the auxiliar vector. + /// most global variables aren't initialized or available yet, so we we can't initiate the auxiliary vector. /// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very /// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise // this complexity of reading "/proc/self/auxv" or using __environ would not be necessary). From c847d2f63fdacf1ce5d636a9af8812d543547cfe Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 24 Jul 2024 20:52:35 +0000 Subject: [PATCH 0848/2361] fix --- src/Interpreters/InterpreterCreateQuery.cpp | 25 ++++++++++++--------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 05df26b0d31..a5f374ba71c 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1282,24 +1282,27 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) TableProperties properties = getTablePropertiesAndNormalizeCreateQuery(create, mode); /// Projection is only supported in (Replictaed)MergeTree. - if (std::string_view engine_name(create.storage->engine->name); - !properties.projections.empty() && engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree") + if (create.storage && create.storage->engine) { - bool projection_support = false; - if (auto * setting = create.storage->settings; setting != nullptr) + if (std::string_view engine_name(create.storage->engine->name); + !properties.projections.empty() && engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree") { - for (const auto & change : setting->changes) + bool projection_support = false; + if (auto * setting = create.storage->settings; setting != nullptr) { - if (change.name == "deduplicate_merge_projection_mode" && change.value != Field("throw")) + for (const auto & change : setting->changes) { - projection_support = true; - break; + if (change.name == "deduplicate_merge_projection_mode" && change.value != Field("throw")) + { + projection_support = true; + break; + } } } + if (!projection_support) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Projection is only supported in (Replictaed)MergeTree. 
Consider drop or rebuild option of deduplicate_merge_projection_mode."); } - if (!projection_support) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Projection is only supported in (Replictaed)MergeTree. Consider drop or rebuild option of deduplicate_merge_projection_mode."); } /// Check type compatible for materialized dest table and select columns From c418ea4e60bdfcab416951f41928d3c72c490188 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 24 Jul 2024 21:42:17 +0000 Subject: [PATCH 0849/2361] Fix build --- src/Functions/FunctionsJSON.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 848856c500f..ea485241aab 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -739,7 +739,7 @@ public: { NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, error)) + if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, true, error)) return false; auto & col_vec = assert_cast &>(dest); col_vec.insertValue(value); From a6a9b8c27204f96e373c9625145dc1609cb7ca8f Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky <43110995+evillique@users.noreply.github.com> Date: Thu, 25 Jul 2024 00:49:28 +0200 Subject: [PATCH 0850/2361] Fix flaky 02447_drop_replica test --- tests/queries/0_stateless/02447_drop_database_replica.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/02447_drop_database_replica.sh b/tests/queries/0_stateless/02447_drop_database_replica.sh index 93a5fcee8e2..c6bf298f944 100755 --- a/tests/queries/0_stateless/02447_drop_database_replica.sh +++ b/tests/queries/0_stateless/02447_drop_database_replica.sh @@ -1,5 +1,9 @@ #!/usr/bin/env bash +# Tags: no-parallel +# no-parallel: This test is not parallel because when we execute system-wide SYSTEM DROP REPLICA, +# other tests might shut down the storage in parallel and the test will fail. + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh From 8df648b3c8bbc22cee9657145b825e9d991e3c8e Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 25 Jul 2024 00:56:41 +0200 Subject: [PATCH 0851/2361] fix a test, add retries for sql tests --- src/Client/ClientBase.cpp | 12 ++++- src/Client/TestHint.cpp | 52 ++++++++++++++++++- src/Client/TestHint.h | 6 +++ .../02446_parent_zero_copy_locks.sql | 14 +++-- 4 files changed, 76 insertions(+), 8 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 13dce05cabc..149e1899ac3 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -2230,6 +2230,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) ASTPtr parsed_query; std::unique_ptr current_exception; + size_t retries_count = 0; + while (true) { auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end, @@ -2310,7 +2312,12 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) // Check whether the error (or its absence) matches the test hints // (or their absence). 
bool error_matches_hint = true; - if (have_error) + bool need_retry = test_hint.needRetry(server_exception, &retries_count); + if (need_retry) + { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + else if (have_error) { if (test_hint.hasServerErrors()) { @@ -2404,7 +2411,8 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) if (have_error && !ignore_error) return is_interactive; - this_query_begin = this_query_end; + if (!need_retry) + this_query_begin = this_query_end; break; } } diff --git a/src/Client/TestHint.cpp b/src/Client/TestHint.cpp index b64882577ee..74c65009a73 100644 --- a/src/Client/TestHint.cpp +++ b/src/Client/TestHint.cpp @@ -10,6 +10,7 @@ namespace DB::ErrorCodes { extern const int CANNOT_PARSE_TEXT; + extern const int OK; } namespace DB @@ -62,9 +63,28 @@ bool TestHint::hasExpectedServerError(int error) return std::find(server_errors.begin(), server_errors.end(), error) != server_errors.end(); } +bool TestHint::needRetry(const std::unique_ptr & server_exception, size_t * retries_counter) +{ + chassert(retries_counter); + if (max_retries <= *retries_counter) + return false; + + ++*retries_counter; + + int error = ErrorCodes::OK; + if (server_exception) + error = server_exception->code(); + + + if (retry_until) + return !hasExpectedServerError(error); /// retry until we get the expected error + else + return hasExpectedServerError(error); /// retry while we have the expected error +} + void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint) { - std::unordered_set commands{"echo", "echoOn", "echoOff"}; + std::unordered_set commands{"echo", "echoOn", "echoOff", "retry"}; std::unordered_set command_errors{ "serverError", @@ -73,6 +93,9 @@ void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint) for (Token token = comment_lexer.nextToken(); !token.isEnd(); token = comment_lexer.nextToken()) { + if (token.type == TokenType::Whitespace) + continue; + String item = String(token.begin, token.end); if (token.type == TokenType::BareWord && commands.contains(item)) { @@ -82,6 +105,30 @@ void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint) echo.emplace(true); if (item == "echoOff") echo.emplace(false); + + if (item == "retry") + { + token = comment_lexer.nextToken(); + while (token.type == TokenType::Whitespace) + token = comment_lexer.nextToken(); + + if (token.type != TokenType::Number) + throw DB::Exception(DB::ErrorCodes::CANNOT_PARSE_TEXT, "Could not parse the number of retries: {}", + std::string_view(token.begin, token.end)); + + max_retries = std::stoul(std::string(token.begin, token.end)); + + token = comment_lexer.nextToken(); + while (token.type == TokenType::Whitespace) + token = comment_lexer.nextToken(); + + if (token.type != TokenType::BareWord || + (std::string_view(token.begin, token.end) != "until" && + std::string_view(token.begin, token.end) != "while")) + throw DB::Exception(DB::ErrorCodes::CANNOT_PARSE_TEXT, "Expected 'until' or 'while' after the number of retries, got: {}", + std::string_view(token.begin, token.end)); + retry_until = std::string_view(token.begin, token.end) == "until"; + } } else if (!is_leading_hint && token.type == TokenType::BareWord && command_errors.contains(item)) { @@ -133,6 +180,9 @@ void TestHint::parse(Lexer & comment_lexer, bool is_leading_hint) break; } } + + if (max_retries && server_errors.size() != 1) + throw DB::Exception(DB::ErrorCodes::CANNOT_PARSE_TEXT, "Expected one serverError after the 'retry N while|until' command"); } } diff --git a/src/Client/TestHint.h 
b/src/Client/TestHint.h index b76c4245df4..bbe7873c08b 100644 --- a/src/Client/TestHint.h +++ b/src/Client/TestHint.h @@ -6,6 +6,7 @@ #include #include +#include namespace DB @@ -65,12 +66,17 @@ public: bool hasExpectedClientError(int error); bool hasExpectedServerError(int error); + bool needRetry(const std::unique_ptr & server_exception, size_t * retries_counter); + private: const String & query; ErrorVector server_errors{}; ErrorVector client_errors{}; std::optional echo; + size_t max_retries = 0; + bool retry_until = false; + void parse(Lexer & comment_lexer, bool is_leading_hint); bool allErrorsExpected(int actual_server_error, int actual_client_error) const diff --git a/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql b/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql index 86eda526c72..1cae8ae0237 100644 --- a/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql +++ b/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql @@ -7,7 +7,7 @@ create table rmt2 (n int, m int, k int) engine=ReplicatedMergeTree('/test/02446/ settings storage_policy='s3_cache', allow_remote_fs_zero_copy_replication=1, old_parts_lifetime=0, cleanup_delay_period=0, max_cleanup_delay_period=1, cleanup_delay_period_random_add=1, min_bytes_for_wide_part=0; -- FIXME zero-copy locks may remain in ZooKeeper forever if we failed to insert a part. --- Probably that's why we have to replace repsistent lock with ephemeral sometimes. +-- Probably that's why we have to replace persistent lock with ephemeral sometimes. -- See also "Replacing persistent lock with ephemeral for path {}. It can happen only in case of local part loss" -- in StorageReplicatedMergeTree::createZeroCopyLockNode set insert_keeper_fault_injection_probability=0; @@ -23,6 +23,10 @@ select sleepEachRow(0.5) as test_does_not_rely_on_this; insert into rmt1 values(5, 5, 5); alter table rmt2 update m = m * 10 where 1 settings mutations_sync=2; +-- wait for parts to be merged +select throwIf(name = 'all_0_5_1_6') from system.parts where database=currentDatabase() and table like 'rmt%' and active +format Null; -- { retry 30 until serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + system sync replica rmt2; set optimize_throw_if_noop=1; optimize table rmt2 final; @@ -32,10 +36,10 @@ select 1, * from rmt1 order by n; system sync replica rmt1; select 2, * from rmt2 order by n; --- a funny way to wait for outdated parts to be removed -select sleep(1), sleepEachRow(0.1) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( - 'select *, _state from system.parts where database=''' || currentDatabase() || ''' and table like ''rmt%'' and active=0' - ), 'LineAsString', 's String') settings max_threads=1 format Null; +-- wait for outdated parts to be removed +select throwIf(count() = 0) from ( +select *, _state from system.parts where database=currentDatabase() and table like 'rmt%' and active=0 +) format Null; -- { retry 30 until serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } select *, _state from system.parts where database=currentDatabase() and table like 'rmt%' and active=0; From 23c3fa73266cb9f32c7f30a2c83437815de89291 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 25 Jul 2024 01:03:59 +0000 Subject: [PATCH 0852/2361] fix --- .../01710_aggregate_projection_with_normalized_states.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql 
b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql index e023c0991b3..5375823aa8e 100644 --- a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql +++ b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql @@ -9,7 +9,8 @@ CREATE TABLE r ( s Int64, PROJECTION p (SELECT a, quantilesTimingMerge(0.5, 0.95, 0.99)(q), sum(s) GROUP BY a) -) Engine=SummingMergeTree order by (x, a); +) Engine=SummingMergeTree order by (x, a) +SETTINGS deduplicate_merge_projection_mode = 'drop'; -- should set it to rebuild once projection is supported with SummingMergeTree insert into r select number%100 x, From fa437b34ec16a0f7fe52f0e4261f964fef2ed606 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 25 Jul 2024 03:16:20 +0200 Subject: [PATCH 0853/2361] Increase timeout for curl in tests --- tests/queries/shell_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index ef2d89f0218..f7017958635 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -132,7 +132,7 @@ export CLICKHOUSE_URL_INTERSERVER=${CLICKHOUSE_URL_INTERSERVER:="${CLICKHOUSE_PO export CLICKHOUSE_CURL_COMMAND=${CLICKHOUSE_CURL_COMMAND:="curl"} # The queries in CI are prone to sudden delays, and we often don't check for curl # errors, so it makes sense to set a relatively generous timeout. -export CLICKHOUSE_CURL_TIMEOUT=${CLICKHOUSE_CURL_TIMEOUT:="60"} +export CLICKHOUSE_CURL_TIMEOUT=${CLICKHOUSE_CURL_TIMEOUT:="120"} export CLICKHOUSE_CURL=${CLICKHOUSE_CURL:="${CLICKHOUSE_CURL_COMMAND} -q -s --max-time ${CLICKHOUSE_CURL_TIMEOUT}"} export CLICKHOUSE_TMP=${CLICKHOUSE_TMP:="."} mkdir -p ${CLICKHOUSE_TMP} From 82959ce5b3e3709f12cb5cf8d50f8ca81858c7ed Mon Sep 17 00:00:00 2001 From: zoomxi <419486879@qq.com> Date: Thu, 25 Jul 2024 09:55:23 +0800 Subject: [PATCH 0854/2361] format test.py --- tests/integration/test_parallel_replicas_no_replicas/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_parallel_replicas_no_replicas/test.py b/tests/integration/test_parallel_replicas_no_replicas/test.py index 04e3a54e581..62d4b005d94 100644 --- a/tests/integration/test_parallel_replicas_no_replicas/test.py +++ b/tests/integration/test_parallel_replicas_no_replicas/test.py @@ -34,7 +34,9 @@ def create_tables(cluster, table_name): @pytest.mark.parametrize("skip_unavailable_shards", [1, 0]) @pytest.mark.parametrize("max_parallel_replicas", [2, 3, 100]) -def test_skip_all_replicas(start_cluster, skip_unavailable_shards, max_parallel_replicas): +def test_skip_all_replicas( + start_cluster, skip_unavailable_shards, max_parallel_replicas +): cluster_name = "test_1_shard_3_unavaliable_replicas" table_name = "tt" create_tables(cluster_name, table_name) From 36e59a1b7083f646b5509e6ef379d1c24f23ad0b Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 25 Jul 2024 01:58:27 +0000 Subject: [PATCH 0855/2361] Final commit hash --- contrib/libunwind | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libunwind b/contrib/libunwind index 9b1f47ad8a6..a89d904befe 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit 9b1f47ad8a6fcecbeaaead93bd87756ccf658071 +Subproject commit a89d904befea07814628c6ce0b44083c4e149c62 From f239dd67eec9cded889d8aee9e4895da5745541f Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 24 Jul 2024 03:30:04 +0200 Subject: [PATCH 0856/2361] Fix --- 
src/Databases/DatabaseReplicated.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 4c079ae5300..706d91d6c9e 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -641,7 +641,10 @@ LoadTaskPtr DatabaseReplicated::startupDatabaseAsync(AsyncLoader & async_loader, if (is_probably_dropped) return; - ddl_worker = std::make_unique(this, getContext()); + { + std::lock_guard lock{mutex}; + ddl_worker = std::make_unique(this, getContext()); + } ddl_worker->startup(); ddl_worker_initialized = true; }); From aa9908f6da6ee7bf932a649c2884615856b408bb Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 24 Jul 2024 04:33:47 +0200 Subject: [PATCH 0857/2361] Fix harder --- src/Databases/DatabaseReplicated.cpp | 2 +- src/Databases/DatabaseReplicated.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 706d91d6c9e..dd524e305a1 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -642,7 +642,7 @@ LoadTaskPtr DatabaseReplicated::startupDatabaseAsync(AsyncLoader & async_loader, return; { - std::lock_guard lock{mutex}; + std::lock_guard lock{ddl_worker_mutex}; ddl_worker = std::make_unique(this, getContext()); } ddl_worker->startup(); diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 8c3fa7c87f6..9edba0eeb2b 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -149,6 +149,7 @@ private: std::atomic_bool is_recovering = false; std::atomic_bool ddl_worker_initialized = false; std::unique_ptr ddl_worker; + std::mutex ddl_worker_mutex; UInt32 max_log_ptr_at_creation = 0; /// Usually operation with metadata are single-threaded because of the way replication works, From cea3bbc45b530742ea5d0374093278494ec5a9b8 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 24 Jul 2024 07:06:08 +0200 Subject: [PATCH 0858/2361] More checks --- src/Databases/DatabaseReplicated.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index dd524e305a1..97a9b65f6b4 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -559,7 +559,7 @@ void DatabaseReplicated::createEmptyLogEntry(const ZooKeeperPtr & current_zookee bool DatabaseReplicated::waitForReplicaToProcessAllEntries(UInt64 timeout_ms) { - if (!ddl_worker || is_probably_dropped) + if (!ddl_worker_initialized || !ddl_worker || is_probably_dropped) return false; return ddl_worker->waitForReplicaToProcessAllEntries(timeout_ms); } @@ -686,7 +686,7 @@ bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest); /// Database is probably being dropped - if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive())) + if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker_initialized || !ddl_worker || !ddl_worker->isCurrentlyActive())) return true; UInt64 local_digest = 0; @@ -1414,7 +1414,7 @@ void DatabaseReplicated::renameDatabase(ContextPtr query_context, const String & void DatabaseReplicated::stopReplication() { - if (ddl_worker) + if (ddl_worker_initialized && ddl_worker) ddl_worker->shutdown(); } From 
80b9b13771bb08a1d8d713aabd55ac0471292b1a Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 24 Jul 2024 07:15:35 +0200 Subject: [PATCH 0859/2361] Less checks, more locks --- src/Databases/DatabaseReplicated.cpp | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 97a9b65f6b4..c90f4ea4719 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -559,8 +559,11 @@ void DatabaseReplicated::createEmptyLogEntry(const ZooKeeperPtr & current_zookee bool DatabaseReplicated::waitForReplicaToProcessAllEntries(UInt64 timeout_ms) { - if (!ddl_worker_initialized || !ddl_worker || is_probably_dropped) - return false; + { + std::lock_guard lock{ddl_worker_mutex}; + if (!ddl_worker || is_probably_dropped) + return false; + } return ddl_worker->waitForReplicaToProcessAllEntries(timeout_ms); } @@ -686,8 +689,11 @@ bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest); /// Database is probably being dropped - if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker_initialized || !ddl_worker || !ddl_worker->isCurrentlyActive())) - return true; + { + std::lock_guard lock{ddl_worker_mutex}; + if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive())) + return true; + } UInt64 local_digest = 0; { @@ -1414,7 +1420,8 @@ void DatabaseReplicated::renameDatabase(ContextPtr query_context, const String & void DatabaseReplicated::stopReplication() { - if (ddl_worker_initialized && ddl_worker) + std::lock_guard lock{ddl_worker_mutex}; + if (ddl_worker) ddl_worker->shutdown(); } From e21d23d04daf2577db63e26cee22bc686e10833e Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 24 Jul 2024 07:55:36 +0200 Subject: [PATCH 0860/2361] Less locks --- src/Databases/DatabaseReplicated.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index c90f4ea4719..c737ece02ec 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -689,11 +689,8 @@ bool DatabaseReplicated::checkDigestValid(const ContextPtr & local_context, bool LOG_TEST(log, "Current in-memory metadata digest: {}", tables_metadata_digest); /// Database is probably being dropped - { - std::lock_guard lock{ddl_worker_mutex}; - if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive())) - return true; - } + if (!local_context->getZooKeeperMetadataTransaction() && (!ddl_worker || !ddl_worker->isCurrentlyActive())) + return true; UInt64 local_digest = 0; { From 7612060d232a24dbd721597c8e33cd1f556cddd6 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 25 Jul 2024 06:40:51 +0000 Subject: [PATCH 0861/2361] allow only equal types in lagInFrame and leadInFrame --- src/Processors/Transforms/WindowTransform.cpp | 15 ++------------ .../03210_lag_lead_inframe_types.reference | 20 +++++++++++++++++++ .../03210_lag_lead_inframe_types.sql | 4 ++++ 3 files changed, 26 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/03210_lag_lead_inframe_types.reference create mode 100644 tests/queries/0_stateless/03210_lag_lead_inframe_types.sql diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp 
index 86421adf4fb..06ae2bfb25e 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -2385,22 +2385,11 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction return; } - const auto supertype = getLeastSupertype(DataTypes{argument_types[0], argument_types[2]}); - if (!supertype) - { + if (!argument_types[0]->equals(*argument_types[2])) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "There is no supertype for the argument type '{}' and the default value type '{}'", + "Argument type '{}' and the default value type '{}' are different", argument_types[0]->getName(), argument_types[2]->getName()); - } - if (!argument_types[0]->equals(*supertype)) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The supertype '{}' for the argument type '{}' and the default value type '{}' is not the same as the argument type", - supertype->getName(), - argument_types[0]->getName(), - argument_types[2]->getName()); - } if (argument_types.size() > 3) { diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference new file mode 100644 index 00000000000..cc3b9a096b9 --- /dev/null +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference @@ -0,0 +1,20 @@ +0 +1 +2 +2 +2 +2 +2 +2 +2 +2 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql new file mode 100644 index 00000000000..5466cfe0fad --- /dev/null +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql @@ -0,0 +1,4 @@ +SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); -- { serverError BAD_ARGUMENTS } +SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); -- { serverError BAD_ARGUMENTS } +SELECT lagInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); +SELECT leadInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); From c6a643f981505d0293358c912723f1aece480c7c Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Thu, 25 Jul 2024 10:31:38 +0200 Subject: [PATCH 0862/2361] Update tests/queries/0_stateless/02992_all_columns_should_have_comment.sql --- .../0_stateless/02992_all_columns_should_have_comment.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql b/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql index 0d34b033354..ad056384bfd 100644 --- a/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql +++ b/tests/queries/0_stateless/02992_all_columns_should_have_comment.sql @@ -3,6 +3,6 @@ SELECT 'Column ' || name || ' from table ' || concat(database, '.', table) || ' FROM system.columns WHERE (database = 'system') AND (comment = '') AND - (table NOT ILIKE '%\_log\_%') AND + (table NOT ILIKE '%log%') AND (table NOT IN ('numbers', 'numbers_mt', 'one', 'generate_series', 'generateSeries', 'coverage_log', 'filesystem_read_prefetches_log')) AND (default_kind != 'ALIAS'); From b5171df7798323761b366f01d401c0559ff4c736 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Thu, 25 Jul 2024 10:32:52 +0200 Subject: [PATCH 0863/2361] Update test 03198_table_function_directory_path.sql --- .../0_stateless/03198_table_function_directory_path.reference | 1 + .../queries/0_stateless/03198_table_function_directory_path.sql | 2 ++ 2 
files changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/03198_table_function_directory_path.reference b/tests/queries/0_stateless/03198_table_function_directory_path.reference index 19920de3d3c..74cd8c6d31f 100644 --- a/tests/queries/0_stateless/03198_table_function_directory_path.reference +++ b/tests/queries/0_stateless/03198_table_function_directory_path.reference @@ -1,3 +1,4 @@ 2 2 1 +1 diff --git a/tests/queries/0_stateless/03198_table_function_directory_path.sql b/tests/queries/0_stateless/03198_table_function_directory_path.sql index 9e2791847af..90f687ed6a3 100644 --- a/tests/queries/0_stateless/03198_table_function_directory_path.sql +++ b/tests/queries/0_stateless/03198_table_function_directory_path.sql @@ -1,5 +1,6 @@ -- Tags: no-parallel +INSERT INTO FUNCTION file('data_03198_table_function_directory_path.csv', 'csv') SELECT '1.csv' SETTINGS engine_file_truncate_on_insert=1; INSERT INTO FUNCTION file('data_03198_table_function_directory_path/1.csv', 'csv') SELECT '1.csv' SETTINGS engine_file_truncate_on_insert=1; INSERT INTO FUNCTION file('data_03198_table_function_directory_path/2.csv', 'csv') SELECT '2.csv' SETTINGS engine_file_truncate_on_insert=1; INSERT INTO FUNCTION file('data_03198_table_function_directory_path/dir/3.csv', 'csv') SELECT '3.csv' SETTINGS engine_file_truncate_on_insert=1; @@ -11,3 +12,4 @@ SELECT COUNT(*) FROM file('data_03198_table_function_directory_path/'); SELECT COUNT(*) FROM file('data_03198_table_function_directory_path/dir'); SELECT COUNT(*) FROM file('data_03198_table_function_directory_path/*/dir', 'csv'); -- { serverError 74, 636 } SELECT COUNT(*) FROM file('data_03198_table_function_directory_pat'); -- { serverError 400 } +SELECT COUNT(*) FROM file('data_03198_table_function_directory_path.csv'); From 60f529f667069c15fa49296ac1f59a33d94d3f31 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 25 Jul 2024 11:06:00 +0200 Subject: [PATCH 0864/2361] The most precise way of tracking flushing time in 01246_buffer_flush Right now there are a couple of issues with the test: - it does not take into account INSERT time - it does not take into account SELECT time, which can be significant from time to time; for instance, here [1] it takes 3.3 seconds (and due to the tsan build it is not possible to find out why) 2024.07.23 20:52:18.238844 [ 13045 ] {d903650b-ab87-44f3-b7c3-4145e02f1301} executeQuery: (from [::1]:39430) (comment: 01246_buffer_flush.sh) select count() from data_01256; (stage: Complete) 2024.07.23 20:52:21.588183 [ 13045 ] {d903650b-ab87-44f3-b7c3-4145e02f1301} TCPHandler: Processed in 3.354887498 sec. [1]: https://s3.amazonaws.com/clickhouse-test-reports/66934/919005c4f70b044ecd9cc1bbce5dc5e276e11929/stateless_tests__tsan__s3_storage__[4_4].html Anyway, all of this can be fixed by measuring the time from the INSERT query's QueryStart event to the time the data lands in the destination table. Signed-off-by: Azat Khuzhin --- .../queries/0_stateless/01246_buffer_flush.sh | 49 ++++++++++++------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/tests/queries/0_stateless/01246_buffer_flush.sh b/tests/queries/0_stateless/01246_buffer_flush.sh index 1ca953c80d9..27c3f01f216 100755 --- a/tests/queries/0_stateless/01246_buffer_flush.sh +++ b/tests/queries/0_stateless/01246_buffer_flush.sh @@ -5,59 +5,72 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -function elapsed_sec() +set -e + +function wait_until() { local expr=$1 && shift - local start end - start=$(date +%s.%N) while !
eval "$expr"; do sleep 0.5 done - end=$(date +%s.%N) - $CLICKHOUSE_LOCAL -q "select floor($end-$start)" +} +function get_buffer_delay() +{ + local buffer_insert_id=$1 && shift + $CLICKHOUSE_CLIENT -nm -q " + SYSTEM FLUSH LOGS; + WITH + (SELECT event_time_microseconds FROM system.query_log WHERE current_database = currentDatabase() AND type = 'QueryStart' AND query_id = '$buffer_insert_id') AS begin_, + (SELECT max(event_time) FROM data_01256) AS end_ + SELECT dateDiff('seconds', begin_, end_)::UInt64; + " } $CLICKHOUSE_CLIENT -nm -q " drop table if exists data_01256; drop table if exists buffer_01256; - create table data_01256 as system.numbers Engine=Memory(); + create table data_01256 (key UInt64, event_time DateTime(6) MATERIALIZED now64(6)) Engine=Memory(); " echo "min" -$CLICKHOUSE_CLIENT -nm -q " - create table buffer_01256 as system.numbers Engine=Buffer(currentDatabase(), data_01256, 1, +$CLICKHOUSE_CLIENT -q " + create table buffer_01256 (key UInt64) Engine=Buffer(currentDatabase(), data_01256, 1, 2, 100, /* time */ 4, 100, /* rows */ 1, 1e6 /* bytes */ - ); - insert into buffer_01256 select * from system.numbers limit 5; - select count() from data_01256; + ) " -sec=$(elapsed_sec '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01256") -eq 5 ]]') +min_query_id=$(random_str 10) +$CLICKHOUSE_CLIENT --query_id="$min_query_id" -q "insert into buffer_01256 select * from system.numbers limit 5" +$CLICKHOUSE_CLIENT -q "select count() from data_01256" +wait_until '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01256") -eq 5 ]]' +sec=$(get_buffer_delay "$min_query_id") [[ $sec -ge 2 ]] || echo "Buffer flushed too early, min_time=2, flushed after $sec sec" [[ $sec -lt 100 ]] || echo "Buffer flushed too late, max_time=100, flushed after $sec sec" $CLICKHOUSE_CLIENT -q "select count() from data_01256" $CLICKHOUSE_CLIENT -q "drop table buffer_01256" echo "max" -$CLICKHOUSE_CLIENT -nm -q " - create table buffer_01256 as system.numbers Engine=Buffer(currentDatabase(), data_01256, 1, +$CLICKHOUSE_CLIENT -q " + create table buffer_01256 (key UInt64) Engine=Buffer(currentDatabase(), data_01256, 1, 100, 2, /* time */ 0, 100, /* rows */ 0, 1e6 /* bytes */ ); - insert into buffer_01256 select * from system.numbers limit 5; - select count() from data_01256; " -sec=$(elapsed_sec '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01256") -eq 10 ]]') +max_query_id=$(random_str 10) +$CLICKHOUSE_CLIENT --query_id="$max_query_id" -q "insert into buffer_01256 select * from system.numbers limit 5" +$CLICKHOUSE_CLIENT -q "select count() from data_01256" +wait_until '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01256") -eq 10 ]]' +sec=$(get_buffer_delay "$max_query_id") [[ $sec -ge 2 ]] || echo "Buffer flushed too early, max_time=2, flushed after $sec sec" $CLICKHOUSE_CLIENT -q "select count() from data_01256" $CLICKHOUSE_CLIENT -q "drop table buffer_01256" echo "direct" $CLICKHOUSE_CLIENT -nm -q " - create table buffer_01256 as system.numbers Engine=Buffer(currentDatabase(), data_01256, 1, + create table buffer_01256 (key UInt64) Engine=Buffer(currentDatabase(), data_01256, 1, 100, 100, /* time */ 0, 9, /* rows */ 0, 1e6 /* bytes */ From c2f85c6fd062dde095ee34178450dc94c245e691 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 25 Jul 2024 17:43:02 +0800 Subject: [PATCH 0865/2361] support map type as first argument type --- .../functions/tuple-map-functions.md | 2 +- src/Functions/map.cpp | 131 +++++++++++------- .../0_stateless/01651_map_functions.reference | 3 + 
.../0_stateless/01651_map_functions.sql | 9 +- 4 files changed, 89 insertions(+), 56 deletions(-) diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index 24b356eca87..ae23387f6e5 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -62,7 +62,7 @@ Alias: `MAP_FROM_ARRAYS(keys, values)` **Arguments** -- `keys` — Array of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type. +- `keys` — Array or map of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type. - `values` - Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md). **Returned value** diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 66cd10a3f0b..5319390fb70 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -1,14 +1,17 @@ -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include +#include #include +#include #include @@ -178,22 +181,28 @@ public: getName(), arguments.size()); - /// The first argument should always be Array. - /// Because key type can not be nested type of Map, which is Tuple - DataTypePtr key_type; - if (const auto * keys_type = checkAndGetDataType(arguments[0].get())) - key_type = keys_type->getNestedType(); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be an Array", getName()); + auto get_nested_type = [this](const DataTypePtr & type) -> DataTypePtr + { + DataTypePtr nested; + if (const auto * array_type = checkAndGetDataType(type.get())) + nested = array_type->getNestedType(); + else if (const auto * map_type = checkAndGetDataType(type.get())) + nested = std::make_shared(map_type->getKeyValueTypes()); + else + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Argument types of function {} must be Array or Map, but {} is given", + getName(), + type->getName()); - DataTypePtr value_type; - if (const auto * value_array_type = checkAndGetDataType(arguments[1].get())) - value_type = value_array_type->getNestedType(); - else if (const auto * value_map_type = checkAndGetDataType(arguments[1].get())) - value_type = std::make_shared(value_map_type->getKeyValueTypes()); - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Second argument for function {} must be Array or Map", getName()); + return nested; + }; + auto key_type = get_nested_type(arguments[0]); + auto value_type = get_nested_type(arguments[1]); + + /// Remove Nullable from key_type if needed for map key must not be Nullable + key_type = removeNullableOrLowCardinalityNullable(key_type); DataTypes key_value_types{key_type, value_type}; return std::make_shared(key_value_types); } @@ -201,44 +210,62 @@ public: ColumnPtr executeImpl( const ColumnsWithTypeAndName & arguments, const DataTypePtr & /* result_type */, size_t /* input_rows_count */) const override { - bool is_keys_const = isColumnConst(*arguments[0].column); - ColumnPtr holder_keys; - const ColumnArray * col_keys; - if (is_keys_const) + auto get_array_column = [this](const ColumnPtr & column) -> std::pair { - holder_keys = arguments[0].column->convertToFullColumnIfConst(); - col_keys = 
checkAndGetColumn(holder_keys.get()); - } - else + bool is_const = isColumnConst(*column); + ColumnPtr holder = is_const ? column->convertToFullColumnIfConst() : column; + + const ColumnArray * col_res = nullptr; + if (const auto * col_array = checkAndGetColumn(holder.get())) + col_res = col_array; + else if (const auto * col_map = checkAndGetColumn(holder.get())) + col_res = &col_map->getNestedColumn(); + else + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Argument columns of function {} must be Array or Map, but {} is given", + getName(), + holder->getName()); + + return {col_res, holder}; + }; + + auto [col_keys, key_holder] = get_array_column(arguments[0].column); + + /// Check if nested column of first argument contains NULL value in case its nested type is Nullable(T) type. + ColumnPtr data_keys = col_keys->getDataPtr(); + if (isColumnNullableOrLowCardinalityNullable(*data_keys)) { - col_keys = checkAndGetColumn(arguments[0].column.get()); + std::cout << "data keys is nullable" << std::endl; + const NullMap * null_map = nullptr; + if (const auto * nullable = checkAndGetColumn(data_keys.get())) + { + null_map = &nullable->getNullMapData(); + data_keys = nullable->getNestedColumnPtr(); + } + else if (const auto * low_cardinality = checkAndGetColumn(data_keys.get())) + { + if (const auto * nullable_dict = checkAndGetColumn(low_cardinality->getDictionaryPtr().get())) + { + null_map = &nullable_dict->getNullMapData(); + data_keys = ColumnLowCardinality::create(nullable_dict->getNestedColumnPtr(), low_cardinality->getIndexesPtr()); + } + } + + if (null_map && !memoryIsZero(null_map->data(), 0, null_map->size())) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, "The nested column of first argument in function {} must not contain NULLs", getName()); } - if (!col_keys) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "The first argument of function {} must be Array", getName()); - - bool is_values_const = isColumnConst(*arguments[1].column); - ColumnPtr holder_values; - if (is_values_const) - holder_values = arguments[1].column->convertToFullColumnIfConst(); - else - holder_values = arguments[1].column; - - const ColumnArray * col_values; - if (const auto * col_values_array = checkAndGetColumn(holder_values.get())) - col_values = col_values_array; - else if (const auto * col_values_map = checkAndGetColumn(holder_values.get())) - col_values = &col_values_map->getNestedColumn(); - else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "The second arguments of function {} must be Array or Map", getName()); - + auto [col_values, values_holder] = get_array_column(arguments[1].column); if (!col_keys->hasEqualOffsets(*col_values)) - throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH, "Two arguments for function {} must have equal sizes", getName()); + throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH, "Two arguments of function {} must have equal sizes", getName()); - const auto & data_keys = col_keys->getDataPtr(); const auto & data_values = col_values->getDataPtr(); const auto & offsets = col_keys->getOffsetsPtr(); - auto nested_column = ColumnArray::create(ColumnTuple::create(Columns{data_keys, data_values}), offsets); + std::cout << "before create array:" << "offsets:" << offsets->getName() << std::endl; + auto nested_column = ColumnArray::create(ColumnTuple::create(Columns{std::move(data_keys), data_values}), offsets); + std::cout << "after create array:" << "offsets:" << offsets->getName() << std::endl; return ColumnMap::create(nested_column); } }; diff --git 
a/tests/queries/0_stateless/01651_map_functions.reference b/tests/queries/0_stateless/01651_map_functions.reference index 471da5586b7..9114aa419b1 100644 --- a/tests/queries/0_stateless/01651_map_functions.reference +++ b/tests/queries/0_stateless/01651_map_functions.reference @@ -52,3 +52,6 @@ {1:4,2:5} {1:4,2:5} {1:4,2:5} +{1:3,2:4} +{1:3,2:4} +{1:3,2:4} {(1,3):'a',(2,4):'b'} diff --git a/tests/queries/0_stateless/01651_map_functions.sql b/tests/queries/0_stateless/01651_map_functions.sql index cf2460fce2c..4604ddd6db1 100644 --- a/tests/queries/0_stateless/01651_map_functions.sql +++ b/tests/queries/0_stateless/01651_map_functions.sql @@ -67,12 +67,15 @@ select mapFromArrays(['aa', 'bb'], [4, 5, 6]); -- { serverError SIZES_OF_ARRAYS_ select mapFromArrays([[1,2], [3,4]], [4, 5, 6]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } select mapFromArrays(['a', 2], [4, 5]); -- { serverError NO_COMMON_TYPE} select mapFromArrays([1, 2], [4, 'a']); -- { serverError NO_COMMON_TYPE} +select mapFromArrays(['aa', 'bb'], map('a', 4)); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +select mapFromArrays([1,null]::Array(Nullable(UInt8)), [3,4]); -- { serverError ILLEGAL_COLUMN } select mapFromArrays(['aa', 'bb'], map('a', 4, 'b', 5)); select mapFromArrays(['aa', 'bb'], materialize(map('a', 4, 'b', 5))) from numbers(2); -select mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb']) from numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -select mapFromArrays(['aa', 'bb'], map('a', 4)); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } - select mapFromArrays([toLowCardinality(1), toLowCardinality(2)], [4, 5]); select mapFromArrays([toLowCardinality(1), toLowCardinality(2)], materialize([4, 5])) from numbers(2); + +select mapFromArrays([1,2], [3,4]); +select mapFromArrays([1,2]::Array(Nullable(UInt8)), [3,4]); +select mapFromArrays([1,2], [3,4]) as x, mapFromArrays(x, ['a', 'b']); From 65573871485c2e8ca45d791551856fd2f8622cf9 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 25 Jul 2024 11:05:36 +0200 Subject: [PATCH 0866/2361] Try calculating memory with numactl if membind used --- .gitmodules | 3 + base/base/CMakeLists.txt | 4 ++ base/base/getMemoryAmount.cpp | 26 +++++++- contrib/CMakeLists.txt | 2 + contrib/numactl | 1 + contrib/numactl-cmake/CMakeLists.txt | 20 +++++++ contrib/numactl-cmake/include/config.h | 82 ++++++++++++++++++++++++++ programs/server/Server.cpp | 27 +++++++++ src/Common/config.h.in | 1 + src/configure_config.cmake | 3 + 10 files changed, 168 insertions(+), 1 deletion(-) create mode 160000 contrib/numactl create mode 100644 contrib/numactl-cmake/CMakeLists.txt create mode 100644 contrib/numactl-cmake/include/config.h diff --git a/.gitmodules b/.gitmodules index 12d865307d8..b5d7e1e56b3 100644 --- a/.gitmodules +++ b/.gitmodules @@ -372,3 +372,6 @@ [submodule "contrib/double-conversion"] path = contrib/double-conversion url = https://github.com/ClickHouse/double-conversion.git +[submodule "contrib/numactl"] + path = contrib/numactl + url = https://github.com/numactl/numactl.git diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt index 159502c9735..451a6eb5e8b 100644 --- a/base/base/CMakeLists.txt +++ b/base/base/CMakeLists.txt @@ -46,6 +46,10 @@ if (TARGET ch_contrib::crc32_s390x) target_link_libraries(common PUBLIC ch_contrib::crc32_s390x) endif() +if (TARGET ch_contrib::numactl) + target_link_libraries(common PUBLIC ch_contrib::numactl) +endif() + target_include_directories(common PUBLIC ..
"${CMAKE_CURRENT_BINARY_DIR}/..") target_link_libraries (common diff --git a/base/base/getMemoryAmount.cpp b/base/base/getMemoryAmount.cpp index afdb6ba068a..b8162146496 100644 --- a/base/base/getMemoryAmount.cpp +++ b/base/base/getMemoryAmount.cpp @@ -4,12 +4,17 @@ #include #include -#include #include #include #include +#include "config.h" + +#if USE_NUMACTL +#include +#endif + namespace { @@ -63,6 +68,25 @@ uint64_t getMemoryAmountOrZero() uint64_t memory_amount = num_pages * page_size; +#if USE_NUMACTL + if (numa_available() != -1) + { + auto * membind = numa_get_membind(); + if (!numa_bitmask_equal(membind, numa_all_nodes_ptr)) + { + uint64_t total_numa_memory = 0; + auto max_node = numa_max_node(); + for (int i = 0; i <= max_node; ++i) + { + if (numa_bitmask_isbitset(membind, i)) + total_numa_memory += numa_node_size(i, nullptr); + } + + memory_amount = total_numa_memory; + } + } +#endif + /// Respect the memory limit set by cgroups v2. auto limit_v2 = getCgroupsV2MemoryLimit(); if (limit_v2.has_value() && *limit_v2 < memory_amount) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 90ae5981a21..977efda15ff 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -230,6 +230,8 @@ add_contrib (libssh-cmake libssh) add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo) +add_contrib(numactl-cmake numactl) + # Put all targets defined here and in subdirectories under "contrib/" folders in GUI-based IDEs. # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear # in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually, diff --git a/contrib/numactl b/contrib/numactl new file mode 160000 index 00000000000..3871b1c42fc --- /dev/null +++ b/contrib/numactl @@ -0,0 +1 @@ +Subproject commit 3871b1c42fc71bceadafd745d2eff5dddfc2d67e diff --git a/contrib/numactl-cmake/CMakeLists.txt b/contrib/numactl-cmake/CMakeLists.txt new file mode 100644 index 00000000000..5d086366c7f --- /dev/null +++ b/contrib/numactl-cmake/CMakeLists.txt @@ -0,0 +1,20 @@ +option (ENABLE_NUMACTL "Enable numactl" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_NUMACTL) + message (STATUS "Not using numactl") + return() +endif () + +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/numactl") + +set (SRCS + "${LIBRARY_DIR}/libnuma.c" + "${LIBRARY_DIR}/syscall.c" +) + +add_library(_numactl ${SRCS}) + +target_include_directories(_numactl SYSTEM PRIVATE include) +target_include_directories(_numactl SYSTEM PUBLIC "${LIBRARY_DIR}") + +add_library(ch_contrib::numactl ALIAS _numactl) diff --git a/contrib/numactl-cmake/include/config.h b/contrib/numactl-cmake/include/config.h new file mode 100644 index 00000000000..a304db38e53 --- /dev/null +++ b/contrib/numactl-cmake/include/config.h @@ -0,0 +1,82 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Checking for symver attribute */ +#define HAVE_ATTRIBUTE_SYMVER 0 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDIO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "numactl" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "numactl" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "numactl 2.1" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "numactl" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "2.1" + +/* Define to 1 if all of the C89 standard headers exist (not just the ones + required in a freestanding environment). This macro is provided for + backward compatibility; new code need not use it. */ +#define STDC_HEADERS 1 + +/* If the compiler supports a TLS storage class define it to that here */ +#define TLS __thread + +/* Version number of package */ +#define VERSION "2.1" + +/* Number of bits in a file offset, on hosts where this is settable. */ +/* #undef _FILE_OFFSET_BITS */ + +/* Define to 1 on platforms where this makes off_t a 64-bit type. */ +/* #undef _LARGE_FILES */ + +/* Number of bits in time_t, on hosts where this is settable. */ +/* #undef _TIME_BITS */ + +/* Define to 1 on platforms where this makes time_t a 64-bit type. */ +/* #undef __MINGW_USE_VC2005_COMPAT */ diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 16888015f8b..619a72ff200 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -140,6 +140,11 @@ # include #endif +#if USE_NUMACTL +#include +#endif + + #include /// A minimal file used when the server is run without installation INCBIN(resource_embedded_xml, SOURCE_DIR "/programs/server/embedded.xml"); @@ -754,6 +759,28 @@ try setenv("OPENSSL_CONF", config_dir.c_str(), true); /// NOLINT } +#if USE_NUMACTL + if (numa_available() != -1) + { + auto * membind = numa_get_membind(); + if (!numa_bitmask_equal(membind, numa_all_nodes_ptr)) + { + uint64_t total_numa_memory = 0; + auto max_node = numa_max_node(); + for (int i = 0; i <= max_node; ++i) + { + if (numa_bitmask_isbitset(membind, i)) + total_numa_memory += numa_node_size(i, nullptr); + } + + LOG_INFO( + log, + "ClickHouse is bound to a subset of NUMA nodes. Total memory of all available nodes {}", + ReadableSize(total_numa_memory)); + } + } +#endif + registerInterpreters(); registerFunctions(); registerAggregateFunctions(); diff --git a/src/Common/config.h.in b/src/Common/config.h.in index f68701d5d10..6a0090130a3 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -64,6 +64,7 @@ #cmakedefine01 USE_LIBARCHIVE #cmakedefine01 USE_POCKETFFT #cmakedefine01 USE_PROMETHEUS_PROTOBUFS +#cmakedefine01 USE_NUMACTL /// This is needed for .incbin in assembly. For some reason, include paths don't work there in presence of LTO. /// That's why we use absolute paths. 
diff --git a/src/configure_config.cmake b/src/configure_config.cmake index 75f61baa854..d22bf674df4 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -173,5 +173,8 @@ endif() if (TARGET ch_contrib::prometheus_protobufs) set(USE_PROMETHEUS_PROTOBUFS 1) endif() +if (TARGET ch_contrib::numactl) + set(USE_NUMACTL 1) +endif() set(SOURCE_DIR ${PROJECT_SOURCE_DIR}) From e181ccd0173c46d31867097532f64df0be3944da Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 25 Jul 2024 17:53:51 +0800 Subject: [PATCH 0867/2361] update doc --- docs/en/sql-reference/functions/tuple-map-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index ae23387f6e5..db66188b1f5 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -62,7 +62,7 @@ Alias: `MAP_FROM_ARRAYS(keys, values)` **Arguments** -- `keys` — Array or map of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type. +- `keys` — Array or map of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type, or [Map](../data-types/map.md). - `values` - Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md). **Returned value** From 6968945373b2a73c135b0025cf892e21a2af4dbf Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 25 Jul 2024 09:58:32 +0000 Subject: [PATCH 0868/2361] Functions [s-t]*: Iterate over input_rows_count where appropriate --- src/Functions/FunctionTokens.h | 9 +- src/Functions/seriesDecomposeSTL.cpp | 4 +- src/Functions/space.cpp | 50 +++---- src/Functions/stem.cpp | 9 +- src/Functions/stringCutToZero.cpp | 21 ++- src/Functions/substringIndex.cpp | 22 ++-- src/Functions/subtractNanoseconds.cpp | 1 + src/Functions/throwIf.cpp | 2 +- src/Functions/timeSlots.cpp | 80 ++++++------ src/Functions/toDecimalString.cpp | 64 ++++----- src/Functions/toStartOfInterval.cpp | 44 +++---- src/Functions/tokenExtractors.cpp | 18 +-- src/Functions/transform.cpp | 167 ++++++++++++------------ src/Functions/translate.cpp | 16 ++- src/Functions/tupleToNameValuePairs.cpp | 6 +- 15 files changed, 251 insertions(+), 262 deletions(-) diff --git a/src/Functions/FunctionTokens.h b/src/Functions/FunctionTokens.h index f1435ca5651..b6d8e9ee589 100644 --- a/src/Functions/FunctionTokens.h +++ b/src/Functions/FunctionTokens.h @@ -84,7 +84,7 @@ public: return std::make_shared(std::make_shared()); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { Generator generator; generator.init(arguments, max_substrings_includes_remaining_string); @@ -107,18 +107,17 @@ public: const ColumnString::Chars & src_chars = col_str->getChars(); const ColumnString::Offsets & src_offsets = col_str->getOffsets(); - res_offsets.reserve(src_offsets.size()); - res_strings_offsets.reserve(src_offsets.size() * 5); /// Constant 5 - at random. + res_offsets.reserve(input_rows_count); + res_strings_offsets.reserve(input_rows_count * 5); /// Constant 5 - at random. 
res_strings_chars.reserve(src_chars.size()); Pos token_begin = nullptr; Pos token_end = nullptr; - size_t size = src_offsets.size(); ColumnString::Offset current_src_offset = 0; ColumnArray::Offset current_dst_offset = 0; ColumnString::Offset current_dst_strings_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { Pos pos = reinterpret_cast(&src_chars[current_src_offset]); current_src_offset = src_offsets[i]; diff --git a/src/Functions/seriesDecomposeSTL.cpp b/src/Functions/seriesDecomposeSTL.cpp index 720aa1e0799..1e1c41cafad 100644 --- a/src/Functions/seriesDecomposeSTL.cpp +++ b/src/Functions/seriesDecomposeSTL.cpp @@ -50,7 +50,7 @@ public: return std::make_shared(std::make_shared(std::make_shared())); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { ColumnPtr array_ptr = arguments[0].column; const ColumnArray * array = checkAndGetColumn(array_ptr.get()); @@ -79,7 +79,7 @@ public: ColumnArray::Offset prev_src_offset = 0; - for (size_t i = 0; i < src_offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { UInt64 period; auto period_ptr = arguments[1].column->convertToFullColumnIfConst(); diff --git a/src/Functions/space.cpp b/src/Functions/space.cpp index cd6ca73c088..cf1634e0319 100644 --- a/src/Functions/space.cpp +++ b/src/Functions/space.cpp @@ -55,7 +55,7 @@ public: template - bool executeConstant(ColumnPtr col_times, ColumnString::Offsets & res_offsets, ColumnString::Chars & res_chars) const + bool executeConstant(ColumnPtr col_times, ColumnString::Offsets & res_offsets, ColumnString::Chars & res_chars, size_t input_rows_count) const { const ColumnConst & col_times_const = checkAndGetColumn(*col_times); @@ -71,12 +71,12 @@ public: checkRepeatTime(times); - res_offsets.resize(col_times->size()); - res_chars.resize(col_times->size() * (times + 1)); + res_offsets.resize(input_rows_count); + res_chars.resize(input_rows_count * (times + 1)); size_t pos = 0; - for (size_t i = 0; i < col_times->size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { memset(res_chars.begin() + pos, space, times); pos += times; @@ -92,20 +92,20 @@ public: template - bool executeVector(ColumnPtr col_times_, ColumnString::Offsets & res_offsets, ColumnString::Chars & res_chars) const + bool executeVector(ColumnPtr col_times_, ColumnString::Offsets & res_offsets, ColumnString::Chars & res_chars, size_t input_rows_count) const { auto * col_times = checkAndGetColumn(col_times_.get()); if (!col_times) return false; - res_offsets.resize(col_times->size()); - res_chars.resize(col_times->size() * 10); /// heuristic + res_offsets.resize(input_rows_count); + res_chars.resize(input_rows_count * 10); /// heuristic const PaddedPODArray & times_data = col_times->getData(); size_t pos = 0; - for (size_t i = 0; i < col_times->size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { typename DataType::FieldType times = times_data[i]; @@ -132,7 +132,7 @@ public: } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & col_num = arguments[0].column; @@ -143,26 +143,26 @@ public: if (const ColumnConst * col_num_const = checkAndGetColumn(col_num.get())) { 
- if ((executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars)) - || (executeConstant(col_num, res_offsets, res_chars))) + if ((executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count)) + || (executeConstant(col_num, res_offsets, res_chars, input_rows_count))) return col_res; } else { - if ((executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars)) - || (executeVector(col_num, res_offsets, res_chars))) + if ((executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count)) + || (executeVector(col_num, res_offsets, res_chars, input_rows_count))) return col_res; } diff --git a/src/Functions/stem.cpp b/src/Functions/stem.cpp index 5b845cf332b..b3be40f4022 100644 --- a/src/Functions/stem.cpp +++ b/src/Functions/stem.cpp @@ -32,7 +32,8 @@ struct StemImpl const ColumnString::Offsets & offsets, ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, - const String & language) + const String & language, + size_t input_rows_count) { sb_stemmer * stemmer = sb_stemmer_new(language.data(), "UTF_8"); @@ -45,7 +46,7 @@ struct StemImpl res_offsets.assign(offsets); UInt64 data_size = 0; - for (UInt64 i = 0; i < offsets.size(); ++i) + for (UInt64 i = 0; i < input_rows_count; ++i) { /// Note that accessing -1th element is valid for PaddedPODArray. 
size_t original_size = offsets[i] - offsets[i - 1]; @@ -101,7 +102,7 @@ public: ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {0}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & langcolumn = arguments[0].column; const auto & strcolumn = arguments[1].column; @@ -119,7 +120,7 @@ public: String language = lang_col->getValue(); auto col_res = ColumnString::create(); - StemImpl::vector(words_col->getChars(), words_col->getOffsets(), col_res->getChars(), col_res->getOffsets(), language); + StemImpl::vector(words_col->getChars(), words_col->getOffsets(), col_res->getChars(), col_res->getOffsets(), language, input_rows_count); return col_res; } }; diff --git a/src/Functions/stringCutToZero.cpp b/src/Functions/stringCutToZero.cpp index b9f742cd8bc..16e57d741fa 100644 --- a/src/Functions/stringCutToZero.cpp +++ b/src/Functions/stringCutToZero.cpp @@ -40,7 +40,7 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - static bool tryExecuteString(const IColumn * col, ColumnPtr & col_res) + static bool tryExecuteString(const IColumn * col, ColumnPtr & col_res, size_t input_rows_count) { const ColumnString * col_str_in = checkAndGetColumn(col); @@ -53,8 +53,7 @@ public: const ColumnString::Chars & in_vec = col_str_in->getChars(); const ColumnString::Offsets & in_offsets = col_str_in->getOffsets(); - size_t size = in_offsets.size(); - out_offsets.resize(size); + out_offsets.resize(input_rows_count); out_vec.resize(in_vec.size()); char * begin = reinterpret_cast(out_vec.data()); @@ -62,7 +61,7 @@ public: ColumnString::Offset current_in_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char * pos_in = reinterpret_cast(&in_vec[current_in_offset]); size_t current_size = strlen(pos_in); @@ -87,7 +86,7 @@ public: } } - static bool tryExecuteFixedString(const IColumn * col, ColumnPtr & col_res) + static bool tryExecuteFixedString(const IColumn * col, ColumnPtr & col_res, size_t input_rows_count) { const ColumnFixedString * col_fstr_in = checkAndGetColumn(col); @@ -99,10 +98,8 @@ public: const ColumnString::Chars & in_vec = col_fstr_in->getChars(); - size_t size = col_fstr_in->size(); - - out_offsets.resize(size); - out_vec.resize(in_vec.size() + size); + out_offsets.resize(input_rows_count); + out_vec.resize(in_vec.size() + input_rows_count); char * begin = reinterpret_cast(out_vec.data()); char * pos = begin; @@ -110,7 +107,7 @@ public: size_t n = col_fstr_in->getN(); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t current_size = strnlen(pos_in, n); memcpySmallAllowReadWriteOverflow15(pos, pos_in, current_size); @@ -133,12 +130,12 @@ public: } } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const IColumn * column = arguments[0].column.get(); ColumnPtr res_column; - if (tryExecuteFixedString(column, res_column) || tryExecuteString(column, res_column)) + if (tryExecuteFixedString(column, res_column, input_rows_count) || tryExecuteString(column, res_column, input_rows_count)) return res_column; throw Exception(ErrorCodes::ILLEGAL_COLUMN, 
"Illegal column {} of argument of function {}", diff --git a/src/Functions/substringIndex.cpp b/src/Functions/substringIndex.cpp index eccd849059b..dc12ae193ff 100644 --- a/src/Functions/substringIndex.cpp +++ b/src/Functions/substringIndex.cpp @@ -68,7 +68,7 @@ namespace return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { ColumnPtr column_string = arguments[0].column; ColumnPtr column_delim = arguments[1].column; @@ -110,10 +110,10 @@ namespace if (is_count_const) { Int64 count = column_count->getInt(0); - vectorConstant(col_str, delim, count, vec_res, offsets_res); + vectorConstant(col_str, delim, count, vec_res, offsets_res, input_rows_count); } else - vectorVector(col_str, delim, column_count.get(), vec_res, offsets_res); + vectorVector(col_str, delim, column_count.get(), vec_res, offsets_res, input_rows_count); } return column_res; } @@ -124,18 +124,18 @@ namespace const String & delim, const IColumn * count_column, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - size_t rows = str_column->size(); res_data.reserve(str_column->getChars().size() / 2); - res_offsets.reserve(rows); + res_offsets.reserve(input_rows_count); bool all_ascii = isAllASCII(str_column->getChars().data(), str_column->getChars().size()) && isAllASCII(reinterpret_cast(delim.data()), delim.size()); std::unique_ptr searcher = !is_utf8 || all_ascii ? nullptr : std::make_unique(delim.data(), delim.size()); - for (size_t i = 0; i < rows; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { StringRef str_ref = str_column->getDataAt(i); Int64 count = count_column->getInt(i); @@ -157,18 +157,18 @@ namespace const String & delim, Int64 count, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { - size_t rows = str_column->size(); res_data.reserve(str_column->getChars().size() / 2); - res_offsets.reserve(rows); + res_offsets.reserve(input_rows_count); bool all_ascii = isAllASCII(str_column->getChars().data(), str_column->getChars().size()) && isAllASCII(reinterpret_cast(delim.data()), delim.size()); std::unique_ptr searcher = !is_utf8 || all_ascii ? 
nullptr : std::make_unique(delim.data(), delim.size()); - for (size_t i = 0; i < rows; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { StringRef str_ref = str_column->getDataAt(i); diff --git a/src/Functions/subtractNanoseconds.cpp b/src/Functions/subtractNanoseconds.cpp index fffb4eae37a..360c5ecd9cb 100644 --- a/src/Functions/subtractNanoseconds.cpp +++ b/src/Functions/subtractNanoseconds.cpp @@ -6,6 +6,7 @@ namespace DB { using FunctionSubtractNanoseconds = FunctionDateOrDateTimeAddInterval; + REGISTER_FUNCTION(SubtractNanoseconds) { factory.registerFunction(); diff --git a/src/Functions/throwIf.cpp b/src/Functions/throwIf.cpp index becc6d2f772..e317c65c622 100644 --- a/src/Functions/throwIf.cpp +++ b/src/Functions/throwIf.cpp @@ -152,7 +152,7 @@ private: return nullptr; } - bool allow_custom_error_code_argument; + const bool allow_custom_error_code_argument; }; } diff --git a/src/Functions/timeSlots.cpp b/src/Functions/timeSlots.cpp index 040495ab023..b62bb20c64e 100644 --- a/src/Functions/timeSlots.cpp +++ b/src/Functions/timeSlots.cpp @@ -41,18 +41,17 @@ struct TimeSlotsImpl /// The following three methods process DateTime type static void vectorVector( const PaddedPODArray & starts, const PaddedPODArray & durations, UInt32 time_slot_size, - PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets) + PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, + size_t input_rows_count) { if (time_slot_size == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Time slot size cannot be zero"); - size_t size = starts.size(); - - result_offsets.resize(size); - result_values.reserve(size); + result_offsets.resize(input_rows_count); + result_values.reserve(input_rows_count); ColumnArray::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { for (UInt32 value = starts[i] / time_slot_size, end = (starts[i] + durations[i]) / time_slot_size; value <= end; ++value) { @@ -66,18 +65,17 @@ struct TimeSlotsImpl static void vectorConstant( const PaddedPODArray & starts, UInt32 duration, UInt32 time_slot_size, - PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets) + PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, + size_t input_rows_count) { if (time_slot_size == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Time slot size cannot be zero"); - size_t size = starts.size(); - - result_offsets.resize(size); - result_values.reserve(size); + result_offsets.resize(input_rows_count); + result_values.reserve(input_rows_count); ColumnArray::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { for (UInt32 value = starts[i] / time_slot_size, end = (starts[i] + duration) / time_slot_size; value <= end; ++value) { @@ -91,18 +89,17 @@ struct TimeSlotsImpl static void constantVector( UInt32 start, const PaddedPODArray & durations, UInt32 time_slot_size, - PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets) + PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, + size_t input_rows_count) { if (time_slot_size == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Time slot size cannot be zero"); - size_t size = durations.size(); - - result_offsets.resize(size); - result_values.reserve(size); + result_offsets.resize(input_rows_count); + result_values.reserve(input_rows_count); ColumnArray::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { 
for (UInt32 value = start / time_slot_size, end = (start + durations[i]) / time_slot_size; value <= end; ++value) { @@ -120,12 +117,11 @@ struct TimeSlotsImpl */ static NO_SANITIZE_UNDEFINED void vectorVector( const PaddedPODArray & starts, const PaddedPODArray & durations, Decimal64 time_slot_size, - PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, UInt16 dt_scale, UInt16 duration_scale, UInt16 time_slot_scale) + PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, UInt16 dt_scale, UInt16 duration_scale, UInt16 time_slot_scale, + size_t input_rows_count) { - size_t size = starts.size(); - - result_offsets.resize(size); - result_values.reserve(size); + result_offsets.resize(input_rows_count); + result_values.reserve(input_rows_count); /// Modify all units to have same scale UInt16 max_scale = std::max({dt_scale, duration_scale, time_slot_scale}); @@ -139,7 +135,7 @@ struct TimeSlotsImpl if (time_slot_size == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Time slot size cannot be zero"); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { for (DateTime64 value = (starts[i] * dt_multiplier) / time_slot_size, end = (starts[i] * dt_multiplier + durations[i] * dur_multiplier) / time_slot_size; value <= end; value += 1) { @@ -152,12 +148,11 @@ struct TimeSlotsImpl static NO_SANITIZE_UNDEFINED void vectorConstant( const PaddedPODArray & starts, Decimal64 duration, Decimal64 time_slot_size, - PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, UInt16 dt_scale, UInt16 duration_scale, UInt16 time_slot_scale) + PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, UInt16 dt_scale, UInt16 duration_scale, UInt16 time_slot_scale, + size_t input_rows_count) { - size_t size = starts.size(); - - result_offsets.resize(size); - result_values.reserve(size); + result_offsets.resize(input_rows_count); + result_values.reserve(input_rows_count); /// Modify all units to have same scale UInt16 max_scale = std::max({dt_scale, duration_scale, time_slot_scale}); @@ -172,7 +167,7 @@ struct TimeSlotsImpl if (time_slot_size == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Time slot size cannot be zero"); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { for (DateTime64 value = (starts[i] * dt_multiplier) / time_slot_size, end = (starts[i] * dt_multiplier + duration) / time_slot_size; value <= end; value += 1) { @@ -185,12 +180,11 @@ struct TimeSlotsImpl static NO_SANITIZE_UNDEFINED void constantVector( DateTime64 start, const PaddedPODArray & durations, Decimal64 time_slot_size, - PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, UInt16 dt_scale, UInt16 duration_scale, UInt16 time_slot_scale) + PaddedPODArray & result_values, ColumnArray::Offsets & result_offsets, UInt16 dt_scale, UInt16 duration_scale, UInt16 time_slot_scale, + size_t input_rows_count) { - size_t size = durations.size(); - - result_offsets.resize(size); - result_values.reserve(size); + result_offsets.resize(input_rows_count); + result_values.reserve(input_rows_count); /// Modify all units to have same scale UInt16 max_scale = std::max({dt_scale, duration_scale, time_slot_scale}); @@ -205,7 +199,7 @@ struct TimeSlotsImpl if (time_slot_size == 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Time slot size cannot be zero"); - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { for (DateTime64 value = start / time_slot_size, end = (start + durations[i] * 
dur_multiplier) / time_slot_size; value <= end; value += 1) { @@ -282,7 +276,7 @@ public: } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { if (WhichDataType(arguments[0].type).isDateTime()) { @@ -308,17 +302,17 @@ public: if (dt_starts && durations) { - TimeSlotsImpl::vectorVector(dt_starts->getData(), durations->getData(), time_slot_size, res_values, res->getOffsets()); + TimeSlotsImpl::vectorVector(dt_starts->getData(), durations->getData(), time_slot_size, res_values, res->getOffsets(), input_rows_count); return res; } else if (dt_starts && const_durations) { - TimeSlotsImpl::vectorConstant(dt_starts->getData(), const_durations->getValue(), time_slot_size, res_values, res->getOffsets()); + TimeSlotsImpl::vectorConstant(dt_starts->getData(), const_durations->getValue(), time_slot_size, res_values, res->getOffsets(), input_rows_count); return res; } else if (dt_const_starts && durations) { - TimeSlotsImpl::constantVector(dt_const_starts->getValue(), durations->getData(), time_slot_size, res_values, res->getOffsets()); + TimeSlotsImpl::constantVector(dt_const_starts->getValue(), durations->getData(), time_slot_size, res_values, res->getOffsets(), input_rows_count); return res; } } @@ -353,21 +347,21 @@ public: if (starts && durations) { TimeSlotsImpl::vectorVector(starts->getData(), durations->getData(), time_slot_size, res_values, res->getOffsets(), - start_time_scale, duration_scale, time_slot_scale); + start_time_scale, duration_scale, time_slot_scale, input_rows_count); return res; } else if (starts && const_durations) { TimeSlotsImpl::vectorConstant( starts->getData(), const_durations->getValue(), time_slot_size, res_values, res->getOffsets(), - start_time_scale, duration_scale, time_slot_scale); + start_time_scale, duration_scale, time_slot_scale, input_rows_count); return res; } else if (const_starts && durations) { TimeSlotsImpl::constantVector( const_starts->getValue(), durations->getData(), time_slot_size, res_values, res->getOffsets(), - start_time_scale, duration_scale, time_slot_scale); + start_time_scale, duration_scale, time_slot_scale, input_rows_count); return res; } } diff --git a/src/Functions/toDecimalString.cpp b/src/Functions/toDecimalString.cpp index 523948a5396..3566ebc93ad 100644 --- a/src/Functions/toDecimalString.cpp +++ b/src/Functions/toDecimalString.cpp @@ -54,9 +54,9 @@ private: /// For operations with Integer/Float template void vectorConstant(const FromVectorType & vec_from, UInt8 precision, - ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets) const + ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets, + size_t input_rows_count) const { - size_t input_rows_count = vec_from.size(); result_offsets.resize(input_rows_count); /// Buffer is used here and in functions below because resulting size cannot be precisely anticipated, @@ -74,9 +74,9 @@ private: template void vectorVector(const FirstArgVectorType & vec_from, const ColumnVector::Container & vec_precision, - ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets) const + ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets, + size_t input_rows_count) const { - size_t input_rows_count = vec_from.size(); result_offsets.resize(input_rows_count); WriteBufferFromVector buf_to(vec_to); @@ -98,7 +98,8 @@ private: /// For operations with Decimal template void 
vectorConstant(const FirstArgVectorType & vec_from, UInt8 precision, - ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets, UInt8 from_scale) const + ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets, UInt8 from_scale, + size_t input_rows_count) const { /// There are no more than 77 meaning digits (as it is the max length of UInt256). So we can limit it with 77. constexpr size_t max_digits = std::numeric_limits::digits10; @@ -107,7 +108,6 @@ private: "Too many fractional digits requested for Decimal, must not be more than {}", max_digits); WriteBufferFromVector buf_to(vec_to); - size_t input_rows_count = vec_from.size(); result_offsets.resize(input_rows_count); for (size_t i = 0; i < input_rows_count; ++i) @@ -121,9 +121,9 @@ private: template void vectorVector(const FirstArgVectorType & vec_from, const ColumnVector::Container & vec_precision, - ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets, UInt8 from_scale) const + ColumnString::Chars & vec_to, ColumnString::Offsets & result_offsets, UInt8 from_scale, + size_t input_rows_count) const { - size_t input_rows_count = vec_from.size(); result_offsets.resize(input_rows_count); WriteBufferFromVector buf_to(vec_to); @@ -182,28 +182,28 @@ private: } public: - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { switch (arguments[0].type->getTypeId()) { - case TypeIndex::UInt8: return executeType(arguments); - case TypeIndex::UInt16: return executeType(arguments); - case TypeIndex::UInt32: return executeType(arguments); - case TypeIndex::UInt64: return executeType(arguments); - case TypeIndex::UInt128: return executeType(arguments); - case TypeIndex::UInt256: return executeType(arguments); - case TypeIndex::Int8: return executeType(arguments); - case TypeIndex::Int16: return executeType(arguments); - case TypeIndex::Int32: return executeType(arguments); - case TypeIndex::Int64: return executeType(arguments); - case TypeIndex::Int128: return executeType(arguments); - case TypeIndex::Int256: return executeType(arguments); - case TypeIndex::Float32: return executeType(arguments); - case TypeIndex::Float64: return executeType(arguments); - case TypeIndex::Decimal32: return executeType(arguments); - case TypeIndex::Decimal64: return executeType(arguments); - case TypeIndex::Decimal128: return executeType(arguments); - case TypeIndex::Decimal256: return executeType(arguments); + case TypeIndex::UInt8: return executeType(arguments, input_rows_count); + case TypeIndex::UInt16: return executeType(arguments, input_rows_count); + case TypeIndex::UInt32: return executeType(arguments, input_rows_count); + case TypeIndex::UInt64: return executeType(arguments, input_rows_count); + case TypeIndex::UInt128: return executeType(arguments, input_rows_count); + case TypeIndex::UInt256: return executeType(arguments, input_rows_count); + case TypeIndex::Int8: return executeType(arguments, input_rows_count); + case TypeIndex::Int16: return executeType(arguments, input_rows_count); + case TypeIndex::Int32: return executeType(arguments, input_rows_count); + case TypeIndex::Int64: return executeType(arguments, input_rows_count); + case TypeIndex::Int128: return executeType(arguments, input_rows_count); + case TypeIndex::Int256: return executeType(arguments, input_rows_count); + case TypeIndex::Float32: return 
executeType(arguments, input_rows_count); + case TypeIndex::Float64: return executeType(arguments, input_rows_count); + case TypeIndex::Decimal32: return executeType(arguments, input_rows_count); + case TypeIndex::Decimal64: return executeType(arguments, input_rows_count); + case TypeIndex::Decimal128: return executeType(arguments, input_rows_count); + case TypeIndex::Decimal256: return executeType(arguments, input_rows_count); default: throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}", arguments[0].column->getName(), getName()); @@ -212,7 +212,7 @@ public: private: template - ColumnPtr executeType(const ColumnsWithTypeAndName & arguments) const + ColumnPtr executeType(const ColumnsWithTypeAndName & arguments, size_t input_rows_count) const { const auto * precision_col = checkAndGetColumn>(arguments[1].column.get()); const auto * precision_col_const = checkAndGetColumnConst>(arguments[1].column.get()); @@ -230,9 +230,9 @@ private: { UInt8 from_scale = from_col->getScale(); if (precision_col_const) - vectorConstant(from_col->getData(), precision_col_const->template getValue(), result_chars, result_offsets, from_scale); + vectorConstant(from_col->getData(), precision_col_const->template getValue(), result_chars, result_offsets, from_scale, input_rows_count); else if (precision_col) - vectorVector(from_col->getData(), precision_col->getData(), result_chars, result_offsets, from_scale); + vectorVector(from_col->getData(), precision_col->getData(), result_chars, result_offsets, from_scale, input_rows_count); else throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of second argument of function formatDecimal", arguments[1].column->getName()); } @@ -245,9 +245,9 @@ private: if (from_col) { if (precision_col_const) - vectorConstant(from_col->getData(), precision_col_const->template getValue(), result_chars, result_offsets); + vectorConstant(from_col->getData(), precision_col_const->template getValue(), result_chars, result_offsets, input_rows_count); else if (precision_col) - vectorVector(from_col->getData(), precision_col->getData(), result_chars, result_offsets); + vectorVector(from_col->getData(), precision_col->getData(), result_chars, result_offsets, input_rows_count); else throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of second argument of function formatDecimal", arguments[1].column->getName()); diff --git a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index 50442d1b448..21b7cf895d2 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -147,19 +147,20 @@ public: std::unreachable(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { const auto & time_column = arguments[0]; const auto & interval_column = arguments[1]; const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0); - auto result_column = dispatchForTimeColumn(time_column, interval_column, result_type, time_zone); + auto result_column = dispatchForTimeColumn(time_column, interval_column, result_type, time_zone, input_rows_count); return result_column; } private: ColumnPtr dispatchForTimeColumn( const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, - const DataTypePtr & result_type, const DateLUTImpl & 
time_zone) const + const DataTypePtr & result_type, const DateLUTImpl & time_zone, + size_t input_rows_count) const { const auto & time_column_type = *time_column.type.get(); const auto & time_column_col = *time_column.column.get(); @@ -170,19 +171,19 @@ private: auto scale = assert_cast(time_column_type).getScale(); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, scale); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count, scale); } else if (isDateTime(time_column_type)) { const auto * time_column_vec = checkAndGetColumn(&time_column_col); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count); } else if (isDate(time_column_type)) { const auto * time_column_vec = checkAndGetColumn(&time_column_col); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count); } throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, DateTime or DateTime64", getName()); } @@ -190,7 +191,7 @@ private: template ColumnPtr dispatchForIntervalColumn( const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, - const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale = 1) const + const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale = 1) const { const auto * interval_type = checkAndGetDataType(interval_column.type.get()); if (!interval_type) @@ -207,27 +208,27 @@ private: switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case) { case IntervalKind::Kind::Nanosecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Microsecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Millisecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Second: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Minute: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Hour: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, 
time_zone, input_rows_count, scale); case IntervalKind::Kind::Day: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Week: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Month: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Quarter: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); case IntervalKind::Kind::Year: - return execute(time_data_type, time_column, num_units, result_type, time_zone, scale); + return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); } std::unreachable(); @@ -236,22 +237,21 @@ private: template ColumnPtr execute( const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, - const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale) const + const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale) const { using ResultColumnType = typename ResultDataType::ColumnType; using ResultFieldType = typename ResultDataType::FieldType; const auto & time_data = time_column_type.getData(); - size_t size = time_data.size(); auto result_col = result_type->createColumn(); auto * col_to = assert_cast(result_col.get()); auto & result_data = col_to->getData(); - result_data.resize(size); + result_data.resize(input_rows_count); Int64 scale_multiplier = DecimalUtils::scaleMultiplier(scale); - for (size_t i = 0; i != size; ++i) + for (size_t i = 0; i != input_rows_count; ++i) result_data[i] = static_cast(ToStartOfInterval::execute(time_data[i], num_units, time_zone, scale_multiplier)); return result_col; diff --git a/src/Functions/tokenExtractors.cpp b/src/Functions/tokenExtractors.cpp index e7dcb5cced3..1bbf313fbae 100644 --- a/src/Functions/tokenExtractors.cpp +++ b/src/Functions/tokenExtractors.cpp @@ -73,7 +73,7 @@ public: return std::make_shared(std::make_shared()); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { auto column_offsets = ColumnArray::ColumnOffsets::create(); @@ -90,9 +90,9 @@ public: auto input_column = arguments[0].column; if (const auto * column_string = checkAndGetColumn(input_column.get())) - executeImpl(extractor, *column_string, *result_column_string, *column_offsets); + executeImpl(extractor, *column_string, *result_column_string, *column_offsets, input_rows_count); else if (const auto * column_fixed_string = checkAndGetColumn(input_column.get())) - executeImpl(extractor, *column_fixed_string, *result_column_string, *column_offsets); + executeImpl(extractor, *column_fixed_string, *result_column_string, *column_offsets, input_rows_count); return ColumnArray::create(std::move(result_column_string), std::move(column_offsets)); } @@ -105,9 +105,9 @@ public: auto input_column = 
arguments[0].column; if (const auto * column_string = checkAndGetColumn(input_column.get())) - executeImpl(extractor, *column_string, *result_column_string, *column_offsets); + executeImpl(extractor, *column_string, *result_column_string, *column_offsets, input_rows_count); else if (const auto * column_fixed_string = checkAndGetColumn(input_column.get())) - executeImpl(extractor, *column_fixed_string, *result_column_string, *column_offsets); + executeImpl(extractor, *column_fixed_string, *result_column_string, *column_offsets, input_rows_count); return ColumnArray::create(std::move(result_column_string), std::move(column_offsets)); } @@ -120,15 +120,15 @@ private: const ExtractorType & extractor, StringColumnType & input_data_column, ResultStringColumnType & result_data_column, - ColumnArray::ColumnOffsets & offsets_column) const + ColumnArray::ColumnOffsets & offsets_column, + size_t input_rows_count) const { size_t current_tokens_size = 0; auto & offsets_data = offsets_column.getData(); - size_t column_size = input_data_column.size(); - offsets_data.resize(column_size); + offsets_data.resize(input_rows_count); - for (size_t i = 0; i < column_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { auto data = input_data_column.getDataAt(i); diff --git a/src/Functions/transform.cpp b/src/Functions/transform.cpp index 68500779f93..0dfc9197845 100644 --- a/src/Functions/transform.cpp +++ b/src/Functions/transform.cpp @@ -173,30 +173,30 @@ namespace } else if (cache.table_num_to_idx) { - if (!executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted) - && !executeNum>(in, *column_result, default_non_const, *in_casted)) + if (!executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count) + && !executeNum>(in, *column_result, default_non_const, *in_casted, input_rows_count)) { throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", in->getName(), getName()); } } else if 
(cache.table_string_to_idx) { - if (!executeString(in, *column_result, default_non_const, *in_casted)) - executeContiguous(in, *column_result, default_non_const, *in_casted); + if (!executeString(in, *column_result, default_non_const, *in_casted, input_rows_count)) + executeContiguous(in, *column_result, default_non_const, *in_casted, input_rows_count); } else if (cache.table_anything_to_idx) { - executeAnything(in, *column_result, default_non_const, *in_casted); + executeAnything(in, *column_result, default_non_const, *in_casted, input_rows_count); } else throw Exception(ErrorCodes::LOGICAL_ERROR, "State of the function `transform` is not initialized"); @@ -217,12 +217,11 @@ namespace return impl->execute(args, result_type, input_rows_count); } - void executeAnything(const IColumn * in, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted) const + void executeAnything(const IColumn * in, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted, size_t input_rows_count) const { - const size_t size = in->size(); const auto & table = *cache.table_anything_to_idx; - column_result.reserve(size); - for (size_t i = 0; i < size; ++i) + column_result.reserve(input_rows_count); + for (size_t i = 0; i < input_rows_count; ++i) { SipHash hash; in->updateHashWithValue(i, hash); @@ -239,12 +238,11 @@ namespace } } - void executeContiguous(const IColumn * in, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted) const + void executeContiguous(const IColumn * in, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted, size_t input_rows_count) const { - const size_t size = in->size(); const auto & table = *cache.table_string_to_idx; - column_result.reserve(size); - for (size_t i = 0; i < size; ++i) + column_result.reserve(input_rows_count); + for (size_t i = 0; i < input_rows_count; ++i) { const auto * it = table.find(in->getDataAt(i)); if (it) @@ -259,7 +257,7 @@ namespace } template - bool executeNum(const IColumn * in_untyped, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted) const + bool executeNum(const IColumn * in_untyped, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted, size_t input_rows_count) const { const auto * const in = checkAndGetColumn(in_untyped); if (!in) @@ -269,24 +267,23 @@ namespace if constexpr (std::is_same_v, T> || std::is_same_v, T>) in_scale = in->getScale(); - if (!executeNumToString(pod, column_result, default_non_const) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale) - && !executeNumToNum>(pod, column_result, default_non_const, in_scale)) + if (!executeNumToString(pod, column_result, default_non_const, input_rows_count) + && !executeNumToNum>(pod, 
column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count) + && !executeNumToNum>(pod, column_result, default_non_const, in_scale, input_rows_count)) { - const size_t size = pod.size(); const auto & table = *cache.table_num_to_idx; - column_result.reserve(size); - for (size_t i = 0; i < size; ++i) + column_result.reserve(input_rows_count); + for (size_t i = 0; i < input_rows_count; ++i) { const auto * it = table.find(bit_cast(pod[i])); if (it) @@ -303,14 +300,13 @@ namespace } template - bool executeNumToString(const PaddedPODArray & pod, IColumn & column_result, const ColumnPtr default_non_const) const + bool executeNumToString(const PaddedPODArray & pod, IColumn & column_result, const ColumnPtr default_non_const, size_t input_rows_count) const { auto * out = typeid_cast(&column_result); if (!out) return false; auto & out_offs = out->getOffsets(); - const size_t size = pod.size(); - out_offs.resize(size); + out_offs.resize(input_rows_count); auto & out_chars = out->getChars(); const auto * to_col = assert_cast(cache.to_column.get()); @@ -325,14 +321,14 @@ namespace const auto & def_offs = def->getOffsets(); const auto * def_data = def_chars.data(); auto def_size = def_offs[0]; - executeNumToStringHelper(table, pod, out_chars, out_offs, to_chars, to_offs, def_data, def_size, size); + executeNumToStringHelper(table, pod, out_chars, out_offs, to_chars, to_offs, def_data, def_size, input_rows_count); } else { const auto * def = assert_cast(default_non_const.get()); const auto & def_chars = def->getChars(); const auto & def_offs = def->getOffsets(); - executeNumToStringHelper(table, pod, out_chars, out_offs, to_chars, to_offs, def_chars, def_offs, size); + executeNumToStringHelper(table, pod, out_chars, out_offs, to_chars, to_offs, def_chars, def_offs, input_rows_count); } return true; } @@ -347,10 +343,10 @@ namespace const ColumnString::Offsets & to_offsets, const DefData & def_data, const DefOffs & def_offsets, - const size_t size) const + size_t input_rows_count) const { size_t out_cur_off = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char8_t * to = nullptr; size_t to_size = 0; @@ -382,14 +378,13 @@ namespace template bool executeNumToNum( - const PaddedPODArray & pod, IColumn & column_result, const ColumnPtr default_non_const, const UInt32 in_scale) const + const PaddedPODArray & pod, IColumn & column_result, ColumnPtr default_non_const, UInt32 in_scale, size_t input_rows_count) const { auto * out = typeid_cast(&column_result); if (!out) return false; auto & out_pod = out->getData(); - const size_t size = pod.size(); - out_pod.resize(size); + 
out_pod.resize(input_rows_count); UInt32 out_scale = 0; if constexpr (std::is_same_v, T> || std::is_same_v, T>) out_scale = out->getScale(); @@ -399,15 +394,15 @@ namespace if (cache.default_column) { const auto const_def = assert_cast(cache.default_column.get())->getData()[0]; - executeNumToNumHelper(table, pod, out_pod, to_pod, const_def, size, out_scale, out_scale); + executeNumToNumHelper(table, pod, out_pod, to_pod, const_def, input_rows_count, out_scale, out_scale); } else if (default_non_const) { const auto & nconst_def = assert_cast(default_non_const.get())->getData(); - executeNumToNumHelper(table, pod, out_pod, to_pod, nconst_def, size, out_scale, out_scale); + executeNumToNumHelper(table, pod, out_pod, to_pod, nconst_def, input_rows_count, out_scale, out_scale); } else - executeNumToNumHelper(table, pod, out_pod, to_pod, pod, size, out_scale, in_scale); + executeNumToNumHelper(table, pod, out_pod, to_pod, pod, input_rows_count, out_scale, in_scale); return true; } @@ -418,11 +413,11 @@ namespace PaddedPODArray & out_pod, const PaddedPODArray & to_pod, const Def & def, - const size_t size, - const UInt32 out_scale, - const UInt32 def_scale) const + size_t input_rows_count, + UInt32 out_scale, + UInt32 def_scale) const { - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * it = table.find(bit_cast(pod[i])); if (it) @@ -450,7 +445,7 @@ namespace } } - bool executeString(const IColumn * in_untyped, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted) const + bool executeString(const IColumn * in_untyped, IColumn & column_result, const ColumnPtr default_non_const, const IColumn & in_casted, size_t input_rows_count) const { const auto * const in = checkAndGetColumn(in_untyped); if (!in) @@ -458,19 +453,19 @@ namespace const auto & data = in->getChars(); const auto & offsets = in->getOffsets(); - if (!executeStringToString(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const) - && !executeStringToNum>(data, offsets, column_result, default_non_const)) + if (!executeStringToString(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && 
!executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count) + && !executeStringToNum>(data, offsets, column_result, default_non_const, input_rows_count)) { const size_t size = offsets.size(); const auto & table = *cache.table_string_to_idx; @@ -497,14 +492,14 @@ namespace const ColumnString::Chars & data, const ColumnString::Offsets & offsets, IColumn & column_result, - const ColumnPtr default_non_const) const + const ColumnPtr default_non_const, + size_t input_rows_count) const { auto * out = typeid_cast(&column_result); if (!out) return false; auto & out_offs = out->getOffsets(); - const size_t size = offsets.size(); - out_offs.resize(size); + out_offs.resize(input_rows_count); auto & out_chars = out->getChars(); const auto * to_col = assert_cast(cache.to_column.get()); @@ -519,18 +514,18 @@ namespace const auto & def_offs = def->getOffsets(); const auto * def_data = def_chars.data(); auto def_size = def_offs[0]; - executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, def_data, def_size, size); + executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, def_data, def_size, input_rows_count); } else if (default_non_const) { const auto * def = assert_cast(default_non_const.get()); const auto & def_chars = def->getChars(); const auto & def_offs = def->getOffsets(); - executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, def_chars, def_offs, size); + executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, def_chars, def_offs, input_rows_count); } else { - executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, data, offsets, size); + executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, data, offsets, input_rows_count); } return true; } @@ -546,11 +541,11 @@ namespace const ColumnString::Offsets & to_offsets, const DefData & def_data, const DefOffs & def_offsets, - const size_t size) const + size_t input_rows_count) const { ColumnString::Offset current_offset = 0; size_t out_cur_off = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const char8_t * to = nullptr; size_t to_size = 0; @@ -587,26 +582,26 @@ namespace const ColumnString::Chars & data, const ColumnString::Offsets & offsets, IColumn & column_result, - const ColumnPtr default_non_const) const + const ColumnPtr default_non_const, + size_t input_rows_count) const { auto * out = typeid_cast(&column_result); if (!out) return false; auto & out_pod = out->getData(); - const size_t size = offsets.size(); - out_pod.resize(size); + out_pod.resize(input_rows_count); const auto & to_pod = assert_cast(cache.to_column.get())->getData(); const auto & table = *cache.table_string_to_idx; if (cache.default_column) { const auto const_def = assert_cast(cache.default_column.get())->getData()[0]; - executeStringToNumHelper(table, data, offsets, out_pod, to_pod, const_def, size); + executeStringToNumHelper(table, data, offsets, out_pod, to_pod, const_def, input_rows_count); } else { const auto & nconst_def = 
assert_cast(default_non_const.get())->getData(); - executeStringToNumHelper(table, data, offsets, out_pod, to_pod, nconst_def, size); + executeStringToNumHelper(table, data, offsets, out_pod, to_pod, nconst_def, input_rows_count); } return true; } @@ -619,10 +614,10 @@ namespace PaddedPODArray & out_pod, const PaddedPODArray & to_pod, const Def & def, - const size_t size) const + size_t input_rows_count) const { ColumnString::Offset current_offset = 0; - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const StringRef ref{&data[current_offset], offsets[i] - current_offset - 1}; current_offset = offsets[i]; diff --git a/src/Functions/translate.cpp b/src/Functions/translate.cpp index 2df08a5664e..366640d7d20 100644 --- a/src/Functions/translate.cpp +++ b/src/Functions/translate.cpp @@ -52,7 +52,8 @@ struct TranslateImpl const std::string & map_from, const std::string & map_to, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { Map map; fillMapWithValues(map, map_from, map_to); @@ -62,7 +63,7 @@ struct TranslateImpl UInt8 * dst = res_data.data(); - for (UInt64 i = 0; i < offsets.size(); ++i) + for (UInt64 i = 0; i < input_rows_count; ++i) { const UInt8 * src = data.data() + offsets[i - 1]; const UInt8 * src_end = data.data() + offsets[i] - 1; @@ -175,19 +176,20 @@ struct TranslateUTF8Impl const std::string & map_from, const std::string & map_to, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + ColumnString::Offsets & res_offsets, + size_t input_rows_count) { MapASCII map_ascii; MapUTF8 map; fillMapWithValues(map_ascii, map, map_from, map_to); res_data.resize(data.size()); - res_offsets.resize(offsets.size()); + res_offsets.resize(input_rows_count); UInt8 * dst = res_data.data(); UInt64 data_size = 0; - for (UInt64 i = 0; i < offsets.size(); ++i) + for (UInt64 i = 0; i < input_rows_count; ++i) { const UInt8 * src = data.data() + offsets[i - 1]; const UInt8 * src_end = data.data() + offsets[i] - 1; @@ -311,7 +313,7 @@ public: } } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr column_src = arguments[0].column; const ColumnPtr column_map_from = arguments[1].column; @@ -330,7 +332,7 @@ public: if (const ColumnString * col = checkAndGetColumn(column_src.get())) { auto col_res = ColumnString::create(); - Impl::vector(col->getChars(), col->getOffsets(), map_from, map_to, col_res->getChars(), col_res->getOffsets()); + Impl::vector(col->getChars(), col->getOffsets(), map_from, map_to, col_res->getChars(), col_res->getOffsets(), input_rows_count); return col_res; } else if (const ColumnFixedString * col_fixed = checkAndGetColumn(column_src.get())) diff --git a/src/Functions/tupleToNameValuePairs.cpp b/src/Functions/tupleToNameValuePairs.cpp index 998e0da4f0c..92734d3d1fc 100644 --- a/src/Functions/tupleToNameValuePairs.cpp +++ b/src/Functions/tupleToNameValuePairs.cpp @@ -99,16 +99,16 @@ public: return std::make_shared(item_data_type); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const IColumn * tuple_col = 
arguments[0].column.get();
         const DataTypeTuple * tuple = checkAndGetDataType(arguments[0].type.get());
-        const auto * tuple_col_concrete = assert_cast(tuple_col);
+        const auto * tuple_col_concrete = assert_cast(tuple_col);
 
         auto keys = ColumnString::create();
         MutableColumnPtr values = tuple_col_concrete->getColumn(0).cloneEmpty();
         auto offsets = ColumnVector::create();
-        for (size_t row = 0; row < tuple_col_concrete->size(); ++row)
+        for (size_t row = 0; row < input_rows_count; ++row)
         {
             for (size_t col = 0; col < tuple_col_concrete->tupleSize(); ++col)
             {

From dc2c3fb1ca4653ee006c8cbbbfa32688f19f1992 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Thu, 25 Jul 2024 11:57:14 +0200
Subject: [PATCH 0869/2361] Revert "Merge pull request #66563 from ClickHouse/delete-bad-test"

This reverts commit d0753c8bb60dacfbd99687906fe4efb7665b20fa, reversing
changes made to b8202e19baf7ad171e232a431c8a4c3f1c86e63e.

Signed-off-by: Azat Khuzhin
---
 .../0_stateless/02805_distributed_queries_timeouts.reference   | 0
 .../queries/0_stateless/02805_distributed_queries_timeouts.sql | 3 +++
 2 files changed, 3 insertions(+)
 create mode 100644 tests/queries/0_stateless/02805_distributed_queries_timeouts.reference
 create mode 100644 tests/queries/0_stateless/02805_distributed_queries_timeouts.sql

diff --git a/tests/queries/0_stateless/02805_distributed_queries_timeouts.reference b/tests/queries/0_stateless/02805_distributed_queries_timeouts.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
new file mode 100644
index 00000000000..0b7337d1255
--- /dev/null
+++ b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
@@ -0,0 +1,3 @@
+select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
+select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
+select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;

From 1a4730f1f390e468dab2849bd1b2770e0fb2cbe6 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Thu, 25 Jul 2024 12:03:50 +0200
Subject: [PATCH 0870/2361] Use Distributed table to avoid extra DESC queries

Signed-off-by: Azat Khuzhin
---
 .../0_stateless/02805_distributed_queries_timeouts.sql | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
index 0b7337d1255..bfa39cd78ee 100644
--- a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
+++ b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
@@ -1,3 +1,4 @@
-select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
-select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
-select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;
+create table dist as system.one engine=Distributed(test_shard_localhost, system, one);
+select sleep(3) from dist settings prefer_localhost_replica=0, receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
+select sleep(3) from dist settings prefer_localhost_replica=0, receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
+select sleep(3) from dist settings prefer_localhost_replica=0, receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;

From 49732f2966cd793e32234068cf0b87cea9e3eed6 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Thu, 25 Jul 2024 12:06:10 +0200
Subject: [PATCH 0871/2361] Tune sleep duration/receive_timeout in 02805_distributed_queries_timeouts

Signed-off-by: Azat Khuzhin
---
 .../0_stateless/02805_distributed_queries_timeouts.sql | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
index bfa39cd78ee..f6bccc99977 100644
--- a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
+++ b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql
@@ -1,4 +1,4 @@
 create table dist as system.one engine=Distributed(test_shard_localhost, system, one);
-select sleep(3) from dist settings prefer_localhost_replica=0, receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
-select sleep(3) from dist settings prefer_localhost_replica=0, receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
-select sleep(3) from dist settings prefer_localhost_replica=0, receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;
+select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=0, use_hedged_requests=1 format Null;
+select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=1, use_hedged_requests=0 format Null;
+select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=0, use_hedged_requests=0 format Null;

From 4ee409094d4b1b9a8c8f4da5f1cf55d78a43b11f Mon Sep 17 00:00:00 2001
From: alesapin
Date: Thu, 25 Jul 2024 12:12:23 +0200
Subject: [PATCH 0872/2361] Update tests/performance/decimal_aggregates.xml

---
 tests/performance/decimal_aggregates.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml
index b204dddea6e..724d0c5d0e6 100644
--- a/tests/performance/decimal_aggregates.xml
+++ b/tests/performance/decimal_aggregates.xml
@@ -9,6 +9,7 @@
     INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000, 100000000) SETTINGS max_threads = 2
     INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000, 100000000) SETTINGS max_threads = 2
     INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(300000000, 100000000) SETTINGS max_threads = 2
+    INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(400000000, 100000000) SETTINGS max_threads = 2

     DROP TABLE IF EXISTS t

From 86e23b346fbce791794e38ad7ae77d8af964988a Mon Sep 17 00:00:00 2001
From: Max K
Date: Thu, 25 Jul 2024 12:12:37 +0200
Subject: [PATCH 0873/2361] rename test stages

---
 .github/workflows/master.yml       | 21 +++++++++++----------
.github/workflows/pull_request.yml | 20 ++++++++++---------- tests/ci/ci_config.py | 6 +++--- tests/ci/ci_definitions.py | 6 +++--- tests/ci/test_ci_config.py | 4 ++-- 5 files changed, 29 insertions(+), 28 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index acd7511d520..2ce1124404f 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -93,21 +93,21 @@ jobs: with: stage: Builds_2 data: ${{ needs.RunConfig.outputs.data }} - Tests_2: + Tests_2_ww: needs: [RunConfig, Builds_2] + if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }} + uses: ./.github/workflows/reusable_test_stage.yml + with: + stage: Tests_2_ww + data: ${{ needs.RunConfig.outputs.data }} + Tests_2: + # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there. + needs: [RunConfig, Builds_1] if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }} uses: ./.github/workflows/reusable_test_stage.yml with: stage: Tests_2 data: ${{ needs.RunConfig.outputs.data }} - Tests_3: - # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there. - needs: [RunConfig, Builds_1] - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }} - uses: ./.github/workflows/reusable_test_stage.yml - with: - stage: Tests_3 - data: ${{ needs.RunConfig.outputs.data }} ################################# Reports ################################# # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3 @@ -123,7 +123,7 @@ jobs: FinishCheck: if: ${{ !cancelled() }} - needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] + needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code @@ -133,6 +133,7 @@ jobs: cd "$GITHUB_WORKSPACE/tests/ci" python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat > "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 34bf51871d2..854dff530e7 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -123,20 +123,20 @@ jobs: stage: Builds_2 data: ${{ needs.RunConfig.outputs.data }} # stage for running non-required checks without being blocked by required checks (Test_1) if corresponding settings is selected - Tests_2: + Tests_2_ww: needs: [RunConfig, Builds_1] + if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }} + uses: ./.github/workflows/reusable_test_stage.yml + with: + stage: Tests_2_ww + data: ${{ needs.RunConfig.outputs.data }} + Tests_2: + needs: [RunConfig, Builds_1, Tests_1] if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }} uses: ./.github/workflows/reusable_test_stage.yml with: stage: Tests_2 data: ${{ needs.RunConfig.outputs.data }} - Tests_3: - needs: [RunConfig, Builds_1, Tests_1] - if: ${{ !failure() && !cancelled() && 
contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }} - uses: ./.github/workflows/reusable_test_stage.yml - with: - stage: Tests_3 - data: ${{ needs.RunConfig.outputs.data }} ################################# Reports ################################# # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3) @@ -154,7 +154,7 @@ jobs: if: ${{ !cancelled() }} # Test_2 or Test_3 do not have the jobs required for Mergeable check, # however, set them as "needs" to get all checks results before the automatic merge occurs. - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code @@ -178,7 +178,7 @@ jobs: # FinishCheck: if: ${{ !failure() && !cancelled() }} - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3] + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] runs-on: [self-hosted, style-checker-aarch64] steps: - name: Check out repository code diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index a9bdb639835..0e295b2339d 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -587,10 +587,10 @@ class CI: if job_name in REQUIRED_CHECKS: stage_type = WorkflowStages.TESTS_1 else: - stage_type = WorkflowStages.TESTS_3 + stage_type = WorkflowStages.TESTS_2 assert stage_type, f"BUG [{job_name}]" - if non_blocking_ci and stage_type == WorkflowStages.TESTS_3: - stage_type = WorkflowStages.TESTS_2 + if non_blocking_ci and stage_type == WorkflowStages.TESTS_2: + stage_type = WorkflowStages.TESTS_2_WW return stage_type @classmethod diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 149177ecba5..054b554b8fa 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -67,10 +67,10 @@ class WorkflowStages(metaclass=WithIter): BUILDS_2 = "Builds_2" # all tests required for merge TESTS_1 = "Tests_1" - # not used atm - TESTS_2 = "Tests_2" + # used in woolenwolfdog mode + TESTS_2_WW = "Tests_2_ww" # all tests not required for merge - TESTS_3 = "Tests_3" + TESTS_2 = "Tests_2" class Runners(metaclass=WithIter): diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 4a2bd606d0e..be540413b3c 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -211,7 +211,7 @@ class TestCIConfig(unittest.TestCase): else: self.assertTrue( CI.get_job_ci_stage(job) - in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_3), + in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_2), msg=f"Stage for [{job}] is not correct", ) @@ -242,7 +242,7 @@ class TestCIConfig(unittest.TestCase): else: self.assertTrue( CI.get_job_ci_stage(job, non_blocking_ci=True) - in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_2), + in (CI.WorkflowStages.TESTS_1, CI.WorkflowStages.TESTS_2_WW), msg=f"Stage for [{job}] is not correct", ) From a32c702caa142d15bc3e5bc51ca90240d5d010a9 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 25 Jul 2024 18:23:47 +0800 Subject: [PATCH 0874/2361] fix style --- src/Functions/map.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 5319390fb70..a8e5f7ad90e 100644 --- 
a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -236,7 +236,6 @@ public: ColumnPtr data_keys = col_keys->getDataPtr(); if (isColumnNullableOrLowCardinalityNullable(*data_keys)) { - std::cout << "data keys is nullable" << std::endl; const NullMap * null_map = nullptr; if (const auto * nullable = checkAndGetColumn(data_keys.get())) { @@ -263,9 +262,7 @@ public: const auto & data_values = col_values->getDataPtr(); const auto & offsets = col_keys->getOffsetsPtr(); - std::cout << "before create array:" << "offsets:" << offsets->getName() << std::endl; auto nested_column = ColumnArray::create(ColumnTuple::create(Columns{std::move(data_keys), data_values}), offsets); - std::cout << "after create array:" << "offsets:" << offsets->getName() << std::endl; return ColumnMap::create(nested_column); } }; From 7fedc0ffbee9d04e0352037021a127cea93cbbfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 12:26:37 +0200 Subject: [PATCH 0875/2361] Update base/glibc-compatibility/musl/getauxval.c Co-authored-by: Alexander Gololobov --- base/glibc-compatibility/musl/getauxval.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c index b5bd2f114c2..28cb0f8d005 100644 --- a/base/glibc-compatibility/musl/getauxval.c +++ b/base/glibc-compatibility/musl/getauxval.c @@ -99,12 +99,12 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type) /// #8 0x5622c0d6b7cd in _start (./ClickHouse/build_msan/contrib/google-protobuf-cmake/protoc+0x22c7cd) (BuildId: 6411d3c88b898ba3f7d49760555977d3e61f0741) /// The source of the issue above is that, at this point in time during __msan_init, we can't really do much as - /// most global variables aren't initialized or available yet, so we we can't initiate the auxiliary vector. + /// most global variables aren't initialized or available yet, so we can't initiate the auxiliary vector. /// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very /// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise // this complexity of reading "/proc/self/auxv" or using __environ would not be necessary). - /// To avoid this crashes on the re-exec call (see above how it would fail when creating `aux`, and it we used + /// To avoid this crashes on the re-exec call (see above how it would fail when creating `aux`, and if we used /// __auxv_init_environ then it would SIGSEV on READing `__environ`) we capture this call for `AT_EXECFN` and /// unconditionally return "/proc/self/exe" without any preparation. Theoretically this should be fine in /// our case, as we don't load any libraries. That's the theory at least. 
From beb506a5b8179f5c88a6f5fc90d62b8e74bf0d35 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Thu, 25 Jul 2024 12:37:05 +0200 Subject: [PATCH 0876/2361] added somme tests in relation with https://github.com/ClickHouse/ClickHouse/pull/54881 with new behaviour when enable_named_columns_in_function_tuple=1 (default value) --- .../0_stateless/00307_format_xml.reference | 41 ++++++++++++++++++ .../queries/0_stateless/00307_format_xml.sql | 3 ++ .../0_stateless/00309_formats.reference | Bin 18537 -> 18736 bytes tests/queries/0_stateless/00309_formats.sql | 5 +++ 4 files changed, 49 insertions(+) diff --git a/tests/queries/0_stateless/00307_format_xml.reference b/tests/queries/0_stateless/00307_format_xml.reference index 2d9badc5a3e..14e74653d4f 100644 --- a/tests/queries/0_stateless/00307_format_xml.reference +++ b/tests/queries/0_stateless/00307_format_xml.reference @@ -1,3 +1,4 @@ +unnamed columns in tuple @@ -54,3 +55,43 @@ 1 +named columns in tuple + + + + + + s + String + + + time + DateTime + + + tpl + Tuple(String, DateTime) + + + + + + Hello & world + + Hello & world2001-02-03 04:05:06 + + + + + Hello & world + + Hello & world2001-02-03 04:05:06 + + + Hello & world + + Hello & world2001-02-03 04:05:06 + + + 1 + diff --git a/tests/queries/0_stateless/00307_format_xml.sql b/tests/queries/0_stateless/00307_format_xml.sql index 29c733bb186..22566112bc7 100644 --- a/tests/queries/0_stateless/00307_format_xml.sql +++ b/tests/queries/0_stateless/00307_format_xml.sql @@ -1,2 +1,5 @@ SET output_format_write_statistics = 0; +SELECT 'unnamed columns in tuple'; SELECT 'Hello & world' AS s, 'Hello\n', toDateTime('2001-02-03 04:05:06') AS time, arrayMap(x -> toString(x), range(10)) AS arr, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML; +SELECT 'named columns in tuple'; +SELECT 'Hello & world' AS s, toDateTime('2001-02-03 04:05:06') AS time, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML;` diff --git a/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference index e637ee0363a7b35152a155ae3fa73a4f451d5148..a63720618ba54c6cc456f3356512449322dc2e80 100644 GIT binary patch delta 149 zcmaDkfpNnm#toSsmduQKj47-or3E>uY@wccC1xfpE{P?n;vparjXVX2n2th<0!Ubs ziva?zYfU!f5;0_8gmBMl=W(WRf=%Iq=->n?=fp5&@&sYg$;X8ig*A*!&2-d_3=9l( M4J>sHjny@|06s7$QUCw| delta 9 QcmdlmiSgwG#toSs02V|99{>OV diff --git a/tests/queries/0_stateless/00309_formats.sql b/tests/queries/0_stateless/00309_formats.sql index b0939c00a10..691fc6e7ab6 100644 --- a/tests/queries/0_stateless/00309_formats.sql +++ b/tests/queries/0_stateless/00309_formats.sql @@ -9,3 +9,8 @@ SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, a SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSON; SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSONCompact; SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT XML; + +SET enable_named_columns_in_function_tuple = 1; + +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 2 
FORMAT RowBinaryWithNamesAndTypes; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT TabSeparatedWithNamesAndTypes; From 0dc67aae97d4b964cb0f9c389cbf3ce91cb76fb7 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 25 Jul 2024 13:52:30 +0200 Subject: [PATCH 0877/2361] fix MIN_ITERATIONS 2 --- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index d79ab27d8b2..620281ee972 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -168,7 +168,7 @@ fi START_TIME=$(get_now) STOP_TIME=$((START_TIME + MAIN_TIME_PART)) SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) -MIN_ITERATIONS=30 +MIN_ITERATIONS=25 run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 1 & PID_1=$! run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 2 & PID_2=$! From 2988e13050f0ab8a06e36ec8fe745386a214141b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 25 Jul 2024 13:55:01 +0200 Subject: [PATCH 0878/2361] Free bitmask --- base/base/getMemoryAmount.cpp | 1 + programs/server/Server.cpp | 2 ++ 2 files changed, 3 insertions(+) diff --git a/base/base/getMemoryAmount.cpp b/base/base/getMemoryAmount.cpp index b8162146496..56cddbfd628 100644 --- a/base/base/getMemoryAmount.cpp +++ b/base/base/getMemoryAmount.cpp @@ -84,6 +84,7 @@ uint64_t getMemoryAmountOrZero() memory_amount = total_numa_memory; } + numa_bitmask_free(membind); } #endif diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 619a72ff200..b9a7c298f00 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -778,6 +778,8 @@ try "ClickHouse is bound to a subset of NUMA nodes. 
Total memory of all available nodes {}", ReadableSize(total_numa_memory)); } + + numa_bitmask_free(membind); } #endif From b80305ba981ca1f862084d3316144efcba17466b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 14:31:00 +0200 Subject: [PATCH 0879/2361] Improve backport script --- tests/ci/cherry_pick.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index a7fc6d02853..623a816148e 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -420,7 +420,8 @@ class Backport: fetch_release_prs = self.gh.get_release_pulls(self._fetch_from) fetch_release_branches = [pr.head.ref for pr in fetch_release_prs] self.labels_to_backport = [ - f"v{branch}-must-backport" for branch in fetch_release_branches + f"v{branch if self._repo_name == "ClickHouse/ClickHouse" else branch.replace('release/','')}-must-backport" + for branch in fetch_release_branches ] logging.info("Fetching from %s", self._fetch_from) @@ -490,17 +491,23 @@ class Backport: def process_pr(self, pr: PullRequest) -> None: pr_labels = [label.name for label in pr.labels] - if ( - any(label in pr_labels for label in self.must_create_backport_labels) - or self._repo_name != self._fetch_from - ): + if any(label in pr_labels for label in self.must_create_backport_labels): branches = [ ReleaseBranch(br, pr, self.repo, self.backport_created_label) for br in self.release_branches ] # type: List[ReleaseBranch] else: branches = [ - ReleaseBranch(br, pr, self.repo, self.backport_created_label) + ReleaseBranch( + ( + br + if self._repo_name == "ClickHouse/clickhouse" + else f"release/{br}" + ), + pr, + self.repo, + self.backport_created_label, + ) for br in [ label.split("-", 1)[0][1:] # v21.8-must-backport for label in pr_labels From 2c83a39503255f0b2233b511a63a262cb8749a53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 12:53:16 +0000 Subject: [PATCH 0880/2361] Fixes --- tests/ci/cherry_pick.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index 623a816148e..c2f567e5f15 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -420,7 +420,9 @@ class Backport: fetch_release_prs = self.gh.get_release_pulls(self._fetch_from) fetch_release_branches = [pr.head.ref for pr in fetch_release_prs] self.labels_to_backport = [ - f"v{branch if self._repo_name == "ClickHouse/ClickHouse" else branch.replace('release/','')}-must-backport" + f"v{branch}-must-backport" + if self._repo_name == "ClickHouse/ClickHouse" + else f"v{branch.replace('release/','')}-must-backport" for branch in fetch_release_branches ] @@ -501,7 +503,7 @@ class Backport: ReleaseBranch( ( br - if self._repo_name == "ClickHouse/clickhouse" + if self._repo_name == "ClickHouse/Clickhouse" else f"release/{br}" ), pr, From 92cca8e65dec9f46d5a248c10e748088c9437cb6 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 25 Jul 2024 14:51:53 +0200 Subject: [PATCH 0881/2361] Fix --- src/AggregateFunctions/SingleValueData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/SingleValueData.cpp b/src/AggregateFunctions/SingleValueData.cpp index a14caf00f73..996e64b22e0 100644 --- a/src/AggregateFunctions/SingleValueData.cpp +++ b/src/AggregateFunctions/SingleValueData.cpp @@ -1191,7 +1191,7 @@ bool SingleValueDataString::isEqualTo(const DB::IColumn & column, size_t row_num bool 
SingleValueDataString::isEqualTo(const SingleValueDataBase & other) const { auto const & to = assert_cast(other); - return has() && to.getStringRef() == getStringRef(); + return has() && to.has() && to.getStringRef() == getStringRef(); } void SingleValueDataString::set(const IColumn & column, size_t row_num, Arena * arena) From cd06945a03df0f8dbec6ff82332236caf86fbff3 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 Jul 2024 13:05:25 +0000 Subject: [PATCH 0882/2361] Fix crash with Variant + AggregateFunction type --- src/Columns/ColumnAggregateFunction.cpp | 6 +- ...ant_with_aggregate_function_type.reference | 6 ++ ...0_variant_with_aggregate_function_type.sql | 60 +++++++++++++++++++ 3 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03210_variant_with_aggregate_function_type.reference create mode 100644 tests/queries/0_stateless/03210_variant_with_aggregate_function_type.sql diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp index e26fe790a8e..9934970c868 100644 --- a/src/Columns/ColumnAggregateFunction.cpp +++ b/src/Columns/ColumnAggregateFunction.cpp @@ -330,7 +330,11 @@ ColumnPtr ColumnAggregateFunction::filter(const Filter & filter, ssize_t result_ void ColumnAggregateFunction::expand(const Filter & mask, bool inverted) { - expandDataByMask(data, mask, inverted); + ensureOwnership(); + Arena & arena = createOrGetArena(); + char * default_ptr = arena.alignedAlloc(func->sizeOfData(), func->alignOfData()); + func->create(default_ptr); + expandDataByMask(data, mask, inverted, default_ptr); } ColumnPtr ColumnAggregateFunction::permute(const Permutation & perm, size_t limit) const diff --git a/tests/queries/0_stateless/03210_variant_with_aggregate_function_type.reference b/tests/queries/0_stateless/03210_variant_with_aggregate_function_type.reference new file mode 100644 index 00000000000..105e8e7d8bd --- /dev/null +++ b/tests/queries/0_stateless/03210_variant_with_aggregate_function_type.reference @@ -0,0 +1,6 @@ + 500 +fail 500 + 499 +fail 500 + 500 499 +fail 500 500 diff --git a/tests/queries/0_stateless/03210_variant_with_aggregate_function_type.sql b/tests/queries/0_stateless/03210_variant_with_aggregate_function_type.sql new file mode 100644 index 00000000000..cb9cdb0b456 --- /dev/null +++ b/tests/queries/0_stateless/03210_variant_with_aggregate_function_type.sql @@ -0,0 +1,60 @@ +SET allow_experimental_variant_type = 1; + +DROP TABLE IF EXISTS source; +CREATE TABLE source +( + Name String, + Value Int64 + +) ENGINE = MergeTree ORDER BY (); + +INSERT INTO source SELECT ['fail', 'success'][number % 2] as Name, number AS Value FROM numbers(1000); + +DROP TABLE IF EXISTS test_agg_variant; +CREATE TABLE test_agg_variant +( + Name String, + Value Variant(AggregateFunction(uniqExact, Int64), AggregateFunction(avg, Int64)) +) +ENGINE = MergeTree +ORDER BY (Name); + +INSERT INTO test_agg_variant +SELECT + Name, + t AS Value +FROM +( + SELECT + Name, + arrayJoin([ + uniqExactState(Value)::Variant(AggregateFunction(uniqExact, Int64), AggregateFunction(avg, Int64)), + avgState(Value)::Variant(AggregateFunction(uniqExact, Int64), AggregateFunction(avg, Int64)) + ]) AS t + FROM source + GROUP BY Name +); + +SELECT + Name, + uniqExactMerge(Value.`AggregateFunction(uniqExact, Int64)`) AS Value +FROM test_agg_variant +GROUP BY Name; + +SELECT + Name, + avgMerge(Value.`AggregateFunction(avg, Int64)`) AS Value +FROM test_agg_variant +GROUP BY Name; + +SELECT + Name, + 
uniqExactMerge(Value.`AggregateFunction(uniqExact, Int64)`) AS ValueUniq, + avgMerge(Value.`AggregateFunction(avg, Int64)`) AS ValueAvg +FROM test_agg_variant +GROUP BY Name; + + +DROP TABLE test_agg_variant; +DROP TABLE source; + From b23ce171c3620568829201d80789f314fc27499a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 13:11:32 +0000 Subject: [PATCH 0883/2361] My black version said this was ok --- tests/ci/cherry_pick.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index c2f567e5f15..0b2aa9a2d35 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -420,9 +420,11 @@ class Backport: fetch_release_prs = self.gh.get_release_pulls(self._fetch_from) fetch_release_branches = [pr.head.ref for pr in fetch_release_prs] self.labels_to_backport = [ - f"v{branch}-must-backport" - if self._repo_name == "ClickHouse/ClickHouse" - else f"v{branch.replace('release/','')}-must-backport" + ( + f"v{branch}-must-backport" + if self._repo_name == "ClickHouse/ClickHouse" + else f"v{branch.replace('release/','')}-must-backport" + ) for branch in fetch_release_branches ] From c5164fede8665b61c10ec0d7b6873a7cf04aab12 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 13:17:21 +0000 Subject: [PATCH 0884/2361] Fix some test. --- src/Interpreters/ExpressionAnalyzer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 6b5b129085d..5972d89bddd 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1944,7 +1944,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( Block before_prewhere_sample = source_header; if (sanitizeBlock(before_prewhere_sample)) { - prewhere_dag_and_flags->dag.updateHeader(before_prewhere_sample); + before_prewhere_sample = prewhere_dag_and_flags->dag.updateHeader(before_prewhere_sample); auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); /// If the filter column is a constant, record it. 
if (column_elem.column) @@ -1976,7 +1976,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( before_where_sample = source_header; if (sanitizeBlock(before_where_sample)) { - before_where->dag.updateHeader(before_where_sample); + before_where_sample = before_where->dag.updateHeader(before_where_sample); auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); From ae75c99e3fad02a3716c9d520c3a680f4d9d28e4 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 25 Jul 2024 15:17:59 +0200 Subject: [PATCH 0885/2361] Add a test --- .../0_stateless/03210_fix-single-value-data-assertion.reference | 0 .../0_stateless/03210_fix-single-value-data-assertion.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/03210_fix-single-value-data-assertion.reference create mode 100644 tests/queries/0_stateless/03210_fix-single-value-data-assertion.sql diff --git a/tests/queries/0_stateless/03210_fix-single-value-data-assertion.reference b/tests/queries/0_stateless/03210_fix-single-value-data-assertion.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03210_fix-single-value-data-assertion.sql b/tests/queries/0_stateless/03210_fix-single-value-data-assertion.sql new file mode 100644 index 00000000000..66e62377d6b --- /dev/null +++ b/tests/queries/0_stateless/03210_fix-single-value-data-assertion.sql @@ -0,0 +1 @@ +SELECT intDiv(number, 2) AS k, count(toFixedString(toFixedString('hello', 5), 5)) IGNORE NULLS, sumArgMax(number, toString(number % 20)), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 65537) WHERE toLowCardinality(toLowCardinality(toNullable(21))) GROUP BY k WITH TOTALS ORDER BY k ASC NULLS FIRST LIMIT 255 SETTINGS group_by_overflow_mode = 'any', totals_mode = 'before_having', max_rows_to_group_by = 100000 FORMAT Null From ad44fb1ba4759434ecb4353a7878aea6162f8fef Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 Jul 2024 13:22:19 +0000 Subject: [PATCH 0886/2361] Increase lock_acquire_timeout_for_background_operations setting in dynamic merges tests --- .../03037_dynamic_merges_1_horizontal_compact_merge_tree.sql | 2 +- .../03037_dynamic_merges_1_horizontal_compact_wide_tree.sql | 2 +- .../03037_dynamic_merges_1_vertical_compact_merge_tree.sql | 5 +++-- .../03037_dynamic_merges_1_vertical_wide_merge_tree.sql | 2 +- .../03037_dynamic_merges_2_horizontal_compact_merge_tree.sql | 2 +- .../03037_dynamic_merges_2_horizontal_wide_merge_tree.sql | 2 +- .../03037_dynamic_merges_2_vertical_compact_merge_tree.sql | 2 +- .../03037_dynamic_merges_2_vertical_wide_merge_tree.sql | 2 +- .../03038_nested_dynamic_merges_compact_horizontal.sql | 2 +- .../03038_nested_dynamic_merges_compact_vertical.sql | 2 +- .../03038_nested_dynamic_merges_wide_horizontal.sql | 2 +- .../03038_nested_dynamic_merges_wide_vertical.sql | 2 +- 12 files changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql index b66fe5e2187..07371ee099b 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings 
min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql index 8a376b6d7d7..2b55a31e937 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql index 127b56e727c..ea7295a9eab 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -13,7 +13,8 @@ insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); select count(), dynamicType(d) from test 
group by dynamicType(d) order by count(), dynamicType(d); -system start merges test; optimize table test final;; +system start merges test; +optimize table test final; select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); system stop merges test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql index e5c273cb592..e888a14b323 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql index 6d7a0dd8c18..e633b277ebd 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql @@ -3,7 +3,7 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; -create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(1000000); insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql index 011d54d2360..90dbc2d84f5 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql @@ -3,7 +3,7 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; -create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(1000000); insert into test select number, 'str_' 
|| toString(number) from numbers(1000000, 1000000); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql index 1a74f9e5417..ffd2618ee51 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql @@ -3,7 +3,7 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; -create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(1000000); insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql index cbc834e9660..36dff88751b 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql @@ -3,7 +3,7 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; -create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(1000000); insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql index ff1dc5e7ded..1d5c63dcdf1 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql @@ -6,7 +6,7 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql 
b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql index f9b0101cb87..2bffe35c577 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql @@ -6,7 +6,7 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql index 5f373d41c7d..fb686091ebb 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql @@ -6,7 +6,7 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql index 36bbc76b8cb..ed195452d56 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql @@ -6,7 +6,7 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); From e0b125368855e57733132046de5cd383ccc9b7d2 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 25 Jul 2024 15:25:33 +0200 Subject: [PATCH 0887/2361] Fix harder --- src/AggregateFunctions/SingleValueData.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/AggregateFunctions/SingleValueData.cpp b/src/AggregateFunctions/SingleValueData.cpp index 996e64b22e0..566b40253a3 100644 --- 
a/src/AggregateFunctions/SingleValueData.cpp +++ b/src/AggregateFunctions/SingleValueData.cpp @@ -195,7 +195,7 @@ bool SingleValueDataFixed::isEqualTo(const IColumn & column, size_t index) co template bool SingleValueDataFixed::isEqualTo(const SingleValueDataFixed & to) const { - return has() && to.value == value; + return has() && to.has() && to.value == value; } template @@ -905,7 +905,7 @@ template bool SingleValueDataNumeric::isEqualTo(const DB::SingleValueDataBase & to) const { auto const & other = assert_cast(to); - return memory.get().isEqualTo(other.memory.get()); + return to.has() && memory.get().isEqualTo(other.memory.get()); } template @@ -1291,7 +1291,7 @@ bool SingleValueDataGeneric::isEqualTo(const IColumn & column, size_t row_num) c bool SingleValueDataGeneric::isEqualTo(const DB::SingleValueDataBase & other) const { auto const & to = assert_cast(other); - return has() && to.value == value; + return has() && to.has() && to.value == value; } void SingleValueDataGeneric::set(const IColumn & column, size_t row_num, Arena *) From cad120987c0847b14c407b6548c1862d8ed34f6d Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 Jul 2024 13:33:45 +0000 Subject: [PATCH 0888/2361] Fix flaky tests --- src/Processors/Formats/IRowInputFormat.cpp | 4 ---- src/Storages/VirtualColumnUtils.cpp | 1 - .../0_stateless/01825_new_type_json_ghdata_insert_select.sh | 2 +- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index 31d7fdbf67b..0b6c81923db 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -124,10 +124,6 @@ Chunk IRowInputFormat::read() return getChunkForCount(num_rows); } - /// Reserve params.max_block_size rows in advance. 
- for (auto & column : columns) - column->reserve(params.max_block_size); - RowReadExtension info; bool continue_reading = true; for (size_t rows = 0; (rows < params.max_block_size || num_rows == 0) && continue_reading; ++rows) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index dd1bcb7446c..151079154b1 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -290,7 +290,6 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( else if (!allow_non_deterministic_functions) return nullptr; - if (node_copy.children.empty()) return nullptr; diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh index ad428603da2..ef87034ff89 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, long CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 713b337681dbf562ef1a354a30f2aec2a6fdbd00 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 Jul 2024 13:38:07 +0000 Subject: [PATCH 0889/2361] Add comment about splitSubcolumnName in DataTypeDynamic --- src/DataTypes/DataTypeDynamic.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index 88871f28f06..da2299f53e6 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -90,6 +90,10 @@ void registerDataTypeDynamic(DataTypeFactory & factory) namespace { +/// Split Dynamic subcolumn name into 2 parts: type name and subcolumn of this type. +/// We cannot simply split by '.' because type name can also contain dots. For example: Tuple(`a.b` UInt32). +/// But in all such cases this '.' will be inside back quotes. To split subcolumn name correctly +/// we search for the first '.' that is not inside back quotes. 
std::pair splitSubcolumnName(std::string_view subcolumn_name) { bool inside_quotes = false; From f20ca25a79333a82c8b191c8718fa1a88fdc014a Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 Jul 2024 13:40:21 +0000 Subject: [PATCH 0890/2361] Fix creating serialization for Rapid and Dummy json parsers --- src/DataTypes/DataTypeObject.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index eeda6476fd0..e2a2caa9f21 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -112,9 +112,17 @@ SerializationPtr DataTypeObject::doGetDefaultSerialization() const path_regexps_to_skip, buildJSONExtractTree(getPtr(), "JSON serialization")); #elif USE_RAPIDJSON - return std::make_shared>(std::move(typed_path_serializations), paths_to_skip, buildJSONExtractTree(getPtr(), "JSON serialization")); + return std::make_shared>( + std::move(typed_path_serializations), + paths_to_skip, + path_regexps_to_skip, + buildJSONExtractTree(getPtr(), "JSON serialization")); #else - return std::make_shared>(std::move(typed_path_serializations), paths_to_skip, buildJSONExtractTree(getPtr(), "JSON serialization")); + return std::make_shared>( + std::move(typed_path_serializations), + paths_to_skip, + path_regexps_to_skip, + buildJSONExtractTree(getPtr(), "JSON serialization")); #endif } } From 64eeece5331c36c4f2b1c00c2bc229e40cd74d63 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 25 Jul 2024 15:49:59 +0200 Subject: [PATCH 0891/2361] fix --- tests/queries/0_stateless/02446_parent_zero_copy_locks.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql b/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql index 1cae8ae0237..a44322e02cf 100644 --- a/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql +++ b/tests/queries/0_stateless/02446_parent_zero_copy_locks.sql @@ -36,8 +36,8 @@ select 1, * from rmt1 order by n; system sync replica rmt1; select 2, * from rmt2 order by n; --- wait for outdated parts to be removed -select throwIf(count() = 0) from ( +-- wait for outdated parts to be removed (do not ignore _state column, so it will count Deleting parts as well) +select throwIf(count() = 0), groupArray(_state) from ( select *, _state from system.parts where database=currentDatabase() and table like 'rmt%' and active=0 ) format Null; -- { retry 30 until serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } From 701d99a9ad56243933ce5d85796f47c26f574b8e Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 25 Jul 2024 13:56:17 +0000 Subject: [PATCH 0892/2361] Small clean up --- src/DataTypes/Serializations/ISerialization.h | 2 +- src/Formats/JSONExtractTree.cpp | 4 ++-- src/Formats/SchemaInferenceUtils.cpp | 2 +- src/IO/WriteHelpers.h | 6 ------ src/Interpreters/InterpreterShowColumnsQuery.cpp | 2 +- 5 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 5d6ac2707ed..54234189528 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -448,7 +448,7 @@ public: static bool hasSubcolumnForPath(const SubstreamPath & path, size_t prefix_len); static SubstreamData createFromPath(const SubstreamPath & path, size_t prefix_len); - /// Returns true if subcolumn doesn't actually stores any data in column and doen'st require a separate stream + /// Returns 
true if subcolumn doesn't actually stores any data in column and doesn't require a separate stream /// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators);. static bool isFictitiousSubcolumn(const SubstreamPath & path, size_t prefix_len); diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index f5b5a7bb984..8203ccb5862 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1404,7 +1404,7 @@ public: String & error) const override { auto & column_dynamic = assert_cast(column); - /// First, check if element is NULL. + /// Check if element is NULL. if (element.isNull()) { column_dynamic.insertDefault(); @@ -1414,7 +1414,7 @@ public: auto & variant_column = column_dynamic.getVariantColumn(); const auto & variant_info = column_dynamic.getVariantInfo(); - /// First, try to insert element into current variants but with no types conversion. + /// Try to insert element into current variants but with no types conversion. /// We want to avoid inferring the type on each row, so if we can insert this element into /// any existing variant with no types conversion (like Integer -> String, Double -> Integer, etc) /// we will do it and won't try to infer the type. diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 28d60bf078b..5d6267cd1bb 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -295,7 +295,7 @@ namespace WhichDataType which(type); if (which.isInt64() || which.isUInt64()) { - const auto & new_type = std::make_shared(); + auto new_type = std::make_shared(); if (json_info && json_info->numbers_parsed_from_json_strings.erase(type.get())) json_info->numbers_parsed_from_json_strings.insert(new_type.get()); type = new_type; diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 306635272f5..6b0de441e94 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -90,12 +90,6 @@ inline void writeUUIDBinary(const UUID & x, WriteBuffer & buf) writePODBinary(uuid.items[1], buf); } -template -void setValue(char * data, const T & value) -{ - memcpy(data, reinterpret_cast(&value), sizeof(T)); -} - template inline void writeIntBinary(const T & x, WriteBuffer & buf) { diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index b39ce25ab57..472cdedf3ae 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -68,7 +68,7 @@ WITH map( 'Map', 'JSON', 'Tuple', 'JSON', 'Object', 'JSON', - 'JSON', 'JSON', + 'JSON', 'JSON', 'String', '{}', 'FixedString', '{}') AS native_to_mysql_mapping, )", From f654db215ffb961010763c0daf6484fa75e4fd6b Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 25 Jul 2024 15:56:29 +0200 Subject: [PATCH 0893/2361] Fix naming --- ....reference => 03210_fix_single_value_data_assertion.reference} | 0 ...ta-assertion.sql => 03210_fix_single_value_data_assertion.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{03210_fix-single-value-data-assertion.reference => 03210_fix_single_value_data_assertion.reference} (100%) rename tests/queries/0_stateless/{03210_fix-single-value-data-assertion.sql => 03210_fix_single_value_data_assertion.sql} (100%) diff --git a/tests/queries/0_stateless/03210_fix-single-value-data-assertion.reference 
b/tests/queries/0_stateless/03210_fix_single_value_data_assertion.reference similarity index 100% rename from tests/queries/0_stateless/03210_fix-single-value-data-assertion.reference rename to tests/queries/0_stateless/03210_fix_single_value_data_assertion.reference diff --git a/tests/queries/0_stateless/03210_fix-single-value-data-assertion.sql b/tests/queries/0_stateless/03210_fix_single_value_data_assertion.sql similarity index 100% rename from tests/queries/0_stateless/03210_fix-single-value-data-assertion.sql rename to tests/queries/0_stateless/03210_fix_single_value_data_assertion.sql From 4b505badd3566cf2b47681c667ee134699cf2764 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 25 Jul 2024 14:11:41 +0000 Subject: [PATCH 0894/2361] Attempt to fix flakiness of some window view tests --- .../queries/0_stateless/01052_window_view_proc_tumble_to_now.sh | 1 + tests/queries/0_stateless/01053_window_view_proc_hop_to_now.sh | 1 + tests/queries/0_stateless/01054_window_view_proc_tumble_to.sh | 1 + tests/queries/0_stateless/01055_window_view_proc_hop_to.sh | 1 + .../0_stateless/01075_window_view_proc_tumble_to_now_populate.sh | 1 + 5 files changed, 5 insertions(+) diff --git a/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh b/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh index 4325ebeed24..5c70806ea7b 100755 --- a/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh +++ b/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-settings, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01053_window_view_proc_hop_to_now.sh b/tests/queries/0_stateless/01053_window_view_proc_hop_to_now.sh index 8e28995980f..32c9c52ab09 100755 --- a/tests/queries/0_stateless/01053_window_view_proc_hop_to_now.sh +++ b/tests/queries/0_stateless/01053_window_view_proc_hop_to_now.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-settings, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01054_window_view_proc_tumble_to.sh b/tests/queries/0_stateless/01054_window_view_proc_tumble_to.sh index ee11b265ecd..ba566bb4ae6 100755 --- a/tests/queries/0_stateless/01054_window_view_proc_tumble_to.sh +++ b/tests/queries/0_stateless/01054_window_view_proc_tumble_to.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-settings, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01055_window_view_proc_hop_to.sh b/tests/queries/0_stateless/01055_window_view_proc_hop_to.sh index ea8ad372617..0db4173b3dc 100755 --- a/tests/queries/0_stateless/01055_window_view_proc_hop_to.sh +++ b/tests/queries/0_stateless/01055_window_view_proc_hop_to.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-settings, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01075_window_view_proc_tumble_to_now_populate.sh b/tests/queries/0_stateless/01075_window_view_proc_tumble_to_now_populate.sh index f7842af4dad..67c249a9d0e 100755 --- a/tests/queries/0_stateless/01075_window_view_proc_tumble_to_now_populate.sh +++ b/tests/queries/0_stateless/01075_window_view_proc_tumble_to_now_populate.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-random-settings, 
no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From baee31c12a68cd6e8f906a9224a39eea446a8f2b Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 25 Jul 2024 16:15:37 +0200 Subject: [PATCH 0895/2361] fix truncate database --- src/Interpreters/InterpreterDropQuery.cpp | 3 +-- tests/queries/0_stateless/02842_truncate_database.reference | 2 ++ tests/queries/0_stateless/02842_truncate_database.sql | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index b68b3ddcd48..bad3e5277db 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -399,10 +399,9 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, if (query.if_empty) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP IF EMPTY is not implemented for databases"); - if (database->hasReplicationThread()) + if (!truncate && database->hasReplicationThread()) database->stopReplication(); - if (database->shouldBeEmptyOnDetach()) { /// Cancel restarting replicas in that database, wait for remaining RESTART queries to finish. diff --git a/tests/queries/0_stateless/02842_truncate_database.reference b/tests/queries/0_stateless/02842_truncate_database.reference index 71f52bcd1da..bc8c0210d27 100644 --- a/tests/queries/0_stateless/02842_truncate_database.reference +++ b/tests/queries/0_stateless/02842_truncate_database.reference @@ -20,3 +20,5 @@ source_table_stripe_log source_table_tiny_log === DICTIONARIES IN test_truncate_database === dest_dictionary +new tables +new_table diff --git a/tests/queries/0_stateless/02842_truncate_database.sql b/tests/queries/0_stateless/02842_truncate_database.sql index 09ac844cfe2..be92108ccb8 100644 --- a/tests/queries/0_stateless/02842_truncate_database.sql +++ b/tests/queries/0_stateless/02842_truncate_database.sql @@ -73,4 +73,8 @@ SELECT * FROM dest_dictionary; -- {serverError UNKNOWN_TABLE} SHOW TABLES FROM test_truncate_database; SHOW DICTIONARIES FROM test_truncate_database; +CREATE TABLE new_table (x UInt16) ENGINE = ReplicatedMergeTree ORDER BY x; +select 'new tables'; +SHOW TABLES FROM test_truncate_database; + DROP DATABASE test_truncate_database; From 0642ed19b7c67e443be110f2a0f2d1f032ddd8d5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 14:17:45 +0000 Subject: [PATCH 0896/2361] Fixing more tests. --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index bdb90abd326..e5aeb9686be 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1525,7 +1525,7 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes) /// TODO: Get rid of filter_actions_dag in query_info after we move analysis of /// parallel replicas and unused shards into optimization, similar to projection analysis. if (filter_actions_dag) - query_info.filter_actions_dag = std::make_shared(std::move(*filter_actions_dag)); + query_info.filter_actions_dag = std::make_shared(filter_actions_dag->clone()); buildIndexes( indexes, From bd721950b0401f94be652c11015bd1985c283f3a Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Thu, 25 Jul 2024 16:24:17 +0200 Subject: [PATCH 0897/2361] squash! 
added some tests in relation to https://github.com/ClickHouse/ClickHouse/pull/54881 covering the new behaviour when enable_named_columns_in_function_tuple=1 (default value) --- .../0_stateless/00309_formats.reference | Bin 18736 -> 18666 bytes tests/queries/0_stateless/00309_formats.sql | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference index a63720618ba54c6cc456f3356512449322dc2e80..f3ea45520bb50fb936caf6724e9fedf3cdd00b75 100644 GIT binary patch delta 54 zcmdlmiSgA$#tkJN8myt7c_kJsE{P?nVj-mkIjI_X3Sc1}g%kylsHVzCEg+hFz{3v! D3NRA? delta 99 zcmaDgk#WN$#tkJNdTgPdc_n5hEG~&9sp27}1v#l2c?u9Q9fcGHkgz5f0|Z>xnrz4= o!o|P{;hxo=JjcUt@=alWVGScwGaYp!0|NtH14~^)V|7g~0E0RhwEzGB diff --git a/tests/queries/0_stateless/00309_formats.sql b/tests/queries/0_stateless/00309_formats.sql index 691fc6e7ab6..0366cdeea5c 100644 --- a/tests/queries/0_stateless/00309_formats.sql +++ b/tests/queries/0_stateless/00309_formats.sql @@ -12,5 +12,5 @@ SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, a SET enable_named_columns_in_function_tuple = 1; -SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT RowBinaryWithNamesAndTypes; -SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT TabSeparatedWithNamesAndTypes; +SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes; From 59f9c125044b6e56a3ded8034478eff79e930018 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Thu, 25 Jul 2024 14:37:47 +0000 Subject: [PATCH 0898/2361] Increase sleep time to make sure there is a new failure The previous sleep was already adding +5s to make sure the TTL was properly applied, so we'd rather use the same value here instead of just 1s.
--- tests/integration/test_storage_s3_queue/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 4348857acd3..2e339a9b5c9 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -871,7 +871,7 @@ def test_max_set_age(started_cluster): node.query(f"SELECT uniq(_path) from {dst_table_name}") ) - time.sleep(max_age + 5) + time.sleep(max_age + max_age / 2) expected_rows *= 2 wait_for_condition(lambda: get_count() == expected_rows) @@ -922,7 +922,7 @@ def test_max_set_age(started_cluster): ) ) - time.sleep(max_age + 1) + time.sleep(max_age + max_age / 2) assert failed_count + 2 <= get_object_storage_failures() From 1973458ae07a5cd519b7069451d2be5822a89bf7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 15:00:37 +0000 Subject: [PATCH 0899/2361] Update PlannerWindowFunctions --- src/Planner/Planner.cpp | 13 ++++++------- src/Planner/PlannerWindowFunctions.cpp | 15 ++------------- src/Planner/PlannerWindowFunctions.h | 2 +- 3 files changed, 9 insertions(+), 21 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index fb721069e6e..968642dc9de 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -933,19 +933,19 @@ void addPreliminarySortOrDistinctOrLimitStepsIfNeeded(QueryPlan & query_plan, void addWindowSteps(QueryPlan & query_plan, const PlannerContextPtr & planner_context, - const WindowAnalysisResult & window_analysis_result) + WindowAnalysisResult & window_analysis_result) { const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); - const auto & window_descriptions = window_analysis_result.window_descriptions; - auto perm = sortWindowDescriptions(window_descriptions); + auto & window_descriptions = window_analysis_result.window_descriptions; + sortWindowDescriptions(window_descriptions); size_t window_descriptions_size = window_descriptions.size(); for (size_t i = 0; i < window_descriptions_size; ++i) { - const auto & window_description = window_descriptions[perm[i]]; + const auto & window_description = window_descriptions[i]; /** We don't need to sort again if the input from previous window already * has suitable sorting. 
Also don't create sort steps when there are no @@ -958,9 +958,8 @@ void addWindowSteps(QueryPlan & query_plan, bool need_sort = !window_description.full_sort_description.empty(); if (need_sort && i != 0) { - auto prev = perm[i - 1]; - need_sort = !sortDescriptionIsPrefix(window_description.full_sort_description, window_descriptions[prev].full_sort_description) - || (settings.max_threads != 1 && window_description.partition_by.size() != window_descriptions[prev].partition_by.size()); + need_sort = !sortDescriptionIsPrefix(window_description.full_sort_description, window_descriptions[i - 1].full_sort_description) + || (settings.max_threads != 1 && window_description.partition_by.size() != window_descriptions[i - 1].partition_by.size()); } if (need_sort) { diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index ba0e11df76b..225852de5a7 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -122,7 +122,7 @@ std::vector extractWindowDescriptions(const QueryTreeNodes & return result; } -std::vector sortWindowDescriptions(const std::vector & window_descriptions) +void sortWindowDescriptions(std::vector & window_descriptions) { auto window_description_comparator = [](const WindowDescription & lhs, const WindowDescription & rhs) { @@ -153,18 +153,7 @@ std::vector sortWindowDescriptions(const std::vector return left.size() > right.size(); }; - auto comparator = [&](size_t lhs, size_t rhs) - { - return window_description_comparator(window_descriptions[lhs], window_descriptions[rhs]); - }; - - std::vector perm(window_descriptions.size()); - for (size_t i = 0; i < perm.size(); ++i) - perm[i] = i; - - ::sort(perm.begin(), perm.end(), comparator); - - return perm; + ::sort(window_descriptions.begin(), window_descriptions.end(), window_description_comparator); } } diff --git a/src/Planner/PlannerWindowFunctions.h b/src/Planner/PlannerWindowFunctions.h index 3039ecefc4b..1552ef5a71f 100644 --- a/src/Planner/PlannerWindowFunctions.h +++ b/src/Planner/PlannerWindowFunctions.h @@ -15,6 +15,6 @@ std::vector extractWindowDescriptions(const QueryTreeNodes & /** Try to sort window descriptions in such an order that the window with the longest * sort description goes first, and all window that use its prefixes follow. */ -std::vector sortWindowDescriptions(const std::vector & window_descriptions); +void sortWindowDescriptions(std::vector & window_descriptions); } From 93a2dbf85c446cd5efa802dfe8fc99e2a82f394f Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Thu, 25 Jul 2024 17:02:11 +0200 Subject: [PATCH 0900/2361] Move syntax part on top --- docs/en/sql-reference/statements/system.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index f4780fd41c1..35f2f15dd80 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -28,11 +28,14 @@ SYSTEM RELOAD DICTIONARIES [ON CLUSTER cluster_name] Completely reloads a dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT_LOADED / FAILED). Always returns `Ok.` regardless of the result of updating the dictionary. -The status of the dictionary can be checked by querying the `system.dictionaries` table. ``` sql SYSTEM RELOAD DICTIONARY [ON CLUSTER cluster_name] dictionary_name +``` +The status of the dictionary can be checked by querying the `system.dictionaries` table. 
+ +``` sql SELECT name, status FROM system.dictionaries; ``` From 5ea867231bafc01b4512989f351106b7afcc14af Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 25 Jul 2024 16:55:23 +0200 Subject: [PATCH 0901/2361] Read configuration for clickhouse-local from ~/.clickhouse-local Signed-off-by: Azat Khuzhin --- programs/local/LocalServer.cpp | 16 +++++++-- src/Common/Config/CMakeLists.txt | 1 + src/Common/Config/getLocalConfigPath.cpp | 46 ++++++++++++++++++++++++ src/Common/Config/getLocalConfigPath.h | 12 +++++++ 4 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 src/Common/Config/getLocalConfigPath.cpp create mode 100644 src/Common/Config/getLocalConfigPath.h diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 48e0cca7b73..ade4e0f49df 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -1,6 +1,7 @@ #include "LocalServer.h" #include +#include #include #include #include @@ -127,10 +128,21 @@ void LocalServer::initialize(Poco::Util::Application & self) { Poco::Util::Application::initialize(self); + const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe) + if (home_path_cstr) + home_path = home_path_cstr; + /// Load config files if exists - if (getClientConfiguration().has("config-file") || fs::exists("config.xml")) + std::string config_path; + if (getClientConfiguration().has("config-file")) + config_path = getClientConfiguration().getString("config-file"); + else if (config_path.empty() && fs::exists("config.xml")) + config_path = "config.xml"; + else if (config_path.empty()) + config_path = getLocalConfigPath(home_path).value_or(""); + + if (fs::exists(config_path)) { - const auto config_path = getClientConfiguration().getString("config-file", "config.xml"); ConfigProcessor config_processor(config_path, false, true); ConfigProcessor::setConfigPath(fs::path(config_path).parent_path()); auto loaded_config = config_processor.loadConfig(); diff --git a/src/Common/Config/CMakeLists.txt b/src/Common/Config/CMakeLists.txt index 09095ef5acc..2bd32b98bda 100644 --- a/src/Common/Config/CMakeLists.txt +++ b/src/Common/Config/CMakeLists.txt @@ -2,6 +2,7 @@ set (SRCS AbstractConfigurationComparison.cpp ConfigProcessor.cpp getClientConfigPath.cpp + getLocalConfigPath.cpp ConfigReloader.cpp YAMLParser.cpp ConfigHelper.cpp diff --git a/src/Common/Config/getLocalConfigPath.cpp b/src/Common/Config/getLocalConfigPath.cpp new file mode 100644 index 00000000000..afaa7f79026 --- /dev/null +++ b/src/Common/Config/getLocalConfigPath.cpp @@ -0,0 +1,46 @@ +#include + +#include +#include + + +namespace fs = std::filesystem; + +namespace DB +{ + +std::optional getLocalConfigPath(const std::string & home_path) +{ + std::string config_path; + bool found = false; + + std::vector names; + names.emplace_back("./clickhouse-local"); + if (!home_path.empty()) + names.emplace_back(home_path + "/.clickhouse-local/config"); + names.emplace_back("/etc/clickhouse-local/config"); + + for (const auto & name : names) + { + for (const auto & extension : {".xml", ".yaml", ".yml"}) + { + config_path = name + extension; + + std::error_code ec; + if (fs::exists(config_path, ec)) + { + found = true; + break; + } + } + if (found) + break; + } + + if (found) + return config_path; + + return std::nullopt; +} + +} diff --git a/src/Common/Config/getLocalConfigPath.h b/src/Common/Config/getLocalConfigPath.h new file mode 100644 index 00000000000..14625571d6c --- /dev/null +++ b/src/Common/Config/getLocalConfigPath.h @@ -0,0 +1,12 @@ 
+#pragma once + +#include +#include + +namespace DB +{ + +/// Return path to existing configuration file. +std::optional getLocalConfigPath(const std::string & home_path); + +} From fb271436a1efe969f4de09b14aec942baa145cb9 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 15:37:13 +0000 Subject: [PATCH 0902/2361] Remove ActionsDAGPtr completely. --- src/Interpreters/ActionsDAG.h | 3 --- src/Interpreters/evaluateConstantExpression.cpp | 4 ++-- .../optimizePrimaryKeyConditionAndLimit.cpp | 6 +++--- src/Processors/QueryPlan/SourceStepWithFilter.h | 6 +++--- src/Processors/QueryPlan/TotalsHavingStep.h | 3 --- src/Processors/QueryPlan/WindowStep.h | 3 --- src/Storages/MergeTree/KeyCondition.cpp | 14 +++++++------- src/Storages/MergeTree/KeyCondition.h | 2 +- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 3 +++ src/Storages/SelectQueryInfo.h | 3 --- src/Storages/StorageMerge.cpp | 2 +- 11 files changed, 20 insertions(+), 29 deletions(-) diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 76cc9327530..43c1b41a240 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -11,9 +11,6 @@ namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - class IExecutableFunction; using ExecutableFunctionPtr = std::shared_ptr; diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 42d6f3d3037..4bfc80af1fe 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -679,9 +679,9 @@ std::optional evaluateExpressionOverConstantCondition( size_t max_elements) { auto inverted_dag = KeyCondition::cloneASTWithInversionPushDown({predicate}, context); - auto matches = matchTrees(expr, *inverted_dag, false); + auto matches = matchTrees(expr, inverted_dag, false); - auto predicates = analyze(inverted_dag->getOutputs().at(0), matches, context, max_elements); + auto predicates = analyze(inverted_dag.getOutputs().at(0), matches, context, max_elements); if (!predicates) return {}; diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp index 63b4e019066..f53212407d2 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp @@ -18,16 +18,16 @@ void optimizePrimaryKeyConditionAndLimit(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(std::make_unique(storage_prewhere_info->prewhere_actions->clone()), storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions->clone(), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) - source_step_with_filter->addFilter(std::make_unique(storage_prewhere_info->row_level_filter->clone()), storage_prewhere_info->row_level_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter->clone(), storage_prewhere_info->row_level_column_name); } for (auto iter = stack.rbegin() + 1; iter != stack.rend(); ++iter) { if (auto * filter_step = typeid_cast(iter->node->step.get())) { - source_step_with_filter->addFilter(std::make_unique(filter_step->getExpression().clone()), filter_step->getFilterColumnName()); + 
source_step_with_filter->addFilter(filter_step->getExpression().clone(), filter_step->getFilterColumnName()); } else if (auto * limit_step = typeid_cast(iter->node->step.get())) { diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.h b/src/Processors/QueryPlan/SourceStepWithFilter.h index f7a030c0628..6cea5fd7245 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.h +++ b/src/Processors/QueryPlan/SourceStepWithFilter.h @@ -45,9 +45,9 @@ public: const Names & requiredSourceColumns() const { return required_source_columns; } - void addFilter(ActionsDAGPtr filter_dag, std::string column_name) + void addFilter(ActionsDAG filter_dag, std::string column_name) { - filter_nodes.nodes.push_back(&filter_dag->findInOutputs(column_name)); + filter_nodes.nodes.push_back(&filter_dag.findInOutputs(column_name)); filter_dags.push_back(std::move(filter_dag)); } @@ -86,7 +86,7 @@ protected: private: /// Will be cleared after applyFilters() is called. ActionDAGNodes filter_nodes; - std::vector filter_dags; + std::vector filter_dags; }; } diff --git a/src/Processors/QueryPlan/TotalsHavingStep.h b/src/Processors/QueryPlan/TotalsHavingStep.h index 927b8d99de3..4b414d41c57 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.h +++ b/src/Processors/QueryPlan/TotalsHavingStep.h @@ -6,9 +6,6 @@ namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - enum class TotalsMode : uint8_t; /// Execute HAVING and calculate totals. See TotalsHavingTransform. diff --git a/src/Processors/QueryPlan/WindowStep.h b/src/Processors/QueryPlan/WindowStep.h index 47883e5edf6..d79cd7fd45e 100644 --- a/src/Processors/QueryPlan/WindowStep.h +++ b/src/Processors/QueryPlan/WindowStep.h @@ -6,9 +6,6 @@ namespace DB { -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - class WindowTransform; class WindowStep : public ITransformingStep diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 0eb59a47cae..69bffac9160 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -696,22 +696,22 @@ const std::unordered_map KeyConditi {"hilbertEncode", SpaceFillingCurveType::Hilbert} }; -ActionsDAGPtr KeyCondition::cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context) +ActionsDAG KeyCondition::cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context) { - auto res = std::make_unique(); + ActionsDAG res; std::unordered_map to_inverted; for (auto & node : nodes) - node = &DB::cloneASTWithInversionPushDown(*node, *res, to_inverted, context, false); + node = &DB::cloneASTWithInversionPushDown(*node, res, to_inverted, context, false); if (nodes.size() > 1) { auto function_builder = FunctionFactory::instance().get("and", context); - nodes = {&res->addFunction(function_builder, std::move(nodes), "")}; + nodes = {&res.addFunction(function_builder, std::move(nodes), "")}; } - res->getOutputs().swap(nodes); + res.getOutputs().swap(nodes); return res; } @@ -826,9 +826,9 @@ KeyCondition::KeyCondition( * are pushed down and applied (when possible) to leaf nodes. 
*/ auto inverted_dag = cloneASTWithInversionPushDown({filter_dag->getOutputs().at(0)}, context); - assert(inverted_dag->getOutputs().size() == 1); + assert(inverted_dag.getOutputs().size() == 1); - const auto * inverted_dag_filter_node = inverted_dag->getOutputs()[0]; + const auto * inverted_dag_filter_node = inverted_dag.getOutputs()[0]; RPNBuilder builder(inverted_dag_filter_node, context, [&](const RPNBuilderTreeNode & node, RPNElement & out) { diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index a9e1a589ba5..e9343ec08ea 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -134,7 +134,7 @@ public: DataTypePtr current_type, bool single_point = false); - static ActionsDAGPtr cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context); + static ActionsDAG cloneASTWithInversionPushDown(ActionsDAG::NodeRawConstPtrs nodes, const ContextPtr & context); bool matchesExactContinuousRange() const; diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 116edf5b9cb..1d0569e0df6 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -10,6 +10,9 @@ namespace DB { +class ActionsDAG; +using ActionsDAGPtr = std::unique_ptr; + namespace ErrorCodes { extern const int LOGICAL_ERROR; diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 60f103fdb70..1c4cb7d92d8 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -18,9 +18,6 @@ namespace DB class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; -class ActionsDAG; -using ActionsDAGPtr = std::unique_ptr; - struct PrewhereInfo; using PrewhereInfoPtr = std::shared_ptr; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 0e1568c8e79..e5de15c1d21 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1279,7 +1279,7 @@ void ReadFromMerge::RowPolicyData::extendNames(Names & names) const void ReadFromMerge::RowPolicyData::addStorageFilter(SourceStepWithFilter * step) const { - step->addFilter(std::make_unique(actions_dag.clone()), filter_column_name); + step->addFilter(actions_dag.clone(), filter_column_name); } void ReadFromMerge::RowPolicyData::addFilterTransform(QueryPlan & plan) const From ccd92d20821903123d4748027cc2248095b34efa Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 25 Jul 2024 17:44:26 +0200 Subject: [PATCH 0903/2361] Update chassert in cache --- src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 198f6c0ea04..c928d25c7b8 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -810,6 +810,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() { last_caller_id = FileSegment::getCallerId(); + chassert(file_offset_of_buffer_end <= read_until_position); if (file_offset_of_buffer_end == read_until_position) return false; @@ -1051,7 +1052,11 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() if (download_current_segment && download_current_segment_succeeded) chassert(file_segment.getCurrentWriteOffset() >= file_offset_of_buffer_end); - chassert(file_offset_of_buffer_end <= 
read_until_position); + + chassert( + file_offset_of_buffer_end <= read_until_position, + fmt::format("Expected {} <= {} (size: {}, read range: {})", + file_offset_of_buffer_end, read_until_position, size, current_read_range.toString())); } swap(*implementation_buffer); From e199fbaeaadd05b28e9dee1265fc813b081071f1 Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Thu, 25 Jul 2024 17:55:32 +0200 Subject: [PATCH 0904/2361] Update tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh --- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 620281ee972..13aa64d3cbe 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -168,7 +168,7 @@ fi START_TIME=$(get_now) STOP_TIME=$((START_TIME + MAIN_TIME_PART)) SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) -MIN_ITERATIONS=25 +MIN_ITERATIONS=20 run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 1 & PID_1=$! run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 2 & PID_2=$! From 869f6a6f105f50aa4d0e71e6440646b78539f0ff Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 16:33:12 +0000 Subject: [PATCH 0905/2361] Updating PrewhereInfo --- src/Interpreters/ExpressionAnalyzer.cpp | 9 ++++----- src/Interpreters/InterpreterSelectQuery.cpp | 16 ++++++++-------- src/Planner/PlannerJoinTree.cpp | 8 +++----- .../QueryPlan/Optimizations/optimizePrewhere.cpp | 8 ++++---- .../optimizePrimaryKeyConditionAndLimit.cpp | 2 +- .../Optimizations/optimizeReadInOrder.cpp | 12 +++++------- .../Optimizations/projectionsCommon.cpp | 15 ++++++--------- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 12 ++++-------- .../QueryPlan/SourceStepWithFilter.cpp | 9 +++------ src/Storages/IStorage.cpp | 3 +-- .../MergeTree/MergeTreePrefetchedReadPool.cpp | 2 +- src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 2 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 ++-- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 6 +++--- src/Storages/SelectQueryInfo.h | 7 +++---- src/Storages/StorageBuffer.cpp | 7 +++---- 16 files changed, 52 insertions(+), 70 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 5972d89bddd..d25434a515d 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -2230,12 +2230,11 @@ void ExpressionAnalysisResult::checkActions() const /// Check that PREWHERE doesn't contain unusual actions. Unusual actions are that can change number of rows. 
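For illustration only (not taken from the patch itself): the check touched in this hunk is the one that rejects row-count-changing actions in `PREWHERE`. A minimal sketch of the user-visible behaviour, assuming a hypothetical MergeTree table `t`:

``` sql
-- Hypothetical table; PREWHERE is specific to MergeTree-family engines.
CREATE TABLE t (x UInt64, s String) ENGINE = MergeTree ORDER BY x;

-- An ordinary filter in PREWHERE is fine:
SELECT * FROM t PREWHERE x = 1;

-- arrayJoin() compiles to an ARRAY JOIN action and can change the number of rows,
-- so this is expected to be rejected with an ILLEGAL_PREWHERE error:
SELECT * FROM t PREWHERE arrayJoin([1, 2]) = x;
```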
if (hasPrewhere()) { - auto check_actions = [](const std::optional & actions) + auto check_actions = [](ActionsDAG & actions) { - if (actions) - for (const auto & node : actions->getNodes()) - if (node.type == ActionsDAG::ActionType::ARRAY_JOIN) - throw Exception(ErrorCodes::ILLEGAL_PREWHERE, "PREWHERE cannot contain ARRAY JOIN action"); + for (const auto & node : actions.getNodes()) + if (node.type == ActionsDAG::ActionType::ARRAY_JOIN) + throw Exception(ErrorCodes::ILLEGAL_PREWHERE, "PREWHERE cannot contain ARRAY JOIN action"); }; check_actions(prewhere_info->prewhere_actions); diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 174e6b5b0e0..4fd6f7a2900 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -937,7 +937,7 @@ bool InterpreterSelectQuery::adjustParallelReplicasAfterAnalysis() { { const auto & node - = query_info_copy.prewhere_info->prewhere_actions->findInOutputs(query_info_copy.prewhere_info->prewhere_column_name); + = query_info_copy.prewhere_info->prewhere_actions.findInOutputs(query_info_copy.prewhere_info->prewhere_column_name); added_filter_nodes.nodes.push_back(&node); } @@ -1058,7 +1058,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl() if (analysis_result.prewhere_info) { - header = analysis_result.prewhere_info->prewhere_actions->updateHeader(header); + header = analysis_result.prewhere_info->prewhere_actions.updateHeader(header); if (analysis_result.prewhere_info->remove_prewhere_column) header.erase(analysis_result.prewhere_info->prewhere_column_name); } @@ -1521,7 +1521,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( query_plan.getCurrentDataStream(), - expressions.prewhere_info->prewhere_actions->clone(), + expressions.prewhere_info->prewhere_actions.clone(), expressions.prewhere_info->prewhere_column_name, expressions.prewhere_info->remove_prewhere_column); @@ -2066,7 +2066,7 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c }); } - auto filter_actions = std::make_shared(prewhere_info.prewhere_actions->clone()); + auto filter_actions = std::make_shared(prewhere_info.prewhere_actions.clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared( @@ -2157,7 +2157,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() if (prewhere_info) { /// Get some columns directly from PREWHERE expression actions - auto prewhere_required_columns = prewhere_info->prewhere_actions->getRequiredColumns().getNames(); + auto prewhere_required_columns = prewhere_info->prewhere_actions.getRequiredColumns().getNames(); columns.insert(prewhere_required_columns.begin(), prewhere_required_columns.end()); if (prewhere_info->row_level_filter) @@ -2229,7 +2229,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() if (prewhere_info) { NameSet columns_to_remove(columns_to_remove_after_prewhere.begin(), columns_to_remove_after_prewhere.end()); - Block prewhere_actions_result = prewhere_info->prewhere_actions->getResultColumns(); + Block prewhere_actions_result = prewhere_info->prewhere_actions.getResultColumns(); /// Populate required columns with the columns, added by PREWHERE actions and not removed afterwards. /// XXX: looks hacky that we already know which columns after PREWHERE we won't need for sure. @@ -2268,7 +2268,7 @@ void InterpreterSelectQuery::addPrewhereAliasActions() { /// Don't remove columns which are needed to be aliased. for (const auto & name : required_columns) - prewhere_info->prewhere_actions->tryRestoreColumn(name); + prewhere_info->prewhere_actions.tryRestoreColumn(name); /// Add physical columns required by prewhere actions. 
for (const auto & column : required_columns_from_prewhere) @@ -2326,7 +2326,7 @@ std::optional InterpreterSelectQuery::getTrivialCount(UInt64 max_paralle if (analysis_result.hasPrewhere()) { auto & prewhere_info = analysis_result.prewhere_info; - filter_nodes.push_back(&prewhere_info->prewhere_actions->findInOutputs(prewhere_info->prewhere_column_name)); + filter_nodes.push_back(&prewhere_info->prewhere_actions.findInOutputs(prewhere_info->prewhere_column_name)); if (prewhere_info->row_level_filter) filter_nodes.push_back(&prewhere_info->row_level_filter->findInOutputs(prewhere_info->row_level_column_name)); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index e9f886ab162..a3db0395ccc 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -437,7 +437,7 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info std::unordered_set required_output_nodes; - for (const auto * input : prewhere_actions->getInputs()) + for (const auto * input : prewhere_actions.getInputs()) { if (required_columns.contains(input->result_name)) required_output_nodes.insert(input); @@ -446,7 +446,7 @@ void updatePrewhereOutputsIfNeeded(SelectQueryInfo & table_expression_query_info if (required_output_nodes.empty()) return; - auto & prewhere_outputs = prewhere_actions->getOutputs(); + auto & prewhere_outputs = prewhere_actions.getOutputs(); for (const auto & output : prewhere_outputs) { auto required_output_node_it = required_output_nodes.find(output); @@ -801,10 +801,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres if (storage->canMoveConditionsToPrewhere() && optimize_move_to_prewhere && (!supported_prewhere_columns || supported_prewhere_columns->contains(filter_info.column_name))) { if (!prewhere_info) - prewhere_info = std::make_shared(); - - if (!prewhere_info->prewhere_actions) { + prewhere_info = std::make_shared(); prewhere_info->prewhere_actions = std::move(filter_info.actions); prewhere_info->prewhere_column_name = filter_info.column_name; prewhere_info->remove_prewhere_column = filter_info.do_remove_column; diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index 5711189136c..dc73521210a 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -56,7 +56,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) return; const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); - if (storage_prewhere_info && storage_prewhere_info->prewhere_actions) + if (storage_prewhere_info) return; /// TODO: We can also check for UnionStep, such as StorageBuffer and local distributed plans. 
@@ -165,16 +165,16 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes &) { prewhere_info->prewhere_column_name = conditions.front()->result_name; if (prewhere_info->remove_prewhere_column) - prewhere_info->prewhere_actions->getOutputs().push_back(conditions.front()); + prewhere_info->prewhere_actions.getOutputs().push_back(conditions.front()); } else { prewhere_info->remove_prewhere_column = true; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto * node = &prewhere_info->prewhere_actions->addFunction(func_builder_and, std::move(conditions), {}); + const auto * node = &prewhere_info->prewhere_actions.addFunction(func_builder_and, std::move(conditions), {}); prewhere_info->prewhere_column_name = node->result_name; - prewhere_info->prewhere_actions->getOutputs().push_back(node); + prewhere_info->prewhere_actions.getOutputs().push_back(node); } source_step_with_filter->updatePrewhereInfo(prewhere_info); diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp index f53212407d2..490b79fbf8d 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyConditionAndLimit.cpp @@ -18,7 +18,7 @@ void optimizePrimaryKeyConditionAndLimit(const Stack & stack) const auto & storage_prewhere_info = source_step_with_filter->getPrewhereInfo(); if (storage_prewhere_info) { - source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions->clone(), storage_prewhere_info->prewhere_column_name); + source_step_with_filter->addFilter(storage_prewhere_info->prewhere_actions.clone(), storage_prewhere_info->prewhere_column_name); if (storage_prewhere_info->row_level_filter) source_step_with_filter->addFilter(storage_prewhere_info->row_level_filter->clone(), storage_prewhere_info->row_level_column_name); } diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 252420e19fe..99df6da263f 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -191,13 +191,11 @@ void buildSortingDAG(QueryPlan::Node & node, std::optional & dag, Fi /// Should ignore limit if there is filtering. 
limit = 0; - if (prewhere_info->prewhere_actions) - { - //std::cerr << "====== Adding prewhere " << std::endl; - appendExpression(dag, *prewhere_info->prewhere_actions); - if (const auto * filter_expression = dag->tryFindInOutputs(prewhere_info->prewhere_column_name)) - appendFixedColumnsFromFilterExpression(*filter_expression, fixed_columns); - } + //std::cerr << "====== Adding prewhere " << std::endl; + appendExpression(dag, prewhere_info->prewhere_actions); + if (const auto * filter_expression = dag->tryFindInOutputs(prewhere_info->prewhere_column_name)) + appendFixedColumnsFromFilterExpression(*filter_expression, fixed_columns); + } return; } diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index 571d1dd0cc1..7414d479cc9 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -128,15 +128,12 @@ bool QueryDAG::buildImpl(QueryPlan::Node & node, ActionsDAG::NodeRawConstPtrs & return false; } - if (prewhere_info->prewhere_actions) - { - appendExpression(*prewhere_info->prewhere_actions); - if (const auto * filter_expression - = findInOutputs(*dag, prewhere_info->prewhere_column_name, prewhere_info->remove_prewhere_column)) - filter_nodes.push_back(filter_expression); - else - return false; - } + appendExpression(prewhere_info->prewhere_actions); + if (const auto * filter_expression + = findInOutputs(*dag, prewhere_info->prewhere_column_name, prewhere_info->remove_prewhere_column)) + filter_nodes.push_back(filter_expression); + else + return false; } return true; } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index e5aeb9686be..483876dd293 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -109,8 +109,7 @@ bool restorePrewhereInputs(PrewhereInfo & info, const NameSet & inputs) if (info.row_level_filter) added = added || restoreDAGInputs(*info.row_level_filter, inputs); - if (info.prewhere_actions) - added = added || restoreDAGInputs(*info.prewhere_actions, inputs); + added = added || restoreDAGInputs(info.prewhere_actions, inputs); return added; } @@ -175,9 +174,8 @@ static void updateSortDescriptionForOutputStream( Block original_header = output_stream.header.cloneEmpty(); if (prewhere_info) { - if (prewhere_info->prewhere_actions) { - FindOriginalNodeForOutputName original_column_finder(*prewhere_info->prewhere_actions); + FindOriginalNodeForOutputName original_column_finder(prewhere_info->prewhere_actions); for (auto & column : original_header) { const auto * original_node = original_column_finder.find(column.name); @@ -2131,7 +2129,6 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const prefix.push_back(format_settings.indent_char); prefix.push_back(format_settings.indent_char); - if (prewhere_info->prewhere_actions) { format_settings.out << prefix << "Prewhere filter" << '\n'; format_settings.out << prefix << "Prewhere filter column: " << prewhere_info->prewhere_column_name; @@ -2139,7 +2136,7 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); expression->describeActions(format_settings.out, prefix); } @@ 
-2169,12 +2166,11 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_info_map = std::make_unique(); prewhere_info_map->add("Need filter", prewhere_info->need_filter); - if (prewhere_info->prewhere_actions) { std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.cpp b/src/Processors/QueryPlan/SourceStepWithFilter.cpp index b91debc8239..3de9ae37db0 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.cpp +++ b/src/Processors/QueryPlan/SourceStepWithFilter.cpp @@ -34,9 +34,8 @@ Block SourceStepWithFilter::applyPrewhereActions(Block block, const PrewhereInfo block.erase(prewhere_info->row_level_column_name); } - if (prewhere_info->prewhere_actions) { - block = prewhere_info->prewhere_actions->updateHeader(block); + block = prewhere_info->prewhere_actions.updateHeader(block); auto & prewhere_column = block.getByName(prewhere_info->prewhere_column_name); if (!prewhere_column.type->canBeUsedInBooleanContext()) @@ -102,7 +101,6 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con prefix.push_back(format_settings.indent_char); prefix.push_back(format_settings.indent_char); - if (prewhere_info->prewhere_actions) { format_settings.out << prefix << "Prewhere filter" << '\n'; format_settings.out << prefix << "Prewhere filter column: " << prewhere_info->prewhere_column_name; @@ -110,7 +108,7 @@ void SourceStepWithFilter::describeActions(FormatSettings & format_settings) con format_settings.out << " (removed)"; format_settings.out << '\n'; - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); expression->describeActions(format_settings.out, prefix); } @@ -132,12 +130,11 @@ void SourceStepWithFilter::describeActions(JSONBuilder::JSONMap & map) const std::unique_ptr prewhere_info_map = std::make_unique(); prewhere_info_map->add("Need filter", prewhere_info->need_filter); - if (prewhere_info->prewhere_actions) { std::unique_ptr prewhere_filter_map = std::make_unique(); prewhere_filter_map->add("Prewhere filter column", prewhere_info->prewhere_column_name); prewhere_filter_map->add("Prewhere filter remove filter column", prewhere_info->remove_prewhere_column); - auto expression = std::make_shared(prewhere_info->prewhere_actions->clone()); + auto expression = std::make_shared(prewhere_info->prewhere_actions.clone()); prewhere_filter_map->add("Prewhere filter expression", expression->toTree()); prewhere_info_map->add("Prewhere filter", std::move(prewhere_filter_map)); diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 823a6ae1cbc..755d71df531 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -325,9 +325,8 @@ std::string PrewhereInfo::dump() const ss << "row_level_filter " << row_level_filter->dumpDAG() << "\n"; } - if (prewhere_actions) { - ss << "prewhere_actions " << prewhere_actions->dumpDAG() << "\n"; + ss << 
"prewhere_actions " << prewhere_actions.dumpDAG() << "\n"; } ss << "remove_prewhere_column " << remove_prewhere_column diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index 26595fbb36d..a9b77fb6c03 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -329,7 +329,7 @@ void MergeTreePrefetchedReadPool::fillPerPartStatistics() part_stat.sum_marks += range.end - range.begin; const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info - ? prewhere_info->prewhere_actions->getRequiredColumnsNames() + ? prewhere_info->prewhere_actions.getRequiredColumnsNames() : column_names; part_stat.approx_size_of_mark = getApproximateSizeOfGranule(*read_info.data_part, columns); diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 46482bc0959..6d2560bc9c7 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -65,7 +65,7 @@ static size_t calculateMinMarksPerTask( /// Which means in turn that for most of the rows we will read only the columns from prewhere clause. /// So it makes sense to use only them for the estimation. const auto & columns = settings.merge_tree_determine_task_size_by_prewhere_columns && prewhere_info - ? prewhere_info->prewhere_actions->getRequiredColumnsNames() + ? prewhere_info->prewhere_actions.getRequiredColumnsNames() : columns_to_read; const size_t part_compressed_bytes = getApproxSizeOfPart(*part.data_part, columns); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index f1df9e231c4..1a0709faf1c 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -59,7 +59,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( if (prewhere_info) LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", - (prewhere_info->prewhere_actions ? prewhere_info->prewhere_actions->dumpDAG(): std::string("")), + prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? prewhere_actions.dump() : std::string(""))); } @@ -96,7 +96,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr PrewhereExprStep prewhere_step { .type = PrewhereExprStep::Filter, - .actions = std::make_shared(prewhere_info->prewhere_actions->clone(), actions_settings), + .actions = std::make_shared(prewhere_info->prewhere_actions.clone(), actions_settings), .filter_column_name = prewhere_info->prewhere_column_name, .remove_filter_column = prewhere_info->remove_prewhere_column, .need_filter = prewhere_info->need_filter, diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 1d0569e0df6..36ff6c0a4bd 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -216,11 +216,11 @@ const ActionsDAG::Node & addAndTrue( /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere) { - if (!prewhere_info || !prewhere_info->prewhere_actions) + if (!prewhere_info) return true; /// 1. 
List all condition nodes that are combined with AND into PREWHERE condition - const auto & condition_root = prewhere_info->prewhere_actions->findInOutputs(prewhere_info->prewhere_column_name); + const auto & condition_root = prewhere_info->prewhere_actions.findInOutputs(prewhere_info->prewhere_column_name); const bool is_conjunction = (condition_root.type == ActionsDAG::ActionType::FUNCTION && condition_root.function_base->getName() == "and"); if (!is_conjunction) return false; @@ -306,7 +306,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction } /// 6. Find all outputs of the original DAG - auto original_outputs = prewhere_info->prewhere_actions->getOutputs(); + auto original_outputs = prewhere_info->prewhere_actions.getOutputs(); /// 7. Find all outputs that were computed in the already built DAGs, mark these nodes as outputs in the steps where they were computed /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 NameSet all_output_names; diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 1c4cb7d92d8..7ad6a733c6f 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -45,7 +45,7 @@ struct PrewhereInfo /// This actions are separate because prewhere condition should not be executed over filtered rows. std::optional row_level_filter; /// Actions which are executed on block in order to get filter column for prewhere step. - std::optional prewhere_actions; + ActionsDAG prewhere_actions; String row_level_column_name; String prewhere_column_name; bool remove_prewhere_column = false; @@ -53,7 +53,7 @@ struct PrewhereInfo bool generated_by_optimizer = false; PrewhereInfo() = default; - explicit PrewhereInfo(std::optional prewhere_actions_, String prewhere_column_name_) + explicit PrewhereInfo(ActionsDAG prewhere_actions_, String prewhere_column_name_) : prewhere_actions(std::move(prewhere_actions_)), prewhere_column_name(std::move(prewhere_column_name_)) {} std::string dump() const; @@ -65,8 +65,7 @@ struct PrewhereInfo if (row_level_filter) prewhere_info->row_level_filter = row_level_filter->clone(); - if (prewhere_actions) - prewhere_info->prewhere_actions = prewhere_actions->clone(); + prewhere_info->prewhere_actions = prewhere_actions.clone(); prewhere_info->row_level_column_name = row_level_column_name; prewhere_info->prewhere_column_name = prewhere_column_name; diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index aee4e4683ad..04e6d6676d1 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -319,13 +319,12 @@ void StorageBuffer::read( src_table_query_info.prewhere_info->row_level_filter->removeUnusedActions(); } - if (src_table_query_info.prewhere_info->prewhere_actions) { src_table_query_info.prewhere_info->prewhere_actions = ActionsDAG::merge( actions_dag.clone(), - std::move(*src_table_query_info.prewhere_info->prewhere_actions)); + std::move(src_table_query_info.prewhere_info->prewhere_actions)); - src_table_query_info.prewhere_info->prewhere_actions->removeUnusedActions(); + src_table_query_info.prewhere_info->prewhere_actions.removeUnusedActions(); } } @@ -440,7 +439,7 @@ void StorageBuffer::read( }); } - auto actions = std::make_shared(query_info.prewhere_info->prewhere_actions->clone(), actions_settings); + auto actions = std::make_shared(query_info.prewhere_info->prewhere_actions.clone(), actions_settings); pipe_from_buffers.addSimpleTransform([&](const Block & header) { return 
std::make_shared( From 638d4640959f93924cec00b172d1cc1837d9ac10 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 18:42:16 +0200 Subject: [PATCH 0906/2361] Fix test `00673_subquery_prepared_set_performance` --- .../0_stateless/00673_subquery_prepared_set_performance.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql index 0591592344c..b938d54c646 100644 --- a/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql +++ b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql @@ -1,3 +1,5 @@ +-- Tags: no-tsan + DROP TABLE IF EXISTS mergetree_00673; CREATE TABLE mergetree_00673 (x UInt64) ENGINE = MergeTree ORDER BY x; From 7a003237befaa8d58cb6a77bb47e11fd1493e277 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 18:43:11 +0200 Subject: [PATCH 0907/2361] Fix test `00673_subquery_prepared_set_performance` --- .../0_stateless/00673_subquery_prepared_set_performance.sql | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql index b938d54c646..98c0802ffbc 100644 --- a/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql +++ b/tests/queries/0_stateless/00673_subquery_prepared_set_performance.sql @@ -1,14 +1,12 @@ --- Tags: no-tsan - DROP TABLE IF EXISTS mergetree_00673; CREATE TABLE mergetree_00673 (x UInt64) ENGINE = MergeTree ORDER BY x; INSERT INTO mergetree_00673 VALUES (1); -SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(10000000)))))))))))); +SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(1000000)))))))))))))))))))))); SET force_primary_key = 1; -SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(10000000)))))))))))); +SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(1000000)))))))))))))))))))))); DROP TABLE mergetree_00673; From 21f3a08ba7d626b967d99f694b1fde93da022ab9 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 25 Jul 2024 18:54:51 +0200 Subject: [PATCH 0908/2361] fix flaky test --- .../queries/0_stateless/03145_non_loaded_projection_backup.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh b/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh index 7df2118ad0c..95aef9bbc5b 100755 --- 
a/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh +++ b/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh @@ -6,8 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -nm -q " drop table if exists tp_1; -create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y partition by intDiv(y, 100); -system stop merges tp_1; +create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y partition by intDiv(y, 100) settings max_parts_to_merge_at_once=1; insert into tp_1 select number, number from numbers(3); set mutations_sync = 2; @@ -39,7 +38,6 @@ $CLICKHOUSE_CLIENT -nm -q " set send_logs_level='fatal'; drop table tp_1; restore table tp_1 from Disk('backups', '$backup_id'); -system stop merges tp_1; " | grep -o "RESTORED" $CLICKHOUSE_CLIENT -q "select count() from tp_1;" From f4b943f9f82bd4d297574774173e45abb2ee42d0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 19:05:41 +0200 Subject: [PATCH 0909/2361] Fix tidy --- src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp index 377f6b36888..ba864035777 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp @@ -261,7 +261,7 @@ std::optional ReadBufferFromAzureBlobStorage::tryGetFileSize() if (!file_size) file_size = blob_client->GetProperties().Value.BlobSize; - return *file_size; + return file_size; } size_t ReadBufferFromAzureBlobStorage::readBigAt(char * to, size_t n, size_t range_begin, const std::function & /*progress_callback*/) const From a06df0729ea398642b715bfd2b121b1db0c5dd6d Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 17:10:59 +0000 Subject: [PATCH 0910/2361] Remove the comment. 
--- src/Interpreters/ActionsDAG.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 85b2b38da17..4aaecc491e0 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -3111,7 +3111,6 @@ ActionsDAG::NodeRawConstPtrs ActionsDAG::filterNodesByAllowedInputs( } FindOriginalNodeForOutputName::FindOriginalNodeForOutputName(const ActionsDAG & actions_) - //: actions(actions_) { const auto & actions_outputs = actions_.getOutputs(); for (const auto * output_node : actions_outputs) From 257be35365b8e0fd6163af027bbc02288ce8910b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 19:21:31 +0200 Subject: [PATCH 0911/2361] Minor tweaks and extra type tests --- src/AggregateFunctions/SingleValueData.cpp | 6 +++++- ..._fix_single_value_data_assertion.reference | 12 +++++++++++ .../03210_fix_single_value_data_assertion.sql | 20 ++++++++++++++++++- 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/SingleValueData.cpp b/src/AggregateFunctions/SingleValueData.cpp index 566b40253a3..11931acbbc8 100644 --- a/src/AggregateFunctions/SingleValueData.cpp +++ b/src/AggregateFunctions/SingleValueData.cpp @@ -904,8 +904,9 @@ bool SingleValueDataNumeric::isEqualTo(const DB::IColumn & column, size_t ind template bool SingleValueDataNumeric::isEqualTo(const DB::SingleValueDataBase & to) const { + /// to.has() is checked in memory.get().isEqualTo auto const & other = assert_cast(to); - return to.has() && memory.get().isEqualTo(other.memory.get()); + return memory.get().isEqualTo(other.memory.get()); } template @@ -917,6 +918,7 @@ void SingleValueDataNumeric::set(const DB::IColumn & column, size_t row_num, template void SingleValueDataNumeric::set(const DB::SingleValueDataBase & to, DB::Arena * arena) { + /// to.has() is checked in memory.get().set auto const & other = assert_cast(to); return memory.get().set(other.memory.get(), arena); } @@ -924,6 +926,7 @@ void SingleValueDataNumeric::set(const DB::SingleValueDataBase & to, DB::Aren template bool SingleValueDataNumeric::setIfSmaller(const DB::SingleValueDataBase & to, DB::Arena * arena) { + /// to.has() is checked in memory.get().setIfSmaller auto const & other = assert_cast(to); return memory.get().setIfSmaller(other.memory.get(), arena); } @@ -931,6 +934,7 @@ bool SingleValueDataNumeric::setIfSmaller(const DB::SingleValueDataBase & to, template bool SingleValueDataNumeric::setIfGreater(const DB::SingleValueDataBase & to, DB::Arena * arena) { + /// to.has() is checked in memory.get().setIfGreater auto const & other = assert_cast(to); return memory.get().setIfGreater(other.memory.get(), arena); } diff --git a/tests/queries/0_stateless/03210_fix_single_value_data_assertion.reference b/tests/queries/0_stateless/03210_fix_single_value_data_assertion.reference index e69de29bb2d..d8f7e13db55 100644 --- a/tests/queries/0_stateless/03210_fix_single_value_data_assertion.reference +++ b/tests/queries/0_stateless/03210_fix_single_value_data_assertion.reference @@ -0,0 +1,12 @@ +0 1 1 1 0 0 0 +1 3 3 3 2 2 2 +2 5 5 5 4 4 4 +3 7 7 7 6 6 6 +4 9 9 9 8 8 8 +5 11 11 11 10 10 10 +6 13 13 13 12 12 12 +7 15 15 15 14 14 14 +8 17 17 17 16 16 16 +9 19 19 19 18 18 18 + +0 107351244 107351244 107351244 107354520 107354520 107354520 diff --git a/tests/queries/0_stateless/03210_fix_single_value_data_assertion.sql b/tests/queries/0_stateless/03210_fix_single_value_data_assertion.sql index 66e62377d6b..a1243ef0b25 100644 --- 
a/tests/queries/0_stateless/03210_fix_single_value_data_assertion.sql +++ b/tests/queries/0_stateless/03210_fix_single_value_data_assertion.sql @@ -1 +1,19 @@ -SELECT intDiv(number, 2) AS k, count(toFixedString(toFixedString('hello', 5), 5)) IGNORE NULLS, sumArgMax(number, toString(number % 20)), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 65537) WHERE toLowCardinality(toLowCardinality(toNullable(21))) GROUP BY k WITH TOTALS ORDER BY k ASC NULLS FIRST LIMIT 255 SETTINGS group_by_overflow_mode = 'any', totals_mode = 'before_having', max_rows_to_group_by = 100000 FORMAT Null +SELECT + intDiv(number, 2) AS k, + sumArgMax(number, number % 20), + sumArgMax(number, leftPad(toString(number % 20), 5, '0')), -- Pad with 0 to preserve number ordering + sumArgMax(number, [number % 20, number % 20]), + sumArgMin(number, number % 20), + sumArgMin(number, leftPad(toString(number % 20), 5, '0')), + sumArgMin(number, [number % 20, number % 20]), +FROM +( + SELECT number + FROM system.numbers + LIMIT 65537 +) +GROUP BY k + WITH TOTALS +ORDER BY k ASC + LIMIT 10 +SETTINGS group_by_overflow_mode = 'any', totals_mode = 'before_having', max_rows_to_group_by = 100000; From ee193ffa019fc0f6104c1c5010ba00cd1993c8bd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 19:25:58 +0200 Subject: [PATCH 0912/2361] Lower max allocation size in query fuzzer --- docker/test/fuzzer/query-fuzzer-tweaks-users.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml index e2a4976b385..d5b876a4c85 100644 --- a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml +++ b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml @@ -20,7 +20,7 @@ - 10G + 5G From 738d659e3bd8e222ff947e206d03d516c7053052 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Jul 2024 17:26:16 +0000 Subject: [PATCH 0913/2361] Do not remove constants from Distributed header if query is executed up to Complete. --- src/Storages/StorageDistributed.cpp | 7 ++- .../02563_analyzer_merge.reference | 1 + .../0_stateless/02563_analyzer_merge.sql | 45 +++++++++++++++++++ 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 07892971ec2..9b417cda177 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -847,8 +847,11 @@ void StorageDistributed::read( /** For distributed tables we do not need constants in header, since we don't send them to remote servers. * Moreover, constants can break some functions like `hostName` that are constants only for local queries. 
*/ - for (auto & column : header) - column.column = column.column->convertToFullColumnIfConst(); + if (processed_stage != QueryProcessingStage::Complete) + { + for (auto & column : header) + column.column = column.column->convertToFullColumnIfConst(); + } modified_query_info.query = queryNodeToDistributedSelectQuery(query_tree_distributed); modified_query_info.query_tree = std::move(query_tree_distributed); diff --git a/tests/queries/0_stateless/02563_analyzer_merge.reference b/tests/queries/0_stateless/02563_analyzer_merge.reference index 8be01c88d6f..2b3cc2d5dfb 100644 --- a/tests/queries/0_stateless/02563_analyzer_merge.reference +++ b/tests/queries/0_stateless/02563_analyzer_merge.reference @@ -1,2 +1,3 @@ 0 Value_0 02563_db test_merge_table_1 1 Value_1 02563_db test_merge_table_2 +91138316-5127-45ac-9c25-4ad8779777b4 160 diff --git a/tests/queries/0_stateless/02563_analyzer_merge.sql b/tests/queries/0_stateless/02563_analyzer_merge.sql index c90f7dcb2a5..217fb7019c4 100644 --- a/tests/queries/0_stateless/02563_analyzer_merge.sql +++ b/tests/queries/0_stateless/02563_analyzer_merge.sql @@ -35,4 +35,49 @@ SELECT id, value, _database, _table FROM 02563_db.test_merge_table ORDER BY id; DROP TABLE 02563_db.test_merge_table; DROP TABLE 02563_db.test_merge_table_1; DROP TABLE 02563_db.test_merge_table_2; + +CREATE TABLE 02563_db.t_1 +( + timestamp DateTime64(9), + a String, + b String +) +ENGINE = MergeTree +PARTITION BY formatDateTime(toStartOfMinute(timestamp), '%Y%m%d%H', 'UTC') +ORDER BY (timestamp, a, b); + +CREATE TABLE 02563_db.dist_t_1 (timestamp DateTime64(9), a String, b String) ENGINE = Distributed('test_shard_localhost', '02563_db', 't_1'); + +CREATE TABLE 02563_db.m ENGINE = Merge('02563_db', '^dist_'); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-13 22:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(30); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-13 23:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(30); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-14 00:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(100); + + +SELECT '91138316-5127-45ac-9c25-4ad8779777b4', + count() +FROM 02563_db.m; + +DROP TABLE 02563_db.t_1; +DROP TABLE 02563_db.dist_t_1; +DROP TABLE 02563_db.m; + DROP DATABASE 02563_db; From 6725546b312ef52675f8fbd5f41d0ef5327a3e8a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 19:35:21 +0200 Subject: [PATCH 0914/2361] Update some tests --- .../02884_parallel_window_functions.sql | 18 ++++++++++-------- .../03143_asof_join_ddb_long.reference | 4 ++-- .../0_stateless/03143_asof_join_ddb_long.sql | 4 ++-- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.sql b/tests/queries/0_stateless/02884_parallel_window_functions.sql index c5ab013a198..ea1cd458c65 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.sql +++ b/tests/queries/0_stateless/02884_parallel_window_functions.sql @@ -1,6 +1,6 @@ -- Tags: long, no-tsan, no-asan, no-ubsan, no-msan, no-debug -CREATE TABLE window_funtion_threading +CREATE TABLE window_function_threading Engine = MergeTree ORDER BY (ac, nw) AS SELECT @@ -20,7 +20,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -40,7 +40,7 @@ FROM 
AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -58,7 +58,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -66,6 +66,8 @@ ORDER BY nw ASC, R DESC LIMIT 10 SETTINGS max_threads = 1; +SET max_rows_to_read = 30000000; + SELECT nw, sum(WR) AS R, @@ -77,7 +79,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 0 GROUP BY ac, @@ -88,7 +90,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 1 GROUP BY ac, @@ -99,7 +101,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 2 GROUP BY ac, @@ -110,7 +112,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 3 GROUP BY ac, diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.reference b/tests/queries/0_stateless/03143_asof_join_ddb_long.reference index 2850a8aba98..ae7f7c805f2 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.reference +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.reference @@ -1,2 +1,2 @@ -49999983751397 10000032 -49999983751397 10000032 +7999995751397 4000032 +7999995751397 4000032 diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index 17a67511030..a635fd2e86a 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -11,7 +11,7 @@ AS toDateTime('1990-03-21 13:00:00') + INTERVAL number MINUTE AS begin, number % 4 AS key, number AS value - FROM numbers(0, 10000000); + FROM numbers(0, 4000000); CREATE TABLE skewed_probe ENGINE = MergeTree ORDER BY (key, begin) AS @@ -33,7 +33,7 @@ AS SELECT toDateTime('1990-03-21 13:00:01') + INTERVAL number MINUTE AS begin, 3 AS key - FROM numbers(0, 10000000); + FROM numbers(0, 4000000); SELECT SUM(value), COUNT(*) From a3d5b2d29014bb3894982cdb1cadd65448ecdf63 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 25 Jul 2024 19:39:20 +0200 Subject: [PATCH 0915/2361] Update ZooKeeperImpl.cpp --- src/Common/ZooKeeper/ZooKeeperImpl.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 2728f953bea..d01fc341a63 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -1014,9 +1014,6 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea LOG_INFO(log, "Finalizing session {}. finalization_started: {}, queue_finished: {}, reason: '{}'", session_id, already_started, requests_queue.isFinished(), reason); - /// Reset the original index. 
- original_index = -1; - auto expire_session_if_not_expired = [&] { /// No new requests will appear in queue after finish() From f32a0716b9bb42a09ece308a3ca64626099bfb1e Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 25 Jul 2024 19:45:06 +0200 Subject: [PATCH 0916/2361] Update 02842_truncate_database.sql --- tests/queries/0_stateless/02842_truncate_database.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02842_truncate_database.sql b/tests/queries/0_stateless/02842_truncate_database.sql index be92108ccb8..bcd818f55ba 100644 --- a/tests/queries/0_stateless/02842_truncate_database.sql +++ b/tests/queries/0_stateless/02842_truncate_database.sql @@ -73,7 +73,7 @@ SELECT * FROM dest_dictionary; -- {serverError UNKNOWN_TABLE} SHOW TABLES FROM test_truncate_database; SHOW DICTIONARIES FROM test_truncate_database; -CREATE TABLE new_table (x UInt16) ENGINE = ReplicatedMergeTree ORDER BY x; +CREATE TABLE new_table (x UInt16) ENGINE = MergeTree ORDER BY x; select 'new tables'; SHOW TABLES FROM test_truncate_database; From e46512a3bed4cd260042acbde4fcbef5cb83e032 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Thu, 25 Jul 2024 16:59:10 +0000 Subject: [PATCH 0917/2361] Try fix 02481_async_insert_race_long https://github.com/ClickHouse/clickhouse-private/issues/13101 My assumption is that for high --insert_keeper_fault_injection_probability values, entries don't leave the sink, causing the queue memory consumption to grow, eventually hitting the memory limit and the MEMORY_LIMIT_EXCEEDED error. Later, we may raise the insert_keeper_fault_injection_probability to a small positive value. --- tests/queries/0_stateless/02481_async_insert_race_long.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02481_async_insert_race_long.sh b/tests/queries/0_stateless/02481_async_insert_race_long.sh index d8153967e9a..b0088017d32 100755 --- a/tests/queries/0_stateless/02481_async_insert_race_long.sh +++ b/tests/queries/0_stateless/02481_async_insert_race_long.sh @@ -13,7 +13,7 @@ function insert1() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - ${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 0 -q 'INSERT INTO async_inserts_race FORMAT CSV 1,"a"' + ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 0 -q 'INSERT INTO async_inserts_race FORMAT CSV 1,"a"' done } @@ -21,7 +21,7 @@ function insert2() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - ${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 0 -q 'INSERT INTO async_inserts_race FORMAT JSONEachRow {"id": 5, "s": "e"} {"id": 6, "s": "f"}' + ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 0 -q 'INSERT INTO async_inserts_race FORMAT JSONEachRow {"id": 5, "s": "e"} {"id": 6, "s": "f"}' done } @@ -29,7 +29,7 @@ function insert3() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - ${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" & + ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" & sleep 0.05 done From eb4ec0912ad3a1e89ea7aec424366bc268262e11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 20:21:37 +0200 Subject: [PATCH 0918/2361] Rename bad setting --- CHANGELOG.md | 2 +- 
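A hedged usage sketch for the setting renamed by the patch above (the table, data and format choice are illustrative assumptions, not taken from the diff):

``` sql
-- Hypothetical table with mixed-case column names.
CREATE TABLE json_demo (Id UInt64, Name String) ENGINE = Memory;

-- New name introduced by this patch (previously input_format_json_ignore_key_case).
SET input_format_json_case_insensitive_column_matching = 1;

-- The lower-case JSON keys are expected to match the columns Id and Name.
INSERT INTO json_demo FORMAT JSONEachRow {"id": 1, "name": "alpha"};

SELECT * FROM json_demo;
```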
src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Formats/FormatFactory.cpp | 2 +- src/Formats/FormatSettings.h | 2 +- src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp | 4 ++-- tests/queries/0_stateless/03013_json_key_ignore_case.sh | 4 ++-- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0933bd6544..07b37835dda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,7 +64,7 @@ * Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)). * The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)). * Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). -* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_ignore_key_case`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). +* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). * Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). * In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 3f1ecc47f79..e10cf3fd745 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1122,7 +1122,7 @@ class IColumn; M(Bool, input_format_json_defaults_for_missing_elements_in_named_tuple, true, "Insert default value in named tuple element if it's missing in json object", 0) \ M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \ M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. 
Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \ - M(Bool, input_format_json_ignore_key_case, false, "Ignore json key case while read json field from string", 0) \ + M(Bool, input_format_json_case_insensitive_column_matching, false, "Ignore case when matching JSON keys with CH columns", 0) \ M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index acd119c159b..9faf77e9087 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,7 +64,7 @@ static std::initializer_list Date: Thu, 25 Jul 2024 19:17:38 +0000 Subject: [PATCH 0919/2361] Fix: order by all with parallel replicas --- src/Analyzer/QueryTreeBuilder.cpp | 7 ++++++- ...09_parallel_replicas_order_by_all.reference | 12 ++++++++++++ .../03209_parallel_replicas_order_by_all.sql | 18 ++++++++++++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference create mode 100644 tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index a62b6e56ac5..ed1227b0f00 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -268,6 +268,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q } } + const auto enable_order_by_all = updated_context->getSettingsRef().enable_order_by_all; + auto current_query_tree = std::make_shared(std::move(updated_context), std::move(settings_changes)); current_query_tree->setIsSubquery(is_subquery); @@ -281,7 +283,10 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup); current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets); current_query_tree->setIsGroupByAll(select_query_typed.group_by_all); - current_query_tree->setIsOrderByAll(select_query_typed.order_by_all); + /// order_by_all flag in AST is set w/o consideration of `enable_order_by_all` setting + /// since SETTINGS section has not been parsed yet, - so, check the setting here + if (enable_order_by_all) + current_query_tree->setIsOrderByAll(select_query_typed.order_by_all); current_query_tree->setOriginalAST(select_query); auto current_context = current_query_tree->getContext(); diff --git a/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference new file mode 100644 index 00000000000..fd453d088a6 --- /dev/null +++ b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.reference @@ -0,0 +1,12 @@ +-- { echoOn } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=0; +B 3 10 +D 1 20 +A 2 30 +C \N 40 +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=1; +B 3 10 +D 1 20 +A 2 30 +C \N 40 
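(Editorial aside, not part of the test's reference output or SQL files: a minimal sketch of `ORDER BY ALL` itself, using a hypothetical table `t` that has no column named `all` and the default `enable_order_by_all = 1`, so the clause sorts by every selected column.)

```sql
-- hypothetical table: ORDER BY ALL sorts by a, then b
CREATE TABLE t (a String, b Nullable(Int32)) ENGINE = Memory;
INSERT INTO t VALUES ('B', 3), ('A', 2), ('A', 1);
SELECT * FROM t ORDER BY ALL;  -- ('A', 1), ('A', 2), ('B', 3)
```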
+DROP TABLE order_by_all SYNC; diff --git a/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql new file mode 100644 index 00000000000..46a3ab4d171 --- /dev/null +++ b/tests/queries/0_stateless/03209_parallel_replicas_order_by_all.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS order_by_all SYNC; +CREATE TABLE order_by_all +( + a String, + b Nullable(Int32), + all UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_03210', 'r1') ORDER BY tuple(); + +INSERT INTO order_by_all VALUES ('B', 3, 10), ('C', NULL, 40), ('D', 1, 20), ('A', 2, 30); + +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='parallel_replicas'; +SET enable_order_by_all = 0; +-- { echoOn } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=0; +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=1; + +DROP TABLE order_by_all SYNC; From 1ba4790511e1a06af8fb85e01767ce95866ee2a8 Mon Sep 17 00:00:00 2001 From: Blargian Date: Thu, 25 Jul 2024 21:18:48 +0200 Subject: [PATCH 0920/2361] Review changes --- .../functions/type-conversion-functions.md | 550 ++++++++++-------- 1 file changed, 310 insertions(+), 240 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 057083d317f..844d957d538 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -51,7 +51,7 @@ SETTINGS cast_keep_nullable = 1 ## toInt8 -Converts an input value to a value of type `Int8`. +Converts an input value to a value of type [`Int8`](../data-types/int-uint.md). Throws an exception in case of an error. **Syntax** @@ -61,10 +61,20 @@ toInt8(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Unsupported types: +- Float values `NaN` and `Inf` throw an exception. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8('0xc0fe');` :::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +For example: `SELECT toInt8(128) == -128;`, `SELECT toInt8(128.0) == -128;`, `SELECT toInt8('128') == -128;`. ::: **Returned value** @@ -72,11 +82,7 @@ Binary, octal, and hexadecimal representations of numbers are not supported. Lea - 8-bit integer value. [Int8](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. 
+The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -106,32 +112,33 @@ Result: ## toInt8OrZero -Like [`toInt8`](#toint8), it takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int8`. If unsuccessful, returns `0`. +Like [`toInt8`](#toint8), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** ```sql -toInt8OrZero(expr) +toInt8OrZero(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `0` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrZero('0xc0fe');`. +- If the input value cannot be represented within the bounds of [toInt16](../data-types/int-uint.md), and the result over or under flows. **Returned value** - 8-bit integer value if successful, otherwise `0`. [Int8](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -160,32 +167,33 @@ Result: ## toInt8OrNull -Like [`toInt8`](#toint8), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int8`. If unsuccessful, returns `NULL`. +Like [`toInt8`](#toint8), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int8`](../data-types/int-uint.md). If unsuccessful, returns [`NULL`](../data-types/nullable.md). **Syntax** ```sql -toInt8OrNull(expr) +toInt8OrNull(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `\N` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrNull('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), and the result over or under flows. **Returned value** - 8-bit integer value if successful, otherwise `NULL`. 
[Int8](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -212,7 +220,7 @@ Result: ## toInt8OrDefault -Like [`toInt8`](#toint8), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int8`. If unsuccessful, returns the default type value. +Like [`toInt8`](#toint8), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int8`](../data-types/int-uint.md). If unsuccessful, returns the default type value. **Syntax** @@ -222,26 +230,28 @@ toInt8OrDefault(expr, def) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). - `def` — The default value to return if parsing to type `Int8` is unsuccessful. [Int8](../data-types/int-uint.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Types for which the default value is returned: +- Float values `NaN` and `Inf` return the default value. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrDefault('0xc0fe', CAST('-1', 'Int8'));` +- If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 8-bit integer value if successful, otherwise returns the default value. [Int8](../data-types/int-uint.md). :::note -- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. - The default value type should be the same as the cast type. ::: -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. -::: - **Example** Query: @@ -268,7 +278,7 @@ Result: ## toInt16 -Converts an input value to a value of type `Int16`. +Converts an input value to a value of type [`Int16`](../data-types/int-uint.md). Throws an exception in case of an error. **Syntax** @@ -278,10 +288,20 @@ toInt16(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). 
+ +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Unsupported types: +- Float values `NaN` and `Inf` throw an exception. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16('0xc0fe');` :::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +If the input value cannot be represented within the bounds of [toInt16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +For example: `SELECT toInt16(32768) == -32768;`, `SELECT toInt16(32768) == -32768;`, `SELECT toInt16('32768') == -32768;`. ::: **Returned value** @@ -289,11 +309,7 @@ Binary, octal, and hexadecimal representations of numbers are not supported. Lea - 16-bit integer value. [Int16](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -323,32 +339,33 @@ Result: ## toInt16OrZero -Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int16`. If unsuccessful, returns `0`. +Like [`toInt16`](#toint16), this function converts an input value to a value of type [Int16](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** ```sql -toInt16OrZero(expr) +toInt16OrZero(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `0` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16OrZero('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 16-bit integer value if successful, otherwise `0`. [Int16](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -377,32 +394,33 @@ Result: ## toInt16OrNull -Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int16`. 
If unsuccessful, returns `NULL`. +Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int16`](../data-types/int-uint.md). If unsuccessful, returns [`NULL`](../data-types/nullable.md). **Syntax** ```sql -toInt16OrNull(expr) +toInt16OrNull(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `\N` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16OrNull('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 16-bit integer value if successful, otherwise `NULL`. [Int16](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -431,7 +449,7 @@ Result: ## toInt16OrDefault -Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int16`. If unsuccessful, returns the default type value. +Like [`toInt16`](#toint16), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int16`](../data-types/int-uint.md). If unsuccessful, returns the default type value. **Syntax** @@ -441,26 +459,28 @@ toInt16OrDefault(expr, def) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). - `def` — The default value to return if parsing to type `Int16` is unsuccessful. [Int8](../data-types/int-uint.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Types for which the default value is returned: +- Float values `NaN` and `Inf` return the default value. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt16OrDefault('0xc0fe', CAST('-1', 'Int16'));` +- If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 16-bit integer value if successful, otherwise returns the default value. 
[Int16](../data-types/int-uint.md). :::note -- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. - The default value type should be the same as the cast type. ::: -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. -::: - **Example** Query: @@ -485,7 +505,7 @@ Result: ## toInt32 -Converts an input value to a value of type `Int32`. +Converts an input value to a value of type [`Int32`](../data-types/int-uint.md). Throws an exception in case of an error. **Syntax** @@ -495,10 +515,25 @@ toInt32(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Unsupported types: +- Float values `NaN` and `Inf` throw an exception. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32('0xc0fe');` :::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +If the input value cannot be represented within the bounds of [toInt16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +For example: +``` +SELECT toInt32(2147483648) == -2147483648; +SELECT toInt32(2147483648.0) == -2147483648; +SELECT toInt32('2147483648') == -2147483648; +``` ::: **Returned value** @@ -506,11 +541,7 @@ Binary, octal, and hexadecimal representations of numbers are not supported. Lea - 32-bit integer value. [Int32](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -540,32 +571,34 @@ Result: ## toInt32OrZero -Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int32`. If unsuccessful, returns `0`. +Like [`toInt32`](#toint32), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** ```sql -toInt32OrZero(expr) +toInt32OrZero(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `0` is returned: +- String representations of ordinary Float32/64 values. 
+- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32OrZero('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md) and the result over or under flows. -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: **Returned value** - 32-bit integer value if successful, otherwise `0`. [Int32](../data-types/int-uint.md) :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncate fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncate fractional digits of numbers. ::: **Example** @@ -588,35 +621,36 @@ Result: - [`toInt32`](#toint32). - [`toInt32OrNull`](#toint32ornull). - [`toInt32OrDefault`](#toint32ordefault). -- + ## toInt32OrNull -Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int32`. If unsuccessful, returns `NULL`. +Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int32`](../data-types/int-uint.md). If unsuccessful, returns [`NULL`](../data-types/nullable.md). **Syntax** ```sql -toInt32OrNull(expr) +toInt32OrNull(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `\N` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32OrNull('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 32-bit integer value if successful, otherwise `NULL`. [Int32](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -643,7 +677,7 @@ Result: ## toInt32OrDefault -Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int32`. If unsuccessful, returns the default type value. 
+Like [`toInt32`](#toint32), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int32`](../data-types/int-uint.md). If unsuccessful, returns the default type value. **Syntax** @@ -653,24 +687,26 @@ toInt32OrDefault(expr, def) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). - `def` — The default value to return if parsing to type `Int32` is unsuccessful. [Int32](../data-types/int-uint.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Types for which the default value is returned: +- Float values `NaN` and `Inf` return the default value. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt32OrDefault('0xc0fe', CAST('-1', 'Int32'));` +- If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 32-bit integer value if successful, otherwise returns the default value. [Int32](../data-types/int-uint.md). :::note -- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. - The default value type should be the same as the cast type. - ::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. ::: **Example** @@ -697,7 +733,7 @@ Result: ## toInt64 -Converts an input value to a value of type `Int64`. +Converts an input value to a value of type [`Int64`](../data-types/int-uint.md). Throws an exception in case of an error. **Syntax** @@ -707,10 +743,26 @@ toInt64(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Unsupported types: +- Float values `NaN` and `Inf` throw an exception. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64('0xc0fe');` :::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +For example: + +``` +SELECT toInt64(9223372036854775808) == -9223372036854775808; +SELECT toInt64(9223372036854775808.0) == -9223372036854775808; +SELECT toInt64('9223372036854775808') == --9223372036854775808; +``` ::: **Returned value** @@ -718,11 +770,7 @@ Binary, octal, and hexadecimal representations of numbers are not supported. 
Lea - 64-bit integer value. [Int64](../data-types/int-uint.md). [Int64](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -752,32 +800,33 @@ Result: ## toInt64OrZero -Like [`toInt64`](#toint64), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int64`. If unsuccessful, returns `0`. +Like [`toInt64`](#toint64), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** ```sql -toInt64OrZero(expr) +toInt64OrZero(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `0` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64OrZero('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 64-bit integer value if successful, otherwise `0`. [Int64](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -806,32 +855,33 @@ Result: ## toInt64OrNull -Like [`toInt64`], takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int64`. If unsuccessful, returns `NULL`. +Like [`toInt64`], takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int64`](../data-types/nullable.md). If unsuccessful, returns [`NULL`](../data-types/nullable.md). **Syntax** ```sql -toInt64OrNull(expr) +toInt64OrNull(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. 
-::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `\N` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64OrNull('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md) and the result over or under flows. **Returned value** - Integer value of type `Int64` if successful, otherwise `NULL`. [Int64](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -860,7 +910,7 @@ Result: ## toInt64OrDefault -Like [`toInt64`](#toint64), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int64`. If unsuccessful, returns the default type value. +Like [`toInt64`](#toint64), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int64`](../data-types/nullable.md). If unsuccessful, returns the default type value. **Syntax** @@ -870,24 +920,26 @@ toInt64OrDefault(expr, def) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). - `def` — The default value to return if parsing to type `Int64` is unsuccessful. [Int64](../data-types/int-uint.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Types for which the default value is returned: +- Float values `NaN` and `Inf` return the default value. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt64OrDefault('0xc0fe', CAST('-1', 'Int64'));` +- If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md) and the result over or under flows. **Returned value** - Integer value of type `Int64` if successful, otherwise returns the default value. [Int64](../data-types/int-uint.md). :::note -- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. - The default value type should be the same as the cast type. - ::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. 
::: **Example** @@ -916,7 +968,7 @@ Result: ## toInt128 -Converts an input value to a value of type `Int128`. +Converts an input value to a value of type [`Int128`](../data-types/int-uint.md). Throws an exception in case of an error. **Syntax** @@ -926,10 +978,19 @@ toInt128(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Unsupported types: +- Float values `NaN` and `Inf` throw an exception. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128('0xc0fe');` :::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md), the result over or under flows. This is not considered an error. ::: **Returned value** @@ -937,11 +998,7 @@ Binary, octal, and hexadecimal representations of numbers are not supported. Lea - 128-bit integer value. [Int128](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -971,7 +1028,7 @@ Result: ## toInt128OrZero -Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int128`. If unsuccessful, returns `0`. +Like [`toInt128`](#toint128), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** @@ -981,22 +1038,23 @@ toInt128OrZero(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `0` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrZero('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 128-bit integer value if successful, otherwise `0`. [Int128](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. 
-::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -1025,32 +1083,33 @@ Result: ## toInt128OrNull -Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int128`. If unsuccessful, returns `NULL`. +Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int128`](../data-types/int-uint.md). If unsuccessful, returns [`NULL`](../data-types/nullable.md). **Syntax** ```sql -toInt128OrNull(expr) +toInt128OrNull(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `\N` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrNull('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 128-bit integer value if successful, otherwise `NULL`. [Int128](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -1079,7 +1138,7 @@ Result: ## toInt128OrDefault -Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int128`. If unsuccessful, returns the default type value. +Like [`toInt128`](#toint128), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int128`](../data-types/int-uint.md). If unsuccessful, returns the default type value. **Syntax** @@ -1089,26 +1148,28 @@ toInt128OrDefault(expr, def) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). - `def` — The default value to return if parsing to type `Int128` is unsuccessful. [Int128](../data-types/int-uint.md). 
-:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Types for which the default value is returned: +- Float values `NaN` and `Inf` return the default value. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrDefault('0xc0fe', CAST('-1', 'Int128'));` +- If the input value cannot be represented within the bounds of [Int128](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 128-bit integer value if successful, otherwise returns the default value. [Int128](../data-types/int-uint.md). :::note -- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. - The default value type should be the same as the cast type. ::: -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. -::: - **Example** Query: @@ -1135,7 +1196,7 @@ Result: ## toInt256 -Converts an input value to a value of type `Int256`. +Converts an input value to a value of type [`Int256`](../data-types/int-uint.md). Throws an exception in case of an error. **Syntax** @@ -1145,10 +1206,19 @@ toInt256(expr) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Unsupported types: +- Float values `NaN` and `Inf` throw an exception. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256('0xc0fe');` :::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. +If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md), the result over or under flows. This is not considered an error. ::: **Returned value** @@ -1156,11 +1226,7 @@ Binary, octal, and hexadecimal representations of numbers are not supported. Lea - 256-bit integer value. [Int256](../data-types/int-uint.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -1190,32 +1256,33 @@ Result: ## toInt256OrZero -Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int256`. If unsuccessful, returns `0`. 
+Like [`toInt256`](#toint256), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** ```sql -toInt256OrZero(expr) +toInt256OrZero(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `0` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256OrZero('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 256-bit integer value if successful, otherwise `0`. [Int256](../data-types/int-uint.md). :::note -Functions uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -1244,32 +1311,33 @@ Result: ## toInt256OrNull -Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int256`. If unsuccessful, returns `NULL`. +Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int256`](../data-types/int-uint.md). If unsuccessful, returns [`NULL`](../data-types/nullable.md). **Syntax** ```sql -toInt256OrNull(expr) +toInt256OrNull(x) ``` **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `x` — A String representation of a number. [String](../data-types/string.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- String representations of (U)Int8/16/32/128/256 + +Types for which `\N` is returned: +- String representations of ordinary Float32/64 values. +- String representations of Float values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256OrNull('0xc0fe');`. +- If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 256-bit integer value if successful, otherwise `NULL`. [Int256](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note -Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. -::: - -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. 
Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. +The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. ::: **Example** @@ -1298,7 +1366,7 @@ Result: ## toInt256OrDefault -Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type `Int256`. If unsuccessful, returns the default type value. +Like [`toInt256`](#toint256), takes an argument of type [String](../data-types/string.md) and tries to parse it to type [`Int256`](../data-types/int-uint.md). If unsuccessful, returns the default type value. **Syntax** @@ -1308,26 +1376,28 @@ toInt256OrDefault(expr, def) **Arguments** -- `expr` — Expression returning a number or a string with the decimal representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). - `def` — The default value to return if parsing to type `Int256` is unsuccessful. [Int256](../data-types/int-uint.md). -:::note -Binary, octal, and hexadecimal representations of numbers are not supported. Leading zeroes are stripped. -::: +Supported types: +- (U)Int8/16/32/64/128/256 +- Float* +- String representations of (U)Int8/16/32/128/256 + +Types for which the default value is returned: +- Float values `NaN` and `Inf` return the default value. +- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrDefault('0xc0fe', CAST('-1', 'Int256'));` +- If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md) and the result over or under flows. **Returned value** - 256-bit integer value if successful, otherwise returns the default value. [Int256](../data-types/int-uint.md). :::note -- Function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. +- The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. - The default value type should be the same as the cast type. ::: -:::danger -An exception is thrown for [NaN and Inf](../data-types/float.md/#data_type-float-nan-inf) arguments. Keep in mind [numeric conversions issues](#common-issues-with-data-conversion), when using this function. -::: - **Example** Query: From 3c1004aee4a3b1f3e1b0bd91a1b02c6c9e16c832 Mon Sep 17 00:00:00 2001 From: Blargian Date: Thu, 25 Jul 2024 21:26:14 +0200 Subject: [PATCH 0921/2361] Fix typo --- .../functions/type-conversion-functions.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 844d957d538..4326753216e 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -131,7 +131,7 @@ Types for which `0` is returned: - String representations of ordinary Float32/64 values. - String representations of Float values `NaN` and `Inf`. - String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrZero('0xc0fe');`. 
-- If the input value cannot be represented within the bounds of [toInt16](../data-types/int-uint.md), and the result over or under flows. +- If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), and the result over or under flows. **Returned value** @@ -186,7 +186,7 @@ Types for which `\N` is returned: - String representations of ordinary Float32/64 values. - String representations of Float values `NaN` and `Inf`. - String representations of binary and hexadecimal values, e.g. `SELECT toInt8OrNull('0xc0fe');`. -- If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), and the result over or under flows. +- If the input value cannot be represented within the bounds of [Int8](../data-types/int-uint.md), and the result over or under flows. **Returned value** @@ -300,7 +300,7 @@ Unsupported types: - String representations of binary and hexadecimal values, e.g. `SELECT toInt16('0xc0fe');` :::note -If the input value cannot be represented within the bounds of [toInt16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. For example: `SELECT toInt16(32768) == -32768;`, `SELECT toInt16(32768) == -32768;`, `SELECT toInt16('32768') == -32768;`. ::: @@ -527,7 +527,7 @@ Unsupported types: - String representations of binary and hexadecimal values, e.g. `SELECT toInt32('0xc0fe');` :::note -If the input value cannot be represented within the bounds of [toInt16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +If the input value cannot be represented within the bounds of [Int32](../data-types/int-uint.md), the result over or under flows. This is not considered an error. For example: ``` SELECT toInt32(2147483648) == -2147483648; @@ -755,7 +755,7 @@ Unsupported types: - String representations of binary and hexadecimal values, e.g. `SELECT toInt64('0xc0fe');` :::note -If the input value cannot be represented within the bounds of [Int16](../data-types/int-uint.md), the result over or under flows. This is not considered an error. +If the input value cannot be represented within the bounds of [Int64](../data-types/int-uint.md), the result over or under flows. This is not considered an error. For example: ``` @@ -1386,7 +1386,7 @@ Supported types: Types for which the default value is returned: - Float values `NaN` and `Inf` return the default value. -- String representations of binary and hexadecimal values, e.g. `SELECT toInt128OrDefault('0xc0fe', CAST('-1', 'Int256'));` +- String representations of binary and hexadecimal values, e.g. `SELECT toInt256OrDefault('0xc0fe', CAST('-1', 'Int256'));` - If the input value cannot be represented within the bounds of [Int256](../data-types/int-uint.md) and the result over or under flows. 
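(Editorial aside: a small sketch contrasting the fallback variants described above, using the Int8 family for brevity; the expected results follow the examples documented on this page.)

```sql
SELECT
    toInt8OrZero('abc')                         AS or_zero,     -- 0: not a parsable number
    toInt8OrNull('abc')                         AS or_null,     -- NULL instead of 0
    toInt8OrDefault('abc', CAST('-1', 'Int8'))  AS or_default;  -- -1: the supplied default
```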
**Returned value** From 7b8c41818bcec1c567c85b15d916fd2e064b7482 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 25 Jul 2024 19:34:41 +0000 Subject: [PATCH 0922/2361] Uncomment accidentally commented out code in QueryProfiler --- src/Common/QueryProfiler.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index a7717a4288a..f6524088102 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -57,7 +57,7 @@ namespace auto saved_errno = errno; /// We must restore previous value of errno in signal handler. -#if defined(OS_LINUX) && false //asdqwe +#if defined(OS_LINUX) if (info) { int overrun_count = info->si_overrun; @@ -92,7 +92,7 @@ namespace constexpr bool sanitizer = false; #endif - //asdqwe asynchronous_stack_unwinding = true; + asynchronous_stack_unwinding = true; if (sanitizer || 0 == sigsetjmp(asynchronous_stack_unwinding_signal_jump_buffer, 1)) { stack_trace.emplace(signal_context); From a7441669aa87b1551d2211ec4c3e550aaaa86b41 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 21:43:03 +0200 Subject: [PATCH 0923/2361] Update some tests --- ...675_profile_events_from_query_log_and_client.sh | 2 +- .../02884_parallel_window_functions.sql | 2 +- ...967_parallel_replicas_join_algo_and_analyzer.sh | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index e346d9893a7..1cf65ed8120 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "INSERT TO S3" $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1; -" 2>&1 | grep -o -e '\ \[\ .*\ \]\ S3.*:\ .*\ ' | grep -v 'Microseconds' | grep -v 'S3DiskConnections' | grep -v 'S3DiskAddresses' | sort +" 2>&1 | grep -o -e '\ \[\ .*\ \]\ S3.*:\ .*\ ' | grep -v 'Microseconds' | grep -v 'S3DiskConnections' | grep -v 'S3DiskAddresses' | grep -v 'RequestThrottlerCount' | sort echo "CHECK WITH query_log" $CLICKHOUSE_CLIENT -nq " diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.sql b/tests/queries/0_stateless/02884_parallel_window_functions.sql index ea1cd458c65..2207c90a4ee 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.sql +++ b/tests/queries/0_stateless/02884_parallel_window_functions.sql @@ -66,7 +66,7 @@ ORDER BY nw ASC, R DESC LIMIT 10 SETTINGS max_threads = 1; -SET max_rows_to_read = 30000000; +SET max_rows_to_read = 40000000; SELECT nw, diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh index 2840482da6d..8cefa873940 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh @@ -90,7 +90,7 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by 
l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -128,7 +128,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -154,7 +154,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=0) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -179,7 +179,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -205,7 +205,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas 
= 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -230,7 +230,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='hash') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -255,7 +255,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | From ba5a07bcc7b78673e58d0501e85b59e929215bac Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 21:47:41 +0200 Subject: [PATCH 0924/2361] Better tests --- .../01301_aggregate_state_exception_memory_leak.reference | 2 +- .../0_stateless/01301_aggregate_state_exception_memory_leak.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference index b20e7415f52..6282bf366d0 100644 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference @@ -1,2 +1,2 @@ -Memory limit (for query) exceeded +Memory limit exceeded Ok diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh index 9dd800ceb09..266518d11d4 100755 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh @@ -16,5 +16,5 @@ for _ in {1..1000}; do if [[ $elapsed -gt 30 ]]; then break fi -done 2>&1 | grep -o -F 'Memory limit (for query) exceeded' | uniq +done 2>&1 | grep -o -P 'Memory limit .+ exceeded' | sed -r -e 's/(Memory limit)(.+)( exceeded)/\1\3/' | uniq echo 'Ok' From f2e83f092d1f677c4e0240e749f96766ff6e205c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 25 Jul 2024 21:56:42 +0200 Subject: [PATCH 0925/2361] Patch getauxval for tsan re-exec 
--- base/glibc-compatibility/CMakeLists.txt | 10 ++++++++++ base/glibc-compatibility/musl/getauxval.c | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index c967fa5b11b..8948e25cb8e 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -18,6 +18,16 @@ if (GLIBC_COMPATIBILITY) message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.") endif () + if (SANITIZE STREQUAL thread) + # Disable TSAN instrumentation that conflicts with re-exec due to high ASLR entropy using getauxval + # See longer comment in __auxv_init_procfs + # In the case of tsan we need to make sure getauxval is not instrumented as that would introduce tsan + # internal calls to functions that depend on a state that isn't initialized yet + set_source_files_properties( + musl/getauxval.c + PROPERTIES COMPILE_FLAGS "-mllvm -tsan-instrument-func-entry-exit=false") + endif() + # Need to omit frame pointers to match the performance of glibc set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer") diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c index 28cb0f8d005..ec2cce1e4aa 100644 --- a/base/glibc-compatibility/musl/getauxval.c +++ b/base/glibc-compatibility/musl/getauxval.c @@ -102,7 +102,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type) /// most global variables aren't initialized or available yet, so we can't initiate the auxiliary vector. /// Normal glibc / musl getauxval doesn't have this problem since they initiate their auxval vector at the very /// start of __libc_start_main (just keeping track of argv+argc+1), but we don't have such option (otherwise - // this complexity of reading "/proc/self/auxv" or using __environ would not be necessary). + /// this complexity of reading "/proc/self/auxv" or using __environ would not be necessary). 
/// To avoid this crashes on the re-exec call (see above how it would fail when creating `aux`, and if we used /// __auxv_init_environ then it would SIGSEV on READing `__environ`) we capture this call for `AT_EXECFN` and @@ -237,7 +237,7 @@ static unsigned long NO_SANITIZE_THREAD __auxv_init_environ(unsigned long type) // - __auxv_init_procfs -> __auxv_init_environ -> __getauxval_environ static void * volatile getauxval_func = (void *)__auxv_init_procfs; -unsigned long getauxval(unsigned long type) +unsigned long NO_SANITIZE_THREAD getauxval(unsigned long type) { return ((unsigned long (*)(unsigned long))getauxval_func)(type); } From f81e8aa345d64f5fbcae103f92cdc649f0d82d24 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 25 Jul 2024 22:08:32 +0200 Subject: [PATCH 0926/2361] Update tests --- .../queries/0_stateless/01010_pmj_right_table_memory_limits.sql | 2 ++ tests/queries/0_stateless/01304_direct_io_long.sh | 2 +- .../01730_distributed_group_by_no_merge_order_by_long.sql | 2 +- tests/queries/0_stateless/02151_lc_prefetch.sql | 1 + .../queries/0_stateless/02344_insert_profile_events_stress.sql | 1 + ...02354_distributed_with_external_aggregation_memory_usage.sql | 2 ++ .../02481_parquet_list_monotonically_increasing_offsets.sh | 2 +- tests/queries/0_stateless/02497_remote_disk_fat_column.sql | 2 +- .../02896_max_execution_time_with_break_overflow_mode.sql | 2 +- tests/queries/0_stateless/03143_asof_join_ddb_long.sql | 1 + 10 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql index a090be85221..b8f2596f3d5 100644 --- a/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql +++ b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel, no-fasttest, no-random-settings +SET max_bytes_in_join = 0; +SET max_rows_in_join = 0; SET max_memory_usage = 32000000; SET join_on_disk_max_files_to_merge = 4; diff --git a/tests/queries/0_stateless/01304_direct_io_long.sh b/tests/queries/0_stateless/01304_direct_io_long.sh index a66239058ab..35d1440bcb5 100755 --- a/tests/queries/0_stateless/01304_direct_io_long.sh +++ b/tests/queries/0_stateless/01304_direct_io_long.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --max_rows_to_read 50M --multiquery " INSERT INTO bug SELECT rand64(), '2020-06-07' FROM numbers(50000000); OPTIMIZE TABLE bug FINAL;" LOG="$CLICKHOUSE_TMP/err-$CLICKHOUSE_DATABASE" -$CLICKHOUSE_BENCHMARK --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG" +$CLICKHOUSE_BENCHMARK --max_rows_to_read 51M --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG" cat "$LOG" | grep Exception cat "$LOG" | grep Loaded diff --git a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql index 6625ad916e8..6172afbc699 100644 --- a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql +++ b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql @@ -12,7 +12,7 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n -- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block, -- so the initiator will first receive 
all blocks from remotes and only after start merging, -- and will hit the memory limit. -select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED } +select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296, max_rows_to_read=0; -- { serverError MEMORY_LIMIT_EXCEEDED } -- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently, -- since they don't need to wait until the aggregation will be finished, diff --git a/tests/queries/0_stateless/02151_lc_prefetch.sql b/tests/queries/0_stateless/02151_lc_prefetch.sql index c2b97231145..f8c76038120 100644 --- a/tests/queries/0_stateless/02151_lc_prefetch.sql +++ b/tests/queries/0_stateless/02151_lc_prefetch.sql @@ -3,5 +3,6 @@ drop table if exists tab_lc; CREATE TABLE tab_lc (x UInt64, y LowCardinality(String)) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; insert into tab_lc select number, toString(number % 10) from numbers(20000000); optimize table tab_lc; +SET max_rows_to_read = '21M'; select count() from tab_lc where y == '0' settings local_filesystem_read_prefetch=1; drop table if exists tab_lc; diff --git a/tests/queries/0_stateless/02344_insert_profile_events_stress.sql b/tests/queries/0_stateless/02344_insert_profile_events_stress.sql index e9a790bea5d..902e1da543c 100644 --- a/tests/queries/0_stateless/02344_insert_profile_events_stress.sql +++ b/tests/queries/0_stateless/02344_insert_profile_events_stress.sql @@ -1,4 +1,5 @@ -- Tags: no-parallel, long, no-debug, no-tsan, no-msan, no-asan +SET max_rows_to_read = 0; create table data_02344 (key Int) engine=Null; -- 3e9 rows is enough to fill the socket buffer and cause INSERT hung. 
diff --git a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql index 105fb500461..82eb4c93e3d 100644 --- a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql +++ b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql @@ -1,5 +1,7 @@ -- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage +SET max_rows_to_read = '51M'; + DROP TABLE IF EXISTS t_2354_dist_with_external_aggr; create table t_2354_dist_with_external_aggr(a UInt64, b String, c FixedString(100)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; diff --git a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh index 55e6ac2f758..3e28e76d6da 100755 --- a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh +++ b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh @@ -11,7 +11,7 @@ echo "Parquet" DATA_FILE=$CUR_DIR/data_parquet/list_monotonically_increasing_offsets.parquet ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json Nullable(String)) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO parquet_load FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load" ${CLICKHOUSE_CLIENT} --query="drop table parquet_load" diff --git a/tests/queries/0_stateless/02497_remote_disk_fat_column.sql b/tests/queries/0_stateless/02497_remote_disk_fat_column.sql index d97109b66f3..65519296602 100644 --- a/tests/queries/0_stateless/02497_remote_disk_fat_column.sql +++ b/tests/queries/0_stateless/02497_remote_disk_fat_column.sql @@ -2,7 +2,7 @@ set allow_suspicious_fixed_string_types=1; create table fat_granularity (x UInt32, fat FixedString(160000)) engine = MergeTree order by x settings storage_policy = 's3_cache'; -insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 8192, max_insert_threads=8; +insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 3000, max_insert_threads = 8, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; -- Too large sizes of FixedString to deserialize select x from fat_granularity prewhere fat like '256\_%' settings max_threads=2; diff --git a/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql b/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql index 3e131cad0f0..ecaad62b35a 100644 --- a/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql +++ b/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql @@ -1,6 +1,6 @@ -- Tags: no-fasttest -SET max_rows_to_read = 0; +SET max_rows_to_read = 0, max_execution_time = 0, max_estimated_execution_time = 0; -- Query stops after timeout without an error SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='break' FORMAT Null; diff --git 
a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index a635fd2e86a..18d98dbdfe4 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -35,6 +35,7 @@ AS 3 AS key FROM numbers(0, 4000000); +SET max_rows_to_read = 0; SELECT SUM(value), COUNT(*) FROM skewed_probe From 992a2764855d61ea8c8a75c727e6e0fd33054c1a Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 25 Jul 2024 20:23:45 +0000 Subject: [PATCH 0927/2361] uncomment also write_trace_iteration --- src/Common/QueryProfiler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index f6524088102..746010b5462 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -33,7 +33,7 @@ namespace DB namespace { #if defined(OS_LINUX) - //thread_local size_t write_trace_iteration = 0; + thread_local size_t write_trace_iteration = 0; #endif /// Even after timer_delete() the signal can be delivered, /// since it does not do anything with pending signals. From 3f70977cd660e4617d9bbd68cc229020adc57f98 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 25 Jul 2024 21:02:30 +0000 Subject: [PATCH 0928/2361] try to fix --- ...2572_query_views_log_background_thread.reference | 13 +++++++++---- .../02572_query_views_log_background_thread.sql | 8 ++++++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.reference b/tests/queries/0_stateless/02572_query_views_log_background_thread.reference index 22dfaf93781..f867fd0d085 100644 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.reference +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.reference @@ -1,15 +1,14 @@ -- { echoOn } insert into buffer_02572 values (1); -- ensure that the flush was not direct +select * from buffer_02572; +1 select * from data_02572; select * from copy_02572; -- we cannot use OPTIMIZE, this will attach query context, so let's wait SET function_sleep_max_microseconds_per_block = 6000000; select sleepEachRow(1) from numbers(3*2) format Null; -select * from data_02572; -1 -select * from copy_02572; -1 +select sleepEachRow(1) from numbers(3*2) format Null; system flush logs; select count() > 0, lower(status::String), errorCodeToName(exception_code) from system.query_views_log where @@ -18,3 +17,9 @@ select count() > 0, lower(status::String), errorCodeToName(exception_code) group by 2, 3 ; 1 queryfinish OK +select * from buffer_02572; +1 +select * from data_02572; +1 +select * from copy_02572; +1 diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sql b/tests/queries/0_stateless/02572_query_views_log_background_thread.sql index 939c189c5fe..2e9a62b71da 100644 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.sql +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.sql @@ -19,13 +19,13 @@ create materialized view mv_02572 to copy_02572 as select * from data_02572; -- { echoOn } insert into buffer_02572 values (1); -- ensure that the flush was not direct +select * from buffer_02572; select * from data_02572; select * from copy_02572; -- we cannot use OPTIMIZE, this will attach query context, so let's wait SET function_sleep_max_microseconds_per_block = 6000000; select sleepEachRow(1) from numbers(3*2) format Null; -select * from data_02572; -select * from 
copy_02572; +select sleepEachRow(1) from numbers(3*2) format Null; system flush logs; select count() > 0, lower(status::String), errorCodeToName(exception_code) @@ -34,3 +34,7 @@ select count() > 0, lower(status::String), errorCodeToName(exception_code) view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') group by 2, 3 ; + +select * from buffer_02572; +select * from data_02572; +select * from copy_02572; \ No newline at end of file From f0faa111d73c8512c1f88009f0ecfd1a804de45c Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 25 Jul 2024 23:19:58 +0200 Subject: [PATCH 0929/2361] Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference --- src/Formats/FormatFactory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 1271cdfb7ad..e8956159714 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -253,7 +253,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.msgpack.number_of_columns = settings.input_format_msgpack_number_of_columns; format_settings.msgpack.output_uuid_representation = settings.output_format_msgpack_uuid_representation; format_settings.max_rows_to_read_for_schema_inference = settings.input_format_max_rows_to_read_for_schema_inference; - format_settings.max_bytes_to_read_for_schema_inference = settings.input_format_max_rows_to_read_for_schema_inference; + format_settings.max_bytes_to_read_for_schema_inference = settings.input_format_max_bytes_to_read_for_schema_inference; format_settings.column_names_for_schema_inference = settings.column_names_for_schema_inference; format_settings.schema_inference_hints = settings.schema_inference_hints; format_settings.schema_inference_make_columns_nullable = settings.schema_inference_make_columns_nullable; From 18fb7396f941fd5a7e3872788ab07a18731dc943 Mon Sep 17 00:00:00 2001 From: xc0derx <11428624+xc0derx@users.noreply.github.com> Date: Thu, 25 Jul 2024 23:21:30 +0200 Subject: [PATCH 0930/2361] fix broken links (compression codecs) --- docs/en/sql-reference/statements/alter/column.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index aa6f132e08e..2e9b0cf3080 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -36,7 +36,7 @@ These actions are described in detail below. ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST] ``` -Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#codecs) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)). +Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#column_compression_codec) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)). If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. 
Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions. @@ -155,7 +155,7 @@ This query changes the `name` column properties: - Column-level Settings -For examples of columns compression CODECS modifying, see [Column Compression Codecs](../create/table.md/#codecs). +For examples of columns compression CODECS modifying, see [Column Compression Codecs](../create/table.md/#column_compression_codec). For examples of columns TTL modifying, see [Column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). From d0c4c4151c5e4bcb86b9417f4ab8cc71316404b5 Mon Sep 17 00:00:00 2001 From: Shri Bodas Date: Thu, 25 Jul 2024 14:24:28 -0700 Subject: [PATCH 0931/2361] Update keepermap.md Needs quotes around keeper path --- docs/en/engines/table-engines/special/keepermap.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/special/keepermap.md b/docs/en/engines/table-engines/special/keepermap.md index 5559cc2c648..04a9a4b0d4e 100644 --- a/docs/en/engines/table-engines/special/keepermap.md +++ b/docs/en/engines/table-engines/special/keepermap.md @@ -54,7 +54,7 @@ CREATE TABLE keeper_map_table `v2` String, `v3` Float32 ) -ENGINE = KeeperMap(/keeper_map_table, 4) +ENGINE = KeeperMap('/keeper_map_table', 4) PRIMARY KEY key ``` From 321766d0b8161a794f11835b4650d30b3723835b Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 25 Jul 2024 22:51:14 +0000 Subject: [PATCH 0932/2361] Automatic style fix --- tests/performance/scripts/perf.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/performance/scripts/perf.py b/tests/performance/scripts/perf.py index f89784a0e0b..83d66997677 100755 --- a/tests/performance/scripts/perf.py +++ b/tests/performance/scripts/perf.py @@ -349,9 +349,7 @@ for query_index in queries_to_run: try: c.execute("SYSTEM JEMALLOC PURGE") - print( - f"purging jemalloc arenas\t{conn_index}\t{c.last_query.elapsed}" - ) + print(f"purging jemalloc arenas\t{conn_index}\t{c.last_query.elapsed}") except KeyboardInterrupt: raise except: From 9c7078bcf7edfc79dbea2cf6e065aa594810ccaf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 02:54:11 +0200 Subject: [PATCH 0933/2361] Update tests --- tests/queries/0_stateless/00974_query_profiler.sql | 1 + ...0_distributed_group_by_no_merge_order_by_long.sql | 3 ++- .../0_stateless/02122_join_group_by_timeout.sh | 8 ++++---- ...ibuted_with_external_aggregation_memory_usage.sql | 2 +- ..._parquet_list_monotonically_increasing_offsets.sh | 2 +- .../02884_parallel_window_functions.reference | 12 ++++++------ 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/00974_query_profiler.sql b/tests/queries/0_stateless/00974_query_profiler.sql index 24e4241b813..71ea14c3d64 100644 --- a/tests/queries/0_stateless/00974_query_profiler.sql +++ b/tests/queries/0_stateless/00974_query_profiler.sql @@ -15,6 +15,7 @@ SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FRO SET query_profiler_real_time_period_ns = 0; SET query_profiler_cpu_time_period_ns = 1000000; SET log_queries = 1; +SET max_rows_to_read = 0; SELECT count(), ignore('test cpu time query profiler') FROM numbers_mt(10000000000); SET log_queries = 0; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql 
b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql index 6172afbc699..e980f367de7 100644 --- a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql +++ b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql @@ -12,7 +12,8 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n -- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block, -- so the initiator will first receive all blocks from remotes and only after start merging, -- and will hit the memory limit. -select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296, max_rows_to_read=0; -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_rows_to_read = 0; +select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED } -- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently, -- since they don't need to wait until the aggregation will be finished, diff --git a/tests/queries/0_stateless/02122_join_group_by_timeout.sh b/tests/queries/0_stateless/02122_join_group_by_timeout.sh index 59719f75d7c..79c4f01c98a 100755 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.sh +++ b/tests/queries/0_stateless/02122_join_group_by_timeout.sh @@ -14,7 +14,7 @@ MAX_PROCESS_WAIT=5 # TCP CLIENT: As of today (02/12/21) uses PullingAsyncPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \ +timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 --max_execution_time 1 -q \ "SELECT * FROM ( SELECT a.name as n @@ -31,7 +31,7 @@ timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \ FORMAT Null" 2>&1 | grep -o "Code: 159" | sort | uniq ### Should stop pulling data and return what has been generated already (return code 0) -timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT -q \ +timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 -q \ "SELECT a.name as n FROM ( @@ -48,7 +48,7 @@ echo $? 
# HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d \ +${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_result_rows=0&max_result_bytes=0&max_execution_time=1" -d \ "SELECT * FROM ( SELECT a.name as n @@ -66,7 +66,7 @@ ${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_exec ### Should stop pulling data and return what has been generated already (return code 0) -${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL" -d \ +${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_result_rows=0&max_result_bytes=0" -d \ "SELECT a.name as n FROM ( diff --git a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql index 82eb4c93e3d..5eea6f149b5 100644 --- a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql +++ b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql @@ -1,6 +1,6 @@ -- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage -SET max_rows_to_read = '51M'; +SET max_rows_to_read = '101M'; DROP TABLE IF EXISTS t_2354_dist_with_external_aggr; diff --git a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh index 3e28e76d6da..2f512697868 100755 --- a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh +++ b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh @@ -12,6 +12,6 @@ DATA_FILE=$CUR_DIR/data_parquet/list_monotonically_increasing_offsets.parquet ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json Nullable(String)) ENGINE = Memory" cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO parquet_load FORMAT Parquet" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum +${CLICKHOUSE_CLIENT} --max_result_rows 0 --max_result_bytes 0 --query="SELECT * FROM parquet_load" | md5sum ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load" ${CLICKHOUSE_CLIENT} --query="drop table parquet_load" diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.reference b/tests/queries/0_stateless/02884_parallel_window_functions.reference index bac15838dc2..a2cc96dda74 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.reference +++ b/tests/queries/0_stateless/02884_parallel_window_functions.reference @@ -12,7 +12,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -32,7 +32,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -53,7 +53,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 0 GROUP BY ac, @@ -64,7 +64,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 1 GROUP BY ac, @@ -75,7 +75,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 2 
GROUP BY ac, @@ -86,7 +86,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 3 GROUP BY ac, From f3c88ff66707a50523ccef6e964f2fe78a711ace Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 03:56:02 +0200 Subject: [PATCH 0934/2361] Fix benign data race in ZooKeeper --- src/Common/ZooKeeper/IKeeper.h | 2 +- src/Common/ZooKeeper/TestKeeper.h | 2 +- src/Common/ZooKeeper/ZooKeeper.cpp | 27 ++++++++--------- src/Common/ZooKeeper/ZooKeeper.h | 2 +- src/Common/ZooKeeper/ZooKeeperImpl.cpp | 29 +++++++++++++++++-- src/Common/ZooKeeper/ZooKeeperImpl.h | 9 +++--- .../StorageSystemZooKeeperConnection.cpp | 10 +++++-- 7 files changed, 54 insertions(+), 27 deletions(-) diff --git a/src/Common/ZooKeeper/IKeeper.h b/src/Common/ZooKeeper/IKeeper.h index 2c6cbc4a5d5..ce7489a33e5 100644 --- a/src/Common/ZooKeeper/IKeeper.h +++ b/src/Common/ZooKeeper/IKeeper.h @@ -548,7 +548,7 @@ public: virtual bool isExpired() const = 0; /// Get the current connected node idx. - virtual Int8 getConnectedNodeIdx() const = 0; + virtual std::optional getConnectedNodeIdx() const = 0; /// Get the current connected host and port. virtual String getConnectedHostPort() const = 0; diff --git a/src/Common/ZooKeeper/TestKeeper.h b/src/Common/ZooKeeper/TestKeeper.h index 2194ad015bf..562c313ac0e 100644 --- a/src/Common/ZooKeeper/TestKeeper.h +++ b/src/Common/ZooKeeper/TestKeeper.h @@ -39,7 +39,7 @@ public: ~TestKeeper() override; bool isExpired() const override { return expired; } - Int8 getConnectedNodeIdx() const override { return 0; } + std::optional getConnectedNodeIdx() const override { return 0; } String getConnectedHostPort() const override { return "TestKeeper:0000"; } int32_t getConnectionXid() const override { return 0; } int64_t getSessionID() const override { return 0; } diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 01bb508da95..1250e1273b9 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -128,16 +128,15 @@ void ZooKeeper::init(ZooKeeperArgs args_, std::unique_ptr ShuffleHosts shuffled_hosts = shuffleHosts(); impl = std::make_unique(shuffled_hosts, args, zk_log); - Int8 node_idx = impl->getConnectedNodeIdx(); + auto node_idx = impl->getConnectedNodeIdx(); if (args.chroot.empty()) LOG_TRACE(log, "Initialized, hosts: {}", fmt::join(args.hosts, ",")); else LOG_TRACE(log, "Initialized, hosts: {}, chroot: {}", fmt::join(args.hosts, ","), args.chroot); - /// If the balancing strategy has an optimal node then it will be the first in the list - bool connected_to_suboptimal_node = node_idx != shuffled_hosts[0].original_index; + bool connected_to_suboptimal_node = node_idx && *node_idx != shuffled_hosts[0].original_index; bool respect_az = args.prefer_local_availability_zone && !args.client_availability_zone.empty(); bool may_benefit_from_reconnecting = respect_az || args.get_priority_load_balancing.hasOptimalNode(); if (connected_to_suboptimal_node && may_benefit_from_reconnecting) @@ -145,7 +144,7 @@ void ZooKeeper::init(ZooKeeperArgs args_, std::unique_ptr auto reconnect_timeout_sec = getSecondsUntilReconnect(args); LOG_DEBUG(log, "Connected to a suboptimal ZooKeeper host ({}, index {})." 
" To preserve balance in ZooKeeper usage, this ZooKeeper session will expire in {} seconds", - impl->getConnectedHostPort(), node_idx, reconnect_timeout_sec); + impl->getConnectedHostPort(), *node_idx, reconnect_timeout_sec); auto reconnect_task_holder = DB::Context::getGlobalContextInstance()->getSchedulePool().createTask("ZKReconnect", [this, optimal_host = shuffled_hosts[0]]() { @@ -154,13 +153,15 @@ void ZooKeeper::init(ZooKeeperArgs args_, std::unique_ptr LOG_DEBUG(log, "Trying to connect to a more optimal node {}", optimal_host.host); ShuffleHosts node{optimal_host}; std::unique_ptr new_impl = std::make_unique(node, args, zk_log); - Int8 new_node_idx = new_impl->getConnectedNodeIdx(); - /// Maybe the node was unavailable when getting AZs first time, update just in case - if (args.availability_zone_autodetect && availability_zones[new_node_idx].empty()) + if (auto new_node_idx = new_impl->getConnectedNodeIdx(); new_node_idx) { - availability_zones[new_node_idx] = new_impl->tryGetAvailabilityZone(); - LOG_DEBUG(log, "Got availability zone for {}: {}", optimal_host.host, availability_zones[new_node_idx]); + /// Maybe the node was unavailable when getting AZs first time, update just in case + if (args.availability_zone_autodetect && availability_zones[*new_node_idx].empty()) + { + availability_zones[*new_node_idx] = new_impl->tryGetAvailabilityZone(); + LOG_DEBUG(log, "Got availability zone for {}: {}", optimal_host.host, availability_zones[*new_node_idx]); + } } optimal_impl = std::move(new_impl); @@ -1525,7 +1526,7 @@ void ZooKeeper::setServerCompletelyStarted() zk->setServerCompletelyStarted(); } -Int8 ZooKeeper::getConnectedHostIdx() const +std::optional ZooKeeper::getConnectedHostIdx() const { return impl->getConnectedNodeIdx(); } @@ -1544,10 +1545,10 @@ String ZooKeeper::getConnectedHostAvailabilityZone() const { if (args.implementation != "zookeeper" || !impl) return ""; - Int8 idx = impl->getConnectedNodeIdx(); - if (idx < 0) + std::optional idx = impl->getConnectedNodeIdx(); + if (!idx) return ""; /// session expired - return availability_zones.at(idx); + return availability_zones.at(*idx); } size_t getFailedOpIndex(Coordination::Error exception_code, const Coordination::Responses & responses) diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 4ae2cfa6096..657c9cb2c03 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -620,7 +620,7 @@ public: void setServerCompletelyStarted(); - Int8 getConnectedHostIdx() const; + std::optional getConnectedHostIdx() const; String getConnectedHostPort() const; int32_t getConnectionXid() const; diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 2728f953bea..53c7a5728aa 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -536,7 +536,7 @@ void ZooKeeper::connect( compressed_out.emplace(*out, CompressionCodecFactory::instance().get("LZ4", {})); } - original_index = static_cast(node.original_index); + original_index.store(node.original_index); break; } catch (...) @@ -1014,8 +1014,7 @@ void ZooKeeper::finalize(bool error_send, bool error_receive, const String & rea LOG_INFO(log, "Finalizing session {}. finalization_started: {}, queue_finished: {}, reason: '{}'", session_id, already_started, requests_queue.isFinished(), reason); - /// Reset the original index. 
- original_index = -1; + original_index.store(-1); auto expire_session_if_not_expired = [&] { @@ -1534,6 +1533,30 @@ void ZooKeeper::close() } +std::optional ZooKeeper::getConnectedNodeIdx() const +{ + int8_t res = original_index.load(); + if (res == -1) + return std::nullopt; + else + return res; +} + +String ZooKeeper::getConnectedHostPort() const +{ + auto idx = getConnectedNodeIdx(); + if (idx) + return args.hosts[*idx]; + else + return ""; +} + +int32_t ZooKeeper::getConnectionXid() const +{ + return next_xid.load(); +} + + void ZooKeeper::setZooKeeperLog(std::shared_ptr zk_log_) { /// logOperationIfNeeded(...) uses zk_log and can be called from different threads, so we have to use atomic shared_ptr diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h index 0c88c35b381..39082cd14c1 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -114,13 +114,12 @@ public: ~ZooKeeper() override; - /// If expired, you can only destroy the object. All other methods will throw exception. bool isExpired() const override { return requests_queue.isFinished(); } - Int8 getConnectedNodeIdx() const override { return original_index; } - String getConnectedHostPort() const override { return (original_index == -1) ? "" : args.hosts[original_index]; } - int32_t getConnectionXid() const override { return next_xid.load(); } + std::optional getConnectedNodeIdx() const override; + String getConnectedHostPort() const override; + int32_t getConnectionXid() const override; String tryGetAvailabilityZone() override; @@ -219,7 +218,7 @@ private: ACLs default_acls; zkutil::ZooKeeperArgs args; - Int8 original_index = -1; + std::atomic original_index{-1}; /// Fault injection void maybeInjectSendFault(); diff --git a/src/Storages/System/StorageSystemZooKeeperConnection.cpp b/src/Storages/System/StorageSystemZooKeeperConnection.cpp index ec29b84dac3..72a7ba38429 100644 --- a/src/Storages/System/StorageSystemZooKeeperConnection.cpp +++ b/src/Storages/System/StorageSystemZooKeeperConnection.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -27,7 +28,7 @@ ColumnsDescription StorageSystemZooKeeperConnection::getColumnsDescription() /* 0 */ {"name", std::make_shared(), "ZooKeeper cluster's name."}, /* 1 */ {"host", std::make_shared(), "The hostname/IP of the ZooKeeper node that ClickHouse connected to."}, /* 2 */ {"port", std::make_shared(), "The port of the ZooKeeper node that ClickHouse connected to."}, - /* 3 */ {"index", std::make_shared(), "The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config."}, + /* 3 */ {"index", std::make_shared(std::make_shared()), "The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config. If not connected, this column is NULL."}, /* 4 */ {"connected_time", std::make_shared(), "When the connection was established."}, /* 5 */ {"session_uptime_elapsed_seconds", std::make_shared(), "Seconds elapsed since the connection was established."}, /* 6 */ {"is_expired", std::make_shared(), "Is the current connection expired."}, @@ -64,7 +65,7 @@ void StorageSystemZooKeeperConnection::fillData(MutableColumns & res_columns, Co /// For read-only snapshot type functionality, it's acceptable even though 'getZooKeeper' may cause data inconsistency. 
auto fill_data = [&](const String & name, const zkutil::ZooKeeperPtr zookeeper, MutableColumns & columns) { - Int8 index = zookeeper->getConnectedHostIdx(); + auto index = zookeeper->getConnectedHostIdx(); String host_port = zookeeper->getConnectedHostPort(); if (index != -1 && !host_port.empty()) { @@ -78,7 +79,10 @@ void StorageSystemZooKeeperConnection::fillData(MutableColumns & res_columns, Co columns[0]->insert(name); columns[1]->insert(host); columns[2]->insert(port); - columns[3]->insert(index); + if (index) + columns[3]->insert(*index); + else + columns[3]->insertDefault(); columns[4]->insert(connected_time); columns[5]->insert(uptime); columns[6]->insert(zookeeper->expired()); From d6fdf29679ece887567cba6fa43aee4c22c7d6f7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 04:48:41 +0200 Subject: [PATCH 0935/2361] Remove too long unit test --- .../tests/gtest_archive_reader_and_writer.cpp | 42 ------------------- 1 file changed, 42 deletions(-) diff --git a/src/IO/tests/gtest_archive_reader_and_writer.cpp b/src/IO/tests/gtest_archive_reader_and_writer.cpp index 898c7017e7d..06f8f53546b 100644 --- a/src/IO/tests/gtest_archive_reader_and_writer.cpp +++ b/src/IO/tests/gtest_archive_reader_and_writer.cpp @@ -492,48 +492,6 @@ TEST_P(ArchiveReaderAndWriterTest, ManyFilesOnDisk) } } -TEST_P(ArchiveReaderAndWriterTest, LargeFile) -{ - /// Make an archive. - std::string_view contents = "The contents of a.txt\n"; - int times = 10000000; - { - auto writer = createArchiveWriter(getPathToArchive()); - { - auto out = writer->writeFile("a.txt", times * contents.size()); - for (int i = 0; i < times; i++) - writeString(contents, *out); - out->finalize(); - } - writer->finalize(); - } - - /// Read the archive. - auto reader = createArchiveReader(getPathToArchive()); - - ASSERT_TRUE(reader->fileExists("a.txt")); - - auto file_info = reader->getFileInfo("a.txt"); - EXPECT_EQ(file_info.uncompressed_size, contents.size() * times); - EXPECT_GT(file_info.compressed_size, 0); - - { - auto in = reader->readFile("a.txt", /*throw_on_not_found=*/true); - for (int i = 0; i < times; i++) - ASSERT_TRUE(checkString(String(contents), *in)); - } - - { - /// Use an enumerator. 
- auto enumerator = reader->firstFile(); - ASSERT_NE(enumerator, nullptr); - EXPECT_EQ(enumerator->getFileName(), "a.txt"); - EXPECT_EQ(enumerator->getFileInfo().uncompressed_size, contents.size() * times); - EXPECT_GT(enumerator->getFileInfo().compressed_size, 0); - EXPECT_FALSE(enumerator->nextFile()); - } -} - TEST(TarArchiveReaderTest, FileExists) { String archive_path = "archive.tar"; From 9c6026965d985ca0ffcf0ab789d09946bd37c569 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 04:55:53 +0200 Subject: [PATCH 0936/2361] Fix error --- src/IO/ReadWriteBufferFromHTTP.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 17a5ed385d4..a62f22d4bd9 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -700,6 +700,14 @@ std::optional ReadWriteBufferFromHTTP::tryGetLastModificationTime() { return std::nullopt; } + catch (const NetException &) + { + return std::nullopt; + } + catch (const Poco::Net::NetException &) + { + return std::nullopt; + } } return file_info->last_modified; From 64ff5d7bc443cdb15fd0a5eec391d449a617b3f9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 05:07:56 +0200 Subject: [PATCH 0937/2361] Fix `00705_drop_create_merge_tree` --- tests/queries/0_stateless/00705_drop_create_merge_tree.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index d7754091290..ea8b9d02e49 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --ignore-error -nm 2>/dev/null & -yes 'DROP TABLE table;' | head -n 1000 | $CLICKHOUSE_CLIENT --ignore-error -nm 2>/dev/null & +yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & +yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & wait ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table" From 3f483e547572c47d137d0f2664bd82c0b978ac7b Mon Sep 17 00:00:00 2001 From: heguangnan Date: Fri, 26 Jul 2024 12:09:03 +0800 Subject: [PATCH 0938/2361] fix memory leak when exception happend during count distinct for null key --- src/Interpreters/Aggregator.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index e073b7a49b6..543fd8a0bf2 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -3300,6 +3300,17 @@ void NO_INLINE Aggregator::destroyImpl(Table & table) const data = nullptr; }); + + if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization) + { + if (table.getNullKeyData() != nullptr) + { + for (size_t i = 0; i < params.aggregates_size; ++i) + aggregate_functions[i]->destroy(table.getNullKeyData() + offsets_of_aggregate_states[i]); + + table.getNullKeyData() = nullptr; + } + } } From 287cce7d211b9386895a4fa898f87405eccb3e96 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 26 Jul 2024 09:20:15 +0200 Subject: [PATCH 0939/2361] Fixes --- .gitmodules | 2 +- contrib/numactl | 2 +- docker/test/performance-comparison/run.sh | 1 + programs/server/Server.cpp | 9 ++++++++- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index b5d7e1e56b3..7e0b4df4ad1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -374,4 +374,4 @@ url = https://github.com/ClickHouse/double-conversion.git [submodule "contrib/numactl"] path = contrib/numactl - url = https://github.com/numactl/numactl.git + url = https://github.com/ClickHouse/numactl.git diff --git a/contrib/numactl b/contrib/numactl index 3871b1c42fc..8d13d63a05f 160000 --- a/contrib/numactl +++ b/contrib/numactl @@ -1 +1 @@ -Subproject commit 3871b1c42fc71bceadafd745d2eff5dddfc2d67e +Subproject commit 8d13d63a05f0c3cd88bf777cbb61541202b7da08 diff --git a/docker/test/performance-comparison/run.sh b/docker/test/performance-comparison/run.sh index 7afb5da59b1..6ef781fa4c8 100644 --- a/docker/test/performance-comparison/run.sh +++ b/docker/test/performance-comparison/run.sh @@ -13,6 +13,7 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh" # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt # Double-escaped backslashes are a tribute to the engineering wonder of docker -- # it gives '/bin/sh: 1: [bash,: not found' otherwise. +numactl --hardware node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') )); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node $entry diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index b9a7c298f00..b818ff1f3a2 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -775,9 +775,16 @@ try LOG_INFO( log, - "ClickHouse is bound to a subset of NUMA nodes. Total memory of all available nodes {}", + "ClickHouse is bound to a subset of NUMA nodes. 
Total memory of all available nodes: {}", ReadableSize(total_numa_memory)); } + else + { + LOG_TRACE( + log, + "All NUMA nodes are used. Detected NUMA nodes: {}", + numa_num_configured_nodes()); + } numa_bitmask_free(membind); } From 400f8e5b2116ab585312e578eee4d783b9d6783b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 26 Jul 2024 09:33:46 +0200 Subject: [PATCH 0940/2361] Fix stacktrace cache --- src/Common/StackTrace.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index 59a58ac027a..ff8765c9727 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -495,18 +495,19 @@ using StackTraceCacheBase = std::map Date: Fri, 26 Jul 2024 15:38:26 +0800 Subject: [PATCH 0941/2361] support set orc reader time zone name --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Formats/FormatFactory.cpp | 2 +- src/Formats/FormatSettings.h | 2 +- .../Formats/Impl/NativeORCBlockInputFormat.cpp | 6 +----- tests/queries/0_stateless/03198_orc_read_time_zone.sh | 10 +++++----- 6 files changed, 10 insertions(+), 14 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 2a665326afc..a5220c3017c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1073,7 +1073,7 @@ class IColumn; M(Bool, input_format_orc_allow_missing_columns, true, "Allow missing columns while reading ORC input formats", 0) \ M(Bool, input_format_orc_use_fast_decoder, true, "Use a faster ORC decoder implementation.", 0) \ M(Bool, input_format_orc_filter_push_down, true, "When reading ORC files, skip whole stripes or row groups based on the WHERE/PREWHERE expressions, min/max statistics or bloom filter in the ORC metadata.", 0) \ - M(Bool, input_format_orc_read_use_writer_time_zone, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \ + M(String, input_format_orc_reader_time_zone_name, "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \ M(Bool, input_format_parquet_allow_missing_columns, true, "Allow missing columns while reading Parquet input formats", 0) \ M(UInt64, input_format_parquet_local_file_min_bytes_for_seek, 8192, "Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format", 0) \ M(Bool, input_format_arrow_allow_missing_columns, true, "Allow missing columns while reading Arrow input formats", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index acd119c159b..457caa76bb6 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -69,7 +69,7 @@ static std::initializer_listgetWriterTimezone(); - row_reader_options.setTimezoneName(writer_time_zone); - } + row_reader_options.setTimezoneName(format_settings.orc.reader_time_zone_name); row_reader_options.range(current_stripe_info->getOffset(), current_stripe_info->getLength()); if (format_settings.orc.filter_push_down && sarg) { diff --git a/tests/queries/0_stateless/03198_orc_read_time_zone.sh b/tests/queries/0_stateless/03198_orc_read_time_zone.sh index 27530c06237..7e931e16e48 100755 --- a/tests/queries/0_stateless/03198_orc_read_time_zone.sh +++ b/tests/queries/0_stateless/03198_orc_read_time_zone.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "drop table if exists test" -$CLICKHOUSE_CLIENT -q "create table test(id UInt64, t DateTime64) Engine=MergeTree order by id" -$CLICKHOUSE_CLIENT -q "insert into test from infile '$CURDIR/data_orc/test_reader_time_zone.snappy.orc' SETTINGS input_format_orc_read_use_writer_time_zone=true FORMAT ORC" -$CLICKHOUSE_CLIENT -q "select * from test SETTINGS session_timezone='Asia/Shanghai'" -$CLICKHOUSE_CLIENT -q "drop table test" \ No newline at end of file +$CLICKHOUSE_CLIENT -q "drop table if exists test_orc_read_timezone" +$CLICKHOUSE_CLIENT -q "create table test_orc_read_timezone(id UInt64, t DateTime64) Engine=MergeTree order by id" +$CLICKHOUSE_CLIENT -q "insert into test_orc_read_timezone from infile '$CURDIR/data_orc/test_reader_time_zone.snappy.orc' SETTINGS input_format_orc_reader_time_zone_name='Asia/Shanghai' FORMAT ORC" +$CLICKHOUSE_CLIENT -q "select * from test_orc_read_timezone" +$CLICKHOUSE_CLIENT -q "drop table test_orc_read_timezone" \ No newline at end of file From c66a9f2d365e101798b2eee6cde7acc903a2fb46 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Fri, 26 Jul 2024 09:39:31 +0200 Subject: [PATCH 0942/2361] Fix --- .../01676_clickhouse_client_autocomplete.python | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python b/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python index 9072dfeb09f..0f35d259c7c 100644 --- a/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python +++ b/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python @@ -59,13 +59,14 @@ def test_completion(program, argv, comp_word): output = output_b.decode() debug_log_fd.write(repr(output_b) + "\n") debug_log_fd.flush() - # fail fast if there is a bell character in the output, - # meaning no concise completion is found - if "\x07" in output: - print(f"{comp_word}: FAIL") - return while not comp_word in output: + # fail fast if there is a bell character in the output, + # meaning no concise completion is found + if "\x07" in output: + print(f"{comp_word}: FAIL") + return + output_b = os.read(master, 4096) output += output_b.decode() debug_log_fd.write(repr(output_b) + "\n") From 83dba7194f3467dc0f6e5499d65bda8a66fa8206 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 26 Jul 2024 09:55:09 +0200 Subject: [PATCH 0943/2361] Add deserialization of empty state --- .../03208_groupArrayIntersect_serialization.reference | 1 + .../0_stateless/03208_groupArrayIntersect_serialization.sql | 2 ++ 2 files changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference index c3b6e0cd5b7..e84856c90fd 100644 --- a/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference +++ b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.reference @@ -10,3 +10,4 @@ a [(['2','4','6','8','10'])] b [(['2','4','6','8','10'])] c [(['2','4','6','8','10'])] d [] +e [] diff --git a/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql index e05f78a4051..1b3d48ce0c3 100644 --- a/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql +++ b/tests/queries/0_stateless/03208_groupArrayIntersect_serialization.sql @@ -39,3 +39,5 @@ INSERT INTO grouparray_string Select 
groupArrayIntersectState([tuple(['2', '4', SELECT 'c', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10', '20']), tuple(['2', '4', '6', '8', '10', '14'])]); SELECT 'd', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([]::Array(Tuple(Array(String)))); +SELECT 'e', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; From ad4e807cf4bb3633616b01e3616844fe2108d59f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Jul 2024 08:12:01 +0000 Subject: [PATCH 0944/2361] Fix stupid crash. --- src/Interpreters/InterpreterSelectQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 4fd6f7a2900..41306a79198 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -2055,9 +2055,9 @@ void InterpreterSelectQuery::addEmptySourceToQueryPlan(QueryPlan & query_plan, c { auto & prewhere_info = *query_info.prewhere_info; - auto row_level_actions = std::make_shared(prewhere_info.row_level_filter->clone()); if (prewhere_info.row_level_filter) { + auto row_level_actions = std::make_shared(prewhere_info.row_level_filter->clone()); pipe.addSimpleTransform([&](const Block & header) { return std::make_shared(header, From de84f4f045f5ece627ca8295a09a5f2cf1eab6aa Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 26 Jul 2024 08:13:21 +0000 Subject: [PATCH 0945/2361] add proper cast to lagInFrame/leadInFrame --- src/Processors/Transforms/WindowTransform.cpp | 86 ++++++++++++++++--- src/Processors/Transforms/WindowTransform.h | 1 + 2 files changed, 75 insertions(+), 12 deletions(-) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 06ae2bfb25e..006593edeaa 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -16,6 +16,9 @@ #include #include #include +#include +#include +#include #include #include @@ -75,6 +78,8 @@ public: virtual std::optional getDefaultFrame() const { return {}; } + virtual ColumnPtr castColumn(const Columns &, const std::vector &) { return nullptr; } + /// Is the frame type supported by this function. virtual bool checkWindowFrameType(const WindowTransform * /*transform*/) const { return true; } }; @@ -1171,6 +1176,9 @@ void WindowTransform::appendChunk(Chunk & chunk) // Initialize output columns. 
for (auto & ws : workspaces) { + if (ws.window_function_impl) + block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); + block.output_columns.push_back(ws.aggregate_function->getResultType() ->createColumn()); block.output_columns.back()->reserve(block.rows); @@ -2358,6 +2366,8 @@ public: template struct WindowFunctionLagLeadInFrame final : public WindowFunction { + FunctionBasePtr func_cast = nullptr; + WindowFunctionLagLeadInFrame(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, createResultType(argument_types_, name_)) @@ -2385,18 +2395,71 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction return; } - if (!argument_types[0]->equals(*argument_types[2])) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Argument type '{}' and the default value type '{}' are different", - argument_types[0]->getName(), - argument_types[2]->getName()); - if (argument_types.size() > 3) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function '{}' accepts at most 3 arguments, {} given", name, argument_types.size()); } + + if (argument_types[0]->equals(*argument_types[2])) + return; + + const auto supertype = getLeastSupertype(DataTypes{argument_types[0], argument_types[2]}); + if (!supertype) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "There is no supertype for the argument type '{}' and the default value type '{}'", + argument_types[0]->getName(), + argument_types[2]->getName()); + } + if (!argument_types[0]->equals(*supertype)) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The supertype '{}' for the argument type '{}' and the default value type '{}' is not the same as the argument type", + supertype->getName(), + argument_types[0]->getName(), + argument_types[2]->getName()); + } + + const auto from_name = argument_types[2]->getName(); + const auto to_name = argument_types[0]->getName(); + ColumnsWithTypeAndName arguments + { + { argument_types[2], "" }, + { + DataTypeString().createColumnConst(0, to_name), + std::make_shared(), + "" + } + }; + + auto get_cast_func = [&arguments] + { + FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::accurate, {}); + return func_builder_cast->build(arguments); + }; + + func_cast = get_cast_func(); + + } + + ColumnPtr castColumn(const Columns & columns, const std::vector & idx) override + { + if (!func_cast) + return nullptr; + + ColumnsWithTypeAndName arguments + { + { columns[idx[2]], argument_types[2], "" }, + { + DataTypeString().createColumnConst(columns[idx[2]]->size(), argument_types[0]->getName()), + std::make_shared(), + "" + } + }; + + return func_cast->execute(arguments, argument_types[0], columns[idx[2]]->size()); } static DataTypePtr createResultType(const DataTypes & argument_types_, const std::string & name_) @@ -2446,12 +2509,11 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (argument_types.size() > 2) { // Column with default values is specified. - // The conversion through Field is inefficient, but we accept - // subtypes of the argument type as a default value (for convenience), - // and it's a pain to write conversion that respects ColumnNothing - // and ColumnConst and so on. - const IColumn & default_column = *current_block.input_columns[ - workspace.argument_column_indices[2]].get(); + const IColumn & default_column = + current_block.casted_columns[function_index] ? 
+ *current_block.casted_columns[function_index].get() : + *current_block.input_columns[workspace.argument_column_indices[2]].get(); + to.insert(default_column[transform->current_row.row]); } else diff --git a/src/Processors/Transforms/WindowTransform.h b/src/Processors/Transforms/WindowTransform.h index 43fa6b28019..fe4f79e997c 100644 --- a/src/Processors/Transforms/WindowTransform.h +++ b/src/Processors/Transforms/WindowTransform.h @@ -50,6 +50,7 @@ struct WindowTransformBlock { Columns original_input_columns; Columns input_columns; + Columns casted_columns; MutableColumns output_columns; size_t rows = 0; From 3cbb3dc55f6582bc8abc7d5683080702080adcd8 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 08:41:44 +0000 Subject: [PATCH 0946/2361] Do not spam logs with messages related to connection reset by peer --- src/Server/HTTP/HTTPServerConnection.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/Server/HTTP/HTTPServerConnection.cpp b/src/Server/HTTP/HTTPServerConnection.cpp index 047db014560..8eb2ecb1224 100644 --- a/src/Server/HTTP/HTTPServerConnection.cpp +++ b/src/Server/HTTP/HTTPServerConnection.cpp @@ -97,6 +97,18 @@ void HTTPServerConnection::run() { sendErrorResponse(session, Poco::Net::HTTPResponse::HTTP_BAD_REQUEST); } + catch (const Poco::Net::NetException & e) + { + /// Do not spam logs with messages related to connection reset by peer. + if (e.code() == POCO_ENOTCONN) + break; + + if (session.networkException()) + session.networkException()->rethrow(); + else + throw; + } + catch (const Poco::Exception &) { if (session.networkException()) From 498ae4358647dbff5fde2861a7113a9c9597930a Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Fri, 26 Jul 2024 10:42:23 +0200 Subject: [PATCH 0947/2361] Make 02908_many_requests_to_system_replicas less stressful --- ...08_many_requests_to_system_replicas.reference | 14 +++++++------- .../02908_many_requests_to_system_replicas.sh | 16 ++++++++-------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference index f376bb87044..fdefd2e3466 100644 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.reference @@ -1,10 +1,10 @@ -Creating 300 tables -900 1 1 -900 1 1 -900 1 1 -900 1 1 -Making 200 requests to system.replicas +Creating 50 tables +150 1 1 +150 1 1 +150 1 1 +150 1 1 +Making 100 requests to system.replicas Query system.replicas while waiting for other concurrent requests to finish 0 -900 +150 1 diff --git a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh index a247c99a818..81ba59fc591 100755 --- a/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh +++ b/tests/queries/0_stateless/02908_many_requests_to_system_replicas.sh @@ -7,8 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -NUM_TABLES=300 -CONCURRENCY=200 +NUM_TABLES=50 +CONCURRENCY=100 echo "Creating $NUM_TABLES tables" @@ -46,10 +46,10 @@ wait; # Check results with different max_block_size -$CLICKHOUSE_CLIENT -q 'SELECT count(), sum(total_replicas) >= 2700, sum(active_replicas) >= 2700 FROM system.replicas WHERE database=currentDatabase()' -$CLICKHOUSE_CLIENT -q 'SELECT count(), sum(total_replicas) >= 2700, sum(active_replicas) >= 2700 FROM system.replicas WHERE 
database=currentDatabase() SETTINGS max_block_size=1' -$CLICKHOUSE_CLIENT -q 'SELECT count(), sum(total_replicas) >= 2700, sum(active_replicas) >= 2700 FROM system.replicas WHERE database=currentDatabase() SETTINGS max_block_size=77' -$CLICKHOUSE_CLIENT -q 'SELECT count(), sum(total_replicas) >= 2700, sum(active_replicas) >= 2700 FROM system.replicas WHERE database=currentDatabase() SETTINGS max_block_size=11111' +$CLICKHOUSE_CLIENT -q 'SELECT count() as c, sum(total_replicas) >= 3*c, sum(active_replicas) >= 3*c FROM system.replicas WHERE database=currentDatabase()' +$CLICKHOUSE_CLIENT -q 'SELECT count() as c, sum(total_replicas) >= 3*c, sum(active_replicas) >= 3*c FROM system.replicas WHERE database=currentDatabase() SETTINGS max_block_size=1' +$CLICKHOUSE_CLIENT -q 'SELECT count() as c, sum(total_replicas) >= 3*c, sum(active_replicas) >= 3*c FROM system.replicas WHERE database=currentDatabase() SETTINGS max_block_size=77' +$CLICKHOUSE_CLIENT -q 'SELECT count() as c, sum(total_replicas) >= 3*c, sum(active_replicas) >= 3*c FROM system.replicas WHERE database=currentDatabase() SETTINGS max_block_size=11111' echo "Making $CONCURRENCY requests to system.replicas" @@ -70,8 +70,8 @@ wait; $CLICKHOUSE_CLIENT -nq " SYSTEM FLUSH LOGS; --- without optimisation there are ~350K zk requests -SELECT sum(ProfileEvents['ZooKeeperTransactions']) < 30000 +-- Check that number of ZK request is less then a half of (total replicas * concurrency) +SELECT sum(ProfileEvents['ZooKeeperTransactions']) < (${NUM_TABLES} * 3 * ${CONCURRENCY} / 2) FROM system.query_log WHERE current_database=currentDatabase() AND log_comment='02908_many_requests'; " From 5bf89a433128985944cb5dd6ad6ef40a9658ff52 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 26 Jul 2024 08:42:52 +0000 Subject: [PATCH 0948/2361] Set a different instance dir when using pytest-xdist This allows executing in integration tests in parallel without directory clashes. --- tests/integration/helpers/cluster.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 548b58a17e8..0c8278048bf 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -434,6 +434,11 @@ class ClickHouseCluster: # docker-compose removes everything non-alphanumeric from project names so we do it too. 
self.project_name = re.sub(r"[^a-z0-9]", "", project_name.lower()) self.instances_dir_name = get_instances_dir(self.name) + xdist_worker = os.getenv("PYTEST_XDIST_WORKER") + if xdist_worker: + self.project_name += f"_{xdist_worker}" + self.instances_dir_name += f"_{xdist_worker}" + self.instances_dir = p.join(self.base_dir, self.instances_dir_name) self.docker_logs_path = p.join(self.instances_dir, "docker.log") self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME) From 0c5c23e78477636560cd09f17b91db79e420680f Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 26 Jul 2024 10:21:36 +0200 Subject: [PATCH 0949/2361] More fixes --- src/Common/Exception.cpp | 42 +++++++++++++++++++++++++++++---- src/Common/Exception.h | 31 ++++++++++++++++++------ src/Common/SignalHandlers.cpp | 2 +- src/Common/StackTrace.cpp | 2 +- src/Common/ThreadPool.cpp | 4 ++-- src/Loggers/OwnSplitChannel.cpp | 7 +++++- src/Loggers/OwnSplitChannel.h | 2 ++ 7 files changed, 73 insertions(+), 17 deletions(-) diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 33befa64946..c4bd4fbd943 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -100,7 +101,7 @@ Exception::Exception(const MessageMasked & msg_masked, int code, bool remote_) { if (terminate_on_any_exception) std::_Exit(terminate_status_code); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); handle_error_code(msg_masked.msg, code, remote, getStackFramePointers()); } @@ -110,7 +111,7 @@ Exception::Exception(MessageMasked && msg_masked, int code, bool remote_) { if (terminate_on_any_exception) std::_Exit(terminate_status_code); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); handle_error_code(message(), code, remote, getStackFramePointers()); } @@ -119,7 +120,7 @@ Exception::Exception(CreateFromPocoTag, const Poco::Exception & exc) { if (terminate_on_any_exception) std::_Exit(terminate_status_code); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); #ifdef STD_EXCEPTION_HAS_STACK_TRACE auto * stack_trace_frames = exc.get_stack_trace_frames(); auto stack_trace_size = exc.get_stack_trace_size(); @@ -133,7 +134,7 @@ Exception::Exception(CreateFromSTDTag, const std::exception & exc) { if (terminate_on_any_exception) std::_Exit(terminate_status_code); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); #ifdef STD_EXCEPTION_HAS_STACK_TRACE auto * stack_trace_frames = exc.get_stack_trace_frames(); auto stack_trace_size = exc.get_stack_trace_size(); @@ -223,10 +224,38 @@ Exception::FramePointers Exception::getStackFramePointers() const } thread_local bool Exception::enable_job_stack_trace = false; -thread_local std::vector Exception::thread_frame_pointers = {}; +thread_local bool Exception::can_use_thread_frame_pointers = false; +thread_local Exception::ThreadFramePointers Exception::thread_frame_pointers; + +Exception::ThreadFramePointers::ThreadFramePointers() +{ + can_use_thread_frame_pointers = true; +} + +Exception::ThreadFramePointers::~ThreadFramePointers() +{ + can_use_thread_frame_pointers = false; +} + +Exception::ThreadFramePointersBase Exception::getThreadFramePointers() +{ + if (can_use_thread_frame_pointers) + return thread_frame_pointers.frame_pointers; + + return {}; +} + 
+void Exception::setThreadFramePointers(ThreadFramePointersBase frame_pointers) +{ + if (can_use_thread_frame_pointers) + thread_frame_pointers.frame_pointers = std::move(frame_pointers); +} static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) { + if (!OwnSplitChannel::isLoggingEnabled()) + return; + try { PreformattedMessage message = getCurrentExceptionMessageAndPattern(true); @@ -242,6 +271,9 @@ static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string void tryLogCurrentException(const char * log_name, const std::string & start_of_message) { + if (!OwnSplitChannel::isLoggingEnabled()) + return; + /// Under high memory pressure, new allocations throw a /// MEMORY_LIMIT_EXCEEDED exception. /// diff --git a/src/Common/Exception.h b/src/Common/Exception.h index 4e54c411bf1..a4f55f41caa 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -10,7 +10,6 @@ #include #include -#include #include #include @@ -49,14 +48,14 @@ public: { if (terminate_on_any_exception) std::terminate(); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); } Exception(const PreformattedMessage & msg, int code): Exception(msg.text, code) { if (terminate_on_any_exception) std::terminate(); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); message_format_string = msg.format_string; message_format_string_args = msg.format_string_args; } @@ -65,18 +64,36 @@ public: { if (terminate_on_any_exception) std::terminate(); - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); message_format_string = msg.format_string; message_format_string_args = msg.format_string_args; } /// Collect call stacks of all previous jobs' schedulings leading to this thread job's execution static thread_local bool enable_job_stack_trace; - static thread_local std::vector thread_frame_pointers; + static thread_local bool can_use_thread_frame_pointers; + /// Because of unknown order of static destructor calls, + /// thread_frame_pointers can already be uninitialized when a different destructor generates an exception. 
+ /// To prevent such scenarios, a wrapper class is created and a function that will return empty vector + /// if its destructor is already called + using ThreadFramePointersBase = std::vector; + struct ThreadFramePointers + { + ThreadFramePointers(); + ~ThreadFramePointers(); + + ThreadFramePointersBase frame_pointers; + }; + + static ThreadFramePointersBase getThreadFramePointers(); + static void setThreadFramePointers(ThreadFramePointersBase frame_pointers); + /// Callback for any exception static std::function callback; protected: + static thread_local ThreadFramePointers thread_frame_pointers; + // used to remove the sensitive information from exceptions if query_masking_rules is configured struct MessageMasked { @@ -178,7 +195,7 @@ class ErrnoException : public Exception public: ErrnoException(std::string && msg, int code, int with_errno) : Exception(msg, code), saved_errno(with_errno) { - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); addMessage(", {}", errnoToString(saved_errno)); } @@ -187,7 +204,7 @@ public: requires std::is_convertible_v ErrnoException(int code, T && message) : Exception(message, code), saved_errno(errno) { - capture_thread_frame_pointers = thread_frame_pointers; + capture_thread_frame_pointers = getThreadFramePointers(); addMessage(", {}", errnoToString(saved_errno)); } diff --git a/src/Common/SignalHandlers.cpp b/src/Common/SignalHandlers.cpp index 52c83d80121..e025e49e0a3 100644 --- a/src/Common/SignalHandlers.cpp +++ b/src/Common/SignalHandlers.cpp @@ -89,7 +89,7 @@ void signalHandler(int sig, siginfo_t * info, void * context) writePODBinary(*info, out); writePODBinary(signal_context, out); writePODBinary(stack_trace, out); - writeVectorBinary(Exception::enable_job_stack_trace ? Exception::thread_frame_pointers : std::vector{}, out); + writeVectorBinary(Exception::enable_job_stack_trace ? 
Exception::getThreadFramePointers() : std::vector{}, out); writeBinary(static_cast(getThreadId()), out); writePODBinary(current_thread, out); diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index ff8765c9727..76277cbc993 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -489,7 +489,7 @@ struct CacheEntry using CacheEntryPtr = std::shared_ptr; -static constinit std::atomic can_use_cache = false; +static constinit bool can_use_cache = false; using StackTraceCacheBase = std::map>; diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 01f561d573f..c8f1ae99969 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -51,7 +51,7 @@ public: if (!capture_frame_pointers) return; /// Save all previous jobs call stacks and append with current - frame_pointers = DB::Exception::thread_frame_pointers; + frame_pointers = DB::Exception::getThreadFramePointers(); frame_pointers.push_back(StackTrace().getFramePointers()); } @@ -455,7 +455,7 @@ void ThreadPoolImpl::worker(typename std::list::iterator thread_ try { if (DB::Exception::enable_job_stack_trace) - DB::Exception::thread_frame_pointers = std::move(job_data->frame_pointers); + DB::Exception::setThreadFramePointers(std::move(job_data->frame_pointers)); CurrentMetrics::Increment metric_active_pool_threads(metric_active_threads); diff --git a/src/Loggers/OwnSplitChannel.cpp b/src/Loggers/OwnSplitChannel.cpp index c0e8514c62a..e29d2a1e0aa 100644 --- a/src/Loggers/OwnSplitChannel.cpp +++ b/src/Loggers/OwnSplitChannel.cpp @@ -18,6 +18,11 @@ namespace DB static constinit std::atomic allow_logging{true}; +bool OwnSplitChannel::isLoggingEnabled() +{ + return allow_logging; +} + void OwnSplitChannel::disableLogging() { allow_logging = false; @@ -25,7 +30,7 @@ void OwnSplitChannel::disableLogging() void OwnSplitChannel::log(const Poco::Message & msg) { - if (!allow_logging) + if (!isLoggingEnabled()) return; #ifndef WITHOUT_TEXT_LOG diff --git a/src/Loggers/OwnSplitChannel.h b/src/Loggers/OwnSplitChannel.h index 9872a4fb558..9de55f330be 100644 --- a/src/Loggers/OwnSplitChannel.h +++ b/src/Loggers/OwnSplitChannel.h @@ -41,6 +41,8 @@ public: static void disableLogging(); + static bool isLoggingEnabled(); + private: void logSplit(const Poco::Message & msg); void tryLogSplit(const Poco::Message & msg); From c7330252cf581441b95c51b47977f597eb41734e Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 26 Jul 2024 11:47:49 +0300 Subject: [PATCH 0950/2361] Disable convert OUTER JOIN to INNER JOIN optimization for non ALL JOIN strictness --- .../QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp b/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp index d90f0e152e7..d9296f10a98 100644 --- a/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/convertOuterJoinToInnerJoin.cpp @@ -23,7 +23,10 @@ size_t tryConvertOuterJoinToInnerJoin(QueryPlan::Node * parent_node, QueryPlan:: return 0; const auto & table_join = join->getJoin()->getTableJoin(); - if (table_join.strictness() == JoinStrictness::Asof) + + /// Any JOIN issue https://github.com/ClickHouse/ClickHouse/issues/66447 + /// Anti JOIN issue https://github.com/ClickHouse/ClickHouse/issues/67156 + if (table_join.strictness() != JoinStrictness::All) return 0; /// TODO: Support join_use_nulls From 
aaa25454b31d854338200b335d7ac6442e959af4 Mon Sep 17 00:00:00 2001 From: Blargian Date: Fri, 26 Jul 2024 10:58:45 +0200 Subject: [PATCH 0951/2361] Additional formatting fixes --- .../functions/type-conversion-functions.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 4326753216e..87d824ec5bb 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -571,7 +571,7 @@ Result: ## toInt32OrZero -Like [`toInt32`](#toint32), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. +Like [`toInt32`](#toint32), this function converts an input value to a value of type [Int32](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** @@ -767,7 +767,7 @@ SELECT toInt64('9223372036854775808') == --9223372036854775808; **Returned value** -- 64-bit integer value. [Int64](../data-types/int-uint.md). [Int64](../data-types/int-uint.md). +- 64-bit integer value. [Int64](../data-types/int-uint.md). :::note The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. @@ -800,7 +800,7 @@ Result: ## toInt64OrZero -Like [`toInt64`](#toint64), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. +Like [`toInt64`](#toint64), this function converts an input value to a value of type [Int64](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** @@ -878,7 +878,7 @@ Types for which `\N` is returned: **Returned value** -- Integer value of type `Int64` if successful, otherwise `NULL`. [Int64](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). +- 64-bit integer value if successful, otherwise `NULL`. [Int64](../data-types/int-uint.md) / [NULL](../data-types/nullable.md). :::note The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. @@ -935,7 +935,7 @@ Types for which the default value is returned: **Returned value** -- Integer value of type `Int64` if successful, otherwise returns the default value. [Int64](../data-types/int-uint.md). +- 64-bit integer value if successful, otherwise returns the default value. [Int64](../data-types/int-uint.md). :::note - The function uses [rounding towards zero](https://en.wikipedia.org/wiki/Rounding#Rounding_towards_zero), meaning it truncates fractional digits of numbers. @@ -1028,7 +1028,7 @@ Result: ## toInt128OrZero -Like [`toInt128`](#toint128), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. +Like [`toInt128`](#toint128), this function converts an input value to a value of type [Int128](../data-types/int-uint.md) but returns `0` in case of an error. **Syntax** @@ -1256,7 +1256,7 @@ Result: ## toInt256OrZero -Like [`toInt256`](#toint256), this function converts an input value to a value of type [Int8](../data-types/int-uint.md) but returns `0` in case of an error. +Like [`toInt256`](#toint256), this function converts an input value to a value of type [Int256](../data-types/int-uint.md) but returns `0` in case of an error. 
**Syntax** From 338685cc79a5358246977f2ba039230a615c6ea6 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 26 Jul 2024 10:59:17 +0200 Subject: [PATCH 0952/2361] Fix build --- programs/odbc-bridge/tests/CMakeLists.txt | 2 +- src/CMakeLists.txt | 1 + src/Common/mysqlxx/tests/CMakeLists.txt | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/programs/odbc-bridge/tests/CMakeLists.txt b/programs/odbc-bridge/tests/CMakeLists.txt index f1411dbb554..2f63aed7942 100644 --- a/programs/odbc-bridge/tests/CMakeLists.txt +++ b/programs/odbc-bridge/tests/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp ../validateODBCConnectionString.cpp) -target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io clickhouse_common_config) +target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io clickhouse_common_config loggers_no_text_log) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0f84dd35320..fede7d69105 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -638,6 +638,7 @@ if (ENABLE_TESTS) dbms clickhouse_common_config clickhouse_common_zookeeper + loggers hilite_comparator) if (TARGET ch_contrib::simdjson) diff --git a/src/Common/mysqlxx/tests/CMakeLists.txt b/src/Common/mysqlxx/tests/CMakeLists.txt index f62908ddcaf..53bee778470 100644 --- a/src/Common/mysqlxx/tests/CMakeLists.txt +++ b/src/Common/mysqlxx/tests/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp) -target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx clickhouse_common_config) +target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx clickhouse_common_config loggers_no_text_log) From 8d13461fb74fc991b73382d04a9bc7a9fd3425fa Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Jul 2024 09:03:37 +0000 Subject: [PATCH 0953/2361] Another fix. --- src/Storages/StorageDistributed.cpp | 7 ++----- src/Storages/StorageMerge.cpp | 8 ++++++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 9b417cda177..07892971ec2 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -847,11 +847,8 @@ void StorageDistributed::read( /** For distributed tables we do not need constants in header, since we don't send them to remote servers. * Moreover, constants can break some functions like `hostName` that are constants only for local queries. */ - if (processed_stage != QueryProcessingStage::Complete) - { - for (auto & column : header) - column.column = column.column->convertToFullColumnIfConst(); - } + for (auto & column : header) + column.column = column.column->convertToFullColumnIfConst(); modified_query_info.query = queryNodeToDistributedSelectQuery(query_tree_distributed); modified_query_info.query_tree = std::move(query_tree_distributed); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f5bc183931f..9962da3d6de 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -369,6 +369,14 @@ void StorageMerge::read( /// What will be result structure depending on query processed stage in source tables? 
Block common_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, local_context, processed_stage); + if (local_context->getSettingsRef().allow_experimental_analyzer && processed_stage == QueryProcessingStage::Complete) + { + /// Remove constants. + /// For StorageDistributed some functions like `hostName` that are constants only for local queries. + for (auto & column : common_header) + column.column = column.column->convertToFullColumnIfConst(); + } + auto step = std::make_unique( column_names, query_info, From 02bfe82192fa4aa6ebb3e7b9192ec6f334fbfc56 Mon Sep 17 00:00:00 2001 From: Blargian Date: Fri, 26 Jul 2024 11:19:46 +0200 Subject: [PATCH 0954/2361] rename filesystemFree to fiilesystemUnreserved --- docs/en/sql-reference/functions/other-functions.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index b7e4094f30e..79bffe00d01 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -2102,14 +2102,14 @@ Result: └─────────────────┘ ``` -## filesystemFree +## filesystemUnreserved -Returns the total amount of the free space on the filesystem hosting the database persistence. See also `filesystemAvailable` +Returns the total amount of the free space on the filesystem hosting the database persistence. (previously `filesystemFree`). See also [`filesystemAvailable`](#filesystemavailable). **Syntax** ```sql -filesystemFree() +filesystemUnreserved() ``` **Returned value** @@ -2121,7 +2121,7 @@ filesystemFree() Query: ```sql -SELECT formatReadableSize(filesystemFree()) AS "Free space"; +SELECT formatReadableSize(filesystemUnreserved()) AS "Free space"; ``` Result: From 434571d496a6ca6fc1b0038ead560572d0553ee5 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Fri, 26 Jul 2024 12:40:20 +0300 Subject: [PATCH 0955/2361] Added tests --- ...uter_join_to_inner_join_any_join.reference | 3 ++ ...vert_outer_join_to_inner_join_any_join.sql | 33 ++++++++++++++ ...ter_join_to_inner_join_anti_join.reference | 19 ++++++++ ...ert_outer_join_to_inner_join_anti_join.sql | 45 +++++++++++++++++++ 4 files changed, 100 insertions(+) create mode 100644 tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.reference create mode 100644 tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.sql create mode 100644 tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.reference create mode 100644 tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.sql diff --git a/tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.reference b/tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.reference new file mode 100644 index 00000000000..3d6a23045fb --- /dev/null +++ b/tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.reference @@ -0,0 +1,3 @@ +1 tx1 US +1 tx2 US +1 tx3 US diff --git a/tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.sql b/tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.sql new file mode 100644 index 00000000000..599875e90cf --- /dev/null +++ b/tests/queries/0_stateless/03210_convert_outer_join_to_inner_join_any_join.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS user_country; +DROP TABLE IF EXISTS user_transactions; + +CREATE TABLE user_country ( + user_id UInt64, + country String +) +ENGINE = 
ReplacingMergeTree +ORDER BY user_id; + +CREATE TABLE user_transactions ( + user_id UInt64, + transaction_id String +) +ENGINE = MergeTree +ORDER BY user_id; + +INSERT INTO user_country (user_id, country) VALUES (1, 'US'); +INSERT INTO user_transactions (user_id, transaction_id) VALUES (1, 'tx1'), (1, 'tx2'), (1, 'tx3'), (2, 'tx1'); + +-- Expected 3 rows, got only 1. Removing 'ANY' and adding 'FINAL' fixes +-- the issue (but it is not always possible). Moving filter by 'country' to +-- an outer query doesn't help. Query without filter by 'country' works +-- as expected (returns 3 rows). +SELECT * FROM user_transactions +ANY LEFT JOIN user_country USING (user_id) +WHERE + user_id = 1 + AND country = 'US' +ORDER BY ALL; + +DROP TABLE user_country; +DROP TABLE user_transactions; diff --git a/tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.reference b/tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.reference new file mode 100644 index 00000000000..d717a29ab23 --- /dev/null +++ b/tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.reference @@ -0,0 +1,19 @@ +DATA + â”â”â”â”â”â”â”â”â”â”â”â”┳â”â”â”â”â”â”â”â”â”â”â”┳â”â”â”â”┓ + ┃ c0 ┃ c1 ┃ c2 ┃ + ┡â”â”â”â”â”â”â”â”â”â”â”╇â”â”â”â”â”â”â”â”â”â”â”╇â”â”â”â”┩ +1. │ 826636805 │ 0 │ │ + ├───────────┼───────────┼────┤ +2. │ 0 │ 150808457 │ │ + └───────────┴───────────┴────┘ +NUMBER OF ROWS IN FIRST SHOULD BE EQUAL TO SECOND +FISRT + +SECOND +1 +TO DEBUG I TOOK JUST A SUBQUERY AND IT HAS 1 ROW +THIRD +1 +AND I ADDED SINGLE CONDITION THAT CONDITION <>0 THAT IS 1 IN THIRD QUERY AND IT HAS NO RESULT!!! +FOURTH +1 diff --git a/tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.sql b/tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.sql new file mode 100644 index 00000000000..77b1d52dd18 --- /dev/null +++ b/tests/queries/0_stateless/03211_convert_outer_join_to_inner_join_anti_join.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int32, c1 Int32, c2 String) ENGINE = Log() ; +INSERT INTO t0(c0, c1, c2) VALUES (826636805,0, ''), (0, 150808457, ''); + +SELECT 'DATA'; +SELECT * FROM t0 FORMAT PrettyMonoBlock; + +SELECT 'NUMBER OF ROWS IN FIRST SHOULD BE EQUAL TO SECOND'; + + +SELECT 'FISRT'; +SELECT left.c2 FROM t0 AS left +LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)) +WHERE (abs ((- ((sign (right_0.c1)))))); + +SELECT 'SECOND'; +SELECT SUM(check <> 0) +FROM +( + SELECT (abs ((- ((sign (right_0.c1)))))) AS `check` + FROM t0 AS left + LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)) +); + + +SELECT 'TO DEBUG I TOOK JUST A SUBQUERY AND IT HAS 1 ROW'; + +SELECT 'THIRD'; + +SELECT (abs ((- ((sign (right_0.c1)))))) AS `check` +FROM t0 AS left +LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)); + + +SELECT 'AND I ADDED SINGLE CONDITION THAT CONDITION <>0 THAT IS 1 IN THIRD QUERY AND IT HAS NO RESULT!!!'; + + +SELECT 'FOURTH'; +SELECT (abs ((- ((sign (right_0.c1)))))) AS `check` +FROM t0 AS left +LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)) +WHERE check <> 0; + +DROP TABLE t0; From b3828b038dbcc9c5cf71b99d58f06497c2af3bd6 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Fri, 26 Jul 2024 11:49:04 +0200 Subject: [PATCH 0956/2361] add `filesystemUnreserved` --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt 
b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 8e4e4fafe29..1a324b98ff4 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1622,6 +1622,7 @@ filesystem filesystemAvailable filesystemCapacity filesystemFree +filesystemUnreserved filesystems finalizeAggregation fips From ca9bf2c67c8ac16d4fd18f2def6e4d3dfea62971 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 11:53:48 +0200 Subject: [PATCH 0957/2361] Fix tidy --- src/Common/ZooKeeper/ZooKeeper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 1250e1273b9..7448d73cbbc 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -136,7 +136,7 @@ void ZooKeeper::init(ZooKeeperArgs args_, std::unique_ptr LOG_TRACE(log, "Initialized, hosts: {}, chroot: {}", fmt::join(args.hosts, ","), args.chroot); /// If the balancing strategy has an optimal node then it will be the first in the list - bool connected_to_suboptimal_node = node_idx && *node_idx != shuffled_hosts[0].original_index; + bool connected_to_suboptimal_node = node_idx && static_cast(*node_idx) != shuffled_hosts[0].original_index; bool respect_az = args.prefer_local_availability_zone && !args.client_availability_zone.empty(); bool may_benefit_from_reconnecting = respect_az || args.get_priority_load_balancing.hasOptimalNode(); if (connected_to_suboptimal_node && may_benefit_from_reconnecting) From 2519f9ed4252020c6a9fb21ef1410c87f4053200 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 26 Jul 2024 12:08:16 +0200 Subject: [PATCH 0958/2361] Only support archs --- contrib/numactl-cmake/CMakeLists.txt | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/contrib/numactl-cmake/CMakeLists.txt b/contrib/numactl-cmake/CMakeLists.txt index 5d086366c7f..a72ff11e485 100644 --- a/contrib/numactl-cmake/CMakeLists.txt +++ b/contrib/numactl-cmake/CMakeLists.txt @@ -1,4 +1,14 @@ -option (ENABLE_NUMACTL "Enable numactl" ${ENABLE_LIBRARIES}) +if (NOT ( + OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_LOONGARCH64)) +) + if (ENABLE_NUMACTL) + message (${RECONFIGURE_MESSAGE_LEVEL} + "numactl is disabled implicitly because the OS or architecture is not supported. 
Use -DENABLE_NUMACTL=0") + endif () + set (ENABLE_NUMACTL OFF) +else() + option (ENABLE_NUMACTL "Enable numactl" ${ENABLE_LIBRARIES}) +endif() if (NOT ENABLE_NUMACTL) message (STATUS "Not using numactl") From 72ebff825c0752e66cefa4f367ce43ff23d77703 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 26 Jul 2024 12:19:09 +0200 Subject: [PATCH 0959/2361] Reduce max time of 00763_long_lock_buffer_alter_destination_table --- ...ong_lock_buffer_alter_destination_table.sh | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh index 7e2384cfc52..c12b4426740 100755 --- a/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh +++ b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh @@ -16,18 +16,39 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE buffer_00763_1 (s String) ENGINE = Bu ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mt_00763_1 (x UInt32, s String) ENGINE = MergeTree ORDER BY x" ${CLICKHOUSE_CLIENT} --query="INSERT INTO mt_00763_1 VALUES (1, '1'), (2, '2'), (3, '3')" -function thread1() +function thread_alter() { - seq 1 300 | sed -r -e 's/.+/ALTER TABLE mt_00763_1 MODIFY column s UInt32; ALTER TABLE mt_00763_1 MODIFY column s String;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||: + local TIMELIMIT=$((SECONDS+$1)) + local it=0 + while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 300 ]; + do + it=$((it+1)) + $CLICKHOUSE_CLIENT --multiquery --ignore-error -q " + ALTER TABLE mt_00763_1 MODIFY column s UInt32; + ALTER TABLE mt_00763_1 MODIFY column s String; + " ||: + done } -function thread2() +function thread_query() { - seq 1 2000 | sed -r -e 's/.+/SELECT sum(length(s)) FROM buffer_00763_1;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)' + local TIMELIMIT=$((SECONDS+$1)) + local it=0 + while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 2000 ]; + do + it=$((it+1)) + $CLICKHOUSE_CLIENT --multiquery --ignore-error -q " + SELECT sum(length(s)) FROM buffer_00763_1; + " 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)' + done } -thread1 & -thread2 & +export -f thread_alter +export -f thread_query + +TIMEOUT=30 +thread_alter $TIMEOUT & +thread_query $TIMEOUT & wait From 98418120cd3167983b5436834d7c568cb42865af Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 26 Jul 2024 10:21:43 +0000 Subject: [PATCH 0960/2361] Add parallel integration test execution to doc --- tests/integration/README.md | 67 ++++++++++++++++++++++++++++++++++--- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/tests/integration/README.md b/tests/integration/README.md index cde4cb05aec..5d4fa407e3f 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -16,7 +16,7 @@ Don't use Docker from your system repository. * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest` * [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: -``` +```bash sudo -H pip install \ PyMySQL \ avro \ @@ -78,7 +78,7 @@ Notes: * Some tests maybe require a lot of resources (CPU, RAM, etc.). Better not try large tests like `test_distributed_ddl*` on your laptop. 
You can run tests via `./runner` script and pass pytest arguments as last arg: -``` +```bash $ ./runner --binary $HOME/ClickHouse/programs/clickhouse --odbc-bridge-binary $HOME/ClickHouse/programs/clickhouse-odbc-bridge --base-configs-dir $HOME/ClickHouse/programs/server/ 'test_ssl_cert_authentication -ss' Start tests ====================================================================================================== test session starts ====================================================================================================== @@ -102,7 +102,7 @@ test_ssl_cert_authentication/test.py::test_create_user PASSED ``` Path to binary and configs maybe specified via env variables: -``` +```bash $ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/ $ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse $ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge @@ -121,6 +121,63 @@ test_odbc_interaction/test.py ...... [100%] You can just open shell inside a container by overwritting the command: ./runner --command=bash +### Parallel test execution + +On the CI, we run a number of parallel runners (5 at the time of this writing), each on its own +Docker container. These runner containers spawn more containers for the services needed such as +ZooKeeper, MySQL, PostgreSQL and minio, among others. Within each runner, tests are parallelized +using [pytest-xdist](https://pytest-xdist.readthedocs.io/en/stable/). We're using `--dist=loadfile` +to [distribute the load](https://pytest-xdist.readthedocs.io/en/stable/distribution.html). In other +words: tests are grouped by module for test functions and by class for test methods. This means that +any test within the same module (or any class) will never execute their tests in parallel. They'll +be executed on the same worker one after the other. + +If the test supports parallel and repeated execution, you can run a bunch of them in parallel to +look for flakiness. We use [pytest-repeat](https://pypi.org/project/pytest-repeat/) to set the +number of times we want to execute a test through the `--count` argument. Then, `-n` sets the number +of parallel workers for `pytest-xdist`. 
+ +```bash +$ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/ +$ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse +$ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge +$ ./runner 'test_storage_s3_queue/test.py::test_max_set_age -- --count 10 -n 5' +Start tests +=============================================================================== test session starts ================================================================================ +platform linux -- Python 3.10.12, pytest-7.4.4, pluggy-1.5.0 -- /usr/bin/python3 +cachedir: .pytest_cache +rootdir: /ClickHouse/tests/integration +configfile: pytest.ini +plugins: reportlog-0.4.0, xdist-3.5.0, random-0.2, repeat-0.9.3, order-1.0.0, timeout-2.2.0 +timeout: 900.0s +timeout method: signal +timeout func_only: False +5 workers [10 items] +scheduling tests via LoadScheduling + +test_storage_s3_queue/test.py::test_max_set_age[9-10] +test_storage_s3_queue/test.py::test_max_set_age[7-10] +test_storage_s3_queue/test.py::test_max_set_age[5-10] +test_storage_s3_queue/test.py::test_max_set_age[1-10] +test_storage_s3_queue/test.py::test_max_set_age[3-10] +[gw3] [ 10%] PASSED test_storage_s3_queue/test.py::test_max_set_age[7-10] +test_storage_s3_queue/test.py::test_max_set_age[8-10] +[gw4] [ 20%] PASSED test_storage_s3_queue/test.py::test_max_set_age[9-10] +test_storage_s3_queue/test.py::test_max_set_age[10-10] +[gw0] [ 30%] PASSED test_storage_s3_queue/test.py::test_max_set_age[1-10] +test_storage_s3_queue/test.py::test_max_set_age[2-10] +[gw1] [ 40%] PASSED test_storage_s3_queue/test.py::test_max_set_age[3-10] +test_storage_s3_queue/test.py::test_max_set_age[4-10] +[gw2] [ 50%] PASSED test_storage_s3_queue/test.py::test_max_set_age[5-10] +test_storage_s3_queue/test.py::test_max_set_age[6-10] +[gw3] [ 60%] PASSED test_storage_s3_queue/test.py::test_max_set_age[8-10] +[gw4] [ 70%] PASSED test_storage_s3_queue/test.py::test_max_set_age[10-10] +[gw0] [ 80%] PASSED test_storage_s3_queue/test.py::test_max_set_age[2-10] +[gw1] [ 90%] PASSED test_storage_s3_queue/test.py::test_max_set_age[4-10] +[gw2] [100%] PASSED test_storage_s3_queue/test.py::test_max_set_age[6-10] +========================================================================== 10 passed in 120.65s (0:02:00) ========================================================================== +``` + ### Rebuilding the docker containers The main container used for integration tests lives in `docker/test/integration/base/Dockerfile`. Rebuild it with @@ -149,7 +206,7 @@ will automagically detect the types of variables and only the small diff of two If tests failing for mysterious reasons, this may help: -``` +```bash sudo service docker stop sudo bash -c 'rm -rf /var/lib/docker/*' sudo service docker start @@ -159,6 +216,6 @@ sudo service docker start On Ubuntu 20.10 and later in host network mode (default) one may encounter problem with nested containers not seeing each other. It happens because legacy and nftables rules are out of sync. 
Problem can be solved by: -``` +```bash sudo iptables -P FORWARD ACCEPT ``` From 7f80dab6927316f5c6c56e51ba439d01161f7567 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 26 Jul 2024 12:34:36 +0200 Subject: [PATCH 0961/2361] CI push --- docker/test/util/process_functional_tests_result.py | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index dbe50eeade0..3da1a8f3674 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -180,7 +180,6 @@ def process_result(result_path, broken_tests): for result in test_results: if result[1] == "FAIL": result[1] = "SERVER_DIED" - test_results.append(["Server died", "FAIL", "0", ""]) elif not success_finish: description = "Tests are not finished, " From 60cca77c8a415142fe6f181b25aaed84232ea3c0 Mon Sep 17 00:00:00 2001 From: Blargian Date: Fri, 26 Jul 2024 12:39:35 +0200 Subject: [PATCH 0962/2361] add example for materialize function --- .../functions/other-functions.md | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index b7e4094f30e..797607e552a 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -346,7 +346,9 @@ Result: ## materialize Turns a constant into a full column containing a single value. -Full columns and constants are represented differently in memory. Functions usually execute different code for normal and constant arguments, although the result should typically be the same. This function can be used to debug this behavior. +Full columns and constants are represented differently in memory. +Functions usually execute different code for normal and constant arguments, although the result should typically be the same. +This function can be used to debug this behavior. **Syntax** @@ -354,6 +356,34 @@ Full columns and constants are represented differently in memory. Functions usua materialize(x) ``` +**Parameters** + +- `x` — A constant. [Constant](../functions/index.md/#constants). + +**Returned value** + +- A column containing a single value `x`. + +**Example** + +In the example below the `countMatches` function expects a constant second argument. +This behaviour can be debugged by using the `materialize` function to turn a constant into a full column, +verifying that the function throws an error for a non-constant argument. + +Query: + +```sql +SELECT countMatches('foobarfoo', 'foo'); +SELECT countMatches('foobarfoo', materialize('foo')); +``` + +Result: + +```response +2 +Code: 44. DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #2 'pattern' of function countMatches, expected constant String, got String +``` + ## ignore Accepts any arguments, including `NULL` and does nothing. Always returns 0. 
From 1ebafccc13ea69ba06e2450014fd15d39facdcaa Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Fri, 26 Jul 2024 12:42:07 +0200 Subject: [PATCH 0963/2361] add `joinGetOrNull` --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 7de065cc589..182e1d2cb33 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1862,6 +1862,7 @@ jdbc jemalloc jeprof joinGet +joinGetOrNull json jsonMergePatch jsonasstring From 73e71e7b7a476c7073154a112dd12815bbe49bfe Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 08:45:43 +0000 Subject: [PATCH 0964/2361] log --- src/Server/HTTP/HTTPServerConnection.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Server/HTTP/HTTPServerConnection.cpp b/src/Server/HTTP/HTTPServerConnection.cpp index 8eb2ecb1224..39e066005b9 100644 --- a/src/Server/HTTP/HTTPServerConnection.cpp +++ b/src/Server/HTTP/HTTPServerConnection.cpp @@ -2,6 +2,7 @@ #include #include +#include namespace DB { @@ -101,7 +102,10 @@ void HTTPServerConnection::run() { /// Do not spam logs with messages related to connection reset by peer. if (e.code() == POCO_ENOTCONN) + { + LOG_DEBUG(LogFrequencyLimiter(getLogger("HTTPServerConnection"), 10), "Connection reset by peer while processing HTTP request: {}", e.message()); break; + } if (session.networkException()) session.networkException()->rethrow(); From 3f1dbdfce978bab2b2ce2aedecdbb5afbf54c4a0 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 26 Jul 2024 11:01:10 +0000 Subject: [PATCH 0965/2361] Clarify documentation --- tests/integration/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/README.md b/tests/integration/README.md index 5d4fa407e3f..ab984b7bd04 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -127,10 +127,10 @@ On the CI, we run a number of parallel runners (5 at the time of this writing), Docker container. These runner containers spawn more containers for the services needed such as ZooKeeper, MySQL, PostgreSQL and minio, among others. Within each runner, tests are parallelized using [pytest-xdist](https://pytest-xdist.readthedocs.io/en/stable/). We're using `--dist=loadfile` -to [distribute the load](https://pytest-xdist.readthedocs.io/en/stable/distribution.html). In other -words: tests are grouped by module for test functions and by class for test methods. This means that -any test within the same module (or any class) will never execute their tests in parallel. They'll -be executed on the same worker one after the other. +to [distribute the load](https://pytest-xdist.readthedocs.io/en/stable/distribution.html). In the +documentation words: this guarantees that all tests in a file run in the same worker. This means +that any test within the same file will never execute their tests in parallel. They'll be executed +on the same worker one after the other. If the test supports parallel and repeated execution, you can run a bunch of them in parallel to look for flakiness. 
We use [pytest-repeat](https://pypi.org/project/pytest-repeat/) to set the From 1225d50508ad0885dca3367b08c15f54c65b02f6 Mon Sep 17 00:00:00 2001 From: serxa Date: Fri, 26 Jul 2024 11:09:48 +0000 Subject: [PATCH 0966/2361] Do not count AttachedTable for tables in information schema databases --- src/Databases/DatabasesCommon.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index d2926c64f29..b8e9231f5c6 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -289,9 +289,7 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n tables.erase(it); table_storage->is_detached = true; - if (!table_storage->isSystemStorage() - && database_name != DatabaseCatalog::SYSTEM_DATABASE - && database_name != DatabaseCatalog::TEMPORARY_DATABASE) + if (!table_storage->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name)) { LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name); CurrentMetrics::sub(getAttachedCounterForStorage(table_storage)); @@ -339,9 +337,7 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c /// non-Atomic database the is_detached is set to true before RENAME. table->is_detached = false; - if (!table->isSystemStorage() - && database_name != DatabaseCatalog::SYSTEM_DATABASE - && database_name != DatabaseCatalog::TEMPORARY_DATABASE) + if (!table->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name)) { LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name); CurrentMetrics::add(getAttachedCounterForStorage(table)); From 0cf0437196dfe4ee0f489ecc040b71e42e1f1a22 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 25 Jul 2024 16:36:32 +0200 Subject: [PATCH 0967/2361] Use separate client context in clickhouse-local --- programs/client/Client.cpp | 29 ++++----- programs/client/Client.h | 1 - programs/local/LocalServer.cpp | 28 +++++--- programs/local/LocalServer.h | 4 +- src/Client/ClientBase.cpp | 113 ++++++++++++++++++--------------- src/Client/ClientBase.h | 6 ++ 6 files changed, 102 insertions(+), 79 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 887c5cb86bc..f2919db0308 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -209,8 +209,8 @@ std::vector Client::loadWarningMessages() {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), false, {}); + &client_context->getSettingsRef(), + &client_context->getClientInfo(), false, {}); while (true) { Packet packet = connection->receivePacket(); @@ -306,9 +306,6 @@ void Client::initialize(Poco::Util::Application & self) if (env_password && !config().has("password")) config().setString("password", env_password); - // global_context->setApplicationType(Context::ApplicationType::CLIENT); - global_context->setQueryParameters(query_parameters); - /// settings and limits could be specified in config file, but passed settings has higher priority for (const auto & setting : global_context->getSettingsRef().allUnchanged()) { @@ -382,7 +379,7 @@ try showWarnings(); /// Set user password complexity rules - auto & access_control = global_context->getAccessControl(); + auto & access_control = client_context->getAccessControl(); 
access_control.setPasswordComplexityRules(connection->getPasswordComplexityRules()); if (is_interactive && !delayed_interactive) @@ -459,7 +456,7 @@ void Client::connect() << connection_parameters.host << ":" << connection_parameters.port << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl; - connection = Connection::createConnection(connection_parameters, global_context); + connection = Connection::createConnection(connection_parameters, client_context); if (max_client_network_bandwidth) { @@ -528,7 +525,7 @@ void Client::connect() } } - if (!global_context->getSettingsRef().use_client_time_zone) + if (!client_context->getSettingsRef().use_client_time_zone) { const auto & time_zone = connection->getServerTimezone(connection_parameters.timeouts); if (!time_zone.empty()) @@ -611,7 +608,7 @@ void Client::printChangedSettings() const } }; - print_changes(global_context->getSettingsRef().changes(), "settings"); + print_changes(client_context->getSettingsRef().changes(), "settings"); print_changes(cmd_merge_tree_settings.changes(), "MergeTree settings"); } @@ -709,7 +706,7 @@ bool Client::processWithFuzzing(const String & full_query) { const char * begin = full_query.data(); orig_ast = parseQuery(begin, begin + full_query.size(), - global_context->getSettingsRef(), + client_context->getSettingsRef(), /*allow_multi_statements=*/ true); } catch (const Exception & e) @@ -733,7 +730,7 @@ bool Client::processWithFuzzing(const String & full_query) } // Kusto is not a subject for fuzzing (yet) - if (global_context->getSettingsRef().dialect == DB::Dialect::kusto) + if (client_context->getSettingsRef().dialect == DB::Dialect::kusto) { return true; } @@ -1072,6 +1069,11 @@ void Client::processOptions(const OptionsDescription & options_description, global_context->makeGlobalContext(); global_context->setApplicationType(Context::ApplicationType::CLIENT); + /// In case of clickhouse-client the `client_context` can be just an alias for the `global_context`. + /// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.) + client_context = global_context; + initClientContext(); + global_context->setSettings(cmd_settings); /// Copy settings-related program options to config. 
@@ -1205,11 +1207,6 @@ void Client::processConfig() pager = config().getString("pager", ""); setDefaultFormatsAndCompressionFromConfiguration(); - - global_context->setClientName(std::string(DEFAULT_CLIENT_NAME)); - global_context->setQueryKindInitial(); - global_context->setQuotaClientKey(config().getString("quota_key", "")); - global_context->setQueryKind(query_kind); } diff --git a/programs/client/Client.h b/programs/client/Client.h index 6d57a6ea648..ff71b36dbf3 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -19,7 +19,6 @@ public: int main(const std::vector & /*args*/) override; protected: - Poco::Util::LayeredConfiguration & getClientConfiguration() override; bool processWithFuzzing(const String & full_query) override; diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 48e0cca7b73..e60c8ef6085 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -295,6 +295,8 @@ void LocalServer::cleanup() if (suggest) suggest.reset(); + client_context.reset(); + if (global_context) { global_context->shutdown(); @@ -436,7 +438,7 @@ void LocalServer::connect() in = input.get(); } connection = LocalConnection::createConnection( - connection_parameters, global_context, in, need_render_progress, need_render_profile_events, server_display_name); + connection_parameters, client_context, in, need_render_progress, need_render_profile_events, server_display_name); } @@ -497,8 +499,6 @@ try initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default"))); ASTAlterCommand::setFormatAlterCommandsWithParentheses(true); - applyCmdSettings(global_context); - /// try to load user defined executable functions, throw on error and die try { @@ -510,6 +510,11 @@ try throw; } + /// Must be called after we stopped initializing the global context and changing its settings. + /// After this point the global context must be stayed almost unchanged till shutdown, + /// and all necessary changes must be made to the client context instead. + createClientContext(); + if (is_interactive) { clearTerminal(); @@ -730,11 +735,12 @@ void LocalServer::processConfig() /// there is separate context for Buffer tables). adjustSettings(); applySettingsOverridesForLocal(global_context); - applyCmdOptions(global_context); /// Load global settings from default_profile and system_profile. global_context->setDefaultProfiles(getClientConfiguration()); + applyCmdOptions(global_context); + /// We load temporary database first, because projections need it. DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); @@ -778,10 +784,6 @@ void LocalServer::processConfig() server_display_name = getClientConfiguration().getString("display_name", ""); prompt_by_server_display_name = getClientConfiguration().getRawString("prompt_by_server_display_name.default", ":) "); - - global_context->setQueryKindInitial(); - global_context->setQueryKind(query_kind); - global_context->setQueryParameters(query_parameters); } @@ -860,6 +862,16 @@ void LocalServer::applyCmdOptions(ContextMutablePtr context) } +void LocalServer::createClientContext() +{ + /// In case of clickhouse-local it's necessary to use a separate context for client-related purposes. + /// We can't just change the global context because it is used in background tasks (for example, in merges) + /// which don't expect that the global context can suddenly change. 
+ client_context = Context::createCopy(global_context); + initClientContext(); +} + + void LocalServer::processOptions(const OptionsDescription &, const CommandLineOptions & options, const std::vector &, const std::vector &) { if (options.count("table")) diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 0715f358313..ae9980311e1 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -31,7 +31,6 @@ public: int main(const std::vector & /*args*/) override; protected: - Poco::Util::LayeredConfiguration & getClientConfiguration() override; void connect() override; @@ -50,7 +49,6 @@ protected: void processConfig() override; void readArguments(int argc, char ** argv, Arguments & common_arguments, std::vector &, std::vector &) override; - void updateLoggerLevel(const String & logs_level) override; private: @@ -67,6 +65,8 @@ private: void applyCmdOptions(ContextMutablePtr context); void applyCmdSettings(ContextMutablePtr context); + void createClientContext(); + ServerSettings server_settings; std::optional status; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 13dce05cabc..50cc6b98b81 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -467,7 +467,7 @@ void ClientBase::sendExternalTables(ASTPtr parsed_query) std::vector data; for (auto & table : external_tables) - data.emplace_back(table.getData(global_context)); + data.emplace_back(table.getData(client_context)); connection->sendExternalTablesData(data); } @@ -680,10 +680,10 @@ try /// intermixed with data with parallel formatting. /// It may increase code complexity significantly. if (!extras_into_stdout || select_only_into_file) - output_format = global_context->getOutputFormatParallelIfPossible( + output_format = client_context->getOutputFormatParallelIfPossible( current_format, out_file_buf ? *out_file_buf : *out_buf, block); else - output_format = global_context->getOutputFormat( + output_format = client_context->getOutputFormat( current_format, out_file_buf ? *out_file_buf : *out_buf, block); output_format->setAutoFlush(); @@ -762,6 +762,15 @@ void ClientBase::adjustSettings() global_context->setSettings(settings); } +void ClientBase::initClientContext() +{ + client_context->setClientName(std::string(DEFAULT_CLIENT_NAME)); + client_context->setQuotaClientKey(getClientConfiguration().getString("quota_key", "")); + client_context->setQueryKindInitial(); + client_context->setQueryKind(query_kind); + client_context->setQueryParameters(query_parameters); +} + bool ClientBase::isRegularFile(int fd) { struct stat file_stat; @@ -952,7 +961,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) /// client-side. Thus we need to parse the query. const char * begin = full_query.data(); auto parsed_query = parseQuery(begin, begin + full_query.size(), - global_context->getSettingsRef(), + client_context->getSettingsRef(), /*allow_multi_statements=*/ false); if (!parsed_query) @@ -975,7 +984,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) /// But for asynchronous inserts we don't extract data, because it's needed /// to be done on server side in that case (for coalescing the data from multiple inserts on server side). 
const auto * insert = parsed_query->as(); - if (insert && isSyncInsertWithData(*insert, global_context)) + if (insert && isSyncInsertWithData(*insert, client_context)) query_to_execute = full_query.substr(0, insert->data - full_query.data()); else query_to_execute = full_query; @@ -1093,7 +1102,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa } } - const auto & settings = global_context->getSettingsRef(); + const auto & settings = client_context->getSettingsRef(); const Int32 signals_before_stop = settings.partial_result_on_first_cancel ? 2 : 1; int retries_left = 10; @@ -1108,10 +1117,10 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + client_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &client_context->getSettingsRef(), + &client_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); @@ -1298,7 +1307,7 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { - global_context->setSetting("session_timezone", tz); + client_context->setSetting("session_timezone", tz); } @@ -1494,13 +1503,13 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de void ClientBase::setInsertionTable(const ASTInsertQuery & insert_query) { - if (!global_context->hasInsertionTable() && insert_query.table) + if (!client_context->hasInsertionTable() && insert_query.table) { String table = insert_query.table->as().shortName(); if (!table.empty()) { String database = insert_query.database ? insert_query.database->as().shortName() : ""; - global_context->setInsertionTable(StorageID(database, table)); + client_context->setInsertionTable(StorageID(database, table)); } } } @@ -1551,7 +1560,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars const auto & parsed_insert_query = parsed_query->as(); if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && !isStdinNotEmptyAndValid(std_in)))) { - const auto & settings = global_context->getSettingsRef(); + const auto & settings = client_context->getSettingsRef(); if (settings.throw_if_no_data_to_insert) throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert"); else @@ -1565,10 +1574,10 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + client_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &client_context->getSettingsRef(), + &client_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); @@ -1616,7 +1625,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des /// Set callback to be called on file progress. 
if (tty_buf) - progress_indication.setFileProgressCallback(global_context, *tty_buf); + progress_indication.setFileProgressCallback(client_context, *tty_buf); } /// If data fetched from file (maybe compressed file) @@ -1650,10 +1659,10 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des } StorageFile::CommonArguments args{ - WithContext(global_context), + WithContext(client_context), parsed_insert_query->table_id, current_format, - getFormatSettings(global_context), + getFormatSettings(client_context), compression_method, columns_for_storage_file, ConstraintsDescription{}, @@ -1661,7 +1670,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des {}, String{}, }; - StoragePtr storage = std::make_shared(in_file, global_context->getUserFilesPath(), args); + StoragePtr storage = std::make_shared(in_file, client_context->getUserFilesPath(), args); storage->startup(); SelectQueryInfo query_info; @@ -1672,16 +1681,16 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des storage->read( plan, sample.getNames(), - storage->getStorageSnapshot(metadata, global_context), + storage->getStorageSnapshot(metadata, client_context), query_info, - global_context, + client_context, {}, - global_context->getSettingsRef().max_block_size, + client_context->getSettingsRef().max_block_size, getNumberOfPhysicalCPUCores()); auto builder = plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(global_context), - BuildQueryPipelineSettings::fromContext(global_context)); + QueryPlanOptimizationSettings::fromContext(client_context), + BuildQueryPipelineSettings::fromContext(client_context)); QueryPlanResourceHolder resources; auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); @@ -1742,14 +1751,14 @@ void ClientBase::sendDataFrom(ReadBuffer & buf, Block & sample, const ColumnsDes current_format = insert->format; } - auto source = global_context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); + auto source = client_context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); Pipe pipe(source); if (columns_description.hasDefaults()) { pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, columns_description, *source, global_context); + return std::make_shared(header, columns_description, *source, client_context); }); } @@ -1911,12 +1920,12 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (is_interactive) { - global_context->setCurrentQueryId(""); + client_context->setCurrentQueryId(""); // Generate a new query_id for (const auto & query_id_format : query_id_formats) { writeString(query_id_format.first, std_out); - writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", global_context->getCurrentQueryId())), std_out); + writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", client_context->getCurrentQueryId())), std_out); writeChar('\n', std_out); std_out.next(); } @@ -1943,7 +1952,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin auto password = auth_data->getPassword(); if (password) - global_context->getAccessControl().checkPasswordComplexityRules(*password); + client_context->getAccessControl().checkPasswordComplexityRules(*password); } } } @@ -1958,15 +1967,15 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin std::optional old_settings; 
SCOPE_EXIT_SAFE({ if (old_settings) - global_context->setSettings(*old_settings); + client_context->setSettings(*old_settings); }); auto apply_query_settings = [&](const IAST & settings_ast) { if (!old_settings) - old_settings.emplace(global_context->getSettingsRef()); - global_context->applySettingsChanges(settings_ast.as()->changes); - global_context->resetSettingsToDefaultValue(settings_ast.as()->default_settings); + old_settings.emplace(client_context->getSettingsRef()); + client_context->applySettingsChanges(settings_ast.as()->changes); + client_context->resetSettingsToDefaultValue(settings_ast.as()->default_settings); }; const auto * insert = parsed_query->as(); @@ -1999,7 +2008,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (insert && insert->select) insert->tryFindInputFunction(input_function); - bool is_async_insert_with_inlined_data = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); + bool is_async_insert_with_inlined_data = client_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); if (is_async_insert_with_inlined_data) { @@ -2034,9 +2043,9 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (change.name == "profile") current_profile = change.value.safeGet(); else - global_context->applySettingChange(change); + client_context->applySettingChange(change); } - global_context->resetSettingsToDefaultValue(set_query->default_settings); + client_context->resetSettingsToDefaultValue(set_query->default_settings); /// Query parameters inside SET queries should be also saved on the client side /// to override their previous definitions set with --param_* arguments @@ -2044,7 +2053,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin for (const auto & [name, value] : set_query->query_parameters) query_parameters.insert_or_assign(name, value); - global_context->addQueryParameters(NameToNameMap{set_query->query_parameters.begin(), set_query->query_parameters.end()}); + client_context->addQueryParameters(NameToNameMap{set_query->query_parameters.begin(), set_query->query_parameters.end()}); } if (const auto * use_query = parsed_query->as()) { @@ -2121,8 +2130,8 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( if (this_query_begin >= all_queries_end) return MultiQueryProcessingStage::QUERIES_END; - unsigned max_parser_depth = static_cast(global_context->getSettingsRef().max_parser_depth); - unsigned max_parser_backtracks = static_cast(global_context->getSettingsRef().max_parser_backtracks); + unsigned max_parser_depth = static_cast(client_context->getSettingsRef().max_parser_depth); + unsigned max_parser_backtracks = static_cast(client_context->getSettingsRef().max_parser_backtracks); // If there are only comments left until the end of file, we just // stop. The parser can't handle this situation because it always @@ -2142,7 +2151,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( try { parsed_query = parseQuery(this_query_end, all_queries_end, - global_context->getSettingsRef(), + client_context->getSettingsRef(), /*allow_multi_statements=*/ true); } catch (const Exception & e) @@ -2185,7 +2194,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( { this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end); insert_ast->end = this_query_end; - query_to_execute_end = isSyncInsertWithData(*insert_ast, global_context) ? 
insert_ast->data : this_query_end; + query_to_execute_end = isSyncInsertWithData(*insert_ast, client_context) ? insert_ast->data : this_query_end; } query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), query_to_execute_end - this_query_begin); @@ -2387,13 +2396,13 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) // , where the inline data is delimited by semicolon and not by a // newline. auto * insert_ast = parsed_query->as(); - if (insert_ast && isSyncInsertWithData(*insert_ast, global_context)) + if (insert_ast && isSyncInsertWithData(*insert_ast, client_context)) { this_query_end = insert_ast->end; adjustQueryEnd( this_query_end, all_queries_end, - static_cast(global_context->getSettingsRef().max_parser_depth), - static_cast(global_context->getSettingsRef().max_parser_backtracks)); + static_cast(client_context->getSettingsRef().max_parser_depth), + static_cast(client_context->getSettingsRef().max_parser_backtracks)); } // Report error. @@ -2523,10 +2532,10 @@ void ClientBase::runInteractive() if (load_suggestions) { /// Load suggestion data from the server. - if (global_context->getApplicationType() == Context::ApplicationType::CLIENT) - suggest->load(global_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); - else if (global_context->getApplicationType() == Context::ApplicationType::LOCAL) - suggest->load(global_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); + if (client_context->getApplicationType() == Context::ApplicationType::CLIENT) + suggest->load(client_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); + else if (client_context->getApplicationType() == Context::ApplicationType::LOCAL) + suggest->load(client_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); } if (home_path.empty()) @@ -2664,7 +2673,7 @@ void ClientBase::runInteractive() { // If a separate connection loading suggestions failed to open a new session, // use the main session to receive them. - suggest->load(*connection, connection_parameters.timeouts, getClientConfiguration().getInt("suggestion_limit"), global_context->getClientInfo()); + suggest->load(*connection, connection_parameters.timeouts, getClientConfiguration().getInt("suggestion_limit"), client_context->getClientInfo()); } try @@ -2713,10 +2722,10 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name) if (!getClientConfiguration().has("log_comment")) { - Settings settings = global_context->getSettings(); + Settings settings = client_context->getSettings(); /// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]" settings.log_comment = fs::absolute(fs::path(file_name)); - global_context->setSettings(settings); + client_context->setSettings(settings); } return executeMultiQuery(queries_from_file); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 4f500a4c45d..be74090b84d 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -206,6 +206,9 @@ protected: /// Adjust some settings after command line options and config had been processed. void adjustSettings(); + /// Initializes the client context. 
+ void initClientContext(); + void setDefaultFormatsAndCompressionFromConfiguration(); void initTTYBuffer(ProgressOption progress); @@ -215,6 +218,9 @@ protected: SharedContextHolder shared_context; ContextMutablePtr global_context; + /// Client context is a context used only by the client to parse queries, process query parameters and to connect to clickhouse-server. + ContextMutablePtr client_context; + LoggerPtr fatal_log; Poco::AutoPtr fatal_channel_ptr; Poco::AutoPtr fatal_console_channel_ptr; From 3cf2ec36ca31964a5a57717d5645f5e5a287dd00 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 11:36:54 +0000 Subject: [PATCH 0968/2361] Verbose output for 03203_client_benchmark_options --- .../03203_client_benchmark_options.sh | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/03203_client_benchmark_options.sh b/tests/queries/0_stateless/03203_client_benchmark_options.sh index a9b9d69822b..475309cebb9 100755 --- a/tests/queries/0_stateless/03203_client_benchmark_options.sh +++ b/tests/queries/0_stateless/03203_client_benchmark_options.sh @@ -4,10 +4,23 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -t -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1 | grep -q "^2\." && echo "Ok" || echo "Fail" -${CLICKHOUSE_CLIENT} --time -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1 | grep -q "^2\." && echo "Ok" || echo "Fail" -${CLICKHOUSE_CLIENT} --memory-usage -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "^[0-9]\+$" && echo "Ok" || echo "Fail" -${CLICKHOUSE_CLIENT} --memory-usage=none -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" # expected no output -${CLICKHOUSE_CLIENT} --memory-usage=default -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "^[0-9]\+$" && echo "Ok" || echo "Fail" -${CLICKHOUSE_CLIENT} --memory-usage=readable -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "^[0-9].*B$" && echo "Ok" || echo "Fail" -${CLICKHOUSE_CLIENT} --memory-usage=unknown -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1 | grep -q "BAD_ARGUMENTS" && echo "Ok" || echo "Fail" +output=$(${CLICKHOUSE_CLIENT} -t -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) +echo "$output" | grep -q "^2\." && echo "Ok" || { echo "Fail"; echo "$output"; } + +output=$(${CLICKHOUSE_CLIENT} --time -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) +echo "$output" | grep -q "^2\." 
&& echo "Ok" || { echo "Fail"; echo "$output"; } + +output=$(${CLICKHOUSE_CLIENT} --memory-usage -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) +echo "$output" | grep -q "^[0-9]\+$" && echo "Ok" || { echo "Fail"; echo "$output"; } + +output=$(${CLICKHOUSE_CLIENT} --memory-usage=none -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) +echo -n "$output" # expected no output + +output=$(${CLICKHOUSE_CLIENT} --memory-usage=default -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) +echo "$output" | grep -q "^[0-9]\+$" && echo "Ok" || { echo "Fail"; echo "$output"; } + +output=$(${CLICKHOUSE_CLIENT} --memory-usage=readable -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) +echo "$output" | grep -q "^[0-9].*B$" && echo "Ok" || { echo "Fail"; echo "$output"; } + +output=$(${CLICKHOUSE_CLIENT} --memory-usage=unknown -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) +echo "$output" | grep -q "BAD_ARGUMENTS" && echo "Ok" || { echo "Fail"; echo "$output"; } From 28e991708be1facd87c3760f7929cd5ddc299805 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Fri, 26 Jul 2024 13:45:57 +0200 Subject: [PATCH 0969/2361] squash! added somme tests in relation with https://github.com/ClickHouse/ClickHouse/pull/54881 with new behaviour when enable_named_columns_in_function_tuple=1 (default value) --- .../0_stateless/00309_formats.reference | Bin 18666 -> 20353 bytes tests/queries/0_stateless/00309_formats.sql | 5 ----- 2 files changed, 5 deletions(-) diff --git a/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference index f3ea45520bb50fb936caf6724e9fedf3cdd00b75..cab311692be229716b58af39079275d3942b01cc 100644 GIT binary patch literal 20353 zcmeHP3wIP%5{6aK9zDL_&p18;axq=E-!KeHeM!>@7a6Ht;zJ#Jk0d+*gt^JbdYKDu4%*X9d$a^ZOYUl#FBHF2yr}q<;@67HiYtm^#Z|>iir*-% zDXuF{6fY}&tN5Mb_lj2(e^C5U@h8Qria#snd+xpO{s$g>=;22meeCfko}78A|MWA@J~z8>|MLgJ!58LIidkNiRoxsq zJUB8u`r@(UFP(V#6D$6wrgio0oJnBji=r;A-&v0K-- z%ZqMZ-|En<>suYVb$zQtx30$|*-G(uTzhnP-g&=^d;Asr39~@AuK#;~vDmn-A16x* zR`tnxBCGmdyM|AAwVs$&ebTP#ldV?ulqNAv(}O7;BueJfKp}=K;v>=KamanSU6UWrDN?_JXVk8gMI`72?`P<#P@i7&<`pS zWF+WF5R#xIK}v#_1ThI}667T4Nf4BvC_z$!rUX$5suE-+=t>Zlpe#XJg0=*43F;E$ zCFn~Kn4mC0VuHp5kqIgjWG3iL5SpMgL28251hENf6XYi7O%R-*I6-oP<^<6RsuN@< z=uQxxpgci(g7yUQ3F;H%C+JUb0Ko$U7Z7|va00;#1UC@;KyU=X69iWfd_iyq!5aj3 z5d48~#s?7ye?<5r!XFX-i10^*KO+1Q;g1M^M8t7~KO+1Q;g1M^MEE1Z9})hD9M2rk z64B_#G0j2EQO#k^am|4Ze?<5r!XFX-i0}tWF!-P!{1M@g2!BNQBf=jM{)q5Lgg+wu z5#f&re?<5r!XFX-i10^*KO+1Q;g1M^#G){NzzgHs7A`ETZCepq_933?i~U2rWWJXqI8KuOcyVRaKQz-H_k$cyu3VDiwye}g+f_bcJ1@D;TzuN5 zD@s+avnf(-W3}#V+EUgwouv-u47MnGiFGeI6M7J0X>pSftOBKaoh3fnm@GvczL`Ta z^L_N{6Fw0i&Ve~T1wJJ{6+SgSO&=q%!s~pgh07?nk!+)EGuh^{Eo58Dw&Hr*^k(>Z zybQ>^Hqtrc{32`%3@;;YAm1kG-$5QXD0j)XNqnD@?1}G`_*RMUmH1|f@0J>DqOcTU z*dnqtku4%yM7D@*5!oWLMaC8xTV$doTV!mJu|>uf8Czs*k+DV27CBqwY>|unY>~4? 
z&K5aasMw-ni;68Owy4;mVvC9`Dz>QDqGpSlEo!!?*`j8Pnk{O!sM(@si<&JOwrJR* zVT*<>8n$TIqG5}MEgH6H*a8(t*|H6)jlxDpGfP)fUuE8&1TrL@Sn5)r6Ya*MU7eL0wb!Wn76AR5ztX z#+6_}jZ<1=T!|M{I=Mw62Gvg4mvJR%Q1g@)8CT*4l}~AraV2z6|Kt`49@Ic(U&fXA zK_ygLWLya$)I+&Nf(SKH*_Ux8j!+qu78zGU3H4EFk#QxOP$lISi6>M_WnadXkV3sw zT4Y>_DpXCSg~!FbC|e#E)l=EBEh?z8Wm{BHWy|BDk}6xVZKpC4TZt>wRQasL73!+8 zFSZg_sIE$j*tQdl#8s5ILYJI}Nejl>nIuG~}N3YAyc7h8!d)L*4V z#+A526;@hgT!|~xV!1`)3iVjom;05tLRD5;5#VP`KA{>G~DrX+|J4NE4^tt78W7My1Ewbk!9(?&_}d~o*M=)&sS=v zm5YR#+gx;@nz8VMIrYYX7g?<00RC^)91khQIPJmG@OrgrFt^k;LYv_Lo5d@`!DxB3 zGDLsD_K3_;G)I{^%FR(>j!JV>nWNeqHPfRx<;2)BT_0Y1Xf&A9h!Y`)0|nvDH@x8n zHr{gEJg2=CS+6-2Y@=}a^V*~wFA#5d<+f6RtRKlbl5c8*WMo}Q)|X_RN!FWW-AUK{ zDc6tAH={hZdv7)Rc=}3mQ`+9<(5@$S>={hVObTHRbg2bmQ`U{6_yqMxorr-!m=tXtHQD> zEvwS9DlMzhvMMdB(y}TotJ1P6EvwS9sw}I@vZ^er%Cf2~tID#fEUU`0sw}I@vZ^hs z+On!GtJ<=vEvwqHsx7P9vZ^hs+OldatH!cwEUU({YAmb9vT7`=#}TLyo>A3TXJ|u*PXZY@b0cX?~)z)dEQU-ex~gv+M8t>k(P&*cKVkL~(9{aiZ0vklwdB?P?7?an*> zTu#9I-mbsX&!q*_8f<@;81VeWop<`V+<^DVU4N&aOAmPG-1T?*xeS5#(rtg2Bk;7w zop<`VG=X>8U4N&a%M^I;-Sv0+xnzOoK(@b27kFphop<`VjDh#+U4N&aOB#6h-u8EC z1J9b=d8ePt9C&Zv^>_NY_NY{DCT;>+ke)`2)`|ZGV?P@Y@r2-syKefBb6wVB0s=_`E`L+6m)Ezc_5xV{`a(e6A7cfwpXiGp`$XFl3xq_$>F>d{|a@uQ~ Lvprlx%YOd>&)TR8 delta 3052 zcmeIy$4}c(00wZIgtksdZ-w^I3JD}eL2?j6{R4XG1$t-?y;Q0c4wb5?2YL>Bj|8*# z-Xp>6y?59U_TGDMNaN%a?SDX3#j^bK`<|bk?ZfAlN^UDiNE{JVHgaD5e8+19DxExLIy^` zXcz-yVH}Ky2`~{R!DN^MQ}xOSA`L-0%z&9N3ueO{m<#h@J}iKRum~2z5?IQq+n-P( zP{A_D!g5#vD`6F^hBdGj*1>w%02^VGPHumqnY-wY76U{pY=iBv19rkL*bRGNFYJT; zPz?v*AoeGQ5Ddc+I10z$IGlhQI0>iVG@OC6a1PGH1;h6f#3F(vxC~d|DqMr>a070_ zEw~ML;4a*Q`|v>j|8;{y1V``~p1@Oh2G8LIyo6Wq8s5NLcn9yHg%&qjk~HD*O`kk{ zMhS#wBF4@26;n)ok$HM~`}q3#2LuKMhlGZON5~bCOjLACY+QUoVp4KSYFc_mW>$7i zZeD&tVNr2OsZv$OmRD3(RoB$k)i*RYHMg|3wRd!Ob@%l4^{WR4hlWQ+$Hpf#lT*_( zvvczci%ZKZt842Un_JsEyL2uP zs~KC(*lPbltFhlLRQ$(oSyIc$t;|{F>ph;XVgHfpC$+5H%8ajnp%+Y<&DY2OORU_k z(V7&oZmv$Gj7LgIiK)l0pFiA9KYx&VyyL(B=HYJsi3@C{mioxSQew%acGgGsx>HI? 
tDE$HfH|yd|adXt)D_oo(mI%Z& reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSON; SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSONCompact; SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT XML; - -SET enable_named_columns_in_function_tuple = 1; - -SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes; -SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes; From d041df80aa112920f28d74ed26a0c8381808dafc Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 26 Jul 2024 12:14:26 +0000 Subject: [PATCH 0970/2361] Add test --- ..._to_read_for_schema_inference_in_cache.reference | 2 ++ ...x_bytes_to_read_for_schema_inference_in_cache.sh | 13 +++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference create mode 100755 tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh diff --git a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference new file mode 100644 index 00000000000..cd109daac52 --- /dev/null +++ b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference @@ -0,0 +1,2 @@ +x Nullable(Int64) +schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, max_bytes_to_read_for_schema_inference=1000, schema_inference_make_columns_nullable=true, try_infer_integers=true, try_infer_dates=true, try_infer_datetimes=true, try_infer_numbers_from_strings=false, read_bools_as_numbers=true, read_bools_as_strings=true, read_objects_as_strings=true, read_numbers_as_strings=true, read_arrays_as_strings=true, try_infer_objects_as_tuples=true, infer_incomplete_types_as_strings=true, try_infer_objects=false, use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=false diff --git a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh new file mode 100755 index 00000000000..8a77538f592 --- /dev/null +++ b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json +$CLICKHOUSE_LOCAL -nm -q " +DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS input_format_max_bytes_to_read_for_schema_inference=1000; +SELECT additional_format_info from system.schema_inference_cache" + +rm $CLICKHOUSE_TEST_UNIQUE_NAME.json + From a03f12fe76969bc2002bc76ce04f7097cb02295b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 14:17:17 +0200 Subject: [PATCH 0971/2361] Fix some tests --- ...2354_distributed_with_external_aggregation_memory_usage.sql | 2 +- .../0_stateless/02884_parallel_window_functions.reference | 1 + tests/queries/0_stateless/replication.lib | 3 ++- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql index 5eea6f149b5..f9da5b3a73c 100644 --- a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql +++ b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql @@ -25,6 +25,6 @@ select a, b, c, sum(a) as s from remote('127.0.0.{2,3}', currentDatabase(), t_2354_dist_with_external_aggr) group by a, b, c format Null -settings max_memory_usage = '5Gi'; +settings max_memory_usage = '5Gi', max_result_rows = 0, max_result_bytes = 0; DROP TABLE t_2354_dist_with_external_aggr; diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.reference b/tests/queries/0_stateless/02884_parallel_window_functions.reference index a2cc96dda74..1f5346a1484 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.reference +++ b/tests/queries/0_stateless/02884_parallel_window_functions.reference @@ -42,6 +42,7 @@ SETTINGS max_threads = 1; 0 2 0 1 2 0 2 2 0 +SET max_rows_to_read = 40000000; SELECT nw, sum(WR) AS R, diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 05651531fba..dcac721859e 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -114,7 +114,8 @@ function check_replication_consistency() # it's important to disable prefer warmed unmerged parts because # otherwise it can read non-syncrhonized state of replicas - res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q \ + # also, disable the limit that is set for tests globally + res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 --max_rows_to_read=0 -q \ "SELECT if((countDistinct(data) as c) == 0, 1, c) FROM From 7f4eb59c42aa4f432c99fa7e4f8240056fa26b91 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 14:33:40 +0200 Subject: [PATCH 0972/2361] Fix some tests --- .../01730_distributed_group_by_no_merge_order_by_long.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql index e980f367de7..805e0b4fedb 100644 --- a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql +++ b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql @@ -12,7 +12,7 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n -- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block, -- so the initiator will first receive all blocks 
from remotes and only after start merging, -- and will hit the memory limit. -SET max_rows_to_read = 0; +SET max_rows_to_read = 0, max_result_rows = 0; select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED } -- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently, From d06b9ca99669ed3cfdf1beb68c9fbc1b90180536 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 26 Jul 2024 14:47:38 +0200 Subject: [PATCH 0973/2361] Ping CI From 80b925ec75983ea558be536d657bfd2d277f1fbc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 14:47:58 +0200 Subject: [PATCH 0974/2361] Update test --- ...arallel_replicas_join_algo_and_analyzer.sh | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh index 8cefa873940..2c5b5a2a07b 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh @@ -86,11 +86,11 @@ SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -123,12 +123,12 @@ SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 
'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -149,12 +149,12 @@ SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=0) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -174,12 +174,12 @@ SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -200,12 +200,12 @@ SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, 
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -225,12 +225,12 @@ SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='hash') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -250,12 +250,12 @@ SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS max_rows_to_read=0, allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | From 342bbb53c65d32698d55649252e6bc29f2a557b8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 14:49:36 +0200 Subject: [PATCH 0975/2361] It was an optimization, not a limit --- tests/config/users.d/limits.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/config/users.d/limits.yaml b/tests/config/users.d/limits.yaml index 23aaccf9298..63c4e884a9d 100644 --- a/tests/config/users.d/limits.yaml +++ b/tests/config/users.d/limits.yaml @@ -34,7 +34,6 @@ profiles: max_bytes_in_set: 10G max_rows_in_join: 10G max_bytes_in_join: 10G - max_rows_in_set_to_optimize_join: 1G max_rows_to_transfer: 1G max_bytes_to_transfer: 1G max_rows_in_distinct: 10G From 0299475202b59a4d1a54f13f02b7cc9ff44f38cc Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 26 Jul 2024 14:02:37 +0100 Subject: [PATCH 0976/2361] impl --- 
...eplicas_join_algo_and_analyzer_1.reference | 30 +++++ ...allel_replicas_join_algo_and_analyzer_1.sh | 51 ++++++++ ...eplicas_join_algo_and_analyzer_2.reference | 57 +++++++++ ...allel_replicas_join_algo_and_analyzer_2.sh | 103 +++++++++++++++ ...plicas_join_algo_and_analyzer_3.reference} | 87 ------------- ...llel_replicas_join_algo_and_analyzer_3.sh} | 119 ------------------ 6 files changed, 241 insertions(+), 206 deletions(-) create mode 100644 tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference create mode 100755 tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh create mode 100644 tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference create mode 100755 tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh rename tests/queries/0_stateless/{02967_parallel_replicas_join_algo_and_analyzer.reference => 02967_parallel_replicas_join_algo_and_analyzer_3.reference} (55%) rename tests/queries/0_stateless/{02967_parallel_replicas_join_algo_and_analyzer.sh => 02967_parallel_replicas_join_algo_and_analyzer_3.sh} (58%) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference new file mode 100644 index 00000000000..e1bf9c27a81 --- /dev/null +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference @@ -0,0 +1,30 @@ + +simple join with analyzer +4200000 4200000 4200000 -1400000 +4200006 4200006 4200006 -1400002 +4200012 4200012 4200012 -1400004 +4200018 4200018 4200018 -1400006 +4200024 4200024 4200024 -1400008 +4200030 4200030 4200030 -1400010 +4200036 4200036 4200036 -1400012 +4200042 4200042 4200042 -1400014 +4200048 4200048 4200048 -1400016 +4200054 4200054 4200054 -1400018 + +simple (global) join with analyzer and parallel replicas +4200000 4200000 4200000 -1400000 +4200006 4200006 4200006 -1400002 +4200012 4200012 4200012 -1400004 +4200018 4200018 4200018 -1400006 +4200024 4200024 4200024 -1400008 +4200030 4200030 4200030 -1400010 +4200036 4200036 4200036 -1400012 +4200042 4200042 4200042 -1400014 +4200048 4200048 4200048 -1400016 +4200054 4200054 4200054 -1400018 +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.`num_2` AS `__table1` (stage: WithMergeableState) +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.`num_2` AS `__table1` (stage: WithMergeableState) + DefaultCoordinator: Coordination done +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) + DefaultCoordinator: Coordination done diff --git 
a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh new file mode 100755 index 00000000000..1089eb4051f --- /dev/null +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# Tags: long, no-random-settings, no-random-merge-tree-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT -nm -q " +drop table if exists num_1; +drop table if exists num_2; + +create table num_1 (key UInt64, value String) engine = MergeTree order by key; +create table num_2 (key UInt64, value Int64) engine = MergeTree order by key; + +insert into num_1 select number * 2, toString(number * 2) from numbers(1e7); +insert into num_2 select number * 3, -number from numbers(1.5e6); +" + +############## +echo +echo "simple join with analyzer" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1" + +############## +echo +echo "simple (global) join with analyzer and parallel replicas" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, +max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', +max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | +grep "executeQuery\|.*Coordinator: Coordination done" | +grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | +sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference new file mode 100644 index 00000000000..297ec311f3e --- /dev/null +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference @@ -0,0 +1,57 @@ + +simple (local) join with analyzer and parallel replicas +4200000 4200000 4200000 -1400000 +4200006 4200006 4200006 -1400002 +4200012 4200012 4200012 -1400004 +4200018 4200018 4200018 -1400006 +4200024 4200024 4200024 -1400008 +4200030 4200030 4200030 -1400010 +4200036 4200036 4200036 -1400012 +4200042 4200042 4200042 -1400014 +4200048 4200048 4200048 -1400016 +4200054 4200054 4200054 -1400018 +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` 
AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) + DefaultCoordinator: Coordination done + +simple (local) join with analyzer and parallel replicas and full sorting merge join +4200000 4200000 4200000 -1400000 +4200006 4200006 4200006 -1400002 +4200012 4200012 4200012 -1400004 +4200018 4200018 4200018 -1400006 +4200024 4200024 4200024 -1400008 +4200030 4200030 4200030 -1400010 +4200036 4200036 4200036 -1400012 +4200042 4200042 4200042 -1400014 +4200048 4200048 4200048 -1400016 +4200054 4200054 4200054 -1400018 +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) + WithOrderCoordinator: Coordination done + +nested join with analyzer +420000 420000 420000 -140000 +420042 420042 420042 -140014 +420084 420084 420084 -140028 +420126 420126 420126 -140042 +420168 420168 420168 -140056 +420210 420210 420210 -140070 +420252 420252 420252 -140084 +420294 420294 420294 -140098 +420336 420336 420336 -140112 +420378 420378 420378 -140126 + +nested join with analyzer and parallel replicas, both local +420000 420000 420000 -140000 +420042 420042 420042 -140014 +420084 420084 420084 -140028 +420126 420126 420126 -140042 +420168 420168 420168 -140056 +420210 420210 420210 -140070 +420252 420252 420252 -140084 +420294 420294 420294 -140098 +420336 420336 420336 -140112 +420378 420378 420378 -140126 +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) 
AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) + WithOrderCoordinator: Coordination done diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh new file mode 100755 index 00000000000..7a0e2d9bfdb --- /dev/null +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash +# Tags: long, no-random-settings, no-random-merge-tree-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT -nm -q " +drop table if exists num_1; +drop table if exists num_2; + +create table num_1 (key UInt64, value String) engine = MergeTree order by key; +create table num_2 (key UInt64, value Int64) engine = MergeTree order by key; + +insert into num_1 select number * 2, toString(number * 2) from numbers(1e7); +insert into num_2 select number * 3, -number from numbers(1.5e6); +" + +############## +echo +echo "simple (local) join with analyzer and parallel replicas" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, +allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', +allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +grep "executeQuery\|.*Coordinator: Coordination done" | +grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | +sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' + + +############## +echo +echo "simple (local) join with analyzer and parallel replicas and full sorting merge join" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 
+SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', +allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', +allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +grep "executeQuery\|.*Coordinator: Coordination done" | +grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | +sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' + + +############## +echo +echo "nested join with analyzer" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2 inner join + (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r +on l.key = r.key order by l.key limit 10 offset 10000 +SETTINGS allow_experimental_analyzer=1" + + +############## +echo +echo "nested join with analyzer and parallel replicas, both local" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2 inner join + (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r +on l.key = r.key order by l.key limit 10 offset 10000 +SETTINGS allow_experimental_analyzer=1, +allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2 inner join + (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r +on l.key = r.key order by l.key limit 10 offset 10000 +SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', +allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +grep "executeQuery\|.*Coordinator: Coordination done" | +grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | +sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.reference similarity index 55% rename from tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.reference rename to tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.reference index d7fa419aeab..c0485b817c4 100644 --- 
a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.reference @@ -1,91 +1,4 @@ -simple join with analyzer -4200000 4200000 4200000 -1400000 -4200006 4200006 4200006 -1400002 -4200012 4200012 4200012 -1400004 -4200018 4200018 4200018 -1400006 -4200024 4200024 4200024 -1400008 -4200030 4200030 4200030 -1400010 -4200036 4200036 4200036 -1400012 -4200042 4200042 4200042 -1400014 -4200048 4200048 4200048 -1400016 -4200054 4200054 4200054 -1400018 - -simple (global) join with analyzer and parallel replicas -4200000 4200000 4200000 -1400000 -4200006 4200006 4200006 -1400002 -4200012 4200012 4200012 -1400004 -4200018 4200018 4200018 -1400006 -4200024 4200024 4200024 -1400008 -4200030 4200030 4200030 -1400010 -4200036 4200036 4200036 -1400012 -4200042 4200042 4200042 -1400014 -4200048 4200048 4200048 -1400016 -4200054 4200054 4200054 -1400018 -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.`num_2` AS `__table1` (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.`num_2` AS `__table1` (stage: WithMergeableState) - DefaultCoordinator: Coordination done -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) - DefaultCoordinator: Coordination done - -simple (local) join with analyzer and parallel replicas -4200000 4200000 4200000 -1400000 -4200006 4200006 4200006 -1400002 -4200012 4200012 4200012 -1400004 -4200018 4200018 4200018 -1400006 -4200024 4200024 4200024 -1400008 -4200030 4200030 4200030 -1400010 -4200036 4200036 4200036 -1400012 -4200042 4200042 4200042 -1400014 -4200048 4200048 4200048 -1400016 -4200054 4200054 4200054 -1400018 -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY 
`__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) - DefaultCoordinator: Coordination done - -simple (local) join with analyzer and parallel replicas and full sorting merge join -4200000 4200000 4200000 -1400000 -4200006 4200006 4200006 -1400002 -4200012 4200012 4200012 -1400004 -4200018 4200018 4200018 -1400006 -4200024 4200024 4200024 -1400008 -4200030 4200030 4200030 -1400010 -4200036 4200036 4200036 -1400012 -4200042 4200042 4200042 -1400014 -4200048 4200048 4200048 -1400016 -4200054 4200054 4200054 -1400018 -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) - WithOrderCoordinator: Coordination done - -nested join with analyzer -420000 420000 420000 -140000 -420042 420042 420042 -140014 -420084 420084 420084 -140028 -420126 420126 420126 -140042 -420168 420168 420168 -140056 -420210 420210 420210 -140070 -420252 420252 420252 -140084 -420294 420294 420294 -140098 -420336 420336 420336 -140112 -420378 420378 420378 -140126 - -nested join with analyzer and parallel replicas, both local -420000 420000 420000 -140000 -420042 420042 420042 -140014 -420084 420084 420084 -140028 -420126 420126 420126 -140042 -420168 420168 420168 -140056 -420210 420210 420210 -140070 -420252 420252 420252 -140084 -420294 420294 420294 -140098 -420336 420336 420336 -140112 -420378 420378 420378 -140126 -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) 
AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) - WithOrderCoordinator: Coordination done - nested join with analyzer and parallel replicas, both global 420000 420000 420000 -140000 420042 420042 420042 -140014 diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh similarity index 58% rename from tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh rename to tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh index 2840482da6d..e49a340ab67 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh @@ -17,125 +17,6 @@ insert into num_1 select number * 2, toString(number * 2) from numbers(1e7); insert into num_2 select number * 3, -number from numbers(1.5e6); " -############## -echo -echo "simple join with analyzer" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1" - -############## -echo -echo "simple (global) join with analyzer and parallel replicas" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | -grep "executeQuery\|.*Coordinator: Coordination done" | -grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | -sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' - -############## -echo -echo "simple (local) join with analyzer and parallel replicas" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', 
-allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | -grep "executeQuery\|.*Coordinator: Coordination done" | -grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | -sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' - - -############## -echo -echo "simple (local) join with analyzer and parallel replicas and full sorting merge join" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2) r on l.key = r.key -order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | -grep "executeQuery\|.*Coordinator: Coordination done" | -grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | -sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' - - -############## -echo -echo "nested join with analyzer" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2 inner join - (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r -on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1" - - -############## -echo -echo "nested join with analyzer and parallel replicas, both local" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2 inner join - (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r -on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" - -$CLICKHOUSE_CLIENT -q " -select * from (select key, value from num_1) l -inner join (select key, value from num_2 inner join - (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r -on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 
'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | -grep "executeQuery\|.*Coordinator: Coordination done" | -grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | -sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' - - ############## echo echo "nested join with analyzer and parallel replicas, both global" From aec7848525d2d6bd1cc9e7c573c25bd3b4ac79e7 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 26 Jul 2024 13:04:44 +0000 Subject: [PATCH 0977/2361] fix --- src/Processors/Transforms/WindowTransform.cpp | 2 +- .../03210_lag_lead_inframe_types.reference | 20 +++++++++++++++++++ .../03210_lag_lead_inframe_types.sql | 4 ++-- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 61be0c32a7d..1eac08780e9 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -2424,7 +2424,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction argument_types[0]->getName(), argument_types[2]->getName()); } - + const auto from_name = argument_types[2]->getName(); const auto to_name = argument_types[0]->getName(); ColumnsWithTypeAndName arguments diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference index cc3b9a096b9..d4734a85e72 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference @@ -18,3 +18,23 @@ 7 8 9 +0 +1 +2 +2 +2 +2 +2 +2 +2 +2 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql index 5466cfe0fad..f6017ee6690 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql @@ -1,4 +1,4 @@ -SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); -- { serverError BAD_ARGUMENTS } -SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); -- { serverError BAD_ARGUMENTS } +SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); +SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT lagInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT leadInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); From d2b3be2fb8345436422e6214f7652545696be6ea Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 26 Jul 2024 15:05:03 +0200 Subject: [PATCH 0978/2361] Apply optimizations for a single file --- src/CMakeLists.txt | 3 + src/Client/ClientBase.cpp | 163 ---------------------- src/Client/ClientBaseOptimizedParts.cpp | 178 ++++++++++++++++++++++++ 3 files changed, 181 insertions(+), 163 deletions(-) create mode 100644 src/Client/ClientBaseOptimizedParts.cpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0f84dd35320..8c133971785 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -226,6 +226,9 @@ add_object_library(clickhouse_storages_windowview Storages/WindowView) add_object_library(clickhouse_storages_s3queue Storages/ObjectStorageQueue) add_object_library(clickhouse_storages_materializedview Storages/MaterializedView) 
add_object_library(clickhouse_client Client) +# Always compile this file with the highest possible level of optimizations, even in Debug builds. +# https://github.com/ClickHouse/ClickHouse/issues/65745 +set_source_files_properties(Client/ClientBaseOptimizedParts.cpp PROPERTIES COMPILE_FLAGS "-O3") add_object_library(clickhouse_bridge BridgeHelper) add_object_library(clickhouse_server Server) add_object_library(clickhouse_server_http Server/HTTP) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 1e1917e1ca1..04af9db7afe 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -108,7 +108,6 @@ namespace ErrorCodes extern const int UNEXPECTED_PACKET_FROM_SERVER; extern const int INVALID_USAGE_OF_INPUT; extern const int CANNOT_SET_SIGNAL_HANDLER; - extern const int UNRECOGNIZED_ARGUMENTS; extern const int LOGICAL_ERROR; extern const int CANNOT_OPEN_FILE; extern const int FILE_ALREADY_EXISTS; @@ -2848,168 +2847,6 @@ void ClientBase::showClientVersion() output_stream << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl; } -namespace -{ - -/// Define transparent hash to we can use -/// std::string_view with the containers -struct TransparentStringHash -{ - using is_transparent = void; - size_t operator()(std::string_view txt) const - { - return std::hash{}(txt); - } -}; - -/* - * This functor is used to parse command line arguments and replace dashes with underscores, - * allowing options to be specified using either dashes or underscores. - */ -class OptionsAliasParser -{ -public: - explicit OptionsAliasParser(const boost::program_options::options_description& options) - { - options_names.reserve(options.options().size()); - for (const auto& option : options.options()) - options_names.insert(option->long_name()); - } - - /* - * Parses arguments by replacing dashes with underscores, and matches the resulting name with known options - * Implements boost::program_options::ext_parser logic - */ - std::pair operator()(const std::string & token) const - { - if (!token.starts_with("--")) - return {}; - std::string arg = token.substr(2); - - // divide token by '=' to separate key and value if options style=long_allow_adjacent - auto pos_eq = arg.find('='); - std::string key = arg.substr(0, pos_eq); - - if (options_names.contains(key)) - // option does not require any changes, because it is already correct - return {}; - - std::replace(key.begin(), key.end(), '-', '_'); - if (!options_names.contains(key)) - // after replacing '-' with '_' argument is still unknown - return {}; - - std::string value; - if (pos_eq != std::string::npos && pos_eq < arg.size()) - value = arg.substr(pos_eq + 1); - - return {key, value}; - } - -private: - std::unordered_set options_names; -}; - -} - -/// Enable optimizations even in debug builds because otherwise options parsing becomes extremely slow affecting .sh tests -#if defined(__clang__) -#pragma clang optimize on -#endif -void ClientBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments) -{ - if (allow_repeated_settings) - addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value()); - else - addProgramOptions(cmd_settings, options_description.main_description.value()); - - if (allow_merge_tree_settings) - { - /// Add merge tree settings manually, because names of some settings - /// may clash. Query settings have higher priority and we just - /// skip ambiguous merge tree settings. 
- auto & main_options = options_description.main_description.value(); - - std::unordered_set> main_option_names; - for (const auto & option : main_options.options()) - main_option_names.insert(option->long_name()); - - for (const auto & setting : cmd_merge_tree_settings.all()) - { - const auto add_setting = [&](const std::string_view name) - { - if (auto it = main_option_names.find(name); it != main_option_names.end()) - return; - - if (allow_repeated_settings) - addProgramOptionAsMultitoken(cmd_merge_tree_settings, main_options, name, setting); - else - addProgramOption(cmd_merge_tree_settings, main_options, name, setting); - }; - - const auto & setting_name = setting.getName(); - - add_setting(setting_name); - - const auto & settings_to_aliases = MergeTreeSettings::Traits::settingsToAliases(); - if (auto it = settings_to_aliases.find(setting_name); it != settings_to_aliases.end()) - { - for (const auto alias : it->second) - { - add_setting(alias); - } - } - } - } - - /// Parse main commandline options. - auto parser = po::command_line_parser(arguments) - .options(options_description.main_description.value()) - .extra_parser(OptionsAliasParser(options_description.main_description.value())) - .allow_unregistered(); - po::parsed_options parsed = parser.run(); - - /// Check unrecognized options without positional options. - auto unrecognized_options = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::exclude_positional); - if (!unrecognized_options.empty()) - { - auto hints = this->getHints(unrecognized_options[0]); - if (!hints.empty()) - throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'. Maybe you meant {}", - unrecognized_options[0], toString(hints)); - - throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'", unrecognized_options[0]); - } - - /// Check positional options. - for (const auto & op : parsed.options) - { - if (!op.unregistered && op.string_key.empty() && !op.original_tokens[0].starts_with("--") - && !op.original_tokens[0].empty() && !op.value.empty()) - { - /// Two special cases for better usability: - /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" - /// These are relevant for interactive usage - user-friendly, but questionable in general. - /// In case of ambiguity or for scripts, prefer using proper options. - - const auto & token = op.original_tokens[0]; - po::variable_value value(boost::any(op.value), false); - - const char * option; - if (token.contains(' ')) - option = "query"; - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); - - if (!options.emplace(option, value).second) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); - } - } - - po::store(parsed, options); -} - - void ClientBase::init(int argc, char ** argv) { namespace po = boost::program_options; diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp new file mode 100644 index 00000000000..31614d301b6 --- /dev/null +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -0,0 +1,178 @@ +#include + +#include + +namespace DB +{ + +/** + * Program ptions parsing is very slow in debug builds and it affects .sh tests + * causing them to timeout sporadically. + * It seems impossible to enable optimizations for a single function (only to disable them), so + * instead we extract the code to a separate source file and compile it with different options. 
+ */ + +/// +namespace ErrorCodes +{ + extern const int UNRECOGNIZED_ARGUMENTS; +} + +namespace +{ + +/// Define transparent hash to we can use +/// std::string_view with the containers +struct TransparentStringHash +{ + using is_transparent = void; + size_t operator()(std::string_view txt) const + { + return std::hash{}(txt); + } +}; + +/* + * This functor is used to parse command line arguments and replace dashes with underscores, + * allowing options to be specified using either dashes or underscores. + */ +class OptionsAliasParser +{ +public: + explicit OptionsAliasParser(const boost::program_options::options_description& options) + { + options_names.reserve(options.options().size()); + for (const auto& option : options.options()) + options_names.insert(option->long_name()); + } + + /* + * Parses arguments by replacing dashes with underscores, and matches the resulting name with known options + * Implements boost::program_options::ext_parser logic + */ + std::pair operator()(const std::string & token) const + { + if (!token.starts_with("--")) + return {}; + std::string arg = token.substr(2); + + // divide token by '=' to separate key and value if options style=long_allow_adjacent + auto pos_eq = arg.find('='); + std::string key = arg.substr(0, pos_eq); + + if (options_names.contains(key)) + // option does not require any changes, because it is already correct + return {}; + + std::replace(key.begin(), key.end(), '-', '_'); + if (!options_names.contains(key)) + // after replacing '-' with '_' argument is still unknown + return {}; + + std::string value; + if (pos_eq != std::string::npos && pos_eq < arg.size()) + value = arg.substr(pos_eq + 1); + + return {key, value}; + } + +private: + std::unordered_set options_names; +}; + +} + +void ClientBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments) +{ + if (allow_repeated_settings) + addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value()); + else + addProgramOptions(cmd_settings, options_description.main_description.value()); + + if (allow_merge_tree_settings) + { + /// Add merge tree settings manually, because names of some settings + /// may clash. Query settings have higher priority and we just + /// skip ambiguous merge tree settings. + auto & main_options = options_description.main_description.value(); + + std::unordered_set> main_option_names; + for (const auto & option : main_options.options()) + main_option_names.insert(option->long_name()); + + for (const auto & setting : cmd_merge_tree_settings.all()) + { + const auto add_setting = [&](const std::string_view name) + { + if (auto it = main_option_names.find(name); it != main_option_names.end()) + return; + + if (allow_repeated_settings) + addProgramOptionAsMultitoken(cmd_merge_tree_settings, main_options, name, setting); + else + addProgramOption(cmd_merge_tree_settings, main_options, name, setting); + }; + + const auto & setting_name = setting.getName(); + + add_setting(setting_name); + + const auto & settings_to_aliases = MergeTreeSettings::Traits::settingsToAliases(); + if (auto it = settings_to_aliases.find(setting_name); it != settings_to_aliases.end()) + { + for (const auto alias : it->second) + { + add_setting(alias); + } + } + } + } + + /// Parse main commandline options. 
+ auto parser = po::command_line_parser(arguments) + .options(options_description.main_description.value()) + .extra_parser(OptionsAliasParser(options_description.main_description.value())) + .allow_unregistered(); + po::parsed_options parsed = parser.run(); + + /// Check unrecognized options without positional options. + auto unrecognized_options = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::exclude_positional); + if (!unrecognized_options.empty()) + { + auto hints = this->getHints(unrecognized_options[0]); + if (!hints.empty()) + throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'. Maybe you meant {}", + unrecognized_options[0], toString(hints)); + + throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'", unrecognized_options[0]); + } + + /// Check positional options. + for (const auto & op : parsed.options) + { + if (!op.unregistered && op.string_key.empty() && !op.original_tokens[0].starts_with("--") + && !op.original_tokens[0].empty() && !op.value.empty()) + { + /// Two special cases for better usability: + /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" + /// These are relevant for interactive usage - user-friendly, but questionable in general. + /// In case of ambiguity or for scripts, prefer using proper options. + + const auto & token = op.original_tokens[0]; + po::variable_value value(boost::any(op.value), false); + + const char * option; + if (token.contains(' ')) + option = "query"; + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); + + if (!options.emplace(option, value).second) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); + } + } + + po::store(parsed, options); +} + +} From 42384af0ef38dd326337e8cf18327871924f7359 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Thu, 25 Jul 2024 15:42:50 +0000 Subject: [PATCH 0979/2361] Fix crash when the connection is empty --- src/Storages/Distributed/DistributedAsyncInsertBatch.cpp | 6 ++++++ .../Distributed/DistributedAsyncInsertDirectoryQueue.cpp | 3 +++ 2 files changed, 9 insertions(+) diff --git a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp index e1facec5b40..31779a32c1f 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp @@ -245,6 +245,9 @@ void DistributedAsyncInsertBatch::sendBatch(const SettingsChanges & settings_cha connection = std::move(result.front().entry); compression_expected = connection->getCompression() == Protocol::Compression::Enable; + if (connection.isNull()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty connection"); + LOG_DEBUG(parent.log, "Sending a batch of {} files to {} ({} rows, {} bytes).", files.size(), connection->getDescription(), @@ -303,6 +306,9 @@ void DistributedAsyncInsertBatch::sendSeparateFiles(const SettingsChanges & sett auto connection = std::move(result.front().entry); bool compression_expected = connection->getCompression() == Protocol::Compression::Enable; + if (connection.isNull()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty connection"); + RemoteInserter remote(*connection, timeouts, distributed_header.insert_query, insert_settings, diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index d471c67553d..15998776d27 
100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -415,6 +415,9 @@ void DistributedAsyncInsertDirectoryQueue::processFile(std::string & file_path, auto result = pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName()); auto connection = std::move(result.front().entry); + if (connection.isNull()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty connection"); + LOG_DEBUG(log, "Sending `{}` to {} ({} rows, {} bytes)", file_path, connection->getDescription(), From 031b435e3ad35a57d82ff98ad4e6f79d47d1cbc3 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 26 Jul 2024 15:21:11 +0200 Subject: [PATCH 0980/2361] Style --- src/Client/ClientBaseOptimizedParts.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index 31614d301b6..421843a0e79 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -1,20 +1,18 @@ #include - #include namespace DB { /** - * Program ptions parsing is very slow in debug builds and it affects .sh tests + * Program options parsing is very slow in debug builds and it affects .sh tests * causing them to timeout sporadically. * It seems impossible to enable optimizations for a single function (only to disable them), so * instead we extract the code to a separate source file and compile it with different options. */ - -/// namespace ErrorCodes { + extern const int BAD_ARGUMENTS; extern const int UNRECOGNIZED_ARGUMENTS; } From 414ebf035d9e2f47c16ee93d7ff0d21fbee89bff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 26 Jul 2024 15:32:05 +0200 Subject: [PATCH 0981/2361] Fix error --- src/IO/ReadWriteBufferFromHTTP.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index a62f22d4bd9..4b2e6580f9b 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -140,6 +140,10 @@ std::optional ReadWriteBufferFromHTTP::tryGetFileSize() { return std::nullopt; } + catch (const Poco::IOException &) + { + return std::nullopt; + } } return file_info->file_size; @@ -324,12 +328,12 @@ void ReadWriteBufferFromHTTP::doWithRetries(std::function && callable, error_message = e.displayText(); exception = std::current_exception(); } - catch (DB::NetException & e) + catch (NetException & e) { error_message = e.displayText(); exception = std::current_exception(); } - catch (DB::HTTPException & e) + catch (HTTPException & e) { if (!isRetriableError(e.getHTTPStatus())) is_retriable = false; @@ -337,7 +341,7 @@ void ReadWriteBufferFromHTTP::doWithRetries(std::function && callable, error_message = e.displayText(); exception = std::current_exception(); } - catch (DB::Exception & e) + catch (Exception & e) { is_retriable = false; @@ -708,6 +712,10 @@ std::optional ReadWriteBufferFromHTTP::tryGetLastModificationTime() { return std::nullopt; } + catch (const Poco::IOException &) + { + return std::nullopt; + } } return file_info->last_modified; From 981135bfb104b5ecfa0f1da5533e3d12f6850838 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 26 Jul 2024 13:38:42 +0000 Subject: [PATCH 0982/2361] Fix style check --- src/Storages/Distributed/DistributedAsyncInsertBatch.cpp | 5 +++-- .../Distributed/DistributedAsyncInsertDirectoryQueue.cpp | 2 +- 
src/Storages/Distributed/DistributedSink.cpp | 2 ++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp index 31779a32c1f..5e7b4b979c7 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp @@ -28,6 +28,7 @@ namespace ErrorCodes extern const int TOO_MANY_PARTITIONS; extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int LOGICAL_ERROR; } /// Can the batch be split and send files from batch one-by-one instead? @@ -246,7 +247,7 @@ void DistributedAsyncInsertBatch::sendBatch(const SettingsChanges & settings_cha compression_expected = connection->getCompression() == Protocol::Compression::Enable; if (connection.isNull()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty connection"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty connection"); LOG_DEBUG(parent.log, "Sending a batch of {} files to {} ({} rows, {} bytes).", files.size(), @@ -307,7 +308,7 @@ void DistributedAsyncInsertBatch::sendSeparateFiles(const SettingsChanges & sett bool compression_expected = connection->getCompression() == Protocol::Compression::Enable; if (connection.isNull()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty connection"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty connection"); RemoteInserter remote(*connection, timeouts, distributed_header.insert_query, diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index 15998776d27..2bb0e720c72 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -416,7 +416,7 @@ void DistributedAsyncInsertDirectoryQueue::processFile(std::string & file_path, auto connection = std::move(result.front().entry); if (connection.isNull()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty connection"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty connection"); LOG_DEBUG(log, "Sending `{}` to {} ({} rows, {} bytes)", file_path, diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index b2ce62caf0a..f8bbc081e55 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -378,6 +378,8 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si /// (anyway fallback_to_stale_replicas_for_distributed_queries=true by default) auto results = shard_info.pool->getManyCheckedForInsert(timeouts, settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName()); job.connection_entry = std::move(results.front().entry); + if (job.connection_entry.isNull()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got empty connection"); } else { From 503dc25d1021eb1b598ac52efc0370cfd15c57c6 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 26 Jul 2024 15:53:03 +0200 Subject: [PATCH 0983/2361] Fix linking --- programs/odbc-bridge/tests/CMakeLists.txt | 2 +- src/CMakeLists.txt | 1 - src/Common/Exception.cpp | 6 +++--- src/Common/Logger.cpp | 12 ++++++++++++ src/Common/Logger.h | 4 ++++ src/Daemon/BaseDaemon.cpp | 2 +- src/Loggers/OwnSplitChannel.cpp | 12 ------------ src/Loggers/OwnSplitChannel.h | 4 ---- 8 files changed, 21 insertions(+), 22 deletions(-) diff --git 
a/programs/odbc-bridge/tests/CMakeLists.txt b/programs/odbc-bridge/tests/CMakeLists.txt index 2f63aed7942..f1411dbb554 100644 --- a/programs/odbc-bridge/tests/CMakeLists.txt +++ b/programs/odbc-bridge/tests/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp ../validateODBCConnectionString.cpp) -target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io clickhouse_common_config loggers_no_text_log) +target_link_libraries (validate-odbc-connection-string PRIVATE clickhouse_common_io clickhouse_common_config) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fede7d69105..0f84dd35320 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -638,7 +638,6 @@ if (ENABLE_TESTS) dbms clickhouse_common_config clickhouse_common_zookeeper - loggers hilite_comparator) if (TARGET ch_contrib::simdjson) diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index c4bd4fbd943..d68537513da 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -3,12 +3,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include #include @@ -253,7 +253,7 @@ void Exception::setThreadFramePointers(ThreadFramePointersBase frame_pointers) static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) { - if (!OwnSplitChannel::isLoggingEnabled()) + if (!isLoggingEnabled()) return; try @@ -271,7 +271,7 @@ static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string void tryLogCurrentException(const char * log_name, const std::string & start_of_message) { - if (!OwnSplitChannel::isLoggingEnabled()) + if (!isLoggingEnabled()) return; /// Under high memory pressure, new allocations throw a diff --git a/src/Common/Logger.cpp b/src/Common/Logger.cpp index c8d557bc3a3..bd848abe353 100644 --- a/src/Common/Logger.cpp +++ b/src/Common/Logger.cpp @@ -25,3 +25,15 @@ bool hasLogger(const std::string & name) { return Poco::Logger::has(name); } + +static constinit std::atomic allow_logging{true}; + +bool isLoggingEnabled() +{ + return allow_logging; +} + +void disableLogging() +{ + allow_logging = false; +} diff --git a/src/Common/Logger.h b/src/Common/Logger.h index b54ccd33e72..7471e3dff9b 100644 --- a/src/Common/Logger.h +++ b/src/Common/Logger.h @@ -64,3 +64,7 @@ LoggerRawPtr createRawLogger(const std::string & name, Poco::Channel * channel, * Otherwise, returns false. 
*/ bool hasLogger(const std::string & name); + +void disableLogging(); + +bool isLoggingEnabled(); diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 366aad00376..e7ae8ea5a1d 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -158,7 +158,7 @@ BaseDaemon::~BaseDaemon() tryLogCurrentException(&logger()); } - OwnSplitChannel::disableLogging(); + disableLogging(); } diff --git a/src/Loggers/OwnSplitChannel.cpp b/src/Loggers/OwnSplitChannel.cpp index e29d2a1e0aa..c1594361b2c 100644 --- a/src/Loggers/OwnSplitChannel.cpp +++ b/src/Loggers/OwnSplitChannel.cpp @@ -16,18 +16,6 @@ namespace DB { -static constinit std::atomic allow_logging{true}; - -bool OwnSplitChannel::isLoggingEnabled() -{ - return allow_logging; -} - -void OwnSplitChannel::disableLogging() -{ - allow_logging = false; -} - void OwnSplitChannel::log(const Poco::Message & msg) { if (!isLoggingEnabled()) diff --git a/src/Loggers/OwnSplitChannel.h b/src/Loggers/OwnSplitChannel.h index 9de55f330be..88bb6b9ce76 100644 --- a/src/Loggers/OwnSplitChannel.h +++ b/src/Loggers/OwnSplitChannel.h @@ -39,10 +39,6 @@ public: void setLevel(const std::string & name, int level); - static void disableLogging(); - - static bool isLoggingEnabled(); - private: void logSplit(const Poco::Message & msg); void tryLogSplit(const Poco::Message & msg); From 1e12ac577a4ed4f64d4de4feb8110cd794d4ce90 Mon Sep 17 00:00:00 2001 From: serxa Date: Fri, 26 Jul 2024 14:26:37 +0000 Subject: [PATCH 0984/2361] Fix flaky `test_pkill_query_log` (tsan) --- tests/integration/test_crash_log/test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/integration/test_crash_log/test.py b/tests/integration/test_crash_log/test.py index a5b82039a84..5a63e6ca6a7 100644 --- a/tests/integration/test_crash_log/test.py +++ b/tests/integration/test_crash_log/test.py @@ -60,6 +60,13 @@ def test_pkill(started_node): def test_pkill_query_log(started_node): + if ( + started_node.is_built_with_thread_sanitizer() + or started_node.is_built_with_address_sanitizer() + or started_node.is_built_with_memory_sanitizer() + ): + pytest.skip("doesn't fit in timeouts for stacktrace generation") + for signal in ["SEGV", "4"]: # force create query_log if it was not created started_node.query("SYSTEM FLUSH LOGS") From eeb012357196db988b01a70b33798fce99bb5deb Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 26 Jul 2024 14:48:52 +0000 Subject: [PATCH 0985/2361] fix filling of multilevel Nested --- src/Interpreters/inplaceBlockConversions.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index ce3f25d16f8..945cc62754d 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -316,7 +316,7 @@ static String removeTupleElementsFromSubcolumn(String subcolumn_name, const Name { auto pos = subcolumn_name.find(elem + "."); if (pos != std::string::npos) - subcolumn_name.erase(pos, elem.size()); + subcolumn_name.erase(pos, elem.size() + 1); } if (subcolumn_name.ends_with(".")) @@ -395,7 +395,6 @@ void fillMissingColumns( if (!current_offsets.empty()) { - Names tuple_elements; auto serialization = IDataType::getSerialization(*requested_column); From d8318fc428e2f5b847415886782fd8e25bca401b Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Fri, 26 Jul 2024 17:09:22 +0200 Subject: [PATCH 0986/2361] Wrap in retries --- ...1676_clickhouse_client_autocomplete.python | 38 +++++++++++++++---- 1 
file changed, 31 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python b/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python index 0f35d259c7c..fe08a07c214 100644 --- a/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python +++ b/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python @@ -10,18 +10,36 @@ DEBUG_LOG = os.path.join( os.path.basename(os.path.abspath(__file__)).strip(".python") + ".debuglog", ) +STATE_MAP = { + -1: "process did not start", + 0: "completion was found", + 1: "process started and said ':)'", + 2: "completion search was started", + 3: "completion is missing", +} + def run_with_timeout(func, args, timeout): - process = multiprocessing.Process(target=func, args=args) - process.start() - process.join(timeout) + for _ in range(5): + state = multiprocessing.Value("i", -1) + process = multiprocessing.Process(target=func, args=args, kwargs={"state": state}) + process.start() + process.join(timeout) - if process.is_alive(): - process.terminate() - print("Timeout") + if state.value in (0, 3): + return + + if process.is_alive(): + process.terminate() + + if state.value == -1: + continue + + print(f"Timeout, state: {STATE_MAP[state.value]}") + return -def test_completion(program, argv, comp_word): +def test_completion(program, argv, comp_word, state=None): comp_begin = comp_word[:-3] shell_pid, master = pty.fork() @@ -41,6 +59,8 @@ def test_completion(program, argv, comp_word): debug_log_fd.write(repr(output_b) + "\n") debug_log_fd.flush() + state.value = 1 + os.write(master, b"SET " + bytes(comp_begin.encode())) output_b = os.read(master, 4096) output = output_b.decode() @@ -55,6 +75,8 @@ def test_completion(program, argv, comp_word): time.sleep(0.01) os.write(master, b"\t") + state.value = 2 + output_b = os.read(master, 4096) output = output_b.decode() debug_log_fd.write(repr(output_b) + "\n") @@ -65,6 +87,7 @@ def test_completion(program, argv, comp_word): # meaning no concise completion is found if "\x07" in output: print(f"{comp_word}: FAIL") + state.value = 3 return output_b = os.read(master, 4096) @@ -73,6 +96,7 @@ def test_completion(program, argv, comp_word): debug_log_fd.flush() print(f"{comp_word}: OK") + state.value = 0 finally: os.close(master) debug_log_fd.close() From ff5cd2051fc8bfd609a9040ffba02697283e69af Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Fri, 26 Jul 2024 17:10:39 +0200 Subject: [PATCH 0987/2361] squash! 
added somme tests in relation with https://github.com/ClickHouse/ClickHouse/pull/54881 with new behaviour when enable_named_columns_in_function_tuple=1 (default value) --- tests/queries/0_stateless/00307_format_xml.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00307_format_xml.sql b/tests/queries/0_stateless/00307_format_xml.sql index 22566112bc7..a7e0e628945 100644 --- a/tests/queries/0_stateless/00307_format_xml.sql +++ b/tests/queries/0_stateless/00307_format_xml.sql @@ -2,4 +2,4 @@ SET output_format_write_statistics = 0; SELECT 'unnamed columns in tuple'; SELECT 'Hello & world' AS s, 'Hello\n', toDateTime('2001-02-03 04:05:06') AS time, arrayMap(x -> toString(x), range(10)) AS arr, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML; SELECT 'named columns in tuple'; -SELECT 'Hello & world' AS s, toDateTime('2001-02-03 04:05:06') AS time, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML;` +SELECT 'Hello & world' AS s, toDateTime('2001-02-03 04:05:06') AS time, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML; From 7b4951990edc08a1230c3569339c44ac22036eed Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 26 Jul 2024 15:19:47 +0000 Subject: [PATCH 0988/2361] better place to check and add more test --- src/Interpreters/InterpreterCreateQuery.cpp | 24 -------------- src/Storages/StorageFactory.cpp | 28 ++++++++++++++++ ...206_projection_merge_special_mergetree.sql | 32 +++++++++++++++++++ 3 files changed, 60 insertions(+), 24 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a5f374ba71c..ea10ad59db4 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1281,30 +1281,6 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) /// Set and retrieve list of columns, indices and constraints. Set table engine if needed. Rewrite query in canonical way. TableProperties properties = getTablePropertiesAndNormalizeCreateQuery(create, mode); - /// Projection is only supported in (Replictaed)MergeTree. - if (create.storage && create.storage->engine) - { - if (std::string_view engine_name(create.storage->engine->name); - !properties.projections.empty() && engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree") - { - bool projection_support = false; - if (auto * setting = create.storage->settings; setting != nullptr) - { - for (const auto & change : setting->changes) - { - if (change.name == "deduplicate_merge_projection_mode" && change.value != Field("throw")) - { - projection_support = true; - break; - } - } - } - if (!projection_support) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Projection is only supported in (Replictaed)MergeTree. 
Consider drop or rebuild option of deduplicate_merge_projection_mode."); - } - } - /// Check type compatible for materialized dest table and select columns if (create.select && create.is_materialized_view && create.to_table_id && mode <= LoadingStrictnessLevel::CREATE) { diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 060b271d8f4..71f70a807a8 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -20,6 +20,7 @@ namespace ErrorCodes extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; extern const int BAD_ARGUMENTS; extern const int DATA_TYPE_CANNOT_BE_USED_IN_TABLES; + extern const int NOT_IMPLEMENTED; } @@ -196,9 +197,36 @@ StoragePtr StorageFactory::get( [](StorageFeatures features) { return features.supports_skipping_indices; }); if (query.columns_list && query.columns_list->projections && !query.columns_list->projections->children.empty()) + { check_feature( "projections", [](StorageFeatures features) { return features.supports_projections; }); + + /// Now let's handle the merge tree family, projection is fully supported in (Replictaed)MergeTree, + /// but also allowed in non-throw mode with other mergetree family members. + chassert(query.storage->engine); + if (std::string_view engine_name(query.storage->engine->name); + engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree") + { + /// default throw mode in deduplicate_merge_projection_mode + bool projection_allowed = false; + if (auto * setting = query.storage->settings; setting != nullptr) + { + for (const auto & change : setting->changes) + { + if (change.name == "deduplicate_merge_projection_mode" && change.value != Field("throw")) + { + projection_allowed = true; + break; + } + } + } + if (!projection_allowed) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "Projection is fully supported in (Replictaed)MergeTree, but also allowed in non-throw mode with other" + " mergetree family members. 
Consider drop or rebuild option of deduplicate_merge_projection_mode."); + } + } } } diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index 749f906569e..25517fbba30 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -1,5 +1,37 @@ DROP TABLE IF EXISTS tp; +-- test regular merge tree +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = MergeTree order by type; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = MergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'drop'; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +OPTIMIZE TABLE tp DEDUPLICATE; + +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE tp; + + +-- test irregular merge tree CREATE TABLE tp ( type Int32, eventcnt UInt64, From a59036e5152aac2d44b07e0f62ab0ae1a066bb5b Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 15:36:15 +0000 Subject: [PATCH 0989/2361] chmod +x ./tests/queries/0_stateless/03204_format_join_on.sh --- tests/queries/0_stateless/03204_format_join_on.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tests/queries/0_stateless/03204_format_join_on.sh diff --git a/tests/queries/0_stateless/03204_format_join_on.sh b/tests/queries/0_stateless/03204_format_join_on.sh old mode 100644 new mode 100755 From d42fa0690d1b6ec19755b64740d83327e71a914a Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Jul 2024 15:59:23 +0000 Subject: [PATCH 0990/2361] Remove filterBlockWithDAG. 
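The helper that filtered a Block with an ActionsDAG in a single call is folded into its call sites: callers now build an ExpressionActions from the DAG once and then filter with it. For code that still used the removed helper, the equivalent pattern is sketched below, where `dag`, `block` and `context` stand for the caller's ActionsDAG, Block and ContextPtr (a sketch only, not a drop-in hunk):

    // Replacement for the removed VirtualColumnUtils::filterBlockWithDAG(std::move(dag), block, context).
    // buildFilterExpression() first builds the sets used by the DAG, then wraps it into ExpressionActions.
    auto filter_actions = VirtualColumnUtils::buildFilterExpression(std::move(dag), context);
    VirtualColumnUtils::filterBlockWithExpression(filter_actions, block);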
--- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp | 2 +- src/Storages/System/StorageSystemDroppedTablesParts.cpp | 2 +- src/Storages/System/StorageSystemPartsBase.cpp | 4 ++-- src/Storages/System/StorageSystemTables.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 7 +++---- src/Storages/VirtualColumnUtils.h | 2 +- 7 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ecd25e3cf71..d9ab2894dc4 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1164,7 +1164,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( if (valid) { virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, parts); - VirtualColumnUtils::filterBlockWithDAG(std::move(*filter_dag), virtual_columns_block, local_context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter_dag), local_context), virtual_columns_block); part_values = VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part"); if (part_values.empty()) return 0; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index a37dbfa554c..a6ef0063069 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -488,7 +488,7 @@ std::optional> MergeTreeDataSelectExecutor::filterPar return {}; auto virtual_columns_block = data.getBlockWithVirtualsForFilter(metadata_snapshot, parts); - VirtualColumnUtils::filterBlockWithDAG(std::move(*dag), virtual_columns_block, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*dag), context), virtual_columns_block); return VirtualColumnUtils::extractSingleValueFromBlock(virtual_columns_block, "_part"); } diff --git a/src/Storages/System/StorageSystemDroppedTablesParts.cpp b/src/Storages/System/StorageSystemDroppedTablesParts.cpp index defc4ec2d2a..c2601b8ebe3 100644 --- a/src/Storages/System/StorageSystemDroppedTablesParts.cpp +++ b/src/Storages/System/StorageSystemDroppedTablesParts.cpp @@ -75,7 +75,7 @@ StoragesDroppedInfoStream::StoragesDroppedInfoStream(std::optional f { /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'. if (filter) - VirtualColumnUtils::filterBlockWithDAG(std::move(*filter), block_to_filter, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter), context), block_to_filter); rows = block_to_filter.rows(); } diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index a0c9a5c61bd..7ace8ee24aa 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -124,7 +124,7 @@ StoragesInfoStream::StoragesInfoStream(std::optional filter_by_datab /// Filter block_to_filter with column 'database'. if (filter_by_database) - VirtualColumnUtils::filterBlockWithDAG(std::move(*filter_by_database), block_to_filter, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter_by_database), context), block_to_filter); rows = block_to_filter.rows(); /// Block contains new columns, update database_column. 
@@ -204,7 +204,7 @@ StoragesInfoStream::StoragesInfoStream(std::optional filter_by_datab { /// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'. if (filter_by_other_columns) - VirtualColumnUtils::filterBlockWithDAG(std::move(*filter_by_other_columns), block_to_filter, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*filter_by_other_columns), context), block_to_filter); rows = block_to_filter.rows(); } diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp index 9ae21ded9ba..943ce9c317a 100644 --- a/src/Storages/System/StorageSystemTables.cpp +++ b/src/Storages/System/StorageSystemTables.cpp @@ -125,7 +125,7 @@ ColumnPtr getFilteredTables( block.insert(ColumnWithTypeAndName(std::move(engine_column), std::make_shared(), "engine")); if (dag) - VirtualColumnUtils::filterBlockWithDAG(std::move(*dag), block, context); + VirtualColumnUtils::filterBlockWithExpression(VirtualColumnUtils::buildFilterExpression(std::move(*dag), context), block); return block.getByPosition(0).column; } diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index a25b7b5ca49..90c2c7f93c1 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -77,11 +77,10 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context) } } -void filterBlockWithDAG(ActionsDAG dag, Block & block, ContextPtr context) +ExpressionActionsPtr buildFilterExpression(ActionsDAG dag, ContextPtr context) { buildSetsForDAG(dag, context); - auto actions = std::make_shared(std::move(dag)); - filterBlockWithExpression(actions, block); + return std::make_shared(std::move(dag)); } void filterBlockWithExpression(const ExpressionActionsPtr & actions, Block & block) @@ -384,7 +383,7 @@ void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, { auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false); if (dag) - filterBlockWithDAG(std::move(*dag), block, context); + filterBlockWithExpression(buildFilterExpression(std::move(*dag), context), block); } } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 640f9db2fb8..73b7908b75c 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -23,7 +23,7 @@ namespace VirtualColumnUtils void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. Block should contain all the required columns. -void filterBlockWithDAG(ActionsDAG dag, Block & block, ContextPtr context); +ExpressionActionsPtr buildFilterExpression(ActionsDAG dag, ContextPtr context); void filterBlockWithExpression(const ExpressionActionsPtr & actions, Block & block); /// Builds sets used by ActionsDAG inplace. 
From 454353215736a4c6da635e777b571be0f1bd1831 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 18:33:48 +0200 Subject: [PATCH 0991/2361] Fix ShellCheck --- .../0_stateless/03203_client_benchmark_options.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/03203_client_benchmark_options.sh b/tests/queries/0_stateless/03203_client_benchmark_options.sh index 475309cebb9..cbbd8aab382 100755 --- a/tests/queries/0_stateless/03203_client_benchmark_options.sh +++ b/tests/queries/0_stateless/03203_client_benchmark_options.sh @@ -5,22 +5,22 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh output=$(${CLICKHOUSE_CLIENT} -t -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) -echo "$output" | grep -q "^2\." && echo "Ok" || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^2\." && echo "Ok"; } || { echo "Fail"; echo "$output"; } output=$(${CLICKHOUSE_CLIENT} --time -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) -echo "$output" | grep -q "^2\." && echo "Ok" || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^2\." && echo "Ok"; } || { echo "Fail"; echo "$output"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -echo "$output" | grep -q "^[0-9]\+$" && echo "Ok" || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "$output"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage=none -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) echo -n "$output" # expected no output output=$(${CLICKHOUSE_CLIENT} --memory-usage=default -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -echo "$output" | grep -q "^[0-9]\+$" && echo "Ok" || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "$output"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage=readable -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -echo "$output" | grep -q "^[0-9].*B$" && echo "Ok" || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^[0-9].*B$" && echo "Ok"; } || { echo "Fail"; echo "$output"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage=unknown -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -echo "$output" | grep -q "BAD_ARGUMENTS" && echo "Ok" || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "BAD_ARGUMENTS" && echo "Ok"; } || { echo "Fail"; echo "$output"; } From d153a1cf93e157acb7fadb5ca8b4f30fbd08bad5 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 18:37:30 +0200 Subject: [PATCH 0992/2361] add quotes --- .../0_stateless/03203_client_benchmark_options.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/03203_client_benchmark_options.sh b/tests/queries/0_stateless/03203_client_benchmark_options.sh index cbbd8aab382..37a1f2cd3ac 100755 --- a/tests/queries/0_stateless/03203_client_benchmark_options.sh +++ b/tests/queries/0_stateless/03203_client_benchmark_options.sh @@ -5,22 +5,22 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh output=$(${CLICKHOUSE_CLIENT} -t -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^2\." && echo "Ok"; } || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^2\." 
&& echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --time -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^2\." && echo "Ok"; } || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^2\." && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage=none -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) echo -n "$output" # expected no output output=$(${CLICKHOUSE_CLIENT} --memory-usage=default -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage=readable -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^[0-9].*B$" && echo "Ok"; } || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "^[0-9].*B$" && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage=unknown -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "BAD_ARGUMENTS" && echo "Ok"; } || { echo "Fail"; echo "$output"; } +{ echo "$output" | grep -q "BAD_ARGUMENTS" && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } From f276be829bebd8e704e33565127034f3e258cc31 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 26 Jul 2024 16:59:41 +0000 Subject: [PATCH 0993/2361] Automatic style fix --- .../0_stateless/01676_clickhouse_client_autocomplete.python | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python b/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python index fe08a07c214..f363cb64018 100644 --- a/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python +++ b/tests/queries/0_stateless/01676_clickhouse_client_autocomplete.python @@ -22,7 +22,9 @@ STATE_MAP = { def run_with_timeout(func, args, timeout): for _ in range(5): state = multiprocessing.Value("i", -1) - process = multiprocessing.Process(target=func, args=args, kwargs={"state": state}) + process = multiprocessing.Process( + target=func, args=args, kwargs={"state": state} + ) process.start() process.join(timeout) From 343f1fa4bae219f7c287cb314ed6e04feb9a0de4 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Jul 2024 17:42:06 +0000 Subject: [PATCH 0994/2361] Check type after optimize_rewrite_aggregate_function_with_if. 
--- .../RewriteAggregateFunctionWithIfPass.cpp | 32 ++++++++++++++++--- src/Analyzer/Resolve/QueryAnalyzer.cpp | 4 ++- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp index c1adf05ac76..a48e88132a6 100644 --- a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp +++ b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -42,7 +43,7 @@ public: if (lower_name.ends_with("if")) return; - auto & function_arguments_nodes = function_node->getArguments().getNodes(); + const auto & function_arguments_nodes = function_node->getArguments().getNodes(); if (function_arguments_nodes.size() != 1) return; @@ -50,6 +51,8 @@ public: if (!if_node || if_node->getFunctionName() != "if") return; + FunctionNodePtr replaced_node; + auto if_arguments_nodes = if_node->getArguments().getNodes(); auto * first_const_node = if_arguments_nodes[1]->as(); auto * second_const_node = if_arguments_nodes[2]->as(); @@ -75,8 +78,11 @@ public: new_arguments[0] = std::move(if_arguments_nodes[1]); new_arguments[1] = std::move(if_arguments_nodes[0]); - function_arguments_nodes = std::move(new_arguments); - resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If"); + + replaced_node = std::make_shared(function_node->getFunctionName() + "If"); + replaced_node->getArguments().getNodes() = std::move(new_arguments); + replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes(); + resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName()); } } else if (first_const_node) @@ -104,10 +110,26 @@ public: FunctionFactory::instance().get("not", getContext())->build(not_function->getArgumentColumns())); new_arguments[1] = std::move(not_function); - function_arguments_nodes = std::move(new_arguments); - resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If"); + replaced_node = std::make_shared(function_node->getFunctionName() + "If"); + replaced_node->getArguments().getNodes() = std::move(new_arguments); + replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes(); + resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName()); } } + + if (!replaced_node) + return; + + auto prev_type = function_node->getResultType(); + auto curr_type = replaced_node->getResultType(); + if (!prev_type->equals(*curr_type)) + return; + + /// Just in case, CAST compatible aggregate function states. 
+ if (WhichDataType(prev_type).isAggregateFunction() && !DataTypeAggregateFunction::strictEquals(prev_type, curr_type)) + node = createCastFunction(std::move(replaced_node), prev_type, getContext()); + else + node = std::move(replaced_node); } }; diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index b1fe2554988..b1603bb18dd 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -3239,11 +3239,13 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi auto action = function_node_ptr->getNullsAction(); std::string aggregate_function_name = rewriteAggregateFunctionNameIfNeeded(function_name, action, scope.context); + std::cerr << "==================== " << function_name << " -> " << aggregate_function_name << std::endl; + AggregateFunctionProperties properties; auto aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, action, argument_types, parameters, properties); - + std::cerr << aggregate_function->getName() << ' ' << aggregate_function->getResultType()->getName() << std::endl; function_node.resolveAsAggregateFunction(std::move(aggregate_function)); return result_projection_names; From 4833b46a1a86bb1847d2520ea12ea4650c497abc Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Jul 2024 17:43:30 +0000 Subject: [PATCH 0995/2361] Remove debug code --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index b1603bb18dd..b1fe2554988 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -3239,13 +3239,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi auto action = function_node_ptr->getNullsAction(); std::string aggregate_function_name = rewriteAggregateFunctionNameIfNeeded(function_name, action, scope.context); - std::cerr << "==================== " << function_name << " -> " << aggregate_function_name << std::endl; - AggregateFunctionProperties properties; auto aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name, action, argument_types, parameters, properties); - std::cerr << aggregate_function->getName() << ' ' << aggregate_function->getResultType()->getName() << std::endl; + function_node.resolveAsAggregateFunction(std::move(aggregate_function)); return result_projection_names; From 1ba44252cd20ab660d374970257a1ceb438236dd Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 26 Jul 2024 18:33:50 +0000 Subject: [PATCH 0996/2361] turn sql to bash --- ...uery_views_log_background_thread.reference | 25 +----------- ...02572_query_views_log_background_thread.sh | 38 ++++++++++++++++++ ...2572_query_views_log_background_thread.sql | 40 ------------------- 3 files changed, 40 insertions(+), 63 deletions(-) create mode 100755 tests/queries/0_stateless/02572_query_views_log_background_thread.sh delete mode 100644 tests/queries/0_stateless/02572_query_views_log_background_thread.sql diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.reference b/tests/queries/0_stateless/02572_query_views_log_background_thread.reference index f867fd0d085..d7f2272f5b4 100644 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.reference +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.reference @@ -1,25 +1,4 @@ --- { echoOn } -insert into 
buffer_02572 values (1); --- ensure that the flush was not direct -select * from buffer_02572; +OK +1 1 -select * from data_02572; -select * from copy_02572; --- we cannot use OPTIMIZE, this will attach query context, so let's wait -SET function_sleep_max_microseconds_per_block = 6000000; -select sleepEachRow(1) from numbers(3*2) format Null; -select sleepEachRow(1) from numbers(3*2) format Null; -system flush logs; -select count() > 0, lower(status::String), errorCodeToName(exception_code) - from system.query_views_log where - view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and - view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') - group by 2, 3 -; 1 queryfinish OK -select * from buffer_02572; -1 -select * from data_02572; -1 -select * from copy_02572; -1 diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh new file mode 100755 index 00000000000..a3e428e75c8 --- /dev/null +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# INSERT buffer_02572 -> data_02572 -> copy_02572 +# ^^ +# push to system.query_views_log + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "drop table if exists buffer_02572; + drop table if exists data_02572; drop table if exists copy_02572; drop table if exists mv_02572;" + +${CLICKHOUSE_CLIENT} --query="create table copy_02572 (key Int) engine=Memory();" +${CLICKHOUSE_CLIENT} --query="create table data_02572 (key Int) engine=Memory();" +${CLICKHOUSE_CLIENT} --query="create table buffer_02572 (key Int) engine=Buffer(currentDatabase(), data_02572, 1, 3, 3, 1, 1e9, 1, 1e9);" +${CLICKHOUSE_CLIENT} --query="create materialized view mv_02572 to copy_02572 as select * from data_02572;" + +${CLICKHOUSE_CLIENT} --query="insert into buffer_02572 values (1);" + +# ensure that the flush was not direct +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" + +# we cannot use OPTIMIZE, this will attach query context, so let's wait +for _ in {1..100}; do + $CLICKHOUSE_CLIENT -q "select * from data_02572;" | grep -q "1" && echo 'OK' && break + sleep 0.5 +done + + +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" + +${CLICKHOUSE_CLIENT} --query="system flush logs;" +${CLICKHOUSE_CLIENT} --query="select count() > 0, lower(status::String), errorCodeToName(exception_code) + from system.query_views_log where + view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and + view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') + group by 2, 3;" \ No newline at end of file diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sql b/tests/queries/0_stateless/02572_query_views_log_background_thread.sql deleted file mode 100644 index 2e9a62b71da..00000000000 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.sql +++ /dev/null @@ -1,40 +0,0 @@ --- INSERT buffer_02572 -> data_02572 -> copy_02572 --- ^^ --- push to system.query_views_log - -drop table if exists buffer_02572; -drop table if exists data_02572; -drop table if exists copy_02572; -drop table if exists mv_02572; - -create table copy_02572 (key Int) engine=Memory(); -create table data_02572 (key Int) 
engine=Memory(); -create table buffer_02572 (key Int) engine=Buffer(currentDatabase(), data_02572, 1, - /* never direct flush for flush from background thread */ - /* min_time= */ 3, 3, - 1, 1e9, - 1, 1e9); -create materialized view mv_02572 to copy_02572 as select * from data_02572; - --- { echoOn } -insert into buffer_02572 values (1); --- ensure that the flush was not direct -select * from buffer_02572; -select * from data_02572; -select * from copy_02572; --- we cannot use OPTIMIZE, this will attach query context, so let's wait -SET function_sleep_max_microseconds_per_block = 6000000; -select sleepEachRow(1) from numbers(3*2) format Null; -select sleepEachRow(1) from numbers(3*2) format Null; - -system flush logs; -select count() > 0, lower(status::String), errorCodeToName(exception_code) - from system.query_views_log where - view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and - view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') - group by 2, 3 -; - -select * from buffer_02572; -select * from data_02572; -select * from copy_02572; \ No newline at end of file From 870ec237bb427243388acbe5bca770241eeb7fbb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 22 Jun 2024 14:14:11 +0200 Subject: [PATCH 0997/2361] Add ability to load dashboards for system.dashboards from config One of the obvious reasons is to allow rendering them with readonly user, which is not possible right now, due to usage of merge() function there. Another one, is to add some custom metrics. Note, that once set, they overrides the default dashboards preset. Signed-off-by: Azat Khuzhin --- programs/server/Server.cpp | 2 + programs/server/config.xml | 25 ++++++++++ src/Interpreters/Context.cpp | 49 +++++++++++++++++++ src/Interpreters/Context.h | 4 ++ .../System/StorageSystemDashboards.cpp | 26 +++++++--- src/Storages/System/StorageSystemDashboards.h | 2 +- .../test_custom_dashboards/__init__.py | 0 .../configs/config.d/overrides.xml | 15 ++++++ .../test_custom_dashboards/test.py | 35 +++++++++++++ 9 files changed, 149 insertions(+), 9 deletions(-) create mode 100644 tests/integration/test_custom_dashboards/__init__.py create mode 100644 tests/integration/test_custom_dashboards/configs/config.d/overrides.xml create mode 100644 tests/integration/test_custom_dashboards/test.py diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 16888015f8b..f8aea3ad10c 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1582,6 +1582,8 @@ try global_context->setMacros(std::make_unique(*config, "macros", log)); global_context->setExternalAuthenticatorsConfig(*config); + global_context->setDashboardsConfig(config); + if (global_context->isServerCompletelyStarted()) { /// It does not make sense to reload anything before server has started. 
diff --git a/programs/server/config.xml b/programs/server/config.xml index 94825a55f67..5dedd78ff2a 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -1312,6 +1312,31 @@ event_date + INTERVAL 30 DAY + + + + - - create table views_max_insert_threads_null (a UInt64) Engine = Null - create materialized view views_max_insert_threads_mv Engine = Null AS select now() as ts, max(a) from views_max_insert_threads_null group by ts - - insert into views_max_insert_threads_null select * from numbers_mt(3000000000) settings max_threads = 16, max_insert_threads=16 - - drop table if exists views_max_insert_threads_null - drop table if exists views_max_insert_threads_mv - - + + + + + + + + + + From b2d8eaf1e6d67ab76f3e86cd4fd857e9535a9d20 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 29 Jul 2024 18:55:08 +0200 Subject: [PATCH 1146/2361] Debug TimerDescriptor --- src/Common/TimerDescriptor.cpp | 26 ++++++++++++++++++++++++-- src/IO/MMappedFileDescriptor.cpp | 2 -- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/Common/TimerDescriptor.cpp b/src/Common/TimerDescriptor.cpp index b36ea4059cb..9a171ae9487 100644 --- a/src/Common/TimerDescriptor.cpp +++ b/src/Common/TimerDescriptor.cpp @@ -2,9 +2,11 @@ #include #include +#include #include #include +#include namespace DB @@ -89,9 +91,29 @@ void TimerDescriptor::drain() const /// A signal happened, need to retry. if (errno == EINTR) - continue; + { + /** This is to help with debugging. + * + * Sometimes reading from timer_fd blocks, which should not happen, because we opened it in a non-blocking mode. + * But it could be possible if a rogue 3rd-party library closed our file descriptor by mistake + * (for example by double closing due to the lack of exception safety or if it is a crappy code in plain C) + * and then another file descriptor is opened in its place. + * + * Let's try to get a name of this file descriptor and log it. + */ + LoggerPtr log = getLogger("TimerDescriptor"); - throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd"); + static constexpr ssize_t max_link_path_length = 256; + char link_path[max_link_path_length]; + ssize_t link_path_length = readlink(fmt::format("/proc/self/fd/{}", timer_fd).c_str(), link_path, max_link_path_length); + if (-1 == link_path_length) + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot readlink for a timer_fd {}", timer_fd); + + LOG_TRACE(log, "Received EINTR while trying to drain a TimerDescriptor, fd {}: {}", timer_fd, std::string_view(link_path, link_path_length)); + continue; + } + + throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd {}", timer_fd); } chassert(res == sizeof(buf)); diff --git a/src/IO/MMappedFileDescriptor.cpp b/src/IO/MMappedFileDescriptor.cpp index a7eb8e4ede5..47f80005c9d 100644 --- a/src/IO/MMappedFileDescriptor.cpp +++ b/src/IO/MMappedFileDescriptor.cpp @@ -3,8 +3,6 @@ #include #include -#include - #include #include #include From 45db7c85cf25f9b4b27cedd7464a786c53580d3f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 29 Jul 2024 16:57:15 +0000 Subject: [PATCH 1147/2361] Remove has_single_port property from plan stream. 
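DataStream::has_single_port was propagated through the plan's stream properties, but as the hunks below show, the only step that enforced it was FillingStep; the check now runs when the pipeline is built, where the actual number of streams is known. A condensed sketch of the relocated check (not the full method body):

    void FillingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
    {
        if (pipeline.getNumStreams() != 1)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input");
        // the FillingTransform is then added exactly as before
    }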
--- src/Processors/QueryPlan/AggregatingStep.cpp | 2 -- src/Processors/QueryPlan/FillingStep.cpp | 8 +++----- src/Processors/QueryPlan/IQueryPlanStep.h | 6 +----- src/Processors/QueryPlan/ITransformingStep.cpp | 3 --- src/Processors/QueryPlan/ReadNothingStep.cpp | 2 +- 5 files changed, 5 insertions(+), 16 deletions(-) diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index f31de80b22d..8a5ed7fde65 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -134,7 +134,6 @@ AggregatingStep::AggregatingStep( { output_stream->sort_description = group_by_sort_description; output_stream->sort_scope = DataStream::SortScope::Global; - output_stream->has_single_port = true; } } @@ -147,7 +146,6 @@ void AggregatingStep::applyOrder(SortDescription sort_description_for_merging_, { output_stream->sort_description = group_by_sort_description; output_stream->sort_scope = DataStream::SortScope::Global; - output_stream->has_single_port = true; } explicit_sorting_required_for_aggregation_in_order = false; diff --git a/src/Processors/QueryPlan/FillingStep.cpp b/src/Processors/QueryPlan/FillingStep.cpp index 65c9cf11661..81622389ada 100644 --- a/src/Processors/QueryPlan/FillingStep.cpp +++ b/src/Processors/QueryPlan/FillingStep.cpp @@ -39,12 +39,13 @@ FillingStep::FillingStep( , interpolate_description(interpolate_description_) , use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_) { - if (!input_stream_.has_single_port) - throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input"); } void FillingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { + if (pipeline.getNumStreams() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input"); + pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) -> ProcessorPtr { if (stream_type == QueryPipelineBuilder::StreamType::Totals) @@ -69,9 +70,6 @@ void FillingStep::describeActions(JSONBuilder::JSONMap & map) const void FillingStep::updateOutputStream() { - if (!input_streams.front().has_single_port) - throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input"); - output_stream = createOutputStream( input_streams.front(), FillingTransform::transformHeader(input_streams.front().header, sort_description), getDataStreamTraits()); } diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index daca88fcceb..44eb7ea0c59 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -28,9 +28,6 @@ class DataStream public: Block header; - /// QueryPipeline has single port. Totals or extremes ports are not counted. - bool has_single_port = false; - /// Sorting scope. Please keep the mutual order (more strong mode should have greater value). 
enum class SortScope : uint8_t { @@ -51,8 +48,7 @@ public: bool hasEqualPropertiesWith(const DataStream & other) const { - return has_single_port == other.has_single_port - && sort_description == other.sort_description + return sort_description == other.sort_description && (sort_description.empty() || sort_scope == other.sort_scope); } diff --git a/src/Processors/QueryPlan/ITransformingStep.cpp b/src/Processors/QueryPlan/ITransformingStep.cpp index 9ecfdb0af22..3fa9d1b8308 100644 --- a/src/Processors/QueryPlan/ITransformingStep.cpp +++ b/src/Processors/QueryPlan/ITransformingStep.cpp @@ -20,9 +20,6 @@ DataStream ITransformingStep::createOutputStream( { DataStream output_stream{.header = std::move(output_header)}; - output_stream.has_single_port = stream_traits.returns_single_stream - || (input_stream.has_single_port && stream_traits.preserves_number_of_streams); - if (stream_traits.preserves_sorting) { output_stream.sort_description = input_stream.sort_description; diff --git a/src/Processors/QueryPlan/ReadNothingStep.cpp b/src/Processors/QueryPlan/ReadNothingStep.cpp index 253f3a5b980..3037172bbd4 100644 --- a/src/Processors/QueryPlan/ReadNothingStep.cpp +++ b/src/Processors/QueryPlan/ReadNothingStep.cpp @@ -6,7 +6,7 @@ namespace DB { ReadNothingStep::ReadNothingStep(Block output_header) - : ISourceStep(DataStream{.header = std::move(output_header), .has_single_port = true}) + : ISourceStep(DataStream{.header = std::move(output_header)}) { } From 412268bf4e64b5c0df3980e5b8ccd2b078cf2177 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 29 Jul 2024 17:05:46 +0000 Subject: [PATCH 1148/2361] Update reference --- ...dynamic_read_subcolumns_small.reference.j2 | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 index 3d814e1205a..be3f4e53990 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 @@ -25,26 +25,26 @@ UInt64 7 7 \N [] 8 8 \N [] 9 9 \N [] -[[0]] \N \N [] str_10 \N str_10 [] -[[0,1]] \N \N [] +[[0]] \N \N [] str_11 \N str_11 [] -[[0,1,2]] \N \N [] +[[0,1]] \N \N [] str_12 \N str_12 [] -[[0,1,2,3]] \N \N [] +[[0,1,2]] \N \N [] str_13 \N str_13 [] -[[0,1,2,3,4]] \N \N [] +[[0,1,2,3]] \N \N [] str_14 \N str_14 [] -[[0,1,2,3,4,5]] \N \N [] +[[0,1,2,3,4]] \N \N [] str_15 \N str_15 [] -[[0,1,2,3,4,5,6]] \N \N [] +[[0,1,2,3,4,5]] \N \N [] str_16 \N str_16 [] -[[0,1,2,3,4,5,6,7]] \N \N [] +[[0,1,2,3,4,5,6]] \N \N [] str_17 \N str_17 [] -[[0,1,2,3,4,5,6,7,8]] \N \N [] +[[0,1,2,3,4,5,6,7]] \N \N [] str_18 \N str_18 [] -[[0,1,2,3,4,5,6,7,8,9]] \N \N [] +[[0,1,2,3,4,5,6,7,8]] \N \N [] str_19 \N str_19 [] +[[0,1,2,3,4,5,6,7,8,9]] \N \N [] [20] \N \N [20] ['str_21','str_21'] \N \N ['str_21','str_21'] [22,22,22] \N \N [22,22,22] From 812a2b929938c293441f6a893adf96a00d469351 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 29 Jul 2024 17:24:28 +0000 Subject: [PATCH 1149/2361] formatDateTime[InJodaSyntax]: make format string optional --- .../functions/type-conversion-functions.md | 12 ++++---- src/Functions/parseDateTime.cpp | 30 ++++++++++++------- .../02668_parse_datetime.reference | 6 +++- .../0_stateless/02668_parse_datetime.sql | 7 ++++- ...68_parse_datetime_in_joda_syntax.reference | 6 +++- .../02668_parse_datetime_in_joda_syntax.sql | 7 ++++- 6 files changed, 48 insertions(+), 20 
deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 7cc2c022143..dc90697bd20 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -3467,13 +3467,13 @@ This function is the opposite operation of function [formatDateTime](../function **Syntax** ``` sql -parseDateTime(str, format[, timezone]) +parseDateTime(str[, format[, timezone]]) ``` **Arguments** -- `str` — the String to be parsed -- `format` — the format string +- `str` — The String to be parsed +- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). Optional. **Returned value(s)** @@ -3516,13 +3516,13 @@ This function is the opposite operation of function [formatDateTimeInJodaSyntax] **Syntax** ``` sql -parseDateTimeInJodaSyntax(str, format[, timezone]) +parseDateTimeInJodaSyntax(str[, format[, timezone]]) ``` **Arguments** -- `str` — the String to be parsed -- `format` — the format string +- `str` — The String to be parsed +- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified. - `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). Optional. **Returned value(s)** diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp index bdca0151bba..7ca10677be7 100644 --- a/src/Functions/parseDateTime.cpp +++ b/src/Functions/parseDateTime.cpp @@ -582,11 +582,11 @@ namespace DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { FunctionArgumentDescriptors mandatory_args{ - {"time", static_cast(&isString), nullptr, "String"}, - {"format", static_cast(&isString), nullptr, "String"} + {"time", static_cast(&isString), nullptr, "String"} }; FunctionArgumentDescriptors optional_args{ + {"format", static_cast(&isString), nullptr, "String"}, {"timezone", static_cast(&isString), &isColumnConst, "const String"} }; @@ -2029,14 +2029,24 @@ namespace String getFormat(const ColumnsWithTypeAndName & arguments) const { - const auto * format_column = checkAndGetColumnConst(arguments[1].column.get()); - if (!format_column) - throw Exception( - ErrorCodes::ILLEGAL_COLUMN, - "Illegal column {} of second ('format') argument of function {}. Must be constant string.", - arguments[1].column->getName(), - getName()); - return format_column->getValue(); + if (arguments.size() == 1) + { + if constexpr (parse_syntax == ParseSyntax::MySQL) + return "%Y-%m-%d %H:%i:%s"; + else + return "yyyy-MM-dd HH:mm:ss"; + } + else + { + const auto * col_format = checkAndGetColumnConst(arguments[1].column.get()); + if (!col_format) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Illegal column {} of second ('format') argument of function {}. 
Must be constant string.", + arguments[1].column->getName(), + getName()); + return col_format->getValue(); + } } const DateLUTImpl & getTimeZone(const ColumnsWithTypeAndName & arguments) const diff --git a/tests/queries/0_stateless/02668_parse_datetime.reference b/tests/queries/0_stateless/02668_parse_datetime.reference index d21a51ce70c..b67ca2d8b76 100644 --- a/tests/queries/0_stateless/02668_parse_datetime.reference +++ b/tests/queries/0_stateless/02668_parse_datetime.reference @@ -239,7 +239,7 @@ select sTr_To_DaTe('10:04:11 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = toDateTi select str_to_date('10:04:11 invalid 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') IS NULL; 1 -- Error handling -select parseDateTime('12 AM'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select parseDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- Fuzzer crash bug #53715 select parseDateTime('', '', toString(number)) from numbers(13); -- { serverError ILLEGAL_COLUMN } @@ -270,3 +270,7 @@ select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); 2022-08-13 07:58:32 select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); 2022-08-13 07:58:32 +-- The format string argument is optional +set session_timezone = 'UTC'; -- don't randomize the session timezone +select parseDateTime('2021-01-04 23:12:34') = toDateTime('2021-01-04 23:12:34'); +1 diff --git a/tests/queries/0_stateless/02668_parse_datetime.sql b/tests/queries/0_stateless/02668_parse_datetime.sql index 02ac0c5f35c..7b3aed60a4a 100644 --- a/tests/queries/0_stateless/02668_parse_datetime.sql +++ b/tests/queries/0_stateless/02668_parse_datetime.sql @@ -162,7 +162,7 @@ select sTr_To_DaTe('10:04:11 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = toDateTi select str_to_date('10:04:11 invalid 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') IS NULL; -- Error handling -select parseDateTime('12 AM'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select parseDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- Fuzzer crash bug #53715 @@ -187,4 +187,9 @@ select parseDateTime('08 13, 2022, 07:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); +-- The format string argument is optional +set session_timezone = 'UTC'; -- don't randomize the session timezone +select parseDateTime('2021-01-04 23:12:34') = toDateTime('2021-01-04 23:12:34'); + + -- { echoOff } diff --git a/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.reference b/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.reference index 9fbf105dc41..6f560577ab5 100644 --- a/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.reference +++ b/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.reference @@ -354,5 +354,9 @@ select parseDateTimeInJodaSyntaxOrNull('2001 366 2000', 'yyyy D yyyy', 'UTC') = select parseDateTimeInJodaSyntaxOrNull('2001 invalid 366 2000', 'yyyy D yyyy', 'UTC') IS NULL; 1 -- Error handling -select parseDateTimeInJodaSyntax('12 AM'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select parseDateTimeInJodaSyntax(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select parseDateTimeInJodaSyntax('12 AM', 
'h a', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- The format string argument is optional +set session_timezone = 'UTC'; -- don't randomize the session timezone +select parseDateTimeInJodaSyntax('2021-01-04 23:12:34') = toDateTime('2021-01-04 23:12:34'); +1 diff --git a/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.sql b/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.sql index f5810d3d4c3..28d14607ba6 100644 --- a/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.sql +++ b/tests/queries/0_stateless/02668_parse_datetime_in_joda_syntax.sql @@ -239,6 +239,11 @@ select parseDateTimeInJodaSyntaxOrNull('2001 366 2000', 'yyyy D yyyy', 'UTC') = select parseDateTimeInJodaSyntaxOrNull('2001 invalid 366 2000', 'yyyy D yyyy', 'UTC') IS NULL; -- Error handling -select parseDateTimeInJodaSyntax('12 AM'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select parseDateTimeInJodaSyntax(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select parseDateTimeInJodaSyntax('12 AM', 'h a', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- The format string argument is optional +set session_timezone = 'UTC'; -- don't randomize the session timezone +select parseDateTimeInJodaSyntax('2021-01-04 23:12:34') = toDateTime('2021-01-04 23:12:34'); + -- { echoOff } From 1c9d60ca972eab618b82704e14f1a680daed9c04 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Jul 2024 16:36:45 +0000 Subject: [PATCH 1150/2361] Refactoring --- programs/client/Client.h | 6 +- programs/local/LocalServer.h | 4 +- src/Client/ClientApplicationBase.cpp | 539 +++++++++++++++++++++++++++ src/Client/ClientApplicationBase.h | 54 +++ src/Client/ClientBase.cpp | 362 +----------------- src/Client/ClientBase.h | 73 +++- 6 files changed, 659 insertions(+), 379 deletions(-) create mode 100644 src/Client/ClientApplicationBase.cpp create mode 100644 src/Client/ClientApplicationBase.h diff --git a/programs/client/Client.h b/programs/client/Client.h index 9571440d6ba..7fdf77031ab 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -1,14 +1,16 @@ #pragma once -#include +#include namespace DB { -class Client : public ClientBase +class Client : public ClientApplicationBase { public: + using Arguments = ClientApplicationBase::Arguments; + Client() = default; void initialize(Poco::Util::Application & self) override; diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index ae9980311e1..b18a7a90961 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -21,7 +21,7 @@ namespace DB /// Lightweight Application for clickhouse-local /// No networking, no extra configs and working directories, no pid and status files, no dictionaries, no logging. 
/// Quiet mode by default -class LocalServer : public ClientBase, public Loggers +class LocalServer : public ClientApplicationBase, public Loggers { public: LocalServer() = default; diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp new file mode 100644 index 00000000000..59c98983694 --- /dev/null +++ b/src/Client/ClientApplicationBase.cpp @@ -0,0 +1,539 @@ +#include + +#include +#include +#include +#include +#include +#include + +#include +#include "config.h" + +#include +#include +#include +#include +#include + +using namespace std::literals; + +namespace CurrentMetrics +{ + extern const Metric MemoryTracking; +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int CANNOT_SET_SIGNAL_HANDLER; + extern const int UNRECOGNIZED_ARGUMENTS; +} + +static ClientInfo::QueryKind parseQueryKind(const String & query_kind) +{ + if (query_kind == "initial_query") + return ClientInfo::QueryKind::INITIAL_QUERY; + if (query_kind == "secondary_query") + return ClientInfo::QueryKind::SECONDARY_QUERY; + if (query_kind == "no_query") + return ClientInfo::QueryKind::NO_QUERY; + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown query kind {}", query_kind); +} + +/// This signal handler is set only for SIGINT and SIGQUIT. +void interruptSignalHandler(int signum) +{ + if (ClientApplicationBase::getInstance().tryStopQuery()) + safeExit(128 + signum); +} + +ClientApplicationBase::~ClientApplicationBase() = default; +ClientApplicationBase::ClientApplicationBase() : ClientBase(STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO, std::cin, std::cout, std::cerr) {} + +ClientApplicationBase & ClientApplicationBase::getInstance() +{ + return dynamic_cast(Poco::Util::Application::instance()); +} + +void ClientApplicationBase::setupSignalHandler() +{ + ClientApplicationBase::getInstance().stopQuery(); + + struct sigaction new_act; + memset(&new_act, 0, sizeof(new_act)); + + new_act.sa_handler = interruptSignalHandler; + new_act.sa_flags = 0; + +#if defined(OS_DARWIN) + sigemptyset(&new_act.sa_mask); +#else + if (sigemptyset(&new_act.sa_mask)) + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); +#endif + + if (sigaction(SIGINT, &new_act, nullptr)) + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); + + if (sigaction(SIGQUIT, &new_act, nullptr)) + throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); +} + + +namespace +{ + +/// Define transparent hash to we can use +/// std::string_view with the containers +struct TransparentStringHash +{ + using is_transparent = void; + size_t operator()(std::string_view txt) const + { + return std::hash{}(txt); + } +}; + +/* + * This functor is used to parse command line arguments and replace dashes with underscores, + * allowing options to be specified using either dashes or underscores. 
+ */ +class OptionsAliasParser +{ +public: + explicit OptionsAliasParser(const boost::program_options::options_description& options) + { + options_names.reserve(options.options().size()); + for (const auto& option : options.options()) + options_names.insert(option->long_name()); + } + + /* + * Parses arguments by replacing dashes with underscores, and matches the resulting name with known options + * Implements boost::program_options::ext_parser logic + */ + std::pair operator()(const std::string & token) const + { + if (!token.starts_with("--")) + return {}; + std::string arg = token.substr(2); + + // divide token by '=' to separate key and value if options style=long_allow_adjacent + auto pos_eq = arg.find('='); + std::string key = arg.substr(0, pos_eq); + + if (options_names.contains(key)) + // option does not require any changes, because it is already correct + return {}; + + std::replace(key.begin(), key.end(), '-', '_'); + if (!options_names.contains(key)) + // after replacing '-' with '_' argument is still unknown + return {}; + + std::string value; + if (pos_eq != std::string::npos && pos_eq < arg.size()) + value = arg.substr(pos_eq + 1); + + return {key, value}; + } + +private: + std::unordered_set options_names; +}; + +} + +/// Enable optimizations even in debug builds because otherwise options parsing becomes extremely slow affecting .sh tests +#if defined(__clang__) +#pragma clang optimize on +#endif +void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments) +{ + if (allow_repeated_settings) + addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value()); + else + addProgramOptions(cmd_settings, options_description.main_description.value()); + + if (allow_merge_tree_settings) + { + /// Add merge tree settings manually, because names of some settings + /// may clash. Query settings have higher priority and we just + /// skip ambiguous merge tree settings. + auto & main_options = options_description.main_description.value(); + + std::unordered_set> main_option_names; + for (const auto & option : main_options.options()) + main_option_names.insert(option->long_name()); + + for (const auto & setting : cmd_merge_tree_settings.all()) + { + const auto add_setting = [&](const std::string_view name) + { + if (auto it = main_option_names.find(name); it != main_option_names.end()) + return; + + if (allow_repeated_settings) + addProgramOptionAsMultitoken(cmd_merge_tree_settings, main_options, name, setting); + else + addProgramOption(cmd_merge_tree_settings, main_options, name, setting); + }; + + const auto & setting_name = setting.getName(); + + add_setting(setting_name); + + const auto & settings_to_aliases = MergeTreeSettings::Traits::settingsToAliases(); + if (auto it = settings_to_aliases.find(setting_name); it != settings_to_aliases.end()) + { + for (const auto alias : it->second) + { + add_setting(alias); + } + } + } + } + + /// Parse main commandline options. + auto parser = po::command_line_parser(arguments) + .options(options_description.main_description.value()) + .extra_parser(OptionsAliasParser(options_description.main_description.value())) + .allow_unregistered(); + po::parsed_options parsed = parser.run(); + + /// Check unrecognized options without positional options. 
+ auto unrecognized_options = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::exclude_positional); + if (!unrecognized_options.empty()) + { + auto hints = this->getHints(unrecognized_options[0]); + if (!hints.empty()) + throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'. Maybe you meant {}", + unrecognized_options[0], toString(hints)); + + throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'", unrecognized_options[0]); + } + + /// Check positional options. + for (const auto & op : parsed.options) + { + if (!op.unregistered && op.string_key.empty() && !op.original_tokens[0].starts_with("--") + && !op.original_tokens[0].empty() && !op.value.empty()) + { + /// Two special cases for better usability: + /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" + /// These are relevant for interactive usage - user-friendly, but questionable in general. + /// In case of ambiguity or for scripts, prefer using proper options. + + const auto & token = op.original_tokens[0]; + po::variable_value value(boost::any(op.value), false); + + const char * option; + if (token.contains(' ')) + option = "query"; + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); + + if (!options.emplace(option, value).second) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); + } + } + + po::store(parsed, options); +} + +void ClientApplicationBase::addMultiquery(std::string_view query, Arguments & common_arguments) const +{ + common_arguments.emplace_back("--multiquery"); + common_arguments.emplace_back("-q"); + common_arguments.emplace_back(query); +} + +Poco::Util::LayeredConfiguration & ClientApplicationBase::getClientConfiguration() +{ + return config(); +} + +void ClientApplicationBase::init(int argc, char ** argv) +{ + namespace po = boost::program_options; + + /// Don't parse options with Poco library, we prefer neat boost::program_options. + stopOptionsProcessing(); + + stdin_is_a_tty = isatty(STDIN_FILENO); + stdout_is_a_tty = isatty(STDOUT_FILENO); + stderr_is_a_tty = isatty(STDERR_FILENO); + terminal_width = getTerminalWidth(); + + std::vector external_tables_arguments; + Arguments common_arguments = {""}; /// 0th argument is ignored. + std::vector hosts_and_ports_arguments; + + if (argc) + argv0 = argv[0]; + readArguments(argc, argv, common_arguments, external_tables_arguments, hosts_and_ports_arguments); + + /// Support for Unicode dashes + /// Interpret Unicode dashes as default double-hyphen + for (auto & arg : common_arguments) + { + // replace em-dash(U+2014) + boost::replace_all(arg, "—", "--"); + // replace en-dash(U+2013) + boost::replace_all(arg, "–", "--"); + // replace mathematical minus(U+2212) + boost::replace_all(arg, "−", "--"); + } + + + OptionsDescription options_description; + options_description.main_description.emplace(createOptionsDescription("Main options", terminal_width)); + + /// Common options for clickhouse-client and clickhouse-local. + options_description.main_description->add_options() + ("help", "print usage summary, combine with --verbose to display all options") + ("verbose", "print query and other debugging info") + ("version,V", "print version information and exit") + ("version-clean", "print version in machine-readable format and exit") + + ("config-file,C", po::value(), "config-file path") + + ("query,q", po::value>()->multitoken(), R"(Query. 
Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple semicolon-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") + ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") + ("multiquery,n", "Obsolete, does nothing") + ("multiline,m", "If specified, allow multiline queries (do not send the query on Enter)") + ("database,d", po::value(), "database") + ("query_kind", po::value()->default_value("initial_query"), "One of initial_query/secondary_query/no_query") + ("query_id", po::value(), "query_id") + + ("history_file", po::value(), "path to history file") + + ("stage", po::value()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit") + ("progress", po::value()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::DEFAULT, "default"), "Print progress of queries execution - to TTY: tty|on|1|true|yes; to STDERR non-interactive mode: err; OFF: off|0|false|no; DEFAULT - interactive to TTY, non-interactive is off") + + ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who are used to the MySQL client.") + ("wait_for_suggestions_to_load", "Load suggestion data synchronously.") + ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)") + ("memory-usage", po::value()->implicit_value("default")->default_value("none"), "print memory usage to stderr in non-interactive mode (for benchmarks). 
Values: 'none', 'default', 'readable'") + + ("echo", "in batch mode, print query before execution") + + ("log-level", po::value(), "log level") + ("server_logs_file", po::value(), "put server logs into specified file") + + ("suggestion_limit", po::value()->default_value(10000), "Suggestion limit for how many databases, tables and columns to fetch.") + + ("format,f", po::value(), "default output format (and input format for clickhouse-local)") + ("output-format", po::value(), "default output format (this option has preference over --format)") + + ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command") + ("highlight", po::value()->default_value(true), "enable or disable basic syntax highlight in interactive command line") + + ("ignore-error", "do not stop processing when an error occurs") + ("stacktrace", "print stack traces of exceptions") + ("hardware-utilization", "print hardware utilization information in progress bar") + ("print-profile-events", po::value(&profile_events.print)->zero_tokens(), "Printing ProfileEvents packets") + ("profile-events-delay-ms", po::value()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)") + ("processed-rows", "print the number of locally processed rows") + + ("interactive", "Process queries-file or --query query and start interactive mode") + ("pager", po::value(), "Pipe all output into this command (less or similar)") + ("max_memory_usage_in_client", po::value(), "Set memory limit in client/local server") + + ("fuzzer-args", po::value(), "Command line arguments for the LLVM's libFuzzer driver. Only relevant if the application is compiled with libFuzzer.") + + ("client_logs_file", po::value(), "Path to a file for writing client logs. Currently we only have fatal logs (when the client crashes)") + ; + + addOptions(options_description); + + OptionsDescription options_description_non_verbose = options_description; + + auto getter = [](const auto & op) + { + String op_long_name = op->long_name(); + return "--" + String(op_long_name); + }; + + if (options_description.main_description) + { + const auto & main_options = options_description.main_description->options(); + std::transform(main_options.begin(), main_options.end(), std::back_inserter(cmd_options), getter); + } + + if (options_description.external_description) + { + const auto & external_options = options_description.external_description->options(); + std::transform(external_options.begin(), external_options.end(), std::back_inserter(cmd_options), getter); + } + + po::variables_map options; + parseAndCheckOptions(options_description, options, common_arguments); + po::notify(options); + + if (options.count("version") || options.count("V")) + { + showClientVersion(); + exit(0); // NOLINT(concurrency-mt-unsafe) + } + + if (options.count("version-clean")) + { + output_stream << VERSION_STRING; + exit(0); // NOLINT(concurrency-mt-unsafe) + } + + if (options.count("verbose")) + getClientConfiguration().setBool("verbose", true); + + /// Output of help message. + if (options.count("help") + || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. 
+ { + if (getClientConfiguration().getBool("verbose", false)) + printHelpMessage(options_description, true); + else + printHelpMessage(options_description_non_verbose, false); + exit(0); // NOLINT(concurrency-mt-unsafe) + } + + /// Common options for clickhouse-client and clickhouse-local. + + /// Output execution time to stderr in batch mode. + if (options.count("time")) + getClientConfiguration().setBool("print-time-to-stderr", true); + if (options.count("memory-usage")) + { + const auto & memory_usage_mode = options["memory-usage"].as(); + if (memory_usage_mode != "none" && memory_usage_mode != "default" && memory_usage_mode != "readable") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown memory-usage mode: {}", memory_usage_mode); + getClientConfiguration().setString("print-memory-to-stderr", memory_usage_mode); + } + + if (options.count("query")) + queries = options["query"].as>(); + if (options.count("query_id")) + getClientConfiguration().setString("query_id", options["query_id"].as()); + if (options.count("database")) + getClientConfiguration().setString("database", options["database"].as()); + if (options.count("config-file")) + getClientConfiguration().setString("config-file", options["config-file"].as()); + if (options.count("queries-file")) + queries_files = options["queries-file"].as>(); + if (options.count("multiline")) + getClientConfiguration().setBool("multiline", true); + if (options.count("ignore-error")) + getClientConfiguration().setBool("ignore-error", true); + if (options.count("format")) + getClientConfiguration().setString("format", options["format"].as()); + if (options.count("output-format")) + getClientConfiguration().setString("output-format", options["output-format"].as()); + if (options.count("vertical")) + getClientConfiguration().setBool("vertical", true); + if (options.count("stacktrace")) + getClientConfiguration().setBool("stacktrace", true); + if (options.count("print-profile-events")) + getClientConfiguration().setBool("print-profile-events", true); + if (options.count("profile-events-delay-ms")) + getClientConfiguration().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as()); + /// Whether to print the number of processed rows at + if (options.count("processed-rows")) + getClientConfiguration().setBool("print-num-processed-rows", true); + if (options.count("progress")) + { + switch (options["progress"].as()) + { + case DEFAULT: + getClientConfiguration().setString("progress", "default"); + break; + case OFF: + getClientConfiguration().setString("progress", "off"); + break; + case TTY: + getClientConfiguration().setString("progress", "tty"); + break; + case ERR: + getClientConfiguration().setString("progress", "err"); + break; + } + } + if (options.count("echo")) + getClientConfiguration().setBool("echo", true); + if (options.count("disable_suggestion")) + getClientConfiguration().setBool("disable_suggestion", true); + if (options.count("wait_for_suggestions_to_load")) + getClientConfiguration().setBool("wait_for_suggestions_to_load", true); + if (options.count("suggestion_limit")) + getClientConfiguration().setInt("suggestion_limit", options["suggestion_limit"].as()); + if (options.count("highlight")) + getClientConfiguration().setBool("highlight", options["highlight"].as()); + if (options.count("history_file")) + getClientConfiguration().setString("history_file", options["history_file"].as()); + if (options.count("interactive")) + getClientConfiguration().setBool("interactive", true); + if (options.count("pager")) + 
getClientConfiguration().setString("pager", options["pager"].as()); + + if (options.count("log-level")) + Poco::Logger::root().setLevel(options["log-level"].as()); + if (options.count("server_logs_file")) + server_logs_file = options["server_logs_file"].as(); + + query_processing_stage = QueryProcessingStage::fromString(options["stage"].as()); + query_kind = parseQueryKind(options["query_kind"].as()); + profile_events.print = options.count("print-profile-events"); + profile_events.delay_ms = options["profile-events-delay-ms"].as(); + + processOptions(options_description, options, external_tables_arguments, hosts_and_ports_arguments); + { + std::unordered_set alias_names; + alias_names.reserve(options_description.main_description->options().size()); + for (const auto& option : options_description.main_description->options()) + alias_names.insert(option->long_name()); + argsToConfig(common_arguments, getClientConfiguration(), 100, &alias_names); + } + + clearPasswordFromCommandLine(argc, argv); + + /// Limit on total memory usage + std::string max_client_memory_usage = getClientConfiguration().getString("max_memory_usage_in_client", "0" /*default value*/); + if (max_client_memory_usage != "0") + { + UInt64 max_client_memory_usage_int = parseWithSizeSuffix(max_client_memory_usage.c_str(), max_client_memory_usage.length()); + + total_memory_tracker.setHardLimit(max_client_memory_usage_int); + total_memory_tracker.setDescription("(total)"); + total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking); + } + + /// Print stacktrace in case of crash + HandledSignals::instance().setupTerminateHandler(); + HandledSignals::instance().setupCommonDeadlySignalHandlers(); + /// We don't setup signal handlers for SIGINT, SIGQUIT, SIGTERM because we don't + /// have an option for client to shutdown gracefully. + + fatal_channel_ptr = new Poco::SplitterChannel; + fatal_console_channel_ptr = new Poco::ConsoleChannel; + fatal_channel_ptr->addChannel(fatal_console_channel_ptr); + if (options.count("client_logs_file")) + { + fatal_file_channel_ptr = new Poco::SimpleFileChannel(options["client_logs_file"].as()); + fatal_channel_ptr->addChannel(fatal_file_channel_ptr); + } + + fatal_log = createLogger("ClientBase", fatal_channel_ptr.get(), Poco::Message::PRIO_FATAL); + signal_listener = std::make_unique(nullptr, fatal_log); + signal_listener_thread.start(*signal_listener); + +#if USE_GWP_ASAN + GWPAsan::initFinished(); +#endif + +} + + +} diff --git a/src/Client/ClientApplicationBase.h b/src/Client/ClientApplicationBase.h new file mode 100644 index 00000000000..217fa29c3f4 --- /dev/null +++ b/src/Client/ClientApplicationBase.h @@ -0,0 +1,54 @@ +#pragma once + + +#include +#include +#include +#include +#include +#include + +#include + +namespace po = boost::program_options; + +namespace DB +{ + +void interruptSignalHandler(int signum); + +/** + * The base class for client appliucations such as + * clickhouse-client or clickhouse-local. + * The main purpose and responsibility of it is dealing with + * application-specific stuff such as command line arguments parsing + * and setting up signal handlers, so queries will be cancelled after + * Ctrl+C is pressed. 
+ */ +class ClientApplicationBase : public ClientBase, public Poco::Util::Application, public IHints<2> +{ +public: + using ClientBase::processOptions; + using Arguments = ClientBase::Arguments; + + static ClientApplicationBase & getInstance(); + + ClientApplicationBase(); + ~ClientApplicationBase() override; + + void init(int argc, char ** argv); + std::vector getAllRegisteredNames() const override { return cmd_options; } + +protected: + Poco::Util::LayeredConfiguration & getClientConfiguration() override; + void setupSignalHandler() override; + void addMultiquery(std::string_view query, Arguments & common_arguments) const; + +private: + void parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments); + + std::vector cmd_options; +}; + + +} diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index a88eed25db1..9cf3b955d26 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -17,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -158,17 +156,6 @@ std::istream& operator>> (std::istream & in, ProgressOption & progress) return in; } -static ClientInfo::QueryKind parseQueryKind(const String & query_kind) -{ - if (query_kind == "initial_query") - return ClientInfo::QueryKind::INITIAL_QUERY; - if (query_kind == "secondary_query") - return ClientInfo::QueryKind::SECONDARY_QUERY; - if (query_kind == "no_query") - return ClientInfo::QueryKind::NO_QUERY; - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown query kind {}", query_kind); -} - static void incrementProfileEventsBlock(Block & dst, const Block & src) { if (!dst) @@ -269,36 +256,6 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src) dst.setColumns(std::move(mutable_columns)); } - -std::atomic exit_after_signals = 0; - -class QueryInterruptHandler : private boost::noncopyable -{ -public: - /// Store how much interrupt signals can be before stopping the query - /// by default stop after the first interrupt signal. - static void start(Int32 signals_before_stop = 1) { exit_after_signals.store(signals_before_stop); } - - /// Set value not greater then 0 to mark the query as stopped. - static void stop() { exit_after_signals.store(0); } - - /// Return true if the query was stopped. - /// Query was stopped if it received at least "signals_before_stop" interrupt signals. - static bool try_stop() { return exit_after_signals.fetch_sub(1) <= 0; } - static bool cancelled() { return exit_after_signals.load() <= 0; } - - /// Return how much interrupt signals remain before stop. - static Int32 cancelled_status() { return exit_after_signals.load(); } -}; - -/// This signal handler is set for SIGINT and SIGQUIT. -void interruptSignalHandler(int signum) -{ - if (QueryInterruptHandler::try_stop()) - safeExit(128 + signum); -} - - /// To cancel the query on local format error. 
class LocalFormatError : public DB::Exception { @@ -345,31 +302,6 @@ ClientBase::ClientBase( terminal_width = getTerminalWidth(in_fd, err_fd); } -void ClientBase::setupSignalHandler() -{ - QueryInterruptHandler::stop(); - - struct sigaction new_act; - memset(&new_act, 0, sizeof(new_act)); - - new_act.sa_handler = interruptSignalHandler; - new_act.sa_flags = 0; - -#if defined(OS_DARWIN) - sigemptyset(&new_act.sa_mask); -#else - if (sigemptyset(&new_act.sa_mask)) - throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); -#endif - - if (sigaction(SIGINT, &new_act, nullptr)) - throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); - - if (sigaction(SIGQUIT, &new_act, nullptr)) - throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); -} - - ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements) { std::unique_ptr parser; @@ -1113,8 +1045,8 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa { try { - QueryInterruptHandler::start(signals_before_stop); - SCOPE_EXIT({ QueryInterruptHandler::stop(); }); + query_interrupt_handler.start(signals_before_stop); + SCOPE_EXIT({ query_interrupt_handler.stop(); }); connection->sendQuery( connection_parameters.timeouts, @@ -1178,13 +1110,13 @@ void ClientBase::receiveResult(ASTPtr parsed_query, Int32 signals_before_stop, b /// to avoid losing sync. if (!cancelled) { - if (partial_result_on_first_cancel && QueryInterruptHandler::cancelled_status() == signals_before_stop - 1) + if (partial_result_on_first_cancel && query_interrupt_handler.cancelled_status() == signals_before_stop - 1) { connection->sendCancel(); /// First cancel reading request was sent. Next requests will only be with a full cancel partial_result_on_first_cancel = false; } - else if (QueryInterruptHandler::cancelled()) + else if (query_interrupt_handler.cancelled()) { cancelQuery(); } @@ -1563,8 +1495,8 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars return; } - QueryInterruptHandler::start(); - SCOPE_EXIT({ QueryInterruptHandler::stop(); }); + query_interrupt_handler.start(); + SCOPE_EXIT({ query_interrupt_handler.stop(); }); connection->sendQuery( connection_parameters.timeouts, @@ -1775,7 +1707,7 @@ try Block block; while (executor.pull(block)) { - if (!cancelled && QueryInterruptHandler::cancelled()) + if (!cancelled && query_interrupt_handler.cancelled()) { cancelQuery(); executor.cancel(); @@ -2857,7 +2789,6 @@ void ClientBase::runLibFuzzer() void ClientBase::runLibFuzzer() {} #endif - void ClientBase::clearTerminal() { /// Clear from cursor until end of screen. @@ -2867,288 +2798,9 @@ void ClientBase::clearTerminal() output_stream << "\033[0J" "\033[?25h"; } - void ClientBase::showClientVersion() { output_stream << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl; } -void ClientBase::init(int argc, char ** argv) -{ - namespace po = boost::program_options; - - /// Don't parse options with Poco library, we prefer neat boost::program_options. - stopOptionsProcessing(); - - stdin_is_a_tty = isatty(STDIN_FILENO); - stdout_is_a_tty = isatty(STDOUT_FILENO); - stderr_is_a_tty = isatty(STDERR_FILENO); - terminal_width = getTerminalWidth(); - - std::vector external_tables_arguments; - Arguments common_arguments = {""}; /// 0th argument is ignored. 
- std::vector hosts_and_ports_arguments; - - if (argc) - argv0 = argv[0]; - readArguments(argc, argv, common_arguments, external_tables_arguments, hosts_and_ports_arguments); - - /// Support for Unicode dashes - /// Interpret Unicode dashes as default double-hyphen - for (auto & arg : common_arguments) - { - // replace em-dash(U+2014) - boost::replace_all(arg, "—", "--"); - // replace en-dash(U+2013) - boost::replace_all(arg, "–", "--"); - // replace mathematical minus(U+2212) - boost::replace_all(arg, "−", "--"); - } - - - OptionsDescription options_description; - options_description.main_description.emplace(createOptionsDescription("Main options", terminal_width)); - - /// Common options for clickhouse-client and clickhouse-local. - options_description.main_description->add_options() - ("help", "print usage summary, combine with --verbose to display all options") - ("verbose", "print query and other debugging info") - ("version,V", "print version information and exit") - ("version-clean", "print version in machine-readable format and exit") - - ("config-file,C", po::value(), "config-file path") - - ("query,q", po::value>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") - ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") - ("multiquery,n", "Obsolete, does nothing") - ("multiline,m", "If specified, allow multiline queries (do not send the query on Enter)") - ("database,d", po::value(), "database") - ("query_kind", po::value()->default_value("initial_query"), "One of initial_query/secondary_query/no_query") - ("query_id", po::value(), "query_id") - - ("history_file", po::value(), "path to history file") - - ("stage", po::value()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit") - ("progress", po::value()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::DEFAULT, "default"), "Print progress of queries execution - to TTY: tty|on|1|true|yes; to STDERR non-interactive mode: err; OFF: off|0|false|no; DEFAULT - interactive to TTY, non-interactive is off") - - ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.") - ("wait_for_suggestions_to_load", "Load suggestion data synchonously.") - ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)") - ("memory-usage", po::value()->implicit_value("default")->default_value("none"), "print memory usage to stderr in non-interactive mode (for benchmarks). 
Values: 'none', 'default', 'readable'") - - ("echo", "in batch mode, print query before execution") - - ("log-level", po::value(), "log level") - ("server_logs_file", po::value(), "put server logs into specified file") - - ("suggestion_limit", po::value()->default_value(10000), "Suggestion limit for how many databases, tables and columns to fetch.") - - ("format,f", po::value(), "default output format (and input format for clickhouse-local)") - ("output-format", po::value(), "default output format (this option has preference over --format)") - - ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command") - ("highlight", po::value()->default_value(true), "enable or disable basic syntax highlight in interactive command line") - - ("ignore-error", "do not stop processing when an error occurs") - ("stacktrace", "print stack traces of exceptions") - ("hardware-utilization", "print hardware utilization information in progress bar") - ("print-profile-events", po::value(&profile_events.print)->zero_tokens(), "Printing ProfileEvents packets") - ("profile-events-delay-ms", po::value()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)") - ("processed-rows", "print the number of locally processed rows") - - ("interactive", "Process queries-file or --query query and start interactive mode") - ("pager", po::value(), "Pipe all output into this command (less or similar)") - ("max_memory_usage_in_client", po::value(), "Set memory limit in client/local server") - - ("fuzzer-args", po::value(), "Command line arguments for the LLVM's libFuzzer driver. Only relevant if the application is compiled with libFuzzer.") - - ("client_logs_file", po::value(), "Path to a file for writing client logs. Currently we only have fatal logs (when the client crashes)") - ; - - addOptions(options_description); - - OptionsDescription options_description_non_verbose = options_description; - - auto getter = [](const auto & op) - { - String op_long_name = op->long_name(); - return "--" + String(op_long_name); - }; - - if (options_description.main_description) - { - const auto & main_options = options_description.main_description->options(); - std::transform(main_options.begin(), main_options.end(), std::back_inserter(cmd_options), getter); - } - - if (options_description.external_description) - { - const auto & external_options = options_description.external_description->options(); - std::transform(external_options.begin(), external_options.end(), std::back_inserter(cmd_options), getter); - } - - po::variables_map options; - parseAndCheckOptions(options_description, options, common_arguments); - po::notify(options); - - if (options.count("version") || options.count("V")) - { - showClientVersion(); - exit(0); // NOLINT(concurrency-mt-unsafe) - } - - if (options.count("version-clean")) - { - output_stream << VERSION_STRING; - exit(0); // NOLINT(concurrency-mt-unsafe) - } - - if (options.count("verbose")) - getClientConfiguration().setBool("verbose", true); - - /// Output of help message. - if (options.count("help") - || (options.count("host") && options["host"].as() == "elp")) /// If user writes -help instead of --help. 
- { - if (getClientConfiguration().getBool("verbose", false)) - printHelpMessage(options_description, true); - else - printHelpMessage(options_description_non_verbose, false); - exit(0); // NOLINT(concurrency-mt-unsafe) - } - - /// Common options for clickhouse-client and clickhouse-local. - - /// Output execution time to stderr in batch mode. - if (options.count("time")) - getClientConfiguration().setBool("print-time-to-stderr", true); - if (options.count("memory-usage")) - { - const auto & memory_usage_mode = options["memory-usage"].as(); - if (memory_usage_mode != "none" && memory_usage_mode != "default" && memory_usage_mode != "readable") - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown memory-usage mode: {}", memory_usage_mode); - getClientConfiguration().setString("print-memory-to-stderr", memory_usage_mode); - } - - if (options.count("query")) - queries = options["query"].as>(); - if (options.count("query_id")) - getClientConfiguration().setString("query_id", options["query_id"].as()); - if (options.count("database")) - getClientConfiguration().setString("database", options["database"].as()); - if (options.count("config-file")) - getClientConfiguration().setString("config-file", options["config-file"].as()); - if (options.count("queries-file")) - queries_files = options["queries-file"].as>(); - if (options.count("multiline")) - getClientConfiguration().setBool("multiline", true); - if (options.count("ignore-error")) - getClientConfiguration().setBool("ignore-error", true); - if (options.count("format")) - getClientConfiguration().setString("format", options["format"].as()); - if (options.count("output-format")) - getClientConfiguration().setString("output-format", options["output-format"].as()); - if (options.count("vertical")) - getClientConfiguration().setBool("vertical", true); - if (options.count("stacktrace")) - getClientConfiguration().setBool("stacktrace", true); - if (options.count("print-profile-events")) - getClientConfiguration().setBool("print-profile-events", true); - if (options.count("profile-events-delay-ms")) - getClientConfiguration().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as()); - /// Whether to print the number of processed rows at - if (options.count("processed-rows")) - getClientConfiguration().setBool("print-num-processed-rows", true); - if (options.count("progress")) - { - switch (options["progress"].as()) - { - case DEFAULT: - getClientConfiguration().setString("progress", "default"); - break; - case OFF: - getClientConfiguration().setString("progress", "off"); - break; - case TTY: - getClientConfiguration().setString("progress", "tty"); - break; - case ERR: - getClientConfiguration().setString("progress", "err"); - break; - } - } - if (options.count("echo")) - getClientConfiguration().setBool("echo", true); - if (options.count("disable_suggestion")) - getClientConfiguration().setBool("disable_suggestion", true); - if (options.count("wait_for_suggestions_to_load")) - getClientConfiguration().setBool("wait_for_suggestions_to_load", true); - if (options.count("suggestion_limit")) - getClientConfiguration().setInt("suggestion_limit", options["suggestion_limit"].as()); - if (options.count("highlight")) - getClientConfiguration().setBool("highlight", options["highlight"].as()); - if (options.count("history_file")) - getClientConfiguration().setString("history_file", options["history_file"].as()); - if (options.count("interactive")) - getClientConfiguration().setBool("interactive", true); - if (options.count("pager")) - 
getClientConfiguration().setString("pager", options["pager"].as()); - - if (options.count("log-level")) - Poco::Logger::root().setLevel(options["log-level"].as()); - if (options.count("server_logs_file")) - server_logs_file = options["server_logs_file"].as(); - - query_processing_stage = QueryProcessingStage::fromString(options["stage"].as()); - query_kind = parseQueryKind(options["query_kind"].as()); - profile_events.print = options.count("print-profile-events"); - profile_events.delay_ms = options["profile-events-delay-ms"].as(); - - processOptions(options_description, options, external_tables_arguments, hosts_and_ports_arguments); - { - std::unordered_set alias_names; - alias_names.reserve(options_description.main_description->options().size()); - for (const auto& option : options_description.main_description->options()) - alias_names.insert(option->long_name()); - argsToConfig(common_arguments, getClientConfiguration(), 100, &alias_names); - } - - clearPasswordFromCommandLine(argc, argv); - - /// Limit on total memory usage - std::string max_client_memory_usage = getClientConfiguration().getString("max_memory_usage_in_client", "0" /*default value*/); - if (max_client_memory_usage != "0") - { - UInt64 max_client_memory_usage_int = parseWithSizeSuffix(max_client_memory_usage.c_str(), max_client_memory_usage.length()); - - total_memory_tracker.setHardLimit(max_client_memory_usage_int); - total_memory_tracker.setDescription("(total)"); - total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking); - } - - /// Print stacktrace in case of crash - HandledSignals::instance().setupTerminateHandler(); - HandledSignals::instance().setupCommonDeadlySignalHandlers(); - /// We don't setup signal handlers for SIGINT, SIGQUIT, SIGTERM because we don't - /// have an option for client to shutdown gracefully. - - fatal_channel_ptr = new Poco::SplitterChannel; - fatal_console_channel_ptr = new Poco::ConsoleChannel; - fatal_channel_ptr->addChannel(fatal_console_channel_ptr); - if (options.count("client_logs_file")) - { - fatal_file_channel_ptr = new Poco::SimpleFileChannel(options["client_logs_file"].as()); - fatal_channel_ptr->addChannel(fatal_file_channel_ptr); - } - - fatal_log = createLogger("ClientBase", fatal_channel_ptr.get(), Poco::Message::PRIO_FATAL); - signal_listener = std::make_unique(nullptr, fatal_log); - signal_listener_thread.start(*signal_listener); - -#if USE_GWP_ASAN - GWPAsan::initFinished(); -#endif - -} - } diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 7689744a373..557ac30d27c 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -1,26 +1,30 @@ #pragma once -#include -#include "Common/NamePrompter.h" -#include -#include + +#include +#include #include +#include #include #include -#include #include #include -#include #include #include #include #include -#include -#include -#include -#include -#include +#include +#include #include +#include +#include + +#include + +#include +#include +#include +#include namespace po = boost::program_options; @@ -64,9 +68,16 @@ std::istream& operator>> (std::istream & in, ProgressOption & progress); class InternalTextLogs; class WriteBufferFromFileDescriptor; -class ClientBase : public Poco::Util::Application, public IHints<2> +/** + * The base class which encapsulates the core functionality of a client. + * Can be used in a standalone application (clickhouse-client or clickhouse-local), + * or be embedded into server. + * Always keep in mind that there can be several instances of this class within + * a process. 
Thus, it cannot keep its state in global shared variables or even use them. + * The best example - std::cin, std::cout and std::cerr. + */ +class ClientBase { - public: using Arguments = std::vector; @@ -79,12 +90,11 @@ public: std::ostream & output_stream_ = std::cout, std::ostream & error_stream_ = std::cerr ); + virtual ~ClientBase(); - ~ClientBase() override; + bool tryStopQuery() { return query_interrupt_handler.tryStop(); } + void stopQuery() { return query_interrupt_handler.stop(); } - void init(int argc, char ** argv); - - std::vector getAllRegisteredNames() const override { return cmd_options; } ASTPtr parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements); protected: @@ -114,7 +124,7 @@ protected: ASTPtr parsed_query, std::optional echo_query_ = {}, bool report_error = false); static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth, uint32_t max_parser_backtracks); - static void setupSignalHandler(); + virtual void setupSignalHandler() = 0; bool executeMultiQuery(const String & all_queries_text); MultiQueryProcessingStage analyzeMultiQueryText( @@ -188,7 +198,6 @@ private: String prompt() const; void resetOutput(); - void parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments); void updateSuggest(const ASTPtr & ast); @@ -196,6 +205,31 @@ private: bool addMergeTreeSettings(ASTCreateQuery & ast_create); protected: + + class QueryInterruptHandler : private boost::noncopyable + { + public: + /// Store how much interrupt signals can be before stopping the query + /// by default stop after the first interrupt signal. + void start(Int32 signals_before_stop = 1) { exit_after_signals.store(signals_before_stop); } + + /// Set value not greater then 0 to mark the query as stopped. + void stop() { exit_after_signals.store(0); } + + /// Return true if the query was stopped. + /// Query was stopped if it received at least "signals_before_stop" interrupt signals. + bool tryStop() { return exit_after_signals.fetch_sub(1) <= 0; } + bool cancelled() { return exit_after_signals.load() <= 0; } + + /// Return how much interrupt signals remain before stop. + Int32 cancelled_status() { return exit_after_signals.load(); } + + private: + std::atomic exit_after_signals = 0; + }; + + QueryInterruptHandler query_interrupt_handler; + static bool isSyncInsertWithData(const ASTInsertQuery & insert_query, const ContextPtr & context); bool processMultiQueryFromFile(const String & file_name); @@ -239,7 +273,6 @@ protected: std::vector queries; /// Queries passed via '--query' std::vector queries_files; /// If not empty, queries will be read from these files std::vector interleave_queries_files; /// If not empty, run queries from these files before processing every file from 'queries_files'. - std::vector cmd_options; bool stdin_is_a_tty = false; /// stdin is a terminal. bool stdout_is_a_tty = false; /// stdout is a terminal. 
From 9186e647eb672283b8cb95d2fc152e0994f6df6f Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Jul 2024 17:05:09 +0000 Subject: [PATCH 1151/2361] Fix style --- src/Client/ClientBase.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 9cf3b955d26..56685f9d3f4 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -88,12 +88,6 @@ namespace fs = std::filesystem; using namespace std::literals; - -namespace CurrentMetrics -{ - extern const Metric MemoryTracking; -} - namespace DB { From af2c9fcaaf4e2f38c9db105c246ee24b095b256f Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Jul 2024 18:18:42 +0000 Subject: [PATCH 1152/2361] Skip file --- utils/check-style/check-style | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 380656cd1ca..3c959617d02 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -318,6 +318,7 @@ std_cerr_cout_excludes=( src/Interpreters/Context.cpp # IProcessor::dump() src/Processors/IProcessor.cpp + src/Client/ClientApplicationBase.cpp src/Client/ClientBase.cpp src/Client/LineReader.cpp src/Client/QueryFuzzer.cpp From 8ba85074e74f403b3d5106f6ef811019075cefb4 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 19 Jul 2024 12:25:17 +0000 Subject: [PATCH 1153/2361] Fix build --- src/Client/ClientApplicationBase.cpp | 16 +++++++++++++++- src/Client/ClientApplicationBase.h | 11 +++++++++++ src/Client/ClientBase.cpp | 14 +------------- src/Client/ClientBase.h | 10 ---------- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 59c98983694..6b19898ef5c 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "config.h" @@ -51,7 +52,20 @@ void interruptSignalHandler(int signum) safeExit(128 + signum); } -ClientApplicationBase::~ClientApplicationBase() = default; +ClientApplicationBase::~ClientApplicationBase() +{ + try + { + writeSignalIDtoSignalPipe(SignalListener::StopThread); + signal_listener_thread.join(); + HandledSignals::instance().reset(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + ClientApplicationBase::ClientApplicationBase() : ClientBase(STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO, std::cin, std::cout, std::cerr) {} ClientApplicationBase & ClientApplicationBase::getInstance() diff --git a/src/Client/ClientApplicationBase.h b/src/Client/ClientApplicationBase.h index 217fa29c3f4..771bb948cb7 100644 --- a/src/Client/ClientApplicationBase.h +++ b/src/Client/ClientApplicationBase.h @@ -6,6 +6,10 @@ #include #include #include +#include +#include +#include + #include #include @@ -48,6 +52,13 @@ private: void parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments); std::vector cmd_options; + + LoggerPtr fatal_log; + Poco::AutoPtr fatal_channel_ptr; + Poco::AutoPtr fatal_console_channel_ptr; + Poco::AutoPtr fatal_file_channel_ptr; + Poco::Thread signal_listener_thread; + std::unique_ptr signal_listener; }; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 56685f9d3f4..85dfb767e75 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -258,19 +258,7 @@ public: }; -ClientBase::~ClientBase() -{ - try - { - writeSignalIDtoSignalPipe(SignalListener::StopThread); - signal_listener_thread.join(); - HandledSignals::instance().reset(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } -} +ClientBase::~ClientBase() = default; ClientBase::ClientBase( int in_fd_, diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 557ac30d27c..304d8c4b890 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -9,9 +9,6 @@ #include #include #include -#include -#include -#include #include #include #include @@ -253,13 +250,6 @@ protected: /// Client context is a context used only by the client to parse queries, process query parameters and to connect to clickhouse-server. ContextMutablePtr client_context; - LoggerPtr fatal_log; - Poco::AutoPtr fatal_channel_ptr; - Poco::AutoPtr fatal_console_channel_ptr; - Poco::AutoPtr fatal_file_channel_ptr; - Poco::Thread signal_listener_thread; - std::unique_ptr signal_listener; - bool is_interactive = false; /// Use either interactive line editing interface or batch mode. bool delayed_interactive = false; From 49dc30d5c28392d361ce0ef1e18f7db73841617f Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 24 Jul 2024 21:42:36 +0000 Subject: [PATCH 1154/2361] Small adjustement --- src/Client/ClientApplicationBase.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 6b19898ef5c..0306468d084 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -48,8 +48,13 @@ static ClientInfo::QueryKind parseQueryKind(const String & query_kind) /// This signal handler is set only for SIGINT and SIGQUIT. void interruptSignalHandler(int signum) { - if (ClientApplicationBase::getInstance().tryStopQuery()) - safeExit(128 + signum); + /// Signal handler might be called even before the setup is fully finished + /// and client application started to process the query. + /// Because of that we have to manually check it. 
+ if (auto * instance = ClientApplicationBase::instanceRawPtr(); instance) + if (auto * base = dynamic_cast(instance); base) + if (base->tryStopQuery()) + safeExit(128 + signum); } ClientApplicationBase::~ClientApplicationBase() From c7c1f10720cd194d85de6d81156cbd37304ab52b Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 24 Jul 2024 21:45:16 +0000 Subject: [PATCH 1155/2361] Added new method --- base/poco/Util/include/Poco/Util/Application.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/base/poco/Util/include/Poco/Util/Application.h b/base/poco/Util/include/Poco/Util/Application.h index c8d18e1bce9..d1a2021eb67 100644 --- a/base/poco/Util/include/Poco/Util/Application.h +++ b/base/poco/Util/include/Poco/Util/Application.h @@ -261,6 +261,11 @@ namespace Util /// /// Throws a NullPointerException if no Application instance exists. + static Application * instanceRawPtr(); + /// Returns a raw pointer to the Application sigleton. + /// + /// The caller should check whether the result is nullptr. + const Poco::Timestamp & startTime() const; /// Returns the application start time (UTC). @@ -448,6 +453,12 @@ namespace Util } + inline Application * Application::instanceRawPtr() + { + return _pInstance; + } + + inline const Poco::Timestamp & Application::startTime() const { return _startTime; From 6f068639db627944aaab978c79866ad5a2a234e7 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 25 Jul 2024 16:38:26 +0000 Subject: [PATCH 1156/2361] Better --- src/Client/ClientApplicationBase.h | 1 - src/Client/ClientBase.h | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Client/ClientApplicationBase.h b/src/Client/ClientApplicationBase.h index 771bb948cb7..3663271dd25 100644 --- a/src/Client/ClientApplicationBase.h +++ b/src/Client/ClientApplicationBase.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 304d8c4b890..175ebe97075 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -2,6 +2,7 @@ #include +#include #include #include #include @@ -9,9 +10,13 @@ #include #include #include +#include +#include +#include #include #include #include + #include #include #include From a457db34b216ea987a4875a3cbeb5363878d5d5a Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 25 Jul 2024 16:58:41 +0000 Subject: [PATCH 1157/2361] Fixed a typo --- base/poco/Util/include/Poco/Util/Application.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/poco/Util/include/Poco/Util/Application.h b/base/poco/Util/include/Poco/Util/Application.h index d1a2021eb67..786e331fe73 100644 --- a/base/poco/Util/include/Poco/Util/Application.h +++ b/base/poco/Util/include/Poco/Util/Application.h @@ -262,7 +262,7 @@ namespace Util /// Throws a NullPointerException if no Application instance exists. static Application * instanceRawPtr(); - /// Returns a raw pointer to the Application sigleton. + /// Returns a raw pointer to the Application singleton. /// /// The caller should check whether the result is nullptr. 
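The preceding commits ("Small adjustement" and "Added new method") make the interrupt handler safe to run before the application object is fully set up: Poco's Application gains instanceRawPtr(), and interruptSignalHandler now checks both the raw pointer and the dynamic_cast result before calling tryStopQuery(). What follows is a rough, self-contained sketch of that guarded-singleton pattern; Application and ClientApp here are stand-ins, not the real Poco::Util::Application or ClientApplicationBase.

#include <iostream>

class Application                        // stand-in for Poco::Util::Application
{
public:
    Application() { instance_ptr = this; }
    virtual ~Application() { instance_ptr = nullptr; }

    /// Returns a raw pointer to the singleton; may be nullptr, so the caller must check.
    static Application * instanceRawPtr() { return instance_ptr; }

private:
    static inline Application * instance_ptr = nullptr;
};

class ClientApp : public Application     // stand-in for ClientApplicationBase
{
public:
    bool tryStopQuery() { return true; } // pretend a query is running and gets stopped
};

void interruptHandler()                  // body of a SIGINT/SIGQUIT handler
{
    /// The handler may fire before the application exists, so never dereference blindly.
    if (auto * instance = Application::instanceRawPtr(); instance)
        if (auto * client = dynamic_cast<ClientApp *>(instance); client)
            if (client->tryStopQuery())
                std::cout << "stopping query\n";
}

int main()
{
    interruptHandler();                  // before construction: a harmless no-op
    ClientApp app;
    interruptHandler();                  // after construction: prints "stopping query"
}

Returning a raw pointer that may be nullptr, instead of the exception-throwing instance() accessor, lets the signal handler bail out quietly when no application object has been created yet.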
From 2f255dc68d2cffb8bc17efc153a9e75a9166675d Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 26 Jul 2024 01:11:40 +0200 Subject: [PATCH 1158/2361] Fix clang-tidy --- src/Client/ClientBase.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 175ebe97075..1a23b6b1363 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -95,7 +95,7 @@ public: virtual ~ClientBase(); bool tryStopQuery() { return query_interrupt_handler.tryStop(); } - void stopQuery() { return query_interrupt_handler.stop(); } + void stopQuery() { query_interrupt_handler.stop(); } ASTPtr parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements); From 9b4accebb3d19789e69b422ae2f235149e453f94 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 29 Jul 2024 15:47:52 +0000 Subject: [PATCH 1159/2361] Fix build --- src/Client/ClientApplicationBase.cpp | 162 ------------------------ src/Client/ClientBaseOptimizedParts.cpp | 4 +- 2 files changed, 2 insertions(+), 164 deletions(-) diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 0306468d084..4aa8b6c0bbe 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -102,168 +102,6 @@ void ClientApplicationBase::setupSignalHandler() throw ErrnoException(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER, "Cannot set signal handler"); } - -namespace -{ - -/// Define transparent hash to we can use -/// std::string_view with the containers -struct TransparentStringHash -{ - using is_transparent = void; - size_t operator()(std::string_view txt) const - { - return std::hash{}(txt); - } -}; - -/* - * This functor is used to parse command line arguments and replace dashes with underscores, - * allowing options to be specified using either dashes or underscores. 
- */ -class OptionsAliasParser -{ -public: - explicit OptionsAliasParser(const boost::program_options::options_description& options) - { - options_names.reserve(options.options().size()); - for (const auto& option : options.options()) - options_names.insert(option->long_name()); - } - - /* - * Parses arguments by replacing dashes with underscores, and matches the resulting name with known options - * Implements boost::program_options::ext_parser logic - */ - std::pair operator()(const std::string & token) const - { - if (!token.starts_with("--")) - return {}; - std::string arg = token.substr(2); - - // divide token by '=' to separate key and value if options style=long_allow_adjacent - auto pos_eq = arg.find('='); - std::string key = arg.substr(0, pos_eq); - - if (options_names.contains(key)) - // option does not require any changes, because it is already correct - return {}; - - std::replace(key.begin(), key.end(), '-', '_'); - if (!options_names.contains(key)) - // after replacing '-' with '_' argument is still unknown - return {}; - - std::string value; - if (pos_eq != std::string::npos && pos_eq < arg.size()) - value = arg.substr(pos_eq + 1); - - return {key, value}; - } - -private: - std::unordered_set options_names; -}; - -} - -/// Enable optimizations even in debug builds because otherwise options parsing becomes extremely slow affecting .sh tests -#if defined(__clang__) -#pragma clang optimize on -#endif -void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments) -{ - if (allow_repeated_settings) - addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value()); - else - addProgramOptions(cmd_settings, options_description.main_description.value()); - - if (allow_merge_tree_settings) - { - /// Add merge tree settings manually, because names of some settings - /// may clash. Query settings have higher priority and we just - /// skip ambiguous merge tree settings. - auto & main_options = options_description.main_description.value(); - - std::unordered_set> main_option_names; - for (const auto & option : main_options.options()) - main_option_names.insert(option->long_name()); - - for (const auto & setting : cmd_merge_tree_settings.all()) - { - const auto add_setting = [&](const std::string_view name) - { - if (auto it = main_option_names.find(name); it != main_option_names.end()) - return; - - if (allow_repeated_settings) - addProgramOptionAsMultitoken(cmd_merge_tree_settings, main_options, name, setting); - else - addProgramOption(cmd_merge_tree_settings, main_options, name, setting); - }; - - const auto & setting_name = setting.getName(); - - add_setting(setting_name); - - const auto & settings_to_aliases = MergeTreeSettings::Traits::settingsToAliases(); - if (auto it = settings_to_aliases.find(setting_name); it != settings_to_aliases.end()) - { - for (const auto alias : it->second) - { - add_setting(alias); - } - } - } - } - - /// Parse main commandline options. - auto parser = po::command_line_parser(arguments) - .options(options_description.main_description.value()) - .extra_parser(OptionsAliasParser(options_description.main_description.value())) - .allow_unregistered(); - po::parsed_options parsed = parser.run(); - - /// Check unrecognized options without positional options. 
- auto unrecognized_options = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::exclude_positional); - if (!unrecognized_options.empty()) - { - auto hints = this->getHints(unrecognized_options[0]); - if (!hints.empty()) - throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'. Maybe you meant {}", - unrecognized_options[0], toString(hints)); - - throw Exception(ErrorCodes::UNRECOGNIZED_ARGUMENTS, "Unrecognized option '{}'", unrecognized_options[0]); - } - - /// Check positional options. - for (const auto & op : parsed.options) - { - if (!op.unregistered && op.string_key.empty() && !op.original_tokens[0].starts_with("--") - && !op.original_tokens[0].empty() && !op.value.empty()) - { - /// Two special cases for better usability: - /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" - /// These are relevant for interactive usage - user-friendly, but questionable in general. - /// In case of ambiguity or for scripts, prefer using proper options. - - const auto & token = op.original_tokens[0]; - po::variable_value value(boost::any(op.value), false); - - const char * option; - if (token.contains(' ')) - option = "query"; - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); - - if (!options.emplace(option, value).second) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); - } - } - - po::store(parsed, options); -} - void ClientApplicationBase::addMultiquery(std::string_view query, Arguments & common_arguments) const { common_arguments.emplace_back("--multiquery"); diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index 421843a0e79..297b8e7ce51 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -1,4 +1,4 @@ -#include +#include #include namespace DB @@ -80,7 +80,7 @@ private: } -void ClientBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments) +void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments) { if (allow_repeated_settings) addProgramOptionsAsMultitokens(cmd_settings, options_description.main_description.value()); From 6f16ca02a74a81956b7524958c5f94eb7a2c7bf5 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 29 Jul 2024 16:14:31 +0000 Subject: [PATCH 1160/2361] Fixed Style Check --- src/Client/ClientApplicationBase.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 4aa8b6c0bbe..9f133616d2e 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -31,7 +31,6 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int CANNOT_SET_SIGNAL_HANDLER; - extern const int UNRECOGNIZED_ARGUMENTS; } static ClientInfo::QueryKind parseQueryKind(const String & query_kind) From f9a5210bacc418e354ddcf8893fa8c5a291b46d4 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Mon, 29 Jul 2024 19:36:31 +0200 Subject: [PATCH 1161/2361] solve Alexey's review --- src/Storages/VirtualColumnUtils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 87c1aecc3a7..257a77547c0 100644 --- 
a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -127,7 +127,7 @@ NameSet getVirtualNamesForFileLikeStorage() std::unordered_map parseHivePartitioningKeysAndValues(const String & path, const ColumnsDescription & storage_columns) { - std::string pattern = "([^/]+)=([^/]+)/"; + std::string pattern = "([^/])=([^/]+)/"; re2::StringPiece input_piece(path); std::unordered_map key_values; From 75728ac56d83b85e476162a745686837cb194b73 Mon Sep 17 00:00:00 2001 From: Halersson Paris <142428374+halersson@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:42:58 -0300 Subject: [PATCH 1162/2361] Fix typo --- src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index e837d4d5e20..bc5e8292192 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -420,7 +420,7 @@ void ParquetBlockInputFormat::initializeIfNeeded() int num_row_groups = metadata->num_row_groups(); row_group_batches.reserve(num_row_groups); - auto adative_chunk_size = [&](int row_group_idx) -> size_t + auto adaptive_chunk_size = [&](int row_group_idx) -> size_t { size_t total_size = 0; auto row_group_meta = metadata->RowGroup(row_group_idx); @@ -457,7 +457,7 @@ void ParquetBlockInputFormat::initializeIfNeeded() row_group_batches.back().row_groups_idxs.push_back(row_group); row_group_batches.back().total_rows += metadata->RowGroup(row_group)->num_rows(); row_group_batches.back().total_bytes_compressed += metadata->RowGroup(row_group)->total_compressed_size(); - auto rows = adative_chunk_size(row_group); + auto rows = adaptive_chunk_size(row_group); row_group_batches.back().adaptive_chunk_size = rows ? rows : format_settings.parquet.max_block_size; } } From 06b3185e57953c43531e4281f22bc03a8cd424e7 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Mon, 29 Jul 2024 20:05:29 +0200 Subject: [PATCH 1163/2361] fixes --- src/Interpreters/InterpreterCreateQuery.cpp | 14 +- .../test_restore_external_engines/test.py | 133 ++++++++++++++---- 2 files changed, 111 insertions(+), 36 deletions(-) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a5eb3a83365..6e689c59c09 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1031,13 +1031,6 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. setDefaultTableEngine(*to_engine, getContext()->getSettingsRef().default_table_engine.value); } - /// For external tables with restore_replace_external_engine_to_null setting we replace external engines to - /// Null table engine. - else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null) - { - if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE) - setNullTableEngine(*create.storage); - } return; } } @@ -1050,6 +1043,13 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. 
setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); } + /// For external tables with restore_replace_external_engine_to_null setting we replace external engines to + /// Null table engine. + else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null) + { + if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE) + setNullTableEngine(*create.storage); + } return; } diff --git a/tests/integration/test_restore_external_engines/test.py b/tests/integration/test_restore_external_engines/test.py index eb88da6b61f..cf189f2a6ed 100644 --- a/tests/integration/test_restore_external_engines/test.py +++ b/tests/integration/test_restore_external_engines/test.py @@ -7,52 +7,75 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) configs = ["configs/remote_servers.xml", "configs/backups_disk.xml"] -node1 = cluster.add_instance("replica1", with_zookeeper=True, with_mysql8=True, main_configs=configs, external_dirs=["/backups/"]) -node2 = cluster.add_instance("replica2", with_zookeeper=True, with_mysql8=True, main_configs=configs, external_dirs=["/backups/"]) -node3 = cluster.add_instance("replica3", with_zookeeper=True, with_mysql8=True, main_configs=configs, external_dirs=["/backups/"]) +node1 = cluster.add_instance( + "replica1", + with_zookeeper=True, + with_mysql8=True, + main_configs=configs, + external_dirs=["/backups/"], +) +node2 = cluster.add_instance( + "replica2", + with_zookeeper=True, + with_mysql8=True, + main_configs=configs, + external_dirs=["/backups/"], +) +node3 = cluster.add_instance( + "replica3", + with_zookeeper=True, + with_mysql8=True, + main_configs=configs, + external_dirs=["/backups/"], +) nodes = [node1, node2, node3] backup_id_counter = 0 + def new_backup_name(): global backup_id_counter backup_id_counter += 1 return f"Disk('backups', '{backup_id_counter}/')" + def cleanup_nodes(nodes, dbname): for node in nodes: node.query(f"DROP DATABASE IF EXISTS {dbname} SYNC") + def fill_nodes(nodes, dbname): cleanup_nodes(nodes, dbname) for node in nodes: - node.query(f"CREATE DATABASE {dbname} ENGINE = Replicated('/clickhouse/databases/{dbname}', 'default', '{node.name}')") + node.query( + f"CREATE DATABASE {dbname} ENGINE = Replicated('/clickhouse/databases/{dbname}', 'default', '{node.name}')" + ) + def drop_mysql_table(conn, tableName): with conn.cursor() as cursor: cursor.execute(f"DROP TABLE IF EXISTS `clickhouse`.`{tableName}`") + def get_mysql_conn(cluster): conn = pymysql.connect( - user="root", password="clickhouse", host=cluster.mysql8_ip, port=cluster.mysql8_port + user="root", + password="clickhouse", + host=cluster.mysql8_ip, + port=cluster.mysql8_port, ) return conn + def fill_tables(cluster, dbname): fill_nodes(nodes, dbname) conn = get_mysql_conn(cluster) with conn.cursor() as cursor: - cursor.execute( - "DROP DATABASE IF EXISTS clickhouse" - ) - cursor.execute( - "CREATE DATABASE clickhouse" - ) - cursor.execute( - "DROP TABLE IF EXISTS clickhouse.inference_table" - ) + cursor.execute("DROP DATABASE IF EXISTS clickhouse") + cursor.execute("CREATE DATABASE clickhouse") + cursor.execute("DROP TABLE IF EXISTS clickhouse.inference_table") cursor.execute( "CREATE TABLE clickhouse.inference_table (id INT PRIMARY KEY, data BINARY(16) NOT NULL)" ) @@ -66,17 +89,30 @@ def fill_tables(cluster, dbname): node1.query( f"CREATE TABLE {dbname}.mysql_schema_inference_engine ENGINE=MySQL({parameters})" ) - 
node1.query(f"CREATE TABLE {dbname}.mysql_schema_inference_function AS mysql({parameters})") + node1.query( + f"CREATE TABLE {dbname}.mysql_schema_inference_function AS mysql({parameters})" + ) node1.query(f"CREATE TABLE {dbname}.merge_tree (id UInt64, b String) ORDER BY id") node1.query(f"INSERT INTO {dbname}.merge_tree VALUES (100, 'abc')") expected = "id\tInt32\t\t\t\t\t\ndata\tFixedString(16)\t\t\t\t\t\n" - assert node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_engine") == expected - assert node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_function") == expected + assert ( + node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_engine") + == expected + ) + assert ( + node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_function") + == expected + ) assert node1.query(f"SELECT id FROM mysql({parameters})") == "100\n" - assert node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_engine") == "100\n" - assert node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_function") == "100\n" + assert ( + node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_engine") == "100\n" + ) + assert ( + node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_function") + == "100\n" + ) assert node1.query(f"SELECT id FROM {dbname}.merge_tree") == "100\n" @@ -92,6 +128,7 @@ def start_cluster(): finally: cluster.shutdown() + def test_restore_table(start_cluster): fill_tables(cluster, "replicated") backup_name = new_backup_name() @@ -107,12 +144,26 @@ def test_restore_table(start_cluster): assert node3.query("EXISTS replicated.mysql_schema_inference_engine") == "0\n" assert node3.query("EXISTS replicated.mysql_schema_inference_function") == "0\n" - node3.query(f"RESTORE DATABASE replicated FROM {backup_name} SETTINGS allow_different_database_def=true") + node3.query( + f"RESTORE DATABASE replicated FROM {backup_name} SETTINGS allow_different_database_def=true" + ) node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated") - assert node1.query("SELECT count(), sum(id) FROM replicated.mysql_schema_inference_engine") == "1\t100\n" - assert node1.query("SELECT count(), sum(id) FROM replicated.mysql_schema_inference_function") == "1\t100\n" - assert node1.query("SELECT count(), sum(id) FROM replicated.merge_tree") == "1\t100\n" + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated.mysql_schema_inference_engine" + ) + == "1\t100\n" + ) + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated.mysql_schema_inference_function" + ) + == "1\t100\n" + ) + assert ( + node1.query("SELECT count(), sum(id) FROM replicated.merge_tree") == "1\t100\n" + ) cleanup_nodes(nodes, "replicated") @@ -132,12 +183,36 @@ def test_restore_table_null(start_cluster): assert node3.query("EXISTS replicated2.mysql_schema_inference_engine") == "0\n" assert node3.query("EXISTS replicated2.mysql_schema_inference_function") == "0\n" - node3.query(f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engines_to_null=1, restore_replace_external_table_functions_to_null=1") + node3.query( + f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engines_to_null=1, restore_replace_external_table_functions_to_null=1" + ) node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") - assert node1.query("SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_engine") == 
"0\t0\n" - assert node1.query("SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_function") == "0\t0\n" - assert node1.query("SELECT count(), sum(id) FROM replicated2.merge_tree") == "1\t100\n" - assert node1.query("SELECT engine FROM system.tables where database = 'replicated2' and name like '%mysql%'") == "Null\nNull\n" - assert node1.query("SELECT engine FROM system.tables where database = 'replicated2' and name like '%merge_tree%'") == "MergeTree\n" + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_engine" + ) + == "0\t0\n" + ) + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_function" + ) + == "0\t0\n" + ) + assert ( + node1.query("SELECT count(), sum(id) FROM replicated2.merge_tree") == "1\t100\n" + ) + assert ( + node1.query( + "SELECT engine FROM system.tables where database = 'replicated2' and name like '%mysql%'" + ) + == "Null\nNull\n" + ) + assert ( + node1.query( + "SELECT engine FROM system.tables where database = 'replicated2' and name like '%merge_tree%'" + ) + == "MergeTree\n" + ) cleanup_nodes(nodes, "replicated2") From cb056cf3a5080cbff61f6efd070733ae2061d5b8 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 29 Jul 2024 12:05:44 -0700 Subject: [PATCH 1164/2361] Add camelCase aliases for percentRank() and denseRank() for percent_rank() and dense_rank() --- src/Processors/Transforms/WindowTransform.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 2b255c5120e..a1b46c8e36c 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -2721,20 +2721,24 @@ void registerWindowFunctions(AggregateFunctionFactory & factory) parameters); }, properties}, AggregateFunctionFactory::Case::Insensitive); - factory.registerFunction("dense_rank", {[](const std::string & name, + factory.registerFunction("denseRank", {[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *) { return std::make_shared(name, argument_types, parameters); }, properties}, AggregateFunctionFactory::Case::Insensitive); - factory.registerFunction("percent_rank", {[](const std::string & name, + factory.registerAlias("dense_rank", "denseRank", AggregateFunctionFactory::Case::Sensitive); + + factory.registerFunction("percentRank", {[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *) { return std::make_shared(name, argument_types, parameters); }, properties}, AggregateFunctionFactory::Case::Insensitive); + factory.registerAlias("percent_rank", "percentRank", AggregateFunctionFactory::Case::Sensitive); + factory.registerFunction("row_number", {[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *) { From 9811a2e71b825a55c376edfb38303c817493cd9e Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 28 Jul 2024 17:32:32 -0700 Subject: [PATCH 1165/2361] Add test 03213_denseRank_percentRank_alias --- ...3213_denseRank_percentRank_alias.reference | 45 ++++++++++++++ .../03213_denseRank_percentRank_alias.sql | 59 +++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 tests/queries/0_stateless/03213_denseRank_percentRank_alias.reference create mode 100644 tests/queries/0_stateless/03213_denseRank_percentRank_alias.sql diff --git 
a/tests/queries/0_stateless/03213_denseRank_percentRank_alias.reference b/tests/queries/0_stateless/03213_denseRank_percentRank_alias.reference new file mode 100644 index 00000000000..b49e179df68 --- /dev/null +++ b/tests/queries/0_stateless/03213_denseRank_percentRank_alias.reference @@ -0,0 +1,45 @@ +---- denseRank() ---- +0 0 0 1 1 1 1 +3 0 0 2 2 2 2 +1 0 1 3 3 3 3 +4 0 1 4 4 4 4 +2 0 2 5 5 5 5 +6 1 0 1 1 1 1 +9 1 0 2 2 2 2 +7 1 1 3 3 3 3 +5 1 2 4 4 4 4 +8 1 2 5 5 5 5 +12 2 0 1 1 1 1 +10 2 1 2 2 2 2 +13 2 1 3 3 3 3 +11 2 2 4 4 4 4 +14 2 2 5 5 5 5 +15 3 0 1 1 1 1 +18 3 0 2 2 2 2 +16 3 1 3 3 3 3 +19 3 1 4 4 4 4 +17 3 2 5 5 5 5 +21 4 0 1 1 1 1 +24 4 0 2 2 2 2 +22 4 1 3 3 3 3 +20 4 2 4 4 4 4 +23 4 2 5 5 5 5 +27 5 0 1 1 1 1 +25 5 1 2 2 2 2 +28 5 1 3 3 3 3 +26 5 2 4 4 4 4 +29 5 2 5 5 5 5 +30 6 0 1 1 1 1 +---- percentRank() ---- +Lenovo Thinkpad Laptop 700 1 0 +Sony VAIO Laptop 700 1 0 +Dell Vostro Laptop 800 3 0.6666666666666666 +HP Elite Laptop 1200 4 1 +Microsoft Lumia Smartphone 200 1 0 +HTC One Smartphone 400 2 0.3333333333333333 +Nexus Smartphone 500 3 0.6666666666666666 +iPhone Smartphone 900 4 1 +Kindle Fire Tablet 150 1 0 +Samsung Galaxy Tab Tablet 200 2 0.5 +iPad Tablet 700 3 1 +Others Unknow 200 1 0 diff --git a/tests/queries/0_stateless/03213_denseRank_percentRank_alias.sql b/tests/queries/0_stateless/03213_denseRank_percentRank_alias.sql new file mode 100644 index 00000000000..ff841294eb1 --- /dev/null +++ b/tests/queries/0_stateless/03213_denseRank_percentRank_alias.sql @@ -0,0 +1,59 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/67042 +-- Reference generated using percent_rank() and dense_rank() + +-- From ClickHouse/tests/queries/0_stateless/01591_window_functions.sql (for deterministic query) +SELECT '---- denseRank() ----'; +select number, p, o, + count(*) over w, + rank() over w, + denseRank() over w, + row_number() over w +from (select number, intDiv(number, 5) p, mod(number, 3) o + from numbers(31) order by o, number) t +window w as (partition by p order by o, number) +order by p, o, number +settings max_block_size = 2; + +-- Modifed from ClickHouse/tests/queries/0_stateless/01592_window_functions.sql (for deterministic query) +SELECT '---- percentRank() ----'; + +drop table if exists product_groups; +drop table if exists products; + +CREATE TABLE product_groups ( + group_id Int64, + group_name String +) Engine = Memory; + +CREATE TABLE products ( + product_id Int64, + product_name String, + price DECIMAL(11, 2), + group_id Int64 +) Engine = Memory; + +INSERT INTO product_groups VALUES (1, 'Smartphone'),(2, 'Laptop'),(3, 'Tablet'); +INSERT INTO products (product_id,product_name, group_id,price) VALUES (1, 'Microsoft Lumia', 1, 200), (2, 'HTC One', 1, 400), (3, 'Nexus', 1, 500), (4, 'iPhone', 1, 900),(5, 'HP Elite', 2, 1200),(6, 'Lenovo Thinkpad', 2, 700),(7, 'Sony VAIO', 2, 700),(8, 'Dell Vostro', 2, 800),(9, 'iPad', 3, 700),(10, 'Kindle Fire', 3, 150),(11, 'Samsung Galaxy Tab', 3, 200); +INSERT INTO product_groups VALUES (4, 'Unknow'); +INSERT INTO products (product_id,product_name, group_id,price) VALUES (12, 'Others', 4, 200); + + +SELECT * +FROM +( + SELECT + product_name, + group_name, + price, + rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS rank, + percentRank() OVER (PARTITION BY group_name ORDER BY price ASC) AS percent + FROM products + INNER JOIN product_groups USING (group_id) +) AS t +ORDER BY + group_name ASC, + price ASC, + product_name ASC; + +drop table product_groups; +drop table products; From fea03cf46ff29aa398b08d86ae77361fe85d7c40 
Mon Sep 17 00:00:00 2001 From: Max K Date: Mon, 29 Jul 2024 21:07:24 +0200 Subject: [PATCH 1166/2361] Build results fix --- tests/ci/ci.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index e30062c32ff..935fe472e50 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -766,7 +766,9 @@ def _upload_build_artifacts( int(job_report.duration), GITHUB_JOB_API_URL(), head_ref=pr_info.head_ref, - pr_number=pr_info.number, + # PRInfo fetches pr number for release branches as well - set pr_number to 0 for release + # so that build results are not mistakenly treated as feature branch builds + pr_number=pr_info.number if pr_info.is_pr else 0, ) report_url = ci_cache.upload_build_report(build_result) print(f"Report file has been uploaded to [{report_url}]") From 3df2d88cf13ad552058a6958630741d7cdab9d3c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 29 Jul 2024 21:09:11 +0200 Subject: [PATCH 1167/2361] Update CHANGELOG.md --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07b37835dda..620b7c99bac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,7 +45,6 @@ * Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). #### Performance Improvement -* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)). * Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)). * Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)). * Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)). From e517338182b79aaa70c6b3e2d15c499acccc4d88 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 29 Jul 2024 21:16:19 +0200 Subject: [PATCH 1168/2361] Update tests --- tests/queries/0_stateless/replication.lib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index dcac721859e..36309cf0331 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -89,7 +89,7 @@ function check_replication_consistency() # Touch all data to check that it's readable (and trigger PartCheckThread if needed) # it's important to disable prefer warmed unmerged parts because # otherwise it can read non-syncrhonized state of replicas - while ! $CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do + while ! 
$CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 --max_result_rows 0 --max_result_bytes 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do sleep 1; num_tries=$((num_tries+1)) if [ $num_tries -eq 250 ]; then From 756bde1158c4b3e6e65d324436291d53b9e25fbb Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 29 Jul 2024 20:27:15 +0100 Subject: [PATCH 1169/2361] rm file --- tests/performance/views_max_insert_threads.xml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 tests/performance/views_max_insert_threads.xml diff --git a/tests/performance/views_max_insert_threads.xml b/tests/performance/views_max_insert_threads.xml deleted file mode 100644 index 473bcd02ab8..00000000000 --- a/tests/performance/views_max_insert_threads.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - From 2308a362a0aca716e2e50d3eb5283bdfd575e023 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 29 Jul 2024 19:31:46 +0000 Subject: [PATCH 1170/2361] Disable 02932_refreshable_materialized_views --- .../02932_refreshable_materialized_views.sh | 304 +----------------- 1 file changed, 1 insertion(+), 303 deletions(-) diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh index 9081035579d..6df3c391ddb 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.sh @@ -1,306 +1,4 @@ #!/usr/bin/env bash # Tags: atomic-database -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - -# Set session timezone to UTC to make all DateTime formatting and parsing use UTC, because refresh -# scheduling is done in UTC. -CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" -CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" - -$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" - - -# Basic refreshing. -$CLICKHOUSE_CLIENT -nq " - create materialized view a - refresh after 2 second - engine Memory - empty - as select number as x from numbers(2) union all select rand64() as x" -$CLICKHOUSE_CLIENT -nq "select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes"; -$CLICKHOUSE_CLIENT -nq "show create a" -# Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] -do - sleep 0.1 -done -start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" -# Check table contents. -$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" -# Wait for table contents to change. -res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" -while : -do - res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" - [ "$res2" == "$res1" ] || break - sleep 0.1 -done -# Wait for another change. 
-while : -do - res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" - [ "$res3" == "$res2" ] || break - sleep 0.1 -done -# Check that the two changes were at least 1 second apart, in particular that we're not refreshing -# like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer -# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above. -$CLICKHOUSE_CLIENT -nq " - select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); - select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" - -# Create a source table from which views will read. -$CLICKHOUSE_CLIENT -nq " - create table src (x Int8) engine Memory as select 1" - -# Switch to fake clock, change refresh schedule, change query. -$CLICKHOUSE_CLIENT -nq " - system test view a set fake time '2050-01-01 00:00:01';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:03' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - alter table a modify refresh every 2 year; - alter table a modify query select x*2 as x from src; - select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; - show create a;" -# Advance time to trigger the refresh. -$CLICKHOUSE_CLIENT -nq " - select '<5: no refresh>', count() from a; - system test view a set fake time '2052-02-03 04:05:06';" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<6: refreshed>', * from a; - select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" - -# Create a dependent view, refresh it once. -$CLICKHOUSE_CLIENT -nq " - create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; - show create b; - system test view b set fake time '2052-11-11 11:11:11'; - system refresh view b;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2052-11-11 11:11:11' ] -do - sleep 0.1 -done -# Next refresh shouldn't start until the dependency refreshes. -$CLICKHOUSE_CLIENT -nq " - select '<8: refreshed>', * from b; - select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; - system test view b set fake time '2054-01-24 23:22:21';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] -do - sleep 0.1 -done -# Update source table (by dropping and re-creating it - to test that tables are looked up by name -# rather than uuid), kick off refresh of the dependency. -$CLICKHOUSE_CLIENT -nq " - select '<10: waiting>', view, status, remaining_dependencies, next_refresh_time from refreshes; - drop table src; - create table src (x Int16) engine Memory as select 2; - system test view a set fake time '2054-01-01 00:00:01';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] -do - sleep 0.1 -done -# Both tables should've refreshed. 
-$CLICKHOUSE_CLIENT -nq " - select '<11: chain-refreshed a>', * from a; - select '<12: chain-refreshed b>', * from b; - select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception from refreshes;" - -# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to -# catch up to the same cycle. -$CLICKHOUSE_CLIENT -nq " - system test view b set fake time '2059-01-01 00:00:00'; - system refresh view b;" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - system test view b set fake time '2061-01-01 00:00:00'; - system test view a set fake time '2057-01-01 00:00:00';" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] -do - sleep 0.1 -done - -$CLICKHOUSE_CLIENT -nq " - select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; - truncate src; - insert into src values (3); - system test view a set fake time '2060-02-02 02:02:02';" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<15: chain-refreshed a>', * from a; - select '<16: chain-refreshed b>', * from b; - select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" - -# Get to WaitingForDependencies state and remove the depencency. -$CLICKHOUSE_CLIENT -nq " - system test view b set fake time '2062-03-03 03:03:03'" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - alter table b modify refresh every 2 year" -while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; - show create b;" - -# Select from a table that doesn't exist, get an exception. -$CLICKHOUSE_CLIENT -nq " - drop table a; - drop table b; - create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; - drop table src;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Exception' ] -do - sleep 0.1 -done -# Check exception, create src, expect successful refresh. -$CLICKHOUSE_CLIENT -nq " - select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c'; - create table src (x Int64) engine Memory as select 1; - system refresh view c;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] -do - sleep 0.1 -done -# Rename table. -$CLICKHOUSE_CLIENT -nq " - select '<20: unexception>', * from c; - rename table c to d; - select '<21: rename>', * from d; - select '<22: rename>', view, last_refresh_result from refreshes;" - -# Do various things during a refresh. -# First make a nonempty view. 
-$CLICKHOUSE_CLIENT -nq " - drop table d; - truncate src; - insert into src values (1) - create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] -do - sleep 0.1 -done -# Stop refreshes. -$CLICKHOUSE_CLIENT -nq " - select '<23: simple refresh>', * from e; - system stop view e;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] -do - sleep 0.1 -done -# Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure -# we wait for a slow refresh, not a previous fast one.) -$CLICKHOUSE_CLIENT -nq " - insert into src select * from numbers(1000) settings max_block_size=1; - system start view e;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] -do - sleep 0.1 -done -# Rename. -$CLICKHOUSE_CLIENT -nq " - rename table e to f; - select '<24: rename during refresh>', * from f; - select '<25: rename during refresh>', view, status from refreshes where view = 'f'; - alter table f modify refresh after 10 year;" - -# Cancel. -$CLICKHOUSE_CLIENT -nq " - system cancel view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Cancelled' ] -do - sleep 0.1 -done - -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" = 'Running' ] -do - sleep 0.1 -done - -# Check that another refresh doesn't immediately start after the cancelled one. -$CLICKHOUSE_CLIENT -nq " - select '<27: cancelled>', view, status from refreshes where view = 'f'; - system refresh view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] -do - sleep 0.1 -done -# Drop. -$CLICKHOUSE_CLIENT -nq " - drop table f; - select '<28: drop during refresh>', view, status from refreshes;" - -# Try OFFSET and RANDOMIZE FOR. -$CLICKHOUSE_CLIENT -nq " - create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42; - show create g; - system test view g set fake time '2050-02-03 15:30:13';" -while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - with '2050-02-10 04:00:00'::DateTime as expected - select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;" - -# Send data 'TO' an existing table. 
-$CLICKHOUSE_CLIENT -nq " - drop table g; - create table dest (x Int64) engine MergeTree order by x; - truncate src; - insert into src values (1); - create materialized view h refresh every 1 second to dest empty as select x*10 as x from src; - show create h;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<30: to existing table>', * from dest; - insert into src values (2);" -while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<31: to existing table>', * from dest; - drop table dest; - drop table src; - drop table h;" - -# EMPTY -$CLICKHOUSE_CLIENT -nq " - create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); - create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2)" -while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] -do - sleep 0.1 -done -$CLICKHOUSE_CLIENT -nq " - select '<32: empty>', view, status, last_refresh_result from refreshes order by view; - drop table i; - drop table j" - -$CLICKHOUSE_CLIENT -nq " - drop table refreshes;" +# TODO: Re-add this test in https://github.com/ClickHouse/ClickHouse/pull/58934 From f3d5859c8bd5d1fb43f0d636d6cff3062e4ca267 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 29 Jul 2024 19:35:37 +0000 Subject: [PATCH 1171/2361] Fix new test --- ...dynamic_read_subcolumns_small.reference.j2 | 192 +++++++++--------- ...03036_dynamic_read_subcolumns_small.sql.j2 | 16 +- 2 files changed, 104 insertions(+), 104 deletions(-) diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 index be3f4e53990..d6add681f51 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 @@ -25,26 +25,26 @@ UInt64 7 7 \N [] 8 8 \N [] 9 9 \N [] -str_10 \N str_10 [] [[0]] \N \N [] -str_11 \N str_11 [] +str_10 \N str_10 [] [[0,1]] \N \N [] -str_12 \N str_12 [] +str_11 \N str_11 [] [[0,1,2]] \N \N [] -str_13 \N str_13 [] +str_12 \N str_12 [] [[0,1,2,3]] \N \N [] -str_14 \N str_14 [] +str_13 \N str_13 [] [[0,1,2,3,4]] \N \N [] -str_15 \N str_15 [] +str_14 \N str_14 [] [[0,1,2,3,4,5]] \N \N [] -str_16 \N str_16 [] +str_15 \N str_15 [] [[0,1,2,3,4,5,6]] \N \N [] -str_17 \N str_17 [] +str_16 \N str_16 [] [[0,1,2,3,4,5,6,7]] \N \N [] -str_18 \N str_18 [] +str_17 \N str_17 [] [[0,1,2,3,4,5,6,7,8]] \N \N [] -str_19 \N str_19 [] +str_18 \N str_18 [] [[0,1,2,3,4,5,6,7,8,9]] \N \N [] +str_19 \N str_19 [] [20] \N \N [20] ['str_21','str_21'] \N \N ['str_21','str_21'] [22,22,22] \N \N [22,22,22] @@ -115,6 +115,7 @@ str_79 \N str_79 [] 7 \N [] 8 \N [] 9 \N [] +\N \N [] \N str_10 [] \N \N [] \N str_11 [] @@ -134,7 +135,6 @@ str_79 \N str_79 [] \N str_18 [] \N \N [] \N str_19 [] -\N \N [] \N \N [20] \N \N ['str_21','str_21'] \N \N [22,22,22] @@ -295,26 +295,26 @@ str_79 \N str_79 [] 7 7 \N [] 0 [] 8 8 \N [] 0 [] 9 9 \N [] 0 [] -str_10 \N \N [] 0 [] [[0]] \N \N [] 0 [] -str_11 \N \N [] 0 [] +str_10 \N \N [] 0 [] [[0,1]] \N \N [] 0 [] -str_12 \N \N [] 0 [] +str_11 \N \N [] 0 [] [[0,1,2]] \N \N [] 0 [] -str_13 \N \N [] 0 [] +str_12 \N \N [] 0 [] [[0,1,2,3]] \N \N [] 0 [] -str_14 \N \N [] 0 [] +str_13 \N \N [] 0 
[] [[0,1,2,3,4]] \N \N [] 0 [] -str_15 \N \N [] 0 [] +str_14 \N \N [] 0 [] [[0,1,2,3,4,5]] \N \N [] 0 [] -str_16 \N \N [] 0 [] +str_15 \N \N [] 0 [] [[0,1,2,3,4,5,6]] \N \N [] 0 [] -str_17 \N \N [] 0 [] +str_16 \N \N [] 0 [] [[0,1,2,3,4,5,6,7]] \N \N [] 0 [] -str_18 \N \N [] 0 [] +str_17 \N \N [] 0 [] [[0,1,2,3,4,5,6,7,8]] \N \N [] 0 [] -str_19 \N \N [] 0 [] +str_18 \N \N [] 0 [] [[0,1,2,3,4,5,6,7,8,9]] \N \N [] 0 [] +str_19 \N \N [] 0 [] [20] \N \N [20] 1 [20] ['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] [22,22,22] \N \N [22,22,22] 3 [22,22,22] @@ -475,26 +475,26 @@ str_79 \N \N [] 0 [] 7 0 [] [] 8 0 [] [] 9 0 [] [] -str_10 0 [] [] [[0]] 0 [] [] -str_11 0 [] [] +str_10 0 [] [] [[0,1]] 0 [] [] -str_12 0 [] [] +str_11 0 [] [] [[0,1,2]] 0 [] [] -str_13 0 [] [] +str_12 0 [] [] [[0,1,2,3]] 0 [] [] -str_14 0 [] [] +str_13 0 [] [] [[0,1,2,3,4]] 0 [] [] -str_15 0 [] [] +str_14 0 [] [] [[0,1,2,3,4,5]] 0 [] [] -str_16 0 [] [] +str_15 0 [] [] [[0,1,2,3,4,5,6]] 0 [] [] -str_17 0 [] [] +str_16 0 [] [] [[0,1,2,3,4,5,6,7]] 0 [] [] -str_18 0 [] [] +str_17 0 [] [] [[0,1,2,3,4,5,6,7,8]] 0 [] [] -str_19 0 [] [] +str_18 0 [] [] [[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] [20] 0 [] [20] ['str_21','str_21'] 0 [] [NULL,NULL] [22,22,22] 0 [] [22,22,22] @@ -655,7 +655,6 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] -[] [] [] [1] [[0]] [[[]]] [] [] [] [2] [[0,1]] [[[],[]]] @@ -735,6 +734,7 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] +[] [] [] Array(Array(Dynamic)) Array(Variant(String, UInt64)) None @@ -762,26 +762,26 @@ UInt64 7 7 \N [] 8 8 \N [] 9 9 \N [] -str_10 \N str_10 [] [[0]] \N \N [] -str_11 \N str_11 [] +str_10 \N str_10 [] [[0,1]] \N \N [] -str_12 \N str_12 [] +str_11 \N str_11 [] [[0,1,2]] \N \N [] -str_13 \N str_13 [] +str_12 \N str_12 [] [[0,1,2,3]] \N \N [] -str_14 \N str_14 [] +str_13 \N str_13 [] [[0,1,2,3,4]] \N \N [] -str_15 \N str_15 [] +str_14 \N str_14 [] [[0,1,2,3,4,5]] \N \N [] -str_16 \N str_16 [] +str_15 \N str_15 [] [[0,1,2,3,4,5,6]] \N \N [] -str_17 \N str_17 [] +str_16 \N str_16 [] [[0,1,2,3,4,5,6,7]] \N \N [] -str_18 \N str_18 [] +str_17 \N str_17 [] [[0,1,2,3,4,5,6,7,8]] \N \N [] -str_19 \N str_19 [] +str_18 \N str_18 [] [[0,1,2,3,4,5,6,7,8,9]] \N \N [] +str_19 \N str_19 [] [20] \N \N [20] ['str_21','str_21'] \N \N ['str_21','str_21'] [22,22,22] \N \N [22,22,22] @@ -852,6 +852,7 @@ str_79 \N str_79 [] 7 \N [] 8 \N [] 9 \N [] +\N \N [] \N str_10 [] \N \N [] \N str_11 [] @@ -871,7 +872,6 @@ str_79 \N str_79 [] \N str_18 [] \N \N [] \N str_19 [] -\N \N [] \N \N [20] \N \N ['str_21','str_21'] \N \N [22,22,22] @@ -1032,26 +1032,26 @@ str_79 \N str_79 [] 7 7 \N [] 0 [] 8 8 \N [] 0 [] 9 9 \N [] 0 [] -str_10 \N \N [] 0 [] [[0]] \N \N [] 0 [] -str_11 \N \N [] 0 [] +str_10 \N \N [] 0 [] [[0,1]] \N \N [] 0 [] -str_12 \N \N [] 0 [] +str_11 \N \N [] 0 [] [[0,1,2]] \N \N [] 0 [] -str_13 \N \N [] 0 [] +str_12 \N \N [] 0 [] [[0,1,2,3]] \N \N [] 0 [] -str_14 \N \N [] 0 [] +str_13 \N \N [] 0 [] [[0,1,2,3,4]] \N \N [] 0 [] -str_15 \N \N [] 0 [] +str_14 \N \N [] 0 [] [[0,1,2,3,4,5]] \N \N [] 0 [] -str_16 \N \N [] 0 [] +str_15 \N \N [] 0 [] [[0,1,2,3,4,5,6]] \N \N [] 0 [] -str_17 \N \N [] 0 [] +str_16 \N \N [] 0 [] [[0,1,2,3,4,5,6,7]] \N \N [] 0 [] -str_18 \N \N [] 0 [] +str_17 \N \N [] 0 [] [[0,1,2,3,4,5,6,7,8]] \N \N [] 0 [] -str_19 \N \N [] 0 [] +str_18 \N \N [] 0 [] [[0,1,2,3,4,5,6,7,8,9]] \N \N [] 0 [] +str_19 \N \N [] 0 [] [20] \N \N [20] 1 [20] ['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] [22,22,22] \N \N [22,22,22] 3 [22,22,22] @@ -1212,26 +1212,26 @@ str_79 \N 
\N [] 0 [] 7 0 [] [] 8 0 [] [] 9 0 [] [] -str_10 0 [] [] [[0]] 0 [] [] -str_11 0 [] [] +str_10 0 [] [] [[0,1]] 0 [] [] -str_12 0 [] [] +str_11 0 [] [] [[0,1,2]] 0 [] [] -str_13 0 [] [] +str_12 0 [] [] [[0,1,2,3]] 0 [] [] -str_14 0 [] [] +str_13 0 [] [] [[0,1,2,3,4]] 0 [] [] -str_15 0 [] [] +str_14 0 [] [] [[0,1,2,3,4,5]] 0 [] [] -str_16 0 [] [] +str_15 0 [] [] [[0,1,2,3,4,5,6]] 0 [] [] -str_17 0 [] [] +str_16 0 [] [] [[0,1,2,3,4,5,6,7]] 0 [] [] -str_18 0 [] [] +str_17 0 [] [] [[0,1,2,3,4,5,6,7,8]] 0 [] [] -str_19 0 [] [] +str_18 0 [] [] [[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] [20] 0 [] [20] ['str_21','str_21'] 0 [] [NULL,NULL] [22,22,22] 0 [] [22,22,22] @@ -1392,7 +1392,6 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] -[] [] [] [1] [[0]] [[[]]] [] [] [] [2] [[0,1]] [[[],[]]] @@ -1472,6 +1471,7 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] +[] [] [] Array(Array(Dynamic)) Array(Variant(String, UInt64)) None @@ -1499,26 +1499,26 @@ UInt64 7 7 \N [] 8 8 \N [] 9 9 \N [] -str_10 \N str_10 [] [[0]] \N \N [] -str_11 \N str_11 [] +str_10 \N str_10 [] [[0,1]] \N \N [] -str_12 \N str_12 [] +str_11 \N str_11 [] [[0,1,2]] \N \N [] -str_13 \N str_13 [] +str_12 \N str_12 [] [[0,1,2,3]] \N \N [] -str_14 \N str_14 [] +str_13 \N str_13 [] [[0,1,2,3,4]] \N \N [] -str_15 \N str_15 [] +str_14 \N str_14 [] [[0,1,2,3,4,5]] \N \N [] -str_16 \N str_16 [] +str_15 \N str_15 [] [[0,1,2,3,4,5,6]] \N \N [] -str_17 \N str_17 [] +str_16 \N str_16 [] [[0,1,2,3,4,5,6,7]] \N \N [] -str_18 \N str_18 [] +str_17 \N str_17 [] [[0,1,2,3,4,5,6,7,8]] \N \N [] -str_19 \N str_19 [] +str_18 \N str_18 [] [[0,1,2,3,4,5,6,7,8,9]] \N \N [] +str_19 \N str_19 [] [20] \N \N [20] ['str_21','str_21'] \N \N ['str_21','str_21'] [22,22,22] \N \N [22,22,22] @@ -1589,6 +1589,7 @@ str_79 \N str_79 [] 7 \N [] 8 \N [] 9 \N [] +\N \N [] \N str_10 [] \N \N [] \N str_11 [] @@ -1608,7 +1609,6 @@ str_79 \N str_79 [] \N str_18 [] \N \N [] \N str_19 [] -\N \N [] \N \N [20] \N \N ['str_21','str_21'] \N \N [22,22,22] @@ -1769,26 +1769,26 @@ str_79 \N str_79 [] 7 7 \N [] 0 [] 8 8 \N [] 0 [] 9 9 \N [] 0 [] -str_10 \N \N [] 0 [] [[0]] \N \N [] 0 [] -str_11 \N \N [] 0 [] +str_10 \N \N [] 0 [] [[0,1]] \N \N [] 0 [] -str_12 \N \N [] 0 [] +str_11 \N \N [] 0 [] [[0,1,2]] \N \N [] 0 [] -str_13 \N \N [] 0 [] +str_12 \N \N [] 0 [] [[0,1,2,3]] \N \N [] 0 [] -str_14 \N \N [] 0 [] +str_13 \N \N [] 0 [] [[0,1,2,3,4]] \N \N [] 0 [] -str_15 \N \N [] 0 [] +str_14 \N \N [] 0 [] [[0,1,2,3,4,5]] \N \N [] 0 [] -str_16 \N \N [] 0 [] +str_15 \N \N [] 0 [] [[0,1,2,3,4,5,6]] \N \N [] 0 [] -str_17 \N \N [] 0 [] +str_16 \N \N [] 0 [] [[0,1,2,3,4,5,6,7]] \N \N [] 0 [] -str_18 \N \N [] 0 [] +str_17 \N \N [] 0 [] [[0,1,2,3,4,5,6,7,8]] \N \N [] 0 [] -str_19 \N \N [] 0 [] +str_18 \N \N [] 0 [] [[0,1,2,3,4,5,6,7,8,9]] \N \N [] 0 [] +str_19 \N \N [] 0 [] [20] \N \N [20] 1 [20] ['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] [22,22,22] \N \N [22,22,22] 3 [22,22,22] @@ -1949,26 +1949,26 @@ str_79 \N \N [] 0 [] 7 0 [] [] 8 0 [] [] 9 0 [] [] -str_10 0 [] [] [[0]] 0 [] [] -str_11 0 [] [] +str_10 0 [] [] [[0,1]] 0 [] [] -str_12 0 [] [] +str_11 0 [] [] [[0,1,2]] 0 [] [] -str_13 0 [] [] +str_12 0 [] [] [[0,1,2,3]] 0 [] [] -str_14 0 [] [] +str_13 0 [] [] [[0,1,2,3,4]] 0 [] [] -str_15 0 [] [] +str_14 0 [] [] [[0,1,2,3,4,5]] 0 [] [] -str_16 0 [] [] +str_15 0 [] [] [[0,1,2,3,4,5,6]] 0 [] [] -str_17 0 [] [] +str_16 0 [] [] [[0,1,2,3,4,5,6,7]] 0 [] [] -str_18 0 [] [] +str_17 0 [] [] [[0,1,2,3,4,5,6,7,8]] 0 [] [] -str_19 0 [] [] +str_18 0 [] [] [[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] 
[20] 0 [] [20] ['str_21','str_21'] 0 [] [NULL,NULL] [22,22,22] 0 [] [22,22,22] @@ -2129,7 +2129,6 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] -[] [] [] [1] [[0]] [[[]]] [] [] [] [2] [[0,1]] [[[],[]]] @@ -2209,3 +2208,4 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] +[] [] [] diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 index 1ed836fbeee..3253d7a6c68 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 @@ -29,14 +29,14 @@ select count() from test where not empty(d.`Array(Array(Dynamic))`); select count() from test where d is NULL; select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); -select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id; -select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id; -select d.Int8, d.Date, d.`Array(String)` from test order by id; -select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id; -select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test order by id; -select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id; -select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id; -select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test order by id; +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id, d; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id, d; +select d.Int8, d.Date, d.`Array(String)` from test order by id, d; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test order by id, d; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test order by id, d; drop table test; From 36c57ca50bf54180dc1a68bfe097112f3dc13a6f Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 29 Jul 2024 19:50:28 +0000 Subject: [PATCH 1172/2361] only check in the create mode to prevent failure to start --- src/Storages/StorageFactory.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 71f70a807a8..a059d624cd8 100644 --- a/src/Storages/StorageFactory.cpp +++ 
b/src/Storages/StorageFactory.cpp
@@ -202,11 +202,12 @@ StoragePtr StorageFactory::get(
 "projections",
 [](StorageFeatures features) { return features.supports_projections; });
- /// Now let's handle the merge tree family, projection is fully supported in (Replictaed)MergeTree,
- /// but also allowed in non-throw mode with other mergetree family members.
+ /// Now let's handle the merge tree family. Note we only handle in the mode of CREATE due to backward compatibility.
+ /// Otherwise, it would fail to start in the case of existing projections with special mergetree.
+ /// Projection is fully supported in (Replicated)MergeTree, but also allowed in non-throw mode with other mergetree family members.
 chassert(query.storage->engine);
- if (std::string_view engine_name(query.storage->engine->name);
- engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree"
+ if (std::string_view engine_name(query.storage->engine->name); mode == LoadingStrictnessLevel::CREATE
+ && engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree")
 {
 /// default throw mode in deduplicate_merge_projection_mode
 bool projection_allowed = false;
From 6317979825794882905bc02b3a18dd82cfd8ec1c Mon Sep 17 00:00:00 2001
From: Nikita Taranov
Date: Mon, 29 Jul 2024 20:53:11 +0100
Subject: [PATCH 1173/2361] add one more
---
 tests/integration/test_executable_table_function/test.py | 1 +
 1 file changed, 1 insertion(+)
diff --git a/tests/integration/test_executable_table_function/test.py b/tests/integration/test_executable_table_function/test.py
index 801a3c7c14a..a79616fc008 100644
--- a/tests/integration/test_executable_table_function/test.py
+++ b/tests/integration/test_executable_table_function/test.py
@@ -139,6 +139,7 @@ def test_executable_function_input_signalled_python(started_cluster):
 assert node.query(query.format(source="(SELECT id FROM test_data_table)")) == ""
+@pytest.mark.repeat(50)
 def test_executable_function_input_slow_python(started_cluster):
 skip_test_msan(node)
From 2cae0cb5ecedc2fd041def829b35bdf4dbb50f2f Mon Sep 17 00:00:00 2001
From: Igor Nikonov
Date: Mon, 29 Jul 2024 20:29:15 +0000
Subject: [PATCH 1174/2361] force_connected flag for connection establisher
---
 src/Client/ConnectionEstablisher.cpp | 4 ++--
 src/Client/ConnectionEstablisher.h | 4 +++-
 src/QueryPipeline/RemoteQueryExecutor.cpp | 4 ++--
 3 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/src/Client/ConnectionEstablisher.cpp b/src/Client/ConnectionEstablisher.cpp
index 8cebe7a6183..f96546846c7 100644
--- a/src/Client/ConnectionEstablisher.cpp
+++ b/src/Client/ConnectionEstablisher.cpp
@@ -33,12 +33,12 @@ ConnectionEstablisher::ConnectionEstablisher(
 {
 }
-void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::string & fail_message)
+void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::string & fail_message, bool force_connected)
 {
 try
 {
 ProfileEvents::increment(ProfileEvents::DistributedConnectionTries);
- result.entry = pool->get(*timeouts, settings, /* force_connected = */ true);
+ result.entry = pool->get(*timeouts, settings, force_connected);
 AsyncCallbackSetter async_setter(&*result.entry, std::move(async_callback));
 UInt64 server_revision = 0;
diff --git a/src/Client/ConnectionEstablisher.h b/src/Client/ConnectionEstablisher.h
index a3a01e63246..304ec4d34b4 100644
--- a/src/Client/ConnectionEstablisher.h
+++ b/src/Client/ConnectionEstablisher.h
@@ -24,7 +24,9 @@ public:
 const QualifiedTableName * table_to_check = nullptr);
 /// Establish connection and save it in 
result, write possible exception message in fail_message.
- void run(TryResult & result, std::string & fail_message);
+ /// The connection is returned from the pool and can be stale. Use the force_connected flag
+ /// to ensure that the connection is a working one.
+ void run(TryResult & result, std::string & fail_message, bool force_connected = false);
 /// Set async callback that will be called when reading from socket blocks.
 void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); }
diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp
index b08f2002f64..09ea6a9fb3c 100644
--- a/src/QueryPipeline/RemoteQueryExecutor.cpp
+++ b/src/QueryPipeline/RemoteQueryExecutor.cpp
@@ -89,12 +89,12 @@ RemoteQueryExecutor::RemoteQueryExecutor(
 auto table_name = main_table.getQualifiedName();
 ConnectionEstablisher connection_establisher(pool, &timeouts, current_settings, log, &table_name);
- connection_establisher.run(result, fail_message);
+ connection_establisher.run(result, fail_message, /*force_connected=*/ true);
 }
 else
 {
 ConnectionEstablisher connection_establisher(pool, &timeouts, current_settings, log, nullptr);
- connection_establisher.run(result, fail_message);
+ connection_establisher.run(result, fail_message, /*force_connected=*/ true);
 }
 std::vector connection_entries;
From 09619e6006f122fb3a8352328a07f42bfd284d17 Mon Sep 17 00:00:00 2001
From: jsc0218
Date: Mon, 29 Jul 2024 20:57:21 +0000
Subject: [PATCH 1175/2361] consider the case of alter table add projection
---
 src/Storages/MergeTree/MergeTreeData.cpp | 10 +++++++
 ...ojection_merge_special_mergetree.reference | 1 +
 ...206_projection_merge_special_mergetree.sql | 26 +++++++++++++++++--
 3 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index 78a551591a6..677c4a92cda 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -3211,6 +3211,16 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
 queryToString(mutation_commands.ast()));
 }
+ /// Block the case of alter table add projection for special merge trees.
+ if (std::any_of(commands.begin(), commands.end(), [](const AlterCommand & c) { return c.type == AlterCommand::ADD_PROJECTION; }))
+ {
+ if (auto storage_name = getName(); storage_name != "MergeTree" && storage_name != "ReplicatedMergeTree"
+ && settings_from_storage->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW)
+ throw Exception(ErrorCodes::NOT_IMPLEMENTED,
+ "Projection is fully supported in (Replicated)MergeTree, but also allowed in non-throw mode with other"
+ " mergetree family members. 
Consider drop or rebuild option of deduplicate_merge_projection_mode."); + } + commands.apply(new_metadata, local_context); if (AlterCommands::hasFullTextIndex(new_metadata) && !settings.allow_experimental_full_text_index) diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference index e69de29bb2d..1a9cc2b7fbf 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.reference @@ -0,0 +1 @@ +p diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index 25517fbba30..e0a4f4f8cec 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -59,8 +59,7 @@ OPTIMIZE TABLE tp FINAL; -- expecting no projection SYSTEM FLUSH LOGS; SELECT - name, - part_name + name FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); @@ -81,4 +80,27 @@ ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +DROP TABLE tp; + +-- test alter add projection case +CREATE TABLE tp ( + type Int32, + eventcnt UInt64 +) engine = ReplacingMergeTree order by type; + +ALTER TABLE tp ADD PROJECTION p (SELECT sum(eventcnt), type GROUP BY type); -- { serverError NOT_IMPLEMENTED } + +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'drop'; + +ALTER TABLE tp ADD PROJECTION p (SELECT sum(eventcnt), type GROUP BY type); + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +SYSTEM FLUSH LOGS; +-- expecting projection p +SELECT + name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); + DROP TABLE tp; \ No newline at end of file From 8dfe4a93f6c1afde8475984a899cd5604f415d78 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 29 Jul 2024 22:07:10 +0200 Subject: [PATCH 1176/2361] Rewrite function get_broken_projections_info() without using system.errors --- .../test_broken_projections/test.py | 86 +++++++++---------- 1 file changed, 42 insertions(+), 44 deletions(-) diff --git a/tests/integration/test_broken_projections/test.py b/tests/integration/test_broken_projections/test.py index 2cbbee5563f..9493937d936 100644 --- a/tests/integration/test_broken_projections/test.py +++ b/tests/integration/test_broken_projections/test.py @@ -148,23 +148,22 @@ def break_part(node, table, part): bash(node, f"rm '{part_path}/columns.txt'") -def get_broken_projections_info(node, table, active=True): +def get_broken_projections_info(node, table, part=None, projection=None, active=True): + parent_name_filter = f" AND parent_name = '{part}'" if part else "" + name_filter = f" AND name = '{projection}'" if projection else "" return node.query( f""" - SELECT parent_name, name, errors.name FROM - ( - SELECT parent_name, name, exception_code + SELECT parent_name, name, exception FROM system.projection_parts WHERE table='{table}' AND database=currentDatabase() AND is_broken = 1 AND active = {active} - ) AS parts_info - INNER JOIN system.errors AS errors - ON parts_info.exception_code = errors.code + {parent_name_filter} + {name_filter} ORDER BY parent_name, name """ - ).strip() + ) def get_projections_info(node, table): @@ -312,8 
+311,8 @@ def test_broken_ignored(cluster): # Projection 'proj1' from part all_2_2_0 will now appear in broken parts info # because it was marked broken during "check table" query. - assert "all_2_2_0\tproj1\tFILE_DOESNT_EXIST" in get_broken_projections_info( - node, table_name + assert "FILE_DOESNT_EXIST" in get_broken_projections_info( + node, table_name, part="all_2_2_0", projection="proj1" ) # Check table query will also show a list of parts which have broken projections. @@ -323,14 +322,14 @@ def test_broken_ignored(cluster): break_projection(node, table_name, "proj2", "all_2_2_0", "data") # It will not yet appear in broken projections info. - assert "proj2" not in get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name, projection="proj2") # Select now fails with error "File doesn't exist" check(node, table_name, 0, "proj2", "FILE_DOESNT_EXIST") # Projection 'proj2' from part all_2_2_0 will now appear in broken parts info. - assert "all_2_2_0\tproj2\tNO_FILE_IN_DATA_PART" in get_broken_projections_info( - node, table_name + assert "NO_FILE_IN_DATA_PART" in get_broken_projections_info( + node, table_name, part="all_2_2_0", projection="proj2" ) # Second select works, because projection is now marked as broken. @@ -340,7 +339,7 @@ def test_broken_ignored(cluster): break_projection(node, table_name, "proj2", "all_3_3_0", "data") # It will not yet appear in broken projections info. - assert "all_3_3_0" not in get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name, part="all_3_3_0") insert(node, table_name, 20, 5) insert(node, table_name, 25, 5) @@ -371,8 +370,8 @@ def test_broken_ignored(cluster): node, table_name ) - assert "all_3_3_0" in get_broken_projections_info(node, table_name, active=False) - assert "all_2_2_0" in get_broken_projections_info(node, table_name, active=True) + assert get_broken_projections_info(node, table_name, part="all_3_3_0", active=False) + assert get_broken_projections_info(node, table_name, part="all_2_2_0", active=True) # 0 because of all_2_2_0 check(node, table_name, 0) @@ -396,8 +395,8 @@ def test_materialize_broken_projection(cluster): break_projection(node, table_name, "proj1", "all_1_1_0", "metadata") reattach(node, table_name) - assert "all_1_1_0\tproj1\tNO_FILE_IN_DATA_PART" in get_broken_projections_info( - node, table_name + assert "NO_FILE_IN_DATA_PART" in get_broken_projections_info( + node, table_name, part="all_1_1_0", projection="proj1" ) assert "Part all_1_1_0 has a broken projection proj1" in check_table_full( node, table_name @@ -406,8 +405,8 @@ def test_materialize_broken_projection(cluster): break_projection(node, table_name, "proj2", "all_1_1_0", "data") reattach(node, table_name) - assert "all_1_1_0\tproj2\tFILE_DOESNT_EXIST" in get_broken_projections_info( - node, table_name + assert "FILE_DOESNT_EXIST" in get_broken_projections_info( + node, table_name, part="all_1_1_0", projection="proj2" ) assert "Part all_1_1_0 has a broken projection proj2" in check_table_full( node, table_name @@ -469,8 +468,8 @@ def test_broken_projections_in_backups_2(cluster): break_projection(node, table_name, "proj2", "all_2_2_0", "part") check(node, table_name, 0, "proj2", "ErrnoException") - assert "all_2_2_0\tproj2\tFILE_DOESNT_EXIST" == get_broken_projections_info( - node, table_name + assert "FILE_DOESNT_EXIST" in get_broken_projections_info( + node, table_name, part="all_2_2_0", projection="proj2" ) assert "FILE_DOESNT_EXIST" in node.query_and_get_error( @@ 
-524,8 +523,8 @@ def test_broken_projections_in_backups_3(cluster): assert "Part all_1_1_0 has a broken projection proj1" in check_table_full( node, table_name ) - assert "all_1_1_0\tproj1\tFILE_DOESNT_EXIST" == get_broken_projections_info( - node, table_name + assert "FILE_DOESNT_EXIST" in get_broken_projections_info( + node, table_name, part="all_1_1_0", projection="proj1" ) backup_name = f"b4-{get_random_string()}" @@ -545,8 +544,11 @@ def test_broken_projections_in_backups_3(cluster): ) check(node, table_name, 0) - assert "all_1_1_0\tproj1\tNO_FILE_IN_DATA_PART" == get_broken_projections_info( - node, table_name + assert ( + "Projection directory proj1.proj does not exist while loading projections" + in get_broken_projections_info( + node, table_name, part="all_1_1_0", projection="proj1" + ) ) @@ -569,7 +571,7 @@ def test_check_part_thread(cluster): break_projection(node, table_name, "proj2", "all_2_2_0", "data") # It will not yet appear in broken projections info. - assert "proj2" not in get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name, projection="proj2") # Select now fails with error "File doesn't exist" check(node, table_name, 0, "proj2", "FILE_DOESNT_EXIST", do_check_command=False) @@ -606,15 +608,15 @@ def test_broken_on_start(cluster): break_projection(node, table_name, "proj2", "all_2_2_0", "data") # It will not yet appear in broken projections info. - assert "proj2" not in get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name, projection="proj2") # Select now fails with error "File doesn't exist" # We will mark projection as broken. check(node, table_name, 0, "proj2", "FILE_DOESNT_EXIST") # Projection 'proj2' from part all_2_2_0 will now appear in broken parts info. - assert "all_2_2_0\tproj2\tNO_FILE_IN_DATA_PART" in get_broken_projections_info( - node, table_name + assert "NO_FILE_IN_DATA_PART" in get_broken_projections_info( + node, table_name, part="all_2_2_0", projection="proj2" ) # Second select works, because projection is now marked as broken. @@ -623,7 +625,7 @@ def test_broken_on_start(cluster): node.restart_clickhouse() # It will not yet appear in broken projections info. - assert "proj2" in get_broken_projections_info(node, table_name) + assert get_broken_projections_info(node, table_name, projection="proj2") # Select works check(node, table_name, 0) @@ -654,7 +656,7 @@ def test_mutation_with_broken_projection(cluster): node, table_name ) - assert "" == get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name) check(node, table_name, 1) @@ -662,21 +664,21 @@ def test_mutation_with_broken_projection(cluster): break_projection(node, table_name, "proj2", "all_2_2_0_4", "data") # It will not yet appear in broken projections info. - assert "proj2" not in get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name, projection="proj2") # Select now fails with error "File doesn't exist" # We will mark projection as broken. check(node, table_name, 0, "proj2", "FILE_DOESNT_EXIST") # Projection 'proj2' from part all_2_2_0_4 will now appear in broken parts info. - assert "all_2_2_0_4\tproj2\tNO_FILE_IN_DATA_PART" in get_broken_projections_info( - node, table_name + assert "NO_FILE_IN_DATA_PART" in get_broken_projections_info( + node, table_name, part="all_2_2_0_4", projection="proj2" ) # Second select works, because projection is now marked as broken. 
check(node, table_name, 0) - assert "all_2_2_0_4" in get_broken_projections_info(node, table_name) + assert get_broken_projections_info(node, table_name, part="all_2_2_0_4") node.query( f"ALTER TABLE {table_name} DELETE WHERE _part == 'all_0_0_0_4' SETTINGS mutations_sync = 1" @@ -690,14 +692,10 @@ def test_mutation_with_broken_projection(cluster): # Still broken because it was hardlinked. broken = get_broken_projections_info(node, table_name) - assert ( - "all_2_2_0_5" in broken or "" == broken - ) # second could be because of a merge. + if broken: # can be not broken because of a merge. + assert get_broken_projections_info(node, table_name, part="all_2_2_0_5") - if "" == broken: - check(node, table_name, 1) - else: - check(node, table_name, 0) + check(node, table_name, not broken) node.query( f"ALTER TABLE {table_name} DELETE WHERE c == 13 SETTINGS mutations_sync = 1" @@ -710,6 +708,6 @@ def test_mutation_with_broken_projection(cluster): ) # Not broken anymore. - assert "" == get_broken_projections_info(node, table_name) + assert not get_broken_projections_info(node, table_name) check(node, table_name, 1) From 18327bdf9c7a08d0f88683d51f930ef83b23127f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 29 Jul 2024 22:17:02 +0000 Subject: [PATCH 1177/2361] move timeout reset to after pipeline reset --- src/Server/TCPHandler.cpp | 10 ++-------- src/Server/TCPHandler.h | 3 --- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index d184074729b..06feeadb892 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -365,12 +365,6 @@ void TCPHandler::runImpl() try { - /// If a user passed query-local timeouts, reset socket to initial state at the end of the query - SCOPE_EXIT({ - std::scoped_lock lock(in_mutex, out_mutex); - state.timeout_setter.reset(); - }); - /** If Query - process it. If Ping or Cancel - go back to the beginning. * There may come settings for a separate query that modify `query_context`. * It's possible to receive part uuids packet before the query, so then receivePacket has to be called twice. @@ -633,6 +627,8 @@ void TCPHandler::runImpl() state.io.onException(); exception.reset(e.clone()); + state.timeout_setter.reset(); + if (e.code() == ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT) throw; @@ -782,8 +778,6 @@ void TCPHandler::extractConnectionSettingsFromContext(const ContextPtr & context bool TCPHandler::readDataNext() { - std::scoped_lock lock(in_mutex); - Stopwatch watch(CLOCK_MONOTONIC_COARSE); /// Poll interval should not be greater than receive_timeout diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 50ef6bcf20d..74afb5a14a5 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -229,9 +229,6 @@ private: /// `out_mutex` protects `out` (WriteBuffer). /// So it is used for method sendData(), sendProgress(), sendLogs(), etc. std::mutex out_mutex; - /// `in_mutex` protects `in` (ReadBuffer) - /// Used in readDataNext() and to protect socket timeout settings - std::mutex in_mutex; /// `task_callback_mutex` protects tasks callbacks. /// Inside these callbacks we might also change cancellation status, /// so it also protects cancellation status checks. 
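Note on PATCH 1176 above: the rewritten get_broken_projections_info() helper reads the exception text directly from system.projection_parts instead of joining system.errors, and it lets callers narrow the result by part and projection name. As a minimal stand-alone sketch, the query it builds with both optional filters supplied looks roughly like the following; the table, part and projection names are illustrative placeholders rather than values taken from the patch:

    -- Sketch of the helper's query with both optional filters applied (placeholder names)
    SELECT parent_name, name, exception
    FROM system.projection_parts
    WHERE table = 'tp'
      AND database = currentDatabase()
      AND is_broken = 1
      AND active = 1
      AND parent_name = 'all_2_2_0'   -- optional part filter
      AND name = 'proj2'              -- optional projection filter
    ORDER BY parent_name, name

Returning the exception column per broken projection part also allows the tests to assert on specific error names (for example FILE_DOESNT_EXIST or NO_FILE_IN_DATA_PART) for an individual part/projection pair, as the updated assertions in the test do.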
From f94bebb0530b7a9fdd3db104ad4261a467fafad3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 29 Jul 2024 22:36:59 +0000 Subject: [PATCH 1178/2361] fix --- docs/en/sql-reference/window-functions/lagInFrame.md | 2 +- docs/en/sql-reference/window-functions/leadInFrame.md | 2 +- src/Processors/Transforms/WindowTransform.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/window-functions/lagInFrame.md b/docs/en/sql-reference/window-functions/lagInFrame.md index 049e095c10f..de6e9005baa 100644 --- a/docs/en/sql-reference/window-functions/lagInFrame.md +++ b/docs/en/sql-reference/window-functions/lagInFrame.md @@ -23,7 +23,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind **Parameters** - `x` — Column name. - `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default). -- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default). +- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - default value of column type when omitted). **Returned value** diff --git a/docs/en/sql-reference/window-functions/leadInFrame.md b/docs/en/sql-reference/window-functions/leadInFrame.md index fc1b92cc266..4a82c03f6e6 100644 --- a/docs/en/sql-reference/window-functions/leadInFrame.md +++ b/docs/en/sql-reference/window-functions/leadInFrame.md @@ -23,7 +23,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind **Parameters** - `x` — Column name. - `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default). -- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default). +- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - default value of column type when omitted). 
**Returned value** diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 1eac08780e9..f76e2d64368 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -2408,7 +2408,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (argument_types[0]->equals(*argument_types[2])) return; - const auto supertype = getLeastSupertype(DataTypes{argument_types[0], argument_types[2]}); + const auto supertype = tryGetLeastSupertype(DataTypes{argument_types[0], argument_types[2]}); if (!supertype) { throw Exception(ErrorCodes::BAD_ARGUMENTS, From 2aafd711463d30cb7803a054f434268335817db8 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 26 Jul 2024 19:08:07 +0200 Subject: [PATCH 1179/2361] Moved settings to ServerSettings and made the table drop even faster (cherry picked from commit e1eb542dcc2b9fbc6a470a3cd9a183e79c86d7c7) --- programs/local/LocalServer.cpp | 5 +++ programs/server/Server.cpp | 5 +++ src/Core/ServerSettings.cpp | 2 +- src/Core/ServerSettings.h | 9 ++++ src/IO/SharedThreadPools.cpp | 10 +++++ src/IO/SharedThreadPools.h | 3 ++ src/Interpreters/DatabaseCatalog.cpp | 50 ++++++++++------------- src/Interpreters/DatabaseCatalog.h | 15 ------- src/Interpreters/InterpreterDropQuery.cpp | 19 +++++++-- 9 files changed, 70 insertions(+), 48 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 88d5a0253d1..250c5e3b6c8 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -184,6 +184,11 @@ void LocalServer::initialize(Poco::Util::Application & self) cleanup_threads, 0, // We don't need any threads one all the parts will be deleted cleanup_threads); + + getDatabaseCatalogDropTablesThreadPool().initialize( + server_settings.database_catalog_drop_table_concurrency, + 0, // We don't need any threads if there are no DROP queries. + server_settings.database_catalog_drop_table_concurrency); } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 16888015f8b..dd56114de0f 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1035,6 +1035,11 @@ try 0, // We don't need any threads once all the tables will be created max_database_replicated_create_table_thread_pool_size); + getDatabaseCatalogDropTablesThreadPool().initialize( + server_settings.database_catalog_drop_table_concurrency, + 0, // We don't need any threads if there are no DROP queries. + server_settings.database_catalog_drop_table_concurrency); + /// Initialize global local cache for remote filesystem. if (config().has("local_cache_for_remote_fs")) { diff --git a/src/Core/ServerSettings.cpp b/src/Core/ServerSettings.cpp index fbf86d3e9ad..6c498014996 100644 --- a/src/Core/ServerSettings.cpp +++ b/src/Core/ServerSettings.cpp @@ -1,4 +1,4 @@ -#include "ServerSettings.h" +#include #include namespace DB diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 28b32a6e6a5..f2f78f70e91 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -66,6 +66,15 @@ namespace DB M(Bool, async_insert_queue_flush_on_shutdown, true, "If true queue of asynchronous inserts is flushed on graceful shutdown", 0) \ M(Bool, ignore_empty_sql_security_in_create_view_query, true, "If true, ClickHouse doesn't write defaults for empty SQL security statement in CREATE VIEW queries. 
This setting is only necessary for the migration period and will become obsolete in 24.4", 0) \ \ + /* Database Catalog */ \ + M(UInt64, database_atomic_delay_before_drop_table_sec, 8 * 60, "The delay during which a dropped table can be restored using the UNDROP statement. If DROP TABLE ran with a SYNC modifier, the setting is ignored.", 0) \ + M(UInt64, database_catalog_unused_dir_hide_timeout_sec, 60 * 60, "Parameter of a task that cleans up garbage from store/ directory. If some subdirectory is not used by clickhouse-server and this directory was not modified for last database_catalog_unused_dir_hide_timeout_sec seconds, the task will 'hide' this directory by removing all access rights. It also works for directories that clickhouse-server does not expect to see inside store/. Zero means 'immediately'.", 0) \ + M(UInt64, database_catalog_unused_dir_rm_timeout_sec, 30 * 24 * 60 * 60, "Parameter of a task that cleans up garbage from store/ directory. If some subdirectory is not used by clickhouse-server and it was previously 'hidden' (see database_catalog_unused_dir_hide_timeout_sec) and this directory was not modified for last database_catalog_unused_dir_rm_timeout_sec seconds, the task will remove this directory. It also works for directories that clickhouse-server does not expect to see inside store/. Zero means 'never'.", 0) \ + M(UInt64, database_catalog_unused_dir_cleanup_period_sec, 24 * 60 * 60, "Parameter of a task that cleans up garbage from store/ directory. Sets scheduling period of the task. Zero means 'never'.", 0) \ + M(UInt64, database_catalog_drop_error_cooldown_sec, 5, "In case if drop table failed, ClickHouse will wait for this timeout before retrying the operation.", 0) \ + M(UInt64, database_catalog_drop_table_concurrency, 16, "The size of the threadpool used for dropping tables.", 0) \ + \ + \ M(UInt64, max_concurrent_queries, 0, "Maximum number of concurrently executed queries. Zero means unlimited.", 0) \ M(UInt64, max_concurrent_insert_queries, 0, "Maximum number of concurrently INSERT queries. Zero means unlimited.", 0) \ M(UInt64, max_concurrent_select_queries, 0, "Maximum number of concurrently SELECT queries. Zero means unlimited.", 0) \ diff --git a/src/IO/SharedThreadPools.cpp b/src/IO/SharedThreadPools.cpp index 3606ddd984c..cda7bc01bbf 100644 --- a/src/IO/SharedThreadPools.cpp +++ b/src/IO/SharedThreadPools.cpp @@ -23,6 +23,9 @@ namespace CurrentMetrics extern const Metric MergeTreeUnexpectedPartsLoaderThreads; extern const Metric MergeTreeUnexpectedPartsLoaderThreadsActive; extern const Metric MergeTreeUnexpectedPartsLoaderThreadsScheduled; + extern const Metric DatabaseCatalogThreads; + extern const Metric DatabaseCatalogThreadsActive; + extern const Metric DatabaseCatalogThreadsScheduled; extern const Metric DatabaseReplicatedCreateTablesThreads; extern const Metric DatabaseReplicatedCreateTablesThreadsActive; extern const Metric DatabaseReplicatedCreateTablesThreadsScheduled; @@ -166,4 +169,11 @@ StaticThreadPool & getDatabaseReplicatedCreateTablesThreadPool() return instance; } +/// ThreadPool used for dropping tables. 
+StaticThreadPool & getDatabaseCatalogDropTablesThreadPool() +{ + static StaticThreadPool instance("DropTablesThreadPool", CurrentMetrics::DatabaseCatalogThreads, CurrentMetrics::DatabaseCatalogThreadsActive, CurrentMetrics::DatabaseCatalogThreadsScheduled); + return instance; +} + } diff --git a/src/IO/SharedThreadPools.h b/src/IO/SharedThreadPools.h index 50adc70c9a0..06ccebd20b2 100644 --- a/src/IO/SharedThreadPools.h +++ b/src/IO/SharedThreadPools.h @@ -69,4 +69,7 @@ StaticThreadPool & getUnexpectedPartsLoadingThreadPool(); /// ThreadPool used for creating tables in DatabaseReplicated. StaticThreadPool & getDatabaseReplicatedCreateTablesThreadPool(); +/// ThreadPool used for dropping tables. +StaticThreadPool & getDatabaseCatalogDropTablesThreadPool(); + } diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index bb2dd158710..f64f8a06f38 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include #include @@ -189,13 +191,6 @@ StoragePtr TemporaryTableHolder::getTable() const void DatabaseCatalog::initializeAndLoadTemporaryDatabase() { - drop_delay_sec = getContext()->getConfigRef().getInt("database_atomic_delay_before_drop_table_sec", default_drop_delay_sec); - unused_dir_hide_timeout_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_hide_timeout_sec", unused_dir_hide_timeout_sec); - unused_dir_rm_timeout_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_rm_timeout_sec", unused_dir_rm_timeout_sec); - unused_dir_cleanup_period_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_cleanup_period_sec", unused_dir_cleanup_period_sec); - drop_error_cooldown_sec = getContext()->getConfigRef().getInt64("database_catalog_drop_error_cooldown_sec", drop_error_cooldown_sec); - drop_table_concurrency = getContext()->getConfigRef().getInt64("database_catalog_drop_table_concurrency", drop_table_concurrency); - auto db_for_temporary_and_external_tables = std::make_shared(TEMPORARY_DATABASE, getContext()); attachDatabase(TEMPORARY_DATABASE, db_for_temporary_and_external_tables); } @@ -203,7 +198,7 @@ void DatabaseCatalog::initializeAndLoadTemporaryDatabase() void DatabaseCatalog::createBackgroundTasks() { /// It has to be done before databases are loaded (to avoid a race condition on initialization) - if (Context::getGlobalContextInstance()->getApplicationType() == Context::ApplicationType::SERVER && unused_dir_cleanup_period_sec) + if (Context::getGlobalContextInstance()->getApplicationType() == Context::ApplicationType::SERVER && getContext()->getServerSettings().database_catalog_unused_dir_cleanup_period_sec) { auto cleanup_task_holder = getContext()->getSchedulePool().createTask("DatabaseCatalogCleanupStoreDirectoryTask", [this]() { this->cleanupStoreDirectoryTask(); }); @@ -224,7 +219,7 @@ void DatabaseCatalog::startupBackgroundTasks() { (*cleanup_task)->activate(); /// Do not start task immediately on server startup, it's not urgent. - (*cleanup_task)->scheduleAfter(unused_dir_hide_timeout_sec * 1000); + (*cleanup_task)->scheduleAfter(static_cast(getContext()->getServerSettings().database_catalog_unused_dir_hide_timeout_sec) * 1000); } (*drop_task)->activate(); @@ -1038,15 +1033,12 @@ void DatabaseCatalog::loadMarkedAsDroppedTables() LOG_INFO(log, "Found {} partially dropped tables. 
Will load them and retry removal.", dropped_metadata.size()); - ThreadPool pool(CurrentMetrics::DatabaseCatalogThreads, CurrentMetrics::DatabaseCatalogThreadsActive, CurrentMetrics::DatabaseCatalogThreadsScheduled); + ThreadPoolCallbackRunnerLocal runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables"); for (const auto & elem : dropped_metadata) { - pool.scheduleOrThrowOnError([&]() - { - this->enqueueDroppedTableCleanup(elem.second, nullptr, elem.first); - }); + runner([this, &elem](){ this->enqueueDroppedTableCleanup(elem.second, nullptr, elem.first); }); } - pool.wait(); + runner.waitForAllToFinishAndRethrowFirstError(); } String DatabaseCatalog::getPathForDroppedMetadata(const StorageID & table_id) const @@ -1135,7 +1127,13 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr } else { - tables_marked_dropped.push_back({table_id, table, dropped_metadata_path, drop_time + drop_delay_sec}); + tables_marked_dropped.push_back + ({ + table_id, + table, + dropped_metadata_path, + drop_time + static_cast(getContext()->getServerSettings().database_atomic_delay_before_drop_table_sec) + }); if (first_async_drop_in_queue == tables_marked_dropped.end()) --first_async_drop_in_queue; } @@ -1289,13 +1287,7 @@ void DatabaseCatalog::dropTablesParallel(std::vector runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables"); for (const auto & item : tables_to_drop) { @@ -1332,7 +1324,7 @@ void DatabaseCatalog::dropTablesParallel(std::vectordrop_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()) + drop_error_cooldown_sec; + table_iterator->drop_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()) + getContext()->getServerSettings().database_catalog_drop_error_cooldown_sec; if (first_async_drop_in_queue == tables_marked_dropped.end()) --first_async_drop_in_queue; @@ -1342,7 +1334,7 @@ void DatabaseCatalog::dropTablesParallel(std::vectorscheduleAfter(unused_dir_cleanup_period_sec * 1000); + (*cleanup_task)->scheduleAfter(static_cast(getContext()->getServerSettings().database_catalog_unused_dir_cleanup_period_sec) * 1000); } bool DatabaseCatalog::maybeRemoveDirectory(const String & disk_name, const DiskPtr & disk, const String & unused_dir) @@ -1742,7 +1734,7 @@ bool DatabaseCatalog::maybeRemoveDirectory(const String & disk_name, const DiskP time_t current_time = time(nullptr); if (st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) { - if (current_time <= max_modification_time + unused_dir_hide_timeout_sec) + if (current_time <= max_modification_time + static_cast(getContext()->getServerSettings().database_catalog_unused_dir_hide_timeout_sec)) return false; LOG_INFO(log, "Removing access rights for unused directory {} from disk {} (will remove it when timeout exceed)", unused_dir, disk_name); @@ -1758,6 +1750,8 @@ bool DatabaseCatalog::maybeRemoveDirectory(const String & disk_name, const DiskP } else { + auto unused_dir_rm_timeout_sec = static_cast(getContext()->getServerSettings().database_catalog_unused_dir_rm_timeout_sec); + if (!unused_dir_rm_timeout_sec) return false; diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 23e38a6445e..83a302f117d 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -354,23 +354,8 @@ private: mutable std::mutex tables_marked_dropped_mutex; std::unique_ptr drop_task; - static constexpr time_t default_drop_delay_sec = 8 * 60; - time_t drop_delay_sec = default_drop_delay_sec; std::condition_variable 
wait_table_finally_dropped; - std::unique_ptr cleanup_task; - static constexpr time_t default_unused_dir_hide_timeout_sec = 60 * 60; /// 1 hour - time_t unused_dir_hide_timeout_sec = default_unused_dir_hide_timeout_sec; - static constexpr time_t default_unused_dir_rm_timeout_sec = 30 * 24 * 60 * 60; /// 30 days - time_t unused_dir_rm_timeout_sec = default_unused_dir_rm_timeout_sec; - static constexpr time_t default_unused_dir_cleanup_period_sec = 24 * 60 * 60; /// 1 day - time_t unused_dir_cleanup_period_sec = default_unused_dir_cleanup_period_sec; - - static constexpr time_t default_drop_error_cooldown_sec = 5; - time_t drop_error_cooldown_sec = default_drop_error_cooldown_sec; - - static constexpr size_t default_drop_table_concurrency = 10; - size_t drop_table_concurrency = default_drop_table_concurrency; std::unique_ptr reload_disks_task; std::mutex reload_disks_mutex; diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index bad3e5277db..d8056ddd1a3 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -424,18 +425,28 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, auto table_context = Context::createCopy(getContext()); table_context->setInternalQuery(true); /// Do not hold extra shared pointers to tables - std::vector> tables_to_drop; + std::vector> tables_to_drop; // NOTE: This means we wait for all tables to be loaded inside getTablesIterator() call in case of `async_load_databases = true`. for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next()) { auto table_ptr = iterator->table(); - table_ptr->flushAndPrepareForShutdown(); - tables_to_drop.push_back({iterator->name(), table_ptr->isDictionary()}); + tables_to_drop.push_back({table_ptr->getStorageID(), table_ptr->isDictionary()}); } + /// Prepare tables for shutdown in parallel. 
+ ThreadPoolCallbackRunnerLocal runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables"); + for (const auto & [name, _] : tables_to_drop) + { + auto table_ptr = DatabaseCatalog::instance().getTable(name, table_context); + runner([my_table_ptr = std::move(table_ptr)](){ + my_table_ptr->flushAndPrepareForShutdown(); + }); + } + runner.waitForAllToFinishAndRethrowFirstError(); + for (const auto & table : tables_to_drop) { - query_for_table.setTable(table.first); + query_for_table.setTable(table.first.getTableName()); query_for_table.is_dictionary = table.second; DatabasePtr db; UUID table_to_wait = UUIDHelpers::Nil; From 1427b16689601d7dd29d26de99b233c132905fde Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 26 Jul 2024 21:44:15 +0200 Subject: [PATCH 1180/2361] Fixed style --- src/Interpreters/DatabaseCatalog.cpp | 3 --- src/Interpreters/InterpreterDropQuery.cpp | 3 ++- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index f64f8a06f38..30b151eb81d 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -50,9 +50,6 @@ namespace CurrentMetrics { extern const Metric TablesToDropQueueSize; - extern const Metric DatabaseCatalogThreads; - extern const Metric DatabaseCatalogThreadsActive; - extern const Metric DatabaseCatalogThreadsScheduled; } namespace DB diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index d8056ddd1a3..ef560ec3405 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -438,7 +438,8 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, for (const auto & [name, _] : tables_to_drop) { auto table_ptr = DatabaseCatalog::instance().getTable(name, table_context); - runner([my_table_ptr = std::move(table_ptr)](){ + runner([my_table_ptr = std::move(table_ptr)]() + { my_table_ptr->flushAndPrepareForShutdown(); }); } From 1096a4ff33497c64eb786f6dbb603b18ccf804b1 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 29 Jul 2024 13:37:36 +0000 Subject: [PATCH 1181/2361] Fixed occasional LOGICAL_ERROR --- src/Interpreters/DatabaseCatalog.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 30b151eb81d..eaf8cf1cc82 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1336,7 +1336,7 @@ void DatabaseCatalog::dropTablesParallel(std::vector Date: Tue, 30 Jul 2024 00:39:16 +0200 Subject: [PATCH 1182/2361] Dont throw --- src/Interpreters/DatabaseCatalog.cpp | 30 ++++++++++------------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index eaf8cf1cc82..98526e5c1cd 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1329,26 +1329,10 @@ void DatabaseCatalog::dropTablesParallel(std::vector Date: Mon, 29 Jul 2024 23:10:13 +0000 Subject: [PATCH 1183/2361] Fix Dwarf range list parsing in stack symbolizer --- src/Common/Dwarf.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Common/Dwarf.cpp b/src/Common/Dwarf.cpp index 8439c01b22c..1f22e3b05aa 100644 --- a/src/Common/Dwarf.cpp +++ b/src/Common/Dwarf.cpp @@ -1559,8 +1559,7 @@ bool Dwarf::isAddrInRangeList(const CompilationUnit & cu, auto sp_start = 
addr_.substr(*cu.addr_base + index_start * sizeof(uint64_t)); auto start = read(sp_start); - auto sp_end = addr_.substr(*cu.addr_base + index_start * sizeof(uint64_t) + length); - auto end = read(sp_end); + auto end = start + length; if (start != end && address >= start && address < end) { return true; From 4a42ddc18e57576d119b1416f1da06b9ec292fce Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Sat, 27 Jul 2024 02:36:54 +0000 Subject: [PATCH 1184/2361] Make Dwarf::findAddress() fallback slow path less slow --- src/Common/Dwarf.cpp | 42 ++++++++++++++++++++++++++++++++++++++---- src/Common/Dwarf.h | 3 ++- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/src/Common/Dwarf.cpp b/src/Common/Dwarf.cpp index 8439c01b22c..eda479607b1 100644 --- a/src/Common/Dwarf.cpp +++ b/src/Common/Dwarf.cpp @@ -1029,7 +1029,8 @@ bool Dwarf::findLocation( const LocationInfoMode mode, CompilationUnit & cu, LocationInfo & info, - std::vector & inline_frames) const + std::vector & inline_frames, + bool assume_in_cu_range) const { Die die = getDieAtOffset(cu, cu.first_die); // Partial compilation unit (DW_TAG_partial_unit) is not supported. @@ -1041,6 +1042,11 @@ bool Dwarf::findLocation( std::optional main_file_name; std::optional base_addr_cu; + std::optional low_pc; + std::optional high_pc; + std::optional is_high_pc_addr; + std::optional range_offset; + forEachAttribute(cu, die, [&](const Attribute & attr) { switch (attr.spec.name) // NOLINT(bugprone-switch-missing-default-case) @@ -1058,18 +1064,46 @@ bool Dwarf::findLocation( // File name of main file being compiled main_file_name = std::get(attr.attr_value); break; - case DW_AT_low_pc: case DW_AT_entry_pc: // 2.17.1: historically DW_AT_low_pc was used. DW_AT_entry_pc was // introduced in DWARF3. Support either to determine the base address of // the CU. base_addr_cu = std::get(attr.attr_value); break; + case DW_AT_ranges: + range_offset = std::get(attr.attr_value); + break; + case DW_AT_low_pc: + low_pc = std::get(attr.attr_value); + base_addr_cu = std::get(attr.attr_value); + break; + case DW_AT_high_pc: + // The value of the DW_AT_high_pc attribute can be + // an address (DW_FORM_addr*) or an offset (DW_FORM_data*). + is_high_pc_addr = attr.spec.form == DW_FORM_addr || // + attr.spec.form == DW_FORM_addrx || // + attr.spec.form == DW_FORM_addrx1 || // + attr.spec.form == DW_FORM_addrx2 || // + attr.spec.form == DW_FORM_addrx3 || // + attr.spec.form == DW_FORM_addrx4; + high_pc = std::get(attr.attr_value); + break; } // Iterate through all attributes until find all above. return true; }); + /// Check if the address falls inside this unit's address ranges. + if (!assume_in_cu_range && ((low_pc && high_pc) || range_offset)) { + bool pc_match = low_pc && high_pc && is_high_pc_addr && address >= *low_pc + && (address < (*is_high_pc_addr ? 
*high_pc : *low_pc + *high_pc)); + bool range_match = range_offset && isAddrInRangeList(cu, address, base_addr_cu, range_offset.value(), cu.addr_size); + if (!pc_match && !range_match) + { + return false; + } + } + if (main_file_name) { info.has_main_file = true; @@ -1442,7 +1476,7 @@ bool Dwarf::findAddress( { return false; } - findLocation(address, mode, unit, locationInfo, inline_frames); + findLocation(address, mode, unit, locationInfo, inline_frames, /*assume_in_cu_range*/ true); return locationInfo.has_file_and_line; } else if (mode == LocationInfoMode::FAST) @@ -1471,7 +1505,7 @@ bool Dwarf::findAddress( { continue; } - findLocation(address, mode, unit, locationInfo, inline_frames); + findLocation(address, mode, unit, locationInfo, inline_frames, /*assume_in_cu_range*/ false); } return locationInfo.has_file_and_line; diff --git a/src/Common/Dwarf.h b/src/Common/Dwarf.h index da18b3affa0..d754191bfa9 100644 --- a/src/Common/Dwarf.h +++ b/src/Common/Dwarf.h @@ -283,7 +283,8 @@ private: LocationInfoMode mode, CompilationUnit & cu, LocationInfo & info, - std::vector & inline_frames) const; + std::vector & inline_frames, + bool assume_in_cu_range) const; /** * Finds a subprogram debugging info entry that contains a given address among From b0629726a05701341e5d96207fe5e3743cd9345b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 03:23:19 +0200 Subject: [PATCH 1185/2361] Update src/Common/Dwarf.cpp --- src/Common/Dwarf.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Common/Dwarf.cpp b/src/Common/Dwarf.cpp index eda479607b1..f17219e9bf5 100644 --- a/src/Common/Dwarf.cpp +++ b/src/Common/Dwarf.cpp @@ -1094,7 +1094,8 @@ bool Dwarf::findLocation( }); /// Check if the address falls inside this unit's address ranges. - if (!assume_in_cu_range && ((low_pc && high_pc) || range_offset)) { + if (!assume_in_cu_range && ((low_pc && high_pc) || range_offset)) + { bool pc_match = low_pc && high_pc && is_high_pc_addr && address >= *low_pc && (address < (*is_high_pc_addr ? 
*high_pc : *low_pc + *high_pc)); bool range_match = range_offset && isAddrInRangeList(cu, address, base_addr_cu, range_offset.value(), cu.addr_size); From 8f920d064ccca8ed8d9341b10d54ebef7500484c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 04:05:43 +0200 Subject: [PATCH 1186/2361] Fix inconsistent formatting of CODEC and STATISTICS --- src/Parsers/ASTFunction.cpp | 4 +++- src/Parsers/ASTFunction.h | 2 ++ src/Parsers/ExpressionElementParsers.cpp | 2 ++ src/Parsers/FunctionSecretArgumentsFinderAST.h | 4 +++- src/Parsers/IAST.h | 1 + src/Storages/StatisticsDescription.cpp | 1 + 6 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index 230d4c778e8..cd9e910d45a 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -285,6 +285,8 @@ static bool formatNamedArgWithHiddenValue(IAST * arg, const IAST::FormatSettings void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { frame.expression_list_prepend_whitespace = false; + if (kind == Kind::CODEC || kind == Kind::STATISTICS || kind == Kind::BACKUP_NAME) + frame.allow_operators = false; FormatStateStacked nested_need_parens = frame; FormatStateStacked nested_dont_need_parens = frame; nested_need_parens.need_parens = true; @@ -308,7 +310,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format /// Should this function to be written as operator? bool written = false; - if (arguments && !parameters && nulls_action == NullsAction::EMPTY) + if (arguments && !parameters && frame.allow_operators && nulls_action == NullsAction::EMPTY) { /// Unary prefix operators. if (arguments->children.size() == 1) diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index be2b6beae54..1b4a5928d1c 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -58,6 +58,8 @@ public: TABLE_ENGINE, DATABASE_ENGINE, BACKUP_NAME, + CODEC, + STATISTICS, }; Kind kind = Kind::ORDINARY_FUNCTION; diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 865d07faaa7..9927acdcf17 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -696,6 +696,7 @@ bool ParserCodec::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) auto function_node = std::make_shared(); function_node->name = "CODEC"; + function_node->kind = ASTFunction::Kind::CODEC; function_node->arguments = expr_list_args; function_node->children.push_back(function_node->arguments); @@ -723,6 +724,7 @@ bool ParserStatisticsType::parseImpl(Pos & pos, ASTPtr & node, Expected & expect auto function_node = std::make_shared(); function_node->name = "STATISTICS"; + function_node->kind = ASTFunction::Kind::STATISTICS; function_node->arguments = stat_type; function_node->children.push_back(function_node->arguments); node = function_node; diff --git a/src/Parsers/FunctionSecretArgumentsFinderAST.h b/src/Parsers/FunctionSecretArgumentsFinderAST.h index 5b77485afb0..94da30922cc 100644 --- a/src/Parsers/FunctionSecretArgumentsFinderAST.h +++ b/src/Parsers/FunctionSecretArgumentsFinderAST.h @@ -33,7 +33,9 @@ public: { case ASTFunction::Kind::ORDINARY_FUNCTION: findOrdinaryFunctionSecretArguments(); break; case ASTFunction::Kind::WINDOW_FUNCTION: break; - case ASTFunction::Kind::LAMBDA_FUNCTION: break; + case ASTFunction::Kind::LAMBDA_FUNCTION: break; + case ASTFunction::Kind::CODEC: break; + case 
ASTFunction::Kind::STATISTICS: break; case ASTFunction::Kind::TABLE_ENGINE: findTableEngineSecretArguments(); break; case ASTFunction::Kind::DATABASE_ENGINE: findDatabaseEngineSecretArguments(); break; case ASTFunction::Kind::BACKUP_NAME: findBackupNameSecretArguments(); break; diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index d70c1cd0b6c..e2cf7579667 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -256,6 +256,7 @@ public: bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element. bool expression_list_prepend_whitespace = false; /// Prepend whitespace (if it is required) bool surround_each_list_element_with_parens = false; + bool allow_operators = true; /// Format some functions, such as "plus", "in", etc. as operators. size_t list_element_index = 0; const IAST * current_select = nullptr; }; diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 9c5fd3604b2..63c849e3806 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -193,6 +193,7 @@ ASTPtr ColumnStatisticsDescription::getAST() const { auto function_node = std::make_shared(); function_node->name = "STATISTICS"; + function_node->kind = ASTFunction::Kind::STATISTICS; function_node->arguments = std::make_shared(); for (const auto & [type, desc] : types_to_desc) { From c1e7b7be89f5c5e39318ab093b00fa6cd8114ff3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 04:07:46 +0200 Subject: [PATCH 1187/2361] Add a test --- ...nconsistent_formatting_of_codecs_statistics.reference | 2 ++ ...03214_inconsistent_formatting_of_codecs_statistics.sh | 9 +++++++++ 2 files changed, 11 insertions(+) create mode 100644 tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.reference create mode 100755 tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.sh diff --git a/tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.reference b/tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.reference new file mode 100644 index 00000000000..7213baa3e5b --- /dev/null +++ b/tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.reference @@ -0,0 +1,2 @@ +ALTER TABLE t MODIFY COLUMN `c` CODEC(in(1, 2)) +ALTER TABLE t MODIFY COLUMN `c` STATISTICS(plus(1, 2)) diff --git a/tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.sh b/tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.sh new file mode 100755 index 00000000000..c3f8d89b9a4 --- /dev/null +++ b/tests/queries/0_stateless/03214_inconsistent_formatting_of_codecs_statistics.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# Ensure that these (possibly incorrect) queries can at least be parsed back after formatting. 
+$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t MODIFY COLUMN c CODEC(in(1, 2))" | $CLICKHOUSE_FORMAT --oneline +$CLICKHOUSE_FORMAT --oneline --query "ALTER TABLE t MODIFY COLUMN c STATISTICS(plus(1, 2))" | $CLICKHOUSE_FORMAT --oneline From cb6b6329c8e763f61f70797a95dab8ef24fd47d1 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Tue, 30 Jul 2024 10:45:36 +0800 Subject: [PATCH 1188/2361] add session timezone settings --- tests/queries/0_stateless/03198_orc_read_time_zone.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03198_orc_read_time_zone.sh b/tests/queries/0_stateless/03198_orc_read_time_zone.sh index 7e931e16e48..7d1da0c1579 100755 --- a/tests/queries/0_stateless/03198_orc_read_time_zone.sh +++ b/tests/queries/0_stateless/03198_orc_read_time_zone.sh @@ -8,5 +8,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "drop table if exists test_orc_read_timezone" $CLICKHOUSE_CLIENT -q "create table test_orc_read_timezone(id UInt64, t DateTime64) Engine=MergeTree order by id" $CLICKHOUSE_CLIENT -q "insert into test_orc_read_timezone from infile '$CURDIR/data_orc/test_reader_time_zone.snappy.orc' SETTINGS input_format_orc_reader_time_zone_name='Asia/Shanghai' FORMAT ORC" -$CLICKHOUSE_CLIENT -q "select * from test_orc_read_timezone" +$CLICKHOUSE_CLIENT -q "select * from test_orc_read_timezone SETTINGS session_timezone='Asia/Shanghai'" $CLICKHOUSE_CLIENT -q "drop table test_orc_read_timezone" \ No newline at end of file From dd5819ab6ab2df3231737a4808d0445ad5345555 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 05:16:09 +0200 Subject: [PATCH 1189/2361] Changelog sanity --- CHANGELOG.md | 55 +++++++++++++++++++++++++--------------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 620b7c99bac..06f7bcdd84e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ * Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)). * Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). +* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). #### New Feature * Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). @@ -32,13 +33,14 @@ * Add a new setting to disable/enable writing page index into parquet files. 
[#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)). * Introduce `logger.console_log_level` server config to control the log level to the console (if enabled). [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)). * Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)). -* Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)). +* Add `--memory-usage` option to client in non-interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)). * Make an interactive client for clickhouse-disks, add local disk from the local directory. [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)). * When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)). +* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)). #### Experimental Feature * Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)). -* Support rocksdb as backend storage of keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). +* Support rocksdb as backend storage of clickhouse-keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). * Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)). * Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)). * Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). @@ -46,8 +48,8 @@ #### Performance Improvement * Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)). -* Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. 
[#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)). -* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)). +* Sizes of hash tables created by join (`parallel_hash` algorithm) are collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)). +* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)). * Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)). @@ -59,11 +61,11 @@ * DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)). #### Improvement +* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)). * The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)). * The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)). -* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. 
- on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). -* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). +* Allow matching column names in a case-insensitive manner when reading json files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). * Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). * In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). @@ -71,7 +73,6 @@ * Support aliases in parametrized view function (only new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)). * Updated to mask account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). * Partition pruning for `IN` predicates when filter expression is a part of `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)). -* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)). * `arrayMin`/`arrayMax` can be applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)). * Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)). * Do not create format settings for each row when serializing chunks to insert to EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)). @@ -80,36 +81,35 @@ * Disable filesystem cache background download by default. It will be enabled back when we fix the issue with possible "Memory limit exceeded" because memory deallocation is done outside of query context (while buffer is allocated inside of query context) if we use background download threads. Plus we need to add a separate setting to define max size to download for background workers (currently it is limited by max_file_segment_size, which might be too big). 
[#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)). * Add new option to config `` which allow to specify how often clickhouse will reload config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)). * Implement binary encoding for ClickHouse data types and add its specification in docs. Use it in Dynamic binary serialization, allow to use it in RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)). -* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)). * Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)). * Add support for user identification based on x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)). * `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Add a script to backup your files to ClickHouse. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* PostgreSQL source support cancel. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)). -* Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* PostgreSQL source to support query cancellations. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)). +* Make `allow_experimental_analyzer` be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). * Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). -* Allow to use `concat` function with empty arguments ``` sql :) select concat();. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([æŽæ‰¬](https://github.com/taiyang-li)). -* Allow controlling named collections in clickhouse-local. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)). -* Support ORC file read by writer time zone. 
[#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)). -* Add settings to control connection to the PostgreSQL. * Setting `postgresql_connection_attempt_timeout` specifies the value passed to `connect_timeout` parameter of connection URL. * Setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)). -* Reduce inaccuracy of input_wait_elapsed_us/input_wait_elapsed_us/elapsed_us. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)). -* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)). -* Add settings to ignore ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Allow to use `concat` function with empty arguments `:) select concat();`. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([æŽæ‰¬](https://github.com/taiyang-li)). +* Allow controlling named collections in `clickhouse-local`. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve Azure-related profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)). +* Support ORC file read by writer's time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)). +* Add settings to control connections to PostgreSQL. The setting `postgresql_connection_attempt_timeout` specifies the value passed to `connect_timeout` parameter of connection URL. The setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)). +* Reduce inaccuracy of `input_wait_elapsed_us`/`elapsed_us` in the `system.processors_profile_log`. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)). +* Improve ProfileEvents for the filesystem cache. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)). +* Add settings to ignore the `ON CLUSTER` clause in queries for named collection management with the replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). * Function `generateSnowflakeID` now allows to specify a machine ID as a parameter to prevent collisions in large clusters. [#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)). -* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Add option for validating the Primary key type in Dictionaries. Without this option for simple layouts any column type will be implicitly converted to UInt64. 
### Documentation entry for user-facing changes. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Disable suspending on `Ctrl+Z` in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add option for validating the primary key type in Dictionaries. Without this option for simple layouts any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). #### Bug Fix (user-visible misbehavior in an official stable release) -* Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). * Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix unexpected sizes of `LowCardinality` columns in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). * Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). -* Fix the VALID UNTIL clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). -* Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix the `VALID UNTIL` clause in the user definition resetting after a restart. [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix the remaining time column in `SHOW MERGES`. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). -* Fixed crash while using MaterializedMySQL with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)). -* Fix logical error when PREWHERE expression read no columns and table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)). -* Fix bug with cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)). +* Fixed crash while using `MaterializedMySQL` (which is an unsupported, experimental feature) with TABLE OVERRIDE that maps MySQL NULL field into ClickHouse not NULL field. 
[#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)). +* Fix logical error when `PREWHERE` expression read no columns and table has no adaptive index granularity (very old table). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)). +* Fix bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)). * Fix filling parts columns from metadata (when columns.txt does not exists). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)). * Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). * Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). @@ -178,9 +178,6 @@ * Fix `indexHint` function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)). * Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)). -#### Build/Testing/Packaging Improvement -* Instantiate template methods ahead in different .cpp files, avoid too large translation units during compiling. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)). - ### ClickHouse release 24.6, 2024-07-01 #### Backward Incompatible Change From 3a7ffb3284003d853974baf12cf442bdc1105143 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 05:19:10 +0200 Subject: [PATCH 1190/2361] Changelog sanity --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06f7bcdd84e..9d1a63cb3a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -156,7 +156,7 @@ * Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). * Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix error reporting while copying to S3 or AzureBlobStorage. [#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)). -* Prevent watchdog from keeping descriptors of unlinked(rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Prevent watchdog from keeping descriptors of unlinked (rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)). * Fix the bug that logicalexpressionoptimizerpass lost logical type of constant. [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)). 
* Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix possible incorrect result for queries joining and filtering table external engine (like PostgreSQL), due to too aggressive filter pushdown. Since now, conditions from where section won't be send to external database in case of outer join with external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)). From 368b9a058379c2e7902fd9ee7a21b664f0500df9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 05:29:36 +0200 Subject: [PATCH 1191/2361] Changelog sanity --- CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d1a63cb3a9..722ae4f8268 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,11 +39,11 @@ * Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)). #### Experimental Feature -* Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)). -* Support rocksdb as backend storage of clickhouse-keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). +* Change binary serialization of the `Variant` data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with single variant or with only NULL values. Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)). +* Support on-disk backend storage for clickhouse-keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). * Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)). -* Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)). -* Fix reading dynamic subcolumns from altered Memory table. Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). +* Support null map subcolumn for `Variant` and `Dynamic` subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix reading `Dynamic` subcolumns from altered `Memory` table. 
Previously if `max_types` parameter of a Dynamic type was changed in Memory table via alter, further subcolumns reading can return wrong result. [#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). * Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). #### Performance Improvement From 4df37538820f4874f54852e2418cf560c9da9ecc Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 30 Jul 2024 03:50:28 +0000 Subject: [PATCH 1192/2361] Fix bloom filter index breaking some queries --- .../MergeTree/MergeTreeIndexBloomFilter.cpp | 115 ++++++++++-------- .../00908_bloom_filter_index.reference | 2 + .../0_stateless/00908_bloom_filter_index.sh | 4 + .../00945_bloom_filter_index.reference | 5 + .../0_stateless/00945_bloom_filter_index.sql | 9 ++ 5 files changed, 83 insertions(+), 52 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index c6a00751f25..dc314ce53d4 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -371,67 +371,78 @@ bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTre bool MergeTreeIndexConditionBloomFilter::traverseFunction(const RPNBuilderTreeNode & node, RPNElement & out, const RPNBuilderTreeNode * parent) { - bool maybe_useful = false; + if (!node.isFunction()) + return false; - if (node.isFunction()) + const auto function = node.toFunctionNode(); + auto arguments_size = function.getArgumentsSize(); + auto function_name = function.getFunctionName(); + + if (parent == nullptr) { - const auto function = node.toFunctionNode(); - auto arguments_size = function.getArgumentsSize(); - auto function_name = function.getFunctionName(); - + /// Recurse a little bit for indexOf(). 
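        /// For example, for `greater(indexOf(key, 42), 0)` this loop visits the nested indexOf() call
        /// so it can produce an atom; the `(function_name == "indexOf") != (parent != nullptr)` check
        /// further down then rejects a bare indexOf() at the top level as well as any other supported
        /// atom that is nested under another function.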
for (size_t i = 0; i < arguments_size; ++i) { auto argument = function.getArgumentAt(i); if (traverseFunction(argument, out, &node)) - maybe_useful = true; - } - - if (arguments_size != 2) - return false; - - auto lhs_argument = function.getArgumentAt(0); - auto rhs_argument = function.getArgumentAt(1); - - if (functionIsInOrGlobalInOperator(function_name)) - { - if (auto future_set = rhs_argument.tryGetPreparedSet(); future_set) - { - if (auto prepared_set = future_set->buildOrderedSetInplace(rhs_argument.getTreeContext().getQueryContext()); prepared_set) - { - if (prepared_set->hasExplicitSetElements()) - { - const auto prepared_info = getPreparedSetInfo(prepared_set); - if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out)) - maybe_useful = true; - } - } - } - } - else if (function_name == "equals" || - function_name == "notEquals" || - function_name == "has" || - function_name == "mapContains" || - function_name == "indexOf" || - function_name == "hasAny" || - function_name == "hasAll") - { - Field const_value; - DataTypePtr const_type; - - if (rhs_argument.tryGetConstant(const_value, const_type)) - { - if (traverseTreeEquals(function_name, lhs_argument, const_type, const_value, out, parent)) - maybe_useful = true; - } - else if (lhs_argument.tryGetConstant(const_value, const_type)) - { - if (traverseTreeEquals(function_name, rhs_argument, const_type, const_value, out, parent)) - maybe_useful = true; - } + return true; } } - return maybe_useful; + if (arguments_size != 2) + return false; + + /// indexOf() should be inside comparison function, e.g. greater(indexOf(key, 42), 0). + /// Other conditions should be at top level, e.g. equals(key, 42), not equals(equals(key, 42), 1). + if ((function_name == "indexOf") != (parent != nullptr)) + return false; + + auto lhs_argument = function.getArgumentAt(0); + auto rhs_argument = function.getArgumentAt(1); + + if (functionIsInOrGlobalInOperator(function_name)) + { + if (auto future_set = rhs_argument.tryGetPreparedSet(); future_set) + { + if (auto prepared_set = future_set->buildOrderedSetInplace(rhs_argument.getTreeContext().getQueryContext()); prepared_set) + { + if (prepared_set->hasExplicitSetElements()) + { + const auto prepared_info = getPreparedSetInfo(prepared_set); + if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out)) + return true; + } + } + } + return false; + } + + if (function_name == "equals" || + function_name == "notEquals" || + function_name == "has" || + function_name == "mapContains" || + function_name == "indexOf" || + function_name == "hasAny" || + function_name == "hasAll") + { + Field const_value; + DataTypePtr const_type; + + if (rhs_argument.tryGetConstant(const_value, const_type)) + { + if (traverseTreeEquals(function_name, lhs_argument, const_type, const_value, out, parent)) + return true; + } + else if (lhs_argument.tryGetConstant(const_value, const_type) && (function_name == "equals" || function_name == "notEquals")) + { + if (traverseTreeEquals(function_name, rhs_argument, const_type, const_value, out, parent)) + return true; + } + + return false; + } + + return false; } bool MergeTreeIndexConditionBloomFilter::traverseTreeIn( diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.reference b/tests/queries/0_stateless/00908_bloom_filter_index.reference index c0cbd6c0335..e2e13a9ed12 100644 --- a/tests/queries/0_stateless/00908_bloom_filter_index.reference +++ 
b/tests/queries/0_stateless/00908_bloom_filter_index.reference @@ -28,6 +28,8 @@ "rows_read": 3, 8 aбвгдеёж "rows_read": 2, +13 +1 1 column-oriented 2 column-oriented "rows_read": 4, diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh index 88fc7944236..25a6567b894 100755 --- a/tests/queries/0_stateless/00908_bloom_filter_index.sh +++ b/tests/queries/0_stateless/00908_bloom_filter_index.sh @@ -103,6 +103,10 @@ $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filte $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE (s, lower(s)) IN (('aбвгдеёж', 'aбвгдеёж'), ('abc', 'cba')) ORDER BY k" $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT * FROM bloom_filter_idx WHERE (s, lower(s)) IN (('aбвгдеёж', 'aбвгдеёж'), ('abc', 'cba')) ORDER BY k FORMAT JSON" | grep "rows_read" +# Weird conditions not supported by the index. +$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom_filter_idx WHERE (s = 'asd') = (s = 'asd')" +$CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom_filter_idx WHERE has(['asd', 'some string'], s)" + # TOKEN BF $CLICKHOUSE_CLIENT -n --query=" diff --git a/tests/queries/0_stateless/00945_bloom_filter_index.reference b/tests/queries/0_stateless/00945_bloom_filter_index.reference index c0c2254648e..e6751fe4762 100644 --- a/tests/queries/0_stateless/00945_bloom_filter_index.reference +++ b/tests/queries/0_stateless/00945_bloom_filter_index.reference @@ -14,6 +14,11 @@ 0 2 2 +18 +100 +100 +3 +100 1 1 1 diff --git a/tests/queries/0_stateless/00945_bloom_filter_index.sql b/tests/queries/0_stateless/00945_bloom_filter_index.sql index 4c26988574a..2b7feacbd98 100644 --- a/tests/queries/0_stateless/00945_bloom_filter_index.sql +++ b/tests/queries/0_stateless/00945_bloom_filter_index.sql @@ -25,6 +25,15 @@ WITH ((1, 2), (2, 3)) AS liter_prepared_set SELECT COUNT() FROM single_column_bl WITH ((1, 1), (2, 2)) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i64) IN liter_prepared_set SETTINGS max_rows_to_read = 6; WITH ((1, (1, 1)), (2, (2, 2))) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i64, (i64, i32)) IN liter_prepared_set SETTINGS max_rows_to_read = 6; +-- Check that indexHint() works (but it doesn't work with COUNT()). +SELECT SUM(ignore(*) + 1) FROM single_column_bloom_filter WHERE indexHint(i32 in (3, 15, 50)); + +-- The index doesn't understand expressions like these, but it shouldn't break the query. 
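-- (these shapes fall outside the `column = constant` / `has(column, constant)`-style atoms the index
-- condition extracts, so the index does no pruning for them; the point here is only that the queries
-- still run and return correct results)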
+SELECT COUNT() FROM single_column_bloom_filter WHERE (i32 = 200) = (i32 = 200); +SELECT SUM(ignore(*) + 1) FROM single_column_bloom_filter WHERE indexHint((i32 = 200) != (i32 = 200)); +SELECT COUNT() FROM single_column_bloom_filter WHERE indexOf([10, 20, 30], i32) != 0; +SELECT COUNT() FROM single_column_bloom_filter WHERE has([100, 200, 300], 200); + DROP TABLE IF EXISTS single_column_bloom_filter; From d89019293e955452ede3e0abbe4b11ab2a3471bb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 06:15:13 +0200 Subject: [PATCH 1193/2361] Update tests --- tests/queries/0_stateless/01603_read_with_backoff_bug.sql | 4 ++-- tests/queries/0_stateless/02700_s3_part_INT_MAX.sh | 2 +- tests/queries/1_stateful/00157_cache_dictionary.sql | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql index 85be5082d92..f41b336be46 100644 --- a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql +++ b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql @@ -1,5 +1,5 @@ --- Tags: no-tsan, long --- Tag no-tsan: Too long for TSan +-- Tags: no-tsan, no-msan, long +-- too long. set enable_filesystem_cache=0; set enable_filesystem_cache_on_write_operations=0; diff --git a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh index c431686b594..cfb38c60615 100755 --- a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh +++ b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh @@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # too slow with this. # # Unfortunately, the test has to buffer it in memory. -$CLICKHOUSE_CLIENT --max_memory_usage 10G -nm -q " +$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q " INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV') SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024) SETTINGS s3_max_single_part_upload_size = '5Gi'; diff --git a/tests/queries/1_stateful/00157_cache_dictionary.sql b/tests/queries/1_stateful/00157_cache_dictionary.sql index 3621ff82126..a7c6c099de6 100644 --- a/tests/queries/1_stateful/00157_cache_dictionary.sql +++ b/tests/queries/1_stateful/00157_cache_dictionary.sql @@ -9,7 +9,7 @@ ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS storage_policy = 'default'; -INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000; +INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; CREATE DATABASE IF NOT EXISTS db_dict; DROP DICTIONARY IF EXISTS db_dict.cache_hits; From f90b88c978d87225a5cf5f66136714ff5535d69c Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Tue, 30 Jul 2024 05:57:07 +0000 Subject: [PATCH 1194/2361] Reduced comprexity of the test --- ...2832_alter_max_sessions_for_user.reference | 7 +-- .../02832_alter_max_sessions_for_user.sh | 62 ++++++++----------- 2 files changed, 30 insertions(+), 39 deletions(-) diff --git a/tests/queries/0_stateless/02832_alter_max_sessions_for_user.reference b/tests/queries/0_stateless/02832_alter_max_sessions_for_user.reference index f80f8738ff8..c2e103d61cb 100644 --- a/tests/queries/0_stateless/02832_alter_max_sessions_for_user.reference +++ b/tests/queries/0_stateless/02832_alter_max_sessions_for_user.reference @@ -1,8 +1,7 @@ -test_alter_profile case: max_session_count 1 alter_sessions_count 1 
-test_alter_profile case: max_session_count 2 alter_sessions_count 1 +test_alter_profile case: max_sessions_for_user 1 +USER_SESSION_LIMIT_EXCEEDED +test_alter_profile case: max_sessions_for_user 2 USER_SESSION_LIMIT_EXCEEDED -test_alter_profile case: max_session_count 1 alter_sessions_count 2 -test_alter_profile case: max_session_count 2 alter_sessions_count 2 READONLY READONLY READONLY diff --git a/tests/queries/0_stateless/02832_alter_max_sessions_for_user.sh b/tests/queries/0_stateless/02832_alter_max_sessions_for_user.sh index 87fbffdb1e6..55f9e3e97a4 100755 --- a/tests/queries/0_stateless/02832_alter_max_sessions_for_user.sh +++ b/tests/queries/0_stateless/02832_alter_max_sessions_for_user.sh @@ -5,7 +5,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -SESSION_ID_PREFIX="02832_alter_max_sessions_session_$$" QUERY_ID_PREFIX="02832_alter_max_sessions_query_$$" PROFILE="02832_alter_max_sessions_profile_$$" USER="02832_alter_max_sessions_user_$$" @@ -17,48 +16,41 @@ ${CLICKHOUSE_CLIENT} -q $"DROP PROFILE IF EXISTS ${PROFILE}" ${CLICKHOUSE_CLIENT} -q $"CREATE SETTINGS PROFILE ${PROFILE}" ${CLICKHOUSE_CLIENT} -q $"CREATE USER '${USER}' SETTINGS PROFILE '${PROFILE}'" -function run_sessions_set() +function wait_for_query_to_start() { - local sessions_count="$1" - local session_check="$2" - for ((i = 1 ; i <= ${sessions_count} ; i++)); do - local session_id="${SESSION_ID_PREFIX}_${i}" - local query_id="${QUERY_ID_PREFIX}_${i}" - # Write only expected error text - # More than alter_sessions_count queries will not start. - ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=${USER}&query_id=${query_id}&session_id=${session_id}&session_check=${session_check}&session_timeout=600&function_sleep_max_microseconds_per_block=120000000" --data-binary "SELECT sleep(120)" | grep -o -m 1 'USER_SESSION_LIMIT_EXCEEDED' & + while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.2; done +} + +function test_alter_max_sessions_for_user() +{ + local max_sessions_for_user="$1" + echo $"test_alter_profile case: max_sessions_for_user ${max_sessions_for_user}" + + # Step 0: Set max_sessions_for_user. + ${CLICKHOUSE_CLIENT} -q $"ALTER SETTINGS PROFILE ${PROFILE} SETTINGS max_sessions_for_user = ${max_sessions_for_user}" + + # Step 1: Simulaneously run `max_sessions_for_user` queries. These queries should run without any problems. + for ((i = 1 ; i <= max_sessions_for_user ; i++)); do + local query_id="${QUERY_ID_PREFIX}_${i}_${max_sessions_for_user}" + ${CLICKHOUSE_CLIENT} --max_block_size 1 --query_id $query_id --user $USER --function_sleep_max_microseconds_per_block=120000000 -q "SELECT sleepEachRow(0.1) FROM numbers(1200)" &>/dev/null & + wait_for_query_to_start $query_id done - for ((i = 1 ; i <= ${sessions_count} ; i++)); do - local query_id="${QUERY_ID_PREFIX}_${i}" - $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE query_id='$query_id' SYNC" >/dev/null + # Step 2: Run another `max_sessions_for_user` + 1 query. That query should fail. + local query_id="${QUERY_ID_PREFIX}_should_fail" + ${CLICKHOUSE_CLIENT} --query_id $query_id --user $USER -q "SELECT 1" 2>&1 | grep -o -m 1 'USER_SESSION_LIMIT_EXCEEDED' + + # Step 3: Stop running queries launched at step 1. 
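    # (KILL QUERY ... ASYNC only asks the long-running SELECTs to stop; the `wait` after this loop is what
    # blocks until the background clickhouse-client processes from step 1 have actually exited.)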
+ for ((i = 1 ; i <= max_sessions_for_user ; i++)); do + local query_id="${QUERY_ID_PREFIX}_${i}_${max_sessions_for_user}" + $CLICKHOUSE_CLIENT --query "KILL QUERY WHERE query_id='$query_id' ASYNC" >/dev/null done wait } -function test_alter_profile() -{ - local max_session_count="$1" - local alter_sessions_count="$2" - echo $"test_alter_profile case: max_session_count ${max_session_count} alter_sessions_count ${alter_sessions_count}" - - ${CLICKHOUSE_CLIENT} -q $"ALTER SETTINGS PROFILE ${PROFILE} SETTINGS max_sessions_for_user = ${max_session_count}" - - # Create sessions with $max_session_count restriction - run_sessions_set $max_session_count 0 - - # Update restriction to $alter_sessions_count - ${CLICKHOUSE_CLIENT} -q $"ALTER SETTINGS PROFILE ${PROFILE} SETTINGS max_sessions_for_user = ${alter_sessions_count}" - - # Simultaneous sessions should use max settings from profile ($alter_sessions_count) - run_sessions_set $max_session_count 1 -} - -test_alter_profile 1 1 -test_alter_profile 2 1 -test_alter_profile 1 2 -test_alter_profile 2 2 +test_alter_max_sessions_for_user 1 +test_alter_max_sessions_for_user 2 ${CLICKHOUSE_CLIENT} -q "SELECT 1 SETTINGS max_sessions_for_user = 1" 2>&1 | grep -m 1 -o 'READONLY' | head -1 ${CLICKHOUSE_CLIENT} -q $"SET max_sessions_for_user = 1 " 2>&1 | grep -o -m 1 'READONLY' | head -1 From b0e6b3e88930d3ca493dddb688235c64cec1d893 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 30 Jul 2024 06:30:12 +0000 Subject: [PATCH 1195/2361] Kick off CI build From 7a53a14940ae1be299305548f0d024de7f279fe3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 09:29:16 +0200 Subject: [PATCH 1196/2361] Update 03213_deep_json.sql --- tests/queries/0_stateless/03213_deep_json.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03213_deep_json.sql b/tests/queries/0_stateless/03213_deep_json.sql index 4f79d99eb26..2a9476381ff 100644 --- a/tests/queries/0_stateless/03213_deep_json.sql +++ b/tests/queries/0_stateless/03213_deep_json.sql @@ -1,5 +1,5 @@ -- The default limit works. -SELECT * FROM format("JSONCompactEachRow", 'x UInt32, y UInt32', REPEAT('[1,1,', 100000)) SETTINGS input_format_json_compact_allow_variable_number_of_columns = 1; -- { serverError TOO_DEEP_RECURSION } +SELECT * FROM format("JSONCompactEachRow", 'x UInt32, y UInt32', REPEAT('[1,1,', 100000)) SETTINGS input_format_json_compact_allow_variable_number_of_columns = 1; -- { serverError TOO_DEEP_RECURSION, INCORRECT_DATA } -- Even if we relax the limit, it is also safe. 
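-- (the deeply nested input is still rejected with TOO_DEEP_RECURSION or INCORRECT_DATA rather than being
-- parsed without any guard)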
SET input_format_json_max_depth = 100000; -SELECT * FROM format("JSONCompactEachRow", 'x UInt32, y UInt32', REPEAT('[1,1,', 100000)) SETTINGS input_format_json_compact_allow_variable_number_of_columns = 1; -- { serverError TOO_DEEP_RECURSION } +SELECT * FROM format("JSONCompactEachRow", 'x UInt32, y UInt32', REPEAT('[1,1,', 100000)) SETTINGS input_format_json_compact_allow_variable_number_of_columns = 1; -- { serverError TOO_DEEP_RECURSION, INCORRECT_DATA } From 56ba7c5d48cfa648f1d496d55cdfd50450da0299 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 09:33:17 +0200 Subject: [PATCH 1197/2361] Update a test --- tests/queries/0_stateless/00632_get_sample_block_cache.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00632_get_sample_block_cache.sql b/tests/queries/0_stateless/00632_get_sample_block_cache.sql index 6a226c4912a..a631cbb8b86 100644 --- a/tests/queries/0_stateless/00632_get_sample_block_cache.sql +++ b/tests/queries/0_stateless/00632_get_sample_block_cache.sql @@ -57,7 +57,7 @@ DROP TABLE video_views; -- Test for tsan: Ensure cache is used from one thread -SET max_threads = 32; +SET max_threads = 32, max_memory_usage = '10G'; DROP TABLE IF EXISTS sample_00632; From 6ab67323862391e520ac571f609f52b582248da5 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 30 Jul 2024 07:36:38 +0000 Subject: [PATCH 1198/2361] Delete .reference --- ...2_refreshable_materialized_views.reference | 44 ------------------- 1 file changed, 44 deletions(-) delete mode 100644 tests/queries/0_stateless/02932_refreshable_materialized_views.reference diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference deleted file mode 100644 index 2eb41590af1..00000000000 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference +++ /dev/null @@ -1,44 +0,0 @@ -<1: created view> a [] 1 -CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 2 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x -<2: refreshed> 3 1 1 -<3: time difference at least> 1000 -<4: next refresh in> 2 -<4.5: altered> Scheduled Finished 2052-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT x * 2 AS x\nFROM default.src -<5: no refresh> 3 -<6: refreshed> 2 -<7: refreshed> Scheduled Finished 2054-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a -<8: refreshed> 20 -<9: refreshed> a Scheduled Finished 2054-01-01 00:00:00 -<9: refreshed> b Scheduled Finished 2054-01-01 00:00:00 -<10: waiting> a Scheduled [] 2054-01-01 00:00:00 -<10: waiting> b WaitingForDependencies ['default.a'] 2054-01-01 00:00:00 -<11: chain-refreshed a> 4 -<12: chain-refreshed b> 40 -<13: chain-refreshed> a Scheduled [] Finished 2054-01-01 00:00:01 2056-01-01 00:00:00 -<13: chain-refreshed> b Scheduled ['default.a'] Finished 2054-01-24 23:22:21 2056-01-01 00:00:00 -<14: waiting for next cycle> a Scheduled [] 2058-01-01 00:00:00 -<14: waiting for next cycle> b WaitingForDependencies ['default.a'] 2060-01-01 00:00:00 -<15: chain-refreshed a> 6 -<16: chain-refreshed b> 60 -<17: chain-refreshed> a Scheduled 2062-01-01 00:00:00 -<17: chain-refreshed> b Scheduled 2062-01-01 00:00:00 -<18: removed 
dependency> b Scheduled [] 2062-03-03 03:03:03 2064-01-01 00:00:00 5 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a -<19: exception> 1 -<20: unexception> 1 -<21: rename> 1 -<22: rename> d Finished -<23: simple refresh> 1 -<24: rename during refresh> 1 -<25: rename during refresh> f Running -<27: cancelled> f Scheduled -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 -<29: randomize> 1 1 -CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src -<30: to existing table> 10 -<31: to existing table> 10 -<31: to existing table> 20 -<32: empty> i Scheduled Unknown -<32: empty> j Scheduled Finished From 861bdb51f8e098c4b06cb14988e00febbb5a0ac7 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 09:42:38 +0200 Subject: [PATCH 1199/2361] Fix test --- .../test_replicated_table_attach/test.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/integration/test_replicated_table_attach/test.py b/tests/integration/test_replicated_table_attach/test.py index 02ef1ad6353..499220def2c 100644 --- a/tests/integration/test_replicated_table_attach/test.py +++ b/tests/integration/test_replicated_table_attach/test.py @@ -27,7 +27,20 @@ def started_cluster(): cluster.shutdown() +def start_clean_clickhouse(): + # remove fault injection if present + if "fault_injection.xml" in node.exec_in_container( + ["bash", "-c", "ls /etc/clickhouse-server/config.d"] + ): + print("Removing fault injection") + node.exec_in_container( + ["bash", "-c", "rm /etc/clickhouse-server/config.d/fault_injection.xml"] + ) + node.restart_clickhouse() + + def test_startup_with_small_bg_pool(started_cluster): + start_clean_clickhouse() node.query( "CREATE TABLE replicated_table (k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/replicated_table', 'r1') ORDER BY k" ) @@ -45,6 +58,7 @@ def test_startup_with_small_bg_pool(started_cluster): def test_startup_with_small_bg_pool_partitioned(started_cluster): + start_clean_clickhouse() node.query( "CREATE TABLE replicated_table_partitioned (k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/replicated_table_partitioned', 'r1') ORDER BY k" ) From cd036886ec9ba030da7b6b6151bb81e5cc3f7636 Mon Sep 17 00:00:00 2001 From: heguangnan Date: Tue, 30 Jul 2024 15:53:48 +0800 Subject: [PATCH 1200/2361] add test --- ...unt_distinct_null_key_memory_leak.reference | 0 ...214_count_distinct_null_key_memory_leak.sql | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.reference create mode 100644 tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql diff --git a/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.reference b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql new file mode 100644 index 00000000000..847d3742dc3 --- /dev/null +++ b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS testnull; 
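-- Every inserted row leaves `a` as NULL, so GROUP BY a collapses to the single NULL key; the test checks
-- that the memory used by count(distinct b) on that path is still limited, i.e. the query below is expected
-- to fail with MEMORY_LIMIT_EXCEEDED instead of leaking past max_memory_usage.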
+CREATE TABLE testnull +( + `a` Nullable(String), + `b` Nullable(String), + `c` Nullable(String) +) +ENGINE = MergeTree +PARTITION BY tuple() +ORDER BY c +SETTINGS index_granularity = 8192, allow_nullable_key=1; + +INSERT INTO testnull(b,c) SELECT toString(rand64()) AS b, toString(rand64()) AS c FROM numbers(1000000); +SELECT count(distinct b) FROM testnull GROUP BY a SETTINGS max_memory_usage = 54748364; -- {serverError MEMORY_LIMIT_EXCEEDED} + +DROP TABLE testnull; \ No newline at end of file From a70571762f7d73a7ecc94981e8086418ecfdeb3b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 10:08:35 +0200 Subject: [PATCH 1201/2361] Enable text_log by default --- programs/server/config.xml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/programs/server/config.xml b/programs/server/config.xml index 94825a55f67..844aff8f668 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -1130,8 +1130,7 @@ 7500 - + system part_log
@@ -1143,9 +1142,9 @@ false
- system text_log
@@ -1154,9 +1153,8 @@ 8192 524288 false - + trace
- --> From c427c4e2bba852f6f8f9b9346a9a2d0a09f0e4be Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 30 Jul 2024 10:34:47 +0200 Subject: [PATCH 1202/2361] Typo --- src/Interpreters/DatabaseCatalog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 98526e5c1cd..a8e5fd7e6aa 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1355,7 +1355,7 @@ void DatabaseCatalog::dropTableDataTask() } catch (...) { - /// We don't re-throw expection, because we are in a background pool. + /// We don't re-throw exception, because we are in a background pool. tryLogCurrentException(log, "Cannot drop tables. Will retry later."); } } From 3e6a1b99e023eb3d592c72c17ae4913a9074b5af Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 11:05:45 +0200 Subject: [PATCH 1203/2361] Fix file/URI parsing with archive syntax --- src/IO/Archives/ArchiveUtils.cpp | 50 +++++++++++++++++++ src/IO/Archives/ArchiveUtils.h | 14 ++++++ src/IO/Archives/createArchiveReader.cpp | 13 ++--- src/IO/Archives/createArchiveWriter.cpp | 9 ++-- src/IO/S3/URI.cpp | 39 +++++++-------- src/IO/S3/URI.h | 3 +- src/Storages/StorageFile.cpp | 8 ++- src/TableFunctions/TableFunctionFile.cpp | 9 ++-- .../03214_parsing_archive_name_file.reference | 12 +++++ .../03214_parsing_archive_name_file.sh | 21 ++++++++ .../03215_parsing_archive_name_s3.reference | 2 + .../03215_parsing_archive_name_s3.sql | 6 +++ .../data_minio/::03215_archive.csv | 1 + .../data_minio/test :: 03215_archive.csv | 1 + .../data_minio/test::03215_archive.csv | 1 + 15 files changed, 145 insertions(+), 44 deletions(-) create mode 100644 src/IO/Archives/ArchiveUtils.cpp create mode 100644 tests/queries/0_stateless/03214_parsing_archive_name_file.reference create mode 100755 tests/queries/0_stateless/03214_parsing_archive_name_file.sh create mode 100644 tests/queries/0_stateless/03215_parsing_archive_name_s3.reference create mode 100644 tests/queries/0_stateless/03215_parsing_archive_name_s3.sql create mode 100644 tests/queries/0_stateless/data_minio/::03215_archive.csv create mode 100644 tests/queries/0_stateless/data_minio/test :: 03215_archive.csv create mode 100644 tests/queries/0_stateless/data_minio/test::03215_archive.csv diff --git a/src/IO/Archives/ArchiveUtils.cpp b/src/IO/Archives/ArchiveUtils.cpp new file mode 100644 index 00000000000..50009087de3 --- /dev/null +++ b/src/IO/Archives/ArchiveUtils.cpp @@ -0,0 +1,50 @@ +#include + +#include +#include + +namespace DB +{ + +namespace +{ + +using namespace std::literals; +constexpr std::array tar_extensions{".tar"sv, ".tar.gz"sv, ".tgz"sv, ".tar.zst"sv, ".tzst"sv, ".tar.xz"sv, ".tar.bz2"sv, ".tar.lzma"sv}; +constexpr std::array zip_extensions{".zip"sv, ".zipx"sv}; +constexpr std::array sevenz_extensiosns{".7z"sv}; + +bool hasSupportedExtension(std::string_view path, const auto & supported_extensions) +{ + for (auto supported_extension : supported_extensions) + { + if (path.ends_with(supported_extension)) + return true; + } + + return false; +} + +} + +bool hasSupportedTarExtension(std::string_view path) +{ + return hasSupportedExtension(path, tar_extensions); +} + +bool hasSupportedZipExtension(std::string_view path) +{ + return hasSupportedExtension(path, zip_extensions); +} + +bool hasSupported7zExtension(std::string_view path) +{ + return hasSupportedExtension(path, sevenz_extensiosns); +} + +bool hasSupportedArchiveExtension(std::string_view path) +{ + return 
hasSupportedTarExtension(path) || hasSupportedZipExtension(path) || hasSupported7zExtension(path); +} + +} diff --git a/src/IO/Archives/ArchiveUtils.h b/src/IO/Archives/ArchiveUtils.h index 1b66be005a2..cdb731d1d57 100644 --- a/src/IO/Archives/ArchiveUtils.h +++ b/src/IO/Archives/ArchiveUtils.h @@ -10,3 +10,17 @@ #include #include #endif + +#include + +namespace DB +{ + +bool hasSupportedTarExtension(std::string_view path); +bool hasSupportedZipExtension(std::string_view path); +bool hasSupported7zExtension(std::string_view path); + +bool hasSupportedArchiveExtension(std::string_view path); + + +} diff --git a/src/IO/Archives/createArchiveReader.cpp b/src/IO/Archives/createArchiveReader.cpp index 782602091ac..dfa098eede0 100644 --- a/src/IO/Archives/createArchiveReader.cpp +++ b/src/IO/Archives/createArchiveReader.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include @@ -12,7 +13,6 @@ extern const int CANNOT_UNPACK_ARCHIVE; extern const int SUPPORT_IS_DISABLED; } - std::shared_ptr createArchiveReader(const String & path_to_archive) { return createArchiveReader(path_to_archive, {}, 0); @@ -24,11 +24,7 @@ std::shared_ptr createArchiveReader( [[maybe_unused]] const std::function()> & archive_read_function, [[maybe_unused]] size_t archive_size) { - using namespace std::literals; - static constexpr std::array tar_extensions{ - ".tar"sv, ".tar.gz"sv, ".tgz"sv, ".tar.zst"sv, ".tzst"sv, ".tar.xz"sv, ".tar.bz2"sv, ".tar.lzma"sv}; - - if (path_to_archive.ends_with(".zip") || path_to_archive.ends_with(".zipx")) + if (hasSupportedZipExtension(path_to_archive)) { #if USE_MINIZIP return std::make_shared(path_to_archive, archive_read_function, archive_size); @@ -36,8 +32,7 @@ std::shared_ptr createArchiveReader( throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "minizip library is disabled"); #endif } - else if (std::any_of( - tar_extensions.begin(), tar_extensions.end(), [&](const auto extension) { return path_to_archive.ends_with(extension); })) + else if (hasSupportedTarExtension(path_to_archive)) { #if USE_LIBARCHIVE return std::make_shared(path_to_archive, archive_read_function); @@ -45,7 +40,7 @@ std::shared_ptr createArchiveReader( throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "libarchive library is disabled"); #endif } - else if (path_to_archive.ends_with(".7z")) + else if (hasSupported7zExtension(path_to_archive)) { #if USE_LIBARCHIVE return std::make_shared(path_to_archive); diff --git a/src/IO/Archives/createArchiveWriter.cpp b/src/IO/Archives/createArchiveWriter.cpp index 9a169587088..53be0a85a10 100644 --- a/src/IO/Archives/createArchiveWriter.cpp +++ b/src/IO/Archives/createArchiveWriter.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -24,10 +25,7 @@ std::shared_ptr createArchiveWriter(const String & path_to_archi std::shared_ptr createArchiveWriter(const String & path_to_archive, [[maybe_unused]] std::unique_ptr archive_write_buffer) { - using namespace std::literals; - static constexpr std::array tar_extensions{ - ".tar"sv, ".tar.gz"sv, ".tgz"sv, ".tar.bz2"sv, ".tar.lzma"sv, ".tar.zst"sv, ".tzst"sv, ".tar.xz"sv}; - if (path_to_archive.ends_with(".zip") || path_to_archive.ends_with(".zipx")) + if (hasSupportedZipExtension(path_to_archive)) { #if USE_MINIZIP return std::make_shared(path_to_archive, std::move(archive_write_buffer)); @@ -35,8 +33,7 @@ createArchiveWriter(const String & path_to_archive, [[maybe_unused]] std::unique throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "minizip library is disabled"); #endif } - else if (std::any_of( - 
tar_extensions.begin(), tar_extensions.end(), [&](const auto extension) { return path_to_archive.ends_with(extension); })) + else if (hasSupportedTarExtension(path_to_archive)) { #if USE_LIBARCHIVE return std::make_shared(path_to_archive, std::move(archive_write_buffer)); diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index 4bf7a3ddf86..b9c400d2b98 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -54,10 +55,7 @@ URI::URI(const std::string & uri_) static constexpr auto OSS = "OSS"; static constexpr auto EOS = "EOS"; - if (containsArchive(uri_)) - std::tie(uri_str, archive_pattern) = getPathToArchiveAndArchivePattern(uri_); - else - uri_str = uri_; + std::tie(uri_str, archive_pattern) = getURIAndArchivePattern(uri_); uri = Poco::URI(uri_str); std::unordered_map mapper; @@ -167,32 +165,29 @@ void URI::validateBucket(const String & bucket, const Poco::URI & uri) !uri.empty() ? " (" + uri.toString() + ")" : ""); } -bool URI::containsArchive(const std::string & source) +std::pair> URI::getURIAndArchivePattern(const std::string & source) { size_t pos = source.find("::"); - return (pos != std::string::npos); -} + if (pos == String::npos) + return {source, std::nullopt}; -std::pair URI::getPathToArchiveAndArchivePattern(const std::string & source) -{ - size_t pos = source.find("::"); - assert(pos != std::string::npos); + std::string_view path_to_archive_view = std::string_view{source}.substr(0, pos); + while (path_to_archive_view.ends_with(' ')) + path_to_archive_view.remove_suffix(1); - std::string path_to_archive = source.substr(0, pos); - while ((!path_to_archive.empty()) && path_to_archive.ends_with(' ')) - path_to_archive.pop_back(); + if (path_to_archive_view.empty() || !hasSupportedArchiveExtension(path_to_archive_view)) + return {source, std::nullopt}; - if (path_to_archive.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to archive is empty"); + auto archive_uri = path_to_archive_view; - std::string_view path_in_archive_view = std::string_view{source}.substr(pos + 2); - while (path_in_archive_view.front() == ' ') - path_in_archive_view.remove_prefix(1); + std::string_view archive_pattern_view = std::string_view{source}.substr(pos + 2); + while (archive_pattern_view.front() == ' ') + archive_pattern_view.remove_prefix(1); - if (path_in_archive_view.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filename is empty"); + if (archive_pattern_view.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Archive pattern is empty"); - return {path_to_archive, std::string{path_in_archive_view}}; + return std::pair{std::string{archive_uri}, std::string{archive_pattern_view}}; } } diff --git a/src/IO/S3/URI.h b/src/IO/S3/URI.h index 363f98c46f5..e4bb0d9eae1 100644 --- a/src/IO/S3/URI.h +++ b/src/IO/S3/URI.h @@ -42,8 +42,7 @@ struct URI static void validateBucket(const std::string & bucket, const Poco::URI & uri); private: - bool containsArchive(const std::string & source); - std::pair getPathToArchiveAndArchivePattern(const std::string & source); + std::pair> getURIAndArchivePattern(const std::string & source); }; } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 98cd5c4dfa9..de56fcf66a0 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -2247,8 +2248,11 @@ void StorageFile::parseFileSource(String source, String & filename, String & pat while 
(path_to_archive_view.ends_with(' ')) path_to_archive_view.remove_suffix(1); - if (path_to_archive_view.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to archive is empty"); + if (path_to_archive_view.empty() || !hasSupportedArchiveExtension(path_to_archive_view)) + { + filename = std::move(source); + return; + } path_to_archive = path_to_archive_view; diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index 1b6d86f8fa5..12b88ae2b14 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -63,9 +63,12 @@ std::optional TableFunctionFile::tryGetFormatFromFirstArgument() return FormatFactory::instance().tryGetFormatFromFileName(filename); } -StoragePtr TableFunctionFile::getStorage(const String & source, - const String & format_, const ColumnsDescription & columns, - ContextPtr global_context, const std::string & table_name, +StoragePtr TableFunctionFile::getStorage( + const String & source, + const String & format_, + const ColumnsDescription & columns, + ContextPtr global_context, + const std::string & table_name, const std::string & compression_method_) const { // For `file` table function, we are going to use format settings from the diff --git a/tests/queries/0_stateless/03214_parsing_archive_name_file.reference b/tests/queries/0_stateless/03214_parsing_archive_name_file.reference new file mode 100644 index 00000000000..243a7c8fd02 --- /dev/null +++ b/tests/queries/0_stateless/03214_parsing_archive_name_file.reference @@ -0,0 +1,12 @@ +::nonexistentfile.csv +1 +nonexistent::nonexistentfile.csv +1 +nonexistent :: nonexistentfile.csv +1 +nonexistent ::nonexistentfile.csv +1 +nonexistent.tar.gz :: nonexistentfile.csv +1 +nonexistent.zip:: nonexistentfile.csv +1 diff --git a/tests/queries/0_stateless/03214_parsing_archive_name_file.sh b/tests/queries/0_stateless/03214_parsing_archive_name_file.sh new file mode 100755 index 00000000000..32bf3246c84 --- /dev/null +++ b/tests/queries/0_stateless/03214_parsing_archive_name_file.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +function try_to_read_file() +{ + file_to_read=$1 + file_argument=$2 + + echo $file_argument + $CLICKHOUSE_LOCAL -q "SELECT * FROM file('$file_argument')" 2>&1 | rg -c "Cannot stat file.*$file_to_read" +} + +try_to_read_file "::nonexistentfile.csv" "::nonexistentfile.csv" +try_to_read_file "nonexistent::nonexistentfile.csv" "nonexistent::nonexistentfile.csv" +try_to_read_file "nonexistent :: nonexistentfile.csv" "nonexistent :: nonexistentfile.csv" +try_to_read_file "nonexistent ::nonexistentfile.csv" "nonexistent ::nonexistentfile.csv" +try_to_read_file "nonexistent.tar.gz" "nonexistent.tar.gz :: nonexistentfile.csv" +try_to_read_file "nonexistent.zip" "nonexistent.zip:: nonexistentfile.csv" diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference b/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference new file mode 100644 index 00000000000..9dd925a7480 --- /dev/null +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference @@ -0,0 +1,2 @@ +::03215_archive.csv test/::03215_archive.csv +test::03215_archive.csv test/test::03215_archive.csv diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql new file mode 100644 index 00000000000..9d01f53c838 --- /dev/null +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT _file, _path FROM s3(s3_conn, filename='::03215_archive.csv') ORDER BY (_file, _path); +SELECT _file, _path FROM s3(s3_conn, filename='test :: 03215_archive.csv') ORDER BY (_file, _path); -- { serverError STD_EXCEPTION } +SELECT _file, _path FROM s3(s3_conn, filename='test::03215_archive.csv') ORDER BY (_file, _path); diff --git a/tests/queries/0_stateless/data_minio/::03215_archive.csv b/tests/queries/0_stateless/data_minio/::03215_archive.csv new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/data_minio/::03215_archive.csv @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/data_minio/test :: 03215_archive.csv b/tests/queries/0_stateless/data_minio/test :: 03215_archive.csv new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/data_minio/test :: 03215_archive.csv @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/data_minio/test::03215_archive.csv b/tests/queries/0_stateless/data_minio/test::03215_archive.csv new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/data_minio/test::03215_archive.csv @@ -0,0 +1 @@ +1 From 5381619b2ab465386f11e86242883419e48e5f6b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 11:36:25 +0200 Subject: [PATCH 1204/2361] Remove bad feature. --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 722ae4f8268..a4c873ba3f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,7 +65,6 @@ * The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. 
[#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)). * The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)). -* Allow matching column names in a case-insensitive manner when reading json files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). * Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). * In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). From 4d4fc8fd6f0123613305423d861429f54222d23f Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 11:57:37 +0200 Subject: [PATCH 1205/2361] Add setting to disable archive path syntax --- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.cpp | 1 + src/IO/S3/URI.cpp | 8 ++++++-- src/IO/S3/URI.h | 2 +- src/Storages/ObjectStorage/S3/Configuration.cpp | 8 ++++---- src/Storages/StorageFile.cpp | 14 ++++++++++++-- src/Storages/StorageFile.h | 2 +- src/TableFunctions/TableFunctionFile.cpp | 5 +++-- .../03214_parsing_archive_name_file.reference | 4 ++++ .../0_stateless/03214_parsing_archive_name_file.sh | 10 ++++++++-- .../03215_parsing_archive_name_s3.reference | 1 + .../0_stateless/03215_parsing_archive_name_s3.sql | 1 + .../data_minio/test.zip::03215_archive.csv | 1 + 13 files changed, 44 insertions(+), 14 deletions(-) create mode 100644 tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4fc2034b855..5114a8204cd 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -936,6 +936,7 @@ class IColumn; M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \ M(Bool, parallel_replicas_prefer_local_join, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.", 0) \ M(UInt64, parallel_replicas_mark_segment_size, 128, "Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. 
Not recommended to change until you're absolutely sure in what you're doing", 0) \ + M(Bool, allow_archive_path_syntax, true, "File/S3 engines/table function will parse paths with '::' as ' :: ' if archive has correct extension", 0) \ \ M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \ M(Bool, allow_experimental_full_text_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 9faf77e9087..8483a267237 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -124,6 +124,7 @@ static std::initializer_list mapper; diff --git a/src/IO/S3/URI.h b/src/IO/S3/URI.h index e4bb0d9eae1..80e2da96cd4 100644 --- a/src/IO/S3/URI.h +++ b/src/IO/S3/URI.h @@ -36,7 +36,7 @@ struct URI bool is_virtual_hosted_style; URI() = default; - explicit URI(const std::string & uri_); + explicit URI(const std::string & uri_, bool allow_archive_path_syntax = false); void addRegionToURI(const std::string & region); static void validateBucket(const std::string & bucket, const Poco::URI & uri); diff --git a/src/Storages/ObjectStorage/S3/Configuration.cpp b/src/Storages/ObjectStorage/S3/Configuration.cpp index 094ca069e7a..7542f59dcc4 100644 --- a/src/Storages/ObjectStorage/S3/Configuration.cpp +++ b/src/Storages/ObjectStorage/S3/Configuration.cpp @@ -142,14 +142,14 @@ ObjectStoragePtr StorageS3Configuration::createObjectStorage(ContextPtr context, void StorageS3Configuration::fromNamedCollection(const NamedCollection & collection, ContextPtr context) { - const auto settings = context->getSettingsRef(); + const auto & settings = context->getSettingsRef(); validateNamedCollection(collection, required_configuration_keys, optional_configuration_keys); auto filename = collection.getOrDefault("filename", ""); if (!filename.empty()) - url = S3::URI(std::filesystem::path(collection.get("url")) / filename); + url = S3::URI(std::filesystem::path(collection.get("url")) / filename, settings.allow_archive_path_syntax); else - url = S3::URI(collection.get("url")); + url = S3::URI(collection.get("url"), settings.allow_archive_path_syntax); auth_settings.access_key_id = collection.getOrDefault("access_key_id", ""); auth_settings.secret_access_key = collection.getOrDefault("secret_access_key", ""); @@ -330,7 +330,7 @@ void StorageS3Configuration::fromAST(ASTs & args, ContextPtr context, bool with_ } /// This argument is always the first - url = S3::URI(checkAndGetLiteralArgument(args[0], "url")); + url = S3::URI(checkAndGetLiteralArgument(args[0], "url"), context->getSettingsRef().allow_archive_path_syntax); if (engine_args_to_idx.contains("format")) { diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index de56fcf66a0..efb39f90053 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -2208,7 +2208,11 @@ void registerStorageFile(StorageFactory & factory) else if (type == Field::Types::UInt64) source_fd = static_cast(literal->value.get()); else if (type == Field::Types::String) - StorageFile::parseFileSource(literal->value.get(), source_path, storage_args.path_to_archive); + StorageFile::parseFileSource( + literal->value.get(), + source_path, + storage_args.path_to_archive, + factory_args.getLocalContext()->getSettingsRef().allow_archive_path_syntax); else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second argument must be path or file descriptor"); } @@ -2235,8 
+2239,14 @@ SchemaCache & StorageFile::getSchemaCache(const ContextPtr & context) return schema_cache; } -void StorageFile::parseFileSource(String source, String & filename, String & path_to_archive) +void StorageFile::parseFileSource(String source, String & filename, String & path_to_archive, bool allow_archive_path_syntax) { + if (!allow_archive_path_syntax) + { + filename = std::move(source); + return; + } + size_t pos = source.find("::"); if (pos == String::npos) { diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index 895a8a663b8..bb969c1877c 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -128,7 +128,7 @@ public: static SchemaCache & getSchemaCache(const ContextPtr & context); - static void parseFileSource(String source, String & filename, String & path_to_archive); + static void parseFileSource(String source, String & filename, String & path_to_archive, bool allow_archive_path_syntax); static ArchiveInfo getArchiveInfo( const std::string & path_to_archive, diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index 12b88ae2b14..af327cfe54e 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -26,7 +26,7 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr if (context->getApplicationType() != Context::ApplicationType::LOCAL) { ITableFunctionFileLike::parseFirstArguments(arg, context); - StorageFile::parseFileSource(std::move(filename), filename, path_to_archive); + StorageFile::parseFileSource(std::move(filename), filename, path_to_archive, context->getSettingsRef().allow_archive_path_syntax); return; } @@ -42,7 +42,8 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr else if (filename == "stderr") fd = STDERR_FILENO; else - StorageFile::parseFileSource(std::move(filename), filename, path_to_archive); + StorageFile::parseFileSource( + std::move(filename), filename, path_to_archive, context->getSettingsRef().allow_archive_path_syntax); } else if (type == Field::Types::Int64 || type == Field::Types::UInt64) { diff --git a/tests/queries/0_stateless/03214_parsing_archive_name_file.reference b/tests/queries/0_stateless/03214_parsing_archive_name_file.reference index 243a7c8fd02..d793d26dfc3 100644 --- a/tests/queries/0_stateless/03214_parsing_archive_name_file.reference +++ b/tests/queries/0_stateless/03214_parsing_archive_name_file.reference @@ -10,3 +10,7 @@ nonexistent.tar.gz :: nonexistentfile.csv 1 nonexistent.zip:: nonexistentfile.csv 1 +nonexistent.tar.gz :: nonexistentfile.csv SETTINGS allow_archive_path_syntax=0 +1 +nonexistent.zip:: nonexistentfile.csv SETTINGS allow_archive_path_syntax=0 +1 diff --git a/tests/queries/0_stateless/03214_parsing_archive_name_file.sh b/tests/queries/0_stateless/03214_parsing_archive_name_file.sh index 32bf3246c84..2f77627f6be 100755 --- a/tests/queries/0_stateless/03214_parsing_archive_name_file.sh +++ b/tests/queries/0_stateless/03214_parsing_archive_name_file.sh @@ -8,14 +8,20 @@ function try_to_read_file() { file_to_read=$1 file_argument=$2 + settings=$3 - echo $file_argument - $CLICKHOUSE_LOCAL -q "SELECT * FROM file('$file_argument')" 2>&1 | rg -c "Cannot stat file.*$file_to_read" + echo $file_argument $settings + $CLICKHOUSE_LOCAL -q "SELECT * FROM file('$file_argument') $settings" 2>&1 | rg -c "Cannot stat file.*$file_to_read" } +# if archive extension is not detected for part before '::', path is taken as is try_to_read_file 
"::nonexistentfile.csv" "::nonexistentfile.csv" try_to_read_file "nonexistent::nonexistentfile.csv" "nonexistent::nonexistentfile.csv" try_to_read_file "nonexistent :: nonexistentfile.csv" "nonexistent :: nonexistentfile.csv" try_to_read_file "nonexistent ::nonexistentfile.csv" "nonexistent ::nonexistentfile.csv" +# if archive extension is detected for part before '::', path is split into archive and filename try_to_read_file "nonexistent.tar.gz" "nonexistent.tar.gz :: nonexistentfile.csv" try_to_read_file "nonexistent.zip" "nonexistent.zip:: nonexistentfile.csv" +# disabling archive syntax will always parse path as is +try_to_read_file "nonexistent.tar.gz :: nonexistentfile.csv" "nonexistent.tar.gz :: nonexistentfile.csv" "SETTINGS allow_archive_path_syntax=0" +try_to_read_file "nonexistent.zip:: nonexistentfile.csv" "nonexistent.zip:: nonexistentfile.csv" "SETTINGS allow_archive_path_syntax=0" diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference b/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference index 9dd925a7480..b4804c82dc2 100644 --- a/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference @@ -1,2 +1,3 @@ ::03215_archive.csv test/::03215_archive.csv test::03215_archive.csv test/test::03215_archive.csv +test.zip::03215_archive.csv test/test.zip::03215_archive.csv diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql index 9d01f53c838..3a7ed0b864c 100644 --- a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql @@ -4,3 +4,4 @@ SELECT _file, _path FROM s3(s3_conn, filename='::03215_archive.csv') ORDER BY (_file, _path); SELECT _file, _path FROM s3(s3_conn, filename='test :: 03215_archive.csv') ORDER BY (_file, _path); -- { serverError STD_EXCEPTION } SELECT _file, _path FROM s3(s3_conn, filename='test::03215_archive.csv') ORDER BY (_file, _path); +SELECT _file, _path FROM s3(s3_conn, filename='test.zip::03215_archive.csv') ORDER BY (_file, _path) SETTINGS allow_archive_path_syntax=0; diff --git a/tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv b/tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv @@ -0,0 +1 @@ +1 From b1e80883f0324995d84250d5edf37fd8ab475987 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Tue, 30 Jul 2024 11:39:22 +0200 Subject: [PATCH 1206/2361] `accept_invalid_certificate` in client config: additional testing #65238 --- .../configs/ssl_config_strict.xml | 17 +++++++++ .../test_accept_invalid_certificate/test.py | 35 +++++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 tests/integration/test_accept_invalid_certificate/configs/ssl_config_strict.xml diff --git a/tests/integration/test_accept_invalid_certificate/configs/ssl_config_strict.xml b/tests/integration/test_accept_invalid_certificate/configs/ssl_config_strict.xml new file mode 100644 index 00000000000..a4383a77ac4 --- /dev/null +++ b/tests/integration/test_accept_invalid_certificate/configs/ssl_config_strict.xml @@ -0,0 +1,17 @@ + + + 9440 + + + + + /etc/clickhouse-server/config.d/self-cert.pem + /etc/clickhouse-server/config.d/self-key.pem + /etc/clickhouse-server/config.d/ca-cert.pem + strict + + + diff --git 
a/tests/integration/test_accept_invalid_certificate/test.py b/tests/integration/test_accept_invalid_certificate/test.py index 87229d75f90..f43e9e6140a 100644 --- a/tests/integration/test_accept_invalid_certificate/test.py +++ b/tests/integration/test_accept_invalid_certificate/test.py @@ -17,6 +17,19 @@ instance = cluster.add_instance( "certs/self-cert.pem", "certs/ca-cert.pem", ], + with_zookeeper=False, +) + + +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/ssl_config_strict.xml", + "certs/self-key.pem", + "certs/self-cert.pem", + "certs/ca-cert.pem", + ], + with_zookeeper=False, ) @@ -90,3 +103,25 @@ def test_connection_accept(): ) == "1\n" ) + + +def test_strict_reject(): + with pytest.raises(Exception) as err: + execute_query_native(node1, "SELECT 1", "") + assert "certificate verify failed" in str(err.value) + + +def test_strict_reject_with_config(): + with pytest.raises(Exception) as err: + execute_query_native(node1, "SELECT 1", config_accept) + assert "alert certificate required" in str(err.value) + + +def test_strict_connection_reject(): + with pytest.raises(Exception) as err: + execute_query_native( + node1, + "SELECT 1", + config_connection_accept.format(ip_address=f"{instance.ip_address}"), + ) + assert "certificate verify failed" in str(err.value) From 0c9fa155d4993220c00e4b41c0354b20d3312f33 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 30 Jul 2024 11:58:35 +0200 Subject: [PATCH 1207/2361] revert last commit --- src/Storages/VirtualColumnUtils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 4edab01925d..f16eff7edb6 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -131,7 +131,7 @@ NameSet getVirtualNamesForFileLikeStorage() std::unordered_map parseHivePartitioningKeysAndValues(const String & path, const ColumnsDescription & storage_columns) { - std::string pattern = "([^/])=([^/]+)/"; + std::string pattern = "([^/]+)=([^/]+)/"; re2::StringPiece input_piece(path); std::unordered_map key_values; From bdf98cbcc0121ab94dd1db39fc5cf977a7ed42ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 30 Jul 2024 10:06:01 +0000 Subject: [PATCH 1208/2361] Fix public backports --- tests/ci/cherry_pick.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index 0b2aa9a2d35..b660ad2c040 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -505,7 +505,7 @@ class Backport: ReleaseBranch( ( br - if self._repo_name == "ClickHouse/Clickhouse" + if self._repo_name == "ClickHouse/ClickHouse" else f"release/{br}" ), pr, From 9a05a3ed9e7cccada42f49a0cd5c3896010f9edb Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Tue, 30 Jul 2024 12:07:34 +0200 Subject: [PATCH 1209/2361] Add missing documentation for `groupConcat` after series of reverts and merges https://github.com/ClickHouse/ClickHouse/pull/65384 --- .../reference/groupconcat.md | 90 +++++++++++++++++++ .../aspell-ignore/en/aspell-dict.txt | 2 + 2 files changed, 92 insertions(+) create mode 100644 docs/en/sql-reference/aggregate-functions/reference/groupconcat.md diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md new file mode 100644 index 00000000000..072252de8c9 --- /dev/null +++ 
b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md
@@ -0,0 +1,90 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/groupconcat
+sidebar_position: 363
+sidebar_label: groupConcat
+title: groupConcat
+---
+
+Calculates a concatenated string from a group of strings, optionally separated by a delimiter, and optionally limited by a maximum number of elements.
+
+**Syntax**
+
+``` sql
+groupConcat(expression [, delimiter] [, limit]);
+```
+
+**Arguments**
+
+- `expression` — The expression or column name that outputs strings to be concatenated.
+- `delimiter` — A [string](../../../sql-reference/data-types/string.md) that will be used to separate concatenated values. This parameter is optional and defaults to an empty string if not specified.
+- `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.
+
+:::note
+If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
+:::
+
+**Returned value**
+
+- Returns a [string](../../../sql-reference/data-types/string.md) consisting of the concatenated values of the column or expression. If the group has no elements or only null elements, and the function does not specify a handling for only null values, the result is a nullable string with a null value.
+
+**Examples**
+
+Input table:
+
+``` text
+┌─id─┬─Name─┐
+│  1 │ John │
+│  2 │ Jane │
+│  3 │ Bob  │
+└────┴──────┘
+```
+
+1. Basic usage without a delimiter:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name) FROM Employees;
+```
+
+Result:
+
+``` text
+JohnJaneBob
+```
+
+This concatenates all names into one continuous string without any separator.
+
+
+2. Using comma as a delimiter:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name, ', ') FROM Employees;
+```
+
+Result:
+
+``` text
+John, Jane, Bob
+```
+
+This output shows the names separated by a comma followed by a space.
+
+
+3. Limiting the number of concatenated elements:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name, ', ', 2) FROM Employees;
+```
+
+Result:
+
+``` text
+John, Jane
+```
+
+This query limits the output to the first two names, even though there are more names in the table.
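The documented examples can be reproduced end to end with a short, self-contained script. The sketch below assumes a table named `Employees` with a `Name` column, mirroring the sample data above; the results shown in the comments assume rows are concatenated in insertion order, which is not guaranteed in general.

``` sql
-- Minimal sketch of the documented examples; the table and column names
-- are taken from the examples above and are not part of the patch itself.
CREATE TABLE Employees (id UInt64, Name String) ENGINE = Memory;
INSERT INTO Employees VALUES (1, 'John'), (2, 'Jane'), (3, 'Bob');

SELECT groupConcat(Name) FROM Employees;           -- JohnJaneBob
SELECT groupConcat(Name, ', ') FROM Employees;     -- John, Jane, Bob
SELECT groupConcat(Name, ', ', 2) FROM Employees;  -- John, Jane
```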
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index b21ae0764c6..d82b70cfdb4 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1733,6 +1733,7 @@ groupBitmap groupBitmapAnd groupBitmapOr groupBitmapXor +groupConcat groupUniqArray grouparray grouparrayinsertat @@ -1749,6 +1750,7 @@ groupbitmapor groupbitmapxor groupbitor groupbitxor +groupconcat groupuniqarray grpc grpcio From fb466287dac16801518547f34b42edbc16a57fae Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 30 Jul 2024 12:13:47 +0200 Subject: [PATCH 1210/2361] Update 02150_index_hypothesis_race_long.sh --- tests/queries/0_stateless/02150_index_hypothesis_race_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh b/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh index be7cfa78492..c29b604d23d 100755 --- a/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh +++ b/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-distributed-cache +# Tags: long, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From d7534c5b40c315cdb3d52101f626dbee7d565cdb Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 30 Jul 2024 12:20:54 +0200 Subject: [PATCH 1211/2361] Update 03210_optimize_rewrite_aggregate_function_with_if_return_type_bug.sql --- ...imize_rewrite_aggregate_function_with_if_return_type_bug.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug.sql b/tests/queries/0_stateless/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug.sql index b620a6434bf..565a481940a 100644 --- a/tests/queries/0_stateless/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug.sql +++ b/tests/queries/0_stateless/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug.sql @@ -1,3 +1,5 @@ +SET allow_experimental_analyzer = 1; + -- For function count, rewrite countState to countStateIf changes the type from AggregateFunction(count, Nullable(UInt64)) to AggregateFunction(count, UInt64) -- We can cast AggregateFunction(count, UInt64) back to AggregateFunction(count, Nullable(UInt64)) with additional _CAST select hex(countState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1; From 6ef628a7c80ebd6ec727365ee69d8141a4f11400 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 30 Jul 2024 10:33:22 +0000 Subject: [PATCH 1212/2361] Fixing build. 
--- src/Processors/QueryPlan/DistinctStep.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/DistinctStep.cpp b/src/Processors/QueryPlan/DistinctStep.cpp index a481454139d..b1c24fc01ce 100644 --- a/src/Processors/QueryPlan/DistinctStep.cpp +++ b/src/Processors/QueryPlan/DistinctStep.cpp @@ -10,6 +10,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + static ITransformingStep::Traits getTraits(bool pre_distinct) { const bool preserves_number_of_streams = pre_distinct; @@ -90,7 +95,8 @@ void DistinctStep::transformPipeline(QueryPipelineBuilder & pipeline, const Buil /// final distinct for sorted stream (sorting inside and among chunks) if (input_stream.sort_scope == DataStream::SortScope::Global) { - assert(input_stream.has_single_port); + if (pipeline.getNumStreams() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "DistinctStep with in-order expects single input"); if (distinct_sort_desc.size() < columns.size()) { From 0f8feff4d3806fa6f81d24184ab68bcd6e727551 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 12:34:30 +0200 Subject: [PATCH 1213/2361] Add KeeperMap retries --- src/Storages/StorageKeeperMap.cpp | 132 +++++++++++++++++------------- 1 file changed, 73 insertions(+), 59 deletions(-) diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index ef157239e26..5534bb7f346 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -35,6 +35,7 @@ #include #include +#include "Common/ZooKeeper/ZooKeeperRetries.h" #include #include #include @@ -120,7 +121,7 @@ public: : SinkToStorage(header), storage(storage_), context(std::move(context_)) { auto primary_key = storage.getPrimaryKey(); - assert(primary_key.size() == 1); + chassert(primary_key.size() == 1); primary_key_pos = getHeader().getPositionByName(primary_key[0]); } @@ -171,76 +172,89 @@ public: template void finalize(bool strict) { - auto zookeeper = storage.getClient(); + const auto & settings = context->getSettingsRef(); - auto keys_limit = storage.keysLimit(); + ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{ + settings.insert_keeper_max_retries, + settings.insert_keeper_retry_initial_backoff_ms, + settings.insert_keeper_retry_max_backoff_ms}, + context->getProcessListElement()}; - size_t current_keys_num = 0; - size_t new_keys_num = 0; - - // We use keys limit as a soft limit so we ignore some cases when it can be still exceeded - // (e.g if parallel insert queries are being run) - if (keys_limit != 0) + retries_ctl.retryLoop([&]() { - Coordination::Stat data_stat; - zookeeper->get(storage.dataPath(), &data_stat); - current_keys_num = data_stat.numChildren; - } + auto zookeeper = storage.getClient(); + auto keys_limit = storage.keysLimit(); - std::vector key_paths; - key_paths.reserve(new_values.size()); - for (const auto & [key, _] : new_values) - key_paths.push_back(storage.fullPathForKey(key)); + size_t current_keys_num = 0; + size_t new_keys_num = 0; - zkutil::ZooKeeper::MultiExistsResponse results; - - if constexpr (!for_update) - { - if (!strict) - results = zookeeper->exists(key_paths); - } - - Coordination::Requests requests; - requests.reserve(key_paths.size()); - for (size_t i = 0; i < key_paths.size(); ++i) - { - auto key = fs::path(key_paths[i]).filename(); - - if constexpr (for_update) + // We use keys limit as a soft limit so we ignore some cases when it can be still exceeded + // (e.g if parallel insert 
queries are being run) + if (keys_limit != 0) { - int32_t version = -1; - if (strict) - version = versions.at(key); - - requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], version)); + Coordination::Stat data_stat; + zookeeper->get(storage.dataPath(), &data_stat); + current_keys_num = data_stat.numChildren; } - else + + std::vector key_paths; + key_paths.reserve(new_values.size()); + for (const auto & [key, _] : new_values) + key_paths.push_back(storage.fullPathForKey(key)); + + zkutil::ZooKeeper::MultiExistsResponse results; + + if constexpr (!for_update) { - if (!strict && results[i].error == Coordination::Error::ZOK) + if (!strict) + results = zookeeper->exists(key_paths); + } + + Coordination::Requests requests; + requests.reserve(key_paths.size()); + for (size_t i = 0; i < key_paths.size(); ++i) + { + auto key = fs::path(key_paths[i]).filename(); + + if constexpr (for_update) { - requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1)); + int32_t version = -1; + if (strict) + version = versions.at(key); + + requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], version)); } else { - requests.push_back(zkutil::makeCreateRequest(key_paths[i], new_values[key], zkutil::CreateMode::Persistent)); - ++new_keys_num; + if (!strict && results[i].error == Coordination::Error::ZOK) + { + requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1)); + } + else + { + requests.push_back(zkutil::makeCreateRequest(key_paths[i], new_values[key], zkutil::CreateMode::Persistent)); + ++new_keys_num; + } } } - } - if (new_keys_num != 0) - { - auto will_be = current_keys_num + new_keys_num; - if (keys_limit != 0 && will_be > keys_limit) - throw Exception( - ErrorCodes::LIMIT_EXCEEDED, - "Limit would be exceeded by inserting {} new key(s). Limit is {}, while the number of keys would be {}", - new_keys_num, - keys_limit, - will_be); - } + if (new_keys_num != 0) + { + auto will_be = current_keys_num + new_keys_num; + if (keys_limit != 0 && will_be > keys_limit) + throw Exception( + ErrorCodes::LIMIT_EXCEEDED, + "Limit would be exceeded by inserting {} new key(s). 
Limit is {}, while the number of keys would be {}", + new_keys_num, + keys_limit, + will_be); + } - zookeeper->multi(requests, /* check_session_valid */ true); + zookeeper->multi(requests, /* check_session_valid */ true); + }); } }; @@ -529,8 +543,8 @@ Pipe StorageKeeperMap::read( size_t num_keys = keys->size(); size_t num_threads = std::min(num_streams, keys->size()); - assert(num_keys <= std::numeric_limits::max()); - assert(num_threads <= std::numeric_limits::max()); + chassert(num_keys <= std::numeric_limits::max()); + chassert(num_threads <= std::numeric_limits::max()); for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) { @@ -1160,7 +1174,7 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca bool strict = local_context->getSettingsRef().keeper_map_strict_mode; - assert(commands.size() == 1); + chassert(commands.size() == 1); auto metadata_snapshot = getInMemoryMetadataPtr(); auto storage = getStorageID(); @@ -1236,7 +1250,7 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca return; } - assert(commands.front().type == MutationCommand::Type::UPDATE); + chassert(commands.front().type == MutationCommand::Type::UPDATE); if (commands.front().column_to_update_expression.contains(primary_key)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key cannot be updated (cannot update column {})", primary_key); From 2530c5eb41a759baded5380a4e697c2e884c0abd Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 12:37:35 +0200 Subject: [PATCH 1214/2361] Fix tests --- tests/queries/0_stateless/02952_archive_parsing.reference | 0 tests/queries/0_stateless/02952_archive_parsing.sql | 1 - tests/queries/0_stateless/03214_parsing_archive_name_file.sh | 2 +- 3 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 tests/queries/0_stateless/02952_archive_parsing.reference delete mode 100644 tests/queries/0_stateless/02952_archive_parsing.sql diff --git a/tests/queries/0_stateless/02952_archive_parsing.reference b/tests/queries/0_stateless/02952_archive_parsing.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/02952_archive_parsing.sql b/tests/queries/0_stateless/02952_archive_parsing.sql deleted file mode 100644 index 49b0223e6ec..00000000000 --- a/tests/queries/0_stateless/02952_archive_parsing.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM file('::a'); -- { serverError BAD_ARGUMENTS } diff --git a/tests/queries/0_stateless/03214_parsing_archive_name_file.sh b/tests/queries/0_stateless/03214_parsing_archive_name_file.sh index 2f77627f6be..b54cbb10aa6 100755 --- a/tests/queries/0_stateless/03214_parsing_archive_name_file.sh +++ b/tests/queries/0_stateless/03214_parsing_archive_name_file.sh @@ -11,7 +11,7 @@ function try_to_read_file() settings=$3 echo $file_argument $settings - $CLICKHOUSE_LOCAL -q "SELECT * FROM file('$file_argument') $settings" 2>&1 | rg -c "Cannot stat file.*$file_to_read" + $CLICKHOUSE_LOCAL -q "SELECT * FROM file('$file_argument') $settings" 2>&1 | grep -c "Cannot stat file.*$file_to_read" } # if archive extension is not detected for part before '::', path is taken as is From 27a15bc5ace68acb0ba62791ca6e3d3f17ae569c Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 30 Jul 2024 10:39:35 +0000 Subject: [PATCH 1215/2361] Remove sh files --- .../03036_dynamic_read_subcolumns_1.sh | 19 ------------------- .../03036_dynamic_read_subcolumns_2.sh | 19 ------------------- .../03036_dynamic_read_subcolumns_3.sh | 19 ------------------- 
3 files changed, 57 deletions(-) delete mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh delete mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh delete mode 100755 tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh deleted file mode 100755 index ed548ae74e9..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_1.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - - -# shellcheck source=./03036_dynamic_read_subcolumns.lib -. "$CUR_DIR"/03036_dynamic_read_subcolumns.lib - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" - -$CH_CLIENT -q "drop table if exists test;" - -echo "Memory" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=Memory" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh deleted file mode 100755 index 95dafcf5832..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_2.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - - -# shellcheck source=./03036_dynamic_read_subcolumns.lib -. "$CUR_DIR"/03036_dynamic_read_subcolumns.lib - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree compact" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;" -test -$CH_CLIENT -q "drop table test;" diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh deleted file mode 100755 index a3c2d93e568..00000000000 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_3.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Tags: long - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CUR_DIR"/../shell_config.sh - - -# shellcheck source=./03036_dynamic_read_subcolumns.lib -. 
"$CUR_DIR"/03036_dynamic_read_subcolumns.lib - -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1" - -$CH_CLIENT -q "drop table if exists test;" - -echo "MergeTree wide" -$CH_CLIENT -q "create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" -test -$CH_CLIENT -q "drop table test;" From 72d6467fd2c34a82e1ef8ac73a451240843279a6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 30 Jul 2024 10:43:21 +0000 Subject: [PATCH 1216/2361] Bump Azure to 1.13 --- contrib/azure | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/azure b/contrib/azure index ea3e19a7be0..67272b7ee0a 160000 --- a/contrib/azure +++ b/contrib/azure @@ -1 +1 @@ -Subproject commit ea3e19a7be08519134c643177d56c7484dfec884 +Subproject commit 67272b7ee0adff6b69921b26eb071ba1a353062c From d4537d91875d1ecec832af94fde15073c45a63d7 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Tue, 30 Jul 2024 12:07:47 +0200 Subject: [PATCH 1217/2361] Add `groupConcat` to fuzzer https://github.com/ClickHouse/ClickHouse/pull/65384 --- tests/fuzz/dictionaries/functions.dict | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index ec7f8017fb2..6f2a88c22fa 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -1588,6 +1588,7 @@ "groupBitmapXorResample" "groupBitmapXorSimpleState" "groupBitmapXorState" +"groupConcat" "groupUniqArray" "groupUniqArrayArgMax" "groupUniqArrayArgMin" From d4d3d590e38436da44b13dbf11a92cc6d00863e7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 29 Jul 2024 20:06:55 +0000 Subject: [PATCH 1218/2361] Remove obsolete --multiquery parameter from tests --- .../integration/test_distributed_format/test.py | 8 ++++---- ...80_client_break_at_exception_in_batch_mode.sh | 2 +- .../00463_long_sessions_in_http_interface.sh | 10 +++++----- .../0_stateless/00474_readonly_settings.sh | 4 ++-- .../0_stateless/00612_pk_in_tuple_perf.sh | 4 ++-- .../0_stateless/00630_arbitrary_csv_delimiter.sh | 6 +++--- .../00650_csv_with_specified_quote_rule.sh | 4 ++-- ...00651_default_database_on_client_reconnect.sh | 2 +- .../00652_mutations_default_database.sh | 2 +- ...cated_mutations_default_database_zookeeper.sh | 2 +- .../00699_materialized_view_mutations.sh | 8 ++++---- .../00704_drop_truncate_memory_table.sh | 6 +++--- .../0_stateless/00705_drop_create_merge_tree.sh | 4 ++-- .../0_stateless/00763_lock_buffer_long.sh | 4 ++-- ...3_long_lock_buffer_alter_destination_table.sh | 4 ++-- .../00825_protobuf_format_array_3dim.sh | 2 +- .../00825_protobuf_format_array_of_arrays.sh | 2 +- .../00825_protobuf_format_enum_mapping.sh | 2 +- .../0_stateless/00825_protobuf_format_map.sh | 2 +- .../00825_protobuf_format_nested_in_nested.sh | 2 +- .../00825_protobuf_format_nested_optional.sh | 2 +- .../00825_protobuf_format_no_length_delimiter.sh | 6 +++--- .../0_stateless/00825_protobuf_format_persons.sh | 4 ++-- ...5_protobuf_format_skipped_column_in_nested.sh | 2 +- .../00825_protobuf_format_splitted_nested.sh | 2 +- .../0_stateless/00825_protobuf_format_squares.sh | 2 +- .../00825_protobuf_format_table_default.sh | 2 +- .../0_stateless/00900_long_parquet_load.sh | 2 +- .../0_stateless/00900_orc_arrow_parquet_maps.sh | 2 +- .../00937_format_schema_rows_template.sh | 4 ++-- .../0_stateless/00956_sensitive_data_masking.sh | 12 ++++++------ 
.../01019_alter_materialized_view_atomic.sh | 2 +- .../01019_alter_materialized_view_consistent.sh | 2 +- .../0_stateless/01035_lc_empty_part_bug.sh | 4 ++-- ...ystem_reload_dictionary_reloads_completely.sh | 2 +- .../01052_window_view_proc_tumble_to_now.sh | 2 +- .../01053_window_view_proc_hop_to_now.sh | 2 +- .../01054_window_view_proc_tumble_to.sh | 2 +- .../0_stateless/01055_window_view_proc_hop_to.sh | 2 +- ...057_window_view_event_tumble_to_strict_asc.sh | 2 +- .../01058_window_view_event_hop_to_strict_asc.sh | 2 +- .../01060_window_view_event_tumble_to_asc.sh | 2 +- .../01061_window_view_event_hop_to_asc.sh | 2 +- .../01063_window_view_event_tumble_to_bounded.sh | 2 +- .../01064_window_view_event_hop_to_bounded.sh | 2 +- ...w_view_event_tumble_to_strict_asc_lateness.sh | 2 +- ...7_window_view_event_tumble_to_asc_lateness.sh | 2 +- ...ndow_view_event_tumble_to_bounded_lateness.sh | 2 +- .../01071_window_view_event_tumble_asc_join.sh | 2 +- ...01072_window_view_multiple_columns_groupby.sh | 2 +- ...3_window_view_event_tumble_to_asc_populate.sh | 4 ++-- ...window_view_event_tumble_asc_join_populate.sh | 2 +- ...75_window_view_proc_tumble_to_now_populate.sh | 2 +- .../01076_window_view_alter_query_to.sh | 4 ++-- ...7_window_view_alter_query_to_modify_source.sh | 4 ++-- ...1079_window_view_inner_table_memory_tumble.sh | 2 +- .../01080_window_view_inner_table_memory_hop.sh | 2 +- .../01081_window_view_target_table_engine.sh | 2 +- .../0_stateless/01083_window_view_select.sh | 2 +- .../01084_window_view_with_table_identifier.sh | 2 +- .../0_stateless/01086_window_view_cleanup.sh | 2 +- .../0_stateless/01087_window_view_alter_query.sh | 4 ++-- .../01088_window_view_default_column.sh | 2 +- .../0_stateless/01133_begin_commit_race.sh | 6 +++--- .../01169_alter_partition_isolation_stress.sh | 4 ++-- ...01169_old_alter_partition_isolation_stress.sh | 10 +++++----- .../01171_mv_select_insert_isolation_long.sh | 14 +++++++------- .../0_stateless/01174_select_insert_isolation.sh | 8 ++++---- .../0_stateless/01198_client_quota_key.sh | 2 +- .../0_stateless/01285_engine_join_donmikel.sh | 4 ++-- .../0_stateless/01293_optimize_final_force.sh | 2 +- .../queries/0_stateless/01304_direct_io_long.sh | 4 ++-- ...orage_file_tsv_csv_with_names_write_prefix.sh | 4 ++-- .../0_stateless/01443_merge_truncate_long.sh | 2 +- .../01527_clickhouse_local_optimize.sh | 2 +- .../01543_avro_deserialization_with_lc.sh | 2 +- .../0_stateless/01544_file_engine_settings.sh | 4 ++-- .../0_stateless/01600_detach_permanently.sh | 2 +- .../01600_parts_states_metrics_long.sh | 2 +- tests/queries/0_stateless/01606_git_import.sh | 4 ++-- .../0_stateless/01607_arrays_as_nested_csv.sh | 4 ++-- .../0_stateless/01632_tinylog_read_write.sh | 4 ++-- .../01658_read_file_to_stringcolumn.sh | 4 ++-- .../01666_merge_tree_max_query_limit.sh | 2 +- .../0_stateless/01747_system_session_log_long.sh | 6 +++--- .../01801_nullable_low_cardinality_tsv.sh | 2 +- .../01834_alias_columns_laziness_filimonov.sh | 2 +- .../01923_network_receive_time_metric_insert.sh | 4 ++-- .../01939_network_receive_bytes_metrics.sh | 4 ++-- .../01946_test_wrong_host_name_access.sh | 2 +- ..._with_escape_sequence_at_the_end_of_buffer.sh | 2 +- tests/queries/0_stateless/02009_from_infile.sh | 2 +- .../0_stateless/02024_compression_in_query.sh | 6 +++--- .../02048_parallel_reading_from_infile.sh | 6 +++--- ...02104_clickhouse_local_columns_description.sh | 2 +- ...insert_deduplication_token_multiple_blocks.sh | 10 +++++----- 
...eduplication_token_multiple_blocks_replica.sh | 10 +++++----- .../queries/0_stateless/02125_many_mutations.sh | 6 +++--- .../0_stateless/02125_many_mutations_2.sh | 6 +++--- .../queries/0_stateless/02135_local_create_db.sh | 2 +- .../0_stateless/02151_client_option_echo.sh | 4 ++-- .../0_stateless/02151_hash_table_sizes_stats.sh | 2 +- .../02151_hash_table_sizes_stats_distributed.sh | 2 +- .../02158_explain_ast_alter_commands.sh | 2 +- .../02206_clickhouse_local_use_database.sh | 2 +- .../02226_filesystem_cache_profile_events.sh | 14 +++++++------- .../02227_test_create_empty_sqlite_db.sh | 4 ++-- .../0_stateless/02235_remote_fs_cache_stress.sh | 6 +++--- .../02240_protobuflist_format_persons.sh | 4 ++-- .../02246_clickhouse_local_drop_database.sh | 4 ++-- .../0_stateless/02286_drop_filesystem_cache.sh | 2 +- .../02337_drop_filesystem_cache_access.sh | 8 ++++---- .../0_stateless/02364_window_view_segfault.sh | 2 +- .../0_stateless/02373_datetime64_monotonicity.sh | 2 +- .../0_stateless/02416_rename_database_rbac.sh | 4 ++-- .../02435_rollback_cancelled_queries.sh | 2 +- ...60_projections_and_aggregate_null_if_empty.sh | 2 +- .../02494_query_cache_user_isolation.sh | 8 ++++---- ...503_cache_on_write_with_small_segment_size.sh | 4 ++-- .../queries/0_stateless/02521_merge_over_gap.sh | 2 +- .../02530_dictionaries_update_field.sh | 2 +- .../0_stateless/02702_allow_skip_errors_enum.sh | 2 +- .../0_stateless/02704_keeper_map_zk_nodes.sh | 2 +- ...ckhouse_local_implicit_file_table_function.sh | 4 ++-- .../02712_bool_better_exception_message.sh | 8 ++++---- .../0_stateless/02722_database_filesystem.sh | 8 ++++---- tests/queries/0_stateless/02724_database_s3.sh | 14 +++++++------- tests/queries/0_stateless/02725_database_hdfs.sh | 10 +++++----- .../0_stateless/02725_local_query_parameters.sh | 2 +- .../02751_multiquery_with_argument.reference | 2 -- .../02751_multiquery_with_argument.sh | 16 ++++++---------- .../02815_no_throw_in_simple_queries.sh | 2 +- .../02843_insertion_table_schema_infer.sh | 2 +- .../02864_restore_table_with_broken_part.sh | 2 +- ...ultiple_batches_array_inconsistent_offsets.sh | 2 +- .../0_stateless/02875_merge_engine_set_index.sh | 2 +- .../02877_optimize_read_in_order_from_view.sh | 2 +- ...02884_create_view_with_sql_security_option.sh | 16 ++++++++-------- ...885_async_insert_access_check_for_defaults.sh | 4 ++-- ...900_clickhouse_local_drop_current_database.sh | 2 +- .../02956_clickhouse_local_system_parts.sh | 2 +- .../02973_backup_of_in_memory_compressed.sh | 10 +++++----- .../02973_parse_crlf_with_tsv_files.sh | 2 +- .../0_stateless/02995_forget_partition.sh | 4 ++-- tests/queries/0_stateless/02995_index_1.sh | 6 +++--- tests/queries/0_stateless/02995_index_10.sh | 6 +++--- tests/queries/0_stateless/02995_index_2.sh | 6 +++--- tests/queries/0_stateless/02995_index_3.sh | 6 +++--- tests/queries/0_stateless/02995_index_4.sh | 6 +++--- tests/queries/0_stateless/02995_index_5.sh | 6 +++--- tests/queries/0_stateless/02995_index_6.sh | 6 +++--- tests/queries/0_stateless/02995_index_7.sh | 6 +++--- tests/queries/0_stateless/02995_index_8.sh | 6 +++--- tests/queries/0_stateless/02995_index_9.sh | 6 +++--- .../0_stateless/02998_native_parquet_reader.sh | 2 +- .../03001_backup_matview_after_modify_query.sh | 2 +- .../03001_matview_columns_after_modify_query.sh | 2 +- .../03006_correct_revoke_for_partial_rights.sh | 2 +- .../03147_system_columns_access_checks.sh | 4 ++-- .../0_stateless/03201_local_named_collections.sh | 6 +++--- .../0_stateless/03212_thousand_exceptions.sh | 
2 +- 161 files changed, 316 insertions(+), 322 deletions(-) diff --git a/tests/integration/test_distributed_format/test.py b/tests/integration/test_distributed_format/test.py index 91afb8f7b34..5611f465e8b 100644 --- a/tests/integration/test_distributed_format/test.py +++ b/tests/integration/test_distributed_format/test.py @@ -55,7 +55,7 @@ def test_single_file(started_cluster, cluster): path = get_dist_path(cluster, "distr_1", 1) query = f"select * from file('{path}/1.bin', 'Distributed')" out = node.exec_in_container( - ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query] + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] ) assert out == "1\ta\n2\tbb\n3\tccc\n" @@ -65,7 +65,7 @@ def test_single_file(started_cluster, cluster): select * from t; """ out = node.exec_in_container( - ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query] + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] ) assert out == "1\ta\n2\tbb\n3\tccc\n" @@ -106,7 +106,7 @@ def test_two_files(started_cluster, cluster): select * from t order by x; """ out = node.exec_in_container( - ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query] + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] ) assert out == "0\t_\n1\ta\n2\tbb\n3\tccc\n" @@ -141,7 +141,7 @@ def test_single_file_old(started_cluster, cluster): select * from t; """ out = node.exec_in_container( - ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query] + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] ) assert out == "1\ta\n2\tbb\n3\tccc\n" diff --git a/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh index 62f891db33c..0aab52d15c2 100755 --- a/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh +++ b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true; +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true; diff --git a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh index d41d6409315..6ee1649c9ed 100755 --- a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh +++ b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh @@ -25,7 +25,7 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_4&se ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_5&session_timeout=60" --data-binary "SELECT 1" echo "Sessions are local per user:" -${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;" +${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;" ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6&session_timeout=600" --data-binary "CREATE TEMPORARY TABLE t (s String)" ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "INSERT INTO t VALUES ('Hello')" @@ -37,7 +37,7 @@ ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${C ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" -${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER test_00463"; +${CLICKHOUSE_CLIENT} --query "DROP USER test_00463"; echo "And cannot be accessed for a non-existent user:" ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" | grep -c -F 'Exception' @@ -59,7 +59,7 @@ done echo "A session successfully expire after a timeout and the session's temporary table shadows the permanent table:" # An infinite loop is required to make the test reliable. 
We will check that the timeout corresponds to the observed time at least once -${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');" +${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');" while true do ( @@ -70,7 +70,7 @@ do ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8" --data-binary "SELECT * FROM t" ) | tr -d '\n' | grep -F 'HelloWorld' && break || sleep 1 done -${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE t" +${CLICKHOUSE_CLIENT} --query "DROP TABLE t" echo "A session cannot be used by concurrent connections:" @@ -83,5 +83,5 @@ do done ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT 1" | grep -c -F 'SESSION_IS_LOCKED' -${CLICKHOUSE_CLIENT} --multiquery --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null"; +${CLICKHOUSE_CLIENT} --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null"; wait diff --git a/tests/queries/0_stateless/00474_readonly_settings.sh b/tests/queries/0_stateless/00474_readonly_settings.sh index 3a857d81a74..ed3558c6d7a 100755 --- a/tests/queries/0_stateless/00474_readonly_settings.sh +++ b/tests/queries/0_stateless/00474_readonly_settings.sh @@ -8,8 +8,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=0 | grep value $CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=1 | grep value -$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL" -$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT --readonly=1 --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT --readonly=1 --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=1" | grep value ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=0" | grep value diff --git a/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh b/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh index c8297635c43..7b2973669de 100755 --- a/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh +++ b/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -$CLICKHOUSE_CLIENT --multiquery </dev/null ||: +${CLICKHOUSE_CLIENT} 
--ignore-error --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" 2>/dev/null ||: diff --git a/tests/queries/0_stateless/00652_mutations_default_database.sh b/tests/queries/0_stateless/00652_mutations_default_database.sh index eed45540f9b..577943bc3fd 100755 --- a/tests/queries/0_stateless/00652_mutations_default_database.sh +++ b/tests/queries/0_stateless/00652_mutations_default_database.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery --mutations_sync=1 << EOF +${CLICKHOUSE_CLIENT} --mutations_sync=1 << EOF DROP TABLE IF EXISTS mutations; DROP TABLE IF EXISTS for_subquery; diff --git a/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh index 0ac5a2f748a..d4f6d3b290c 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib -${CLICKHOUSE_CLIENT} --allow_nondeterministic_mutations=1 --multiquery << EOF +${CLICKHOUSE_CLIENT} --allow_nondeterministic_mutations=1 << EOF DROP TABLE IF EXISTS mutations_r1; DROP TABLE IF EXISTS for_subquery; diff --git a/tests/queries/0_stateless/00699_materialized_view_mutations.sh b/tests/queries/0_stateless/00699_materialized_view_mutations.sh index a0f7db536dc..07ca9bc0f67 100755 --- a/tests/queries/0_stateless/00699_materialized_view_mutations.sh +++ b/tests/queries/0_stateless/00699_materialized_view_mutations.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS view_00699; DROP TABLE IF EXISTS null_00699; @@ -20,14 +20,14 @@ SELECT count(), min(x), max(x) FROM view_00699; ALTER TABLE null_00699 DELETE WHERE x % 2 = 0;" --mutations_sync=1 -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT count(), min(x), max(x) FROM null_00699; SELECT count(), min(x), max(x) FROM view_00699; ALTER TABLE view_00699 DELETE WHERE x % 2 = 0; " --mutations_sync=1 -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT count(), min(x), max(x) FROM null_00699; SELECT count(), min(x), max(x) FROM view_00699; @@ -35,7 +35,7 @@ ALTER TABLE null_00699 DELETE WHERE x % 2 = 1; ALTER TABLE view_00699 DELETE WHERE x % 2 = 1; " --mutations_sync=1 -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT count(), min(x), max(x) FROM null_00699; SELECT count(), min(x), max(x) FROM view_00699; diff --git a/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh b/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh index e1540d1a25e..e40da11b893 100755 --- a/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh +++ b/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS memory; CREATE TABLE memory (x UInt64) ENGINE = Memory; @@ -21,13 +21,13 @@ INSERT INTO memory SELECT * FROM numbers(1000);" # But if the table will be dropped before query - just pass. # It's Ok, because otherwise the test will depend on the race condition in the test itself. -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" SET max_threads = 1; SELECT count() FROM memory WHERE NOT ignore(sleep(0.0001));" 2>&1 | grep -c -P '^1000$|^0$|Exception' & sleep 0.05; -${CLICKHOUSE_CLIENT} --multiquery --query=" +${CLICKHOUSE_CLIENT} --query=" TRUNCATE TABLE memory; DROP TABLE memory; " diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index ea8b9d02e49..fd002668696 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & -yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & +yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT & +yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT & wait ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table" diff --git a/tests/queries/0_stateless/00763_lock_buffer_long.sh b/tests/queries/0_stateless/00763_lock_buffer_long.sh index 444a66767aa..92f917aa287 100755 --- a/tests/queries/0_stateless/00763_lock_buffer_long.sh +++ b/tests/queries/0_stateless/00763_lock_buffer_long.sh @@ -16,12 +16,12 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE buffer_00763_2 (s String) ENGINE = Bu function thread1() { - seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS mt_00763_2; CREATE TABLE mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --fsync-metadata 0 --multiquery --ignore-error ||: + seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS mt_00763_2; CREATE TABLE mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --fsync-metadata 0 --ignore-error ||: } function thread2() { - seq 1 500 | sed -r -e 's/.+/SELECT count() FROM buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218|^Code: 473' | grep -v '(query: ' + seq 1 500 | sed -r -e 's/.+/SELECT count() FROM buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218|^Code: 473' | grep -v '(query: ' } thread1 & diff --git a/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh index 7e2384cfc52..79df667d45f 100755 --- a/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh +++ b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh @@ -18,12 +18,12 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO mt_00763_1 VALUES 
(1, '1'), (2, '2'), function thread1() { - seq 1 300 | sed -r -e 's/.+/ALTER TABLE mt_00763_1 MODIFY column s UInt32; ALTER TABLE mt_00763_1 MODIFY column s String;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error ||: + seq 1 300 | sed -r -e 's/.+/ALTER TABLE mt_00763_1 MODIFY column s UInt32; ALTER TABLE mt_00763_1 MODIFY column s String;/' | ${CLICKHOUSE_CLIENT} --ignore-error ||: } function thread2() { - seq 1 2000 | sed -r -e 's/.+/SELECT sum(length(s)) FROM buffer_00763_1;/' | ${CLICKHOUSE_CLIENT} --multiquery --ignore-error 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)' + seq 1 2000 | sed -r -e 's/.+/SELECT sum(length(s)) FROM buffer_00763_1;/' | ${CLICKHOUSE_CLIENT} --ignore-error 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)' } thread1 & diff --git a/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh b/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh index 3cd842a10ba..468ced802cd 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_array_3dim.sh @@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas set -eo pipefail # Run the client. -$CLICKHOUSE_CLIENT --multiquery < /dev/null < /dev/null < $row_format_file -$CLICKHOUSE_CLIENT --multiline --multiquery --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \ +$CLICKHOUSE_CLIENT --multiline --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \ format_template_row = '$row_format_file', \ format_template_row_format = 'Question: \${question:Quoted}, Answer: \${answer:Quoted}, Number of Likes: \${likes:Raw}, Date: \${date:Raw}', \ format_template_rows_between_delimiter = ';\n'; --{clientError 474}" @@ -38,7 +38,7 @@ format_template_rows_between_delimiter = ';\n'"; # Test that if both format_template_result_format setting and format_template_resultset are provided, error is thrown resultset_output_file="$CURDIR"/"$CLICKHOUSE_TEST_UNIQUE_NAME"_template_output_format_resultset.tmp echo -ne '===== Resultset ===== \n \${data} \n ===============' > $resultset_output_file -$CLICKHOUSE_CLIENT --multiline --multiquery --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \ +$CLICKHOUSE_CLIENT --multiline --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \ format_template_resultset = '$resultset_output_file', \ format_template_resultset_format = '===== Resultset ===== \n \${data} \n ===============', \ format_template_row_format = 'Question: \${question:Quoted}, Answer: \${answer:Quoted}, Number of Likes: \${likes:Raw}, Date: \${date:Raw}', \ diff --git a/tests/queries/0_stateless/00956_sensitive_data_masking.sh b/tests/queries/0_stateless/00956_sensitive_data_masking.sh index 926557e4ba6..bd65b937648 100755 --- a/tests/queries/0_stateless/00956_sensitive_data_masking.sh +++ b/tests/queries/0_stateless/00956_sensitive_data_masking.sh @@ -17,7 +17,7 @@ echo 1 # normal execution $CLICKHOUSE_CLIENT \ --query="SELECT 'find_me_TOPSECRET=TOPSECRET' FROM numbers(1) FORMAT Null" \ - --log_queries=1 --ignore-error --multiquery >"$tmp_file" 2>&1 + --log_queries=1 --ignore-error >"$tmp_file" 2>&1 grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 1a' grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 1b' @@ 
-38,7 +38,7 @@ echo 3 # failure at before query start $CLICKHOUSE_CLIENT \ --query="SELECT 1 FROM system.numbers WHERE credit_card_number='find_me_TOPSECRET=TOPSECRET' FORMAT Null" \ - --log_queries=1 --ignore-error --multiquery |& grep -v '^(query: ' > "$tmp_file" + --log_queries=1 --ignore-error |& grep -v '^(query: ' > "$tmp_file" grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 3a' grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 3b' @@ -56,7 +56,7 @@ echo 4 # failure at the end of query $CLICKHOUSE_CLIENT \ --query="SELECT 'find_me_TOPSECRET=TOPSECRET', intDiv( 100, number - 10) FROM numbers(11) FORMAT Null" \ - --log_queries=1 --ignore-error --max_block_size=2 --multiquery |& grep -v '^(query: ' > "$tmp_file" + --log_queries=1 --ignore-error --max_block_size=2 |& grep -v '^(query: ' > "$tmp_file" grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 4a' grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 4b' @@ -67,7 +67,7 @@ rm -f "$tmp_file2" >/dev/null 2>&1 bash -c "$CLICKHOUSE_CLIENT \ --function_sleep_max_microseconds_per_block 60000000 \ --query=\"select sleepEachRow(1) from numbers(10) where ignore('find_me_TOPSECRET=TOPSECRET')=0 and ignore('fwerkh_that_magic_string_make_me_unique') = 0 FORMAT Null\" \ - --log_queries=1 --ignore-error --multiquery |& grep -v '^(query: ' > $tmp_file2" & + --log_queries=1 --ignore-error |& grep -v '^(query: ' > $tmp_file2" & rm -f "$tmp_file" >/dev/null 2>&1 # check that executing query doesn't expose secrets in processlist @@ -133,7 +133,7 @@ insert into sensitive select number as id, toDate('2019-01-01') as date, 'abcd' insert into sensitive select number as id, toDate('2019-01-01') as date, 'find_me_TOPSECRET=TOPSECRET' as value1, rand() as valuer from numbers(10); insert into sensitive select number as id, toDate('2019-01-01') as date, 'abcd' as value1, rand() as valuer from numbers(10000); select * from sensitive WHERE value1 = 'find_me_TOPSECRET=TOPSECRET' FORMAT Null; -drop table sensitive;" --log_queries=1 --ignore-error --multiquery >"$tmp_file" 2>&1 +drop table sensitive;" --log_queries=1 --ignore-error >"$tmp_file" 2>&1 grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 8a' grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 8b' @@ -144,7 +144,7 @@ echo 9 $CLICKHOUSE_CLIENT \ --server_logs_file=/dev/null \ --query="SELECT if( count() > 0, 'text_log non empty', 'text_log empty') FROM system.text_log WHERE event_date >= yesterday() and message like '%find_me%'; - select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error --multiquery + select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error echo 'finish' rm -f "$tmp_file" >/dev/null 2>&1 diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh index 4bd21fcee02..eb12a76eb62 100755 --- a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh +++ b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery <&1| grep -Fa "Exception: " | grep -Fv UNKNOWN_STATUS_OF_TRANSACTION @@ -24,7 +24,7 @@ function begin_commit_readonly() function begin_rollback_readonly() { while true; do - $CLICKHOUSE_CLIENT --wait_changes_become_visible_after_commit_mode=wait_unknown --multiquery --query " + $CLICKHOUSE_CLIENT --wait_changes_become_visible_after_commit_mode=wait_unknown --query " BEGIN TRANSACTION; SET TRANSACTION SNAPSHOT 42; ROLLBACK;" @@ -34,7 +34,7 @@ function begin_rollback_readonly() function begin_insert_commit() { while true; do - $CLICKHOUSE_CLIENT --wait_changes_become_visible_after_commit_mode=async --multiquery --query " + $CLICKHOUSE_CLIENT --wait_changes_become_visible_after_commit_mode=async --query " BEGIN TRANSACTION; INSERT INTO mt VALUES ($RANDOM); COMMIT;" 2>&1| grep -Fa "Exception: " | grep -Fv UNKNOWN_STATUS_OF_TRANSACTION diff --git a/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh index d4884cbf457..8873fd88f0e 100755 --- a/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh +++ b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh @@ -22,7 +22,7 @@ function thread_insert() set -eu val=1 while true; do - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO src VALUES /* ($val, 1) */ ($val, 1); INSERT INTO src VALUES /* ($val, 2) */ ($val, 2); @@ -210,7 +210,7 @@ function thread_select() set -eu while true; do output=$( - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; -- no duplicates SELECT type, throwIf(count(n) != countDistinct(n)) FROM src GROUP BY type FORMAT Null; diff --git a/tests/queries/0_stateless/01169_old_alter_partition_isolation_stress.sh b/tests/queries/0_stateless/01169_old_alter_partition_isolation_stress.sh index 0d2016952d4..404042ab64e 100755 --- a/tests/queries/0_stateless/01169_old_alter_partition_isolation_stress.sh +++ b/tests/queries/0_stateless/01169_old_alter_partition_isolation_stress.sh @@ -19,7 +19,7 @@ function thread_insert() set -e val=1 while true; do - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO src VALUES /* ($val, 1) */ ($val, 1); INSERT INTO src VALUES /* ($val, 2) */ ($val, 2); @@ -40,7 +40,7 @@ function thread_partition_src_to_dst() sum=0 for i in {1..20}; do out=$( - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO src VALUES /* ($i, 3) */ ($i, 3); INSERT INTO dst SELECT * FROM src; @@ -49,7 +49,7 @@ function thread_partition_src_to_dst() SELECT throwIf((SELECT (count(), sum(n)) FROM merge(currentDatabase(), '') WHERE type=3) != ($count + 1, $sum + $i)) FORMAT Null; COMMIT;" 2>&1) ||: - echo "$out" | grep -Fv "SERIALIZATION_ERROR" | grep -F "Received from " && $CLICKHOUSE_CLIENT --multiquery --query " + echo "$out" | grep -Fv "SERIALIZATION_ERROR" | grep -F "Received from " && $CLICKHOUSE_CLIENT --query " begin transaction; set transaction snapshot 3; select $i, 'src', type, n, _part from src order by type, n; @@ -68,7 +68,7 @@ function thread_partition_dst_to_src() if (( i % 2 )); then action="COMMIT" fi - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " SYSTEM STOP MERGES dst; ALTER TABLE dst DROP PARTITION ID 'nonexistent'; -- STOP MERGES doesn't wait for started merges to finish, so we use this trick SYSTEM SYNC TRANSACTION 
LOG; @@ -87,7 +87,7 @@ function thread_select() { set -e while true; do - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; -- no duplicates SELECT type, throwIf(count(n) != countDistinct(n)) FROM src GROUP BY type FORMAT Null; diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 3b9bb50517d..2fb58e4cc57 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -65,7 +65,7 @@ function insert_commit_action() local tag=$1; shift # some transactions will fail due to constraint - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO src VALUES /* ($i, $tag) */ ($i, $tag); SELECT throwIf((SELECT sum(nm) FROM mv) != $(($i * $tag))) /* ($i, $tag) */ FORMAT Null; @@ -83,7 +83,7 @@ function insert_rollback_action() local i=$1; shift local tag=$1; shift - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO src VALUES /* (42, $tag) */ (42, $tag); SELECT throwIf((SELECT count() FROM src WHERE n=42 AND m=$tag) != 1) FORMAT Null; @@ -112,7 +112,7 @@ function optimize_action() action="ROLLBACK" fi - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; $optimize_query; $action; @@ -126,7 +126,7 @@ function select_action() { set -e - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; @@ -140,7 +140,7 @@ function select_insert_action() { set -e - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; SELECT throwIf((SELECT count() FROM tmp) != 0) FORMAT Null; INSERT INTO tmp SELECT 1, n*m FROM src; @@ -199,7 +199,7 @@ wait $PID_8 || echo "second select_insert_action has failed with status $?" 
2>&1 wait_for_queries_to_finish $WAIT_FINISH -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; @@ -209,7 +209,7 @@ $CLICKHOUSE_CLIENT --multiquery --query " COMMIT; " -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " SELECT throwIf((SELECT (sum(n), count() % 2) FROM src) != (0, 1)) FORMAT Null; SELECT throwIf((SELECT (sum(nm), count() % 2) FROM mv) != (0, 1)) FORMAT Null; SELECT throwIf((SELECT (sum(nm), count() % 2) FROM dst) != (0, 1)) FORMAT Null; diff --git a/tests/queries/0_stateless/01174_select_insert_isolation.sh b/tests/queries/0_stateless/01174_select_insert_isolation.sh index 6321f6ff01b..235d98fb5de 100755 --- a/tests/queries/0_stateless/01174_select_insert_isolation.sh +++ b/tests/queries/0_stateless/01174_select_insert_isolation.sh @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT --query "CREATE TABLE mt (n Int8, m Int8) ENGINE=MergeTree OR function thread_insert_commit() { for i in {1..50}; do - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO mt VALUES /* ($i, $1) */ ($i, $1); INSERT INTO mt VALUES /* (-$i, $1) */ (-$i, $1); @@ -27,7 +27,7 @@ function thread_insert_commit() function thread_insert_rollback() { for _ in {1..50}; do - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; INSERT INTO mt VALUES /* (42, $1) */ (42, $1); ROLLBACK;"; @@ -38,7 +38,7 @@ function thread_select() { while true; do # The first and the last queries must get the same result - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; SET throw_on_unsupported_query_inside_transaction=0; CREATE TEMPORARY TABLE tmp AS SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt FORMAT Null; @@ -58,7 +58,7 @@ kill -TERM $PID_4 wait wait_for_queries_to_finish 40 -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " BEGIN TRANSACTION; SELECT count(), sum(n), sum(m=1), sum(m=2), sum(m=3) FROM mt;"; diff --git a/tests/queries/0_stateless/01198_client_quota_key.sh b/tests/queries/0_stateless/01198_client_quota_key.sh index 3f5f5df5071..d08aa2e364f 100755 --- a/tests/queries/0_stateless/01198_client_quota_key.sh +++ b/tests/queries/0_stateless/01198_client_quota_key.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --quota_key Hello --query_id test_quota_key --log_queries 1 --multiquery --query "SELECT 1; SYSTEM FLUSH LOGS; SELECT DISTINCT quota_key FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND event_time >= now() - 300 AND query_id = 'test_quota_key'" +$CLICKHOUSE_CLIENT --quota_key Hello --query_id test_quota_key --log_queries 1 --query "SELECT 1; SYSTEM FLUSH LOGS; SELECT DISTINCT quota_key FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND event_time >= now() - 300 AND query_id = 'test_quota_key'" diff --git a/tests/queries/0_stateless/01285_engine_join_donmikel.sh b/tests/queries/0_stateless/01285_engine_join_donmikel.sh index 7522ed9924b..ce273ab8e0c 100755 --- a/tests/queries/0_stateless/01285_engine_join_donmikel.sh +++ b/tests/queries/0_stateless/01285_engine_join_donmikel.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS NmSubj; DROP TABLE IF EXISTS events; @@ -60,7 +60,7 @@ FROM events as e INNER JOIN NmSubj as ns ON ns.NmId = toUInt32(e.Param1) WHERE e.EventDate = today() - 7 AND e.EventId = 'GCO' AND ns.SubjectId = 2073" -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE NmSubj; DROP TABLE events; " diff --git a/tests/queries/0_stateless/01293_optimize_final_force.sh b/tests/queries/0_stateless/01293_optimize_final_force.sh index e838af8af9b..9c135d272e4 100755 --- a/tests/queries/0_stateless/01293_optimize_final_force.sh +++ b/tests/queries/0_stateless/01293_optimize_final_force.sh @@ -11,7 +11,7 @@ TIMELIMIT=31 while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 100 ]; do it=$((it+1)) - $CLICKHOUSE_CLIENT --multiquery --query " + $CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS mt; CREATE TABLE mt (x UInt8, k UInt8 DEFAULT 0) ENGINE = SummingMergeTree ORDER BY k; diff --git a/tests/queries/0_stateless/01304_direct_io_long.sh b/tests/queries/0_stateless/01304_direct_io_long.sh index 2e27c2f7728..1241f299d94 100755 --- a/tests/queries/0_stateless/01304_direct_io_long.sh +++ b/tests/queries/0_stateless/01304_direct_io_long.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS bug; CREATE TABLE bug (UserID UInt64, Date Date) ENGINE = MergeTree ORDER BY Date SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', merge_max_block_size = 8192; @@ -18,5 +18,5 @@ cat "$LOG" | grep Loaded rm "$LOG" -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE bug;" diff --git a/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh b/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh index af8d3f4e69b..a634f689dca 100755 --- a/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh +++ b/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo 'zero rows' for format in TSVWithNames TSVWithNamesAndTypes CSVWithNames CSVWithNamesAndTypes JSONCompactEachRowWithNames JSONCompactEachRowWithNamesAndTypes JSONCompactStringsEachRow JSONCompactStringsEachRowWithNamesAndTypes; do echo $format - ${CLICKHOUSE_LOCAL} --multiquery --query=" + ${CLICKHOUSE_LOCAL} --query=" CREATE TABLE ${format}_01375 ENGINE File($format, '01375_$format') AS SELECT * FROM numbers(1) WHERE number < 0; SELECT * FROM ${format}_01375; DROP TABLE ${format}_01375; @@ -22,7 +22,7 @@ echo 'multi clickhouse-local one file' for format in TSVWithNames TSVWithNamesAndTypes CSVWithNames CSVWithNamesAndTypes JSONCompactEachRowWithNames JSONCompactEachRowWithNamesAndTypes JSONCompactStringsEachRow JSONCompactStringsEachRowWithNamesAndTypes; do echo $format for _ in {1..2}; do - ${CLICKHOUSE_LOCAL} --multiquery --query=" + ${CLICKHOUSE_LOCAL} --query=" CREATE TABLE ${format}_01375 ENGINE File($format, '01375_$format') AS SELECT * FROM numbers(1); SELECT * FROM ${format}_01375; DROP TABLE ${format}_01375; diff --git a/tests/queries/0_stateless/01443_merge_truncate_long.sh b/tests/queries/0_stateless/01443_merge_truncate_long.sh index 65b9bcd366e..51654b2e4e1 100755 --- a/tests/queries/0_stateless/01443_merge_truncate_long.sh +++ b/tests/queries/0_stateless/01443_merge_truncate_long.sh @@ -34,7 +34,7 @@ do SELECT count() FROM t HAVING count() > 0; SELECT ${i}; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} wait diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh index d61d34244d9..c1d5c357308 100755 --- a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh @@ -10,6 +10,6 @@ rm -rf "${WORKING_FOLDER_01527}" mkdir -p "${WORKING_FOLDER_01527}" # OPTIMIZE was crashing due to lack of temporary volume in local -${CLICKHOUSE_LOCAL} --multiquery --query "drop database if exists d; create database d; create table d.t engine MergeTree order by a as select 1 a; optimize table d.t final" --path="${WORKING_FOLDER_01527}" +${CLICKHOUSE_LOCAL} --query "drop database if exists d; create database d; create table d.t engine MergeTree order by a as select 1 a; optimize table d.t final" --path="${WORKING_FOLDER_01527}" rm -rf "${WORKING_FOLDER_01527}" diff --git a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh index a5697a62dc2..bc9efaedd5d 100755 --- a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh +++ 
b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery --query " +$CLICKHOUSE_CLIENT --query " SET allow_suspicious_low_cardinality_types=1; CREATE TABLE IF NOT EXISTS test_01543 (value LowCardinality(String), value2 LowCardinality(UInt64)) ENGINE=Memory(); " diff --git a/tests/queries/0_stateless/01544_file_engine_settings.sh b/tests/queries/0_stateless/01544_file_engine_settings.sh index b31754f9531..eb0a8a964d0 100755 --- a/tests/queries/0_stateless/01544_file_engine_settings.sh +++ b/tests/queries/0_stateless/01544_file_engine_settings.sh @@ -10,7 +10,7 @@ rm -f -- "$the_file" # We are going to check that format settings work for File engine, # by creating a table with a non-default delimiter, and reading from it. -${CLICKHOUSE_LOCAL} --multiquery --query " +${CLICKHOUSE_LOCAL} --query " create table t(a int, b int) engine File(CSV, '$the_file') settings format_csv_delimiter = '|'; insert into t select 1 a, 1 b; " @@ -18,7 +18,7 @@ ${CLICKHOUSE_LOCAL} --multiquery --query " # See what's in the file cat "$the_file" -${CLICKHOUSE_LOCAL} --multiquery --query " +${CLICKHOUSE_LOCAL} --query " create table t(a int, b int) engine File(CSV, '$the_file') settings format_csv_delimiter = '|'; select * from t; " diff --git a/tests/queries/0_stateless/01600_detach_permanently.sh b/tests/queries/0_stateless/01600_detach_permanently.sh index 6721dbf3015..679e9a749ee 100755 --- a/tests/queries/0_stateless/01600_detach_permanently.sh +++ b/tests/queries/0_stateless/01600_detach_permanently.sh @@ -18,7 +18,7 @@ mkdir -p "${WORKING_FOLDER_01600}" clickhouse_local() { local query="$1" shift - ${CLICKHOUSE_LOCAL} --allow_deprecated_database_ordinary=1 --multiquery --query "$query" "$@" --path="${WORKING_FOLDER_01600}" + ${CLICKHOUSE_LOCAL} --allow_deprecated_database_ordinary=1 --query "$query" "$@" --path="${WORKING_FOLDER_01600}" } test_detach_attach_sequence() { diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index 7215f270a4c..47b5a4dea13 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -24,7 +24,7 @@ verify() if [[ $i -eq 5000 ]] then - $CLICKHOUSE_CLIENT --multiquery " + $CLICKHOUSE_CLIENT " SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; SELECT sum(active), sum(NOT active) FROM system.parts; SELECT sum(active), sum(NOT active) FROM system.projection_parts; diff --git a/tests/queries/0_stateless/01606_git_import.sh b/tests/queries/0_stateless/01606_git_import.sh index 48558d79f93..6986d6b14cf 100755 --- a/tests/queries/0_stateless/01606_git_import.sh +++ b/tests/queries/0_stateless/01606_git_import.sh @@ -19,7 +19,7 @@ done ${CLICKHOUSE_GIT_IMPORT} 2>&1 | wc -l -${CLICKHOUSE_CLIENT} --multiline --multiquery --query " +${CLICKHOUSE_CLIENT} --multiline --query " DROP TABLE IF EXISTS commits; DROP TABLE IF EXISTS file_changes; @@ -122,7 +122,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM commits" ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM file_changes" ${CLICKHOUSE_CLIENT} --query "SELECT count(), round(avg(indent), 1) FROM line_changes" -${CLICKHOUSE_CLIENT} --multiline --multiquery --query " +${CLICKHOUSE_CLIENT} --multiline --query " DROP TABLE commits; DROP TABLE 
file_changes; DROP TABLE line_changes; diff --git a/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh b/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh index 946be7fb4af..2a1182c14c1 100755 --- a/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh +++ b/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS test; CREATE TABLE test (a Array(String)) ENGINE = Memory; " @@ -22,7 +22,7 @@ ${CLICKHOUSE_CLIENT} --input_format_csv_arrays_as_nested_csv 1 --query "INSERT I """Hello"", ""world"", ""42"""" TV""" END -${CLICKHOUSE_CLIENT} --multiquery --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM test; DROP TABLE IF EXISTS test; " diff --git a/tests/queries/0_stateless/01632_tinylog_read_write.sh b/tests/queries/0_stateless/01632_tinylog_read_write.sh index 10625ec5d27..68d28b080e9 100755 --- a/tests/queries/0_stateless/01632_tinylog_read_write.sh +++ b/tests/queries/0_stateless/01632_tinylog_read_write.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery --query "DROP TABLE IF EXISTS test; CREATE TABLE IF NOT EXISTS test (x UInt64, s Array(Nullable(String))) ENGINE = TinyLog;" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test; CREATE TABLE IF NOT EXISTS test (x UInt64, s Array(Nullable(String))) ENGINE = TinyLog;" function thread_select { local TIMELIMIT=$((SECONDS+$1)) @@ -47,4 +47,4 @@ thread_insert $TIMEOUT & wait echo "Done" -$CLICKHOUSE_CLIENT --multiquery --query "DROP TABLE IF EXISTS test;" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test;" diff --git a/tests/queries/0_stateless/01658_read_file_to_stringcolumn.sh b/tests/queries/0_stateless/01658_read_file_to_stringcolumn.sh index 685fe69642a..ceb6aa060ea 100755 --- a/tests/queries/0_stateless/01658_read_file_to_stringcolumn.sh +++ b/tests/queries/0_stateless/01658_read_file_to_stringcolumn.sh @@ -24,7 +24,7 @@ ${CLICKHOUSE_CLIENT} --query "select file('a.txt'), file('b.txt');";echo ":"$? ${CLICKHOUSE_CLIENT} --query "insert into data select file('a.txt'), file('b.txt');";echo ":"$? ${CLICKHOUSE_CLIENT} --query "insert into data select file('a.txt'), file('b.txt');";echo ":"$? ${CLICKHOUSE_CLIENT} --query "select file('c.txt'), * from data";echo ":"$? 
-${CLICKHOUSE_CLIENT} --multiquery --query " +${CLICKHOUSE_CLIENT} --query " create table filenames(name String) engine=MergeTree() order by tuple(); insert into filenames values ('a.txt'), ('b.txt'), ('c.txt'); select file(name) from filenames format TSV; @@ -56,7 +56,7 @@ echo $c_count # Valid cases: # The default dir is the CWD path in LOCAL mode -${CLICKHOUSE_LOCAL} --multiquery --query " +${CLICKHOUSE_LOCAL} --query " drop table if exists data; create table data (A String, B String) engine=MergeTree() order by A; select file('a.txt'), file('b.txt'); diff --git a/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh b/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh index e04c9515009..ec318db98bf 100755 --- a/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh +++ b/tests/queries/0_stateless/01666_merge_tree_max_query_limit.sh @@ -8,7 +8,7 @@ function wait_for_query_to_start() { while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT sum(read_rows) FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done } -${CLICKHOUSE_CLIENT} --multiline --multiquery --query " +${CLICKHOUSE_CLIENT} --multiline --query " drop table if exists simple; create table simple (i int, j int) engine = MergeTree order by i diff --git a/tests/queries/0_stateless/01747_system_session_log_long.sh b/tests/queries/0_stateless/01747_system_session_log_long.sh index 022bf488886..07055f96782 100755 --- a/tests/queries/0_stateless/01747_system_session_log_long.sh +++ b/tests/queries/0_stateless/01747_system_session_log_long.sh @@ -82,7 +82,7 @@ trap "cleanup" EXIT function executeQueryExpectError() { cat - > "${TMP_QUERY_FILE}" - ! ${CLICKHOUSE_CLIENT} --multiquery --queries-file "${TMP_QUERY_FILE}" "${@}" 2>&1 | tee -a "${TMP_QUERY_FILE}" + ! ${CLICKHOUSE_CLIENT} --queries-file "${TMP_QUERY_FILE}" "${@}" 2>&1 | tee -a "${TMP_QUERY_FILE}" } function createUser() @@ -303,7 +303,7 @@ function runEndpointTests() if [[ -n "${setup_queries}" ]] then # echo "Executing setup queries: ${setup_queries}" - echo "${setup_queries}" | executeQuery --multiquery + echo "${setup_queries}" | executeQuery fi testTCP "${auth_type}" "${username}" "${password}" @@ -357,7 +357,7 @@ testAsUserIdentifiedBy "plaintext_password" testAsUserIdentifiedBy "sha256_password" testAsUserIdentifiedBy "double_sha1_password" -executeQuery --multiquery <= 1000000 ? 1 : time FROM system.query_log WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" diff --git a/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh b/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh index 03babad40f3..b2335a0365b 100755 --- a/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh +++ b/tests/queries/0_stateless/01939_network_receive_bytes_metrics.sh @@ -4,11 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = Memory;" +${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = Memory;" seq 1 1000 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" -${CLICKHOUSE_CLIENT} --multiquery --query "SYSTEM FLUSH LOGS; +${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS; WITH ProfileEvents['NetworkReceiveBytes'] AS bytes SELECT bytes >= 8000 AND bytes < 9000 ? 
1 : bytes FROM system.query_log WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" diff --git a/tests/queries/0_stateless/01946_test_wrong_host_name_access.sh b/tests/queries/0_stateless/01946_test_wrong_host_name_access.sh index a00f307673e..ed2828c3f54 100755 --- a/tests/queries/0_stateless/01946_test_wrong_host_name_access.sh +++ b/tests/queries/0_stateless/01946_test_wrong_host_name_access.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery --query " +${CLICKHOUSE_CLIENT} --query " DROP USER IF EXISTS dns_fail_1, dns_fail_2; CREATE USER dns_fail_1 HOST NAME 'non.existing.host.name', '${MYHOSTNAME}'; CREATE USER dns_fail_2 HOST NAME '${MYHOSTNAME}', 'non.existing.host.name';" diff --git a/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh b/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh index 0aedef028a2..b3748581f4f 100755 --- a/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh +++ b/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) mkdir -p ${USER_FILES_PATH}/ cp $CUR_DIR/data_zstd/test_01946.zstd ${USER_FILES_PATH}/ -${CLICKHOUSE_CLIENT} --multiline --multiquery --query " +${CLICKHOUSE_CLIENT} --multiline --query " set min_chunk_bytes_for_parallel_parsing=10485760; set max_read_buffer_size = 65536; set input_format_parallel_parsing = 0; diff --git a/tests/queries/0_stateless/02009_from_infile.sh b/tests/queries/0_stateless/02009_from_infile.sh index 6a31aa4ac55..578ac14f558 100755 --- a/tests/queries/0_stateless/02009_from_infile.sh +++ b/tests/queries/0_stateless/02009_from_infile.sh @@ -19,7 +19,7 @@ ${CLICKHOUSE_CLIENT} --query "INSERT INTO test_infile FROM INFILE '${CLICKHOUSE_ ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_infile;" # if it not fails, select will print information -${CLICKHOUSE_LOCAL} --multiquery --query "CREATE TABLE test_infile (word String) ENGINE=Memory(); INSERT INTO test_infile FROM INFILE '${CLICKHOUSE_TMP}/test_infile.gz' FORMAT CSV; SELECT * from test_infile;" +${CLICKHOUSE_LOCAL} --query "CREATE TABLE test_infile (word String) ENGINE=Memory(); INSERT INTO test_infile FROM INFILE '${CLICKHOUSE_TMP}/test_infile.gz' FORMAT CSV; SELECT * from test_infile;" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=DROP+TABLE" -d 'IF EXISTS test_infile_url' ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=CREATE" -d 'TABLE test_infile_url (x String) ENGINE = Memory' diff --git a/tests/queries/0_stateless/02024_compression_in_query.sh b/tests/queries/0_stateless/02024_compression_in_query.sh index caa74523bd7..2936568c991 100755 --- a/tests/queries/0_stateless/02024_compression_in_query.sh +++ b/tests/queries/0_stateless/02024_compression_in_query.sh @@ -55,8 +55,8 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_compression_keyword;" [ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz.gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz.gz # create files using compression method and without it to check that both queries work correct -${CLICKHOUSE_LOCAL} --multiquery --query "SELECT * FROM (SELECT 'Hello, World! 
From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated;" -${CLICKHOUSE_LOCAL} --multiquery --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz' COMPRESSION 'GZ' FORMAT TabSeparated;" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated;" +${CLICKHOUSE_LOCAL} --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz' COMPRESSION 'GZ' FORMAT TabSeparated;" # check content of files cp ${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp.gz @@ -68,7 +68,7 @@ gunzip ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp.gz cat ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp # create table to check inserts -${CLICKHOUSE_LOCAL} --multiquery --query " +${CLICKHOUSE_LOCAL} --query " DROP TABLE IF EXISTS test_compression_keyword; CREATE TABLE test_compression_keyword (text String) Engine=Memory; INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated; diff --git a/tests/queries/0_stateless/02048_parallel_reading_from_infile.sh b/tests/queries/0_stateless/02048_parallel_reading_from_infile.sh index f055ea304b2..efc19cad054 100755 --- a/tests/queries/0_stateless/02048_parallel_reading_from_infile.sh +++ b/tests/queries/0_stateless/02048_parallel_reading_from_infile.sh @@ -17,7 +17,7 @@ echo -e "103" > "${CLICKHOUSE_TMP}"/test_infile_parallel_3 gzip "${CLICKHOUSE_TMP}"/test_infile_parallel -${CLICKHOUSE_CLIENT} --multiquery <&1 | grep -q "27" && echo "Correct" || echo 'Fail' -${CLICKHOUSE_LOCAL} --multiquery <&1 | grep 'AlterCommand' + $CLICKHOUSE_CLIENT --readonly 1 2>&1 | grep 'AlterCommand' diff --git a/tests/queries/0_stateless/02206_clickhouse_local_use_database.sh b/tests/queries/0_stateless/02206_clickhouse_local_use_database.sh index 59ede739e4a..3b71c8754c9 100755 --- a/tests/queries/0_stateless/02206_clickhouse_local_use_database.sh +++ b/tests/queries/0_stateless/02206_clickhouse_local_use_database.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --echo --multiline --multiquery -q """ +$CLICKHOUSE_LOCAL --echo --multiline -q """ SHOW TABLES; CREATE DATABASE test1; CREATE TABLE test1.table1 (a Int32) ENGINE=Memory; diff --git a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh index d0e61541b15..18ae2d7b4b3 100755 --- a/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh +++ b/tests/queries/0_stateless/02226_filesystem_cache_profile_events.sh @@ -10,7 +10,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do echo "Using storage policy: $STORAGE_POLICY" - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ SET max_memory_usage='20G'; SET enable_filesystem_cache_on_write_operations = 0; @@ -25,7 +25,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1) - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ SYSTEM FLUSH LOGS; SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, @@ -40,14 +40,14 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do LIMIT 1; """ - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ set remote_filesystem_read_method = 'read'; set local_filesystem_read_method = 'pread'; """ query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1" 2>&1) - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ SYSTEM FLUSH LOGS; SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, @@ -63,13 +63,13 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do """ - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ set remote_filesystem_read_method='threadpool'; """ query_id=$($CLICKHOUSE_CLIENT --query "select queryID() from ($query) limit 1") - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ SYSTEM FLUSH LOGS; SELECT ProfileEvents['CachedReadBufferReadFromCacheHits'] > 0 as remote_fs_cache_hit, ProfileEvents['CachedReadBufferReadFromCacheMisses'] > 0 as remote_fs_cache_miss, @@ -84,7 +84,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do LIMIT 1; """ - $CLICKHOUSE_CLIENT --multiquery --multiline --query """ + $CLICKHOUSE_CLIENT --multiline --query """ SELECT * FROM test_02226 WHERE value LIKE '%abc%' ORDER BY value LIMIT 10 FORMAT Null; SET enable_filesystem_cache_on_write_operations = 1; diff --git a/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh b/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh index 344452767cc..a3fe5f19de0 100755 --- a/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh +++ b/tests/queries/0_stateless/02227_test_create_empty_sqlite_db.sh @@ -17,7 +17,7 @@ export CURR_DATABASE="test_01889_sqllite_${CLICKHOUSE_DATABASE}" DB_PATH=${USER_FILES_PATH}/${CURR_DATABASE}_db1 -${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" +${CLICKHOUSE_CLIENT} --multiline --query=""" DROP 
DATABASE IF EXISTS ${CURR_DATABASE}; CREATE DATABASE ${CURR_DATABASE} ENGINE = SQLite('${DB_PATH}'); SHOW TABLES FROM ${CURR_DATABASE}; @@ -25,6 +25,6 @@ SHOW TABLES FROM ${CURR_DATABASE}; sqlite3 "${DB_PATH}" 'CREATE TABLE table1 (col1 text, col2 smallint);' -${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" +${CLICKHOUSE_CLIENT} --multiline --query=""" SHOW TABLES FROM ${CURR_DATABASE}; """ diff --git a/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh index ffc38c0c1bd..aa5db33417c 100755 --- a/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh +++ b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --allow_suspicious_low_cardinality_types=1 --multiquery --multiline --query=""" +${CLICKHOUSE_CLIENT} --allow_suspicious_low_cardinality_types=1 --multiline --query=""" DROP TABLE IF EXISTS t_01411; DROP TABLE IF EXISTS t_01411_num; @@ -35,7 +35,7 @@ insert into lc_dict_reading select number, if(number < 8192 * 4, number % 100, n function go() { -${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" +${CLICKHOUSE_CLIENT} --multiline --query=""" select sum(toUInt64(str)), sum(toUInt64(pat)) from lc_dict_reading where val < 8129 or val > 8192 * 4; @@ -67,7 +67,7 @@ for _ in `seq 1 32`; do go | grep -q "Exception" && echo 'FAIL' || echo 'OK' ||: wait -${CLICKHOUSE_CLIENT} --multiquery --multiline --query=""" +${CLICKHOUSE_CLIENT} --multiline --query=""" DROP TABLE IF EXISTS t_01411; DROP TABLE IF EXISTS t_01411_num; """ diff --git a/tests/queries/0_stateless/02240_protobuflist_format_persons.sh b/tests/queries/0_stateless/02240_protobuflist_format_persons.sh index 637e01b9e63..e5e717d00a8 100755 --- a/tests/queries/0_stateless/02240_protobuflist_format_persons.sh +++ b/tests/queries/0_stateless/02240_protobuflist_format_persons.sh @@ -15,7 +15,7 @@ SCHEMADIR=$CURDIR/format_schemas set -eo pipefail # Run the client. 
-$CLICKHOUSE_CLIENT --multiquery <&1 | rg -Fc "'w' character" +$CLICKHOUSE_LOCAL <&1 | rg -Fc "'w' character" SELECT * FROM format(JSONEachRow, 'x Bool', '{"x": wtf}'); END -$CLICKHOUSE_LOCAL --multiquery <&1 | rg -Fc "expected 'false'" +$CLICKHOUSE_LOCAL <&1 | rg -Fc "expected 'false'" SELECT * FROM format(JSONEachRow, 'x Bool', '{"x": ftw}'); END -$CLICKHOUSE_LOCAL --multiquery <&1 | rg -Fc "'{' character" +$CLICKHOUSE_LOCAL <&1 | rg -Fc "'{' character" SELECT * FROM format(JSONEachRow, 'x Bool', '{"x": {}}'); END diff --git a/tests/queries/0_stateless/02722_database_filesystem.sh b/tests/queries/0_stateless/02722_database_filesystem.sh index 2d0ff256c95..fa23d847d90 100755 --- a/tests/queries/0_stateless/02722_database_filesystem.sh +++ b/tests/queries/0_stateless/02722_database_filesystem.sh @@ -30,7 +30,7 @@ cp ${user_files_tmp_dir}/tmp_numbers_1.csv ${user_files_tmp_dir}/tmp/tmp_numbers ################# echo "Test 1: create filesystem database and check implicit calls" -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test1; CREATE DATABASE test1 ENGINE = Filesystem; """ @@ -57,20 +57,20 @@ ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`/tmp/tmp.csv\`;" 2>&1 ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../*/tmp_numbers_*.csv\`;" 2>&1 | tr '\n' ' ' | grep -oF "PATH_ACCESS_DENIED" > /dev/null && echo "OK" || echo 'FAIL' ||: ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../tmp_numbers_*.csv\`;" 2>&1 | tr '\n' ' ' | grep -oF "PATH_ACCESS_DENIED" > /dev/null && echo "OK" || echo 'FAIL' ||: ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../*.csv\`;" 2>&1 | tr '\n' ' ' | grep -oF "PATH_ACCESS_DENIED" > /dev/null && echo "OK" || echo 'FAIL' ||: -${CLICKHOUSE_CLIENT} --multiline --multiquery --query """ +${CLICKHOUSE_CLIENT} --multiline --query """ USE test1; SELECT COUNT(*) FROM \"../${tmp_dir}/tmp.csv\"; """ 2>&1 | tr '\n' ' ' | grep -oF "PATH_ACCESS_DENIED" > /dev/null && echo "OK" || echo 'FAIL' ||: ${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../../../../../../tmp.csv\`;" 2>&1 | tr '\n' ' ' | grep -oF "PATH_ACCESS_DENIED" > /dev/null && echo "OK" || echo 'FAIL' ||: # BAD_ARGUMENTS: path should be inside user_files -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test2; CREATE DATABASE test2 ENGINE = Filesystem('/tmp'); """ 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "BAD_ARGUMENTS" > /dev/null && echo "OK" || echo 'FAIL' ||: # BAD_ARGUMENTS: .../user_files/relative_unknown_dir does not exist -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test2; CREATE DATABASE test2 ENGINE = Filesystem('relative_unknown_dir'); """ 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "BAD_ARGUMENTS" > /dev/null && echo "OK" || echo 'FAIL' ||: diff --git a/tests/queries/0_stateless/02724_database_s3.sh b/tests/queries/0_stateless/02724_database_s3.sh index 80b47282146..cc7f012c8cf 100755 --- a/tests/queries/0_stateless/02724_database_s3.sh +++ b/tests/queries/0_stateless/02724_database_s3.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ################# echo "Test 1: select from s3" -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test1; CREATE DATABASE test1 ENGINE = S3; USE test1; @@ -17,7 +17,7 @@ SELECT * FROM 
\"http://localhost:11111/test/a.tsv\" ${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test1 # check credentials with absolute path -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test2; CREATE DATABASE test2 ENGINE = S3('', 'test', 'testtest'); USE test2; @@ -25,7 +25,7 @@ SELECT * FROM \"http://localhost:11111/test/b.tsv\" """ # check credentials with relative path -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test4; CREATE DATABASE test4 ENGINE = S3('http://localhost:11111/test', 'test', 'testtest'); USE test4; @@ -33,7 +33,7 @@ SELECT * FROM \"b.tsv\" """ # Check named collection loading -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test5; CREATE DATABASE test5 ENGINE = S3(s3_conn_db); SELECT * FROM test5.\`b.tsv\` @@ -41,20 +41,20 @@ SELECT * FROM test5.\`b.tsv\` ################# echo "Test 2: check exceptions" -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test3; CREATE DATABASE test3 ENGINE = S3; USE test3; SELECT * FROM \"http://localhost:11111/test/a.myext\" """ 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "S3_ERROR" > /dev/null && echo "OK" || echo 'FAIL' ||: -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ USE test3; SELECT * FROM \"abacaba\" """ 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "BAD_ARGUMENTS" > /dev/null && echo "OK" || echo 'FAIL' ||: # Cleanup -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test1; DROP DATABASE IF EXISTS test2; DROP DATABASE IF EXISTS test3; diff --git a/tests/queries/0_stateless/02725_database_hdfs.sh b/tests/queries/0_stateless/02725_database_hdfs.sh index 1eb22976b84..7fd35c72ef1 100755 --- a/tests/queries/0_stateless/02725_database_hdfs.sh +++ b/tests/queries/0_stateless/02725_database_hdfs.sh @@ -25,7 +25,7 @@ fi echo "Test 1: select from hdfs database" # Database without specific host -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test_hdfs_1; CREATE DATABASE test_hdfs_1 ENGINE = HDFS; USE test_hdfs_1; @@ -34,7 +34,7 @@ SELECT * FROM \"hdfs://localhost:12222/test_02725_1.tsv\" ${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test_hdfs_1 # Database with host -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test_hdfs_2; CREATE DATABASE test_hdfs_2 ENGINE = HDFS('hdfs://localhost:12222'); USE test_hdfs_2; @@ -45,12 +45,12 @@ ${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test_hdfs_2 ################# echo "Test 2: check exceptions" -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test_hdfs_3; CREATE DATABASE test_hdfs_3 ENGINE = HDFS('abacaba'); """ 2>&1 | tr '\n' ' ' | grep -oF "BAD_ARGUMENTS" -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test_hdfs_4; CREATE DATABASE test_hdfs_4 ENGINE = HDFS; USE test_hdfs_4; @@ -64,7 +64,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_hdfs_4.\`hdfs://localhost:12222 # Cleanup -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ DROP DATABASE IF EXISTS test_hdfs_1; 
DROP DATABASE IF EXISTS test_hdfs_2; DROP DATABASE IF EXISTS test_hdfs_3; diff --git a/tests/queries/0_stateless/02725_local_query_parameters.sh b/tests/queries/0_stateless/02725_local_query_parameters.sh index 92d7f645454..151a854d5b9 100755 --- a/tests/queries/0_stateless/02725_local_query_parameters.sh +++ b/tests/queries/0_stateless/02725_local_query_parameters.sh @@ -5,6 +5,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --multiquery -q " +$CLICKHOUSE_LOCAL -q " SET param_x=1; SELECT {x:UInt64}, {x:String};" diff --git a/tests/queries/0_stateless/02751_multiquery_with_argument.reference b/tests/queries/0_stateless/02751_multiquery_with_argument.reference index 2e55712e49c..f02e9bab2cd 100644 --- a/tests/queries/0_stateless/02751_multiquery_with_argument.reference +++ b/tests/queries/0_stateless/02751_multiquery_with_argument.reference @@ -5,8 +5,6 @@ Syntax error Empty query Empty query -BAD_ARGUMENTS -BAD_ARGUMENTS 301 302 304 diff --git a/tests/queries/0_stateless/02751_multiquery_with_argument.sh b/tests/queries/0_stateless/02751_multiquery_with_argument.sh index 7b959a3c3dc..4021194656b 100755 --- a/tests/queries/0_stateless/02751_multiquery_with_argument.sh +++ b/tests/queries/0_stateless/02751_multiquery_with_argument.sh @@ -4,18 +4,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --multiquery "SELECT 100" -$CLICKHOUSE_LOCAL --multiquery "SELECT 101;" -$CLICKHOUSE_LOCAL --multiquery "SELECT 102;SELECT 103;" +$CLICKHOUSE_LOCAL "SELECT 100" +$CLICKHOUSE_LOCAL "SELECT 101;" +$CLICKHOUSE_LOCAL "SELECT 102;SELECT 103;" # Invalid SQL. -$CLICKHOUSE_LOCAL --multiquery "SELECT 200; S" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_LOCAL --multiquery "; SELECT 201;" 2>&1 | grep -o 'Empty query' -$CLICKHOUSE_LOCAL --multiquery "; S; SELECT 202" 2>&1 | grep -o 'Empty query' - -# Simultaneously passing --queries-file + --query (multiquery) is prohibited. -$CLICKHOUSE_LOCAL --queries-file "queries.csv" --multiquery "SELECT 250;" 2>&1 | grep -o 'BAD_ARGUMENTS' -$CLICKHOUSE_CLIENT --queries-file "queries.csv" --multiquery "SELECT 251;" 2>&1 | grep -o 'BAD_ARGUMENTS' +$CLICKHOUSE_LOCAL "SELECT 200; S" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_LOCAL "; SELECT 201;" 2>&1 | grep -o 'Empty query' +$CLICKHOUSE_LOCAL "; S; SELECT 202" 2>&1 | grep -o 'Empty query' # Error expectation cases. 
# -n is also interpreted as a query diff --git a/tests/queries/0_stateless/02815_no_throw_in_simple_queries.sh b/tests/queries/0_stateless/02815_no_throw_in_simple_queries.sh index 68c55f9b66a..18ffc9dfec3 100755 --- a/tests/queries/0_stateless/02815_no_throw_in_simple_queries.sh +++ b/tests/queries/0_stateless/02815_no_throw_in_simple_queries.sh @@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --query "SHOW TABLES" || echo "Failed" $CLICKHOUSE_CLIENT --query "SELECT * FROM system.tables WHERE database = currentDatabase() FORMAT Null" || echo "Failed" # Multi queries are ok: -$CLICKHOUSE_LOCAL --multiquery "SELECT 1; SELECT 2;" || echo "Failed" +$CLICKHOUSE_LOCAL "SELECT 1; SELECT 2;" || echo "Failed" # It can run in interactive mode: function run() diff --git a/tests/queries/0_stateless/02843_insertion_table_schema_infer.sh b/tests/queries/0_stateless/02843_insertion_table_schema_infer.sh index d806b678456..9207e48092f 100755 --- a/tests/queries/0_stateless/02843_insertion_table_schema_infer.sh +++ b/tests/queries/0_stateless/02843_insertion_table_schema_infer.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) DATA_DIR=$CUR_DIR/data_tsv -$CLICKHOUSE_LOCAL --multiquery \ +$CLICKHOUSE_LOCAL \ "CREATE VIEW users AS SELECT * FROM file('$DATA_DIR/mock_data.tsv', TSVWithNamesAndTypes); CREATE TABLE users_output (name String, tag UInt64)ENGINE = Memory; INSERT INTO users_output WITH (SELECT groupUniqArrayArray(mapKeys(Tags)) FROM users) AS unique_tags SELECT UserName AS name, length(unique_tags) AS tag FROM users; diff --git a/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh b/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh index 08313e2fd3b..229f832ba14 100755 --- a/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh +++ b/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh @@ -39,7 +39,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS tbl" # Then try to restore with the setting `restore_broken_parts_as_detached` set to true. 
$CLICKHOUSE_CLIENT --query "RESTORE TABLE default.tbl AS tbl FROM Disk('backups', '${backup_name}') SETTINGS restore_broken_parts_as_detached = true" 2>/dev/null | awk -F '\t' '{print $2}' -$CLICKHOUSE_CLIENT --multiquery <&1 | grep -c "INVOKER") >= 1 )) && echo "OK" || echo "UNEXPECTED" (( $(${CLICKHOUSE_CLIENT} --query "SHOW TABLE $db.test_view_2" 2>&1 | grep -c "DEFINER = $user1") >= 1 )) && echo "OK" || echo "UNEXPECTED" -${CLICKHOUSE_CLIENT} --multiquery <&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" (( $(${CLICKHOUSE_CLIENT} --query "INSERT INTO $db.test_table VALUES ('foo'), ('bar');" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" -${CLICKHOUSE_CLIENT} --multiquery <\n" $CLICKHOUSE_CLIENT --query "SELECT * FROM file(${UNIX_ENDINGS}, 'TabSeparated', 'SearchTerm String, Date Date, Hits UInt32');" -$CLICKHOUSE_CLIENT --multiquery --query "SELECT * FROM file(${DOS_ENDINGS}, 'TabSeparated', 'SearchTerm String, Date Date, Hits UInt32'); --{serverError 117}" +$CLICKHOUSE_CLIENT --query "SELECT * FROM file(${DOS_ENDINGS}, 'TabSeparated', 'SearchTerm String, Date Date, Hits UInt32'); --{serverError 117}" echo -e "\n<-- Read DOS endings with setting input_format_tsv_crlf_end_of_line=1 -->\n" $CLICKHOUSE_CLIENT --query "SELECT * FROM file(${DOS_ENDINGS}, 'TabSeparated', 'SearchTerm String, Date Date, Hits UInt32') SETTINGS input_format_tsv_crlf_end_of_line = 1;" diff --git a/tests/queries/0_stateless/02995_forget_partition.sh b/tests/queries/0_stateless/02995_forget_partition.sh index c22f5829130..6fa0b96e90d 100755 --- a/tests/queries/0_stateless/02995_forget_partition.sh +++ b/tests/queries/0_stateless/02995_forget_partition.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ drop table if exists forget_partition; create table forget_partition @@ -31,7 +31,7 @@ alter table forget_partition drop partition '20240102'; # DROP PARTITION do not wait for a part to be removed from memory due to possible concurrent SELECTs, so we have to do wait manually here while [[ $(${CLICKHOUSE_CLIENT} -q "select count() from system.parts where database=currentDatabase() and table='forget_partition' and partition IN ('20240101', '20240102')") != 0 ]]; do sleep 1; done -${CLICKHOUSE_CLIENT} --multiline --multiquery -q """ +${CLICKHOUSE_CLIENT} --multiline -q """ set allow_unrestricted_reads_from_keeper=1; select '---before---'; diff --git a/tests/queries/0_stateless/02995_index_1.sh b/tests/queries/0_stateless/02995_index_1.sh index a5f1b30c2e8..128697fd0fe 100755 --- a/tests/queries/0_stateless/02995_index_1.sh +++ b/tests/queries/0_stateless/02995_index_1.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_10.sh b/tests/queries/0_stateless/02995_index_10.sh index d72c7c72705..c15ba00fd05 100755 --- a/tests/queries/0_stateless/02995_index_10.sh +++ b/tests/queries/0_stateless/02995_index_10.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_2.sh b/tests/queries/0_stateless/02995_index_2.sh index e7451c7ee4b..a32f5c511f8 100755 --- a/tests/queries/0_stateless/02995_index_2.sh +++ b/tests/queries/0_stateless/02995_index_2.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_3.sh b/tests/queries/0_stateless/02995_index_3.sh index 506429e2696..9cc937391fc 100755 --- a/tests/queries/0_stateless/02995_index_3.sh +++ b/tests/queries/0_stateless/02995_index_3.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_4.sh b/tests/queries/0_stateless/02995_index_4.sh index 1a0458728f9..e450997e48b 100755 --- a/tests/queries/0_stateless/02995_index_4.sh +++ b/tests/queries/0_stateless/02995_index_4.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_5.sh b/tests/queries/0_stateless/02995_index_5.sh index 60c12a8146d..80f75a532e3 100755 --- a/tests/queries/0_stateless/02995_index_5.sh +++ b/tests/queries/0_stateless/02995_index_5.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_6.sh b/tests/queries/0_stateless/02995_index_6.sh index 4936f73f36b..e90387c7c0c 100755 --- a/tests/queries/0_stateless/02995_index_6.sh +++ b/tests/queries/0_stateless/02995_index_6.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_7.sh b/tests/queries/0_stateless/02995_index_7.sh index 26be310abce..a5fdd98b2f8 100755 --- a/tests/queries/0_stateless/02995_index_7.sh +++ b/tests/queries/0_stateless/02995_index_7.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_8.sh b/tests/queries/0_stateless/02995_index_8.sh index 8c2620b59fd..adb835aedca 100755 --- a/tests/queries/0_stateless/02995_index_8.sh +++ b/tests/queries/0_stateless/02995_index_8.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_9.sh b/tests/queries/0_stateless/02995_index_9.sh index 76160c62aaa..4b78777cd2a 100755 --- a/tests/queries/0_stateless/02995_index_9.sh +++ b/tests/queries/0_stateless/02995_index_9.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} --multiquery +done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test" +${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02998_native_parquet_reader.sh b/tests/queries/0_stateless/02998_native_parquet_reader.sh index d6369c4921b..95b659815ed 100755 --- a/tests/queries/0_stateless/02998_native_parquet_reader.sh +++ b/tests/queries/0_stateless/02998_native_parquet_reader.sh @@ -208,4 +208,4 @@ CH_SCHEMA="\ QUERY="SELECT * from file('$PAR_PATH', 'Parquet', '$CH_SCHEMA')" # there may be more than on group in parquet files, unstable results may generated by multithreads -$CLICKHOUSE_LOCAL --multiquery --max_threads 1 --max_parsing_threads 1 --input_format_parquet_use_native_reader true --query "$QUERY" +$CLICKHOUSE_LOCAL --max_threads 1 --max_parsing_threads 1 --input_format_parquet_use_native_reader true --query "$QUERY" diff --git a/tests/queries/0_stateless/03001_backup_matview_after_modify_query.sh b/tests/queries/0_stateless/03001_backup_matview_after_modify_query.sh index f857358a5ea..8c6aa70f14c 100755 --- a/tests/queries/0_stateless/03001_backup_matview_after_modify_query.sh +++ b/tests/queries/0_stateless/03001_backup_matview_after_modify_query.sh @@ -9,7 +9,7 @@ db="$CLICKHOUSE_DATABASE" db_2="${db}_2" backup_name="${db}_backup" -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS src; DROP TABLE IF EXISTS mv; CREATE TABLE src(Timestamp DateTime64(9), c1 String, c2 String) ENGINE=MergeTree ORDER BY Timestamp; diff --git a/tests/queries/0_stateless/03001_matview_columns_after_modify_query.sh b/tests/queries/0_stateless/03001_matview_columns_after_modify_query.sh index 2ec5832fac6..96cbd391a44 100755 --- a/tests/queries/0_stateless/03001_matview_columns_after_modify_query.sh +++ b/tests/queries/0_stateless/03001_matview_columns_after_modify_query.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE IF EXISTS src; DROP TABLE IF EXISTS mv; CREATE TABLE src(Timestamp DateTime64(9), c1 String, c2 String) ENGINE=MergeTree ORDER BY Timestamp; diff --git a/tests/queries/0_stateless/03006_correct_revoke_for_partial_rights.sh b/tests/queries/0_stateless/03006_correct_revoke_for_partial_rights.sh index 8c79dfdbafc..312fb03668c 100755 --- a/tests/queries/0_stateless/03006_correct_revoke_for_partial_rights.sh +++ b/tests/queries/0_stateless/03006_correct_revoke_for_partial_rights.sh @@ -8,7 +8,7 @@ db=${CLICKHOUSE_DATABASE} user1="user1_03006_${db}_$RANDOM" user2="user2_03006_${db}_$RANDOM" -${CLICKHOUSE_CLIENT} --multiquery <&1 | grep --text -F -v "ASan doesn't fully support makecontext/swapcontext functions" -${CLICKHOUSE_CLIENT} --multiquery " +${CLICKHOUSE_CLIENT} " DROP TABLE test; " diff --git a/tests/queries/0_stateless/03212_thousand_exceptions.sh b/tests/queries/0_stateless/03212_thousand_exceptions.sh index 0a6abf35c10..1237cbf537f 100755 --- a/tests/queries/0_stateless/03212_thousand_exceptions.sh +++ b/tests/queries/0_stateless/03212_thousand_exceptions.sh @@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # This should not be too slow, even under sanitizers. -yes "SELECT throwIf(1); SELECT '.' FORMAT Values;" | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery --ignore-error 2>/dev/null +yes "SELECT throwIf(1); SELECT '.' FORMAT Values;" | head -n 1000 | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null From 6372fdee6d344bd87d58ce89fa069b55750c9aba Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 30 Jul 2024 13:46:05 +0200 Subject: [PATCH 1219/2361] Update tests --- tests/queries/0_stateless/02995_index_3.sh | 2 +- tests/queries/0_stateless/03143_asof_join_ddb_long.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02995_index_3.sh b/tests/queries/0_stateless/02995_index_3.sh index 506429e2696..219ae81154f 100755 --- a/tests/queries/0_stateless/02995_index_3.sh +++ b/tests/queries/0_stateless/02995_index_3.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index 17a67511030..a927e4f1e1f 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-distributed-cache DROP TABLE IF EXISTS build; DROP TABLE IF EXISTS skewed_probe; From eb129b539fce2a407182d892ce3bd00f782a5833 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 13:46:27 +0200 Subject: [PATCH 1220/2361] Add tests --- src/Storages/StorageKeeperMap.cpp | 135 +++++++++++++----- src/Storages/StorageKeeperMap.h | 3 +- .../test_keeper_map_retries/__init__.py | 0 .../configs/enable_keeper_map.xml | 3 + .../configs/fault_injection.xml | 6 + .../test_keeper_map_retries/test.py | 78 ++++++++++ .../02911_backup_restore_keeper_map.sh | 15 +- 7 files changed, 194 insertions(+), 46 deletions(-) create mode 100644 tests/integration/test_keeper_map_retries/__init__.py create mode 100644 
tests/integration/test_keeper_map_retries/configs/enable_keeper_map.xml create mode 100644 tests/integration/test_keeper_map_retries/configs/fault_injection.xml create mode 100644 tests/integration/test_keeper_map_retries/test.py diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 5534bb7f346..09c21ae28f5 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -183,7 +183,7 @@ public: settings.insert_keeper_retry_max_backoff_ms}, context->getProcessListElement()}; - retries_ctl.retryLoop([&]() + zk_retry.retryLoop([&]() { auto zookeeper = storage.getClient(); auto keys_limit = storage.keysLimit(); @@ -205,12 +205,12 @@ public: for (const auto & [key, _] : new_values) key_paths.push_back(storage.fullPathForKey(key)); - zkutil::ZooKeeper::MultiExistsResponse results; + zkutil::ZooKeeper::MultiTryGetResponse results; if constexpr (!for_update) { if (!strict) - results = zookeeper->exists(key_paths); + results = zookeeper->tryGet(key_paths); } Coordination::Requests requests; @@ -231,7 +231,8 @@ public: { if (!strict && results[i].error == Coordination::Error::ZOK) { - requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1)); + if (results[i].data != new_values[key]) + requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1)); } else { @@ -241,6 +242,9 @@ public: } } + if (requests.empty()) + return; + if (new_keys_num != 0) { auto will_be = current_keys_num + new_keys_num; @@ -259,7 +263,7 @@ public: }; template -class StorageKeeperMapSource : public ISource +class StorageKeeperMapSource : public ISource, WithContext { const StorageKeeperMap & storage; size_t max_block_size; @@ -290,8 +294,15 @@ public: KeyContainerPtr container_, KeyContainerIter begin_, KeyContainerIter end_, - bool with_version_column_) - : ISource(getHeader(header, with_version_column_)), storage(storage_), max_block_size(max_block_size_), container(std::move(container_)), it(begin_), end(end_) + bool with_version_column_, + ContextPtr context_) + : ISource(getHeader(header, with_version_column_)) + , WithContext(std::move(context_)) + , storage(storage_) + , max_block_size(max_block_size_) + , container(std::move(container_)) + , it(begin_) + , end(end_) , with_version_column(with_version_column_) { } @@ -316,12 +327,12 @@ public: for (auto & raw_key : raw_keys) raw_key = base64Encode(raw_key, /* url_encoding */ true); - return storage.getBySerializedKeys(raw_keys, nullptr, with_version_column); + return storage.getBySerializedKeys(raw_keys, nullptr, with_version_column, getContext()); } else { size_t elem_num = std::min(max_block_size, static_cast(end - it)); - auto chunk = storage.getBySerializedKeys(std::span{it, it + elem_num}, nullptr, with_version_column); + auto chunk = storage.getBySerializedKeys(std::span{it, it + elem_num}, nullptr, with_version_column, getContext()); it += elem_num; return chunk; } @@ -553,14 +564,31 @@ Pipe StorageKeeperMap::read( using KeyContainer = typename KeyContainerPtr::element_type; pipes.emplace_back(std::make_shared>( - *this, sample_block, max_block_size, keys, keys->begin() + begin, keys->begin() + end, with_version_column)); + *this, sample_block, max_block_size, keys, keys->begin() + begin, keys->begin() + end, with_version_column, context_)); } return Pipe::unitePipes(std::move(pipes)); }; - auto client = getClient(); if (all_scan) - return process_keys(std::make_shared>(client->getChildren(zk_data_path))); + { + const auto & settings = context_->getSettingsRef(); + 
ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{ + settings.keeper_max_retries, + settings.keeper_retry_initial_backoff_ms, + settings.keeper_retry_max_backoff_ms}, + context_->getProcessListElement()}; + + std::vector children; + zk_retry.retryLoop([&] + { + auto client = getClient(); + children = client->getChildren(zk_data_path); + }); + return process_keys(std::make_shared>(std::move(children))); + } return process_keys(std::move(filtered_keys)); } @@ -571,11 +599,24 @@ SinkToStoragePtr StorageKeeperMap::write(const ASTPtr & /*query*/, const Storage return std::make_shared(*this, metadata_snapshot->getSampleBlock(), local_context); } -void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) +void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &) { checkTable(); - auto client = getClient(); - client->tryRemoveChildrenRecursive(zk_data_path, true); + const auto & settings = local_context->getSettingsRef(); + ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{ + settings.keeper_max_retries, + settings.keeper_retry_initial_backoff_ms, + settings.keeper_retry_max_backoff_ms}, + local_context->getProcessListElement()}; + + zk_retry.retryLoop([&] + { + auto client = getClient(); + client->tryRemoveChildrenRecursive(zk_data_path, true); + }); } bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock) @@ -1064,10 +1105,11 @@ Chunk StorageKeeperMap::getByKeys(const ColumnsWithTypeAndName & keys, PaddedPOD if (raw_keys.size() != keys[0].column->size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Assertion failed: {} != {}", raw_keys.size(), keys[0].column->size()); - return getBySerializedKeys(raw_keys, &null_map, /* version_column */ false); + return getBySerializedKeys(raw_keys, &null_map, /* version_column */ false, getContext()); } -Chunk StorageKeeperMap::getBySerializedKeys(const std::span keys, PaddedPODArray * null_map, bool with_version) const +Chunk StorageKeeperMap::getBySerializedKeys( + const std::span keys, PaddedPODArray * null_map, bool with_version, const ContextPtr & local_context) const { Block sample_block = getInMemoryMetadataPtr()->getSampleBlock(); MutableColumns columns = sample_block.cloneEmptyColumns(); @@ -1084,17 +1126,27 @@ Chunk StorageKeeperMap::getBySerializedKeys(const std::span k null_map->resize_fill(keys.size(), 1); } - auto client = getClient(); - Strings full_key_paths; full_key_paths.reserve(keys.size()); for (const auto & key : keys) - { full_key_paths.emplace_back(fullPathForKey(key)); - } - auto values = client->tryGet(full_key_paths); + const auto & settings = local_context->getSettingsRef(); + ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{ + settings.keeper_max_retries, + settings.keeper_retry_initial_backoff_ms, + settings.keeper_retry_max_backoff_ms}, + local_context->getProcessListElement()}; + + zkutil::ZooKeeper::MultiTryGetResponse values; + zk_retry.retryLoop([&]{ + auto client = getClient(); + values = client->tryGet(full_key_paths); + }); for (size_t i = 0; i < keys.size(); ++i) { @@ -1182,16 +1234,16 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca if (commands.front().type == MutationCommand::Type::DELETE) { - MutationsInterpreter::Settings settings(true); - 
settings.return_all_columns = true; - settings.return_mutated_rows = true; + MutationsInterpreter::Settings mutation_settings(true); + mutation_settings.return_all_columns = true; + mutation_settings.return_mutated_rows = true; auto interpreter = std::make_unique( storage_ptr, metadata_snapshot, commands, local_context, - settings); + mutation_settings); auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute()); PullingPipelineExecutor executor(pipeline); @@ -1200,8 +1252,6 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca auto primary_key_pos = header.getPositionByName(primary_key); auto version_position = header.getPositionByName(std::string{version_column_name}); - auto client = getClient(); - Block block; while (executor.pull(block)) { @@ -1229,7 +1279,23 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca } Coordination::Responses responses; - auto status = client->tryMulti(delete_requests, responses, /* check_session_valid */ true); + + const auto & settings = local_context->getSettingsRef(); + ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{ + settings.keeper_max_retries, + settings.keeper_retry_initial_backoff_ms, + settings.keeper_retry_max_backoff_ms}, + local_context->getProcessListElement()}; + + Coordination::Error status; + zk_retry.retryLoop([&] + { + auto client = getClient(); + status = client->tryMulti(delete_requests, responses, /* check_session_valid */ true); + }); if (status == Coordination::Error::ZOK) return; @@ -1241,9 +1307,14 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca for (const auto & delete_request : delete_requests) { - auto code = client->tryRemove(delete_request->getPath()); - if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE) - throw zkutil::KeeperException::fromPath(code, delete_request->getPath()); + zk_retry.retryLoop([&] + { + auto client = getClient(); + status = client->tryRemove(delete_request->getPath()); + }); + + if (status != Coordination::Error::ZOK && status != Coordination::Error::ZNONODE) + throw zkutil::KeeperException::fromPath(status, delete_request->getPath()); } } diff --git a/src/Storages/StorageKeeperMap.h b/src/Storages/StorageKeeperMap.h index d4556792c48..cfbb35ab2fe 100644 --- a/src/Storages/StorageKeeperMap.h +++ b/src/Storages/StorageKeeperMap.h @@ -54,7 +54,8 @@ public: Names getPrimaryKey() const override { return {primary_key}; } Chunk getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray & null_map, const Names &) const override; - Chunk getBySerializedKeys(std::span keys, PaddedPODArray * null_map, bool with_version) const; + Chunk getBySerializedKeys( + std::span keys, PaddedPODArray * null_map, bool with_version, const ContextPtr & local_context) const; Block getSampleBlock(const Names &) const override; diff --git a/tests/integration/test_keeper_map_retries/__init__.py b/tests/integration/test_keeper_map_retries/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_keeper_map_retries/configs/enable_keeper_map.xml b/tests/integration/test_keeper_map_retries/configs/enable_keeper_map.xml new file mode 100644 index 00000000000..b4cbb6a954b --- /dev/null +++ b/tests/integration/test_keeper_map_retries/configs/enable_keeper_map.xml @@ -0,0 +1,3 @@ + + /test_keeper_map + diff --git a/tests/integration/test_keeper_map_retries/configs/fault_injection.xml 
b/tests/integration/test_keeper_map_retries/configs/fault_injection.xml new file mode 100644 index 00000000000..145945c7c7c --- /dev/null +++ b/tests/integration/test_keeper_map_retries/configs/fault_injection.xml @@ -0,0 +1,6 @@ + + + 0.05 + 0.05 + + diff --git a/tests/integration/test_keeper_map_retries/test.py b/tests/integration/test_keeper_map_retries/test.py new file mode 100644 index 00000000000..352119147cd --- /dev/null +++ b/tests/integration/test_keeper_map_retries/test.py @@ -0,0 +1,78 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +import os + +CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=["configs/enable_keeper_map.xml"], + with_zookeeper=True, + stay_alive=True, +) + + +def start_clean_clickhouse(): + # remove fault injection if present + if "fault_injection.xml" in node.exec_in_container( + ["bash", "-c", "ls /etc/clickhouse-server/config.d"] + ): + print("Removing fault injection") + node.exec_in_container( + ["bash", "-c", "rm /etc/clickhouse-server/config.d/fault_injection.xml"] + ) + node.restart_clickhouse() + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def repeat_query(query, repeat): + for _ in range(repeat): + node.query( + query, + settings={ + "keeper_max_retries": 20, + "keeper_retry_max_backoff_ms": 10000, + }, + ) + + +def test_queries(started_cluster): + start_clean_clickhouse() + + node.query("DROP TABLE IF EXISTS keeper_map_retries SYNC") + node.query( + "CREATE TABLE keeper_map_retries (a UInt64, b UInt64) Engine=KeeperMap('/keeper_map_retries') PRIMARY KEY a" + ) + + node.stop_clickhouse() + node.copy_file_to_container( + os.path.join(CONFIG_DIR, "fault_injection.xml"), + "/etc/clickhouse-server/config.d/fault_injection.xml", + ) + node.start_clickhouse() + + repeat_count = 10 + + repeat_query( + "INSERT INTO keeper_map_retries SELECT number, number FROM numbers(500)", + repeat_count, + ) + repeat_query("SELECT * FROM keeper_map_retries", repeat_count) + repeat_query( + "ALTER TABLE keeper_map_retries UPDATE b = 3 WHERE a > 2", repeat_count + ) + repeat_query("ALTER TABLE keeper_map_retries DELETE WHERE a > 2", repeat_count) + repeat_query("TRUNCATE keeper_map_retries", repeat_count) diff --git a/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh b/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh index ee070b40f6f..c04667505c3 100755 --- a/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh +++ b/tests/queries/0_stateless/02911_backup_restore_keeper_map.sh @@ -13,20 +13,9 @@ $CLICKHOUSE_CLIENT -nm -q " CREATE TABLE $database_name.02911_backup_restore_keeper_map3 (key UInt64, value String) Engine=KeeperMap('/' || currentDatabase() || '/test02911_different') PRIMARY KEY key; " -# KeeperMap table engine doesn't have internal retries for interaction with Keeper. Do it on our own, otherwise tests with overloaded server can be flaky. 
-while true -do - $CLICKHOUSE_CLIENT -nm -q "INSERT INTO $database_name.02911_backup_restore_keeper_map2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5000; - " 2>&1 | grep -q "KEEPER_EXCEPTION" && sleep 1 && continue - break -done +$CLICKHOUSE_CLIENT -nm -q "INSERT INTO $database_name.02911_backup_restore_keeper_map2 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 5000;" -while true -do - $CLICKHOUSE_CLIENT -nm -q "INSERT INTO $database_name.02911_backup_restore_keeper_map3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 3000; - " 2>&1 | grep -q "KEEPER_EXCEPTION" && sleep 1 && continue - break -done +$CLICKHOUSE_CLIENT -nm -q "INSERT INTO $database_name.02911_backup_restore_keeper_map3 SELECT number, 'test' || toString(number) FROM system.numbers LIMIT 3000;" backup_path="$database_name" for i in $(seq 1 3); do From a1ececb24c0a6f21d3985f5a9f8a726befef78c3 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 30 Jul 2024 11:51:48 +0000 Subject: [PATCH 1221/2361] Fix use-of-unitialized-value --- src/Columns/ColumnAggregateFunction.cpp | 33 ++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp index 9934970c868..4bc48c62eb4 100644 --- a/src/Columns/ColumnAggregateFunction.cpp +++ b/src/Columns/ColumnAggregateFunction.cpp @@ -332,9 +332,36 @@ void ColumnAggregateFunction::expand(const Filter & mask, bool inverted) { ensureOwnership(); Arena & arena = createOrGetArena(); - char * default_ptr = arena.alignedAlloc(func->sizeOfData(), func->alignOfData()); - func->create(default_ptr); - expandDataByMask(data, mask, inverted, default_ptr); + + if (mask.size() < data.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mask size should be no less than data size."); + + ssize_t from = data.size() - 1; + ssize_t index = mask.size() - 1; + data.resize(mask.size()); + while (index >= 0) + { + if (!!mask[index] ^ inverted) + { + if (from < 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Too many bytes in mask"); + + /// Copy only if it makes sense. + if (index != from) + data[index] = data[from]; + --from; + } + else + { + data[index] = arena.alignedAlloc(func->sizeOfData(), func->alignOfData()); + func->create(data[index]); + } + + --index; + } + + if (from != -1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Not enough bytes in mask"); } ColumnPtr ColumnAggregateFunction::permute(const Permutation & perm, size_t limit) const From fd075470d6e141d5aa4d01ccefcc3d3ee04130c7 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 30 Jul 2024 11:54:00 +0000 Subject: [PATCH 1222/2361] Add docs --- tests/integration/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/integration/README.md b/tests/integration/README.md index cde4cb05aec..d5137a9c148 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -130,6 +130,14 @@ docker build -t clickhouse/integration-test . ``` The helper container used by the `runner` script is in `docker/test/integration/runner/Dockerfile`. +It can be rebuild with + +``` +cd docker/test/integration/runner +docker build -t clickhouse/integration-test-runner . +``` + +Also you need to add option --network=host if you rebuild image for a local integration testsing. 
### Adding new tests From fd26672864a7e1557908b878d7daa018de20c61a Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 13:54:30 +0200 Subject: [PATCH 1223/2361] Revert some change --- src/Storages/StorageKeeperMap.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 09c21ae28f5..1559b442e43 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -35,7 +35,6 @@ #include #include -#include "Common/ZooKeeper/ZooKeeperRetries.h" #include #include #include @@ -44,6 +43,7 @@ #include #include #include +#include #include #include @@ -205,12 +205,12 @@ public: for (const auto & [key, _] : new_values) key_paths.push_back(storage.fullPathForKey(key)); - zkutil::ZooKeeper::MultiTryGetResponse results; + zkutil::ZooKeeper::MultiExistsResponse results; if constexpr (!for_update) { if (!strict) - results = zookeeper->tryGet(key_paths); + results = zookeeper->exists(key_paths); } Coordination::Requests requests; @@ -231,8 +231,7 @@ public: { if (!strict && results[i].error == Coordination::Error::ZOK) { - if (results[i].data != new_values[key]) - requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1)); + requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1)); } else { @@ -242,9 +241,6 @@ public: } } - if (requests.empty()) - return; - if (new_keys_num != 0) { auto will_be = current_keys_num + new_keys_num; From 0124d211ec81a3779fe0e99c868fe85be8856629 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 14:00:52 +0200 Subject: [PATCH 1224/2361] Better --- tests/integration/test_replicated_table_attach/test.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_replicated_table_attach/test.py b/tests/integration/test_replicated_table_attach/test.py index 499220def2c..de60b7ec291 100644 --- a/tests/integration/test_replicated_table_attach/test.py +++ b/tests/integration/test_replicated_table_attach/test.py @@ -41,6 +41,7 @@ def start_clean_clickhouse(): def test_startup_with_small_bg_pool(started_cluster): start_clean_clickhouse() + node.query("DROP TABLE IF EXISTS replicated_table SYNC") node.query( "CREATE TABLE replicated_table (k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/replicated_table', 'r1') ORDER BY k" ) @@ -54,11 +55,10 @@ def test_startup_with_small_bg_pool(started_cluster): node.restart_clickhouse(stop_start_wait_sec=10) assert_values() - node.query("DROP TABLE replicated_table SYNC") - def test_startup_with_small_bg_pool_partitioned(started_cluster): start_clean_clickhouse() + node.query("DROP TABLE IF EXISTS replicated_table_partitioned SYNC") node.query( "CREATE TABLE replicated_table_partitioned (k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/replicated_table_partitioned', 'r1') ORDER BY k" ) @@ -81,5 +81,3 @@ def test_startup_with_small_bg_pool_partitioned(started_cluster): # check that we activate it in the end node.query_with_retry("INSERT INTO replicated_table_partitioned VALUES(20, 30)") - - node.query("DROP TABLE replicated_table_partitioned SYNC") From de99ee1b05e68b964535664d4197afd0944d0261 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 30 Jul 2024 12:07:31 +0000 Subject: [PATCH 1225/2361] Change docs --- tests/integration/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/README.md b/tests/integration/README.md index d5137a9c148..c1eb511fa44 
100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -137,7 +137,7 @@ cd docker/test/integration/runner docker build -t clickhouse/integration-test-runner . ``` -Also you need to add option --network=host if you rebuild image for a local integration testsing. +If your docker configuration doesn't allow access to public internet with docker build command you may also need to add option --network=host if you rebuild image for a local integration testsing. ### Adding new tests From c3e8825c8f14bb82f60c41754a021813d3dbc8aa Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 14:23:32 +0200 Subject: [PATCH 1226/2361] Use correct order of fields in StorageURLSource --- src/Storages/StorageURL.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index 63d01a02417..6c95cad474c 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -228,12 +228,12 @@ private: bool need_only_count; size_t total_rows_in_file = 0; + Poco::Net::HTTPBasicCredentials credentials; + std::unique_ptr read_buf; std::shared_ptr input_format; std::unique_ptr pipeline; std::unique_ptr reader; - - Poco::Net::HTTPBasicCredentials credentials; }; class StorageURLSink : public SinkToStorage From 8b52d7b711d54f1d4bb5b2f39bf4aea3966f64dc Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Tue, 30 Jul 2024 13:35:19 +0100 Subject: [PATCH 1227/2361] fxs --- src/IO/S3/Client.cpp | 19 +++++++++---------- src/IO/S3/Client.h | 2 +- .../test_checking_s3_blobs_paranoid/test.py | 12 ++++++++++++ tests/integration/test_storage_delta/test.py | 12 ++++++++++++ 4 files changed, 34 insertions(+), 11 deletions(-) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 7196dfa9bdc..8f037ea71be 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -385,10 +385,9 @@ Model::HeadObjectOutcome Client::HeadObject(HeadObjectRequest & request) const request.overrideURI(std::move(*bucket_uri)); - if (isClientForDisk() && error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) - CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); - - return enrichErrorMessage( + /// The next call is NOT a recurcive call + /// This is a virtuall call Aws::S3::S3Client::HeadObject(const Model::HeadObjectRequest&) + return processRequestResult( HeadObject(static_cast(request))); } @@ -409,11 +408,8 @@ Model::ListObjectsOutcome Client::ListObjects(ListObjectsRequest & request) cons Model::GetObjectOutcome Client::GetObject(GetObjectRequest & request) const { - auto resp = doRequest(request, [this](const Model::GetObjectRequest & req) { return GetObject(req); }); - if (!resp.IsSuccess() && isClientForDisk() && resp.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) - CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); - - return enrichErrorMessage(std::move(resp)); + return processRequestResult( + doRequest(request, [this](const Model::GetObjectRequest & req) { return GetObject(req); })); } Model::AbortMultipartUploadOutcome Client::AbortMultipartUpload(AbortMultipartUploadRequest & request) const @@ -699,11 +695,14 @@ Client::doRequestWithRetryNetworkErrors(RequestType & request, RequestFn request } template -RequestResult Client::enrichErrorMessage(RequestResult && outcome) const +RequestResult Client::processRequestResult(RequestResult && outcome) const { if (outcome.IsSuccess() || !isClientForDisk()) return std::forward(outcome); + if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) + 
CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); + String enriched_message = fmt::format( "{} {}", outcome.GetError().GetMessage(), diff --git a/src/IO/S3/Client.h b/src/IO/S3/Client.h index 11cace4e1fd..e54953419e1 100644 --- a/src/IO/S3/Client.h +++ b/src/IO/S3/Client.h @@ -274,7 +274,7 @@ private: void insertRegionOverride(const std::string & bucket, const std::string & region) const; template - RequestResult enrichErrorMessage(RequestResult && outcome) const; + RequestResult processRequestResult(RequestResult && outcome) const; String initial_endpoint; std::shared_ptr credentials_provider; diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index 1ed70e20b79..dde636b5d29 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -689,3 +689,15 @@ def test_no_key_found_disk(cluster, broken_s3): "DB::Exception: The specified key does not exist. This error happened for S3 disk." in error ) + + s3_disk_no_key_errors_metric_value = int( + node.query( + """ + SELECT value + FROM system.metrics + WHERE metric = 'S3DiskNoKeyErrors' + """ + ).strip() + ) + + assert s3_disk_no_key_errors_metric_value > 0 diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index d3dd7cfe52a..67cc7cdd6da 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -452,6 +452,18 @@ def test_restart_broken(started_cluster): f"SELECT count() FROM {TABLE_NAME}" ) + s3_disk_no_key_errors_metric_value = int( + instance.query( + """ + SELECT value + FROM system.metrics + WHERE metric = 'S3DiskNoKeyErrors' + """ + ).strip() + ) + + assert s3_disk_no_key_errors_metric_value == 0 + minio_client.make_bucket(bucket) upload_directory(minio_client, bucket, f"/{TABLE_NAME}", "") From d69f6cccde7633214eba48c08d7647e4ea9a40da Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 30 Jul 2024 15:08:26 +0200 Subject: [PATCH 1228/2361] Fix --- src/Common/ThreadPoolTaskTracker.cpp | 5 ++++- src/IO/WriteBufferFromS3.cpp | 9 ++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/Common/ThreadPoolTaskTracker.cpp b/src/Common/ThreadPoolTaskTracker.cpp index 61d34801f7a..1697a13f780 100644 --- a/src/Common/ThreadPoolTaskTracker.cpp +++ b/src/Common/ThreadPoolTaskTracker.cpp @@ -19,6 +19,10 @@ TaskTracker::TaskTracker(ThreadPoolCallbackRunnerUnsafe scheduler_, size_t TaskTracker::~TaskTracker() { + /// Tasks should be waited outside of dtor. + /// Important for WriteBufferFromS3/AzureBlobStorage, where TaskTracker is currently used. + chassert(finished_futures.empty() && futures.empty()); + safeWaitAll(); } @@ -170,4 +174,3 @@ bool TaskTracker::isAsync() const } } - diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 3682e49b018..e702b4d35ad 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -277,12 +277,10 @@ WriteBufferFromS3::~WriteBufferFromS3() "The file might not be written to S3. " "{}.", getVerboseLogDetails()); - return; } - - /// That destructor could be call with finalized=false in case of exceptions - if (!finalized && !canceled) + else if (!finalized) { + /// That destructor could be call with finalized=false in case of exceptions LOG_INFO( log, "WriteBufferFromS3 is not finalized in destructor. 
" @@ -291,9 +289,10 @@ WriteBufferFromS3::~WriteBufferFromS3() getVerboseLogDetails()); } + /// Wait for all tasks, because they contain reference to this write buffer. task_tracker->safeWaitAll(); - if (!multipart_upload_id.empty() && !multipart_upload_finished) + if (!canceled && !multipart_upload_id.empty() && !multipart_upload_finished) { LOG_WARNING(log, "WriteBufferFromS3 was neither finished nor aborted, try to abort upload in destructor. {}.", getVerboseLogDetails()); tryToAbortMultipartUpload(); From 20faed85ca30c6352fd091e8d4d763fb98fe1311 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 15:18:37 +0200 Subject: [PATCH 1229/2361] Remove useless file --- tests/queries/0_stateless/data_minio/test :: 03215_archive.csv | 1 - 1 file changed, 1 deletion(-) delete mode 100644 tests/queries/0_stateless/data_minio/test :: 03215_archive.csv diff --git a/tests/queries/0_stateless/data_minio/test :: 03215_archive.csv b/tests/queries/0_stateless/data_minio/test :: 03215_archive.csv deleted file mode 100644 index d00491fd7e5..00000000000 --- a/tests/queries/0_stateless/data_minio/test :: 03215_archive.csv +++ /dev/null @@ -1 +0,0 @@ -1 From 21aa514c80cb463f079f9877ae97048a8b13dfbe Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 30 Jul 2024 15:03:09 +0100 Subject: [PATCH 1230/2361] don't run removed tests --- tests/performance/scripts/entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/scripts/entrypoint.sh b/tests/performance/scripts/entrypoint.sh index 0c3bfa550f4..db7d96ad150 100755 --- a/tests/performance/scripts/entrypoint.sh +++ b/tests/performance/scripts/entrypoint.sh @@ -118,7 +118,7 @@ then # far in the future and have unrelated test changes. base=$(git -C right/ch merge-base pr origin/master) git -C right/ch diff --name-only "$base" pr -- . 
| tee all-changed-files.txt - git -C right/ch diff --name-only "$base" pr -- tests/performance/*.xml | tee changed-test-definitions.txt + git -C right/ch diff --name-only --diff-filter=d "$base" pr -- tests/performance/*.xml | tee changed-test-definitions.txt git -C right/ch diff --name-only "$base" pr -- :!tests/performance/*.xml :!docker/test/performance-comparison | tee other-changed-files.txt fi From 7d5c30e76cf0fd17515803fec96899f4aad1294e Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 16:26:19 +0200 Subject: [PATCH 1231/2361] No retries when partitioned --- tests/integration/test_keeper_map/test.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_keeper_map/test.py b/tests/integration/test_keeper_map/test.py index 31316af7b1e..7aee5df5746 100644 --- a/tests/integration/test_keeper_map/test.py +++ b/tests/integration/test_keeper_map/test.py @@ -46,7 +46,11 @@ def assert_keeper_exception_after_partition(query): with PartitionManager() as pm: pm.drop_instance_zk_connections(node) try: - error = node.query_and_get_error_with_retry(query, sleep_time=1) + error = node.query_and_get_error_with_retry( + query, + sleep_time=1, + settings={"insert_keeper_max_retries": 1, "keeper_max_retries": 1}, + ) assert "Coordination::Exception" in error except: print_iptables_rules() @@ -84,7 +88,9 @@ def test_keeper_map_without_zk(started_cluster): node.restart_clickhouse(60) try: error = node.query_and_get_error_with_retry( - "SELECT * FROM test_keeper_map_without_zk", sleep_time=1 + "SELECT * FROM test_keeper_map_without_zk", + sleep_time=1, + settings={"keeper_max_retries": 1}, ) assert "Failed to activate table because of connection issues" in error except: From aec431f68bf16b45a6b36deb61c146e08cb4f644 Mon Sep 17 00:00:00 2001 From: heguangnan Date: Tue, 30 Jul 2024 22:54:05 +0800 Subject: [PATCH 1232/2361] fix test --- .../0_stateless/03214_count_distinct_null_key_memory_leak.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql index 847d3742dc3..d8428ec6b4a 100644 --- a/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql +++ b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql @@ -12,7 +12,7 @@ PARTITION BY tuple() ORDER BY c SETTINGS index_granularity = 8192, allow_nullable_key=1; -INSERT INTO testnull(b,c) SELECT toString(rand64()) AS b, toString(rand64()) AS c FROM numbers(1000000) +INSERT INTO testnull(b,c) SELECT toString(rand64()) AS b, toString(rand64()) AS c FROM numbers(1000000); SELECT count(distinct b) FROM testnull GROUP BY a SETTINGS max_memory_usage = 54748364; -- {serverError MEMORY_LIMIT_EXCEEDED} DROP TABLE testnull; \ No newline at end of file From d3830e0a4fd96c286e5b91a78b1a9583f5eb6291 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 30 Jul 2024 14:54:42 +0000 Subject: [PATCH 1233/2361] Add perf tests for Dynamic and Variant --- .../insert_select_squashing_dynamic.xml | 59 +++++++++++++++++++ .../insert_select_squashing_variant.xml | 34 +++++++++++ 2 files changed, 93 insertions(+) create mode 100644 tests/performance/insert_select_squashing_dynamic.xml create mode 100644 tests/performance/insert_select_squashing_variant.xml diff --git a/tests/performance/insert_select_squashing_dynamic.xml b/tests/performance/insert_select_squashing_dynamic.xml new file mode 100644 index 00000000000..f7f600fd8bd --- 
/dev/null +++ b/tests/performance/insert_select_squashing_dynamic.xml @@ -0,0 +1,59 @@ + + + 1000 + 0 + 1 + + + +CREATE TABLE dynamic_squash_performance_1 +( + d Dynamic +) +ENGINE = Null; + + + +CREATE TABLE dynamic_squash_performance_2 +( + d Dynamic(max_types=6) +) +ENGINE = Null; + + + +CREATE TABLE src_dynamic_squash_performance_1 +( + d Dynamic +) +ENGINE = Memory; + + + +CREATE TABLE src_dynamic_squash_performance_2 +( + d Dynamic(max_types=6) +) +ENGINE = Memory; + + + + + + + + + + + + INSERT INTO dynamic_squash_performance_1 SELECT number::Dynamic FROM numbers(10000000) + INSERT INTO dynamic_squash_performance_1 SELECT range(number % 100)::Dynamic FROM numbers(2000000) + INSERT INTO dynamic_squash_performance_1 SELECT * FROM src_dynamic_squash_performance_1 + INSERT INTO dynamic_squash_performance_2 SELECT * FROM src_dynamic_squash_performance_2 + + DROP TABLE IF EXISTS dynamic_squash_performance_1 + DROP TABLE IF EXISTS dynamic_squash_performance_2 + DROP TABLE IF EXISTS src_dynamic_squash_performance_1 + DROP TABLE IF EXISTS src_dynamic_squash_performance_2 + + diff --git a/tests/performance/insert_select_squashing_variant.xml b/tests/performance/insert_select_squashing_variant.xml new file mode 100644 index 00000000000..5c59fc7b50f --- /dev/null +++ b/tests/performance/insert_select_squashing_variant.xml @@ -0,0 +1,34 @@ + + + 1000 + 0 + 1 + 1 + + + +CREATE TABLE variant_squash_performance +( + v Variant(Tuple(v1 Array(UInt64)), Tuple(v2 Array(UInt64)), Tuple(v3 Array(UInt64)), Tuple(v4 Array(UInt64)), Tuple(v5 Array(UInt64))) +) +ENGINE = Null; + + + +CREATE TABLE src_variant_squash_performance +( + v Variant(Tuple(v1 Array(UInt64)), Tuple(v2 Array(UInt64)), Tuple(v3 Array(UInt64)), Tuple(v4 Array(UInt64)), Tuple(v5 Array(UInt64))) +) +ENGINE = Memory; + + + + + + + INSERT INTO variant_squash_performance SELECT * FROM src_variant_squash_performance + + DROP TABLE IF EXISTS variant_squash_performance + DROP TABLE IF EXISTS src_variant_squash_performance + + From 9d0608ce001b35e17ff81c00d4965dbe4938b56b Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:55:37 +0200 Subject: [PATCH 1234/2361] Update Runner.cpp --- utils/keeper-bench/Runner.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/keeper-bench/Runner.cpp b/utils/keeper-bench/Runner.cpp index d99f2645a31..59761d827e1 100644 --- a/utils/keeper-bench/Runner.cpp +++ b/utils/keeper-bench/Runner.cpp @@ -545,8 +545,7 @@ struct ZooKeeperRequestFromLogReader file_read_buf = DB::wrapReadBufferWithCompressionMethod(std::move(file_read_buf), compression_method); DB::SingleReadBufferIterator read_buffer_iterator(std::move(file_read_buf)); - std::string sample_path; - auto [columns_description, format] = DB::detectFormatAndReadSchema(format_settings, read_buffer_iterator, sample_path, context); + auto [columns_description, format] = DB::detectFormatAndReadSchema(format_settings, read_buffer_iterator, context); DB::ColumnsWithTypeAndName columns; columns.reserve(columns_description.size()); From 83c6d97cd2aae0a3f79c2776ffc3a5691f8fd4bb Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Tue, 30 Jul 2024 17:34:38 +0200 Subject: [PATCH 1235/2361] squash! 
added somme tests in relation with https://github.com/ClickHouse/ClickHouse/pull/54881 with new behaviour when enable_named_columns_in_function_tuple=1 (default value) --- .../0_stateless/00309_formats.reference | Bin 20353 -> 18666 bytes tests/queries/0_stateless/00309_formats.sql | 5 +++++ 2 files changed, 5 insertions(+) diff --git a/tests/queries/0_stateless/00309_formats.reference b/tests/queries/0_stateless/00309_formats.reference index cab311692be229716b58af39079275d3942b01cc..f3ea45520bb50fb936caf6724e9fedf3cdd00b75 100644 GIT binary patch delta 3052 zcmeIy$4}c(00wZIgtksdZ-w^I3JD}eL2?j6{R4XG1$t-?y;Q0c4wb5?2YL>Bj|8*# z-Xp>6y?59U_TGDMNaN%a?SDX3#j^bK`<|bk?ZfAlN^UDiNE{JVHgaD5e8+19DxExLIy^` zXcz-yVH}Ky2`~{R!DN^MQ}xOSA`L-0%z&9N3ueO{m<#h@J}iKRum~2z5?IQq+n-P( zP{A_D!g5#vD`6F^hBdGj*1>w%02^VGPHumqnY-wY76U{pY=iBv19rkL*bRGNFYJT; zPz?v*AoeGQ5Ddc+I10z$IGlhQI0>iVG@OC6a1PGH1;h6f#3F(vxC~d|DqMr>a070_ zEw~ML;4a*Q`|v>j|8;{y1V``~p1@Oh2G8LIyo6Wq8s5NLcn9yHg%&qjk~HD*O`kk{ zMhS#wBF4@26;n)ok$HM~`}q3#2LuKMhlGZON5~bCOjLACY+QUoVp4KSYFc_mW>$7i zZeD&tVNr2OsZv$OmRD3(RoB$k)i*RYHMg|3wRd!Ob@%l4^{WR4hlWQ+$Hpf#lT*_( zvvczci%ZKZt842Un_JsEyL2uP zs~KC(*lPbltFhlLRQ$(oSyIc$t;|{F>ph;XVgHfpC$+5H%8ajnp%+Y<&DY2OORU_k z(V7&oZmv$Gj7LgIiK)l0pFiA9KYx&VyyL(B=HYJsi3@C{mioxSQew%acGgGsx>HI? tDE$HfH|yd|adXt)D_oo(mI%Z&HeM!>@7a6Ht;zJ#Jk0d+*gt^JbdYKDu4%*X9d$a^ZOYUl#FBHF2yr}q<;@67HiYtm^#Z|>iir*-% zDXuF{6fY}&tN5Mb_lj2(e^C5U@h8Qria#snd+xpO{s$g>=;22meeCfko}78A|MWA@J~z8>|MLgJ!58LIidkNiRoxsq zJUB8u`r@(UFP(V#6D$6wrgio0oJnBji=r;A-&v0K-- z%ZqMZ-|En<>suYVb$zQtx30$|*-G(uTzhnP-g&=^d;Asr39~@AuK#;~vDmn-A16x* zR`tnxBCGmdyM|AAwVs$&ebTP#ldV?ulqNAv(}O7;BueJfKp}=K;v>=KamanSU6UWrDN?_JXVk8gMI`72?`P<#P@i7&<`pS zWF+WF5R#xIK}v#_1ThI}667T4Nf4BvC_z$!rUX$5suE-+=t>Zlpe#XJg0=*43F;E$ zCFn~Kn4mC0VuHp5kqIgjWG3iL5SpMgL28251hENf6XYi7O%R-*I6-oP<^<6RsuN@< z=uQxxpgci(g7yUQ3F;H%C+JUb0Ko$U7Z7|va00;#1UC@;KyU=X69iWfd_iyq!5aj3 z5d48~#s?7ye?<5r!XFX-i10^*KO+1Q;g1M^M8t7~KO+1Q;g1M^MEE1Z9})hD9M2rk z64B_#G0j2EQO#k^am|4Ze?<5r!XFX-i0}tWF!-P!{1M@g2!BNQBf=jM{)q5Lgg+wu z5#f&re?<5r!XFX-i10^*KO+1Q;g1M^#G){NzzgHs7A`ETZCepq_933?i~U2rWWJXqI8KuOcyVRaKQz-H_k$cyu3VDiwye}g+f_bcJ1@D;TzuN5 zD@s+avnf(-W3}#V+EUgwouv-u47MnGiFGeI6M7J0X>pSftOBKaoh3fnm@GvczL`Ta z^L_N{6Fw0i&Ve~T1wJJ{6+SgSO&=q%!s~pgh07?nk!+)EGuh^{Eo58Dw&Hr*^k(>Z zybQ>^Hqtrc{32`%3@;;YAm1kG-$5QXD0j)XNqnD@?1}G`_*RMUmH1|f@0J>DqOcTU z*dnqtku4%yM7D@*5!oWLMaC8xTV$doTV!mJu|>uf8Czs*k+DV27CBqwY>|unY>~4? 
z&K5aasMw-ni;68Owy4;mVvC9`Dz>QDqGpSlEo!!?*`j8Pnk{O!sM(@si<&JOwrJR* zVT*<>8n$TIqG5}MEgH6H*a8(t*|H6)jlxDpGfP)fUuE8&1TrL@Sn5)r6Ya*MU7eL0wb!Wn76AR5ztX z#+6_}jZ<1=T!|M{I=Mw62Gvg4mvJR%Q1g@)8CT*4l}~AraV2z6|Kt`49@Ic(U&fXA zK_ygLWLya$)I+&Nf(SKH*_Ux8j!+qu78zGU3H4EFk#QxOP$lISi6>M_WnadXkV3sw zT4Y>_DpXCSg~!FbC|e#E)l=EBEh?z8Wm{BHWy|BDk}6xVZKpC4TZt>wRQasL73!+8 zFSZg_sIE$j*tQdl#8s5ILYJI}Nejl>nIuG~}N3YAyc7h8!d)L*4V z#+A526;@hgT!|~xV!1`)3iVjom;05tLRD5;5#VP`KA{>G~DrX+|J4NE4^tt78W7My1Ewbk!9(?&_}d~o*M=)&sS=v zm5YR#+gx;@nz8VMIrYYX7g?<00RC^)91khQIPJmG@OrgrFt^k;LYv_Lo5d@`!DxB3 zGDLsD_K3_;G)I{^%FR(>j!JV>nWNeqHPfRx<;2)BT_0Y1Xf&A9h!Y`)0|nvDH@x8n zHr{gEJg2=CS+6-2Y@=}a^V*~wFA#5d<+f6RtRKlbl5c8*WMo}Q)|X_RN!FWW-AUK{ zDc6tAH={hZdv7)Rc=}3mQ`+9<(5@$S>={hVObTHRbg2bmQ`U{6_yqMxorr-!m=tXtHQD> zEvwS9DlMzhvMMdB(y}TotJ1P6EvwS9sw}I@vZ^er%Cf2~tID#fEUU`0sw}I@vZ^hs z+On!GtJ<=vEvwqHsx7P9vZ^hs+OldatH!cwEUU({YAmb9vT7`=#}TLyo>A3TXJ|u*PXZY@b0cX?~)z)dEQU-ex~gv+M8t>k(P&*cKVkL~(9{aiZ0vklwdB?P?7?an*> zTu#9I-mbsX&!q*_8f<@;81VeWop<`V+<^DVU4N&aOAmPG-1T?*xeS5#(rtg2Bk;7w zop<`VG=X>8U4N&a%M^I;-Sv0+xnzOoK(@b27kFphop<`VjDh#+U4N&aOB#6h-u8EC z1J9b=d8ePt9C&Zv^>_NY_NY{DCT;>+ke)`2)`|ZGV?P@Y@r2-syKefBb6wVB0s=_`E`L+6m)Ezc_5xV{`a(e6A7cfwpXiGp`$XFl3xq_$>F>d{|a@uQ~ Lvprlx%YOd>&)TR8 diff --git a/tests/queries/0_stateless/00309_formats.sql b/tests/queries/0_stateless/00309_formats.sql index b0939c00a10..0366cdeea5c 100644 --- a/tests/queries/0_stateless/00309_formats.sql +++ b/tests/queries/0_stateless/00309_formats.sql @@ -9,3 +9,8 @@ SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, a SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSON; SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSONCompact; SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT XML; + +SET enable_named_columns_in_function_tuple = 1; + +SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes; From a70cdb8bba5503f3723a2e29957617ea06106c4d Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 30 Jul 2024 15:37:59 +0000 Subject: [PATCH 1236/2361] Add comment regarding default value for force_connected --- src/Client/ConnectionEstablisher.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Client/ConnectionEstablisher.h b/src/Client/ConnectionEstablisher.h index 304ec4d34b4..ff071e59aea 100644 --- a/src/Client/ConnectionEstablisher.h +++ b/src/Client/ConnectionEstablisher.h @@ -24,8 +24,12 @@ public: const QualifiedTableName * table_to_check = nullptr); /// Establish connection and save it in result, write possible exception message in fail_message. - /// The connection is returned from the pool, it can be stale. Use force_connected flag - /// to ensure that connection is working one + /// The connection is returned from connection pool and it can be stale. Use force_connected flag to ensure that connection is working one. 
+ /// NOTE: force_connected is false by default due to the following consideration ... + /// When true, it implies sending a Ping packet to another peer and, if it fails - reestablishing the connection. + /// Ping-Pong round trip can be unnecessary in case of connection is still alive. + /// So, the optimistic approach is used by default. In this case, stale connections can be handled by retrying, + /// - see ConnectionPoolWithFailover, as example void run(TryResult & result, std::string & fail_message, bool force_connected = false); /// Set async callback that will be called when reading from socket blocks. From aa26291ff25b16539efa3d50a540fc32a05b702d Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Tue, 30 Jul 2024 18:15:24 +0100 Subject: [PATCH 1237/2361] fxs --- src/Databases/DatabaseReplicated.cpp | 57 +++++++++++++++---- src/Databases/DatabaseReplicatedWorker.cpp | 21 +++++-- src/Databases/DatabaseReplicatedWorker.h | 4 +- src/Storages/System/StorageSystemClusters.cpp | 6 +- .../test_recovery_time_metric/test.py | 36 ++++++++++-- 5 files changed, 98 insertions(+), 26 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index b11b9382732..06cea65d62e 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -340,31 +341,63 @@ ClusterPtr DatabaseReplicated::getClusterImpl(bool all_groups) const ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) const { - ReplicasInfo res; + Strings paths_get, paths_exists; + + paths_get.emplace_back(fs::path(zookeeper_path) / "max_log_ptr"); - auto zookeeper = getZooKeeper(); const auto & addresses_with_failover = cluster_->getShardsAddresses(); const auto & shards_info = cluster_->getShardsInfo(); - + for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) + { + for (const auto & replica : addresses_with_failover[shard_index]) + { + String full_name = getFullReplicaName(replica.database_shard_name, replica.database_replica_name); + paths_exists.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "active"); + paths_get.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "log_ptr"); + } + } + try { - UInt32 max_log_ptr = parse(zookeeper->get(zookeeper_path + "/max_log_ptr")); + auto current_zookeeper = getZooKeeper(); + auto get_res = current_zookeeper->get(paths_get); + auto exist_res = current_zookeeper->exists(paths_exists); + chassert(get_res.size() == exist_res.size() + 1); + auto max_log_ptr_zk = get_res[0]; + if (max_log_ptr_zk.error != Coordination::Error::ZOK) + throw Coordination::Exception(max_log_ptr_zk.error); + + UInt32 max_log_ptr = parse(max_log_ptr_zk.data); + + ReplicasInfo replicas_info; + replicas_info.resize(exist_res.size()); + + size_t global_replica_index = 0; for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) { for (const auto & replica : addresses_with_failover[shard_index]) { - String full_name = getFullReplicaName(replica.database_shard_name, replica.database_replica_name); - UInt32 log_ptr = parse(zookeeper->get(fs::path(zookeeper_path) / "replicas" / full_name / "log_ptr")); - bool is_active = zookeeper->exists(fs::path(zookeeper_path) / "replicas" / full_name / "active"); - res.push_back(ReplicaInfo{ - .is_active = is_active, - .replication_lag = max_log_ptr - log_ptr, + auto replica_active = exist_res[global_replica_index]; + auto replica_log_ptr 
= get_res[global_replica_index + 1]; + + if (replica_active.error != Coordination::Error::ZOK && replica_active.error != Coordination::Error::ZNONODE) + throw Coordination::Exception(replica_active.error); + + if (replica_log_ptr.error != Coordination::Error::ZOK) + throw Coordination::Exception(replica_log_ptr.error); + + replicas_info[global_replica_index] = ReplicaInfo{ + .is_active = replica_active.error == Coordination::Error::ZOK, + .replication_lag = max_log_ptr - parse(replica_log_ptr.data), .recovery_time = replica.is_local ? ddl_worker->getCurrentInitializationDurationMs() : 0, - }); + }; + + ++global_replica_index; } } - return res; + + return replicas_info; } catch (...) { tryLogCurrentException(log); diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index a9a74c5f56a..4e7408aa96e 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -32,8 +32,11 @@ DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db bool DatabaseReplicatedDDLWorker::initializeMainThread() { - initialization_duration_timer.restart(); - initializing.store(true, std::memory_order_release); + { + std::lock_guard lock(initialization_duration_timer_mutex); + initialization_duration_timer.emplace(); + initialization_duration_timer->start(); + } while (!stop_flag) { @@ -72,7 +75,10 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() initializeReplication(); initialized = true; - initializing.store(false, std::memory_order_relaxed); + { + std::lock_guard lock(initialization_duration_timer_mutex); + initialization_duration_timer.reset(); + } return true; } catch (...) @@ -82,7 +88,11 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() } } - initializing.store(false, std::memory_order_relaxed); + { + std::lock_guard lock(initialization_duration_timer_mutex); + initialization_duration_timer.reset(); + } + return false; } @@ -466,7 +476,8 @@ UInt32 DatabaseReplicatedDDLWorker::getLogPointer() const UInt64 DatabaseReplicatedDDLWorker::getCurrentInitializationDurationMs() const { - return initializing.load(std::memory_order_acquire) ? initialization_duration_timer.elapsedMilliseconds() : 0; + std::lock_guard lock(initialization_duration_timer_mutex); + return initialization_duration_timer ? 
initialization_duration_timer->elapsedMilliseconds() : 0; } } diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index 3e5887be825..2309c831839 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -59,8 +59,8 @@ private: /// It will remove "active" node when database is detached zkutil::EphemeralNodeHolderPtr active_node_holder; - Stopwatch initialization_duration_timer; - std::atomic initializing = false; + std::optional initialization_duration_timer; + mutable std::mutex initialization_duration_timer_mutex; }; } diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index 0da4bd70cbd..4b9802c9089 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -71,7 +71,7 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam const auto & shards_info = cluster->getShardsInfo(); const auto & addresses_with_failover = cluster->getShardsAddresses(); - size_t replica_idx = 0; + size_t global_replica_idx = 0; for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) { const auto & shard_info = shards_info[shard_index]; @@ -108,7 +108,7 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam } else { - const auto & replica_info = replicas_info[replica_idx++]; + const auto & replica_info = replicas_info[global_replica_idx]; res_columns[i++]->insert(replica_info.is_active); res_columns[i++]->insert(replica_info.replication_lag); if (replica_info.recovery_time != 0) @@ -116,6 +116,8 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam else res_columns[i++]->insertDefault(); } + + ++global_replica_idx; } } } diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py index 4dad844b950..8f369d7759c 100644 --- a/tests/integration/test_recovery_time_metric/test.py +++ b/tests/integration/test_recovery_time_metric/test.py @@ -5,7 +5,6 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", main_configs=["configs/config.xml"], - with_zookeeper=True, stay_alive=True, ) @@ -21,9 +20,36 @@ def start_cluster(): def test_recovery_time_metric(start_cluster): node.query( - "CREATE DATABASE rdb ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1');" + """ + CREATE DATABASE rdb + ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1') + """ ) - node.query("CREATE TABLE rdb.t (x UInt32) ENGINE = MergeTree ORDER BY x;") - node.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"]) + + node.query( + """ + CREATE TABLE rdb.t + ( + `x` UInt32 + ) + ENGINE = MergeTree + ORDER BY x + """ + ) + + node.exec_in_container( + ["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"] + ) + node.restart_clickhouse() - assert node.query("SELECT any(recovery_time) FROM system.clusters;") != "0\n" + + ret = int( + node.query( + """ + SELECT recovery_time + FROM system.clusters + WHERE cluster = 'rdb' + """ + ).strip() + ) + assert ret > 0 From 7e51e9962c34320a9c60ba6abcf8b38cf517e86c Mon Sep 17 00:00:00 2001 From: Alex Katsman Date: Tue, 30 Jul 2024 17:20:07 +0000 Subject: [PATCH 1238/2361] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown --- src/Storages/MergeTree/MergeTreeDeduplicationLog.cpp | 6 +++++- 1 file changed, 5 insertions(+), 
1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDeduplicationLog.cpp b/src/Storages/MergeTree/MergeTreeDeduplicationLog.cpp index 22ff9b7194f..a8110500f13 100644 --- a/src/Storages/MergeTree/MergeTreeDeduplicationLog.cpp +++ b/src/Storages/MergeTree/MergeTreeDeduplicationLog.cpp @@ -341,15 +341,19 @@ void MergeTreeDeduplicationLog::shutdown() stopped = true; if (current_writer) { + /// If an error has occurred during finalize, we'd like to have the exception set for reset. + /// Otherwise, we'd be in a situation where the finalization didn't happen and we didn't get + /// any error, causing a logical error (see ~MemoryBuffer()). try { current_writer->finalize(); + current_writer.reset(); } catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); + current_writer.reset(); } - current_writer.reset(); } } From 51af0d305c9959fb4870bd8a57035d48207648b2 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Tue, 30 Jul 2024 20:00:23 +0200 Subject: [PATCH 1239/2361] Reduce number of tested combinations --- tests/queries/0_stateless/02473_multistep_prewhere.python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02473_multistep_prewhere.python b/tests/queries/0_stateless/02473_multistep_prewhere.python index a942568233c..11095202039 100644 --- a/tests/queries/0_stateless/02473_multistep_prewhere.python +++ b/tests/queries/0_stateless/02473_multistep_prewhere.python @@ -193,7 +193,7 @@ def main(): url = os.environ["CLICKHOUSE_URL"] + "&max_threads=1" default_index_granularity = 10 - total_rows = 8 * default_index_granularity + total_rows = 7 * default_index_granularity step = default_index_granularity session = requests.Session() for index_granularity in [ From bd83ba88b03b7de15fbc7530d6c827156674f3b5 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 30 Jul 2024 18:48:34 +0000 Subject: [PATCH 1240/2361] add comments --- src/Server/TCPHandler.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 06feeadb892..13ec2ab102e 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -394,7 +394,8 @@ void TCPHandler::runImpl() /// So it's better to update the connection settings for flexibility. extractConnectionSettingsFromContext(query_context); - /// Sync timeouts on client and server during current query to avoid dangling queries on server + /// Sync timeouts on the client and server during the current query to avoid dangling queries on the server. + /// It should be reset at the end of the query. state.timeout_setter = std::make_unique(socket(), send_timeout, receive_timeout); /// Should we send internal logs to client? @@ -602,6 +603,7 @@ void TCPHandler::runImpl() /// QueryState should be cleared before QueryScope, since otherwise /// the MemoryTracker will be wrong for possible deallocations. /// (i.e. deallocations from the Aggregator with two-level aggregation) + /// Also, it resets the socket's timeouts.
state.reset(); last_sent_snapshots = ProfileEvents::ThreadIdToCountersSnapshot{}; query_scope.reset(); @@ -627,6 +629,7 @@ void TCPHandler::runImpl() state.io.onException(); exception.reset(e.clone()); + /// In case of exception, the state was not reset, so the socket's timeouts must be reset explicitly state.timeout_setter.reset(); if (e.code() == ErrorCodes::UNKNOWN_PACKET_FROM_CLIENT) From dacf044c3dee65d799242b7f4846f7d6d8b2bd34 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 30 Jul 2024 19:20:52 +0000 Subject: [PATCH 1241/2361] Update version_date.tsv and changelogs after v24.7.1.2915-stable --- SECURITY.md | 3 +- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.7.1.2915-stable.md | 524 +++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 6 files changed, 530 insertions(+), 4 deletions(-) create mode 100644 docs/changelogs/v24.7.1.2915-stable.md diff --git a/SECURITY.md b/SECURITY.md index 53328b6e16b..8930dc96f8a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit | Version | Supported | |:-|:-| +| 24.7 | ✔️ | | 24.6 | ✔️ | | 24.5 | ✔️ | -| 24.4 | ✔️ | +| 24.4 | ❌ | | 24.3 | ✔️ | | 24.2 | ❌ | | 24.1 | ❌ | diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index c59ef1b919a..e99c86267f9 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.6.2.17" +ARG VERSION="24.7.1.2915" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 240df79aeb1..fb562b911a3 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.6.2.17" +ARG VERSION="24.7.1.2915" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index ac64655991a..51f4e6a0f40 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.6.2.17" +ARG VERSION="24.7.1.2915" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docs/changelogs/v24.7.1.2915-stable.md b/docs/changelogs/v24.7.1.2915-stable.md new file mode 100644 index 00000000000..abffbe58bfc --- /dev/null +++ b/docs/changelogs/v24.7.1.2915-stable.md @@ -0,0 +1,524 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.7.1.2915-stable (a37d2d43da7) FIXME as compared to v24.7.1.1-new (aa023477a92) + +#### Backward Incompatible Change +* Change binary serialization of Variant data type: add `compact` mode to avoid writing the same discriminator multiple times for granules with a single variant or with only NULL values.
Add MergeTree setting `use_compact_variant_discriminators_serialization` that is enabled by default. Note that Variant type is still experimental and backward-incompatible change in serialization is ok. [#62774](https://github.com/ClickHouse/ClickHouse/pull/62774) ([Kruglov Pavel](https://github.com/Avogar)). +* Forbid `CREATE MATERIALIZED VIEW ... ENGINE Replicated*MergeTree POPULATE AS SELECT ...` with Replicated databases. [#63963](https://github.com/ClickHouse/ClickHouse/pull/63963) ([vdimir](https://github.com/vdimir)). +* `clickhouse-keeper-client` will only accept paths in string literals, such as `ls '/hello/world'`, not bare strings such as `ls /hello/world`. [#65494](https://github.com/ClickHouse/ClickHouse/pull/65494) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Metric `KeeperOutstandingRequets` was renamed to `KeeperOutstandingRequests`. This fixes a typo reported in [#66179](https://github.com/ClickHouse/ClickHouse/issues/66179). [#66206](https://github.com/ClickHouse/ClickHouse/pull/66206) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* Extend function `tuple` to construct named tuples in query. Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). +* `ASOF JOIN` support for `full_sorting_join` algorithm Close [#54493](https://github.com/ClickHouse/ClickHouse/issues/54493). [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). +* A new table function, `fuzzQuery,` was added. This function allows you to modify a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1');`. [#62103](https://github.com/ClickHouse/ClickHouse/pull/62103) ([pufit](https://github.com/pufit)). +* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)). +* Support JWT authentication in `clickhouse-client`. [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)). +* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)). +* Support accept_invalid_certificate in client's config in order to allow for client to connect over secure TCP to a server running with self-signed certificate - can be used as a shorthand for corresponding `openSSL` client settings `verificationMode=none` + `invalidCertificateHandler.name=AcceptCertificateHandler`. [#65238](https://github.com/ClickHouse/ClickHouse/pull/65238) ([peacewalker122](https://github.com/peacewalker122)). +* Add system.error_log which contains history of error values from table system.errors, periodically flushed to disk. [#65381](https://github.com/ClickHouse/ClickHouse/pull/65381) ([Pablo Marcos](https://github.com/pamarcos)). 
+* Add aggregate function `groupConcat`. About the same as `arrayStringConcat( groupArray(column), ',')` Can receive 2 parameters: a string delimiter and the number of elements to be processed. [#65451](https://github.com/ClickHouse/ClickHouse/pull/65451) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add a new setting to disable/enable writing page index into parquet files. [#65475](https://github.com/ClickHouse/ClickHouse/pull/65475) ([lgbo](https://github.com/lgbo-ustc)). +* Allow system administrators to configure `logger.console_log_level`. [#65559](https://github.com/ClickHouse/ClickHouse/pull/65559) ([Azat Khuzhin](https://github.com/azat)). +* Automatically append a wildcard `*` to the end of a directory path with table function `file`. [#66019](https://github.com/ClickHouse/ClickHouse/pull/66019) ([Zhidong (David) Guo](https://github.com/Gun9niR)). +* Add `--memory-usage` option to client in non interactive mode. [#66393](https://github.com/ClickHouse/ClickHouse/pull/66393) ([vdimir](https://github.com/vdimir)). + +#### Performance Improvement +* Enable `optimize_functions_to_subcolumns` by default. [#58661](https://github.com/ClickHouse/ClickHouse/pull/58661) ([Anton Popov](https://github.com/CurtizJ)). +* Replace int to string algorithm with a faster one (from a modified amdn/itoa to a modified jeaiii/itoa). [#61661](https://github.com/ClickHouse/ClickHouse/pull/61661) ([Raúl Marín](https://github.com/Algunenano)). +* Sizes of hash tables created by join (`parallel_hash` algorithm) is collected and cached now. This information will be used to preallocate space in hash tables for subsequent query executions and save time on hash table resizes. [#64553](https://github.com/ClickHouse/ClickHouse/pull/64553) ([Nikita Taranov](https://github.com/nickitat)). +* Optimized queries with `ORDER BY` primary key and `WHERE` that have a condition with high selectivity by using of buffering. It is controlled by setting `read_in_order_use_buffering` (enabled by default) and can increase memory usage of query. [#64607](https://github.com/ClickHouse/ClickHouse/pull/64607) ([Anton Popov](https://github.com/CurtizJ)). +* Improve performance of loading `plain_rewritable` metadata. [#65634](https://github.com/ClickHouse/ClickHouse/pull/65634) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Attaching tables on read-only disks will use fewer resources by not loading outdated parts. [#65635](https://github.com/ClickHouse/ClickHouse/pull/65635) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Support minmax hyperrectangle for Set indices. [#65676](https://github.com/ClickHouse/ClickHouse/pull/65676) ([AntiTopQuark](https://github.com/AntiTopQuark)). +* Unload primary index of outdated parts to reduce total memory usage. [#65852](https://github.com/ClickHouse/ClickHouse/pull/65852) ([Anton Popov](https://github.com/CurtizJ)). +* Functions `replaceRegexpAll` and `replaceRegexpOne` are now significantly faster if the pattern is trivial, i.e. contains no metacharacters, pattern classes, flags, grouping characters etc. (Thanks to Taiyang Li). [#66185](https://github.com/ClickHouse/ClickHouse/pull/66185) ([Robert Schulze](https://github.com/rschu1ze)). + +#### Improvement +* Support rocksdb as backend storage of keeper. [#56626](https://github.com/ClickHouse/ClickHouse/pull/56626) ([Han Fei](https://github.com/hanfei1991)). 
+* The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)). +* The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. [#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)). +* This PR changes how deduplication for MV works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). +* Allow matching column names in a case insensitive manner when reading json files (`input_format_json_case_insensitive_column_matching`). [#61750](https://github.com/ClickHouse/ClickHouse/pull/61750) ([kevinyhzou](https://github.com/KevinyhZou)). +* Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). +* In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). +* Make an interactive client for clickhouse-disks, add local disk from the local directory. Fixes [#56791](https://github.com/ClickHouse/ClickHouse/issues/56791). [#64446](https://github.com/ClickHouse/ClickHouse/pull/64446) ([Daniil Ivanik](https://github.com/divanik)). +* Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). +* `StorageS3Queue` related fixes and improvements. Deduce a default value of `s3queue_processing_threads_num` according to the number of physical cpu cores on the server (instead of the previous default value as 1). Set default value of `s3queue_loading_retries` to 10. Fix possible vague "Uncaught exception" in exception column of `system.s3queue`. Do not increment retry count on `MEMORY_LIMIT_EXCEEDED` exception. Move files commit to a stage after insertion into table fully finished to avoid files being commited while not inserted. 
Add settings `s3queue_max_processed_files_before_commit`, `s3queue_max_processed_rows_before_commit`, `s3queue_max_processed_bytes_before_commit`, `s3queue_max_processing_time_sec_before_commit`, to better control commit and flush time. [#65046](https://github.com/ClickHouse/ClickHouse/pull/65046) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixed broken multiple columns aggregation on s390x. [#65062](https://github.com/ClickHouse/ClickHouse/pull/65062) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Support aliases in parametrized view function (only new analyzer). [#65190](https://github.com/ClickHouse/ClickHouse/pull/65190) ([Kseniia Sumarokova](https://github.com/kssenii)). +* S3. reduce retires time for queries, increase retries count for backups. 8.5 minutes and 100 retires for queries, 1.2 hours and 1000 retries for backup restore. [#65232](https://github.com/ClickHouse/ClickHouse/pull/65232) ([Sema Checherinda](https://github.com/CheSema)). +* Updated to mask account key in logs in azureBlobStorage. [#65273](https://github.com/ClickHouse/ClickHouse/pull/65273) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Partition pruning for `IN` predicates when filter expression is a part of `PARTITION BY` expression. [#65335](https://github.com/ClickHouse/ClickHouse/pull/65335) ([Eduard Karacharov](https://github.com/korowa)). +* Add system tables with main information about all detached tables. [#65400](https://github.com/ClickHouse/ClickHouse/pull/65400) ([Konstantin Morozov](https://github.com/k-morozov)). +* Add support for `cluster_for_parallel_replicas` when using custom key parallel replicas. It allows you to use parallel replicas with custom key with MergeTree tables. [#65453](https://github.com/ClickHouse/ClickHouse/pull/65453) ([Antonio Andelic](https://github.com/antonio2368)). +* Support query plan LIMIT optimization. Support LIMIT pushdown for PostgreSQL storage and table function. [#65454](https://github.com/ClickHouse/ClickHouse/pull/65454) ([Maksim Kita](https://github.com/kitaisreal)). +* Arraymin/max can be applicable to all data types that are comparable. [#65455](https://github.com/ClickHouse/ClickHouse/pull/65455) ([pn](https://github.com/chloro-pn)). +* Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)). +* Do not create format settings for each rows when serializing chunks to insert to EmbeddedRocksDB table. [#65474](https://github.com/ClickHouse/ClickHouse/pull/65474) ([Duc Canh Le](https://github.com/canhld94)). +* Fixed out-of-range exception in parsing Dwarf5 on s390x. [#65501](https://github.com/ClickHouse/ClickHouse/pull/65501) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Reduce `clickhouse-local` prompt to just `:)`. `getFQDNOrHostName()` takes too long on macOS, and we don't want a hostname in the prompt for `clickhouse-local` anyway. [#65510](https://github.com/ClickHouse/ClickHouse/pull/65510) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Avoid printing a message from jemalloc about per-CPU arenas on low-end virtual machines. [#65532](https://github.com/ClickHouse/ClickHouse/pull/65532) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Disable filesystem cache background download by default. 
It will be enabled back when we fix the issue with possible "Memory limit exceeded" because memory deallocation is done outside of query context (while buffer is allocated inside of query context) if we use background download threads. Plus we need to add a separate setting to define max size to download for background workers (currently it is limited by max_file_segment_size, which might be too big). [#65534](https://github.com/ClickHouse/ClickHouse/pull/65534) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add new option to config `` which allow to specify how often clickhouse will reload config. [#65545](https://github.com/ClickHouse/ClickHouse/pull/65545) ([alesapin](https://github.com/alesapin)). +* Implement binary encoding for ClickHouse data types and add its specification in docs. Use it in Dynamic binary serialization, allow to use it in RowBinaryWithNamesAndTypes and Native formats under settings. [#65546](https://github.com/ClickHouse/ClickHouse/pull/65546) ([Kruglov Pavel](https://github.com/Avogar)). +* Improved ZooKeeper load balancing. The current session doesn't expire until the optimal nodes become available despite `fallback_session_lifetime`. Added support for AZ-aware balancing. [#65570](https://github.com/ClickHouse/ClickHouse/pull/65570) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Server settings `compiled_expression_cache_size` and `compiled_expression_cache_elements_size` are now shown in `system.server_settings`. [#65584](https://github.com/ClickHouse/ClickHouse/pull/65584) ([Robert Schulze](https://github.com/rschu1ze)). +* When lightweight delete happens on a table with projection(s), users have choices either throw an exception (by default) or drop the projection lightweight delete would happen. [#65594](https://github.com/ClickHouse/ClickHouse/pull/65594) ([jsc0218](https://github.com/jsc0218)). +* Add support for user identification based on x509 SubjectAltName extension. [#65626](https://github.com/ClickHouse/ClickHouse/pull/65626) ([Anton Kozlov](https://github.com/tonickkozlov)). +* `clickhouse-local` will respect the `max_server_memory_usage` and `max_server_memory_usage_to_ram_ratio` from the configuration file. It will also set the max memory usage to 90% of the system memory by default, like `clickhouse-server` does. This closes [#65695](https://github.com/ClickHouse/ClickHouse/issues/65695). [#65697](https://github.com/ClickHouse/ClickHouse/pull/65697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a script to backup your files to ClickHouse. This is strange, but works. [#65699](https://github.com/ClickHouse/ClickHouse/pull/65699) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* PostgreSQL source support cancel. [#65722](https://github.com/ClickHouse/ClickHouse/pull/65722) ([Maksim Kita](https://github.com/kitaisreal)). +* Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). +* Allow to use `concat` function with empty arguments ``` sql :) select concat();. [#65887](https://github.com/ClickHouse/ClickHouse/pull/65887) ([æŽæ‰¬](https://github.com/taiyang-li)). 
+* Allow controlling named collections in clickhouse-local. [#65973](https://github.com/ClickHouse/ClickHouse/pull/65973) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve Azure profile events. [#65999](https://github.com/ClickHouse/ClickHouse/pull/65999) ([alesapin](https://github.com/alesapin)). +* `Query was cancelled` might have been printed twice in clickhouse-client. This behaviour is fixed. [#66005](https://github.com/ClickHouse/ClickHouse/pull/66005) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Support ORC file read by writer time zone. [#66025](https://github.com/ClickHouse/ClickHouse/pull/66025) ([kevinyhzou](https://github.com/KevinyhZou)). +* Refactor JSONExtract functions, support more types including experimental Dynamic type. [#66046](https://github.com/ClickHouse/ClickHouse/pull/66046) ([Kruglov Pavel](https://github.com/Avogar)). +* DatabaseCatalog drops tables faster by using up to database_catalog_drop_table_concurrency threads. [#66065](https://github.com/ClickHouse/ClickHouse/pull/66065) ([Sema Checherinda](https://github.com/CheSema)). +* This PR changes how deduplication for MV works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#66144](https://github.com/ClickHouse/ClickHouse/pull/66144) ([Sema Checherinda](https://github.com/CheSema)). +* Support null map subcolumn for Variant and Dynamic subcolumns. [#66178](https://github.com/ClickHouse/ClickHouse/pull/66178) ([Kruglov Pavel](https://github.com/Avogar)). +* Add settings to control connection to the PostgreSQL. * Setting `postgresql_connection_attempt_timeout` specifies the value passed to `connect_timeout` parameter of connection URL. * Setting `postgresql_connection_pool_retries` specifies the number of retries to establish a connection to the PostgreSQL end-point. [#66232](https://github.com/ClickHouse/ClickHouse/pull/66232) ([Dmitry Novik](https://github.com/novikd)). +* Reduce inaccuracy of input_wait_elapsed_us/input_wait_elapsed_us/elapsed_us. [#66239](https://github.com/ClickHouse/ClickHouse/pull/66239) ([Azat Khuzhin](https://github.com/azat)). +* Improve FilesystemCache ProfileEvents. [#66249](https://github.com/ClickHouse/ClickHouse/pull/66249) ([zhukai](https://github.com/nauu)). +* Add settings to ignore ON CLUSTER clause in queries for named collection management with replicated storage. [#66288](https://github.com/ClickHouse/ClickHouse/pull/66288) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Upgraded `pocketfft` dependency to the recent commit https://github.com/mreineck/pocketfft/commit/f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3. [#66291](https://github.com/ClickHouse/ClickHouse/pull/66291) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Upgraded `azure-sdk-for-cpp` to the recent commit https://github.com/ClickHouse/azure-sdk-for-cpp/commit/ea3e19a7be08519134c643177d56c7484dfec884. [#66292](https://github.com/ClickHouse/ClickHouse/pull/66292) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Function `generateSnowflakeID` now allows to specify a machine ID as a parameter to prevent collisions in large clusters. 
[#66374](https://github.com/ClickHouse/ClickHouse/pull/66374) ([ZAWA_ll](https://github.com/Zawa-ll)). +* Disable suspending on Ctrl+Z in interactive mode. This is a common trap and is not expected behavior for almost all users. I imagine only a few extreme power users could appreciate suspending terminal applications to the background, but I don't know any. [#66511](https://github.com/ClickHouse/ClickHouse/pull/66511) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add an option for validating the primary key type in Dictionaries. Without this option, for simple layouts, any column type will be implicitly converted to UInt64. [#66595](https://github.com/ClickHouse/ClickHouse/pull/66595) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Fix SHOW MERGES remaining time. [#66735](https://github.com/ClickHouse/ClickHouse/pull/66735) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +* Check cyclic dependencies on CREATE/REPLACE/RENAME/EXCHANGE queries and throw an exception if there is a cyclic dependency. Previously such cyclic dependencies could lead to a deadlock during server startup. Closes [#65355](https://github.com/ClickHouse/ClickHouse/issues/65355). Also fix some bugs in dependencies creation. [#65405](https://github.com/ClickHouse/ClickHouse/pull/65405) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). +* Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fixed a crash while using MaterializedMySQL with a TABLE OVERRIDE that maps a MySQL NULL field into a ClickHouse not-NULL field. [#54649](https://github.com/ClickHouse/ClickHouse/pull/54649) ([Filipp Ozinov](https://github.com/bakwc)). +* Fix logical error when a PREWHERE expression reads no columns and the table has no adaptive index granularity (very old table). Fix [#56640](https://github.com/ClickHouse/ClickHouse/issues/56640). [#59173](https://github.com/ClickHouse/ClickHouse/pull/59173) ([Alexander Gololobov](https://github.com/davenger)). +* Fix bug with the cancellation buffer when canceling a query. [#64478](https://github.com/ClickHouse/ClickHouse/pull/64478) ([Sema Checherinda](https://github.com/CheSema)). +* Fix filling parts columns from metadata (when columns.txt does not exist). [#64757](https://github.com/ClickHouse/ClickHouse/pull/64757) ([Azat Khuzhin](https://github.com/azat)). +* Fix AST formatting of 'create table b empty as a'. [#64951](https://github.com/ClickHouse/ClickHouse/pull/64951) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). +* Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)). +* Fix unexpected projection name when query with CTE. [#65267](https://github.com/ClickHouse/ClickHouse/pull/65267) ([wudidapaopao](https://github.com/wudidapaopao)). +* Require `dictGet` privilege when accessing dictionaries via direct query or the `Dictionary` table engine. [#65359](https://github.com/ClickHouse/ClickHouse/pull/65359) ([Joe Lynch](https://github.com/joelynch)). +* Fix user-specific S3 auth with incremental backups. [#65481](https://github.com/ClickHouse/ClickHouse/pull/65481) ([Antonio Andelic](https://github.com/antonio2368)). +* Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix getting exception `Index out of bound for blob metadata` in case all files from list batch were filtered out. [#65523](https://github.com/ClickHouse/ClickHouse/pull/65523) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix NOT_FOUND_COLUMN_IN_BLOCK for deduplicate merge of projection. [#65573](https://github.com/ClickHouse/ClickHouse/pull/65573) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)). +* Fixed a bug that compatibility level '23.4' was not properly applied. [#65737](https://github.com/ClickHouse/ClickHouse/pull/65737) ([cw5121](https://github.com/cw5121)). +* Fix odbc table with nullable fields. [#65738](https://github.com/ClickHouse/ClickHouse/pull/65738) ([Rodolphe Dugé de Bernonville](https://github.com/RodolpheDuge)). +* Fix data race in `TCPHandler`, which could happen on fatal error. [#65744](https://github.com/ClickHouse/ClickHouse/pull/65744) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)). +* For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)). +* Fix a bug leads to EmbeddedRocksDB with TTL write corrupted SST files. [#65816](https://github.com/ClickHouse/ClickHouse/pull/65816) ([Duc Canh Le](https://github.com/canhld94)). 
+* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds (issue [#65517](https://github.com/ClickHouse/ClickHouse/issues/65517)). [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)). +* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)). +* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). +* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions (issue [#65516](https://github.com/ClickHouse/ClickHouse/issues/65516)). [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)). +* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)). +* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible issues with MySQL client protocol TLS connections. [#65938](https://github.com/ClickHouse/ClickHouse/pull/65938) ([Azat Khuzhin](https://github.com/azat)). +* Fix handling of `SSL_ERROR_WANT_READ`/`SSL_ERROR_WANT_WRITE` with zero timeout. [#65941](https://github.com/ClickHouse/ClickHouse/pull/65941) ([Azat Khuzhin](https://github.com/azat)). +* Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). +* Column _size in the s3 engine and s3 table function denotes the size of a file inside the archive, not the size of the archive itself. [#65993](https://github.com/ClickHouse/ClickHouse/pull/65993) ([Daniil Ivanik](https://github.com/divanik)). +* Fix resolving dynamic subcolumns in analyzer, avoid reading the whole column on dynamic subcolumn reading. [#66004](https://github.com/ClickHouse/ClickHouse/pull/66004) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix config merging for from_env with replace overrides. [#66034](https://github.com/ClickHouse/ClickHouse/pull/66034) ([Azat Khuzhin](https://github.com/azat)). +* Fix a possible hanging in `GRPCServer` during shutdown. This PR fixes [#65622](https://github.com/ClickHouse/ClickHouse/issues/65622). [#66061](https://github.com/ClickHouse/ClickHouse/pull/66061) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix reading dynamic subcolumns from an altered Memory table. Previously, if the `max_types` parameter of a Dynamic type was changed in a Memory table via ALTER, further subcolumn reads could return a wrong result.
[#66066](https://github.com/ClickHouse/ClickHouse/pull/66066) ([Kruglov Pavel](https://github.com/Avogar)). +* Fixed several cases in function `has` with non-constant `LowCardinality` arguments. [#66088](https://github.com/ClickHouse/ClickHouse/pull/66088) ([Anton Popov](https://github.com/CurtizJ)). +* Fix for `groupArrayIntersect`. It had incorrect behavior in the `merge()` function. Also, fixed behavior in `deserialise()` for numeric and general data. [#66103](https://github.com/ClickHouse/ClickHouse/pull/66103) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). +* Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed the issue when the server failed to parse Avro files with negative block size arrays encoded, which is now allowed by the Avro specification. [#66130](https://github.com/ClickHouse/ClickHouse/pull/66130) ([Serge Klochkov](https://github.com/slvrtrn)). +* Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). +* Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)). +* Fix order of parsing metadata fields in StorageDeltaLake. [#66211](https://github.com/ClickHouse/ClickHouse/pull/66211) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)). +* Fix error reporting while copying to S3 or AzureBlobStorage. 
[#66295](https://github.com/ClickHouse/ClickHouse/pull/66295) ([Vitaly Baranov](https://github.com/vitlibar)). +* Prevent watchdog from keeping descriptors of unlinked(rotated) log files. [#66334](https://github.com/ClickHouse/ClickHouse/pull/66334) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Fix the bug that logicalexpressionoptimizerpass lost logical type of constant. closes [#64487](https://github.com/ClickHouse/ClickHouse/issues/64487). [#66344](https://github.com/ClickHouse/ClickHouse/pull/66344) ([pn](https://github.com/chloro-pn)). +* Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix possible incorrect result for queries joining and filtering table external engine (like PostgreSQL), due to too aggressive filter pushdown. Since now, conditions from where section won't be send to external database in case of outer join with external table. [#66402](https://github.com/ClickHouse/ClickHouse/pull/66402) ([vdimir](https://github.com/vdimir)). +* Added missing column materialization for cross join. [#66413](https://github.com/ClickHouse/ClickHouse/pull/66413) ([lgbo](https://github.com/lgbo-ustc)). +* Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Avoid possible logical error during import from Npy format in case of bad array nesting level, fix testing of other kinds of errors. [#66461](https://github.com/ClickHouse/ClickHouse/pull/66461) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix wrong count() result when there is non-deterministic function in predicate. [#66510](https://github.com/ClickHouse/ClickHouse/pull/66510) ([Duc Canh Le](https://github.com/canhld94)). +* Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix logical error in `PrometheusRequestHandler`. [#66621](https://github.com/ClickHouse/ClickHouse/pull/66621) ([Vitaly Baranov](https://github.com/vitlibar)). +* `column_length` is not updated in `ColumnTuple::insertManyFrom`. [#66626](https://github.com/ClickHouse/ClickHouse/pull/66626) ([lgbo](https://github.com/lgbo-ustc)). +* Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. 
[#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix rare case of stuck merge after drop column. [#66707](https://github.com/ClickHouse/ClickHouse/pull/66707) ([Raúl Marín](https://github.com/Algunenano)). +* Fix assertion `isUniqTypes` when insert select from remote sources. [#66722](https://github.com/ClickHouse/ClickHouse/pull/66722) ([Sema Checherinda](https://github.com/CheSema)). +* Backported in [#67026](https://github.com/ClickHouse/ClickHouse/issues/67026): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)). +* Backported in [#67443](https://github.com/ClickHouse/ClickHouse/issues/67443): Forbid create as select even when database_replicated_allow_heavy_create is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)). +* Backported in [#67201](https://github.com/ClickHouse/ClickHouse/issues/67201): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#67383](https://github.com/ClickHouse/ClickHouse/issues/67383): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67246](https://github.com/ClickHouse/ClickHouse/issues/67246): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)). + +#### Build/Testing/Packaging Improvement +* Instantiate template methods ahead in different .cpp files, avoid too large translation units during compiling. [#64818](https://github.com/ClickHouse/ClickHouse/pull/64818) ([lgbo](https://github.com/lgbo-ustc)). +* Stateless tests: Improve tests speed and decrease number of parallel jobs. [#65186](https://github.com/ClickHouse/ClickHouse/pull/65186) ([Nikita Fomichev](https://github.com/fm4v)). +* Add tests for `base64URLEncode` and `base64URLDecode`. Add analyzer tests. [#65979](https://github.com/ClickHouse/ClickHouse/pull/65979) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix problem when github terminate instances by timeout and artifacts are not collected and full test report is not generated. 
[#66036](https://github.com/ClickHouse/ClickHouse/pull/66036) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix test [test_grpc_protocol/test.py::test_progress](https://s3.amazonaws.com/clickhouse-test-reports/57695/188f8a3df74caf830ad1ced3c4cf6dfb0aa90093/integration_tests__asan__old_analyzer__[4_6].html). [#66063](https://github.com/ClickHouse/ClickHouse/pull/66063) ([Vitaly Baranov](https://github.com/vitlibar)). +* Stateless tests: Improve tests speed and decrease number of parallel jobs. [#66305](https://github.com/ClickHouse/ClickHouse/pull/66305) ([Nikita Fomichev](https://github.com/fm4v)). +* Stateless tests: Improve tests speed and decrease number of parallel jobs 3. [#66363](https://github.com/ClickHouse/ClickHouse/pull/66363) ([Nikita Fomichev](https://github.com/fm4v)). +* Tests: fix tests hang up in cases when gdb catches error. [#66411](https://github.com/ClickHouse/ClickHouse/pull/66411) ([Nikita Fomichev](https://github.com/fm4v)). +* ... since [Release v24.6.1.4423-stable](https://github.com/ClickHouse/ClickHouse/releases/tag/v24.6.1.4423-stable) when build in ppc64le with dynamic openssl build (`cmake -DENABLE_OPENSSL_DYNAMIC=1 -DCMAKE_TOOLCHAIN_FILE= cmake/linux/toolchain-ppc64le.cmake `) got error: ` ld.lld: error: duplicate symbol: OPENSSL_cleanse`. [#66733](https://github.com/ClickHouse/ClickHouse/pull/66733) ([Yong Wang](https://github.com/kashwy)). + +#### NO CL CATEGORY + +* Backported in [#67084](https://github.com/ClickHouse/ClickHouse/issues/67084):. [#67040](https://github.com/ClickHouse/ClickHouse/pull/67040) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#67452](https://github.com/ClickHouse/ClickHouse/issues/67452):. [#67392](https://github.com/ClickHouse/ClickHouse/pull/67392) ([alesapin](https://github.com/alesapin)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Revert "Small fix for 02340_parts_refcnt_mergetree""'. [#65155](https://github.com/ClickHouse/ClickHouse/pull/65155) ([Nikita Taranov](https://github.com/nickitat)). +* NO CL ENTRY: 'Revert "Use 1MB HTTP buffers to avoid frequent send syscalls"'. [#65498](https://github.com/ClickHouse/ClickHouse/pull/65498) ([Sergei Trifonov](https://github.com/serxa)). +* NO CL ENTRY: 'Revert "Resubmit http_external_tables_memory_tracking test"'. [#65500](https://github.com/ClickHouse/ClickHouse/pull/65500) ([Nikita Taranov](https://github.com/nickitat)). +* NO CL ENTRY: 'Revert "Add an assertion in ReplicatedMergeTreeQueue"'. [#65686](https://github.com/ClickHouse/ClickHouse/pull/65686) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "insertion deduplication on retries for materialised views"'. [#66134](https://github.com/ClickHouse/ClickHouse/pull/66134) ([Sema Checherinda](https://github.com/CheSema)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Return and fix 01600_parts_states_metrics_long test. [#58748](https://github.com/ClickHouse/ClickHouse/pull/58748) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Add azure_cache as storage policy to tests. [#59943](https://github.com/ClickHouse/ClickHouse/pull/59943) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Minor: replaced expression with LEGACY_MAX_LEVEL. [#61268](https://github.com/ClickHouse/ClickHouse/pull/61268) ([Vasily Nemkov](https://github.com/Enmk)). +* Make write to temporary data in cache do all checks and assertions as during write to ordinary cache. [#63348](https://github.com/ClickHouse/ClickHouse/pull/63348) ([Kseniia Sumarokova](https://github.com/kssenii)). 
+* Refactoring near azure blob storage. [#63636](https://github.com/ClickHouse/ClickHouse/pull/63636) ([Anton Popov](https://github.com/CurtizJ)). +* Everything should work with Analyzer. [#63643](https://github.com/ClickHouse/ClickHouse/pull/63643) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* add some log for non using detached tables. [#64992](https://github.com/ClickHouse/ClickHouse/pull/64992) ([Konstantin Morozov](https://github.com/k-morozov)). +* Remove dag flags. [#65234](https://github.com/ClickHouse/ClickHouse/pull/65234) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix flaky autocompletion test. [#65246](https://github.com/ClickHouse/ClickHouse/pull/65246) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Disable userspace page cache by default. [#65305](https://github.com/ClickHouse/ClickHouse/pull/65305) ([Michael Kolupaev](https://github.com/al13n321)). +* Update version_date.tsv and changelogs after v24.4.3.25-stable. [#65308](https://github.com/ClickHouse/ClickHouse/pull/65308) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)). +* Dodging reading from wrong table with parallel replicas. [#65417](https://github.com/ClickHouse/ClickHouse/pull/65417) ([Nikita Taranov](https://github.com/nickitat)). +* Fix: return error if can't connect to any replicas chosen for query execution. [#65467](https://github.com/ClickHouse/ClickHouse/pull/65467) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix `AzureObjectStorage::exists` method. [#65471](https://github.com/ClickHouse/ClickHouse/pull/65471) ([Anton Popov](https://github.com/CurtizJ)). +* Update version after release. [#65483](https://github.com/ClickHouse/ClickHouse/pull/65483) ([Raúl Marín](https://github.com/Algunenano)). +* Generate 24.6 changelog. [#65485](https://github.com/ClickHouse/ClickHouse/pull/65485) ([Raúl Marín](https://github.com/Algunenano)). +* Fix of `PlanSquashingTransform`: pipeline stuck. [#65487](https://github.com/ClickHouse/ClickHouse/pull/65487) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix bad test `02922_deduplication_with_zero_copy`. [#65492](https://github.com/ClickHouse/ClickHouse/pull/65492) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Disable clang-format in special areas. [#65495](https://github.com/ClickHouse/ClickHouse/pull/65495) ([Nikita Taranov](https://github.com/nickitat)). +* Fix `test_keeper_snapshots`. [#65497](https://github.com/ClickHouse/ClickHouse/pull/65497) ([Antonio Andelic](https://github.com/antonio2368)). +* Update to libunwind 8.1.7. [#65509](https://github.com/ClickHouse/ClickHouse/pull/65509) ([Michael Kolupaev](https://github.com/al13n321)). +* Setting `uniform_snowflake_conversion_functions` (not in any release yet) was replaced by setting `allow_deprecated_snowflake_conversion_functions`. The latter controls if the legacy snowflake conversion functions are available (by default, they are not). [#65522](https://github.com/ClickHouse/ClickHouse/pull/65522) ([Robert Schulze](https://github.com/rschu1ze)). +* Try CI without RerunCheck, jobs can be easily rerun manually though extra amount of work in CI will follow on workflow restart. [#65524](https://github.com/ClickHouse/ClickHouse/pull/65524) ([Max K.](https://github.com/maxknv)). +* Bump re2 to latest HEAD. 
[#65526](https://github.com/ClickHouse/ClickHouse/pull/65526) ([Robert Schulze](https://github.com/rschu1ze)). +* OpenSSL: Replace temporary fix for unsynchronized access by official fix. [#65529](https://github.com/ClickHouse/ClickHouse/pull/65529) ([Robert Schulze](https://github.com/rschu1ze)). +* Update README.md. [#65531](https://github.com/ClickHouse/ClickHouse/pull/65531) ([Tyler Hannan](https://github.com/tylerhannan)). +* CI: some time there are timeouts on DROP TABLES for random tests. [#65535](https://github.com/ClickHouse/ClickHouse/pull/65535) ([Sema Checherinda](https://github.com/CheSema)). +* Synchronize `MARK_CACHE_SIZE` value in default settings and config. [#65547](https://github.com/ClickHouse/ClickHouse/pull/65547) ([Denny Crane](https://github.com/den-crane)). +* CI: Skip removed test files in stateless flaky check job. [#65553](https://github.com/ClickHouse/ClickHouse/pull/65553) ([Max K.](https://github.com/maxknv)). +* Renames Build report jobs. [#65554](https://github.com/ClickHouse/ClickHouse/pull/65554) ([Max K.](https://github.com/maxknv)). +* Parse user from URL for dashboard.html (useful for sharing). [#65556](https://github.com/ClickHouse/ClickHouse/pull/65556) ([Azat Khuzhin](https://github.com/azat)). +* Remove tech debt. [#65561](https://github.com/ClickHouse/ClickHouse/pull/65561) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Maybe fix test `00763_lock_buffer_long.sh`. [#65562](https://github.com/ClickHouse/ClickHouse/pull/65562) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix clickhouse-keeper with not system-wide directories and provide override for local development. [#65563](https://github.com/ClickHouse/ClickHouse/pull/65563) ([Azat Khuzhin](https://github.com/azat)). +* Re-configure yamllint to allow document-start. [#65565](https://github.com/ClickHouse/ClickHouse/pull/65565) ([Azat Khuzhin](https://github.com/azat)). +* Fix flaky test `01254_dict_load_after_detach_attach.sql`. [#65571](https://github.com/ClickHouse/ClickHouse/pull/65571) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve flaky test to provide more diagnostics. [#65586](https://github.com/ClickHouse/ClickHouse/pull/65586) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test_parallel_replicas_distributed_skip_shards flakiness. [#65588](https://github.com/ClickHouse/ClickHouse/pull/65588) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix an error in the test about metadata_type. [#65592](https://github.com/ClickHouse/ClickHouse/pull/65592) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix slow test. [#65593](https://github.com/ClickHouse/ClickHouse/pull/65593) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* fix flaky 02864_statistics_uniq. [#65599](https://github.com/ClickHouse/ClickHouse/pull/65599) ([Han Fei](https://github.com/hanfei1991)). +* Fix 03172_error_log_table_not_empty. [#65604](https://github.com/ClickHouse/ClickHouse/pull/65604) ([Pablo Marcos](https://github.com/pamarcos)). +* Enable realtime digest for Jepsen tests. [#65608](https://github.com/ClickHouse/ClickHouse/pull/65608) ([Antonio Andelic](https://github.com/antonio2368)). +* CI: Return Job Rerun check. [#65613](https://github.com/ClickHouse/ClickHouse/pull/65613) ([Max K.](https://github.com/maxknv)). +* Update CHANGELOG.md. [#65624](https://github.com/ClickHouse/ClickHouse/pull/65624) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Module is required for authenticating in GH (in cloud). 
[#65628](https://github.com/ClickHouse/ClickHouse/pull/65628) ([Max K.](https://github.com/maxknv)). +* Update IObjectStorage.h. [#65631](https://github.com/ClickHouse/ClickHouse/pull/65631) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix 02834_apache_arrow_abort flakiness with MSAN. [#65640](https://github.com/ClickHouse/ClickHouse/pull/65640) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix overflow in StorageWindowView. [#65641](https://github.com/ClickHouse/ClickHouse/pull/65641) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix inconsistent AST formatting when a keyword is used as type name. [#65648](https://github.com/ClickHouse/ClickHouse/pull/65648) ([Michael Kolupaev](https://github.com/al13n321)). +* CI: Single point of setting mergeable check status. [#65658](https://github.com/ClickHouse/ClickHouse/pull/65658) ([Max K.](https://github.com/maxknv)). +* Miscellaneous and insignificant changes around Client/ClientBase. [#65669](https://github.com/ClickHouse/ClickHouse/pull/65669) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add Replicated database names to ZooKeeper for introspection. [#65675](https://github.com/ClickHouse/ClickHouse/pull/65675) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Introduce type contract checks in `IColumn`. [#65687](https://github.com/ClickHouse/ClickHouse/pull/65687) ([Nikita Taranov](https://github.com/nickitat)). +* Print slightly more information in 02982_aggregation_states_destruction. [#65688](https://github.com/ClickHouse/ClickHouse/pull/65688) ([Michael Kolupaev](https://github.com/al13n321)). +* Disable stacktrace collection in GWPAsan by default. [#65701](https://github.com/ClickHouse/ClickHouse/pull/65701) ([Antonio Andelic](https://github.com/antonio2368)). +* Build jemalloc with profiler. [#65702](https://github.com/ClickHouse/ClickHouse/pull/65702) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix duplicate symbol linkage error. [#65705](https://github.com/ClickHouse/ClickHouse/pull/65705) ([Nikita Taranov](https://github.com/nickitat)). +* Fix server restarts in performance tests. [#65717](https://github.com/ClickHouse/ClickHouse/pull/65717) ([Antonio Andelic](https://github.com/antonio2368)). +* Update 03002_part_log_rmt_fetch_mutate_error.sql. [#65720](https://github.com/ClickHouse/ClickHouse/pull/65720) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix flaky `02265_column_ttl`. Closes [#65719](https://github.com/ClickHouse/ClickHouse/issues/65719). [#65742](https://github.com/ClickHouse/ClickHouse/pull/65742) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* See [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). It doesn't solve the issue, but helps a bit. [#65746](https://github.com/ClickHouse/ClickHouse/pull/65746) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update CHANGELOG.md. [#65752](https://github.com/ClickHouse/ClickHouse/pull/65752) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* document declarative ssh-keys authentication. [#65756](https://github.com/ClickHouse/ClickHouse/pull/65756) ([Tobias Florek](https://github.com/ibotty)). +* `base64En/Decode64Url` --> `base64En/Decode64URL`. [#65760](https://github.com/ClickHouse/ClickHouse/pull/65760) ([Robert Schulze](https://github.com/rschu1ze)). 
+* Fix for issue [#65757](https://github.com/ClickHouse/ClickHouse/issues/65757). [#65763](https://github.com/ClickHouse/ClickHouse/pull/65763) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix flaky `test_replicated_database::test_alter_attach`. [#65766](https://github.com/ClickHouse/ClickHouse/pull/65766) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix: progress bar for read in order queries. [#65769](https://github.com/ClickHouse/ClickHouse/pull/65769) ([Igor Nikonov](https://github.com/devcrafter)). +* CI: Fix for Builds report job in backports and releases. [#65774](https://github.com/ClickHouse/ClickHouse/pull/65774) ([Max K.](https://github.com/maxknv)). +* CI: New create release workflow. [#65775](https://github.com/ClickHouse/ClickHouse/pull/65775) ([Max K.](https://github.com/maxknv)). +* fixed misspelled word. [#65778](https://github.com/ClickHouse/ClickHouse/pull/65778) ([Linh Giang](https://github.com/linhgiang24)). +* Refactor statistics interface. [#65792](https://github.com/ClickHouse/ClickHouse/pull/65792) ([Robert Schulze](https://github.com/rschu1ze)). +* Try to make `test_ldap_external_user_directory` less flaky. [#65794](https://github.com/ClickHouse/ClickHouse/pull/65794) ([Andrey Zvonov](https://github.com/zvonand)). +* AMI image with gh and jwt. [#65795](https://github.com/ClickHouse/ClickHouse/pull/65795) ([Max K.](https://github.com/maxknv)). +* Forbid join algorithm randomisation for 03094_one_thousand_joins. [#65798](https://github.com/ClickHouse/ClickHouse/pull/65798) ([Nikita Taranov](https://github.com/nickitat)). +* Fix 02931_rewrite_sum_column_and_constant flakiness. [#65800](https://github.com/ClickHouse/ClickHouse/pull/65800) ([Michael Kolupaev](https://github.com/al13n321)). +* Update StorageMaterializedView.cpp. [#65801](https://github.com/ClickHouse/ClickHouse/pull/65801) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix slow `getFQDNOrHostNameImpl` on macOS. [#65803](https://github.com/ClickHouse/ClickHouse/pull/65803) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* No jemalloc profiler for non-Linux. [#65834](https://github.com/ClickHouse/ClickHouse/pull/65834) ([Antonio Andelic](https://github.com/antonio2368)). +* Add missing workload identity changes. [#65848](https://github.com/ClickHouse/ClickHouse/pull/65848) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Fix rocksdb. [#65858](https://github.com/ClickHouse/ClickHouse/pull/65858) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update the list of easy tasks. [#65865](https://github.com/ClickHouse/ClickHouse/pull/65865) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#65866](https://github.com/ClickHouse/ClickHouse/pull/65866) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#43003](https://github.com/ClickHouse/ClickHouse/issues/43003). [#65870](https://github.com/ClickHouse/ClickHouse/pull/65870) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Uninteresting changes. [#65871](https://github.com/ClickHouse/ClickHouse/pull/65871) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Max sessions for user tests improvements. [#65888](https://github.com/ClickHouse/ClickHouse/pull/65888) ([Alexey Gerasimchuck](https://github.com/Demilivor)). +* Update version_date.tsv and changelogs after v24.6.1.4423-stable. [#65909](https://github.com/ClickHouse/ClickHouse/pull/65909) ([robot-clickhouse](https://github.com/robot-clickhouse)). 
+* Remove standalone Keeper build. [#65910](https://github.com/ClickHouse/ClickHouse/pull/65910) ([Antonio Andelic](https://github.com/antonio2368)). +* Add extra profiling helpers for Keeper. [#65918](https://github.com/ClickHouse/ClickHouse/pull/65918) ([Antonio Andelic](https://github.com/antonio2368)). +* PostgreSQL source cancel query comments. [#65919](https://github.com/ClickHouse/ClickHouse/pull/65919) ([Maksim Kita](https://github.com/kitaisreal)). +* Remove mysqlxx::Pool::Entry assignment operator. [#65920](https://github.com/ClickHouse/ClickHouse/pull/65920) ([Azat Khuzhin](https://github.com/azat)). +* No random settings for a test with `Object(JSON)`. [#65921](https://github.com/ClickHouse/ClickHouse/pull/65921) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Follow up to [#65046](https://github.com/ClickHouse/ClickHouse/issues/65046). [#65928](https://github.com/ClickHouse/ClickHouse/pull/65928) ([Kseniia Sumarokova](https://github.com/kssenii)). +* add restriction for storage join. [#65936](https://github.com/ClickHouse/ClickHouse/pull/65936) ([Han Fei](https://github.com/hanfei1991)). +* Update version_date.tsv and changelogs after v24.5.4.49-stable. [#65937](https://github.com/ClickHouse/ClickHouse/pull/65937) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Add table name to MergeTreeSource spans. [#65940](https://github.com/ClickHouse/ClickHouse/pull/65940) ([Nikita Taranov](https://github.com/nickitat)). +* Fix SettingsChangesHistory 24.7. [#65945](https://github.com/ClickHouse/ClickHouse/pull/65945) ([Raúl Marín](https://github.com/Algunenano)). +* Fix logical error "Expected ReadBufferFromFile, but got DB::EmptyReadBuffer". [#65949](https://github.com/ClickHouse/ClickHouse/pull/65949) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Use -Og instead of -O0 for debug builds. [#65953](https://github.com/ClickHouse/ClickHouse/pull/65953) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix data race for Keeper snapshot queue. [#65970](https://github.com/ClickHouse/ClickHouse/pull/65970) ([Antonio Andelic](https://github.com/antonio2368)). +* Minor changes in CHANGELOG. [#65971](https://github.com/ClickHouse/ClickHouse/pull/65971) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove unnatural punctuation from Parquet. [#65972](https://github.com/ClickHouse/ClickHouse/pull/65972) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Try fix "Check timeout expired" without any server logs in report in stateless tests. [#65977](https://github.com/ClickHouse/ClickHouse/pull/65977) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix support of non-const scale arguments in rounding functions. [#65983](https://github.com/ClickHouse/ClickHouse/pull/65983) ([Mikhail Gorshkov](https://github.com/mgorshkov)). +* More aesthetic error messages. [#65985](https://github.com/ClickHouse/ClickHouse/pull/65985) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix race in s3queue. [#65986](https://github.com/ClickHouse/ClickHouse/pull/65986) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Now it's possible to specify `s3-storage`, `azure-object-storage` and in general `object-storage`. [#65988](https://github.com/ClickHouse/ClickHouse/pull/65988) ([alesapin](https://github.com/alesapin)). +* Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Relax the check in 02982_aggregation_states_destruction. 
[#66011](https://github.com/ClickHouse/ClickHouse/pull/66011) ([Nikita Taranov](https://github.com/nickitat)). +* Fix `01158_zookeeper_log_long`. [#66012](https://github.com/ClickHouse/ClickHouse/pull/66012) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Remove scary jemalloc log. [#66028](https://github.com/ClickHouse/ClickHouse/pull/66028) ([Antonio Andelic](https://github.com/antonio2368)). +* Move experimental settings to the experimental block. [#66030](https://github.com/ClickHouse/ClickHouse/pull/66030) ([Raúl Marín](https://github.com/Algunenano)). +* Fix lock-order-inversion in DatabaseCatalog. [#66038](https://github.com/ClickHouse/ClickHouse/pull/66038) ([Nikolay Degterinsky](https://github.com/evillique)). +* Try disabling jemalloc background threads. [#66041](https://github.com/ClickHouse/ClickHouse/pull/66041) ([Antonio Andelic](https://github.com/antonio2368)). +* Try to avoid conflicts in `SettingsChangesHistory.cpp`. [#66042](https://github.com/ClickHouse/ClickHouse/pull/66042) ([Anton Popov](https://github.com/CurtizJ)). +* Add profile events for regex cache. [#66050](https://github.com/ClickHouse/ClickHouse/pull/66050) ([Antonio Andelic](https://github.com/antonio2368)). +* Bump vectorscan to 5.4.10.1. [#66056](https://github.com/ClickHouse/ClickHouse/pull/66056) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove obsolete comment. [#66059](https://github.com/ClickHouse/ClickHouse/pull/66059) ([Robert Schulze](https://github.com/rschu1ze)). +* Maybe fix tsan assert in `test_mysql_killed_while_insert_8_0`. [#66064](https://github.com/ClickHouse/ClickHouse/pull/66064) ([Robert Schulze](https://github.com/rschu1ze)). +* Move some of `HTTPHandler` stuff to separate source files in order to reuse it in `PrometheusRequestHandler`. This PR is required for https://github.com/ClickHouse/ClickHouse/pull/64183. [#66067](https://github.com/ClickHouse/ClickHouse/pull/66067) ([Vitaly Baranov](https://github.com/vitlibar)). +* Bump rocksdb to v6.23.3. [#66068](https://github.com/ClickHouse/ClickHouse/pull/66068) ([Robert Schulze](https://github.com/rschu1ze)). +* Add protobufs for `Prometheus` `remote-write` / `remote-read` protocols to our repository. Fix cmake script for compiling protobufs. [#66069](https://github.com/ClickHouse/ClickHouse/pull/66069) ([Vitaly Baranov](https://github.com/vitlibar)). +* Use pinned versions of all python packages in CI docker images. Also makes clang-18.1.8 work with sanitizers and surprisingly fixes [#66049](https://github.com/ClickHouse/ClickHouse/issues/66049). [#66070](https://github.com/ClickHouse/ClickHouse/pull/66070) ([alesapin](https://github.com/alesapin)). +* Clean-up custom LLVM 15 patches. [#66072](https://github.com/ClickHouse/ClickHouse/pull/66072) ([Robert Schulze](https://github.com/rschu1ze)). +* Minor JWT client fixes. [#66073](https://github.com/ClickHouse/ClickHouse/pull/66073) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Bump vectorscan to 5.4.11. [#66082](https://github.com/ClickHouse/ClickHouse/pull/66082) ([Robert Schulze](https://github.com/rschu1ze)). +* Print stacktrace in case of abort after logical error. [#66091](https://github.com/ClickHouse/ClickHouse/pull/66091) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* fix flaky 03172_error_log_table_not_empty. [#66093](https://github.com/ClickHouse/ClickHouse/pull/66093) ([Sema Checherinda](https://github.com/CheSema)). +* Bump s2geometry to latest master. 
[#66094](https://github.com/ClickHouse/ClickHouse/pull/66094) ([Robert Schulze](https://github.com/rschu1ze)). +* update keeper bench example config file. [#66095](https://github.com/ClickHouse/ClickHouse/pull/66095) ([Han Fei](https://github.com/hanfei1991)). +* Avoid using source directory for generated files. [#66097](https://github.com/ClickHouse/ClickHouse/pull/66097) ([Azat Khuzhin](https://github.com/azat)). +* More precise warning message about sanitizers. [#66098](https://github.com/ClickHouse/ClickHouse/pull/66098) ([Anton Popov](https://github.com/CurtizJ)). +* Slightly better calculation of primary index. [#66099](https://github.com/ClickHouse/ClickHouse/pull/66099) ([Anton Popov](https://github.com/CurtizJ)). +* Bump Azure to 1.12. [#66100](https://github.com/ClickHouse/ClickHouse/pull/66100) ([Robert Schulze](https://github.com/rschu1ze)). +* Add a test for [#58998](https://github.com/ClickHouse/ClickHouse/issues/58998). [#66101](https://github.com/ClickHouse/ClickHouse/pull/66101) ([Anton Popov](https://github.com/CurtizJ)). +* CI: Fix sync pr merge. [#66105](https://github.com/ClickHouse/ClickHouse/pull/66105) ([Max K.](https://github.com/maxknv)). +* Remove flaky case from 02956_rocksdb_bulk_sink. [#66107](https://github.com/ClickHouse/ClickHouse/pull/66107) ([vdimir](https://github.com/vdimir)). +* Fix bugfix checker. [#66120](https://github.com/ClickHouse/ClickHouse/pull/66120) ([Raúl Marín](https://github.com/Algunenano)). +* Correctly print long processing requests in Keeper. [#66124](https://github.com/ClickHouse/ClickHouse/pull/66124) ([Antonio Andelic](https://github.com/antonio2368)). +* Update version_date.tsv and changelogs after v24.6.2.17-stable. [#66127](https://github.com/ClickHouse/ClickHouse/pull/66127) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Bump s2geometry again. [#66136](https://github.com/ClickHouse/ClickHouse/pull/66136) ([Robert Schulze](https://github.com/rschu1ze)). +* Switch submodule `contrib/orc` to a proper commit in the [main](https://github.com/ClickHouse/orc/tree/main) branch. Previously a commit from a removed branch was used ([see](https://github.com/ClickHouse/orc/pull/13)). [#66137](https://github.com/ClickHouse/ClickHouse/pull/66137) ([Vitaly Baranov](https://github.com/vitlibar)). +* Finalize MergedBlockOutputStream in dtor. [#66138](https://github.com/ClickHouse/ClickHouse/pull/66138) ([Nikita Taranov](https://github.com/nickitat)). +* Proper destruction order of AsyncLoader::Pool fields. [#66145](https://github.com/ClickHouse/ClickHouse/pull/66145) ([Sergei Trifonov](https://github.com/serxa)). +* Playing minesweeper with build system. [#66147](https://github.com/ClickHouse/ClickHouse/pull/66147) ([Nikita Taranov](https://github.com/nickitat)). +* Fix clang-tidy error in BufferWithOwnMemory.h. [#66161](https://github.com/ClickHouse/ClickHouse/pull/66161) ([Nikita Taranov](https://github.com/nickitat)). +* Use peak_threads_usage instead of arrayUniq(thread_ids) in tests. [#66162](https://github.com/ClickHouse/ClickHouse/pull/66162) ([Azat Khuzhin](https://github.com/azat)). +* Fix crash when adding empty tuple to query cache. [#66168](https://github.com/ClickHouse/ClickHouse/pull/66168) ([Michael Kolupaev](https://github.com/al13n321)). +* tests: fix 01563_distributed_query_finish flakiness (due to system.*_log_sender). [#66171](https://github.com/ClickHouse/ClickHouse/pull/66171) ([Azat Khuzhin](https://github.com/azat)). +* Refactor `OptimizeIfWithConstantConditionVisitor` using `InDepthNodeVisitor`. 
[#66184](https://github.com/ClickHouse/ClickHouse/pull/66184) ([zhongyuankai](https://github.com/zhongyuankai)). +* Update README.md. [#66186](https://github.com/ClickHouse/ClickHouse/pull/66186) ([Tyler Hannan](https://github.com/tylerhannan)). +* Fix 01246_buffer_flush flakiness. [#66188](https://github.com/ClickHouse/ClickHouse/pull/66188) ([Azat Khuzhin](https://github.com/azat)). +* Avoid using harmful function `rand()` in grpc. [#66191](https://github.com/ClickHouse/ClickHouse/pull/66191) ([Vitaly Baranov](https://github.com/vitlibar)). +* Bump RocksDB. [#66216](https://github.com/ClickHouse/ClickHouse/pull/66216) ([Robert Schulze](https://github.com/rschu1ze)). +* Update README.md. [#66217](https://github.com/ClickHouse/ClickHouse/pull/66217) ([Tyler Hannan](https://github.com/tylerhannan)). +* Fixes peak_threads_usage metric when materialised views are involved. [#66230](https://github.com/ClickHouse/ClickHouse/pull/66230) ([Sema Checherinda](https://github.com/CheSema)). +* Remove test as requested in https://github.com/ClickHouse/ClickHouse/pull/65277#issuecomment-2211361465. [#66233](https://github.com/ClickHouse/ClickHouse/pull/66233) ([Arthur Passos](https://github.com/arthurpassos)). +* Fix test `00504_mergetree_arrays_rw.sql`. [#66248](https://github.com/ClickHouse/ClickHouse/pull/66248) ([Anton Popov](https://github.com/CurtizJ)). +* CI: Do not finalize CI running status unless all success. [#66276](https://github.com/ClickHouse/ClickHouse/pull/66276) ([Max K.](https://github.com/maxknv)). +* Collect core dumps in more tests. [#66281](https://github.com/ClickHouse/ClickHouse/pull/66281) ([Antonio Andelic](https://github.com/antonio2368)). +* Add a stateless test for gRPC protocol. [#66284](https://github.com/ClickHouse/ClickHouse/pull/66284) ([Vitaly Baranov](https://github.com/vitlibar)). +* Log message: Failed to connect to replica ... [#66289](https://github.com/ClickHouse/ClickHouse/pull/66289) ([Igor Nikonov](https://github.com/devcrafter)). +* Update run.sh. [#66290](https://github.com/ClickHouse/ClickHouse/pull/66290) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Some changes in the codebase as a preparation for LLVM 18. [#66293](https://github.com/ClickHouse/ClickHouse/pull/66293) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* There's some problem with iptables in parallel tests. [#66304](https://github.com/ClickHouse/ClickHouse/pull/66304) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)). +* Build failure if ENABLE_AWS_S3 is OFF fixed. [#66335](https://github.com/ClickHouse/ClickHouse/pull/66335) ([Ilya Golshtein](https://github.com/ilejn)). +* Enable checks in assert_cast under sanitizers. [#66336](https://github.com/ClickHouse/ClickHouse/pull/66336) ([Nikita Taranov](https://github.com/nickitat)). +* Create release workflow. [#66339](https://github.com/ClickHouse/ClickHouse/pull/66339) ([Max K.](https://github.com/maxknv)). +* Fix invalid XML. [#66342](https://github.com/ClickHouse/ClickHouse/pull/66342) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix possible deadlock for jemalloc with enabled profiler. [#66346](https://github.com/ClickHouse/ClickHouse/pull/66346) ([Azat Khuzhin](https://github.com/azat)). +* Fix test_parallel_replicas_custom_key. 
[#66349](https://github.com/ClickHouse/ClickHouse/pull/66349) ([Antonio Andelic](https://github.com/antonio2368)). +* Collect logs from `minio` in stateless and statefull tests. [#66353](https://github.com/ClickHouse/ClickHouse/pull/66353) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix "Sending a batch of X files to Y (0.00 rows, 0.00 B bytes)." in case of batch restoring. [#66375](https://github.com/ClickHouse/ClickHouse/pull/66375) ([Azat Khuzhin](https://github.com/azat)). +* Fix 03030_system_flush_distributed_settings flakiness. [#66376](https://github.com/ClickHouse/ClickHouse/pull/66376) ([Azat Khuzhin](https://github.com/azat)). +* PR cleanup: remove redundant code. [#66380](https://github.com/ClickHouse/ClickHouse/pull/66380) ([Igor Nikonov](https://github.com/devcrafter)). +* New slack bot to post messages about CI events - Post message if OOM. [#66392](https://github.com/ClickHouse/ClickHouse/pull/66392) ([Max K.](https://github.com/maxknv)). +* Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update test_storage_rabbitmq/test.py. [#66396](https://github.com/ClickHouse/ClickHouse/pull/66396) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add query elapsed time for non-default format in play UI. [#66398](https://github.com/ClickHouse/ClickHouse/pull/66398) ([Azat Khuzhin](https://github.com/azat)). +* Untangle setting headers. [#66404](https://github.com/ClickHouse/ClickHouse/pull/66404) ([Raúl Marín](https://github.com/Algunenano)). +* Remove noisy message. [#66406](https://github.com/ClickHouse/ClickHouse/pull/66406) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* If job detected as in-progress in PR workflow run - just remove it from todo list, as it not affected by the change. [#66407](https://github.com/ClickHouse/ClickHouse/pull/66407) ([Max K.](https://github.com/maxknv)). +* CI: CIBuddy to post from master only. [#66417](https://github.com/ClickHouse/ClickHouse/pull/66417) ([Max K.](https://github.com/maxknv)). +* Add a test for [#66333](https://github.com/ClickHouse/ClickHouse/issues/66333). [#66432](https://github.com/ClickHouse/ClickHouse/pull/66432) ([max-vostrikov](https://github.com/max-vostrikov)). +* Limit number of linker jobs on arm to avoid OOM during build. [#66435](https://github.com/ClickHouse/ClickHouse/pull/66435) ([Nikita Taranov](https://github.com/nickitat)). +* [RFC] Fix jemalloc assertion due to non-monotonic CLOCK_MONOTONIC_COARSE. [#66439](https://github.com/ClickHouse/ClickHouse/pull/66439) ([Azat Khuzhin](https://github.com/azat)). +* CI: Do not block CI on few number of test failures. [#66440](https://github.com/ClickHouse/ClickHouse/pull/66440) ([Max K.](https://github.com/maxknv)). +* Stateless tests: fix flaky tests 01037_polygon_dicts*. [#66445](https://github.com/ClickHouse/ClickHouse/pull/66445) ([Nikita Fomichev](https://github.com/fm4v)). 
+* Related to https://github.com/ClickHouse/ClickHouse/pull/62067 https://s3.amazonaws.com/clickhouse-test-reports/66410/5557dce188cabc7477bb4e874d47e3b80278ee66/stateless_tests__release_.html ``` 2024-07-12 16:04:29 +Queries for alter_table did not finish automatically after 250+ seconds 2024-07-12 16:04:29 +==================== QUERIES ==================== 2024-07-12 16:04:29 +Row 1: 2024-07-12 16:04:29 +────── 2024-07-12 16:04:29 +is_initial_query: 1 2024-07-12 16:04:29 +user: default 2024-07-12 16:04:29 +query_id: b43ffd7d-aee6-4161-aa82-bf9fff9d78c0 2024-07-12 16:04:29 +address: ::1 2024-07-12 16:04:29 +port: 58360 2024-07-12 16:04:29 +initial_user: default 2024-07-12 16:04:29 +initial_query_id: b43ffd7d-aee6-4161-aa82-bf9fff9d78c0 ... 2024-07-12 16:04:29 +query: OPTIMIZE TABLE alter_table0 FINAL ```. [#66460](https://github.com/ClickHouse/ClickHouse/pull/66460) ([Alexander Tokmakov](https://github.com/tavplubix)). +* OOM error was not visible since process is killed and status is not set Change sets ERROR status if job was killed. [#66463](https://github.com/ClickHouse/ClickHouse/pull/66463) ([Max K.](https://github.com/maxknv)). +* Add AST fuzzers jobs for CI caching so that they can be skipped in PRs not related to build or tests. [#66468](https://github.com/ClickHouse/ClickHouse/pull/66468) ([Max K.](https://github.com/maxknv)). +* If job with the same digest has been seen in master's CI it should be skipped in PR run. [#66471](https://github.com/ClickHouse/ClickHouse/pull/66471) ([Max K.](https://github.com/maxknv)). +* CI: Check job's exit status and report if killed. [#66477](https://github.com/ClickHouse/ClickHouse/pull/66477) ([Max K.](https://github.com/maxknv)). +* This closes [#37557](https://github.com/ClickHouse/ClickHouse/issues/37557). [#66482](https://github.com/ClickHouse/ClickHouse/pull/66482) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Add retry for GH set_status_comment call. [#66488](https://github.com/ClickHouse/ClickHouse/pull/66488) ([Max K.](https://github.com/maxknv)). +* OpenSSL: Minor follow-up to [#66064](https://github.com/ClickHouse/ClickHouse/issues/66064). [#66489](https://github.com/ClickHouse/ClickHouse/pull/66489) ([Robert Schulze](https://github.com/rschu1ze)). +* CI: Fix for job filtering in PRs. [#66490](https://github.com/ClickHouse/ClickHouse/pull/66490) ([Max K.](https://github.com/maxknv)). +* CI: Create release workflow updates. [#66498](https://github.com/ClickHouse/ClickHouse/pull/66498) ([Max K.](https://github.com/maxknv)). +* Add one more revision to ignore. [#66499](https://github.com/ClickHouse/ClickHouse/pull/66499) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Unit tests vomit a ton of garbage, see https://s3.amazonaws.com/clickhouse-test-reports/66457/0c82dc91f07b29ba503d7579c7d3ebecba532b73/unit_tests__tsan_/run.log - remove it. [#66501](https://github.com/ClickHouse/ClickHouse/pull/66501) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix double whitespace in EXPLAIN AST CREATE. [#66505](https://github.com/ClickHouse/ClickHouse/pull/66505) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad test `02530_dictionaries_update_field`. [#66507](https://github.com/ClickHouse/ClickHouse/pull/66507) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Https://play.clickhouse.com/play?user=play#u0vmrunuignozwnrx3n0yxj0x3rpbwusignozwnrx25hbwusihrlc3rfbmftzswgcmvwb3j0x3vybapguk9nignozwnrcwpxsevsrsbjagvja19zdgfydf90aw1lid49ig5vdygpic0gsu5urvjwquwgmjqwiehpvvikicagieforcbwdwxsx3jlcxvlc3rfbnvtymvyid0gmaogicagqu5eihrlc3rfc3rhdhvzice9icdts0lquevejwogicagqu5eihrlc3rfc3rhdhvziexjs0ugj0yljwogicagqu5eignozwnrx3n0yxr1cyahpsanc3vjy2vzcyckicagieforcbwb3npdglvbih0zxn0x25hbwusicdhcgfjagvfyxjyb3cnksa+idakt1jervigqlkgy2hly2tfc3rhcnrfdgltzq==. [#66508](https://github.com/ClickHouse/ClickHouse/pull/66508) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix MSan report in GRPC. [#66509](https://github.com/ClickHouse/ClickHouse/pull/66509) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Fix for skipping Builds_2 in PRs' CI. [#66512](https://github.com/ClickHouse/ClickHouse/pull/66512) ([Max K.](https://github.com/maxknv)). +* CI: Do not block Tests_3 unless MAX_FAILED_TESTS exceeded. [#66513](https://github.com/ClickHouse/ClickHouse/pull/66513) ([Max K.](https://github.com/maxknv)). +* Fix `02918_parallel_replicas_custom_key_unavailable_replica`. [#66516](https://github.com/ClickHouse/ClickHouse/pull/66516) ([Antonio Andelic](https://github.com/antonio2368)). +* Stateless tests: improvements related to OOM of test runs. [#66520](https://github.com/ClickHouse/ClickHouse/pull/66520) ([Nikita Fomichev](https://github.com/fm4v)). +* Tests: rename bad log names. [#66522](https://github.com/ClickHouse/ClickHouse/pull/66522) ([Nikita Fomichev](https://github.com/fm4v)). +* Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)). +* CI: Multiple fixes for handling killed jobs. [#66524](https://github.com/ClickHouse/ClickHouse/pull/66524) ([Max K.](https://github.com/maxknv)). +* Allow GWP Asan allocations only when initialization is finished. [#66526](https://github.com/ClickHouse/ClickHouse/pull/66526) ([Alexey Katsman](https://github.com/alexkats)). +* Update 02443_detach_attach_partition.sh. [#66529](https://github.com/ClickHouse/ClickHouse/pull/66529) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Follow up [#66435](https://github.com/ClickHouse/ClickHouse/issues/66435). [#66530](https://github.com/ClickHouse/ClickHouse/pull/66530) ([Nikita Taranov](https://github.com/nickitat)). +* fix log in keeper tcp handler. [#66531](https://github.com/ClickHouse/ClickHouse/pull/66531) ([Han Fei](https://github.com/hanfei1991)). +* CI: Report job start and finish to CI DB. [#66533](https://github.com/ClickHouse/ClickHouse/pull/66533) ([Max K.](https://github.com/maxknv)). +* Update 01396_inactive_replica_cleanup_nodes_zookeeper.sh. [#66535](https://github.com/ClickHouse/ClickHouse/pull/66535) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add dedicated runner to libfuzzer, update docker. [#66551](https://github.com/ClickHouse/ClickHouse/pull/66551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* fix tidy build. [#66552](https://github.com/ClickHouse/ClickHouse/pull/66552) ([Sema Checherinda](https://github.com/CheSema)). +* No-op env change. [#66553](https://github.com/ClickHouse/ClickHouse/pull/66553) ([Raúl Marín](https://github.com/Algunenano)). +* Fix typo in new_delete.cpp. [#66554](https://github.com/ClickHouse/ClickHouse/pull/66554) ([alesapin](https://github.com/alesapin)). +* Fix something in Fast Test. 
[#66558](https://github.com/ClickHouse/ClickHouse/pull/66558) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* update trusted contributors. [#66561](https://github.com/ClickHouse/ClickHouse/pull/66561) ([Xu Jia](https://github.com/XuJia0210)). +* Delete bad test `02805_distributed_queries_timeouts`. [#66563](https://github.com/ClickHouse/ClickHouse/pull/66563) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* More clarity in the test `03001_consider_lwd_when_merge`. [#66564](https://github.com/ClickHouse/ClickHouse/pull/66564) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Stateless tests: temporary disable sequential tests in parallel. [#66585](https://github.com/ClickHouse/ClickHouse/pull/66585) ([Nikita Fomichev](https://github.com/fm4v)). +* Move view targets to separate AST class `ASTViewTargets` in order to allow extending it to support more kinds of view targets. [#66590](https://github.com/ClickHouse/ClickHouse/pull/66590) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix bsdtar for @nikitamikhaylov. [#66592](https://github.com/ClickHouse/ClickHouse/pull/66592) ([alesapin](https://github.com/alesapin)). +* CI: POC for Auto Releases. [#66593](https://github.com/ClickHouse/ClickHouse/pull/66593) ([Max K.](https://github.com/maxknv)). +* Fix clang tidy after [#66402](https://github.com/ClickHouse/ClickHouse/issues/66402). [#66597](https://github.com/ClickHouse/ClickHouse/pull/66597) ([vdimir](https://github.com/vdimir)). +* Adjust the runtime of some slow performance test. [#66619](https://github.com/ClickHouse/ClickHouse/pull/66619) ([Robert Schulze](https://github.com/rschu1ze)). +* CI: Scale down AutoScaling Groups from runners. [#66622](https://github.com/ClickHouse/ClickHouse/pull/66622) ([Max K.](https://github.com/maxknv)). +* Allow to run clang-tidy with clang-19. [#66625](https://github.com/ClickHouse/ClickHouse/pull/66625) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix terrible test @arthurpassos. [#66632](https://github.com/ClickHouse/ClickHouse/pull/66632) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad log message. [#66633](https://github.com/ClickHouse/ClickHouse/pull/66633) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Negative sign in prewhere optimization. [#66637](https://github.com/ClickHouse/ClickHouse/pull/66637) ([cangyin](https://github.com/cangyin)). +* Closes [#66639](https://github.com/ClickHouse/ClickHouse/issues/66639#event-13533944949). [#66640](https://github.com/ClickHouse/ClickHouse/pull/66640) ([Kruglov Pavel](https://github.com/Avogar)). +* Avoid generating named tuple for special keywords (null, true, false). [#66641](https://github.com/ClickHouse/ClickHouse/pull/66641) ([Amos Bird](https://github.com/amosbird)). +* rearrange heavy tests 03008_deduplication. [#66642](https://github.com/ClickHouse/ClickHouse/pull/66642) ([Sema Checherinda](https://github.com/CheSema)). +* Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). +* CI: Remove aws lambda packages from oss. [#66651](https://github.com/ClickHouse/ClickHouse/pull/66651) ([Max K.](https://github.com/maxknv)). +* Introduce separate DEBUG_OR_SANITIZER_BUILD macro. [#66652](https://github.com/ClickHouse/ClickHouse/pull/66652) ([Nikita Taranov](https://github.com/nickitat)). +* Increase backoff because with slow builds sometimes 100ms is not enough to recover. 
[#66653](https://github.com/ClickHouse/ClickHouse/pull/66653) ([alesapin](https://github.com/alesapin)). +* Fix wrong queries hung error because of 02044_url_glob_parallel_connection_refused. [#66657](https://github.com/ClickHouse/ClickHouse/pull/66657) ([Nikita Taranov](https://github.com/nickitat)). +* add log for splitBlockIntoParts. [#66658](https://github.com/ClickHouse/ClickHouse/pull/66658) ([Han Fei](https://github.com/hanfei1991)). +* Minor: Make `CaseSensitiveness` an enum class. [#66673](https://github.com/ClickHouse/ClickHouse/pull/66673) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix bad test `02210_processors_profile_log`. [#66684](https://github.com/ClickHouse/ClickHouse/pull/66684) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix something around clang-tidy. [#66694](https://github.com/ClickHouse/ClickHouse/pull/66694) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* ci: dump dmesg in case of OOM. [#66705](https://github.com/ClickHouse/ClickHouse/pull/66705) ([Azat Khuzhin](https://github.com/azat)). +* fix clang tidy. [#66706](https://github.com/ClickHouse/ClickHouse/pull/66706) ([Han Fei](https://github.com/hanfei1991)). +* Https://s3.amazonaws.com/clickhouse-test-reports/61109/5cf2b53f146c1a4f24d8212f9f810d587c46bfc0/stateless_tests__release_.html. [#66724](https://github.com/ClickHouse/ClickHouse/pull/66724) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)). +* CI: Fix issue with a skipped Build report. [#66726](https://github.com/ClickHouse/ClickHouse/pull/66726) ([Max K.](https://github.com/maxknv)). +* relax condition in test, remove unused counters. [#66730](https://github.com/ClickHouse/ClickHouse/pull/66730) ([Sema Checherinda](https://github.com/CheSema)). +* Remove bad test `host_resolver_fail_count`. [#66731](https://github.com/ClickHouse/ClickHouse/pull/66731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad test `03036_join_filter_push_down_equivalent_sets`. [#66736](https://github.com/ClickHouse/ClickHouse/pull/66736) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad tests `long_select_and_alter`. [#66737](https://github.com/ClickHouse/ClickHouse/pull/66737) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add test test_storage_mysql/test.py::test_joins. [#66743](https://github.com/ClickHouse/ClickHouse/pull/66743) ([vdimir](https://github.com/vdimir)). +* Disallow build exclusion only by CI settings (ci_include_, ci_exclude_) to avoid running builds in auto sync prs. [#66744](https://github.com/ClickHouse/ClickHouse/pull/66744) ([Max K.](https://github.com/maxknv)). +* Use non-existent address to check connection error at table creation. [#66760](https://github.com/ClickHouse/ClickHouse/pull/66760) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67063](https://github.com/ClickHouse/ClickHouse/issues/67063): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Fix flakiness of async insert tests due to adaptive timeout. [#66771](https://github.com/ClickHouse/ClickHouse/pull/66771) ([Raúl Marín](https://github.com/Algunenano)). +* Attempt to fix flakiness of 01194_http_query_id. 
[#66774](https://github.com/ClickHouse/ClickHouse/pull/66774) ([Raúl Marín](https://github.com/Algunenano)). +* Turn off randomization of harmful setting. [#66776](https://github.com/ClickHouse/ClickHouse/pull/66776) ([alesapin](https://github.com/alesapin)). +* The number of batches was reduced in https://github.com/ClickHouse/ClickHouse/pull/65186, but then the parallel execution was disabled in https://github.com/ClickHouse/ClickHouse/pull/66585. So now tasks fail with timeout sometimes: https://s3.amazonaws.com/clickhouse-test-reports/66724/36275fdacc34206931f69087fe77539e25bbbedd/stateless_tests__tsan__s3_storage__[2_3].html. [#66783](https://github.com/ClickHouse/ClickHouse/pull/66783) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Remove obsolete code from CMakeLists. [#66786](https://github.com/ClickHouse/ClickHouse/pull/66786) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Stateless tests: split parallel tests more evenly. [#66787](https://github.com/ClickHouse/ClickHouse/pull/66787) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix test `02724_limit_num_mutations.sh`. [#66788](https://github.com/ClickHouse/ClickHouse/pull/66788) ([Anton Popov](https://github.com/CurtizJ)). +* Better diagnostics in `test_disk_configuration`. [#66802](https://github.com/ClickHouse/ClickHouse/pull/66802) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad test `02950_part_log_bytes_uncompressed`. [#66803](https://github.com/ClickHouse/ClickHouse/pull/66803) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Better diagnostics for test trace_events_stress. [#66804](https://github.com/ClickHouse/ClickHouse/pull/66804) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Make test `00997_set_index_array` lighter. [#66817](https://github.com/ClickHouse/ClickHouse/pull/66817) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Miscellaneous. [#66818](https://github.com/ClickHouse/ClickHouse/pull/66818) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix inconsistent formatting of lambda functions inside composite types. [#66819](https://github.com/ClickHouse/ClickHouse/pull/66819) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)). +* Never await in CI on BuildReport - just redo (await can be longer) - Remove BuildReport if no build jobs in workflow (for instance: Docs change) - Do not fail CheckReadyForMerge job if the only non-green status is Cloud Sync. [#66822](https://github.com/ClickHouse/ClickHouse/pull/66822) ([Max K.](https://github.com/maxknv)). +* Remove bad tests @azat. [#66823](https://github.com/ClickHouse/ClickHouse/pull/66823) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: New Release workflow updates and fixes. [#66830](https://github.com/ClickHouse/ClickHouse/pull/66830) ([Max K.](https://github.com/maxknv)). +* Fix signed integer overflow in function `age`. [#66831](https://github.com/ClickHouse/ClickHouse/pull/66831) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix RocksDB bs. [#66838](https://github.com/ClickHouse/ClickHouse/pull/66838) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Split a test for index. [#66839](https://github.com/ClickHouse/ClickHouse/pull/66839) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix inconsistent formatting of `NOT ((SELECT ...))`. 
[#66840](https://github.com/ClickHouse/ClickHouse/pull/66840) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Make test `01592_long_window_functions1` lighter. [#66841](https://github.com/ClickHouse/ClickHouse/pull/66841) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* What if I will change the test for SSL authentication?. [#66844](https://github.com/ClickHouse/ClickHouse/pull/66844) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Functions `[a-g]*`: Iterate over `input_rows_count` where appropriate. [#66846](https://github.com/ClickHouse/ClickHouse/pull/66846) ([Robert Schulze](https://github.com/rschu1ze)). +* Revert libunwind patch. [#66850](https://github.com/ClickHouse/ClickHouse/pull/66850) ([Antonio Andelic](https://github.com/antonio2368)). +* Split test 03038_nested_dynamic_merges to avoid timeouts. [#66863](https://github.com/ClickHouse/ClickHouse/pull/66863) ([Kruglov Pavel](https://github.com/Avogar)). +* CI: Print instance info in runner's init script. [#66868](https://github.com/ClickHouse/ClickHouse/pull/66868) ([Max K.](https://github.com/maxknv)). +* Backported in [#67257](https://github.com/ClickHouse/ClickHouse/issues/67257): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)). +* CI: CI Buddy to notify about fatal workflow failures. [#66890](https://github.com/ClickHouse/ClickHouse/pull/66890) ([Max K.](https://github.com/maxknv)). +* CI: Add ec2 instance lifecycle metadata to CIDB. [#66918](https://github.com/ClickHouse/ClickHouse/pull/66918) ([Max K.](https://github.com/maxknv)). +* CI: Remove ci runners scripts from oss. [#66920](https://github.com/ClickHouse/ClickHouse/pull/66920) ([Max K.](https://github.com/maxknv)). +* Backported in [#67209](https://github.com/ClickHouse/ClickHouse/issues/67209): Decrease rate limit in `01923_network_receive_time_metric_insert`. [#66924](https://github.com/ClickHouse/ClickHouse/pull/66924) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67227](https://github.com/ClickHouse/ClickHouse/issues/67227): Grouparrayintersect: fix serialization bug. [#66928](https://github.com/ClickHouse/ClickHouse/pull/66928) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#67207](https://github.com/ClickHouse/ClickHouse/issues/67207): Un-flake test_runtime_configurable_cache_size. [#66934](https://github.com/ClickHouse/ClickHouse/pull/66934) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#66975](https://github.com/ClickHouse/ClickHouse/issues/66975): CI: Fixes docker server build for release branches. [#66955](https://github.com/ClickHouse/ClickHouse/pull/66955) ([Max K.](https://github.com/maxknv)). +* Backported in [#67213](https://github.com/ClickHouse/ClickHouse/issues/67213): [CI Fest] Split dynamic tests and rewrite them from sh to sql to avoid timeouts. [#66981](https://github.com/ClickHouse/ClickHouse/pull/66981) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67033](https://github.com/ClickHouse/ClickHouse/issues/67033): [CI Fest] Fix use-of-uninitialized-value in JSONExtract* numeric functions. [#66984](https://github.com/ClickHouse/ClickHouse/pull/66984) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67051](https://github.com/ClickHouse/ClickHouse/issues/67051): CI: Fix for workflow results parsing. 
[#67000](https://github.com/ClickHouse/ClickHouse/pull/67000) ([Max K.](https://github.com/maxknv)). +* Backported in [#67116](https://github.com/ClickHouse/ClickHouse/issues/67116): Disable setting `optimize_functions_to_subcolumns`. [#67046](https://github.com/ClickHouse/ClickHouse/pull/67046) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#67205](https://github.com/ClickHouse/ClickHouse/issues/67205): Increase max allocation size for sanitizers. [#67049](https://github.com/ClickHouse/ClickHouse/pull/67049) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67124](https://github.com/ClickHouse/ClickHouse/issues/67124): Very sad failure: ``` 2024.07.24 13:28:45.517777 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} executeQuery: (from 172.16.11.1:55890) OPTIMIZE TABLE replicated_mt FINAL (stage: Complete) 2024.07.24 13:28:45.525945 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (ReplicatedMergeTreeQueue): Waiting for 4 entries to be processed: queue-0000000004, queue-0000000002, queue-0000000001, queue-0000000000 2024.07.24 13:29:15.528024 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e) (MergerMutator): Selected 3 parts from all_0_0_0 to all_2_2_0 2024.07.24 13:29:15.530736 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Created log entry /clickhouse/tables/replicated_mt/log/log-0000000004 for merge all_0_2_1 2024.07.24 13:29:15.530873 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to process log entry 2024.07.24 13:29:15.530919 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to pull log-0000000004 to queue 2024.07.24 13:29:15.534286 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Looking for node corresponding to log-0000000004 in node1 queue 2024.07.24 13:29:15.534793 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for queue-0000000005 to disappear from node1 queue 2024.07.24 13:29:15.585533 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} TCPHandler: Processed in 30.067804125 sec. ```. [#67067](https://github.com/ClickHouse/ClickHouse/pull/67067) ([alesapin](https://github.com/alesapin)). +* Backported in [#67203](https://github.com/ClickHouse/ClickHouse/issues/67203): Fix flaky `test_seekable_formats_url` and `test_seekable_formats` S3 storage tests. [#67070](https://github.com/ClickHouse/ClickHouse/pull/67070) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67222](https://github.com/ClickHouse/ClickHouse/issues/67222): Fix 2680 flasky. [#67078](https://github.com/ClickHouse/ClickHouse/pull/67078) ([jsc0218](https://github.com/jsc0218)). +* Backported in [#67190](https://github.com/ClickHouse/ClickHouse/issues/67190): Attempt to fix flakiness of some window view tests. [#67130](https://github.com/ClickHouse/ClickHouse/pull/67130) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#67272](https://github.com/ClickHouse/ClickHouse/issues/67272): Rename (unreleased) bad setting. [#67149](https://github.com/ClickHouse/ClickHouse/pull/67149) ([Raúl Marín](https://github.com/Algunenano)). 
+* Backported in [#67441](https://github.com/ClickHouse/ClickHouse/issues/67441): Try to fix 2572. [#67158](https://github.com/ClickHouse/ClickHouse/pull/67158) ([jsc0218](https://github.com/jsc0218)). +* Backported in [#67416](https://github.com/ClickHouse/ClickHouse/issues/67416): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 271065a78fb..027b207d3ad 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v24.7.1.2915-stable 2024-07-30 v24.6.2.17-stable 2024-07-05 v24.6.1.4423-stable 2024-07-01 v24.5.4.49-stable 2024-07-01 From 08ecf6c6642f7fba6f5503f5121c832b982de945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 30 Jul 2024 20:07:37 +0000 Subject: [PATCH 1242/2361] Reset formatter after buffer is restarted The formatter can use the buffer to initialize some of its members. If the buffer is restarted after those members are initialized, the restart might invalidate pointers/references/iterators held in the buffer. --- src/Storages/MessageQueueSink.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/MessageQueueSink.cpp b/src/Storages/MessageQueueSink.cpp index 36899011e33..d4dabd60ef8 100644 --- a/src/Storages/MessageQueueSink.cpp +++ b/src/Storages/MessageQueueSink.cpp @@ -60,12 +60,12 @@ void MessageQueueSink::consume(Chunk & chunk) row_format->writeRow(columns, row); } row_format->finalize(); - row_format->resetFormatter(); producer->produce(buffer->str(), i, columns, row - 1); /// Reallocate buffer if it's capacity is large then DBMS_DEFAULT_BUFFER_SIZE, /// because most likely in this case we serialized abnormally large row /// and won't need this large allocated buffer anymore.
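// [Editor's illustration, not part of the patch above; the hunk continues right below.]
// A minimal sketch of the ordering hazard the commit message describes: a formatter that
// caches a pointer into its output buffer must be reset *after* the buffer is restarted,
// not before. std::string stands in for the sink's memory buffer and ToyFormatter for the
// row output format; both names are hypothetical, not the real ClickHouse classes.

#include <cassert>
#include <string>

struct ToyFormatter
{
    const char * out = nullptr;                        // state derived from the buffer's storage
    void reset(const std::string & buf) { out = buf.data(); }
};

int main()
{
    std::string buffer(1 << 20, 'x');                  // an "abnormally large" serialized row
    ToyFormatter formatter;
    formatter.reset(buffer);

    // Old order (reset first, restart second): clear() + shrink_to_fit() may reallocate the
    // storage, leaving `formatter.out` dangling until the next reset.
    // New order (what the patch does): restart the buffer, then reset the formatter so it
    // re-derives its state from the new storage.
    buffer.clear();
    buffer.shrink_to_fit();
    formatter.reset(buffer);
    assert(formatter.out == buffer.data());
    return 0;
}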
buffer->restart(DBMS_DEFAULT_BUFFER_SIZE); + row_format->resetFormatter(); } } else @@ -73,8 +73,8 @@ void MessageQueueSink::consume(Chunk & chunk) format->write(getHeader().cloneWithColumns(chunk.detachColumns())); format->finalize(); producer->produce(buffer->str(), chunk.getNumRows(), columns, chunk.getNumRows() - 1); - format->resetFormatter(); buffer->restart(); + format->resetFormatter(); } } From b337474e05a39b9d30aa8202be40a4ab71718884 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 30 Jul 2024 20:44:19 +0000 Subject: [PATCH 1243/2361] Fix flaky test --- .../02932_refreshable_materialized_views_2.reference | 2 +- .../0_stateless/02932_refreshable_materialized_views_2.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference index eb4a0498260..cdaad32de0a 100644 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference @@ -5,7 +5,7 @@ <23: simple refresh> 1 <24: rename during refresh> 1 <25: rename during refresh> f Running -<27: cancelled> f Scheduled +<27: cancelled> f Scheduled Cancelled <28: drop during refresh> 0 0 CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 AS x <29: randomize> 1 1 diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh index cf0e61fb6a5..2a803114842 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh @@ -78,13 +78,13 @@ $CLICKHOUSE_CLIENT -nq " # Cancel. $CLICKHOUSE_CLIENT -nq " system cancel view f;" -while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Cancelled' ] +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] do sleep 0.5 done # Check that another refresh doesn't immediately start after the cancelled one. 
$CLICKHOUSE_CLIENT -nq " - select '<27: cancelled>', view, status from refreshes where view = 'f'; + select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f'; system refresh view f;" while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] do From 8d2b804c670d0941acc4fff059859017c1bd93c2 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Tue, 30 Jul 2024 21:57:08 +0100 Subject: [PATCH 1244/2361] fxs --- tests/integration/test_recovery_time_metric/test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py index 8f369d7759c..628f2e744e0 100644 --- a/tests/integration/test_recovery_time_metric/test.py +++ b/tests/integration/test_recovery_time_metric/test.py @@ -37,9 +37,7 @@ def test_recovery_time_metric(start_cluster): """ ) - node.exec_in_container( - ["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"] - ) + node.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"]) node.restart_clickhouse() From 4aedb9d40298c1a3204bb72a3288ea711eb5e2f6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 30 Jul 2024 23:02:22 +0200 Subject: [PATCH 1245/2361] Update test --- tests/integration/test_system_flush_logs/test.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index 2022f9d4a89..713b327eb76 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -13,9 +13,8 @@ node = cluster.add_instance( ) system_logs = [ - # disabled by default - ("system.text_log", 0), # enabled by default + ("system.text_log", 1), ("system.query_log", 1), ("system.query_thread_log", 1), ("system.part_log", 1), From 51212a414fd57270694c2653f730890b77714949 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 30 Jul 2024 21:13:20 +0000 Subject: [PATCH 1246/2361] Revert "Rename bad setting" This reverts commit eb4ec0912ad3a1e89ea7aec424366bc268262e11. --- CHANGELOG.md | 1 + src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Formats/FormatFactory.cpp | 2 +- src/Formats/FormatSettings.h | 2 +- src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp | 4 ++-- tests/queries/0_stateless/03013_json_key_ignore_case.sh | 4 ++-- 7 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4c873ba3f9..730346c0a2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ * The setting `optimize_trivial_insert_select` is disabled by default. In most cases, it should be beneficial. Nevertheless, if you are seeing slower INSERT SELECT or increased memory usage, you can enable it back or `SET compatibility = '24.6'`. [#58970](https://github.com/ClickHouse/ClickHouse/pull/58970) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Print stacktrace and diagnostic info if `clickhouse-client` or `clickhouse-local` crashes. [#61109](https://github.com/ClickHouse/ClickHouse/pull/61109) ([Alexander Tokmakov](https://github.com/tavplubix)). * The result of `SHOW INDEX | INDEXES | INDICES | KEYS` was previously sorted by the primary key column names. Since this was unintuitive, the result is now sorted by the position of the primary key columns within the primary key. 
[#61131](https://github.com/ClickHouse/ClickHouse/pull/61131) ([Robert Schulze](https://github.com/rschu1ze)). +* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). * Support reading partitioned data DeltaLake data. Infer DeltaLake schema by reading metadata instead of data. [#63201](https://github.com/ClickHouse/ClickHouse/pull/63201) ([Kseniia Sumarokova](https://github.com/kssenii)). * In composable protocols TLS layer accepted only `certificateFile` and `privateKeyFile` parameters. https://clickhouse.com/docs/en/operations/settings/composable-protocols. [#63985](https://github.com/ClickHouse/ClickHouse/pull/63985) ([Anton Ivashkin](https://github.com/ianton-ru)). * Added profile event `SelectQueriesWithPrimaryKeyUsage` which indicates how many SELECT queries use the primary key to evaluate the WHERE clause. [#64492](https://github.com/ClickHouse/ClickHouse/pull/64492) ([0x01f](https://github.com/0xfei)). diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 27b71558bd3..0aa879fd9ad 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1127,7 +1127,7 @@ class IColumn; M(Bool, input_format_json_defaults_for_missing_elements_in_named_tuple, true, "Insert default value in named tuple element if it's missing in json object", 0) \ M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \ M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \ - M(Bool, input_format_json_case_insensitive_column_matching, false, "Ignore case when matching JSON keys with CH columns", 0) \ + M(Bool, input_format_json_ignore_key_case, false, "Ignore json key case while read json field from string", 0) \ M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8bea0b1eed3..d38c8025227 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,7 +64,7 @@ static std::initializer_list Date: Tue, 30 Jul 2024 21:14:22 +0000 Subject: [PATCH 1247/2361] Revert "Merge pull request #61750 from KevinyhZou/improve_json_each_row_ignore_key_case" This reverts commit 3229cb5874cc9c8b04e16d2fec3231a25a3fc171, reversing changes made to f838c25d20bc87ed9a788eff0659d26be42cadb6. 
--- src/Core/Settings.h | 1 - src/Core/SettingsChangesHistory.cpp | 1 - src/Formats/FormatFactory.cpp | 1 - src/Formats/FormatSettings.h | 1 - .../Impl/JSONEachRowRowInputFormat.cpp | 19 +----------------- .../Formats/Impl/JSONEachRowRowInputFormat.h | 11 ++-------- .../03013_json_key_ignore_case.reference | 3 --- .../0_stateless/03013_json_key_ignore_case.sh | 18 ----------------- .../data_json/key_ignore_case.json | Bin 123 -> 0 bytes 9 files changed, 3 insertions(+), 52 deletions(-) delete mode 100644 tests/queries/0_stateless/03013_json_key_ignore_case.reference delete mode 100755 tests/queries/0_stateless/03013_json_key_ignore_case.sh delete mode 100644 tests/queries/0_stateless/data_json/key_ignore_case.json diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0aa879fd9ad..8cc25f42cc6 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1127,7 +1127,6 @@ class IColumn; M(Bool, input_format_json_defaults_for_missing_elements_in_named_tuple, true, "Insert default value in named tuple element if it's missing in json object", 0) \ M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \ M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \ - M(Bool, input_format_json_ignore_key_case, false, "Ignore json key case while read json field from string", 0) \ M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index d38c8025227..873578013e1 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,7 +64,6 @@ static std::initializer_list position in the block`. NOTE You can use perfect hash map. Block::NameMap name_map; - /// Hash table match `lower_case field name -> field name in the block`. - std::unordered_map lower_case_name_map; + /// Cached search results for previous row (keyed as index in JSON object) - used as a hint. std::vector prev_positions; diff --git a/tests/queries/0_stateless/03013_json_key_ignore_case.reference b/tests/queries/0_stateless/03013_json_key_ignore_case.reference deleted file mode 100644 index 54683d8fbc5..00000000000 --- a/tests/queries/0_stateless/03013_json_key_ignore_case.reference +++ /dev/null @@ -1,3 +0,0 @@ -1 77328912 Ben -2 77328913 Jim -3 77328914 Bill diff --git a/tests/queries/0_stateless/03013_json_key_ignore_case.sh b/tests/queries/0_stateless/03013_json_key_ignore_case.sh deleted file mode 100755 index 807e743b22a..00000000000 --- a/tests/queries/0_stateless/03013_json_key_ignore_case.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -# NOTE: this sh wrapper is required because of shell_config - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CURDIR"/../shell_config.sh - - -USER_FILES_PATH=$($CLICKHOUSE_CLIENT --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep -E '^Code: 107.*FILE_DOESNT_EXIST' | head -1 | awk '{gsub("/nonexist.txt","",$9); print $9}') - -cp "$CURDIR"/data_json/key_ignore_case.json $USER_FILES_PATH/ - -$CLICKHOUSE_CLIENT -q "drop table if exists test_tbl" -$CLICKHOUSE_CLIENT -q "create table test_tbl (id UInt16, reqid UInt32, name String) engine=MergeTree order by id" -$CLICKHOUSE_CLIENT -q "INSERT INTO test_tbl SELECT * FROM file('key_ignore_case.json', 'JSONEachRow') SETTINGS input_format_json_ignore_key_case=true" -$CLICKHOUSE_CLIENT -q "select * from test_tbl" -$CLICKHOUSE_CLIENT -q "drop table test_tbl" \ No newline at end of file diff --git a/tests/queries/0_stateless/data_json/key_ignore_case.json b/tests/queries/0_stateless/data_json/key_ignore_case.json deleted file mode 100644 index ad8f7cb450780891d64ac8cbbc19de17b92e7db5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 123 zcmbzd5O8HN>)lvsd-AZT-9KOAYoL6#t4O8 VnYnO Date: Tue, 30 Jul 2024 23:21:14 +0200 Subject: [PATCH 1248/2361] Update test --- tests/queries/1_stateful/00157_cache_dictionary.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/1_stateful/00157_cache_dictionary.sql b/tests/queries/1_stateful/00157_cache_dictionary.sql index a7c6c099de6..bb5a21d0779 100644 --- a/tests/queries/1_stateful/00157_cache_dictionary.sql +++ b/tests/queries/1_stateful/00157_cache_dictionary.sql @@ -9,7 +9,7 @@ ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS storage_policy = 'default'; -INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_block_size = 8192; CREATE DATABASE IF NOT EXISTS db_dict; DROP DICTIONARY IF EXISTS db_dict.cache_hits; From ea06447ca3b5330e09c0426574583b11c2ecaaa0 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 30 Jul 2024 22:03:29 +0000 Subject: [PATCH 1249/2361] Fix: skip cast only if constant doesn't have source expression --- src/Analyzer/FunctionNode.cpp | 7 +++++++ ...ize_skip_unused_shards_rewrite_in.reference | 18 +++++++++--------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/Analyzer/FunctionNode.cpp b/src/Analyzer/FunctionNode.cpp index debed0983fd..8e4e0725a2d 100644 --- a/src/Analyzer/FunctionNode.cpp +++ b/src/Analyzer/FunctionNode.cpp @@ -239,6 +239,13 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const if (function_name == "_CAST" && !argument_nodes.empty() && argument_nodes[0]->getNodeType() == QueryTreeNodeType::CONSTANT) new_options.add_cast_for_constants = false; + /// Avoid cast for `IN tuple(...)` expression. + /// Tuples could be quite big, and adding a type may significantly increase query size. + /// It should be safe because set type for `column IN tuple` is deduced from `column` type. 
+ if (isNameOfInFunction(function_name) && argument_nodes.size() > 1 && argument_nodes[1]->getNodeType() == QueryTreeNodeType::CONSTANT + && !static_cast(argument_nodes[1].get())->hasSourceExpression()) + new_options.add_cast_for_constants = false; + const auto & parameters = getParameters(); if (!parameters.getNodes().empty()) { diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference index 0a7a9d64208..8d064020c1f 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference @@ -24,10 +24,10 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_no%') and type = 'QueryFinish' order by query; - _CAST((0, 2), \'Tuple(UInt8, UInt8)\') - _CAST((0, 2), \'Tuple(UInt8, UInt8)\') + (0, 2) + (0, 2) -- --- w/ optimize_skip_unused_shards_rewrite_in=1 +-- w/ otimize_skip_unused_shards_rewrite_in=1 -- set optimize_skip_unused_shards_rewrite_in=1; @@ -45,8 +45,8 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_02%') and type = 'QueryFinish' order by query; - _CAST(tuple(0), \'Tuple(UInt8)\') - _CAST(tuple(2), \'Tuple(UInt8)\') + tuple(0) + tuple(2) select 'optimize_skip_unused_shards_rewrite_in(2,)'; optimize_skip_unused_shards_rewrite_in(2,) with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2,); @@ -59,7 +59,7 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_2%') and type = 'QueryFinish' order by query; - _CAST(tuple(2), \'Tuple(UInt8)\') + tuple(2) select 'optimize_skip_unused_shards_rewrite_in(0,)'; optimize_skip_unused_shards_rewrite_in(0,) with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0,); @@ -73,7 +73,7 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_00%') and type = 'QueryFinish' order by query; - _CAST(tuple(0), \'Tuple(UInt8)\') + tuple(0) -- signed column select 'signed column'; signed column @@ -88,8 +88,8 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%key_signed%') and type = 'QueryFinish' order by query; - _CAST(tuple(-1), \'Tuple(Int8)\') - _CAST(tuple(-2), \'Tuple(Int8)\') + tuple(-1) + tuple(-2) -- not tuple select * from dist_01756 where dummy in (0); 0 From 4a4bd97b4b63e495f76273ea8c12045dc129d81b Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Tue, 30 Jul 2024 16:50:37 -0600 Subject: [PATCH 1250/2361] Fix case sensitivity for percent_rank, dense_rank, and their aliases --- src/Processors/Transforms/WindowTransform.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index a1b46c8e36c..5fad68e4968 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -2726,18 +2726,18 @@ void registerWindowFunctions(AggregateFunctionFactory & factory) { return std::make_shared(name, argument_types, parameters); - }, properties}, AggregateFunctionFactory::Case::Insensitive); + }, properties}); - factory.registerAlias("dense_rank", "denseRank", 
AggregateFunctionFactory::Case::Sensitive); + factory.registerAlias("dense_rank", "denseRank", AggregateFunctionFactory::Case::Insensitive); factory.registerFunction("percentRank", {[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *) { return std::make_shared(name, argument_types, parameters); - }, properties}, AggregateFunctionFactory::Case::Insensitive); + }, properties}); - factory.registerAlias("percent_rank", "percentRank", AggregateFunctionFactory::Case::Sensitive); + factory.registerAlias("percent_rank", "percentRank", AggregateFunctionFactory::Case::Insensitive); factory.registerFunction("row_number", {[](const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *) From 3b12fec141fda8cb2a3ef68ac96e6e58f1fd69e3 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Tue, 30 Jul 2024 17:05:48 -0600 Subject: [PATCH 1251/2361] Update dense_rank doc to mention the denseRank alias --- docs/en/sql-reference/window-functions/dense_rank.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/en/sql-reference/window-functions/dense_rank.md b/docs/en/sql-reference/window-functions/dense_rank.md index d6445b68c55..2c8617fb668 100644 --- a/docs/en/sql-reference/window-functions/dense_rank.md +++ b/docs/en/sql-reference/window-functions/dense_rank.md @@ -12,6 +12,8 @@ The [rank](./rank.md) function provides the same behaviour, but with gaps in ran **Syntax** +Alias: `denseRank` (case-sensitive) + ```sql dense_rank (column_name) OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column] From 419a5e7f730dabe514becabc6c24ec5b87325e28 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Tue, 30 Jul 2024 17:17:01 -0600 Subject: [PATCH 1252/2361] Update window-functions doc with denseRank and percentRank aliases --- docs/en/sql-reference/window-functions/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/window-functions/index.md b/docs/en/sql-reference/window-functions/index.md index 0c3e2ea1cb6..27d4bd763c7 100644 --- a/docs/en/sql-reference/window-functions/index.md +++ b/docs/en/sql-reference/window-functions/index.md @@ -23,8 +23,8 @@ ClickHouse supports the standard grammar for defining windows and window functio | `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | ⌠(specify the number of seconds instead (`RANGE` works with any numeric type).) | | `GROUPS` frame | ⌠| | Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) | -| `rank()`, `dense_rank()`, `row_number()` | ✅ | -| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`| +| `rank()`, `dense_rank()`, `row_number()` | ✅
Alias: `denseRank()` | +| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`
Alias: `percentRank()`| | `lag/lead(value, offset)` | ❌
You can use one of the following workarounds:
1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead`
2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` | | ntile(buckets) | ✅
Specify window like, (partition by x order by y rows between unbounded preceding and unrounded following). | From 956f8762fef7473804f7d82d63f076e09736f42c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 31 Jul 2024 05:11:34 +0000 Subject: [PATCH 1253/2361] fix after merge --- src/Client/ClientApplicationBase.cpp | 37 ++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 9f133616d2e..1b2ae16a479 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -158,6 +158,8 @@ void ClientApplicationBase::init(int argc, char ** argv) ("config-file,C", po::value(), "config-file path") + ("proto_caps", po::value(), "enable/disable chunked protocol: chunked_optional, notchunked, notchunked_optional, send_chunked, send_chunked_optional, send_notchunked, send_notchunked_optional, recv_chunked, recv_chunked_optional, recv_notchunked, recv_notchunked_optional") + ("query,q", po::value>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") ("multiquery,n", "Obsolete, does nothing") @@ -339,6 +341,41 @@ void ClientApplicationBase::init(int argc, char ** argv) if (options.count("server_logs_file")) server_logs_file = options["server_logs_file"].as(); + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + config().setString("proto_caps.send", std::string(cap_str)); + config().setString("proto_caps.recv", std::string(cap_str)); + } + else + config().setString("proto_caps." 
+ direction, std::string(cap_str)); + } + } + query_processing_stage = QueryProcessingStage::fromString(options["stage"].as()); query_kind = parseQueryKind(options["query_kind"].as()); profile_events.print = options.count("print-profile-events"); From bc312eb046db07d901e208cdc1bb0abb1df3eabd Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 30 Jul 2024 21:27:50 +0200 Subject: [PATCH 1254/2361] Improve check --- src/IO/S3/URI.cpp | 32 ++++++++++++------- src/Storages/StorageFile.cpp | 18 +++++------ .../03215_parsing_archive_name_s3.sql | 2 +- 3 files changed, 30 insertions(+), 22 deletions(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index 33a4939c810..fead18315d8 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -55,10 +55,10 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) static constexpr auto OSS = "OSS"; static constexpr auto EOS = "EOS"; - if (!allow_archive_path_syntax) - uri_str = uri_; - else + if (allow_archive_path_syntax) std::tie(uri_str, archive_pattern) = getURIAndArchivePattern(uri_); + else + uri_str = uri_; uri = Poco::URI(uri_str); @@ -176,22 +176,30 @@ std::pair> URI::getURIAndArchivePattern( return {source, std::nullopt}; std::string_view path_to_archive_view = std::string_view{source}.substr(0, pos); + bool contains_spaces_around_operator = false; while (path_to_archive_view.ends_with(' ')) + { + contains_spaces_around_operator = true; path_to_archive_view.remove_suffix(1); - - if (path_to_archive_view.empty() || !hasSupportedArchiveExtension(path_to_archive_view)) - return {source, std::nullopt}; - - auto archive_uri = path_to_archive_view; + } std::string_view archive_pattern_view = std::string_view{source}.substr(pos + 2); - while (archive_pattern_view.front() == ' ') + while (archive_pattern_view.starts_with(' ')) + { + contains_spaces_around_operator = true; archive_pattern_view.remove_prefix(1); + } - if (archive_pattern_view.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Archive pattern is empty"); + /// possible situations when the first part can be archive is only if one of the following is true: + /// - it contains supported extension + /// - it contains spaces after or before :: (URI cannot contain spaces) + /// - it contains characters that could mean glob expression + if (archive_pattern_view.empty() || path_to_archive_view.empty() + || (!contains_spaces_around_operator && !hasSupportedArchiveExtension(path_to_archive_view) + && path_to_archive_view.find_first_of("*?{") == std::string_view::npos)) + return {source, std::nullopt}; - return std::pair{std::string{archive_uri}, std::string{archive_pattern_view}}; + return std::pair{std::string{path_to_archive_view}, std::string{archive_pattern_view}}; } } diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index efb39f90053..8c079aa4600 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -2258,21 +2258,21 @@ void StorageFile::parseFileSource(String source, String & filename, String & pat while (path_to_archive_view.ends_with(' ')) path_to_archive_view.remove_suffix(1); - if (path_to_archive_view.empty() || !hasSupportedArchiveExtension(path_to_archive_view)) + std::string_view filename_view = std::string_view{source}.substr(pos + 2); + while (filename_view.starts_with(' ')) + filename_view.remove_prefix(1); + + /// possible situations when the first part can be archive is only if one of the following is true: + /// - it contains supported extension + /// - it contains characters that could mean glob 
expression + if (filename_view.empty() || path_to_archive_view.empty() + || (!hasSupportedArchiveExtension(path_to_archive_view) && path_to_archive_view.find_first_of("*?{") == std::string_view::npos)) { filename = std::move(source); return; } path_to_archive = path_to_archive_view; - - std::string_view filename_view = std::string_view{source}.substr(pos + 2); - while (filename_view.front() == ' ') - filename_view.remove_prefix(1); - - if (filename_view.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filename is empty"); - filename = filename_view; } diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql index 3a7ed0b864c..e34be475c5a 100644 --- a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql @@ -2,6 +2,6 @@ -- Tag no-fasttest: Depends on AWS SELECT _file, _path FROM s3(s3_conn, filename='::03215_archive.csv') ORDER BY (_file, _path); -SELECT _file, _path FROM s3(s3_conn, filename='test :: 03215_archive.csv') ORDER BY (_file, _path); -- { serverError STD_EXCEPTION } +SELECT _file, _path FROM s3(s3_conn, filename='test :: 03215_archive.csv') ORDER BY (_file, _path); -- { serverError S3_ERROR } SELECT _file, _path FROM s3(s3_conn, filename='test::03215_archive.csv') ORDER BY (_file, _path); SELECT _file, _path FROM s3(s3_conn, filename='test.zip::03215_archive.csv') ORDER BY (_file, _path) SETTINGS allow_archive_path_syntax=0; From e664a144788b48c029f56548242baaeed82a80ff Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Wed, 31 Jul 2024 08:49:14 +0100 Subject: [PATCH 1255/2361] fix style --- src/Databases/DatabaseReplicated.cpp | 4 ++-- src/Storages/System/StorageSystemClusters.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 06cea65d62e..b2be593d326 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -356,7 +356,7 @@ ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) paths_get.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "log_ptr"); } } - + try { auto current_zookeeper = getZooKeeper(); @@ -396,7 +396,7 @@ ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) ++global_replica_index; } } - + return replicas_info; } catch (...) { diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index 4b9802c9089..d03b600b6ef 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -116,7 +116,7 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam else res_columns[i++]->insertDefault(); } - + ++global_replica_idx; } } From 5152248d438ef9162845507b68e18f1d8541a250 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 31 Jul 2024 09:25:59 +0200 Subject: [PATCH 1256/2361] Add test. 
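Before the test patch below, a short illustration of the `::` archive-path rule tightened in the S3 change above: the left-hand part of `archive :: pattern` is treated as an archive only if it has a supported archive extension, there are spaces around `::`, or it contains glob characters (`*?{`); otherwise the whole string is kept as a plain path. This is a hedged sketch of that heuristic, not the ClickHouse code — `splitArchivePath` and `looksLikeArchive` are made-up names and the extension list is deliberately truncated.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <string_view>
#include <utility>

/// Stand-in for the real supported-extension check.
static bool looksLikeArchive(std::string_view path)
{
    return path.ends_with(".zip") || path.ends_with(".tar") || path.ends_with(".7z");
}

/// Split "archive :: pattern" only when the left part is plausibly an archive.
static std::pair<std::string, std::optional<std::string>> splitArchivePath(const std::string & source)
{
    size_t pos = source.find("::");
    if (pos == std::string::npos)
        return {source, std::nullopt};

    std::string_view archive = std::string_view(source).substr(0, pos);
    std::string_view pattern = std::string_view(source).substr(pos + 2);

    bool spaces = false;
    while (archive.ends_with(' ')) { archive.remove_suffix(1); spaces = true; }
    while (pattern.starts_with(' ')) { pattern.remove_prefix(1); spaces = true; }

    bool plausible = !archive.empty() && !pattern.empty()
        && (spaces || looksLikeArchive(archive) || archive.find_first_of("*?{") != std::string_view::npos);

    if (!plausible)
        return {source, std::nullopt};  /// not an archive path: keep the whole string as-is

    return {std::string(archive), std::string(pattern)};
}

int main()
{
    auto [a, p] = splitArchivePath("test.zip::03215_archive.csv");
    std::cout << a << " / " << p.value_or("(none)") << '\n';   /// "test.zip / 03215_archive.csv"

    auto [b, q] = splitArchivePath("::03215_archive.csv");
    std::cout << b << " / " << q.value_or("(none)") << '\n';   /// left part empty, kept as a plain path
}
```

`StorageFile::parseFileSource` applies the same idea minus the spaces rule, and the split can be bypassed entirely with `allow_archive_path_syntax = 0`, which the updated 03215 test exercises.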
--- .../02864_restore_table_with_broken_part.sh | 25 ++-------------- ...ackup_with_matview_inner_table_metadata.sh | 25 ++-------------- ..._clear_old_temporary_directories.reference | 2 ++ ...kup_and_clear_old_temporary_directories.sh | 22 ++++++++++++++ .../0_stateless/backups/mt_250_parts.zip | Bin 0 -> 265998 bytes .../helpers/install_predefined_backup.sh | 27 ++++++++++++++++++ 6 files changed, 55 insertions(+), 46 deletions(-) create mode 100644 tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.reference create mode 100755 tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh create mode 100644 tests/queries/0_stateless/backups/mt_250_parts.zip create mode 100755 tests/queries/0_stateless/helpers/install_predefined_backup.sh diff --git a/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh b/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh index 08313e2fd3b..bf76727f76f 100755 --- a/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh +++ b/tests/queries/0_stateless/02864_restore_table_with_broken_part.sh @@ -5,29 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -# Copies a test predefined backup from "/tests/queries/0_stateless/backups/" folder to the "backups" disk, -# returns the path to the backup relative to that disk. -function install_test_backup() -{ - local test_backup_filename="$1" - local test_backup_path="$CURDIR/backups/${test_backup_filename}" - - local backups_disk_root - backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'") - - if [ -z "${backups_disk_root}" ]; then - echo "Disk '${backups_disk_root}' not found" - exit 1 - fi - - local install_path=${backups_disk_root}/${CLICKHOUSE_DATABASE}/${test_backup_filename} - mkdir -p "$(dirname "${install_path}")" - ln -s "${test_backup_path}" "${install_path}" - - echo "${CLICKHOUSE_DATABASE}/${test_backup_filename}" -} - -backup_name="$(install_test_backup with_broken_part.zip)" +# In this test we restore from "/tests/queries/0_stateless/backups/with_broken_part.zip" +backup_name="$($CURDIR/helpers/install_predefined_backup.sh with_broken_part.zip)" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS tbl" diff --git a/tests/queries/0_stateless/03001_restore_from_old_backup_with_matview_inner_table_metadata.sh b/tests/queries/0_stateless/03001_restore_from_old_backup_with_matview_inner_table_metadata.sh index 8d987dbf1df..2c70cb1e3be 100755 --- a/tests/queries/0_stateless/03001_restore_from_old_backup_with_matview_inner_table_metadata.sh +++ b/tests/queries/0_stateless/03001_restore_from_old_backup_with_matview_inner_table_metadata.sh @@ -5,29 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -# Copies a test predefined backup from "/tests/queries/0_stateless/backups/" folder to the "backups" disk, -# returns the path to the backup relative to that disk. 
-function install_test_backup() -{ - local test_backup_filename="$1" - local test_backup_path="$CURDIR/backups/${test_backup_filename}" - - local backups_disk_root - backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'") - - if [ -z "${backups_disk_root}" ]; then - echo "Disk '${backups_disk_root}' not found" - exit 1 - fi - - local install_path=${backups_disk_root}/${CLICKHOUSE_DATABASE}/${test_backup_filename} - mkdir -p "$(dirname "${install_path}")" - ln -s "${test_backup_path}" "${install_path}" - - echo "${CLICKHOUSE_DATABASE}/${test_backup_filename}" -} - -backup_name="$(install_test_backup old_backup_with_matview_inner_table_metadata.zip)" +# In this test we restore from "/tests/queries/0_stateless/backups/old_backup_with_matview_inner_table_metadata.zip" +backup_name="$($CURDIR/helpers/install_predefined_backup.sh old_backup_with_matview_inner_table_metadata.zip)" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS mv" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS src" diff --git a/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.reference b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.reference new file mode 100644 index 00000000000..3f3fbd9ab58 --- /dev/null +++ b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.reference @@ -0,0 +1,2 @@ +RESTORED +250 31375 diff --git a/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh new file mode 100755 index 00000000000..e0c8f08e695 --- /dev/null +++ b/tests/queries/0_stateless/03214_backup_and_clear_old_temporary_directories.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# In this test we restore from "/tests/queries/0_stateless/backups/mt_250_parts.zip" +backup_name="$($CURDIR/helpers/install_predefined_backup.sh mt_250_parts.zip)" + +${CLICKHOUSE_CLIENT} -nm --query " +DROP TABLE IF EXISTS manyparts; +CREATE TABLE manyparts (x Int64) ENGINE=MergeTree ORDER BY tuple() SETTINGS merge_tree_clear_old_temporary_directories_interval_seconds=1, temporary_directories_lifetime=1; +" + +# RESTORE must protect its temporary directories from removing. 
+${CLICKHOUSE_CLIENT} --query "RESTORE TABLE default.mt_250_parts AS manyparts FROM Disk('backups', '${backup_name}') SETTINGS allow_different_table_def=true" | grep -o "RESTORED" + +${CLICKHOUSE_CLIENT} -nm --query " +SELECT count(), sum(x) FROM manyparts; +DROP TABLE manyparts; +" diff --git a/tests/queries/0_stateless/backups/mt_250_parts.zip b/tests/queries/0_stateless/backups/mt_250_parts.zip new file mode 100644 index 0000000000000000000000000000000000000000..15310b79054e09c75876efd20845bbdabdfec2f5 GIT binary patch literal 265998 zcmZ^LbyU>d_qBycBi$g~(%s$NAT23KcS(1Hv~;I1q$7e1-7V7HNF(_UqxgN-`pxq% zi?u%M-gC}A`|PvtD9Jv3hVkghix-bx^WJF%st<_?m_2%wHSzor?4w7I7>$igtex#v zfqS3Ho1zSV(_TH{Cl!!FHF@ii#D+%V3LtZC0OSLHxL*cIVv#w>nQ$yQ9NY-Mexva= zsnm9PanzoGNa2X{s=+tY_fqig;^!Uf?MjvJWrObx+4Wh-Y2Sj^#lqdqkniWShC`Fv zt)Z2wuDh$4yI&2hH(Rg4|7q>}`&`gitX1627ys_E;X*!oVJD{b#`>!7BCnwp z5NcB2diVRi??vB&kD-F#E%2sm;r24*Y(wzQFHxWDrtP-M=x(pd_pZ<2u5aP?YQg8~ z`%dc(_%D~lD=|{P8w9`joc_H0cq=tm5Ofjw6)EO=o5_KTWB*C!Q_VN}HsCysUt9*{ydw3%9QbE_VLxz9JxlSYTvsU!JC2` z@GTqMuHN1FMi*U+3KeMYb9n#?$IN4@M!K6x9wYzqd~&&S5hDl?yxa1vYbCV2Y`wdD ze|ISeQa`>UtsIf~pxxGH{UOCPg}U%=IGH5MbNTY%t^oYc$UzsxB3q#WZwIY7$}x*8 z>xCR;f{*Ji|zH3p5wOmh>kr;4%G9~9dp16!nfsKEx!}JTUXy@%xiBN8V z@O%3F_lUc8gu%JRn7{d{`_=AeLkwC54`Gi>FR6bLJrB z?06B@h6;tTG{nPhNw6&-;xrJJoAH~7rHb0yWYb|svt1P#wW!g(EaMs=EcT`*QR4jb z33_?etnsa_>lnFR;mqr_gI6Ka@eBv(GYQ2t+^52C~kX_T-~!NUptBc=wE5O zX;fD5-bOq=9S6~H4uz?xZ8<5{OwR`(oY0{tR0^n(3~_i3W{|BI?sApK*UlvzNAaZ? ztkl6u_HTxdnhiVxM5h*}+0|IIRJ^&SKPMdXA-c}qTWu3RRYGCBrgF2Z$@*mox)@nE z)o~*KCek!zl*cp@v$PDsA~?*Nm^z8Kwb8%{>#xx#(UyWJKEp0Yi@6@fu-Lcge2rR)Zm}I=>~Zc+ek^5f_3%-; z7&|I5-1Gs1j}XwC*Dl;jeTO=-6q> zlAWXGomf0ILXOomjq=rGW+;HTE|agZm}t8#=dN%_+fL zpsgBb5);jPpT;p&wODG~0Esr^_+_BW@bg;E@I+1pFZ-eJ1+tl;4+!t5p5B^z6yN!@ z3%2r*<`g;Cx7{9}W^C}o1Y7&*t z4e>_ve`8;X-)|$tZi~avU{-N@L87F(lT6ty592|GvTU8@EapAt?Jz7MXb*9hAfGI_5eFaDaWW<8ScVASYR4 zAM`*C6Q*+rOGloHHO!wiFqb_snIdXRUWW!R>xmRyS=zi-P9Kop4fOVlmqro@PN)9? zF$4E%zlMw${!gkqzy=k3ipX1CTF$SRUpMdT9*i#@*ns^_7~N2+M`Q0Q3iYk~KZQ|a zyH4D8)O~^#wBU8OQ|2EJK)8b=(km}=L7y1kt_eY;6^R#lRB@V!$uuwyPUD}Rb{~A! 
zggYXY;d_K4= z#78&{A;@{$w=1i&HJM`D-}cV}-ZDM`oV#e=!z+hMMLe2or<8 zv+lW5OF-8w%A!|OT^<7N$5m;H-ZHGg8rq%ZfrwbY316kI+u0;_ zP!;a=pCimj?0c&6kNLd6hP^l#@Gv7Dk&&3lGhOV?(slAPQ8}T|w(AD5x-2~2+yBP7 zsfktIOHqg}8`jZLW*Hn>cNg=1@EokBrYfQNor5&um`60bY8&6AK}&7`Zj@taz(S>rtdpx!qa zigkrD8kyR&Rnl+@G?bADPC}+ipw~&*|7~PLyQOnS{ftc7IZDAgrDwK=Bl#A3+5bq) zV%aT#Jnl%!am3=gc+p2Sr&8_a2b|eLORk`jU{WUg9vJ7w2=Lg)-2q7wLgyDNu)M4% zje=$|pZPwkNR-YyoyDa+RHtoHhiMVKnS0?Q0^>)kYRgvJWGi3(vY3;7W)M6vEGOP1 zDSd_b9vXEa9ME7Ebs5@){ot1<#FBZ+M#~h%BJYse{#{yjJo@3nyalGi`%^Rny4G04K1|vYVd}_R>rG3oEoOA9}hlq21R)N907HGK)hk_OJTlP&6LW6}Q zYEzV4f0oL}2)e2H$2RvZd5#vYVFY#E{>#L-c=pF-sPzP4#7*<}WJyaO9F|v}{flZk zA34xn6rVarv*7}s=v{AKK_g4r>{aA!%x@>+d#Sgyq}L8@(oC`_9{5FXK;T>C=e5EG z`4^AJ3pr$8k2D}MaAIyoK~RQMd1ZXMprJ7`X*v~O%w#jsv&;U_IUgFDnN9EwScoFl z5M5~n-^gU*26`FF!vB9VD zCh2%JU;A5y-FKe{(l=-NqR4KKEAKqoVKYw(Z!qtS+~>Z@5E+;=XuW)=Vddi% z@^MmAcEt(Qa zVxRbxXMCUf<kLe*G2pqQ?>x^iB*=Q$S9zJIp9vxHHRRaJrpa-5iG*v(c@x<8Pbc z)vb1|n(#n6CU_sBfs=eXeclv;@M__{YNYO2xdIz?7r(?dE2Ad!(e64>nuc&4#MTOh zEVH&)p=+cd0h91FSH5lib~7TQFAk2~|Jybjbd(UzW;{$j0i=5Ks>uok*e;LC&WS$V zpQg2QYtAHk>Pv48%Ba#PCIY$rtY(9Y|_Gdmun&m!U^trfsW~K#~76>w8 z_E~3~8NlV;XnpKL~`ty07UnHaP(cXnJegA6# z8MW%)w?*qjyi~(j6ln9-OFd=->g%Gj*);Nn$`N!&X$n-;K*4iu-@~aDs)(=0y=6_Gptntabp|EDwmaa%1!Vjq>xJ8M1??{-F8wDarXa7?3 z!GMD1!*^n&Gv@wNORmpH@>cb}K(M(O%*_aaY~9&;iD@gPQIoL|VzS)@Y(Ztj-=WQ9 ztV7~-BZkH<=kGu-95XyA?IKE zYBn7&`gy&pZxtv@gyC}7$d+!^k|*}={nt+^3wybgJr`UXhHl&9+}u|1J8UwR$Z4*t zp2R!$9#OUPJ0Hdgj^uLED{E2qOPq*RLM2+ya(19{n!>9wkBsb&VX9i`QY|kR3%;>K zO)x>x$)Kk+F#DHtffgY(>e&dzqmzDyuL?ix2Rg8M4l8QF+gt=m+juD zABXglz6{1Jh6`w1BZGmu%QMG07AM3Y-pqzSHEt|XWA&ey;WX*5##)~oGr(|M(*kF) zpB?m*9>O-klhZyd86f!yOtZ3$48JW$=_JvV?Y7t6<2z_GO|56Yl471BD0jXl)lRxZ z-LW%e(C(oZO!(J6CX5kB7_?2w$<0o5$Php=ga{2Jz)p7*P!bB%L^1~_PfY4)N^!b}<|egw5#4tpWuH#DH(>$Wu8;?o(TVU#T8BMA_9 zj5kvH*FAAts10v6uU`6GI%R%l)9PVW&yS~74~j1Q{!L1a z(f0gWminh#^%rn;no^=5x7HE45*B;#oxG;dIoze$J^^jyLL-9XmH~@%1oFiq?^&he zuriktd5#(z>n;d~|B67NUIcF^^8HUrLFoSZGtuA*@=^thK-piwvYbd+$xb2>CpMg+gDk%)6=gCtGUz(~l@HuihYd8l8BwhgAC2|>X{ zqPNWiHPs>opP8wz&avKnYlIjI6=9q9o3n}-pmg)y4)Lr#&0exi;9IImj_hW z=)30$Cy`f54E{o+&nBe9t*}N%=-4t%T@yq12F}rsTgKKYhN$6FOe0@bbbliP8Y~re z(#SYAku6+ly}sSYF$ZeO!Ue9#s^{J@l`GYJ zEFQGwK|8r;X>%Xm4_DE1(S~|l$0eCXc%l+3t+;#_pRkLD+}RSCq1(ki>B^_1jS}_2 zvQ)4aHYYlE`|@A?%x7l0mdC|HEbC>_AWKQ^s;PZW^50}Oa7Wd(>7r|7`K;KNq4U~y z)+w|I`4qyOyrRb5hsdNehp0Ygi1*l^F0nexSABFb;|Qi`|5s;R52F@i#es>9%@uI^ z_=H_k<7p_o$Uo}bASxYRLK+ZJ?OqZfi@8&4$0Y|D0rRK0cG?IEd&WEySGa3B2_+I= z%O--EMr$MUDAc+XK0=i>tJTVoV2%7p9Y0{sD}d;nCf2s#g( zMSytfCn(~4zoW8fiZQ^Lj>f2uR&+W+w*Hy=|45*o&L`H5sayS69sJ^fTq~5+jzGw* zO-^y`2_MT)a=%-_EL|If_wHZuDjPpE{&@q1Hz`(RD+;`D2*?PRH21f^>(_Eh%T|97 zfZqs8eZT4_{He_P$j(fdGj6lD?RAzD1euk<_}?5R@FFR#qoWd#v?5m)zg{CuqS(2t;@)l-TnBTQ$&D=}-I zf1TxX>&XR~p^~9`ZVG2S->qgEt?)+82G$X}ps=s#B&a8YU~^?ao$$$vDpf zB~E8be?vop&VYf`(vw1D?qZJ(jkW0@`$3K7xra@_1mACxw!%1D^LLvSn!=Hlv9OL1 zoffRpTo-*vp<4T)7D1$5hEB68Fl?9TVQ&R}t_jmHlFA=@SEh z&eVW1gLxf@fepB=rq81XgPgY^AaH_Lq^K_Q^jn&mNFc2Mf-63tHuMM16o&-Vk!C*p zvD1sH$P$`w)r547<}#`i$5O`+wRy9+YxIq+{Nv`A5{*9i(Ts5O*Vsd8clS&KOlU5T z+p-El&a_SatC%x~BXk3ka;oE15e;t8C_o@ALRnsioB&a*L&Z3^WKb4UZ=L_zztgZO zl(T!Z>F`M-6FoVcUr8o+e+_}>wrji%&4G`l_ARfnk$EJqo4D8t#&MIXDzL7h@g|Ld zkZ0+J7iL6VI})m!@ri|LC}Gmx4_3@?TbEOf ziH0H!D?U}Hy?Gh! 
zC2d`Ht2q7#m=?084to_tOTZ7O0x|NtM+Bj)e>f8uV-&^nfRoIs)<*3aTn&6H`cKPH zKSpDx??c@+DK@@Ni%We=$-w{Af6X#ps3Pn{?Iank2Kd{@*k=tmT9`zQA>K^&Hq#;n z4Us;~y?ORt2&UR%AFRSkG!nOR@&TxyGwTspi-ZJn8}K%RH)t;FS1>kY=fILI%T~_Y zr`P9Z{9`wuhOuIQLdwLH9?Cbbl-a0KiVSA$?g)qXk-Y+Z8bUavib{Lvm`$)!yM9#J zrkRxh{;V9pD{oh2Itf5*E$Jhysd7J{`ilbsMefRi-_RnXO%W&aiBzDbw~iXu=}V<@ zu7=%4;_-t9kmbJ~7gI6DLWNl)<=9V4Sb<2u+y@!YJHXA^G8-TEB{K0L(htOC57`0F zVD8u|VBi(B(TwlqTGnzQH;OT-Cy+HZ2zbhvtNQz2Rc6c^Zr7AZZvm2cnpQ2onOh^} zk|aa88CX3(AY#o4b(T%OutSQv&}Ho(@$5qC0-!WQ6`YQf0&yG(Y1VCLIYh7dx(<1S z(YC~*9+IFT-D0T6arMnuf2`+hm|TGXoZmMP1e907%t_crjn!}~Lq@zZ|IY8(dceyf zq-F_mdX^>((@)Q7X(V?>r)67bP<%vwbOx-kD|l9GPv9M2xjmh6Dg`~0!8>=)GYbAM zg>KGdh9s2%KHzND;eGZ@_~*$C-d7KbMsGlBbWCM1ugZ87pNP0puAZk0579DYIZq9S zS1Xx{KDT|(nb?gn**qctAR8|^H9^ra-7a)YT7YY+8* z8bIJm$leuXV#aTV7Qs7Rr)>B90McV1<4murYQTt$d{VwbhITYMe?B(se=X_8Hn?4% zXK$yr|KjoWG%49DEa;F^AQ{*Jku%as&Hn`Yx_APw>G4CbF+9Z0*fZR$2b1< zaaZ$dLv;)IcaLv9YqOXQ7vVGyqJqZqhY<=zx^9z4;^e>-@Q*r!bwcEff_FLGm{l6n zigIcufCaGOq^9do^@mrX(zG!nBeS1`_I&n4lxCep?! zL(5;O&MNQyu%Ow0CoEQ`MGybViZrbP$@pnE-W1jer1}rR(<~-ByVkl>2f;aZ1eDx&O-;ux)E|(Z2xi5k zA$GIwf6O7_93c3(_W+rLTE-bA+M%y?Y&jFsJUQvKT9S%MAUSYko#FesQ{)3kC(0j@ z``edj4xq!fm`W9NVE$!T<{r)BoIKk1C?Ib6{kAEdGWP!tGZ-{33pYiafm_ zAoua&AN$X-b;g2t#p%3Cw;eUsO4hRL;tKaxjrW^Yw}^9S1ArSUT5@tCDr@4iVsaog zmE3&EWctItnMp1{IogJUvu4Gw46&<7OPa@_$Kk;>QwD?1&JwP}Ctk`T<+E2d36E4_ zXXEG(HTsDKn{tunxO0*#e4%)Saj3tzvO2H+URV`hXuftG9;X6~P;Y)%*1&Vq4}1_v z1`EJD!hJnDM8L-^Is?5h7hK$k&Pre18V{ibr8Xi>YMg&tUZpOetMe+SO@RMWgyD2~ zF!|t5^li^^a-}ssL<7*~!Lxsn9hnT4E7>sQL*RYWWB@L8j1fbj;!tKUB6_%=+$b)0 z>qYSjG-Td_EmJMkHlCRb$3+9k5wSCczd~64cNnhlBmp?duufFN`jN89cR=^v-4)3E z?+unRS9z!jJ&(1_&7M5(dIoeYsrU^FP=zKj;i9N)`~ZCHMHb3Z;h$e(${b#AY)-cM zD*+FQ+xRD~i{VDFQbm#CsM=tKv}-QNUj}}Tx}TTKWfvFqdDK*U02>(fG{YVok=H*# zAevUmx=&BBrxl)G_nau*-30pj-}uC5*?sb0J?05ORej4XkVJZ*49i-J_s=$?@Z)Vd z4;2|!Wy+Vyj7LT%J)`BbD)HwvS%u)x0JoYbY)JATG+hnG=TYmd0ru3%2;$T)PiOupGdS3Tei9aT??}F{Ye>*hE5CYmndlBFOz^<` zW;~j|E`pMKf1^lwxgv@Y|@(230bWH%7muZcufD^>!2u~F%vFS`<* z%roqBJn!E`UI+$+Y>VztG;R0g~VP9;f@ zkd*2Q_LEK|aU{ppkMD#{@)oeO7jiRLZCiM8s=vP_4gFV zXx{C=H)i0@w9nT}K}<`CS*#wc)0-c;&o>5fG~CNFwz>NNX&wQDx~)`|)aOW<5)<)N z0zS}~3>9Cf^6>lyb!y z>|FcoHd*(_?72k#KAfF1_Te)vN+Gs8v)sr4bVgOz$#{!o$-xyRPa-DHoVHiDZ1{Kq zdF8fdn#Epa1*#imTV;w{IpDlXb>N@+$%VYaYsAy4x>-}1L^dQ-^=32pP--mi=dno~ zo!(6F^~V$!$GIwttBZZnQ>hK_U+3rZA*X>?xhFuD)^ZjFkui&`hvpp)jEy6w5D8HBS<~NExj5AXRW~`>%`p)(zaFZTtNaH05hKZ0> z!~Oa^$*- z5rj=Rd}hwSbYcun#&|BBzNeE0T>v=ZA|F%10}fsQFdAGkaxvz@wHhf5JQ-;T z1b<7Ij=wObjZ5gQ5l%zPxBMTTm0@t=0;AQ0fP=J|%kxjU*IlYA{$p?M7{6Fu?7rt_ zJ?nse`TMA|e@2zD;M+eAP)wi2a2{9vQUW`gB@gz!a-&VM800(2U=aBHg*)93vocUs zZdh4Y9Q}A^b9t{5S_fEayMQ~RPjj>_P7Z7jL??HKzY`*Qm}ZVhC@|q+@sf=zC`zC1 zQ1?)*Ykh?58Sgdb(+?o(2@9AB*fsKaQ@7-laoQ1Q124iYXkd0^_>vy|F@-2MWu-%6 zn21;`|ARpjOw7CpKB_rps+jTWPo1s@JI7OWA4AqZkcr8PuCOb-Uya-KS@-e?A!V73 zq4d<~HB^pihvxYb^H<($OLbV{_n5Kg`)WrIBJF{|AAz_ zcv7d)_5lPra+$$hWq9~q{UK?Y*UD;w&PgZlH|FjJVMXB1gWF7-t$bz zWcC!SZIk*mVRV+Jwjsx{QrZM2}mZ z%!B{q*9SA!aESm7-~^n&g9T&?rM}TL)UmJjX+b!^*!ED9TVc}qkEI!$XI5?V0nZaX zs20xzzi=~!O~dtp=+!{U+s6)MP!VwJj?zx%oT)qUwu%%p4-kegS$ zZ;DB!#p(COpE+i!K}L*zh|$Prcc_<#Ce8upg`cpNyD`%Ge<%!>!mRWO>O4r%pDdDA z0RBvdAA6lJ4tf8+WvECyU~#}VHLR_jf*&wC@62mzvv9|~gWa1Idui;f zv&|k121&5iw9?|Z=Ka-;u+rr00O~MNNj*b|&#dhdZ$nXA#$P)nY9};WS0Ynf*zDAD zm=XGGi6J+;V%7kHQ)A-|J&%dh@Xc(-!H3%aDQ;LxC6xJi~K5o#fHQQ$OE9V63-g&35-`#w*BhIf6}u5+G+vgKxvj=g}>mt+Q`Z#aRmIsM5YZE zF>z5HDLOX1MRwkjOT*wxH>6hDL_)X!60N9GZ`=3Jz^BN>#4pj3mJstQ4g912@hNaT z3Zf}vbbgm{?yqU!O=siAA;CEW@8p@tuo;2}WS%}D&=|w9RI0bN(UXH}V>=bAJEKNk z-1cAf&Tsd?*i2Lj(tjxYhKRHpKxD1L#Kjn@J=a+j%~JlRsx9p>_z@F0k@uOO5bA(L 
zdd&KF^NuUixK>%IssBTd;eL~AtVxU5Wt)tVyrsf5B;fI>;_bsTgCtm7l}}{KPeHp# z%|&3@c~wCs9S9Lh!SmQ;fLpn+@Zz%%lRsmnOO<5aS5EHy$I3Sl_OWbY*l1A>JMS5h zMs<9iK}pHf=)+mogyGNE!oq>ZLH6%-UA;Wi!z1-ZL7DInrzvpKFM zGH5ZXZ1=vkv>ky5Wpw_ToyN?vLc}P#=*FpGA}gozZEWJ-&=!lTEmn@10`adVbn=Le zY{x`GwIqwpA8>!)DU3QWuSI2RJQEfypX;zK8|81JLV*9?`j-_cHrBynurU4N@6k-7Ircm;=UV=zvMZ$Ax3E4e!cxMzEd* zua^6qHBNL^`C;<2)!b{NXv#|}3FA(2z<=|mS@Ov7fyk7@%4#t5n9+U9lR@|$7CG+# zK+gH}UmuwcDO?ewSb0}rt~vSgG!>st;TiDXL=JpuKQ3sImiIRI9IV78C{Q?O z%N5cZ8Hse_FqXuS)!B`PY_Pmq^-(J}Itwm$#VOPy`Zh0=l$i76`yjqQWe>221=Qat z6@zU{BU)CWt4Oeq|K^dI<&?e`)D5{J4pWf?@(hh_48X;yuU52C(H>*zezkb7`y99^ z_sMYc2(EU!koxoG$-mt8XL7)U(6W1Rc`;O$y#~Q5O@KOM)7fr2#J9V$BK`u}GIlUN zl(f4^F1>(`Zb_NfsxCOS;2S8D;jbTSjy7z?w52i7DSz^fb=nBgy5E}LYBc0CVB?>F zsDWx_Vq@@jf_Ul>^l^9_oJ%w?#S3 zbJhWe0}+b~gll1cJOWerXXFE~{XCQ;HlBBNmBo7wq?p?e5y-nmwX~KEL0w{|Dn4h8 zDQ;)biQh+)UnDsagX=pa(|kyPxuSq3QT%e20#z@?lsw@JDl0&q`NNaU6G!Fa6?b0@ z+Pxp!hD^_x*!?@dS3=*%DORMNH!`ML%dc^50jR&|BfhPG8>@b(|B5t40p>^6#)XT| z7)Rhgui-7^H`qfe#s@57mIrESv6g(s8L;O&{5F|hkEZeEKc4=132atrwr$&ifnFvW{up!4|!b|w{hqnSk@wjyK z2GNLp*RI*=f3E!V_9t|>18UT=&{?rclpn>+W#RYy8!ffB$rL{cmAID71hN_@Ib}4O z7R;$-LtKAbs;>OB8a>Zgdoh}2BMdOmge;h&{)1#Fx>W;4nHh(Y^LRK{R=W2`OL;<7 zW;q_pfB#t6NqCp*5;ek>LRuy>k4(&Hivw~Ru)KWh6vmN*`YLXPUN0$RtcE*YZqwmE z-b}1}E2mgWF_-c*Ye&`RYpPqma9{kNxp1yIYz9@9mVf5k(ukWV{^)nfE{>mhQxANm zl}a_$YF4|u#-vH;dO{r-D#!ZS$>uKQKSa6X>cvn}3(|4z(4^o_u}-7%|9O~p!>pEO zj}pC{XrfGow%f%ie>Bp7{nEd>1ntkRi!07}0x88jURui$@FR~Wdh8Oo9omc#r)Tlq zJ^$I`CrISAeSPptv3enw% zIjV()nPLbA&*u6=*5zIIo8O#)Ia$Fonxf@64dLZ25(*t5@fH`S38*GxGi<=JS`2nq zk9wX%Vr8z0DQPDM(*vR5MOfb+C9$M+j*ev8sUfG3m7f)ndVreK>@$^Mc&i|PGEIU& zy&O^2_mlqtHB@{ry4A!mjTzGmXO-71)P>exnEoqGZ8UO`0_|BZ0@>y=8H_EH<4HDr z(?8-L7-&vOFiJZ#;LPRii@FF~q!}2F#&X5KKz{|;8GtNS@hcQJ!;qHaeBP0b%6oPH z!I077!^HDmj)B!|rvfFNjhi-CiHHUl!Zf7rnTA(+cuGmWzTL9llNm2l8l+{1tSJE$ zXdad#mzW;p5sc2$67KyhW|@$PGqEmPO8p0IX{~L`bUY8cJfQ_~>)3ggQT$fdN&wQG z!K?Bcz462nPbc5?fZ>kml|d6Baq*Fe8>l+=lCbqK)vB%4fVCRpfq;q~M{P;)12;d) zj?|`7z%ttLu7^ut+gNd=JD~aFVff9iAP9E!H_!l*dn~l_O*fy`C=p7Z7(^1Mv};@sg_W2K>k!WVq1m~q&P72! zw?}2gmHl*Hf8|J!rdkUfxMc34=z87{NEq~by3#DW%Ox$)1o=SNOgA$p z(m;B3{-}1bF{ZTez=?;DfSC;Vy(BvAY{*E3;i!&C`1i-tT@c+D{ER^xC7zXpPSe$* zE4YHWO*)sI05|8&DfE!EjX(DPyp@Qixj9Y^LgkmKNyW>1u#{C26X;1|`{QC`qC92d z=zK{#&H)Dr`#9L~bLq98CBr|d_-K7T7*IT{T%9d)93g80U5g{lW@EFX=l5u{khg%Z z3+#JCHN+5q3UZp=gozbde#MB5wJm!YJzjm zb=miuFV#-w8#H0k%a|V(BvOpZebn*BRx}^O4>L`{4nf6XYgYsvSz(G7xjaZ8XRH#W ze%yUXYUlL^KYcfE0h(t;&2o}yQ}?FyOD52ef7c0Gu6?Q-6ME*j_!w*zxHGyezPtT! z=YMni3b-V|lHr;`btDr5d{U=o*d1GWI3%q4KL6fejj|4CS@VAM;n+!?FiOH&)WoSn z=VkL6=Oy?R$H=<|*Y1rra#K{R*P^0B-$Py|(el*seaTu0wcy>!G{o0_&+I+sfpGhD zQa9h#Ho-NlcyJ}~jc-km_U#9bBVzWN$zKxT({mmO$1}W&w(ZLR=gDA7oXb#stu8!# zORKf0TkzAgxAj+7#Kk?-X$;CI3hY(CCBSlK`ug=Q;OOOu%Or0b$LD$D%feHvYuNGz zsrOn#z9LH2)1{X&y?pCKv;v|xMaVy9@M(-)cNqGu>G+j-*Jc|hALHcf!ilu8Atf^H zLO#Oxb5GTfK17)Q)gT$6m!CP6td`e1*lm>eaD1dQgC11V5|g@e3w87UcYdc>TbWg! z0%~Mk?qx3i;r#vbZ8IZ&uMqt>RXX-3byKEnyOQ)VYm06r6e)EVp865n& z9Uc*^KFmsFom#~gW`x0*i(&vq-w$y=b2Oa zd*yv9{*RxDZ;+DW${s0uW#PD-&mJ7Tk$fYuKP@u=VqZfbo{QU`7mI(M^ z+d6c@!|h?5J4kcog(R#tK}~~m={TFmC;%leggZf@`WAH~D%OGJ6ISPH8F+{QdT=7o zRWvYF%~|oTi(u|yl%|93Vrwl&nPOmtMl|medFfq)=DX5!JB`=rPuUfFoX;H1hWY&C z1ZLnJBNDW#5WyUNQPwgVfJ{LBw$ncqj?|})B>*t=%YpRnXj=LB!%3cdbp-ugLVcfMJ z&%0T3_sU)UZA^in6Zsv63ly#Qaq~YC6#8peT;jj^YBXtwjMG*N62+K@a1t)<+5&^z zZT4gqjeF2VrIkfA`zew0MRD@aPat1m`?;5(l}jomm(%fbBQv4rosc|JB*VI7U%+5! 
zeeFD4ecUP57-vPpqM#vA+`}AXwh2--AdxlvUOZ@z$o-_A7Gv~pwa88$+cJ=0D&oF* z#!^2BQ0c(|uB%c*!qQheS5K5I#exl|uKY@g(lV6&lA3rtYr;cP?5Fm@v!C4_5nJh# zy1$64=ShJ^z*z^Us*CadFmc(SZB^D_6~5QUwLg_({v@&~5JO(KPaq_IBXX3b|CC<( zsaS!+_5E4+)Ar8r6)?uN>{ii)1Mtny7%o@e1`7>Y0m-0TShYUp4a)Mnq7`dhPS%rW0cyJG|TCLm3$e!2CQPWU&jM`1B)p zU`3O*n*CS)#15a$<&OXmRER;zUi;l#A%XZj*N>)t(Kq>tJ)%3AI3zAT9=QD1?z2ef z95Ro~^LSyK0^c1e+#e-t?-oy?Mn^*3^k7!OpTgc^R;ByLA(8VT)Gt&;ps1hP2FFFH zq;AMs6XD4glF!cnO|V*h9B!oWTQA`reR*uC__73!67JC8{l*V4-{$BC!opE>NII*1KnK9ozvl3eLAfuE3t zEq_&Ux`%oNSH`gN!X~))ok!e7C1^h>oHg!ND{&4M&eI_u(oT8&i2?g^ugQ9;VJ<6Z z!PS-iTHv$6lB;9G6V~jZW?TJ%S5HB&db-{rX+Oc(8AAWQpLO+Sz6AAo`m+)snn~=3 zczW#cA8cwK7ZS$}69PdPW-Z?DJ*0tf&#Eay^(THOM&*>6`Dw56Z!NOs&hedNzat(a zl>6i~_qern4|x>-t2JMs%=?On@K~jCqhj;OIHssGnegdZH`w(FZ+QK|l2o0?5syuu zRqYf9r?5zQ<+YyI5cRuU(dpyAaIZuO&k~^KC=H^CQ(SA3z8P_jHHVVPwN$_^%&(Xj2a)von z)~qB^qSbh|I2R4)P3lZ+_A~_&7;N(v?-(US03xRW0bbd&kMn%nb*sMz>I7SO4 zOXe)+bjW>~Fj%{}A-;tXHpkj=+gPr*1_otF$>5vVG|qX^PsHJhKB1HOkY6~$s_Jup zZTe|@E)_*NCpZ$76$wInBfpD3)M(u8)WSwUt4r+~OB*Ri9vu0!t14ZilSnxl(lTHi z>7OQsCOe^*KxMYH+J1~k{kUZ7%Zs9jB}lJ40{6-dMNAy>$A&u`AFIgYZJ!_t1R!N; zA3z}&4Ypc1Hn#|M&W0~>d;5XX629~r9{)HD_NIl3qJLuw9)Ddns z<7D2CeI3POM~Z7lm-1&hN|EDbRu+{6h2|&@(17eX*iou0MGKVeUXM_6%A)o`my&bw zA6TZRi@cyM-Dp|f$0wb*;bdbD9@+X!U|J^ea|WYc8O|XHL^Vxuo%5GcnY_MR)#mmV z`@ii4e-gbkm7RjzJU>yIk5eppR7?01VZY~J981vJre5z+D8P3w>f_rtE2dDyLNoS+ zB=eBV)%bu>(r0p5I+j`1NwpT8aS`^Q|2T_dl~SAvt$B4?@h`nMuSU9#aG2u+cyU)> zm_vxt)hXYGk@7M}hz2G-aC~VF~pG>Hojl>7u!&tx>ANa@T zASCAFE&h2S9m=;6vgnbwPGta=c-_~=C_~Y2F7}#yjvMB-2NqnXY;OfVI~=<@&Y2-* zUK^zZSW*!;HW`&2SAp!UKW~H?xenbhR{bc~2=x+jwte!}h3N%$Oz*QeJpO|n8-fkr zp7SAMSpb!?Q&WfSECo*@CE>8liO#`@NeAwTZPpb+easeep{h03VB=F$ zoB|UOP&XD$lFaOjJb~l6mxJq_yi8TYQvcJJ`^y3Y%nfFh+HN;mJX;Mjd?9jV#^o3D9RB8KzoC@9>tNbs z^zakr%du*#QM50=j-B*m-v8RIE+osi=Si=%QH)n3@lr#LHTt&|3BoHV(cdOR_&(@| z?UYAWn$(lzrso<*o9uzYfKF!3o?vqnEa>G@yXJ1GpVXmZ(;pNl&35r@05xj$FS=0d zupgqOeeTTVuQ?rVT9bik4ocK<|BEhXNC=`1Yc#m@8g1~V$UP3_!7(k+lFQYK|4AtP zW4jAg`(Pt>|GVNDy7-vIzdZb7c@d2VlK=cH+pJFC+q$ONc3PQ22;E!zVOwQJ6SQ$k zvVN9Me3Y1j%ETmUVfCN9g5_u-#(K-<1y z^!K+t&?2I_1B%zlPg4~d?|kzjUj6msws-$Z@m5P{vOOGNs#-ihk&~b1P|N1@XJ~P& z|G99Q?Xe*vSJ^adg6->(m&kf`vggd_8OsZWf@k7mc{Q4eBs@EIusmVg^%Wgcp5vkZ z*CW~ahgb*F_z2CT$ev4;0Gi%S`ro}cg&f8C{zKhwLuhbfbkZb{Fmz8yc^8?I&96?Q zp)l9)9!c^2y0+DtRe~uLuCdc9A;OM@U;hG&DUge6Vo?0sMgaRUr8m+}YxrH->Ad9~Ab1s~MwLqIk*pUNYu772`eD(iGyAx=ty1oJ6 zt_&9`V?>6?lzAqUhz5ns5t1o$GLsCYOhrU8B}3*YnL<$%DKexGC4`7FPw^eM=Un%5 zd);$S-@0e5_gm|E_V(=aJOBOP`|Pu~`=wrBN!X3&_N9W-@w3A{yOcu%>;@v$F4OgR zy`7m$zuX+`KK048(D%59h;OSK>GDd6+>wdI+CZ)|cKP3yhrWI6GndUE5X?TpTaX_7 z_PWOzlci8=dG=c$)pV2#GWkQj`Dl(g>ZJX7?yV|*?{^^*WQv|28{FruE=ZB1npc-w zdAMTgZ^`}(?>H@bCAN}crB`RkzrVjm#7}ncB{($ZdDxysb~H?q#IKB3AzmZ>>&W>% z6TeW;Jup0KW=s1-TN>$|c9w6iCa~Q~`kYtdSepLJzhz0)yAiE z#W5)FH)M7F+Ze23D$ttrZs4wKqs@M1?}=WTKMk1*9EkZZb(wkK7yF$iB;RsObUrSK z{e@rqC!_HJR zR(LH85#QbN?7yc)Zn?NXT@Ywg29(Ho}vYQYky`HVbuskv98(nQI9Ti-ghfPAU}x zfjb}AH1~DHk;=QNX!40Qp$#8km&7T zIPPS_ZgkyoM|SzV#M^`5_~?L5J6(42!ON%u%1sS|gmnxWiU8%rALSjR6mRwfB_(l2i&b9ffuap!WWZ|l0 zWYJ3ZhW*UgPD@4i^RG?uT$+3Ky;w`jzUJC?o9wjqu8TUk+tXBO2JTE<^10Nujz7<(uPu7>r_&AkT;AwrR!fRak>e!TRc5!mW)^`{&i<&1v2R$G zE$4H2%YvTWNj{?yQa^g`=spqg_pHYzPUc&Ax?gVltmwWrDwnoh?#RG-E#C-uaX?Cp^t9kA>Ab_=uN>E@e8YJ+N%U>;6@{&(VRhD{!{F?W0?p4uW=!8M ztB}W^J743qVq`~1{bwMt7K^Xz_!no@k$lp6psCfEO}f*p-MZtiS%t!go&@E!cJgY0 zV`s{>aw4x3&w}%c96BqJ#Td3VUI;8jg zt8#-ju zPd3%1PTDK3&R(oo=pOI+Hc{au<;Uh#u)y!pe0#S1-0A$Hmj?0NwEQf5B*~6ljT(?TTW>%-D5?2+jO040<@-j>6SnP6TL8w~Y(L>2d{W%-U%rID9A zZ}0ecFM9iD{XY}!xARETy`&t=cNQYkZ^g*Fc{;ysej2|*X~6(Y=z8Sy#MRs#R4puL 
z&1%+Xr0IXRb?Ud3-?Zw~-$q2X3XBcWggNbcGVwe$irII_>Jp8>DLn@dlQdqw>*^1s z_bNZybE0$g1QX+4R2MAVr$7ymk;n{-%9IbUkR zks@DC?=}gG%0b!l;2g!n4m)nDFwF!t%Y^Ozx5$gVj0^Uk=AV|1G+s+td^Nm(?iFS% z7Ru1oE|}#wQgF{_fpl^B1#nmX_5y@Lo}bvCpA*S^KaRuIA?jX240=SwK(R!3_7-7G zzE^1Q`$w5nH6~v=b)EdSbk=!$m2~Hxa-C=F2q)Y^XrLQzkge*#c=_Z1iYqf-&i1p3 z)h}`>?}nELWCb*6Hf}HaN&aW{pmgLQ zhTC1#4M%?>i1M2$1kO(0Ssn}g>m+y~>p-Zcs$!#wORz?L`{Nt$_y=RJfl2+J z*9=#hmaVcunrW+q0=r8WkFCwLi*^5yPOK93ks4Pa<%oLRx_igh_MR)VV;R5yI@{Wu zr@_kjbzioHC<^(5q}w}SW8mm5)U4Fb48@!rlcSj@z~|c`6FOl$+r?s9zsO& zS;ahHDwcYwbH=mgcH@89N@;VvV{-Tf%KXEmBgRqjdd2Dwxd*Kioc|V9hL%MK+N;J2 z`^cIf+XP zLCGvJB6{IFPR9@dW&T`@rgPW?ZmDx4Pfv$^JFOUI^g&GMNJsg=yh<$U0 z{^D1QojYC9THe;EW`pmEKDej5>3FcjY<;Mmwd44qs`zcijcF?Ne;c8gDX14e%m}RV z**ktxt)7hk%uZwR=iopn#^grTEeUzbOg;I-ilX$ixh?_3bKp40i8^Q3363-R!Nu+M zJB|1hzoflZn9us_Z66w!pR6&oH!<(py-hKgMzF+SudEoDa=V)gXtq&{sC+r7WS|yN zcUUiLYE}sx29?anI18G^@#F@|zDmCw!xVJ(#N_{&T>3W7*I^zubJ9 zs$;WB*WZ$&j-vu}<~y=7+_w;lIbK)GjZ87h0K4B=KHkIkZxShQVU|)=x_clo_dN}; zu62PKm|g{56BV1x_2sI!8Y#{0HqLSu5B6epkM1ur{M#h1bRN&vgQtDDs2;cT(zplg zk4PD>((?JQXZ#^m6?!L3xo_TO?)nEG0a414FU>55U{EQT*{8N6`V-f@a?p00*Rm-G zI*Iva$7cTa`eq`pZm6iXLdd(^Z3_hLKGtf8s!yz=$ezh|pgXZA|&h@bPYYoNYkac;-z9HD{F6Bn}})Jrd}(O=a>_jq<4 zy{W0ps9UrkBce9bw^A<-^eU)X+)$km#PLb_t^R?EW+9q~e~vAmKk~P`-{jz(FG_L6 z(03b6aL~KJ^FqB-Q)6$z`|qBupQoc0URT9KpuE>8|190(s4J+)<@dN;!B*SqD}ovC z?!9Gx6go6?pF3xyPcQK^@a?w4P{n5thq3|U=1?Yyeu=gs}QbhXfLDZ+1cD5BqVbaxkd$Gz#lXJ&72DlWeE6Zx+U@UR@e=)o_nz`x4@T>OyB@Dw_0JoXPX2!NyVRA@=HMda z>gkI9f7q>@E;yLEvb$SeaItZ6+{Z3Ko~+xL!eSyz_IvxYp&kSNR zb0;@P*R>K5_KCVxBk|D}v$b^$rs;@R2`J8AuyHWE;3Z^Z;YoEfqK+|}?#J1%u)>Z& z*P3eE(_3W|Bds0H>23{OlA@vk8cer3DCOlzp8ARm&RQt#;Q$9yA3;1yLA9Ei7hfCGM|Z{a1Nl5J&XuX-WN^G#M2-*mlBTF}6g zvm=2wF7g#}G289o88@dTc4~81^v;G9!o5mj8J(sAMDe&7Ed3M(LRDy2!asFXo87Mh+z9PmE z&aEd~QsUzYIy)Q5!JaGN8dlG>U%-SClmp?(`R`AKY)4jdT6aEFGd}X(do^7uy(_Tn zB;K%8p%Pafdab7}aU-?wFS`!oh#Ro>@=3mrK$_MrvphHKy_z-Pd^yvD zf)Tcz*6UKcKs&gfI`9@@=;=4}Ct%%F4}_je7U;QvYoi|`@}1||05@xy4}_izUCJLp z^KKJ^$4fb8wFRRVm>sFO@WmuA`PT@YyoYu516djvAt zIcBs{!I4lpfMY**XAT2wMCMmdUj-3CcmUC@hrvj$o&Z6*Y1f522uVwr0>6!j-0m0i zLJvY<87M+aK0lwfL=)+YpQIML+~d*qBH_RExh4!N2$h~#hy(@UZUe9y$bOzod->$D z={HRz#YflPOf!Pzhx-e~*(0t^7W&SF?`(&yYVh(y%N_dYSn3%YXTRSO>NB5Q5y;@y zZF6y*x`_!#x63M}pB;ql=YA*u4bTtnAOyA@hMp;)3lsjwAS5OtYr1xA^z&k$MjG@W zv|b8Ikle!$S_*0}2fMV?>K=8m4DAF3^>J(=+8BhYKQYIGgm4F;^_n0f%Gc}csz833 zwwNsTRhB}0g@uSgD*Ri{8*5V1PhY&kMdRY|LZoI@AX{!&He7D+F+%9e^%Q~pyj7G# zZ%DS7aVM|n?sH7Gun~2HRHJ(`+z;S->tQge0D=(1KL#OD5mVr|5z(7qwnrc$e%zVR ze`TNu@k>-*^f)*xRxEs=?DPj>dDHiMYOAZO5w(=3w*uqM|KE>oxu%u|62jdG;NJ=v z(3K<-65#6}J8#{q*TyIEU%&!XW-_05K_@5P+Y2~am&f`TUV!#rz84WlqIdR@gL7t| zH0copGUqavYtJDPEoXbHCHy3l8AVTlsI)bN@^dF?2m(HIVg)_yoF8 z*1e5?64DP`qRk*4R=cDQ6gCvvVz&_%bua2H;BCVols`QALE{JC<57T zr+nn?M?O8dDCq_BHXsLqI|ZcuhV3|XJz(2m=y3t=hxMjsx->9qqgF%*w9g=VN=CvuDnHA^3XL8#|G9Y#2#V&=E^%|2hO_eN*!A5j!*Wr2)5rpuKZaoY}xz_&v-v7!-Le^9i zxHg6%hIDfWFbqLpgf8U|qs2q1jU{2y&D^fVZ?ehLK7xTL?lsn2klMpTd+<<%t_ZRs z((eg74^ose7)K>|35?%9087zyd%&GW5_i_uHMPCL2g4KbQq=zLG?B26uQo|pejWar?23t|(s(~6FiqNfxp(p{kC))p-2}uBmungU8cU1=s z@#79?|CNCf)aczjQ}(#7apIW9=apIYrBU>3;wvmcr^(BXfPx@AN&i&?*-usI&(A%E zKILpS^;@pJrILo_NB%BTS+Yxvjr4VdY-)aD6kdLIR1kz)^j3Wp8y+Wp{?b$$f!uYz z-&AvCCZCO@#eiglq6xO2Hw8tVpdZ}BG2kAfZZn3Sesh0<{r?z)#DKY`wQFMx;s|5R zg5H&_m+~hWYLsv`-kY-g(yO?}eIeU)KYC7x9=j{6`9iA$62je;t=9w@(cJr?n|U=a zmW0fC2qm^;alu0L?eLE?yWf4)GQL>z`;NG1H(rQzpE3Ei5`9!6(2T8BPd#LgKpr?- zJAXt+hFB!{sA<&l!%EnQ(ha?YKt#A7!1dO{U{vv(0HL3<+3O74`7|)s1pGE4QWj(J z0TJO2LSPvvLi|flE%mdE$LN3aOcaa=Cy%#+!?B0h!?D(HT-6{U+>HR%0~rua&Lso; 
zgIhe)Or;6N&r05f1&G+X;@3ejhYQJKWc4vpoV|Df+J9&)`nP{>*W(JGF;mwuTe+>h(f@%5m3Q0Q3wbK zcP6w}!kD(`L@g?MqNF~FRqC~$3L zgw%h%9Xg}668@BeVno~N*RR(ms&LrtJq24+ksN~r_<~~}c4tOUdes~{qxG5~Ga5ON zy?0NkCp*h^iq}Kc-}7NH3XmFW8xoXA{(j;6FQ>QS_INS+)f1BNfJ^prH>KhjPZXCT z0_l)8X2E12_UzZkQttt3@;TUy8hYhrpfg%;Jq$)hK#n^4kBr15M1eRWipA(iJAW8- zM(d@Z7>z#rKy&wko5t@G(X;*UCtneRZ(92}v8Sj>WmsyUGg_|+GNa2%H|SsamOVWw zPg%)(FjfQ>Bb(S2hU0y=9}zbYv)!>)rooF5BmLs}ZyW}<<6cY0=Sg|AA&@K?5~tcm z?U%j{#hsbH=&}zsqu-Gslh7Hhw;l$gC18r0ZPS4n?r0pfjE+9Y|CIB+kap+A#S((J zYAaZb1k!)nw=>h-Ts`f6*YFfI1zwEA3Qmm*)z(svNt|bM89maAKn}({f8|G6q3w4z zI*!)N2>xm_WKfj|K8(QD!(>!LfH<&eG{T))OGuakzl~j5E;mmi^zl7d#-9!?-;Bnq zde6$t58Vep&h8HQ4vz0rUSN&yk5(!Yf`V{&>|iyJ{nURdxhc+1mi{)hEZX%7e=jUQ zO}xriX99;#Og&k=xBp2&6JCD4b}#J9<1YMMGjxSw>#e$@2;^CD>nq<)p3(%|bQICn zYBqu$h8Ax5p~DdF%o=Pv3_WeYnh%yuGi%&>WW-E?-$p$%N1S}1>jBF^>5(%@?Mbv{ zu==of>7ruI<6;vqEoqJ|=7>$|{<^o+3dWa^ra zKCoc7-0vCRC^kBcz)MdYyOW$=?c-v<`@rUWkIfem$i+jSTyvK6h1Gr@xcWqiQWv(K z5v_xd!RUiq57>4XdMM8N`?GJV2X_}HDh{mT0#C)c@4Z0q%8pwJ=>FDxPgI(`v72GruKheq?-6$8f9vdraV(5Cn zw!_e)cE;bI=pXfn0eLz0AS~T)tN?oO0n0$?2{Rq{UTNuNTb(jBib;-7t?K}Tj~I6F zk+>$R4xJBJ4`edOQ!s0`{J)o~O?+A(DPKhfDG7lQ?;&V~) zyma$(KBbs}nuDg#J1Y=KK~qgHv)D^ce)h-NkLSF+2b+&|V9r(Oe89HD;1dlb&^tHf z!;7=u5fhaHUXWpjpM8iWT`>GWn3_hHf+D27(pBK_BcC?9`RuB1Pwj&#FaVvujFm(i z`WEwhj}Ll4JY^%H(fEFO}hPd@y-Y8I9u^AYKwhU;ab-a30f-e+$o!pqRe zsZbBTzVhG3CGCu{ifMz;Aqa}lrJxigOtmaErrix{%c=5M?sRSLEj=0>5D>5> zn7K(*apMb)JM>Lgh5Yun%8^mrk>G{IuDS;GhZTe6v{*cLn(p<54w zksL5HJ+vt!+=;X(u#O9R*_qqPRcSC3@#73=U>PVvK9(Kr>9h}T=~l~jrPSnI)QhIt zvSkY?l{8WeOHj*B?!%xU+>a7i4P-w#ip^INH3h~mrqVePo7HZE<;N&TNK$lW?j@m> z>7kWh&mZCCM|qHRKP%(IP70w6qu&DdzzZ^=A)RD`SXOPP<84g_!$&+}cV!dr2kAgR zxbtYR?J)GvTKE$j{MW8bT-sC&xHbl%c@0Hz&<}(lgf8WeARb=r?o?S{w?{iiiAv`W zl!IN_&seOk>=c*gMNklgt_)ofWJ4@2KW>L=aA|(^=@>FhrhN;0%&xaaM6Jq%r@zluXohVF*ep5@G=%&4 zTf5PJZZ?J@pGE?N)4#sf5RnABGD$2&Dn~2J@KA&<<&PrQQ>jWz`e!crHv7Hm%?`*0 z6`jXcq!GR#frlb=MUWK*2AWEpk+~#VZ%8h7nu@F%mLfAN;)I!35j`%3Z^*NcRNuf$ z(Uw{HdP7-14rZ~vUyM%s-a;TFUQrEBm?#aYuO@ZIWNF=pqiwu|!;qJ-6GEjo}trIgnKVO>s@s8w&2APjl8Tgw1@;>ZY z#c7cqc90P6M+vM4GN8)zjPE{q;srHr68qkSY}bYbh)z(ODk&y;tY6iiY)xifmzO=1Kd3FwO11EKGKfP|U*yM3dGu0|LU`m4R)C!AINL zpTKR?t_(LHNm*0iw~>!B%j$FJe84hLe8Nt>xKuUT*;MI9sDJsnzOYFzIH>Qy9@O79 z7&V5@2doD&ABkA8v-Q*afzf+J$pRku*2#|?(@atu=%jte!dHx57>4Xe2Rb%IP5m%!;3oy zmJu}-1+I;N3Ze?!p!Z>GCHyG`MabodW=_-R5`EnizH<$OMBB^3mG>X9SKePLrey#H z;m(BCD}rn&V5uZ?ywAQnoW!=AfXyQiRtS0)`DVJ31!k& zEQ&|cAnXGLqJVitMor|BuJa9dSCJd@KkEu$8@m0CIs-I>JAALV9)_ZB;GP(5+J`|< z1gsGf{mVKb9p`UBcql@bf>P8!r6Bmdsw{upH9z0da4?ev%!J+nz3zt35IY?h`|(hO zt_ZRsEn}qpO{Q~CA7*_Z`t2+hiR-_6R0dp#}0P=fURiCRm>R=Md;SUP&5jxeKpxM7UAy7fUk>y z-^NIk&q8_>dRGRPffA&aJSFVbA~fQ6?T}EiCujV7bXV4j-Id)Fl`sMc;eM3BdLRRu z+#fx9#lJo4f_!W*34a9vEI{9hrKqNTyyt?+8?#fJ?p?$S&`RNC?Y5Hhy%aQeLkun7 zwjz+n;;FeVDt{{~iwQe*j*%<_HXsU@STrEqnGo1^7<{6EuFPgrJ`jSCDDV*(aBTz> zkbM6I2nhF}eyxN*rJx8U%-P=UZSF3r$Y;&IcD&+gcqTZr_qk}`x&(G(*B@AmV@ zAdq+N^E&Oz%j>u)e(88vq*M!RM*gjnsn8j%w;l$gB_Jg--83b_-I+;2ZIQtg8KQ>FQO(UU{^c6(Wu9sc_&n#2Knz3cm}wZ}n1xH~hj9>{=J zg9mCp-13R1I8ga4n35abhxG~d4Ix$^&zWZxPO2QAQk}sIkfp)zTXttIvJwT(*ku1& z0rvGsTKKqiF;|G1;%jA5QtjF1*N3JbW zJ!wn#(Rrql(h*qU$F%a=TmT8eGn{}o;Vb7`GQ9L~=lZVF(^qek%+g(&s&DZ?AWM5+ zE$f_idi~K~KjyHkoH1-Y=V;FjfZ+#1D)irN#?TXO=}&O#AN2rh?f6U)dp@p;C;s6BYwu8V9Ta_AY{_Az%QcKp6I+!Y5_^xZY>q=E_`)P2wv>6+A3Z(A)>I_H-LMDx_2{Ix%;gG=sy5}{& zCokW;_lVj87NSpaTRRtn?Bv}g;yAAj9h1Tfk!jek(>(VbKOLv8t`TEOEFIH}H;rY14P~5EzL|0c)nQmwKEex>5$l zAqb4nrTpo~Ix;@+j!XQWTk(JEB9*^7p9>Di_F;d^s-BY9f(IjXO^_Mw2;~CeRfmaRqu7Nd+ELKLy$>JPi_&6oxIkDb7a(O`Mn_uJZ>nG*qCy}aRzs|X~ApXQIe 
z90DY1h=kf`=g#AWGqB{%tuFXS_;}F&5&tejGIbB|A26tAp!wgy{&$a_-7Wtb3%8SB z6FTxz0Y@E|G=vm7H&>~?%JU<1Zstn#e-+|f{*BZNHg+|C6Xw_b@Rffr4FQH$=2SIS_zm{&ANXegn}2^8 zf7r+d*3wP#0G@194QuM3_0a=wyzn2vM-8gzkDzJMBM)##cC8{Mq9iPIWcD_Xt<=&b zRp}uRN6Mjpk_Irc6DZ704LxlOdu2fmtUQ&g$R7YL0SmCQj?8V zt_?`6PS0K!^eQ!E0SumOPJ=Zy^rr`3tsFR{QK7bs22WOq zyZ@Z?$}J7Bv*=rg8bBB9BXW783H7(jn9lFHzai|@b)_l)tBJz?P(58J4%?qBEQCE^ zS3~8{3TL^{tx?<;ot{gR7P+5zph=MlMjw;SULk??+vO>&u z3sNY@*1^%$!v-`CM96S-;@(guZg61-60xQ(AUTugE#jbIM>$f1Dhm7Py1lD7Y}ipg z)gUbde&k?DWlc7$Ql%TyndpK(N*=(Cb(E~Bq1UW9Kyf#Oeq4hp3O&GcYbG3eg(gxW zO2R_u&GK!vGBxVglpcZz9d?u_VJ;=+yQiy>6=ND{#O$P9-Sv5HwO+a zIFKwX#C@#VK;@)BShHf9Q!^*RhGiv{msH-!CXiTDJN)_TO;*ZEBMqu3@UvexSUK}` zhX!dOa`&!P7O-bRn{2GX@AQ2OIul*c8@H4OaAwCHv8IOpDYRWA_c$yoGc~B9(96U( zD$iXGYx#E#(n9ECT0|-LxB9&*W6wrkV`E9!>`^F&b-0fs3b4?U> z>b`xHBaR^Kkz`>Z>vv=I5qwb9Cc1o)l)=tyUx3wpaw(g4n^p0K8d{@0q6{cz|@HK?M{ zJ4CMAheJ=>nVzr^`oa|Jtt=U?+l3xN7xZQF0PIzl5q;gYyZfgoK8pf7&PN(lQRwrW zj!nX$H|;7Vq9iPYp13WOJ1;KK>ER}N2wl)0Ndp+zQXF$rJH1p=t?9Va%XO2bZBa#` z=efRSJq~@225BL5pAV&bDd#Z!_kU^k1PPYD;m8n`g?p(9`17o@DLP0)Az@Y_ikcEY~pBx*j96$;?&>TIeG>EWa2kI-8H#)Pj!kXIQ56kDt%5lT8vRQ*F z3cOi{Mpn*z&DT@L!;-KN`NU#QR?h8i+lwAT7xc~200uTswnhn zIXrH2m&1pkuDxYMEC~yt=c_$dIm-e5ke2kJhtLK6lQe*lEsZxfHFW3uW4htcEBngQ zwy2`eJ9t&>fJ2YgAT5M`@5Nu$@ml_{FFkuAbXdzP_mc&1^E9!K8hNZ!h2FgTgtdG$ zSrY}m@%2tC4?|DM!b0568z0={xM3|X=})CWgbizXh*aL-%H~#BQ{#TQ@5L0{;jtQ2 zQQ)PD^lFYfylj6N4@<&AQtni7lLn$uRjTM-CmH zYJee{vs2)vm*kNQ% z6n5lzyTiD%pOb}!unWXqu`&)XJ%mbw2piV!5mI@BJ9`VLT zz0}Z5P5?Ca4UOl`VOUs&;|XNG=Q-pJCU9>HFW0`;q`d+hS1Xw zlcjA@MWHVo67vR!K1hSK5c-wjg}dM#=9Un8_PU^_A1(`EV8>UQn;Lp1hYiJW=*=~# zqR`jvonhs|i#-~oh0seppJrto{&@tQi7x2PM@j>*O&|tzBgnBR&y#q0nX5q+gBs-x*lbU~jd55R8zGoZKs^|8G(3x|G3gDMLB?XktmVPLSP z*9(;rQ4$tHe>Wz-GhSZCh0#M0p~La`U1;g)RjEl^{umT*e0?j3fpni zr3yIglVo8b?CMoZT3N4>Vgi*05jGqmc96;&&8MZK$E6QwvLFU8Cy^RdQQ(*APpZLz z!*Y^-qKt(BBszo5TU~w{!|)(O^u_o<0F<2%g&L*8txD-OPXt2*0l}y68h&XiHFI6C%S@BWyBJLDqxSdq)^iha*nP>GDD2cV_b7WP zA?!G^un=~5{gYNY%q-KXG>EWa4eu$HH?kdw=BCCSw<^NQJ@XqhsG`6}T(cd;-3@Da z?in&3mV|}K=gsb;tjB`iX{(v^5W1jmlm;+3v)yg1si9wd{@@AsIDE{1uR#@so)ns- zloSZP^DG$=OTt3v(_($P@uC8uFPcpcp$qy4c>s1otN}eGHYDl(N*uc99BJB$Dhj=S za@TUW(^qPc7D6vw&}$S9{lQ#%_C)BggBy07qsW>l@WWn} zw{hUmf{)3kWjrhi3z0uCbXCq=gWqY-Mf4E5pdXS4V6T%6=qu%e&-3rV%Sx)n z(zF#-6uO;P^+7oF{u-o(&{sLxSa}HAxP+cP5jrd@sh7$Eu)SC4_A9r@2439fu&gvB zYofqM*O}3c1BVveLKYU{{+#;Y3|_@yS$VaLN`nYHy)xO9&E{Fn+L0-2_~8>y{KN@t zH+gpO*ZThX*ZB1_{IB!l5sJ*u^pKq(W#gF2#-_OP*KKT~!WLqG{m + +HELPERS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) + +src_backup_filename="$1" +src_backup_path="$HELPERS_DIR/../backups/${src_backup_filename}" + +backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'") + +if [ -z "${backups_disk_root}" ]; then + echo "Disk 'backups' not found" + exit 1 +fi + +dest_relative_path=${CLICKHOUSE_DATABASE}/${src_backup_filename} +dest_path=${backups_disk_root}/${dest_relative_path} + +mkdir -p "$(dirname "${dest_path}")" +ln -s "${src_backup_path}" "${dest_path}" + +echo "${dest_relative_path}" From d421636a5fdc3b73fa5cb05e83529483d69e75e2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 31 Jul 2024 09:26:09 +0200 Subject: [PATCH 1257/2361] Protect temporary part directories from removing during RESTORE. 
--- src/Storages/MergeTree/MergeTreeData.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 2e10f5a0227..ce27ad24e10 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -5557,12 +5557,16 @@ public: auto it = temp_part_dirs.find(part_name); if (it == temp_part_dirs.end()) { - auto temp_part_dir = std::make_shared(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-")); + auto temp_dir_deleter = std::make_unique(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-")); + auto temp_part_dir = fs::path{temp_dir_deleter->getRelativePath()}.filename(); /// Attaching parts will rename them so it's expected for a temporary part directory not to exist anymore in the end. - temp_part_dir->setShowWarningIfRemoved(false); - it = temp_part_dirs.emplace(part_name, temp_part_dir).first; + temp_dir_deleter->setShowWarningIfRemoved(false); + /// The following holder is needed to prevent clearOldTemporaryDirectories() from clearing `temp_part_dir` before we attach the part. + auto temp_dir_holder = storage->getTemporaryPartDirectoryHolder(temp_part_dir); + it = temp_part_dirs.emplace(part_name, + std::make_pair(std::move(temp_dir_deleter), std::move(temp_dir_holder))).first; } - return it->second->getRelativePath(); + return it->second.first->getRelativePath(); } private: @@ -5588,7 +5592,7 @@ private: size_t num_parts = 0; size_t num_broken_parts = 0; MutableDataPartsVector parts; - std::map> temp_part_dirs; + std::map, scope_guard>> temp_part_dirs; mutable std::mutex mutex; }; From 4e2f8576e5a6e8e39a16334d0c697d5cb09e0469 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 31 Jul 2024 11:50:58 +0200 Subject: [PATCH 1258/2361] Revert "Add settings to replace external engines to Null during create" --- docs/en/operations/settings/settings.md | 12 - src/Core/Settings.h | 2 - src/Core/SettingsChangesHistory.cpp | 4 +- src/Interpreters/InterpreterCreateQuery.cpp | 35 --- .../test_restore_external_engines/__init__.py | 0 .../configs/backups_disk.xml | 14 -- .../configs/remote_servers.xml | 21 -- .../test_restore_external_engines/test.py | 218 ------------------ 8 files changed, 1 insertion(+), 305 deletions(-) delete mode 100644 tests/integration/test_restore_external_engines/__init__.py delete mode 100644 tests/integration/test_restore_external_engines/configs/backups_disk.xml delete mode 100644 tests/integration/test_restore_external_engines/configs/remote_servers.xml delete mode 100644 tests/integration/test_restore_external_engines/test.py diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 8739414464e..c3f697c3bdc 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5608,15 +5608,3 @@ Default value: `10000000`. Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached. Default value: `1GiB`. - -## restore_replace_external_engines_to_null - -For testing purposes. Replaces all external engines to Null to not initiate external connections. - -Default value: `False` - -## restore_replace_external_table_functions_to_null - -For testing purposes. Replaces all external table functions to Null to not initiate external connections. 
- -Default value: `False` \ No newline at end of file diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 27b71558bd3..4fc2034b855 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -893,8 +893,6 @@ class IColumn; M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \ M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \ - M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \ - M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \ \ \ /* ###################################### */ \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8bea0b1eed3..9faf77e9087 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -80,9 +80,7 @@ static std::initializer_listno_empty_args = true; storage.set(storage.engine, engine_ast); } - - void setNullTableEngine(ASTStorage & storage) - { - auto engine_ast = std::make_shared(); - engine_ast->name = "Null"; - engine_ast->no_empty_args = true; - storage.set(storage.engine, engine_ast); - } - } void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const { if (create.as_table_function) - { - if (getContext()->getSettingsRef().restore_replace_external_table_functions_to_null) - { - const auto & factory = TableFunctionFactory::instance(); - - auto properties = factory.tryGetProperties(create.as_table_function->as()->name); - if (properties && properties->allow_readonly) - return; - if (!create.storage) - { - auto storage_ast = std::make_shared(); - create.set(create.storage, storage_ast); - } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage should not be created yet, it's a bug."); - create.as_table_function = nullptr; - setNullTableEngine(*create.storage); - } return; - } if (create.is_dictionary || create.is_ordinary_view || create.is_live_view || create.is_window_view) return; @@ -1043,13 +1015,6 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); } - /// For external tables with restore_replace_external_engine_to_null setting we replace external engines to - /// Null table engine. 
- else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null) - { - if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE) - setNullTableEngine(*create.storage); - } return; } diff --git a/tests/integration/test_restore_external_engines/__init__.py b/tests/integration/test_restore_external_engines/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_restore_external_engines/configs/backups_disk.xml b/tests/integration/test_restore_external_engines/configs/backups_disk.xml deleted file mode 100644 index f7d666c6542..00000000000 --- a/tests/integration/test_restore_external_engines/configs/backups_disk.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - local - /backups/ - - - - - backups - /backups/ - - diff --git a/tests/integration/test_restore_external_engines/configs/remote_servers.xml b/tests/integration/test_restore_external_engines/configs/remote_servers.xml deleted file mode 100644 index 76ad3618339..00000000000 --- a/tests/integration/test_restore_external_engines/configs/remote_servers.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - true - - replica1 - 9000 - - - replica2 - 9000 - - - replica3 - 9000 - - - - - diff --git a/tests/integration/test_restore_external_engines/test.py b/tests/integration/test_restore_external_engines/test.py deleted file mode 100644 index cf189f2a6ed..00000000000 --- a/tests/integration/test_restore_external_engines/test.py +++ /dev/null @@ -1,218 +0,0 @@ -import pytest - -import pymysql.cursors -import pytest -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -configs = ["configs/remote_servers.xml", "configs/backups_disk.xml"] - -node1 = cluster.add_instance( - "replica1", - with_zookeeper=True, - with_mysql8=True, - main_configs=configs, - external_dirs=["/backups/"], -) -node2 = cluster.add_instance( - "replica2", - with_zookeeper=True, - with_mysql8=True, - main_configs=configs, - external_dirs=["/backups/"], -) -node3 = cluster.add_instance( - "replica3", - with_zookeeper=True, - with_mysql8=True, - main_configs=configs, - external_dirs=["/backups/"], -) -nodes = [node1, node2, node3] - -backup_id_counter = 0 - - -def new_backup_name(): - global backup_id_counter - backup_id_counter += 1 - return f"Disk('backups', '{backup_id_counter}/')" - - -def cleanup_nodes(nodes, dbname): - for node in nodes: - node.query(f"DROP DATABASE IF EXISTS {dbname} SYNC") - - -def fill_nodes(nodes, dbname): - cleanup_nodes(nodes, dbname) - for node in nodes: - node.query( - f"CREATE DATABASE {dbname} ENGINE = Replicated('/clickhouse/databases/{dbname}', 'default', '{node.name}')" - ) - - -def drop_mysql_table(conn, tableName): - with conn.cursor() as cursor: - cursor.execute(f"DROP TABLE IF EXISTS `clickhouse`.`{tableName}`") - - -def get_mysql_conn(cluster): - conn = pymysql.connect( - user="root", - password="clickhouse", - host=cluster.mysql8_ip, - port=cluster.mysql8_port, - ) - return conn - - -def fill_tables(cluster, dbname): - fill_nodes(nodes, dbname) - - conn = get_mysql_conn(cluster) - - with conn.cursor() as cursor: - cursor.execute("DROP DATABASE IF EXISTS clickhouse") - cursor.execute("CREATE DATABASE clickhouse") - cursor.execute("DROP TABLE IF EXISTS clickhouse.inference_table") - cursor.execute( - "CREATE TABLE clickhouse.inference_table (id INT PRIMARY KEY, data BINARY(16) NOT NULL)" - ) - cursor.execute( - "INSERT INTO clickhouse.inference_table VALUES (100, X'9fad5e9eefdfb449')" - ) - 
conn.commit() - - parameters = "'mysql80:3306', 'clickhouse', 'inference_table', 'root', 'clickhouse'" - - node1.query( - f"CREATE TABLE {dbname}.mysql_schema_inference_engine ENGINE=MySQL({parameters})" - ) - node1.query( - f"CREATE TABLE {dbname}.mysql_schema_inference_function AS mysql({parameters})" - ) - - node1.query(f"CREATE TABLE {dbname}.merge_tree (id UInt64, b String) ORDER BY id") - node1.query(f"INSERT INTO {dbname}.merge_tree VALUES (100, 'abc')") - - expected = "id\tInt32\t\t\t\t\t\ndata\tFixedString(16)\t\t\t\t\t\n" - assert ( - node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_engine") - == expected - ) - assert ( - node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_function") - == expected - ) - assert node1.query(f"SELECT id FROM mysql({parameters})") == "100\n" - assert ( - node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_engine") == "100\n" - ) - assert ( - node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_function") - == "100\n" - ) - assert node1.query(f"SELECT id FROM {dbname}.merge_tree") == "100\n" - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - yield cluster - - except Exception as ex: - print(ex) - - finally: - cluster.shutdown() - - -def test_restore_table(start_cluster): - fill_tables(cluster, "replicated") - backup_name = new_backup_name() - node2.query(f"SYSTEM SYNC DATABASE REPLICA replicated") - - node2.query(f"BACKUP DATABASE replicated TO {backup_name}") - - node2.query("DROP TABLE replicated.mysql_schema_inference_engine") - node2.query("DROP TABLE replicated.mysql_schema_inference_function") - - node3.query(f"SYSTEM SYNC DATABASE REPLICA replicated") - - assert node3.query("EXISTS replicated.mysql_schema_inference_engine") == "0\n" - assert node3.query("EXISTS replicated.mysql_schema_inference_function") == "0\n" - - node3.query( - f"RESTORE DATABASE replicated FROM {backup_name} SETTINGS allow_different_database_def=true" - ) - node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated") - - assert ( - node1.query( - "SELECT count(), sum(id) FROM replicated.mysql_schema_inference_engine" - ) - == "1\t100\n" - ) - assert ( - node1.query( - "SELECT count(), sum(id) FROM replicated.mysql_schema_inference_function" - ) - == "1\t100\n" - ) - assert ( - node1.query("SELECT count(), sum(id) FROM replicated.merge_tree") == "1\t100\n" - ) - cleanup_nodes(nodes, "replicated") - - -def test_restore_table_null(start_cluster): - fill_tables(cluster, "replicated2") - - backup_name = new_backup_name() - node2.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") - - node2.query(f"BACKUP DATABASE replicated2 TO {backup_name}") - - node2.query("DROP TABLE replicated2.mysql_schema_inference_engine") - node2.query("DROP TABLE replicated2.mysql_schema_inference_function") - - node3.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") - - assert node3.query("EXISTS replicated2.mysql_schema_inference_engine") == "0\n" - assert node3.query("EXISTS replicated2.mysql_schema_inference_function") == "0\n" - - node3.query( - f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engines_to_null=1, restore_replace_external_table_functions_to_null=1" - ) - node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") - - assert ( - node1.query( - "SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_engine" - ) - == "0\t0\n" - ) - assert ( - node1.query( - "SELECT count(), sum(id) FROM 
replicated2.mysql_schema_inference_function" - ) - == "0\t0\n" - ) - assert ( - node1.query("SELECT count(), sum(id) FROM replicated2.merge_tree") == "1\t100\n" - ) - assert ( - node1.query( - "SELECT engine FROM system.tables where database = 'replicated2' and name like '%mysql%'" - ) - == "Null\nNull\n" - ) - assert ( - node1.query( - "SELECT engine FROM system.tables where database = 'replicated2' and name like '%merge_tree%'" - ) - == "MergeTree\n" - ) - cleanup_nodes(nodes, "replicated2") From 9fb610ae10da22f521a1ab2e4442c78766d5be37 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 11:52:36 +0200 Subject: [PATCH 1259/2361] fix tests --- .../queries/0_stateless/03203_hive_style_partitioning.reference | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference index 430a3582f65..a4a2e48e046 100644 --- a/tests/queries/0_stateless/03203_hive_style_partitioning.reference +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -118,4 +118,3 @@ Eva Schmidt Elizabeth Schmidt Samuel Schmidt Elizabeth Schmidt Eva Schmidt Elizabeth Samuel Schmidt Elizabeth -Elizabeth Gordon Elizabeth Gordon From 0cd37533a1e9873632cff7dc6debbbf802a29742 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 31 Jul 2024 10:10:44 +0000 Subject: [PATCH 1260/2361] fix after merge --- src/Client/ClientBase.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index f8c2fb0d6bc..0c26b77bcec 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1039,10 +1039,10 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + client_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &client_context->getSettingsRef(), + &client_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); From 06863cf4157765c04759109afa756022dc5e9c55 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Wed, 31 Jul 2024 12:12:30 +0200 Subject: [PATCH 1261/2361] fix for allow_experimental_analyzer --- tests/queries/0_stateless/00309_formats.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00309_formats.sql b/tests/queries/0_stateless/00309_formats.sql index 0366cdeea5c..b784907be08 100644 --- a/tests/queries/0_stateless/00309_formats.sql +++ b/tests/queries/0_stateless/00309_formats.sql @@ -12,5 +12,5 @@ SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, a SET enable_named_columns_in_function_tuple = 1; -SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes; -SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes; +SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes SETTINGS allow_experimental_analyzer=1; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes SETTINGS allow_experimental_analyzer=1; From 
debcc2e61053f763cb84e34e48275dbebd5bd544 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 31 Jul 2024 10:46:19 +0200 Subject: [PATCH 1262/2361] Fix test test_mutation --- tests/integration/test_backup_restore_on_cluster/test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_backup_restore_on_cluster/test.py b/tests/integration/test_backup_restore_on_cluster/test.py index 1b7f4aaa97d..d20e10e8a04 100644 --- a/tests/integration/test_backup_restore_on_cluster/test.py +++ b/tests/integration/test_backup_restore_on_cluster/test.py @@ -1054,9 +1054,12 @@ def test_mutation(): backup_name = new_backup_name() node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}") - assert not has_mutation_in_backup("0000000000", backup_name, "default", "tbl") + # mutation #0000000000: "UPDATE x=x+1 WHERE 1" could already finish before starting the backup + # mutation #0000000001: "UPDATE x=x+1+sleep(3) WHERE 1" assert has_mutation_in_backup("0000000001", backup_name, "default", "tbl") + # mutation #0000000002: "UPDATE x=x+1+sleep(3) WHERE 1" assert has_mutation_in_backup("0000000002", backup_name, "default", "tbl") + # mutation #0000000003: not expected assert not has_mutation_in_backup("0000000003", backup_name, "default", "tbl") node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC") From f9f17fb61e2ab27f90434b5e3fc9081c061eaae4 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 31 Jul 2024 12:32:17 +0200 Subject: [PATCH 1263/2361] Fix reference --- .../0_stateless/03215_parsing_archive_name_s3.reference | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference b/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference index b4804c82dc2..b27524812c7 100644 --- a/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.reference @@ -1,3 +1,3 @@ -::03215_archive.csv test/::03215_archive.csv -test::03215_archive.csv test/test::03215_archive.csv -test.zip::03215_archive.csv test/test.zip::03215_archive.csv +::03215_archive.csv test/::03215_archive.csv +test::03215_archive.csv test/test::03215_archive.csv +test.zip::03215_archive.csv test/test.zip::03215_archive.csv From 4bf7aa1950f65aa82e85962ab3643f7df0e8bf2a Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 31 Jul 2024 13:30:14 +0200 Subject: [PATCH 1264/2361] Revert "Revert "Add settings to replace external engines to Null during create"" --- docs/en/operations/settings/settings.md | 12 + src/Core/Settings.h | 2 + src/Core/SettingsChangesHistory.cpp | 4 +- src/Interpreters/InterpreterCreateQuery.cpp | 35 +++ .../test_restore_external_engines/__init__.py | 0 .../configs/backups_disk.xml | 14 ++ .../configs/remote_servers.xml | 21 ++ .../test_restore_external_engines/test.py | 218 ++++++++++++++++++ 8 files changed, 305 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_restore_external_engines/__init__.py create mode 100644 tests/integration/test_restore_external_engines/configs/backups_disk.xml create mode 100644 tests/integration/test_restore_external_engines/configs/remote_servers.xml create mode 100644 tests/integration/test_restore_external_engines/test.py diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index c3f697c3bdc..8739414464e 100644 --- a/docs/en/operations/settings/settings.md +++ 
b/docs/en/operations/settings/settings.md @@ -5608,3 +5608,15 @@ Default value: `10000000`. Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached. Default value: `1GiB`. + +## restore_replace_external_engines_to_null + +For testing purposes. Replaces all external engines to Null to not initiate external connections. + +Default value: `False` + +## restore_replace_external_table_functions_to_null + +For testing purposes. Replaces all external table functions to Null to not initiate external connections. + +Default value: `False` \ No newline at end of file diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4fc2034b855..27b71558bd3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -893,6 +893,8 @@ class IColumn; M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \ M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \ M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \ + M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \ + M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \ \ \ /* ###################################### */ \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 9faf77e9087..8bea0b1eed3 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -80,7 +80,9 @@ static std::initializer_listno_empty_args = true; storage.set(storage.engine, engine_ast); } + + void setNullTableEngine(ASTStorage & storage) + { + auto engine_ast = std::make_shared(); + engine_ast->name = "Null"; + engine_ast->no_empty_args = true; + storage.set(storage.engine, engine_ast); + } + } void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const { if (create.as_table_function) + { + if (getContext()->getSettingsRef().restore_replace_external_table_functions_to_null) + { + const auto & factory = TableFunctionFactory::instance(); + + auto properties = factory.tryGetProperties(create.as_table_function->as()->name); + if (properties && properties->allow_readonly) + return; + if (!create.storage) + { + auto storage_ast = std::make_shared(); + create.set(create.storage, storage_ast); + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage should not be created yet, it's a bug."); + create.as_table_function = nullptr; + setNullTableEngine(*create.storage); + } return; + } if (create.is_dictionary || create.is_ordinary_view || create.is_live_view || create.is_window_view) return; @@ -1015,6 +1043,13 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one. 
setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value); } + /// For external tables with restore_replace_external_engine_to_null setting we replace external engines to + /// Null table engine. + else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null) + { + if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE) + setNullTableEngine(*create.storage); + } return; } diff --git a/tests/integration/test_restore_external_engines/__init__.py b/tests/integration/test_restore_external_engines/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_restore_external_engines/configs/backups_disk.xml b/tests/integration/test_restore_external_engines/configs/backups_disk.xml new file mode 100644 index 00000000000..f7d666c6542 --- /dev/null +++ b/tests/integration/test_restore_external_engines/configs/backups_disk.xml @@ -0,0 +1,14 @@ + + + + + local + /backups/ + + + + + backups + /backups/ + + diff --git a/tests/integration/test_restore_external_engines/configs/remote_servers.xml b/tests/integration/test_restore_external_engines/configs/remote_servers.xml new file mode 100644 index 00000000000..76ad3618339 --- /dev/null +++ b/tests/integration/test_restore_external_engines/configs/remote_servers.xml @@ -0,0 +1,21 @@ + + + + + true + + replica1 + 9000 + + + replica2 + 9000 + + + replica3 + 9000 + + + + + diff --git a/tests/integration/test_restore_external_engines/test.py b/tests/integration/test_restore_external_engines/test.py new file mode 100644 index 00000000000..cf189f2a6ed --- /dev/null +++ b/tests/integration/test_restore_external_engines/test.py @@ -0,0 +1,218 @@ +import pytest + +import pymysql.cursors +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +configs = ["configs/remote_servers.xml", "configs/backups_disk.xml"] + +node1 = cluster.add_instance( + "replica1", + with_zookeeper=True, + with_mysql8=True, + main_configs=configs, + external_dirs=["/backups/"], +) +node2 = cluster.add_instance( + "replica2", + with_zookeeper=True, + with_mysql8=True, + main_configs=configs, + external_dirs=["/backups/"], +) +node3 = cluster.add_instance( + "replica3", + with_zookeeper=True, + with_mysql8=True, + main_configs=configs, + external_dirs=["/backups/"], +) +nodes = [node1, node2, node3] + +backup_id_counter = 0 + + +def new_backup_name(): + global backup_id_counter + backup_id_counter += 1 + return f"Disk('backups', '{backup_id_counter}/')" + + +def cleanup_nodes(nodes, dbname): + for node in nodes: + node.query(f"DROP DATABASE IF EXISTS {dbname} SYNC") + + +def fill_nodes(nodes, dbname): + cleanup_nodes(nodes, dbname) + for node in nodes: + node.query( + f"CREATE DATABASE {dbname} ENGINE = Replicated('/clickhouse/databases/{dbname}', 'default', '{node.name}')" + ) + + +def drop_mysql_table(conn, tableName): + with conn.cursor() as cursor: + cursor.execute(f"DROP TABLE IF EXISTS `clickhouse`.`{tableName}`") + + +def get_mysql_conn(cluster): + conn = pymysql.connect( + user="root", + password="clickhouse", + host=cluster.mysql8_ip, + port=cluster.mysql8_port, + ) + return conn + + +def fill_tables(cluster, dbname): + fill_nodes(nodes, dbname) + + conn = get_mysql_conn(cluster) + + with conn.cursor() as cursor: + cursor.execute("DROP DATABASE IF EXISTS clickhouse") + cursor.execute("CREATE DATABASE clickhouse") + cursor.execute("DROP TABLE IF EXISTS 
clickhouse.inference_table") + cursor.execute( + "CREATE TABLE clickhouse.inference_table (id INT PRIMARY KEY, data BINARY(16) NOT NULL)" + ) + cursor.execute( + "INSERT INTO clickhouse.inference_table VALUES (100, X'9fad5e9eefdfb449')" + ) + conn.commit() + + parameters = "'mysql80:3306', 'clickhouse', 'inference_table', 'root', 'clickhouse'" + + node1.query( + f"CREATE TABLE {dbname}.mysql_schema_inference_engine ENGINE=MySQL({parameters})" + ) + node1.query( + f"CREATE TABLE {dbname}.mysql_schema_inference_function AS mysql({parameters})" + ) + + node1.query(f"CREATE TABLE {dbname}.merge_tree (id UInt64, b String) ORDER BY id") + node1.query(f"INSERT INTO {dbname}.merge_tree VALUES (100, 'abc')") + + expected = "id\tInt32\t\t\t\t\t\ndata\tFixedString(16)\t\t\t\t\t\n" + assert ( + node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_engine") + == expected + ) + assert ( + node1.query(f"DESCRIBE TABLE {dbname}.mysql_schema_inference_function") + == expected + ) + assert node1.query(f"SELECT id FROM mysql({parameters})") == "100\n" + assert ( + node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_engine") == "100\n" + ) + assert ( + node1.query(f"SELECT id FROM {dbname}.mysql_schema_inference_function") + == "100\n" + ) + assert node1.query(f"SELECT id FROM {dbname}.merge_tree") == "100\n" + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + + except Exception as ex: + print(ex) + + finally: + cluster.shutdown() + + +def test_restore_table(start_cluster): + fill_tables(cluster, "replicated") + backup_name = new_backup_name() + node2.query(f"SYSTEM SYNC DATABASE REPLICA replicated") + + node2.query(f"BACKUP DATABASE replicated TO {backup_name}") + + node2.query("DROP TABLE replicated.mysql_schema_inference_engine") + node2.query("DROP TABLE replicated.mysql_schema_inference_function") + + node3.query(f"SYSTEM SYNC DATABASE REPLICA replicated") + + assert node3.query("EXISTS replicated.mysql_schema_inference_engine") == "0\n" + assert node3.query("EXISTS replicated.mysql_schema_inference_function") == "0\n" + + node3.query( + f"RESTORE DATABASE replicated FROM {backup_name} SETTINGS allow_different_database_def=true" + ) + node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated") + + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated.mysql_schema_inference_engine" + ) + == "1\t100\n" + ) + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated.mysql_schema_inference_function" + ) + == "1\t100\n" + ) + assert ( + node1.query("SELECT count(), sum(id) FROM replicated.merge_tree") == "1\t100\n" + ) + cleanup_nodes(nodes, "replicated") + + +def test_restore_table_null(start_cluster): + fill_tables(cluster, "replicated2") + + backup_name = new_backup_name() + node2.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") + + node2.query(f"BACKUP DATABASE replicated2 TO {backup_name}") + + node2.query("DROP TABLE replicated2.mysql_schema_inference_engine") + node2.query("DROP TABLE replicated2.mysql_schema_inference_function") + + node3.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") + + assert node3.query("EXISTS replicated2.mysql_schema_inference_engine") == "0\n" + assert node3.query("EXISTS replicated2.mysql_schema_inference_function") == "0\n" + + node3.query( + f"RESTORE DATABASE replicated2 FROM {backup_name} SETTINGS allow_different_database_def=1, allow_different_table_def=1 SETTINGS restore_replace_external_engines_to_null=1, restore_replace_external_table_functions_to_null=1" + ) + 
node1.query(f"SYSTEM SYNC DATABASE REPLICA replicated2") + + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_engine" + ) + == "0\t0\n" + ) + assert ( + node1.query( + "SELECT count(), sum(id) FROM replicated2.mysql_schema_inference_function" + ) + == "0\t0\n" + ) + assert ( + node1.query("SELECT count(), sum(id) FROM replicated2.merge_tree") == "1\t100\n" + ) + assert ( + node1.query( + "SELECT engine FROM system.tables where database = 'replicated2' and name like '%mysql%'" + ) + == "Null\nNull\n" + ) + assert ( + node1.query( + "SELECT engine FROM system.tables where database = 'replicated2' and name like '%merge_tree%'" + ) + == "MergeTree\n" + ) + cleanup_nodes(nodes, "replicated2") From c81d3322b18b0eb4b45b91ac019a8c4f42d7518d Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 31 Jul 2024 13:39:30 +0200 Subject: [PATCH 1265/2361] Update 02150_index_hypothesis_race_long.sh --- tests/queries/0_stateless/02150_index_hypothesis_race_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh b/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh index c29b604d23d..5c432350768 100755 --- a/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh +++ b/tests/queries/0_stateless/02150_index_hypothesis_race_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-distributed-cache +# Tags: long, no-random-settings, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 64a7a413619cda2edb336079740cca2a3d6503f7 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 31 Jul 2024 13:42:38 +0200 Subject: [PATCH 1266/2361] Update SettingsChangesHistory.cpp --- src/Core/SettingsChangesHistory.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8bea0b1eed3..5e846868478 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,6 +57,9 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { + {"24.8", {{"restore_replace_external_table_functions_to_null", false, false, "New setting."}, + {"restore_replace_external_engines_to_null", false, false, "New setting."} + }}, {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, @@ -80,9 +83,7 @@ static std::initializer_list Date: Wed, 31 Jul 2024 12:00:09 +0000 Subject: [PATCH 1267/2361] Fix --- tests/integration/helpers/cluster.py | 25 +++++++++++++------- tests/integration/helpers/retry_decorator.py | 7 ++++-- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 7d80fbe90f8..7f0a9154be9 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2692,11 +2692,13 @@ class ClickHouseCluster: images_pull_cmd = self.base_cmd + ["pull"] # sometimes dockerhub/proxy can be flaky - retry( - log_function=lambda exception: logging.info( - "Got exception pulling images: %s", exception - ), - )(run_and_check)(images_pull_cmd) + def logging_pulling_images(**kwargs): + if "exception" in kwargs: + logging.info( + "Got exception pulling images: %s", kwargs["exception"] + ) + + retry(log_function=logging_pulling_images)(run_and_check)(images_pull_cmd) if self.with_zookeeper_secure and self.base_zookeeper_cmd: logging.debug("Setup ZooKeeper Secure") @@ -2969,11 +2971,16 @@ class ClickHouseCluster: "Trying to create Azurite instance by command %s", " ".join(map(str, azurite_start_cmd)), ) - retry( - log_function=lambda exception: logging.info( + def logging_azurite_initialization(exception, retry_number, sleep_time): + logging.info( f"Azurite initialization failed with error: {exception}" - ), - )(run_and_check)(azurite_start_cmd) + ) + + retry( + log_function=logging_azurite_initialization, + )( + run_and_check + )(azurite_start_cmd) self.up_called = True logging.info("Trying to connect to Azurite") self.wait_azurite_to_start() diff --git a/tests/integration/helpers/retry_decorator.py b/tests/integration/helpers/retry_decorator.py index aaa040464c2..e7bafbe29c1 100644 --- a/tests/integration/helpers/retry_decorator.py +++ b/tests/integration/helpers/retry_decorator.py @@ -8,7 +8,7 @@ def retry( delay: float = 1, backoff: float = 1.5, jitter: float = 2, - log_function=lambda *args, **kwargs: None, + log_function=None, # should take **kwargs or arguments: `retry_number`, `exception` and `sleep_time` retriable_expections_list: List[Type[BaseException]] = [Exception], ): def inner(func): @@ -26,8 +26,11 @@ def retry( break if not should_retry or (retry == retries - 1): raise e - log_function(retry=retry, exception=e) sleep_time = current_delay + random.uniform(0, jitter) + if log_function is not None: + log_function( + retry_number=retry, exception=e, sleep_time=sleep_time + ) time.sleep(sleep_time) current_delay *= backoff From 15e0033016eb0e23a7e6f512d5096e50863e3187 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Wed, 31 Jul 2024 12:09:49 +0000 Subject: [PATCH 1268/2361] Bring back the strict check Also update the doc. 
--- tests/integration/README.md | 9 +++++---- tests/integration/test_storage_s3_queue/test.py | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/integration/README.md b/tests/integration/README.md index ab984b7bd04..a8deb97b526 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -124,10 +124,11 @@ You can just open shell inside a container by overwritting the command: ### Parallel test execution On the CI, we run a number of parallel runners (5 at the time of this writing), each on its own -Docker container. These runner containers spawn more containers for the services needed such as -ZooKeeper, MySQL, PostgreSQL and minio, among others. Within each runner, tests are parallelized -using [pytest-xdist](https://pytest-xdist.readthedocs.io/en/stable/). We're using `--dist=loadfile` -to [distribute the load](https://pytest-xdist.readthedocs.io/en/stable/distribution.html). In the +Docker container. These runner containers spawn more containers for each test for the services +needed such as ZooKeeper, MySQL, PostgreSQL and minio, among others. This means that tests do not +share any services among them. Within each runner, tests are parallelized using +[pytest-xdist](https://pytest-xdist.readthedocs.io/en/stable/). We're using `--dist=loadfile` to +[distribute the load](https://pytest-xdist.readthedocs.io/en/stable/distribution.html). In the documentation words: this guarantees that all tests in a file run in the same worker. This means that any test within the same file will never execute their tests in parallel. They'll be executed on the same worker one after the other. diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index e3445d14cdb..9a97e8c23d1 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -907,7 +907,7 @@ def test_max_set_age(started_cluster): file_with_error = f"max_set_age_fail_{uuid4().hex[:8]}.csv" put_s3_file_content(started_cluster, f"{files_path}/{file_with_error}", values_csv) - wait_for_condition(lambda: failed_count + 1 <= get_object_storage_failures()) + wait_for_condition(lambda: failed_count + 1 == get_object_storage_failures()) node.query("SYSTEM FLUSH LOGS") assert "Cannot parse input" in node.query( @@ -920,7 +920,7 @@ def test_max_set_age(started_cluster): ) ) - wait_for_condition(lambda: failed_count + 2 <= get_object_storage_failures()) + wait_for_condition(lambda: failed_count + 2 == get_object_storage_failures()) node.query("SYSTEM FLUSH LOGS") assert "Cannot parse input" in node.query( From 9d14053cfe7867fd688c08b493c264ee679a4a61 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 31 Jul 2024 12:17:56 +0000 Subject: [PATCH 1269/2361] Proper fix for short circuit execution with nested dictGetOrDefaultt --- src/Columns/ColumnFunction.cpp | 26 ++++++++++++++----- src/Interpreters/ExpressionActions.cpp | 4 --- ...sted_short_circuit_functions_bug.reference | 2 ++ ...210_nested_short_circuit_functions_bug.sql | 3 +++ 4 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.reference create mode 100644 tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.sql diff --git a/src/Columns/ColumnFunction.cpp b/src/Columns/ColumnFunction.cpp index fc81efaac0c..18c343c6ca6 100644 --- a/src/Columns/ColumnFunction.cpp +++ b/src/Columns/ColumnFunction.cpp @@ -296,16 +296,28 @@ ColumnWithTypeAndName 
ColumnFunction::reduce() const function->getName(), toString(args), toString(captured)); ColumnsWithTypeAndName columns = captured_columns; - IFunction::ShortCircuitSettings settings; /// Arguments of lazy executed function can also be lazy executed. - /// But we shouldn't execute arguments if this function is short circuit, - /// because it will handle lazy executed arguments by itself. - if (is_short_circuit_argument && !function->isShortCircuit(settings, args)) + if (is_short_circuit_argument) { - for (auto & col : columns) + IFunction::ShortCircuitSettings settings; + /// We shouldn't execute all arguments if this function is short circuit, + /// because it will handle lazy executed arguments by itself. + /// Execute only arguments with disabled lazy execution. + if (function->isShortCircuit(settings, args)) { - if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column)) - col = arg->reduce(); + for (size_t i : settings.arguments_with_disabled_lazy_execution) + { + if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(columns[i].column)) + columns[i] = arg->reduce(); + } + } + else + { + for (auto & col : columns) + { + if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column)) + col = arg->reduce(); + } } } diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index d832f568cb8..8993830af14 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -195,10 +195,6 @@ static void setLazyExecutionInfo( } lazy_execution_info.short_circuit_ancestors_info[parent].insert(indexes.begin(), indexes.end()); - /// After checking arguments_with_disabled_lazy_execution, if there is no relation with parent, - /// disable the current node. - if (indexes.empty()) - lazy_execution_info.can_be_lazy_executed = false; } else /// If lazy execution is disabled for one of parents, we should disable it for current node. 
diff --git a/tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.reference b/tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.reference new file mode 100644 index 00000000000..aa47d0d46d4 --- /dev/null +++ b/tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.reference @@ -0,0 +1,2 @@ +0 +0 diff --git a/tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.sql b/tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.sql new file mode 100644 index 00000000000..923f1e3be1f --- /dev/null +++ b/tests/queries/0_stateless/03210_nested_short_circuit_functions_bug.sql @@ -0,0 +1,3 @@ +select if(equals(materialize('abc'), 'aws.lambda.duration'), if(toFloat64(materialize('x86_74')) < 50.0000, 0, 1), 0) settings short_circuit_function_evaluation='enable'; +select if(equals(materialize('abc'), 'aws.lambda.duration'), if(toFloat64(materialize('x86_74')) < 50.0000, 0, 1), 0) settings short_circuit_function_evaluation='force_enable'; + From bcd53dcd20ae4ffce2eeceeea6637725b19a0803 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 31 Jul 2024 14:31:33 +0200 Subject: [PATCH 1270/2361] Fix docs --- docs/en/sql-reference/data-types/newjson.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index 524dc7810e6..9e43216df6c 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -10,7 +10,7 @@ keywords: [json, data type] Stores JavaScript Object Notation (JSON) documents in a single column. :::note -This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json.md) instead. +This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. If you want to use JSON type, set `allow_experimental_json_type = 1`. ::: From 6b7c5eb5da1be1fc31d4ebfd4f0dfa0c6a6e728c Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 31 Jul 2024 14:09:07 +0200 Subject: [PATCH 1271/2361] Fix drop --- src/Storages/StorageKeeperMap.cpp | 34 +++++++++++++++-------- src/Storages/StorageKeeperMap.h | 21 ++++++++++---- tests/integration/test_keeper_map/test.py | 5 ++-- 3 files changed, 41 insertions(+), 19 deletions(-) diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 1559b442e43..0634c7be6ee 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -79,6 +79,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; extern const int LIMIT_EXCEEDED; extern const int CANNOT_RESTORE_TABLE; + extern const int INVALID_STATE; } namespace @@ -497,7 +498,7 @@ StorageKeeperMap::StorageKeeperMap( } - table_is_valid = true; + table_status = TableStatus::VALID; /// we are the first table created for the specified Keeper path, i.e. we are the first replica return; } @@ -656,7 +657,18 @@ bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::E void StorageKeeperMap::drop() { - checkTable(); + auto current_table_status = getTableStatus(); + if (current_table_status == TableStatus::UNKNOWN) + { + static constexpr auto error_msg = "Failed to activate table because of connection issues. 
It will be activated " + "once a connection is established and metadata is verified"; + throw Exception(ErrorCodes::INVALID_STATE, error_msg); + } + + /// if only column metadata is wrong we can still drop the table correctly + if (current_table_status == TableStatus::INVALID_KEEPER_STRUCTURE) + return; + auto client = getClient(); // we allow ZNONODE in case we got hardware error on previous drop @@ -1017,11 +1029,11 @@ UInt64 StorageKeeperMap::keysLimit() const return keys_limit; } -std::optional StorageKeeperMap::isTableValid() const +StorageKeeperMap::TableStatus StorageKeeperMap::getTableStatus() const { std::lock_guard lock{init_mutex}; - if (table_is_valid.has_value()) - return table_is_valid; + if (table_status != TableStatus::UNKNOWN) + return table_status; [&] { @@ -1034,7 +1046,7 @@ std::optional StorageKeeperMap::isTableValid() const if (metadata_stat.numChildren == 0) { - table_is_valid = false; + table_status = TableStatus::INVALID_KEEPER_STRUCTURE; return; } @@ -1045,7 +1057,7 @@ std::optional StorageKeeperMap::isTableValid() const "Table definition does not match to the one stored in the path {}. Stored definition: {}", zk_root_path, stored_metadata_string); - table_is_valid = false; + table_status = TableStatus::INVALID_METADATA; return; } @@ -1058,7 +1070,7 @@ std::optional StorageKeeperMap::isTableValid() const Coordination::Responses responses; client->tryMulti(requests, responses); - table_is_valid = false; + table_status = TableStatus::INVALID_KEEPER_STRUCTURE; if (responses[0]->error != Coordination::Error::ZOK) { LOG_ERROR(log, "Table node ({}) is missing", zk_table_path); @@ -1077,18 +1089,18 @@ std::optional StorageKeeperMap::isTableValid() const return; } - table_is_valid = true; + table_status = TableStatus::VALID; } catch (const Coordination::Exception & e) { tryLogCurrentException(log); if (!Coordination::isHardwareError(e.code)) - table_is_valid = false; + table_status = TableStatus::INVALID_KEEPER_STRUCTURE; } }(); - return table_is_valid; + return table_status; } Chunk StorageKeeperMap::getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray & null_map, const Names &) const diff --git a/src/Storages/StorageKeeperMap.h b/src/Storages/StorageKeeperMap.h index cfbb35ab2fe..8ed348a4f6f 100644 --- a/src/Storages/StorageKeeperMap.h +++ b/src/Storages/StorageKeeperMap.h @@ -80,8 +80,8 @@ public: template void checkTable() const { - auto is_table_valid = isTableValid(); - if (!is_table_valid.has_value()) + auto current_table_status = getTableStatus(); + if (table_status == TableStatus::UNKNOWN) { static constexpr auto error_msg = "Failed to activate table because of connection issues. It will be activated " "once a connection is established and metadata is verified"; @@ -94,10 +94,10 @@ public: } } - if (!*is_table_valid) + if (current_table_status != TableStatus::VALID) { static constexpr auto error_msg - = "Failed to activate table because of invalid metadata in ZooKeeper. Please DETACH table"; + = "Failed to activate table because of invalid metadata in ZooKeeper. 
Please DROP/DETACH table"; if constexpr (throw_on_error) throw Exception(ErrorCodes::INVALID_STATE, error_msg); else @@ -111,7 +111,15 @@ public: private: bool dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock); - std::optional isTableValid() const; + enum class TableStatus : uint8_t + { + UNKNOWN, + INVALID_METADATA, + INVALID_KEEPER_STRUCTURE, + VALID + }; + + TableStatus getTableStatus() const; void restoreDataImpl( const BackupPtr & backup, @@ -143,7 +151,8 @@ private: mutable zkutil::ZooKeeperPtr zookeeper_client{nullptr}; mutable std::mutex init_mutex; - mutable std::optional table_is_valid; + + mutable TableStatus table_status{TableStatus::UNKNOWN}; LoggerPtr log; }; diff --git a/tests/integration/test_keeper_map/test.py b/tests/integration/test_keeper_map/test.py index 7aee5df5746..4b1bcd11cfe 100644 --- a/tests/integration/test_keeper_map/test.py +++ b/tests/integration/test_keeper_map/test.py @@ -67,6 +67,7 @@ def run_query(query): def test_keeper_map_without_zk(started_cluster): + run_query("DROP TABLE IF EXISTS test_keeper_map_without_zk SYNC") assert_keeper_exception_after_partition( "CREATE TABLE test_keeper_map_without_zk (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_without_zk') PRIMARY KEY(key);" ) @@ -107,12 +108,12 @@ def test_keeper_map_without_zk(started_cluster): ) assert "Failed to activate table because of invalid metadata in ZooKeeper" in error - node.query("DETACH TABLE test_keeper_map_without_zk") - client.stop() def test_keeper_map_with_failed_drop(started_cluster): + run_query("DROP TABLE IF EXISTS test_keeper_map_with_failed_drop SYNC") + run_query("DROP TABLE IF EXISTS test_keeper_map_with_failed_drop_another SYNC") run_query( "CREATE TABLE test_keeper_map_with_failed_drop (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_with_failed_drop') PRIMARY KEY(key);" ) From 406ac2279ecbfc24913548dfcf459c55dd450723 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 31 Jul 2024 14:48:33 +0200 Subject: [PATCH 1272/2361] Analyzer: Do not traverse unresolved subtrees --- src/Planner/findParallelReplicasQuery.cpp | 12 +++++------- src/Planner/findQueryForParallelReplicas.h | 2 +- ...5_analyzer_replace_with_dummy_tables.reference | 0 .../03215_analyzer_replace_with_dummy_tables.sql | 15 +++++++++++++++ 4 files changed, 21 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.reference create mode 100644 tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index c89a70be541..1140f30ad9c 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -113,13 +113,13 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre return res; } -class ReplaceTableNodeToDummyVisitor : public InDepthQueryTreeVisitor +class ReplaceTableNodeToDummyVisitor : public InDepthQueryTreeVisitorWithContext { public: - using Base = InDepthQueryTreeVisitor; + using Base = InDepthQueryTreeVisitorWithContext; using Base::Base; - void visitImpl(const QueryTreeNodePtr & node) + void enterImpl(QueryTreeNodePtr & node) { auto * table_node = node->as(); auto * table_function_node = node->as(); @@ -134,21 +134,19 @@ public: ColumnsDescription(storage_snapshot->getColumns(get_column_options)), storage_snapshot); - auto dummy_table_node = std::make_shared(std::move(storage_dummy), context); + auto 
dummy_table_node = std::make_shared(std::move(storage_dummy), getContext()); dummy_table_node->setAlias(node->getAlias()); replacement_map.emplace(node.get(), std::move(dummy_table_node)); } } - ContextPtr context; std::unordered_map replacement_map; }; QueryTreeNodePtr replaceTablesWithDummyTables(const QueryTreeNodePtr & query, const ContextPtr & context) { - ReplaceTableNodeToDummyVisitor visitor; - visitor.context = context; + ReplaceTableNodeToDummyVisitor visitor(context); visitor.visit(query); return query->cloneAndReplace(visitor.replacement_map); diff --git a/src/Planner/findQueryForParallelReplicas.h b/src/Planner/findQueryForParallelReplicas.h index f5dc69dfa0e..cdce4ad0b47 100644 --- a/src/Planner/findQueryForParallelReplicas.h +++ b/src/Planner/findQueryForParallelReplicas.h @@ -13,7 +13,7 @@ using QueryTreeNodePtr = std::shared_ptr; struct SelectQueryOptions; -/// Find a qury which can be executed with parallel replicas up to WithMergableStage. +/// Find a query which can be executed with parallel replicas up to WithMergableStage. /// Returned query will always contain some (>1) subqueries, possibly with joins. const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); diff --git a/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.reference b/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql b/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql new file mode 100644 index 00000000000..12d2bd627a7 --- /dev/null +++ b/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql @@ -0,0 +1,15 @@ +create table t (number UInt64) engine MergeTree order by number; + +SELECT 1 +FROM +( + SELECT number IN ( + SELECT number + FROM view( + SELECT number + FROM numbers(1) + ) + ) + FROM t +) +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, allow_experimental_analyzer = 1; From 7160e954c16100e963371e416878837437569d74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 31 Jul 2024 13:32:24 +0200 Subject: [PATCH 1273/2361] 02995_new_settings_history: Update ref --- ..._23_12_1.tsv => 02995_baseline_24_7_1.tsv} | 182 +++++++++++++++--- .../0_stateless/02995_new_settings_history.sh | 14 +- 2 files changed, 167 insertions(+), 29 deletions(-) rename tests/queries/0_stateless/{02995_baseline_23_12_1.tsv => 02995_baseline_24_7_1.tsv} (82%) diff --git a/tests/queries/0_stateless/02995_baseline_23_12_1.tsv b/tests/queries/0_stateless/02995_baseline_24_7_1.tsv similarity index 82% rename from tests/queries/0_stateless/02995_baseline_23_12_1.tsv rename to tests/queries/0_stateless/02995_baseline_24_7_1.tsv index a391473e7c9..6c830da8646 100644 --- a/tests/queries/0_stateless/02995_baseline_23_12_1.tsv +++ b/tests/queries/0_stateless/02995_baseline_24_7_1.tsv @@ -11,23 +11,28 @@ allow_create_index_without_type 0 allow_custom_error_code_in_throwif 0 allow_ddl 1 allow_deprecated_database_ordinary 0 +allow_deprecated_error_prone_window_functions 0 +allow_deprecated_snowflake_conversion_functions 0 allow_deprecated_syntax_for_merge_tree 0 allow_distributed_ddl 1 allow_drop_detached 0 allow_execute_multiif_columnar 1 allow_experimental_alter_materialized_view_structure 1 -allow_experimental_analyzer 0 +allow_experimental_analyzer 1 allow_experimental_annoy_index 
0 allow_experimental_bigint_types 1 allow_experimental_codecs 0 allow_experimental_database_atomic 1 allow_experimental_database_materialized_mysql 0 allow_experimental_database_materialized_postgresql 0 -allow_experimental_database_replicated 0 +allow_experimental_database_replicated 1 +allow_experimental_dynamic_type 0 +allow_experimental_full_text_index 0 allow_experimental_funnel_functions 0 allow_experimental_geo_types 1 allow_experimental_hash_functions 0 allow_experimental_inverted_index 0 +allow_experimental_join_condition 0 allow_experimental_lightweight_delete 1 allow_experimental_live_view 0 allow_experimental_map_type 1 @@ -40,12 +45,15 @@ allow_experimental_query_cache 1 allow_experimental_query_deduplication 0 allow_experimental_refreshable_materialized_view 0 allow_experimental_s3queue 1 -allow_experimental_shared_merge_tree 0 +allow_experimental_shared_merge_tree 1 +allow_experimental_statistic 0 allow_experimental_statistics 0 allow_experimental_undrop_table_query 1 allow_experimental_usearch_index 0 +allow_experimental_variant_type 0 allow_experimental_window_functions 1 allow_experimental_window_view 0 +allow_get_client_http_header 0 allow_hyperscan 1 allow_introspection_functions 0 allow_named_collection_override_by_default 1 @@ -58,17 +66,21 @@ allow_prefetched_read_pool_for_remote_filesystem 1 allow_push_predicate_when_subquery_contains_with 1 allow_settings_after_format_in_insert 0 allow_simdjson 1 +allow_statistic_optimize 0 allow_statistics_optimize 0 allow_suspicious_codecs 0 allow_suspicious_fixed_string_types 0 allow_suspicious_indices 0 allow_suspicious_low_cardinality_types 0 +allow_suspicious_primary_key 0 allow_suspicious_ttl_expressions 0 +allow_suspicious_variant_types 0 allow_unrestricted_reads_from_keeper 0 alter_move_to_space_execute_async 0 alter_partition_verbose_result 0 alter_sync 1 analyze_index_with_space_filling_curves 1 +analyzer_compatibility_join_using_top_level_identifier 0 annoy_index_search_k_nodes -1 any_join_distinct_right_table_keys 0 apply_deleted_mask 1 @@ -76,20 +88,42 @@ apply_mutations_on_fly 0 asterisk_include_alias_columns 0 asterisk_include_materialized_columns 0 async_insert 0 +async_insert_busy_timeout_decrease_rate 0.2 +async_insert_busy_timeout_increase_rate 0.2 +async_insert_busy_timeout_max_ms 200 +async_insert_busy_timeout_min_ms 50 async_insert_busy_timeout_ms 200 async_insert_cleanup_timeout_ms 1000 async_insert_deduplicate 0 -async_insert_max_data_size 1000000 +async_insert_max_data_size 10485760 async_insert_max_query_number 450 +async_insert_poll_timeout_ms 10 async_insert_stale_timeout_ms 0 async_insert_threads 16 +async_insert_use_adaptive_busy_timeout 1 async_query_sending_for_remote 1 async_socket_for_remote 1 +azure_allow_parallel_part_upload 1 azure_create_new_file_on_insert 0 +azure_ignore_file_doesnt_exist 0 azure_list_object_keys_size 1000 +azure_max_blocks_in_multipart_upload 50000 +azure_max_inflight_parts_for_one_file 20 +azure_max_single_part_copy_size 268435456 azure_max_single_part_upload_size 104857600 azure_max_single_read_retries 4 +azure_max_unexpected_write_error_retries 4 +azure_max_upload_part_size 5368709120 +azure_min_upload_part_size 16777216 +azure_sdk_max_retries 10 +azure_sdk_retry_initial_backoff_ms 10 +azure_sdk_retry_max_backoff_ms 1000 +azure_skip_empty_files 0 +azure_strict_upload_part_size 0 +azure_throw_on_zero_files_match 0 azure_truncate_on_insert 0 +azure_upload_part_size_multiply_factor 2 +azure_upload_part_size_multiply_parts_count_threshold 500 
background_buffer_flush_schedule_pool_size 16 background_common_pool_size 8 background_distributed_schedule_pool_size 16 @@ -107,6 +141,7 @@ backup_restore_keeper_max_retries 20 backup_restore_keeper_retry_initial_backoff_ms 100 backup_restore_keeper_retry_max_backoff_ms 5000 backup_restore_keeper_value_max_size 1048576 +backup_restore_s3_retry_attempts 1000 backup_threads 16 bool_false_representation false bool_true_representation true @@ -115,6 +150,7 @@ calculate_text_stack_trace 1 cancel_http_readonly_queries_on_client_close 0 cast_ipv4_ipv6_default_on_conversion_error 0 cast_keep_nullable 0 +cast_string_to_dynamic_use_inference 0 check_query_single_value_result 1 check_referential_table_dependencies 0 check_table_dependencies 1 @@ -123,6 +159,7 @@ cloud_mode 0 cloud_mode_engine 1 cluster_for_parallel_replicas collect_hash_table_stats_during_aggregation 1 +collect_hash_table_stats_during_joins 1 column_names_for_schema_inference compatibility compatibility_ignore_auto_increment_in_create_table 0 @@ -141,9 +178,12 @@ count_distinct_optimization 0 create_index_ignore_unique 0 create_replicated_merge_tree_fault_injection_probability 0 create_table_empty_primary_key_by_default 0 +cross_join_min_bytes_to_compress 1073741824 +cross_join_min_rows_to_compress 10000000 cross_to_inner_join_rewrite 1 data_type_default_nullable 0 database_atomic_wait_for_drop_and_detach_synchronously 0 +database_replicated_allow_heavy_create 0 database_replicated_allow_only_replicated_engine 0 database_replicated_allow_replicated_engine_arguments 1 database_replicated_always_detach_permanently 0 @@ -156,15 +196,19 @@ date_time_overflow_behavior ignore decimal_check_overflow 1 deduplicate_blocks_in_dependent_materialized_views 0 default_database_engine Atomic +default_materialized_view_sql_security DEFINER default_max_bytes_in_join 1000000000 -default_table_engine None +default_normal_view_sql_security INVOKER +default_table_engine MergeTree default_temporary_table_engine Memory +default_view_definer CURRENT_USER describe_compact_output 0 describe_extend_object_types 0 describe_include_subcolumns 0 describe_include_virtual_columns 0 dialect clickhouse dictionary_use_async_executor 0 +dictionary_validate_primary_key_type 0 distinct_overflow_mode throw distributed_aggregation_memory_efficient 1 distributed_background_insert_batch 0 @@ -182,6 +226,7 @@ distributed_directory_monitor_sleep_time_ms 100 distributed_directory_monitor_split_batch_on_failure 0 distributed_foreground_insert 0 distributed_group_by_no_merge 0 +distributed_insert_skip_read_only_replicas 0 distributed_product_mode deny distributed_push_down_limit 1 distributed_replica_error_cap 1000 @@ -191,6 +236,7 @@ do_not_merge_across_partitions_select_final 0 drain_timeout 3 empty_result_for_aggregation_by_constant_keys_on_empty_set 1 empty_result_for_aggregation_by_empty_set 0 +enable_blob_storage_log 1 enable_debug_queries 0 enable_deflate_qpl_codec 0 enable_early_constant_folding 1 @@ -205,6 +251,7 @@ enable_job_stack_trace 0 enable_lightweight_delete 1 enable_memory_bound_merging_of_aggregation_results 1 enable_multiple_prewhere_read_steps 1 +enable_named_columns_in_function_tuple 1 enable_optimize_predicate_expression 1 enable_optimize_predicate_expression_to_final_subquery 1 enable_order_by_all 1 @@ -216,7 +263,9 @@ enable_sharing_sets_for_mutations 1 enable_software_prefetch_in_aggregation 1 enable_unaligned_array_join 0 enable_url_encoding 1 +enable_vertical_final 1 enable_writes_to_query_cache 1 +enable_zstd_qat_codec 0 
engine_file_allow_create_multiple_files 0 engine_file_empty_if_not_exists 0 engine_file_skip_empty_files 0 @@ -231,10 +280,12 @@ external_storage_max_read_rows 0 external_storage_rw_timeout_sec 300 external_table_functions_use_nulls 1 external_table_strict_query 0 +extract_key_value_pairs_max_pairs_per_row 1000 extract_kvp_max_pairs_per_row 1000 extremes 0 fallback_to_stale_replicas_for_distributed_queries 1 filesystem_cache_max_download_size 137438953472 +filesystem_cache_reserve_space_wait_lock_timeout_milliseconds 1000 filesystem_cache_segments_batch_size 20 filesystem_prefetch_max_memory_usage 1073741824 filesystem_prefetch_min_bytes_for_single_read_task 2097152 @@ -278,7 +329,9 @@ format_regexp_escaping_rule Raw format_regexp_skip_unmatched 0 format_schema format_template_resultset +format_template_resultset_format format_template_row +format_template_row_format format_template_rows_between_delimiter \n format_tsv_null_representation \\N formatdatetime_f_prints_single_zero 0 @@ -288,8 +341,11 @@ fsync_metadata 1 function_implementation function_json_value_return_type_allow_complex 0 function_json_value_return_type_allow_nullable 0 +function_locate_has_mysql_compatible_argument_order 1 function_range_max_elements_in_block 500000000 function_sleep_max_microseconds_per_block 3000000 +function_visible_width_behavior 1 +geo_distance_returns_float64_on_float64_arguments 1 glob_expansion_max_elements 1000 grace_hash_join_initial_buckets 1 grace_hash_join_max_buckets 1024 @@ -300,8 +356,10 @@ group_by_use_nulls 0 handle_kafka_error_mode default handshake_timeout_ms 10000 hdfs_create_new_file_on_insert 0 +hdfs_ignore_file_doesnt_exist 0 hdfs_replication 0 hdfs_skip_empty_files 0 +hdfs_throw_on_zero_files_match 0 hdfs_truncate_on_insert 0 hedged_connection_timeout_ms 50 hsts_max_age 0 @@ -326,10 +384,14 @@ http_skip_not_found_url_for_globs 1 http_wait_end_of_query 0 http_write_exception_in_output_format 1 http_zlib_compression_level 3 +iceberg_engine_ignore_schema_evolution 0 idle_connection_timeout 3600 ignore_cold_parts_seconds 0 ignore_data_skipping_indices +ignore_drop_queries_probability 0 +ignore_materialized_views_with_dropped_target_table 0 ignore_on_cluster_for_replicated_access_entities_queries 0 +ignore_on_cluster_for_replicated_named_collections_queries 0 ignore_on_cluster_for_replicated_udf_queries 0 implicit_transaction 0 input_format_allow_errors_num 0 @@ -341,12 +403,14 @@ input_format_arrow_import_nested 0 input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference 0 input_format_avro_allow_missing_fields 0 input_format_avro_null_as_default 0 +input_format_binary_decode_types_in_binary_format 0 input_format_bson_skip_fields_with_unsupported_types_in_schema_inference 0 input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference 0 input_format_csv_allow_cr_end_of_line 0 input_format_csv_allow_variable_number_of_columns 0 input_format_csv_allow_whitespace_or_tab_as_delimiter 0 input_format_csv_arrays_as_nested_csv 0 +input_format_csv_deserialize_separate_columns_into_tuple 1 input_format_csv_detect_header 1 input_format_csv_empty_as_default 1 input_format_csv_enum_as_number 0 @@ -354,29 +418,37 @@ input_format_csv_skip_first_lines 0 input_format_csv_skip_trailing_empty_lines 0 input_format_csv_trim_whitespaces 1 input_format_csv_try_infer_numbers_from_strings 0 +input_format_csv_try_infer_strings_from_quoted_tuples 1 input_format_csv_use_best_effort_in_schema_inference 1 input_format_csv_use_default_on_bad_values 0 
input_format_custom_allow_variable_number_of_columns 0 input_format_custom_detect_header 1 input_format_custom_skip_trailing_empty_lines 0 input_format_defaults_for_omitted_fields 1 +input_format_force_null_for_omitted_fields 0 +input_format_hive_text_allow_variable_number_of_columns 1 input_format_hive_text_collection_items_delimiter  input_format_hive_text_fields_delimiter  input_format_hive_text_map_keys_delimiter  input_format_import_nested_json 0 input_format_ipv4_default_on_conversion_error 0 input_format_ipv6_default_on_conversion_error 0 +input_format_json_case_insensitive_column_matching 0 input_format_json_compact_allow_variable_number_of_columns 0 input_format_json_defaults_for_missing_elements_in_named_tuple 1 input_format_json_ignore_unknown_keys_in_named_tuple 1 +input_format_json_ignore_unnecessary_fields 1 input_format_json_infer_incomplete_types_as_strings 1 input_format_json_named_tuples_as_objects 1 input_format_json_read_arrays_as_strings 1 input_format_json_read_bools_as_numbers 1 +input_format_json_read_bools_as_strings 1 input_format_json_read_numbers_as_strings 1 input_format_json_read_objects_as_strings 1 +input_format_json_throw_on_bad_escape_sequence 1 input_format_json_try_infer_named_tuples_from_objects 1 input_format_json_try_infer_numbers_from_strings 0 +input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects 0 input_format_json_validate_types_from_metadata 1 input_format_max_bytes_to_read_for_schema_inference 33554432 input_format_max_rows_to_read_for_schema_inference 25000 @@ -384,11 +456,13 @@ input_format_msgpack_number_of_columns 0 input_format_mysql_dump_map_column_names 1 input_format_mysql_dump_table_name input_format_native_allow_types_conversion 1 +input_format_native_decode_types_in_binary_format 0 input_format_null_as_default 1 input_format_orc_allow_missing_columns 1 input_format_orc_case_insensitive_column_matching 0 input_format_orc_filter_push_down 1 input_format_orc_import_nested 0 +input_format_orc_read_use_writer_time_zone 0 input_format_orc_row_batch_size 100000 input_format_orc_skip_columns_with_unsupported_types_in_schema_inference 0 input_format_orc_use_fast_decoder 1 @@ -398,17 +472,21 @@ input_format_parquet_case_insensitive_column_matching 0 input_format_parquet_filter_push_down 1 input_format_parquet_import_nested 0 input_format_parquet_local_file_min_bytes_for_seek 8192 -input_format_parquet_max_block_size 8192 +input_format_parquet_max_block_size 65409 +input_format_parquet_prefer_block_bytes 16744704 input_format_parquet_preserve_order 0 input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference 0 +input_format_parquet_use_native_reader 0 input_format_protobuf_flatten_google_wrappers 0 input_format_protobuf_skip_fields_with_unsupported_types_in_schema_inference 0 input_format_record_errors_file_path input_format_skip_unknown_fields 1 input_format_try_infer_dates 1 input_format_try_infer_datetimes 1 +input_format_try_infer_exponent_floats 0 input_format_try_infer_integers 1 input_format_tsv_allow_variable_number_of_columns 0 +input_format_tsv_crlf_end_of_line 0 input_format_tsv_detect_header 1 input_format_tsv_empty_as_default 0 input_format_tsv_enum_as_number 0 @@ -450,7 +528,12 @@ joined_subquery_requires_alias 1 kafka_disable_num_consumers_limit 0 kafka_max_wait_ms 5000 keeper_map_strict_mode 0 +keeper_max_retries 10 +keeper_retry_initial_backoff_ms 100 +keeper_retry_max_backoff_ms 5000 legacy_column_name_of_tuple_literal 0 +lightweight_deletes_sync 2 
+lightweight_mutation_projection_mode throw limit 0 live_view_heartbeat_interval 15 load_balancing random @@ -461,7 +544,7 @@ local_filesystem_read_prefetch 0 lock_acquire_timeout 120 log_comment log_formatted_queries 0 -log_processors_profiles 0 +log_processors_profiles 1 log_profile_events 1 log_queries 1 log_queries_cut_to_length 100000 @@ -474,6 +557,8 @@ log_query_views 1 low_cardinality_allow_in_native_format 1 low_cardinality_max_dictionary_size 8192 low_cardinality_use_single_dictionary_for_part 0 +materialize_skip_indexes_on_insert 1 +materialize_statistics_on_insert 1 materialize_ttl_after_modify 1 materialized_views_ignore_errors 0 max_alter_threads \'auto(16)\' @@ -501,6 +586,7 @@ max_distributed_depth 5 max_download_buffer_size 10485760 max_download_threads 4 max_entries_for_hash_table_stats 10000 +max_estimated_execution_time 0 max_execution_speed 0 max_execution_speed_bytes 0 max_execution_time 0 @@ -528,7 +614,9 @@ max_network_bandwidth_for_user 0 max_network_bytes 0 max_number_of_partitions_for_independent_aggregation 128 max_parallel_replicas 1 +max_parser_backtracks 1000000 max_parser_depth 1000 +max_parsing_threads \'auto(16)\' max_partition_size_to_drop 50000000000 max_partitions_per_insert_block 100 max_partitions_to_read -1 @@ -537,6 +625,7 @@ max_query_size 262144 max_read_buffer_size 1048576 max_read_buffer_size_local_fs 131072 max_read_buffer_size_remote_fs 0 +max_recursive_cte_evaluation_depth 1000 max_remote_read_network_bandwidth 0 max_remote_read_network_bandwidth_for_server 0 max_remote_write_network_bandwidth 0 @@ -549,7 +638,7 @@ max_result_rows 0 max_rows_in_distinct 0 max_rows_in_join 0 max_rows_in_set 0 -max_rows_in_set_to_optimize_join 100000 +max_rows_in_set_to_optimize_join 0 max_rows_to_group_by 0 max_rows_to_read 0 max_rows_to_read_leaf 0 @@ -557,6 +646,7 @@ max_rows_to_sort 0 max_rows_to_transfer 0 max_sessions_for_user 0 max_size_to_preallocate_for_aggregation 100000000 +max_size_to_preallocate_for_joins 100000000 max_streams_for_merge_tree_reading 0 max_streams_multiplier_for_merge_tables 5 max_streams_to_max_threads_ratio 1 @@ -592,6 +682,7 @@ merge_tree_min_bytes_per_task_for_remote_reading 4194304 merge_tree_min_rows_for_concurrent_read 163840 merge_tree_min_rows_for_concurrent_read_for_remote_filesystem 163840 merge_tree_min_rows_for_seek 0 +merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0 merge_tree_use_const_size_tasks_for_remote_reading 1 metrics_perf_events_enabled 0 metrics_perf_events_list @@ -604,6 +695,8 @@ min_count_to_compile_expression 3 min_count_to_compile_sort_description 3 min_execution_speed 0 min_execution_speed_bytes 0 +min_external_table_block_size_bytes 268402944 +min_external_table_block_size_rows 1048449 min_free_disk_space_for_temporary_data 0 min_hit_rate_to_use_consecutive_keys_optimization 0.5 min_insert_block_size_bytes 268402944 @@ -619,8 +712,8 @@ mutations_execute_subqueries_on_initiator 0 mutations_max_literal_size_to_replace 16384 mutations_sync 0 mysql_datatypes_support_level -mysql_map_fixed_string_to_text_in_show_columns 0 -mysql_map_string_to_text_in_show_columns 0 +mysql_map_fixed_string_to_text_in_show_columns 1 +mysql_map_string_to_text_in_show_columns 1 mysql_max_rows_to_insert 65536 network_compression_method LZ4 network_zstd_compression_level 1 @@ -647,6 +740,7 @@ optimize_group_by_constant_keys 1 optimize_group_by_function_keys 1 optimize_if_chain_to_multiif 0 optimize_if_transform_strings_to_enum 0 +optimize_injective_functions_in_group_by 1 
optimize_injective_functions_inside_uniq 1 optimize_min_equality_disjunction_chain_length 3 optimize_min_inequality_conjunction_chain_length 3 @@ -664,7 +758,7 @@ optimize_redundant_functions_in_order_by 1 optimize_respect_aliases 1 optimize_rewrite_aggregate_function_with_if 1 optimize_rewrite_array_exists_to_has 0 -optimize_rewrite_sum_if_to_count_if 0 +optimize_rewrite_sum_if_to_count_if 1 optimize_skip_merged_partitions 0 optimize_skip_unused_shards 0 optimize_skip_unused_shards_limit 1000 @@ -674,9 +768,10 @@ optimize_sorting_by_input_stream_properties 1 optimize_substitute_columns 0 optimize_syntax_fuse_functions 0 optimize_throw_if_noop 0 +optimize_time_filter_with_preimage 1 optimize_trivial_approximate_count_query 0 optimize_trivial_count_query 1 -optimize_trivial_insert_select 1 +optimize_trivial_insert_select 0 optimize_uniq_to_count 1 optimize_use_implicit_projections 1 optimize_use_projections 1 @@ -685,13 +780,19 @@ os_thread_priority 0 output_format_arrow_compression_method lz4_frame output_format_arrow_fixed_string_as_fixed_byte_array 1 output_format_arrow_low_cardinality_as_dictionary 0 -output_format_arrow_string_as_string 0 +output_format_arrow_string_as_string 1 +output_format_arrow_use_64_bit_indexes_for_dictionary 0 +output_format_arrow_use_signed_indexes_for_dictionary 1 output_format_avro_codec output_format_avro_rows_in_file 1 output_format_avro_string_column_pattern output_format_avro_sync_interval 16384 +output_format_binary_encode_types_in_binary_format 0 output_format_bson_string_as_string 0 +output_format_compression_level 3 +output_format_compression_zstd_window_log 0 output_format_csv_crlf_end_of_line 0 +output_format_csv_serialize_tuple_into_separate_columns 1 output_format_decimal_trailing_zeros 0 output_format_enable_streaming 0 output_format_json_array_of_rows 0 @@ -705,27 +806,34 @@ output_format_json_skip_null_value_in_named_tuples 0 output_format_json_validate_utf8 0 output_format_markdown_escape_special_characters 0 output_format_msgpack_uuid_representation ext -output_format_orc_compression_method lz4 +output_format_native_encode_types_in_binary_format 0 +output_format_orc_compression_method zstd output_format_orc_row_index_stride 10000 -output_format_orc_string_as_string 0 +output_format_orc_string_as_string 1 output_format_parallel_formatting 1 output_format_parquet_batch_size 1024 output_format_parquet_compliant_nested_types 1 -output_format_parquet_compression_method lz4 +output_format_parquet_compression_method zstd output_format_parquet_data_page_size 1048576 output_format_parquet_fixed_string_as_fixed_byte_array 1 output_format_parquet_parallel_encoding 1 output_format_parquet_row_group_size 1000000 output_format_parquet_row_group_size_bytes 536870912 -output_format_parquet_string_as_string 0 -output_format_parquet_use_custom_encoder 0 +output_format_parquet_string_as_string 1 +output_format_parquet_use_custom_encoder 1 output_format_parquet_version 2.latest -output_format_pretty_color 1 +output_format_parquet_write_page_index 1 +output_format_pretty_color auto +output_format_pretty_display_footer_column_names 1 +output_format_pretty_display_footer_column_names_min_rows 50 output_format_pretty_grid_charset UTF-8 +output_format_pretty_highlight_digit_groups 1 output_format_pretty_max_column_pad_width 250 output_format_pretty_max_rows 10000 output_format_pretty_max_value_width 10000 -output_format_pretty_row_numbers 0 +output_format_pretty_max_value_width_apply_for_single_value 0 +output_format_pretty_row_numbers 1 
+output_format_pretty_single_large_number_tip_threshold 1000000 output_format_protobuf_nullables_with_google_wrappers 0 output_format_schema output_format_sql_insert_include_column_names 1 @@ -734,15 +842,22 @@ output_format_sql_insert_quote_names 1 output_format_sql_insert_table_name table output_format_sql_insert_use_replace 0 output_format_tsv_crlf_end_of_line 0 +output_format_values_escape_quote_with_quote 0 output_format_write_statistics 1 +page_cache_inject_eviction 0 parallel_distributed_insert_select 0 parallel_replica_offset 0 +parallel_replicas_allow_in_with_subquery 1 parallel_replicas_count 0 parallel_replicas_custom_key parallel_replicas_custom_key_filter_type default +parallel_replicas_custom_key_range_lower 0 +parallel_replicas_custom_key_range_upper 0 parallel_replicas_for_non_replicated_merge_tree 0 +parallel_replicas_mark_segment_size 128 parallel_replicas_min_number_of_granules_to_enable 0 parallel_replicas_min_number_of_rows_per_replica 0 +parallel_replicas_prefer_local_join 1 parallel_replicas_single_task_marks_count_multiplier 2 parallel_view_processing 0 parallelize_output_from_storages 1 @@ -755,11 +870,14 @@ parts_to_delay_insert 0 parts_to_throw_insert 0 periodic_live_view_refresh 60 poll_interval 10 +postgresql_connection_attempt_timeout 2 postgresql_connection_pool_auto_close_connection 0 +postgresql_connection_pool_retries 2 postgresql_connection_pool_size 16 postgresql_connection_pool_wait_timeout 5000 precise_float_parsing 0 prefer_column_name_to_alias 0 +prefer_external_sort_block_bytes 16744704 prefer_global_in_and_join 0 prefer_localhost_replica 1 prefer_warmed_unmerged_parts_seconds 0 @@ -767,7 +885,7 @@ preferred_block_size_bytes 1000000 preferred_max_column_in_block_size_bytes 0 preferred_optimize_projection_name prefetch_buffer_size 1048576 -print_pretty_type_names 0 +print_pretty_type_names 1 priority 0 query_cache_compress_entries 1 query_cache_max_entries 0 @@ -778,8 +896,10 @@ query_cache_nondeterministic_function_handling throw query_cache_share_between_users 0 query_cache_squash_partial_results 1 query_cache_store_results_of_queries_with_nondeterministic_functions 0 +query_cache_system_table_handling throw query_cache_ttl 60 query_plan_aggregation_in_order 1 +query_plan_convert_outer_join_to_inner_join 1 query_plan_enable_multithreading_after_window_functions 1 query_plan_enable_optimizations 1 query_plan_execute_functions_after_sorting 1 @@ -788,6 +908,8 @@ query_plan_lift_up_array_join 1 query_plan_lift_up_union 1 query_plan_max_optimizations_to_apply 10000 query_plan_merge_expressions 1 +query_plan_merge_filters 0 +query_plan_optimize_prewhere 1 query_plan_optimize_primary_key 1 query_plan_optimize_projection 1 query_plan_push_down_limit 1 @@ -806,7 +928,9 @@ read_backoff_min_events 2 read_backoff_min_interval_between_events_ms 1000 read_backoff_min_latency_ms 1000 read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 +read_from_page_cache_if_exists_otherwise_bypass_cache 0 read_in_order_two_level_merge_threshold 100 +read_in_order_use_buffering 1 read_overflow_mode throw read_overflow_mode_leaf throw read_priority 0 @@ -835,17 +959,20 @@ result_overflow_mode throw rewrite_count_distinct_if_with_count_distinct_implementation 0 s3_allow_parallel_part_upload 1 s3_check_objects_after_upload 0 +s3_connect_timeout_ms 1000 s3_create_new_file_on_insert 0 s3_disable_checksum 0 -s3_http_connection_pool_size 1000 +s3_ignore_file_doesnt_exist 0 s3_list_object_keys_size 1000 s3_max_connections 1024 s3_max_get_burst 0 s3_max_get_rps 0 
s3_max_inflight_parts_for_one_file 20 +s3_max_part_number 10000 s3_max_put_burst 0 s3_max_put_rps 0 s3_max_redirects 10 +s3_max_single_operation_copy_size 33554432 s3_max_single_part_upload_size 33554432 s3_max_single_read_retries 4 s3_max_unexpected_write_error_retries 4 @@ -860,6 +987,8 @@ s3_truncate_on_insert 0 s3_upload_part_size_multiply_factor 2 s3_upload_part_size_multiply_parts_count_threshold 500 s3_use_adaptive_timeouts 1 +s3_validate_request_settings 1 +s3queue_allow_experimental_sharded_mode 0 s3queue_default_zookeeper_path /clickhouse/s3queue/ s3queue_enable_logging_to_s3queue_log 0 schema_inference_cache_require_modification_time_for_url 1 @@ -887,6 +1016,8 @@ sleep_after_receiving_query_ms 0 sleep_in_send_data_ms 0 sleep_in_send_tables_status_ms 0 sort_overflow_mode throw +split_intersecting_parts_ranges_into_layers_final 1 +split_parts_ranges_into_intersecting_and_non_intersecting_final 1 splitby_max_substrings_includes_remaining_string 0 stop_refreshable_materialized_views_on_startup 0 storage_file_read_method pread @@ -898,8 +1029,10 @@ stream_poll_timeout_ms 500 system_events_show_zero_values 0 table_function_remote_max_addresses 1000 tcp_keep_alive_timeout 290 +temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds 600000 temporary_files_codec LZ4 temporary_live_view_timeout 1 +throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert 1 throw_if_no_data_to_insert 1 throw_on_error_from_cache_on_write_operations 0 throw_on_max_partitions_per_insert_block 1 @@ -912,8 +1045,10 @@ totals_mode after_having_exclusive trace_profile_events 0 transfer_overflow_mode throw transform_null_in 0 +traverse_shadow_remote_data_paths 0 union_default_mode unknown_packet_in_send_data 0 +update_insert_deduplication_token_in_dependent_materialized_views 0 use_cache_for_count_from_files 1 use_client_time_zone 0 use_compact_format_in_distributed_parts_names 1 @@ -923,12 +1058,15 @@ use_index_for_in_with_subqueries 1 use_index_for_in_with_subqueries_max_values 0 use_local_cache_for_remote_storage 1 use_mysql_types_in_show_columns 0 +use_page_cache_for_disks_without_file_cache 0 use_query_cache 0 use_skip_indexes 1 use_skip_indexes_if_final 0 use_structure_from_insertion_table_in_table_functions 2 use_uncompressed_cache 0 +use_variant_as_common_type 0 use_with_fill_by_sorting_prefix 1 +validate_experimental_and_suspicious_types_inside_nested_types 1 validate_polygons 1 wait_changes_become_visible_after_commit_mode wait_unknown wait_for_async_insert 1 diff --git a/tests/queries/0_stateless/02995_new_settings_history.sh b/tests/queries/0_stateless/02995_new_settings_history.sh index 8de98c55b6a..917dacc04b0 100755 --- a/tests/queries/0_stateless/02995_new_settings_history.sh +++ b/tests/queries/0_stateless/02995_new_settings_history.sh @@ -7,12 +7,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # Note that this is a broad check. 
A per version check is done in the upgrade test -# Baseline generated with 23.12.1 -# clickhouse local --query "select name, default from system.settings order by name format TSV" > 02995_baseline_23_12_1.tsv +# Baseline generated with 24.7.1 +# clickhouse local --query "select name, default from system.settings order by name format TSV" > 02995_baseline_24_7_1.tsv $CLICKHOUSE_LOCAL --query " WITH old_settings AS ( - SELECT * FROM file('${CUR_DIR}/02995_baseline_23_12_1.tsv', 'TSV', 'name String, default String') + SELECT * FROM file('${CUR_DIR}/02995_baseline_24_7_1.tsv', 'TSV', 'name String, default String') ), new_settings AS ( @@ -21,7 +21,7 @@ $CLICKHOUSE_LOCAL --query " ) SELECT * FROM ( - SELECT 'PLEASE ADD THE NEW SETTING TO SettingsChangesHistory.h: ' || name || ' WAS ADDED', + SELECT 'PLEASE ADD THE NEW SETTING TO SettingsChangesHistory.cpp: ' || name || ' WAS ADDED', FROM new_settings WHERE (name NOT IN ( SELECT name @@ -29,17 +29,17 @@ $CLICKHOUSE_LOCAL --query " )) AND (name NOT IN ( SELECT arrayJoin(tupleElement(changes, 'name')) FROM system.settings_changes - WHERE splitByChar('.', version())[1] >= '24' + WHERE splitByChar('.', version)[1]::UInt64 >= 24 AND splitByChar('.', version)[2]::UInt64 > 7 )) UNION ALL ( - SELECT 'PLEASE ADD THE SETTING VALUE CHANGE TO SettingsChangesHistory.h: ' || name || ' WAS CHANGED FROM ' || old_settings.default || ' TO ' || new_settings.default, + SELECT 'PLEASE ADD THE SETTING VALUE CHANGE TO SettingsChangesHistory.cpp: ' || name || ' WAS CHANGED FROM ' || old_settings.default || ' TO ' || new_settings.default, FROM new_settings LEFT JOIN old_settings ON new_settings.name = old_settings.name WHERE (new_settings.default != old_settings.default) AND (name NOT IN ( SELECT arrayJoin(tupleElement(changes, 'name')) FROM system.settings_changes - WHERE splitByChar('.', version())[1] >= '24' + WHERE splitByChar('.', version)[1]::UInt64 >= 24 AND splitByChar('.', version)[2]::UInt64 > 7 )) ) ) From b178eea09ec80fed40b5043ccf1635d95b9cf19b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 31 Jul 2024 14:59:17 +0200 Subject: [PATCH 1274/2361] Fix broken settings --- src/Core/SettingsChangesHistory.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 9faf77e9087..ecc558e64d7 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,6 +57,16 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { + {"24.12",{ + }}, + {"24.11",{ + }}, + {"24.10",{ + }}, + {"24.9", { + }}, + {"24.8", {{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, + }}, {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, @@ -76,7 +86,6 @@ static std::initializer_list Date: Wed, 31 Jul 2024 15:11:55 +0200 Subject: [PATCH 1275/2361] Try a less conflict prone format --- src/Core/SettingsChangesHistory.cpp | 709 +++++++++++++++++----------- 1 file changed, 441 insertions(+), 268 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ecc558e64d7..21c89b3c5c5 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,274 +57,447 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. static std::initializer_list> settings_changes_history_initializer = { - {"24.12",{ - }}, - {"24.11",{ - }}, - {"24.10",{ - }}, - {"24.9", { - }}, - {"24.8", {{"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - }}, - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. 
By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, 
"Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. 
This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", 
false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. 
While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in 
milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, + {"24.12", + { + } + }, + {"24.11", + { + } + }, + {"24.10", + { + } + }, + {"24.9", + { + } + }, + {"24.8", + { + {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, + } + }, + {"24.7", + { + {"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, + {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, + {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, + {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, + {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, + {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, + {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, + {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, + {"optimize_trivial_insert_select", true, false, 
"The optimization does not make sense in many cases."}, + {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."}, + {"collect_hash_table_stats_during_joins", false, true, "New setting."}, + {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, + {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, + {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, + {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, + {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, + {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, + {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, + {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, + {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, + {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."}, + {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, + {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."} + } + }, + {"24.6", + { + {"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, + {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, + {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, + {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, + {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, + {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, + {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, + {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, + {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, + {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, + {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table 
engine"}, + {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, + {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, + {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, + {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, + {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, + {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, + {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, + {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, + {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, + {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}, + {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, + {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, + {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, + {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, + {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, + {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, + {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, + {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, + } + }, + {"24.5", + { + {"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, + {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, + {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, + {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, + {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, + {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. 
This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, + {"http_max_chunk_size", 0, 0, "Internal limitation"}, + {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, + {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, + {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, + {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, + {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, + } + }, + {"24.4", + { + {"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, + {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, + {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, + {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, + {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, + {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, + {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, + {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, + {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, + {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, + {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, + {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, + {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, + } + }, + {"24.3", + { + {"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, + {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, + {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, + {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, + {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, + {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, + {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, + 
{"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."}, + {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, + {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, + {"log_processors_profiles", false, true, "Enable by default"}, + {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, + {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"}, + {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, + {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, + {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, + {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, + {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, + {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, + {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, + {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, + {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, + {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, + {"allow_get_client_http_header", false, false, "Introduced a new function."}, + {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, + {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, + {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, + {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, + {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. 
While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."},
+            {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."},
+            {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."},
+            {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."},
+            {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."},
+            {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of concurrently loaded parts in a multipart upload request. 0 means unlimited."},
+            {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."},
+            {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."},
+            {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."},
+            {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."},
+            {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."},
+            {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of interpreting tuples in CSV format was added."},
+            {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of interpreting tuples in CSV format was added."},
+            {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of interpreting tuples in CSV format was added."},
+        }
+    },
+    {"24.2",
+        {
+            {"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"},
+            {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"},
+            {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"},
+            {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"},
+            {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"},
+            {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"},
+            {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."},
+            {"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"},
+            {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"},
+            {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"},
+            {"async_insert_busy_timeout_max_ms", 200, 200, "The maximum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"},
+            {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"},
+            {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential rate at which the adaptive asynchronous insert timeout decreases"},
+            {"format_template_row_format", "", "", "Template row format string can be set directly in query"},
+            {"format_template_resultset_format", "", "", "Template result set format string can be set in query"},
+            {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"},
+            {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"},
+            {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."},
+            {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"},
+            {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."},
+            {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."},
+            {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"},
+            {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory."},
+            {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"},
+            {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"},
+            {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"},
+            {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."},
+            {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."},
+        }
+    },
+    {"24.1",
+        {
+            {"print_pretty_type_names", false, true, "Better user experience."},
+            {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
+            {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},
+            {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"},
+            {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"},
+            {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"},
+            {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"},
+            {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"},
+            {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"},
+            {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"},
+            {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"},
+            {"enable_vertical_final", false, true, "Use vertical final by default"},
+            {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"},
+            {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"},
+            {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"},
+            {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"},
+            {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"},
+            {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"},
+            {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by their arguments in GROUP BY section in analyzer"},
+            {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"},
+            {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"},
+            {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL
optimization"}, + {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"} + } + }, + {"23.12", + { + {"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, + {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, + {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, + {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"} + } + }, + {"23.11", + { + {"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"} + } + }, + {"23.9", + { + {"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, + {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, + {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, + {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, + {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, + {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, + {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."} + } + }, + {"23.8", + { + {"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"} + } + }, + {"23.7", + { + {"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."} + } + }, + {"23.6", + { + {"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, + {"http_receive_timeout", 180, 30, "See http_send_timeout."} + } + }, + {"23.5", + { + {"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, + {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, + {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"},
+            {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}
+        }
+    },
+    {"23.4",
+        {
+            {"allow_suspicious_indices", true, false, "If true, index can be defined with identical expressions"},
+            {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."},
+            {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"},
+            {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"},
+            {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"},
+            {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"},
+            {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}
+        }
+    },
+    {"23.3",
+        {
+            {"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"},
+            {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"},
+            {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input format"},
+            {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"},
+            {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"},
+            {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"},
+            {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}
+        }
+    },
+    {"23.2",
+        {
+            {"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"},
+            {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"},
+            {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"},
+            {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"},
+            {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}
+        }
+    },
+    {"23.1",
+        {
+            {"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"},
+            {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"},
+            {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"},
+            {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"},
+            {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"},
+            {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan.
For example, sorting steps related to ORDER BY clauses in subqueries"} + } + }, + {"22.12", + { + {"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, + {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, + {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"} + } + }, + {"22.11", + { + {"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"} + } + }, + {"22.9", + { + {"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"} + } + }, + {"22.7", + { + {"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, + {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, + {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"} + } + }, + {"22.6", + { + {"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, + {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"} + } + }, + {"22.5", + { + {"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, + {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"} + } + }, + {"22.4", + { + {"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"} + } + }, + {"22.3", + { + {"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"} + } + }, + {"21.12", + { + {"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"} + } + }, + {"21.9", + { + {"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, + {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"} + } + }, + {"21.7", + { + {"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"} + } + }, + {"21.5", + { + {"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"} + } + }, + {"21.3", + { + {"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, + {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, + {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"} + } + }, + {"21.2", + { + {"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"} + } + }, + {"21.1", + { + {"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"},
+            {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"},
+            {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"},
+            {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}
+        }
+    },
+    {"20.10",
+        {
+            {"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to make the behaviour more like what users expect"}
+        }
+    },
+    {"20.7",
+        {
+            {"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UUID of the table in its CREATE query for Engine=Atomic"}
+        }
+    },
+    {"20.5",
+        {
+            {"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"},
+            {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}
+        }
+    },
+    {"20.4",
+        {
+            {"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}
+        }
+    },
+    {"19.18",
+        {
+            {"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}
+        }
+    },
+    {"19.14",
+        {
+            {"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}
+        }
+    },
+    {"19.12",
+        {
+            {"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}
+        }
+    },
+    {"19.5",
+        {
+            {"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}
+        }
+    },
+    {"18.12.17",
+        {
+            {"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}
+        }
+    },
 };

From bca21bc6c735b710ed15061e41790066b5ffd843 Mon Sep 17 00:00:00 2001
From: sakulali
Date: Wed, 31 Jul 2024 21:33:14 +0800
Subject: [PATCH 1276/2361] add test cases

---
 src/Common/Config/ConfigProcessor.cpp           | 11 ++++-------
 ...89_clickhouse_client_config_format.reference |  4 ++++
 .../01889_clickhouse_client_config_format.sh    | 17 +++++++++++++++++
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp
index f0b83c035c8..c4b4a1d5e7e 100644
--- a/src/Common/Config/ConfigProcessor.cpp
+++ b/src/Common/Config/ConfigProcessor.cpp
@@ -654,22 +654,19 @@ XMLDocumentPtr ConfigProcessor::parseConfig(const std::string & config_path)
         while (std::getline(file, line))
         {
             const size_t pos = firstNonWhitespacePos(line);
-            if (pos == std::string::npos)
-                continue;
 
             if (pos < line.size() && '<' == line[pos])
             {
                 maybe_xml = true;
+                break;
             }
-
-            break;
+            else if (pos != std::string::npos)
+                break;
         }
     }
-
     if (maybe_xml)
         return dom_parser.parse(config_path);
-    else
-        return YAMLParser::parse(config_path);
+    return YAMLParser::parse(config_path);
 }
 
 }
diff --git a/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference b/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference
index afe27ddb063..ef0d9ffc538 100644
--- a/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference
+++
b/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference @@ -18,3 +18,7 @@ autodetect xml (non leading whitespaces) 2 autodetect yaml 2 +autodetect invalid xml +Code: 1000, e.code() = 0, SAXParseException: Invalid token in '/config_test.badxml', line 2 column 12, Stack trace (when copying this message, always include the lines below): +autodetect invalid yaml +Code: 585. Unable to parse YAML configuration file /config_test.badyaml, yaml-cpp: error at line 2, column 12: illegal map value. (CANNOT_PARSE_YAML) diff --git a/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh b/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh index 14d67f943f1..9a44ec0d5f5 100755 --- a/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh +++ b/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh @@ -15,6 +15,8 @@ yaml_config=$CLICKHOUSE_TMP/config_$CLICKHOUSE_DATABASE.yaml autodetect_xml_with_leading_whitespace_config=$CLICKHOUSE_TMP/config_$CLICKHOUSE_DATABASE.config autodetect_xml_non_leading_whitespace_config=$CLICKHOUSE_TMP/config_$CLICKHOUSE_DATABASE.cfg autodetect_yaml_config=$CLICKHOUSE_TMP/config_$CLICKHOUSE_DATABASE.properties +autodetect_invalid_xml_config=$CLICKHOUSE_TMP/config_$CLICKHOUSE_DATABASE.badxml +autodetect_invalid_yaml_config=$CLICKHOUSE_TMP/config_$CLICKHOUSE_DATABASE.badyaml function cleanup() { @@ -27,6 +29,8 @@ function cleanup() rm "${autodetect_xml_with_leading_whitespace_config:?}" rm "${autodetect_xml_non_leading_whitespace_config:?}" rm "${autodetect_yaml_config:?}" + rm "${autodetect_invalid_xml_config:?}" + rm "${autodetect_invalid_yaml_config:?}" } trap cleanup EXIT @@ -70,6 +74,15 @@ EOL cat > "$autodetect_yaml_config" < "$autodetect_invalid_xml_config" < + +EOL +cat > "$autodetect_invalid_yaml_config" <&1 |& sed -n '1p' | sed -e "s#$CLICKHOUSE_TMP##" -e "s#Poco::Exception. 
##" +echo 'autodetect invalid yaml' +$CLICKHOUSE_CLIENT --config "$autodetect_invalid_yaml_config" -q "select getSetting('max_threads')" 2>&1 |& sed -n '1p' | sed -e "s#$CLICKHOUSE_TMP##" -e "s#DB::Exception: ##" \ No newline at end of file From 7bd8061979204973d4a1c1ac956ab80fc1c6be38 Mon Sep 17 00:00:00 2001 From: heguangnan Date: Wed, 31 Jul 2024 21:35:55 +0800 Subject: [PATCH 1277/2361] fix test --- .../0_stateless/03214_count_distinct_null_key_memory_leak.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql index d8428ec6b4a..84804e4e016 100644 --- a/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql +++ b/tests/queries/0_stateless/03214_count_distinct_null_key_memory_leak.sql @@ -13,6 +13,6 @@ ORDER BY c SETTINGS index_granularity = 8192, allow_nullable_key=1; INSERT INTO testnull(b,c) SELECT toString(rand64()) AS b, toString(rand64()) AS c FROM numbers(1000000); -SELECT count(distinct b) FROM testnull GROUP BY a SETTINGS max_memory_usage = 54748364; -- {serverError MEMORY_LIMIT_EXCEEDED} +SELECT count(distinct b) FROM testnull GROUP BY a SETTINGS max_memory_usage = 10000000; -- {serverError MEMORY_LIMIT_EXCEEDED} DROP TABLE testnull; \ No newline at end of file From 31c142a96d49fbe1b46b21e4cdad366546dc7864 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Wed, 31 Jul 2024 14:44:54 +0100 Subject: [PATCH 1278/2361] make it possible to rerun test_storage_delta and test_checking_s3_blobs_paranoid --- .../test_checking_s3_blobs_paranoid/test.py | 2 ++ tests/integration/test_storage_delta/test.py | 29 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index dde636b5d29..afe8449b44a 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -61,6 +61,7 @@ def test_upload_after_check_works(cluster, broken_s3): node.query( """ + DROP TABLE IF EXISTS s3_upload_after_check_works; CREATE TABLE s3_upload_after_check_works ( id Int64, data String @@ -631,6 +632,7 @@ def test_no_key_found_disk(cluster, broken_s3): node.query( """ + DROP TABLE IF EXISTS no_key_found_disk; CREATE TABLE no_key_found_disk ( id Int64 ) ENGINE=MergeTree() diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 67cc7cdd6da..698becc18c4 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -52,6 +52,11 @@ def get_spark(): return builder.master("local").getOrCreate() +def remove_local_directory_contents(local_path): + for local_file in glob.glob(local_path + "/**"): + os.unlink(local_file) + + @pytest.fixture(scope="module") def started_cluster(): try: @@ -169,6 +174,9 @@ def test_single_log_file(started_cluster): inserted_data ) + os.unlink(parquet_data_path) + remove_local_directory_contents(f"/{TABLE_NAME}") + def test_partition_by(started_cluster): instance = started_cluster.instances["node1"] @@ -191,6 +199,7 @@ def test_partition_by(started_cluster): create_delta_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 10 + remove_local_directory_contents(f"/{TABLE_NAME}") def test_checkpoint(started_cluster): instance = started_cluster.instances["node1"] @@ -266,6 +275,9 @@ 
def test_checkpoint(started_cluster): ).strip() ) + remove_local_directory_contents(f"/{TABLE_NAME}") + spark.sql(f"DROP TABLE {TABLE_NAME}") + def test_multiple_log_files(started_cluster): instance = started_cluster.instances["node1"] @@ -304,6 +316,8 @@ def test_multiple_log_files(started_cluster): "SELECT number, toString(number + 1) FROM numbers(200)" ) + remove_local_directory_contents(f"/{TABLE_NAME}") + def test_metadata(started_cluster): instance = started_cluster.instances["node1"] @@ -337,6 +351,9 @@ def test_metadata(started_cluster): create_delta_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 + os.unlink(parquet_data_path) + remove_local_directory_contents(f"/{TABLE_NAME}") + def test_types(started_cluster): TABLE_NAME = "test_types" @@ -409,6 +426,9 @@ def test_types(started_cluster): ] ) + remove_local_directory_contents(f"/{result_file}") + spark.sql(f"DROP TABLE {TABLE_NAME}") + def test_restart_broken(started_cluster): instance = started_cluster.instances["node1"] @@ -470,6 +490,9 @@ def test_restart_broken(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 + os.unlink(parquet_data_path) + remove_local_directory_contents(f"/{TABLE_NAME}") + def test_restart_broken_table_function(started_cluster): instance = started_cluster.instances["node1"] @@ -524,6 +547,9 @@ def test_restart_broken_table_function(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 + os.unlink(parquet_data_path) + remove_local_directory_contents(f"/{TABLE_NAME}") + def test_partition_columns(started_cluster): instance = started_cluster.instances["node1"] @@ -721,3 +747,6 @@ SELECT * FROM deltaLake('http://{started_cluster.minio_ip}:{started_cluster.mini ) == 1 ) + + remove_local_directory_contents(f"/{TABLE_NAME}") + spark.sql(f"DROP TABLE {TABLE_NAME}") From 2a2dba63cc0182247754a5a4819cb89f21825bfd Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 31 Jul 2024 13:48:04 +0000 Subject: [PATCH 1279/2361] Automatic style fix --- tests/integration/helpers/cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 7f0a9154be9..2e38aec3512 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2971,6 +2971,7 @@ class ClickHouseCluster: "Trying to create Azurite instance by command %s", " ".join(map(str, azurite_start_cmd)), ) + def logging_azurite_initialization(exception, retry_number, sleep_time): logging.info( f"Azurite initialization failed with error: {exception}" From 7dbd3d75340522195e7d08a725cf5ae116288c8e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 31 Jul 2024 13:51:38 +0000 Subject: [PATCH 1280/2361] Automatic style fix --- tests/integration/test_storage_delta/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 698becc18c4..e485bc90ee0 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -201,6 +201,7 @@ def test_partition_by(started_cluster): remove_local_directory_contents(f"/{TABLE_NAME}") + def test_checkpoint(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session From 27f4f468b976e445e8b0dbc198ea9f0a9c62855b Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Wed, 31 Jul 2024 14:55:00 +0100 Subject: [PATCH 1281/2361] 
make it possible to rerun test_recovery_time_metric multiple times --- tests/integration/test_recovery_time_metric/test.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py index 628f2e744e0..6fcf2fad423 100644 --- a/tests/integration/test_recovery_time_metric/test.py +++ b/tests/integration/test_recovery_time_metric/test.py @@ -21,6 +21,7 @@ def start_cluster(): def test_recovery_time_metric(start_cluster): node.query( """ + DROP DATABASE IF EXISTS rdb; CREATE DATABASE rdb ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1') """ @@ -28,6 +29,7 @@ def test_recovery_time_metric(start_cluster): node.query( """ + DROP TABLE IF EXISTS rdb.t; CREATE TABLE rdb.t ( `x` UInt32 @@ -51,3 +53,9 @@ def test_recovery_time_metric(start_cluster): ).strip() ) assert ret > 0 + + node.query( + """ + DROP DATABASE rdb + """ + ) From d6de2be4395e1bcc62ab32ad1d5b02e9db080303 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 31 Jul 2024 16:08:18 +0200 Subject: [PATCH 1282/2361] Fix build --- src/Planner/findParallelReplicasQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 1140f30ad9c..39edb1e6516 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -144,7 +144,7 @@ public: std::unordered_map replacement_map; }; -QueryTreeNodePtr replaceTablesWithDummyTables(const QueryTreeNodePtr & query, const ContextPtr & context) +QueryTreeNodePtr replaceTablesWithDummyTables(QueryTreeNodePtr query, const ContextPtr & context) { ReplaceTableNodeToDummyVisitor visitor(context); visitor.visit(query); From 9ffbd8f5073e180592a494742d1dc3af4427b55f Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 31 Jul 2024 14:13:43 +0000 Subject: [PATCH 1283/2361] Possible fix --- .../03164_s3_settings_for_queries_and_merges.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql index ac2070fbd76..e43c9ae7717 100644 --- a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql +++ b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql @@ -22,18 +22,18 @@ SELECT count() FROM t_compact_bytes_s3 WHERE NOT ignore(c2, c4); SYSTEM FLUSH LOGS; SELECT - ProfileEvents['S3ReadRequestsCount'], + ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsError'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 FROM system.query_log -WHERE event_date >= yesterday() AND type = 'QueryFinish' +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query ilike '%INSERT INTO t_compact_bytes_s3 SELECT number, number, number%'; SELECT - ProfileEvents['S3ReadRequestsCount'], + ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsError'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 FROM system.query_log -WHERE event_date >= yesterday() AND type = 'QueryFinish' +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query ilike '%OPTIMIZE TABLE t_compact_bytes_s3 FINAL%'; From 67f4792b77f2a2cf0de21ead6e95c3635d26aa88 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 31 Jul 2024 14:16:40 +0000 Subject: [PATCH 1284/2361] Style check 
---
 tests/integration/helpers/cluster.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index 7f0a9154be9..2e38aec3512 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -2971,6 +2971,7 @@ class ClickHouseCluster:
                 "Trying to create Azurite instance by command %s",
                 " ".join(map(str, azurite_start_cmd)),
             )
+
             def logging_azurite_initialization(exception, retry_number, sleep_time):
                 logging.info(
                     f"Azurite initialization failed with error: {exception}"

From 1f1f0528ce3a1fb20ceee5513523787a14718b80 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Wed, 31 Jul 2024 14:32:07 +0000
Subject: [PATCH 1285/2361] Prefer constant to INPUT in PlannerActionsVisitor.

---
 src/Planner/PlannerActionsVisitor.cpp         | 11 +++++++-
 ...lyzer_materialized_constants_bug.reference |  3 +++
 ...15_analyzer_materialized_constants_bug.sql | 26 +++++++++++++++++++
 3 files changed, 39 insertions(+), 1 deletion(-)
 create mode 100644 tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.reference
 create mode 100644 tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql

diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp
index 1960855792c..57457493844 100644
--- a/src/Planner/PlannerActionsVisitor.cpp
+++ b/src/Planner/PlannerActionsVisitor.cpp
@@ -491,7 +491,16 @@ public:
     {
         auto it = node_name_to_node.find(node_name);
         if (it != node_name_to_node.end())
-            return it->second;
+        {
+            /// It is possible that ActionsDAG already has an input with the same name as a constant.
+            /// In this case, prefer constant to input.
+            /// Constants affect function return type, which should be consistent with QueryTree.
+ /// Query example: + /// SELECT materialize(toLowCardinality('b')) || 'a' FROM remote('127.0.0.{1,2}', system, one) GROUP BY 'a' + bool materialized_input = it->second->type == ActionsDAG::ActionType::INPUT && !it->second->column; + if (!materialized_input) + return it->second; + } const auto * node = &actions_dag.addColumn(column); node_name_to_node[node->result_name] = node; diff --git a/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.reference b/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.reference new file mode 100644 index 00000000000..584e34c0cde --- /dev/null +++ b/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.reference @@ -0,0 +1,3 @@ +ba +\N +1 111111111111111111111111111111111111111 diff --git a/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql b/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql new file mode 100644 index 00000000000..f9ec28d09d8 --- /dev/null +++ b/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql @@ -0,0 +1,26 @@ +SET allow_experimental_analyzer = 1; + +SELECT concat(materialize(toLowCardinality('b')), 'a') FROM remote('127.0.0.{1,2}', system, one) GROUP BY 'a'; + +SELECT concat(NULLIF(1, materialize(toLowCardinality(1))), concat(NULLIF(1, 1))) FROM remote('127.0.0.{1,2}', system, one) GROUP BY concat(NULLIF(1, 1)); + +DROP TABLE IF EXISTS test__fuzz_21; +CREATE TABLE test__fuzz_21 +( + `x` Decimal(18, 10) +) +ENGINE = MergeTree +ORDER BY x; + +INSERT INTO test__fuzz_21 VALUES (1), (2), (3); + +WITH ( + SELECT CAST(toFixedString(toFixedString(materialize(toFixedString('111111111111111111111111111111111111111', 39)), 39), 39), 'UInt128') + ) AS v +SELECT + coalesce(materialize(toLowCardinality(toNullable(1))), 10, NULL), + max(v) +FROM remote('127.0.0.{1,2}', default, test__fuzz_21) +GROUP BY + coalesce(NULL), + coalesce(1, 10, 10, materialize(NULL)); From e31569a065d4c81cdea671727c39983d7f3a84e5 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 31 Jul 2024 16:32:37 +0200 Subject: [PATCH 1286/2361] Expect an unknown cluster --- .../0_stateless/03215_analyzer_replace_with_dummy_tables.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql b/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql index 12d2bd627a7..6d084c2ac50 100644 --- a/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql +++ b/tests/queries/0_stateless/03215_analyzer_replace_with_dummy_tables.sql @@ -12,4 +12,4 @@ FROM ) FROM t ) -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, allow_experimental_analyzer = 1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, allow_experimental_analyzer = 1; -- { serverError CLUSTER_DOESNT_EXIST } From 8c36fbf4eddeba9282b53f726976b55f62d3ee19 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 31 Jul 2024 14:42:38 +0000 Subject: [PATCH 1287/2361] Remove unnecessary change --- access/quotas.list | Bin 0 -> 1 bytes access/roles.list | Bin 0 -> 1 bytes access/row_policies.list | Bin 0 -> 1 bytes access/settings_profiles.list | Bin 0 -> 1 bytes access/users.list | Bin 0 -> 1 bytes .../03164_s3_settings_for_queries_and_merges.sql | 4 ++-- 6 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 access/quotas.list create mode 100644 access/roles.list create mode 100644 access/row_policies.list create mode 100644 
access/settings_profiles.list create mode 100644 access/users.list diff --git a/access/quotas.list b/access/quotas.list new file mode 100644 index 0000000000000000000000000000000000000000..f76dd238ade08917e6712764a16a22005a50573d GIT binary patch literal 1 IcmZPo000310RR91 literal 0 HcmV?d00001 diff --git a/access/roles.list b/access/roles.list new file mode 100644 index 0000000000000000000000000000000000000000..f76dd238ade08917e6712764a16a22005a50573d GIT binary patch literal 1 IcmZPo000310RR91 literal 0 HcmV?d00001 diff --git a/access/row_policies.list b/access/row_policies.list new file mode 100644 index 0000000000000000000000000000000000000000..f76dd238ade08917e6712764a16a22005a50573d GIT binary patch literal 1 IcmZPo000310RR91 literal 0 HcmV?d00001 diff --git a/access/settings_profiles.list b/access/settings_profiles.list new file mode 100644 index 0000000000000000000000000000000000000000..f76dd238ade08917e6712764a16a22005a50573d GIT binary patch literal 1 IcmZPo000310RR91 literal 0 HcmV?d00001 diff --git a/access/users.list b/access/users.list new file mode 100644 index 0000000000000000000000000000000000000000..f76dd238ade08917e6712764a16a22005a50573d GIT binary patch literal 1 IcmZPo000310RR91 literal 0 HcmV?d00001 diff --git a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql index e43c9ae7717..94e390537df 100644 --- a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql +++ b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql @@ -25,7 +25,7 @@ SELECT ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsError'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 FROM system.query_log -WHERE type = 'QueryFinish' +WHERE event_date >= yesterday() AND type = 'QueryFinish' AND current_database = currentDatabase() AND query ilike '%INSERT INTO t_compact_bytes_s3 SELECT number, number, number%'; @@ -33,7 +33,7 @@ SELECT ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsError'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 FROM system.query_log -WHERE type = 'QueryFinish' +WHERE event_date >= yesterday() AND type = 'QueryFinish' AND current_database = currentDatabase() AND query ilike '%OPTIMIZE TABLE t_compact_bytes_s3 FINAL%'; From 20ec27f9dc79d7ee81cd06f1587de83c8ce81441 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 31 Jul 2024 14:46:53 +0000 Subject: [PATCH 1288/2361] Remove trach dir --- access/quotas.list | Bin 1 -> 0 bytes access/roles.list | Bin 1 -> 0 bytes access/row_policies.list | Bin 1 -> 0 bytes access/settings_profiles.list | Bin 1 -> 0 bytes access/users.list | Bin 1 -> 0 bytes 5 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 access/quotas.list delete mode 100644 access/roles.list delete mode 100644 access/row_policies.list delete mode 100644 access/settings_profiles.list delete mode 100644 access/users.list diff --git a/access/quotas.list b/access/quotas.list deleted file mode 100644 index f76dd238ade08917e6712764a16a22005a50573d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1 IcmZPo000310RR91 diff --git a/access/roles.list b/access/roles.list deleted file mode 100644 index f76dd238ade08917e6712764a16a22005a50573d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1 IcmZPo000310RR91 diff --git a/access/row_policies.list 
b/access/row_policies.list deleted file mode 100644 index f76dd238ade08917e6712764a16a22005a50573d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1 IcmZPo000310RR91 diff --git a/access/settings_profiles.list b/access/settings_profiles.list deleted file mode 100644 index f76dd238ade08917e6712764a16a22005a50573d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1 IcmZPo000310RR91 diff --git a/access/users.list b/access/users.list deleted file mode 100644 index f76dd238ade08917e6712764a16a22005a50573d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1 IcmZPo000310RR91 From 650737890299f8cad2c77ad46022ee0a37b284eb Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 31 Jul 2024 14:49:30 +0000 Subject: [PATCH 1289/2361] Fix erroe with profile event name --- .../0_stateless/03164_s3_settings_for_queries_and_merges.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql index 94e390537df..001ef382850 100644 --- a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql +++ b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql @@ -22,7 +22,7 @@ SELECT count() FROM t_compact_bytes_s3 WHERE NOT ignore(c2, c4); SYSTEM FLUSH LOGS; SELECT - ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsError'], + ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsErrors'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 FROM system.query_log WHERE event_date >= yesterday() AND type = 'QueryFinish' @@ -30,7 +30,7 @@ WHERE event_date >= yesterday() AND type = 'QueryFinish' AND query ilike '%INSERT INTO t_compact_bytes_s3 SELECT number, number, number%'; SELECT - ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsError'], + ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsErrors'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 FROM system.query_log WHERE event_date >= yesterday() AND type = 'QueryFinish' From c3c653e7692a755c3467b77e866555734d50ef50 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 31 Jul 2024 14:58:52 +0000 Subject: [PATCH 1290/2361] Better --- src/Interpreters/DatabaseCatalog.cpp | 2 ++ .../config.d/database_catalog_drop_table_concurrency.xml | 3 +++ tests/config/install.sh | 1 + 3 files changed, 6 insertions(+) create mode 100644 tests/config/config.d/database_catalog_drop_table_concurrency.xml diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index a8e5fd7e6aa..48b01a9df43 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1406,6 +1406,8 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) return !tables_marked_dropped_ids.contains(uuid) || is_shutting_down; }); + LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), tables_marked_dropped_ids).contains(uuid) ? 
"table still exists" : "table dropped successfully"); + /// TSA doesn't support unique_lock if (TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid)) throw Exception(ErrorCodes::UNFINISHED, "Did not finish dropping the table with UUID {} because the server is shutting down, " diff --git a/tests/config/config.d/database_catalog_drop_table_concurrency.xml b/tests/config/config.d/database_catalog_drop_table_concurrency.xml new file mode 100644 index 00000000000..ac118625f4e --- /dev/null +++ b/tests/config/config.d/database_catalog_drop_table_concurrency.xml @@ -0,0 +1,3 @@ + + 256 + diff --git a/tests/config/install.sh b/tests/config/install.sh index 1b0edc5fc16..7c4b36dc4bd 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -21,6 +21,7 @@ ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/blob_storage_log.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/database_catalog_drop_table_concurrency.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/enable_access_control_improvements.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/ From 5ab8c0357a84d0265bdb43f43795b06317854772 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 31 Jul 2024 16:30:14 +0200 Subject: [PATCH 1291/2361] Fix potential busy loop in keepFreeSpaceRatioFunc --- src/Interpreters/Cache/FileCache.cpp | 30 +++++++++++++------ src/Interpreters/Cache/IFileCachePriority.h | 8 ++++- .../Cache/LRUFileCachePriority.cpp | 21 +++++++++++-- src/Interpreters/Cache/LRUFileCachePriority.h | 2 +- .../Cache/SLRUFileCachePriority.cpp | 15 ++++++---- .../Cache/SLRUFileCachePriority.h | 2 +- 6 files changed, 57 insertions(+), 21 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index a88c0de2cfe..bf8dd24a1db 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -998,18 +998,19 @@ void FileCache::freeSpaceRatioKeepingThreadFunc() FileCacheReserveStat stat; EvictionCandidates eviction_candidates; - bool limits_satisfied = true; + IFileCachePriority::DesiredSizeStatus desired_size_status; try { /// Collect at most `keep_up_free_space_remove_batch` elements to evict, /// (we use batches to make sure we do not block cache for too long, /// by default the batch size is quite small). - limits_satisfied = main_priority->collectCandidatesForEviction( + desired_size_status = main_priority->collectCandidatesForEviction( desired_size, desired_elements_num, keep_up_free_space_remove_batch, stat, eviction_candidates, lock); #ifdef DEBUG_OR_SANITIZER_BUILD /// Let's make sure that we correctly processed the limits. 
- if (limits_satisfied && eviction_candidates.size() < keep_up_free_space_remove_batch) + if (desired_size_status == IFileCachePriority::DesiredSizeStatus::SUCCESS + && eviction_candidates.size() < keep_up_free_space_remove_batch) { const auto current_size = main_priority->getSize(lock); chassert(current_size >= stat.total_stat.releasable_size); @@ -1063,13 +1064,24 @@ void FileCache::freeSpaceRatioKeepingThreadFunc() watch.stop(); ProfileEvents::increment(ProfileEvents::FilesystemCacheFreeSpaceKeepingThreadWorkMilliseconds, watch.elapsedMilliseconds()); - LOG_TRACE(log, "Free space ratio keeping thread finished in {} ms", watch.elapsedMilliseconds()); + LOG_TRACE(log, "Free space ratio keeping thread finished in {} ms (status: {})", + watch.elapsedMilliseconds(), desired_size_status); [[maybe_unused]] bool scheduled = false; - if (limits_satisfied) - scheduled = keep_up_free_space_ratio_task->scheduleAfter(general_reschedule_ms); - else - scheduled = keep_up_free_space_ratio_task->schedule(); + switch (desired_size_status) + { + case IFileCachePriority::DesiredSizeStatus::SUCCESS: [[fallthrough]]; + case IFileCachePriority::DesiredSizeStatus::CANNOT_EVICT: + { + scheduled = keep_up_free_space_ratio_task->scheduleAfter(general_reschedule_ms); + break; + } + case IFileCachePriority::DesiredSizeStatus::REACHED_MAX_CANDIDATES_LIMIT: + { + scheduled = keep_up_free_space_ratio_task->schedule(); + break; + } + } chassert(scheduled); } @@ -1546,7 +1558,7 @@ void FileCache::applySettingsIfPossible(const FileCacheSettings & new_settings, FileCacheReserveStat stat; if (main_priority->collectCandidatesForEviction( new_settings.max_size, new_settings.max_elements, 0/* max_candidates_to_evict */, - stat, eviction_candidates, cache_lock)) + stat, eviction_candidates, cache_lock) == IFileCachePriority::DesiredSizeStatus::SUCCESS) { if (eviction_candidates.size() == 0) { diff --git a/src/Interpreters/Cache/IFileCachePriority.h b/src/Interpreters/Cache/IFileCachePriority.h index 5d8eb9dd54a..9885ab00f78 100644 --- a/src/Interpreters/Cache/IFileCachePriority.h +++ b/src/Interpreters/Cache/IFileCachePriority.h @@ -151,7 +151,13 @@ public: /// and `desired_elements_num` as current cache state. /// Collect no more than `max_candidates_to_evict` elements. /// Return `true` if the first condition is satisfied. 
- virtual bool collectCandidatesForEviction( + enum class DesiredSizeStatus + { + SUCCESS, + CANNOT_EVICT, + REACHED_MAX_CANDIDATES_LIMIT, + }; + virtual DesiredSizeStatus collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, diff --git a/src/Interpreters/Cache/LRUFileCachePriority.cpp b/src/Interpreters/Cache/LRUFileCachePriority.cpp index ec96eb14a8a..7970eaa3e13 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/LRUFileCachePriority.cpp @@ -323,7 +323,7 @@ bool LRUFileCachePriority::collectCandidatesForEviction( } } -bool LRUFileCachePriority::collectCandidatesForEviction( +IFileCachePriority::DesiredSizeStatus LRUFileCachePriority::collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, @@ -336,12 +336,24 @@ bool LRUFileCachePriority::collectCandidatesForEviction( return canFit(0, 0, stat.total_stat.releasable_size, stat.total_stat.releasable_count, lock, &desired_size, &desired_elements_count); }; + auto status = DesiredSizeStatus::CANNOT_EVICT; auto stop_condition = [&]() { - return desired_limits_satisfied() || (max_candidates_to_evict && res.size() >= max_candidates_to_evict); + if (desired_limits_satisfied()) + { + status = DesiredSizeStatus::SUCCESS; + return true; + } + if (max_candidates_to_evict && res.size() >= max_candidates_to_evict) + { + status = DesiredSizeStatus::REACHED_MAX_CANDIDATES_LIMIT; + return true; + } + return false; }; iterateForEviction(res, stat, stop_condition, lock); - return desired_limits_satisfied(); + chassert(status != DesiredSizeStatus::SUCCESS || stop_condition()); + return status; } void LRUFileCachePriority::iterateForEviction( @@ -350,6 +362,9 @@ void LRUFileCachePriority::iterateForEviction( StopConditionFunc stop_condition, const CachePriorityGuard::Lock & lock) { + if (stop_condition()) + return; + ProfileEvents::increment(ProfileEvents::FilesystemCacheEvictionTries); IterateFunc iterate_func = [&](LockedKey & locked_key, const FileSegmentMetadataPtr & segment_metadata) diff --git a/src/Interpreters/Cache/LRUFileCachePriority.h b/src/Interpreters/Cache/LRUFileCachePriority.h index e0691cade43..9bced106727 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.h +++ b/src/Interpreters/Cache/LRUFileCachePriority.h @@ -63,7 +63,7 @@ public: const UserID & user_id, const CachePriorityGuard::Lock &) override; - bool collectCandidatesForEviction( + DesiredSizeStatus collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.cpp b/src/Interpreters/Cache/SLRUFileCachePriority.cpp index 7a3fdf5160e..dc0df223cb0 100644 --- a/src/Interpreters/Cache/SLRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/SLRUFileCachePriority.cpp @@ -256,7 +256,7 @@ bool SLRUFileCachePriority::collectCandidatesForEvictionInProtected( return true; } -bool SLRUFileCachePriority::collectCandidatesForEviction( +IFileCachePriority::DesiredSizeStatus SLRUFileCachePriority::collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, @@ -268,7 +268,7 @@ bool SLRUFileCachePriority::collectCandidatesForEviction( const auto desired_probationary_elements_num = getRatio(desired_elements_count, 1 - size_ratio); FileCacheReserveStat probationary_stat; - const bool probationary_limit_satisfied = probationary_queue.collectCandidatesForEviction( + const auto 
probationary_desired_size_status = probationary_queue.collectCandidatesForEviction( desired_probationary_size, desired_probationary_elements_num, max_candidates_to_evict, probationary_stat, res, lock); @@ -285,14 +285,14 @@ bool SLRUFileCachePriority::collectCandidatesForEviction( chassert(!max_candidates_to_evict || res.size() <= max_candidates_to_evict); chassert(res.size() == stat.total_stat.releasable_count); - if (max_candidates_to_evict && res.size() >= max_candidates_to_evict) - return probationary_limit_satisfied; + if (probationary_desired_size_status == DesiredSizeStatus::REACHED_MAX_CANDIDATES_LIMIT) + return probationary_desired_size_status; const auto desired_protected_size = getRatio(desired_size, size_ratio); const auto desired_protected_elements_num = getRatio(desired_elements_count, size_ratio); FileCacheReserveStat protected_stat; - const bool protected_limit_satisfied = protected_queue.collectCandidatesForEviction( + const auto protected_desired_size_status = protected_queue.collectCandidatesForEviction( desired_protected_size, desired_protected_elements_num, max_candidates_to_evict - res.size(), protected_stat, res, lock); @@ -306,7 +306,10 @@ bool SLRUFileCachePriority::collectCandidatesForEviction( desired_protected_size, desired_protected_elements_num, protected_queue.getStateInfoForLog(lock)); - return probationary_limit_satisfied && protected_limit_satisfied; + if (probationary_desired_size_status == DesiredSizeStatus::SUCCESS) + return protected_desired_size_status; + else + return probationary_desired_size_status; } void SLRUFileCachePriority::downgrade(IteratorPtr iterator, const CachePriorityGuard::Lock & lock) diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.h b/src/Interpreters/Cache/SLRUFileCachePriority.h index 2102a0ec558..e6d20e0d0ee 100644 --- a/src/Interpreters/Cache/SLRUFileCachePriority.h +++ b/src/Interpreters/Cache/SLRUFileCachePriority.h @@ -58,7 +58,7 @@ public: const UserID & user_id, const CachePriorityGuard::Lock &) override; - bool collectCandidatesForEviction( + DesiredSizeStatus collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, From 19cd00000373a5707178214744444b4d8c4034a5 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 31 Jul 2024 17:18:55 +0200 Subject: [PATCH 1292/2361] Update src/Interpreters/DatabaseCatalog.cpp --- src/Interpreters/DatabaseCatalog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 48b01a9df43..56d9c323d39 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1406,7 +1406,7 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) return !tables_marked_dropped_ids.contains(uuid) || is_shutting_down; }); - LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), tables_marked_dropped_ids).contains(uuid) ? "table still exists" : "table dropped successfully"); + LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), tables_marked_dropped_ids.contains(uuid) ? 
"table still exists" : "table dropped successfully"); /// TSA doesn't support unique_lock if (TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid)) From a45027f22c73b321f1f9cc110f0782fac10d5748 Mon Sep 17 00:00:00 2001 From: serxa Date: Wed, 31 Jul 2024 15:22:48 +0000 Subject: [PATCH 1293/2361] Fix flaky `test_delayed_replica_failover` --- tests/integration/test_delayed_replica_failover/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index a480ee3f278..c545877797a 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -101,7 +101,7 @@ SELECT sum(x) FROM distributed WITH TOTALS SETTINGS # allow pings to zookeeper to timeout (must be greater than ZK session timeout). for _ in range(30): try: - node_2_2.query("SELECT * FROM system.zookeeper where path = '/'") + node_2_2.query("SELECT * FROM system.zookeeper where path = '/' SETTINGS insert_keeper_max_retries = 0") time.sleep(0.5) except: break @@ -120,7 +120,7 @@ SELECT sum(x) FROM distributed SETTINGS == "3" ) - # Regression for skip_unavailable_shards in conjunction with skip_unavailable_shards + # Prefer fallback_to_stale_replicas over skip_unavailable_shards assert ( instance_with_dist_table.query( """ From 33ed33d2af0821785165f0ffe80e3c8086c637e2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1294/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From f032c015ca9ec10b7938bbf3d67bb6181776d24a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 31 Jul 2024 15:40:37 +0000 Subject: [PATCH 1295/2361] Ignore some tests --- tests/queries/0_stateless/00705_drop_create_merge_tree.sh | 4 ++-- .../0_stateless/01019_alter_materialized_view_atomic.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index fd002668696..ea8b9d02e49 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT & -yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT & +yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & +yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & wait ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table" diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh index eb12a76eb62..4bd21fcee02 100755 --- a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh +++ b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT < Date: Wed, 31 Jul 2024 15:41:37 +0000 Subject: [PATCH 1296/2361] Fix build --- src/Interpreters/DatabaseCatalog.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 56d9c323d39..273e5720679 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1406,10 +1406,11 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) return !tables_marked_dropped_ids.contains(uuid) || is_shutting_down; }); - LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), tables_marked_dropped_ids.contains(uuid) ? "table still exists" : "table dropped successfully"); - /// TSA doesn't support unique_lock - if (TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid)) + const bool has_table = TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid); + LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), has_table ? 
"table still exists" : "table dropped successfully"); + + if has_table) throw Exception(ErrorCodes::UNFINISHED, "Did not finish dropping the table with UUID {} because the server is shutting down, " "will finish after restart", uuid); } From af324f69e955310f54e451d1120e8526ac3150fb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:56:20 +0200 Subject: [PATCH 1297/2361] Update test --- tests/queries/1_stateful/00158_cache_dictionary_has.sql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/1_stateful/00158_cache_dictionary_has.sql b/tests/queries/1_stateful/00158_cache_dictionary_has.sql index 32c109417de..631a7751550 100644 --- a/tests/queries/1_stateful/00158_cache_dictionary_has.sql +++ b/tests/queries/1_stateful/00158_cache_dictionary_has.sql @@ -10,6 +10,8 @@ SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits' PA LIFETIME(MIN 300 MAX 600) LAYOUT(CACHE(SIZE_IN_CELLS 100 QUERY_WAIT_TIMEOUT_MILLISECONDS 600000)); +SET timeout_before_checking_execution_speed = 300; + SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 1400 == 0 LIMIT 100); SELECT count() from test.hits PREWHERE WatchID % 1400 == 0; @@ -20,4 +22,4 @@ SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) a SELECT count() from test.hits PREWHERE WatchID % 5 == 0; DROP DICTIONARY IF EXISTS db_dict.cache_hits; -DROP DATABASE IF EXISTS db_dict; +DROP DATABASE IF EXISTS db_dict; From 8afe61e04581d0b95ac2d6e927bb9d2427247c7a Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 31 Jul 2024 17:58:41 +0200 Subject: [PATCH 1298/2361] Better --- src/Interpreters/DatabaseCatalog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 273e5720679..fb4fad85f66 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -1410,7 +1410,7 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) const bool has_table = TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid); LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), has_table ? 
"table still exists" : "table dropped successfully"); - if has_table) + if (has_table) throw Exception(ErrorCodes::UNFINISHED, "Did not finish dropping the table with UUID {} because the server is shutting down, " "will finish after restart", uuid); } From 2a47bb5d2ba777a7220ce6fb2afd3eebdb40afce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 18:36:46 +0200 Subject: [PATCH 1299/2361] Fix build --- src/Storages/Kafka/StorageKafkaCommon.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 13713ef6c43..883eae95a7f 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -767,8 +767,6 @@ void drainConsumer( void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler) { - assert(current == messages.begin()); - size_t skipped = std::erase_if( messages, [&](auto & message) From f9c9d85e4109511bed14f5e7edb0f31b0bf0beae Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 31 Jul 2024 16:50:56 +0000 Subject: [PATCH 1300/2361] Automatic style fix --- tests/integration/test_delayed_replica_failover/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index c545877797a..1116d225b8c 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -101,7 +101,9 @@ SELECT sum(x) FROM distributed WITH TOTALS SETTINGS # allow pings to zookeeper to timeout (must be greater than ZK session timeout). for _ in range(30): try: - node_2_2.query("SELECT * FROM system.zookeeper where path = '/' SETTINGS insert_keeper_max_retries = 0") + node_2_2.query( + "SELECT * FROM system.zookeeper where path = '/' SETTINGS insert_keeper_max_retries = 0" + ) time.sleep(0.5) except: break From 646b7e53d7c865bd16a5017d1a0d2e71f0a39cab Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 31 Jul 2024 17:06:30 +0000 Subject: [PATCH 1301/2361] add timeout reset --- src/Server/TCPHandler.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 13ec2ab102e..76295a9f45e 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -690,6 +690,9 @@ void TCPHandler::runImpl() exception = std::make_unique(Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception")); } + /// In case of exception state was not reset, so socket's timouts must be reset explicitly + state.timeout_setter.reset(); + try { if (exception) From 743d63767a74f41b3628c52ccf166be773baecf2 Mon Sep 17 00:00:00 2001 From: serxa Date: Wed, 31 Jul 2024 17:06:49 +0000 Subject: [PATCH 1302/2361] fix AsyncLoader destruction race --- src/Common/AsyncLoader.cpp | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/src/Common/AsyncLoader.cpp b/src/Common/AsyncLoader.cpp index 6264eb03106..d40e320e741 100644 --- a/src/Common/AsyncLoader.cpp +++ b/src/Common/AsyncLoader.cpp @@ -218,20 +218,27 @@ AsyncLoader::~AsyncLoader() { // All `LoadTask` objects should be destructed before AsyncLoader destruction because they hold a reference. // To make sure we check for all pending jobs to be finished. 
- std::unique_lock lock{mutex}; - if (scheduled_jobs.empty() && finished_jobs.empty()) - return; + { + std::unique_lock lock{mutex}; + if (!scheduled_jobs.empty() || !finished_jobs.empty()) + { + std::vector scheduled; + std::vector finished; + scheduled.reserve(scheduled_jobs.size()); + finished.reserve(finished_jobs.size()); + for (const auto & [job, _] : scheduled_jobs) + scheduled.push_back(job->name); + for (const auto & job : finished_jobs) + finished.push_back(job->name); + LOG_ERROR(log, "Bug. Destruction with pending ({}) and finished ({}) load jobs.", fmt::join(scheduled, ", "), fmt::join(finished, ", ")); + abort(); + } + } - std::vector scheduled; - std::vector finished; - scheduled.reserve(scheduled_jobs.size()); - finished.reserve(finished_jobs.size()); - for (const auto & [job, _] : scheduled_jobs) - scheduled.push_back(job->name); - for (const auto & job : finished_jobs) - finished.push_back(job->name); - LOG_ERROR(log, "Bug. Destruction with pending ({}) and finished ({}) load jobs.", fmt::join(scheduled, ", "), fmt::join(finished, ", ")); - abort(); + // When all jobs are done we could still have finalizing workers. + // These workers could call updateCurrentPriorityAndSpawn() that scans all pools. + // We need to stop all of them before destructing any of them. + stop(); } void AsyncLoader::start() From e7fc206069796f662bb31aab671cbf75b95984ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 17:20:01 +0000 Subject: [PATCH 1303/2361] Add test --- .../format_schemas/string_key_value.capnp | 6 + .../format_schemas/string_key_value.format | 1 + .../format_schemas/string_key_value.proto | 6 + .../clickhouse_path/format_schemas/test.capnp | 2 +- tests/integration/test_storage_kafka/test.py | 129 ++++++++++++++++++ 5 files changed, 143 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp create mode 100644 tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format create mode 100644 tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp new file mode 100644 index 00000000000..4f3eabe22f0 --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp @@ -0,0 +1,6 @@ +@0x99f75f775fe63dae; + +struct StringKeyValuePair { + key@0 : Text; + value@1 : Text; +} diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format new file mode 100644 index 00000000000..83dff6ce401 --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format @@ -0,0 +1 @@ +(key = ${key:CSV}, value = ${value:CSV}) diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto new file mode 100644 index 00000000000..71905c63bdf --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; + +message StringKeyValuePair { + string key = 1; + string value = 2; +} diff --git 
a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp index 44f1961205b..247e7b9ceca 100644 --- a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp @@ -7,4 +7,4 @@ struct TestRecordStruct val1 @2 : Text; val2 @3 : Float32; val3 @4 : UInt8; -} \ No newline at end of file +} diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 8393e88db88..596933c1566 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -5097,6 +5097,135 @@ def test_multiple_read_in_materialized_views(kafka_cluster, max_retries=15): ) +def test_kafka_produce_http_interface_row_based_format(kafka_cluster): + # reproduction of #https://github.com/ClickHouse/ClickHouse/issues/61060 with validating the written messages + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + + + topic_prefix = "http_row_" + + # It is important to have: + # - long enough messages + # - enough messages + # I don't know the exact requirement for message sizes, but it doesn't reproduce with short messages + # For the number of messages it seems like at least 3 messages is necessary + expected_key = "01234567890123456789" + expected_value = "aaaaabbbbbccccc" + + insert_query_end = f"(key, value) VALUES ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}')" + insert_query_template = "INSERT INTO {table_name} " + insert_query_end + + extra_settings = { + "Protobuf": ", kafka_schema = 'string_key_value.proto:StringKeyValuePair'", + "CapnProto": ", kafka_schema='string_key_value:StringKeyValuePair'", + "Template": ", format_template_row='string_key_value.format'" + } + + # Only the formats that can be used both and input and output format are tested + # Reasons to exclude following formats: + # - JSONStrings: not actually an input format + # - ProtobufSingle: I cannot make it work to parse the messages. Probably something is broken, + # because the producer can write multiple rows into a same message, which makes them impossible to parse properly. Should added after #67549 is fixed. 
+ # - ProtobufList: I didn't want to deal with the envelope and stuff + # - Npy: supports only single column + # - LineAsString: supports only single column + # - RawBLOB: supports only single column + formats_to_test = [ + "TabSeparated", + "TabSeparatedRaw", + "TabSeparatedWithNames", + "TabSeparatedWithNamesAndTypes", + "TabSeparatedRawWithNames", + "TabSeparatedRawWithNamesAndTypes", + "Template", + "CSV", + "CSVWithNames", + "CSVWithNamesAndTypes", + "CustomSeparated", + "CustomSeparatedWithNames", + "CustomSeparatedWithNamesAndTypes", + "Values", + "JSON", + "JSONColumns", + "JSONColumnsWithMetadata", + "JSONCompact", + "JSONCompactColumns", + "JSONEachRow", + "JSONStringsEachRow", + "JSONCompactEachRow", + "JSONCompactEachRowWithNames", + "JSONCompactEachRowWithNamesAndTypes", + "JSONCompactStringsEachRow", + "JSONCompactStringsEachRowWithNames", + "JSONCompactStringsEachRowWithNamesAndTypes", + "JSONObjectEachRow", + "BSONEachRow", + "TSKV", + "Protobuf", + "Avro", + "Parquet", + "Arrow", + "ArrowStream", + "ORC", + "RowBinary", + "RowBinaryWithNames", + "RowBinaryWithNamesAndTypes", + "Native", + "CapnProto", + "MsgPack", + ] + for format in formats_to_test: + logging.debug(f"Creating tables and writing messages to {format}") + topic = topic_prefix + format + kafka_create_topic(admin_client, topic) + + extra_setting = extra_settings.get(format, "") + + # kafka_max_rows_per_message is set to 2 to make sure every format produces at least 2 messages, thus increasing the chance of catching a bug + instance.query( + f""" + DROP TABLE IF EXISTS test.view_{topic}; + DROP TABLE IF EXISTS test.consumer_{topic}; + CREATE TABLE test.kafka_writer_{topic} (key String, value String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic}', + kafka_group_name = '{topic}', + kafka_format = '{format}', + kafka_max_rows_per_message = 2 {extra_setting}; + + CREATE TABLE test.kafka_{topic} (key String, value String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic}', + kafka_group_name = '{topic}', + kafka_format = '{format}' {extra_setting}; + + CREATE MATERIALIZED VIEW test.view_{topic} Engine=Log AS + SELECT key, value FROM test.kafka_{topic}; + """ + ) + + instance.http_query(insert_query_template.format(table_name="test.kafka_writer_"+topic), method="POST") + + expected = f"""\ +{expected_key}\t{expected_value} +{expected_key}\t{expected_value} +{expected_key}\t{expected_value} +""" + # give some times for the readers to read the messages + for format in formats_to_test: + logging.debug(f"Checking result for {format}") + topic = topic_prefix + format + + result = instance.query_with_retry(f"SELECT * FROM test.view_{topic}", check_callback=lambda res: res.count("\n") == 3) + + assert TSV(result) == TSV(expected) + + kafka_delete_topic(admin_client, topic) + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") From 461251b519120dd6b0cb64471bfba70160137e50 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 19:21:47 +0200 Subject: [PATCH 1304/2361] Update a test --- .../02450_kill_distributed_query_deadlock.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh b/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh index 445f907bcc5..96692ba325a 100755 --- a/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh +++ 
b/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh @@ -5,20 +5,24 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -# Test that running distributed query and cancel it ASAP, -# this can trigger a hung/deadlock in ProcessorList. -for i in {1..50}; do +# Test that runs a distributed query and cancels it ASAP, +# this has a chance to trigger a hung/deadlock in ProcessorList. +for i in {1..50} +do query_id="$CLICKHOUSE_TEST_UNIQUE_NAME-$i" - $CLICKHOUSE_CLIENT --format Null --query_id "$query_id" --max_rows_to_read 0 -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null & - while :; do + $CLICKHOUSE_CLIENT --format Null --query_id "$query_id" --max_rows_to_read 0 --max_bytes_to_read 0 --max_result_rows 0 --max_result_bytes 0 -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null & + while true + do killed_queries="$($CLICKHOUSE_CLIENT -q "kill query where query_id = '$query_id' sync" | wc -l)" - if [[ "$killed_queries" -ge 1 ]]; then + if [[ "$killed_queries" -ge 1 ]] + then break fi done wait -n query_return_status=$? - if [[ $query_return_status -eq 0 ]]; then + if [[ $query_return_status -eq 0 ]] + then echo "Query $query_id should be cancelled, however it returns successfully" fi done From 2b79da36c0701bb9ca392fddd9129a7e0e04ef3f Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 31 Jul 2024 19:26:45 +0200 Subject: [PATCH 1305/2361] Update 01605_adaptive_granularity_block_borders.sql --- .../0_stateless/01605_adaptive_granularity_block_borders.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql index 5f09dc423b2..f9b8bb1c1c6 100644 --- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql +++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql @@ -1,4 +1,4 @@ --- Tags: no-random-merge-tree-settings, no-tsan, no-debug, no-object-storage, no-distributed-cache +-- Tags: long, no-random-merge-tree-settings, no-tsan, no-debug, no-object-storage, no-distributed-cache -- no-tsan: too slow -- no-object-storage: for remote tables we use thread pool even when reading with one stream, so memory consumption is higher From bfb0133f26f85e64f2d608595b653c20a594abca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1306/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From bada9ea9be4888ab4ef1b3f7fcdf015e11d994d1 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 31 Jul 2024 17:30:24 +0000 Subject: [PATCH 1307/2361] Automatic style fix --- 
tests/integration/test_storage_kafka/test.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 596933c1566..dd0bf1bf28f 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -5103,7 +5103,6 @@ def test_kafka_produce_http_interface_row_based_format(kafka_cluster): bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) - topic_prefix = "http_row_" # It is important to have: @@ -5120,7 +5119,7 @@ def test_kafka_produce_http_interface_row_based_format(kafka_cluster): extra_settings = { "Protobuf": ", kafka_schema = 'string_key_value.proto:StringKeyValuePair'", "CapnProto": ", kafka_schema='string_key_value:StringKeyValuePair'", - "Template": ", format_template_row='string_key_value.format'" + "Template": ", format_template_row='string_key_value.format'", } # Only the formats that can be used both and input and output format are tested @@ -5208,7 +5207,10 @@ def test_kafka_produce_http_interface_row_based_format(kafka_cluster): """ ) - instance.http_query(insert_query_template.format(table_name="test.kafka_writer_"+topic), method="POST") + instance.http_query( + insert_query_template.format(table_name="test.kafka_writer_" + topic), + method="POST", + ) expected = f"""\ {expected_key}\t{expected_value} @@ -5220,12 +5222,16 @@ def test_kafka_produce_http_interface_row_based_format(kafka_cluster): logging.debug(f"Checking result for {format}") topic = topic_prefix + format - result = instance.query_with_retry(f"SELECT * FROM test.view_{topic}", check_callback=lambda res: res.count("\n") == 3) + result = instance.query_with_retry( + f"SELECT * FROM test.view_{topic}", + check_callback=lambda res: res.count("\n") == 3, + ) assert TSV(result) == TSV(expected) kafka_delete_topic(admin_client, topic) + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") From 23fa85e3ff269fac0ad6aa8a9122f90ff4d70d28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 19:30:58 +0200 Subject: [PATCH 1308/2361] Apply suggestions from code review Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- docs/en/engines/table-engines/integrations/kafka.md | 2 +- src/Storages/Kafka/StorageKafka2.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 389bb6c9029..38a9d696067 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -284,7 +284,7 @@ SETTINGS allow_experimental_kafka_offsets_storage_in_keeper=1; ### Known limitations As the new engine is experimental, it is not production ready yet. There are few known limitations of the implementation: - - The biggest limitation is the engine doesn't support direct reading from Kafka topic (insertion works, but reading doesn't), thus the direct `SELECT` queries will fail. + - The biggest limitation is the engine doesn't support direct reading. Reading from the engine using materialized views and writing to the engine work, but direct reading doesn't. As a result, all direct `SELECT` queries will fail. - Rapidly dropping and recreating the table or specifying the same ClickHouse Keeper path to different engines might cause issues. 
As best practice you can use the `{uuid}` in `kafka_keeper_path` to avoid clashing paths. - To make repeatable reads, messages cannot be consumed from multiple partitions on a single thread. On the other hand, the Kafka consumers have to be polled regularly to keep them alive. As a result of these two objectives, we decided to only allow creating multiple consumers if `kafka_thread_per_consumer` is enabled, otherwise it is too complicated to avoid issues regarding polling consumers regularly. - Consumers created by the new storage engine do not show up in [`system.kafka_consumers`](../../../operations/system-tables/kafka_consumers.md) table. diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 058bff18f56..318c04f1f91 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -40,7 +40,7 @@ using KafkaConsumer2Ptr = std::shared_ptr; /// It is similar to the already existing StorageKafka, it instead of storing the offsets /// in Kafka, its main source of information about offsets is Keeper. On top of the /// offsets, it also stores the number of messages (intent size) it tried to insert from -/// each topic. By storing the intent sizes it possible to retry the same batch of +/// each topic. By storing the intent sizes it is possible to retry the same batch of /// messages in case of any errors and giving deduplication a chance to deduplicate /// blocks. /// From 2c6c5c5c94a2f4131d898b172d6af285eec07c2f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 19:31:22 +0200 Subject: [PATCH 1309/2361] Add an assertion --- src/Common/TimerDescriptor.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Common/TimerDescriptor.cpp b/src/Common/TimerDescriptor.cpp index 9a171ae9487..716dcee2747 100644 --- a/src/Common/TimerDescriptor.cpp +++ b/src/Common/TimerDescriptor.cpp @@ -110,6 +110,9 @@ void TimerDescriptor::drain() const throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot readlink for a timer_fd {}", timer_fd); LOG_TRACE(log, "Received EINTR while trying to drain a TimerDescriptor, fd {}: {}", timer_fd, std::string_view(link_path, link_path_length)); + + /// Check that it's actually a timerfd. + chassert(std::string_view(link_path, link_path_length).contains("timerfd")); continue; } From beb5d02cdc1f5fae58a8ee43fadb1c581868b894 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 31 Jul 2024 17:58:20 +0000 Subject: [PATCH 1310/2361] Move THROW back to InterpreterDelete. --- src/Interpreters/InterpreterDeleteQuery.cpp | 13 +++++++++++++ src/Interpreters/MutationsInterpreter.cpp | 10 ---------- src/Storages/MergeTree/MutateTask.cpp | 12 ++++++++---- .../03161_lightweight_delete_projection.sql | 4 ++-- 4 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 0e988e7d031..3000292f047 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -16,6 +16,7 @@ #include #include #include +#include namespace DB @@ -85,6 +86,18 @@ BlockIO InterpreterDeleteQuery::execute() "Lightweight delete mutate is disabled. 
" "Set `enable_lightweight_delete` setting to enable it"); + if (metadata_snapshot->hasProjections()) + { + if (const auto * merge_tree_data = dynamic_cast(table.get())) + if (merge_tree_data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "DELETE query is not allowed for table {} because as it has projections and setting " + "lightweight_mutation_projection_mode is set to THROW. " + "User should change lightweight_mutation_projection_mode OR " + "drop all the projections manually before running the query", + table_id.getFullTableName()); + } + /// Build "ALTER ... UPDATE _row_exists = 0 WHERE predicate" query String alter_query = "ALTER TABLE " + table->getStorageID().getFullTableName() diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index db4ea9c0754..480c6736bc5 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -491,16 +491,6 @@ static void validateUpdateColumns( { if (!source.supportsLightweightDelete()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Lightweight delete is not supported for table"); - - if (const MergeTreeData * merge_tree_data = source.getMergeTreeData(); merge_tree_data != nullptr) - { - if (merge_tree_data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW - && merge_tree_data->hasProjection()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DELETE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", - source.getStorage()->getStorageID().getFullTableName()); - } } else if (virtual_columns.tryGet(column_name)) { diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 6245d80508b..8b5829eb058 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2320,10 +2320,14 @@ bool MutateTask::prepare() ctx->context, ctx->materialized_indices); - bool lightweight_delete_projection_drop = lightweight_delete_mode - && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP; + auto lightweight_mutation_projection_mode = ctx->data->getSettings()->lightweight_mutation_projection_mode; + bool lightweight_delete_drops_projections = + lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP + || lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW; + + bool should_create_projections = !(lightweight_delete_mode && lightweight_delete_drops_projections); /// Under lightweight delete mode, if option is drop, projections_to_recalc should be empty. 
- if (!lightweight_delete_projection_drop) + if (should_create_projections) { ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( ctx->source_part, @@ -2342,7 +2346,7 @@ bool MutateTask::prepare() ctx->projections_to_recalc, ctx->stats_to_recalc, ctx->metadata_snapshot, - lightweight_delete_projection_drop); + !should_create_projections); ctx->files_to_rename = MutationHelpers::collectFilesForRenames( ctx->source_part, diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index f33653fc652..02b880d620a 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -21,7 +21,7 @@ SELECT 'testing throw default mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; -DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } +DELETE FROM users WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } SELECT 'testing drop mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; @@ -85,7 +85,7 @@ INSERT INTO users VALUES (1231, 'John', 33); SELECT 'testing throw default mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; -DELETE FROM users WHERE uid = 1231; -- { serverError NOT_IMPLEMENTED } +DELETE FROM users WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } SELECT 'testing drop mode'; ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; From 26a81e366d722314bbda12d5232fa49117ef4498 Mon Sep 17 00:00:00 2001 From: shiyer7474 Date: Wed, 31 Jul 2024 18:05:53 +0000 Subject: [PATCH 1311/2361] Fix the serialization of parameters for parameterized views Removed the call to convertFieldToString() and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. Testcase added to cover Date/Date32/UUID/IP datatypes. --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 10 +- ...zed_view_with_non_literal_params.reference | 31 ++++++ ...meterized_view_with_non_literal_params.sql | 97 +++++++++++++++++++ 3 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.reference create mode 100644 tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 767d5c11075..6113a38d463 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -4546,7 +4546,15 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node, resolveExpressionNode(nodes[1], scope, /* allow_lambda_expression */false, /* allow_table_function */false); if (auto * constant = nodes[1]->as()) { - view_params[identifier_node->getIdentifier().getFullName()] = convertFieldToString(constant->getValue()); + /// Serialize the constant value using datatype specific + /// interfaces to match the deserialization in ReplaceQueryParametersVistor. 
+ WriteBufferFromOwnString buf; + auto constval = constant->getValue(); + auto realtype = constant->getResultType(); + auto tempcol = realtype->createColumn(); + tempcol->insert(constval); + realtype->getDefaultSerialization()->serializeTextEscaped(*tempcol, 0, buf, {}); + view_params[identifier_node->getIdentifier().getFullName()] = buf.str(); } } } diff --git a/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.reference b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.reference new file mode 100644 index 00000000000..e4e6c313b85 --- /dev/null +++ b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.reference @@ -0,0 +1,31 @@ +Test with Date parameter +1 +2 +1 +3 +3 +3 +2 +Test with Date32 parameter +1 +2 +1 +3 +5 +3 +4 +Test with UUID parameter +4 +3 +3 +1 +2 +Test with 2 parameters +1 +1 +3 +3 +Test with IPv4 +1 +2 +3 diff --git a/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql new file mode 100644 index 00000000000..55795c7a785 --- /dev/null +++ b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql @@ -0,0 +1,97 @@ + +select 'Test with Date parameter'; + +drop table if exists date_table_pv; +create table date_table_pv (id Int32, dt Date) engine = Memory(); + +insert into date_table_pv values(1, today()); +insert into date_table_pv values(2, yesterday()); +insert into date_table_pv values(3, toDate('1974-04-07')); + +drop view if exists date_pv; +create view date_pv as select * from date_table_pv where dt = {dtparam:Date}; + +select id from date_pv(dtparam=today()); +select id from date_pv(dtparam=yesterday()); +select id from date_pv(dtparam=yesterday()+1); +select id from date_pv(dtparam='1974-04-07'); +select id from date_pv(dtparam=toDate('1974-04-07')); +select id from date_pv(dtparam=toString(toDate('1974-04-07'))); +select id from date_pv(dtparam=toDate('1975-04-07')); +select id from date_pv(dtparam=(select dt from date_table_pv where id = 2)); + +select 'Test with Date32 parameter'; + +drop table if exists date32_table_pv; +create table date32_table_pv (id Int32, dt Date32) engine = Memory(); + +insert into date32_table_pv values(1, today()); +insert into date32_table_pv values(2, yesterday()); +insert into date32_table_pv values(3, toDate32('2199-12-31')); +insert into date32_table_pv values(4, toDate32('1950-12-25')); +insert into date32_table_pv values(5, toDate32('1900-01-01')); + +drop view if exists date32_pv; +create view date32_pv as select * from date32_table_pv where dt = {dtparam:Date32}; + +select id from date32_pv(dtparam=today()); +select id from date32_pv(dtparam=yesterday()); +select id from date32_pv(dtparam=yesterday()+1); +select id from date32_pv(dtparam='2199-12-31'); +select id from date32_pv(dtparam=toDate32('1900-01-01')); +select id from date32_pv(dtparam=(select dt from date32_table_pv where id = 3)); +select id from date32_pv(dtparam=(select dt from date32_table_pv where id = 4)); + + +select 'Test with UUID parameter'; +drop table if exists uuid_table_pv; +create table uuid_table_pv (id Int32, uu UUID) engine = Memory(); + +insert into uuid_table_pv values(1, generateUUIDv4()); +insert into uuid_table_pv values(2, generateUUIDv7()); +insert into uuid_table_pv values(3, toUUID('11111111-2222-3333-4444-555555555555')); +insert into uuid_table_pv select 4, serverUUID(); + + +drop view if exists uuid_pv; +create view uuid_pv as select * from 
uuid_table_pv where uu = {uuidparam:UUID}; +select id from uuid_pv(uuidparam=serverUUID()); +select id from uuid_pv(uuidparam=toUUID('11111111-2222-3333-4444-555555555555')); +select id from uuid_pv(uuidparam='11111111-2222-3333-4444-555555555555'); +select id from uuid_pv(uuidparam=(select uu from uuid_table_pv where id = 1)); +select id from uuid_pv(uuidparam=(select uu from uuid_table_pv where id = 2)); +-- generateUUIDv4() is not constant foldable, hence cannot be used as parameter value +select id from uuid_pv(uuidparam=generateUUIDv4()); -- { serverError UNKNOWN_QUERY_PARAMETER } +-- But nested "select generateUUIDv4()" works! +select id from uuid_pv(uuidparam=(select generateUUIDv4())); + +select 'Test with 2 parameters'; + +drop view if exists date_pv2; +create view date_pv2 as select * from date_table_pv where dt = {dtparam:Date} and id = {intparam:Int32}; +select id from date_pv2(dtparam=today(),intparam=1); +select id from date_pv2(dtparam=today(),intparam=length('A')); +select id from date_pv2(dtparam='1974-04-07',intparam=length('AAA')); +select id from date_pv2(dtparam=toDate('1974-04-07'),intparam=length('BBB')); + +select 'Test with IPv4'; + +drop table if exists ipv4_table_pv; +create table ipv4_table_pv (id Int32, ipaddr IPv4) ENGINE = Memory(); +insert into ipv4_table_pv values (1, '116.106.34.242'); +insert into ipv4_table_pv values (2, '116.106.34.243'); +insert into ipv4_table_pv values (3, '116.106.34.244'); + +drop view if exists ipv4_pv; +create view ipv4_pv as select * from ipv4_table_pv where ipaddr = {ipv4param:IPv4}; +select id from ipv4_pv(ipv4param='116.106.34.242'); +select id from ipv4_pv(ipv4param=toIPv4('116.106.34.243')); +select id from ipv4_pv(ipv4param=(select ipaddr from ipv4_table_pv where id=3)); + +drop view date_pv; +drop view date_pv2; +drop view uuid_pv; +drop view ipv4_pv; +drop table date_table_pv; +drop table uuid_table_pv; +drop table ipv4_table_pv; From 7aff8748b027b43c48f670446c06e704b1767a35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 18:06:09 +0000 Subject: [PATCH 1312/2361] Address small review comments --- .../table-engines/integrations/kafka.md | 2 +- src/Storages/Kafka/KafkaConsumer2.cpp | 4 +-- src/Storages/Kafka/StorageKafka2.cpp | 28 ------------------- src/Storages/Kafka/StorageKafkaCommon.cpp | 2 +- 4 files changed, 4 insertions(+), 32 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 38a9d696067..de6492e8ea7 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -257,7 +257,7 @@ If `allow_experimental_kafka_offsets_storage_in_keeper` is enabled, then two mor - `kafka_keeper_path` specifies the path to the table in ClickHouse Keeper - `kafka_replica_name` specifies the replica name in ClickHouse Keeper -Either both of the settings must be specified or neither of them. When both of them are specified, then a new, experimental Kafka engine will be used. The new engine doesn't depend on storing the committed offsets in Kafka,but stores them in ClickHouse Keeper. It still tries to commit the offsets to Kafka, but it only depends on those offsets when the table is created. In any other circumstances (table is restarted, or recovered after some error) the offsets stored in ClickHouse Keeper will be used to consume messages from. 
Apart from the committed offset, it also stores how many messages were consumed in the last batch, so if the insert fails, the same amount of messages will be consumed, thus enabling deduplication if necessary. +Either both of the settings must be specified or neither of them. When both of them are specified, then a new, experimental Kafka engine will be used. The new engine doesn't depend on storing the committed offsets in Kafka, but stores them in ClickHouse Keeper. It still tries to commit the offsets to Kafka, but it only depends on those offsets when the table is created. In any other circumstances (table is restarted, or recovered after some error) the offsets stored in ClickHouse Keeper will be used as an offset to continue consuming messages from. Apart from the committed offset, it also stores how many messages were consumed in the last batch, so if the insert fails, the same amount of messages will be consumed, thus enabling deduplication if necessary. Example: diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 8659465a805..761186fbbdd 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -326,7 +326,7 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) if (e.get_error() == RD_KAFKA_RESP_ERR__NO_OFFSET) committed = true; else - LOG_WARNING(log, "Exception during commit attempt: {}", e.what()); + LOG_ERROR(log, "Exception during attempt to commit to Kafka: {}", e.what()); } } @@ -334,7 +334,7 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) { // The failure is not the biggest issue, it only counts when a table is dropped and recreated, otherwise the offsets are taken from keeper. ProfileEvents::increment(ProfileEvents::KafkaCommitFailures); - LOG_INFO(log, "All commit attempts failed"); + LOG_ERROR(log, "All commit attempts failed"); } else { diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index dc8d0f8a7df..42f7419def3 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -319,34 +319,6 @@ void StorageKafka2::assertActive() const throw Exception(ErrorCodes::LOGICAL_ERROR, "Table is not active (replica path: {})", replica_path); } -SettingsChanges StorageKafka2::createSettingsAdjustments() -{ - SettingsChanges result; - // Needed for backward compatibility - if (!kafka_settings->input_format_skip_unknown_fields.changed) - { - // Always skip unknown fields regardless of the context (JSON or TSKV) - kafka_settings->input_format_skip_unknown_fields = true; - } - - if (!kafka_settings->input_format_allow_errors_ratio.changed) - kafka_settings->input_format_allow_errors_ratio = 0.; - - if (!kafka_settings->input_format_allow_errors_num.changed) - kafka_settings->input_format_allow_errors_num = kafka_settings->kafka_skip_broken_messages.value; - - if (!schema_name.empty()) - result.emplace_back("format_schema", schema_name); - - for (const auto & setting : *kafka_settings) - { - const auto & name = setting.getName(); - if (name.find("kafka_") == std::string::npos) - result.emplace_back(name, setting.getValue()); - } - return result; -} - Pipe StorageKafka2::read( const Names & /*column_names */, diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp index 883eae95a7f..801c3a18a39 100644 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ b/src/Storages/Kafka/StorageKafkaCommon.cpp @@ -114,7 +114,7 @@ 
StorageKafkaInterceptors::rdKafkaOnThreadExit(rd_kafka_t *, rd_ka self->thread_statuses.erase(it); - return RD_KAFKA_RESP_ERR_NO_ERROR; + return RD_KAFKA_RESP_ERR_NO_ERROR; } template From 3c20297ae1aaad3c5c5e4bac18c898243d05fece Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 18:07:33 +0000 Subject: [PATCH 1313/2361] Make sure consumers are created even when initial subscribe fails --- src/Storages/Kafka/KafkaConsumer2.cpp | 12 ++++++++++-- src/Storages/Kafka/KafkaConsumer2.h | 3 +++ src/Storages/Kafka/StorageKafka2.cpp | 17 ++++++++--------- src/Storages/Kafka/StorageKafka2.h | 1 - 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 761186fbbdd..a94afd2c8da 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -122,8 +122,6 @@ KafkaConsumer2::KafkaConsumer2( LOG_ERROR(log, "Rebalance error: {}", err); ProfileEvents::increment(ProfileEvents::KafkaRebalanceErrors); }); - - consumer->subscribe(topics); } KafkaConsumer2::~KafkaConsumer2() @@ -342,6 +340,16 @@ void KafkaConsumer2::commit(const TopicPartition & topic_partition) } } +void KafkaConsumer2::subscribeIfNotSubscribedYet() +{ + if (likely(is_subscribed)) + return; + + consumer->subscribe(topics); + is_subscribed = true; + LOG_DEBUG(log, "Subscribed."); +} + ReadBufferPtr KafkaConsumer2::getNextMessage() { if (current != messages.end()) diff --git a/src/Storages/Kafka/KafkaConsumer2.h b/src/Storages/Kafka/KafkaConsumer2.h index 3c91df8a02f..dd2cfe87aa0 100644 --- a/src/Storages/Kafka/KafkaConsumer2.h +++ b/src/Storages/Kafka/KafkaConsumer2.h @@ -115,6 +115,8 @@ public: const auto & currentHeaderList() const { return current[-1].get_header_list(); } String currentPayload() const { return current[-1].get_payload(); } + void subscribeIfNotSubscribedYet(); + private: using Messages = std::vector; CurrentMetrics::Increment metric_increment{CurrentMetrics::KafkaConsumers}; @@ -136,6 +138,7 @@ private: StalledStatus stalled_status = StalledStatus::NO_MESSAGES_RETURNED; const std::atomic & stopped; + bool is_subscribed = false; // order is important, need to be destructed before consumer Messages messages; diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 42f7419def3..e70be7d7a2a 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -372,6 +372,8 @@ void StorageKafka2::startup() consumers.push_back(ConsumerAndAssignmentInfo{.consumer = createConsumer(i), .keeper = getZooKeeper()}); LOG_DEBUG(log, "Created #{} consumer", num_created_consumers); ++num_created_consumers; + + consumers.back().consumer->subscribeIfNotSubscribedYet(); } catch (const cppkafka::Exception &) { @@ -404,16 +406,11 @@ KafkaConsumer2Ptr StorageKafka2::createConsumer(size_t consumer_number) consumer_impl->set_destroy_flags(RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE); /// NOTE: we pass |stream_cancelled| by reference here, so the buffers should not outlive the storage. 
- if (thread_per_consumer) - { - // call subscribe; - auto & stream_cancelled = tasks[consumer_number]->stream_cancelled; - return std::make_shared( - consumer_impl, log, getPollMaxBatchSize(), getPollTimeoutMillisecond(), stream_cancelled, topics); - } - + chassert((thread_per_consumer || num_consumers == 1) && "StorageKafka2 cannot handle multiple consumers on a single thread"); + auto & stream_cancelled = tasks[consumer_number]->stream_cancelled; return std::make_shared( - consumer_impl, log, getPollMaxBatchSize(), getPollTimeoutMillisecond(), tasks.back()->stream_cancelled, topics); + consumer_impl, log, getPollMaxBatchSize(), getPollTimeoutMillisecond(), stream_cancelled, topics); + } @@ -1067,6 +1064,8 @@ std::optional StorageKafka2::streamToViews(size_t id auto & consumer_info = consumers[idx]; consumer_info.watch.restart(); auto & consumer = consumer_info.consumer; + // In case the initial subscribe in startup failed, let's subscribe now + consumer->subscribeIfNotSubscribedYet(); // To keep the consumer alive const auto wait_for_assignment = consumer_info.locks.empty(); diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index 318c04f1f91..e2565100879 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -181,7 +181,6 @@ private: void partialShutdown(); void assertActive() const; - SettingsChanges createSettingsAdjustments(); KafkaConsumer2Ptr createConsumer(size_t consumer_number); // Returns full consumer related configuration, also the configuration // contains global kafka properties. From dde274f6fad979aa94ea31395b0434c81f72328a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 31 Jul 2024 18:08:14 +0000 Subject: [PATCH 1314/2361] Re-enable ICU on s390/x --- contrib/icu-cmake/CMakeLists.txt | 4 +--- contrib/icudata | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index f9d05f7fe97..adeaa7dcf33 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -4,9 +4,7 @@ else () option(ENABLE_ICU "Enable ICU" 0) endif () -# Temporarily disabled s390x because the ICU build links a blob (icudt71b_dat.S) and our friends from IBM did not explain how they generated -# the blob on s390x: https://github.com/ClickHouse/icudata/pull/2#issuecomment-2226957255 -if (NOT ENABLE_ICU OR ARCH_S390X) +if (NOT ENABLE_ICU) message(STATUS "Not using ICU") return() endif() diff --git a/contrib/icudata b/contrib/icudata index d345d6ac22f..4904951339a 160000 --- a/contrib/icudata +++ b/contrib/icudata @@ -1 +1 @@ -Subproject commit d345d6ac22f381c882420de9053d30ae1ff38d75 +Subproject commit 4904951339a70b4814d2d3723436b20d079cb01b From e2af1766eb1ea0ee2f6b862f53a0d3c13f53365b Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 20:25:28 +0200 Subject: [PATCH 1315/2361] init --- src/Functions/DateTimeTransforms.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index a7bd398cdaa..fe26c5cf353 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -381,11 +381,13 @@ struct ToStartOfWeekImpl static UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + const auto & res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), 
week_mode); + return res >= 0 ? res : 0; } static UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + const auto & res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + return res >= 0 ? res : 0; } static UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) { From d0c643180f408d84a5f10a917f413248b9267202 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 20:29:36 +0200 Subject: [PATCH 1316/2361] add tests --- .../03215_toStartOfWeek_with_dateTime64_fix.reference | 2 ++ .../0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.reference create mode 100644 tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql diff --git a/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.reference b/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.reference new file mode 100644 index 00000000000..fd698107f22 --- /dev/null +++ b/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.reference @@ -0,0 +1,2 @@ +1970-01-01 +1970-01-01 diff --git a/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql b/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql new file mode 100644 index 00000000000..0f00a52cb86 --- /dev/null +++ b/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql @@ -0,0 +1,2 @@ +SELECT toStartOfWeek(toDateTime64('1970-02-01', 6)); +SELECT toStartOfWeek(toDateTime('1970-01-01')); From 636c3f642340de6e5ca4892481ca156cb236a4cd Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 20:31:22 +0200 Subject: [PATCH 1317/2361] Update DateTimeTransforms.h --- src/Functions/DateTimeTransforms.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index fe26c5cf353..1970ec3bdb0 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -382,12 +382,12 @@ struct ToStartOfWeekImpl static UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { const auto & res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); - return res >= 0 ? res : 0; + return std::max(res, 0); } static UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { const auto & res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); - return res >= 0 ? 
res : 0; + return std::max(res, 0); } static UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) { From 34ca3128ed0de60e03a105c43dc0924541f4a2c1 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 20:36:13 +0200 Subject: [PATCH 1318/2361] Update DateTimeTransforms.h --- src/Functions/DateTimeTransforms.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 1970ec3bdb0..46fb3bb9f57 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -381,12 +381,12 @@ struct ToStartOfWeekImpl static UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - const auto & res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + const auto res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); return std::max(res, 0); } static UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - const auto & res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + const auto res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); return std::max(res, 0); } static UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) From be65f7be5133d16c162b679cbacad94e582b1493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 18:38:16 +0000 Subject: [PATCH 1319/2361] Fix build --- src/Core/SettingsChangesHistory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 06fb47779d7..992b1220201 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -58,7 +58,7 @@ String ClickHouseVersion::toString() const static std::initializer_list> settings_changes_history_initializer = { {"24.8", {{"allow_experimental_kafka_offsets_storage_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"}, - }} + }}, {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, From 557f9dbe3fb02e3bce62adbeb1fd5056f2d36b6c Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 31 Jul 2024 18:51:27 +0000 Subject: [PATCH 1320/2361] fix test --- .../0_stateless/02319_lightweight_delete_on_merge_tree.sql | 2 +- tests/queries/0_stateless/02792_drop_projection_lwd.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql b/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql index f82f79dbe44..6491253cd5f 100644 --- a/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql +++ b/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql @@ -102,7 +102,7 @@ ALTER TABLE t_proj ADD PROJECTION p_1 (SELECT avg(a), avg(b), count()) SETTINGS INSERT INTO t_proj SELECT number + 1, number + 1 FROM numbers(1000); -DELETE FROM t_proj WHERE a < 100; -- { 
serverError NOT_IMPLEMENTED } +DELETE FROM t_proj WHERE a < 100; -- { serverError SUPPORT_IS_DISABLED } SELECT avg(a), avg(b), count() FROM t_proj; diff --git a/tests/queries/0_stateless/02792_drop_projection_lwd.sql b/tests/queries/0_stateless/02792_drop_projection_lwd.sql index dcde7dcc600..dad7f7cd028 100644 --- a/tests/queries/0_stateless/02792_drop_projection_lwd.sql +++ b/tests/queries/0_stateless/02792_drop_projection_lwd.sql @@ -7,7 +7,7 @@ CREATE TABLE t_projections_lwd (a UInt32, b UInt32, PROJECTION p (SELECT * ORDER INSERT INTO t_projections_lwd SELECT number, number FROM numbers(100); -- LWD does not work, as expected -DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError NOT_IMPLEMENTED } +DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError SUPPORT_IS_DISABLED } KILL MUTATION WHERE database = currentDatabase() AND table = 't_projections_lwd' SYNC FORMAT Null; -- drop projection From 9def8cea8121ae8001649629a96e73eb1e10159b Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 31 Jul 2024 18:57:08 +0000 Subject: [PATCH 1321/2361] Update version_date.tsv and changelogs after v24.4.4.107-stable --- docs/changelogs/v24.4.4.107-stable.md | 70 +++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 71 insertions(+) create mode 100644 docs/changelogs/v24.4.4.107-stable.md diff --git a/docs/changelogs/v24.4.4.107-stable.md b/docs/changelogs/v24.4.4.107-stable.md new file mode 100644 index 00000000000..ba7c576715e --- /dev/null +++ b/docs/changelogs/v24.4.4.107-stable.md @@ -0,0 +1,70 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.4.4.107-stable (af0ed6b197e) FIXME as compared to v24.4.3.25-stable (a915dd4eda4) + +#### Improvement +* Backported in [#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). 
+* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)). +* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)). +* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). +* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). +* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. 
[#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)). +* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). +* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). 
+* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)). +* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). +* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)). +* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): Small fix in realloc memory tracking. 
[#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)). +* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 027b207d3ad..abd8f84ec74 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -5,6 +5,7 @@ v24.5.4.49-stable 2024-07-01 v24.5.3.5-stable 2024-06-13 v24.5.2.34-stable 2024-06-13 v24.5.1.1763-stable 2024-06-01 +v24.4.4.107-stable 2024-07-31 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 From cd3c6c3ae807a321d94079087eb1fb29f4764549 Mon Sep 17 00:00:00 2001 From: sakulali Date: Thu, 1 Aug 2024 03:37:45 +0800 Subject: [PATCH 1322/2361] try to fix flaky test 01889_clickhouse_client_config_format --- .../01889_clickhouse_client_config_format.reference | 2 +- .../0_stateless/01889_clickhouse_client_config_format.sh | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference b/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference index ef0d9ffc538..2575200e6fa 100644 --- a/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference +++ b/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference @@ -19,6 +19,6 @@ autodetect xml (non leading whitespaces) autodetect yaml 2 autodetect invalid xml -Code: 1000, e.code() = 0, SAXParseException: Invalid token in '/config_test.badxml', line 2 column 12, Stack trace (when copying this message, always include the lines below): +Correct: invalid xml parsed with exception autodetect invalid yaml Code: 585. Unable to parse YAML configuration file /config_test.badyaml, yaml-cpp: error at line 2, column 12: illegal map value. (CANNOT_PARSE_YAML) diff --git a/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh b/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh index 9a44ec0d5f5..58fd6852116 100755 --- a/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh +++ b/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh @@ -108,7 +108,9 @@ echo 'autodetect xml (non leading whitespaces)' $CLICKHOUSE_CLIENT --config "$autodetect_xml_non_leading_whitespace_config" -q "select getSetting('max_threads')" echo 'autodetect yaml' $CLICKHOUSE_CLIENT --config "$autodetect_yaml_config" -q "select getSetting('max_threads')" + +# Error code is 1000 (Poco::Exception). It is not ignored. echo 'autodetect invalid xml' -$CLICKHOUSE_CLIENT --config "$autodetect_invalid_xml_config" -q "select getSetting('max_threads')" 2>&1 |& sed -n '1p' | sed -e "s#$CLICKHOUSE_TMP##" -e "s#Poco::Exception. 
##" +$CLICKHOUSE_CLIENT --config "$autodetect_invalid_xml_config" -q "select getSetting('max_threads')" 2>&1 |& grep -q "Code: 1000" && echo "Correct: invalid xml parsed with exception" || echo 'Fail: expected error code 1000 but got other' echo 'autodetect invalid yaml' -$CLICKHOUSE_CLIENT --config "$autodetect_invalid_yaml_config" -q "select getSetting('max_threads')" 2>&1 |& sed -n '1p' | sed -e "s#$CLICKHOUSE_TMP##" -e "s#DB::Exception: ##" \ No newline at end of file +$CLICKHOUSE_CLIENT --config "$autodetect_invalid_yaml_config" -q "select getSetting('max_threads')" 2>&1 |& sed -e "s#$CLICKHOUSE_TMP##" -e "s#DB::Exception: ##" \ No newline at end of file From 89ca6aee4ea23ed9cb9b36bb4fa9a6490efe576a Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Wed, 31 Jul 2024 20:00:37 +0000 Subject: [PATCH 1323/2361] Check for timeout when we stop logs replication --- docker/test/base/setup_export_logs.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh index 0c869a95db2..db141bcc55e 100755 --- a/docker/test/base/setup_export_logs.sh +++ b/docker/test/base/setup_export_logs.sh @@ -215,7 +215,8 @@ function setup_logs_replication function stop_logs_replication { echo "Detach all logs replication" - clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | { + timeout --preserve-status --signal TERM --kill-after 10m 20m \ + clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | { tee /dev/stderr } | { xargs -n1 -r -i clickhouse-client --query "drop table {}" From cc27c254abd4b6fd8f64b47e0bdf6195041bd5ef Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1324/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From 867784d55c989943d0c79eb9179b01e878fabcbe Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 22:48:16 +0200 Subject: [PATCH 1325/2361] Update DateTimeTransforms.h --- src/Functions/DateTimeTransforms.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 46fb3bb9f57..ce7da406e9a 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include From 91e48d8b1b1029af937433eca377b9187b03d49a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 20:57:57 +0000 Subject: [PATCH 1326/2361] Split `StorageKafkaCommon` --- src/Storages/Kafka/KafkaConfigLoader.cpp | 
475 ++++++++++++ src/Storages/Kafka/KafkaConfigLoader.h | 54 ++ src/Storages/Kafka/KafkaConsumer.cpp | 2 +- src/Storages/Kafka/KafkaConsumer2.cpp | 2 +- src/Storages/Kafka/StorageKafka.cpp | 3 +- src/Storages/Kafka/StorageKafka.h | 6 +- src/Storages/Kafka/StorageKafka2.cpp | 3 +- src/Storages/Kafka/StorageKafka2.h | 6 +- src/Storages/Kafka/StorageKafkaCommon.cpp | 875 ---------------------- src/Storages/Kafka/StorageKafkaCommon.h | 116 --- src/Storages/Kafka/StorageKafkaUtils.cpp | 458 +++++++++++ src/Storages/Kafka/StorageKafkaUtils.h | 61 ++ 12 files changed, 1060 insertions(+), 1001 deletions(-) create mode 100644 src/Storages/Kafka/KafkaConfigLoader.cpp create mode 100644 src/Storages/Kafka/KafkaConfigLoader.h delete mode 100644 src/Storages/Kafka/StorageKafkaCommon.cpp delete mode 100644 src/Storages/Kafka/StorageKafkaCommon.h create mode 100644 src/Storages/Kafka/StorageKafkaUtils.cpp create mode 100644 src/Storages/Kafka/StorageKafkaUtils.h diff --git a/src/Storages/Kafka/KafkaConfigLoader.cpp b/src/Storages/Kafka/KafkaConfigLoader.cpp new file mode 100644 index 00000000000..3d31a987395 --- /dev/null +++ b/src/Storages/Kafka/KafkaConfigLoader.cpp @@ -0,0 +1,475 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace CurrentMetrics +{ +extern const Metric KafkaLibrdkafkaThreads; +} + +namespace DB +{ + +template +struct KafkaInterceptors +{ + static rd_kafka_resp_err_t rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx); + + static rd_kafka_resp_err_t rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx); + + static rd_kafka_resp_err_t + rdKafkaOnNew(rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/); + + static rd_kafka_resp_err_t rdKafkaOnConfDup( + rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx); +}; + +template +rd_kafka_resp_err_t +KafkaInterceptors::rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx) +{ + TStorageKafka * self = reinterpret_cast(ctx); + CurrentMetrics::add(CurrentMetrics::KafkaLibrdkafkaThreads, 1); + + const auto & storage_id = self->getStorageID(); + const auto & table = storage_id.getTableName(); + + switch (thread_type) + { + case RD_KAFKA_THREAD_MAIN: + setThreadName(("rdk:m/" + table.substr(0, 9)).c_str()); + break; + case RD_KAFKA_THREAD_BACKGROUND: + setThreadName(("rdk:bg/" + table.substr(0, 8)).c_str()); + break; + case RD_KAFKA_THREAD_BROKER: + setThreadName(("rdk:b/" + table.substr(0, 9)).c_str()); + break; + } + + /// Create ThreadStatus to track memory allocations from librdkafka threads. + // + /// And store them in a separate list (thread_statuses) to make sure that they will be destroyed, + /// regardless how librdkafka calls the hooks. 
+ /// But this can trigger use-after-free if librdkafka will not destroy threads after rd_kafka_wait_destroyed() + auto thread_status = std::make_shared(); + std::lock_guard lock(self->thread_statuses_mutex); + self->thread_statuses.emplace_back(std::move(thread_status)); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +template +rd_kafka_resp_err_t KafkaInterceptors::rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx) +{ + TStorageKafka * self = reinterpret_cast(ctx); + CurrentMetrics::sub(CurrentMetrics::KafkaLibrdkafkaThreads, 1); + + std::lock_guard lock(self->thread_statuses_mutex); + const auto it = std::find_if( + self->thread_statuses.begin(), + self->thread_statuses.end(), + [](const auto & thread_status_ptr) { return thread_status_ptr.get() == current_thread; }); + if (it == self->thread_statuses.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread status for this librdkafka thread."); + + self->thread_statuses.erase(it); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +template +rd_kafka_resp_err_t KafkaInterceptors::rdKafkaOnNew( + rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/) +{ + TStorageKafka * self = reinterpret_cast(ctx); + rd_kafka_resp_err_t status; + + status = rd_kafka_interceptor_add_on_thread_start(rk, "init-thread", rdKafkaOnThreadStart, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + { + LOG_ERROR(self->log, "Cannot set on thread start interceptor due to {} error", status); + return status; + } + + status = rd_kafka_interceptor_add_on_thread_exit(rk, "exit-thread", rdKafkaOnThreadExit, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(self->log, "Cannot set on thread exit interceptor due to {} error", status); + + return status; +} + +template +rd_kafka_resp_err_t KafkaInterceptors::rdKafkaOnConfDup( + rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx) +{ + TStorageKafka * self = reinterpret_cast(ctx); + rd_kafka_resp_err_t status; + + // cppkafka copies configuration multiple times + status = rd_kafka_conf_interceptor_add_on_conf_dup(new_conf, "init", rdKafkaOnConfDup, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + { + LOG_ERROR(self->log, "Cannot set on conf dup interceptor due to {} error", status); + return status; + } + + status = rd_kafka_conf_interceptor_add_on_new(new_conf, "init", rdKafkaOnNew, ctx); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(self->log, "Cannot set on conf new interceptor due to {} error", status); + + return status; +} + +template struct KafkaInterceptors; +template struct KafkaInterceptors; + +namespace +{ + +void setKafkaConfigValue(cppkafka::Configuration & kafka_config, const String & key, const String & value) +{ + /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. + /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md + const String setting_name_in_kafka_config = (key == "log_level") ? key : boost::replace_all_copy(key, "_", "."); + kafka_config.set(setting_name_in_kafka_config, value); +} + +void loadConfigProperty( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & config_prefix, + const String & tag) +{ + const String property_path = config_prefix + "." 
+ tag; + const String property_value = config.getString(property_path); + + setKafkaConfigValue(kafka_config, tag, property_value); +} + +void loadNamedCollectionConfig(cppkafka::Configuration & kafka_config, const String & collection_name, const String & config_prefix) +{ + const auto & collection = NamedCollectionFactory::instance().get(collection_name); + for (const auto & key : collection->getKeys(-1, config_prefix)) + { + // Cut prefix with '.' before actual config tag. + const auto param_name = key.substr(config_prefix.size() + 1); + setKafkaConfigValue(kafka_config, param_name, collection->get(key)); + } +} + +void loadLegacyTopicConfig( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & collection_name, + const String & config_prefix) +{ + if (!collection_name.empty()) + { + loadNamedCollectionConfig(kafka_config, collection_name, config_prefix); + return; + } + + Poco::Util::AbstractConfiguration::Keys tags; + config.keys(config_prefix, tags); + + for (const auto & tag : tags) + { + loadConfigProperty(kafka_config, config, config_prefix, tag); + } +} + +/// Read server configuration into cppkafa configuration, used by new per-topic configuration +void loadTopicConfig( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & collection_name, + const String & config_prefix, + const String & topic) +{ + if (!collection_name.empty()) + { + const auto topic_prefix = fmt::format("{}.{}", config_prefix, KafkaConfigLoader::CONFIG_KAFKA_TOPIC_TAG); + const auto & collection = NamedCollectionFactory::instance().get(collection_name); + for (const auto & key : collection->getKeys(1, config_prefix)) + { + /// Only consider key . Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + if (!key.starts_with(topic_prefix)) + continue; + + const String kafka_topic_path = config_prefix + "." + key; + const String kafka_topic_name_path = kafka_topic_path + "." + KafkaConfigLoader::CONFIG_NAME_TAG; + if (topic == collection->get(kafka_topic_name_path)) + /// Found it! Now read the per-topic configuration into cppkafka. + loadNamedCollectionConfig(kafka_config, collection_name, kafka_topic_path); + } + } + else + { + /// Read all tags one level below + Poco::Util::AbstractConfiguration::Keys tags; + config.keys(config_prefix, tags); + + for (const auto & tag : tags) + { + if (tag == KafkaConfigLoader::CONFIG_NAME_TAG) + continue; // ignore , it is used to match topic configurations + loadConfigProperty(kafka_config, config, config_prefix, tag); + } + } +} + +/// Read server configuration into cppkafka configuration, used by global configuration and by legacy per-topic configuration +void loadFromConfig( + cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params, const String & config_prefix) +{ + if (!params.collection_name.empty()) + { + loadNamedCollectionConfig(kafka_config, params.collection_name, config_prefix); + return; + } + + /// Read all tags one level below + Poco::Util::AbstractConfiguration::Keys tags; + params.config.keys(config_prefix, tags); + + for (const auto & tag : tags) + { + if (tag == KafkaConfigLoader::CONFIG_KAFKA_PRODUCER_TAG || tag == KafkaConfigLoader::CONFIG_KAFKA_CONSUMER_TAG) + /// Do not load consumer/producer properties, since they should be separated by different configuration objects. 
+ continue; + + if (tag.starts_with( + KafkaConfigLoader::CONFIG_KAFKA_TOPIC_TAG)) /// multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + { + // Update consumer topic-specific configuration (new syntax). Example with topics "football" and "baseball": + // + // + // football + // 250 + // 5000 + // + // + // baseball + // 300 + // 2000 + // + // + // Advantages: The period restriction no longer applies (e.g. sports.football will work), everything + // Kafka-related is below . + for (const auto & topic : params.topics) + { + /// Read topic name between ... + const String kafka_topic_path = config_prefix + "." + tag; + const String kafka_topic_name_path = kafka_topic_path + "." + KafkaConfigLoader::CONFIG_NAME_TAG; + const String topic_name = params.config.getString(kafka_topic_name_path); + + if (topic_name != topic) + continue; + loadTopicConfig(kafka_config, params.config, params.collection_name, kafka_topic_path, topic); + } + continue; + } + if (tag.starts_with(KafkaConfigLoader::CONFIG_KAFKA_TAG)) + /// skip legacy configuration per topic e.g. . + /// it will be processed is a separate function + continue; + // Update configuration from the configuration. Example: + // + // 250 + // 100000 + // + loadConfigProperty(kafka_config, params.config, config_prefix, tag); + } +} + +void loadLegacyConfigSyntax( + cppkafka::Configuration & kafka_config, + const Poco::Util::AbstractConfiguration & config, + const String & collection_name, + const Names & topics) +{ + for (const auto & topic : topics) + { + const String kafka_topic_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." + KafkaConfigLoader::CONFIG_KAFKA_TAG + "_" + topic; + loadLegacyTopicConfig(kafka_config, config, collection_name, kafka_topic_path); + } +} + +void loadConsumerConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params) +{ + const String consumer_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." + KafkaConfigLoader::CONFIG_KAFKA_CONSUMER_TAG; + loadLegacyConfigSyntax(kafka_config, params.config, params.collection_name, params.topics); + // A new syntax has higher priority + loadFromConfig(kafka_config, params, consumer_path); +} + +void loadProducerConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params) +{ + const String producer_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." 
+ KafkaConfigLoader::CONFIG_KAFKA_PRODUCER_TAG; + loadLegacyConfigSyntax(kafka_config, params.config, params.collection_name, params.topics); + // A new syntax has higher priority + loadFromConfig(kafka_config, params, producer_path); +} + +template +void updateGlobalConfiguration( + cppkafka::Configuration & kafka_config, TKafkaStorage & storage, const KafkaConfigLoader::LoadConfigParams & params) +{ + loadFromConfig(kafka_config, params, KafkaConfigLoader::CONFIG_KAFKA_TAG); + +#if USE_KRB5 + if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) + LOG_WARNING(params.log, "sasl.kerberos.kinit.cmd configuration parameter is ignored."); + + kafka_config.set("sasl.kerberos.kinit.cmd", ""); + kafka_config.set("sasl.kerberos.min.time.before.relogin", "0"); + + if (kafka_config.has_property("sasl.kerberos.keytab") && kafka_config.has_property("sasl.kerberos.principal")) + { + String keytab = kafka_config.get("sasl.kerberos.keytab"); + String principal = kafka_config.get("sasl.kerberos.principal"); + LOG_DEBUG(params.log, "Running KerberosInit"); + try + { + kerberosInit(keytab, principal); + } + catch (const Exception & e) + { + LOG_ERROR(params.log, "KerberosInit failure: {}", getExceptionMessage(e, false)); + } + LOG_DEBUG(params.log, "Finished KerberosInit"); + } +#else // USE_KRB5 + if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) + LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); +#endif // USE_KRB5 + // No need to add any prefix, messages can be distinguished + kafka_config.set_log_callback( + [log = params.log](cppkafka::KafkaHandleBase & handle, int level, const std::string & facility, const std::string & message) + { + auto [poco_level, client_logs_level] = parseSyslogLevel(level); + const auto & kafka_object_config = handle.get_configuration(); + const std::string client_id_key{"client.id"}; + chassert(kafka_object_config.has_property(client_id_key) && "Kafka configuration doesn't have expected client.id set"); + LOG_IMPL( + log, + client_logs_level, + poco_level, + "[client.id:{}] [rdk:{}] {}", + kafka_object_config.get(client_id_key), + facility, + message); + }); + + /// NOTE: statistics should be consumed, otherwise it creates too much + /// entries in the queue, that leads to memory leak and slow shutdown. + if (!kafka_config.has_property("statistics.interval.ms")) + { + // every 3 seconds by default. set to 0 to disable. + kafka_config.set("statistics.interval.ms", "3000"); + } + // Configure interceptor to change thread name + // + // TODO: add interceptors support into the cppkafka. + // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibility overrides it to noop. + { + // This should be safe, since we wait the rdkafka object anyway. 
+ void * self = static_cast(&storage); + + int status; + + status + = rd_kafka_conf_interceptor_add_on_new(kafka_config.get_handle(), "init", KafkaInterceptors::rdKafkaOnNew, self); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(params.log, "Cannot set new interceptor due to {} error", status); + + // cppkafka always copy the configuration + status = rd_kafka_conf_interceptor_add_on_conf_dup( + kafka_config.get_handle(), "init", KafkaInterceptors::rdKafkaOnConfDup, self); + if (status != RD_KAFKA_RESP_ERR_NO_ERROR) + LOG_ERROR(params.log, "Cannot set dup conf interceptor due to {} error", status); + } +} + +} + +template +cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(TKafkaStorage & storage, const ConsumerConfigParams & params) +{ + cppkafka::Configuration conf; + + conf.set("metadata.broker.list", params.brokers); + conf.set("group.id", params.group); + if (params.multiple_consumers) + conf.set("client.id", fmt::format("{}-{}", params.client_id, params.consumer_number)); + else + conf.set("client.id", params.client_id); + conf.set("client.software.name", VERSION_NAME); + conf.set("client.software.version", VERSION_DESCRIBE); + conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start + + // that allows to prevent fast draining of the librdkafka queue + // during building of single insert block. Improves performance + // significantly, but may lead to bigger memory consumption. + size_t default_queued_min_messages = 100000; // must be greater than or equal to default + size_t max_allowed_queued_min_messages = 10000000; // must be less than or equal to max allowed value + conf.set( + "queued.min.messages", std::min(std::max(params.max_block_size, default_queued_min_messages), max_allowed_queued_min_messages)); + + updateGlobalConfiguration(conf, storage, params); + loadConsumerConfig(conf, params); + + // those settings should not be changed by users. + conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished + conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once. 
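For context, the two settings above mean that offsets are stored and committed explicitly by the consuming code rather than automatically by librdkafka. A minimal sketch of that flow, assuming the public cppkafka API and placeholder broker, group and topic names (an illustration only, not part of this patch):

    #include <cppkafka/cppkafka.h>
    #include <chrono>

    int main()
    {
        cppkafka::Configuration conf = {
            {"metadata.broker.list", "localhost:9092"},  // placeholder broker
            {"group.id", "example_group"},               // placeholder consumer group
            {"enable.auto.commit", "false"},             // commit offsets explicitly
            {"enable.auto.offset.store", "false"}        // store offsets explicitly per message
        };
        cppkafka::Consumer consumer(conf);
        consumer.subscribe({"example_topic"});           // placeholder topic

        while (auto message = consumer.poll(std::chrono::milliseconds(500)))
        {
            if (message.get_error())
                continue;                                // a real consumer would log and handle the error
            // ... process message.get_payload() here ...
            consumer.store_offset(message);              // remember this message as processed
        }
        consumer.commit();                               // commit all stored offsets in one call
    }

As the comments above state, offsets are committed only after a stream has finished successfully, and the explicitly stored offsets are committed all at once.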
+ conf.set("enable.partition.eof", "false"); // Ignore EOF messages + + for (auto & property : conf.get_all()) + { + LOG_TRACE(params.log, "Consumer set property {}:{}", property.first, property.second); + } + + return conf; +} + +template cppkafka::Configuration +KafkaConfigLoader::getConsumerConfiguration(StorageKafka & storage, const ConsumerConfigParams & params); +template cppkafka::Configuration +KafkaConfigLoader::getConsumerConfiguration(StorageKafka2 & storage, const ConsumerConfigParams & params); + +template +cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(TKafkaStorage & storage, const ProducerConfigParams & params) +{ + cppkafka::Configuration conf; + conf.set("metadata.broker.list", params.brokers); + conf.set("client.id", params.client_id); + conf.set("client.software.name", VERSION_NAME); + conf.set("client.software.version", VERSION_DESCRIBE); + + updateGlobalConfiguration(conf, storage, params); + loadProducerConfig(conf, params); + + for (auto & property : conf.get_all()) + { + LOG_TRACE(params.log, "Producer set property {}:{}", property.first, property.second); + } + + return conf; +} + +template cppkafka::Configuration +KafkaConfigLoader::getProducerConfiguration(StorageKafka & storage, const ProducerConfigParams & params); +template cppkafka::Configuration +KafkaConfigLoader::getProducerConfiguration(StorageKafka2 & storage, const ProducerConfigParams & params); + +} diff --git a/src/Storages/Kafka/KafkaConfigLoader.h b/src/Storages/Kafka/KafkaConfigLoader.h new file mode 100644 index 00000000000..f18683c17f0 --- /dev/null +++ b/src/Storages/Kafka/KafkaConfigLoader.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ +struct KafkaSettings; +class VirtualColumnsDescription; + +struct KafkaConfigLoader +{ + static inline const String CONFIG_KAFKA_TAG = "kafka"; + static inline const String CONFIG_KAFKA_TOPIC_TAG = "kafka_topic"; + static inline const String CONFIG_NAME_TAG = "name"; + static inline const String CONFIG_KAFKA_CONSUMER_TAG = "consumer"; + static inline const String CONFIG_KAFKA_PRODUCER_TAG = "producer"; + using LogCallback = cppkafka::Configuration::LogCallback; + + + struct LoadConfigParams + { + const Poco::Util::AbstractConfiguration & config; + String & collection_name; + const Names & topics; + LoggerPtr & log; + }; + + struct ConsumerConfigParams : public LoadConfigParams + { + String brokers; + String group; + bool multiple_consumers; + size_t consumer_number; + String client_id; + size_t max_block_size; + }; + + struct ProducerConfigParams : public LoadConfigParams + { + String brokers; + String client_id; + }; + + template + static cppkafka::Configuration getConsumerConfiguration(TKafkaStorage & storage, const ConsumerConfigParams & params); + + template + static cppkafka::Configuration getProducerConfiguration(TKafkaStorage & storage, const ProducerConfigParams & params); +}; +} diff --git a/src/Storages/Kafka/KafkaConsumer.cpp b/src/Storages/Kafka/KafkaConsumer.cpp index 1affbbaf8fd..d9256cf39ce 100644 --- a/src/Storages/Kafka/KafkaConsumer.cpp +++ b/src/Storages/Kafka/KafkaConsumer.cpp @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index a94afd2c8da..8581398aa90 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include diff --git 
a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 0902ef838b4..f4f641d1c68 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -14,13 +14,14 @@ #include #include #include +#include #include #include #include #include #include #include -#include +#include #include #include #include diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index d02d86b468c..966d818d675 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -24,7 +24,7 @@ class StorageSystemKafkaConsumers; class ThreadStatus; template -struct StorageKafkaInterceptors; +struct KafkaInterceptors; using KafkaConsumerPtr = std::shared_ptr; using ConsumerPtr = std::shared_ptr; @@ -34,8 +34,8 @@ using ConsumerPtr = std::shared_ptr; */ class StorageKafka final : public IStorage, WithContext { - using StorageKafkaInterceptors = StorageKafkaInterceptors; - friend StorageKafkaInterceptors; + using KafkaInterceptors = KafkaInterceptors; + friend KafkaInterceptors; public: StorageKafka( diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index e70be7d7a2a..f58d629dd9b 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -18,10 +18,11 @@ #include #include #include +#include #include #include #include -#include +#include #include #include #include diff --git a/src/Storages/Kafka/StorageKafka2.h b/src/Storages/Kafka/StorageKafka2.h index e2565100879..f85fedb316a 100644 --- a/src/Storages/Kafka/StorageKafka2.h +++ b/src/Storages/Kafka/StorageKafka2.h @@ -30,7 +30,7 @@ namespace DB { template -struct StorageKafkaInterceptors; +struct KafkaInterceptors; using KafkaConsumer2Ptr = std::shared_ptr; @@ -51,8 +51,8 @@ using KafkaConsumer2Ptr = std::shared_ptr; /// hashes for deduplication. 
class StorageKafka2 final : public IStorage, WithContext { - using StorageKafkaInterceptors = StorageKafkaInterceptors; - friend StorageKafkaInterceptors; + using KafkaInterceptors = KafkaInterceptors; + friend KafkaInterceptors; public: StorageKafka2( diff --git a/src/Storages/Kafka/StorageKafkaCommon.cpp b/src/Storages/Kafka/StorageKafkaCommon.cpp deleted file mode 100644 index 801c3a18a39..00000000000 --- a/src/Storages/Kafka/StorageKafkaCommon.cpp +++ /dev/null @@ -1,875 +0,0 @@ -#include - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#if USE_KRB5 -# include -#endif // USE_KRB5 - -namespace CurrentMetrics -{ -extern const Metric KafkaLibrdkafkaThreads; -} - -namespace ProfileEvents -{ -extern const Event KafkaConsumerErrors; -} - -namespace DB -{ - -using namespace std::chrono_literals; - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; - extern const int BAD_ARGUMENTS; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int SUPPORT_IS_DISABLED; -} - -template -rd_kafka_resp_err_t -StorageKafkaInterceptors::rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx) -{ - TStorageKafka * self = reinterpret_cast(ctx); - CurrentMetrics::add(CurrentMetrics::KafkaLibrdkafkaThreads, 1); - - const auto & storage_id = self->getStorageID(); - const auto & table = storage_id.getTableName(); - - switch (thread_type) - { - case RD_KAFKA_THREAD_MAIN: - setThreadName(("rdk:m/" + table.substr(0, 9)).c_str()); - break; - case RD_KAFKA_THREAD_BACKGROUND: - setThreadName(("rdk:bg/" + table.substr(0, 8)).c_str()); - break; - case RD_KAFKA_THREAD_BROKER: - setThreadName(("rdk:b/" + table.substr(0, 9)).c_str()); - break; - } - - /// Create ThreadStatus to track memory allocations from librdkafka threads. - // - /// And store them in a separate list (thread_statuses) to make sure that they will be destroyed, - /// regardless how librdkafka calls the hooks. 
- /// But this can trigger use-after-free if librdkafka will not destroy threads after rd_kafka_wait_destroyed() - auto thread_status = std::make_shared(); - std::lock_guard lock(self->thread_statuses_mutex); - self->thread_statuses.emplace_back(std::move(thread_status)); - - return RD_KAFKA_RESP_ERR_NO_ERROR; -} - -template -rd_kafka_resp_err_t -StorageKafkaInterceptors::rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx) -{ - TStorageKafka * self = reinterpret_cast(ctx); - CurrentMetrics::sub(CurrentMetrics::KafkaLibrdkafkaThreads, 1); - - std::lock_guard lock(self->thread_statuses_mutex); - const auto it = std::find_if(self->thread_statuses.begin(), self->thread_statuses.end(), [](const auto & thread_status_ptr) - { - return thread_status_ptr.get() == current_thread; - }); - if (it == self->thread_statuses.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "No thread status for this librdkafka thread."); - - self->thread_statuses.erase(it); - - return RD_KAFKA_RESP_ERR_NO_ERROR; -} - -template -rd_kafka_resp_err_t StorageKafkaInterceptors::rdKafkaOnNew( - rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/) -{ - TStorageKafka * self = reinterpret_cast(ctx); - rd_kafka_resp_err_t status; - - status = rd_kafka_interceptor_add_on_thread_start(rk, "init-thread", rdKafkaOnThreadStart, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - { - LOG_ERROR(self->log, "Cannot set on thread start interceptor due to {} error", status); - return status; - } - - status = rd_kafka_interceptor_add_on_thread_exit(rk, "exit-thread", rdKafkaOnThreadExit, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(self->log, "Cannot set on thread exit interceptor due to {} error", status); - - return status; -} - -template -rd_kafka_resp_err_t StorageKafkaInterceptors::rdKafkaOnConfDup( - rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx) -{ - TStorageKafka * self = reinterpret_cast(ctx); - rd_kafka_resp_err_t status; - - // cppkafka copies configuration multiple times - status = rd_kafka_conf_interceptor_add_on_conf_dup(new_conf, "init", rdKafkaOnConfDup, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - { - LOG_ERROR(self->log, "Cannot set on conf dup interceptor due to {} error", status); - return status; - } - - status = rd_kafka_conf_interceptor_add_on_new(new_conf, "init", rdKafkaOnNew, ctx); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(self->log, "Cannot set on conf new interceptor due to {} error", status); - - return status; -} - -void setKafkaConfigValue(cppkafka::Configuration & kafka_config, const String & key, const String & value) -{ - /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. - /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md - const String setting_name_in_kafka_config = (key == "log_level") ? key : boost::replace_all_copy(key, "_", "."); - kafka_config.set(setting_name_in_kafka_config, value); -} - -void loadConfigProperty(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const String & tag) -{ - const String property_path = config_prefix + "." 
+ tag; - const String property_value = config.getString(property_path); - - setKafkaConfigValue(kafka_config, tag, property_value); -} - -void loadNamedCollectionConfig(cppkafka::Configuration & kafka_config, const String & collection_name, const String & config_prefix) -{ - const auto & collection = NamedCollectionFactory::instance().get(collection_name); - for (const auto & key : collection->getKeys(-1, config_prefix)) - { - // Cut prefix with '.' before actual config tag. - const auto param_name = key.substr(config_prefix.size() + 1); - setKafkaConfigValue(kafka_config, param_name, collection->get(key)); - } -} - -void loadLegacyTopicConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & collection_name, const String & config_prefix) -{ - if (!collection_name.empty()) - { - loadNamedCollectionConfig(kafka_config, collection_name, config_prefix); - return; - } - - Poco::Util::AbstractConfiguration::Keys tags; - config.keys(config_prefix, tags); - - for (const auto & tag : tags) - { - loadConfigProperty(kafka_config, config, config_prefix, tag); - } -} - -/// Read server configuration into cppkafa configuration, used by new per-topic configuration -void loadTopicConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & collection_name, const String & config_prefix, const String & topic) -{ - if (!collection_name.empty()) - { - const auto topic_prefix = fmt::format("{}.{}", config_prefix, KafkaConfigLoader::CONFIG_KAFKA_TOPIC_TAG); - const auto & collection = NamedCollectionFactory::instance().get(collection_name); - for (const auto & key : collection->getKeys(1, config_prefix)) - { - /// Only consider key . Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. - if (!key.starts_with(topic_prefix)) - continue; - - const String kafka_topic_path = config_prefix + "." + key; - const String kafka_topic_name_path = kafka_topic_path + "." + KafkaConfigLoader::CONFIG_NAME_TAG; - if (topic == collection->get(kafka_topic_name_path)) - /// Found it! Now read the per-topic configuration into cppkafka. - loadNamedCollectionConfig(kafka_config, collection_name, kafka_topic_path); - } - } - else - { - /// Read all tags one level below - Poco::Util::AbstractConfiguration::Keys tags; - config.keys(config_prefix, tags); - - for (const auto & tag : tags) - { - if (tag == KafkaConfigLoader::CONFIG_NAME_TAG) - continue; // ignore , it is used to match topic configurations - loadConfigProperty(kafka_config, config, config_prefix, tag); - } - } -} - -/// Read server configuration into cppkafka configuration, used by global configuration and by legacy per-topic configuration -static void -loadFromConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params, const String & config_prefix) -{ - if (!params.collection_name.empty()) - { - loadNamedCollectionConfig(kafka_config, params.collection_name, config_prefix); - return; - } - - /// Read all tags one level below - Poco::Util::AbstractConfiguration::Keys tags; - params.config.keys(config_prefix, tags); - - for (const auto & tag : tags) - { - if (tag == KafkaConfigLoader::CONFIG_KAFKA_PRODUCER_TAG || tag == KafkaConfigLoader::CONFIG_KAFKA_CONSUMER_TAG) - /// Do not load consumer/producer properties, since they should be separated by different configuration objects. 
- continue; - - if (tag.starts_with(KafkaConfigLoader::CONFIG_KAFKA_TOPIC_TAG)) /// multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. - { - // Update consumer topic-specific configuration (new syntax). Example with topics "football" and "baseball": - // - // - // football - // 250 - // 5000 - // - // - // baseball - // 300 - // 2000 - // - // - // Advantages: The period restriction no longer applies (e.g. sports.football will work), everything - // Kafka-related is below . - for (const auto & topic : params.topics) - { - /// Read topic name between ... - const String kafka_topic_path = config_prefix + "." + tag; - const String kafka_topic_name_path = kafka_topic_path + "." + KafkaConfigLoader::CONFIG_NAME_TAG; - const String topic_name = params.config.getString(kafka_topic_name_path); - - if (topic_name != topic) - continue; - loadTopicConfig(kafka_config, params.config, params.collection_name, kafka_topic_path, topic); - } - continue; - } - if (tag.starts_with(KafkaConfigLoader::CONFIG_KAFKA_TAG)) - /// skip legacy configuration per topic e.g. . - /// it will be processed is a separate function - continue; - // Update configuration from the configuration. Example: - // - // 250 - // 100000 - // - loadConfigProperty(kafka_config, params.config, config_prefix, tag); - } -} - -void loadLegacyConfigSyntax( - cppkafka::Configuration & kafka_config, - const Poco::Util::AbstractConfiguration & config, - const String & collection_name, - const Names & topics) -{ - for (const auto & topic : topics) - { - const String kafka_topic_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." + KafkaConfigLoader::CONFIG_KAFKA_TAG + "_" + topic; - loadLegacyTopicConfig(kafka_config, config, collection_name, kafka_topic_path); - } -} - -static void loadConsumerConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params) -{ - const String consumer_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." + KafkaConfigLoader::CONFIG_KAFKA_CONSUMER_TAG; - loadLegacyConfigSyntax(kafka_config, params.config, params.collection_name, params.topics); - // A new syntax has higher priority - loadFromConfig(kafka_config, params, consumer_path); -} - -static void loadProducerConfig(cppkafka::Configuration & kafka_config, const KafkaConfigLoader::LoadConfigParams & params) -{ - const String producer_path = KafkaConfigLoader::CONFIG_KAFKA_TAG + "." 
+ KafkaConfigLoader::CONFIG_KAFKA_PRODUCER_TAG; - loadLegacyConfigSyntax(kafka_config, params.config, params.collection_name, params.topics); - // A new syntax has higher priority - loadFromConfig(kafka_config, params, producer_path); -} - -template -static void updateGlobalConfiguration( - cppkafka::Configuration & kafka_config, TKafkaStorage & storage, const KafkaConfigLoader::LoadConfigParams & params) -{ - loadFromConfig(kafka_config, params, KafkaConfigLoader::CONFIG_KAFKA_TAG); - -#if USE_KRB5 - if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) - LOG_WARNING(params.log, "sasl.kerberos.kinit.cmd configuration parameter is ignored."); - - kafka_config.set("sasl.kerberos.kinit.cmd", ""); - kafka_config.set("sasl.kerberos.min.time.before.relogin", "0"); - - if (kafka_config.has_property("sasl.kerberos.keytab") && kafka_config.has_property("sasl.kerberos.principal")) - { - String keytab = kafka_config.get("sasl.kerberos.keytab"); - String principal = kafka_config.get("sasl.kerberos.principal"); - LOG_DEBUG(params.log, "Running KerberosInit"); - try - { - kerberosInit(keytab, principal); - } - catch (const Exception & e) - { - LOG_ERROR(params.log, "KerberosInit failure: {}", getExceptionMessage(e, false)); - } - LOG_DEBUG(params.log, "Finished KerberosInit"); - } -#else // USE_KRB5 - if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) - LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); -#endif // USE_KRB5 - // No need to add any prefix, messages can be distinguished - kafka_config.set_log_callback( - [log = params.log](cppkafka::KafkaHandleBase & handle, int level, const std::string & facility, const std::string & message) - { - auto [poco_level, client_logs_level] = parseSyslogLevel(level); - const auto & kafka_object_config = handle.get_configuration(); - const std::string client_id_key{"client.id"}; - chassert(kafka_object_config.has_property(client_id_key) && "Kafka configuration doesn't have expected client.id set"); - LOG_IMPL( - log, - client_logs_level, - poco_level, - "[client.id:{}] [rdk:{}] {}", - kafka_object_config.get(client_id_key), - facility, - message); - }); - - /// NOTE: statistics should be consumed, otherwise it creates too much - /// entries in the queue, that leads to memory leak and slow shutdown. - if (!kafka_config.has_property("statistics.interval.ms")) - { - // every 3 seconds by default. set to 0 to disable. - kafka_config.set("statistics.interval.ms", "3000"); - } - // Configure interceptor to change thread name - // - // TODO: add interceptors support into the cppkafka. - // XXX: rdkafka uses pthread_set_name_np(), but glibc-compatibility overrides it to noop. - { - // This should be safe, since we wait the rdkafka object anyway. 
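The interceptor registration that follows hands the storage object to librdkafka as an untyped `void *` and relies on the storage outliving the rdkafka handle, which is what the comment directly above argues. The standalone sketch below shows that C-callback pattern with nothing beyond the standard library; `CallbackRegistry`, `EventCallback`, `Counter`, and `onEvent` are invented names for the illustration, not librdkafka or ClickHouse symbols.

```cpp
#include <iostream>
#include <utility>
#include <vector>

// A C-style callback takes an opaque context pointer, in the spirit of the
// interceptor registration below that passes `self` as void *.
using EventCallback = void (*)(const char * event, void * ctx);

// Minimal registry standing in for the configuration object that stores callbacks.
struct CallbackRegistry
{
    std::vector<std::pair<EventCallback, void *>> callbacks;

    void add(EventCallback cb, void * ctx) { callbacks.emplace_back(cb, ctx); }

    void fire(const char * event) const
    {
        for (const auto & [cb, ctx] : callbacks)
            cb(event, ctx);
    }
};

struct Counter
{
    int events = 0;
};

// The callback recovers the typed object from the opaque pointer. This is only safe
// while the Counter outlives the registry holding the pointer, which is the same
// lifetime argument the comment above makes for passing the storage object.
void onEvent(const char * event, void * ctx)
{
    auto * counter = static_cast<Counter *>(ctx);
    ++counter->events;
    std::cout << "event: " << event << '\n';
}

int main()
{
    Counter counter;           // declared first, so it outlives the registry
    CallbackRegistry registry; // destroyed before `counter`
    registry.add(onEvent, &counter);

    registry.fire("new");
    registry.fire("conf_dup");
    std::cout << "events seen: " << counter.events << '\n';
}
```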
- void * self = static_cast(&storage); - - int status; - - status = rd_kafka_conf_interceptor_add_on_new( - kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnNew, self); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(params.log, "Cannot set new interceptor due to {} error", status); - - // cppkafka always copy the configuration - status = rd_kafka_conf_interceptor_add_on_conf_dup( - kafka_config.get_handle(), "init", StorageKafkaInterceptors::rdKafkaOnConfDup, self); - if (status != RD_KAFKA_RESP_ERR_NO_ERROR) - LOG_ERROR(params.log, "Cannot set dup conf interceptor due to {} error", status); - } -} - -template -cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(TKafkaStorage & storage, const ConsumerConfigParams & params) -{ - cppkafka::Configuration conf; - - conf.set("metadata.broker.list", params.brokers); - conf.set("group.id", params.group); - if (params.multiple_consumers) - conf.set("client.id", fmt::format("{}-{}", params.client_id, params.consumer_number)); - else - conf.set("client.id", params.client_id); - conf.set("client.software.name", VERSION_NAME); - conf.set("client.software.version", VERSION_DESCRIBE); - conf.set("auto.offset.reset", "earliest"); // If no offset stored for this group, read all messages from the start - - // that allows to prevent fast draining of the librdkafka queue - // during building of single insert block. Improves performance - // significantly, but may lead to bigger memory consumption. - size_t default_queued_min_messages = 100000; // must be greater than or equal to default - size_t max_allowed_queued_min_messages = 10000000; // must be less than or equal to max allowed value - conf.set( - "queued.min.messages", std::min(std::max(params.max_block_size, default_queued_min_messages), max_allowed_queued_min_messages)); - - updateGlobalConfiguration(conf, storage, params); - loadConsumerConfig(conf, params); - - // those settings should not be changed by users. - conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished - conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once. 
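The `queued.min.messages` setting a few lines above clamps the table's `max_block_size` into the `[100000, 10000000]` range with nested `std::min`/`std::max`. The small program below only demonstrates that arithmetic and checks it against the equivalent `std::clamp` form; it is a sketch of the bound logic, not a proposal to change the production expression.

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>

int main()
{
    constexpr std::size_t default_queued_min_messages = 100'000;         // lower bound
    constexpr std::size_t max_allowed_queued_min_messages = 10'000'000;  // upper bound

    for (std::size_t max_block_size : {std::size_t{1}, std::size_t{65'536},
                                       std::size_t{500'000}, std::size_t{50'000'000}})
    {
        // The nested min/max expression used in the consumer configuration above.
        std::size_t nested = std::min(
            std::max(max_block_size, default_queued_min_messages),
            max_allowed_queued_min_messages);

        // Equivalent formulation; std::clamp(value, lo, hi) requires lo <= hi.
        std::size_t clamped = std::clamp(
            max_block_size, default_queued_min_messages, max_allowed_queued_min_messages);

        assert(nested == clamped);
        std::cout << max_block_size << " -> " << clamped << '\n';
    }
}
```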
- conf.set("enable.partition.eof", "false"); // Ignore EOF messages - - for (auto & property : conf.get_all()) - { - LOG_TRACE(params.log, "Consumer set property {}:{}", property.first, property.second); - } - - return conf; -} - -template cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(StorageKafka & storage, const ConsumerConfigParams & params); -template cppkafka::Configuration KafkaConfigLoader::getConsumerConfiguration(StorageKafka2 & storage, const ConsumerConfigParams & params); - -template -cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(TKafkaStorage & storage, const ProducerConfigParams & params) -{ - cppkafka::Configuration conf; - conf.set("metadata.broker.list", params.brokers); - conf.set("client.id", params.client_id); - conf.set("client.software.name", VERSION_NAME); - conf.set("client.software.version", VERSION_DESCRIBE); - - updateGlobalConfiguration(conf, storage, params); - loadProducerConfig(conf, params); - - for (auto & property : conf.get_all()) - { - LOG_TRACE(params.log, "Producer set property {}:{}", property.first, property.second); - } - - return conf; -} - -template cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(StorageKafka & storage, const ProducerConfigParams & params); -template cppkafka::Configuration KafkaConfigLoader::getProducerConfiguration(StorageKafka2 & storage, const ProducerConfigParams & params); - -void registerStorageKafka(StorageFactory & factory) -{ - auto creator_fn = [](const StorageFactory::Arguments & args) -> std::shared_ptr - { - ASTs & engine_args = args.engine_args; - size_t args_count = engine_args.size(); - const bool has_settings = args.storage_def->settings; - - auto kafka_settings = std::make_unique(); - String collection_name; - if (auto named_collection = tryGetNamedCollectionWithOverrides(args.engine_args, args.getLocalContext())) - { - for (const auto & setting : kafka_settings->all()) - { - const auto & setting_name = setting.getName(); - if (named_collection->has(setting_name)) - kafka_settings->set(setting_name, named_collection->get(setting_name)); - } - collection_name = assert_cast(args.engine_args[0].get())->name(); - } - - if (has_settings) - { - kafka_settings->loadFromQuery(*args.storage_def); - } - -// Check arguments and settings -#define CHECK_KAFKA_STORAGE_ARGUMENT(ARG_NUM, PAR_NAME, EVAL) \ - /* One of the four required arguments is not specified */ \ - if (args_count < (ARG_NUM) && (ARG_NUM) <= 4 && !kafka_settings->PAR_NAME.changed) \ - { \ - throw Exception( \ - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, \ - "Required parameter '{}' " \ - "for storage Kafka not specified", \ - #PAR_NAME); \ - } \ - if (args_count >= (ARG_NUM)) \ - { \ - /* The same argument is given in two places */ \ - if (has_settings && kafka_settings->PAR_NAME.changed) \ - { \ - throw Exception( \ - ErrorCodes::BAD_ARGUMENTS, \ - "The argument №{} of storage Kafka " \ - "and the parameter '{}' " \ - "in SETTINGS cannot be specified at the same time", \ - #ARG_NUM, \ - #PAR_NAME); \ - } \ - /* move engine args to settings */ \ - else \ - { \ - if constexpr ((EVAL) == 1) \ - { \ - engine_args[(ARG_NUM)-1] = evaluateConstantExpressionAsLiteral(engine_args[(ARG_NUM)-1], args.getLocalContext()); \ - } \ - if constexpr ((EVAL) == 2) \ - { \ - engine_args[(ARG_NUM)-1] \ - = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[(ARG_NUM)-1], args.getLocalContext()); \ - } \ - kafka_settings->PAR_NAME = engine_args[(ARG_NUM)-1]->as().value; \ - } \ - } - - /** Arguments of 
engine is following: - * - Kafka broker list - * - List of topics - * - Group ID (may be a constant expression with a string result) - * - Message format (string) - * - Row delimiter - * - Schema (optional, if the format supports it) - * - Number of consumers - * - Max block size for background consumption - * - Skip (at least) unreadable messages number - * - Do intermediate commits when the batch consumed and handled - */ - - /* 0 = raw, 1 = evaluateConstantExpressionAsLiteral, 2=evaluateConstantExpressionOrIdentifierAsLiteral */ - /// In case of named collection we already validated the arguments. - if (collection_name.empty()) - { - CHECK_KAFKA_STORAGE_ARGUMENT(1, kafka_broker_list, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(2, kafka_topic_list, 1) - CHECK_KAFKA_STORAGE_ARGUMENT(3, kafka_group_name, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(4, kafka_format, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(5, kafka_row_delimiter, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(6, kafka_schema, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(7, kafka_num_consumers, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(8, kafka_max_block_size, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(9, kafka_skip_broken_messages, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(10, kafka_commit_every_batch, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(11, kafka_client_id, 2) - CHECK_KAFKA_STORAGE_ARGUMENT(12, kafka_poll_timeout_ms, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(13, kafka_flush_interval_ms, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(14, kafka_thread_per_consumer, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(15, kafka_handle_error_mode, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(16, kafka_commit_on_select, 0) - CHECK_KAFKA_STORAGE_ARGUMENT(17, kafka_max_rows_per_message, 0) - } - -#undef CHECK_KAFKA_STORAGE_ARGUMENT - - auto num_consumers = kafka_settings->kafka_num_consumers.value; - auto max_consumers = std::max(getNumberOfPhysicalCPUCores(), 16); - - if (!args.getLocalContext()->getSettingsRef().kafka_disable_num_consumers_limit && num_consumers > max_consumers) - { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "The number of consumers can not be bigger than {}. " - "A single consumer can read any number of partitions. " - "Extra consumers are relatively expensive, " - "and using a lot of them can lead to high memory and CPU usage. " - "To achieve better performance " - "of getting data from Kafka, consider using a setting kafka_thread_per_consumer=1, " - "and ensure you have enough threads " - "in MessageBrokerSchedulePool (background_message_broker_schedule_pool_size). 
" - "See also https://clickhouse.com/docs/integrations/kafka/kafka-table-engine#tuning-performance", - max_consumers); - } - else if (num_consumers < 1) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of consumers can not be lower than 1"); - } - - if (kafka_settings->kafka_max_block_size.changed && kafka_settings->kafka_max_block_size.value < 1) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_max_block_size can not be lower than 1"); - } - - if (kafka_settings->kafka_poll_max_batch_size.changed && kafka_settings->kafka_poll_max_batch_size.value < 1) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_poll_max_batch_size can not be lower than 1"); - } - NamesAndTypesList supported_columns; - for (const auto & column : args.columns) - { - if (column.default_desc.kind == ColumnDefaultKind::Alias) - supported_columns.emplace_back(column.name, column.type); - if (column.default_desc.kind == ColumnDefaultKind::Default && !column.default_desc.expression) - supported_columns.emplace_back(column.name, column.type); - } - // Kafka engine allows only ordinary columns without default expression or alias columns. - if (args.columns.getAll() != supported_columns) - { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "KafkaEngine doesn't support DEFAULT/MATERIALIZED/EPHEMERAL expressions for columns. " - "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); - } - - const auto has_keeper_path = kafka_settings->kafka_keeper_path.changed && !kafka_settings->kafka_keeper_path.value.empty(); - const auto has_replica_name = kafka_settings->kafka_replica_name.changed && !kafka_settings->kafka_replica_name.value.empty(); - - if (!has_keeper_path && !has_replica_name) - return std::make_shared( - args.table_id, args.getContext(), args.columns, args.comment, std::move(kafka_settings), collection_name); - - if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_offsets_storage_in_keeper && !args.query.attach) - throw Exception( - ErrorCodes::SUPPORT_IS_DISABLED, - "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_offsets_storage_in_keeper` setting " - "to enable it"); - - if (!has_keeper_path || !has_replica_name) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, "Either specify both zookeeper path and replica name or none of them"); - - const auto is_on_cluster = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; - const auto is_replicated_database = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY - && DatabaseCatalog::instance().getDatabase(args.table_id.database_name)->getEngineName() == "Replicated"; - - // UUID macro is only allowed: - // - with Atomic database only with ON CLUSTER queries, otherwise it is easy to misuse: each replica would have separate uuid generated. - // - with Replicated database - // - with attach queries, as those are used on server startup - const auto allow_uuid_macro = is_on_cluster || is_replicated_database || args.query.attach; - - auto context = args.getContext(); - // Unfold {database} and {table} macro on table creation, so table can be renamed. - if (args.mode < LoadingStrictnessLevel::ATTACH) - { - Macros::MacroExpansionInfo info; - /// NOTE: it's not recursive - info.expand_special_macros_only = true; - info.table_id = args.table_id; - // We could probably unfold UUID here too, but let's keep it similar to ReplicatedMergeTree, which doesn't do the unfolding. 
- info.table_id.uuid = UUIDHelpers::Nil; - kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); - - info.level = 0; - kafka_settings->kafka_replica_name.value = context->getMacros()->expand(kafka_settings->kafka_replica_name.value, info); - } - - - auto * settings_query = args.storage_def->settings; - chassert(has_settings && "Unexpected settings query in StorageKafka"); - - settings_query->changes.setSetting("kafka_keeper_path", kafka_settings->kafka_keeper_path.value); - settings_query->changes.setSetting("kafka_replica_name", kafka_settings->kafka_replica_name.value); - - // Expand other macros (such as {replica}). We do not expand them on previous step to make possible copying metadata files between replicas. - // Disable expanding {shard} macro, because it can lead to incorrect behavior and it doesn't make sense to shard Kafka tables. - Macros::MacroExpansionInfo info; - info.table_id = args.table_id; - if (is_replicated_database) - { - auto database = DatabaseCatalog::instance().getDatabase(args.table_id.database_name); - info.shard.reset(); - info.replica = getReplicatedDatabaseReplicaName(database); - } - if (!allow_uuid_macro) - info.table_id.uuid = UUIDHelpers::Nil; - kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); - - info.level = 0; - info.table_id.uuid = UUIDHelpers::Nil; - kafka_settings->kafka_replica_name.value = context->getMacros()->expand(kafka_settings->kafka_replica_name.value, info); - - return std::make_shared( - args.table_id, args.getContext(), args.columns, args.comment, std::move(kafka_settings), collection_name); - }; - - factory.registerStorage( - "Kafka", - creator_fn, - StorageFactory::StorageFeatures{ - .supports_settings = true, - }); -} - -namespace StorageKafkaUtils -{ -Names parseTopics(String topic_list) -{ - Names result; - boost::split(result, topic_list, [](char c) { return c == ','; }); - for (String & topic : result) - boost::trim(topic); - return result; -} - -String getDefaultClientId(const StorageID & table_id) -{ - return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id.database_name, table_id.table_name); -} - -void drainConsumer( - cppkafka::Consumer & consumer, const std::chrono::milliseconds drain_timeout, const LoggerPtr & log, ErrorHandler error_handler) -{ - auto start_time = std::chrono::steady_clock::now(); - cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); - - while (true) - { - auto msg = consumer.poll(100ms); - if (!msg) - break; - - auto error = msg.get_error(); - - if (error) - { - if (msg.is_eof() || error == last_error) - { - break; - } - else - { - LOG_ERROR(log, "Error during draining: {}", error); - error_handler(error); - } - } - - // i don't stop draining on first error, - // only if it repeats once again sequentially - last_error = error; - - auto ts = std::chrono::steady_clock::now(); - if (std::chrono::duration_cast(ts - start_time) > drain_timeout) - { - LOG_ERROR(log, "Timeout during draining."); - break; - } - } -} - -void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler) -{ - size_t skipped = std::erase_if( - messages, - [&](auto & message) - { - if (auto error = message.get_error()) - { - ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); - LOG_ERROR(log, "Consumer error: {}", error); - error_handler(error); - return true; - } - return false; - }); - - if (skipped) - LOG_ERROR(log, "There were {} 
messages with an error", skipped); -} - -SettingsChanges createSettingsAdjustments(KafkaSettings & kafka_settings, const String & schema_name) -{ - SettingsChanges result; - // Needed for backward compatibility - if (!kafka_settings.input_format_skip_unknown_fields.changed) - { - // Always skip unknown fields regardless of the context (JSON or TSKV) - kafka_settings.input_format_skip_unknown_fields = true; - } - - if (!kafka_settings.input_format_allow_errors_ratio.changed) - { - kafka_settings.input_format_allow_errors_ratio = 0.; - } - - if (!kafka_settings.input_format_allow_errors_num.changed) - { - kafka_settings.input_format_allow_errors_num = kafka_settings.kafka_skip_broken_messages.value; - } - - if (!schema_name.empty()) - result.emplace_back("format_schema", schema_name); - - for (const auto & setting : kafka_settings) - { - const auto & name = setting.getName(); - if (name.find("kafka_") == std::string::npos) - result.emplace_back(name, setting.getValue()); - } - return result; -} - - -bool checkDependencies(const StorageID & table_id, const ContextPtr& context) -{ - // Check if all dependencies are attached - auto view_ids = DatabaseCatalog::instance().getDependentViews(table_id); - if (view_ids.empty()) - return true; - - // Check the dependencies are ready? - for (const auto & view_id : view_ids) - { - auto view = DatabaseCatalog::instance().tryGetTable(view_id, context); - if (!view) - return false; - - // If it materialized view, check it's target table - auto * materialized_view = dynamic_cast(view.get()); - if (materialized_view && !materialized_view->tryGetTargetTable()) - return false; - - // Check all its dependencies - if (!checkDependencies(view_id, context)) - return false; - } - - return true; -} - - -VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode) -{ - VirtualColumnsDescription desc; - - desc.addEphemeral("_topic", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_key", std::make_shared(), ""); - desc.addEphemeral("_offset", std::make_shared(), ""); - desc.addEphemeral("_partition", std::make_shared(), ""); - desc.addEphemeral("_timestamp", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_timestamp_ms", std::make_shared(std::make_shared(3)), ""); - desc.addEphemeral("_headers.name", std::make_shared(std::make_shared()), ""); - desc.addEphemeral("_headers.value", std::make_shared(std::make_shared()), ""); - - if (handle_error_mode == StreamingHandleErrorMode::STREAM) - { - desc.addEphemeral("_raw_message", std::make_shared(), ""); - desc.addEphemeral("_error", std::make_shared(), ""); - } - - return desc; -} -} - -template struct StorageKafkaInterceptors; -template struct StorageKafkaInterceptors; - -} diff --git a/src/Storages/Kafka/StorageKafkaCommon.h b/src/Storages/Kafka/StorageKafkaCommon.h deleted file mode 100644 index dd38ee69675..00000000000 --- a/src/Storages/Kafka/StorageKafkaCommon.h +++ /dev/null @@ -1,116 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace Poco -{ -namespace Util -{ - class AbstractConfiguration; -} -} - -namespace DB -{ - -struct KafkaSettings; -class VirtualColumnsDescription; - -template -struct StorageKafkaInterceptors -{ - static rd_kafka_resp_err_t rdKafkaOnThreadStart(rd_kafka_t *, rd_kafka_thread_type_t thread_type, const char *, void * ctx); - - static rd_kafka_resp_err_t rdKafkaOnThreadExit(rd_kafka_t *, rd_kafka_thread_type_t, const char *, void * ctx); 
- - static rd_kafka_resp_err_t - rdKafkaOnNew(rd_kafka_t * rk, const rd_kafka_conf_t *, void * ctx, char * /*errstr*/, size_t /*errstr_size*/); - - static rd_kafka_resp_err_t rdKafkaOnConfDup( - rd_kafka_conf_t * new_conf, const rd_kafka_conf_t * /*old_conf*/, size_t /*filter_cnt*/, const char ** /*filter*/, void * ctx); -}; - -struct KafkaConfigLoader -{ - static inline const String CONFIG_KAFKA_TAG = "kafka"; - static inline const String CONFIG_KAFKA_TOPIC_TAG = "kafka_topic"; - static inline const String CONFIG_NAME_TAG = "name"; - static inline const String CONFIG_KAFKA_CONSUMER_TAG = "consumer"; - static inline const String CONFIG_KAFKA_PRODUCER_TAG = "producer"; - using LogCallback = cppkafka::Configuration::LogCallback; - - - struct LoadConfigParams - { - const Poco::Util::AbstractConfiguration & config; - String & collection_name; - const Names & topics; - LoggerPtr & log; - }; - - struct ConsumerConfigParams : public LoadConfigParams - { - String brokers; - String group; - bool multiple_consumers; - size_t consumer_number; - String client_id; - size_t max_block_size; - }; - - struct ProducerConfigParams : public LoadConfigParams - { - String brokers; - String client_id; - }; - - template - static cppkafka::Configuration getConsumerConfiguration(TKafkaStorage & storage, const ConsumerConfigParams & params); - - template - static cppkafka::Configuration getProducerConfiguration(TKafkaStorage & storage, const ProducerConfigParams & params); -}; - -namespace StorageKafkaUtils -{ -Names parseTopics(String topic_list); -String getDefaultClientId(const StorageID & table_id); - -using ErrorHandler = std::function; - -void drainConsumer( - cppkafka::Consumer & consumer, - std::chrono::milliseconds drain_timeout, - const LoggerPtr & log, - ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); - -using Messages = std::vector; -void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); - -SettingsChanges createSettingsAdjustments(KafkaSettings & kafka_settings, const String & schema_name); - -bool checkDependencies(const StorageID & table_id, const ContextPtr& context); - -VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode); -} -} - -template <> -struct fmt::formatter : fmt::ostream_formatter -{ -}; -template <> -struct fmt::formatter : fmt::ostream_formatter -{ -}; diff --git a/src/Storages/Kafka/StorageKafkaUtils.cpp b/src/Storages/Kafka/StorageKafkaUtils.cpp new file mode 100644 index 00000000000..c510303f45e --- /dev/null +++ b/src/Storages/Kafka/StorageKafkaUtils.cpp @@ -0,0 +1,458 @@ +#include + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if USE_KRB5 +# include +#endif // USE_KRB5 + +namespace CurrentMetrics +{ +extern const Metric KafkaLibrdkafkaThreads; +} + +namespace ProfileEvents +{ +extern const Event KafkaConsumerErrors; +} + +namespace DB +{ + +using namespace std::chrono_literals; + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int SUPPORT_IS_DISABLED; +} + + +void registerStorageKafka(StorageFactory & factory) +{ + auto creator_fn = [](const 
StorageFactory::Arguments & args) -> std::shared_ptr + { + ASTs & engine_args = args.engine_args; + size_t args_count = engine_args.size(); + const bool has_settings = args.storage_def->settings; + + auto kafka_settings = std::make_unique(); + String collection_name; + if (auto named_collection = tryGetNamedCollectionWithOverrides(args.engine_args, args.getLocalContext())) + { + for (const auto & setting : kafka_settings->all()) + { + const auto & setting_name = setting.getName(); + if (named_collection->has(setting_name)) + kafka_settings->set(setting_name, named_collection->get(setting_name)); + } + collection_name = assert_cast(args.engine_args[0].get())->name(); + } + + if (has_settings) + { + kafka_settings->loadFromQuery(*args.storage_def); + } + +// Check arguments and settings +#define CHECK_KAFKA_STORAGE_ARGUMENT(ARG_NUM, PAR_NAME, EVAL) \ + /* One of the four required arguments is not specified */ \ + if (args_count < (ARG_NUM) && (ARG_NUM) <= 4 && !kafka_settings->PAR_NAME.changed) \ + { \ + throw Exception( \ + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, \ + "Required parameter '{}' " \ + "for storage Kafka not specified", \ + #PAR_NAME); \ + } \ + if (args_count >= (ARG_NUM)) \ + { \ + /* The same argument is given in two places */ \ + if (has_settings && kafka_settings->PAR_NAME.changed) \ + { \ + throw Exception( \ + ErrorCodes::BAD_ARGUMENTS, \ + "The argument №{} of storage Kafka " \ + "and the parameter '{}' " \ + "in SETTINGS cannot be specified at the same time", \ + #ARG_NUM, \ + #PAR_NAME); \ + } \ + /* move engine args to settings */ \ + else \ + { \ + if constexpr ((EVAL) == 1) \ + { \ + engine_args[(ARG_NUM)-1] = evaluateConstantExpressionAsLiteral(engine_args[(ARG_NUM)-1], args.getLocalContext()); \ + } \ + if constexpr ((EVAL) == 2) \ + { \ + engine_args[(ARG_NUM)-1] \ + = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[(ARG_NUM)-1], args.getLocalContext()); \ + } \ + kafka_settings->PAR_NAME = engine_args[(ARG_NUM)-1]->as().value; \ + } \ + } + + /** Arguments of engine is following: + * - Kafka broker list + * - List of topics + * - Group ID (may be a constant expression with a string result) + * - Message format (string) + * - Row delimiter + * - Schema (optional, if the format supports it) + * - Number of consumers + * - Max block size for background consumption + * - Skip (at least) unreadable messages number + * - Do intermediate commits when the batch consumed and handled + */ + + /* 0 = raw, 1 = evaluateConstantExpressionAsLiteral, 2=evaluateConstantExpressionOrIdentifierAsLiteral */ + /// In case of named collection we already validated the arguments. 
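The `CHECK_KAFKA_STORAGE_ARGUMENT` macro defined above, and applied just below, enforces two rules per engine argument: a required argument must be supplied either positionally or in SETTINGS, and supplying it in both places is an error. The following sketch expresses the same rules as ordinary code over plain strings; `resolveArgument` and its parameters are illustrative inventions, not anything from the ClickHouse source.

```cpp
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

// One named Kafka setting that may come from a positional engine argument
// and/or from the SETTINGS clause, like kafka_broker_list or kafka_format.
std::string resolveArgument(
    const std::string & name,
    const std::optional<std::string> & positional,    // engine argument, if given
    const std::optional<std::string> & from_settings, // SETTINGS value, if changed
    bool required)
{
    if (positional && from_settings)
        throw std::invalid_argument(
            "The argument '" + name + "' cannot be specified both positionally and in SETTINGS");

    if (positional)
        return *positional; // corresponds to "move engine args to settings"

    if (from_settings)
        return *from_settings;

    if (required)
        throw std::invalid_argument("Required parameter '" + name + "' not specified");

    return {}; // optional argument left at its default
}

int main()
{
    std::cout << resolveArgument("kafka_broker_list", "kafka:9092", std::nullopt, true) << '\n';
    std::cout << resolveArgument("kafka_format", std::nullopt, "JSONEachRow", true) << '\n';

    try
    {
        resolveArgument("kafka_group_name", "g1", "g2", true); // given twice -> error
    }
    catch (const std::exception & e)
    {
        std::cout << "rejected: " << e.what() << '\n';
    }
}
```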
+ if (collection_name.empty()) + { + CHECK_KAFKA_STORAGE_ARGUMENT(1, kafka_broker_list, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(2, kafka_topic_list, 1) + CHECK_KAFKA_STORAGE_ARGUMENT(3, kafka_group_name, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(4, kafka_format, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(5, kafka_row_delimiter, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(6, kafka_schema, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(7, kafka_num_consumers, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(8, kafka_max_block_size, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(9, kafka_skip_broken_messages, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(10, kafka_commit_every_batch, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(11, kafka_client_id, 2) + CHECK_KAFKA_STORAGE_ARGUMENT(12, kafka_poll_timeout_ms, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(13, kafka_flush_interval_ms, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(14, kafka_thread_per_consumer, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(15, kafka_handle_error_mode, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(16, kafka_commit_on_select, 0) + CHECK_KAFKA_STORAGE_ARGUMENT(17, kafka_max_rows_per_message, 0) + } + +#undef CHECK_KAFKA_STORAGE_ARGUMENT + + auto num_consumers = kafka_settings->kafka_num_consumers.value; + auto max_consumers = std::max(getNumberOfPhysicalCPUCores(), 16); + + if (!args.getLocalContext()->getSettingsRef().kafka_disable_num_consumers_limit && num_consumers > max_consumers) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The number of consumers can not be bigger than {}. " + "A single consumer can read any number of partitions. " + "Extra consumers are relatively expensive, " + "and using a lot of them can lead to high memory and CPU usage. " + "To achieve better performance " + "of getting data from Kafka, consider using a setting kafka_thread_per_consumer=1, " + "and ensure you have enough threads " + "in MessageBrokerSchedulePool (background_message_broker_schedule_pool_size). " + "See also https://clickhouse.com/docs/integrations/kafka/kafka-table-engine#tuning-performance", + max_consumers); + } + else if (num_consumers < 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Number of consumers can not be lower than 1"); + } + + if (kafka_settings->kafka_max_block_size.changed && kafka_settings->kafka_max_block_size.value < 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_max_block_size can not be lower than 1"); + } + + if (kafka_settings->kafka_poll_max_batch_size.changed && kafka_settings->kafka_poll_max_batch_size.value < 1) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "kafka_poll_max_batch_size can not be lower than 1"); + } + NamesAndTypesList supported_columns; + for (const auto & column : args.columns) + { + if (column.default_desc.kind == ColumnDefaultKind::Alias) + supported_columns.emplace_back(column.name, column.type); + if (column.default_desc.kind == ColumnDefaultKind::Default && !column.default_desc.expression) + supported_columns.emplace_back(column.name, column.type); + } + // Kafka engine allows only ordinary columns without default expression or alias columns. + if (args.columns.getAll() != supported_columns) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "KafkaEngine doesn't support DEFAULT/MATERIALIZED/EPHEMERAL expressions for columns. 
" + "See https://clickhouse.com/docs/en/engines/table-engines/integrations/kafka/#configuration"); + } + + const auto has_keeper_path = kafka_settings->kafka_keeper_path.changed && !kafka_settings->kafka_keeper_path.value.empty(); + const auto has_replica_name = kafka_settings->kafka_replica_name.changed && !kafka_settings->kafka_replica_name.value.empty(); + + if (!has_keeper_path && !has_replica_name) + return std::make_shared( + args.table_id, args.getContext(), args.columns, args.comment, std::move(kafka_settings), collection_name); + + if (!args.getLocalContext()->getSettingsRef().allow_experimental_kafka_offsets_storage_in_keeper && !args.query.attach) + throw Exception( + ErrorCodes::SUPPORT_IS_DISABLED, + "Storing the Kafka offsets in Keeper is experimental. Set `allow_experimental_kafka_offsets_storage_in_keeper` setting " + "to enable it"); + + if (!has_keeper_path || !has_replica_name) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Either specify both zookeeper path and replica name or none of them"); + + const auto is_on_cluster = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; + const auto is_replicated_database = args.getLocalContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY + && DatabaseCatalog::instance().getDatabase(args.table_id.database_name)->getEngineName() == "Replicated"; + + // UUID macro is only allowed: + // - with Atomic database only with ON CLUSTER queries, otherwise it is easy to misuse: each replica would have separate uuid generated. + // - with Replicated database + // - with attach queries, as those are used on server startup + const auto allow_uuid_macro = is_on_cluster || is_replicated_database || args.query.attach; + + auto context = args.getContext(); + // Unfold {database} and {table} macro on table creation, so table can be renamed. + if (args.mode < LoadingStrictnessLevel::ATTACH) + { + Macros::MacroExpansionInfo info; + /// NOTE: it's not recursive + info.expand_special_macros_only = true; + info.table_id = args.table_id; + // We could probably unfold UUID here too, but let's keep it similar to ReplicatedMergeTree, which doesn't do the unfolding. + info.table_id.uuid = UUIDHelpers::Nil; + kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); + + info.level = 0; + kafka_settings->kafka_replica_name.value = context->getMacros()->expand(kafka_settings->kafka_replica_name.value, info); + } + + + auto * settings_query = args.storage_def->settings; + chassert(has_settings && "Unexpected settings query in StorageKafka"); + + settings_query->changes.setSetting("kafka_keeper_path", kafka_settings->kafka_keeper_path.value); + settings_query->changes.setSetting("kafka_replica_name", kafka_settings->kafka_replica_name.value); + + // Expand other macros (such as {replica}). We do not expand them on previous step to make possible copying metadata files between replicas. + // Disable expanding {shard} macro, because it can lead to incorrect behavior and it doesn't make sense to shard Kafka tables. 
+ Macros::MacroExpansionInfo info; + info.table_id = args.table_id; + if (is_replicated_database) + { + auto database = DatabaseCatalog::instance().getDatabase(args.table_id.database_name); + info.shard.reset(); + info.replica = getReplicatedDatabaseReplicaName(database); + } + if (!allow_uuid_macro) + info.table_id.uuid = UUIDHelpers::Nil; + kafka_settings->kafka_keeper_path.value = context->getMacros()->expand(kafka_settings->kafka_keeper_path.value, info); + + info.level = 0; + info.table_id.uuid = UUIDHelpers::Nil; + kafka_settings->kafka_replica_name.value = context->getMacros()->expand(kafka_settings->kafka_replica_name.value, info); + + return std::make_shared( + args.table_id, args.getContext(), args.columns, args.comment, std::move(kafka_settings), collection_name); + }; + + factory.registerStorage( + "Kafka", + creator_fn, + StorageFactory::StorageFeatures{ + .supports_settings = true, + }); +} + +namespace StorageKafkaUtils +{ +Names parseTopics(String topic_list) +{ + Names result; + boost::split(result, topic_list, [](char c) { return c == ','; }); + for (String & topic : result) + boost::trim(topic); + return result; +} + +String getDefaultClientId(const StorageID & table_id) +{ + return fmt::format("{}-{}-{}-{}", VERSION_NAME, getFQDNOrHostName(), table_id.database_name, table_id.table_name); +} + +void drainConsumer( + cppkafka::Consumer & consumer, const std::chrono::milliseconds drain_timeout, const LoggerPtr & log, ErrorHandler error_handler) +{ + auto start_time = std::chrono::steady_clock::now(); + cppkafka::Error last_error(RD_KAFKA_RESP_ERR_NO_ERROR); + + while (true) + { + auto msg = consumer.poll(100ms); + if (!msg) + break; + + auto error = msg.get_error(); + + if (error) + { + if (msg.is_eof() || error == last_error) + { + break; + } + else + { + LOG_ERROR(log, "Error during draining: {}", error); + error_handler(error); + } + } + + // i don't stop draining on first error, + // only if it repeats once again sequentially + last_error = error; + + auto ts = std::chrono::steady_clock::now(); + if (std::chrono::duration_cast(ts - start_time) > drain_timeout) + { + LOG_ERROR(log, "Timeout during draining."); + break; + } + } +} + +void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler) +{ + size_t skipped = std::erase_if( + messages, + [&](auto & message) + { + if (auto error = message.get_error()) + { + ProfileEvents::increment(ProfileEvents::KafkaConsumerErrors); + LOG_ERROR(log, "Consumer error: {}", error); + error_handler(error); + return true; + } + return false; + }); + + if (skipped) + LOG_ERROR(log, "There were {} messages with an error", skipped); +} + +SettingsChanges createSettingsAdjustments(KafkaSettings & kafka_settings, const String & schema_name) +{ + SettingsChanges result; + // Needed for backward compatibility + if (!kafka_settings.input_format_skip_unknown_fields.changed) + { + // Always skip unknown fields regardless of the context (JSON or TSKV) + kafka_settings.input_format_skip_unknown_fields = true; + } + + if (!kafka_settings.input_format_allow_errors_ratio.changed) + { + kafka_settings.input_format_allow_errors_ratio = 0.; + } + + if (!kafka_settings.input_format_allow_errors_num.changed) + { + kafka_settings.input_format_allow_errors_num = kafka_settings.kafka_skip_broken_messages.value; + } + + if (!schema_name.empty()) + result.emplace_back("format_schema", schema_name); + + for (const auto & setting : kafka_settings) + { + const auto & name = setting.getName(); + if (name.find("kafka_") == 
std::string::npos) + result.emplace_back(name, setting.getValue()); + } + return result; +} + + +bool checkDependencies(const StorageID & table_id, const ContextPtr& context) +{ + // Check if all dependencies are attached + auto view_ids = DatabaseCatalog::instance().getDependentViews(table_id); + if (view_ids.empty()) + return true; + + // Check the dependencies are ready? + for (const auto & view_id : view_ids) + { + auto view = DatabaseCatalog::instance().tryGetTable(view_id, context); + if (!view) + return false; + + // If it materialized view, check it's target table + auto * materialized_view = dynamic_cast(view.get()); + if (materialized_view && !materialized_view->tryGetTargetTable()) + return false; + + // Check all its dependencies + if (!checkDependencies(view_id, context)) + return false; + } + + return true; +} + + +VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode) +{ + VirtualColumnsDescription desc; + + desc.addEphemeral("_topic", std::make_shared(std::make_shared()), ""); + desc.addEphemeral("_key", std::make_shared(), ""); + desc.addEphemeral("_offset", std::make_shared(), ""); + desc.addEphemeral("_partition", std::make_shared(), ""); + desc.addEphemeral("_timestamp", std::make_shared(std::make_shared()), ""); + desc.addEphemeral("_timestamp_ms", std::make_shared(std::make_shared(3)), ""); + desc.addEphemeral("_headers.name", std::make_shared(std::make_shared()), ""); + desc.addEphemeral("_headers.value", std::make_shared(std::make_shared()), ""); + + if (handle_error_mode == StreamingHandleErrorMode::STREAM) + { + desc.addEphemeral("_raw_message", std::make_shared(), ""); + desc.addEphemeral("_error", std::make_shared(), ""); + } + + return desc; +} +} +} diff --git a/src/Storages/Kafka/StorageKafkaUtils.h b/src/Storages/Kafka/StorageKafkaUtils.h new file mode 100644 index 00000000000..cc956dde78d --- /dev/null +++ b/src/Storages/Kafka/StorageKafkaUtils.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Poco +{ +namespace Util +{ + class AbstractConfiguration; +} +} + +namespace DB +{ + +class VirtualColumnsDescription; +struct KafkaSettings; + +namespace StorageKafkaUtils +{ +Names parseTopics(String topic_list); +String getDefaultClientId(const StorageID & table_id); + +using ErrorHandler = std::function; + +void drainConsumer( + cppkafka::Consumer & consumer, + std::chrono::milliseconds drain_timeout, + const LoggerPtr & log, + ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); + +using Messages = std::vector; +void eraseMessageErrors(Messages & messages, const LoggerPtr & log, ErrorHandler error_handler = [](const cppkafka::Error & /*err*/) {}); + +SettingsChanges createSettingsAdjustments(KafkaSettings & kafka_settings, const String & schema_name); + +bool checkDependencies(const StorageID & table_id, const ContextPtr& context); + +VirtualColumnsDescription createVirtuals(StreamingHandleErrorMode handle_error_mode); +} +} + +template <> +struct fmt::formatter : fmt::ostream_formatter +{ +}; +template <> +struct fmt::formatter : fmt::ostream_formatter +{ +}; From 9cb52bd1381ad3e0929062801df7c4b542cf1117 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 31 Jul 2024 23:11:35 +0200 Subject: [PATCH 1327/2361] fix build --- src/Functions/DateTimeTransforms.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git 
a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index ce7da406e9a..15f1b9580f3 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -25,7 +24,7 @@ namespace DB static constexpr auto millisecond_multiplier = 1'000; static constexpr auto microsecond_multiplier = 1'000'000; -static constexpr auto nanosecond_multiplier = 1'000'000'000; +static constexpr auto nanosecond_multiplier = 1'000'000'000; static constexpr FormatSettings::DateTimeOverflowBehavior default_date_time_overflow_behavior = FormatSettings::DateTimeOverflowBehavior::Ignore; @@ -382,12 +381,12 @@ struct ToStartOfWeekImpl static UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - const auto res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + const int res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); return std::max(res, 0); } static UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { - const auto res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); + const int res = time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); return std::max(res, 0); } static UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) From 6403f3f545bee153ffaf4ce5bda6fcde33ef88d2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 23:29:54 +0200 Subject: [PATCH 1328/2361] Miscellaneous --- src/Common/Epoll.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Common/Epoll.cpp b/src/Common/Epoll.cpp index 49c86222cf0..ef7c6e143a0 100644 --- a/src/Common/Epoll.cpp +++ b/src/Common/Epoll.cpp @@ -19,7 +19,7 @@ Epoll::Epoll() : events_count(0) { epoll_fd = epoll_create1(0); if (epoll_fd == -1) - throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor"); + throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Cannot open epoll descriptor"); } Epoll::Epoll(Epoll && other) noexcept : epoll_fd(other.epoll_fd), events_count(other.events_count.load()) @@ -47,7 +47,7 @@ void Epoll::add(int fd, void * ptr, uint32_t events) ++events_count; if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) == -1) - throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll"); + throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Cannot add new descriptor to epoll"); } void Epoll::remove(int fd) @@ -55,7 +55,7 @@ void Epoll::remove(int fd) --events_count; if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, nullptr) == -1) - throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll"); + throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Cannot remove descriptor from epoll"); } size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout) const @@ -82,7 +82,7 @@ size_t Epoll::getManyReady(int max_events, epoll_event * events_out, int timeout continue; } else - throw DB::ErrnoException(DB::ErrorCodes::EPOLL_ERROR, "Error in epoll_wait"); + throw ErrnoException(ErrorCodes::EPOLL_ERROR, "Error in epoll_wait"); } else break; From 6e914ff6da67be1c1381ffed2d04b5758704baf3 Mon Sep 17 00:00:00 2001 From: Thom O'Connor Date: Wed, 31 Jul 2024 21:59:37 +0000 Subject: [PATCH 1329/2361] Update settings.md Removing duplicate header "## background_merges_mutations_scheduling_policy" --- docs/en/operations/server-configuration-parameters/settings.md | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 8278f8c8699..a1e3c292b04 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -103,8 +103,6 @@ Default: 2 The policy on how to perform a scheduling for background merges and mutations. Possible values are: `round_robin` and `shortest_task_first`. -## background_merges_mutations_scheduling_policy - Algorithm used to select next merge or mutation to be executed by background thread pool. Policy may be changed at runtime without server restart. Could be applied from the `default` profile for backward compatibility. From 0e36db543762cb146aa6c233d4536fd62c6101b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 31 Jul 2024 22:09:04 +0000 Subject: [PATCH 1330/2361] Do not check the value of `num_messages_read` because it is not reliable librdkafka usually polls both messages on the first SELECT, but sometimes only one. If it polls only one message first, then it will read two messages before rebalancing at the second SELECT from the table. This means it usually reads a single message twice (thus num_messages_read = 4 is usually fine as 1 discarded message + 3 actually consumed message). But when only one message is read in the first SELECT, then 2 messages are discarded, thus num_messages_read will be 5 as 2 discarded message + 3 actually consumed messages. --- tests/integration/test_storage_kafka/test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 8393e88db88..37457e00701 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -4771,7 +4771,7 @@ def test_system_kafka_consumers_rebalance(kafka_cluster, max_retries=15): assignments.current_offset, if(length(exceptions.time)>0, exceptions.time[1]::String, 'never') as last_exception_time_, if(length(exceptions.text)>0, exceptions.text[1], 'no exception') as last_exception_, - stable_timestamp(last_poll_time) as last_poll_time_, num_messages_read, stable_timestamp(last_commit_time) as last_commit_time_, + stable_timestamp(last_poll_time) as last_poll_time_, stable_timestamp(last_commit_time) as last_commit_time_, num_commits, stable_timestamp(last_rebalance_time) as last_rebalance_time_, num_rebalance_revocations, num_rebalance_assignments, is_currently_used FROM system.kafka_consumers WHERE database='test' and table IN ('kafka', 'kafka2') format Vertical; @@ -4791,7 +4791,6 @@ assignments.current_offset: [2] last_exception_time_: never last_exception_: no exception last_poll_time_: now -num_messages_read: 4 last_commit_time_: now num_commits: 2 last_rebalance_time_: now @@ -4810,7 +4809,6 @@ assignments.current_offset: [2] last_exception_time_: never last_exception_: no exception last_poll_time_: now -num_messages_read: 1 last_commit_time_: now num_commits: 1 last_rebalance_time_: never From a592ad3624dedd3080b95422151b17bb92f3c10a Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 31 Jul 2024 23:06:53 +0000 Subject: [PATCH 1331/2361] change error code --- src/Storages/MergeTree/MergeTreeData.cpp | 5 +++-- src/Storages/StorageFactory.cpp | 7 ++++--- src/Storages/StorageMergeTree.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- .../0_stateless/03174_projection_deduplicate.sql | 2 +- 
.../03206_projection_merge_special_mergetree.sql | 14 +++++++------- 6 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 677c4a92cda..7a0980a0e3b 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3216,9 +3216,10 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context { if (auto storage_name = getName(); storage_name != "MergeTree" && storage_name != "ReplicatedMergeTree" && settings_from_storage->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Projection is fully supported in (Replictaed)MergeTree, but also allowed in non-throw mode with other" - " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode."); + " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode." + " Current storage name is {}.", storage_name); } commands.apply(new_metadata, local_context); diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index a059d624cd8..7360d351e8a 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -20,7 +20,7 @@ namespace ErrorCodes extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; extern const int BAD_ARGUMENTS; extern const int DATA_TYPE_CANNOT_BE_USED_IN_TABLES; - extern const int NOT_IMPLEMENTED; + extern const int SUPPORT_IS_DISABLED; } @@ -223,9 +223,10 @@ StoragePtr StorageFactory::get( } } if (!projection_allowed) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Projection is fully supported in (Replictaed)MergeTree, but also allowed in non-throw mode with other" - " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode."); + " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode." + " Current storage name is {}.", engine_name); } } } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index a5d434796ba..84393a3f1b0 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1582,7 +1582,7 @@ bool StorageMergeTree::optimize( if (deduplicate && getInMemoryMetadataPtr()->hasProjections() && getSettings()->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "OPTIMIZE DEDUPLICATE query is not supported for table {} as it has projections. " "User should drop all the projections manually before running the query, " "or consider drop or rebuild option of deduplicate_merge_projection_mode", diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 3751883df24..ad578242010 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5796,7 +5796,7 @@ bool StorageReplicatedMergeTree::optimize( if (deduplicate && getInMemoryMetadataPtr()->hasProjections() && getSettings()->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "OPTIMIZE DEDUPLICATE query is not supported for table {} as it has projections. 
" "User should drop all the projections manually before running the query, " "or consider drop or rebuild option of deduplicate_merge_projection_mode", diff --git a/tests/queries/0_stateless/03174_projection_deduplicate.sql b/tests/queries/0_stateless/03174_projection_deduplicate.sql index 46222b69dc7..f43f0a1f236 100644 --- a/tests/queries/0_stateless/03174_projection_deduplicate.sql +++ b/tests/queries/0_stateless/03174_projection_deduplicate.sql @@ -17,7 +17,7 @@ PRIMARY KEY id; INSERT INTO test_projection_deduplicate VALUES (1, 'one'); INSERT INTO test_projection_deduplicate VALUES (1, 'one'); -OPTIMIZE TABLE test_projection_deduplicate DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +OPTIMIZE TABLE test_projection_deduplicate DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } SELECT * FROM test_projection_deduplicate; diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index e0a4f4f8cec..d3448138396 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -9,7 +9,7 @@ CREATE TABLE tp ( INSERT INTO tp SELECT number%3, 1 FROM numbers(3); -OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } DROP TABLE tp; @@ -26,7 +26,7 @@ OPTIMIZE TABLE tp DEDUPLICATE; ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; -OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } DROP TABLE tp; @@ -36,14 +36,14 @@ CREATE TABLE tp ( type Int32, eventcnt UInt64, PROJECTION p (select sum(eventcnt), type group by type) -) engine = ReplacingMergeTree order by type; -- { serverError NOT_IMPLEMENTED } +) engine = ReplacingMergeTree order by type; -- { serverError SUPPORT_IS_DISABLED } CREATE TABLE tp ( type Int32, eventcnt UInt64, PROJECTION p (select sum(eventcnt), type group by type) ) engine = ReplacingMergeTree order by type -SETTINGS deduplicate_merge_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } +SETTINGS deduplicate_merge_projection_mode = 'throw'; -- { serverError SUPPORT_IS_DISABLED } CREATE TABLE tp ( type Int32, @@ -65,7 +65,7 @@ WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; -OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } DROP TABLE tp; @@ -78,7 +78,7 @@ SETTINGS deduplicate_merge_projection_mode = 'rebuild'; ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; -OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError NOT_IMPLEMENTED } +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } DROP TABLE tp; @@ -88,7 +88,7 @@ CREATE TABLE tp ( eventcnt UInt64 ) engine = ReplacingMergeTree order by type; -ALTER TABLE tp ADD PROJECTION p (SELECT sum(eventcnt), type GROUP BY type); -- { serverError NOT_IMPLEMENTED } +ALTER TABLE tp ADD PROJECTION p (SELECT sum(eventcnt), type GROUP BY type); -- { serverError SUPPORT_IS_DISABLED } ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'drop'; From 587d50380ad3d1cadc56bda0fa700e3441c16ab4 Mon Sep 17 00:00:00 2001 From: sakulali Date: Thu, 1 Aug 2024 09:12:00 +0800 Subject: [PATCH 1332/2361] fix clickhouse-test reference 
--- .../0_stateless/01889_clickhouse_client_config_format.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference b/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference index 2575200e6fa..149315ad9d5 100644 --- a/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference +++ b/tests/queries/0_stateless/01889_clickhouse_client_config_format.reference @@ -21,4 +21,4 @@ autodetect yaml autodetect invalid xml Correct: invalid xml parsed with exception autodetect invalid yaml -Code: 585. Unable to parse YAML configuration file /config_test.badyaml, yaml-cpp: error at line 2, column 12: illegal map value. (CANNOT_PARSE_YAML) +Code: 585. Unable to parse YAML configuration file /config_default.badyaml, yaml-cpp: error at line 2, column 12: illegal map value. (CANNOT_PARSE_YAML) From f162d6bd5e03c6f717b4f45cf4c7ba6491aaa5fa Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 1 Aug 2024 06:35:22 +0000 Subject: [PATCH 1333/2361] Update version_date.tsv and changelogs after v24.7.2.13-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.7.2.13-stable.md | 24 ++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 2 +- 5 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 docs/changelogs/v24.7.2.13-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index e99c86267f9..94603763572 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.7.1.2915" +ARG VERSION="24.7.2.13" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index fb562b911a3..f40118c7b06 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.7.1.2915" +ARG VERSION="24.7.2.13" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 51f4e6a0f40..032aa862e4a 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.7.1.2915" +ARG VERSION="24.7.2.13" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docs/changelogs/v24.7.2.13-stable.md b/docs/changelogs/v24.7.2.13-stable.md new file mode 100644 index 00000000000..4a2fb665116 --- /dev/null +++ b/docs/changelogs/v24.7.2.13-stable.md @@ -0,0 +1,24 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.7.2.13-stable (6e41f601b2f) FIXME as compared to v24.7.1.2915-stable (a37d2d43da7) + +#### Improvement +* Backported in 
[#67531](https://github.com/ClickHouse/ClickHouse/issues/67531): In https://github.com/ClickHouse/ClickHouse/pull/66025 we introduced a setting `input_format_orc_read_use_writer_time_zone` which makes the reader use the writer's time zone when reading an ORC file, instead of always using `GMT`. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)).
+
+#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+* Backported in [#67505](https://github.com/ClickHouse/ClickHouse/issues/67505): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#67580](https://github.com/ClickHouse/ClickHouse/issues/67580): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#67551](https://github.com/ClickHouse/ClickHouse/issues/67551): [Green CI] Fix test test_storage_s3_queue/test.py::test_max_set_age. [#67035](https://github.com/ClickHouse/ClickHouse/pull/67035) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#67514](https://github.com/ClickHouse/ClickHouse/issues/67514): Split test 02967_parallel_replicas_join_algo_and_analyzer. [#67211](https://github.com/ClickHouse/ClickHouse/pull/67211) ([Nikita Taranov](https://github.com/nickitat)).
+* Backported in [#67545](https://github.com/ClickHouse/ClickHouse/issues/67545): [Green CI] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown. [#67474](https://github.com/ClickHouse/ClickHouse/pull/67474) ([Alexey Katsman](https://github.com/alexkats)).
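
As a rough usage sketch for the Improvement entry above (the file name is illustrative and not taken from the patch), the setting is an ordinary per-query format setting:

```sql
-- Read an ORC file using the time zone recorded by its writer instead of GMT.
SELECT *
FROM file('example.orc', 'ORC')
SETTINGS input_format_orc_read_use_writer_time_zone = 1;
```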
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index abd8f84ec74..b1391c2d781 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v24.7.2.13-stable 2024-08-01 v24.7.1.2915-stable 2024-07-30 v24.6.2.17-stable 2024-07-05 v24.6.1.4423-stable 2024-07-01 @@ -5,7 +6,6 @@ v24.5.4.49-stable 2024-07-01 v24.5.3.5-stable 2024-06-13 v24.5.2.34-stable 2024-06-13 v24.5.1.1763-stable 2024-06-01 -v24.4.4.107-stable 2024-07-31 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 From c2df527a32d640f52296ea7aefae177e22504082 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 1 Aug 2024 08:42:54 +0200 Subject: [PATCH 1334/2361] Reduce fault rate --- .../test_keeper_map_retries/configs/fault_injection.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_keeper_map_retries/configs/fault_injection.xml b/tests/integration/test_keeper_map_retries/configs/fault_injection.xml index 145945c7c7c..0933b6b3031 100644 --- a/tests/integration/test_keeper_map_retries/configs/fault_injection.xml +++ b/tests/integration/test_keeper_map_retries/configs/fault_injection.xml @@ -1,6 +1,6 @@ - 0.05 - 0.05 + 0.005 + 0.005 From 5564489cca1c14e95e7c543e03c508849abaf079 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 1 Aug 2024 15:31:54 +0800 Subject: [PATCH 1335/2361] change as request --- .../functions/tuple-map-functions.md | 18 +++++++++++--- src/Functions/map.cpp | 24 ++++++++++--------- .../0_stateless/01651_map_functions.reference | 4 ++++ .../0_stateless/01651_map_functions.sql | 7 +++++- 4 files changed, 38 insertions(+), 15 deletions(-) diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md index db66188b1f5..d670ed42a2a 100644 --- a/docs/en/sql-reference/functions/tuple-map-functions.md +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -43,7 +43,7 @@ Result: ## mapFromArrays -Creates a map from an array of keys and an array of values. +Creates a map from an array or map of keys and an array or map of values. The function is a convenient alternative to syntax `CAST([...], 'Map(key_type, value_type)')`. For example, instead of writing @@ -62,8 +62,8 @@ Alias: `MAP_FROM_ARRAYS(keys, values)` **Arguments** -- `keys` — Array or map of keys to create the map from. [Array(T)](../data-types/array.md) where `T` can be any type supported by [Map](../data-types/map.md) as key type, or [Map](../data-types/map.md). -- `values` - Array or map of values to create the map from. [Array](../data-types/array.md) or [Map](../data-types/map.md). +- `keys` — Array or map of keys to create the map from [Array](../data-types/array.md) or [Map](../data-types/map.md). If `keys` is an array, we accept `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as its type as long as it doesn't contain NULL value. +- `values` - Array or map of values to create the map from [Array](../data-types/array.md) or [Map](../data-types/map.md). 
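
A quick illustration of the `keys` constraint above (a sketch based on the behaviour exercised by this patch's tests): a nullable key array is accepted as long as it holds no actual NULLs, otherwise the query fails with `BAD_ARGUMENTS`:

```sql
SELECT mapFromArrays([1, 2]::Array(Nullable(UInt8)), ['a', 'b']);    -- returns {1:'a',2:'b'}
SELECT mapFromArrays([1, NULL]::Array(Nullable(UInt8)), ['a', 'b']); -- throws BAD_ARGUMENTS
```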
**Returned value** @@ -99,6 +99,18 @@ Result: └───────────────────────────────────────────────────────┘ ``` +```sql +SELECT mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3]) +``` + +Result: + +``` +┌─mapFromArrays(map('a', 1, 'b', 2, 'c', 3), [1, 2, 3])─┠+│ {('a',1):1,('b',2):2,('c',3):3} │ +└───────────────────────────────────────────────────────┘ +``` + ## extractKeyValuePairs Converts a string of key-value pairs to a [Map(String, String)](../data-types/map.md). diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index a8e5f7ad90e..738c61164a3 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -23,6 +23,7 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int SIZES_OF_ARRAYS_DONT_MATCH; extern const int ILLEGAL_COLUMN; + extern const int BAD_ARGUMENTS; } namespace @@ -157,7 +158,7 @@ private: bool use_variant_as_common_type = false; }; -/// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays +/// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays or maps class FunctionMapFromArrays : public IFunction { public: @@ -181,13 +182,13 @@ public: getName(), arguments.size()); - auto get_nested_type = [this](const DataTypePtr & type) -> DataTypePtr + auto get_nested_type = [&](const DataTypePtr & type) { DataTypePtr nested; - if (const auto * array_type = checkAndGetDataType(type.get())) - nested = array_type->getNestedType(); - else if (const auto * map_type = checkAndGetDataType(type.get())) - nested = std::make_shared(map_type->getKeyValueTypes()); + if (const auto * type_as_array = checkAndGetDataType(type.get())) + nested = type_as_array->getNestedType(); + else if (const auto * type_as_map = checkAndGetDataType(type.get())) + nested = std::make_shared(type_as_map->getKeyValueTypes()); else throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, @@ -201,8 +202,9 @@ public: auto key_type = get_nested_type(arguments[0]); auto value_type = get_nested_type(arguments[1]); - /// Remove Nullable from key_type if needed for map key must not be Nullable + /// We accept Array(Nullable(T)) or Array(LowCardinality(Nullable(T))) as key types as long as the actual array doesn't contain NULL value(this is checked in executeImpl). key_type = removeNullableOrLowCardinalityNullable(key_type); + DataTypes key_value_types{key_type, value_type}; return std::make_shared(key_value_types); } @@ -210,7 +212,7 @@ public: ColumnPtr executeImpl( const ColumnsWithTypeAndName & arguments, const DataTypePtr & /* result_type */, size_t /* input_rows_count */) const override { - auto get_array_column = [this](const ColumnPtr & column) -> std::pair + auto get_array_column = [&](const ColumnPtr & column) -> std::pair { bool is_const = isColumnConst(*column); ColumnPtr holder = is_const ? column->convertToFullColumnIfConst() : column; @@ -231,8 +233,9 @@ public: }; auto [col_keys, key_holder] = get_array_column(arguments[0].column); + auto [col_values, values_holder] = get_array_column(arguments[1].column); - /// Check if nested column of first argument contains NULL value in case its nested type is Nullable(T) type. + /// Nullable(T) or LowCardinality(Nullable(T)) are okay as nested key types but actual NULL values are not okay. 
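        /// Concretely: the nested key column is unwrapped below and its null map is checked with
        /// memoryIsZero(); if any entry is set (i.e. at least one key is really NULL), the function
        /// throws BAD_ARGUMENTS, since Map keys themselves can never be NULL.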
ColumnPtr data_keys = col_keys->getDataPtr(); if (isColumnNullableOrLowCardinalityNullable(*data_keys)) { @@ -253,10 +256,9 @@ public: if (null_map && !memoryIsZero(null_map->data(), 0, null_map->size())) throw Exception( - ErrorCodes::ILLEGAL_COLUMN, "The nested column of first argument in function {} must not contain NULLs", getName()); + ErrorCodes::BAD_ARGUMENTS, "The nested column of first argument in function {} must not contain NULLs", getName()); } - auto [col_values, values_holder] = get_array_column(arguments[1].column); if (!col_keys->hasEqualOffsets(*col_values)) throw Exception(ErrorCodes::SIZES_OF_ARRAYS_DONT_MATCH, "Two arguments of function {} must have equal sizes", getName()); diff --git a/tests/queries/0_stateless/01651_map_functions.reference b/tests/queries/0_stateless/01651_map_functions.reference index 9114aa419b1..e336a02318d 100644 --- a/tests/queries/0_stateless/01651_map_functions.reference +++ b/tests/queries/0_stateless/01651_map_functions.reference @@ -55,3 +55,7 @@ {1:3,2:4} {1:3,2:4} {1:3,2:4} {(1,3):'a',(2,4):'b'} +{(1,'a'):'c',(2,'b'):'d'} +{(1,'a'):'c',(2,'b'):'d'} +{(1,'a'):'c',(2,'b'):'d'} +{(1,'a'):'c',(2,'b'):'d'} diff --git a/tests/queries/0_stateless/01651_map_functions.sql b/tests/queries/0_stateless/01651_map_functions.sql index 4604ddd6db1..dc93a38b265 100644 --- a/tests/queries/0_stateless/01651_map_functions.sql +++ b/tests/queries/0_stateless/01651_map_functions.sql @@ -68,7 +68,7 @@ select mapFromArrays([[1,2], [3,4]], [4, 5, 6]); -- { serverError SIZES_OF_ARRAY select mapFromArrays(['a', 2], [4, 5]); -- { serverError NO_COMMON_TYPE} select mapFromArrays([1, 2], [4, 'a']); -- { serverError NO_COMMON_TYPE} select mapFromArrays(['aa', 'bb'], map('a', 4)); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } -select mapFromArrays([1,null]::Array(Nullable(UInt8)), [3,4]); -- { serverError ILLEGAL_COLUMN } +select mapFromArrays([1,null]::Array(Nullable(UInt8)), [3,4]); -- { serverError BAD_ARGUMENTS } select mapFromArrays(['aa', 'bb'], map('a', 4, 'b', 5)); select mapFromArrays(['aa', 'bb'], materialize(map('a', 4, 'b', 5))) from numbers(2); @@ -79,3 +79,8 @@ select mapFromArrays([toLowCardinality(1), toLowCardinality(2)], materialize([4, select mapFromArrays([1,2], [3,4]); select mapFromArrays([1,2]::Array(Nullable(UInt8)), [3,4]); select mapFromArrays([1,2], [3,4]) as x, mapFromArrays(x, ['a', 'b']); + +select mapFromArrays(map(1, 'a', 2, 'b'), array('c', 'd')); +select mapFromArrays(materialize(map(1, 'a', 2, 'b')), array('c', 'd')); +select mapFromArrays(map(1, 'a', 2, 'b'), materialize(array('c', 'd'))); +select mapFromArrays(materialize(map(1, 'a', 2, 'b')), materialize(array('c', 'd'))); From d83c0c1b3b189a78833afec5f87e7004b0f934e3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 1 Aug 2024 07:36:53 +0000 Subject: [PATCH 1336/2361] prevent normalization of WITH RECURSIVE alias --- src/Interpreters/AddDefaultDatabaseVisitor.h | 8 ++++++++ .../0_stateless/03215_view_with_recursive.reference | 1 + .../0_stateless/03215_view_with_recursive.sql | 13 +++++++++++++ 3 files changed, 22 insertions(+) create mode 100644 tests/queries/0_stateless/03215_view_with_recursive.reference create mode 100644 tests/queries/0_stateless/03215_view_with_recursive.sql diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 356bffa75e9..ced94963d5b 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -1,6 +1,7 @@ #pragma once #include +#include 
#include #include #include @@ -100,6 +101,7 @@ private: const String database_name; std::set external_tables; + mutable String with_alias; bool only_replace_current_database_function = false; bool only_replace_in_join = false; @@ -117,6 +119,9 @@ private: void visit(ASTSelectQuery & select, ASTPtr &) const { + if (auto with = select.with()) + with_alias = with->children[0]->as()->name; + if (select.tables()) tryVisit(select.refTables()); @@ -165,6 +170,9 @@ private: /// There is temporary table with such name, should not be rewritten. if (external_tables.contains(identifier.shortName())) return; + /// This is WITH RECURSIVE alias. + if (identifier.name() == with_alias) + return; auto qualified_identifier = std::make_shared(database_name, identifier.name()); if (!identifier.alias.empty()) diff --git a/tests/queries/0_stateless/03215_view_with_recursive.reference b/tests/queries/0_stateless/03215_view_with_recursive.reference new file mode 100644 index 00000000000..c3ac783e702 --- /dev/null +++ b/tests/queries/0_stateless/03215_view_with_recursive.reference @@ -0,0 +1 @@ +5050 diff --git a/tests/queries/0_stateless/03215_view_with_recursive.sql b/tests/queries/0_stateless/03215_view_with_recursive.sql new file mode 100644 index 00000000000..cac47124d51 --- /dev/null +++ b/tests/queries/0_stateless/03215_view_with_recursive.sql @@ -0,0 +1,13 @@ +CREATE VIEW 03215_test_v +AS WITH RECURSIVE test_table AS + ( + SELECT 1 AS number + UNION ALL + SELECT number + 1 + FROM test_table + WHERE number < 100 + ) +SELECT sum(number) +FROM test_table; + +SELECT * FROM 03215_test_v; From 681441e170202bc3963fb3fa1d7b7785192dbd2e Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Thu, 1 Aug 2024 16:01:39 +0800 Subject: [PATCH 1337/2361] fix style --- src/Functions/map.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 738c61164a3..6e389f39dec 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -192,7 +192,7 @@ public: else throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Argument types of function {} must be Array or Map, but {} is given", + "Arguments of function {} must be Array or Map, but {} is given", getName(), type->getName()); @@ -275,10 +275,7 @@ public: static constexpr auto name = "mapUpdate"; static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override - { - return name; - } + String getName() const override { return name; } size_t getNumberOfArguments() const override { return 2; } @@ -287,9 +284,11 @@ public: DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { if (arguments.size() != 2) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Number of arguments for function {} doesn't match: passed {}, should be 2", - getName(), arguments.size()); + getName(), + arguments.size()); const auto * left = checkAndGetDataType(arguments[0].type.get()); const auto * right = checkAndGetDataType(arguments[1].type.get()); @@ -405,7 +404,6 @@ public: return ColumnMap::create(nested_column); } }; - } REGISTER_FUNCTION(Map) From 7db4065898633ace1f909711d4caeda8d135cace Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 1 Aug 2024 10:30:50 +0200 Subject: [PATCH 1338/2361] Add retries to create --- src/Storages/StorageKeeperMap.cpp | 373 +++++++++++------- src/Storages/StorageKeeperMap.h | 6 +- .../configs/keeper_retries.xml | 14 
+ tests/integration/test_keeper_map/test.py | 3 +- .../configs/fault_injection.xml | 1 + .../configs/keeper_retries.xml | 14 + .../test_keeper_map_retries/test.py | 13 +- 7 files changed, 275 insertions(+), 149 deletions(-) create mode 100644 tests/integration/test_keeper_map/configs/keeper_retries.xml create mode 100644 tests/integration/test_keeper_map_retries/configs/keeper_retries.xml diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 0634c7be6ee..a6be9f8da04 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -408,104 +408,192 @@ StorageKeeperMap::StorageKeeperMap( if (attach) { - checkTable(); + checkTable(context_); return; } - auto client = getClient(); + const auto & settings = context_->getSettingsRef(); + ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{settings.keeper_max_retries, settings.keeper_retry_initial_backoff_ms, settings.keeper_retry_max_backoff_ms}, + context_->getProcessListElement()}; - if (zk_root_path != "/" && !client->exists(zk_root_path)) - { - LOG_TRACE(log, "Creating root path {}", zk_root_path); - client->createAncestors(zk_root_path); - client->createIfNotExists(zk_root_path, ""); - } + zk_retry.retryLoop( + [&] + { + auto client = getClient(); + if (zk_root_path != "/" && !client->exists(zk_root_path)) + { + LOG_TRACE(log, "Creating root path {}", zk_root_path); + client->createAncestors(zk_root_path); + client->createIfNotExists(zk_root_path, ""); + } + }); + + std::shared_ptr metadata_drop_lock; + int32_t drop_lock_version = -1; for (size_t i = 0; i < 1000; ++i) { - std::string stored_metadata_string; - auto exists = client->tryGet(zk_metadata_path, stored_metadata_string); - - if (exists) - { - // this requires same name for columns - // maybe we can do a smarter comparison for columns and primary key expression - if (stored_metadata_string != metadata_string) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Path {} is already used but the stored table definition doesn't match. 
Stored metadata: {}", - zk_root_path, - stored_metadata_string); - - auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent); - - /// A table on the same Keeper path already exists, we just appended our table id to subscribe as a new replica - /// We still don't know if the table matches the expected metadata so table_is_valid is not changed - /// It will be checked lazily on the first operation - if (code == Coordination::Error::ZOK) - return; - - if (code != Coordination::Error::ZNONODE) - throw zkutil::KeeperException(code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path); - - /// ZNONODE means we dropped zk_tables_path but didn't finish drop completely - } - - if (client->exists(zk_dropped_path)) - { - LOG_INFO(log, "Removing leftover nodes"); - auto code = client->tryCreate(zk_dropped_lock_path, "", zkutil::CreateMode::Ephemeral); - - if (code == Coordination::Error::ZNONODE) + bool success = false; + zk_retry.retryLoop( + [&] { - LOG_INFO(log, "Someone else removed leftover nodes"); - } - else if (code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "Someone else is removing leftover nodes"); - continue; - } - else if (code != Coordination::Error::ZOK) - { - throw Coordination::Exception::fromPath(code, zk_dropped_lock_path); - } - else - { - auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(zk_dropped_lock_path, *client); - if (!dropTable(client, metadata_drop_lock)) - continue; - } - } + auto client = getClient(); + std::string stored_metadata_string; + auto exists = client->tryGet(zk_metadata_path, stored_metadata_string); - Coordination::Requests create_requests - { - zkutil::makeCreateRequest(zk_metadata_path, metadata_string, zkutil::CreateMode::Persistent), - zkutil::makeCreateRequest(zk_data_path, metadata_string, zkutil::CreateMode::Persistent), - zkutil::makeCreateRequest(zk_tables_path, "", zkutil::CreateMode::Persistent), - zkutil::makeCreateRequest(zk_table_path, "", zkutil::CreateMode::Persistent), - }; + if (exists) + { + // this requires same name for columns + // maybe we can do a smarter comparison for columns and primary key expression + if (stored_metadata_string != metadata_string) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Path {} is already used but the stored table definition doesn't match. Stored metadata: {}", + zk_root_path, + stored_metadata_string); - Coordination::Responses create_responses; - auto code = client->tryMulti(create_requests, create_responses); - if (code == Coordination::Error::ZNODEEXISTS) - { - LOG_INFO(log, "It looks like a table on path {} was created by another server at the same moment, will retry", zk_root_path); - continue; - } - else if (code != Coordination::Error::ZOK) - { - zkutil::KeeperMultiException::check(code, create_requests, create_responses); - } + auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent); + /// A table on the same Keeper path already exists, we just appended our table id to subscribe as a new replica + /// We still don't know if the table matches the expected metadata so table_is_valid is not changed + /// It will be checked lazily on the first operation + if (code == Coordination::Error::ZOK) + { + success = true; + return; + } - table_status = TableStatus::VALID; - /// we are the first table created for the specified Keeper path, i.e. 
we are the first replica - return; + /// We most likely created the path but got a timeout or disconnect + if (code == Coordination::Error::ZNODEEXISTS && zk_retry.isRetry()) + { + success = true; + return; + } + + if (code != Coordination::Error::ZNONODE) + throw zkutil::KeeperException( + code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path); + + /// ZNONODE means we dropped zk_tables_path but didn't finish drop completely + } + + if (client->exists(zk_dropped_path)) + { + LOG_INFO(log, "Removing leftover nodes"); + + bool drop_finished = false; + if (zk_retry.isRetry() && metadata_drop_lock != nullptr && drop_lock_version != -1) + { + /// if we have leftover lock from previous try, we need to recreate the ephemeral with our session + Coordination::Requests drop_lock_requests{ + zkutil::makeRemoveRequest(zk_dropped_lock_path, drop_lock_version), + zkutil::makeCreateRequest(zk_dropped_lock_path, "", zkutil::CreateMode::Ephemeral), + }; + + Coordination::Responses drop_lock_responses; + auto lock_code = client->tryMulti(drop_lock_requests, drop_lock_responses); + if (lock_code == Coordination::Error::ZBADVERSION) + { + LOG_INFO(log, "Someone else is removing leftover nodes"); + metadata_drop_lock->setAlreadyRemoved(); + metadata_drop_lock.reset(); + return; + } + + if (drop_lock_responses[0]->error == Coordination::Error::ZNONODE) + { + /// someone else removed metadata nodes or the previous ephemeral node expired + /// we will try creating dropped lock again to make sure + metadata_drop_lock->setAlreadyRemoved(); + metadata_drop_lock.reset(); + } + else if (lock_code == Coordination::Error::ZOK) + { + metadata_drop_lock->setAlreadyRemoved(); + metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(zk_dropped_lock_path, *client); + drop_lock_version = -1; + Coordination::Stat lock_stat; + client->get(zk_dropped_lock_path, &lock_stat); + drop_lock_version = lock_stat.version; + if (!dropTable(client, metadata_drop_lock)) + { + metadata_drop_lock.reset(); + return; + } + drop_finished = true; + } + } + + if (!drop_finished) + { + auto code = client->tryCreate(zk_dropped_lock_path, "", zkutil::CreateMode::Ephemeral); + + if (code == Coordination::Error::ZNONODE) + { + LOG_INFO(log, "Someone else removed leftover nodes"); + } + else if (code == Coordination::Error::ZNODEEXISTS) + { + LOG_INFO(log, "Someone else is removing leftover nodes"); + return; + } + else if (code != Coordination::Error::ZOK) + { + throw Coordination::Exception::fromPath(code, zk_dropped_lock_path); + } + else + { + metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(zk_dropped_lock_path, *client); + drop_lock_version = -1; + Coordination::Stat lock_stat; + client->get(zk_dropped_lock_path, &lock_stat); + drop_lock_version = lock_stat.version; + if (!dropTable(client, metadata_drop_lock)) + { + metadata_drop_lock.reset(); + return; + } + } + } + } + + Coordination::Requests create_requests{ + zkutil::makeCreateRequest(zk_metadata_path, metadata_string, zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(zk_data_path, metadata_string, zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(zk_tables_path, "", zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(zk_table_path, "", zkutil::CreateMode::Persistent), + }; + + Coordination::Responses create_responses; + auto code = client->tryMulti(create_requests, create_responses); + if (code == Coordination::Error::ZNODEEXISTS) + { + LOG_INFO( + log, "It looks like a table on path {} was 
created by another server at the same moment, will retry", zk_root_path); + return; + } + else if (code != Coordination::Error::ZOK) + { + zkutil::KeeperMultiException::check(code, create_requests, create_responses); + } + + table_status = TableStatus::VALID; + /// we are the first table created for the specified Keeper path, i.e. we are the first replica + success = true; + }); + + if (success) + return; } - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Cannot create metadata for table, because it is removed concurrently or because " - "of wrong zk_root_path ({})", zk_root_path); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot create metadata for table, because it is removed concurrently or because " + "of wrong zk_root_path ({})", + zk_root_path); } @@ -518,7 +606,7 @@ Pipe StorageKeeperMap::read( size_t max_block_size, size_t num_streams) { - checkTable(); + checkTable(context_); storage_snapshot->check(column_names); FieldVectorPtr filtered_keys; @@ -592,13 +680,13 @@ Pipe StorageKeeperMap::read( SinkToStoragePtr StorageKeeperMap::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context, bool /*async_insert*/) { - checkTable(); + checkTable(local_context); return std::make_shared(*this, metadata_snapshot->getSampleBlock(), local_context); } void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &) { - checkTable(); + checkTable(local_context); const auto & settings = local_context->getSettingsRef(); ZooKeeperRetriesControl zk_retry{ getName(), @@ -657,7 +745,7 @@ bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::E void StorageKeeperMap::drop() { - auto current_table_status = getTableStatus(); + auto current_table_status = getTableStatus(getContext()); if (current_table_status == TableStatus::UNKNOWN) { static constexpr auto error_msg = "Failed to activate table because of connection issues. 
It will be activated " @@ -666,7 +754,7 @@ void StorageKeeperMap::drop() } /// if only column metadata is wrong we can still drop the table correctly - if (current_table_status == TableStatus::INVALID_KEEPER_STRUCTURE) + if (current_table_status == TableStatus::INVALID_METADATA) return; auto client = getClient(); @@ -1029,7 +1117,7 @@ UInt64 StorageKeeperMap::keysLimit() const return keys_limit; } -StorageKeeperMap::TableStatus StorageKeeperMap::getTableStatus() const +StorageKeeperMap::TableStatus StorageKeeperMap::getTableStatus(const ContextPtr & local_context) const { std::lock_guard lock{init_mutex}; if (table_status != TableStatus::UNKNOWN) @@ -1039,57 +1127,70 @@ StorageKeeperMap::TableStatus StorageKeeperMap::getTableStatus() const { try { - auto client = getClient(); + const auto & settings = local_context->getSettingsRef(); + ZooKeeperRetriesControl zk_retry{ + getName(), + getLogger(getName()), + ZooKeeperRetriesInfo{ + settings.keeper_max_retries, + settings.keeper_retry_initial_backoff_ms, + settings.keeper_retry_max_backoff_ms}, + local_context->getProcessListElement()}; - Coordination::Stat metadata_stat; - auto stored_metadata_string = client->get(zk_metadata_path, &metadata_stat); - - if (metadata_stat.numChildren == 0) + zk_retry.retryLoop([&] { + auto client = getClient(); + + Coordination::Stat metadata_stat; + auto stored_metadata_string = client->get(zk_metadata_path, &metadata_stat); + + if (metadata_stat.numChildren == 0) + { + table_status = TableStatus::INVALID_KEEPER_STRUCTURE; + return; + } + + if (metadata_string != stored_metadata_string) + { + LOG_ERROR( + log, + "Table definition does not match to the one stored in the path {}. Stored definition: {}", + zk_root_path, + stored_metadata_string); + table_status = TableStatus::INVALID_METADATA; + return; + } + + // validate all metadata and data nodes are present + Coordination::Requests requests; + requests.push_back(zkutil::makeCheckRequest(zk_table_path, -1)); + requests.push_back(zkutil::makeCheckRequest(zk_data_path, -1)); + requests.push_back(zkutil::makeCheckRequest(zk_dropped_path, -1)); + + Coordination::Responses responses; + client->tryMulti(requests, responses); + table_status = TableStatus::INVALID_KEEPER_STRUCTURE; - return; - } + if (responses[0]->error != Coordination::Error::ZOK) + { + LOG_ERROR(log, "Table node ({}) is missing", zk_table_path); + return; + } - if (metadata_string != stored_metadata_string) - { - LOG_ERROR( - log, - "Table definition does not match to the one stored in the path {}. 
Stored definition: {}", - zk_root_path, - stored_metadata_string); - table_status = TableStatus::INVALID_METADATA; - return; - } + if (responses[1]->error != Coordination::Error::ZOK) + { + LOG_ERROR(log, "Data node ({}) is missing", zk_data_path); + return; + } - // validate all metadata and data nodes are present - Coordination::Requests requests; - requests.push_back(zkutil::makeCheckRequest(zk_table_path, -1)); - requests.push_back(zkutil::makeCheckRequest(zk_data_path, -1)); - requests.push_back(zkutil::makeCheckRequest(zk_dropped_path, -1)); + if (responses[2]->error == Coordination::Error::ZOK) + { + LOG_ERROR(log, "Tables with root node {} are being dropped", zk_root_path); + return; + } - Coordination::Responses responses; - client->tryMulti(requests, responses); - - table_status = TableStatus::INVALID_KEEPER_STRUCTURE; - if (responses[0]->error != Coordination::Error::ZOK) - { - LOG_ERROR(log, "Table node ({}) is missing", zk_table_path); - return; - } - - if (responses[1]->error != Coordination::Error::ZOK) - { - LOG_ERROR(log, "Data node ({}) is missing", zk_data_path); - return; - } - - if (responses[2]->error == Coordination::Error::ZOK) - { - LOG_ERROR(log, "Tables with root node {} are being dropped", zk_root_path); - return; - } - - table_status = TableStatus::VALID; + table_status = TableStatus::VALID; + }); } catch (const Coordination::Exception & e) { @@ -1227,7 +1328,7 @@ void StorageKeeperMap::checkMutationIsPossible(const MutationCommands & commands void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr local_context) { - checkTable(); + checkTable(local_context); if (commands.empty()) return; diff --git a/src/Storages/StorageKeeperMap.h b/src/Storages/StorageKeeperMap.h index 8ed348a4f6f..1464eeaabad 100644 --- a/src/Storages/StorageKeeperMap.h +++ b/src/Storages/StorageKeeperMap.h @@ -78,9 +78,9 @@ public: UInt64 keysLimit() const; template - void checkTable() const + void checkTable(const ContextPtr & local_context) const { - auto current_table_status = getTableStatus(); + auto current_table_status = getTableStatus(local_context); if (table_status == TableStatus::UNKNOWN) { static constexpr auto error_msg = "Failed to activate table because of connection issues. 
It will be activated " @@ -119,7 +119,7 @@ private: VALID }; - TableStatus getTableStatus() const; + TableStatus getTableStatus(const ContextPtr & context) const; void restoreDataImpl( const BackupPtr & backup, diff --git a/tests/integration/test_keeper_map/configs/keeper_retries.xml b/tests/integration/test_keeper_map/configs/keeper_retries.xml new file mode 100644 index 00000000000..43e5b9a09e8 --- /dev/null +++ b/tests/integration/test_keeper_map/configs/keeper_retries.xml @@ -0,0 +1,14 @@ + + + + 0 + 0 + + + + + + default + + + diff --git a/tests/integration/test_keeper_map/test.py b/tests/integration/test_keeper_map/test.py index 4b1bcd11cfe..861a7c47687 100644 --- a/tests/integration/test_keeper_map/test.py +++ b/tests/integration/test_keeper_map/test.py @@ -10,6 +10,7 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", main_configs=["configs/enable_keeper_map.xml"], + user_configs=["configs/keeper_retries.xml"], with_zookeeper=True, stay_alive=True, ) @@ -49,7 +50,6 @@ def assert_keeper_exception_after_partition(query): error = node.query_and_get_error_with_retry( query, sleep_time=1, - settings={"insert_keeper_max_retries": 1, "keeper_max_retries": 1}, ) assert "Coordination::Exception" in error except: @@ -91,7 +91,6 @@ def test_keeper_map_without_zk(started_cluster): error = node.query_and_get_error_with_retry( "SELECT * FROM test_keeper_map_without_zk", sleep_time=1, - settings={"keeper_max_retries": 1}, ) assert "Failed to activate table because of connection issues" in error except: diff --git a/tests/integration/test_keeper_map_retries/configs/fault_injection.xml b/tests/integration/test_keeper_map_retries/configs/fault_injection.xml index 0933b6b3031..8406b7db785 100644 --- a/tests/integration/test_keeper_map_retries/configs/fault_injection.xml +++ b/tests/integration/test_keeper_map_retries/configs/fault_injection.xml @@ -1,5 +1,6 @@ + 1 0.005 0.005 diff --git a/tests/integration/test_keeper_map_retries/configs/keeper_retries.xml b/tests/integration/test_keeper_map_retries/configs/keeper_retries.xml new file mode 100644 index 00000000000..208dd6e47fa --- /dev/null +++ b/tests/integration/test_keeper_map_retries/configs/keeper_retries.xml @@ -0,0 +1,14 @@ + + + + 20 + 10000 + + + + + + default + + + diff --git a/tests/integration/test_keeper_map_retries/test.py b/tests/integration/test_keeper_map_retries/test.py index 352119147cd..c6760e5d1a2 100644 --- a/tests/integration/test_keeper_map_retries/test.py +++ b/tests/integration/test_keeper_map_retries/test.py @@ -11,6 +11,7 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", main_configs=["configs/enable_keeper_map.xml"], + user_configs=["configs/keeper_retries.xml"], with_zookeeper=True, stay_alive=True, ) @@ -42,10 +43,6 @@ def repeat_query(query, repeat): for _ in range(repeat): node.query( query, - settings={ - "keeper_max_retries": 20, - "keeper_retry_max_backoff_ms": 10000, - }, ) @@ -53,10 +50,6 @@ def test_queries(started_cluster): start_clean_clickhouse() node.query("DROP TABLE IF EXISTS keeper_map_retries SYNC") - node.query( - "CREATE TABLE keeper_map_retries (a UInt64, b UInt64) Engine=KeeperMap('/keeper_map_retries') PRIMARY KEY a" - ) - node.stop_clickhouse() node.copy_file_to_container( os.path.join(CONFIG_DIR, "fault_injection.xml"), @@ -66,6 +59,10 @@ def test_queries(started_cluster): repeat_count = 10 + node.query( + "CREATE TABLE keeper_map_retries (a UInt64, b UInt64) Engine=KeeperMap('/keeper_map_retries') PRIMARY KEY a", + ) + repeat_query( "INSERT 
INTO keeper_map_retries SELECT number, number FROM numbers(500)", repeat_count, From a78b1ddd976f98b1952e7ee350b1fcd1935ede19 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 1 Aug 2024 08:44:49 +0000 Subject: [PATCH 1339/2361] fix --- src/Interpreters/AddDefaultDatabaseVisitor.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index ced94963d5b..efe39702fea 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -120,7 +120,8 @@ private: void visit(ASTSelectQuery & select, ASTPtr &) const { if (auto with = select.with()) - with_alias = with->children[0]->as()->name; + if (auto with_element = with->children[0]->as()) + with_alias = with_element->name; if (select.tables()) tryVisit(select.refTables()); @@ -171,7 +172,7 @@ private: if (external_tables.contains(identifier.shortName())) return; /// This is WITH RECURSIVE alias. - if (identifier.name() == with_alias) + if (!with_alias.empty() && identifier.name() == with_alias) return; auto qualified_identifier = std::make_shared(database_name, identifier.name()); From 09b4d4ff6509eaa7b42c8f0f174e879731ddce0a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 1 Aug 2024 08:56:37 +0000 Subject: [PATCH 1340/2361] fix --- src/Interpreters/AddDefaultDatabaseVisitor.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index efe39702fea..5e46a653efa 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -119,9 +119,8 @@ private: void visit(ASTSelectQuery & select, ASTPtr &) const { - if (auto with = select.with()) - if (auto with_element = with->children[0]->as()) - with_alias = with_element->name; + if (select.recursive_with) + with_alias = select.with()->children[0]->as()->name; if (select.tables()) tryVisit(select.refTables()); From bababe2dd2fe490e877f9c4ac03d979bd739376a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 29 Jul 2024 15:07:07 +0000 Subject: [PATCH 1341/2361] Fix remainders I --- src/Functions/FunctionsMultiStringSearch.h | 8 +++--- src/Functions/MultiMatchAnyImpl.h | 27 ++++++++++--------- src/Functions/MultiSearchFirstIndexImpl.h | 16 +++++------ src/Functions/MultiSearchFirstPositionImpl.h | 18 ++++++------- src/Functions/MultiSearchImpl.h | 18 ++++++------- .../0_stateless/00927_disable_hyperscan.sql | 2 +- 6 files changed, 45 insertions(+), 44 deletions(-) diff --git a/src/Functions/FunctionsMultiStringSearch.h b/src/Functions/FunctionsMultiStringSearch.h index 03db2651fd0..6bcc8581a38 100644 --- a/src/Functions/FunctionsMultiStringSearch.h +++ b/src/Functions/FunctionsMultiStringSearch.h @@ -81,7 +81,7 @@ public: return Impl::getReturnType(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr & haystack_ptr = arguments[0].column; const ColumnPtr & needles_ptr = arguments[1].column; @@ -110,13 +110,15 @@ public: col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_const->getValue(), vec_res, offsets_res, - allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, 
reject_expensive_hyperscan_regexps); + allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, + input_rows_count); else Impl::vectorVector( col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_vector->getData(), col_needles_vector->getOffsets(), vec_res, offsets_res, - allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, + input_rows_count); // the combination of const haystack + const needle is not implemented because // useDefaultImplementationForConstants() == true makes upper layers convert both to diff --git a/src/Functions/MultiMatchAnyImpl.h b/src/Functions/MultiMatchAnyImpl.h index 20b2150048b..06d9eded9c9 100644 --- a/src/Functions/MultiMatchAnyImpl.h +++ b/src/Functions/MultiMatchAnyImpl.h @@ -66,9 +66,10 @@ struct MultiMatchAnyImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { - vectorConstant(haystack_data, haystack_offsets, needles_arr, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + vectorConstant(haystack_data, haystack_offsets, needles_arr, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, input_rows_count); } static void vectorConstant( @@ -81,7 +82,8 @@ struct MultiMatchAnyImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { if (!allow_hyperscan) throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED, "Hyperscan functions are disabled, because setting 'allow_hyperscan' is set to 0"); @@ -101,7 +103,7 @@ struct MultiMatchAnyImpl throw Exception(ErrorCodes::HYPERSCAN_CANNOT_SCAN_TEXT, "Regular expression evaluation in vectorscan will be too slow. To ignore this error, disable setting 'reject_expensive_hyperscan_regexps'."); } - res.resize(haystack_offsets.size()); + res.resize(input_rows_count); if (needles_arr.empty()) { @@ -133,9 +135,8 @@ struct MultiMatchAnyImpl /// Once we hit the callback, there is no need to search for others. return 1; }; - const size_t haystack_offsets_size = haystack_offsets.size(); UInt64 offset = 0; - for (size_t i = 0; i < haystack_offsets_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { UInt64 length = haystack_offsets[i] - offset - 1; /// vectorscan restriction. 
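            /// Note on the length computation above: ClickHouse stores each string in a ColumnString with a
            /// terminating zero byte, and every offset points one past that byte, so the payload length of
            /// row i is haystack_offsets[i] - offset - 1, where `offset` is the previous row's end offset.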
@@ -186,9 +187,10 @@ struct MultiMatchAnyImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { - vectorVector(haystack_data, haystack_offsets, needles_data, needles_offsets, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + vectorVector(haystack_data, haystack_offsets, needles_data, needles_offsets, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, input_rows_count); } static void vectorVector( @@ -202,12 +204,13 @@ struct MultiMatchAnyImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { if (!allow_hyperscan) throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED, "Hyperscan functions are disabled, because setting 'allow_hyperscan' is set to 0"); - res.resize(haystack_offsets.size()); + res.resize(input_rows_count); #if USE_VECTORSCAN size_t prev_haystack_offset = 0; size_t prev_needles_offset = 0; @@ -216,7 +219,7 @@ struct MultiMatchAnyImpl std::vector needles; - for (size_t i = 0; i < haystack_offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { needles.reserve(needles_offsets[i] - prev_needles_offset); @@ -306,7 +309,7 @@ struct MultiMatchAnyImpl std::vector needles; - for (size_t i = 0; i < haystack_offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * const cur_haystack_data = &haystack_data[prev_haystack_offset]; const size_t cur_haystack_length = haystack_offsets[i] - prev_haystack_offset - 1; diff --git a/src/Functions/MultiSearchFirstIndexImpl.h b/src/Functions/MultiSearchFirstIndexImpl.h index 36a5fd514d9..5b34dbfe601 100644 --- a/src/Functions/MultiSearchFirstIndexImpl.h +++ b/src/Functions/MultiSearchFirstIndexImpl.h @@ -33,7 +33,8 @@ struct MultiSearchFirstIndexImpl bool /*allow_hyperscan*/, size_t /*max_hyperscan_regexp_length*/, size_t /*max_hyperscan_regexp_total_length*/, - bool /*reject_expensive_hyperscan_regexps*/) + bool /*reject_expensive_hyperscan_regexps*/, + size_t input_rows_count) { // For performance of Volnitsky search, it is crucial to save only one byte for pattern number. 
if (needles_arr.size() > std::numeric_limits::max()) @@ -48,14 +49,13 @@ struct MultiSearchFirstIndexImpl auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_size = haystack_offsets.size(); - res.resize(haystack_size); + res.resize(input_rows_count); size_t iteration = 0; while (searcher.hasMoreToSearch()) { size_t prev_haystack_offset = 0; - for (size_t j = 0; j < haystack_size; ++j) + for (size_t j = 0; j < input_rows_count; ++j) { const auto * haystack = &haystack_data[prev_haystack_offset]; const auto * haystack_end = haystack + haystack_offsets[j] - prev_haystack_offset - 1; @@ -80,10 +80,10 @@ struct MultiSearchFirstIndexImpl bool /*allow_hyperscan*/, size_t /*max_hyperscan_regexp_length*/, size_t /*max_hyperscan_regexp_total_length*/, - bool /*reject_expensive_hyperscan_regexps*/) + bool /*reject_expensive_hyperscan_regexps*/, + size_t input_rows_count) { - const size_t haystack_size = haystack_offsets.size(); - res.resize(haystack_size); + res.resize(input_rows_count); size_t prev_haystack_offset = 0; size_t prev_needles_offset = 0; @@ -92,7 +92,7 @@ struct MultiSearchFirstIndexImpl std::vector needles; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { needles.reserve(needles_offsets[i] - prev_needles_offset); diff --git a/src/Functions/MultiSearchFirstPositionImpl.h b/src/Functions/MultiSearchFirstPositionImpl.h index ccdd82a0ee5..06bf7aa94d8 100644 --- a/src/Functions/MultiSearchFirstPositionImpl.h +++ b/src/Functions/MultiSearchFirstPositionImpl.h @@ -33,7 +33,8 @@ struct MultiSearchFirstPositionImpl bool /*allow_hyperscan*/, size_t /*max_hyperscan_regexp_length*/, size_t /*max_hyperscan_regexp_total_length*/, - bool /*reject_expensive_hyperscan_regexps*/) + bool /*reject_expensive_hyperscan_regexps*/, + size_t input_rows_count) { // For performance of Volnitsky search, it is crucial to save only one byte for pattern number. 
if (needles_arr.size() > std::numeric_limits::max()) @@ -52,14 +53,13 @@ struct MultiSearchFirstPositionImpl }; auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_size = haystack_offsets.size(); - res.resize(haystack_size); + res.resize(input_rows_count); size_t iteration = 0; while (searcher.hasMoreToSearch()) { size_t prev_haystack_offset = 0; - for (size_t j = 0; j < haystack_size; ++j) + for (size_t j = 0; j < input_rows_count; ++j) { const auto * haystack = &haystack_data[prev_haystack_offset]; const auto * haystack_end = haystack + haystack_offsets[j] - prev_haystack_offset - 1; @@ -89,10 +89,10 @@ struct MultiSearchFirstPositionImpl bool /*allow_hyperscan*/, size_t /*max_hyperscan_regexp_length*/, size_t /*max_hyperscan_regexp_total_length*/, - bool /*reject_expensive_hyperscan_regexps*/) + bool /*reject_expensive_hyperscan_regexps*/, + size_t input_rows_count) { - const size_t haystack_size = haystack_offsets.size(); - res.resize(haystack_size); + res.resize(input_rows_count); size_t prev_haystack_offset = 0; size_t prev_needles_offset = 0; @@ -106,14 +106,12 @@ struct MultiSearchFirstPositionImpl return 1 + Impl::countChars(reinterpret_cast(start), reinterpret_cast(end)); }; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { needles.reserve(needles_offsets[i] - prev_needles_offset); for (size_t j = prev_needles_offset; j < needles_offsets[i]; ++j) - { needles.emplace_back(needles_data_string.getDataAt(j).toView()); - } auto searcher = Impl::createMultiSearcherInBigHaystack(needles); // sub-optimal diff --git a/src/Functions/MultiSearchImpl.h b/src/Functions/MultiSearchImpl.h index 467cc96a95f..909425f5a93 100644 --- a/src/Functions/MultiSearchImpl.h +++ b/src/Functions/MultiSearchImpl.h @@ -33,7 +33,8 @@ struct MultiSearchImpl bool /*allow_hyperscan*/, size_t /*max_hyperscan_regexp_length*/, size_t /*max_hyperscan_regexp_total_length*/, - bool /*reject_expensive_hyperscan_regexps*/) + bool /*reject_expensive_hyperscan_regexps*/, + size_t input_rows_count) { // For performance of Volnitsky search, it is crucial to save only one byte for pattern number. 
if (needles_arr.size() > std::numeric_limits::max()) @@ -48,14 +49,13 @@ struct MultiSearchImpl auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_size = haystack_offsets.size(); - res.resize(haystack_size); + res.resize(input_rows_count); size_t iteration = 0; while (searcher.hasMoreToSearch()) { size_t prev_haystack_offset = 0; - for (size_t j = 0; j < haystack_size; ++j) + for (size_t j = 0; j < input_rows_count; ++j) { const auto * haystack = &haystack_data[prev_haystack_offset]; const auto * haystack_end = haystack + haystack_offsets[j] - prev_haystack_offset - 1; @@ -79,10 +79,10 @@ struct MultiSearchImpl bool /*allow_hyperscan*/, size_t /*max_hyperscan_regexp_length*/, size_t /*max_hyperscan_regexp_total_length*/, - bool /*reject_expensive_hyperscan_regexps*/) + bool /*reject_expensive_hyperscan_regexps*/, + size_t input_rows_count) { - const size_t haystack_size = haystack_offsets.size(); - res.resize(haystack_size); + res.resize(input_rows_count); size_t prev_haystack_offset = 0; size_t prev_needles_offset = 0; @@ -91,14 +91,12 @@ struct MultiSearchImpl std::vector needles; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { needles.reserve(needles_offsets[i] - prev_needles_offset); for (size_t j = prev_needles_offset; j < needles_offsets[i]; ++j) - { needles.emplace_back(needles_data_string.getDataAt(j).toView()); - } const auto * const haystack = &haystack_data[prev_haystack_offset]; const size_t haystack_length = haystack_offsets[i] - prev_haystack_offset - 1; diff --git a/tests/queries/0_stateless/00927_disable_hyperscan.sql b/tests/queries/0_stateless/00927_disable_hyperscan.sql index c07848a4fcc..24ec7a35adb 100644 --- a/tests/queries/0_stateless/00927_disable_hyperscan.sql +++ b/tests/queries/0_stateless/00927_disable_hyperscan.sql @@ -1,4 +1,4 @@ --- Tags: no-debug +-- Tags: no-fasttest SET allow_hyperscan = 1; From 0f850952fa304bcc63f7008d4bae54c47ea0564d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 30 Jul 2024 09:52:25 +0000 Subject: [PATCH 1342/2361] Fix remainders II --- .../FunctionsMultiStringFuzzySearch.h | 8 +++--- src/Functions/MultiMatchAllIndicesImpl.h | 27 +++++++++++-------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/Functions/FunctionsMultiStringFuzzySearch.h b/src/Functions/FunctionsMultiStringFuzzySearch.h index a92a6570279..8346380c35d 100644 --- a/src/Functions/FunctionsMultiStringFuzzySearch.h +++ b/src/Functions/FunctionsMultiStringFuzzySearch.h @@ -71,7 +71,7 @@ public: return Impl::getReturnType(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr & haystack_ptr = arguments[0].column; const ColumnPtr & edit_distance_ptr = arguments[1].column; @@ -114,14 +114,16 @@ public: col_needles_const->getValue(), vec_res, offsets_res, edit_distance, - allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, + input_rows_count); else Impl::vectorVector( col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_vector->getData(), col_needles_vector->getOffsets(), vec_res, offsets_res, edit_distance, - allow_hyperscan, 
max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, + input_rows_count); // the combination of const haystack + const needle is not implemented because // useDefaultImplementationForConstants() == true makes upper layers convert both to diff --git a/src/Functions/MultiMatchAllIndicesImpl.h b/src/Functions/MultiMatchAllIndicesImpl.h index 3e9c8fba215..3aeac808880 100644 --- a/src/Functions/MultiMatchAllIndicesImpl.h +++ b/src/Functions/MultiMatchAllIndicesImpl.h @@ -52,9 +52,10 @@ struct MultiMatchAllIndicesImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { - vectorConstant(haystack_data, haystack_offsets, needles_arr, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + vectorConstant(haystack_data, haystack_offsets, needles_arr, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, input_rows_count); } static void vectorConstant( @@ -67,7 +68,8 @@ struct MultiMatchAllIndicesImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { if (!allow_hyperscan) throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED, "Hyperscan functions are disabled, because setting 'allow_hyperscan' is set to 0"); @@ -87,7 +89,7 @@ struct MultiMatchAllIndicesImpl throw Exception(ErrorCodes::HYPERSCAN_CANNOT_SCAN_TEXT, "Regular expression evaluation in vectorscan will be too slow. To ignore this error, disable setting 'reject_expensive_hyperscan_regexps'."); } - offsets.resize(haystack_offsets.size()); + offsets.resize(input_rows_count); if (needles_arr.empty()) { @@ -114,9 +116,8 @@ struct MultiMatchAllIndicesImpl static_cast*>(context)->push_back(id); return 0; }; - const size_t haystack_offsets_size = haystack_offsets.size(); UInt64 offset = 0; - for (size_t i = 0; i < haystack_offsets_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { UInt64 length = haystack_offsets[i] - offset - 1; /// vectorscan restriction. 
@@ -146,6 +147,7 @@ struct MultiMatchAllIndicesImpl (void)max_hyperscan_regexp_length; (void)max_hyperscan_regexp_total_length; (void)reject_expensive_hyperscan_regexps; + (void)input_rows_count; throw Exception(ErrorCodes::NOT_IMPLEMENTED, "multi-search all indices is not implemented when vectorscan is off"); #endif // USE_VECTORSCAN } @@ -160,9 +162,10 @@ struct MultiMatchAllIndicesImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { - vectorVector(haystack_data, haystack_offsets, needles_data, needles_offsets, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps); + vectorVector(haystack_data, haystack_offsets, needles_data, needles_offsets, res, offsets, std::nullopt, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length, reject_expensive_hyperscan_regexps, input_rows_count); } static void vectorVector( @@ -176,12 +179,13 @@ struct MultiMatchAllIndicesImpl bool allow_hyperscan, size_t max_hyperscan_regexp_length, size_t max_hyperscan_regexp_total_length, - bool reject_expensive_hyperscan_regexps) + bool reject_expensive_hyperscan_regexps, + size_t input_rows_count) { if (!allow_hyperscan) throw Exception(ErrorCodes::FUNCTION_NOT_ALLOWED, "Hyperscan functions are disabled, because setting 'allow_hyperscan' is set to 0"); #if USE_VECTORSCAN - offsets.resize(haystack_offsets.size()); + offsets.resize(input_rows_count); size_t prev_haystack_offset = 0; size_t prev_needles_offset = 0; @@ -189,7 +193,7 @@ struct MultiMatchAllIndicesImpl std::vector needles; - for (size_t i = 0; i < haystack_offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { needles.reserve(needles_offsets[i] - prev_needles_offset); @@ -271,6 +275,7 @@ struct MultiMatchAllIndicesImpl (void)max_hyperscan_regexp_length; (void)max_hyperscan_regexp_total_length; (void)reject_expensive_hyperscan_regexps; + (void)input_rows_count; throw Exception(ErrorCodes::NOT_IMPLEMENTED, "multi-search all indices is not implemented when vectorscan is off"); #endif // USE_VECTORSCAN } From 99760ad7e4b1b7d4ee86dc2f5aa4df98ead9e260 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 30 Jul 2024 17:17:40 +0000 Subject: [PATCH 1343/2361] Fix remainders, pt. 
III --- src/Functions/FunctionsStringSearchToString.h | 4 ++-- src/Functions/FunctionsVisitParam.h | 5 +++-- src/Functions/URL/extractURLParameter.cpp | 7 ++++--- src/Functions/extract.cpp | 7 ++++--- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/Functions/FunctionsStringSearchToString.h b/src/Functions/FunctionsStringSearchToString.h index 978a84de472..c889cf062a3 100644 --- a/src/Functions/FunctionsStringSearchToString.h +++ b/src/Functions/FunctionsStringSearchToString.h @@ -60,7 +60,7 @@ public: return std::make_shared(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnPtr column = arguments[0].column; const ColumnPtr column_needle = arguments[1].column; @@ -75,7 +75,7 @@ public: ColumnString::Chars & vec_res = col_res->getChars(); ColumnString::Offsets & offsets_res = col_res->getOffsets(); - Impl::vector(col->getChars(), col->getOffsets(), col_needle->getValue(), vec_res, offsets_res); + Impl::vector(col->getChars(), col->getOffsets(), col_needle->getValue(), vec_res, offsets_res, input_rows_count); return col_res; } diff --git a/src/Functions/FunctionsVisitParam.h b/src/Functions/FunctionsVisitParam.h index 5e13fbbad5c..bcaaf0a1c20 100644 --- a/src/Functions/FunctionsVisitParam.h +++ b/src/Functions/FunctionsVisitParam.h @@ -168,11 +168,12 @@ struct ExtractParamToStringImpl { static void vector(const ColumnString::Chars & haystack_data, const ColumnString::Offsets & haystack_offsets, std::string needle, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t input_rows_count) { /// Constant 5 is taken from a function that performs a similar task FunctionsStringSearch.h::ExtractImpl res_data.reserve(haystack_data.size() / 5); - res_offsets.resize(haystack_offsets.size()); + res_offsets.resize(input_rows_count); /// We are looking for a parameter simply as a substring of the form "name" needle = "\"" + needle + "\":"; diff --git a/src/Functions/URL/extractURLParameter.cpp b/src/Functions/URL/extractURLParameter.cpp index f75875e0200..590c2779d9c 100644 --- a/src/Functions/URL/extractURLParameter.cpp +++ b/src/Functions/URL/extractURLParameter.cpp @@ -10,10 +10,11 @@ struct ExtractURLParameterImpl static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, std::string pattern, - ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) + ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets, + size_t input_rows_count) { res_data.reserve(data.size() / 5); - res_offsets.resize(offsets.size()); + res_offsets.resize(input_rows_count); pattern += '='; const char * param_str = pattern.c_str(); @@ -22,7 +23,7 @@ struct ExtractURLParameterImpl ColumnString::Offset prev_offset = 0; ColumnString::Offset res_offset = 0; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { ColumnString::Offset cur_offset = offsets[i]; diff --git a/src/Functions/extract.cpp b/src/Functions/extract.cpp index 6bbdaff0e3f..c78ee9898b7 100644 --- a/src/Functions/extract.cpp +++ b/src/Functions/extract.cpp @@ -16,10 +16,11 @@ struct ExtractImpl const ColumnString::Offsets & offsets, const std::string & pattern, ColumnString::Chars & res_data, - ColumnString::Offsets & res_offsets) + 
ColumnString::Offsets & res_offsets, + size_t input_rows_count) { res_data.reserve(data.size() / 5); - res_offsets.resize(offsets.size()); + res_offsets.resize(input_rows_count); const OptimizedRegularExpression regexp = Regexps::createRegexp(pattern); @@ -29,7 +30,7 @@ struct ExtractImpl size_t prev_offset = 0; size_t res_offset = 0; - for (size_t i = 0; i < offsets.size(); ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t cur_offset = offsets[i]; From 0452768983f379d201e1871ffce63452430bd0c6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 31 Jul 2024 08:10:01 +0000 Subject: [PATCH 1344/2361] Fix remainders, pt. IV --- src/Functions/CountSubstringsImpl.h | 17 ++++----- src/Functions/FunctionsStringSearch.h | 17 ++++++--- src/Functions/FunctionsVisitParam.h | 3 +- src/Functions/HasTokenImpl.h | 5 ++- src/Functions/MatchImpl.h | 54 +++++++++++++-------------- src/Functions/MultiMatchAnyImpl.h | 2 +- src/Functions/PositionImpl.h | 22 +++++------ 7 files changed, 62 insertions(+), 58 deletions(-) diff --git a/src/Functions/CountSubstringsImpl.h b/src/Functions/CountSubstringsImpl.h index 9ff3e4e1f2a..8e91bc3aeb4 100644 --- a/src/Functions/CountSubstringsImpl.h +++ b/src/Functions/CountSubstringsImpl.h @@ -37,7 +37,8 @@ struct CountSubstringsImpl const std::string & needle, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t /*input_rows_count*/) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); @@ -150,7 +151,8 @@ struct CountSubstringsImpl const ColumnString::Offsets & needle_offsets, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); @@ -158,9 +160,7 @@ struct CountSubstringsImpl ColumnString::Offset prev_haystack_offset = 0; ColumnString::Offset prev_needle_offset = 0; - size_t size = haystack_offsets.size(); - - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t needle_size = needle_offsets[i] - prev_needle_offset - 1; size_t haystack_size = haystack_offsets[i] - prev_haystack_offset - 1; @@ -207,7 +207,8 @@ struct CountSubstringsImpl const ColumnString::Offsets & needle_offsets, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); @@ -215,9 +216,7 @@ struct CountSubstringsImpl /// NOTE You could use haystack indexing. But this is a rare case. ColumnString::Offset prev_needle_offset = 0; - size_t size = needle_offsets.size(); - - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { res[i] = 0; auto start = start_pos != nullptr ? 
std::max(start_pos->getUInt(i), UInt64(1)) : UInt64(1); diff --git a/src/Functions/FunctionsStringSearch.h b/src/Functions/FunctionsStringSearch.h index fba6336ebff..7ec0076e395 100644 --- a/src/Functions/FunctionsStringSearch.h +++ b/src/Functions/FunctionsStringSearch.h @@ -163,7 +163,7 @@ public: return return_type; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { const ColumnPtr & column_haystack = (argument_order == ArgumentOrder::HaystackNeedle) ? arguments[0].column : arguments[1].column; const ColumnPtr & column_needle = (argument_order == ArgumentOrder::HaystackNeedle) ? arguments[1].column : arguments[0].column; @@ -236,7 +236,8 @@ public: col_needle_vector->getOffsets(), column_start_pos, vec_res, - null_map.get()); + null_map.get(), + input_rows_count); else if (col_haystack_vector && col_needle_const) Impl::vectorConstant( col_haystack_vector->getChars(), @@ -244,7 +245,8 @@ public: col_needle_const->getValue(), column_start_pos, vec_res, - null_map.get()); + null_map.get(), + input_rows_count); else if (col_haystack_vector_fixed && col_needle_vector) Impl::vectorFixedVector( col_haystack_vector_fixed->getChars(), @@ -253,14 +255,16 @@ public: col_needle_vector->getOffsets(), column_start_pos, vec_res, - null_map.get()); + null_map.get(), + input_rows_count); else if (col_haystack_vector_fixed && col_needle_const) Impl::vectorFixedConstant( col_haystack_vector_fixed->getChars(), col_haystack_vector_fixed->getN(), col_needle_const->getValue(), vec_res, - null_map.get()); + null_map.get(), + input_rows_count); else if (col_haystack_const && col_needle_vector) Impl::constantVector( col_haystack_const->getValue(), @@ -268,7 +272,8 @@ public: col_needle_vector->getOffsets(), column_start_pos, vec_res, - null_map.get()); + null_map.get(), + input_rows_count); else throw Exception( ErrorCodes::ILLEGAL_COLUMN, diff --git a/src/Functions/FunctionsVisitParam.h b/src/Functions/FunctionsVisitParam.h index bcaaf0a1c20..a77fa740f9c 100644 --- a/src/Functions/FunctionsVisitParam.h +++ b/src/Functions/FunctionsVisitParam.h @@ -93,7 +93,8 @@ struct ExtractParamImpl std::string needle, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t /*input_rows_count*/) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. 
assert(!res_null); diff --git a/src/Functions/HasTokenImpl.h b/src/Functions/HasTokenImpl.h index a4ff49859cc..4943bf708c5 100644 --- a/src/Functions/HasTokenImpl.h +++ b/src/Functions/HasTokenImpl.h @@ -35,12 +35,13 @@ struct HasTokenImpl const std::string & pattern, const ColumnPtr & start_pos, PaddedPODArray & res, - ColumnUInt8 * res_null) + ColumnUInt8 * res_null, + size_t input_rows_count) { if (start_pos != nullptr) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function '{}' does not support start_pos argument", name); - if (haystack_offsets.empty()) + if (input_rows_count == 0) return; const UInt8 * const begin = haystack_data.data(); diff --git a/src/Functions/MatchImpl.h b/src/Functions/MatchImpl.h index 55b2fee5400..ceac753fe79 100644 --- a/src/Functions/MatchImpl.h +++ b/src/Functions/MatchImpl.h @@ -127,17 +127,17 @@ struct MatchImpl const String & needle, [[maybe_unused]] const ColumnPtr & start_pos_, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); - const size_t haystack_size = haystack_offsets.size(); - - assert(haystack_size == res.size()); + assert(res.size() == haystack_offsets.size()); + assert(res.size() == input_rows_count); assert(start_pos_ == nullptr); - if (haystack_offsets.empty()) + if (input_rows_count == 0) return; /// Shortcut for the silly but practical case that the pattern matches everything/nothing independently of the haystack: @@ -202,11 +202,11 @@ struct MatchImpl if (required_substring.empty()) { if (!regexp.getRE2()) /// An empty regexp. Always matches. - memset(res.data(), !negate, haystack_size * sizeof(res[0])); + memset(res.data(), !negate, input_rows_count * sizeof(res[0])); else { size_t prev_offset = 0; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const bool match = regexp.getRE2()->Match( {reinterpret_cast(&haystack_data[prev_offset]), haystack_offsets[i] - prev_offset - 1}, @@ -291,16 +291,16 @@ struct MatchImpl size_t N, const String & needle, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); - const size_t haystack_size = haystack.size() / N; + assert(res.size() == haystack.size() / N); + assert(res.size() == input_rows_count); - assert(haystack_size == res.size()); - - if (haystack.empty()) + if (input_rows_count == 0) return; /// Shortcut for the silly but practical case that the pattern matches everything/nothing independently of the haystack: @@ -370,11 +370,11 @@ struct MatchImpl if (required_substring.empty()) { if (!regexp.getRE2()) /// An empty regexp. Always matches. 
- memset(res.data(), !negate, haystack_size * sizeof(res[0])); + memset(res.data(), !negate, input_rows_count * sizeof(res[0])); else { size_t offset = 0; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const bool match = regexp.getRE2()->Match( {reinterpret_cast(&haystack[offset]), N}, @@ -464,18 +464,18 @@ struct MatchImpl const ColumnString::Offsets & needle_offset, [[maybe_unused]] const ColumnPtr & start_pos_, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); - const size_t haystack_size = haystack_offsets.size(); - - assert(haystack_size == needle_offset.size()); - assert(haystack_size == res.size()); + assert(haystack_offsets.size() == needle_offset.size()); + assert(res.size() == haystack_offsets.size()); + assert(res.size() == input_rows_count); assert(start_pos_ == nullptr); - if (haystack_offsets.empty()) + if (input_rows_count == 0) return; String required_substr; @@ -488,7 +488,7 @@ struct MatchImpl Regexps::LocalCacheTable cache; Regexps::RegexpPtr regexp; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * const cur_haystack_data = &haystack_data[prev_haystack_offset]; const size_t cur_haystack_length = haystack_offsets[i] - prev_haystack_offset - 1; @@ -573,15 +573,15 @@ struct MatchImpl const ColumnString::Offsets & needle_offset, [[maybe_unused]] const ColumnPtr & start_pos_, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. 
assert(!res_null); - const size_t haystack_size = haystack.size()/N; - - assert(haystack_size == needle_offset.size()); - assert(haystack_size == res.size()); + assert(res.size() == input_rows_count); + assert(res.size() == haystack.size() / N); + assert(res.size() == needle_offset.size()); assert(start_pos_ == nullptr); if (haystack.empty()) @@ -597,7 +597,7 @@ struct MatchImpl Regexps::LocalCacheTable cache; Regexps::RegexpPtr regexp; - for (size_t i = 0; i < haystack_size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { const auto * const cur_haystack_data = &haystack[prev_haystack_offset]; const size_t cur_haystack_length = N; diff --git a/src/Functions/MultiMatchAnyImpl.h b/src/Functions/MultiMatchAnyImpl.h index 06d9eded9c9..fda752cbacc 100644 --- a/src/Functions/MultiMatchAnyImpl.h +++ b/src/Functions/MultiMatchAnyImpl.h @@ -165,7 +165,7 @@ struct MultiMatchAnyImpl memset(accum.data(), 0, accum.size()); for (size_t j = 0; j < needles.size(); ++j) { - MatchImpl::vectorConstant(haystack_data, haystack_offsets, String(needles[j].data(), needles[j].size()), nullptr, accum, nullptr); + MatchImpl::vectorConstant(haystack_data, haystack_offsets, String(needles[j].data(), needles[j].size()), nullptr, accum, nullptr, input_rows_count); for (size_t i = 0; i < res.size(); ++i) { if constexpr (FindAny) diff --git a/src/Functions/PositionImpl.h b/src/Functions/PositionImpl.h index eeb9d8b6a59..e525b5fab57 100644 --- a/src/Functions/PositionImpl.h +++ b/src/Functions/PositionImpl.h @@ -193,7 +193,8 @@ struct PositionImpl const std::string & needle, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); @@ -214,13 +215,12 @@ struct PositionImpl } ColumnString::Offset prev_offset = 0; - size_t rows = haystack_offsets.size(); if (const ColumnConst * start_pos_const = typeid_cast(&*start_pos)) { /// Needle is empty and start_pos is constant UInt64 start = std::max(start_pos_const->getUInt(0), static_cast(1)); - for (size_t i = 0; i < rows; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t haystack_size = Impl::countChars( reinterpret_cast(pos), reinterpret_cast(pos + haystack_offsets[i] - prev_offset - 1)); @@ -234,7 +234,7 @@ struct PositionImpl else { /// Needle is empty and start_pos is not constant - for (size_t i = 0; i < rows; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t haystack_size = Impl::countChars( reinterpret_cast(pos), reinterpret_cast(pos + haystack_offsets[i] - prev_offset - 1)); @@ -359,7 +359,8 @@ struct PositionImpl const ColumnString::Offsets & needle_offsets, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. 
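
A note on the two "Fix remainders" patches (pt. III and pt. IV) above: the change is mechanical. Each string-search Impl (extract, extractURLParameter, extractParam/visitParam, position, countSubstrings, hasToken, match) now takes the row count from executeImpl's input_rows_count argument instead of re-deriving it as offsets.size(), and uses that single value both to resize the result offsets and to bound the per-row loop. For ordinary String columns the two numbers coincide, so behaviour should be unchanged; the refactoring only unifies where the row count comes from. A query of roughly this shape exercises several of the touched vectorConstant paths (the literal values are illustrative only):

    SELECT position(s, '1'), extract(s, '\d+'), countSubstrings(s, '1')
    FROM (SELECT toString(number) AS s FROM numbers(100));
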
assert(!res_null); @@ -367,9 +368,7 @@ struct PositionImpl ColumnString::Offset prev_haystack_offset = 0; ColumnString::Offset prev_needle_offset = 0; - size_t size = haystack_offsets.size(); - - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t needle_size = needle_offsets[i] - prev_needle_offset - 1; size_t haystack_size = haystack_offsets[i] - prev_haystack_offset - 1; @@ -423,7 +422,8 @@ struct PositionImpl const ColumnString::Offsets & needle_offsets, const ColumnPtr & start_pos, PaddedPODArray & res, - [[maybe_unused]] ColumnUInt8 * res_null) + [[maybe_unused]] ColumnUInt8 * res_null, + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. assert(!res_null); @@ -431,9 +431,7 @@ struct PositionImpl /// NOTE You could use haystack indexing. But this is a rare case. ColumnString::Offset prev_needle_offset = 0; - size_t size = needle_offsets.size(); - - for (size_t i = 0; i < size; ++i) + for (size_t i = 0; i < input_rows_count; ++i) { size_t needle_size = needle_offsets[i] - prev_needle_offset - 1; From 59e51de6f8df35a0df0e08e8b3aacd3ba40c4cbd Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 31 Jul 2024 14:58:01 +0000 Subject: [PATCH 1345/2361] Fix join on tuple with NULLs --- .../Passes/ComparisonTupleEliminationPass.cpp | 10 +++++++++- ...on_tuple_comparison_elimination_bug.reference | 8 ++++++++ ..._join_on_tuple_comparison_elimination_bug.sql | 16 ++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.reference create mode 100644 tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.sql diff --git a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp index 180470952cd..76dc8ab94b4 100644 --- a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp +++ b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include namespace DB @@ -25,8 +26,15 @@ public: using Base = InDepthQueryTreeVisitorWithContext; using Base::Base; - static bool needChildVisit(QueryTreeNodePtr &, QueryTreeNodePtr & child) + static bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child) { + if (parent->getNodeType() == QueryTreeNodeType::JOIN) + { + /// In JOIN ON section comparison of tuples works a bit differently. + /// For example we can join on tuple(NULL) = tuple(NULL), join algorithms consider only NULLs on the top level. 
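
The reason the pass has to skip the JOIN expression is that ComparisonTupleEliminationPass otherwise rewrites tuple(x) = tuple(y) into element-wise comparisons, and inside the ON clause that is not an equivalent transformation: as the comment above notes, the join algorithms only consider NULLs on the top level, so tuple(a.key) = tuple(b.key) can match rows whose keys are both NULL, while a plain a.key = b.key never would. The new 03214 test pins this down; schematically, with tables a and b as created in that test, the two conditions below can produce different row sets when NULL keys are present (the second query is only an illustrative contrast, it is not part of the test):

    SELECT a.key FROM a LEFT SEMI JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; -- returns the NULL row as well
    SELECT a.key FROM a LEFT SEMI JOIN b ON a.key = b.key ORDER BY a.key;               -- ordinary equality, NULL keys do not match
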
+ if (parent->as().getJoinExpression().get() == child.get()) + return false; + } return child->getNodeType() != QueryTreeNodeType::TABLE_FUNCTION; } diff --git a/tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.reference b/tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.reference new file mode 100644 index 00000000000..c90a64e4a47 --- /dev/null +++ b/tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.reference @@ -0,0 +1,8 @@ +1 +\N +1 +\N +1 +\N +1 +\N diff --git a/tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.sql b/tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.sql new file mode 100644 index 00000000000..7ef98f88cc7 --- /dev/null +++ b/tests/queries/0_stateless/03214_join_on_tuple_comparison_elimination_bug.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; + +CREATE TABLE a (key Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO a VALUES (NULL), ('1'); + +CREATE TABLE b (key Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO b VALUES (NULL), ('1'); + +SELECT a.key FROM a LEFT SEMI JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; +SELECT a.key FROM a LEFT SEMI JOIN b ON a.key IS NOT DISTINCT FROM b.key ORDER BY a.key; +SELECT a.key FROM a LEFT SEMI JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; +SELECT a.key FROM a LEFT ANY JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; + +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; From ca01c1c5691e4562ae6fc71af7b1867cf39f7ad1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 11:33:22 +0200 Subject: [PATCH 1346/2361] Fix bad merge --- src/Core/SettingsChangesHistory.cpp | 259 ---------------------------- 1 file changed, 259 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c3b9805700f..8f73e10c44f 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -497,265 +497,6 @@ static std::initializer_list col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, }; From 4768e3878552ae0ce9007c1e4f400943a5712825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 11:37:42 +0200 Subject: [PATCH 1347/2361] Update ref to 24.7.2 --- ...{02995_baseline_24_7_1.tsv => 02995_baseline_24_7_2.tsv} | 2 +- tests/queries/0_stateless/02995_new_settings_history.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) rename tests/queries/0_stateless/{02995_baseline_24_7_1.tsv => 02995_baseline_24_7_2.tsv} (99%) diff --git a/tests/queries/0_stateless/02995_baseline_24_7_1.tsv b/tests/queries/0_stateless/02995_baseline_24_7_2.tsv similarity index 99% rename from tests/queries/0_stateless/02995_baseline_24_7_1.tsv rename to tests/queries/0_stateless/02995_baseline_24_7_2.tsv index 6c830da8646..10b392f3e04 100644 --- a/tests/queries/0_stateless/02995_baseline_24_7_1.tsv +++ b/tests/queries/0_stateless/02995_baseline_24_7_2.tsv @@ -462,7 +462,7 @@ input_format_orc_allow_missing_columns 1 input_format_orc_case_insensitive_column_matching 0 input_format_orc_filter_push_down 1 input_format_orc_import_nested 0 -input_format_orc_read_use_writer_time_zone 0 +input_format_orc_reader_time_zone_name GMT input_format_orc_row_batch_size 100000 input_format_orc_skip_columns_with_unsupported_types_in_schema_inference 0 input_format_orc_use_fast_decoder 1 diff --git a/tests/queries/0_stateless/02995_new_settings_history.sh b/tests/queries/0_stateless/02995_new_settings_history.sh index 917dacc04b0..7fb21f88fae 100755 --- a/tests/queries/0_stateless/02995_new_settings_history.sh +++ 
b/tests/queries/0_stateless/02995_new_settings_history.sh @@ -7,12 +7,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # Note that this is a broad check. A per version check is done in the upgrade test -# Baseline generated with 24.7.1 -# clickhouse local --query "select name, default from system.settings order by name format TSV" > 02995_baseline_24_7_1.tsv +# Baseline generated with 24.7.2 +# clickhouse local --query "select name, default from system.settings order by name format TSV" > 02995_baseline_24_7_2.tsv $CLICKHOUSE_LOCAL --query " WITH old_settings AS ( - SELECT * FROM file('${CUR_DIR}/02995_baseline_24_7_1.tsv', 'TSV', 'name String, default String') + SELECT * FROM file('${CUR_DIR}/02995_baseline_24_7_2.tsv', 'TSV', 'name String, default String') ), new_settings AS ( From da3a37c561679daaecbcdece74f92ce98380b2b5 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Thu, 1 Aug 2024 09:38:27 +0000 Subject: [PATCH 1348/2361] Improve regex to take into account the xdist name in the instance --- .../test.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_zookeeper_config_load_balancing/test.py b/tests/integration/test_zookeeper_config_load_balancing/test.py index 9cdf7db2b08..cc0a9022674 100644 --- a/tests/integration/test_zookeeper_config_load_balancing/test.py +++ b/tests/integration/test_zookeeper_config_load_balancing/test.py @@ -71,7 +71,7 @@ def test_first_or_random(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -99,7 +99,7 @@ def test_first_or_random(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -127,7 +127,7 @@ def test_first_or_random(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -161,7 +161,7 @@ def test_in_order(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -189,7 +189,7 @@ def test_in_order(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 
'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -217,7 +217,7 @@ def test_in_order(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -251,7 +251,7 @@ def test_nearest_hostname(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -279,7 +279,7 @@ def test_nearest_hostname(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -307,7 +307,7 @@ def test_nearest_hostname(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -341,7 +341,7 @@ def test_hostname_levenshtein_distance(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -369,7 +369,7 @@ def test_hostname_levenshtein_distance(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", @@ -397,7 +397,7 @@ def test_hostname_levenshtein_distance(started_cluster): [ "bash", "-c", - "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l", ], privileged=True, user="root", From c77f6d78d976430faf4353e350d0205bbecf2837 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 1 Aug 2024 12:09:58 +0200 Subject: [PATCH 1349/2361] Update minio --- tests/integration/compose/docker_compose_minio.yml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/tests/integration/compose/docker_compose_minio.yml b/tests/integration/compose/docker_compose_minio.yml index 4255a529f6d..40098d05b04 100644 --- a/tests/integration/compose/docker_compose_minio.yml +++ b/tests/integration/compose/docker_compose_minio.yml @@ -2,7 +2,7 @@ version: '2.3' services: minio1: - image: minio/minio:RELEASE.2023-09-30T07-02-29Z + image: minio/minio:RELEASE.2024-07-31T05-46-26Z volumes: - data1-1:/data1 - ${MINIO_CERTS_DIR:-}:/certs From 928d5df3f02559fab4fd9bbd258f6e9c6ac4c18a Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Thu, 1 Aug 2024 12:32:43 +0200 Subject: [PATCH 1350/2361] added tests for https://github.com/ClickHouse/ClickHouse/pull/65475 Support writting page index into parquet file --- .../test/integration/runner/requirements.txt | 1 + .../test_parquet_page_index/__init__.py | 0 .../test_parquet_page_index/test.py | 77 +++++++++++++++++++ 3 files changed, 78 insertions(+) create mode 100644 tests/integration/test_parquet_page_index/__init__.py create mode 100644 tests/integration/test_parquet_page_index/test.py diff --git a/docker/test/integration/runner/requirements.txt b/docker/test/integration/runner/requirements.txt index 8a77d8abf77..428986b5562 100644 --- a/docker/test/integration/runner/requirements.txt +++ b/docker/test/integration/runner/requirements.txt @@ -74,6 +74,7 @@ protobuf==4.25.2 psycopg2-binary==2.9.6 py4j==0.10.9.5 py==1.11.0 +pyarrow==17.0.0 pycparser==2.22 pycryptodome==3.20.0 pymongo==3.11.0 diff --git a/tests/integration/test_parquet_page_index/__init__.py b/tests/integration/test_parquet_page_index/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_parquet_page_index/test.py b/tests/integration/test_parquet_page_index/test.py new file mode 100644 index 00000000000..366216ea2c6 --- /dev/null +++ b/tests/integration/test_parquet_page_index/test.py @@ -0,0 +1,77 @@ +import pytest +from helpers.cluster import ClickHouseCluster +import pyarrow.parquet as pq +import os +import time + +cluster = ClickHouseCluster(__file__) +path_to_userfiles = "/var/lib/clickhouse/user_files/" +path_to_external_dirs = "/ClickHouse/tests/integration/test_parquet_page_index/_instances" +node = cluster.add_instance("node", external_dirs=[path_to_userfiles]) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def check_page_index(file_path): + metadata = pq.read_metadata(file_path) + assert metadata, "pyarrow.parquet library can't read parquet file written by Clickhouse" + return metadata.row_group(0).column(0).has_offset_index + + +def delete_if_exists(file_path): + if os.path.exists(file_path): + os.remove(file_path) + + +@pytest.mark.parametrize("query, expected_result", { + ("SElECT number, number+1 FROM system.numbers LIMIT 100 " + "INTO OUTFILE '{file_name}' FORMAT Parquet " + "SETTINGS output_format_parquet_use_custom_encoder = false, " + "output_format_parquet_write_page_index = true;", True), + ("SElECT number, number+1 FROM system.numbers LIMIT 100 " + "INTO OUTFILE '{file_name}' FORMAT Parquet " + "SETTINGS output_format_parquet_use_custom_encoder = false, " + "output_format_parquet_write_page_index = false;", False), + # # default settings: + # # output_format_parquet_use_custom_encoder = true + ("SElECT number, number+1 FROM system.numbers LIMIT 100 " + "INTO OUTFILE '{file_name}' FORMAT Parquet;", False), +}) +def test_parquet_page_index_select_into_outfile(query, 
expected_result, start_cluster): + file_name = 'export.parquet' + query = query.format(file_name=file_name) + delete_if_exists(file_name) + assert node.query(query) == '' + assert check_page_index(file_name) == expected_result, "Page offset index have wrong value" + delete_if_exists(file_name) + + +@pytest.mark.parametrize("query, expected_result", { + ("INSERT INTO TABLE FUNCTION file('{file_name}') " + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SETTINGS output_format_parquet_use_custom_encoder=false, " + "output_format_parquet_write_page_index=true FORMAT Parquet", True), + ("INSERT INTO TABLE FUNCTION file('{file_name}') " + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SETTINGS output_format_parquet_use_custom_encoder=false, " + "output_format_parquet_write_page_index=false FORMAT Parquet", False), + # # default settings: + # # output_format_parquet_use_custom_encoder = true + ("INSERT INTO TABLE FUNCTION file('{file_name}') " + "SElECT number, number+1 FROM system.numbers LIMIT 100 FORMAT Parquet", False), +}) +def test_parquet_page_index_insert_into_table_function_file(query, expected_result, start_cluster): + file_name = 'export.parquet' + query = query.format(file_name=file_name) + file_path = f"{path_to_external_dirs}{path_to_userfiles}{file_name}" + delete_if_exists(file_path) + assert node.query(query) == '' + assert check_page_index(file_path) == expected_result, "Page offset index have wrong value" + delete_if_exists(file_path) From 572ad2f6fe193d69e9c2c1c64125b3ffdff1de0d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 1 Aug 2024 11:11:52 +0000 Subject: [PATCH 1351/2361] Automatic style fix --- .../test_parquet_page_index/test.py | 108 ++++++++++++------ 1 file changed, 71 insertions(+), 37 deletions(-) diff --git a/tests/integration/test_parquet_page_index/test.py b/tests/integration/test_parquet_page_index/test.py index 366216ea2c6..a820d1b5bc5 100644 --- a/tests/integration/test_parquet_page_index/test.py +++ b/tests/integration/test_parquet_page_index/test.py @@ -6,7 +6,9 @@ import time cluster = ClickHouseCluster(__file__) path_to_userfiles = "/var/lib/clickhouse/user_files/" -path_to_external_dirs = "/ClickHouse/tests/integration/test_parquet_page_index/_instances" +path_to_external_dirs = ( + "/ClickHouse/tests/integration/test_parquet_page_index/_instances" +) node = cluster.add_instance("node", external_dirs=[path_to_userfiles]) @@ -21,7 +23,9 @@ def start_cluster(): def check_page_index(file_path): metadata = pq.read_metadata(file_path) - assert metadata, "pyarrow.parquet library can't read parquet file written by Clickhouse" + assert ( + metadata + ), "pyarrow.parquet library can't read parquet file written by Clickhouse" return metadata.row_group(0).column(0).has_offset_index @@ -30,48 +34,78 @@ def delete_if_exists(file_path): os.remove(file_path) -@pytest.mark.parametrize("query, expected_result", { - ("SElECT number, number+1 FROM system.numbers LIMIT 100 " - "INTO OUTFILE '{file_name}' FORMAT Parquet " - "SETTINGS output_format_parquet_use_custom_encoder = false, " - "output_format_parquet_write_page_index = true;", True), - ("SElECT number, number+1 FROM system.numbers LIMIT 100 " - "INTO OUTFILE '{file_name}' FORMAT Parquet " - "SETTINGS output_format_parquet_use_custom_encoder = false, " - "output_format_parquet_write_page_index = false;", False), - # # default settings: - # # output_format_parquet_use_custom_encoder = true - ("SElECT number, number+1 FROM system.numbers LIMIT 100 " - "INTO OUTFILE '{file_name}' 
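
What these integration tests encode is the interaction of two output settings: a page (offset) index ends up in the Parquet file only when output_format_parquet_write_page_index is enabled and the non-custom writer is used; with the default output_format_parquet_use_custom_encoder = true the index is not written, which is why the default-settings cases expect False. The check itself relies on pyarrow: metadata.row_group(0).column(0).has_offset_index. The query shape under test, using the test's own file name, is essentially:

    SELECT number, number + 1 FROM system.numbers LIMIT 100
    INTO OUTFILE 'export.parquet' FORMAT Parquet
    SETTINGS output_format_parquet_use_custom_encoder = false,
             output_format_parquet_write_page_index = true;
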
FORMAT Parquet;", False), -}) +@pytest.mark.parametrize( + "query, expected_result", + { + ( + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "INTO OUTFILE '{file_name}' FORMAT Parquet " + "SETTINGS output_format_parquet_use_custom_encoder = false, " + "output_format_parquet_write_page_index = true;", + True, + ), + ( + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "INTO OUTFILE '{file_name}' FORMAT Parquet " + "SETTINGS output_format_parquet_use_custom_encoder = false, " + "output_format_parquet_write_page_index = false;", + False, + ), + # # default settings: + # # output_format_parquet_use_custom_encoder = true + ( + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "INTO OUTFILE '{file_name}' FORMAT Parquet;", + False, + ), + }, +) def test_parquet_page_index_select_into_outfile(query, expected_result, start_cluster): - file_name = 'export.parquet' + file_name = "export.parquet" query = query.format(file_name=file_name) delete_if_exists(file_name) - assert node.query(query) == '' - assert check_page_index(file_name) == expected_result, "Page offset index have wrong value" + assert node.query(query) == "" + assert ( + check_page_index(file_name) == expected_result + ), "Page offset index have wrong value" delete_if_exists(file_name) -@pytest.mark.parametrize("query, expected_result", { - ("INSERT INTO TABLE FUNCTION file('{file_name}') " - "SElECT number, number+1 FROM system.numbers LIMIT 100 " - "SETTINGS output_format_parquet_use_custom_encoder=false, " - "output_format_parquet_write_page_index=true FORMAT Parquet", True), - ("INSERT INTO TABLE FUNCTION file('{file_name}') " - "SElECT number, number+1 FROM system.numbers LIMIT 100 " - "SETTINGS output_format_parquet_use_custom_encoder=false, " - "output_format_parquet_write_page_index=false FORMAT Parquet", False), - # # default settings: - # # output_format_parquet_use_custom_encoder = true - ("INSERT INTO TABLE FUNCTION file('{file_name}') " - "SElECT number, number+1 FROM system.numbers LIMIT 100 FORMAT Parquet", False), -}) -def test_parquet_page_index_insert_into_table_function_file(query, expected_result, start_cluster): - file_name = 'export.parquet' +@pytest.mark.parametrize( + "query, expected_result", + { + ( + "INSERT INTO TABLE FUNCTION file('{file_name}') " + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SETTINGS output_format_parquet_use_custom_encoder=false, " + "output_format_parquet_write_page_index=true FORMAT Parquet", + True, + ), + ( + "INSERT INTO TABLE FUNCTION file('{file_name}') " + "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SETTINGS output_format_parquet_use_custom_encoder=false, " + "output_format_parquet_write_page_index=false FORMAT Parquet", + False, + ), + # # default settings: + # # output_format_parquet_use_custom_encoder = true + ( + "INSERT INTO TABLE FUNCTION file('{file_name}') " + "SElECT number, number+1 FROM system.numbers LIMIT 100 FORMAT Parquet", + False, + ), + }, +) +def test_parquet_page_index_insert_into_table_function_file( + query, expected_result, start_cluster +): + file_name = "export.parquet" query = query.format(file_name=file_name) file_path = f"{path_to_external_dirs}{path_to_userfiles}{file_name}" delete_if_exists(file_path) - assert node.query(query) == '' - assert check_page_index(file_path) == expected_result, "Page offset index have wrong value" + assert node.query(query) == "" + assert ( + check_page_index(file_path) == expected_result + ), "Page offset index have wrong value" delete_if_exists(file_path) From 
8fc77bec6639c8f0361858d9e031f3cb1175ae30 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 1 Aug 2024 13:14:14 +0200 Subject: [PATCH 1352/2361] fix tests --- ...403_enable_extended_results_for_datetime_functions.reference | 2 +- .../0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference b/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference index c830d790000..147e368b5c9 100644 --- a/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference +++ b/tests/queries/0_stateless/02403_enable_extended_results_for_datetime_functions.reference @@ -64,7 +64,7 @@ toStartOfMonth;toDateTime64;false 2099-07-07 type;toStartOfMonth;toDateTime64;false Date toStartOfWeek;toDate32;false 2099-07-07 type;toStartOfWeek;toDate32;false Date -toStartOfWeek;toDateTime64;false 2099-07-07 +toStartOfWeek;toDateTime64;false 1970-01-01 type;toStartOfWeek;toDateTime64;false Date toMonday;toDate32;false 2099-07-08 type;toMonday;toDate32;false Date diff --git a/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql b/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql index 0f00a52cb86..1769d96aa8d 100644 --- a/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql +++ b/tests/queries/0_stateless/03215_toStartOfWeek_with_dateTime64_fix.sql @@ -1,2 +1,2 @@ -SELECT toStartOfWeek(toDateTime64('1970-02-01', 6)); +SELECT toStartOfWeek(toDateTime64('1970-01-01', 6)); SELECT toStartOfWeek(toDateTime('1970-01-01')); From aaa7750bf98840e22f3521977b6d7cf168460b91 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 1 Aug 2024 11:24:50 +0000 Subject: [PATCH 1353/2361] Handle dynamic columns in typed paths --- src/Columns/ColumnObject.cpp | 10 ++++++++++ .../03214_json_typed_dynamic_path.reference | 4 ++++ .../03214_json_typed_dynamic_path.sql | 17 +++++++++++++++++ 3 files changed, 31 insertions(+) create mode 100644 tests/queries/0_stateless/03214_json_typed_dynamic_path.reference create mode 100644 tests/queries/0_stateless/03214_json_typed_dynamic_path.sql diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 8649e2314b9..e7bb7639dd2 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1211,6 +1211,16 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou } column->takeDynamicStructureFromSourceColumns(dynamic_path_source_columns); } + + /// Typed paths also can contain types with dynamic structure. 
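
This is the step that was missing: takeDynamicStructureFromSourceColumns already propagated the dynamic structure for dynamic paths, but a typed path can itself be declared with a dynamically structured type, as in JSON(a Dynamic), and its column then needs the same treatment, built from the corresponding typed-path columns of the source parts. The new 03214_json_typed_dynamic_path test exercises exactly this by merging two parts that store different types under the same typed path; a condensed version of it (the experimental JSON setting is required, and the per-part width settings from the test are omitted here):

    SET allow_experimental_json_type = 1;
    CREATE TABLE test (json JSON(a Dynamic)) ENGINE = MergeTree ORDER BY tuple();
    INSERT INTO test SELECT '{"a" : 42}';
    INSERT INTO test SELECT '{"a" : [1, 2, 3]}';
    OPTIMIZE TABLE test;
    SELECT * FROM test ORDER BY toString(json);
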
+ for (auto & [path, column] : typed_paths) + { + Columns typed_path_source_columns; + typed_path_source_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + typed_path_source_columns.push_back(assert_cast(*source_column).typed_paths.at(path)); + column->takeDynamicStructureFromSourceColumns(typed_path_source_columns); + } } size_t ColumnObject::findPathLowerBoundInSharedData(StringRef path, const ColumnString & shared_data_paths, size_t start, size_t end) diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.reference b/tests/queries/0_stateless/03214_json_typed_dynamic_path.reference new file mode 100644 index 00000000000..1b3e6b7a8db --- /dev/null +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.reference @@ -0,0 +1,4 @@ +{"a":"42"} +{"a":["1","2","3"]} +{"a":"42"} +{"a":["1","2","3"]} diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql new file mode 100644 index 00000000000..1f6a025825a --- /dev/null +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +drop table if exists test; +create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +insert into test select '{"a" : 42}'; +insert into test select '{"a" : [1, 2, 3]}'; +optimize table test; +select * from test order by toString(json); +drop table test; + +create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=10000000, min_bytes_for_wide_part=10000000; +insert into test select '{"a" : 42}'; +insert into test select '{"a" : [1, 2, 3]}'; +optimize table test; +select * from test order by toString(json); +drop table test; From d0b514f12843fd09f18888666aa98170a52fb9d5 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 1 Aug 2024 13:27:09 +0200 Subject: [PATCH 1354/2361] Review fixes --- src/Interpreters/Cache/FileCache.cpp | 12 ++++++------ src/Interpreters/Cache/IFileCachePriority.h | 6 +++--- src/Interpreters/Cache/LRUFileCachePriority.cpp | 10 +++++----- src/Interpreters/Cache/LRUFileCachePriority.h | 2 +- src/Interpreters/Cache/SLRUFileCachePriority.cpp | 6 +++--- src/Interpreters/Cache/SLRUFileCachePriority.h | 2 +- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index bf8dd24a1db..1f79c7d9032 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -998,7 +998,7 @@ void FileCache::freeSpaceRatioKeepingThreadFunc() FileCacheReserveStat stat; EvictionCandidates eviction_candidates; - IFileCachePriority::DesiredSizeStatus desired_size_status; + IFileCachePriority::CollectStatus desired_size_status; try { /// Collect at most `keep_up_free_space_remove_batch` elements to evict, @@ -1009,7 +1009,7 @@ void FileCache::freeSpaceRatioKeepingThreadFunc() #ifdef DEBUG_OR_SANITIZER_BUILD /// Let's make sure that we correctly processed the limits. 
- if (desired_size_status == IFileCachePriority::DesiredSizeStatus::SUCCESS + if (desired_size_status == IFileCachePriority::CollectStatus::SUCCESS && eviction_candidates.size() < keep_up_free_space_remove_batch) { const auto current_size = main_priority->getSize(lock); @@ -1070,13 +1070,13 @@ void FileCache::freeSpaceRatioKeepingThreadFunc() [[maybe_unused]] bool scheduled = false; switch (desired_size_status) { - case IFileCachePriority::DesiredSizeStatus::SUCCESS: [[fallthrough]]; - case IFileCachePriority::DesiredSizeStatus::CANNOT_EVICT: + case IFileCachePriority::CollectStatus::SUCCESS: [[fallthrough]]; + case IFileCachePriority::CollectStatus::CANNOT_EVICT: { scheduled = keep_up_free_space_ratio_task->scheduleAfter(general_reschedule_ms); break; } - case IFileCachePriority::DesiredSizeStatus::REACHED_MAX_CANDIDATES_LIMIT: + case IFileCachePriority::CollectStatus::REACHED_MAX_CANDIDATES_LIMIT: { scheduled = keep_up_free_space_ratio_task->schedule(); break; @@ -1558,7 +1558,7 @@ void FileCache::applySettingsIfPossible(const FileCacheSettings & new_settings, FileCacheReserveStat stat; if (main_priority->collectCandidatesForEviction( new_settings.max_size, new_settings.max_elements, 0/* max_candidates_to_evict */, - stat, eviction_candidates, cache_lock) == IFileCachePriority::DesiredSizeStatus::SUCCESS) + stat, eviction_candidates, cache_lock) == IFileCachePriority::CollectStatus::SUCCESS) { if (eviction_candidates.size() == 0) { diff --git a/src/Interpreters/Cache/IFileCachePriority.h b/src/Interpreters/Cache/IFileCachePriority.h index 9885ab00f78..6970d02473a 100644 --- a/src/Interpreters/Cache/IFileCachePriority.h +++ b/src/Interpreters/Cache/IFileCachePriority.h @@ -150,14 +150,14 @@ public: /// Collect eviction candidates sufficient to have `desired_size` /// and `desired_elements_num` as current cache state. /// Collect no more than `max_candidates_to_evict` elements. - /// Return `true` if the first condition is satisfied. - enum class DesiredSizeStatus + /// Return SUCCESS status if the first condition is satisfied. 
+ enum class CollectStatus { SUCCESS, CANNOT_EVICT, REACHED_MAX_CANDIDATES_LIMIT, }; - virtual DesiredSizeStatus collectCandidatesForEviction( + virtual CollectStatus collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, diff --git a/src/Interpreters/Cache/LRUFileCachePriority.cpp b/src/Interpreters/Cache/LRUFileCachePriority.cpp index 7970eaa3e13..0e0170c76e3 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/LRUFileCachePriority.cpp @@ -323,7 +323,7 @@ bool LRUFileCachePriority::collectCandidatesForEviction( } } -IFileCachePriority::DesiredSizeStatus LRUFileCachePriority::collectCandidatesForEviction( +IFileCachePriority::CollectStatus LRUFileCachePriority::collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, @@ -336,23 +336,23 @@ IFileCachePriority::DesiredSizeStatus LRUFileCachePriority::collectCandidatesFor return canFit(0, 0, stat.total_stat.releasable_size, stat.total_stat.releasable_count, lock, &desired_size, &desired_elements_count); }; - auto status = DesiredSizeStatus::CANNOT_EVICT; + auto status = CollectStatus::CANNOT_EVICT; auto stop_condition = [&]() { if (desired_limits_satisfied()) { - status = DesiredSizeStatus::SUCCESS; + status = CollectStatus::SUCCESS; return true; } if (max_candidates_to_evict && res.size() >= max_candidates_to_evict) { - status = DesiredSizeStatus::REACHED_MAX_CANDIDATES_LIMIT; + status = CollectStatus::REACHED_MAX_CANDIDATES_LIMIT; return true; } return false; }; iterateForEviction(res, stat, stop_condition, lock); - chassert(status != DesiredSizeStatus::SUCCESS || stop_condition()); + chassert(status != CollectStatus::SUCCESS || stop_condition()); return status; } diff --git a/src/Interpreters/Cache/LRUFileCachePriority.h b/src/Interpreters/Cache/LRUFileCachePriority.h index 9bced106727..0ca62b19d37 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.h +++ b/src/Interpreters/Cache/LRUFileCachePriority.h @@ -63,7 +63,7 @@ public: const UserID & user_id, const CachePriorityGuard::Lock &) override; - DesiredSizeStatus collectCandidatesForEviction( + CollectStatus collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.cpp b/src/Interpreters/Cache/SLRUFileCachePriority.cpp index dc0df223cb0..f5ea519d7d4 100644 --- a/src/Interpreters/Cache/SLRUFileCachePriority.cpp +++ b/src/Interpreters/Cache/SLRUFileCachePriority.cpp @@ -256,7 +256,7 @@ bool SLRUFileCachePriority::collectCandidatesForEvictionInProtected( return true; } -IFileCachePriority::DesiredSizeStatus SLRUFileCachePriority::collectCandidatesForEviction( +IFileCachePriority::CollectStatus SLRUFileCachePriority::collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, @@ -285,7 +285,7 @@ IFileCachePriority::DesiredSizeStatus SLRUFileCachePriority::collectCandidatesFo chassert(!max_candidates_to_evict || res.size() <= max_candidates_to_evict); chassert(res.size() == stat.total_stat.releasable_count); - if (probationary_desired_size_status == DesiredSizeStatus::REACHED_MAX_CANDIDATES_LIMIT) + if (probationary_desired_size_status == CollectStatus::REACHED_MAX_CANDIDATES_LIMIT) return probationary_desired_size_status; const auto desired_protected_size = getRatio(desired_size, size_ratio); @@ -306,7 +306,7 @@ IFileCachePriority::DesiredSizeStatus 
SLRUFileCachePriority::collectCandidatesFo desired_protected_size, desired_protected_elements_num, protected_queue.getStateInfoForLog(lock)); - if (probationary_desired_size_status == DesiredSizeStatus::SUCCESS) + if (probationary_desired_size_status == CollectStatus::SUCCESS) return protected_desired_size_status; else return probationary_desired_size_status; diff --git a/src/Interpreters/Cache/SLRUFileCachePriority.h b/src/Interpreters/Cache/SLRUFileCachePriority.h index e6d20e0d0ee..23bc8c0908b 100644 --- a/src/Interpreters/Cache/SLRUFileCachePriority.h +++ b/src/Interpreters/Cache/SLRUFileCachePriority.h @@ -58,7 +58,7 @@ public: const UserID & user_id, const CachePriorityGuard::Lock &) override; - DesiredSizeStatus collectCandidatesForEviction( + CollectStatus collectCandidatesForEviction( size_t desired_size, size_t desired_elements_count, size_t max_candidates_to_evict, From 2c018d6f4d4d6fa1c04e91306b60b6e85d8e468f Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 1 Aug 2024 13:38:41 +0200 Subject: [PATCH 1355/2361] Update cluster.py --- tests/integration/helpers/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 90b28a4cda3..acf033de46d 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -248,7 +248,7 @@ def check_rabbitmq_is_available(rabbitmq_id, cookie): ), stdout=subprocess.PIPE, ) - p.communicate() + p.wait(timeout=60) return p.returncode == 0 From d2e0668d5129a4e60f462de5e5b683099f49bf4b Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 1 Aug 2024 13:51:35 +0200 Subject: [PATCH 1356/2361] fix settingsChangesHistory after merge with master --- src/Core/SettingsChangesHistory.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index a01c5faaf10..28a732c6177 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -81,9 +81,6 @@ static std::initializer_list Date: Thu, 1 Aug 2024 11:56:07 +0000 Subject: [PATCH 1357/2361] Fix 02434_cancel_insert_when_client_dies --- .../02434_cancel_insert_when_client_dies.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh index f85aaed7716..45f4194104e 100755 --- a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh +++ b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh @@ -10,22 +10,26 @@ export DATA_FILE="$CLICKHOUSE_TMP/deduptest.tsv" export TEST_MARK="02434_insert_${CLICKHOUSE_DATABASE}_" $CLICKHOUSE_CLIENT -q 'select * from numbers(5000000) format TSV' > $DATA_FILE -$CLICKHOUSE_CLIENT -q 'create table dedup_test(A Int64) Engine = MergeTree order by A settings non_replicated_deduplication_window=1000;' +$CLICKHOUSE_CLIENT -q "create table dedup_test(A Int64) Engine = MergeTree order by A + settings non_replicated_deduplication_window=1000 + , merge_tree_clear_old_temporary_directories_interval_seconds = 1 + ;" $CLICKHOUSE_CLIENT -q "create table dedup_dist(A Int64) Engine = Distributed('test_cluster_one_shard_two_replicas', currentDatabase(), dedup_test)" function insert_data { - SETTINGS="query_id=$ID&max_insert_block_size=110000&min_insert_block_size_rows=110000" + # 
send_logs_level: https://github.com/ClickHouse/ClickHouse/issues/67599 + SETTINGS="query_id=$ID&max_insert_block_size=110000&min_insert_block_size_rows=110000&send_logs_level=fatal" # max_block_size=10000, so external table will contain smaller blocks that will be squashed on insert-select (more chances to catch a bug on query cancellation) TRASH_SETTINGS="query_id=$ID&input_format_parallel_parsing=0&max_threads=1&max_insert_threads=1&max_insert_block_size=110000&max_block_size=10000&min_insert_block_size_bytes=0&min_insert_block_size_rows=110000&max_insert_block_size=110000" TYPE=$(( RANDOM % 5 )) if [[ "$TYPE" -eq 0 ]]; then # client will send 10000-rows blocks, server will squash them into 110000-rows blocks (more chances to catch a bug on query cancellation) - $CLICKHOUSE_CLIENT --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" \ + $CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=fatal --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" \ -q 'insert into dedup_test settings max_insert_block_size=110000, min_insert_block_size_rows=110000 format TSV' < $DATA_FILE elif [[ "$TYPE" -eq 1 ]]; then - $CLICKHOUSE_CLIENT --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" --prefer_localhost_replica="$(( RANDOM % 2))" \ + $CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=fatal --max_block_size=10000 --max_insert_block_size=10000 --query_id="$ID" --prefer_localhost_replica="$(( RANDOM % 2))" \ -q 'insert into dedup_dist settings max_insert_block_size=110000, min_insert_block_size_rows=110000 format TSV' < $DATA_FILE elif [[ "$TYPE" -eq 2 ]]; then $CLICKHOUSE_CURL -sS -X POST --data-binary @- "$CLICKHOUSE_URL&$SETTINGS&query=insert+into+dedup_test+format+TSV" < $DATA_FILE From 029deaeee8431d0ef6f2a460c2bd8631c8025254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 14:13:11 +0200 Subject: [PATCH 1358/2361] Fix 02910_bad_logs_level_in_local in fast tests! 
--- tests/queries/0_stateless/02910_bad_logs_level_in_local.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh b/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh index badf7232a95..b5de10bf191 100755 --- a/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh +++ b/tests/queries/0_stateless/02910_bad_logs_level_in_local.sh @@ -1,14 +1,14 @@ #!/usr/bin/expect -f log_user 0 -set timeout 60 +set timeout 30 match_max 100000 spawn bash -c "clickhouse-local" expect ":) " send -- "SET send_logs_level = 't'\r" -expect "Exception on client:" +expect "Unexpected value of LogsLevel:" {} timeout {exit 1} expect ":) " send -- "exit\r" expect eof From bcc75d3681d45b6637211aca0367703b3e957c05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 14:38:41 +0200 Subject: [PATCH 1359/2361] Make 02477_analyzer_function_hints.sh parallelizable --- .../0_stateless/02477_analyzer_function_hints.sh | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/02477_analyzer_function_hints.sh b/tests/queries/0_stateless/02477_analyzer_function_hints.sh index d49c20cab75..f83935e47fb 100755 --- a/tests/queries/0_stateless/02477_analyzer_function_hints.sh +++ b/tests/queries/0_stateless/02477_analyzer_function_hints.sh @@ -1,7 +1,5 @@ #!/usr/bin/env bash -# Tags: no-parallel - set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) @@ -14,11 +12,11 @@ $CLICKHOUSE_CLIENT -q "SELECT plu(1, 1) SETTINGS allow_experimental_analyzer = 1 $CLICKHOUSE_CLIENT -q "SELECT uniqExac(1, 1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['uniqExact'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "DROP FUNCTION IF EXISTS test_user_defined_function;" -$CLICKHOUSE_CLIENT -q "CREATE FUNCTION test_user_defined_function AS x -> x + 1;" -$CLICKHOUSE_CLIENT -q "SELECT test_user_defined_functio(1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ - | grep "Maybe you meant: \['test_user_defined_function'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "DROP FUNCTION test_user_defined_function"; +$CLICKHOUSE_CLIENT -q "DROP FUNCTION IF EXISTS test_user_defined_function_$CLICKHOUSE_DATABASE;" +$CLICKHOUSE_CLIENT -q "CREATE FUNCTION test_user_defined_function_$CLICKHOUSE_DATABASE AS x -> x + 1;" +$CLICKHOUSE_CLIENT -q "SELECT test_user_defined_function_${CLICKHOUSE_DATABASE}A(1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ + | grep -E "Maybe you meant: \[.*'test_user_defined_function_$CLICKHOUSE_DATABASE'.*\]" &>/dev/null; +$CLICKHOUSE_CLIENT -q "DROP FUNCTION test_user_defined_function_$CLICKHOUSE_DATABASE"; $CLICKHOUSE_CLIENT -q "WITH (x -> x + 1) AS lambda_function SELECT lambda_functio(1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['lambda_function'\]" &>/dev/null; From 4adc9523e403ab103ed3dec537b02566287b76ee Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Thu, 1 Aug 2024 14:38:07 +0200 Subject: [PATCH 1360/2361] added a stateless test for ENGINE=File(Parquet) --- .../0_stateless/03215_parquet_index.reference | 300 ++++++++++++++++++ .../0_stateless/03215_parquet_index.sql | 17 + 2 files changed, 317 insertions(+) create mode 100644 tests/queries/0_stateless/03215_parquet_index.reference create mode 100644 tests/queries/0_stateless/03215_parquet_index.sql diff --git a/tests/queries/0_stateless/03215_parquet_index.reference b/tests/queries/0_stateless/03215_parquet_index.reference new file mode 100644 index 
00000000000..334f2f3824e --- /dev/null +++ b/tests/queries/0_stateless/03215_parquet_index.reference @@ -0,0 +1,300 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 +50 50 +51 51 +52 52 +53 53 +54 54 +55 55 +56 56 +57 57 +58 58 +59 59 +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +71 71 +72 72 +73 73 +74 74 +75 75 +76 76 +77 77 +78 78 +79 79 +80 80 +81 81 +82 82 +83 83 +84 84 +85 85 +86 86 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 +50 50 +51 51 +52 52 +53 53 +54 54 +55 55 +56 56 +57 57 +58 58 +59 59 +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +71 71 +72 72 +73 73 +74 74 +75 75 +76 76 +77 77 +78 78 +79 79 +80 80 +81 81 +82 82 +83 83 +84 84 +85 85 +86 86 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 +50 50 +51 51 +52 52 +53 53 +54 54 +55 55 +56 56 +57 57 +58 58 +59 59 +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +71 71 +72 72 +73 73 +74 74 +75 75 +76 76 +77 77 +78 78 +79 79 +80 80 +81 81 +82 82 +83 83 +84 84 +85 85 +86 86 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 diff --git a/tests/queries/0_stateless/03215_parquet_index.sql b/tests/queries/0_stateless/03215_parquet_index.sql new file mode 100644 index 00000000000..5b176ff70ba --- /dev/null +++ b/tests/queries/0_stateless/03215_parquet_index.sql @@ -0,0 +1,17 @@ +-- default settings. +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet); +INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); +SELECT col1, col2 FROM test_parquet; + +-- Parquet will have indexes in columns. We are not checking that indexes exist here, there is an integration test test_parquet_page_index for that. We just check that a setting doesn't break the SELECT +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet) SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=true; +INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); +SELECT col1, col2 FROM test_parquet; + +-- Parquet will not have indexes in columns. 
+DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet) SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=false; +INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); +SELECT col1, col2 FROM test_parquet; From a8e7c8ae9a5e345fa1725d664fd490891e4fa9a5 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Thu, 1 Aug 2024 14:45:46 +0200 Subject: [PATCH 1361/2361] fix for parallel execution --- tests/integration/test_parquet_page_index/test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_parquet_page_index/test.py b/tests/integration/test_parquet_page_index/test.py index a820d1b5bc5..db291e20b74 100644 --- a/tests/integration/test_parquet_page_index/test.py +++ b/tests/integration/test_parquet_page_index/test.py @@ -61,7 +61,7 @@ def delete_if_exists(file_path): }, ) def test_parquet_page_index_select_into_outfile(query, expected_result, start_cluster): - file_name = "export.parquet" + file_name = f"export{time.time()}.parquet" query = query.format(file_name=file_name) delete_if_exists(file_name) assert node.query(query) == "" @@ -76,14 +76,14 @@ def test_parquet_page_index_select_into_outfile(query, expected_result, start_cl { ( "INSERT INTO TABLE FUNCTION file('{file_name}') " - "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SELECT number, number+1 FROM system.numbers LIMIT 100 " "SETTINGS output_format_parquet_use_custom_encoder=false, " "output_format_parquet_write_page_index=true FORMAT Parquet", True, ), ( "INSERT INTO TABLE FUNCTION file('{file_name}') " - "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SELECT number, number+1 FROM system.numbers LIMIT 100 " "SETTINGS output_format_parquet_use_custom_encoder=false, " "output_format_parquet_write_page_index=false FORMAT Parquet", False, @@ -92,7 +92,7 @@ def test_parquet_page_index_select_into_outfile(query, expected_result, start_cl # # output_format_parquet_use_custom_encoder = true ( "INSERT INTO TABLE FUNCTION file('{file_name}') " - "SElECT number, number+1 FROM system.numbers LIMIT 100 FORMAT Parquet", + "SELECT number, number+1 FROM system.numbers LIMIT 100 FORMAT Parquet", False, ), }, @@ -100,7 +100,7 @@ def test_parquet_page_index_select_into_outfile(query, expected_result, start_cl def test_parquet_page_index_insert_into_table_function_file( query, expected_result, start_cluster ): - file_name = "export.parquet" + file_name = f"export{time.time()}.parquet" query = query.format(file_name=file_name) file_path = f"{path_to_external_dirs}{path_to_userfiles}{file_name}" delete_if_exists(file_path) From 008408c81f23ed615cb899048418dc46aa3c2a9f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 1 Aug 2024 13:47:34 +0100 Subject: [PATCH 1362/2361] impl --- .../0_stateless/01605_adaptive_granularity_block_borders.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql index 9b96ce3e586..aaeee466794 100644 --- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql +++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql @@ -1,4 +1,4 @@ --- Tags: no-random-merge-tree-settings, no-tsan, no-debug, no-object-storage +-- Tags: no-random-merge-tree-settings, no-random-settings, no-tsan, no-debug, no-object-storage, long -- no-tsan: too slow -- 
no-object-storage: for remote tables we use thread pool even when reading with one stream, so memory consumption is higher @@ -16,7 +16,7 @@ CREATE TABLE adaptive_table( value String ) ENGINE MergeTree() ORDER BY key -SETTINGS index_granularity_bytes=1048576, +SETTINGS index_granularity_bytes = 1048576, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, enable_vertical_merge_algorithm = 0; From 70228acd7e809230582883a0b6b70c4cd9c04daa Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 1 Aug 2024 15:02:29 +0200 Subject: [PATCH 1363/2361] Update CHANGELOG.md --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4c873ba3f9..5cd4200d9ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,6 @@ #### New Feature * Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). -* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)). * Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)). * Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)). * Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)). 
From 48c6e36dfd23f297907575ce4696f761aec49e11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 15:03:41 +0200 Subject: [PATCH 1364/2361] Make 01062_window_view_event_hop_watch_asc parallelizable --- .../01062_window_view_event_hop_watch_asc.py | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py b/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py index d6cc3ee1a88..3c85ff30ba8 100755 --- a/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py +++ b/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# Tags: no-parallel import os import sys @@ -17,6 +16,7 @@ log = None with client(name="client1>", log=log) as client1, client( name="client2>", log=log ) as client2: + database_name = os.environ["CLICKHOUSE_DATABASE"] client1.expect(prompt) client2.expect(prompt) @@ -31,40 +31,38 @@ with client(name="client1>", log=log) as client1, client( client2.send("SET allow_experimental_analyzer = 0") client2.expect(prompt) - client1.send("CREATE DATABASE IF NOT EXISTS 01062_window_view_event_hop_watch_asc") + client1.send(f"DROP TABLE IF EXISTS {database_name}.mt") client1.expect(prompt) - client1.send("DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.mt") - client1.expect(prompt) - client1.send("DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.wv SYNC") + client1.send(f"DROP TABLE IF EXISTS {database_name}.wv SYNC") client1.expect(prompt) client1.send( - "CREATE TABLE 01062_window_view_event_hop_watch_asc.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" + f"CREATE TABLE {database_name}.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" ) client1.expect(prompt) client1.send( - "CREATE WINDOW VIEW 01062_window_view_event_hop_watch_asc.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT count(a) AS count, hopEnd(wid) AS w_end FROM 01062_window_view_event_hop_watch_asc.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid" + f"CREATE WINDOW VIEW {database_name}.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT count(a) AS count, hopEnd(wid) AS w_end FROM {database_name}.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid" ) client1.expect(prompt) - client1.send("WATCH 01062_window_view_event_hop_watch_asc.wv") + client1.send(f"WATCH {database_name}.wv") client1.expect("Query id" + end_of_block) client1.expect("Progress: 0.00 rows.*\\)") client2.send( - "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" + f"INSERT INTO {database_name}.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" ) client2.expect(prompt) client2.send( - "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:05', 'US/Samoa'));" + f"INSERT INTO {database_name}.mt VALUES (1, toDateTime('1990/01/01 12:00:05', 'US/Samoa'));" ) client2.expect(prompt) client1.expect("1*" + end_of_block) client2.send( - "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));" + f"INSERT INTO {database_name}.mt VALUES (1, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));" ) client2.expect(prompt) client2.send( - "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));" + f"INSERT INTO {database_name}.mt 
VALUES (1, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));" ) client2.expect(prompt) client1.expect("1" + end_of_block) @@ -77,9 +75,7 @@ with client(name="client1>", log=log) as client1, client( if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.wv SYNC") + client1.send(f"DROP TABLE {database_name}.wv SYNC") client1.expect(prompt) - client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.mt") - client1.expect(prompt) - client1.send("DROP DATABASE IF EXISTS 01062_window_view_event_hop_watch_asc") + client1.send(f"DROP TABLE {database_name}.mt") client1.expect(prompt) From 7b72362e99a093a4f880d333c1b50cd114b590c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 15:06:27 +0200 Subject: [PATCH 1365/2361] 01493_alter_remove_properties_zookeeper is already parallelizable --- .../0_stateless/01493_alter_remove_properties_zookeeper.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql index 92e6fce2c93..362da3ac364 100644 --- a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql +++ b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql @@ -1,4 +1,4 @@ --- Tags: zookeeper, no-parallel +-- Tags: zookeeper DROP TABLE IF EXISTS r_prop_table1; DROP TABLE IF EXISTS r_prop_table2; From 56e48cf43b4d13810e2bb4b4e941954b654a1cb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 15:09:56 +0200 Subject: [PATCH 1366/2361] Make 01676_range_hashed_dictionary parallelizable --- .../01676_range_hashed_dictionary.sql | 92 +++++++++---------- 1 file changed, 42 insertions(+), 50 deletions(-) diff --git a/tests/queries/0_stateless/01676_range_hashed_dictionary.sql b/tests/queries/0_stateless/01676_range_hashed_dictionary.sql index 430f3a86dc1..ba2a9eba87f 100644 --- a/tests/queries/0_stateless/01676_range_hashed_dictionary.sql +++ b/tests/queries/0_stateless/01676_range_hashed_dictionary.sql @@ -1,10 +1,4 @@ --- Tags: no-parallel - -DROP DATABASE IF EXISTS database_for_range_dict; - -CREATE DATABASE database_for_range_dict; - -CREATE TABLE database_for_range_dict.date_table +CREATE TABLE date_table ( CountryID UInt64, StartDate Date, @@ -14,11 +8,11 @@ CREATE TABLE database_for_range_dict.date_table ENGINE = MergeTree() ORDER BY CountryID; -INSERT INTO database_for_range_dict.date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33); -INSERT INTO database_for_range_dict.date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42); -INSERT INTO database_for_range_dict.date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), 0.46); +INSERT INTO date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33); +INSERT INTO date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42); +INSERT INTO date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), 0.46); -CREATE DICTIONARY database_for_range_dict.range_dictionary +CREATE DICTIONARY range_dictionary ( CountryID UInt64, StartDate Date, @@ -26,7 +20,7 @@ CREATE DICTIONARY database_for_range_dict.range_dictionary Tax Float64 DEFAULT 0.2 ) PRIMARY KEY CountryID -SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB 'database_for_range_dict')) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 
'date_table' DB currentDatabase())) LIFETIME(MIN 1 MAX 1000) LAYOUT(RANGE_HASHED()) RANGE(MIN StartDate MAX EndDate) @@ -35,30 +29,30 @@ SETTINGS(dictionary_use_async_executor=1, max_threads=8) SELECT 'Dictionary not nullable'; SELECT 'dictGet'; -SELECT dictGet('database_for_range_dict.range_dictionary', 'Tax', toUInt64(1), toDate('2019-05-15')); -SELECT dictGet('database_for_range_dict.range_dictionary', 'Tax', toUInt64(1), toDate('2019-05-29')); -SELECT dictGet('database_for_range_dict.range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-29')); -SELECT dictGet('database_for_range_dict.range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-31')); -SELECT dictGetOrDefault('database_for_range_dict.range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-31'), 0.4); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(1), toDate('2019-05-15')); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(1), toDate('2019-05-29')); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-29')); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-31')); +SELECT dictGetOrDefault('range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-31'), 0.4); SELECT 'dictHas'; -SELECT dictHas('database_for_range_dict.range_dictionary', toUInt64(1), toDate('2019-05-15')); -SELECT dictHas('database_for_range_dict.range_dictionary', toUInt64(1), toDate('2019-05-29')); -SELECT dictHas('database_for_range_dict.range_dictionary', toUInt64(2), toDate('2019-05-29')); -SELECT dictHas('database_for_range_dict.range_dictionary', toUInt64(2), toDate('2019-05-31')); +SELECT dictHas('range_dictionary', toUInt64(1), toDate('2019-05-15')); +SELECT dictHas('range_dictionary', toUInt64(1), toDate('2019-05-29')); +SELECT dictHas('range_dictionary', toUInt64(2), toDate('2019-05-29')); +SELECT dictHas('range_dictionary', toUInt64(2), toDate('2019-05-31')); SELECT 'select columns from dictionary'; SELECT 'allColumns'; -SELECT * FROM database_for_range_dict.range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT * FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; SELECT 'noColumns'; -SELECT 1 FROM database_for_range_dict.range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 1 FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; SELECT 'onlySpecificColumns'; -SELECT CountryID, StartDate, Tax FROM database_for_range_dict.range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT CountryID, StartDate, Tax FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; SELECT 'onlySpecificColumn'; -SELECT Tax FROM database_for_range_dict.range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT Tax FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; -DROP DICTIONARY database_for_range_dict.range_dictionary; -DROP TABLE database_for_range_dict.date_table; +DROP DICTIONARY range_dictionary; +DROP TABLE date_table; -CREATE TABLE database_for_range_dict.date_table +CREATE TABLE date_table ( CountryID UInt64, StartDate Date, @@ -68,11 +62,11 @@ CREATE TABLE database_for_range_dict.date_table ENGINE = MergeTree() ORDER BY CountryID; -INSERT INTO database_for_range_dict.date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33); -INSERT INTO database_for_range_dict.date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42); -INSERT INTO database_for_range_dict.date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), NULL); +INSERT INTO date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33); +INSERT INTO 
date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42); +INSERT INTO date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), NULL); -CREATE DICTIONARY database_for_range_dict.range_dictionary_nullable +CREATE DICTIONARY range_dictionary_nullable ( CountryID UInt64, StartDate Date, @@ -80,35 +74,33 @@ CREATE DICTIONARY database_for_range_dict.range_dictionary_nullable Tax Nullable(Float64) DEFAULT 0.2 ) PRIMARY KEY CountryID -SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB 'database_for_range_dict')) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB currentDatabase())) LIFETIME(MIN 1 MAX 1000) LAYOUT(RANGE_HASHED()) RANGE(MIN StartDate MAX EndDate); SELECT 'Dictionary nullable'; SELECT 'dictGet'; -SELECT dictGet('database_for_range_dict.range_dictionary_nullable', 'Tax', toUInt64(1), toDate('2019-05-15')); -SELECT dictGet('database_for_range_dict.range_dictionary_nullable', 'Tax', toUInt64(1), toDate('2019-05-29')); -SELECT dictGet('database_for_range_dict.range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-29')); -SELECT dictGet('database_for_range_dict.range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-31')); -SELECT dictGetOrDefault('database_for_range_dict.range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-31'), 0.4); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(1), toDate('2019-05-15')); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(1), toDate('2019-05-29')); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-29')); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-31')); +SELECT dictGetOrDefault('range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-31'), 0.4); SELECT 'dictHas'; -SELECT dictHas('database_for_range_dict.range_dictionary_nullable', toUInt64(1), toDate('2019-05-15')); -SELECT dictHas('database_for_range_dict.range_dictionary_nullable', toUInt64(1), toDate('2019-05-29')); -SELECT dictHas('database_for_range_dict.range_dictionary_nullable', toUInt64(2), toDate('2019-05-29')); -SELECT dictHas('database_for_range_dict.range_dictionary_nullable', toUInt64(2), toDate('2019-05-31')); +SELECT dictHas('range_dictionary_nullable', toUInt64(1), toDate('2019-05-15')); +SELECT dictHas('range_dictionary_nullable', toUInt64(1), toDate('2019-05-29')); +SELECT dictHas('range_dictionary_nullable', toUInt64(2), toDate('2019-05-29')); +SELECT dictHas('range_dictionary_nullable', toUInt64(2), toDate('2019-05-31')); SELECT 'select columns from dictionary'; SELECT 'allColumns'; -SELECT * FROM database_for_range_dict.range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT * FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; SELECT 'noColumns'; -SELECT 1 FROM database_for_range_dict.range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 1 FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; SELECT 'onlySpecificColumns'; -SELECT CountryID, StartDate, Tax FROM database_for_range_dict.range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT CountryID, StartDate, Tax FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; SELECT 'onlySpecificColumn'; -SELECT Tax FROM database_for_range_dict.range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT Tax FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; -DROP DICTIONARY 
database_for_range_dict.range_dictionary_nullable; -DROP TABLE database_for_range_dict.date_table; - -DROP DATABASE database_for_range_dict; +DROP DICTIONARY range_dictionary_nullable; +DROP TABLE date_table; From d23e9fc9eea9f21e5168e618f63add35704dda4b Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 1 Aug 2024 13:10:13 +0000 Subject: [PATCH 1367/2361] Fix text --- ...56_optimize_skip_unused_shards_rewrite_in.reference | 10 +++++----- .../01756_optimize_skip_unused_shards_rewrite_in.sql | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference index 8d064020c1f..237bca6305a 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference @@ -27,7 +27,7 @@ order by query; (0, 2) (0, 2) -- --- w/ otimize_skip_unused_shards_rewrite_in=1 +-- w/ optimize_skip_unused_shards_rewrite_in=1 -- set optimize_skip_unused_shards_rewrite_in=1; @@ -49,7 +49,7 @@ order by query; tuple(2) select 'optimize_skip_unused_shards_rewrite_in(2,)'; optimize_skip_unused_shards_rewrite_in(2,) -with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2,); +with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2); system flush logs; select splitByString('IN', query)[-1] from system.query_log where event_date >= yesterday() and @@ -59,10 +59,10 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_2%') and type = 'QueryFinish' order by query; - tuple(2) + (2) select 'optimize_skip_unused_shards_rewrite_in(0,)'; optimize_skip_unused_shards_rewrite_in(0,) -with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0,); +with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0); 0 0 system flush logs; select splitByString('IN', query)[-1] from system.query_log where @@ -73,7 +73,7 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_00%') and type = 'QueryFinish' order by query; - tuple(0) + (0) -- signed column select 'signed column'; signed column diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql index 9a1a00cc0a1..0b7a44665dc 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql @@ -63,7 +63,7 @@ select splitByString('IN', query)[-1] from system.query_log where order by query; select 'optimize_skip_unused_shards_rewrite_in(2,)'; -with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2,); +with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2); system flush logs; select splitByString('IN', query)[-1] from system.query_log where event_date >= yesterday() and @@ -75,7 +75,7 @@ select splitByString('IN', query)[-1] from system.query_log where order by query; select 'optimize_skip_unused_shards_rewrite_in(0,)'; -with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0,); +with (select 
currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0); system flush logs; select splitByString('IN', query)[-1] from system.query_log where event_date >= yesterday() and From bb7039eeec01ce59008103727f8c03ddd26a3d29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 15:17:00 +0200 Subject: [PATCH 1368/2361] Make 01107_atomic_db_detach_attach parallelizable --- .../01107_atomic_db_detach_attach.sh | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh index bcaa70abbb5..e9879344259 100755 --- a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh +++ b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh @@ -1,29 +1,30 @@ #!/usr/bin/env bash -# Tags: no-parallel, no-fasttest +# Tags: no-fasttest CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01107" -$CLICKHOUSE_CLIENT -q "CREATE DATABASE test_01107 ENGINE=Atomic" -$CLICKHOUSE_CLIENT -q "CREATE TABLE test_01107.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple()" +NEW_DATABASE=test_01107_${CLICKHOUSE_DATABASE} +$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS ${NEW_DATABASE}" +$CLICKHOUSE_CLIENT -q "CREATE DATABASE ${NEW_DATABASE} ENGINE=Atomic" +$CLICKHOUSE_CLIENT -q "CREATE TABLE ${NEW_DATABASE}.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple()" -$CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 60000000 -q "INSERT INTO test_01107.mt SELECT number + sleepEachRow(3) FROM numbers(5)" & +$CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 60000000 -q "INSERT INTO ${NEW_DATABASE}.mt SELECT number + sleepEachRow(3) FROM numbers(5)" & sleep 1 -$CLICKHOUSE_CLIENT -q "DETACH TABLE test_01107.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 -$CLICKHOUSE_CLIENT -q "ATTACH TABLE test_01107.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 57" > /dev/null && echo "OK" -$CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 219" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -q "DETACH TABLE ${NEW_DATABASE}.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 +$CLICKHOUSE_CLIENT -q "ATTACH TABLE ${NEW_DATABASE}.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 57" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -q "DETACH DATABASE ${NEW_DATABASE}" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 219" > /dev/null && echo "OK" wait -$CLICKHOUSE_CLIENT -q "ATTACH TABLE test_01107.mt" -$CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01107.mt" -$CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 -$CLICKHOUSE_CLIENT -q "ATTACH DATABASE test_01107" -$CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01107.mt" +$CLICKHOUSE_CLIENT -q "ATTACH TABLE ${NEW_DATABASE}.mt" +$CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM ${NEW_DATABASE}.mt" +$CLICKHOUSE_CLIENT -q "DETACH DATABASE ${NEW_DATABASE}" --database_atomic_wait_for_drop_and_detach_synchronously=0 +$CLICKHOUSE_CLIENT -q "ATTACH DATABASE ${NEW_DATABASE}" +$CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM ${NEW_DATABASE}.mt" -$CLICKHOUSE_CLIENT 
--function_sleep_max_microseconds_per_block 60000000 -q "INSERT INTO test_01107.mt SELECT number + sleepEachRow(1) FROM numbers(5)" && echo "end" & +$CLICKHOUSE_CLIENT --function_sleep_max_microseconds_per_block 60000000 -q "INSERT INTO ${NEW_DATABASE}.mt SELECT number + sleepEachRow(1) FROM numbers(5)" && echo "end" & sleep 1 -$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 && sleep 1 && echo "dropped" +$CLICKHOUSE_CLIENT -q "DROP DATABASE ${NEW_DATABASE}" --database_atomic_wait_for_drop_and_detach_synchronously=0 && sleep 1 && echo "dropped" wait From 1d85f9b1cba3c8fe168286a660d3c0a4fd471a95 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Thu, 1 Aug 2024 14:42:58 +0100 Subject: [PATCH 1369/2361] fix remove_local_directory_contents --- tests/integration/test_storage_delta/test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index e485bc90ee0..384b8296f66 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -52,9 +52,13 @@ def get_spark(): return builder.master("local").getOrCreate() -def remove_local_directory_contents(local_path): - for local_file in glob.glob(local_path + "/**"): - os.unlink(local_file) +def remove_local_directory_contents(full_path): + for path in glob.glob(f"{full_path}/**"): + if os.path.isfile(path): + os.unlink(path) + else: + remove_local_directory_contents(path) + os.rmdir(path) @pytest.fixture(scope="module") From 0913f0189ba350236d32d774265770b654860a80 Mon Sep 17 00:00:00 2001 From: Alex Katsman Date: Wed, 31 Jul 2024 09:06:30 +0000 Subject: [PATCH 1370/2361] Don't count a search query as a search pattern match --- tests/integration/helpers/cluster.py | 10 +++++++--- .../integration/test_mask_sensitive_info/test.py | 15 +++++++-------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 90b28a4cda3..6bc0ece63ca 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -3922,7 +3922,11 @@ class ClickHouseInstance: ) def contains_in_log( - self, substring, from_host=False, filename="clickhouse-server.log" + self, + substring, + from_host=False, + filename="clickhouse-server.log", + exclusion_substring="", ): if from_host: # We check fist file exists but want to look for all rotated logs as well @@ -3930,7 +3934,7 @@ class ClickHouseInstance: [ "bash", "-c", - f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true', + f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* | ( [ -z "{exclusion_substring}" ] && cat || grep -v "${exclusion_substring}" ) || true', ] ) else: @@ -3938,7 +3942,7 @@ class ClickHouseInstance: [ "bash", "-c", - f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true', + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} | ( [ -z "{exclusion_substring}" ] && cat || grep -v "${exclusion_substring}" ) || true', ] ) return len(result) > 0 diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py index 902d3800324..6f6dc4d287f 100644 --- a/tests/integration/test_mask_sensitive_info/test.py +++ 
b/tests/integration/test_mask_sensitive_info/test.py @@ -13,6 +13,7 @@ node = cluster.add_instance( with_zookeeper=True, with_azurite=True, ) +base_search_query = "SELECT COUNT() FROM system.query_log WHERE query LIKE " @pytest.fixture(scope="module", autouse=True) @@ -35,7 +36,7 @@ def check_logs(must_contain=[], must_not_contain=[]): .replace("]", "\\]") .replace("*", "\\*") ) - assert node.contains_in_log(escaped_str) + assert node.contains_in_log(escaped_str, exclusion_substring=base_search_query) for str in must_not_contain: escaped_str = ( @@ -44,7 +45,9 @@ def check_logs(must_contain=[], must_not_contain=[]): .replace("]", "\\]") .replace("*", "\\*") ) - assert not node.contains_in_log(escaped_str) + assert not node.contains_in_log( + escaped_str, exclusion_substring=base_search_query + ) for str in must_contain: escaped_str = str.replace("'", "\\'") @@ -60,7 +63,7 @@ def system_query_log_contains_search_pattern(search_pattern): return ( int( node.query( - f"SELECT COUNT() FROM system.query_log WHERE query LIKE '%{search_pattern}%'" + f"{base_search_query}'%{search_pattern}%' AND query NOT LIKE '{base_search_query}%'" ).strip() ) >= 1 @@ -105,7 +108,6 @@ def test_create_alter_user(): must_not_contain=[ password, "IDENTIFIED BY", - "IDENTIFIED BY", "IDENTIFIED WITH plaintext_password BY", ], ) @@ -366,10 +368,7 @@ def test_table_functions(): f"remoteSecure(named_collection_6, addresses_expr = '127.{{2..11}}', database = 'default', table = 'remote_table', user = 'remote_user', password = '{password}')", f"s3('http://minio1:9001/root/data/test9.csv.gz', 'NOSIGN', 'CSV')", f"s3('http://minio1:9001/root/data/test10.csv.gz', 'minio', '{password}')", - ( - f"deltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')", - "DNS_ERROR", - ), + f"deltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')", f"azureBlobStorage('{azure_conn_string}', 'cont', 'test_simple.csv', 'CSV')", f"azureBlobStorage('{azure_conn_string}', 'cont', 'test_simple_1.csv', 'CSV', 'none')", f"azureBlobStorage('{azure_conn_string}', 'cont', 'test_simple_2.csv', 'CSV', 'none', 'auto')", From 048fbacc40062b05510916134c9d9525e7fab63a Mon Sep 17 00:00:00 2001 From: Tyler Hannan Date: Thu, 1 Aug 2024 16:48:19 +0200 Subject: [PATCH 1371/2361] Update README.md --- README.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 98f9108f14c..2120a4d1211 100644 --- a/README.md +++ b/README.md @@ -34,17 +34,13 @@ curl https://clickhouse.com/ | sh Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know. -* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30 +* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29 ## Upcoming Events Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `` clickhouse `` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc. 
-* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9 -* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9 -* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9 -* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10 -* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11 +* MORE COMING SOON! ## Recent Recordings * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments" From 582bcfdc03e1124500325c3104497719473657cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 16:57:41 +0200 Subject: [PATCH 1372/2361] Add no-parallel back to 01107_atomic_db_detach_attach --- tests/queries/0_stateless/01107_atomic_db_detach_attach.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh index e9879344259..a6a99aadac2 100755 --- a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh +++ b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel +# no-parallel: FIXME: Timing issues with INSERT + DETACH (https://github.com/ClickHouse/ClickHouse/pull/67610/files#r1700345054) CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 0978441a0261c6003c7a9f4661ac87138e909622 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1373/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From 53cbb4811047cad2bdf2b882bc89ff9a83ac4577 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 1 Aug 2024 18:06:05 +0200 Subject: [PATCH 1374/2361] Try fix 03143_asof_join_ddb_long --- tests/queries/0_stateless/03143_asof_join_ddb_long.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index 17a67511030..0b17ade5d1c 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -1,4 +1,5 @@ --- Tags: long +-- Tags: long, 
no-random-merge-tree-settings +-- no-random-merge-tree-settings - times out in private DROP TABLE IF EXISTS build; DROP TABLE IF EXISTS skewed_probe; From 9362d1a5668bcd6e4e629ab26ec44d4bc8cb6513 Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 31 Jul 2024 15:04:13 +0200 Subject: [PATCH 1375/2361] CI: Create release workflow dry run fix fix --- .github/actions/release/action.yml | 30 +++--- .github/workflows/create_release.yml | 144 +++++++++++++++++++++++++-- tests/ci/create_release.py | 2 +- 3 files changed, 153 insertions(+), 23 deletions(-) diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index c3897682a33..a287aa8b41d 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -16,8 +16,7 @@ inputs: - new dry-run: description: 'Dry run' - required: false - default: true + required: true type: boolean token: required: true @@ -30,8 +29,7 @@ runs: shell: bash run: | python3 ./tests/ci/create_release.py --prepare-release-info \ - --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \ - ${{ inputs.dry-run && '--dry-run' || '' }} + --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} ${{ inputs.dry-run == true && '--dry-run' || '' }} echo "::group::Release Info" python3 -m json.tool /tmp/release_info.json echo "::endgroup::" @@ -44,20 +42,20 @@ runs: if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Push Git Tag for the Release shell: bash run: | - python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Push New Release Branch if: ${{ inputs.type == 'new' }} shell: bash run: | - python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Bump CH Version and Update Contributors' List shell: bash run: | - python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Bump Docker versions, Changelog, Security if: ${{ inputs.type == 'patch' }} shell: bash @@ -107,37 +105,37 @@ runs: shell: bash if: ${{ inputs.type == 'patch' }} run: | - python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Export TGZ Packages if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Test TGZ Packages if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Export RPM Packages if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/artifactory.py --export-rpm ${{ 
inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Test RPM Packages if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Export Debian Packages if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Test Debian Packages if: ${{ inputs.type == 'patch' }} shell: bash run: | - python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Docker clickhouse/clickhouse-server building if: ${{ inputs.type == 'patch' }} shell: bash @@ -165,4 +163,4 @@ runs: if: ${{ !cancelled() }} shell: bash run: | - python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run && '--dry-run' || '' }} + python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }} diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 55644bdd503..217f27086c5 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -35,10 +35,142 @@ jobs: with: token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} fetch-depth: 0 - - name: Call Release Action - uses: ./.github/actions/release + - name: Prepare Release Info + shell: bash + run: | + python3 ./tests/ci/create_release.py --prepare-release-info \ + --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} ${{ inputs.dry-run == true && '--dry-run' || '' }} + echo "::group::Release Info" + python3 -m json.tool /tmp/release_info.json + echo "::endgroup::" + release_tag=$(jq -r '.release_tag' /tmp/release_info.json) + commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json) + echo "Release Tag: $release_tag" + echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV" + echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" + - name: Download All Release Artifacts + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Push Git Tag for the Release + shell: bash + run: | + python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Push New Release Branch + if: ${{ inputs.type == 'new' }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Bump CH Version and Update Contributors' List + shell: bash + run: | + python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Bump Docker versions, Changelog, Security + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + git checkout master + python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" + echo "List versions" + ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv + echo "Update docker version" + ./utils/list-versions/update-docker-version.sh + echo "Generate ChangeLog" + export CI=1 + 
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ + --volume=".:/ClickHouse" clickhouse/style-test \ + /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ + --gh-user-or-token=${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} --jobs=5 \ + --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} + git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md + echo "Generate Security" + python3 ./utils/security-generator/generate_security.py > SECURITY.md + git diff HEAD + - name: Create ChangeLog PR + if: ${{ inputs.type == 'patch' && ! inputs.dry-run }} + uses: peter-evans/create-pull-request@v6 with: - ref: ${{ inputs.ref }} - type: ${{ inputs.type }} - dry-run: ${{ inputs.dry-run }} - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} + author: "robot-clickhouse " + token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} + committer: "robot-clickhouse " + commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} + branch: auto/${{ env.RELEASE_TAG }} + assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher + delete-branch: true + title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} + labels: do not test + body: | + Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} + ### Changelog category (leave one): + - Not for changelog (changelog entry is not required) + - name: Complete previous steps and Restore git state + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --set-progress-completed + git reset --hard HEAD + git checkout "$GITHUB_REF_NAME" + - name: Create GH Release + shell: bash + if: ${{ inputs.type == 'patch' }} + run: | + python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Export TGZ Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Test TGZ Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Export RPM Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Test RPM Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Export Debian Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Test Debian Packages + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Docker clickhouse/clickhouse-server building + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + cd "./tests/ci" + python3 ./create_release.py --set-progress-started --progress "docker server release" + export CHECK_NAME="Docker server image" + python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! 
inputs.dry-run && '--push' || '' }} + python3 ./create_release.py --set-progress-completed + - name: Docker clickhouse/clickhouse-keeper building + if: ${{ inputs.type == 'patch' }} + shell: bash + run: | + cd "./tests/ci" + python3 ./create_release.py --set-progress-started --progress "docker keeper release" + export CHECK_NAME="Docker keeper image" + python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} + python3 ./create_release.py --set-progress-completed + - name: Set current Release progress to Completed with OK + shell: bash + run: | + python3 ./tests/ci/create_release.py --set-progress-started --progress "completed" + python3 ./tests/ci/create_release.py --set-progress-completed + - name: Post Slack Message + if: ${{ !cancelled() }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }} diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index a0b4083b673..b02a0bb8ed5 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -315,7 +315,7 @@ class ReleaseInfo: cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") actor = os.getenv("GITHUB_ACTOR", "") or "me" - cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file '{body_file} --label 'do not test' --assignee @{actor}" + cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file {body_file} --label 'do not test' --assignee {actor}" Shell.run(cmd_commit_version_upd, check=True, dry_run=dry_run) Shell.run(cmd_push_branch, check=True, dry_run=dry_run) Shell.run(cmd_create_pr, check=True, dry_run=dry_run) From c534cd5bc21b59788750bdfcfb4177ebba0afc85 Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 31 Jul 2024 16:22:43 +0200 Subject: [PATCH 1376/2361] changelog.py to retrieve best token s3fs fix changelog.py to use base branch to filter prs --- .github/workflows/create_release.yml | 12 ++++++------ tests/ci/artifactory.py | 4 ++++ tests/ci/changelog.py | 23 +++++++++++++++++++++++ 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 217f27086c5..5e34f50fab5 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -81,7 +81,7 @@ jobs: docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ --volume=".:/ClickHouse" clickhouse/style-test \ /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ - --gh-user-or-token=${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} --jobs=5 \ + --jobs=5 \ --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md echo "Generate Security" @@ -111,11 +111,11 @@ jobs: python3 ./tests/ci/create_release.py --set-progress-completed git reset --hard HEAD git checkout "$GITHUB_REF_NAME" - - name: Create GH Release - shell: bash - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} +# - name: Create GH Release +# shell: bash +# if: 
${{ inputs.type == 'patch' }} +# run: | +# python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Export TGZ Packages if: ${{ inputs.type == 'patch' }} shell: bash diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 86dcaf79854..4ee326593e6 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -52,6 +52,10 @@ class R2MountPoint: if self.CACHE_ENABLED else "" ) + if not dry_run: + self.aux_mount_options += ( + "-o passwd_file /home/ubuntu/.passwd-s3fs_packages " + ) # without -o nomultipart there are errors like "Error 5 writing to /home/ubuntu/***.deb: Input/output error" self.mount_cmd = f"s3fs {self.bucket_name} {self.MOUNT_POINT} -o url={self.API_ENDPOINT} -o use_path_request_style -o umask=0000 -o nomultipart -o logfile={self.LOG_FILE} {self.aux_mount_options}" elif self.app == MountPointApp.RCLONE: diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index 3ba618f3ae5..e23dd8e4c67 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -19,6 +19,8 @@ from env_helper import TEMP_PATH from git_helper import git_runner, is_shallow from github_helper import GitHub, PullRequest, PullRequests, Repository from s3_helper import S3Helper +from get_robot_token import get_best_robot_token +from ci_utils import Shell from version_helper import ( FILE_WITH_VERSION_PATH, get_abs_path, @@ -171,6 +173,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--gh-user-or-token", help="user name or GH token to authenticate", + default=get_best_robot_token(), ) parser.add_argument( "--gh-password", @@ -397,6 +400,15 @@ def get_year(prs: PullRequests) -> int: return max(pr.created_at.year for pr in prs) +def get_branch_by_tag(tag: str) -> Optional[str]: + tag.removeprefix("v") + versions = tag.split(".") + if len(versions) < 3: + print("ERROR: Can't get branch by tag") + return None + return f"{versions[0]}.{versions[1]}" + + def main(): log_levels = [logging.WARN, logging.INFO, logging.DEBUG] args = parse_args() @@ -446,6 +458,17 @@ def main(): gh_cache = GitHubCache(gh.cache_path, temp_path, S3Helper()) gh_cache.download() query = f"type:pr repo:{args.repo} is:merged" + branch = get_branch_by_tag(TO_REF) + if branch and Shell.check(f"git show-ref --quiet {branch}"): + try: + if int(branch.split(".")[-1]) > 1: + query += f" base:{branch}" + print(f"NOTE: will use base branch to filter PRs {branch}") + except ValueError: + print(f"ERROR: cannot get minor version from branch {branch} - pass") + pass + else: + print(f"ERROR: invalid branch {branch} - pass") prs = gh.get_pulls_from_search( query=query, merged=merged, sort="created", progress_func=tqdm.tqdm ) From 8214910cc7bd84f613631c2fada9682820df8003 Mon Sep 17 00:00:00 2001 From: Max K Date: Wed, 31 Jul 2024 20:14:22 +0200 Subject: [PATCH 1377/2361] add geesfs --- .github/workflows/create_release.yml | 10 +++---- .github/workflows/release.yml | 4 +-- tests/ci/artifactory.py | 35 +++++++++++++++++++++--- tests/ci/changelog.py | 41 ++++++++++++++++++---------- tests/ci/ci_utils.py | 11 ++++---- 5 files changed, 69 insertions(+), 32 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 5e34f50fab5..c3126abe461 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -62,15 +62,14 @@ jobs: shell: bash run: | python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Bump CH Version 
and Update Contributors' List - shell: bash - run: | - python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} +# - name: Bump CH Version and Update Contributors' List +# shell: bash +# run: | +# python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Bump Docker versions, Changelog, Security if: ${{ inputs.type == 'patch' }} shell: bash run: | - git checkout master python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" echo "List versions" ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv @@ -96,6 +95,7 @@ jobs: committer: "robot-clickhouse " commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} branch: auto/${{ env.RELEASE_TAG }} + base: master assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher delete-branch: true title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3bd6dfae6ca..8620d15ec19 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -55,12 +55,12 @@ jobs: run: | cd "$GITHUB_WORKSPACE/tests/ci" export CHECK_NAME="Docker server image" - python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push + python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$(git rev-list -n 1 $GITHUB_TAG)" --check-name "$CHECK_NAME" --push - name: Check docker clickhouse/clickhouse-keeper building run: | cd "$GITHUB_WORKSPACE/tests/ci" export CHECK_NAME="Docker keeper image" - python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --check-name "$CHECK_NAME" --push + python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$(git rev-list -n 1 $GITHUB_TAG)" --check-name "$CHECK_NAME" --push - name: Cleanup if: always() run: | diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 4ee326593e6..a508374f856 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -15,6 +15,7 @@ from ci_utils import WithIter, Shell class MountPointApp(metaclass=WithIter): RCLONE = "rclone" S3FS = "s3fs" + GEESEFS = "geesefs" class R2MountPoint: @@ -70,6 +71,20 @@ class R2MountPoint: ) # Use --no-modtime to try to avoid: ERROR : rpm/lts/clickhouse-client-24.3.6.5.x86_64.rpm: Failed to apply pending mod time self.mount_cmd = f"rclone mount remote:{self.bucket_name} {self.MOUNT_POINT} --daemon --cache-dir {self.cache_dir} --umask 0000 --log-file {self.LOG_FILE} {self.aux_mount_options}" + elif self.app == MountPointApp.GEESEFS: + self.cache_dir = "/home/ubuntu/geesefs_cache" + self.aux_mount_options += ( + f" --cache={self.cache_dir} " if self.CACHE_ENABLED else "" + ) + if not dry_run: + self.aux_mount_options += f" --shared-config=/home/ubuntu/.r2_auth " + else: + self.aux_mount_options += ( + f" --shared-config=/home/ubuntu/.r2_auth_test " + ) + if self.DEBUG: + self.aux_mount_options += " --debug_s3 --debug_fuse " + self.mount_cmd = f"geesefs --endpoint={self.API_ENDPOINT} --cheap --memory-limit=2050 --gc-interval=100 --max-flushers=5 --max-parallel-parts=1 --max-parallel-copy=2 --log-file={self.LOG_FILE} {self.aux_mount_options} {self.bucket_name} {self.MOUNT_POINT}" else: assert False @@ -87,7 +102,7 @@ class R2MountPoint: Shell.run(_UNMOUNT_CMD) Shell.run(_MKDIR_CMD) Shell.run(_MKDIR_FOR_CACHE) - if 
self.app == MountPointApp.S3FS: + if self.app != MountPointApp.RCLONE: Shell.run(self.mount_cmd, check=True) else: # didn't manage to use simple run() and without blocking or failure @@ -158,7 +173,13 @@ class DebianArtifactory: cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"' print("Running test command:") print(f" {cmd}") - Shell.run(cmd, check=True) + assert Shell.check(cmd) + print(f"Test packages installation, version [latest]") + debian_command_2 = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static clickhouse-client" + cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command_2}"' + print("Running test command:") + print(f" {cmd}") + assert Shell.check(cmd) self.release_info.debian_command = debian_command self.release_info.dump() @@ -234,7 +255,13 @@ class RpmArtifactory: cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"' print("Running test command:") print(f" {cmd}") - Shell.run(cmd, check=True) + assert Shell.check(cmd) + print(f"Test package installation, version [latest]") + rpm_command_2 = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client" + cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command_2}"' + print("Running test command:") + print(f" {cmd}") + assert Shell.check(cmd) self.release_info.rpm_command = rpm_command self.release_info.dump() @@ -350,7 +377,7 @@ if __name__ == "__main__": ERROR : IO error: NotImplemented: versionId not implemented Failed to copy: NotImplemented: versionId not implemented """ - mp = R2MountPoint(MountPointApp.S3FS, dry_run=args.dry_run) + mp = R2MountPoint(MountPointApp.GEESEFS, dry_run=args.dry_run) if args.export_debian: with ReleaseContextManager( release_progress=ReleaseProgress.EXPORT_DEB diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index e23dd8e4c67..929f0f3523a 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -7,7 +7,7 @@ import re from datetime import date, timedelta from pathlib import Path from subprocess import DEVNULL -from typing import Any, Dict, List, Optional, TextIO +from typing import Any, Dict, List, Optional, TextIO, Tuple import tqdm # type: ignore from github.GithubException import RateLimitExceededException, UnknownObjectException @@ -400,13 +400,19 @@ def get_year(prs: PullRequests) -> int: return max(pr.created_at.year for pr in prs) -def get_branch_by_tag(tag: str) -> Optional[str]: - tag.removeprefix("v") +def get_branch_and_patch_by_tag(tag: str) -> Tuple[Optional[str], Optional[int]]: + tag = tag.removeprefix("v") versions = tag.split(".") - if len(versions) < 3: + if len(versions) < 4: print("ERROR: Can't get branch by tag") - return None - return f"{versions[0]}.{versions[1]}" + return None, None + try: + patch_version = int(versions[2]) + branch = f"{int(versions[0])}.{int(versions[1])}" + print(f"Branch [{branch}], patch version [{patch_version}]") + except ValueError: + return None, None + return branch, patch_version def main(): @@ -458,17 +464,22 @@ 
def main(): gh_cache = GitHubCache(gh.cache_path, temp_path, S3Helper()) gh_cache.download() query = f"type:pr repo:{args.repo} is:merged" - branch = get_branch_by_tag(TO_REF) - if branch and Shell.check(f"git show-ref --quiet {branch}"): - try: - if int(branch.split(".")[-1]) > 1: - query += f" base:{branch}" - print(f"NOTE: will use base branch to filter PRs {branch}") - except ValueError: - print(f"ERROR: cannot get minor version from branch {branch} - pass") - pass + + branch, patch = get_branch_and_patch_by_tag(TO_REF) + if branch and patch and Shell.check(f"git show-ref --quiet {branch}"): + if patch > 1: + query += f" base:{branch}" + print( + f"NOTE: It's a patch [{patch}]. will use base branch to filter PRs [{branch}]" + ) + else: + print( + f"NOTE: It's a first patch version. should count PRs merged on master - won't filter PRs by branch" + ) else: print(f"ERROR: invalid branch {branch} - pass") + + print(f"Fetch PRs with query {query}") prs = gh.get_pulls_from_search( query=query, merged=merged, sort="created", progress_func=tqdm.tqdm ) diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 447aac74c7f..3182c0bc5d8 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -246,15 +246,14 @@ class Shell: @classmethod def check(cls, command): - result = subprocess.run( + proc = subprocess.Popen( command, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - check=False, + stdout=subprocess.STDOUT, + stderr=subprocess.STDOUT, ) - return result.returncode == 0 + proc.wait() + return proc.returncode == 0 class Utils: From 4802ea540a4691c13386b74416352155b93f713d Mon Sep 17 00:00:00 2001 From: Max K Date: Thu, 1 Aug 2024 11:57:54 +0200 Subject: [PATCH 1378/2361] improve ci_utils' Shell --- .github/workflows/pull_request.yml | 9 +++- .github/workflows/release.yml | 6 ++- pyproject.toml | 1 + tests/ci/artifactory.py | 84 +++++++++++------------------ tests/ci/auto_release.py | 5 +- tests/ci/ci_buddy.py | 6 ++- tests/ci/ci_definitions.py | 3 +- tests/ci/ci_utils.py | 72 ++++++++++++------------- tests/ci/create_release.py | 86 +++++++++++++++++------------- tests/ci/docker_images_helper.py | 8 +-- 10 files changed, 141 insertions(+), 139 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 854dff530e7..04bef1460a6 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -142,8 +142,13 @@ jobs: # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3) Builds_Report: # run report check for failed builds to indicate the CI error - if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }} - needs: [RunConfig, StyleCheck, Builds_1, Builds_2] + if: ${{ !cancelled() + && needs.RunConfig.result == 'success' + && needs.StyleCheck.result != 'failure' + && needs.FastTest.result != 'failure' + && needs.BuildDockers.result != 'failure' + && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }} + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2] uses: ./.github/workflows/reusable_test.yml with: test_name: Builds diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8620d15ec19..7dc4e3298a6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -55,12 +55,14 @@ jobs: run: | cd "$GITHUB_WORKSPACE/tests/ci" export CHECK_NAME="Docker server image" - python3 
docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$(git rev-list -n 1 $GITHUB_TAG)" --check-name "$CHECK_NAME" --push + SHA=$(git rev-list -n 1 "$GITHUB_TAG") + python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$SHA" --check-name "$CHECK_NAME" --push - name: Check docker clickhouse/clickhouse-keeper building run: | cd "$GITHUB_WORKSPACE/tests/ci" export CHECK_NAME="Docker keeper image" - python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$(git rev-list -n 1 $GITHUB_TAG)" --check-name "$CHECK_NAME" --push + SHA=$(git rev-list -n 1 "$GITHUB_TAG") + python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$SHA" --check-name "$CHECK_NAME" --push - name: Cleanup if: always() run: | diff --git a/pyproject.toml b/pyproject.toml index c89d46c0929..9bbeac3ddae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ disable = ''' no-else-return, global-statement, f-string-without-interpolation, + consider-using-with, ''' [tool.pylint.SIMILARITIES] diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index a508374f856..71deaccf917 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -13,7 +13,6 @@ from ci_utils import WithIter, Shell class MountPointApp(metaclass=WithIter): - RCLONE = "rclone" S3FS = "s3fs" GEESEFS = "geesefs" @@ -31,9 +30,6 @@ class R2MountPoint: DEBUG = True # enable cache for mountpoint CACHE_ENABLED = False - # TODO: which mode is better: minimal/writes/full/off - _RCLONE_CACHE_MODE = "minimal" - UMASK = "0000" def __init__(self, app: str, dry_run: bool) -> None: assert app in MountPointApp @@ -59,18 +55,6 @@ class R2MountPoint: ) # without -o nomultipart there are errors like "Error 5 writing to /home/ubuntu/***.deb: Input/output error" self.mount_cmd = f"s3fs {self.bucket_name} {self.MOUNT_POINT} -o url={self.API_ENDPOINT} -o use_path_request_style -o umask=0000 -o nomultipart -o logfile={self.LOG_FILE} {self.aux_mount_options}" - elif self.app == MountPointApp.RCLONE: - # run rclone mount process asynchronously, otherwise subprocess.run(daemonized command) will not return - self.cache_dir = "/home/ubuntu/rclone_cache" - self.aux_mount_options += "--no-modtime " if self.NOMODTIME else "" - self.aux_mount_options += "-v " if self.DEBUG else "" # -vv too verbose - self.aux_mount_options += ( - f"--vfs-cache-mode {self._RCLONE_CACHE_MODE} --vfs-cache-max-size {self._CACHE_MAX_SIZE_GB}G" - if self.CACHE_ENABLED - else "--vfs-cache-mode off" - ) - # Use --no-modtime to try to avoid: ERROR : rpm/lts/clickhouse-client-24.3.6.5.x86_64.rpm: Failed to apply pending mod time - self.mount_cmd = f"rclone mount remote:{self.bucket_name} {self.MOUNT_POINT} --daemon --cache-dir {self.cache_dir} --umask 0000 --log-file {self.LOG_FILE} {self.aux_mount_options}" elif self.app == MountPointApp.GEESEFS: self.cache_dir = "/home/ubuntu/geesefs_cache" self.aux_mount_options += ( @@ -98,22 +82,17 @@ class R2MountPoint: ) _TEST_MOUNT_CMD = f"mount | grep -q {self.MOUNT_POINT}" - Shell.run(_CLEAN_LOG_FILE_CMD) - Shell.run(_UNMOUNT_CMD) - Shell.run(_MKDIR_CMD) - Shell.run(_MKDIR_FOR_CACHE) - if self.app != MountPointApp.RCLONE: - Shell.run(self.mount_cmd, check=True) - else: - # didn't manage to use simple run() and without blocking or failure - Shell.run_as_daemon(self.mount_cmd) + Shell.check(_CLEAN_LOG_FILE_CMD, verbose=True) + Shell.check(_UNMOUNT_CMD, verbose=True) + Shell.check(_MKDIR_CMD, verbose=True) + Shell.check(_MKDIR_FOR_CACHE, verbose=True) + 
Shell.check(self.mount_cmd, strict=True, verbose=True) time.sleep(3) - Shell.run(_TEST_MOUNT_CMD, check=True) + Shell.check(_TEST_MOUNT_CMD, strict=True, verbose=True) @classmethod def teardown(cls): - print(f"Unmount [{cls.MOUNT_POINT}]") - Shell.run(f"umount {cls.MOUNT_POINT}") + Shell.check(f"umount {cls.MOUNT_POINT}", verbose=True) class RepoCodenames(metaclass=WithIter): @@ -148,10 +127,9 @@ class DebianArtifactory: ] REPREPRO_CMD_PREFIX = f"reprepro --basedir {R2MountPoint.MOUNT_POINT}/configs/deb --outdir {R2MountPoint.MOUNT_POINT}/deb --verbose" cmd = f"{REPREPRO_CMD_PREFIX} includedeb {self.codename} {' '.join(paths)}" - print("Running export command:") - print(f" {cmd}") - Shell.run(cmd, check=True) - Shell.run("sync") + print("Running export commands:") + Shell.check(cmd, strict=True, verbose=True) + Shell.check("sync") if self.codename == RepoCodenames.LTS: packages_with_version = [ @@ -163,11 +141,11 @@ class DebianArtifactory: cmd = f"{REPREPRO_CMD_PREFIX} copy {RepoCodenames.STABLE} {RepoCodenames.LTS} {' '.join(packages_with_version)}" print("Running copy command:") print(f" {cmd}") - Shell.run(cmd, check=True) - Shell.run("sync") + Shell.check(cmd, strict=True) + Shell.check("sync") def test_packages(self): - Shell.run("docker pull ubuntu:latest") + Shell.check("docker pull ubuntu:latest", strict=True) print(f"Test packages installation, version [{self.version}]") debian_command = f"echo 'deb {self.repo_url} stable main' | tee /etc/apt/sources.list.d/clickhouse.list; apt update -y; apt-get install -y clickhouse-common-static={self.version} clickhouse-client={self.version}" cmd = f'docker run --rm ubuntu:latest bash -c "apt update -y; apt install -y sudo gnupg ca-certificates; apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754; {debian_command}"' @@ -236,20 +214,18 @@ class RpmArtifactory: print(f"Exporting RPM packages into [{codename}]") for command in commands: - print("Running command:") - print(f" {command}") - Shell.run(command, check=True) + Shell.check(command, strict=True, verbose=True) update_public_key = f"gpg --armor --export {self._SIGN_KEY}" pub_key_path = dest_dir / "repodata" / "repomd.xml.key" print("Updating repomd.xml.key") - pub_key_path.write_text(Shell.run(update_public_key, check=True)) + pub_key_path.write_text(Shell.get_output_or_raise(update_public_key)) if codename == RepoCodenames.LTS: self.export_packages(RepoCodenames.STABLE) - Shell.run("sync") + Shell.check("sync") def test_packages(self): - Shell.run("docker pull fedora:latest") + Shell.check("docker pull fedora:latest", strict=True) print(f"Test package installation, version [{self.version}]") rpm_command = f"dnf config-manager --add-repo={self.repo_url} && dnf makecache && dnf -y install clickhouse-client-{self.version}-1" cmd = f'docker run --rm fedora:latest /bin/bash -c "dnf -y install dnf-plugins-core && dnf config-manager --add-repo={self.repo_url} && {rpm_command}"' @@ -302,26 +278,30 @@ class TgzArtifactory: if codename == RepoCodenames.LTS: self.export_packages(RepoCodenames.STABLE) - Shell.run("sync") + Shell.check("sync") def test_packages(self): tgz_file = "/tmp/tmp.tgz" tgz_sha_file = "/tmp/tmp.tgz.sha512" cmd = f"curl -o {tgz_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz" - Shell.run( + Shell.check( cmd, - check=True, + strict=True, + verbose=True, ) - Shell.run( + Shell.check( f"curl -o {tgz_sha_file} -f0 {self.repo_url}/stable/clickhouse-client-{self.version}-arm64.tgz.sha512", - check=True, + strict=True, + 
verbose=True, + ) + expected_checksum = Shell.get_output_or_raise(f"cut -d ' ' -f 1 {tgz_sha_file}") + actual_checksum = Shell.get_output_or_raise( + f"sha512sum {tgz_file} | cut -d ' ' -f 1" ) - expected_checksum = Shell.run(f"cut -d ' ' -f 1 {tgz_sha_file}", check=True) - actual_checksum = Shell.run(f"sha512sum {tgz_file} | cut -d ' ' -f 1") assert ( expected_checksum == actual_checksum ), f"[{actual_checksum} != {expected_checksum}]" - Shell.run("rm /tmp/tmp.tgz*") + Shell.check("rm /tmp/tmp.tgz*", verbose=True) self.release_info.tgz_command = cmd self.release_info.dump() @@ -373,9 +353,9 @@ if __name__ == "__main__": args = parse_args() """ - Use S3FS. RCLONE has some errors with r2 remote which I didn't figure out how to resolve: - ERROR : IO error: NotImplemented: versionId not implemented - Failed to copy: NotImplemented: versionId not implemented + S3FS - very slow with a big repo + RCLONE - fuse had many different errors with r2 remote and completely removed + GEESEFS ? """ mp = R2MountPoint(MountPointApp.GEESEFS, dry_run=args.dry_run) if args.export_debian: diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index f2386fe207f..6c17b4c74ad 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -85,7 +85,7 @@ class AutoReleaseInfo: def _prepare(token): assert len(token) > 10 os.environ["GH_TOKEN"] = token - Shell.run("gh auth status", check=True) + Shell.check("gh auth status") gh = GitHub(token) prs = gh.get_release_pulls(GITHUB_REPOSITORY) @@ -106,9 +106,8 @@ def _prepare(token): latest_release_tag_ref = refs[-1] latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha) - commits = Shell.run( + commits = Shell.get_output_or_raise( f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}", - check=True, ).split("\n") commit_num = len(commits) print( diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index dfb5885270a..138909c1db0 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -120,8 +120,10 @@ class CIBuddy: ) -> None: instance_id, instance_type = "unknown", "unknown" if with_instance_info: - instance_id = Shell.run("ec2metadata --instance-id") or instance_id - instance_type = Shell.run("ec2metadata --instance-type") or instance_type + instance_id = Shell.get_output("ec2metadata --instance-id") or instance_id + instance_type = ( + Shell.get_output("ec2metadata --instance-type") or instance_type + ) if not job_name: job_name = os.getenv("CHECK_NAME", "unknown") sign = ":red_circle:" if not critical else ":black_circle:" diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 054b554b8fa..51de8c63509 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -554,7 +554,7 @@ class CommonJobConfigs: run_command="sqllogic_test.py", timeout=10800, release_only=True, - runner_type=Runners.STYLE_CHECKER, + runner_type=Runners.FUNC_TESTER, ) SQL_TEST = JobConfig( job_name_keyword="sqltest", @@ -582,6 +582,7 @@ class CommonJobConfigs: digest=DigestConfig( include_paths=[ "tests/ci/docker_server.py", + "tests/ci/docker_images_helper.py", "./docker/server", ] ), diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 3182c0bc5d8..cd21554788c 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -2,6 +2,7 @@ import json import os import re import subprocess +import sys import time from contextlib import contextmanager from pathlib import Path @@ -192,7 +193,7 @@ class GHActions: get_url_cmd = ( f"gh pr list --repo {repo} --head {branch} --json url --jq 
'.[0].url'" ) - url = Shell.run(get_url_cmd) + url = Shell.get_output(get_url_cmd) if not url: print(f"ERROR: PR nor found, branch [{branch}]") return url @@ -200,59 +201,56 @@ class GHActions: class Shell: @classmethod - def run_strict(cls, command): + def get_output_or_raise(cls, command): + return cls.get_output(command, strict=True) + + @classmethod + def get_output(cls, command, strict=False): res = subprocess.run( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, - check=True, + check=strict, ) return res.stdout.strip() @classmethod - def run(cls, command, check=False, dry_run=False, **kwargs): + def check( + cls, + command, + strict=False, + verbose=False, + dry_run=False, + stdin_str=None, + **kwargs, + ): if dry_run: print(f"Dry-ryn. Would run command [{command}]") - return "" - print(f"Run command [{command}]") - res = "" - result = subprocess.run( - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - check=False, - **kwargs, - ) - if result.returncode == 0: - print(f"stdout: {result.stdout.strip()}") - res = result.stdout - else: - print( - f"ERROR: stdout: {result.stdout.strip()}, stderr: {result.stderr.strip()}" - ) - if check: - assert result.returncode == 0 - return res.strip() - - @classmethod - def run_as_daemon(cls, command): - print(f"Run daemon command [{command}]") - subprocess.Popen(command.split(" ")) # pylint:disable=consider-using-with - return 0, "" - - @classmethod - def check(cls, command): + return 0 + if verbose: + print(f"Run command [{command}]") proc = subprocess.Popen( command, shell=True, - stdout=subprocess.STDOUT, stderr=subprocess.STDOUT, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE if stdin_str else None, + universal_newlines=True, + start_new_session=True, + bufsize=1, + errors="backslashreplace", + **kwargs, ) + if stdin_str: + proc.communicate(input=stdin_str) + elif proc.stdout: + for line in proc.stdout: + sys.stdout.write(line) proc.wait() + if strict: + assert proc.returncode == 0 return proc.returncode == 0 @@ -277,7 +275,7 @@ class Utils: @staticmethod def clear_dmesg(): - Shell.run("sudo dmesg --clear ||:") + Shell.check("sudo dmesg --clear", verbose=True) @staticmethod def check_pr_description(pr_body: str, repo_name: str) -> Tuple[str, str]: diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index b02a0bb8ed5..0d505d6ccc7 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -137,12 +137,13 @@ class ReleaseInfo: assert release_type in ("patch", "new") if release_type == "new": # check commit_ref is right and on a right branch - Shell.run( + Shell.check( f"git merge-base --is-ancestor {commit_ref} origin/master", - check=True, + strict=True, + verbose=True, ) with checkout(commit_ref): - commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True) + commit_sha = Shell.get_output_or_raise(f"git rev-parse {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) @@ -154,13 +155,13 @@ class ReleaseInfo: ), f"BUG: latest tag [{git.latest_tag}], expected [{expected_prev_tag}]" release_tag = version.describe previous_release_tag = expected_prev_tag - previous_release_sha = Shell.run_strict( + previous_release_sha = Shell.get_output_or_raise( f"git rev-parse {previous_release_tag}" ) assert previous_release_sha if release_type == "patch": with checkout(commit_ref): - commit_sha = Shell.run(f"git rev-parse {commit_ref}", check=True) + commit_sha = 
Shell.get_output_or_raise(f"git rev-parse {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) @@ -168,11 +169,16 @@ class ReleaseInfo: version.with_description(codename) release_branch = f"{version.major}.{version.minor}" release_tag = version.describe - Shell.run(f"{GIT_PREFIX} fetch origin {release_branch} --tags", check=True) + Shell.check( + f"{GIT_PREFIX} fetch origin {release_branch} --tags", + strict=True, + verbose=True, + ) # check commit is right and on a right branch - Shell.run( + Shell.check( f"git merge-base --is-ancestor {commit_ref} origin/{release_branch}", - check=True, + strict=True, + verbose=True, ) if version.patch == 1: expected_version = copy(version) @@ -197,7 +203,7 @@ class ReleaseInfo: False ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]" - previous_release_sha = Shell.run_strict( + previous_release_sha = Shell.get_output_or_raise( f"git rev-parse {previous_release_tag}" ) assert previous_release_sha @@ -226,25 +232,26 @@ class ReleaseInfo: def push_release_tag(self, dry_run: bool) -> None: if dry_run: # remove locally created tag from prev run - Shell.run( - f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag} ||:" + Shell.check( + f"{GIT_PREFIX} tag -l | grep -q {self.release_tag} && git tag -d {self.release_tag}" ) # Create release tag print( f"Create and push release tag [{self.release_tag}], commit [{self.commit_sha}]" ) tag_message = f"Release {self.release_tag}" - Shell.run( + Shell.check( f"{GIT_PREFIX} tag -a -m '{tag_message}' {self.release_tag} {self.commit_sha}", - check=True, + strict=True, + verbose=True, ) cmd_push_tag = f"{GIT_PREFIX} push origin {self.release_tag}:{self.release_tag}" - Shell.run(cmd_push_tag, dry_run=dry_run, check=True) + Shell.check(cmd_push_tag, dry_run=dry_run, strict=True, verbose=True) @staticmethod def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" - Shell.run(cmd, dry_run=dry_run, check=True) + Shell.check(cmd, dry_run=dry_run, strict=True) def push_new_release_branch(self, dry_run: bool) -> None: assert ( @@ -261,7 +268,7 @@ class ReleaseInfo: ), f"Unexpected current version in git, must precede [{self.version}] by one step, actual [{version.string}]" if dry_run: # remove locally created branch from prev run - Shell.run( + Shell.check( f"{GIT_PREFIX} branch -l | grep -q {new_release_branch} && git branch -d {new_release_branch}" ) print( @@ -275,7 +282,7 @@ class ReleaseInfo: cmd_push_branch = ( f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}" ) - Shell.run(cmd_push_branch, dry_run=dry_run, check=True) + Shell.check(cmd_push_branch, dry_run=dry_run, strict=True, verbose=True) print("Create and push backport tags for new release branch") ReleaseInfo._create_gh_label( @@ -284,13 +291,14 @@ class ReleaseInfo: ReleaseInfo._create_gh_label( f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run ) - Shell.run( + Shell.check( f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' --head {new_release_branch} {pr_labels} --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' 
""", dry_run=dry_run, - check=True, + strict=True, + verbose=True, ) def update_version_and_contributors_list(self, dry_run: bool) -> None: @@ -316,13 +324,19 @@ class ReleaseInfo: body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") actor = os.getenv("GITHUB_ACTOR", "") or "me" cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file {body_file} --label 'do not test' --assignee {actor}" - Shell.run(cmd_commit_version_upd, check=True, dry_run=dry_run) - Shell.run(cmd_push_branch, check=True, dry_run=dry_run) - Shell.run(cmd_create_pr, check=True, dry_run=dry_run) + Shell.check( + cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) + Shell.check(cmd_create_pr, strict=True, dry_run=dry_run, verbose=True) if dry_run: - Shell.run(f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'") - Shell.run( - f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'" + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, ) self.version_bump_pr = "dry-run" else: @@ -358,7 +372,7 @@ class ReleaseInfo: cmds.append(f"gh release upload {self.release_tag} {file}") if not dry_run: for cmd in cmds: - Shell.run(cmd, check=True) + Shell.check(cmd, strict=True, verbose=True) self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" else: print("Dry-run, would run commands:") @@ -424,7 +438,7 @@ class PackageDownloader: self.macos_package_files = ["clickhouse-macos", "clickhouse-macos-aarch64"] self.file_to_type = {} - Shell.run(f"mkdir -p {self.LOCAL_DIR}") + Shell.check(f"mkdir -p {self.LOCAL_DIR}") for package_type in self.PACKAGE_TYPES: for package in self.package_names: @@ -474,7 +488,7 @@ class PackageDownloader: return res def run(self): - Shell.run(f"rm -rf {self.LOCAL_DIR}/*") + Shell.check(f"rm -rf {self.LOCAL_DIR}/*") for package_file in ( self.deb_package_files + self.rpm_package_files + self.tgz_package_files ): @@ -549,33 +563,33 @@ class PackageDownloader: @contextmanager def checkout(ref: str) -> Iterator[None]: - orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True) + orig_ref = Shell.get_output_or_raise(f"{GIT_PREFIX} symbolic-ref --short HEAD") rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" assert orig_ref if ref not in (orig_ref,): - Shell.run(f"{GIT_PREFIX} checkout {ref}") + Shell.check(f"{GIT_PREFIX} checkout {ref}", strict=True, verbose=True) try: yield except (Exception, KeyboardInterrupt) as e: print(f"ERROR: Exception [{e}]") - Shell.run(rollback_cmd) + Shell.check(rollback_cmd, verbose=True) raise - Shell.run(rollback_cmd) + Shell.check(rollback_cmd, verbose=True) @contextmanager def checkout_new(ref: str) -> Iterator[None]: - orig_ref = Shell.run(f"{GIT_PREFIX} symbolic-ref --short HEAD", check=True) + orig_ref = Shell.get_output_or_raise(f"{GIT_PREFIX} symbolic-ref --short HEAD") rollback_cmd = f"{GIT_PREFIX} checkout {orig_ref}" assert orig_ref - Shell.run(f"{GIT_PREFIX} checkout -b {ref}", check=True) + Shell.check(f"{GIT_PREFIX} checkout -b {ref}", strict=True, verbose=True) try: yield except (Exception, KeyboardInterrupt) as e: print(f"ERROR: Exception [{e}]") - Shell.run(rollback_cmd) + Shell.check(rollback_cmd, verbose=True) raise - Shell.run(rollback_cmd) + 
Shell.check(rollback_cmd, verbose=True) def parse_args() -> argparse.Namespace: diff --git a/tests/ci/docker_images_helper.py b/tests/ci/docker_images_helper.py index e6869852c4e..f0323145cfa 100644 --- a/tests/ci/docker_images_helper.py +++ b/tests/ci/docker_images_helper.py @@ -19,11 +19,11 @@ def docker_login(relogin: bool = True) -> None: if relogin or not Shell.check( "docker system info | grep --quiet -E 'Username|Registry'" ): - Shell.run( # pylint: disable=unexpected-keyword-arg + Shell.check( # pylint: disable=unexpected-keyword-arg "docker login --username 'robotclickhouse' --password-stdin", - input=get_parameter_from_ssm("dockerhub_robot_password"), + strict=True, + stdin_str=get_parameter_from_ssm("dockerhub_robot_password"), encoding="utf-8", - check=True, ) @@ -42,7 +42,7 @@ class DockerImage: def pull_image(image: DockerImage) -> DockerImage: try: logging.info("Pulling image %s - start", image) - Shell.run(f"docker pull {image}", check=True) + Shell.check(f"docker pull {image}", strict=True) logging.info("Pulling image %s - done", image) except Exception as ex: logging.info("Got exception pulling docker %s", ex) From a6d0b7afbb8299eb8cf056368e93267ef51359ba Mon Sep 17 00:00:00 2001 From: Max K Date: Thu, 1 Aug 2024 18:05:19 +0200 Subject: [PATCH 1379/2361] recovery option --- .github/workflows/create_release.yml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index c3126abe461..424dfe60be4 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -16,6 +16,11 @@ concurrency: options: - patch - new + only-repo: + description: 'Run only repos updates including docker (recovery)' + required: false + default: false + type: boolean dry-run: description: 'Dry run' required: false @@ -54,11 +59,12 @@ jobs: run: | python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Push Git Tag for the Release + if: ${{ ! inputs.only-repo }} shell: bash run: | python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Push New Release Branch - if: ${{ inputs.type == 'new' }} + if: ${{ inputs.type == 'new' && ! inputs.only-repo }} shell: bash run: | python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} @@ -67,7 +73,7 @@ jobs: # run: | # python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Bump Docker versions, Changelog, Security - if: ${{ inputs.type == 'patch' }} + if: ${{ inputs.type == 'patch' && ! inputs.only-repo }} shell: bash run: | python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" @@ -87,7 +93,7 @@ jobs: python3 ./utils/security-generator/generate_security.py > SECURITY.md git diff HEAD - name: Create ChangeLog PR - if: ${{ inputs.type == 'patch' && ! inputs.dry-run }} + if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }} uses: peter-evans/create-pull-request@v6 with: author: "robot-clickhouse " @@ -105,7 +111,7 @@ jobs: ### Changelog category (leave one): - Not for changelog (changelog entry is not required) - name: Complete previous steps and Restore git state - if: ${{ inputs.type == 'patch' }} + if: ${{ inputs.type == 'patch' && ! 
inputs.only-repo }} shell: bash run: | python3 ./tests/ci/create_release.py --set-progress-completed From dab5eb9c24cc2f43a0ad8ee65ecac613896cff10 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 1 Aug 2024 16:16:34 +0000 Subject: [PATCH 1380/2361] Fix INTERPOLATE by constant. Fix other tests. --- src/Analyzer/InterpolateNode.cpp | 2 +- src/Analyzer/InterpolateNode.h | 2 ++ src/Analyzer/Resolve/QueryAnalyzer.cpp | 13 ++++------- src/Planner/CollectTableExpressionData.cpp | 2 +- src/Planner/Planner.cpp | 22 +++++++++++++++++++ src/Planner/PlannerExpressionAnalysis.cpp | 3 +++ src/Processors/QueryPlan/FillingStep.cpp | 14 +++++++++++- ..._no_aggregates_and_constant_keys.reference | 4 ++-- ...15_analyzer_materialized_constants_bug.sql | 2 +- 9 files changed, 49 insertions(+), 15 deletions(-) diff --git a/src/Analyzer/InterpolateNode.cpp b/src/Analyzer/InterpolateNode.cpp index 97dc79f565b..17c734cf386 100644 --- a/src/Analyzer/InterpolateNode.cpp +++ b/src/Analyzer/InterpolateNode.cpp @@ -24,7 +24,7 @@ void InterpolateNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_st { buffer << std::string(indent, ' ') << "INTERPOLATE id: " << format_state.getNodeId(this); - buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION\n"; + buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION " << expression_name << " \n"; getExpression()->dumpTreeImpl(buffer, format_state, indent + 4); buffer << '\n' << std::string(indent + 2, ' ') << "INTERPOLATE_EXPRESSION\n"; diff --git a/src/Analyzer/InterpolateNode.h b/src/Analyzer/InterpolateNode.h index ec493ed8bdd..eb3d64d7170 100644 --- a/src/Analyzer/InterpolateNode.h +++ b/src/Analyzer/InterpolateNode.h @@ -50,6 +50,8 @@ public: return QueryTreeNodeType::INTERPOLATE; } + const std::string & getExpressionName() const { return expression_name; } + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; protected: diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 767d5c11075..e973bd8fb34 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -64,6 +64,8 @@ #include #include +#include + #include namespace ProfileEvents @@ -4122,11 +4124,7 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo { auto & interpolate_node_typed = interpolate_node->as(); - auto * column_to_interpolate = interpolate_node_typed.getExpression()->as(); - if (!column_to_interpolate) - throw Exception(ErrorCodes::LOGICAL_ERROR, "INTERPOLATE can work only for indentifiers, but {} is found", - interpolate_node_typed.getExpression()->formatASTForErrorMessage()); - auto column_to_interpolate_name = column_to_interpolate->getIdentifier().getFullName(); + auto column_to_interpolate_name = interpolate_node_typed.getExpressionName(); resolveExpressionNode(interpolate_node_typed.getExpression(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); @@ -4135,14 +4133,11 @@ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpo auto & interpolation_to_resolve = interpolate_node_typed.getInterpolateExpression(); IdentifierResolveScope interpolate_scope(interpolation_to_resolve, &scope /*parent_scope*/); - auto fake_column_node = std::make_shared(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node_typed.getExpression()); + auto fake_column_node = 
std::make_shared(NameAndTypePair(column_to_interpolate_name, interpolate_node_typed.getExpression()->getResultType()), interpolate_node); if (is_column_constant) interpolate_scope.expression_argument_name_to_node.emplace(column_to_interpolate_name, fake_column_node); resolveExpressionNode(interpolation_to_resolve, interpolate_scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); - - if (is_column_constant) - interpolation_to_resolve = interpolation_to_resolve->cloneAndReplace(fake_column_node, interpolate_node_typed.getExpression()); } } diff --git a/src/Planner/CollectTableExpressionData.cpp b/src/Planner/CollectTableExpressionData.cpp index 2fe62aa9be0..c48813a4ed4 100644 --- a/src/Planner/CollectTableExpressionData.cpp +++ b/src/Planner/CollectTableExpressionData.cpp @@ -46,7 +46,7 @@ public: auto column_source_node = column_node->getColumnSource(); auto column_source_node_type = column_source_node->getNodeType(); - if (column_source_node_type == QueryTreeNodeType::LAMBDA) + if (column_source_node_type == QueryTreeNodeType::LAMBDA || column_source_node_type == QueryTreeNodeType::INTERPOLATE) return; /// JOIN using expression diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 968642dc9de..b837d9428a1 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -744,6 +744,8 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, } else { + ActionsDAG rename_dag; + for (auto & interpolate_node : interpolate_list_nodes) { auto & interpolate_node_typed = interpolate_node->as(); @@ -772,8 +774,28 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, const auto * alias_node = &interpolate_actions_dag.addAlias(*interpolate_expression, expression_to_interpolate_name); interpolate_actions_dag.getOutputs().push_back(alias_node); + + /// Here we fix INTERPOLATE by constant expression. + /// Example from 02336_sort_optimization_with_fill: + /// + /// SELECT 5 AS x, 'Hello' AS s ORDER BY x WITH FILL FROM 1 TO 10 INTERPOLATE (s AS s||'A') + /// + /// For this query, INTERPOLATE_EXPRESSION would be : s AS concat(s, 'A'), + /// so that interpolate_actions_dag would have INPUT `s`. + /// + /// However, INPUT `s` does not exist. Instead, we have a constant with execution name 'Hello'_String. 
+ /// To fix this, we prepend a rename : 'Hello'_String -> s + if (const auto * constant_node = interpolate_node_typed.getExpression()->as()) + { + const auto * node = &rename_dag.addInput(alias_node->result_name, alias_node->result_type); + node = &rename_dag.addAlias(*node, interpolate_node_typed.getExpressionName()); + rename_dag.getOutputs().push_back(node); + } } + if (!rename_dag.getOutputs().empty()) + interpolate_actions_dag = ActionsDAG::merge(std::move(rename_dag), std::move(interpolate_actions_dag)); + interpolate_actions_dag.removeUnusedActions(); } diff --git a/src/Planner/PlannerExpressionAnalysis.cpp b/src/Planner/PlannerExpressionAnalysis.cpp index 2b67c96d843..ed3f78193ee 100644 --- a/src/Planner/PlannerExpressionAnalysis.cpp +++ b/src/Planner/PlannerExpressionAnalysis.cpp @@ -462,6 +462,9 @@ SortAnalysisResult analyzeSort(const QueryNode & query_node, for (auto & interpolate_node : interpolate_list_node.getNodes()) { auto & interpolate_node_typed = interpolate_node->as(); + if (interpolate_node_typed.getExpression()->getNodeType() == QueryTreeNodeType::CONSTANT) + continue; + interpolate_actions_visitor.visit(interpolate_actions_dag, interpolate_node_typed.getInterpolateExpression()); } diff --git a/src/Processors/QueryPlan/FillingStep.cpp b/src/Processors/QueryPlan/FillingStep.cpp index 81622389ada..8687886447a 100644 --- a/src/Processors/QueryPlan/FillingStep.cpp +++ b/src/Processors/QueryPlan/FillingStep.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include namespace DB @@ -58,14 +59,25 @@ void FillingStep::transformPipeline(QueryPipelineBuilder & pipeline, const Build void FillingStep::describeActions(FormatSettings & settings) const { - settings.out << String(settings.offset, ' '); + String prefix(settings.offset, settings.indent_char); + settings.out << prefix; dumpSortDescription(sort_description, settings.out); settings.out << '\n'; + if (interpolate_description) + { + auto expression = std::make_shared(interpolate_description->actions.clone()); + expression->describeActions(settings.out, prefix); + } } void FillingStep::describeActions(JSONBuilder::JSONMap & map) const { map.add("Sort Description", explainSortDescription(sort_description)); + if (interpolate_description) + { + auto expression = std::make_shared(interpolate_description->actions.clone()); + map.add("Expression", expression->toTree()); + } } void FillingStep::updateOutputStream() diff --git a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference index 63b8a9d14fc..fc77ed8a241 100644 --- a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference +++ b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.reference @@ -8,13 +8,13 @@ 40 41 -0 +41 2 42 2 42 43 -0 +43 11 11 diff --git a/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql b/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql index f9ec28d09d8..b2fd69d75d0 100644 --- a/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql +++ b/tests/queries/0_stateless/03215_analyzer_materialized_constants_bug.sql @@ -20,7 +20,7 @@ WITH ( SELECT coalesce(materialize(toLowCardinality(toNullable(1))), 10, NULL), max(v) -FROM remote('127.0.0.{1,2}', default, test__fuzz_21) +FROM remote('127.0.0.{1,2}', currentDatabase(), test__fuzz_21) GROUP BY coalesce(NULL), coalesce(1, 10, 10, materialize(NULL)); From 4e9761acf93a58a93186f59d3ad083fd438329dc 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 18:59:17 +0200 Subject: [PATCH 1381/2361] Don't run ASAN unit tests under gdb --- docker/test/unit/Dockerfile | 2 +- docker/test/unit/run.sh | 21 ++++++++++++++++++++- tests/ci/unit_tests_check.py | 5 ++++- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile index af44dc930b2..5f907d94d39 100644 --- a/docker/test/unit/Dockerfile +++ b/docker/test/unit/Dockerfile @@ -4,4 +4,4 @@ ARG FROM_TAG=latest FROM clickhouse/test-base:$FROM_TAG COPY run.sh / -CMD ["/bin/bash", "/run.sh"] +ENTRYPOINT ["/run.sh"] diff --git a/docker/test/unit/run.sh b/docker/test/unit/run.sh index 7323c384d9c..ba11f568218 100644 --- a/docker/test/unit/run.sh +++ b/docker/test/unit/run.sh @@ -2,4 +2,23 @@ set -x -timeout 40m gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' | tee test_output/test_result.txt +if [ "$#" -ne 1 ]; then + echo "Expected exactly one argument" + exit 1 +fi + +if [ "$1" = "GDB" ]; +then + timeout 40m \ + gdb -q -ex "set print inferior-events off" -ex "set confirm off" -ex "set print thread-events off" -ex run -ex bt -ex quit --args \ + ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \ + | tee test_output/test_result.txt +elif [ "$1" = "NO_GDB" ]; +then + timeout 40m \ + ./unit_tests_dbms --gtest_output='json:test_output/test_result.json' \ + | tee test_output/test_result.txt +else + echo "Unknown argument: $1" + exit 1 +fi diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index b66a4312657..716625d7077 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -174,10 +174,13 @@ def main(): test_output = temp_path / "test_output" test_output.mkdir(parents=True, exist_ok=True) + # Don't run ASAN under gdb since that breaks leak detection + gdb_enabled = "NO_GDB" if "asan" in check_name else "GDB" + run_command = ( f"docker run --cap-add=SYS_PTRACE --volume={tests_binary}:/unit_tests_dbms " "--security-opt seccomp=unconfined " # required to issue io_uring sys-calls - f"--volume={test_output}:/test_output {docker_image}" + f"--volume={test_output}:/test_output {docker_image} ${gdb_enabled}" ) run_log_path = test_output / "run.log" From d683fb05a009ed3f58c0e11fc329c3783f934369 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 1 Aug 2024 19:05:30 +0200 Subject: [PATCH 1382/2361] Fix --- .../IO/CachedOnDiskReadBufferFromFile.cpp | 17 ++++++--- src/Interpreters/Cache/FileCache.cpp | 35 ++++++++++++------- src/Interpreters/Cache/FileCache.h | 4 ++- src/Interpreters/Cache/FileSegment.cpp | 9 ++++- src/Interpreters/Cache/FileSegment.h | 4 ++- tests/config/config.d/storage_conf.xml | 3 +- 6 files changed, 51 insertions(+), 21 deletions(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index c928d25c7b8..b471f3fc58f 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -135,8 +135,11 @@ bool CachedOnDiskReadBufferFromFile::nextFileSegmentsBatch() else { CreateFileSegmentSettings create_settings(FileSegmentKind::Regular); - file_segments = cache->getOrSet(cache_key, file_offset_of_buffer_end, size, file_size.value(), create_settings, settings.filesystem_cache_segments_batch_size, user); + file_segments = 
cache->getOrSet( + cache_key, file_offset_of_buffer_end, size, file_size.value(), + create_settings, settings.filesystem_cache_segments_batch_size, user); } + return !file_segments->empty(); } @@ -158,8 +161,8 @@ void CachedOnDiskReadBufferFromFile::initialize() LOG_TEST( log, - "Having {} file segments to read: {}, current offset: {}", - file_segments->size(), file_segments->toString(), file_offset_of_buffer_end); + "Having {} file segments to read: {}, current read range: [{}, {})", + file_segments->size(), file_segments->toString(), file_offset_of_buffer_end, read_until_position); initialized = true; } @@ -1043,6 +1046,10 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() if (file_segments->size() == 1) { size_t remaining_size_to_read = std::min(current_read_range.right, read_until_position - 1) - file_offset_of_buffer_end + 1; + + LOG_TEST(log, "Remaining size to read: {}, read: {}. Resizing buffer to {}", + remaining_size_to_read, size, nextimpl_working_buffer_offset + std::min(size, remaining_size_to_read)); + size = std::min(size, remaining_size_to_read); chassert(implementation_buffer->buffer().size() >= nextimpl_working_buffer_offset + size); implementation_buffer->buffer().resize(nextimpl_working_buffer_offset + size); @@ -1055,8 +1062,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() chassert( file_offset_of_buffer_end <= read_until_position, - fmt::format("Expected {} <= {} (size: {}, read range: {})", - file_offset_of_buffer_end, read_until_position, size, current_read_range.toString())); + fmt::format("Expected {} <= {} (size: {}, read range: {}, hold file segments: {} ({}))", + file_offset_of_buffer_end, read_until_position, size, current_read_range.toString(), file_segments->size(), file_segments->toString(true))); } swap(*implementation_buffer); diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index a88c0de2cfe..0a03f5dcc7d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -316,14 +316,14 @@ FileSegments FileCache::getImpl(const LockedKey & locked_key, const FileSegment: return result; } -std::vector FileCache::splitRange(size_t offset, size_t size) +std::vector FileCache::splitRange(size_t offset, size_t size, size_t aligned_size) { assert(size > 0); std::vector ranges; size_t current_pos = offset; size_t end_pos_non_included = offset + size; - size_t remaining_size = size; + size_t remaining_size = aligned_size; FileSegments file_segments; const size_t max_size = max_file_segment_size.load(); @@ -343,17 +343,20 @@ FileSegments FileCache::splitRangeIntoFileSegments( LockedKey & locked_key, size_t offset, size_t size, + size_t aligned_size, FileSegment::State state, size_t file_segments_limit, const CreateFileSegmentSettings & create_settings) { - assert(size > 0); + chassert(size > 0); + chassert(size <= aligned_size); + /// We take `size` as a soft limit and `aligned_size` as a hard limit. 
auto current_pos = offset; auto end_pos_non_included = offset + size; size_t current_file_segment_size; - size_t remaining_size = size; + size_t remaining_size = aligned_size; FileSegments file_segments; const size_t max_size = max_file_segment_size.load(); @@ -369,6 +372,8 @@ FileSegments FileCache::splitRangeIntoFileSegments( current_pos += current_file_segment_size; } + chassert(file_segments.size() == file_segments_limit || file_segments.back()->range().contains(offset + size - 1), + fmt::format("Offset: {}, size: {}, file segments: {}", offset, size, toString(file_segments))); return file_segments; } @@ -376,6 +381,7 @@ void FileCache::fillHolesWithEmptyFileSegments( LockedKey & locked_key, FileSegments & file_segments, const FileSegment::Range & range, + size_t non_aligned_right_offset, size_t file_segments_limit, bool fill_with_detached_file_segments, const CreateFileSegmentSettings & create_settings) @@ -442,7 +448,7 @@ void FileCache::fillHolesWithEmptyFileSegments( } else { - auto ranges = splitRange(current_pos, hole_size); + auto ranges = splitRange(current_pos, hole_size, hole_size); FileSegments hole; for (const auto & r : ranges) { @@ -479,7 +485,7 @@ void FileCache::fillHolesWithEmptyFileSegments( chassert(!file_segments_limit || file_segments.size() < file_segments_limit); - if (current_pos <= range.right) + if (current_pos <= non_aligned_right_offset) { /// ________] -- requested range /// _____] @@ -487,6 +493,7 @@ void FileCache::fillHolesWithEmptyFileSegments( /// segmentN auto hole_size = range.right - current_pos + 1; + auto non_aligned_size = non_aligned_right_offset - current_pos + 1; if (fill_with_detached_file_segments) { @@ -497,7 +504,7 @@ void FileCache::fillHolesWithEmptyFileSegments( } else { - auto ranges = splitRange(current_pos, hole_size); + auto ranges = splitRange(current_pos, non_aligned_size, hole_size); FileSegments hole; for (const auto & r : ranges) { @@ -542,7 +549,7 @@ FileSegmentsHolderPtr FileCache::set( else { file_segments = splitRangeIntoFileSegments( - *locked_key, offset, size, FileSegment::State::EMPTY, /* file_segments_limit */0, create_settings); + *locked_key, offset, size, size, FileSegment::State::EMPTY, /* file_segments_limit */0, create_settings); } return std::make_unique(std::move(file_segments)); @@ -659,9 +666,13 @@ FileCache::getOrSet( } } + chassert(range.left >= aligned_offset); + if (file_segments.empty()) { - file_segments = splitRangeIntoFileSegments(*locked_key, range.left, range.size(), FileSegment::State::EMPTY, file_segments_limit, create_settings); + file_segments = splitRangeIntoFileSegments( + *locked_key, range.left, /* size */offset + size - range.left, /* aligned_size */range.size(), + FileSegment::State::EMPTY, file_segments_limit, create_settings); } else { @@ -669,9 +680,9 @@ FileCache::getOrSet( chassert(file_segments.back()->range().left <= range.right); fillHolesWithEmptyFileSegments( - *locked_key, file_segments, range, file_segments_limit, /* fill_with_detached */false, create_settings); + *locked_key, file_segments, range, offset + size - 1, file_segments_limit, /* fill_with_detached */false, create_settings); - if (!file_segments.front()->range().contains(offset)) + if (!file_segments.front()->range().contains(range.left)) { throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected {} to include {} " "(end offset: {}, aligned offset: {}, aligned end offset: {})", @@ -713,7 +724,7 @@ FileSegmentsHolderPtr FileCache::get( } fillHolesWithEmptyFileSegments( - *locked_key, file_segments, range, 
file_segments_limit, /* fill_with_detached */true, CreateFileSegmentSettings{}); + *locked_key, file_segments, range, offset + size - 1, file_segments_limit, /* fill_with_detached */true, CreateFileSegmentSettings{}); chassert(!file_segments_limit || file_segments.size() <= file_segments_limit); return std::make_unique(std::move(file_segments)); diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 527fd9d5edf..3f7eec73b56 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -263,7 +263,7 @@ private: /// Split range into subranges by max_file_segment_size, /// each subrange size must be less or equal to max_file_segment_size. - std::vector splitRange(size_t offset, size_t size); + std::vector splitRange(size_t offset, size_t size, size_t aligned_size); /// Split range into subranges by max_file_segment_size (same as in splitRange()) /// and create a new file segment for each subrange. @@ -273,6 +273,7 @@ private: LockedKey & locked_key, size_t offset, size_t size, + size_t aligned_size, FileSegment::State state, size_t file_segments_limit, const CreateFileSegmentSettings & create_settings); @@ -281,6 +282,7 @@ private: LockedKey & locked_key, FileSegments & file_segments, const FileSegment::Range & range, + size_t non_aligned_right_offset, size_t file_segments_limit, bool fill_with_detached_file_segments, const CreateFileSegmentSettings & settings); diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 1664a91b694..c46fb978ae4 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -1008,7 +1008,12 @@ FileSegment & FileSegmentsHolder::add(FileSegmentPtr && file_segment) return *file_segments.back(); } -String FileSegmentsHolder::toString() +String FileSegmentsHolder::toString(bool with_state) +{ + return DB::toString(file_segments, with_state); +} + +String toString(const FileSegments & file_segments, bool with_state) { String ranges; for (const auto & file_segment : file_segments) @@ -1018,6 +1023,8 @@ String FileSegmentsHolder::toString() ranges += file_segment->range().toString(); if (file_segment->isUnbound()) ranges += "(unbound)"; + if (with_state) + ranges += "(" + FileSegment::stateToString(file_segment->state()) + ")"; } return ranges; } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index d6b37b60dc1..25ffb880b45 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -291,7 +291,7 @@ struct FileSegmentsHolder : private boost::noncopyable size_t size() const { return file_segments.size(); } - String toString(); + String toString(bool with_state = false); void popFront() { completeAndPopFrontImpl(); } @@ -317,4 +317,6 @@ private: using FileSegmentsHolderPtr = std::unique_ptr; +String toString(const FileSegments & file_segments, bool with_state = false); + } diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index 7a9b579c00a..4daa64b520d 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -19,7 +19,8 @@ cache s3_disk s3_cache/ - 104857600 + 100Mi + 5Mi 1 100 LRU From 1e8d0d4a5e8d83a1d123a4b5b6c5a91b41caac1c Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Wed, 31 Jul 2024 06:09:14 +0000 Subject: [PATCH 1383/2361] disable parallel run for network_receive_time_metric_insert If run in parallel, several tests may affect the value of the 
`NetworkReceiveElapsedMicroseconds` profile event. This may contribute to test flakiness. --- .../0_stateless/01923_network_receive_time_metric_insert.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh index 97835d97965..77b909ed89e 100755 --- a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-parallel # Tag no-fasttest: needs pv +# Tag no-parallel: reads from a system table CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From c5a8653daf7dc9cb1031c2ac4b2be3623117848d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 19:27:12 +0200 Subject: [PATCH 1384/2361] Playing with Docker and the CI --- docker/test/unit/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile index 5f907d94d39..9f4b86aa0ca 100644 --- a/docker/test/unit/Dockerfile +++ b/docker/test/unit/Dockerfile @@ -4,4 +4,5 @@ ARG FROM_TAG=latest FROM clickhouse/test-base:$FROM_TAG COPY run.sh / +RUN chmod +x run.sh ENTRYPOINT ["/run.sh"] From 4709222dd1f3a37c5f97e638526c21ade6b5218f Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Wed, 31 Jul 2024 23:16:43 +0000 Subject: [PATCH 1385/2361] print debug info if the test fails --- ...1923_network_receive_time_metric_insert.sh | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh index 77b909ed89e..adf4fd96a00 100755 --- a/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh +++ b/tests/queries/0_stateless/01923_network_receive_time_metric_insert.sh @@ -13,9 +13,23 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) seq 1 1000 | pv --quiet --rate-limit 400 | ${CLICKHOUSE_CLIENT} --query "INSERT INTO t FORMAT TSV" # We check that the value of NetworkReceiveElapsedMicroseconds correctly includes the time spent waiting data from the client. -${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS; - WITH ProfileEvents['NetworkReceiveElapsedMicroseconds'] AS time - SELECT time >= 1000000 ? 
1 : time FROM system.query_log - WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 2 ORDER BY event_time DESC LIMIT 1;" +result=$(${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS; + WITH ProfileEvents['NetworkReceiveElapsedMicroseconds'] AS elapsed_us + SELECT elapsed_us FROM system.query_log + WHERE current_database = currentDatabase() AND query_kind = 'Insert' AND event_date >= yesterday() AND type = 'QueryFinish' + ORDER BY event_time DESC LIMIT 1;") + +elapsed_us=$(echo $result | sed 's/ .*//') + +min_elapsed_us=1000000 +if [[ "$elapsed_us" -ge "$min_elapsed_us" ]]; then + echo 1 +else + # Print debug info + ${CLICKHOUSE_CLIENT} --query " + WITH ProfileEvents['NetworkReceiveElapsedMicroseconds'] AS elapsed_us + SELECT query_start_time_microseconds, event_time_microseconds, query_duration_ms, elapsed_us, query FROM system.query_log + WHERE current_database = currentDatabase() and event_date >= yesterday() AND type = 'QueryFinish' ORDER BY query_start_time;" +fi ${CLICKHOUSE_CLIENT} --query "DROP TABLE t" From 35b6112b7bb06f18cd5b07860ec1b9c6ce38014f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 1 Aug 2024 19:50:13 +0200 Subject: [PATCH 1386/2361] Try to stop on leaks --- docker/test/base/Dockerfile | 2 ++ docker/test/stateless/run.sh | 2 +- tests/queries/shell_config.sh | 3 --- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index a81826ed6b5..38c4df459ae 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -28,12 +28,14 @@ RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 abort_on_error=1 history_ RUN echo "UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768'" >> /etc/environment RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768'" >> /etc/environment RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt max_allocation_size_mb=32768'" >> /etc/environment +RUN echo "ASAN_OPTIONS='halt_on_error=1 abort_on_error=1'" >> /etc/environment # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run") # (but w/o verbosity for TSAN, otherwise test.reference will not match) ENV TSAN_OPTIONS='halt_on_error=1 abort_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1 max_allocation_size_mb=32768' ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768' ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768' ENV LSAN_OPTIONS='max_allocation_size_mb=32768' +ENV ASAN_OPTIONS='halt_on_error=1 abort_on_error=1' # for external_symbolizer_path RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index f9f96c76d59..c359d8a1847 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -164,7 +164,7 @@ do done setup_logs_replication -attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01 +attach_gdb_to_clickhouse function fn_exists() { declare -F "$1" > /dev/null; diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index f7017958635..9e5bf75d335 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -1,9 +1,6 @@ #!/usr/bin/env bash # shellcheck disable=SC2120 -# Don't check for ODR violation, since we may test shared build with ASAN -export 
ASAN_OPTIONS=detect_odr_violation=0 - # If ClickHouse was built with coverage - dump the coverage information at exit # (in other cases this environment variable has no effect) export CLICKHOUSE_WRITE_COVERAGE="coverage" From fc72742e37ae78f47b3e55a969e088f5c372ee36 Mon Sep 17 00:00:00 2001 From: sakulali Date: Fri, 2 Aug 2024 02:00:20 +0800 Subject: [PATCH 1387/2361] ping CI From 30e0c1a1b8479e9b6be0701ba21e6050906a7e43 Mon Sep 17 00:00:00 2001 From: Max K Date: Thu, 1 Aug 2024 18:46:09 +0200 Subject: [PATCH 1388/2361] try less mem for geesefs --- .github/workflows/create_release.yml | 21 +++++++++++---------- tests/ci/artifactory.py | 8 +++++--- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 424dfe60be4..3c61fa4cfe1 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -17,7 +17,7 @@ concurrency: - patch - new only-repo: - description: 'Run only repos updates including docker (recovery)' + description: 'Run only repos updates including docker (repo-recovery, tests)' required: false default: false type: boolean @@ -68,10 +68,11 @@ jobs: shell: bash run: | python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} -# - name: Bump CH Version and Update Contributors' List -# shell: bash -# run: | -# python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Bump CH Version and Update Contributors' List + if: ${{ ! inputs.only-repo }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Bump Docker versions, Changelog, Security if: ${{ inputs.type == 'patch' && ! inputs.only-repo }} shell: bash @@ -117,11 +118,11 @@ jobs: python3 ./tests/ci/create_release.py --set-progress-completed git reset --hard HEAD git checkout "$GITHUB_REF_NAME" -# - name: Create GH Release -# shell: bash -# if: ${{ inputs.type == 'patch' }} -# run: | -# python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} + - name: Create GH Release + if: ${{ inputs.type == 'patch' && ! 
inputs.only-repo }} + shell: bash + run: | + python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Export TGZ Packages if: ${{ inputs.type == 'patch' }} shell: bash diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 71deaccf917..8bba7bca30e 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -67,8 +67,8 @@ class R2MountPoint: f" --shared-config=/home/ubuntu/.r2_auth_test " ) if self.DEBUG: - self.aux_mount_options += " --debug_s3 --debug_fuse " - self.mount_cmd = f"geesefs --endpoint={self.API_ENDPOINT} --cheap --memory-limit=2050 --gc-interval=100 --max-flushers=5 --max-parallel-parts=1 --max-parallel-copy=2 --log-file={self.LOG_FILE} {self.aux_mount_options} {self.bucket_name} {self.MOUNT_POINT}" + self.aux_mount_options += " --debug_s3 " + self.mount_cmd = f"geesefs --endpoint={self.API_ENDPOINT} --cheap --memory-limit=1000 --gc-interval=100 --max-flushers=10 --max-parallel-parts=1 --max-parallel-copy=10 --log-file={self.LOG_FILE} {self.aux_mount_options} {self.bucket_name} {self.MOUNT_POINT}" else: assert False @@ -207,8 +207,10 @@ class RpmArtifactory: for package in paths: _copy_if_not_exists(Path(package), dest_dir) + # switching between different fuse providers invalidates --update option (apparently some fuse(s) can mess around with mtime) + # add --skip-stat to skip mtime check commands = ( - f"createrepo_c --local-sqlite --workers=2 --update --verbose {dest_dir}", + f"createrepo_c --local-sqlite --workers=2 --update --skip-stat --verbose {dest_dir}", f"gpg --sign-with {self._SIGN_KEY} --detach-sign --batch --yes --armor {dest_dir / 'repodata' / 'repomd.xml'}", ) print(f"Exporting RPM packages into [{codename}]") From 3150833aa517bd0293a6f3b7f9114cfb89af06ad Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 1 Aug 2024 20:16:10 +0200 Subject: [PATCH 1389/2361] fix merge with master --- docs/en/operations/settings/settings.md | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index b880c42a45b..c621f2db5ae 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5615,18 +5615,6 @@ Disable all insert and mutations (alter table update / alter table delete / alte Default value: `false`. -## restore_replace_external_engines_to_null - -For testing purposes. Replaces all external engines to Null to not initiate external connections. - -Default value: `False` - -## restore_replace_external_table_functions_to_null - -For testing purposes. Replaces all external table functions to Null to not initiate external connections. - -Default value: `False` - ## use_hive_partitioning When enabled, ClickHouse will detect Hive-style partitioning in path (`/name=value/`) in file-like table engines [File](../../engines/table-engines/special/file.md#hive-style-partitioning)/[S3](../../engines/table-engines/integrations/s3.md#hive-style-partitioning)/[URL](../../engines/table-engines/special/url.md#hive-style-partitioning)/[HDFS](../../engines/table-engines/integrations/hdfs.md#hive-style-partitioning)/[AzureBlobStorage](../../engines/table-engines/integrations/azureBlobStorage.md#hive-style-partitioning) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. 
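A minimal sketch of the setting described above; the file layout, partition key and values here are illustrative assumptions, not taken from this patch:

```sql
-- Hive-style layout assumed: data/city=London/1.parquet, data/city=Paris/1.parquet, ...
SET use_hive_partitioning = 1;

-- The `city` key from the path is exposed as the virtual column `_city`,
-- following the naming rule stated in the documentation above.
SELECT _city, count()
FROM file('data/city=*/*.parquet')
GROUP BY _city;
```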
From e034558f74a1cd46bb8fbdfac3b7dc6d25165f4e Mon Sep 17 00:00:00 2001 From: Max K Date: Thu, 1 Aug 2024 20:51:36 +0200 Subject: [PATCH 1390/2361] add automerge prs step --- .github/workflows/create_release.yml | 4 +++ pyproject.toml | 1 + tests/ci/ci_utils.py | 2 +- tests/ci/create_release.py | 47 +++++++++++++++++++++++++++- tests/ci/release.py | 1 + 5 files changed, 53 insertions(+), 2 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 3c61fa4cfe1..e27db1b09a4 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -171,6 +171,10 @@ jobs: export CHECK_NAME="Docker keeper image" python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} python3 ./create_release.py --set-progress-completed + - name: Update release info. Merge created PRs + shell: bash + run: | + python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }} - name: Set current Release progress to Completed with OK shell: bash run: | diff --git a/pyproject.toml b/pyproject.toml index 9bbeac3ddae..4268901e7f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,6 +40,7 @@ disable = ''' global-statement, f-string-without-interpolation, consider-using-with, + use-maxsplit-arg, ''' [tool.pylint.SIMILARITIES] diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index cd21554788c..4f696a2c55a 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -228,7 +228,7 @@ class Shell: ): if dry_run: print(f"Dry-ryn. Would run command [{command}]") - return 0 + return True if verbose: print(f"Run command [{command}]") proc = subprocess.Popen( diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 0d505d6ccc7..c407a74fbf0 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -43,6 +43,7 @@ class ReleaseProgress: TEST_TGZ = "test TGZ packages" TEST_RPM = "test RPM packages" TEST_DEB = "test DEB packages" + MERGE_CREATED_PRS = "merge created PRs" COMPLETED = "completed" @@ -101,6 +102,7 @@ class ReleaseInfo: previous_release_sha: str changelog_pr: str = "" version_bump_pr: str = "" + prs_merged: bool = False release_url: str = "" debian_command: str = "" rpm_command: str = "" @@ -380,6 +382,38 @@ class ReleaseInfo: self.release_url = f"dry-run" self.dump() + def merge_prs(self, dry_run: bool) -> None: + repo = CI.Envs.GITHUB_REPOSITORY + assert self.version_bump_pr + if dry_run: + version_bump_pr_num = 12345 + else: + version_bump_pr_num = int(self.version_bump_pr.split("/")[-1]) + print("Merging Version bump PR") + res_1 = Shell.check( + f"gh pr merge {version_bump_pr_num} --repo {repo} --merge --auto", + verbose=True, + dry_run=dry_run, + ) + + res_2 = True + if not self.release_tag.endswith("-new"): + assert self.changelog_pr + print("Merging ChangeLog PR") + if dry_run: + changelog_pr_num = 23456 + else: + changelog_pr_num = int(self.changelog_pr.split("/")[-1]) + res_2 = Shell.check( + f"gh pr merge {changelog_pr_num} --repo {repo} --merge --auto", + verbose=True, + dry_run=dry_run, + ) + else: + assert not self.changelog_pr + + self.prs_merged = res_1 and res_2 + class RepoTypes: RPM = "rpm" @@ -627,6 +661,11 @@ def parse_args() -> argparse.Namespace: action="store_true", help="Create GH Release object and attach all packages", ) + parser.add_argument( + "--merge-prs", + action="store_true", + help="Merge PRs with version, changelog 
updates", + ) parser.add_argument( "--post-status", action="store_true", @@ -732,7 +771,6 @@ if __name__ == "__main__": if args.post_status: release_info = ReleaseInfo.from_file() - release_info.update_release_info(dry_run=args.dry_run) if release_info.is_new_release_branch(): title = "New release branch" else: @@ -766,6 +804,13 @@ if __name__ == "__main__": ri.progress_description = ReleaseProgressDescription.OK ri.dump() + if args.merge_prs: + with ReleaseContextManager( + release_progress=ReleaseProgress.MERGE_CREATED_PRS + ) as release_info: + release_info.update_release_info(dry_run=args.dry_run) + release_info.merge_prs(dry_run=args.dry_run) + # tear down ssh if _ssh_agent and _key_pub: _ssh_agent.remove(_key_pub) diff --git a/tests/ci/release.py b/tests/ci/release.py index 2de20d00a00..b26d6205f3b 100755 --- a/tests/ci/release.py +++ b/tests/ci/release.py @@ -689,4 +689,5 @@ def main(): if __name__ == "__main__": + assert False, "Script Deprecated, ask ci team for help" main() From 67b11300e45f6e24c3515a978d23a9bc998a666e Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 1 Aug 2024 21:05:49 +0000 Subject: [PATCH 1391/2361] Fix crash in KeyCondition::cloneASTWithInversionPushDown() caused by type change --- src/Storages/MergeTree/KeyCondition.cpp | 82 +++++++++---------- .../03215_key_condition_bug.reference | 1 + .../0_stateless/03215_key_condition_bug.sql | 3 + 3 files changed, 44 insertions(+), 42 deletions(-) create mode 100644 tests/queries/0_stateless/03215_key_condition_bug.reference create mode 100644 tests/queries/0_stateless/03215_key_condition_bug.sql diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 69bffac9160..eaf9f0af623 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -566,6 +566,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } const ActionsDAG::Node * res = nullptr; + bool handled_inversion = false; switch (node.type) { @@ -582,7 +583,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( /// Re-generate column name for constant. /// DAG form query (with enabled analyzer) uses suffixes for constants, like 1_UInt8. /// DAG from PK does not use it. This breaks matching by column name sometimes. - /// Ideally, we should not compare manes, but DAG subtrees instead. + /// Ideally, we should not compare names, but DAG subtrees instead. 
name = ASTLiteral(column_const->getDataColumn()[0]).getColumnName(); else name = node.result_name; @@ -593,9 +594,9 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( case (ActionsDAG::ActionType::ALIAS): { /// Ignore aliases - const auto & alias = cloneASTWithInversionPushDown(*node.children.front(), inverted_dag, to_inverted, context, need_inversion); - to_inverted[&node] = &alias; - return alias; + res = &cloneASTWithInversionPushDown(*node.children.front(), inverted_dag, to_inverted, context, need_inversion); + handled_inversion = true; + break; } case (ActionsDAG::ActionType::ARRAY_JOIN): { @@ -608,20 +609,10 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( auto name = node.function_base->getName(); if (name == "not") { - const auto & arg = cloneASTWithInversionPushDown(*node.children.front(), inverted_dag, to_inverted, context, !need_inversion); - to_inverted[&node] = &arg; - return arg; + res = &cloneASTWithInversionPushDown(*node.children.front(), inverted_dag, to_inverted, context, !need_inversion); + handled_inversion = true; } - - if (name == "materialize") - { - /// Ignore materialize - const auto & arg = cloneASTWithInversionPushDown(*node.children.front(), inverted_dag, to_inverted, context, need_inversion); - to_inverted[&node] = &arg; - return arg; - } - - if (name == "indexHint") + else if (name == "indexHint") { ActionsDAG::NodeRawConstPtrs children; if (const auto * adaptor = typeid_cast(node.function_base.get())) @@ -636,12 +627,10 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } } - const auto & func = inverted_dag.addFunction(node.function_base, children, ""); - to_inverted[&node] = &func; - return func; + res = &inverted_dag.addFunction(node.function_base, children, ""); + handled_inversion = true; } - - if (need_inversion && (name == "and" || name == "or")) + else if (need_inversion && (name == "and" || name == "or")) { ActionsDAG::NodeRawConstPtrs children(node.children); @@ -659,34 +648,43 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( /// We match columns by name, so it is important to fill name correctly. /// So, use empty string to make it automatically. - const auto & func = inverted_dag.addFunction(function_builder, children, ""); - to_inverted[&node] = &func; - return func; + res = &inverted_dag.addFunction(function_builder, children, ""); + handled_inversion = true; } - - ActionsDAG::NodeRawConstPtrs children(node.children); - - for (auto & arg : children) - arg = &cloneASTWithInversionPushDown(*arg, inverted_dag, to_inverted, context, false); - - auto it = inverse_relations.find(name); - if (it != inverse_relations.end()) + else { - const auto & func_name = need_inversion ? it->second : it->first; - auto function_builder = FunctionFactory::instance().get(func_name, context); - const auto & func = inverted_dag.addFunction(function_builder, children, ""); - to_inverted[&node] = &func; - return func; - } + ActionsDAG::NodeRawConstPtrs children(node.children); - res = &inverted_dag.addFunction(node.function_base, children, ""); - chassert(res->result_type == node.result_type); + for (auto & arg : children) + arg = &cloneASTWithInversionPushDown(*arg, inverted_dag, to_inverted, context, false); + + auto it = inverse_relations.find(name); + if (it != inverse_relations.end()) + { + const auto & func_name = need_inversion ? 
it->second : it->first; + auto function_builder = FunctionFactory::instance().get(func_name, context); + res = &inverted_dag.addFunction(function_builder, children, ""); + handled_inversion = true; + } + else + { + res = &inverted_dag.addFunction(node.function_base, children, ""); + chassert(res->result_type == node.result_type); + } + } } } - if (need_inversion) + if (!handled_inversion && need_inversion) res = &inverted_dag.addFunction(FunctionFactory::instance().get("not", context), {res}, ""); + /// Make sure we don't change any data types (e.g. remove LowCardinality). + /// If it turns out that we actually want to change data types sometimes, it's ok to remove this + /// check *and* replace all `addFunction(node.function_base, ...)` calls above with + /// `addFunction(FunctionFactory::instance().get(name, context), ...)` to re-resolve overloads. + if (!node.result_type->equals(*res->result_type)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "KeyCondition inadvertently changed subexpression data type: '{}' -> '{}', column `{}`", node.result_type->getName(), res->result_type->getName(), node.result_name); + to_inverted[&node] = res; return *res; } diff --git a/tests/queries/0_stateless/03215_key_condition_bug.reference b/tests/queries/0_stateless/03215_key_condition_bug.reference new file mode 100644 index 00000000000..84ab67a85e0 --- /dev/null +++ b/tests/queries/0_stateless/03215_key_condition_bug.reference @@ -0,0 +1 @@ +(0) diff --git a/tests/queries/0_stateless/03215_key_condition_bug.sql b/tests/queries/0_stateless/03215_key_condition_bug.sql new file mode 100644 index 00000000000..ef2113e81f8 --- /dev/null +++ b/tests/queries/0_stateless/03215_key_condition_bug.sql @@ -0,0 +1,3 @@ +CREATE TABLE t (x Int8) ENGINE MergeTree ORDER BY x; +INSERT INTO t VALUES (1); +SELECT arrayJoin([tuple((toNullable(10) * toLowCardinality(20)) < materialize(30))]) AS row FROM t WHERE row.1 = 0; \ No newline at end of file From 2e7a15df89a7a3d28445095cb4392b056c3f19a9 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky <43110995+evillique@users.noreply.github.com> Date: Thu, 1 Aug 2024 23:23:06 +0200 Subject: [PATCH 1392/2361] Update setup_export_logs.sh --- docker/test/base/setup_export_logs.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh index db141bcc55e..e544397dd0c 100755 --- a/docker/test/base/setup_export_logs.sh +++ b/docker/test/base/setup_export_logs.sh @@ -215,10 +215,9 @@ function setup_logs_replication function stop_logs_replication { echo "Detach all logs replication" - timeout --preserve-status --signal TERM --kill-after 10m 20m \ - clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | { + clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | { tee /dev/stderr } | { - xargs -n1 -r -i clickhouse-client --query "drop table {}" + timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query "drop table {}" } } From eac2c9fc3d8a88c1033e0f23e048421ecf4db850 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1393/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp 
index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From af30e72e187713f4e435ff4f1f2382ea6671ba21 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 1 Aug 2024 23:58:57 +0200 Subject: [PATCH 1394/2361] Better limits --- tests/config/users.d/limits.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/config/users.d/limits.yaml b/tests/config/users.d/limits.yaml index 46cff73142c..53cbbfa744a 100644 --- a/tests/config/users.d/limits.yaml +++ b/tests/config/users.d/limits.yaml @@ -26,6 +26,7 @@ profiles: max_execution_time_leaf: 600 max_execution_speed: 100G max_execution_speed_bytes: 10T + timeout_before_checking_execution_speed: 300 max_estimated_execution_time: 600 max_columns_to_read: 20K max_temporary_columns: 20K From 69bd306a445a6bc8a55be14bb0080864921f8b69 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 00:48:25 +0200 Subject: [PATCH 1395/2361] Fix race condition in system.processes and Settings --- src/Backups/RestoreCoordinationRemote.cpp | 2 +- src/Backups/RestoreCoordinationRemote.h | 2 -- src/Databases/DatabaseLazy.cpp | 2 +- src/Databases/DatabaseLazy.h | 2 +- src/Interpreters/ProcessList.cpp | 2 +- 5 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/Backups/RestoreCoordinationRemote.cpp b/src/Backups/RestoreCoordinationRemote.cpp index 44214d00be5..0a69bc0eafb 100644 --- a/src/Backups/RestoreCoordinationRemote.cpp +++ b/src/Backups/RestoreCoordinationRemote.cpp @@ -323,7 +323,7 @@ bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic return false; bool result = false; - std::string path = zookeeper_path +"/stage"; + std::string path = zookeeper_path + "/stage"; auto holder = with_retries.createRetriesControlHolder("createRootNodes"); holder.retries_ctl.retryLoop( diff --git a/src/Backups/RestoreCoordinationRemote.h b/src/Backups/RestoreCoordinationRemote.h index 9c299865cfa..a3d57e9a4d0 100644 --- a/src/Backups/RestoreCoordinationRemote.h +++ b/src/Backups/RestoreCoordinationRemote.h @@ -61,8 +61,6 @@ private: void createRootNodes(); void removeAllNodes(); - class ReplicatedDatabasesMetadataSync; - /// get_zookeeper will provide a zookeeper client without any fault injection const zkutil::GetZooKeeper get_zookeeper; const String root_zookeeper_path; diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index ca30ee6db15..3fb6d30fcb8 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -44,7 +44,7 @@ namespace ErrorCodes DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, time_t expiration_time_, ContextPtr context_) - : DatabaseOnDisk(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseLazy (" + name_ + ")", context_) + : DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_) , expiration_time(expiration_time_) { } diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 
4347649117d..41cfb751141 100644
--- a/src/Databases/DatabaseLazy.h
+++ b/src/Databases/DatabaseLazy.h
@@ -12,7 +12,7 @@ class DatabaseLazyIterator;
 class Context;
 
 /** Lazy engine of databases.
-  * Works like DatabaseOrdinary, but stores in memory only cache.
+  * Works like DatabaseOrdinary, but stores in memory only the cache.
   * Can be used only with *Log engines.
   */
 class DatabaseLazy final : public DatabaseOnDisk
diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp
index 271e23a7288..6cb50b310ad 100644
--- a/src/Interpreters/ProcessList.cpp
+++ b/src/Interpreters/ProcessList.cpp
@@ -657,7 +657,7 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even
     {
         if (auto ctx = context.lock())
         {
-            res.query_settings = std::make_shared<Settings>(ctx->getSettingsRef());
+            res.query_settings = std::make_shared<Settings>(ctx->getSettingsCopy());
             res.current_database = ctx->getCurrentDatabase();
         }
     }

From a80c7c080cdbb3bec4662501686133298b7a4a2d Mon Sep 17 00:00:00 2001
From: Jacob Reckhard
Date: Thu, 1 Aug 2024 18:23:11 -0600
Subject: [PATCH 1396/2361] Added support for reading multilinestring wkts

---
 docs/en/sql-reference/data-types/geo.md       | 42 ++++++++++++
 .../en/sql-reference/functions/geo/polygon.md | 42 ++++++++++--
 src/DataTypes/DataTypeCustomGeo.cpp           |  7 ++
 src/DataTypes/DataTypeCustomGeo.h             |  6 ++
 src/Functions/geometryConverters.h            | 64 +++++++++++++++++++
 src/Functions/polygonsIntersection.cpp        |  2 +
 src/Functions/polygonsSymDifference.cpp       |  2 +
 src/Functions/polygonsUnion.cpp               |  2 +
 src/Functions/polygonsWithin.cpp              |  2 +
 src/Functions/readWkt.cpp                     | 30 +++++++++
 .../03215_multilinestring_geometry.reference  | 17 +++++
 .../03215_multilinestring_geometry.sql        | 12 ++++
 13 files changed, 227 insertions(+), 5 deletions(-)
 create mode 100644 tests/queries/0_stateless/03215_multilinestring_geometry.reference
 create mode 100644 tests/queries/0_stateless/03215_multilinestring_geometry.sql

diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md
index 7ffc7447d96..8ce53bb2ef2 100644
--- a/docs/en/sql-reference/data-types/geo.md
+++ b/docs/en/sql-reference/data-types/geo.md
@@ -52,6 +52,48 @@ Result:
 └───────────────────────────────┴───────────────┘
 ```
 
+## LineString
+
+`LineString` is a line stored as an array of points: [Array](array.md)([Point](#point)).
+
+**Example**
+
+Query:
+
+```sql
+CREATE TABLE geo_linestring (l LineString) ENGINE = Memory();
+INSERT INTO geo_linestring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]);
+SELECT l, toTypeName(l) FROM geo_linestring;
+```
+Result:
+
+``` text
+┌─l─────────────────────────────┬─toTypeName(l)─┐
+│ [(0,0),(10,0),(10,10),(0,10)] │ LineString    │
+└───────────────────────────────┴───────────────┘
+```
+
+## MultiLineString
+
+`MultiLineString` is multiple lines stored as an array of `LineString`: [Array](array.md)([LineString](#linestring)). 
+ +**Example** + +Query: + +```sql +CREATE TABLE geo_multilinestring (l MultiLineString) ENGINE = Memory(); +INSERT INTO geo_multilinestring VALUES([[(0, 0), (10, 0), (10, 10), (0, 10)], [(1, 1), (2, 2), (3, 3)]]); +SELECT l, toTypeName(l) FROM geo_multilinestring; +``` +Result: + +``` text +┌─l───────────────────────────────────────────────────┬─toTypeName(l)───┠+│ [[(0,0),(10,0),(10,10),(0,10)],[(1,1),(2,2),(3,3)]] │ MultiLineString │ +└─────────────────────────────────────────────────────┴─────────────────┘ +``` + ## Polygon `Polygon` is a polygon with holes stored as an array of rings: [Array](array.md)([Ring](#ring)). First element of outer array is the outer shape of polygon and all the following elements are holes. diff --git a/docs/en/sql-reference/functions/geo/polygon.md b/docs/en/sql-reference/functions/geo/polygon.md index 25a7a1fac8e..c054e05d39c 100644 --- a/docs/en/sql-reference/functions/geo/polygon.md +++ b/docs/en/sql-reference/functions/geo/polygon.md @@ -6,11 +6,13 @@ title: "Functions for Working with Polygons" ## WKT -Returns a WKT (Well Known Text) geometric object from various [Geo Data Types](../../data-types/geo.md). Supported WKT objects are: +Returns a WKT (Well Known Text) geometric object from various [Geo Data Types](../../data-types/geo.md). Supported WKT objects are: - POINT - POLYGON - MULTIPOLYGON +- LINESTRING +- MULTILINESTRING **Syntax** @@ -26,12 +28,16 @@ WKT(geo_data) - [Ring](../../data-types/geo.md#ring) - [Polygon](../../data-types/geo.md#polygon) - [MultiPolygon](../../data-types/geo.md#multipolygon) +- [LineString](../../data-types/geo.md#linestring) +- [MultiLineString](../../data-types/geo.md#multilinestring) **Returned value** - WKT geometric object `POINT` is returned for a Point. - WKT geometric object `POLYGON` is returned for a Polygon -- WKT geometric object `MULTIPOLYGON` is returned for a MultiPolygon. +- WKT geometric object `MULTIPOLYGON` is returned for a MultiPolygon. +- WKT geometric object `LINESTRING` is returned for a LineString. +- WKT geometric object `MULTILINESTRING` is returned for a MultiLineString. **Examples** @@ -84,7 +90,7 @@ SELECT ### Input parameters -String starting with `MULTIPOLYGON` +String starting with `MULTIPOLYGON` ### Returned value @@ -170,6 +176,34 @@ SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); [(1,1),(2,2),(3,3),(1,1)] ``` +## readWKTMultiLineString + +Parses a Well-Known Text (WKT) representation of a MultiLineString geometry and returns it in the internal ClickHouse format. + +### Syntax + +```sql +readWKTMultiLineString(wkt_string) +``` + +### Arguments + +- `wkt_string`: The input WKT string representing a MultiLineString geometry. + +### Returned value + +The function returns a ClickHouse internal representation of the multilinestring geometry. + +### Example + +```sql +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))')); +``` + +```response +[[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] +``` + ## readWKTRing Parses a Well-Known Text (WKT) representation of a Polygon geometry and returns a ring (closed linestring) in the internal ClickHouse format. @@ -219,7 +253,7 @@ UInt8, 0 for false, 1 for true ## polygonsDistanceSpherical -Calculates the minimal distance between two points where one point belongs to the first polygon and the second to another polygon. Spherical means that coordinates are interpreted as coordinates on a pure and ideal sphere, which is not true for the Earth. 
Using this type of coordinate system speeds up execution, but of course is not precise. +Calculates the minimal distance between two points where one point belongs to the first polygon and the second to another polygon. Spherical means that coordinates are interpreted as coordinates on a pure and ideal sphere, which is not true for the Earth. Using this type of coordinate system speeds up execution, but of course is not precise. ### Example diff --git a/src/DataTypes/DataTypeCustomGeo.cpp b/src/DataTypes/DataTypeCustomGeo.cpp index 0736d837d46..d72787647c3 100644 --- a/src/DataTypes/DataTypeCustomGeo.cpp +++ b/src/DataTypes/DataTypeCustomGeo.cpp @@ -24,6 +24,13 @@ void registerDataTypeDomainGeo(DataTypeFactory & factory) std::make_unique(std::make_unique())); }); + // Custom type for mulitple lines stored as Array(LineString) + factory.registerSimpleDataTypeCustom("MultiLineString", [] + { + return std::make_pair(DataTypeFactory::instance().get("Array(LineString)"), + std::make_unique(std::make_unique())); + }); + // Custom type for simple polygon without holes stored as Array(Point) factory.registerSimpleDataTypeCustom("Ring", [] { diff --git a/src/DataTypes/DataTypeCustomGeo.h b/src/DataTypes/DataTypeCustomGeo.h index 0a1c83e4638..6a632f0d05c 100644 --- a/src/DataTypes/DataTypeCustomGeo.h +++ b/src/DataTypes/DataTypeCustomGeo.h @@ -17,6 +17,12 @@ public: DataTypeLineStringName() : DataTypeCustomFixedName("LineString") {} }; +class DataTypeMultiLineStringName : public DataTypeCustomFixedName +{ +public: + DataTypeMultiLineStringName() : DataTypeCustomFixedName("MultiLineString") {} +}; + class DataTypeRingName : public DataTypeCustomFixedName { public: diff --git a/src/Functions/geometryConverters.h b/src/Functions/geometryConverters.h index 03831d37e0c..bf975017a6d 100644 --- a/src/Functions/geometryConverters.h +++ b/src/Functions/geometryConverters.h @@ -31,6 +31,9 @@ namespace ErrorCodes template using LineString = boost::geometry::model::linestring; +template +using MultiLineString = boost::geometry::model::multi_linestring>; + template using Ring = boost::geometry::model::ring; @@ -42,12 +45,14 @@ using MultiPolygon = boost::geometry::model::multi_polygon>; using CartesianPoint = boost::geometry::model::d2::point_xy; using CartesianLineString = LineString; +using CartesianMultiLineString = MultiLineString; using CartesianRing = Ring; using CartesianPolygon = Polygon; using CartesianMultiPolygon = MultiPolygon; using SphericalPoint = boost::geometry::model::point>; using SphericalLineString = LineString; +using SphericalMultiLineString = MultiLineString; using SphericalRing = Ring; using SphericalPolygon = Polygon; using SphericalMultiPolygon = MultiPolygon; @@ -113,6 +118,28 @@ struct ColumnToLineStringsConverter } }; +/** + * Class which converts Column with type Array(Array(Tuple(Float64, Float64))) to a vector of boost multi_linestring type. 
+*/ +template +struct ColumnToMultiLineStringsConverter +{ + static std::vector> convert(ColumnPtr col) + { + const IColumn::Offsets & offsets = typeid_cast(*col).getOffsets(); + size_t prev_offset = 0; + std::vector> answer(offsets.size()); + auto all_linestrings = ColumnToLineStringsConverter::convert(typeid_cast(*col).getDataPtr()); + for (size_t iter = 0; iter < offsets.size() && iter < all_linestrings.size(); ++iter) + { + for (size_t linestring_iter = prev_offset; linestring_iter < offsets[iter]; ++linestring_iter) + answer[iter].emplace_back(std::move(all_linestrings[linestring_iter])); + prev_offset = offsets[iter]; + } + return answer; + } +}; + /** * Class which converts Column with type Array(Tuple(Float64, Float64)) to a vector of boost ring type. */ @@ -268,6 +295,38 @@ private: ColumnUInt64::MutablePtr offsets; }; +/// Serialize Point, MultiLineString as MultiLineString +template +class MultiLineStringSerializer +{ +public: + MultiLineStringSerializer() + : offsets(ColumnUInt64::create()) + {} + + explicit MultiLineStringSerializer(size_t n) + : offsets(ColumnUInt64::create(n)) + {} + + void add(const MultiLineString & multilinestring) + { + size += multilinestring.size(); + offsets->insertValue(size); + for (const auto & linestring : multilinestring) + linestring_serializer.add(linestring); + } + + ColumnPtr finalize() + { + return ColumnArray::create(linestring_serializer.finalize(), std::move(offsets)); + } + +private: + size_t size = 0; + LineStringSerializer linestring_serializer; + ColumnUInt64::MutablePtr offsets; +}; + /// Almost the same as LineStringSerializer /// Serialize Point, Ring as Ring template @@ -411,6 +470,11 @@ static void callOnGeometryDataType(DataTypePtr type, F && f) else if (factory.get("LineString")->equals(*type) && type->getCustomName() && type->getCustomName()->getName() == "LineString") return f(ConverterType>()); + /// We should take the name into consideration to avoid ambiguity. + /// Because for example both MultiLineString and Polygon are resolved to Array(Tuple(Point)). + else if (factory.get("MultiLineString")->equals(*type) && type->getCustomName() && type->getCustomName()->getName() == "MultiLineString") + return f(ConverterType>()); + /// For backward compatibility if we call this function not on a custom type, we will consider Array(Tuple(Point)) as type Ring. 
else if (factory.get("Ring")->equals(*type)) return f(ConverterType>()); diff --git a/src/Functions/polygonsIntersection.cpp b/src/Functions/polygonsIntersection.cpp index 329242e762e..43ab03f8c1f 100644 --- a/src/Functions/polygonsIntersection.cpp +++ b/src/Functions/polygonsIntersection.cpp @@ -75,6 +75,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/polygonsSymDifference.cpp b/src/Functions/polygonsSymDifference.cpp index 3c219d0facb..6faec95bb7b 100644 --- a/src/Functions/polygonsSymDifference.cpp +++ b/src/Functions/polygonsSymDifference.cpp @@ -73,6 +73,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/polygonsUnion.cpp b/src/Functions/polygonsUnion.cpp index 969eb2f78fb..5378ff636f8 100644 --- a/src/Functions/polygonsUnion.cpp +++ b/src/Functions/polygonsUnion.cpp @@ -73,6 +73,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/polygonsWithin.cpp b/src/Functions/polygonsWithin.cpp index c63ad5ef868..dacd1c0e18f 100644 --- a/src/Functions/polygonsWithin.cpp +++ b/src/Functions/polygonsWithin.cpp @@ -77,6 +77,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git 
a/src/Functions/readWkt.cpp b/src/Functions/readWkt.cpp index eb262777b0d..2010b5167e7 100644 --- a/src/Functions/readWkt.cpp +++ b/src/Functions/readWkt.cpp @@ -87,6 +87,11 @@ struct ReadWKTLineStringNameHolder static constexpr const char * name = "readWKTLineString"; }; +struct ReadWKTMultiLineStringNameHolder +{ + static constexpr const char * name = "readWKTMultiLineString"; +}; + struct ReadWKTRingNameHolder { static constexpr const char * name = "readWKTRing"; @@ -131,6 +136,31 @@ Parses a Well-Known Text (WKT) representation of a LineString geometry and retur }, .categories{"Unique identifiers"} }); + factory.registerFunction, ReadWKTMultiLineStringNameHolder>>(FunctionDocumentation + { + .description=R"( +Parses a Well-Known Text (WKT) representation of a MultiLineString geometry and returns it in the internal ClickHouse format. +)", + .syntax = "readWKTMultiLineString(wkt_string)", + .arguments{ + {"wkt_string", "The input WKT string representing a MultiLineString geometry."} + }, + .returned_value = "The function returns a ClickHouse internal representation of the multilinestring geometry.", + .examples{ + {"first call", "SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))');", R"( +┌─readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))')─┠+│ [[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] │ +└──────────────────────────────────────────────────────────────────────────────┘ + + )"}, + {"second call", "SELECT toTypeName(readWKTLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'));", R"( +┌─toTypeName(readWKTLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'))─┠+│ MultiLineString │ +└─────────────────────────────────────────────────────────────────────────┘ + )"}, + }, + .categories{"Unique identifiers"} + }); factory.registerFunction, ReadWKTRingNameHolder>>(); factory.registerFunction, ReadWKTPolygonNameHolder>>(); factory.registerFunction, ReadWKTMultiPolygonNameHolder>>(); diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.reference b/tests/queries/0_stateless/03215_multilinestring_geometry.reference new file mode 100644 index 00000000000..f4c5774018e --- /dev/null +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.reference @@ -0,0 +1,17 @@ +-- { echoOn } +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +[[(1,1),(2,2),(3,3),(1,1)]] +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +MultiLineString +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +MULTILINESTRING((1 1,2 2,3 3,1 1)) +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +[[(1,1),(2,2),(3,3),(1,1)],[(1,0),(2,0),(3,0)]] +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +MultiLineString +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +MULTILINESTRING((1 1,2 2,3 3,1 1),(1 0,2 0,3 0)) +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. 
+WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x +SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); +POLYGON((1 1,2 2,3 3,1 1)) String [[(1,1),(2,2),(3,3),(1,1)]] Polygon diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.sql b/tests/queries/0_stateless/03215_multilinestring_geometry.sql new file mode 100644 index 00000000000..71344920c52 --- /dev/null +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.sql @@ -0,0 +1,12 @@ +-- { echoOn } +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); + +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); + +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. +WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x +SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index b21ae0764c6..3d7e77f213d 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1,4 +1,4 @@ -personal_ws-1.1 en 2942 +personal_ws-1.1 en 2942 AArch ACLs ALTERs @@ -561,6 +561,7 @@ MindsDB Mongodb Monotonicity MsgPack +MultiLineString MultiPolygon Multiline Multiqueries @@ -2361,6 +2362,7 @@ rankCorr rapidjson rawblob readWKTLineString +readWKTMultiLineString readWKTMultiPolygon readWKTPoint readWKTPolygon From 572831f865d66e046d3e507d214e0f5aeae49ad4 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 2 Aug 2024 00:09:26 +0000 Subject: [PATCH 1397/2361] async_insert_race_long flakiness fixes 1. Make the test truly asynchronous. The setting `--async_insert_max_data_size 1` leads to data being flushed synchronously for all inserts in this test. This triggers part creation and extra resource consumption. 2. Do not run the `--wait_for_async_insert` query as a background process with a fixed (50ms) sleep time. If the actual execution time is longer than the anticipated delay time, it may lead to excessive process creation. --- tests/queries/0_stateless/02481_async_insert_race_long.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02481_async_insert_race_long.sh b/tests/queries/0_stateless/02481_async_insert_race_long.sh index b0088017d32..91e6c4960e0 100755 --- a/tests/queries/0_stateless/02481_async_insert_race_long.sh +++ b/tests/queries/0_stateless/02481_async_insert_race_long.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -export MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --async_insert_busy_timeout_ms 10 --async_insert_max_data_size 1 --async_insert 1" +export MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --async_insert_busy_timeout_min_ms 50 --async_insert_busy_timeout_max_ms 50 --async_insert 1" function insert1() { @@ -29,11 +29,8 @@ function insert3() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" & - sleep 0.05 + ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" done - - wait } function select1() From c0d298781e72aaa1f34fc7fd610f5dbcaa9acf2d Mon Sep 17 00:00:00 2001 From: morning-color Date: Fri, 2 Aug 2024 10:36:15 +0800 Subject: [PATCH 1398/2361] Trigger test. --- .../queries/0_stateless/03174_exact_rows_before_aggregation.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql index 0afc0be4370..f9fd4ef5a7b 100644 --- a/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql +++ b/tests/queries/0_stateless/03174_exact_rows_before_aggregation.sql @@ -34,6 +34,7 @@ create table test (i int) engine MergeTree order by i; insert into test select arrayJoin(range(10000)); set optimize_aggregation_in_order=1; + select * from test where i < 10 group by i order by i FORMAT JSONCompact; select max(i) from test where i < 20 limit 1 FORMAT JSONCompact; From 1cb2904b447d631e14b7e3b7cc96e4be74947ef6 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 2 Aug 2024 03:04:09 +0000 Subject: [PATCH 1399/2361] fix test --- tests/queries/0_stateless/03215_view_with_recursive.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03215_view_with_recursive.sql b/tests/queries/0_stateless/03215_view_with_recursive.sql index cac47124d51..ef7908612af 100644 --- a/tests/queries/0_stateless/03215_view_with_recursive.sql +++ b/tests/queries/0_stateless/03215_view_with_recursive.sql @@ -1,3 +1,5 @@ +SET allow_experimental_analyzer = 1; + CREATE VIEW 03215_test_v AS WITH RECURSIVE test_table AS ( From c50ef37a03438003c21076c5700d9c1f52c1c435 Mon Sep 17 00:00:00 2001 From: pufit Date: Fri, 2 Aug 2024 00:01:41 -0400 Subject: [PATCH 1400/2361] Fix inconsistent formatting for `GRANT CURRENT GRANTS` --- src/Parsers/Access/ASTGrantQuery.cpp | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/src/Parsers/Access/ASTGrantQuery.cpp b/src/Parsers/Access/ASTGrantQuery.cpp index f60fa7e4a23..eac88c75513 100644 --- a/src/Parsers/Access/ASTGrantQuery.cpp +++ b/src/Parsers/Access/ASTGrantQuery.cpp @@ -97,24 +97,9 @@ namespace void formatCurrentGrantsElements(const AccessRightsElements & elements, const IAST::FormatSettings & settings) { - for (size_t i = 0; i != elements.size(); ++i) - { - const auto & element = elements[i]; - - bool next_element_on_same_db_and_table = false; - if (i != elements.size() - 1) - { - const auto & next_element = elements[i + 1]; - if (element.sameDatabaseAndTableAndParameter(next_element)) - next_element_on_same_db_and_table = true; - } - - if (!next_element_on_same_db_and_table) - { - settings.ostr << " "; - formatONClause(element, settings); - } - } + settings.ostr << "("; + 
formatElementsWithoutOptions(elements, settings); + settings.ostr << ")"; } } From 0772ed7f6ac92bb2e016e5db00e85deafeecb127 Mon Sep 17 00:00:00 2001 From: shiyer7474 Date: Fri, 2 Aug 2024 04:02:43 +0000 Subject: [PATCH 1401/2361] Code style feedback incorporated --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 10 +++++----- ...3209_parameterized_view_with_non_literal_params.sql | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 6113a38d463..bffdba2f58a 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -4549,11 +4549,11 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node, /// Serialize the constant value using datatype specific /// interfaces to match the deserialization in ReplaceQueryParametersVistor. WriteBufferFromOwnString buf; - auto constval = constant->getValue(); - auto realtype = constant->getResultType(); - auto tempcol = realtype->createColumn(); - tempcol->insert(constval); - realtype->getDefaultSerialization()->serializeTextEscaped(*tempcol, 0, buf, {}); + const auto & value = constant->getValue(); + auto real_type = constant->getResultType(); + auto temporary_column = real_type->createColumn(); + temporary_column->insert(value); + real_type->getDefaultSerialization()->serializeTextEscaped(*temporary_column, 0, buf, {}); view_params[identifier_node->getIdentifier().getFullName()] = buf.str(); } } diff --git a/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql index 55795c7a785..f2c61e5cb1d 100644 --- a/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql +++ b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql @@ -90,8 +90,10 @@ select id from ipv4_pv(ipv4param=(select ipaddr from ipv4_table_pv where id=3)); drop view date_pv; drop view date_pv2; +drop view date32_pv; drop view uuid_pv; drop view ipv4_pv; drop table date_table_pv; +drop table date32_table_pv; drop table uuid_table_pv; drop table ipv4_table_pv; From d6da86dad282e6ad176b115d4344944daa8b9756 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Wed, 26 Jun 2024 01:27:47 +0000 Subject: [PATCH 1402/2361] Store plain_rewritable metadata in a separate layout --- .../CommonPathPrefixKeyGenerator.cpp | 6 +- .../MetadataStorageFromPlainObjectStorage.cpp | 36 ++++-- .../MetadataStorageFromPlainObjectStorage.h | 16 ++- ...torageFromPlainObjectStorageOperations.cpp | 71 +++++++---- ...aStorageFromPlainObjectStorageOperations.h | 14 ++- ...torageFromPlainRewritableObjectStorage.cpp | 119 +++++++++++++++--- ...aStorageFromPlainRewritableObjectStorage.h | 15 ++- .../test_s3_plain_rewritable/test.py | 13 ++ 8 files changed, 225 insertions(+), 65 deletions(-) diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index e321c8a3c5a..2a06d56e5c7 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -19,11 +19,11 @@ ObjectStorageKey CommonPathPrefixKeyGenerator::generate(const String & path, boo { const auto & [object_key_prefix, suffix_parts] = getLongestObjectKeyPrefix(path); - auto key = std::filesystem::path(object_key_prefix.empty() ? 
storage_key_prefix : object_key_prefix); + auto key = std::filesystem::path(object_key_prefix.empty() ? std::string() : object_key_prefix); /// The longest prefix is the same as path, meaning that the path is already mapped. if (suffix_parts.empty()) - return ObjectStorageKey::createAsRelative(std::move(key)); + return ObjectStorageKey::createAsRelative(storage_key_prefix, std::move(key)); /// File and top-level directory paths are mapped as is. if (!is_directory || object_key_prefix.empty()) @@ -39,7 +39,7 @@ ObjectStorageKey CommonPathPrefixKeyGenerator::generate(const String & path, boo key /= getRandomASCIIString(part_size); } - return ObjectStorageKey::createAsRelative(key); + return ObjectStorageKey::createAsRelative(storage_key_prefix, key); } std::tuple> CommonPathPrefixKeyGenerator::getLongestObjectKeyPrefix(const std::string & path) const diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 30111d04d20..3da190c7256 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -7,6 +7,7 @@ #include #include +#include namespace DB { @@ -79,14 +80,16 @@ std::vector MetadataStorageFromPlainObjectStorage::listDirectory(co object_storage->listObjects(abs_key, files, 0); - return getDirectChildrenOnDisk(abs_key, files, path); + std::unordered_set directories; + getDirectChildrenOnDisk(abs_key, object_storage->getCommonKeyPrefix(), files, path, directories); + return std::vector(std::make_move_iterator(directories.begin()), std::make_move_iterator(directories.end())); } DirectoryIteratorPtr MetadataStorageFromPlainObjectStorage::iterateDirectory(const std::string & path) const { /// Required for MergeTree auto paths = listDirectory(path); - // Prepend path, since iterateDirectory() includes path, unlike listDirectory() + /// Prepend path, since iterateDirectory() includes path, unlike listDirectory() std::for_each(paths.begin(), paths.end(), [&](auto & child) { child = fs::path(path) / child; }); std::vector fs_paths(paths.begin(), paths.end()); return std::make_unique(std::move(fs_paths)); @@ -99,10 +102,13 @@ StoredObjects MetadataStorageFromPlainObjectStorage::getStorageObjects(const std return {StoredObject(object_key.serialize(), path, object_size)}; } -std::vector MetadataStorageFromPlainObjectStorage::getDirectChildrenOnDisk( - const std::string & storage_key, const RelativePathsWithMetadata & remote_paths, const std::string & /* local_path */) const +void MetadataStorageFromPlainObjectStorage::getDirectChildrenOnDisk( + const std::string & storage_key, + const std::string & /* storage_key_perfix */, + const RelativePathsWithMetadata & remote_paths, + const std::string & /* local_path */, + std::unordered_set & result) const { - std::unordered_set duplicates_filter; for (const auto & elem : remote_paths) { const auto & path = elem->relative_path; @@ -111,11 +117,10 @@ std::vector MetadataStorageFromPlainObjectStorage::getDirectChildre /// string::npos is ok. 
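A standalone sketch of what this loop computes: the immediate children of a storage key, derived from a flat listing of object keys (simplified, assuming the keys were already filtered by the prefix; the real code fills a caller-provided result set and, for rewritable disks, remaps prefixes through the path map):

#include <iostream>
#include <set>
#include <string>
#include <vector>

/// Return the immediate children (plain files and first-level directory names)
/// of `storage_key`, given object keys that all start with it.
std::set<std::string> directChildren(const std::string & storage_key, const std::vector<std::string> & remote_paths)
{
    std::set<std::string> result;
    for (const auto & path : remote_paths)
    {
        const size_t child_pos = storage_key.size();
        /// string::npos is fine: no '/' after the prefix means a plain file.
        const size_t slash_pos = path.find('/', child_pos);
        if (slash_pos == std::string::npos)
            result.insert(path.substr(child_pos));
        else
            result.insert(path.substr(child_pos, slash_pos - child_pos));
    }
    return result;
}

int main()
{
    /// Prints "a" and "file.txt": one subdirectory and one file directly under data/.
    for (const auto & name : directChildren("data/", {"data/file.txt", "data/a/part.bin", "data/a/b/c.bin"}))
        std::cout << name << '\n';
}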
const auto slash_pos = path.find('/', child_pos); if (slash_pos == std::string::npos) - duplicates_filter.emplace(path.substr(child_pos)); + result.emplace(path.substr(child_pos)); else - duplicates_filter.emplace(path.substr(child_pos, slash_pos - child_pos)); + result.emplace(path.substr(child_pos, slash_pos - child_pos)); } - return std::vector(std::make_move_iterator(duplicates_filter.begin()), std::make_move_iterator(duplicates_filter.end())); } const IMetadataStorage & MetadataStorageFromPlainObjectStorageTransaction::getStorageForNonTransactionalReads() const @@ -140,7 +145,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::removeDirectory(const std else { addOperation(std::make_unique( - normalizeDirectoryPath(path), *metadata_storage.getPathMap(), object_storage)); + normalizeDirectoryPath(path), *metadata_storage.getPathMap(), object_storage, metadata_storage.getMetadataKeyPrefix())); } } @@ -151,8 +156,13 @@ void MetadataStorageFromPlainObjectStorageTransaction::createDirectory(const std auto normalized_path = normalizeDirectoryPath(path); auto key_prefix = object_storage->generateObjectKeyPrefixForDirectoryPath(normalized_path).serialize(); + chassert(key_prefix.starts_with(object_storage->getCommonKeyPrefix())); auto op = std::make_unique( - std::move(normalized_path), std::move(key_prefix), *metadata_storage.getPathMap(), object_storage); + std::move(normalized_path), + key_prefix.substr(object_storage->getCommonKeyPrefix().size()), + *metadata_storage.getPathMap(), + object_storage, + metadata_storage.getMetadataKeyPrefix()); addOperation(std::move(op)); } @@ -167,7 +177,11 @@ void MetadataStorageFromPlainObjectStorageTransaction::moveDirectory(const std:: throwNotImplemented(); addOperation(std::make_unique( - normalizeDirectoryPath(path_from), normalizeDirectoryPath(path_to), *metadata_storage.getPathMap(), object_storage)); + normalizeDirectoryPath(path_from), + normalizeDirectoryPath(path_to), + *metadata_storage.getPathMap(), + object_storage, + metadata_storage.getMetadataKeyPrefix())); } void MetadataStorageFromPlainObjectStorageTransaction::addBlobToMetadata( diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index 66da0f2431e..97c5715a937 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -6,6 +6,8 @@ #include #include +#include +#include namespace DB { @@ -78,10 +80,20 @@ public: bool supportsStat() const override { return false; } protected: + /// Get the object storage prefix for storing metadata files. If stored behind a separate endpoint, + /// the metadata keys reflect the layout of the regular files. + virtual std::string getMetadataKeyPrefix() const { return object_storage->getCommonKeyPrefix(); } + + /// Returns a map of local paths to paths in object storage. virtual std::shared_ptr getPathMap() const { throwNotImplemented(); } - virtual std::vector getDirectChildrenOnDisk( - const std::string & storage_key, const RelativePathsWithMetadata & remote_paths, const std::string & local_path) const; + /// Retrieves the immediate files and directories within a given directory on a disk. 
+ virtual void getDirectChildrenOnDisk( + const std::string & storage_key, + const std::string & storage_key_perfix, + const RelativePathsWithMetadata & remote_paths, + const std::string & local_path, + std::unordered_set & result) const; }; class MetadataStorageFromPlainObjectStorageTransaction final : public IMetadataTransaction, private MetadataOperationsHolder diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 7e4b1f69962..0a6086bd39d 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -20,14 +20,24 @@ namespace constexpr auto PREFIX_PATH_FILE_NAME = "prefix.path"; +ObjectStorageKey createMetadataObjectKey(const std::string & key_prefix, const std::string & metadata_key_prefix) +{ + auto prefix = std::filesystem::path(metadata_key_prefix) / key_prefix; + return ObjectStorageKey::createAsRelative(prefix.string(), PREFIX_PATH_FILE_NAME); +} } MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( std::filesystem::path && path_, std::string && key_prefix_, MetadataStorageFromPlainObjectStorage::PathMap & path_map_, - ObjectStoragePtr object_storage_) - : path(std::move(path_)), key_prefix(key_prefix_), path_map(path_map_), object_storage(object_storage_) + ObjectStoragePtr object_storage_, + const std::string & metadata_key_prefix_) + : path(std::move(path_)) + , key_prefix(key_prefix_) + , path_map(path_map_) + , object_storage(object_storage_) + , metadata_key_prefix(metadata_key_prefix_) { } @@ -36,13 +46,17 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: if (path_map.contains(path)) return; - LOG_TRACE(getLogger("MetadataStorageFromPlainObjectStorageCreateDirectoryOperation"), "Creating metadata for directory '{}'", path); + auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); - auto object_key = ObjectStorageKey::createAsRelative(key_prefix, PREFIX_PATH_FILE_NAME); + LOG_TRACE( + getLogger("MetadataStorageFromPlainObjectStorageCreateDirectoryOperation"), + "Creating metadata for directory '{}' with remote path='{}'", + path, + metadata_object_key.serialize()); - auto object = StoredObject(object_key.serialize(), path / PREFIX_PATH_FILE_NAME); + auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME); auto buf = object_storage->writeObject( - object, + metadata_object, WriteMode::Rewrite, /* object_attributes */ std::nullopt, /* buf_size */ DBMS_DEFAULT_BUFFER_SIZE, @@ -66,25 +80,31 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock &) { - auto object_key = ObjectStorageKey::createAsRelative(key_prefix, PREFIX_PATH_FILE_NAME); + auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); + if (write_finalized) { path_map.erase(path); auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); - object_storage->removeObject(StoredObject(object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); + object_storage->removeObject(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); } else if (write_created) - 
object_storage->removeObjectIfExists(StoredObject(object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); + object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); } MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFromPlainObjectStorageMoveDirectoryOperation( std::filesystem::path && path_from_, std::filesystem::path && path_to_, MetadataStorageFromPlainObjectStorage::PathMap & path_map_, - ObjectStoragePtr object_storage_) - : path_from(std::move(path_from_)), path_to(std::move(path_to_)), path_map(path_map_), object_storage(object_storage_) + ObjectStoragePtr object_storage_, + const std::string & metadata_key_prefix_) + : path_from(std::move(path_from_)) + , path_to(std::move(path_to_)) + , path_map(path_map_) + , object_storage(object_storage_) + , metadata_key_prefix(metadata_key_prefix_) { } @@ -98,26 +118,26 @@ std::unique_ptr MetadataStorageFromPlainObjectStorageMo if (path_map.contains(new_path)) throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Metadata object for the new (destination) path '{}' already exists", new_path); - auto object_key = ObjectStorageKey::createAsRelative(expected_it->second, PREFIX_PATH_FILE_NAME); + auto metadata_object_key = createMetadataObjectKey(expected_it->second, metadata_key_prefix); - auto object = StoredObject(object_key.serialize(), expected_path / PREFIX_PATH_FILE_NAME); + auto metadata_object = StoredObject(metadata_object_key.serialize(), expected_path / PREFIX_PATH_FILE_NAME); if (validate_content) { std::string data; - auto read_buf = object_storage->readObject(object); + auto read_buf = object_storage->readObject(metadata_object); readStringUntilEOF(data, *read_buf); if (data != path_from) throw Exception( ErrorCodes::INCORRECT_DATA, "Incorrect data for object key {}, expected {}, got {}", - object_key.serialize(), + metadata_object_key.serialize(), expected_path, data); } auto write_buf = object_storage->writeObject( - object, + metadata_object, WriteMode::Rewrite, /* object_attributes */ std::nullopt, /*buf_size*/ DBMS_DEFAULT_BUFFER_SIZE, @@ -156,8 +176,11 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq } MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( - std::filesystem::path && path_, MetadataStorageFromPlainObjectStorage::PathMap & path_map_, ObjectStoragePtr object_storage_) - : path(std::move(path_)), path_map(path_map_), object_storage(object_storage_) + std::filesystem::path && path_, + MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + ObjectStoragePtr object_storage_, + const std::string & metadata_key_prefix_) + : path(std::move(path_)), path_map(path_map_), object_storage(object_storage_), metadata_key_prefix(metadata_key_prefix_) { } @@ -170,9 +193,9 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: LOG_TRACE(getLogger("MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation"), "Removing directory '{}'", path); key_prefix = path_it->second; - auto object_key = ObjectStorageKey::createAsRelative(key_prefix, PREFIX_PATH_FILE_NAME); - auto object = StoredObject(object_key.serialize(), path / PREFIX_PATH_FILE_NAME); - object_storage->removeObject(object); + auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); + auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME); + 
object_storage->removeObject(metadata_object); path_map.erase(path_it); auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; @@ -189,10 +212,10 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un if (!removed) return; - auto object_key = ObjectStorageKey::createAsRelative(key_prefix, PREFIX_PATH_FILE_NAME); - auto object = StoredObject(object_key.serialize(), path / PREFIX_PATH_FILE_NAME); + auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); + auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME); auto buf = object_storage->writeObject( - object, + metadata_object, WriteMode::Rewrite, /* object_attributes */ std::nullopt, /* buf_size */ DBMS_DEFAULT_BUFFER_SIZE, diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h index 4b196f787fd..e31e3cbb262 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h @@ -16,6 +16,7 @@ private: std::string key_prefix; MetadataStorageFromPlainObjectStorage::PathMap & path_map; ObjectStoragePtr object_storage; + const std::string metadata_key_prefix; bool write_created = false; bool write_finalized = false; @@ -26,7 +27,8 @@ public: std::filesystem::path && path_, std::string && key_prefix_, MetadataStorageFromPlainObjectStorage::PathMap & path_map_, - ObjectStoragePtr object_storage_); + ObjectStoragePtr object_storage_, + const std::string & metadata_key_prefix_); void execute(std::unique_lock & metadata_lock) override; void undo(std::unique_lock & metadata_lock) override; @@ -39,6 +41,7 @@ private: std::filesystem::path path_to; MetadataStorageFromPlainObjectStorage::PathMap & path_map; ObjectStoragePtr object_storage; + const std::string metadata_key_prefix; bool write_created = false; bool write_finalized = false; @@ -51,7 +54,8 @@ public: std::filesystem::path && path_from_, std::filesystem::path && path_to_, MetadataStorageFromPlainObjectStorage::PathMap & path_map_, - ObjectStoragePtr object_storage_); + ObjectStoragePtr object_storage_, + const std::string & metadata_key_prefix_); void execute(std::unique_lock & metadata_lock) override; @@ -65,13 +69,17 @@ private: MetadataStorageFromPlainObjectStorage::PathMap & path_map; ObjectStoragePtr object_storage; + const std::string metadata_key_prefix; std::string key_prefix; bool removed = false; public: MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( - std::filesystem::path && path_, MetadataStorageFromPlainObjectStorage::PathMap & path_map_, ObjectStoragePtr object_storage_); + std::filesystem::path && path_, + MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + ObjectStoragePtr object_storage_, + const std::string & metadata_key_prefix_); void execute(std::unique_lock & metadata_lock) override; void undo(std::unique_lock & metadata_lock) override; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index 7718fba9c28..f3d00a928e3 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -21,8 +22,22 @@ namespace { constexpr 
auto PREFIX_PATH_FILE_NAME = "prefix.path"; +constexpr auto METADATA_PATH_TOKEN = "__meta/"; -MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::string & root, ObjectStoragePtr object_storage) +/// Use a separate layout for metadata iff: +/// 1. The disk endpoint does not contain objects, OR +/// 2. The metadata is already stored behind a separate endpoint. +/// Otherwise, store metadata along with regular data for backward compatibility. +std::string getMetadataKeyPrefix(ObjectStoragePtr object_storage) +{ + const auto common_key_prefix = std::filesystem::path(object_storage->getCommonKeyPrefix()); + const auto metadata_key_prefix = std::filesystem::path(common_key_prefix) / METADATA_PATH_TOKEN; + return !object_storage->existsOrHasAnyChild(metadata_key_prefix / "") && object_storage->existsOrHasAnyChild(common_key_prefix / "") + ? common_key_prefix + : metadata_key_prefix; +} + +MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::string & metadata_key_prefix, ObjectStoragePtr object_storage) { MetadataStorageFromPlainObjectStorage::PathMap result; @@ -39,16 +54,16 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri LOG_DEBUG(log, "Loading metadata"); size_t num_files = 0; - for (auto iterator = object_storage->iterate(root, 0); iterator->isValid(); iterator->next()) + for (auto iterator = object_storage->iterate(metadata_key_prefix, 0); iterator->isValid(); iterator->next()) { ++num_files; auto file = iterator->current(); String path = file->getPath(); - auto remote_path = std::filesystem::path(path); - if (remote_path.filename() != PREFIX_PATH_FILE_NAME) + auto remote_metadata_path = std::filesystem::path(path); + if (remote_metadata_path.filename() != PREFIX_PATH_FILE_NAME) continue; - runner([remote_path, path, &object_storage, &result, &mutex, &log, &settings] + runner([remote_metadata_path, path, &object_storage, &result, &mutex, &log, &settings, &metadata_key_prefix] { setThreadName("PlainRWMetaLoad"); @@ -75,7 +90,10 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri throw; } - chassert(remote_path.has_parent_path()); + chassert(remote_metadata_path.has_parent_path()); + chassert(remote_metadata_path.string().starts_with(metadata_key_prefix)); + auto suffix = remote_metadata_path.string().substr(metadata_key_prefix.size()); + auto remote_path = std::filesystem::path(std::move(suffix)); std::pair res; { std::lock_guard lock(mutex); @@ -103,17 +121,17 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri return result; } -std::vector getDirectChildrenOnRewritableDisk( +void getDirectChildrenOnRewritableDisk( const std::string & storage_key, + const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, const MetadataStorageFromPlainObjectStorage::PathMap & local_path_prefixes, - SharedMutex & shared_mutex) + SharedMutex & shared_mutex, + std::unordered_set & result) { using PathMap = MetadataStorageFromPlainObjectStorage::PathMap; - std::unordered_set duplicates_filter; - /// Map remote paths into local subdirectories. std::unordered_map remote_to_local_subdir; @@ -149,22 +167,21 @@ std::vector getDirectChildrenOnRewritableDisk( /// File names. auto filename = path.substr(child_pos); if (!skip_list.contains(filename)) - duplicates_filter.emplace(std::move(filename)); + result.emplace(std::move(filename)); } else { /// Subdirectories. 
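For rewritable disks the subdirectory component of an object key is a generated prefix rather than the local directory name, so the branch below translates it back through the inverted path map. A simplified standalone sketch, with a made-up generated prefix "yzvct" standing in for a real key (the actual code keeps the full storage key prefix on the listed paths and also accounts for storage_key_perfix):

#include <iostream>
#include <map>
#include <string>

int main()
{
    /// Inverted path map: generated remote subdirectory prefix -> local directory name.
    const std::map<std::string, std::string> remote_to_local_subdir{{"yzvct", "all_1_1_0"}};

    /// A listed object key, already stripped of the storage key prefix for brevity.
    const std::string path = "yzvct/columns.txt";
    const auto slash_pos = path.find('/');
    const auto remote_subdir = path.substr(0, slash_pos);

    auto it = remote_to_local_subdir.find(remote_subdir);
    /// Mapped subdirectory: report the local name; otherwise the remote name already is the local one.
    std::cout << (it != remote_to_local_subdir.end() ? it->second : remote_subdir) << '\n';  /// all_1_1_0
}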
- auto it = remote_to_local_subdir.find(path.substr(0, slash_pos)); + chassert(path.find(storage_key_perfix) == 0); + auto it = remote_to_local_subdir.find(path.substr(storage_key_perfix.size(), slash_pos - storage_key_perfix.size())); /// Mapped subdirectories. if (it != remote_to_local_subdir.end()) - duplicates_filter.emplace(it->second); + result.emplace(it->second); /// The remote subdirectory name is the same as the local subdirectory. else - duplicates_filter.emplace(path.substr(child_pos, slash_pos - child_pos)); + result.emplace(path.substr(child_pos, slash_pos - child_pos)); } } - - return std::vector(std::make_move_iterator(duplicates_filter.begin()), std::make_move_iterator(duplicates_filter.end())); } } @@ -172,7 +189,8 @@ std::vector getDirectChildrenOnRewritableDisk( MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewritableObjectStorage( ObjectStoragePtr object_storage_, String storage_path_prefix_) : MetadataStorageFromPlainObjectStorage(object_storage_, storage_path_prefix_) - , path_map(std::make_shared(loadPathPrefixMap(object_storage->getCommonKeyPrefix(), object_storage))) + , metadata_key_prefix(DB::getMetadataKeyPrefix(object_storage)) + , path_map(std::make_shared(loadPathPrefixMap(metadata_key_prefix, object_storage))) { if (object_storage->isWriteOnce()) throw Exception( @@ -190,10 +208,71 @@ MetadataStorageFromPlainRewritableObjectStorage::~MetadataStorageFromPlainRewrit CurrentMetrics::sub(metric, path_map->size()); } -std::vector MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( - const std::string & storage_key, const RelativePathsWithMetadata & remote_paths, const std::string & local_path) const +bool MetadataStorageFromPlainRewritableObjectStorage::exists(const std::string & path) const { - return getDirectChildrenOnRewritableDisk(storage_key, remote_paths, local_path, *getPathMap(), metadata_mutex); + if (MetadataStorageFromPlainObjectStorage::exists(path)) + return true; + + if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) + { + auto key_prefix = object_storage->generateObjectKeyForPath(path).serialize(); + chassert(key_prefix.starts_with(object_storage->getCommonKeyPrefix())); + auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / key_prefix.substr(object_storage->getCommonKeyPrefix().size()); + return object_storage->existsOrHasAnyChild(metadata_key); + } + + return false; +} + +bool MetadataStorageFromPlainRewritableObjectStorage::isDirectory(const std::string & path) const +{ + if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) + { + auto directory = std::filesystem::path(object_storage->generateObjectKeyForPath(path).serialize()) / ""; + chassert(directory.string().starts_with(object_storage->getCommonKeyPrefix())); + auto metadata_key + = std::filesystem::path(getMetadataKeyPrefix()) / directory.string().substr(object_storage->getCommonKeyPrefix().size()); + return object_storage->existsOrHasAnyChild(metadata_key); + } + else + return MetadataStorageFromPlainObjectStorage::isDirectory(path); +} + +std::vector MetadataStorageFromPlainRewritableObjectStorage::listDirectory(const std::string & path) const +{ + auto key_prefix = object_storage->generateObjectKeyForPath(path).serialize(); + + RelativePathsWithMetadata files; + std::string abs_key = key_prefix; + if (!abs_key.ends_with('/')) + abs_key += '/'; + + object_storage->listObjects(abs_key, files, 0); + + std::unordered_set directories; + getDirectChildrenOnDisk(abs_key, 
object_storage->getCommonKeyPrefix(), files, path, directories); + /// List empty directories that are identified by the `prefix.path` metadata files. This is required to, e.g., remove + /// metadata along with regular files. + if (object_storage->getCommonKeyPrefix() != getMetadataKeyPrefix()) + { + chassert(abs_key.starts_with(object_storage->getCommonKeyPrefix())); + auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / abs_key.substr(object_storage->getCommonKeyPrefix().size()); + RelativePathsWithMetadata metadata_files; + object_storage->listObjects(metadata_key, metadata_files, 0); + getDirectChildrenOnDisk(metadata_key, getMetadataKeyPrefix(), metadata_files, path, directories); + } + + return std::vector(std::make_move_iterator(directories.begin()), std::make_move_iterator(directories.end())); +} + +void MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( + const std::string & storage_key, + const std::string & storage_key_perfix, + const RelativePathsWithMetadata & remote_paths, + const std::string & local_path, + std::unordered_set & result) const +{ + getDirectChildrenOnRewritableDisk(storage_key, storage_key_perfix, remote_paths, local_path, *getPathMap(), metadata_mutex, result); } } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h index a5394b9428d..71153cbdc25 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h @@ -3,6 +3,7 @@ #include #include +#include namespace DB @@ -11,6 +12,7 @@ namespace DB class MetadataStorageFromPlainRewritableObjectStorage final : public MetadataStorageFromPlainObjectStorage { private: + const std::string metadata_key_prefix; std::shared_ptr path_map; public: @@ -18,11 +20,20 @@ public: ~MetadataStorageFromPlainRewritableObjectStorage() override; MetadataStorageType getType() const override { return MetadataStorageType::PlainRewritable; } + bool exists(const std::string & path) const override; + bool isDirectory(const std::string & path) const override; + std::vector listDirectory(const std::string & path) const override; + protected: + std::string getMetadataKeyPrefix() const override { return metadata_key_prefix; } std::shared_ptr getPathMap() const override { return path_map; } - std::vector getDirectChildrenOnDisk( - const std::string & storage_key, const RelativePathsWithMetadata & remote_paths, const std::string & local_path) const override; + void getDirectChildrenOnDisk( + const std::string & storage_key, + const std::string & storage_key_perfix, + const RelativePathsWithMetadata & remote_paths, + const std::string & local_path, + std::unordered_set & result) const override; }; } diff --git a/tests/integration/test_s3_plain_rewritable/test.py b/tests/integration/test_s3_plain_rewritable/test.py index 4b1aaafc814..020e170eb48 100644 --- a/tests/integration/test_s3_plain_rewritable/test.py +++ b/tests/integration/test_s3_plain_rewritable/test.py @@ -139,6 +139,19 @@ def test(storage_policy): == insert_values_arr[i] ) + metadata_it = cluster.minio_client.list_objects( + cluster.minio_bucket, "data/", recursive=True + ) + metadata_count = 0 + for obj in list(metadata_it): + if "/__meta/" in obj.object_name: + assert obj.object_name.endswith("/prefix.path") + metadata_count += 1 + else: + assert not obj.object_name.endswith("/prefix.path") + + assert metadata_count > 0 + for i in 
range(NUM_WORKERS): node = cluster.instances[f"node{i + 1}"] node.query("DROP TABLE IF EXISTS test SYNC") From 98ad45ba960de4cc29ee794e2eeccf9fe6f8e0a8 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 28 Jun 2024 00:58:20 +0000 Subject: [PATCH 1403/2361] Add prefix_path parameter to object key generator --- src/Common/ObjectStorageKeyGenerator.cpp | 11 ++++++++--- src/Common/ObjectStorageKeyGenerator.h | 3 ++- .../AzureBlobStorage/AzureObjectStorage.cpp | 4 +++- .../AzureBlobStorage/AzureObjectStorage.h | 2 +- .../Cached/CachedObjectStorage.cpp | 10 ++++++---- .../ObjectStorages/Cached/CachedObjectStorage.h | 5 +++-- .../CommonPathPrefixKeyGenerator.cpp | 3 ++- .../CommonPathPrefixKeyGenerator.h | 3 ++- .../DiskObjectStorageTransaction.cpp | 6 +++--- .../ObjectStorages/HDFS/HDFSObjectStorage.cpp | 6 ++++-- .../ObjectStorages/HDFS/HDFSObjectStorage.h | 2 +- src/Disks/ObjectStorages/IObjectStorage.h | 5 +++-- .../ObjectStorages/Local/LocalObjectStorage.cpp | 14 ++++++++------ .../ObjectStorages/Local/LocalObjectStorage.h | 2 +- .../MetadataStorageFromPlainObjectStorage.cpp | 14 +++++++------- ...aStorageFromPlainRewritableObjectStorage.cpp | 6 +++--- src/Disks/ObjectStorages/PlainObjectStorage.h | 2 +- .../PlainRewritableObjectStorage.h | 17 +++++++++++------ src/Disks/ObjectStorages/S3/DiskS3Utils.cpp | 2 +- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 4 ++-- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 2 +- src/Disks/ObjectStorages/Web/WebObjectStorage.h | 2 +- 22 files changed, 74 insertions(+), 51 deletions(-) diff --git a/src/Common/ObjectStorageKeyGenerator.cpp b/src/Common/ObjectStorageKeyGenerator.cpp index e9212c3f04d..3e7bf3116bd 100644 --- a/src/Common/ObjectStorageKeyGenerator.cpp +++ b/src/Common/ObjectStorageKeyGenerator.cpp @@ -3,6 +3,7 @@ #include #include +#include #include @@ -14,7 +15,10 @@ public: , re_gen(key_template) { } - DB::ObjectStorageKey generate(const String &, bool) const override { return DB::ObjectStorageKey::createAsAbsolute(re_gen.generate()); } + DB::ObjectStorageKey generate(const String &, bool /* is_directory */, const std::optional & /* key_prefix */) const override + { + return DB::ObjectStorageKey::createAsAbsolute(re_gen.generate()); + } private: String key_template; @@ -29,7 +33,7 @@ public: : key_prefix(std::move(key_prefix_)) {} - DB::ObjectStorageKey generate(const String &, bool) const override + DB::ObjectStorageKey generate(const String &, bool /* is_directory */, const std::optional & /* key_prefix */) const override { /// Path to store the new S3 object. @@ -60,7 +64,8 @@ public: : key_prefix(std::move(key_prefix_)) {} - DB::ObjectStorageKey generate(const String & path, bool) const override + DB::ObjectStorageKey + generate(const String & path, bool /* is_directory */, const std::optional & /* key_prefix */) const override { return DB::ObjectStorageKey::createAsRelative(key_prefix, path); } diff --git a/src/Common/ObjectStorageKeyGenerator.h b/src/Common/ObjectStorageKeyGenerator.h index 11da039b33b..12aeec1714d 100644 --- a/src/Common/ObjectStorageKeyGenerator.h +++ b/src/Common/ObjectStorageKeyGenerator.h @@ -11,7 +11,8 @@ class IObjectStorageKeysGenerator public: virtual ~IObjectStorageKeysGenerator() = default; - virtual ObjectStorageKey generate(const String & path, bool is_directory) const = 0; + /// Generates an object storage key based on a path in the virtual filesystem. 
+ virtual ObjectStorageKey generate(const String & path, bool is_directory, const std::optional & key_prefix) const = 0; }; using ObjectStorageKeysGeneratorPtr = std::shared_ptr; diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index bc16955143b..0d92561d142 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -1,3 +1,4 @@ +#include #include #include "Common/Exception.h" @@ -117,7 +118,8 @@ AzureObjectStorage::AzureObjectStorage( { } -ObjectStorageKey AzureObjectStorage::generateObjectKeyForPath(const std::string & /* path */) const +ObjectStorageKey +AzureObjectStorage::generateObjectKeyForPath(const std::string & /* path */, const std::optional & /* key_prefix */) const { return ObjectStorageKey::createAsRelative(getRandomASCIIString(32)); } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 2c7ce5e18dc..bc90b05e64d 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -101,7 +101,7 @@ public: const std::string & config_prefix, ContextPtr context) override; - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const override; bool isRemote() const override { return true; } diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index a3b6e25e8ea..fb817005399 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -34,14 +34,16 @@ FileCache::Key CachedObjectStorage::getCacheKey(const std::string & path) const return cache->createKeyForPath(path); } -ObjectStorageKey CachedObjectStorage::generateObjectKeyForPath(const std::string & path) const +ObjectStorageKey +CachedObjectStorage::generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const { - return object_storage->generateObjectKeyForPath(path); + return object_storage->generateObjectKeyForPath(path, key_prefix); } -ObjectStorageKey CachedObjectStorage::generateObjectKeyPrefixForDirectoryPath(const std::string & path) const +ObjectStorageKey +CachedObjectStorage::generateObjectKeyPrefixForDirectoryPath(const std::string & path, const std::optional & key_prefix) const { - return object_storage->generateObjectKeyPrefixForDirectoryPath(path); + return object_storage->generateObjectKeyPrefixForDirectoryPath(path, key_prefix); } ReadSettings CachedObjectStorage::patchSettings(const ReadSettings & read_settings) const diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index 93ef2659cbb..efcdbfebabf 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -98,9 +98,10 @@ public: const std::string & getCacheName() const override { return cache_config_name; } - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const override; - ObjectStorageKey 
generateObjectKeyPrefixForDirectoryPath(const std::string & path) const override; + ObjectStorageKey + generateObjectKeyPrefixForDirectoryPath(const std::string & path, const std::optional & key_prefix) const override; void setKeysGenerator(ObjectStorageKeysGeneratorPtr gen) override { object_storage->setKeysGenerator(gen); } diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index 2a06d56e5c7..0a4426e8e66 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -15,7 +15,8 @@ CommonPathPrefixKeyGenerator::CommonPathPrefixKeyGenerator( { } -ObjectStorageKey CommonPathPrefixKeyGenerator::generate(const String & path, bool is_directory) const +ObjectStorageKey +CommonPathPrefixKeyGenerator::generate(const String & path, bool is_directory, const std::optional & /* key_prefix */) const { const auto & [object_key_prefix, suffix_parts] = getLongestObjectKeyPrefix(path); diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h index fb1140de908..08495738505 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h @@ -5,6 +5,7 @@ #include #include +#include namespace DB { @@ -26,7 +27,7 @@ public: explicit CommonPathPrefixKeyGenerator(String key_prefix_, SharedMutex & shared_mutex_, std::weak_ptr path_map_); - ObjectStorageKey generate(const String & path, bool is_directory) const override; + ObjectStorageKey generate(const String & path, bool is_directory, const std::optional & key_prefix) const override; private: /// Longest key prefix and unresolved parts of the source path. diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index b5805f6d23a..880911b9958 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -537,7 +537,7 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation for (const auto & object_from : source_blobs) { - auto object_key = destination_object_storage.generateObjectKeyForPath(to_path); + auto object_key = destination_object_storage.generateObjectKeyForPath(to_path, std::nullopt /* key_prefix */); auto object_to = StoredObject(object_key.serialize()); object_storage.copyObjectToAnotherObjectStorage(object_from, object_to,read_settings,write_settings, destination_object_storage); @@ -738,7 +738,7 @@ std::unique_ptr DiskObjectStorageTransaction::writeFile const WriteSettings & settings, bool autocommit) { - auto object_key = object_storage.generateObjectKeyForPath(path); + auto object_key = object_storage.generateObjectKeyForPath(path, std::nullopt /* key_prefix */); std::optional object_attributes; if (metadata_helper) @@ -835,7 +835,7 @@ void DiskObjectStorageTransaction::writeFileUsingBlobWritingFunction( const String & path, WriteMode mode, WriteBlobFunction && write_blob_function) { /// This function is a simplified and adapted version of DiskObjectStorageTransaction::writeFile(). 
- auto object_key = object_storage.generateObjectKeyForPath(path); + auto object_key = object_storage.generateObjectKeyForPath(path, std::nullopt /* key_prefix */); std::optional object_attributes; if (metadata_helper) diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index dcb2af9d4d3..3ce2a0f4903 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -4,8 +4,9 @@ #include #include -#include +#include #include +#include #include #include @@ -53,7 +54,8 @@ std::string HDFSObjectStorage::extractObjectKeyFromURL(const StoredObject & obje return path; } -ObjectStorageKey HDFSObjectStorage::generateObjectKeyForPath(const std::string & /* path */) const +ObjectStorageKey +HDFSObjectStorage::generateObjectKeyForPath(const std::string & /* path */, const std::optional & /* key_prefix */) const { initializeHDFSFS(); /// what ever data_source_description.description value is, consider that key as relative key diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index 8aae90d0721..0cb31eb8b8b 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -111,7 +111,7 @@ public: const std::string & config_prefix, ContextPtr context) override; - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const override; bool isRemote() const override { return true; } diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index ceea4d5a2bb..529c79790fd 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -232,10 +232,11 @@ public: /// Generate blob name for passed absolute local path. /// Path can be generated either independently or based on `path`. - virtual ObjectStorageKey generateObjectKeyForPath(const std::string & path) const = 0; + virtual ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const = 0; /// Object key prefix for local paths in the directory 'path'. 
- virtual ObjectStorageKey generateObjectKeyPrefixForDirectoryPath(const std::string & /* path */) const + virtual ObjectStorageKey + generateObjectKeyPrefixForDirectoryPath(const std::string & /* path */, const std::optional & /* key_prefix */) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'generateObjectKeyPrefixForDirectoryPath' is not implemented"); } diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index a247d86ddce..20ef135cdf7 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -1,15 +1,16 @@ #include -#include -#include -#include +#include +#include +#include #include #include -#include #include #include +#include +#include #include -#include +#include namespace fs = std::filesystem; @@ -222,7 +223,8 @@ std::unique_ptr LocalObjectStorage::cloneObjectStorage( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "cloneObjectStorage() is not implemented for LocalObjectStorage"); } -ObjectStorageKey LocalObjectStorage::generateObjectKeyForPath(const std::string & /* path */) const +ObjectStorageKey +LocalObjectStorage::generateObjectKeyForPath(const std::string & /* path */, const std::optional & /* key_prefix */) const { constexpr size_t key_name_total_size = 32; return ObjectStorageKey::createAsRelative(key_prefix, getRandomASCIIString(key_name_total_size)); diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h index 371cd37f8b2..564d49bf876 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h @@ -81,7 +81,7 @@ public: const std::string & config_prefix, ContextPtr context) override; - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const override; bool isRemote() const override { return false; } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 3da190c7256..589b18abca8 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -42,7 +42,7 @@ bool MetadataStorageFromPlainObjectStorage::exists(const std::string & path) con { /// NOTE: exists() cannot be used here since it works only for existing /// key, and does not work for some intermediate path. 
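The NOTE above leans on existsOrHasAnyChild(), which reports a path as present when some object key equals it or lies underneath it. An illustration of that semantic over an in-memory ordered key set (an assumption made for the sketch; the real check issues a prefix listing against the object storage):

#include <cassert>
#include <set>
#include <string>

/// A "plain" path exists if some stored key starts with it, even if no object has that exact key.
bool existsOrHasAnyChild(const std::set<std::string> & keys, const std::string & prefix)
{
    auto it = keys.lower_bound(prefix);
    return it != keys.end() && it->compare(0, prefix.size(), prefix) == 0;
}

int main()
{
    const std::set<std::string> keys{"data/tbl/all_1_1_0/columns.txt"};
    assert(existsOrHasAnyChild(keys, "data/tbl/"));            /// intermediate path, no object with this exact key
    assert(existsOrHasAnyChild(keys, "data/tbl/all_1_1_0/"));
    assert(!existsOrHasAnyChild(keys, "data/other/"));
}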
- auto object_key = object_storage->generateObjectKeyForPath(path); + auto object_key = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */); return object_storage->existsOrHasAnyChild(object_key.serialize()); } @@ -54,7 +54,7 @@ bool MetadataStorageFromPlainObjectStorage::isFile(const std::string & path) con bool MetadataStorageFromPlainObjectStorage::isDirectory(const std::string & path) const { - auto key_prefix = object_storage->generateObjectKeyForPath(path).serialize(); + auto key_prefix = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize(); auto directory = std::filesystem::path(std::move(key_prefix)) / ""; return object_storage->existsOrHasAnyChild(directory); @@ -62,7 +62,7 @@ bool MetadataStorageFromPlainObjectStorage::isDirectory(const std::string & path uint64_t MetadataStorageFromPlainObjectStorage::getFileSize(const String & path) const { - auto object_key = object_storage->generateObjectKeyForPath(path); + auto object_key = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */); auto metadata = object_storage->tryGetObjectMetadata(object_key.serialize()); if (metadata) return metadata->size_bytes; @@ -71,7 +71,7 @@ uint64_t MetadataStorageFromPlainObjectStorage::getFileSize(const String & path) std::vector MetadataStorageFromPlainObjectStorage::listDirectory(const std::string & path) const { - auto key_prefix = object_storage->generateObjectKeyForPath(path).serialize(); + auto key_prefix = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize(); RelativePathsWithMetadata files; std::string abs_key = key_prefix; @@ -98,7 +98,7 @@ DirectoryIteratorPtr MetadataStorageFromPlainObjectStorage::iterateDirectory(con StoredObjects MetadataStorageFromPlainObjectStorage::getStorageObjects(const std::string & path) const { size_t object_size = getFileSize(path); - auto object_key = object_storage->generateObjectKeyForPath(path); + auto object_key = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */); return {StoredObject(object_key.serialize(), path, object_size)}; } @@ -130,7 +130,7 @@ const IMetadataStorage & MetadataStorageFromPlainObjectStorageTransaction::getSt void MetadataStorageFromPlainObjectStorageTransaction::unlinkFile(const std::string & path) { - auto object_key = metadata_storage.object_storage->generateObjectKeyForPath(path); + auto object_key = metadata_storage.object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */); auto object = StoredObject(object_key.serialize()); metadata_storage.object_storage->removeObject(object); } @@ -155,7 +155,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::createDirectory(const std return; auto normalized_path = normalizeDirectoryPath(path); - auto key_prefix = object_storage->generateObjectKeyPrefixForDirectoryPath(normalized_path).serialize(); + auto key_prefix = object_storage->generateObjectKeyPrefixForDirectoryPath(normalized_path, std::nullopt /* key_prefix */).serialize(); chassert(key_prefix.starts_with(object_storage->getCommonKeyPrefix())); auto op = std::make_unique( std::move(normalized_path), diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index f3d00a928e3..de65cd5c233 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ 
b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -215,7 +215,7 @@ bool MetadataStorageFromPlainRewritableObjectStorage::exists(const std::string & if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) { - auto key_prefix = object_storage->generateObjectKeyForPath(path).serialize(); + auto key_prefix = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize(); chassert(key_prefix.starts_with(object_storage->getCommonKeyPrefix())); auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / key_prefix.substr(object_storage->getCommonKeyPrefix().size()); return object_storage->existsOrHasAnyChild(metadata_key); @@ -228,7 +228,7 @@ bool MetadataStorageFromPlainRewritableObjectStorage::isDirectory(const std::str { if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) { - auto directory = std::filesystem::path(object_storage->generateObjectKeyForPath(path).serialize()) / ""; + auto directory = std::filesystem::path(object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize()) / ""; chassert(directory.string().starts_with(object_storage->getCommonKeyPrefix())); auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / directory.string().substr(object_storage->getCommonKeyPrefix().size()); @@ -240,7 +240,7 @@ bool MetadataStorageFromPlainRewritableObjectStorage::isDirectory(const std::str std::vector MetadataStorageFromPlainRewritableObjectStorage::listDirectory(const std::string & path) const { - auto key_prefix = object_storage->generateObjectKeyForPath(path).serialize(); + auto key_prefix = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize(); RelativePathsWithMetadata files; std::string abs_key = key_prefix; diff --git a/src/Disks/ObjectStorages/PlainObjectStorage.h b/src/Disks/ObjectStorages/PlainObjectStorage.h index e0907d0b4d8..805b3436fce 100644 --- a/src/Disks/ObjectStorages/PlainObjectStorage.h +++ b/src/Disks/ObjectStorages/PlainObjectStorage.h @@ -26,7 +26,7 @@ public: bool isPlain() const override { return true; } - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & /* key_prefix */) const override { return ObjectStorageKey::createAsRelative(BaseObjectStorage::getCommonKeyPrefix(), path); } diff --git a/src/Disks/ObjectStorages/PlainRewritableObjectStorage.h b/src/Disks/ObjectStorages/PlainRewritableObjectStorage.h index 5f000afe625..dcea5964fc5 100644 --- a/src/Disks/ObjectStorages/PlainRewritableObjectStorage.h +++ b/src/Disks/ObjectStorages/PlainRewritableObjectStorage.h @@ -1,5 +1,7 @@ #pragma once +#include +#include #include #include #include "CommonPathPrefixKeyGenerator.h" @@ -33,9 +35,10 @@ public: bool isPlain() const override { return true; } - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const override; - ObjectStorageKey generateObjectKeyPrefixForDirectoryPath(const std::string & path) const override; + ObjectStorageKey + generateObjectKeyPrefixForDirectoryPath(const std::string & path, const std::optional & key_prefix) const override; void setKeysGenerator(ObjectStorageKeysGeneratorPtr gen) override { key_generator = gen; } @@ -46,20 +49,22 @@ private: template -ObjectStorageKey 
PlainRewritableObjectStorage::generateObjectKeyForPath(const std::string & path) const +ObjectStorageKey PlainRewritableObjectStorage::generateObjectKeyForPath( + const std::string & path, const std::optional & key_prefix) const { if (!key_generator) throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set"); - return key_generator->generate(path, /* is_directory */ false); + return key_generator->generate(path, /* is_directory */ false, key_prefix); } template -ObjectStorageKey PlainRewritableObjectStorage::generateObjectKeyPrefixForDirectoryPath(const std::string & path) const +ObjectStorageKey PlainRewritableObjectStorage::generateObjectKeyPrefixForDirectoryPath( + const std::string & path, const std::optional & key_prefix) const { if (!key_generator) throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set"); - return key_generator->generate(path, /* is_directory */ true); + return key_generator->generate(path, /* is_directory */ true, key_prefix); } } diff --git a/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp b/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp index 63e7ebb00c5..b20a2940e47 100644 --- a/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp +++ b/src/Disks/ObjectStorages/S3/DiskS3Utils.cpp @@ -79,7 +79,7 @@ bool checkBatchRemove(S3ObjectStorage & storage) /// We are using generateObjectKeyForPath() which returns random object key. /// That generated key is placed in a right directory where we should have write access. const String path = fmt::format("clickhouse_remove_objects_capability_{}", getServerUUID()); - const auto key = storage.generateObjectKeyForPath(path); + const auto key = storage.generateObjectKeyForPath(path, {} /* key_prefix */); StoredObject object(key.serialize(), path); try { diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index a6672e14e10..3c4b4d76bf5 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -624,12 +624,12 @@ std::unique_ptr S3ObjectStorage::cloneObjectStorage( std::move(new_client), std::move(new_s3_settings), new_uri, s3_capabilities, key_generator, disk_name); } -ObjectStorageKey S3ObjectStorage::generateObjectKeyForPath(const std::string & path) const +ObjectStorageKey S3ObjectStorage::generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const { if (!key_generator) throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set"); - return key_generator->generate(path, /* is_directory */ false); + return key_generator->generate(path, /* is_directory */ false, key_prefix); } std::shared_ptr S3ObjectStorage::getS3StorageClient() diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index cbe004bc298..d786a6b37f3 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -164,7 +164,7 @@ public: bool supportParallelWrite() const override { return true; } - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override; + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & key_prefix) const override; bool isReadOnly() const override { return s3_settings.get()->read_only; } diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index 9ca2950dae0..ab357d6f50d 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ 
b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -82,7 +82,7 @@ public: const std::string & config_prefix, ContextPtr context) override; - ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override + ObjectStorageKey generateObjectKeyForPath(const std::string & path, const std::optional & /* key_prefix */) const override { return ObjectStorageKey::createAsRelative(path); } From 27392fee6eec22c7f2dac3d17f73ab9a528f1fc8 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 28 Jun 2024 01:30:49 +0000 Subject: [PATCH 1404/2361] Minor refactor --- .../CommonPathPrefixKeyGenerator.cpp | 8 ++-- .../MetadataStorageFromPlainObjectStorage.cpp | 38 +++++++------------ .../MetadataStorageFromPlainObjectStorage.h | 8 ---- ...torageFromPlainRewritableObjectStorage.cpp | 26 +++++-------- ...aStorageFromPlainRewritableObjectStorage.h | 2 +- 5 files changed, 28 insertions(+), 54 deletions(-) diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index 0a4426e8e66..ef599a2f366 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -16,15 +16,15 @@ CommonPathPrefixKeyGenerator::CommonPathPrefixKeyGenerator( } ObjectStorageKey -CommonPathPrefixKeyGenerator::generate(const String & path, bool is_directory, const std::optional & /* key_prefix */) const +CommonPathPrefixKeyGenerator::generate(const String & path, bool is_directory, const std::optional & key_prefix) const { const auto & [object_key_prefix, suffix_parts] = getLongestObjectKeyPrefix(path); - auto key = std::filesystem::path(object_key_prefix.empty() ? std::string() : object_key_prefix); + auto key = std::filesystem::path(object_key_prefix); /// The longest prefix is the same as path, meaning that the path is already mapped. if (suffix_parts.empty()) - return ObjectStorageKey::createAsRelative(storage_key_prefix, std::move(key)); + return ObjectStorageKey::createAsRelative(key_prefix.has_value() ? *key_prefix : storage_key_prefix, std::move(key)); /// File and top-level directory paths are mapped as is. if (!is_directory || object_key_prefix.empty()) @@ -40,7 +40,7 @@ CommonPathPrefixKeyGenerator::generate(const String & path, bool is_directory, c key /= getRandomASCIIString(part_size); } - return ObjectStorageKey::createAsRelative(storage_key_prefix, key); + return ObjectStorageKey::createAsRelative(key_prefix.has_value() ? 
*key_prefix : storage_key_prefix, key); } std::tuple> CommonPathPrefixKeyGenerator::getLongestObjectKeyPrefix(const std::string & path) const diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 589b18abca8..02048c07a57 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -80,9 +80,20 @@ std::vector MetadataStorageFromPlainObjectStorage::listDirectory(co object_storage->listObjects(abs_key, files, 0); - std::unordered_set directories; - getDirectChildrenOnDisk(abs_key, object_storage->getCommonKeyPrefix(), files, path, directories); - return std::vector(std::make_move_iterator(directories.begin()), std::make_move_iterator(directories.end())); + std::unordered_set result; + for (const auto & elem : files) + { + const auto & p = elem->relative_path; + chassert(p.find(abs_key) == 0); + const auto child_pos = abs_key.size(); + /// string::npos is ok. + const auto slash_pos = p.find('/', child_pos); + if (slash_pos == std::string::npos) + result.emplace(p.substr(child_pos)); + else + result.emplace(p.substr(child_pos, slash_pos - child_pos)); + } + return std::vector(std::make_move_iterator(result.begin()), std::make_move_iterator(result.end())); } DirectoryIteratorPtr MetadataStorageFromPlainObjectStorage::iterateDirectory(const std::string & path) const @@ -102,27 +113,6 @@ StoredObjects MetadataStorageFromPlainObjectStorage::getStorageObjects(const std return {StoredObject(object_key.serialize(), path, object_size)}; } -void MetadataStorageFromPlainObjectStorage::getDirectChildrenOnDisk( - const std::string & storage_key, - const std::string & /* storage_key_perfix */, - const RelativePathsWithMetadata & remote_paths, - const std::string & /* local_path */, - std::unordered_set & result) const -{ - for (const auto & elem : remote_paths) - { - const auto & path = elem->relative_path; - chassert(path.find(storage_key) == 0); - const auto child_pos = storage_key.size(); - /// string::npos is ok. - const auto slash_pos = path.find('/', child_pos); - if (slash_pos == std::string::npos) - result.emplace(path.substr(child_pos)); - else - result.emplace(path.substr(child_pos, slash_pos - child_pos)); - } -} - const IMetadataStorage & MetadataStorageFromPlainObjectStorageTransaction::getStorageForNonTransactionalReads() const { return metadata_storage; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index 97c5715a937..237327cd1f4 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -86,14 +86,6 @@ protected: /// Returns a map of local paths to paths in object storage. virtual std::shared_ptr getPathMap() const { throwNotImplemented(); } - - /// Retrieves the immediate files and directories within a given directory on a disk. 
- virtual void getDirectChildrenOnDisk( - const std::string & storage_key, - const std::string & storage_key_perfix, - const RelativePathsWithMetadata & remote_paths, - const std::string & local_path, - std::unordered_set & result) const; }; class MetadataStorageFromPlainObjectStorageTransaction final : public IMetadataTransaction, private MetadataOperationsHolder diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index de65cd5c233..b904c0d92b9 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -121,7 +121,7 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri return result; } -void getDirectChildrenOnRewritableDisk( +void getDirectChildrenOnDiskImpl( const std::string & storage_key, const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, @@ -215,10 +215,8 @@ bool MetadataStorageFromPlainRewritableObjectStorage::exists(const std::string & if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) { - auto key_prefix = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize(); - chassert(key_prefix.starts_with(object_storage->getCommonKeyPrefix())); - auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / key_prefix.substr(object_storage->getCommonKeyPrefix().size()); - return object_storage->existsOrHasAnyChild(metadata_key); + auto key_prefix = object_storage->generateObjectKeyForPath(path, getMetadataKeyPrefix()).serialize(); + return object_storage->existsOrHasAnyChild(key_prefix); } return false; @@ -228,11 +226,8 @@ bool MetadataStorageFromPlainRewritableObjectStorage::isDirectory(const std::str { if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) { - auto directory = std::filesystem::path(object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize()) / ""; - chassert(directory.string().starts_with(object_storage->getCommonKeyPrefix())); - auto metadata_key - = std::filesystem::path(getMetadataKeyPrefix()) / directory.string().substr(object_storage->getCommonKeyPrefix().size()); - return object_storage->existsOrHasAnyChild(metadata_key); + auto directory = std::filesystem::path(object_storage->generateObjectKeyForPath(path, getMetadataKeyPrefix()).serialize()) / ""; + return object_storage->existsOrHasAnyChild(directory); } else return MetadataStorageFromPlainObjectStorage::isDirectory(path); @@ -240,12 +235,10 @@ bool MetadataStorageFromPlainRewritableObjectStorage::isDirectory(const std::str std::vector MetadataStorageFromPlainRewritableObjectStorage::listDirectory(const std::string & path) const { - auto key_prefix = object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */).serialize(); + auto key_prefix = object_storage->generateObjectKeyForPath(path, "" /* key_prefix */).serialize(); RelativePathsWithMetadata files; - std::string abs_key = key_prefix; - if (!abs_key.ends_with('/')) - abs_key += '/'; + auto abs_key = std::filesystem::path(object_storage->getCommonKeyPrefix()) / key_prefix / ""; object_storage->listObjects(abs_key, files, 0); @@ -255,8 +248,7 @@ std::vector MetadataStorageFromPlainRewritableObjectStorage::listDi /// metadata along with regular files. 
if (object_storage->getCommonKeyPrefix() != getMetadataKeyPrefix()) { - chassert(abs_key.starts_with(object_storage->getCommonKeyPrefix())); - auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / abs_key.substr(object_storage->getCommonKeyPrefix().size()); + auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / key_prefix / ""; RelativePathsWithMetadata metadata_files; object_storage->listObjects(metadata_key, metadata_files, 0); getDirectChildrenOnDisk(metadata_key, getMetadataKeyPrefix(), metadata_files, path, directories); @@ -272,7 +264,7 @@ void MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( const std::string & local_path, std::unordered_set & result) const { - getDirectChildrenOnRewritableDisk(storage_key, storage_key_perfix, remote_paths, local_path, *getPathMap(), metadata_mutex, result); + getDirectChildrenOnDiskImpl(storage_key, storage_key_perfix, remote_paths, local_path, *getPathMap(), metadata_mutex, result); } } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h index 71153cbdc25..b067b391878 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h @@ -33,7 +33,7 @@ protected: const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, - std::unordered_set & result) const override; + std::unordered_set & result) const; }; } From ecca720f9e076e49f280e48c6ff4046a19894b2a Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 28 Jun 2024 06:42:38 +0000 Subject: [PATCH 1405/2361] minor --- .../ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 02048c07a57..7553c7733b5 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -145,11 +145,10 @@ void MetadataStorageFromPlainObjectStorageTransaction::createDirectory(const std return; auto normalized_path = normalizeDirectoryPath(path); - auto key_prefix = object_storage->generateObjectKeyPrefixForDirectoryPath(normalized_path, std::nullopt /* key_prefix */).serialize(); - chassert(key_prefix.starts_with(object_storage->getCommonKeyPrefix())); + auto key_prefix = object_storage->generateObjectKeyPrefixForDirectoryPath(normalized_path, "" /* key_prefix */).serialize(); auto op = std::make_unique( std::move(normalized_path), - key_prefix.substr(object_storage->getCommonKeyPrefix().size()), + std::move(key_prefix), *metadata_storage.getPathMap(), object_storage, metadata_storage.getMetadataKeyPrefix()); From 97519ae800b9a26942973d888354a2b013d53cc6 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Thu, 4 Jul 2024 07:02:13 +0000 Subject: [PATCH 1406/2361] in-memory map path comparator --- .../CommonPathPrefixKeyGenerator.cpp | 5 +++-- .../CommonPathPrefixKeyGenerator.h | 4 +++- .../MetadataStorageFromPlainObjectStorage.h | 3 ++- ...torageFromPlainObjectStorageOperations.cpp | 18 ++++++++--------- ...torageFromPlainRewritableObjectStorage.cpp | 9 ++++----- src/Disks/ObjectStorages/PathComparator.h | 20 +++++++++++++++++++ 6 files changed, 41 insertions(+), 18 deletions(-) create mode 
100644 src/Disks/ObjectStorages/PathComparator.h diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index ef599a2f366..062a2542654 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -18,7 +18,8 @@ CommonPathPrefixKeyGenerator::CommonPathPrefixKeyGenerator( ObjectStorageKey CommonPathPrefixKeyGenerator::generate(const String & path, bool is_directory, const std::optional & key_prefix) const { - const auto & [object_key_prefix, suffix_parts] = getLongestObjectKeyPrefix(path); + const auto & [object_key_prefix, suffix_parts] + = getLongestObjectKeyPrefix(is_directory ? std::filesystem::path(path).parent_path().string() : path); auto key = std::filesystem::path(object_key_prefix); @@ -54,7 +55,7 @@ std::tuple> CommonPathPrefixKeyGenerator:: while (p != p.root_path()) { - auto it = ptr->find(p / ""); + auto it = ptr->find(p); if (it != ptr->end()) { std::vector vec(std::make_move_iterator(dq.begin()), std::make_move_iterator(dq.end())); diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h index 08495738505..bca4f7060c4 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h @@ -3,6 +3,8 @@ #include #include +#include + #include #include #include @@ -23,7 +25,7 @@ class CommonPathPrefixKeyGenerator : public IObjectStorageKeysGenerator { public: /// Local to remote path map. Leverages filesystem::path comparator for paths. - using PathMap = std::map; + using PathMap = std::map; explicit CommonPathPrefixKeyGenerator(String key_prefix_, SharedMutex & shared_mutex_, std::weak_ptr path_map_); diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index 237327cd1f4..9ea1c475821 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -29,7 +30,7 @@ class MetadataStorageFromPlainObjectStorage : public IMetadataStorage { public: /// Local path prefixes mapped to storage key prefixes. 
- using PathMap = std::map; + using PathMap = std::map; private: friend class MetadataStorageFromPlainObjectStorageTransaction; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 0a6086bd39d..b0b384f62c7 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -43,7 +43,7 @@ MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std::unique_lock &) { - if (path_map.contains(path)) + if (path_map.contains(path.parent_path())) return; auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); @@ -64,7 +64,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: write_created = true; - [[maybe_unused]] auto result = path_map.emplace(path, std::move(key_prefix)); + [[maybe_unused]] auto result = path_map.emplace(path.parent_path(), std::move(key_prefix)); chassert(result.second); auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::add(metric, 1); @@ -84,7 +84,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::un if (write_finalized) { - path_map.erase(path); + path_map.erase(path.parent_path()); auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); @@ -111,11 +111,11 @@ MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFrom std::unique_ptr MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::createWriteBuf( const std::filesystem::path & expected_path, const std::filesystem::path & new_path, bool validate_content) { - auto expected_it = path_map.find(expected_path); + auto expected_it = path_map.find(expected_path.parent_path()); if (expected_it == path_map.end()) throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Metadata object for the expected (source) path '{}' does not exist", expected_path); - if (path_map.contains(new_path)) + if (path_map.contains(new_path.parent_path())) throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Metadata object for the new (destination) path '{}' already exists", new_path); auto metadata_object_key = createMetadataObjectKey(expected_it->second, metadata_key_prefix); @@ -156,7 +156,7 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u writeString(path_to.string(), *write_buf); write_buf->finalize(); - [[maybe_unused]] auto result = path_map.emplace(path_to, path_map.extract(path_from).mapped()); + [[maybe_unused]] auto result = path_map.emplace(path_to.parent_path(), path_map.extract(path_from.parent_path()).mapped()); chassert(result.second); write_finalized = true; @@ -165,7 +165,7 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::unique_lock &) { if (write_finalized) - path_map.emplace(path_from, path_map.extract(path_to).mapped()); + path_map.emplace(path_from.parent_path(), path_map.extract(path_to.parent_path()).mapped()); if (write_created) { @@ -186,7 +186,7 @@ MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std::unique_lock & /* metadata_lock */) { - auto 
path_it = path_map.find(path); + auto path_it = path_map.find(path.parent_path()); if (path_it == path_map.end()) return; @@ -223,7 +223,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un writeString(path.string(), *buf); buf->finalize(); - path_map.emplace(path, std::move(key_prefix)); + path_map.emplace(path.parent_path(), std::move(key_prefix)); auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::add(metric, 1); } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index b904c0d92b9..ba8dfc891dd 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -97,7 +97,7 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri std::pair res; { std::lock_guard lock(mutex); - res = result.emplace(local_path, remote_path.parent_path()); + res = result.emplace(std::filesystem::path(local_path).parent_path(), remote_path.parent_path()); } /// This can happen if table replication is enabled, then the same local path is written @@ -145,11 +145,10 @@ void getDirectChildrenOnDiskImpl( break; auto slash_num = count(k.begin() + local_path.size(), k.end(), '/'); - if (slash_num != 1) + if (slash_num != 0) continue; - chassert(k.back() == '/'); - remote_to_local_subdir.emplace(v, std::string(k.begin() + local_path.size(), k.end() - 1)); + remote_to_local_subdir.emplace(v, std::string(k.begin() + local_path.size(), k.end()) + "/"); } } @@ -243,7 +242,7 @@ std::vector MetadataStorageFromPlainRewritableObjectStorage::listDi object_storage->listObjects(abs_key, files, 0); std::unordered_set directories; - getDirectChildrenOnDisk(abs_key, object_storage->getCommonKeyPrefix(), files, path, directories); + getDirectChildrenOnDisk(abs_key, object_storage->getCommonKeyPrefix(), files, std::filesystem::path(path) / "", directories); /// List empty directories that are identified by the `prefix.path` metadata files. This is required to, e.g., remove /// metadata along with regular files. 
if (object_storage->getCommonKeyPrefix() != getMetadataKeyPrefix()) diff --git a/src/Disks/ObjectStorages/PathComparator.h b/src/Disks/ObjectStorages/PathComparator.h new file mode 100644 index 00000000000..fe97a465937 --- /dev/null +++ b/src/Disks/ObjectStorages/PathComparator.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace DB +{ +// TODO: rename +struct PathComparator +{ + bool operator()(const std::filesystem::path & path1, const std::filesystem::path & path2) const + { + auto d1 = std::distance(path1.begin(), path1.end()); + auto d2 = std::distance(path2.begin(), path2.end()); + if (d1 != d2) + return d1 < d2; + return path1 < path2; + } +}; + +} From aa290b6398a5affa8405d3584795bce6bf7450d4 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Wed, 10 Jul 2024 01:15:57 +0000 Subject: [PATCH 1407/2361] use a designated mutex for path_map --- .../CommonPathPrefixKeyGenerator.cpp | 13 ++- .../CommonPathPrefixKeyGenerator.h | 6 +- .../MetadataStorageFromPlainObjectStorage.h | 6 +- ...torageFromPlainObjectStorageOperations.cpp | 99 ++++++++++++++----- ...aStorageFromPlainObjectStorageOperations.h | 13 +-- ...torageFromPlainRewritableObjectStorage.cpp | 25 ++--- ...aStorageFromPlainRewritableObjectStorage.h | 4 +- src/Disks/ObjectStorages/PathComparator.h | 27 +++-- 8 files changed, 122 insertions(+), 71 deletions(-) diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index 062a2542654..19dd819fc17 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -1,4 +1,5 @@ #include "CommonPathPrefixKeyGenerator.h" +#include "Disks/ObjectStorages/PathComparator.h" #include @@ -9,9 +10,8 @@ namespace DB { -CommonPathPrefixKeyGenerator::CommonPathPrefixKeyGenerator( - String key_prefix_, SharedMutex & shared_mutex_, std::weak_ptr path_map_) - : storage_key_prefix(key_prefix_), shared_mutex(shared_mutex_), path_map(std::move(path_map_)) +CommonPathPrefixKeyGenerator::CommonPathPrefixKeyGenerator(String key_prefix_, std::weak_ptr path_map_) + : storage_key_prefix(key_prefix_), path_map(std::move(path_map_)) { } @@ -49,14 +49,13 @@ std::tuple> CommonPathPrefixKeyGenerator:: std::filesystem::path p(path); std::deque dq; - std::shared_lock lock(shared_mutex); - auto ptr = path_map.lock(); + std::shared_lock lock(ptr->mutex); while (p != p.root_path()) { - auto it = ptr->find(p); - if (it != ptr->end()) + auto it = ptr->map.find(p); + if (it != ptr->map.end()) { std::vector vec(std::make_move_iterator(dq.begin()), std::make_move_iterator(dq.end())); return std::make_tuple(it->second, std::move(vec)); diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h index bca4f7060c4..e337745b627 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h @@ -25,9 +25,8 @@ class CommonPathPrefixKeyGenerator : public IObjectStorageKeysGenerator { public: /// Local to remote path map. Leverages filesystem::path comparator for paths. 
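A quick note on the comparator added in the previous patch and relied on by this map: it orders paths by component count first and lexicographically second, so a parent directory always sorts before its descendants. A minimal standalone sketch of that ordering follows; it restates the comparator for illustration and is not the ClickHouse type itself.

#include <filesystem>
#include <iostream>
#include <map>

/// Same ordering idea as PathComparator: fewer path components sort first,
/// lexicographic order breaks ties.
struct ComponentCountFirst
{
    bool operator()(const std::filesystem::path & a, const std::filesystem::path & b) const
    {
        auto d1 = std::distance(a.begin(), a.end());
        auto d2 = std::distance(b.begin(), b.end());
        if (d1 != d2)
            return d1 < d2;
        return a < b;
    }
};

int main()
{
    std::map<std::filesystem::path, std::string, ComponentCountFirst> map;
    map.emplace("tbl/part_1/col.bin", "deepest");
    map.emplace("tbl", "shallowest");
    map.emplace("tbl/part_1", "middle");

    for (const auto & [key, value] : map)
        std::cout << key << '\n'; /// prints "tbl", then "tbl/part_1", then "tbl/part_1/col.bin"
}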
- using PathMap = std::map; - explicit CommonPathPrefixKeyGenerator(String key_prefix_, SharedMutex & shared_mutex_, std::weak_ptr path_map_); + explicit CommonPathPrefixKeyGenerator(String key_prefix_, std::weak_ptr path_map_); ObjectStorageKey generate(const String & path, bool is_directory, const std::optional & key_prefix) const override; @@ -37,8 +36,7 @@ private: const String storage_key_prefix; - SharedMutex & shared_mutex; - std::weak_ptr path_map; + std::weak_ptr path_map; }; } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index 9ea1c475821..dfb9632666c 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -28,10 +28,6 @@ using UnlinkMetadataFileOperationOutcomePtr = std::shared_ptr; - private: friend class MetadataStorageFromPlainObjectStorageTransaction; @@ -86,7 +82,7 @@ protected: virtual std::string getMetadataKeyPrefix() const { return object_storage->getCommonKeyPrefix(); } /// Returns a map of local paths to paths in object storage. - virtual std::shared_ptr getPathMap() const { throwNotImplemented(); } + virtual std::shared_ptr getPathMap() const { throwNotImplemented(); } }; class MetadataStorageFromPlainObjectStorageTransaction final : public IMetadataTransaction, private MetadataOperationsHolder diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index b0b384f62c7..b4a85efbaab 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -1,4 +1,5 @@ #include "MetadataStorageFromPlainObjectStorageOperations.h" +#include "Disks/ObjectStorages/PathComparator.h" #include #include @@ -30,7 +31,7 @@ ObjectStorageKey createMetadataObjectKey(const std::string & key_prefix, const s MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( std::filesystem::path && path_, std::string && key_prefix_, - MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) : path(std::move(path_)) @@ -43,8 +44,13 @@ MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std::unique_lock &) { - if (path_map.contains(path.parent_path())) - return; + auto & map = path_map.map; + auto & mutex = path_map.mutex; + { + std::shared_lock lock(mutex); + if (map.contains(path.parent_path())) + return; + } auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); @@ -64,8 +70,11 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: write_created = true; - [[maybe_unused]] auto result = path_map.emplace(path.parent_path(), std::move(key_prefix)); - chassert(result.second); + { + std::unique_lock lock(mutex); + [[maybe_unused]] auto result = map.emplace(path.parent_path(), std::move(key_prefix)); + chassert(result.second); + } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::add(metric, 1); @@ -80,11 +89,17 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: void 
MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock &) { + auto & map = path_map.map; + auto & mutex = path_map.mutex; + auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); if (write_finalized) { - path_map.erase(path.parent_path()); + { + std::unique_lock lock(mutex); + map.erase(path.parent_path()); + } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); @@ -97,7 +112,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::un MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFromPlainObjectStorageMoveDirectoryOperation( std::filesystem::path && path_from_, std::filesystem::path && path_to_, - MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) : path_from(std::move(path_from_)) @@ -111,14 +126,25 @@ MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFrom std::unique_ptr MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::createWriteBuf( const std::filesystem::path & expected_path, const std::filesystem::path & new_path, bool validate_content) { - auto expected_it = path_map.find(expected_path.parent_path()); - if (expected_it == path_map.end()) - throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Metadata object for the expected (source) path '{}' does not exist", expected_path); + auto & map = path_map.map; + auto & mutex = path_map.mutex; - if (path_map.contains(new_path.parent_path())) - throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Metadata object for the new (destination) path '{}' already exists", new_path); + std::filesystem::path remote_path; + { + std::shared_lock lock(mutex); + auto expected_it = map.find(expected_path.parent_path()); + if (expected_it == map.end()) + throw Exception( + ErrorCodes::FILE_DOESNT_EXIST, "Metadata object for the expected (source) path '{}' does not exist", expected_path); - auto metadata_object_key = createMetadataObjectKey(expected_it->second, metadata_key_prefix); + if (map.contains(new_path.parent_path())) + throw Exception( + ErrorCodes::FILE_ALREADY_EXISTS, "Metadata object for the new (destination) path '{}' already exists", new_path); + + remote_path = expected_it->second; + } + + auto metadata_object_key = createMetadataObjectKey(remote_path, metadata_key_prefix); auto metadata_object = StoredObject(metadata_object_key.serialize(), expected_path / PREFIX_PATH_FILE_NAME); @@ -156,8 +182,13 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u writeString(path_to.string(), *write_buf); write_buf->finalize(); - [[maybe_unused]] auto result = path_map.emplace(path_to.parent_path(), path_map.extract(path_from.parent_path()).mapped()); - chassert(result.second); + auto & map = path_map.map; + auto & mutex = path_map.mutex; + { + std::unique_lock lock(mutex); + [[maybe_unused]] auto result = map.emplace(path_to.parent_path(), map.extract(path_from.parent_path()).mapped()); + chassert(result.second); + } write_finalized = true; } @@ -165,7 +196,12 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::unique_lock &) { if (write_finalized) - path_map.emplace(path_from.parent_path(), path_map.extract(path_to.parent_path()).mapped()); + { + auto & map = path_map.map; + auto & mutex = 
path_map.mutex; + std::unique_lock lock(mutex); + map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped()); + } if (write_created) { @@ -176,28 +212,34 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq } MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( - std::filesystem::path && path_, - MetadataStorageFromPlainObjectStorage::PathMap & path_map_, - ObjectStoragePtr object_storage_, - const std::string & metadata_key_prefix_) + std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) : path(std::move(path_)), path_map(path_map_), object_storage(object_storage_), metadata_key_prefix(metadata_key_prefix_) { } void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std::unique_lock & /* metadata_lock */) { - auto path_it = path_map.find(path.parent_path()); - if (path_it == path_map.end()) - return; + auto & map = path_map.map; + auto & mutex = path_map.mutex; + { + std::shared_lock lock(mutex); + auto path_it = map.find(path.parent_path()); + if (path_it == map.end()) + return; + key_prefix = path_it->second; + } LOG_TRACE(getLogger("MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation"), "Removing directory '{}'", path); - key_prefix = path_it->second; auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME); object_storage->removeObject(metadata_object); - path_map.erase(path_it); + { + std::unique_lock lock(mutex); + map.erase(path.parent_path()); + } + auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); @@ -223,7 +265,12 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un writeString(path.string(), *buf); buf->finalize(); - path_map.emplace(path.parent_path(), std::move(key_prefix)); + auto & map = path_map.map; + auto & mutex = path_map.mutex; + { + std::unique_lock lock(mutex); + map.emplace(path.parent_path(), std::move(key_prefix)); + } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::add(metric, 1); } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h index e31e3cbb262..1b2471dd316 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h @@ -2,6 +2,7 @@ #include #include +#include "Disks/ObjectStorages/PathComparator.h" #include #include @@ -14,7 +15,7 @@ class MetadataStorageFromPlainObjectStorageCreateDirectoryOperation final : publ private: std::filesystem::path path; std::string key_prefix; - MetadataStorageFromPlainObjectStorage::PathMap & path_map; + InMemoryPathMap & path_map; ObjectStoragePtr object_storage; const std::string metadata_key_prefix; @@ -26,7 +27,7 @@ public: MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( std::filesystem::path && path_, std::string && key_prefix_, - MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_); @@ -39,7 +40,7 @@ class MetadataStorageFromPlainObjectStorageMoveDirectoryOperation final : public 
private: std::filesystem::path path_from; std::filesystem::path path_to; - MetadataStorageFromPlainObjectStorage::PathMap & path_map; + InMemoryPathMap & path_map; ObjectStoragePtr object_storage; const std::string metadata_key_prefix; @@ -53,7 +54,7 @@ public: MetadataStorageFromPlainObjectStorageMoveDirectoryOperation( std::filesystem::path && path_from_, std::filesystem::path && path_to_, - MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_); @@ -67,7 +68,7 @@ class MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation final : publ private: std::filesystem::path path; - MetadataStorageFromPlainObjectStorage::PathMap & path_map; + InMemoryPathMap & path_map; ObjectStoragePtr object_storage; const std::string metadata_key_prefix; @@ -77,7 +78,7 @@ private: public: MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( std::filesystem::path && path_, - MetadataStorageFromPlainObjectStorage::PathMap & path_map_, + InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_); diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index ba8dfc891dd..cf51a6a5314 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -3,11 +3,12 @@ #include #include -#include #include +#include #include #include #include "CommonPathPrefixKeyGenerator.h" +#include "Disks/ObjectStorages/PathComparator.h" namespace DB @@ -37,9 +38,10 @@ std::string getMetadataKeyPrefix(ObjectStoragePtr object_storage) : metadata_key_prefix; } -MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::string & metadata_key_prefix, ObjectStoragePtr object_storage) +InMemoryPathMap::Map loadPathPrefixMap(const std::string & metadata_key_prefix, ObjectStoragePtr object_storage) { - MetadataStorageFromPlainObjectStorage::PathMap result; + using Map = InMemoryPathMap::Map; + Map result; ThreadPool & pool = getIOThreadPool().get(); ThreadPoolCallbackRunnerLocal runner(pool, "PlainRWMetaLoad"); @@ -94,7 +96,7 @@ MetadataStorageFromPlainObjectStorage::PathMap loadPathPrefixMap(const std::stri chassert(remote_metadata_path.string().starts_with(metadata_key_prefix)); auto suffix = remote_metadata_path.string().substr(metadata_key_prefix.size()); auto remote_path = std::filesystem::path(std::move(suffix)); - std::pair res; + std::pair res; { std::lock_guard lock(mutex); res = result.emplace(std::filesystem::path(local_path).parent_path(), remote_path.parent_path()); @@ -126,14 +128,13 @@ void getDirectChildrenOnDiskImpl( const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, - const MetadataStorageFromPlainObjectStorage::PathMap & local_path_prefixes, + const InMemoryPathMap::Map & local_path_prefixes, SharedMutex & shared_mutex, std::unordered_set & result) { - using PathMap = MetadataStorageFromPlainObjectStorage::PathMap; - /// Map remote paths into local subdirectories. 
- std::unordered_map remote_to_local_subdir; + using Map = InMemoryPathMap::Map; + std::unordered_map remote_to_local_subdir; { std::shared_lock lock(shared_mutex); @@ -189,7 +190,7 @@ MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewrita ObjectStoragePtr object_storage_, String storage_path_prefix_) : MetadataStorageFromPlainObjectStorage(object_storage_, storage_path_prefix_) , metadata_key_prefix(DB::getMetadataKeyPrefix(object_storage)) - , path_map(std::make_shared(loadPathPrefixMap(metadata_key_prefix, object_storage))) + , path_map(std::make_shared(loadPathPrefixMap(metadata_key_prefix, object_storage))) { if (object_storage->isWriteOnce()) throw Exception( @@ -197,14 +198,14 @@ MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewrita "MetadataStorageFromPlainRewritableObjectStorage is not compatible with write-once storage '{}'", object_storage->getName()); - auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), metadata_mutex, path_map); + auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); object_storage->setKeysGenerator(keys_gen); } MetadataStorageFromPlainRewritableObjectStorage::~MetadataStorageFromPlainRewritableObjectStorage() { auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; - CurrentMetrics::sub(metric, path_map->size()); + CurrentMetrics::sub(metric, path_map->map.size()); } bool MetadataStorageFromPlainRewritableObjectStorage::exists(const std::string & path) const @@ -263,7 +264,7 @@ void MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( const std::string & local_path, std::unordered_set & result) const { - getDirectChildrenOnDiskImpl(storage_key, storage_key_perfix, remote_paths, local_path, *getPathMap(), metadata_mutex, result); + getDirectChildrenOnDiskImpl(storage_key, storage_key_perfix, remote_paths, local_path, getPathMap()->map, getPathMap()->mutex, result); } } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h index b067b391878..fea461abab8 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h @@ -13,7 +13,7 @@ class MetadataStorageFromPlainRewritableObjectStorage final : public MetadataSto { private: const std::string metadata_key_prefix; - std::shared_ptr path_map; + std::shared_ptr path_map; public: MetadataStorageFromPlainRewritableObjectStorage(ObjectStoragePtr object_storage_, String storage_path_prefix_); @@ -27,7 +27,7 @@ public: protected: std::string getMetadataKeyPrefix() const override { return metadata_key_prefix; } - std::shared_ptr getPathMap() const override { return path_map; } + std::shared_ptr getPathMap() const override { return path_map; } void getDirectChildrenOnDisk( const std::string & storage_key, const std::string & storage_key_perfix, diff --git a/src/Disks/ObjectStorages/PathComparator.h b/src/Disks/ObjectStorages/PathComparator.h index fe97a465937..fae82108789 100644 --- a/src/Disks/ObjectStorages/PathComparator.h +++ b/src/Disks/ObjectStorages/PathComparator.h @@ -1,20 +1,29 @@ #pragma once #include +#include +#include "Common/SharedMutex.h" namespace DB { -// TODO: rename -struct PathComparator + + +struct InMemoryPathMap { - bool operator()(const std::filesystem::path & path1, const std::filesystem::path & path2) const + struct PathComparator { - auto 
d1 = std::distance(path1.begin(), path1.end()); - auto d2 = std::distance(path2.begin(), path2.end()); - if (d1 != d2) - return d1 < d2; - return path1 < path2; - } + bool operator()(const std::filesystem::path & path1, const std::filesystem::path & path2) const + { + auto d1 = std::distance(path1.begin(), path1.end()); + auto d2 = std::distance(path2.begin(), path2.end()); + if (d1 != d2) + return d1 < d2; + return path1 < path2; + } + }; + using Map = std::map; + Map map; + SharedMutex mutex; }; } From 0e78ed6b580646cc08721eef415ffb3fe2f697cb Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Wed, 10 Jul 2024 23:04:21 +0000 Subject: [PATCH 1408/2361] simplify listDirectory --- ...torageFromPlainRewritableObjectStorage.cpp | 28 ++++--------------- ...aStorageFromPlainRewritableObjectStorage.h | 1 - 2 files changed, 5 insertions(+), 24 deletions(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index cf51a6a5314..6a0eff0a136 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -125,17 +125,12 @@ InMemoryPathMap::Map loadPathPrefixMap(const std::string & metadata_key_prefix, void getDirectChildrenOnDiskImpl( const std::string & storage_key, - const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, const InMemoryPathMap::Map & local_path_prefixes, SharedMutex & shared_mutex, std::unordered_set & result) { - /// Map remote paths into local subdirectories. - using Map = InMemoryPathMap::Map; - std::unordered_map remote_to_local_subdir; - { std::shared_lock lock(shared_mutex); auto end_it = local_path_prefixes.end(); @@ -147,9 +142,9 @@ void getDirectChildrenOnDiskImpl( auto slash_num = count(k.begin() + local_path.size(), k.end(), '/'); if (slash_num != 0) - continue; + break; - remote_to_local_subdir.emplace(v, std::string(k.begin() + local_path.size(), k.end()) + "/"); + result.emplace(std::string(k.begin() + local_path.size(), k.end()) + "/"); } } @@ -169,18 +164,6 @@ void getDirectChildrenOnDiskImpl( if (!skip_list.contains(filename)) result.emplace(std::move(filename)); } - else - { - /// Subdirectories. - chassert(path.find(storage_key_perfix) == 0); - auto it = remote_to_local_subdir.find(path.substr(storage_key_perfix.size(), slash_pos - storage_key_perfix.size())); - /// Mapped subdirectories. - if (it != remote_to_local_subdir.end()) - result.emplace(it->second); - /// The remote subdirectory name is the same as the local subdirectory. - else - result.emplace(path.substr(child_pos, slash_pos - child_pos)); - } } } @@ -243,7 +226,7 @@ std::vector MetadataStorageFromPlainRewritableObjectStorage::listDi object_storage->listObjects(abs_key, files, 0); std::unordered_set directories; - getDirectChildrenOnDisk(abs_key, object_storage->getCommonKeyPrefix(), files, std::filesystem::path(path) / "", directories); + getDirectChildrenOnDisk(abs_key, files, std::filesystem::path(path) / "", directories); /// List empty directories that are identified by the `prefix.path` metadata files. This is required to, e.g., remove /// metadata along with regular files. 
if (object_storage->getCommonKeyPrefix() != getMetadataKeyPrefix()) @@ -251,7 +234,7 @@ std::vector MetadataStorageFromPlainRewritableObjectStorage::listDi auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / key_prefix / ""; RelativePathsWithMetadata metadata_files; object_storage->listObjects(metadata_key, metadata_files, 0); - getDirectChildrenOnDisk(metadata_key, getMetadataKeyPrefix(), metadata_files, path, directories); + getDirectChildrenOnDisk(metadata_key, metadata_files, std::filesystem::path(path) / "", directories); } return std::vector(std::make_move_iterator(directories.begin()), std::make_move_iterator(directories.end())); @@ -259,12 +242,11 @@ std::vector MetadataStorageFromPlainRewritableObjectStorage::listDi void MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( const std::string & storage_key, - const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, std::unordered_set & result) const { - getDirectChildrenOnDiskImpl(storage_key, storage_key_perfix, remote_paths, local_path, getPathMap()->map, getPathMap()->mutex, result); + getDirectChildrenOnDiskImpl(storage_key, remote_paths, local_path, getPathMap()->map, getPathMap()->mutex, result); } } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h index fea461abab8..8fd147e15b9 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h @@ -30,7 +30,6 @@ protected: std::shared_ptr getPathMap() const override { return path_map; } void getDirectChildrenOnDisk( const std::string & storage_key, - const std::string & storage_key_perfix, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, std::unordered_set & result) const; From 82f5aceb484a322960065f973bec2b61c31219aa Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Thu, 11 Jul 2024 06:22:26 +0000 Subject: [PATCH 1409/2361] introduce flat structure --- .../FlatStructureKeyGenerator.cpp | 51 +++++++++++++++++++ .../FlatStructureKeyGenerator.h | 23 +++++++++ ...torageFromPlainRewritableObjectStorage.cpp | 15 ++++-- 3 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp create mode 100644 src/Disks/ObjectStorages/FlatStructureKeyGenerator.h diff --git a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp new file mode 100644 index 00000000000..d6fb32b65d4 --- /dev/null +++ b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp @@ -0,0 +1,51 @@ +#include "FlatStructureKeyGenerator.h" +#include "Common/ObjectStorageKey.h" +#include "Common/SharedMutex.h" +#include "Disks/ObjectStorages/PathComparator.h" +#include + +#include +#include +#include + +namespace DB +{ + +FlatStructureKeyGenerator::FlatStructureKeyGenerator(String storage_key_prefix_, std::weak_ptr path_map_) + : storage_key_prefix(storage_key_prefix_), path_map(std::move(path_map_)) +{ +} + +ObjectStorageKey FlatStructureKeyGenerator::generate(const String & path, bool is_directory, const std::optional & key_prefix) const +{ + if (is_directory) + chassert(path.ends_with('/')); + + const auto p = std::filesystem::path(path); + auto directory = p.parent_path(); + + constexpr size_t part_size = 32; + + std::optional remote_path; + { + auto ptr = 
path_map.lock(); + std::shared_lock lock(ptr->mutex); + auto it = ptr->map.find(p); + if (it != ptr->map.end()) + return ObjectStorageKey::createAsRelative(key_prefix.has_value() ? *key_prefix : storage_key_prefix, it->second); + + it = ptr->map.find(directory); + if (it != ptr->map.end()) + remote_path = it->second; + } + std::filesystem::path key = remote_path.has_value() ? *remote_path + : is_directory ? std::filesystem::path(getRandomASCIIString(part_size)) + : directory; + + if (!is_directory) + key /= p.filename(); + + return ObjectStorageKey::createAsRelative(key_prefix.has_value() ? *key_prefix : storage_key_prefix, key); +} + +} diff --git a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h new file mode 100644 index 00000000000..2c585dffb81 --- /dev/null +++ b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +#include +namespace DB +{ + +class FlatStructureKeyGenerator : public IObjectStorageKeysGenerator +{ +public: + explicit FlatStructureKeyGenerator(String storage_key_prefix_, std::weak_ptr path_map_); + + ObjectStorageKey generate(const String & path, bool is_directory, const std::optional & key_prefix) const override; + +private: + const String storage_key_prefix; + + std::weak_ptr path_map; +}; + +} diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index 6a0eff0a136..afaa7bf06ff 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -1,5 +1,7 @@ #include +#include #include +#include #include #include @@ -8,7 +10,6 @@ #include #include #include "CommonPathPrefixKeyGenerator.h" -#include "Disks/ObjectStorages/PathComparator.h" namespace DB @@ -181,8 +182,16 @@ MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewrita "MetadataStorageFromPlainRewritableObjectStorage is not compatible with write-once storage '{}'", object_storage->getName()); - auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); - object_storage->setKeysGenerator(keys_gen); + if (getMetadataKeyPrefix() == object_storage->getCommonKeyPrefix()) + { + auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); + object_storage->setKeysGenerator(keys_gen); + } + else + { + auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); + object_storage->setKeysGenerator(keys_gen); + } } MetadataStorageFromPlainRewritableObjectStorage::~MetadataStorageFromPlainRewritableObjectStorage() From c0e6780dfe5977316e50e587877f3fe6ef11d048 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Thu, 11 Jul 2024 22:48:07 +0000 Subject: [PATCH 1410/2361] rename PathComparator.h -> InMemoryPathMap.h --- src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp | 4 ++-- src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h | 5 ++--- src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp | 2 +- src/Disks/ObjectStorages/FlatStructureKeyGenerator.h | 2 +- .../ObjectStorages/{PathComparator.h => InMemoryPathMap.h} | 0 .../ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp | 1 + .../ObjectStorages/MetadataStorageFromPlainObjectStorage.h | 3 ++- .../MetadataStorageFromPlainObjectStorageOperations.cpp | 2 +- .../MetadataStorageFromPlainObjectStorageOperations.h | 2 +- 
.../MetadataStorageFromPlainRewritableObjectStorage.cpp | 4 ++-- 10 files changed, 13 insertions(+), 12 deletions(-) rename src/Disks/ObjectStorages/{PathComparator.h => InMemoryPathMap.h} (100%) diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index 19dd819fc17..1fa06823bae 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -1,5 +1,5 @@ -#include "CommonPathPrefixKeyGenerator.h" -#include "Disks/ObjectStorages/PathComparator.h" +#include +#include #include diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h index e337745b627..8b5037e3804 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h @@ -1,9 +1,6 @@ #pragma once #include -#include - -#include #include #include @@ -21,6 +18,8 @@ namespace DB /// /// The key generator ensures that the original directory hierarchy is /// preserved, which is required for the MergeTree family. + +struct InMemoryPathMap; class CommonPathPrefixKeyGenerator : public IObjectStorageKeysGenerator { public: diff --git a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp index d6fb32b65d4..414aea2b08b 100644 --- a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp @@ -1,7 +1,7 @@ #include "FlatStructureKeyGenerator.h" +#include #include "Common/ObjectStorageKey.h" #include "Common/SharedMutex.h" -#include "Disks/ObjectStorages/PathComparator.h" #include #include diff --git a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h index 2c585dffb81..6b5b2203bed 100644 --- a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h +++ b/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h @@ -1,12 +1,12 @@ #pragma once -#include #include #include namespace DB { +struct InMemoryPathMap; class FlatStructureKeyGenerator : public IObjectStorageKeysGenerator { public: diff --git a/src/Disks/ObjectStorages/PathComparator.h b/src/Disks/ObjectStorages/InMemoryPathMap.h similarity index 100% rename from src/Disks/ObjectStorages/PathComparator.h rename to src/Disks/ObjectStorages/InMemoryPathMap.h diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 7553c7733b5..364d04e2b52 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -1,5 +1,6 @@ #include "MetadataStorageFromPlainObjectStorage.h" #include +#include #include #include diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index dfb9632666c..a9a1a648f96 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -2,9 +2,9 @@ #include #include +#include #include #include -#include #include #include @@ -13,6 +13,7 @@ namespace DB { +struct InMemoryPathMap; struct UnlinkMetadataFileOperationOutcome; using UnlinkMetadataFileOperationOutcomePtr = std::shared_ptr; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp 
b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index b4a85efbaab..6f5109faec4 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -1,5 +1,5 @@ #include "MetadataStorageFromPlainObjectStorageOperations.h" -#include "Disks/ObjectStorages/PathComparator.h" +#include #include #include diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h index 1b2471dd316..778585fa758 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h @@ -1,8 +1,8 @@ #pragma once #include +#include #include -#include "Disks/ObjectStorages/PathComparator.h" #include #include diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index afaa7bf06ff..c312eae4077 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -1,7 +1,7 @@ -#include #include +#include +#include #include -#include #include #include From 3f066018fb0c74783ee486f54fc472ffd9cd7cc1 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 12 Jul 2024 02:27:45 +0000 Subject: [PATCH 1411/2361] style and doc --- src/Common/ObjectStorageKeyGenerator.cpp | 1 - src/Common/ObjectStorageKeyGenerator.h | 4 ++++ .../CommonPathPrefixKeyGenerator.h | 1 + ...=> FlatDirectoryStructureKeyGenerator.cpp} | 9 ++++--- ...h => FlatDirectoryStructureKeyGenerator.h} | 4 ++-- .../ObjectStorages/HDFS/HDFSObjectStorage.cpp | 1 - src/Disks/ObjectStorages/InMemoryPathMap.h | 2 +- .../Local/LocalObjectStorage.cpp | 1 - .../MetadataStorageFromPlainObjectStorage.cpp | 2 -- .../MetadataStorageFromPlainObjectStorage.h | 5 ++-- ...torageFromPlainObjectStorageOperations.cpp | 24 +++++++++---------- ...aStorageFromPlainObjectStorageOperations.h | 3 +-- ...torageFromPlainRewritableObjectStorage.cpp | 12 +++++++--- 13 files changed, 35 insertions(+), 34 deletions(-) rename src/Disks/ObjectStorages/{FlatStructureKeyGenerator.cpp => FlatDirectoryStructureKeyGenerator.cpp} (80%) rename src/Disks/ObjectStorages/{FlatStructureKeyGenerator.h => FlatDirectoryStructureKeyGenerator.h} (64%) diff --git a/src/Common/ObjectStorageKeyGenerator.cpp b/src/Common/ObjectStorageKeyGenerator.cpp index 3e7bf3116bd..3bdc0004198 100644 --- a/src/Common/ObjectStorageKeyGenerator.cpp +++ b/src/Common/ObjectStorageKeyGenerator.cpp @@ -3,7 +3,6 @@ #include #include -#include #include diff --git a/src/Common/ObjectStorageKeyGenerator.h b/src/Common/ObjectStorageKeyGenerator.h index 12aeec1714d..008e3c88fac 100644 --- a/src/Common/ObjectStorageKeyGenerator.h +++ b/src/Common/ObjectStorageKeyGenerator.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include "ObjectStorageKey.h" namespace DB @@ -12,6 +13,9 @@ public: virtual ~IObjectStorageKeysGenerator() = default; /// Generates an object storage key based on a path in the virtual filesystem. + /// @param path - Path in the virtual filesystem. + /// @param is_directory - If the path in the virtual filesystem corresponds to a directory. + /// @param key_prefix - Optional key prefix for the generated object storage key. 
If provided, this prefix will be added to the beginning of the generated key. virtual ObjectStorageKey generate(const String & path, bool is_directory, const std::optional & key_prefix) const = 0; }; diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h index 8b5037e3804..ea91d78600d 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.h @@ -9,6 +9,7 @@ namespace DB { +/// Deprecated. Used for backward compatibility with plain rewritable disks without a separate metadata layout. /// Object storage key generator used specifically with the /// MetadataStorageFromPlainObjectStorage if multiple writes are allowed. diff --git a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp b/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp similarity index 80% rename from src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp rename to src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp index 414aea2b08b..64959b729b6 100644 --- a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp @@ -1,4 +1,4 @@ -#include "FlatStructureKeyGenerator.h" +#include "FlatDirectoryStructureKeyGenerator.h" #include #include "Common/ObjectStorageKey.h" #include "Common/SharedMutex.h" @@ -11,12 +11,12 @@ namespace DB { -FlatStructureKeyGenerator::FlatStructureKeyGenerator(String storage_key_prefix_, std::weak_ptr path_map_) +FlatDirectoryStructureKeyGenerator::FlatDirectoryStructureKeyGenerator(String storage_key_prefix_, std::weak_ptr path_map_) : storage_key_prefix(storage_key_prefix_), path_map(std::move(path_map_)) { } -ObjectStorageKey FlatStructureKeyGenerator::generate(const String & path, bool is_directory, const std::optional & key_prefix) const +ObjectStorageKey FlatDirectoryStructureKeyGenerator::generate(const String & path, bool is_directory, const std::optional & key_prefix) const { if (is_directory) chassert(path.ends_with('/')); @@ -24,8 +24,6 @@ ObjectStorageKey FlatStructureKeyGenerator::generate(const String & path, bool i const auto p = std::filesystem::path(path); auto directory = p.parent_path(); - constexpr size_t part_size = 32; - std::optional remote_path; { auto ptr = path_map.lock(); @@ -38,6 +36,7 @@ ObjectStorageKey FlatStructureKeyGenerator::generate(const String & path, bool i if (it != ptr->map.end()) remote_path = it->second; } + constexpr size_t part_size = 32; std::filesystem::path key = remote_path.has_value() ? *remote_path : is_directory ? 
std::filesystem::path(getRandomASCIIString(part_size)) : directory; diff --git a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h b/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.h similarity index 64% rename from src/Disks/ObjectStorages/FlatStructureKeyGenerator.h rename to src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.h index 6b5b2203bed..4dbac5d3003 100644 --- a/src/Disks/ObjectStorages/FlatStructureKeyGenerator.h +++ b/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.h @@ -7,10 +7,10 @@ namespace DB { struct InMemoryPathMap; -class FlatStructureKeyGenerator : public IObjectStorageKeysGenerator +class FlatDirectoryStructureKeyGenerator : public IObjectStorageKeysGenerator { public: - explicit FlatStructureKeyGenerator(String storage_key_prefix_, std::weak_ptr path_map_); + explicit FlatDirectoryStructureKeyGenerator(String storage_key_prefix_, std::weak_ptr path_map_); ObjectStorageKey generate(const String & path, bool is_directory, const std::optional & key_prefix) const override; diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index 3ce2a0f4903..00ef4b63e6f 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -4,7 +4,6 @@ #include #include -#include #include #include #include diff --git a/src/Disks/ObjectStorages/InMemoryPathMap.h b/src/Disks/ObjectStorages/InMemoryPathMap.h index fae82108789..ea08784719e 100644 --- a/src/Disks/ObjectStorages/InMemoryPathMap.h +++ b/src/Disks/ObjectStorages/InMemoryPathMap.h @@ -2,7 +2,7 @@ #include #include -#include "Common/SharedMutex.h" +#include namespace DB { diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 20ef135cdf7..5b61c57ca21 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -1,7 +1,6 @@ #include #include -#include #include #include #include diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index 364d04e2b52..2036208c389 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -146,10 +146,8 @@ void MetadataStorageFromPlainObjectStorageTransaction::createDirectory(const std return; auto normalized_path = normalizeDirectoryPath(path); - auto key_prefix = object_storage->generateObjectKeyPrefixForDirectoryPath(normalized_path, "" /* key_prefix */).serialize(); auto op = std::make_unique( std::move(normalized_path), - std::move(key_prefix), *metadata_storage.getPathMap(), object_storage, metadata_storage.getMetadataKeyPrefix()); diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h index a9a1a648f96..2aac7158bd5 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.h @@ -78,11 +78,10 @@ public: bool supportsStat() const override { return false; } protected: - /// Get the object storage prefix for storing metadata files. If stored behind a separate endpoint, - /// the metadata keys reflect the layout of the regular files. + /// Get the object storage prefix for storing metadata files. 
virtual std::string getMetadataKeyPrefix() const { return object_storage->getCommonKeyPrefix(); } - /// Returns a map of local paths to paths in object storage. + /// Returns a map of virtual filesystem paths to paths in the object storage. virtual std::shared_ptr getPathMap() const { throwNotImplemented(); } }; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 6f5109faec4..9e18f6cdb08 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -29,25 +29,21 @@ ObjectStorageKey createMetadataObjectKey(const std::string & key_prefix, const s } MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( - std::filesystem::path && path_, - std::string && key_prefix_, - InMemoryPathMap & path_map_, - ObjectStoragePtr object_storage_, - const std::string & metadata_key_prefix_) + std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) : path(std::move(path_)) - , key_prefix(key_prefix_) , path_map(path_map_) , object_storage(object_storage_) , metadata_key_prefix(metadata_key_prefix_) + , key_prefix(object_storage->generateObjectKeyPrefixForDirectoryPath(path, "" /* key_prefix */).serialize()) { } void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std::unique_lock &) { - auto & map = path_map.map; auto & mutex = path_map.mutex; { std::shared_lock lock(mutex); + auto & map = path_map.map; if (map.contains(path.parent_path())) return; } @@ -72,6 +68,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: { std::unique_lock lock(mutex); + auto & map = path_map.map; [[maybe_unused]] auto result = map.emplace(path.parent_path(), std::move(key_prefix)); chassert(result.second); } @@ -89,7 +86,6 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock &) { - auto & map = path_map.map; auto & mutex = path_map.mutex; auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); @@ -98,6 +94,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::un { { std::unique_lock lock(mutex); + auto & map = path_map.map; map.erase(path.parent_path()); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; @@ -126,12 +123,12 @@ MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFrom std::unique_ptr MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::createWriteBuf( const std::filesystem::path & expected_path, const std::filesystem::path & new_path, bool validate_content) { - auto & map = path_map.map; auto & mutex = path_map.mutex; std::filesystem::path remote_path; { std::shared_lock lock(mutex); + auto & map = path_map.map; auto expected_it = map.find(expected_path.parent_path()); if (expected_it == map.end()) throw Exception( @@ -182,10 +179,10 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u writeString(path_to.string(), *write_buf); write_buf->finalize(); - auto & map = path_map.map; auto & mutex = path_map.mutex; { std::unique_lock lock(mutex); + auto & map = path_map.map; [[maybe_unused]] auto result = 
map.emplace(path_to.parent_path(), map.extract(path_from.parent_path()).mapped()); chassert(result.second); } @@ -197,9 +194,9 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq { if (write_finalized) { - auto & map = path_map.map; auto & mutex = path_map.mutex; std::unique_lock lock(mutex); + auto & map = path_map.map; map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped()); } @@ -219,10 +216,10 @@ MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std::unique_lock & /* metadata_lock */) { - auto & map = path_map.map; auto & mutex = path_map.mutex; { std::shared_lock lock(mutex); + auto & map = path_map.map; auto path_it = map.find(path.parent_path()); if (path_it == map.end()) return; @@ -237,6 +234,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: { std::unique_lock lock(mutex); + auto & map = path_map.map; map.erase(path.parent_path()); } @@ -265,10 +263,10 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un writeString(path.string(), *buf); buf->finalize(); - auto & map = path_map.map; auto & mutex = path_map.mutex; { std::unique_lock lock(mutex); + auto & map = path_map.map; map.emplace(path.parent_path(), std::move(key_prefix)); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h index 778585fa758..3ac0ffef8d2 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h @@ -14,10 +14,10 @@ class MetadataStorageFromPlainObjectStorageCreateDirectoryOperation final : publ { private: std::filesystem::path path; - std::string key_prefix; InMemoryPathMap & path_map; ObjectStoragePtr object_storage; const std::string metadata_key_prefix; + const std::string key_prefix; bool write_created = false; bool write_finalized = false; @@ -26,7 +26,6 @@ public: // Assuming that paths are normalized. MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( std::filesystem::path && path_, - std::string && key_prefix_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_); diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index c312eae4077..fd3b9523df6 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -132,16 +132,20 @@ void getDirectChildrenOnDiskImpl( SharedMutex & shared_mutex, std::unordered_set & result) { + /// Directories are retrieved from the in-memory path map. 
{ std::shared_lock lock(shared_mutex); auto end_it = local_path_prefixes.end(); for (auto it = local_path_prefixes.lower_bound(local_path); it != end_it; ++it) { - const auto & [k, v] = std::make_tuple(it->first.string(), it->second); + const auto & [k, _] = std::make_tuple(it->first.string(), it->second); if (!k.starts_with(local_path)) break; auto slash_num = count(k.begin() + local_path.size(), k.end(), '/'); + /// The local_path_prefixes comparator ensures that the paths with the smallest number of + /// hops from the local_path are iterated first. The paths do not end with '/', hence + /// break the loop if the number of slashes is greater than 0. if (slash_num != 0) break; @@ -149,6 +153,7 @@ void getDirectChildrenOnDiskImpl( } } + /// Files. auto skip_list = std::set{PREFIX_PATH_FILE_NAME}; for (const auto & elem : remote_paths) { @@ -189,7 +194,8 @@ MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewrita } else { - auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); + /// Use flat directory structure if the metadata is stored separately from the table data. + auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); object_storage->setKeysGenerator(keys_gen); } } From d4c13714abb6b307c4344c74bd4b7973c03e68df Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Sat, 13 Jul 2024 06:51:02 +0000 Subject: [PATCH 1412/2361] address feedback: TSA_GUARDED_BY --- .../CommonPathPrefixKeyGenerator.cpp | 5 +-- .../FlatDirectoryStructureKeyGenerator.cpp | 7 ++-- src/Disks/ObjectStorages/InMemoryPathMap.h | 4 +-- ...torageFromPlainObjectStorageOperations.cpp | 34 +++++++------------ ...torageFromPlainRewritableObjectStorage.cpp | 34 +++++++++++-------- 5 files changed, 40 insertions(+), 44 deletions(-) diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index 1fa06823bae..1d041626a7e 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -49,8 +50,8 @@ std::tuple> CommonPathPrefixKeyGenerator:: std::filesystem::path p(path); std::deque dq; - auto ptr = path_map.lock(); - std::shared_lock lock(ptr->mutex); + const auto ptr = path_map.lock(); + SharedLockGuard lock(ptr->mutex); while (p != p.root_path()) { diff --git a/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp b/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp index 64959b729b6..0f35bfd2427 100644 --- a/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/FlatDirectoryStructureKeyGenerator.cpp @@ -1,7 +1,8 @@ #include "FlatDirectoryStructureKeyGenerator.h" #include #include "Common/ObjectStorageKey.h" -#include "Common/SharedMutex.h" +#include +#include #include #include @@ -26,8 +27,8 @@ ObjectStorageKey FlatDirectoryStructureKeyGenerator::generate(const String & pat std::optional remote_path; { - auto ptr = path_map.lock(); - std::shared_lock lock(ptr->mutex); + const auto ptr = path_map.lock(); + SharedLockGuard lock(ptr->mutex); auto it = ptr->map.find(p); if (it != ptr->map.end()) return ObjectStorageKey::createAsRelative(key_prefix.has_value() ? 
*key_prefix : storage_key_prefix, it->second); diff --git a/src/Disks/ObjectStorages/InMemoryPathMap.h b/src/Disks/ObjectStorages/InMemoryPathMap.h index ea08784719e..dcd28dfaf6c 100644 --- a/src/Disks/ObjectStorages/InMemoryPathMap.h +++ b/src/Disks/ObjectStorages/InMemoryPathMap.h @@ -22,8 +22,8 @@ struct InMemoryPathMap } }; using Map = std::map; - Map map; - SharedMutex mutex; + mutable SharedMutex mutex; + Map map TSA_GUARDED_BY(mutex); }; } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 9e18f6cdb08..8a06b204cfc 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -1,4 +1,5 @@ #include "MetadataStorageFromPlainObjectStorageOperations.h" +#include "Common/SharedLockGuard.h" #include #include @@ -40,11 +41,9 @@ MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std::unique_lock &) { - auto & mutex = path_map.mutex; { - std::shared_lock lock(mutex); - auto & map = path_map.map; - if (map.contains(path.parent_path())) + SharedLockGuard lock(path_map.mutex); + if (path_map.map.contains(path.parent_path())) return; } @@ -67,7 +66,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: write_created = true; { - std::unique_lock lock(mutex); + std::lock_guard lock(path_map.mutex); auto & map = path_map.map; [[maybe_unused]] auto result = map.emplace(path.parent_path(), std::move(key_prefix)); chassert(result.second); @@ -86,16 +85,13 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock &) { - auto & mutex = path_map.mutex; - auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); if (write_finalized) { { - std::unique_lock lock(mutex); - auto & map = path_map.map; - map.erase(path.parent_path()); + std::lock_guard lock(path_map.mutex); + path_map.map.erase(path.parent_path()); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); @@ -123,11 +119,9 @@ MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFrom std::unique_ptr MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::createWriteBuf( const std::filesystem::path & expected_path, const std::filesystem::path & new_path, bool validate_content) { - auto & mutex = path_map.mutex; - std::filesystem::path remote_path; { - std::shared_lock lock(mutex); + SharedLockGuard lock(path_map.mutex); auto & map = path_map.map; auto expected_it = map.find(expected_path.parent_path()); if (expected_it == map.end()) @@ -179,9 +173,8 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u writeString(path_to.string(), *write_buf); write_buf->finalize(); - auto & mutex = path_map.mutex; { - std::unique_lock lock(mutex); + std::lock_guard lock(path_map.mutex); auto & map = path_map.map; [[maybe_unused]] auto result = map.emplace(path_to.parent_path(), map.extract(path_from.parent_path()).mapped()); chassert(result.second); @@ -194,8 +187,7 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq { if (write_finalized) { - auto & mutex = path_map.mutex; - std::unique_lock lock(mutex); + 
std::lock_guard lock(path_map.mutex); auto & map = path_map.map; map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped()); } @@ -216,9 +208,8 @@ MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std::unique_lock & /* metadata_lock */) { - auto & mutex = path_map.mutex; { - std::shared_lock lock(mutex); + SharedLockGuard lock(path_map.mutex); auto & map = path_map.map; auto path_it = map.find(path.parent_path()); if (path_it == map.end()) @@ -233,7 +224,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: object_storage->removeObject(metadata_object); { - std::unique_lock lock(mutex); + std::lock_guard lock(path_map.mutex); auto & map = path_map.map; map.erase(path.parent_path()); } @@ -263,9 +254,8 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un writeString(path.string(), *buf); buf->finalize(); - auto & mutex = path_map.mutex; { - std::unique_lock lock(mutex); + std::lock_guard lock(path_map.mutex); auto & map = path_map.map; map.emplace(path.parent_path(), std::move(key_prefix)); } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index fd3b9523df6..22e73e36372 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -9,6 +9,8 @@ #include #include #include +#include "Common/SharedLockGuard.h" +#include "Common/SharedMutex.h" #include "CommonPathPrefixKeyGenerator.h" @@ -39,14 +41,13 @@ std::string getMetadataKeyPrefix(ObjectStoragePtr object_storage) : metadata_key_prefix; } -InMemoryPathMap::Map loadPathPrefixMap(const std::string & metadata_key_prefix, ObjectStoragePtr object_storage) +std::shared_ptr loadPathPrefixMap(const std::string & metadata_key_prefix, ObjectStoragePtr object_storage) { + auto result = std::make_shared(); using Map = InMemoryPathMap::Map; - Map result; ThreadPool & pool = getIOThreadPool().get(); ThreadPoolCallbackRunnerLocal runner(pool, "PlainRWMetaLoad"); - std::mutex mutex; LoggerPtr log = getLogger("MetadataStorageFromPlainObjectStorage"); @@ -66,7 +67,7 @@ InMemoryPathMap::Map loadPathPrefixMap(const std::string & metadata_key_prefix, if (remote_metadata_path.filename() != PREFIX_PATH_FILE_NAME) continue; - runner([remote_metadata_path, path, &object_storage, &result, &mutex, &log, &settings, &metadata_key_prefix] + runner([remote_metadata_path, path, &object_storage, &result, &log, &settings, &metadata_key_prefix] { setThreadName("PlainRWMetaLoad"); @@ -99,8 +100,8 @@ InMemoryPathMap::Map loadPathPrefixMap(const std::string & metadata_key_prefix, auto remote_path = std::filesystem::path(std::move(suffix)); std::pair res; { - std::lock_guard lock(mutex); - res = result.emplace(std::filesystem::path(local_path).parent_path(), remote_path.parent_path()); + std::lock_guard lock(result->mutex); + res = result->map.emplace(std::filesystem::path(local_path).parent_path(), remote_path.parent_path()); } /// This can happen if table replication is enabled, then the same local path is written @@ -117,10 +118,13 @@ InMemoryPathMap::Map loadPathPrefixMap(const std::string & metadata_key_prefix, } runner.waitForAllToFinishAndRethrowFirstError(); - LOG_DEBUG(log, "Loaded metadata for {} files, found {} directories", num_files, 
result.size()); + { + SharedLockGuard lock(result->mutex); + LOG_DEBUG(log, "Loaded metadata for {} files, found {} directories", num_files, result->map.size()); - auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; - CurrentMetrics::add(metric, result.size()); + auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; + CurrentMetrics::add(metric, result->map.size()); + } return result; } @@ -128,14 +132,14 @@ void getDirectChildrenOnDiskImpl( const std::string & storage_key, const RelativePathsWithMetadata & remote_paths, const std::string & local_path, - const InMemoryPathMap::Map & local_path_prefixes, - SharedMutex & shared_mutex, + const InMemoryPathMap & path_map, std::unordered_set & result) { /// Directories are retrieved from the in-memory path map. { - std::shared_lock lock(shared_mutex); - auto end_it = local_path_prefixes.end(); + SharedLockGuard lock(path_map.mutex); + const auto & local_path_prefixes = path_map.map; + const auto end_it = local_path_prefixes.end(); for (auto it = local_path_prefixes.lower_bound(local_path); it != end_it; ++it) { const auto & [k, _] = std::make_tuple(it->first.string(), it->second); @@ -179,7 +183,7 @@ MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewrita ObjectStoragePtr object_storage_, String storage_path_prefix_) : MetadataStorageFromPlainObjectStorage(object_storage_, storage_path_prefix_) , metadata_key_prefix(DB::getMetadataKeyPrefix(object_storage)) - , path_map(std::make_shared(loadPathPrefixMap(metadata_key_prefix, object_storage))) + , path_map(loadPathPrefixMap(metadata_key_prefix, object_storage)) { if (object_storage->isWriteOnce()) throw Exception( @@ -261,7 +265,7 @@ void MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( const std::string & local_path, std::unordered_set & result) const { - getDirectChildrenOnDiskImpl(storage_key, remote_paths, local_path, getPathMap()->map, getPathMap()->mutex, result); + getDirectChildrenOnDiskImpl(storage_key, remote_paths, local_path, *getPathMap(), result); } } From db13ba2c488303e90717fbcc5adf5304241ac474 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Sat, 13 Jul 2024 06:51:40 +0000 Subject: [PATCH 1413/2361] style fix --- .../CommonPathPrefixKeyGenerator.cpp | 2 +- ...torageFromPlainObjectStorageOperations.cpp | 2 +- ...torageFromPlainRewritableObjectStorage.cpp | 91 ++++++++++--------- 3 files changed, 48 insertions(+), 47 deletions(-) diff --git a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp index 1d041626a7e..521d5c037ab 100644 --- a/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp +++ b/src/Disks/ObjectStorages/CommonPathPrefixKeyGenerator.cpp @@ -1,8 +1,8 @@ #include #include -#include #include +#include #include #include diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 8a06b204cfc..76090411bb9 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -1,6 +1,6 @@ #include "MetadataStorageFromPlainObjectStorageOperations.h" -#include "Common/SharedLockGuard.h" #include +#include "Common/SharedLockGuard.h" #include #include diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp 
b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index 22e73e36372..dba63bba321 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -7,10 +7,10 @@ #include #include #include -#include -#include #include "Common/SharedLockGuard.h" #include "Common/SharedMutex.h" +#include +#include #include "CommonPathPrefixKeyGenerator.h" @@ -67,54 +67,55 @@ std::shared_ptr loadPathPrefixMap(const std::string & metadata_ if (remote_metadata_path.filename() != PREFIX_PATH_FILE_NAME) continue; - runner([remote_metadata_path, path, &object_storage, &result, &log, &settings, &metadata_key_prefix] - { - setThreadName("PlainRWMetaLoad"); - - StoredObject object{path}; - String local_path; - - try + runner( + [remote_metadata_path, path, &object_storage, &result, &log, &settings, &metadata_key_prefix] { - auto read_buf = object_storage->readObject(object, settings); - readStringUntilEOF(local_path, *read_buf); - } + setThreadName("PlainRWMetaLoad"); + + StoredObject object{path}; + String local_path; + + try + { + auto read_buf = object_storage->readObject(object, settings); + readStringUntilEOF(local_path, *read_buf); + } #if USE_AWS_S3 - catch (const S3Exception & e) - { - /// It is ok if a directory was removed just now. - /// We support attaching a filesystem that is concurrently modified by someone else. - if (e.getS3ErrorCode() == Aws::S3::S3Errors::NO_SUCH_KEY) - return; - throw; - } + catch (const S3Exception & e) + { + /// It is ok if a directory was removed just now. + /// We support attaching a filesystem that is concurrently modified by someone else. + if (e.getS3ErrorCode() == Aws::S3::S3Errors::NO_SUCH_KEY) + return; + throw; + } #endif - catch (...) - { - throw; - } + catch (...) + { + throw; + } - chassert(remote_metadata_path.has_parent_path()); - chassert(remote_metadata_path.string().starts_with(metadata_key_prefix)); - auto suffix = remote_metadata_path.string().substr(metadata_key_prefix.size()); - auto remote_path = std::filesystem::path(std::move(suffix)); - std::pair res; - { - std::lock_guard lock(result->mutex); - res = result->map.emplace(std::filesystem::path(local_path).parent_path(), remote_path.parent_path()); - } + chassert(remote_metadata_path.has_parent_path()); + chassert(remote_metadata_path.string().starts_with(metadata_key_prefix)); + auto suffix = remote_metadata_path.string().substr(metadata_key_prefix.size()); + auto remote_path = std::filesystem::path(std::move(suffix)); + std::pair res; + { + std::lock_guard lock(result->mutex); + res = result->map.emplace(std::filesystem::path(local_path).parent_path(), remote_path.parent_path()); + } - /// This can happen if table replication is enabled, then the same local path is written - /// in `prefix.path` of each replica. - /// TODO: should replicated tables (e.g., RMT) be explicitly disallowed? - if (!res.second) - LOG_WARNING( - log, - "The local path '{}' is already mapped to a remote path '{}', ignoring: '{}'", - local_path, - res.first->second, - remote_path.parent_path().string()); - }); + /// This can happen if table replication is enabled, then the same local path is written + /// in `prefix.path` of each replica. + /// TODO: should replicated tables (e.g., RMT) be explicitly disallowed? 
+ if (!res.second) + LOG_WARNING( + log, + "The local path '{}' is already mapped to a remote path '{}', ignoring: '{}'", + local_path, + res.first->second, + remote_path.parent_path().string()); + }); } runner.waitForAllToFinishAndRethrowFirstError(); From 912bddf86f53f207b76ba453e43b6724b24ef6df Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Sun, 14 Jul 2024 20:49:37 -0700 Subject: [PATCH 1414/2361] Update src/Disks/ObjectStorages/InMemoryPathMap.h Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Disks/ObjectStorages/InMemoryPathMap.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Disks/ObjectStorages/InMemoryPathMap.h b/src/Disks/ObjectStorages/InMemoryPathMap.h index dcd28dfaf6c..2ac291dfaf0 100644 --- a/src/Disks/ObjectStorages/InMemoryPathMap.h +++ b/src/Disks/ObjectStorages/InMemoryPathMap.h @@ -21,6 +21,7 @@ struct InMemoryPathMap return path1 < path2; } }; + /// Local -> Remote path. using Map = std::map; mutable SharedMutex mutex; Map map TSA_GUARDED_BY(mutex); From 727f5ed108e3b92c81d4ed295e0de438de8bae2b Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Sun, 14 Jul 2024 20:49:57 -0700 Subject: [PATCH 1415/2361] Update src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- .../MetadataStorageFromPlainRewritableObjectStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index dba63bba321..3380dec60ca 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -29,7 +29,7 @@ constexpr auto PREFIX_PATH_FILE_NAME = "prefix.path"; constexpr auto METADATA_PATH_TOKEN = "__meta/"; /// Use a separate layout for metadata iff: -/// 1. The disk endpoint does not contain objects, OR +/// 1. The disk endpoint does not contain any objects yet (empty), OR /// 2. The metadata is already stored behind a separate endpoint. /// Otherwise, store metadata along with regular data for backward compatibility. std::string getMetadataKeyPrefix(ObjectStoragePtr object_storage) From 4c78531c9c0681a84309e02ecfde17a36f1c1ad5 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Sun, 14 Jul 2024 20:50:20 -0700 Subject: [PATCH 1416/2361] Update src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- .../MetadataStorageFromPlainRewritableObjectStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index 3380dec60ca..40aed32c047 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -28,7 +28,7 @@ namespace constexpr auto PREFIX_PATH_FILE_NAME = "prefix.path"; constexpr auto METADATA_PATH_TOKEN = "__meta/"; -/// Use a separate layout for metadata iff: +/// Use a separate layout for metadata if: /// 1. The disk endpoint does not contain any objects yet (empty), OR /// 2. The metadata is already stored behind a separate endpoint. 
/// Otherwise, store metadata along with regular data for backward compatibility. From 3b986ef3400021cf18797a7872b926dcc191b547 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Mon, 15 Jul 2024 05:50:42 +0000 Subject: [PATCH 1417/2361] address feedback: useSeparateLayoutForMetadata --- ...StorageFromPlainRewritableObjectStorage.cpp | 18 +++++++++++------- ...taStorageFromPlainRewritableObjectStorage.h | 4 +++- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp index 40aed32c047..39b11d9a3e3 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.cpp @@ -192,15 +192,15 @@ MetadataStorageFromPlainRewritableObjectStorage::MetadataStorageFromPlainRewrita "MetadataStorageFromPlainRewritableObjectStorage is not compatible with write-once storage '{}'", object_storage->getName()); - if (getMetadataKeyPrefix() == object_storage->getCommonKeyPrefix()) + if (useSeparateLayoutForMetadata()) { - auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); + /// Use flat directory structure if the metadata is stored separately from the table data. + auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); object_storage->setKeysGenerator(keys_gen); } else { - /// Use flat directory structure if the metadata is stored separately from the table data. - auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); + auto keys_gen = std::make_shared(object_storage->getCommonKeyPrefix(), path_map); object_storage->setKeysGenerator(keys_gen); } } @@ -216,7 +216,7 @@ bool MetadataStorageFromPlainRewritableObjectStorage::exists(const std::string & if (MetadataStorageFromPlainObjectStorage::exists(path)) return true; - if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) + if (useSeparateLayoutForMetadata()) { auto key_prefix = object_storage->generateObjectKeyForPath(path, getMetadataKeyPrefix()).serialize(); return object_storage->existsOrHasAnyChild(key_prefix); @@ -227,7 +227,7 @@ bool MetadataStorageFromPlainRewritableObjectStorage::exists(const std::string & bool MetadataStorageFromPlainRewritableObjectStorage::isDirectory(const std::string & path) const { - if (getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix()) + if (useSeparateLayoutForMetadata()) { auto directory = std::filesystem::path(object_storage->generateObjectKeyForPath(path, getMetadataKeyPrefix()).serialize()) / ""; return object_storage->existsOrHasAnyChild(directory); @@ -249,7 +249,7 @@ std::vector MetadataStorageFromPlainRewritableObjectStorage::listDi getDirectChildrenOnDisk(abs_key, files, std::filesystem::path(path) / "", directories); /// List empty directories that are identified by the `prefix.path` metadata files. This is required to, e.g., remove /// metadata along with regular files. 
- if (object_storage->getCommonKeyPrefix() != getMetadataKeyPrefix()) + if (useSeparateLayoutForMetadata()) { auto metadata_key = std::filesystem::path(getMetadataKeyPrefix()) / key_prefix / ""; RelativePathsWithMetadata metadata_files; @@ -269,4 +269,8 @@ void MetadataStorageFromPlainRewritableObjectStorage::getDirectChildrenOnDisk( getDirectChildrenOnDiskImpl(storage_key, remote_paths, local_path, *getPathMap(), result); } +bool MetadataStorageFromPlainRewritableObjectStorage::useSeparateLayoutForMetadata() const +{ + return getMetadataKeyPrefix() != object_storage->getCommonKeyPrefix(); +} } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h index 8fd147e15b9..82d93e3e7ae 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainRewritableObjectStorage.h @@ -24,7 +24,6 @@ public: bool isDirectory(const std::string & path) const override; std::vector listDirectory(const std::string & path) const override; - protected: std::string getMetadataKeyPrefix() const override { return metadata_key_prefix; } std::shared_ptr getPathMap() const override { return path_map; } @@ -33,6 +32,9 @@ protected: const RelativePathsWithMetadata & remote_paths, const std::string & local_path, std::unordered_set & result) const; + +private: + bool useSeparateLayoutForMetadata() const; }; } From 359b42738a25aa02436c1bebc49d0b751e456ccb Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Mon, 15 Jul 2024 06:11:51 +0000 Subject: [PATCH 1418/2361] address feedback: key_prefix -> object_key_prefix --- ...tadataStorageFromPlainObjectStorageOperations.cpp | 12 ++++++------ ...MetadataStorageFromPlainObjectStorageOperations.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 76090411bb9..31fb8c7ef97 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -22,9 +22,9 @@ namespace constexpr auto PREFIX_PATH_FILE_NAME = "prefix.path"; -ObjectStorageKey createMetadataObjectKey(const std::string & key_prefix, const std::string & metadata_key_prefix) +ObjectStorageKey createMetadataObjectKey(const std::string & object_key_prefix, const std::string & metadata_key_prefix) { - auto prefix = std::filesystem::path(metadata_key_prefix) / key_prefix; + auto prefix = std::filesystem::path(metadata_key_prefix) / object_key_prefix; return ObjectStorageKey::createAsRelative(prefix.string(), PREFIX_PATH_FILE_NAME); } } @@ -35,7 +35,7 @@ MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFr , path_map(path_map_) , object_storage(object_storage_) , metadata_key_prefix(metadata_key_prefix_) - , key_prefix(object_storage->generateObjectKeyPrefixForDirectoryPath(path, "" /* key_prefix */).serialize()) + , object_key_prefix(object_storage->generateObjectKeyPrefixForDirectoryPath(path, "" /* object_key_prefix */).serialize()) { } @@ -47,7 +47,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: return; } - auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); + auto metadata_object_key = createMetadataObjectKey(object_key_prefix, metadata_key_prefix); LOG_TRACE( 
getLogger("MetadataStorageFromPlainObjectStorageCreateDirectoryOperation"), @@ -68,7 +68,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: { std::lock_guard lock(path_map.mutex); auto & map = path_map.map; - [[maybe_unused]] auto result = map.emplace(path.parent_path(), std::move(key_prefix)); + [[maybe_unused]] auto result = map.emplace(path.parent_path(), std::move(object_key_prefix)); chassert(result.second); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; @@ -85,7 +85,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock &) { - auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); + auto metadata_object_key = createMetadataObjectKey(object_key_prefix, metadata_key_prefix); if (write_finalized) { diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h index 3ac0ffef8d2..02305767faf 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h @@ -17,7 +17,7 @@ private: InMemoryPathMap & path_map; ObjectStoragePtr object_storage; const std::string metadata_key_prefix; - const std::string key_prefix; + const std::string object_key_prefix; bool write_created = false; bool write_finalized = false; From 79a8cbe0c595e877a750c26cc27a8c68202279c7 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Mon, 15 Jul 2024 06:16:00 +0000 Subject: [PATCH 1419/2361] address feedback: documentation --- .../MetadataStorageFromPlainObjectStorageOperations.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index 31fb8c7ef97..be5168c5385 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -55,7 +55,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: path, metadata_object_key.serialize()); - auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME); + auto metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME); auto buf = object_storage->writeObject( metadata_object, WriteMode::Rewrite, @@ -137,7 +137,8 @@ std::unique_ptr MetadataStorageFromPlainObjectStorageMo auto metadata_object_key = createMetadataObjectKey(remote_path, metadata_key_prefix); - auto metadata_object = StoredObject(metadata_object_key.serialize(), expected_path / PREFIX_PATH_FILE_NAME); + auto metadata_object + = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ expected_path / PREFIX_PATH_FILE_NAME); if (validate_content) { @@ -220,7 +221,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: LOG_TRACE(getLogger("MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation"), "Removing directory '{}'", path); auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); - auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME); + auto 
metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME); object_storage->removeObject(metadata_object); { From 42bd49dae6244590ba406ad502083bf610276eb9 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Mon, 15 Jul 2024 07:05:51 +0000 Subject: [PATCH 1420/2361] address feedback: parent_path() for directories --- ...torageFromPlainObjectStorageOperations.cpp | 30 ++++++++++++------- ...aStorageFromPlainObjectStorageOperations.h | 4 ++- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index be5168c5385..c0e3f8e1fc9 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -31,7 +31,7 @@ ObjectStorageKey createMetadataObjectKey(const std::string & object_key_prefix, MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) - : path(std::move(path_)) + : path((chassert(path_.string().ends_with('/')), std::move(path_))) , path_map(path_map_) , object_storage(object_storage_) , metadata_key_prefix(metadata_key_prefix_) @@ -41,9 +41,11 @@ MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFr void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std::unique_lock &) { + /// parent_path() removes the trailing '/' + const auto base_path = path.parent_path(); { SharedLockGuard lock(path_map.mutex); - if (path_map.map.contains(path.parent_path())) + if (path_map.map.contains(base_path)) return; } @@ -68,7 +70,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: { std::lock_guard lock(path_map.mutex); auto & map = path_map.map; - [[maybe_unused]] auto result = map.emplace(path.parent_path(), std::move(object_key_prefix)); + [[maybe_unused]] auto result = map.emplace(base_path, std::move(object_key_prefix)); chassert(result.second); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; @@ -89,9 +91,10 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::un if (write_finalized) { + const auto base_path = path.parent_path(); { std::lock_guard lock(path_map.mutex); - path_map.map.erase(path.parent_path()); + path_map.map.erase(base_path); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); @@ -108,8 +111,8 @@ MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFrom InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) - : path_from(std::move(path_from_)) - , path_to(std::move(path_to_)) + : path_from((chassert(path_from_.string().ends_with('/')), std::move(path_from_))) + , path_to((chassert(path_to_.string().ends_with('/')), std::move(path_to_))) , path_map(path_map_) , object_storage(object_storage_) , metadata_key_prefix(metadata_key_prefix_) @@ -123,6 +126,7 @@ std::unique_ptr MetadataStorageFromPlainObjectStorageMo { SharedLockGuard lock(path_map.mutex); auto & map = path_map.map; + /// parent_path() removes the trailing '/'. 
auto expected_it = map.find(expected_path.parent_path()); if (expected_it == map.end()) throw Exception( @@ -174,10 +178,14 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u writeString(path_to.string(), *write_buf); write_buf->finalize(); + /// parent_path() removes the trailing '/'. + auto base_path_to = path_to.parent_path(); + auto base_path_from = path_from.parent_path(); + { std::lock_guard lock(path_map.mutex); auto & map = path_map.map; - [[maybe_unused]] auto result = map.emplace(path_to.parent_path(), map.extract(path_from.parent_path()).mapped()); + [[maybe_unused]] auto result = map.emplace(base_path_to, map.extract(base_path_from).mapped()); chassert(result.second); } @@ -203,16 +211,18 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) - : path(std::move(path_)), path_map(path_map_), object_storage(object_storage_), metadata_key_prefix(metadata_key_prefix_) + : path((chassert(path_.string().ends_with('/')), std::move(path_))), path_map(path_map_), object_storage(object_storage_), metadata_key_prefix(metadata_key_prefix_) { } void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std::unique_lock & /* metadata_lock */) { + /// parent_path() removes the trailing '/' + const auto base_path = path.parent_path(); { SharedLockGuard lock(path_map.mutex); auto & map = path_map.map; - auto path_it = map.find(path.parent_path()); + auto path_it = map.find(base_path); if (path_it == map.end()) return; key_prefix = path_it->second; @@ -227,7 +237,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: { std::lock_guard lock(path_map.mutex); auto & map = path_map.map; - map.erase(path.parent_path()); + map.erase(base_path); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h index 02305767faf..93ebe668d56 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.h @@ -23,8 +23,8 @@ private: bool write_finalized = false; public: - // Assuming that paths are normalized. MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( + /// path_ must end with a trailing '/'. std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, @@ -51,6 +51,7 @@ private: public: MetadataStorageFromPlainObjectStorageMoveDirectoryOperation( + /// Both path_from_ and path_to_ must end with a trailing '/'. std::filesystem::path && path_from_, std::filesystem::path && path_to_, InMemoryPathMap & path_map_, @@ -76,6 +77,7 @@ private: public: MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( + /// path_ must end with a trailing '/'. 
std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, From 41fc84bb2df441d117681c589dbea5c516ed4748 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Mon, 15 Jul 2024 21:16:27 +0000 Subject: [PATCH 1421/2361] fix build --- src/Disks/ObjectStorages/InMemoryPathMap.h | 1 + ...taStorageFromPlainObjectStorageOperations.cpp | 16 ++++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/Disks/ObjectStorages/InMemoryPathMap.h b/src/Disks/ObjectStorages/InMemoryPathMap.h index 2ac291dfaf0..e319c187ca7 100644 --- a/src/Disks/ObjectStorages/InMemoryPathMap.h +++ b/src/Disks/ObjectStorages/InMemoryPathMap.h @@ -2,6 +2,7 @@ #include #include +#include #include namespace DB diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index c0e3f8e1fc9..bfd203ef2e0 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -1,10 +1,10 @@ #include "MetadataStorageFromPlainObjectStorageOperations.h" #include -#include "Common/SharedLockGuard.h" #include #include #include +#include #include namespace DB @@ -31,12 +31,13 @@ ObjectStorageKey createMetadataObjectKey(const std::string & object_key_prefix, MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::MetadataStorageFromPlainObjectStorageCreateDirectoryOperation( std::filesystem::path && path_, InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) - : path((chassert(path_.string().ends_with('/')), std::move(path_))) + : path(std::move(path_)) , path_map(path_map_) , object_storage(object_storage_) , metadata_key_prefix(metadata_key_prefix_) , object_key_prefix(object_storage->generateObjectKeyPrefixForDirectoryPath(path, "" /* object_key_prefix */).serialize()) { + chassert(path.string().ends_with('/')); } void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std::unique_lock &) @@ -70,7 +71,7 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std: { std::lock_guard lock(path_map.mutex); auto & map = path_map.map; - [[maybe_unused]] auto result = map.emplace(base_path, std::move(object_key_prefix)); + [[maybe_unused]] auto result = map.emplace(base_path, object_key_prefix); chassert(result.second); } auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; @@ -111,12 +112,14 @@ MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFrom InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) - : path_from((chassert(path_from_.string().ends_with('/')), std::move(path_from_))) - , path_to((chassert(path_to_.string().ends_with('/')), std::move(path_to_))) + : path_from(std::move(path_from_)) + , path_to(std::move(path_to_)) , path_map(path_map_) , object_storage(object_storage_) , metadata_key_prefix(metadata_key_prefix_) { + chassert(path_from.string().ends_with('/')); + chassert(path_to.string().ends_with('/')); } std::unique_ptr MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::createWriteBuf( @@ -211,8 +214,9 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation( std::filesystem::path && path_, 
InMemoryPathMap & path_map_, ObjectStoragePtr object_storage_, const std::string & metadata_key_prefix_) - : path((chassert(path_.string().ends_with('/')), std::move(path_))), path_map(path_map_), object_storage(object_storage_), metadata_key_prefix(metadata_key_prefix_) + : path(std::move(path_)), path_map(path_map_), object_storage(object_storage_), metadata_key_prefix(metadata_key_prefix_) { + chassert(path.string().ends_with('/')); } void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std::unique_lock & /* metadata_lock */) From 9b4e02e8dabb649076389ee96a271da025913ddf Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Tue, 16 Jul 2024 22:48:37 +0000 Subject: [PATCH 1422/2361] fix macOs build --- src/Disks/ObjectStorages/InMemoryPathMap.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/InMemoryPathMap.h b/src/Disks/ObjectStorages/InMemoryPathMap.h index e319c187ca7..a9859d5e2b8 100644 --- a/src/Disks/ObjectStorages/InMemoryPathMap.h +++ b/src/Disks/ObjectStorages/InMemoryPathMap.h @@ -25,7 +25,13 @@ struct InMemoryPathMap /// Local -> Remote path. using Map = std::map; mutable SharedMutex mutex; - Map map TSA_GUARDED_BY(mutex); + +#ifdef OS_LINUX + Map TSA_GUARDED_BY(mutex) map; +/// std::shared_mutex may not be annotated with the 'capability' attribute in libcxx. +#else + Map map; +#endif }; } From 0ec292a65f190f69c24420d5ca85d5658bffba0a Mon Sep 17 00:00:00 2001 From: pufit Date: Fri, 2 Aug 2024 00:32:01 -0400 Subject: [PATCH 1423/2361] Revert "Revert "FuzzQuery table function"" This reverts commit ff44b206 --- .../table-functions/fuzzQuery.md | 36 ++++ programs/client/Client.h | 5 +- src/{Client => Common}/QueryFuzzer.cpp | 50 ++++-- src/{Client => Common}/QueryFuzzer.h | 35 ++-- src/Storages/StorageFuzzQuery.cpp | 169 ++++++++++++++++++ src/Storages/StorageFuzzQuery.h | 88 +++++++++ src/Storages/registerStorages.cpp | 2 + src/TableFunctions/TableFunctionFuzzQuery.cpp | 54 ++++++ src/TableFunctions/TableFunctionFuzzQuery.h | 42 +++++ src/TableFunctions/registerTableFunctions.cpp | 1 + src/TableFunctions/registerTableFunctions.h | 1 + .../03031_table_function_fuzzquery.reference | 2 + .../03031_table_function_fuzzquery.sql | 18 ++ 13 files changed, 473 insertions(+), 30 deletions(-) create mode 100644 docs/en/sql-reference/table-functions/fuzzQuery.md rename src/{Client => Common}/QueryFuzzer.cpp (97%) rename src/{Client => Common}/QueryFuzzer.h (91%) create mode 100644 src/Storages/StorageFuzzQuery.cpp create mode 100644 src/Storages/StorageFuzzQuery.h create mode 100644 src/TableFunctions/TableFunctionFuzzQuery.cpp create mode 100644 src/TableFunctions/TableFunctionFuzzQuery.h create mode 100644 tests/queries/0_stateless/03031_table_function_fuzzquery.reference create mode 100644 tests/queries/0_stateless/03031_table_function_fuzzquery.sql diff --git a/docs/en/sql-reference/table-functions/fuzzQuery.md b/docs/en/sql-reference/table-functions/fuzzQuery.md new file mode 100644 index 00000000000..e15f8a40156 --- /dev/null +++ b/docs/en/sql-reference/table-functions/fuzzQuery.md @@ -0,0 +1,36 @@ +--- +slug: /en/sql-reference/table-functions/fuzzQuery +sidebar_position: 75 +sidebar_label: fuzzQuery +--- + +# fuzzQuery + +Perturbs the given query string with random variations. + +``` sql +fuzzQuery(query[, max_query_length[, random_seed]]) +``` + +**Arguments** + +- `query` (String) - The source query to perform the fuzzing on. 
+- `max_query_length` (UInt64) - A maximum length the query can get during the fuzzing process. +- `random_seed` (UInt64) - A random seed for producing stable results. + +**Returned Value** + +A table object with a single column containing perturbed query strings. + +## Usage Example + +``` sql +SELECT * FROM fuzzQuery('SELECT materialize(\'a\' AS key) GROUP BY key') LIMIT 2; +``` + +``` + ┌─query──────────────────────────────────────────────────────────┠+1. │ SELECT 'a' AS key GROUP BY key │ +2. │ EXPLAIN PIPELINE compact = true SELECT 'a' AS key GROUP BY key │ + └────────────────────────────────────────────────────────────────┘ +``` diff --git a/programs/client/Client.h b/programs/client/Client.h index 7fdf77031ab..07a8e293b1a 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -11,7 +11,10 @@ class Client : public ClientApplicationBase public: using Arguments = ClientApplicationBase::Arguments; - Client() = default; + Client() + { + fuzzer = QueryFuzzer(randomSeed(), &std::cout, &std::cerr); + } void initialize(Poco::Util::Application & self) override; diff --git a/src/Client/QueryFuzzer.cpp b/src/Common/QueryFuzzer.cpp similarity index 97% rename from src/Client/QueryFuzzer.cpp rename to src/Common/QueryFuzzer.cpp index f5b700ea529..161c38f20e0 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Common/QueryFuzzer.cpp @@ -68,22 +68,21 @@ Field QueryFuzzer::getRandomField(int type) { case 0: { - return bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) - / sizeof(*bad_int64_values))]; + return bad_int64_values[fuzz_rand() % std::size(bad_int64_values)]; } case 1: { static constexpr double values[] = {NAN, INFINITY, -INFINITY, 0., -0., 0.0001, 0.5, 0.9999, 1., 1.0001, 2., 10.0001, 100.0001, 1000.0001, 1e10, 1e20, - FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % (sizeof(values) / sizeof(*values))]; + FLT_MIN, FLT_MIN + FLT_EPSILON, FLT_MAX, FLT_MAX + FLT_EPSILON}; return values[fuzz_rand() % std::size(values)]; } case 2: { static constexpr UInt64 scales[] = {0, 1, 2, 10}; return DecimalField( - bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) / sizeof(*bad_int64_values))], - static_cast(scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]) + bad_int64_values[fuzz_rand() % std::size(bad_int64_values)], + static_cast(scales[fuzz_rand() % std::size(scales)]) ); } default: @@ -165,7 +164,8 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.erase(arr.begin() + pos); - std::cerr << "erased\n"; + if (debug_stream) + *debug_stream << "erased\n"; } if (fuzz_rand() % 5 == 0) @@ -174,12 +174,14 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.insert(arr.begin() + pos, fuzzField(arr[pos])); - std::cerr << fmt::format("inserted (pos {})\n", pos); + if (debug_stream) + *debug_stream << fmt::format("inserted (pos {})\n", pos); } else { arr.insert(arr.begin(), getRandomField(0)); - std::cerr << "inserted (0)\n"; + if (debug_stream) + *debug_stream << "inserted (0)\n"; } } @@ -197,7 +199,9 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.erase(arr.begin() + pos); - std::cerr << "erased\n"; + + if (debug_stream) + *debug_stream << "erased\n"; } if (fuzz_rand() % 5 == 0) @@ -206,12 +210,16 @@ Field QueryFuzzer::fuzzField(Field field) { size_t pos = fuzz_rand() % arr.size(); arr.insert(arr.begin() + pos, fuzzField(arr[pos])); - std::cerr << fmt::format("inserted (pos {})\n", pos); + + if (debug_stream) + 
*debug_stream << fmt::format("inserted (pos {})\n", pos); } else { arr.insert(arr.begin(), getRandomField(0)); - std::cerr << "inserted (0)\n"; + + if (debug_stream) + *debug_stream << "inserted (0)\n"; } } @@ -344,7 +352,8 @@ void QueryFuzzer::fuzzOrderByList(IAST * ast) } else { - std::cerr << "No random column.\n"; + if (debug_stream) + *debug_stream << "No random column.\n"; } } @@ -378,7 +387,8 @@ void QueryFuzzer::fuzzColumnLikeExpressionList(IAST * ast) if (col) impl->children.insert(pos, col); else - std::cerr << "No random column.\n"; + if (debug_stream) + *debug_stream << "No random column.\n"; } // We don't have to recurse here to fuzz the children, this is handled by @@ -1361,11 +1371,15 @@ void QueryFuzzer::fuzzMain(ASTPtr & ast) collectFuzzInfoMain(ast); fuzz(ast); - std::cout << std::endl; - WriteBufferFromOStream ast_buf(std::cout, 4096); - formatAST(*ast, ast_buf, false /*highlight*/); - ast_buf.finalize(); - std::cout << std::endl << std::endl; + if (out_stream) + { + *out_stream << std::endl; + + WriteBufferFromOStream ast_buf(*out_stream, 4096); + formatAST(*ast, ast_buf, false /*highlight*/); + ast_buf.finalize(); + *out_stream << std::endl << std::endl; + } } } diff --git a/src/Client/QueryFuzzer.h b/src/Common/QueryFuzzer.h similarity index 91% rename from src/Client/QueryFuzzer.h rename to src/Common/QueryFuzzer.h index 6165e589cae..35d088809f2 100644 --- a/src/Client/QueryFuzzer.h +++ b/src/Common/QueryFuzzer.h @@ -35,9 +35,31 @@ struct ASTWindowDefinition; * queries, so you want to feed it a lot of queries to get some interesting mix * of them. Normally we feed SQL regression tests to it. */ -struct QueryFuzzer +class QueryFuzzer { - pcg64 fuzz_rand{randomSeed()}; +public: + explicit QueryFuzzer(pcg64 fuzz_rand_ = randomSeed(), std::ostream * out_stream_ = nullptr, std::ostream * debug_stream_ = nullptr) + : fuzz_rand(fuzz_rand_) + , out_stream(out_stream_) + , debug_stream(debug_stream_) + { + } + + // This is the only function you have to call -- it will modify the passed + // ASTPtr to point to new AST with some random changes. + void fuzzMain(ASTPtr & ast); + + ASTs getInsertQueriesForFuzzedTables(const String & full_query); + ASTs getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query); + void notifyQueryFailed(ASTPtr ast); + + static bool isSuitableForFuzzing(const ASTCreateQuery & create); + +private: + pcg64 fuzz_rand; + + std::ostream * out_stream = nullptr; + std::ostream * debug_stream = nullptr; // We add elements to expression lists with fixed probability. Some elements // are so large, that the expected number of elements we add to them is @@ -66,10 +88,6 @@ struct QueryFuzzer std::unordered_map index_of_fuzzed_table; std::set created_tables_hashes; - // This is the only function you have to call -- it will modify the passed - // ASTPtr to point to new AST with some random changes. - void fuzzMain(ASTPtr & ast); - // Various helper functions follow, normally you shouldn't have to call them. 
Field getRandomField(int type); Field fuzzField(Field field); @@ -77,9 +95,6 @@ struct QueryFuzzer ASTPtr getRandomExpressionList(); DataTypePtr fuzzDataType(DataTypePtr type); DataTypePtr getRandomType(); - ASTs getInsertQueriesForFuzzedTables(const String & full_query); - ASTs getDropQueriesForFuzzedTables(const ASTDropQuery & drop_query); - void notifyQueryFailed(ASTPtr ast); void replaceWithColumnLike(ASTPtr & ast); void replaceWithTableLike(ASTPtr & ast); void fuzzOrderByElement(ASTOrderByElement * elem); @@ -102,8 +117,6 @@ struct QueryFuzzer void addTableLike(ASTPtr ast); void addColumnLike(ASTPtr ast); void collectFuzzInfoRecurse(ASTPtr ast); - - static bool isSuitableForFuzzing(const ASTCreateQuery & create); }; } diff --git a/src/Storages/StorageFuzzQuery.cpp b/src/Storages/StorageFuzzQuery.cpp new file mode 100644 index 00000000000..6e8f425f8dc --- /dev/null +++ b/src/Storages/StorageFuzzQuery.cpp @@ -0,0 +1,169 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +ColumnPtr FuzzQuerySource::createColumn() +{ + auto column = ColumnString::create(); + ColumnString::Chars & data_to = column->getChars(); + ColumnString::Offsets & offsets_to = column->getOffsets(); + + offsets_to.resize(block_size); + IColumn::Offset offset = 0; + + auto fuzz_base = query; + size_t row_num = 0; + + while (row_num < block_size) + { + ASTPtr new_query = fuzz_base->clone(); + + auto base_before_fuzz = fuzz_base->formatForErrorMessage(); + fuzzer.fuzzMain(new_query); + auto fuzzed_text = new_query->formatForErrorMessage(); + + if (base_before_fuzz == fuzzed_text) + continue; + + /// AST is too long, will start from the original query. 
+ if (config.max_query_length > 500) + { + fuzz_base = query; + continue; + } + + IColumn::Offset next_offset = offset + fuzzed_text.size() + 1; + data_to.resize(next_offset); + + std::copy(fuzzed_text.begin(), fuzzed_text.end(), &data_to[offset]); + + data_to[offset + fuzzed_text.size()] = 0; + offsets_to[row_num] = next_offset; + + offset = next_offset; + fuzz_base = new_query; + ++row_num; + } + + return column; +} + +StorageFuzzQuery::StorageFuzzQuery( + const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment_, const Configuration & config_) + : IStorage(table_id_), config(config_) +{ + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(columns_); + storage_metadata.setComment(comment_); + setInMemoryMetadata(storage_metadata); +} + +Pipe StorageFuzzQuery::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & /*query_info*/, + ContextPtr /*context*/, + QueryProcessingStage::Enum /*processed_stage*/, + size_t max_block_size, + size_t num_streams) +{ + storage_snapshot->check(column_names); + + Pipes pipes; + pipes.reserve(num_streams); + + const ColumnsDescription & our_columns = storage_snapshot->metadata->getColumns(); + Block block_header; + for (const auto & name : column_names) + { + const auto & name_type = our_columns.get(name); + MutableColumnPtr column = name_type.type->createColumn(); + block_header.insert({std::move(column), name_type.type, name_type.name}); + } + + const char * begin = config.query.data(); + const char * end = begin + config.query.size(); + + ParserQuery parser(end, false); + auto query = parseQuery(parser, begin, end, "", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH, DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); + + for (UInt64 i = 0; i < num_streams; ++i) + pipes.emplace_back(std::make_shared(max_block_size, block_header, config, query)); + + return Pipe::unitePipes(std::move(pipes)); +} + +StorageFuzzQuery::Configuration StorageFuzzQuery::getConfiguration(ASTs & engine_args, ContextPtr local_context) +{ + StorageFuzzQuery::Configuration configuration{}; + + // Supported signatures: + // + // FuzzQuery(query) + // FuzzQuery(query, max_query_length) + // FuzzQuery(query, max_query_length, random_seed) + if (engine_args.empty() || engine_args.size() > 3) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "FuzzQuery requires 1 to 3 arguments: query, max_query_length, random_seed"); + + for (auto & engine_arg : engine_args) + engine_arg = evaluateConstantExpressionOrIdentifierAsLiteral(engine_arg, local_context); + + auto first_arg = checkAndGetLiteralArgument(engine_args[0], "query"); + configuration.query = std::move(first_arg); + + if (engine_args.size() >= 2) + { + const auto & literal = engine_args[1]->as(); + if (!literal.value.isNull()) + configuration.max_query_length = checkAndGetLiteralArgument(literal, "max_query_length"); + } + + if (engine_args.size() == 3) + { + const auto & literal = engine_args[2]->as(); + if (!literal.value.isNull()) + configuration.random_seed = checkAndGetLiteralArgument(literal, "random_seed"); + } + + return configuration; +} + +void registerStorageFuzzQuery(StorageFactory & factory) +{ + factory.registerStorage( + "FuzzQuery", + [](const StorageFactory::Arguments & args) -> std::shared_ptr + { + ASTs & engine_args = args.engine_args; + + if (engine_args.empty()) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Storage FuzzQuery must have arguments."); + + StorageFuzzQuery::Configuration configuration = 
StorageFuzzQuery::getConfiguration(engine_args, args.getLocalContext()); + + for (const auto& col : args.columns) + if (col.type->getTypeId() != TypeIndex::String) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "'StorageFuzzQuery' supports only columns of String type, got {}.", col.type->getName()); + + return std::make_shared(args.table_id, args.columns, args.comment, configuration); + }); +} + +} diff --git a/src/Storages/StorageFuzzQuery.h b/src/Storages/StorageFuzzQuery.h new file mode 100644 index 00000000000..125ef960e74 --- /dev/null +++ b/src/Storages/StorageFuzzQuery.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include +#include + +#include "config.h" + +namespace DB +{ + +class NamedCollection; + +class StorageFuzzQuery final : public IStorage +{ +public: + struct Configuration : public StatelessTableEngineConfiguration + { + String query; + UInt64 max_query_length = 500; + UInt64 random_seed = randomSeed(); + }; + + StorageFuzzQuery( + const StorageID & table_id_, const ColumnsDescription & columns_, const String & comment_, const Configuration & config_); + + std::string getName() const override { return "FuzzQuery"; } + + Pipe read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) override; + + static StorageFuzzQuery::Configuration getConfiguration(ASTs & engine_args, ContextPtr local_context); + +private: + const Configuration config; +}; + + +class FuzzQuerySource : public ISource +{ +public: + FuzzQuerySource( + UInt64 block_size_, Block block_header_, const StorageFuzzQuery::Configuration & config_, ASTPtr query_) + : ISource(block_header_) + , block_size(block_size_) + , block_header(std::move(block_header_)) + , config(config_) + , query(query_) + , fuzzer(config_.random_seed) + { + } + + String getName() const override { return "FuzzQuery"; } + +protected: + Chunk generate() override + { + Columns columns; + columns.reserve(block_header.columns()); + for (const auto & col : block_header) + { + chassert(col.type->getTypeId() == TypeIndex::String); + columns.emplace_back(createColumn()); + } + + return {std::move(columns), block_size}; + } + +private: + ColumnPtr createColumn(); + + UInt64 block_size; + Block block_header; + + StorageFuzzQuery::Configuration config; + ASTPtr query; + + QueryFuzzer fuzzer; +}; + +} diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index 8f33314397c..adc1074b1fe 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -26,6 +26,7 @@ void registerStorageGenerateRandom(StorageFactory & factory); void registerStorageExecutable(StorageFactory & factory); void registerStorageWindowView(StorageFactory & factory); void registerStorageLoop(StorageFactory & factory); +void registerStorageFuzzQuery(StorageFactory & factory); #if USE_RAPIDJSON || USE_SIMDJSON void registerStorageFuzzJSON(StorageFactory & factory); #endif @@ -126,6 +127,7 @@ void registerStorages() registerStorageExecutable(factory); registerStorageWindowView(factory); registerStorageLoop(factory); + registerStorageFuzzQuery(factory); #if USE_RAPIDJSON || USE_SIMDJSON registerStorageFuzzJSON(factory); #endif diff --git a/src/TableFunctions/TableFunctionFuzzQuery.cpp b/src/TableFunctions/TableFunctionFuzzQuery.cpp new file mode 100644 index 00000000000..224f6666556 --- /dev/null +++ b/src/TableFunctions/TableFunctionFuzzQuery.cpp @@ -0,0 
+1,54 @@ +#include + +#include +#include +#include +#include + +namespace DB +{ + + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +void TableFunctionFuzzQuery::parseArguments(const ASTPtr & ast_function, ContextPtr context) +{ + ASTs & args_func = ast_function->children; + + if (args_func.size() != 1) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function '{}' must have arguments", getName()); + + auto args = args_func.at(0)->children; + configuration = StorageFuzzQuery::getConfiguration(args, context); +} + +StoragePtr TableFunctionFuzzQuery::executeImpl( + const ASTPtr & /*ast_function*/, + ContextPtr context, + const std::string & table_name, + ColumnsDescription /*cached_columns*/, + bool is_insert_query) const +{ + ColumnsDescription columns = getActualTableStructure(context, is_insert_query); + auto res = std::make_shared( + StorageID(getDatabaseName(), table_name), + columns, + /* comment */ String{}, + configuration); + res->startup(); + return res; +} + +void registerTableFunctionFuzzQuery(TableFunctionFactory & factory) +{ + factory.registerFunction( + {.documentation + = {.description = "Perturbs a query string with random variations.", + .returned_value = "A table object with a single column containing perturbed query strings."}, + .allow_readonly = true}); +} + +} diff --git a/src/TableFunctions/TableFunctionFuzzQuery.h b/src/TableFunctions/TableFunctionFuzzQuery.h new file mode 100644 index 00000000000..22d10341c4d --- /dev/null +++ b/src/TableFunctions/TableFunctionFuzzQuery.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +#include +#include +#include + +#include "config.h" + +namespace DB +{ + +class TableFunctionFuzzQuery : public ITableFunction +{ +public: + static constexpr auto name = "fuzzQuery"; + std::string getName() const override { return name; } + + void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; + + ColumnsDescription getActualTableStructure(ContextPtr /* context */, bool /* is_insert_query */) const override + { + return ColumnsDescription{{"query", std::make_shared()}}; + } + +private: + StoragePtr executeImpl( + const ASTPtr & ast_function, + ContextPtr context, + const std::string & table_name, + ColumnsDescription cached_columns, + bool is_insert_query) const override; + + const char * getStorageTypeName() const override { return "fuzzQuery"; } + + String source; + std::optional random_seed; + StorageFuzzQuery::Configuration configuration; +}; + +} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index ca4913898f9..a6c90872f12 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -26,6 +26,7 @@ void registerTableFunctions() registerTableFunctionMongoDB(factory); registerTableFunctionRedis(factory); registerTableFunctionMergeTreeIndex(factory); + registerTableFunctionFuzzQuery(factory); #if USE_RAPIDJSON || USE_SIMDJSON registerTableFunctionFuzzJSON(factory); #endif diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index efde4d6dcdc..2a8864a9bfd 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -23,6 +23,7 @@ void registerTableFunctionGenerate(TableFunctionFactory & factory); void registerTableFunctionMongoDB(TableFunctionFactory & factory); void registerTableFunctionRedis(TableFunctionFactory & factory); void 
registerTableFunctionMergeTreeIndex(TableFunctionFactory & factory); +void registerTableFunctionFuzzQuery(TableFunctionFactory & factory); #if USE_RAPIDJSON || USE_SIMDJSON void registerTableFunctionFuzzJSON(TableFunctionFactory & factory); #endif diff --git a/tests/queries/0_stateless/03031_table_function_fuzzquery.reference b/tests/queries/0_stateless/03031_table_function_fuzzquery.reference new file mode 100644 index 00000000000..202e4557a33 --- /dev/null +++ b/tests/queries/0_stateless/03031_table_function_fuzzquery.reference @@ -0,0 +1,2 @@ +query +String diff --git a/tests/queries/0_stateless/03031_table_function_fuzzquery.sql b/tests/queries/0_stateless/03031_table_function_fuzzquery.sql new file mode 100644 index 00000000000..b26096f7f0e --- /dev/null +++ b/tests/queries/0_stateless/03031_table_function_fuzzquery.sql @@ -0,0 +1,18 @@ + +SELECT * FROM fuzzQuery('SELECT 1', 500, 8956) LIMIT 0 FORMAT TSVWithNamesAndTypes; + +SELECT * FROM fuzzQuery('SELECT * +FROM ( + SELECT + ([toString(number % 2)] :: Array(LowCardinality(String))) AS item_id, + count() + FROM numbers(3) + GROUP BY item_id WITH TOTALS +) AS l FULL JOIN ( + SELECT + ([toString((number % 2) * 2)] :: Array(String)) AS item_id + FROM numbers(3) +) AS r +ON l.item_id = r.item_id +ORDER BY 1,2,3; +', 500, 8956) LIMIT 10 FORMAT NULL; From 774cba09dfd4ab347d05caf45f8135a3a51771c3 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 2 Aug 2024 08:48:41 +0200 Subject: [PATCH 1424/2361] Fix flaky test_replicated_table_attach --- .../test_replicated_table_attach/configs/config.xml | 2 +- tests/integration/test_replicated_table_attach/test.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_replicated_table_attach/configs/config.xml b/tests/integration/test_replicated_table_attach/configs/config.xml index fea3eab4126..3f72f638776 100644 --- a/tests/integration/test_replicated_table_attach/configs/config.xml +++ b/tests/integration/test_replicated_table_attach/configs/config.xml @@ -1,6 +1,6 @@ 1 - 5 + 3 diff --git a/tests/integration/test_replicated_table_attach/test.py b/tests/integration/test_replicated_table_attach/test.py index de60b7ec291..4fe8064b26a 100644 --- a/tests/integration/test_replicated_table_attach/test.py +++ b/tests/integration/test_replicated_table_attach/test.py @@ -80,4 +80,8 @@ def test_startup_with_small_bg_pool_partitioned(started_cluster): assert_values() # check that we activate it in the end - node.query_with_retry("INSERT INTO replicated_table_partitioned VALUES(20, 30)") + node.query_with_retry( + "INSERT INTO replicated_table_partitioned VALUES(20, 30)", + retry_count=20, + sleep_time=3, + ) From 797144270b3e20e9e4306949bde95c9a9a32c5e0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 2 Aug 2024 07:09:39 +0000 Subject: [PATCH 1425/2361] Update version_date.tsv and changelogs after v24.4.4.113-stable --- docs/changelogs/v24.4.4.113-stable.md | 73 +++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 74 insertions(+) create mode 100644 docs/changelogs/v24.4.4.113-stable.md diff --git a/docs/changelogs/v24.4.4.113-stable.md b/docs/changelogs/v24.4.4.113-stable.md new file mode 100644 index 00000000000..1f8a221a0a2 --- /dev/null +++ b/docs/changelogs/v24.4.4.113-stable.md @@ -0,0 +1,73 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.4.4.113-stable (d63a54957bd) FIXME as compared to v24.4.3.25-stable (a915dd4eda4) + +#### Improvement +* Backported in 
[#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). +* Backported in [#67499](https://github.com/ClickHouse/ClickHouse/issues/67499): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)). +* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)). +* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)). +* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). +* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)). 
+* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)). +* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). +* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). 
+* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67631](https://github.com/ClickHouse/ClickHouse/issues/67631): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). 
The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#67574](https://github.com/ClickHouse/ClickHouse/issues/67574): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)). +* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). +* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)). +* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)). +* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index b1391c2d781..7b5dcda82e3 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -6,6 +6,7 @@ v24.5.4.49-stable 2024-07-01 v24.5.3.5-stable 2024-06-13 v24.5.2.34-stable 2024-06-13 v24.5.1.1763-stable 2024-06-01 +v24.4.4.113-stable 2024-08-02 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 From dc65c0aa078cf06357291c0fe68f6c035698320f Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 2 Aug 2024 07:15:40 +0000 Subject: [PATCH 1426/2361] Fix doc for parallel test execution Copy-pasterino strikes again. I forgot to remove the single quote. With it, pytest thinks the whole argument is a file: (no name '/ClickHouse/tests/integration/test_storage_s3_queue/test.py::test_max_set_age -- --count 10 -n 5' in any of []) --- tests/integration/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/README.md b/tests/integration/README.md index a8deb97b526..85146c79b1e 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -142,7 +142,7 @@ of parallel workers for `pytest-xdist`. $ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/ $ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse $ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge -$ ./runner 'test_storage_s3_queue/test.py::test_max_set_age -- --count 10 -n 5' +$ ./runner test_storage_s3_queue/test.py::test_max_set_age --count 10 -n 5 Start tests =============================================================================== test session starts ================================================================================ platform linux -- Python 3.10.12, pytest-7.4.4, pluggy-1.5.0 -- /usr/bin/python3 From 9c05a0ad5a0269af02ae2234e1d01dc3ce64bce2 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 2 Aug 2024 09:34:32 +0100 Subject: [PATCH 1427/2361] rm dirs in test_storage_delta --- tests/integration/test_storage_delta/test.py | 28 +++++++------------- 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 384b8296f66..92a870ab360 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -8,6 +8,7 @@ import os import json import time import glob +import shutil import pyspark import delta @@ -52,15 +53,6 @@ def get_spark(): return builder.master("local").getOrCreate() -def remove_local_directory_contents(full_path): - for path in glob.glob(f"{full_path}/**"): - if os.path.isfile(path): - os.unlink(path) - else: - remove_local_directory_contents(path) - os.rmdir(path) - - @pytest.fixture(scope="module") def started_cluster(): try: @@ -179,7 +171,7 @@ def test_single_log_file(started_cluster): ) os.unlink(parquet_data_path) - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") def test_partition_by(started_cluster): @@ -203,7 +195,7 @@ def test_partition_by(started_cluster): create_delta_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 10 - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") def test_checkpoint(started_cluster): @@ -280,7 +272,7 @@ def test_checkpoint(started_cluster): ).strip() ) - remove_local_directory_contents(f"/{TABLE_NAME}") + 
shutil.rmtree(f"/{TABLE_NAME}") spark.sql(f"DROP TABLE {TABLE_NAME}") @@ -321,7 +313,7 @@ def test_multiple_log_files(started_cluster): "SELECT number, toString(number + 1) FROM numbers(200)" ) - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") def test_metadata(started_cluster): @@ -357,7 +349,7 @@ def test_metadata(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 os.unlink(parquet_data_path) - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") def test_types(started_cluster): @@ -431,7 +423,7 @@ def test_types(started_cluster): ] ) - remove_local_directory_contents(f"/{result_file}") + shutil.rmtree(f"/{result_file}") spark.sql(f"DROP TABLE {TABLE_NAME}") @@ -496,7 +488,7 @@ def test_restart_broken(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 os.unlink(parquet_data_path) - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") def test_restart_broken_table_function(started_cluster): @@ -553,7 +545,7 @@ def test_restart_broken_table_function(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 os.unlink(parquet_data_path) - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") def test_partition_columns(started_cluster): @@ -753,5 +745,5 @@ SELECT * FROM deltaLake('http://{started_cluster.minio_ip}:{started_cluster.mini == 1 ) - remove_local_directory_contents(f"/{TABLE_NAME}") + shutil.rmtree(f"/{TABLE_NAME}") spark.sql(f"DROP TABLE {TABLE_NAME}") From 01ca36cb5a157ab961dbd4460acc7e2ebb37e72a Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 2 Aug 2024 09:37:47 +0100 Subject: [PATCH 1428/2361] empty From 27f4e1808e4cf299cd8eaf4a19c3bb979aa4e5bd Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 2 Aug 2024 10:43:02 +0200 Subject: [PATCH 1429/2361] Update tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh --- .../0_stateless/02434_cancel_insert_when_client_dies.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh index 45f4194104e..dca8dae22c3 100755 --- a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh +++ b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh @@ -10,10 +10,7 @@ export DATA_FILE="$CLICKHOUSE_TMP/deduptest.tsv" export TEST_MARK="02434_insert_${CLICKHOUSE_DATABASE}_" $CLICKHOUSE_CLIENT -q 'select * from numbers(5000000) format TSV' > $DATA_FILE -$CLICKHOUSE_CLIENT -q "create table dedup_test(A Int64) Engine = MergeTree order by A - settings non_replicated_deduplication_window=1000 - , merge_tree_clear_old_temporary_directories_interval_seconds = 1 - ;" +$CLICKHOUSE_CLIENT -q "create table dedup_test(A Int64) Engine = MergeTree order by A settings non_replicated_deduplication_window=1000, merge_tree_clear_old_temporary_directories_interval_seconds = 1;" $CLICKHOUSE_CLIENT -q "create table dedup_dist(A Int64) Engine = Distributed('test_cluster_one_shard_two_replicas', currentDatabase(), dedup_test)" function insert_data From 80eda5c647f25a2cf24cf076f3bedc6a14712942 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 2 Aug 2024 09:04:04 +0000 Subject: [PATCH 1430/2361] Fix binary deserialization of JSON --- .../Serializations/SerializationObject.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git 
a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 305ebfa2e16..9091702326a 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -663,6 +663,19 @@ void SerializationObject::deserializeBinary(IColumn & col, ReadBuffer & istr, co restoreColumnObject(column_object, prev_size); throw; } + + /// Insert default to all remaining typed and dynamic paths. + for (auto & [_, column] : typed_paths) + { + if (column->size() == prev_size) + column->insertDefault(); + } + + for (auto & [_, column] : column_object.getDynamicPathsPtrs()) + { + if (column->size() == prev_size) + column->insertDefault(); + } } SerializationPtr SerializationObject::TypedPathSubcolumnCreator::create(const DB::SerializationPtr & prev) const From 6c8f458b0bf9981068c7fecfdd9cef627406419b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 2 Aug 2024 11:13:41 +0200 Subject: [PATCH 1431/2361] Fix reloading SQL UDFs with UNION --- .../UserDefinedSQLFunctionFactory.cpp | 8 +++++-- .../UserDefinedSQLObjectsDiskStorage.cpp | 6 ++--- .../UserDefinedSQLObjectsDiskStorage.h | 1 - .../UserDefinedSQLObjectsStorageBase.cpp | 15 +++++++++--- .../UserDefinedSQLObjectsStorageBase.h | 4 ++++ .../UserDefinedSQLObjectsZooKeeperStorage.cpp | 2 +- .../UserDefinedSQLObjectsZooKeeperStorage.h | 2 -- .../NormalizeSelectWithUnionQueryVisitor.h | 2 -- .../test.py | 23 +++++++++++++++++-- .../test.py | 12 ++++++++++ .../03215_udf_with_union.reference | 1 + .../0_stateless/03215_udf_with_union.sql | 14 +++++++++++ 12 files changed, 74 insertions(+), 16 deletions(-) create mode 100644 tests/queries/0_stateless/03215_udf_with_union.reference create mode 100644 tests/queries/0_stateless/03215_udf_with_union.sql diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp b/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp index e6796874e50..d0bc812f91d 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionFactory.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -9,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -80,13 +82,15 @@ namespace validateFunctionRecursiveness(*function_body, name); } - ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query) + ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query, const ContextPtr & context) { auto ptr = create_function_query.clone(); auto & res = typeid_cast(*ptr); res.if_not_exists = false; res.or_replace = false; FunctionNameNormalizer::visit(res.function_core.get()); + NormalizeSelectWithUnionQueryVisitor::Data data{context->getSettingsRef().union_default_mode}; + NormalizeSelectWithUnionQueryVisitor{data}.visit(res.function_core); return ptr; } } @@ -125,7 +129,7 @@ void UserDefinedSQLFunctionFactory::checkCanBeUnregistered(const ContextPtr & co bool UserDefinedSQLFunctionFactory::registerFunction(const ContextMutablePtr & context, const String & function_name, ASTPtr create_function_query, bool throw_if_exists, bool replace_if_exists) { checkCanBeRegistered(context, function_name, *create_function_query); - create_function_query = normalizeCreateFunctionQuery(*create_function_query); + create_function_query = normalizeCreateFunctionQuery(*create_function_query, context); try { diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp 
b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp index 4c004d2537c..8910b45e79d 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.cpp @@ -1,7 +1,7 @@ #include "Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h" -#include "Functions/UserDefined/UserDefinedSQLFunctionFactory.h" -#include "Functions/UserDefined/UserDefinedSQLObjectType.h" +#include +#include #include #include @@ -54,7 +54,7 @@ namespace } UserDefinedSQLObjectsDiskStorage::UserDefinedSQLObjectsDiskStorage(const ContextPtr & global_context_, const String & dir_path_) - : global_context(global_context_) + : UserDefinedSQLObjectsStorageBase(global_context_) , dir_path{makeDirectoryPathCanonical(dir_path_)} , log{getLogger("UserDefinedSQLObjectsLoaderFromDisk")} { diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h index ae0cbd0c589..cafbd140598 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsDiskStorage.h @@ -42,7 +42,6 @@ private: ASTPtr tryLoadObject(UserDefinedSQLObjectType object_type, const String & object_name, const String & file_path, bool check_file_exists); String getFilePath(UserDefinedSQLObjectType object_type, const String & object_name) const; - ContextPtr global_context; String dir_path; LoggerPtr log; std::atomic objects_loaded = false; diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp index f251d11789f..225e919301d 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.cpp @@ -2,7 +2,10 @@ #include +#include +#include #include +#include #include namespace DB @@ -17,18 +20,24 @@ namespace ErrorCodes namespace { -ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query) +ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query, const ContextPtr & context) { auto ptr = create_function_query.clone(); auto & res = typeid_cast(*ptr); res.if_not_exists = false; res.or_replace = false; FunctionNameNormalizer::visit(res.function_core.get()); + NormalizeSelectWithUnionQueryVisitor::Data data{context->getSettingsRef().union_default_mode}; + NormalizeSelectWithUnionQueryVisitor{data}.visit(res.function_core); return ptr; } } +UserDefinedSQLObjectsStorageBase::UserDefinedSQLObjectsStorageBase(ContextPtr global_context_) + : global_context(std::move(global_context_)) +{} + ASTPtr UserDefinedSQLObjectsStorageBase::get(const String & object_name) const { std::lock_guard lock(mutex); @@ -148,7 +157,7 @@ void UserDefinedSQLObjectsStorageBase::setAllObjects(const std::vector normalized_functions; for (const auto & [function_name, create_query] : new_objects) - normalized_functions[function_name] = normalizeCreateFunctionQuery(*create_query); + normalized_functions[function_name] = normalizeCreateFunctionQuery(*create_query, global_context); std::lock_guard lock(mutex); object_name_to_create_object_map = std::move(normalized_functions); @@ -166,7 +175,7 @@ std::vector> UserDefinedSQLObjectsStorageBase::getAllO void UserDefinedSQLObjectsStorageBase::setObject(const String & object_name, const IAST & create_object_query) { std::lock_guard lock(mutex); - object_name_to_create_object_map[object_name] = normalizeCreateFunctionQuery(create_object_query); + 
object_name_to_create_object_map[object_name] = normalizeCreateFunctionQuery(create_object_query, global_context); } void UserDefinedSQLObjectsStorageBase::removeObject(const String & object_name) diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h index cab63a3bfcf..0dbc5586f08 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsStorageBase.h @@ -4,6 +4,7 @@ #include #include +#include #include @@ -13,6 +14,7 @@ namespace DB class UserDefinedSQLObjectsStorageBase : public IUserDefinedSQLObjectsStorage { public: + explicit UserDefinedSQLObjectsStorageBase(ContextPtr global_context_); ASTPtr get(const String & object_name) const override; ASTPtr tryGet(const String & object_name) const override; @@ -64,6 +66,8 @@ protected: std::unordered_map object_name_to_create_object_map; mutable std::recursive_mutex mutex; + + ContextPtr global_context; }; } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp index 01e7e3995fa..12c1302a3fe 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.cpp @@ -48,7 +48,7 @@ namespace UserDefinedSQLObjectsZooKeeperStorage::UserDefinedSQLObjectsZooKeeperStorage( const ContextPtr & global_context_, const String & zookeeper_path_) - : global_context{global_context_} + : UserDefinedSQLObjectsStorageBase(global_context_) , zookeeper_getter{[global_context_]() { return global_context_->getZooKeeper(); }} , zookeeper_path{zookeeper_path_} , watch_queue{std::make_shared>>(std::numeric_limits::max())} diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h index 61002be2bfd..0aa9b198398 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsZooKeeperStorage.h @@ -68,8 +68,6 @@ private: void refreshObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type); void syncObjects(const zkutil::ZooKeeperPtr & zookeeper, UserDefinedSQLObjectType object_type); - ContextPtr global_context; - zkutil::ZooKeeperCachingGetter zookeeper_getter; String zookeeper_path; std::atomic objects_loaded = false; diff --git a/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h b/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h index b2f55003da5..b642b5def91 100644 --- a/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h +++ b/src/Interpreters/NormalizeSelectWithUnionQueryVisitor.h @@ -4,8 +4,6 @@ #include #include -#include - namespace DB { diff --git a/tests/integration/test_replicated_user_defined_functions/test.py b/tests/integration/test_replicated_user_defined_functions/test.py index e5f6683b90b..92d86a8fd2c 100644 --- a/tests/integration/test_replicated_user_defined_functions/test.py +++ b/tests/integration/test_replicated_user_defined_functions/test.py @@ -141,6 +141,9 @@ def test_drop_if_exists(): def test_replication(): node1.query("CREATE FUNCTION f2 AS (x, y) -> x - y") + node1.query( + "CREATE FUNCTION f3 AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))" + ) assert ( node1.query("SELECT create_query FROM system.functions WHERE name='f2'") @@ -154,7 +157,11 @@ def test_replication(): assert node1.query("SELECT 
f2(12,3)") == "9\n" assert node2.query("SELECT f2(12,3)") == "9\n" + assert node1.query("SELECT f3()") == "2\n" + assert node2.query("SELECT f3()") == "2\n" + node1.query("DROP FUNCTION f2") + node1.query("DROP FUNCTION f3") assert ( node1.query("SELECT create_query FROM system.functions WHERE name='f2'") == "" ) @@ -214,7 +221,9 @@ def test_reload_zookeeper(): ) # config reloads, but can still work - node1.query("CREATE FUNCTION f2 AS (x, y) -> x - y") + node1.query( + "CREATE FUNCTION f2 AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))" + ) assert_eq_with_retry( node2, "SELECT name FROM system.functions WHERE name IN ['f1', 'f2'] ORDER BY name", @@ -269,7 +278,7 @@ def test_reload_zookeeper(): TSV(["f1", "f2", "f3"]), ) - assert node2.query("SELECT f1(12, 3), f2(12, 3), f3(12, 3)") == TSV([[15, 9, 4]]) + assert node2.query("SELECT f1(12, 3), f2(), f3(12, 3)") == TSV([[15, 2, 4]]) active_zk_connections = get_active_zk_connections() assert ( @@ -307,3 +316,13 @@ def test_start_without_zookeeper(): "CREATE FUNCTION f1 AS (x, y) -> (x + y)\n", ) node1.query("DROP FUNCTION f1") + + +def test_server_restart(): + node1.query( + "CREATE FUNCTION f1 AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))" + ) + assert node1.query("SELECT f1()") == "2\n" + node1.restart_clickhouse() + assert node1.query("SELECT f1()") == "2\n" + node1.query("DROP FUNCTION f1") diff --git a/tests/integration/test_user_defined_object_persistence/test.py b/tests/integration/test_user_defined_object_persistence/test.py index 986438a4eed..bd491dfa195 100644 --- a/tests/integration/test_user_defined_object_persistence/test.py +++ b/tests/integration/test_user_defined_object_persistence/test.py @@ -18,20 +18,25 @@ def started_cluster(): def test_persistence(): create_function_query1 = "CREATE FUNCTION MySum1 AS (a, b) -> a + b" create_function_query2 = "CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b" + create_function_query3 = "CREATE FUNCTION MyUnion AS () -> (SELECT sum(s) FROM (SELECT 1 as s UNION ALL SELECT 1 as s))" instance.query(create_function_query1) instance.query(create_function_query2) + instance.query(create_function_query3) assert instance.query("SELECT MySum1(1,2)") == "3\n" assert instance.query("SELECT MySum2(1,2)") == "5\n" + assert instance.query("SELECT MyUnion()") == "2\n" instance.restart_clickhouse() assert instance.query("SELECT MySum1(1,2)") == "3\n" assert instance.query("SELECT MySum2(1,2)") == "5\n" + assert instance.query("SELECT MyUnion()") == "2\n" instance.query("DROP FUNCTION MySum2") instance.query("DROP FUNCTION MySum1") + instance.query("DROP FUNCTION MyUnion") instance.restart_clickhouse() @@ -48,3 +53,10 @@ def test_persistence(): or "Function with name 'MySum2' does not exist. In scope SELECT MySum2(1, 2)" in error_message ) + + error_message = instance.query_and_get_error("SELECT MyUnion()") + assert ( + "Unknown function MyUnion" in error_message + or "Function with name 'MyUnion' does not exist. 
In scope SELECT MyUnion" + in error_message + ) diff --git a/tests/queries/0_stateless/03215_udf_with_union.reference b/tests/queries/0_stateless/03215_udf_with_union.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/03215_udf_with_union.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/03215_udf_with_union.sql b/tests/queries/0_stateless/03215_udf_with_union.sql new file mode 100644 index 00000000000..00390c5d930 --- /dev/null +++ b/tests/queries/0_stateless/03215_udf_with_union.sql @@ -0,0 +1,14 @@ +DROP FUNCTION IF EXISTS 03215_udf_with_union; +CREATE FUNCTION 03215_udf_with_union AS () -> ( + SELECT sum(s) + FROM + ( + SELECT 1 AS s + UNION ALL + SELECT 1 AS s + ) +); + +SELECT 03215_udf_with_union(); + +DROP FUNCTION 03215_udf_with_union; From 900e08d6e7c433758d13f5a669c2112bb3856007 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 11:32:09 +0200 Subject: [PATCH 1432/2361] Try fix --- tests/ci/unit_tests_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index 716625d7077..6430fa78801 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -180,7 +180,7 @@ def main(): run_command = ( f"docker run --cap-add=SYS_PTRACE --volume={tests_binary}:/unit_tests_dbms " "--security-opt seccomp=unconfined " # required to issue io_uring sys-calls - f"--volume={test_output}:/test_output {docker_image} ${gdb_enabled}" + f"--volume={test_output}:/test_output {docker_image} {gdb_enabled}" ) run_log_path = test_output / "run.log" From af53ed4c02ba52b3f57e97941b37a5931620d447 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 2 Aug 2024 12:08:49 +0200 Subject: [PATCH 1433/2361] Ping CI From 62f0e09ecbb226ea72b5ee8d812436ef75038e33 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 2 Aug 2024 12:17:08 +0200 Subject: [PATCH 1434/2361] Fix setting changes --- src/Core/SettingsChangesHistory.cpp | 264 +--------------------------- 1 file changed, 2 insertions(+), 262 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 2438202f6a3..b6ef654438e 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,268 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. 
It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}, - {"restore_replace_external_table_functions_to_null", false, false, "New setting."}, - {"restore_replace_external_engines_to_null", false, false, "New setting."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. 
The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. 
This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - {"allow_archive_path_syntax", false, true, "Added new setting to allow disabling archive path syntax."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for 
ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. 
That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 
10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } @@ -338,6 +76,7 @@ static std::initializer_list Date: Mon, 29 Jul 2024 12:54:36 +0000 Subject: [PATCH 1435/2361] Trying to fix test_cache_evicted_by_temporary_data and print debug info --- .../config.d/storage_configuration.xml | 6 +-- .../test_temporary_data_in_cache/test.py | 44 ++++++++++++------- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml b/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml index 5a087d03266..107864fde0c 100644 --- a/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_temporary_data_in_cache/configs/config.d/storage_configuration.xml @@ -10,9 +10,9 @@ cache local_disk /tiny_local_cache/ - 10M - 1M - 1M + 12M + 100K + 100K 1 diff --git a/tests/integration/test_temporary_data_in_cache/test.py b/tests/integration/test_temporary_data_in_cache/test.py index cab134dcce2..abdfb5f4064 100644 --- a/tests/integration/test_temporary_data_in_cache/test.py +++ b/tests/integration/test_temporary_data_in_cache/test.py @@ -7,6 +7,9 @@ import fnmatch from helpers.cluster import ClickHouseCluster from helpers.client import QueryRuntimeException + +MB = 1024 * 1024 + cluster = ClickHouseCluster(__file__) node = cluster.add_instance( @@ -36,15 +39,28 @@ def test_cache_evicted_by_temporary_data(start_cluster): q("SELECT sum(size) FROM system.filesystem_cache").strip() ) - assert get_cache_size() == 0 + def 
dump_debug_info(): + return "\n".join( + [ + ">>> filesystem_cache <<<", + q("SELECT * FROM system.filesystem_cache FORMAT Vertical"), + ">>> remote_data_paths <<<", + q("SELECT * FROM system.remote_data_paths FORMAT Vertical"), + ">>> tiny_local_cache_local_disk <<<", + q( + "SELECT * FROM system.disks WHERE name = 'tiny_local_cache_local_disk' FORMAT Vertical" + ), + ] + ) - assert get_free_space() > 8 * 1024 * 1024 + assert get_cache_size() == 0, dump_debug_info() + assert get_free_space() > 8 * MB, dump_debug_info() # Codec is NONE to make cache size predictable q( - "CREATE TABLE t1 (x UInt64 CODEC(NONE), y UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY x SETTINGS storage_policy = 'tiny_local_cache'" + "CREATE TABLE t1 (x UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY x SETTINGS storage_policy = 'tiny_local_cache'" ) - q("INSERT INTO t1 SELECT number, number FROM numbers(1024 * 1024)") + q("INSERT INTO t1 SELECT number FROM numbers(1024 * 1024)") # To be sure that nothing is reading the cache and entries for t1 can be evited q("OPTIMIZE TABLE t1 FINAL") @@ -54,11 +70,11 @@ def test_cache_evicted_by_temporary_data(start_cluster): q("SELECT sum(x) FROM t1") cache_size_with_t1 = get_cache_size() - assert cache_size_with_t1 > 8 * 1024 * 1024 + assert cache_size_with_t1 > 8 * MB, dump_debug_info() # Almost all disk space is occupied by t1 cache free_space_with_t1 = get_free_space() - assert free_space_with_t1 < 4 * 1024 * 1024 + assert free_space_with_t1 < 4 * MB, dump_debug_info() # Try to sort the table, but fail because of lack of disk space with pytest.raises(QueryRuntimeException) as exc: @@ -76,31 +92,27 @@ def test_cache_evicted_by_temporary_data(start_cluster): # Some data evicted from cache by temporary data cache_size_after_eviction = get_cache_size() - assert cache_size_after_eviction < cache_size_with_t1 + assert cache_size_after_eviction < cache_size_with_t1, dump_debug_info() # Disk space freed, at least 3 MB, because temporary data tried to write 4 MB - assert get_free_space() > free_space_with_t1 + 3 * 1024 * 1024 + assert get_free_space() > free_space_with_t1 + 3 * MB, dump_debug_info() # Read some data to fill the cache again - q("SELECT avg(y) FROM t1") + q("SELECT avg(x) FROM t1") cache_size_with_t1 = get_cache_size() - assert cache_size_with_t1 > 8 * 1024 * 1024, q( - "SELECT * FROM system.filesystem_cache FORMAT Vertical" - ) + assert cache_size_with_t1 > 8 * MB, dump_debug_info() # Almost all disk space is occupied by t1 cache free_space_with_t1 = get_free_space() - assert free_space_with_t1 < 4 * 1024 * 1024, q( - "SELECT * FROM system.disks WHERE name = 'tiny_local_cache_local_disk' FORMAT Vertical" - ) + assert free_space_with_t1 < 4 * MB, dump_debug_info() node.http_query( "SELECT randomPrintableASCII(1024) FROM numbers(8 * 1024) FORMAT TSV", params={"buffer_size": 0, "wait_end_of_query": 1}, ) - assert get_free_space() > free_space_with_t1 + 3 * 1024 * 1024 + assert get_free_space() > free_space_with_t1 + 3 * MB, dump_debug_info() # not enough space for buffering 32 MB with pytest.raises(Exception) as exc: From e2b686efea175e4ddc6472849934aa953f13138d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 12:26:38 +0200 Subject: [PATCH 1436/2361] Fix test --- src/Common/tests/gtest_lsan.cpp | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/Common/tests/gtest_lsan.cpp b/src/Common/tests/gtest_lsan.cpp index f6e1984ec58..7fc4ad2749e 100644 --- a/src/Common/tests/gtest_lsan.cpp +++ 
b/src/Common/tests/gtest_lsan.cpp @@ -14,20 +14,21 @@ /// because of broken getauxval() [1]. /// /// [1]: https://github.com/ClickHouse/ClickHouse/pull/33957 -TEST(Common, LSan) +TEST(SanitizerDeathTest, LSan) { - int sanitizers_exit_code = 1; - - ASSERT_EXIT({ - std::thread leak_in_thread([]() + EXPECT_DEATH( { - void * leak = malloc(4096); - ASSERT_NE(leak, nullptr); - }); - leak_in_thread.join(); + std::thread leak_in_thread( + []() + { + void * leak = malloc(4096); + ASSERT_NE(leak, nullptr); + }); + leak_in_thread.join(); - __lsan_do_leak_check(); - }, ::testing::ExitedWithCode(sanitizers_exit_code), ".*LeakSanitizer: detected memory leaks.*"); + __lsan_do_leak_check(); + }, + ".*LeakSanitizer: detected memory leaks.*"); } #endif From 1c533f714529dd3065cfcdb0d69e5bbd28f51c29 Mon Sep 17 00:00:00 2001 From: skyoct Date: Fri, 2 Aug 2024 18:32:43 +0800 Subject: [PATCH 1437/2361] CI From 064c0eb9587d9dbd1fa81cdbae8554c22dd11734 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov Date: Fri, 2 Aug 2024 10:35:32 +0000 Subject: [PATCH 1438/2361] even better healthcheck for ldap --- tests/integration/compose/docker_compose_ldap.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/compose/docker_compose_ldap.yml b/tests/integration/compose/docker_compose_ldap.yml index 1f50b34735d..f49e00400a2 100644 --- a/tests/integration/compose/docker_compose_ldap.yml +++ b/tests/integration/compose/docker_compose_ldap.yml @@ -19,6 +19,7 @@ services: ldapsearch -x -H ldap://localhost:$$LDAP_PORT_NUMBER -D $$LDAP_ADMIN_DN -w $$LDAP_ADMIN_PASSWORD -b $$LDAP_ROOT | grep -c -E "member: cn=j(ohn|ane)doe" | grep 2 >> /dev/null + && cat /run/slapd/slapd.pid interval: 10s retries: 10 timeout: 2s From 02e48436057e45a884a1381e1c9cda9e1fe7de17 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 30 Jul 2024 09:30:39 +0000 Subject: [PATCH 1439/2361] test_cache_evicted_by_temporary_data drop cache --- .../test_temporary_data_in_cache/test.py | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/tests/integration/test_temporary_data_in_cache/test.py b/tests/integration/test_temporary_data_in_cache/test.py index abdfb5f4064..87192a20975 100644 --- a/tests/integration/test_temporary_data_in_cache/test.py +++ b/tests/integration/test_temporary_data_in_cache/test.py @@ -39,19 +39,21 @@ def test_cache_evicted_by_temporary_data(start_cluster): q("SELECT sum(size) FROM system.filesystem_cache").strip() ) - def dump_debug_info(): - return "\n".join( - [ - ">>> filesystem_cache <<<", - q("SELECT * FROM system.filesystem_cache FORMAT Vertical"), - ">>> remote_data_paths <<<", - q("SELECT * FROM system.remote_data_paths FORMAT Vertical"), - ">>> tiny_local_cache_local_disk <<<", - q( - "SELECT * FROM system.disks WHERE name = 'tiny_local_cache_local_disk' FORMAT Vertical" - ), - ] - ) + dump_debug_info = lambda: "\n".join( + [ + ">>> filesystem_cache <<<", + q("SELECT * FROM system.filesystem_cache FORMAT Vertical"), + ">>> remote_data_paths <<<", + q("SELECT * FROM system.remote_data_paths FORMAT Vertical"), + ">>> tiny_local_cache_local_disk <<<", + q( + "SELECT * FROM system.disks WHERE name = 'tiny_local_cache_local_disk' FORMAT Vertical" + ), + ] + ) + + q("SYSTEM DROP FILESYSTEM CACHE") + q("DROP TABLE IF EXISTS t1 SYNC") assert get_cache_size() == 0, dump_debug_info() assert get_free_space() > 8 * MB, dump_debug_info() @@ -124,4 +126,4 @@ def test_cache_evicted_by_temporary_data(start_cluster): str(exc.value), "*Failed to reserve * for temporary file*" ), exc.value - q("DROP TABLE IF 
EXISTS t1") + q("DROP TABLE IF EXISTS t1 SYNC") From 97f1f6e22174916cc3b401fcebdf01dcd0fb0107 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 12:46:37 +0200 Subject: [PATCH 1440/2361] Don't hide errors on clickhouse local runs --- docker/test/stateless/stress_tests.lib | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/stress_tests.lib b/docker/test/stateless/stress_tests.lib index 682da1df837..2a833b17f14 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/docker/test/stateless/stress_tests.lib @@ -308,7 +308,8 @@ function collect_query_and_trace_logs() { for table in query_log trace_log metric_log do - clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||: + # Don't ignore errors here, it leads to ignore sanitizer reports when running clickhouse-local + clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst done } From e5cf376c4ff3742fb0d3127ef6b50e08180eb153 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 12:56:49 +0200 Subject: [PATCH 1441/2361] Don't attach gdb in ASAN runs of fuzzer, stateless or stress checks --- docker/test/fuzzer/run-fuzzer.sh | 106 +++++++++++++++------------ docker/test/stateless/attach_gdb.lib | 78 +++++++++++--------- 2 files changed, 101 insertions(+), 83 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index b8f967ed9c2..ae1b9e94bed 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -193,53 +193,60 @@ function fuzz kill -0 $server_pid - # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog - # and clickhouse-server can do fork-exec, for example, to run some bridge. - # Do not set nostop noprint for all signals, because some it may cause gdb to hang, - # explicitly ignore non-fatal signals that are used by server. - # Number of SIGRTMIN can be determined only in runtime. - RTMIN=$(kill -l SIGRTMIN) - echo " -set follow-fork-mode parent -handle SIGHUP nostop noprint pass -handle SIGINT nostop noprint pass -handle SIGQUIT nostop noprint pass -handle SIGPIPE nostop noprint pass -handle SIGTERM nostop noprint pass -handle SIGUSR1 nostop noprint pass -handle SIGUSR2 nostop noprint pass -handle SIG$RTMIN nostop noprint pass -info signals -continue -backtrace full -thread apply all backtrace full -info registers -disassemble /s -up -disassemble /s -up -disassemble /s -p \"done\" -detach -quit -" > script.gdb + IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)") + if [[ "$IS_ASAN" = "1" ]]; + then + echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections" + else + # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog + # and clickhouse-server can do fork-exec, for example, to run some bridge. + # Do not set nostop noprint for all signals, because some it may cause gdb to hang, + # explicitly ignore non-fatal signals that are used by server. + # Number of SIGRTMIN can be determined only in runtime. 
+ RTMIN=$(kill -l SIGRTMIN) + echo " + set follow-fork-mode parent + handle SIGHUP nostop noprint pass + handle SIGINT nostop noprint pass + handle SIGQUIT nostop noprint pass + handle SIGPIPE nostop noprint pass + handle SIGTERM nostop noprint pass + handle SIGUSR1 nostop noprint pass + handle SIGUSR2 nostop noprint pass + handle SIG$RTMIN nostop noprint pass + info signals + continue + backtrace full + thread apply all backtrace full + info registers + disassemble /s + up + disassemble /s + up + disassemble /s + p \"done\" + detach + quit + " > script.gdb - gdb -batch -command script.gdb -p $server_pid & - sleep 5 - # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s) - time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: + gdb -batch -command script.gdb -p $server_pid & + sleep 5 + # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s) + time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||: + + # Check connectivity after we attach gdb, because it might cause the server + # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time. + for _ in {1..180} + do + if clickhouse-client --query "select 1" + then + break + fi + sleep 1 + done + kill -0 $server_pid # This checks that it is our server that is started and not some other one + fi - # Check connectivity after we attach gdb, because it might cause the server - # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time. - for _ in {1..180} - do - if clickhouse-client --query "select 1" - then - break - fi - sleep 1 - done - kill -0 $server_pid # This checks that it is our server that is started and not some other one echo 'Server started and responded.' setup_logs_replication @@ -264,8 +271,13 @@ quit # The fuzzer_pid belongs to the timeout process. actual_fuzzer_pid=$(ps -o pid= --ppid "$fuzzer_pid") - echo "Attaching gdb to the fuzzer itself" - gdb -batch -command script.gdb -p $actual_fuzzer_pid & + if [[ "$IS_ASAN" = "1" ]]; + then + echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections" + else + echo "Attaching gdb to the fuzzer itself" + gdb -batch -command script.gdb -p $actual_fuzzer_pid & + fi # Wait for the fuzzer to complete. # Note that the 'wait || ...' thing is required so that the script doesn't diff --git a/docker/test/stateless/attach_gdb.lib b/docker/test/stateless/attach_gdb.lib index d288288bb17..d0cac24481f 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/docker/test/stateless/attach_gdb.lib @@ -5,43 +5,49 @@ source /utils.lib function attach_gdb_to_clickhouse() { - # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog - # and clickhouse-server can do fork-exec, for example, to run some bridge. - # Do not set nostop noprint for all signals, because some it may cause gdb to hang, - # explicitly ignore non-fatal signals that are used by server. - # Number of SIGRTMIN can be determined only in runtime. 
- RTMIN=$(kill -l SIGRTMIN) - echo " -set follow-fork-mode parent -handle SIGHUP nostop noprint pass -handle SIGINT nostop noprint pass -handle SIGQUIT nostop noprint pass -handle SIGPIPE nostop noprint pass -handle SIGTERM nostop noprint pass -handle SIGUSR1 nostop noprint pass -handle SIGUSR2 nostop noprint pass -handle SIG$RTMIN nostop noprint pass -info signals -continue -backtrace full -thread apply all backtrace full -info registers -disassemble /s -up -disassemble /s -up -disassemble /s -p \"done\" -detach -quit -" > script.gdb + IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)") + if [[ "$IS_ASAN" = "1" ]]; + then + echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections" + else + # Set follow-fork-mode to parent, because we attach to clickhouse-server, not to watchdog + # and clickhouse-server can do fork-exec, for example, to run some bridge. + # Do not set nostop noprint for all signals, because some it may cause gdb to hang, + # explicitly ignore non-fatal signals that are used by server. + # Number of SIGRTMIN can be determined only in runtime. + RTMIN=$(kill -l SIGRTMIN) + echo " + set follow-fork-mode parent + handle SIGHUP nostop noprint pass + handle SIGINT nostop noprint pass + handle SIGQUIT nostop noprint pass + handle SIGPIPE nostop noprint pass + handle SIGTERM nostop noprint pass + handle SIGUSR1 nostop noprint pass + handle SIGUSR2 nostop noprint pass + handle SIG$RTMIN nostop noprint pass + info signals + continue + backtrace full + thread apply all backtrace full + info registers + disassemble /s + up + disassemble /s + up + disassemble /s + p \"done\" + detach + quit + " > script.gdb - # FIXME Hung check may work incorrectly because of attached gdb - # We cannot attach another gdb to get stacktraces if some queries hung - gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log & - sleep 5 - # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) - run_with_retry 60 clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" + # FIXME Hung check may work incorrectly because of attached gdb + # We cannot attach another gdb to get stacktraces if some queries hung + gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log & + sleep 5 + # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s) + run_with_retry 60 clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" + fi } # vi: ft=bash From 092c837119a9be11cfcc85b4696e9a9c74d9bbc8 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 2 Aug 2024 12:13:26 +0100 Subject: [PATCH 1442/2361] randomize table name in test_storage_delta --- tests/integration/test_storage_delta/test.py | 53 +++++++------------- 1 file changed, 17 insertions(+), 36 deletions(-) diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 92a870ab360..054b79ff6fe 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -8,7 +8,8 @@ import os import json import time import glob -import shutil +import random +import string import pyspark import delta @@ -53,6 
+54,11 @@ def get_spark(): return builder.master("local").getOrCreate() +def randomize_table_name(table_name, random_suffix_length=10): + letters = string.ascii_letters + string.digits + return f"{table_name}{''.join(random.choice(letters) for _ in range(random_suffix_length))}" + + @pytest.fixture(scope="module") def started_cluster(): try: @@ -152,7 +158,7 @@ def test_single_log_file(started_cluster): spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = started_cluster.minio_bucket - TABLE_NAME = "test_single_log_file" + TABLE_NAME = randomize_table_name("test_single_log_file") inserted_data = "SELECT number as a, toString(number + 1) as b FROM numbers(100)" parquet_data_path = create_initial_data_file( @@ -170,16 +176,13 @@ def test_single_log_file(started_cluster): inserted_data ) - os.unlink(parquet_data_path) - shutil.rmtree(f"/{TABLE_NAME}") - def test_partition_by(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = started_cluster.minio_bucket - TABLE_NAME = "test_partition_by" + TABLE_NAME = randomize_table_name("test_partition_by") write_delta_from_df( spark, @@ -195,15 +198,13 @@ def test_partition_by(started_cluster): create_delta_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 10 - shutil.rmtree(f"/{TABLE_NAME}") - def test_checkpoint(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = started_cluster.minio_bucket - TABLE_NAME = "test_checkpoint" + TABLE_NAME = randomize_table_name("test_checkpoint") write_delta_from_df( spark, @@ -272,16 +273,13 @@ def test_checkpoint(started_cluster): ).strip() ) - shutil.rmtree(f"/{TABLE_NAME}") - spark.sql(f"DROP TABLE {TABLE_NAME}") - def test_multiple_log_files(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = started_cluster.minio_bucket - TABLE_NAME = "test_multiple_log_files" + TABLE_NAME = randomize_table_name("test_multiple_log_files") write_delta_from_df( spark, generate_data(spark, 0, 100), f"/{TABLE_NAME}", mode="overwrite" @@ -313,15 +311,13 @@ def test_multiple_log_files(started_cluster): "SELECT number, toString(number + 1) FROM numbers(200)" ) - shutil.rmtree(f"/{TABLE_NAME}") - def test_metadata(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = started_cluster.minio_bucket - TABLE_NAME = "test_metadata" + TABLE_NAME = randomize_table_name("test_metadata") parquet_data_path = create_initial_data_file( started_cluster, @@ -348,14 +344,11 @@ def test_metadata(started_cluster): create_delta_table(instance, TABLE_NAME) assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 - os.unlink(parquet_data_path) - shutil.rmtree(f"/{TABLE_NAME}") - def test_types(started_cluster): - TABLE_NAME = "test_types" + TABLE_NAME = randomize_table_name("test_types") spark = started_cluster.spark_session - result_file = f"{TABLE_NAME}_result_2" + result_file = randomize_table_name(f"{TABLE_NAME}_result_2") delta_table = ( DeltaTable.create(spark) @@ -423,16 +416,13 @@ def test_types(started_cluster): ] ) - shutil.rmtree(f"/{result_file}") - spark.sql(f"DROP TABLE {TABLE_NAME}") - def test_restart_broken(started_cluster): instance = 
started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = "broken" - TABLE_NAME = "test_restart_broken" + TABLE_NAME = randomize_table_name("test_restart_broken") if not minio_client.bucket_exists(bucket): minio_client.make_bucket(bucket) @@ -487,16 +477,13 @@ def test_restart_broken(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 - os.unlink(parquet_data_path) - shutil.rmtree(f"/{TABLE_NAME}") - def test_restart_broken_table_function(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = "broken2" - TABLE_NAME = "test_restart_broken_table_function" + TABLE_NAME = randomize_table_name("test_restart_broken_table_function") if not minio_client.bucket_exists(bucket): minio_client.make_bucket(bucket) @@ -544,16 +531,13 @@ def test_restart_broken_table_function(started_cluster): assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100 - os.unlink(parquet_data_path) - shutil.rmtree(f"/{TABLE_NAME}") - def test_partition_columns(started_cluster): instance = started_cluster.instances["node1"] spark = started_cluster.spark_session minio_client = started_cluster.minio_client bucket = started_cluster.minio_bucket - TABLE_NAME = "test_partition_columns" + TABLE_NAME = randomize_table_name("test_partition_columns") result_file = f"{TABLE_NAME}" partition_columns = ["b", "c", "d", "e"] @@ -744,6 +728,3 @@ SELECT * FROM deltaLake('http://{started_cluster.minio_ip}:{started_cluster.mini ) == 1 ) - - shutil.rmtree(f"/{TABLE_NAME}") - spark.sql(f"DROP TABLE {TABLE_NAME}") From 7d1e958097e716f3ea1e0b7e51d6dfa575229c4c Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 2 Aug 2024 13:32:59 +0200 Subject: [PATCH 1443/2361] Integration tests: fix ports clashing problem --- tests/integration/conftest.py | 44 ++++++++++++++++++ tests/integration/helpers/cluster.py | 67 ++++++++++++++++++++++++---- 2 files changed, 103 insertions(+), 8 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index f4be31cc532..0a47840ede3 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -2,6 +2,8 @@ import logging import os +import socket +import multiprocessing import pytest # pylint:disable=import-error; for style check from helpers.cluster import run_and_check @@ -11,6 +13,7 @@ from helpers.network import _NetworkManager # # [1]: https://github.com/pytest-dev/pytest/issues/5502 logging.raiseExceptions = False +PORTS_PER_WORKER = 50 @pytest.fixture(scope="session", autouse=True) @@ -111,5 +114,46 @@ def pytest_addoption(parser): ) +def get_n_free_ports(total): + ports = [] + + while len(ports) < total: + with socket.socket() as s: + s.bind(("", 0)) + ports.append(s.getsockname()[1]) + + return ports + + def pytest_configure(config): os.environ["INTEGRATION_TESTS_RUN_ID"] = config.option.run_id + + # When running tests without pytest-xdist, + # the `pytest_xdist_setupnodes` hook is not executed + worker_ports = os.getenv("WORKER_FREE_PORTS", None) + if worker_ports is None: + os.environ["WORKER_FREE_PORTS"] = " ".join( + ([str(p) for p in get_n_free_ports(PORTS_PER_WORKER)]) + ) + + +def pytest_xdist_setupnodes(config, specs): + # Find {PORTS_PER_WORKER} * {number of xdist workers} ports and + # allocate pool of {PORTS_PER_WORKER} ports to each worker + + # Get number of xdist workers + num_workers = 1 + if 
os.environ.get("PYTEST_XDIST_WORKER", "master") == "master": + num_workers = config.getoption("numprocesses", 1) + if num_workers == "auto": + num_workers = multiprocessing.cpu_count() + + # Get free ports which will be distributed across workers + ports = get_n_free_ports(num_workers * PORTS_PER_WORKER) + + # Iterate over specs of workers and add allocated ports to env variable + for i, spec in enumerate(specs): + start_range = i * PORTS_PER_WORKER + spec.env["WORKER_FREE_PORTS"] = " ".join( + ([str(p) for p in ports[start_range : start_range + PORTS_PER_WORKER]]) + ) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 6bc0ece63ca..3480a3089fe 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -135,6 +135,52 @@ def get_free_port(): return s.getsockname()[1] +def is_port_free(port: int) -> bool: + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", port)) + return True + except socket.error: + return False + + +class PortPoolManager: + """ + This class is used for distribution of ports allocated to single pytest-xdist worker + It can be used by multiple ClickHouseCluster instances + """ + + # Shared between instances + all_ports = None + free_ports = None + + def __init__(self): + self.used_ports = [] + + if self.all_ports is None: + worker_ports = os.getenv("WORKER_FREE_PORTS") + ports = [int(p) for p in worker_ports.split(" ")] + + # Static vars + PortPoolManager.all_ports = ports + PortPoolManager.free_ports = ports + + def get_port(self): + for port in self.free_ports: + if is_port_free(port): + self.free_ports.remove(port) + self.used_ports.append(port) + return port + + raise Exception( + f"No free ports: {self.all_ports}", + ) + + def return_used_ports(self): + self.free_ports.extend(self.used_ports) + self.used_ports.clear() + + def retry_exception(num, delay, func, exception=Exception, *args, **kwargs): """ Retry if `func()` throws, `num` times. 
@@ -716,62 +762,67 @@ class ClickHouseCluster: .stop() ) + self.port_pool = PortPoolManager() + @property def kafka_port(self): if self._kafka_port: return self._kafka_port - self._kafka_port = get_free_port() + self._kafka_port = self.port_pool.get_port() return self._kafka_port @property def schema_registry_port(self): if self._schema_registry_port: return self._schema_registry_port - self._schema_registry_port = get_free_port() + self._schema_registry_port = self.port_pool.get_port() return self._schema_registry_port @property def schema_registry_auth_port(self): if self._schema_registry_auth_port: return self._schema_registry_auth_port - self._schema_registry_auth_port = get_free_port() + self._schema_registry_auth_port = self.port_pool.get_port() return self._schema_registry_auth_port @property def kerberized_kafka_port(self): if self._kerberized_kafka_port: return self._kerberized_kafka_port - self._kerberized_kafka_port = get_free_port() + self._kerberized_kafka_port = self.port_pool.get_port() return self._kerberized_kafka_port @property def azurite_port(self): if self._azurite_port: return self._azurite_port - self._azurite_port = get_free_port() + self._azurite_port = self.port_pool.get_port() return self._azurite_port @property def mongo_port(self): if self._mongo_port: return self._mongo_port - self._mongo_port = get_free_port() + self._mongo_port = self.port_pool.get_port() return self._mongo_port @property def mongo_no_cred_port(self): if self._mongo_no_cred_port: return self._mongo_no_cred_port - self._mongo_no_cred_port = get_free_port() + self._mongo_no_cred_port = self.port_pool.get_port() return self._mongo_no_cred_port @property def redis_port(self): if self._redis_port: return self._redis_port - self._redis_port = get_free_port() + self._redis_port = self.port_pool.get_port() return self._redis_port + def __exit__(self, exc_type, exc_val, exc_tb): + self.port_pool.return_used_ports() + def print_all_docker_pieces(self): res_networks = subprocess.check_output( f"docker network ls --filter name='{self.project_name}*'", From 7d45529fe8d28a6b39deb32d060343bb5d03b64f Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 2 Aug 2024 12:35:40 +0100 Subject: [PATCH 1444/2361] randomize query id in test_checking_s3_blobs_paranoid --- .../test_checking_s3_blobs_paranoid/test.py | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index afe8449b44a..c22142046c7 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -4,6 +4,8 @@ import logging import pytest import os import minio +import random +import string from helpers.cluster import ClickHouseCluster from helpers.mock_servers import start_s3_mock @@ -45,6 +47,11 @@ def cluster(): cluster.shutdown() +def randomize_query_id(query_id, random_suffix_length=10): + letters = string.ascii_letters + string.digits + return f"{query_id}_{''.join(random.choice(letters) for _ in range(random_suffix_length))}" + + @pytest.fixture(scope="module") def init_broken_s3(cluster): yield start_s3_mock(cluster, "broken_s3", "8083") @@ -128,7 +135,7 @@ def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression broken_s3.setup_at_create_multi_part_upload() - insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}" + insert_query_id = 
randomize_query_id(f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}") error = node.query_and_get_error( f""" INSERT INTO @@ -170,7 +177,7 @@ def test_upload_s3_fail_upload_part_when_multi_part_upload( broken_s3.setup_fake_multpartuploads() broken_s3.setup_at_part_upload(count=1, after=2) - insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}" + insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}") error = node.query_and_get_error( f""" INSERT INTO @@ -222,7 +229,7 @@ def test_when_error_is_retried(cluster, broken_s3, action_and_message): broken_s3.setup_fake_multpartuploads() broken_s3.setup_at_part_upload(count=3, after=2, action=action) - insert_query_id = f"INSERT_INTO_TABLE_{action}_RETRIED" + insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_{action}_RETRIED") node.query( f""" INSERT INTO @@ -251,7 +258,7 @@ def test_when_error_is_retried(cluster, broken_s3, action_and_message): assert s3_errors == 3 broken_s3.setup_at_part_upload(count=1000, after=2, action=action) - insert_query_id = f"INSERT_INTO_TABLE_{action}_RETRIED_1" + insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_{action}_RETRIED_1") error = node.query_and_get_error( f""" INSERT INTO @@ -286,7 +293,7 @@ def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3): action="broken_pipe", ) - insert_query_id = f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD" + insert_query_id = randomize_query_id(f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD") node.query( f""" INSERT INTO @@ -320,7 +327,7 @@ def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3): after=2, action="broken_pipe", ) - insert_query_id = f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD_1" + insert_query_id = randomize_query_id(f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD_1") error = node.query_and_get_error( f""" INSERT INTO @@ -362,7 +369,7 @@ def test_when_s3_connection_reset_by_peer_at_upload_is_retried( action_args=["1"] if send_something else ["0"], ) - insert_query_id = ( + insert_query_id = randomize_query_id( f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_UPLOAD_{send_something}" ) node.query( @@ -399,7 +406,7 @@ def test_when_s3_connection_reset_by_peer_at_upload_is_retried( action="connection_reset_by_peer", action_args=["1"] if send_something else ["0"], ) - insert_query_id = ( + insert_query_id = randomize_query_id( f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_UPLOAD_{send_something}_1" ) error = node.query_and_get_error( @@ -444,7 +451,7 @@ def test_when_s3_connection_reset_by_peer_at_create_mpu_retried( action_args=["1"] if send_something else ["0"], ) - insert_query_id = ( + insert_query_id = randomize_query_id( f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_MULTIPARTUPLOAD_{send_something}" ) node.query( @@ -482,7 +489,7 @@ def test_when_s3_connection_reset_by_peer_at_create_mpu_retried( action_args=["1"] if send_something else ["0"], ) - insert_query_id = ( + insert_query_id = randomize_query_id( f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_MULTIPARTUPLOAD_{send_something}_1" ) error = node.query_and_get_error( @@ -522,7 +529,7 @@ def test_query_is_canceled_with_inf_retries(cluster, broken_s3): action="connection_refused", ) - insert_query_id = f"TEST_QUERY_IS_CANCELED_WITH_INF_RETRIES" + insert_query_id = randomize_query_id(f"TEST_QUERY_IS_CANCELED_WITH_INF_RETRIES") request = node.get_query_request( f""" INSERT INTO @@ -580,7 +587,7 @@ def test_adaptive_timeouts(cluster, broken_s3, node_name): count=1000000, ) - insert_query_id = f"TEST_ADAPTIVE_TIMEOUTS_{node_name}" + 
insert_query_id = randomize_query_id(f"TEST_ADAPTIVE_TIMEOUTS_{node_name}") node.query( f""" INSERT INTO From 34cba1bdda55cdd2409c535be56e4fe6165c894a Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 2 Aug 2024 11:46:03 +0000 Subject: [PATCH 1445/2361] Automatic style fix --- tests/integration/test_checking_s3_blobs_paranoid/test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index c22142046c7..b995b4d6461 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -135,7 +135,9 @@ def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression broken_s3.setup_at_create_multi_part_upload() - insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}") + insert_query_id = randomize_query_id( + f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}" + ) error = node.query_and_get_error( f""" INSERT INTO @@ -177,7 +179,9 @@ def test_upload_s3_fail_upload_part_when_multi_part_upload( broken_s3.setup_fake_multpartuploads() broken_s3.setup_at_part_upload(count=1, after=2) - insert_query_id = randomize_query_id(f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}") + insert_query_id = randomize_query_id( + f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}" + ) error = node.query_and_get_error( f""" INSERT INTO From d2d8a16ca6c5c3df31a62894fe2bcfb26d570061 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 2 Aug 2024 13:51:03 +0200 Subject: [PATCH 1446/2361] Fix refreshable MVs --- src/Databases/DatabaseOrdinary.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 3ab5d3fa697..8808261654f 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -250,6 +251,8 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables convertMergeTreeToReplicatedIfNeeded(ast, qualified_name, file_name); + NormalizeSelectWithUnionQueryVisitor::Data data{local_context->getSettingsRef().union_default_mode}; + NormalizeSelectWithUnionQueryVisitor{data}.visit(ast); std::lock_guard lock{metadata.mutex}; metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast}; metadata.total_dictionaries += create_query->is_dictionary; From c9b29ad11351b774d834ada642951a81d33b14e7 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Fri, 2 Aug 2024 13:53:48 +0200 Subject: [PATCH 1447/2361] squash! 
fix for parallel execution --- tests/integration/test_parquet_page_index/test.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/integration/test_parquet_page_index/test.py b/tests/integration/test_parquet_page_index/test.py index db291e20b74..59dbab09be5 100644 --- a/tests/integration/test_parquet_page_index/test.py +++ b/tests/integration/test_parquet_page_index/test.py @@ -6,9 +6,6 @@ import time cluster = ClickHouseCluster(__file__) path_to_userfiles = "/var/lib/clickhouse/user_files/" -path_to_external_dirs = ( - "/ClickHouse/tests/integration/test_parquet_page_index/_instances" -) node = cluster.add_instance("node", external_dirs=[path_to_userfiles]) @@ -45,7 +42,7 @@ def delete_if_exists(file_path): True, ), ( - "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SELECT number, number+1 FROM system.numbers LIMIT 100 " "INTO OUTFILE '{file_name}' FORMAT Parquet " "SETTINGS output_format_parquet_use_custom_encoder = false, " "output_format_parquet_write_page_index = false;", @@ -54,7 +51,7 @@ def delete_if_exists(file_path): # # default settings: # # output_format_parquet_use_custom_encoder = true ( - "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SELECT number, number+1 FROM system.numbers LIMIT 100 " "INTO OUTFILE '{file_name}' FORMAT Parquet;", False, ), @@ -102,7 +99,7 @@ def test_parquet_page_index_insert_into_table_function_file( ): file_name = f"export{time.time()}.parquet" query = query.format(file_name=file_name) - file_path = f"{path_to_external_dirs}{path_to_userfiles}{file_name}" + file_path = f"{cluster.instances_dir}{path_to_userfiles}{file_name}" delete_if_exists(file_path) assert node.query(query) == "" assert ( From cae76458504f46fefe52c7d93594870d0bac4479 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 14:00:50 +0200 Subject: [PATCH 1448/2361] Make 02514_null_dictionary_source parallelizable --- tests/queries/0_stateless/02514_null_dictionary_source.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/02514_null_dictionary_source.sql b/tests/queries/0_stateless/02514_null_dictionary_source.sql index 74fb57707ff..bfd36042f57 100644 --- a/tests/queries/0_stateless/02514_null_dictionary_source.sql +++ b/tests/queries/0_stateless/02514_null_dictionary_source.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel - DROP DICTIONARY IF EXISTS null_dict; CREATE DICTIONARY null_dict ( id UInt64, From 45f54c3633a388075898e3dcabc850dd1347587d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 14:01:01 +0200 Subject: [PATCH 1449/2361] Make 01086_window_view_cleanup parallelizable --- .../0_stateless/01086_window_view_cleanup.sh | 47 ++++++++++--------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/tests/queries/0_stateless/01086_window_view_cleanup.sh b/tests/queries/0_stateless/01086_window_view_cleanup.sh index 113bcffb2af..0bce08523e2 100755 --- a/tests/queries/0_stateless/01086_window_view_cleanup.sh +++ b/tests/queries/0_stateless/01086_window_view_cleanup.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel # Creation of a database with Ordinary engine emits a warning. 
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal @@ -12,38 +11,40 @@ opts=( "--allow_experimental_analyzer=0" ) -$CLICKHOUSE_CLIENT "${opts[@]}" --allow_deprecated_database_ordinary=1 < Date: Fri, 2 Aug 2024 14:03:27 +0200 Subject: [PATCH 1450/2361] Integration tests: fix ports clashing problem 2 --- tests/integration/conftest.py | 36 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 0a47840ede3..a386ed53009 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -2,11 +2,9 @@ import logging import os -import socket -import multiprocessing import pytest # pylint:disable=import-error; for style check -from helpers.cluster import run_and_check +from helpers.cluster import run_and_check, is_port_free from helpers.network import _NetworkManager # This is a workaround for a problem with logging in pytest [1]. @@ -114,15 +112,16 @@ def pytest_addoption(parser): ) -def get_n_free_ports(total): +def get_unique_free_ports(total): ports = [] + for port in range(30000, 55000): + if is_port_free(port) and port not in ports: + ports.append(port) - while len(ports) < total: - with socket.socket() as s: - s.bind(("", 0)) - ports.append(s.getsockname()[1]) + if len(ports) == total: + return ports - return ports + raise Exception(f"Can't collect {total} ports. Collected: {len(ports)}") def pytest_configure(config): @@ -132,9 +131,8 @@ def pytest_configure(config): # the `pytest_xdist_setupnodes` hook is not executed worker_ports = os.getenv("WORKER_FREE_PORTS", None) if worker_ports is None: - os.environ["WORKER_FREE_PORTS"] = " ".join( - ([str(p) for p in get_n_free_ports(PORTS_PER_WORKER)]) - ) + master_ports = get_unique_free_ports(PORTS_PER_WORKER) + os.environ["WORKER_FREE_PORTS"] = " ".join(([str(p) for p in master_ports])) def pytest_xdist_setupnodes(config, specs): @@ -142,18 +140,12 @@ def pytest_xdist_setupnodes(config, specs): # allocate pool of {PORTS_PER_WORKER} ports to each worker # Get number of xdist workers - num_workers = 1 - if os.environ.get("PYTEST_XDIST_WORKER", "master") == "master": - num_workers = config.getoption("numprocesses", 1) - if num_workers == "auto": - num_workers = multiprocessing.cpu_count() - + num_workers = len(specs) # Get free ports which will be distributed across workers - ports = get_n_free_ports(num_workers * PORTS_PER_WORKER) + ports = get_unique_free_ports(num_workers * PORTS_PER_WORKER) # Iterate over specs of workers and add allocated ports to env variable for i, spec in enumerate(specs): start_range = i * PORTS_PER_WORKER - spec.env["WORKER_FREE_PORTS"] = " ".join( - ([str(p) for p in ports[start_range : start_range + PORTS_PER_WORKER]]) - ) + per_workrer_ports = ports[start_range : start_range + PORTS_PER_WORKER] + spec.env["WORKER_FREE_PORTS"] = " ".join(([str(p) for p in per_workrer_ports])) From 85cecf990ddefc25ff0903b6f97f84cd46dc471a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 14:06:51 +0200 Subject: [PATCH 1451/2361] Parallelize 02703_row_policies_for_database_combination --- ...3_row_policies_for_database_combination.sh | 92 +++++++++++++++++++ ..._row_policies_for_database_combination.sql | 88 ------------------ 2 files changed, 92 insertions(+), 88 deletions(-) create mode 100755 tests/queries/0_stateless/02703_row_policies_for_database_combination.sh delete mode 100644 tests/queries/0_stateless/02703_row_policies_for_database_combination.sql diff --git 
a/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh b/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh new file mode 100755 index 00000000000..f7b7c814d29 --- /dev/null +++ b/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --multiquery " + +DROP TABLE IF EXISTS 02703_rptable; +DROP TABLE IF EXISTS 02703_rptable_another; +CREATE TABLE 02703_rptable (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x; + +INSERT INTO 02703_rptable VALUES (1, 10), (2, 20), (3, 30), (4, 40); + +CREATE TABLE 02703_rptable_another ENGINE = MergeTree ORDER BY x AS SELECT * FROM 02703_rptable; + + +DROP ROW POLICY IF EXISTS 02703_filter_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY IF EXISTS 02703_filter_2 ON ${CLICKHOUSE_DATABASE}.*; +DROP ROW POLICY IF EXISTS 02703_filter_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY IF EXISTS 02703_filter_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY IF EXISTS 02703_filter_5 ON ${CLICKHOUSE_DATABASE}.*; + +-- the test assumes users_without_row_policies_can_read_rows is true + +SELECT 'None'; +SELECT * FROM 02703_rptable; + +CREATE ROW POLICY 02703_filter_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x=1 AS permissive TO ALL; +SELECT 'R1: x == 1'; +SELECT * FROM 02703_rptable; + +CREATE ROW POLICY 02703_filter_2 ON ${CLICKHOUSE_DATABASE}.* USING x=2 AS permissive TO ALL; +SELECT 'R1, R2: (x == 1) OR (x == 2)'; +SELECT * FROM 02703_rptable; + +SELECT 'R1, R2: (x == 2) FROM ANOTHER'; +SELECT * FROM 02703_rptable_another; + +CREATE ROW POLICY 02703_filter_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x=3 AS permissive TO ALL; +SELECT 'R1, R2, R3: (x == 1) OR (x == 2) OR (x == 3)'; +SELECT * FROM 02703_rptable; + +CREATE ROW POLICY 02703_filter_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x<=2 AS restrictive TO ALL; +SELECT 'R1, R2, R3, R4: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2)'; +SELECT * FROM 02703_rptable; + +CREATE ROW POLICY 02703_filter_5 ON ${CLICKHOUSE_DATABASE}.* USING y>=20 AS restrictive TO ALL; +SELECT 'R1, R2, R3, R4, R5: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2) AND (y >= 20)'; +SELECT * FROM 02703_rptable; + +CREATE TABLE 02703_after_rp ENGINE = MergeTree ORDER BY x AS SELECT * FROM 02703_rptable; +SELECT * FROM 02703_after_rp; + +-- does not matter if policies or table are created first +SELECT 'R1, R2, R3, R4, R5: (x == 2) AND (y >= 20) FROM AFTER_RP'; +SELECT * FROM 02703_after_rp; + +SELECT 'R1, R2, R3, R4, R5: (x == 2) AND (y >= 20) FROM ANOTHER'; +SELECT * FROM 02703_rptable_another; + +DROP ROW POLICY 02703_filter_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +SELECT 'R2, R3, R4, R5: ((x == 2) OR (x == 3)) AND (x <= 2) AND (y >= 20)'; +SELECT * FROM 02703_rptable; + +DROP ROW POLICY 02703_filter_2 ON ${CLICKHOUSE_DATABASE}.*; +SELECT 'R3, R4, R5: (x == 3) AND (x <= 2) AND (y >= 20)'; +SELECT * FROM 02703_rptable; + +DROP ROW POLICY 02703_filter_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +SELECT 'R4, R5: (x <= 2) AND (y >= 20)'; +SELECT * FROM 02703_rptable; + +DROP ROW POLICY 02703_filter_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +SELECT 'R5: (x >= 2)'; +SELECT * FROM 02703_rptable; + +CREATE TABLE 02703_unexpected_columns (xx UInt8, yy UInt8) ENGINE = MergeTree ORDER BY xx; +SELECT 'Policy not applicable'; +SELECT * FROM 02703_unexpected_columns; 
-- { serverError UNKNOWN_IDENTIFIER } -- Missing columns: 'x' while processing query + +DROP ROW POLICY 02703_filter_5 ON ${CLICKHOUSE_DATABASE}.*; +SELECT 'None'; +SELECT * FROM 02703_rptable; + +SELECT 'No problematic policy, select works'; +SELECT 'Ok' FROM ${CLICKHOUSE_DATABASE}.02703_unexpected_columns; + +DROP TABLE 02703_rptable; +DROP TABLE 02703_rptable_another; +DROP TABLE 02703_unexpected_columns; + +" diff --git a/tests/queries/0_stateless/02703_row_policies_for_database_combination.sql b/tests/queries/0_stateless/02703_row_policies_for_database_combination.sql deleted file mode 100644 index 8c93fc595ba..00000000000 --- a/tests/queries/0_stateless/02703_row_policies_for_database_combination.sql +++ /dev/null @@ -1,88 +0,0 @@ --- Tags: no-parallel - -DROP DATABASE IF EXISTS 02703_db; -CREATE DATABASE 02703_db; -DROP TABLE IF EXISTS 02703_db.02703_rptable; -DROP TABLE IF EXISTS 02703_db.02703_rptable_another; -CREATE TABLE 02703_db.02703_rptable (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x; - -INSERT INTO 02703_db.02703_rptable VALUES (1, 10), (2, 20), (3, 30), (4, 40); - -CREATE TABLE 02703_db.02703_rptable_another ENGINE = MergeTree ORDER BY x AS SELECT * FROM 02703_db.02703_rptable; - - -DROP ROW POLICY IF EXISTS 02703_filter_1 ON 02703_db.02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_2 ON 02703_db.*; -DROP ROW POLICY IF EXISTS 02703_filter_3 ON 02703_db.02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_4 ON 02703_db.02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_5 ON 02703_db.*; - --- the test assumes users_without_row_policies_can_read_rows is true - -SELECT 'None'; -SELECT * FROM 02703_db.02703_rptable; - -CREATE ROW POLICY 02703_filter_1 ON 02703_db.02703_rptable USING x=1 AS permissive TO ALL; -SELECT 'R1: x == 1'; -SELECT * FROM 02703_db.02703_rptable; - -CREATE ROW POLICY 02703_filter_2 ON 02703_db.* USING x=2 AS permissive TO ALL; -SELECT 'R1, R2: (x == 1) OR (x == 2)'; -SELECT * FROM 02703_db.02703_rptable; - -SELECT 'R1, R2: (x == 2) FROM ANOTHER'; -SELECT * FROM 02703_db.02703_rptable_another; - -CREATE ROW POLICY 02703_filter_3 ON 02703_db.02703_rptable USING x=3 AS permissive TO ALL; -SELECT 'R1, R2, R3: (x == 1) OR (x == 2) OR (x == 3)'; -SELECT * FROM 02703_db.02703_rptable; - -CREATE ROW POLICY 02703_filter_4 ON 02703_db.02703_rptable USING x<=2 AS restrictive TO ALL; -SELECT 'R1, R2, R3, R4: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2)'; -SELECT * FROM 02703_db.02703_rptable; - -CREATE ROW POLICY 02703_filter_5 ON 02703_db.* USING y>=20 AS restrictive TO ALL; -SELECT 'R1, R2, R3, R4, R5: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2) AND (y >= 20)'; -SELECT * FROM 02703_db.02703_rptable; - -CREATE TABLE 02703_db.02703_after_rp ENGINE = MergeTree ORDER BY x AS SELECT * FROM 02703_db.02703_rptable; -SELECT * FROM 02703_db.02703_after_rp; - --- does not matter if policies or table are created first -SELECT 'R1, R2, R3, R4, R5: (x == 2) AND (y >= 20) FROM AFTER_RP'; -SELECT * FROM 02703_db.02703_after_rp; - -SELECT 'R1, R2, R3, R4, R5: (x == 2) AND (y >= 20) FROM ANOTHER'; -SELECT * FROM 02703_db.02703_rptable_another; - -DROP ROW POLICY 02703_filter_1 ON 02703_db.02703_rptable; -SELECT 'R2, R3, R4, R5: ((x == 2) OR (x == 3)) AND (x <= 2) AND (y >= 20)'; -SELECT * FROM 02703_db.02703_rptable; - -DROP ROW POLICY 02703_filter_2 ON 02703_db.*; -SELECT 'R3, R4, R5: (x == 3) AND (x <= 2) AND (y >= 20)'; -SELECT * FROM 02703_db.02703_rptable; - -DROP ROW POLICY 02703_filter_3 ON 02703_db.02703_rptable; -SELECT 'R4, R5: (x <= 2) AND (y >= 
20)'; -SELECT * FROM 02703_db.02703_rptable; - -DROP ROW POLICY 02703_filter_4 ON 02703_db.02703_rptable; -SELECT 'R5: (x >= 2)'; -SELECT * FROM 02703_db.02703_rptable; - -CREATE TABLE 02703_db.02703_unexpected_columns (xx UInt8, yy UInt8) ENGINE = MergeTree ORDER BY xx; -SELECT 'Policy not applicable'; -SELECT * FROM 02703_db.02703_unexpected_columns; -- { serverError UNKNOWN_IDENTIFIER } -- Missing columns: 'x' while processing query - -DROP ROW POLICY 02703_filter_5 ON 02703_db.*; -SELECT 'None'; -SELECT * FROM 02703_db.02703_rptable; - -SELECT 'No problematic policy, select works'; -SELECT 'Ok' FROM 02703_db.02703_unexpected_columns; - -DROP TABLE 02703_db.02703_rptable; -DROP TABLE 02703_db.02703_rptable_another; -DROP TABLE 02703_db.02703_unexpected_columns; -DROP DATABASE 02703_db; From f55784c636c1dcb503dadb2f75bd6b586271bf0d Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 2 Aug 2024 12:07:05 +0000 Subject: [PATCH 1452/2361] Fix 03203_client_benchmark_options --- tests/queries/0_stateless/03203_client_benchmark_options.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03203_client_benchmark_options.sh b/tests/queries/0_stateless/03203_client_benchmark_options.sh index 37a1f2cd3ac..967db056c0b 100755 --- a/tests/queries/0_stateless/03203_client_benchmark_options.sh +++ b/tests/queries/0_stateless/03203_client_benchmark_options.sh @@ -5,10 +5,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh output=$(${CLICKHOUSE_CLIENT} -t -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^2\." && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } +{ number=$(echo "$output" | grep -o "^[0-9]"); [[ -n "$number" && "$number" -ge 2 ]] && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --time -q "SELECT sleepEachRow(2) FORMAT Null" 2>&1) -{ echo "$output" | grep -q "^2\." 
&& echo "Ok"; } || { echo "Fail"; echo "'$output'"; } +{ number=$(echo "$output" | grep -o "^[0-9]"); [[ -n "$number" && "$number" -ge 2 ]] && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } output=$(${CLICKHOUSE_CLIENT} --memory-usage -q "SELECT sum(number) FROM numbers(10_000) FORMAT Null" 2>&1) { echo "$output" | grep -q "^[0-9]\+$" && echo "Ok"; } || { echo "Fail"; echo "'$output'"; } From cb83274d37f8e90ef9c79451ce01a2ba95363158 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 14:18:31 +0200 Subject: [PATCH 1453/2361] Make 02703_row_policy_for_database parallelizable --- .../02703_row_policy_for_database.reference | 22 +++---- .../02703_row_policy_for_database.sh | 59 +++++++++++++++++++ .../02703_row_policy_for_database.sql | 53 ----------------- 3 files changed, 70 insertions(+), 64 deletions(-) create mode 100755 tests/queries/0_stateless/02703_row_policy_for_database.sh delete mode 100644 tests/queries/0_stateless/02703_row_policy_for_database.sql diff --git a/tests/queries/0_stateless/02703_row_policy_for_database.reference b/tests/queries/0_stateless/02703_row_policy_for_database.reference index b67ea69ae72..56b0d8c6f20 100644 --- a/tests/queries/0_stateless/02703_row_policy_for_database.reference +++ b/tests/queries/0_stateless/02703_row_policy_for_database.reference @@ -1,20 +1,20 @@ -- row policies for database - -- SHOW CREATE POLICY db1_02703 ON db1_02703.* -CREATE ROW POLICY db1_02703 ON db1_02703.* FOR SELECT USING 1 TO ALL - -- SHOW CREATE POLICY ON db1_02703.* -CREATE ROW POLICY db1_02703 ON db1_02703.* FOR SELECT USING 1 TO ALL -CREATE ROW POLICY tbl1_02703 ON db1_02703.`table` FOR SELECT USING 1 TO ALL - -- SHOW CREATE POLICY ON db1_02703.`*` + -- SHOW CREATE POLICY default ON default.* +CREATE ROW POLICY default_db_policy ON default.* FOR SELECT USING 1 TO ALL + -- SHOW CREATE POLICY ON default.* +CREATE ROW POLICY default_db_policy ON default.* FOR SELECT USING 1 TO ALL +CREATE ROW POLICY default_tb_policy ON default.`table` FOR SELECT USING 1 TO ALL + -- SHOW CREATE POLICY ON default.`*` R1, R2: (x == 1) OR (x == 2) 1 2 Check system.query_log SELECT \'-- row policies for database\'; [] -SELECT \' -- SHOW CREATE POLICY db1_02703 ON db1_02703.*\'; [] -SELECT \' -- SHOW CREATE POLICY ON db1_02703.*\'; [] -SELECT \' -- SHOW CREATE POLICY ON db1_02703.`*`\'; [] +SELECT \' -- SHOW CREATE POLICY default ON default.*\'; [] +SELECT \' -- SHOW CREATE POLICY ON default.*\'; [] +SELECT \' -- SHOW CREATE POLICY ON default.`*`\'; [] SELECT \'R1, R2: (x == 1) OR (x == 2)\'; [] -SELECT * FROM 02703_rqtable_default; ['`02703_filter_11_db` ON default.*','`02703_filter_11` ON default.`02703_rqtable_default`'] +SELECT * FROM 02703_rqtable_default; ['default_filter_11_db_policy ON default.*','default_filter_11_policy ON default.`02703_rqtable_default`'] SELECT \'Check system.query_log\'; [] -- CREATE DATABASE-LEVEL POLICY IN CURRENT DATABASE -CREATE ROW POLICY db2_02703 ON db1_02703.* TO u1_02703 +CREATE ROW POLICY db2_02703 ON default.* TO user_default diff --git a/tests/queries/0_stateless/02703_row_policy_for_database.sh b/tests/queries/0_stateless/02703_row_policy_for_database.sh new file mode 100755 index 00000000000..e94bc7acd5e --- /dev/null +++ b/tests/queries/0_stateless/02703_row_policy_for_database.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CLICKHOUSE_USER="user_$CLICKHOUSE_DATABASE" + +$CLICKHOUSE_CLIENT --multiquery " + +DROP USER IF EXISTS ${CLICKHOUSE_USER}; +CREATE USER ${CLICKHOUSE_USER}; + +CREATE TABLE ${CLICKHOUSE_DATABASE}.02703_rqtable (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO ${CLICKHOUSE_DATABASE}.02703_rqtable VALUES (1), (2), (3), (4); + +SELECT '-- row policies for database'; +CREATE ROW POLICY ${CLICKHOUSE_DATABASE}_db_policy ON ${CLICKHOUSE_DATABASE}.* USING 1 AS PERMISSIVE TO ALL; +CREATE ROW POLICY ${CLICKHOUSE_DATABASE}_tb_policy ON ${CLICKHOUSE_DATABASE}.table USING 1 AS PERMISSIVE TO ALL; +SELECT ' -- SHOW CREATE POLICY ${CLICKHOUSE_DATABASE} ON ${CLICKHOUSE_DATABASE}.*'; +SHOW CREATE POLICY ${CLICKHOUSE_DATABASE}_db_policy ON ${CLICKHOUSE_DATABASE}.*; +SELECT ' -- SHOW CREATE POLICY ON ${CLICKHOUSE_DATABASE}.*'; +SHOW CREATE POLICY ON ${CLICKHOUSE_DATABASE}.*; +SELECT ' -- SHOW CREATE POLICY ON ${CLICKHOUSE_DATABASE}.\`*\`'; +SHOW CREATE POLICY ON ${CLICKHOUSE_DATABASE}.\`*\`; +DROP POLICY ${CLICKHOUSE_DATABASE}_db_policy ON ${CLICKHOUSE_DATABASE}.*; +DROP POLICY ${CLICKHOUSE_DATABASE}_tb_policy ON ${CLICKHOUSE_DATABASE}.table; +" + +$CLICKHOUSE_CLIENT --query "CREATE ROW POLICY any_02703 ON *.some_table USING 1 AS PERMISSIVE TO ALL;" 2>&1 | grep -q "SYNTAX_ERROR" + +$CLICKHOUSE_CLIENT --multiquery " +CREATE TABLE 02703_rqtable_default (x UInt8) ENGINE = MergeTree ORDER BY x; + +CREATE ROW POLICY ${CLICKHOUSE_DATABASE}_filter_11_db_policy ON * USING x=1 AS permissive TO ALL; +CREATE ROW POLICY ${CLICKHOUSE_DATABASE}_filter_11_policy ON 02703_rqtable_default USING x=2 AS permissive TO ALL; + +INSERT INTO 02703_rqtable_default VALUES (1), (2), (3), (4); + +SELECT 'R1, R2: (x == 1) OR (x == 2)'; +SELECT * FROM 02703_rqtable_default; + +DROP TABLE 02703_rqtable_default; + +SELECT 'Check system.query_log'; +SYSTEM FLUSH LOGS; +SELECT query, used_row_policies FROM system.query_log WHERE current_database == currentDatabase() AND type == 'QueryStart' AND query_kind == 'Select' ORDER BY event_time_microseconds; + +DROP ROW POLICY ${CLICKHOUSE_DATABASE}_filter_11_db_policy ON *; +DROP ROW POLICY ${CLICKHOUSE_DATABASE}_filter_11_policy ON 02703_rqtable_default; + +USE ${CLICKHOUSE_DATABASE}; +SELECT ' -- CREATE DATABASE-LEVEL POLICY IN CURRENT DATABASE'; +CREATE ROW POLICY db2_02703 ON * TO ${CLICKHOUSE_USER}; +SHOW CREATE POLICY db2_02703 ON *; + +DROP ROW POLICY db2_02703 ON *; + +DROP USER ${CLICKHOUSE_USER}; +" diff --git a/tests/queries/0_stateless/02703_row_policy_for_database.sql b/tests/queries/0_stateless/02703_row_policy_for_database.sql deleted file mode 100644 index 51ce5f4f870..00000000000 --- a/tests/queries/0_stateless/02703_row_policy_for_database.sql +++ /dev/null @@ -1,53 +0,0 @@ --- Tags: no-parallel - -DROP DATABASE IF EXISTS db1_02703; -DROP USER IF EXISTS u1_02703; -CREATE USER u1_02703; - -CREATE DATABASE db1_02703; - -CREATE TABLE db1_02703.02703_rqtable (x UInt8) ENGINE = MergeTree ORDER BY x; -INSERT INTO db1_02703.02703_rqtable VALUES (1), (2), (3), (4); - - -SELECT '-- row policies for database'; -CREATE ROW POLICY db1_02703 ON db1_02703.* USING 1 AS PERMISSIVE TO ALL; -CREATE ROW POLICY tbl1_02703 ON db1_02703.table USING 1 AS PERMISSIVE TO ALL; -SELECT ' -- SHOW CREATE POLICY db1_02703 ON db1_02703.*'; -SHOW CREATE POLICY db1_02703 ON db1_02703.*; -SELECT ' -- SHOW CREATE POLICY ON db1_02703.*'; -SHOW CREATE POLICY ON db1_02703.*; -SELECT ' -- SHOW CREATE POLICY ON db1_02703.`*`'; -SHOW CREATE POLICY ON db1_02703.`*`; -DROP POLICY 
db1_02703 ON db1_02703.*; -DROP POLICY tbl1_02703 ON db1_02703.table; - -CREATE ROW POLICY any_02703 ON *.some_table USING 1 AS PERMISSIVE TO ALL; -- { clientError SYNTAX_ERROR } - -CREATE TABLE 02703_rqtable_default (x UInt8) ENGINE = MergeTree ORDER BY x; - -CREATE ROW POLICY 02703_filter_11_db ON * USING x=1 AS permissive TO ALL; -CREATE ROW POLICY 02703_filter_11 ON 02703_rqtable_default USING x=2 AS permissive TO ALL; - -INSERT INTO 02703_rqtable_default VALUES (1), (2), (3), (4); - -SELECT 'R1, R2: (x == 1) OR (x == 2)'; -SELECT * FROM 02703_rqtable_default; - -DROP TABLE 02703_rqtable_default; - -SELECT 'Check system.query_log'; -SYSTEM FLUSH LOGS; -SELECT query, used_row_policies FROM system.query_log WHERE current_database == currentDatabase() AND type == 'QueryStart' AND query_kind == 'Select' ORDER BY event_time_microseconds; - -DROP ROW POLICY 02703_filter_11_db ON *; -DROP ROW POLICY 02703_filter_11 ON 02703_rqtable_default; - -USE db1_02703; -SELECT ' -- CREATE DATABASE-LEVEL POLICY IN CURRENT DATABASE'; -CREATE ROW POLICY db2_02703 ON * TO u1_02703; -SHOW CREATE POLICY db2_02703 ON *; - -DROP ROW POLICY db2_02703 ON *; - -DROP USER u1_02703; From 829d07c3a54716bb62cd36d4f2fa24da64ea6a0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 14:22:37 +0200 Subject: [PATCH 1454/2361] Make 02703_row_policies_for_asterisk parallelizable --- .../02703_row_policies_for_asterisk.sh | 14 +++++++++ .../02703_row_policies_for_asterisk.sql | 11 ------- ...3_row_policies_for_database_combination.sh | 30 +++++++++---------- 3 files changed, 29 insertions(+), 26 deletions(-) create mode 100755 tests/queries/0_stateless/02703_row_policies_for_asterisk.sh delete mode 100644 tests/queries/0_stateless/02703_row_policies_for_asterisk.sql diff --git a/tests/queries/0_stateless/02703_row_policies_for_asterisk.sh b/tests/queries/0_stateless/02703_row_policies_for_asterisk.sh new file mode 100755 index 00000000000..f9670e5f6f8 --- /dev/null +++ b/tests/queries/0_stateless/02703_row_policies_for_asterisk.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --multiquery " + SELECT 'Policy for table \`*\` does not affect other tables in the database'; + CREATE ROW POLICY 02703_asterisk_${CLICKHOUSE_DATABASE}_policy ON ${CLICKHOUSE_DATABASE}.\`*\` USING x=1 AS permissive TO ALL; + CREATE TABLE ${CLICKHOUSE_DATABASE}.\`*\` (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x AS SELECT 100, 20; + CREATE TABLE ${CLICKHOUSE_DATABASE}.\`other\` (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x AS SELECT 100, 20; + SELECT 'star', * FROM ${CLICKHOUSE_DATABASE}.\`*\`; + SELECT 'other', * FROM ${CLICKHOUSE_DATABASE}.other; + DROP ROW POLICY 02703_asterisk_${CLICKHOUSE_DATABASE}_policy ON ${CLICKHOUSE_DATABASE}.\`*\`; +" diff --git a/tests/queries/0_stateless/02703_row_policies_for_asterisk.sql b/tests/queries/0_stateless/02703_row_policies_for_asterisk.sql deleted file mode 100644 index 96b1c01a6d6..00000000000 --- a/tests/queries/0_stateless/02703_row_policies_for_asterisk.sql +++ /dev/null @@ -1,11 +0,0 @@ --- Tags: no-parallel - -SELECT 'Policy for table `*` does not affect other tables in the database'; -CREATE DATABASE 02703_db_asterisk; -CREATE ROW POLICY 02703_asterisk ON 02703_db_asterisk.`*` USING x=1 AS permissive TO ALL; -CREATE TABLE 02703_db_asterisk.`*` (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x AS SELECT 100, 20; -CREATE TABLE 02703_db_asterisk.`other` (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY x AS SELECT 100, 20; -SELECT 'star', * FROM 02703_db_asterisk.`*`; -SELECT 'other', * FROM 02703_db_asterisk.other; -DROP ROW POLICY 02703_asterisk ON 02703_db_asterisk.`*`; -DROP DATABASE 02703_db_asterisk; diff --git a/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh b/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh index f7b7c814d29..35151eed220 100755 --- a/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh +++ b/tests/queries/0_stateless/02703_row_policies_for_database_combination.sh @@ -14,37 +14,37 @@ INSERT INTO 02703_rptable VALUES (1, 10), (2, 20), (3, 30), (4, 40); CREATE TABLE 02703_rptable_another ENGINE = MergeTree ORDER BY x AS SELECT * FROM 02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_2 ON ${CLICKHOUSE_DATABASE}.*; -DROP ROW POLICY IF EXISTS 02703_filter_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable; -DROP ROW POLICY IF EXISTS 02703_filter_5 ON ${CLICKHOUSE_DATABASE}.*; +DROP ROW POLICY IF EXISTS 02703_filter_policy_${CLICKHOUSE_DATABASE}_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY IF EXISTS 02703_filter_policy_${CLICKHOUSE_DATABASE}_2 ON ${CLICKHOUSE_DATABASE}.*; +DROP ROW POLICY IF EXISTS 02703_filter_policy_${CLICKHOUSE_DATABASE}_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY IF EXISTS 02703_filter_policy_${CLICKHOUSE_DATABASE}_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY IF EXISTS 02703_filter_policy_${CLICKHOUSE_DATABASE}_5 ON ${CLICKHOUSE_DATABASE}.*; -- the test assumes users_without_row_policies_can_read_rows is true SELECT 'None'; SELECT * FROM 02703_rptable; -CREATE ROW POLICY 02703_filter_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x=1 AS permissive TO ALL; +CREATE ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x=1 AS permissive TO ALL; SELECT 'R1: x == 1'; SELECT * FROM 02703_rptable; -CREATE ROW POLICY 02703_filter_2 ON 
${CLICKHOUSE_DATABASE}.* USING x=2 AS permissive TO ALL; +CREATE ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_2 ON ${CLICKHOUSE_DATABASE}.* USING x=2 AS permissive TO ALL; SELECT 'R1, R2: (x == 1) OR (x == 2)'; SELECT * FROM 02703_rptable; SELECT 'R1, R2: (x == 2) FROM ANOTHER'; SELECT * FROM 02703_rptable_another; -CREATE ROW POLICY 02703_filter_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x=3 AS permissive TO ALL; +CREATE ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x=3 AS permissive TO ALL; SELECT 'R1, R2, R3: (x == 1) OR (x == 2) OR (x == 3)'; SELECT * FROM 02703_rptable; -CREATE ROW POLICY 02703_filter_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x<=2 AS restrictive TO ALL; +CREATE ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable USING x<=2 AS restrictive TO ALL; SELECT 'R1, R2, R3, R4: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2)'; SELECT * FROM 02703_rptable; -CREATE ROW POLICY 02703_filter_5 ON ${CLICKHOUSE_DATABASE}.* USING y>=20 AS restrictive TO ALL; +CREATE ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_5 ON ${CLICKHOUSE_DATABASE}.* USING y>=20 AS restrictive TO ALL; SELECT 'R1, R2, R3, R4, R5: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2) AND (y >= 20)'; SELECT * FROM 02703_rptable; @@ -58,19 +58,19 @@ SELECT * FROM 02703_after_rp; SELECT 'R1, R2, R3, R4, R5: (x == 2) AND (y >= 20) FROM ANOTHER'; SELECT * FROM 02703_rptable_another; -DROP ROW POLICY 02703_filter_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_1 ON ${CLICKHOUSE_DATABASE}.02703_rptable; SELECT 'R2, R3, R4, R5: ((x == 2) OR (x == 3)) AND (x <= 2) AND (y >= 20)'; SELECT * FROM 02703_rptable; -DROP ROW POLICY 02703_filter_2 ON ${CLICKHOUSE_DATABASE}.*; +DROP ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_2 ON ${CLICKHOUSE_DATABASE}.*; SELECT 'R3, R4, R5: (x == 3) AND (x <= 2) AND (y >= 20)'; SELECT * FROM 02703_rptable; -DROP ROW POLICY 02703_filter_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_3 ON ${CLICKHOUSE_DATABASE}.02703_rptable; SELECT 'R4, R5: (x <= 2) AND (y >= 20)'; SELECT * FROM 02703_rptable; -DROP ROW POLICY 02703_filter_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable; +DROP ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_4 ON ${CLICKHOUSE_DATABASE}.02703_rptable; SELECT 'R5: (x >= 2)'; SELECT * FROM 02703_rptable; @@ -78,7 +78,7 @@ CREATE TABLE 02703_unexpected_columns (xx UInt8, yy UInt8) ENGINE = MergeTree OR SELECT 'Policy not applicable'; SELECT * FROM 02703_unexpected_columns; -- { serverError UNKNOWN_IDENTIFIER } -- Missing columns: 'x' while processing query -DROP ROW POLICY 02703_filter_5 ON ${CLICKHOUSE_DATABASE}.*; +DROP ROW POLICY 02703_filter_policy_${CLICKHOUSE_DATABASE}_5 ON ${CLICKHOUSE_DATABASE}.*; SELECT 'None'; SELECT * FROM 02703_rptable; From 5256e8e6d08b5076be49b56fdebadb668892771c Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 2 Aug 2024 14:36:33 +0200 Subject: [PATCH 1455/2361] Integration tests: fix ports clashing problem 3 --- tests/integration/conftest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index a386ed53009..b4c86a1cd2f 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -132,7 +132,7 @@ def pytest_configure(config): worker_ports = os.getenv("WORKER_FREE_PORTS", None) if worker_ports is None: 
master_ports = get_unique_free_ports(PORTS_PER_WORKER) - os.environ["WORKER_FREE_PORTS"] = " ".join(([str(p) for p in master_ports])) + os.environ["WORKER_FREE_PORTS"] = " ".join([str(p) for p in master_ports]) def pytest_xdist_setupnodes(config, specs): @@ -148,4 +148,4 @@ def pytest_xdist_setupnodes(config, specs): for i, spec in enumerate(specs): start_range = i * PORTS_PER_WORKER per_workrer_ports = ports[start_range : start_range + PORTS_PER_WORKER] - spec.env["WORKER_FREE_PORTS"] = " ".join(([str(p) for p in per_workrer_ports])) + spec.env["WORKER_FREE_PORTS"] = " ".join([str(p) for p in per_workrer_ports]) From db0bce33526abf16e705b9e56d178d6e2c45a36b Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 2 Aug 2024 15:01:15 +0200 Subject: [PATCH 1456/2361] Try make the code more understandable --- src/Interpreters/Cache/FileCache.cpp | 164 ++++++++++++------------- src/Interpreters/Cache/FileCache.h | 12 +- tests/config/config.d/storage_conf.xml | 2 +- 3 files changed, 80 insertions(+), 98 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 0a03f5dcc7d..4c17afb79be 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -318,7 +318,29 @@ FileSegments FileCache::getImpl(const LockedKey & locked_key, const FileSegment: std::vector FileCache::splitRange(size_t offset, size_t size, size_t aligned_size) { - assert(size > 0); + chassert(size > 0); + chassert(size <= aligned_size); + + /// Consider this example to understand why we need to account here for both `size` and `aligned_size`. + /// [________________]__________________] <-- requested range + /// ^ ^ + /// right offset aligned_right_offset + /// [_________] <-- last cached file segment, e.g. we have uncovered suffix of the requested range + /// [________________] + /// size + /// [____________________________________] + /// aligned_size + /// + /// So it is possible that we split this hole range into sub-segments by `max_file_segment_size` + /// and get something like this: + /// + /// [________________________] + /// ^ ^ + /// right_offset right_offset + max_file_segment_size + /// e.g. there is no need to create sub-segment for range (right_offset + max_file_segment_size, aligned_right_offset]. + /// Because its left offset would be bigger than right_offset. + /// Therefore, we set end_pos_non_included as offset+size, but remaining_size as aligned_size. + std::vector ranges; size_t current_pos = offset; @@ -339,42 +361,23 @@ std::vector FileCache::splitRange(size_t offset, size_t size return ranges; } -FileSegments FileCache::splitRangeIntoFileSegments( +FileSegments FileCache::createFileSegmentsFromRanges( LockedKey & locked_key, - size_t offset, - size_t size, - size_t aligned_size, - FileSegment::State state, + const std::vector & ranges, + size_t & file_segments_count, size_t file_segments_limit, const CreateFileSegmentSettings & create_settings) { - chassert(size > 0); - chassert(size <= aligned_size); - /// We take `size` as a soft limit and `aligned_size` as a hard limit. 
- - auto current_pos = offset; - auto end_pos_non_included = offset + size; - - size_t current_file_segment_size; - size_t remaining_size = aligned_size; - - FileSegments file_segments; - const size_t max_size = max_file_segment_size.load(); - while (current_pos < end_pos_non_included && (!file_segments_limit || file_segments.size() < file_segments_limit)) + FileSegments result; + for (const auto & r : ranges) { - current_file_segment_size = std::min(remaining_size, max_size); - remaining_size -= current_file_segment_size; - - auto file_segment_metadata_it = addFileSegment( - locked_key, current_pos, current_file_segment_size, state, create_settings, nullptr); - file_segments.push_back(file_segment_metadata_it->second->file_segment); - - current_pos += current_file_segment_size; + if (file_segments_limit && file_segments_count >= file_segments_limit) + break; + auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, create_settings, nullptr); + result.push_back(metadata_it->second->file_segment); + ++file_segments_count; } - - chassert(file_segments.size() == file_segments_limit || file_segments.back()->range().contains(offset + size - 1), - fmt::format("Offset: {}, size: {}, file segments: {}", offset, size, toString(file_segments))); - return file_segments; + return result; } void FileCache::fillHolesWithEmptyFileSegments( @@ -448,18 +451,9 @@ void FileCache::fillHolesWithEmptyFileSegments( } else { - auto ranges = splitRange(current_pos, hole_size, hole_size); - FileSegments hole; - for (const auto & r : ranges) - { - auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, create_settings, nullptr); - hole.push_back(metadata_it->second->file_segment); - ++processed_count; - - if (is_limit_reached()) - break; - } - file_segments.splice(it, std::move(hole)); + const auto ranges = splitRange(current_pos, hole_size, hole_size); + auto hole_segments = createFileSegmentsFromRanges(locked_key, ranges, processed_count, file_segments_limit, create_settings); + file_segments.splice(it, std::move(hole_segments)); } if (is_limit_reached()) @@ -493,29 +487,20 @@ void FileCache::fillHolesWithEmptyFileSegments( /// segmentN auto hole_size = range.right - current_pos + 1; - auto non_aligned_size = non_aligned_right_offset - current_pos + 1; + auto non_aligned_hole_size = non_aligned_right_offset - current_pos + 1; if (fill_with_detached_file_segments) { auto file_segment = std::make_shared( - locked_key.getKey(), current_pos, hole_size, FileSegment::State::DETACHED, create_settings); + locked_key.getKey(), current_pos, non_aligned_hole_size, FileSegment::State::DETACHED, create_settings); file_segments.insert(file_segments.end(), file_segment); } else { - auto ranges = splitRange(current_pos, non_aligned_size, hole_size); - FileSegments hole; - for (const auto & r : ranges) - { - auto metadata_it = addFileSegment(locked_key, r.left, r.size(), FileSegment::State::EMPTY, create_settings, nullptr); - hole.push_back(metadata_it->second->file_segment); - ++processed_count; - - if (is_limit_reached()) - break; - } - file_segments.splice(it, std::move(hole)); + const auto ranges = splitRange(current_pos, non_aligned_hole_size, hole_size); + auto hole_segments = createFileSegmentsFromRanges(locked_key, ranges, processed_count, file_segments_limit, create_settings); + file_segments.splice(it, std::move(hole_segments)); if (is_limit_reached()) erase_unprocessed(); @@ -548,8 +533,9 @@ FileSegmentsHolderPtr FileCache::set( } else { - 
file_segments = splitRangeIntoFileSegments( - *locked_key, offset, size, size, FileSegment::State::EMPTY, /* file_segments_limit */0, create_settings); + const auto ranges = splitRange(offset, size, size); + size_t file_segments_count = 0; + file_segments = createFileSegmentsFromRanges(*locked_key, ranges, file_segments_count, /* file_segments_limit */0, create_settings); } return std::make_unique(std::move(file_segments)); @@ -569,23 +555,27 @@ FileCache::getOrSet( assertInitialized(); - FileSegment::Range range(offset, offset + size - 1); + FileSegment::Range initial_range(offset, offset + size - 1); + /// result_range is initial range, which will be adjusted according to + /// 1. aligned offset, alighed_end_offset + /// 2. max_file_segments_limit + FileSegment::Range result_range = initial_range; - const auto aligned_offset = roundDownToMultiple(range.left, boundary_alignment); - auto aligned_end_offset = std::min(roundUpToMultiple(offset + size, boundary_alignment), file_size) - 1; + const auto aligned_offset = roundDownToMultiple(initial_range.left, boundary_alignment); + auto aligned_end_offset = std::min(roundUpToMultiple(initial_range.right + 1, boundary_alignment), file_size) - 1; - chassert(aligned_offset <= range.left); - chassert(aligned_end_offset >= range.right); + chassert(aligned_offset <= initial_range.left); + chassert(aligned_end_offset >= initial_range.right); auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::CREATE_EMPTY, user); /// Get all segments which intersect with the given range. - auto file_segments = getImpl(*locked_key, range, file_segments_limit); + auto file_segments = getImpl(*locked_key, initial_range, file_segments_limit); if (file_segments_limit) { chassert(file_segments.size() <= file_segments_limit); if (file_segments.size() == file_segments_limit) - range.right = aligned_end_offset = file_segments.back()->range().right; + result_range.right = aligned_end_offset = file_segments.back()->range().right; } /// Check case if we have uncovered prefix, e.g. @@ -597,11 +587,11 @@ FileCache::getOrSet( /// [ ] /// ^----^ /// uncovered prefix. - const bool has_uncovered_prefix = file_segments.empty() || range.left < file_segments.front()->range().left; + const bool has_uncovered_prefix = file_segments.empty() || result_range.left < file_segments.front()->range().left; - if (aligned_offset < range.left && has_uncovered_prefix) + if (aligned_offset < result_range.left && has_uncovered_prefix) { - auto prefix_range = FileSegment::Range(aligned_offset, file_segments.empty() ? range.left - 1 : file_segments.front()->range().left - 1); + auto prefix_range = FileSegment::Range(aligned_offset, file_segments.empty() ? 
result_range.left - 1 : file_segments.front()->range().left - 1); auto prefix_file_segments = getImpl(*locked_key, prefix_range, /* file_segments_limit */0); if (prefix_file_segments.empty()) @@ -610,7 +600,7 @@ FileCache::getOrSet( /// ^ ^ ^ /// aligned_offset range.left range.right /// [___] [__________] <-- current cache (example) - range.left = aligned_offset; + result_range.left = aligned_offset; } else { @@ -621,10 +611,10 @@ FileCache::getOrSet( /// ^ /// prefix_file_segments.back().right - chassert(prefix_file_segments.back()->range().right < range.left); + chassert(prefix_file_segments.back()->range().right < result_range.left); chassert(prefix_file_segments.back()->range().right >= aligned_offset); - range.left = prefix_file_segments.back()->range().right + 1; + result_range.left = prefix_file_segments.back()->range().right + 1; } } @@ -637,11 +627,11 @@ FileCache::getOrSet( /// [___] /// ^---^ /// uncovered_suffix - const bool has_uncovered_suffix = file_segments.empty() || file_segments.back()->range().right < range.right; + const bool has_uncovered_suffix = file_segments.empty() || file_segments.back()->range().right < result_range.right; - if (range.right < aligned_end_offset && has_uncovered_suffix) + if (result_range.right < aligned_end_offset && has_uncovered_suffix) { - auto suffix_range = FileSegment::Range(range.right, aligned_end_offset); + auto suffix_range = FileSegment::Range(result_range.right, aligned_end_offset); /// We need to get 1 file segment, so file_segments_limit = 1 here. auto suffix_file_segments = getImpl(*locked_key, suffix_range, /* file_segments_limit */1); @@ -652,7 +642,7 @@ FileCache::getOrSet( /// range.left range.right aligned_end_offset /// [___] [___] <-- current cache (example) - range.right = aligned_end_offset; + result_range.right = aligned_end_offset; } else { @@ -662,35 +652,33 @@ FileCache::getOrSet( /// [___] [___] [_________] <-- current cache (example) /// ^ /// suffix_file_segments.front().left - range.right = suffix_file_segments.front()->range().left - 1; + result_range.right = suffix_file_segments.front()->range().left - 1; } } - chassert(range.left >= aligned_offset); - if (file_segments.empty()) { - file_segments = splitRangeIntoFileSegments( - *locked_key, range.left, /* size */offset + size - range.left, /* aligned_size */range.size(), - FileSegment::State::EMPTY, file_segments_limit, create_settings); + auto ranges = splitRange(result_range.left, initial_range.size() + (initial_range.left - result_range.left), result_range.size()); + size_t file_segments_count = file_segments.size(); + file_segments.splice(file_segments.end(), createFileSegmentsFromRanges(*locked_key, ranges, file_segments_count, file_segments_limit, create_settings)); } else { - chassert(file_segments.front()->range().right >= range.left); - chassert(file_segments.back()->range().left <= range.right); + chassert(file_segments.front()->range().right >= result_range.left); + chassert(file_segments.back()->range().left <= result_range.right); fillHolesWithEmptyFileSegments( - *locked_key, file_segments, range, offset + size - 1, file_segments_limit, /* fill_with_detached */false, create_settings); + *locked_key, file_segments, result_range, offset + size - 1, file_segments_limit, /* fill_with_detached */false, create_settings); - if (!file_segments.front()->range().contains(range.left)) + if (!file_segments.front()->range().contains(result_range.left)) { throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected {} to include {} " "(end offset: {}, aligned offset: 
{}, aligned end offset: {})", - file_segments.front()->range().toString(), offset, range.right, aligned_offset, aligned_end_offset); + file_segments.front()->range().toString(), offset, result_range.right, aligned_offset, aligned_end_offset); } } - chassert(file_segments_limit ? file_segments.back()->range().left <= range.right : file_segments.back()->range().contains(range.right)); + chassert(file_segments_limit ? file_segments.back()->range().left <= result_range.right : file_segments.back()->range().contains(result_range.right)); chassert(!file_segments_limit || file_segments.size() <= file_segments_limit); return std::make_unique(std::move(file_segments)); diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 3f7eec73b56..07be802a940 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -265,16 +265,10 @@ private: /// each subrange size must be less or equal to max_file_segment_size. std::vector splitRange(size_t offset, size_t size, size_t aligned_size); - /// Split range into subranges by max_file_segment_size (same as in splitRange()) - /// and create a new file segment for each subrange. - /// If `file_segments_limit` > 0, create no more than first file_segments_limit - /// file segments. - FileSegments splitRangeIntoFileSegments( + FileSegments createFileSegmentsFromRanges( LockedKey & locked_key, - size_t offset, - size_t size, - size_t aligned_size, - FileSegment::State state, + const std::vector & ranges, + size_t & file_segments_count, size_t file_segments_limit, const CreateFileSegmentSettings & create_settings); diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index 4daa64b520d..e106e3a0e6b 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -19,7 +19,7 @@ cache s3_disk s3_cache/ - 100Mi + 104857600 5Mi 1 100 From a94df1fb9af032c8ccccb704186513561e28e11c Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 31 Jul 2024 23:50:48 +0200 Subject: [PATCH 1457/2361] enable parallel_view_processing in perf tests --- tests/performance/materialized_view_parallel_insert.xml | 4 ++++ tests/performance/views_max_insert_threads.xml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/tests/performance/materialized_view_parallel_insert.xml b/tests/performance/materialized_view_parallel_insert.xml index df0a23182c3..c3e7cdc50c0 100644 --- a/tests/performance/materialized_view_parallel_insert.xml +++ b/tests/performance/materialized_view_parallel_insert.xml @@ -1,4 +1,8 @@ + + 1 + + CREATE MATERIALIZED VIEW hits_mv ENGINE MergeTree PARTITION BY toYYYYMM(EventDate) diff --git a/tests/performance/views_max_insert_threads.xml b/tests/performance/views_max_insert_threads.xml index 2988984f5d8..c16fb330b35 100644 --- a/tests/performance/views_max_insert_threads.xml +++ b/tests/performance/views_max_insert_threads.xml @@ -1,5 +1,9 @@ + + 1 + + create table views_max_insert_threads_null (a UInt64) Engine = Null create materialized view views_max_insert_threads_mv Engine = Null AS select now() as ts, max(a) from views_max_insert_threads_null group by ts From c3a1381d70e6d5da59e4b29e7a147e24e7fcd90b Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 1 Aug 2024 00:10:59 +0200 Subject: [PATCH 1458/2361] some corner case optimize --- src/Interpreters/Squashing.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index 488177c3b4f..483112df6cb 
100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -106,6 +106,17 @@ Chunk Squashing::convertToChunk(CurrentData && data) const Chunk Squashing::squash(std::vector && input_chunks, Chunk::ChunkInfoCollection && infos) { + if (input_chunks.size() == 1) + { + /// this is just optimization, no logic changes + Chunk result = std::move(input_chunks.front()); + infos.appendIfUniq(std::move(result.getChunkInfos())); + result.setChunkInfos(infos); + + chassert(result); + return result; + } + std::vector mutable_columns = {}; size_t rows = 0; for (const Chunk & chunk : input_chunks) From 8d979680060f10e6bcec3fc83fb3bdbaa7bb3deb Mon Sep 17 00:00:00 2001 From: Lennard Eijsackers Date: Fri, 2 Aug 2024 15:13:37 +0200 Subject: [PATCH 1459/2361] Use FunctionArgumentDescriptors to check bitSlice function + add test case Signed-off-by: Lennard Eijsackers --- src/Functions/bitSlice.cpp | 30 ++++++++----------- ...214_bitslice_argument_evaluation.reference | 0 .../03214_bitslice_argument_evaluation.sql | 10 +++++++ 3 files changed, 22 insertions(+), 18 deletions(-) create mode 100644 tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference create mode 100644 tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql diff --git a/src/Functions/bitSlice.cpp b/src/Functions/bitSlice.cpp index e2b455846d8..f1d3bb57221 100644 --- a/src/Functions/bitSlice.cpp +++ b/src/Functions/bitSlice.cpp @@ -40,28 +40,22 @@ public: bool useDefaultImplementationForConstants() const override { return true; } - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - const size_t number_of_arguments = arguments.size(); + FunctionArgumentDescriptors mandatory_args{ + {"s", static_cast(&isStringOrFixedString), nullptr, "String"}, + {"offset", static_cast(&isNativeNumber), nullptr, "(U)Int8, (U)Int16, (U)Int32, (U)Int64 or Float"}, + }; - if (number_of_arguments < 2 || number_of_arguments > 3) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", - getName(), number_of_arguments); + FunctionArgumentDescriptors optional_args{ + {"length", static_cast(&isNativeNumber), nullptr, "(U)Int8, (U)Int16, (U)Int32, (U)Int64 or Float"}, + }; - if (!isString(arguments[0]) && !isStringOrFixedString(arguments[0])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", - arguments[0]->getName(), getName()); - if (arguments[0]->onlyNull()) - return arguments[0]; + validateFunctionArguments(*this, arguments, mandatory_args, optional_args); - if (!isNativeNumber(arguments[1])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of function {}", - arguments[1]->getName(), getName()); - - if (number_of_arguments == 3 && !isNativeNumber(arguments[2])) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of function {}", - arguments[2]->getName(), getName()); + const auto & type = arguments[0].type; + if (type->onlyNull()) + return type; return std::make_shared(); } diff --git a/tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql 
b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql new file mode 100644 index 00000000000..b8488600fcb --- /dev/null +++ b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql @@ -0,0 +1,10 @@ +-- No arguments passed +SELECT bitSlice(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- Invalid 1st argument passed +SELECT bitSlice(1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Valid 1st argument, invalid 2nd argument passed +SELECT bitSlice('Hello', 'World'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Valid 1st argument & 2nd argument, invalid 3rd argument passed +SELECT bitSlice('Hello', 1, 'World'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- More arguments then expected +SELECT bitSlice('Hello', 1, 1, 'World'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } From 83096249a7480b0bfa1d9246c17136727bba904c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 2 Aug 2024 13:54:32 +0000 Subject: [PATCH 1460/2361] Update version_date.tsv and changelogs after v24.3.6.48-lts --- docs/changelogs/v24.3.6.48-lts.md | 39 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 40 insertions(+) create mode 100644 docs/changelogs/v24.3.6.48-lts.md diff --git a/docs/changelogs/v24.3.6.48-lts.md b/docs/changelogs/v24.3.6.48-lts.md new file mode 100644 index 00000000000..f045afc619b --- /dev/null +++ b/docs/changelogs/v24.3.6.48-lts.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.6.48-lts (b2d33c3c45d) FIXME as compared to v24.3.5.46-lts (fe54cead6b6) + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#66889](https://github.com/ClickHouse/ClickHouse/issues/66889): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66687](https://github.com/ClickHouse/ClickHouse/issues/66687): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). +* Backported in [#67497](https://github.com/ClickHouse/ClickHouse/issues/67497): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#66324](https://github.com/ClickHouse/ClickHouse/issues/66324): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#66151](https://github.com/ClickHouse/ClickHouse/issues/66151): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). 
+* Backported in [#66451](https://github.com/ClickHouse/ClickHouse/issues/66451): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66222](https://github.com/ClickHouse/ClickHouse/issues/66222): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66676](https://github.com/ClickHouse/ClickHouse/issues/66676): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#66602](https://github.com/ClickHouse/ClickHouse/issues/66602): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Backported in [#66356](https://github.com/ClickHouse/ClickHouse/issues/66356): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66970](https://github.com/ClickHouse/ClickHouse/issues/66970): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66967](https://github.com/ClickHouse/ClickHouse/issues/66967): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66718](https://github.com/ClickHouse/ClickHouse/issues/66718): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66949](https://github.com/ClickHouse/ClickHouse/issues/66949): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66946](https://github.com/ClickHouse/ClickHouse/issues/66946): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67629](https://github.com/ClickHouse/ClickHouse/issues/67629): Fix for occasional deadlock in Context::getDDLWorker. 
[#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#67193](https://github.com/ClickHouse/ClickHouse/issues/67193): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#67375](https://github.com/ClickHouse/ClickHouse/issues/67375): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67572](https://github.com/ClickHouse/ClickHouse/issues/67572): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#66422](https://github.com/ClickHouse/ClickHouse/issues/66422): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66855](https://github.com/ClickHouse/ClickHouse/issues/66855): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). +* Backported in [#67055](https://github.com/ClickHouse/ClickHouse/issues/67055): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Backported in [#66943](https://github.com/ClickHouse/ClickHouse/issues/66943): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 7b5dcda82e3..24488066190 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -10,6 +10,7 @@ v24.4.4.113-stable 2024-08-02 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 +v24.3.6.48-lts 2024-08-02 v24.3.5.46-lts 2024-07-03 v24.3.4.147-lts 2024-06-13 v24.3.3.102-lts 2024-05-01 From 9c7464e0653782af385dbc884dd3acecfc69c6cc Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 2 Aug 2024 16:04:11 +0200 Subject: [PATCH 1461/2361] Stateless tests: reduce pure_http_client timeout to get reasons of timed out tests --- tests/queries/0_stateless/helpers/pure_http_client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/helpers/pure_http_client.py b/tests/queries/0_stateless/helpers/pure_http_client.py index 0e7a4d27f4f..7a8efec36bb 100644 --- a/tests/queries/0_stateless/helpers/pure_http_client.py +++ b/tests/queries/0_stateless/helpers/pure_http_client.py @@ -18,7 +18,7 @@ class ClickHouseClient: self.host = host def query( - self, query, connection_timeout=1500, settings=dict(), binary_result=False + self, query, connection_timeout=500, settings=dict(), binary_result=False ): NUMBER_OF_TRIES = 30 DELAY = 10 @@ -47,12 +47,12 @@ class ClickHouseClient: else: raise ValueError(r.text) - def query_return_df(self, query, connection_timeout=1500): + def query_return_df(self, query, connection_timeout=500): data = self.query(query, connection_timeout) df = pd.read_csv(io.StringIO(data), sep="\t") return df - def query_with_data(self, query, data, connection_timeout=1500, settings=dict()): + def query_with_data(self, query, data, connection_timeout=500, settings=dict()): params = { "query": query, "timeout_before_checking_execution_speed": 120, From 2c9cef38e56c65ec9bbe7f3af21d4865662f6e9a Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 2 Aug 2024 16:05:39 +0200 Subject: [PATCH 1462/2361] Stateless tests: fix hanging tests `02473_multistep_prewhere*` `00411_long_accurate_number_comparison*` --- ...411_long_accurate_number_comparison.python | 19 ++++++------------- .../02473_multistep_prewhere.python | 4 ++-- .../02473_multistep_split_prewhere.python | 4 ++-- .../0_stateless/helpers/pure_http_client.py | 17 ++++++++++++++++- 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison.python b/tests/queries/0_stateless/00411_long_accurate_number_comparison.python index 045de9ee7ee..38b108a696f 100644 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison.python +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison.python @@ -2,23 +2,16 @@ import os, itertools, urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse, sys +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) -def get_ch_answer(query): - return ( - urllib.request.urlopen( - os.environ.get( - "CLICKHOUSE_URL", - "http://localhost:" + os.environ.get("CLICKHOUSE_PORT_HTTP", "8123"), - ), - data=query.encode(), - ) - .read() - .decode() - ) +from pure_http_client import ClickHouseClient + +client = ClickHouseClient() def check_answers(query, answer): - ch_answer = get_ch_answer(query) + ch_answer = client.query(query) if ch_answer.strip() != answer.strip(): print("FAIL on query:", query) print("Expected answer:", answer) 
diff --git a/tests/queries/0_stateless/02473_multistep_prewhere.python b/tests/queries/0_stateless/02473_multistep_prewhere.python index 11095202039..09326b6365d 100644 --- a/tests/queries/0_stateless/02473_multistep_prewhere.python +++ b/tests/queries/0_stateless/02473_multistep_prewhere.python @@ -6,7 +6,7 @@ import sys CURDIR = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(CURDIR, "helpers")) -from pure_http_client import ClickHouseClient +from pure_http_client import ClickHouseClient, requests_session_with_retries class Tester: @@ -195,7 +195,7 @@ def main(): default_index_granularity = 10 total_rows = 7 * default_index_granularity step = default_index_granularity - session = requests.Session() + session = requests_session_with_retries() for index_granularity in [ default_index_granularity - 1, default_index_granularity, diff --git a/tests/queries/0_stateless/02473_multistep_split_prewhere.python b/tests/queries/0_stateless/02473_multistep_split_prewhere.python index 19444994fd2..10e94059171 100644 --- a/tests/queries/0_stateless/02473_multistep_split_prewhere.python +++ b/tests/queries/0_stateless/02473_multistep_split_prewhere.python @@ -6,7 +6,7 @@ import sys CURDIR = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(CURDIR, "helpers")) -from pure_http_client import ClickHouseClient +from pure_http_client import ClickHouseClient, requests_session_with_retries class Tester: @@ -161,7 +161,7 @@ def main(): default_index_granularity = 10 total_rows = 8 * default_index_granularity step = default_index_granularity - session = requests.Session() + session = requests_session_with_retries() for index_granularity in [default_index_granularity - 1, default_index_granularity]: tester = Tester(session, url, index_granularity, total_rows) # Test combinations of ranges of columns c and d diff --git a/tests/queries/0_stateless/helpers/pure_http_client.py b/tests/queries/0_stateless/helpers/pure_http_client.py index 7a8efec36bb..a31a91e0550 100644 --- a/tests/queries/0_stateless/helpers/pure_http_client.py +++ b/tests/queries/0_stateless/helpers/pure_http_client.py @@ -1,7 +1,8 @@ import os import io -import sys import requests +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry import time import pandas as pd @@ -77,3 +78,17 @@ class ClickHouseClient: return result else: raise ValueError(r.text) + + +def requests_session_with_retries(retries=3, timeout=180): + session = requests.Session() + retry = Retry( + total=retries, + read=retries, + connect=retries, + ) + adapter = HTTPAdapter(max_retries=retry) + session.mount("http://", adapter) + session.mount("https://", adapter) + session.timeout = timeout + return session From aefed7cdd62e874f7507afe69d803c9164a283ea Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 2 Aug 2024 16:06:53 +0200 Subject: [PATCH 1463/2361] Update tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql --- .../0_stateless/03164_s3_settings_for_queries_and_merges.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql index 001ef382850..a6932e0536c 100644 --- a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql +++ b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql @@ -21,6 +21,7 @@ SYSTEM DROP MARK CACHE; SELECT count() FROM t_compact_bytes_s3 WHERE NOT ignore(c2, c4); SYSTEM 
FLUSH LOGS; +-- Errors in S3 requests will be automatically retried, however ProfileEvents can be wrong. That is why we subtract errors. SELECT ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsErrors'], ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 From 86bab5a78a9593862815869c5964f557159352ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 2 Aug 2024 16:09:43 +0200 Subject: [PATCH 1464/2361] Unit test: Don't return ok if leaks are detected --- docker/test/unit/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/test/unit/run.sh b/docker/test/unit/run.sh index ba11f568218..210f31170a5 100644 --- a/docker/test/unit/run.sh +++ b/docker/test/unit/run.sh @@ -1,6 +1,9 @@ #!/bin/bash set -x +# Need to keep error from tests after `tee`. Otherwise we don't alert on asan errors +set -o pipefail +set -e if [ "$#" -ne 1 ]; then echo "Expected exactly one argument" From 664e131f4f2e46fc216305c440e840a5a5784328 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Fri, 2 Aug 2024 16:09:48 +0200 Subject: [PATCH 1465/2361] Integration tests: fix ports clashing problem 4 --- tests/integration/conftest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index b4c86a1cd2f..aa235118aed 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +# pylint: disable=unused-argument +# pylint: disable=broad-exception-raised import logging import os From b3e2ce695514d4d314ed8ac1ecdb111c5f94ac7d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1466/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From 2110b765d19ce4e68d0c23eab219e199aec0ea96 Mon Sep 17 00:00:00 2001 From: Lennard Eijsackers Date: Fri, 2 Aug 2024 17:53:12 +0200 Subject: [PATCH 1467/2361] Style check fix + adding debug info to query output Signed-off-by: Lennard Eijsackers --- src/Functions/bitSlice.cpp | 2 -- .../03214_bitslice_argument_evaluation.reference | 11 +++++++++++ .../03214_bitslice_argument_evaluation.sql | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Functions/bitSlice.cpp b/src/Functions/bitSlice.cpp index f1d3bb57221..f24473351ae 100644 --- a/src/Functions/bitSlice.cpp +++ b/src/Functions/bitSlice.cpp @@ -18,9 +18,7 @@ using namespace GatherUtils; namespace ErrorCodes { extern const int ILLEGAL_COLUMN; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ZERO_ARRAY_OR_TUPLE_INDEX; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } class FunctionBitSlice : public IFunction diff --git a/tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference 
b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference index e69de29bb2d..1731dfa0d79 100644 --- a/tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference +++ b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.reference @@ -0,0 +1,11 @@ +-- { echo } +-- No arguments passed +SELECT bitSlice(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- Invalid 1st argument passed +SELECT bitSlice(1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Valid 1st argument, invalid 2nd argument passed +SELECT bitSlice('Hello', 'World'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Valid 1st argument & 2nd argument, invalid 3rd argument passed +SELECT bitSlice('Hello', 1, 'World'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- More arguments then expected +SELECT bitSlice('Hello', 1, 1, 'World'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql index b8488600fcb..1731dfa0d79 100644 --- a/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql +++ b/tests/queries/0_stateless/03214_bitslice_argument_evaluation.sql @@ -1,3 +1,4 @@ +-- { echo } -- No arguments passed SELECT bitSlice(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- Invalid 1st argument passed From 364622f567028ffc70785b681fc246d7151eef04 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1468/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From fcb0ce7361f74dd8d97a3007f77248f293b2ce5f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 18:17:32 +0200 Subject: [PATCH 1469/2361] Fix docs build --- docker/docs/builder/run.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/docs/builder/run.sh b/docker/docs/builder/run.sh index 01c15cb4b0f..d73adb5d279 100755 --- a/docker/docs/builder/run.sh +++ b/docker/docs/builder/run.sh @@ -26,7 +26,6 @@ sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then export CI=true - yarn install exec yarn build "$@" fi From cebb3668380f65187b201e638013df40f8ac8ada Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 2 Aug 2024 09:23:40 +0200 Subject: [PATCH 1470/2361] more fixes --- .github/actions/check_workflow/action.yml | 21 ++++ .github/workflows/create_release.yml | 27 +++-- .github/workflows/pull_request.yml | 9 +- tests/ci/artifactory.py | 6 +- tests/ci/auto_release.py | 6 +- tests/ci/ci.py | 4 +- tests/ci/ci_buddy.py | 30 +++-- tests/ci/ci_cache.py | 14 +-- tests/ci/ci_config.py | 3 +- tests/ci/ci_definitions.py | 3 +- tests/ci/ci_metadata.py | 4 +- tests/ci/ci_utils.py | 30 ++++- tests/ci/create_release.py | 131 ++++++++++++++-------- 
tests/ci/docker_server.py | 73 +++--------- tests/ci/test_docker.py | 46 +------- 15 files changed, 213 insertions(+), 194 deletions(-) create mode 100644 .github/actions/check_workflow/action.yml diff --git a/.github/actions/check_workflow/action.yml b/.github/actions/check_workflow/action.yml new file mode 100644 index 00000000000..19a3cec76f5 --- /dev/null +++ b/.github/actions/check_workflow/action.yml @@ -0,0 +1,21 @@ +name: CheckWorkflowResults + +description: Check overall workflow status and post error to slack if any + +inputs: + needs: + description: github needs context as a json string + required: true + type: string + +runs: + using: "composite" + steps: + - name: Check Workflow + shell: bash + run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ inputs.needs }} + EOF + python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index e27db1b09a4..29094cc51a6 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -24,7 +24,7 @@ concurrency: dry-run: description: 'Dry run' required: false - default: true + default: false type: boolean jobs: @@ -43,16 +43,27 @@ jobs: - name: Prepare Release Info shell: bash run: | + if [ ${{ inputs.only-repo }} == "true" ]; then + git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; } + fi python3 ./tests/ci/create_release.py --prepare-release-info \ - --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} ${{ inputs.dry-run == true && '--dry-run' || '' }} + --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \ + ${{ inputs.dry-run == true && '--dry-run' || '' }} \ + ${{ inputs.only-repo == true && '--skip-tag-check' || '' }} echo "::group::Release Info" python3 -m json.tool /tmp/release_info.json echo "::endgroup::" release_tag=$(jq -r '.release_tag' /tmp/release_info.json) commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json) + is_latest=$(jq -r '.latest' /tmp/release_info.json) echo "Release Tag: $release_tag" echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV" echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" + if [ "$is_latest" == "true" ]; then + echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV" + else + echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV" + fi - name: Download All Release Artifacts if: ${{ inputs.type == 'patch' }} shell: bash @@ -85,10 +96,11 @@ jobs: echo "Generate ChangeLog" export CI=1 docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ - --volume=".:/ClickHouse" clickhouse/style-test \ - /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ + --volume=".:/wd" --workdir="/wd" \ + clickhouse/style-test \ + ./tests/ci/changelog.py -v --debug-helpers \ --jobs=5 \ - --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} + --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md echo "Generate Security" python3 ./utils/security-generator/generate_security.py > SECURITY.md @@ -160,7 +172,7 @@ jobs: cd "./tests/ci" python3 ./create_release.py --set-progress-started --progress "docker server release" export CHECK_NAME="Docker server image" - python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! 
inputs.dry-run && '--push' || '' }} + python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} python3 ./create_release.py --set-progress-completed - name: Docker clickhouse/clickhouse-keeper building if: ${{ inputs.type == 'patch' }} @@ -169,7 +181,7 @@ jobs: cd "./tests/ci" python3 ./create_release.py --set-progress-started --progress "docker keeper release" export CHECK_NAME="Docker keeper image" - python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} + python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} python3 ./create_release.py --set-progress-completed - name: Update release info. Merge created PRs shell: bash @@ -178,6 +190,7 @@ jobs: - name: Set current Release progress to Completed with OK shell: bash run: | + # dummy stage to finalize release info with "progress: completed; status: OK" python3 ./tests/ci/create_release.py --set-progress-started --progress "completed" python3 ./tests/ci/create_release.py --set-progress-completed - name: Post Slack Message diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 04bef1460a6..071f0f1e20a 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -172,12 +172,9 @@ jobs: cd "$GITHUB_WORKSPACE/tests/ci" python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - name: Check Workflow results - run: | - export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat > "$WORKFLOW_RESULT_FILE" << 'EOF' - ${{ toJson(needs) }} - EOF - python3 ./tests/ci/ci_buddy.py --check-wf-status + uses: ./.github/actions/check_workflow + with: + needs: ${{ toJson(needs) }} ################################# Stage Final ################################# # diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index 8bba7bca30e..f3d7d24f717 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -158,7 +158,7 @@ class DebianArtifactory: print("Running test command:") print(f" {cmd}") assert Shell.check(cmd) - self.release_info.debian_command = debian_command + self.release_info.debian = debian_command self.release_info.dump() @@ -240,7 +240,7 @@ class RpmArtifactory: print("Running test command:") print(f" {cmd}") assert Shell.check(cmd) - self.release_info.rpm_command = rpm_command + self.release_info.rpm = rpm_command self.release_info.dump() @@ -304,7 +304,7 @@ class TgzArtifactory: expected_checksum == actual_checksum ), f"[{actual_checksum} != {expected_checksum}]" Shell.check("rm /tmp/tmp.tgz*", verbose=True) - self.release_info.tgz_command = cmd + self.release_info.tgz = cmd self.release_info.dump() diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index 6c17b4c74ad..3cc88634004 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -127,15 +127,13 @@ def _prepare(token): ) commit_num -= 1 - is_completed = CI.GHActions.check_wf_completed( - token=token, commit_sha=commit - ) + is_completed = CI.GH.check_wf_completed(token=token, commit_sha=commit) if not is_completed: print(f"CI is in progress for [{commit}] - check previous commit") commits_to_branch_head += 1 continue - 
commit_ci_status = CI.GHActions.get_commit_status_by_name( + commit_ci_status = CI.GH.get_commit_status_by_name( token=token, commit_sha=commit, status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"), diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 935fe472e50..2565c8944e4 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -16,7 +16,7 @@ import upload_result_helper from build_check import get_release_or_pr from ci_config import CI from ci_metadata import CiMetadata -from ci_utils import GHActions, normalize_string, Utils +from ci_utils import GH, normalize_string, Utils from clickhouse_helper import ( CiLogsCredentials, ClickHouseHelper, @@ -368,7 +368,7 @@ def _pre_action(s3, job_name, batch, indata, pr_info): ) to_be_skipped = True # skip_status = SUCCESS already there - GHActions.print_in_group("Commit Status Data", job_status) + GH.print_in_group("Commit Status Data", job_status) # create pre report jr = JobReport.create_pre_report(status=skip_status, job_skipped=to_be_skipped) diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index 138909c1db0..f0e73e925fe 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -8,7 +8,7 @@ import requests from botocore.exceptions import ClientError from pr_info import PRInfo -from ci_utils import Shell, GHActions +from ci_config import CI class CIBuddy: @@ -31,10 +31,19 @@ class CIBuddy: self.sha = pr_info.sha[:10] def check_workflow(self): - GHActions.print_workflow_results() - res = GHActions.get_workflow_job_result(GHActions.ActionsNames.RunConfig) - if res != GHActions.ActionStatuses.SUCCESS: - self.post_job_error("Workflow Configuration Failed", critical=True) + CI.GH.print_workflow_results() + if CI.Envs.GITHUB_WORKFLOW == CI.WorkFlowNames.CreateRelease: + if not CI.GH.is_workflow_ok(): + self.post_job_error( + f"{CI.Envs.GITHUB_WORKFLOW} Workflow Failed", critical=True + ) + else: + res = CI.GH.get_workflow_job_result(CI.GH.ActionsNames.RunConfig) + if res != CI.GH.ActionStatuses.SUCCESS: + print(f"ERROR: RunConfig status is [{res}] - post report to slack") + self.post_job_error( + f"{CI.Envs.GITHUB_WORKFLOW} Workflow Failed", critical=True + ) @staticmethod def _get_webhooks(): @@ -74,10 +83,13 @@ class CIBuddy: message = title if isinstance(body, dict): for name, value in body.items(): - if "commit_sha" in name: + if "sha" in name and value and len(value) == 40: value = ( f"" ) + elif isinstance(value, str) and value.startswith("https://github.com/"): + value_shorten = value.split("/")[-1] + value = f"<{value}|{value_shorten}>" message += f" *{name}*: {value}\n" else: message += body + "\n" @@ -120,9 +132,11 @@ class CIBuddy: ) -> None: instance_id, instance_type = "unknown", "unknown" if with_instance_info: - instance_id = Shell.get_output("ec2metadata --instance-id") or instance_id + instance_id = ( + CI.Shell.get_output("ec2metadata --instance-id") or instance_id + ) instance_type = ( - Shell.get_output("ec2metadata --instance-type") or instance_type + CI.Shell.get_output("ec2metadata --instance-type") or instance_type ) if not job_name: job_name = os.getenv("CHECK_NAME", "unknown") diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 85eabb84f9f..4846233ab03 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -7,7 +7,7 @@ from typing import Dict, Optional, Any, Union, Sequence, List, Set from ci_config import CI -from ci_utils import is_hex, GHActions +from ci_utils import is_hex, GH from commit_status_helper import CommitStatusData from env_helper import ( TEMP_PATH, @@ -258,15 +258,15 
@@ class CiCache: def print_status(self): print(f"Cache enabled: [{self.enabled}]") for record_type in self.RecordType: - GHActions.print_in_group( + GH.print_in_group( f"Cache records: [{record_type}]", list(self.records[record_type]) ) - GHActions.print_in_group( + GH.print_in_group( "Jobs to do:", list(self.jobs_to_do.items()), ) - GHActions.print_in_group("Jobs to skip:", self.jobs_to_skip) - GHActions.print_in_group( + GH.print_in_group("Jobs to skip:", self.jobs_to_skip) + GH.print_in_group( "Jobs to wait:", list(self.jobs_to_wait.items()), ) @@ -788,7 +788,7 @@ class CiCache: while round_cnt < MAX_ROUNDS_TO_WAIT: round_cnt += 1 - GHActions.print_in_group( + GH.print_in_group( f"Wait pending jobs, round [{round_cnt}/{MAX_ROUNDS_TO_WAIT}]:", list(self.jobs_to_wait), ) @@ -853,7 +853,7 @@ class CiCache: # make up for 2 iterations in dry_run expired_sec += int(TIMEOUT / 2) + 1 - GHActions.print_in_group( + GH.print_in_group( "Remaining jobs:", [list(self.jobs_to_wait)], ) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index a7df884a091..c031ca9b805 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -34,7 +34,8 @@ class CI: from ci_definitions import Runners as Runners from ci_utils import Envs as Envs from ci_utils import Utils as Utils - from ci_utils import GHActions as GHActions + from ci_utils import GH as GH + from ci_utils import Shell as Shell from ci_definitions import Labels as Labels from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS from ci_definitions import WorkFlowNames as WorkFlowNames diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 51de8c63509..de6791acda8 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -112,6 +112,7 @@ class WorkFlowNames(metaclass=WithIter): """ JEPSEN = "JepsenWorkflow" + CreateRelease = "CreateRelease" class BuildNames(metaclass=WithIter): @@ -578,7 +579,7 @@ class CommonJobConfigs: DOCKER_SERVER = JobConfig( job_name_keyword="docker", required_on_release_branch=True, - run_command='docker_server.py --check-name "$CHECK_NAME" --release-type head --allow-build-reuse', + run_command='docker_server.py --check-name "$CHECK_NAME" --tag-type head --allow-build-reuse', digest=DigestConfig( include_paths=[ "tests/ci/docker_server.py", diff --git a/tests/ci/ci_metadata.py b/tests/ci/ci_metadata.py index a767d102811..67106262634 100644 --- a/tests/ci/ci_metadata.py +++ b/tests/ci/ci_metadata.py @@ -9,7 +9,7 @@ from env_helper import ( S3_BUILDS_BUCKET_PUBLIC, ) from s3_helper import S3Helper -from ci_utils import GHActions +from ci_utils import GH from synchronizer_utils import SYNC_BRANCH_PREFIX @@ -111,7 +111,7 @@ class CiMetadata: else: log_title = f"Storing workflow metadata: PR [{self.pr_number}], upstream PR [{self.upstream_pr_number}]" - GHActions.print_in_group( + GH.print_in_group( log_title, [f"run_id: {self.run_id}"], ) diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index 4f696a2c55a..dae1520afb6 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -16,6 +16,8 @@ class Envs: WORKFLOW_RESULT_FILE = os.getenv( "WORKFLOW_RESULT_FILE", "/tmp/workflow_results.json" ) + S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds") + GITHUB_WORKFLOW = os.getenv("GITHUB_WORKFLOW", "") LABEL_CATEGORIES = { @@ -83,7 +85,7 @@ def normalize_string(string: str) -> str: return res -class GHActions: +class GH: class ActionsNames: RunConfig = "RunConfig" @@ -117,6 +119,14 @@ class GHActions: results = [f"{job}: {data['result']}" for 
job, data in res.items()] cls.print_in_group("Workflow results", results) + @classmethod + def is_workflow_ok(cls) -> bool: + res = cls._get_workflow_results() + for _job, data in res.items(): + if data["result"] == "failure": + return False + return bool(res) + @classmethod def get_workflow_job_result(cls, wf_job_name: str) -> Optional[str]: res = cls._get_workflow_results() @@ -189,15 +199,25 @@ class GHActions: return False @staticmethod - def get_pr_url_by_branch(repo, branch): - get_url_cmd = ( - f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url'" - ) + def get_pr_url_by_branch(branch, repo=None): + repo = repo or Envs.GITHUB_REPOSITORY + get_url_cmd = f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url' --state open" url = Shell.get_output(get_url_cmd) + if not url: + print(f"WARNING: No open PR found, branch [{branch}] - search for merged") + get_url_cmd = f"gh pr list --repo {repo} --head {branch} --json url --jq '.[0].url' --state merged" + url = Shell.get_output(get_url_cmd) if not url: print(f"ERROR: PR nor found, branch [{branch}]") return url + @staticmethod + def is_latest_release_branch(branch): + latest_branch = Shell.get_output( + 'gh pr list --label release --repo ClickHouse/ClickHouse --search "sort:created" -L1 --json headRefName' + ) + return latest_branch == branch + class Shell: @classmethod diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index c407a74fbf0..b4e08f29dbe 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -10,9 +10,8 @@ from typing import Iterator, List from git_helper import Git, GIT_PREFIX from ssh import SSHAgent -from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET from s3_helper import S3Helper -from ci_utils import Shell, GHActions +from ci_utils import Shell, GH from ci_buddy import CIBuddy from version_helper import ( FILE_WITH_VERSION_PATH, @@ -69,13 +68,14 @@ class ReleaseContextManager: previous_release_tag="NA", previous_release_sha="NA", release_progress=ReleaseProgress.STARTED, + latest=False, ).dump() else: # fetch release info from fs and update self.release_info = ReleaseInfo.from_file() assert self.release_info assert ( - self.release_info.progress_description == ReleaseProgressDescription.OK + self.release_info.progress_status == ReleaseProgressDescription.OK ), "Must be OK on the start of new context" self.release_info.release_progress = self.release_progress self.release_info.dump() @@ -84,9 +84,9 @@ class ReleaseContextManager: def __exit__(self, exc_type, exc_value, traceback): assert self.release_info if exc_type is not None: - self.release_info.progress_description = ReleaseProgressDescription.FAILED + self.release_info.progress_status = ReleaseProgressDescription.FAILED else: - self.release_info.progress_description = ReleaseProgressDescription.OK + self.release_info.progress_status = ReleaseProgressDescription.OK self.release_info.dump() @@ -96,6 +96,7 @@ class ReleaseInfo: release_tag: str release_branch: str commit_sha: str + latest: bool # lts or stable codename: str previous_release_tag: str @@ -104,12 +105,12 @@ class ReleaseInfo: version_bump_pr: str = "" prs_merged: bool = False release_url: str = "" - debian_command: str = "" - rpm_command: str = "" - tgz_command: str = "" - docker_command: str = "" + debian: str = "" + rpm: str = "" + tgz: str = "" + docker: str = "" release_progress: str = "" - progress_description: str = "" + progress_status: str = "" def is_patch(self): return self.release_branch != "master" @@ -129,12 +130,15 
@@ class ReleaseInfo: print(json.dumps(dataclasses.asdict(self), indent=2), file=f) return self - def prepare(self, commit_ref: str, release_type: str) -> "ReleaseInfo": + def prepare( + self, commit_ref: str, release_type: str, skip_tag_check: bool + ) -> "ReleaseInfo": version = None release_branch = None release_tag = None previous_release_tag = None previous_release_sha = None + latest_release = False codename = "" assert release_type in ("patch", "new") if release_type == "new": @@ -145,7 +149,7 @@ class ReleaseInfo: verbose=True, ) with checkout(commit_ref): - commit_sha = Shell.get_output_or_raise(f"git rev-parse {commit_ref}") + commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) @@ -158,12 +162,12 @@ class ReleaseInfo: release_tag = version.describe previous_release_tag = expected_prev_tag previous_release_sha = Shell.get_output_or_raise( - f"git rev-parse {previous_release_tag}" + f"git rev-list -n1 {previous_release_tag}" ) assert previous_release_sha if release_type == "patch": with checkout(commit_ref): - commit_sha = Shell.get_output_or_raise(f"git rev-parse {commit_ref}") + commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) @@ -200,16 +204,20 @@ class ReleaseInfo: expected_tag_prefix ) and git.latest_tag.endswith(expected_tag_suffix): pass - else: + elif not skip_tag_check: assert ( False - ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]" + ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" 
previous_release_sha = Shell.get_output_or_raise( - f"git rev-parse {previous_release_tag}" + f"git rev-list -n1 {previous_release_tag}" ) assert previous_release_sha + if CI.GH.is_latest_release_branch(release_branch): + print("This is going to be the latest release!") + latest_release = True + assert ( release_branch and previous_release_tag @@ -218,7 +226,7 @@ class ReleaseInfo: and release_tag and version and (codename in ("lts", "stable") or release_type == "new") - ) + ), f"Check: {release_branch}, {previous_release_tag}, {previous_release_sha}, {commit_sha}, {release_tag}, {version}" self.release_branch = release_branch self.commit_sha = commit_sha @@ -228,7 +236,8 @@ class ReleaseInfo: self.previous_release_tag = previous_release_tag self.previous_release_sha = previous_release_sha self.release_progress = ReleaseProgress.STARTED - self.progress_description = ReleaseProgressDescription.OK + self.progress_status = ReleaseProgressDescription.OK + self.latest = latest_release return self def push_release_tag(self, dry_run: bool) -> None: @@ -252,7 +261,7 @@ class ReleaseInfo: @staticmethod def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: - cmd = f"gh api repos/{GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" + cmd = f"gh api repos/{CI.Envs.GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" Shell.check(cmd, dry_run=dry_run, strict=True) def push_new_release_branch(self, dry_run: bool) -> None: @@ -294,7 +303,7 @@ class ReleaseInfo: f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run ) Shell.check( - f"""gh pr create --repo {GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' + f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' --head {new_release_branch} {pr_labels} --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' 
""", @@ -303,9 +312,12 @@ class ReleaseInfo: verbose=True, ) + def get_version_bump_branch(self): + return f"bump_version_{self.version}" + def update_version_and_contributors_list(self, dry_run: bool) -> None: # Bump version, update contributors list, create PR - branch_upd_version_contributors = f"bump_version_{self.version}" + branch_upd_version_contributors = self.get_version_bump_branch() with checkout(self.commit_sha): git = Git() version = get_version_from_repo(git=git) @@ -323,9 +335,9 @@ class ReleaseInfo: update_contributors(raise_error=True) cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" - body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") actor = os.getenv("GITHUB_ACTOR", "") or "me" - cmd_create_pr = f"gh pr create --repo {GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body-file {body_file} --label 'do not test' --assignee {actor}" + body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" + cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body \"{body}\" --assignee {actor}" Shell.check( cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True ) @@ -342,30 +354,42 @@ class ReleaseInfo: ) self.version_bump_pr = "dry-run" else: - self.version_bump_pr = GHActions.get_pr_url_by_branch( - repo=GITHUB_REPOSITORY, branch=branch_upd_version_contributors + self.version_bump_pr = GH.get_pr_url_by_branch( + branch=branch_upd_version_contributors ) + def get_change_log_branch(self): + return f"auto/{self.release_tag}" + def update_release_info(self, dry_run: bool) -> "ReleaseInfo": if self.release_branch != "master": - branch = f"auto/{release_info.release_tag}" - if not dry_run: - url = GHActions.get_pr_url_by_branch( - repo=GITHUB_REPOSITORY, branch=branch - ) - else: - url = "dry-run" - print(f"ChangeLog PR url [{url}]") - self.changelog_pr = url - print(f"Release url [{url}]") - self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" - if self.release_progress == ReleaseProgress.COMPLETED: - self.docker_command = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version" + if not self.changelog_pr: + branch = self.get_change_log_branch() + if not dry_run: + url = GH.get_pr_url_by_branch(branch=branch) + else: + url = "dry-run" + print(f"ChangeLog PR url [{url}]") + self.changelog_pr = url + + if not self.version_bump_pr: + branch = self.get_version_bump_branch() + if not dry_run: + url = GH.get_pr_url_by_branch(branch=branch) + else: + url = "dry-run" + print(f"Version bump PR url [{url}]") + self.version_bump_pr = url + + self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + print(f"Release url [{self.release_url}]") + + self.docker = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version" self.dump() return self def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: - repo = os.getenv("GITHUB_REPOSITORY") + repo = CI.Envs.GITHUB_REPOSITORY assert repo cmds = [ f"gh release create --repo {repo} --title 'Release 
{self.release_tag}' {self.release_tag}" @@ -375,7 +399,9 @@ class ReleaseInfo: if not dry_run: for cmd in cmds: Shell.check(cmd, strict=True, verbose=True) - self.release_url = f"https://github.com/{GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + self.release_url = ( + f"https://github.com/{repo}/releases/tag/{self.release_tag}" + ) else: print("Dry-run, would run commands:") print("\n * ".join(cmds)) @@ -536,7 +562,7 @@ class PackageDownloader: ] ) self.s3.download_file( - bucket=S3_BUILDS_BUCKET, + bucket=CI.Envs.S3_BUILDS_BUCKET, s3_path=s3_path, local_file_path="/".join([self.LOCAL_DIR, package_file]), ) @@ -557,7 +583,7 @@ class PackageDownloader: ] ) self.s3.download_file( - bucket=S3_BUILDS_BUCKET, + bucket=CI.Envs.S3_BUILDS_BUCKET, s3_path=s3_path, local_file_path="/".join([self.LOCAL_DIR, destination_binary_name]), ) @@ -636,6 +662,11 @@ def parse_args() -> argparse.Namespace: action="store_true", help="Initial step to prepare info like release branch, release tag, etc.", ) + parser.add_argument( + "--skip-tag-check", + action="store_true", + help="To skip check against latest git tag on a release branch", + ) parser.add_argument( "--push-release-tag", action="store_true", @@ -725,7 +756,11 @@ if __name__ == "__main__": assert ( args.ref and args.release_type ), "--ref and --release-type must be provided with --prepare-release-info" - release_info.prepare(commit_ref=args.ref, release_type=args.release_type) + release_info.prepare( + commit_ref=args.ref, + release_type=args.release_type, + skip_tag_check=args.skip_tag_check, + ) if args.download_packages: with ReleaseContextManager( @@ -776,7 +811,7 @@ if __name__ == "__main__": else: title = "New release" if ( - release_info.progress_description == ReleaseProgressDescription.OK + release_info.progress_status == ReleaseProgressDescription.OK and release_info.release_progress == ReleaseProgress.COMPLETED ): title = "Completed: " + title @@ -792,16 +827,16 @@ if __name__ == "__main__": if args.set_progress_started: ri = ReleaseInfo.from_file() ri.release_progress = args.progress - ri.progress_description = ReleaseProgressDescription.FAILED + ri.progress_status = ReleaseProgressDescription.FAILED ri.dump() assert args.progress, "Progress step name must be provided" if args.set_progress_completed: ri = ReleaseInfo.from_file() assert ( - ri.progress_description == ReleaseProgressDescription.FAILED + ri.progress_status == ReleaseProgressDescription.FAILED ), "Must be FAILED before set to OK" - ri.progress_description = ReleaseProgressDescription.OK + ri.progress_status = ReleaseProgressDescription.OK ri.dump() if args.merge_prs: diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 3e782c079c6..8f0474d5053 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -69,13 +69,14 @@ def parse_args() -> argparse.Namespace: help="sha of the commit to use packages from", ) parser.add_argument( - "--release-type", + "--tag-type", type=str, - choices=("auto", "latest", "major", "minor", "patch", "head"), + choices=("head", "release", "latest-release"), default="head", - help="version part that will be updated when '--version' is set; " - "'auto' is a special case, it will get versions from github and detect the " - "release type (latest, major, minor or patch) automatically", + help="defines required tags for resulting docker image. 
" + "head - for master image (tag: head) " + "release - for release image (tags: XX, XX.XX, XX.XX.XX, XX.XX.XX.XX) " + "release-latest - for latest release image (tags: XX, XX.XX, XX.XX.XX, XX.XX.XX.XX, latest) ", ) parser.add_argument( "--image-path", @@ -149,74 +150,35 @@ def retry_popen(cmd: str, log_file: Path) -> int: return retcode -def auto_release_type(version: ClickHouseVersion, release_type: str) -> str: - if release_type != "auto": - return release_type - - git_versions = get_tagged_versions() - reference_version = git_versions[0] - for i in reversed(range(len(git_versions))): - if git_versions[i] <= version: - if i == len(git_versions) - 1: - return "latest" - reference_version = git_versions[i + 1] - break - - if version.major < reference_version.major: - return "major" - if version.minor < reference_version.minor: - return "minor" - if version.patch < reference_version.patch: - return "patch" - - raise ValueError( - "Release type 'tweak' is not supported for " - f"{version.string} < {reference_version.string}" - ) - - -def gen_tags(version: ClickHouseVersion, release_type: str) -> List[str]: +def gen_tags(version: ClickHouseVersion, tag_type: str) -> List[str]: """ - 22.2.2.2 + latest: + @tag_type release-latest, @version 22.2.2.2: - latest - 22 - 22.2 - 22.2.2 - 22.2.2.2 - 22.2.2.2 + major: + @tag_type release, @version 22.2.2.2: - 22 - 22.2 - 22.2.2 - 22.2.2.2 - 22.2.2.2 + minor: - - 22.2 - - 22.2.2 - - 22.2.2.2 - 22.2.2.2 + patch: - - 22.2.2 - - 22.2.2.2 - 22.2.2.2 + head: + @tag_type head: - head """ parts = version.string.split(".") tags = [] - if release_type == "latest": - tags.append(release_type) + if tag_type == "release-latest": + tags.append("latest") for i in range(len(parts)): tags.append(".".join(parts[: i + 1])) - elif release_type == "major": + elif tag_type == "head": + tags.append(tag_type) + elif tag_type == "release": for i in range(len(parts)): tags.append(".".join(parts[: i + 1])) - elif release_type == "minor": - for i in range(1, len(parts)): - tags.append(".".join(parts[: i + 1])) - elif release_type == "patch": - for i in range(2, len(parts)): - tags.append(".".join(parts[: i + 1])) - elif release_type == "head": - tags.append(release_type) else: - raise ValueError(f"{release_type} is not valid release part") + assert False, f"Invalid release type [{tag_type}]" return tags @@ -370,8 +332,7 @@ def main(): push = True image = DockerImageData(image_path, image_repo, False) - args.release_type = auto_release_type(args.version, args.release_type) - tags = gen_tags(args.version, args.release_type) + tags = gen_tags(args.version, args.tag_type) repo_urls = {} direct_urls: Dict[str, List[str]] = {} diff --git a/tests/ci/test_docker.py b/tests/ci/test_docker.py index 662143bfd9b..58ebe4ecbb1 100644 --- a/tests/ci/test_docker.py +++ b/tests/ci/test_docker.py @@ -1,61 +1,19 @@ #!/usr/bin/env python import unittest -from unittest.mock import patch, MagicMock from version_helper import get_version_from_string import docker_server as ds -# di.logging.basicConfig(level=di.logging.INFO) - class TestDockerServer(unittest.TestCase): def test_gen_tags(self): version = get_version_from_string("22.2.2.2") cases = ( - ("latest", ["latest", "22", "22.2", "22.2.2", "22.2.2.2"]), - ("major", ["22", "22.2", "22.2.2", "22.2.2.2"]), - ("minor", ["22.2", "22.2.2", "22.2.2.2"]), - ("patch", ["22.2.2", "22.2.2.2"]), + ("release-latest", ["latest", "22", "22.2", "22.2.2", "22.2.2.2"]), + ("release", ["22", "22.2", "22.2.2", "22.2.2.2"]), ("head", ["head"]), ) for case in cases: 
release_type = case[0] self.assertEqual(case[1], ds.gen_tags(version, release_type)) - - with self.assertRaises(ValueError): - ds.gen_tags(version, "auto") - - @patch("docker_server.get_tagged_versions") - def test_auto_release_type(self, mock_tagged_versions: MagicMock) -> None: - mock_tagged_versions.return_value = [ - get_version_from_string("1.1.1.1"), - get_version_from_string("1.2.1.1"), - get_version_from_string("2.1.1.1"), - get_version_from_string("2.2.1.1"), - get_version_from_string("2.2.2.1"), - ] - - cases_less = ( - (get_version_from_string("1.0.1.1"), "minor"), - (get_version_from_string("1.1.2.1"), "minor"), - (get_version_from_string("1.3.1.1"), "major"), - (get_version_from_string("2.1.2.1"), "minor"), - (get_version_from_string("2.2.1.3"), "patch"), - (get_version_from_string("2.2.3.1"), "latest"), - (get_version_from_string("2.3.1.1"), "latest"), - ) - for case in cases_less: - release = ds.auto_release_type(case[0], "auto") - self.assertEqual(case[1], release) - - cases_equal = ( - (get_version_from_string("1.1.1.1"), "minor"), - (get_version_from_string("1.2.1.1"), "major"), - (get_version_from_string("2.1.1.1"), "minor"), - (get_version_from_string("2.2.1.1"), "patch"), - (get_version_from_string("2.2.2.1"), "latest"), - ) - for case in cases_equal: - release = ds.auto_release_type(case[0], "auto") - self.assertEqual(case[1], release) From 51b39a6c745d61cb2e6feb39659ddb3cac57ad03 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 1 Aug 2024 18:24:45 +0100 Subject: [PATCH 1471/2361] some more --- tests/integration/test_executable_dictionary/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_executable_dictionary/test.py b/tests/integration/test_executable_dictionary/test.py index a1de429a235..2a6af75e751 100644 --- a/tests/integration/test_executable_dictionary/test.py +++ b/tests/integration/test_executable_dictionary/test.py @@ -235,6 +235,7 @@ def test_executable_implicit_input_signalled_python(started_cluster): ) +@pytest.mark.repeat(50) def test_executable_input_slow_python(started_cluster): skip_test_msan(node) assert node.query_and_get_error( From ae6d4bdd8aa2b010f27dd8ebbb6f816362eb2a9e Mon Sep 17 00:00:00 2001 From: Sasha Sheikin Date: Mon, 29 Jul 2024 10:31:35 +0200 Subject: [PATCH 1472/2361] Fix positionCaseInsensitive example --- .../en/sql-reference/functions/string-search-functions.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md index b7ba1d4feb7..e9ff7ebf33b 100644 --- a/docs/en/sql-reference/functions/string-search-functions.md +++ b/docs/en/sql-reference/functions/string-search-functions.md @@ -150,15 +150,15 @@ A case insensitive invariant of [position](#position). 
Query: ``` sql -SELECT position('Hello, world!', 'hello'); +SELECT positionCaseInsensitive('Hello, world!', 'hello'); ``` Result: ``` text -┌─position('Hello, world!', 'hello')─┠-│ 0 │ -└────────────────────────────────────┘ +┌─positionCaseInsensitive('Hello, world!', 'hello')─┠+│ 1 │ +└───────────────────────────────────────────────────┘ ``` ## positionUTF8 From 89c47df559ba23d988f8af3c342e0c8d5531f4b8 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 2 Aug 2024 16:52:46 +0000 Subject: [PATCH 1473/2361] Fix variant as common type in if function with Tuples and Maps --- src/Functions/if.cpp | 10 ++++++++++ .../03215_varian_as_common_type_tuple_map.reference | 10 ++++++++++ .../03215_varian_as_common_type_tuple_map.sql | 7 +++++++ 3 files changed, 27 insertions(+) create mode 100644 tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.reference create mode 100644 tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.sql diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index 07dbee27a9d..64da6e95a43 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -632,6 +632,11 @@ private: ColumnPtr executeTuple(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { + /// For different Tuples the result type can be Variant with this Tuples if use_variant_as_common_type=1. + /// In this case we should use generic implementation. + if (!isTuple(result_type)) + return nullptr; + /// Calculate function for each corresponding elements of tuples. const ColumnWithTypeAndName & arg1 = arguments[1]; @@ -677,6 +682,11 @@ private: ColumnPtr executeMap(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { + /// For different Maps the result type can be Variant with this Maps if use_variant_as_common_type=1. + /// In this case we should use generic implementation. 
+ if (!isMap(result_type)) + return nullptr; + auto extract_kv_from_map = [](const ColumnMap * map) { const ColumnTuple & tuple = map->getNestedData(); diff --git a/tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.reference b/tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.reference new file mode 100644 index 00000000000..7dabd5388f4 --- /dev/null +++ b/tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.reference @@ -0,0 +1,10 @@ +('0') Variant(Tuple(String), Tuple(\n number UInt64)) +(1) Variant(Tuple(String), Tuple(\n number UInt64)) +('2') Variant(Tuple(String), Tuple(\n number UInt64)) +(3) Variant(Tuple(String), Tuple(\n number UInt64)) +('4') Variant(Tuple(String), Tuple(\n number UInt64)) +{'0':'0'} Variant(Map(String, String), Map(UInt64, UInt64)) +{1:1} Variant(Map(String, String), Map(UInt64, UInt64)) +{'2':'2'} Variant(Map(String, String), Map(UInt64, UInt64)) +{3:3} Variant(Map(String, String), Map(UInt64, UInt64)) +{'4':'4'} Variant(Map(String, String), Map(UInt64, UInt64)) diff --git a/tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.sql b/tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.sql new file mode 100644 index 00000000000..4a9a788ab18 --- /dev/null +++ b/tests/queries/0_stateless/03215_varian_as_common_type_tuple_map.sql @@ -0,0 +1,7 @@ +set use_variant_as_common_type = 1; +set allow_experimental_variant_type = 1; + +SELECT if(number % 2, tuple(number), tuple(toString(number))) as res, toTypeName(res) FROM numbers(5); +SELECT if(number % 2, map(number, number), map(toString(number), toString(number))) as res, toTypeName(res) FROM numbers(5); + + From b38c46a87d6eacbc7805562deb07ce586fd7e0fb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1474/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From e3239c6ee11eb5bf0466fb750c58125868885ec8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 19:09:16 +0200 Subject: [PATCH 1475/2361] Fix bad log message in JIT for sorting --- src/Core/SortDescription.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Core/SortDescription.cpp b/src/Core/SortDescription.cpp index 9edc79a1ff1..1b3f81f8547 100644 --- a/src/Core/SortDescription.cpp +++ b/src/Core/SortDescription.cpp @@ -103,7 +103,15 @@ static std::string getSortDescriptionDump(const SortDescription & description, c WriteBufferFromOwnString buffer; for (size_t i = 0; i < description.size(); ++i) - buffer << header_types[i]->getName() << ' ' << description[i].direction << ' ' << description[i].nulls_direction; + { + if (i != 0) + buffer << ", "; + + buffer << "(type: " << header_types[i]->getName() + << ", direction: " << description[i].direction + << ", nulls_direction: " << 
description[i].nulls_direction + << ")"; + } return buffer.str(); } From 77a2eb61ef965a6460bbdb74447aa3871cb1d0c7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 2 Aug 2024 17:43:33 +0000 Subject: [PATCH 1476/2361] Update test. --- ...61_lightweight_delete_projection.reference | 70 ++++++++++++++++++ .../03161_lightweight_delete_projection.sql | 74 ++++++++++--------- 2 files changed, 111 insertions(+), 33 deletions(-) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index 960fa1dcc33..eef0c5a41b5 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,20 +1,90 @@ compact part testing throw default mode +-- { echoOn } + +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +SELECT 'testing drop mode'; testing drop mode +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users_compact WHERE uid = 1231; +SELECT * FROM users_compact ORDER BY uid; +SYSTEM FLUSH LOGS; +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); all_1_1_0_2 +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +SELECT 'testing rebuild mode'; testing rebuild mode +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +DELETE FROM users_compact WHERE uid = 6666; +SELECT * FROM users_compact ORDER BY uid; 8888 Alice 50 +SYSTEM FLUSH LOGS; +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); all_1_1_0_4 all_3_3_0_4 +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); p1 all_3_3_0_4 p2 all_3_3_0_4 wide part testing throw default mode +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +SELECT 'testing drop mode'; testing drop mode +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users_wide WHERE uid = 1231; +SELECT * FROM users_wide ORDER BY uid; +SYSTEM FLUSH LOGS; +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); all_1_1_0_2 +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +SELECT 'testing rebuild mode'; testing rebuild mode +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +DELETE FROM users_wide WHERE uid = 6666; +SELECT * FROM users_wide ORDER BY uid; 8888 Alice 50 +SYSTEM FLUSH LOGS; +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 
'users_wide') AND (active = 1); all_1_1_0_4 all_3_3_0_4 +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); p1 all_3_3_0_4 p2 all_3_3_0_4 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 02b880d620a..28e5612a529 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,12 +1,12 @@ SET lightweight_deletes_sync = 2, alter_sync = 2; -DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS users_compact; SELECT 'compact part'; -CREATE TABLE users ( +CREATE TABLE users_compact ( uid Int16, name String, age Int16, @@ -15,20 +15,22 @@ CREATE TABLE users ( ) ENGINE = MergeTree order by uid SETTINGS min_bytes_for_wide_part = 10485760; -INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users_compact VALUES (1231, 'John', 33); SELECT 'testing throw default mode'; -ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +-- { echoOn } -DELETE FROM users WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } SELECT 'testing drop mode'; -ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; -DELETE FROM users WHERE uid = 1231; +DELETE FROM users_compact WHERE uid = 1231; -SELECT * FROM users ORDER BY uid; +SELECT * FROM users_compact ORDER BY uid; SYSTEM FLUSH LOGS; @@ -36,22 +38,22 @@ SYSTEM FLUSH LOGS; SELECT name FROM system.parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -- expecting no projection SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); SELECT 'testing rebuild mode'; -INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); -ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; -DELETE FROM users WHERE uid = 6666; +DELETE FROM users_compact WHERE uid = 6666; -SELECT * FROM users ORDER BY uid; +SELECT * FROM users_compact ORDER BY uid; SYSTEM FLUSH LOGS; @@ -59,19 +61,21 @@ SYSTEM FLUSH LOGS; SELECT name FROM system.parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -- expecting projection p1, p2 SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -DROP TABLE users; +-- { echoOff } + +DROP TABLE users_compact; SELECT 'wide part'; -CREATE TABLE users ( +CREATE TABLE users_wide ( uid Int16, name String, age Int16, @@ -80,19 +84,22 @@ CREATE TABLE users ( ) ENGINE = MergeTree 
order by uid SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users_wide VALUES (1231, 'John', 33); SELECT 'testing throw default mode'; -ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; -DELETE FROM users WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } SELECT 'testing drop mode'; -ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; -DELETE FROM users WHERE uid = 1231; +DELETE FROM users_wide WHERE uid = 1231; -SELECT * FROM users ORDER BY uid; +SELECT * FROM users_wide ORDER BY uid; SYSTEM FLUSH LOGS; @@ -100,22 +107,22 @@ SYSTEM FLUSH LOGS; SELECT name FROM system.parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); -- expecting no projection SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); SELECT 'testing rebuild mode'; -INSERT INTO users VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); -ALTER TABLE users MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; -DELETE FROM users WHERE uid = 6666; +DELETE FROM users_wide WHERE uid = 6666; -SELECT * FROM users ORDER BY uid; +SELECT * FROM users_wide ORDER BY uid; SYSTEM FLUSH LOGS; @@ -123,13 +130,14 @@ SYSTEM FLUSH LOGS; SELECT name FROM system.parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); -- expecting projection p1, p2 SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +-- { echoOff } -DROP TABLE users; \ No newline at end of file +DROP TABLE users_wide; From 3c4389ec4d78db55ce742e5d5a3b0ed050c9c9e6 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Fri, 2 Aug 2024 14:57:19 -0300 Subject: [PATCH 1477/2361] doc/fix max_partitions_to_read description --- .../operations/settings/merge-tree-settings.md | 7 ++----- .../en/operations/settings/query-complexity.md | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index 7278b91f90d..67fa45c20cd 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -119,11 +119,6 @@ Minimum size of blocks of uncompressed data required for compression when writin You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting). The value specified when table is created overrides the global value for this setting. 
-## max_partitions_to_read - -Limits the maximum number of partitions that can be accessed in one query. -You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting. - ## max_suspicious_broken_parts If the number of broken parts in a single partition exceeds the `max_suspicious_broken_parts` value, automatic deletion is denied. @@ -691,6 +686,8 @@ Possible values: Default value: -1 (unlimited). +You can also specify a query complexity setting [max_partitions_to_read](query-complexity#max-partitions-to-read) at a query / session / profile level. + ## min_age_to_force_merge_seconds {#min_age_to_force_merge_seconds} Merge parts if every part in the range is older than the value of `min_age_to_force_merge_seconds`. diff --git a/docs/en/operations/settings/query-complexity.md b/docs/en/operations/settings/query-complexity.md index 2a20e74e20f..14ccb1167f9 100644 --- a/docs/en/operations/settings/query-complexity.md +++ b/docs/en/operations/settings/query-complexity.md @@ -188,7 +188,7 @@ If you set `timeout_before_checking_execution_speed `to 0, ClickHouse will use c What to do if the query is run longer than `max_execution_time` or the estimated running time is longer than `max_estimated_execution_time`: `throw` or `break`. By default, `throw`. -# max_execution_time_leaf +## max_execution_time_leaf Similar semantic to `max_execution_time` but only apply on leaf node for distributed or remote queries. @@ -204,7 +204,7 @@ We can use `max_execution_time_leaf` as the query settings: SELECT count() FROM cluster(cluster, view(SELECT * FROM t)) SETTINGS max_execution_time_leaf = 10; ``` -# timeout_overflow_mode_leaf +## timeout_overflow_mode_leaf What to do when the query in leaf node run longer than `max_execution_time_leaf`: `throw` or `break`. By default, `throw`. @@ -426,3 +426,17 @@ Example: ``` Default value: 0 (Infinite count of simultaneous sessions). + +## max_partitions_to_read {#max-partitions-to-read} + +Limits the maximum number of partitions that can be accessed in one query. + +The setting value specified when the table is created can be overridden via query-level setting. + +Possible values: + +- Any positive integer. + +Default value: -1 (unlimited). + +You can also specify a MergeTree setting [max_partitions_to_read](merge-tree-settings#max-partitions-to-read) in tables' setting. From ce39957983af8bdd7d97e4a3729e2f97d3e0cb85 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 20:11:25 +0200 Subject: [PATCH 1478/2361] Remove capitalization in test reports --- tests/ci/report.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/ci/report.py b/tests/ci/report.py index f50ed4c1f85..3f0fc596824 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -293,9 +293,9 @@ class JobReport: start_time: str duration: float additional_files: Union[Sequence[str], Sequence[Path]] - # clickhouse version, build job only + # ClickHouse version, build job only version: str = "" - # checkname to set in commit status, set if differs from jjob name + # check_name to be set in commit status, set it if it differs from the job name check_name: str = "" # directory with artifacts to upload on s3 build_dir_for_upload: Union[Path, str] = "" @@ -667,11 +667,7 @@ ColorTheme = Tuple[str, str, str] def _format_header( header: str, branch_name: str, branch_url: Optional[str] = None ) -> str: - # Following line does not lower CI->Ci and SQLancer->Sqlancer. 
It only - # capitalizes the first letter and doesn't touch the rest of the word - result = " ".join([w[0].upper() + w[1:] for w in header.split(" ") if w]) - result = result.replace("Clickhouse", "ClickHouse") - result = result.replace("clickhouse", "ClickHouse") + result = header if "ClickHouse" not in result: result = f"ClickHouse {result}" if branch_url: From 2c9b61d047c1afe22b0fa0a967a87db8bd4cf62f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 20:16:44 +0200 Subject: [PATCH 1479/2361] Miscellaneous --- tests/ci/ci.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 935fe472e50..6ca84a346e2 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1019,7 +1019,7 @@ def _get_ext_check_name(check_name: str) -> str: return check_name_with_group -def _cancel_pr_wf(s3: S3Helper, pr_number: int, cancel_sync: bool = False) -> None: +def _cancel_pr_workflow(s3: S3Helper, pr_number: int, cancel_sync: bool = False) -> None: wf_data = CiMetadata(s3, pr_number).fetch_meta() if not cancel_sync: if not wf_data.run_id: @@ -1368,12 +1368,12 @@ def main() -> int: assert indata, "Run config must be provided via --infile" _update_gh_statuses_action(indata=indata, s3=s3) - ### CANCEL PREVIOUS WORKFLOW RUN + ### CANCEL THE PREVIOUS WORKFLOW RUN elif args.cancel_previous_run: if pr_info.is_merge_queue: - _cancel_pr_wf(s3, pr_info.merged_pr) + _cancel_pr_workflow(s3, pr_info.merged_pr) elif pr_info.is_pr: - _cancel_pr_wf(s3, pr_info.number, cancel_sync=True) + _cancel_pr_workflow(s3, pr_info.number, cancel_sync=True) else: assert False, "BUG! Not supported scenario" From bd3606dac4954c673ec6c38dd6fbdb70bc7b53cc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 20:17:07 +0200 Subject: [PATCH 1480/2361] Fix typos --- tests/ci/commit_status_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py index fdc9c002b66..908ac4a7dca 100644 --- a/tests/ci/commit_status_helper.py +++ b/tests/ci/commit_status_helper.py @@ -301,7 +301,7 @@ def get_worst_state(statuses: CommitStatuses) -> StatusType: def create_ci_report(pr_info: PRInfo, statuses: CommitStatuses) -> str: - """The function converst the statuses to TestResults and uploads the report + """The function converts the statuses to TestResults and uploads the report to S3 tests bucket. 
Then it returns the URL""" test_results = [] # type: TestResults for status in statuses: From 675afda17210ca7e8e71e0899a5ed14d7227fb55 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 20:22:08 +0200 Subject: [PATCH 1481/2361] Fix check names in the CI Logs database --- tests/ci/clickhouse_helper.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index 287970cce9a..0725f7100d1 100644 --- a/tests/ci/clickhouse_helper.py +++ b/tests/ci/clickhouse_helper.py @@ -3,6 +3,7 @@ import fileinput import json import logging import time +import os from pathlib import Path from typing import Any, Dict, List, Optional @@ -298,6 +299,11 @@ class CiLogsCredentials: def get_docker_arguments( self, pr_info: PRInfo, check_start_time: str, check_name: str ) -> str: + run_by_hash_total = int(os.getenv("RUN_BY_HASH_TOTAL", "0")) + if run_by_hash_total > 1: + run_by_hash_num = int(os.getenv("RUN_BY_HASH_NUM", "0")) + check_name = f"{check_name} [{run_by_hash_num + 1}/{run_by_hash_total}]" + self.create_ci_logs_credentials() if not self.config_path.exists(): logging.info("Do not use external logs pushing") From b282be83c500bac5544424378b9505fc8c28e432 Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 2 Aug 2024 20:03:43 +0200 Subject: [PATCH 1482/2361] remove old workflows --- .github/actions/release/action.yml | 166 --------------------------- .github/workflows/auto_release.yml | 111 ------------------ .github/workflows/create_release.yml | 2 + .github/workflows/release.yml | 71 ------------ .github/workflows/tags_stable.yml | 74 ------------ tests/ci/docker_server.py | 1 - 6 files changed, 2 insertions(+), 423 deletions(-) delete mode 100644 .github/actions/release/action.yml delete mode 100644 .github/workflows/auto_release.yml delete mode 100644 .github/workflows/release.yml delete mode 100644 .github/workflows/tags_stable.yml diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml deleted file mode 100644 index a287aa8b41d..00000000000 --- a/.github/actions/release/action.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: Release - -description: Makes patch releases and creates new release branch - -inputs: - ref: - description: 'Git reference (branch or commit sha) from which to create the release' - required: true - type: string - type: - description: 'The type of release: "new" for a new release or "patch" for a patch release' - required: true - type: choice - options: - - patch - - new - dry-run: - description: 'Dry run' - required: true - type: boolean - token: - required: true - type: string - -runs: - using: "composite" - steps: - - name: Prepare Release Info - shell: bash - run: | - python3 ./tests/ci/create_release.py --prepare-release-info \ - --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} ${{ inputs.dry-run == true && '--dry-run' || '' }} - echo "::group::Release Info" - python3 -m json.tool /tmp/release_info.json - echo "::endgroup::" - release_tag=$(jq -r '.release_tag' /tmp/release_info.json) - commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json) - echo "Release Tag: $release_tag" - echo "RELEASE_TAG=$release_tag" >> "$GITHUB_ENV" - echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" - - name: Download All Release Artifacts - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Push Git Tag for the Release - shell: bash - run: | - python3 
./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Push New Release Branch - if: ${{ inputs.type == 'new' }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Bump CH Version and Update Contributors' List - shell: bash - run: | - python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Bump Docker versions, Changelog, Security - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - git checkout master - python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" - echo "List versions" - ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv - echo "Update docker version" - ./utils/list-versions/update-docker-version.sh - echo "Generate ChangeLog" - export CI=1 - docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ - --volume=".:/ClickHouse" clickhouse/style-test \ - /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ - --gh-user-or-token=${{ inputs.token }} --jobs=5 \ - --output="/ClickHouse/docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} - git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md - echo "Generate Security" - python3 ./utils/security-generator/generate_security.py > SECURITY.md - git diff HEAD - - name: Create ChangeLog PR - if: ${{ inputs.type == 'patch' && ! inputs.dry-run }} - uses: peter-evans/create-pull-request@v6 - with: - author: "robot-clickhouse " - token: ${{ inputs.token }} - committer: "robot-clickhouse " - commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} - branch: auto/${{ env.RELEASE_TAG }} - assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher - delete-branch: true - title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} - labels: do not test - body: | - Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} - ### Changelog category (leave one): - - Not for changelog (changelog entry is not required) - - name: Complete previous steps and Restore git state - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --set-progress-completed - git reset --hard HEAD - git checkout "$GITHUB_REF_NAME" - - name: Create GH Release - shell: bash - if: ${{ inputs.type == 'patch' }} - run: | - python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Export TGZ Packages - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Test TGZ Packages - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Export RPM Packages - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Test RPM Packages - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Export Debian Packages - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == 
true && '--dry-run' || '' }} - - name: Test Debian Packages - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Docker clickhouse/clickhouse-server building - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - cd "./tests/ci" - python3 ./create_release.py --set-progress-started --progress "docker server release" - export CHECK_NAME="Docker server image" - python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} - python3 ./create_release.py --set-progress-completed - - name: Docker clickhouse/clickhouse-keeper building - if: ${{ inputs.type == 'patch' }} - shell: bash - run: | - cd "./tests/ci" - python3 ./create_release.py --set-progress-started --progress "docker keeper release" - export CHECK_NAME="Docker keeper image" - python3 docker_server.py --release-type auto --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }} - python3 ./create_release.py --set-progress-completed - - name: Set current Release progress to Completed with OK - shell: bash - run: | - python3 ./tests/ci/create_release.py --set-progress-started --progress "completed" - python3 ./tests/ci/create_release.py --set-progress-completed - - name: Post Slack Message - if: ${{ !cancelled() }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }} diff --git a/.github/workflows/auto_release.yml b/.github/workflows/auto_release.yml deleted file mode 100644 index 457ffacc7a8..00000000000 --- a/.github/workflows/auto_release.yml +++ /dev/null @@ -1,111 +0,0 @@ -name: AutoRelease - -env: - PYTHONUNBUFFERED: 1 - DRY_RUN: true - -concurrency: - group: release -on: # yamllint disable-line rule:truthy - # Workflow uses a test bucket for packages and dry run mode (no real releases) - schedule: - - cron: '0 9 * * *' - - cron: '0 15 * * *' - workflow_dispatch: - inputs: - dry-run: - description: 'Dry run' - required: false - default: true - type: boolean - -jobs: - AutoRelease: - runs-on: [self-hosted, release-maker] - steps: - - name: DebugInfo - uses: hmarr/debug-action@f7318c783045ac39ed9bb497e22ce835fdafbfe6 - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" - - name: Set DRY_RUN for dispatch - if: ${{ github.event_name == 'workflow_dispatch' }} - run: echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_ENV" - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - fetch-depth: 0 - - name: Auto Release Prepare - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 auto_release.py --prepare - echo "::group::Auto Release Info" - python3 -m json.tool /tmp/autorelease_info.json - echo "::endgroup::" - { - echo 'AUTO_RELEASE_PARAMS<> "$GITHUB_ENV" - - name: Post Release Branch statuses - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 auto_release.py --post-status - - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].release_branch }} - if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[0].ready }} - uses: ./.github/actions/release - with: - ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0].commit_sha }} - type: patch - dry-run: ${{ env.DRY_RUN }} - token: 
${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].release_branch }} - if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[0] && fromJson(env.AUTO_RELEASE_PARAMS).releases[1].ready }} - uses: ./.github/actions/release - with: - ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[1].commit_sha }} - type: patch - dry-run: ${{ env.DRY_RUN }} - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].release_branch }} - if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2] && fromJson(env.AUTO_RELEASE_PARAMS).releases[2].ready }} - uses: ./.github/actions/release - with: - ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[2].commit_sha }} - type: patch - dry-run: ${{ env.DRY_RUN }} - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].release_branch }} - if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3] && fromJson(env.AUTO_RELEASE_PARAMS).releases[3].ready }} - uses: ./.github/actions/release - with: - ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[3].commit_sha }} - type: patch - dry-run: ${{ env.DRY_RUN }} - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - - name: Release ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].release_branch }} - if: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4] && fromJson(env.AUTO_RELEASE_PARAMS).releases[4].ready }} - uses: ./.github/actions/release - with: - ref: ${{ fromJson(env.AUTO_RELEASE_PARAMS).releases[4].commit_sha }} - type: patch - dry-run: ${{ env.DRY_RUN }} - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - - name: Post Slack Message - if: ${{ !cancelled() }} - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }} - - name: Clean up - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 29094cc51a6..d4993b373df 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -89,6 +89,8 @@ jobs: shell: bash run: | python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" + + git checkout master # in case WF started from feature branch echo "List versions" ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv echo "Update docker version" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 7dc4e3298a6..00000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: PublishedReleaseCI -# - Gets artifacts from S3 -# - Sends it to JFROG Artifactory -# - Adds them to the release assets - -on: # yamllint disable-line rule:truthy - release: - types: - - published - workflow_dispatch: - inputs: - tag: - description: 'Release tag' - required: true - type: string - -jobs: - ReleasePublish: - runs-on: [self-hosted, style-checker] - steps: - - name: Set tag from input - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - - name: Set tag from REF - if: github.event_name == 'release' - run: | - echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - - name: Deploy packages and assets - run: | - curl --silent --data '' 
--no-buffer \ - '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' - ############################################################################################ - ##################################### Docker images ####################################### - ############################################################################################ - DockerServerImages: - runs-on: [self-hosted, style-checker] - steps: - - name: Set tag from input - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - - name: Set tag from REF - if: github.event_name == 'release' - run: | - echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # otherwise we will have no version info - filter: tree:0 - ref: ${{ env.GITHUB_TAG }} - - name: Check docker clickhouse/clickhouse-server building - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - export CHECK_NAME="Docker server image" - SHA=$(git rev-list -n 1 "$GITHUB_TAG") - python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$SHA" --check-name "$CHECK_NAME" --push - - name: Check docker clickhouse/clickhouse-keeper building - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - export CHECK_NAME="Docker keeper image" - SHA=$(git rev-list -n 1 "$GITHUB_TAG") - python3 docker_server.py --release-type auto --version "$GITHUB_TAG" --sha "$SHA" --check-name "$CHECK_NAME" --push - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" diff --git a/.github/workflows/tags_stable.yml b/.github/workflows/tags_stable.yml deleted file mode 100644 index 2aa7694bc41..00000000000 --- a/.github/workflows/tags_stable.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: TagsStableWorkflow -# - Gets artifacts from S3 -# - Sends it to JFROG Artifactory -# - Adds them to the release assets - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -on: # yamllint disable-line rule:truthy - push: - tags: - - 'v*-prestable' - - 'v*-stable' - - 'v*-lts' - workflow_dispatch: - inputs: - tag: - description: 'Test tag' - required: true - type: string - - -jobs: - UpdateVersions: - runs-on: [self-hosted, style-checker] - steps: - - name: Set test tag - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - - name: Get tag name - if: github.event_name != 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - ref: master - fetch-depth: 0 - filter: tree:0 - - name: Update versions, docker version, changelog, security - env: - GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - run: | - ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv - ./utils/list-versions/update-docker-version.sh - GID=$(id -g "${UID}") - # --network=host and CI=1 are required for the S3 access from a container - docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ - --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \ - /ClickHouse/tests/ci/changelog.py -v --debug-helpers \ - --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \ - 
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}" - git add "./docs/changelogs/${GITHUB_TAG}.md" - python3 ./utils/security-generator/generate_security.py > SECURITY.md - git diff HEAD - - name: Create Pull Request - uses: peter-evans/create-pull-request@v6 - with: - author: "robot-clickhouse " - token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - committer: "robot-clickhouse " - commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} - branch: auto/${{ env.GITHUB_TAG }} - assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher - delete-branch: true - title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} - labels: do not test - body: | - Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} - - ### Changelog category (leave one): - - Not for changelog (changelog entry is not required) diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 8f0474d5053..3251ec5644e 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -27,7 +27,6 @@ from stopwatch import Stopwatch from tee_popen import TeePopen from version_helper import ( ClickHouseVersion, - get_tagged_versions, get_version_from_repo, version_arg, ) From aa38e78d7238d843737d1d268de6ee189c19edc3 Mon Sep 17 00:00:00 2001 From: Max K Date: Fri, 2 Aug 2024 20:27:59 +0200 Subject: [PATCH 1483/2361] update version_date.tsv --- docs/changelogs/v23.8.16.40-lts.md | 35 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 36 insertions(+) create mode 100644 docs/changelogs/v23.8.16.40-lts.md diff --git a/docs/changelogs/v23.8.16.40-lts.md b/docs/changelogs/v23.8.16.40-lts.md new file mode 100644 index 00000000000..75caf1ea277 --- /dev/null +++ b/docs/changelogs/v23.8.16.40-lts.md @@ -0,0 +1,35 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v23.8.16.40-lts (e143a9039ba) FIXME as compared to v23.8.15.35-lts (060ff8e813a) + +#### Improvement +* Backported in [#66962](https://github.com/ClickHouse/ClickHouse/issues/66962): Added support for parameterized view with analyzer to not analyze create parameterized view. Refactor existing parameterized view logic to not analyze create parameterized view. [#54211](https://github.com/ClickHouse/ClickHouse/pull/54211) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* Backported in [#65461](https://github.com/ClickHouse/ClickHouse/issues/65461): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). +* Backported in [#65880](https://github.com/ClickHouse/ClickHouse/issues/65880): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)). +* Backported in [#65912](https://github.com/ClickHouse/ClickHouse/issues/65912): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#65281](https://github.com/ClickHouse/ClickHouse/issues/65281): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)). 
+* Backported in [#65368](https://github.com/ClickHouse/ClickHouse/issues/65368): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#65743](https://github.com/ClickHouse/ClickHouse/issues/65743): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#65351](https://github.com/ClickHouse/ClickHouse/issues/65351): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#66037](https://github.com/ClickHouse/ClickHouse/issues/66037): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)). +* Backported in [#65782](https://github.com/ClickHouse/ClickHouse/issues/65782): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#65926](https://github.com/ClickHouse/ClickHouse/issues/65926): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#65822](https://github.com/ClickHouse/ClickHouse/issues/65822): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)). +* Backported in [#66449](https://github.com/ClickHouse/ClickHouse/issues/66449): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66717](https://github.com/ClickHouse/ClickHouse/issues/66717): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#65080](https://github.com/ClickHouse/ClickHouse/issues/65080): Follow up to [#56541](https://github.com/ClickHouse/ClickHouse/issues/56541). [#57141](https://github.com/ClickHouse/ClickHouse/pull/57141) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#65913](https://github.com/ClickHouse/ClickHouse/issues/65913): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66853](https://github.com/ClickHouse/ClickHouse/issues/66853): Fix data race in S3::ClientCache. 
[#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 24488066190..cb6b8f588da 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -50,6 +50,7 @@ v23.9.4.11-stable 2023-11-08 v23.9.3.12-stable 2023-10-31 v23.9.2.56-stable 2023-10-19 v23.9.1.1854-stable 2023-09-29 +v23.8.16.40-lts 2024-08-02 v23.8.15.35-lts 2024-06-14 v23.8.14.6-lts 2024-05-02 v23.8.13.25-lts 2024-04-26 From a45ba44dbaa2ed43eb63e49fe609a01be978eac9 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 2 Aug 2024 18:28:38 +0000 Subject: [PATCH 1484/2361] Automatic style fix --- tests/ci/ci.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 6ca84a346e2..805296d2bb2 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1019,7 +1019,9 @@ def _get_ext_check_name(check_name: str) -> str: return check_name_with_group -def _cancel_pr_workflow(s3: S3Helper, pr_number: int, cancel_sync: bool = False) -> None: +def _cancel_pr_workflow( + s3: S3Helper, pr_number: int, cancel_sync: bool = False +) -> None: wf_data = CiMetadata(s3, pr_number).fetch_meta() if not cancel_sync: if not wf_data.run_id: From a37eeb0f211c0c7b6251a9108d6b939e73c9a66e Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 2 Aug 2024 20:13:27 +0000 Subject: [PATCH 1485/2361] Allow types to change, re-resolve overloads --- src/Storages/MergeTree/KeyCondition.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index eaf9f0af623..91f054c3a71 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -627,7 +627,8 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } } - res = &inverted_dag.addFunction(node.function_base, children, ""); + auto function_builder = FunctionFactory::instance().get(name, context); + res = &inverted_dag.addFunction(function_builder, children, ""); handled_inversion = true; } else if (need_inversion && (name == "and" || name == "or")) @@ -668,8 +669,13 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } else { - res = &inverted_dag.addFunction(node.function_base, children, ""); - chassert(res->result_type == node.result_type); + /// Can't just addFunction(node.function_base) because argument types may have + /// changed slightly because of our transformations, e.g. maybe some subexpression + /// changed constness, which caused some function return value to change LowCardinality-ness. + /// (I don't have a specific counterexample, but it seems likely that it exists. + /// One was fixed in the past: https://github.com/ClickHouse/ClickHouse/issues/65143 ) + auto function_builder = FunctionFactory::instance().get(name, context); + res = &inverted_dag.addFunction(function_builder, children, ""); } } } @@ -678,13 +684,6 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( if (!handled_inversion && need_inversion) res = &inverted_dag.addFunction(FunctionFactory::instance().get("not", context), {res}, ""); - /// Make sure we don't change any data types (e.g. remove LowCardinality). 
- /// If it turns out that we actually want to change data types sometimes, it's ok to remove this - /// check *and* replace all `addFunction(node.function_base, ...)` calls above with - /// `addFunction(FunctionFactory::instance().get(name, context), ...)` to re-resolve overloads. - if (!node.result_type->equals(*res->result_type)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "KeyCondition inadvertently changed subexpression data type: '{}' -> '{}', column `{}`", node.result_type->getName(), res->result_type->getName(), node.result_name); - to_inverted[&node] = res; return *res; } From fb23cbdef6b38d1ba90aa1bb304487a3bb947e5c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1486/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From 15e9f8d9cbe4c2f0a1d9973650d93fa195a56276 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 2 Aug 2024 22:52:01 +0200 Subject: [PATCH 1487/2361] Fix `02481_async_insert_race_long` --- tests/queries/0_stateless/02481_async_insert_race_long.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02481_async_insert_race_long.sh b/tests/queries/0_stateless/02481_async_insert_race_long.sh index b0088017d32..def97409bc4 100755 --- a/tests/queries/0_stateless/02481_async_insert_race_long.sh +++ b/tests/queries/0_stateless/02481_async_insert_race_long.sh @@ -29,11 +29,8 @@ function insert3() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" & - sleep 0.05 + ${MY_CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" done - - wait } function select1() From 13b435d281ce39a4f8eee889da482dbf272dd1cf Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1488/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From 
eb0e12099a8fa45d79b2cff96e02ee273e879efa Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 2 Aug 2024 21:18:35 +0000 Subject: [PATCH 1489/2361] Another attempt --- src/Storages/MergeTree/KeyCondition.cpp | 27 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 91f054c3a71..9115cb5608e 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -627,8 +627,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } } - auto function_builder = FunctionFactory::instance().get(name, context); - res = &inverted_dag.addFunction(function_builder, children, ""); + res = &inverted_dag.addFunction(node.function_base, children, ""); handled_inversion = true; } else if (need_inversion && (name == "and" || name == "or")) @@ -669,13 +668,23 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } else { - /// Can't just addFunction(node.function_base) because argument types may have - /// changed slightly because of our transformations, e.g. maybe some subexpression - /// changed constness, which caused some function return value to change LowCardinality-ness. - /// (I don't have a specific counterexample, but it seems likely that it exists. - /// One was fixed in the past: https://github.com/ClickHouse/ClickHouse/issues/65143 ) - auto function_builder = FunctionFactory::instance().get(name, context); - res = &inverted_dag.addFunction(function_builder, children, ""); + /// Make sure we don't change types of function arguments (e.g. remove LowCardinality). + /// Otherwise the function may crash when passed columns of unexpected types. + /// * Why not check this for all subexperessions rather than function arguments? + /// Because types may change, e.g. in `NOT (u64 AND u64)` -> `(NOT u64 OR NOT u64)` + /// the AND's args were UInt64, but OR's args are UInt8. + /// * Why not re-resolve function overload, using FunctionFactory::instance().get(name, context)? + /// Because some functions can't be found through FunctionFactory, e.g. FunctionCapture. + /// (But maybe we could re-resolve only if argument types changed.) 
+ for (size_t i = 0; i < children.size(); ++i) + { + if (!node.children[i]->result_type->equals(*children[i]->result_type)) + throw Exception( + ErrorCodes::LOGICAL_ERROR, "KeyCondition inadvertently changed subexpression data type: '{}' -> '{}', column `{}`", + node.children[i]->result_type->getName(), children[i]->result_type->getName(), node.children[i]->result_name); + } + + res = &inverted_dag.addFunction(node.function_base, children, ""); } } } From dd0ae04f90314ce6d5dbe748605e66f1a6d9024f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1490/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From 08bde9cb44dce6fdf069527852faa8ec29a71b10 Mon Sep 17 00:00:00 2001 From: pufit Date: Fri, 2 Aug 2024 18:28:33 -0400 Subject: [PATCH 1491/2361] fix conflict --- src/Client/ClientBase.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 1a23b6b1363..45251aea28a 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -2,7 +2,7 @@ #include -#include +#include #include #include #include From 51918dc080c9fa4b128a151cfbd0d28b294c56d3 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sat, 3 Aug 2024 00:24:00 +0100 Subject: [PATCH 1492/2361] impl --- src/Common/ShellCommand.cpp | 9 ++++- src/Processors/Sources/ShellCommandSource.cpp | 39 +++++++++++++------ 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 98a21b43d76..79b0d667863 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -237,7 +237,14 @@ std::unique_ptr ShellCommand::executeImpl( res->write_fds.emplace(fd, fds.fds_rw[1]); } - LOG_TRACE(getLogger(), "Started shell command '{}' with pid {}", filename, pid); + LOG_TRACE( + getLogger(), + "Started shell command '{}' with pid {} and file descriptors: read {}, write {}", + filename, + pid, + res->out.getFD(), + res->err.getFD()); + return res; } diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index 1659287c227..923bdfad8f8 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -75,6 +75,15 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond while (true) { Stopwatch watch; + +#if defined(DEBUG_OR_SANITIZER_BUILD) + auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; + LOG_TRACE( + getLogger("TimeoutReadBufferFromFileDescriptor"), + "Polling descriptors: {}", + fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", ")); +#endif + res = poll(pfds, static_cast(num), 
static_cast(timeout_milliseconds)); if (res < 0) @@ -84,7 +93,16 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond const auto elapsed = watch.elapsedMilliseconds(); if (timeout_milliseconds <= elapsed) + { +#if defined(DEBUG_OR_SANITIZER_BUILD) + LOG_TRACE( + getLogger("TimeoutReadBufferFromFileDescriptor"), + "Timeout exceeded: elapsed={}, timeout={}", + elapsed, + timeout_milliseconds); +#endif break; + } timeout_milliseconds -= elapsed; } else @@ -93,6 +111,15 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond } } +#if defined(DEBUG_OR_SANITIZER_BUILD) + auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; + LOG_TRACE( + getLogger("TimeoutReadBufferFromFileDescriptor"), + "Poll for descriptors: {} returned {}", + fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", "), + res); +#endif + return res; } @@ -139,15 +166,9 @@ public: while (!bytes_read) { - LOG_TRACE( - getLogger("TimeoutReadBufferFromFileDescriptor"), - "Starting polling on descriptors ({}) with timeout {} ms", - fmt::join(std::span(pfds, pfds + num_pfds) | std::views::transform([](const auto & pollfd) { return pollfd.fd; }), ", "), - timeout_milliseconds); pfds[0].revents = 0; pfds[1].revents = 0; size_t num_events = pollWithTimeout(pfds, num_pfds, timeout_milliseconds); - LOG_TRACE(getLogger("TimeoutReadBufferFromFileDescriptor"), "Poll returned with num_events={}", num_events); if (0 == num_events) throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Pipe read timeout exceeded {} milliseconds", timeout_milliseconds); @@ -208,12 +229,6 @@ public: return true; } - void reset() const - { - makeFdBlocking(stdout_fd); - makeFdBlocking(stderr_fd); - } - ~TimeoutReadBufferFromFileDescriptor() override { tryMakeFdBlocking(stdout_fd); From a431ab3e4b6f925924a81d99997e6c028ae7950f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 01:31:02 +0200 Subject: [PATCH 1493/2361] Improve dashboard --- programs/server/dashboard.html | 66 +++++++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 45f988f7b1e..71880b9e228 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -17,7 +17,7 @@ --input-shadow-color: rgba(0, 255, 0, 1); --error-color: red; --global-error-color: white; - --legend-background: rgba(255, 255, 255, 0.75); + --legend-background: rgba(255, 255, 0, 0.75); --title-color: #666; --text-color: black; --edit-title-background: #FEE; @@ -41,7 +41,7 @@ --moving-shadow-color: rgba(255, 255, 255, 0.25); --input-shadow-color: rgba(255, 128, 0, 0.25); --error-color: #F66; - --legend-background: rgba(255, 255, 255, 0.25); + --legend-background: rgba(0, 96, 128, 0.75); --title-color: white; --text-color: white; --edit-title-background: #364f69; @@ -1004,14 +1004,14 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- className && legendEl.classList.add(className); uPlot.assign(legendEl.style, { - textAlign: "left", + textAlign: "right", pointerEvents: "none", display: "none", position: "absolute", left: 0, top: 0, - zIndex: 100, - boxShadow: "2px 2px 10px rgba(0,0,0,0.1)", + zIndex: 200, + boxShadow: "2px 2px 10px rgba(0, 0, 0, 0.1)", ...style }); @@ -1051,8 +1051,10 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- function update(u) { let { left, top } = 
u.cursor; - left -= legendEl.clientWidth / 2; - top -= legendEl.clientHeight / 2; + /// This will make the balloon to the right of the cursor when the cursor is on the left side, and vise-versa, + /// avoiding the borders of the chart. + left -= legendEl.clientWidth * (left / u.width); + top -= legendEl.clientHeight; legendEl.style.transform = "translate(" + left + "px, " + top + "px)"; if (multiline) { @@ -1229,14 +1231,53 @@ async function draw(idx, chart, url_params, query) { let sync = uPlot.sync("sync"); - let axis = { + function formatDateTime(t) { + return (new Date(t * 1000)).toISOString().replace('T', '\n').replace('.000Z', ''); + } + + function formatDateTimes(self, ticks) { + return ticks.map((t, idx) => { + let res = formatDateTime(t); + if (idx == 0 || res.substring(0, 10) != formatDateTime(ticks[idx - 1]).substring(0, 10)) { + return res; + } else { + return res.substring(11); + } + }); + } + + function formatValue(v) { + const a = Math.abs(v); + if (a >= 1000000000000000) { return (v / 1000000000000000) + 'P'; } + if (a >= 1000000000000) { return (v / 1000000000000) + 'T'; } + if (a >= 1000000000) { return (v / 1000000000) + 'G'; } + if (a >= 1000000) { return (v / 1000000) + 'M'; } + if (a >= 1000) { return (v / 1000) + 'K'; } + if (a > 0 && a < 0.001) { return (v * 1000000) + "μ"; } + return v; + } + + let axis_x = { stroke: axes_color, grid: { width: 1 / devicePixelRatio, stroke: grid_color }, - ticks: { width: 1 / devicePixelRatio, stroke: grid_color } + ticks: { width: 1 / devicePixelRatio, stroke: grid_color }, + values: formatDateTimes, + space: 80, + incrs: [1, 5, 10, 15, 30, + 60, 60 * 5, 60 * 10, 60 * 15, 60 * 30, + 3600, 3600 * 2, 3600 * 3, 3600 * 4, 3600 * 6, 3600 * 12, + 3600 * 24], }; - let axes = [axis, axis]; - let series = [{ label: "x" }]; + let axis_y = { + stroke: axes_color, + grid: { width: 1 / devicePixelRatio, stroke: grid_color }, + ticks: { width: 1 / devicePixelRatio, stroke: grid_color }, + values: (self, ticks) => ticks.map(formatValue) + }; + + let axes = [axis_x, axis_y]; + let series = [{ label: "time", value: (self, t) => formatDateTime(t) }]; let data = [reply.data[reply.meta[0].name]]; // Treat every column as series @@ -1254,9 +1295,10 @@ async function draw(idx, chart, url_params, query) { const opts = { width: chart.clientWidth, height: chart.clientHeight, + scales: { x: { time: false } }, /// Because we want to split and format time on our own. 
axes, series, - padding: [ null, null, null, (Math.round(max_value * 100) / 100).toString().length * 6 - 10 ], + padding: [ null, null, null, 3 ], plugins: [ legendAsTooltipPlugin() ], cursor: { sync: { From a6f9dd4447cbb475cbf77b07de35b40fbcad50b1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 01:35:10 +0200 Subject: [PATCH 1494/2361] Improve dashboard --- programs/server/dashboard.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 71880b9e228..c69acec7858 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -1010,7 +1010,7 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- position: "absolute", left: 0, top: 0, - zIndex: 200, + zIndex: 100, boxShadow: "2px 2px 10px rgba(0, 0, 0, 0.1)", ...style }); From 95659de26573bdb17ab2b5649e6dad96fb75c479 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 01:45:41 +0200 Subject: [PATCH 1495/2361] Fix invalid detection of an empty result --- programs/server/dashboard.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index c69acec7858..238254f4ef8 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -1141,7 +1141,7 @@ async function draw(idx, chart, url_params, query) { let {reply, error} = await doFetch(query, url_params); if (!error) { - if (reply.rows.length == 0) { + if (reply.rows == 0) { error = "Query returned empty result."; } else if (reply.meta.length < 2) { error = "Query should return at least two columns: unix timestamp and value."; From a99f9bb603f78437fba8d3ebb031c2f41d00cd58 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 01:50:27 +0200 Subject: [PATCH 1496/2361] Focus on the mass editor --- programs/server/dashboard.html | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 238254f4ef8..8fb07d5da3b 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -945,6 +945,7 @@ function showMassEditor() { let editor = document.getElementById('mass-editor-textarea'); editor.value = JSON.stringify({params: params, queries: queries}, null, 2); + editor.focus(); mass_editor_active = true; } From eeb8c1caac9e8e2ba2f3a1a86f5603281e161610 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 02:06:53 +0200 Subject: [PATCH 1497/2361] Improve margins when there are many parameters --- programs/server/dashboard.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 8fb07d5da3b..344de779065 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -256,6 +256,7 @@ font-weight: bold; user-select: none; cursor: pointer; + margin-bottom: 1rem; } #run:hover { @@ -309,7 +310,7 @@ color: var(--param-text-color); display: inline-block; box-shadow: 1px 1px 0 var(--shadow-color); - margin-bottom: 1rem; + margin-bottom: 0.5rem; } input:focus { From 090fb59194462324507d75f032aa803303c3e041 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 02:20:58 +0200 Subject: [PATCH 1498/2361] Automatic field width of chart parameters --- programs/server/dashboard.html | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 
344de779065..0b099b15536 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -218,6 +218,7 @@ #chart-params .param { width: 6%; + font-family: monospace; } input { @@ -658,6 +659,10 @@ function insertParam(name, value) { param_value.value = value; param_value.spellcheck = false; + let setWidth = e => { e.style.width = (e.value.length + 1) + 'ch' }; + if (value) { setWidth(param_value); } + param_value.addEventListener('input', e => setWidth(e.target)); + param_wrapper.appendChild(param_name); param_wrapper.appendChild(param_value); document.getElementById('chart-params').appendChild(param_wrapper); From 9a017528a4685fc4ed7eec7ba37f9e9804972c3b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 31 Jul 2024 17:38:20 +0200 Subject: [PATCH 1499/2361] Minor change --- src/Databases/DatabaseOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index f419f5811a1..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -313,7 +313,7 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr query_context, const Stri std::lock_guard lock(mutex); if (const auto it = snapshot_detached_tables.find(table_name); it == snapshot_detached_tables.end()) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table={}", table_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Snapshot doesn't contain info about detached table `{}`", table_name); } else { From dfeb1991164bd6c8b0efc8bdcfe9dcd5b8906928 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 03:06:37 +0200 Subject: [PATCH 1500/2361] Fix locking inside TimerDescriptor --- src/Common/TimerDescriptor.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/Common/TimerDescriptor.cpp b/src/Common/TimerDescriptor.cpp index 9a171ae9487..ce290a1cb31 100644 --- a/src/Common/TimerDescriptor.cpp +++ b/src/Common/TimerDescriptor.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -75,10 +76,22 @@ void TimerDescriptor::drain() const /// or since the last successful read(2), then the buffer given to read(2) returns an unsigned 8-byte integer (uint64_t) /// containing the number of expirations that have occurred. /// (The returned value is in host byte order—that is, the native byte order for integers on the host machine.) + + /// Due to a bug in Linux Kernel, reading from timerfd in non-blocking mode can be still blocking. + /// Avoid it with polling. 
+ Epoll epoll; + epoll.add(timer_fd); + epoll_event event; + event.data.fd = -1; + size_t ready_count = epoll.getManyReady(1, &event, 0); + if (!ready_count) + return; + uint64_t buf; while (true) { ssize_t res = ::read(timer_fd, &buf, sizeof(buf)); + if (res < 0) { /// man timerfd_create: From 2605bb36b66ccfb4621244a28475a242778b6cc4 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 3 Aug 2024 01:42:11 +0000 Subject: [PATCH 1501/2361] fix conflict --- src/Core/SettingsChangesHistory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8f73e10c44f..107a8e451c5 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -92,7 +92,7 @@ static std::initializer_list Date: Sat, 3 Aug 2024 01:49:53 +0000 Subject: [PATCH 1502/2361] e --- src/Storages/MergeTree/KeyCondition.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 9115cb5608e..2b89344d3d9 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -670,7 +670,7 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( { /// Make sure we don't change types of function arguments (e.g. remove LowCardinality). /// Otherwise the function may crash when passed columns of unexpected types. - /// * Why not check this for all subexperessions rather than function arguments? + /// * Why not check this for all subexpressions rather than function arguments? /// Because types may change, e.g. in `NOT (u64 AND u64)` -> `(NOT u64 OR NOT u64)` /// the AND's args were UInt64, but OR's args are UInt8. /// * Why not re-resolve function overload, using FunctionFactory::instance().get(name, context)? From 8cb2e308f41638ebb6ba7fddbd4f0bf89d4d612e Mon Sep 17 00:00:00 2001 From: shiyer7474 Date: Sat, 3 Aug 2024 01:55:24 +0000 Subject: [PATCH 1503/2361] Only new analyzer --- .../03209_parameterized_view_with_non_literal_params.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql index f2c61e5cb1d..7fe84929910 100644 --- a/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql +++ b/tests/queries/0_stateless/03209_parameterized_view_with_non_literal_params.sql @@ -1,4 +1,4 @@ - +SET allow_experimental_analyzer = 1; select 'Test with Date parameter'; drop table if exists date_table_pv; From 28ec383739d9a4974549e8c6491797f0eaafaffb Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 3 Aug 2024 02:07:24 +0000 Subject: [PATCH 1504/2361] add sharedmergetree --- src/Storages/MergeTree/MergeTreeData.cpp | 5 +++-- src/Storages/StorageFactory.cpp | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index a158e375ae9..e849c4b794f 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3216,10 +3216,11 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context /// Block the case of alter table add projection for special merge trees. 
if (std::any_of(commands.begin(), commands.end(), [](const AlterCommand & c) { return c.type == AlterCommand::ADD_PROJECTION; })) { - if (auto storage_name = getName(); storage_name != "MergeTree" && storage_name != "ReplicatedMergeTree" + const std::unordered_set allowed_storages{"MergeTree", "ReplicatedMergeTree", "SharedMergeTree"}; + if (auto storage_name = getName(); !allowed_storages.contains(storage_name) && settings_from_storage->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Projection is fully supported in (Replictaed)MergeTree, but also allowed in non-throw mode with other" + "Projection is fully supported in (Replictaed, Shared)MergeTree, but also allowed in non-throw mode with other" " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode." " Current storage name is {}.", storage_name); } diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 7360d351e8a..557f53a9ada 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -204,10 +204,10 @@ StoragePtr StorageFactory::get( /// Now let's handle the merge tree family. Note we only handle in the mode of CREATE due to backward compatibility. /// Otherwise, it would fail to start in the case of existing projections with special mergetree. - /// Projection is fully supported in (Replictaed)MergeTree, but also allowed in non-throw mode with other mergetree family members. chassert(query.storage->engine); - if (std::string_view engine_name(query.storage->engine->name); mode == LoadingStrictnessLevel::CREATE - && engine_name != "MergeTree" && engine_name != "ReplicatedMergeTree") + const std::unordered_set allowed_engines{"MergeTree", "ReplicatedMergeTree", "SharedMergeTree"}; + if (auto engine_name(query.storage->engine->name); mode == LoadingStrictnessLevel::CREATE + && !allowed_engines.contains(engine_name)) { /// default throw mode in deduplicate_merge_projection_mode bool projection_allowed = false; @@ -224,7 +224,7 @@ StoragePtr StorageFactory::get( } if (!projection_allowed) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Projection is fully supported in (Replictaed)MergeTree, but also allowed in non-throw mode with other" + "Projection is fully supported in (Replictaed, Shared)MergeTree, but also allowed in non-throw mode with other" " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode." " Current storage name is {}.", engine_name); } From 96e826d154e9b55cf035a3ce025ac81194455ebc Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Sat, 3 Aug 2024 02:57:33 +0000 Subject: [PATCH 1505/2361] Change tactics again --- src/Storages/MergeTree/KeyCondition.cpp | 31 +++++++++++++++---------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 2b89344d3d9..dfb43c4e75d 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -668,23 +668,30 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown( } else { - /// Make sure we don't change types of function arguments (e.g. remove LowCardinality). - /// Otherwise the function may crash when passed columns of unexpected types. - /// * Why not check this for all subexpressions rather than function arguments? - /// Because types may change, e.g. 
in `NOT (u64 AND u64)` -> `(NOT u64 OR NOT u64)` - /// the AND's args were UInt64, but OR's args are UInt8. - /// * Why not re-resolve function overload, using FunctionFactory::instance().get(name, context)? - /// Because some functions can't be found through FunctionFactory, e.g. FunctionCapture. - /// (But maybe we could re-resolve only if argument types changed.) + /// Argument types could change slightly because of our transformations, e.g. + /// LowCardinality can be added because some subexpressions became constant + /// (in particular, sets). If that happens, re-run function overload resolver. + /// Otherwise don't re-run it because some functions may not be available + /// through FunctionFactory::get(), e.g. FunctionCapture. + bool types_changed = false; for (size_t i = 0; i < children.size(); ++i) { if (!node.children[i]->result_type->equals(*children[i]->result_type)) - throw Exception( - ErrorCodes::LOGICAL_ERROR, "KeyCondition inadvertently changed subexpression data type: '{}' -> '{}', column `{}`", - node.children[i]->result_type->getName(), children[i]->result_type->getName(), node.children[i]->result_name); + { + types_changed = true; + break; + } } - res = &inverted_dag.addFunction(node.function_base, children, ""); + if (types_changed) + { + auto function_builder = FunctionFactory::instance().get(name, context); + res = &inverted_dag.addFunction(function_builder, children, ""); + } + else + { + res = &inverted_dag.addFunction(node.function_base, children, ""); + } } } } From f97abf69949f8822d70f4b1251e1945f279dd0ec Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 09:35:37 +0200 Subject: [PATCH 1506/2361] tests: avoid endless wait in 01042_system_reload_dictionary_reloads_completely Signed-off-by: Azat Khuzhin --- ...em_reload_dictionary_reloads_completely.sh | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh index 03dd376f802..ebc4110332f 100755 --- a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh +++ b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh @@ -8,6 +8,18 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -o pipefail +# Wait when the dictionary will update the value for 13 on its own: +function wait_for_dict_upate() +{ + for ((i = 0; i < 100; ++i)); do + if [ "$(${CLICKHOUSE_CLIENT} --query "SELECT dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))")" != -1 ]; then + return 0 + fi + sleep 0.5 + done + return 1 +} + $CLICKHOUSE_CLIENT < ', dictGetInt64('${CLICKHOUSE_DATABASE $CLICKHOUSE_CLIENT --query "INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (13, 103, now())" $CLICKHOUSE_CLIENT --query "INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (14, 104, now() - INTERVAL 1 DAY)" -# Wait when the dictionary will update the value for 13 on its own: -while [ "$(${CLICKHOUSE_CLIENT} --query "SELECT dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))")" = -1 ] -do - sleep 0.5 -done +if ! 
wait_for_dict_upate; then + echo "Dictionary had not been reloaded" >&2 + exit 1 +fi $CLICKHOUSE_CLIENT --query "SELECT '13 -> ', dictGetInt64('${CLICKHOUSE_DATABASE}.dict', 'y', toUInt64(13))" From 40cd5467c18d65a6624d273ac1a8fd9cc9257d8c Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 09:39:33 +0200 Subject: [PATCH 1507/2361] tests: fix 01042_system_reload_dictionary_reloads_completely flakiness (increase lag) The test fails in case of INSERT takes > 1 sec: 2024.08.02 13:06:07.746869 [ 45445 ] {c9b55378-6bc5-46d5-80c1-5385a880f88b} executeQuery: (from [::1]:37208) (comment: 01042_system_reload_dictionary_reloads_completely.sh) CREATE DICTIONARY test_m4lx2bit.dict ( x Int64 DEFAULT -1, y Int64 DEFAULT -1, insert_time DateTime ) PRIMARY KEY x SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table' DB 'test_m4lx2bit' UPDATE_FIELD 'insert_time')) LAYOUT(FLAT()) LIFETIME(1); (stage: Complete) ... 2024.08.02 13:06:08.263832 [ 59987 ] {744da223-67b9-4e32-b12a-eb2550a92fdb} DictionaryFactory: Created dictionary source 'ClickHouse: test_m4lx2bit.table' for dictionary '5b2b98a9-9372-47c9-bda3-830794cb96e7' 2024.08.02 13:06:08.268118 [ 59987 ] {744da223-67b9-4e32-b12a-eb2550a92fdb} executeQuery: (internal) SELECT `x`, `y`, `insert_time` FROM `test_m4lx2bit`.`table`; (stage: Complete) ... 2024.08.02 13:06:09.193190 [ 45445 ] {b6033498-4666-452f-bcf9-02ecf257ba7f} executeQuery: (from [::1]:37262) (comment: 01042_system_reload_dictionary_reloads_completely.sh) INSERT INTO test_m4lx2bit.table VALUES (stage: Complete) ... 2024.08.02 13:06:11.342119 [ 50962 ] {} executeQuery: (internal) SELECT `x`, `y`, `insert_time` FROM `test_m4lx2bit`.`table` WHERE insert_time >= '2024-08-02 13:06:07'; (stage: Complete) ... 2024.08.02 13:06:11.832158 [ 45445 ] {b6033498-4666-452f-bcf9-02ecf257ba7f} TCPHandler: Processed in 2.642106236 sec. ... 2024.08.02 13:06:16.357448 [ 41632 ] {} executeQuery: (internal) SELECT `x`, `y`, `insert_time` FROM `test_m4lx2bit`.`table` WHERE insert_time >= '2024-08-02 13:06:10'; (stage: Complete) Signed-off-by: Azat Khuzhin --- .../01042_system_reload_dictionary_reloads_completely.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh index ebc4110332f..453e1bb8f0a 100755 --- a/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh +++ b/tests/queries/0_stateless/01042_system_reload_dictionary_reloads_completely.sh @@ -31,7 +31,7 @@ CREATE DICTIONARY ${CLICKHOUSE_DATABASE}.dict insert_time DateTime ) PRIMARY KEY x -SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table' DB '${CLICKHOUSE_DATABASE}' UPDATE_FIELD 'insert_time')) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table' DB '${CLICKHOUSE_DATABASE}' UPDATE_FIELD 'insert_time' UPDATE_LAG 60)) LAYOUT(FLAT()) LIFETIME(1); EOF From 6ce6af0647590f4b58a6ab87ee5f29b8487e8c2f Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 3 Aug 2024 14:16:24 +0200 Subject: [PATCH 1508/2361] Fix completion RESTORE ON CLUSTER. 
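A minimal illustration of the affected flow (the database, cluster and backup names below are placeholders, not taken from this patch):

    RESTORE DATABASE mydb ON CLUSTER 'my_cluster' FROM Disk('backups', 'backup_1/');

With this change, reaching Stage::COMPLETED no longer makes every host wait for all the others: only the initiator of the RESTORE ON CLUSTER query keeps waiting for the remaining hosts (it does so via waitForStage(Stage::COMPLETED) in BackupsWorker::doRestore, as the new comment notes), while non-initiator hosts finish independently.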
--- src/Backups/RestorerFromBackup.cpp | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 3056f9fe421..278af9d4eb3 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -222,10 +222,19 @@ void RestorerFromBackup::setStage(const String & new_stage, const String & messa if (restore_coordination) { restore_coordination->setStage(new_stage, message); - if (new_stage == Stage::FINDING_TABLES_IN_BACKUP) - restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout); - else - restore_coordination->waitForStage(new_stage); + + /// The initiator of a RESTORE ON CLUSTER query waits for other hosts to complete their work (see waitForStage(Stage::COMPLETED) in BackupsWorker::doRestore), + /// but other hosts shouldn't wait for each others' completion. (That's simply unnecessary and also + /// the initiator may start cleaning up (e.g. removing restore-coordination ZooKeeper nodes) once all other hosts are in Stage::COMPLETED.) + bool need_wait = (new_stage != Stage::COMPLETED); + + if (need_wait) + { + if (new_stage == Stage::FINDING_TABLES_IN_BACKUP) + restore_coordination->waitForStage(new_stage, on_cluster_first_sync_timeout); + else + restore_coordination->waitForStage(new_stage); + } } } From 73080d25a93aa715c4126ed82b89e3ada89d7a3f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 15:10:05 +0200 Subject: [PATCH 1509/2361] Fix test `00002_log_and_exception_messages_formatting` --- src/Databases/DatabaseHDFS.cpp | 4 ++-- src/Storages/ObjectStorage/HDFS/Configuration.cpp | 6 +++--- .../00002_log_and_exception_messages_formatting.sql | 5 ++++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp index eccaae5f22e..f58f1b76e71 100644 --- a/src/Databases/DatabaseHDFS.cpp +++ b/src/Databases/DatabaseHDFS.cpp @@ -75,8 +75,8 @@ std::string DatabaseHDFS::getTablePath(const std::string & table_name) const return table_name; if (source.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. " - "It should have structure 'hdfs://:/path'", table_name); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad HDFS URL: {}. " + "It should have the following structure 'hdfs://:/path'", table_name); return fs::path(source) / table_name; } diff --git a/src/Storages/ObjectStorage/HDFS/Configuration.cpp b/src/Storages/ObjectStorage/HDFS/Configuration.cpp index e8071be6f02..85eb29a3868 100644 --- a/src/Storages/ObjectStorage/HDFS/Configuration.cpp +++ b/src/Storages/ObjectStorage/HDFS/Configuration.cpp @@ -142,11 +142,11 @@ void StorageHDFSConfiguration::setURL(const std::string & url_) { auto pos = url_.find("//"); if (pos == std::string::npos) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}", url_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad HDFS URL: {}. It should have the following structure 'hdfs://:/path'", url_); pos = url_.find('/', pos + 2); if (pos == std::string::npos) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}", url_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad HDFS URL: {}. 
It should have the following structure 'hdfs://:/path'", url_); path = url_.substr(pos + 1); if (!path.starts_with('/')) @@ -155,7 +155,7 @@ void StorageHDFSConfiguration::setURL(const std::string & url_) url = url_.substr(0, pos); paths = {path}; - LOG_TRACE(getLogger("StorageHDFSConfiguration"), "Using url: {}, path: {}", url, path); + LOG_TRACE(getLogger("StorageHDFSConfiguration"), "Using URL: {}, path: {}", url, path); } void StorageHDFSConfiguration::addStructureAndFormatToArgs( diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 07c42d6d039..e916fdfc1ff 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -163,7 +163,10 @@ create temporary table known_short_messages (s String) as select * from (select '{} -> {}', '{} {}', '{}%', - '{}: {}' + '{}: {}', + 'Unknown data type family: {}', + 'Cannot load time zone {}', + 'Unknown table engine {}' ] as arr) array join arr; -- Check that we don't have too many short meaningless message patterns. From 8b7a294bc7db64f675faaf9b91a40e3e9f613935 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 30 May 2024 17:15:29 +0200 Subject: [PATCH 1510/2361] Support constructing SnappyWriteBuffer from a reference to other write buffer. --- src/IO/SnappyWriteBuffer.cpp | 8 +++++++- src/IO/SnappyWriteBuffer.h | 10 +++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/IO/SnappyWriteBuffer.cpp b/src/IO/SnappyWriteBuffer.cpp index ca40d0656d1..0e02b48e1e0 100644 --- a/src/IO/SnappyWriteBuffer.cpp +++ b/src/IO/SnappyWriteBuffer.cpp @@ -16,7 +16,13 @@ namespace ErrorCodes } SnappyWriteBuffer::SnappyWriteBuffer(std::unique_ptr out_, size_t buf_size, char * existing_memory, size_t alignment) - : BufferWithOwnMemory(buf_size, existing_memory, alignment), out(std::move(out_)) + : SnappyWriteBuffer(*out_, buf_size, existing_memory, alignment) +{ + out_holder = std::move(out_); +} + +SnappyWriteBuffer::SnappyWriteBuffer(WriteBuffer & out_, size_t buf_size, char * existing_memory, size_t alignment) + : BufferWithOwnMemory(buf_size, existing_memory, alignment), out(&out_) { } diff --git a/src/IO/SnappyWriteBuffer.h b/src/IO/SnappyWriteBuffer.h index 2ff86fb64ef..b7a084d0f80 100644 --- a/src/IO/SnappyWriteBuffer.h +++ b/src/IO/SnappyWriteBuffer.h @@ -18,6 +18,12 @@ public: char * existing_memory = nullptr, size_t alignment = 0); + explicit SnappyWriteBuffer( + WriteBuffer & out_, + size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, + char * existing_memory = nullptr, + size_t alignment = 0); + ~SnappyWriteBuffer() override; void finalizeImpl() override { finish(); } @@ -28,7 +34,9 @@ private: void finishImpl(); void finish(); - std::unique_ptr out; + WriteBuffer * out; + std::unique_ptr out_holder; + bool finished = false; String uncompress_buffer; From 2a6f498b7751df0e37f281136d524f542f7910c2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 12 Jul 2024 19:18:37 +0200 Subject: [PATCH 1511/2361] Add view targets Data, Tags, Metrics. 
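These new targets are used by TimeSeries tables (see the is_time_series_table flag below). A rough sketch of the syntax they enable, put together from the keyword comments in this patch; the table names are illustrative and the inner-engine definitions are abridged, so it may not match the final grammar verbatim:

    CREATE TABLE mydb.myts ENGINE = TimeSeries
        DATA mydb.mydata TAGS mydb.mytags METRICS mydb.mymetrics;

    -- or let the table manage its own inner target tables:
    CREATE TABLE mydb.myts2 ENGINE = TimeSeries
        DATA ENGINE = MergeTree() TAGS ENGINE = MergeTree() METRICS ENGINE = MergeTree();

The exact grammar is defined by the ParserCreateQuery.cpp changes in this patch.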
--- src/Parsers/ASTCreateQuery.cpp | 7 +++++++ src/Parsers/ASTCreateQuery.h | 1 + src/Parsers/ASTViewTargets.cpp | 12 ++++++++++++ src/Parsers/ASTViewTargets.h | 11 ++++++++++- src/Parsers/CommonParsers.h | 6 ++++++ src/Parsers/CreateQueryUUIDs.cpp | 7 +++++++ src/Parsers/ParserCreateQuery.cpp | 9 +++++++++ 7 files changed, 52 insertions(+), 1 deletion(-) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index f0f782c0a63..359e93ab269 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -483,6 +483,13 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat if (auto to_storage = getTargetInnerEngine(ViewTarget::To)) to_storage->formatImpl(settings, state, frame); + if (targets) + { + targets->formatTarget(ViewTarget::Data, settings, state, frame); + targets->formatTarget(ViewTarget::Tags, settings, state, frame); + targets->formatTarget(ViewTarget::Metrics, settings, state, frame); + } + if (dictionary) dictionary->formatImpl(settings, state, frame); diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index a95010aea31..6be0fa78903 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -97,6 +97,7 @@ public: bool is_materialized_view{false}; bool is_live_view{false}; bool is_window_view{false}; + bool is_time_series_table{false}; /// CREATE TABLE ... ENGINE=TimeSeries() ... bool is_populate{false}; bool is_create_empty{false}; /// CREATE TABLE ... EMPTY AS SELECT ... bool replace_view{false}; /// CREATE OR REPLACE VIEW diff --git a/src/Parsers/ASTViewTargets.cpp b/src/Parsers/ASTViewTargets.cpp index 8ee98e704df..ffd746cc38a 100644 --- a/src/Parsers/ASTViewTargets.cpp +++ b/src/Parsers/ASTViewTargets.cpp @@ -21,6 +21,9 @@ std::string_view toString(ViewTarget::Kind kind) { case ViewTarget::To: return "to"; case ViewTarget::Inner: return "inner"; + case ViewTarget::Data: return "data"; + case ViewTarget::Tags: return "tags"; + case ViewTarget::Metrics: return "metrics"; } throw Exception(ErrorCodes::LOGICAL_ERROR, "{} doesn't support kind {}", __FUNCTION__, kind); } @@ -254,6 +257,9 @@ std::optional ASTViewTargets::getKeywordForTableID(ViewTarget::Kind kin { case ViewTarget::To: return Keyword::TO; /// TO mydb.mydata case ViewTarget::Inner: return std::nullopt; + case ViewTarget::Data: return Keyword::DATA; /// DATA mydb.mydata + case ViewTarget::Tags: return Keyword::TAGS; /// TAGS mydb.mytags + case ViewTarget::Metrics: return Keyword::METRICS; /// METRICS mydb.mymetrics } UNREACHABLE(); } @@ -264,6 +270,9 @@ std::optional ASTViewTargets::getKeywordForInnerStorage(ViewTarget::Kin { case ViewTarget::To: return std::nullopt; /// ENGINE = MergeTree() case ViewTarget::Inner: return Keyword::INNER; /// INNER ENGINE = MergeTree() + case ViewTarget::Data: return Keyword::DATA; /// DATA ENGINE = MergeTree() + case ViewTarget::Tags: return Keyword::TAGS; /// TAGS ENGINE = MergeTree() + case ViewTarget::Metrics: return Keyword::METRICS; /// METRICS ENGINE = MergeTree() } UNREACHABLE(); } @@ -274,6 +283,9 @@ std::optional ASTViewTargets::getKeywordForInnerUUID(ViewTarget::Kind k { case ViewTarget::To: return Keyword::TO_INNER_UUID; /// TO INNER UUID 'XXX' case ViewTarget::Inner: return std::nullopt; + case ViewTarget::Data: return Keyword::DATA_INNER_UUID; /// DATA INNER UUID 'XXX' + case ViewTarget::Tags: return Keyword::TAGS_INNER_UUID; /// TAGS INNER UUID 'XXX' + case ViewTarget::Metrics: return Keyword::METRICS_INNER_UUID; /// METRICS INNER UUID 'XXX' } UNREACHABLE(); 
} diff --git a/src/Parsers/ASTViewTargets.h b/src/Parsers/ASTViewTargets.h index 12182919f0e..7814dd5249c 100644 --- a/src/Parsers/ASTViewTargets.h +++ b/src/Parsers/ASTViewTargets.h @@ -9,7 +9,7 @@ namespace DB class ASTStorage; enum class Keyword : size_t; -/// Information about target tables (external or inner) of a materialized view or a window view. +/// Information about target tables (external or inner) of a materialized view or a window view or a TimeSeries table. /// See ASTViewTargets for more details. struct ViewTarget { @@ -24,6 +24,15 @@ struct ViewTarget /// If `kind == ViewTarget::Inner` then `ViewTarget` contains information about the "INNER" table of a window view: /// CREATE WINDOW VIEW db.wv_name {INNER ENGINE inner_engine} AS SELECT ... Inner, + + /// The "data" table for a TimeSeries table, contains time series. + Data, + + /// The "tags" table for a TimeSeries table, contains identifiers for each combination of a metric name and tags (labels). + Tags, + + /// The "metrics" table for a TimeSeries table, contains general information (metadata) about metrics. + Metrics, }; Kind kind = To; diff --git a/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h index 0ae9ee4833c..34df4b135bb 100644 --- a/src/Parsers/CommonParsers.h +++ b/src/Parsers/CommonParsers.h @@ -116,6 +116,8 @@ namespace DB MR_MACROS(CURRENT_TRANSACTION, "CURRENT TRANSACTION") \ MR_MACROS(CURRENTUSER, "CURRENTUSER") \ MR_MACROS(D, "D") \ + MR_MACROS(DATA, "DATA") \ + MR_MACROS(DATA_INNER_UUID, "DATA INNER UUID") \ MR_MACROS(DATABASE, "DATABASE") \ MR_MACROS(DATABASES, "DATABASES") \ MR_MACROS(DATE, "DATE") \ @@ -288,6 +290,8 @@ namespace DB MR_MACROS(MCS, "MCS") \ MR_MACROS(MEMORY, "MEMORY") \ MR_MACROS(MERGES, "MERGES") \ + MR_MACROS(METRICS, "METRICS") \ + MR_MACROS(METRICS_INNER_UUID, "METRICS INNER UUID") \ MR_MACROS(MI, "MI") \ MR_MACROS(MICROSECOND, "MICROSECOND") \ MR_MACROS(MICROSECONDS, "MICROSECONDS") \ @@ -464,6 +468,8 @@ namespace DB MR_MACROS(TABLE_OVERRIDE, "TABLE OVERRIDE") \ MR_MACROS(TABLE, "TABLE") \ MR_MACROS(TABLES, "TABLES") \ + MR_MACROS(TAGS, "TAGS") \ + MR_MACROS(TAGS_INNER_UUID, "TAGS INNER UUID") \ MR_MACROS(TEMPORARY_TABLE, "TEMPORARY TABLE") \ MR_MACROS(TEMPORARY, "TEMPORARY") \ MR_MACROS(TEST, "TEST") \ diff --git a/src/Parsers/CreateQueryUUIDs.cpp b/src/Parsers/CreateQueryUUIDs.cpp index 4dfee67b537..fbdc6161408 100644 --- a/src/Parsers/CreateQueryUUIDs.cpp +++ b/src/Parsers/CreateQueryUUIDs.cpp @@ -45,6 +45,13 @@ CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_r /// then MV will create inner table. We should generate UUID of inner table here. 
if (query.is_materialized_view) generate_target_uuid(ViewTarget::To); + + if (query.is_time_series_table) + { + generate_target_uuid(ViewTarget::Data); + generate_target_uuid(ViewTarget::Tags); + generate_target_uuid(ViewTarget::Metrics); + } } } } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index a592975613b..66965903ab0 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -696,6 +696,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ASTPtr table; ASTPtr columns_list; std::shared_ptr storage; + bool is_time_series_table = false; ASTPtr targets; ASTPtr as_database; ASTPtr as_table; @@ -784,6 +785,13 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; storage = typeid_cast>(ast); + + if (storage && storage->engine && (storage->engine->name == "TimeSeries")) + { + is_time_series_table = true; + ParserViewTargets({ViewTarget::Data, ViewTarget::Tags, ViewTarget::Metrics}).parse(pos, targets, expected); + } + return true; }; @@ -873,6 +881,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->create_or_replace = or_replace; query->if_not_exists = if_not_exists; query->temporary = is_temporary; + query->is_time_series_table = is_time_series_table; query->database = table_id->getDatabase(); query->table = table_id->getTable(); From 50604e8ea95b1d91850b8896ac21617b219b0309 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 6 May 2024 16:15:39 +0200 Subject: [PATCH 1512/2361] Add new table engine TimeSeries. --- src/CMakeLists.txt | 1 + src/Interpreters/InterpreterCreateQuery.cpp | 6 + src/Parsers/ASTFunction.h | 16 + src/Storages/StorageTimeSeries.cpp | 445 ++++++++++++++++++ src/Storages/StorageTimeSeries.h | 107 +++++ .../TimeSeries/TimeSeriesColumnNames.h | 34 ++ .../TimeSeries/TimeSeriesColumnsValidator.cpp | 246 ++++++++++ .../TimeSeries/TimeSeriesColumnsValidator.h | 51 ++ .../TimeSeriesDefinitionNormalizer.cpp | 416 ++++++++++++++++ .../TimeSeriesDefinitionNormalizer.h | 55 +++ .../TimeSeriesInnerTablesCreator.cpp | 163 +++++++ .../TimeSeries/TimeSeriesInnerTablesCreator.h | 47 ++ .../TimeSeries/TimeSeriesSettings.cpp | 34 ++ src/Storages/TimeSeries/TimeSeriesSettings.h | 25 + src/Storages/registerStorages.cpp | 4 + 15 files changed, 1650 insertions(+) create mode 100644 src/Storages/StorageTimeSeries.cpp create mode 100644 src/Storages/StorageTimeSeries.h create mode 100644 src/Storages/TimeSeries/TimeSeriesColumnNames.h create mode 100644 src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp create mode 100644 src/Storages/TimeSeries/TimeSeriesColumnsValidator.h create mode 100644 src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp create mode 100644 src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.h create mode 100644 src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp create mode 100644 src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h create mode 100644 src/Storages/TimeSeries/TimeSeriesSettings.cpp create mode 100644 src/Storages/TimeSeries/TimeSeriesSettings.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 8c133971785..cc10fdf9646 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -225,6 +225,7 @@ add_object_library(clickhouse_storages_liveview Storages/LiveView) add_object_library(clickhouse_storages_windowview Storages/WindowView) add_object_library(clickhouse_storages_s3queue Storages/ObjectStorageQueue) 
add_object_library(clickhouse_storages_materializedview Storages/MaterializedView) +add_object_library(clickhouse_storages_time_series Storages/TimeSeries) add_object_library(clickhouse_client Client) # Always compile this file with the highest possible level of optimizations, even in Debug builds. # https://github.com/ClickHouse/ClickHouse/issues/65745 diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 971f90bd3cd..995ff8fac21 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -751,6 +752,10 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti if (create.storage && create.storage->engine) getContext()->checkAccess(AccessType::TABLE_ENGINE, create.storage->engine->name); + /// If this is a TimeSeries table then we need to normalize list of columns (add missing columns and reorder), and also set inner table engines. + if (create.is_time_series_table && (mode < LoadingStrictnessLevel::ATTACH)) + StorageTimeSeries::normalizeTableDefinition(create, getContext()); + TableProperties properties; TableLockHolder as_storage_lock; @@ -1058,6 +1063,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const else if (as_create.storage) { storage_def = typeid_cast>(as_create.storage->ptr()); + create.is_time_series_table = as_create.is_time_series_table; } else { diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 1b4a5928d1c..5a14d66f31c 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -116,4 +116,20 @@ inline bool tryGetFunctionNameInto(const ASTPtr & ast, String & name) { return t /// Checks if function is a lambda function definition `lambda((x, y), x + y)` bool isASTLambdaFunction(const ASTFunction & function); +/// Makes an ASTFunction to represent a data type. +template +std::shared_ptr makeASTDataType(const String & type_name, Args &&... args) +{ + auto function = std::make_shared(); + function->name = type_name; + function->no_empty_args = true; + if (sizeof...(args)) + { + function->arguments = std::make_shared(); + function->children.push_back(function->arguments); + function->arguments->children = { std::forward(args)... }; + } + return function; +} + } diff --git a/src/Storages/StorageTimeSeries.cpp b/src/Storages/StorageTimeSeries.cpp new file mode 100644 index 00000000000..d85db53d78d --- /dev/null +++ b/src/Storages/StorageTimeSeries.cpp @@ -0,0 +1,445 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INCORRECT_QUERY; + extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; +} + + +namespace +{ + namespace fs = std::filesystem; + + /// Loads TimeSeries storage settings from a create query. + std::shared_ptr getTimeSeriesSettingsFromQuery(const ASTCreateQuery & query) + { + auto storage_settings = std::make_shared(); + if (query.storage) + storage_settings->loadFromQuery(*query.storage); + return storage_settings; + } + + /// Creates an inner target table or just makes its storage ID. + /// This function is used by the constructor of StorageTimeSeries to find (or create) its target tables. 
+ StorageID initTarget( + ViewTarget::Kind kind, + const ViewTarget * target_info, + const ContextPtr & context, + const StorageID & time_series_storage_id, + const ColumnsDescription & time_series_columns, + const TimeSeriesSettings & time_series_settings, + LoadingStrictnessLevel mode) + { + StorageID target_table_id = StorageID::createEmpty(); + + bool is_external_target = target_info && !target_info->table_id.empty(); + if (is_external_target) + { + /// A target table is specified. + target_table_id = target_info->table_id; + + if (mode < LoadingStrictnessLevel::ATTACH) + { + /// If it's not an ATTACH request then + /// check that the specified target table has all the required columns. + auto target_table = DatabaseCatalog::instance().getTable(target_table_id, context); + auto target_metadata = target_table->getInMemoryMetadataPtr(); + const auto & target_columns = target_metadata->columns; + TimeSeriesColumnsValidator validator{time_series_storage_id, time_series_settings}; + validator.validateTargetColumns(kind, target_table_id, target_columns); + } + } + else + { + TimeSeriesInnerTablesCreator inner_tables_creator{context, time_series_storage_id, time_series_columns, time_series_settings}; + auto inner_uuid = target_info ? target_info->inner_uuid : UUIDHelpers::Nil; + + /// An inner target table should be used. + if (mode >= LoadingStrictnessLevel::ATTACH) + { + /// If it's an ATTACH request, then the inner target table must be already created. + target_table_id = inner_tables_creator.getInnerTableID(kind, inner_uuid); + } + else + { + /// Create the inner target table. + auto inner_table_engine = target_info ? target_info->inner_engine : nullptr; + target_table_id = inner_tables_creator.createInnerTable(kind, inner_uuid, inner_table_engine); + } + } + + return target_table_id; + } +} + + +void StorageTimeSeries::normalizeTableDefinition(ASTCreateQuery & create_query, const ContextPtr & local_context) +{ + StorageID time_series_storage_id{create_query.getDatabase(), create_query.getTable()}; + TimeSeriesSettings time_series_settings; + if (create_query.storage) + time_series_settings.loadFromQuery(*create_query.storage); + std::shared_ptr as_create_query; + if (!create_query.as_table.empty()) + { + auto as_database = local_context->resolveDatabase(create_query.as_database); + as_create_query = typeid_cast>( + DatabaseCatalog::instance().getDatabase(as_database)->getCreateTableQuery(create_query.as_table, local_context)); + } + TimeSeriesDefinitionNormalizer normalizer{time_series_storage_id, time_series_settings, as_create_query.get()}; + normalizer.normalize(create_query); +} + + +StorageTimeSeries::StorageTimeSeries( + const StorageID & table_id, + const ContextPtr & local_context, + LoadingStrictnessLevel mode, + const ASTCreateQuery & query, + const ColumnsDescription & columns, + const String & comment) + : IStorage(table_id) + , WithContext(local_context->getGlobalContext()) +{ + storage_settings = getTimeSeriesSettingsFromQuery(query); + + if (mode < LoadingStrictnessLevel::ATTACH) + { + TimeSeriesColumnsValidator validator{table_id, *storage_settings}; + validator.validateColumns(columns); + } + + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(columns); + if (!comment.empty()) + storage_metadata.setComment(comment); + setInMemoryMetadata(storage_metadata); + + has_inner_tables = false; + + for (auto target_kind : {ViewTarget::Data, ViewTarget::Tags, ViewTarget::Metrics}) + { + const ViewTarget * target_info = query.targets ? 
query.targets->tryGetTarget(target_kind) : nullptr;
+        auto & target = targets.emplace_back();
+        target.kind = target_kind;
+        target.table_id = initTarget(target_kind, target_info, local_context, getStorageID(), columns, *storage_settings, mode);
+        target.is_inner_table = !target_info || target_info->table_id.empty();
+        has_inner_tables |= target.is_inner_table;
+    }
+}
+
+
+StorageTimeSeries::~StorageTimeSeries() = default;
+
+
+TimeSeriesSettings StorageTimeSeries::getStorageSettings() const
+{
+    return *getStorageSettingsPtr();
+}
+
+void StorageTimeSeries::startup()
+{
+}
+
+void StorageTimeSeries::shutdown(bool)
+{
+}
+
+
+void StorageTimeSeries::drop()
+{
+    /// Sync flag and the setting make sense for Atomic databases only.
+    /// However, with Atomic databases, IStorage::drop() can be called only from a background task in DatabaseCatalog.
+    /// Running synchronous DROP from that task leads to deadlock.
+    dropInnerTableIfAny(/* sync= */ false, getContext());
+}
+
+void StorageTimeSeries::dropInnerTableIfAny(bool sync, ContextPtr local_context)
+{
+    if (!has_inner_tables)
+        return;
+
+    for (const auto & target : targets)
+    {
+        if (target.is_inner_table && DatabaseCatalog::instance().tryGetTable(target.table_id, getContext()))
+        {
+            /// Best-effort to make them work: the inner table name is almost always less than the TimeSeries name (so it's safe to lock DDLGuard).
+            /// (See the comment in StorageMaterializedView::dropInnerTableIfAny.)
+            bool may_lock_ddl_guard = getStorageID().getQualifiedName() < target.table_id.getQualifiedName();
+            InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), local_context, target.table_id,
+                sync, /* ignore_sync_setting= */ true, may_lock_ddl_guard);
+        }
+    }
+}
+
+void StorageTimeSeries::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &)
+{
+    if (!has_inner_tables)
+        return;
+
+    for (const auto & target : targets)
+    {
+        /// We truncate only inner tables here.
+ if (target.is_inner_table) + InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Truncate, getContext(), local_context, target.table_id, /* sync= */ true); + } +} + + +StorageID StorageTimeSeries::getTargetTableId(ViewTarget::Kind target_kind) const +{ + for (const auto & target : targets) + { + if (target.kind == target_kind) + return target.table_id; + } + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected target kind {}", toString(target_kind)); +} + +StoragePtr StorageTimeSeries::getTargetTable(ViewTarget::Kind target_kind, const ContextPtr & local_context) const +{ + return DatabaseCatalog::instance().getTable(getTargetTableId(target_kind), local_context); +} + +StoragePtr StorageTimeSeries::tryGetTargetTable(ViewTarget::Kind target_kind, const ContextPtr & local_context) const +{ + return DatabaseCatalog::instance().tryGetTable(getTargetTableId(target_kind), local_context); +} + + +std::optional StorageTimeSeries::totalRows(const Settings & settings) const +{ + UInt64 total_rows = 0; + if (has_inner_tables) + { + for (const auto & target : targets) + { + if (target.is_inner_table) + { + auto inner_table = DatabaseCatalog::instance().tryGetTable(target.table_id, getContext()); + if (!inner_table) + return std::nullopt; + + auto total_rows_in_inner_table = inner_table->totalRows(settings); + if (!total_rows_in_inner_table) + return std::nullopt; + + total_rows += *total_rows_in_inner_table; + } + } + } + return total_rows; +} + +std::optional StorageTimeSeries::totalBytes(const Settings & settings) const +{ + UInt64 total_bytes = 0; + if (has_inner_tables) + { + for (const auto & target : targets) + { + if (target.is_inner_table) + { + auto inner_table = DatabaseCatalog::instance().tryGetTable(target.table_id, getContext()); + if (!inner_table) + return std::nullopt; + + auto total_bytes_in_inner_table = inner_table->totalBytes(settings); + if (!total_bytes_in_inner_table) + return std::nullopt; + + total_bytes += *total_bytes_in_inner_table; + } + } + } + return total_bytes; +} + +std::optional StorageTimeSeries::totalBytesUncompressed(const Settings & settings) const +{ + UInt64 total_bytes = 0; + if (has_inner_tables) + { + for (const auto & target : targets) + { + if (target.is_inner_table) + { + auto inner_table = DatabaseCatalog::instance().tryGetTable(target.table_id, getContext()); + if (!inner_table) + return std::nullopt; + + auto total_bytes_in_inner_table = inner_table->totalBytesUncompressed(settings); + if (!total_bytes_in_inner_table) + return std::nullopt; + + total_bytes += *total_bytes_in_inner_table; + } + } + } + return total_bytes; +} + +Strings StorageTimeSeries::getDataPaths() const +{ + Strings data_paths; + for (const auto & target : targets) + { + auto table = DatabaseCatalog::instance().tryGetTable(target.table_id, getContext()); + if (!table) + continue; + + insertAtEnd(data_paths, table->getDataPaths()); + } + return data_paths; +} + + +bool StorageTimeSeries::optimize( + const ASTPtr & query, + const StorageMetadataPtr &, + const ASTPtr & partition, + bool final, + bool deduplicate, + const Names & deduplicate_by_columns, + bool cleanup, + ContextPtr local_context) +{ + if (!has_inner_tables) + { + throw Exception(ErrorCodes::INCORRECT_QUERY, "TimeSeries table {} targets only existing tables. 
Execute the statement directly on it.", + getStorageID().getNameForLogs()); + } + + bool optimized = false; + for (const auto & target : targets) + { + if (target.is_inner_table) + { + auto inner_table = DatabaseCatalog::instance().getTable(target.table_id, local_context); + optimized |= inner_table->optimize(query, inner_table->getInMemoryMetadataPtr(), partition, final, deduplicate, deduplicate_by_columns, cleanup, local_context); + } + } + + return optimized; +} + + +void StorageTimeSeries::checkAlterIsPossible(const AlterCommands & commands, ContextPtr) const +{ + for (const auto & command : commands) + { + if (!command.isCommentAlter() && command.type != AlterCommand::MODIFY_SQL_SECURITY) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}", command.type, getName()); + } +} + +void StorageTimeSeries::alter(const AlterCommands & params, ContextPtr local_context, AlterLockHolder & table_lock_holder) +{ + IStorage::alter(params, local_context, table_lock_holder); +} + + +void StorageTimeSeries::renameInMemory(const StorageID & new_table_id) +{ + UNUSED(new_table_id); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Renaming is not supported by storage {} yet", getName()); +} + + +void StorageTimeSeries::backupData(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, const std::optional &) +{ + for (const auto & target : targets) + { + /// We backup the target table's data only if it's inner. + if (target.is_inner_table) + { + auto table = DatabaseCatalog::instance().getTable(target.table_id, getContext()); + table->backupData(backup_entries_collector, fs::path{data_path_in_backup} / toString(target.kind), {}); + } + } +} + +void StorageTimeSeries::restoreDataFromBackup(RestorerFromBackup & restorer, const String & data_path_in_backup, const std::optional &) +{ + for (const auto & target : targets) + { + /// We backup the target table's data only if it's inner. + if (target.is_inner_table) + { + auto table = DatabaseCatalog::instance().getTable(target.table_id, getContext()); + table->restoreDataFromBackup(restorer, fs::path{data_path_in_backup} / toString(target.kind), {}); + } + } +} + + +void StorageTimeSeries::read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr local_context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) +{ + UNUSED(query_plan); + UNUSED(column_names); + UNUSED(storage_snapshot); + UNUSED(query_info); + UNUSED(local_context); + UNUSED(processed_stage); + UNUSED(max_block_size); + UNUSED(num_streams); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SELECT is not supported by storage {} yet", getName()); +} + + +SinkToStoragePtr StorageTimeSeries::write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context, bool async_insert) +{ + UNUSED(query); + UNUSED(metadata_snapshot); + UNUSED(local_context); + UNUSED(async_insert); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "INSERT is not supported by storage {} yet", getName()); +} + + +void registerStorageTimeSeries(StorageFactory & factory) +{ + factory.registerStorage("TimeSeries", [](const StorageFactory::Arguments & args) + { + /// Pass local_context here to convey setting to inner tables. 
+ return std::make_shared( + args.table_id, args.getLocalContext(), args.mode, args.query, args.columns, args.comment); + } + , + { + .supports_settings = true, + .supports_schema_inference = true, + }); +} + +} diff --git a/src/Storages/StorageTimeSeries.h b/src/Storages/StorageTimeSeries.h new file mode 100644 index 00000000000..9ee09108803 --- /dev/null +++ b/src/Storages/StorageTimeSeries.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ +struct TimeSeriesSettings; +using TimeSeriesSettingsPtr = std::shared_ptr; + +/// Represents a table engine to keep time series received by Prometheus protocols. +/// Examples of using this table engine: +/// +/// CREATE TABLE ts ENGINE = TimeSeries() +/// -OR- +/// CREATE TABLE ts ENGINE = TimeSeries() DATA [db].table1 TAGS [db].table2 METRICS [db].table3 +/// -OR- +/// CREATE TABLE ts ENGINE = TimeSeries() DATA ENGINE = MergeTree TAGS ENGINE = ReplacingMergeTree METRICS ENGINE = ReplacingMergeTree +/// -OR- +/// CREATE TABLE ts ( +/// id UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)) CODEC(ZSTD(3)), +/// instance LowCardinality(String), +/// job String +/// ) ENGINE = TimeSeries() +/// SETTINGS tags_to_columns = {'instance': 'instance', 'job': 'job'} +/// DATA ENGINE = ReplicatedMergeTree('zkpath', 'replica'), ... +/// +class StorageTimeSeries final : public IStorage, WithContext +{ +public: + /// Adds missing columns and reorder columns, and also adds inner table engines if they aren't specified. + static void normalizeTableDefinition(ASTCreateQuery & create_query, const ContextPtr & local_context); + + StorageTimeSeries(const StorageID & table_id, const ContextPtr & local_context, LoadingStrictnessLevel mode, + const ASTCreateQuery & query, const ColumnsDescription & columns, const String & comment); + + ~StorageTimeSeries() override; + + std::string getName() const override { return "TimeSeries"; } + + TimeSeriesSettings getStorageSettings() const; + TimeSeriesSettingsPtr getStorageSettingsPtr() const { return storage_settings; } + + StorageID getTargetTableId(ViewTarget::Kind target_kind) const; + StoragePtr getTargetTable(ViewTarget::Kind target_kind, const ContextPtr & local_context) const; + StoragePtr tryGetTargetTable(ViewTarget::Kind target_kind, const ContextPtr & local_context) const; + + void startup() override; + void shutdown(bool is_drop) override; + + void read( + QueryPlan & query_plan, + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processed_stage, + size_t max_block_size, + size_t num_streams) override; + + SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, bool async_insert) override; + + bool optimize( + const ASTPtr & query, + const StorageMetadataPtr & metadata_snapshot, + const ASTPtr & partition, + bool final, + bool deduplicate, + const Names & deduplicate_by_columns, + bool cleanup, + ContextPtr local_context) override; + + void drop() override; + void dropInnerTableIfAny(bool sync, ContextPtr local_context) override; + + void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override; + + void renameInMemory(const StorageID & new_table_id) override; + + void checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const override; + void alter(const AlterCommands & params, ContextPtr local_context, AlterLockHolder & 
table_lock_holder) override;
+
+    void backupData(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, const std::optional<ASTs> & partitions) override;
+    void restoreDataFromBackup(RestorerFromBackup & restorer, const String & data_path_in_backup, const std::optional<ASTs> & partitions) override;
+
+    std::optional<UInt64> totalRows(const Settings & settings) const override;
+    std::optional<UInt64> totalBytes(const Settings & settings) const override;
+    std::optional<UInt64> totalBytesUncompressed(const Settings & settings) const override;
+    Strings getDataPaths() const override;
+
+private:
+    TimeSeriesSettingsPtr storage_settings;
+
+    struct Target
+    {
+        ViewTarget::Kind kind;
+        StorageID table_id = StorageID::createEmpty();
+        bool is_inner_table;
+    };
+
+    std::vector<Target> targets;
+    bool has_inner_tables;
+};
+
+}
diff --git a/src/Storages/TimeSeries/TimeSeriesColumnNames.h b/src/Storages/TimeSeries/TimeSeriesColumnNames.h
new file mode 100644
index 00000000000..9176ec5384a
--- /dev/null
+++ b/src/Storages/TimeSeries/TimeSeriesColumnNames.h
@@ -0,0 +1,34 @@
+#pragma once
+
+
+namespace DB
+{
+
+struct TimeSeriesColumnNames
+{
+    /// The "data" table contains time series:
+    static constexpr const char * ID = "id";
+    static constexpr const char * Timestamp = "timestamp";
+    static constexpr const char * Value = "value";
+
+    /// The "tags" table contains identifiers for each combination of a metric name with corresponding tags (labels):
+
+    /// The default expression specified for the "id" column contains an expression for calculating an identifier of a time series by a metric name and tags.
+    //static constexpr const char * kID = "id";
+    static constexpr const char * MetricName = "metric_name";
+
+    /// Contains tags which have no corresponding columns specified in the "tags_to_columns" setting.
+    static constexpr const char * Tags = "tags";
+
+    /// Contains all tags, including those which have corresponding columns specified in the "tags_to_columns" setting.
+    /// This is a generated column, it's not stored anywhere, it's generated on the fly.
+ static constexpr const char * AllTags = "all_tags"; + + /// The "metrics" table contains general information (metadata) about metrics: + static constexpr const char * MetricFamilyName = "metric_family_name"; + static constexpr const char * Type = "type"; + static constexpr const char * Unit = "unit"; + static constexpr const char * Help = "help"; +}; + +} diff --git a/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp new file mode 100644 index 00000000000..0ce5528939a --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp @@ -0,0 +1,246 @@ +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INCOMPATIBLE_COLUMNS; + extern const int THERE_IS_NO_COLUMN; +} + + +TimeSeriesColumnsValidator::TimeSeriesColumnsValidator(StorageID time_series_storage_id_, + std::reference_wrapper time_series_settings_) + : time_series_storage_id(std::move(time_series_storage_id_)) + , time_series_settings(time_series_settings_) +{ +} + + +void TimeSeriesColumnsValidator::validateColumns(const ColumnsDescription & columns) const +{ + try + { + validateColumnsImpl(columns); + } + catch (Exception & e) + { + e.addMessage("While checking columns of TimeSeries table {}", time_series_storage_id.getNameForLogs()); + throw; + } +} + + +void TimeSeriesColumnsValidator::validateColumnsImpl(const ColumnsDescription & columns) const +{ + + auto get_column_description = [&](const String & column_name) -> const ColumnDescription & + { + const auto * column = columns.tryGet(column_name); + if (!column) + { + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Column {} is required for the TimeSeries table engine", column_name); + } + return *column; + }; + + /// Validate columns for the "data" table. + validateColumnForID(get_column_description(TimeSeriesColumnNames::ID)); + validateColumnForTimestamp(get_column_description(TimeSeriesColumnNames::Timestamp)); + validateColumnForValue(get_column_description(TimeSeriesColumnNames::Value)); + + /// Validate columns for the "tags" table. + validateColumnForMetricName(get_column_description(TimeSeriesColumnNames::MetricName)); + + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + validateColumnForTagValue(get_column_description(column_name)); + } + + validateColumnForTagsMap(get_column_description(TimeSeriesColumnNames::Tags)); + validateColumnForTagsMap(get_column_description(TimeSeriesColumnNames::AllTags)); + + /// Validate columns for the "metrics" table. 
+ validateColumnForMetricFamilyName(get_column_description(TimeSeriesColumnNames::MetricFamilyName)); + validateColumnForType(get_column_description(TimeSeriesColumnNames::Type)); + validateColumnForUnit(get_column_description(TimeSeriesColumnNames::Unit)); + validateColumnForHelp(get_column_description(TimeSeriesColumnNames::Help)); +} + + +void TimeSeriesColumnsValidator::validateTargetColumns(ViewTarget::Kind target_kind, const StorageID & target_table_id, const ColumnsDescription & target_columns) const +{ + try + { + validateTargetColumnsImpl(target_kind, target_columns); + } + catch (Exception & e) + { + e.addMessage("While checking columns of table {} which is the {} target of TimeSeries table {}", target_table_id.getNameForLogs(), + toString(target_kind), time_series_storage_id.getNameForLogs()); + throw; + } +} + + +void TimeSeriesColumnsValidator::validateTargetColumnsImpl(ViewTarget::Kind target_kind, const ColumnsDescription & target_columns) const +{ + auto get_column_description = [&](const String & column_name) -> const ColumnDescription & + { + const auto * column = target_columns.tryGet(column_name); + if (!column) + { + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "Column {} is required for the TimeSeries table engine", column_name); + } + return *column; + }; + + switch (target_kind) + { + case ViewTarget::Data: + { + /// Here "check_default = false" because it's ok for the "id" column in the target table not to contain + /// an expression for calculating the identifier of a time series. + validateColumnForID(get_column_description(TimeSeriesColumnNames::ID), /* check_default= */ false); + + validateColumnForTimestamp(get_column_description(TimeSeriesColumnNames::Timestamp)); + validateColumnForValue(get_column_description(TimeSeriesColumnNames::Value)); + + break; + } + + case ViewTarget::Tags: + { + validateColumnForMetricName(get_column_description(TimeSeriesColumnNames::MetricName)); + + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + validateColumnForTagValue(get_column_description(column_name)); + } + + validateColumnForTagsMap(get_column_description(TimeSeriesColumnNames::Tags)); + + break; + } + + case ViewTarget::Metrics: + { + validateColumnForMetricFamilyName(get_column_description(TimeSeriesColumnNames::MetricFamilyName)); + validateColumnForType(get_column_description(TimeSeriesColumnNames::Type)); + validateColumnForUnit(get_column_description(TimeSeriesColumnNames::Unit)); + validateColumnForHelp(get_column_description(TimeSeriesColumnNames::Help)); + break; + } + + default: + UNREACHABLE(); + } +} + + +void TimeSeriesColumnsValidator::validateColumnForID(const ColumnDescription & column, bool check_default) const +{ + if (check_default && !column.default_desc.expression) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "The DEFAULT expression for column {} must contain an expression " + "which will be used to calculate the identifier of each time series: {} {} DEFAULT ...", + column.name, column.name, column.type->getName()); + } +} + +void TimeSeriesColumnsValidator::validateColumnForTimestamp(const ColumnDescription & column) const +{ + if (!isDateTime64(removeNullable(column.type))) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "Column {} has illegal data type {}, expected DateTime64", + column.name, column.type->getName()); + } +} + 
+void TimeSeriesColumnsValidator::validateColumnForTimestamp(const ColumnDescription & column, UInt32 & out_scale) const +{ + auto maybe_datetime64_type = removeNullable(column.type); + if (!isDateTime64(maybe_datetime64_type)) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "Column {} has illegal data type {}, expected DateTime64", + column.name, column.type->getName()); + } + const auto & datetime64_type = typeid_cast(*maybe_datetime64_type); + out_scale = datetime64_type.getScale(); +} + +void TimeSeriesColumnsValidator::validateColumnForValue(const ColumnDescription & column) const +{ + if (!isFloat(removeNullable(column.type))) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "Column {} has illegal data type {}, expected Float32 or Float64", + column.name, column.type->getName()); + } +} + +void TimeSeriesColumnsValidator::validateColumnForMetricName(const ColumnDescription & column) const +{ + validateColumnForTagValue(column); +} + +void TimeSeriesColumnsValidator::validateColumnForTagValue(const ColumnDescription & column) const +{ + if (!isString(removeLowCardinalityAndNullable(column.type))) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "Column {} has illegal data type {}, expected String or LowCardinality(String)", + column.name, column.type->getName()); + } +} + +void TimeSeriesColumnsValidator::validateColumnForTagsMap(const ColumnDescription & column) const +{ + if (!isMap(column.type) + || !isString(removeLowCardinality(typeid_cast(*column.type).getKeyType())) + || !isString(removeLowCardinality(typeid_cast(*column.type).getValueType()))) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "Column {} has illegal data type {}, expected Map(String, String) or Map(LowCardinality(String), String)", + column.name, column.type->getName()); + } +} + +void TimeSeriesColumnsValidator::validateColumnForMetricFamilyName(const ColumnDescription & column) const +{ + if (!isString(removeLowCardinalityAndNullable(column.type))) + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "Column {} has illegal data type {}, expected String or LowCardinality(String)", + column.name, column.type->getName()); + } +} + +void TimeSeriesColumnsValidator::validateColumnForType(const ColumnDescription & column) const +{ + validateColumnForMetricFamilyName(column); +} + +void TimeSeriesColumnsValidator::validateColumnForUnit(const ColumnDescription & column) const +{ + validateColumnForMetricFamilyName(column); +} + +void TimeSeriesColumnsValidator::validateColumnForHelp(const ColumnDescription & column) const +{ + validateColumnForMetricFamilyName(column); +} + +} diff --git a/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h new file mode 100644 index 00000000000..cafee9da03c --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ColumnsDescription; +struct ColumnDescription; +struct TimeSeriesSettings; + +/// Checks the types of columns of a TimeSeries table. +class TimeSeriesColumnsValidator +{ +public: + /// Constructor stores a reference to argument `time_series_settings_` (it's unnecessary to copy it). + TimeSeriesColumnsValidator(StorageID time_series_storage_id_, + std::reference_wrapper time_series_settings_); + + /// Checks the columns of a TimeSeries table and throws an exception if some of the required columns don't exist or have illegal types. 
+ void validateColumns(const ColumnsDescription & columns) const; + + /// Checks columns of a target table that a TimeSeries table is going to use. + /// Throws an exception if some of the required columns don't exist or have illegal types. + void validateTargetColumns(ViewTarget::Kind target_kind, const StorageID & target_table_id, const ColumnsDescription & target_columns) const; + + /// Each of the following functions validates a specific column type. + void validateColumnForID(const ColumnDescription & column, bool check_default = true) const; + void validateColumnForTimestamp(const ColumnDescription & column) const; + void validateColumnForTimestamp(const ColumnDescription & column, UInt32 & out_scale) const; + void validateColumnForValue(const ColumnDescription & column) const; + + void validateColumnForMetricName(const ColumnDescription & column) const; + void validateColumnForTagValue(const ColumnDescription & column) const; + void validateColumnForTagsMap(const ColumnDescription & column) const; + + void validateColumnForMetricFamilyName(const ColumnDescription & column) const; + void validateColumnForType(const ColumnDescription & column) const; + void validateColumnForUnit(const ColumnDescription & column) const; + void validateColumnForHelp(const ColumnDescription & column) const; + +private: + void validateColumnsImpl(const ColumnsDescription & columns) const; + void validateTargetColumnsImpl(ViewTarget::Kind target_kind, const ColumnsDescription & target_columns) const; + + const StorageID time_series_storage_id; + const TimeSeriesSettings & time_series_settings; +}; + +} diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp new file mode 100644 index 00000000000..78f8afe2528 --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp @@ -0,0 +1,416 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INCOMPATIBLE_COLUMNS; + extern const int INCORRECT_QUERY; +} + + +TimeSeriesDefinitionNormalizer::TimeSeriesDefinitionNormalizer(StorageID time_series_storage_id_, + std::reference_wrapper time_series_settings_, + const ASTCreateQuery * as_create_query_) + : time_series_storage_id(std::move(time_series_storage_id_)) + , time_series_settings(time_series_settings_) + , as_create_query(as_create_query_) +{ +} + + +void TimeSeriesDefinitionNormalizer::normalize(ASTCreateQuery & create_query) const +{ + reorderColumns(create_query); + addMissingColumns(create_query); + addMissingDefaultForIDColumn(create_query); + + if (as_create_query) + addMissingInnerEnginesFromAsTable(create_query); + + addMissingInnerEngines(create_query); +} + + +void TimeSeriesDefinitionNormalizer::reorderColumns(ASTCreateQuery & create) const +{ + if (!create.columns_list || !create.columns_list->columns) + return; + + auto & columns = create.columns_list->columns->children; + + /// Build a map "column_name -> column_declaration". + std::unordered_map> columns_by_name; + for (const auto & column : columns) + { + auto column_declaration = typeid_cast>(column); + columns_by_name[column_declaration->name] = column_declaration; + } + + /// Remove all columns and then add them again in the canonical order. 
+ columns.clear(); + + auto add_column_in_correct_order = [&](std::string_view column_name) + { + auto it = columns_by_name.find(column_name); + if (it != columns_by_name.end()) + { + /// Add the column back to the list. + columns.push_back(it->second); + + /// Remove the column from the map to allow the check at the end of this function + /// that all columns from the original list are added back to the list. + columns_by_name.erase(it); + } + }; + + /// Reorder columns for the "data" table. + add_column_in_correct_order(TimeSeriesColumnNames::ID); + add_column_in_correct_order(TimeSeriesColumnNames::Timestamp); + add_column_in_correct_order(TimeSeriesColumnNames::Value); + + /// Reorder columns for the "tags" table. + add_column_in_correct_order(TimeSeriesColumnNames::MetricName); + + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + add_column_in_correct_order(column_name); + } + + add_column_in_correct_order(TimeSeriesColumnNames::Tags); + add_column_in_correct_order(TimeSeriesColumnNames::AllTags); + + /// Reorder columns for the "metrics" table. + add_column_in_correct_order(TimeSeriesColumnNames::MetricFamilyName); + add_column_in_correct_order(TimeSeriesColumnNames::Type); + add_column_in_correct_order(TimeSeriesColumnNames::Unit); + add_column_in_correct_order(TimeSeriesColumnNames::Help); + + /// All columns from the original list must be added back to the list. + if (!columns_by_name.empty()) + { + throw Exception( + ErrorCodes::INCOMPATIBLE_COLUMNS, + "{}: Column {} can't be used in this table. " + "The TimeSeries table engine supports only a limited set of columns (id, timestamp, value, metric_name, tags, metric_family_name, type, unit, help). " + "Extra columns representing tags must be specified in the 'tags_to_columns' setting.", + time_series_storage_id.getNameForLogs(), columns_by_name.begin()->first); + } +} + + +void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) const +{ + if (!create.as_table.empty()) + { + /// If the create query has the "AS other_table" clause ("CREATE TABLE table AS other_table") + /// then all columns must be extracted from that "other_table". + /// Function InterpreterCreateQuery::getTablePropertiesAndNormalizeCreateQuery() will do that for us, + /// we don't need to fill missing columns by default in that case. + return; + } + + if (!create.columns_list) + create.set(create.columns_list, std::make_shared()); + + if (!create.columns_list->columns) + create.columns_list->set(create.columns_list->columns, std::make_shared()); + auto & columns = create.columns_list->columns->children; + + /// Here in this function we rely on that the columns are already sorted in the canonical order (see the reorderColumns() function). + /// NOTE: The order in which this function processes columns MUST be exactly the same as the order in reorderColumns(). 
+ size_t position = 0; + + auto is_next_column_named = [&](std::string_view column_name) + { + if (position < columns.size() && (typeid_cast(*columns[position]).name == column_name)) + { + ++position; + return true; + } + return false; + }; + + auto make_new_column = [&](const String & column_name, ASTPtr type) + { + auto new_column = std::make_shared(); + new_column->name = column_name; + new_column->type = type; + columns.insert(columns.begin() + position, new_column); + ++position; + }; + + auto get_uuid_type = [] { return makeASTDataType("UUID"); }; + auto get_datetime_type = [] { return makeASTDataType("DateTime64", std::make_shared(3ul)); }; + auto get_float_type = [] { return makeASTDataType("Float64"); }; + auto get_string_type = [] { return makeASTDataType("String"); }; + auto get_lc_string_type = [&] { return makeASTDataType("LowCardinality", get_string_type()); }; + auto get_string_to_string_map_type = [&] { return makeASTDataType("Map", get_string_type(), get_string_type()); }; + auto get_lc_string_to_string_map_type = [&] { return makeASTDataType("Map", get_lc_string_type(), get_string_type()); }; + + /// Add missing columns for the "data" table. + if (!is_next_column_named(TimeSeriesColumnNames::ID)) + make_new_column(TimeSeriesColumnNames::ID, get_uuid_type()); + + if (!is_next_column_named(TimeSeriesColumnNames::Timestamp)) + make_new_column(TimeSeriesColumnNames::Timestamp, get_datetime_type()); + + if (!is_next_column_named(TimeSeriesColumnNames::Value)) + make_new_column(TimeSeriesColumnNames::Value, get_float_type()); + + /// Add missing columns for the "tags" table. + if (!is_next_column_named(TimeSeriesColumnNames::MetricName)) + { + /// We use 'LowCardinality(String)' as the default type of the `metric_name` column: + /// it looks like a correct optimization because there are shouldn't be too many different metrics. + make_new_column(TimeSeriesColumnNames::MetricName, get_lc_string_type()); + } + + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + if (!is_next_column_named(column_name)) + make_new_column(column_name, get_string_type()); + } + + if (!is_next_column_named(TimeSeriesColumnNames::Tags)) + { + /// We use 'Map(LowCardinality(String), String)' as the default type of the `tags` column: + /// it looks like a correct optimization because there are shouldn't be too many different tag names. + make_new_column(TimeSeriesColumnNames::Tags, get_lc_string_to_string_map_type()); + } + + if (!is_next_column_named(TimeSeriesColumnNames::AllTags)) + { + /// The `all_tags` column is virtual (it's calculated on the fly and never stored anywhere) + /// so here we don't need to use the LowCardinality optimization as for the `tags` column. + make_new_column(TimeSeriesColumnNames::AllTags, get_string_to_string_map_type()); + } + + /// Add missing columns for the "metrics" table. 
+ if (!is_next_column_named(TimeSeriesColumnNames::MetricFamilyName)) + make_new_column(TimeSeriesColumnNames::MetricFamilyName, get_string_type()); + + if (!is_next_column_named(TimeSeriesColumnNames::Type)) + make_new_column(TimeSeriesColumnNames::Type, get_string_type()); + + if (!is_next_column_named(TimeSeriesColumnNames::Unit)) + make_new_column(TimeSeriesColumnNames::Unit, get_string_type()); + + if (!is_next_column_named(TimeSeriesColumnNames::Help)) + make_new_column(TimeSeriesColumnNames::Help, get_string_type()); + + /// If the following fails that means the order in which columns are processed in this function doesn't match the order of columns in reorderColumns(). + chassert(position == columns.size()); +} + + +void TimeSeriesDefinitionNormalizer::addMissingDefaultForIDColumn(ASTCreateQuery & create) const +{ + /// Find the 'id' column and make a default expression for it. + if (!create.columns_list || !create.columns_list->columns) + return; + + auto & columns = create.columns_list->columns->children; + auto * it = std::find_if(columns.begin(), columns.end(), [](const ASTPtr & column) + { + return typeid_cast(*column).name == TimeSeriesColumnNames::ID; + }); + + if (it == columns.end()) + return; + + auto & column_declaration = typeid_cast(**it); + + /// We add a DEFAULT for the 'id' column only if it's not specified yet. + if (column_declaration.default_specifier.empty() && !column_declaration.default_expression) + { + column_declaration.default_specifier = "DEFAULT"; + column_declaration.default_expression = chooseIDAlgorithm(column_declaration); + } +} + + +ASTPtr TimeSeriesDefinitionNormalizer::chooseIDAlgorithm(const ASTColumnDeclaration & id_column) const +{ + /// Build a list of arguments for a hash function. + /// All hash functions below allow multiple arguments, so we use two arguments: metric_name, all_tags. + ASTs arguments_for_hash_function; + arguments_for_hash_function.push_back(std::make_shared(TimeSeriesColumnNames::MetricName)); + arguments_for_hash_function.push_back(std::make_shared(TimeSeriesColumnNames::AllTags)); + + auto make_hash_function = [&](const String & function_name) + { + auto function = std::make_shared(); + function->name = function_name; + auto arguments_list = std::make_shared(); + arguments_list->children = std::move(arguments_for_hash_function); + function->arguments = arguments_list; + return function; + }; + + /// The type of a hash function depends on the type of the 'id' column. + auto id_type = DataTypeFactory::instance().get(id_column.type); + WhichDataType id_type_which(*id_type); + + if (id_type_which.isUInt64()) + { + return make_hash_function("sipHash64"); + } + else if (id_type_which.isFixedString() && typeid_cast(*id_type).getN() == 16) + { + return make_hash_function("sipHash128"); + } + else if (id_type_which.isUUID()) + { + return makeASTFunction("reinterpretAsUUID", make_hash_function("sipHash128")); + } + else if (id_type_which.isUInt128()) + { + return makeASTFunction("reinterpretAsUInt128", make_hash_function("sipHash128")); + } + else + { + throw Exception(ErrorCodes::INCOMPATIBLE_COLUMNS, "{}: The DEFAULT expression for column {} must contain an expression " + "which will be used to calculate the identifier of each time series: {} {} DEFAULT ... " + "If the DEFAULT expression is not specified then it can be chosen implicitly but only if the column type is one of these: UInt64, UInt128, UUID. 
" + "For type {} the DEFAULT expression can't be chosen automatically, so please specify it explicitly", + time_series_storage_id.getNameForLogs(), id_column.name, id_column.name, id_type->getName(), id_type->getName()); + } +} + + +void TimeSeriesDefinitionNormalizer::addMissingInnerEnginesFromAsTable(ASTCreateQuery & create) const +{ + if (!as_create_query) + return; + + for (auto target_kind : {ViewTarget::Data, ViewTarget::Tags, ViewTarget::Metrics}) + { + if (as_create_query->hasTargetTableID(target_kind)) + { + /// It's unlikely correct to use "CREATE table AS other_table" when "other_table" has external tables like this: + /// CREATE TABLE other_table ENGINE=TimeSeries data mydata + /// (because `table` would use the same table "mydata"). + /// Thus we just prohibit that. + QualifiedTableName as_table{as_create_query->getDatabase(), as_create_query->getTable()}; + throw Exception( + ErrorCodes::INCORRECT_QUERY, + "Cannot CREATE a table AS {}.{} because it has external tables", + backQuoteIfNeed(as_table.database), backQuoteIfNeed(as_table.table)); + } + + auto inner_table_engine = create.getTargetInnerEngine(target_kind); + if (!inner_table_engine) + { + /// Copy an inner engine's definition from the other table. + inner_table_engine = as_create_query->getTargetInnerEngine(target_kind); + if (inner_table_engine) + create.setTargetInnerEngine(target_kind, typeid_cast>(inner_table_engine->clone())); + } + } +} + + +void TimeSeriesDefinitionNormalizer::addMissingInnerEngines(ASTCreateQuery & create) const +{ + for (auto target_kind : {ViewTarget::Data, ViewTarget::Tags, ViewTarget::Metrics}) + { + if (create.hasTargetTableID(target_kind)) + continue; /// External target is set, inner engine is not needed. + + auto inner_table_engine = create.getTargetInnerEngine(target_kind); + if (inner_table_engine && inner_table_engine->engine) + continue; /// Engine is set already, skip it. + + if (!inner_table_engine) + { + /// Some part of storage definition (such as PARTITION BY) is specified, but the inner ENGINE is not: just set default one. + inner_table_engine = std::make_shared(); + create.setTargetInnerEngine(target_kind, inner_table_engine); + } + + /// Set engine by default. 
+ setInnerEngineByDefault(target_kind, *inner_table_engine); + } +} + + +void TimeSeriesDefinitionNormalizer::setInnerEngineByDefault(ViewTarget::Kind inner_table_kind, ASTStorage & inner_storage_def) const +{ + switch (inner_table_kind) + { + case ViewTarget::Data: + { + inner_storage_def.set(inner_storage_def.engine, makeASTFunction("MergeTree")); + inner_storage_def.engine->no_empty_args = false; + + if (!inner_storage_def.order_by && !inner_storage_def.primary_key && inner_storage_def.engine->name.ends_with("MergeTree")) + { + inner_storage_def.set(inner_storage_def.order_by, + makeASTFunction("tuple", + std::make_shared(TimeSeriesColumnNames::ID), + std::make_shared(TimeSeriesColumnNames::Timestamp))); + } + break; + } + + case ViewTarget::Tags: + { + inner_storage_def.set(inner_storage_def.engine, makeASTFunction("ReplacingMergeTree")); + inner_storage_def.engine->no_empty_args = false; + + if (!inner_storage_def.order_by && !inner_storage_def.primary_key && inner_storage_def.engine->name.ends_with("MergeTree")) + { + inner_storage_def.set(inner_storage_def.primary_key, + std::make_shared(TimeSeriesColumnNames::MetricName)); + + ASTs order_by_list; + order_by_list.push_back(std::make_shared(TimeSeriesColumnNames::MetricName)); + order_by_list.push_back(std::make_shared(TimeSeriesColumnNames::ID)); + + auto order_by_tuple = std::make_shared(); + order_by_tuple->name = "tuple"; + auto arguments_list = std::make_shared(); + arguments_list->children = std::move(order_by_list); + order_by_tuple->arguments = arguments_list; + inner_storage_def.set(inner_storage_def.order_by, order_by_tuple); + } + break; + } + + case ViewTarget::Metrics: + { + inner_storage_def.set(inner_storage_def.engine, makeASTFunction("ReplacingMergeTree")); + inner_storage_def.engine->no_empty_args = false; + + if (!inner_storage_def.order_by && !inner_storage_def.primary_key && inner_storage_def.engine->name.ends_with("MergeTree")) + { + inner_storage_def.set(inner_storage_def.order_by, std::make_shared(TimeSeriesColumnNames::MetricFamilyName)); + } + break; + } + + default: + UNREACHABLE(); /// This function must not be called with any other `kind`. + } +} + +} diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.h b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.h new file mode 100644 index 00000000000..1f959eb3ce0 --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ASTColumnDeclaration; +class ASTCreateQuery; +struct ColumnDescription; +struct TimeSeriesSettings; + +/// Normalizes a TimeSeries table definition. +class TimeSeriesDefinitionNormalizer +{ +public: + /// Constructor stores a reference to argument `time_series_settings_` (it's unnecessary to copy it). + TimeSeriesDefinitionNormalizer(StorageID time_series_storage_id_, + std::reference_wrapper time_series_settings_, + const ASTCreateQuery * as_create_query_); + + /// Adds missing columns to the definition and reorders all the columns in the canonical way. + /// Also adds engines of inner tables to the definition if they aren't specified yet. + /// The `as_table_create_query` parameter must be nullptr if it isn't a "CREATE AS query". + void normalize(ASTCreateQuery & create_query) const; + +private: + /// Reorders existing columns in the canonical way. + void reorderColumns(ASTCreateQuery & create) const; + + /// Adds missing columns with data types set by default.. 
+ void addMissingColumns(ASTCreateQuery & create) const; + + /// Adds the DEFAULT expression for the 'id' column if it isn't specified yet. + void addMissingDefaultForIDColumn(ASTCreateQuery & create) const; + + /// Generates a formulae for calculating the identifier of a time series from the metric name and all the tags. + ASTPtr chooseIDAlgorithm(const ASTColumnDeclaration & id_column) const; + + /// Copies the definitions of inner engines from "CREATE AS
" if this is that kind of query. + void addMissingInnerEnginesFromAsTable(ASTCreateQuery & create) const; + + /// Adds engines of inner tables to the definition if they aren't specified yet. + void addMissingInnerEngines(ASTCreateQuery & create) const; + + /// Sets the engine of an inner table by default. + void setInnerEngineByDefault(ViewTarget::Kind inner_table_kind, ASTStorage & inner_storage_def) const; + + const StorageID time_series_storage_id; + const TimeSeriesSettings & time_series_settings; + const ASTCreateQuery * as_create_query = nullptr; +}; + +} diff --git a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp new file mode 100644 index 00000000000..e43cba01cdb --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp @@ -0,0 +1,163 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +TimeSeriesInnerTablesCreator::TimeSeriesInnerTablesCreator(ContextPtr context_, + StorageID time_series_storage_id_, + std::reference_wrapper time_series_columns_, + std::reference_wrapper time_series_settings_) + : WithContext(context_) + , time_series_storage_id(std::move(time_series_storage_id_)) + , time_series_columns(time_series_columns_) + , time_series_settings(time_series_settings_) +{ +} + +TimeSeriesInnerTablesCreator::~TimeSeriesInnerTablesCreator() = default; + + +ColumnsDescription TimeSeriesInnerTablesCreator::getInnerTableColumnsDescription(ViewTarget::Kind inner_table_kind) const +{ + ColumnsDescription columns; + + switch (inner_table_kind) + { + case ViewTarget::Data: + { + /// Column "id". + { + auto id_column = time_series_columns.get(TimeSeriesColumnNames::ID); + /// The expression for calculating the identifier of a time series can be transferred only to the "tags" inner table + /// (because it usually depends on columns like "metric_name" or "all_tags"). + id_column.default_desc = {}; + columns.add(std::move(id_column)); + } + + /// Column "timestamp". + columns.add(time_series_columns.get(TimeSeriesColumnNames::Timestamp)); + + /// Column "value". + columns.add(time_series_columns.get(TimeSeriesColumnNames::Value)); + break; + } + + case ViewTarget::Tags: + { + /// Column "id". + columns.add(time_series_columns.get(TimeSeriesColumnNames::ID)); + + /// Column "metric_name". + columns.add(time_series_columns.get(TimeSeriesColumnNames::MetricName)); + + /// Columns corresponding to specific tags specified in the "tags_to_columns" setting. + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + columns.add(time_series_columns.get(column_name)); + } + + /// Column "tags". + columns.add(time_series_columns.get(TimeSeriesColumnNames::Tags)); + + /// Column "all_tags". + ColumnDescription all_tags_column = time_series_columns.get(TimeSeriesColumnNames::AllTags); + /// Column "all_tags" is here only to calculate the identifier of a time series for the "id" column, so it can be ephemeral. 
+ all_tags_column.default_desc.kind = ColumnDefaultKind::Ephemeral; + if (!all_tags_column.default_desc.expression) + { + all_tags_column.default_desc.ephemeral_default = true; + all_tags_column.default_desc.expression = makeASTFunction("defaultValueOfTypeName", std::make_shared(all_tags_column.type->getName())); + } + columns.add(std::move(all_tags_column)); + + break; + } + + case ViewTarget::Metrics: + { + columns.add(time_series_columns.get(TimeSeriesColumnNames::MetricFamilyName)); + columns.add(time_series_columns.get(TimeSeriesColumnNames::Type)); + columns.add(time_series_columns.get(TimeSeriesColumnNames::Unit)); + columns.add(time_series_columns.get(TimeSeriesColumnNames::Help)); + break; + } + + default: + UNREACHABLE(); + } + + return columns; +} + + +StorageID TimeSeriesInnerTablesCreator::getInnerTableID(ViewTarget::Kind inner_table_kind, const UUID & inner_table_uuid) const +{ + StorageID res = time_series_storage_id; + if (time_series_storage_id.hasUUID()) + res.table_name = fmt::format(".inner_id.{}.{}", toString(inner_table_kind), time_series_storage_id.uuid); + else + res.table_name = fmt::format(".inner.{}.{}", toString(inner_table_kind), time_series_storage_id.table_name); + res.uuid = inner_table_uuid; + return res; +} + + +std::shared_ptr TimeSeriesInnerTablesCreator::getInnerTableCreateQuery( + ViewTarget::Kind inner_table_kind, + const UUID & inner_table_uuid, + const std::shared_ptr inner_storage_def) const +{ + auto manual_create_query = std::make_shared(); + + auto inner_table_id = getInnerTableID(inner_table_kind, inner_table_uuid); + manual_create_query->setDatabase(inner_table_id.database_name); + manual_create_query->setTable(inner_table_id.table_name); + manual_create_query->uuid = inner_table_id.uuid; + manual_create_query->has_uuid = inner_table_id.uuid != UUIDHelpers::Nil; + + auto new_columns_list = std::make_shared(); + new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(getInnerTableColumnsDescription(inner_table_kind))); + manual_create_query->set(manual_create_query->columns_list, new_columns_list); + + if (inner_storage_def) + manual_create_query->set(manual_create_query->storage, inner_storage_def->clone()); + + return manual_create_query; +} + +StorageID TimeSeriesInnerTablesCreator::createInnerTable( + ViewTarget::Kind inner_table_kind, + const UUID & inner_table_uuid, + const std::shared_ptr inner_storage_def) const +{ + /// We will make a query to create the inner target table. + auto create_context = Context::createCopy(getContext()); + + auto manual_create_query = getInnerTableCreateQuery(inner_table_kind, inner_table_uuid, inner_storage_def); + + /// Create the inner target table. + InterpreterCreateQuery create_interpreter(manual_create_query, create_context); + create_interpreter.setInternal(true); + create_interpreter.execute(); + + return DatabaseCatalog::instance().getTable({manual_create_query->getDatabase(), manual_create_query->getTable()}, getContext())->getStorageID(); +} + +} diff --git a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h new file mode 100644 index 00000000000..a59bd2107bb --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ASTCreateQuery; +class ColumnsDescription; +struct TimeSeriesSettings; + +/// Generates inner tables for the TimeSeries table engine. 
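+///
+/// A rough sketch of the three inner tables, assuming the default engines chosen by
+/// TimeSeriesDefinitionNormalizer::setInnerEngineByDefault() and the column sets built by
+/// getInnerTableColumnsDescription() (illustration only, not a literal definition):
+///     data:    (id, timestamp, value)                         ENGINE = MergeTree ORDER BY (id, timestamp)
+///     tags:    (id, metric_name, <tag columns>, tags, all_tags EPHEMERAL)
+///              ENGINE = ReplacingMergeTree PRIMARY KEY metric_name ORDER BY (metric_name, id)
+///     metrics: (metric_family_name, type, unit, help)         ENGINE = ReplacingMergeTree ORDER BY metric_family_name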
+class TimeSeriesInnerTablesCreator : public WithContext +{ +public: + /// Constructor stores references to arguments `time_series_columns_` and `time_series_settings_` (it's unnecessary to copy them). + TimeSeriesInnerTablesCreator(ContextPtr context_, + StorageID time_series_storage_id_, + std::reference_wrapper time_series_columns_, + std::reference_wrapper time_series_settings_); + + ~TimeSeriesInnerTablesCreator(); + + /// Returns a column description of an inner table. + ColumnsDescription getInnerTableColumnsDescription(ViewTarget::Kind inner_table_kind) const; + + /// Returns a StorageID of an inner table. + StorageID getInnerTableID(ViewTarget::Kind inner_table_kind, const UUID & inner_table_uuid) const; + + /// Generates a CREATE TABLE query for an inner table. + std::shared_ptr getInnerTableCreateQuery(ViewTarget::Kind inner_table_kind, + const UUID & inner_table_uuid, + const std::shared_ptr inner_storage_def) const; + + /// Creates an inner table. + StorageID createInnerTable(ViewTarget::Kind inner_table_kind, + const UUID & inner_table_uuid, + const std::shared_ptr inner_storage_def) const; + +private: + const StorageID time_series_storage_id; + const ColumnsDescription & time_series_columns; + const TimeSeriesSettings & time_series_settings; +}; + +} diff --git a/src/Storages/TimeSeries/TimeSeriesSettings.cpp b/src/Storages/TimeSeries/TimeSeriesSettings.cpp new file mode 100644 index 00000000000..3a15be59191 --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesSettings.cpp @@ -0,0 +1,34 @@ +#include + +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNKNOWN_SETTING; +} + +IMPLEMENT_SETTINGS_TRAITS(TimeSeriesSettingsTraits, LIST_OF_TIME_SERIES_SETTINGS) + +void TimeSeriesSettings::loadFromQuery(ASTStorage & storage_def) +{ + if (storage_def.settings) + { + try + { + applyChanges(storage_def.settings->changes); + } + catch (Exception & e) + { + if (e.code() == ErrorCodes::UNKNOWN_SETTING) + e.addMessage("for storage " + storage_def.engine->name); + throw; + } + } +} + +} diff --git a/src/Storages/TimeSeries/TimeSeriesSettings.h b/src/Storages/TimeSeries/TimeSeriesSettings.h new file mode 100644 index 00000000000..ea31aa8dac1 --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesSettings.h @@ -0,0 +1,25 @@ +#pragma once + +#include + + +namespace DB +{ +class ASTStorage; + +#define LIST_OF_TIME_SERIES_SETTINGS(M, ALIAS) \ + M(Map, tags_to_columns, Map{}, "Map specifying which tags should be put to separate columns of the 'tags' table. Syntax: {'tag1': 'column1', 'tag2' : column2, ...}", 0) \ + +DECLARE_SETTINGS_TRAITS(TimeSeriesSettingsTraits, LIST_OF_TIME_SERIES_SETTINGS) + +/// Settings for the TimeSeries table engine. +/// Could be loaded from a CREATE TABLE query (SETTINGS clause). For example: +/// CREATE TABLE mytable ENGINE = TimeSeries() SETTINGS tags_to_columns = {'job':'job', 'instance':'instance'} DATA ENGINE = ReplicatedMergeTree('zkpath', 'replica'), ... 
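+///
+/// With the example above, the inner 'tags' table is expected to get dedicated columns 'job' and 'instance'
+/// in addition to the generic 'tags' map, i.e. roughly: id, metric_name, job, instance, tags, all_tags
+/// (see TimeSeriesInnerTablesCreator::getInnerTableColumnsDescription() for how these columns are assembled).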
+struct TimeSeriesSettings : public BaseSettings +{ + void loadFromQuery(ASTStorage & storage_def); +}; + +using TimeSeriesSettingsPtr = std::shared_ptr; + +} diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index 8f33314397c..14474bf001d 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -26,6 +26,8 @@ void registerStorageGenerateRandom(StorageFactory & factory); void registerStorageExecutable(StorageFactory & factory); void registerStorageWindowView(StorageFactory & factory); void registerStorageLoop(StorageFactory & factory); +void registerStorageTimeSeries(StorageFactory & factory); + #if USE_RAPIDJSON || USE_SIMDJSON void registerStorageFuzzJSON(StorageFactory & factory); #endif @@ -126,6 +128,8 @@ void registerStorages() registerStorageExecutable(factory); registerStorageWindowView(factory); registerStorageLoop(factory); + registerStorageTimeSeries(factory); + #if USE_RAPIDJSON || USE_SIMDJSON registerStorageFuzzJSON(factory); #endif From 03ebab2ca52f8e7c5e8a96a692b9dc863bba2271 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Tue, 14 May 2024 17:08:14 +0200 Subject: [PATCH 1513/2361] Refactor factory of PrometheusRequestHandler to make extending it easier. --- programs/keeper/Keeper.cpp | 5 +- src/Server/HTTPHandlerFactory.cpp | 28 +-- src/Server/HTTPHandlerFactory.h | 18 +- src/Server/PrometheusMetricsWriter.cpp | 152 ++++++++-------- src/Server/PrometheusMetricsWriter.h | 37 ++-- src/Server/PrometheusRequestHandler.cpp | 171 +++++++++++++----- src/Server/PrometheusRequestHandler.h | 45 +++-- src/Server/PrometheusRequestHandlerConfig.h | 27 +++ .../PrometheusRequestHandlerFactory.cpp | 151 ++++++++++++++++ src/Server/PrometheusRequestHandlerFactory.h | 97 ++++++++++ 10 files changed, 534 insertions(+), 197 deletions(-) create mode 100644 src/Server/PrometheusRequestHandlerConfig.h create mode 100644 src/Server/PrometheusRequestHandlerFactory.cpp create mode 100644 src/Server/PrometheusRequestHandlerFactory.h diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index 783f60cb8ff..0cf3f5c86d6 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include "Core/Defines.h" @@ -509,14 +509,13 @@ try auto address = socketBindListen(socket, listen_host, port); socket.setReceiveTimeout(my_http_context->getReceiveTimeout()); socket.setSendTimeout(my_http_context->getSendTimeout()); - auto metrics_writer = std::make_shared(config, "prometheus", async_metrics); servers->emplace_back( listen_host, port_name, "Prometheus: http://" + address.toString(), std::make_unique( std::move(my_http_context), - createPrometheusMainHandlerFactory(*this, config_getter(), metrics_writer, "PrometheusHandler-factory"), + createKeeperPrometheusHandlerFactory(*this, config_getter(), async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params)); diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 5344b2d024b..2e7f16f59d7 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -1,18 +1,16 @@ -#include #include #include +#include +#include #include -#include #include #include "HTTPHandler.h" -#include "Server/PrometheusMetricsWriter.h" #include "StaticRequestHandler.h" #include "ReplicasStatusHandler.h" #include "InterserverIOHTTPHandler.h" -#include "PrometheusRequestHandler.h" #include "WebUIRequestHandler.h" @@ -124,7 +122,7 @@ static inline 
auto createHandlersFactoryFromConfig( } else if (handler_type == "prometheus") { - main_handler_factory->addHandler(createPrometheusHandlerFactory(server, config, async_metrics, prefix + "." + key)); + main_handler_factory->addHandler(createPrometheusHandlerFactoryForHTTPRule(server, config, prefix + "." + key, async_metrics)); } else if (handler_type == "replicas_status") { @@ -201,10 +199,7 @@ HTTPRequestHandlerFactoryPtr createHandlerFactory(IServer & server, const Poco:: else if (name == "InterserverIOHTTPHandler-factory" || name == "InterserverIOHTTPSHandler-factory") return createInterserverHTTPHandlerFactory(server, name); else if (name == "PrometheusHandler-factory") - { - auto metrics_writer = std::make_shared(config, "prometheus", async_metrics); - return createPrometheusMainHandlerFactory(server, config, metrics_writer, name); - } + return createPrometheusHandlerFactory(server, config, async_metrics, name); throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown HTTP handler factory name."); } @@ -291,20 +286,9 @@ void addDefaultHandlersFactory( ); factory.addHandler(query_handler); - /// We check that prometheus handler will be served on current (default) port. - /// Otherwise it will be created separately, see createHandlerFactory(...). - if (config.has("prometheus") && config.getInt("prometheus.port", 0) == 0) - { - auto writer = std::make_shared(config, "prometheus", async_metrics); - auto creator = [&server, writer] () -> std::unique_ptr - { - return std::make_unique(server, writer); - }; - auto prometheus_handler = std::make_shared>(std::move(creator)); - prometheus_handler->attachStrictPath(config.getString("prometheus.endpoint", "/metrics")); - prometheus_handler->allowGetAndHeadRequest(); + /// createPrometheusHandlerFactoryForHTTPRuleDefaults() can return nullptr if prometheus protocols must not be served on http port. 
+ if (auto prometheus_handler = createPrometheusHandlerFactoryForHTTPRuleDefaults(server, config, async_metrics)) factory.addHandler(prometheus_handler); - } } } diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index b4c32366463..db4bb73cbc4 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -1,15 +1,12 @@ #pragma once -#include -#include #include #include #include #include -#include - #include + namespace DB { @@ -19,6 +16,7 @@ namespace ErrorCodes } class IServer; +class AsynchronousMetrics; template class HandlingRuleHTTPHandlerFactory : public HTTPRequestHandlerFactory @@ -126,18 +124,6 @@ HTTPRequestHandlerFactoryPtr createReplicasStatusHandlerFactory(IServer & server const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); -HTTPRequestHandlerFactoryPtr -createPrometheusHandlerFactory(IServer & server, - const Poco::Util::AbstractConfiguration & config, - AsynchronousMetrics & async_metrics, - const std::string & config_prefix); - -HTTPRequestHandlerFactoryPtr createPrometheusMainHandlerFactory( - IServer & server, - const Poco::Util::AbstractConfiguration & config, - PrometheusMetricsWriterPtr metrics_writer, - const std::string & name); - /// @param server - used in handlers to check IServer::isCancelled() /// @param config - not the same as server.config(), since it can be newer /// @param async_metrics - used for prometheus (in case of prometheus.asynchronous_metrics=true) diff --git a/src/Server/PrometheusMetricsWriter.cpp b/src/Server/PrometheusMetricsWriter.cpp index 85eafbe4808..43370116015 100644 --- a/src/Server/PrometheusMetricsWriter.cpp +++ b/src/Server/PrometheusMetricsWriter.cpp @@ -1,13 +1,27 @@ #include "PrometheusMetricsWriter.h" -#include +#include +#include #include #include - -#include +#include #include "config.h" + +#if USE_NURAFT +namespace ProfileEvents +{ + extern const std::vector keeper_profile_events; +} + +namespace CurrentMetrics +{ + extern const std::vector keeper_metrics; +} +#endif + + namespace { @@ -107,100 +121,84 @@ void writeAsyncMetrics(DB::WriteBuffer & wb, const DB::AsynchronousMetricValues } -#if USE_NURAFT -namespace ProfileEvents -{ - extern const std::vector keeper_profile_events; -} - -namespace CurrentMetrics -{ - extern const std::vector keeper_metrics; -} -#endif - namespace DB { -PrometheusMetricsWriter::PrometheusMetricsWriter( - const Poco::Util::AbstractConfiguration & config, const std::string & config_name, - const AsynchronousMetrics & async_metrics_) - : async_metrics(async_metrics_) - , send_events(config.getBool(config_name + ".events", true)) - , send_metrics(config.getBool(config_name + ".metrics", true)) - , send_asynchronous_metrics(config.getBool(config_name + ".asynchronous_metrics", true)) - , send_errors(config.getBool(config_name + ".errors", true)) +void PrometheusMetricsWriter::writeEvents(WriteBuffer & wb) const { + for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i) + writeEvent(wb, i); } -void PrometheusMetricsWriter::write(WriteBuffer & wb) const +void PrometheusMetricsWriter::writeMetrics(WriteBuffer & wb) const { - if (send_events) + for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i) + writeMetric(wb, i); +} + +void PrometheusMetricsWriter::writeAsynchronousMetrics(WriteBuffer & wb, const AsynchronousMetrics & async_metrics) const +{ + writeAsyncMetrics(wb, async_metrics.getValues()); +} + +void PrometheusMetricsWriter::writeErrors(WriteBuffer & wb) const +{ + 
size_t total_count = 0; + + for (size_t i = 0, end = ErrorCodes::end(); i < end; ++i) { - for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i) - writeEvent(wb, i); - } + const auto & error = ErrorCodes::values[i].get(); + std::string_view name = ErrorCodes::getName(static_cast(i)); - if (send_metrics) - { - for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i) - writeMetric(wb, i); - } + if (name.empty()) + continue; - if (send_asynchronous_metrics) - writeAsyncMetrics(wb, async_metrics.getValues()); + std::string key{error_metrics_prefix + toString(name)}; + std::string help = fmt::format("The number of {} errors since last server restart", name); - if (send_errors) - { - size_t total_count = 0; - - for (size_t i = 0, end = ErrorCodes::end(); i < end; ++i) - { - const auto & error = ErrorCodes::values[i].get(); - std::string_view name = ErrorCodes::getName(static_cast(i)); - - if (name.empty()) - continue; - - std::string key{error_metrics_prefix + toString(name)}; - std::string help = fmt::format("The number of {} errors since last server restart", name); - - writeOutLine(wb, "# HELP", key, help); - writeOutLine(wb, "# TYPE", key, "counter"); - /// We are interested in errors which are happened only on this server. - writeOutLine(wb, key, error.local.count); - - total_count += error.local.count; - } - - /// Write the total number of errors as a separate metric - std::string key{error_metrics_prefix + toString("ALL")}; - writeOutLine(wb, "# HELP", key, "The total number of errors since last server restart"); + writeOutLine(wb, "# HELP", key, help); writeOutLine(wb, "# TYPE", key, "counter"); - writeOutLine(wb, key, total_count); + /// We are interested in errors which are happened only on this server. 
+ writeOutLine(wb, key, error.local.count); + + total_count += error.local.count; } + /// Write the total number of errors as a separate metric + std::string key{error_metrics_prefix + toString("ALL")}; + writeOutLine(wb, "# HELP", key, "The total number of errors since last server restart"); + writeOutLine(wb, "# TYPE", key, "counter"); + writeOutLine(wb, key, total_count); } -void KeeperPrometheusMetricsWriter::write([[maybe_unused]] WriteBuffer & wb) const + +void KeeperPrometheusMetricsWriter::writeEvents([[maybe_unused]] WriteBuffer & wb) const { #if USE_NURAFT - if (send_events) - { - for (auto event : ProfileEvents::keeper_profile_events) - writeEvent(wb, event); - } - - if (send_metrics) - { - for (auto metric : CurrentMetrics::keeper_metrics) - writeMetric(wb, metric); - } - - if (send_asynchronous_metrics) - writeAsyncMetrics(wb, async_metrics.getValues()); + for (auto event : ProfileEvents::keeper_profile_events) + writeEvent(wb, event); #endif } +void KeeperPrometheusMetricsWriter::writeMetrics([[maybe_unused]] WriteBuffer & wb) const +{ +#if USE_NURAFT + for (auto metric : CurrentMetrics::keeper_metrics) + writeMetric(wb, metric); +#endif +} + +void KeeperPrometheusMetricsWriter::writeAsynchronousMetrics([[maybe_unused]] WriteBuffer & wb, + [[maybe_unused]] const AsynchronousMetrics & async_metrics) const +{ +#if USE_NURAFT + writeAsyncMetrics(wb, async_metrics.getValues()); +#endif +} + +void KeeperPrometheusMetricsWriter::writeErrors(WriteBuffer &) const +{ +} + } diff --git a/src/Server/PrometheusMetricsWriter.h b/src/Server/PrometheusMetricsWriter.h index 933ad909ee0..cf2587d80b8 100644 --- a/src/Server/PrometheusMetricsWriter.h +++ b/src/Server/PrometheusMetricsWriter.h @@ -1,44 +1,33 @@ #pragma once -#include - -#include -#include -#include - -#include +#include namespace DB { +class AsynchronousMetrics; +class WriteBuffer; /// Write metrics in Prometheus format class PrometheusMetricsWriter { public: - PrometheusMetricsWriter( - const Poco::Util::AbstractConfiguration & config, const std::string & config_name, - const AsynchronousMetrics & async_metrics_); - - virtual void write(WriteBuffer & wb) const; - virtual ~PrometheusMetricsWriter() = default; -protected: - const AsynchronousMetrics & async_metrics; - const bool send_events; - const bool send_metrics; - const bool send_asynchronous_metrics; - const bool send_errors; + virtual void writeMetrics(WriteBuffer & wb) const; + virtual void writeAsynchronousMetrics(WriteBuffer & wb, const AsynchronousMetrics & async_metrics) const; + virtual void writeEvents(WriteBuffer & wb) const; + virtual void writeErrors(WriteBuffer & wb) const; }; + class KeeperPrometheusMetricsWriter : public PrometheusMetricsWriter { - using PrometheusMetricsWriter::PrometheusMetricsWriter; - - void write(WriteBuffer & wb) const override; +public: + void writeMetrics(WriteBuffer & wb) const override; + void writeAsynchronousMetrics(WriteBuffer & wb, const AsynchronousMetrics & async_metrics) const override; + void writeEvents(WriteBuffer & wb) const override; + void writeErrors(WriteBuffer & wb) const override; }; -using PrometheusMetricsWriterPtr = std::shared_ptr; - } diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 87c106c3fc0..7aabe07753d 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -1,74 +1,159 @@ #include +#include #include #include -#include #include -#include -#include -#include -#include "Server/PrometheusMetricsWriter.h" - 
-#include +#include +#include "config.h" namespace DB { -void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int SUPPORT_IS_DISABLED; +} + +/// Base implementation of a prometheus protocol. +class PrometheusRequestHandler::Impl +{ +public: + explicit Impl(PrometheusRequestHandler & parent) : parent_ref(parent) {} + virtual ~Impl() = default; + virtual void beforeHandlingRequest(HTTPServerRequest & /* request */) {} + virtual void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) = 0; + virtual void onException() {} + +protected: + PrometheusRequestHandler & parent() { return parent_ref; } + IServer & server() { return parent().server; } + const PrometheusRequestHandlerConfig & config() { return parent().config; } + PrometheusMetricsWriter & metrics_writer() { return *parent().metrics_writer; } + LoggerPtr log() { return parent().log; } + WriteBuffer & getOutputStream(HTTPServerResponse & response) { return parent().getOutputStream(response); } + +private: + PrometheusRequestHandler & parent_ref; +}; + + +/// Implementation of the exposing metrics protocol. +class PrometheusRequestHandler::ExposeMetricsImpl : public Impl +{ +public: + explicit ExposeMetricsImpl(PrometheusRequestHandler & parent) : Impl(parent) {} + + void beforeHandlingRequest(HTTPServerRequest & request) override + { + LOG_INFO(log(), "Handling metrics request from {}", request.get("User-Agent")); + chassert(config().type == PrometheusRequestHandlerConfig::Type::ExposeMetrics); + } + + void handleRequest(HTTPServerRequest & /* request */, HTTPServerResponse & response) override + { + response.setContentType("text/plain; version=0.0.4; charset=UTF-8"); + auto & out = getOutputStream(response); + + if (config().expose_events) + metrics_writer().writeEvents(out); + + if (config().expose_metrics) + metrics_writer().writeMetrics(out); + + if (config().expose_asynchronous_metrics) + metrics_writer().writeAsynchronousMetrics(out, parent().async_metrics); + + if (config().expose_errors) + metrics_writer().writeErrors(out); + } +}; + + +PrometheusRequestHandler::PrometheusRequestHandler( + IServer & server_, + const PrometheusRequestHandlerConfig & config_, + const AsynchronousMetrics & async_metrics_, + std::shared_ptr metrics_writer_) + : server(server_) + , config(config_) + , async_metrics(async_metrics_) + , metrics_writer(metrics_writer_) + , log(getLogger("PrometheusRequestHandler")) +{ + createImpl(); +} + +PrometheusRequestHandler::~PrometheusRequestHandler() = default; + +void PrometheusRequestHandler::createImpl() +{ + switch (config.type) + { + case PrometheusRequestHandlerConfig::Type::ExposeMetrics: + { + impl = std::make_unique(*this); + return; + } + } + UNREACHABLE(); +} + +void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event_) { try { - const auto & config = server.config(); - unsigned keep_alive_timeout = config.getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT); + write_event = write_event_; + http_method = request.getMethod(); + chassert(!write_buffer_from_response); - /// In order to make keep-alive works. + /// Make keep-alive works. 
if (request.getVersion() == HTTPServerRequest::HTTP_1_1) response.setChunkedTransferEncoding(true); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response, config.keep_alive_timeout); - response.setContentType("text/plain; version=0.0.4; charset=UTF-8"); + impl->beforeHandlingRequest(request); + impl->handleRequest(request, response); - WriteBufferFromHTTPServerResponse wb(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout, write_event); - metrics_writer->write(wb); - wb.finalize(); + if (write_buffer_from_response) + { + write_buffer_from_response->finalize(); + write_buffer_from_response = nullptr; + } } catch (...) { - tryLogCurrentException("PrometheusRequestHandler"); + tryLogCurrentException(log); + tryCallOnException(); + + /// `write_buffer_from_response` must be finalized already or at least tried to finalize. + write_buffer_from_response = nullptr; } } -HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( - IServer & server, - const Poco::Util::AbstractConfiguration & config, - AsynchronousMetrics & async_metrics, - const std::string & config_prefix) +WriteBuffer & PrometheusRequestHandler::getOutputStream(HTTPServerResponse & response) { - auto writer = std::make_shared(config, config_prefix + ".handler", async_metrics); - auto creator = [&server, writer]() -> std::unique_ptr - { - return std::make_unique(server, writer); - }; - - auto factory = std::make_shared>(std::move(creator)); - factory->addFiltersFromConfig(config, config_prefix); - return factory; + if (write_buffer_from_response) + return *write_buffer_from_response; + write_buffer_from_response = std::make_unique( + response, http_method == HTTPRequest::HTTP_HEAD, config.keep_alive_timeout, write_event); + return *write_buffer_from_response; } -HTTPRequestHandlerFactoryPtr createPrometheusMainHandlerFactory( - IServer & server, const Poco::Util::AbstractConfiguration & config, PrometheusMetricsWriterPtr metrics_writer, const std::string & name) +void PrometheusRequestHandler::tryCallOnException() { - auto factory = std::make_shared(name); - auto creator = [&server, metrics_writer] + try { - return std::make_unique(server, metrics_writer); - }; + if (impl) + impl->onException(); + } + catch (...) + { + tryLogCurrentException(log, "onException"); + } +} - auto handler = std::make_shared>(std::move(creator)); - handler->attachStrictPath(config.getString("prometheus.endpoint", "/metrics")); - handler->allowGetAndHeadRequest(); - factory->addHandler(handler); - return factory; -} } diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index d120752c8c5..c850cc67277 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -1,28 +1,49 @@ #pragma once #include +#include -#include "PrometheusMetricsWriter.h" namespace DB { - +class AsynchronousMetrics; class IServer; +class PrometheusMetricsWriter; +class WriteBufferFromHTTPServerResponse; +/// Handles requests for prometheus protocols (expose_metrics). 
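+///
+/// For "expose_metrics" the response body uses the Prometheus text exposition format
+/// ("text/plain; version=0.0.4; charset=UTF-8"). An illustrative sample for the error counters written by
+/// PrometheusMetricsWriter::writeErrors() (the metric name prefix is an assumption, values are examples):
+///     # HELP ClickHouseErrorMetric_ALL The total number of errors since last server restart
+///     # TYPE ClickHouseErrorMetric_ALL counter
+///     ClickHouseErrorMetric_ALL 0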
class PrometheusRequestHandler : public HTTPRequestHandler { -private: - IServer & server; - PrometheusMetricsWriterPtr metrics_writer; - public: - PrometheusRequestHandler(IServer & server_, PrometheusMetricsWriterPtr metrics_writer_) - : server(server_) - , metrics_writer(std::move(metrics_writer_)) - { - } + PrometheusRequestHandler(IServer & server_, const PrometheusRequestHandlerConfig & config_, + const AsynchronousMetrics & async_metrics_, std::shared_ptr metrics_writer_); + ~PrometheusRequestHandler() override; - void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event_) override; + +private: + /// Creates an internal implementation based on which PrometheusRequestHandlerConfig::Type is used. + void createImpl(); + + /// Returns the write buffer used for the current HTTP response. + WriteBuffer & getOutputStream(HTTPServerResponse & response); + + /// Calls onException() in a try-catch block. + void tryCallOnException(); + + IServer & server; + const PrometheusRequestHandlerConfig config; + const AsynchronousMetrics & async_metrics; + const std::shared_ptr metrics_writer; + const LoggerPtr log; + + class Impl; + class ExposeMetricsImpl; + std::unique_ptr impl; + + String http_method; + std::unique_ptr write_buffer_from_response; + ProfileEvents::Event write_event; }; } diff --git a/src/Server/PrometheusRequestHandlerConfig.h b/src/Server/PrometheusRequestHandlerConfig.h new file mode 100644 index 00000000000..d584e501985 --- /dev/null +++ b/src/Server/PrometheusRequestHandlerConfig.h @@ -0,0 +1,27 @@ +#pragma once + + +namespace DB +{ + +/// Configuration of a Prometheus protocol handler after it's parsed from a configuration file. +struct PrometheusRequestHandlerConfig +{ + enum class Type + { + /// Exposes ClickHouse metrics for scraping by Prometheus. + ExposeMetrics, + }; + + Type type = Type::ExposeMetrics; + + /// Settings for type ExposeMetrics: + bool expose_metrics = false; + bool expose_asynchronous_metrics = false; + bool expose_events = false; + bool expose_errors = false; + + size_t keep_alive_timeout = 0; +}; + +} diff --git a/src/Server/PrometheusRequestHandlerFactory.cpp b/src/Server/PrometheusRequestHandlerFactory.cpp new file mode 100644 index 00000000000..d06aa5fa569 --- /dev/null +++ b/src/Server/PrometheusRequestHandlerFactory.cpp @@ -0,0 +1,151 @@ +#include + +#include +#include +#include +#include + + +namespace DB +{ + +namespace +{ + /// Parses common configuration which is attached to any other configuration. 
The common configuration looks like this: + /// 30 + void parseCommonConfig(const Poco::Util::AbstractConfiguration & config, PrometheusRequestHandlerConfig & res) + { + res.keep_alive_timeout = config.getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT); + } + + /// Parses a configuration like this: + /// + /// true + /// true + /// true + /// true + PrometheusRequestHandlerConfig parseExposeMetricsConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix) + { + PrometheusRequestHandlerConfig res; + res.type = PrometheusRequestHandlerConfig::Type::ExposeMetrics; + res.expose_metrics = config.getBool(config_prefix + ".metrics", true); + res.expose_asynchronous_metrics = config.getBool(config_prefix + ".asynchronous_metrics", true); + res.expose_events = config.getBool(config_prefix + ".events", true); + res.expose_errors = config.getBool(config_prefix + ".errors", true); + parseCommonConfig(config, res); + return res; + } + + /// Returns true if the protocol represented by a passed config can be handled. + bool canBeHandled(const PrometheusRequestHandlerConfig & config, bool for_keeper) + { + /// The standalone ClickHouse Keeper can only expose its metrics. + /// It can't handle other Prometheus protocols. + return !for_keeper || (config.type == PrometheusRequestHandlerConfig::Type::ExposeMetrics); + } + + /// Creates a writer which serializes exposing metrics. + std::shared_ptr createPrometheusMetricWriter(bool for_keeper) + { + if (for_keeper) + return std::make_unique(); + else + return std::make_unique(); + } + + /// Base function for making a factory for PrometheusRequestHandler. This function can return nullptr. + std::shared_ptr> createPrometheusHandlerFactoryFromConfig( + IServer & server, + const AsynchronousMetrics & async_metrics, + const PrometheusRequestHandlerConfig & config, + bool for_keeper) + { + if (!canBeHandled(config, for_keeper)) + return nullptr; + auto metric_writer = createPrometheusMetricWriter(for_keeper); + auto creator = [&server, &async_metrics, config, metric_writer]() -> std::unique_ptr + { + return std::make_unique(server, config, async_metrics, metric_writer); + }; + return std::make_shared>(std::move(creator)); + } + + /// Generic function for createPrometheusHandlerFactory() and createKeeperPrometheusHandlerFactory(). 
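+    /// An illustrative sketch of both configuration layouts handled here (the expose_metrics element names
+    /// follow the keys read in this file; the <my_rule> and <url> parts and all values are assumptions):
+    ///     <prometheus>
+    ///         <port>9363</port>
+    ///         <endpoint>/metrics</endpoint>
+    ///         <metrics>true</metrics>
+    ///         <asynchronous_metrics>true</asynchronous_metrics>
+    ///         <events>true</events>
+    ///         <errors>true</errors>
+    ///     </prometheus>
+    /// or, with an explicit <handlers> section:
+    ///     <prometheus>
+    ///         <port>9363</port>
+    ///         <handlers>
+    ///             <my_rule>
+    ///                 <url>/metrics</url>
+    ///                 <handler>
+    ///                     <type>expose_metrics</type>
+    ///                     <metrics>true</metrics>
+    ///                     <asynchronous_metrics>true</asynchronous_metrics>
+    ///                     <events>true</events>
+    ///                     <errors>true</errors>
+    ///                 </handler>
+    ///             </my_rule>
+    ///         </handlers>
+    ///     </prometheus>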
+ HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryImpl( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics, + const String & name, + bool for_keeper) + { + auto factory = std::make_shared(name); + + auto parsed_config = parseExposeMetricsConfig(config, "prometheus"); + if (auto handler = createPrometheusHandlerFactoryFromConfig(server, asynchronous_metrics, parsed_config, for_keeper)) + { + String endpoint = config.getString("prometheus.endpoint", "/metrics"); + handler->attachStrictPath(endpoint); + handler->allowGetAndHeadRequest(); + factory->addHandler(handler); + } + + return factory; + } + +} + + +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics, + const String & name) +{ + return createPrometheusHandlerFactoryImpl(server, config, asynchronous_metrics, name, /* for_keeper= */ false); +} + + +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryForHTTPRule( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const String & config_prefix, + const AsynchronousMetrics & asynchronous_metrics) +{ + auto parsed_config = parseExposeMetricsConfig(config, config_prefix + ".handler"); + auto handler = createPrometheusHandlerFactoryFromConfig(server, asynchronous_metrics, parsed_config, /* for_keeper= */ false); + chassert(handler); /// `handler` can't be nullptr here because `for_keeper` is false. + handler->addFiltersFromConfig(config, config_prefix); + return handler; +} + + +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryForHTTPRuleDefaults( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics) +{ + /// The "defaults" HTTP handler should serve the prometheus exposing metrics protocol on the http port + /// only if it isn't already served on its own port and if there is no section. + if (!config.has("prometheus") || config.getInt("prometheus.port", 0) || config.has("prometheus.handlers")) + return nullptr; + + auto parsed_config = parseExposeMetricsConfig(config, "prometheus"); + String endpoint = config.getString("prometheus.endpoint", "/metrics"); + auto handler = createPrometheusHandlerFactoryFromConfig(server, asynchronous_metrics, parsed_config, /* for_keeper= */ false); + chassert(handler); /// `handler` can't be nullptr here because `for_keeper` is false. + handler->attachStrictPath(endpoint); + handler->allowGetAndHeadRequest(); + return handler; +} + + +HTTPRequestHandlerFactoryPtr createKeeperPrometheusHandlerFactory( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics, + const String & name) +{ + return createPrometheusHandlerFactoryImpl(server, config, asynchronous_metrics, name, /* for_keeper= */ true); +} + +} diff --git a/src/Server/PrometheusRequestHandlerFactory.h b/src/Server/PrometheusRequestHandlerFactory.h new file mode 100644 index 00000000000..fb19e9d4070 --- /dev/null +++ b/src/Server/PrometheusRequestHandlerFactory.h @@ -0,0 +1,97 @@ +#pragma once + +#include +#include + + +namespace Poco::Util { class AbstractConfiguration; } + +namespace DB +{ + +class IServer; +class HTTPRequestHandlerFactory; +using HTTPRequestHandlerFactoryPtr = std::shared_ptr; +class AsynchronousMetrics; + +/// Makes a handler factory to handle prometheus protocols. 
+/// Expects a configuration like this: +/// +/// +/// 1234 +/// /metric +/// true +/// true +/// true +/// true +/// +/// +/// An alternative port to serve prometheus protocols can be specified in the section: +/// +/// +/// +/// 4321 +/// prometheus +/// +/// +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics, + const String & name); + +/// Makes a HTTP handler factory to handle requests for prometheus metrics for a HTTP rule in the section. +/// Expects a configuration like this: +/// +/// 8123 +/// +/// +/// /metrics +/// +/// prometheus +/// true +/// true +/// true +/// true +/// +/// +/// +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryForHTTPRule( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const String & config_prefix, /// path to "http_handlers.my_handler_1" + const AsynchronousMetrics & asynchronous_metrics); + +/// Makes a HTTP Handler factory to handle requests for prometheus metrics as a part of the default HTTP rule in the section. +/// Expects a configuration like this: +/// +/// 8123 +/// +/// +/// +/// +/// /metric +/// true +/// true +/// true +/// true +/// +/// +/// The "defaults" HTTP handler should serve the prometheus exposing metrics protocol on the http port +/// only if it isn't already served on its own port , +/// and also if there is no section in the configuration +/// (because if that section exists then it must be in charge of how prometheus protocols are handled). +HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryForHTTPRuleDefaults( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics); + +/// Makes a handler factory to handle prometheus protocols. +/// Supports the "expose_metrics" protocol only. +HTTPRequestHandlerFactoryPtr createKeeperPrometheusHandlerFactory( + IServer & server, + const Poco::Util::AbstractConfiguration & config, + const AsynchronousMetrics & asynchronous_metrics, + const String & name); + +} From 03b93059566690e4f509d7705abe1e670c4f73dd Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 11 Jul 2024 22:45:02 +0200 Subject: [PATCH 1514/2361] Send exception from PrometheusRequestHandler back to client. Set thread name. 
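
The exception text is now written into the HTTP response. Whether the stacktrace is included is
controlled by the new setting read from the server configuration, a sketch (the default is assumed
to be true, matching parseCommonConfig()):

    <prometheus>
        <enable_stacktrace>true</enable_stacktrace>
    </prometheus>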
--- src/Server/PrometheusRequestHandler.cpp | 32 +++++++++++++++++++ src/Server/PrometheusRequestHandler.h | 4 +++ src/Server/PrometheusRequestHandlerConfig.h | 1 + .../PrometheusRequestHandlerFactory.cpp | 4 +++ 4 files changed, 41 insertions(+) diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 7aabe07753d..219e6ae55da 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -1,8 +1,10 @@ #include #include +#include #include #include +#include #include #include #include "config.h" @@ -103,6 +105,8 @@ void PrometheusRequestHandler::createImpl() void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event_) { + setThreadName("PrometheusHndlr"); + try { write_event = write_event_; @@ -129,6 +133,10 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe tryLogCurrentException(log); tryCallOnException(); + ExecutionStatus status = ExecutionStatus::fromCurrentException("", send_stacktrace); + trySendExceptionToClient(status.message, status.code, request, response); + tryCallOnException(); + /// `write_buffer_from_response` must be finalized already or at least tried to finalize. write_buffer_from_response = nullptr; } @@ -143,6 +151,30 @@ WriteBuffer & PrometheusRequestHandler::getOutputStream(HTTPServerResponse & res return *write_buffer_from_response; } +void PrometheusRequestHandler::trySendExceptionToClient(const String & exception_message, int exception_code, HTTPServerRequest & request, HTTPServerResponse & response) +{ + try + { + sendExceptionToHTTPClient(exception_message, exception_code, request, response, write_buffer_from_response.get(), log); + } + catch (...) + { + tryLogCurrentException(log, "Couldn't send exception to client"); + + if (write_buffer_from_response) + { + try + { + write_buffer_from_response->finalize(); + } + catch (...) + { + tryLogCurrentException(log, "Cannot flush data to client (after sending exception)"); + } + } + } +} + void PrometheusRequestHandler::tryCallOnException() { try diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index c850cc67277..1d985a23fbe 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -28,6 +28,9 @@ private: /// Returns the write buffer used for the current HTTP response. WriteBuffer & getOutputStream(HTTPServerResponse & response); + /// Writes the current exception to the response. + void trySendExceptionToClient(const String & exception_message, int exception_code, HTTPServerRequest & request, HTTPServerResponse & response); + /// Calls onException() in a try-catch block. 
void tryCallOnException(); @@ -42,6 +45,7 @@ private: std::unique_ptr impl; String http_method; + bool send_stacktrace = false; std::unique_ptr write_buffer_from_response; ProfileEvents::Event write_event; }; diff --git a/src/Server/PrometheusRequestHandlerConfig.h b/src/Server/PrometheusRequestHandlerConfig.h index d584e501985..bae0a8e1199 100644 --- a/src/Server/PrometheusRequestHandlerConfig.h +++ b/src/Server/PrometheusRequestHandlerConfig.h @@ -22,6 +22,7 @@ struct PrometheusRequestHandlerConfig bool expose_errors = false; size_t keep_alive_timeout = 0; + bool is_stacktrace_enabled = true; }; } diff --git a/src/Server/PrometheusRequestHandlerFactory.cpp b/src/Server/PrometheusRequestHandlerFactory.cpp index d06aa5fa569..35d736f6d3a 100644 --- a/src/Server/PrometheusRequestHandlerFactory.cpp +++ b/src/Server/PrometheusRequestHandlerFactory.cpp @@ -12,9 +12,13 @@ namespace DB namespace { /// Parses common configuration which is attached to any other configuration. The common configuration looks like this: + /// + /// true + /// /// 30 void parseCommonConfig(const Poco::Util::AbstractConfiguration & config, PrometheusRequestHandlerConfig & res) { + res.is_stacktrace_enabled = config.getBool("prometheus.enable_stacktrace", true); res.keep_alive_timeout = config.getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT); } From 7d88995f42bd1a12dd691ac0c67f8609ba6f9d2b Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 11 Jul 2024 22:11:16 +0200 Subject: [PATCH 1515/2361] Provide alternative way to setup configuration for exposing metrics using new section in configuration. --- .../PrometheusRequestHandlerFactory.cpp | 51 ++++++++++++++++--- src/Server/PrometheusRequestHandlerFactory.h | 19 +++++++ 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/src/Server/PrometheusRequestHandlerFactory.cpp b/src/Server/PrometheusRequestHandlerFactory.cpp index 35d736f6d3a..00b87515fc8 100644 --- a/src/Server/PrometheusRequestHandlerFactory.cpp +++ b/src/Server/PrometheusRequestHandlerFactory.cpp @@ -9,6 +9,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int UNKNOWN_ELEMENT_IN_CONFIG; +} + namespace { /// Parses common configuration which is attached to any other configuration. The common configuration looks like this: @@ -40,6 +45,22 @@ namespace return res; } + /// Parses a configuration like this: + /// expose_metrics + /// true + /// true + /// true + /// true + PrometheusRequestHandlerConfig parseHandlerConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix) + { + String type = config.getString(config_prefix + ".type"); + + if (type == "expose_metrics") + return parseExposeMetricsConfig(config, config_prefix); + else + throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown type {} is specified in the configuration for a prometheus protocol", type); + } + /// Returns true if the protocol represented by a passed config can be handled. 
bool canBeHandled(const PrometheusRequestHandlerConfig & config, bool for_keeper) { @@ -84,13 +105,31 @@ namespace { auto factory = std::make_shared(name); - auto parsed_config = parseExposeMetricsConfig(config, "prometheus"); - if (auto handler = createPrometheusHandlerFactoryFromConfig(server, asynchronous_metrics, parsed_config, for_keeper)) + if (config.has("prometheus.handlers")) { - String endpoint = config.getString("prometheus.endpoint", "/metrics"); - handler->attachStrictPath(endpoint); - handler->allowGetAndHeadRequest(); - factory->addHandler(handler); + Strings keys; + config.keys("prometheus.handlers", keys); + for (const String & key : keys) + { + String prefix = "prometheus.handlers." + key; + auto parsed_config = parseHandlerConfig(config, prefix + ".handler"); + if (auto handler = createPrometheusHandlerFactoryFromConfig(server, asynchronous_metrics, parsed_config, for_keeper)) + { + handler->addFiltersFromConfig(config, prefix); + factory->addHandler(handler); + } + } + } + else + { + auto parsed_config = parseExposeMetricsConfig(config, "prometheus"); + if (auto handler = createPrometheusHandlerFactoryFromConfig(server, asynchronous_metrics, parsed_config, for_keeper)) + { + String endpoint = config.getString("prometheus.endpoint", "/metrics"); + handler->attachStrictPath(endpoint); + handler->allowGetAndHeadRequest(); + factory->addHandler(handler); + } } return factory; diff --git a/src/Server/PrometheusRequestHandlerFactory.h b/src/Server/PrometheusRequestHandlerFactory.h index fb19e9d4070..50961ed0bc0 100644 --- a/src/Server/PrometheusRequestHandlerFactory.h +++ b/src/Server/PrometheusRequestHandlerFactory.h @@ -26,6 +26,25 @@ class AsynchronousMetrics; /// true /// /// +/// More prometheus protocols can be supported with using a different configuration +/// (which is similar to the section): +/// +/// +/// 1234 +/// +/// +/// /metrics +/// +/// expose_metrics +/// true +/// true +/// true +/// true +/// +/// +/// +/// +/// /// An alternative port to serve prometheus protocols can be specified in the section: /// /// From 24103c733d868c2ebb64f6096bcf047d8c00d6c2 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 27 May 2024 17:19:36 +0200 Subject: [PATCH 1516/2361] Support prometheus remote write protocol. 
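
With a TimeSeries table exposed through a remote-write handler, a Prometheus server can be pointed at
ClickHouse using a standard remote_write section; a sketch (host, port and URL path depend on how the
handler is configured and are examples only):

    remote_write:
      - url: "http://clickhouse-host:9363/write"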
--- src/CMakeLists.txt | 2 + src/Common/ErrorCodes.cpp | 4 + ...tobufZeroCopyInputStreamFromReadBuffer.cpp | 56 ++ ...rotobufZeroCopyInputStreamFromReadBuffer.h | 38 ++ src/Interpreters/ClientInfo.cpp | 2 + src/Interpreters/ClientInfo.h | 1 + src/Interpreters/SessionLog.cpp | 5 +- src/Server/HTTP/checkHTTPHeader.cpp | 22 + src/Server/HTTP/checkHTTPHeader.h | 13 + src/Server/PrometheusRequestHandler.cpp | 166 ++++++ src/Server/PrometheusRequestHandler.h | 4 +- src/Server/PrometheusRequestHandlerConfig.h | 8 + .../PrometheusRequestHandlerFactory.cpp | 34 ++ src/Server/PrometheusRequestHandlerFactory.h | 7 + src/Storages/StorageTimeSeries.cpp | 24 + src/Storages/StorageTimeSeries.h | 4 + .../PrometheusRemoteWriteProtocol.cpp | 538 ++++++++++++++++++ .../PrometheusRemoteWriteProtocol.h | 35 ++ src/Storages/TimeSeries/TimeSeriesTagNames.h | 13 + 19 files changed, 973 insertions(+), 3 deletions(-) create mode 100644 src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.cpp create mode 100644 src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.h create mode 100644 src/Server/HTTP/checkHTTPHeader.cpp create mode 100644 src/Server/HTTP/checkHTTPHeader.h create mode 100644 src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp create mode 100644 src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.h create mode 100644 src/Storages/TimeSeries/TimeSeriesTagNames.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cc10fdf9646..98dd0601a1b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -78,6 +78,7 @@ add_headers_and_sources(clickhouse_common_io Common/Scheduler) add_headers_and_sources(clickhouse_common_io Common/Scheduler/Nodes) add_headers_and_sources(clickhouse_common_io IO) add_headers_and_sources(clickhouse_common_io IO/Archives) +add_headers_and_sources(clickhouse_common_io IO/Protobuf) add_headers_and_sources(clickhouse_common_io IO/S3) add_headers_and_sources(clickhouse_common_io IO/AzureBlobStorage) list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp) @@ -470,6 +471,7 @@ dbms_target_link_libraries (PUBLIC ch_contrib::sparsehash) if (TARGET ch_contrib::protobuf) dbms_target_link_libraries (PRIVATE ch_contrib::protobuf) + target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::protobuf) endif () if (TARGET clickhouse_grpc_protos) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index b1b8e2367a4..44a1cd071cb 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -604,6 +604,10 @@ M(723, PARQUET_EXCEPTION) \ M(724, TOO_MANY_TABLES) \ M(725, TOO_MANY_DATABASES) \ + M(726, UNEXPECTED_HTTP_HEADERS) \ + M(727, UNEXPECTED_TABLE_ENGINE) \ + M(728, UNEXPECTED_DATA_TYPE) \ + M(729, ILLEGAL_TIME_SERIES_TAGS) \ \ M(900, DISTRIBUTED_CACHE_ERROR) \ M(901, CANNOT_USE_DISTRIBUTED_CACHE) \ diff --git a/src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.cpp b/src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.cpp new file mode 100644 index 00000000000..86b7eb4d7f7 --- /dev/null +++ b/src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.cpp @@ -0,0 +1,56 @@ +#include "config.h" + +#if USE_PROTOBUF +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +ProtobufZeroCopyInputStreamFromReadBuffer::ProtobufZeroCopyInputStreamFromReadBuffer(std::unique_ptr in_) : in(std::move(in_)) +{ +} + +ProtobufZeroCopyInputStreamFromReadBuffer::~ProtobufZeroCopyInputStreamFromReadBuffer() = default; + +bool 
ProtobufZeroCopyInputStreamFromReadBuffer::Next(const void ** data, int * size) +{ + if (in->eof()) + return false; + *data = in->position(); + *size = static_cast(in->available()); + in->position() += *size; + return true; +} + +void ProtobufZeroCopyInputStreamFromReadBuffer::BackUp(int count) +{ + if (static_cast(in->offset()) < count) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "ProtobufZeroCopyInputStreamFromReadBuffer::BackUp() cannot back up {} bytes (max = {} bytes)", + count, + in->offset()); + + in->position() -= count; +} + +bool ProtobufZeroCopyInputStreamFromReadBuffer::Skip(int count) +{ + return static_cast(in->tryIgnore(count)) == count; +} + +int64_t ProtobufZeroCopyInputStreamFromReadBuffer::ByteCount() const +{ + return in->count(); +} + +} + +#endif diff --git a/src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.h b/src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.h new file mode 100644 index 00000000000..3f86815ef3f --- /dev/null +++ b/src/IO/Protobuf/ProtobufZeroCopyInputStreamFromReadBuffer.h @@ -0,0 +1,38 @@ +#pragma once + +#include "config.h" +#if USE_PROTOBUF + +#include + + +namespace DB +{ +class ReadBuffer; + +class ProtobufZeroCopyInputStreamFromReadBuffer : public google::protobuf::io::ZeroCopyInputStream +{ +public: + explicit ProtobufZeroCopyInputStreamFromReadBuffer(std::unique_ptr in_); + ~ProtobufZeroCopyInputStreamFromReadBuffer() override; + + // Obtains a chunk of data from the stream. + bool Next(const void ** data, int * size) override; + + // Backs up a number of bytes, so that the next call to Next() returns + // data again that was already returned by the last call to Next(). + void BackUp(int count) override; + + // Skips a number of bytes. + bool Skip(int count) override; + + // Returns the total number of bytes read since this object was created. 
+ int64_t ByteCount() const override; + +private: + std::unique_ptr in; +}; + +} + +#endif diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index 39fdef23baa..daf1e300046 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -254,6 +254,8 @@ String toString(ClientInfo::Interface interface) return "LOCAL"; case ClientInfo::Interface::TCP_INTERSERVER: return "TCP_INTERSERVER"; + case ClientInfo::Interface::PROMETHEUS: + return "PROMETHEUS"; } return std::format("Unknown server interface ({}).", static_cast(interface)); diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index ca32b4c5cfa..48dea3cc3ea 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -38,6 +38,7 @@ public: POSTGRESQL = 5, LOCAL = 6, TCP_INTERSERVER = 7, + PROMETHEUS = 8, }; enum class HTTPMethod : uint8_t diff --git a/src/Interpreters/SessionLog.cpp b/src/Interpreters/SessionLog.cpp index 0615a2a1d62..866f5ba8c0a 100644 --- a/src/Interpreters/SessionLog.cpp +++ b/src/Interpreters/SessionLog.cpp @@ -105,9 +105,10 @@ ColumnsDescription SessionLogElement::getColumnsDescription() {"MySQL", static_cast(Interface::MYSQL)}, {"PostgreSQL", static_cast(Interface::POSTGRESQL)}, {"Local", static_cast(Interface::LOCAL)}, - {"TCP_Interserver", static_cast(Interface::TCP_INTERSERVER)} + {"TCP_Interserver", static_cast(Interface::TCP_INTERSERVER)}, + {"Prometheus", static_cast(Interface::PROMETHEUS)}, }); - static_assert(magic_enum::enum_count() == 7); + static_assert(magic_enum::enum_count() == 8); auto lc_string_datatype = std::make_shared(std::make_shared()); diff --git a/src/Server/HTTP/checkHTTPHeader.cpp b/src/Server/HTTP/checkHTTPHeader.cpp new file mode 100644 index 00000000000..812adde022a --- /dev/null +++ b/src/Server/HTTP/checkHTTPHeader.cpp @@ -0,0 +1,22 @@ +#include + +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int UNEXPECTED_HTTP_HEADERS; +} + +void checkHTTPHeader(const HTTPRequest & request, const String & header_name, const String & expected_value) +{ + if (!request.has(header_name)) + throw Exception(ErrorCodes::UNEXPECTED_HTTP_HEADERS, "No HTTP header {}", header_name); + if (request.get(header_name) != expected_value) + throw Exception(ErrorCodes::UNEXPECTED_HTTP_HEADERS, "HTTP header {} has unexpected value '{}' (instead of '{}')", header_name, request.get(header_name), expected_value); +} + +} diff --git a/src/Server/HTTP/checkHTTPHeader.h b/src/Server/HTTP/checkHTTPHeader.h new file mode 100644 index 00000000000..956599ae66b --- /dev/null +++ b/src/Server/HTTP/checkHTTPHeader.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/// Checks that the HTTP request has a specified header with a specified value. +void checkHTTPHeader(const HTTPRequest & request, const String & header_name, const String & expected_value); + +} diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 219e6ae55da..98d652540d3 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -9,6 +9,19 @@ #include #include "config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + namespace DB { @@ -74,6 +87,154 @@ public: }; +/// Base implementation of a protocol with Context and authentication. 
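+/// Authentication and settings come from the request itself; an illustrative request line
+/// (parameter names follow the code below, values are examples only):
+///     GET /metrics?user=default&password=secret&role=monitoring&stacktrace=1&max_threads=2 HTTP/1.1
+/// "user", "password", "quota_key", "stacktrace", "role" and "query_id" are reserved parameters;
+/// any other parameter is applied as a setting change after checking the user's constraints.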
+class PrometheusRequestHandler::ImplWithContext : public Impl +{ +public: + explicit ImplWithContext(PrometheusRequestHandler & parent) : Impl(parent), default_settings(parent.server.context()->getSettingsRef()) { } + + virtual void handlingRequestWithContext(HTTPServerRequest & request, HTTPServerResponse & response) = 0; + +protected: + void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override + { + SCOPE_EXIT({ + request_credentials.reset(); + context.reset(); + session.reset(); + params.reset(); + }); + + params = std::make_unique(default_settings, request); + parent().send_stacktrace = config().is_stacktrace_enabled && params->getParsed("stacktrace", false); + + if (!authenticateUserAndMakeContext(request, response)) + return; /// The user is not authenticated yet, and the HTTP_UNAUTHORIZED response is sent with the "WWW-Authenticate" header, + /// and `request_credentials` must be preserved until the next request or until any exception. + + /// Initialize query scope. + std::optional query_scope; + if (context) + query_scope.emplace(context); + + handlingRequestWithContext(request, response); + } + + bool authenticateUserAndMakeContext(HTTPServerRequest & request, HTTPServerResponse & response) + { + session = std::make_unique(server().context(), ClientInfo::Interface::PROMETHEUS, request.isSecure()); + + if (!authenticateUser(request, response)) + return false; + + makeContext(request); + return true; + } + + bool authenticateUser(HTTPServerRequest & request, HTTPServerResponse & response) + { + return authenticateUserByHTTP(request, *params, response, *session, request_credentials, server().context(), log()); + } + + void makeContext(HTTPServerRequest & request) + { + context = session->makeQueryContext(); + + /// Anything else beside HTTP POST should be readonly queries. + setReadOnlyIfHTTPMethodIdempotent(context, request.getMethod()); + + auto roles = params->getAll("role"); + if (!roles.empty()) + context->setCurrentRoles(roles); + + auto param_could_be_skipped = [&] (const String & name) + { + /// Empty parameter appears when URL like ?&a=b or a=b&&c=d. Just skip them for user's convenience. + if (name.empty()) + return true; + + /// Some parameters (database, default_format, everything used in the code above) do not + /// belong to the Settings class. + static const NameSet reserved_param_names{"user", "password", "quota_key", "stacktrace", "role", "query_id"}; + return reserved_param_names.contains(name); + }; + + /// Settings can be overridden in the query. + SettingsChanges settings_changes; + for (const auto & [key, value] : *params) + { + if (!param_could_be_skipped(key)) + { + /// Other than query parameters are treated as settings. + settings_changes.push_back({key, value}); + } + } + + context->checkSettingsConstraints(settings_changes, SettingSource::QUERY); + context->applySettingsChanges(settings_changes); + + /// Set the query id supplied by the user, if any, and also update the OpenTelemetry fields. + context->setCurrentQueryId(params->get("query_id", request.get("X-ClickHouse-Query-Id", ""))); + } + + void onException() override + { + // So that the next requests on the connection have to always start afresh in case of exceptions. + request_credentials.reset(); + } + + const Settings & default_settings; + std::unique_ptr params; + std::unique_ptr session; + std::unique_ptr request_credentials; + ContextMutablePtr context; +}; + + +/// Implementation of the remote-write protocol. 
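+/// Expects an HTTP POST with headers "Content-Type: application/x-protobuf" and "Content-Encoding: snappy"
+/// whose body is a prometheus::WriteRequest message; the received time series and metrics metadata are
+/// inserted into the configured TimeSeries table.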
+class PrometheusRequestHandler::RemoteWriteImpl : public ImplWithContext +{ +public: + using ImplWithContext::ImplWithContext; + + void beforeHandlingRequest(HTTPServerRequest & request) override + { + LOG_INFO(log(), "Handling remote write request from {}", request.get("User-Agent", "")); + chassert(config().type == PrometheusRequestHandlerConfig::Type::RemoteWrite); + } + + void handlingRequestWithContext([[maybe_unused]] HTTPServerRequest & request, [[maybe_unused]] HTTPServerResponse & response) override + { +#if USE_PROMETHEUS_PROTOBUFS + checkHTTPHeader(request, "Content-Type", "application/x-protobuf"); + checkHTTPHeader(request, "Content-Encoding", "snappy"); + + ProtobufZeroCopyInputStreamFromReadBuffer zero_copy_input_stream{ + std::make_unique(wrapReadBufferReference(request.getStream()))}; + + prometheus::WriteRequest write_request; + if (!write_request.ParsePartialFromZeroCopyStream(&zero_copy_input_stream)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse WriteRequest"); + + auto table = DatabaseCatalog::instance().getTable(StorageID{config().time_series_table_name}, context); + PrometheusRemoteWriteProtocol protocol{table, context}; + + if (write_request.timeseries_size()) + protocol.writeTimeSeries(write_request.timeseries()); + + if (write_request.metadata_size()) + protocol.writeMetricsMetadata(write_request.metadata()); + + response.setContentType("text/plain; charset=UTF-8"); + response.send(); + +#else + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Prometheus remote write protocol is disabled"); +#endif + } +}; + + PrometheusRequestHandler::PrometheusRequestHandler( IServer & server_, const PrometheusRequestHandlerConfig & config_, @@ -99,6 +260,11 @@ void PrometheusRequestHandler::createImpl() impl = std::make_unique(*this); return; } + case PrometheusRequestHandlerConfig::Type::RemoteWrite: + { + impl = std::make_unique(*this); + return; + } } UNREACHABLE(); } diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index 1d985a23fbe..b4d1e849bdd 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -11,7 +11,7 @@ class IServer; class PrometheusMetricsWriter; class WriteBufferFromHTTPServerResponse; -/// Handles requests for prometheus protocols (expose_metrics). +/// Handles requests for prometheus protocols (expose_metrics, remote_write). class PrometheusRequestHandler : public HTTPRequestHandler { public: @@ -41,7 +41,9 @@ private: const LoggerPtr log; class Impl; + class ImplWithContext; class ExposeMetricsImpl; + class RemoteWriteImpl; std::unique_ptr impl; String http_method; diff --git a/src/Server/PrometheusRequestHandlerConfig.h b/src/Server/PrometheusRequestHandlerConfig.h index bae0a8e1199..d8fd03f19b2 100644 --- a/src/Server/PrometheusRequestHandlerConfig.h +++ b/src/Server/PrometheusRequestHandlerConfig.h @@ -1,5 +1,7 @@ #pragma once +#include + namespace DB { @@ -11,6 +13,9 @@ struct PrometheusRequestHandlerConfig { /// Exposes ClickHouse metrics for scraping by Prometheus. ExposeMetrics, + + /// Handles Prometheus remote-write protocol. 
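+        /// (An HTTP POST of a snappy-compressed protobuf WriteRequest which is inserted into a TimeSeries table.)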
+ RemoteWrite, }; Type type = Type::ExposeMetrics; @@ -21,6 +26,9 @@ struct PrometheusRequestHandlerConfig bool expose_events = false; bool expose_errors = false; + /// Settings for types RemoteWrite, RemoteRead: + QualifiedTableName time_series_table_name; + size_t keep_alive_timeout = 0; bool is_stacktrace_enabled = true; }; diff --git a/src/Server/PrometheusRequestHandlerFactory.cpp b/src/Server/PrometheusRequestHandlerFactory.cpp index 00b87515fc8..d4b1ab6cd93 100644 --- a/src/Server/PrometheusRequestHandlerFactory.cpp +++ b/src/Server/PrometheusRequestHandlerFactory.cpp @@ -45,18 +45,52 @@ namespace return res; } + /// Extracts a qualified table name from the config. It can be set either as + ///
+    ///     <table>mydb.prometheus</table>
+    /// or
+    ///     <database>mydb</database>
+    ///     <table>prometheus</table>
+ QualifiedTableName parseTableNameFromConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix) + { + QualifiedTableName res; + res.table = config.getString(config_prefix + ".table", "prometheus"); + res.database = config.getString(config_prefix + ".database", ""); + if (res.database.empty()) + res = QualifiedTableName::parseFromString(res.table); + if (res.database.empty()) + res.database = "default"; + return res; + } + + /// Parses a configuration like this: + /// + /// db.time_series_table_name
+ PrometheusRequestHandlerConfig parseRemoteWriteConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix) + { + PrometheusRequestHandlerConfig res; + res.type = PrometheusRequestHandlerConfig::Type::RemoteWrite; + res.time_series_table_name = parseTableNameFromConfig(config, config_prefix); + parseCommonConfig(config, res); + return res; + } + /// Parses a configuration like this: /// expose_metrics /// true /// true /// true /// true + /// -OR- + /// remote_write + /// db.time_series_table_name
PrometheusRequestHandlerConfig parseHandlerConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix) { String type = config.getString(config_prefix + ".type"); if (type == "expose_metrics") return parseExposeMetricsConfig(config, config_prefix); + else if (type == "remote_write") + return parseRemoteWriteConfig(config, config_prefix); else throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown type {} is specified in the configuration for a prometheus protocol", type); } diff --git a/src/Server/PrometheusRequestHandlerFactory.h b/src/Server/PrometheusRequestHandlerFactory.h index 50961ed0bc0..a7227b12f7e 100644 --- a/src/Server/PrometheusRequestHandlerFactory.h +++ b/src/Server/PrometheusRequestHandlerFactory.h @@ -74,6 +74,13 @@ HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( /// true /// /// +/// +/// /write +/// +/// remote_write +/// db.time_series_table_name
+///
+///
/// HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryForHTTPRule( IServer & server, diff --git a/src/Storages/StorageTimeSeries.cpp b/src/Storages/StorageTimeSeries.cpp index d85db53d78d..e2ef586c3ca 100644 --- a/src/Storages/StorageTimeSeries.cpp +++ b/src/Storages/StorageTimeSeries.cpp @@ -25,6 +25,7 @@ namespace ErrorCodes extern const int INCORRECT_QUERY; extern const int LOGICAL_ERROR; extern const int NOT_IMPLEMENTED; + extern const int UNEXPECTED_TABLE_ENGINE; } @@ -427,6 +428,29 @@ SinkToStoragePtr StorageTimeSeries::write(const ASTPtr & query, const StorageMet } +std::shared_ptr storagePtrToTimeSeries(StoragePtr storage) +{ + if (auto res = typeid_cast>(storage)) + return res; + + throw Exception( + ErrorCodes::UNEXPECTED_TABLE_ENGINE, + "This operation can be executed on a TimeSeries table only, the engine of table {} is not TimeSeries", + storage->getStorageID().getNameForLogs()); +} + +std::shared_ptr storagePtrToTimeSeries(ConstStoragePtr storage) +{ + if (auto res = typeid_cast>(storage)) + return res; + + throw Exception( + ErrorCodes::UNEXPECTED_TABLE_ENGINE, + "This operation can be executed on a TimeSeries table only, the engine of table {} is not TimeSeries", + storage->getStorageID().getNameForLogs()); +} + + void registerStorageTimeSeries(StorageFactory & factory) { factory.registerStorage("TimeSeries", [](const StorageFactory::Arguments & args) diff --git a/src/Storages/StorageTimeSeries.h b/src/Storages/StorageTimeSeries.h index 9ee09108803..35db3131a0b 100644 --- a/src/Storages/StorageTimeSeries.h +++ b/src/Storages/StorageTimeSeries.h @@ -2,6 +2,7 @@ #include #include +#include #include @@ -104,4 +105,7 @@ private: bool has_inner_tables; }; +std::shared_ptr storagePtrToTimeSeries(StoragePtr storage); +std::shared_ptr storagePtrToTimeSeries(ConstStoragePtr storage); + } diff --git a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp new file mode 100644 index 00000000000..b3845e88406 --- /dev/null +++ b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp @@ -0,0 +1,538 @@ +#include + +#include "config.h" +#if USE_PROMETHEUS_PROTOBUFS + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_TIME_SERIES_TAGS; + extern const int ILLEGAL_COLUMN; +} + + +namespace +{ + /// Checks that a specified set of labels is sorted and has no duplications, and there is one label named "__name__". + void checkLabels(const ::google::protobuf::RepeatedPtrField<::prometheus::Label> & labels) + { + bool metric_name_found = false; + for (size_t i = 0; i != static_cast(labels.size()); ++i) + { + const auto & label = labels[static_cast(i)]; + const auto & label_name = label.name(); + const auto & label_value = label.value(); + + if (label_name.empty()) + throw Exception(ErrorCodes::ILLEGAL_TIME_SERIES_TAGS, "Label name should not be empty"); + if (label_value.empty()) + continue; /// Empty label value is treated like the label doesn't exist. + + if (label_name == TimeSeriesTagNames::MetricName) + metric_name_found = true; + + if (i) + { + /// Check that labels are sorted. 
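+                /// Senders are expected to provide labels sorted by name and without duplicates,
+                /// so comparing each label name with the previous one is sufficient here.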
+ const auto & previous_label_name = labels[static_cast(i - 1)].name(); + if (label_name <= previous_label_name) + { + if (label_name == previous_label_name) + throw Exception(ErrorCodes::ILLEGAL_TIME_SERIES_TAGS, "Found duplicate label {}", label_name); + else + throw Exception(ErrorCodes::ILLEGAL_TIME_SERIES_TAGS, "Label names are not sorted in lexicographical order ({} > {})", + previous_label_name, label_name); + } + } + } + + if (!metric_name_found) + throw Exception(ErrorCodes::ILLEGAL_TIME_SERIES_TAGS, "Metric name (label {}) not found", TimeSeriesTagNames::MetricName); + } + + /// Finds the description of an insertable column in the list. + const ColumnDescription & getInsertableColumnDescription(const ColumnsDescription & columns, const String & column_name, const StorageID & time_series_storage_id) + { + const ColumnDescription * column = columns.tryGet(column_name); + if (!column || ((column->default_desc.kind != ColumnDefaultKind::Default) && (column->default_desc.kind != ColumnDefaultKind::Ephemeral))) + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "{}: Column {} {}", + time_series_storage_id.getNameForLogs(), column_name, column ? "non-insertable" : "doesn't exist"); + } + return *column; + } + + /// Calculates the identifier of each time series in "tags_block" using the default expression for the "id" column, + /// and adds column "id" with the results to "tags_block". + IColumn & calculateId(const ContextPtr & context, const ColumnDescription & id_column_description, Block & tags_block) + { + auto blocks = std::make_shared(); + blocks->push_back(tags_block); + + auto header = tags_block.cloneEmpty(); + auto pipe = Pipe(std::make_shared(blocks, header)); + + Block header_with_id; + const auto & id_name = id_column_description.name; + auto id_type = id_column_description.type; + header_with_id.insert(ColumnWithTypeAndName{id_type, id_name}); + + auto adding_missing_defaults_dag = addMissingDefaults( + pipe.getHeader(), + header_with_id.getNamesAndTypesList(), + ColumnsDescription{id_column_description}, + context); + + auto adding_missing_defaults_actions = std::make_shared(adding_missing_defaults_dag); + pipe.addSimpleTransform([&](const Block & stream_header) + { + return std::make_shared(stream_header, adding_missing_defaults_actions); + }); + + auto convert_actions_dag = ActionsDAG::makeConvertingActions( + pipe.getHeader().getColumnsWithTypeAndName(), + header_with_id.getColumnsWithTypeAndName(), + ActionsDAG::MatchColumnsMode::Position); + auto actions = std::make_shared( + convert_actions_dag, + ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); + pipe.addSimpleTransform([&](const Block & stream_header) + { + return std::make_shared(stream_header, actions); + }); + + QueryPipeline pipeline{std::move(pipe)}; + PullingPipelineExecutor executor{pipeline}; + + MutableColumnPtr id_column; + + Block block_from_executor; + while (executor.pull(block_from_executor)) + { + if (block_from_executor) + { + MutableColumnPtr id_column_part = block_from_executor.getByName(id_name).column->assumeMutable(); + if (id_column) + id_column->insertRangeFrom(*id_column_part, 0, id_column_part->size()); + else + id_column = std::move(id_column_part); + } + } + + if (!id_column) + id_column = id_type->createColumn(); + + IColumn & id_column_ref = *id_column; + tags_block.insert(0, ColumnWithTypeAndName{std::move(id_column), id_type, id_name}); + return id_column_ref; + } + + /// Converts a timestamp in milliseconds to a DateTime64 with a specified scale. 
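+    /// For example, 1700000000123 ms stays 1700000000123 for scale 3, becomes 1700000000123000 for scale 6,
+    /// and 1700000000 for scale 0 (the sub-second part is truncated when the scale is less than 3).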
+ DateTime64 scaleTimestamp(Int64 timestamp_ms, UInt32 scale) + { + if (scale == 3) + return timestamp_ms; + else if (scale > 3) + return timestamp_ms * DecimalUtils::scaleMultiplier(scale - 3); + else + return timestamp_ms / DecimalUtils::scaleMultiplier(3 - scale); + } + + struct BlocksToInsert + { + std::vector> blocks; + }; + + /// Converts time series from the protobuf format to prepared blocks for inserting into target tables. + BlocksToInsert toBlocks(const google::protobuf::RepeatedPtrField & time_series, + const ContextPtr & context, + const StorageID & time_series_storage_id, + const StorageInMemoryMetadata & time_series_storage_metadata, + const TimeSeriesSettings & time_series_settings) + { + size_t num_tags_rows = time_series.size(); + + size_t num_data_rows = 0; + for (const auto & element : time_series) + num_data_rows += element.samples_size(); + + if (!num_data_rows) + return {}; /// Nothing to insert into target tables. + + /// Column types must be extracted from the target tables' metadata. + const auto & columns_description = time_series_storage_metadata.columns; + + auto get_column_description = [&](const String & column_name) -> const ColumnDescription & + { + return getInsertableColumnDescription(columns_description, column_name, time_series_storage_id); + }; + + /// We're going to prepare two blocks - one for the "data" table, and one for the "tags" table. + Block data_block, tags_block; + + auto make_column_for_data_block = [&](const ColumnDescription & column_description) -> IColumn & + { + auto column = column_description.type->createColumn(); + column->reserve(num_data_rows); + auto * column_ptr = column.get(); + data_block.insert(ColumnWithTypeAndName{std::move(column), column_description.type, column_description.name}); + return *column_ptr; + }; + + auto make_column_for_tags_block = [&](const ColumnDescription & column_description) -> IColumn & + { + auto column = column_description.type->createColumn(); + column->reserve(num_tags_rows); + auto * column_ptr = column.get(); + tags_block.insert(ColumnWithTypeAndName{std::move(column), column_description.type, column_description.name}); + return *column_ptr; + }; + + /// Create columns. + + /// Column "id". + const auto & id_description = get_column_description(TimeSeriesColumnNames::ID); + TimeSeriesColumnsValidator validator{time_series_storage_id, time_series_settings}; + validator.validateColumnForID(id_description); + auto & id_column_in_data_table = make_column_for_data_block(id_description); + + /// Column "timestamp". + const auto & timestamp_description = get_column_description(TimeSeriesColumnNames::Timestamp); + UInt32 timestamp_scale; + validator.validateColumnForTimestamp(timestamp_description, timestamp_scale); + auto & timestamp_column = make_column_for_data_block(timestamp_description); + + /// Column "value". + const auto & value_description = get_column_description(TimeSeriesColumnNames::Value); + validator.validateColumnForValue(value_description); + auto & value_column = make_column_for_data_block(value_description); + + /// Column "metric_name". + const auto & metric_name_description = get_column_description(TimeSeriesColumnNames::MetricName); + validator.validateColumnForMetricName(metric_name_description); + auto & metric_name_column = make_column_for_tags_block(metric_name_description); + + /// Columns we should check explicitly that they're filled after filling each row. 
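+        /// (A time series may not provide values for some of the tags mapped to dedicated columns via the
+        /// "tags_to_columns" setting; for such rows a default value is inserted after processing the row.)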
+ std::vector columns_to_fill_in_tags_table; + + /// Columns corresponding to specific tags specified in the "tags_to_columns" setting. + std::unordered_map columns_by_tag_name; + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & tag_name = tuple.at(0).safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + const auto & column_description = get_column_description(column_name); + validator.validateColumnForTagValue(column_description); + auto & column = make_column_for_tags_block(column_description); + columns_by_tag_name[tag_name] = &column; + columns_to_fill_in_tags_table.emplace_back(&column); + } + + /// Column "tags". + const auto & tags_description = get_column_description(TimeSeriesColumnNames::Tags); + validator.validateColumnForTagsMap(tags_description); + auto & tags_column = typeid_cast(make_column_for_tags_block(tags_description)); + IColumn & tags_names = tags_column.getNestedData().getColumn(0); + IColumn & tags_values = tags_column.getNestedData().getColumn(1); + auto & tags_offsets = tags_column.getNestedColumn().getOffsets(); + + /// Column "all_tags". + const auto & all_tags_description = get_column_description(TimeSeriesColumnNames::AllTags); + validator.validateColumnForTagsMap(all_tags_description); + auto & all_tags_column = typeid_cast(make_column_for_tags_block(all_tags_description)); + IColumn & all_tags_names = all_tags_column.getNestedData().getColumn(0); + IColumn & all_tags_values = all_tags_column.getNestedData().getColumn(1); + auto & all_tags_offsets = all_tags_column.getNestedColumn().getOffsets(); + + /// Prepare a block for inserting into the "tags" table. + size_t current_row_in_tags = 0; + for (size_t i = 0; i != static_cast(time_series.size()); ++i) + { + const auto & element = time_series[static_cast(i)]; + if (!element.samples_size()) + continue; + + const auto & labels = element.labels(); + checkLabels(labels); + + for (size_t j = 0; j != static_cast(labels.size()); ++j) + { + const auto & label = labels[static_cast(j)]; + const auto & tag_name = label.name(); + const auto & tag_value = label.value(); + + if (tag_name == TimeSeriesTagNames::MetricName) + { + metric_name_column.insertData(tag_value.data(), tag_value.length()); + } + else + { + all_tags_names.insertData(tag_name.data(), tag_name.length()); + all_tags_values.insertData(tag_value.data(), tag_value.length()); + + auto it = columns_by_tag_name.find(tag_name); + bool has_column_for_tag_value = (it != columns_by_tag_name.end()); + if (has_column_for_tag_value) + { + auto * column = it->second; + column->insertData(tag_value.data(), tag_value.length()); + } + else + { + tags_names.insertData(tag_name.data(), tag_name.length()); + tags_values.insertData(tag_value.data(), tag_value.length()); + } + } + } + + all_tags_offsets.push_back(all_tags_names.size()); + tags_offsets.push_back(tags_names.size()); + + for (auto * column : columns_to_fill_in_tags_table) + { + if (column->size() == current_row_in_tags) + column->insertDefault(); + } + + ++current_row_in_tags; + } + + /// Calculate an identifier for each time series, make a new column from those identifiers, and add it to "tags_block". + auto & id_column_in_tags_table = calculateId(context, columns_description.get(TimeSeriesColumnNames::ID), tags_block); + + /// Prepare a block for inserting to the "data" table. 
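+        /// Each sample produces one row: the identifier calculated above for its time series,
+        /// the timestamp converted to the scale of the "timestamp" column, and the value.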
+ current_row_in_tags = 0; + for (size_t i = 0; i != static_cast(time_series.size()); ++i) + { + const auto & element = time_series[static_cast(i)]; + if (!element.samples_size()) + continue; + + id_column_in_data_table.insertManyFrom(id_column_in_tags_table, current_row_in_tags, element.samples_size()); + for (const auto & sample : element.samples()) + { + timestamp_column.insert(scaleTimestamp(sample.timestamp(), timestamp_scale)); + value_column.insert(sample.value()); + } + + ++current_row_in_tags; + } + + /// The "all_tags" column in the "tags" table is either ephemeral or doesn't exists. + /// We've used the "all_tags" column to calculate the "id" column already, + /// and now we don't need it to insert to the "tags" table. + tags_block.erase(TimeSeriesColumnNames::AllTags); + + BlocksToInsert res; + + /// A block to the "tags" table should be inserted first. + /// (Because any INSERT can fail and we don't want to have rows in the data table with no corresponding "id" written to the "tags" table.) + res.blocks.emplace_back(ViewTarget::Tags, std::move(tags_block)); + res.blocks.emplace_back(ViewTarget::Data, std::move(data_block)); + + return res; + } + + std::string_view metricTypeToString(prometheus::MetricMetadata::MetricType metric_type) + { + using namespace std::literals; + switch (metric_type) + { + case prometheus::MetricMetadata::UNKNOWN: return "unknown"sv; + case prometheus::MetricMetadata::COUNTER: return "counter"sv; + case prometheus::MetricMetadata::GAUGE: return "gauge"sv; + case prometheus::MetricMetadata::HISTOGRAM: return "histogram"sv; + case prometheus::MetricMetadata::GAUGEHISTOGRAM: return "gaugehistogram"sv; + case prometheus::MetricMetadata::SUMMARY: return "summary"sv; + case prometheus::MetricMetadata::INFO: return "info"sv; + case prometheus::MetricMetadata::STATESET: return "stateset"sv; + default: break; + } + return ""; + } + + /// Converts metrics metadata from the protobuf format to prepared blocks for inserting into target tables. + BlocksToInsert toBlocks(const google::protobuf::RepeatedPtrField & metrics_metadata, + const StorageID & time_series_storage_id, + const StorageInMemoryMetadata & time_series_storage_metadata, + const TimeSeriesSettings & time_series_settings) + { + size_t num_rows = metrics_metadata.size(); + + if (!num_rows) + return {}; /// Nothing to insert into target tables. + + /// Column types must be extracted from the target tables' metadata. + const auto & columns_description = time_series_storage_metadata.columns; + + auto get_column_description = [&](const String & column_name) -> const ColumnDescription & + { + return getInsertableColumnDescription(columns_description, column_name, time_series_storage_id); + }; + + /// We're going to prepare one blocks for the "metrics" table. + Block block; + + auto make_column = [&](const ColumnDescription & column_description) -> IColumn & + { + auto column = column_description.type->createColumn(); + column->reserve(num_rows); + auto * column_ptr = column.get(); + block.insert(ColumnWithTypeAndName{std::move(column), column_description.type, column_description.name}); + return *column_ptr; + }; + + /// Create columns. + + /// Column "metric_family_name". 
+ const auto & metric_family_name_description = get_column_description(TimeSeriesColumnNames::MetricFamilyName); + TimeSeriesColumnsValidator validator{time_series_storage_id, time_series_settings}; + validator.validateColumnForMetricFamilyName(metric_family_name_description); + auto & metric_family_name_column = make_column(metric_family_name_description); + + /// Column "type". + const auto & type_description = get_column_description(TimeSeriesColumnNames::Type); + validator.validateColumnForType(type_description); + auto & type_column = make_column(type_description); + + /// Column "unit". + const auto & unit_description = get_column_description(TimeSeriesColumnNames::Unit); + validator.validateColumnForUnit(unit_description); + auto & unit_column = make_column(unit_description); + + /// Column "help". + const auto & help_description = get_column_description(TimeSeriesColumnNames::Help); + validator.validateColumnForHelp(help_description); + auto & help_column = make_column(help_description); + + /// Fill those columns. + for (const auto & element : metrics_metadata) + { + const auto & metric_family_name = element.metric_family_name(); + const auto & type_str = metricTypeToString(element.type()); + const auto & help = element.help(); + const auto & unit = element.unit(); + + metric_family_name_column.insertData(metric_family_name.data(), metric_family_name.length()); + type_column.insertData(type_str.data(), type_str.length()); + unit_column.insertData(unit.data(), unit.length()); + help_column.insertData(help.data(), help.length()); + } + + /// Prepare a result. + BlocksToInsert res; + res.blocks.emplace_back(ViewTarget::Metrics, std::move(block)); + return res; + } + + /// Inserts blocks to target tables. + void insertToTargetTables(BlocksToInsert && blocks, StorageTimeSeries & time_series_storage, ContextPtr context, Poco::Logger * log) + { + auto time_series_storage_id = time_series_storage.getStorageID(); + + for (auto & [table_kind, block] : blocks.blocks) + { + if (block) + { + const auto & target_table_id = time_series_storage.getTargetTableId(table_kind); + + LOG_INFO(log, "{}: Inserting {} rows to the {} table", + time_series_storage_id.getNameForLogs(), block.rows(), toString(table_kind)); + + auto insert_query = std::make_shared(); + insert_query->table_id = target_table_id; + + auto columns_ast = std::make_shared(); + for (const auto & name : block.getNames()) + columns_ast->children.emplace_back(std::make_shared(name)); + insert_query->columns = columns_ast; + + ContextMutablePtr insert_context = Context::createCopy(context); + insert_context->setCurrentQueryId(context->getCurrentQueryId() + ":" + String{toString(table_kind)}); + + InterpreterInsertQuery interpreter(insert_query, insert_context); + BlockIO io = interpreter.execute(); + PushingPipelineExecutor executor(io.pipeline); + + executor.start(); + executor.push(std::move(block)); + executor.finish(); + } + } + } +} + + +PrometheusRemoteWriteProtocol::PrometheusRemoteWriteProtocol(StoragePtr time_series_storage_, const ContextPtr & context_) + : WithContext(context_) + , time_series_storage(storagePtrToTimeSeries(time_series_storage_)) + , log(getLogger("PrometheusRemoteWriteProtocol")) +{ +} + +PrometheusRemoteWriteProtocol::~PrometheusRemoteWriteProtocol() = default; + + +void PrometheusRemoteWriteProtocol::writeTimeSeries(const google::protobuf::RepeatedPtrField & time_series) +{ + auto time_series_storage_id = time_series_storage->getStorageID(); + + LOG_TRACE(log, "{}: Writing {} time series", + 
time_series_storage_id.getNameForLogs(), time_series.size()); + + auto time_series_storage_metadata = time_series_storage->getInMemoryMetadataPtr(); + auto time_series_settings = time_series_storage->getStorageSettingsPtr(); + + auto blocks = toBlocks(time_series, getContext(), time_series_storage_id, *time_series_storage_metadata, *time_series_settings); + insertToTargetTables(std::move(blocks), *time_series_storage, getContext(), log.get()); + + LOG_TRACE(log, "{}: {} time series written", + time_series_storage_id.getNameForLogs(), time_series.size()); +} + +void PrometheusRemoteWriteProtocol::writeMetricsMetadata(const google::protobuf::RepeatedPtrField & metrics_metadata) +{ + auto time_series_storage_id = time_series_storage->getStorageID(); + + LOG_TRACE(log, "{}: Writing {} metrics metadata", + time_series_storage_id.getNameForLogs(), metrics_metadata.size()); + + auto time_series_storage_metadata = time_series_storage->getInMemoryMetadataPtr(); + auto time_series_settings = time_series_storage->getStorageSettingsPtr(); + + auto blocks = toBlocks(metrics_metadata, time_series_storage_id, *time_series_storage_metadata, *time_series_settings); + insertToTargetTables(std::move(blocks), *time_series_storage, getContext(), log.get()); + + LOG_TRACE(log, "{}: {} metrics metadata written", + time_series_storage_id.getNameForLogs(), metrics_metadata.size()); +} + +} + +#endif diff --git a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.h b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.h new file mode 100644 index 00000000000..24c65e96cbe --- /dev/null +++ b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.h @@ -0,0 +1,35 @@ +#pragma once + +#include "config.h" +#if USE_PROMETHEUS_PROTOBUFS + +#include +#include +#include + + +namespace DB +{ +class StorageTimeSeries; + +/// Helper class to support the prometheus remote write protocol. +class PrometheusRemoteWriteProtocol : WithContext +{ +public: + PrometheusRemoteWriteProtocol(StoragePtr time_series_storage_, const ContextPtr & context_); + ~PrometheusRemoteWriteProtocol(); + + /// Insert time series received by remote write protocol to our table. + void writeTimeSeries(const google::protobuf::RepeatedPtrField & time_series); + + /// Insert metrics metadata received by remote write protocol to our table. + void writeMetricsMetadata(const google::protobuf::RepeatedPtrField & metrics_metadata); + +private: + std::shared_ptr time_series_storage; + Poco::LoggerPtr log; +}; + +} + +#endif diff --git a/src/Storages/TimeSeries/TimeSeriesTagNames.h b/src/Storages/TimeSeries/TimeSeriesTagNames.h new file mode 100644 index 00000000000..23b005ed414 --- /dev/null +++ b/src/Storages/TimeSeries/TimeSeriesTagNames.h @@ -0,0 +1,13 @@ +#pragma once + + +namespace DB +{ + +/// Label names with special meaning. +struct TimeSeriesTagNames +{ + static constexpr const char * MetricName = "__name__"; +}; + +} From eacbadf560997c22a178e365f61d60868a08c036 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Thu, 30 May 2024 17:23:38 +0200 Subject: [PATCH 1517/2361] Support prometheus remote read protocol. 
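
The new handler type "remote_read" answers prometheus ReadRequest queries by reading from a TimeSeries
table. For example, it can be enabled as an <http_handlers> rule like this (the table name below is just
a placeholder):

    <url>/read</url>
    <handler>
        <type>remote_read</type>
        <table>db.time_series_table_name</table>
    </handler>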
--- ...bufZeroCopyOutputStreamFromWriteBuffer.cpp | 60 +++ ...tobufZeroCopyOutputStreamFromWriteBuffer.h | 40 ++ src/Server/PrometheusRequestHandler.cpp | 68 +++ src/Server/PrometheusRequestHandler.h | 3 +- src/Server/PrometheusRequestHandlerConfig.h | 3 + .../PrometheusRequestHandlerFactory.cpp | 14 + src/Server/PrometheusRequestHandlerFactory.h | 7 + .../PrometheusRemoteReadProtocol.cpp | 444 ++++++++++++++++++ .../TimeSeries/PrometheusRemoteReadProtocol.h | 36 ++ .../TimeSeries/TimeSeriesColumnsValidator.cpp | 26 + .../TimeSeries/TimeSeriesColumnsValidator.h | 4 + 11 files changed, 704 insertions(+), 1 deletion(-) create mode 100644 src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.cpp create mode 100644 src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.h create mode 100644 src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp create mode 100644 src/Storages/TimeSeries/PrometheusRemoteReadProtocol.h diff --git a/src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.cpp b/src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.cpp new file mode 100644 index 00000000000..d1e02b436f3 --- /dev/null +++ b/src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.cpp @@ -0,0 +1,60 @@ +#include "config.h" + +#if USE_PROTOBUF +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +ProtobufZeroCopyOutputStreamFromWriteBuffer::ProtobufZeroCopyOutputStreamFromWriteBuffer(WriteBuffer & out_) : out(&out_) +{ +} + +ProtobufZeroCopyOutputStreamFromWriteBuffer::ProtobufZeroCopyOutputStreamFromWriteBuffer(std::unique_ptr out_) + : ProtobufZeroCopyOutputStreamFromWriteBuffer(*out_) +{ + out_holder = std::move(out_); +} + +ProtobufZeroCopyOutputStreamFromWriteBuffer::~ProtobufZeroCopyOutputStreamFromWriteBuffer() = default; + +bool ProtobufZeroCopyOutputStreamFromWriteBuffer::Next(void ** data, int * size) +{ + *data = out->position(); + *size = static_cast(out->available()); + out->position() += *size; + return true; +} + +void ProtobufZeroCopyOutputStreamFromWriteBuffer::BackUp(int count) +{ + if (static_cast(out->offset()) < count) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "ProtobufZeroCopyOutputStreamFromWriteBuffer::BackUp() cannot back up {} bytes (max = {} bytes)", + count, + out->offset()); + + out->position() -= count; +} + +int64_t ProtobufZeroCopyOutputStreamFromWriteBuffer::ByteCount() const +{ + return out->count(); +} + +void ProtobufZeroCopyOutputStreamFromWriteBuffer::finalize() +{ + out->finalize(); +} + +} + +#endif diff --git a/src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.h b/src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.h new file mode 100644 index 00000000000..c47cef9ff4d --- /dev/null +++ b/src/IO/Protobuf/ProtobufZeroCopyOutputStreamFromWriteBuffer.h @@ -0,0 +1,40 @@ +#pragma once + +#include "config.h" +#if USE_PROTOBUF + +#include + + +namespace DB +{ +class WriteBuffer; + +class ProtobufZeroCopyOutputStreamFromWriteBuffer : public google::protobuf::io::ZeroCopyOutputStream +{ +public: + explicit ProtobufZeroCopyOutputStreamFromWriteBuffer(WriteBuffer & out_); + explicit ProtobufZeroCopyOutputStreamFromWriteBuffer(std::unique_ptr out_); + + ~ProtobufZeroCopyOutputStreamFromWriteBuffer() override; + + // Obtains a buffer into which data can be written. + bool Next(void ** data, int * size) override; + + // Backs up a number of bytes, so that the end of the last buffer returned + // by Next() is not actually written. 
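+    // (Implemented by moving the position of the underlying WriteBuffer back by `count` bytes.)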
+ void BackUp(int count) override; + + // Returns the total number of bytes written since this object was created. + int64_t ByteCount() const override; + + void finalize(); + +private: + WriteBuffer * out; + std::unique_ptr out_holder; +}; + +} + +#endif diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 98d652540d3..275f6f3c04b 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -12,7 +12,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -20,6 +22,7 @@ #include #include #include +#include #include @@ -234,6 +237,66 @@ public: } }; +/// Implementation of the remote-read protocol. +class PrometheusRequestHandler::RemoteReadImpl : public ImplWithContext +{ +public: + using ImplWithContext::ImplWithContext; + + void beforeHandlingRequest(HTTPServerRequest & request) override + { + LOG_INFO(log(), "Handling remote read request from {}", request.get("User-Agent", "")); + chassert(config().type == PrometheusRequestHandlerConfig::Type::RemoteRead); + } + + void handlingRequestWithContext([[maybe_unused]] HTTPServerRequest & request, [[maybe_unused]] HTTPServerResponse & response) override + { +#if USE_PROMETHEUS_PROTOBUFS + checkHTTPHeader(request, "Content-Type", "application/x-protobuf"); + checkHTTPHeader(request, "Content-Encoding", "snappy"); + + auto table = DatabaseCatalog::instance().getTable(StorageID{config().time_series_table_name}, context); + PrometheusRemoteReadProtocol protocol{table, context}; + + ProtobufZeroCopyInputStreamFromReadBuffer zero_copy_input_stream{ + std::make_unique(wrapReadBufferReference(request.getStream()))}; + + prometheus::ReadRequest read_request; + if (!read_request.ParseFromZeroCopyStream(&zero_copy_input_stream)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot parse ReadRequest"); + + prometheus::ReadResponse read_response; + + size_t num_queries = read_request.queries_size(); + for (size_t i = 0; i != num_queries; ++i) + { + const auto & query = read_request.queries(static_cast(i)); + auto & new_query_result = *read_response.add_results(); + protocol.readTimeSeries( + *new_query_result.mutable_timeseries(), + query.start_timestamp_ms(), + query.end_timestamp_ms(), + query.matchers(), + query.hints()); + } + +# if 0 + LOG_DEBUG(log, "ReadResponse = {}", read_response.DebugString()); +# endif + + response.setContentType("application/x-protobuf"); + response.set("Content-Encoding", "snappy"); + + ProtobufZeroCopyOutputStreamFromWriteBuffer zero_copy_output_stream{std::make_unique(getOutputStream(response))}; + read_response.SerializeToZeroCopyStream(&zero_copy_output_stream); + zero_copy_output_stream.finalize(); + +#else + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Prometheus remote read protocol is disabled"); +#endif + } +}; + PrometheusRequestHandler::PrometheusRequestHandler( IServer & server_, @@ -265,6 +328,11 @@ void PrometheusRequestHandler::createImpl() impl = std::make_unique(*this); return; } + case PrometheusRequestHandlerConfig::Type::RemoteRead: + { + impl = std::make_unique(*this); + return; + } } UNREACHABLE(); } diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index b4d1e849bdd..6df718f2a05 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -11,7 +11,7 @@ class IServer; class PrometheusMetricsWriter; class WriteBufferFromHTTPServerResponse; -/// Handles requests for prometheus protocols 
(expose_metrics, remote_write). +/// Handles requests for prometheus protocols (expose_metrics, remote_write, remote-read). class PrometheusRequestHandler : public HTTPRequestHandler { public: @@ -44,6 +44,7 @@ private: class ImplWithContext; class ExposeMetricsImpl; class RemoteWriteImpl; + class RemoteReadImpl; std::unique_ptr impl; String http_method; diff --git a/src/Server/PrometheusRequestHandlerConfig.h b/src/Server/PrometheusRequestHandlerConfig.h index d8fd03f19b2..d01d28f702c 100644 --- a/src/Server/PrometheusRequestHandlerConfig.h +++ b/src/Server/PrometheusRequestHandlerConfig.h @@ -16,6 +16,9 @@ struct PrometheusRequestHandlerConfig /// Handles Prometheus remote-write protocol. RemoteWrite, + + /// Handles Prometheus remote-read protocol. + RemoteRead, }; Type type = Type::ExposeMetrics; diff --git a/src/Server/PrometheusRequestHandlerFactory.cpp b/src/Server/PrometheusRequestHandlerFactory.cpp index d4b1ab6cd93..52f1d3b64c1 100644 --- a/src/Server/PrometheusRequestHandlerFactory.cpp +++ b/src/Server/PrometheusRequestHandlerFactory.cpp @@ -74,6 +74,18 @@ namespace return res; } + /// Parses a configuration like this: + /// + /// db.time_series_table_name
+ PrometheusRequestHandlerConfig parseRemoteReadConfig(const Poco::Util::AbstractConfiguration & config, const String & config_prefix) + { + PrometheusRequestHandlerConfig res; + res.type = PrometheusRequestHandlerConfig::Type::RemoteRead; + res.time_series_table_name = parseTableNameFromConfig(config, config_prefix); + parseCommonConfig(config, res); + return res; + } + /// Parses a configuration like this: /// expose_metrics /// true @@ -91,6 +103,8 @@ namespace return parseExposeMetricsConfig(config, config_prefix); else if (type == "remote_write") return parseRemoteWriteConfig(config, config_prefix); + else if (type == "remote_read") + return parseRemoteReadConfig(config, config_prefix); else throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown type {} is specified in the configuration for a prometheus protocol", type); } diff --git a/src/Server/PrometheusRequestHandlerFactory.h b/src/Server/PrometheusRequestHandlerFactory.h index a7227b12f7e..c52395ca93f 100644 --- a/src/Server/PrometheusRequestHandlerFactory.h +++ b/src/Server/PrometheusRequestHandlerFactory.h @@ -81,6 +81,13 @@ HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactory( /// db.time_series_table_name
/// /// +/// +/// /read +/// +/// remote_read +/// db.time_series_table_name
+///
+///
/// HTTPRequestHandlerFactoryPtr createPrometheusHandlerFactoryForHTTPRule( IServer & server, diff --git a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp new file mode 100644 index 00000000000..bb7adb2b0df --- /dev/null +++ b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp @@ -0,0 +1,444 @@ +#include + +#include "config.h" +#if USE_PROMETHEUS_PROTOBUFS + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_REQUEST_PARAMETER; +} + + +namespace +{ + /// Makes an ASTIdentifier for a column of the specified table. + ASTPtr makeASTColumn(const StorageID & table_id, const String & column_name) + { + return std::make_shared(Strings{table_id.database_name, table_id.table_name, column_name}); + } + + /// Makes an AST for condition `data_table.timestamp >= min_timestamp_ms` + ASTPtr makeASTTimestampGreaterOrEquals(Int64 min_timestamp_ms, const StorageID & data_table_id) + { + return makeASTFunction("greaterOrEquals", + makeASTColumn(data_table_id, TimeSeriesColumnNames::Timestamp), + std::make_shared(Field{DecimalField{DateTime64{min_timestamp_ms}, 3}})); + } + + /// Makes an AST for condition `data_table.timestamp <= max_timestamp_ms` + ASTPtr makeASTTimestampLessOrEquals(Int64 max_timestamp_ms, const StorageID & data_table_id) + { + return makeASTFunction("lessOrEquals", + makeASTColumn(data_table_id, TimeSeriesColumnNames::Timestamp), + std::make_shared(Field{DecimalField{DateTime64{max_timestamp_ms}, 3}})); + } + + /// Makes an AST for the expression referencing a tag value. + ASTPtr makeASTLabelName(const String & label_name, const StorageID & tags_table_id, const std::unordered_map & column_name_by_tag_name) + { + if (label_name == TimeSeriesTagNames::MetricName) + return makeASTColumn(tags_table_id, TimeSeriesColumnNames::MetricName); + + auto it = column_name_by_tag_name.find(label_name); + if (it != column_name_by_tag_name.end()) + return makeASTColumn(tags_table_id, it->second); + + /// arrayElement() can be used to extract a value from a Map too. + return makeASTFunction("arrayElement", makeASTColumn(tags_table_id, TimeSeriesColumnNames::Tags), std::make_shared(label_name)); + } + + /// Makes an AST for a label matcher, for example `metric_name == 'value'` or `NOT match(labels['label_name'], 'regexp')`. 
+ ASTPtr makeASTLabelMatcher( + const prometheus::LabelMatcher & label_matcher, + const StorageID & tags_table_id, + const std::unordered_map & column_name_by_tag_name) + { + const auto & label_name = label_matcher.name(); + const auto & label_value = label_matcher.value(); + auto type = label_matcher.type(); + + if (type == prometheus::LabelMatcher::EQ) + return makeASTFunction("equals", makeASTLabelName(label_name, tags_table_id, column_name_by_tag_name), std::make_shared(label_value)); + else if (type == prometheus::LabelMatcher::NEQ) + return makeASTFunction("notEquals", makeASTLabelName(label_name, tags_table_id, column_name_by_tag_name), std::make_shared(label_value)); + else if (type == prometheus::LabelMatcher::RE) + return makeASTFunction("match", makeASTLabelName(label_name, tags_table_id, column_name_by_tag_name), std::make_shared(label_value)); + else if (type == prometheus::LabelMatcher::NRE) + return makeASTFunction("not", makeASTFunction("match", makeASTLabelName(label_name, tags_table_id, column_name_by_tag_name), std::make_shared(label_value))); + else + throw Exception(ErrorCodes::BAD_REQUEST_PARAMETER, "Unexpected type of label matcher: {}", type); + } + + /// Makes an AST checking that tags match a specified label matcher and that timestamp is in range [min_timestamp_ms, max_timestamp_ms]. + ASTPtr makeASTFilterForReadingTimeSeries( + const google::protobuf::RepeatedPtrField & label_matcher, + Int64 min_timestamp_ms, + Int64 max_timestamp_ms, + const StorageID & data_table_id, + const StorageID & tags_table_id, + const std::unordered_map & column_name_by_tag_name) + { + ASTs filters; + + if (min_timestamp_ms) + filters.push_back(makeASTTimestampGreaterOrEquals(min_timestamp_ms, data_table_id)); + + if (max_timestamp_ms) + filters.push_back(makeASTTimestampLessOrEquals(max_timestamp_ms, data_table_id)); + + for (const auto & label_matcher_element : label_matcher) + filters.push_back(makeASTLabelMatcher(label_matcher_element, tags_table_id, column_name_by_tag_name)); + + if (filters.empty()) + return nullptr; + + return makeASTForLogicalAnd(std::move(filters)); + } + + /// Makes a mapping from a tag name to a column name. + std::unordered_map makeColumnNameByTagNameMap(const TimeSeriesSettings & storage_settings) + { + std::unordered_map res; + const Map & tags_to_columns = storage_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & tag_name = tuple.at(0).safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + res[tag_name] = column_name; + } + return res; + } + + /// The function builds a SELECT query for reading time series: + /// SELECT tags_table.metric_name, tags_table.tag_column1, ... tags_table.tag_columnN, tags_table.tags, + /// groupArray(CAST(data_table.timestamp, 'DateTime64(3)'), CAST(data_table.value, 'Float64')) + /// FROM data_table + /// SEMI LEFT JOIN tag_table ON data_table.id = tags_table.id + /// WHERE filter + /// GROUP BY tags_table.tag_column1, ..., tags_table.tag_columnN, tags_table.tags + ASTPtr buildSelectQueryForReadingTimeSeries( + Int64 min_timestamp_ms, + Int64 max_timestamp_ms, + const google::protobuf::RepeatedPtrField & label_matcher, + const TimeSeriesSettings & time_series_settings, + const StorageID & data_table_id, + const StorageID & tags_table_id) + { + auto select_query = std::make_shared(); + + /// SELECT tags_table.metric_name, any(tags_table.tag_column1), ... 
any(tags_table.tag_columnN), any(tags_table.tags), + /// groupArray(data_table.timestamp, data_table.value) + { + auto exp_list = std::make_shared(); + + exp_list->children.push_back( + makeASTColumn(tags_table_id, TimeSeriesColumnNames::MetricName)); + + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + exp_list->children.push_back( + makeASTColumn(tags_table_id, column_name)); + } + + exp_list->children.push_back( + makeASTColumn(tags_table_id, TimeSeriesColumnNames::Tags)); + + exp_list->children.push_back( + makeASTFunction("groupArray", + makeASTFunction("tuple", + makeASTFunction("CAST", makeASTColumn(data_table_id, TimeSeriesColumnNames::Timestamp), std::make_shared("DateTime64(3)")), + makeASTFunction("CAST", makeASTColumn(data_table_id, TimeSeriesColumnNames::Value), std::make_shared("Float64"))))); + + select_query->setExpression(ASTSelectQuery::Expression::SELECT, exp_list); + } + + /// FROM data_table + auto tables = std::make_shared(); + + { + auto table = std::make_shared(); + auto table_exp = std::make_shared(); + table_exp->database_and_table_name = std::make_shared(data_table_id); + table_exp->children.emplace_back(table_exp->database_and_table_name); + + table->table_expression = table_exp; + tables->children.push_back(table); + } + + /// SEMI LEFT JOIN tags_table ON data_table.id = tags_table.id + { + auto table = std::make_shared(); + + auto table_join = std::make_shared(); + table_join->kind = JoinKind::Left; + table_join->strictness = JoinStrictness::Semi; + + table_join->on_expression = makeASTFunction("equals", makeASTColumn(data_table_id, TimeSeriesColumnNames::ID), makeASTColumn(tags_table_id, TimeSeriesColumnNames::ID)); + table->table_join = table_join; + + auto table_exp = std::make_shared(); + table_exp->database_and_table_name = std::make_shared(tags_table_id); + table_exp->children.emplace_back(table_exp->database_and_table_name); + + table->table_expression = table_exp; + tables->children.push_back(table); + + select_query->setExpression(ASTSelectQuery::Expression::TABLES, tables); + } + + auto column_name_by_tag_name = makeColumnNameByTagNameMap(time_series_settings); + + /// WHERE + if (auto where = makeASTFilterForReadingTimeSeries(label_matcher, min_timestamp_ms, max_timestamp_ms, data_table_id, tags_table_id, column_name_by_tag_name)) + select_query->setExpression(ASTSelectQuery::Expression::WHERE, std::move(where)); + + /// GROUP BY tags_table.metric_name, tags_table.tag_column1, ..., tags_table.tag_columnN, tags_table.tags + { + auto exp_list = std::make_shared(); + + exp_list->children.push_back( + makeASTColumn(tags_table_id, TimeSeriesColumnNames::MetricName)); + + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + exp_list->children.push_back( + makeASTColumn(tags_table_id, column_name)); + } + + exp_list->children.push_back(makeASTColumn(tags_table_id, TimeSeriesColumnNames::Tags)); + + select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, exp_list); + } + + return select_query; + } + + /// Sorts a list of pairs {tag_name, tag_value} by tag name. 
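+    /// Labels of each time series are sorted lexicographically before the response is serialized,
+    /// which is what clients of the remote-read protocol expect.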
+ void sortLabelsByName(std::vector> & labels) + { + auto less_by_label_name = [](const std::pair & left, const std::pair & right) + { + return left.first < right.first; + }; + std::sort(labels.begin(), labels.end(), less_by_label_name); + } + + /// Sorts a list of pairs {timestamp, value} by timestamp. + void sortTimeSeriesByTimestamp(std::vector> & time_series) + { + auto less_by_timestamp = [](const std::pair & left, const std::pair & right) + { + return left.first < right.first; + }; + std::sort(time_series.begin(), time_series.end(), less_by_timestamp); + } + + /// Converts a block generated by the SELECT query for converting time series to the protobuf format. + void convertBlockToProtobuf( + Block && block, + google::protobuf::RepeatedPtrField & out_time_series, + const StorageID & time_series_storage_id, + const TimeSeriesSettings & time_series_settings) + { + size_t num_rows = block.rows(); + if (!num_rows) + return; + + size_t column_index = 0; + + /// We analyze columns sequentially. + auto get_next_column_with_type = [&] -> const ColumnWithTypeAndName & { return block.getByPosition(column_index++); }; + auto get_next_column = [&] -> const IColumn & { return *(get_next_column_with_type().column); }; + + /// Column "metric_name". + const auto & metric_name_column_with_type = get_next_column_with_type(); + TimeSeriesColumnsValidator validator{time_series_storage_id, time_series_settings}; + validator.validateColumnForMetricName(metric_name_column_with_type); + const auto & metric_name_column = *metric_name_column_with_type.column; + + /// Columns corresponding to specific tags specified in the "tags_to_columns" setting. + std::unordered_map column_by_tag_name; + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & tag_name = tuple.at(0).safeGet(); + const auto & column_with_type = get_next_column_with_type(); + validator.validateColumnForTagValue(column_with_type); + const auto & column = *column_with_type.column; + column_by_tag_name[tag_name] = &column; + } + + /// Column "tags". + const auto & tags_column_with_type = get_next_column_with_type(); + validator.validateColumnForTagsMap(tags_column_with_type); + const auto & tags_column = checkAndGetColumn(*tags_column_with_type.column); + const auto & tags_names = tags_column.getNestedData().getColumn(0); + const auto & tags_values = tags_column.getNestedData().getColumn(1); + const auto & tags_offsets = tags_column.getNestedColumn().getOffsets(); + + /// Column containing time series: groupArray(CAST(data_table.timestamp, 'DateTime64(3)'), CAST(data_table.value, 'Float64')) + const auto & time_series_column = checkAndGetColumn(get_next_column()); + const auto & time_series_timestamps = checkAndGetColumn>(checkAndGetColumn(time_series_column.getData()).getColumn(0)); + const auto & time_series_values = checkAndGetColumn(checkAndGetColumn(time_series_column.getData()).getColumn(1)); + const auto & time_series_offsets = time_series_column.getOffsets(); + + /// We will sort labels lexicographically and time series by timestamp before sending them to a client. + std::vector> labels; + std::vector> time_series; + + for (size_t i = 0; i != num_rows; ++i) + { + /// Collect labels. 
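+            /// Labels of a row come from three places: the "metric_name" column, the columns mapped to
+            /// specific tags via the "tags_to_columns" setting, and the generic "tags" map.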
+ size_t num_labels = 1; /* 1 for a metric name */ + + for (const auto & [_, column] : column_by_tag_name) + { + if (!column->isNullAt(i) && !column->getDataAt(i).empty()) + ++num_labels; + } + + size_t tags_start_offset = tags_offsets[i - 1]; + size_t tags_end_offset = tags_offsets[i]; + num_labels += tags_end_offset - tags_start_offset; + + labels.clear(); + labels.reserve(num_labels); + + labels.emplace_back(TimeSeriesTagNames::MetricName, metric_name_column.getDataAt(i)); + + for (const auto & [tag_name, column] : column_by_tag_name) + { + if (!column->isNullAt(i) && !column->getDataAt(i).empty()) + labels.emplace_back(tag_name, column->getDataAt(i)); + } + + for (size_t j = tags_start_offset; j != tags_end_offset; ++j) + { + std::string_view tag_name{tags_names.getDataAt(j)}; + std::string_view tag_value{tags_values.getDataAt(j)}; + labels.emplace_back(tag_name, tag_value); + } + + /// Sort labels. + sortLabelsByName(labels); + + /// Collect time series. + size_t time_series_start_offset = time_series_offsets[i - 1]; + size_t time_series_end_offset = time_series_offsets[i]; + size_t num_time_series = time_series_end_offset - time_series_start_offset; + + time_series.clear(); + time_series.reserve(num_time_series); + + for (size_t j = time_series_start_offset; j != time_series_end_offset; ++j) + time_series.emplace_back(time_series_timestamps.getElement(j), time_series_values.getElement(j)); + + /// Sort time series. + sortTimeSeriesByTimestamp(time_series); + + /// Prepare a result. + auto & new_time_series = *out_time_series.Add(); + + for (const auto & [label_name, label_value] : labels) + { + auto & new_label = *new_time_series.add_labels(); + new_label.set_name(label_name); + new_label.set_value(label_value); + } + + for (const auto & [timestamp, value] : time_series) + { + auto & new_sample = *new_time_series.add_samples(); + new_sample.set_timestamp(timestamp); + new_sample.set_value(value); + } + } + } +} + + +PrometheusRemoteReadProtocol::PrometheusRemoteReadProtocol(ConstStoragePtr time_series_storage_, const ContextPtr & context_) + : WithContext{context_} + , time_series_storage(storagePtrToTimeSeries(time_series_storage_)) + , log(getLogger("PrometheusRemoteReadProtocol")) +{ +} + +PrometheusRemoteReadProtocol::~PrometheusRemoteReadProtocol() = default; + +void PrometheusRemoteReadProtocol::readTimeSeries(google::protobuf::RepeatedPtrField & out_time_series, + Int64 start_timestamp_ms, + Int64 end_timestamp_ms, + const google::protobuf::RepeatedPtrField & label_matcher, + const prometheus::ReadHints &) +{ + out_time_series.Clear(); + + auto time_series_storage_id = time_series_storage->getStorageID(); + auto time_series_settings = time_series_storage->getStorageSettingsPtr(); + auto data_table_id = time_series_storage->getTargetTableId(ViewTarget::Data); + auto tags_table_id = time_series_storage->getTargetTableId(ViewTarget::Tags); + + ASTPtr select_query = buildSelectQueryForReadingTimeSeries( + start_timestamp_ms, end_timestamp_ms, label_matcher, *time_series_settings, data_table_id, tags_table_id); + + LOG_TRACE(log, "{}: Executing query {}", + time_series_storage_id.getNameForLogs(), select_query); + + InterpreterSelectQuery interpreter(select_query, getContext(), SelectQueryOptions{}); + BlockIO io = interpreter.execute(); + PullingPipelineExecutor executor(io.pipeline); + + Block block; + while (executor.pull(block)) + { + LOG_TRACE(log, "{}: Pulled block with {} columns and {} rows", + time_series_storage_id.getNameForLogs(), block.columns(), block.rows()); + + if 
(block) + convertBlockToProtobuf(std::move(block), out_time_series, time_series_storage_id, *time_series_settings); + } + + LOG_TRACE(log, "{}: {} time series read", + time_series_storage_id.getNameForLogs(), out_time_series.size()); +} + +} + +#endif diff --git a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.h b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.h new file mode 100644 index 00000000000..e10e1f8c8cf --- /dev/null +++ b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.h @@ -0,0 +1,36 @@ +#pragma once + +#include "config.h" +#if USE_PROMETHEUS_PROTOBUFS + +#include +#include +#include + + +namespace DB +{ +class StorageTimeSeries; + +/// Helper class to support the prometheus remote read protocol. +class PrometheusRemoteReadProtocol : public WithContext +{ +public: + PrometheusRemoteReadProtocol(ConstStoragePtr time_series_storage_, const ContextPtr & context_); + ~PrometheusRemoteReadProtocol(); + + /// Reads time series to send to client by remote read protocol. + void readTimeSeries(google::protobuf::RepeatedPtrField & out_time_series, + Int64 start_timestamp_ms, + Int64 end_timestamp_ms, + const google::protobuf::RepeatedPtrField & label_matcher, + const prometheus::ReadHints & read_hints); + +private: + std::shared_ptr time_series_storage; + Poco::LoggerPtr log; +}; + +} + +#endif diff --git a/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp index 0ce5528939a..a2308857e2e 100644 --- a/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp +++ b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.cpp @@ -14,6 +14,7 @@ namespace DB namespace ErrorCodes { + extern const int ILLEGAL_COLUMN; extern const int INCOMPATIBLE_COLUMNS; extern const int THERE_IS_NO_COLUMN; } @@ -199,6 +200,11 @@ void TimeSeriesColumnsValidator::validateColumnForMetricName(const ColumnDescrip validateColumnForTagValue(column); } +void TimeSeriesColumnsValidator::validateColumnForMetricName(const ColumnWithTypeAndName & column) const +{ + validateColumnForTagValue(column); +} + void TimeSeriesColumnsValidator::validateColumnForTagValue(const ColumnDescription & column) const { if (!isString(removeLowCardinalityAndNullable(column.type))) @@ -208,6 +214,15 @@ void TimeSeriesColumnsValidator::validateColumnForTagValue(const ColumnDescripti } } +void TimeSeriesColumnsValidator::validateColumnForTagValue(const ColumnWithTypeAndName & column) const +{ + if (!isString(removeLowCardinalityAndNullable(column.type))) + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column {} has illegal data type {}, expected String or LowCardinality(String)", + column.name, column.type->getName()); + } +} + void TimeSeriesColumnsValidator::validateColumnForTagsMap(const ColumnDescription & column) const { if (!isMap(column.type) @@ -219,6 +234,17 @@ void TimeSeriesColumnsValidator::validateColumnForTagsMap(const ColumnDescriptio } } +void TimeSeriesColumnsValidator::validateColumnForTagsMap(const ColumnWithTypeAndName & column) const +{ + if (!isMap(column.type) + || !isString(removeLowCardinality(typeid_cast(*column.type).getKeyType())) + || !isString(removeLowCardinality(typeid_cast(*column.type).getValueType()))) + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Column {} has illegal data type {}, expected Map(String, String) or Map(LowCardinality(String), String)", + column.name, column.type->getName()); + } +} + void TimeSeriesColumnsValidator::validateColumnForMetricFamilyName(const ColumnDescription & column) const { if 
(!isString(removeLowCardinalityAndNullable(column.type))) diff --git a/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h index cafee9da03c..43a54bf2ad6 100644 --- a/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h +++ b/src/Storages/TimeSeries/TimeSeriesColumnsValidator.h @@ -8,6 +8,7 @@ namespace DB { class ColumnsDescription; struct ColumnDescription; +struct ColumnWithTypeAndName; struct TimeSeriesSettings; /// Checks the types of columns of a TimeSeries table. @@ -32,8 +33,11 @@ public: void validateColumnForValue(const ColumnDescription & column) const; void validateColumnForMetricName(const ColumnDescription & column) const; + void validateColumnForMetricName(const ColumnWithTypeAndName & column) const; void validateColumnForTagValue(const ColumnDescription & column) const; + void validateColumnForTagValue(const ColumnWithTypeAndName & column) const; void validateColumnForTagsMap(const ColumnDescription & column) const; + void validateColumnForTagsMap(const ColumnWithTypeAndName & column) const; void validateColumnForMetricFamilyName(const ColumnDescription & column) const; void validateColumnForType(const ColumnDescription & column) const; From a5febd4ea0a5609e3acc00751c5a4d8919eb14e7 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 27 May 2024 21:40:50 +0200 Subject: [PATCH 1518/2361] Add table functions timeSeriesData(), timeSeriesTags(), timeSeriesMetrics() to retrieve the target tables of a TimeSeries table. --- .../TableFunctionTimeSeries.cpp | 128 ++++++++++++++++++ src/TableFunctions/TableFunctionTimeSeries.h | 42 ++++++ src/TableFunctions/registerTableFunctions.cpp | 1 + src/TableFunctions/registerTableFunctions.h | 2 + 4 files changed, 173 insertions(+) create mode 100644 src/TableFunctions/TableFunctionTimeSeries.cpp create mode 100644 src/TableFunctions/TableFunctionTimeSeries.h diff --git a/src/TableFunctions/TableFunctionTimeSeries.cpp b/src/TableFunctions/TableFunctionTimeSeries.cpp new file mode 100644 index 00000000000..62ea088eba0 --- /dev/null +++ b/src/TableFunctions/TableFunctionTimeSeries.cpp @@ -0,0 +1,128 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + + +template +void TableFunctionTimeSeriesTarget::parseArguments(const ASTPtr & ast_function, ContextPtr context) +{ + const auto & args_func = ast_function->as(); + + if (!args_func.arguments) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table function '{}' must have arguments.", name); + + auto & args = args_func.arguments->children; + + if ((args.size() != 1) && (args.size() != 2)) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Table function '{}' requires one or two arguments: {}([database, ] time_series_table)", name, name); + + if (args.size() == 1) + { + /// timeSeriesMetrics( [my_db.]my_time_series_table ) + if (const auto * id = args[0]->as()) + { + if (auto table_id = id->createTable()) + time_series_storage_id = table_id->getTableId(); + } + } + + if (time_series_storage_id.empty()) + { + for (auto & arg : args) + arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context); + + if (args.size() == 1) + { + /// timeSeriesMetrics( 'my_time_series_table' ) + time_series_storage_id.table_name = checkAndGetLiteralArgument(args[0], "table_name"); + } + else + { + /// timeSeriesMetrics( 'mydb', 
'my_time_series_table' ) + time_series_storage_id.database_name = checkAndGetLiteralArgument(args[0], "database_name"); + time_series_storage_id.table_name = checkAndGetLiteralArgument(args[1], "table_name"); + } + } + + if (time_series_storage_id.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Couldn't get a table name from the arguments of the {} table function", name); + + time_series_storage_id = context->resolveStorageID(time_series_storage_id); + target_table_type_name = getTargetTable(context)->getName(); +} + + +template +StoragePtr TableFunctionTimeSeriesTarget::getTargetTable(const ContextPtr & context) const +{ + auto time_series_storage = storagePtrToTimeSeries(DatabaseCatalog::instance().getTable(time_series_storage_id, context)); + return time_series_storage->getTargetTable(target_kind, context); +} + + +template +StoragePtr TableFunctionTimeSeriesTarget::executeImpl( + const ASTPtr & /* ast_function */, + ContextPtr context, + const String & /* table_name */, + ColumnsDescription /* cached_columns */, + bool /* is_insert_query */) const +{ + return getTargetTable(context); +} + +template +ColumnsDescription TableFunctionTimeSeriesTarget::getActualTableStructure(ContextPtr context, bool /* is_insert_query */) const +{ + return getTargetTable(context)->getInMemoryMetadataPtr()->columns; +} + +template +const char * TableFunctionTimeSeriesTarget::getStorageTypeName() const +{ + return target_table_type_name.c_str(); +} + + +void registerTableFunctionTimeSeries(TableFunctionFactory & factory) +{ + factory.registerFunction>( + {.documentation = { + .description=R"(Provides direct access to the 'data' target table for a specified TimeSeries table.)", + .examples{{"timeSeriesData", "SELECT * from timeSeriesData('mydb', 'time_series_table');", ""}}, + .categories{"Time Series"}} + }); + factory.registerFunction>( + {.documentation = { + .description=R"(Provides direct access to the 'tags' target table for a specified TimeSeries table.)", + .examples{{"timeSeriesTags", "SELECT * from timeSeriesTags('mydb', 'time_series_table');", ""}}, + .categories{"Time Series"}} + }); + factory.registerFunction>( + {.documentation = { + .description=R"(Provides direct access to the 'metrics' target table for a specified TimeSeries table.)", + .examples{{"timeSeriesMetrics", "SELECT * from timeSeriesMetrics('mydb', 'time_series_table');", ""}}, + .categories{"Time Series"}} + }); +} + +} diff --git a/src/TableFunctions/TableFunctionTimeSeries.h b/src/TableFunctions/TableFunctionTimeSeries.h new file mode 100644 index 00000000000..57654413fe4 --- /dev/null +++ b/src/TableFunctions/TableFunctionTimeSeries.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +/// Table functions timeSeriesData('mydb', 'my_ts_table'), timeSeriesTags('mydb', 'my_ts_table'), timeSeriesMetrics('mydb', 'my_ts_table') +/// return the data table, the tags table, and the metrics table respectively associated with any TimeSeries table mydb.my_ts_table +template +class TableFunctionTimeSeriesTarget : public ITableFunction +{ +public: + static constexpr auto name = (target_kind == ViewTarget::Data) + ? "timeSeriesData" + : ((target_kind == ViewTarget::Tags) ? 
"timeSeriesTags" : "timeSeriesMetrics"); + + String getName() const override { return name; } + +private: + void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; + + StoragePtr executeImpl( + const ASTPtr & ast_function, + ContextPtr context, + const std::string & table_name, + ColumnsDescription cached_columns, + bool is_insert_query) const override; + + ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override; + const char * getStorageTypeName() const override; + + StoragePtr getTargetTable(const ContextPtr & context) const; + + StorageID time_series_storage_id = StorageID::createEmpty(); + String target_table_type_name; +}; + +} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index ca4913898f9..42987a6a5b9 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -56,6 +56,7 @@ void registerTableFunctions() registerTableFunctionFormat(factory); registerTableFunctionExplain(factory); + registerTableFunctionTimeSeries(factory); registerTableFunctionObjectStorage(factory); registerTableFunctionObjectStorageCluster(factory); diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index efde4d6dcdc..1957888d478 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -67,6 +67,8 @@ void registerTableFunctionObjectStorage(TableFunctionFactory & factory); void registerTableFunctionObjectStorageCluster(TableFunctionFactory & factory); void registerDataLakeTableFunctions(TableFunctionFactory & factory); +void registerTableFunctionTimeSeries(TableFunctionFactory & factory); + void registerTableFunctions(); } From 30231797c7e48549af7d551be05ec798dde79abe Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 23 Jun 2024 16:10:42 +0200 Subject: [PATCH 1519/2361] Add columns `min_time` and `max_time`. 
--- .../PrometheusRemoteReadProtocol.cpp | 32 ++++++++++++++- .../PrometheusRemoteWriteProtocol.cpp | 41 +++++++++++++++++++ .../TimeSeries/TimeSeriesColumnNames.h | 4 ++ .../TimeSeriesDefinitionNormalizer.cpp | 40 +++++++++++++++++- .../TimeSeriesInnerTablesCreator.cpp | 23 +++++++++++ src/Storages/TimeSeries/TimeSeriesSettings.h | 3 ++ 6 files changed, 140 insertions(+), 3 deletions(-) diff --git a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp index bb7adb2b0df..d6d258f5ff6 100644 --- a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp @@ -61,6 +61,22 @@ namespace std::make_shared(Field{DecimalField{DateTime64{max_timestamp_ms}, 3}})); } + /// Makes an AST for condition `tags_table.max_time >= min_timestamp_ms` + ASTPtr makeASTMaxTimeGreaterOrEquals(Int64 min_timestamp_ms, const StorageID & tags_table_id) + { + return makeASTFunction("greaterOrEquals", + makeASTColumn(tags_table_id, TimeSeriesColumnNames::MaxTime), + std::make_shared(Field{DecimalField{DateTime64{min_timestamp_ms}, 3}})); + } + + /// Makes an AST for condition `tags_table.min_time <= max_timestamp_ms` + ASTPtr makeASTMinTimeLessOrEquals(Int64 max_timestamp_ms, const StorageID & tags_table_id) + { + return makeASTFunction("lessOrEquals", + makeASTColumn(tags_table_id, TimeSeriesColumnNames::MinTime), + std::make_shared(Field{DecimalField{DateTime64{max_timestamp_ms}, 3}})); + } + /// Makes an AST for the expression referencing a tag value. ASTPtr makeASTLabelName(const String & label_name, const StorageID & tags_table_id, const std::unordered_map & column_name_by_tag_name) { @@ -104,15 +120,24 @@ namespace Int64 max_timestamp_ms, const StorageID & data_table_id, const StorageID & tags_table_id, - const std::unordered_map & column_name_by_tag_name) + const std::unordered_map & column_name_by_tag_name, + bool filter_by_min_time_and_max_time) { ASTs filters; if (min_timestamp_ms) + { filters.push_back(makeASTTimestampGreaterOrEquals(min_timestamp_ms, data_table_id)); + if (filter_by_min_time_and_max_time) + filters.push_back(makeASTMaxTimeGreaterOrEquals(min_timestamp_ms, tags_table_id)); + } if (max_timestamp_ms) + { filters.push_back(makeASTTimestampLessOrEquals(max_timestamp_ms, data_table_id)); + if (filter_by_min_time_and_max_time) + filters.push_back(makeASTMinTimeLessOrEquals(max_timestamp_ms, tags_table_id)); + } for (const auto & label_matcher_element : label_matcher) filters.push_back(makeASTLabelMatcher(label_matcher_element, tags_table_id, column_name_by_tag_name)); @@ -221,8 +246,11 @@ namespace auto column_name_by_tag_name = makeColumnNameByTagNameMap(time_series_settings); /// WHERE - if (auto where = makeASTFilterForReadingTimeSeries(label_matcher, min_timestamp_ms, max_timestamp_ms, data_table_id, tags_table_id, column_name_by_tag_name)) + if (auto where = makeASTFilterForReadingTimeSeries(label_matcher, min_timestamp_ms, max_timestamp_ms, data_table_id, tags_table_id, + column_name_by_tag_name, time_series_settings.filter_by_min_time_and_max_time)) + { select_query->setExpression(ASTSelectQuery::Expression::WHERE, std::move(where)); + } /// GROUP BY tags_table.metric_name, tags_table.tag_column1, ..., tags_table.tag_columnN, tags_table.tags { diff --git a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp index b3845e88406..ca62a2729dc 100644 --- 
a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp @@ -162,6 +162,23 @@ namespace return timestamp_ms / DecimalUtils::scaleMultiplier(3 - scale); } + /// Finds min time and max time in a time series. + std::pair findMinTimeAndMaxTime(const google::protobuf::RepeatedPtrField & samples) + { + chassert(!samples.empty()); + Int64 min_time = std::numeric_limits::max(); + Int64 max_time = std::numeric_limits::min(); + for (const auto & sample : samples) + { + Int64 timestamp = sample.timestamp(); + if (timestamp < min_time) + min_time = timestamp; + if (timestamp > max_time) + max_time = timestamp; + } + return {min_time, max_time}; + } + struct BlocksToInsert { std::vector> blocks; @@ -270,6 +287,23 @@ namespace IColumn & all_tags_values = all_tags_column.getNestedData().getColumn(1); auto & all_tags_offsets = all_tags_column.getNestedColumn().getOffsets(); + /// Columns "min_time" and "max_time". + IColumn * min_time_column = nullptr; + IColumn * max_time_column = nullptr; + UInt32 min_time_scale = 0; + UInt32 max_time_scale = 0; + if (time_series_settings.store_min_time_and_max_time) + { + const auto & min_time_description = get_column_description(TimeSeriesColumnNames::MinTime); + const auto & max_time_description = get_column_description(TimeSeriesColumnNames::MaxTime); + validator.validateColumnForTimestamp(min_time_description, min_time_scale); + validator.validateColumnForTimestamp(max_time_description, max_time_scale); + min_time_column = &make_column_for_tags_block(min_time_description); + max_time_column = &make_column_for_tags_block(max_time_description); + columns_to_fill_in_tags_table.emplace_back(min_time_column); + columns_to_fill_in_tags_table.emplace_back(max_time_column); + } + /// Prepare a block for inserting into the "tags" table. size_t current_row_in_tags = 0; for (size_t i = 0; i != static_cast(time_series.size()); ++i) @@ -314,6 +348,13 @@ namespace all_tags_offsets.push_back(all_tags_names.size()); tags_offsets.push_back(tags_names.size()); + if (time_series_settings.store_min_time_and_max_time) + { + auto [min_time, max_time] = findMinTimeAndMaxTime(element.samples()); + min_time_column->insert(scaleTimestamp(min_time, min_time_scale)); + max_time_column->insert(scaleTimestamp(max_time, max_time_scale)); + } + for (auto * column : columns_to_fill_in_tags_table) { if (column->size() == current_row_in_tags) diff --git a/src/Storages/TimeSeries/TimeSeriesColumnNames.h b/src/Storages/TimeSeries/TimeSeriesColumnNames.h index 9176ec5384a..d7b12fdeea8 100644 --- a/src/Storages/TimeSeries/TimeSeriesColumnNames.h +++ b/src/Storages/TimeSeries/TimeSeriesColumnNames.h @@ -24,6 +24,10 @@ struct TimeSeriesColumnNames /// This is a generated column, it's not stored anywhere, it's generated on the fly. static constexpr const char * AllTags = "all_tags"; + /// Contains the time range of a time series. 
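+    /// These columns are stored in the "tags" table when the 'store_min_time_and_max_time' setting is enabled,
+    /// and they are used to filter time series when the 'filter_by_min_time_and_max_time' setting is enabled.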
+ static constexpr const char * MinTime = "min_time"; + static constexpr const char * MaxTime = "max_time"; + /// The "metrics" table contains general information (metadata) about metrics: static constexpr const char * MetricFamilyName = "metric_family_name"; static constexpr const char * Type = "type"; diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp index 78f8afe2528..8c2f56d9858 100644 --- a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp @@ -95,6 +95,12 @@ void TimeSeriesDefinitionNormalizer::reorderColumns(ASTCreateQuery & create) con add_column_in_correct_order(TimeSeriesColumnNames::Tags); add_column_in_correct_order(TimeSeriesColumnNames::AllTags); + if (time_series_settings.store_min_time_and_max_time) + { + add_column_in_correct_order(TimeSeriesColumnNames::MinTime); + add_column_in_correct_order(TimeSeriesColumnNames::MaxTime); + } + /// Reorder columns for the "metrics" table. add_column_in_correct_order(TimeSeriesColumnNames::MetricFamilyName); add_column_in_correct_order(TimeSeriesColumnNames::Type); @@ -163,6 +169,14 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) auto get_string_to_string_map_type = [&] { return makeASTDataType("Map", get_string_type(), get_string_type()); }; auto get_lc_string_to_string_map_type = [&] { return makeASTDataType("Map", get_lc_string_type(), get_string_type()); }; + auto make_nullable = [&](std::shared_ptr type) + { + if (type->name == "Nullable") + return type; + else + return makeASTDataType("Nullable", type); + }; + /// Add missing columns for the "data" table. if (!is_next_column_named(TimeSeriesColumnNames::ID)) make_new_column(TimeSeriesColumnNames::ID, get_uuid_type()); @@ -170,6 +184,9 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) if (!is_next_column_named(TimeSeriesColumnNames::Timestamp)) make_new_column(TimeSeriesColumnNames::Timestamp, get_datetime_type()); + auto timestamp_column = typeid_cast>(columns[position - 1]); + auto timestamp_type = typeid_cast>(timestamp_column->type->ptr()); + if (!is_next_column_named(TimeSeriesColumnNames::Value)) make_new_column(TimeSeriesColumnNames::Value, get_float_type()); @@ -204,6 +221,15 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) make_new_column(TimeSeriesColumnNames::AllTags, get_string_to_string_map_type()); } + if (time_series_settings.store_min_time_and_max_time) + { + /// We use Nullable(DateTime64(3)) as the default type of the `min_time` and `max_time` columns. + /// It's nullable because it allows the aggregation (see aggregate_min_time_and_max_time) work correctly even + /// for rows in the "tags" table which doesn't have `min_time` and `max_time` (because they have no matching rows in the "data" table). + make_new_column(TimeSeriesColumnNames::MinTime, make_nullable(timestamp_type)); + make_new_column(TimeSeriesColumnNames::MaxTime, make_nullable(timestamp_type)); + } + /// Add missing columns for the "metrics" table. 
if (!is_next_column_named(TimeSeriesColumnNames::MetricFamilyName)) make_new_column(TimeSeriesColumnNames::MetricFamilyName, get_string_type()); @@ -374,7 +400,13 @@ void TimeSeriesDefinitionNormalizer::setInnerEngineByDefault(ViewTarget::Kind in case ViewTarget::Tags: { - inner_storage_def.set(inner_storage_def.engine, makeASTFunction("ReplacingMergeTree")); + String engine_name; + if (time_series_settings.aggregate_min_time_and_max_time) + engine_name = "AggregatingMergeTree"; + else + engine_name = "ReplacingMergeTree"; + + inner_storage_def.set(inner_storage_def.engine, makeASTFunction(engine_name)); inner_storage_def.engine->no_empty_args = false; if (!inner_storage_def.order_by && !inner_storage_def.primary_key && inner_storage_def.engine->name.ends_with("MergeTree")) @@ -386,6 +418,12 @@ void TimeSeriesDefinitionNormalizer::setInnerEngineByDefault(ViewTarget::Kind in order_by_list.push_back(std::make_shared(TimeSeriesColumnNames::MetricName)); order_by_list.push_back(std::make_shared(TimeSeriesColumnNames::ID)); + if (time_series_settings.store_min_time_and_max_time && !time_series_settings.aggregate_min_time_and_max_time) + { + order_by_list.push_back(std::make_shared(TimeSeriesColumnNames::MinTime)); + order_by_list.push_back(std::make_shared(TimeSeriesColumnNames::MaxTime)); + } + auto order_by_tuple = std::make_shared(); order_by_tuple->name = "tuple"; auto arguments_list = std::make_shared(); diff --git a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp index e43cba01cdb..5376ec2b124 100644 --- a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp +++ b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp @@ -1,5 +1,8 @@ #include +#include +#include +#include #include #include #include @@ -87,6 +90,26 @@ ColumnsDescription TimeSeriesInnerTablesCreator::getInnerTableColumnsDescription } columns.add(std::move(all_tags_column)); + /// Columns "min_time" and "max_time". + if (time_series_settings.store_min_time_and_max_time) + { + auto min_time_column = time_series_columns.get(TimeSeriesColumnNames::MinTime); + auto max_time_column = time_series_columns.get(TimeSeriesColumnNames::MaxTime); + if (time_series_settings.aggregate_min_time_and_max_time) + { + AggregateFunctionProperties properties; + auto min_function = AggregateFunctionFactory::instance().get("min", NullsAction::EMPTY, {min_time_column.type}, {}, properties); + auto custom_name = std::make_unique(min_function, DataTypes{min_time_column.type}, Array{}); + min_time_column.type = DataTypeFactory::instance().getCustom(std::make_unique(std::move(custom_name))); + + auto max_function = AggregateFunctionFactory::instance().get("max", NullsAction::EMPTY, {max_time_column.type}, {}, properties); + custom_name = std::make_unique(max_function, DataTypes{max_time_column.type}, Array{}); + max_time_column.type = DataTypeFactory::instance().getCustom(std::make_unique(std::move(custom_name))); + } + columns.add(std::move(min_time_column)); + columns.add(std::move(max_time_column)); + } + break; } diff --git a/src/Storages/TimeSeries/TimeSeriesSettings.h b/src/Storages/TimeSeries/TimeSeriesSettings.h index ea31aa8dac1..acbbc7e7806 100644 --- a/src/Storages/TimeSeries/TimeSeriesSettings.h +++ b/src/Storages/TimeSeries/TimeSeriesSettings.h @@ -9,6 +9,9 @@ class ASTStorage; #define LIST_OF_TIME_SERIES_SETTINGS(M, ALIAS) \ M(Map, tags_to_columns, Map{}, "Map specifying which tags should be put to separate columns of the 'tags' table. 
Syntax: {'tag1': 'column1', 'tag2' : column2, ...}", 0) \ + M(Bool, store_min_time_and_max_time, true, "If set to true then the table will store 'min_time' and 'max_time' for each time series", 0) \ + M(Bool, aggregate_min_time_and_max_time, true, "When creating an inner target 'tags' table, this flag enables using 'SimpleAggregateFunction(min, Nullable(DateTime64(3)))' instead of just 'Nullable(DateTime64(3))' as the type of the 'min_time' column, and the same for the 'max_time' column", 0) \ + M(Bool, filter_by_min_time_and_max_time, true, "If set to true then the table will use the 'min_time' and 'max_time' columns for filtering time series", 0) \ DECLARE_SETTINGS_TRAITS(TimeSeriesSettingsTraits, LIST_OF_TIME_SERIES_SETTINGS) From fae893e294ee26600146d4f67853fb74bb6ba8d1 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sun, 16 Jun 2024 12:35:48 +0200 Subject: [PATCH 1520/2361] Add TimeSeries setting `use_all_tags_column_to_generate_id`. --- .../PrometheusRemoteWriteProtocol.cpp | 29 +++++++++++++------ .../TimeSeriesDefinitionNormalizer.cpp | 17 ++++++++++- .../TimeSeriesInnerTablesCreator.cpp | 19 +++++++----- src/Storages/TimeSeries/TimeSeriesSettings.h | 1 + 4 files changed, 49 insertions(+), 17 deletions(-) diff --git a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp index ca62a2729dc..23d28894acc 100644 --- a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp @@ -280,12 +280,18 @@ namespace auto & tags_offsets = tags_column.getNestedColumn().getOffsets(); /// Column "all_tags". - const auto & all_tags_description = get_column_description(TimeSeriesColumnNames::AllTags); - validator.validateColumnForTagsMap(all_tags_description); - auto & all_tags_column = typeid_cast(make_column_for_tags_block(all_tags_description)); - IColumn & all_tags_names = all_tags_column.getNestedData().getColumn(0); - IColumn & all_tags_values = all_tags_column.getNestedData().getColumn(1); - auto & all_tags_offsets = all_tags_column.getNestedColumn().getOffsets(); + IColumn * all_tags_names = nullptr; + IColumn * all_tags_values = nullptr; + IColumn::Offsets * all_tags_offsets = nullptr; + if (time_series_settings.use_all_tags_column_to_generate_id) + { + const auto & all_tags_description = get_column_description(TimeSeriesColumnNames::AllTags); + validator.validateColumnForTagsMap(all_tags_description); + auto & all_tags_column = typeid_cast(make_column_for_tags_block(all_tags_description)); + all_tags_names = &all_tags_column.getNestedData().getColumn(0); + all_tags_values = &all_tags_column.getNestedData().getColumn(1); + all_tags_offsets = &all_tags_column.getNestedColumn().getOffsets(); + } /// Columns "min_time" and "max_time". 
IColumn * min_time_column = nullptr; @@ -327,8 +333,11 @@ namespace } else { - all_tags_names.insertData(tag_name.data(), tag_name.length()); - all_tags_values.insertData(tag_value.data(), tag_value.length()); + if (time_series_settings.use_all_tags_column_to_generate_id) + { + all_tags_names->insertData(tag_name.data(), tag_name.length()); + all_tags_values->insertData(tag_value.data(), tag_value.length()); + } auto it = columns_by_tag_name.find(tag_name); bool has_column_for_tag_value = (it != columns_by_tag_name.end()); @@ -345,9 +354,11 @@ namespace } } - all_tags_offsets.push_back(all_tags_names.size()); tags_offsets.push_back(tags_names.size()); + if (time_series_settings.use_all_tags_column_to_generate_id) + all_tags_offsets->push_back(all_tags_names->size()); + if (time_series_settings.store_min_time_and_max_time) { auto [min_time, max_time] = findMinTimeAndMaxTime(element.samples()); diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp index 8c2f56d9858..49dc2f3d1c1 100644 --- a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp @@ -280,7 +280,22 @@ ASTPtr TimeSeriesDefinitionNormalizer::chooseIDAlgorithm(const ASTColumnDeclarat /// All hash functions below allow multiple arguments, so we use two arguments: metric_name, all_tags. ASTs arguments_for_hash_function; arguments_for_hash_function.push_back(std::make_shared(TimeSeriesColumnNames::MetricName)); - arguments_for_hash_function.push_back(std::make_shared(TimeSeriesColumnNames::AllTags)); + + if (time_series_settings.use_all_tags_column_to_generate_id) + { + arguments_for_hash_function.push_back(std::make_shared(TimeSeriesColumnNames::AllTags)); + } + else + { + const Map & tags_to_columns = time_series_settings.tags_to_columns; + for (const auto & tag_name_and_column_name : tags_to_columns) + { + const auto & tuple = tag_name_and_column_name.safeGet(); + const auto & column_name = tuple.at(1).safeGet(); + arguments_for_hash_function.push_back(std::make_shared(column_name)); + } + arguments_for_hash_function.push_back(std::make_shared(TimeSeriesColumnNames::Tags)); + } auto make_hash_function = [&](const String & function_name) { diff --git a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp index 5376ec2b124..130d49c4c68 100644 --- a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp +++ b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include #include #include @@ -80,15 +82,18 @@ ColumnsDescription TimeSeriesInnerTablesCreator::getInnerTableColumnsDescription columns.add(time_series_columns.get(TimeSeriesColumnNames::Tags)); /// Column "all_tags". - ColumnDescription all_tags_column = time_series_columns.get(TimeSeriesColumnNames::AllTags); - /// Column "all_tags" is here only to calculate the identifier of a time series for the "id" column, so it can be ephemeral. 
- all_tags_column.default_desc.kind = ColumnDefaultKind::Ephemeral; - if (!all_tags_column.default_desc.expression) + if (time_series_settings.use_all_tags_column_to_generate_id) { - all_tags_column.default_desc.ephemeral_default = true; - all_tags_column.default_desc.expression = makeASTFunction("defaultValueOfTypeName", std::make_shared(all_tags_column.type->getName())); + ColumnDescription all_tags_column = time_series_columns.get(TimeSeriesColumnNames::AllTags); + /// Column "all_tags" is here only to calculate the identifier of a time series for the "id" column, so it can be ephemeral. + all_tags_column.default_desc.kind = ColumnDefaultKind::Ephemeral; + if (!all_tags_column.default_desc.expression) + { + all_tags_column.default_desc.ephemeral_default = true; + all_tags_column.default_desc.expression = makeASTFunction("defaultValueOfTypeName", std::make_shared(all_tags_column.type->getName())); + } + columns.add(std::move(all_tags_column)); } - columns.add(std::move(all_tags_column)); /// Columns "min_time" and "max_time". if (time_series_settings.store_min_time_and_max_time) diff --git a/src/Storages/TimeSeries/TimeSeriesSettings.h b/src/Storages/TimeSeries/TimeSeriesSettings.h index acbbc7e7806..4dc6a436cd0 100644 --- a/src/Storages/TimeSeries/TimeSeriesSettings.h +++ b/src/Storages/TimeSeries/TimeSeriesSettings.h @@ -9,6 +9,7 @@ class ASTStorage; #define LIST_OF_TIME_SERIES_SETTINGS(M, ALIAS) \ M(Map, tags_to_columns, Map{}, "Map specifying which tags should be put to separate columns of the 'tags' table. Syntax: {'tag1': 'column1', 'tag2' : column2, ...}", 0) \ + M(Bool, use_all_tags_column_to_generate_id, true, "When generating an expression to calculate an identifier of a time series, this flag enables using the 'all_tags' column in that calculation. The 'all_tags' is a virtual column containing all tags except the metric name", 0) \ M(Bool, store_min_time_and_max_time, true, "If set to true then the table will store 'min_time' and 'max_time' for each time series", 0) \ M(Bool, aggregate_min_time_and_max_time, true, "When creating an inner target 'tags' table, this flag enables using 'SimpleAggregateFunction(min, Nullable(DateTime64(3)))' instead of just 'Nullable(DateTime64(3))' as the type of the 'min_time' column, and the same for the 'max_time' column", 0) \ M(Bool, filter_by_min_time_and_max_time, true, "If set to true then the table will use the 'min_time' and 'max_time' columns for filtering time series", 0) \ From 6fb7ee3c68886af50488289440813ee7e8873d87 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 12 Jul 2024 01:59:54 +0200 Subject: [PATCH 1521/2361] Add global setting `allow_experimental_time_series_table`. --- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.cpp | 1 + src/Storages/StorageTimeSeries.cpp | 9 +++++++++ 3 files changed, 11 insertions(+) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 6988a66cf1e..115554d44c5 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -903,6 +903,7 @@ class IColumn; M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \ M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions", 0) \ M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \ + M(Bool, allow_experimental_time_series_table, false, "Allows to use the TimeSeries table engine. 
Disabled by default, because this feature is experimental", 0) \ M(Bool, allow_experimental_variant_type, false, "Allow Variant data type", 0) \ M(Bool, allow_experimental_dynamic_type, false, "Allow Dynamic data type", 0) \ M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index b6ef654438e..75a80694d43 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -77,6 +77,7 @@ static std::initializer_list +#include #include #include #include @@ -25,6 +26,7 @@ namespace ErrorCodes extern const int INCORRECT_QUERY; extern const int LOGICAL_ERROR; extern const int NOT_IMPLEMENTED; + extern const int SUPPORT_IS_DISABLED; extern const int UNEXPECTED_TABLE_ENGINE; } @@ -124,6 +126,13 @@ StorageTimeSeries::StorageTimeSeries( : IStorage(table_id) , WithContext(local_context->getGlobalContext()) { + if (mode <= LoadingStrictnessLevel::CREATE && !local_context->getSettingsRef().allow_experimental_time_series_table) + { + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "Experimental TimeSeries table engine " + "is not enabled (the setting 'allow_experimental_time_series_table')"); + } + storage_settings = getTimeSeriesSettingsFromQuery(query); if (mode < LoadingStrictnessLevel::ATTACH) From 083fff6ed6ccff44b678ae3ea6af75501d9359fb Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 17 Jun 2024 10:39:10 +0200 Subject: [PATCH 1522/2361] Add documentation. --- docs/en/engines/table-engines/index.md | 1 + .../table-engines/integrations/time-series.md | 295 ++++++++++++++++++ docs/en/interfaces/prometheus.md | 160 ++++++++++ .../settings.md | 42 --- docs/en/operations/settings/settings.md | 11 + .../table-functions/timeSeriesData.md | 28 ++ .../table-functions/timeSeriesMetrics.md | 28 ++ .../table-functions/timeSeriesTags.md | 28 ++ .../aspell-ignore/en/aspell-dict.txt | 6 + 9 files changed, 557 insertions(+), 42 deletions(-) create mode 100644 docs/en/engines/table-engines/integrations/time-series.md create mode 100644 docs/en/interfaces/prometheus.md create mode 100644 docs/en/sql-reference/table-functions/timeSeriesData.md create mode 100644 docs/en/sql-reference/table-functions/timeSeriesMetrics.md create mode 100644 docs/en/sql-reference/table-functions/timeSeriesTags.md diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index 5e81eacc937..20c7c511aa9 100644 --- a/docs/en/engines/table-engines/index.md +++ b/docs/en/engines/table-engines/index.md @@ -61,6 +61,7 @@ Engines in the family: - [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md) - [PostgreSQL](../../engines/table-engines/integrations/postgresql.md) - [S3Queue](../../engines/table-engines/integrations/s3queue.md) +- [TimeSeries](../../engines/table-engines/integrations/time-series.md) ### Special Engines {#special-engines} diff --git a/docs/en/engines/table-engines/integrations/time-series.md b/docs/en/engines/table-engines/integrations/time-series.md new file mode 100644 index 00000000000..4830fd61d27 --- /dev/null +++ b/docs/en/engines/table-engines/integrations/time-series.md @@ -0,0 +1,295 @@ +--- +slug: /en/engines/table-engines/special/time_series +sidebar_position: 60 +sidebar_label: TimeSeries +--- + +# TimeSeries Engine [Experimental] + +A table engine storing time series, i.e. 
a set of values associated with timestamps and tags (or labels): + +``` +metric_name1[tag1=value1, tag2=value2, ...] = {timestamp1: value1, timestamp2: value2, ...} +metric_name2[...] = ... +``` + +:::info +This is an experimental feature that may change in backwards-incompatible ways in the future releases. +Enable usage of the TimeSeries table engine +with [allow_experimental_time_series_table](../../../operations/settings/settings.md#allow-experimental-time-series-table) setting. +Input the command `set allow_experimental_time_series_table = 1`. +::: + +## Syntax {#syntax} + +``` sql +CREATE TABLE name [(columns)] ENGINE=TimeSeries +[SETTINGS var1=value1, ...] +[DATA db.data_table_name | DATA ENGINE data_table_engine(arguments)] +[TAGS db.tags_table_name | TAGS ENGINE tags_table_engine(arguments)] +[METRICS db.metrics_table_name | METRICS ENGINE metrics_table_engine(arguments)] +``` + +## Usage {#usage} + +It's easier to start with everything set by default (it's allowed to create a `TimeSeries` table without specifying a list of columns): + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries +``` + +Then this table can be used with the following protocols (a port must be assigned in the server configuration): +- [prometheus remote-write](../../../interfaces/prometheus.md#remote-write) +- [prometheus remote-read](../../../interfaces/prometheus.md#remote-read) + +## Target tables {#target-tables} + +A `TimeSeries` table doesn't have its own data, everything is stored in its target tables. +This is similar to how a [materialized view](../../../sql-reference/statements/create/view#materialized-view) works, +with the difference that a materialized view has one target table +whereas a `TimeSeries` table has three target tables named [data]{#data-table}, [tags]{#tags-table], and [metrics]{#metrics-table}. + +The target tables can be either specified explicitly in the `CREATE TABLE` query +or the `TimeSeries` table engine can generate inner target tables automatically. + +The target tables are the following: +1. The _data_ table {#data-table} contains time series associated with some identifier. +The _data_ table must have columns: + +| Name | Mandatory? | Default type | Possible types | Description | +|---|---|---|---|---| +| `id` | [x] | `UUID` | any | Identifies a combination of a metric names and tags | +| `timestamp` | [x] | `DateTime64(3)` | `DateTime64(X)` | A time point | +| `value` | [x] | `Float64` | `Float32` or `Float64` | A value associated with the `timestamp` | + +2. The _tags_ table {#tags-table} contains identifiers calculated for each combination of a metric name and tags. +The _tags_ table must have columns: + +| Name | Mandatory? | Default type | Possible types | Description | +|---|---|---|---|---| +| `id` | [x] | `UUID` | any (must match the type of `id` in the [data]{#data-table} table) | An `id` identifies a combination of a metric name and tags. 
The DEFAULT expression specifies how to calculate such an identifier | +| `metric_name` | [x] | `LowCardinality(String)` | `String` or `LowCardinality(String)` | The name of a metric | +| `` | [ ] | `String` | `String` or `LowCardinality(String)` or `LowCardinality(Nullable(String))` | The value of a specific tag, the tag's name and the name of a corresponding column are specified in the [tags_to_columns](#settings) setting | +| `tags` | [x] | `Map(LowCardinality(String), String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Map of tags excluding the tag `__name__` containing the name of a metric and excluding tags with names enumerated in the [tags_to_columns](#settings) setting | +| `all_tags` | [ ] | `Map(String, String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Ephemeral column, each row is a map of all the tags excluding only the tag `__name__` containing the name of a metric. The only purpose of that column is to be used while calculating `id` | +| `min_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Minimum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | +| `max_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Maximum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | + +3. The _metrics_ table {#metrics-table} contains some information about metrics been collected, the types of those metrics and their descriptions. +The _metrics_ table must have columns: + +| Name | Mandatory? | Default type | Possible types | Description | +|---|---|---|---|---| +| `metric_family_name` | [x] | `String` | `String` or `LowCardinality(String)` | The name of a metric family | +| `type` | [x] | `String` | `String` or `LowCardinality(String)` | The type of a metric family, one of "counter", "gauge", "summary", "stateset", "histogram", "gaugehistogram" | +| `unit` | [x] | `String` | `String` or `LowCardinality(String)` | The unit used in a metric | +| `help` | [x] | `String` | `String` or `LowCardinality(String)` | The description of a metric | + +Any row inserted into a `TimeSeries` table will be in fact stored in those three target tables. +A `TimeSeries` table contains all those columns from the [data]{#data-table}, [tags]{#tags-table}, [metrics]{#metrics-table} tables. + +## Creation {#creation} + +There are multiple ways to create a table with the `TimeSeries` table engine. 
+The simplest statement + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries +``` + +will actually create the following table (you can see that by executing `SHOW CREATE TABLE my_table`): + +``` sql +CREATE TABLE my_table +( + `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), + `timestamp` DateTime64(3), + `value` Float64, + `metric_name` LowCardinality(String), + `tags` Map(LowCardinality(String), String), + `all_tags` Map(String, String), + `min_time` Nullable(DateTime64(3)), + `max_time` Nullable(DateTime64(3)), + `metric_family_name` String, + `type` String, + `unit` String, + `help` String +) +ENGINE = TimeSeries +DATA ENGINE = MergeTree ORDER BY (id, timestamp) +DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +TAGS ENGINE = AggregatingMergeTree PRIMARY KEY metric_name ORDER BY (metric_name, id) +TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +METRICS ENGINE = ReplacingMergeTree ORDER BY metric_family_name +METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +So the columns were generated automatically and also there are three inner UUIDs in this statement - +one per each inner target table that was created. +(Inner UUIDs are not shown normally until setting +[show_table_uuid_in_table_create_query_if_not_nil](../../../operations/settings/settings#show_table_uuid_in_table_create_query_if_not_nil) +is set.) + +Inner target tables have names like `.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, +`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, `.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +and each target table has columns which is a subset of the columns of the main `TimeSeries` table: + +``` sql +CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `id` UUID, + `timestamp` DateTime64(3), + `value` Float64 +) +ENGINE = MergeTree +ORDER BY (id, timestamp) +``` + +``` sql +CREATE TABLE default.`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), + `metric_name` LowCardinality(String), + `tags` Map(LowCardinality(String), String), + `all_tags` Map(String, String) EPHEMERAL, + `min_time` SimpleAggregateFunction(min, Nullable(DateTime64(3))), + `max_time` SimpleAggregateFunction(max, Nullable(DateTime64(3))) +) +ENGINE = AggregatingMergeTree +PRIMARY KEY metric_name +ORDER BY (metric_name, id) +``` + +``` sql +CREATE TABLE default.`.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `metric_family_name` String, + `type` String, + `unit` String, + `help` String +) +ENGINE = ReplacingMergeTree +ORDER BY metric_family_name +``` + +## Adjusting types of columns {#adjusting-column-types} + +You can adjust the types of almost any column of the inner target tables by specifying them explicitly +while defining the main table. For example, + +``` sql +CREATE TABLE my_table +( + timestamp DateTime64(6) +) ENGINE=TimeSeries +``` + +will make the inner [data]{#data-table} table store timestamp in microseconds instead of milliseconds: + +``` sql +CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `id` UUID, + `timestamp` DateTime64(6), + `value` Float64 +) +ENGINE = MergeTree +ORDER BY (id, timestamp) +``` + +## The `id` column {#id-column} + +The `id` column contains identifiers, every identifier is calculated for a combination of a metric name and tags. +The DEFAULT expression for the `id` column is an expression which will be used to calculate such identifiers. 
+Both the type of the `id` column and that expression can be adjusted by specifying them explicitly: + +``` sql +CREATE TABLE my_table +( + id UInt64 DEFAULT sipHash64(metric_name, all_tags) +) ENGINE=TimeSeries +``` + +## The `tags` and `all_tags` columns {#tags-and-all-tags} + +There are two columns containing maps of tags - `tags` and `all_tags`. In this example they mean the same, however they can be different +if setting `tags_to_columns` is used. This setting allows to specify that a specific tag should be stored in a separate column instead of storing +in a map inside the `tags` column: + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} +``` + +This statement will add columns +``` + `instance` String, + `job` String +``` +to the definition of both `my_table` and its inner [tags]{#tags-table} target table. In this case the `tags` column will not contain tags `instance` and `job`, +but the `all_tags` column will contain them. The `all_tags` column is ephemeral and its only purpose to be used in the DEFAULT expression +for the `id` column. + +The types of columns can be adjusted by specifying them explicitly: + +``` sql +CREATE TABLE my_table (instance LowCardinality(String), job LowCardinality(Nullable(String))) +ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} +``` + +## Table engines of inner target tables {#inner-table-engines} + +By default inner target tables use the following table engines: +- the [data]{#data-table} table uses [MergeTree](../mergetree-family/mergetree); +- the [tags]{#tags-table} table uses [AggregatingMergeTree](../mergetree-family/aggregatingmergetree) because the same data is often inserted multiple times to this table so we need a way +to remove duplicates, and also because it's required to do aggregation for columns `min_time` and `max_time`; +- the [metrics]{#metrics-table} table uses [ReplacingMergeTree](../mergetree-family/replacingmergetree) because the same data is often inserted multiple times to this table so we need a way +to remove duplicates. + +Other table engines also can be used for inner target tables if it's specified so: + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries +DATA ENGINE=ReplicatedMergeTree +TAGS ENGINE=ReplicatedAggregatingMergeTree +METRICS ENGINE=ReplicatedReplacingMergeTree +``` + +## External target tables {#external-target-tables} + +It's possible to make a `TimeSeries` table use a manually created table: + +``` sql +CREATE TABLE data_for_my_table +( + `id` UUID, + `timestamp` DateTime64(3), + `value` Float64 +) +ENGINE = MergeTree +ORDER BY (id, timestamp); + +CREATE TABLE tags_for_my_table ... + +CREATE TABLE metrics_for_my_table ... + +CREATE TABLE my_table ENGINE=TimeSeries DATA data_for_my_table TAGS tags_for_my_table METRICS metrics_for_my_table; +``` + +## Settings {#settings} + +Here is a list of settings which can be specified while defining a `TimeSeries` table: + +| Name | Type | Default | Description | +|---|---|---|---| +| `tags_to_columns` | Map | {} | Map specifying which tags should be put to separate columns in the [tags]{#tags-table} table. 
Syntax: `{'tag1': 'column1', 'tag2' : column2, ...}` | +| `use_all_tags_column_to_generate_id` | Bool | true | When generating an expression to calculate an identifier of a time series, this flag enables using the `all_tags` column in that calculation | +| `store_min_time_and_max_time` | Bool | true | If set to true then the table will store `min_time` and `max_time` for each time series | +| `aggregate_min_time_and_max_time` | Bool | true | When creating an inner target `tags` table, this flag enables using `SimpleAggregateFunction(min, Nullable(DateTime64(3)))` instead of just `Nullable(DateTime64(3))` as the type of the `min_time` column, and the same for the `max_time` column | +| `filter_by_min_time_and_max_time` | Bool | true | If set to true then the table will use the `min_time` and `max_time` columns for filtering time series | + +# Functions {#functions} + +Here is a list of functions supporting a `TimeSeries` table as an argument: +- [timeSeriesData](../../../sql-reference/table-functions/timeSeriesData.md) +- [timeSeriesTags](../../../sql-reference/table-functions/timeSeriesTags.md) +- [timeSeriesMetrics](../../../sql-reference/table-functions/timeSeriesMetrics.md) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md new file mode 100644 index 00000000000..75a68c59219 --- /dev/null +++ b/docs/en/interfaces/prometheus.md @@ -0,0 +1,160 @@ +--- +slug: /en/interfaces/prometheus +sidebar_position: 19 +sidebar_label: Prometheus protocols +--- + +# Prometheus protocols + +## Exposing metrics {#expose} + +:::note +ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. +::: + +ClickHouse can expose its own metrics for scraping from Prometheus: + +```xml + + 9363 + /metrics + true + true + true + true + + +Section `` can be used to make more extended handlers. +This section is similar to [](http.md) but works for prometheus protocols: + +```xml + + 9363 + + + /metrics + + expose_metrics + true + true + true + true + + + + +``` + +Settings: + +| Name | Default | Description | +|---|---|---|---| +| `port` | none | Port for serving the exposing metrics protocol. | +| `endpoint` | `/metrics` | HTTP endpoint for scraping metrics by prometheus server. Starts with `/`. Should not be used with the `` section. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | +| `metrics` | true | Expose metrics from the [system.metrics](../operations/system-tables/metrics.md) table. | +| `asynchronous_metrics` | true | Expose current metrics values from the [system.asynchronous_metrics](../operations/system-tables/asynchronous_metrics.md) table. | +| `events` | true | Expose metrics from the [system.events](../operations/system-tables/events.md) table. | +| `errors` | true | Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../operations/system-tables/errors.md) as well. | + +Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): +```bash +curl 127.0.0.1:9363/metrics +``` + +## Remote-write protocol {#remote-write} + +ClickHouse supports the [remote-write](https://prometheus.io/docs/specs/remote_write_spec/) protocol. 
+Data are received by this protocol and written to a [TimeSeries](../engines/table-engines/integrations/time-series.md) table
+(which should be created beforehand).
+
+```xml
+<prometheus>
+    <port>9363</port>
+    <handlers>
+        <my_rule_1>
+            <url>/write</url>
+            <handler>
+                <type>remote_write</type>
+                <database>db_name</database>
+                <table>time_series_table</table>
+            </handler>
+        </my_rule_1>
+    </handlers>
+</prometheus>
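+<!-- The <database> element can be omitted if <table> already contains a database-qualified name,
+     for example <table>db_name.time_series_table</table> (see "Configuration for multiple protocols" below). -->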
+``` + +Settings: + +| Name | Default | Description | +|---|---|---|---| +| `port` | none | Port for serving the `remote-write` protocol. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | +| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to write data received by the `remote-write` protocol. This name can optionally contain the name of a database too. | +| `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | + +## Remote-read protocol {#remote-read} + +ClickHouse supports the [remote-read](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/) protocol. +Data are read from a [TimeSeries](../engines/table-engines/integrations/time-series.md) table and sent via this protocol. + +```xml + + 9363 + + + /read + + remote_readdb_name + time_series_table
+            </handler>
+        </my_rule_1>
+    </handlers>
+</prometheus>
+``` + +Settings: + +| Name | Default | Description | +|---|---|---|---| +| `port` | none | Port for serving the `remote-read` protocol. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | +| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to read data to send by the `remote-read` protocol. This name can optionally contain the name of a database too. | +| `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | + +## Configuration for multiple protocols {#multiple-protocols} + +Multiple protocols can be specified together in one place: + +```xml + + 9363 + + + /metrics + + expose_metrics + true + true + true + true + + + + /write + + remote_writedb_name.time_series_table + + + + /read + + remote_readdb_name.time_series_table + + + + +``` diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index a1e3c292b04..68f61650e00 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -2112,48 +2112,6 @@ The trailing slash is mandatory. /var/lib/clickhouse/ ``` -## Prometheus {#prometheus} - -:::note -ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. -::: - -Exposing metrics data for scraping from [Prometheus](https://prometheus.io). - -Settings: - -- `endpoint` – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. -- `port` – Port for `endpoint`. -- `metrics` – Expose metrics from the [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) table. -- `events` – Expose metrics from the [system.events](../../operations/system-tables/events.md#system_tables-events) table. -- `asynchronous_metrics` – Expose current metrics values from the [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) table. -- `errors` - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../../operations/system-tables/asynchronous_metrics.md#system_tables-errors) as well. - -**Example** - -``` xml - - 0.0.0.0 - 8123 - 9000 - - - /metrics - 9363 - true - true - true - true - - - -``` - -Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): -```bash -curl 127.0.0.1:9363/metrics -``` - ## query_log {#query-log} Setting for logging queries received with the [log_queries=1](../../operations/settings/settings.md) setting. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 143ce836beb..c0103aca5f6 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5614,3 +5614,14 @@ Default value: `1GiB`. Disable all insert and mutations (alter table update / alter table delete / alter table drop partition). Set to true, can make this node focus on reading queries. Default value: `false`. 
+ +## allow_experimental_time_series_table {#allow-experimental-time-series-table} + +Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine. + +Possible values: + +- 0 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is disabled. +- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled. + +Default value: `0`. diff --git a/docs/en/sql-reference/table-functions/timeSeriesData.md b/docs/en/sql-reference/table-functions/timeSeriesData.md new file mode 100644 index 00000000000..aa7a9d30c2a --- /dev/null +++ b/docs/en/sql-reference/table-functions/timeSeriesData.md @@ -0,0 +1,28 @@ +--- +slug: /en/sql-reference/table-functions/timeSeriesData +sidebar_position: 145 +sidebar_label: timeSeriesData +--- + +# timeSeriesData + +`timeSeriesData(db_name.time_series_table)` - Returns the [data](../../engines/table-engines/integrations/time-series.md#data-table) table +used by table `db_name.time_series_table` which table engine is [TimeSeries](../../engines/table-engines/integrations/time-series.md): + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries DATA data_table +``` + +The function also works if the _data_ table is inner: + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +The following queries are equivalent: + +``` sql +SELECT * FROM timeSeriesData(db_name.time_series_table); +SELECT * FROM timeSeriesData('db_name.time_series_table'); +SELECT * FROM timeSeriesData('db_name', 'time_series_table'); +``` diff --git a/docs/en/sql-reference/table-functions/timeSeriesMetrics.md b/docs/en/sql-reference/table-functions/timeSeriesMetrics.md new file mode 100644 index 00000000000..913f1185bca --- /dev/null +++ b/docs/en/sql-reference/table-functions/timeSeriesMetrics.md @@ -0,0 +1,28 @@ +--- +slug: /en/sql-reference/table-functions/timeSeriesMetrics +sidebar_position: 145 +sidebar_label: timeSeriesMetrics +--- + +# timeSeriesMetrics + +`timeSeriesMetrics(db_name.time_series_table)` - Returns the [metrics](../../engines/table-engines/integrations/time-series.md#metrics-table) table +used by table `db_name.time_series_table` which table engine is [TimeSeries](../../engines/table-engines/integrations/time-series.md): + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries METRICS metrics_table +``` + +The function also works if the _metrics_ table is inner: + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +The following queries are equivalent: + +``` sql +SELECT * FROM timeSeriesMetrics(db_name.time_series_table); +SELECT * FROM timeSeriesMetrics('db_name.time_series_table'); +SELECT * FROM timeSeriesMetrics('db_name', 'time_series_table'); +``` diff --git a/docs/en/sql-reference/table-functions/timeSeriesTags.md b/docs/en/sql-reference/table-functions/timeSeriesTags.md new file mode 100644 index 00000000000..663a7dc6ac8 --- /dev/null +++ b/docs/en/sql-reference/table-functions/timeSeriesTags.md @@ -0,0 +1,28 @@ +--- +slug: /en/sql-reference/table-functions/timeSeriesTags +sidebar_position: 145 +sidebar_label: timeSeriesTags +--- + +# timeSeriesTags + +`timeSeriesTags(db_name.time_series_table)` - Returns the [tags](../../engines/table-engines/integrations/time-series.md#tags-table) table +used by table `db_name.time_series_table` which table engine is 
[TimeSeries](../../engines/table-engines/integrations/time-series.md): + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries TAGS tags_table +``` + +The function also works if the _tags_ table is inner: + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +The following queries are equivalent: + +``` sql +SELECT * FROM timeSeriesTags(db_name.time_series_table); +SELECT * FROM timeSeriesTags('db_name.time_series_table'); +SELECT * FROM timeSeriesTags('db_name', 'time_series_table'); +``` diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index d82b70cfdb4..47d30759174 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1672,6 +1672,7 @@ fuzzQuery fuzzer fuzzers gRPC +gaugehistogram gccMurmurHash gcem generateRandom @@ -2555,6 +2556,7 @@ startsWithUTF startswith statbox stateful +stateset stddev stddevPop stddevPopStable @@ -2686,6 +2688,10 @@ themself threadpool throwIf timeDiff +TimeSeries +timeSeriesData +timeSeriesMetrics +timeSeriesTags timeSlot timeSlots timeZone From bd22140fa4a0c48eb99216889ef4c1a02ba36404 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 11 Mar 2024 23:58:05 +0100 Subject: [PATCH 1523/2361] Add test. --- .../compose/docker_compose_prometheus.yml | 57 ++++++ tests/integration/helpers/cluster.py | 75 ++++++++ .../test_prometheus_protocols/__init__.py | 0 .../allow_experimental_time_series_table.xml | 7 + .../configs/prometheus.xml | 21 +++ .../test_prometheus_protocols/test.py | 168 ++++++++++++++++++ 6 files changed, 328 insertions(+) create mode 100644 tests/integration/compose/docker_compose_prometheus.yml create mode 100644 tests/integration/test_prometheus_protocols/__init__.py create mode 100644 tests/integration/test_prometheus_protocols/configs/allow_experimental_time_series_table.xml create mode 100644 tests/integration/test_prometheus_protocols/configs/prometheus.xml create mode 100644 tests/integration/test_prometheus_protocols/test.py diff --git a/tests/integration/compose/docker_compose_prometheus.yml b/tests/integration/compose/docker_compose_prometheus.yml new file mode 100644 index 00000000000..0a1db2138ba --- /dev/null +++ b/tests/integration/compose/docker_compose_prometheus.yml @@ -0,0 +1,57 @@ +version: '2.3' +services: + prometheus_writer: + image: prom/prometheus:v2.50.1 + hostname: ${PROMETHEUS_WRITER_HOSTNAME:-prometheus_writer} + restart: always + entrypoint: | + /bin/sh -c 'truncate -s 0 /etc/prometheus/prometheus.yml + cat << EOF >> /etc/prometheus/prometheus.yml + global: + scrape_interval: 1s + scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:${PROMETHEUS_WRITER_PORT}"] + EOF + if [ -n "${PROMETHEUS_REMOTE_WRITE_HANDLER}" ]; then + echo "remote_write:" >> /etc/prometheus/prometheus.yml + echo " - url: \"${PROMETHEUS_REMOTE_WRITE_HANDLER}\"" >> /etc/prometheus/prometheus.yml + fi + #cat "/etc/prometheus/prometheus.yml" + /bin/prometheus --config.file="/etc/prometheus/prometheus.yml" --storage.tsdb.path="/prometheus" --web.console.libraries="/usr/share/prometheus/console_libraries" --web.console.templates="/usr/share/prometheus/consoles" --web.listen-address="0.0.0.0:${PROMETHEUS_WRITER_PORT}" &> /var/log/prometheus/prometheus.log' + expose: + - ${PROMETHEUS_WRITER_PORT} + healthcheck: + test: curl -f "ttps://localhost:${PROMETHEUS_WRITER_PORT}/api/v1/status/runtimeinfo" 
|| exit 1 + interval: 5s + timeout: 3s + retries: 30 + volumes: + - type: ${PROMETHEUS_WRITER_LOGS_FS:-tmpfs} + source: ${PROMETHEUS_WRITER_LOGS:-} + target: /var/log/prometheus + + prometheus_reader: + image: prom/prometheus:v2.50.1 + hostname: ${PROMETHEUS_READER_HOSTNAME:-prometheus_reader} + restart: always + entrypoint: | + /bin/sh -c 'truncate -s 0 /etc/prometheus/prometheus.yml + if [ -n "${PROMETHEUS_REMOTE_READ_HANDLER}" ]; then + echo "remote_read:" >> /etc/prometheus/prometheus.yml + echo " - url: \"${PROMETHEUS_REMOTE_READ_HANDLER}\"" >> /etc/prometheus/prometheus.yml + fi + #cat "/etc/prometheus/prometheus.yml" + /bin/prometheus --config.file="/etc/prometheus/prometheus.yml" --storage.tsdb.path="/prometheus" --web.console.libraries="/usr/share/prometheus/console_libraries" --web.console.templates="/usr/share/prometheus/consoles" --web.listen-address="0.0.0.0:${PROMETHEUS_READER_PORT}" &> /var/log/prometheus/prometheus.log' + expose: + - ${PROMETHEUS_READER_PORT} + healthcheck: + test: curl -f "ttps://localhost:${PROMETHEUS_READER_PORT}/api/v1/status/runtimeinfo" || exit 1 + interval: 5s + timeout: 3s + retries: 30 + volumes: + - type: ${PROMETHEUS_READER_LOGS_FS:-tmpfs} + source: ${PROMETHEUS_READER_LOGS:-} + target: /var/log/prometheus diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 9259c720ff0..a0b545654fe 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -737,6 +737,25 @@ class ClickHouseCluster: self.jdbc_driver_dir = p.abspath(p.join(self.instances_dir, "jdbc_driver")) self.jdbc_driver_logs_dir = os.path.join(self.jdbc_driver_dir, "logs") + # available when with_prometheus == True + self.with_prometheus = False + self.prometheus_writer_host = "prometheus_writer" + self.prometheus_writer_port = 9090 + self.prometheus_writer_logs_dir = p.abspath( + p.join(self.instances_dir, "prometheus_writer/logs") + ) + self.prometheus_reader_host = "prometheus_reader" + self.prometheus_reader_port = 9091 + self.prometheus_reader_logs_dir = p.abspath( + p.join(self.instances_dir, "prometheus_reader/logs") + ) + self.prometheus_remote_write_handler_host = None + self.prometheus_remote_write_handler_port = 9092 + self.prometheus_remote_write_handler_path = "/write" + self.prometheus_remote_read_handler_host = None + self.prometheus_remote_read_handler_port = 9092 + self.prometheus_remote_read_handler_path = "/read" + self.docker_client = None self.is_up = False self.env = os.environ.copy() @@ -1619,6 +1638,42 @@ class ClickHouseCluster: ] return self.base_hive_cmd + def setup_prometheus_cmd(self, instance, env_variables, docker_compose_yml_dir): + env_variables["PROMETHEUS_WRITER_HOST"] = self.prometheus_writer_host + env_variables["PROMETHEUS_WRITER_PORT"] = str(self.prometheus_writer_port) + env_variables["PROMETHEUS_WRITER_LOGS"] = self.prometheus_writer_logs_dir + env_variables["PROMETHEUS_WRITER_LOGS_FS"] = "bind" + env_variables["PROMETHEUS_READER_HOST"] = self.prometheus_reader_host + env_variables["PROMETHEUS_READER_PORT"] = str(self.prometheus_reader_port) + env_variables["PROMETHEUS_READER_LOGS"] = self.prometheus_reader_logs_dir + env_variables["PROMETHEUS_READER_LOGS_FS"] = "bind" + if self.prometheus_remote_write_handler_host: + env_variables["PROMETHEUS_REMOTE_WRITE_HANDLER"] = ( + f"http://{self.prometheus_remote_write_handler_host}:{self.prometheus_remote_write_handler_port}/{self.prometheus_remote_write_handler_path.strip('/')}" + ) + if self.prometheus_remote_read_handler_host: + 
env_variables["PROMETHEUS_REMOTE_READ_HANDLER"] = ( + f"http://{self.prometheus_remote_read_handler_host}:{self.prometheus_remote_read_handler_port}/{self.prometheus_remote_read_handler_path.strip('/')}" + ) + if not self.with_prometheus: + self.with_prometheus = True + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_prometheus.yml"), + ] + ) + self.base_prometheus_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_prometheus.yml"), + ] + return self.base_prometheus_cmd + def add_instance( self, name, @@ -1659,6 +1714,9 @@ class ClickHouseCluster: with_jdbc_bridge=False, with_hive=False, with_coredns=False, + with_prometheus=False, + handle_prometheus_remote_write=False, + handle_prometheus_remote_read=False, use_old_analyzer=None, hostname=None, env_variables=None, @@ -2001,6 +2059,17 @@ class ClickHouseCluster: self.setup_hive(instance, env_variables, docker_compose_yml_dir) ) + if with_prometheus: + if handle_prometheus_remote_write: + self.prometheus_remote_write_handler_host = instance.hostname + if handle_prometheus_remote_read: + self.prometheus_remote_read_handler_host = instance.hostname + cmds.append( + self.setup_prometheus_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) + logging.debug( "Cluster name:{} project_name:{}. Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}".format( self.name, @@ -3056,6 +3125,12 @@ class ClickHouseCluster: f"http://{self.jdbc_bridge_ip}:{self.jdbc_bridge_port}/ping" ) + if self.with_prometheus: + os.makedirs(self.prometheus_writer_logs_dir) + os.chmod(self.prometheus_writer_logs_dir, stat.S_IRWXU | stat.S_IRWXO) + os.makedirs(self.prometheus_reader_logs_dir) + os.chmod(self.prometheus_reader_logs_dir, stat.S_IRWXU | stat.S_IRWXO) + clickhouse_start_cmd = self.base_cmd + ["up", "-d", "--no-recreate"] logging.debug( ( diff --git a/tests/integration/test_prometheus_protocols/__init__.py b/tests/integration/test_prometheus_protocols/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_prometheus_protocols/configs/allow_experimental_time_series_table.xml b/tests/integration/test_prometheus_protocols/configs/allow_experimental_time_series_table.xml new file mode 100644 index 00000000000..d71cfcaf2c8 --- /dev/null +++ b/tests/integration/test_prometheus_protocols/configs/allow_experimental_time_series_table.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_prometheus_protocols/configs/prometheus.xml b/tests/integration/test_prometheus_protocols/configs/prometheus.xml new file mode 100644 index 00000000000..071a29620cf --- /dev/null +++ b/tests/integration/test_prometheus_protocols/configs/prometheus.xml @@ -0,0 +1,21 @@ + + + 9092 + + + /write + + remote_write + default.prometheus
+                </handler>
+            </my_rule_1>
+            <my_rule_2>
+                <url>/read</url>
+                <handler>
+                    <type>remote_read</type>
+                    <table>default.prometheus</table>
+                </handler>
+            </my_rule_2>
+        </handlers>
+    </prometheus>
+</clickhouse>
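As a hypothetical sketch: once the test cluster is up and `prometheus_writer` forwards scraped samples through the `/write` handler configured above, the `default.prometheus` table created by the test below can be inspected directly; the built-in `up` metric used by `compare_query()` is assumed here as the example series.

```sql
-- Minimal sanity checks against the TimeSeries table targeted by the handlers above
-- (the integration test creates it with: CREATE TABLE prometheus ENGINE=TimeSeries).
SELECT count() FROM timeSeriesData(default.prometheus);

-- Samples of the "up" metric scraped by prometheus_writer:
SELECT count()
FROM timeSeriesData(default.prometheus)
WHERE id IN (SELECT id FROM timeSeriesTags(default.prometheus) WHERE metric_name = 'up');
```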
diff --git a/tests/integration/test_prometheus_protocols/test.py b/tests/integration/test_prometheus_protocols/test.py new file mode 100644 index 00000000000..488c5369742 --- /dev/null +++ b/tests/integration/test_prometheus_protocols/test.py @@ -0,0 +1,168 @@ +import pytest +import time +import requests +from http import HTTPStatus +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=["configs/prometheus.xml"], + user_configs=["configs/allow_experimental_time_series_table.xml"], + with_prometheus=True, + handle_prometheus_remote_write=True, + handle_prometheus_remote_read=True, +) + + +def execute_query_on_prometheus_writer(query, timestamp): + return execute_query_impl( + cluster.get_instance_ip(cluster.prometheus_writer_host), + cluster.prometheus_writer_port, + "/api/v1/query", + query, + timestamp, + ) + + +def execute_query_on_prometheus_reader(query, timestamp): + return execute_query_impl( + cluster.get_instance_ip(cluster.prometheus_reader_host), + cluster.prometheus_reader_port, + "/api/v1/query", + query, + timestamp, + ) + + +def execute_query_impl(host, port, path, query, timestamp): + if not path.startswith("/"): + path += "/" + url = f"http://{host}:{port}/{path.strip('/')}?query={query}&time={timestamp}" + print(f"Requesting {url}") + r = requests.get(url) + print(f"Status code: {r.status_code} {HTTPStatus(r.status_code).phrase}") + if r.status_code != requests.codes.ok: + print(f"Response: {r.text}") + raise Exception(f"Got unexpected status code {r.status_code}") + return r.json() + + +def show_query_result(query): + evaluation_time = time.time() + print(f"Evaluating query: {query}") + print(f"Evaluation time: {evaluation_time}") + result_from_writer = execute_query_on_prometheus_writer(query, evaluation_time) + print(f"Result from prometheus_writer: {result_from_writer}") + result_from_reader = execute_query_on_prometheus_reader(query, evaluation_time) + print(f"Result from prometheus_reader: {result_from_reader}") + + +def compare_query(query): + timeout = 30 + start_time = time.time() + evaluation_time = start_time + print(f"Evaluating query: {query}") + print(f"Evaluation time: {evaluation_time}") + while time.time() < start_time + timeout: + result_from_writer = execute_query_on_prometheus_writer(query, evaluation_time) + result_from_reader = execute_query_on_prometheus_reader(query, evaluation_time) + print(f"Result from prometheus_writer: {result_from_writer}") + print(f"Result from prometheus_reader: {result_from_reader}") + if result_from_writer == result_from_reader: + return + time.sleep(1) + raise Exception( + f"Got different results from prometheus_writer and prometheus_reader" + ) + + +def compare_queries(): + compare_query("up") + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_after_test(): + try: + yield + finally: + node.query("DROP TABLE IF EXISTS prometheus SYNC") + node.query("DROP TABLE IF EXISTS original SYNC") + node.query("DROP TABLE IF EXISTS mydata SYNC") + node.query("DROP TABLE IF EXISTS mytable SYNC") + node.query("DROP TABLE IF EXISTS mymetrics SYNC") + + +def test_default(): + node.query("CREATE TABLE prometheus ENGINE=TimeSeries") + compare_queries() + + +def test_tags_to_columns(): + node.query( + "CREATE TABLE prometheus ENGINE=TimeSeries SETTINGS tags_to_columns = {'job': 'job', 'instance': 'instance'}" + 
) + compare_queries() + + +def test_64bit_id(): + node.query("CREATE TABLE prometheus (id UInt64) ENGINE=TimeSeries") + compare_queries() + + +def test_custom_id_algorithm(): + node.query( + "CREATE TABLE prometheus (id FixedString(16) DEFAULT murmurHash3_128(metric_name, all_tags)) ENGINE=TimeSeries" + ) + compare_queries() + + +def test_create_as_table(): + node.query("CREATE TABLE original ENGINE=TimeSeries") + node.query("CREATE TABLE prometheus AS original") + compare_queries() + + +def test_inner_engines(): + node.query( + "CREATE TABLE prometheus ENGINE=TimeSeries " + "DATA ENGINE=MergeTree ORDER BY (id, timestamp) " + "TAGS ENGINE=AggregatingMergeTree ORDER BY (metric_name, id) " + "METRICS ENGINE=ReplacingMergeTree ORDER BY metric_family_name" + ) + compare_queries() + + +def test_external_tables(): + node.query( + "CREATE TABLE mydata (id UUID, timestamp DateTime64(3), value Float64) " + "ENGINE=MergeTree ORDER BY (id, timestamp)" + ) + node.query( + "CREATE TABLE mytags (" + "id UUID, " + "metric_name LowCardinality(String), " + "tags Map(LowCardinality(String), String), " + "min_time SimpleAggregateFunction(min, Nullable(DateTime64(3))), " + "max_time SimpleAggregateFunction(max, Nullable(DateTime64(3)))) " + "ENGINE=AggregatingMergeTree ORDER BY (metric_name, id)" + ) + node.query( + "CREATE TABLE mymetrics (metric_family_name String, type LowCardinality(String), unit LowCardinality(String), help String) " + "ENGINE=ReplacingMergeTree ORDER BY metric_family_name" + ) + node.query( + "CREATE TABLE prometheus ENGINE=TimeSeries " + "DATA mydata TAGS mytags METRICS mymetrics" + ) + compare_queries() From e85dfa9e85e383ec7acdb4703b0e00e199fa7d96 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Wed, 31 Jul 2024 19:49:28 +0200 Subject: [PATCH 1524/2361] Fix exception handling in PrometheusRequestHandler. --- src/Server/HTTP/sendExceptionToHTTPClient.cpp | 1 - src/Server/PrometheusRequestHandler.cpp | 68 ++++++++++++------- src/Server/PrometheusRequestHandler.h | 7 +- 3 files changed, 51 insertions(+), 25 deletions(-) diff --git a/src/Server/HTTP/sendExceptionToHTTPClient.cpp b/src/Server/HTTP/sendExceptionToHTTPClient.cpp index 022a763a9a2..321c4866e8f 100644 --- a/src/Server/HTTP/sendExceptionToHTTPClient.cpp +++ b/src/Server/HTTP/sendExceptionToHTTPClient.cpp @@ -43,7 +43,6 @@ void sendExceptionToHTTPClient( out->position() = out->buffer().begin(); out->writeln(exception_message); - out->finalize(); } } diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 275f6f3c04b..ddbccab05df 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -33,6 +33,7 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int SUPPORT_IS_DISABLED; + extern const int LOGICAL_ERROR; } /// Base implementation of a prometheus protocol. @@ -343,9 +344,10 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe try { + response_finalized = false; write_event = write_event_; http_method = request.getMethod(); - chassert(!write_buffer_from_response); + chassert(!write_buffer_from_response); /// Nothing is written to the response yet. /// Make keep-alive works. 
if (request.getVersion() == HTTPServerRequest::HTTP_1_1) @@ -356,28 +358,24 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe impl->beforeHandlingRequest(request); impl->handleRequest(request, response); - if (write_buffer_from_response) - { - write_buffer_from_response->finalize(); - write_buffer_from_response = nullptr; - } + finalizeResponse(response); } catch (...) { tryLogCurrentException(log); - tryCallOnException(); ExecutionStatus status = ExecutionStatus::fromCurrentException("", send_stacktrace); trySendExceptionToClient(status.message, status.code, request, response); - tryCallOnException(); + tryFinalizeResponse(response); - /// `write_buffer_from_response` must be finalized already or at least tried to finalize. - write_buffer_from_response = nullptr; + tryCallOnException(); } } -WriteBuffer & PrometheusRequestHandler::getOutputStream(HTTPServerResponse & response) +WriteBufferFromHTTPServerResponse & PrometheusRequestHandler::getOutputStream(HTTPServerResponse & response) { + if (response_finalized) + throw Exception(ErrorCodes::LOGICAL_ERROR, "PrometheusRequestHandler: Response already sent"); if (write_buffer_from_response) return *write_buffer_from_response; write_buffer_from_response = std::make_unique( @@ -385,27 +383,51 @@ WriteBuffer & PrometheusRequestHandler::getOutputStream(HTTPServerResponse & res return *write_buffer_from_response; } +void PrometheusRequestHandler::finalizeResponse(HTTPServerResponse & response) +{ + if (response_finalized) + { + /// Response is already finalized or at least tried to. We don't need the write buffer anymore in either case. + write_buffer_from_response = nullptr; + } + else + { + /// We set `response_finalized = true` before actually calling `write_buffer_from_response->finalize()` + /// because we shouldn't call finalize() again even if finalize() throws an exception. + response_finalized = true; + + if (write_buffer_from_response) + std::exchange(write_buffer_from_response, {})->finalize(); + else + WriteBufferFromHTTPServerResponse{response, http_method == HTTPRequest::HTTP_HEAD, config.keep_alive_timeout, write_event}.finalize(); + } + chassert(response_finalized && !write_buffer_from_response); +} + void PrometheusRequestHandler::trySendExceptionToClient(const String & exception_message, int exception_code, HTTPServerRequest & request, HTTPServerResponse & response) { + if (response_finalized) + return; /// Response is already finalized (or tried to). We can't write the error message to the response in either case. + try { - sendExceptionToHTTPClient(exception_message, exception_code, request, response, write_buffer_from_response.get(), log); + sendExceptionToHTTPClient(exception_message, exception_code, request, response, &getOutputStream(response), log); } catch (...) { tryLogCurrentException(log, "Couldn't send exception to client"); + } +} - if (write_buffer_from_response) - { - try - { - write_buffer_from_response->finalize(); - } - catch (...) - { - tryLogCurrentException(log, "Cannot flush data to client (after sending exception)"); - } - } +void PrometheusRequestHandler::tryFinalizeResponse(HTTPServerResponse & response) +{ + try + { + finalizeResponse(response); + } + catch (...) 
+ { + tryLogCurrentException(log, "Cannot flush data to client (after sending exception)"); } } diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index 6df718f2a05..3a2dac0b7ad 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -26,7 +26,11 @@ private: void createImpl(); /// Returns the write buffer used for the current HTTP response. - WriteBuffer & getOutputStream(HTTPServerResponse & response); + WriteBufferFromHTTPServerResponse & getOutputStream(HTTPServerResponse & response); + + /// Finalizes the output stream and sends the response to the client. + void finalizeResponse(HTTPServerResponse & response); + void tryFinalizeResponse(HTTPServerResponse & response); /// Writes the current exception to the response. void trySendExceptionToClient(const String & exception_message, int exception_code, HTTPServerRequest & request, HTTPServerResponse & response); @@ -50,6 +54,7 @@ private: String http_method; bool send_stacktrace = false; std::unique_ptr write_buffer_from_response; + bool response_finalized = false; ProfileEvents::Event write_event; }; From 4ef382360d96fde13a787b0ebc43aefa44c822ab Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 2 Aug 2024 16:35:11 +0200 Subject: [PATCH 1525/2361] Fix compilation. --- src/Parsers/ASTFunction.h | 16 ---------------- .../TimeSeries/PrometheusRemoteWriteProtocol.cpp | 14 +++++++++++--- .../TimeSeriesDefinitionNormalizer.cpp | 5 +++-- .../TimeSeries/TimeSeriesInnerTablesCreator.cpp | 4 ++-- .../TimeSeries/TimeSeriesInnerTablesCreator.h | 4 ++-- 5 files changed, 18 insertions(+), 25 deletions(-) diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 5a14d66f31c..1b4a5928d1c 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -116,20 +116,4 @@ inline bool tryGetFunctionNameInto(const ASTPtr & ast, String & name) { return t /// Checks if function is a lambda function definition `lambda((x, y), x + y)` bool isASTLambdaFunction(const ASTFunction & function); -/// Makes an ASTFunction to represent a data type. -template -std::shared_ptr makeASTDataType(const String & type_name, Args &&... args) -{ - auto function = std::make_shared(); - function->name = type_name; - function->no_empty_args = true; - if (sizeof...(args)) - { - function->arguments = std::make_shared(); - function->children.push_back(function->arguments); - function->arguments->children = { std::forward(args)... 
}; - } - return function; -} - } diff --git a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp index 23d28894acc..1e8164152a5 100644 --- a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -107,7 +108,7 @@ namespace ColumnsDescription{id_column_description}, context); - auto adding_missing_defaults_actions = std::make_shared(adding_missing_defaults_dag); + auto adding_missing_defaults_actions = std::make_shared(std::move(adding_missing_defaults_dag)); pipe.addSimpleTransform([&](const Block & stream_header) { return std::make_shared(stream_header, adding_missing_defaults_actions); @@ -118,7 +119,7 @@ namespace header_with_id.getColumnsWithTypeAndName(), ActionsDAG::MatchColumnsMode::Position); auto actions = std::make_shared( - convert_actions_dag, + std::move(convert_actions_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes)); pipe.addSimpleTransform([&](const Block & stream_header) { @@ -528,7 +529,14 @@ namespace ContextMutablePtr insert_context = Context::createCopy(context); insert_context->setCurrentQueryId(context->getCurrentQueryId() + ":" + String{toString(table_kind)}); - InterpreterInsertQuery interpreter(insert_query, insert_context); + InterpreterInsertQuery interpreter( + insert_query, + insert_context, + /* allow_materialized= */ false, + /* no_squash= */ false, + /* no_destination= */ false, + /* async_insert= */ false); + BlockIO io = interpreter.execute(); PushingPipelineExecutor executor(io.pipeline); diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp index 49dc2f3d1c1..f9e7290e514 100644 --- a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -169,7 +170,7 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) auto get_string_to_string_map_type = [&] { return makeASTDataType("Map", get_string_type(), get_string_type()); }; auto get_lc_string_to_string_map_type = [&] { return makeASTDataType("Map", get_lc_string_type(), get_string_type()); }; - auto make_nullable = [&](std::shared_ptr type) + auto make_nullable = [&](std::shared_ptr type) { if (type->name == "Nullable") return type; @@ -185,7 +186,7 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) make_new_column(TimeSeriesColumnNames::Timestamp, get_datetime_type()); auto timestamp_column = typeid_cast>(columns[position - 1]); - auto timestamp_type = typeid_cast>(timestamp_column->type->ptr()); + auto timestamp_type = typeid_cast>(timestamp_column->type->ptr()); if (!is_next_column_named(TimeSeriesColumnNames::Value)) make_new_column(TimeSeriesColumnNames::Value, get_float_type()); diff --git a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp index 130d49c4c68..5f616982a6f 100644 --- a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp +++ b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.cpp @@ -150,7 +150,7 @@ StorageID TimeSeriesInnerTablesCreator::getInnerTableID(ViewTarget::Kind inner_t std::shared_ptr TimeSeriesInnerTablesCreator::getInnerTableCreateQuery( ViewTarget::Kind inner_table_kind, 
const UUID & inner_table_uuid, - const std::shared_ptr inner_storage_def) const + const std::shared_ptr & inner_storage_def) const { auto manual_create_query = std::make_shared(); @@ -173,7 +173,7 @@ std::shared_ptr TimeSeriesInnerTablesCreator::getInnerTableCreat StorageID TimeSeriesInnerTablesCreator::createInnerTable( ViewTarget::Kind inner_table_kind, const UUID & inner_table_uuid, - const std::shared_ptr inner_storage_def) const + const std::shared_ptr & inner_storage_def) const { /// We will make a query to create the inner target table. auto create_context = Context::createCopy(getContext()); diff --git a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h index a59bd2107bb..5778dd77398 100644 --- a/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h +++ b/src/Storages/TimeSeries/TimeSeriesInnerTablesCreator.h @@ -31,12 +31,12 @@ public: /// Generates a CREATE TABLE query for an inner table. std::shared_ptr getInnerTableCreateQuery(ViewTarget::Kind inner_table_kind, const UUID & inner_table_uuid, - const std::shared_ptr inner_storage_def) const; + const std::shared_ptr & inner_storage_def) const; /// Creates an inner table. StorageID createInnerTable(ViewTarget::Kind inner_table_kind, const UUID & inner_table_uuid, - const std::shared_ptr inner_storage_def) const; + const std::shared_ptr & inner_storage_def) const; private: const StorageID time_series_storage_id; From a749223251b7e580f5d7bbcb4fc59aa6b5fffbe2 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Sat, 3 Aug 2024 22:43:12 +0800 Subject: [PATCH 1526/2361] change as request --- src/Functions/printf.cpp | 39 ++++++++++++++----- .../0_stateless/03203_function_printf.sql | 7 +++- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/src/Functions/printf.cpp b/src/Functions/printf.cpp index 3efe854a53b..3cf3efaf534 100644 --- a/src/Functions/printf.cpp +++ b/src/Functions/printf.cpp @@ -6,11 +6,10 @@ #include #include #include +#include #include #include -#include -#include #include #include #include @@ -22,6 +21,7 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int BAD_ARGUMENTS; } namespace @@ -52,10 +52,9 @@ private: [[maybe_unused]] String toString() const { - std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM - oss << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() - << std::endl; - return oss.str(); + WriteBufferFromOwnString buf; + buf << "format:" << format << ", rows:" << rows << ", is_literal:" << is_literal << ", input:" << input.dumpStructure() << "\n"; + return buf.str(); } private: @@ -229,9 +228,31 @@ public: ColumnsWithTypeAndName concat_args(instructions.size()); for (size_t i = 0; i < instructions.size(); ++i) { - // std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl; - concat_args[i] = instructions[i].execute(); - // std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl; + const auto & instruction = instructions[i]; + try + { + // std::cout << "instruction[" << i << "]:" << instructions[i].toString() << std::endl; + concat_args[i] = instruction.execute(); + // std::cout << "concat_args[" << i << "]:" << concat_args[i].dumpStructure() << std::endl; + } + catch (const fmt::v9::format_error & e) + { + if (instruction.is_literal) + throw Exception( + 
ErrorCodes::BAD_ARGUMENTS, + "Bad format {} in function {} without input argument, reason: {}", + instruction.format, + getName(), + e.what()); + else + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Bad format {} in function {} with {} as input argument, reason: {}", + instructions[i].format, + getName(), + instruction.input.dumpStructure(), + e.what()); + } } auto res = function_concat->build(concat_args)->execute(concat_args, std::make_shared(), input_rows_count); diff --git a/tests/queries/0_stateless/03203_function_printf.sql b/tests/queries/0_stateless/03203_function_printf.sql index c41cbf0b5e9..6ff4699c8a7 100644 --- a/tests/queries/0_stateless/03203_function_printf.sql +++ b/tests/queries/0_stateless/03203_function_printf.sql @@ -31,4 +31,9 @@ select printf('%%.2e: %.2e', 123.456) = '%.2e: 1.23e+02'; select printf('%%.2g: %.2g', 123.456) = '%.2g: 1.2e+02'; -- Testing character formats with precision -select printf('%%.2s: %.2s', 'abc') = '%.2s: ab'; \ No newline at end of file +select printf('%%.2s: %.2s', 'abc') = '%.2s: ab'; + +select printf('%%X: %X', 123.123); -- { serverError BAD_ARGUMENTS } +select printf('%%A: %A', 'abc'); -- { serverError BAD_ARGUMENTS } +select printf('%%s: %s', 100); -- { serverError BAD_ARGUMENTS } +select printf('%%n: %n', 100); -- { serverError BAD_ARGUMENTS } From abb747498d04fd6d6cac71d6097741b41bf572e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 16:54:44 +0200 Subject: [PATCH 1527/2361] Fix test `02789_reading_from_s3_with_connection_pool` --- ...89_reading_from_s3_with_connection_pool.sh | 84 +++++++++++-------- 1 file changed, 50 insertions(+), 34 deletions(-) diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh index 751b2798243..39399842db1 100755 --- a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh @@ -14,43 +14,59 @@ SETTINGS disk = 's3_disk', min_bytes_for_wide_part = 0; INSERT INTO test_s3 SELECT number, number FROM numbers_mt(1e7); " -query="SELECT a, b FROM test_s3" -query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) -${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CLIENT} -nm --query " -SELECT ProfileEvents['DiskConnectionsPreserved'] > 0 -FROM system.query_log -WHERE type = 'QueryFinish' - AND current_database = currentDatabase() - AND query_id='$query_id'; -" + +# This (reusing connections from the pool) is not guaranteed to always happen, +# (due to random time difference between the queries and random activity in parallel) +# but should happen most of the time. 
+ +while true +do + query="SELECT a, b FROM test_s3" + query_id=$(${CLICKHOUSE_CLIENT} --query "select queryID() from ($query) limit 1" 2>&1) + ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" + + RES=$(${CLICKHOUSE_CLIENT} -nm --query " + SELECT ProfileEvents['DiskConnectionsPreserved'] > 0 + FROM system.query_log + WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query_id='$query_id'; + ") + + [[ $RES -eq 1 ]] && echo "$RES" && break; +done # Test connection pool in ReadWriteBufferFromHTTP -query_id=$(${CLICKHOUSE_CLIENT} -nq " -create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n; -set insert_keeper_fault_injection_probability=0; -insert into mut values (1, 2, 3), (10, 20, 30); +while true +do + query_id=$(${CLICKHOUSE_CLIENT} -nq " + create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n; + set insert_keeper_fault_injection_probability=0; + insert into mut values (1, 2, 3), (10, 20, 30); -system stop merges mut; -alter table mut delete where n = 10; + system stop merges mut; + alter table mut delete where n = 10; -select queryID() from( - -- a funny way to wait for a MUTATE_PART to be assigned - select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( - 'select 1 where ''MUTATE_PART'' not in (select type from system.replication_queue where database=''' || currentDatabase() || ''' and table=''mut'')' - ), 'LineAsString', 's String') - -- queryID() will be returned for each row, since the query above doesn't return anything we need to return a fake row - union all - select 1 -) limit 1 settings max_threads=1; -" 2>&1) -${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CLIENT} -nm --query " -SELECT ProfileEvents['StorageConnectionsPreserved'] > 0 -FROM system.query_log -WHERE type = 'QueryFinish' - AND current_database = currentDatabase() - AND query_id='$query_id'; -" + select queryID() from( + -- a funny way to wait for a MUTATE_PART to be assigned + select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select 1 where ''MUTATE_PART'' not in (select type from system.replication_queue where database=''' || currentDatabase() || ''' and table=''mut'')' + ), 'LineAsString', 's String') + -- queryID() will be returned for each row, since the query above doesn't return anything we need to return a fake row + union all + select 1 + ) limit 1 settings max_threads=1; + " 2>&1) + ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" + RES=$(${CLICKHOUSE_CLIENT} -nm --query " + SELECT ProfileEvents['StorageConnectionsPreserved'] > 0 + FROM system.query_log + WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query_id='$query_id'; + ") + + [[ $RES -eq 1 ]] && echo "$RES" && break; +done From fc651cc0c61feb37e9cf104612cc0ac0cd7448e9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 17:57:48 +0200 Subject: [PATCH 1528/2361] Fix strange code in HostResolvePool --- src/Common/HostResolvePool.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/Common/HostResolvePool.cpp b/src/Common/HostResolvePool.cpp index cad64ee7204..e8a05a269bc 100644 --- a/src/Common/HostResolvePool.cpp +++ b/src/Common/HostResolvePool.cpp @@ -253,18 +253,18 @@ void HostResolver::updateImpl(Poco::Timestamp now, std::vector Date: Sat, 3 Aug 2024 18:30:33 +0200 Subject: [PATCH 1529/2361] Fix 
typo --- programs/server/Server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 20db4c2773c..7800ee9ff00 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -849,7 +849,7 @@ try #endif #if defined(SANITIZER) - LOG_INFO(log, "Query Profiler disabled because they cannot work under sanitizers" + LOG_INFO(log, "Query Profiler is disabled because it cannot work under sanitizers" " when two different stack unwinding methods will interfere with each other."); #endif From 60648e5240fecb92344ff029d2b280f542c3a86e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 18:41:17 +0200 Subject: [PATCH 1530/2361] Revert "Add replication lag and recovery time metrics" --- src/Databases/DatabaseReplicated.cpp | 57 ++++------------- src/Databases/DatabaseReplicated.h | 10 +-- src/Databases/DatabaseReplicatedWorker.cpp | 21 ------- src/Databases/DatabaseReplicatedWorker.h | 5 -- src/Storages/System/StorageSystemClusters.cpp | 37 ++++------- src/Storages/System/StorageSystemClusters.h | 4 +- .../test_recovery_time_metric/__init__.py | 0 .../configs/config.xml | 41 ------------- .../test_recovery_time_metric/test.py | 61 ------------------- .../02117_show_create_table_system.reference | 2 - .../03206_replication_lag_metric.reference | 4 -- .../03206_replication_lag_metric.sql | 11 ---- 12 files changed, 27 insertions(+), 226 deletions(-) delete mode 100644 tests/integration/test_recovery_time_metric/__init__.py delete mode 100644 tests/integration/test_recovery_time_metric/configs/config.xml delete mode 100644 tests/integration/test_recovery_time_metric/test.py delete mode 100644 tests/queries/0_stateless/03206_replication_lag_metric.reference delete mode 100644 tests/queries/0_stateless/03206_replication_lag_metric.sql diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index d2dee9b5994..f127ccbc224 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -339,12 +338,9 @@ ClusterPtr DatabaseReplicated::getClusterImpl(bool all_groups) const return std::make_shared(getContext()->getSettingsRef(), shards, params); } -ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) const +std::vector DatabaseReplicated::tryGetAreReplicasActive(const ClusterPtr & cluster_) const { - Strings paths_get, paths_exists; - - paths_get.emplace_back(fs::path(zookeeper_path) / "max_log_ptr"); - + Strings paths; const auto & addresses_with_failover = cluster_->getShardsAddresses(); const auto & shards_info = cluster_->getShardsInfo(); for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) @@ -352,59 +348,32 @@ ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) for (const auto & replica : addresses_with_failover[shard_index]) { String full_name = getFullReplicaName(replica.database_shard_name, replica.database_replica_name); - paths_exists.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "active"); - paths_get.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "log_ptr"); + paths.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "active"); } } try { auto current_zookeeper = getZooKeeper(); - auto get_res = current_zookeeper->get(paths_get); - auto exist_res = current_zookeeper->exists(paths_exists); - chassert(get_res.size() == 
exist_res.size() + 1); + auto res = current_zookeeper->exists(paths); - auto max_log_ptr_zk = get_res[0]; - if (max_log_ptr_zk.error != Coordination::Error::ZOK) - throw Coordination::Exception(max_log_ptr_zk.error); + std::vector statuses; + statuses.resize(paths.size()); - UInt32 max_log_ptr = parse(max_log_ptr_zk.data); + for (size_t i = 0; i < res.size(); ++i) + if (res[i].error == Coordination::Error::ZOK) + statuses[i] = 1; - ReplicasInfo replicas_info; - replicas_info.resize(exist_res.size()); - - size_t global_replica_index = 0; - for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) - { - for (const auto & replica : addresses_with_failover[shard_index]) - { - auto replica_active = exist_res[global_replica_index]; - auto replica_log_ptr = get_res[global_replica_index + 1]; - - if (replica_active.error != Coordination::Error::ZOK && replica_active.error != Coordination::Error::ZNONODE) - throw Coordination::Exception(replica_active.error); - - if (replica_log_ptr.error != Coordination::Error::ZOK) - throw Coordination::Exception(replica_log_ptr.error); - - replicas_info[global_replica_index] = ReplicaInfo{ - .is_active = replica_active.error == Coordination::Error::ZOK, - .replication_lag = max_log_ptr - parse(replica_log_ptr.data), - .recovery_time = replica.is_local ? ddl_worker->getCurrentInitializationDurationMs() : 0, - }; - - ++global_replica_index; - } - } - - return replicas_info; - } catch (...) + return statuses; + } + catch (...) { tryLogCurrentException(log); return {}; } } + void DatabaseReplicated::fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config_ref) { const auto & config_prefix = fmt::format("named_collections.{}", collection_name); diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 5a1570ae2e2..27ab262d1f1 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -17,14 +17,6 @@ using ZooKeeperPtr = std::shared_ptr; class Cluster; using ClusterPtr = std::shared_ptr; -struct ReplicaInfo -{ - bool is_active; - UInt32 replication_lag; - UInt64 recovery_time; -}; -using ReplicasInfo = std::vector; - class DatabaseReplicated : public DatabaseAtomic { public: @@ -92,7 +84,7 @@ public: static void dropReplica(DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica, bool throw_if_noop); - ReplicasInfo tryGetReplicasInfo(const ClusterPtr & cluster_) const; + std::vector tryGetAreReplicasActive(const ClusterPtr & cluster_) const; void renameDatabase(ContextPtr query_context, const String & new_name) override; diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 4e7408aa96e..1ef88dc03bc 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -32,12 +32,6 @@ DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db bool DatabaseReplicatedDDLWorker::initializeMainThread() { - { - std::lock_guard lock(initialization_duration_timer_mutex); - initialization_duration_timer.emplace(); - initialization_duration_timer->start(); - } - while (!stop_flag) { try @@ -75,10 +69,6 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() initializeReplication(); initialized = true; - { - std::lock_guard lock(initialization_duration_timer_mutex); - initialization_duration_timer.reset(); - } return true; } catch (...) 
@@ -88,11 +78,6 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() } } - { - std::lock_guard lock(initialization_duration_timer_mutex); - initialization_duration_timer.reset(); - } - return false; } @@ -474,10 +459,4 @@ UInt32 DatabaseReplicatedDDLWorker::getLogPointer() const return max_id.load(); } -UInt64 DatabaseReplicatedDDLWorker::getCurrentInitializationDurationMs() const -{ - std::lock_guard lock(initialization_duration_timer_mutex); - return initialization_duration_timer ? initialization_duration_timer->elapsedMilliseconds() : 0; -} - } diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index 2309c831839..41edf2221b8 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -36,8 +36,6 @@ public: DatabaseReplicated * const database, bool committed = false); /// NOLINT UInt32 getLogPointer() const; - - UInt64 getCurrentInitializationDurationMs() const; private: bool initializeMainThread() override; void initializeReplication(); @@ -58,9 +56,6 @@ private: ZooKeeperPtr active_node_holder_zookeeper; /// It will remove "active" node when database is detached zkutil::EphemeralNodeHolderPtr active_node_holder; - - std::optional initialization_duration_timer; - mutable std::mutex initialization_duration_timer_mutex; }; } diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index d03b600b6ef..160c8d6270e 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -31,8 +31,6 @@ ColumnsDescription StorageSystemClusters::getColumnsDescription() {"database_shard_name", std::make_shared(), "The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database)."}, {"database_replica_name", std::make_shared(), "The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database)."}, {"is_active", std::make_shared(std::make_shared()), "The status of the Replicated database replica (for clusters that belong to a Replicated database): 1 means 'replica is online', 0 means 'replica is offline', NULL means 'unknown'."}, - {"replication_lag", std::make_shared(std::make_shared()), "The replication lag of the `Replicated` database replica (for clusters that belong to a Replicated database)."}, - {"recovery_time", std::make_shared(std::make_shared()), "The recovery time of the `Replicated` database replica (for clusters that belong to a Replicated database), in milliseconds."}, }; description.setAliases({ @@ -48,30 +46,31 @@ void StorageSystemClusters::fillData(MutableColumns & res_columns, ContextPtr co writeCluster(res_columns, name_and_cluster, {}); const auto databases = DatabaseCatalog::instance().getDatabases(); - for (const auto & [database_name, database] : databases) + for (const auto & name_and_database : databases) { - if (const auto * replicated = typeid_cast(database.get())) + if (const auto * replicated = typeid_cast(name_and_database.second.get())) { + if (auto database_cluster = replicated->tryGetCluster()) - writeCluster(res_columns, {database_name, database_cluster}, - replicated->tryGetReplicasInfo(database_cluster)); + writeCluster(res_columns, {name_and_database.first, database_cluster}, + replicated->tryGetAreReplicasActive(database_cluster)); if (auto database_cluster = replicated->tryGetAllGroupsCluster()) - writeCluster(res_columns, {DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX + database_name, database_cluster}, - 
replicated->tryGetReplicasInfo(database_cluster)); + writeCluster(res_columns, {DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX + name_and_database.first, database_cluster}, + replicated->tryGetAreReplicasActive(database_cluster)); } } } void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, - const ReplicasInfo & replicas_info) + const std::vector & is_active) { const String & cluster_name = name_and_cluster.first; const ClusterPtr & cluster = name_and_cluster.second; const auto & shards_info = cluster->getShardsInfo(); const auto & addresses_with_failover = cluster->getShardsAddresses(); - size_t global_replica_idx = 0; + size_t replica_idx = 0; for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) { const auto & shard_info = shards_info[shard_index]; @@ -100,24 +99,10 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam res_columns[i++]->insert(pool_status[replica_index].estimated_recovery_time.count()); res_columns[i++]->insert(address.database_shard_name); res_columns[i++]->insert(address.database_replica_name); - if (replicas_info.empty()) - { + if (is_active.empty()) res_columns[i++]->insertDefault(); - res_columns[i++]->insertDefault(); - res_columns[i++]->insertDefault(); - } else - { - const auto & replica_info = replicas_info[global_replica_idx]; - res_columns[i++]->insert(replica_info.is_active); - res_columns[i++]->insert(replica_info.replication_lag); - if (replica_info.recovery_time != 0) - res_columns[i++]->insert(replica_info.recovery_time); - else - res_columns[i++]->insertDefault(); - } - - ++global_replica_idx; + res_columns[i++]->insert(is_active[replica_idx++]); } } } diff --git a/src/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h index f6e08734896..0f7c792261d 100644 --- a/src/Storages/System/StorageSystemClusters.h +++ b/src/Storages/System/StorageSystemClusters.h @@ -1,10 +1,10 @@ #pragma once -#include #include #include #include + namespace DB { @@ -27,7 +27,7 @@ protected: using NameAndCluster = std::pair>; void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node *, std::vector) const override; - static void writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, const ReplicasInfo & replicas_info); + static void writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, const std::vector & is_active); }; } diff --git a/tests/integration/test_recovery_time_metric/__init__.py b/tests/integration/test_recovery_time_metric/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_recovery_time_metric/configs/config.xml b/tests/integration/test_recovery_time_metric/configs/config.xml deleted file mode 100644 index bad9b1fa9ea..00000000000 --- a/tests/integration/test_recovery_time_metric/configs/config.xml +++ /dev/null @@ -1,41 +0,0 @@ - - 9000 - - - - - - - - - default - - - - - - 2181 - 1 - /var/lib/clickhouse/coordination/log - /var/lib/clickhouse/coordination/snapshots - - 20000 - - - - 1 - localhost - 9444 - - - - - - - localhost - 2181 - - 20000 - - - diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py deleted file mode 100644 index 6fcf2fad423..00000000000 --- a/tests/integration/test_recovery_time_metric/test.py +++ /dev/null @@ -1,61 +0,0 @@ -import pytest -from helpers.cluster import ClickHouseCluster - -cluster = 
ClickHouseCluster(__file__) -node = cluster.add_instance( - "node", - main_configs=["configs/config.xml"], - stay_alive=True, -) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - yield cluster - finally: - cluster.shutdown() - - -def test_recovery_time_metric(start_cluster): - node.query( - """ - DROP DATABASE IF EXISTS rdb; - CREATE DATABASE rdb - ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1') - """ - ) - - node.query( - """ - DROP TABLE IF EXISTS rdb.t; - CREATE TABLE rdb.t - ( - `x` UInt32 - ) - ENGINE = MergeTree - ORDER BY x - """ - ) - - node.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"]) - - node.restart_clickhouse() - - ret = int( - node.query( - """ - SELECT recovery_time - FROM system.clusters - WHERE cluster = 'rdb' - """ - ).strip() - ) - assert ret > 0 - - node.query( - """ - DROP DATABASE rdb - """ - ) diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 32e8b2f4312..cfae4fee6c2 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -52,8 +52,6 @@ CREATE TABLE system.clusters `database_shard_name` String, `database_replica_name` String, `is_active` Nullable(UInt8), - `replication_lag` Nullable(UInt32), - `recovery_time` Nullable(UInt64), `name` String ALIAS cluster ) ENGINE = SystemClusters diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.reference b/tests/queries/0_stateless/03206_replication_lag_metric.reference deleted file mode 100644 index 02f4a7264b1..00000000000 --- a/tests/queries/0_stateless/03206_replication_lag_metric.reference +++ /dev/null @@ -1,4 +0,0 @@ -0 -2 -0 -2 diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.sql b/tests/queries/0_stateless/03206_replication_lag_metric.sql deleted file mode 100644 index 998c332a11c..00000000000 --- a/tests/queries/0_stateless/03206_replication_lag_metric.sql +++ /dev/null @@ -1,11 +0,0 @@ --- Tags: no-parallel - -CREATE DATABASE rdb1 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica1'); -CREATE DATABASE rdb2 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica2'); - -SET distributed_ddl_task_timeout = 0; -CREATE TABLE rdb1.t (id UInt32) ENGINE = ReplicatedMergeTree ORDER BY id; -SELECT replication_lag FROM system.clusters WHERE cluster IN ('rdb1', 'rdb2') ORDER BY cluster ASC, replica_num ASC; - -DROP DATABASE rdb1; -DROP DATABASE rdb2; From 113f7e0c8c9cc61ab72f5ccd8bb6e5fac58cbaea Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 19:37:38 +0200 Subject: [PATCH 1531/2361] Maybe better --- tests/queries/1_stateful/00157_cache_dictionary.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/1_stateful/00157_cache_dictionary.sql b/tests/queries/1_stateful/00157_cache_dictionary.sql index bb5a21d0779..f1bee538828 100644 --- a/tests/queries/1_stateful/00157_cache_dictionary.sql +++ b/tests/queries/1_stateful/00157_cache_dictionary.sql @@ -9,7 +9,8 @@ ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS storage_policy = 'default'; -INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_block_size = 8192; +INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 + SETTINGS 
min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_block_size = 8192, max_insert_threads = 1, max_threads = 1; CREATE DATABASE IF NOT EXISTS db_dict; DROP DICTIONARY IF EXISTS db_dict.cache_hits; From 3dedd8d76b1c3fdb533e9f47d537e4cdf369af5e Mon Sep 17 00:00:00 2001 From: Max K Date: Sat, 3 Aug 2024 10:40:12 +0200 Subject: [PATCH 1532/2361] CI: Minor refactoring in ci_utils --- tests/ci/bugfix_validate_check.py | 5 +- tests/ci/ci.py | 8 +- tests/ci/ci_cache.py | 4 +- tests/ci/ci_config.py | 7 +- tests/ci/ci_definitions.py | 22 ----- tests/ci/ci_settings.py | 9 +- tests/ci/ci_utils.py | 150 +++++------------------------ tests/ci/report.py | 3 +- tests/ci/run_check.py | 152 +++++++++++++++++++++++++++--- tests/ci/test_ci_config.py | 12 ++- 10 files changed, 186 insertions(+), 186 deletions(-) diff --git a/tests/ci/bugfix_validate_check.py b/tests/ci/bugfix_validate_check.py index 71b18572938..932d709a7b8 100644 --- a/tests/ci/bugfix_validate_check.py +++ b/tests/ci/bugfix_validate_check.py @@ -8,7 +8,6 @@ from pathlib import Path from typing import List, Sequence, Tuple from ci_config import CI -from ci_utils import normalize_string from env_helper import TEMP_PATH from functional_test_check import NO_CHANGES_MSG from report import ( @@ -142,7 +141,9 @@ def main(): for file in set(jr.additional_files): file_ = Path(file) file_name = file_.name - file_name = file_name.replace(".", "__" + normalize_string(job_id) + ".", 1) + file_name = file_name.replace( + ".", "__" + CI.Utils.normalize_string(job_id) + ".", 1 + ) file_ = file_.rename(file_.parent / file_name) additional_files.append(file_) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 2565c8944e4..e36f2904182 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -16,7 +16,7 @@ import upload_result_helper from build_check import get_release_or_pr from ci_config import CI from ci_metadata import CiMetadata -from ci_utils import GH, normalize_string, Utils +from ci_utils import GH, Utils from clickhouse_helper import ( CiLogsCredentials, ClickHouseHelper, @@ -296,7 +296,7 @@ def _pre_action(s3, job_name, batch, indata, pr_info): # do not set report prefix for scheduled or dispatched wf (in case it started from feature branch while # testing), otherwise reports won't be found if not (pr_info.is_scheduled or pr_info.is_dispatched): - report_prefix = normalize_string(pr_info.head_ref) + report_prefix = Utils.normalize_string(pr_info.head_ref) print( f"Use report prefix [{report_prefix}], pr_num [{pr_info.number}], head_ref [{pr_info.head_ref}]" ) @@ -718,7 +718,7 @@ def _upload_build_artifacts( ( get_release_or_pr(pr_info, get_version_from_repo())[1], pr_info.sha, - normalize_string(build_name), + Utils.normalize_string(build_name), "performance.tar.zst", ) ) @@ -1248,7 +1248,7 @@ def main() -> int: ( get_release_or_pr(pr_info, get_version_from_repo())[0], pr_info.sha, - normalize_string( + Utils.normalize_string( job_report.check_name or _get_ext_check_name(args.job_name) ), ) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 4846233ab03..a59fd3e5a29 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -7,7 +7,7 @@ from typing import Dict, Optional, Any, Union, Sequence, List, Set from ci_config import CI -from ci_utils import is_hex, GH +from ci_utils import Utils, GH from commit_status_helper import CommitStatusData from env_helper import ( TEMP_PATH, @@ -240,7 +240,7 @@ class CiCache: int(job_properties[-1]), ) - if not is_hex(job_digest): + if not Utils.is_hex(job_digest): print("ERROR: wrong 
record job digest") return None diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index c031ca9b805..ef48466e451 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -3,7 +3,7 @@ import re from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from typing import Dict, Optional, List -from ci_utils import normalize_string +from ci_utils import Utils from ci_definitions import * @@ -13,7 +13,6 @@ class CI: each config item in the below dicts should be an instance of JobConfig class or inherited from it """ - MAX_TOTAL_FAILURES_BEFORE_BLOCKING_CI = 5 MAX_TOTAL_FAILURES_PER_JOB_BEFORE_BLOCKING_CI = 2 # reimport types to CI class so that they visible as CI.* and mypy is happy @@ -37,9 +36,7 @@ class CI: from ci_utils import GH as GH from ci_utils import Shell as Shell from ci_definitions import Labels as Labels - from ci_definitions import TRUSTED_CONTRIBUTORS as TRUSTED_CONTRIBUTORS from ci_definitions import WorkFlowNames as WorkFlowNames - from ci_utils import CATEGORY_TO_LABEL as CATEGORY_TO_LABEL # Jobs that run for doc related updates _DOCS_CHECK_JOBS = [JobNames.DOCS_CHECK, JobNames.STYLE_CHECK] @@ -558,7 +555,7 @@ class CI: @classmethod def get_tag_config(cls, label_name: str) -> Optional[LabelConfig]: for label, config in cls.TAG_CONFIGS.items(): - if normalize_string(label_name) == normalize_string(label): + if Utils.normalize_string(label_name) == Utils.normalize_string(label): return config return None diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index de6791acda8..795bda3d4b0 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -32,28 +32,6 @@ class Labels: AUTO_BACKPORT = {"pr-critical-bugfix"} -TRUSTED_CONTRIBUTORS = { - e.lower() - for e in [ - "amosbird", - "azat", # SEMRush - "bharatnc", # Many contributions. 
- "cwurm", # ClickHouse, Inc - "den-crane", # Documentation contributor - "ildus", # adjust, ex-pgpro - "nvartolomei", # Seasoned contributor, CloudFlare - "taiyang-li", - "ucasFL", # Amos Bird's friend - "thomoco", # ClickHouse, Inc - "tonickkozlov", # Cloudflare - "tylerhannan", # ClickHouse, Inc - "tsolodov", # ClickHouse, Inc - "justindeguzman", # ClickHouse, Inc - "XuJia0210", # ClickHouse, Inc - ] -} - - class WorkflowStages(metaclass=WithIter): """ Stages of GitHUb actions workflow diff --git a/tests/ci/ci_settings.py b/tests/ci/ci_settings.py index d6e9765ceb7..05929179e06 100644 --- a/tests/ci/ci_settings.py +++ b/tests/ci/ci_settings.py @@ -2,7 +2,6 @@ import re from dataclasses import dataclass, asdict from typing import Optional, List, Dict, Any, Iterable -from ci_utils import normalize_string from ci_config import CI from git_helper import Runner as GitRunner, GIT_PREFIX from pr_info import PRInfo @@ -89,14 +88,14 @@ class CiSettings: if not res.include_keywords: res.include_keywords = [] res.include_keywords.append( - normalize_string(match.removeprefix("ci_include_")) + CI.Utils.normalize_string(match.removeprefix("ci_include_")) ) elif match.startswith("ci_exclude_"): if not res.exclude_keywords: res.exclude_keywords = [] keywords = match.removeprefix("ci_exclude_").split("|") res.exclude_keywords += [ - normalize_string(keyword) for keyword in keywords + CI.Utils.normalize_string(keyword) for keyword in keywords ] elif match == CI.Tags.NO_CI_CACHE: res.no_ci_cache = True @@ -163,7 +162,7 @@ class CiSettings: # do not exclude builds if self.exclude_keywords and not CI.is_build_job(job): for keyword in self.exclude_keywords: - if keyword in normalize_string(job): + if keyword in CI.Utils.normalize_string(job): print(f"Job [{job}] matches Exclude keyword [{keyword}] - deny") return False @@ -174,7 +173,7 @@ class CiSettings: # never exclude Style Check by include keywords return True for keyword in self.include_keywords: - if keyword in normalize_string(job): + if keyword in CI.Utils.normalize_string(job): print(f"Job [{job}] matches Include keyword [{keyword}] - pass") return True to_deny = True diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index dae1520afb6..067bedb19c3 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -6,7 +6,7 @@ import sys import time from contextlib import contextmanager from pathlib import Path -from typing import Any, Iterator, List, Union, Optional, Sequence, Tuple +from typing import Any, Iterator, List, Union, Optional, Sequence import requests @@ -20,41 +20,6 @@ class Envs: GITHUB_WORKFLOW = os.getenv("GITHUB_WORKFLOW", "") -LABEL_CATEGORIES = { - "pr-backward-incompatible": ["Backward Incompatible Change"], - "pr-bugfix": [ - "Bug Fix", - "Bug Fix (user-visible misbehavior in an official stable release)", - "Bug Fix (user-visible misbehaviour in official stable or prestable release)", - "Bug Fix (user-visible misbehavior in official stable or prestable release)", - ], - "pr-critical-bugfix": ["Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)"], - "pr-build": [ - "Build/Testing/Packaging Improvement", - "Build Improvement", - "Build/Testing Improvement", - "Build", - "Packaging Improvement", - ], - "pr-documentation": [ - "Documentation (changelog entry is not required)", - "Documentation", - ], - "pr-feature": ["New Feature"], - "pr-improvement": ["Improvement"], - "pr-not-for-changelog": [ - "Not for changelog (changelog entry is not required)", - "Not for changelog", - ], - "pr-performance": ["Performance 
Improvement"], - "pr-ci": ["CI Fix or Improvement (changelog entry is not required)"], -} - -CATEGORY_TO_LABEL = { - c: lb for lb, categories in LABEL_CATEGORIES.items() for c in categories -} - - class WithIter(type): def __iter__(cls): return (v for k, v in cls.__dict__.items() if not k.startswith("_")) @@ -70,21 +35,6 @@ def cd(path: Union[Path, str]) -> Iterator[None]: os.chdir(oldpwd) -def is_hex(s): - try: - int(s, 16) - return True - except ValueError: - return False - - -def normalize_string(string: str) -> str: - res = string.lower() - for r in ((" ", "_"), ("(", "_"), (")", "_"), (",", "_"), ("/", "_"), ("-", "_")): - res = res.replace(*r) - return res - - class GH: class ActionsNames: RunConfig = "RunConfig" @@ -149,8 +99,8 @@ class GH: ) -> str: assert len(token) == 40 assert len(commit_sha) == 40 - assert is_hex(commit_sha) - assert not is_hex(token) + assert Utils.is_hex(commit_sha) + assert not Utils.is_hex(token) url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/statuses?per_page={200}" headers = { "Authorization": f"token {token}", @@ -298,79 +248,23 @@ class Utils: Shell.check("sudo dmesg --clear", verbose=True) @staticmethod - def check_pr_description(pr_body: str, repo_name: str) -> Tuple[str, str]: - """The function checks the body to being properly formatted according to - .github/PULL_REQUEST_TEMPLATE.md, if the first returned string is not empty, - then there is an error.""" - lines = list(map(lambda x: x.strip(), pr_body.split("\n") if pr_body else [])) - lines = [re.sub(r"\s+", " ", line) for line in lines] + def is_hex(s): + try: + int(s, 16) + return True + except ValueError: + return False - # Check if body contains "Reverts ClickHouse/ClickHouse#36337" - if [ - True for line in lines if re.match(rf"\AReverts {repo_name}#[\d]+\Z", line) - ]: - return "", LABEL_CATEGORIES["pr-not-for-changelog"][0] - - category = "" - entry = "" - description_error = "" - - i = 0 - while i < len(lines): - if re.match(r"(?i)^[#>*_ ]*change\s*log\s*category", lines[i]): - i += 1 - if i >= len(lines): - break - # Can have one empty line between header and the category - # itself. Filter it out. - if not lines[i]: - i += 1 - if i >= len(lines): - break - category = re.sub(r"^[-*\s]*", "", lines[i]) - i += 1 - - # Should not have more than one category. Require empty line - # after the first found category. - if i >= len(lines): - break - if lines[i]: - second_category = re.sub(r"^[-*\s]*", "", lines[i]) - description_error = ( - "More than one changelog category specified: " - f"'{category}', '{second_category}'" - ) - return description_error, category - - elif re.match( - r"(?i)^[#>*_ ]*(short\s*description|change\s*log\s*entry)", lines[i] - ): - i += 1 - # Can have one empty line between header and the entry itself. - # Filter it out. - if i < len(lines) and not lines[i]: - i += 1 - # All following lines until empty one are the changelog entry. - entry_lines = [] - while i < len(lines) and lines[i]: - entry_lines.append(lines[i]) - i += 1 - entry = " ".join(entry_lines) - # Don't accept changelog entries like '...'. - entry = re.sub(r"[#>*_.\- ]", "", entry) - # Don't accept changelog entries like 'Close #12345'. - entry = re.sub(r"^[\w\-\s]{0,10}#?\d{5,6}\.?$", "", entry) - else: - i += 1 - - if not category: - description_error = "Changelog category is empty" - # Filter out the PR categories that are not for changelog. 
- elif "(changelog entry is not required)" in category: - pass # to not check the rest of the conditions - elif category not in CATEGORY_TO_LABEL: - description_error, category = f"Category '{category}' is not valid", "" - elif not entry: - description_error = f"Changelog entry required for category '{category}'" - - return description_error, category + @staticmethod + def normalize_string(string: str) -> str: + res = string.lower() + for r in ( + (" ", "_"), + ("(", "_"), + (")", "_"), + (",", "_"), + ("/", "_"), + ("-", "_"), + ): + res = res.replace(*r) + return res diff --git a/tests/ci/report.py b/tests/ci/report.py index f50ed4c1f85..f5571939d0b 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -22,7 +22,6 @@ from typing import ( from build_download_helper import get_gh_api from ci_config import CI -from ci_utils import normalize_string from env_helper import REPORT_PATH, GITHUB_WORKSPACE logger = logging.getLogger(__name__) @@ -622,7 +621,7 @@ class BuildResult: def write_json(self, directory: Union[Path, str] = REPORT_PATH) -> Path: path = Path(directory) / self.get_report_name( - self.build_name, self.pr_number or normalize_string(self.head_ref) + self.build_name, self.pr_number or CI.Utils.normalize_string(self.head_ref) ) path.write_text( json.dumps( diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py index 0ad01e3accd..55a0c383812 100644 --- a/tests/ci/run_check.py +++ b/tests/ci/run_check.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import logging +import re import sys from typing import Tuple @@ -16,7 +17,6 @@ from commit_status_helper import ( from env_helper import GITHUB_REPOSITORY, GITHUB_SERVER_URL from get_robot_token import get_best_robot_token from ci_config import CI -from ci_utils import Utils from pr_info import PRInfo from report import FAILURE, PENDING, SUCCESS, StatusType @@ -25,12 +25,144 @@ TRUSTED_ORG_IDS = { 54801242, # clickhouse } +TRUSTED_CONTRIBUTORS = { + e.lower() + for e in [ + "amosbird", + "azat", # SEMRush + "bharatnc", # Many contributions. 
+ "cwurm", # ClickHouse, Inc + "den-crane", # Documentation contributor + "ildus", # adjust, ex-pgpro + "nvartolomei", # Seasoned contributor, CloudFlare + "taiyang-li", + "ucasFL", # Amos Bird's friend + "thomoco", # ClickHouse, Inc + "tonickkozlov", # Cloudflare + "tylerhannan", # ClickHouse, Inc + "tsolodov", # ClickHouse, Inc + "justindeguzman", # ClickHouse, Inc + "XuJia0210", # ClickHouse, Inc + ] +} + OK_SKIP_LABELS = {CI.Labels.RELEASE, CI.Labels.PR_BACKPORT, CI.Labels.PR_CHERRYPICK} PR_CHECK = "PR Check" +LABEL_CATEGORIES = { + "pr-backward-incompatible": ["Backward Incompatible Change"], + "pr-bugfix": [ + "Bug Fix", + "Bug Fix (user-visible misbehavior in an official stable release)", + "Bug Fix (user-visible misbehaviour in official stable or prestable release)", + "Bug Fix (user-visible misbehavior in official stable or prestable release)", + ], + "pr-critical-bugfix": ["Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)"], + "pr-build": [ + "Build/Testing/Packaging Improvement", + "Build Improvement", + "Build/Testing Improvement", + "Build", + "Packaging Improvement", + ], + "pr-documentation": [ + "Documentation (changelog entry is not required)", + "Documentation", + ], + "pr-feature": ["New Feature"], + "pr-improvement": ["Improvement"], + "pr-not-for-changelog": [ + "Not for changelog (changelog entry is not required)", + "Not for changelog", + ], + "pr-performance": ["Performance Improvement"], + "pr-ci": ["CI Fix or Improvement (changelog entry is not required)"], +} + +CATEGORY_TO_LABEL = { + c: lb for lb, categories in LABEL_CATEGORIES.items() for c in categories +} + + +def check_pr_description(pr_body: str, repo_name: str) -> Tuple[str, str]: + """The function checks the body to being properly formatted according to + .github/PULL_REQUEST_TEMPLATE.md, if the first returned string is not empty, + then there is an error.""" + lines = list(map(lambda x: x.strip(), pr_body.split("\n") if pr_body else [])) + lines = [re.sub(r"\s+", " ", line) for line in lines] + + # Check if body contains "Reverts ClickHouse/ClickHouse#36337" + if [True for line in lines if re.match(rf"\AReverts {repo_name}#[\d]+\Z", line)]: + return "", LABEL_CATEGORIES["pr-not-for-changelog"][0] + + category = "" + entry = "" + description_error = "" + + i = 0 + while i < len(lines): + if re.match(r"(?i)^[#>*_ ]*change\s*log\s*category", lines[i]): + i += 1 + if i >= len(lines): + break + # Can have one empty line between header and the category + # itself. Filter it out. + if not lines[i]: + i += 1 + if i >= len(lines): + break + category = re.sub(r"^[-*\s]*", "", lines[i]) + i += 1 + + # Should not have more than one category. Require empty line + # after the first found category. + if i >= len(lines): + break + if lines[i]: + second_category = re.sub(r"^[-*\s]*", "", lines[i]) + description_error = ( + "More than one changelog category specified: " + f"'{category}', '{second_category}'" + ) + return description_error, category + + elif re.match( + r"(?i)^[#>*_ ]*(short\s*description|change\s*log\s*entry)", lines[i] + ): + i += 1 + # Can have one empty line between header and the entry itself. + # Filter it out. + if i < len(lines) and not lines[i]: + i += 1 + # All following lines until empty one are the changelog entry. + entry_lines = [] + while i < len(lines) and lines[i]: + entry_lines.append(lines[i]) + i += 1 + entry = " ".join(entry_lines) + # Don't accept changelog entries like '...'. + entry = re.sub(r"[#>*_.\- ]", "", entry) + # Don't accept changelog entries like 'Close #12345'. 
+ entry = re.sub(r"^[\w\-\s]{0,10}#?\d{5,6}\.?$", "", entry) + else: + i += 1 + + if not category: + description_error = "Changelog category is empty" + # Filter out the PR categories that are not for changelog. + elif "(changelog entry is not required)" in category: + pass # to not check the rest of the conditions + elif category not in CATEGORY_TO_LABEL: + description_error, category = f"Category '{category}' is not valid", "" + elif not entry: + description_error = f"Changelog entry required for category '{category}'" + + return description_error, category + + def pr_is_by_trusted_user(pr_user_login, pr_user_orgs): - if pr_user_login.lower() in CI.TRUSTED_CONTRIBUTORS: + if pr_user_login.lower() in TRUSTED_CONTRIBUTORS: logging.info("User '%s' is trusted", pr_user_login) return True @@ -92,22 +224,20 @@ def main(): commit = get_commit(gh, pr_info.sha) status = SUCCESS # type: StatusType - description_error, category = Utils.check_pr_description( - pr_info.body, GITHUB_REPOSITORY - ) + description_error, category = check_pr_description(pr_info.body, GITHUB_REPOSITORY) pr_labels_to_add = [] pr_labels_to_remove = [] if ( - category in CI.CATEGORY_TO_LABEL - and CI.CATEGORY_TO_LABEL[category] not in pr_info.labels + category in CATEGORY_TO_LABEL + and CATEGORY_TO_LABEL[category] not in pr_info.labels ): - pr_labels_to_add.append(CI.CATEGORY_TO_LABEL[category]) + pr_labels_to_add.append(CATEGORY_TO_LABEL[category]) for label in pr_info.labels: if ( - label in CI.CATEGORY_TO_LABEL.values() - and category in CI.CATEGORY_TO_LABEL - and label != CI.CATEGORY_TO_LABEL[category] + label in CATEGORY_TO_LABEL.values() + and category in CATEGORY_TO_LABEL + and label != CATEGORY_TO_LABEL[category] ): pr_labels_to_remove.append(label) diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index f376a129e6f..6ffedfdecd4 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -9,7 +9,7 @@ from ci_settings import CiSettings from pr_info import PRInfo, EventType from s3_helper import S3Helper from ci_cache import CiCache -from ci_utils import normalize_string +from ci_utils import Utils _TEST_EVENT_JSON = {"dummy": "dummy"} @@ -55,7 +55,7 @@ class TestCIConfig(unittest.TestCase): if CI.JOB_CONFIGS[job].job_name_keyword: self.assertTrue( CI.JOB_CONFIGS[job].job_name_keyword.lower() - in normalize_string(job), + in Utils.normalize_string(job), f"Job [{job}] apparently uses wrong common config with job keyword [{CI.JOB_CONFIGS[job].job_name_keyword}]", ) @@ -291,7 +291,9 @@ class TestCIConfig(unittest.TestCase): assert tag_config set_jobs = tag_config.run_jobs for job in set_jobs: - if any(k in normalize_string(job) for k in settings.exclude_keywords): + if any( + k in Utils.normalize_string(job) for k in settings.exclude_keywords + ): continue expected_jobs_to_do.append(job) for job, config in CI.JOB_CONFIGS.items(): @@ -303,12 +305,12 @@ class TestCIConfig(unittest.TestCase): # expected to run all builds jobs expected_jobs_to_do.append(job) if not any( - keyword in normalize_string(job) + keyword in Utils.normalize_string(job) for keyword in settings.include_keywords ): continue if any( - keyword in normalize_string(job) + keyword in Utils.normalize_string(job) for keyword in settings.exclude_keywords ): continue From a19750234153e760907f3c7bc040f949100534df Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 23:11:20 +0200 Subject: [PATCH 1533/2361] Fix test retries Should fix issues like: - 02494_zero_copy_projection_cancel_fetch - 
https://s3.amazonaws.com/clickhouse-test-reports/67719/40cd5467c18d65a6624d273ac1a8fd9cc9257d8c/stateless_tests__tsan__s3_storage__[4_4].html Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a29c786e998..877548e577e 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2218,7 +2218,6 @@ def run_tests_array(all_tests_with_params: Tuple[List[str], int, TestSuite, bool args, test_suite, client_options, server_logs_level ) test_result = test_case.process_result(test_result, MESSAGES) - break except TimeoutError: break finally: From 087aff87dd23b4821965904e0a760b3ec7c4f8f4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 23:12:07 +0200 Subject: [PATCH 1534/2361] Something --- src/Databases/DatabaseLazy.cpp | 2 +- src/Databases/DatabaseOnDisk.cpp | 8 ++++---- src/Databases/DatabaseOnDisk.h | 2 +- src/Databases/DatabaseOrdinary.cpp | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index ca30ee6db15..baa6910f6a8 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -52,7 +52,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, LoadingStrictnessLevel /*mode*/) { - iterateMetadataFiles(local_context, [this, &local_context](const String & file_name) + iterateMetadataFiles([this, &local_context](const String & file_name) { const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4)); diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 01d8867661b..82a81b0b32d 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -579,14 +579,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context) assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty()); if (local_context->getSettingsRef().force_remove_data_recursively_on_drop) { - (void)fs::remove_all(local_context->getPath() + getDataPath()); + (void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove_all(getMetadataPath()); } else { try { - (void)fs::remove(local_context->getPath() + getDataPath()); + (void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove(getMetadataPath()); } catch (const fs::filesystem_error & e) @@ -624,7 +624,7 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n } } -void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const +void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const { if (!fs::exists(metadata_path)) return; @@ -635,7 +635,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat static const char * tmp_drop_ext = ".sql.tmp_drop"; const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext)); - if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name)) + if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name)) { fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql"); LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name)); diff --git a/src/Databases/DatabaseOnDisk.h 
b/src/Databases/DatabaseOnDisk.h
index a8be674a4e2..0c0ecf76a26 100644
--- a/src/Databases/DatabaseOnDisk.h
+++ b/src/Databases/DatabaseOnDisk.h
@@ -83,7 +83,7 @@ protected:
 
     using IteratingFunction = std::function<void(const String &)>;
 
-    void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const;
+    void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const;
 
     ASTPtr getCreateTableQueryImpl(
         const String & table_name,
diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp
index 49719c25319..dd8a3f42ea8 100644
--- a/src/Databases/DatabaseOrdinary.cpp
+++ b/src/Databases/DatabaseOrdinary.cpp
@@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
         }
     };
 
-    iterateMetadataFiles(local_context, process_metadata);
+    iterateMetadataFiles(process_metadata);
 
     size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count;
     size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries;

From ced8c3445c929efdb62ec707a7a8af7ef1a9f541 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 3 Aug 2024 23:14:40 +0200
Subject: [PATCH 1535/2361] Something

---
 src/Databases/DatabaseLazy.cpp | 1 +
 src/Databases/DatabaseLazy.h   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp
index baa6910f6a8..e80ee930d79 100644
--- a/src/Databases/DatabaseLazy.cpp
+++ b/src/Databases/DatabaseLazy.cpp
@@ -47,6 +47,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_,
     : DatabaseOnDisk(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseLazy (" + name_ + ")", context_)
     , expiration_time(expiration_time_)
 {
+    createDirectories();
 }

diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h
index 4347649117d..aeac130594f 100644
--- a/src/Databases/DatabaseLazy.h
+++ b/src/Databases/DatabaseLazy.h
@@ -12,7 +12,7 @@ class DatabaseLazyIterator;
 class Context;
 
 /** Lazy engine of databases.
-  * Works like DatabaseOrdinary, but stores in memory only cache.
+  * Works like DatabaseOrdinary, but stores only recently accessed tables in memory.
   * Can be used only with *Log engines.
   */
 class DatabaseLazy final : public DatabaseOnDisk

From f06ae2f5518ff8cb610b337d4900fd6f0088190f Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Sat, 3 Aug 2024 23:27:19 +0200
Subject: [PATCH 1536/2361] Fill only selected columns from system.clusters

Some of them are pretty heavy, e.g. is_active for ReplicatedDatabase.

This should fix 02903_rmt_retriable_merge_exception flakiness [1].
[1]: https://s3.amazonaws.com/clickhouse-test-reports/67687/89c47df559ba23d988f8af3c342e0c8d5531f4b8/fast_test.html

Signed-off-by: Azat Khuzhin
---
 src/Storages/System/StorageSystemClusters.cpp | 82 ++++++++++++-------
 src/Storages/System/StorageSystemClusters.h   |  6 +-
 2 files changed, 56 insertions(+), 32 deletions(-)

diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp
index 160c8d6270e..9c5c07ae49f 100644
--- a/src/Storages/System/StorageSystemClusters.cpp
+++ b/src/Storages/System/StorageSystemClusters.cpp
@@ -40,10 +40,10 @@ ColumnsDescription StorageSystemClusters::getColumnsDescription()
     return description;
 }
 
-void StorageSystemClusters::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node *, std::vector<UInt8>) const
+void StorageSystemClusters::fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node *, std::vector<UInt8> columns_mask) const
 {
     for (const auto & name_and_cluster : context->getClusters())
-        writeCluster(res_columns, name_and_cluster, {});
+        writeCluster(res_columns, columns_mask, name_and_cluster, /* replicated= */ nullptr);
 
     const auto databases = DatabaseCatalog::instance().getDatabases();
     for (const auto & name_and_database : databases)
@@ -52,18 +52,15 @@ void StorageSystemClusters::fillData(MutableColumns & res_columns, ContextPtr co
         {
             if (auto database_cluster = replicated->tryGetCluster())
-                writeCluster(res_columns, {name_and_database.first, database_cluster},
-                             replicated->tryGetAreReplicasActive(database_cluster));
+                writeCluster(res_columns, columns_mask, {name_and_database.first, database_cluster}, replicated);
 
             if (auto database_cluster = replicated->tryGetAllGroupsCluster())
-                writeCluster(res_columns, {DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX + name_and_database.first, database_cluster},
-                             replicated->tryGetAreReplicasActive(database_cluster));
+                writeCluster(res_columns, columns_mask, {DatabaseReplicated::ALL_GROUPS_CLUSTER_PREFIX + name_and_database.first, database_cluster}, replicated);
         }
     }
 }
 
-void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster,
-    const std::vector<UInt8> & is_active)
+void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std::vector<UInt8> & columns_mask, const NameAndCluster & name_and_cluster, const DatabaseReplicated * replicated)
 {
     const String & cluster_name = name_and_cluster.first;
     const ClusterPtr & cluster = name_and_cluster.second;
@@ -79,30 +76,55 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const Nam
     for (size_t replica_index = 0; replica_index < shard_addresses.size(); ++replica_index)
     {
-        size_t i = 0;
+        size_t src_index = 0, res_index = 0;
         const auto & address = shard_addresses[replica_index];
-        res_columns[i++]->insert(cluster_name);
-        res_columns[i++]->insert(shard_info.shard_num);
-        res_columns[i++]->insert(shard_info.weight);
-        res_columns[i++]->insert(shard_info.has_internal_replication);
-        res_columns[i++]->insert(replica_index + 1);
-        res_columns[i++]->insert(address.host_name);
-        auto resolved = address.getResolvedAddress();
-        res_columns[i++]->insert(resolved ?
resolved->host().toString() : String()); - res_columns[i++]->insert(address.port); - res_columns[i++]->insert(address.is_local); - res_columns[i++]->insert(address.user); - res_columns[i++]->insert(address.default_database); - res_columns[i++]->insert(pool_status[replica_index].error_count); - res_columns[i++]->insert(pool_status[replica_index].slowdown_count); - res_columns[i++]->insert(pool_status[replica_index].estimated_recovery_time.count()); - res_columns[i++]->insert(address.database_shard_name); - res_columns[i++]->insert(address.database_replica_name); - if (is_active.empty()) - res_columns[i++]->insertDefault(); - else - res_columns[i++]->insert(is_active[replica_idx++]); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(cluster_name); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(shard_info.shard_num); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(shard_info.weight); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(shard_info.has_internal_replication); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(replica_index + 1); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.host_name); + if (columns_mask[src_index++]) + { + auto resolved = address.getResolvedAddress(); + res_columns[res_index++]->insert(resolved ? resolved->host().toString() : String()); + } + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.port); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.is_local); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.user); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.default_database); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(pool_status[replica_index].error_count); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(pool_status[replica_index].slowdown_count); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(pool_status[replica_index].estimated_recovery_time.count()); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.database_shard_name); + if (columns_mask[src_index++]) + res_columns[res_index++]->insert(address.database_replica_name); + if (columns_mask[src_index++]) + { + std::vector is_active; + if (replicated) + is_active = replicated->tryGetAreReplicasActive(name_and_cluster.second); + + if (is_active.empty()) + res_columns[res_index++]->insertDefault(); + else + res_columns[res_index++]->insert(is_active[replica_idx++]); + } } } } diff --git a/src/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h index 0f7c792261d..f6adb902f43 100644 --- a/src/Storages/System/StorageSystemClusters.h +++ b/src/Storages/System/StorageSystemClusters.h @@ -10,6 +10,7 @@ namespace DB class Context; class Cluster; +class DatabaseReplicated; /** Implements system table 'clusters' * that allows to obtain information about available clusters @@ -26,8 +27,9 @@ protected: using IStorageSystemOneBlock::IStorageSystemOneBlock; using NameAndCluster = std::pair>; - void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node *, std::vector) const override; - static void writeCluster(MutableColumns & res_columns, const NameAndCluster & name_and_cluster, const std::vector & is_active); + void fillData(MutableColumns & res_columns, ContextPtr context, const ActionsDAG::Node *, std::vector columns_mask) const override; + static 
void writeCluster(MutableColumns & res_columns, const std::vector & columns_mask, const NameAndCluster & name_and_cluster, const DatabaseReplicated * replicated); + bool supportsColumnsMask() const override { return true; } }; } From 9d0e066cda8d0ccb6bd4f9e07fee36a2bfae707a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 23:41:10 +0200 Subject: [PATCH 1537/2361] Bump NuRaft (to properly catch thread exceptions) Refs: https://github.com/ClickHouse/NuRaft/pull/75 Refs: https://github.com/eBay/NuRaft/pull/525 Signed-off-by: Azat Khuzhin --- contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index cb5dc3c906e..c2b0811f164 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0 +Subproject commit c2b0811f164a7948208489562dab4f186eb305ce From 8562a6106c286882f26383086e52e399106893be Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 23:45:18 +0200 Subject: [PATCH 1538/2361] Better safety thresholds in `arrayWithConstant` --- src/Functions/array/arrayWithConstant.cpp | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/Functions/array/arrayWithConstant.cpp b/src/Functions/array/arrayWithConstant.cpp index 48262870553..4cbc6404b9b 100644 --- a/src/Functions/array/arrayWithConstant.cpp +++ b/src/Functions/array/arrayWithConstant.cpp @@ -1,9 +1,9 @@ #include -#include #include #include #include #include +#include namespace DB @@ -15,7 +15,8 @@ namespace ErrorCodes extern const int TOO_LARGE_ARRAY_SIZE; } -/// Reasonable threshold. +/// Reasonable thresholds. +static constexpr Int64 max_array_size_in_columns_bytes = 1000000000; static constexpr size_t max_arrays_size_in_columns = 1000000000; @@ -63,12 +64,19 @@ public: auto array_size = col_num->getInt(i); if (unlikely(array_size < 0)) - throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size cannot be negative: while executing function {}", getName()); + throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} cannot be negative: while executing function {}", array_size, getName()); + + Int64 estimated_size = 0; + if (unlikely(common::mulOverflow(array_size, col_value->byteSize(), estimated_size))) + throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} with element size {} bytes is too large: while executing function {}", array_size, col_value->byteSize(), getName()); + + if (unlikely(estimated_size > max_array_size_in_columns_bytes)) + throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array size {} with element size {} bytes is too large: while executing function {}", array_size, col_value->byteSize(), getName()); offset += array_size; if (unlikely(offset > max_arrays_size_in_columns)) - throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size while executing function {}", getName()); + throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Too large array size {} (will generate at least {} elements) while executing function {}", array_size, offset, getName()); offsets.push_back(offset); } From 7a066a6505108b14bd49da8766c1bc473a978b1f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 3 Aug 2024 23:48:51 +0200 Subject: [PATCH 1539/2361] Add a test --- .../0_stateless/03216_arrayWithConstant_limits.reference | 1 + tests/queries/0_stateless/03216_arrayWithConstant_limits.sql | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 tests/queries/0_stateless/03216_arrayWithConstant_limits.reference create 
mode 100644 tests/queries/0_stateless/03216_arrayWithConstant_limits.sql diff --git a/tests/queries/0_stateless/03216_arrayWithConstant_limits.reference b/tests/queries/0_stateless/03216_arrayWithConstant_limits.reference new file mode 100644 index 00000000000..825319e1c5b --- /dev/null +++ b/tests/queries/0_stateless/03216_arrayWithConstant_limits.reference @@ -0,0 +1 @@ +10000000 diff --git a/tests/queries/0_stateless/03216_arrayWithConstant_limits.sql b/tests/queries/0_stateless/03216_arrayWithConstant_limits.sql new file mode 100644 index 00000000000..c46524c50e6 --- /dev/null +++ b/tests/queries/0_stateless/03216_arrayWithConstant_limits.sql @@ -0,0 +1,3 @@ +SELECT arrayWithConstant(96142475, ['qMUF']); -- { serverError TOO_LARGE_ARRAY_SIZE } +SELECT arrayWithConstant(100000000, materialize([[[[[[[[[['Hello, world!']]]]]]]]]])); -- { serverError TOO_LARGE_ARRAY_SIZE } +SELECT length(arrayWithConstant(10000000, materialize([[[[[[[[[['Hello world']]]]]]]]]]))); From 6013e4b81d29f2b46bec64859132a4e2ff1bbdc9 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 17 Jul 2024 20:52:48 +0200 Subject: [PATCH 1540/2361] poco/MongoDB: Support broader UUID types (generated by python uuid.UUID) Signed-off-by: Azat Khuzhin --- base/poco/MongoDB/src/Binary.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/poco/MongoDB/src/Binary.cpp b/base/poco/MongoDB/src/Binary.cpp index ea814d6969f..47f3453630f 100644 --- a/base/poco/MongoDB/src/Binary.cpp +++ b/base/poco/MongoDB/src/Binary.cpp @@ -76,7 +76,7 @@ std::string Binary::toString(int indent) const UUID Binary::uuid() const { - if (_subtype == 0x04 && _buffer.size() == 16) + if ((_subtype == 0x04 || _subtype == 0x03) && _buffer.size() == 16) { UUID uuid; uuid.copyFrom((const char*) _buffer.begin()); From 2f00c962711e13ca00af324366421fe4593b4ce6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 17 Jul 2024 20:34:15 +0200 Subject: [PATCH 1541/2361] Support true UUID type for MongoDB engine Signed-off-by: Azat Khuzhin --- base/poco/Foundation/include/Poco/UUID.h | 7 +++ base/poco/MongoDB/src/Binary.cpp | 2 +- src/Processors/Sources/MongoDBSource.cpp | 46 +++++++++++++++++-- .../integration/test_storage_mongodb/test.py | 33 +++++++++++++ 4 files changed, 82 insertions(+), 6 deletions(-) diff --git a/base/poco/Foundation/include/Poco/UUID.h b/base/poco/Foundation/include/Poco/UUID.h index df67ef73e4b..6466d226b2e 100644 --- a/base/poco/Foundation/include/Poco/UUID.h +++ b/base/poco/Foundation/include/Poco/UUID.h @@ -19,6 +19,7 @@ #include "Poco/Foundation.h" +#include namespace Poco @@ -135,6 +136,12 @@ public: static const UUID & x500(); /// Returns the namespace identifier for the X500 namespace. 
+ UInt32 getTimeLow() const { return _timeLow; } + UInt16 getTimeMid() const { return _timeMid; } + UInt16 getTimeHiAndVersion() const { return _timeHiAndVersion; } + UInt16 getClockSeq() const { return _clockSeq; } + std::array getNode() const { return std::array{_node[0], _node[1], _node[2], _node[3], _node[4], _node[5]}; } + protected: UUID(UInt32 timeLow, UInt32 timeMid, UInt32 timeHiAndVersion, UInt16 clockSeq, UInt8 node[]); UUID(const char * bytes, Version version); diff --git a/base/poco/MongoDB/src/Binary.cpp b/base/poco/MongoDB/src/Binary.cpp index 47f3453630f..8b0e6baeccb 100644 --- a/base/poco/MongoDB/src/Binary.cpp +++ b/base/poco/MongoDB/src/Binary.cpp @@ -82,7 +82,7 @@ UUID Binary::uuid() const uuid.copyFrom((const char*) _buffer.begin()); return uuid; } - throw BadCastException("Invalid subtype"); + throw BadCastException("Invalid subtype: " + std::to_string(_subtype) + ", size: " + std::to_string(_buffer.size())); } diff --git a/src/Processors/Sources/MongoDBSource.cpp b/src/Processors/Sources/MongoDBSource.cpp index 0d583cf6be5..e00a541b300 100644 --- a/src/Processors/Sources/MongoDBSource.cpp +++ b/src/Processors/Sources/MongoDBSource.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include "base/types.h" #include #include @@ -45,8 +47,28 @@ namespace using ValueType = ExternalResultDescription::ValueType; using ObjectId = Poco::MongoDB::ObjectId; using MongoArray = Poco::MongoDB::Array; + using MongoUUID = Poco::MongoDB::Binary::Ptr; + UUID parsePocoUUID(const Poco::UUID & src) + { + UUID uuid; + + std::array src_node = src.getNode(); + UInt64 node = 0; + node |= UInt64(src_node[0]) << 40; + node |= UInt64(src_node[1]) << 32; + node |= UInt64(src_node[2]) << 24; + node |= UInt64(src_node[3]) << 16; + node |= UInt64(src_node[4]) << 8; + node |= src_node[5]; + + UUIDHelpers::getHighBytes(uuid) = UInt64(src.getTimeLow()) << 32 | UInt32(src.getTimeMid() << 16 | src.getTimeHiAndVersion()); + UUIDHelpers::getLowBytes(uuid) = UInt64(src.getClockSeq()) << 48 | node; + + return uuid; + } + template Field getNumber(const Poco::MongoDB::Element & value, const std::string & name) { @@ -149,12 +171,20 @@ namespace else if (which.isUUID()) parser = [](const Poco::MongoDB::Element & value, const std::string & name) -> Field { - if (value.type() != Poco::MongoDB::ElementTraits::TypeId) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String (UUID), got type id = {} for column {}", + if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + String string = static_cast &>(value).value(); + return parse(string); + } + else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + const Poco::UUID & poco_uuid = static_cast &>(value).value()->uuid(); + return parsePocoUUID(poco_uuid); + } + else + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}", toString(value.type()), name); - String string = static_cast &>(value).value(); - return parse(string); }; else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type conversion to {} is not supported", nested->getName()); @@ -286,8 +316,14 @@ namespace String string = static_cast &>(value).value(); assert_cast(column).getData().push_back(parse(string)); } + else if (value.type() == Poco::MongoDB::ElementTraits::TypeId) + { + const Poco::UUID & poco_uuid = static_cast &>(value).value()->uuid(); + UUID uuid = parsePocoUUID(poco_uuid); + assert_cast(column).getData().push_back(uuid); + } 
else - throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String (UUID), got type id = {} for column {}", + throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch, expected String/UUID, got type id = {} for column {}", toString(value.type()), name); break; } diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index 3957afe8b29..1a1a790e8e8 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ b/tests/integration/test_storage_mongodb/test.py @@ -1,4 +1,5 @@ import pymongo +from uuid import UUID import pytest from helpers.client import QueryRuntimeException @@ -72,6 +73,28 @@ def test_simple_select(started_cluster): simple_mongo_table.drop() +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) +def test_uuid(started_cluster): + mongo_connection = get_mongo_connection(started_cluster) + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + mongo_table = db["uuid_table"] + mongo_table.insert({"key": 0, "data": UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed")}) + + node = started_cluster.instances["node"] + node.query( + "CREATE TABLE uuid_mongo_table(key UInt64, data UUID) ENGINE = MongoDB('mongo1:27017', 'test', 'uuid_table', 'root', 'clickhouse')" + ) + + assert node.query("SELECT COUNT() FROM uuid_mongo_table") == "1\n" + assert ( + node.query("SELECT data from uuid_mongo_table where key = 0") + == "f0e77736-91d1-48ce-8f01-15123ca1c7ed\n" + ) + node.query("DROP TABLE uuid_mongo_table") + mongo_table.drop() + + @pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_simple_select_from_view(started_cluster): mongo_connection = get_mongo_connection(started_cluster) @@ -140,6 +163,10 @@ def test_arrays(started_cluster): "f0e77736-91d1-48ce-8f01-15123ca1c7ed", "93376a07-c044-4281-a76e-ad27cf6973c5", ], + "arr_mongo_uuid": [ + UUID("f0e77736-91d1-48ce-8f01-15123ca1c7ed"), + UUID("93376a07-c044-4281-a76e-ad27cf6973c5"), + ], "arr_arr_bool": [ [True, False, True], [True], @@ -174,6 +201,7 @@ def test_arrays(started_cluster): "arr_datetime Array(DateTime)," "arr_string Array(String)," "arr_uuid Array(UUID)," + "arr_mongo_uuid Array(UUID)," "arr_arr_bool Array(Array(Bool))," "arr_empty Array(UInt64)," "arr_null Array(UInt64)," @@ -222,6 +250,11 @@ def test_arrays(started_cluster): == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" ) + assert ( + node.query(f"SELECT arr_mongo_uuid FROM arrays_mongo_table WHERE key = 42") + == "['f0e77736-91d1-48ce-8f01-15123ca1c7ed','93376a07-c044-4281-a76e-ad27cf6973c5']\n" + ) + assert ( node.query(f"SELECT arr_arr_bool FROM arrays_mongo_table WHERE key = 42") == "[[true,false,true],[true],[],[],[false],[false]]\n" From a051eb2a5d1c29a61d3c2d9e2f3f0841bbb2817c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 00:24:17 +0200 Subject: [PATCH 1542/2361] Fix tests --- src/Databases/DatabaseHDFS.cpp | 2 +- src/Storages/ObjectStorage/HDFS/HDFSCommon.cpp | 2 +- tests/integration/test_storage_hdfs/test.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp index f58f1b76e71..7fa67a5678e 100644 --- a/src/Databases/DatabaseHDFS.cpp +++ b/src/Databases/DatabaseHDFS.cpp @@ -51,7 +51,7 @@ DatabaseHDFS::DatabaseHDFS(const String & name_, const String & source_url, Cont if (!source.empty()) { if (!re2::RE2::FullMatch(source, std::string(HDFS_HOST_REGEXP))) - throw 
Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs host: {}. " + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad HDFS host: {}. " "It should have structure 'hdfs://:'", source); context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(source)); diff --git a/src/Storages/ObjectStorage/HDFS/HDFSCommon.cpp b/src/Storages/ObjectStorage/HDFS/HDFSCommon.cpp index 365828bcc40..7f8727eea1c 100644 --- a/src/Storages/ObjectStorage/HDFS/HDFSCommon.cpp +++ b/src/Storages/ObjectStorage/HDFS/HDFSCommon.cpp @@ -192,7 +192,7 @@ String getNameNodeCluster(const String &hdfs_url) void checkHDFSURL(const String & url) { if (!re2::RE2::FullMatch(url, std::string(HDFS_URL_REGEXP))) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. It should have structure 'hdfs://:/'", url); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad HDFS URL: {}. It should have structure 'hdfs://:/'", url); } } diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 47d8f44c0b7..ccd2c7eaf11 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -157,7 +157,7 @@ def test_bad_hdfs_uri(started_cluster): ) except Exception as ex: print(ex) - assert "Bad hdfs url" in str(ex) + assert "Bad HDFS URL" in str(ex) try: node1.query( "create table BadStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs100500:9000/other_storage', 'TSV')" From 185b6a54da8f24a97f130bdebe7bb1ec2bd266c0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 00:34:58 +0200 Subject: [PATCH 1543/2361] Merge with master --- src/Core/SettingsChangesHistory.cpp | 261 +--------------------------- 1 file changed, 1 insertion(+), 260 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 3f07bfdb933..5b94391bade 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,266 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { - {"24.8", {{"input_format_json_max_depth", 1000000, 1000, "It was unlimited in previous versions, but that was unsafe."}}}, - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. 
It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. 
A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. 
Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. 
SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. 
Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The 
exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } @@ -335,6 +75,7 @@ static std::initializer_list Date: Sun, 4 Aug 2024 09:02:19 +0200 Subject: [PATCH 1544/2361] Use RabbitMQ without management in tests (attempt to improve startup) Sometimes startup can take ~90 seconds [1]: 2024-08-03 23:11:38.756067+00:00 [info] <0.9.0> Time to start RabbitMQ: 94651980 us Unlike normally ~10 [2]: 2024-08-04 00:33:07.016137+00:00 [info] <0.9.0> Time to start RabbitMQ: 10082489 us [1]: https://s3.amazonaws.com/clickhouse-test-reports/67737/b4e3bbcb82158bea4f5db1d9f5c28cfb741d1d51/integration_tests__asan__old_analyzer__[4_6].html [2]: https://s3.amazonaws.com/clickhouse-test-reports/66671/2f00c962711e13ca00af324366421fe4593b4ce6/integration_tests__asan__old_analyzer__[4_6].html I've tried locally, the difference is very small, 3135665 us (+management) vs (2740747 us), but still something, and who knows how it works under pressure. 
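
The follow-up commits below bump the startup timeout and surface the underlying error when the wait fails; conceptually that wait is just a polling loop with a deadline. A minimal sketch of the pattern, assuming a hypothetical helper (the name, the docker-exec probe and the rabbitmq-diagnostics usage are illustrative assumptions, not the actual code in tests/integration/helpers/cluster.py):

import subprocess
import time

def wait_for_rabbitmq(container_id: str, timeout: float = 60.0) -> None:
    # Poll the broker until it answers, keeping the last error for diagnostics.
    deadline = time.monotonic() + timeout
    last_error = "no attempt made"
    while time.monotonic() < deadline:
        probe = subprocess.run(
            ["docker", "exec", container_id, "rabbitmq-diagnostics", "-q", "ping"],
            capture_output=True,
            text=True,
        )
        if probe.returncode == 0:
            return
        last_error = (probe.stderr or probe.stdout).strip()
        time.sleep(0.5)
    # Reporting the underlying error makes a timeout much easier to debug
    # than a bare "cannot wait container" message.
    raise TimeoutError(f"RabbitMQ did not start within {timeout}s: {last_error}")
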
Signed-off-by: Azat Khuzhin --- tests/integration/compose/docker_compose_rabbitmq.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/compose/docker_compose_rabbitmq.yml b/tests/integration/compose/docker_compose_rabbitmq.yml index 61b21e0e3d9..94c7f0111c4 100644 --- a/tests/integration/compose/docker_compose_rabbitmq.yml +++ b/tests/integration/compose/docker_compose_rabbitmq.yml @@ -2,7 +2,7 @@ version: '2.3' services: rabbitmq1: - image: rabbitmq:3.12.6-management-alpine + image: rabbitmq:3.12.6-alpine hostname: rabbitmq1 expose: - ${RABBITMQ_PORT:-5672} From 264be9c598b42d91ee0a19f718c4d9a4291c7bc4 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 09:06:50 +0200 Subject: [PATCH 1545/2361] tests: increase timeout for RabbitMQ startup Signed-off-by: Azat Khuzhin --- tests/integration/helpers/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 9259c720ff0..a1bdee33d57 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2371,7 +2371,7 @@ class ClickHouseCluster: time.sleep(0.5) raise Exception("Cannot wait PostgreSQL Java Client container") - def wait_rabbitmq_to_start(self, timeout=30): + def wait_rabbitmq_to_start(self, timeout=60): self.print_all_docker_pieces() self.rabbitmq_ip = self.get_instance_ip(self.rabbitmq_host) From dc527b6fd1dfdffb1d177237bffc69cd110cd2a7 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 09:07:05 +0200 Subject: [PATCH 1546/2361] tests: detailed errors for RabbitMQ startup Signed-off-by: Azat Khuzhin --- tests/integration/helpers/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index a1bdee33d57..56d111629c9 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2399,7 +2399,7 @@ class ClickHouseCluster: ) rabbitmq_debuginfo(self.rabbitmq_docker_id, self.rabbitmq_cookie) except Exception as e: - logging.debug("Unable to get logs from docker.") + logging.debug(f"Unable to get logs from docker: {e}.") raise Exception("Cannot wait RabbitMQ container") def wait_nats_is_available(self, max_retries=5): From 27db715761ec66b0dfc915269270697e6cf67909 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 4 Aug 2024 09:52:36 +0000 Subject: [PATCH 1547/2361] Incorporate review feedback --- src/Functions/CountSubstringsImpl.h | 16 +++++++++----- src/Functions/MatchImpl.h | 34 ++++++++++++++--------------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/src/Functions/CountSubstringsImpl.h b/src/Functions/CountSubstringsImpl.h index 8e91bc3aeb4..b1cefae6f1d 100644 --- a/src/Functions/CountSubstringsImpl.h +++ b/src/Functions/CountSubstringsImpl.h @@ -38,10 +38,10 @@ struct CountSubstringsImpl const ColumnPtr & start_pos, PaddedPODArray & res, [[maybe_unused]] ColumnUInt8 * res_null, - size_t /*input_rows_count*/) + size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. 
- assert(!res_null); + chassert(!res_null); const UInt8 * const begin = haystack_data.data(); const UInt8 * const end = haystack_data.data() + haystack_data.size(); @@ -81,6 +81,8 @@ struct CountSubstringsImpl } pos = begin + haystack_offsets[i]; ++i; + + chassert(i < input_rows_count); } } @@ -116,7 +118,7 @@ struct CountSubstringsImpl [[maybe_unused]] ColumnUInt8 * res_null) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. - assert(!res_null); + chassert(!res_null); Impl::toLowerIfNeed(haystack); Impl::toLowerIfNeed(needle); @@ -154,8 +156,10 @@ struct CountSubstringsImpl [[maybe_unused]] ColumnUInt8 * res_null, size_t input_rows_count) { + chassert(input_rows_count == haystack_offsets.size()); + /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. - assert(!res_null); + chassert(!res_null); ColumnString::Offset prev_haystack_offset = 0; ColumnString::Offset prev_needle_offset = 0; @@ -210,8 +214,10 @@ struct CountSubstringsImpl [[maybe_unused]] ColumnUInt8 * res_null, size_t input_rows_count) { + chassert(input_rows_count == needle_offsets.size()); + /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. - assert(!res_null); + chassert(!res_null); /// NOTE You could use haystack indexing. But this is a rare case. ColumnString::Offset prev_needle_offset = 0; diff --git a/src/Functions/MatchImpl.h b/src/Functions/MatchImpl.h index ceac753fe79..7dc93ba79e0 100644 --- a/src/Functions/MatchImpl.h +++ b/src/Functions/MatchImpl.h @@ -131,11 +131,11 @@ struct MatchImpl size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. - assert(!res_null); + chassert(!res_null); - assert(res.size() == haystack_offsets.size()); - assert(res.size() == input_rows_count); - assert(start_pos_ == nullptr); + chassert(res.size() == haystack_offsets.size()); + chassert(res.size() == input_rows_count); + chassert(start_pos_ == nullptr); if (input_rows_count == 0) return; @@ -295,10 +295,10 @@ struct MatchImpl size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. - assert(!res_null); + chassert(!res_null); - assert(res.size() == haystack.size() / N); - assert(res.size() == input_rows_count); + chassert(res.size() == haystack.size() / N); + chassert(res.size() == input_rows_count); if (input_rows_count == 0) return; @@ -468,12 +468,12 @@ struct MatchImpl size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. - assert(!res_null); + chassert(!res_null); - assert(haystack_offsets.size() == needle_offset.size()); - assert(res.size() == haystack_offsets.size()); - assert(res.size() == input_rows_count); - assert(start_pos_ == nullptr); + chassert(haystack_offsets.size() == needle_offset.size()); + chassert(res.size() == haystack_offsets.size()); + chassert(res.size() == input_rows_count); + chassert(start_pos_ == nullptr); if (input_rows_count == 0) return; @@ -577,12 +577,12 @@ struct MatchImpl size_t input_rows_count) { /// `res_null` serves as an output parameter for implementing an XYZOrNull variant. 
- assert(!res_null); + chassert(!res_null); - assert(res.size() == input_rows_count); - assert(res.size() == haystack.size() / N); - assert(res.size() == needle_offset.size()); - assert(start_pos_ == nullptr); + chassert(res.size() == input_rows_count); + chassert(res.size() == haystack.size() / N); + chassert(res.size() == needle_offset.size()); + chassert(start_pos_ == nullptr); if (haystack.empty()) return; From f0aaac3bd19c21a796f54ba080fd67f92959131a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 14:14:28 +0200 Subject: [PATCH 1548/2361] tests: remove useless retries from test_ttl_move::test_alter_with_merge_work Signed-off-by: Azat Khuzhin --- tests/integration/test_ttl_move/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index 3b79ea7916d..48a6224347d 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -76,7 +76,7 @@ def get_used_disks_for_table(node, table_name, partition=None): ) -def check_used_disks_with_retry(node, table_name, expected_disks, retries): +def check_used_disks_with_retry(node, table_name, expected_disks, retries=1): for _ in range(retries): used_disks = get_used_disks_for_table(node, table_name) if set(used_disks).issubset(expected_disks): @@ -1635,9 +1635,9 @@ def test_alter_with_merge_work(started_cluster, name, engine, positive): optimize_table(20) if positive: - assert check_used_disks_with_retry(node1, name, set(["external"]), 100) + assert check_used_disks_with_retry(node1, name, set(["external"])) else: - assert check_used_disks_with_retry(node1, name, set(["jbod1", "jbod2"]), 50) + assert check_used_disks_with_retry(node1, name, set(["jbod1", "jbod2"])) time.sleep(5) From 47dbc5e05b12213a08c25ade9536603a3fd2b175 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 14:16:17 +0200 Subject: [PATCH 1549/2361] tests: add debug info into test_ttl_move::test_alter_with_merge_work Signed-off-by: Azat Khuzhin --- tests/integration/test_ttl_move/test.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index 48a6224347d..4ebe9a30699 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -1635,9 +1635,17 @@ def test_alter_with_merge_work(started_cluster, name, engine, positive): optimize_table(20) if positive: - assert check_used_disks_with_retry(node1, name, set(["external"])) + assert check_used_disks_with_retry( + node1, name, set(["external"]) + ), "Parts: " + node1.query( + f"SELECT disk_name, name FROM system.parts WHERE table = '{name}' AND active = 1" + ) else: - assert check_used_disks_with_retry(node1, name, set(["jbod1", "jbod2"])) + assert check_used_disks_with_retry( + node1, name, set(["jbod1", "jbod2"]) + ), "Parts: " + node1.query( + f"SELECT disk_name, name FROM system.parts WHERE table = '{name}' AND active = 1" + ) time.sleep(5) From 062490e1b40a8df8d63fca567b11e7dd26cf52ee Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 14:22:24 +0200 Subject: [PATCH 1550/2361] tests: fix test_ttl_move::test_alter_with_merge_work flakiness MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Increase timeout for TTL DELETE, since otherwise if other routines will take too long, the part will be removed when it should be still be on "external" disk: 2024.08.04 
03:48:53.803032 [ 622 ] {} default.mt_test_alter_with_merge_work_1722743323 (9dc6904a-f082-4f06-be7a-efe4733e811c): Will drop empty part all_1_3_4_4 And this is how part_log looks like: SELECT event_time, event_type, rows, part_name, error, database, disk_name FROM system.part_log WHERE `table` = 'mt_test_alter_with_merge_work_1722743323' ORDER BY event_time ASC Query id: a118b3cd-e4fe-45a5-b675-d73bdd887d79 ┌──────────event_time─┬─event_type─┬─rows─┬─part_name───┬─error─┬─database─┬─disk_name─┠1. │ 2024-08-04 03:48:44 │ NewPart │ 2 │ all_1_1_0 │ 0 │ default │ jbod1 │ 2. │ 2024-08-04 03:48:44 │ NewPart │ 2 │ all_2_2_0 │ 0 │ default │ jbod2 │ 3. │ 2024-08-04 03:48:45 │ NewPart │ 2 │ all_3_3_0 │ 0 │ default │ jbod1 │ 4. │ 2024-08-04 03:48:46 │ MutatePart │ 2 │ all_1_1_0_4 │ 0 │ default │ jbod1 │ 5. │ 2024-08-04 03:48:46 │ MutatePart │ 2 │ all_2_2_0_4 │ 0 │ default │ jbod2 │ 6. │ 2024-08-04 03:48:46 │ MutatePart │ 2 │ all_3_3_0_4 │ 0 │ default │ jbod1 │ 7. │ 2024-08-04 03:48:47 │ MovePart │ 2 │ all_1_1_0_4 │ 0 │ default │ external │ 8. │ 2024-08-04 03:48:47 │ MovePart │ 2 │ all_3_3_0_4 │ 0 │ default │ jbod2 │ 9. │ 2024-08-04 03:48:47 │ MergeParts │ 6 │ all_1_3_1_4 │ 0 │ default │ jbod2 │ 10. │ 2024-08-04 03:48:48 │ MovePart │ 6 │ all_1_3_1_4 │ 0 │ default │ external │ 11. │ 2024-08-04 03:48:52 │ MergeParts │ 4 │ all_1_3_2_4 │ 0 │ default │ external │ 12. │ 2024-08-04 03:48:53 │ MergeParts │ 0 │ all_1_3_3_4 │ 0 │ default │ external │ # rows==0 13. │ 2024-08-04 03:48:53 │ MergeParts │ 0 │ all_1_3_4_4 │ 0 │ default │ external │ └─────────────────────┴────────────┴──────┴─────────────┴───────┴──────────┴───────────┘ CI: https://s3.amazonaws.com/clickhouse-test-reports/66671/2f00c962711e13ca00af324366421fe4593b4ce6/integration_tests__tsan__[5_6].html Signed-off-by: Azat Khuzhin --- tests/integration/test_ttl_move/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index 4ebe9a30699..925bdf9baaa 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -1613,7 +1613,7 @@ def test_alter_with_merge_work(started_cluster, name, engine, positive): ALTER TABLE {name} MODIFY TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2', d1 + INTERVAL 5 SECOND TO VOLUME 'external', - d1 + INTERVAL 10 SECOND DELETE + d1 + INTERVAL 30 SECOND DELETE """.format( name=name ) @@ -1647,7 +1647,7 @@ def test_alter_with_merge_work(started_cluster, name, engine, positive): f"SELECT disk_name, name FROM system.parts WHERE table = '{name}' AND active = 1" ) - time.sleep(5) + time.sleep(25) optimize_table(20) From 9f31488e502c2b2c02e3058f9794829aac14f8b9 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 15:43:18 +0200 Subject: [PATCH 1551/2361] Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading On CI you can find that 01747_executable_pool_dictionary_implicit_key can hang [1], it is possible due to after CANNOT_SCHEDULE_TASK the async loading will hang: 2024.07.18 03:56:32.365226 [ 6138 ] {6206a18f-668c-4a5c-a5ad-07f577220762} ExternalDictionariesLoader: Will load the object 'executable_pool_simple_implicit_key' in background, force = false, loading_id = 2 2024.07.18 03:56:32.368005 [ 6138 ] {6206a18f-668c-4a5c-a5ad-07f577220762} executeQuery: Code: 439. DB::Exception: Cannot schedule a task: fault injected (threads=766, jobs=746): In scope SELECT dictGet('executable_pool_simple_implicit_key', 'a', toUInt64(1)). 
(CANNOT_SCHEDULE_TASK) (version 24.7.1.2241) (from [::1]:56446) (comment: 01747_executable_pool_dictionary_implicit_key.sql) (in query: SELECT dictGet('executable_pool_simple_implicit_key', 'a', toUInt64(1));), Stack trace (when copying this message, always include the lines below): 0. /build/contrib/llvm-project/libcxx/include/exception:141: Poco::Exception::Exception(String const&, int) @ 0x0000000015f8a292 1. /build/src/Common/Exception.cpp:110: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x000000000c3df6b9 2. /build/contrib/llvm-project/libcxx/include/string:1499: DB::Exception::Exception(PreformattedMessage&&, int) @ 0x0000000006de714c 3. /build/contrib/llvm-project/libcxx/include/vector:438: DB::Exception::Exception(int, FormatStringHelperImpl::type, std::type_identity::type, std::type_identity::type>, String const&, unsigned long&&, unsigned long&) @ 0x000000000c4838eb 4. /build/src/Common/ThreadPool.cpp:0: void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda'(String const&)::operator()(String const&) const @ 0x000000000c4832d3 5. /build/src/Common/ThreadPool.cpp:186: void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool) @ 0x000000000c47e7db 6. /build/contrib/llvm-project/libcxx/include/__functional/function.h:818: ? @ 0x000000000c47ec8d 7. /build/contrib/llvm-project/libcxx/include/__functional/function.h:818: ? @ 0x000000001114b16e 8. /build/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h:701: DB::ExternalLoader::LoadingDispatcher::startLoading(DB::ExternalLoader::LoadingDispatcher::Info&, bool, unsigned long) @ 0x0000000011147733 9. /build/src/Interpreters/ExternalLoader.cpp:837: DB::ExternalLoader::LoadingDispatcher::loadImpl(String const&, std::chrono::duration>, bool, std::unique_lock&)::'lambda'()::operator()() const @ 0x0000000011158bf9 10. /build/contrib/llvm-project/libcxx/include/__mutex_base:397: DB::ExternalLoader::LoadingDispatcher::loadImpl(String const&, std::chrono::duration>, bool, std::unique_lock&) @ 0x00000000111588bc 11. /build/src/Interpreters/ExternalLoader.cpp:604: DB::ExternalLoader::LoadResult DB::ExternalLoader::LoadingDispatcher::tryLoad(String const&, std::chrono::duration>) @ 0x00000000111440bf 12. /build/src/Interpreters/ExternalLoader.cpp:1381: std::shared_ptr DB::ExternalLoader::load, void>(String const&) const @ 0x00000000111442f5 13. /build/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h:587: DB::ExternalDictionariesLoader::getDictionary(String const&, std::shared_ptr) const @ 0x0000000011141028 14. /build/src/Functions/FunctionsExternalDictionaries.h:76: DB::FunctionDictHelper::getDictionary(String const&) @ 0x00000000071d28ec ... 2024.07.18 03:58:29.000900 [ 48468 ] {8cf63d7e-dcbf-4af6-bd7c-0e1789ddce3b} executeQuery: (from [::1]:40410) (comment: 01747_executable_pool_dictionary_implicit_key.sql) SELECT dictGet('executable_pool_simple_implicit_key', 'a', toUInt64(1)); (stage: Complete) # and no more rows for 8cf63d7e-dcbf-4af6-bd7c-0e1789ddce3b [1]: https://s3.amazonaws.com/clickhouse-test-reports/66495/bc029ed8207ac75e96e9cb48cb79d27a9ffa4e2f/stress_test__debug_.html The problem that it should be properly cancelled, otherwise it will not be loaded in loadImpl(), but will be waited. 
Signed-off-by: Azat Khuzhin --- src/Interpreters/ExternalLoader.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index 96405f35f3f..511300be2e0 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -922,7 +922,16 @@ private: if (enable_async_loading) { /// Put a job to the thread pool for the loading. - auto thread = ThreadFromGlobalPool{&LoadingDispatcher::doLoading, this, info.name, loading_id, forced_to_reload, min_id_to_finish_loading_dependencies_, true, CurrentThread::getGroup()}; + ThreadFromGlobalPool thread; + try + { + thread = ThreadFromGlobalPool{&LoadingDispatcher::doLoading, this, info.name, loading_id, forced_to_reload, min_id_to_finish_loading_dependencies_, true, CurrentThread::getGroup()}; + } + catch (...) + { + cancelLoading(info); + throw; + } loading_threads.try_emplace(loading_id, std::move(thread)); } else From dea95e6c332392a87ec3175bb32a50770a57d65b Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 14 Jul 2024 16:06:07 +0200 Subject: [PATCH 1552/2361] Ensure that llvm-symbolizer is used for symbolizing sanitizer reports Since you don't want to fallback to addr2line: # addr2line $ time /bin/test set_flag_impl: Success set_flag_if: Success /usr/bin/addr2line: DWARF error: invalid or unhandled FORM value: 0x23 ================== WARNING: ThreadSanitizer: data race (pid=18) ... real 3m8.580s user 0m21.967s sys 0m40.628s # llvm-symbolizer $ time ./test set_flag_impl: Success set_flag_if: Success ================== WARNING: ThreadSanitizer: data race (pid=24884) real 0m0.028s user 0m0.003s sys 0m0.006s Signed-off-by: Azat Khuzhin --- docker/test/base/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index a81826ed6b5..e7ac62604dc 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -35,7 +35,9 @@ ENV UBSAN_OPTIONS='print_stacktrace=1 max_allocation_size_mb=32768' ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1 max_allocation_size_mb=32768' ENV LSAN_OPTIONS='max_allocation_size_mb=32768' -# for external_symbolizer_path +# for external_symbolizer_path, and also ensure that llvm-symbolizer really +# exists (since you don't want to fallback to addr2line, it is very slow) +RUN test -f /usr/bin/llvm-symbolizer-${LLVM_VERSION} RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8 From 1f2bb6676f39259ec20dd0b077381d251b591d3a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 14 Jul 2024 16:08:09 +0200 Subject: [PATCH 1553/2361] Remove outdated comments about workaround for #64086 Signed-off-by: Azat Khuzhin --- docker/test/util/Dockerfile | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index babddbd228c..dc928ba7195 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -5,14 +5,6 @@ FROM ubuntu:22.04 ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -# FIXME: rebuild for clang 18.1.3, that contains a workaround [1] for -# sanitizers issue [2]: -# -# $ git tag --contains c2a57034eff048cd36c563c8e0051db3a70991b3 | tail -1 -# llvmorg-18.1.3 -# -# [1]: https://github.com/llvm/llvm-project/commit/c2a57034eff048cd36c563c8e0051db3a70991b3 -# [2]: 
https://github.com/ClickHouse/ClickHouse/issues/64086 ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18 RUN apt-get update \ From eed2edd7db5f2424d85a66bfee63281cba5a5c94 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 2 Jul 2024 12:23:10 +0200 Subject: [PATCH 1554/2361] Comment certificateFile/privateKeyFile/dhParamsFile in keeper config Otherwise you will get annoying messages at startup: 2024.07.02 10:03:38.331593 [ 1 ] {} CertificateReloader: Cannot obtain modification time for certificate file /etc/clickhouse-keeper/server.crt, skipping update. errno: 2, strerror: 0 2024.07.02 10:03:38.331658 [ 1 ] {} CertificateReloader: Cannot obtain modification time for key file /etc/clickhouse-keeper/server.key, skipping update. errno: 2, strerror: 0 2024.07.02 10:03:38.341085 [ 1 ] {} CertificateReloader: Poco::Exception. Code: 1000, e.code() = 0, SSL context exception: Error loading private key from file /etc/clickhouse-keeper/server.key: error:80000002:system library::No such file or directory Signed-off-by: Azat Khuzhin --- programs/keeper/keeper_config.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/keeper/keeper_config.xml b/programs/keeper/keeper_config.xml index 4cf84cffc86..efd0010d184 100644 --- a/programs/keeper/keeper_config.xml +++ b/programs/keeper/keeper_config.xml @@ -66,14 +66,14 @@ - /etc/clickhouse-keeper/server.crt - /etc/clickhouse-keeper/server.key + + - /etc/clickhouse-keeper/dhparam.pem + none true true From a75c4b9f9fe519cb74a47d88efb55216da22a64b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 18:07:40 +0200 Subject: [PATCH 1555/2361] Introduce `no-flaky-check` tag --- tests/clickhouse-test | 3 +++ .../00002_log_and_exception_messages_formatting.sql | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 893fded8d23..b70dd61a25a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -1232,6 +1232,9 @@ class TestCase: ): return FailureReason.SKIP + elif "no-flaky-check" in tags and (args.test_runs > 1): + return FailureReason.SKIP + elif tags: for build_flag in args.build_flags: if "no-" + build_flag in tags: diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 07c42d6d039..32db77cd8dd 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-fasttest, no-ubsan, no-batch +-- Tags: no-parallel, no-fasttest, no-ubsan, no-batch, no-flaky-check -- no-parallel because we want to run this test when most of the other tests already passed -- If this test fails, see the "Top patterns of log messages" diagnostics in the end of run.log From 87e0cf6b7635b7249d72c6ac0e7aad75c863d01d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 18:25:36 +0200 Subject: [PATCH 1556/2361] Fix stack overflow in JSONMergePatch --- src/Common/JSONParsers/RapidJSONParser.h | 12 ++++++--- src/Functions/jsonMergePatch.cpp | 34 +++++++++++++++--------- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/src/Common/JSONParsers/RapidJSONParser.h b/src/Common/JSONParsers/RapidJSONParser.h index 6c5ea938bfe..ad7a4cbf53a 100644 --- a/src/Common/JSONParsers/RapidJSONParser.h +++ b/src/Common/JSONParsers/RapidJSONParser.h @@ -3,10 +3,14 @@ #include "config.h" #if USE_RAPIDJSON -# 
include -# include -# include -# include "ElementTypes.h" + +/// Prevent stack overflow: +#define RAPIDJSON_PARSE_DEFAULT_FLAGS (kParseIterativeFlag) + +#include +#include +#include +#include "ElementTypes.h" namespace DB { diff --git a/src/Functions/jsonMergePatch.cpp b/src/Functions/jsonMergePatch.cpp index a83daacdbf6..3bde415aabf 100644 --- a/src/Functions/jsonMergePatch.cpp +++ b/src/Functions/jsonMergePatch.cpp @@ -10,12 +10,14 @@ #if USE_RAPIDJSON -#include "rapidjson/document.h" -#include "rapidjson/writer.h" -#include "rapidjson/stringbuffer.h" -#include "rapidjson/filewritestream.h" -#include "rapidjson/prettywriter.h" -#include "rapidjson/filereadstream.h" +/// Prevent stack overflow: +#define RAPIDJSON_PARSE_DEFAULT_FLAGS (kParseIterativeFlag) + +#include +#include +#include +#include +#include namespace DB @@ -31,17 +33,17 @@ namespace ErrorCodes namespace { - // select jsonMergePatch('{"a":1}','{"name": "joey"}','{"name": "tom"}','{"name": "zoey"}'); + // select JSONMergePatch('{"a":1}','{"name": "joey"}','{"name": "tom"}','{"name": "zoey"}'); // || // \/ // ┌───────────────────────┠// │ {"a":1,"name":"zoey"} │ // └───────────────────────┘ - class FunctionjsonMergePatch : public IFunction + class FunctionJSONMergePatch : public IFunction { public: - static constexpr auto name = "jsonMergePatch"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static constexpr auto name = "JSONMergePatch"; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } String getName() const override { return name; } bool isVariadic() const override { return true; } @@ -98,7 +100,11 @@ namespace const char * json = str_ref.data; document.Parse(json); - if (document.HasParseError() || !document.IsObject()) + + if (document.HasParseError()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong JSON string to merge: {}", rapidjson::GetParseError_En(document.GetParseError())); + + if (!document.IsObject()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong JSON string to merge. 
Expected JSON object"); }; @@ -162,10 +168,12 @@ namespace } -REGISTER_FUNCTION(jsonMergePatch) +REGISTER_FUNCTION(JSONMergePatch) { - factory.registerFunction(FunctionDocumentation{ + factory.registerFunction(FunctionDocumentation{ .description="Returns the merged JSON object string, which is formed by merging multiple JSON objects."}); + + factory.registerAlias("jsonMergePatch", "JSONMergePatch"); } } From 37da0b3c307d249a078b94cb7bc53d8acdd22a98 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 18:27:37 +0200 Subject: [PATCH 1557/2361] Add a test --- .../03217_json_merge_patch_stack_overflow.reference | 0 .../03217_json_merge_patch_stack_overflow.sql | 9 +++++++++ 2 files changed, 9 insertions(+) create mode 100644 tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.reference create mode 100644 tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.sql diff --git a/tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.reference b/tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.sql b/tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.sql new file mode 100644 index 00000000000..4b366b08c6b --- /dev/null +++ b/tests/queries/0_stateless/03217_json_merge_patch_stack_overflow.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- Needs rapidjson library +SELECT JSONMergePatch(REPEAT('{"c":', 1000000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 100000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 10000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 1000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 100)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 10)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 1)); -- { serverError BAD_ARGUMENTS } From a433115434ebe4c1d69f2ed200005fa93c7adcb7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 19:18:00 +0200 Subject: [PATCH 1558/2361] Fix typo --- docker/test/util/process_functional_tests_result.py | 6 +++--- docs/changelogs/v20.5.1.3833-prestable.md | 2 +- docs/changelogs/v21.11.1.8636-prestable.md | 6 +++--- docs/changelogs/v21.12.1.9017-prestable.md | 10 +++++----- docs/changelogs/v21.5.1.6601-prestable.md | 2 +- docs/changelogs/v21.6.9.7-stable.md | 2 +- docs/changelogs/v21.7.9.7-stable.md | 2 +- docs/changelogs/v21.8.1.7409-prestable.md | 2 +- docs/changelogs/v21.8.5.7-lts.md | 2 +- docs/changelogs/v21.9.1.8000-prestable.md | 2 +- docs/changelogs/v22.1.1.2542-prestable.md | 4 ++-- docs/changelogs/v22.4.1.2305-prestable.md | 2 +- docs/changelogs/v23.4.1.1943-stable.md | 3 +-- docs/changelogs/v23.6.1.1524-stable.md | 2 +- docs/zh/changelog/index.md | 2 +- src/Common/SystemLogBase.cpp | 2 +- 16 files changed, 25 insertions(+), 26 deletions(-) diff --git a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index 3da1a8f3674..aa2ea686c46 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -161,11 +161,11 @@ def process_result(result_path, broken_tests): retries, test_results, ) = process_test_log(result_path, broken_tests) - is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1)) - logging.info("Is flaky check: %s", is_flacky_check) + 
is_flaky_check = 1 < int(os.environ.get("NUM_TRIES", 1)) + logging.info("Is flaky check: %s", is_flaky_check) # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately) # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped. - if failed != 0 or unknown != 0 or (success == 0 and (not is_flacky_check)): + if failed != 0 or unknown != 0 or (success == 0 and (not is_flaky_check)): state = "failure" if hung: diff --git a/docs/changelogs/v20.5.1.3833-prestable.md b/docs/changelogs/v20.5.1.3833-prestable.md index 79d61fb12f9..3de263f5e37 100644 --- a/docs/changelogs/v20.5.1.3833-prestable.md +++ b/docs/changelogs/v20.5.1.3833-prestable.md @@ -331,7 +331,7 @@ * Fix several non significant errors in unit tests. [#11262](https://github.com/ClickHouse/ClickHouse/pull/11262) ([alesapin](https://github.com/alesapin)). * Add a test for Join table engine from @donmikel. This closes [#9158](https://github.com/ClickHouse/ClickHouse/issues/9158). [#11265](https://github.com/ClickHouse/ClickHouse/pull/11265) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Repeat test in CI if `curl` invocation was timed out. It is possible due to system hangups for 10+ seconds that are typical in our CI infrastructure. This fixes [#11267](https://github.com/ClickHouse/ClickHouse/issues/11267). [#11268](https://github.com/ClickHouse/ClickHouse/pull/11268) ([Alexey Milovidov](https://github.com/alexey-milovidov)). -* Fix potentially flacky test `00731_long_merge_tree_select_opened_files.sh`. It does not fail frequently but we have discovered potential race condition in this test while experimenting with ThreadFuzzer: [#9814](https://github.com/ClickHouse/ClickHouse/issues/9814) See [link](https://clickhouse-test-reports.s3.yandex.net/9814/40e3023e215df22985d275bf85f4d2290897b76b/functional_stateless_tests_(unbundled).html#fail1) for the example. [#11270](https://github.com/ClickHouse/ClickHouse/pull/11270) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix potentially flaky test `00731_long_merge_tree_select_opened_files.sh`. It does not fail frequently but we have discovered potential race condition in this test while experimenting with ThreadFuzzer: [#9814](https://github.com/ClickHouse/ClickHouse/issues/9814) See [link](https://clickhouse-test-reports.s3.yandex.net/9814/40e3023e215df22985d275bf85f4d2290897b76b/functional_stateless_tests_(unbundled).html#fail1) for the example. [#11270](https://github.com/ClickHouse/ClickHouse/pull/11270) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Now clickhouse-test check the server aliveness before tests run. [#11285](https://github.com/ClickHouse/ClickHouse/pull/11285) ([alesapin](https://github.com/alesapin)). * Emit a warning if server was build in debug or with sanitizers. [#11304](https://github.com/ClickHouse/ClickHouse/pull/11304) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Better check for hung queries in clickhouse-test. [#11321](https://github.com/ClickHouse/ClickHouse/pull/11321) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
diff --git a/docs/changelogs/v21.11.1.8636-prestable.md b/docs/changelogs/v21.11.1.8636-prestable.md index d6a435dd3ce..95c8580d591 100644 --- a/docs/changelogs/v21.11.1.8636-prestable.md +++ b/docs/changelogs/v21.11.1.8636-prestable.md @@ -280,7 +280,7 @@ sidebar_label: 2022 * Cleanup unbundled image [#29689](https://github.com/ClickHouse/ClickHouse/pull/29689) ([Azat Khuzhin](https://github.com/azat)). * Fix memory tracking for merges and mutations [#29691](https://github.com/ClickHouse/ClickHouse/pull/29691) ([Azat Khuzhin](https://github.com/azat)). * Fix data-race in WriteIndirectBuffer (used in DiskMemory) [#29692](https://github.com/ClickHouse/ClickHouse/pull/29692) ([Azat Khuzhin](https://github.com/azat)). -* Fix flacky test [#29706](https://github.com/ClickHouse/ClickHouse/pull/29706) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#29706](https://github.com/ClickHouse/ClickHouse/pull/29706) ([Kseniia Sumarokova](https://github.com/kssenii)). * BorrowedObjectPool condition variable notify fix [#29722](https://github.com/ClickHouse/ClickHouse/pull/29722) ([Maksim Kita](https://github.com/kitaisreal)). * Better exception message for local interactive [#29737](https://github.com/ClickHouse/ClickHouse/pull/29737) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix --stage for clickhouse-local [#29745](https://github.com/ClickHouse/ClickHouse/pull/29745) ([Azat Khuzhin](https://github.com/azat)). @@ -308,7 +308,7 @@ sidebar_label: 2022 * Fix client [#29864](https://github.com/ClickHouse/ClickHouse/pull/29864) ([Kseniia Sumarokova](https://github.com/kssenii)). * Remove some more streams. [#29898](https://github.com/ClickHouse/ClickHouse/pull/29898) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Add logging in ZooKeeper client [#29901](https://github.com/ClickHouse/ClickHouse/pull/29901) ([Alexander Tokmakov](https://github.com/tavplubix)). -* Fix some flacky tests [#29902](https://github.com/ClickHouse/ClickHouse/pull/29902) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix some flaky tests [#29902](https://github.com/ClickHouse/ClickHouse/pull/29902) ([Kseniia Sumarokova](https://github.com/kssenii)). * Grep server log even if it contains binary data [#29903](https://github.com/ClickHouse/ClickHouse/pull/29903) ([Alexander Tokmakov](https://github.com/tavplubix)). * Cosmetic refactoring of server constants. [#29913](https://github.com/ClickHouse/ClickHouse/pull/29913) ([Amos Bird](https://github.com/amosbird)). * Format improvement of AlterQuery [#29916](https://github.com/ClickHouse/ClickHouse/pull/29916) ([flynn](https://github.com/ucasfl)). @@ -465,7 +465,7 @@ sidebar_label: 2022 * Fix docs release [#30933](https://github.com/ClickHouse/ClickHouse/pull/30933) ([alesapin](https://github.com/alesapin)). * Fix style check [#30937](https://github.com/ClickHouse/ClickHouse/pull/30937) ([alesapin](https://github.com/alesapin)). * Fix file progress for clickhouse-local [#30938](https://github.com/ClickHouse/ClickHouse/pull/30938) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix flacky test [#30940](https://github.com/ClickHouse/ClickHouse/pull/30940) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#30940](https://github.com/ClickHouse/ClickHouse/pull/30940) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix reading from TinyLog [#30941](https://github.com/ClickHouse/ClickHouse/pull/30941) ([Vitaly Baranov](https://github.com/vitlibar)). 
* Add github to known hosts in docs release [#30947](https://github.com/ClickHouse/ClickHouse/pull/30947) ([alesapin](https://github.com/alesapin)). * Parse json from response in ci checks [#30948](https://github.com/ClickHouse/ClickHouse/pull/30948) ([alesapin](https://github.com/alesapin)). diff --git a/docs/changelogs/v21.12.1.9017-prestable.md b/docs/changelogs/v21.12.1.9017-prestable.md index bd84873e67a..f5416664d35 100644 --- a/docs/changelogs/v21.12.1.9017-prestable.md +++ b/docs/changelogs/v21.12.1.9017-prestable.md @@ -220,7 +220,7 @@ sidebar_label: 2022 * Fix test_backward_compatibility [#30950](https://github.com/ClickHouse/ClickHouse/pull/30950) ([Ilya Yatsishin](https://github.com/qoega)). * Add stress test to github actions [#30952](https://github.com/ClickHouse/ClickHouse/pull/30952) ([alesapin](https://github.com/alesapin)). * Try smaller blacklist of non parallel integration tests [#30963](https://github.com/ClickHouse/ClickHouse/pull/30963) ([Ilya Yatsishin](https://github.com/qoega)). -* Fix flacky test [#30967](https://github.com/ClickHouse/ClickHouse/pull/30967) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#30967](https://github.com/ClickHouse/ClickHouse/pull/30967) ([Kseniia Sumarokova](https://github.com/kssenii)). * Move access-rights source code [#30973](https://github.com/ClickHouse/ClickHouse/pull/30973) ([Vitaly Baranov](https://github.com/vitlibar)). * Set output_format_avro_rows_in_file default to 1 [#30990](https://github.com/ClickHouse/ClickHouse/pull/30990) ([Kruglov Pavel](https://github.com/Avogar)). * Remove remaining usages of Y_IGNORE [#30993](https://github.com/ClickHouse/ClickHouse/pull/30993) ([Yuriy Chernyshov](https://github.com/georgthegreat)). @@ -353,7 +353,7 @@ sidebar_label: 2022 * Support toUInt8/toInt8 for if constant condition optimization. [#31866](https://github.com/ClickHouse/ClickHouse/pull/31866) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Added -no-sanitize=unsigned-integer-overflow build flag [#31881](https://github.com/ClickHouse/ClickHouse/pull/31881) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). * Fix typos [#31886](https://github.com/ClickHouse/ClickHouse/pull/31886) ([Anton Popov](https://github.com/CurtizJ)). -* Try to fix flacky test. [#31889](https://github.com/ClickHouse/ClickHouse/pull/31889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Try to fix flaky test. [#31889](https://github.com/ClickHouse/ClickHouse/pull/31889) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Reduce the files that depend on parser headers [#31896](https://github.com/ClickHouse/ClickHouse/pull/31896) ([Raúl Marín](https://github.com/Algunenano)). * Fix magic_enum for debug helpers (fixes build w/ USE_DEBUG_HELPERS) [#31922](https://github.com/ClickHouse/ClickHouse/pull/31922) ([Azat Khuzhin](https://github.com/azat)). * Remove some trash from build [#31923](https://github.com/ClickHouse/ClickHouse/pull/31923) ([Alexey Milovidov](https://github.com/alexey-milovidov)). @@ -387,7 +387,7 @@ sidebar_label: 2022 * make looping in H3 funcs uniform [#32110](https://github.com/ClickHouse/ClickHouse/pull/32110) ([Bharat Nallan](https://github.com/bharatnc)). * Remove PVS check from master [#32114](https://github.com/ClickHouse/ClickHouse/pull/32114) ([alesapin](https://github.com/alesapin)). * Fix flaky keeper whitelist test [#32115](https://github.com/ClickHouse/ClickHouse/pull/32115) ([alesapin](https://github.com/alesapin)). 
-* Fix flacky test test_executable_storage_input [#32118](https://github.com/ClickHouse/ClickHouse/pull/32118) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix flaky test test_executable_storage_input [#32118](https://github.com/ClickHouse/ClickHouse/pull/32118) ([Maksim Kita](https://github.com/kitaisreal)). * Fix data race in `removePartAndEnqueueFetch(...)` [#32119](https://github.com/ClickHouse/ClickHouse/pull/32119) ([Alexander Tokmakov](https://github.com/tavplubix)). * Move fuzzers and unit tests to another group [#32120](https://github.com/ClickHouse/ClickHouse/pull/32120) ([alesapin](https://github.com/alesapin)). * Add a test with 20000 mutations in one query [#32122](https://github.com/ClickHouse/ClickHouse/pull/32122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). @@ -411,11 +411,11 @@ sidebar_label: 2022 * Add test for [#32186](https://github.com/ClickHouse/ClickHouse/issues/32186) [#32203](https://github.com/ClickHouse/ClickHouse/pull/32203) ([Raúl Marín](https://github.com/Algunenano)). * Fix uncaught exception in DatabaseLazy [#32206](https://github.com/ClickHouse/ClickHouse/pull/32206) ([Alexander Tokmakov](https://github.com/tavplubix)). * Update ASTCreateQuery.cpp [#32208](https://github.com/ClickHouse/ClickHouse/pull/32208) ([Kseniia Sumarokova](https://github.com/kssenii)). -* Fix flacky fileLog test (probably) [#32209](https://github.com/ClickHouse/ClickHouse/pull/32209) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky fileLog test (probably) [#32209](https://github.com/ClickHouse/ClickHouse/pull/32209) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix jemalloc under osx [#32219](https://github.com/ClickHouse/ClickHouse/pull/32219) ([Azat Khuzhin](https://github.com/azat)). * Add missing timezones to some tests [#32222](https://github.com/ClickHouse/ClickHouse/pull/32222) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix versioning of aggregate functions (fixes performance tests) [#32236](https://github.com/ClickHouse/ClickHouse/pull/32236) ([Azat Khuzhin](https://github.com/azat)). -* Disable window view tests temporarily because still flacky [#32257](https://github.com/ClickHouse/ClickHouse/pull/32257) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Disable window view tests temporarily because still flaky [#32257](https://github.com/ClickHouse/ClickHouse/pull/32257) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix typo in tupleToNameValuePairs doc [#32262](https://github.com/ClickHouse/ClickHouse/pull/32262) ([Vladimir C](https://github.com/vdimir)). * Fix possible Pipeline stuck in case of StrictResize processor. [#32270](https://github.com/ClickHouse/ClickHouse/pull/32270) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix possible crash in DataTypeAggregateFunction [#32287](https://github.com/ClickHouse/ClickHouse/pull/32287) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). diff --git a/docs/changelogs/v21.5.1.6601-prestable.md b/docs/changelogs/v21.5.1.6601-prestable.md index b7dd8ae87c0..69ea9cb8d0e 100644 --- a/docs/changelogs/v21.5.1.6601-prestable.md +++ b/docs/changelogs/v21.5.1.6601-prestable.md @@ -158,7 +158,7 @@ sidebar_label: 2022 * MemoryStorage sync comments and code [#22721](https://github.com/ClickHouse/ClickHouse/pull/22721) ([Maksim Kita](https://github.com/kitaisreal)). * Fix potential segfault on Keeper startup [#22743](https://github.com/ClickHouse/ClickHouse/pull/22743) ([alesapin](https://github.com/alesapin)). 
* Avoid using harmful function rand() [#22744](https://github.com/ClickHouse/ClickHouse/pull/22744) ([Amos Bird](https://github.com/amosbird)). -* Fix flacky hedged tests [#22746](https://github.com/ClickHouse/ClickHouse/pull/22746) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix flaky hedged tests [#22746](https://github.com/ClickHouse/ClickHouse/pull/22746) ([Kruglov Pavel](https://github.com/Avogar)). * add more messages when flushing the logs [#22761](https://github.com/ClickHouse/ClickHouse/pull/22761) ([Alexander Kuzmenkov](https://github.com/akuzm)). * Moved BorrowedObjectPool to common [#22764](https://github.com/ClickHouse/ClickHouse/pull/22764) ([Maksim Kita](https://github.com/kitaisreal)). * Functions ExternalDictionaries standardize exception throw [#22821](https://github.com/ClickHouse/ClickHouse/pull/22821) ([Maksim Kita](https://github.com/kitaisreal)). diff --git a/docs/changelogs/v21.6.9.7-stable.md b/docs/changelogs/v21.6.9.7-stable.md index 0a989e4d6b7..533c58badac 100644 --- a/docs/changelogs/v21.6.9.7-stable.md +++ b/docs/changelogs/v21.6.9.7-stable.md @@ -55,7 +55,7 @@ sidebar_label: 2022 * Try fix rabbitmq tests [#26826](https://github.com/ClickHouse/ClickHouse/pull/26826) ([Kseniia Sumarokova](https://github.com/kssenii)). * One more library bridge fix [#26873](https://github.com/ClickHouse/ClickHouse/pull/26873) ([Kseniia Sumarokova](https://github.com/kssenii)). * Update PVS checksum [#27317](https://github.com/ClickHouse/ClickHouse/pull/27317) ([Alexander Tokmakov](https://github.com/tavplubix)). -* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)). * Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)). diff --git a/docs/changelogs/v21.7.9.7-stable.md b/docs/changelogs/v21.7.9.7-stable.md index 7aaab54af6b..684d0e8995e 100644 --- a/docs/changelogs/v21.7.9.7-stable.md +++ b/docs/changelogs/v21.7.9.7-stable.md @@ -35,7 +35,7 @@ sidebar_label: 2022 #### NOT FOR CHANGELOG / INSIGNIFICANT * Fix prometheus metric name [#26140](https://github.com/ClickHouse/ClickHouse/pull/26140) ([Vladimir C](https://github.com/vdimir)). -* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)). 
* Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)). diff --git a/docs/changelogs/v21.8.1.7409-prestable.md b/docs/changelogs/v21.8.1.7409-prestable.md index cb6ab82b30f..6ef2f1b50d2 100644 --- a/docs/changelogs/v21.8.1.7409-prestable.md +++ b/docs/changelogs/v21.8.1.7409-prestable.md @@ -101,7 +101,7 @@ sidebar_label: 2022 * Separate log files for separate runs in stress test [#25741](https://github.com/ClickHouse/ClickHouse/pull/25741) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Fix slow performance test [#25742](https://github.com/ClickHouse/ClickHouse/pull/25742) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * DatabaseAtomic EXCHANGE DICTIONARIES fix test [#25753](https://github.com/ClickHouse/ClickHouse/pull/25753) ([Maksim Kita](https://github.com/kitaisreal)). -* Try fix flacky rabbitmq test [#25756](https://github.com/ClickHouse/ClickHouse/pull/25756) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Try fix flaky rabbitmq test [#25756](https://github.com/ClickHouse/ClickHouse/pull/25756) ([Kseniia Sumarokova](https://github.com/kssenii)). * Add a test for [#13993](https://github.com/ClickHouse/ClickHouse/issues/13993) [#25758](https://github.com/ClickHouse/ClickHouse/pull/25758) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Set follow-fork-mode child for gdb in stress/fasttest/fuzzer [#25769](https://github.com/ClickHouse/ClickHouse/pull/25769) ([Azat Khuzhin](https://github.com/azat)). * Ignore TOO_DEEP_RECURSION server exception during fuzzing [#25770](https://github.com/ClickHouse/ClickHouse/pull/25770) ([Azat Khuzhin](https://github.com/azat)). diff --git a/docs/changelogs/v21.8.5.7-lts.md b/docs/changelogs/v21.8.5.7-lts.md index fa459e093f7..4d0727e362c 100644 --- a/docs/changelogs/v21.8.5.7-lts.md +++ b/docs/changelogs/v21.8.5.7-lts.md @@ -40,7 +40,7 @@ sidebar_label: 2022 * Fix several bugs in ZooKeeper snapshots deserialization [#26127](https://github.com/ClickHouse/ClickHouse/pull/26127) ([alesapin](https://github.com/alesapin)). * Fix prometheus metric name [#26140](https://github.com/ClickHouse/ClickHouse/pull/26140) ([Vladimir C](https://github.com/vdimir)). -* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix throw without exception in MySQL source. [#28027](https://github.com/ClickHouse/ClickHouse/pull/28027) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). * Fix race between REPLACE PARTITION and MOVE PARTITION [#28035](https://github.com/ClickHouse/ClickHouse/pull/28035) ([Alexander Tokmakov](https://github.com/tavplubix)). * Follow-up to [#28016](https://github.com/ClickHouse/ClickHouse/issues/28016) [#28036](https://github.com/ClickHouse/ClickHouse/pull/28036) ([Alexander Tokmakov](https://github.com/tavplubix)). diff --git a/docs/changelogs/v21.9.1.8000-prestable.md b/docs/changelogs/v21.9.1.8000-prestable.md index bc921a68693..70ae3697e92 100644 --- a/docs/changelogs/v21.9.1.8000-prestable.md +++ b/docs/changelogs/v21.9.1.8000-prestable.md @@ -346,7 +346,7 @@ sidebar_label: 2022 * Update PVS checksum [#27317](https://github.com/ClickHouse/ClickHouse/pull/27317) ([Alexander Tokmakov](https://github.com/tavplubix)). 
* Fix 01300_client_save_history_when_terminated_long [#27324](https://github.com/ClickHouse/ClickHouse/pull/27324) ([Raúl Marín](https://github.com/Algunenano)). * Try update contrib/zlib-ng [#27327](https://github.com/ClickHouse/ClickHouse/pull/27327) ([Ilya Yatsishin](https://github.com/qoega)). -* Fix flacky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test [#27383](https://github.com/ClickHouse/ClickHouse/pull/27383) ([Kseniia Sumarokova](https://github.com/kssenii)). * Add and check system.mutations for database filter [#27384](https://github.com/ClickHouse/ClickHouse/pull/27384) ([Azat Khuzhin](https://github.com/azat)). * Correct the key data type used in mapContains [#27423](https://github.com/ClickHouse/ClickHouse/pull/27423) ([Fuwang Hu](https://github.com/fuwhu)). * Fix tests for WithMergeableStateAfterAggregationAndLimit [#27424](https://github.com/ClickHouse/ClickHouse/pull/27424) ([Azat Khuzhin](https://github.com/azat)). diff --git a/docs/changelogs/v22.1.1.2542-prestable.md b/docs/changelogs/v22.1.1.2542-prestable.md index cacd13c1e12..3b0422abb11 100644 --- a/docs/changelogs/v22.1.1.2542-prestable.md +++ b/docs/changelogs/v22.1.1.2542-prestable.md @@ -398,7 +398,7 @@ sidebar_label: 2022 * test for [#24410](https://github.com/ClickHouse/ClickHouse/issues/24410) [#33265](https://github.com/ClickHouse/ClickHouse/pull/33265) ([Denny Crane](https://github.com/den-crane)). * Wait for RabbitMQ container to actually start when it was restarted in test on purpose [#33266](https://github.com/ClickHouse/ClickHouse/pull/33266) ([Kseniia Sumarokova](https://github.com/kssenii)). * Mark max_alter_threads as obsolete [#33268](https://github.com/ClickHouse/ClickHouse/pull/33268) ([Denny Crane](https://github.com/den-crane)). -* Fix azure tests flackyness because of azure server closing connection [#33269](https://github.com/ClickHouse/ClickHouse/pull/33269) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix azure tests flakyness because of azure server closing connection [#33269](https://github.com/ClickHouse/ClickHouse/pull/33269) ([Kseniia Sumarokova](https://github.com/kssenii)). * Test for [#26920](https://github.com/ClickHouse/ClickHouse/issues/26920) [#33272](https://github.com/ClickHouse/ClickHouse/pull/33272) ([Denny Crane](https://github.com/den-crane)). * Fix test_storage_kafka failures by adjusting retention.ms [#33278](https://github.com/ClickHouse/ClickHouse/pull/33278) ([Azat Khuzhin](https://github.com/azat)). * Disable FunctionConvertFromString::canBeExecutedOnDefaultArguments [#33286](https://github.com/ClickHouse/ClickHouse/pull/33286) ([Vladimir C](https://github.com/vdimir)). @@ -447,7 +447,7 @@ sidebar_label: 2022 * Update mongodb.md [#33585](https://github.com/ClickHouse/ClickHouse/pull/33585) ([Kseniia Sumarokova](https://github.com/kssenii)). * Restore existing static builds links [#33597](https://github.com/ClickHouse/ClickHouse/pull/33597) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Fix pylint for run_check.py [#33600](https://github.com/ClickHouse/ClickHouse/pull/33600) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). -* Fix flacky test_dictionaries_postgresql/ [#33601](https://github.com/ClickHouse/ClickHouse/pull/33601) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test_dictionaries_postgresql/ [#33601](https://github.com/ClickHouse/ClickHouse/pull/33601) ([Kseniia Sumarokova](https://github.com/kssenii)). 
* Make ZooKeeper client better interpret keeper server connection reject [#33602](https://github.com/ClickHouse/ClickHouse/pull/33602) ([alesapin](https://github.com/alesapin)). * Fix broken workflow dependencies [#33608](https://github.com/ClickHouse/ClickHouse/pull/33608) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Force rebuild images in CI [#33609](https://github.com/ClickHouse/ClickHouse/pull/33609) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). diff --git a/docs/changelogs/v22.4.1.2305-prestable.md b/docs/changelogs/v22.4.1.2305-prestable.md index b277137ca7e..e8304e6505f 100644 --- a/docs/changelogs/v22.4.1.2305-prestable.md +++ b/docs/changelogs/v22.4.1.2305-prestable.md @@ -410,7 +410,7 @@ sidebar_label: 2022 * Fix mongodb test with new cert [#36161](https://github.com/ClickHouse/ClickHouse/pull/36161) ([alesapin](https://github.com/alesapin)). * Some fixes for ReplicatedMergeTree [#36163](https://github.com/ClickHouse/ClickHouse/pull/36163) ([Alexander Tokmakov](https://github.com/tavplubix)). * clickhouse-client: properly cancel query in case of error during formatting data [#36164](https://github.com/ClickHouse/ClickHouse/pull/36164) ([Azat Khuzhin](https://github.com/azat)). -* Fix flacky test 01161_all_system_tables under s3 storage [#36175](https://github.com/ClickHouse/ClickHouse/pull/36175) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix flaky test 01161_all_system_tables under s3 storage [#36175](https://github.com/ClickHouse/ClickHouse/pull/36175) ([Kseniia Sumarokova](https://github.com/kssenii)). * Revert "Fix possible mutation stuck due to race with DROP_RANGE" [#36190](https://github.com/ClickHouse/ClickHouse/pull/36190) ([Azat Khuzhin](https://github.com/azat)). * Use atomic instead of mutex + condvar in ParallelReadBuffer [#36192](https://github.com/ClickHouse/ClickHouse/pull/36192) ([Kruglov Pavel](https://github.com/Avogar)). * Follow-up to [#36138](https://github.com/ClickHouse/ClickHouse/issues/36138) [#36194](https://github.com/ClickHouse/ClickHouse/pull/36194) ([Alexander Tokmakov](https://github.com/tavplubix)). diff --git a/docs/changelogs/v23.4.1.1943-stable.md b/docs/changelogs/v23.4.1.1943-stable.md index ea16f5856be..34590ba9d37 100644 --- a/docs/changelogs/v23.4.1.1943-stable.md +++ b/docs/changelogs/v23.4.1.1943-stable.md @@ -321,7 +321,7 @@ sidebar_label: 2023 * Add a test for [#38128](https://github.com/ClickHouse/ClickHouse/issues/38128) [#48817](https://github.com/ClickHouse/ClickHouse/pull/48817) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Remove excessive logging [#48826](https://github.com/ClickHouse/ClickHouse/pull/48826) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * remove duplicate indentwith in clang-format [#48834](https://github.com/ClickHouse/ClickHouse/pull/48834) ([cluster](https://github.com/infdahai)). -* Try fix flacky test_concurrent_alter_move_and_drop [#48843](https://github.com/ClickHouse/ClickHouse/pull/48843) ([Sergei Trifonov](https://github.com/serxa)). +* Try fix flaky test_concurrent_alter_move_and_drop [#48843](https://github.com/ClickHouse/ClickHouse/pull/48843) ([Sergei Trifonov](https://github.com/serxa)). * fix the race wait loading parts [#48844](https://github.com/ClickHouse/ClickHouse/pull/48844) ([Sema Checherinda](https://github.com/CheSema)). * suppress assert of progress for test_system_replicated_fetches [#48856](https://github.com/ClickHouse/ClickHouse/pull/48856) ([Han Fei](https://github.com/hanfei1991)). 
* Fix: do not run test_store_cleanup_disk_s3 in parallel [#48863](https://github.com/ClickHouse/ClickHouse/pull/48863) ([Igor Nikonov](https://github.com/devcrafter)). @@ -372,4 +372,3 @@ sidebar_label: 2023 * suppress two timeout tests [#49175](https://github.com/ClickHouse/ClickHouse/pull/49175) ([Han Fei](https://github.com/hanfei1991)). * Document makeDateTime() and its variants [#49183](https://github.com/ClickHouse/ClickHouse/pull/49183) ([Robert Schulze](https://github.com/rschu1ze)). * Fix after [#49110](https://github.com/ClickHouse/ClickHouse/issues/49110) [#49206](https://github.com/ClickHouse/ClickHouse/pull/49206) ([Kseniia Sumarokova](https://github.com/kssenii)). - diff --git a/docs/changelogs/v23.6.1.1524-stable.md b/docs/changelogs/v23.6.1.1524-stable.md index b91c5340789..0de9ab37653 100644 --- a/docs/changelogs/v23.6.1.1524-stable.md +++ b/docs/changelogs/v23.6.1.1524-stable.md @@ -263,7 +263,7 @@ sidebar_label: 2023 * Fix broken labeling for `manual approve` [#51405](https://github.com/ClickHouse/ClickHouse/pull/51405) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Fix parts lifetime in `MergeTreeTransaction` [#51407](https://github.com/ClickHouse/ClickHouse/pull/51407) ([Alexander Tokmakov](https://github.com/tavplubix)). * Fix flaky test test_skip_empty_files [#51409](https://github.com/ClickHouse/ClickHouse/pull/51409) ([Kruglov Pavel](https://github.com/Avogar)). -* fix flacky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)). +* fix flaky test test_profile_events_s3 [#51412](https://github.com/ClickHouse/ClickHouse/pull/51412) ([Sema Checherinda](https://github.com/CheSema)). * Update README.md [#51413](https://github.com/ClickHouse/ClickHouse/pull/51413) ([Tyler Hannan](https://github.com/tylerhannan)). * Replace try/catch logic in hasTokenOrNull() by something more lightweight [#51425](https://github.com/ClickHouse/ClickHouse/pull/51425) ([Robert Schulze](https://github.com/rschu1ze)). * Add retries to `tlsv1_3` tests [#51434](https://github.com/ClickHouse/ClickHouse/pull/51434) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). diff --git a/docs/zh/changelog/index.md b/docs/zh/changelog/index.md index cd77a8c03cf..fb50dfcee85 100644 --- a/docs/zh/changelog/index.md +++ b/docs/zh/changelog/index.md @@ -252,7 +252,7 @@ sidebar_label: "\u53D8\u66F4\u65E5\u5FD7" - 抑制MSan下的一些测试失败。 [#8780](https://github.com/ClickHouse/ClickHouse/pull/8780) ([Alexander Kuzmenkov](https://github.com/akuzm)) - 加速 “exception while insert†测试 此测试通常在具有å¤ç›–率的调试版本中超时。 [#8711](https://github.com/ClickHouse/ClickHouse/pull/8711) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) - æ›´æ–° `libcxx` å’Œ `libcxxabi` 为了主人 在准备 [#9304](https://github.com/ClickHouse/ClickHouse/issues/9304) [#9308](https://github.com/ClickHouse/ClickHouse/pull/9308) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) -- ä¿®å¤flacky测试 `00910_zookeeper_test_alter_compression_codecs`. [#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) +- ä¿®å¤flaky测试 `00910_zookeeper_test_alter_compression_codecs`. 
[#9525](https://github.com/ClickHouse/ClickHouse/pull/9525) ([阿列克谢-米洛维多夫](https://github.com/alexey-milovidov)) - 清ç†é‡å¤çš„链接器标志。 ç¡®ä¿é“¾æŽ¥å™¨ä¸ä¼šæŸ¥æ‰¾æ„想ä¸åˆ°çš„符å·ã€‚ [#9433](https://github.com/ClickHouse/ClickHouse/pull/9433) ([阿莫斯鸟](https://github.com/amosbird)) - 添加 `clickhouse-odbc` 驱动程åºè¿›å…¥æµ‹è¯•å›¾åƒã€‚ è¿™å…许通过自己的ODBC驱动程åºæµ‹è¯•ClickHouse与ClickHouse的交互。 [#9348](https://github.com/ClickHouse/ClickHouse/pull/9348) ([filimonov](https://github.com/filimonov)) - ä¿®å¤å•å…ƒæµ‹è¯•ä¸­çš„几个错误。 [#9047](https://github.com/ClickHouse/ClickHouse/pull/9047) ([阿利沙平](https://github.com/alesapin)) diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index a9307c3be99..7d2c15714e2 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -65,7 +65,7 @@ void SystemLogQueue::push(LogElement&& element) /// Memory can be allocated while resizing on queue.push_back. /// The size of allocation can be in order of a few megabytes. /// But this should not be accounted for query memory usage. - /// Otherwise the tests like 01017_uniqCombined_memory_usage.sql will be flacky. + /// Otherwise the tests like 01017_uniqCombined_memory_usage.sql will be flaky. MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; /// Should not log messages under mutex. From bd5022432909bcb633f31617a764ca223ad585d5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 20:57:22 +0200 Subject: [PATCH 1559/2361] Whitespaces --- docs/changelogs/v23.4.1.1943-stable.md | 1 - src/Processors/Chunk.h | 2 +- tests/queries/0_stateless/02099_tsv_raw_format.sh | 7 +++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/changelogs/v23.4.1.1943-stable.md b/docs/changelogs/v23.4.1.1943-stable.md index ea16f5856be..6ab6d8b457a 100644 --- a/docs/changelogs/v23.4.1.1943-stable.md +++ b/docs/changelogs/v23.4.1.1943-stable.md @@ -372,4 +372,3 @@ sidebar_label: 2023 * suppress two timeout tests [#49175](https://github.com/ClickHouse/ClickHouse/pull/49175) ([Han Fei](https://github.com/hanfei1991)). * Document makeDateTime() and its variants [#49183](https://github.com/ClickHouse/ClickHouse/pull/49183) ([Robert Schulze](https://github.com/rschu1ze)). * Fix after [#49110](https://github.com/ClickHouse/ClickHouse/issues/49110) [#49206](https://github.com/ClickHouse/ClickHouse/pull/49206) ([Kseniia Sumarokova](https://github.com/kssenii)). 
- diff --git a/src/Processors/Chunk.h b/src/Processors/Chunk.h index 1348966c0d3..f45e2c4619e 100644 --- a/src/Processors/Chunk.h +++ b/src/Processors/Chunk.h @@ -22,7 +22,7 @@ public: }; -template +template class ChunkInfoCloneable : public ChunkInfo { public: diff --git a/tests/queries/0_stateless/02099_tsv_raw_format.sh b/tests/queries/0_stateless/02099_tsv_raw_format.sh index 16b695e4037..026607ac6d5 100755 --- a/tests/queries/0_stateless/02099_tsv_raw_format.sh +++ b/tests/queries/0_stateless/02099_tsv_raw_format.sh @@ -14,7 +14,7 @@ do echo $format $CLICKHOUSE_CLIENT -q "INSERT INTO test_02099 SELECT number, toString(number), toDate(number) FROM numbers(3)" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02099 FORMAT $format" - + $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02099 FORMAT $format" | $CLICKHOUSE_CLIENT -q "INSERT INTO test_02099 FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_02099" @@ -49,13 +49,12 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE test_nullable_string_02099" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_parallel_parsing_02099" $CLICKHOUSE_CLIENT -q "CREATE TABLE test_parallel_parsing_02099 (x UInt64, a Array(UInt64), s String) ENGINE=Memory()"; -$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(1000000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=0 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" +$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(1000000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=0 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_parallel_parsing_02099 ORDER BY x" | md5sum $CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_parallel_parsing_02099" -$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(1000000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=1 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" +$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(1000000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=1 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" $CLICKHOUSE_CLIENT -q "SELECT * FROM test_parallel_parsing_02099 ORDER BY x" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE test_parallel_parsing_02099" - From a49924b3834d577827279f6278e5df21e3065035 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 13:27:06 +0200 Subject: [PATCH 1560/2361] tests: avoid leaving leftovers after test_storage_mongodb (fixes flaky chec) CI: https://s3.amazonaws.com/clickhouse-test-reports/66671/2f00c962711e13ca00af324366421fe4593b4ce6/integration_tests_flaky_check__asan_.html Signed-off-by: Azat Khuzhin --- tests/integration/test_storage_mongodb/test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index 1a1a790e8e8..6e2d15b03be 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ b/tests/integration/test_storage_mongodb/test.py @@ -410,6 +410,7 @@ def test_no_credentials(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table_2") node.query( "create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')" ) @@ -439,10 +440,13 @@ def 
test_auth_source(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] + node.query("drop table if exists simple_mongo_table_fail") node.query( "create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse')" ) node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail") + + node.query("drop table if exists simple_mongo_table_ok") node.query( "create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" ) From 9f1753bb4f825349ac4238650bc87d7792f99cc8 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 2 Aug 2024 16:58:04 +0200 Subject: [PATCH 1561/2361] Corrections after review. --- .../table-engines/integrations/time-series.md | 4 +-- src/Core/Settings.h | 2 +- src/Server/PrometheusRequestHandler.cpp | 12 +++---- src/Storages/StorageTimeSeries.cpp | 34 ++++++------------- 4 files changed, 20 insertions(+), 32 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/time-series.md b/docs/en/engines/table-engines/integrations/time-series.md index 4830fd61d27..b9e47e8d2c9 100644 --- a/docs/en/engines/table-engines/integrations/time-series.md +++ b/docs/en/engines/table-engines/integrations/time-series.md @@ -213,8 +213,8 @@ CREATE TABLE my_table ## The `tags` and `all_tags` columns {#tags-and-all-tags} There are two columns containing maps of tags - `tags` and `all_tags`. In this example they mean the same, however they can be different -if setting `tags_to_columns` is used. This setting allows to specify that a specific tag should be stored in a separate column instead of storing -in a map inside the `tags` column: +if setting `tags_to_columns` is used. This setting allows you to specify that a specific tag should be stored in a separate column instead of storing +it in a map inside the `tags` column: ``` sql CREATE TABLE my_table ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 115554d44c5..82e0e320cf1 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -903,7 +903,7 @@ class IColumn; M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \ M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions", 0) \ M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \ - M(Bool, allow_experimental_time_series_table, false, "Allows to use the TimeSeries table engine. Disabled by default, because this feature is experimental", 0) \ + M(Bool, allow_experimental_time_series_table, false, "Allows experimental TimeSeries table engine", 0) \ M(Bool, allow_experimental_variant_type, false, "Allow Variant data type", 0) \ M(Bool, allow_experimental_dynamic_type, false, "Allow Dynamic data type", 0) \ M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \ diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index ddbccab05df..bbd9a978c91 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -151,25 +151,25 @@ protected: if (!roles.empty()) context->setCurrentRoles(roles); - auto param_could_be_skipped = [&] (const String & name) + /// Settings can be overridden in the URL query. 
+ auto is_setting_like_parameter = [&] (const String & name) { /// Empty parameter appears when URL like ?&a=b or a=b&&c=d. Just skip them for user's convenience. if (name.empty()) - return true; + return false; /// Some parameters (database, default_format, everything used in the code above) do not /// belong to the Settings class. static const NameSet reserved_param_names{"user", "password", "quota_key", "stacktrace", "role", "query_id"}; - return reserved_param_names.contains(name); + return !reserved_param_names.contains(name); }; - /// Settings can be overridden in the query. SettingsChanges settings_changes; for (const auto & [key, value] : *params) { - if (!param_could_be_skipped(key)) + if (is_setting_like_parameter(key)) { - /// Other than query parameters are treated as settings. + /// This query parameter should be considered as a ClickHouse setting. settings_changes.push_back({key, value}); } } diff --git a/src/Storages/StorageTimeSeries.cpp b/src/Storages/StorageTimeSeries.cpp index b1d566564ea..f5a2c0c59a2 100644 --- a/src/Storages/StorageTimeSeries.cpp +++ b/src/Storages/StorageTimeSeries.cpp @@ -371,9 +371,8 @@ void StorageTimeSeries::alter(const AlterCommands & params, ContextPtr local_con } -void StorageTimeSeries::renameInMemory(const StorageID & new_table_id) +void StorageTimeSeries::renameInMemory(const StorageID & /* new_table_id */) { - UNUSED(new_table_id); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Renaming is not supported by storage {} yet", getName()); } @@ -406,33 +405,22 @@ void StorageTimeSeries::restoreDataFromBackup(RestorerFromBackup & restorer, con void StorageTimeSeries::read( - QueryPlan & query_plan, - const Names & column_names, - const StorageSnapshotPtr & storage_snapshot, - SelectQueryInfo & query_info, - ContextPtr local_context, - QueryProcessingStage::Enum processed_stage, - size_t max_block_size, - size_t num_streams) + QueryPlan & /* query_plan */, + const Names & /* column_names */, + const StorageSnapshotPtr & /* storage_snapshot */, + SelectQueryInfo & /* query_info */, + ContextPtr /* local_context */, + QueryProcessingStage::Enum /* processed_stage */, + size_t /* max_block_size */, + size_t /* num_streams */) { - UNUSED(query_plan); - UNUSED(column_names); - UNUSED(storage_snapshot); - UNUSED(query_info); - UNUSED(local_context); - UNUSED(processed_stage); - UNUSED(max_block_size); - UNUSED(num_streams); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SELECT is not supported by storage {} yet", getName()); } -SinkToStoragePtr StorageTimeSeries::write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context, bool async_insert) +SinkToStoragePtr StorageTimeSeries::write( + const ASTPtr & /* query */, const StorageMetadataPtr & /* metadata_snapshot */, ContextPtr /* local_context */, bool /* async_insert */) { - UNUSED(query); - UNUSED(metadata_snapshot); - UNUSED(local_context); - UNUSED(async_insert); throw Exception(ErrorCodes::NOT_IMPLEMENTED, "INSERT is not supported by storage {} yet", getName()); } From c200f437746ddbd03e5ce7c7ad9d9613015ce929 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 16:00:46 +0200 Subject: [PATCH 1562/2361] ci: fix stateless runner to correctly gather artifacts when server failed Right now it simply does not work due to "set -e", with it you cannot use "foo=$(false)" since bash will break execution after, rewrite it to a plain shell, with "if". 
Also use ZSTD everywhere (ugh) Signed-off-by: Azat Khuzhin --- docker/test/stateless/run.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index da17b82d91b..5c15c05652b 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -299,22 +299,22 @@ stop_logs_replication failed_to_save_logs=0 for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log do - err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes") - echo "$err" - [[ "0" != "${#err}" ]] && failed_to_save_logs=1 + if ! clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.zst' format TSVWithNamesAndTypes"; then + failed_to_save_logs=1 + fi if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then - err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 ) - echo "$err" - [[ "0" != "${#err}" ]] && failed_to_save_logs=1 - err=$( { clickhouse-client --port 29000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst; } 2>&1 ) - echo "$err" - [[ "0" != "${#err}" ]] && failed_to_save_logs=1 + if ! clickhouse-client --port 19000 -q "select * from system.$table into outfile '/test_output/$table.1.tsv.zst' format TSVWithNamesAndTypes"; then + failed_to_save_logs=1 + fi + if ! clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then + failed_to_save_logs=1 + fi fi if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then - err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 ) - echo "$err" - [[ "0" != "${#err}" ]] && failed_to_save_logs=1 + if ! 
clickhouse-client --port 29000 -q "select * from system.$table into outfile '/test_output/$table.2.tsv.zst' format TSVWithNamesAndTypes"; then + failed_to_save_logs=1 + fi fi done From 43cf85ef2841190a5d12a592a65a5181bb110661 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 16:09:48 +0200 Subject: [PATCH 1563/2361] ci: collect basic issues in stateless tests (dmesg, fatal and similar) Signed-off-by: Azat Khuzhin --- docker/test/stateless/run.sh | 9 +++++++++ docker/test/stateless/stress_tests.lib | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 5c15c05652b..b352539cc1a 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -3,6 +3,12 @@ # shellcheck disable=SC1091 source /setup_export_logs.sh +# shellcheck source=../stateless/stress_tests.lib +source /stress_tests.lib + +# Avoid overlaps with previous runs +dmesg --clear + # fail on errors, verbose and export all env variables set -e -x -a @@ -420,4 +426,7 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||: fi +# Grep logs for sanitizer asserts, crashes and other critical errors +check_logs_for_critical_errors + collect_core_dumps diff --git a/docker/test/stateless/stress_tests.lib b/docker/test/stateless/stress_tests.lib index 682da1df837..36782101fa7 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/docker/test/stateless/stress_tests.lib @@ -242,7 +242,7 @@ function check_server_start() function check_logs_for_critical_errors() { # Sanitizer asserts - sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp + sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr*.log >> /test_output/tmp rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \ && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \ || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv From 83be27cdf59ec5886abc23441f5cf92740b04c57 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 23:34:01 +0200 Subject: [PATCH 1564/2361] Fix test `02833_concurrent_sessions`, Fix test `02835_drop_user_during_session` --- .../0_stateless/02833_concurrent_sessions.sh | 18 ++++++++++++- .../02835_drop_user_during_session.sh | 26 +++++++++++++++---- 2 files changed, 38 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02833_concurrent_sessions.sh b/tests/queries/0_stateless/02833_concurrent_sessions.sh index 846661cfeed..feaff3a38a3 100755 --- a/tests/queries/0_stateless/02833_concurrent_sessions.sh +++ b/tests/queries/0_stateless/02833_concurrent_sessions.sh @@ -137,7 +137,23 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user IN ( for user in "${ALL_USERS[@]}"; do ${CLICKHOUSE_CLIENT} -q "DROP USER ${user}" echo "Corresponding LoginSuccess/Logout" - ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'LoginSuccess' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'Logout')" + + # The client can exit sooner than the server records its disconnection and closes the session. 
+ # When the client disconnects, two processes happen at the same time and are in the race condition: + # - the client application exits and returns control to the shell; + # - the server closes the session and records the logout event to the session log. + # We cannot expect that after the control is returned to the shell, the server records the logout event. + while true + do + [[ 3 -eq $(${CLICKHOUSE_CLIENT} -q " + SELECT COUNT(*) FROM ( + SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'LoginSuccess' + INTERSECT + SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${user}' AND type = 'Logout' + )") ]] && echo 3 && break; + sleep 0.1 + done + echo "LoginFailure" ${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.session_log WHERE user = '${user}' AND type = 'LoginFailure'" done diff --git a/tests/queries/0_stateless/02835_drop_user_during_session.sh b/tests/queries/0_stateless/02835_drop_user_during_session.sh index 347ebd22f96..c32003a2a11 100755 --- a/tests/queries/0_stateless/02835_drop_user_during_session.sh +++ b/tests/queries/0_stateless/02835_drop_user_during_session.sh @@ -24,7 +24,7 @@ function http_session() ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=pass" -d "SELECT COUNT(*) FROM system.numbers" } -function http_with_session_id_session() +function http_with_session_id_session() { local user=$1 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=${user}&password=pass" -d "SELECT COUNT(*) FROM system.numbers" @@ -104,11 +104,27 @@ wait ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS" -echo "port_0_sessions:" +echo "port_0_sessions:" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND client_port = 0" echo "address_0_sessions:" ${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND client_address = toIPv6('::')" -echo "Corresponding LoginSuccess/Logout" -${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS}, FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout')" +echo "Corresponding LoginSuccess/Logout" + +# The client can exit sooner than the server records its disconnection and closes the session. +# When the client disconnects, two processes happen at the same time and are in the race condition: +# - the client application exits and returns control to the shell; +# - the server closes the session and records the logout event to the session log. +# We cannot expect that after the control is returned to the shell, the server records the logout event. 
+while true +do + [[ 9 -eq $(${CLICKHOUSE_CLIENT} -q " + SELECT COUNT(*) FROM ( + SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' + INTERSECT + SELECT ${SESSION_LOG_MATCHING_FIELDS}, FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' + )") ]] && echo 9 && break; + sleep 0.1 +done + echo "LoginFailure" -${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginFailure'" +${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginFailure'" From d5629655c77653f51e44e1bfa9f8935b000da891 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sun, 4 Aug 2024 21:52:29 +0000 Subject: [PATCH 1565/2361] fix --- .../MergeTree/MergeTreeIndexBloomFilter.cpp | 26 ++++++++++++++++++- ..._bloom_filter_not_supported_func.reference | 2 ++ .../03215_bloom_filter_not_supported_func.sql | 14 ++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference create mode 100644 tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index c6a00751f25..7b873b0e3f2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -366,7 +366,31 @@ bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTre } } - return traverseFunction(node, out, nullptr /*parent*/); + if (node.isFunction()) + { + /// Similar to the logic of KeyCondition, restrict the usage of bloom filter, in case of func like cast(c=1 or c=9999 as Bool). 
+ const std::unordered_set atom_map + { + "equals", + "notEquals", + "has", + "mapContains", + "indexOf", + "hasAny", + "hasAll", + "in", + "notIn", + "globalIn", + "globalNotIn" + }; + + auto func_name = node.toFunctionNode().getFunctionName(); + if (atom_map.find(func_name) == std::end(atom_map)) + return false; + } + + bool res = traverseFunction(node, out, nullptr /*parent*/); + return res; } bool MergeTreeIndexConditionBloomFilter::traverseFunction(const RPNBuilderTreeNode & node, RPNElement & out, const RPNBuilderTreeNode * parent) diff --git a/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference b/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql b/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql new file mode 100644 index 00000000000..3d094244892 --- /dev/null +++ b/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql @@ -0,0 +1,14 @@ +drop table if exists t; + +create table t ( + c Int32, + index x1 (c) type bloom_filter +) engine=MergeTree order by c as select 1; + +SELECT count() FROM t WHERE cast(c=1 or c=9999 as Bool) +settings use_skip_indexes=0; + +SELECT count() FROM t WHERE cast(c=1 or c=9999 as Bool) +settings use_skip_indexes=1; + +drop table t; \ No newline at end of file From 0bdaa57023ef69e49ab5cf9d54ed2e52c1fd2dae Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 23:53:54 +0200 Subject: [PATCH 1566/2361] Fix diagnostics in the test script --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index da17b82d91b..bec8b9cc4d1 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -260,7 +260,7 @@ function run_tests() | ts '%Y-%m-%d %H:%M:%S' \ | tee -a test_output/test_result.txt set -e - DURATION=$((START_TIME - SECONDS)) + DURATION=$((SECONDS - START_TIME)) echo "Elapsed ${DURATION} seconds." 
if [[ $DURATION -ge $TIMEOUT ]] From a573b2926e13ea31c7947b0429b1b5723c7fb938 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 4 Aug 2024 23:59:01 +0200 Subject: [PATCH 1567/2361] Fixes for the script --- docker/test/stateless/run.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index da17b82d91b..8b9e729970c 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -72,8 +72,12 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+" fi +export IS_FLAKY_CHECK=0 + # For flaky check we also enable thread fuzzer if [ "$NUM_TRIES" -gt "1" ]; then + export IS_FLAKY_CHECK=1 + export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 export THREAD_FUZZER_SLEEP_PROBABILITY=0.1 export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000 From 538761b43dbf704d0700548e61b4034ef66c5766 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 00:02:32 +0200 Subject: [PATCH 1568/2361] Fix flaky check --- tests/clickhouse-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index b70dd61a25a..38b0e99760e 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -1232,7 +1232,7 @@ class TestCase: ): return FailureReason.SKIP - elif "no-flaky-check" in tags and (args.test_runs > 1): + elif "no-flaky-check" in tags and (1 == int(os.environ.get("IS_FLAKY_CHECK", 0))): return FailureReason.SKIP elif tags: From 270dddc00fd5533ba24914e7d9ae0aebc50a3fd0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 00:22:48 +0200 Subject: [PATCH 1569/2361] Fix test `02231_bloom_filter_sizing` --- tests/queries/0_stateless/02231_bloom_filter_sizing.reference | 4 ++-- tests/queries/0_stateless/02231_bloom_filter_sizing.sql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02231_bloom_filter_sizing.reference b/tests/queries/0_stateless/02231_bloom_filter_sizing.reference index bdba311c092..aabadfc92fd 100644 --- a/tests/queries/0_stateless/02231_bloom_filter_sizing.reference +++ b/tests/queries/0_stateless/02231_bloom_filter_sizing.reference @@ -1,6 +1,6 @@ Bloom filter on sort key -10000 +1000 0 Bloom filter on non-sort key -10000 +1000 0 diff --git a/tests/queries/0_stateless/02231_bloom_filter_sizing.sql b/tests/queries/0_stateless/02231_bloom_filter_sizing.sql index 233e3111067..ee896675d64 100644 --- a/tests/queries/0_stateless/02231_bloom_filter_sizing.sql +++ b/tests/queries/0_stateless/02231_bloom_filter_sizing.sql @@ -12,7 +12,7 @@ INSERT INTO bloom_filter_sizing_pk SELECT number % 100 as key, -- 100 unique keys number as value -- whatever -FROM numbers(1000 * 1000); +FROM numbers(100_000); -- -- Merge everything into a single part @@ -40,7 +40,7 @@ SELECT number % 100 as key1, -- 100 unique keys rand() % 100 as key2, -- 100 unique keys number as value -- whatever -FROM numbers(1000 * 1000); +FROM numbers(100_000); -- -- Merge everything into a single part From b2ec479cee2a07f7e1fe0a8384a36b5ffc999fda Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Sun, 4 Aug 2024 22:23:08 +0000 Subject: [PATCH 1570/2361] Fixed race condition in session log tests --- tests/integration/test_session_log/test.py | 19 +++++++++++++++---- .../0_stateless/02834_remote_session_log.sh | 10 ++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_session_log/test.py 
b/tests/integration/test_session_log/test.py index 922e2557c50..cf2d0e62e1f 100644 --- a/tests/integration/test_session_log/test.py +++ b/tests/integration/test_session_log/test.py @@ -5,6 +5,7 @@ import pytest import random import sys import threading +import time from helpers.cluster import ClickHouseCluster, run_and_check @@ -117,6 +118,19 @@ def mysql_query(query, user_, pass_, raise_exception): assert raise_exception +def wait_for_corresponding_login_succecss_and_logout(user, expected_login_count): + # The client can exit sooner than the server records its disconnection and closes the session. + # When the client disconnects, two processes happen at the same time and are in the race condition: + # - the client application exits and returns control to the shell; + # - the server closes the session and records the logout event to the session log. + # We cannot expect that after the control is returned to the shell, the server records the logout event. + sql = f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '{user}' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '{user}' AND type = 'Logout')" + logins_and_logouts = instance.query(sql) + while int(logins_and_logouts) != expected_login_count: + time.sleep(0.1) + logins_and_logouts = instance.query(sql) + + @pytest.fixture(scope="module") def started_cluster(): try: @@ -276,10 +290,7 @@ def test_parallel_sessions(started_cluster): ) assert postgres_sessions == "30\n" - logins_and_logouts = instance.query( - f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'parallel_user' AND type = 'Logout')" - ) - assert logins_and_logouts == "30\n" + wait_for_corresponding_login_succecss_and_logout("parallel_user", 30) logout_failure_sessions = instance.query( f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginFailure'" diff --git a/tests/queries/0_stateless/02834_remote_session_log.sh b/tests/queries/0_stateless/02834_remote_session_log.sh index 3bedfb6c9ee..0581cb36136 100755 --- a/tests/queries/0_stateless/02834_remote_session_log.sh +++ b/tests/queries/0_stateless/02834_remote_session_log.sh @@ -44,6 +44,16 @@ for interface in 'TCP' 'HTTP' 'MySQL' do LOGIN_COUNT=`${CLICKHOUSE_CLIENT} -q "SELECT count(*) FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}'"` CORRESPONDING_LOGOUT_RECORDS_COUNT=`${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' AND interface = '${interface}')"` + # The client can exit sooner than the server records its disconnection and closes the session. + # When the client disconnects, two processes happen at the same time and are in the race condition: + # - the client application exits and returns control to the shell; + # - the server closes the session and records the logout event to the session log. + # We cannot expect that after the control is returned to the shell, the server records the logout event. 
+ while [ "$LOGIN_COUNT" != "$CORRESPONDING_LOGOUT_RECORDS_COUNT" ] + do + sleep 0.1 + CORRESPONDING_LOGOUT_RECORDS_COUNT=`${CLICKHOUSE_CLIENT} -q "SELECT COUNT(*) FROM (SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'LoginSuccess' AND interface = '${interface}' INTERSECT SELECT ${SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = '${TEST_USER}' AND type = 'Logout' AND interface = '${interface}')"` + done if [ "$LOGIN_COUNT" == "$CORRESPONDING_LOGOUT_RECORDS_COUNT" ]; then echo "${interface} Login and logout count is equal" From 5a860fcc3aaf4e77025cd9286da10d0587134c3f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 00:25:56 +0200 Subject: [PATCH 1571/2361] Update 02099_tsv_raw_format.sh --- tests/queries/0_stateless/02099_tsv_raw_format.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02099_tsv_raw_format.sh b/tests/queries/0_stateless/02099_tsv_raw_format.sh index 026607ac6d5..a69c96ab613 100755 --- a/tests/queries/0_stateless/02099_tsv_raw_format.sh +++ b/tests/queries/0_stateless/02099_tsv_raw_format.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: long CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 6897178c41286fd3d4f3064ad8e2c716d12e002f Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Sun, 4 Aug 2024 22:29:03 +0000 Subject: [PATCH 1572/2361] fixed typo --- tests/integration/test_session_log/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_session_log/test.py b/tests/integration/test_session_log/test.py index cf2d0e62e1f..5e424610ba2 100644 --- a/tests/integration/test_session_log/test.py +++ b/tests/integration/test_session_log/test.py @@ -118,7 +118,7 @@ def mysql_query(query, user_, pass_, raise_exception): assert raise_exception -def wait_for_corresponding_login_succecss_and_logout(user, expected_login_count): +def wait_for_corresponding_login_success_and_logout(user, expected_login_count): # The client can exit sooner than the server records its disconnection and closes the session. 
# When the client disconnects, two processes happen at the same time and are in the race condition: # - the client application exits and returns control to the shell; @@ -290,7 +290,7 @@ def test_parallel_sessions(started_cluster): ) assert postgres_sessions == "30\n" - wait_for_corresponding_login_succecss_and_logout("parallel_user", 30) + wait_for_corresponding_login_success_and_logout("parallel_user", 30) logout_failure_sessions = instance.query( f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginFailure'" From 7adeaf9c28016b29453d6be2bca1aa1ef866ecdf Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 4 Aug 2024 22:35:07 +0000 Subject: [PATCH 1573/2361] Automatic style fix --- tests/clickhouse-test | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 38b0e99760e..907d773337a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -1232,7 +1232,9 @@ class TestCase: ): return FailureReason.SKIP - elif "no-flaky-check" in tags and (1 == int(os.environ.get("IS_FLAKY_CHECK", 0))): + elif "no-flaky-check" in tags and ( + 1 == int(os.environ.get("IS_FLAKY_CHECK", 0)) + ): return FailureReason.SKIP elif tags: From 94611dbddc08565c0ac154ad62dab1a7e8c6d8ed Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 5 Aug 2024 11:48:46 +0800 Subject: [PATCH 1574/2361] keep ColumnLowCardinality::getName() the same style with other columns --- src/Columns/ColumnLowCardinality.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h index 3766b247d60..5a23853e961 100644 --- a/src/Columns/ColumnLowCardinality.h +++ b/src/Columns/ColumnLowCardinality.h @@ -46,8 +46,8 @@ public: return Base::create(std::move(column_unique), std::move(indexes), is_shared); } - std::string getName() const override { return "ColumnLowCardinality"; } - const char * getFamilyName() const override { return "ColumnLowCardinality"; } + std::string getName() const override { return "LowCardinality(" + getDictionaryPtr()->getName() + ")"; } + const char * getFamilyName() const override { return "LowCardinality"; } TypeIndex getDataType() const override { return TypeIndex::LowCardinality; } ColumnPtr convertToFullColumn() const { return getDictionary().getNestedColumn()->index(getIndexes(), 0); } From 6b1e184e12bed759487d89f54f5ac4f269dffda2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 22:50:46 +0200 Subject: [PATCH 1575/2361] Print original query for AST formatting check on CI It may be tricky to understand the root cause of the AST formatting issue in case of syntax error, so add one knob to control this - debug_ast_formatting_print_original_query. And CI contains core dumps anyway, so let's enable for CI. P.S. There was concern from @al13n321 that printing original query even in debug build is not a good idea [1], hence a knob for this. 
[1]: https://github.com/ClickHouse/ClickHouse/pull/63357/files#r1674809348 Signed-off-by: Azat Khuzhin --- src/Interpreters/executeQuery.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index ba6fc0f14a0..ce58f7f922c 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -796,10 +796,9 @@ static std::tuple executeQueryImpl( catch (const Exception & e) { if (e.code() == ErrorCodes::SYNTAX_ERROR) - /// Don't print the original query text because it may contain sensitive data. throw Exception(ErrorCodes::LOGICAL_ERROR, - "Inconsistent AST formatting: the query:\n{}\ncannot parse.", - formatted1); + "Inconsistent AST formatting: the query:\n{}\ncannot parse query back from {}", + formatted1, std::string_view(begin, end-begin)); else throw; } From 8bca80f4dd08d8ad05db0325b96365d82e6c4076 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 22:35:21 +0200 Subject: [PATCH 1577/2361] Fix REPLACE modifier formatting (forbid omitting brackets) It is too tricky to verify whether brackets are required or not, i.e. "SELECT * REPLACE(1/3/3 AS dummy)" will be formatted to "SELECT * REPLACE (1/3)/3 AS dummy" which is already an invalid query. So let's simply always print them. Signed-off-by: Azat Khuzhin v2: move the fix into the correct place ASTColumnsReplaceTransformer::formatImpl() instead of ASTColumnsReplaceTransformer::Replacement::formatImpl() --- src/Parsers/ASTColumnsTransformers.cpp | 8 ++------ .../03220_replace_formatting.reference | 16 ++++++++++++++++ .../0_stateless/03220_replace_formatting.sh | 14 ++++++++++++++ 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/03220_replace_formatting.reference create mode 100755 tests/queries/0_stateless/03220_replace_formatting.sh diff --git a/src/Parsers/ASTColumnsTransformers.cpp b/src/Parsers/ASTColumnsTransformers.cpp index 2a61892f8cc..332ebca3bdb 100644 --- a/src/Parsers/ASTColumnsTransformers.cpp +++ b/src/Parsers/ASTColumnsTransformers.cpp @@ -323,9 +323,7 @@ void ASTColumnsReplaceTransformer::formatImpl(const FormatSettings & settings, F { settings.ostr << (settings.hilite ? hilite_keyword : "") << "REPLACE" << (is_strict ? " STRICT " : " ") << (settings.hilite ?
hilite_none : ""); - if (children.size() > 1) - settings.ostr << "("; - + settings.ostr << "("; for (ASTs::const_iterator it = children.begin(); it != children.end(); ++it) { if (it != children.begin()) @@ -333,9 +331,7 @@ void ASTColumnsReplaceTransformer::formatImpl(const FormatSettings & settings, F (*it)->formatImpl(settings, state, frame); } - - if (children.size() > 1) - settings.ostr << ")"; + settings.ostr << ")"; } void ASTColumnsReplaceTransformer::appendColumnName(WriteBuffer & ostr) const diff --git a/tests/queries/0_stateless/03220_replace_formatting.reference b/tests/queries/0_stateless/03220_replace_formatting.reference new file mode 100644 index 00000000000..cbcd63839b1 --- /dev/null +++ b/tests/queries/0_stateless/03220_replace_formatting.reference @@ -0,0 +1,16 @@ +SELECT * REPLACE ((1 / 3) / 3 AS dummy) +SELECT * REPLACE ((1 / 3) / 3 AS dummy) +SELECT * REPLACE STRICT (1 AS id, 2 AS value) +FROM +( + SELECT + 0 AS id, + 1 AS value +) +SELECT * REPLACE STRICT (1 AS id, 2 AS value) +FROM +( + SELECT + 0 AS id, + 1 AS value +) diff --git a/tests/queries/0_stateless/03220_replace_formatting.sh b/tests/queries/0_stateless/03220_replace_formatting.sh new file mode 100755 index 00000000000..1c11ed6da8d --- /dev/null +++ b/tests/queries/0_stateless/03220_replace_formatting.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +q=$($CLICKHOUSE_FORMAT <<<"SELECT * REPLACE(1/3/3 AS dummy)") +echo "$q" +$CLICKHOUSE_FORMAT <<<"$q" + +# multiple columns +q=$($CLICKHOUSE_FORMAT <<<"SELECT * REPLACE STRICT (1 AS id, 2 AS value) FROM (SELECT 0 id, 1 value)") +echo "$q" +$CLICKHOUSE_FORMAT <<<"$q" From 2a7ad3a1f979708fd152e364296c505db8926aba Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 4 Aug 2024 23:21:19 +0200 Subject: [PATCH 1577/2361] Update test references for new REPLACE modifier syntax Signed-off-by: Azat Khuzhin --- .../01913_fix_column_transformer_replace_format.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference b/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference index 33be11c07d5..6fabd33c804 100644 --- a/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference +++ b/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference @@ -1 +1 @@ -CREATE VIEW default.my_view\n(\n `Id` UInt32,\n `Object.Key` Array(UInt16),\n `Object.Value` Array(String)\n)\nAS SELECT * REPLACE arrayMap(x -> (x + 1), `Object.Key`) AS `Object.Key`\nFROM default.my_table +CREATE VIEW default.my_view\n(\n `Id` UInt32,\n `Object.Key` Array(UInt16),\n `Object.Value` Array(String)\n)\nAS SELECT * REPLACE (arrayMap(x -> (x + 1), `Object.Key`) AS `Object.Key`)\nFROM default.my_table From e5134e14ea68ff4d02bde892a7c66d00d4c1e800 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Mon, 5 Aug 2024 07:59:51 +0000 Subject: [PATCH 1578/2361] Disabled parallel run --- tests/ci/integration_tests_runner.py | 4 ++-- tests/integration/parallel_skip.json | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 2b348be8b51..22c52521c19 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -784,8 +784,8 @@ class ClickhouseIntegrationTestsRunner: logging.info("Starting check with 
retries") final_retry = 0 logs = [] - tires_num = 1 if should_fail else FLAKY_TRIES_COUNT - for i in range(tires_num): + tries_num = 1 if should_fail else FLAKY_TRIES_COUNT + for i in range(tries_num): final_retry += 1 logging.info("Running tests for the %s time", i) counters, tests_times, log_paths = self.try_run_test_group( diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 99fa626bd1e..9b8109f3f17 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -94,6 +94,11 @@ "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_setting_in_query", "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_client_suggestions_load", + "test_session_log/test.py::test_grpc_session", + "test_session_log/test.py::test_mysql_session", + "test_session_log/test.py::test_postgres_session", + "test_session_log/test.py::test_parallel_sessions", + "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_stop_moves_query", "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_table_detach", "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_zookeeper_disconnect", From 29f1d9df36d34fe417624b0828b68a819ed74377 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Mon, 5 Aug 2024 10:03:49 +0200 Subject: [PATCH 1579/2361] Integration tests: fix flaky test_dictionaries_update_and_reload::test_reload_after_fail_by_timer --- .../test_dictionaries_update_and_reload/test.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_dictionaries_update_and_reload/test.py b/tests/integration/test_dictionaries_update_and_reload/test.py index db1c8e47467..32a9e1a033e 100644 --- a/tests/integration/test_dictionaries_update_and_reload/test.py +++ b/tests/integration/test_dictionaries_update_and_reload/test.py @@ -37,7 +37,7 @@ def get_status(dictionary_name): ).rstrip("\n") -def get_status_retry(dictionary_name, expect, retry_count=10, sleep_time=0.5): +def get_status_retry(dictionary_name, expect, retry_count=50, sleep_time=0.5): for _ in range(retry_count): res = get_status(dictionary_name) if res == expect: @@ -284,6 +284,11 @@ def test_reload_after_fail_by_timer(started_cluster): ) instance.query("SYSTEM RELOAD DICTIONARY no_file_2") instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n" + if ( + instance.is_built_with_sanitizer() + and get_status("no_file_2") == "LOADED_AND_RELOADING" + ): + get_status_retry("no_file_2", expect="LOADED") assert get_status("no_file_2") == "LOADED" # Removing the file source should not spoil the loaded dictionary. 
@@ -292,6 +297,11 @@ def test_reload_after_fail_by_timer(started_cluster): ) time.sleep(6) instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n" + if ( + instance.is_built_with_sanitizer() + and get_status("no_file_2") == "LOADED_AND_RELOADING" + ): + get_status_retry("no_file_2", expect="LOADED") assert get_status("no_file_2") == "LOADED" From cebdc5ecf6b7e41cf3c75fd8ea9765972afa084c Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 31 Jul 2024 15:19:51 +0000 Subject: [PATCH 1580/2361] Bump rocksdb to v8.0.0 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 1 + src/Coordination/KeeperContext.cpp | 7 ++++--- src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp | 17 ++++++++++------- tests/config/config.d/rocksdb.xml | 3 ++- 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 01e43568fa9..fdf403f5918 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 01e43568fa9f3f7bf107b2b66c00b286b456f33e +Subproject commit fdf403f5918a2b4355cf75ebe5e21d0fc22db880 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 98790158baa..8660bd0e7ba 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -309,6 +309,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc + ${ROCKSDB_SOURCE_DIR}/util/data_structure.cc ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc ${ROCKSDB_SOURCE_DIR}/util/hash.cc ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc diff --git a/src/Coordination/KeeperContext.cpp b/src/Coordination/KeeperContext.cpp index 1f66882ecad..dd2c1d59d56 100644 --- a/src/Coordination/KeeperContext.cpp +++ b/src/Coordination/KeeperContext.cpp @@ -23,6 +23,7 @@ #if USE_ROCKSDB #include #include +#include #include #endif @@ -88,7 +89,7 @@ static rocksdb::Options getRocksDBOptionsFromConfig(const Poco::Util::AbstractCo if (config.has("keeper_server.rocksdb.options")) { auto config_options = getOptionsFromConfig(config, "keeper_server.rocksdb.options"); - status = rocksdb::GetDBOptionsFromMap(merged, config_options, &merged); + status = rocksdb::GetDBOptionsFromMap({}, merged, config_options, &merged); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.options' : {}", @@ -98,7 +99,7 @@ static rocksdb::Options getRocksDBOptionsFromConfig(const Poco::Util::AbstractCo if (config.has("rocksdb.column_family_options")) { auto column_family_options = getOptionsFromConfig(config, "rocksdb.column_family_options"); - status = rocksdb::GetColumnFamilyOptionsFromMap(merged, column_family_options, &merged); + status = rocksdb::GetColumnFamilyOptionsFromMap({}, merged, column_family_options, &merged); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.column_family_options' at: {}", status.ToString()); @@ -107,7 +108,7 @@ static rocksdb::Options getRocksDBOptionsFromConfig(const Poco::Util::AbstractCo if (config.has("rocksdb.block_based_table_options")) { auto block_based_table_options = getOptionsFromConfig(config, "rocksdb.block_based_table_options"); - status = rocksdb::GetBlockBasedTableOptionsFromMap(table_options, block_based_table_options, &table_options); + status = rocksdb::GetBlockBasedTableOptionsFromMap({}, table_options, block_based_table_options, &table_options); if (!status.ok()) { throw 
Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.block_based_table_options' at: {}", status.ToString()); diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index fafc72da04e..50f6266cb2f 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -36,10 +36,12 @@ #include #include +#include +#include #include #include +#include #include -#include #include #include @@ -428,6 +430,7 @@ void StorageEmbeddedRocksDB::initDB() rocksdb::Options base; base.create_if_missing = true; + base.compression = rocksdb::CompressionType::kZSTD; base.statistics = rocksdb::CreateDBStatistics(); /// It is too verbose by default, and in fact we don't care about rocksdb logs at all. base.info_log_level = rocksdb::ERROR_LEVEL; @@ -439,7 +442,7 @@ void StorageEmbeddedRocksDB::initDB() if (config.has("rocksdb.options")) { auto config_options = getOptionsFromConfig(config, "rocksdb.options"); - status = rocksdb::GetDBOptionsFromMap(merged, config_options, &merged); + status = rocksdb::GetDBOptionsFromMap({}, merged, config_options, &merged); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.options' at: {}: {}", @@ -449,7 +452,7 @@ void StorageEmbeddedRocksDB::initDB() if (config.has("rocksdb.column_family_options")) { auto column_family_options = getOptionsFromConfig(config, "rocksdb.column_family_options"); - status = rocksdb::GetColumnFamilyOptionsFromMap(merged, column_family_options, &merged); + status = rocksdb::GetColumnFamilyOptionsFromMap({}, merged, column_family_options, &merged); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.column_family_options' at: {}: {}", @@ -459,7 +462,7 @@ void StorageEmbeddedRocksDB::initDB() if (config.has("rocksdb.block_based_table_options")) { auto block_based_table_options = getOptionsFromConfig(config, "rocksdb.block_based_table_options"); - status = rocksdb::GetBlockBasedTableOptionsFromMap(table_options, block_based_table_options, &table_options); + status = rocksdb::GetBlockBasedTableOptionsFromMap({}, table_options, block_based_table_options, &table_options); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from 'rocksdb.block_based_table_options' at: {}: {}", @@ -484,7 +487,7 @@ void StorageEmbeddedRocksDB::initDB() if (config.has(config_key)) { auto table_config_options = getOptionsFromConfig(config, config_key); - status = rocksdb::GetDBOptionsFromMap(merged, table_config_options, &merged); + status = rocksdb::GetDBOptionsFromMap({}, merged, table_config_options, &merged); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from '{}' at: {}: {}", @@ -496,7 +499,7 @@ void StorageEmbeddedRocksDB::initDB() if (config.has(config_key)) { auto table_column_family_options = getOptionsFromConfig(config, config_key); - status = rocksdb::GetColumnFamilyOptionsFromMap(merged, table_column_family_options, &merged); + status = rocksdb::GetColumnFamilyOptionsFromMap({}, merged, table_column_family_options, &merged); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from '{}' at: {}: {}", @@ -508,7 +511,7 @@ void StorageEmbeddedRocksDB::initDB() if (config.has(config_key)) { auto block_based_table_options = getOptionsFromConfig(config, config_key); - status = 
rocksdb::GetBlockBasedTableOptionsFromMap(table_options, block_based_table_options, &table_options); + status = rocksdb::GetBlockBasedTableOptionsFromMap({}, table_options, block_based_table_options, &table_options); if (!status.ok()) { throw Exception(ErrorCodes::ROCKSDB_ERROR, "Fail to merge rocksdb options from '{}' at: {}: {}", diff --git a/tests/config/config.d/rocksdb.xml b/tests/config/config.d/rocksdb.xml index a3790a3dc1d..3002e008a2d 100644 --- a/tests/config/config.d/rocksdb.xml +++ b/tests/config/config.d/rocksdb.xml @@ -1,7 +1,8 @@ - DEBUG_LEVEL + + ERROR_LEVEL From d0bc728d52140b60a4c8b9d24d2c4dd4cb9582a8 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 13 Jul 2024 19:09:45 +0000 Subject: [PATCH 1581/2361] Bump rocksdb to v8.9.1 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 23 ++++++++----------- src/Coordination/tests/gtest_coordination.cpp | 11 +++++---- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index fdf403f5918..49ce8a1064d 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit fdf403f5918a2b4355cf75ebe5e21d0fc22db880 +Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 8660bd0e7ba..57c056532c6 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -1,6 +1,6 @@ option (ENABLE_ROCKSDB "Enable RocksDB" ${ENABLE_LIBRARIES}) -if (NOT ENABLE_ROCKSDB) +if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL message (STATUS "Not using RocksDB") return() endif() @@ -39,13 +39,6 @@ if(WITH_ZSTD) list(APPEND THIRDPARTY_LIBS ch_contrib::zstd) endif() -add_definitions(-DROCKSDB_PORTABLE) - -if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ) - add_definitions(-DHAVE_SSE42) - add_definitions(-DHAVE_PCLMUL) -endif() - if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64") set (HAS_ARMV8_CRC 1) # the original build descriptions set specific flags for ARM. 
These flags are already subsumed by ClickHouse's general @@ -91,7 +84,9 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc + ${ROCKSDB_SOURCE_DIR}/cache/secondary_cache_adapter.cc ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc + ${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc @@ -174,9 +169,11 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc ${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc ${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc + ${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns_helper.cc ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc + ${ROCKSDB_SOURCE_DIR}/db/write_stall_stats.cc ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc ${ROCKSDB_SOURCE_DIR}/env/composite_env.cc ${ROCKSDB_SOURCE_DIR}/env/env.cc @@ -229,6 +226,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/options/configurable.cc ${ROCKSDB_SOURCE_DIR}/options/customizable.cc ${ROCKSDB_SOURCE_DIR}/options/db_options.cc + ${ROCKSDB_SOURCE_DIR}/options/offpeak_time_info.cc ${ROCKSDB_SOURCE_DIR}/options/options.cc ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc @@ -268,6 +266,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/table/get_context.cc ${ROCKSDB_SOURCE_DIR}/table/iterator.cc ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc + ${ROCKSDB_SOURCE_DIR}/table/compaction_merging_iterator.cc ${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc @@ -323,6 +322,8 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/util/string_util.cc ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc + ${ROCKSDB_SOURCE_DIR}/util/udt_util.cc + ${ROCKSDB_SOURCE_DIR}/util/write_batch_util.cc ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc ${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc ${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc @@ -405,12 +406,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc build_version.cc) # generated by hand -if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ) - set_source_files_properties( - "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc" - PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul") -endif() - if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") list(APPEND SOURCES "${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c" diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index beae6254562..d39031773cd 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -111,10 +111,13 @@ public: } }; -using Implementation = testing::Types, - TestParam, - TestParam, - TestParam>; +using Implementation = testing::Types + ,TestParam +#if USE_ROCKSDB + ,TestParam + ,TestParam +#endif + >; TYPED_TEST_SUITE(CoordinationTest, Implementation); TYPED_TEST(CoordinationTest, RaftServerConfigParse) From 2251ad963992d4656c5ae20a7221aff36a86cc1d Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 5 Aug 2024 16:44:53 +0800 Subject: [PATCH 1582/2361] optimize orc string column reading --- .../Impl/NativeORCBlockInputFormat.cpp | 34 ++++++++++++++----- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git 
a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 649721f28bf..a0a80ec4a58 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1143,24 +1143,42 @@ readColumnWithStringData(const orc::ColumnVectorBatch * orc_column, const orc::T reserver_size += 1; } - column_chars_t.reserve(reserver_size); - column_offsets.reserve(orc_str_column->numElements); + column_chars_t.resize_exact(reserver_size); + column_offsets.resize_exact(orc_str_column->numElements); size_t curr_offset = 0; - for (size_t i = 0; i < orc_str_column->numElements; ++i) + if (!orc_str_column->hasNulls) { - if (!orc_str_column->hasNulls || orc_str_column->notNull[i]) + for (size_t i = 0; i < orc_str_column->numElements; ++i) { const auto * buf = orc_str_column->data[i]; size_t buf_size = orc_str_column->length[i]; - column_chars_t.insert_assume_reserved(buf, buf + buf_size); + memcpy(&column_chars_t[curr_offset], buf, buf_size); curr_offset += buf_size; + + column_chars_t[curr_offset] = 0; + ++curr_offset; + + column_offsets[i] = curr_offset; } + } + else + { + for (size_t i = 0; i < orc_str_column->numElements; ++i) + { + if (orc_str_column->notNull[i]) + { + const auto * buf = orc_str_column->data[i]; + size_t buf_size = orc_str_column->length[i]; + memcpy(&column_chars_t[curr_offset], buf, buf_size); + curr_offset += buf_size; + } - column_chars_t.push_back(0); - ++curr_offset; + column_chars_t[curr_offset] = 0; + ++curr_offset; - column_offsets.push_back(curr_offset); + column_offsets[i] = curr_offset; + } } return {std::move(internal_column), std::move(internal_type), column_name}; } From f816158dbc000c4fb02ba413f5b47349795995c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 09:11:55 +0000 Subject: [PATCH 1583/2361] Address review comments --- src/Storages/MessageQueueSink.cpp | 4 ++-- tests/integration/test_storage_kafka/test.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/MessageQueueSink.cpp b/src/Storages/MessageQueueSink.cpp index d4dabd60ef8..104e3506ade 100644 --- a/src/Storages/MessageQueueSink.cpp +++ b/src/Storages/MessageQueueSink.cpp @@ -46,6 +46,8 @@ void MessageQueueSink::consume(Chunk & chunk) if (columns.empty()) return; + /// The formatter might hold pointers to buffer (e.g. if PeekableWriteBuffer is used), which means the formatter + /// needs to be reset after buffer might reallocate its memory. In this exact case after restarting the buffer. 
if (row_format) { size_t row = 0; @@ -77,6 +79,4 @@ void MessageQueueSink::consume(Chunk & chunk) format->resetFormatter(); } } - - } diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index dd0bf1bf28f..8793ab72a16 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -5098,7 +5098,7 @@ def test_multiple_read_in_materialized_views(kafka_cluster, max_retries=15): def test_kafka_produce_http_interface_row_based_format(kafka_cluster): - # reproduction of #https://github.com/ClickHouse/ClickHouse/issues/61060 with validating the written messages + # reproduction of #61060 with validating the written messages admin_client = KafkaAdminClient( bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) ) From b47f8a733f926c0f52a5837414bcd6ecfece9089 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 5 Aug 2024 08:40:35 +0200 Subject: [PATCH 1584/2361] ci: fix basic errors collecting after stateless tests Signed-off-by: Azat Khuzhin --- docker/test/stateless/run.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index b352539cc1a..c582d3a982b 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -393,6 +393,8 @@ do | zstd --threads=0 > "/test_output/trace-log-$trace_type-flamegraph.tsv.zst" ||: done +# Grep logs for sanitizer asserts, crashes and other critical errors +check_logs_for_critical_errors # Compressed (FIXME: remove once only github actions will be left) rm /var/log/clickhouse-server/clickhouse-server.log @@ -426,7 +428,4 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||: fi -# Grep logs for sanitizer asserts, crashes and other critical errors -check_logs_for_critical_errors - collect_core_dumps From cdbc4f357324ad0b41d46b6e54475ac2cebdc630 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 5 Aug 2024 08:45:20 +0200 Subject: [PATCH 1585/2361] ci: fail the test if the entrypoint script failed Signed-off-by: Azat Khuzhin --- tests/ci/functional_test_check.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 66db082677f..52970404d2d 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -166,6 +166,7 @@ def _get_statless_tests_to_run(pr_info: PRInfo) -> List[str]: def process_results( + ret_code: int, result_directory: Path, server_log_path: Path, ) -> Tuple[StatusType, str, TestResults, List[Path]]: @@ -192,6 +193,9 @@ def process_results( logging.info("Files in result folder %s", os.listdir(result_directory)) return ERROR, "Invalid check_status.tsv", test_results, additional_files state, description = status[0][0], status[0][1] + if ret_code != 0: + state = ERROR + description += " (but script exited with an error)" try: results_path = result_directory / "test_results.tsv" @@ -339,7 +343,7 @@ def main(): ci_logs_credentials.clean_ci_logs_from_credentials(run_log_path) state, description, test_results, additional_logs = process_results( - result_path, server_log_path + retcode, result_path, server_log_path ) else: print( From 9ce55b69b49ac8426ed0b3db16b95964e3c4db4d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 5 Aug 2024 11:22:55 +0200 Subject: [PATCH 1586/2361] Fix possible CANNOT_READ_ALL_DATA during server startup in performance tests CI [1]: 
2024.08.04 22:09:11.646800 [ 1052 ] {} Application: Code: 33. DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.: While checking access for disk backups. (CANNOT_READ_ALL_DATA), Stack trace (when copying this message, always include the lines below): [1]: https://s3.amazonaws.com/clickhouse-test-reports/64955/6702acf6f2e4a0ee9697066e38006631fc7f69df/performance_comparison__aarch64__[2_4].html Signed-off-by: Azat Khuzhin --- tests/performance/scripts/compare.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/performance/scripts/compare.sh b/tests/performance/scripts/compare.sh index cb56ab6c5bf..da7bbf77a28 100755 --- a/tests/performance/scripts/compare.sh +++ b/tests/performance/scripts/compare.sh @@ -71,6 +71,8 @@ function configure { # Use the new config for both servers, so that we can change it in a PR. rm right/config/config.d/text_log.xml ||: + # backups disk uses absolute path, and this overlaps between servers, that could lead to errors + rm right/config/config.d/backups.xml ||: cp -rv right/config left ||: # Start a temporary server to rename the tables From a499cd25c7e12c05f2f8fa3fe546715c751ad88d Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Aug 2024 09:31:41 +0000 Subject: [PATCH 1587/2361] Fix for integers --- src/Functions/if.cpp | 16 ++++++---------- ...3215_varian_as_common_type_integers.reference | 8 ++++++++ .../03215_varian_as_common_type_integers.sql | 8 ++++++++ 3 files changed, 22 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/03215_varian_as_common_type_integers.reference create mode 100644 tests/queries/0_stateless/03215_varian_as_common_type_integers.sql diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index 64da6e95a43..8829b3c4ff1 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -632,11 +632,6 @@ private: ColumnPtr executeTuple(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { - /// For different Tuples the result type can be Variant with this Tuples if use_variant_as_common_type=1. - /// In this case we should use generic implementation. - if (!isTuple(result_type)) - return nullptr; - /// Calculate function for each corresponding elements of tuples. const ColumnWithTypeAndName & arg1 = arguments[1]; @@ -682,11 +677,6 @@ private: ColumnPtr executeMap(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const { - /// For different Maps the result type can be Variant with this Maps if use_variant_as_common_type=1. - /// In this case we should use generic implementation. - if (!isMap(result_type)) - return nullptr; - auto extract_kv_from_map = [](const ColumnMap * map) { const ColumnTuple & tuple = map->getNestedData(); @@ -1243,6 +1233,12 @@ public: throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}. " "Must be ColumnUInt8 or ColumnConstUInt8.", arg_cond.column->getName(), getName()); + /// If result is Variant, always use generic implementation. + /// Using typed implementations may lead to incorrect result column type when + /// resulting Variant is created by use_variant_when_no_common_type. 
+ if (isVariant(result_type)) + return executeGeneric(cond_col, arguments, input_rows_count, use_variant_when_no_common_type); + auto call = [&](const auto & types) -> bool { using Types = std::decay_t; diff --git a/tests/queries/0_stateless/03215_varian_as_common_type_integers.reference b/tests/queries/0_stateless/03215_varian_as_common_type_integers.reference new file mode 100644 index 00000000000..c5edc9e9963 --- /dev/null +++ b/tests/queries/0_stateless/03215_varian_as_common_type_integers.reference @@ -0,0 +1,8 @@ +0 Variant(Int64, UInt64) +1 Variant(Int64, UInt64) +0 Variant(Int32, UInt64) +1 Variant(Int32, UInt64) +0 Variant(Int16, UInt64) +1 Variant(Int16, UInt64) +0 Variant(Int8, UInt64) +1 Variant(Int8, UInt64) diff --git a/tests/queries/0_stateless/03215_varian_as_common_type_integers.sql b/tests/queries/0_stateless/03215_varian_as_common_type_integers.sql new file mode 100644 index 00000000000..dcc69735534 --- /dev/null +++ b/tests/queries/0_stateless/03215_varian_as_common_type_integers.sql @@ -0,0 +1,8 @@ +set use_variant_as_common_type = 1; +set allow_experimental_variant_type = 1; + +SELECT if(number % 2, number::Int64, number::UInt64) as res, toTypeName(res) FROM numbers(2); +SELECT if(number % 2, number::Int32, number::UInt64) as res, toTypeName(res) FROM numbers(2); +SELECT if(number % 2, number::Int16, number::UInt64) as res, toTypeName(res) FROM numbers(2); +SELECT if(number % 2, number::Int8, number::UInt64) as res, toTypeName(res) FROM numbers(2); + From 2b369cccdd58902e9da3fd3947e5cb5759ba2881 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Aug 2024 09:35:56 +0000 Subject: [PATCH 1588/2361] Reduce table size in 03037_dynamic_merges_2_vertical_wide_merge_tree test --- ...3037_dynamic_merges_2_vertical_wide_merge_tree.reference | 6 +++--- .../03037_dynamic_merges_2_vertical_wide_merge_tree.sql | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference index afd392002e5..253d87de5f0 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.reference @@ -1,3 +1,3 @@ -1000000 Array(UInt16) -1000000 String -1000000 UInt64 +200000 Array(UInt16) +200000 String +200000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql index 104d6018e41..dd643f8dffd 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql @@ -5,9 +5,9 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; -insert into test select number, number from numbers(1000000); -insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); -insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +insert into test select number, number from numbers(200000); +insert into test select number, 'str_' || 
toString(number) from numbers(200000, 200000); +insert into test select number, range(number % 10 + 1) from numbers(400000, 200000); system start merges test; optimize table test final; select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); From cec8a5d52b83f0c1cdcaed833aec9bf79941b2a8 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Aug 2024 09:42:22 +0000 Subject: [PATCH 1589/2361] Reduce table size in similar tests --- ...dynamic_merges_2_horizontal_compact_merge_tree.reference | 6 +++--- ...03037_dynamic_merges_2_horizontal_compact_merge_tree.sql | 6 +++--- ...37_dynamic_merges_2_horizontal_wide_merge_tree.reference | 6 +++--- .../03037_dynamic_merges_2_horizontal_wide_merge_tree.sql | 6 +++--- ...7_dynamic_merges_2_vertical_compact_merge_tree.reference | 6 +++--- .../03037_dynamic_merges_2_vertical_compact_merge_tree.sql | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference index afd392002e5..253d87de5f0 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.reference @@ -1,3 +1,3 @@ -1000000 Array(UInt16) -1000000 String -1000000 UInt64 +200000 Array(UInt16) +200000 String +200000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql index e133ac3001f..fa64ed2f8fd 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql @@ -5,9 +5,9 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; system stop merges test; -insert into test select number, number from numbers(1000000); -insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); -insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +insert into test select number, number from numbers(200000); +insert into test select number, 'str_' || toString(number) from numbers(200000, 200000); +insert into test select number, range(number % 10 + 1) from numbers(400000, 200000); system start merges test; optimize table test final; select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference index afd392002e5..253d87de5f0 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.reference @@ -1,3 +1,3 @@ -1000000 Array(UInt16) -1000000 String -1000000 UInt64 +200000 Array(UInt16) +200000 String +200000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql index 
d527081b763..4b8a036f166 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql @@ -5,9 +5,9 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; -insert into test select number, number from numbers(1000000); -insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); -insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +insert into test select number, number from numbers(200000); +insert into test select number, 'str_' || toString(number) from numbers(200000, 200000); +insert into test select number, range(number % 10 + 1) from numbers(400000, 200000); system start merges test; optimize table test final; select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference index afd392002e5..253d87de5f0 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.reference @@ -1,3 +1,3 @@ -1000000 Array(UInt16) -1000000 String -1000000 UInt64 +200000 Array(UInt16) +200000 String +200000 UInt64 diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql index ebccfb77922..a4e67de76db 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql @@ -5,9 +5,9 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; -insert into test select number, number from numbers(1000000); -insert into test select number, 'str_' || toString(number) from numbers(1000000, 1000000); -insert into test select number, range(number % 10 + 1) from numbers(2000000, 1000000); +insert into test select number, number from numbers(200000); +insert into test select number, 'str_' || toString(number) from numbers(200000, 200000); +insert into test select number, range(number % 10 + 1) from numbers(400000, 200000); system start merges test; optimize table test final; select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); From bd6c7a504d214db606b7ba41bd3f9e172df8ae68 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Mon, 5 Aug 2024 11:45:49 +0200 Subject: [PATCH 1590/2361] Integration tests: fix flaky test_dictionaries_update_and_reload::test_reload_after_fail_by_timer --- .../test.py | 32 ++----------------- 1 file changed, 3 insertions(+), 29 deletions(-) diff --git a/tests/integration/test_dictionaries_update_and_reload/test.py 
b/tests/integration/test_dictionaries_update_and_reload/test.py index 32a9e1a033e..3ed854d2c9f 100644 --- a/tests/integration/test_dictionaries_update_and_reload/test.py +++ b/tests/integration/test_dictionaries_update_and_reload/test.py @@ -37,16 +37,6 @@ def get_status(dictionary_name): ).rstrip("\n") -def get_status_retry(dictionary_name, expect, retry_count=50, sleep_time=0.5): - for _ in range(retry_count): - res = get_status(dictionary_name) - if res == expect: - return res - time.sleep(sleep_time) - - raise Exception(f'Expected result "{expect}" did not occur') - - def get_last_exception(dictionary_name): return ( instance.query( @@ -263,13 +253,7 @@ def test_reload_after_fail_by_timer(started_cluster): # on sanitizers builds it can return 'FAILED_AND_RELOADING' which is not quite right # add retry for these builds - if ( - instance.is_built_with_sanitizer() - and get_status("no_file_2") == "FAILED_AND_RELOADING" - ): - get_status_retry("no_file_2", expect="FAILED") - - assert get_status("no_file_2") == "FAILED" + assert get_status("no_file_2") in ["FAILED", "FAILED_AND_RELOADING"] # Creating the file source makes the dictionary able to load. instance.copy_file_to_container( @@ -284,12 +268,7 @@ def test_reload_after_fail_by_timer(started_cluster): ) instance.query("SYSTEM RELOAD DICTIONARY no_file_2") instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n" - if ( - instance.is_built_with_sanitizer() - and get_status("no_file_2") == "LOADED_AND_RELOADING" - ): - get_status_retry("no_file_2", expect="LOADED") - assert get_status("no_file_2") == "LOADED" + assert get_status("no_file_2") in ["LOADED", "LOADED_AND_RELOADING"] # Removing the file source should not spoil the loaded dictionary. instance.exec_in_container( @@ -297,12 +276,7 @@ def test_reload_after_fail_by_timer(started_cluster): ) time.sleep(6) instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n" - if ( - instance.is_built_with_sanitizer() - and get_status("no_file_2") == "LOADED_AND_RELOADING" - ): - get_status_retry("no_file_2", expect="LOADED") - assert get_status("no_file_2") == "LOADED" + assert get_status("no_file_2") in ["LOADED", "LOADED_AND_RELOADING"] def test_reload_after_fail_in_cache_dictionary(started_cluster): From e0362b00f5a24ea19e16b27b71963efe64a174c0 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Mon, 5 Aug 2024 11:52:07 +0200 Subject: [PATCH 1591/2361] squash! 
fix for parallel execution --- tests/integration/test_parquet_page_index/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_parquet_page_index/test.py b/tests/integration/test_parquet_page_index/test.py index 59dbab09be5..778b6618a61 100644 --- a/tests/integration/test_parquet_page_index/test.py +++ b/tests/integration/test_parquet_page_index/test.py @@ -35,7 +35,7 @@ def delete_if_exists(file_path): "query, expected_result", { ( - "SElECT number, number+1 FROM system.numbers LIMIT 100 " + "SELECT number, number+1 FROM system.numbers LIMIT 100 " "INTO OUTFILE '{file_name}' FORMAT Parquet " "SETTINGS output_format_parquet_use_custom_encoder = false, " "output_format_parquet_write_page_index = true;", From 0a7a67b8e0e05bc9476d4f9dd38747bf61b6bb8e Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 5 Aug 2024 09:57:13 +0000 Subject: [PATCH 1592/2361] Disable 03038_nested_dynamic_merges* under sanitizers because it's too slow --- ...sted_dynamic_merges_compact_horizontal.sql | 2 +- ...nested_dynamic_merges_compact_vertical.sql | 2 +- ...8_nested_dynamic_merges_small.reference.j2 | 84 +++++++++++++++++++ .../03038_nested_dynamic_merges_small.sql.j2 | 35 ++++++++ ..._nested_dynamic_merges_wide_horizontal.sql | 2 +- ...38_nested_dynamic_merges_wide_vertical.sql | 2 +- 6 files changed, 123 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 create mode 100644 tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql index 1d5c63dcdf1..81888946681 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql index 2bffe35c577..ba58ca471a2 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 new file mode 100644 index 00000000000..ae07c164074 --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 @@ -0,0 +1,84 @@ +2 Tuple(a Dynamic(max_types=3)):Date +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):String +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 UInt64:None +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 UInt64:None +2 Tuple(a Dynamic(max_types=3)):DateTime +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +13 Tuple(a Dynamic(max_types=3)):None +5 Tuple(a 
Dynamic(max_types=3)):UInt64 +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +12 Tuple(a Dynamic(max_types=3)):String +13 Tuple(a Dynamic(max_types=3)):None +2 Tuple(a Dynamic(max_types=3)):Date +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):String +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 UInt64:None +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 UInt64:None +2 Tuple(a Dynamic(max_types=3)):DateTime +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +13 Tuple(a Dynamic(max_types=3)):None +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +12 Tuple(a Dynamic(max_types=3)):String +13 Tuple(a Dynamic(max_types=3)):None +2 Tuple(a Dynamic(max_types=3)):Date +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):String +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 UInt64:None +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 UInt64:None +2 Tuple(a Dynamic(max_types=3)):DateTime +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +13 Tuple(a Dynamic(max_types=3)):None +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +12 Tuple(a Dynamic(max_types=3)):String +13 Tuple(a Dynamic(max_types=3)):None +2 Tuple(a Dynamic(max_types=3)):Date +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):String +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 UInt64:None +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 UInt64:None +2 Tuple(a Dynamic(max_types=3)):DateTime +3 Tuple(a Dynamic(max_types=3)):Array(UInt8) +5 Tuple(a Dynamic(max_types=3)):UInt64 +7 Tuple(a Dynamic(max_types=3)):String +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +13 Tuple(a Dynamic(max_types=3)):None +5 Tuple(a Dynamic(max_types=3)):UInt64 +10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) +10 UInt64:None +12 Tuple(a Dynamic(max_types=3)):String +13 Tuple(a Dynamic(max_types=3)):None diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 new file mode 100644 index 00000000000..7828c2af49c --- /dev/null +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 @@ -0,0 +1,35 @@ +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set enable_named_columns_in_function_tuple = 0; + +drop table if exists test; + +{% for engine in ['MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000', + 'MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1', + 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1', + 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, 
vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1'] -%} + +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; + +system stop merges test; +insert into test select number, number from numbers(10); +insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(10); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(5); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(5); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(20); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +system start merges test; +optimize table test final; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql index fb686091ebb..a53c5b0b2a5 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql index ed195452d56..4256b010ec0 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; From b53e757656e298bb308862a8294cde5718e37580 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 24 Jul 2024 10:01:50 +0000 Subject: [PATCH 1593/2361] Check argument types in DataTypeAggregateFunction ctor --- src/DataTypes/DataTypeAggregateFunction.cpp | 27 +++++++++++++++++++++ src/DataTypes/DataTypeAggregateFunction.h | 8 +----- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp index 09175617bf1..ee42e4fea11 100644 --- a/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/src/DataTypes/DataTypeAggregateFunction.cpp @@ -33,6 +33,33 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } + 
+DataTypeAggregateFunction::DataTypeAggregateFunction(AggregateFunctionPtr function_, const DataTypes & argument_types_, + const Array & parameters_, std::optional version_) + : function(std::move(function_)) + , argument_types(argument_types_) + , parameters(parameters_) + , version(version_) +{ + Strings argument_type_names; + for (const auto & argument_type : argument_types) + argument_type_names.push_back(argument_type->getName()); + + Strings function_argument_type_names; + const auto & function_argument_types = function->getArgumentTypes(); + for (const auto & argument_type : function_argument_types) + function_argument_type_names.push_back(argument_type->getName()); + + size_t argument_types_size = std::max(argument_types.size(), function_argument_types.size()); + for (size_t i = 0; i < argument_types_size; ++i) + { + if (argument_types.size() != function_argument_types.size() || !argument_types[i]->equals(*function_argument_types[i])) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Data type AggregateFunction {} got argument types different from function argument types: [{}] != [{}]", + function->getName(), fmt::join(argument_type_names, ", "), fmt::join(function_argument_type_names, ", ")); + } +} + String DataTypeAggregateFunction::getFunctionName() const { return function->getName(); diff --git a/src/DataTypes/DataTypeAggregateFunction.h b/src/DataTypes/DataTypeAggregateFunction.h index 52ed151107e..e3a4f9726d9 100644 --- a/src/DataTypes/DataTypeAggregateFunction.h +++ b/src/DataTypes/DataTypeAggregateFunction.h @@ -30,13 +30,7 @@ public: static constexpr bool is_parametric = true; DataTypeAggregateFunction(AggregateFunctionPtr function_, const DataTypes & argument_types_, - const Array & parameters_, std::optional version_ = std::nullopt) - : function(std::move(function_)) - , argument_types(argument_types_) - , parameters(parameters_) - , version(version_) - { - } + const Array & parameters_, std::optional version_ = std::nullopt); size_t getVersion() const; From 55fd2e04e331a58b83516fbefa6bad921fa842a3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 24 Jul 2024 17:00:59 +0000 Subject: [PATCH 1594/2361] wip --- src/Storages/StorageBuffer.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 04e6d6676d1..4ae9e029e1b 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -271,6 +271,8 @@ void StorageBuffer::read( } else { + if (processed_stage > QueryProcessingStage::FetchColumns) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot read from Buffer table with different structure in non-initial stage of query execution"); /// There is a struct mismatch and we need to convert read blocks from the destination table. 
const Block header = metadata_snapshot->getSampleBlock(); Names columns_intersection = column_names; From 12d917b74268ecb6a86b032d5c00c418c8a48f4e Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 24 Jul 2024 17:01:26 +0000 Subject: [PATCH 1595/2361] wip --- src/DataTypes/DataTypeAggregateFunction.cpp | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp index ee42e4fea11..a4cd3b9e511 100644 --- a/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/src/DataTypes/DataTypeAggregateFunction.cpp @@ -41,23 +41,6 @@ DataTypeAggregateFunction::DataTypeAggregateFunction(AggregateFunctionPtr functi , parameters(parameters_) , version(version_) { - Strings argument_type_names; - for (const auto & argument_type : argument_types) - argument_type_names.push_back(argument_type->getName()); - - Strings function_argument_type_names; - const auto & function_argument_types = function->getArgumentTypes(); - for (const auto & argument_type : function_argument_types) - function_argument_type_names.push_back(argument_type->getName()); - - size_t argument_types_size = std::max(argument_types.size(), function_argument_types.size()); - for (size_t i = 0; i < argument_types_size; ++i) - { - if (argument_types.size() != function_argument_types.size() || !argument_types[i]->equals(*function_argument_types[i])) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Data type AggregateFunction {} got argument types different from function argument types: [{}] != [{}]", - function->getName(), fmt::join(argument_type_names, ", "), fmt::join(function_argument_type_names, ", ")); - } } String DataTypeAggregateFunction::getFunctionName() const From 1329b5eb0b17d5499639bbb973aab7b17c95b644 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 26 Jul 2024 16:22:54 +0000 Subject: [PATCH 1596/2361] Fix Buffer over Distributed --- src/Storages/StorageBuffer.cpp | 46 ++++++++++++-- src/Storages/StorageBuffer.h | 1 + ...r_over_distributed_type_mismatch.reference | 18 ++++++ ..._buffer_over_distributed_type_mismatch.sql | 60 +++++++++++++++++++ 4 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.reference create mode 100644 tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 4ae9e029e1b..f753d369d2d 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -1,3 +1,7 @@ +#include + +#include +#include #include #include #include @@ -23,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -232,6 +235,12 @@ QueryProcessingStage::Enum StorageBuffer::getQueryProcessingStage( return QueryProcessingStage::FetchColumns; } +bool StorageBuffer::isRemote() const +{ + auto destination = getDestinationTable(); + return destination && destination->isRemote(); +} + void StorageBuffer::read( QueryPlan & query_plan, const Names & column_names, @@ -242,6 +251,29 @@ void StorageBuffer::read( size_t max_block_size, size_t num_streams) { + bool allow_experimental_analyzer = local_context->getSettingsRef().allow_experimental_analyzer; + + if (allow_experimental_analyzer && processed_stage > QueryProcessingStage::FetchColumns) + { + /** For query processing stages after FetchColumns, we do not allow using the same table more than once in the query. 
+ * For example: SELECT * FROM buffer t1 JOIN buffer t2 USING (column) + * In that case, we will execute this query separately for the destination table and for the buffer, resulting in incorrect results. + */ + const auto & current_storage_id = getStorageID(); + auto table_nodes = extractAllTableReferences(query_info.query_tree); + size_t count_of_current_storage = 0; + for (const auto & node : table_nodes) + { + const auto & table_node = node->as(); + if (table_node.getStorageID().getFullNameNotQuoted() == current_storage_id.getFullNameNotQuoted()) + { + count_of_current_storage++; + if (count_of_current_storage > 1) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "StorageBuffer over Distributed does not support using the same table more than once in the query"); + } + } + } + const auto & metadata_snapshot = storage_snapshot->metadata; if (auto destination = getDestinationTable()) @@ -271,8 +303,6 @@ void StorageBuffer::read( } else { - if (processed_stage > QueryProcessingStage::FetchColumns) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot read from Buffer table with different structure in non-initial stage of query execution"); /// There is a struct mismatch and we need to convert read blocks from the destination table. const Block header = metadata_snapshot->getSampleBlock(); Names columns_intersection = column_names; @@ -330,13 +360,17 @@ void StorageBuffer::read( } } + src_table_query_info.merge_storage_snapshot = storage_snapshot; destination->read( query_plan, columns_intersection, destination_snapshot, src_table_query_info, local_context, processed_stage, max_block_size, num_streams); - if (query_plan.isInitialized()) + if (query_plan.isInitialized() && processed_stage <= QueryProcessingStage::FetchColumns) { - + /** The code below converts columns from metadata_snapshot to columns from destination_metadata_snapshot. + * This conversion is not applicable for processed_stage > FetchColumns. + * Instead, we rely on the converting actions at the end of this function. 
+ */ auto actions = addMissingDefaults( query_plan.getCurrentDataStream().header, header_after_adding_defaults.getNamesAndTypesList(), @@ -399,7 +433,7 @@ void StorageBuffer::read( /// TODO: Find a way to support projections for StorageBuffer if (processed_stage > QueryProcessingStage::FetchColumns) { - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (allow_experimental_analyzer) { auto storage = std::make_shared( getStorageID(), diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index cd6dd7b933f..02376f286b1 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -84,6 +84,7 @@ public: QueryProcessingStage::Enum processed_stage, size_t max_block_size, size_t num_streams) override; + bool isRemote() const override; bool supportsParallelInsert() const override { return true; } diff --git a/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.reference b/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.reference new file mode 100644 index 00000000000..1dc3acfeccb --- /dev/null +++ b/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.reference @@ -0,0 +1,18 @@ +100 +100 +101 +101 +101 +102 +101 +101 +102 +100 +100 +101 +101 +101 +102 +101 +101 +102 diff --git a/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql b/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql new file mode 100644 index 00000000000..5a7c89074cf --- /dev/null +++ b/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql @@ -0,0 +1,60 @@ + +DROP TABLE IF EXISTS realtimedrep; +CREATE TABLE realtimedrep (`amount` Int32) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO realtimedrep FORMAT Values (100); + +DROP TABLE IF EXISTS realtimedistributed; +CREATE TABLE realtimedistributed (`amount` Int32) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), realtimedrep, rand()); + +DROP TABLE IF EXISTS realtimebuff__fuzz_19; +CREATE TABLE realtimebuff__fuzz_19 (`amount` UInt32) ENGINE = Buffer(currentDatabase(), 'realtimedistributed', 16, 3600, 36000, 10000, 1000000, 10000000, 100000000); +INSERT INTO realtimebuff__fuzz_19 FORMAT Values (101); + +DROP TABLE IF EXISTS realtimebuff__fuzz_20; +CREATE TABLE realtimebuff__fuzz_20 (`amount` Nullable(Int32)) ENGINE = Buffer(currentDatabase(), 'realtimedistributed', 16, 3600, 36000, 10000, 1000000, 10000000, 100000000); +INSERT INTO realtimebuff__fuzz_20 FORMAT Values (101); + +SELECT amount FROM realtimebuff__fuzz_19 t1 ORDER BY ALL; +SELECT amount + 1 FROM realtimebuff__fuzz_19 t1 ORDER BY ALL; +SELECT amount + 1 FROM realtimebuff__fuzz_20 t1 ORDER BY ALL; +SELECT sum(amount) = 100 FROM realtimebuff__fuzz_19 ORDER BY ALL; -- { serverError CANNOT_CONVERT_TYPE } +SELECT sum(amount) = 100 FROM realtimebuff__fuzz_20 ORDER BY ALL; -- { serverError CANNOT_CONVERT_TYPE } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(3) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS allow_experimental_analyzer = 0; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(3) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS allow_experimental_analyzer = 1; + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(300) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS allow_experimental_analyzer = 0; -- { serverError 
UNKNOWN_IDENTIFIER } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(300) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS allow_experimental_analyzer = 1; + +SELECT t2.amount + 1 FROM (SELECT number :: UInt32 AS amount FROM numbers(300) ) t1 +JOIN realtimebuff__fuzz_19 t2 USING (amount) +ORDER BY ALL +; + +SELECT t2.amount + 1 FROM (SELECT number :: UInt32 AS amount FROM numbers(300) ) t1 +JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount +ORDER BY ALL +; + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount +; -- { serverError NOT_IMPLEMENTED,UNKNOWN_IDENTIFIER } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount +JOIN realtimebuff__fuzz_19 t3 ON t1.amount = t3.amount +; -- { serverError NOT_IMPLEMENTED,AMBIGUOUS_COLUMN_NAME } From 65c0efb2d50dc4ac37750505cde1b8d26729b871 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 10:29:49 +0000 Subject: [PATCH 1597/2361] Revert "Merge pull request #66510 from canhld94/fix_trivial_count_non_deterministic_func" This reverts commit bf595ca374af503c087e2eb0f80f79490e5b8faa, reversing changes made to b6b1a7a7790fcce40d2de67c62998a228246e729. --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 21 +++++++------------ src/Storages/VirtualColumnUtils.h | 10 +-------- ..._with_non_deterministic_function.reference | 2 -- ..._count_with_non_deterministic_function.sql | 4 ---- 5 files changed, 9 insertions(+), 30 deletions(-) delete mode 100644 tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference delete mode 100644 tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ce27ad24e10..2286530aa83 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1146,7 +1146,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr); if (!filter_dag) return {}; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 90c2c7f93c1..ba1f4488005 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -275,8 +275,7 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( const ActionsDAG::Node * node, const Block * allowed_inputs, - ActionsDAG::Nodes & additional_nodes, - bool allow_non_deterministic_functions) + ActionsDAG::Nodes & additional_nodes) { if (node->type == ActionsDAG::ActionType::FUNCTION) { @@ -285,14 +284,8 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto & node_copy = additional_nodes.emplace_back(*node); node_copy.children.clear(); for (const auto * child : node->children) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) + if (const 
auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes)) node_copy.children.push_back(child_copy); - /// Expression like (now_allowed AND allowed) is not allowed if allow_non_deterministic_functions = true. This is important for - /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is - /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply - /// trivial count. - else if (!allow_non_deterministic_functions) - return nullptr; if (node_copy.children.empty()) return nullptr; @@ -318,7 +311,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { auto & node_copy = additional_nodes.emplace_back(*node); for (auto & child : node_copy.children) - if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions); !child) + if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes); !child) return nullptr; return &node_copy; @@ -332,7 +325,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto index_hint_dag = index_hint->getActions().clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag.getOutputs()) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) atoms.push_back(child_copy); if (!atoms.empty()) @@ -366,13 +359,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions) +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs) { if (!predicate) return {}; ActionsDAG::Nodes additional_nodes; - const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_non_deterministic_functions); + const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes); if (!res) return {}; @@ -381,7 +374,7 @@ std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context) { - auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false); + auto dag = splitFilterDagForAllowedInputs(predicate, &block); if (dag) filterBlockWithExpression(buildFilterExpression(std::move(*dag), context), block); } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 73b7908b75c..919513b3b38 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -33,15 +33,7 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. -/// When allow_non_deterministic_functions is true then even if the predicate contains non-deterministic -/// functions, we still allow to extract a part of the predicate, otherwise we return nullptr. -/// allow_non_deterministic_functions must be false when we are going to use the result to filter parts in -/// MergeTreeData::totalRowsByPartitionPredicateImp. 
For example, if the query is -/// `SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1` -/// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is -/// non-deterministic. If we still extract the part `_partition_id = '0'` for filtering parts, then trivial -/// count optimization will be mistakenly applied to the query. -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true); +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); /// Extract from the input stream a set of `name` column values template diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference deleted file mode 100644 index 6ed281c757a..00000000000 --- a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference +++ /dev/null @@ -1,2 +0,0 @@ -1 -1 diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql deleted file mode 100644 index bb3269da597..00000000000 --- a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql +++ /dev/null @@ -1,4 +0,0 @@ -CREATE TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x; -INSERT INTO t SELECT 0, number FROM numbers(10) SETTINGS max_block_size = 100; -SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 0; -SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 1; From 719ccaba5acb87734d1bb4cc2c4f5e76ad978c0a Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 5 Aug 2024 18:36:46 +0800 Subject: [PATCH 1598/2361] optimize parquet string column reading --- .../Formats/Impl/ArrowColumnToCHColumn.cpp | 25 +++++++++++++++---- .../Impl/NativeORCBlockInputFormat.cpp | 1 + 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index ed91913de4d..fb56fdd4fe0 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -133,16 +133,31 @@ static ColumnWithTypeAndName readColumnWithStringData(const std::shared_ptr buffer = chunk.value_data(); const size_t chunk_length = chunk.length(); - for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i) + const size_t null_count = chunk.null_count(); + if (null_count == 0) { - if (!chunk.IsNull(offset_i) && buffer) + for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i) { const auto * raw_data = buffer->data() + chunk.value_offset(offset_i); column_chars_t.insert_assume_reserved(raw_data, raw_data + chunk.value_length(offset_i)); - } - column_chars_t.emplace_back('\0'); + column_chars_t.emplace_back('\0'); - column_offsets.emplace_back(column_chars_t.size()); + column_offsets.emplace_back(column_chars_t.size()); + } + } + else + { + for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i) + { + if (!chunk.IsNull(offset_i) && buffer) + { + const auto * raw_data = buffer->data() + chunk.value_offset(offset_i); + column_chars_t.insert_assume_reserved(raw_data, raw_data + chunk.value_length(offset_i)); + } + 
column_chars_t.emplace_back('\0'); + + column_offsets.emplace_back(column_chars_t.size()); + } } } return {std::move(internal_column), std::move(internal_type), column_name}; diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index a0a80ec4a58..81bea0af53b 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1,4 +1,5 @@ #include "NativeORCBlockInputFormat.h" +#include "Columns/ColumnsCommon.h" #if USE_ORC # include From f147e5c39e19d1097361571ebedf4507c744c700 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Mon, 5 Aug 2024 18:37:55 +0800 Subject: [PATCH 1599/2361] optimize parquet string column reading --- src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 81bea0af53b..a0a80ec4a58 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1,5 +1,4 @@ #include "NativeORCBlockInputFormat.h" -#include "Columns/ColumnsCommon.h" #if USE_ORC # include From 3802b1ed6c7174b0d95bf1c89d339187fe6dc69d Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 5 Aug 2024 12:42:57 +0200 Subject: [PATCH 1600/2361] Update comment --- src/Interpreters/Cache/FileCache.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 4c17afb79be..aff4e48d01d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -326,6 +326,8 @@ std::vector FileCache::splitRange(size_t offset, size_t size /// ^ ^ /// right offset aligned_right_offset /// [_________] <-- last cached file segment, e.g. we have uncovered suffix of the requested range + /// ^ + /// last_file_segment_right_offset /// [________________] /// size /// [____________________________________] @@ -335,8 +337,9 @@ std::vector FileCache::splitRange(size_t offset, size_t size /// and get something like this: /// /// [________________________] - /// ^ ^ - /// right_offset right_offset + max_file_segment_size + /// ^ ^ + /// | last_file_segment_right_offset + max_file_segment_size + /// last_file_segment_right_offset /// e.g. there is no need to create sub-segment for range (right_offset + max_file_segment_size, aligned_right_offset]. /// Because its left offset would be bigger than right_offset. /// Therefore, we set end_pos_non_included as offset+size, but remaining_size as aligned_size. From 6a0c0e7b1d4922b616d706820d9000ffe8040d63 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 5 Aug 2024 12:48:21 +0200 Subject: [PATCH 1601/2361] Update FileCache.cpp --- src/Interpreters/Cache/FileCache.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index aff4e48d01d..ed91e41db17 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -340,7 +340,7 @@ std::vector FileCache::splitRange(size_t offset, size_t size /// ^ ^ /// | last_file_segment_right_offset + max_file_segment_size /// last_file_segment_right_offset - /// e.g. 
there is no need to create sub-segment for range (right_offset + max_file_segment_size, aligned_right_offset]. + /// e.g. there is no need to create sub-segment for range (last_file_segment_right_offset + max_file_segment_size, aligned_right_offset]. /// Because its left offset would be bigger than right_offset. /// Therefore, we set end_pos_non_included as offset+size, but remaining_size as aligned_size. From 67fe443133c277f78ecf1a11d542c61b1d805a59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 10:48:47 +0000 Subject: [PATCH 1602/2361] Fix build --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 2286530aa83..49888596fbb 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1146,7 +1146,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr); if (!filter_dag) return {}; From 1a3e9d147441f86d7a00b78873b0a07b6f292e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 11:15:37 +0000 Subject: [PATCH 1603/2361] Style fixes --- src/Storages/Kafka/KafkaConfigLoader.cpp | 5 +++++ src/Storages/Kafka/StorageKafkaUtils.cpp | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Storages/Kafka/KafkaConfigLoader.cpp b/src/Storages/Kafka/KafkaConfigLoader.cpp index 3d31a987395..000e08e2276 100644 --- a/src/Storages/Kafka/KafkaConfigLoader.cpp +++ b/src/Storages/Kafka/KafkaConfigLoader.cpp @@ -19,6 +19,11 @@ extern const Metric KafkaLibrdkafkaThreads; namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + template struct KafkaInterceptors { diff --git a/src/Storages/Kafka/StorageKafkaUtils.cpp b/src/Storages/Kafka/StorageKafkaUtils.cpp index c510303f45e..a2e3683f769 100644 --- a/src/Storages/Kafka/StorageKafkaUtils.cpp +++ b/src/Storages/Kafka/StorageKafkaUtils.cpp @@ -56,7 +56,6 @@ using namespace std::chrono_literals; namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int SUPPORT_IS_DISABLED; From 048e3f56e864e1cd36b5e7df7e73482f71c8a9a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 11:15:48 +0000 Subject: [PATCH 1604/2361] Remove redundant scope --- src/Storages/Kafka/StorageKafka2.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index f58d629dd9b..3574b46e3b0 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -652,12 +652,10 @@ void StorageKafka2::dropReplica() return; } - { - my_keeper->tryRemoveChildrenRecursive(replica_path); + my_keeper->tryRemoveChildrenRecursive(replica_path); - if (my_keeper->tryRemove(replica_path) != Coordination::Error::ZOK) - LOG_ERROR(log, "Replica was not completely removed from Keeper, {} still exists and may contain some garbage.", replica_path); - } + if 
(my_keeper->tryRemove(replica_path) != Coordination::Error::ZOK) + LOG_ERROR(log, "Replica was not completely removed from Keeper, {} still exists and may contain some garbage.", replica_path); /// Check that `zookeeper_path` exists: it could have been deleted by another replica after execution of previous line. Strings replicas; From 67bddde6287f21a702d2e134921a9f00073959f6 Mon Sep 17 00:00:00 2001 From: Max K Date: Sat, 3 Aug 2024 21:26:05 +0200 Subject: [PATCH 1605/2361] move Check Descriptions to commit_status_helper --- .yamllint | 6 - tests/ci/ci_config.py | 2 - tests/ci/ci_definitions.py | 187 +--------------------------- tests/ci/commit_status_helper.py | 203 +++++++++++++++++++++++++++++-- 4 files changed, 195 insertions(+), 203 deletions(-) diff --git a/.yamllint b/.yamllint index 7fb741ec9f4..b8f7c93e246 100644 --- a/.yamllint +++ b/.yamllint @@ -5,12 +5,6 @@ rules: indentation: level: warning indent-sequences: consistent - line-length: - # there are: - # - bash -c "", so this is OK - # - yaml in tests - max: 1000 - level: warning comments: min-spaces-from-content: 1 document-start: disable diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index ef48466e451..8cb587a1062 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -20,12 +20,10 @@ class CI: from ci_definitions import BuildConfig as BuildConfig from ci_definitions import DigestConfig as DigestConfig from ci_definitions import JobConfig as JobConfig - from ci_definitions import CheckDescription as CheckDescription from ci_definitions import Tags as Tags from ci_definitions import JobNames as JobNames from ci_definitions import BuildNames as BuildNames from ci_definitions import StatusNames as StatusNames - from ci_definitions import CHECK_DESCRIPTIONS as CHECK_DESCRIPTIONS from ci_definitions import REQUIRED_CHECKS as REQUIRED_CHECKS from ci_definitions import SyncState as SyncState from ci_definitions import MQ_JOBS as MQ_JOBS diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 795bda3d4b0..48847b0d7a6 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -1,7 +1,7 @@ import copy from dataclasses import dataclass, field from pathlib import Path -from typing import Callable, List, Union, Iterable, Optional, Literal, Any +from typing import List, Union, Iterable, Optional, Literal, Any from ci_utils import WithIter from integration_test_images import IMAGES @@ -646,188 +646,3 @@ MQ_JOBS = [ BuildNames.BINARY_RELEASE, JobNames.UNIT_TEST, ] - - -@dataclass -class CheckDescription: - name: str - description: str # the check descriptions, will be put into the status table - match_func: Callable[[str], bool] # the function to check vs the commit status - - def __hash__(self) -> int: - return hash(self.name + self.description) - - -CHECK_DESCRIPTIONS = [ - CheckDescription( - StatusNames.PR_CHECK, - "Checks correctness of the PR's body", - lambda x: x == "PR Check", - ), - CheckDescription( - StatusNames.SYNC, - "If it fails, ask a maintainer for help", - lambda x: x == StatusNames.SYNC, - ), - CheckDescription( - "AST fuzzer", - "Runs randomly generated queries to catch program errors. " - "The build type is optionally given in parenthesis. 
" - "If it fails, ask a maintainer for help", - lambda x: x.startswith("AST fuzzer"), - ), - CheckDescription( - JobNames.BUGFIX_VALIDATE, - "Checks that either a new test (functional or integration) or there " - "some changed tests that fail with the binary built on master branch", - lambda x: x == JobNames.BUGFIX_VALIDATE, - ), - CheckDescription( - StatusNames.CI, - "A meta-check that indicates the running CI. Normally, it's in success or " - "pending state. The failed status indicates some problems with the PR", - lambda x: x == "CI running", - ), - CheckDescription( - "Builds", - "Builds ClickHouse in various configurations for use in further steps. " - "You have to fix the builds that fail. Build logs often has enough " - "information to fix the error, but you might have to reproduce the failure " - "locally. The cmake options can be found in the build log, grepping for " - 'cmake. Use these options and follow the
general build process', - lambda x: x.startswith("ClickHouse") and x.endswith("build check"), - ), - CheckDescription( - "Compatibility check", - "Checks that clickhouse binary runs on distributions with old libc " - "versions. If it fails, ask a maintainer for help", - lambda x: x.startswith("Compatibility check"), - ), - CheckDescription( - JobNames.DOCKER_SERVER, - "The check to build and optionally push the mentioned image to docker hub", - lambda x: x.startswith("Docker server"), - ), - CheckDescription( - JobNames.DOCKER_KEEPER, - "The check to build and optionally push the mentioned image to docker hub", - lambda x: x.startswith("Docker keeper"), - ), - CheckDescription( - JobNames.DOCS_CHECK, - "Builds and tests the documentation", - lambda x: x == JobNames.DOCS_CHECK, - ), - CheckDescription( - JobNames.FAST_TEST, - "Normally this is the first check that is ran for a PR. It builds ClickHouse " - 'and runs most of stateless functional tests, ' - "omitting some. If it fails, further checks are not started until it is fixed. " - "Look at the report to see which tests fail, then reproduce the failure " - 'locally as described here', - lambda x: x == JobNames.FAST_TEST, - ), - CheckDescription( - "Flaky tests", - "Checks if new added or modified tests are flaky by running them repeatedly, " - "in parallel, with more randomization. Functional tests are run 100 times " - "with address sanitizer, and additional randomization of thread scheduling. " - "Integration tests are run up to 10 times. If at least once a new test has " - "failed, or was too long, this check will be red. We don't allow flaky tests, " - 'read the doc', - lambda x: "tests flaky check" in x, - ), - CheckDescription( - "Install packages", - "Checks that the built packages are installable in a clear environment", - lambda x: x.startswith("Install packages ("), - ), - CheckDescription( - "Integration tests", - "The integration tests report. In parenthesis the package type is given, " - "and in square brackets are the optional part/total tests", - lambda x: x.startswith("Integration tests ("), - ), - CheckDescription( - StatusNames.MERGEABLE, - "Checks if all other necessary checks are successful", - lambda x: x == StatusNames.MERGEABLE, - ), - CheckDescription( - "Performance Comparison", - "Measure changes in query performance. The performance test report is " - 'described in detail here. 
' - "In square brackets are the optional part/total tests", - lambda x: x.startswith("Performance Comparison"), - ), - CheckDescription( - "Push to Dockerhub", - "The check for building and pushing the CI related docker images to docker hub", - lambda x: x.startswith("Push") and "to Dockerhub" in x, - ), - CheckDescription( - "Sqllogic", - "Run clickhouse on the " - 'sqllogic ' - "test set against sqlite and checks that all statements are passed", - lambda x: x.startswith("Sqllogic test"), - ), - CheckDescription( - "SQLancer", - "Fuzzing tests that detect logical bugs with " - 'SQLancer tool', - lambda x: x.startswith("SQLancer"), - ), - CheckDescription( - "Stateful tests", - "Runs stateful functional tests for ClickHouse binaries built in various " - "configurations -- release, debug, with sanitizers, etc", - lambda x: x.startswith("Stateful tests ("), - ), - CheckDescription( - "Stateless tests", - "Runs stateless functional tests for ClickHouse binaries built in various " - "configurations -- release, debug, with sanitizers, etc", - lambda x: x.startswith("Stateless tests ("), - ), - CheckDescription( - "Stress test", - "Runs stateless functional tests concurrently from several clients to detect " - "concurrency-related errors", - lambda x: x.startswith("Stress test ("), - ), - CheckDescription( - JobNames.STYLE_CHECK, - "Runs a set of checks to keep the code style clean. If some of tests failed, " - "see the related log from the report", - lambda x: x == JobNames.STYLE_CHECK, - ), - CheckDescription( - "Unit tests", - "Runs the unit tests for different release types", - lambda x: x.startswith("Unit tests ("), - ), - CheckDescription( - "Upgrade check", - "Runs stress tests on server version from last release and then tries to " - "upgrade it to the version from the PR. It checks if the new server can " - "successfully startup without any errors, crashes or sanitizer asserts", - lambda x: x.startswith("Upgrade check ("), - ), - CheckDescription( - "ClickBench", - "Runs [ClickBench](https://github.com/ClickHouse/ClickBench/) with instant-attach table", - lambda x: x.startswith("ClickBench"), - ), - CheckDescription( - "Fallback for unknown", - "There's no description for the check yet, please add it to " - "tests/ci/ci_config.py:CHECK_DESCRIPTIONS", - lambda x: True, - ), -] diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py index fdc9c002b66..8967d453622 100644 --- a/tests/ci/commit_status_helper.py +++ b/tests/ci/commit_status_helper.py @@ -7,7 +7,7 @@ import time from collections import defaultdict from dataclasses import asdict, dataclass from pathlib import Path -from typing import Dict, List, Optional, Union +from typing import Dict, List, Optional, Union, Callable from github import Github from github.Commit import Commit @@ -176,7 +176,7 @@ def set_status_comment(commit: Commit, pr_info: PRInfo) -> None: if not [status for status in statuses if status.context == CI.StatusNames.CI]: # This is the case, when some statuses already exist for the check, - # but not the StatusNames.CI. We should create it as pending. + # but not the CI.StatusNames.CI. We should create it as pending. 
# W/o pr_info to avoid recursion, and yes, one extra create_ci_report post_commit_status( commit, @@ -226,20 +226,20 @@ def generate_status_comment(pr_info: PRInfo, statuses: CommitStatuses) -> str: f"\n" ) # group checks by the name to get the worst one per each - grouped_statuses = {} # type: Dict[CI.CheckDescription, CommitStatuses] + grouped_statuses = {} # type: Dict[CheckDescription, CommitStatuses] for status in statuses: cd = None - for c in CI.CHECK_DESCRIPTIONS: + for c in CHECK_DESCRIPTIONS: if c.match_func(status.context): cd = c break - if cd is None or cd == CI.CHECK_DESCRIPTIONS[-1]: + if cd is None or cd == CHECK_DESCRIPTIONS[-1]: # This is the case for either non-found description or a fallback - cd = CI.CheckDescription( + cd = CheckDescription( status.context, - CI.CHECK_DESCRIPTIONS[-1].description, - CI.CHECK_DESCRIPTIONS[-1].match_func, + CHECK_DESCRIPTIONS[-1].description, + CHECK_DESCRIPTIONS[-1].match_func, ) if cd in grouped_statuses: @@ -459,7 +459,7 @@ def trigger_mergeable_check( set_from_sync: bool = False, workflow_failed: bool = False, ) -> StatusType: - """calculate and update StatusNames.MERGEABLE""" + """calculate and update CI.StatusNames.MERGEABLE""" required_checks = [status for status in statuses if CI.is_required(status.context)] mergeable_status = None @@ -536,3 +536,188 @@ def update_upstream_sync_status( get_commit_filtered_statuses(last_synced_upstream_commit), set_from_sync=True, ) + + +@dataclass +class CheckDescription: + name: str + description: str # the check descriptions, will be put into the status table + match_func: Callable[[str], bool] # the function to check vs the commit status + + def __hash__(self) -> int: + return hash(self.name + self.description) + + +CHECK_DESCRIPTIONS = [ + CheckDescription( + CI.StatusNames.PR_CHECK, + "Checks correctness of the PR's body", + lambda x: x == "PR Check", + ), + CheckDescription( + CI.StatusNames.SYNC, + "If it fails, ask a maintainer for help", + lambda x: x == CI.StatusNames.SYNC, + ), + CheckDescription( + "AST fuzzer", + "Runs randomly generated queries to catch program errors. " + "The build type is optionally given in parenthesis. " + "If it fails, ask a maintainer for help", + lambda x: x.startswith("AST fuzzer"), + ), + CheckDescription( + CI.JobNames.BUGFIX_VALIDATE, + "Checks that either a new test (functional or integration) or there " + "some changed tests that fail with the binary built on master branch", + lambda x: x == CI.JobNames.BUGFIX_VALIDATE, + ), + CheckDescription( + CI.StatusNames.CI, + "A meta-check that indicates the running CI. Normally, it's in success or " + "pending state. The failed status indicates some problems with the PR", + lambda x: x == "CI running", + ), + CheckDescription( + "Builds", + "Builds ClickHouse in various configurations for use in further steps. " + "You have to fix the builds that fail. Build logs often has enough " + "information to fix the error, but you might have to reproduce the failure " + "locally. The cmake options can be found in the build log, grepping for " + 'cmake. Use these options and follow the general build process', + lambda x: x.startswith("ClickHouse") and x.endswith("build check"), + ), + CheckDescription( + "Compatibility check", + "Checks that clickhouse binary runs on distributions with old libc " + "versions. 
If it fails, ask a maintainer for help", + lambda x: x.startswith("Compatibility check"), + ), + CheckDescription( + CI.JobNames.DOCKER_SERVER, + "The check to build and optionally push the mentioned image to docker hub", + lambda x: x.startswith("Docker server"), + ), + CheckDescription( + CI.JobNames.DOCKER_KEEPER, + "The check to build and optionally push the mentioned image to docker hub", + lambda x: x.startswith("Docker keeper"), + ), + CheckDescription( + CI.JobNames.DOCS_CHECK, + "Builds and tests the documentation", + lambda x: x == CI.JobNames.DOCS_CHECK, + ), + CheckDescription( + CI.JobNames.FAST_TEST, + "Normally this is the first check that is ran for a PR. It builds ClickHouse " + 'and runs most of stateless functional tests, ' + "omitting some. If it fails, further checks are not started until it is fixed. " + "Look at the report to see which tests fail, then reproduce the failure " + 'locally as described here', + lambda x: x == CI.JobNames.FAST_TEST, + ), + CheckDescription( + "Flaky tests", + "Checks if new added or modified tests are flaky by running them repeatedly, " + "in parallel, with more randomization. Functional tests are run 100 times " + "with address sanitizer, and additional randomization of thread scheduling. " + "Integration tests are run up to 10 times. If at least once a new test has " + "failed, or was too long, this check will be red. We don't allow flaky tests, " + 'read the doc', + lambda x: "tests flaky check" in x, + ), + CheckDescription( + "Install packages", + "Checks that the built packages are installable in a clear environment", + lambda x: x.startswith("Install packages ("), + ), + CheckDescription( + "Integration tests", + "The integration tests report. In parenthesis the package type is given, " + "and in square brackets are the optional part/total tests", + lambda x: x.startswith("Integration tests ("), + ), + CheckDescription( + CI.StatusNames.MERGEABLE, + "Checks if all other necessary checks are successful", + lambda x: x == CI.StatusNames.MERGEABLE, + ), + CheckDescription( + "Performance Comparison", + "Measure changes in query performance. The performance test report is " + 'described in detail here. 
' + "In square brackets are the optional part/total tests", + lambda x: x.startswith("Performance Comparison"), + ), + CheckDescription( + "Push to Dockerhub", + "The check for building and pushing the CI related docker images to docker hub", + lambda x: x.startswith("Push") and "to Dockerhub" in x, + ), + CheckDescription( + "Sqllogic", + "Run clickhouse on the " + 'sqllogic ' + "test set against sqlite and checks that all statements are passed", + lambda x: x.startswith("Sqllogic test"), + ), + CheckDescription( + "SQLancer", + "Fuzzing tests that detect logical bugs with " + 'SQLancer tool', + lambda x: x.startswith("SQLancer"), + ), + CheckDescription( + "Stateful tests", + "Runs stateful functional tests for ClickHouse binaries built in various " + "configurations -- release, debug, with sanitizers, etc", + lambda x: x.startswith("Stateful tests ("), + ), + CheckDescription( + "Stateless tests", + "Runs stateless functional tests for ClickHouse binaries built in various " + "configurations -- release, debug, with sanitizers, etc", + lambda x: x.startswith("Stateless tests ("), + ), + CheckDescription( + "Stress test", + "Runs stateless functional tests concurrently from several clients to detect " + "concurrency-related errors", + lambda x: x.startswith("Stress test ("), + ), + CheckDescription( + CI.JobNames.STYLE_CHECK, + "Runs a set of checks to keep the code style clean. If some of tests failed, " + "see the related log from the report", + lambda x: x == CI.JobNames.STYLE_CHECK, + ), + CheckDescription( + "Unit tests", + "Runs the unit tests for different release types", + lambda x: x.startswith("Unit tests ("), + ), + CheckDescription( + "Upgrade check", + "Runs stress tests on server version from last release and then tries to " + "upgrade it to the version from the PR. It checks if the new server can " + "successfully startup without any errors, crashes or sanitizer asserts", + lambda x: x.startswith("Upgrade check ("), + ), + CheckDescription( + "ClickBench", + "Runs [ClickBench](https://github.com/ClickHouse/ClickBench/) with instant-attach table", + lambda x: x.startswith("ClickBench"), + ), + CheckDescription( + "Fallback for unknown", + "There's no description for the check yet, please add it to " + "tests/ci/ci_config.py:CHECK_DESCRIPTIONS", + lambda x: True, + ), +] From 2d92cd71a83ed07e6f22c2ba37d39926837a9df0 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 5 Aug 2024 09:36:09 +0000 Subject: [PATCH 1606/2361] add some comments --- src/Interpreters/inplaceBlockConversions.cpp | 9 +++++++-- src/Storages/MergeTree/IMergeTreeReader.cpp | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/inplaceBlockConversions.cpp b/src/Interpreters/inplaceBlockConversions.cpp index 429d467ffbf..68254768a7d 100644 --- a/src/Interpreters/inplaceBlockConversions.cpp +++ b/src/Interpreters/inplaceBlockConversions.cpp @@ -309,6 +309,7 @@ static bool hasDefault(const StorageMetadataPtr & metadata_snapshot, const NameA static String removeTupleElementsFromSubcolumn(String subcolumn_name, const Names & tuple_elements) { + /// Add a dot to the end of name for convenience. subcolumn_name += "."; for (const auto & elem : tuple_elements) { @@ -396,19 +397,23 @@ void fillMissingColumns( Names tuple_elements; auto serialization = IDataType::getSerialization(*requested_column); + /// For Nested columns collect names of tuple elements and skip them while getting the base type of array. 
IDataType::forEachSubcolumn([&](const auto & path, const auto &, const auto &) { if (path.back().type == ISerialization::Substream::TupleElement) tuple_elements.push_back(path.back().name_of_substream); }, ISerialization::SubstreamData(serialization)); + /// The number of dimensions that belongs to the array itself but not shared in Nested column. + /// For example for column "n Nested(a UInt64, b Array(UInt64))" this value is 0 for `n.a` and 1 for `n.b`. size_t num_empty_dimensions = num_dimensions - current_offsets.size(); + auto base_type = getBaseTypeOfArray(requested_column->getTypeInStorage(), tuple_elements); auto scalar_type = createArrayOfType(base_type, num_empty_dimensions); - size_t data_size = assert_cast(*current_offsets.back()).getData().back(); - auto subcolumn_name = removeTupleElementsFromSubcolumn(requested_column->getSubcolumnName(), tuple_elements); + /// Remove names of tuple elements because they are already processed by 'getBaseTypeOfArray'. + auto subcolumn_name = removeTupleElementsFromSubcolumn(requested_column->getSubcolumnName(), tuple_elements); res_columns[i] = createColumnWithDefaultValue(*scalar_type, subcolumn_name, data_size); for (auto it = current_offsets.rbegin(); it != current_offsets.rend(); ++it) diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index 05a0b5a7dbc..e0b2710c61f 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -150,6 +150,7 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns NamesAndTypesList full_requested_columns; /// Convert columns list to block. And convert subcolumns to full columns. + /// Defaults should be executed on full columns to get correct values for subcolumns. /// TODO: rewrite with columns interface. It will be possible after changes in ExpressionActions. auto it = original_requested_columns.begin(); From 3244002cae58da99c8b088888376c0fda7f3f1f4 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 5 Aug 2024 13:22:10 +0200 Subject: [PATCH 1607/2361] Update FileCache.cpp --- src/Interpreters/Cache/FileCache.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index ed91e41db17..1a15efa7cf8 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -560,7 +560,7 @@ FileCache::getOrSet( FileSegment::Range initial_range(offset, offset + size - 1); /// result_range is initial range, which will be adjusted according to - /// 1. aligned offset, alighed_end_offset + /// 1. aligned_offset, aligned_end_offset /// 2. 
max_file_segments_limit FileSegment::Range result_range = initial_range; From a296beb39084492a6825879ff5258f4377ead75b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 5 Aug 2024 13:31:53 +0200 Subject: [PATCH 1608/2361] Unit test: Mark as FAILURE if retcode != 0 --- tests/ci/unit_tests_check.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index 6430fa78801..9cc8ec379bf 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -197,6 +197,11 @@ def main(): subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {TEMP_PATH}", shell=True) state, description, test_results = process_results(test_output) + if retcode != 0 and state == SUCCESS: + # The process might have failed without reporting it in the test_output (e.g. LeakSanitizer) + state = FAILURE + description = "Invalid return code. Check run.log" + additional_files = [run_log_path] + [ p for p in test_output.iterdir() if not p.is_dir() ] From fd1e354e8503a968eff9dbe614a47c1135f67bdd Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Mon, 5 Aug 2024 13:43:06 +0200 Subject: [PATCH 1609/2361] fix flaky check for integration tests --- .../test_storage_azure_blob_storage/test.py | 82 +++++++++++++------ .../test_cluster.py | 18 ++-- tests/integration/test_storage_hdfs/test.py | 47 +++++++---- 3 files changed, 94 insertions(+), 53 deletions(-) diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 6966abfee4f..15a1f6db2c1 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -135,6 +135,7 @@ def test_create_table_connection_string(cluster): Engine = AzureBlobStorage('{cluster.env_variables['AZURITE_CONNECTION_STRING']}', 'cont', 'test_create_connection_string', 'CSV') """, ) + azure_query(node, "DROP TABLE IF EXISTS test_create_table_conn_string") def test_create_table_account_string(cluster): @@ -144,6 +145,7 @@ def test_create_table_account_string(cluster): f"CREATE TABLE test_create_table_account_url (key UInt64, data String) Engine = AzureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," f"'cont', 'test_create_connection_string', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV')", ) + azure_query(node, "DROP TABLE IF EXISTS test_create_table_account_url") def test_simple_write_account_string(cluster): @@ -157,6 +159,7 @@ def test_simple_write_account_string(cluster): azure_query(node, "INSERT INTO test_simple_write VALUES (1, 'a')") print(get_azure_file_content("test_simple_write.csv", port)) assert get_azure_file_content("test_simple_write.csv", port) == '1,"a"\n' + azure_query(node, "DROP TABLE test_simple_write") def test_simple_write_connection_string(cluster): @@ -170,6 +173,7 @@ def test_simple_write_connection_string(cluster): azure_query(node, "INSERT INTO test_simple_write_connection_string VALUES (1, 'a')") print(get_azure_file_content("test_simple_write_c.csv", port)) assert get_azure_file_content("test_simple_write_c.csv", port) == '1,"a"\n' + azure_query(node, "DROP TABLE test_simple_write_connection_string") def test_simple_write_named_collection_1(cluster): @@ -185,7 +189,7 @@ def test_simple_write_named_collection_1(cluster): ) print(get_azure_file_content("test_simple_write_named.csv", port)) assert 
get_azure_file_content("test_simple_write_named.csv", port) == '1,"a"\n' - azure_query(node, "TRUNCATE TABLE test_simple_write_named_collection_1") + azure_query(node, "DROP TABLE test_simple_write_named_collection_1") def test_simple_write_named_collection_2(cluster): @@ -202,6 +206,7 @@ def test_simple_write_named_collection_2(cluster): ) print(get_azure_file_content("test_simple_write_named_2.csv", port)) assert get_azure_file_content("test_simple_write_named_2.csv", port) == '1,"a"\n' + azure_query(node, "DROP TABLE test_simple_write_named_collection_2") def test_partition_by(cluster): @@ -223,6 +228,7 @@ def test_partition_by(cluster): assert "1,2,3\n" == get_azure_file_content("test_3.csv", port) assert "3,2,1\n" == get_azure_file_content("test_1.csv", port) assert "78,43,45\n" == get_azure_file_content("test_45.csv", port) + azure_query(node, "DROP TABLE test_partitioned_write") def test_partition_by_string_column(cluster): @@ -243,6 +249,7 @@ def test_partition_by_string_column(cluster): assert '1,"foo/bar"\n' == get_azure_file_content("test_foo/bar.csv", port) assert '3,"йцук"\n' == get_azure_file_content("test_йцук.csv", port) assert '78,"你好"\n' == get_azure_file_content("test_你好.csv", port) + azure_query(node, "DROP TABLE test_partitioned_string_write") def test_partition_by_const_column(cluster): @@ -261,6 +268,7 @@ def test_partition_by_const_column(cluster): ) azure_query(node, f"INSERT INTO test_partitioned_const_write VALUES {values}") assert values_csv == get_azure_file_content("test_88.csv", port) + azure_query(node, "DROP TABLE test_partitioned_const_write") def test_truncate(cluster): @@ -276,6 +284,7 @@ def test_truncate(cluster): azure_query(node, "TRUNCATE TABLE test_truncate") with pytest.raises(Exception): print(get_azure_file_content("test_truncate.csv", port)) + azure_query(node, "DROP TABLE test_truncate") def test_simple_read_write(cluster): @@ -292,6 +301,7 @@ def test_simple_read_write(cluster): assert get_azure_file_content("test_simple_read_write.csv", port) == '1,"a"\n' print(azure_query(node, "SELECT * FROM test_simple_read_write")) assert azure_query(node, "SELECT * FROM test_simple_read_write") == "1\ta\n" + azure_query(node, "DROP TABLE test_simple_read_write") def test_create_new_files_on_insert(cluster): @@ -344,6 +354,7 @@ def test_overwrite(cluster): result = azure_query(node, f"select count() from test_overwrite") assert int(result) == 200 + azure_query(node, f"DROP TABLE test_overwrite") def test_insert_with_path_with_globs(cluster): @@ -356,6 +367,7 @@ def test_insert_with_path_with_globs(cluster): node.query_and_get_error( f"insert into table function test_insert_globs SELECT number, randomString(100) FROM numbers(500)" ) + azure_query(node, f"DROP TABLE test_insert_globs") def test_put_get_with_globs(cluster): @@ -364,6 +376,7 @@ def test_put_get_with_globs(cluster): node = cluster.instances["node"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" max_path = "" + used_names = [] for i in range(10): for j in range(10): path = "{}/{}_{}/{}.csv".format( @@ -372,6 +385,8 @@ def test_put_get_with_globs(cluster): max_path = max(path, max_path) values = f"({i},{j},{i + j})" + used_names.append(f"test_put_{i}_{j}") + azure_query( node, f"CREATE TABLE test_put_{i}_{j} ({table_format}) Engine = AzureBlobStorage(azure_conf2, " @@ -392,6 +407,9 @@ def test_put_get_with_globs(cluster): bucket="cont", max_path=max_path ) ] + azure_query(node, "DROP TABLE test_glob_select") + for name in used_names: + 
azure_query(node, f"DROP TABLE {name}") def test_azure_glob_scheherazade(cluster): @@ -400,12 +418,14 @@ def test_azure_glob_scheherazade(cluster): values = "(1, 1, 1)" nights_per_job = 1001 // 30 jobs = [] + used_names = [] for night in range(0, 1001, nights_per_job): def add_tales(start, end): for i in range(start, end): path = "night_{}/tale.csv".format(i) unique_num = random.randint(1, 10000) + used_names.append(f"test_scheherazade_{i}_{unique_num}") azure_query( node, f"CREATE TABLE test_scheherazade_{i}_{unique_num} ({table_format}) Engine = AzureBlobStorage(azure_conf2, " @@ -433,6 +453,9 @@ def test_azure_glob_scheherazade(cluster): ) query = "select count(), sum(column1), sum(column2), sum(column3) from test_glob_select_scheherazade" assert azure_query(node, query).splitlines() == ["1001\t1001\t1001\t1001"] + azure_query(node, "DROP TABLE test_glob_select_scheherazade") + for name in used_names: + azure_query(node, f"DROP TABLE {name}") @pytest.mark.parametrize( @@ -506,6 +529,8 @@ def test_schema_inference_no_globs(cluster): assert azure_query(node, query).splitlines() == [ "499500\t2890\t332833500\ttest_schema_inference_no_globs.csv\tcont/test_schema_inference_no_globs.csv" ] + azure_query(node, f"DROP TABLE test_schema_inference_src") + azure_query(node, f"DROP TABLE test_select_inference") def test_schema_inference_from_globs(cluster): @@ -514,6 +539,7 @@ def test_schema_inference_from_globs(cluster): node = cluster.instances["node"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" max_path = "" + used_names = [] for i in range(10): for j in range(10): path = "{}/{}_{}/{}.csv".format( @@ -521,6 +547,7 @@ def test_schema_inference_from_globs(cluster): ) max_path = max(path, max_path) values = f"({i},{j},{i + j})" + used_names.append(f"test_schema_{i}_{j}") azure_query( node, @@ -546,6 +573,9 @@ def test_schema_inference_from_globs(cluster): bucket="cont", max_path=max_path ) ] + azure_query(node, "DROP TABLE test_glob_select_inference") + for name in used_names: + azure_query(node, f"DROP TABLE {name}") def test_simple_write_account_string_table_function(cluster): @@ -595,7 +625,7 @@ def test_simple_write_named_collection_1_table_function(cluster): azure_query( node, - "TRUNCATE TABLE drop_table", + "DROP TABLE drop_table", ) @@ -605,7 +635,7 @@ def test_simple_write_named_collection_2_table_function(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a')", + f" container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a') SETTINGS azure_truncate_on_insert=1", ) print(get_azure_file_content("test_simple_write_named_2_tf.csv", port)) assert get_azure_file_content("test_simple_write_named_2_tf.csv", port) == '1,"a"\n' @@ -628,7 +658,7 @@ def test_put_get_with_globs_tf(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values} SETTINGS azure_truncate_on_insert=1", ) query = ( 
f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, " @@ -649,7 +679,7 @@ def test_schema_inference_no_globs_tf(cluster): query = ( f"insert into table function azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " f"container='cont', blob_path='test_schema_inference_no_globs_tf.csv', format='CSVWithNames', structure='{table_format}') " - f"SELECT number, toString(number), number * number FROM numbers(1000)" + f"SELECT number, toString(number), number * number FROM numbers(1000) SETTINGS azure_truncate_on_insert=1" ) azure_query(node, query) @@ -680,7 +710,7 @@ def test_schema_inference_from_globs_tf(cluster): query = ( f"insert into table function azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " - f"container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values}" + f"container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values} SETTINGS azure_truncate_on_insert=1" ) azure_query(node, query) @@ -708,7 +738,7 @@ def test_partition_by_tf(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " f"'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', " - f"'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", + f"'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values} SETTINGS azure_truncate_on_insert=1", ) assert "1,2,3\n" == get_azure_file_content("test_partition_tf_3.csv", port) @@ -727,7 +757,7 @@ def test_filter_using_file(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', 'cont', '{filename}', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', " - f"'{table_format}') PARTITION BY {partition_by} VALUES {values}", + f"'{table_format}') PARTITION BY {partition_by} VALUES {values} SETTINGS azure_truncate_on_insert=1", ) query = ( @@ -745,7 +775,7 @@ def test_read_subcolumns(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_subcolumns.tsv', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto'," - f" 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)", + f" 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS azure_truncate_on_insert=1", ) azure_query( @@ -795,7 +825,7 @@ def test_read_subcolumn_time(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_subcolumn_time.tsv', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto'," - f" 'a UInt32') select (42)", + f" 'a UInt32') select (42) SETTINGS azure_truncate_on_insert=1", ) res = node.query( @@ -825,7 +855,7 @@ def test_function_signatures(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_signature.csv', '{account_name}', '{account_key}', 'CSV', 'auto', 'column1 UInt32') VALUES (1),(2),(3)", 
+ f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_signature.csv', '{account_name}', '{account_key}', 'CSV', 'auto', 'column1 UInt32') VALUES (1),(2),(3) SETTINGS azure_truncate_on_insert=1", ) # " - connection_string, container_name, blobpath\n" @@ -939,12 +969,12 @@ def test_union_schema_inference_mode(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference1.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'a UInt32') VALUES (1)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference1.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'a UInt32') VALUES (1) SETTINGS azure_truncate_on_insert=1", ) azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'b UInt32') VALUES (2)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'b UInt32') VALUES (2) SETTINGS azure_truncate_on_insert=1", ) node.query("system drop schema cache for azure") @@ -981,7 +1011,7 @@ def test_union_schema_inference_mode(cluster): assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference3.jsonl', '{account_name}', '{account_key}', 'CSV', 'auto', 's String') VALUES ('Error')", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference3.jsonl', '{account_name}', '{account_key}', 'CSV', 'auto', 's String') VALUES ('Error') SETTINGS azure_truncate_on_insert=1", ) error = azure_query( @@ -1003,7 +1033,7 @@ def test_schema_inference_cache(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cache0.jsonl', '{account_name}', '{account_key}') " - f"select * from numbers(100)", + f"select * from numbers(100) SETTINGS azure_truncate_on_insert=1", ) time.sleep(1) @@ -1210,19 +1240,19 @@ def test_filtering_by_file_or_path(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_filter1.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_filter2.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 2", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 2 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_filter3.tsv', 'devstoreaccount1', " - 
f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 3", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 3 SETTINGS azure_truncate_on_insert=1", ) node.query( @@ -1246,19 +1276,19 @@ def test_size_virtual_column(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column1.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column2.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column3.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111 SETTINGS azure_truncate_on_insert=1", ) result = azure_query( @@ -1281,7 +1311,7 @@ def test_format_detection(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt64, y String') select number as x, 'str_' || toString(number) from numbers(0)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt64, y String') select number as x, 'str_' || toString(number) from numbers(0) SETTINGS azure_truncate_on_insert=1", ) azure_query( @@ -1351,7 +1381,7 @@ def test_write_to_globbed_partitioned_path(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" error = azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_data_*_{{_partition_id}}', '{account_name}', '{account_key}', 'CSV', 'auto', 'x UInt64') partition by 42 select 42", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_data_*_{{_partition_id}}', '{account_name}', '{account_key}', 'CSV', 'auto', 'x UInt64') partition by 42 select 42 SETTINGS azure_truncate_on_insert=1", expect_error="true", ) @@ -1475,7 +1505,7 @@ def test_hive_partitioning_with_one_parameter(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES 
{values}", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values} SETTINGS azure_truncate_on_insert=1", ) query = ( @@ -1512,7 +1542,7 @@ def test_hive_partitioning_with_two_parameters(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2} SETTINGS azure_truncate_on_insert=1", ) query = ( @@ -1558,7 +1588,7 @@ def test_hive_partitioning_without_setting(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2} SETTINGS azure_truncate_on_insert=1", ) query = ( diff --git a/tests/integration/test_storage_azure_blob_storage/test_cluster.py b/tests/integration/test_storage_azure_blob_storage/test_cluster.py index 6c5e2d20ca5..4d63016cf9a 100644 --- a/tests/integration/test_storage_azure_blob_storage/test_cluster.py +++ b/tests/integration/test_storage_azure_blob_storage/test_cluster.py @@ -71,7 +71,7 @@ def test_select_all(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_select_all.csv', 'devstoreaccount1'," f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', 'key UInt64, data String') " - f"VALUES (1, 'a'), (2, 'b')", + f"VALUES (1, 'a'), (2, 'b') SETTINGS azure_truncate_on_insert=1", ) print(get_azure_file_content("test_cluster_select_all.csv", port)) @@ -100,7 +100,7 @@ def test_count(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_count.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', " - f"'auto', 'key UInt64') VALUES (1), (2)", + f"'auto', 'key UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", ) print(get_azure_file_content("test_cluster_count.csv", port)) @@ -128,7 +128,7 @@ def test_union_all(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_parquet_union_all', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'Parquet', " - f"'auto', 'a Int32, b String') VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')", + f"'auto', 'a Int32, b String') VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd') SETTINGS azure_truncate_on_insert=1", ) pure_azure = azure_query( @@ -179,7 +179,7 @@ def test_skip_unavailable_shards(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_skip_unavailable.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " - f"'auto', 'a UInt64') VALUES (1), (2)", + f"'auto', 'a UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", ) result = azure_query( 
node, @@ -199,7 +199,7 @@ def test_unset_skip_unavailable_shards(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_unset_skip_unavailable.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " - f"'auto', 'a UInt64') VALUES (1), (2)", + f"'auto', 'a UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", ) result = azure_query( node, @@ -217,7 +217,7 @@ def test_cluster_with_named_collection(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_with_named_collection.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " - f"'auto', 'a UInt64') VALUES (1), (2)", + f"'auto', 'a UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", ) pure_azure = azure_query( @@ -248,7 +248,7 @@ def test_partition_parallel_reading_with_cluster(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', '{filename}', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') " - f"PARTITION BY {partition_by} VALUES {values}", + f"PARTITION BY {partition_by} VALUES {values} SETTINGS azure_truncate_on_insert=1", ) assert "1,2,3\n" == get_azure_file_content("test_tf_3.csv", port) @@ -272,12 +272,12 @@ def test_format_detection(cluster): azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10) SETTINGS azure_truncate_on_insert=1", ) azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection1', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10, 10)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection1', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10, 10) SETTINGS azure_truncate_on_insert=1", ) expected_desc_result = azure_query( diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 79914855782..ca072f59e4b 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -41,6 +41,7 @@ def test_read_write_storage(started_cluster): node1.query("insert into SimpleHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n" assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n" + node1.query("drop table if exists SimpleHDFSStorage") def test_read_write_storage_with_globs(started_cluster): @@ -94,6 +95,11 @@ def test_read_write_storage_with_globs(started_cluster): print(ex) assert "in readonly mode" in str(ex) + node1.query("DROP TABLE HDFSStorageWithRange") + node1.query("DROP TABLE HDFSStorageWithEnum") 
+ node1.query("DROP TABLE HDFSStorageWithQuestionMark") + node1.query("DROP TABLE HDFSStorageWithAsterisk") + def test_storage_with_multidirectory_glob(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -335,6 +341,7 @@ def test_virtual_columns(started_cluster): ) == expected ) + node1.query("DROP TABLE virual_cols") def test_read_files_with_spaces(started_cluster): @@ -356,6 +363,7 @@ def test_read_files_with_spaces(started_cluster): ) assert node1.query("select * from test order by id") == "1\n2\n3\n" fs.delete(dir, recursive=True) + node1.query("DROP TABLE test") def test_truncate_table(started_cluster): @@ -427,7 +435,7 @@ def test_seekable_formats(started_cluster): f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')" ) node1.query( - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"SELECT count() FROM {table_function}") @@ -435,7 +443,7 @@ def test_seekable_formats(started_cluster): table_function = f"hdfs('hdfs://hdfs1:9000/orc', 'ORC', 'a Int32, b String')" node1.query( - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"SELECT count() FROM {table_function}") assert int(result) == 5000000 @@ -459,7 +467,7 @@ def test_read_table_with_default(started_cluster): def test_schema_inference(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000)" + f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/native', 'Native')") @@ -512,6 +520,7 @@ def test_hdfs_directory_not_exist(started_cluster): assert "" == node1.query( "select * from HDFSStorageWithNotExistDir settings hdfs_ignore_file_doesnt_exist=1" ) + node1.query("DROP TABLE HDFSStorageWithNotExistDir") def test_overwrite(started_cluster): @@ -531,6 +540,7 @@ def test_overwrite(started_cluster): result = node1.query(f"select count() from test_overwrite") assert int(result) == 10 + node1.query(f"DROP TABLE test_overwrite") def test_multiple_inserts(started_cluster): @@ -567,6 +577,7 @@ def test_multiple_inserts(started_cluster): result = node1.query(f"select count() from test_multiple_inserts") assert int(result) == 60 + node1.query(f"DROP TABLE test_multiple_inserts") def test_format_detection(started_cluster): @@ -580,10 +591,10 @@ def test_format_detection(started_cluster): def test_schema_inference_with_globs(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL SETTINGS hdfs_truncate_on_insert=1" ) node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" + f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 
'JSONCompactEachRow', 'x Nullable(UInt32)') select 0 SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query( @@ -597,7 +608,7 @@ def test_schema_inference_with_globs(started_cluster): assert sorted(result.split()) == ["0", "\\N"] node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL SETTINGS hdfs_truncate_on_insert=1" ) filename = "data{1,3}.jsoncompacteachrow" @@ -609,7 +620,7 @@ def test_schema_inference_with_globs(started_cluster): assert "All attempts to extract table structure from files failed" in result node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'" + f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]' SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query_and_get_error( @@ -621,7 +632,7 @@ def test_schema_inference_with_globs(started_cluster): def test_insert_select_schema_inference(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x" + f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x SETTINGS hdfs_truncate_on_insert=1" ) result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/test.native.zst')") @@ -664,7 +675,7 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')" ) - node1.query(f"insert into table function {table_function} SELECT 1, 'kek'") + node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "parquet_2" @@ -672,7 +683,7 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')" ) - node1.query(f"insert into table function {table_function} SELECT 1, 'kek'") + node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "kek" @@ -969,11 +980,11 @@ def test_read_subcolumns(started_cluster): node = started_cluster.instances["node1"] node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS hdfs_truncate_on_insert=1" ) node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS hdfs_truncate_on_insert=1" ) res = node.query( @@ -1019,11 +1030,11 @@ def test_union_schema_inference_mode(started_cluster): node = started_cluster.instances["node1"] node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a" + "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') 
select 1 as a SETTINGS hdfs_truncate_on_insert=1" ) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b" + "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b SETTINGS hdfs_truncate_on_insert=1" ) node.query("system drop schema cache for hdfs") @@ -1055,7 +1066,7 @@ def test_union_schema_inference_mode(started_cluster): ) assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error'" + f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error' SETTINGS hdfs_truncate_on_insert=1" ) error = node.query_and_get_error( @@ -1068,11 +1079,11 @@ def test_format_detection(started_cluster): node = started_cluster.instances["node1"] node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0)" + "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0) SETTINGS hdfs_truncate_on_insert=1" ) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10)" + "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10) SETTINGS hdfs_truncate_on_insert=1" ) expected_desc_result = node.query( @@ -1136,7 +1147,7 @@ def test_write_to_globbed_partitioned_path(started_cluster): node = started_cluster.instances["node1"] error = node.query_and_get_error( - "insert into function hdfs('hdfs://hdfs1:9000/test_data_*_{_partition_id}.csv') partition by 42 select 42" + "insert into function hdfs('hdfs://hdfs1:9000/test_data_*_{_partition_id}.csv') partition by 42 select 42 SETTINGS hdfs_truncate_on_insert=1" ) assert "DATABASE_ACCESS_DENIED" in error From 07cfcdeaaec0289d78b033260dd657bebad70674 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Aug 2024 11:31:46 +0000 Subject: [PATCH 1610/2361] Fix filter pushdown for aggregation without keys --- .../Optimizations/filterPushDown.cpp | 9 +++++++ .../03217_fliter_pushdown_no_keys.reference | 6 +++++ .../03217_fliter_pushdown_no_keys.sql | 26 +++++++++++++++++++ 3 files changed, 41 insertions(+) create mode 100644 tests/queries/0_stateless/03217_fliter_pushdown_no_keys.reference create mode 100644 tests/queries/0_stateless/03217_fliter_pushdown_no_keys.sql diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 73314f005b6..b71326ff75b 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -442,6 +442,15 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes const auto & params = aggregating->getParams(); const auto & keys = params.keys; + /** The filter is applied either to aggregation keys or aggregation result + * (columns under aggregation is not available in outer scope, so we can't have a filter for them). + * The filter for the aggregation result is not pushed down, so the only valid case is filtering aggregation keys. + * In case keys are empty, do not push down the filter. 
+ * Also with empty keys we can have an issue with `empty_result_for_aggregation_by_empty_set`, + * since we can gen a result row when everything is filtered. + */ + if (keys.empty()) + return 0; const bool filter_column_is_not_among_aggregation_keys = std::find(keys.begin(), keys.end(), filter->getFilterColumnName()) == keys.end(); diff --git a/tests/queries/0_stateless/03217_fliter_pushdown_no_keys.reference b/tests/queries/0_stateless/03217_fliter_pushdown_no_keys.reference new file mode 100644 index 00000000000..9838dd1b936 --- /dev/null +++ b/tests/queries/0_stateless/03217_fliter_pushdown_no_keys.reference @@ -0,0 +1,6 @@ +--- +1 1 +--- +3 3 +--- +--- diff --git a/tests/queries/0_stateless/03217_fliter_pushdown_no_keys.sql b/tests/queries/0_stateless/03217_fliter_pushdown_no_keys.sql new file mode 100644 index 00000000000..cb8bf59e790 --- /dev/null +++ b/tests/queries/0_stateless/03217_fliter_pushdown_no_keys.sql @@ -0,0 +1,26 @@ + + + +select * from ( select sum(last_seen) as dates_seen, materialize(1) as last_seen ) where last_seen > 2; +select * from ( select sum(last_seen) as dates_seen, materialize(2) as last_seen ) where last_seen < 2; +select * from ( select sum(last_seen) as dates_seen, materialize(2) as last_seen GROUP BY 'a' ) where last_seen < 2; + +select '---'; +select * from ( select sum(last_seen) as dates_seen, 1 as last_seen UNION ALL select sum(last_seen) as dates_seen, 3 as last_seen ) where last_seen < 2; + +select '---'; +select * from ( select sum(last_seen) as dates_seen, 1 as last_seen UNION ALL select sum(last_seen) as dates_seen, 3 as last_seen ) where last_seen > 2; + +select '---'; +with activity as ( + select + groupUniqArrayState(toDate('2025-01-01 01:00:00')) as dates_seen, + toDateTime('2025-01-01 01:00:00') as last_seen + union all + select + groupUniqArrayState(toDate('2023-11-11 11:11:11')) as dates_seen, + toDateTime('2023-11-11 11:11:11') as last_seen +) +select last_seen from activity +where last_seen < toDateTime('2020-01-01 00:00:00'); +select '---'; From 23190c30cf696075017f09a4997a7969c1d2f651 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 14:32:08 +0200 Subject: [PATCH 1611/2361] Fix bad test `03032_redundant_equals` --- tests/queries/0_stateless/03032_redundant_equals.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03032_redundant_equals.sql b/tests/queries/0_stateless/03032_redundant_equals.sql index de85ec5cf00..eee2be4ebf0 100644 --- a/tests/queries/0_stateless/03032_redundant_equals.sql +++ b/tests/queries/0_stateless/03032_redundant_equals.sql @@ -5,9 +5,9 @@ CREATE TABLE test_table k UInt64, ) ENGINE = MergeTree -ORDER BY k; +ORDER BY k SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -INSERT INTO test_table SELECT number FROM numbers(10000000); +INSERT INTO test_table SELECT number FROM numbers(100000); SET allow_experimental_analyzer = 1; @@ -25,7 +25,7 @@ SELECT * FROM test_table WHERE (NOT ((k not in (100) = 0) OR (k in (100) = 1))) SELECT * FROM test_table WHERE (NOT ((k in (101) = 0) OR (k in (100) = 1))) = 1; SELECT * FROM test_table WHERE ((k not in (101) = 0) OR (k in (100) = 1)) = 1; SELECT * FROM test_table WHERE ((k not in (99) = 1) AND (k in (100) = 1)) = 1; --- we skip optimizing queries with toNullable(0 or 1) but lets make sure they still work +-- we skip optimizing queries with toNullable(0 or 1) but lets make sure they still work SELECT * FROM test_table WHERE (k = 101) = toLowCardinality(toNullable(1)); SELECT * FROM 
test_table WHERE (k = 101) = toNullable(1); SELECT * FROM test_table WHERE (k = 101) = toLowCardinality(1); From ff0b8889ab8bf347343e18b4e9a407b3cab8264b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 14:36:24 +0200 Subject: [PATCH 1612/2361] Update 02789_reading_from_s3_with_connection_pool.sh --- .../0_stateless/02789_reading_from_s3_with_connection_pool.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh index 39399842db1..5a37d51233d 100755 --- a/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh +++ b/tests/queries/0_stateless/02789_reading_from_s3_with_connection_pool.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-random-settings, no-replicated-database +# Tags: no-fasttest, no-random-settings, no-replicated-database, no-distributed-cache CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From c21b97672c86a18b693c8c60335271248c279fdd Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:36:54 +0200 Subject: [PATCH 1613/2361] Fix trailing whitespace --- src/Interpreters/Cache/FileCache.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 1a15efa7cf8..217ae614c22 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -337,7 +337,7 @@ std::vector FileCache::splitRange(size_t offset, size_t size /// and get something like this: /// /// [________________________] - /// ^ ^ + /// ^ ^ /// | last_file_segment_right_offset + max_file_segment_size /// last_file_segment_right_offset /// e.g. there is no need to create sub-segment for range (last_file_segment_right_offset + max_file_segment_size, aligned_right_offset]. From 7bb236d66ee30fe4c0080f1d7a5a358cdc2a81af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 14:43:39 +0200 Subject: [PATCH 1614/2361] Fix style --- src/Storages/Kafka/StorageKafkaUtils.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Storages/Kafka/StorageKafkaUtils.cpp b/src/Storages/Kafka/StorageKafkaUtils.cpp index a2e3683f769..cdc32d775eb 100644 --- a/src/Storages/Kafka/StorageKafkaUtils.cpp +++ b/src/Storages/Kafka/StorageKafkaUtils.cpp @@ -39,11 +39,6 @@ # include #endif // USE_KRB5 -namespace CurrentMetrics -{ -extern const Metric KafkaLibrdkafkaThreads; -} - namespace ProfileEvents { extern const Event KafkaConsumerErrors; From 8393f7f54737c0ff5c0845e9481784b5d3764bfc Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 2 Aug 2024 16:26:14 +0100 Subject: [PATCH 1615/2361] impl --- .../0_stateless/02313_filesystem_cache_seeks.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh index b54e3d7f805..fc91f3f1448 100755 --- a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh +++ b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh @@ -8,13 +8,17 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh +client_opts=( + --distributed_ddl_output_mode 'null_status_on_timeout' +) + for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; do echo "Using storage policy: $STORAGE_POLICY" $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" - $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02313" + $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "DROP TABLE IF EXISTS test_02313" - $CLICKHOUSE_CLIENT --query "CREATE TABLE test_02313 (id Int32, val String) + $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "CREATE TABLE test_02313 (id Int32, val String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS storage_policy = '$STORAGE_POLICY'" @@ -32,6 +36,6 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02313 WHERE val LIKE concat('%', randomPrintableASCII(3), '%') FORMAT Null" $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02313 WHERE val LIKE concat('%', randomPrintableASCII(3), '%') FORMAT Null" - $CLICKHOUSE_CLIENT --query "DROP TABLE test_02313" + $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "DROP TABLE test_02313" done From fd562086ae989a6955a3355f4298465d91b3934d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 5 Aug 2024 15:37:23 +0200 Subject: [PATCH 1616/2361] Fix bad merge --- src/Core/SettingsChangesHistory.cpp | 262 +--------------------------- 1 file changed, 1 insertion(+), 261 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index a538151798d..e689382c5c4 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,267 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { - {"24.8", {{"allow_experimental_kafka_offsets_storage_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"}, - }}, - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. 
It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. 
A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. 
Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. 
SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. 
Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The 
exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } @@ -338,6 +77,7 @@ static std::initializer_list Date: Mon, 5 Aug 2024 15:38:15 +0200 Subject: [PATCH 1617/2361] Update tags --- tests/queries/0_stateless/02995_index_1.sh | 4 ++-- tests/queries/0_stateless/02995_index_10.sh | 4 ++-- tests/queries/0_stateless/02995_index_5.sh | 4 ++-- tests/queries/0_stateless/02995_index_6.sh | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/02995_index_1.sh b/tests/queries/0_stateless/02995_index_1.sh index 128697fd0fe..76be6341d53 100755 --- a/tests/queries/0_stateless/02995_index_1.sh +++ b/tests/queries/0_stateless/02995_index_1.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} +done | ${CLICKHOUSE_CLIENT} ${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_10.sh b/tests/queries/0_stateless/02995_index_10.sh index c15ba00fd05..813cc49cbd8 100755 --- a/tests/queries/0_stateless/02995_index_10.sh +++ b/tests/queries/0_stateless/02995_index_10.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-debug, no-asan, 
no-tsan, no-msan, no-ubsan, no-sanitize-coverage +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} +done | ${CLICKHOUSE_CLIENT} ${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_5.sh b/tests/queries/0_stateless/02995_index_5.sh index 80f75a532e3..58e53a2c481 100755 --- a/tests/queries/0_stateless/02995_index_5.sh +++ b/tests/queries/0_stateless/02995_index_5.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -39,6 +39,6 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String HAVING count() > 0; " -done | ${CLICKHOUSE_CLIENT} +done | ${CLICKHOUSE_CLIENT} ${CLICKHOUSE_CLIENT} "DROP TABLE test" diff --git a/tests/queries/0_stateless/02995_index_6.sh b/tests/queries/0_stateless/02995_index_6.sh index e90387c7c0c..ef35f0e1126 100755 --- a/tests/queries/0_stateless/02995_index_6.sh +++ b/tests/queries/0_stateless/02995_index_6.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage +# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-distributed-cache CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 41dfec0fe76a14bde54711de1cd8781b5d00a83e Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 5 Aug 2024 14:58:40 +0100 Subject: [PATCH 1618/2361] fix test --- tests/queries/0_stateless/02313_filesystem_cache_seeks.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh index fc91f3f1448..b7adde6fcbb 100755 --- a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh +++ b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh @@ -16,12 +16,12 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d echo "Using storage policy: $STORAGE_POLICY" $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE" - $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "DROP TABLE IF EXISTS test_02313" + $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "DROP TABLE IF EXISTS test_02313" > /dev/null $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "CREATE TABLE test_02313 (id Int32, val String) ENGINE = MergeTree() ORDER BY tuple() - SETTINGS storage_policy = '$STORAGE_POLICY'" + SETTINGS storage_policy = '$STORAGE_POLICY'" > /dev/null $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 -n --query "INSERT INTO test_02313 SELECT * FROM @@ -36,6 +36,6 @@ for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; d $CLICKHOUSE_CLIENT --query "SELECT * FROM test_02313 WHERE val LIKE concat('%', randomPrintableASCII(3), '%') FORMAT Null" $CLICKHOUSE_CLIENT --query "SELECT * FROM 
test_02313 WHERE val LIKE concat('%', randomPrintableASCII(3), '%') FORMAT Null"

-    $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "DROP TABLE test_02313"
+    $CLICKHOUSE_CLIENT "${client_opts[@]}" --query "DROP TABLE test_02313" > /dev/null

 done

From 462fe8b7ffa98743ecc01c98eb0bfae840b05dca Mon Sep 17 00:00:00 2001
From: Sema Checherinda
Date: Fri, 2 Aug 2024 18:23:56 +0200
Subject: [PATCH 1619/2361] repro for #67478

---
 .../test_storage_policies/configs/disks.xml   |  8 ++
 .../integration/test_storage_policies/test.py | 79 +++++++++++++++++++
 2 files changed, 87 insertions(+)

diff --git a/tests/integration/test_storage_policies/configs/disks.xml b/tests/integration/test_storage_policies/configs/disks.xml
index 3331fee4e4f..dc60d93208c 100644
--- a/tests/integration/test_storage_policies/configs/disks.xml
+++ b/tests/integration/test_storage_policies/configs/disks.xml
@@ -1,4 +1,12 @@
+    <blob_storage_log>
+        <database>system</database>
+        <table>blob_storage_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <ttl>event_date + INTERVAL 30 DAY</ttl>
+    </blob_storage_log>
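As a side note for anyone reproducing this locally, and not part of the patch itself: once a server is running with the blob_storage_log section above, the newly written entries can be inspected directly. The exact column set of system.blob_storage_log is not shown in this patch, so apart from event_date (which the partition key and TTL above reference) the sketch below deliberately avoids naming columns.

-- Illustrative only; assumes the blob_storage_log config above is loaded by the server.
SYSTEM FLUSH LOGS;

SELECT *
FROM system.blob_storage_log
ORDER BY event_date DESC
LIMIT 10;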
+ diff --git a/tests/integration/test_storage_policies/test.py b/tests/integration/test_storage_policies/test.py index 389146b2171..08a5cc71b99 100644 --- a/tests/integration/test_storage_policies/test.py +++ b/tests/integration/test_storage_policies/test.py @@ -38,3 +38,82 @@ def test_storage_policy_configuration_change(started_cluster): "/etc/clickhouse-server/config.d/disks.xml", ) node.start_clickhouse() + + +def test_disk_is_immutable(started_cluster): + node.query("DROP TABLE IF EXISTS test_1") + + node.query( + """ + create table test_1 (a Int32) + engine = MergeTree() + order by tuple() + settings + disk=disk( + name='not_uniq_disk_name', + type = object_storage, + object_storage_type = local_blob_storage, + path='./03215_data_test_1/') + """ + ) + + node.query("INSERT INTO test_1 VALUES (1)") + node.query("SYSTEM FLUSH LOGS;") + + print( + node.query( + "SELECT 'test_1', * FROM system.blob_storage_log" + ) + ) + + print( + node.query( + "SELECT 'test_1', * FROM test_1" + ) + ) + + node.query("DROP TABLE test_1 SYNC") + node.query("DROP TABLE IF EXISTS test_2") + + node.query( + """ + create table test_2 (a Int32) + engine = MergeTree() + order by tuple() + settings + disk=disk( + name='not_uniq_disk_name', + type = object_storage, + object_storage_type = local_blob_storage, + path='./03215_data_test_2/') + """ + ) + + node.query("INSERT INTO test_2 VALUES (1)") + node.query("SYSTEM FLUSH LOGS;") + + print( + node.query( + "SELECT 'test_2', * FROM system.blob_storage_log" + ) + ) + + print( + node.query( + "SELECT 'test_2', * FROM test_2" + ) + ) + + node.restart_clickhouse() + + print( + node.query( + "SELECT 'test_2', * FROM system.blob_storage_log" + ) + ) + + print( + node.query( + "SELECT 'test_2', * FROM test_2" + ) + ) From 5d9d5bf919527822b4a3d42e4f1595ccd374ead9 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 2 Aug 2024 18:14:22 +0000 Subject: [PATCH 1620/2361] Automatic style fix --- .../integration/test_storage_policies/test.py | 36 ++++--------------- 1 file changed, 6 insertions(+), 30 deletions(-) diff --git a/tests/integration/test_storage_policies/test.py b/tests/integration/test_storage_policies/test.py index 08a5cc71b99..f65096389af 100644 --- a/tests/integration/test_storage_policies/test.py +++ b/tests/integration/test_storage_policies/test.py @@ -60,17 +60,9 @@ def test_disk_is_immutable(started_cluster): node.query("INSERT INTO test_1 VALUES (1)") node.query("SYSTEM FLUSH LOGS;") - print( - node.query( - "SELECT 'test_1', * FROM system.blob_storage_log" - ) - ) + print(node.query("SELECT 'test_1', * FROM system.blob_storage_log")) - print( - node.query( - "SELECT 'test_1', * FROM test_1" - ) - ) + print(node.query("SELECT 'test_1', * FROM test_1")) node.query("DROP TABLE test_1 SYNC") node.query("DROP TABLE IF EXISTS test_2") @@ -92,28 +84,12 @@ def test_disk_is_immutable(started_cluster): node.query("INSERT INTO test_2 VALUES (1)") node.query("SYSTEM FLUSH LOGS;") - print( - node.query( - "SELECT 'test_2', * FROM system.blob_storage_log" - ) - ) + print(node.query("SELECT 'test_2', * FROM system.blob_storage_log")) - print( - node.query( - "SELECT 'test_2', * FROM test_2" - ) - ) + print(node.query("SELECT 'test_2', * FROM test_2")) node.restart_clickhouse() - print( - node.query( - "SELECT 'test_2', * FROM system.blob_storage_log" - ) - ) + print(node.query("SELECT 'test_2', * FROM system.blob_storage_log")) - print( - node.query( - "SELECT 'test_2', * FROM test_2" - ) - ) + print(node.query("SELECT 'test_2', * FROM test_2")) From 
e9506202d699c24e9b7a3e5b643bc9871680e5dc Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 5 Aug 2024 16:40:54 +0200 Subject: [PATCH 1621/2361] Add debug logging --- src/Storages/WindowView/StorageWindowView.cpp | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 65bf6768b1b..bf934ed00d9 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1055,6 +1055,8 @@ void StorageWindowView::threadFuncFireProc() /// TODO: consider using time_t instead (for every timestamp in this class) UInt32 timestamp_now = now(); + LOG_TRACE(log, "Now: {}, next fire signal: {}, max watermark: {}", timestamp_now, next_fire_signal, max_watermark); + while (next_fire_signal <= timestamp_now) { try @@ -1072,6 +1074,9 @@ void StorageWindowView::threadFuncFireProc() if (slide_kind > IntervalKind::Kind::Day) slide_interval *= 86400; next_fire_signal += slide_interval; + + LOG_TRACE(log, "Now: {}, next fire signal: {}, max watermark: {}, max fired watermark: {}, slide interval: {}", + timestamp_now, next_fire_signal, max_watermark, max_fired_watermark, slide_interval); } if (max_watermark >= timestamp_now) @@ -1433,16 +1438,19 @@ void StorageWindowView::writeIntoWindowView( while (window_view.modifying_query) std::this_thread::sleep_for(std::chrono::milliseconds(100)); - if (!window_view.is_proctime && window_view.max_watermark == 0 && block.rows() > 0) + const size_t block_rows = block.rows(); + if (!window_view.is_proctime && window_view.max_watermark == 0 && block_rows > 0) { std::lock_guard lock(window_view.fire_signal_mutex); const auto & window_column = block.getByName(window_view.timestamp_column_name); const ColumnUInt32::Container & window_end_data = static_cast(*window_column.column).getData(); UInt32 first_record_timestamp = window_end_data[0]; window_view.max_watermark = window_view.getWindowUpperBound(first_record_timestamp); + + LOG_TRACE(window_view.log, "New max watermark: {}", window_view.max_watermark); } - Pipe pipe(std::make_shared(block)); + Pipe pipe(std::make_shared(std::move(block))); UInt32 lateness_bound = 0; UInt32 t_max_watermark = 0; @@ -1649,6 +1657,8 @@ void StorageWindowView::writeIntoWindowView( auto executor = builder.execute(); executor->execute(builder.getNumThreads(), local_context->getSettingsRef().use_concurrency_control); + + LOG_TRACE(window_view.log, "Wrote {} rows into inner table ({})", block_rows, inner_table->getStorageID().getFullTableName()); } void StorageWindowView::startup() From 3e633ad0d0c579ca65467e35dd80504a92e3b7a4 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:43:28 +0000 Subject: [PATCH 1622/2361] more changes --- .../test_access_control_on_cluster/test.py | 2 ++ .../integration/test_access_for_functions/test.py | 11 ++++++++--- .../test_alter_comment_on_cluster/test.py | 1 + .../integration/test_alter_moving_garbage/test.py | 14 ++++++++++++++ .../test_alter_on_mixed_type_cluster/test.py | 6 ++++++ 5 files changed, 31 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_access_control_on_cluster/test.py b/tests/integration/test_access_control_on_cluster/test.py index b12add7ad3f..c292d0cc3a4 100644 --- a/tests/integration/test_access_control_on_cluster/test.py +++ b/tests/integration/test_access_control_on_cluster/test.py @@ -89,3 +89,5 @@ def test_grant_current_database_on_cluster(): assert 
ch1.query("SHOW DATABASES", user="test_user") == "user_db\n" ch1.query("GRANT SELECT ON * TO test_user ON CLUSTER 'cluster'", user="test_user") assert ch1.query("SHOW DATABASES", user="test_user") == "user_db\n" + ch1.query("DROP DATABASE user_db ON CLUSTER 'cluster'") + ch1.query("DROP USER test_user ON CLUSTER 'cluster'") diff --git a/tests/integration/test_access_for_functions/test.py b/tests/integration/test_access_for_functions/test.py index 004d39e1dea..52777c60729 100644 --- a/tests/integration/test_access_for_functions/test.py +++ b/tests/integration/test_access_for_functions/test.py @@ -65,8 +65,8 @@ def test_ignore_obsolete_grant_on_database(): "-c", f""" cat > /var/lib/clickhouse/access/{user_id}.sql << EOF -ATTACH USER X; -ATTACH GRANT CREATE FUNCTION, SELECT ON mydb.* TO X; +ATTACH USER \`{user_id}\`; +ATTACH GRANT CREATE FUNCTION, SELECT ON mydb.* TO \`{user_id}\`; EOF""", ] ) @@ -76,4 +76,9 @@ EOF""", ) instance.start_clickhouse() - assert instance.query("SHOW GRANTS FOR X") == "GRANT SELECT ON mydb.* TO X\n" + assert ( + instance.query(f"SHOW GRANTS FOR `{user_id}`") + == f"GRANT SELECT ON mydb.* TO `{user_id}`\n" + ) + instance.stop_clickhouse() + instance.start_clickhouse() diff --git a/tests/integration/test_alter_comment_on_cluster/test.py b/tests/integration/test_alter_comment_on_cluster/test.py index e6767e35c1b..4cb10bbc751 100644 --- a/tests/integration/test_alter_comment_on_cluster/test.py +++ b/tests/integration/test_alter_comment_on_cluster/test.py @@ -59,3 +59,4 @@ def test_comment(started_cluster): expected = "CREATE TABLE default.test_table (`id` Int64 COMMENT \\'column_comment_2\\') ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\') ORDER BY id SETTINGS index_granularity = 8192 COMMENT \\'table_comment_2\\'" assert_create_query([node_1, node_2], "default", "test_table", expected) + node_1.query("DROP TABLE test_table ON CLUSTER 'cluster'") diff --git a/tests/integration/test_alter_moving_garbage/test.py b/tests/integration/test_alter_moving_garbage/test.py index 21be46a7e1b..76b40b0df8d 100644 --- a/tests/integration/test_alter_moving_garbage/test.py +++ b/tests/integration/test_alter_moving_garbage/test.py @@ -36,6 +36,16 @@ def cluster(): cluster.shutdown() +def drop_table(node, table_name, replicated): + + create_table_statement = f"DROP TABLE {table_name} SYNC" + + if replicated: + node.query_with_retry(create_table_statement) + else: + node.query(create_table_statement) + + def create_table(node, table_name, replicated, additional_settings): settings = { "storage_policy": "two_disks", @@ -158,6 +168,9 @@ def test_alter_moving( assert data_digest == "1000\n" + for node in nodes: + drop_table(node, table_name, replicated_engine) + def test_delete_race_leftovers(cluster): """ @@ -248,3 +261,4 @@ def test_delete_race_leftovers(cluster): # Check that we have all data assert table_digest == node.query(table_digest_query) + drop_table(node, table_name, replicated=True) diff --git a/tests/integration/test_alter_on_mixed_type_cluster/test.py b/tests/integration/test_alter_on_mixed_type_cluster/test.py index f21a97d40e1..da126e307a0 100644 --- a/tests/integration/test_alter_on_mixed_type_cluster/test.py +++ b/tests/integration/test_alter_on_mixed_type_cluster/test.py @@ -88,6 +88,9 @@ def test_alter_on_cluter_non_replicated(started_cluster): assert node3.query("SELECT COUNT() FROM test_table") == "2\n" assert node4.query("SELECT COUNT() FROM test_table") == "2\n" + for node in [node1, node2, node3, node4]: + node.query("TRUNCATE 
TABLE test_table") + def test_alter_replicated_on_cluster(started_cluster): for node in [node1, node3]: @@ -133,3 +136,6 @@ def test_alter_replicated_on_cluster(started_cluster): assert node2.query("SELECT COUNT() FROM test_table_replicated") == "2\n" assert node3.query("SELECT COUNT() FROM test_table_replicated") == "2\n" assert node4.query("SELECT COUNT() FROM test_table_replicated") == "2\n" + + for node in [node1, node2, node3, node4]: + node.query("TRUNCATE TABLE test_table_replicated") From 65f5f628a9c47eb4dfa729e26fde131ad7bdc5e6 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:46:41 +0000 Subject: [PATCH 1623/2361] Revert "Update docker/test/integration/runner/Dockerfile" This reverts commit d296e62bf363d7dfab9a5bf6925b67b5e4188151. --- docker/test/integration/runner/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index 71cf3a16967..ceb8a1b2b58 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -47,7 +47,7 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \ && apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ - docker-ce="5:27.0.3*" \ + docker-ce \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ From 6573482f9f030e93773c6f52cac15f9e79cd0dbe Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:52:08 +0000 Subject: [PATCH 1624/2361] Revert "Unpin docker-ce in integration-tests-runner" This reverts commit 340214a246cd1c35d96cfb21be0576d87e05fea0. --- docker/test/integration/runner/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index ceb8a1b2b58..d250b746e7d 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -43,11 +43,13 @@ ENV TZ=Etc/UTC RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV DOCKER_CHANNEL stable +# Unpin the docker version after the release 24.0.3 is released +# https://github.com/moby/moby/issues/45770#issuecomment-1618255130 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \ && apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ - docker-ce \ + docker-ce='5:23.*' \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ From b531f6b78c080605f1ab91cb45c4a11b4b6aafb8 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 5 Aug 2024 15:00:38 +0000 Subject: [PATCH 1625/2361] Fixing tests. 
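For context on the ActionsDAG hunk that follows: removeUnusedActions now walks the DAG in two stages, first collecting non-deterministic nodes, so that a constant produced by folding can be flagged through is_deterministic_constant when anything non-deterministic fed into it. A rough, hypothetical illustration of the kind of expression where that distinction matters (not taken from the patch; whether folding actually applies here depends on internals not shown in this hunk):

-- rand() is non-deterministic, so its result must not be treated as an ordinary
-- reusable constant even if a constant column happens to be attached to the node.
SELECT count()
FROM numbers(1000)
WHERE rand() % 2 = 0;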
--- src/Interpreters/ActionsDAG.cpp | 91 ++++++++++++++++++++------------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 7bf65ee2416..df1c0aa1f2a 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -567,49 +567,68 @@ void ActionsDAG::removeUnusedActions(const std::unordered_set & us std::stack stack; - for (const auto * root : roots) + enum class VisitStage { NonDeterministic, Required }; + + for (auto stage : {VisitStage::NonDeterministic, VisitStage::Required}) { - if (!required_nodes.contains(root)) - { - required_nodes.insert(root); - stack.push({.node = root}); - } + required_nodes.clear(); - while (!stack.empty()) + for (const auto * root : roots) { - auto & frame = stack.top(); - auto * node = const_cast(frame.node); - - while (frame.next_child_to_visit < node->children.size()) + if (!required_nodes.contains(root)) { - const auto * child = node->children[frame.next_child_to_visit]; - ++frame.next_child_to_visit; - - if (!required_nodes.contains(child)) - { - required_nodes.insert(child); - stack.push({.node = child}); - break; - } - - if (non_deterministic_nodes.contains(child)) - non_deterministic_nodes.insert(node); + required_nodes.insert(root); + stack.push({.node = root}); } - if (stack.top().node != node) - continue; - - if (!node->isDeterministic()) - non_deterministic_nodes.insert(node); - - stack.pop(); - - /// Constant folding. - if (allow_constant_folding && !node->children.empty() - && node->column && isColumnConst(*node->column) && !non_deterministic_nodes.contains(node)) + while (!stack.empty()) { - node->type = ActionsDAG::ActionType::COLUMN; - node->children.clear(); + auto & frame = stack.top(); + auto * node = const_cast(frame.node); + + while (frame.next_child_to_visit < node->children.size()) + { + const auto * child = node->children[frame.next_child_to_visit]; + ++frame.next_child_to_visit; + + if (!required_nodes.contains(child)) + { + required_nodes.insert(child); + stack.push({.node = child}); + break; + } + } + + if (stack.top().node != node) + continue; + + stack.pop(); + + if (stage == VisitStage::Required) + continue; + + if (!node->isDeterministic()) + non_deterministic_nodes.insert(node); + else + { + for (const auto * child : node->children) + { + if (non_deterministic_nodes.contains(child)) + { + non_deterministic_nodes.insert(node); + break; + } + } + } + + /// Constant folding. 
+ if (allow_constant_folding && !node->children.empty() + && node->column && isColumnConst(*node->column)) + { + node->type = ActionsDAG::ActionType::COLUMN; + node->children.clear(); + node->is_deterministic_constant = !non_deterministic_nodes.contains(node); + } } } } From 59bffda9328a9d78c6a4422431f1014a16dd9cc2 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 5 Aug 2024 17:06:28 +0200 Subject: [PATCH 1626/2361] Revert "Revert "Slightly better calculation of primary index"" --- .../MergeTree/IMergeTreeDataPartWriter.cpp | 19 +++++- .../MergeTreeDataPartWriterOnDisk.cpp | 65 ++++++++++--------- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 9 +-- .../02993_lazy_index_loading.reference | 2 +- ..._system_unload_primary_key_table.reference | 8 +-- .../03128_system_unload_primary_key.reference | 4 +- 6 files changed, 62 insertions(+), 45 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp index 6152da78395..c87f66b64f3 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { @@ -71,9 +72,21 @@ IMergeTreeDataPartWriter::IMergeTreeDataPartWriter( Columns IMergeTreeDataPartWriter::releaseIndexColumns() { - return Columns( - std::make_move_iterator(index_columns.begin()), - std::make_move_iterator(index_columns.end())); + /// The memory for index was allocated without thread memory tracker. + /// We need to deallocate it in shrinkToFit without memory tracker as well. + MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; + + Columns result; + result.reserve(index_columns.size()); + + for (auto & column : index_columns) + { + column->shrinkToFit(); + result.push_back(std::move(column)); + } + + index_columns.clear(); + return result; } SerializationPtr IMergeTreeDataPartWriter::getSerialization(const String & column_name) const diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 46dd766139a..6dc7e649b06 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -255,6 +255,12 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() index_compressor_stream = std::make_unique(*index_file_hashing_stream, primary_key_compression_codec, settings.primary_key_compress_block_size); index_source_hashing_stream = std::make_unique(*index_compressor_stream); } + + const auto & primary_key_types = metadata_snapshot->getPrimaryKey().data_types; + index_serializations.reserve(primary_key_types.size()); + + for (const auto & type : primary_key_types) + index_serializations.push_back(type->getDefaultSerialization()); } } @@ -300,22 +306,33 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() store = std::make_shared(stream_name, data_part_storage, data_part_storage, storage_settings->max_digestion_size_per_segment); gin_index_stores[stream_name] = store; } + skip_indices_aggregators.push_back(skip_index->createIndexAggregatorForPart(store, settings)); skip_index_accumulated_marks.push_back(0); } } +void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndexRow(const Block & index_block, size_t row) +{ + chassert(index_block.columns() == index_serializations.size()); + auto & index_stream = compress_primary_key ? 
*index_source_hashing_stream : *index_file_hashing_stream; + + for (size_t i = 0; i < index_block.columns(); ++i) + { + const auto & column = index_block.getByPosition(i).column; + + index_columns[i]->insertFrom(*column, row); + index_serializations[i]->serializeBinary(*column, row, index_stream, {}); + } +} + void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Block & primary_index_block, const Granules & granules_to_write) { - size_t primary_columns_num = primary_index_block.columns(); + if (!metadata_snapshot->hasPrimaryKey()) + return; + if (index_columns.empty()) - { - index_types = primary_index_block.getDataTypes(); - index_columns.resize(primary_columns_num); - last_block_index_columns.resize(primary_columns_num); - for (size_t i = 0; i < primary_columns_num; ++i) - index_columns[i] = primary_index_block.getByPosition(i).column->cloneEmpty(); - } + index_columns = primary_index_block.cloneEmptyColumns(); { /** While filling index (index_columns), disable memory tracker. @@ -329,22 +346,14 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Bloc /// Write index. The index contains Primary Key value for each `index_granularity` row. for (const auto & granule : granules_to_write) { - if (metadata_snapshot->hasPrimaryKey() && granule.mark_on_start) - { - for (size_t j = 0; j < primary_columns_num; ++j) - { - const auto & primary_column = primary_index_block.getByPosition(j); - index_columns[j]->insertFrom(*primary_column.column, granule.start_row); - primary_column.type->getDefaultSerialization()->serializeBinary( - *primary_column.column, granule.start_row, compress_primary_key ? *index_source_hashing_stream : *index_file_hashing_stream, {}); - } - } + if (granule.mark_on_start) + calculateAndSerializePrimaryIndexRow(primary_index_block, granule.start_row); } } - /// store last index row to write final mark at the end of column - for (size_t j = 0; j < primary_columns_num; ++j) - last_block_index_columns[j] = primary_index_block.getByPosition(j).column; + /// Store block with last index row to write final mark at the end of column + if (with_final_mark) + last_index_block = primary_index_block; } void MergeTreeDataPartWriterOnDisk::calculateAndSerializeStatistics(const Block & block) @@ -421,17 +430,11 @@ void MergeTreeDataPartWriterOnDisk::fillPrimaryIndexChecksums(MergeTreeData::Dat if (index_file_hashing_stream) { - if (write_final_mark) + if (write_final_mark && last_index_block) { - for (size_t j = 0; j < index_columns.size(); ++j) - { - const auto & column = *last_block_index_columns[j]; - size_t last_row_number = column.size() - 1; - index_columns[j]->insertFrom(column, last_row_number); - index_types[j]->getDefaultSerialization()->serializeBinary( - column, last_row_number, compress_primary_key ? 
*index_source_hashing_stream : *index_file_hashing_stream, {}); - } - last_block_index_columns.clear(); + MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; + calculateAndSerializePrimaryIndexRow(last_index_block, last_index_block.rows() - 1); + last_index_block.clear(); } if (compress_primary_key) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index bdf0fdb7f32..8d84442981e 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -173,10 +173,10 @@ protected: std::unique_ptr index_source_hashing_stream; bool compress_primary_key; - DataTypes index_types; - /// Index columns from the last block - /// It's written to index file in the `writeSuffixAndFinalizePart` method - Columns last_block_index_columns; + /// Last block with index columns. + /// It's written to index file in the `writeSuffixAndFinalizePart` method. + Block last_index_block; + Serializations index_serializations; bool data_written = false; @@ -193,6 +193,7 @@ private: void initStatistics(); virtual void fillIndexGranularity(size_t index_granularity_for_block, size_t rows_in_block) = 0; + void calculateAndSerializePrimaryIndexRow(const Block & index_block, size_t row); struct ExecutionStatistics { diff --git a/tests/queries/0_stateless/02993_lazy_index_loading.reference b/tests/queries/0_stateless/02993_lazy_index_loading.reference index 5bc329ae4eb..08f07a92815 100644 --- a/tests/queries/0_stateless/02993_lazy_index_loading.reference +++ b/tests/queries/0_stateless/02993_lazy_index_loading.reference @@ -1,4 +1,4 @@ -100000000 140000000 +100000000 100000000 0 0 1 100000000 100000000 diff --git a/tests/queries/0_stateless/03127_system_unload_primary_key_table.reference b/tests/queries/0_stateless/03127_system_unload_primary_key_table.reference index 3ac6127fb21..2d33f7f6683 100644 --- a/tests/queries/0_stateless/03127_system_unload_primary_key_table.reference +++ b/tests/queries/0_stateless/03127_system_unload_primary_key_table.reference @@ -1,8 +1,8 @@ -100000000 140000000 -100000000 140000000 -100000000 140000000 +100000000 100000000 +100000000 100000000 +100000000 100000000 0 0 -100000000 140000000 +100000000 100000000 0 0 0 0 1 diff --git a/tests/queries/0_stateless/03128_system_unload_primary_key.reference b/tests/queries/0_stateless/03128_system_unload_primary_key.reference index c7b40ae5b06..2646dc7247f 100644 --- a/tests/queries/0_stateless/03128_system_unload_primary_key.reference +++ b/tests/queries/0_stateless/03128_system_unload_primary_key.reference @@ -1,4 +1,4 @@ -100000000 140000000 -100000000 140000000 +100000000 100000000 +100000000 100000000 0 0 0 0 From d080f863ea41420ecbb1c5d65769d74e21a46aba Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Mon, 5 Aug 2024 17:07:17 +0200 Subject: [PATCH 1627/2361] fix black --- tests/integration/test_storage_hdfs/test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 9ee8ac4cdfd..856715f28c8 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -675,7 +675,9 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')" ) - node1.query(f"insert into table function {table_function} SELECT 1, 'kek' 
SETTINGS hdfs_truncate_on_insert=1") + node1.query( + f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" + ) result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "parquet_2" @@ -683,7 +685,9 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')" ) - node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") + node1.query( + f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" + ) result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "kek" From 5448bf7b86a27f7061e8fe2a02ef5b64ea4b2cc2 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 5 Aug 2024 15:07:55 +0000 Subject: [PATCH 1628/2361] fix memory leak during index calculation --- .../MergeTree/MergeTreeDataPartWriterOnDisk.cpp | 9 +++++---- .../03217_primary_index_memory_leak.reference | 1 + .../03217_primary_index_memory_leak.sql | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03217_primary_index_memory_leak.reference create mode 100644 tests/queries/0_stateless/03217_primary_index_memory_leak.sql diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 6dc7e649b06..b0e70e94b73 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -331,9 +331,6 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Bloc if (!metadata_snapshot->hasPrimaryKey()) return; - if (index_columns.empty()) - index_columns = primary_index_block.cloneEmptyColumns(); - { /** While filling index (index_columns), disable memory tracker. * Because memory is allocated here (maybe in context of INSERT query), @@ -343,6 +340,9 @@ void MergeTreeDataPartWriterOnDisk::calculateAndSerializePrimaryIndex(const Bloc */ MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; + if (index_columns.empty()) + index_columns = primary_index_block.cloneEmptyColumns(); + /// Write index. The index contains Primary Key value for each `index_granularity` row. 
for (const auto & granule : granules_to_write) { @@ -434,9 +434,10 @@ void MergeTreeDataPartWriterOnDisk::fillPrimaryIndexChecksums(MergeTreeData::Dat { MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; calculateAndSerializePrimaryIndexRow(last_index_block, last_index_block.rows() - 1); - last_index_block.clear(); } + last_index_block.clear(); + if (compress_primary_key) { index_source_hashing_stream->finalize(); diff --git a/tests/queries/0_stateless/03217_primary_index_memory_leak.reference b/tests/queries/0_stateless/03217_primary_index_memory_leak.reference new file mode 100644 index 00000000000..4913dd5e690 --- /dev/null +++ b/tests/queries/0_stateless/03217_primary_index_memory_leak.reference @@ -0,0 +1 @@ +150000 diff --git a/tests/queries/0_stateless/03217_primary_index_memory_leak.sql b/tests/queries/0_stateless/03217_primary_index_memory_leak.sql new file mode 100644 index 00000000000..d5a553c7d72 --- /dev/null +++ b/tests/queries/0_stateless/03217_primary_index_memory_leak.sql @@ -0,0 +1,15 @@ +-- Tags: no-debug, no-tsan, no-msan, no-asan, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_primary_index_memory; +CREATE TABLE t_primary_index_memory (s String) ENGINE = MergeTree +ORDER BY s SETTINGS index_granularity = 1; + +INSERT INTO t_primary_index_memory SELECT repeat('a', 10000) FROM numbers(150000) +SETTINGS + max_block_size = 32, + max_memory_usage = '100M', + max_insert_block_size = 1024, + min_insert_block_size_rows = 1024; + +SELECT count() FROM t_primary_index_memory; +DROP TABLE t_primary_index_memory; From 146b8afce61ca2db2cdfd0621b0378746e90de59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 5 Aug 2024 17:20:41 +0200 Subject: [PATCH 1629/2361] Disable bad tests --- .../Nodes/tests/gtest_throttler_constraint.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp b/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp index 6cfccb252fa..363e286c91c 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp @@ -10,7 +10,9 @@ using namespace DB; using ResourceTest = ResourceTestClass; -TEST(SchedulerThrottlerConstraint, LeakyBucketConstraint) +/// Test disabled because of leaks in the test themselves: https://github.com/ClickHouse/ClickHouse/issues/67678 + +TEST(DISABLED_SchedulerThrottlerConstraint, LeakyBucketConstraint) { ResourceTest t; EventQueue::TimePoint start = std::chrono::system_clock::now(); @@ -40,7 +42,7 @@ TEST(SchedulerThrottlerConstraint, LeakyBucketConstraint) t.consumed("A", 10); } -TEST(SchedulerThrottlerConstraint, Unlimited) +TEST(DISABLED_SchedulerThrottlerConstraint, Unlimited) { ResourceTest t; EventQueue::TimePoint start = std::chrono::system_clock::now(); @@ -57,7 +59,7 @@ TEST(SchedulerThrottlerConstraint, Unlimited) } } -TEST(SchedulerThrottlerConstraint, Pacing) +TEST(DISABLED_SchedulerThrottlerConstraint, Pacing) { ResourceTest t; EventQueue::TimePoint start = std::chrono::system_clock::now(); @@ -77,7 +79,7 @@ TEST(SchedulerThrottlerConstraint, Pacing) } } -TEST(SchedulerThrottlerConstraint, BucketFilling) +TEST(DISABLED_SchedulerThrottlerConstraint, BucketFilling) { ResourceTest t; EventQueue::TimePoint start = std::chrono::system_clock::now(); @@ -111,7 +113,7 @@ TEST(SchedulerThrottlerConstraint, BucketFilling) t.consumed("A", 3); } -TEST(SchedulerThrottlerConstraint, 
PeekAndAvgLimits) +TEST(DISABLED_SchedulerThrottlerConstraint, PeekAndAvgLimits) { ResourceTest t; EventQueue::TimePoint start = std::chrono::system_clock::now(); @@ -139,7 +141,7 @@ TEST(SchedulerThrottlerConstraint, PeekAndAvgLimits) } } -TEST(SchedulerThrottlerConstraint, ThrottlerAndFairness) +TEST(DISABLED_SchedulerThrottlerConstraint, ThrottlerAndFairness) { ResourceTest t; EventQueue::TimePoint start = std::chrono::system_clock::now(); From 4e9c3baa25cc7f02a8ba9ad5be6d68964b6a04f5 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 12 Jul 2024 12:49:26 +0000 Subject: [PATCH 1630/2361] Move analyzer to Beta stage --- .../test/fuzzer/query-fuzzer-tweaks-users.xml | 4 +- docker/test/stateless/stress_tests.lib | 4 +- docs/en/development/architecture.md | 2 +- docs/en/operations/analyzer.md | 8 +- docs/en/operations/settings/settings.md | 2 +- docs/ru/development/architecture.md | 2 +- src/Client/HedgedConnections.cpp | 4 +- src/Client/MultiplexedConnections.cpp | 4 +- src/Core/Settings.h | 5 +- src/Core/SettingsChangesHistory.cpp | 259 ++++++++++++++++++ src/Interpreters/ActionsVisitor.cpp | 2 +- .../ClusterProxy/SelectStreamFactory.cpp | 4 +- .../ClusterProxy/executeQuery.cpp | 4 +- src/Interpreters/InterpreterCreateQuery.cpp | 4 +- src/Interpreters/InterpreterDescribeQuery.cpp | 2 +- src/Interpreters/InterpreterExplainQuery.cpp | 10 +- src/Interpreters/InterpreterFactory.cpp | 6 +- src/Interpreters/InterpreterInsertQuery.cpp | 2 +- src/Interpreters/MutationsInterpreter.cpp | 6 +- src/Interpreters/executeQuery.cpp | 6 +- .../getHeaderForProcessingStage.cpp | 2 +- .../QueryPlan/DistributedCreateLocalPlan.cpp | 2 +- .../Transforms/buildPushingToViewsChain.cpp | 4 +- src/Server/TCPHandler.cpp | 6 +- src/Storages/AlterCommands.cpp | 2 +- src/Storages/IStorageCluster.cpp | 2 +- src/Storages/LiveView/StorageLiveView.cpp | 10 +- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/MergeTree/RPNBuilder.cpp | 18 +- src/Storages/StorageBuffer.cpp | 2 +- src/Storages/StorageDistributed.cpp | 6 +- src/Storages/StorageExecutable.cpp | 2 +- src/Storages/StorageMerge.cpp | 20 +- src/Storages/StorageMergeTree.cpp | 6 +- src/Storages/StorageReplicatedMergeTree.cpp | 6 +- src/Storages/StorageView.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- src/Storages/WindowView/StorageWindowView.cpp | 6 +- src/TableFunctions/TableFunctionView.cpp | 2 +- .../TableFunctionViewIfPermitted.cpp | 2 +- tests/config/users.d/analyzer.xml | 2 +- .../helpers/0_common_enable_old_analyzer.xml | 2 +- tests/integration/helpers/cluster.py | 2 +- .../test_analyzer_compatibility/test.py | 8 +- .../test_distributed_type_object/test.py | 2 +- .../configs/enable_parallel_replicas.xml | 2 +- .../integration/test_settings_profile/test.py | 6 +- .../performance/storage_join_direct_join.xml | 4 +- tests/performance/uniq_to_count.xml | 4 +- .../queries/0_stateless/00116_storage_set.sql | 4 +- .../queries/0_stateless/00202_cross_join.sql | 3 +- ..._shard_no_aggregates_and_constant_keys.sql | 2 +- .../00313_const_totals_extremes.sh | 12 +- ...inal_and_prewhere_condition_ver_column.sql | 4 +- .../00370_duplicate_columns_in_subqueries.sql | 2 +- .../00378_json_quote_64bit_integers.sql | 2 +- .../0_stateless/00445_join_nullable_keys.sql | 2 +- .../queries/0_stateless/00490_with_select.sql | 2 +- .../00597_push_down_predicate_long.reference | 10 +- .../00597_push_down_predicate_long.sql | 30 +- ...00621_regression_for_in_operator.reference | 4 +- .../00621_regression_for_in_operator.sql | 8 +- 
.../0_stateless/00674_join_on_syntax.sql | 2 +- .../0_stateless/00700_decimal_compare.sql | 2 +- .../queries/0_stateless/00722_inner_join.sql | 2 +- .../00736_disjunction_optimisation.reference | 18 +- .../00736_disjunction_optimisation.sql | 36 +-- .../0_stateless/00757_enum_defaults_const.sql | 2 +- .../00757_enum_defaults_const_analyzer.sql | 2 +- .../00800_low_cardinality_join.sql | 2 +- .../00800_low_cardinality_merge_join.sql.j2 | 2 +- .../0_stateless/00818_alias_bug_4110.sql | 2 +- .../0_stateless/00818_inner_join_bug_3567.sql | 2 +- ...00819_full_join_wrong_columns_in_block.sql | 2 +- .../0_stateless/00820_multiple_joins.sql | 2 +- ...multiple_joins_subquery_requires_alias.sql | 2 +- .../0_stateless/00830_join_overwrite.sql | 2 +- .../0_stateless/00835_if_generic_case.sql | 2 +- .../00848_join_use_nulls_segfault.sql | 2 +- .../00849_multiple_comma_join_2.sql | 34 +-- .../00855_join_with_array_join.sql | 2 +- .../queries/0_stateless/00858_issue_4756.sql | 2 +- tests/queries/0_stateless/00897_flatten.sql | 2 +- ...0940_order_by_read_in_order_query_plan.sql | 2 +- .../01000_subquery_requires_alias.sql | 2 +- .../01013_totals_without_aggregation.sql | 2 +- .../0_stateless/01018_ambiguous_column.sql | 2 +- .../01047_window_view_parser_inner_table.sql | 2 +- .../0_stateless/01048_window_view_parser.sql | 2 +- .../01049_join_low_card_bug_long.reference.j2 | 64 ++--- .../01049_join_low_card_bug_long.sql.j2 | 4 +- .../01050_window_view_parser_tumble.sql | 2 +- .../01051_window_view_parser_hop.sql | 2 +- .../01052_window_view_proc_tumble_to_now.sh | 2 +- .../01053_window_view_proc_hop_to_now.sh | 2 +- .../01054_window_view_proc_tumble_to.sh | 2 +- .../01055_window_view_proc_hop_to.sh | 2 +- .../01056_window_view_proc_hop_watch.py | 4 +- ..._window_view_event_tumble_to_strict_asc.sh | 2 +- ...058_window_view_event_hop_to_strict_asc.sh | 2 +- ..._window_view_event_hop_watch_strict_asc.py | 4 +- .../01060_window_view_event_tumble_to_asc.sh | 2 +- .../01061_window_view_event_hop_to_asc.sh | 2 +- .../01062_window_view_event_hop_watch_asc.py | 4 +- ...063_window_view_event_tumble_to_bounded.sh | 2 +- .../01064_window_view_event_hop_to_bounded.sh | 2 +- ...065_window_view_event_hop_watch_bounded.py | 4 +- ...iew_event_tumble_to_strict_asc_lateness.sh | 2 +- ...indow_view_event_tumble_to_asc_lateness.sh | 2 +- ...w_view_event_tumble_to_bounded_lateness.sh | 2 +- .../01069_window_view_proc_tumble_watch.py | 4 +- .../01070_window_view_watch_events.py | 4 +- ...01071_window_view_event_tumble_asc_join.sh | 2 +- ...72_window_view_multiple_columns_groupby.sh | 2 +- ...indow_view_event_tumble_to_asc_populate.sh | 2 +- ...dow_view_event_tumble_asc_join_populate.sh | 2 +- ...window_view_proc_tumble_to_now_populate.sh | 2 +- .../01076_window_view_alter_query_to.sh | 2 +- ...indow_view_alter_query_to_modify_source.sh | 2 +- .../01078_window_view_alter_query_watch.py | 6 +- ...9_window_view_inner_table_memory_tumble.sh | 2 +- ...1080_window_view_inner_table_memory_hop.sh | 2 +- .../01081_window_view_target_table_engine.sh | 2 +- .../01082_window_view_watch_limit.py | 4 +- .../0_stateless/01083_window_view_select.sh | 2 +- ...01084_window_view_with_table_identifier.sh | 2 +- .../0_stateless/01085_window_view_attach.sql | 2 +- .../0_stateless/01086_window_view_cleanup.sh | 2 +- .../01087_window_view_alter_query.sh | 2 +- .../01088_window_view_default_column.sh | 2 +- .../01142_join_lc_and_nullable_in_key.sql | 28 +- ...1142_merge_join_lc_and_nullable_in_key.sql | 24 +- tests/queries/0_stateless/01232_untuple.sql 
| 2 +- ...01300_group_by_other_keys_having.reference | 6 +- .../01300_group_by_other_keys_having.sql | 8 +- ..._redundant_functions_in_order_by.reference | 16 +- .../01323_redundant_functions_in_order_by.sql | 28 +- .../01353_low_cardinality_join_types.sql | 4 +- ...BY_injective_elimination_dictGet.reference | 2 +- ...GROUP_BY_injective_elimination_dictGet.sql | 2 +- .../0_stateless/01428_nullable_asof_join.sql | 12 +- .../01455_opentelemetry_distributed.reference | 4 +- .../01455_opentelemetry_distributed.sh | 6 +- .../01476_right_full_join_switch.sql | 4 +- .../01477_lc_in_merge_join_left_key.sql.j2 | 6 +- .../0_stateless/01479_cross_join_9855.sql | 4 +- .../0_stateless/01508_explain_header.sql | 2 +- .../01556_explain_select_with_union_query.sql | 2 +- .../01561_clickhouse_client_stage.reference | 10 +- .../01561_clickhouse_client_stage.sh | 2 +- .../0_stateless/01591_window_functions.sql | 2 +- .../0_stateless/01600_detach_permanently.sh | 4 +- ..._constraints_simple_optimization.reference | 6 +- .../01622_constraints_simple_optimization.sql | 6 +- ...2_constraints_where_optimization.reference | 12 +- .../01622_constraints_where_optimization.sql | 12 +- .../01623_constraints_column_swap.reference | 26 +- .../01623_constraints_column_swap.sql | 26 +- .../0_stateless/01646_rewrite_sum_if.sql | 2 +- .../0_stateless/01651_bugs_from_15889.sql | 2 +- .../0_stateless/01655_plan_optimizations.sh | 48 ++-- ...01655_plan_optimizations_merge_filters.sql | 4 +- ...ns_optimize_read_in_window_order.reference | 8 +- ...mizations_optimize_read_in_window_order.sh | 24 +- .../01671_merge_join_and_constants.sql | 2 +- .../01721_join_implicit_cast_long.sql.j2 | 12 +- .../0_stateless/01739_index_hint.reference | 4 +- .../queries/0_stateless/01739_index_hint.sql | 4 +- ...ze_skip_unused_shards_rewrite_in.reference | 4 +- ...optimize_skip_unused_shards_rewrite_in.sql | 4 +- ...1757_optimize_skip_unused_shards_limit.sql | 6 +- .../01763_filter_push_down_bugs.sql | 4 +- .../0_stateless/01786_explain_merge_tree.sh | 2 +- .../queries/0_stateless/01823_explain_json.sh | 2 +- .../01852_multiple_joins_with_union_join.sql | 2 +- ...01872_functions_to_subcolumns_analyzer.sql | 2 +- .../01890_cross_join_explain_crash.sql | 2 +- .../01913_names_of_tuple_literal.sql | 2 +- .../01925_join_materialized_columns.sql | 8 +- ...25_test_storage_merge_aliases_analyzer.sql | 2 +- ...hree_parts_identifiers_in_wrong_places.sql | 2 +- .../01942_dateTimeToSnowflakeID.sql | 2 +- .../01942_snowflakeIDToDateTime.sql | 2 +- ...istributed_group_by_sharding_key.reference | 2 +- ...mize_distributed_group_by_sharding_key.sql | 4 +- .../0_stateless/02000_join_on_const.reference | 12 +- .../0_stateless/02000_join_on_const.sql | 41 ++- .../0_stateless/02030_tuple_filter.sql | 2 +- .../02048_clickhouse_local_stage.reference | 10 +- .../02048_clickhouse_local_stage.sh | 2 +- .../02115_map_contains_analyzer.sql | 2 +- .../02116_tuple_element_analyzer.sql | 2 +- .../02125_query_views_log_window_function.sql | 2 +- ...window_functions_disable_optimizations.sql | 2 +- .../02136_scalar_read_rows_json.sh | 2 +- .../02149_read_in_order_fixed_prefix.sql | 12 +- .../02151_hash_table_sizes_stats_joins.sh | 4 +- .../02154_dictionary_get_http_json.sh | 2 +- .../02156_storage_merge_prewhere.sql | 4 +- .../0_stateless/02174_cte_scalar_cache.sql | 4 +- .../0_stateless/02174_cte_scalar_cache_mv.sql | 12 +- .../02184_hash_functions_and_ip_types.sql | 2 +- .../02226_analyzer_or_like_combine.reference | 4 +- .../02226_analyzer_or_like_combine.sql | 14 +- 
.../0_stateless/02227_union_match_by_name.sql | 2 +- .../02233_with_total_empty_chunk.sql | 2 +- .../02234_clickhouse_local_test_mode.sh | 5 +- .../02267_join_dup_columns_issue36199.sql | 4 +- ..._column_matcher_and_column_transformer.sql | 2 +- .../0_stateless/02303_query_kind.reference | 8 +- tests/queries/0_stateless/02303_query_kind.sh | 2 +- .../02315_grouping_constant_folding.reference | 2 +- .../02315_grouping_constant_folding.sql | 2 +- ..._distinct_in_order_optimization_explain.sh | 4 +- .../02337_analyzer_columns_basic.sql | 2 +- .../02337_multiple_joins_original_names.sql | 2 +- .../02338_analyzer_constants_basic.sql | 2 +- .../02339_analyzer_matcher_basic.sql | 2 +- .../0_stateless/02340_analyzer_functions.sql | 2 +- .../02341_analyzer_aliases_basics.sql | 2 +- .../02341_global_join_cte.reference | 4 +- .../0_stateless/02341_global_join_cte.sql | 4 +- .../02342_analyzer_compound_types.sql | 2 +- .../02342_window_view_different_struct.sql | 2 +- ...43_analyzer_column_transformers_strict.sql | 2 +- .../0_stateless/02343_analyzer_lambdas.sql | 2 +- .../02343_analyzer_lambdas_issue_28083.sql | 6 +- .../02343_analyzer_lambdas_issue_36677.sql | 2 +- ...alyzer_multiple_aliases_for_expression.sql | 2 +- .../0_stateless/02345_analyzer_subqueries.sql | 2 +- .../02346_fulltext_index_bug52019.sql | 6 +- .../02346_fulltext_index_match_predicate.sql | 12 +- .../0_stateless/02354_vector_search_bugs.sql | 2 +- .../0_stateless/02364_window_view_segfault.sh | 2 +- .../0_stateless/02366_explain_query_tree.sql | 2 +- .../02367_analyzer_table_alias_columns.sql | 2 +- .../02368_analyzer_table_functions.sql | 2 +- .../02369_analyzer_array_join_function.sql | 2 +- .../02370_analyzer_in_function.sql | 2 +- .../0_stateless/02371_analyzer_join_cross.sql | 2 +- .../0_stateless/02372_analyzer_join.sql.j2 | 2 +- .../02373_analyzer_join_use_nulls.sql | 2 +- .../0_stateless/02374_analyzer_array_join.sql | 2 +- .../02374_analyzer_join_using.sql.j2 | 2 +- ...2374_combine_multi_if_and_count_if_opt.sql | 2 +- .../0_stateless/02375_analyzer_union.sql | 2 +- .../02376_analyzer_in_function_subquery.sql | 2 +- .../02377_analyzer_in_function_set.sql | 2 +- ...ting_by_input_stream_properties_explain.sh | 4 +- .../02378_analyzer_projection_names.sql | 2 +- .../02379_analyzer_subquery_depth.sql | 2 +- .../02380_analyzer_join_sample.sql | 2 +- .../0_stateless/02381_analyzer_join_final.sql | 2 +- .../02381_join_dup_columns_in_plan.sql.j2 | 2 +- .../02382_analyzer_matcher_join_using.sql | 2 +- .../02383_analyzer_merge_tree_self_join.sql | 2 +- .../02384_analyzer_dict_get_join_get.sql | 2 +- ...5_analyzer_aliases_compound_expression.sql | 2 +- ...analyzer_in_function_nested_subqueries.sql | 2 +- .../0_stateless/02387_analyzer_cte.sql | 2 +- .../02388_analyzer_recursive_lambda.sql | 2 +- .../02389_analyzer_nested_lambda.sql | 2 +- .../02420_final_setting_analyzer.reference | 2 +- .../02420_final_setting_analyzer.sql | 2 +- ...02421_decimal_in_precision_issue_41125.sql | 2 +- .../0_stateless/02421_explain_subquery.sql | 4 +- ...2428_decimal_in_floating_point_literal.sql | 2 +- .../0_stateless/02428_parameterized_view.sh | 4 +- .../0_stateless/02451_order_by_monotonic.sh | 2 +- .../0_stateless/02459_group_by_all.sql | 2 +- ..._subqueries_table_expression_modifiers.sql | 2 +- .../02475_analyzer_join_tree_subquery.sql | 2 +- ..._analyzer_subquery_compound_expression.sql | 2 +- ...2475_or_function_alias_and_const_where.sql | 2 +- .../02476_analyzer_identifier_hints.sh | 90 +++--- ...2476_analyzer_join_with_unused_columns.sql | 2 
+- .../0_stateless/02476_fuse_sum_count.sql | 2 +- .../02477_analyzer_array_join_with_join.sql | 2 +- ...02477_analyzer_ast_key_condition_crash.sql | 2 +- .../02477_analyzer_function_hints.sh | 10 +- .../0_stateless/02477_exists_fuzz_43478.sql | 2 +- .../0_stateless/02477_fuse_quantiles.sql | 2 +- ...ssions_optimizer_low_cardinality.reference | 12 +- ..._expressions_optimizer_low_cardinality.sql | 12 +- ...2478_analyzer_table_expression_aliases.sql | 2 +- .../02478_window_frame_type_groups.sql | 4 +- .../02479_analyzer_aggregation_crash.sql | 2 +- ...er_aggregation_totals_rollup_crash_fix.sql | 2 +- .../02479_analyzer_join_with_constants.sql | 2 +- .../02479_mysql_connect_to_self.sql | 2 +- .../02480_analyzer_alias_nullptr.sql | 2 +- .../0_stateless/02480_tlp_nan.reference | 20 +- tests/queries/0_stateless/02480_tlp_nan.sql | 20 +- .../02481_aggregation_in_order_plan.sql | 4 +- ...er_join_alias_unknown_identifier_crash.sql | 2 +- ...lyzer_optimize_aggregation_arithmetics.sql | 2 +- ...1_analyzer_optimize_grouping_sets_keys.sql | 2 +- .../02483_cuturlparameter_with_arrays.sql | 2 +- .../queries/0_stateless/02483_elapsed_time.sh | 2 +- .../0_stateless/02489_analyzer_indexes.sql | 3 +- .../02493_analyzer_sum_if_to_count_if.sql | 2 +- ...02493_analyzer_table_functions_untuple.sql | 2 +- ...r_uniq_injective_functions_elimination.sql | 2 +- ...analyzer_compound_expression_crash_fix.sql | 2 +- .../0_stateless/02494_query_cache_explain.sql | 2 +- .../02494_query_cache_nested_query_bug.sh | 4 +- .../02495_analyzer_storage_join.sql | 2 +- .../02495_sum_if_to_count_if_bug.sql | 5 +- .../02496_remove_redundant_sorting.sh | 4 +- ...nalyzer_sum_if_count_if_pass_crash_fix.sql | 2 +- ..._having_without_actual_aggregation_bug.sql | 2 +- .../02497_if_transform_strings_to_enum.sql | 2 +- .../02497_storage_join_right_assert.sql | 4 +- ...nctions_arithmetic_operations_pass_fix.sql | 2 +- .../02498_analyzer_settings_push_down.sql | 2 +- .../02498_storage_join_key_positions.sql.j2 | 2 +- ...er_aggregate_function_lambda_crash_fix.sql | 2 +- .../0_stateless/02499_analyzer_set_index.sql | 2 +- .../02500_analyzer_storage_view_crash_fix.sql | 2 +- .../02500_remove_redundant_distinct.sh | 4 +- ...501_analyzer_expired_context_crash_fix.sql | 2 +- ...02502_analyzer_insert_select_crash_fix.sql | 2 +- .../02503_join_switch_alias_fuzz.sql | 2 +- ...513_analyzer_duplicate_alias_crash_fix.sql | 2 +- .../0_stateless/02513_analyzer_sort_msan.sql | 2 +- .../02514_analyzer_drop_join_on.sql | 2 +- .../02515_analyzer_null_for_empty.sql | 2 +- ...6_join_with_totals_and_subquery_bug.sql.j2 | 4 +- .../02516_projections_and_context.sql | 4 +- ...518_rewrite_aggregate_function_with_if.sql | 2 +- ...21_analyzer_aggregation_without_column.sql | 2 +- .../02521_analyzer_array_join_crash.sql | 2 +- .../02525_analyzer_function_in_crash_fix.sql | 2 +- ...02532_analyzer_aggregation_with_rollup.sql | 2 +- .../02534_analyzer_grouping_function.sql | 2 +- .../02535_analyzer_group_by_use_nulls.sql | 2 +- .../02535_analyzer_limit_offset.sql | 2 +- .../02538_analyzer_create_table_as_select.sql | 2 +- ...zer_matcher_alias_materialized_columns.sql | 2 +- ...json_ignore_unknown_keys_in_named_tuple.sh | 2 +- ...02541_analyzer_grouping_sets_crash_fix.sql | 2 +- ..._optimize_group_by_function_keys_crash.sql | 2 +- .../02553_type_object_analyzer.sql | 2 +- ..._fix_grouping_sets_predicate_push_down.sql | 8 +- .../02560_analyzer_materialized_view.sql | 2 +- .../0_stateless/02563_analyzer_merge.sql | 2 +- .../02564_analyzer_cross_to_inner.sql | 2 +- 
.../02565_analyzer_limit_settings.sql | 2 +- ...66_analyzer_limit_settings_distributed.sql | 2 +- .../02567_and_consistency.reference | 2 +- .../0_stateless/02567_and_consistency.sql | 4 +- .../02576_predicate_push_down_sorting_fix.sql | 2 +- .../02576_rewrite_array_exists_to_has.sql | 4 +- .../02577_analyzer_array_join_calc_twice.sql | 2 +- .../0_stateless/02579_fill_empty_chunk.sql | 2 +- .../02579_fill_empty_chunk_analyzer.sql | 2 +- ...alyzer_join_subquery_empty_column_list.sql | 4 +- .../02661_quantile_approx.reference | 12 +- .../0_stateless/02661_quantile_approx.sql | 14 +- .../02662_first_last_value.reference | 2 +- .../0_stateless/02662_first_last_value.sql | 2 +- ...al_optimizer_removing_redundant_checks.sql | 2 +- .../02674_trivial_count_analyzer.reference | 2 +- .../02674_trivial_count_analyzer.sql | 2 +- ...75_predicate_push_down_filled_join_fix.sql | 2 +- .../02676_analyzer_limit_offset.sql | 2 +- ...676_distinct_reading_in_order_analyzer.sql | 2 +- .../02677_analyzer_bitmap_has_any.sql | 5 +- .../02677_analyzer_compound_expressions.sql | 2 +- .../02677_get_subcolumn_array_of_tuples.sql | 2 +- ...explain_merge_tree_prewhere_row_policy.sql | 4 +- .../02699_polygons_sym_difference_rollup.sql | 4 +- .../02699_polygons_sym_difference_total.sql | 2 +- ...polygons_sym_difference_total_analyzer.sql | 2 +- .../02701_invalid_having_NOT_AN_AGGREGATE.sql | 2 +- .../02702_logical_optimizer_with_nulls.sql | 2 +- ...ry_tree_is_forbidden_with_old_analyzer.sql | 2 +- ...2704_storage_merge_explain_graph_crash.sql | 2 +- .../02707_analyzer_nested_lambdas_types.sql | 10 +- .../02722_matcher_join_use_nulls.sql.j2 | 2 +- .../0_stateless/02725_cnf_large_check.sql | 8 +- .../02731_analyzer_join_resolve_nested.sql.j2 | 2 +- ..._parallel_replicas_join_subquery.reference | 8 +- .../02731_parallel_replicas_join_subquery.sql | 12 +- .../0_stateless/02734_optimize_group_by.sql | 4 +- ...s_with_subqueries_profile_events.reference | 24 +- ..._queries_with_subqueries_profile_events.sh | 26 +- .../02767_into_outfile_extensions_msan.sh | 2 +- .../02771_ignore_data_skipping_indices.sql | 4 +- ...02771_parallel_replicas_analyzer.reference | 2 +- .../02771_parallel_replicas_analyzer.sql | 2 +- .../02771_semi_join_use_nulls.sql.j2 | 4 +- .../02783_date_predicate_optimizations.sql | 100 +++---- ...lel_replicas_trivial_count_optimization.sh | 8 +- ...l_conditions_to_prewhere_analyzer_asan.sql | 2 +- ...mizations_ast_query_tree_rewrite.reference | 56 ++-- ...e_optimizations_ast_query_tree_rewrite.sql | 56 ++-- .../02803_remote_cannot_clone_block.sql | 2 +- .../02812_bug_with_unused_join_columns.sql | 2 +- ...r_aggregate_functions_of_group_by_keys.sql | 2 +- .../02815_join_algorithm_setting.sql | 4 +- ...834_analyzer_with_statement_references.sql | 2 +- .../0_stateless/02835_join_step_explain.sql | 2 +- .../02840_merge__table_or_filter.reference | 32 +-- .../02840_merge__table_or_filter.sql.j2 | 8 +- ...41_valid_json_and_xml_on_http_exception.sh | 2 +- .../0_stateless/02841_with_clause_resolve.sql | 48 ++-- .../02861_filter_pushdown_const_bug.sql | 2 +- .../02864_statistics_materialize_in_merge.sql | 2 +- .../02866_size_of_marks_skip_idx_explain.sql | 2 +- ...8_distinct_to_count_optimization.reference | 32 +-- .../02868_distinct_to_count_optimization.sql | 64 ++--- ..._key_index_in_function_different_types.sql | 4 +- .../02890_named_tuple_functions.sql | 2 +- .../02890_untuple_column_names.reference | 2 +- .../02890_untuple_column_names.sql | 44 +-- .../02911_analyzer_explain_estimate.sql | 2 +- 
...yzer_order_by_read_in_order_query_plan.sql | 2 +- ...lyzer_remove_unused_projection_columns.sql | 2 +- .../02911_join_on_nullsafe_optimization.sql | 2 +- .../02911_support_alias_column_in_indices.sql | 8 +- .../0_stateless/02915_analyzer_fuzz_1.sql | 2 +- .../0_stateless/02915_analyzer_fuzz_2.sql | 3 +- .../0_stateless/02915_analyzer_fuzz_5.sql | 2 +- .../0_stateless/02915_analyzer_fuzz_6.sql | 2 +- .../0_stateless/02918_join_pm_lc_crash.sql | 9 +- .../02918_optimize_count_for_merge_tables.sql | 4 +- .../02922_respect_nulls_parser.sql | 2 +- ...alyzer_rewrite_sum_column_and_constant.sql | 2 +- .../02932_parallel_replicas_fuzzer.sql | 2 +- .../queries/0_stateless/02933_paste_join.sql | 6 +- .../0_stateless/02943_order_by_all.sql | 32 +-- ...ngrambf_indexes_support_match_function.sql | 24 +- .../02944_variant_as_common_type_analyzer.sql | 3 +- .../02952_conjunction_optimization.sql | 2 +- .../02954_analyzer_fuzz_i57086.sql | 2 +- ...nalyzer_using_functional_args.reference.j2 | 20 +- ...2955_analyzer_using_functional_args.sql.j2 | 28 +- .../02955_sparkBar_alias_sparkbar.sql | 3 +- .../02962_join_using_bug_57894.sql | 4 +- .../0_stateless/02967_analyzer_fuzz.sql | 4 +- ...allel_replicas_join_algo_and_analyzer_3.sh | 20 +- ...llel_replicas_joins_and_analyzer.reference | 72 ++--- ...arallel_replicas_joins_and_analyzer.sql.j2 | 36 +-- ...analyzer_eliminate_injective_functions.sql | 2 +- .../02969_functions_to_subcolumns_if_null.sql | 8 +- .../0_stateless/02971_analyzer_remote_id.sh | 2 +- ...1_functions_to_subcolumns_column_names.sql | 2 +- .../02971_functions_to_subcolumns_map.sql | 2 +- .../02971_functions_to_subcolumns_variant.sql | 2 +- .../02972_parallel_replicas_cte.sql | 6 +- .../02974_analyzer_array_join_subcolumn.sql | 10 +- ..._logical_optimizer_pass_lowcardinality.sql | 2 +- .../02989_join_using_parent_scope.reference | 32 +-- .../02989_join_using_parent_scope.sql | 32 +-- .../02991_count_rewrite_analyzer.sql | 2 +- .../02992_analyzer_group_by_const.sql | 2 +- .../02996_analyzer_prewhere_projection.sql | 2 +- .../02998_analyzer_prewhere_report.sql | 2 +- ...8_analyzer_secret_args_tree_node.reference | 10 +- .../02998_analyzer_secret_args_tree_node.sql | 2 +- ...rojection_after_attach_partition.reference | 4 +- ...2998_projection_after_attach_partition.sql | 4 +- .../02999_analyzer_preimage_null.sql | 2 +- .../03001_analyzer_nullable_nothing.sql | 2 +- .../0_stateless/03002_analyzer_prewhere.sql | 2 +- .../0_stateless/03003_analyzer_setting.sql | 8 +- .../03003_functions_to_subcolumns_final.sql | 2 +- .../03006_join_on_inequal_expression_2.sql.j2 | 2 +- .../03006_join_on_inequal_expression_3.sql.j2 | 2 +- .../03006_join_on_inequal_expression_4.sql.j2 | 2 +- ...006_join_on_inequal_expression_fast.sql.j2 | 2 +- ...llel_replicas_cte_explain_syntax_crash.sql | 2 +- ...007_column_nullable_uninitialzed_value.sql | 2 +- .../03010_sum_to_to_count_if_nullable.sql | 4 +- ...se_nulls_with_materialize_and_analyzer.sql | 3 +- .../03014_analyzer_groupby_fuzz_60317.sql | 4 +- ...nulls_injective_functions_and_analyzer.sql | 3 +- .../03015_analyzer_groupby_fuzz_60772.sql | 4 +- .../03016_analyzer_groupby_fuzz_59796.sql | 2 +- .../03017_analyzer_groupby_fuzz_61600.sql | 4 +- ...mize_group_by_function_keys_with_nulls.sql | 3 +- ...23_group_by_use_nulls_analyzer_crashes.sql | 4 +- .../03023_remove_unused_column_distinct.sql | 2 +- .../03031_filter_float64_logical_error.sql | 4 +- .../03031_tuple_elimination_analyzer.sql | 2 +- .../0_stateless/03032_redundant_equals.sql | 4 +- 
...analyzer_merge_engine_filter_push_down.sql | 3 +- .../03033_analyzer_query_parameters.sh | 4 +- .../0_stateless/03033_cte_numbers_memory.sql | 2 +- .../03033_recursive_cte_basic.reference | 2 +- .../0_stateless/03033_recursive_cte_basic.sql | 2 +- .../03033_with_fill_interpolate.sql | 2 +- .../0_stateless/03034_normalized_ast.sql | 2 +- .../0_stateless/03034_recursive_cte_tree.sql | 2 +- ...3034_recursive_cte_tree_fuzz_crash_fix.sql | 2 +- ...34_recursive_cte_tree_merge_tree.reference | 2 +- .../03034_recursive_cte_tree_merge_tree.sql | 2 +- .../03035_alias_column_bug_distributed.sql | 2 +- .../03035_internal_functions_direct_call.sql | 2 +- .../03035_recursive_cte_postgres_1.reference | 2 +- .../03035_recursive_cte_postgres_1.sql | 2 +- ..._join_filter_push_down_equivalent_sets.sql | 2 +- .../03036_recursive_cte_postgres_2.reference | 2 +- .../03036_recursive_cte_postgres_2.sql | 2 +- .../0_stateless/03036_with_numbers.sql | 2 +- .../03037_recursive_cte_postgres_3.reference | 2 +- .../03037_recursive_cte_postgres_3.sql | 2 +- .../queries/0_stateless/03037_union_view.sql | 4 +- .../0_stateless/03038_ambiguous_column.sql | 2 +- .../03038_recursive_cte_postgres_4.reference | 2 +- .../03038_recursive_cte_postgres_4.sql | 2 +- .../03039_recursive_cte_postgres_5.reference | 2 +- .../03039_recursive_cte_postgres_5.sql | 2 +- ...039_unknown_identifier_window_function.sql | 2 +- .../0_stateless/03040_alias_column_join.sql | 2 +- .../0_stateless/03040_array_sum_and_join.sql | 2 +- .../03040_recursive_cte_postgres_6.reference | 2 +- .../03040_recursive_cte_postgres_6.sql | 2 +- .../03041_analyzer_gigachad_join.sql | 2 +- .../03041_recursive_cte_postgres_7.reference | 2 +- .../03041_recursive_cte_postgres_7.sql | 2 +- .../03041_select_with_query_result.sql | 2 +- .../0_stateless/03042_analyzer_alias_join.sql | 2 +- .../0_stateless/03042_not_found_column_c1.sql | 2 +- .../03043_group_array_result_is_expected.sql | 2 +- .../0_stateless/03044_analyzer_alias_join.sql | 2 +- ...044_array_join_columns_in_nested_table.sql | 2 +- .../03045_analyzer_alias_join_with_if.sql | 2 +- ..._unknown_identifier_alias_substitution.sql | 2 +- .../03046_column_in_block_array_join.sql | 2 +- .../0_stateless/03047_analyzer_alias_join.sql | 2 +- ..._group_by_field_identified_aggregation.sql | 2 +- .../03048_not_found_column_xxx_in_block.sql | 2 +- .../03049_analyzer_group_by_alias.sql | 2 +- ...unknown_identifier_materialized_column.sql | 2 +- .../0_stateless/03050_select_one_one_one.sql | 2 +- tests/queries/0_stateless/03051_many_ctes.sql | 2 +- .../03052_query_hash_includes_aliases.sql | 2 +- .../0_stateless/03053_analyzer_join_alias.sql | 2 +- .../0_stateless/03054_analyzer_join_alias.sql | 2 +- .../03055_analyzer_subquery_group_array.sql | 2 +- .../03057_analyzer_subquery_alias_join.sql | 2 +- .../03058_analyzer_ambiguous_columns.sql | 3 +- ...59_analyzer_join_engine_missing_column.sql | 2 +- .../03060_analyzer_regular_view_alias.sql | 2 +- ...61_analyzer_alias_as_right_key_in_join.sql | 2 +- ...62_analyzer_join_engine_missing_column.sql | 2 +- ...lyzer_multi_join_wrong_table_specifier.sql | 3 +- .../03064_analyzer_named_subqueries.sql | 2 +- ...065_analyzer_cross_join_and_array_join.sql | 2 +- .../03066_analyzer_global_with_statement.sql | 2 +- .../03067_analyzer_complex_alias_join.sql | 2 +- .../03068_analyzer_distributed_join.sql | 2 +- ...3069_analyzer_with_alias_in_array_join.sql | 2 +- .../03070_analyzer_CTE_scalar_as_numbers.sql | 2 +- ...array_join_forbid_non_existing_columns.sql | 2 +- 
.../03071_fix_short_circuit_logic.sql | 2 +- ...analyzer_missing_columns_from_subquery.sql | 2 +- .../03073_analyzer_alias_as_column_name.sql | 2 +- .../03074_analyzer_alias_column_in_view.sql | 2 +- .../03075_analyzer_subquery_alias.sql | 2 +- .../03076_analyzer_multiple_joins_alias.sql | 3 +- ...analyzer_multi_scalar_subquery_aliases.sql | 2 +- ...analyzer_multi_scalar_subquery_aliases.sql | 2 +- ...lyzer_numeric_literals_as_column_names.sql | 2 +- ..._column_name_to_alias__virtual_columns.sql | 2 +- .../03080_incorrect_join_with_merge.sql | 2 +- .../03081_analyzer_agg_func_CTE.sql | 2 +- ...3082_analyzer_left_join_correct_column.sql | 2 +- .../03084_analyzer_join_column_alias.sql | 2 +- .../03085_analyzer_alias_column_group_by.sql | 2 +- ..._analyzer_window_func_part_of_group_by.sql | 2 +- .../03087_analyzer_subquery_with_alias.sql | 2 +- ...8_analyzer_ambiguous_column_multi_call.sql | 2 +- .../03089_analyzer_alias_replacement.sql | 2 +- ...090_analyzer_multiple_using_statements.sql | 2 +- ...same_table_name_in_different_databases.sql | 2 +- ...same_table_name_in_different_databases.sql | 2 +- .../03093_analyzer_column_alias.sql | 2 +- .../0_stateless/03093_analyzer_miel_test.sql | 2 +- .../03093_bug37909_query_does_not_finish.sql | 2 +- .../03094_analyzer_fiddle_multiif.sql | 2 +- .../03094_named_tuple_bug24607.sql | 2 +- .../0_stateless/03094_one_thousand_joins.sql | 2 +- .../03095_window_functions_qualify.sql | 2 +- ..._text_log_format_string_args_not_empty.sql | 2 +- .../03097_query_log_join_processes.sql | 2 +- .../03098_prefer_column_to_alias_subquery.sql | 4 +- .../0_stateless/03099_analyzer_multi_join.sql | 2 +- .../03100_analyzer_constants_in_multiif.sql | 2 +- .../03101_analyzer_identifiers_1.sql | 2 +- .../03101_analyzer_identifiers_2.sql | 2 +- .../03101_analyzer_identifiers_3.sql | 2 +- .../03101_analyzer_identifiers_4.sql | 2 +- .../03101_analyzer_invalid_join_on.sql | 14 +- .../03102_prefer_column_name_to_alias.sql | 2 +- .../03103_positional_arguments.sql | 2 +- .../0_stateless/03104_create_view_join.sql | 8 +- ...ill_formed_select_in_materialized_view.sql | 4 +- .../0_stateless/03108_describe_union_all.sql | 4 +- .../queries/0_stateless/03109_ast_too_big.sql | 4 +- .../0_stateless/03110_unicode_alias.sql | 2 +- .../0_stateless/03111_inner_join_group_by.sql | 4 +- ...112_analyzer_not_found_column_in_block.sql | 4 +- ...3_analyzer_not_found_column_in_block_2.sql | 6 +- .../03114_analyzer_cte_with_join.sql | 2 +- .../0_stateless/03115_alias_exists_column.sql | 2 +- ...analyzer_explicit_alias_as_column_name.sql | 2 +- ...3117_analyzer_same_column_name_as_func.sql | 2 +- .../03118_analyzer_multi_join_prewhere.sql | 2 +- ..._analyzer_window_function_in_CTE_alias.sql | 2 +- .../0_stateless/03120_analyzer_dist_join.sql | 2 +- .../03120_analyzer_param_in_CTE_alias.sql | 2 +- ...nalyzer_filed_redefenition_in_subquery.sql | 2 +- ...22_analyzer_collate_in_window_function.sql | 2 +- .../03123_analyzer_dist_join_CTE.sql | 2 +- .../03124_analyzer_nested_CTE_dist_in.sql | 2 +- .../03125_analyzer_CTE_two_joins.sql | 2 +- .../03126_column_not_under_group_by.sql | 3 +- .../0_stateless/03129_cte_with_final.sql | 2 +- .../03130_analyzer_self_join_group_by.sql | 2 +- ...03130_convert_outer_join_to_inner_join.sql | 2 +- ...gregate_function_with_if_implicit_cast.sql | 2 +- .../0_stateless/03132_sqlancer_union_all.sql | 2 +- .../0_stateless/03142_untuple_crash.sql | 2 +- .../03142_window_function_limit_by.sql | 7 +- tests/queries/0_stateless/03143_cte_scope.sql | 2 +- 
.../03143_group_by_constant_secondary.sql | 2 +- .../03143_parallel_replicas_mat_view_bug.sql | 2 +- ..._aggregate_states_with_different_types.sql | 2 +- .../0_stateless/03144_invalid_filter.sql | 2 +- tests/queries/0_stateless/03146_bug47862.sql | 2 +- .../0_stateless/03146_tpc_ds_grouping.sql | 2 +- .../03148_query_log_used_dictionaries.sql | 8 +- ..._streams_to_max_threads_ratio_overflow.sql | 4 +- ...03150_grouping_sets_use_nulls_pushdown.sql | 6 +- ...lyzer_view_read_only_necessary_columns.sql | 2 +- ...in_filter_push_down_equivalent_columns.sql | 2 +- .../03154_recursive_cte_distributed.sql | 2 +- .../03155_analyzer_interpolate.sql | 2 +- .../03155_in_nested_subselects.sql | 8 +- .../0_stateless/03161_cnf_reduction.reference | 4 +- .../0_stateless/03161_cnf_reduction.sql | 12 +- .../03164_analyzer_global_in_alias.sql | 2 +- .../03164_early_constant_folding_analyzer.sql | 2 +- .../03164_materialize_skip_index.sql | 2 +- .../03165_order_by_duplicate.reference | 2 +- .../0_stateless/03165_order_by_duplicate.sql | 2 +- ...03166_mv_prewhere_duplicating_name_bug.sql | 4 +- .../03166_skip_indexes_vertical_merge_1.sql | 2 +- .../03167_parametrized_view_with_cte.sql | 2 +- ..._injective_functions_inside_uniq_crash.sql | 4 +- .../03170_part_offset_as_table_column.sql | 4 +- .../0_stateless/03171_condition_pushdown.sql | 2 +- .../03171_function_to_subcolumns_fuzzer.sql | 2 +- .../0_stateless/03173_forbid_qualify.sql | 4 +- .../03173_parallel_replicas_join_bug.sh | 2 +- .../0_stateless/03174_merge_join_bug.sql | 2 +- .../03199_join_with_materialized_column.sql | 2 +- .../03199_queries_with_new_analyzer.sql | 3 +- .../03200_memory_engine_alter_dynamic.sql | 5 +- .../03200_subcolumns_join_use_nulls.sql | 2 +- .../03201_sumIf_to_countIf_return_type.sql | 2 +- .../00081_group_by_without_key_and_totals.sql | 3 +- .../00172_early_constant_folding.sql | 4 +- .../00173_group_by_use_nulls.reference | 2 +- .../1_stateful/00173_group_by_use_nulls.sql | 2 +- 668 files changed, 1903 insertions(+), 1667 deletions(-) diff --git a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml index d5b876a4c85..476464e9cc2 100644 --- a/docker/test/fuzzer/query-fuzzer-tweaks-users.xml +++ b/docker/test/fuzzer/query-fuzzer-tweaks-users.xml @@ -28,9 +28,9 @@
- + - + diff --git a/docker/test/stateless/stress_tests.lib b/docker/test/stateless/stress_tests.lib index 36782101fa7..73e0376d95a 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/docker/test/stateless/stress_tests.lib @@ -139,9 +139,9 @@ EOL - + - + diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index 6428c0e90d5..c5d13ab63a5 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query `InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query. -To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `allow_experimental_analyzer` flag. +To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `enable_analyzer` flag. ## Functions {#functions} diff --git a/docs/en/operations/analyzer.md b/docs/en/operations/analyzer.md index 298c6dacd06..c9b3c30d30d 100644 --- a/docs/en/operations/analyzer.md +++ b/docs/en/operations/analyzer.md @@ -123,7 +123,7 @@ To ensure consistent and expected results, especially when migrating old queries In the new version of the analyzer, the rules for determining the common supertype for columns specified in the `USING` clause have been standardized to produce more predictable outcomes, especially when dealing with type modifiers like `LowCardinality` and `Nullable`. - `LowCardinality(T)` and `T`: When a column of type `LowCardinality(T)` is joined with a column of type `T`, the resulting common supertype will be `T`, effectively discarding the `LowCardinality` modifier. - + - `Nullable(T)` and `T`: When a column of type `Nullable(T)` is joined with a column of type `T`, the resulting common supertype will be `Nullable(T)`, ensuring that the nullable property is preserved. **Example:** @@ -144,7 +144,7 @@ During projection names computation, aliases are not substituted. SELECT 1 + 1 AS x, x + 1 -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 FORMAT PrettyCompact ┌─x─┬─plus(plus(1, 1), 1)─┠@@ -154,7 +154,7 @@ FORMAT PrettyCompact SELECT 1 + 1 AS x, x + 1 -SETTINGS allow_experimental_analyzer = 1 +SETTINGS enable_analyzer = 1 FORMAT PrettyCompact ┌─x─┬─plus(x, 1)─┠@@ -177,7 +177,7 @@ SELECT toTypeName(if(0, [2, 3, 4], 'String')) ### Heterogeneous clusters -The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `allow_experimental_analyzer` setting values. 
+The new analyzer significantly changed the communication protocol between servers in the cluster. Thus, it's impossible to run distributed queries on servers with different `enable_analyzer` setting values. ### Mutations are interpreted by previous analyzer diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 143ce836beb..35547c3a9a6 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4051,7 +4051,7 @@ Rewrite aggregate functions with if expression as argument when logically equiva For example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance. :::note -Supported only with experimental analyzer (`allow_experimental_analyzer = 1`). +Supported only with experimental analyzer (`enable_analyzer = 1`). ::: ## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec} diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index 575799cccc4..0701c8f4a51 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -115,7 +115,7 @@ ClickHouse — полноценная столбцовая СУБД. Данны `InterpreterSelectQuery` использует `ExpressionAnalyzer` и `ExpressionActions` механизмы для анализа запросов и преобразований. Именно здесь выполняется большинство оптимизаций запросов на основе правил. `ExpressionAnalyzer` написан довольно грязно и должен быть переписан: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запросы. -Для решения текущих проблем, существующих в интерпретаторах, разрабатывается новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он еще не готов к использованию в продакшене, но его можно протестировать с помощью флага `allow_experimental_analyzer`. +Для решения текущих проблем, существующих в интерпретаторах, разрабатывается новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он еще не готов к использованию в продакшене, но его можно протестировать с помощью флага `enable_analyzer`. ## Функции {#functions} diff --git a/src/Client/HedgedConnections.cpp b/src/Client/HedgedConnections.cpp index dd8348ea04f..1c7f222aa78 100644 --- a/src/Client/HedgedConnections.cpp +++ b/src/Client/HedgedConnections.cpp @@ -196,11 +196,11 @@ void HedgedConnections::sendQuery( modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset; } - /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting. + /// FIXME: Remove once we will make `enable_analyzer` obsolete setting. /// Make the analyzer being set, so it will be effectively applied on the remote server. /// In other words, the initiator always controls whether the analyzer enabled or not for /// all servers involved in the distributed query processing.
- modified_settings.set("allow_experimental_analyzer", static_cast(modified_settings.allow_experimental_analyzer)); + modified_settings.set("enable_analyzer", static_cast(modified_settings.enable_analyzer)); replica.connection->sendQuery( timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {}); diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index 244eccf1ed9..7ca22ae4c81 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -150,11 +150,11 @@ void MultiplexedConnections::sendQuery( client_info.number_of_current_replica = replica_info->number_of_current_replica; } - /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting. + /// FIXME: Remove once we will make `enable_analyzer` obsolete setting. /// Make the analyzer being set, so it will be effectively applied on the remote server. /// In other words, the initiator always controls whether the analyzer enabled or not for /// all servers involved in the distributed query processing. - modified_settings.set("allow_experimental_analyzer", static_cast(modified_settings.allow_experimental_analyzer)); + modified_settings.set("enable_analyzer", static_cast(modified_settings.enable_analyzer)); const bool enable_offset_parallel_processing = context->canUseOffsetParallelReplicas(); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0d498ce7699..d6c0dc223b2 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -638,7 +638,7 @@ class IColumn; M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \ M(Bool, enable_global_with_statement, true, "Propagate WITH statements to UNION queries and all subqueries", 0) \ M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \ - M(Bool, optimize_syntax_fuse_functions, false, "Allow apply fuse aggregating function. Available only with `allow_experimental_analyzer`", 0) \ + M(Bool, optimize_syntax_fuse_functions, false, "Allow apply fuse aggregating function. Available only with `enable_analyzer`", 0) \ M(Bool, flatten_nested, true, "If true, columns of type Nested will be flatten to separate array columns instead of one array of tuples", 0) \ M(Bool, asterisk_include_materialized_columns, false, "Include MATERIALIZED columns for wildcard query", 0) \ M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \ @@ -943,8 +943,7 @@ class IColumn; \ M(Bool, allow_experimental_join_condition, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y.", 0) \ \ - /* Analyzer: It's not experimental anymore (WIP) */ \ - M(Bool, allow_experimental_analyzer, true, "Allow new query analyzer.", IMPORTANT) \ + M(Bool, enable_analyzer, true, "Allow new query analyzer.", IMPORTANT) ALIAS(allow_experimental_analyzer) \ M(Bool, analyzer_compatibility_join_using_top_level_identifier, false, "Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather then `t1.b = t2.b`).", 0) \ \ M(Bool, allow_experimental_live_view, false, "Enable LIVE VIEW. 
Not mature enough.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 5b94391bade..08fb6dc3301 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -500,6 +500,265 @@ static std::initializer_list col >= '2023-01-01' AND col <= '2023-12-31')"}, + {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory."}, + {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, + {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, + {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, + {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, + {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, + }}, + {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, + {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, + {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, + {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, + {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, + {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, + {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, + {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, + {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, + {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, + {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, + {"enable_vertical_final", false, true, "Use vertical final by default"}, + {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, + {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, + {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, + {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, + {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, + {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, + {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, + 
{"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, + {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, + {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, + {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, + {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, + {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, + {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, + {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, + {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, + {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, + {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, + {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, + {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, + {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, + {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, + {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, + {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, + {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, + {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, + {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, + {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, + {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, + {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, + {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, + {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can be defined with identical expressions"}, + {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, + {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, + {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, + {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, + {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, + {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, + {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, + {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, + {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input format"}, + {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, + {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, + {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, + {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, + {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, + {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, + {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, + {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, + {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, + {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, + {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, + {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, + {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, + {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, + {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan.
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, + {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, + {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, + {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, + {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, + {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, + {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, + {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, + {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quotes in CSV specially, so don't do it by default either"}}}, + {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, + {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, + {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, + {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, + {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interprets SETTINGS as some values, which is misleading"}}}, + {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave the same as toIPv4 and toIPv6 functions"}}}, + {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, + {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, + {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, + {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set it to 'true' while doing a rolling update of the cluster from a version lower than 21.7 to a higher one"}}}, + {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, + {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, + {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that are semantically equal to count() as count() by default"}, + {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, + {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, + {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default.
It is significantly more convenient to use than sequential quorum inserts"}, + {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, + {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, + {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, + {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to make the behaviour more like what users expect"}}}, + {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UUID of the table in its CREATE query for Engine=Atomic"}}}, + {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, + {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, + {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, + {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, + {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, + {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, + {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, + {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, }; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index e1b7e92ee5d..b8d70e5d5dd 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -1411,7 +1411,7 @@ FutureSetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool return {}; PreparedSets::Hash set_key; - if (data.getContext()->getSettingsRef().allow_experimental_analyzer && !identifier) + if (data.getContext()->getSettingsRef().enable_analyzer && !identifier) { /// Here we can be only from mutation interpreter. Normal selects with the analyzer use another interpreter. /// This is a hacky way to allow reusing cache for prepared sets. diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index e35d31d2350..0948f24eca0 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -68,7 +68,7 @@ ASTPtr rewriteSelectQuery( // are written into the query context and will be sent by the query pipeline.
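The per-version settings history above is what the `compatibility` setting consults: pinning compatibility to an older release re-applies the earlier defaults recorded for later versions. A minimal SQL sketch, not part of the patch itself (the value you observe depends on the server version you run it on):

-- allow_suspicious_ttl_expressions got a new default (false) in 23.12,
-- so pinning compatibility to 23.11 should bring back the old default (true).
SET compatibility = '23.11';
SELECT name, value, changed FROM system.settings WHERE name = 'allow_suspicious_ttl_expressions';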
select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {}); - if (!context->getSettingsRef().allow_experimental_analyzer) + if (!context->getSettingsRef().enable_analyzer) { if (table_function_ptr) select_query.addTableFunction(table_function_ptr); @@ -165,7 +165,7 @@ void SelectStreamFactory::createForShardImpl( auto emplace_remote_stream = [&](bool lazy = false, time_t local_delay = 0) { Block shard_header; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) shard_header = InterpreterSelectQueryAnalyzer::getSampleBlock(query_tree, context, SelectQueryOptions(processed_stage).analyze()); else shard_header = header; diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index d04a73e384e..6c8ab11bfc9 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -300,7 +300,7 @@ void executeQuery( const size_t shards = cluster->getShardCount(); - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { for (size_t i = 0, s = cluster->getShardsInfo().size(); i < s; ++i) { @@ -581,7 +581,7 @@ void executeQueryWithParallelReplicasCustomKey( /// Return directly (with correct header) if no shard to query. if (query_info.getCluster()->getShardsInfo().empty()) { - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) return; Pipe pipe(std::make_shared(header)); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 971f90bd3cd..ea631ef01d5 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -834,7 +834,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti Block as_select_sample; - if (getContext()->getSettingsRef().allow_experimental_analyzer) + if (getContext()->getSettingsRef().enable_analyzer) { as_select_sample = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); } @@ -1327,7 +1327,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) { Block input_block; - if (getContext()->getSettingsRef().allow_experimental_analyzer) + if (getContext()->getSettingsRef().enable_analyzer) { input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); } diff --git a/src/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp index 39fc85a5e23..4a061f02c2b 100644 --- a/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/src/Interpreters/InterpreterDescribeQuery.cpp @@ -129,7 +129,7 @@ void InterpreterDescribeQuery::fillColumnsFromSubquery(const ASTTableExpression auto select_query = table_expression.subquery->children.at(0); auto current_context = getContext(); - if (settings.allow_experimental_analyzer) + if (settings.enable_analyzer) { SelectQueryOptions select_query_options; sample_block = InterpreterSelectQueryAnalyzer(select_query, current_context, select_query_options).getSampleBlock(); diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index bedd9cb4a80..2fbfbf3a809 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -394,9 +394,9 @@ QueryPipeline InterpreterExplainQuery::executeImpl() } case ASTExplainQuery::QueryTree: { - if 
(!getContext()->getSettingsRef().allow_experimental_analyzer) + if (!getContext()->getSettingsRef().enable_analyzer) throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "EXPLAIN QUERY TREE is only supported with a new analyzer. Set allow_experimental_analyzer = 1."); + "EXPLAIN QUERY TREE is only supported with a new analyzer. Set enable_analyzer = 1."); if (ast.getExplainedQuery()->as() == nullptr) throw Exception(ErrorCodes::INCORRECT_QUERY, "Only SELECT is supported for EXPLAIN QUERY TREE query"); @@ -453,7 +453,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() ContextPtr context; - if (getContext()->getSettingsRef().allow_experimental_analyzer) + if (getContext()->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), options); context = interpreter.getContext(); @@ -499,7 +499,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() QueryPlan plan; ContextPtr context; - if (getContext()->getSettingsRef().allow_experimental_analyzer) + if (getContext()->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), options); context = interpreter.getContext(); @@ -558,7 +558,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() QueryPlan plan; ContextPtr context = getContext(); - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions()); context = interpreter.getContext(); diff --git a/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp index 12b3b510098..a909c4e602d 100644 --- a/src/Interpreters/InterpreterFactory.cpp +++ b/src/Interpreters/InterpreterFactory.cpp @@ -118,7 +118,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte if (query->as()) { - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) interpreter_name = "InterpreterSelectQueryAnalyzer"; /// This is internal part of ASTSelectWithUnionQuery. /// Even if there is SELECT without union, it is represented by ASTSelectWithUnionQuery with single ASTSelectQuery as a child. 
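As a usage sketch of the EXPLAIN QUERY TREE guard updated above: the statement only works once the analyzer is enabled, otherwise it is rejected with the NOT_IMPLEMENTED error shown in the hunk.

SET enable_analyzer = 1;
EXPLAIN QUERY TREE SELECT number + 1 AS x FROM numbers(3);
-- With enable_analyzer = 0 the same statement fails with NOT_IMPLEMENTED,
-- asking you to set enable_analyzer = 1.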
@@ -129,7 +129,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte { ProfileEvents::increment(ProfileEvents::SelectQuery); - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) interpreter_name = "InterpreterSelectQueryAnalyzer"; else interpreter_name = "InterpreterSelectWithUnionQuery"; @@ -222,7 +222,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte { const auto kind = query->as()->getKind(); if (kind == ASTExplainQuery::ParsedAST || kind == ASTExplainQuery::AnalyzedSyntax) - context->setSetting("allow_experimental_analyzer", false); + context->setSetting("enable_analyzer", false); interpreter_name = "InterpreterExplainQuery"; } diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index c97593a1781..0213e2a2c42 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -95,7 +95,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query) Block header_block; auto select_query_options = SelectQueryOptions(QueryProcessingStage::Complete, 1); - if (current_context->getSettingsRef().allow_experimental_analyzer) + if (current_context->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter_select(query.select, current_context, select_query_options); header_block = interpreter_select.getSampleBlock(); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 57ad5caa4c7..c049dbc9cc1 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -189,7 +189,7 @@ bool isStorageTouchedByMutations( std::optional interpreter_select_query; BlockIO io; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { auto select_query_tree = prepareQueryAffectedQueryTree(commands, storage.shared_from_this(), context); InterpreterSelectQueryAnalyzer interpreter(select_query_tree, context, SelectQueryOptions().ignoreLimits()); @@ -415,9 +415,9 @@ MutationsInterpreter::MutationsInterpreter( , logger(getLogger("MutationsInterpreter(" + source.getStorage()->getStorageID().getFullTableName() + ")")) { auto new_context = Context::createCopy(context_); - if (new_context->getSettingsRef().allow_experimental_analyzer) + if (new_context->getSettingsRef().enable_analyzer) { - new_context->setSetting("allow_experimental_analyzer", false); + new_context->setSetting("enable_analyzer", false); LOG_DEBUG(logger, "Will use old analyzer to prepare mutation"); } context = std::move(new_context); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index ce58f7f922c..7476915ab8a 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -685,10 +685,10 @@ void validateAnalyzerSettings(ASTPtr ast, bool context_value) if (auto * set_query = node->as()) { - if (auto * value = set_query->changes.tryGet("allow_experimental_analyzer")) + if (auto * value = set_query->changes.tryGet("enable_analyzer")) { if (top_level != value->safeGet()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'allow_experimental_analyzer' is changed in the subquery. Top level value: {}", top_level); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'enable_analyzer' is changed in the subquery. 
Top level value: {}", top_level); } } @@ -912,7 +912,7 @@ static std::tuple executeQueryImpl( /// Interpret SETTINGS clauses as early as possible (before invoking the corresponding interpreter), /// to allow settings to take effect. InterpreterSetQuery::applySettingsFromQuery(ast, context); - validateAnalyzerSettings(ast, context->getSettingsRef().allow_experimental_analyzer); + validateAnalyzerSettings(ast, context->getSettingsRef().enable_analyzer); if (auto * insert_query = ast->as()) insert_query->tail = istr; diff --git a/src/Interpreters/getHeaderForProcessingStage.cpp b/src/Interpreters/getHeaderForProcessingStage.cpp index cf18cbbb54a..c4a791e85e1 100644 --- a/src/Interpreters/getHeaderForProcessingStage.cpp +++ b/src/Interpreters/getHeaderForProcessingStage.cpp @@ -141,7 +141,7 @@ Block getHeaderForProcessingStage( Block result; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { auto storage = std::make_shared(storage_snapshot->storage.getStorageID(), storage_snapshot->getAllColumnsDescription(), diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp index d8624a1c99b..dc4b7fd733b 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp @@ -65,7 +65,7 @@ std::unique_ptr createLocalPlan( .setShardInfo(static_cast(shard_num), static_cast(shard_count)) .ignoreASTOptimizations(); - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { /// For Analyzer, identifier in GROUP BY/ORDER BY/LIMIT BY lists has been resolved to /// ConstantNode in QueryTree if it is an alias of a constant, so we should not replace diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index 98d66ed77c3..a2d5ec5d1cb 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -319,7 +319,7 @@ std::optional generateViewChain( Block header; /// Get list of columns we get from select query. - if (select_context->getSettingsRef().allow_experimental_analyzer) + if (select_context->getSettingsRef().enable_analyzer) header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context); else header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock(); @@ -613,7 +613,7 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat QueryPipelineBuilder pipeline; - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (local_context->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions().ignoreAccessCheck()); pipeline = interpreter.buildQueryPipeline(); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index c5dfe3e6e5f..4262716b406 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1904,14 +1904,14 @@ void TCPHandler::receiveQuery() /// Settings /// - /// FIXME: Remove when allow_experimental_analyzer will become obsolete. + /// FIXME: Remove when enable_analyzer will become obsolete. /// Analyzer became Beta in 24.3 and started to be enabled by default. /// We have to disable it for ourselves to make sure we don't have different settings on /// different servers. 
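A sketch of what the validateAnalyzerSettings() change above rejects, assuming a session that runs with the analyzer enabled: the analyzer setting may not be flipped inside a subquery of the same query.

SET enable_analyzer = 1;
-- Expected to be rejected with INCORRECT_QUERY
-- ("Setting 'enable_analyzer' is changed in the subquery"):
SELECT * FROM (SELECT 1 SETTINGS enable_analyzer = 0);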
if (query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && client_info.getVersionNumber() < VersionNumber(23, 3, 0) - && !passed_settings.allow_experimental_analyzer.changed) - passed_settings.set("allow_experimental_analyzer", false); + && !passed_settings.enable_analyzer.changed) + passed_settings.set("enable_analyzer", false); auto settings_changes = passed_settings.changes(); query_kind = query_context->getClientInfo().query_kind; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 7891042bb96..2843ff5a14e 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -806,7 +806,7 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) metadata.select = SelectQueryDescription::getSelectQueryFromASTForMatView(select, metadata.refresh != nullptr, context); Block as_select_sample; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { as_select_sample = InterpreterSelectQueryAnalyzer::getSampleBlock(select->clone(), context); } diff --git a/src/Storages/IStorageCluster.cpp b/src/Storages/IStorageCluster.cpp index 63467603d16..b485ab9cbb5 100644 --- a/src/Storages/IStorageCluster.cpp +++ b/src/Storages/IStorageCluster.cpp @@ -125,7 +125,7 @@ void IStorageCluster::read( Block sample_block; ASTPtr query_to_send = query_info.query; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(query_info.query, context, SelectQueryOptions(processed_stage)); } diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index 71b1a0a73c9..c93da7ca512 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -378,7 +378,7 @@ void StorageLiveView::writeBlock(StorageLiveView & live_view, Block && block, Ch QueryPipelineBuilder builder; - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (local_context->getSettingsRef().enable_analyzer) { auto select_description = buildSelectQueryTreeDescription(select_query_description.inner_query, local_context); if (select_description.dependent_table_node) @@ -475,7 +475,7 @@ Block StorageLiveView::getHeader() const if (!sample_block) { - if (live_view_context->getSettingsRef().allow_experimental_analyzer) + if (live_view_context->getSettingsRef().enable_analyzer) { sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(select_query_description.select_query, live_view_context, @@ -519,7 +519,7 @@ ASTPtr StorageLiveView::getInnerBlocksQuery() auto & select_with_union_query = select_query_description.select_query->as(); auto blocks_query = select_with_union_query.list_of_selects->children.at(0)->clone(); - if (!live_view_context->getSettingsRef().allow_experimental_analyzer) + if (!live_view_context->getSettingsRef().enable_analyzer) { /// Rewrite inner query with right aliases for JOIN. 
/// It cannot be done in constructor or startup() because InterpreterSelectQuery may access table, @@ -543,7 +543,7 @@ MergeableBlocksPtr StorageLiveView::collectMergeableBlocks(ContextPtr local_cont QueryPipelineBuilder builder; - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (local_context->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(select_query_description.inner_query, local_context, @@ -599,7 +599,7 @@ QueryPipelineBuilder StorageLiveView::completeQuery(Pipes pipes) QueryPipelineBuilder builder; - if (block_context->getSettingsRef().allow_experimental_analyzer) + if (block_context->getSettingsRef().enable_analyzer) { auto select_description = buildSelectQueryTreeDescription(select_query_description.select_query, block_context); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ce27ad24e10..01ef0a409b0 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -7097,7 +7097,7 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage( SelectQueryInfo &) const { /// with new analyzer, Planner make decision regarding parallel replicas usage, and so about processing stage on reading - if (!query_context->getSettingsRef().allow_experimental_analyzer) + if (!query_context->getSettingsRef().enable_analyzer) { const auto & settings = query_context->getSettingsRef(); if (query_context->canUseParallelReplicasCustomKey()) diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp index 6e963066f39..fccb20c2b0a 100644 --- a/src/Storages/MergeTree/RPNBuilder.cpp +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -33,7 +33,7 @@ namespace ErrorCodes namespace { -void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool allow_experimental_analyzer, bool legacy = false) +void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool enable_analyzer, bool legacy = false) { switch (node.type) { @@ -45,18 +45,18 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o /// If it was created from ASTLiteral, then result_name can be an alias. /// We need to convert value back to string here. 
const auto * column_const = typeid_cast(node.column.get()); - if (column_const && !allow_experimental_analyzer) + if (column_const && !enable_analyzer) writeString(applyVisitor(FieldVisitorToString(), column_const->getField()), out); else writeString(node.result_name, out); break; } case ActionsDAG::ActionType::ALIAS: - appendColumnNameWithoutAlias(*node.children.front(), out, allow_experimental_analyzer, legacy); + appendColumnNameWithoutAlias(*node.children.front(), out, enable_analyzer, legacy); break; case ActionsDAG::ActionType::ARRAY_JOIN: writeCString("arrayJoin(", out); - appendColumnNameWithoutAlias(*node.children.front(), out, allow_experimental_analyzer, legacy); + appendColumnNameWithoutAlias(*node.children.front(), out, enable_analyzer, legacy); writeChar(')', out); break; case ActionsDAG::ActionType::FUNCTION: @@ -75,17 +75,17 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o writeCString(", ", out); first = false; - appendColumnNameWithoutAlias(*arg, out, allow_experimental_analyzer, legacy); + appendColumnNameWithoutAlias(*arg, out, enable_analyzer, legacy); } writeChar(')', out); } } } -String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool allow_experimental_analyzer, bool legacy = false) +String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool enable_analyzer, bool legacy = false) { WriteBufferFromOwnString out; - appendColumnNameWithoutAlias(node, out, allow_experimental_analyzer, legacy); + appendColumnNameWithoutAlias(node, out, enable_analyzer, legacy); return std::move(out.str()); } @@ -131,7 +131,7 @@ std::string RPNBuilderTreeNode::getColumnName() const if (ast_node) return ast_node->getColumnNameWithoutAlias(); else - return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().allow_experimental_analyzer); + return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().enable_analyzer); } std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const @@ -144,7 +144,7 @@ std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const } else { - return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().allow_experimental_analyzer, true /*legacy*/); + return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().enable_analyzer, true /*legacy*/); } } diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 04e6d6676d1..da427ca4a6a 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -397,7 +397,7 @@ void StorageBuffer::read( /// TODO: Find a way to support projections for StorageBuffer if (processed_stage > QueryProcessingStage::FetchColumns) { - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (local_context->getSettingsRef().enable_analyzer) { auto storage = std::make_shared( getStorageID(), diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 3e38ddf830a..f1fe70b4594 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -833,7 +833,7 @@ void StorageDistributed::read( const auto & settings = local_context->getSettingsRef(); - if (settings.allow_experimental_analyzer) + if (settings.enable_analyzer) { StorageID remote_storage_id = StorageID::createEmpty(); if (!remote_table_function_ptr) @@ -1057,7 +1057,7 @@ static std::optional getFilterFromQuery(const ASTPtr & ast, ContextP QueryPlan plan; SelectQueryOptions options; options.only_analyze = true; - if 
(context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast, context, options); plan = std::move(interpreter).extractQueryPlan(); @@ -1611,7 +1611,7 @@ ClusterPtr StorageDistributed::skipUnusedShards( const StorageSnapshotPtr & storage_snapshot, ContextPtr local_context) const { - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (local_context->getSettingsRef().enable_analyzer) return skipUnusedShardsWithAnalyzer(cluster, query_info, storage_snapshot, local_context); const auto & select = query_info.query->as(); diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 0094723e3fd..27bfa6f854c 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -150,7 +150,7 @@ void StorageExecutable::read( for (auto & input_query : input_queries) { QueryPipelineBuilder builder; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) builder = InterpreterSelectQueryAnalyzer(input_query, context, {}).buildQueryPipeline(); else builder = InterpreterSelectWithUnionQuery(input_query, context, {}).buildQueryPipeline(); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 7c268d36a7b..613317b2564 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -590,7 +590,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ auto modified_query_info = getModifiedQueryInfo(modified_context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases); - if (!context->getSettingsRef().allow_experimental_analyzer) + if (!context->getSettingsRef().enable_analyzer) { auto storage_columns = storage_metadata_snapshot->getColumns(); auto syntax_result = TreeRewriter(context).analyzeSelect( @@ -1047,13 +1047,13 @@ void ReadFromMerge::addVirtualColumns( const StorageWithLockAndName & storage_with_lock) const { const auto & [database_name, _, storage, table_name] = storage_with_lock; - bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer; + bool enable_analyzer = context->getSettingsRef().enable_analyzer; /// Add virtual columns if we don't already have them. Block plan_header = child.plan.getCurrentDataStream().header; - if (allow_experimental_analyzer) + if (enable_analyzer) { String table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); @@ -1133,8 +1133,8 @@ QueryPipelineBuilderPtr ReadFromMerge::buildPipeline( if (!builder->initialized()) return builder; - bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer; - if (processed_stage > child.stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) + bool enable_analyzer = context->getSettingsRef().enable_analyzer; + if (processed_stage > child.stage || (enable_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) { /** Materialization is needed, since from distributed storage the constants come materialized. 
* If you do not do this, different types (Const and non-Const) columns will be produced in different threads, @@ -1168,7 +1168,7 @@ ReadFromMerge::ChildPlan ReadFromMerge::createPlanForTable( modified_select.setFinal(); } - bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer; + bool enable_analyzer = modified_context->getSettingsRef().enable_analyzer; auto storage_stage = storage->getQueryProcessingStage(modified_context, processed_stage, @@ -1201,13 +1201,13 @@ ReadFromMerge::ChildPlan ReadFromMerge::createPlanForTable( row_policy_data_opt->addStorageFilter(source_step_with_filter); } } - else if (processed_stage > storage_stage || allow_experimental_analyzer) + else if (processed_stage > storage_stage || enable_analyzer) { /// Maximum permissible parallelism is streams_num modified_context->setSetting("max_threads", streams_num); modified_context->setSetting("max_streams_to_max_threads_ratio", 1); - if (allow_experimental_analyzer) + if (enable_analyzer) { /// Converting query to AST because types might be different in the source table. /// Need to resolve types again. @@ -1479,7 +1479,7 @@ void ReadFromMerge::convertAndFilterSourceStream( auto storage_sample_block = snapshot->metadata->getSampleBlock(); auto pipe_columns = before_block_header.getNamesAndTypesList(); - if (local_context->getSettingsRef().allow_experimental_analyzer) + if (local_context->getSettingsRef().enable_analyzer) { for (const auto & alias : aliases) { @@ -1522,7 +1522,7 @@ void ReadFromMerge::convertAndFilterSourceStream( ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name; - if (local_context->getSettingsRef().allow_experimental_analyzer + if (local_context->getSettingsRef().enable_analyzer && (child.stage != QueryProcessingStage::FetchColumns || dynamic_cast(&snapshot->storage) != nullptr)) convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index f55f672fe5e..ebc88993ee4 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -208,7 +208,7 @@ void StorageMergeTree::read( const auto & settings = local_context->getSettingsRef(); /// reading step for parallel replicas with new analyzer is built in Planner, so don't do it here if (local_context->canUseParallelReplicasOnInitiator() && settings.parallel_replicas_for_non_replicated_merge_tree - && !settings.allow_experimental_analyzer) + && !settings.enable_analyzer) { ClusterProxy::executeQueryWithParallelReplicas( query_plan, getStorageID(), processed_stage, query_info.query, local_context, query_info.storage_limits); @@ -216,7 +216,7 @@ void StorageMergeTree::read( } if (local_context->canUseParallelReplicasCustomKey() && settings.parallel_replicas_for_non_replicated_merge_tree - && !settings.allow_experimental_analyzer && local_context->getClientInfo().distributed_depth == 0) + && !settings.enable_analyzer && local_context->getClientInfo().distributed_depth == 0) { if (auto cluster = local_context->getClusterForParallelReplicas(); local_context->canUseParallelReplicasCustomKeyForCluster(*cluster)) @@ -244,7 +244,7 @@ void StorageMergeTree::read( const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree - && (!local_context->getSettingsRef().allow_experimental_analyzer || 
query_info.current_table_chosen_for_reading_with_parallel_replicas); + && (!local_context->getSettingsRef().enable_analyzer || query_info.current_table_chosen_for_reading_with_parallel_replicas); if (auto plan = reader.read( column_names, diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 2d826c6c2df..a3965e7a6d4 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5480,13 +5480,13 @@ void StorageReplicatedMergeTree::read( return; } /// reading step for parallel replicas with new analyzer is built in Planner, so don't do it here - if (local_context->canUseParallelReplicasOnInitiator() && !settings.allow_experimental_analyzer) + if (local_context->canUseParallelReplicasOnInitiator() && !settings.enable_analyzer) { readParallelReplicasImpl(query_plan, column_names, query_info, local_context, processed_stage); return; } - if (local_context->canUseParallelReplicasCustomKey() && !settings.allow_experimental_analyzer + if (local_context->canUseParallelReplicasCustomKey() && !settings.enable_analyzer && local_context->getClientInfo().distributed_depth == 0) { if (auto cluster = local_context->getClusterForParallelReplicas(); @@ -5555,7 +5555,7 @@ void StorageReplicatedMergeTree::readLocalImpl( const size_t num_streams) { const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && (!local_context->getSettingsRef().allow_experimental_analyzer + && (!local_context->getSettingsRef().enable_analyzer || query_info.current_table_chosen_for_reading_with_parallel_replicas); auto plan = reader.read( diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 878998ebf12..dcb5ef2ae77 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -164,7 +164,7 @@ void StorageView::read( auto options = SelectQueryOptions(QueryProcessingStage::Complete, 0, false, query_info.settings_limit_offset_done); - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context, storage_snapshot), options, column_names); interpreter.addStorageLimits(*query_info.storage_limits); diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index d674f054632..16eccfd7343 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -172,7 +172,7 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType /// with subqueries it's possible that new analyzer will be enabled in ::read method /// of underlying storage when all other parts of infra are not ready for it /// (built with old analyzer). 
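For the MergeTree/ReplicatedMergeTree hunks above, the comments note that with the analyzer enabled the parallel-replicas reading step is built by the Planner rather than inside the storage's read(). A hedged sketch of the settings involved; the table name `t` and the cluster name 'default' are placeholders, not part of this patch:

SET enable_analyzer = 1;
SET allow_experimental_parallel_reading_from_replicas = 1,
    max_parallel_replicas = 3,
    cluster_for_parallel_replicas = 'default',
    parallel_replicas_for_non_replicated_merge_tree = 1;
SELECT count() FROM t;  -- 't' stands for any non-replicated MergeTree table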
- context_copy->setSetting("allow_experimental_analyzer", false); + context_copy->setSetting("enable_analyzer", false); auto syntax_analyzer_result = TreeRewriter(context_copy).analyze(ast, columns); ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy); auto dag = analyzer.getActionsDAG(false); diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 65bf6768b1b..a2b1704f24b 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1197,7 +1197,7 @@ StorageWindowView::StorageWindowView( , fire_signal_timeout_s(context_->getSettingsRef().wait_for_window_view_fire_signal_timeout.totalSeconds()) , clean_interval_usec(context_->getSettingsRef().window_view_clean_interval.totalMicroseconds()) { - if (context_->getSettingsRef().allow_experimental_analyzer) + if (context_->getSettingsRef().enable_analyzer) disabled_due_to_analyzer = true; if (mode <= LoadingStrictnessLevel::CREATE) @@ -1753,9 +1753,9 @@ StoragePtr StorageWindowView::getTargetTable() const void StorageWindowView::throwIfWindowViewIsDisabled(ContextPtr local_context) const { - if (disabled_due_to_analyzer || (local_context && local_context->getSettingsRef().allow_experimental_analyzer)) + if (disabled_due_to_analyzer || (local_context && local_context->getSettingsRef().enable_analyzer)) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Experimental WINDOW VIEW feature is not supported " - "in the current infrastructure for query analysis (the setting 'allow_experimental_analyzer')"); + "in the current infrastructure for query analysis (the setting 'enable_analyzer')"); } void registerStorageWindowView(StorageFactory & factory) diff --git a/src/TableFunctions/TableFunctionView.cpp b/src/TableFunctions/TableFunctionView.cpp index 57501df6d4d..02a278cf590 100644 --- a/src/TableFunctions/TableFunctionView.cpp +++ b/src/TableFunctions/TableFunctionView.cpp @@ -50,7 +50,7 @@ ColumnsDescription TableFunctionView::getActualTableStructure(ContextPtr context Block sample_block; - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.children[0], context); else sample_block = InterpreterSelectWithUnionQuery::getSampleBlock(create.children[0], context); diff --git a/src/TableFunctions/TableFunctionViewIfPermitted.cpp b/src/TableFunctions/TableFunctionViewIfPermitted.cpp index 935be6c1987..7bae2731525 100644 --- a/src/TableFunctions/TableFunctionViewIfPermitted.cpp +++ b/src/TableFunctions/TableFunctionViewIfPermitted.cpp @@ -114,7 +114,7 @@ bool TableFunctionViewIfPermitted::isPermitted(const ContextPtr & context, const try { - if (context->getSettingsRef().allow_experimental_analyzer) + if (context->getSettingsRef().enable_analyzer) { sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.children[0], context); } diff --git a/tests/config/users.d/analyzer.xml b/tests/config/users.d/analyzer.xml index 4b9764526fa..edba8b8578e 100644 --- a/tests/config/users.d/analyzer.xml +++ b/tests/config/users.d/analyzer.xml @@ -1,7 +1,7 @@ - 0 + 0 diff --git a/tests/integration/helpers/0_common_enable_old_analyzer.xml b/tests/integration/helpers/0_common_enable_old_analyzer.xml index 4b9764526fa..edba8b8578e 100644 --- a/tests/integration/helpers/0_common_enable_old_analyzer.xml +++ b/tests/integration/helpers/0_common_enable_old_analyzer.xml @@ -1,7 +1,7 @@ - 0 + 0 diff --git 
a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 673fc07fe94..5e0352df617 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -4484,7 +4484,7 @@ class ClickHouseInstance: use_old_analyzer = os.environ.get("CLICKHOUSE_USE_OLD_ANALYZER") is not None # If specific version was used there can be no - # allow_experimental_analyzer setting, so do this only if it was + # enable_analyzer setting, so do this only if it was # explicitly requested. if self.tag: use_old_analyzer = False diff --git a/tests/integration/test_analyzer_compatibility/test.py b/tests/integration/test_analyzer_compatibility/test.py index d4ded420c61..2c840154eb5 100644 --- a/tests/integration/test_analyzer_compatibility/test.py +++ b/tests/integration/test_analyzer_compatibility/test.py @@ -51,7 +51,7 @@ def test_two_new_versions(start_cluster): assert ( current.query( """ -SELECT hostname() AS h, getSetting('allow_experimental_analyzer') +SELECT hostname() AS h, getSetting('enable_analyzer') FROM clusterAllReplicas('test_cluster_mixed', system.one) ORDER BY h;""" ) @@ -62,7 +62,7 @@ ORDER BY h;""" analyzer_enabled = current.query( f""" SELECT -DISTINCT Settings['allow_experimental_analyzer'] +DISTINCT Settings['enable_analyzer'] FROM clusterAllReplicas('test_cluster_mixed', system.query_log) WHERE initial_query_id = '{query_id}';""" ) @@ -81,7 +81,7 @@ WHERE initial_query_id = '{query_id}';""" assert ( backward.query( """ -SELECT hostname() AS h, getSetting('allow_experimental_analyzer') +SELECT hostname() AS h, getSetting('enable_analyzer') FROM clusterAllReplicas('test_cluster_mixed', system.one) ORDER BY h;""" ) @@ -92,7 +92,7 @@ ORDER BY h;""" analyzer_enabled = backward.query( f""" SELECT -DISTINCT Settings['allow_experimental_analyzer'] +DISTINCT Settings['enable_analyzer'] FROM clusterAllReplicas('test_cluster_mixed', system.query_log) WHERE initial_query_id = '{query_id}';""" ) diff --git a/tests/integration/test_distributed_type_object/test.py b/tests/integration/test_distributed_type_object/test.py index 360087c9dda..e774876bc8b 100644 --- a/tests/integration/test_distributed_type_object/test.py +++ b/tests/integration/test_distributed_type_object/test.py @@ -89,7 +89,7 @@ def test_distributed_type_object(started_cluster): assert ( TSV( node1.query( - "SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id SETTINGS allow_experimental_analyzer = 0" + "SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id SETTINGS enable_analyzer = 0" ) ) == expected diff --git a/tests/integration/test_replicated_merge_tree_replicated_db_ttl/configs/enable_parallel_replicas.xml b/tests/integration/test_replicated_merge_tree_replicated_db_ttl/configs/enable_parallel_replicas.xml index c654074740a..30a0b6276b7 100644 --- a/tests/integration/test_replicated_merge_tree_replicated_db_ttl/configs/enable_parallel_replicas.xml +++ b/tests/integration/test_replicated_merge_tree_replicated_db_ttl/configs/enable_parallel_replicas.xml @@ -1,7 +1,7 @@ - 1 + 1 1 default 100 diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py index e5c0a072ff9..4800ab798bf 100644 --- a/tests/integration/test_settings_profile/test.py +++ b/tests/integration/test_settings_profile/test.py @@ -459,7 +459,7 @@ def test_show_profiles(): query_possible_response = [ "CREATE SETTINGS PROFILE `default`\n", - "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n", 
+ "CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n", ] assert ( instance.query("SHOW CREATE SETTINGS PROFILE default") @@ -470,7 +470,7 @@ def test_show_profiles(): "CREATE SETTINGS PROFILE `default`\n" "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" "CREATE SETTINGS PROFILE `xyz`\n", - "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n" + "CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n" "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" "CREATE SETTINGS PROFILE `xyz`\n", ] @@ -482,7 +482,7 @@ def test_show_profiles(): "CREATE SETTINGS PROFILE `xyz`\n" ) expected_access_analyzer = ( - "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n" + "CREATE SETTINGS PROFILE `default` SETTINGS enable_analyzer = true\n" "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" "CREATE SETTINGS PROFILE `xyz`\n" ) diff --git a/tests/performance/storage_join_direct_join.xml b/tests/performance/storage_join_direct_join.xml index 2fc63c2c926..987500bb4f0 100644 --- a/tests/performance/storage_join_direct_join.xml +++ b/tests/performance/storage_join_direct_join.xml @@ -15,5 +15,5 @@ SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null; SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null SETTINGS - allow_experimental_analyzer=1 - \ No newline at end of file + enable_analyzer=1 + diff --git a/tests/performance/uniq_to_count.xml b/tests/performance/uniq_to_count.xml index 64e4cf1cc0d..57b0085d8fa 100644 --- a/tests/performance/uniq_to_count.xml +++ b/tests/performance/uniq_to_count.xml @@ -3,6 +3,6 @@ select uniq(number) from (select number from numbers(1000000) group by number) - select uniq(number) from (select DISTINCT number from numbers(1000000)) SETTINGS allow_experimental_analyzer=1 - select uniq(number) from (select number from numbers(1000000) group by number) SETTINGS allow_experimental_analyzer=1 + select uniq(number) from (select DISTINCT number from numbers(1000000)) SETTINGS enable_analyzer=1 + select uniq(number) from (select number from numbers(1000000) group by number) SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/00116_storage_set.sql b/tests/queries/0_stateless/00116_storage_set.sql index c156b387c8f..36ad015c69e 100644 --- a/tests/queries/0_stateless/00116_storage_set.sql +++ b/tests/queries/0_stateless/00116_storage_set.sql @@ -28,8 +28,8 @@ RENAME TABLE set2 TO set; SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set; create table tab (x String) engine = MergeTree order by x as select 'Hello'; -SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=0; -SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings allow_experimental_analyzer=1; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=0; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=1; DROP TABLE tab; DROP TABLE set; diff --git a/tests/queries/0_stateless/00202_cross_join.sql b/tests/queries/0_stateless/00202_cross_join.sql index 8d62c56b3f1..ea327817a58 100644 --- a/tests/queries/0_stateless/00202_cross_join.sql +++ b/tests/queries/0_stateless/00202_cross_join.sql @@ -3,5 +3,6 @@ SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN SET join_algorithm = 'auto'; SELECT x, y FROM (SELECT number AS x FROM 
system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2; -SET allow_experimental_analyzer = 1; +-- Just to test the renamed setting, we use `enable_analyzer` instead of `allow_experimental_analyzer` here. +SET enable_analyzer = 1; SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2; diff --git a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql index e3634141613..b72162be49a 100644 --- a/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql +++ b/tests/queries/0_stateless/00257_shard_no_aggregates_and_constant_keys.sql @@ -1,6 +1,6 @@ -- Tags: shard -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; set enable_positional_arguments = 0; select 40 as z from (select * from system.numbers limit 3) group by z; diff --git a/tests/queries/0_stateless/00313_const_totals_extremes.sh b/tests/queries/0_stateless/00313_const_totals_extremes.sh index 539a19817e2..6267d5066dd 100755 --- a/tests/queries/0_stateless/00313_const_totals_extremes.sh +++ b/tests/queries/0_stateless/00313_const_totals_extremes.sh @@ -4,10 +4,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1"; -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1234567890123 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSON"; -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toFloat32(1.23) AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSONCompact"; +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1"; +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1234567890123 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSON"; +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toFloat32(1.23) AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSONCompact"; -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDate('2010-01-01') AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1"; -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDateTime('2010-01-01 01:02:03', 'UTC') AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSON"; -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1.1 AS k, count() GROUP BY k WITH TOTALS SETTINGS allow_experimental_analyzer = 1 FORMAT JSONCompact"; +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDate('2010-01-01') AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1"; +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT toDateTime('2010-01-01 01:02:03', 'UTC') AS k, count() GROUP
BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSON"; +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1.1 AS k, count() GROUP BY k WITH TOTALS SETTINGS enable_analyzer = 1 FORMAT JSONCompact"; diff --git a/tests/queries/0_stateless/00331_final_and_prewhere_condition_ver_column.sql b/tests/queries/0_stateless/00331_final_and_prewhere_condition_ver_column.sql index 78a58a979d1..a3c499f1688 100644 --- a/tests/queries/0_stateless/00331_final_and_prewhere_condition_ver_column.sql +++ b/tests/queries/0_stateless/00331_final_and_prewhere_condition_ver_column.sql @@ -1,8 +1,8 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- https://github.com/ClickHouse/ClickHouse/issues/45804 -CREATE TABLE myRMT( +CREATE TABLE myRMT( key Int64, someCol String, ver DateTime diff --git a/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql b/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql index 118e50c35e0..8a93c0a9d26 100644 --- a/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql +++ b/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql @@ -1,6 +1,6 @@ SET any_join_distinct_right_table_keys = 1; SET joined_subquery_requires_alias = 0; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select x, y from (select 1 as x, 2 as y, x, y); select x, y from (select 1 as x, 1 as y, x, y); diff --git a/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql b/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql index e7b59bc3f7f..941d5ecd135 100644 --- a/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql +++ b/tests/queries/0_stateless/00378_json_quote_64bit_integers.sql @@ -2,7 +2,7 @@ SET output_format_write_statistics = 0; SET extremes = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET output_format_json_quote_64bit_integers = 1; SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSON; diff --git a/tests/queries/0_stateless/00445_join_nullable_keys.sql b/tests/queries/0_stateless/00445_join_nullable_keys.sql index 774594f90f3..bec0c76eb5f 100644 --- a/tests/queries/0_stateless/00445_join_nullable_keys.sql +++ b/tests/queries/0_stateless/00445_join_nullable_keys.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET join_use_nulls = 0; SET any_join_distinct_right_table_keys = 1; diff --git a/tests/queries/0_stateless/00490_with_select.sql b/tests/queries/0_stateless/00490_with_select.sql index c803cf1d3ad..d63f0ca4f31 100644 --- a/tests/queries/0_stateless/00490_with_select.sql +++ b/tests/queries/0_stateless/00490_with_select.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2); select `pow(four, 2)`, `pow(two, 2)` from (with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2)); diff --git a/tests/queries/0_stateless/00597_push_down_predicate_long.reference b/tests/queries/0_stateless/00597_push_down_predicate_long.reference index 2c46edc98bf..55b7cdd3c64 100644 --- a/tests/queries/0_stateless/00597_push_down_predicate_long.reference +++ b/tests/queries/0_stateless/00597_push_down_predicate_long.reference @@ -390,7 +390,7 @@ ANY LEFT JOIN ) USING (id) WHERE id = 1 
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1 -------- allow_experimental_analyzer=1 ------- +------- enable_analyzer=1 ------- 1 2000-01-01 test string 1 1 2000-01-01 test string 1 1 SELECT id, @@ -454,7 +454,7 @@ FROM ) WHERE id = 1 2000-01-01 1 test string 1 1 -------- allow_experimental_analyzer=1 ------- +------- enable_analyzer=1 ------- 1 2000-01-01 test string 1 1 2000-01-01 test string 1 1 SELECT date, @@ -484,7 +484,7 @@ ANY LEFT JOIN ) AS b USING (id) WHERE b.id = 1 2000-01-01 1 test string 1 1 2000-01-01 test string 1 1 -------- allow_experimental_analyzer=1 ------- +------- enable_analyzer=1 ------- 1 2000-01-01 test string 1 1 2000-01-01 test string 1 1 SELECT id, @@ -510,7 +510,7 @@ ANY LEFT JOIN ) AS b USING (date, id) WHERE b.date = toDate(\'2000-01-01\') 1 2000-01-01 test string 1 1 -------- allow_experimental_analyzer=1 ------- +------- enable_analyzer=1 ------- 2000-01-01 1 test string 1 1 SELECT date, @@ -593,7 +593,7 @@ SEMI LEFT JOIN ) AS r USING (id) WHERE r.id = 1 2000-01-01 1 test string 1 1 2000-01-01 test string 1 1 -------- allow_experimental_analyzer=1 ------- +------- enable_analyzer=1 ------- 1 2000-01-01 test string 1 1 2000-01-01 test string 1 1 SELECT value + t1.value AS expr FROM diff --git a/tests/queries/0_stateless/00597_push_down_predicate_long.sql b/tests/queries/0_stateless/00597_push_down_predicate_long.sql index caf6edd7372..f79b24abe56 100644 --- a/tests/queries/0_stateless/00597_push_down_predicate_long.sql +++ b/tests/queries/0_stateless/00597_push_down_predicate_long.sql @@ -110,9 +110,9 @@ SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHER -- Optimize predicate expression with join query EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1; -SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS allow_experimental_analyzer=0; -SELECT '------- allow_experimental_analyzer=1 -------'; -SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS allow_experimental_analyzer=1; +SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=0; +SELECT '------- enable_analyzer=1 -------'; +SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1; SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1; @@ -123,30 +123,30 @@ SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING -- Optimize predicate expression with join and nested subquery EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1; -SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS allow_experimental_analyzer=0; -SELECT '------- allow_experimental_analyzer=1 -------'; -SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS allow_experimental_analyzer=1; +SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=0; +SELECT '------- enable_analyzer=1 -------'; 
+SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=1; -- Optimize predicate expression with join query and qualified EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1; -SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS allow_experimental_analyzer=0; -SELECT '------- allow_experimental_analyzer=1 -------'; -SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS allow_experimental_analyzer=1; +SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=0; +SELECT '------- enable_analyzer=1 -------'; +SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=1; -- Compatibility test EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01'); -SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS allow_experimental_analyzer=0; -SELECT '------- allow_experimental_analyzer=1 -------'; -SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS allow_experimental_analyzer=1; +SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=0; +SELECT '------- enable_analyzer=1 -------'; +SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1; SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1; -- Explain with join subquery EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1; -SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS allow_experimental_analyzer=0; -SELECT '------- allow_experimental_analyzer=1 -------'; -SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS allow_experimental_analyzer=1; +SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=0; +SELECT '------- enable_analyzer=1 -------'; +SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=1; -- issue 20497 EXPLAIN SYNTAX SELECT value + t1.value AS expr 
FROM (SELECT t0.value, t1.value FROM test_00597 AS t0 FULL JOIN test_00597 AS t1 USING date) WHERE expr < 3; diff --git a/tests/queries/0_stateless/00621_regression_for_in_operator.reference b/tests/queries/0_stateless/00621_regression_for_in_operator.reference index b68f550a742..ab8a1499f6d 100644 --- a/tests/queries/0_stateless/00621_regression_for_in_operator.reference +++ b/tests/queries/0_stateless/00621_regression_for_in_operator.reference @@ -17,7 +17,7 @@ QUERY id: 0 LIST id: 5, nodes: 2 COLUMN id: 6, column_name: g, result_type: String, source_id: 3 CONSTANT id: 7, constant_value: Tuple_(\'5\', \'6\'), constant_value_type: Tuple(String, String) - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 2 2 QUERY id: 0 @@ -42,4 +42,4 @@ QUERY id: 0 LIST id: 11, nodes: 2 COLUMN id: 8, column_name: g, result_type: String, source_id: 3 CONSTANT id: 12, constant_value: \'6\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/00621_regression_for_in_operator.sql b/tests/queries/0_stateless/00621_regression_for_in_operator.sql index db1bcb4a39a..0d8c4933c65 100644 --- a/tests/queries/0_stateless/00621_regression_for_in_operator.sql +++ b/tests/queries/0_stateless/00621_regression_for_in_operator.sql @@ -12,13 +12,13 @@ SELECT count() FROM regression_for_in_operator_view WHERE g IN ('5','6'); SET optimize_min_equality_disjunction_chain_length = 1; SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6'; -SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1; -EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1; +SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; SET optimize_min_equality_disjunction_chain_length = 3; SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6'; -SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1; -EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS allow_experimental_analyzer = 1; +SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; DROP TABLE regression_for_in_operator_view; DROP TABLE regression_for_in_operator; diff --git a/tests/queries/0_stateless/00674_join_on_syntax.sql b/tests/queries/0_stateless/00674_join_on_syntax.sql index 9ff26db1536..584e43b88bf 100644 --- a/tests/queries/0_stateless/00674_join_on_syntax.sql +++ b/tests/queries/0_stateless/00674_join_on_syntax.sql @@ -1,5 +1,5 @@ SET joined_subquery_requires_alias = 0; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; drop table if exists tab1; drop table if exists tab2; diff --git a/tests/queries/0_stateless/00700_decimal_compare.sql b/tests/queries/0_stateless/00700_decimal_compare.sql index beadbdade16..1b901e04c28 100644 --- a/tests/queries/0_stateless/00700_decimal_compare.sql +++ b/tests/queries/0_stateless/00700_decimal_compare.sql @@ -27,7 +27,7 @@ SELECT a > 0, b > 0, g > 0 FROM decimal ORDER BY a DESC; SELECT a, g 
> toInt8(0), g > toInt16(0), g > toInt32(0), g > toInt64(0) FROM decimal ORDER BY a; SELECT a, g > toUInt8(0), g > toUInt16(0), g > toUInt32(0), g > toUInt64(0) FROM decimal ORDER BY a; SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42); -SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS allow_experimental_analyzer = 1; +SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS enable_analyzer = 1; SELECT a, b, g FROM decimal WHERE a > 0 AND a <= 42 AND b <= 42 AND g <= 42; SELECT d, e, f from decimal WHERE d > 0 AND d < 1 AND e > 0 AND e < 1 AND f > 0 AND f < 1; diff --git a/tests/queries/0_stateless/00722_inner_join.sql b/tests/queries/0_stateless/00722_inner_join.sql index 0d5a543b99d..aa590f470ae 100644 --- a/tests/queries/0_stateless/00722_inner_join.sql +++ b/tests/queries/0_stateless/00722_inner_join.sql @@ -1,6 +1,6 @@ -- Tags: no-parallel -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS one; CREATE TABLE one(dummy UInt8) ENGINE = Memory; diff --git a/tests/queries/0_stateless/00736_disjunction_optimisation.reference b/tests/queries/0_stateless/00736_disjunction_optimisation.reference index f28dcacef0e..4e7f2e09dea 100644 --- a/tests/queries/0_stateless/00736_disjunction_optimisation.reference +++ b/tests/queries/0_stateless/00736_disjunction_optimisation.reference @@ -49,7 +49,7 @@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: s, result_type: UInt64, source_id: 3 CONSTANT id: 14, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 1 21 1 22 1 23 @@ -99,7 +99,7 @@ QUERY id: 0 LIST id: 14, nodes: 2 COLUMN id: 15, column_name: s, result_type: UInt64, source_id: 3 CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 1 1 21 1 1 1 1 1 22 0 1 1 1 1 23 0 0 1 @@ -152,7 +152,7 @@ QUERY id: 0 CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) JOIN TREE TABLE id: 3, alias: __table1, table_name: default.bug - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 21 1 22 1 23 1 @@ -185,7 +185,7 @@ QUERY id: 0 CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) JOIN TREE TABLE id: 3, alias: __table1, table_name: default.bug - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 1 21 1 22 1 23 @@ -237,7 +237,7 @@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: s, result_type: UInt64, source_id: 3 CONSTANT id: 14, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 1 21 1 22 1 23 @@ -287,7 +287,7 @@ QUERY id: 0 LIST id: 14, nodes: 2 COLUMN id: 15, column_name: s, result_type: UInt64, source_id: 3 CONSTANT id: 16, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 1 1 21 1 1 1 1 1 22 0 1 1 1 1 23 0 0 1 @@ -348,7 +348,7 @@ QUERY id: 0 CONSTANT id: 21, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) JOIN TREE TABLE id: 3, alias: __table1, table_name: 
default.bug - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 21 1 22 1 23 1 @@ -381,7 +381,7 @@ QUERY id: 0 CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) JOIN TREE TABLE id: 3, alias: __table1, table_name: default.bug - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 21 1 22 1 23 1 @@ -414,4 +414,4 @@ QUERY id: 0 CONSTANT id: 6, constant_value: Tuple_(UInt64_21, UInt64_22, UInt64_23), constant_value_type: Tuple(UInt8, UInt8, UInt8) JOIN TREE TABLE id: 3, alias: __table1, table_name: default.bug - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/00736_disjunction_optimisation.sql b/tests/queries/0_stateless/00736_disjunction_optimisation.sql index e5bfc81f7ae..38f77622bd4 100644 --- a/tests/queries/0_stateless/00736_disjunction_optimisation.sql +++ b/tests/queries/0_stateless/00736_disjunction_optimisation.sql @@ -7,43 +7,43 @@ set optimize_min_equality_disjunction_chain_length = 2; select * from bug; select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23); -select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1; -explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;; +select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1; +explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23); -select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;; -explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;; +select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; +explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug; -select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; -explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; +select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; select s, (s=21 or s=22 or s=23) from bug; -select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; -explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; +select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; set optimize_min_equality_disjunction_chain_length = 3; select * from bug; select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23); -select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS 
allow_experimental_analyzer = 1; -explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;; +select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1; +explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23); -select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;; -explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS allow_experimental_analyzer = 1;; +select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; +explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug; -select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; -explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; +select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; select s, (s=21 or s=22 or s=23) from bug; -select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; -explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS allow_experimental_analyzer = 1;; +select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; select s, (s=21 or 22=s or 23=s) from bug; -select s, (s=21 or 22=s or 23=s) from bug SETTINGS allow_experimental_analyzer = 1;; -explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS allow_experimental_analyzer = 1;; +select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;; +explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;; DROP TABLE bug; diff --git a/tests/queries/0_stateless/00757_enum_defaults_const.sql b/tests/queries/0_stateless/00757_enum_defaults_const.sql index 64271a37473..048c9dee88f 100644 --- a/tests/queries/0_stateless/00757_enum_defaults_const.sql +++ b/tests/queries/0_stateless/00757_enum_defaults_const.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer=0; +SET enable_analyzer=0; select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; diff --git a/tests/queries/0_stateless/00757_enum_defaults_const_analyzer.sql b/tests/queries/0_stateless/00757_enum_defaults_const_analyzer.sql index bf079539019..c202ed630db 100644 --- a/tests/queries/0_stateless/00757_enum_defaults_const_analyzer.sql +++ b/tests/queries/0_stateless/00757_enum_defaults_const_analyzer.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select os_name, 
count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; diff --git a/tests/queries/0_stateless/00800_low_cardinality_join.sql b/tests/queries/0_stateless/00800_low_cardinality_join.sql index ecb5194253c..fc5f5d1860c 100644 --- a/tests/queries/0_stateless/00800_low_cardinality_join.sql +++ b/tests/queries/0_stateless/00800_low_cardinality_join.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; set joined_subquery_requires_alias = 0; select * from (select dummy as val from system.one) any left join (select dummy as val from system.one) using val; diff --git a/tests/queries/0_stateless/00800_low_cardinality_merge_join.sql.j2 b/tests/queries/0_stateless/00800_low_cardinality_merge_join.sql.j2 index 8e2037480c7..a5f5e9b33b8 100644 --- a/tests/queries/0_stateless/00800_low_cardinality_merge_join.sql.j2 +++ b/tests/queries/0_stateless/00800_low_cardinality_merge_join.sql.j2 @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; {% for join_algorithm in ['partial_merge', 'full_sorting_merge', 'grace_hash'] -%} diff --git a/tests/queries/0_stateless/00818_alias_bug_4110.sql b/tests/queries/0_stateless/00818_alias_bug_4110.sql index d057bacc908..1242a3b605d 100644 --- a/tests/queries/0_stateless/00818_alias_bug_4110.sql +++ b/tests/queries/0_stateless/00818_alias_bug_4110.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select s.a as a, s.a + 1 as b from (select 10 as a) s; select s.a + 1 as a, s.a as b from (select 10 as a) s; diff --git a/tests/queries/0_stateless/00818_inner_join_bug_3567.sql b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql index 2dec5ce3221..3b4b3cd77d3 100644 --- a/tests/queries/0_stateless/00818_inner_join_bug_3567.sql +++ b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql @@ -1,5 +1,5 @@ SET output_format_pretty_color = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS table1; DROP TABLE IF EXISTS table2; diff --git a/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql b/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql index 3c0246619da..8b3d3ec1bc1 100644 --- a/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql +++ b/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql @@ -1,6 +1,6 @@ SET any_join_distinct_right_table_keys = 1; SET joined_subquery_requires_alias = 0; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM (SELECT 1 AS a, 'x' AS b) join (SELECT 1 as a, 'y' as b) using a; SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a; diff --git a/tests/queries/0_stateless/00820_multiple_joins.sql b/tests/queries/0_stateless/00820_multiple_joins.sql index 5c7a7bebb0b..b4197570cfa 100644 --- a/tests/queries/0_stateless/00820_multiple_joins.sql +++ b/tests/queries/0_stateless/00820_multiple_joins.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS table1; DROP TABLE IF EXISTS table2; diff --git a/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql index 3da2cad4eff..538e6967ff3 100644 --- 
a/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql +++ b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS table1; DROP TABLE IF EXISTS table2; diff --git a/tests/queries/0_stateless/00830_join_overwrite.sql b/tests/queries/0_stateless/00830_join_overwrite.sql index bc3662528db..f51a152ea46 100644 --- a/tests/queries/0_stateless/00830_join_overwrite.sql +++ b/tests/queries/0_stateless/00830_join_overwrite.sql @@ -13,7 +13,7 @@ SELECT joinGet('kv_overwrite', 'v', toUInt32(1)); CREATE TABLE t2 (k UInt32, v UInt32) ENGINE = Memory; INSERT INTO t2 VALUES (1, 2), (1, 3); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 0; SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 1; diff --git a/tests/queries/0_stateless/00835_if_generic_case.sql b/tests/queries/0_stateless/00835_if_generic_case.sql index 051fad14603..e06e610dbb5 100644 --- a/tests/queries/0_stateless/00835_if_generic_case.sql +++ b/tests/queries/0_stateless/00835_if_generic_case.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z; SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z; diff --git a/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql b/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql index 2f6cca0284c..275968236ae 100644 --- a/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql +++ b/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql @@ -1,5 +1,5 @@ SET any_join_distinct_right_table_keys = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t1_00848; DROP TABLE IF EXISTS t2_00848; diff --git a/tests/queries/0_stateless/00849_multiple_comma_join_2.sql b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql index 6530f691087..250dd4a47ab 100644 --- a/tests/queries/0_stateless/00849_multiple_comma_join_2.sql +++ b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql @@ -12,7 +12,7 @@ CREATE TABLE t2 (a UInt32, b Nullable(Int32)) ENGINE = Memory; CREATE TABLE t3 (a UInt32, b Nullable(Int32)) ENGINE = Memory; CREATE TABLE t4 (a UInt32, b Nullable(Int32)) ENGINE = Memory; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; --- EXPLAIN SYNTAX (old AST based optimization) SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( @@ -62,56 +62,56 @@ SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explai --- EXPLAIN QUERY TREE SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS enable_analyzer = 1; SELECT countIf(explain like 
'%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN 
t3 CROSS JOIN t4) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS enable_analyzer = 1; SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( - EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS allow_experimental_analyzer = 1; + EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS enable_analyzer = 1; INSERT INTO t1 values (1,1), (2,2), (3,3), (4,4); INSERT INTO t2 values (1,1), (1, Null); INSERT INTO t3 values (1,1), (1, Null); INSERT INTO t4 values (1,1), (1, Null); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 'SELECT * FROM t1, t2'; SELECT * FROM t1, t2 diff --git a/tests/queries/0_stateless/00855_join_with_array_join.sql b/tests/queries/0_stateless/00855_join_with_array_join.sql index c278ff0738a..2d5bc6cb1f6 100644 --- a/tests/queries/0_stateless/00855_join_with_array_join.sql +++ b/tests/queries/0_stateless/00855_join_with_array_join.sql @@ -1,5 +1,5 @@ SET joined_subquery_requires_alias = 0; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT ax, c FROM (SELECT [1,2] ax, 0 c) ARRAY JOIN ax JOIN (SELECT 0 c) USING (c); SELECT ax, c FROM (SELECT [3,4] ax, 0 c) JOIN (SELECT 0 c) USING (c) ARRAY JOIN ax; diff --git a/tests/queries/0_stateless/00858_issue_4756.sql b/tests/queries/0_stateless/00858_issue_4756.sql index 9eacd5ef364..3f6ab037c02 100644 --- a/tests/queries/0_stateless/00858_issue_4756.sql +++ b/tests/queries/0_stateless/00858_issue_4756.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; set distributed_product_mode = 'local'; drop table if exists shard1; diff --git a/tests/queries/0_stateless/00897_flatten.sql b/tests/queries/0_stateless/00897_flatten.sql index 0d67a1708fd..45d1a225a49 100644 --- a/tests/queries/0_stateless/00897_flatten.sql +++ b/tests/queries/0_stateless/00897_flatten.sql @@ -2,7 +2,7 @@ SELECT flatten(arrayJoin([[[1, 2, 3], [4, 5]], [[6], [7, 8]]])); SELECT arrayFlatten(arrayJoin([[[[]], [[1], [], [2, 3]]], [[[4]]]])); SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(x), range(x)), range(x)), range(number))) FROM numbers(6); SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(z), range(y)), range(x)), range(number))) FROM numbers(6); -SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS allow_experimental_analyzer=1; +SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS enable_analyzer=1; SELECT arrayFlatten([[[1, 2, 3], [4, 5]], [[6], [7, 8]]]); SELECT flatten([[[]]]); SELECT arrayFlatten([]); diff --git a/tests/queries/0_stateless/00940_order_by_read_in_order_query_plan.sql b/tests/queries/0_stateless/00940_order_by_read_in_order_query_plan.sql index 532539206f7..0421cadf868 100644 --- 
a/tests/queries/0_stateless/00940_order_by_read_in_order_query_plan.sql +++ b/tests/queries/0_stateless/00940_order_by_read_in_order_query_plan.sql @@ -1,4 +1,4 @@ -SET optimize_read_in_order = 1, query_plan_read_in_order = 1, allow_experimental_analyzer = 0; +SET optimize_read_in_order = 1, query_plan_read_in_order = 1, enable_analyzer = 0; drop table if exists tab; drop table if exists tab2; diff --git a/tests/queries/0_stateless/01000_subquery_requires_alias.sql b/tests/queries/0_stateless/01000_subquery_requires_alias.sql index 3cd522a8389..38ba1798dc1 100644 --- a/tests/queries/0_stateless/01000_subquery_requires_alias.sql +++ b/tests/queries/0_stateless/01000_subquery_requires_alias.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET joined_subquery_requires_alias = 1; SELECT * FROM (SELECT 1 as A, 2 as B) X diff --git a/tests/queries/0_stateless/01013_totals_without_aggregation.sql b/tests/queries/0_stateless/01013_totals_without_aggregation.sql index ab656cd92b5..08be45754bb 100644 --- a/tests/queries/0_stateless/01013_totals_without_aggregation.sql +++ b/tests/queries/0_stateless/01013_totals_without_aggregation.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 11 AS n GROUP BY n WITH TOTALS; SELECT 12 AS n GROUP BY n WITH ROLLUP; diff --git a/tests/queries/0_stateless/01018_ambiguous_column.sql b/tests/queries/0_stateless/01018_ambiguous_column.sql index e9e754ed7a8..b2e0e8fc522 100644 --- a/tests/queries/0_stateless/01018_ambiguous_column.sql +++ b/tests/queries/0_stateless/01018_ambiguous_column.sql @@ -1,5 +1,5 @@ SET output_format_pretty_color=1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select * from system.one cross join system.one; select * from system.one cross join system.one r; diff --git a/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql b/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql index f17f3ac63b0..de436f1b28e 100644 --- a/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql +++ b/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql @@ -1,5 +1,5 @@ SET send_logs_level = 'fatal'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SET allow_experimental_window_view = 1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; set allow_deprecated_database_ordinary=1; diff --git a/tests/queries/0_stateless/01048_window_view_parser.sql b/tests/queries/0_stateless/01048_window_view_parser.sql index adcb4a6364d..bcd80e37c00 100644 --- a/tests/queries/0_stateless/01048_window_view_parser.sql +++ b/tests/queries/0_stateless/01048_window_view_parser.sql @@ -1,7 +1,7 @@ -- Tags: no-parallel SET send_logs_level = 'fatal'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SET allow_experimental_window_view = 1; DROP DATABASE IF EXISTS test_01048; set allow_deprecated_database_ordinary=1; diff --git a/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 b/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 index 2ebe5c373b2..872bb448027 100644 --- a/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 +++ b/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 @@ -1,5 +1,5 @@ -- { echoOn } -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; {% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%} SET join_algorithm = '{{ join_algorithm }}'; SET join_use_nulls = 0; @@ -19,17 +19,17 @@ str_r 
LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -49,17 +49,17 @@ str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l 
FULL JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) String String LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) String String LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -79,17 +79,17 @@ str_r String str_l String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String LowCardinality(String) LowCardinality(String) String String str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String LowCardinality(String) LowCardinality(String) String String str_l str_l -- @@ -109,17 +109,17 @@ str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l 
RIGHT JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -313,7 +313,7 @@ SELECT r.lc, materialize(r.lc), toTypeName(r.lc), toTypeName(materialize(r.lc)) str str LowCardinality(Nullable(String)) LowCardinality(Nullable(String)) str_r str_r LowCardinality(Nullable(String)) LowCardinality(Nullable(String)) \N \N LowCardinality(Nullable(String)) LowCardinality(Nullable(String)) -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; {% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%} SET join_algorithm = '{{ join_algorithm }}'; SET join_use_nulls = 0; @@ -333,17 +333,17 @@ str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) 
LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -363,17 +363,17 @@ str_r String str_l String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) String String LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String String String String String str_l str_l -- @@ -393,17 +393,17 @@ str_r String str_l String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String SELECT 
toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String LowCardinality(String) LowCardinality(String) String String str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String String String String String str_l str_l -- @@ -423,13 +423,13 @@ str_r Nullable(String) str_l Nullable(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str_r str_r Nullable(String) Nullable(String) \N \N SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str str diff --git a/tests/queries/0_stateless/01049_join_low_card_bug_long.sql.j2 b/tests/queries/0_stateless/01049_join_low_card_bug_long.sql.j2 index 64ec34ef1bf..7e7b5cb1fed 100644 --- a/tests/queries/0_stateless/01049_join_low_card_bug_long.sql.j2 +++ b/tests/queries/0_stateless/01049_join_low_card_bug_long.sql.j2 @@ -23,9 +23,9 @@ INSERT INTO nl VALUES (0, 'str'), (2, 'str_l'); INSERT INTO l_lc VALUES (0, 'str'), (2, 'str_l'); -- { echoOn } -{% for allow_experimental_analyzer in [0, 1] -%} +{% for enable_analyzer in [0, 1] -%} -SET allow_experimental_analyzer = {{ allow_experimental_analyzer }}; +SET enable_analyzer = {{ enable_analyzer }}; {% for join_algorithm in ['default', 'partial_merge', 'parallel_hash'] -%} SET join_algorithm = '{{ join_algorithm }}'; diff --git 
a/tests/queries/0_stateless/01050_window_view_parser_tumble.sql b/tests/queries/0_stateless/01050_window_view_parser_tumble.sql index c52a6fefacb..9c4d312bab6 100644 --- a/tests/queries/0_stateless/01050_window_view_parser_tumble.sql +++ b/tests/queries/0_stateless/01050_window_view_parser_tumble.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SET allow_experimental_window_view = 1; DROP TABLE IF EXISTS mt; diff --git a/tests/queries/0_stateless/01051_window_view_parser_hop.sql b/tests/queries/0_stateless/01051_window_view_parser_hop.sql index b37e4ed3095..569e3b2a6bf 100644 --- a/tests/queries/0_stateless/01051_window_view_parser_hop.sql +++ b/tests/queries/0_stateless/01051_window_view_parser_hop.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SET allow_experimental_window_view = 1; DROP TABLE IF EXISTS mt; diff --git a/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh b/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh index 4f8482558c8..c473bf766b0 100755 --- a/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh +++ b/tests/queries/0_stateless/01052_window_view_proc_tumble_to_now.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=0" + "--enable_analyzer=0" ) $CLICKHOUSE_CLIENT "${opts[@]}" < 3465735.3 ORDER BY k SETTINGS allow_experimental_analyzer=1; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k SETTINGS enable_analyzer=1; 3465735.9028 3465735.9028 3465736.595947 @@ -11,7 +11,7 @@ SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY numbe 3465734.169932 3465734.863079 3465735.556226 -SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS allow_experimental_analyzer=1; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=1; 3465734.169932 3465734.863079 3465735.556226 @@ -19,7 +19,7 @@ SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 0 1 4 -SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS allow_experimental_analyzer=1; +SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=1; 0 1 4 diff --git a/tests/queries/0_stateless/01300_group_by_other_keys_having.sql b/tests/queries/0_stateless/01300_group_by_other_keys_having.sql index 203e8322ad9..a73b122a9f9 100644 --- a/tests/queries/0_stateless/01300_group_by_other_keys_having.sql +++ b/tests/queries/0_stateless/01300_group_by_other_keys_having.sql @@ -1,16 +1,16 @@ set optimize_group_by_function_keys = 1; set optimize_syntax_fuse_functions = 0; -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; -- { echoOn } SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; -SELECT round(avg(log(2) * number), 
6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k SETTINGS allow_experimental_analyzer=1; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k SETTINGS enable_analyzer=1; SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; -SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS allow_experimental_analyzer=1; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=1; SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; -SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS allow_experimental_analyzer=1; +SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=1; -- { echoOff } diff --git a/tests/queries/0_stateless/01323_redundant_functions_in_order_by.reference b/tests/queries/0_stateless/01323_redundant_functions_in_order_by.reference index d47f12ff4d1..c2c37cc4de6 100644 --- a/tests/queries/0_stateless/01323_redundant_functions_in_order_by.reference +++ b/tests/queries/0_stateless/01323_redundant_functions_in_order_by.reference @@ -65,7 +65,7 @@ QUERY id: 0 SORT id: 12, sort_direction: ASCENDING, with_fill: 0 EXPRESSION COLUMN id: 7, column_name: number, result_type: UInt64, source_id: 8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT groupArray(x) FROM ( @@ -99,7 +99,7 @@ QUERY id: 0 SORT id: 12, sort_direction: ASCENDING, with_fill: 0 EXPRESSION COLUMN id: 7, column_name: number, result_type: UInt64, source_id: 8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT groupArray(x) FROM ( @@ -141,7 +141,7 @@ QUERY id: 0 SORT id: 15, sort_direction: ASCENDING, with_fill: 0 EXPRESSION COLUMN id: 7, column_name: number, result_type: UInt64, source_id: 8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT key, a, @@ -203,7 +203,7 @@ QUERY id: 0 SORT id: 25, sort_direction: ASCENDING, with_fill: 0 EXPRESSION COLUMN id: 26, column_name: key, result_type: UInt64, source_id: 5 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT key, a @@ -229,7 +229,7 @@ QUERY id: 0 SORT id: 7, sort_direction: ASCENDING, with_fill: 0 EXPRESSION COLUMN id: 4, column_name: a, result_type: UInt8, source_id: 3 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT key, a @@ -262,7 +262,7 @@ QUERY id: 0 LIST id: 11, nodes: 2 COLUMN id: 2, column_name: key, result_type: UInt64, source_id: 3 COLUMN id: 4, column_name: a, result_type: UInt8, source_id: 3 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 QUERY id: 0 PROJECTION COLUMNS key UInt64 @@ -285,7 +285,7 @@ QUERY id: 0 SORT id: 10, sort_direction: ASCENDING, with_fill: 0 
EXPRESSION COLUMN id: 2, column_name: key, result_type: UInt64, source_id: 3 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 QUERY id: 0 PROJECTION COLUMNS t1.id UInt64 @@ -314,7 +314,7 @@ QUERY id: 0 SORT id: 14, sort_direction: ASCENDING, with_fill: 0 EXPRESSION COLUMN id: 15, column_name: id, result_type: UInt64, source_id: 5 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 [0,1,2] [0,1,2] [0,1,2] diff --git a/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql b/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql index 738ad581e3d..fb1eed1666e 100644 --- a/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql +++ b/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql @@ -8,37 +8,37 @@ INSERT INTO test SELECT number, number, toString(number), number from numbers(4) set optimize_redundant_functions_in_order_by = 1; SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); -SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)) SETTINGS allow_experimental_analyzer=1; +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)) SETTINGS enable_analyzer=1; SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); -SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))) SETTINGS allow_experimental_analyzer=1; +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))) SETTINGS enable_analyzer=1; SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); -SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x) SETTINGS allow_experimental_analyzer=1; +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x) SETTINGS enable_analyzer=1; SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; -SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key SETTINGS allow_experimental_analyzer=1; +SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key SETTINGS enable_analyzer=1; SELECT key, a FROM test ORDER BY key, a, exp(key + a); -SELECT key, a FROM test ORDER BY key, a, exp(key + a) SETTINGS allow_experimental_analyzer=1; +SELECT key, a FROM test ORDER BY key, a, exp(key + a) SETTINGS enable_analyzer=1; SELECT key, a FROM test ORDER BY key, exp(key + a); -SELECT key, a FROM test ORDER BY key, exp(key + a) SETTINGS allow_experimental_analyzer=1; +SELECT key, a FROM test ORDER BY key, exp(key + a) SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); -EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)) settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)) settings enable_analyzer=1; EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); -EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))) settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))) settings 
enable_analyzer=1; EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); -EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x) settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x) settings enable_analyzer=1; EXPLAIN SYNTAX SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; -EXPLAIN QUERY TREE run_passes=1 SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key settings enable_analyzer=1; EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, a, exp(key + a); -EXPLAIN QUERY TREE run_passes=1 SELECT key, a FROM test ORDER BY key, a, exp(key + a) settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT key, a FROM test ORDER BY key, a, exp(key + a) settings enable_analyzer=1; EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, exp(key + a); -EXPLAIN QUERY TREE run_passes=1 SELECT key, a FROM test ORDER BY key, exp(key + a) settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE run_passes=1 SELECT key FROM test GROUP BY key ORDER BY avg(a), key settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT key, a FROM test ORDER BY key, exp(key + a) settings enable_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT key FROM test GROUP BY key ORDER BY avg(a), key settings enable_analyzer=1; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; CREATE TABLE t1 (id UInt64) ENGINE = MergeTree() ORDER BY id; CREATE TABLE t2 (id UInt64) ENGINE = MergeTree() ORDER BY id; -EXPLAIN QUERY TREE run_passes=1 SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id ORDER BY t1.id, t2.id settings allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id ORDER BY t1.id, t2.id settings enable_analyzer=1; set optimize_redundant_functions_in_order_by = 0; diff --git a/tests/queries/0_stateless/01353_low_cardinality_join_types.sql b/tests/queries/0_stateless/01353_low_cardinality_join_types.sql index 93953f1d74a..2aa42f33fd3 100644 --- a/tests/queries/0_stateless/01353_low_cardinality_join_types.sql +++ b/tests/queries/0_stateless/01353_low_cardinality_join_types.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; set join_algorithm = 'hash'; select '-'; @@ -75,7 +75,7 @@ from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s fr full join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 using k order by js1.k, js2.k; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; set join_algorithm = 'hash'; select '-'; diff --git a/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.reference b/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.reference index 6de0a5be0a5..2d16e71f099 100644 --- a/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.reference +++ b/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.reference @@ -21,4 +21,4 @@ QUERY id: 0 GROUP BY LIST id: 10, nodes: 1 COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 - 
SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql b/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql index a868b38b4d7..08ca9ed3c2d 100644 --- a/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql +++ b/tests/queries/0_stateless/01376_GROUP_BY_injective_elimination_dictGet.sql @@ -38,7 +38,7 @@ EXPLAIN QUERY TREE SELECT dictGet('dictdb_01376.dict_exists', 'value', number) as val FROM numbers(2) GROUP BY val -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; DROP DICTIONARY dictdb_01376.dict_exists; DROP TABLE dictdb_01376.table_for_dict; diff --git a/tests/queries/0_stateless/01428_nullable_asof_join.sql b/tests/queries/0_stateless/01428_nullable_asof_join.sql index f07a26edd97..41f6ba8a03d 100644 --- a/tests/queries/0_stateless/01428_nullable_asof_join.sql +++ b/tests/queries/0_stateless/01428_nullable_asof_join.sql @@ -18,13 +18,13 @@ SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(ma FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b USING(pk, dt) -ORDER BY a.dt SETTINGS allow_experimental_analyzer = 0; +ORDER BY a.dt SETTINGS enable_analyzer = 0; SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b USING(pk, dt) -ORDER BY a.dt SETTINGS allow_experimental_analyzer = 1; +ORDER BY a.dt SETTINGS enable_analyzer = 1; SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a @@ -70,25 +70,25 @@ SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(ma FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a ASOF JOIN (SELECT 1 as pk, 2 as dt) b USING(pk, dt) -ORDER BY a.dt SETTINGS allow_experimental_analyzer = 0; +ORDER BY a.dt SETTINGS enable_analyzer = 0; SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a ASOF JOIN (SELECT 1 as pk, 2 as dt) b USING(pk, dt) -ORDER BY a.dt SETTINGS allow_experimental_analyzer = 1; +ORDER BY a.dt SETTINGS enable_analyzer = 1; SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b USING(pk, dt) -ORDER BY a.dt SETTINGS allow_experimental_analyzer = 0; +ORDER BY a.dt SETTINGS enable_analyzer = 0; SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b USING(pk, dt) -ORDER BY a.dt SETTINGS allow_experimental_analyzer = 1; +ORDER BY a.dt SETTINGS enable_analyzer = 1; SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), 
toTypeName(materialize(b.dt)) FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference index 2920b387aa2..2b08db1f27f 100644 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference @@ -1,10 +1,10 @@ ===http=== -{"query":"select 1 from remote('127.0.0.2', system, one) settings allow_experimental_analyzer = 1 format Null\n","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1} +{"query":"select 1 from remote('127.0.0.2', system, one) settings enable_analyzer = 1 format Null\n","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1} {"query":"DESC TABLE system.one","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1} {"query":"SELECT 1 AS `1` FROM `system`.`one` AS `__table1`","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1} {"query":"DESC TABLE system.one","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1} {"query":"SELECT 1 AS `1` FROM `system`.`one` AS `__table1`","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1} -{"query":"select 1 from remote('127.0.0.2', system, one) settings allow_experimental_analyzer = 1 format Null\n","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1} +{"query":"select 1 from remote('127.0.0.2', system, one) settings enable_analyzer = 1 format Null\n","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1} {"total spans":"3","unique spans":"3","unique non-zero parent spans":"3"} {"initial query spans with proper parent":"2"} {"unique non-empty tracestate values":"1"} diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index e19810dd7f1..2b6da6132ed 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function check_log { ${CLICKHOUSE_CLIENT} --format=JSONEachRow -nq " -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; system flush logs; -- Show queries sorted by start time. @@ -75,7 +75,7 @@ select uniqExact(value) "'"'"unique non-empty tracestate values"'"'" # Generate some random trace id so that the previous runs of the test do not interfere. echo "===http===" -trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4())))) settings allow_experimental_analyzer = 1") +trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4())))) settings enable_analyzer = 1") # Check that the HTTP traceparent is read, and then passed through `remote` # table function.
We expect 4 queries -- one initial, one SELECT and two @@ -85,7 +85,7 @@ ${CLICKHOUSE_CURL} \ --header "traceparent: 00-$trace_id-0000000000000073-01" \ --header "tracestate: some custom state" "$CLICKHOUSE_URL" \ --get \ - --data-urlencode "query=select 1 from remote('127.0.0.2', system, one) settings allow_experimental_analyzer = 1 format Null" + --data-urlencode "query=select 1 from remote('127.0.0.2', system, one) settings enable_analyzer = 1 format Null" check_log diff --git a/tests/queries/0_stateless/01476_right_full_join_switch.sql b/tests/queries/0_stateless/01476_right_full_join_switch.sql index dfbdec47e1f..7c8c3157844 100644 --- a/tests/queries/0_stateless/01476_right_full_join_switch.sql +++ b/tests/queries/0_stateless/01476_right_full_join_switch.sql @@ -13,7 +13,7 @@ INSERT INTO nr VALUES (2, NULL); SET join_use_nulls = 0; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- t.x is supertype for `x` from left and right since `x` is inside `USING`. SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY t.x; @@ -28,7 +28,7 @@ SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l FULL JOIN t SELECT '-'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; -- t.x is supertype for `x` from left and right since `x` is inside `USING`. SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY t.x; diff --git a/tests/queries/0_stateless/01477_lc_in_merge_join_left_key.sql.j2 b/tests/queries/0_stateless/01477_lc_in_merge_join_left_key.sql.j2 index 6eafd41b411..b43db222e43 100644 --- a/tests/queries/0_stateless/01477_lc_in_merge_join_left_key.sql.j2 +++ b/tests/queries/0_stateless/01477_lc_in_merge_join_left_key.sql.j2 @@ -10,11 +10,11 @@ CREATE TABLE nr (`x` Nullable(UInt32), `s` Nullable(String)) ENGINE = Memory; INSERT INTO t VALUES (1, 'l'); INSERT INTO nr VALUES (2, NULL); -{% for allow_experimental_analyzer in [0, 1] -%} +{% for enable_analyzer in [0, 1] -%} -SET allow_experimental_analyzer = {{ allow_experimental_analyzer }}; +SET enable_analyzer = {{ enable_analyzer }}; -{% if allow_experimental_analyzer -%} +{% if enable_analyzer -%} SELECT '- analyzer -'; {% endif -%} diff --git a/tests/queries/0_stateless/01479_cross_join_9855.sql b/tests/queries/0_stateless/01479_cross_join_9855.sql index 9dcf209a1cd..19cd0ab18fd 100644 --- a/tests/queries/0_stateless/01479_cross_join_9855.sql +++ b/tests/queries/0_stateless/01479_cross_join_9855.sql @@ -2,8 +2,8 @@ SET cross_to_inner_join_rewrite = 1; SELECT count() FROM numbers(4) AS n1, numbers(3) AS n2 -WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS allow_experimental_analyzer=0; +WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS enable_analyzer=0; SELECT count() FROM numbers(4) AS n1, numbers(3) AS n2, numbers(6) AS n3 -WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS allow_experimental_analyzer=0; +WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS enable_analyzer=0; diff --git a/tests/queries/0_stateless/01508_explain_header.sql b/tests/queries/0_stateless/01508_explain_header.sql index a9f876068aa..03452e4bdac 100644 --- a/tests/queries/0_stateless/01508_explain_header.sql +++ b/tests/queries/0_stateless/01508_explain_header.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; explain header = 1 select 1 as x; diff --git a/tests/queries/0_stateless/01556_explain_select_with_union_query.sql
b/tests/queries/0_stateless/01556_explain_select_with_union_query.sql index bbd96ef5c69..d8278e1887a 100644 --- a/tests/queries/0_stateless/01556_explain_select_with_union_query.sql +++ b/tests/queries/0_stateless/01556_explain_select_with_union_query.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET union_default_mode = 'DISTINCT'; set enable_global_with_statement = 1; diff --git a/tests/queries/0_stateless/01561_clickhouse_client_stage.reference b/tests/queries/0_stateless/01561_clickhouse_client_stage.reference index 2631199cbab..b6200464293 100644 --- a/tests/queries/0_stateless/01561_clickhouse_client_stage.reference +++ b/tests/queries/0_stateless/01561_clickhouse_client_stage.reference @@ -1,15 +1,15 @@ -execute: --allow_experimental_analyzer=1 +execute: --enable_analyzer=1 "foo" 1 -execute: --allow_experimental_analyzer=1 --stage fetch_columns +execute: --enable_analyzer=1 --stage fetch_columns "__table1.dummy" 0 -execute: --allow_experimental_analyzer=1 --stage with_mergeable_state +execute: --enable_analyzer=1 --stage with_mergeable_state "1_UInt8" 1 -execute: --allow_experimental_analyzer=1 --stage with_mergeable_state_after_aggregation +execute: --enable_analyzer=1 --stage with_mergeable_state_after_aggregation "1_UInt8" 1 -execute: --allow_experimental_analyzer=1 --stage complete +execute: --enable_analyzer=1 --stage complete "foo" 1 diff --git a/tests/queries/0_stateless/01561_clickhouse_client_stage.sh b/tests/queries/0_stateless/01561_clickhouse_client_stage.sh index 99267458421..79c9bb6ae10 100755 --- a/tests/queries/0_stateless/01561_clickhouse_client_stage.sh +++ b/tests/queries/0_stateless/01561_clickhouse_client_stage.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=1" + "--enable_analyzer=1" ) function execute_query() diff --git a/tests/queries/0_stateless/01591_window_functions.sql b/tests/queries/0_stateless/01591_window_functions.sql index b821ba13721..db727599d2c 100644 --- a/tests/queries/0_stateless/01591_window_functions.sql +++ b/tests/queries/0_stateless/01591_window_functions.sql @@ -1,6 +1,6 @@ -- Tags: long -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- Too slow SET max_bytes_before_external_sort = 0; diff --git a/tests/queries/0_stateless/01600_detach_permanently.sh b/tests/queries/0_stateless/01600_detach_permanently.sh index 679e9a749ee..be405f8e7d9 100755 --- a/tests/queries/0_stateless/01600_detach_permanently.sh +++ b/tests/queries/0_stateless/01600_detach_permanently.sh @@ -111,8 +111,8 @@ clickhouse_local "INSERT INTO db_ordinary.src SELECT * FROM numbers(10)" clickhouse_local "SELECT if(count() = 10, 'MV is working', 'MV failed') FROM db_ordinary.src_mv_with_inner" clickhouse_local "DETACH VIEW db_ordinary.src_mv_with_inner PERMANENTLY; INSERT INTO db_ordinary.src SELECT * FROM numbers(10)" --stacktrace -clickhouse_local "SELECT if(count() = 10, 'MV can be detached permanently', 'MV detach failed') FROM db_ordinary.src_mv_with_inner SETTINGS allow_experimental_analyzer = 0" 2>&1 | grep -c "db_ordinary.src_mv_with_inner does not exist" -clickhouse_local "SELECT if(count() = 10, 'MV can be detached permanently', 'MV detach failed') FROM db_ordinary.src_mv_with_inner SETTINGS allow_experimental_analyzer = 1" 2>&1 | grep -c "Unknown table expression identifier 'db_ordinary.src_mv_with_inner'" +clickhouse_local "SELECT if(count() = 10, 'MV can be detached permanently', 'MV detach failed') FROM db_ordinary.src_mv_with_inner SETTINGS enable_analyzer = 0" 2>&1 | grep -c "db_ordinary.src_mv_with_inner does not exist" +clickhouse_local "SELECT if(count() = 10, 'MV can be detached permanently', 'MV detach failed') FROM db_ordinary.src_mv_with_inner SETTINGS enable_analyzer = 1" 2>&1 | grep -c "Unknown table expression identifier 'db_ordinary.src_mv_with_inner'" ## Quite silly: ATTACH MATERIALIZED VIEW don't work with short syntax (w/o select), but i can attach it using ATTACH TABLE ... 
clickhouse_local "ATTACH TABLE db_ordinary.src_mv_with_inner" diff --git a/tests/queries/0_stateless/01622_constraints_simple_optimization.reference b/tests/queries/0_stateless/01622_constraints_simple_optimization.reference index 84c872856ff..64a9db37a68 100644 --- a/tests/queries/0_stateless/01622_constraints_simple_optimization.reference +++ b/tests/queries/0_stateless/01622_constraints_simple_optimization.reference @@ -52,7 +52,7 @@ QUERY id: 0 LIST id: 5, nodes: 2 COLUMN id: 6, column_name: c, result_type: Int64, source_id: 3 CONSTANT id: 7, constant_value: UInt64_100, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM constraint_test_constants WHERE c > 100 @@ -70,7 +70,7 @@ QUERY id: 0 LIST id: 5, nodes: 2 COLUMN id: 6, column_name: c, result_type: Int64, source_id: 3 CONSTANT id: 7, constant_value: UInt64_100, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM constraint_test_constants QUERY id: 0 @@ -81,4 +81,4 @@ QUERY id: 0 FUNCTION id: 2, function_name: count, function_type: aggregate, result_type: UInt64 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.constraint_test_constants - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/01622_constraints_simple_optimization.sql b/tests/queries/0_stateless/01622_constraints_simple_optimization.sql index acde02e2c67..e549467de02 100644 --- a/tests/queries/0_stateless/01622_constraints_simple_optimization.sql +++ b/tests/queries/0_stateless/01622_constraints_simple_optimization.sql @@ -101,10 +101,10 @@ SELECT count() FROM constraint_test_constants WHERE 11 <= a; ---> assumption -> EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100); -- EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100); ---> the order of the generated checks is not consistent EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100); -EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c > 100); -EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c > 100) SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c > 100) SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c <= 100); -EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 
100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c <= 100) SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c <= 100) SETTINGS enable_analyzer = 1; DROP TABLE constraint_test_constants; diff --git a/tests/queries/0_stateless/01622_constraints_where_optimization.reference b/tests/queries/0_stateless/01622_constraints_where_optimization.reference index 3f6e8211f1a..09a6dd3d0e7 100644 --- a/tests/queries/0_stateless/01622_constraints_where_optimization.reference +++ b/tests/queries/0_stateless/01622_constraints_where_optimization.reference @@ -11,7 +11,7 @@ QUERY id: 0 TABLE id: 3, alias: __table1, table_name: default.t_constraints_where WHERE CONSTANT id: 4, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM t_constraints_where WHERE 0 @@ -25,7 +25,7 @@ QUERY id: 0 TABLE id: 3, alias: __table1, table_name: default.t_constraints_where WHERE CONSTANT id: 4, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM t_constraints_where WHERE 0 @@ -39,7 +39,7 @@ QUERY id: 0 TABLE id: 3, alias: __table1, table_name: default.t_constraints_where WHERE CONSTANT id: 4, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM t_constraints_where WHERE b < 8 @@ -57,7 +57,7 @@ QUERY id: 0 LIST id: 5, nodes: 2 COLUMN id: 6, column_name: b, result_type: UInt32, source_id: 3 CONSTANT id: 7, constant_value: UInt64_8, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM t_constraints_where PREWHERE (b > 20) OR (b < 8) @@ -75,7 +75,7 @@ QUERY id: 0 LIST id: 5, nodes: 2 COLUMN id: 6, column_name: b, result_type: UInt32, source_id: 3 CONSTANT id: 7, constant_value: UInt64_8, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT count() FROM t_constraints_where QUERY id: 0 @@ -86,4 +86,4 @@ QUERY id: 0 FUNCTION id: 2, function_name: count, function_type: aggregate, result_type: UInt64 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.t_constraints_where - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/01622_constraints_where_optimization.sql b/tests/queries/0_stateless/01622_constraints_where_optimization.sql index d41b1988bdd..63803ec8ce6 100644 --- a/tests/queries/0_stateless/01622_constraints_where_optimization.sql +++ b/tests/queries/0_stateless/01622_constraints_where_optimization.sql @@ -9,15 +9,15 @@ CREATE TABLE t_constraints_where(a UInt32, b UInt32, CONSTRAINT c1 ASSUME b >= 5 INSERT INTO t_constraints_where VALUES (1, 7); EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b > 15; -- assumption -> 0 -EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b > 15 SETTINGS allow_experimental_analyzer = 1; -- assumption -> 0 +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b > 15 SETTINGS enable_analyzer = 1; -- assumption -> 0 EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b = 20; -- assumption -> 0 -EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b = 20 SETTINGS 
allow_experimental_analyzer = 1; -- assumption -> 0 +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b = 20 SETTINGS enable_analyzer = 1; -- assumption -> 0 EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b < 2; -- assumption -> 0 -EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b < 2 SETTINGS allow_experimental_analyzer = 1; -- assumption -> 0 +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b < 2 SETTINGS enable_analyzer = 1; -- assumption -> 0 EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b > 20 OR b < 8; -- assumption -> remove (b < 20) -EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b > 20 OR b < 8 SETTINGS allow_experimental_analyzer = 1; -- assumption -> remove (b < 20) +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b > 20 OR b < 8 SETTINGS enable_analyzer = 1; -- assumption -> remove (b < 20) EXPLAIN SYNTAX SELECT count() FROM t_constraints_where PREWHERE b > 20 OR b < 8; -- assumption -> remove (b < 20) -EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where PREWHERE b > 20 OR b < 8 SETTINGS allow_experimental_analyzer = 1; -- assumption -> remove (b < 20) +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where PREWHERE b > 20 OR b < 8 SETTINGS enable_analyzer = 1; -- assumption -> remove (b < 20) DROP TABLE t_constraints_where; @@ -26,6 +26,6 @@ CREATE TABLE t_constraints_where(a UInt32, b UInt32, CONSTRAINT c1 ASSUME b < 10 INSERT INTO t_constraints_where VALUES (1, 7); EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b = 1 OR b < 18 OR b > 5; -- assumption -> (b < 20) -> 0; -EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b = 1 OR b < 18 OR b > 5 SETTINGS allow_experimental_analyzer = 1; -- assumption -> (b < 20) -> 0; +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b = 1 OR b < 18 OR b > 5 SETTINGS enable_analyzer = 1; -- assumption -> (b < 20) -> 0; DROP TABLE t_constraints_where; diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.reference b/tests/queries/0_stateless/01623_constraints_column_swap.reference index d504a86365b..b49884b4798 100644 --- a/tests/queries/0_stateless/01623_constraints_column_swap.reference +++ b/tests/queries/0_stateless/01623_constraints_column_swap.reference @@ -27,7 +27,7 @@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT cityHash64(a) + 10, b + 3 @@ -57,7 +57,7 @@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT (b AS `cityHash64(a)`) + 10, (b AS b) + 3 @@ -87,7 +87,7 @@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 14, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT (b AS `cityHash64(a)`) + 10, (b AS b) + 3 @@ -117,7 +117,7 @@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 14, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT (b AS `cityHash64(a)`) + 10, (b AS b) + 3 @@ -147,7 +147,7 
@@ QUERY id: 0 LIST id: 12, nodes: 2 COLUMN id: 13, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT (b AS `cityHash64(a)`) + 10 FROM column_swap_test_test WHERE b = 0 @@ -169,7 +169,7 @@ QUERY id: 0 LIST id: 8, nodes: 2 COLUMN id: 9, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 10, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT (cityHash64(a) AS `cityHash64(a)`) + 10, a @@ -201,7 +201,7 @@ QUERY id: 0 LIST id: 13, nodes: 1 COLUMN id: 14, column_name: a, result_type: String, source_id: 7 CONSTANT id: 15, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT (cityHash64(a) AS b) + 10, a @@ -233,7 +233,7 @@ QUERY id: 0 LIST id: 13, nodes: 1 COLUMN id: 14, column_name: a, result_type: String, source_id: 7 CONSTANT id: 15, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a AS `substring(reverse(b), 1, 1)`, a AS a @@ -255,7 +255,7 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: String, source_id: 3 CONSTANT id: 8, constant_value: \'c\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a AS `substring(reverse(b), 1, 1)`, a AS a @@ -277,7 +277,7 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: String, source_id: 3 CONSTANT id: 8, constant_value: \'c\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a AS t1, a AS t2 @@ -299,7 +299,7 @@ QUERY id: 0 LIST id: 6, nodes: 2 COLUMN id: 7, column_name: a, result_type: String, source_id: 3 CONSTANT id: 8, constant_value: \'c\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a AS `substring(reverse(b), 1, 1)` FROM column_swap_test_test WHERE a = \'c\' @@ -317,7 +317,7 @@ QUERY id: 0 LIST id: 5, nodes: 2 COLUMN id: 6, column_name: a, result_type: String, source_id: 3 CONSTANT id: 7, constant_value: \'c\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a FROM t_bad_constraint QUERY id: 0 @@ -328,4 +328,4 @@ QUERY id: 0 COLUMN id: 2, column_name: a, result_type: UInt32, source_id: 3 JOIN TREE TABLE id: 3, alias: __table1, table_name: default.t_bad_constraint - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.sql b/tests/queries/0_stateless/01623_constraints_column_swap.sql index 242be87938d..ccd387c9f8d 100644 --- a/tests/queries/0_stateless/01623_constraints_column_swap.sql +++ b/tests/queries/0_stateless/01623_constraints_column_swap.sql @@ -14,22 +14,22 @@ INSERT INTO column_swap_test_test VALUES (1, 'cat', 1), (2, 'dog', 2); INSERT INTO column_swap_test_test SELECT number AS i, format('test {} kek {}', toString(number), toString(number + 10)) AS a, 1 AS b FROM system.numbers LIMIT 1000000; EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 1; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 1 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 
FROM column_swap_test_test WHERE cityHash64(a) = 1 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test PREWHERE cityHash64(a) = 1; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test PREWHERE cityHash64(a) = 1 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test PREWHERE cityHash64(a) = 1 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 0; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 0; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 0 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 0 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 1; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 1 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 1 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT cityHash64(a) + 10 FROM column_swap_test_test WHERE cityHash64(a) = 0; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10 FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10 FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT cityHash64(a) + 10, a FROM column_swap_test_test WHERE cityHash64(a) = 0; -EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, a FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, a FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT b + 10, a FROM column_swap_test_test WHERE b = 0; -EXPLAIN QUERY TREE SELECT b + 10, a FROM column_swap_test_test WHERE b = 0 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT b + 10, a FROM column_swap_test_test WHERE b = 0 SETTINGS enable_analyzer = 1; DROP TABLE column_swap_test_test; @@ -37,13 +37,13 @@ CREATE TABLE column_swap_test_test (i Int64, a String, b String, CONSTRAINT c1 A INSERT INTO column_swap_test_test SELECT number AS i, toString(number) AS a, format('test {} kek {}', toString(number), toString(number + 10)) b FROM system.numbers LIMIT 1000000; EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE a = 'c'; -EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE a = 'c' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE a = 'c' SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; -EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE 
SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1) AS t1, a AS t2 FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; -EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1) AS t1, a AS t2 FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1) AS t1, a AS t2 FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1) FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; -EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1) FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1) FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS enable_analyzer = 1; DROP TABLE column_swap_test_test; @@ -54,6 +54,6 @@ CREATE TABLE t_bad_constraint(a UInt32, s String, CONSTRAINT c1 ASSUME a = toUIn INSERT INTO t_bad_constraint SELECT number, randomPrintableASCII(100) FROM numbers(10000); EXPLAIN SYNTAX SELECT a FROM t_bad_constraint; -EXPLAIN QUERY TREE SELECT a FROM t_bad_constraint SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_bad_constraint SETTINGS enable_analyzer = 1; DROP TABLE t_bad_constraint; diff --git a/tests/queries/0_stateless/01646_rewrite_sum_if.sql b/tests/queries/0_stateless/01646_rewrite_sum_if.sql index da341a3f7db..fd98a157ac6 100644 --- a/tests/queries/0_stateless/01646_rewrite_sum_if.sql +++ b/tests/queries/0_stateless/01646_rewrite_sum_if.sql @@ -34,7 +34,7 @@ SELECT sum(if(number % 2 == 0, 0, 1)) FROM numbers(100); SELECT sum(if(number % 2 == 0 as cond_expr, 0 as zero_expr, 1 as one_expr) as if_expr), sum(cond_expr), sum(if_expr), one_expr, zero_expr FROM numbers(100); SELECT countIf(number % 2 != 0) FROM numbers(100); -set allow_experimental_analyzer = true; +set enable_analyzer = true; EXPLAIN QUERY TREE run_passes=1 SELECT sumIf(123, number % 2 == 0) FROM numbers(100); EXPLAIN QUERY TREE run_passes=1 SELECT sum(if(number % 2 == 0, 123, 0)) FROM numbers(100); diff --git a/tests/queries/0_stateless/01651_bugs_from_15889.sql b/tests/queries/0_stateless/01651_bugs_from_15889.sql index dd31f2941ef..b98feaa1f65 100644 --- a/tests/queries/0_stateless/01651_bugs_from_15889.sql +++ b/tests/queries/0_stateless/01651_bugs_from_15889.sql @@ -111,5 +111,5 @@ WITH ( ) AS t) SELECT if(dateDiff('second', toDateTime(time_with_microseconds), toDateTime(t)) = -9223372036854775808, 'ok', ''); -set joined_subquery_requires_alias=0, allow_experimental_analyzer=0; -- the query is invalid with a new analyzer +set joined_subquery_requires_alias=0, enable_analyzer=0; -- the query is invalid with a new analyzer SELECT number, number / 2 AS n, j1, j2 FROM remote('127.0.0.{2,3}', system.numbers) GLOBAL ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 1048577) USING (n) LIMIT 10 format Null; diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index 4bd0eb7d908..42cdac8c01f 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -26,13 +26,13 @@ $CLICKHOUSE_CLIENT -q " settings 
enable_optimize_predicate_expression=0" echo "> filter should be pushed down after aggregating, column after aggregation is const" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select s, y, y != 0 from (select sum(x) as s, y from ( select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter\|COLUMN Const(UInt8) -> notEquals(y, 0)" echo "> (analyzer) filter should be pushed down after aggregating, column after aggregation is const" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select s, y, y != 0 from (select sum(x) as s, y from ( select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 @@ -44,14 +44,14 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other condition is aliased" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|ALIAS notEquals(s, 4) :: 4 -> and(notEquals(y, 0), notEquals(s, 4)) UInt8 : 2" echo "> (analyzer) one condition of filter should be pushed down after aggregating, other condition is aliased" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 4 @@ -64,14 +64,14 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other condition is casted" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 4) :: 5, 1 :: 3) -> and(notEquals(y, 0), minus(s, 4))" echo "> (analyzer) one condition of filter should be pushed down after aggregating, other condition is casted" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 4 @@ -84,14 +84,14 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other two conditions are ANDed" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 
0)\|FUNCTION and(minus(s, 8) :: 5, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" echo "> (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 @@ -104,14 +104,14 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 8 and y - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: and(notEquals(y, 0), minus(y, 4))\|ALIAS notEquals(s, 8) :: 4 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" echo "> (analyzer) two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 8 and y - 4 @@ -124,13 +124,13 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> filter is split, one part is filtered before ARRAY JOIN" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select x, y from ( select range(number) as x, number + 1 as y from numbers(3) ) array join x where y != 2 and x != 0" | grep -o "Filter column: and(notEquals(y, 2), notEquals(x, 0))\|ARRAY JOIN x\|Filter column: notEquals(y, 2)" echo "> (analyzer) filter is split, one part is filtered before ARRAY JOIN" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select x, y from ( select range(number) as x, number + 1 as y from numbers(3) ) array join x where y != 2 and x != 0" | @@ -154,14 +154,14 @@ $CLICKHOUSE_CLIENT -q " # settings enable_optimize_predicate_expression=0" echo "> filter is pushed down before Distinct" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select x, y from ( select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10)) ) where y != 2 settings enable_optimize_predicate_expression=0" | grep -o "Distinct\|Filter column: notEquals(y, 2)" echo "> (analyzer) filter is pushed down before Distinct" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select x, y from ( select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10)) ) where y != 2 @@ -174,14 +174,14 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression=0" echo "> filter is pushed down before sorting steps" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 
--convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 --convert_query_to_cnf=0 -q " explain actions = 1 select x, y from ( select number % 2 as x, number % 3 as y from numbers(6) order by y desc ) where x != 0 and y != 0 settings enable_optimize_predicate_expression = 0" | grep -o "Sorting\|Filter column: and(notEquals(x, 0), notEquals(y, 0))" echo "> (analyzer) filter is pushed down before sorting steps" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --convert_query_to_cnf=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 --convert_query_to_cnf=0 -q " explain actions = 1 select x, y from ( select number % 2 as x, number % 3 as y from numbers(6) order by y desc ) where x != 0 and y != 0 @@ -194,14 +194,14 @@ $CLICKHOUSE_CLIENT -q " settings enable_optimize_predicate_expression = 0" echo "> filter is pushed down before TOTALS HAVING and aggregating" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select * from ( select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals ) where y != 2 settings enable_optimize_predicate_expression=0" | grep -o "TotalsHaving\|Aggregating\|Filter column: notEquals(y, 2)" echo "> (analyzer) filter is pushed down before TOTALS HAVING and aggregating" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select * from ( select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals ) where y != 2 @@ -224,14 +224,14 @@ $CLICKHOUSE_CLIENT -q " ) where number != 2 settings enable_optimize_predicate_expression=0" echo "> one condition of filter is pushed down before LEFT JOIN" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select number as a, r.b from numbers(4) as l any left join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | grep -o "Join\|Filter column: notEquals(number, 1)" echo "> (analyzer) one condition of filter is pushed down before LEFT JOIN" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select number as a, r.b from numbers(4) as l any left join ( select number + 2 as b from numbers(3) @@ -243,14 +243,14 @@ $CLICKHOUSE_CLIENT -q " ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | sort echo "> one condition of filter is pushed down before INNER JOIN" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select number as a, r.b from numbers(4) as l any inner join ( select number + 2 as b from numbers(3) ) as r on a = r.b where a != 1 and b != 2 settings enable_optimize_predicate_expression = 0" | grep -o "Join\|Filter column: and(notEquals(number, 1), notEquals(number, 2))\|Filter column: and(notEquals(b, 2), notEquals(b, 1))" echo "> (analyzer) one condition of filter is pushed down before INNER JOIN" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select number as a, r.b from numbers(4) as l any inner join ( select number + 2 as b from numbers(3) @@ -274,12 +274,12 @@ $CLICKHOUSE_CLIENT -q " echo "> function calculation should be done after sorting and limit (if possible)" echo "> Expression should be 
divided into two subexpressions and only one of them should be moved after Sorting" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q " explain actions = 1 select number as n, sipHash64(n) from numbers(100) order by number + 1 limit 5" | sed 's/^ *//g' | grep -o "^ *\(Expression (.*Before ORDER BY.*)\|Sorting\|FUNCTION \w\+\)" echo "> (analyzer) function calculation should be done after sorting and limit (if possible)" echo "> Expression should be divided into two subexpressions and only one of them should be moved after Sorting" -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 -q " +$CLICKHOUSE_CLIENT --enable_analyzer=1 -q " explain actions = 1 select number as n, sipHash64(n) from numbers(100) order by number + 1 limit 5" | sed 's/^ *//g' | grep -o "^ *\(Expression (.*Before ORDER BY.*)\|Sorting\|FUNCTION \w\+\)" echo "> this query should be executed without throwing an exception" diff --git a/tests/queries/0_stateless/01655_plan_optimizations_merge_filters.sql b/tests/queries/0_stateless/01655_plan_optimizations_merge_filters.sql index 2193fc7a8f4..c6620184a43 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations_merge_filters.sql +++ b/tests/queries/0_stateless/01655_plan_optimizations_merge_filters.sql @@ -1,7 +1,7 @@ set query_plan_merge_filters=1; -set allow_experimental_analyzer=1; +set enable_analyzer=1; select explain from (explain actions = 1 select * from (select sum(number) as v, bitAnd(number, 15) as key from numbers(1e8) group by key having v != 0) where key = 7) where explain like '%Filter%' or explain like '%Aggregating%'; -set allow_experimental_analyzer=0; +set enable_analyzer=0; select explain from (explain actions = 1 select * from (select sum(number) as v, bitAnd(number, 15) as key from numbers(1e8) group by key having v != 0) where key = 7) where explain like '%Filter%' or explain like '%Aggregating%'; diff --git a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference index 7c2753124b3..c8b3c5ca954 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.reference @@ -1,23 +1,23 @@ Partial sorting plan optimize_read_in_window_order=0 Sort description: n ASC, x ASC - optimize_read_in_window_order=0, allow_experimental_analyzer=1 + optimize_read_in_window_order=0, enable_analyzer=1 Sort description: n ASC, x ASC optimize_read_in_window_order=1 Prefix sort description: n ASC Result sort description: n ASC, x ASC - optimize_read_in_window_order=1, allow_experimental_analyzer=1 + optimize_read_in_window_order=1, enable_analyzer=1 Prefix sort description: __table1.n ASC Result sort description: __table1.n ASC, __table1.x ASC No sorting plan optimize_read_in_window_order=0 Sort description: n ASC, x ASC - optimize_read_in_window_order=0, allow_experimental_analyzer=1 + optimize_read_in_window_order=0, enable_analyzer=1 Sort description: __table1.n ASC, __table1.x ASC optimize_read_in_window_order=1 Prefix sort description: n ASC, x ASC Result sort description: n ASC, x ASC - optimize_read_in_window_order=1, allow_experimental_analyzer=1 + optimize_read_in_window_order=1, enable_analyzer=1 Prefix sort description: __table1.n ASC, __table1.x ASC Result sort description: __table1.n ASC, __table1.x ASC Complex ORDER BY diff --git 
a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh index 24c8cf5052e..d74ea328d04 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations_optimize_read_in_window_order.sh @@ -19,25 +19,25 @@ $CLICKHOUSE_CLIENT -q "optimize table ${name}_n_x final" echo 'Partial sorting plan' echo ' optimize_read_in_window_order=0' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,allow_experimental_analyzer=0" | grep -i "sort description" -echo ' optimize_read_in_window_order=0, allow_experimental_analyzer=1' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,allow_experimental_analyzer=0" | grep -i "sort description" +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,enable_analyzer=0" | grep -i "sort description" +echo ' optimize_read_in_window_order=0, enable_analyzer=1' +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,enable_analyzer=0" | grep -i "sort description" echo ' optimize_read_in_window_order=1' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=1,allow_experimental_analyzer=0" | grep -i "sort description" -echo ' optimize_read_in_window_order=1, allow_experimental_analyzer=1' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=1,allow_experimental_analyzer=1" | grep -i "sort description" +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=1,enable_analyzer=0" | grep -i "sort description" +echo ' optimize_read_in_window_order=1, enable_analyzer=1' +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n SETTINGS optimize_read_in_order=1,enable_analyzer=1" | grep -i "sort description" echo 'No sorting plan' echo ' optimize_read_in_window_order=0' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,allow_experimental_analyzer=0" | grep -i "sort description" -echo ' optimize_read_in_window_order=0, allow_experimental_analyzer=1' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS 
optimize_read_in_order=0,optimize_read_in_window_order=0,allow_experimental_analyzer=1" | grep -i "sort description" +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,enable_analyzer=0" | grep -i "sort description" +echo ' optimize_read_in_window_order=0, enable_analyzer=1' +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=0,optimize_read_in_window_order=0,enable_analyzer=1" | grep -i "sort description" echo ' optimize_read_in_window_order=1' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=1,allow_experimental_analyzer=0" | grep -i "sort description" -echo ' optimize_read_in_window_order=1, allow_experimental_analyzer=1' -$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=1,allow_experimental_analyzer=1" | grep -i "sort description" +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=1,enable_analyzer=0" | grep -i "sort description" +echo ' optimize_read_in_window_order=1, enable_analyzer=1' +$CLICKHOUSE_CLIENT -q "explain plan actions=1, description=1 select n, sum(x) OVER (ORDER BY n, x ROWS BETWEEN 100 PRECEDING AND CURRENT ROW) from ${name}_n_x SETTINGS optimize_read_in_order=1,enable_analyzer=1" | grep -i "sort description" echo 'Complex ORDER BY' $CLICKHOUSE_CLIENT -q "CREATE TABLE ${name}_complex (unique1 Int32, unique2 Int32, ten Int32) ENGINE=MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192" diff --git a/tests/queries/0_stateless/01671_merge_join_and_constants.sql b/tests/queries/0_stateless/01671_merge_join_and_constants.sql index 7a84bd4e97a..a2153bf0093 100644 --- a/tests/queries/0_stateless/01671_merge_join_and_constants.sql +++ b/tests/queries/0_stateless/01671_merge_join_and_constants.sql @@ -1,5 +1,5 @@ SET output_format_pretty_color=1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS table1; DROP TABLE IF EXISTS table2; diff --git a/tests/queries/0_stateless/01721_join_implicit_cast_long.sql.j2 b/tests/queries/0_stateless/01721_join_implicit_cast_long.sql.j2 index db004c13d96..3c2fa9877db 100644 --- a/tests/queries/0_stateless/01721_join_implicit_cast_long.sql.j2 +++ b/tests/queries/0_stateless/01721_join_implicit_cast_long.sql.j2 @@ -37,8 +37,8 @@ SELECT a, t1.a, t2.a FROM t1 FULL JOIN t2 USING (a) ORDER BY (t1.a, t2.a); {{ is SELECT '= left ='; SELECT a, t1.a, t2.a FROM t1 LEFT JOIN t2 USING (a) ORDER BY (t1.a, t2.a); SELECT '= right ='; -SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS allow_experimental_analyzer = 0; {{ is_implemented(join_algorithm) }} -SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS allow_experimental_analyzer = 1; {{ is_implemented(join_algorithm) }} +SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS enable_analyzer = 0; {{ is_implemented(join_algorithm) }} +SELECT a, 
t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS enable_analyzer = 1; {{ is_implemented(join_algorithm) }} SELECT '= inner ='; SELECT a, t1.a, t2.a FROM t1 INNER JOIN t2 USING (a) ORDER BY (t1.a, t2.a); @@ -119,8 +119,8 @@ SELECT a, t1.a, t2.a FROM t1 FULL JOIN t2 USING (a) ORDER BY (t1.a, t2.a); {{ is SELECT '= left ='; SELECT a, t1.a, t2.a FROM t1 LEFT JOIN t2 USING (a) ORDER BY (t1.a, t2.a); SELECT '= right ='; -SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS allow_experimental_analyzer = 0; {{ is_implemented(join_algorithm) }} -SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS allow_experimental_analyzer = 1; {{ is_implemented(join_algorithm) }} +SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS enable_analyzer = 0; {{ is_implemented(join_algorithm) }} +SELECT a, t1.a, t2.a FROM t1 RIGHT JOIN t2 USING (a) ORDER BY (t1.a, t2.a) SETTINGS enable_analyzer = 1; {{ is_implemented(join_algorithm) }} SELECT '= inner ='; SELECT a, t1.a, t2.a FROM t1 INNER JOIN t2 USING (a) ORDER BY (t1.a, t2.a); @@ -166,8 +166,8 @@ SELECT '= types ='; SELECT any(toTypeName(a)) == 'Nullable(Int32)' AND any(toTypeName(t2.a)) == 'Nullable(Int32)' FROM t1 FULL JOIN t2 USING (a); {{ is_implemented(join_algorithm) }} SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(t2.a)) == 'Nullable(Int32)' FROM t1 LEFT JOIN t2 USING (a); -SELECT any(toTypeName(a)) == 'Nullable(Int32)' AND any(toTypeName(t2.a)) == 'Int32' FROM t1 RIGHT JOIN t2 USING (a) SETTINGS allow_experimental_analyzer = 0; {{ is_implemented(join_algorithm) }} -SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(t2.a)) == 'Int32' FROM t1 RIGHT JOIN t2 USING (a) SETTINGS allow_experimental_analyzer = 1; {{ is_implemented(join_algorithm) }} +SELECT any(toTypeName(a)) == 'Nullable(Int32)' AND any(toTypeName(t2.a)) == 'Int32' FROM t1 RIGHT JOIN t2 USING (a) SETTINGS enable_analyzer = 0; {{ is_implemented(join_algorithm) }} +SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(t2.a)) == 'Int32' FROM t1 RIGHT JOIN t2 USING (a) SETTINGS enable_analyzer = 1; {{ is_implemented(join_algorithm) }} SELECT any(toTypeName(a)) == 'Int32' AND any(toTypeName(t2.a)) == 'Int32' FROM t1 INNER JOIN t2 USING (a); diff --git a/tests/queries/0_stateless/01739_index_hint.reference b/tests/queries/0_stateless/01739_index_hint.reference index 21f4edc0049..b921dc6c1f9 100644 --- a/tests/queries/0_stateless/01739_index_hint.reference +++ b/tests/queries/0_stateless/01739_index_hint.reference @@ -35,9 +35,9 @@ SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_us drop table XXXX; CREATE TABLE XXXX (p Nullable(Int64), k Decimal(76, 39)) ENGINE = MergeTree PARTITION BY toDate(p) ORDER BY k SETTINGS index_granularity = 1, allow_nullable_key = 1; INSERT INTO XXXX FORMAT Values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3); -SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, allow_experimental_analyzer=0; +SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, enable_analyzer=0; 0 -- TODO: optimize_use_implicit_projections ignores indexHint (with analyzer) because source columns might be aliased. -SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, allow_experimental_analyzer=1; +SELECT count() FROM XXXX WHERE indexHint(p = 1.) 
SETTINGS optimize_use_implicit_projections = 1, enable_analyzer=1; 3 drop table XXXX; diff --git a/tests/queries/0_stateless/01739_index_hint.sql b/tests/queries/0_stateless/01739_index_hint.sql index 1eca65f0892..b208063e7c4 100644 --- a/tests/queries/0_stateless/01739_index_hint.sql +++ b/tests/queries/0_stateless/01739_index_hint.sql @@ -38,8 +38,8 @@ CREATE TABLE XXXX (p Nullable(Int64), k Decimal(76, 39)) ENGINE = MergeTree PART INSERT INTO XXXX FORMAT Values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3); -SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, allow_experimental_analyzer=0; +SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, enable_analyzer=0; -- TODO: optimize_use_implicit_projections ignores indexHint (with analyzer) because source columns might be aliased. -SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, allow_experimental_analyzer=1; +SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, enable_analyzer=1; drop table XXXX; diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference index 28dbb9215a8..74a0356b11e 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference @@ -103,7 +103,7 @@ errors -- optimize_skip_unused_shards does not support non-constants select * from dist_01756 where dummy in (select * from system.one); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- this is a constant for analyzer -select * from dist_01756 where dummy in (toUInt8(0)) settings allow_experimental_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01756 where dummy in (toUInt8(0)) settings enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- NOT IN does not supported select * from dist_01756 where dummy not in (0, 2); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- @@ -140,7 +140,7 @@ select * from dist_01756_str where key in ('0', '2'); select * from dist_01756_str where key in (0, 2); 0 -- analyzer does support this -select * from dist_01756_str where key in ('0', Null) settings allow_experimental_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01756_str where key in ('0', Null) settings enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- select * from dist_01756_str where key in (0, 2); -- { serverError TYPE_MISMATCH } -- select * from dist_01756_str where key in (0, Null); -- { serverError TYPE_MISMATCH } diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql index 9a1a00cc0a1..bcbedeb3ada 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql @@ -113,7 +113,7 @@ select 'errors'; -- optimize_skip_unused_shards does not support non-constants select * from dist_01756 where dummy in (select * from system.one); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- this is a constant for analyzer -select * from dist_01756 where dummy in (toUInt8(0)) settings allow_experimental_analyzer=0; -- { serverError 
UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01756 where dummy in (toUInt8(0)) settings enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- NOT IN does not supported select * from dist_01756 where dummy not in (0, 2); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } @@ -146,7 +146,7 @@ create table dist_01756_str as data_01756_str engine=Distributed(test_cluster_tw select * from dist_01756_str where key in ('0', '2'); select * from dist_01756_str where key in (0, 2); -- analyzer does support this -select * from dist_01756_str where key in ('0', Null) settings allow_experimental_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01756_str where key in ('0', Null) settings enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- select * from dist_01756_str where key in (0, 2); -- { serverError TYPE_MISMATCH } -- select * from dist_01756_str where key in (0, Null); -- { serverError TYPE_MISMATCH } diff --git a/tests/queries/0_stateless/01757_optimize_skip_unused_shards_limit.sql b/tests/queries/0_stateless/01757_optimize_skip_unused_shards_limit.sql index 3853ccb4080..6fcf98d47cb 100644 --- a/tests/queries/0_stateless/01757_optimize_skip_unused_shards_limit.sql +++ b/tests/queries/0_stateless/01757_optimize_skip_unused_shards_limit.sql @@ -21,9 +21,9 @@ select * from dist_01757 where dummy = 0 or dummy = 1 format Null settings optim -- and negative -- disabled for analyzer cause new implementation consider `dummy = 0 and dummy = 1` as constant False. -select * from dist_01757 where dummy = 0 and dummy = 1 settings optimize_skip_unused_shards_limit=1, allow_experimental_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -select * from dist_01757 where dummy = 0 and dummy = 2 and dummy = 3 settings optimize_skip_unused_shards_limit=1, allow_experimental_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -select * from dist_01757 where dummy = 0 and dummy = 2 and dummy = 3 settings optimize_skip_unused_shards_limit=2, allow_experimental_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01757 where dummy = 0 and dummy = 1 settings optimize_skip_unused_shards_limit=1, enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01757 where dummy = 0 and dummy = 2 and dummy = 3 settings optimize_skip_unused_shards_limit=1, enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01757 where dummy = 0 and dummy = 2 and dummy = 3 settings optimize_skip_unused_shards_limit=2, enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } -- and select * from dist_01757 where dummy = 0 and dummy = 1 settings optimize_skip_unused_shards_limit=2; diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql index 367baef142b..aa610e19c12 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.sql +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.sql @@ -57,13 +57,13 @@ EXPLAIN indexes=1 SELECT id, delete_time FROM t1 CROSS JOIN ( SELECT delete_time FROM t2 -) AS d WHERE create_time < delete_time AND id = 101 SETTINGS allow_experimental_analyzer=0; +) AS d WHERE create_time < delete_time AND id = 101 SETTINGS enable_analyzer=0; EXPLAIN indexes=1 SELECT id, delete_time FROM t1 CROSS JOIN ( SELECT delete_time FROM t2 -) AS d WHERE create_time < delete_time AND id = 101 SETTINGS allow_experimental_analyzer=1; +) AS d WHERE create_time < delete_time 
AND id = 101 SETTINGS enable_analyzer=1; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.sh b/tests/queries/0_stateless/01786_explain_merge_tree.sh index e3b28acdc41..828012f56bc 100755 --- a/tests/queries/0_stateless/01786_explain_merge_tree.sh +++ b/tests/queries/0_stateless/01786_explain_merge_tree.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for i in $(seq 0 1) do - CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --allow_experimental_analyzer=$i" + CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --enable_analyzer=$i" $CH_CLIENT -q "drop table if exists test_index" $CH_CLIENT -q "drop table if exists idx" diff --git a/tests/queries/0_stateless/01823_explain_json.sh b/tests/queries/0_stateless/01823_explain_json.sh index 39128773069..356a317ae57 100755 --- a/tests/queries/0_stateless/01823_explain_json.sh +++ b/tests/queries/0_stateless/01823_explain_json.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=1" + "--enable_analyzer=1" ) $CLICKHOUSE_CLIENT "${opts[@]}" -q "EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw" echo "--------" diff --git a/tests/queries/0_stateless/01852_multiple_joins_with_union_join.sql b/tests/queries/0_stateless/01852_multiple_joins_with_union_join.sql index 8c6937eb581..4387a697a83 100644 --- a/tests/queries/0_stateless/01852_multiple_joins_with_union_join.sql +++ b/tests/queries/0_stateless/01852_multiple_joins_with_union_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS v1; DROP TABLE IF EXISTS v2; diff --git a/tests/queries/0_stateless/01872_functions_to_subcolumns_analyzer.sql b/tests/queries/0_stateless/01872_functions_to_subcolumns_analyzer.sql index b544f6829cf..032d83890ec 100644 --- a/tests/queries/0_stateless/01872_functions_to_subcolumns_analyzer.sql +++ b/tests/queries/0_stateless/01872_functions_to_subcolumns_analyzer.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS t_func_to_subcolumns; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_functions_to_subcolumns = 1; CREATE TABLE t_func_to_subcolumns (id UInt64, arr Array(UInt64), n Nullable(String), m Map(String, UInt64)) diff --git a/tests/queries/0_stateless/01890_cross_join_explain_crash.sql b/tests/queries/0_stateless/01890_cross_join_explain_crash.sql index bb2bc606870..79aea3884b7 100644 --- a/tests/queries/0_stateless/01890_cross_join_explain_crash.sql +++ b/tests/queries/0_stateless/01890_cross_join_explain_crash.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET joined_subquery_requires_alias = 0; select * FROM (SELECT 1), (SELECT 1), (SELECT 1); diff --git a/tests/queries/0_stateless/01913_names_of_tuple_literal.sql b/tests/queries/0_stateless/01913_names_of_tuple_literal.sql index 879f4c91587..d6dda4fda9b 100644 --- a/tests/queries/0_stateless/01913_names_of_tuple_literal.sql +++ b/tests/queries/0_stateless/01913_names_of_tuple_literal.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT ((1, 2), (2, 3), (3, 4)) FORMAT TSVWithNames; SELECT ((1, 2), (2, 3), (3, 4)) FORMAT TSVWithNames SETTINGS legacy_column_name_of_tuple_literal = 1; diff --git 
a/tests/queries/0_stateless/01925_join_materialized_columns.sql b/tests/queries/0_stateless/01925_join_materialized_columns.sql index abb6fda3bfb..ce71cfed8a3 100644 --- a/tests/queries/0_stateless/01925_join_materialized_columns.sql +++ b/tests/queries/0_stateless/01925_join_materialized_columns.sql @@ -32,8 +32,8 @@ SELECT t1.dt, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.dt; SELECT '-'; SELECT * FROM t1 ALL JOIN t2 ON t1.dt = t2.dt ORDER BY t1.time, t2.time; SELECT '-'; -SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time settings allow_experimental_analyzer=0; -SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time settings allow_experimental_analyzer=1; +SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time settings enable_analyzer=0; +SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time settings enable_analyzer=1; SELECT '-'; SELECT * FROM t1 JOIN t2 ON t1.dt1 = t2.dt2 ORDER BY t1.time, t1.dimension_1, t2.time, t2.dimension_2; SELECT '-'; @@ -53,5 +53,5 @@ SELECT t1.time as talias FROM t1 JOIN t2 ON talias = t2.time_alias; SELECT t2.time as talias FROM t1 JOIN t2 ON t1.time = talias; SELECT t2.time as talias FROM t1 JOIN t2 ON t1.time_alias = talias; SELECT time as talias FROM t1 JOIN t2 ON t1.time = talias; -- { serverError AMBIGUOUS_COLUMN_NAME, INVALID_JOIN_ON_EXPRESSION } -SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time settings allow_experimental_analyzer=0; -- { serverError AMBIGUOUS_COLUMN_NAME } -SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time settings allow_experimental_analyzer=1; +SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time settings enable_analyzer=0; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time settings enable_analyzer=1; diff --git a/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql b/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql index 31035aa80cd..28b9c8650af 100644 --- a/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql +++ b/tests/queries/0_stateless/01925_test_storage_merge_aliases_analyzer.sql @@ -1,7 +1,7 @@ -- Tags: no-parallel drop table if exists merge; -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; create table merge ( dt Date, diff --git a/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql b/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql index f344b7007d0..4efbb461501 100644 --- a/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql +++ b/tests/queries/0_stateless/01936_three_parts_identifiers_in_wrong_places.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT dictGet(t.nest.a, concat(currentDatabase(), '.dict.dict'), 's', number) FROM numbers(5); -- { serverError INVALID_IDENTIFIER } diff --git a/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql b/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql index 945b399157f..0154265ef72 100644 --- a/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql +++ b/tests/queries/0_stateless/01942_dateTimeToSnowflakeID.sql @@ -1,5 +1,5 @@ SET session_timezone = 'UTC'; -- disable timezone randomization -SET allow_experimental_analyzer = 1; -- The old path formats the result with different whitespaces +SET enable_analyzer = 1; -- The old path formats the result with different whitespaces SELECT '-- Negative tests'; SELECT dateTimeToSnowflakeID(); -- {serverError 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql b/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql index 48316691c71..41e5beb9c16 100644 --- a/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql +++ b/tests/queries/0_stateless/01942_snowflakeIDToDateTime.sql @@ -1,5 +1,5 @@ SET session_timezone = 'UTC'; -- disable timezone randomization -SET allow_experimental_analyzer = 1; -- The old path formats the result with different whitespaces +SET enable_analyzer = 1; -- The old path formats the result with different whitespaces SELECT '-- Negative tests'; SELECT snowflakeIDToDateTime(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference index 5acefdb365e..e786532f25a 100644 --- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference @@ -71,7 +71,7 @@ Expression (Projection) Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))) ReadFromSystemNumbers ReadFromRemote (Read from remote replica) -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized Expression (Project names) Distinct (DISTINCT) diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql index adf55a9dd7f..960fd227a39 100644 --- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql @@ -4,7 +4,7 @@ set optimize_skip_unused_shards=1; set optimize_distributed_group_by_sharding_key=1; set prefer_localhost_replica=1; -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; -- { echo } explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized @@ -17,7 +17,7 @@ explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized diff --git a/tests/queries/0_stateless/02000_join_on_const.reference b/tests/queries/0_stateless/02000_join_on_const.reference index 848ecedf9e3..3bd1633ce32 100644 --- a/tests/queries/0_stateless/02000_join_on_const.reference +++ b/tests/queries/0_stateless/02000_join_on_const.reference @@ -33,23 +33,23 @@ 2 2 2 2 -- { echoOn } -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 
LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; 1 0 2 2 -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; 2 2 0 3 -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; 1 0 2 2 0 3 -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; 1 0 2 0 -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; 0 2 0 3 -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; 1 0 2 0 0 2 diff --git a/tests/queries/0_stateless/02000_join_on_const.sql b/tests/queries/0_stateless/02000_join_on_const.sql index 2c1152e0ae6..da70973ed87 100644 --- a/tests/queries/0_stateless/02000_join_on_const.sql +++ b/tests/queries/0_stateless/02000_join_on_const.sql @@ -56,30 +56,30 @@ SELECT * FROM t1 RIGHT JOIN t2 ON NULL SETTINGS join_algorithm = 'auto'; -- { se SELECT * FROM t1 FULL JOIN t2 ON NULL SETTINGS join_algorithm = 'partial_merge'; -- { serverError INVALID_JOIN_ON_EXPRESSION,NOT_IMPLEMENTED } -- mixing of constant and non-constant expressions in ON is not allowed -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 1 SETTINGS allow_experimental_analyzer = 0; -- { serverError AMBIGUOUS_COLUMN_NAME } -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 1 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 2 SETTINGS allow_experimental_analyzer = 0; -- { serverError AMBIGUOUS_COLUMN_NAME } -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 2 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 1 SETTINGS enable_analyzer = 0; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 2 SETTINGS enable_analyzer = 0; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 2 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 != 1 SETTINGS allow_experimental_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 != 1 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 != 1 SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 != 1 SETTINGS enable_analyzer = 1; SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND NULL; -- { serverError INVALID_JOIN_ON_EXPRESSION } SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 'aaa'; -- { serverError INVALID_JOIN_ON_EXPRESSION,ILLEGAL_TYPE_OF_ARGUMENT } SELECT * FROM t1 JOIN t2 ON 'aaa'; -- { serverError INVALID_JOIN_ON_EXPRESSION } -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 0 SETTINGS allow_experimental_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 0 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 JOIN t2 ON t1.id = 
t2.id AND 1 SETTINGS allow_experimental_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } -SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 0 SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 0 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS enable_analyzer = 1; -- { echoOn } -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; SELECT * FROM (SELECT 1 as a) as t1 INNER JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; SELECT * FROM (SELECT 1 as a) as t1 LEFT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; @@ -94,27 +94,26 @@ SELECT a + 1 FROM (SELECT 1 as x) as t1 LEFT JOIN ( SELECT 1 AS a ) AS t2 ON TRUE -SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; +SETTINGS enable_analyzer=1, join_use_nulls=1; SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) FROM (SELECT 1 as x) as t1 LEFT JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 ON TRUE -SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; +SETTINGS enable_analyzer=1, join_use_nulls=1; SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) FROM (SELECT 1 as x) as t1 RIGHT JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 ON TRUE -SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; +SETTINGS enable_analyzer=1, join_use_nulls=1; SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) FROM (SELECT 1 as x) as t1 FULL JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 ON TRUE -SETTINGS allow_experimental_analyzer=1, join_use_nulls=1; +SETTINGS enable_analyzer=1, join_use_nulls=1; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; - diff --git a/tests/queries/0_stateless/02030_tuple_filter.sql b/tests/queries/0_stateless/02030_tuple_filter.sql index 42853dec681..c8f344f5076 100644 --- a/tests/queries/0_stateless/02030_tuple_filter.sql +++ b/tests/queries/0_stateless/02030_tuple_filter.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_tuple_filter; diff --git a/tests/queries/0_stateless/02048_clickhouse_local_stage.reference b/tests/queries/0_stateless/02048_clickhouse_local_stage.reference index 2631199cbab..b6200464293 100644 --- 
a/tests/queries/0_stateless/02048_clickhouse_local_stage.reference +++ b/tests/queries/0_stateless/02048_clickhouse_local_stage.reference @@ -1,15 +1,15 @@ -execute: --allow_experimental_analyzer=1 +execute: --enable_analyzer=1 "foo" 1 -execute: --allow_experimental_analyzer=1 --stage fetch_columns +execute: --enable_analyzer=1 --stage fetch_columns "__table1.dummy" 0 -execute: --allow_experimental_analyzer=1 --stage with_mergeable_state +execute: --enable_analyzer=1 --stage with_mergeable_state "1_UInt8" 1 -execute: --allow_experimental_analyzer=1 --stage with_mergeable_state_after_aggregation +execute: --enable_analyzer=1 --stage with_mergeable_state_after_aggregation "1_UInt8" 1 -execute: --allow_experimental_analyzer=1 --stage complete +execute: --enable_analyzer=1 --stage complete "foo" 1 diff --git a/tests/queries/0_stateless/02048_clickhouse_local_stage.sh b/tests/queries/0_stateless/02048_clickhouse_local_stage.sh index 182acc23a13..09a7e8efefc 100755 --- a/tests/queries/0_stateless/02048_clickhouse_local_stage.sh +++ b/tests/queries/0_stateless/02048_clickhouse_local_stage.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=1" + "--enable_analyzer=1" ) function execute_query() diff --git a/tests/queries/0_stateless/02115_map_contains_analyzer.sql b/tests/queries/0_stateless/02115_map_contains_analyzer.sql index 46e02eca4f0..00285404987 100644 --- a/tests/queries/0_stateless/02115_map_contains_analyzer.sql +++ b/tests/queries/0_stateless/02115_map_contains_analyzer.sql @@ -5,7 +5,7 @@ CREATE TABLE t_map_contains (m Map(String, UInt32)) ENGINE = Memory; INSERT INTO t_map_contains VALUES (map('a', 1, 'b', 2)), (map('c', 3, 'd', 4)); SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT mapContains(m, 'a') FROM t_map_contains; SELECT mapContains(m, 'a') FROM t_map_contains; diff --git a/tests/queries/0_stateless/02116_tuple_element_analyzer.sql b/tests/queries/0_stateless/02116_tuple_element_analyzer.sql index 5aeb72c9ee4..ef3729bdc95 100644 --- a/tests/queries/0_stateless/02116_tuple_element_analyzer.sql +++ b/tests/queries/0_stateless/02116_tuple_element_analyzer.sql @@ -4,7 +4,7 @@ CREATE TABLE t_tuple_element(t1 Tuple(a UInt32, s String), t2 Tuple(UInt32, Stri INSERT INTO t_tuple_element VALUES ((1, 'a'), (2, 'b')); SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT t1.1 FROM t_tuple_element; EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT t1.1 FROM t_tuple_element; diff --git a/tests/queries/0_stateless/02125_query_views_log_window_function.sql b/tests/queries/0_stateless/02125_query_views_log_window_function.sql index fff1e943c58..3f15cf5b998 100644 --- a/tests/queries/0_stateless/02125_query_views_log_window_function.sql +++ b/tests/queries/0_stateless/02125_query_views_log_window_function.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; set allow_experimental_window_view = 1; CREATE TABLE data ( `id` UInt64, `timestamp` DateTime) ENGINE = Memory; diff --git a/tests/queries/0_stateless/02129_window_functions_disable_optimizations.sql b/tests/queries/0_stateless/02129_window_functions_disable_optimizations.sql index 0f12bc2eb9c..20a4f2bcf5f 100644 --- a/tests/queries/0_stateless/02129_window_functions_disable_optimizations.sql +++ 
b/tests/queries/0_stateless/02129_window_functions_disable_optimizations.sql @@ -29,4 +29,4 @@ HAVING sum(log(2) * number) > 346.57353 ORDER BY k; SELECT round(sum(log(2) * number), 6) AS k FROM numbers(10000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING sum(log(2) * number) > 346.57353 ORDER BY k -SETTINGS allow_experimental_analyzer=1; +SETTINGS enable_analyzer=1; diff --git a/tests/queries/0_stateless/02136_scalar_read_rows_json.sh b/tests/queries/0_stateless/02136_scalar_read_rows_json.sh index 1fe345d266d..00f2c92161f 100755 --- a/tests/queries/0_stateless/02136_scalar_read_rows_json.sh +++ b/tests/queries/0_stateless/02136_scalar_read_rows_json.sh @@ -7,4 +7,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "#1" ${CLICKHOUSE_CLIENT} --query='SELECT count() FROM numbers(100) FORMAT JSON;' | grep -a -v "elapsed" echo "#2" -${CLICKHOUSE_CLIENT} --query='SELECT (SELECT max(number), count(number) FROM numbers(100000) as n) SETTINGS max_block_size = 65505, allow_experimental_analyzer = 1 FORMAT JSON;' | grep -a -v "elapsed" | grep -v "_subquery" +${CLICKHOUSE_CLIENT} --query='SELECT (SELECT max(number), count(number) FROM numbers(100000) as n) SETTINGS max_block_size = 65505, enable_analyzer = 1 FORMAT JSON;' | grep -a -v "elapsed" | grep -v "_subquery" diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql index ae8c39b49bc..7bbdecf5501 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql @@ -22,20 +22,20 @@ SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d, -i LIMIT 5; EXPLAIN PIPELINE SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d, -i LIMIT 5; SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5; -EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5 settings allow_experimental_analyzer=0; -EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5 settings allow_experimental_analyzer=1; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5 settings enable_analyzer=0; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5 settings enable_analyzer=1; SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5; -EXPLAIN PIPELINE SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5 settings allow_experimental_analyzer=0; -EXPLAIN PIPELINE SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5 settings allow_experimental_analyzer=1; +EXPLAIN PIPELINE SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5 settings enable_analyzer=0; +EXPLAIN PIPELINE SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5 settings enable_analyzer=1; INSERT INTO t_read_in_order SELECT '2020-10-12', number, number FROM numbers(100000); SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i LIMIT 5; EXPLAIN SYNTAX SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5; -EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5 settings allow_experimental_analyzer=0; -EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 
5 settings allow_experimental_analyzer=1; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5 settings enable_analyzer=0; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5 settings enable_analyzer=1; SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5; DROP TABLE IF EXISTS t_read_in_order; diff --git a/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh b/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh index 6d715604d93..007dae6e427 100755 --- a/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh +++ b/tests/queries/0_stateless/02151_hash_table_sizes_stats_joins.sh @@ -58,7 +58,7 @@ $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" for i in "${!queries_without_preallocation[@]}"; do $CLICKHOUSE_CLIENT --param_query_id="${queries_without_preallocation[$i]}" -q " -- the old analyzer is not supported - SELECT sum(if(getSetting('allow_experimental_analyzer'), ProfileEvents['HashJoinPreallocatedElementsInHashTables'] = 0, 1)) + SELECT sum(if(getSetting('enable_analyzer'), ProfileEvents['HashJoinPreallocatedElementsInHashTables'] = 0, 1)) FROM system.query_log WHERE event_date >= yesterday() AND query_id = {query_id:String} AND current_database = currentDatabase() AND type = 'QueryFinish' " @@ -67,7 +67,7 @@ done for i in "${!queries_with_preallocation[@]}"; do $CLICKHOUSE_CLIENT --param_query_id="${queries_with_preallocation[$i]}" -q " -- the old analyzer is not supported - SELECT sum(if(getSetting('allow_experimental_analyzer'), ProfileEvents['HashJoinPreallocatedElementsInHashTables'] > 0, 1)) + SELECT sum(if(getSetting('enable_analyzer'), ProfileEvents['HashJoinPreallocatedElementsInHashTables'] > 0, 1)) FROM system.query_log WHERE event_date >= yesterday() AND query_id = {query_id:String} AND current_database = currentDatabase() AND type = 'QueryFinish' " diff --git a/tests/queries/0_stateless/02154_dictionary_get_http_json.sh b/tests/queries/0_stateless/02154_dictionary_get_http_json.sh index fbaf67fff2f..bcd9f4f5c1a 100755 --- a/tests/queries/0_stateless/02154_dictionary_get_http_json.sh +++ b/tests/queries/0_stateless/02154_dictionary_get_http_json.sh @@ -32,7 +32,7 @@ $CLICKHOUSE_CLIENT -q """ echo """ SELECT dictGet(02154_test_dictionary, 'value', toUInt64(0)), dictGet(02154_test_dictionary, 'value', toUInt64(1)) - SETTINGS allow_experimental_analyzer = 1 + SETTINGS enable_analyzer = 1 FORMAT JSON """ | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&wait_end_of_query=1&output_format_write_statistics=0" -d @- diff --git a/tests/queries/0_stateless/02156_storage_merge_prewhere.sql b/tests/queries/0_stateless/02156_storage_merge_prewhere.sql index 4f010ebadfd..2e5066fc488 100644 --- a/tests/queries/0_stateless/02156_storage_merge_prewhere.sql +++ b/tests/queries/0_stateless/02156_storage_merge_prewhere.sql @@ -24,8 +24,8 @@ INSERT INTO t_02156_mt1 SELECT number, toString(number) FROM numbers(10000); INSERT INTO t_02156_mt2 SELECT number, toString(number) FROM numbers(10000); INSERT INTO t_02156_log SELECT number, toString(number) FROM numbers(10000); -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' settings allow_experimental_analyzer=1; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 
3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' settings allow_experimental_analyzer=0; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' settings enable_analyzer=1; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' settings enable_analyzer=0; SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v); SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge2 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache.sql b/tests/queries/0_stateless/02174_cte_scalar_cache.sql index 86cfff21446..d14475c843a 100644 --- a/tests/queries/0_stateless/02174_cte_scalar_cache.sql +++ b/tests/queries/0_stateless/02174_cte_scalar_cache.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH ( SELECT sleep(0.0001) FROM system.one ) as a1, @@ -28,7 +28,7 @@ WITH ( SELECT sleep(0.0001) FROM system.one ) as a5 SELECT '02177_CTE_NEW_ANALYZER', a1, a2, a3, a4, a5 FROM system.numbers LIMIT 100 FORMAT Null -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; SYSTEM FLUSH LOGS; SELECT diff --git a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql index ca54b9e1400..a2be0a11faf 100644 --- a/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql +++ b/tests/queries/0_stateless/02174_cte_scalar_cache_mv.sql @@ -14,7 +14,7 @@ CREATE MATERIALIZED VIEW mv1 TO t2 AS FROM t1 LIMIT 5; -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; -- FIRST INSERT INSERT INTO t1 @@ -61,7 +61,7 @@ WHERE AND event_date >= yesterday() AND event_time > now() - interval 10 minute; truncate table t2; -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; -- FIRST INSERT ANALYZER INSERT INTO t1 @@ -100,7 +100,7 @@ WHERE DROP TABLE mv1; -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; CREATE TABLE t3 (z Int64) ENGINE = Memory; CREATE MATERIALIZED VIEW mv2 TO t3 AS @@ -134,7 +134,7 @@ WHERE AND event_date >= yesterday() AND event_time > now() - interval 10 minute; truncate table t3; -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; -- SECOND INSERT ANALYZER INSERT INTO t1 @@ -162,7 +162,7 @@ WHERE DROP TABLE mv2; -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; CREATE TABLE t4 (z Int64) ENGINE = Memory; CREATE MATERIALIZED VIEW mv3 TO t4 AS @@ -197,7 +197,7 @@ WHERE AND event_date >= yesterday() AND event_time > now() - interval 10 minute; truncate table t4; -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; -- THIRD INSERT ANALYZER INSERT INTO t1 diff --git a/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql b/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql index c90c8b90c9e..e7d1909cae6 100644 --- a/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql +++ b/tests/queries/0_stateless/02184_hash_functions_and_ip_types.sql @@ -1,6 +1,6 @@ -- Tags: no-fasttest -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT toIPv4('1.2.3.4') AS ipv4, diff --git 
a/tests/queries/0_stateless/02226_analyzer_or_like_combine.reference b/tests/queries/0_stateless/02226_analyzer_or_like_combine.reference index 0ff24b39709..61dfd1c6779 100644 --- a/tests/queries/0_stateless/02226_analyzer_or_like_combine.reference +++ b/tests/queries/0_stateless/02226_analyzer_or_like_combine.reference @@ -40,7 +40,7 @@ QUERY id: 0 LIST id: 3, nodes: 1 CONSTANT id: 4, constant_value: \'Привет, World\', constant_value_type: String CONSTANT id: 16, constant_value: \'world%\', constant_value_type: String - SETTINGS optimize_or_like_chain=0 allow_experimental_analyzer=1 + SETTINGS optimize_or_like_chain=0 enable_analyzer=1 SELECT materialize(\'Привет, World\') AS s WHERE multiMatchAny(s, [\'^hell\', \'(?i)привет\', \'(?i)^world\']) OR false SETTINGS optimize_or_like_chain = 1 @@ -68,7 +68,7 @@ QUERY id: 0 CONSTANT id: 4, constant_value: \'Привет, World\', constant_value_type: String CONSTANT id: 10, constant_value: Array_[\'^hell\', \'(?i)привет\', \'(?i)^world\'], constant_value_type: Array(String) CONSTANT id: 11, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS optimize_or_like_chain=1 allow_experimental_analyzer=1 + SETTINGS optimize_or_like_chain=1 enable_analyzer=1 SELECT materialize(\'Привет, World\') AS s1, materialize(\'Привет, World\') AS s2 diff --git a/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql b/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql index fbebfc6d281..b23e5640b8f 100644 --- a/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql +++ b/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql @@ -1,7 +1,7 @@ EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0; -EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0, allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; -EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, allow_hyperscan = 0; @@ -11,18 +11,18 @@ EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('П SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 1; -SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR 
(s ILIKE '%world') SETTINGS optimize_or_like_chain = 1, allow_experimental_analyzer = 1; +SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 0; -SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 0, allow_experimental_analyzer = 1; +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 1; -SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 1, allow_experimental_analyzer = 1; +SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 0; -SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 0, allow_experimental_analyzer = 1; +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; -SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, allow_experimental_analyzer = 1; +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; -- Aliases diff --git a/tests/queries/0_stateless/02227_union_match_by_name.sql b/tests/queries/0_stateless/02227_union_match_by_name.sql index 6a19add1d37..489c3d976ea 100644 --- a/tests/queries/0_stateless/02227_union_match_by_name.sql +++ b/tests/queries/0_stateless/02227_union_match_by_name.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02233_with_total_empty_chunk.sql b/tests/queries/0_stateless/02233_with_total_empty_chunk.sql index d59319ac75e..c70b35df459 100644 --- a/tests/queries/0_stateless/02233_with_total_empty_chunk.sql +++ b/tests/queries/0_stateless/02233_with_total_empty_chunk.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT (NULL, NULL, NULL, NULL, NULL, NULL, NULL) FROM numbers(0) GROUP BY number WITH TOTALS HAVING sum(number) <= arrayJoin([]) -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }; diff --git a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh index a6c47d80fa9..d387fda746c 100755 --- a/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh +++ b/tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh @@ -4,6 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh - -$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS allow_experimental_analyzer = 1" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' -$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS allow_experimental_analyzer = 0" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' +$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS enable_analyzer = 1" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' +$CLICKHOUSE_LOCAL --query="SELECT n SETTINGS enable_analyzer = 0" 2>&1 | grep -q "Code: 47. DB::Exception:" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql b/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql index fbcc374ba10..ecb2198237f 100644 --- a/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql +++ b/tests/queries/0_stateless/02267_join_dup_columns_issue36199.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SET join_algorithm = 'hash'; @@ -22,7 +22,7 @@ SELECT * FROM ( SELECT count('a'), count('b'), count('c'), 2 AS x ) as t1 RIGHT SELECT 'y', * FROM (SELECT count('y'), count('y'), 2 AS x) AS t1 RIGHT JOIN (SELECT count('x'), count('y'), 3 AS x) AS t2 ON t1.x = t2.x; SELECT * FROM (SELECT arrayJoin([NULL]), 9223372036854775806, arrayJoin([NULL]), NULL AS x) AS t1 RIGHT JOIN (SELECT arrayJoin([arrayJoin([10000000000.])]), NULL AS x) AS t2 ON t1.x = t2.x; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET join_algorithm = 'hash'; SELECT * FROM ( SELECT 2 AS x ) AS t1 RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 ON t1.x = t2.x; diff --git a/tests/queries/0_stateless/02271_fix_column_matcher_and_column_transformer.sql b/tests/queries/0_stateless/02271_fix_column_matcher_and_column_transformer.sql index f8faa3e653b..ab89d98c608 100644 --- a/tests/queries/0_stateless/02271_fix_column_matcher_and_column_transformer.sql +++ b/tests/queries/0_stateless/02271_fix_column_matcher_and_column_transformer.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS github_events; diff --git a/tests/queries/0_stateless/02303_query_kind.reference b/tests/queries/0_stateless/02303_query_kind.reference index 9f1c026f889..c26242098e3 100644 --- a/tests/queries/0_stateless/02303_query_kind.reference +++ b/tests/queries/0_stateless/02303_query_kind.reference @@ -1,4 +1,4 @@ -clickhouse-client --allow_experimental_analyzer=1 --query_kind secondary_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy +clickhouse-client --enable_analyzer=1 --query_kind secondary_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Project names + Projection)) Header: dummy String Aggregating @@ -7,7 +7,7 @@ Header: dummy String Header: toString(__table1.dummy) String ReadFromStorage (SystemOne) Header: dummy UInt8 -clickhouse-local --allow_experimental_analyzer=1 --query_kind secondary_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy +clickhouse-local --enable_analyzer=1 --query_kind secondary_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Project names + Projection)) Header: dummy String Aggregating @@ -16,7 +16,7 @@ Header: dummy String Header: toString(__table1.dummy) String ReadFromStorage (SystemOne) Header: dummy UInt8 -clickhouse-client --allow_experimental_analyzer=1 
--query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy +clickhouse-client --enable_analyzer=1 --query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Project names + Projection)) Header: dummy String Aggregating @@ -25,7 +25,7 @@ Header: dummy String Header: __table1.dummy UInt8 ReadFromStorage (SystemOne) Header: dummy UInt8 -clickhouse-local --allow_experimental_analyzer=1 --query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy +clickhouse-local --enable_analyzer=1 --query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy Expression ((Project names + Projection)) Header: dummy String Aggregating diff --git a/tests/queries/0_stateless/02303_query_kind.sh b/tests/queries/0_stateless/02303_query_kind.sh index 1d883a2dcc7..7fe491b9deb 100755 --- a/tests/queries/0_stateless/02303_query_kind.sh +++ b/tests/queries/0_stateless/02303_query_kind.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=1" + "--enable_analyzer=1" ) function run_query() diff --git a/tests/queries/0_stateless/02315_grouping_constant_folding.reference b/tests/queries/0_stateless/02315_grouping_constant_folding.reference index 31816318a42..7b8c75b2304 100644 --- a/tests/queries/0_stateless/02315_grouping_constant_folding.reference +++ b/tests/queries/0_stateless/02315_grouping_constant_folding.reference @@ -27,7 +27,7 @@ SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY ROLLUP(a, 5 0 0 2 5 1 0 2 10 0 0 0 -SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SETS ((a, b), (a, a), ()) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0, allow_experimental_analyzer=1; +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SETS ((a, b), (a, a), ()) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0, enable_analyzer=1; 1 0 0 3 1 0 2 3 1 0 4 3 diff --git a/tests/queries/0_stateless/02315_grouping_constant_folding.sql b/tests/queries/0_stateless/02315_grouping_constant_folding.sql index f992aa0da32..5e305d2e6c5 100644 --- a/tests/queries/0_stateless/02315_grouping_constant_folding.sql +++ b/tests/queries/0_stateless/02315_grouping_constant_folding.sql @@ -9,7 +9,7 @@ SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY ROLLUP(a, b) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0; -SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SETS ((a, b), (a, a), ()) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0, allow_experimental_analyzer=1; +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SETS ((a, b), (a, a), ()) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0, enable_analyzer=1; -- { echoOff } DROP TABLE test02315; diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh index 07c7bc4af56..bd7e6be3987 100755 --- a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh +++ 
b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh @@ -77,7 +77,7 @@ echo "-- enabled, only part of distinct columns form prefix of sorting key" $CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "=== disable new analyzer ===" -DISABLE_ANALYZER="set allow_experimental_analyzer=0" +DISABLE_ANALYZER="set enable_analyzer=0" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" $CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES @@ -99,7 +99,7 @@ echo "-- enabled, check that disabling other 'read in order' optimizations do no $CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES echo "=== enable new analyzer ===" -ENABLE_ANALYZER="set allow_experimental_analyzer=1" +ENABLE_ANALYZER="set enable_analyzer=1" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" $CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES diff --git a/tests/queries/0_stateless/02337_analyzer_columns_basic.sql b/tests/queries/0_stateless/02337_analyzer_columns_basic.sql index 167eecc6fb8..c132a69ac21 100644 --- a/tests/queries/0_stateless/02337_analyzer_columns_basic.sql +++ b/tests/queries/0_stateless/02337_analyzer_columns_basic.sql @@ -1,6 +1,6 @@ -- Tags: no-parallel -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- Empty from section diff --git a/tests/queries/0_stateless/02337_multiple_joins_original_names.sql b/tests/queries/0_stateless/02337_multiple_joins_original_names.sql index 63bbfe6873d..37c7077b56e 100644 --- a/tests/queries/0_stateless/02337_multiple_joins_original_names.sql +++ b/tests/queries/0_stateless/02337_multiple_joins_original_names.sql @@ -1,6 +1,6 @@ -- https://github.com/ClickHouse/ClickHouse/issues/34697 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT table1_id FROM ( SELECT first.table1_id diff --git a/tests/queries/0_stateless/02338_analyzer_constants_basic.sql b/tests/queries/0_stateless/02338_analyzer_constants_basic.sql index 6d6249538a4..536202dc2cf 100644 --- a/tests/queries/0_stateless/02338_analyzer_constants_basic.sql +++ b/tests/queries/0_stateless/02338_analyzer_constants_basic.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DESCRIBE (SELECT 1); SELECT 1; diff --git a/tests/queries/0_stateless/02339_analyzer_matcher_basic.sql b/tests/queries/0_stateless/02339_analyzer_matcher_basic.sql index a09325fa43b..57c6a9479ec 100644 --- a/tests/queries/0_stateless/02339_analyzer_matcher_basic.sql +++ b/tests/queries/0_stateless/02339_analyzer_matcher_basic.sql @@ -1,6 +1,6 @@ -- Tags: no-parallel -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 'Matchers without FROM section'; diff --git a/tests/queries/0_stateless/02340_analyzer_functions.sql b/tests/queries/0_stateless/02340_analyzer_functions.sql index 101a5bfcc86..bd018302913 100644 --- 
a/tests/queries/0_stateless/02340_analyzer_functions.sql +++ b/tests/queries/0_stateless/02340_analyzer_functions.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DESCRIBE (SELECT 1 + 1); SELECT 1 + 1; diff --git a/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql b/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql index 9f21db8e659..8bed0c8bc1e 100644 --- a/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql +++ b/tests/queries/0_stateless/02341_analyzer_aliases_basics.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 'Aliases to constants'; diff --git a/tests/queries/0_stateless/02341_global_join_cte.reference b/tests/queries/0_stateless/02341_global_join_cte.reference index f2cfe994ffa..4f854ca3bef 100644 --- a/tests/queries/0_stateless/02341_global_join_cte.reference +++ b/tests/queries/0_stateless/02341_global_join_cte.reference @@ -1,6 +1,6 @@ -- { echo } -with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings allow_experimental_analyzer=0; -- { serverError ALIAS_REQUIRED } -with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings allow_experimental_analyzer=1; -- It works with analyzer; rhs is an alias itself. +with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings enable_analyzer=0; -- { serverError ALIAS_REQUIRED } +with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings enable_analyzer=1; -- It works with analyzer; rhs is an alias itself. 0 0 0 diff --git a/tests/queries/0_stateless/02341_global_join_cte.sql b/tests/queries/0_stateless/02341_global_join_cte.sql index b9b906afd70..f6acd822f14 100644 --- a/tests/queries/0_stateless/02341_global_join_cte.sql +++ b/tests/queries/0_stateless/02341_global_join_cte.sql @@ -1,5 +1,5 @@ -- { echo } -with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings allow_experimental_analyzer=0; -- { serverError ALIAS_REQUIRED } -with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings allow_experimental_analyzer=1; -- It works with analyzer; rhs is an alias itself. 
+with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings enable_analyzer=0; -- { serverError ALIAS_REQUIRED } +with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings enable_analyzer=1; -- It works with analyzer; rhs is an alias itself. with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings joined_subquery_requires_alias=0; with rhs_ as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs_ rhs using (d1) order by rhs.d2 settings joined_subquery_requires_alias=0; diff --git a/tests/queries/0_stateless/02342_analyzer_compound_types.sql b/tests/queries/0_stateless/02342_analyzer_compound_types.sql index 0fd96928496..36617aab2f8 100644 --- a/tests/queries/0_stateless/02342_analyzer_compound_types.sql +++ b/tests/queries/0_stateless/02342_analyzer_compound_types.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 'Constant tuple'; diff --git a/tests/queries/0_stateless/02342_window_view_different_struct.sql b/tests/queries/0_stateless/02342_window_view_different_struct.sql index a5b2b8daa5a..9c35459ecef 100644 --- a/tests/queries/0_stateless/02342_window_view_different_struct.sql +++ b/tests/queries/0_stateless/02342_window_view_different_struct.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SET allow_experimental_window_view = 1; DROP TABLE IF EXISTS data_02342; diff --git a/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql b/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql index 7e323c570b8..b55cb85c0da 100644 --- a/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql +++ b/tests/queries/0_stateless/02343_analyzer_column_transformers_strict.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas.sql b/tests/queries/0_stateless/02343_analyzer_lambdas.sql index 80fa47fc325..07f382700ae 100644 --- a/tests/queries/0_stateless/02343_analyzer_lambdas.sql +++ b/tests/queries/0_stateless/02343_analyzer_lambdas.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql index 3b780e1dec3..dff0d7f82ca 100644 --- a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql +++ b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_28083.sql @@ -1,11 +1,11 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select so, r from - (select [('y',0),('n',1)] as cg, + (select [('y',0),('n',1)] as cg, if( arrayMap( x -> x.1, cg ) != ['y', 'n'], 'y', 'n') as so, - arrayFilter( x -> x.1 = so , cg) as r + 
arrayFilter( x -> x.1 = so , cg) as r ); select diff --git a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql index b07f3f33ac3..da0b4e8ef57 100644 --- a/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql +++ b/tests/queries/0_stateless/02343_analyzer_lambdas_issue_36677.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT arraySum(x -> ((x.1) / ((x.2) * (x.2))), arrayZip(mag, magerr)) / arraySum(x -> (1. / (x * x)), magerr) AS weightedmeanmag, diff --git a/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql b/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql index ee02b79cc32..3c7ea467734 100644 --- a/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql +++ b/tests/queries/0_stateless/02344_analyzer_multiple_aliases_for_expression.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02345_analyzer_subqueries.sql b/tests/queries/0_stateless/02345_analyzer_subqueries.sql index c0cc242b57b..d1ec9b58e27 100644 --- a/tests/queries/0_stateless/02345_analyzer_subqueries.sql +++ b/tests/queries/0_stateless/02345_analyzer_subqueries.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02346_fulltext_index_bug52019.sql b/tests/queries/0_stateless/02346_fulltext_index_bug52019.sql index a643df65095..e29c3c51e5e 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_bug52019.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_bug52019.sql @@ -13,8 +13,8 @@ ORDER BY k SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; INSERT INTO tab (k) VALUES (0); -SELECT * FROM tab PREWHERE (s[NULL]) = 'Click a03' SETTINGS allow_experimental_analyzer=1; -SELECT * FROM tab PREWHERE (s[1]) = 'Click a03' SETTINGS allow_experimental_analyzer=1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -SELECT * FROM tab PREWHERE (s['foo']) = 'Click a03' SETTINGS allow_experimental_analyzer=1; +SELECT * FROM tab PREWHERE (s[NULL]) = 'Click a03' SETTINGS enable_analyzer=1; +SELECT * FROM tab PREWHERE (s[1]) = 'Click a03' SETTINGS enable_analyzer=1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * FROM tab PREWHERE (s['foo']) = 'Click a03' SETTINGS enable_analyzer=1; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02346_fulltext_index_match_predicate.sql b/tests/queries/0_stateless/02346_fulltext_index_match_predicate.sql index 7f36c423a41..a20c8dc9afd 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_match_predicate.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_match_predicate.sql @@ -31,7 +31,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -42,7 +42,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT '---'; @@ -61,7 +61,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -72,7 +72,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT '---'; @@ -91,7 +91,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; 
+ enable_analyzer = 0; SELECT * FROM @@ -102,6 +102,6 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index db0c06c4e16..f03c36f6550 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -8,7 +8,7 @@ SET allow_experimental_annoy_index = 1; SET allow_experimental_usearch_index = 1; -SET allow_experimental_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof +SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02364_window_view_segfault.sh b/tests/queries/0_stateless/02364_window_view_segfault.sh index 4173b6c6b8d..833d8967558 100755 --- a/tests/queries/0_stateless/02364_window_view_segfault.sh +++ b/tests/queries/0_stateless/02364_window_view_segfault.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=0" + "--enable_analyzer=0" ) ${CLICKHOUSE_CLIENT} "${opts[@]}" --multiline --query """ diff --git a/tests/queries/0_stateless/02366_explain_query_tree.sql b/tests/queries/0_stateless/02366_explain_query_tree.sql index c38b2d819d1..82621ec90f7 100644 --- a/tests/queries/0_stateless/02366_explain_query_tree.sql +++ b/tests/queries/0_stateless/02366_explain_query_tree.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE run_passes = 0 SELECT 1; diff --git a/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql b/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql index f41680cd9f4..a6408abca9b 100644 --- a/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql +++ b/tests/queries/0_stateless/02367_analyzer_table_alias_columns.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02368_analyzer_table_functions.sql b/tests/queries/0_stateless/02368_analyzer_table_functions.sql index 456e095c6c1..cc65848a5d8 100644 --- a/tests/queries/0_stateless/02368_analyzer_table_functions.sql +++ b/tests/queries/0_stateless/02368_analyzer_table_functions.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT c1, c2, c3, c4 FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); SELECT f.c1, f.c2, f.c3, f.c4 FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"') AS f; diff --git a/tests/queries/0_stateless/02369_analyzer_array_join_function.sql b/tests/queries/0_stateless/02369_analyzer_array_join_function.sql index e60ec7e71a9..f84c96ee6a6 100644 --- a/tests/queries/0_stateless/02369_analyzer_array_join_function.sql +++ b/tests/queries/0_stateless/02369_analyzer_array_join_function.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT arrayJoin([1, 2, 3]); diff --git a/tests/queries/0_stateless/02370_analyzer_in_function.sql b/tests/queries/0_stateless/02370_analyzer_in_function.sql index a6e4400e101..9fb05ada829 100644 --- a/tests/queries/0_stateless/02370_analyzer_in_function.sql +++ b/tests/queries/0_stateless/02370_analyzer_in_function.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; 
+SET enable_analyzer = 1; SELECT 1 IN 1; SELECT 1 IN (1); diff --git a/tests/queries/0_stateless/02371_analyzer_join_cross.sql b/tests/queries/0_stateless/02371_analyzer_join_cross.sql index 3624a1d2282..660dc19770a 100644 --- a/tests/queries/0_stateless/02371_analyzer_join_cross.sql +++ b/tests/queries/0_stateless/02371_analyzer_join_cross.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET single_join_prefer_left_table = 0; DROP TABLE IF EXISTS test_table_join_1; diff --git a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 index 45ae63b9a49..fb75ddeac09 100644 --- a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 +++ b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 @@ -1,6 +1,6 @@ -- Tags: long -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET single_join_prefer_left_table = 0; DROP TABLE IF EXISTS test_table_join_1; diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql index bcec6d178a8..16a37ee7f50 100644 --- a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET join_use_nulls = 1; DROP TABLE IF EXISTS test_table_join_1; diff --git a/tests/queries/0_stateless/02374_analyzer_array_join.sql b/tests/queries/0_stateless/02374_analyzer_array_join.sql index 8c26df1806e..fe1508f5f78 100644 --- a/tests/queries/0_stateless/02374_analyzer_array_join.sql +++ b/tests/queries/0_stateless/02374_analyzer_array_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 b/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 index 26fb52716ff..2c64efc7fc8 100644 --- a/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 +++ b/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_join_1; CREATE TABLE test_table_join_1 diff --git a/tests/queries/0_stateless/02374_combine_multi_if_and_count_if_opt.sql b/tests/queries/0_stateless/02374_combine_multi_if_and_count_if_opt.sql index 4371c2e5641..05472e5e35a 100644 --- a/tests/queries/0_stateless/02374_combine_multi_if_and_count_if_opt.sql +++ b/tests/queries/0_stateless/02374_combine_multi_if_and_count_if_opt.sql @@ -4,7 +4,7 @@ create table m (a int) engine Log; insert into m values (1); -set allow_experimental_analyzer = true, optimize_rewrite_sum_if_to_count_if=1; +set enable_analyzer = true, optimize_rewrite_sum_if_to_count_if=1; EXPLAIN QUERY TREE select sum(multiIf(a = 1, 1, 0)) from m; diff --git a/tests/queries/0_stateless/02375_analyzer_union.sql b/tests/queries/0_stateless/02375_analyzer_union.sql index 5e41f07d217..cf9a99ffbba 100644 --- a/tests/queries/0_stateless/02375_analyzer_union.sql +++ b/tests/queries/0_stateless/02375_analyzer_union.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql b/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql index 72a4edb8567..295d8a8f97e 100644 --- 
a/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql +++ b/tests/queries/0_stateless/02376_analyzer_in_function_subquery.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02377_analyzer_in_function_set.sql b/tests/queries/0_stateless/02377_analyzer_in_function_set.sql index e3cbcf75a9c..00aa40ccf17 100644 --- a/tests/queries/0_stateless/02377_analyzer_in_function_set.sql +++ b/tests/queries/0_stateless/02377_analyzer_in_function_set.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh index c223fcc86bc..4b9793da5bb 100755 --- a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh +++ b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh @@ -20,9 +20,9 @@ function explain_sorting { function explain_sortmode { echo "-- QUERY: "$1 - $CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTMODE + $CLICKHOUSE_CLIENT --enable_analyzer=0 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTMODE echo "-- QUERY (analyzer): "$1 - $CLICKHOUSE_CLIENT --allow_experimental_analyzer=1 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTMODE + $CLICKHOUSE_CLIENT --enable_analyzer=1 --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTMODE } $CLICKHOUSE_CLIENT -q "drop table if exists optimize_sorting sync" diff --git a/tests/queries/0_stateless/02378_analyzer_projection_names.sql b/tests/queries/0_stateless/02378_analyzer_projection_names.sql index f41afe6a950..7b3099c4f36 100644 --- a/tests/queries/0_stateless/02378_analyzer_projection_names.sql +++ b/tests/queries/0_stateless/02378_analyzer_projection_names.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET single_join_prefer_left_table = 0; DROP TABLE IF EXISTS test_table; diff --git a/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql b/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql index 5699a15aead..40303e0f92a 100644 --- a/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql +++ b/tests/queries/0_stateless/02379_analyzer_subquery_depth.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT (SELECT a FROM (SELECT 1 AS a)) SETTINGS max_subquery_depth = 1; -- { serverError TOO_DEEP_SUBQUERIES } SELECT (SELECT a FROM (SELECT 1 AS a)) SETTINGS max_subquery_depth = 2; diff --git a/tests/queries/0_stateless/02380_analyzer_join_sample.sql b/tests/queries/0_stateless/02380_analyzer_join_sample.sql index e417f47d173..bc77f3623ae 100644 --- a/tests/queries/0_stateless/02380_analyzer_join_sample.sql +++ b/tests/queries/0_stateless/02380_analyzer_join_sample.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_join_1; CREATE TABLE test_table_join_1 diff --git 
a/tests/queries/0_stateless/02381_analyzer_join_final.sql b/tests/queries/0_stateless/02381_analyzer_join_final.sql index 57fc3aedd8f..0db81ac7728 100644 --- a/tests/queries/0_stateless/02381_analyzer_join_final.sql +++ b/tests/queries/0_stateless/02381_analyzer_join_final.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_join_1; CREATE TABLE test_table_join_1 diff --git a/tests/queries/0_stateless/02381_join_dup_columns_in_plan.sql.j2 b/tests/queries/0_stateless/02381_join_dup_columns_in_plan.sql.j2 index ca4af4df6b6..4e88b67e3dd 100644 --- a/tests/queries/0_stateless/02381_join_dup_columns_in_plan.sql.j2 +++ b/tests/queries/0_stateless/02381_join_dup_columns_in_plan.sql.j2 @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql b/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql index 7983b05a69e..6a0b58e7b28 100644 --- a/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql +++ b/tests/queries/0_stateless/02382_analyzer_matcher_join_using.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_join_1; CREATE TABLE test_table_join_1 diff --git a/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql b/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql index c22a0f4244b..fbd6fe4db4f 100644 --- a/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql +++ b/tests/queries/0_stateless/02383_analyzer_merge_tree_self_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_join_1; CREATE TABLE test_table_join_1 diff --git a/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql b/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql index f4619f20765..a8ad5c4d957 100644 --- a/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql +++ b/tests/queries/0_stateless/02384_analyzer_dict_get_join_get.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql b/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql index 861ada9623a..025e064d23c 100644 --- a/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql +++ b/tests/queries/0_stateless/02385_analyzer_aliases_compound_expression.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT cast(tuple(1, 'Value'), 'Tuple(first UInt64, second String)') AS value, value.first, value.second; diff --git a/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql b/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql index c8ca3ff21d4..64d24aae1b1 100644 --- a/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql +++ b/tests/queries/0_stateless/02386_analyzer_in_function_nested_subqueries.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT (NULL IN (SELECT 9223372036854775806 IN (SELECT 65536), inf, NULL IN (NULL))) IN (SELECT NULL IN (NULL)); diff --git a/tests/queries/0_stateless/02387_analyzer_cte.sql b/tests/queries/0_stateless/02387_analyzer_cte.sql index 1f10ac10438..149eab7f741 100644 --- 
a/tests/queries/0_stateless/02387_analyzer_cte.sql +++ b/tests/queries/0_stateless/02387_analyzer_cte.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql b/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql index 9fd2f73703d..31d6f91a39d 100644 --- a/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql +++ b/tests/queries/0_stateless/02388_analyzer_recursive_lambda.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH x -> plus(lambda(1), x) AS lambda SELECT lambda(1048576); -- { serverError UNSUPPORTED_METHOD }; diff --git a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql index 8e3777ebc15..51a10adae6a 100644 --- a/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql +++ b/tests/queries/0_stateless/02389_analyzer_nested_lambda.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02420_final_setting_analyzer.reference b/tests/queries/0_stateless/02420_final_setting_analyzer.reference index 780a6e5de68..763ecad84ab 100644 --- a/tests/queries/0_stateless/02420_final_setting_analyzer.reference +++ b/tests/queries/0_stateless/02420_final_setting_analyzer.reference @@ -1,5 +1,5 @@ -- { echoOn } -set allow_experimental_analyzer=1; +set enable_analyzer=1; SYSTEM STOP MERGES tbl; -- simple test case create table if not exists replacing_mt (x String) engine=ReplacingMergeTree() ORDER BY x; diff --git a/tests/queries/0_stateless/02420_final_setting_analyzer.sql b/tests/queries/0_stateless/02420_final_setting_analyzer.sql index cbdec017602..89fff094825 100644 --- a/tests/queries/0_stateless/02420_final_setting_analyzer.sql +++ b/tests/queries/0_stateless/02420_final_setting_analyzer.sql @@ -1,5 +1,5 @@ -- { echoOn } -set allow_experimental_analyzer=1; +set enable_analyzer=1; SYSTEM STOP MERGES tbl; -- simple test case diff --git a/tests/queries/0_stateless/02421_decimal_in_precision_issue_41125.sql b/tests/queries/0_stateless/02421_decimal_in_precision_issue_41125.sql index fde893626c1..5a4e23c10bd 100644 --- a/tests/queries/0_stateless/02421_decimal_in_precision_issue_41125.sql +++ b/tests/queries/0_stateless/02421_decimal_in_precision_issue_41125.sql @@ -21,7 +21,7 @@ SELECT count() == 1 FROM dtest WHERE b IN toDecimal64('44.4000', 4); SELECT count() == 1 FROM dtest WHERE b IN toDecimal128('44.4000', 4); SELECT count() == 1 FROM dtest WHERE b IN toDecimal256('44.4000', 4); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT count() == 0 FROM (SELECT '33.3' :: Decimal(9, 1) AS a WHERE a IN ('33.33' :: Decimal(9, 2))); diff --git a/tests/queries/0_stateless/02421_explain_subquery.sql b/tests/queries/0_stateless/02421_explain_subquery.sql index 2970003cb1c..02f45e8cc3b 100644 --- a/tests/queries/0_stateless/02421_explain_subquery.sql +++ b/tests/queries/0_stateless/02421_explain_subquery.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT count() > 3 FROM (EXPLAIN PIPELINE header = 1 SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain LIKE '%Header: number UInt64%'; SELECT count() > 0 FROM (EXPLAIN PLAN SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE '%Sort%'; @@ -32,7 +32,7 @@ SELECT count() == 1 FROM (EXPLAIN ESTIMATE SELECT sum(a) 
FROM t1); DROP TABLE t1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT count() > 3 FROM (EXPLAIN PIPELINE header = 1 SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain LIKE '%Header: \_\_table1.number UInt64%'; SELECT count() > 0 FROM (EXPLAIN PLAN SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE '%Sort%'; diff --git a/tests/queries/0_stateless/02428_decimal_in_floating_point_literal.sql b/tests/queries/0_stateless/02428_decimal_in_floating_point_literal.sql index a84cb5572ba..a0d92115188 100644 --- a/tests/queries/0_stateless/02428_decimal_in_floating_point_literal.sql +++ b/tests/queries/0_stateless/02428_decimal_in_floating_point_literal.sql @@ -30,7 +30,7 @@ SELECT count() == 1 FROM decimal_in_float_test WHERE a NOT IN (33.333); SELECT count() == 1 FROM decimal_in_float_test WHERE b IN (44.44); SELECT count() == 1 FROM decimal_in_float_test WHERE b NOT IN (44.4,44.444); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT toDecimal32(1.555,3) IN (1.5551); diff --git a/tests/queries/0_stateless/02428_parameterized_view.sh b/tests/queries/0_stateless/02428_parameterized_view.sh index c6f0927db36..95f82db5454 100755 --- a/tests/queries/0_stateless/02428_parameterized_view.sh +++ b/tests/queries/0_stateless/02428_parameterized_view.sh @@ -72,8 +72,8 @@ $CLICKHOUSE_CLIENT -q "INSERT INTO ${CLICKHOUSE_TEST_UNIQUE_NAME}.Catalog VALUES $CLICKHOUSE_CLIENT -q "INSERT INTO ${CLICKHOUSE_TEST_UNIQUE_NAME}.Catalog VALUES ('Paper', 20, 1)" $CLICKHOUSE_CLIENT -q "CREATE VIEW ${CLICKHOUSE_TEST_UNIQUE_NAME}.pv1 AS SELECT * FROM ${CLICKHOUSE_TEST_UNIQUE_NAME}.Catalog WHERE Price={price:UInt64}" $CLICKHOUSE_CLIENT -q "SELECT Price FROM ${CLICKHOUSE_TEST_UNIQUE_NAME}.pv1(price=20)" -$CLICKHOUSE_CLIENT -q "SELECT Price FROM \`${CLICKHOUSE_TEST_UNIQUE_NAME}.pv1\`(price=20) SETTINGS allow_experimental_analyzer = 0" 2>&1 | grep -Fq "UNKNOWN_FUNCTION" && echo 'ERROR' || echo 'OK' -$CLICKHOUSE_CLIENT -q "SELECT Price FROM \`${CLICKHOUSE_TEST_UNIQUE_NAME}.pv1\`(price=20) SETTINGS allow_experimental_analyzer = 1" +$CLICKHOUSE_CLIENT -q "SELECT Price FROM \`${CLICKHOUSE_TEST_UNIQUE_NAME}.pv1\`(price=20) SETTINGS enable_analyzer = 0" 2>&1 | grep -Fq "UNKNOWN_FUNCTION" && echo 'ERROR' || echo 'OK' +$CLICKHOUSE_CLIENT -q "SELECT Price FROM \`${CLICKHOUSE_TEST_UNIQUE_NAME}.pv1\`(price=20) SETTINGS enable_analyzer = 1" $CLICKHOUSE_CLIENT -q "INSERT INTO test_02428_Catalog VALUES ('Book2', 30, 8)" diff --git a/tests/queries/0_stateless/02451_order_by_monotonic.sh b/tests/queries/0_stateless/02451_order_by_monotonic.sh index 7d1356b4445..fa0a37678f0 100755 --- a/tests/queries/0_stateless/02451_order_by_monotonic.sh +++ b/tests/queries/0_stateless/02451_order_by_monotonic.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh opts=( - "--allow_experimental_analyzer=1" + "--enable_analyzer=1" ) function explain_sort_description() diff --git a/tests/queries/0_stateless/02459_group_by_all.sql b/tests/queries/0_stateless/02459_group_by_all.sql index 4f08ee331a4..8281c201edf 100644 --- a/tests/queries/0_stateless/02459_group_by_all.sql +++ b/tests/queries/0_stateless/02459_group_by_all.sql @@ -21,7 +21,7 @@ select substring(a, 1, 3), substring(substring(substring(a, c, count(b)), 1, cou select substring(a, 1, 3), substring(a, 1, count(b)) from group_by_all group by all; select count(b) AS len, substring(a, 1, 3), substring(a, 1, len) from group_by_all group by all; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select a, count(b) from group_by_all group by all order by a; select substring(a, 1, 3), count(b) from group_by_all group by all; diff --git a/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql b/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql index 5ac8c79d4ed..e86c867043d 100644 --- a/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql +++ b/tests/queries/0_stateless/02474_analyzer_subqueries_table_expression_modifiers.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM (SELECT 1) FINAL; -- { serverError UNSUPPORTED_METHOD } SELECT * FROM (SELECT 1) SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } diff --git a/tests/queries/0_stateless/02475_analyzer_join_tree_subquery.sql b/tests/queries/0_stateless/02475_analyzer_join_tree_subquery.sql index eda90529166..c9e7ac19157 100644 --- a/tests/queries/0_stateless/02475_analyzer_join_tree_subquery.sql +++ b/tests/queries/0_stateless/02475_analyzer_join_tree_subquery.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH subquery AS (SELECT sum(number) FROM numbers(10)) SELECT * FROM subquery; diff --git a/tests/queries/0_stateless/02475_analyzer_subquery_compound_expression.sql b/tests/queries/0_stateless/02475_analyzer_subquery_compound_expression.sql index f96c834c057..fc9e9d44b42 100644 --- a/tests/queries/0_stateless/02475_analyzer_subquery_compound_expression.sql +++ b/tests/queries/0_stateless/02475_analyzer_subquery_compound_expression.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, value.value_1, value.value_2; diff --git a/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql b/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql index ddb0f70c6de..53b97a1dd2a 100644 --- a/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql +++ b/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql @@ -1,2 +1,2 @@ SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1; -SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1 SETTINGS allow_experimental_analyzer=1; \ No newline at end of file +SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1 SETTINGS enable_analyzer=1; diff --git a/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh b/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh index 0702c146426..4c850a6ec9e 100755 --- a/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh +++ b/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh 
@@ -17,61 +17,61 @@ $CLICKHOUSE_CLIENT -n -q " INSERT INTO test_table VALUES (0, 'Value'); "; -$CLICKHOUSE_CLIENT -q "SELECT value_ FROM test_table SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT value_ FROM test_table SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table.value_ FROM test_table SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table.value_ FROM test_table SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_tabl.value_ FROM test_table SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_tabl.value_ FROM test_table SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table.value_ FROM test_table AS test_table_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table.value_ FROM test_table AS test_table_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_tabl.value_ FROM test_table AS test_table_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_tabl.value_ FROM test_table AS test_table_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_alias.value_ FROM test_table AS test_table_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_alias.value_ FROM test_table AS test_table_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_alias.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_alia.value_ FROM test_table AS test_table_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_alia.value_ FROM test_table AS test_table_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_alias.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT value_ FROM (SELECT 1 AS value) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT value_ FROM (SELECT 1 AS value) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT subquery.value_ FROM (SELECT 1 AS value) AS subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT subquery.value_ FROM (SELECT 1 AS value) AS subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['subquery.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT subquer.value_ FROM (SELECT 1 AS value) AS subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT subquer.value_ FROM (SELECT 1 AS value) AS subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['subquery.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT value_ FROM cte_subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT value_ FROM cte_subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquery.value_ FROM cte_subquery 
SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquery.value_ FROM cte_subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquer.value_ FROM cte_subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquer.value_ FROM cte_subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquery_alias.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquery_alias.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery_alias.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquery_alia.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT 1 AS value) SELECT cte_subquery_alia.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery_alias.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, constant_valu SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, constant_valu SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, constant_valu SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, constant_valu SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, arrayMap(lambda_argument -> lambda_argument + constant_valu, [1, 2, 3]) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, arrayMap(lambda_argument -> lambda_argument + constant_valu, [1, 2, 3]) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH 1 AS constant_value SELECT (SELECT constant_valu) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH 1 AS constant_value SELECT (SELECT constant_valu) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value'\]" &>/dev/null; $CLICKHOUSE_CLIENT -n -q " @@ -85,61 +85,61 @@ $CLICKHOUSE_CLIENT -n -q " INSERT INTO test_table_compound VALUES (0, tuple('Value_1')); "; -$CLICKHOUSE_CLIENT -q "SELECT value.value_ FROM test_table_compound SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT value.value_ FROM test_table_compound SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_compound.value.value_ FROM test_table_compound SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_compound.value.value_ FROM test_table_compound SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_compound.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q 
"SELECT test_tabl_compound.value.value_ FROM test_table_compound SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_tabl_compound.value.value_ FROM test_table_compound SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_compound.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_compound.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_compound.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_compound.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_tabl_compound.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_tabl_compound.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_compound.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_compound_alias.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_compound_alias.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_compound_alias.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_compound_alia.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_compound_alia.value.value_ FROM test_table_compound AS test_table_compound_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_compound_alias.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT value.value_ FROM (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT value.value_ FROM (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT subquery.value.value_ FROM (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) AS subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT subquery.value.value_ FROM (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) AS subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['subquery.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT subquer.value.value_ FROM (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) AS subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT subquer.value.value_ FROM (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) AS subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['subquery.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT value.value_ FROM cte_subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT value.value_ FROM cte_subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: 
\['value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquery.value.value_ FROM cte_subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquery.value.value_ FROM cte_subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquer.value.value_ FROM cte_subquery SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquer.value.value_ FROM cte_subquery SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquery_alias.value.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquery_alias.value.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery_alias.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquery_alia.value.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cte_subquery AS (SELECT cast(tuple(1), 'Tuple(value_1 String)') AS value) SELECT cte_subquery_alia.value.value_ FROM cte_subquery AS cte_subquery_alias SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['cte_subquery_alias.value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constant_value, constant_value.value_ SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constant_value, constant_value.value_ SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constant_value, constant_valu.value_ SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constant_value, constant_valu.value_ SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constant_value, arrayMap(lambda_argument -> lambda_argument + constant_value.value_, [1, 2, 3]) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constant_value, arrayMap(lambda_argument -> lambda_argument + constant_value.value_, [1, 2, 3]) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "WITH cast(tuple(1), 'Tuple(value_1 String)') AS constant_value SELECT (SELECT constant_value.value_) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH cast(tuple(1), 'Tuple(value_1 String)') AS constant_value SELECT 
(SELECT constant_value.value_) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null; $CLICKHOUSE_CLIENT -n -q " @@ -162,25 +162,25 @@ $CLICKHOUSE_CLIENT -n -q " INSERT INTO test_table_2 VALUES (0, 'Value'); "; -$CLICKHOUSE_CLIENT -q "SELECT test_table_1.value_ FROM test_table_1 INNER JOIN test_table_2 ON test_table_1.id = test_table_2.id SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_1.value_ FROM test_table_1 INNER JOIN test_table_2 ON test_table_1.id = test_table_2.id SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_1.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT test_table_2.value_ FROM test_table_1 INNER JOIN test_table_2 ON test_table_1.id = test_table_2.id SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_table_2.value_ FROM test_table_1 INNER JOIN test_table_2 ON test_table_1.id = test_table_2.id SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['test_table_2.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT t1.value_ FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT t1.value_ FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['t1.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT t2.value_ FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT t2.value_ FROM test_table_1 AS t1 INNER JOIN test_table_2 AS t2 ON t1.id = t2.id SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['t2.value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT [1] AS a, a.size1 SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT [1] AS a, a.size1 SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['a.size0'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.c SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.c SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['t.a'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.a.c SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.a.c SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['t.a.b'\]" &>/dev/null; $CLICKHOUSE_CLIENT -q "SELECT 1"; diff --git a/tests/queries/0_stateless/02476_analyzer_join_with_unused_columns.sql b/tests/queries/0_stateless/02476_analyzer_join_with_unused_columns.sql index ca937e01238..feb6786ffb6 100644 --- a/tests/queries/0_stateless/02476_analyzer_join_with_unused_columns.sql +++ b/tests/queries/0_stateless/02476_analyzer_join_with_unused_columns.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT subquery_1.id, subquery_2.id FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4 AS value) AS subquery_2; diff --git a/tests/queries/0_stateless/02476_fuse_sum_count.sql b/tests/queries/0_stateless/02476_fuse_sum_count.sql index 315bbd10a65..2319e81440b 100644 --- a/tests/queries/0_stateless/02476_fuse_sum_count.sql +++ b/tests/queries/0_stateless/02476_fuse_sum_count.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET 
enable_analyzer = 1; SET optimize_syntax_fuse_functions = 1; DROP TABLE IF EXISTS fuse_tbl; diff --git a/tests/queries/0_stateless/02477_analyzer_array_join_with_join.sql b/tests/queries/0_stateless/02477_analyzer_array_join_with_join.sql index 7e4ecb98ae8..3d2cc1b1620 100644 --- a/tests/queries/0_stateless/02477_analyzer_array_join_with_join.sql +++ b/tests/queries/0_stateless/02477_analyzer_array_join_with_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql b/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql index 53f3a9b23ec..2fc1cc45ce0 100644 --- a/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql +++ b/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02477_analyzer_function_hints.sh b/tests/queries/0_stateless/02477_analyzer_function_hints.sh index f83935e47fb..23cc5651257 100755 --- a/tests/queries/0_stateless/02477_analyzer_function_hints.sh +++ b/tests/queries/0_stateless/02477_analyzer_function_hints.sh @@ -6,22 +6,22 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "SELECT plu(1, 1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT plu(1, 1) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['plus'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT uniqExac(1, 1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT uniqExac(1, 1) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['uniqExact'\]" &>/dev/null; $CLICKHOUSE_CLIENT -q "DROP FUNCTION IF EXISTS test_user_defined_function_$CLICKHOUSE_DATABASE;" $CLICKHOUSE_CLIENT -q "CREATE FUNCTION test_user_defined_function_$CLICKHOUSE_DATABASE AS x -> x + 1;" -$CLICKHOUSE_CLIENT -q "SELECT test_user_defined_function_${CLICKHOUSE_DATABASE}A(1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT test_user_defined_function_${CLICKHOUSE_DATABASE}A(1) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep -E "Maybe you meant: \[.*'test_user_defined_function_$CLICKHOUSE_DATABASE'.*\]" &>/dev/null; $CLICKHOUSE_CLIENT -q "DROP FUNCTION test_user_defined_function_$CLICKHOUSE_DATABASE"; -$CLICKHOUSE_CLIENT -q "WITH (x -> x + 1) AS lambda_function SELECT lambda_functio(1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "WITH (x -> x + 1) AS lambda_function SELECT lambda_functio(1) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['lambda_function'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -q "SELECT row_numbe() OVER (PARTITION BY 1) SETTINGS allow_experimental_analyzer = 1;" 2>&1 \ +$CLICKHOUSE_CLIENT -q "SELECT row_numbe() OVER (PARTITION BY 1) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['row_number'\]" &>/dev/null; $CLICKHOUSE_CLIENT -q "SELECT 1"; diff --git a/tests/queries/0_stateless/02477_exists_fuzz_43478.sql b/tests/queries/0_stateless/02477_exists_fuzz_43478.sql index 8ec876eb252..c225befed0e 100644 --- a/tests/queries/0_stateless/02477_exists_fuzz_43478.sql +++ b/tests/queries/0_stateless/02477_exists_fuzz_43478.sql @@ -1,3 +1,3 @@ create table test_rows_compact_part__fuzz_11 (x 
UInt32) engine = MergeTree order by x; insert into test_rows_compact_part__fuzz_11 select 1; -select 1 from test_rows_compact_part__fuzz_11 where exists(select 1) settings allow_experimental_analyzer=1; +select 1 from test_rows_compact_part__fuzz_11 where exists(select 1) settings enable_analyzer=1; diff --git a/tests/queries/0_stateless/02477_fuse_quantiles.sql b/tests/queries/0_stateless/02477_fuse_quantiles.sql index c0719d771d7..8ddc029f75f 100644 --- a/tests/queries/0_stateless/02477_fuse_quantiles.sql +++ b/tests/queries/0_stateless/02477_fuse_quantiles.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_syntax_fuse_functions = 1; DROP TABLE IF EXISTS fuse_tbl; diff --git a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference index 11c178ac0d0..0ce90cde108 100644 --- a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference +++ b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.reference @@ -19,7 +19,7 @@ QUERY id: 0 COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) CONSTANT id: 9, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE (a = \'x\') OR (\'y\' = a) @@ -41,7 +41,7 @@ QUERY id: 0 COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) CONSTANT id: 9, constant_value: UInt64_0, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE (a != \'x\') AND (a != \'y\') @@ -63,7 +63,7 @@ QUERY id: 0 COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) CONSTANT id: 9, constant_value: \'UInt8\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE (a != \'x\') AND (\'y\' != a) @@ -85,7 +85,7 @@ QUERY id: 0 COLUMN id: 2, column_name: a, result_type: LowCardinality(String), source_id: 3 CONSTANT id: 8, constant_value: Tuple_(\'x\', \'y\'), constant_value_type: Tuple(String, String) CONSTANT id: 9, constant_value: \'UInt8\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE (b = 0) OR (b = 1) @@ -111,7 +111,7 @@ QUERY id: 0 LIST id: 11, nodes: 2 COLUMN id: 8, column_name: b, result_type: UInt32, source_id: 3 CONSTANT id: 12, constant_value: UInt64_1, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE (b != 0) AND (b != 1) @@ -137,4 +137,4 @@ QUERY id: 0 LIST id: 11, nodes: 2 COLUMN id: 8, column_name: b, result_type: UInt32, source_id: 3 CONSTANT id: 12, constant_value: UInt64_1, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git 
a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql index 976b21a7e29..b328e9658d0 100644 --- a/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql +++ b/tests/queries/0_stateless/02477_logical_expressions_optimizer_low_cardinality.sql @@ -5,21 +5,21 @@ CREATE TABLE t_logical_expressions_optimizer_low_cardinality (a LowCardinality(S -- LowCardinality case, ignore optimize_min_equality_disjunction_chain_length limit, optimizer applied -- Chain of OR equals EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y'; -EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y' SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a; -EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a SETTINGS enable_analyzer = 1; -- Chain of AND notEquals EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y'; -EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y' SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y' SETTINGS enable_analyzer = 1; EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a; -EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a SETTINGS enable_analyzer = 1; -- Non-LowCardinality case, optimizer not applied for short chains -- Chain of OR equals EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1; -EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1 SETTINGS enable_analyzer = 1; -- Chain of AND notEquals EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1; -EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1 SETTINGS enable_analyzer = 1; DROP TABLE t_logical_expressions_optimizer_low_cardinality; diff --git a/tests/queries/0_stateless/02478_analyzer_table_expression_aliases.sql b/tests/queries/0_stateless/02478_analyzer_table_expression_aliases.sql index 66f50f7b26d..a1eb88c634d 100644 --- a/tests/queries/0_stateless/02478_analyzer_table_expression_aliases.sql +++ b/tests/queries/0_stateless/02478_analyzer_table_expression_aliases.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET 
enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02478_window_frame_type_groups.sql b/tests/queries/0_stateless/02478_window_frame_type_groups.sql index f762bcb61ee..a01e1813cc1 100644 --- a/tests/queries/0_stateless/02478_window_frame_type_groups.sql +++ b/tests/queries/0_stateless/02478_window_frame_type_groups.sql @@ -1,7 +1,7 @@ -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT toUInt64(dense_rank(1) OVER (ORDER BY 100 ASC GROUPS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)) FROM numbers(10); -- { serverError NOT_IMPLEMENTED } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT toUInt64(dense_rank(1) OVER (ORDER BY 100 ASC GROUPS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)) FROM numbers(10); -- { serverError NOT_IMPLEMENTED } diff --git a/tests/queries/0_stateless/02479_analyzer_aggregation_crash.sql b/tests/queries/0_stateless/02479_analyzer_aggregation_crash.sql index c931a3ab634..1e890740665 100644 --- a/tests/queries/0_stateless/02479_analyzer_aggregation_crash.sql +++ b/tests/queries/0_stateless/02479_analyzer_aggregation_crash.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET compile_aggregate_expressions = 1; SET min_count_to_compile_aggregate_expression = 0; diff --git a/tests/queries/0_stateless/02479_analyzer_aggregation_totals_rollup_crash_fix.sql b/tests/queries/0_stateless/02479_analyzer_aggregation_totals_rollup_crash_fix.sql index 6cd3e6a9385..004e61ee1a2 100644 --- a/tests/queries/0_stateless/02479_analyzer_aggregation_totals_rollup_crash_fix.sql +++ b/tests/queries/0_stateless/02479_analyzer_aggregation_totals_rollup_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT anyLast(number) FROM numbers(1) GROUP BY number WITH ROLLUP WITH TOTALS; diff --git a/tests/queries/0_stateless/02479_analyzer_join_with_constants.sql b/tests/queries/0_stateless/02479_analyzer_join_with_constants.sql index 9f77cf39f47..503bbe63347 100644 --- a/tests/queries/0_stateless/02479_analyzer_join_with_constants.sql +++ b/tests/queries/0_stateless/02479_analyzer_join_with_constants.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM (SELECT 1 AS id) AS t1 INNER JOIN (SELECT 1 AS id) AS t2 ON t1.id = t2.id AND 1; diff --git a/tests/queries/0_stateless/02479_mysql_connect_to_self.sql b/tests/queries/0_stateless/02479_mysql_connect_to_self.sql index cf2220073d3..b8ed7e0c03e 100644 --- a/tests/queries/0_stateless/02479_mysql_connect_to_self.sql +++ b/tests/queries/0_stateless/02479_mysql_connect_to_self.sql @@ -7,7 +7,7 @@ DROP TABLE IF EXISTS foo; CREATE TABLE foo (key UInt32, a String, b Int64, c String) ENGINE = TinyLog; INSERT INTO foo VALUES (1, 'one', -1, 'een'), (2, 'two', -2, 'twee'), (3, 'three', -3, 'drie'), (4, 'four', -4, 'vier'), (5, 'five', -5, 'vijf'); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT '---'; SELECT * FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; diff --git a/tests/queries/0_stateless/02480_analyzer_alias_nullptr.sql b/tests/queries/0_stateless/02480_analyzer_alias_nullptr.sql index f6b381e5c70..07503de1b10 100644 --- a/tests/queries/0_stateless/02480_analyzer_alias_nullptr.sql +++ b/tests/queries/0_stateless/02480_analyzer_alias_nullptr.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET 
enable_analyzer = 1; SELECT min(b), x AS b FROM (SELECT max(number) FROM numbers(1)); -- { serverError UNKNOWN_IDENTIFIER } diff --git a/tests/queries/0_stateless/02480_tlp_nan.reference b/tests/queries/0_stateless/02480_tlp_nan.reference index befd1f66564..29e5bffa7f0 100644 --- a/tests/queries/0_stateless/02480_tlp_nan.reference +++ b/tests/queries/0_stateless/02480_tlp_nan.reference @@ -1,21 +1,21 @@ -- {echo} -SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; +SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; nan 0 1 0 -SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; nan 0 1 0 -SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; +SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; -inf 0 1 0 -SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; -inf 0 1 0 -SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; +SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; \N \N \N 1 -SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; \N \N \N 1 -SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; +SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; inf 0 1 0 -SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; inf 0 1 0 -SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; +SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; nan 0 1 0 -SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; nan 0 1 0 diff --git a/tests/queries/0_stateless/02480_tlp_nan.sql b/tests/queries/0_stateless/02480_tlp_nan.sql index e24bc9a9830..55318e0cb30 100644 --- a/tests/queries/0_stateless/02480_tlp_nan.sql +++ b/tests/queries/0_stateless/02480_tlp_nan.sql @@ -1,15 +1,15 @@ -- {echo} -SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; -SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; -SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; -SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; -SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; -SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS 
allow_experimental_analyzer=0; +SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; -SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; -SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; -SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=1; -SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS allow_experimental_analyzer=0; +SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; diff --git a/tests/queries/0_stateless/02481_aggregation_in_order_plan.sql b/tests/queries/0_stateless/02481_aggregation_in_order_plan.sql index 490060dee6f..139e0ed4b77 100644 --- a/tests/queries/0_stateless/02481_aggregation_in_order_plan.sql +++ b/tests/queries/0_stateless/02481_aggregation_in_order_plan.sql @@ -5,5 +5,5 @@ insert into tab select 0, number % 3, 2 - intDiv(number, 3), (number % 3 + 1) * insert into tab select 0, number % 3, 2 - intDiv(number, 3), (number % 3 + 1) * 100 from numbers(6); select a, any(b), c, d from tab where b = 1 group by a, c, d order by c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1; -select * from (explain actions = 1, sorting=1 select a, any(b), c, d from tab where b = 1 group by a, c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1) where explain like '%ReadFromMergeTree%' or explain like '%Aggregating%' or explain like '%Order:%' settings allow_experimental_analyzer=0; -select * from (explain actions = 1, sorting=1 select a, any(b), c, d from tab where b = 1 group by a, c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1) where explain like '%ReadFromMergeTree%' or explain like '%Aggregating%' or explain like '%Order:%' settings allow_experimental_analyzer=1; +select * from (explain actions = 1, sorting=1 select a, any(b), c, d from tab where b = 1 group by a, c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1) where explain like '%ReadFromMergeTree%' or explain like '%Aggregating%' or explain like '%Order:%' settings enable_analyzer=0; +select * from (explain actions = 1, sorting=1 select a, any(b), c, d from tab where b = 1 group by a, c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1) where explain like '%ReadFromMergeTree%' or explain like '%Aggregating%' or explain like '%Order:%' settings enable_analyzer=1; diff --git a/tests/queries/0_stateless/02481_analyzer_join_alias_unknown_identifier_crash.sql b/tests/queries/0_stateless/02481_analyzer_join_alias_unknown_identifier_crash.sql index 0c5f0eba750..8b5b272f547 100644 --- a/tests/queries/0_stateless/02481_analyzer_join_alias_unknown_identifier_crash.sql +++ b/tests/queries/0_stateless/02481_analyzer_join_alias_unknown_identifier_crash.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_join_1; CREATE TABLE test_table_join_1 diff --git a/tests/queries/0_stateless/02481_analyzer_optimize_aggregation_arithmetics.sql 
b/tests/queries/0_stateless/02481_analyzer_optimize_aggregation_arithmetics.sql index ca91d137bf4..e68de0af522 100644 --- a/tests/queries/0_stateless/02481_analyzer_optimize_aggregation_arithmetics.sql +++ b/tests/queries/0_stateless/02481_analyzer_optimize_aggregation_arithmetics.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_arithmetic_operations_in_aggregate_functions = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql b/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql index fef71fdf94f..8e6b132f5b7 100644 --- a/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql +++ b/tests/queries/0_stateless/02481_analyzer_optimize_grouping_sets_keys.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; set optimize_syntax_fuse_functions = 0; EXPLAIN QUERY TREE run_passes=1 diff --git a/tests/queries/0_stateless/02483_cuturlparameter_with_arrays.sql b/tests/queries/0_stateless/02483_cuturlparameter_with_arrays.sql index 6d64d2685b7..0cf95d10a88 100644 --- a/tests/queries/0_stateless/02483_cuturlparameter_with_arrays.sql +++ b/tests/queries/0_stateless/02483_cuturlparameter_with_arrays.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02483_elapsed_time.sh b/tests/queries/0_stateless/02483_elapsed_time.sh index fdb23d6da01..274b30cefb9 100755 --- a/tests/queries/0_stateless/02483_elapsed_time.sh +++ b/tests/queries/0_stateless/02483_elapsed_time.sh @@ -17,7 +17,7 @@ EXCEPTION_BEFORE_START_QUERY="WITH FROM system.numbers WHERE number IN (sub) ) - SETTINGS enable_global_with_statement = 0, allow_experimental_analyzer = 1" + SETTINGS enable_global_with_statement = 0, enable_analyzer = 1" # For this query the system.query_log needs to show ExceptionBeforeStart and elapsed seconds <= 1.0 diff --git a/tests/queries/0_stateless/02489_analyzer_indexes.sql b/tests/queries/0_stateless/02489_analyzer_indexes.sql index b5438ddd31a..dcf18016da6 100644 --- a/tests/queries/0_stateless/02489_analyzer_indexes.sql +++ b/tests/queries/0_stateless/02489_analyzer_indexes.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table @@ -59,4 +59,3 @@ WHERE t1.id = 1 AND t1.value_1 = '1' AND t1.value_2 = '1' AND t1.value_3 = '1' SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx, value_3_idx'; DROP TABLE test_table; - diff --git a/tests/queries/0_stateless/02493_analyzer_sum_if_to_count_if.sql b/tests/queries/0_stateless/02493_analyzer_sum_if_to_count_if.sql index f1dbfa1f32a..171e080961a 100644 --- a/tests/queries/0_stateless/02493_analyzer_sum_if_to_count_if.sql +++ b/tests/queries/0_stateless/02493_analyzer_sum_if_to_count_if.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_rewrite_sum_if_to_count_if = 1; EXPLAIN QUERY TREE (SELECT sumIf(1, (number % 2) == 0) FROM numbers(10)); diff --git a/tests/queries/0_stateless/02493_analyzer_table_functions_untuple.sql b/tests/queries/0_stateless/02493_analyzer_table_functions_untuple.sql index bdbe65c643b..c9687783dc9 100644 --- a/tests/queries/0_stateless/02493_analyzer_table_functions_untuple.sql +++ b/tests/queries/0_stateless/02493_analyzer_table_functions_untuple.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 
number FROM numbers(untuple(tuple(1))); diff --git a/tests/queries/0_stateless/02493_analyzer_uniq_injective_functions_elimination.sql b/tests/queries/0_stateless/02493_analyzer_uniq_injective_functions_elimination.sql index 5a3b2379fde..ca37c6f3833 100644 --- a/tests/queries/0_stateless/02493_analyzer_uniq_injective_functions_elimination.sql +++ b/tests/queries/0_stateless/02493_analyzer_uniq_injective_functions_elimination.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1, optimize_injective_functions_inside_uniq = 1; +SET enable_analyzer = 1, optimize_injective_functions_inside_uniq = 1; -- Simple test EXPLAIN QUERY TREE SELECT uniqCombined(tuple('')) FROM numbers(1); diff --git a/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql b/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql index 3e6f9f42724..20b0bdd46b0 100644 --- a/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql +++ b/tests/queries/0_stateless/02494_analyzer_compound_expression_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table ( diff --git a/tests/queries/0_stateless/02494_query_cache_explain.sql b/tests/queries/0_stateless/02494_query_cache_explain.sql index bf376b47fdb..decdd92c477 100644 --- a/tests/queries/0_stateless/02494_query_cache_explain.sql +++ b/tests/queries/0_stateless/02494_query_cache_explain.sql @@ -1,7 +1,7 @@ -- Tags: no-parallel -- Tag no-parallel: Messes with internal cache -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET query_cache_system_table_handling = 'save'; SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh index 24803ed7420..9a07ddf0386 100755 --- a/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh +++ b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh @@ -15,8 +15,8 @@ ${CLICKHOUSE_CLIENT} --query "CREATE TABLE tab (a UInt64) ENGINE=MergeTree() ORD ${CLICKHOUSE_CLIENT} --query "INSERT INTO tab VALUES (1) (2) (3)" ${CLICKHOUSE_CLIENT} --query "INSERT INTO tab VALUES (3) (4) (5)" -SETTINGS_NO_ANALYZER="SETTINGS use_query_cache=1, max_threads=1, allow_experimental_analyzer=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0" -SETTINGS_ANALYZER="SETTINGS use_query_cache=1, max_threads=1, allow_experimental_analyzer=1, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0" +SETTINGS_NO_ANALYZER="SETTINGS use_query_cache=1, max_threads=1, enable_analyzer=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0" +SETTINGS_ANALYZER="SETTINGS use_query_cache=1, max_threads=1, enable_analyzer=1, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0" # Verify that the first query does two aggregations and the second query zero aggregations. Since query cache is currently not integrated # with EXPLAIN PLAN, we need to check the logs. 
diff --git a/tests/queries/0_stateless/02495_analyzer_storage_join.sql b/tests/queries/0_stateless/02495_analyzer_storage_join.sql index 7e6c03971f9..1148cd68338 100644 --- a/tests/queries/0_stateless/02495_analyzer_storage_join.sql +++ b/tests/queries/0_stateless/02495_analyzer_storage_join.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS t; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS tj; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET single_join_prefer_left_table = 0; CREATE TABLE tj (key2 UInt64, key1 Int64, a UInt64, b UInt64, x UInt64, y UInt64) ENGINE = Join(ALL, RIGHT, key1, key2); diff --git a/tests/queries/0_stateless/02495_sum_if_to_count_if_bug.sql b/tests/queries/0_stateless/02495_sum_if_to_count_if_bug.sql index 0791b374668..c00c0ba4e07 100644 --- a/tests/queries/0_stateless/02495_sum_if_to_count_if_bug.sql +++ b/tests/queries/0_stateless/02495_sum_if_to_count_if_bug.sql @@ -1,4 +1,3 @@ select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=0; -select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=1, allow_experimental_analyzer=0; -select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=1, allow_experimental_analyzer=1; - +select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=1, enable_analyzer=0; +select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=1, enable_analyzer=1; diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index 661b32fce72..646e2501a99 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -8,8 +8,8 @@ if [ -z ${ENABLE_ANALYZER+x} ]; then ENABLE_ANALYZER=0 fi -DISABLE_OPTIMIZATION="SET allow_experimental_analyzer=$ENABLE_ANALYZER;SET query_plan_remove_redundant_sorting=0;SET optimize_duplicate_order_by_and_distinct=0" -ENABLE_OPTIMIZATION="SET allow_experimental_analyzer=$ENABLE_ANALYZER;SET query_plan_remove_redundant_sorting=1;SET optimize_duplicate_order_by_and_distinct=0" +DISABLE_OPTIMIZATION="SET enable_analyzer=$ENABLE_ANALYZER;SET query_plan_remove_redundant_sorting=0;SET optimize_duplicate_order_by_and_distinct=0" +ENABLE_OPTIMIZATION="SET enable_analyzer=$ENABLE_ANALYZER;SET query_plan_remove_redundant_sorting=1;SET optimize_duplicate_order_by_and_distinct=0" echo "-- Disabled query_plan_remove_redundant_sorting" echo "-- ORDER BY clauses in subqueries are untouched" diff --git a/tests/queries/0_stateless/02497_analyzer_sum_if_count_if_pass_crash_fix.sql b/tests/queries/0_stateless/02497_analyzer_sum_if_count_if_pass_crash_fix.sql index 51522565014..7533a333225 100644 --- a/tests/queries/0_stateless/02497_analyzer_sum_if_count_if_pass_crash_fix.sql +++ b/tests/queries/0_stateless/02497_analyzer_sum_if_count_if_pass_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_rewrite_sum_if_to_count_if = 1; SELECT sum(if((number % 2) = 0 AS cond_expr, 1 AS one_expr, 0 AS zero_expr) AS if_expr), sum(cond_expr), sum(if_expr), one_expr, zero_expr FROM numbers(100); diff --git a/tests/queries/0_stateless/02497_having_without_actual_aggregation_bug.sql b/tests/queries/0_stateless/02497_having_without_actual_aggregation_bug.sql index b28cbd4861e..e5fd26e484e 100644 --- 
a/tests/queries/0_stateless/02497_having_without_actual_aggregation_bug.sql +++ b/tests/queries/0_stateless/02497_having_without_actual_aggregation_bug.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select number from numbers_mt(10) having number >= 9; diff --git a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql index 131eac390f1..cc2d839fbc1 100644 --- a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql +++ b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_if_transform_strings_to_enum = 1; SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/02497_storage_join_right_assert.sql b/tests/queries/0_stateless/02497_storage_join_right_assert.sql index 8f9134e9504..eabaa236379 100644 --- a/tests/queries/0_stateless/02497_storage_join_right_assert.sql +++ b/tests/queries/0_stateless/02497_storage_join_right_assert.sql @@ -7,8 +7,8 @@ CREATE TABLE t2 (key UInt64, a UInt64) ENGINE = Join(ALL, RIGHT, key); INSERT INTO t1 VALUES (1, 1), (2, 2); INSERT INTO t2 VALUES (2, 2), (3, 3); -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT * FROM t1 ALL RIGHT JOIN t2 USING (key) ORDER BY key; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM t1 ALL RIGHT JOIN t2 USING (key) ORDER BY key; diff --git a/tests/queries/0_stateless/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix.sql b/tests/queries/0_stateless/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix.sql index 8491018eb72..76c44f9e0cb 100644 --- a/tests/queries/0_stateless/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix.sql +++ b/tests/queries/0_stateless/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_arithmetic_operations_in_aggregate_functions = 1; DROP TABLE IF EXISTS test_table; diff --git a/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql b/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql index 67623869f0a..472ab358d97 100644 --- a/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql +++ b/tests/queries/0_stateless/02498_analyzer_settings_push_down.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_functions_to_subcolumns = 0; DROP TABLE IF EXISTS test_table; diff --git a/tests/queries/0_stateless/02498_storage_join_key_positions.sql.j2 b/tests/queries/0_stateless/02498_storage_join_key_positions.sql.j2 index e2dad61a93e..e814b8eaf3f 100644 --- a/tests/queries/0_stateless/02498_storage_join_key_positions.sql.j2 +++ b/tests/queries/0_stateless/02498_storage_join_key_positions.sql.j2 @@ -13,7 +13,7 @@ INSERT INTO tjj VALUES (11, 11, 11, 1000), (21, 21, 21, 2000), (31, 31, 31, 3000 {% for use_analyzer in [0, 1] -%} -SET allow_experimental_analyzer = '{{ use_analyzer }}'; +SET enable_analyzer = '{{ use_analyzer }}'; SELECT '--- using ---'; SELECT * FROM t1 ALL INNER JOIN tj USING (key1, key2, key3) ORDER BY key1; diff --git a/tests/queries/0_stateless/02499_analyzer_aggregate_function_lambda_crash_fix.sql b/tests/queries/0_stateless/02499_analyzer_aggregate_function_lambda_crash_fix.sql index f2698512112..7ac817aecde 
100644 --- a/tests/queries/0_stateless/02499_analyzer_aggregate_function_lambda_crash_fix.sql +++ b/tests/queries/0_stateless/02499_analyzer_aggregate_function_lambda_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT count((t, x_0, x_1) -> ((key_2, x_0, x_1) IN (NULL, NULL, '0.3'))) FROM numbers(10); -- { serverError UNSUPPORTED_METHOD } SELECT count((t, x_0, x_1) -> ((key_2, x_0, x_1) IN (NULL, NULL, '0.3'))) OVER (PARTITION BY id) FROM numbers(10); -- { serverError UNSUPPORTED_METHOD } diff --git a/tests/queries/0_stateless/02499_analyzer_set_index.sql b/tests/queries/0_stateless/02499_analyzer_set_index.sql index f90ae61541f..52d96cfcabf 100644 --- a/tests/queries/0_stateless/02499_analyzer_set_index.sql +++ b/tests/queries/0_stateless/02499_analyzer_set_index.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02500_analyzer_storage_view_crash_fix.sql b/tests/queries/0_stateless/02500_analyzer_storage_view_crash_fix.sql index 8f4d14b95cc..f0484a68566 100644 --- a/tests/queries/0_stateless/02500_analyzer_storage_view_crash_fix.sql +++ b/tests/queries/0_stateless/02500_analyzer_storage_view_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh index c4f0994cd13..3c06119e8d2 100755 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh @@ -9,8 +9,8 @@ if [ -z ${ENABLE_ANALYZER+x} ]; then fi OPTIMIZATION_SETTING="query_plan_remove_redundant_distinct" -DISABLE_OPTIMIZATION="set allow_experimental_analyzer=$ENABLE_ANALYZER;SET $OPTIMIZATION_SETTING=0;SET optimize_duplicate_order_by_and_distinct=0" -ENABLE_OPTIMIZATION="set allow_experimental_analyzer=$ENABLE_ANALYZER;SET $OPTIMIZATION_SETTING=1;SET optimize_duplicate_order_by_and_distinct=0" +DISABLE_OPTIMIZATION="set enable_analyzer=$ENABLE_ANALYZER;SET $OPTIMIZATION_SETTING=0;SET optimize_duplicate_order_by_and_distinct=0" +ENABLE_OPTIMIZATION="set enable_analyzer=$ENABLE_ANALYZER;SET $OPTIMIZATION_SETTING=1;SET optimize_duplicate_order_by_and_distinct=0" echo "-- Disabled $OPTIMIZATION_SETTING" query="SELECT DISTINCT * diff --git a/tests/queries/0_stateless/02501_analyzer_expired_context_crash_fix.sql b/tests/queries/0_stateless/02501_analyzer_expired_context_crash_fix.sql index b9ec14501bd..e2c940c829a 100644 --- a/tests/queries/0_stateless/02501_analyzer_expired_context_crash_fix.sql +++ b/tests/queries/0_stateless/02501_analyzer_expired_context_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02502_analyzer_insert_select_crash_fix.sql b/tests/queries/0_stateless/02502_analyzer_insert_select_crash_fix.sql index 4643f65988a..a438276bda5 100644 --- a/tests/queries/0_stateless/02502_analyzer_insert_select_crash_fix.sql +++ b/tests/queries/0_stateless/02502_analyzer_insert_select_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02503_join_switch_alias_fuzz.sql 
b/tests/queries/0_stateless/02503_join_switch_alias_fuzz.sql index 28d64bf3881..113a8493de6 100644 --- a/tests/queries/0_stateless/02503_join_switch_alias_fuzz.sql +++ b/tests/queries/0_stateless/02503_join_switch_alias_fuzz.sql @@ -1,4 +1,4 @@ SELECT * FROM (SELECT 1 AS id, '' AS test) AS a LEFT JOIN (SELECT test, 1 AS id, NULL AS test) AS b ON b.id = a.id -SETTINGS join_algorithm = 'auto', max_rows_in_join = 1, allow_experimental_analyzer = 1 +SETTINGS join_algorithm = 'auto', max_rows_in_join = 1, enable_analyzer = 1 ; diff --git a/tests/queries/0_stateless/02513_analyzer_duplicate_alias_crash_fix.sql b/tests/queries/0_stateless/02513_analyzer_duplicate_alias_crash_fix.sql index fb50ea2c4ca..e54252b5c5f 100644 --- a/tests/queries/0_stateless/02513_analyzer_duplicate_alias_crash_fix.sql +++ b/tests/queries/0_stateless/02513_analyzer_duplicate_alias_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT toUInt64(NULL) AS x FROM (SELECT 1) HAVING x IN (SELECT NULL FROM (SELECT x IN (SELECT x IN (SELECT 1), x IN (SELECT 1) FROM (SELECT 1 WHERE x IN (SELECT NULL FROM (SELECT NULL)))))); diff --git a/tests/queries/0_stateless/02513_analyzer_sort_msan.sql b/tests/queries/0_stateless/02513_analyzer_sort_msan.sql index e5beccaff2a..b86a15e9ef0 100644 --- a/tests/queries/0_stateless/02513_analyzer_sort_msan.sql +++ b/tests/queries/0_stateless/02513_analyzer_sort_msan.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS products; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TABLE products (`price` UInt32) ENGINE = Memory; INSERT INTO products VALUES (1); diff --git a/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql b/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql index 2406be13aa8..df84e2f50b2 100644 --- a/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql +++ b/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql @@ -15,7 +15,7 @@ INSERT INTO c VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); CREATE TABLE d (k UInt64, d1 UInt64, d2 String) ENGINE = Memory; INSERT INTO d VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02515_analyzer_null_for_empty.sql b/tests/queries/0_stateless/02515_analyzer_null_for_empty.sql index de21e9b475e..e12f215743b 100644 --- a/tests/queries/0_stateless/02515_analyzer_null_for_empty.sql +++ b/tests/queries/0_stateless/02515_analyzer_null_for_empty.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET aggregate_functions_null_for_empty = 1; SELECT max(aggr) FROM (SELECT max('92233720368547758.06') AS aggr FROM system.one); diff --git a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql.j2 b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql.j2 index 09447dfce65..a199165a38f 100644 --- a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql.j2 +++ b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql.j2 @@ -13,7 +13,7 @@ INNER JOIN GROUP BY 1 WITH TOTALS ) AS t2 USING (a) -SETTINGS allow_experimental_analyzer=0; +SETTINGS enable_analyzer=0; SELECT * FROM @@ -30,7 +30,7 @@ INNER JOIN GROUP BY 1 WITH TOTALS ) AS t2 USING (a) -SETTINGS allow_experimental_analyzer=1; +SETTINGS enable_analyzer=1; SELECT a FROM diff --git a/tests/queries/0_stateless/02516_projections_and_context.sql b/tests/queries/0_stateless/02516_projections_and_context.sql index 334544eb4fa..ec14fc0aa9e 
100644 --- a/tests/queries/0_stateless/02516_projections_and_context.sql +++ b/tests/queries/0_stateless/02516_projections_and_context.sql @@ -1,10 +1,10 @@ DROP TABLE IF EXISTS test1__fuzz_37; CREATE TABLE test1__fuzz_37 (`i` Date) ENGINE = MergeTree ORDER BY i; insert into test1__fuzz_37 values ('2020-10-10'); -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; SELECT count() FROM test1__fuzz_37 GROUP BY dictHas(NULL, (dictHas(NULL, (('', materialize(NULL)), materialize(NULL))), 'KeyKey')), dictHas('test_dictionary', tuple(materialize('Ke\0'))), tuple(dictHas(NULL, (tuple('Ke\0Ke\0Ke\0Ke\0Ke\0Ke\0\0\0\0Ke\0'), materialize(NULL)))), 'test_dicti\0nary', (('', materialize(NULL)), dictHas(NULL, (dictHas(NULL, tuple(materialize(NULL))), 'KeyKeyKeyKeyKeyKeyKeyKey')), materialize(NULL)); -- { serverError BAD_ARGUMENTS } SELECT count() FROM test1__fuzz_37 GROUP BY dictHas('non_existing_dictionary', materialize('a')); -- { serverError BAD_ARGUMENTS } -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; SELECT count() FROM test1__fuzz_37 GROUP BY dictHas(NULL, (dictHas(NULL, (('', materialize(NULL)), materialize(NULL))), 'KeyKey')), dictHas('test_dictionary', tuple(materialize('Ke\0'))), tuple(dictHas(NULL, (tuple('Ke\0Ke\0Ke\0Ke\0Ke\0Ke\0\0\0\0Ke\0'), materialize(NULL)))), 'test_dicti\0nary', (('', materialize(NULL)), dictHas(NULL, (dictHas(NULL, tuple(materialize(NULL))), 'KeyKeyKeyKeyKeyKeyKeyKey')), materialize(NULL)); -- { serverError BAD_ARGUMENTS } SELECT count() FROM test1__fuzz_37 GROUP BY dictHas('non_existing_dictionary', materialize('a')); -- { serverError BAD_ARGUMENTS } DROP TABLE test1__fuzz_37; diff --git a/tests/queries/0_stateless/02518_rewrite_aggregate_function_with_if.sql b/tests/queries/0_stateless/02518_rewrite_aggregate_function_with_if.sql index fe882da67cb..4ed13307c29 100644 --- a/tests/queries/0_stateless/02518_rewrite_aggregate_function_with_if.sql +++ b/tests/queries/0_stateless/02518_rewrite_aggregate_function_with_if.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = true; +set enable_analyzer = true; -- { echoOn } set optimize_rewrite_aggregate_function_with_if = false; diff --git a/tests/queries/0_stateless/02521_analyzer_aggregation_without_column.sql b/tests/queries/0_stateless/02521_analyzer_aggregation_without_column.sql index 105bce6711c..50bf3cd45e4 100644 --- a/tests/queries/0_stateless/02521_analyzer_aggregation_without_column.sql +++ b/tests/queries/0_stateless/02521_analyzer_aggregation_without_column.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql b/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql index 7842d47d757..f5d601303a2 100644 --- a/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql +++ b/tests/queries/0_stateless/02521_analyzer_array_join_crash.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02525_analyzer_function_in_crash_fix.sql b/tests/queries/0_stateless/02525_analyzer_function_in_crash_fix.sql index 95b896d38ab..dd1688ad400 100644 --- a/tests/queries/0_stateless/02525_analyzer_function_in_crash_fix.sql +++ b/tests/queries/0_stateless/02525_analyzer_function_in_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE 
TABLE test_table diff --git a/tests/queries/0_stateless/02532_analyzer_aggregation_with_rollup.sql b/tests/queries/0_stateless/02532_analyzer_aggregation_with_rollup.sql index 09097eb029f..587ef71df8a 100644 --- a/tests/queries/0_stateless/02532_analyzer_aggregation_with_rollup.sql +++ b/tests/queries/0_stateless/02532_analyzer_aggregation_with_rollup.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT sum(a.number) AS total, diff --git a/tests/queries/0_stateless/02534_analyzer_grouping_function.sql b/tests/queries/0_stateless/02534_analyzer_grouping_function.sql index 3163e03d579..ee1cc1d88d1 100644 --- a/tests/queries/0_stateless/02534_analyzer_grouping_function.sql +++ b/tests/queries/0_stateless/02534_analyzer_grouping_function.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql b/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql index 4ae5df9629a..59bbfc96289 100644 --- a/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql +++ b/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; -- { echoOn } SELECT number, number % 2, sum(number) AS val diff --git a/tests/queries/0_stateless/02535_analyzer_limit_offset.sql b/tests/queries/0_stateless/02535_analyzer_limit_offset.sql index 8f98d823e5c..96aef9557c5 100644 --- a/tests/queries/0_stateless/02535_analyzer_limit_offset.sql +++ b/tests/queries/0_stateless/02535_analyzer_limit_offset.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT number FROM numbers(100) LIMIT 10 OFFSET 10; diff --git a/tests/queries/0_stateless/02538_analyzer_create_table_as_select.sql b/tests/queries/0_stateless/02538_analyzer_create_table_as_select.sql index 168066ce2f9..16634e996fe 100644 --- a/tests/queries/0_stateless/02538_analyzer_create_table_as_select.sql +++ b/tests/queries/0_stateless/02538_analyzer_create_table_as_select.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_data; CREATE TABLE test_table_data diff --git a/tests/queries/0_stateless/02540_analyzer_matcher_alias_materialized_columns.sql b/tests/queries/0_stateless/02540_analyzer_matcher_alias_materialized_columns.sql index cc622dde8fe..58840796c7e 100644 --- a/tests/queries/0_stateless/02540_analyzer_matcher_alias_materialized_columns.sql +++ b/tests/queries/0_stateless/02540_analyzer_matcher_alias_materialized_columns.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02540_input_format_json_ignore_unknown_keys_in_named_tuple.sh b/tests/queries/0_stateless/02540_input_format_json_ignore_unknown_keys_in_named_tuple.sh index eccac543215..48b3b27680c 100755 --- a/tests/queries/0_stateless/02540_input_format_json_ignore_unknown_keys_in_named_tuple.sh +++ b/tests/queries/0_stateless/02540_input_format_json_ignore_unknown_keys_in_named_tuple.sh @@ -115,7 +115,7 @@ EOL # NOTE: due to [1] we cannot use dot.dot notation, only tupleElement() # # [1]: https://github.com/ClickHouse/ClickHouse/issues/24607 -$CLICKHOUSE_LOCAL --allow_experimental_analyzer=1 "${gharchive_settings[@]}" --structure="${gharchive_structure[*]}" -q " +$CLICKHOUSE_LOCAL --enable_analyzer=1 
"${gharchive_settings[@]}" --structure="${gharchive_structure[*]}" -q " SELECT payload.issue.labels.name AS labels, payload.pull_request.merged_by.login AS merged_by diff --git a/tests/queries/0_stateless/02541_analyzer_grouping_sets_crash_fix.sql b/tests/queries/0_stateless/02541_analyzer_grouping_sets_crash_fix.sql index d7af475bbe7..b9aa251bc96 100644 --- a/tests/queries/0_stateless/02541_analyzer_grouping_sets_crash_fix.sql +++ b/tests/queries/0_stateless/02541_analyzer_grouping_sets_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH pow(NULL, 256) AS four SELECT NULL AS two GROUP BY GROUPING SETS ((pow(two, 65536))); diff --git a/tests/queries/0_stateless/02552_analyzer_optimize_group_by_function_keys_crash.sql b/tests/queries/0_stateless/02552_analyzer_optimize_group_by_function_keys_crash.sql index ee9032472a7..85740cd85a2 100644 --- a/tests/queries/0_stateless/02552_analyzer_optimize_group_by_function_keys_crash.sql +++ b/tests/queries/0_stateless/02552_analyzer_optimize_group_by_function_keys_crash.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT NULL GROUP BY tuple('0.0000000007'), count(NULL) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) -- { serverError ILLEGAL_AGGREGATION }; diff --git a/tests/queries/0_stateless/02553_type_object_analyzer.sql b/tests/queries/0_stateless/02553_type_object_analyzer.sql index 55482a02ed1..eb4e49757cf 100644 --- a/tests/queries/0_stateless/02553_type_object_analyzer.sql +++ b/tests/queries/0_stateless/02553_type_object_analyzer.sql @@ -1,6 +1,6 @@ SET output_format_json_named_tuples_as_objects = 1; SET allow_experimental_object_type = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t_json_analyzer; CREATE TABLE t_json_analyzer (a JSON) ENGINE = Memory; diff --git a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql index f926b9037d2..0eed4a8c592 100644 --- a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql +++ b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.sql @@ -60,7 +60,7 @@ FROM (day_)) ) AS t ) -WHERE type_1 = 'all' settings allow_experimental_analyzer=0; +WHERE type_1 = 'all' settings enable_analyzer=0; -- Query plan with analyzer has less Filter steps (which is more optimal) EXPLAIN PIPELINE @@ -83,7 +83,7 @@ FROM (day_)) ) AS t ) -WHERE type_1 = 'all' settings allow_experimental_analyzer=1; +WHERE type_1 = 'all' settings enable_analyzer=1; SELECT ''; SELECT '---Result---'; @@ -129,7 +129,7 @@ FROM (day_)) ) AS t ) -WHERE day_ = '2023-01-05' settings allow_experimental_analyzer=0; +WHERE day_ = '2023-01-05' settings enable_analyzer=0; -- Query plan with analyzer has less Filter steps (which is more optimal) EXPLAIN PIPELINE @@ -151,6 +151,6 @@ FROM (day_)) ) AS t ) -WHERE day_ = '2023-01-05' settings allow_experimental_analyzer=1; +WHERE day_ = '2023-01-05' settings enable_analyzer=1; DROP TABLE test_grouping_sets_predicate; diff --git a/tests/queries/0_stateless/02560_analyzer_materialized_view.sql b/tests/queries/0_stateless/02560_analyzer_materialized_view.sql index 1f268fe1e16..3fdef366dc9 100644 --- a/tests/queries/0_stateless/02560_analyzer_materialized_view.sql +++ b/tests/queries/0_stateless/02560_analyzer_materialized_view.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE 
TABLE test_table diff --git a/tests/queries/0_stateless/02563_analyzer_merge.sql b/tests/queries/0_stateless/02563_analyzer_merge.sql index 217fb7019c4..6c252c22773 100644 --- a/tests/queries/0_stateless/02563_analyzer_merge.sql +++ b/tests/queries/0_stateless/02563_analyzer_merge.sql @@ -1,6 +1,6 @@ -- Tags: no-parallel -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP DATABASE IF EXISTS 02563_db; CREATE DATABASE 02563_db; diff --git a/tests/queries/0_stateless/02564_analyzer_cross_to_inner.sql b/tests/queries/0_stateless/02564_analyzer_cross_to_inner.sql index a83cd238982..7032559e066 100644 --- a/tests/queries/0_stateless/02564_analyzer_cross_to_inner.sql +++ b/tests/queries/0_stateless/02564_analyzer_cross_to_inner.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/02565_analyzer_limit_settings.sql b/tests/queries/0_stateless/02565_analyzer_limit_settings.sql index 7c02c2d0d20..1dd6735e64d 100644 --- a/tests/queries/0_stateless/02565_analyzer_limit_settings.sql +++ b/tests/queries/0_stateless/02565_analyzer_limit_settings.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } SET limit = 0; diff --git a/tests/queries/0_stateless/02566_analyzer_limit_settings_distributed.sql b/tests/queries/0_stateless/02566_analyzer_limit_settings_distributed.sql index 1624344b5a9..a2620f436f4 100644 --- a/tests/queries/0_stateless/02566_analyzer_limit_settings_distributed.sql +++ b/tests/queries/0_stateless/02566_analyzer_limit_settings_distributed.sql @@ -1,6 +1,6 @@ -- Tags: distributed -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 'limit', * FROM remote('127.1', view(SELECT * FROM numbers(10))) SETTINGS limit=5; SELECT 'offset', * FROM remote('127.1', view(SELECT * FROM numbers(10))) SETTINGS offset=5; diff --git a/tests/queries/0_stateless/02567_and_consistency.reference b/tests/queries/0_stateless/02567_and_consistency.reference index e0014f187a8..7e6d1f24f43 100644 --- a/tests/queries/0_stateless/02567_and_consistency.reference +++ b/tests/queries/0_stateless/02567_and_consistency.reference @@ -8,7 +8,7 @@ true ===== 1 ===== -allow_experimental_analyzer +enable_analyzer true #45440 2086579505 0 1 0 0 diff --git a/tests/queries/0_stateless/02567_and_consistency.sql b/tests/queries/0_stateless/02567_and_consistency.sql index 0eeab99e539..0442a6dad7f 100644 --- a/tests/queries/0_stateless/02567_and_consistency.sql +++ b/tests/queries/0_stateless/02567_and_consistency.sql @@ -50,9 +50,9 @@ SELECT 1 and sin(1); SELECT '====='; -SELECT 'allow_experimental_analyzer'; +SELECT 'enable_analyzer'; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT toBool(sin(SUM(number))) AS x FROM diff --git a/tests/queries/0_stateless/02576_predicate_push_down_sorting_fix.sql b/tests/queries/0_stateless/02576_predicate_push_down_sorting_fix.sql index 2dade7837b7..486a26613f6 100644 --- a/tests/queries/0_stateless/02576_predicate_push_down_sorting_fix.sql +++ b/tests/queries/0_stateless/02576_predicate_push_down_sorting_fix.sql @@ -1,3 +1,3 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN header = 1, actions = 1 SELECT number FROM (SELECT number FROM numbers(2) ORDER BY ignore(2)) WHERE ignore(2); diff --git a/tests/queries/0_stateless/02576_rewrite_array_exists_to_has.sql b/tests/queries/0_stateless/02576_rewrite_array_exists_to_has.sql index 5233f2f7e3c..b5a123e3767 
100644 --- a/tests/queries/0_stateless/02576_rewrite_array_exists_to_has.sql +++ b/tests/queries/0_stateless/02576_rewrite_array_exists_to_has.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = true; +set enable_analyzer = true; set optimize_rewrite_array_exists_to_has = false; EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); @@ -8,7 +8,7 @@ set optimize_rewrite_array_exists_to_has = true; EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> 5 = x , materialize(range(10))) from numbers(10); -set allow_experimental_analyzer = false; +set enable_analyzer = false; set optimize_rewrite_array_exists_to_has = false; EXPLAIN SYNTAX select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); diff --git a/tests/queries/0_stateless/02577_analyzer_array_join_calc_twice.sql b/tests/queries/0_stateless/02577_analyzer_array_join_calc_twice.sql index b6bb258db28..0b281dd4f81 100644 --- a/tests/queries/0_stateless/02577_analyzer_array_join_calc_twice.sql +++ b/tests/queries/0_stateless/02577_analyzer_array_join_calc_twice.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT 1 + arrayJoin(a) AS m FROM (SELECT [1, 2, 3] AS a) GROUP BY m; diff --git a/tests/queries/0_stateless/02579_fill_empty_chunk.sql b/tests/queries/0_stateless/02579_fill_empty_chunk.sql index 30942b154c9..aeae98df7a3 100644 --- a/tests/queries/0_stateless/02579_fill_empty_chunk.sql +++ b/tests/queries/0_stateless/02579_fill_empty_chunk.sql @@ -1,7 +1,7 @@ -- this SELECT produces empty chunk in FillingTransform SET enable_positional_arguments = 0; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT 2 AS x, diff --git a/tests/queries/0_stateless/02579_fill_empty_chunk_analyzer.sql b/tests/queries/0_stateless/02579_fill_empty_chunk_analyzer.sql index 8350173f443..144640149ea 100644 --- a/tests/queries/0_stateless/02579_fill_empty_chunk_analyzer.sql +++ b/tests/queries/0_stateless/02579_fill_empty_chunk_analyzer.sql @@ -1,7 +1,7 @@ -- this SELECT produces empty chunk in FillingTransform SET enable_positional_arguments = 0; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- With analyzer this special query has correct output SELECT diff --git a/tests/queries/0_stateless/02582_analyzer_join_subquery_empty_column_list.sql b/tests/queries/0_stateless/02582_analyzer_join_subquery_empty_column_list.sql index 10e5871cc44..33c9296a0d8 100644 --- a/tests/queries/0_stateless/02582_analyzer_join_subquery_empty_column_list.sql +++ b/tests/queries/0_stateless/02582_analyzer_join_subquery_empty_column_list.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } SELECT a FROM ( select 1 AS a ) AS t1, ( select 2 AS b, 3 AS c) AS t2; @@ -10,5 +10,3 @@ SELECT b FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, SELECT c FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; SELECT 42 FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; SELECT count() FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; - - diff --git a/tests/queries/0_stateless/02661_quantile_approx.reference b/tests/queries/0_stateless/02661_quantile_approx.reference index 
0ee846a268b..239516f9ac4 100644 --- a/tests/queries/0_stateless/02661_quantile_approx.reference +++ b/tests/queries/0_stateless/02661_quantile_approx.reference @@ -33,10 +33,10 @@ FROM FROM numbers(49999) ); [24902,44518,49999] -select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } -select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } -select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select medianGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantileGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantileGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select medianGK(100)(number) from numbers(10); 4 select quantileGK(100)(number) from numbers(10); @@ -47,8 +47,8 @@ select quantileGK(100, 0.5, 0.75)(number) from numbers(10); -- { serverError NUM select quantileGK('abc', 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select quantileGK(1.23, 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select quantileGK(-100, 0.5)(number) from numbers(10); -- { serverError BAD_ARGUMENTS } -select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } -select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantilesGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100)(number) from numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100, 0.5)(number) from numbers(10); [4] diff --git a/tests/queries/0_stateless/02661_quantile_approx.sql b/tests/queries/0_stateless/02661_quantile_approx.sql index c0004260fa1..732ce645c98 100644 --- a/tests/queries/0_stateless/02661_quantile_approx.sql +++ b/tests/queries/0_stateless/02661_quantile_approx.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; -- { echoOn } with arrayJoin([0, 1, 2, 10]) as x select quantilesGK(100, 0.5, 0.4, 0.1)(x); @@ -29,11 +29,11 @@ FROM FROM numbers(49999) ); -select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } -select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select medianGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } -select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; 
-- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantileGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantileGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select medianGK(100)(number) from numbers(10); select quantileGK(100)(number) from numbers(10); @@ -43,8 +43,8 @@ select quantileGK('abc', 0.5)(number) from numbers(10); -- { serverError ILLEGAL select quantileGK(1.23, 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select quantileGK(-100, 0.5)(number) from numbers(10); -- { serverError BAD_ARGUMENTS } -select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } -select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantilesGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100)(number) from numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100, 0.5)(number) from numbers(10); diff --git a/tests/queries/0_stateless/02662_first_last_value.reference b/tests/queries/0_stateless/02662_first_last_value.reference index b0783399623..308cbf850b0 100644 --- a/tests/queries/0_stateless/02662_first_last_value.reference +++ b/tests/queries/0_stateless/02662_first_last_value.reference @@ -18,7 +18,7 @@ select last_value(b) ignore nulls from test; 5 select last_value(b) respect nulls from test; \N -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- first value select first_value(b) from test; 3 diff --git a/tests/queries/0_stateless/02662_first_last_value.sql b/tests/queries/0_stateless/02662_first_last_value.sql index 8e429e2e27d..16768bd6f1e 100644 --- a/tests/queries/0_stateless/02662_first_last_value.sql +++ b/tests/queries/0_stateless/02662_first_last_value.sql @@ -15,7 +15,7 @@ select last_value(b) from test; select last_value(b) ignore nulls from test; select last_value(b) respect nulls from test; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- first value select first_value(b) from test; diff --git a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql index 7d624195df9..dabdcfd5507 100644 --- a/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql +++ b/tests/queries/0_stateless/02668_logical_optimizer_removing_redundant_checks.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS 02668_logical_optimizer; diff --git a/tests/queries/0_stateless/02674_trivial_count_analyzer.reference b/tests/queries/0_stateless/02674_trivial_count_analyzer.reference index 05feadb58a0..9d7f393e71c 100644 --- a/tests/queries/0_stateless/02674_trivial_count_analyzer.reference +++ b/tests/queries/0_stateless/02674_trivial_count_analyzer.reference @@ -1,5 +1,5 @@ -- { echoOn } -set allow_experimental_analyzer=1; +set enable_analyzer=1; set optimize_trivial_count_query=1; create table m3(a Int64, b UInt64) Engine=MergeTree order by tuple(); select count() from m3; diff --git a/tests/queries/0_stateless/02674_trivial_count_analyzer.sql 
b/tests/queries/0_stateless/02674_trivial_count_analyzer.sql index 988d1b9ba92..c13a9dc68f2 100644 --- a/tests/queries/0_stateless/02674_trivial_count_analyzer.sql +++ b/tests/queries/0_stateless/02674_trivial_count_analyzer.sql @@ -2,7 +2,7 @@ drop table if exists m3; drop table if exists replacing_m3; -- { echoOn } -set allow_experimental_analyzer=1; +set enable_analyzer=1; set optimize_trivial_count_query=1; create table m3(a Int64, b UInt64) Engine=MergeTree order by tuple(); diff --git a/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.sql b/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.sql index 930127497ae..6c8932b5d58 100644 --- a/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.sql +++ b/tests/queries/0_stateless/02675_predicate_push_down_filled_join_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET single_join_prefer_left_table = 0; SET optimize_move_to_prewhere = 0; diff --git a/tests/queries/0_stateless/02676_analyzer_limit_offset.sql b/tests/queries/0_stateless/02676_analyzer_limit_offset.sql index 39c6b85f088..5dbe55e916c 100644 --- a/tests/queries/0_stateless/02676_analyzer_limit_offset.sql +++ b/tests/queries/0_stateless/02676_analyzer_limit_offset.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer=1; +set enable_analyzer=1; DROP TABLE IF EXISTS test; CREATE TABLE test (i UInt64) Engine = MergeTree() order by i; diff --git a/tests/queries/0_stateless/02676_distinct_reading_in_order_analyzer.sql b/tests/queries/0_stateless/02676_distinct_reading_in_order_analyzer.sql index f9ff1eed111..6a219cd3781 100644 --- a/tests/queries/0_stateless/02676_distinct_reading_in_order_analyzer.sql +++ b/tests/queries/0_stateless/02676_distinct_reading_in_order_analyzer.sql @@ -1,6 +1,6 @@ drop table if exists t; -set allow_experimental_analyzer=1; +set enable_analyzer=1; create table t (a UInt64, b UInt64) engine=MergeTree() order by (a); insert into t select number % 2, number from numbers(10); diff --git a/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql b/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql index c06ea009c1d..dc906a92f1e 100644 --- a/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql +++ b/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql @@ -18,7 +18,7 @@ FROM bitmapHasAny(bitmapBuild([toUInt64(1)]), ( SELECT groupBitmapState(toUInt64(2)) )) has2 -) SETTINGS allow_experimental_analyzer = 0; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +) SETTINGS enable_analyzer = 0; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT '--------------'; @@ -32,5 +32,4 @@ FROM bitmapHasAny(bitmapBuild([toUInt64(1)]), ( SELECT groupBitmapState(toUInt64(2)) )) has2 -) SETTINGS allow_experimental_analyzer = 1; - +) SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02677_analyzer_compound_expressions.sql b/tests/queries/0_stateless/02677_analyzer_compound_expressions.sql index 6b7fdab8993..90781f70158 100644 --- a/tests/queries/0_stateless/02677_analyzer_compound_expressions.sql +++ b/tests/queries/0_stateless/02677_analyzer_compound_expressions.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH ('a', 'b')::Tuple(c1 String, c2 String) AS t SELECT t.c1, t.c2; diff --git a/tests/queries/0_stateless/02677_get_subcolumn_array_of_tuples.sql b/tests/queries/0_stateless/02677_get_subcolumn_array_of_tuples.sql index 5779821afaa..95665979857 100644 --- 
a/tests/queries/0_stateless/02677_get_subcolumn_array_of_tuples.sql +++ b/tests/queries/0_stateless/02677_get_subcolumn_array_of_tuples.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t_get_subcolumn; diff --git a/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql b/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql index 4bc7be13490..4911979394a 100644 --- a/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql +++ b/tests/queries/0_stateless/02679_explain_merge_tree_prewhere_row_policy.sql @@ -10,8 +10,8 @@ INSERT INTO test_table VALUES (0, 'Value'); DROP ROW POLICY IF EXISTS test_row_policy ON test_table; CREATE ROW POLICY test_row_policy ON test_table USING id >= 5 TO ALL; -EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5 settings allow_experimental_analyzer=0; -EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5 settings allow_experimental_analyzer=1; +EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5 settings enable_analyzer=0; +EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5 settings enable_analyzer=1; DROP ROW POLICY test_row_policy ON test_table; DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql b/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql index 502cca20ab2..680b98fb1bd 100644 --- a/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql +++ b/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql @@ -1,4 +1,4 @@ SELECT polygonsSymDifferenceCartesian([[[(1., 1.)]] AS x], [x]) GROUP BY x WITH ROLLUP; -SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS allow_experimental_analyzer=0; -SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS allow_experimental_analyzer=1; +SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS enable_analyzer=0; +SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS enable_analyzer=1; SELECT polygonsSymDifferenceCartesian([[[(100.0001, 1000.0001), (-20., 20.), (10., 10.), (20., 20.), (20., -20.), (1000.0001, 1.1920928955078125e-7)]],[[(0.0001, 100000000000000000000.)]] AS x],[x]) GROUP BY x WITH ROLLUP; diff --git a/tests/queries/0_stateless/02699_polygons_sym_difference_total.sql b/tests/queries/0_stateless/02699_polygons_sym_difference_total.sql index 0fac4b11320..53d0a3bb543 100644 --- a/tests/queries/0_stateless/02699_polygons_sym_difference_total.sql +++ 
b/tests/queries/0_stateless/02699_polygons_sym_difference_total.sql @@ -1,2 +1,2 @@ -SET allow_experimental_analyzer=0; +SET enable_analyzer=0; SELECT [(9223372036854775807, 1.1754943508222875e-38)], x, NULL, polygonsSymDifferenceCartesian([[[(1.1754943508222875e-38, 1.1920928955078125e-7), (0.5, 0.5)]], [[(1.1754943508222875e-38, 1.1920928955078125e-7), (1.1754943508222875e-38, 1.1920928955078125e-7)], [(0., 1.0001)]], [[(1., 1.0001)]] AS x], [[[(3.4028234663852886e38, 0.9999)]]]) GROUP BY GROUPING SETS ((x)) WITH TOTALS diff --git a/tests/queries/0_stateless/02699_polygons_sym_difference_total_analyzer.sql b/tests/queries/0_stateless/02699_polygons_sym_difference_total_analyzer.sql index 879e0e5297f..40f610ae5a6 100644 --- a/tests/queries/0_stateless/02699_polygons_sym_difference_total_analyzer.sql +++ b/tests/queries/0_stateless/02699_polygons_sym_difference_total_analyzer.sql @@ -1,2 +1,2 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT [(9223372036854775807, 1.1754943508222875e-38)], x, NULL, polygonsSymDifferenceCartesian([[[(1.1754943508222875e-38, 1.1920928955078125e-7), (0.5, 0.5)]], [[(1.1754943508222875e-38, 1.1920928955078125e-7), (1.1754943508222875e-38, 1.1920928955078125e-7)], [(0., 1.0001)]], [[(1., 1.0001)]] AS x], [[[(3.4028234663852886e38, 0.9999)]]]) GROUP BY GROUPING SETS ((x)) WITH TOTALS diff --git a/tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.sql b/tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.sql index 092bda23164..9cfc4d83058 100644 --- a/tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.sql +++ b/tests/queries/0_stateless/02701_invalid_having_NOT_AN_AGGREGATE.sql @@ -1 +1 @@ -SELECT a, sum(b) FROM (SELECT 1 AS a, 1 AS b, 0 AS c) GROUP BY a HAVING c SETTINGS allow_experimental_analyzer=1 -- { serverError NOT_AN_AGGREGATE } +SELECT a, sum(b) FROM (SELECT 1 AS a, 1 AS b, 0 AS c) GROUP BY a HAVING c SETTINGS enable_analyzer=1 -- { serverError NOT_AN_AGGREGATE } diff --git a/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql b/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql index 72ab507f541..9e2927334e1 100644 --- a/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql +++ b/tests/queries/0_stateless/02702_logical_optimizer_with_nulls.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS 02702_logical_optimizer; diff --git a/tests/queries/0_stateless/02703_explain_query_tree_is_forbidden_with_old_analyzer.sql b/tests/queries/0_stateless/02703_explain_query_tree_is_forbidden_with_old_analyzer.sql index d351bfe402c..c028e74f1b5 100644 --- a/tests/queries/0_stateless/02703_explain_query_tree_is_forbidden_with_old_analyzer.sql +++ b/tests/queries/0_stateless/02703_explain_query_tree_is_forbidden_with_old_analyzer.sql @@ -1,2 +1,2 @@ -set allow_experimental_analyzer=0; +set enable_analyzer=0; EXPLAIN QUERY TREE run_passes = true, dump_passes = true SELECT 1; -- { serverError NOT_IMPLEMENTED } diff --git a/tests/queries/0_stateless/02704_storage_merge_explain_graph_crash.sql b/tests/queries/0_stateless/02704_storage_merge_explain_graph_crash.sql index 44a8fe4f049..db5eddf2a90 100644 --- a/tests/queries/0_stateless/02704_storage_merge_explain_graph_crash.sql +++ b/tests/queries/0_stateless/02704_storage_merge_explain_graph_crash.sql @@ -13,4 +13,4 @@ CREATE TABLE foo2_dist (`Id` UInt32, `Val` String) ENGINE = Distributed(test_sha CREATE TABLE merge1 AS foo ENGINE = Merge(currentDatabase(), '^(foo|foo2_dist)$'); 
EXPLAIN PIPELINE graph = 1, compact = 1 SELECT * FROM merge1 FORMAT Null; -EXPLAIN PIPELINE graph = 1, compact = 1 SELECT * FROM merge1 FORMAT Null SETTINGS allow_experimental_analyzer=1; +EXPLAIN PIPELINE graph = 1, compact = 1 SELECT * FROM merge1 FORMAT Null SETTINGS enable_analyzer=1; diff --git a/tests/queries/0_stateless/02707_analyzer_nested_lambdas_types.sql b/tests/queries/0_stateless/02707_analyzer_nested_lambdas_types.sql index f9258d61900..320e1111e65 100644 --- a/tests/queries/0_stateless/02707_analyzer_nested_lambdas_types.sql +++ b/tests/queries/0_stateless/02707_analyzer_nested_lambdas_types.sql @@ -1,24 +1,24 @@ SELECT range(1), arrayMap(x -> arrayMap(x -> x, range(x)), [1]) -SETTINGS allow_experimental_analyzer = 0; +SETTINGS enable_analyzer = 0; SELECT range(1), arrayMap(x -> arrayMap(x -> x, range(x)), [1]) -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; SELECT range(1), arrayMap(x -> arrayMap(x -> 1, range(x)), [1]) -SETTINGS allow_experimental_analyzer = 0; +SETTINGS enable_analyzer = 0; SELECT range(1), arrayMap(x -> arrayMap(x -> 1, range(x)), [1]) -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; SELECT range(1), arrayMap(x -> arrayMap(y -> 1, range(x)), [1]) -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02722_matcher_join_use_nulls.sql.j2 b/tests/queries/0_stateless/02722_matcher_join_use_nulls.sql.j2 index 25451a34867..6a8472fecdf 100644 --- a/tests/queries/0_stateless/02722_matcher_join_use_nulls.sql.j2 +++ b/tests/queries/0_stateless/02722_matcher_join_use_nulls.sql.j2 @@ -8,7 +8,7 @@ INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (2, 2); SET join_use_nulls = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } diff --git a/tests/queries/0_stateless/02725_cnf_large_check.sql b/tests/queries/0_stateless/02725_cnf_large_check.sql index 0780e6bcdd3..2567636c02c 100644 --- a/tests/queries/0_stateless/02725_cnf_large_check.sql +++ b/tests/queries/0_stateless/02725_cnf_large_check.sql @@ -7,21 +7,21 @@ INSERT INTO 02725_cnf VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, SELECT count() FROM 02725_cnf WHERE (c5 AND (NOT c0)) OR ((NOT c3) AND (NOT c6) AND (NOT c1) AND (NOT c6)) OR (c7 AND (NOT c3) AND (NOT c5) AND (NOT c7)) OR ((NOT c8) AND c5) OR ((NOT c0)) OR ((NOT c8) AND (NOT c5) AND c1 AND c6 AND c3) OR (c7 AND (NOT c0) AND c6 AND c1 AND (NOT c2)) OR (c3 AND (NOT c9) AND c1) -SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 1; +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 1; SELECT count() FROM 02725_cnf WHERE (c5 AND (NOT c0)) OR ((NOT c3) AND (NOT c6) AND (NOT c1) AND (NOT c6)) OR (c7 AND (NOT c3) AND (NOT c5) AND (NOT c7)) OR ((NOT c8) AND c5) OR ((NOT c0)) OR ((NOT c8) AND (NOT c5) AND c1 AND c6 AND c3) OR (c7 AND (NOT c0) AND c6 AND c1 AND (NOT c2)) OR (c3 AND (NOT c9) AND c1) -SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 0; +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 0; SELECT count() FROM 02725_cnf WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) -SETTINGS convert_query_to_cnf = 1, allow_experimental_analyzer = 1; +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 1; SELECT count() FROM 02725_cnf WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) -SETTINGS 
convert_query_to_cnf = 1, allow_experimental_analyzer = 0; +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 0; DROP TABLE 02725_cnf; diff --git a/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.sql.j2 b/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.sql.j2 index c2f3c51b17a..4ddf41c4d6d 100644 --- a/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.sql.j2 +++ b/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.sql.j2 @@ -31,7 +31,7 @@ INSERT INTO nnna VALUES (1, [[([([(1,'d')],'d')], 's')]], ['s']); CREATE TABLE nnnb ( x UInt64, t Nested(t Nested(t Nested(t Nested(t UInt32, s String), s String), s String), s String) ) ENGINE = MergeTree ORDER BY x; INSERT INTO nnnb VALUES (1, [[([([(1,'d')],'d')], 's')]], ['s']); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; {% for join_use_nulls in [0, 1] -%} diff --git a/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.reference b/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.reference index 451f0d6d485..125fa524c4a 100644 --- a/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.reference +++ b/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.reference @@ -33,8 +33,8 @@ =============== QUERIES EXECUTED BY PARALLEL INNER QUERY ALONE =============== 0 3 SELECT `__table1`.`key` AS `key`, `__table1`.`value1` AS `value1`, `__table1`.`value2` AS `value2`, toUInt64(min(`__table1`.`time`)) AS `start_ts` FROM `default`.`join_inner_table` AS `__table1` PREWHERE (`__table1`.`id` = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (`__table1`.`number` > _CAST(1610517366120, \'UInt64\')) GROUP BY `__table1`.`key`, `__table1`.`value1`, `__table1`.`value2` ORDER BY `__table1`.`key` ASC, `__table1`.`value1` ASC, `__table1`.`value2` ASC LIMIT _CAST(10, \'UInt64\') 0 3 SELECT `key`, `value1`, `value2`, toUInt64(min(`time`)) AS `start_ts` FROM `default`.`join_inner_table` PREWHERE (`id` = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (`number` > toUInt64(\'1610517366120\')) GROUP BY `key`, `value1`, `value2` ORDER BY `key` ASC, `value1` ASC, `value2` ASC LIMIT 10 -1 1 -- Parallel inner query alone\nSELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\nFROM join_inner_table\nPREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\nGROUP BY key, value1, value2\nORDER BY key, value1, value2\nLIMIT 10\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=0; -1 1 -- Parallel inner query alone\nSELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\nFROM join_inner_table\nPREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\nGROUP BY key, value1, value2\nORDER BY key, value1, value2\nLIMIT 10\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=1; +1 1 -- Parallel inner query alone\nSELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\nFROM join_inner_table\nPREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\nGROUP BY key, value1, value2\nORDER BY key, value1, value2\nLIMIT 10\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=0; +1 1 -- Parallel inner query alone\nSELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\nFROM join_inner_table\nPREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\nGROUP BY 
key, value1, value2\nORDER BY key, value1, value2\nLIMIT 10\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=1; =============== OUTER QUERY (NO PARALLEL) =============== >T%O ,z< 10 NQTpY# W\\Xx4 10 @@ -61,5 +61,5 @@ t _CAST(1610517366120, \'UInt64\')) GROUP BY `__table3`.`key`, `__table3`.`value1`, `__table3`.`value2`) AS `__table2` USING (`key`) GROUP BY `__table1`.`key`, `__table2`.`value1`, `__table2`.`value2` 0 3 SELECT `key`, `value1`, `value2` FROM `default`.`join_inner_table` PREWHERE (`id` = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (`number` > toUInt64(\'1610517366120\')) GROUP BY `key`, `value1`, `value2` 0 3 SELECT `value1`, `value2`, count() AS `count` FROM `default`.`join_outer_table` ALL INNER JOIN `_data_` USING (`key`) GROUP BY `key`, `value1`, `value2` -1 1 -- Parallel full query\nSELECT\n value1,\n value2,\n avg(count) AS avg\nFROM\n (\n SELECT\n key,\n value1,\n value2,\n count() AS count\n FROM join_outer_table\n INNER JOIN\n (\n SELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\n FROM join_inner_table\n PREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\n GROUP BY key, value1, value2\n ) USING (key)\n GROUP BY key, value1, value2\n )\nGROUP BY value1, value2\nORDER BY value1, value2\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=0; -1 1 -- Parallel full query\nSELECT\n value1,\n value2,\n avg(count) AS avg\nFROM\n (\n SELECT\n key,\n value1,\n value2,\n count() AS count\n FROM join_outer_table\n INNER JOIN\n (\n SELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\n FROM join_inner_table\n PREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\n GROUP BY key, value1, value2\n ) USING (key)\n GROUP BY key, value1, value2\n )\nGROUP BY value1, value2\nORDER BY value1, value2\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=1; +1 1 -- Parallel full query\nSELECT\n value1,\n value2,\n avg(count) AS avg\nFROM\n (\n SELECT\n key,\n value1,\n value2,\n count() AS count\n FROM join_outer_table\n INNER JOIN\n (\n SELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\n FROM join_inner_table\n PREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\n GROUP BY key, value1, value2\n ) USING (key)\n GROUP BY key, value1, value2\n )\nGROUP BY value1, value2\nORDER BY value1, value2\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=0; +1 1 -- Parallel full query\nSELECT\n value1,\n value2,\n avg(count) AS avg\nFROM\n (\n SELECT\n key,\n value1,\n value2,\n count() AS count\n FROM join_outer_table\n INNER JOIN\n (\n SELECT\n key,\n value1,\n value2,\n toUInt64(min(time)) AS start_ts\n FROM join_inner_table\n PREWHERE (id = \'833c9e22-c245-4eb5-8745-117a9a1f26b1\') AND (number > toUInt64(\'1610517366120\'))\n GROUP BY key, value1, value2\n ) USING (key)\n GROUP BY key, value1, value2\n )\nGROUP BY value1, value2\nORDER BY value1, value2\nSETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=1; diff --git a/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql b/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql index 7693d0da295..8121d60a05b 100644 --- a/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql +++ b/tests/queries/0_stateless/02731_parallel_replicas_join_subquery.sql 
@@ -38,7 +38,7 @@ FROM join_inner_table GROUP BY key, value1, value2 ORDER BY key, value1, value2 LIMIT 10; --- settings allow_experimental_analyzer=0; +-- settings enable_analyzer=0; -- SELECT -- key, @@ -49,7 +49,7 @@ LIMIT 10; -- PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) -- GROUP BY key, value1, value2 -- ORDER BY key, value1, value2 --- LIMIT 10 settings allow_experimental_analyzer=1; +-- LIMIT 10 settings enable_analyzer=1; SELECT '=============== INNER QUERY (PARALLEL) ==============='; @@ -64,7 +64,7 @@ PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1 GROUP BY key, value1, value2 ORDER BY key, value1, value2 LIMIT 10 -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=0; -- Parallel inner query alone SELECT @@ -77,7 +77,7 @@ PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1 GROUP BY key, value1, value2 ORDER BY key, value1, value2 LIMIT 10 -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=1; SELECT '=============== QUERIES EXECUTED BY PARALLEL INNER QUERY ALONE ==============='; @@ -184,7 +184,7 @@ FROM ) GROUP BY value1, value2 ORDER BY value1, value2 -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=0; -- Parallel full query SELECT @@ -214,7 +214,7 @@ FROM ) GROUP BY value1, value2 ORDER BY value1, value2 -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, enable_analyzer=1; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02734_optimize_group_by.sql b/tests/queries/0_stateless/02734_optimize_group_by.sql index 28e86c04b0f..626805d028d 100644 --- a/tests/queries/0_stateless/02734_optimize_group_by.sql +++ b/tests/queries/0_stateless/02734_optimize_group_by.sql @@ -1,5 +1,5 @@ -SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS allow_experimental_analyzer = 0; -SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS allow_experimental_analyzer = 1; +SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS enable_analyzer = 0; +SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS enable_analyzer = 1; SELECT 'a' AS key, 'b' as value GROUP BY ignore(1) WITH CUBE; diff --git a/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.reference b/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.reference index d21f914f0dc..a6c8c0bbc3e 100644 --- a/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.reference +++ b/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.reference @@ -1,24 +1,24 @@ -view allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +view enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 0 1 0 1 2 3 -subquery allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +subquery enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries 
SelectQueriesWithSubqueries QueriesWithSubqueries 1 0 0 1 0 2 2 -CSE allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CSE enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 0 0 1 0 2 2 -CSE_Multi allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CSE_Multi enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 0 0 1 0 2 2 -CTE allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CTE enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 0 0 1 0 2 2 -CTE_Multi allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CTE_Multi enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 0 0 1 0 4 4 -view allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +view enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 1 1 0 1 3 4 -subquery allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +subquery enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 1 0 1 0 2 2 -CSE allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CSE enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 1 0 1 0 2 2 -CSE_Multi allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CSE_Multi enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 1 0 1 0 2 2 -CTE allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CTE enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 1 0 1 0 2 2 -CTE_Multi allow_experimental_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries +CTE_Multi enable_analyzer InsertQuery SelectQuery InsertQueriesWithSubqueries SelectQueriesWithSubqueries QueriesWithSubqueries 1 1 0 1 0 4 4 diff --git a/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh b/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh index 84031ad9081..b7d93b5396c 100755 --- a/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh +++ b/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh @@ -14,14 +14,14 @@ $CLICKHOUSE_CLIENT -n -q " CREATE MATERIALIZED VIEW mv TO output SQL SECURITY NONE AS SELECT * FROM input; " -for allow_experimental_analyzer in 0 1; do +for enable_analyzer in 0 1; do query_id="$(random_str 10)" - $CLICKHOUSE_CLIENT --allow_experimental_analyzer "$allow_experimental_analyzer" --query_id "$query_id" -q "INSERT 
INTO input SELECT * FROM numbers(1)" + $CLICKHOUSE_CLIENT --enable_analyzer "$enable_analyzer" --query_id "$query_id" -q "INSERT INTO input SELECT * FROM numbers(1)" $CLICKHOUSE_CLIENT -mn -q " SYSTEM FLUSH LOGS; SELECT 1 view, - $allow_experimental_analyzer allow_experimental_analyzer, + $enable_analyzer enable_analyzer, ProfileEvents['InsertQuery'] InsertQuery, ProfileEvents['SelectQuery'] SelectQuery, ProfileEvents['InsertQueriesWithSubqueries'] InsertQueriesWithSubqueries, @@ -34,12 +34,12 @@ for allow_experimental_analyzer in 0 1; do " query_id="$(random_str 10)" - $CLICKHOUSE_CLIENT --allow_experimental_analyzer "$allow_experimental_analyzer" --query_id "$query_id" -q "SELECT * FROM system.one WHERE dummy IN (SELECT * FROM system.one) FORMAT Null" + $CLICKHOUSE_CLIENT --enable_analyzer "$enable_analyzer" --query_id "$query_id" -q "SELECT * FROM system.one WHERE dummy IN (SELECT * FROM system.one) FORMAT Null" $CLICKHOUSE_CLIENT -mn -q " SYSTEM FLUSH LOGS; SELECT 1 subquery, - $allow_experimental_analyzer allow_experimental_analyzer, + $enable_analyzer enable_analyzer, ProfileEvents['InsertQuery'] InsertQuery, ProfileEvents['SelectQuery'] SelectQuery, ProfileEvents['InsertQueriesWithSubqueries'] InsertQueriesWithSubqueries, @@ -51,12 +51,12 @@ for allow_experimental_analyzer in 0 1; do " query_id="$(random_str 10)" - $CLICKHOUSE_CLIENT --allow_experimental_analyzer "$allow_experimental_analyzer" --query_id "$query_id" -q "WITH (SELECT * FROM system.one) AS x SELECT x FORMAT Null" + $CLICKHOUSE_CLIENT --enable_analyzer "$enable_analyzer" --query_id "$query_id" -q "WITH (SELECT * FROM system.one) AS x SELECT x FORMAT Null" $CLICKHOUSE_CLIENT -mn -q " SYSTEM FLUSH LOGS; SELECT 1 CSE, - $allow_experimental_analyzer allow_experimental_analyzer, + $enable_analyzer enable_analyzer, ProfileEvents['InsertQuery'] InsertQuery, ProfileEvents['SelectQuery'] SelectQuery, ProfileEvents['InsertQueriesWithSubqueries'] InsertQueriesWithSubqueries, @@ -68,12 +68,12 @@ for allow_experimental_analyzer in 0 1; do " query_id="$(random_str 10)" - $CLICKHOUSE_CLIENT --allow_experimental_analyzer "$allow_experimental_analyzer" --query_id "$query_id" -q "WITH (SELECT * FROM system.one) AS x SELECT x, x FORMAT Null" + $CLICKHOUSE_CLIENT --enable_analyzer "$enable_analyzer" --query_id "$query_id" -q "WITH (SELECT * FROM system.one) AS x SELECT x, x FORMAT Null" $CLICKHOUSE_CLIENT -mn -q " SYSTEM FLUSH LOGS; SELECT 1 CSE_Multi, - $allow_experimental_analyzer allow_experimental_analyzer, + $enable_analyzer enable_analyzer, ProfileEvents['InsertQuery'] InsertQuery, ProfileEvents['SelectQuery'] SelectQuery, ProfileEvents['InsertQueriesWithSubqueries'] InsertQueriesWithSubqueries, @@ -85,12 +85,12 @@ for allow_experimental_analyzer in 0 1; do " query_id="$(random_str 10)" - $CLICKHOUSE_CLIENT --allow_experimental_analyzer "$allow_experimental_analyzer" --query_id "$query_id" -q "WITH x AS (SELECT * FROM system.one) SELECT * FROM x FORMAT Null" + $CLICKHOUSE_CLIENT --enable_analyzer "$enable_analyzer" --query_id "$query_id" -q "WITH x AS (SELECT * FROM system.one) SELECT * FROM x FORMAT Null" $CLICKHOUSE_CLIENT -mn -q " SYSTEM FLUSH LOGS; SELECT 1 CTE, - $allow_experimental_analyzer allow_experimental_analyzer, + $enable_analyzer enable_analyzer, ProfileEvents['InsertQuery'] InsertQuery, ProfileEvents['SelectQuery'] SelectQuery, ProfileEvents['InsertQueriesWithSubqueries'] InsertQueriesWithSubqueries, @@ -102,12 +102,12 @@ for allow_experimental_analyzer in 0 1; do " query_id="$(random_str 10)" - $CLICKHOUSE_CLIENT 
--allow_experimental_analyzer "$allow_experimental_analyzer" --query_id "$query_id" -q "WITH x AS (SELECT * FROM system.one) SELECT * FROM x UNION ALL SELECT * FROM x FORMAT Null" + $CLICKHOUSE_CLIENT --enable_analyzer "$enable_analyzer" --query_id "$query_id" -q "WITH x AS (SELECT * FROM system.one) SELECT * FROM x UNION ALL SELECT * FROM x FORMAT Null" $CLICKHOUSE_CLIENT -mn -q " SYSTEM FLUSH LOGS; SELECT 1 CTE_Multi, - $allow_experimental_analyzer allow_experimental_analyzer, + $enable_analyzer enable_analyzer, ProfileEvents['InsertQuery'] InsertQuery, ProfileEvents['SelectQuery'] SelectQuery, ProfileEvents['InsertQueriesWithSubqueries'] InsertQueriesWithSubqueries, diff --git a/tests/queries/0_stateless/02767_into_outfile_extensions_msan.sh b/tests/queries/0_stateless/02767_into_outfile_extensions_msan.sh index 0c5767314d5..2d2ee328a29 100755 --- a/tests/queries/0_stateless/02767_into_outfile_extensions_msan.sh +++ b/tests/queries/0_stateless/02767_into_outfile_extensions_msan.sh @@ -6,6 +6,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) out="explain1.$CLICKHOUSE_TEST_UNIQUE_NAME.out" # only EXPLAIN triggers the problem under MSan -$CLICKHOUSE_CLIENT --allow_experimental_analyzer=0 -q "explain select * from numbers(1) into outfile '$out'" +$CLICKHOUSE_CLIENT --enable_analyzer=0 -q "explain select * from numbers(1) into outfile '$out'" cat "$out" rm -f "$out" diff --git a/tests/queries/0_stateless/02771_ignore_data_skipping_indices.sql b/tests/queries/0_stateless/02771_ignore_data_skipping_indices.sql index d86b65c3291..91ca5ef0340 100644 --- a/tests/queries/0_stateless/02771_ignore_data_skipping_indices.sql +++ b/tests/queries/0_stateless/02771_ignore_data_skipping_indices.sql @@ -23,12 +23,12 @@ SELECT * FROM data_02771 SETTINGS ignore_data_skipping_indices='na_idx'; SELECT * FROM data_02771 WHERE x = 1 AND y = 1 SETTINGS ignore_data_skipping_indices='xy_idx',force_data_skipping_indices='xy_idx' ; -- { serverError INDEX_NOT_USED } SELECT * FROM data_02771 WHERE x = 1 AND y = 2 SETTINGS ignore_data_skipping_indices='xy_idx'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 SETTINGS ignore_data_skipping_indices='xy_idx' ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 SETTINGS ignore_data_skipping_indices='xy_idx' ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference index 5bf3520ccdb..f60f1e0a376 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference @@ -8,5 +8,5 @@ 5935810273536892891 7885388429666205427 8124171311239967992 -1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n 
cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1; +1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n enable_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1; 0 2 SELECT `__table1`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` AS `__table1` diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql index 88a0d2163d6..e60049f2756 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -21,7 +21,7 @@ INSERT INTO join_inner_table__fuzz_146_replicated SELECT number FROM join_inner_table__fuzz_146_replicated SETTINGS - allow_experimental_analyzer = 1, + enable_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1; diff --git a/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2 b/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2 index 248461a98bb..74e252c785b 100644 --- a/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2 +++ b/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2 @@ -1,11 +1,11 @@ -{% for allow_experimental_analyzer in [0, 1] -%} +{% for enable_analyzer in [0, 1] -%} {% for join_use_nulls in [0, 1] -%} {% for kind in ['LEFT', 'RIGHT'] -%} {% for strictness in ['SEMI', 'ANTI'] -%} {% for maybe_materialize in ['', 'materialize'] -%} -SET allow_experimental_analyzer = {{ allow_experimental_analyzer }}; +SET enable_analyzer = {{ enable_analyzer }}; SET join_use_nulls = {{ join_use_nulls }}; diff --git a/tests/queries/0_stateless/02783_date_predicate_optimizations.sql b/tests/queries/0_stateless/02783_date_predicate_optimizations.sql index 4da8cebff1c..b127af677ee 100644 --- a/tests/queries/0_stateless/02783_date_predicate_optimizations.sql +++ b/tests/queries/0_stateless/02783_date_predicate_optimizations.sql @@ -11,8 +11,8 @@ INSERT INTO source values ('2021-12-31 23:00:00', 0); SELECT * FROM source WHERE toYYYYMM(ts) = 202112; SELECT * FROM source WHERE toYear(ts) = 2021; -SELECT * FROM source WHERE toYYYYMM(ts) = 202112 SETTINGS allow_experimental_analyzer=1; -SELECT * FROM source WHERE toYear(ts) = 2021 SETTINGS allow_experimental_analyzer=1; +SELECT * FROM source WHERE toYYYYMM(ts) = 202112 SETTINGS enable_analyzer=1; +SELECT * FROM source WHERE toYear(ts) = 2021 SETTINGS enable_analyzer=1; DROP TABLE IF EXISTS source; CREATE TABLE source @@ -46,18 +46,18 @@ SELECT count(*) FROM source WHERE toYear(dt) < 2023; SELECT count(*) FROM source WHERE toYear(dt) <= 2023; SELECT count(*) FROM source WHERE toYear(dt) > 2023; SELECT count(*) FROM source WHERE toYear(dt) >= 2023; -SELECT count(*) FROM source WHERE toYYYYMM(dt) = 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt) <> 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt) < 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt) <= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt) > 202312 SETTINGS 
allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt) >= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt) = 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt) <> 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt) < 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt) <= 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt) > 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt) >= 2023 SETTINGS allow_experimental_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) < 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) >= 2023 SETTINGS enable_analyzer=1; SELECT 'DateTime'; SELECT count(*) FROM source WHERE toYYYYMM(ts) = 202312; @@ -72,18 +72,18 @@ SELECT count(*) FROM source WHERE toYear(ts) < 2023; SELECT count(*) FROM source WHERE toYear(ts) <= 2023; SELECT count(*) FROM source WHERE toYear(ts) > 2023; SELECT count(*) FROM source WHERE toYear(ts) >= 2023; -SELECT count(*) FROM source WHERE toYYYYMM(ts) = 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts) <> 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts) < 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts) <= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts) > 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts) >= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts) = 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts) <> 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts) < 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts) <= 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts) > 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts) >= 2023 SETTINGS allow_experimental_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) > 202312 
SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) < 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) >= 2023 SETTINGS enable_analyzer=1; SELECT 'Date32'; SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312; @@ -98,18 +98,18 @@ SELECT count(*) FROM source WHERE toYear(dt_32) < 2023; SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023; SELECT count(*) FROM source WHERE toYear(dt_32) > 2023; SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <> 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) < 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) > 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) >= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt_32) = 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt_32) <> 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt_32) < 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt_32) > 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023 SETTINGS allow_experimental_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) < 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023 SETTINGS enable_analyzer=1; SELECT 'DateTime64'; SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312; @@ -124,16 +124,16 @@ SELECT count(*) FROM source WHERE toYear(ts_64) < 2023; SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023; SELECT count(*) FROM source WHERE toYear(ts_64) > 2023; SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312 SETTINGS 
allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <> 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) < 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) > 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) >= 202312 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts_64) = 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts_64) <> 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts_64) < 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts_64) > 2023 SETTINGS allow_experimental_analyzer=1; -SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023 SETTINGS allow_experimental_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) < 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023 SETTINGS enable_analyzer=1; DROP TABLE source; diff --git a/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh b/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh index 20b3efedd49..bf7170fd7fb 100755 --- a/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh +++ b/tests/queries/0_stateless/02783_parallel_replicas_trivial_count_optimization.sh @@ -34,7 +34,7 @@ function run_query_with_pure_parallel_replicas () { --prefer_localhost_replica 1 \ --cluster_for_parallel_replicas 'test_cluster_one_shard_three_replicas_localhost' \ --allow_experimental_parallel_reading_from_replicas 1 \ - --allow_experimental_analyzer 0 + --enable_analyzer 0 $CLICKHOUSE_CLIENT \ --query "$2" \ @@ -43,7 +43,7 @@ function run_query_with_pure_parallel_replicas () { --prefer_localhost_replica 1 \ --cluster_for_parallel_replicas 'test_cluster_one_shard_three_replicas_localhost' \ --allow_experimental_parallel_reading_from_replicas 1 \ - --allow_experimental_analyzer 1 + --enable_analyzer 1 } function run_query_with_custom_key_parallel_replicas () { @@ -58,7 +58,7 @@ function run_query_with_custom_key_parallel_replicas () { --max_parallel_replicas 3 \ --parallel_replicas_custom_key_filter_type 'default' \ --parallel_replicas_custom_key "$2" \ - --allow_experimental_analyzer 0 + --enable_analyzer 0 $CLICKHOUSE_CLIENT \ --query "$2" \ @@ -66,7 +66,7 @@ 
function run_query_with_custom_key_parallel_replicas () { --max_parallel_replicas 3 \ --parallel_replicas_custom_key_filter_type 'default' \ --parallel_replicas_custom_key "$2" \ - --allow_experimental_analyzer 1 + --enable_analyzer 1 } $CLICKHOUSE_CLIENT --query " diff --git a/tests/queries/0_stateless/02784_move_all_conditions_to_prewhere_analyzer_asan.sql b/tests/queries/0_stateless/02784_move_all_conditions_to_prewhere_analyzer_asan.sql index 44b9ce4fdc1..3766e5b0c8f 100644 --- a/tests/queries/0_stateless/02784_move_all_conditions_to_prewhere_analyzer_asan.sql +++ b/tests/queries/0_stateless/02784_move_all_conditions_to_prewhere_analyzer_asan.sql @@ -4,7 +4,7 @@ CREATE TABLE t_02784 (c1 UInt64, c2 UInt64) ENGINE=MergeTree() ORDER BY c1 SETTI INSERT INTO t_02784 SELECT number, number FROM numbers(1); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET move_all_conditions_to_prewhere=1; SELECT c1, c2 FROM t_02784 WHERE c1 = 0 AND c2 = 0; diff --git a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.reference b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.reference index fca48238778..c2c77a4aa31 100644 --- a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.reference +++ b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.reference @@ -39,7 +39,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE ((date1 < \'1993-01-01\') OR (date1 >= \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) @@ -81,7 +81,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 < \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) @@ -115,7 +115,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 >= \'1994-01-01\') AND ((id >= 1) AND (id <= 3)) @@ -149,7 +149,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 < \'1994-01-01\') AND ((id >= 1) AND (id <= 3)) @@ -183,7 +183,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 >= \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) @@ -217,7 +217,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1998-01-01\')) AND ((id >= 1) AND (id <= 3)) @@ 
-259,7 +259,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) OR ((date1 >= \'1994-01-01\') AND (date1 < \'1995-01-01\'))) AND ((id >= 1) AND (id <= 3)) @@ -317,7 +317,7 @@ QUERY id: 0 LIST id: 32, nodes: 2 COLUMN id: 29, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 33, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1, toYear(date1) AS year1 @@ -366,7 +366,7 @@ QUERY id: 0 LIST id: 24, nodes: 2 COLUMN id: 21, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 25, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 < \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) @@ -400,7 +400,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t PREWHERE (date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\') @@ -441,7 +441,7 @@ QUERY id: 0 LIST id: 20, nodes: 2 COLUMN id: 17, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 21, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE ((id >= 1) AND (id <= 3)) AND ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) @@ -481,7 +481,7 @@ QUERY id: 0 LIST id: 20, nodes: 2 COLUMN id: 17, column_name: date1, result_type: Date, source_id: 3 CONSTANT id: 21, constant_value: \'1994-01-01\', constant_value_type: String - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (toYYYYMM(date1) = 199300) AND ((id >= 1) AND (id <= 3)) @@ -518,7 +518,7 @@ QUERY id: 0 LIST id: 19, nodes: 2 COLUMN id: 16, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 20, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (toYYYYMM(date1) = 199313) AND ((id >= 1) AND (id <= 3)) @@ -555,7 +555,7 @@ QUERY id: 0 LIST id: 19, nodes: 2 COLUMN id: 16, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 20, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE ((date1 >= \'1993-12-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) @@ -597,7 +597,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE ((date1 >= \'1992-03-01\') AND (date1 < \'1992-04-01\')) AND ((id >= 1) AND (id <= 3)) @@ -639,7 +639,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM 
date_t WHERE ((date1 < \'1992-03-01\') OR (date1 >= \'1992-04-01\')) AND ((id >= 1) AND (id <= 3)) @@ -681,7 +681,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 < \'1992-03-01\') AND ((id >= 1) AND (id <= 3)) @@ -715,7 +715,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 >= \'1992-04-01\') AND ((id >= 1) AND (id <= 3)) @@ -749,7 +749,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 < \'1992-04-01\') AND ((id >= 1) AND (id <= 3)) @@ -783,7 +783,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE (date1 >= \'1992-03-01\') AND ((id >= 1) AND (id <= 3)) @@ -817,7 +817,7 @@ QUERY id: 0 LIST id: 17, nodes: 2 COLUMN id: 14, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date_t WHERE ((date1 >= \'1992-03-01\') OR ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\'))) AND ((id >= 1) AND (id <= 3)) @@ -867,7 +867,7 @@ QUERY id: 0 LIST id: 27, nodes: 2 COLUMN id: 24, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 28, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM datetime_t WHERE ((date1 >= \'1993-01-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) @@ -909,7 +909,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM datetime_t WHERE ((date1 >= \'1993-12-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) @@ -951,7 +951,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date32_t WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) @@ -993,7 +993,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM date32_t WHERE ((date1 >= \'1993-12-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) @@ -1035,7 +1035,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, 
result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM datetime64_t WHERE ((date1 >= \'1993-01-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) @@ -1077,7 +1077,7 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 SELECT value1 FROM datetime64_t WHERE ((date1 >= \'1993-12-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) @@ -1119,4 +1119,4 @@ QUERY id: 0 LIST id: 22, nodes: 2 COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3 CONSTANT id: 23, constant_value: UInt64_3, constant_value_type: UInt8 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.sql b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.sql index 9cc8dd74e5d..5ff62cb4bb3 100644 --- a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.sql +++ b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_query_tree_rewrite.sql @@ -2,74 +2,74 @@ DROP TABLE IF EXISTS date_t; CREATE TABLE date_t (id UInt32, value1 String, date1 Date) ENGINE ReplacingMergeTree() ORDER BY id; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY 
TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM 
date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; DROP TABLE date_t; DROP TABLE IF EXISTS datetime_t; CREATE TABLE datetime_t (id UInt32, value1 String, date1 Datetime) ENGINE ReplacingMergeTree() ORDER BY id; EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS 
allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; DROP TABLE datetime_t; DROP TABLE IF EXISTS date32_t; CREATE TABLE date32_t (id UInt32, value1 String, date1 Date32) ENGINE ReplacingMergeTree() ORDER BY id; EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; DROP TABLE date32_t; DROP TABLE IF EXISTS datetime64_t; CREATE TABLE datetime64_t (id UInt32, value1 String, date1 Datetime64) ENGINE ReplacingMergeTree() ORDER BY id; EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS allow_experimental_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; DROP TABLE datetime64_t; diff --git a/tests/queries/0_stateless/02803_remote_cannot_clone_block.sql b/tests/queries/0_stateless/02803_remote_cannot_clone_block.sql index 6d79aa76d18..dd72b990445 100644 --- a/tests/queries/0_stateless/02803_remote_cannot_clone_block.sql +++ b/tests/queries/0_stateless/02803_remote_cannot_clone_block.sql @@ -6,7 +6,7 @@ SELECT * FROM system.numbers LIMIT 10000; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT * FROM diff --git a/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql b/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql index 6c801b5b73e..d791b8f3367 100644 --- a/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql +++ b/tests/queries/0_stateless/02812_bug_with_unused_join_columns.sql @@ -1 +1 @@ -SELECT concat(func.name, comb.name) AS x FROM system.functions AS func JOIN system.aggregate_function_combinators AS comb using name WHERE is_aggregate settings allow_experimental_analyzer=1; +SELECT concat(func.name, comb.name) AS x FROM system.functions AS 
func JOIN system.aggregate_function_combinators AS comb using name WHERE is_aggregate settings enable_analyzer=1; diff --git a/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql b/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql index ca03cbb6f9f..dfb885f5a69 100644 --- a/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql +++ b/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; set optimize_move_functions_out_of_any = 0; SELECT 'set optimize_aggregators_of_group_by_keys = 1'; diff --git a/tests/queries/0_stateless/02815_join_algorithm_setting.sql b/tests/queries/0_stateless/02815_join_algorithm_setting.sql index a8733eebc91..4a5ae784b31 100644 --- a/tests/queries/0_stateless/02815_join_algorithm_setting.sql +++ b/tests/queries/0_stateless/02815_join_algorithm_setting.sql @@ -104,9 +104,9 @@ JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 ON t1.key = t2.key OR t1 -- But for CROSS choose `hash` algorithm even though it's not enabled SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 CROSS JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 FORMAT Null -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; -- ... (not for old analyzer) SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 CROSS JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 FORMAT Null -SETTINGS allow_experimental_analyzer = 0; -- { serverError NOT_IMPLEMENTED } +SETTINGS enable_analyzer = 0; -- { serverError NOT_IMPLEMENTED } diff --git a/tests/queries/0_stateless/02834_analyzer_with_statement_references.sql b/tests/queries/0_stateless/02834_analyzer_with_statement_references.sql index 29ed6e3f0da..ce1eaa7ae77 100644 --- a/tests/queries/0_stateless/02834_analyzer_with_statement_references.sql +++ b/tests/queries/0_stateless/02834_analyzer_with_statement_references.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH test_aliases AS (SELECT number FROM numbers(20)), alias2 AS (SELECT number FROM test_aliases) SELECT number FROM alias2 SETTINGS enable_global_with_statement = 1; diff --git a/tests/queries/0_stateless/02835_join_step_explain.sql b/tests/queries/0_stateless/02835_join_step_explain.sql index d0475fa14b6..1cdd3684a0b 100644 --- a/tests/queries/0_stateless/02835_join_step_explain.sql +++ b/tests/queries/0_stateless/02835_join_step_explain.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table_1; CREATE TABLE test_table_1 diff --git a/tests/queries/0_stateless/02840_merge__table_or_filter.reference b/tests/queries/0_stateless/02840_merge__table_or_filter.reference index ff5e0865a22..21b54a0121e 100644 --- a/tests/queries/0_stateless/02840_merge__table_or_filter.reference +++ b/tests/queries/0_stateless/02840_merge__table_or_filter.reference @@ -1,38 +1,38 @@ -- { echoOn } -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings allow_experimental_analyzer=0, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings enable_analyzer=0, convert_query_to_cnf=0; v1 1 v1 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings 
allow_experimental_analyzer=0, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings enable_analyzer=0, convert_query_to_cnf=0; v1 1 v2 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=0, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings enable_analyzer=0, convert_query_to_cnf=0; v1 1 -select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=0, convert_query_to_cnf=0; -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings allow_experimental_analyzer=0, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings enable_analyzer=0, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings enable_analyzer=0, convert_query_to_cnf=1; v1 1 v1 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings allow_experimental_analyzer=0, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings enable_analyzer=0, convert_query_to_cnf=1; v1 1 v2 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=0, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings enable_analyzer=0, convert_query_to_cnf=1; v1 1 -select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=0, convert_query_to_cnf=1; -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings allow_experimental_analyzer=1, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings enable_analyzer=0, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings enable_analyzer=1, convert_query_to_cnf=0; v1 1 v1 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings allow_experimental_analyzer=1, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings enable_analyzer=1, convert_query_to_cnf=0; v1 1 v2 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=1, convert_query_to_cnf=0; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings enable_analyzer=1, convert_query_to_cnf=0; v1 1 -select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=1, convert_query_to_cnf=0; -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings allow_experimental_analyzer=1, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings enable_analyzer=1, convert_query_to_cnf=0; 
+select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings enable_analyzer=1, convert_query_to_cnf=1; v1 1 v1 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings allow_experimental_analyzer=1, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings enable_analyzer=1, convert_query_to_cnf=1; v1 1 v2 2 -select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=1, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v3') settings enable_analyzer=1, convert_query_to_cnf=1; v1 1 -select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings allow_experimental_analyzer=1, convert_query_to_cnf=1; +select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') settings enable_analyzer=1, convert_query_to_cnf=1; diff --git a/tests/queries/0_stateless/02840_merge__table_or_filter.sql.j2 b/tests/queries/0_stateless/02840_merge__table_or_filter.sql.j2 index 286e4545ef7..f7413dc3ee6 100644 --- a/tests/queries/0_stateless/02840_merge__table_or_filter.sql.j2 +++ b/tests/queries/0_stateless/02840_merge__table_or_filter.sql.j2 @@ -27,10 +27,10 @@ select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 an set max_threads=1; -- { echoOn } {% for settings in [ - 'allow_experimental_analyzer=0, convert_query_to_cnf=0', - 'allow_experimental_analyzer=0, convert_query_to_cnf=1', - 'allow_experimental_analyzer=1, convert_query_to_cnf=0', - 'allow_experimental_analyzer=1, convert_query_to_cnf=1' + 'enable_analyzer=0, convert_query_to_cnf=0', + 'enable_analyzer=0, convert_query_to_cnf=1', + 'enable_analyzer=1, convert_query_to_cnf=0', + 'enable_analyzer=1, convert_query_to_cnf=1' ] %} select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') settings {{ settings }}; select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v2') settings {{ settings }}; diff --git a/tests/queries/0_stateless/02841_valid_json_and_xml_on_http_exception.sh b/tests/queries/0_stateless/02841_valid_json_and_xml_on_http_exception.sh index c47fe5c7e94..3dda63e1e49 100755 --- a/tests/queries/0_stateless/02841_valid_json_and_xml_on_http_exception.sh +++ b/tests/queries/0_stateless/02841_valid_json_and_xml_on_http_exception.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -CH_URL_BASE="$CLICKHOUSE_URL&http_write_exception_in_output_format=1&allow_experimental_analyzer=0" +CH_URL_BASE="$CLICKHOUSE_URL&http_write_exception_in_output_format=1&enable_analyzer=0" for wait_end_of_query in 0 1 do diff --git a/tests/queries/0_stateless/02841_with_clause_resolve.sql b/tests/queries/0_stateless/02841_with_clause_resolve.sql index b416446461b..fe94a26110d 100644 --- a/tests/queries/0_stateless/02841_with_clause_resolve.sql +++ b/tests/queries/0_stateless/02841_with_clause_resolve.sql @@ -1,16 +1,16 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; WITH -- Input 44100 AS sample_frequency , number AS tick , tick / sample_frequency AS time - + -- Delay , (time, wave, delay_, decay, count) -> arraySum(n1 -> wave(time - delay_ * n1), range(count)) AS delay , delay(time, (time -> 0.5), 0.2, 0.5, 5) AS kick - + SELECT kick @@ -23,29 +23,29 @@ WITH 44100 AS sample_frequency , number AS tick , tick / sample_frequency AS time - + -- Output control , 1 AS master_volume , level -> least(1.0, greatest(-1.0, level)) AS clamp , level -> (clamp(level) * 0x7FFF * master_volume)::Int16 AS output , x -> (x, x) AS mono - + -- Basic waves , time -> sin(time * 2 * pi()) AS sine_wave , time -> time::UInt64 % 2 * 2 - 1 AS square_wave , time -> (time - floor(time)) * 2 - 1 AS sawtooth_wave , time -> abs(sawtooth_wave(time)) * 2 - 1 AS triangle_wave - + -- Helpers , (from, to, wave, time) -> from + ((wave(time) + 1) / 2) * (to - from) AS lfo , (from, to, steps, time) -> from + floor((time - floor(time)) * steps) / steps * (to - from) AS step_lfo , (from, to, steps, time) -> exp(step_lfo(log(from), log(to), steps, time)) AS exp_step_lfo - + -- Noise , time -> cityHash64(time) / 0xFFFFFFFFFFFFFFFF AS uniform_noise , time -> erf(uniform_noise(time)) AS white_noise , time -> cityHash64(time) % 2 ? 1 : -1 AS bernoulli_noise - + -- Distortion , (x, amount) -> clamp(x * amount) AS clipping , (x, amount) -> clamp(x > 0 ? pow(x, amount) : -pow(-x, amount)) AS power_distortion @@ -53,10 +53,10 @@ WITH , (time, sample_frequency) -> round(time * sample_frequency) / sample_frequency AS desample , (time, wave, amount) -> (time - floor(time) < (1 - amount)) ? wave(time * (1 - amount)) : 0 AS thin , (time, wave, amount) -> wave(floor(time) + pow(time - floor(time), amount)) AS skew - + -- Combining , (a, b, weight) -> a * (1 - weight) + b * weight AS combine - + -- Envelopes , (time, offset, attack, hold, release) -> time < offset ? 0 @@ -64,7 +64,7 @@ WITH : (time < offset + attack + hold ? 1 : (time < offset + attack + hold + release ? 
(offset + attack + hold + release - time) / release : 0))) AS envelope - + , (bpm, time, offset, attack, hold, release) -> envelope( time * (bpm / 60) - floor(time * (bpm / 60)), @@ -72,20 +72,20 @@ WITH attack, hold, release) AS running_envelope - + -- Sequencers , (sequence, time) -> sequence[1 + time::UInt64 % length(sequence)] AS sequencer - + -- Delay , (time, wave, delay, decay, count) -> arraySum(n -> wave(time - delay * n) * pow(decay, n), range(count)) AS delay - - + + , delay(time, (time -> power_distortion(sine_wave(time * 80 + sine_wave(time * 2)), lfo(0.5, 1, sine_wave, time / 16)) * running_envelope(60, time, 0, 0.0, 0.01, 0.1)), 0.2, 0.5, 5) AS kick - + SELECT - + (output( kick + delay(time, (time -> @@ -95,7 +95,7 @@ SELECT lfo(1, 0.75, triangle_wave, time / 8))), 0.2, 0.5, 10) * lfo(0.5, 1, triangle_wave, time / 7) - + + delay(time, (time -> power_distortion( sine_wave(time * sequencer([50, 100, 200, 400], time / 2) + 1 * sine_wave(time * sequencer([50, 100, 200], time / 4) + 1/4)) @@ -103,16 +103,16 @@ SELECT lfo(1, 0.75, triangle_wave, time / 8))), 0.2, 0.5, 10) * lfo(0.5, 1, triangle_wave, 16 + time / 11) - + + delay(time, (time -> white_noise(time) * running_envelope(60, time, 0.75, 0.01, 0.01, 0.1)), 0.2, 0.5, 10) * lfo(0.5, 1, triangle_wave, 24 + time / 13) - + + sine_wave(time * 100 + 1 * sine_wave(time * 10 + 1/4)) * running_envelope(120, time, 0, 0.01, 0.01, 0.1) ), - + output( kick + delay(time + 0.01, (time -> @@ -122,7 +122,7 @@ SELECT lfo(1, 0.75, triangle_wave, time / 8))), 0.2, 0.5, 10) * lfo(0.5, 1, triangle_wave, time / 7) - + + delay(time - 0.01, (time -> power_distortion( sine_wave(time * sequencer([50, 100, 200, 400], time / 2) + 1 * sine_wave(time * sequencer([50, 100, 200], time / 4) + 1/4)) @@ -130,12 +130,12 @@ SELECT lfo(1, 0.75, triangle_wave, time / 8))), 0.2, 0.5, 10) * lfo(0.5, 1, triangle_wave, 16 + time / 11) - + + delay(time + 0.005, (time -> white_noise(time) * running_envelope(60, time, 0.75, 0.01, 0.01, 0.1)), 0.2, 0.5, 10) * lfo(0.5, 1, triangle_wave, 24 + time / 13) )) - + FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql index ea52df5d4b4..c9cdab0cea2 100644 --- a/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql +++ b/tests/queries/0_stateless/02861_filter_pushdown_const_bug.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t1; diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql index 3e15ec1148e..6606cff263f 100644 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql +++ b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS tab; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET allow_experimental_statistics = 1; SET allow_statistics_optimize = 1; diff --git a/tests/queries/0_stateless/02866_size_of_marks_skip_idx_explain.sql b/tests/queries/0_stateless/02866_size_of_marks_skip_idx_explain.sql index b916c5ca13d..b3adf38710a 100644 --- a/tests/queries/0_stateless/02866_size_of_marks_skip_idx_explain.sql +++ b/tests/queries/0_stateless/02866_size_of_marks_skip_idx_explain.sql @@ -4,7 +4,7 @@ SET optimize_move_to_prewhere = 1; SET convert_query_to_cnf = 0; SET optimize_read_in_order = 1; -SET allow_experimental_analyzer = 1; -- slightly different operator names than 
w/o +SET enable_analyzer = 1; -- slightly different operator names than w/o DROP TABLE IF EXISTS test_skip_idx; diff --git a/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference b/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference index c2075f72f33..3dedcedbcee 100644 --- a/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference +++ b/tests/queries/0_stateless/02868_distinct_to_count_optimization.reference @@ -6,7 +6,7 @@ FROM SELECT DISTINCT a FROM test_rewrite_uniq_to_count ) -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -23,7 +23,7 @@ QUERY id: 0 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, alias: __table2, table_name: default.test_rewrite_uniq_to_count - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 2. test distinct with subquery alias 3 SELECT count() @@ -32,7 +32,7 @@ FROM SELECT DISTINCT a FROM test_rewrite_uniq_to_count ) AS t -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -49,7 +49,7 @@ QUERY id: 0 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, alias: __table2, table_name: default.test_rewrite_uniq_to_count - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 3. test distinct with compound column name 3 SELECT count() @@ -58,7 +58,7 @@ FROM SELECT DISTINCT a FROM test_rewrite_uniq_to_count ) AS t -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -75,7 +75,7 @@ QUERY id: 0 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, alias: __table2, table_name: default.test_rewrite_uniq_to_count - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 4. test distinct with select expression alias 3 SELECT count() @@ -84,7 +84,7 @@ FROM SELECT DISTINCT a AS alias_of_a FROM test_rewrite_uniq_to_count ) AS t -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -101,7 +101,7 @@ QUERY id: 0 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 JOIN TREE TABLE id: 6, alias: __table2, table_name: default.test_rewrite_uniq_to_count - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 5. test simple group by 3 SELECT count() @@ -113,7 +113,7 @@ FROM FROM test_rewrite_uniq_to_count GROUP BY a ) -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -133,7 +133,7 @@ QUERY id: 0 GROUP BY LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 6. test group by with subquery alias 3 SELECT count() @@ -145,7 +145,7 @@ FROM FROM test_rewrite_uniq_to_count GROUP BY a ) AS t -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -165,7 +165,7 @@ QUERY id: 0 GROUP BY LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 7. 
test group by with compound column name 3 SELECT count() @@ -177,7 +177,7 @@ FROM FROM test_rewrite_uniq_to_count GROUP BY a ) AS t -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -197,7 +197,7 @@ QUERY id: 0 GROUP BY LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 8. test group by with select expression alias 3 SELECT count() @@ -209,7 +209,7 @@ FROM FROM test_rewrite_uniq_to_count GROUP BY alias_of_a ) AS t -SETTINGS allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 0 3 QUERY id: 0 PROJECTION COLUMNS @@ -229,4 +229,4 @@ QUERY id: 0 GROUP BY LIST id: 7, nodes: 1 COLUMN id: 5, column_name: a, result_type: UInt8, source_id: 6 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/02868_distinct_to_count_optimization.sql b/tests/queries/0_stateless/02868_distinct_to_count_optimization.sql index 66431b7c36b..d30bade4dd5 100644 --- a/tests/queries/0_stateless/02868_distinct_to_count_optimization.sql +++ b/tests/queries/0_stateless/02868_distinct_to_count_optimization.sql @@ -16,53 +16,53 @@ set optimize_uniq_to_count=true; SELECT '1. test simple distinct'; -SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings allow_experimental_analyzer=0; -SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings allow_experimental_analyzer=1; +SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=0; +SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=1; SELECT '2. test distinct with subquery alias'; -SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=0; -SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=1; +SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; SELECT '3. 
test distinct with compound column name'; -SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=0; -SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=1; +SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; SELECT '4. test distinct with select expression alias'; -SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=0; -SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings allow_experimental_analyzer=1; +SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; SELECT '5. test simple group by'; -SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings allow_experimental_analyzer=0; -SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings allow_experimental_analyzer=1; +SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=0; +SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=1; SELECT '6. 
test group by with subquery alias'; -SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=0; -SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=1; +SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; SELECT '7. test group by with compound column name'; -SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=0; -SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings allow_experimental_analyzer=1; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; SELECT '8. 
test group by with select expression alias'; -SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings allow_experimental_analyzer=0; -EXPLAIN SYNTAX SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings allow_experimental_analyzer=0; -SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings allow_experimental_analyzer=1; -EXPLAIN QUERY TREE SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings allow_experimental_analyzer=1; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=0; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=1; drop table if exists test_rewrite_uniq_to_count; diff --git a/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.sql b/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.sql index 1b1a7607344..83b38955734 100644 --- a/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.sql +++ b/tests/queries/0_stateless/02882_primary_key_index_in_function_different_types.sql @@ -7,14 +7,14 @@ CREATE TABLE test_table INSERT INTO test_table SELECT number, number FROM numbers(10); -set allow_experimental_analyzer = 0; +set enable_analyzer = 0; EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT 5); EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT '5'); EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT toUInt8(number) FROM numbers(5)); EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT toString(number) FROM numbers(5)); -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT 5); EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT '5'); diff --git a/tests/queries/0_stateless/02890_named_tuple_functions.sql b/tests/queries/0_stateless/02890_named_tuple_functions.sql index 0033102bd53..6724462562a 100644 --- a/tests/queries/0_stateless/02890_named_tuple_functions.sql +++ b/tests/queries/0_stateless/02890_named_tuple_functions.sql @@ -1,5 +1,5 @@ set enable_named_columns_in_function_tuple = 1; -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; drop table if exists x; create table x (i int, j int) engine MergeTree order by i; diff --git a/tests/queries/0_stateless/02890_untuple_column_names.reference b/tests/queries/0_stateless/02890_untuple_column_names.reference index 13a85c70138..f91a63fa15f 100644 --- a/tests/queries/0_stateless/02890_untuple_column_names.reference +++ b/tests/queries/0_stateless/02890_untuple_column_names.reference @@ -57,7 +57,7 @@ t.1: 1 Row 1: ────── t.1: 1 
--- tuple() with enable_named_columns_in_function_tuple = 1 and allow_experimental_analyzer = 1 keeps the column names +-- tuple() with enable_named_columns_in_function_tuple = 1 and enable_analyzer = 1 keeps the column names Row 1: ────── t.a: 1 diff --git a/tests/queries/0_stateless/02890_untuple_column_names.sql b/tests/queries/0_stateless/02890_untuple_column_names.sql index cd490ca3522..9773e2e53a0 100644 --- a/tests/queries/0_stateless/02890_untuple_column_names.sql +++ b/tests/queries/0_stateless/02890_untuple_column_names.sql @@ -6,43 +6,43 @@ SELECT '-- tuple element alias'; -SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple('s')::Tuple(a String)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple('s')::Tuple(a String)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple('s')::Tuple(a String)) FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple('s')::Tuple(a String)) FORMAT Vertical SETTINGS enable_analyzer = 1; -SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple(1)::Tuple(a Int)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -- { serverError DUPLICATE_COLUMN } -SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple(1)::Tuple(a Int)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; -- Bug: doesn't throw an exception +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple(1)::Tuple(a Int)) FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple(1)::Tuple(a Int)) FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception SELECT '-- tuple element alias + untuple() alias'; -SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple('s')::Tuple(a String)) y FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple('s')::Tuple(a String)) y FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple('s')::Tuple(a String)) y FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple('s')::Tuple(a String)) y FORMAT Vertical SETTINGS enable_analyzer = 1; -SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple(1)::Tuple(a Int)) x FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -- { serverError DUPLICATE_COLUMN } -SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple(1)::Tuple(a Int)) x FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; -- Bug: doesn't throw an exception +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple(1)::Tuple(a Int)) x FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple(1)::Tuple(a Int)) x FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception SELECT '-- untuple() alias'; -SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple('s')::Tuple(String)) y FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple('s')::Tuple(String)) y FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple('s')::Tuple(String)) y FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple('s')::Tuple(String)) y FORMAT Vertical SETTINGS enable_analyzer = 1; -SELECT 
untuple(tuple(1)::Tuple(Int)) x, untuple(tuple(1)::Tuple(Int)) x FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -- { serverError DUPLICATE_COLUMN } -SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple(1)::Tuple(Int)) x FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; -- Bug: doesn't throw an exception +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple(1)::Tuple(Int)) x FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple(1)::Tuple(Int)) x FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception SELECT '-- no aliases'; -SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple('s')::Tuple(String)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple('s')::Tuple(String)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple('s')::Tuple(String)) FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple('s')::Tuple(String)) FORMAT Vertical SETTINGS enable_analyzer = 1; -SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple(1)::Tuple(Int)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -- { serverError DUPLICATE_COLUMN } -SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple(1)::Tuple(Int)) FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; -- Bug: doesn't throw an exception +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple(1)::Tuple(Int)) FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple(1)::Tuple(Int)) FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception SELECT '-- tuple() loses the column names (would be good to fix, see #36773)'; -SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS allow_experimental_analyzer = 0, enable_named_columns_in_function_tuple = 0; -SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS allow_experimental_analyzer = 1, enable_named_columns_in_function_tuple = 0; +SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS enable_analyzer = 0, enable_named_columns_in_function_tuple = 0; +SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS enable_analyzer = 1, enable_named_columns_in_function_tuple = 0; -SELECT '-- tuple() with enable_named_columns_in_function_tuple = 1 and allow_experimental_analyzer = 1 keeps the column names'; -SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS allow_experimental_analyzer = 1, enable_named_columns_in_function_tuple = 1; +SELECT '-- tuple() with enable_named_columns_in_function_tuple = 1 and enable_analyzer = 1 keeps the column names'; +SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS enable_analyzer = 1, enable_named_columns_in_function_tuple = 1; SELECT '-- thankfully JSONExtract() keeps them'; -SELECT untuple(JSONExtract('{"key": "value"}', 'Tuple(key String)')) x FORMAT Vertical SETTINGS allow_experimental_analyzer = 0; -SELECT untuple(JSONExtract('{"key": "value"}', 'Tuple(key String)')) x FORMAT Vertical SETTINGS allow_experimental_analyzer = 1; +SELECT untuple(JSONExtract('{"key": "value"}', 'Tuple(key String)')) x FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(JSONExtract('{"key": "value"}', 'Tuple(key String)')) x FORMAT Vertical SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql 
b/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql index b082f2f33b2..77f30ba82ba 100644 --- a/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql +++ b/tests/queries/0_stateless/02911_analyzer_explain_estimate.sql @@ -1,5 +1,5 @@ -- Tags: distributed -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN ESTIMATE SELECT 0 = 1048577, NULL, groupBitmapOr(bitmapBuild([toInt32(65537)])) FROM cluster(test_cluster_two_shards) WHERE NULL = 1048575; diff --git a/tests/queries/0_stateless/02911_analyzer_order_by_read_in_order_query_plan.sql b/tests/queries/0_stateless/02911_analyzer_order_by_read_in_order_query_plan.sql index 77a72c24f5a..907263168d0 100644 --- a/tests/queries/0_stateless/02911_analyzer_order_by_read_in_order_query_plan.sql +++ b/tests/queries/0_stateless/02911_analyzer_order_by_read_in_order_query_plan.sql @@ -1,4 +1,4 @@ -SET optimize_read_in_order = 1, query_plan_read_in_order = 1, allow_experimental_analyzer = 1; +SET optimize_read_in_order = 1, query_plan_read_in_order = 1, enable_analyzer = 1; drop table if exists tab; drop table if exists tab2; diff --git a/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql b/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql index 70de63c592d..d567ac6c8f3 100644 --- a/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql +++ b/tests/queries/0_stateless/02911_analyzer_remove_unused_projection_columns.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql index ee5e1582015..e1e095a6eae 100644 --- a/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql +++ b/tests/queries/0_stateless/02911_join_on_nullsafe_optimization.sql @@ -15,7 +15,7 @@ CREATE TABLE t2n (x Int64, y UInt64) ENGINE = TinyLog; INSERT INTO t1n VALUES (1,42), (2,2), (3,3); INSERT INTO t2n VALUES (2,2), (3,33), (4,42); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) ORDER BY t1.x NULLS LAST; diff --git a/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql b/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql index 4d68786d7db..5ab50044e64 100644 --- a/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql +++ b/tests/queries/0_stateless/02911_support_alias_column_in_indices.sql @@ -17,8 +17,8 @@ settings index_granularity = 8192, min_index_granularity_bytes = 1024, index_gra insert into test1 select * from numbers(10); insert into test1 select * from numbers(11, 20); -explain indexes = 1 select * from test1 where a > 10 settings allow_experimental_analyzer = 0; -explain indexes = 1 select * from test1 where a > 10 settings allow_experimental_analyzer = 1; +explain indexes = 1 select * from test1 where a > 10 settings enable_analyzer = 0; +explain indexes = 1 select * from test1 where a > 10 settings enable_analyzer = 1; create table test2 ( @@ -34,7 +34,7 @@ settings index_granularity = 8192, min_index_granularity_bytes = 1024, index_gra insert into test2 select * from numbers(10); insert into test2 select * from numbers(11, 20); -explain indexes = 1 select * from test2 where a2 > 15 settings allow_experimental_analyzer = 0; -explain indexes = 1 select * from test2 where 
a2 > 15 settings allow_experimental_analyzer = 1; +explain indexes = 1 select * from test2 where a2 > 15 settings enable_analyzer = 0; +explain indexes = 1 select * from test2 where a2 > 15 settings enable_analyzer = 1; drop database 02911_support_alias_column_in_indices; diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_1.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_1.sql index 94849453063..f1d606ab1cd 100644 --- a/tests/queries/0_stateless/02915_analyzer_fuzz_1.sql +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_1.sql @@ -1,2 +1,2 @@ -set allow_experimental_analyzer=1; +set enable_analyzer=1; SELECT concat('With ', materialize(_CAST('ba\0', 'LowCardinality(FixedString(3))'))) AS `concat('With ', materialize(CAST('ba\\0', 'LowCardinality(FixedString(3))')))` FROM system.one GROUP BY 'With '; diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_2.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_2.sql index ca9fff68446..8921d36c546 100644 --- a/tests/queries/0_stateless/02915_analyzer_fuzz_2.sql +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_2.sql @@ -1,5 +1,4 @@ SET aggregate_functions_null_for_empty = 1; ---set allow_experimental_analyzer=1; +--set enable_analyzer=1; create table t_delete_projection (x UInt32, y UInt64, projection p (select sum(y))) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; insert into t_delete_projection select number, toString(number) from numbers(8192 * 10); - diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql index 29d06d2c315..d75d4f4eb72 100644 --- a/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_5.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer=1; +set enable_analyzer=1; SET max_block_size = 1000; SET max_threads = 4; SET max_rows_to_group_by = 3000, group_by_overflow_mode = 'any'; diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql index b4eb1b4aff4..cc276ec4074 100644 --- a/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql +++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql @@ -1,5 +1,5 @@ set allow_suspicious_low_cardinality_types=1; -set allow_experimental_analyzer=1; +set enable_analyzer=1; create table tab (x LowCardinality(Nullable(Float64))) engine = MergeTree order by x settings allow_nullable_key=1; insert into tab select number from numbers(2); diff --git a/tests/queries/0_stateless/02918_join_pm_lc_crash.sql b/tests/queries/0_stateless/02918_join_pm_lc_crash.sql index 123208ee981..0326acff4a6 100644 --- a/tests/queries/0_stateless/02918_join_pm_lc_crash.sql +++ b/tests/queries/0_stateless/02918_join_pm_lc_crash.sql @@ -1,31 +1,30 @@ SET joined_subquery_requires_alias = 0, join_algorithm = 'partial_merge'; -SET allow_experimental_analyzer = 0, join_use_nulls = 0; +SET enable_analyzer = 0, join_use_nulls = 0; SELECT * FROM (SELECT dummy AS val FROM system.one) JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val FROM system.one GROUP BY val WITH TOTALS) USING (val); -SET allow_experimental_analyzer = 0, join_use_nulls = 1; +SET enable_analyzer = 0, join_use_nulls = 1; SELECT * FROM (SELECT dummy AS val FROM system.one) JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val FROM system.one GROUP BY val WITH TOTALS) USING (val); -SET allow_experimental_analyzer = 1, join_use_nulls = 0; +SET enable_analyzer = 1, join_use_nulls = 0; SELECT * FROM (SELECT 
dummy AS val FROM system.one) JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val FROM system.one GROUP BY val WITH TOTALS) USING (val); -SET allow_experimental_analyzer = 1, join_use_nulls = 1; +SET enable_analyzer = 1, join_use_nulls = 1; SELECT * FROM (SELECT dummy AS val FROM system.one) JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val FROM system.one GROUP BY val WITH TOTALS) USING (val); - diff --git a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql index 9feb2aa2ad6..f5647f01ab9 100644 --- a/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql +++ b/tests/queries/0_stateless/02918_optimize_count_for_merge_tables.sql @@ -15,7 +15,7 @@ INSERT INTO mt2 VALUES (1); SELECT count() FROM merge; -- can use the trivial count optimization -EXPLAIN SELECT count() FROM merge settings allow_experimental_analyzer=0; +EXPLAIN SELECT count() FROM merge settings enable_analyzer=0; CREATE TABLE mt3 (id UInt64) ENGINE = TinyLog; @@ -24,7 +24,7 @@ INSERT INTO mt2 VALUES (2); SELECT count() FROM merge; -- can't use the trivial count optimization as TinyLog doesn't support it -EXPLAIN SELECT count() FROM merge settings allow_experimental_analyzer=0; +EXPLAIN SELECT count() FROM merge settings enable_analyzer=0; DROP TABLE IF EXISTS mt1; DROP TABLE IF EXISTS mt2; diff --git a/tests/queries/0_stateless/02922_respect_nulls_parser.sql b/tests/queries/0_stateless/02922_respect_nulls_parser.sql index c9a17fdfbfe..ccd67dbe676 100644 --- a/tests/queries/0_stateless/02922_respect_nulls_parser.sql +++ b/tests/queries/0_stateless/02922_respect_nulls_parser.sql @@ -13,7 +13,7 @@ SELECT toDateTimeNonExistingFunction(now()) RESPECT NULLS b; -- { serverError UN SELECT toDateTime(now()) RESPECT NULLS b; -- { serverError SYNTAX_ERROR } SELECT count() from numbers(10) where in(number, (0)) RESPECT NULLS; -- { serverError SYNTAX_ERROR } SELECT if(number > 0, number, 0) respect nulls from numbers(0); -- { serverError SYNTAX_ERROR } -WITH (x -> x + 1) AS lambda SELECT lambda(number) RESPECT NULLS FROM numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError SYNTAX_ERROR } +WITH (x -> x + 1) AS lambda SELECT lambda(number) RESPECT NULLS FROM numbers(10) SETTINGS enable_analyzer = 1; -- { serverError SYNTAX_ERROR } SELECT * from system.one WHERE indexHint(dummy = 1) RESPECT NULLS; -- { serverError SYNTAX_ERROR } SELECT arrayJoin([[3,4,5], [6,7], [2], [1,1]]) IGNORE NULLS; -- { serverError SYNTAX_ERROR } SELECT number, grouping(number % 2, number) RESPECT NULLS AS gr FROM numbers(10) GROUP BY GROUPING SETS ((number), (number % 2)) SETTINGS force_grouping_standard_compatibility = 0; -- { serverError SYNTAX_ERROR } diff --git a/tests/queries/0_stateless/02932_analyzer_rewrite_sum_column_and_constant.sql b/tests/queries/0_stateless/02932_analyzer_rewrite_sum_column_and_constant.sql index 43dad8eb8e0..5492d061c12 100644 --- a/tests/queries/0_stateless/02932_analyzer_rewrite_sum_column_and_constant.sql +++ b/tests/queries/0_stateless/02932_analyzer_rewrite_sum_column_and_constant.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; -- { echoOn } Select sum(number + 1) from numbers(10); diff --git a/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql b/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql index 3daaf36188a..e5cbe100a58 100644 --- a/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql +++ 
b/tests/queries/0_stateless/02932_parallel_replicas_fuzzer.sql @@ -35,4 +35,4 @@ ORDER BY nan DESC, _CAST([0, NULL, NULL, NULL, NULL], 'Array(Nullable(UInt8))') DESC FORMAT Null -SETTINGS receive_timeout = 10., receive_data_timeout_ms = 10000, use_hedged_requests = 0, allow_suspicious_low_cardinality_types = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, log_queries = 1, table_function_remote_max_addresses = 200, allow_experimental_analyzer = 1; +SETTINGS receive_timeout = 10., receive_data_timeout_ms = 10000, use_hedged_requests = 0, allow_suspicious_low_cardinality_types = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, log_queries = 1, table_function_remote_max_addresses = 200, enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02933_paste_join.sql b/tests/queries/0_stateless/02933_paste_join.sql index 604078d1c3a..6c5a923d028 100644 --- a/tests/queries/0_stateless/02933_paste_join.sql +++ b/tests/queries/0_stateless/02933_paste_join.sql @@ -41,10 +41,10 @@ INSERT INTO test SELECT number from numbers(6); SELECT * FROM (SELECT number FROM test) PASTE JOIN (SELECT number FROM numbers(6) ORDER BY number) SETTINGS joined_subquery_requires_alias = 0; SELECT * FROM (SELECT number FROM test PASTE JOIN (Select number FROM numbers(7))) PASTE JOIN (SELECT number FROM numbers(6) PASTE JOIN (SELECT number FROM test)) SETTINGS joined_subquery_requires_alias = 0; SELECT * FROM (SELECT number FROM test PASTE JOIN (SELECT number FROM test PASTE JOIN (Select number FROM numbers(7)))) PASTE JOIN (SELECT number FROM numbers(6) PASTE JOIN (SELECT number FROM test)) SETTINGS joined_subquery_requires_alias = 0; -SELECT * FROM (SELECT 1 AS a) PASTE JOIN (SELECT 2 AS b) PASTE JOIN (SELECT 3 AS c) SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM (SELECT 1 AS a) PASTE JOIN (SELECT 2 AS b) PASTE JOIN (SELECT 3 AS a) SETTINGS allow_experimental_analyzer = 1; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT * FROM (SELECT 1 AS a) PASTE JOIN (SELECT 2 AS b) PASTE JOIN (SELECT 3 AS c) SETTINGS enable_analyzer = 1; +SELECT * FROM (SELECT 1 AS a) PASTE JOIN (SELECT 2 AS b) PASTE JOIN (SELECT 3 AS a) SETTINGS enable_analyzer = 1; -- { serverError AMBIGUOUS_COLUMN_NAME } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TABLE test1 (a Int32) engine=MergeTree order by a; INSERT INTO test1 SELECT * FROM numbers(2); CREATE TABLE test2 (a Int32) engine=MergeTree order by a; diff --git a/tests/queries/0_stateless/02943_order_by_all.sql b/tests/queries/0_stateless/02943_order_by_all.sql index 0d5e0ea52e4..4ce59e84e6c 100644 --- a/tests/queries/0_stateless/02943_order_by_all.sql +++ b/tests/queries/0_stateless/02943_order_by_all.sql @@ -13,40 +13,40 @@ INSERT INTO order_by_all VALUES ('B', 3), ('C', NULL), ('D', 1), ('A', 2); SELECT '-- no modifiers'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT a, b FROM order_by_all ORDER BY ALL; SELECT b, a FROM order_by_all ORDER BY ALL; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT a, b FROM order_by_all ORDER BY ALL; SELECT b, a FROM order_by_all ORDER BY ALL; SELECT '-- with ASC/DESC modifiers'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; 
SELECT a, b FROM order_by_all ORDER BY ALL ASC; SELECT a, b FROM order_by_all ORDER BY ALL DESC; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT a, b FROM order_by_all ORDER BY ALL ASC; SELECT a, b FROM order_by_all ORDER BY ALL DESC; SELECT '-- with NULLS FIRST/LAST modifiers'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT b, a FROM order_by_all ORDER BY ALL NULLS FIRST; SELECT b, a FROM order_by_all ORDER BY ALL NULLS LAST; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT b, a FROM order_by_all ORDER BY ALL NULLS FIRST; SELECT b, a FROM order_by_all ORDER BY ALL NULLS LAST; SELECT '-- SELECT *'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT * FROM order_by_all ORDER BY all; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM order_by_all ORDER BY all; DROP TABLE order_by_all; @@ -65,7 +65,7 @@ INSERT INTO order_by_all VALUES ('B', 3, 10), ('C', NULL, 40), ('D', 1, 20), ('A SELECT ' -- columns'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT a, b, all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; SELECT a FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } @@ -73,7 +73,7 @@ SELECT a FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; SELECT * FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT * FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT a, b, all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; SELECT a FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; @@ -86,30 +86,30 @@ SELECT * FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; SELECT ' -- column aliases'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT a, b AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT a, b AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT a, b AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT a, b AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; SELECT ' -- expressions'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; SELECT ' -- ORDER BY ALL loses its special meaning when used in conjunction with other columns'; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT a, b, all FROM order_by_all ORDER BY all, a; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT a, b, all FROM order_by_all ORDER BY all, a; DROP TABLE order_by_all; diff --git 
a/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.sql b/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.sql index 42175cbb2c6..5ad54c872fa 100644 --- a/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.sql +++ b/tests/queries/0_stateless/02943_tokenbf_and_ngrambf_indexes_support_match_function.sql @@ -41,7 +41,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -52,7 +52,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT * FROM @@ -63,7 +63,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -74,7 +74,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT '---'; @@ -96,7 +96,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -107,7 +107,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT * FROM @@ -118,7 +118,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -129,7 +129,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT '---'; @@ -150,7 +150,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM ( @@ -160,7 +160,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; SELECT * FROM @@ -171,7 +171,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 0; + enable_analyzer = 0; SELECT * FROM @@ -182,7 +182,7 @@ FROM WHERE explain LIKE '%Granules: %' SETTINGS - allow_experimental_analyzer = 1; + enable_analyzer = 1; DROP TABLE tokenbf_tab; DROP TABLE ngrambf_tab; diff --git a/tests/queries/0_stateless/02944_variant_as_common_type_analyzer.sql b/tests/queries/0_stateless/02944_variant_as_common_type_analyzer.sql index 77bed6cf796..7947c8a0ce1 100644 --- a/tests/queries/0_stateless/02944_variant_as_common_type_analyzer.sql +++ b/tests/queries/0_stateless/02944_variant_as_common_type_analyzer.sql @@ -1,7 +1,7 @@ -- this test is just like 02944_variant_as_common_type, but with different expected output, because -- analyzer changes some return types. Specifically, if(c, x, y) always casts to the common type of -- x and y, even if c is constant. 
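-- Illustrative sketch only (not part of this patch): the comment above is the key point of the test.
-- With the analyzer enabled, if(c, x, y) casts both branches to their common type even when c is a
-- constant, and with use_variant_as_common_type = 1 that common type can be a Variant; the old
-- analyzer would instead keep the plain type of the selected branch. A minimal illustration, assuming
-- a build with Variant support (the exact printed type name may differ between versions):
set enable_analyzer = 1, allow_experimental_variant_type = 1, use_variant_as_common_type = 1;
select toTypeName(if(1, 42, 'str')); -- expected to report a Variant over String and UInt8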
-set allow_experimental_analyzer=1; +set enable_analyzer=1; set allow_experimental_variant_type=1; set use_variant_as_common_type=1; @@ -76,4 +76,3 @@ select toTypeName(res), array([1, 2, 3], [[1, 2, 3]]) as res; select toTypeName(res), map('a', 1, 'b', 'str_1') as res; select toTypeName(res), map('a', 1, 'b', map('c', 2, 'd', 'str_1')) as res; select toTypeName(res), map('a', 1, 'b', [1, 2, 3], 'c', [[4, 5, 6]]) as res; - diff --git a/tests/queries/0_stateless/02952_conjunction_optimization.sql b/tests/queries/0_stateless/02952_conjunction_optimization.sql index 94bc352e4c5..fb6c26a2e1f 100644 --- a/tests/queries/0_stateless/02952_conjunction_optimization.sql +++ b/tests/queries/0_stateless/02952_conjunction_optimization.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS 02952_disjunction_optimization; diff --git a/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql b/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql index a8029fdd3d6..886944e30c0 100644 --- a/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql +++ b/tests/queries/0_stateless/02954_analyzer_fuzz_i57086.sql @@ -12,4 +12,4 @@ WITH ROLLUP ORDER BY count() ASC, number DESC NULLS LAST - SETTINGS limit = 2, allow_experimental_analyzer = 1; + SETTINGS limit = 2, enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02955_analyzer_using_functional_args.reference.j2 b/tests/queries/0_stateless/02955_analyzer_using_functional_args.reference.j2 index e0c6a439112..95a79c56194 100644 --- a/tests/queries/0_stateless/02955_analyzer_using_functional_args.reference.j2 +++ b/tests/queries/0_stateless/02955_analyzer_using_functional_args.reference.j2 @@ -10,7 +10,7 @@ a -- { echoOn } -- USING alias column contains default in old analyzer (but both queries below should have the same result) -SELECT y * 2, s || 'a' FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT y * 2, s || 'a' FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; 738 ba 7386 aa 13332 a @@ -18,11 +18,11 @@ SELECT y * 2, s || 'a' FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2 738 ba 7386 aa 13332 a -SELECT (1, *) FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT (1, *) FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; (1,369,123,'b',124) (1,3693,1231,'a',0) (1,6666,0,'',48) -SELECT (1, *) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT (1, *) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; (1,369,'b') (1,3693,'a') (1,6666,'') @@ -42,27 +42,27 @@ SELECT (1, t1.*, t2.*) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2 (1,'',0,6666) (1,'a',3693,0) (1,'b',369,369) -SELECT t1.z, t2.z, t3.z FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS allow_experimental_analyzer = 1; +SELECT t1.z, t2.z, t3.z FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS enable_analyzer = 1; 0 0 43 0 48 0 124 124 0 1232 0 1232 -SELECT * FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS enable_analyzer = 1; 126 0 0 42 369 123 b 124 0 3693 1231 a 0 1231 6666 0 48 0 -SELECT t1.*, t2.*, t3.* FROM t1 FULL JOIN t2 USING (y) FULL JOIN 
t3 USING (y) ORDER BY 1,2,3 SETTINGS allow_experimental_analyzer = 1; +SELECT t1.*, t2.*, t3.* FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS enable_analyzer = 1; 0 126 0 42 0 6666 48 0 123 b 369 124 0 1231 a 3693 0 1231 -SELECT (1, t1.*, t2.*, t3.*) FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1 SETTINGS allow_experimental_analyzer = 1; +SELECT (1, t1.*, t2.*, t3.*) FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1 SETTINGS enable_analyzer = 1; (1,0,'',126,0,42) (1,0,'',6666,48,0) (1,123,'b',369,124,0) (1,1231,'a',3693,0,1231) -SELECT y FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT y FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; 369 3693 6666 @@ -78,11 +78,11 @@ SELECT s FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) a b -SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS allow_experimental_analyzer = 1, join_use_nulls = 0; +SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS enable_analyzer = 1, join_use_nulls = 0; 369 3693 6666 -SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS allow_experimental_analyzer = 1, join_use_nulls = 1; +SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS enable_analyzer = 1, join_use_nulls = 1; 369 3693 6666 diff --git a/tests/queries/0_stateless/02955_analyzer_using_functional_args.sql.j2 b/tests/queries/0_stateless/02955_analyzer_using_functional_args.sql.j2 index f5b81231afe..1dde83149b6 100644 --- a/tests/queries/0_stateless/02955_analyzer_using_functional_args.sql.j2 +++ b/tests/queries/0_stateless/02955_analyzer_using_functional_args.sql.j2 @@ -24,21 +24,21 @@ SELECT count() FROM t1 INNER JOIN t2 USING (y); SELECT count() FROM t2 INNER JOIN t1 USING (y); -- `SELECT *` works differently for ALIAS columns with analyzer -SELECT * FROM t1 INNER JOIN t2 USING (y, z) SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t2 INNER JOIN t1 USING (y, z) SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 INNER JOIN t2 USING (y, z) SETTINGS enable_analyzer = 1; +SELECT * FROM t2 INNER JOIN t1 USING (y, z) SETTINGS enable_analyzer = 1; SELECT t2.z FROM t1 INNER JOIN t2 USING (y); -SELECT * FROM t1 INNER JOIN t3 USING (y) SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t3 INNER JOIN t1 USING (y, z) SETTINGS allow_experimental_analyzer = 1; +SELECT * FROM t1 INNER JOIN t3 USING (y) SETTINGS enable_analyzer = 1; +SELECT * FROM t3 INNER JOIN t1 USING (y, z) SETTINGS enable_analyzer = 1; SELECT s FROM t1 INNER JOIN t3 USING (y); -- { echoOn } -- USING alias column contains default in old analyzer (but both queries below should have the same result) -SELECT y * 2, s || 'a' FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT y * 2, s || 'a' FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; SELECT y * 2, s || 'a' FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL; -SELECT (1, *) FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT (1, *) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT (1, *) FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT (1, *) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) 
ORDER BY ALL SETTINGS enable_analyzer = 1; SELECT (1, t1.*) FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL; SELECT (1, t1.*) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL; @@ -46,19 +46,19 @@ SELECT (1, t1.*) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 U SELECT (1, t1.*, t2.*) FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL; SELECT (1, t1.*, t2.*) FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL; -SELECT t1.z, t2.z, t3.z FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS allow_experimental_analyzer = 1; -SELECT * FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS allow_experimental_analyzer = 1; -SELECT t1.*, t2.*, t3.* FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS allow_experimental_analyzer = 1; -SELECT (1, t1.*, t2.*, t3.*) FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1 SETTINGS allow_experimental_analyzer = 1; +SELECT t1.z, t2.z, t3.z FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS enable_analyzer = 1; +SELECT t1.*, t2.*, t3.* FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1,2,3 SETTINGS enable_analyzer = 1; +SELECT (1, t1.*, t2.*, t3.*) FROM t1 FULL JOIN t2 USING (y) FULL JOIN t3 USING (y) ORDER BY 1 SETTINGS enable_analyzer = 1; -SELECT y FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT y FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL SETTINGS enable_analyzer = 1; SELECT y FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL; SELECT s FROM t1 FULL JOIN t2 USING (y) ORDER BY ALL; SELECT s FROM (SELECT s, y FROM t1) t1 FULL JOIN (SELECT y FROM t2) t2 USING (y) ORDER BY ALL; -SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS allow_experimental_analyzer = 1, join_use_nulls = 0; -SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS allow_experimental_analyzer = 1, join_use_nulls = 1; +SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS enable_analyzer = 1, join_use_nulls = 0; +SELECT y FROM t1 FULL JOIN t2 USING (y) PREWHERE y * 2 > 2 ORDER BY ALL SETTINGS enable_analyzer = 1, join_use_nulls = 1; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql b/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql index 98259fc8029..0f658379ff9 100644 --- a/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql +++ b/tests/queries/0_stateless/02955_sparkBar_alias_sparkbar.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS spark_bar_test; CREATE TABLE spark_bar_test (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; @@ -9,4 +9,3 @@ SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FR SELECT sparkBar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_test GROUP BY event_date); DROP TABLE IF EXISTS spark_bar_test; - diff --git a/tests/queries/0_stateless/02962_join_using_bug_57894.sql b/tests/queries/0_stateless/02962_join_using_bug_57894.sql index 87aef8b1a71..96190241da5 100644 --- a/tests/queries/0_stateless/02962_join_using_bug_57894.sql +++ 
b/tests/queries/0_stateless/02962_join_using_bug_57894.sql @@ -9,7 +9,7 @@ CREATE TABLE r (`x` LowCardinality(Nullable(UInt32)), `s` Nullable(String)) ENGI INSERT INTO r SELECT number, toString(number) FROM numbers(2, 8); INSERT INTO r VALUES (NULL, NULL); -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL ; @@ -21,7 +21,7 @@ SETTINGS join_algorithm = 'partial_merge'; SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL SETTINGS join_algorithm = 'full_sorting_merge'; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL ; diff --git a/tests/queries/0_stateless/02967_analyzer_fuzz.sql b/tests/queries/0_stateless/02967_analyzer_fuzz.sql index 7f2d9afcc71..dab6ec2af22 100644 --- a/tests/queries/0_stateless/02967_analyzer_fuzz.sql +++ b/tests/queries/0_stateless/02967_analyzer_fuzz.sql @@ -7,7 +7,7 @@ GROUP BY makeDateTime64(NULL, NULL, pow(NULL, '257') - '-1', '0.2147483647', 257), makeDateTime64(pow(pow(NULL, '21474836.46') - '0.0000065535', 1048577), '922337203685477580.6', NULL, NULL, pow(NULL, 1.0001) - 65536, NULL) WITH CUBE - SETTINGS allow_experimental_analyzer = 1; + SETTINGS enable_analyzer = 1; CREATE TABLE data_01223 (`key` Int) ENGINE = Memory; @@ -17,4 +17,4 @@ SELECT count(round('92233720368547758.07', '-0.01', NULL, nan, '25.7', '-9223372 FROM dist_01223 WHERE round(NULL, 1025, 1.1754943508222875e-38, NULL) WITH TOTALS - SETTINGS allow_experimental_analyzer = 1; + SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh index e49a340ab67..e954cb0e78e 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh @@ -26,7 +26,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=0) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, +SETTINGS enable_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" @@ -35,7 +35,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=0) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', +SETTINGS enable_analyzer=1, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -51,7 +51,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by 
l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, +SETTINGS enable_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" @@ -60,7 +60,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', +SETTINGS enable_analyzer=1, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -77,7 +77,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, +SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" @@ -86,7 +86,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -102,7 +102,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='hash') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, +SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" @@ -111,7 +111,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='hash') r on l.key = r.key order by l.key limit 10 offset 10000 
-SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | @@ -127,7 +127,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, +SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'" @@ -136,7 +136,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', +SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, send_logs_level='trace', allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference index 100e4e500cd..36f02b2f764 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference @@ -2,7 +2,7 @@ set parallel_replicas_prefer_local_join = 0; -- A query with only INNER/LEFT joins is fully send to replicas. JOIN is executed in GLOBAL mode. 
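-- Illustrative sketch only (not part of this patch): the comment above summarizes what the plans in
-- this reference check. With parallel replicas and the analyzer, a join tree made only of INNER/LEFT
-- joins is shipped to the replicas as a whole, so EXPLAIN collapses to an Expression step over
-- ReadFromRemoteParallelReplicas. A reduced form of the pattern, reusing the tables, cluster and
-- settings from this test:
explain description = 0
select l.x, r.z
from (select x, y from tab1 where x != 2) l
any left join (select y, z from tab2 where y != 4) r on l.y = r.y
settings allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2,
    parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1,
    cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost',
    enable_analyzer = 1;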
-select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -18,7 +18,7 @@ select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x 13 13 0 0 0 0 14 14 14 14 0 0 15 15 0 0 0 0 -explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression ReadFromRemoteParallelReplicas -- @@ -29,7 +29,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -52,7 +52,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x 
-SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -65,7 +65,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) select * from sub5 order by key -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 54 54 50 50 12 12 0 64 64 0 0 0 0 1 explain description=0 @@ -75,7 +75,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) select * from sub5 order by key -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -90,7 +90,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -113,7 +113,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = 
rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -130,7 +130,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -153,7 +153,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -174,7 +174,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 6 6 6 6 0 0 8 8 8 8 0 0 @@ -197,7 +197,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, 
max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Join Expression @@ -215,7 +215,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 @@ -237,7 +237,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -258,7 +258,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -281,7 +281,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order 
by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -294,7 +294,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -317,7 +317,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1, parallel_replicas_allow_in_with_subquery=0;-- { echoOn } +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0;-- { echoOn } Expression Sorting Expression @@ -339,7 +339,7 @@ Expression ReadFromRemoteParallelReplicas set parallel_replicas_prefer_local_join = 1; -- A query with only INNER/LEFT joins is fully send to replicas. JOIN is executed in GLOBAL mode. 
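-- Illustrative sketch only (not part of this patch): from here the reference repeats the same queries
-- with parallel_replicas_prefer_local_join = 1, so the interesting comparison is the plan shape with
-- the local-join preference off versus on. A reduced toggle, reusing the tables, cluster and settings
-- from this test (run it once with the setting at 0 and once at 1 to compare the plans):
explain description = 0
select * from tab1 any left join tab2 using (y)
settings allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2,
    parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1,
    cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost',
    enable_analyzer = 1, parallel_replicas_prefer_local_join = 1;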
-select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -355,7 +355,7 @@ select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x 13 13 0 0 0 0 14 14 14 14 0 0 15 15 0 0 0 0 -explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression ReadFromRemoteParallelReplicas -- @@ -366,7 +366,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -389,7 +389,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x 
-SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -402,7 +402,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) select * from sub5 order by key -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 54 54 50 50 12 12 0 64 64 0 0 0 0 1 explain description=0 @@ -412,7 +412,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) select * from sub5 order by key -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -427,7 +427,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -450,7 +450,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on 
ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -467,7 +467,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -490,7 +490,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -511,7 +511,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 6 6 6 6 0 0 8 8 8 8 0 0 @@ -534,7 +534,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, 
max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Join Expression @@ -552,7 +552,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 @@ -574,7 +574,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -595,7 +595,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -618,7 +618,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order 
by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting Expression @@ -631,7 +631,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -654,7 +654,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; Expression Sorting Expression diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 index 54505b147a3..775663768fe 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 +++ b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 @@ -17,8 +17,8 @@ insert into tab3 select number * 4, number * 4 from numbers(4); set parallel_replicas_prefer_local_join = {{use_global_in}}; -- A query with only INNER/LEFT joins is fully send to replicas. JOIN is executed in GLOBAL mode. 
-select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; -explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- The same query with cte; with sub1 as (select x, y from tab1 where x != 2), @@ -27,7 +27,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -36,7 +36,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, 
parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- GROUP BY should work up to WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -45,7 +45,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) select * from sub5 order by key -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -54,7 +54,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) select * from sub5 order by key -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- ORDER BY in sub3 : sub1 is fully pushed, sub3 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -63,7 +63,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -72,7 +72,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 
as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- ORDER BY in sub1 : sub1 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2 order by y), @@ -81,7 +81,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y from tab1 where x != 2 order by y), @@ -90,7 +90,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- RIGHT JOIN in sub3: sub3 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -99,7 +99,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y 
from tab1 where x != 2), @@ -108,7 +108,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -116,7 +116,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -124,7 +124,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +select * from sub5 order by x SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- Subqueries for IN allowed @@ -134,7 +134,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, 
parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; explain description=0 with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -143,7 +143,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1; +SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -- -- Subqueries for IN are not allowed @@ -153,7 +153,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; explain description=0 with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -162,6 +162,6 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; {%- endfor %} diff --git a/tests/queries/0_stateless/02969_analyzer_eliminate_injective_functions.sql b/tests/queries/0_stateless/02969_analyzer_eliminate_injective_functions.sql index 15f2550a63e..a7d0c7793db 100644 --- a/tests/queries/0_stateless/02969_analyzer_eliminate_injective_functions.sql +++ b/tests/queries/0_stateless/02969_analyzer_eliminate_injective_functions.sql @@ -1,4 +1,4 @@ -set 
allow_experimental_analyzer = 1; +set enable_analyzer = 1; EXPLAIN QUERY TREE SELECT toString(toString(number + 1)) as val, count() diff --git a/tests/queries/0_stateless/02969_functions_to_subcolumns_if_null.sql b/tests/queries/0_stateless/02969_functions_to_subcolumns_if_null.sql index 361fd7c7a4e..859d26af1a1 100644 --- a/tests/queries/0_stateless/02969_functions_to_subcolumns_if_null.sql +++ b/tests/queries/0_stateless/02969_functions_to_subcolumns_if_null.sql @@ -7,21 +7,21 @@ INSERT INTO t_subcolumns_if SELECT number::Nullable(Int64) as number FROM number SELECT sum(multiIf(id IS NOT NULL, 1, 0)) FROM t_subcolumns_if -SETTINGS allow_experimental_analyzer = 1, optimize_functions_to_subcolumns = 1; +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 1; SELECT sum(multiIf(id IS NULL, 1, 0)) FROM t_subcolumns_if -SETTINGS allow_experimental_analyzer = 0, optimize_functions_to_subcolumns = 1; +SETTINGS enable_analyzer = 0, optimize_functions_to_subcolumns = 1; SELECT sum(multiIf(id IS NULL, 1, 0)) FROM t_subcolumns_if -SETTINGS allow_experimental_analyzer = 1, optimize_functions_to_subcolumns = 0; +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 0; SELECT sum(multiIf(id IS NULL, 1, 0)) FROM t_subcolumns_if -SETTINGS allow_experimental_analyzer = 1, optimize_functions_to_subcolumns = 1; +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 1; DROP TABLE IF EXISTS t_subcolumns_if; diff --git a/tests/queries/0_stateless/02971_analyzer_remote_id.sh b/tests/queries/0_stateless/02971_analyzer_remote_id.sh index ab3c5292529..6d504ce3568 100755 --- a/tests/queries/0_stateless/02971_analyzer_remote_id.sh +++ b/tests/queries/0_stateless/02971_analyzer_remote_id.sh @@ -5,5 +5,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="CREATE TABLE ${CLICKHOUSE_DATABASE}.x ENGINE = MergeTree() ORDER BY number AS SELECT * FROM numbers(2)" -${CLICKHOUSE_LOCAL} --query="SELECT count() FROM remote('127.0.0.{2,3}', '${CLICKHOUSE_DATABASE}.x') SETTINGS allow_experimental_analyzer = 1" 2>&1 \ +${CLICKHOUSE_LOCAL} --query="SELECT count() FROM remote('127.0.0.{2,3}', '${CLICKHOUSE_DATABASE}.x') SETTINGS enable_analyzer = 1" 2>&1 \ | grep -av "ASan doesn't fully support makecontext/swapcontext functions" diff --git a/tests/queries/0_stateless/02971_functions_to_subcolumns_column_names.sql b/tests/queries/0_stateless/02971_functions_to_subcolumns_column_names.sql index 48e5232d18b..6df2f27dbbf 100644 --- a/tests/queries/0_stateless/02971_functions_to_subcolumns_column_names.sql +++ b/tests/queries/0_stateless/02971_functions_to_subcolumns_column_names.sql @@ -5,7 +5,7 @@ CREATE TABLE t_column_names (arr Array(UInt64), n Nullable(String)) ENGINE = Mem INSERT INTO t_column_names VALUES ([1, 2, 3], 'foo'); SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(arr), isNull(n) FROM t_column_names; SELECT length(arr), isNull(n) FROM t_column_names FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/02971_functions_to_subcolumns_map.sql b/tests/queries/0_stateless/02971_functions_to_subcolumns_map.sql index e8a752a82d5..c53a03b8ccd 100644 --- a/tests/queries/0_stateless/02971_functions_to_subcolumns_map.sql +++ b/tests/queries/0_stateless/02971_functions_to_subcolumns_map.sql @@ -5,7 +5,7 @@ CREATE TABLE t_func_to_subcolumns_map (id UInt64, m Map(String, UInt64)) ENGINE INSERT INTO t_func_to_subcolumns_map VALUES (1, map('aaa', 1, 'bbb', 2)) (2, map('ccc', 3)); SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(m) FROM t_func_to_subcolumns_map; SELECT length(m) FROM t_func_to_subcolumns_map; diff --git a/tests/queries/0_stateless/02971_functions_to_subcolumns_variant.sql b/tests/queries/0_stateless/02971_functions_to_subcolumns_variant.sql index 511bcc44514..2612664a7b2 100644 --- a/tests/queries/0_stateless/02971_functions_to_subcolumns_variant.sql +++ b/tests/queries/0_stateless/02971_functions_to_subcolumns_variant.sql @@ -7,7 +7,7 @@ CREATE TABLE t_func_to_subcolumns_variant (id UInt64, v Variant(String, UInt64)) INSERT INTO t_func_to_subcolumns_variant VALUES (1, 'foo') (2, 111); SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT variantElement(v, 'String') FROM t_func_to_subcolumns_variant; SELECT variantElement(v, 'String') FROM t_func_to_subcolumns_variant; diff --git a/tests/queries/0_stateless/02972_parallel_replicas_cte.sql b/tests/queries/0_stateless/02972_parallel_replicas_cte.sql index 083b0ecc5c9..767cd61216b 100644 --- a/tests/queries/0_stateless/02972_parallel_replicas_cte.sql +++ b/tests/queries/0_stateless/02972_parallel_replicas_cte.sql @@ -15,15 +15,15 @@ WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a SETTINGS allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; --- 
Testing that it is disabled for allow_experimental_analyzer=0. With analyzer it will be supported (with correct result) +-- Testing that it is disabled for enable_analyzer=0. With analyzer it will be supported (with correct result) WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a -SETTINGS allow_experimental_analyzer = 0, allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; -- { serverError SUPPORT_IS_DISABLED } +SETTINGS enable_analyzer = 0, allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; -- { serverError SUPPORT_IS_DISABLED } -- Disabled for any value of allow_experimental_parallel_reading_from_replicas != 1, not just 2 WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a -SETTINGS allow_experimental_analyzer = 0, allow_experimental_parallel_reading_from_replicas = 512, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; -- { serverError SUPPORT_IS_DISABLED } +SETTINGS enable_analyzer = 0, allow_experimental_parallel_reading_from_replicas = 512, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; -- { serverError SUPPORT_IS_DISABLED } -- Sanitizer SELECT count() FROM pr_2 JOIN numbers(10) as pr_1 ON pr_2.a = pr_1.number diff --git a/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql b/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql index 30fb3c76c1f..1fd103d0bb0 100644 --- a/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql +++ b/tests/queries/0_stateless/02974_analyzer_array_join_subcolumn.sql @@ -7,16 +7,16 @@ INSERT INTO t2 VALUES (1, {'a': (1, 2), 'b': (2, 3)}),; CREATE TABLE t3 (id Int32, c Tuple(v String, pe Map(String, Tuple(a UInt64, b UInt64)))) ENGINE = MergeTree ORDER BY id; INSERT INTO t3 VALUES (1, ('A', {'a':(1, 2),'b':(2, 3)})); -SELECT pe, pe.values.a FROM (SELECT * FROM t2) ARRAY JOIN pe SETTINGS allow_experimental_analyzer = 1; -SELECT p, p.values.a FROM (SELECT * FROM t2) ARRAY JOIN pe AS p SETTINGS allow_experimental_analyzer = 1; +SELECT pe, pe.values.a FROM (SELECT * FROM t2) ARRAY JOIN pe SETTINGS enable_analyzer = 1; +SELECT p, p.values.a FROM (SELECT * FROM t2) ARRAY JOIN pe AS p SETTINGS enable_analyzer = 1; SELECT pe, pe.values.a FROM t2 ARRAY JOIN pe; SELECT p, p.values.a FROM t2 ARRAY JOIN pe AS p; -SELECT c.pe, c.pe.values.a FROM (SELECT * FROM t3) ARRAY JOIN c.pe SETTINGS allow_experimental_analyzer = 1; -SELECT p, p.values.a FROM (SELECT * FROM t3) ARRAY JOIN c.pe as p SETTINGS allow_experimental_analyzer = 1; +SELECT c.pe, c.pe.values.a FROM (SELECT * FROM t3) ARRAY JOIN c.pe SETTINGS enable_analyzer = 1; +SELECT p, p.values.a FROM (SELECT * FROM t3) ARRAY JOIN c.pe as p SETTINGS enable_analyzer = 1; -SELECT c.pe, c.pe.values.a FROM t3 ARRAY JOIN c.pe SETTINGS allow_experimental_analyzer = 1; +SELECT c.pe, c.pe.values.a FROM t3 ARRAY JOIN c.pe SETTINGS enable_analyzer = 1; SELECT p, 
p.values.a FROM t3 ARRAY JOIN c.pe as p; diff --git a/tests/queries/0_stateless/02987_logical_optimizer_pass_lowcardinality.sql b/tests/queries/0_stateless/02987_logical_optimizer_pass_lowcardinality.sql index be7689025b2..266270562cc 100644 --- a/tests/queries/0_stateless/02987_logical_optimizer_pass_lowcardinality.sql +++ b/tests/queries/0_stateless/02987_logical_optimizer_pass_lowcardinality.sql @@ -2,4 +2,4 @@ CREATE TABLE 02987_logical_optimizer_table (key Int, value Int) ENGINE=Memory(); CREATE VIEW v1 AS SELECT * FROM 02987_logical_optimizer_table; CREATE TABLE 02987_logical_optimizer_merge AS v1 ENGINE=Merge(currentDatabase(), 'v1'); -SELECT _table, key FROM 02987_logical_optimizer_merge WHERE (_table = toFixedString(toFixedString(toFixedString('v1', toNullable(2)), 2), 2)) OR ((value = toLowCardinality(toNullable(10))) AND (_table = toFixedString(toNullable('v3'), 2))) OR ((value = 20) AND (_table = toFixedString(toFixedString(toFixedString('v1', 2), 2), 2)) AND (_table = toFixedString(toLowCardinality(toFixedString('v3', 2)), 2))) SETTINGS allow_experimental_analyzer = true, join_use_nulls = true, convert_query_to_cnf = true; +SELECT _table, key FROM 02987_logical_optimizer_merge WHERE (_table = toFixedString(toFixedString(toFixedString('v1', toNullable(2)), 2), 2)) OR ((value = toLowCardinality(toNullable(10))) AND (_table = toFixedString(toNullable('v3'), 2))) OR ((value = 20) AND (_table = toFixedString(toFixedString(toFixedString('v1', 2), 2), 2)) AND (_table = toFixedString(toLowCardinality(toFixedString('v3', 2)), 2))) SETTINGS enable_analyzer = true, join_use_nulls = true, convert_query_to_cnf = true; diff --git a/tests/queries/0_stateless/02989_join_using_parent_scope.reference b/tests/queries/0_stateless/02989_join_using_parent_scope.reference index 965bfaf7c43..72678a1db4d 100644 --- a/tests/queries/0_stateless/02989_join_using_parent_scope.reference +++ b/tests/queries/0_stateless/02989_join_using_parent_scope.reference @@ -23,7 +23,7 @@ SELECT b AS a, a FROM tb JOIN tabc USING (a) ORDER BY ALL; 3 3 SELECT 1 AS b FROM tb JOIN ta USING (b); -- { serverError UNKNOWN_IDENTIFIER } -- SELECT * returns all columns from both tables in new analyzer -SELECT 3 AS a, a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT 3 AS a, a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 3 3 0 3 abc3 3 3 1 3 abc3 3 3 2 3 abc3 @@ -31,57 +31,57 @@ SELECT 3 AS a, a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS allow 3 3 \N 0 abc0 3 3 \N 1 abc1 3 3 \N 2 abc2 -SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 abc1 2 1 2 abc2 3 2 3 abc3 -SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 abc1 2 1 2 abc2 3 2 3 abc3 -SELECT b + 1 AS a, * FROM tb LEFT JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb LEFT JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 abc1 2 1 2 abc2 3 2 3 abc3 4 3 \N \N -SELECT b + 1 AS a, * FROM tb RIGHT JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb RIGHT JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 abc1 2 1 2 abc2 3 2 3 abc3 \N \N 0 
abc0 -SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 abc1 2 1 2 abc2 3 2 3 abc3 4 3 \N \N \N \N 0 abc0 -SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS asterisk_include_alias_columns = 1, allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS asterisk_include_alias_columns = 1, enable_analyzer = 1; 1 0 1 abc1 2 3 2 1 2 abc2 3 4 3 2 3 abc3 4 5 4 3 \N \N \N \N \N \N 0 abc0 1 2 -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 2 2 1 2 3 3 2 3 4 -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 LEFT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 LEFT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 2 2 1 2 3 3 2 3 4 4 3 \N \N -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 RIGHT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 RIGHT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 2 2 1 2 3 3 2 3 4 \N \N 0 1 -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 FULL JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 FULL JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; 1 0 1 2 2 1 2 3 3 2 3 4 4 3 \N \N \N \N 0 1 -SELECT b + 1 AS a, s FROM tb FULL OUTER JOIN tabc USING (a) PREWHERE a > 2 ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, s FROM tb FULL OUTER JOIN tabc USING (a) PREWHERE a > 2 ORDER BY ALL SETTINGS enable_analyzer = 1; 3 abc3 4 \N \N abc0 @@ -94,7 +94,7 @@ SELECT a + 2 AS b FROM tb JOIN tabc USING (b) ORDER BY ALL SETTINGS analyzer_compatibility_join_using_top_level_identifier = 1; -- { serverError UNKNOWN_IDENTIFIER } -- In new analyzer with `analyzer_compatibility_join_using_top_level_identifier = 0` we get `b` from left table SELECT a + 2 AS b FROM tb JOIN tabc USING (b) ORDER BY ALL -SETTINGS analyzer_compatibility_join_using_top_level_identifier = 0, allow_experimental_analyzer = 1; +SETTINGS analyzer_compatibility_join_using_top_level_identifier = 0, enable_analyzer = 1; 2 3 4 @@ -108,7 +108,7 @@ SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name FROM users u1 JOIN users u2 USING (name) ORDER BY u1.uid FORMAT TSVWithNamesAndTypes -SETTINGS allow_experimental_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 1; +SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 1; uid name u2.uid u2.name Int16 String Int16 String 1231 Ksenia 6666 Ksenia @@ -116,7 +116,7 @@ SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name FROM users u1 JOIN users u2 USING (name) ORDER BY u1.uid FORMAT TSVWithNamesAndTypes -SETTINGS allow_experimental_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 0; +SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 0; uid name u2.uid u2.name Int16 
String Int16 String 1231 Ksenia 1231 John @@ -125,7 +125,7 @@ SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name FROM users u1 JOIN users u2 USING (name) ORDER BY u1.uid FORMAT TSVWithNamesAndTypes -SETTINGS allow_experimental_analyzer = 0; +SETTINGS enable_analyzer = 0; uid name u2.uid u2.name Int16 String Int16 String 1231 Ksenia 6666 Ksenia diff --git a/tests/queries/0_stateless/02989_join_using_parent_scope.sql b/tests/queries/0_stateless/02989_join_using_parent_scope.sql index 2e4d9f097f7..4283d8b6eca 100644 --- a/tests/queries/0_stateless/02989_join_using_parent_scope.sql +++ b/tests/queries/0_stateless/02989_join_using_parent_scope.sql @@ -24,21 +24,21 @@ SELECT b AS a, a FROM tb JOIN tabc USING (a) ORDER BY ALL; SELECT 1 AS b FROM tb JOIN ta USING (b); -- { serverError UNKNOWN_IDENTIFIER } -- SELECT * returns all columns from both tables in new analyzer -SELECT 3 AS a, a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT 3 AS a, a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; -SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM tb LEFT JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM tb RIGHT JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS asterisk_include_alias_columns = 1, allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM tb LEFT JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM tb RIGHT JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS asterisk_include_alias_columns = 1, enable_analyzer = 1; -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 LEFT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 RIGHT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; -SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 FULL JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 LEFT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 RIGHT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; +SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 FULL JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1; -SELECT b + 1 AS 
a, s FROM tb FULL OUTER JOIN tabc USING (a) PREWHERE a > 2 ORDER BY ALL SETTINGS allow_experimental_analyzer = 1; +SELECT b + 1 AS a, s FROM tb FULL OUTER JOIN tabc USING (a) PREWHERE a > 2 ORDER BY ALL SETTINGS enable_analyzer = 1; -- It's a default behavior for old analyzer and new with analyzer_compatibility_join_using_top_level_identifier @@ -49,7 +49,7 @@ SETTINGS analyzer_compatibility_join_using_top_level_identifier = 1; -- { server -- In new analyzer with `analyzer_compatibility_join_using_top_level_identifier = 0` we get `b` from left table SELECT a + 2 AS b FROM tb JOIN tabc USING (b) ORDER BY ALL -SETTINGS analyzer_compatibility_join_using_top_level_identifier = 0, allow_experimental_analyzer = 1; +SETTINGS analyzer_compatibility_join_using_top_level_identifier = 0, enable_analyzer = 1; -- This is example where query may return different results with different `analyzer_compatibility_join_using_top_level_identifier` @@ -63,19 +63,19 @@ SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name FROM users u1 JOIN users u2 USING (name) ORDER BY u1.uid FORMAT TSVWithNamesAndTypes -SETTINGS allow_experimental_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 1; +SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 1; SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name FROM users u1 JOIN users u2 USING (name) ORDER BY u1.uid FORMAT TSVWithNamesAndTypes -SETTINGS allow_experimental_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 0; +SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 0; SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name FROM users u1 JOIN users u2 USING (name) ORDER BY u1.uid FORMAT TSVWithNamesAndTypes -SETTINGS allow_experimental_analyzer = 0; +SETTINGS enable_analyzer = 0; DROP TABLE IF EXISTS users; diff --git a/tests/queries/0_stateless/02991_count_rewrite_analyzer.sql b/tests/queries/0_stateless/02991_count_rewrite_analyzer.sql index b11aeedd225..bb0d3a1a9d1 100644 --- a/tests/queries/0_stateless/02991_count_rewrite_analyzer.sql +++ b/tests/queries/0_stateless/02991_count_rewrite_analyzer.sql @@ -1,5 +1,5 @@ -- Regression test for https://github.com/ClickHouse/ClickHouse/issues/59919 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT toTypeName(sum(toNullable('a') IN toNullable('a'))) AS x; SELECT toTypeName(count(toNullable('a') IN toNullable('a'))) AS x; diff --git a/tests/queries/0_stateless/02992_analyzer_group_by_const.sql b/tests/queries/0_stateless/02992_analyzer_group_by_const.sql index 2a9e673d7bc..efe18918c93 100644 --- a/tests/queries/0_stateless/02992_analyzer_group_by_const.sql +++ b/tests/queries/0_stateless/02992_analyzer_group_by_const.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; -- Illegal column String of first argument of function concatWithSeparator. Must be a constant String. 
SELECT concatWithSeparator('a', 'b') GROUP BY 'a'; diff --git a/tests/queries/0_stateless/02996_analyzer_prewhere_projection.sql b/tests/queries/0_stateless/02996_analyzer_prewhere_projection.sql index 9d676001010..66f7c37a893 100644 --- a/tests/queries/0_stateless/02996_analyzer_prewhere_projection.sql +++ b/tests/queries/0_stateless/02996_analyzer_prewhere_projection.sql @@ -4,4 +4,4 @@ CREATE TABLE t__fuzz_0 (`i` LowCardinality(Int32), `j` Int32, `k` Int32, PROJECT INSERT INTO t__fuzz_0 Select number, number, number FROM numbers(100); SELECT * FROM t__fuzz_0 PREWHERE 7 AND (i < 2147483647) AND (j IN (2147483646, -2, 1)) -SETTINGS allow_experimental_analyzer = true; +SETTINGS enable_analyzer = true; diff --git a/tests/queries/0_stateless/02998_analyzer_prewhere_report.sql b/tests/queries/0_stateless/02998_analyzer_prewhere_report.sql index b3027181901..5a7cab854e9 100644 --- a/tests/queries/0_stateless/02998_analyzer_prewhere_report.sql +++ b/tests/queries/0_stateless/02998_analyzer_prewhere_report.sql @@ -15,4 +15,4 @@ SELECT arrayFilter(x -> (x IN (2, 3)), data) AS filtered FROM hits WHERE arrayExists(x -> (x IN (2, 3)), data) -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference b/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference index ffd2f68990b..ccd9540cb49 100644 --- a/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference +++ b/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference @@ -1,5 +1,5 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE SELECT encrypt('aes-256-ofb', (SELECT 'qwerty'), '12345678901234567890123456789012'), encrypt('aes-256-ofb', (SELECT 'asdf'), '12345678901234567890123456789012'); QUERY id: 0 PROJECTION COLUMNS @@ -7,7 +7,7 @@ QUERY id: 0 encrypt(\'aes-256-ofb\', [HIDDEN id: 3], [HIDDEN id: 2]) Nullable(String) PROJECTION LIST id: 1, nodes: 2 - CONSTANT id: 2, constant_value: \'\\nãì&\', constant_value_type: Nullable(String) + CONSTANT id: 2, constant_value: \'\\n��&\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 3, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS @@ -15,7 +15,7 @@ QUERY id: 0 CONSTANT id: 5, constant_value: \'aes-256-ofb\', constant_value_type: String CONSTANT id: 6, constant_value: [HIDDEN id: 1], constant_value_type: Nullable(String) CONSTANT id: 7, constant_value: [HIDDEN id: 2], constant_value_type: String - CONSTANT id: 8, constant_value: \'çø\', constant_value_type: Nullable(String) + CONSTANT id: 8, constant_value: \'��\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 9, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS @@ -33,7 +33,7 @@ QUERY id: 0 encrypt(\'aes-256-ofb\', _subquery_2, \'12345678901234567890123456789012\') Nullable(String) PROJECTION LIST id: 1, nodes: 2 - CONSTANT id: 2, constant_value: \'\\nãì&\', constant_value_type: Nullable(String) + CONSTANT id: 2, constant_value: \'\\n��&\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 3, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS @@ -50,7 +50,7 @@ QUERY id: 0 JOIN TREE TABLE id: 10, table_name: system.one CONSTANT id: 11, constant_value: \'12345678901234567890123456789012\', constant_value_type: String - CONSTANT id: 12, constant_value: \'çø\', constant_value_type: Nullable(String) + 
CONSTANT id: 12, constant_value: \'��\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 13, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS diff --git a/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.sql b/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.sql index f40b40b6c8c..a216f886f8a 100644 --- a/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.sql +++ b/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.sql @@ -2,7 +2,7 @@ -- encrypt function doesn't exist in the fastest build -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE SELECT encrypt('aes-256-ofb', (SELECT 'qwerty'), '12345678901234567890123456789012'), encrypt('aes-256-ofb', (SELECT 'asdf'), '12345678901234567890123456789012'); diff --git a/tests/queries/0_stateless/02998_projection_after_attach_partition.reference b/tests/queries/0_stateless/02998_projection_after_attach_partition.reference index 1cb984f0f34..93cae129842 100644 --- a/tests/queries/0_stateless/02998_projection_after_attach_partition.reference +++ b/tests/queries/0_stateless/02998_projection_after_attach_partition.reference @@ -19,12 +19,12 @@ INSERT INTO visits_order SELECT 2, 'user2', number from numbers(1, 10); INSERT INTO visits_order SELECT 2, 'another_user2', number*2 from numbers(1, 10); INSERT INTO visits_order SELECT 2, 'yet_another_user2', number*3 from numbers(1, 10); ALTER TABLE visits_order_dst ATTACH PARTITION ID '2' FROM visits_order; -SET allow_experimental_analyzer=0; +SET enable_analyzer=0; EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2'; Expression ((Projection + Before ORDER BY)) Filter ReadFromMergeTree (user_name_projection) -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2'; Expression ((Project names + Projection)) Filter diff --git a/tests/queries/0_stateless/02998_projection_after_attach_partition.sql b/tests/queries/0_stateless/02998_projection_after_attach_partition.sql index 4e0121dafe9..72ee4ad81e8 100644 --- a/tests/queries/0_stateless/02998_projection_after_attach_partition.sql +++ b/tests/queries/0_stateless/02998_projection_after_attach_partition.sql @@ -25,10 +25,10 @@ INSERT INTO visits_order SELECT 2, 'yet_another_user2', number*3 from numbers(1, ALTER TABLE visits_order_dst ATTACH PARTITION ID '2' FROM visits_order; -SET allow_experimental_analyzer=0; +SET enable_analyzer=0; EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2'; -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2'; diff --git a/tests/queries/0_stateless/02999_analyzer_preimage_null.sql b/tests/queries/0_stateless/02999_analyzer_preimage_null.sql index 07d3a0f69c1..0fc61cf0836 100644 --- a/tests/queries/0_stateless/02999_analyzer_preimage_null.sql +++ b/tests/queries/0_stateless/02999_analyzer_preimage_null.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET optimize_time_filter_with_preimage=1; CREATE TABLE date_t__fuzz_0 (`id` UInt32, `value1` String, `date1` Date) ENGINE = ReplacingMergeTree ORDER BY id SETTINGS allow_nullable_key=1; diff --git a/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql b/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql index 32c378ebf0a..c1c7ca87b5f 100644 --- 
a/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql +++ b/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql @@ -3,4 +3,4 @@ SELECT count(_CAST(NULL, 'Nullable(Nothing)')), round(avg(_CAST(NULL, 'Nullable(Nothing)'))) AS k FROM numbers(256) - SETTINGS allow_experimental_analyzer = 1; + SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03002_analyzer_prewhere.sql b/tests/queries/0_stateless/03002_analyzer_prewhere.sql index 0edf16f1cbe..976e7cab73d 100644 --- a/tests/queries/0_stateless/03002_analyzer_prewhere.sql +++ b/tests/queries/0_stateless/03002_analyzer_prewhere.sql @@ -1,4 +1,4 @@ -SET max_threads = 16, receive_timeout = 10., receive_data_timeout_ms = 10000, allow_suspicious_low_cardinality_types = true, enable_positional_arguments = false, log_queries = true, table_function_remote_max_addresses = 200, any_join_distinct_right_table_keys = true, joined_subquery_requires_alias = false, allow_experimental_analyzer = true, max_execution_time = 10., max_memory_usage = 10000000000, log_comment = '/workspace/ch/tests/queries/0_stateless/01710_projection_in_index.sql', send_logs_level = 'fatal', enable_optimize_predicate_expression = false, prefer_localhost_replica = true, allow_introspection_functions = true, optimize_functions_to_subcolumns = false, transform_null_in = true, optimize_use_projections = true, allow_deprecated_syntax_for_merge_tree = true, parallelize_output_from_storages = false; +SET max_threads = 16, receive_timeout = 10., receive_data_timeout_ms = 10000, allow_suspicious_low_cardinality_types = true, enable_positional_arguments = false, log_queries = true, table_function_remote_max_addresses = 200, any_join_distinct_right_table_keys = true, joined_subquery_requires_alias = false, enable_analyzer = true, max_execution_time = 10., max_memory_usage = 10000000000, log_comment = '/workspace/ch/tests/queries/0_stateless/01710_projection_in_index.sql', send_logs_level = 'fatal', enable_optimize_predicate_expression = false, prefer_localhost_replica = true, allow_introspection_functions = true, optimize_functions_to_subcolumns = false, transform_null_in = true, optimize_use_projections = true, allow_deprecated_syntax_for_merge_tree = true, parallelize_output_from_storages = false; CREATE TABLE t__fuzz_0 (`i` Int32, `j` Nullable(Int32), `k` Int32, PROJECTION p (SELECT * ORDER BY j)) ENGINE = MergeTree ORDER BY i SETTINGS index_granularity = 1, allow_nullable_key=1; diff --git a/tests/queries/0_stateless/03003_analyzer_setting.sql b/tests/queries/0_stateless/03003_analyzer_setting.sql index 2e5cab71277..3dbdaed4ad0 100644 --- a/tests/queries/0_stateless/03003_analyzer_setting.sql +++ b/tests/queries/0_stateless/03003_analyzer_setting.sql @@ -1,9 +1,9 @@ CREATE TABLE test (dummy Int8) ENGINE = Distributed(test_cluster_two_shards, 'system', 'one'); -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; -SELECT * FROM (SELECT * FROM test SETTINGS allow_experimental_analyzer = 1); -- { serverError INCORRECT_QUERY } +SELECT * FROM (SELECT * FROM test SETTINGS enable_analyzer = 1); -- { serverError INCORRECT_QUERY } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -SELECT * FROM (SELECT * FROM test SETTINGS allow_experimental_analyzer = 0); -- { serverError INCORRECT_QUERY } +SELECT * FROM (SELECT * FROM test SETTINGS enable_analyzer = 0); -- { serverError INCORRECT_QUERY } diff --git a/tests/queries/0_stateless/03003_functions_to_subcolumns_final.sql b/tests/queries/0_stateless/03003_functions_to_subcolumns_final.sql 
index 3fe29139c5f..b2ca478daa4 100644 --- a/tests/queries/0_stateless/03003_functions_to_subcolumns_final.sql +++ b/tests/queries/0_stateless/03003_functions_to_subcolumns_final.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS t_length_1; DROP TABLE IF EXISTS t_length_2; SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_on_insert = 0; CREATE TABLE t_length_1 (id UInt64, arr Array(UInt64)) ENGINE = ReplacingMergeTree ORDER BY id; diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_2.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_2.sql.j2 index f15fced161c..a09dc18739f 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_2.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_2.sql.j2 @@ -18,7 +18,7 @@ INSERT INTO t1 (key, a, attr) VALUES (1, 10, 'alpha'), (2, 15, 'beta'), (3, 20, INSERT INTO t2 (key, a, attr) VALUES (1, 5, 'ALPHA'), (2, 10, 'beta'), (4, 25, 'delta'); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET allow_experimental_join_condition=1; SET join_use_nulls=0; -- { echoOn } diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_3.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_3.sql.j2 index a97153ce3aa..009ae10e4ff 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_3.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_3.sql.j2 @@ -18,7 +18,7 @@ INSERT INTO t1 (key, a, attr) VALUES (1, 10, 'alpha'), (2, 15, 'beta'), (3, 20, INSERT INTO t2 (key, a, attr) VALUES (1, 5, 'ALPHA'), (2, 10, 'beta'), (4, 25, 'delta'); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET allow_experimental_join_condition=1; SET join_use_nulls=0; -- { echoOn } diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_4.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_4.sql.j2 index 3235019821b..37eaaa8ab5c 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_4.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_4.sql.j2 @@ -15,7 +15,7 @@ CREATE TABLE t2 ( INSERT INTO t1 (key, a) VALUES (1, 10), (2, 15), (3, 20); INSERT INTO t2 (key, a) VALUES (1, 5), (2, 10), (4, 25); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET allow_experimental_join_condition=1; SET join_algorithm='hash'; -- { echoOn } diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 index a363101ca69..61ad5ec0bf1 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 @@ -6,7 +6,7 @@ INSERT INTO t1 VALUES ('key1', 'a', 1, 1, 2), ('key1', 'b', 2, 3, 2), ('key1', ' CREATE TABLE t2 (key String, attr String, a UInt64, b UInt64, c Nullable(UInt64)) ENGINE = MergeTree ORDER BY key; INSERT INTO t2 VALUES ('key1', 'A', 1, 2, 1), ('key1', 'B', 2, 1, 2), ('key1', 'C', 3, 4, 5), ('key1', 'D', 4, 1, 6), ('key3', 'a3', 1, 1, 1), ('key4', 'F', 1,1,1); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET allow_experimental_join_condition=1; SET join_use_nulls=0; -- { echoOn } diff --git a/tests/queries/0_stateless/03006_parallel_replicas_cte_explain_syntax_crash.sql b/tests/queries/0_stateless/03006_parallel_replicas_cte_explain_syntax_crash.sql index df4ec9d26a3..7c8d6dd9aff 100644 --- 
a/tests/queries/0_stateless/03006_parallel_replicas_cte_explain_syntax_crash.sql +++ b/tests/queries/0_stateless/03006_parallel_replicas_cte_explain_syntax_crash.sql @@ -20,7 +20,7 @@ ORDER BY n AS SELECT * FROM numbers(10); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3, parallel_replicas_min_number_of_rows_per_replica=0; EXPLAIN SYNTAX diff --git a/tests/queries/0_stateless/03007_column_nullable_uninitialzed_value.sql b/tests/queries/0_stateless/03007_column_nullable_uninitialzed_value.sql index 9479044e0e0..44f6642d2a5 100644 --- a/tests/queries/0_stateless/03007_column_nullable_uninitialzed_value.sql +++ b/tests/queries/0_stateless/03007_column_nullable_uninitialzed_value.sql @@ -1 +1 @@ -SELECT count(NULL) IGNORE NULLS > avg(toDecimal32(NULL)) IGNORE NULLS, count() FROM numbers(1000) WITH TOTALS SETTINGS allow_experimental_analyzer = 1; +SELECT count(NULL) IGNORE NULLS > avg(toDecimal32(NULL)) IGNORE NULLS, count() FROM numbers(1000) WITH TOTALS SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql index 71a175faac8..5ec6ee5a996 100644 --- a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.sql @@ -1,11 +1,11 @@ SET optimize_rewrite_sum_if_to_count_if = 1; -SET allow_experimental_analyzer = 0; +SET enable_analyzer = 0; SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); SELECT (tuple(sum(if((number % 2) = 0, toNullable(0), 123)) IGNORE NULLS), toUInt8(3)) FROM numbers(100); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); EXPLAIN QUERY TREE SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); diff --git a/tests/queries/0_stateless/03013_group_by_use_nulls_with_materialize_and_analyzer.sql b/tests/queries/0_stateless/03013_group_by_use_nulls_with_materialize_and_analyzer.sql index b15593b2abd..7b57dbd807d 100644 --- a/tests/queries/0_stateless/03013_group_by_use_nulls_with_materialize_and_analyzer.sql +++ b/tests/queries/0_stateless/03013_group_by_use_nulls_with_materialize_and_analyzer.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; set group_by_use_nulls = 1; set optimize_group_by_function_keys = 1; set optimize_injective_functions_in_group_by = 1; @@ -8,4 +8,3 @@ SELECT materialize(3) from numbers(10) GROUP BY GROUPING SETS (('str'), (materia SELECT ignore(3) from numbers(10) GROUP BY GROUPING SETS (('str'), (ignore(3))) order by all; SELECT materialize(ignore(3)) from numbers(10) GROUP BY GROUPING SETS (('str'), (materialize(ignore(3)))) order by all; SELECT ignore(materialize(3)) from numbers(10) GROUP BY GROUPING SETS (('str'), (ignore(materialize(3)))) order by all; - diff --git a/tests/queries/0_stateless/03014_analyzer_groupby_fuzz_60317.sql b/tests/queries/0_stateless/03014_analyzer_groupby_fuzz_60317.sql index 094614cb78d..295f89c5a0a 100644 --- a/tests/queries/0_stateless/03014_analyzer_groupby_fuzz_60317.sql +++ 
b/tests/queries/0_stateless/03014_analyzer_groupby_fuzz_60317.sql @@ -6,7 +6,7 @@ SELECT FROM system.one GROUP BY _CAST(30, 'Nullable(UInt8)') -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; -- WITH CUBE (note that result is different with the analyzer (analyzer is correct including all combinations) SELECT @@ -24,4 +24,4 @@ GROUP BY _CAST(30, 'Nullable(UInt8)') WITH CUBE WITH TOTALS -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03014_group_by_use_nulls_injective_functions_and_analyzer.sql b/tests/queries/0_stateless/03014_group_by_use_nulls_injective_functions_and_analyzer.sql index e7ea964b876..d700f9ba3b1 100644 --- a/tests/queries/0_stateless/03014_group_by_use_nulls_injective_functions_and_analyzer.sql +++ b/tests/queries/0_stateless/03014_group_by_use_nulls_injective_functions_and_analyzer.sql @@ -1,5 +1,4 @@ -set allow_experimental_analyzer=1, group_by_use_nulls=1, optimize_injective_functions_in_group_by=1; +set enable_analyzer=1, group_by_use_nulls=1, optimize_injective_functions_in_group_by=1; SELECT bitNot(bitNot(number)) + 3 FROM numbers(10) GROUP BY GROUPING SETS (('str', bitNot(bitNot(number))), ('str')) order by all; SELECT tuple(tuple(tuple(number))) FROM numbers(10) GROUP BY GROUPING SETS (('str', tuple(tuple(number))), ('str')) order by all; SELECT materialize(3) + 3 FROM numbers(10) GROUP BY GROUPING SETS (('str', materialize(materialize(3))), ('str')) order by all; - diff --git a/tests/queries/0_stateless/03015_analyzer_groupby_fuzz_60772.sql b/tests/queries/0_stateless/03015_analyzer_groupby_fuzz_60772.sql index d3bd9ef0ce3..5190b8635d6 100644 --- a/tests/queries/0_stateless/03015_analyzer_groupby_fuzz_60772.sql +++ b/tests/queries/0_stateless/03015_analyzer_groupby_fuzz_60772.sql @@ -7,7 +7,7 @@ GROUP BY toFixedString(toFixedString('2018-01-02 22:33:44', 19), 19), 'gr', '2018-01-02 22:33:44' -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; -- WITH CUBE (note that result is different with the analyzer (analyzer is correct including all combinations) SELECT @@ -20,4 +20,4 @@ GROUP BY 'gr', '2018-01-02 22:33:44' WITH CUBE -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03016_analyzer_groupby_fuzz_59796.sql b/tests/queries/0_stateless/03016_analyzer_groupby_fuzz_59796.sql index c00a75a631c..6c926c32887 100644 --- a/tests/queries/0_stateless/03016_analyzer_groupby_fuzz_59796.sql +++ b/tests/queries/0_stateless/03016_analyzer_groupby_fuzz_59796.sql @@ -3,4 +3,4 @@ SELECT GROUP BY concat(unhex('00'), toFixedString(materialize(toFixedString(' key="v" ', 9)), 9), toFixedString(toFixedString('00', 2), toNullable(2)), toFixedString(toFixedString(toFixedString(' key="v" ', 9), 9), 9)), concat(' key="v" ') -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03017_analyzer_groupby_fuzz_61600.sql b/tests/queries/0_stateless/03017_analyzer_groupby_fuzz_61600.sql index 53a5cfe9b1a..b22ea42b686 100644 --- a/tests/queries/0_stateless/03017_analyzer_groupby_fuzz_61600.sql +++ b/tests/queries/0_stateless/03017_analyzer_groupby_fuzz_61600.sql @@ -11,7 +11,7 @@ FROM set_index_not__fuzz_0 GROUP BY toNullable(3), concat(concat(NULLIF(1, 1), toNullable(toNullable(3)))) -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; -- WITH ROLLUP (note that result is different with the analyzer (analyzer is correct including all combinations) 
SELECT @@ -22,4 +22,4 @@ GROUP BY toNullable(3), concat(concat(NULLIF(1, 1), toNullable(toNullable(3)))) WITH ROLLUP -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03023_analyzer_optimize_group_by_function_keys_with_nulls.sql b/tests/queries/0_stateless/03023_analyzer_optimize_group_by_function_keys_with_nulls.sql index f0b60caca36..0d0a81c9105 100644 --- a/tests/queries/0_stateless/03023_analyzer_optimize_group_by_function_keys_with_nulls.sql +++ b/tests/queries/0_stateless/03023_analyzer_optimize_group_by_function_keys_with_nulls.sql @@ -1,5 +1,4 @@ -set allow_experimental_analyzer=1; +set enable_analyzer=1; set group_by_use_nulls=1; set optimize_group_by_function_keys=1; SELECT ignore(toLowCardinality(number)) FROM numbers(10) GROUP BY GROUPING SETS ((ignore(toLowCardinality(number)), toLowCardinality(number))); - diff --git a/tests/queries/0_stateless/03023_group_by_use_nulls_analyzer_crashes.sql b/tests/queries/0_stateless/03023_group_by_use_nulls_analyzer_crashes.sql index b8c173520a9..d3d6ecaadaf 100644 --- a/tests/queries/0_stateless/03023_group_by_use_nulls_analyzer_crashes.sql +++ b/tests/queries/0_stateless/03023_group_by_use_nulls_analyzer_crashes.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = 1, group_by_use_nulls = 1; +set enable_analyzer = 1, group_by_use_nulls = 1; SELECT tuple(tuple(number)) as x FROM numbers(10) GROUP BY (number, tuple(number)) with cube order by x; @@ -56,4 +56,4 @@ SELECT arraySplit(number -> toUInt8(number), []) from numbers(1) GROUP BY toUInt SELECT count(arraySplit(number -> toUInt8(number), [arraySplit(x -> toUInt8(number), [])])) FROM numbers(10) GROUP BY number, [number] WITH ROLLUP settings group_by_use_nulls=1; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -SELECT count(arraySplit(x -> toUInt8(number), [])) FROM numbers(10) GROUP BY number, [number] WITH ROLLUP settings group_by_use_nulls=1; \ No newline at end of file +SELECT count(arraySplit(x -> toUInt8(number), [])) FROM numbers(10) GROUP BY number, [number] WITH ROLLUP settings group_by_use_nulls=1; diff --git a/tests/queries/0_stateless/03023_remove_unused_column_distinct.sql b/tests/queries/0_stateless/03023_remove_unused_column_distinct.sql index c2f32bfe3c1..af8756f1fbc 100644 --- a/tests/queries/0_stateless/03023_remove_unused_column_distinct.sql +++ b/tests/queries/0_stateless/03023_remove_unused_column_distinct.sql @@ -12,4 +12,4 @@ FROM FROM numbers(10) ) ) -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03031_filter_float64_logical_error.sql b/tests/queries/0_stateless/03031_filter_float64_logical_error.sql index 59d4a06c8f7..df35cc7b14a 100644 --- a/tests/queries/0_stateless/03031_filter_float64_logical_error.sql +++ b/tests/queries/0_stateless/03031_filter_float64_logical_error.sql @@ -26,7 +26,7 @@ PREWHERE (id = NULL) AND 1024 WHERE 0.0001 GROUP BY '0.03' WITH ROLLUP -SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', allow_experimental_analyzer=0; +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', enable_analyzer=0; SELECT @@ -37,4 +37,4 @@ PREWHERE (id = NULL) AND 1024 WHERE 0.0001 GROUP BY '0.03' WITH ROLLUP -SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', allow_experimental_analyzer=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, 
value_2_idx', enable_analyzer=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } diff --git a/tests/queries/0_stateless/03031_tuple_elimination_analyzer.sql b/tests/queries/0_stateless/03031_tuple_elimination_analyzer.sql index 97a19cda7d3..42bd5004933 100644 --- a/tests/queries/0_stateless/03031_tuple_elimination_analyzer.sql +++ b/tests/queries/0_stateless/03031_tuple_elimination_analyzer.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS test; -SET allow_suspicious_low_cardinality_types = true, allow_experimental_analyzer = true; +SET allow_suspicious_low_cardinality_types = true, enable_analyzer = true; CREATE TABLE test (`id` LowCardinality(UInt32)) ENGINE = MergeTree ORDER BY id AS SELECT 0; diff --git a/tests/queries/0_stateless/03032_redundant_equals.sql b/tests/queries/0_stateless/03032_redundant_equals.sql index de85ec5cf00..63073dbcefd 100644 --- a/tests/queries/0_stateless/03032_redundant_equals.sql +++ b/tests/queries/0_stateless/03032_redundant_equals.sql @@ -9,7 +9,7 @@ ORDER BY k; INSERT INTO test_table SELECT number FROM numbers(10000000); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM test_table WHERE k in (100) = 1; SELECT * FROM test_table WHERE k = (100) = 1; @@ -25,7 +25,7 @@ SELECT * FROM test_table WHERE (NOT ((k not in (100) = 0) OR (k in (100) = 1))) SELECT * FROM test_table WHERE (NOT ((k in (101) = 0) OR (k in (100) = 1))) = 1; SELECT * FROM test_table WHERE ((k not in (101) = 0) OR (k in (100) = 1)) = 1; SELECT * FROM test_table WHERE ((k not in (99) = 1) AND (k in (100) = 1)) = 1; --- we skip optimizing queries with toNullable(0 or 1) but lets make sure they still work +-- we skip optimizing queries with toNullable(0 or 1) but lets make sure they still work SELECT * FROM test_table WHERE (k = 101) = toLowCardinality(toNullable(1)); SELECT * FROM test_table WHERE (k = 101) = toNullable(1); SELECT * FROM test_table WHERE (k = 101) = toLowCardinality(1); diff --git a/tests/queries/0_stateless/03033_analyzer_merge_engine_filter_push_down.sql b/tests/queries/0_stateless/03033_analyzer_merge_engine_filter_push_down.sql index 9be1152bbbf..d01e458a544 100644 --- a/tests/queries/0_stateless/03033_analyzer_merge_engine_filter_push_down.sql +++ b/tests/queries/0_stateless/03033_analyzer_merge_engine_filter_push_down.sql @@ -3,6 +3,5 @@ drop table if exists test; create table test (`x` LowCardinality(Nullable(UInt32)), `y` String) engine = MergeTree order by tuple(); insert into test values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); create table m_table (x UInt32, y String) engine = Merge(currentDatabase(), 'test*'); -select toTypeName(x), x FROM m_table SETTINGS additional_table_filters = {'m_table':'x != 4'}, optimize_move_to_prewhere=1, allow_experimental_analyzer=1; +select toTypeName(x), x FROM m_table SETTINGS additional_table_filters = {'m_table':'x != 4'}, optimize_move_to_prewhere=1, enable_analyzer=1; drop table test; - diff --git a/tests/queries/0_stateless/03033_analyzer_query_parameters.sh b/tests/queries/0_stateless/03033_analyzer_query_parameters.sh index cf46067df99..67ddf2449eb 100755 --- a/tests/queries/0_stateless/03033_analyzer_query_parameters.sh +++ b/tests/queries/0_stateless/03033_analyzer_query_parameters.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_LOCAL} --param_rounding 1 --query "SELECT 1 AS x ORDER BY x WITH FILL STEP {rounding:UInt32} SETTINGS allow_experimental_analyzer = 1" -${CLICKHOUSE_LOCAL} --param_rounding 1 --query "SELECT 1 AS x ORDER BY x WITH FILL STEP {rounding:UInt32} SETTINGS allow_experimental_analyzer = 0" +${CLICKHOUSE_LOCAL} --param_rounding 1 --query "SELECT 1 AS x ORDER BY x WITH FILL STEP {rounding:UInt32} SETTINGS enable_analyzer = 1" +${CLICKHOUSE_LOCAL} --param_rounding 1 --query "SELECT 1 AS x ORDER BY x WITH FILL STEP {rounding:UInt32} SETTINGS enable_analyzer = 0" diff --git a/tests/queries/0_stateless/03033_cte_numbers_memory.sql b/tests/queries/0_stateless/03033_cte_numbers_memory.sql index 66b11cbfaa5..b362f42f89a 100644 --- a/tests/queries/0_stateless/03033_cte_numbers_memory.sql +++ b/tests/queries/0_stateless/03033_cte_numbers_memory.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/61238 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; WITH (SELECT number FROM system.numbers LIMIT 1) as w1, diff --git a/tests/queries/0_stateless/03033_recursive_cte_basic.reference b/tests/queries/0_stateless/03033_recursive_cte_basic.reference index a890fccef2b..dc6a9cd8f89 100644 --- a/tests/queries/0_stateless/03033_recursive_cte_basic.reference +++ b/tests/queries/0_stateless/03033_recursive_cte_basic.reference @@ -1,6 +1,6 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH RECURSIVE recursive_cte AS (SELECT 1 AS n UNION ALL SELECT n + 1 FROM recursive_cte WHERE n < 10) SELECT n FROM recursive_cte; 1 diff --git a/tests/queries/0_stateless/03033_recursive_cte_basic.sql b/tests/queries/0_stateless/03033_recursive_cte_basic.sql index f85e1ffe0bd..63014e9ccd4 100644 --- a/tests/queries/0_stateless/03033_recursive_cte_basic.sql +++ b/tests/queries/0_stateless/03033_recursive_cte_basic.sql @@ -1,6 +1,6 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH RECURSIVE recursive_cte AS (SELECT 1 AS n UNION ALL SELECT n + 1 FROM recursive_cte WHERE n < 10) SELECT n FROM recursive_cte; diff --git a/tests/queries/0_stateless/03033_with_fill_interpolate.sql b/tests/queries/0_stateless/03033_with_fill_interpolate.sql index 0ec0050a922..48457341e0d 100644 --- a/tests/queries/0_stateless/03033_with_fill_interpolate.sql +++ b/tests/queries/0_stateless/03033_with_fill_interpolate.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/55794 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS 03033_example_table; CREATE TABLE 03033_example_table diff --git a/tests/queries/0_stateless/03034_normalized_ast.sql b/tests/queries/0_stateless/03034_normalized_ast.sql index 385af4e2c34..8b518d6d11b 100644 --- a/tests/queries/0_stateless/03034_normalized_ast.sql +++ b/tests/queries/0_stateless/03034_normalized_ast.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/49472 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT concat(database, table) AS name, count() diff --git a/tests/queries/0_stateless/03034_recursive_cte_tree.sql b/tests/queries/0_stateless/03034_recursive_cte_tree.sql index a3ff43e435a..fa62298939a 100644 --- a/tests/queries/0_stateless/03034_recursive_cte_tree.sql +++ b/tests/queries/0_stateless/03034_recursive_cte_tree.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS tree; CREATE TABLE tree diff --git 
a/tests/queries/0_stateless/03034_recursive_cte_tree_fuzz_crash_fix.sql b/tests/queries/0_stateless/03034_recursive_cte_tree_fuzz_crash_fix.sql index ae04fa8f377..1e26c53769b 100644 --- a/tests/queries/0_stateless/03034_recursive_cte_tree_fuzz_crash_fix.sql +++ b/tests/queries/0_stateless/03034_recursive_cte_tree_fuzz_crash_fix.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET enable_global_with_statement=1; SET session_timezone = 'Etc/UTC'; diff --git a/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.reference b/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.reference index 85f8c3267c6..f5b950db867 100644 --- a/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.reference +++ b/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.reference @@ -1,6 +1,6 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS tree; CREATE TABLE tree ( diff --git a/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.sql b/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.sql index 15acbaf6da9..231aae296e6 100644 --- a/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.sql +++ b/tests/queries/0_stateless/03034_recursive_cte_tree_merge_tree.sql @@ -1,6 +1,6 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS tree; CREATE TABLE tree diff --git a/tests/queries/0_stateless/03035_alias_column_bug_distributed.sql b/tests/queries/0_stateless/03035_alias_column_bug_distributed.sql index 74463743b01..8f60808d700 100644 --- a/tests/queries/0_stateless/03035_alias_column_bug_distributed.sql +++ b/tests/queries/0_stateless/03035_alias_column_bug_distributed.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/44414 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS alias_bug; DROP TABLE IF EXISTS alias_bug_dist; CREATE TABLE alias_bug diff --git a/tests/queries/0_stateless/03035_internal_functions_direct_call.sql b/tests/queries/0_stateless/03035_internal_functions_direct_call.sql index 951e0733dbb..e358e498343 100644 --- a/tests/queries/0_stateless/03035_internal_functions_direct_call.sql +++ b/tests/queries/0_stateless/03035_internal_functions_direct_call.sql @@ -4,7 +4,7 @@ SELECT __actionName(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } SELECT __actionName('aaa', 'aaa', 'aaa'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -SELECT __actionName('aaa', '') SETTINGS allow_experimental_analyzer = 1; -- { serverError BAD_ARGUMENTS } +SELECT __actionName('aaa', '') SETTINGS enable_analyzer = 1; -- { serverError BAD_ARGUMENTS } SELECT __actionName('aaa', materialize('aaa')); -- { serverError BAD_ARGUMENTS,ILLEGAL_COLUMN } SELECT __actionName(materialize('aaa'), 'aaa'); -- { serverError ILLEGAL_COLUMN } SELECT __actionName('aaa', 'aaa'); diff --git a/tests/queries/0_stateless/03035_recursive_cte_postgres_1.reference b/tests/queries/0_stateless/03035_recursive_cte_postgres_1.reference index e6cbdc971eb..568421f3fba 100644 --- a/tests/queries/0_stateless/03035_recursive_cte_postgres_1.reference +++ b/tests/queries/0_stateless/03035_recursive_cte_postgres_1.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- WITH RECURSIVE -- sum of 1..100 diff --git a/tests/queries/0_stateless/03035_recursive_cte_postgres_1.sql b/tests/queries/0_stateless/03035_recursive_cte_postgres_1.sql index 
8026dadc331..9a4e313ce90 100644 --- a/tests/queries/0_stateless/03035_recursive_cte_postgres_1.sql +++ b/tests/queries/0_stateless/03035_recursive_cte_postgres_1.sql @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- WITH RECURSIVE diff --git a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql index d94a68aa4d8..e1a13d1ce71 100644 --- a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql +++ b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET optimize_move_to_prewhere = 0; SET query_plan_convert_outer_join_to_inner_join = 0; diff --git a/tests/queries/0_stateless/03036_recursive_cte_postgres_2.reference b/tests/queries/0_stateless/03036_recursive_cte_postgres_2.reference index adc06d2edd2..a10e1da6907 100644 --- a/tests/queries/0_stateless/03036_recursive_cte_postgres_2.reference +++ b/tests/queries/0_stateless/03036_recursive_cte_postgres_2.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- Some examples with a tree -- diff --git a/tests/queries/0_stateless/03036_recursive_cte_postgres_2.sql b/tests/queries/0_stateless/03036_recursive_cte_postgres_2.sql index f06b1c62426..b8e850b4721 100644 --- a/tests/queries/0_stateless/03036_recursive_cte_postgres_2.sql +++ b/tests/queries/0_stateless/03036_recursive_cte_postgres_2.sql @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- Some examples with a tree diff --git a/tests/queries/0_stateless/03036_with_numbers.sql b/tests/queries/0_stateless/03036_with_numbers.sql index 3463ce826e2..bd0f6b6179c 100644 --- a/tests/queries/0_stateless/03036_with_numbers.sql +++ b/tests/queries/0_stateless/03036_with_numbers.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/13843 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; WITH 10 AS n SELECT * FROM numbers(n); diff --git a/tests/queries/0_stateless/03037_recursive_cte_postgres_3.reference b/tests/queries/0_stateless/03037_recursive_cte_postgres_3.reference index 3f8f47ac23e..f0ff70a7c60 100644 --- a/tests/queries/0_stateless/03037_recursive_cte_postgres_3.reference +++ b/tests/queries/0_stateless/03037_recursive_cte_postgres_3.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- different tree example -- diff --git a/tests/queries/0_stateless/03037_recursive_cte_postgres_3.sql b/tests/queries/0_stateless/03037_recursive_cte_postgres_3.sql index 3c74ee889c4..213e8bc995d 100644 --- a/tests/queries/0_stateless/03037_recursive_cte_postgres_3.sql +++ b/tests/queries/0_stateless/03037_recursive_cte_postgres_3.sql @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- different tree example diff --git a/tests/queries/0_stateless/03037_union_view.sql b/tests/queries/0_stateless/03037_union_view.sql index 3ea81b829ba..d963444fd91 100644 --- a/tests/queries/0_stateless/03037_union_view.sql +++ b/tests/queries/0_stateless/03037_union_view.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/55803 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS broken_table; DROP TABLE IF EXISTS broken_view; @@ -28,4 +28,4 @@ FROM broken_view v FINAL WHERE v.start IS 
NOT NULL; DROP TABLE IF EXISTS broken_table; -DROP TABLE IF EXISTS broken_view; \ No newline at end of file +DROP TABLE IF EXISTS broken_view; diff --git a/tests/queries/0_stateless/03038_ambiguous_column.sql b/tests/queries/0_stateless/03038_ambiguous_column.sql index 9df3cd9bc9b..131bc552f56 100644 --- a/tests/queries/0_stateless/03038_ambiguous_column.sql +++ b/tests/queries/0_stateless/03038_ambiguous_column.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/48308 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS 03038_table; CREATE TABLE 03038_table diff --git a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference index b920fc298b3..cf070eebc38 100644 --- a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference +++ b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- test cycle detection -- diff --git a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql index a6fe3b1e55c..7dad74893b9 100644 --- a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql +++ b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- test cycle detection diff --git a/tests/queries/0_stateless/03039_recursive_cte_postgres_5.reference b/tests/queries/0_stateless/03039_recursive_cte_postgres_5.reference index 2910ac174e7..43399a0ab95 100644 --- a/tests/queries/0_stateless/03039_recursive_cte_postgres_5.reference +++ b/tests/queries/0_stateless/03039_recursive_cte_postgres_5.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- test multiple WITH queries -- diff --git a/tests/queries/0_stateless/03039_recursive_cte_postgres_5.sql b/tests/queries/0_stateless/03039_recursive_cte_postgres_5.sql index e188a455434..eb4043cca42 100644 --- a/tests/queries/0_stateless/03039_recursive_cte_postgres_5.sql +++ b/tests/queries/0_stateless/03039_recursive_cte_postgres_5.sql @@ -33,7 +33,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- test multiple WITH queries diff --git a/tests/queries/0_stateless/03039_unknown_identifier_window_function.sql b/tests/queries/0_stateless/03039_unknown_identifier_window_function.sql index 640d217d2f9..652085d9f5a 100644 --- a/tests/queries/0_stateless/03039_unknown_identifier_window_function.sql +++ b/tests/queries/0_stateless/03039_unknown_identifier_window_function.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/45535 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT *, diff --git a/tests/queries/0_stateless/03040_alias_column_join.sql b/tests/queries/0_stateless/03040_alias_column_join.sql index 54f579c0feb..6ffd749a6c4 100644 --- a/tests/queries/0_stateless/03040_alias_column_join.sql +++ b/tests/queries/0_stateless/03040_alias_column_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/44365 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS 03040_test; CREATE TABLE 03040_test diff --git a/tests/queries/0_stateless/03040_array_sum_and_join.sql b/tests/queries/0_stateless/03040_array_sum_and_join.sql index 9aeddc9f765..90d3d83c9a2 100644 --- 
a/tests/queries/0_stateless/03040_array_sum_and_join.sql +++ b/tests/queries/0_stateless/03040_array_sum_and_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select t.1 as cnt, t.2 as name, diff --git a/tests/queries/0_stateless/03040_recursive_cte_postgres_6.reference b/tests/queries/0_stateless/03040_recursive_cte_postgres_6.reference index 2d9d7bb9f6c..7ccc3df9f0d 100644 --- a/tests/queries/0_stateless/03040_recursive_cte_postgres_6.reference +++ b/tests/queries/0_stateless/03040_recursive_cte_postgres_6.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- error cases -- diff --git a/tests/queries/0_stateless/03040_recursive_cte_postgres_6.sql b/tests/queries/0_stateless/03040_recursive_cte_postgres_6.sql index bff2ece6ece..6a6044198c8 100644 --- a/tests/queries/0_stateless/03040_recursive_cte_postgres_6.sql +++ b/tests/queries/0_stateless/03040_recursive_cte_postgres_6.sql @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- -- error cases diff --git a/tests/queries/0_stateless/03041_analyzer_gigachad_join.sql b/tests/queries/0_stateless/03041_analyzer_gigachad_join.sql index 7906e65f8b8..88f7fc562b1 100644 --- a/tests/queries/0_stateless/03041_analyzer_gigachad_join.sql +++ b/tests/queries/0_stateless/03041_analyzer_gigachad_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE IF NOT EXISTS first engine = MergeTree PARTITION BY (inn, toYYYYMM(received)) ORDER BY (inn, sessionId) AS SELECT now() AS received, '123456789' AS inn, '42' AS sessionId; diff --git a/tests/queries/0_stateless/03041_recursive_cte_postgres_7.reference b/tests/queries/0_stateless/03041_recursive_cte_postgres_7.reference index 6bcc3f89dcb..b98b2ec7e89 100644 --- a/tests/queries/0_stateless/03041_recursive_cte_postgres_7.reference +++ b/tests/queries/0_stateless/03041_recursive_cte_postgres_7.reference @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH RECURSIVE foo AS (SELECT 1 AS i UNION ALL diff --git a/tests/queries/0_stateless/03041_recursive_cte_postgres_7.sql b/tests/queries/0_stateless/03041_recursive_cte_postgres_7.sql index 71c34b7361f..5f4455efcc6 100644 --- a/tests/queries/0_stateless/03041_recursive_cte_postgres_7.sql +++ b/tests/queries/0_stateless/03041_recursive_cte_postgres_7.sql @@ -32,7 +32,7 @@ -- { echoOn } -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; WITH RECURSIVE foo AS (SELECT 1 AS i diff --git a/tests/queries/0_stateless/03041_select_with_query_result.sql b/tests/queries/0_stateless/03041_select_with_query_result.sql index 061223b43e1..e5897ea12cf 100644 --- a/tests/queries/0_stateless/03041_select_with_query_result.sql +++ b/tests/queries/0_stateless/03041_select_with_query_result.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/44153 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS parent; DROP TABLE IF EXISTS join_table_1; DROP TABLE IF EXISTS join_table_2; diff --git a/tests/queries/0_stateless/03042_analyzer_alias_join.sql b/tests/queries/0_stateless/03042_analyzer_alias_join.sql index dac3b6a4983..d9a8d8b4c7b 100644 --- a/tests/queries/0_stateless/03042_analyzer_alias_join.sql +++ b/tests/queries/0_stateless/03042_analyzer_alias_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/14978 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE 
test1(id UInt64, t1value UInt64) ENGINE=MergeTree ORDER BY tuple(); CREATE TABLE test2(id UInt64, t2value String) ENGINE=MergeTree ORDER BY tuple(); diff --git a/tests/queries/0_stateless/03042_not_found_column_c1.sql b/tests/queries/0_stateless/03042_not_found_column_c1.sql index b4dce2af489..08202dc0dca 100644 --- a/tests/queries/0_stateless/03042_not_found_column_c1.sql +++ b/tests/queries/0_stateless/03042_not_found_column_c1.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/42399 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE IF NOT EXISTS t0 (c0 Int32) ENGINE = Memory() ; CREATE TABLE t1 (c0 Int32, c1 Int32, c2 Int32) ENGINE = Memory() ; diff --git a/tests/queries/0_stateless/03043_group_array_result_is_expected.sql b/tests/queries/0_stateless/03043_group_array_result_is_expected.sql index 5311927ae3c..e2c79e5c41e 100644 --- a/tests/queries/0_stateless/03043_group_array_result_is_expected.sql +++ b/tests/queries/0_stateless/03043_group_array_result_is_expected.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/27115 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; drop table if exists fill_ex; create table fill_ex ( diff --git a/tests/queries/0_stateless/03044_analyzer_alias_join.sql b/tests/queries/0_stateless/03044_analyzer_alias_join.sql index 3ab8edb005f..7636edbb411 100644 --- a/tests/queries/0_stateless/03044_analyzer_alias_join.sql +++ b/tests/queries/0_stateless/03044_analyzer_alias_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/17319 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TEMPORARY TABLE hits (date Date, data Float64) engine=Memory(); SELECT diff --git a/tests/queries/0_stateless/03044_array_join_columns_in_nested_table.sql b/tests/queries/0_stateless/03044_array_join_columns_in_nested_table.sql index 0cf05763202..4885b7e3f81 100644 --- a/tests/queries/0_stateless/03044_array_join_columns_in_nested_table.sql +++ b/tests/queries/0_stateless/03044_array_join_columns_in_nested_table.sql @@ -1,3 +1,3 @@ -- https://github.com/ClickHouse/ClickHouse/issues/11813 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select 1 from (select 1 x) l join (select 1 y, [1] a) r on l.x = r.y array join r.a; diff --git a/tests/queries/0_stateless/03045_analyzer_alias_join_with_if.sql b/tests/queries/0_stateless/03045_analyzer_alias_join_with_if.sql index ee8756b9460..cbc46726467 100644 --- a/tests/queries/0_stateless/03045_analyzer_alias_join_with_if.sql +++ b/tests/queries/0_stateless/03045_analyzer_alias_join_with_if.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/13210 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE test_a_table ( name String, a_col String diff --git a/tests/queries/0_stateless/03045_unknown_identifier_alias_substitution.sql b/tests/queries/0_stateless/03045_unknown_identifier_alias_substitution.sql index d97dfc880b3..967b7b24787 100644 --- a/tests/queries/0_stateless/03045_unknown_identifier_alias_substitution.sql +++ b/tests/queries/0_stateless/03045_unknown_identifier_alias_substitution.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23053 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS repl_tbl; CREATE TEMPORARY TABLE repl_tbl diff --git a/tests/queries/0_stateless/03046_column_in_block_array_join.sql b/tests/queries/0_stateless/03046_column_in_block_array_join.sql index c6b4613af3f..f91a18da831 100644 --- 
a/tests/queries/0_stateless/03046_column_in_block_array_join.sql +++ b/tests/queries/0_stateless/03046_column_in_block_array_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/37729 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS nested_test; DROP TABLE IF EXISTS join_test; diff --git a/tests/queries/0_stateless/03047_analyzer_alias_join.sql b/tests/queries/0_stateless/03047_analyzer_alias_join.sql index 7d44c92b6f1..29fc711aaf4 100644 --- a/tests/queries/0_stateless/03047_analyzer_alias_join.sql +++ b/tests/queries/0_stateless/03047_analyzer_alias_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT 1 AS value, * diff --git a/tests/queries/0_stateless/03047_group_by_field_identified_aggregation.sql b/tests/queries/0_stateless/03047_group_by_field_identified_aggregation.sql index cfaf1df44bd..d12e85ca03d 100644 --- a/tests/queries/0_stateless/03047_group_by_field_identified_aggregation.sql +++ b/tests/queries/0_stateless/03047_group_by_field_identified_aggregation.sql @@ -1,4 +1,4 @@ -- https://github.com/ClickHouse/ClickHouse/issues/32639 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT 0 AND id ? 1 : 2 AS a, sum(id) FROM (SELECT 1 AS id) GROUP BY a; diff --git a/tests/queries/0_stateless/03048_not_found_column_xxx_in_block.sql b/tests/queries/0_stateless/03048_not_found_column_xxx_in_block.sql index 42fd581e142..f511ea81e26 100644 --- a/tests/queries/0_stateless/03048_not_found_column_xxx_in_block.sql +++ b/tests/queries/0_stateless/03048_not_found_column_xxx_in_block.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/41964 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS ab_12_aaa; DROP TABLE IF EXISTS ab_12_bbb; diff --git a/tests/queries/0_stateless/03049_analyzer_group_by_alias.sql b/tests/queries/0_stateless/03049_analyzer_group_by_alias.sql index d25babe6788..712d1c27e6e 100644 --- a/tests/queries/0_stateless/03049_analyzer_group_by_alias.sql +++ b/tests/queries/0_stateless/03049_analyzer_group_by_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/7520 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE test (`a` UInt32, `b` UInt32) ENGINE = Memory; INSERT INTO test VALUES (1,2), (1,3), (2,4); diff --git a/tests/queries/0_stateless/03049_unknown_identifier_materialized_column.sql b/tests/queries/0_stateless/03049_unknown_identifier_materialized_column.sql index 938f270b9e4..0efe59a1f1c 100644 --- a/tests/queries/0_stateless/03049_unknown_identifier_materialized_column.sql +++ b/tests/queries/0_stateless/03049_unknown_identifier_materialized_column.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/54317 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/tests/queries/0_stateless/03050_select_one_one_one.sql b/tests/queries/0_stateless/03050_select_one_one_one.sql index 28a55e0c471..09f3f20c35d 100644 --- a/tests/queries/0_stateless/03050_select_one_one_one.sql +++ b/tests/queries/0_stateless/03050_select_one_one_one.sql @@ -1,4 +1,4 @@ -- https://github.com/ClickHouse/ClickHouse/issues/36973 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT 1, 1, 1; SELECT * FROM (SELECT 1, 1, 1); diff --git a/tests/queries/0_stateless/03051_many_ctes.sql b/tests/queries/0_stateless/03051_many_ctes.sql index 
d4e613bd279..e442813b6a2 100644 --- a/tests/queries/0_stateless/03051_many_ctes.sql +++ b/tests/queries/0_stateless/03051_many_ctes.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/40955 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; WITH toInt64(2) AS new_x SELECT new_x AS x FROM (SELECT 1 AS x) t; WITH toInt64(2) AS new_x SELECT * replace(new_x as x) FROM (SELECT 1 AS x) t; SELECT 2 AS x FROM (SELECT 1 AS x) t; diff --git a/tests/queries/0_stateless/03052_query_hash_includes_aliases.sql b/tests/queries/0_stateless/03052_query_hash_includes_aliases.sql index 24e9ab0f36e..55993175bf3 100644 --- a/tests/queries/0_stateless/03052_query_hash_includes_aliases.sql +++ b/tests/queries/0_stateless/03052_query_hash_includes_aliases.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/pull/40065 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT ( diff --git a/tests/queries/0_stateless/03053_analyzer_join_alias.sql b/tests/queries/0_stateless/03053_analyzer_join_alias.sql index 894b8af7c6f..677cf9d4d5e 100644 --- a/tests/queries/0_stateless/03053_analyzer_join_alias.sql +++ b/tests/queries/0_stateless/03053_analyzer_join_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23104 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/tests/queries/0_stateless/03054_analyzer_join_alias.sql b/tests/queries/0_stateless/03054_analyzer_join_alias.sql index e124aa33a9b..f018f57cc6f 100644 --- a/tests/queries/0_stateless/03054_analyzer_join_alias.sql +++ b/tests/queries/0_stateless/03054_analyzer_join_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/21584 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT count() FROM ( diff --git a/tests/queries/0_stateless/03055_analyzer_subquery_group_array.sql b/tests/queries/0_stateless/03055_analyzer_subquery_group_array.sql index 25b6dcb3564..29ba1dd7c5b 100644 --- a/tests/queries/0_stateless/03055_analyzer_subquery_group_array.sql +++ b/tests/queries/0_stateless/03055_analyzer_subquery_group_array.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23344 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT logTrace(repeat('Hello', 100)), ignore(*) FROM ( SELECT ignore((SELECT groupArrayState(([number], [number])) FROM numbers(19000))) diff --git a/tests/queries/0_stateless/03057_analyzer_subquery_alias_join.sql b/tests/queries/0_stateless/03057_analyzer_subquery_alias_join.sql index 2217af327fa..92f603ed595 100644 --- a/tests/queries/0_stateless/03057_analyzer_subquery_alias_join.sql +++ b/tests/queries/0_stateless/03057_analyzer_subquery_alias_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/10276 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT sum(x.n) as n, sum(z.n) as n2 diff --git a/tests/queries/0_stateless/03058_analyzer_ambiguous_columns.sql b/tests/queries/0_stateless/03058_analyzer_ambiguous_columns.sql index 3cce77f0240..ef3c0e5f63d 100644 --- a/tests/queries/0_stateless/03058_analyzer_ambiguous_columns.sql +++ b/tests/queries/0_stateless/03058_analyzer_ambiguous_columns.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/4567 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS fact; DROP TABLE IF EXISTS animals; DROP TABLE IF EXISTS colors; @@ -23,4 +23,3 @@ select id, 
animal_name, animal_key, color_name, color_key from fact a left join (select toInt64(animal_key) animal_key, animal_name from animals) b on (a.animal_key = b.animal_key) left join (select toInt64(color_key) color_key, color_name from colors) c on (a.color_key = c.color_key); -- { serverError AMBIGUOUS_IDENTIFIER } - diff --git a/tests/queries/0_stateless/03059_analyzer_join_engine_missing_column.sql b/tests/queries/0_stateless/03059_analyzer_join_engine_missing_column.sql index 27782462075..164a42e5ba3 100644 --- a/tests/queries/0_stateless/03059_analyzer_join_engine_missing_column.sql +++ b/tests/queries/0_stateless/03059_analyzer_join_engine_missing_column.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/17710 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE id_val(id UInt32, val UInt32) ENGINE = Memory; CREATE TABLE id_val_join0(id UInt32, val UInt8) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 0; CREATE TABLE id_val_join1(id UInt32, val UInt8) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1; diff --git a/tests/queries/0_stateless/03060_analyzer_regular_view_alias.sql b/tests/queries/0_stateless/03060_analyzer_regular_view_alias.sql index f8cd8690ee5..0556683b97a 100644 --- a/tests/queries/0_stateless/03060_analyzer_regular_view_alias.sql +++ b/tests/queries/0_stateless/03060_analyzer_regular_view_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/11068 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table vt(datetime_value DateTime, value Float64) Engine=Memory; create view computed_datum_hours as diff --git a/tests/queries/0_stateless/03061_analyzer_alias_as_right_key_in_join.sql b/tests/queries/0_stateless/03061_analyzer_alias_as_right_key_in_join.sql index 6fee6d1f73d..a1b50967a46 100644 --- a/tests/queries/0_stateless/03061_analyzer_alias_as_right_key_in_join.sql +++ b/tests/queries/0_stateless/03061_analyzer_alias_as_right_key_in_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/24395 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE xxxx_yyy (key UInt32, key_b ALIAS key) ENGINE=MergeTree() ORDER BY key; INSERT INTO xxxx_yyy SELECT number FROM numbers(10); diff --git a/tests/queries/0_stateless/03062_analyzer_join_engine_missing_column.sql b/tests/queries/0_stateless/03062_analyzer_join_engine_missing_column.sql index 9748175e4d4..487d74b3317 100644 --- a/tests/queries/0_stateless/03062_analyzer_join_engine_missing_column.sql +++ b/tests/queries/0_stateless/03062_analyzer_join_engine_missing_column.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23416 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table test (TOPIC String, PARTITION UInt64, OFFSET UInt64, ID UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03062', 'r2') ORDER BY (TOPIC, PARTITION, OFFSET); create table test_join (TOPIC String, PARTITION UInt64, OFFSET UInt64) ENGINE = Join(ANY, LEFT, `TOPIC`, `PARTITION`) SETTINGS join_any_take_last_row = 1; diff --git a/tests/queries/0_stateless/03063_analyzer_multi_join_wrong_table_specifier.sql b/tests/queries/0_stateless/03063_analyzer_multi_join_wrong_table_specifier.sql index 7eab1fa846a..5655d4a0110 100644 --- a/tests/queries/0_stateless/03063_analyzer_multi_join_wrong_table_specifier.sql +++ b/tests/queries/0_stateless/03063_analyzer_multi_join_wrong_table_specifier.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23162 -SET 
allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE t1 ( k Int64, x Int64) ENGINE = Memory; CREATE TABLE t2( x Int64 ) ENGINE = Memory; @@ -14,4 +14,3 @@ WHERE (t1.d >= now()); -- { serverError UNKNOWN_IDENTIFIER } SELECT * FROM t1 INNER JOIN s ON t1.k = s.k WHERE (t1.d >= now()); -- { serverError UNKNOWN_IDENTIFIER } - diff --git a/tests/queries/0_stateless/03064_analyzer_named_subqueries.sql b/tests/queries/0_stateless/03064_analyzer_named_subqueries.sql index 59ebb9d9af3..d5696411091 100644 --- a/tests/queries/0_stateless/03064_analyzer_named_subqueries.sql +++ b/tests/queries/0_stateless/03064_analyzer_named_subqueries.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/25655 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT sum(t.b) / 1 a, sum(t.a) diff --git a/tests/queries/0_stateless/03065_analyzer_cross_join_and_array_join.sql b/tests/queries/0_stateless/03065_analyzer_cross_join_and_array_join.sql index 7e6befe181e..5034e2eed8f 100644 --- a/tests/queries/0_stateless/03065_analyzer_cross_join_and_array_join.sql +++ b/tests/queries/0_stateless/03065_analyzer_cross_join_and_array_join.sql @@ -1,3 +1,3 @@ -- https://github.com/ClickHouse/ClickHouse/issues/11757 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select * from (select [1, 2] a) aa cross join (select [3, 4] b) bb array join aa.a, bb.b; diff --git a/tests/queries/0_stateless/03066_analyzer_global_with_statement.sql b/tests/queries/0_stateless/03066_analyzer_global_with_statement.sql index 8983be242c3..2b879ed73da 100644 --- a/tests/queries/0_stateless/03066_analyzer_global_with_statement.sql +++ b/tests/queries/0_stateless/03066_analyzer_global_with_statement.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; WITH 0 AS test SELECT * FROM diff --git a/tests/queries/0_stateless/03067_analyzer_complex_alias_join.sql b/tests/queries/0_stateless/03067_analyzer_complex_alias_join.sql index 052a9eaf734..58845b93771 100644 --- a/tests/queries/0_stateless/03067_analyzer_complex_alias_join.sql +++ b/tests/queries/0_stateless/03067_analyzer_complex_alias_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; with d as (select 'key'::Varchar(255) c, 'x'::Varchar(255) s) SELECT r1, c as r2 FROM ( diff --git a/tests/queries/0_stateless/03068_analyzer_distributed_join.sql b/tests/queries/0_stateless/03068_analyzer_distributed_join.sql index 542380feb7c..459c8f5e8ac 100644 --- a/tests/queries/0_stateless/03068_analyzer_distributed_join.sql +++ b/tests/queries/0_stateless/03068_analyzer_distributed_join.sql @@ -1,7 +1,7 @@ -- Tags: no-replicated-database -- Closes: https://github.com/ClickHouse/ClickHouse/issues/6571 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE LINEITEM_shard ON CLUSTER test_shard_localhost ( L_ORDERKEY UInt64, diff --git a/tests/queries/0_stateless/03069_analyzer_with_alias_in_array_join.sql b/tests/queries/0_stateless/03069_analyzer_with_alias_in_array_join.sql index 09d2985fe60..5ec04cbc0f5 100644 --- a/tests/queries/0_stateless/03069_analyzer_with_alias_in_array_join.sql +++ b/tests/queries/0_stateless/03069_analyzer_with_alias_in_array_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/4432 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; WITH [1, 2] AS zz SELECT x FROM system.one diff --git a/tests/queries/0_stateless/03070_analyzer_CTE_scalar_as_numbers.sql b/tests/queries/0_stateless/03070_analyzer_CTE_scalar_as_numbers.sql index 
7aadab2ca73..a94ae811476 100644 --- a/tests/queries/0_stateless/03070_analyzer_CTE_scalar_as_numbers.sql +++ b/tests/queries/0_stateless/03070_analyzer_CTE_scalar_as_numbers.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/8259 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; with (select 25) as something select *, something diff --git a/tests/queries/0_stateless/03071_analyzer_array_join_forbid_non_existing_columns.sql b/tests/queries/0_stateless/03071_analyzer_array_join_forbid_non_existing_columns.sql index e2eb758d649..211fa2a3119 100644 --- a/tests/queries/0_stateless/03071_analyzer_array_join_forbid_non_existing_columns.sql +++ b/tests/queries/0_stateless/03071_analyzer_array_join_forbid_non_existing_columns.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/9233 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT * FROM ( diff --git a/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql b/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql index 7745bceca0b..dc5fb5085fb 100644 --- a/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql +++ b/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql @@ -59,4 +59,4 @@ FROM ) WHERE (address = hex2bytes('0xd387a6e4e84a6c86bd90c158c6028a58cc8ac459')) AND (transfer_id NOT LIKE 'gas%') AND (value > 0) AND (dictGetOrDefault(token_data_map, 'is_blacklisted', (token_address_hex, 'zksync'), true)) ) -SETTINGS max_threads = 1, short_circuit_function_evaluation = 'enable', allow_experimental_analyzer = 0; \ No newline at end of file +SETTINGS max_threads = 1, short_circuit_function_evaluation = 'enable', enable_analyzer = 0; diff --git a/tests/queries/0_stateless/03072_analyzer_missing_columns_from_subquery.sql b/tests/queries/0_stateless/03072_analyzer_missing_columns_from_subquery.sql index e2846033913..ec3b067cbdf 100644 --- a/tests/queries/0_stateless/03072_analyzer_missing_columns_from_subquery.sql +++ b/tests/queries/0_stateless/03072_analyzer_missing_columns_from_subquery.sql @@ -1,3 +1,3 @@ -- https://github.com/ClickHouse/ClickHouse/issues/14699 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select * from (select number from numbers(1)) where not ignore(*); diff --git a/tests/queries/0_stateless/03073_analyzer_alias_as_column_name.sql b/tests/queries/0_stateless/03073_analyzer_alias_as_column_name.sql index 5599324c62b..bba51e28ba3 100644 --- a/tests/queries/0_stateless/03073_analyzer_alias_as_column_name.sql +++ b/tests/queries/0_stateless/03073_analyzer_alias_as_column_name.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/27068 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE test ( id String, create_time DateTime ) ENGINE = MergeTree ORDER BY id; insert into test values(1,'1970-02-01 00:00:00'); diff --git a/tests/queries/0_stateless/03074_analyzer_alias_column_in_view.sql b/tests/queries/0_stateless/03074_analyzer_alias_column_in_view.sql index 4df5f6f48e6..314b6c0e8d6 100644 --- a/tests/queries/0_stateless/03074_analyzer_alias_column_in_view.sql +++ b/tests/queries/0_stateless/03074_analyzer_alias_column_in_view.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/28687 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create view alias (dummy int, n alias dummy) as select * from system.one; select n from alias; diff --git a/tests/queries/0_stateless/03075_analyzer_subquery_alias.sql b/tests/queries/0_stateless/03075_analyzer_subquery_alias.sql index 
416815e761b..4f097350da6 100644 --- a/tests/queries/0_stateless/03075_analyzer_subquery_alias.sql +++ b/tests/queries/0_stateless/03075_analyzer_subquery_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/28777 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT sum(q0.a2) AS a1, sum(q0.a1) AS a9 diff --git a/tests/queries/0_stateless/03076_analyzer_multiple_joins_alias.sql b/tests/queries/0_stateless/03076_analyzer_multiple_joins_alias.sql index 7ac9fe6b446..894e3bc56ee 100644 --- a/tests/queries/0_stateless/03076_analyzer_multiple_joins_alias.sql +++ b/tests/queries/0_stateless/03076_analyzer_multiple_joins_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/29734 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT * FROM ( @@ -49,4 +49,3 @@ INNER JOIN SELECT number AS x FROM numbers(10) ) AS c ON a.x = c.x; -- { serverError UNKNOWN_IDENTIFIER } - diff --git a/tests/queries/0_stateless/03077_analyzer_multi_scalar_subquery_aliases.sql b/tests/queries/0_stateless/03077_analyzer_multi_scalar_subquery_aliases.sql index 5a181023c57..d4335d35e51 100644 --- a/tests/queries/0_stateless/03077_analyzer_multi_scalar_subquery_aliases.sql +++ b/tests/queries/0_stateless/03077_analyzer_multi_scalar_subquery_aliases.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/33825 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE t1 (i Int64, j Int64) ENGINE = Memory; INSERT INTO t1 SELECT number, number FROM system.numbers LIMIT 10; SELECT diff --git a/tests/queries/0_stateless/03078_analyzer_multi_scalar_subquery_aliases.sql b/tests/queries/0_stateless/03078_analyzer_multi_scalar_subquery_aliases.sql index d91a9ed106d..b9b850619ea 100644 --- a/tests/queries/0_stateless/03078_analyzer_multi_scalar_subquery_aliases.sql +++ b/tests/queries/0_stateless/03078_analyzer_multi_scalar_subquery_aliases.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/33825 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE t2 (first_column Int64, second_column Int64) ENGINE = Memory; INSERT INTO t2 SELECT number, number FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/03079_analyzer_numeric_literals_as_column_names.sql b/tests/queries/0_stateless/03079_analyzer_numeric_literals_as_column_names.sql index 955d3b49a00..80e681c0776 100644 --- a/tests/queries/0_stateless/03079_analyzer_numeric_literals_as_column_names.sql +++ b/tests/queries/0_stateless/03079_analyzer_numeric_literals_as_column_names.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE testdata (`1` String) ENGINE=MergeTree ORDER BY tuple(); INSERT INTO testdata VALUES ('testdata'); diff --git a/tests/queries/0_stateless/03080_analyzer_prefer_column_name_to_alias__virtual_columns.sql b/tests/queries/0_stateless/03080_analyzer_prefer_column_name_to_alias__virtual_columns.sql index 01ab868f9ea..2138828cd27 100644 --- a/tests/queries/0_stateless/03080_analyzer_prefer_column_name_to_alias__virtual_columns.sql +++ b/tests/queries/0_stateless/03080_analyzer_prefer_column_name_to_alias__virtual_columns.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/35652 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE test ( id UInt64 ) diff --git a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql index 7682e6ce866..a34c71a44e2 100644 --- 
a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql +++ b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/29838 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET distributed_foreground_insert=1; CREATE TABLE first_table_lr diff --git a/tests/queries/0_stateless/03081_analyzer_agg_func_CTE.sql b/tests/queries/0_stateless/03081_analyzer_agg_func_CTE.sql index e6a540dc5df..3cb02512a7f 100644 --- a/tests/queries/0_stateless/03081_analyzer_agg_func_CTE.sql +++ b/tests/queries/0_stateless/03081_analyzer_agg_func_CTE.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/36189 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE test ( `dt` Date, diff --git a/tests/queries/0_stateless/03082_analyzer_left_join_correct_column.sql b/tests/queries/0_stateless/03082_analyzer_left_join_correct_column.sql index 8f17248ed0d..3b83f978326 100644 --- a/tests/queries/0_stateless/03082_analyzer_left_join_correct_column.sql +++ b/tests/queries/0_stateless/03082_analyzer_left_join_correct_column.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/39634 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE test1 ( `pk` String, diff --git a/tests/queries/0_stateless/03084_analyzer_join_column_alias.sql b/tests/queries/0_stateless/03084_analyzer_join_column_alias.sql index 930726898b5..8a7258f5838 100644 --- a/tests/queries/0_stateless/03084_analyzer_join_column_alias.sql +++ b/tests/queries/0_stateless/03084_analyzer_join_column_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/47432 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table t1 engine = MergeTree() order by tuple() diff --git a/tests/queries/0_stateless/03085_analyzer_alias_column_group_by.sql b/tests/queries/0_stateless/03085_analyzer_alias_column_group_by.sql index fd67194b08b..c360e86197f 100644 --- a/tests/queries/0_stateless/03085_analyzer_alias_column_group_by.sql +++ b/tests/queries/0_stateless/03085_analyzer_alias_column_group_by.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/54910 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT toTypeName(stat_standard_id) AS stat_standard_id_1, count(1) AS value FROM ( SELECT 'string value' AS stat_standard_id ) GROUP BY stat_standard_id_1 LIMIT 1 diff --git a/tests/queries/0_stateless/03086_analyzer_window_func_part_of_group_by.sql b/tests/queries/0_stateless/03086_analyzer_window_func_part_of_group_by.sql index 31747328d1f..7e44b37f865 100644 --- a/tests/queries/0_stateless/03086_analyzer_window_func_part_of_group_by.sql +++ b/tests/queries/0_stateless/03086_analyzer_window_func_part_of_group_by.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/57321 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT ver, max(ver) OVER () AS ver_max diff --git a/tests/queries/0_stateless/03087_analyzer_subquery_with_alias.sql b/tests/queries/0_stateless/03087_analyzer_subquery_with_alias.sql index 6546e50c99e..a00ca4960d7 100644 --- a/tests/queries/0_stateless/03087_analyzer_subquery_with_alias.sql +++ b/tests/queries/0_stateless/03087_analyzer_subquery_with_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/59154 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT * FROM ( diff --git a/tests/queries/0_stateless/03088_analyzer_ambiguous_column_multi_call.sql 
b/tests/queries/0_stateless/03088_analyzer_ambiguous_column_multi_call.sql index e6f1ed81f91..3670404d124 100644 --- a/tests/queries/0_stateless/03088_analyzer_ambiguous_column_multi_call.sql +++ b/tests/queries/0_stateless/03088_analyzer_ambiguous_column_multi_call.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/61014 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; create database {CLICKHOUSE_DATABASE:Identifier}; diff --git a/tests/queries/0_stateless/03089_analyzer_alias_replacement.sql b/tests/queries/0_stateless/03089_analyzer_alias_replacement.sql index 069da5fdd65..5526e1aaf7d 100644 --- a/tests/queries/0_stateless/03089_analyzer_alias_replacement.sql +++ b/tests/queries/0_stateless/03089_analyzer_alias_replacement.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/61950 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; with dummy + 1 as dummy select dummy from system.one; diff --git a/tests/queries/0_stateless/03090_analyzer_multiple_using_statements.sql b/tests/queries/0_stateless/03090_analyzer_multiple_using_statements.sql index c35f33782ff..08ea103d3c9 100644 --- a/tests/queries/0_stateless/03090_analyzer_multiple_using_statements.sql +++ b/tests/queries/0_stateless/03090_analyzer_multiple_using_statements.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/55647 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT * diff --git a/tests/queries/0_stateless/03091_analyzer_same_table_name_in_different_databases.sql b/tests/queries/0_stateless/03091_analyzer_same_table_name_in_different_databases.sql index 599275c66e8..11984aec496 100644 --- a/tests/queries/0_stateless/03091_analyzer_same_table_name_in_different_databases.sql +++ b/tests/queries/0_stateless/03091_analyzer_same_table_name_in_different_databases.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/61947 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; diff --git a/tests/queries/0_stateless/03092_analyzer_same_table_name_in_different_databases.sql b/tests/queries/0_stateless/03092_analyzer_same_table_name_in_different_databases.sql index 10d18324c3c..83b1a902721 100644 --- a/tests/queries/0_stateless/03092_analyzer_same_table_name_in_different_databases.sql +++ b/tests/queries/0_stateless/03092_analyzer_same_table_name_in_different_databases.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/61947 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; diff --git a/tests/queries/0_stateless/03093_analyzer_column_alias.sql b/tests/queries/0_stateless/03093_analyzer_column_alias.sql index 9ff0f78ba24..edf89108b56 100644 --- a/tests/queries/0_stateless/03093_analyzer_column_alias.sql +++ b/tests/queries/0_stateless/03093_analyzer_column_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/26674 -SET allow_experimental_analyzer = true; +SET enable_analyzer = true; SELECT Carrier, diff --git a/tests/queries/0_stateless/03093_analyzer_miel_test.sql b/tests/queries/0_stateless/03093_analyzer_miel_test.sql index f408882dcd5..4915864bb51 100644 --- a/tests/queries/0_stateless/03093_analyzer_miel_test.sql +++ b/tests/queries/0_stateless/03093_analyzer_miel_test.sql @@ 
-8,7 +8,7 @@ INSERT INTO test_03093 VALUES ('x1', 123, {'k1': ''}); INSERT INTO test_03093 VALUES ('x1', 123, {'k1': '', 'k11': ''}); INSERT INTO test_03093 VALUES ('x1', 12, {'k1': ''}); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select app, arrayZip(untuple(sumMap(k.keys, replicate(1, k.keys)))) from test_03093 PREWHERE c > 1 group by app; select app, arrayZip(untuple(sumMap(k.keys, replicate(1, k.keys)))) from test_03093 WHERE c > 1 group by app; diff --git a/tests/queries/0_stateless/03093_bug37909_query_does_not_finish.sql b/tests/queries/0_stateless/03093_bug37909_query_does_not_finish.sql index 62fa3f437af..90f0a8a3b8e 100644 --- a/tests/queries/0_stateless/03093_bug37909_query_does_not_finish.sql +++ b/tests/queries/0_stateless/03093_bug37909_query_does_not_finish.sql @@ -75,4 +75,4 @@ FROM /* WHERE (v_date >= '2022-05-08') AND (v_date <= '2022-06-07') placing condition has same effect */ GROUP BY vDate ORDER BY vDate ASC -SETTINGS allow_experimental_analyzer = 1; -- the query times out if allow_experimental_analyzer = 0 +SETTINGS enable_analyzer = 1; -- the query times out if enable_analyzer = 0 diff --git a/tests/queries/0_stateless/03094_analyzer_fiddle_multiif.sql b/tests/queries/0_stateless/03094_analyzer_fiddle_multiif.sql index 1b1603be18e..8426749917a 100644 --- a/tests/queries/0_stateless/03094_analyzer_fiddle_multiif.sql +++ b/tests/queries/0_stateless/03094_analyzer_fiddle_multiif.sql @@ -5,7 +5,7 @@ INSERT INTO users_03094 VALUES ('John', 33); INSERT INTO users_03094 VALUES ('Ksenia', 48); INSERT INTO users_03094 VALUES ('Alice', 50); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT multiIf((age > 30) or (true), '1', '2') AS a, diff --git a/tests/queries/0_stateless/03094_named_tuple_bug24607.sql b/tests/queries/0_stateless/03094_named_tuple_bug24607.sql index e3c97f3fe41..698c339e53e 100644 --- a/tests/queries/0_stateless/03094_named_tuple_bug24607.sql +++ b/tests/queries/0_stateless/03094_named_tuple_bug24607.sql @@ -1,4 +1,4 @@ SELECT JSONExtract('{"a":1, "b":"test"}', 'Tuple(a UInt8, b String)') AS x, x.a -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03094_one_thousand_joins.sql b/tests/queries/0_stateless/03094_one_thousand_joins.sql index 1f6bd99df7f..6ae4e4d4d3c 100644 --- a/tests/queries/0_stateless/03094_one_thousand_joins.sql +++ b/tests/queries/0_stateless/03094_one_thousand_joins.sql @@ -2,7 +2,7 @@ -- (no-tsan because it has a small maximum stack size and the test would fail with TOO_DEEP_RECURSION) SET join_algorithm = 'default'; -- for 'full_sorting_merge' the query is 10x slower -SET allow_experimental_analyzer = 1; -- old analyzer returns TOO_DEEP_SUBQUERIES +SET enable_analyzer = 1; -- old analyzer returns TOO_DEEP_SUBQUERIES -- Bug 33446, marked as 'long' because it still runs around 10 sec SELECT * FROM (SELECT 1 AS x) t1 JOIN (SELECT 1 AS x) t2 ON t1.x = t2.x JOIN (SELECT 1 AS x) t3 ON t1.x = t3.x JOIN (SELECT 1 AS x) t4 ON t1.x = t4.x JOIN (SELECT 1 AS x) t5 ON t1.x = t5.x JOIN (SELECT 1 AS x) t6 ON t1.x = t6.x JOIN (SELECT 1 AS x) t7 ON t1.x = t7.x JOIN (SELECT 1 AS x) t8 ON t1.x = t8.x JOIN (SELECT 1 AS x) t9 ON t1.x = t9.x JOIN (SELECT 1 AS x) t10 ON t1.x = t10.x JOIN (SELECT 1 AS x) t11 ON t1.x = t11.x JOIN (SELECT 1 AS x) t12 ON t1.x = t12.x JOIN (SELECT 1 AS x) t13 ON t1.x = t13.x JOIN (SELECT 1 AS x) t14 ON t1.x = t14.x JOIN (SELECT 1 AS x) t15 ON t1.x = t15.x JOIN (SELECT 1 AS x) t16 ON t1.x = t16.x JOIN (SELECT 1 AS x) t17 ON t1.x = t17.x 
JOIN (SELECT 1 AS x) t18 ON t1.x = t18.x JOIN (SELECT 1 AS x) t19 ON t1.x = t19.x JOIN (SELECT 1 AS x) t20 ON t1.x = t20.x JOIN (SELECT 1 AS x) t21 ON t1.x = t21.x JOIN (SELECT 1 AS x) t22 ON t1.x = t22.x JOIN (SELECT 1 AS x) t23 ON t1.x = t23.x JOIN (SELECT 1 AS x) t24 ON t1.x = t24.x JOIN (SELECT 1 AS x) t25 ON t1.x = t25.x JOIN (SELECT 1 AS x) t26 ON t1.x = t26.x JOIN (SELECT 1 AS x) t27 ON t1.x = t27.x JOIN (SELECT 1 AS x) t28 ON t1.x = t28.x JOIN (SELECT 1 AS x) t29 ON t1.x = t29.x JOIN (SELECT 1 AS x) t30 ON t1.x = t30.x JOIN (SELECT 1 AS x) t31 ON t1.x = t31.x JOIN (SELECT 1 AS x) t32 ON t1.x = t32.x JOIN (SELECT 1 AS x) t33 ON t1.x = t33.x JOIN (SELECT 1 AS x) t34 ON t1.x = t34.x JOIN (SELECT 1 AS x) t35 ON t1.x = t35.x JOIN (SELECT 1 AS x) t36 ON t1.x = t36.x JOIN (SELECT 1 AS x) t37 ON t1.x = t37.x JOIN (SELECT 1 AS x) t38 ON t1.x = t38.x JOIN (SELECT 1 AS x) t39 ON t1.x = t39.x JOIN (SELECT 1 AS x) t40 ON t1.x = t40.x JOIN (SELECT 1 AS x) t41 ON t1.x = t41.x JOIN (SELECT 1 AS x) t42 ON t1.x = t42.x JOIN (SELECT 1 AS x) t43 ON t1.x = t43.x JOIN (SELECT 1 AS x) t44 ON t1.x = t44.x JOIN (SELECT 1 AS x) t45 ON t1.x = t45.x JOIN (SELECT 1 AS x) t46 ON t1.x = t46.x JOIN (SELECT 1 AS x) t47 ON t1.x = t47.x JOIN (SELECT 1 AS x) t48 ON t1.x = t48.x JOIN (SELECT 1 AS x) t49 ON t1.x = t49.x JOIN (SELECT 1 AS x) t50 ON t1.x = t50.x JOIN (SELECT 1 AS x) t51 ON t1.x = t51.x JOIN (SELECT 1 AS x) t52 ON t1.x = t52.x JOIN (SELECT 1 AS x) t53 ON t1.x = t53.x JOIN (SELECT 1 AS x) t54 ON t1.x = t54.x JOIN (SELECT 1 AS x) t55 ON t1.x = t55.x JOIN (SELECT 1 AS x) t56 ON t1.x = t56.x JOIN (SELECT 1 AS x) t57 ON t1.x = t57.x JOIN (SELECT 1 AS x) t58 ON t1.x = t58.x JOIN (SELECT 1 AS x) t59 ON t1.x = t59.x JOIN (SELECT 1 AS x) t60 ON t1.x = t60.x JOIN (SELECT 1 AS x) t61 ON t1.x = t61.x JOIN (SELECT 1 AS x) t62 ON t1.x = t62.x JOIN (SELECT 1 AS x) t63 ON t1.x = t63.x JOIN (SELECT 1 AS x) t64 ON t1.x = t64.x JOIN (SELECT 1 AS x) t65 ON t1.x = t65.x JOIN (SELECT 1 AS x) t66 ON t1.x = t66.x JOIN (SELECT 1 AS x) t67 ON t1.x = t67.x JOIN (SELECT 1 AS x) t68 ON t1.x = t68.x JOIN (SELECT 1 AS x) t69 ON t1.x = t69.x JOIN (SELECT 1 AS x) t70 ON t1.x = t70.x JOIN (SELECT 1 AS x) t71 ON t1.x = t71.x JOIN (SELECT 1 AS x) t72 ON t1.x = t72.x JOIN (SELECT 1 AS x) t73 ON t1.x = t73.x JOIN (SELECT 1 AS x) t74 ON t1.x = t74.x JOIN (SELECT 1 AS x) t75 ON t1.x = t75.x JOIN (SELECT 1 AS x) t76 ON t1.x = t76.x JOIN (SELECT 1 AS x) t77 ON t1.x = t77.x JOIN (SELECT 1 AS x) t78 ON t1.x = t78.x JOIN (SELECT 1 AS x) t79 ON t1.x = t79.x JOIN (SELECT 1 AS x) t80 ON t1.x = t80.x JOIN (SELECT 1 AS x) t81 ON t1.x = t81.x JOIN (SELECT 1 AS x) t82 ON t1.x = t82.x JOIN (SELECT 1 AS x) t83 ON t1.x = t83.x JOIN (SELECT 1 AS x) t84 ON t1.x = t84.x JOIN (SELECT 1 AS x) t85 ON t1.x = t85.x JOIN (SELECT 1 AS x) t86 ON t1.x = t86.x JOIN (SELECT 1 AS x) t87 ON t1.x = t87.x JOIN (SELECT 1 AS x) t88 ON t1.x = t88.x JOIN (SELECT 1 AS x) t89 ON t1.x = t89.x JOIN (SELECT 1 AS x) t90 ON t1.x = t90.x JOIN (SELECT 1 AS x) t91 ON t1.x = t91.x JOIN (SELECT 1 AS x) t92 ON t1.x = t92.x JOIN (SELECT 1 AS x) t93 ON t1.x = t93.x JOIN (SELECT 1 AS x) t94 ON t1.x = t94.x JOIN (SELECT 1 AS x) t95 ON t1.x = t95.x JOIN (SELECT 1 AS x) t96 ON t1.x = t96.x JOIN (SELECT 1 AS x) t97 ON t1.x = t97.x JOIN (SELECT 1 AS x) t98 ON t1.x = t98.x JOIN (SELECT 1 AS x) t99 ON t1.x = t99.x JOIN (SELECT 1 AS x) t100 ON t1.x = t100.x JOIN (SELECT 1 AS x) t101 ON t1.x = t101.x JOIN (SELECT 1 AS x) t102 ON t1.x = t102.x JOIN (SELECT 1 AS x) t103 ON t1.x = t103.x JOIN (SELECT 1 AS x) 
t104 ON t1.x = t104.x JOIN (SELECT 1 AS x) t105 ON t1.x = t105.x JOIN (SELECT 1 AS x) t106 ON t1.x = t106.x JOIN (SELECT 1 AS x) t107 ON t1.x = t107.x JOIN (SELECT 1 AS x) t108 ON t1.x = t108.x JOIN (SELECT 1 AS x) t109 ON t1.x = t109.x JOIN (SELECT 1 AS x) t110 ON t1.x = t110.x JOIN (SELECT 1 AS x) t111 ON t1.x = t111.x JOIN (SELECT 1 AS x) t112 ON t1.x = t112.x JOIN (SELECT 1 AS x) t113 ON t1.x = t113.x JOIN (SELECT 1 AS x) t114 ON t1.x = t114.x JOIN (SELECT 1 AS x) t115 ON t1.x = t115.x JOIN (SELECT 1 AS x) t116 ON t1.x = t116.x JOIN (SELECT 1 AS x) t117 ON t1.x = t117.x JOIN (SELECT 1 AS x) t118 ON t1.x = t118.x JOIN (SELECT 1 AS x) t119 ON t1.x = t119.x JOIN (SELECT 1 AS x) t120 ON t1.x = t120.x JOIN (SELECT 1 AS x) t121 ON t1.x = t121.x JOIN (SELECT 1 AS x) t122 ON t1.x = t122.x JOIN (SELECT 1 AS x) t123 ON t1.x = t123.x JOIN (SELECT 1 AS x) t124 ON t1.x = t124.x JOIN (SELECT 1 AS x) t125 ON t1.x = t125.x JOIN (SELECT 1 AS x) t126 ON t1.x = t126.x JOIN (SELECT 1 AS x) t127 ON t1.x = t127.x JOIN (SELECT 1 AS x) t128 ON t1.x = t128.x JOIN (SELECT 1 AS x) t129 ON t1.x = t129.x JOIN (SELECT 1 AS x) t130 ON t1.x = t130.x JOIN (SELECT 1 AS x) t131 ON t1.x = t131.x JOIN (SELECT 1 AS x) t132 ON t1.x = t132.x JOIN (SELECT 1 AS x) t133 ON t1.x = t133.x JOIN (SELECT 1 AS x) t134 ON t1.x = t134.x JOIN (SELECT 1 AS x) t135 ON t1.x = t135.x JOIN (SELECT 1 AS x) t136 ON t1.x = t136.x JOIN (SELECT 1 AS x) t137 ON t1.x = t137.x JOIN (SELECT 1 AS x) t138 ON t1.x = t138.x JOIN (SELECT 1 AS x) t139 ON t1.x = t139.x JOIN (SELECT 1 AS x) t140 ON t1.x = t140.x JOIN (SELECT 1 AS x) t141 ON t1.x = t141.x JOIN (SELECT 1 AS x) t142 ON t1.x = t142.x JOIN (SELECT 1 AS x) t143 ON t1.x = t143.x JOIN (SELECT 1 AS x) t144 ON t1.x = t144.x JOIN (SELECT 1 AS x) t145 ON t1.x = t145.x JOIN (SELECT 1 AS x) t146 ON t1.x = t146.x JOIN (SELECT 1 AS x) t147 ON t1.x = t147.x JOIN (SELECT 1 AS x) t148 ON t1.x = t148.x JOIN (SELECT 1 AS x) t149 ON t1.x = t149.x JOIN (SELECT 1 AS x) t150 ON t1.x = t150.x JOIN (SELECT 1 AS x) t151 ON t1.x = t151.x JOIN (SELECT 1 AS x) t152 ON t1.x = t152.x JOIN (SELECT 1 AS x) t153 ON t1.x = t153.x JOIN (SELECT 1 AS x) t154 ON t1.x = t154.x JOIN (SELECT 1 AS x) t155 ON t1.x = t155.x JOIN (SELECT 1 AS x) t156 ON t1.x = t156.x JOIN (SELECT 1 AS x) t157 ON t1.x = t157.x JOIN (SELECT 1 AS x) t158 ON t1.x = t158.x JOIN (SELECT 1 AS x) t159 ON t1.x = t159.x JOIN (SELECT 1 AS x) t160 ON t1.x = t160.x JOIN (SELECT 1 AS x) t161 ON t1.x = t161.x JOIN (SELECT 1 AS x) t162 ON t1.x = t162.x JOIN (SELECT 1 AS x) t163 ON t1.x = t163.x JOIN (SELECT 1 AS x) t164 ON t1.x = t164.x JOIN (SELECT 1 AS x) t165 ON t1.x = t165.x JOIN (SELECT 1 AS x) t166 ON t1.x = t166.x JOIN (SELECT 1 AS x) t167 ON t1.x = t167.x JOIN (SELECT 1 AS x) t168 ON t1.x = t168.x JOIN (SELECT 1 AS x) t169 ON t1.x = t169.x JOIN (SELECT 1 AS x) t170 ON t1.x = t170.x JOIN (SELECT 1 AS x) t171 ON t1.x = t171.x JOIN (SELECT 1 AS x) t172 ON t1.x = t172.x JOIN (SELECT 1 AS x) t173 ON t1.x = t173.x JOIN (SELECT 1 AS x) t174 ON t1.x = t174.x JOIN (SELECT 1 AS x) t175 ON t1.x = t175.x JOIN (SELECT 1 AS x) t176 ON t1.x = t176.x JOIN (SELECT 1 AS x) t177 ON t1.x = t177.x JOIN (SELECT 1 AS x) t178 ON t1.x = t178.x JOIN (SELECT 1 AS x) t179 ON t1.x = t179.x JOIN (SELECT 1 AS x) t180 ON t1.x = t180.x JOIN (SELECT 1 AS x) t181 ON t1.x = t181.x JOIN (SELECT 1 AS x) t182 ON t1.x = t182.x JOIN (SELECT 1 AS x) t183 ON t1.x = t183.x JOIN (SELECT 1 AS x) t184 ON t1.x = t184.x JOIN (SELECT 1 AS x) t185 ON t1.x = t185.x JOIN (SELECT 1 AS x) t186 ON t1.x = t186.x JOIN 
(SELECT 1 AS x) t187 ON t1.x = t187.x JOIN (SELECT 1 AS x) t188 ON t1.x = t188.x JOIN (SELECT 1 AS x) t189 ON t1.x = t189.x JOIN (SELECT 1 AS x) t190 ON t1.x = t190.x JOIN (SELECT 1 AS x) t191 ON t1.x = t191.x JOIN (SELECT 1 AS x) t192 ON t1.x = t192.x JOIN (SELECT 1 AS x) t193 ON t1.x = t193.x JOIN (SELECT 1 AS x) t194 ON t1.x = t194.x JOIN (SELECT 1 AS x) t195 ON t1.x = t195.x JOIN (SELECT 1 AS x) t196 ON t1.x = t196.x JOIN (SELECT 1 AS x) t197 ON t1.x = t197.x JOIN (SELECT 1 AS x) t198 ON t1.x = t198.x JOIN (SELECT 1 AS x) t199 ON t1.x = t199.x JOIN (SELECT 1 AS x) t200 ON t1.x = t200.x JOIN (SELECT 1 AS x) t201 ON t1.x = t201.x JOIN (SELECT 1 AS x) t202 ON t1.x = t202.x JOIN (SELECT 1 AS x) t203 ON t1.x = t203.x JOIN (SELECT 1 AS x) t204 ON t1.x = t204.x JOIN (SELECT 1 AS x) t205 ON t1.x = t205.x JOIN (SELECT 1 AS x) t206 ON t1.x = t206.x JOIN (SELECT 1 AS x) t207 ON t1.x = t207.x JOIN (SELECT 1 AS x) t208 ON t1.x = t208.x JOIN (SELECT 1 AS x) t209 ON t1.x = t209.x JOIN (SELECT 1 AS x) t210 ON t1.x = t210.x JOIN (SELECT 1 AS x) t211 ON t1.x = t211.x JOIN (SELECT 1 AS x) t212 ON t1.x = t212.x JOIN (SELECT 1 AS x) t213 ON t1.x = t213.x JOIN (SELECT 1 AS x) t214 ON t1.x = t214.x JOIN (SELECT 1 AS x) t215 ON t1.x = t215.x JOIN (SELECT 1 AS x) t216 ON t1.x = t216.x JOIN (SELECT 1 AS x) t217 ON t1.x = t217.x JOIN (SELECT 1 AS x) t218 ON t1.x = t218.x JOIN (SELECT 1 AS x) t219 ON t1.x = t219.x JOIN (SELECT 1 AS x) t220 ON t1.x = t220.x JOIN (SELECT 1 AS x) t221 ON t1.x = t221.x JOIN (SELECT 1 AS x) t222 ON t1.x = t222.x JOIN (SELECT 1 AS x) t223 ON t1.x = t223.x JOIN (SELECT 1 AS x) t224 ON t1.x = t224.x JOIN (SELECT 1 AS x) t225 ON t1.x = t225.x JOIN (SELECT 1 AS x) t226 ON t1.x = t226.x JOIN (SELECT 1 AS x) t227 ON t1.x = t227.x JOIN (SELECT 1 AS x) t228 ON t1.x = t228.x JOIN (SELECT 1 AS x) t229 ON t1.x = t229.x JOIN (SELECT 1 AS x) t230 ON t1.x = t230.x JOIN (SELECT 1 AS x) t231 ON t1.x = t231.x JOIN (SELECT 1 AS x) t232 ON t1.x = t232.x JOIN (SELECT 1 AS x) t233 ON t1.x = t233.x JOIN (SELECT 1 AS x) t234 ON t1.x = t234.x JOIN (SELECT 1 AS x) t235 ON t1.x = t235.x JOIN (SELECT 1 AS x) t236 ON t1.x = t236.x JOIN (SELECT 1 AS x) t237 ON t1.x = t237.x JOIN (SELECT 1 AS x) t238 ON t1.x = t238.x JOIN (SELECT 1 AS x) t239 ON t1.x = t239.x JOIN (SELECT 1 AS x) t240 ON t1.x = t240.x JOIN (SELECT 1 AS x) t241 ON t1.x = t241.x JOIN (SELECT 1 AS x) t242 ON t1.x = t242.x JOIN (SELECT 1 AS x) t243 ON t1.x = t243.x JOIN (SELECT 1 AS x) t244 ON t1.x = t244.x JOIN (SELECT 1 AS x) t245 ON t1.x = t245.x JOIN (SELECT 1 AS x) t246 ON t1.x = t246.x JOIN (SELECT 1 AS x) t247 ON t1.x = t247.x JOIN (SELECT 1 AS x) t248 ON t1.x = t248.x JOIN (SELECT 1 AS x) t249 ON t1.x = t249.x JOIN (SELECT 1 AS x) t250 ON t1.x = t250.x JOIN (SELECT 1 AS x) t251 ON t1.x = t251.x JOIN (SELECT 1 AS x) t252 ON t1.x = t252.x JOIN (SELECT 1 AS x) t253 ON t1.x = t253.x JOIN (SELECT 1 AS x) t254 ON t1.x = t254.x JOIN (SELECT 1 AS x) t255 ON t1.x = t255.x JOIN (SELECT 1 AS x) t256 ON t1.x = t256.x JOIN (SELECT 1 AS x) t257 ON t1.x = t257.x JOIN (SELECT 1 AS x) t258 ON t1.x = t258.x JOIN (SELECT 1 AS x) t259 ON t1.x = t259.x JOIN (SELECT 1 AS x) t260 ON t1.x = t260.x JOIN (SELECT 1 AS x) t261 ON t1.x = t261.x JOIN (SELECT 1 AS x) t262 ON t1.x = t262.x JOIN (SELECT 1 AS x) t263 ON t1.x = t263.x JOIN (SELECT 1 AS x) t264 ON t1.x = t264.x JOIN (SELECT 1 AS x) t265 ON t1.x = t265.x JOIN (SELECT 1 AS x) t266 ON t1.x = t266.x JOIN (SELECT 1 AS x) t267 ON t1.x = t267.x JOIN (SELECT 1 AS x) t268 ON t1.x = t268.x JOIN (SELECT 1 AS x) t269 ON t1.x 
= t269.x JOIN (SELECT 1 AS x) t270 ON t1.x = t270.x JOIN (SELECT 1 AS x) t271 ON t1.x = t271.x JOIN (SELECT 1 AS x) t272 ON t1.x = t272.x JOIN (SELECT 1 AS x) t273 ON t1.x = t273.x JOIN (SELECT 1 AS x) t274 ON t1.x = t274.x JOIN (SELECT 1 AS x) t275 ON t1.x = t275.x JOIN (SELECT 1 AS x) t276 ON t1.x = t276.x JOIN (SELECT 1 AS x) t277 ON t1.x = t277.x JOIN (SELECT 1 AS x) t278 ON t1.x = t278.x JOIN (SELECT 1 AS x) t279 ON t1.x = t279.x JOIN (SELECT 1 AS x) t280 ON t1.x = t280.x JOIN (SELECT 1 AS x) t281 ON t1.x = t281.x JOIN (SELECT 1 AS x) t282 ON t1.x = t282.x JOIN (SELECT 1 AS x) t283 ON t1.x = t283.x JOIN (SELECT 1 AS x) t284 ON t1.x = t284.x JOIN (SELECT 1 AS x) t285 ON t1.x = t285.x JOIN (SELECT 1 AS x) t286 ON t1.x = t286.x JOIN (SELECT 1 AS x) t287 ON t1.x = t287.x JOIN (SELECT 1 AS x) t288 ON t1.x = t288.x JOIN (SELECT 1 AS x) t289 ON t1.x = t289.x JOIN (SELECT 1 AS x) t290 ON t1.x = t290.x JOIN (SELECT 1 AS x) t291 ON t1.x = t291.x JOIN (SELECT 1 AS x) t292 ON t1.x = t292.x JOIN (SELECT 1 AS x) t293 ON t1.x = t293.x JOIN (SELECT 1 AS x) t294 ON t1.x = t294.x JOIN (SELECT 1 AS x) t295 ON t1.x = t295.x JOIN (SELECT 1 AS x) t296 ON t1.x = t296.x JOIN (SELECT 1 AS x) t297 ON t1.x = t297.x JOIN (SELECT 1 AS x) t298 ON t1.x = t298.x JOIN (SELECT 1 AS x) t299 ON t1.x = t299.x JOIN (SELECT 1 AS x) t300 ON t1.x = t300.x JOIN (SELECT 1 AS x) t301 ON t1.x = t301.x JOIN (SELECT 1 AS x) t302 ON t1.x = t302.x JOIN (SELECT 1 AS x) t303 ON t1.x = t303.x JOIN (SELECT 1 AS x) t304 ON t1.x = t304.x JOIN (SELECT 1 AS x) t305 ON t1.x = t305.x JOIN (SELECT 1 AS x) t306 ON t1.x = t306.x JOIN (SELECT 1 AS x) t307 ON t1.x = t307.x JOIN (SELECT 1 AS x) t308 ON t1.x = t308.x JOIN (SELECT 1 AS x) t309 ON t1.x = t309.x JOIN (SELECT 1 AS x) t310 ON t1.x = t310.x JOIN (SELECT 1 AS x) t311 ON t1.x = t311.x JOIN (SELECT 1 AS x) t312 ON t1.x = t312.x JOIN (SELECT 1 AS x) t313 ON t1.x = t313.x JOIN (SELECT 1 AS x) t314 ON t1.x = t314.x JOIN (SELECT 1 AS x) t315 ON t1.x = t315.x JOIN (SELECT 1 AS x) t316 ON t1.x = t316.x JOIN (SELECT 1 AS x) t317 ON t1.x = t317.x JOIN (SELECT 1 AS x) t318 ON t1.x = t318.x JOIN (SELECT 1 AS x) t319 ON t1.x = t319.x JOIN (SELECT 1 AS x) t320 ON t1.x = t320.x JOIN (SELECT 1 AS x) t321 ON t1.x = t321.x JOIN (SELECT 1 AS x) t322 ON t1.x = t322.x JOIN (SELECT 1 AS x) t323 ON t1.x = t323.x JOIN (SELECT 1 AS x) t324 ON t1.x = t324.x JOIN (SELECT 1 AS x) t325 ON t1.x = t325.x JOIN (SELECT 1 AS x) t326 ON t1.x = t326.x JOIN (SELECT 1 AS x) t327 ON t1.x = t327.x JOIN (SELECT 1 AS x) t328 ON t1.x = t328.x JOIN (SELECT 1 AS x) t329 ON t1.x = t329.x JOIN (SELECT 1 AS x) t330 ON t1.x = t330.x JOIN (SELECT 1 AS x) t331 ON t1.x = t331.x JOIN (SELECT 1 AS x) t332 ON t1.x = t332.x JOIN (SELECT 1 AS x) t333 ON t1.x = t333.x JOIN (SELECT 1 AS x) t334 ON t1.x = t334.x JOIN (SELECT 1 AS x) t335 ON t1.x = t335.x JOIN (SELECT 1 AS x) t336 ON t1.x = t336.x JOIN (SELECT 1 AS x) t337 ON t1.x = t337.x JOIN (SELECT 1 AS x) t338 ON t1.x = t338.x JOIN (SELECT 1 AS x) t339 ON t1.x = t339.x JOIN (SELECT 1 AS x) t340 ON t1.x = t340.x JOIN (SELECT 1 AS x) t341 ON t1.x = t341.x JOIN (SELECT 1 AS x) t342 ON t1.x = t342.x JOIN (SELECT 1 AS x) t343 ON t1.x = t343.x JOIN (SELECT 1 AS x) t344 ON t1.x = t344.x JOIN (SELECT 1 AS x) t345 ON t1.x = t345.x JOIN (SELECT 1 AS x) t346 ON t1.x = t346.x JOIN (SELECT 1 AS x) t347 ON t1.x = t347.x JOIN (SELECT 1 AS x) t348 ON t1.x = t348.x JOIN (SELECT 1 AS x) t349 ON t1.x = t349.x JOIN (SELECT 1 AS x) t350 ON t1.x = t350.x JOIN (SELECT 1 AS x) t351 ON t1.x = t351.x JOIN (SELECT 1 AS 
x) t352 ON t1.x = t352.x JOIN (SELECT 1 AS x) t353 ON t1.x = t353.x JOIN (SELECT 1 AS x) t354 ON t1.x = t354.x JOIN (SELECT 1 AS x) t355 ON t1.x = t355.x JOIN (SELECT 1 AS x) t356 ON t1.x = t356.x JOIN (SELECT 1 AS x) t357 ON t1.x = t357.x JOIN (SELECT 1 AS x) t358 ON t1.x = t358.x JOIN (SELECT 1 AS x) t359 ON t1.x = t359.x JOIN (SELECT 1 AS x) t360 ON t1.x = t360.x JOIN (SELECT 1 AS x) t361 ON t1.x = t361.x JOIN (SELECT 1 AS x) t362 ON t1.x = t362.x JOIN (SELECT 1 AS x) t363 ON t1.x = t363.x JOIN (SELECT 1 AS x) t364 ON t1.x = t364.x JOIN (SELECT 1 AS x) t365 ON t1.x = t365.x JOIN (SELECT 1 AS x) t366 ON t1.x = t366.x JOIN (SELECT 1 AS x) t367 ON t1.x = t367.x JOIN (SELECT 1 AS x) t368 ON t1.x = t368.x JOIN (SELECT 1 AS x) t369 ON t1.x = t369.x JOIN (SELECT 1 AS x) t370 ON t1.x = t370.x JOIN (SELECT 1 AS x) t371 ON t1.x = t371.x JOIN (SELECT 1 AS x) t372 ON t1.x = t372.x JOIN (SELECT 1 AS x) t373 ON t1.x = t373.x JOIN (SELECT 1 AS x) t374 ON t1.x = t374.x JOIN (SELECT 1 AS x) t375 ON t1.x = t375.x JOIN (SELECT 1 AS x) t376 ON t1.x = t376.x JOIN (SELECT 1 AS x) t377 ON t1.x = t377.x JOIN (SELECT 1 AS x) t378 ON t1.x = t378.x JOIN (SELECT 1 AS x) t379 ON t1.x = t379.x JOIN (SELECT 1 AS x) t380 ON t1.x = t380.x JOIN (SELECT 1 AS x) t381 ON t1.x = t381.x JOIN (SELECT 1 AS x) t382 ON t1.x = t382.x JOIN (SELECT 1 AS x) t383 ON t1.x = t383.x JOIN (SELECT 1 AS x) t384 ON t1.x = t384.x JOIN (SELECT 1 AS x) t385 ON t1.x = t385.x JOIN (SELECT 1 AS x) t386 ON t1.x = t386.x JOIN (SELECT 1 AS x) t387 ON t1.x = t387.x JOIN (SELECT 1 AS x) t388 ON t1.x = t388.x JOIN (SELECT 1 AS x) t389 ON t1.x = t389.x JOIN (SELECT 1 AS x) t390 ON t1.x = t390.x JOIN (SELECT 1 AS x) t391 ON t1.x = t391.x JOIN (SELECT 1 AS x) t392 ON t1.x = t392.x JOIN (SELECT 1 AS x) t393 ON t1.x = t393.x JOIN (SELECT 1 AS x) t394 ON t1.x = t394.x JOIN (SELECT 1 AS x) t395 ON t1.x = t395.x JOIN (SELECT 1 AS x) t396 ON t1.x = t396.x JOIN (SELECT 1 AS x) t397 ON t1.x = t397.x JOIN (SELECT 1 AS x) t398 ON t1.x = t398.x JOIN (SELECT 1 AS x) t399 ON t1.x = t399.x JOIN (SELECT 1 AS x) t400 ON t1.x = t400.x JOIN (SELECT 1 AS x) t401 ON t1.x = t401.x JOIN (SELECT 1 AS x) t402 ON t1.x = t402.x JOIN (SELECT 1 AS x) t403 ON t1.x = t403.x JOIN (SELECT 1 AS x) t404 ON t1.x = t404.x JOIN (SELECT 1 AS x) t405 ON t1.x = t405.x JOIN (SELECT 1 AS x) t406 ON t1.x = t406.x JOIN (SELECT 1 AS x) t407 ON t1.x = t407.x JOIN (SELECT 1 AS x) t408 ON t1.x = t408.x JOIN (SELECT 1 AS x) t409 ON t1.x = t409.x JOIN (SELECT 1 AS x) t410 ON t1.x = t410.x JOIN (SELECT 1 AS x) t411 ON t1.x = t411.x JOIN (SELECT 1 AS x) t412 ON t1.x = t412.x JOIN (SELECT 1 AS x) t413 ON t1.x = t413.x JOIN (SELECT 1 AS x) t414 ON t1.x = t414.x JOIN (SELECT 1 AS x) t415 ON t1.x = t415.x JOIN (SELECT 1 AS x) t416 ON t1.x = t416.x JOIN (SELECT 1 AS x) t417 ON t1.x = t417.x JOIN (SELECT 1 AS x) t418 ON t1.x = t418.x JOIN (SELECT 1 AS x) t419 ON t1.x = t419.x JOIN (SELECT 1 AS x) t420 ON t1.x = t420.x JOIN (SELECT 1 AS x) t421 ON t1.x = t421.x JOIN (SELECT 1 AS x) t422 ON t1.x = t422.x JOIN (SELECT 1 AS x) t423 ON t1.x = t423.x JOIN (SELECT 1 AS x) t424 ON t1.x = t424.x JOIN (SELECT 1 AS x) t425 ON t1.x = t425.x JOIN (SELECT 1 AS x) t426 ON t1.x = t426.x JOIN (SELECT 1 AS x) t427 ON t1.x = t427.x JOIN (SELECT 1 AS x) t428 ON t1.x = t428.x JOIN (SELECT 1 AS x) t429 ON t1.x = t429.x JOIN (SELECT 1 AS x) t430 ON t1.x = t430.x JOIN (SELECT 1 AS x) t431 ON t1.x = t431.x JOIN (SELECT 1 AS x) t432 ON t1.x = t432.x JOIN (SELECT 1 AS x) t433 ON t1.x = t433.x JOIN (SELECT 1 AS x) t434 ON t1.x = t434.x 
JOIN (SELECT 1 AS x) t435 ON t1.x = t435.x JOIN (SELECT 1 AS x) t436 ON t1.x = t436.x JOIN (SELECT 1 AS x) t437 ON t1.x = t437.x JOIN (SELECT 1 AS x) t438 ON t1.x = t438.x JOIN (SELECT 1 AS x) t439 ON t1.x = t439.x JOIN (SELECT 1 AS x) t440 ON t1.x = t440.x JOIN (SELECT 1 AS x) t441 ON t1.x = t441.x JOIN (SELECT 1 AS x) t442 ON t1.x = t442.x JOIN (SELECT 1 AS x) t443 ON t1.x = t443.x JOIN (SELECT 1 AS x) t444 ON t1.x = t444.x JOIN (SELECT 1 AS x) t445 ON t1.x = t445.x JOIN (SELECT 1 AS x) t446 ON t1.x = t446.x JOIN (SELECT 1 AS x) t447 ON t1.x = t447.x JOIN (SELECT 1 AS x) t448 ON t1.x = t448.x JOIN (SELECT 1 AS x) t449 ON t1.x = t449.x JOIN (SELECT 1 AS x) t450 ON t1.x = t450.x JOIN (SELECT 1 AS x) t451 ON t1.x = t451.x JOIN (SELECT 1 AS x) t452 ON t1.x = t452.x JOIN (SELECT 1 AS x) t453 ON t1.x = t453.x JOIN (SELECT 1 AS x) t454 ON t1.x = t454.x JOIN (SELECT 1 AS x) t455 ON t1.x = t455.x JOIN (SELECT 1 AS x) t456 ON t1.x = t456.x JOIN (SELECT 1 AS x) t457 ON t1.x = t457.x JOIN (SELECT 1 AS x) t458 ON t1.x = t458.x JOIN (SELECT 1 AS x) t459 ON t1.x = t459.x JOIN (SELECT 1 AS x) t460 ON t1.x = t460.x JOIN (SELECT 1 AS x) t461 ON t1.x = t461.x JOIN (SELECT 1 AS x) t462 ON t1.x = t462.x JOIN (SELECT 1 AS x) t463 ON t1.x = t463.x JOIN (SELECT 1 AS x) t464 ON t1.x = t464.x JOIN (SELECT 1 AS x) t465 ON t1.x = t465.x JOIN (SELECT 1 AS x) t466 ON t1.x = t466.x JOIN (SELECT 1 AS x) t467 ON t1.x = t467.x JOIN (SELECT 1 AS x) t468 ON t1.x = t468.x JOIN (SELECT 1 AS x) t469 ON t1.x = t469.x JOIN (SELECT 1 AS x) t470 ON t1.x = t470.x JOIN (SELECT 1 AS x) t471 ON t1.x = t471.x JOIN (SELECT 1 AS x) t472 ON t1.x = t472.x JOIN (SELECT 1 AS x) t473 ON t1.x = t473.x JOIN (SELECT 1 AS x) t474 ON t1.x = t474.x JOIN (SELECT 1 AS x) t475 ON t1.x = t475.x JOIN (SELECT 1 AS x) t476 ON t1.x = t476.x JOIN (SELECT 1 AS x) t477 ON t1.x = t477.x JOIN (SELECT 1 AS x) t478 ON t1.x = t478.x JOIN (SELECT 1 AS x) t479 ON t1.x = t479.x JOIN (SELECT 1 AS x) t480 ON t1.x = t480.x JOIN (SELECT 1 AS x) t481 ON t1.x = t481.x JOIN (SELECT 1 AS x) t482 ON t1.x = t482.x JOIN (SELECT 1 AS x) t483 ON t1.x = t483.x JOIN (SELECT 1 AS x) t484 ON t1.x = t484.x JOIN (SELECT 1 AS x) t485 ON t1.x = t485.x JOIN (SELECT 1 AS x) t486 ON t1.x = t486.x JOIN (SELECT 1 AS x) t487 ON t1.x = t487.x JOIN (SELECT 1 AS x) t488 ON t1.x = t488.x JOIN (SELECT 1 AS x) t489 ON t1.x = t489.x JOIN (SELECT 1 AS x) t490 ON t1.x = t490.x JOIN (SELECT 1 AS x) t491 ON t1.x = t491.x JOIN (SELECT 1 AS x) t492 ON t1.x = t492.x JOIN (SELECT 1 AS x) t493 ON t1.x = t493.x JOIN (SELECT 1 AS x) t494 ON t1.x = t494.x JOIN (SELECT 1 AS x) t495 ON t1.x = t495.x JOIN (SELECT 1 AS x) t496 ON t1.x = t496.x JOIN (SELECT 1 AS x) t497 ON t1.x = t497.x JOIN (SELECT 1 AS x) t498 ON t1.x = t498.x JOIN (SELECT 1 AS x) t499 ON t1.x = t499.x JOIN (SELECT 1 AS x) t500 ON t1.x = t500.x JOIN (SELECT 1 AS x) t501 ON t1.x = t501.x JOIN (SELECT 1 AS x) t502 ON t1.x = t502.x JOIN (SELECT 1 AS x) t503 ON t1.x = t503.x JOIN (SELECT 1 AS x) t504 ON t1.x = t504.x JOIN (SELECT 1 AS x) t505 ON t1.x = t505.x JOIN (SELECT 1 AS x) t506 ON t1.x = t506.x JOIN (SELECT 1 AS x) t507 ON t1.x = t507.x JOIN (SELECT 1 AS x) t508 ON t1.x = t508.x JOIN (SELECT 1 AS x) t509 ON t1.x = t509.x JOIN (SELECT 1 AS x) t510 ON t1.x = t510.x JOIN (SELECT 1 AS x) t511 ON t1.x = t511.x JOIN (SELECT 1 AS x) t512 ON t1.x = t512.x JOIN (SELECT 1 AS x) t513 ON t1.x = t513.x JOIN (SELECT 1 AS x) t514 ON t1.x = t514.x JOIN (SELECT 1 AS x) t515 ON t1.x = t515.x JOIN (SELECT 1 AS x) t516 ON t1.x = t516.x JOIN (SELECT 1 AS x) t517 ON 
t1.x = t517.x JOIN (SELECT 1 AS x) t518 ON t1.x = t518.x JOIN (SELECT 1 AS x) t519 ON t1.x = t519.x JOIN (SELECT 1 AS x) t520 ON t1.x = t520.x JOIN (SELECT 1 AS x) t521 ON t1.x = t521.x JOIN (SELECT 1 AS x) t522 ON t1.x = t522.x JOIN (SELECT 1 AS x) t523 ON t1.x = t523.x JOIN (SELECT 1 AS x) t524 ON t1.x = t524.x JOIN (SELECT 1 AS x) t525 ON t1.x = t525.x JOIN (SELECT 1 AS x) t526 ON t1.x = t526.x JOIN (SELECT 1 AS x) t527 ON t1.x = t527.x JOIN (SELECT 1 AS x) t528 ON t1.x = t528.x JOIN (SELECT 1 AS x) t529 ON t1.x = t529.x JOIN (SELECT 1 AS x) t530 ON t1.x = t530.x JOIN (SELECT 1 AS x) t531 ON t1.x = t531.x JOIN (SELECT 1 AS x) t532 ON t1.x = t532.x JOIN (SELECT 1 AS x) t533 ON t1.x = t533.x JOIN (SELECT 1 AS x) t534 ON t1.x = t534.x JOIN (SELECT 1 AS x) t535 ON t1.x = t535.x JOIN (SELECT 1 AS x) t536 ON t1.x = t536.x JOIN (SELECT 1 AS x) t537 ON t1.x = t537.x JOIN (SELECT 1 AS x) t538 ON t1.x = t538.x JOIN (SELECT 1 AS x) t539 ON t1.x = t539.x JOIN (SELECT 1 AS x) t540 ON t1.x = t540.x JOIN (SELECT 1 AS x) t541 ON t1.x = t541.x JOIN (SELECT 1 AS x) t542 ON t1.x = t542.x JOIN (SELECT 1 AS x) t543 ON t1.x = t543.x JOIN (SELECT 1 AS x) t544 ON t1.x = t544.x JOIN (SELECT 1 AS x) t545 ON t1.x = t545.x JOIN (SELECT 1 AS x) t546 ON t1.x = t546.x JOIN (SELECT 1 AS x) t547 ON t1.x = t547.x JOIN (SELECT 1 AS x) t548 ON t1.x = t548.x JOIN (SELECT 1 AS x) t549 ON t1.x = t549.x JOIN (SELECT 1 AS x) t550 ON t1.x = t550.x JOIN (SELECT 1 AS x) t551 ON t1.x = t551.x JOIN (SELECT 1 AS x) t552 ON t1.x = t552.x JOIN (SELECT 1 AS x) t553 ON t1.x = t553.x JOIN (SELECT 1 AS x) t554 ON t1.x = t554.x JOIN (SELECT 1 AS x) t555 ON t1.x = t555.x JOIN (SELECT 1 AS x) t556 ON t1.x = t556.x JOIN (SELECT 1 AS x) t557 ON t1.x = t557.x JOIN (SELECT 1 AS x) t558 ON t1.x = t558.x JOIN (SELECT 1 AS x) t559 ON t1.x = t559.x JOIN (SELECT 1 AS x) t560 ON t1.x = t560.x JOIN (SELECT 1 AS x) t561 ON t1.x = t561.x JOIN (SELECT 1 AS x) t562 ON t1.x = t562.x JOIN (SELECT 1 AS x) t563 ON t1.x = t563.x JOIN (SELECT 1 AS x) t564 ON t1.x = t564.x JOIN (SELECT 1 AS x) t565 ON t1.x = t565.x JOIN (SELECT 1 AS x) t566 ON t1.x = t566.x JOIN (SELECT 1 AS x) t567 ON t1.x = t567.x JOIN (SELECT 1 AS x) t568 ON t1.x = t568.x JOIN (SELECT 1 AS x) t569 ON t1.x = t569.x JOIN (SELECT 1 AS x) t570 ON t1.x = t570.x JOIN (SELECT 1 AS x) t571 ON t1.x = t571.x JOIN (SELECT 1 AS x) t572 ON t1.x = t572.x JOIN (SELECT 1 AS x) t573 ON t1.x = t573.x JOIN (SELECT 1 AS x) t574 ON t1.x = t574.x JOIN (SELECT 1 AS x) t575 ON t1.x = t575.x JOIN (SELECT 1 AS x) t576 ON t1.x = t576.x JOIN (SELECT 1 AS x) t577 ON t1.x = t577.x JOIN (SELECT 1 AS x) t578 ON t1.x = t578.x JOIN (SELECT 1 AS x) t579 ON t1.x = t579.x JOIN (SELECT 1 AS x) t580 ON t1.x = t580.x JOIN (SELECT 1 AS x) t581 ON t1.x = t581.x JOIN (SELECT 1 AS x) t582 ON t1.x = t582.x JOIN (SELECT 1 AS x) t583 ON t1.x = t583.x JOIN (SELECT 1 AS x) t584 ON t1.x = t584.x JOIN (SELECT 1 AS x) t585 ON t1.x = t585.x JOIN (SELECT 1 AS x) t586 ON t1.x = t586.x JOIN (SELECT 1 AS x) t587 ON t1.x = t587.x JOIN (SELECT 1 AS x) t588 ON t1.x = t588.x JOIN (SELECT 1 AS x) t589 ON t1.x = t589.x JOIN (SELECT 1 AS x) t590 ON t1.x = t590.x JOIN (SELECT 1 AS x) t591 ON t1.x = t591.x JOIN (SELECT 1 AS x) t592 ON t1.x = t592.x JOIN (SELECT 1 AS x) t593 ON t1.x = t593.x JOIN (SELECT 1 AS x) t594 ON t1.x = t594.x JOIN (SELECT 1 AS x) t595 ON t1.x = t595.x JOIN (SELECT 1 AS x) t596 ON t1.x = t596.x JOIN (SELECT 1 AS x) t597 ON t1.x = t597.x JOIN (SELECT 1 AS x) t598 ON t1.x = t598.x JOIN (SELECT 1 AS x) t599 ON t1.x = t599.x JOIN (SELECT 1 
AS x) t600 ON t1.x = t600.x JOIN (SELECT 1 AS x) t601 ON t1.x = t601.x JOIN (SELECT 1 AS x) t602 ON t1.x = t602.x JOIN (SELECT 1 AS x) t603 ON t1.x = t603.x JOIN (SELECT 1 AS x) t604 ON t1.x = t604.x JOIN (SELECT 1 AS x) t605 ON t1.x = t605.x JOIN (SELECT 1 AS x) t606 ON t1.x = t606.x JOIN (SELECT 1 AS x) t607 ON t1.x = t607.x JOIN (SELECT 1 AS x) t608 ON t1.x = t608.x JOIN (SELECT 1 AS x) t609 ON t1.x = t609.x JOIN (SELECT 1 AS x) t610 ON t1.x = t610.x JOIN (SELECT 1 AS x) t611 ON t1.x = t611.x JOIN (SELECT 1 AS x) t612 ON t1.x = t612.x JOIN (SELECT 1 AS x) t613 ON t1.x = t613.x JOIN (SELECT 1 AS x) t614 ON t1.x = t614.x JOIN (SELECT 1 AS x) t615 ON t1.x = t615.x JOIN (SELECT 1 AS x) t616 ON t1.x = t616.x JOIN (SELECT 1 AS x) t617 ON t1.x = t617.x JOIN (SELECT 1 AS x) t618 ON t1.x = t618.x JOIN (SELECT 1 AS x) t619 ON t1.x = t619.x JOIN (SELECT 1 AS x) t620 ON t1.x = t620.x JOIN (SELECT 1 AS x) t621 ON t1.x = t621.x JOIN (SELECT 1 AS x) t622 ON t1.x = t622.x JOIN (SELECT 1 AS x) t623 ON t1.x = t623.x JOIN (SELECT 1 AS x) t624 ON t1.x = t624.x JOIN (SELECT 1 AS x) t625 ON t1.x = t625.x JOIN (SELECT 1 AS x) t626 ON t1.x = t626.x JOIN (SELECT 1 AS x) t627 ON t1.x = t627.x JOIN (SELECT 1 AS x) t628 ON t1.x = t628.x JOIN (SELECT 1 AS x) t629 ON t1.x = t629.x JOIN (SELECT 1 AS x) t630 ON t1.x = t630.x JOIN (SELECT 1 AS x) t631 ON t1.x = t631.x JOIN (SELECT 1 AS x) t632 ON t1.x = t632.x JOIN (SELECT 1 AS x) t633 ON t1.x = t633.x JOIN (SELECT 1 AS x) t634 ON t1.x = t634.x JOIN (SELECT 1 AS x) t635 ON t1.x = t635.x JOIN (SELECT 1 AS x) t636 ON t1.x = t636.x JOIN (SELECT 1 AS x) t637 ON t1.x = t637.x JOIN (SELECT 1 AS x) t638 ON t1.x = t638.x JOIN (SELECT 1 AS x) t639 ON t1.x = t639.x JOIN (SELECT 1 AS x) t640 ON t1.x = t640.x JOIN (SELECT 1 AS x) t641 ON t1.x = t641.x JOIN (SELECT 1 AS x) t642 ON t1.x = t642.x JOIN (SELECT 1 AS x) t643 ON t1.x = t643.x JOIN (SELECT 1 AS x) t644 ON t1.x = t644.x JOIN (SELECT 1 AS x) t645 ON t1.x = t645.x JOIN (SELECT 1 AS x) t646 ON t1.x = t646.x JOIN (SELECT 1 AS x) t647 ON t1.x = t647.x JOIN (SELECT 1 AS x) t648 ON t1.x = t648.x JOIN (SELECT 1 AS x) t649 ON t1.x = t649.x JOIN (SELECT 1 AS x) t650 ON t1.x = t650.x JOIN (SELECT 1 AS x) t651 ON t1.x = t651.x JOIN (SELECT 1 AS x) t652 ON t1.x = t652.x JOIN (SELECT 1 AS x) t653 ON t1.x = t653.x JOIN (SELECT 1 AS x) t654 ON t1.x = t654.x JOIN (SELECT 1 AS x) t655 ON t1.x = t655.x JOIN (SELECT 1 AS x) t656 ON t1.x = t656.x JOIN (SELECT 1 AS x) t657 ON t1.x = t657.x JOIN (SELECT 1 AS x) t658 ON t1.x = t658.x JOIN (SELECT 1 AS x) t659 ON t1.x = t659.x JOIN (SELECT 1 AS x) t660 ON t1.x = t660.x JOIN (SELECT 1 AS x) t661 ON t1.x = t661.x JOIN (SELECT 1 AS x) t662 ON t1.x = t662.x JOIN (SELECT 1 AS x) t663 ON t1.x = t663.x JOIN (SELECT 1 AS x) t664 ON t1.x = t664.x JOIN (SELECT 1 AS x) t665 ON t1.x = t665.x JOIN (SELECT 1 AS x) t666 ON t1.x = t666.x diff --git a/tests/queries/0_stateless/03095_window_functions_qualify.sql b/tests/queries/0_stateless/03095_window_functions_qualify.sql index adedff2e2cf..72903992745 100644 --- a/tests/queries/0_stateless/03095_window_functions_qualify.sql +++ b/tests/queries/0_stateless/03095_window_functions_qualify.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT number, COUNT() OVER (PARTITION BY number % 3) AS partition_count FROM numbers(10) QUALIFY partition_count = 4 ORDER BY number; diff --git a/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql b/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql 
index b1ddd141e04..0e60ee77fe5 100644 --- a/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql +++ b/tests/queries/0_stateless/03096_text_log_format_string_args_not_empty.sql @@ -1,4 +1,4 @@ -set allow_experimental_analyzer = true; +set enable_analyzer = true; select count; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/tests/queries/0_stateless/03097_query_log_join_processes.sql b/tests/queries/0_stateless/03097_query_log_join_processes.sql index daf3136e3fe..135160f4709 100644 --- a/tests/queries/0_stateless/03097_query_log_join_processes.sql +++ b/tests/queries/0_stateless/03097_query_log_join_processes.sql @@ -2,6 +2,6 @@ SYSTEM FLUSH LOGS; -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT count(1) as num, hostName() as hostName FROM system.query_log as a INNER JOIN system.processes as b on a.query_id = b.query_id and type = 'QueryStart' and dateDiff('second', event_time, now()) > 5 and current_database = currentDatabase() FORMAT Null; diff --git a/tests/queries/0_stateless/03098_prefer_column_to_alias_subquery.sql b/tests/queries/0_stateless/03098_prefer_column_to_alias_subquery.sql index cf3768e2f29..cb41151b9c9 100644 --- a/tests/queries/0_stateless/03098_prefer_column_to_alias_subquery.sql +++ b/tests/queries/0_stateless/03098_prefer_column_to_alias_subquery.sql @@ -13,7 +13,7 @@ CREATE TABLE clickhouse_alias_issue_2 ( column_2 Nullable(Float32) ) Engine=Memory; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; INSERT INTO `clickhouse_alias_issue_1` VALUES (1, 100), (2, 200), (3, 300); @@ -28,7 +28,7 @@ VALUES (1, 10), (2, 20), (3, 30); -- \N 30 3 -- \N 20 2 -- \N 10 1 -SELECT * +SELECT * FROM ( SELECT diff --git a/tests/queries/0_stateless/03099_analyzer_multi_join.sql b/tests/queries/0_stateless/03099_analyzer_multi_join.sql index 67985962ba8..dfdaeeea770 100644 --- a/tests/queries/0_stateless/03099_analyzer_multi_join.sql +++ b/tests/queries/0_stateless/03099_analyzer_multi_join.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/56503 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT tb1.owner_id AS owner_id, diff --git a/tests/queries/0_stateless/03100_analyzer_constants_in_multiif.sql b/tests/queries/0_stateless/03100_analyzer_constants_in_multiif.sql index 04666411760..c9a4000d25e 100644 --- a/tests/queries/0_stateless/03100_analyzer_constants_in_multiif.sql +++ b/tests/queries/0_stateless/03100_analyzer_constants_in_multiif.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/59101 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TABLE users (name String, age Int16) ENGINE=Memory; INSERT INTO users VALUES ('John', 33); diff --git a/tests/queries/0_stateless/03101_analyzer_identifiers_1.sql b/tests/queries/0_stateless/03101_analyzer_identifiers_1.sql index 2e0ad7d8a5b..499f712e57a 100644 --- a/tests/queries/0_stateless/03101_analyzer_identifiers_1.sql +++ b/tests/queries/0_stateless/03101_analyzer_identifiers_1.sql @@ -1,6 +1,6 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23194 -- This test add query-templates for fuzzer -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/tests/queries/0_stateless/03101_analyzer_identifiers_2.sql b/tests/queries/0_stateless/03101_analyzer_identifiers_2.sql index 35f34e33689..92c3e98265f 100644 --- a/tests/queries/0_stateless/03101_analyzer_identifiers_2.sql +++ 
b/tests/queries/0_stateless/03101_analyzer_identifiers_2.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23194 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TEMPORARY TABLE test1 (a String, nest Nested(x String, y String)); diff --git a/tests/queries/0_stateless/03101_analyzer_identifiers_3.sql b/tests/queries/0_stateless/03101_analyzer_identifiers_3.sql index 77a0f040e88..997fee91930 100644 --- a/tests/queries/0_stateless/03101_analyzer_identifiers_3.sql +++ b/tests/queries/0_stateless/03101_analyzer_identifiers_3.sql @@ -1,7 +1,7 @@ -- Tags: no-parallel -- Looks like you cannot use the query parameter as a column name. -- https://github.com/ClickHouse/ClickHouse/issues/23194 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP DATABASE IF EXISTS db1_03101; DROP DATABASE IF EXISTS db2_03101; diff --git a/tests/queries/0_stateless/03101_analyzer_identifiers_4.sql b/tests/queries/0_stateless/03101_analyzer_identifiers_4.sql index eba6ad09a3f..869310aa181 100644 --- a/tests/queries/0_stateless/03101_analyzer_identifiers_4.sql +++ b/tests/queries/0_stateless/03101_analyzer_identifiers_4.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23194 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/tests/queries/0_stateless/03101_analyzer_invalid_join_on.sql b/tests/queries/0_stateless/03101_analyzer_invalid_join_on.sql index 8539376e136..d7e26862d36 100644 --- a/tests/queries/0_stateless/03101_analyzer_invalid_join_on.sql +++ b/tests/queries/0_stateless/03101_analyzer_invalid_join_on.sql @@ -3,23 +3,23 @@ drop table if exists t1; drop table if exists t2; -set allow_experimental_analyzer=1; +set enable_analyzer=1; create table t1 (c3 String, primary key(c3)) engine = MergeTree; create table t2 (c11 String, primary key(c11)) engine = MergeTree; insert into t1 values ('succeed'); insert into t2 values ('succeed'); -select +select ref_0.c11 as c_2_c30_0 - from + from t2 as ref_0 - cross join (select - ref_1.c3 as c_6_c28_15 - from + cross join (select + ref_1.c3 as c_6_c28_15 + from t1 as ref_1 ) as subq_0 where subq_0.c_6_c28_15 = (select c11 from t2 order by c11 limit 1); drop table if exists t1; -drop table if exists t2; \ No newline at end of file +drop table if exists t2; diff --git a/tests/queries/0_stateless/03102_prefer_column_name_to_alias.sql b/tests/queries/0_stateless/03102_prefer_column_name_to_alias.sql index 48e97fd0841..fafdb660e44 100644 --- a/tests/queries/0_stateless/03102_prefer_column_name_to_alias.sql +++ b/tests/queries/0_stateless/03102_prefer_column_name_to_alias.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS loans; CREATE TABLE loans (loan_number int, security_id text) ENGINE=Memory; -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; INSERT INTO loans VALUES (1, 'AAA'); INSERT INTO loans VALUES (1, 'AAA'); diff --git a/tests/queries/0_stateless/03103_positional_arguments.sql b/tests/queries/0_stateless/03103_positional_arguments.sql index ad30719dc3e..eecaa3f4ea6 100644 --- a/tests/queries/0_stateless/03103_positional_arguments.sql +++ b/tests/queries/0_stateless/03103_positional_arguments.sql @@ -1,6 +1,6 @@ -- https://github.com/ClickHouse/ClickHouse/issues/56466 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TABLE IF EXISTS users; diff --git a/tests/queries/0_stateless/03104_create_view_join.sql 
b/tests/queries/0_stateless/03104_create_view_join.sql index bed3d81c9a8..a39be92b1f1 100644 --- a/tests/queries/0_stateless/03104_create_view_join.sql +++ b/tests/queries/0_stateless/03104_create_view_join.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS test_table_01; DROP TABLE IF EXISTS test_table_02; DROP TABLE IF EXISTS test_view_01; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TABLE test_table_01 ( column Int32 @@ -15,12 +15,12 @@ CREATE TABLE test_table_02 ( ) ENGINE = Memory(); CREATE VIEW test_view_01 AS -SELECT +SELECT t1.column, t2.column -FROM test_table_01 AS t1 +FROM test_table_01 AS t1 INNER JOIN test_table_02 AS t2 ON t1.column = t2.column; DROP TABLE IF EXISTS test_table_01; DROP TABLE IF EXISTS test_table_02; -DROP TABLE IF EXISTS test_view_01; \ No newline at end of file +DROP TABLE IF EXISTS test_view_01; diff --git a/tests/queries/0_stateless/03107_ill_formed_select_in_materialized_view.sql b/tests/queries/0_stateless/03107_ill_formed_select_in_materialized_view.sql index d142bf1662e..5f6ec74bdeb 100644 --- a/tests/queries/0_stateless/03107_ill_formed_select_in_materialized_view.sql +++ b/tests/queries/0_stateless/03107_ill_formed_select_in_materialized_view.sql @@ -6,10 +6,10 @@ DROP TABLE iF EXISTS b; CREATE TABLE a ( a UInt64, b UInt64) ENGINE = Memory; CREATE TABLE b ( b UInt64) ENGINE = Memory; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET joined_subquery_requires_alias = 0; CREATE MATERIALIZED VIEW view_4 ( bb UInt64, cnt UInt64) Engine=MergeTree ORDER BY bb POPULATE AS SELECT bb, count() AS cnt FROM (SELECT a, b AS j, b AS bb FROM a INNER JOIN (SELECT b AS j, b AS bb FROM b ) USING (j)) GROUP BY bb; -- { serverError UNKNOWN_IDENTIFIER } DROP TABLE IF EXISTS a; -DROP TABLE iF EXISTS b; \ No newline at end of file +DROP TABLE iF EXISTS b; diff --git a/tests/queries/0_stateless/03108_describe_union_all.sql b/tests/queries/0_stateless/03108_describe_union_all.sql index c5172902a2b..7e207ae2854 100644 --- a/tests/queries/0_stateless/03108_describe_union_all.sql +++ b/tests/queries/0_stateless/03108_describe_union_all.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/8030 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; -DESCRIBE (SELECT 1, 1 UNION ALL SELECT 1, 2); \ No newline at end of file +DESCRIBE (SELECT 1, 1 UNION ALL SELECT 1, 2); diff --git a/tests/queries/0_stateless/03109_ast_too_big.sql b/tests/queries/0_stateless/03109_ast_too_big.sql index 3eedf305f44..1464f90fe83 100644 --- a/tests/queries/0_stateless/03109_ast_too_big.sql +++ b/tests/queries/0_stateless/03109_ast_too_big.sql @@ -1,6 +1,6 @@ -- https://github.com/ClickHouse/ClickHouse/issues/32139 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; WITH data AS ( @@ -50,4 +50,4 @@ SELECT value12 AS v12, value13 AS v13, value14 AS v14 -FORMAT Null; \ No newline at end of file +FORMAT Null; diff --git a/tests/queries/0_stateless/03110_unicode_alias.sql b/tests/queries/0_stateless/03110_unicode_alias.sql index b8cbe7390fa..aa33195ea51 100644 --- a/tests/queries/0_stateless/03110_unicode_alias.sql +++ b/tests/queries/0_stateless/03110_unicode_alias.sql @@ -1,6 +1,6 @@ -- https://github.com/ClickHouse/ClickHouse/issues/47288 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; select 1 as `c0` from ( diff --git a/tests/queries/0_stateless/03111_inner_join_group_by.sql b/tests/queries/0_stateless/03111_inner_join_group_by.sql index fcc8c55a566..6ebaacfc3fe 100644 --- 
a/tests/queries/0_stateless/03111_inner_join_group_by.sql +++ b/tests/queries/0_stateless/03111_inner_join_group_by.sql @@ -1,6 +1,6 @@ -- https://github.com/ClickHouse/ClickHouse/issues/50705 -set allow_experimental_analyzer=1; +set enable_analyzer=1; SELECT count(s0.number), @@ -15,4 +15,4 @@ INNER JOIN LIMIT 10 ) AS s1 ON s0.number = s1.number GROUP BY s0.number > 5 -LIMIT 10 -- {serverError NOT_AN_AGGREGATE} \ No newline at end of file +LIMIT 10 -- {serverError NOT_AN_AGGREGATE} diff --git a/tests/queries/0_stateless/03112_analyzer_not_found_column_in_block.sql b/tests/queries/0_stateless/03112_analyzer_not_found_column_in_block.sql index eb07ff1d837..cc734b2c49a 100644 --- a/tests/queries/0_stateless/03112_analyzer_not_found_column_in_block.sql +++ b/tests/queries/0_stateless/03112_analyzer_not_found_column_in_block.sql @@ -15,7 +15,7 @@ PRIMARY KEY (user_id, timestamp); INSERT INTO my_first_table (user_id, message, timestamp, metric) VALUES (101, 'Hello, ClickHouse!', now(), -1.0 ), (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ), (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ), (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159 ); -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT user_id @@ -24,4 +24,4 @@ FROM my_first_table WHERE timestamp > 0 and user_id IN (101) LIMIT 2 BY user_id; -DROP TABLE IF EXISTS my_first_table; \ No newline at end of file +DROP TABLE IF EXISTS my_first_table; diff --git a/tests/queries/0_stateless/03113_analyzer_not_found_column_in_block_2.sql b/tests/queries/0_stateless/03113_analyzer_not_found_column_in_block_2.sql index 7bcc6565ea0..4389bdf83fa 100644 --- a/tests/queries/0_stateless/03113_analyzer_not_found_column_in_block_2.sql +++ b/tests/queries/0_stateless/03113_analyzer_not_found_column_in_block_2.sql @@ -4,11 +4,11 @@ drop table if exists t; create table t (ID String) Engine= Memory() ; insert into t values('a'),('b'),('c'); - + -- This optimization is disabled by default and even its description says that it could lead to -- inconsistencies for distributed queries. 
set optimize_if_transform_strings_to_enum=0; -set allow_experimental_analyzer=1; +set enable_analyzer=1; SELECT multiIf( ((multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') as y) = 'c') OR (multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') = 'b') OR @@ -18,4 +18,4 @@ SELECT multiIf( ((multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') FROM remote('127.0.0.{1,2}', currentDatabase(), t) GROUP BY alias; -drop table if exists t; \ No newline at end of file +drop table if exists t; diff --git a/tests/queries/0_stateless/03114_analyzer_cte_with_join.sql b/tests/queries/0_stateless/03114_analyzer_cte_with_join.sql index 140197c7104..65dd3cb66b6 100644 --- a/tests/queries/0_stateless/03114_analyzer_cte_with_join.sql +++ b/tests/queries/0_stateless/03114_analyzer_cte_with_join.sql @@ -1,7 +1,7 @@ -- Tags: no-replicated-database -- https://github.com/ClickHouse/ClickHouse/issues/58500 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; drop table if exists t; diff --git a/tests/queries/0_stateless/03115_alias_exists_column.sql b/tests/queries/0_stateless/03115_alias_exists_column.sql index 654cdd71175..65fc0d8f9a9 100644 --- a/tests/queries/0_stateless/03115_alias_exists_column.sql +++ b/tests/queries/0_stateless/03115_alias_exists_column.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/44412 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT EXISTS(SELECT 1) AS mycheck FORMAT TSVWithNames; diff --git a/tests/queries/0_stateless/03116_analyzer_explicit_alias_as_column_name.sql b/tests/queries/0_stateless/03116_analyzer_explicit_alias_as_column_name.sql index 16035b8e72a..d3e3a29077a 100644 --- a/tests/queries/0_stateless/03116_analyzer_explicit_alias_as_column_name.sql +++ b/tests/queries/0_stateless/03116_analyzer_explicit_alias_as_column_name.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/39923 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT errors.name AS labels, diff --git a/tests/queries/0_stateless/03117_analyzer_same_column_name_as_func.sql b/tests/queries/0_stateless/03117_analyzer_same_column_name_as_func.sql index 43f428a1bf9..a3f4da89525 100644 --- a/tests/queries/0_stateless/03117_analyzer_same_column_name_as_func.sql +++ b/tests/queries/0_stateless/03117_analyzer_same_column_name_as_func.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/39855 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table x( a UInt64, diff --git a/tests/queries/0_stateless/03118_analyzer_multi_join_prewhere.sql b/tests/queries/0_stateless/03118_analyzer_multi_join_prewhere.sql index 8680e9215c3..84f89c2c647 100644 --- a/tests/queries/0_stateless/03118_analyzer_multi_join_prewhere.sql +++ b/tests/queries/0_stateless/03118_analyzer_multi_join_prewhere.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/4596 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE a1 ( ANIMAL Nullable(String) ) engine = MergeTree order by tuple(); insert into a1 values('CROCO'); diff --git a/tests/queries/0_stateless/03119_analyzer_window_function_in_CTE_alias.sql b/tests/queries/0_stateless/03119_analyzer_window_function_in_CTE_alias.sql index 2eb2c66b551..edbb324bda2 100644 --- a/tests/queries/0_stateless/03119_analyzer_window_function_in_CTE_alias.sql +++ b/tests/queries/0_stateless/03119_analyzer_window_function_in_CTE_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/47422 -SET 
allow_experimental_analyzer=1; +SET enable_analyzer=1; DROP TEMPORARY TABLE IF EXISTS test; CREATE TEMPORARY TABLE test (a Float32, id UInt64); diff --git a/tests/queries/0_stateless/03120_analyzer_dist_join.sql b/tests/queries/0_stateless/03120_analyzer_dist_join.sql index 624da39c69b..e40df56c5ac 100644 --- a/tests/queries/0_stateless/03120_analyzer_dist_join.sql +++ b/tests/queries/0_stateless/03120_analyzer_dist_join.sql @@ -1,6 +1,6 @@ -- Tags: no-replicated-database -- https://github.com/ClickHouse/ClickHouse/issues/8547 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET distributed_foreground_insert=1; CREATE TABLE a1_replicated ON CLUSTER test_shard_localhost ( diff --git a/tests/queries/0_stateless/03120_analyzer_param_in_CTE_alias.sql b/tests/queries/0_stateless/03120_analyzer_param_in_CTE_alias.sql index 16fffae4737..d1e3d5a2ffa 100644 --- a/tests/queries/0_stateless/03120_analyzer_param_in_CTE_alias.sql +++ b/tests/queries/0_stateless/03120_analyzer_param_in_CTE_alias.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/33000 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET param_test_a=30; diff --git a/tests/queries/0_stateless/03121_analyzer_filed_redefenition_in_subquery.sql b/tests/queries/0_stateless/03121_analyzer_filed_redefenition_in_subquery.sql index fe05259a320..891eb7aac1c 100644 --- a/tests/queries/0_stateless/03121_analyzer_filed_redefenition_in_subquery.sql +++ b/tests/queries/0_stateless/03121_analyzer_filed_redefenition_in_subquery.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/14739 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; drop table if exists test_subquery; diff --git a/tests/queries/0_stateless/03122_analyzer_collate_in_window_function.sql b/tests/queries/0_stateless/03122_analyzer_collate_in_window_function.sql index 707c5d889f7..efd2e2fc873 100644 --- a/tests/queries/0_stateless/03122_analyzer_collate_in_window_function.sql +++ b/tests/queries/0_stateless/03122_analyzer_collate_in_window_function.sql @@ -1,6 +1,6 @@ -- Tags: no-fasttest -- https://github.com/ClickHouse/ClickHouse/issues/44039 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table test_window_collate(c1 String, c2 String) engine=MergeTree order by c1; insert into test_window_collate values('1', '上海'); diff --git a/tests/queries/0_stateless/03123_analyzer_dist_join_CTE.sql b/tests/queries/0_stateless/03123_analyzer_dist_join_CTE.sql index 4fb8e0b91c4..4d9f5e2971a 100644 --- a/tests/queries/0_stateless/03123_analyzer_dist_join_CTE.sql +++ b/tests/queries/0_stateless/03123_analyzer_dist_join_CTE.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/22923 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SET prefer_localhost_replica=0; create table "t0" (a Int64, b Int64) engine = MergeTree() partition by a order by a; diff --git a/tests/queries/0_stateless/03124_analyzer_nested_CTE_dist_in.sql b/tests/queries/0_stateless/03124_analyzer_nested_CTE_dist_in.sql index 406a50c6d16..be5346efa1c 100644 --- a/tests/queries/0_stateless/03124_analyzer_nested_CTE_dist_in.sql +++ b/tests/queries/0_stateless/03124_analyzer_nested_CTE_dist_in.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/23865 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table table_local engine = Memory AS select * from numbers(10); diff --git a/tests/queries/0_stateless/03125_analyzer_CTE_two_joins.sql 
b/tests/queries/0_stateless/03125_analyzer_CTE_two_joins.sql index f7d5bb5f195..934e2bc3656 100644 --- a/tests/queries/0_stateless/03125_analyzer_CTE_two_joins.sql +++ b/tests/queries/0_stateless/03125_analyzer_CTE_two_joins.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/29748 -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; create table events ( distinct_id String ) engine = Memory; diff --git a/tests/queries/0_stateless/03126_column_not_under_group_by.sql b/tests/queries/0_stateless/03126_column_not_under_group_by.sql index 890a4e823a9..516126c899c 100644 --- a/tests/queries/0_stateless/03126_column_not_under_group_by.sql +++ b/tests/queries/0_stateless/03126_column_not_under_group_by.sql @@ -1,7 +1,6 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT v.x, r.a, sum(c) FROM (select 1 x, 2 c) AS v ANY LEFT JOIN (SELECT 1 x, 2 a) AS r ON v.x = r.x GROUP BY v.x; -- { serverError NOT_AN_AGGREGATE} - diff --git a/tests/queries/0_stateless/03129_cte_with_final.sql b/tests/queries/0_stateless/03129_cte_with_final.sql index 01e1ca6dfb0..2a0714ec571 100644 --- a/tests/queries/0_stateless/03129_cte_with_final.sql +++ b/tests/queries/0_stateless/03129_cte_with_final.sql @@ -12,7 +12,7 @@ ORDER BY key; INSERT INTO t Values (1, 'first', '2024-04-19 01:01:01'); INSERT INTO t Values (1, 'first', '2024-04-19 01:01:01'); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE passes=1 WITH merged_test AS( diff --git a/tests/queries/0_stateless/03130_analyzer_self_join_group_by.sql b/tests/queries/0_stateless/03130_analyzer_self_join_group_by.sql index 66b6b99981b..81af10c4a64 100644 --- a/tests/queries/0_stateless/03130_analyzer_self_join_group_by.sql +++ b/tests/queries/0_stateless/03130_analyzer_self_join_group_by.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (x Int32) ENGINE = MergeTree ORDER BY x; INSERT INTO t1 VALUES (1), (2), (3); -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT t2.x FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY t1.x; -- { serverError NOT_AN_AGGREGATE } SELECT t2.number FROM numbers(10) as t1 JOIN numbers(10) as t2 ON t1.number = t2.number GROUP BY t1.number; -- { serverError NOT_AN_AGGREGATE } diff --git a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql index 28362f1f469..4e42d94fd79 100644 --- a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql +++ b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET join_algorithm = 'hash'; DROP TABLE IF EXISTS test_table_1; diff --git a/tests/queries/0_stateless/03132_rewrite_aggregate_function_with_if_implicit_cast.sql b/tests/queries/0_stateless/03132_rewrite_aggregate_function_with_if_implicit_cast.sql index 4e8096fbed1..7b7237ea7d1 100644 --- a/tests/queries/0_stateless/03132_rewrite_aggregate_function_with_if_implicit_cast.sql +++ b/tests/queries/0_stateless/03132_rewrite_aggregate_function_with_if_implicit_cast.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- { echoOn } SELECT concat(1, sum(if(0, toUInt128(concat('%', toLowCardinality(toNullable(1)), toUInt256(1))), materialize(0)))); SELECT any(if((number % 10) = 5, number, CAST(NULL, 'Nullable(Int128)'))) AS a, toTypeName(a) FROM numbers(100) AS a; diff --git a/tests/queries/0_stateless/03132_sqlancer_union_all.sql 
b/tests/queries/0_stateless/03132_sqlancer_union_all.sql index 576da9e53d5..2502ce31ec3 100644 --- a/tests/queries/0_stateless/03132_sqlancer_union_all.sql +++ b/tests/queries/0_stateless/03132_sqlancer_union_all.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t0; DROP TABLE IF EXISTS t1; diff --git a/tests/queries/0_stateless/03142_untuple_crash.sql b/tests/queries/0_stateless/03142_untuple_crash.sql index ac5dbba0de1..45106973271 100644 --- a/tests/queries/0_stateless/03142_untuple_crash.sql +++ b/tests/queries/0_stateless/03142_untuple_crash.sql @@ -1,2 +1,2 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT untuple(x -> 0) -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/tests/queries/0_stateless/03142_window_function_limit_by.sql b/tests/queries/0_stateless/03142_window_function_limit_by.sql index 44dd890db41..a3f6b56aee5 100644 --- a/tests/queries/0_stateless/03142_window_function_limit_by.sql +++ b/tests/queries/0_stateless/03142_window_function_limit_by.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; -- https://github.com/ClickHouse/ClickHouse/issues/55965 @@ -8,7 +8,7 @@ CREATE TABLE error_win_func `k` String, `in` UInt64, `out` UInt64 -) +) ENGINE = MergeTree ORDER BY k AS SELECT * from VALUES (('a', 2, 4), ('a', 4, 2), ('a', 6, 3), ('a', 8, 4)); @@ -16,7 +16,7 @@ SELECT * from VALUES (('a', 2, 4), ('a', 4, 2), ('a', 6, 3), ('a', 8, 4)); SELECT k, in / out AS ratio, - count(*) OVER w AS count_rows_w + count(*) OVER w AS count_rows_w FROM error_win_func WINDOW w AS (ROWS BETWEEN CURRENT ROW AND 3 FOLLOWING) @@ -38,4 +38,3 @@ WHERE st IN ('x', 'y') LIMIT 1 BY m; DROP TABLE t; - diff --git a/tests/queries/0_stateless/03143_cte_scope.sql b/tests/queries/0_stateless/03143_cte_scope.sql index 1b1d9444651..cf4478e2ff9 100644 --- a/tests/queries/0_stateless/03143_cte_scope.sql +++ b/tests/queries/0_stateless/03143_cte_scope.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/56287 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS tmp_a; DROP TABLE IF EXISTS tmp_b; diff --git a/tests/queries/0_stateless/03143_group_by_constant_secondary.sql b/tests/queries/0_stateless/03143_group_by_constant_secondary.sql index 030e3212748..099160dd522 100644 --- a/tests/queries/0_stateless/03143_group_by_constant_secondary.sql +++ b/tests/queries/0_stateless/03143_group_by_constant_secondary.sql @@ -3,6 +3,6 @@ SELECT count() FROM remote(test_cluster_two_shards, system, one) GROUP BY 'hi' SETTINGS - allow_experimental_analyzer = 1, + enable_analyzer = 1, group_by_two_level_threshold = 1, group_by_two_level_threshold_bytes = 33950592; diff --git a/tests/queries/0_stateless/03143_parallel_replicas_mat_view_bug.sql b/tests/queries/0_stateless/03143_parallel_replicas_mat_view_bug.sql index 97ed29802c7..02a8a2f3ce0 100644 --- a/tests/queries/0_stateless/03143_parallel_replicas_mat_view_bug.sql +++ b/tests/queries/0_stateless/03143_parallel_replicas_mat_view_bug.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS mv_table; DROP TABLE IF EXISTS null_table; SET cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=4, allow_experimental_parallel_reading_from_replicas=1; -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE TABLE null_table (str String) ENGINE = Null; CREATE MATERIALIZED VIEW mv_table (str String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03143_parallel_replicas_mat_view_bug', 
'{replica}') ORDER BY str AS SELECT str AS str FROM null_table; diff --git a/tests/queries/0_stateless/03144_aggregate_states_with_different_types.sql b/tests/queries/0_stateless/03144_aggregate_states_with_different_types.sql index 5bb4a8c9c9c..4445d5b8449 100644 --- a/tests/queries/0_stateless/03144_aggregate_states_with_different_types.sql +++ b/tests/queries/0_stateless/03144_aggregate_states_with_different_types.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; select * APPLY hex from ( diff --git a/tests/queries/0_stateless/03144_invalid_filter.sql b/tests/queries/0_stateless/03144_invalid_filter.sql index deb8d7b96b3..5b434972cc7 100644 --- a/tests/queries/0_stateless/03144_invalid_filter.sql +++ b/tests/queries/0_stateless/03144_invalid_filter.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/48049 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TABLE test_table (`id` UInt64, `value` String) ENGINE = TinyLog() AS Select number, number::String from numbers(10); diff --git a/tests/queries/0_stateless/03146_bug47862.sql b/tests/queries/0_stateless/03146_bug47862.sql index 918f2316bea..0f411400618 100644 --- a/tests/queries/0_stateless/03146_bug47862.sql +++ b/tests/queries/0_stateless/03146_bug47862.sql @@ -9,4 +9,4 @@ FROM ( WHERE val_idx != 0 ) WHERE cast_res > 0 -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03146_tpc_ds_grouping.sql b/tests/queries/0_stateless/03146_tpc_ds_grouping.sql index f48c40e9bc4..cb290086b51 100644 --- a/tests/queries/0_stateless/03146_tpc_ds_grouping.sql +++ b/tests/queries/0_stateless/03146_tpc_ds_grouping.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/46335 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT key_a + key_b AS d, rank() OVER () AS f diff --git a/tests/queries/0_stateless/03148_query_log_used_dictionaries.sql b/tests/queries/0_stateless/03148_query_log_used_dictionaries.sql index 1b647a7ee62..75b7489ae5b 100644 --- a/tests/queries/0_stateless/03148_query_log_used_dictionaries.sql +++ b/tests/queries/0_stateless/03148_query_log_used_dictionaries.sql @@ -15,7 +15,7 @@ SELECT dictGet('03148_dictionary', 'name', number) as dict_value FROM numbers(1) SETTINGS - allow_experimental_analyzer = 1, + enable_analyzer = 1, log_comment = 'simple_with_analyzer' FORMAT Null; @@ -34,7 +34,7 @@ FROM ( FROM numbers(1) ) t SETTINGS - allow_experimental_analyzer = 1, + enable_analyzer = 1, log_comment = 'nested_with_analyzer' FORMAT Null; @@ -50,7 +50,7 @@ SELECT dictGet('03148_dictionary', 'name', number) as dict_value FROM numbers(1) SETTINGS - allow_experimental_analyzer = 0, + enable_analyzer = 0, log_comment = 'simple_without_analyzer' FORMAT Null; @@ -69,7 +69,7 @@ FROM ( FROM numbers(1) ) t SETTINGS - allow_experimental_analyzer = 0, + enable_analyzer = 0, log_comment = 'nested_without_analyzer' FORMAT Null; diff --git a/tests/queries/0_stateless/03148_setting_max_streams_to_max_threads_ratio_overflow.sql b/tests/queries/0_stateless/03148_setting_max_streams_to_max_threads_ratio_overflow.sql index af326c15bd8..38f25f60e09 100644 --- a/tests/queries/0_stateless/03148_setting_max_streams_to_max_threads_ratio_overflow.sql +++ b/tests/queries/0_stateless/03148_setting_max_streams_to_max_threads_ratio_overflow.sql @@ -7,8 +7,8 @@ CREATE TABLE test_table INSERT INTO test_table VALUES (0, 'Value_0'); -SELECT * FROM test_table SETTINGS max_threads = 1025, 
max_streams_to_max_threads_ratio = -9223372036854775808, allow_experimental_analyzer = 1; -- { serverError PARAMETER_OUT_OF_BOUND } +SELECT * FROM test_table SETTINGS max_threads = 1025, max_streams_to_max_threads_ratio = -9223372036854775808, enable_analyzer = 1; -- { serverError PARAMETER_OUT_OF_BOUND } -SELECT * FROM test_table SETTINGS max_threads = 1025, max_streams_to_max_threads_ratio = -9223372036854775808, allow_experimental_analyzer = 0; -- { serverError PARAMETER_OUT_OF_BOUND } +SELECT * FROM test_table SETTINGS max_threads = 1025, max_streams_to_max_threads_ratio = -9223372036854775808, enable_analyzer = 0; -- { serverError PARAMETER_OUT_OF_BOUND } DROP TABLE test_table; diff --git a/tests/queries/0_stateless/03150_grouping_sets_use_nulls_pushdown.sql b/tests/queries/0_stateless/03150_grouping_sets_use_nulls_pushdown.sql index a0bd1381351..c39143216d4 100644 --- a/tests/queries/0_stateless/03150_grouping_sets_use_nulls_pushdown.sql +++ b/tests/queries/0_stateless/03150_grouping_sets_use_nulls_pushdown.sql @@ -21,21 +21,21 @@ SELECT * FROM ( SELECT day_ FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) WHERE day_ = '2023-01-05' ORDER BY * -SETTINGS allow_experimental_analyzer=1; +SETTINGS enable_analyzer=1; SELECT * FROM ( SELECT * FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) WHERE day_ = '2023-01-05' GROUP BY * ORDER BY ALL -SETTINGS allow_experimental_analyzer=1; +SETTINGS enable_analyzer=1; SELECT * FROM ( SELECT * FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (*), (day_) ) ) WHERE day_ = '2023-01-05' GROUP BY GROUPING SETS (*) ORDER BY type_1 -SETTINGS allow_experimental_analyzer=1; +SETTINGS enable_analyzer=1; SELECT * FROM ( SELECT day_, COUNT(*) FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) diff --git a/tests/queries/0_stateless/03151_analyzer_view_read_only_necessary_columns.sql b/tests/queries/0_stateless/03151_analyzer_view_read_only_necessary_columns.sql index 40204b5cd03..ac86a8705d5 100644 --- a/tests/queries/0_stateless/03151_analyzer_view_read_only_necessary_columns.sql +++ b/tests/queries/0_stateless/03151_analyzer_view_read_only_necessary_columns.sql @@ -8,7 +8,7 @@ CREATE TABLE test_table DROP VIEW IF EXISTS test_view; CREATE VIEW test_view AS SELECT id, value FROM test_table; -EXPLAIN header = 1 SELECT sum(id) FROM test_view settings allow_experimental_analyzer=1; +EXPLAIN header = 1 SELECT sum(id) FROM test_view settings enable_analyzer=1; DROP VIEW test_view; DROP TABLE test_table; diff --git a/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.sql b/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.sql index 645e89034d7..ea8a9e1f8d7 100644 --- a/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.sql +++ b/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS users; CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree order by (uid, name); diff --git a/tests/queries/0_stateless/03154_recursive_cte_distributed.sql b/tests/queries/0_stateless/03154_recursive_cte_distributed.sql index b8c3356d5d6..47e0b9aad0b 100644 --- a/tests/queries/0_stateless/03154_recursive_cte_distributed.sql +++ b/tests/queries/0_stateless/03154_recursive_cte_distributed.sql @@ -1,6 +1,6 @@ -- Tags: shard -SET allow_experimental_analyzer = 1; +SET 
enable_analyzer = 1; DROP TABLE IF EXISTS test_table; CREATE TABLE test_table diff --git a/tests/queries/0_stateless/03155_analyzer_interpolate.sql b/tests/queries/0_stateless/03155_analyzer_interpolate.sql index 30423cb86ff..42c5f5ef65f 100644 --- a/tests/queries/0_stateless/03155_analyzer_interpolate.sql +++ b/tests/queries/0_stateless/03155_analyzer_interpolate.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/62464 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT n, [number] AS inter FROM ( SELECT toFloat32(number % 10) AS n, number diff --git a/tests/queries/0_stateless/03155_in_nested_subselects.sql b/tests/queries/0_stateless/03155_in_nested_subselects.sql index 4f5ccd30aa3..faecb73040d 100644 --- a/tests/queries/0_stateless/03155_in_nested_subselects.sql +++ b/tests/queries/0_stateless/03155_in_nested_subselects.sql @@ -1,5 +1,5 @@ -- https://github.com/ClickHouse/ClickHouse/issues/63833 -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; create table Example (id Int32) engine = MergeTree ORDER BY id; INSERT INTO Example SELECT number AS id FROM numbers(2); @@ -8,10 +8,10 @@ create table Null engine=Null as Example ; --create table Null engine=MergeTree order by id as Example ; create materialized view Transform to Example as -select * from Null -join ( select * FROM Example +select * from Null +join ( select * FROM Example WHERE id IN (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM Null))))) - ) as old + ) as old using id; INSERT INTO Null SELECT number AS id FROM numbers(2); diff --git a/tests/queries/0_stateless/03161_cnf_reduction.reference b/tests/queries/0_stateless/03161_cnf_reduction.reference index 5e39c0f3223..41051af362f 100644 --- a/tests/queries/0_stateless/03161_cnf_reduction.reference +++ b/tests/queries/0_stateless/03161_cnf_reduction.reference @@ -2,7 +2,7 @@ SELECT id FROM `03161_table` WHERE f -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1 +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1 -- Expected result with analyzer: 1 @@ -11,7 +11,7 @@ SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experim SELECT id FROM `03161_table` WHERE f -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0 +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0 -- Expected result w/o analyzer: 1 diff --git a/tests/queries/0_stateless/03161_cnf_reduction.sql b/tests/queries/0_stateless/03161_cnf_reduction.sql index b34e9171d45..c232823e9cd 100644 --- a/tests/queries/0_stateless/03161_cnf_reduction.sql +++ b/tests/queries/0_stateless/03161_cnf_reduction.sql @@ -10,7 +10,7 @@ EXPLAIN SYNTAX SELECT id FROM 03161_table WHERE f AND (NOT(f) OR f) -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1; +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1; SELECT ''; @@ -19,7 +19,7 @@ SELECT '-- Expected result with analyzer:'; SELECT id FROM 03161_table WHERE f AND (NOT(f) OR f) -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1; +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1; SELECT ''; @@ -29,7 +29,7 @@ EXPLAIN SYNTAX SELECT id FROM 03161_table WHERE f AND (NOT(f) OR f) -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, 
allow_experimental_analyzer = 0; +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0; SELECT ''; @@ -38,7 +38,7 @@ SELECT '-- Expected result w/o analyzer:'; SELECT id FROM 03161_table WHERE f AND (NOT(f) OR f) -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0; +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0; DROP TABLE IF EXISTS 03161_table; @@ -58,7 +58,7 @@ SELECT '-- Reproducer from the issue with analyzer'; SELECT count() FROM 03161_reproducer WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 1; +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1; SELECT ''; @@ -67,6 +67,6 @@ SELECT '-- Reproducer from the issue w/o analyzer'; SELECT count() FROM 03161_reproducer WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) -SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, allow_experimental_analyzer = 0; +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0; DROP TABLE IF EXISTS 03161_reproducer; diff --git a/tests/queries/0_stateless/03164_analyzer_global_in_alias.sql b/tests/queries/0_stateless/03164_analyzer_global_in_alias.sql index 00c293334ee..ccfacd12d98 100644 --- a/tests/queries/0_stateless/03164_analyzer_global_in_alias.sql +++ b/tests/queries/0_stateless/03164_analyzer_global_in_alias.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT 1 GLOBAL IN (SELECT 1) AS s, s FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; SELECT 1 GLOBAL IN (SELECT 1) AS s FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; diff --git a/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql b/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql index dbffbc1af71..b1018d00082 100644 --- a/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql +++ b/tests/queries/0_stateless/03164_early_constant_folding_analyzer.sql @@ -27,4 +27,4 @@ ENGINE = MergeTree ORDER BY (date, pull_request_number, commit_sha, check_name, insert into checks select * from generateRandom() limit 1; -select trimLeft(explain) from (explain SELECT count(1) FROM checks WHERE test_name IS NOT NULL) where explain like '%ReadFromPreparedSource%' SETTINGS allow_experimental_analyzer = 1, allow_experimental_parallel_reading_from_replicas = 0; +select trimLeft(explain) from (explain SELECT count(1) FROM checks WHERE test_name IS NOT NULL) where explain like '%ReadFromPreparedSource%' SETTINGS enable_analyzer = 1, allow_experimental_parallel_reading_from_replicas = 0; diff --git a/tests/queries/0_stateless/03164_materialize_skip_index.sql b/tests/queries/0_stateless/03164_materialize_skip_index.sql index 4e59ef6b6cd..0443872596d 100644 --- a/tests/queries/0_stateless/03164_materialize_skip_index.sql +++ b/tests/queries/0_stateless/03164_materialize_skip_index.sql @@ -9,7 +9,7 @@ CREATE TABLE t_skip_index_insert ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 4; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SET materialize_skip_indexes_on_insert = 0; SYSTEM STOP MERGES t_skip_index_insert; diff --git 
a/tests/queries/0_stateless/03165_order_by_duplicate.reference b/tests/queries/0_stateless/03165_order_by_duplicate.reference index 5d5e7a33f4a..93d312d5d02 100644 --- a/tests/queries/0_stateless/03165_order_by_duplicate.reference +++ b/tests/queries/0_stateless/03165_order_by_duplicate.reference @@ -36,4 +36,4 @@ QUERY id: 0 LIMIT BY LIST id: 16, nodes: 1 COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3 - SETTINGS allow_experimental_analyzer=1 + SETTINGS enable_analyzer=1 diff --git a/tests/queries/0_stateless/03165_order_by_duplicate.sql b/tests/queries/0_stateless/03165_order_by_duplicate.sql index 0054cbc36a6..b8bcc10e119 100644 --- a/tests/queries/0_stateless/03165_order_by_duplicate.sql +++ b/tests/queries/0_stateless/03165_order_by_duplicate.sql @@ -13,4 +13,4 @@ WHERE id IN ( ) ORDER BY id ASC LIMIT 1 BY id -SETTINGS allow_experimental_analyzer = 1; +SETTINGS enable_analyzer = 1; diff --git a/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql b/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql index e32d23920dd..e27e8645466 100644 --- a/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql +++ b/tests/queries/0_stateless/03166_mv_prewhere_duplicating_name_bug.sql @@ -3,5 +3,5 @@ create table dst (s String, lc LowCardinality(String)) engine MergeTree order by create materialized view mv to dst (s String, lc String) as select 'a' as s, toLowCardinality('b') as lc from src; insert into src values (1); -select s, lc from mv where not ignore(lc) settings allow_experimental_analyzer=0; -select s, lc from mv where not ignore(lc) settings allow_experimental_analyzer=1; +select s, lc from mv where not ignore(lc) settings enable_analyzer=0; +select s, lc from mv where not ignore(lc) settings enable_analyzer=1; diff --git a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql index d3e3b38a3cb..96221f27e73 100644 --- a/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql +++ b/tests/queries/0_stateless/03166_skip_indexes_vertical_merge_1.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS t_ind_merge_1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; CREATE TABLE t_ind_merge_1 (a UInt64, b UInt64, c UInt64, d UInt64, INDEX idx_b b TYPE minmax) ENGINE = MergeTree diff --git a/tests/queries/0_stateless/03167_parametrized_view_with_cte.sql b/tests/queries/0_stateless/03167_parametrized_view_with_cte.sql index 1ac5540047a..ae6ab586415 100644 --- a/tests/queries/0_stateless/03167_parametrized_view_with_cte.sql +++ b/tests/queries/0_stateless/03167_parametrized_view_with_cte.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; CREATE OR REPLACE VIEW param_test AS SELECT {test_str:String} as s_result; WITH 'OK' AS s SELECT * FROM param_test(test_str=s); WITH (SELECT 123) AS s SELECT * FROM param_test(test_str=s); diff --git a/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql b/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql index 50d99b851a6..5ab32415f1c 100644 --- a/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql +++ b/tests/queries/0_stateless/03169_optimize_injective_functions_inside_uniq_crash.sql @@ -7,7 +7,7 @@ FROM FROM numbers(4096 * 100) GROUP BY k ) -SETTINGS allow_experimental_analyzer = 1, optimize_injective_functions_inside_uniq=0; +SETTINGS enable_analyzer = 1, 
optimize_injective_functions_inside_uniq=0; SELECT sum(u) FROM @@ -18,4 +18,4 @@ FROM FROM numbers(4096 * 100) GROUP BY k ) -SETTINGS allow_experimental_analyzer = 1, optimize_injective_functions_inside_uniq=1; +SETTINGS enable_analyzer = 1, optimize_injective_functions_inside_uniq=1; diff --git a/tests/queries/0_stateless/03170_part_offset_as_table_column.sql b/tests/queries/0_stateless/03170_part_offset_as_table_column.sql index 36cbc156744..7711457f23f 100644 --- a/tests/queries/0_stateless/03170_part_offset_as_table_column.sql +++ b/tests/queries/0_stateless/03170_part_offset_as_table_column.sql @@ -9,12 +9,12 @@ ORDER BY key; INSERT INTO test_table (key) SELECT number FROM numbers(10); -set allow_experimental_analyzer=0; +set enable_analyzer=0; SELECT * FROM test_table; -set allow_experimental_analyzer=1; +set enable_analyzer=1; SELECT * FROM test_table; diff --git a/tests/queries/0_stateless/03171_condition_pushdown.sql b/tests/queries/0_stateless/03171_condition_pushdown.sql index 9cfe41ce921..fcf5db886e9 100644 --- a/tests/queries/0_stateless/03171_condition_pushdown.sql +++ b/tests/queries/0_stateless/03171_condition_pushdown.sql @@ -1,5 +1,5 @@ -- This query succeeds only if it is correctly optimized. -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT * FROM (SELECT * FROM numbers(1e19)) AS t1, (SELECT * FROM numbers(1e19)) AS t2 WHERE t1.number IN (123, 456) AND t2.number = t1.number ORDER BY ALL; -- Still TODO: diff --git a/tests/queries/0_stateless/03171_function_to_subcolumns_fuzzer.sql b/tests/queries/0_stateless/03171_function_to_subcolumns_fuzzer.sql index f10019a78dd..53476c5bdd1 100644 --- a/tests/queries/0_stateless/03171_function_to_subcolumns_fuzzer.sql +++ b/tests/queries/0_stateless/03171_function_to_subcolumns_fuzzer.sql @@ -1,5 +1,5 @@ SET optimize_functions_to_subcolumns = 1; -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS t_func_to_subcolumns_map_2; diff --git a/tests/queries/0_stateless/03173_forbid_qualify.sql b/tests/queries/0_stateless/03173_forbid_qualify.sql index d8cb2bad2ea..0a41385c52f 100644 --- a/tests/queries/0_stateless/03173_forbid_qualify.sql +++ b/tests/queries/0_stateless/03173_forbid_qualify.sql @@ -4,8 +4,8 @@ create table test_qualify (number Int64) ENGINE = MergeTree ORDER BY (number); insert into test_qualify SELECT * FROM numbers(100); select count() from test_qualify; -- 100 -select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS allow_experimental_analyzer = 1; -- 49 -select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS allow_experimental_analyzer = 0; -- { serverError NOT_IMPLEMENTED } +select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 1; -- 49 +select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 0; -- { serverError NOT_IMPLEMENTED } delete from test_qualify where number in (select number from test_qualify qualify row_number() over (order by number) = 50); -- { serverError UNFINISHED } select count() from test_qualify; -- 100 diff --git a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh index 20a29e2734e..7c567c0f58f 100755 --- a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh +++ b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh @@ -17,7 +17,7 @@ $CLICKHOUSE_CLIENT -nq " " $CLICKHOUSE_CLIENT -nq " -SET 
allow_experimental_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 10, allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1; +SET enable_analyzer = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 10, allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, max_threads = 1; SELECT id, diff --git a/tests/queries/0_stateless/03174_merge_join_bug.sql b/tests/queries/0_stateless/03174_merge_join_bug.sql index ab4cb6cd4a9..ab3c384765d 100644 --- a/tests/queries/0_stateless/03174_merge_join_bug.sql +++ b/tests/queries/0_stateless/03174_merge_join_bug.sql @@ -1,6 +1,6 @@ -- Tags: no-random-settings -SET allow_experimental_analyzer=1, join_algorithm = 'full_sorting_merge'; +SET enable_analyzer=1, join_algorithm = 'full_sorting_merge'; CREATE TABLE xxxx_yyy (key UInt32, key_b ALIAS key) ENGINE=MergeTree() ORDER BY key SETTINGS ratio_of_defaults_for_sparse_serialization=0.0; INSERT INTO xxxx_yyy SELECT number FROM numbers(10); diff --git a/tests/queries/0_stateless/03199_join_with_materialized_column.sql b/tests/queries/0_stateless/03199_join_with_materialized_column.sql index 8c53c5b3e66..fef171cb9d1 100644 --- a/tests/queries/0_stateless/03199_join_with_materialized_column.sql +++ b/tests/queries/0_stateless/03199_join_with_materialized_column.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS table_with_materialized; CREATE TABLE table_with_materialized (col String MATERIALIZED 'A') ENGINE = Memory; diff --git a/tests/queries/0_stateless/03199_queries_with_new_analyzer.sql b/tests/queries/0_stateless/03199_queries_with_new_analyzer.sql index c32d7524492..d400a025f76 100644 --- a/tests/queries/0_stateless/03199_queries_with_new_analyzer.sql +++ b/tests/queries/0_stateless/03199_queries_with_new_analyzer.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer=1; +SET enable_analyzer=1; SELECT *, ngramMinHash(*) AS minhash, mortonEncode(untuple(ngramMinHash(*))) AS z FROM (SELECT toString(number) FROM numbers(10)) @@ -38,4 +38,3 @@ ORDER BY tuple(); INSERT INTO seq VALUES (0), (6), (7); WITH (Select min(number), max(number) from seq) as range Select * from numbers(range.1, range.2); - diff --git a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql index a01a595dbb5..25f3bb0f4c8 100644 --- a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql +++ b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql @@ -2,6 +2,5 @@ set allow_experimental_dynamic_type=1; create table test (d Dynamic) engine=Memory; insert into table test select * from numbers(5); alter table test modify column d Dynamic(max_types=1); -select d.UInt64 from test settings allow_experimental_analyzer=1; -select d.UInt64 from test settings allow_experimental_analyzer=0; - +select d.UInt64 from test settings enable_analyzer=1; +select d.UInt64 from test settings enable_analyzer=0; diff --git a/tests/queries/0_stateless/03200_subcolumns_join_use_nulls.sql b/tests/queries/0_stateless/03200_subcolumns_join_use_nulls.sql index 2dd0a37657d..6777a753490 100644 --- a/tests/queries/0_stateless/03200_subcolumns_join_use_nulls.sql +++ b/tests/queries/0_stateless/03200_subcolumns_join_use_nulls.sql @@ -8,6 +8,6 @@ SELECT count() FROM (SELECT number FROM numbers(10)) as tbl LEFT JOIN t_subcolumns_join ON 
number = id WHERE id is null -SETTINGS allow_experimental_analyzer = 1, optimize_functions_to_subcolumns = 1, join_use_nulls = 1; +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 1, join_use_nulls = 1; DROP TABLE t_subcolumns_join; diff --git a/tests/queries/0_stateless/03201_sumIf_to_countIf_return_type.sql b/tests/queries/0_stateless/03201_sumIf_to_countIf_return_type.sql index 24369fd6497..b791f328da4 100644 --- a/tests/queries/0_stateless/03201_sumIf_to_countIf_return_type.sql +++ b/tests/queries/0_stateless/03201_sumIf_to_countIf_return_type.sql @@ -1,2 +1,2 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; EXPLAIN QUERY TREE SELECT tuple(sumIf(toInt64(1), 1)) FROM numbers(100) settings optimize_rewrite_sum_if_to_count_if=1; diff --git a/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql b/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql index 03e9e0feb40..1d10a2b9212 100644 --- a/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql +++ b/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql @@ -1,4 +1,4 @@ -SET allow_experimental_analyzer = 1; +SET enable_analyzer = 1; SELECT count() AS c FROM test.hits WHERE CounterID = 1704509 WITH TOTALS SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; SELECT count() AS c FROM test.hits WHERE CounterID = 1704509 WITH TOTALS SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; @@ -14,4 +14,3 @@ SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 170450 SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; - diff --git a/tests/queries/1_stateful/00172_early_constant_folding.sql b/tests/queries/1_stateful/00172_early_constant_folding.sql index b4b58ba3cb0..343e87a26d1 100644 --- a/tests/queries/1_stateful/00172_early_constant_folding.sql +++ b/tests/queries/1_stateful/00172_early_constant_folding.sql @@ -2,5 +2,5 @@ set max_threads=10; set optimize_use_implicit_projections=1; -EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1 SETTINGS allow_experimental_analyzer = 0; -EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1 SETTINGS allow_experimental_analyzer = 1; +EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1 SETTINGS enable_analyzer = 0; +EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' 
OR 1 SETTINGS enable_analyzer = 1; diff --git a/tests/queries/1_stateful/00173_group_by_use_nulls.reference b/tests/queries/1_stateful/00173_group_by_use_nulls.reference index e82b996ad3c..4f8d10a9221 100644 --- a/tests/queries/1_stateful/00173_group_by_use_nulls.reference +++ b/tests/queries/1_stateful/00173_group_by_use_nulls.reference @@ -9,7 +9,7 @@ 33010362 1336 800784 1336 -- { echoOn } -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; SELECT CounterID AS k, quantileBFloat16(0.5)(ResolutionWidth) diff --git a/tests/queries/1_stateful/00173_group_by_use_nulls.sql b/tests/queries/1_stateful/00173_group_by_use_nulls.sql index 8531e9efaf8..ed537bb289a 100644 --- a/tests/queries/1_stateful/00173_group_by_use_nulls.sql +++ b/tests/queries/1_stateful/00173_group_by_use_nulls.sql @@ -21,7 +21,7 @@ LIMIT 10 SETTINGS group_by_use_nulls = 1 FORMAT Null; -- { echoOn } -set allow_experimental_analyzer = 1; +set enable_analyzer = 1; SELECT CounterID AS k, From cc5dd9830e5063d5a6c71cd9c6e406961e78861d Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 12 Jul 2024 14:56:47 +0200 Subject: [PATCH 1631/2361] Modify the docs --- docs/en/development/architecture.md | 2 +- docs/ru/development/architecture.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index c5d13ab63a5..23531f742c5 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query `InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query. -To address current problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` is being developed. It is a new version of `InterpreterSelectQuery` that does not use `ExpressionAnalyzer` and introduces an additional abstraction level between `AST` and `QueryPipeline` called `QueryTree`. It is not production-ready yet, but it can be tested with the `enable_analyzer` flag. +To address problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. IThis is a new version of the `Interpreter Select Query`, which does not use the `Expression Analyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree'. It is fully ready for use in production, but just in case it can be turned off by setting the value of the `enable_analyzer` setting to `false`. ## Functions {#functions} diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index 0701c8f4a51..0833120c34d 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -115,7 +115,7 @@ ClickHouse — Ð¿Ð¾Ð»Ð½Ð¾Ñ†ÐµÐ½Ð½Ð°Ñ ÑÑ‚Ð¾Ð»Ð±Ñ†Ð¾Ð²Ð°Ñ Ð¡Ð£Ð‘Ð”. Данны `InterpreterSelectQuery` иÑпользует `ExpressionAnalyzer` и `ExpressionActions` механизмы Ð´Ð»Ñ Ð°Ð½Ð°Ð»Ð¸Ð·Ð° запроÑов и преобразований. Именно здеÑÑŒ выполнÑетÑÑ Ð±Ð¾Ð»ÑŒÑˆÐ¸Ð½Ñтво оптимизаций запроÑов на оÑнове правил. 
`ExpressionAnalyzer` написан довольно грязно и должен быть переписан: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запросы. -Для решения текущих проблем, существующих в интерпретаторах, разрабатывается новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он еще не готов к использованию в продакшене, но его можно протестировать с помощью флага `enable_analyzer`. +Для решения  проблем, существующих в интерпретаторах, был разработан новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он полностью готов к использованию в продакшене, но на всякий случай его можно выключить, установив значение настройки `enable_analyzer` в `false`. ## Функции {#functions} From 62d5e83f2540d47487846da19d5297794126705e Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 12 Jul 2024 15:01:48 +0200 Subject: [PATCH 1632/2361] Double space... --- docs/ru/development/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index 0833120c34d..d2afbf233b8 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -115,7 +115,7 @@ ClickHouse — полноценная столбцовая СУБД. Данны `InterpreterSelectQuery` использует `ExpressionAnalyzer` и `ExpressionActions` механизмы для анализа запросов и преобразований. Именно здесь выполняется большинство оптимизаций запросов на основе правил. `ExpressionAnalyzer` написан довольно грязно и должен быть переписан: различные преобразования запросов и оптимизации должны быть извлечены в отдельные классы, чтобы позволить модульные преобразования или запросы. -Для решения  проблем, существующих в интерпретаторах, был разработан новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он полностью готов к использованию в продакшене, но на всякий случай его можно выключить, установив значение настройки `enable_analyzer` в `false`. +Для решения проблем, существующих в интерпретаторах, был разработан новый `InterpreterSelectQueryAnalyzer`. Это новая версия `InterpreterSelectQuery`, которая не использует `ExpressionAnalyzer` и вводит дополнительный уровень абстракции между `AST` и `QueryPipeline`, называемый `QueryTree`. Он полностью готов к использованию в продакшене, но на всякий случай его можно выключить, установив значение настройки `enable_analyzer` в `false`.
## Функции {#functions} From aa66203f1732f22f4a0d5ed4c4816e148ffd7861 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 13 Jul 2024 18:46:24 +0200 Subject: [PATCH 1633/2361] Update architecture.md --- docs/en/development/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index 23531f742c5..a1a5901f859 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -118,7 +118,7 @@ And the result of interpreting the `INSERT SELECT` query is a "completed" `Query `InterpreterSelectQuery` uses `ExpressionAnalyzer` and `ExpressionActions` machinery for query analysis and transformations. This is where most rule-based query optimizations are performed. `ExpressionAnalyzer` is quite messy and should be rewritten: various query transformations and optimizations should be extracted into separate classes to allow for modular transformations of the query. -To address problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. IThis is a new version of the `Interpreter Select Query`, which does not use the `Expression Analyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree'. It is fully ready for use in production, but just in case it can be turned off by setting the value of the `enable_analyzer` setting to `false`. +To address problems that exist in interpreters, a new `InterpreterSelectQueryAnalyzer` has been developed. This is a new version of the `InterpreterSelectQuery`, which does not use the `ExpressionAnalyzer` and introduces an additional layer of abstraction between `AST` and `QueryPipeline`, called `QueryTree'. It is fully ready for use in production, but just in case it can be turned off by setting the value of the `enable_analyzer` setting to `false`. 
## Functions {#functions} From ee2589df597cd853259a18212ae979d7e6d65150 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 15 Jul 2024 23:06:03 +0200 Subject: [PATCH 1634/2361] Add a settings to the history changelog --- src/Core/SettingsChangesHistory.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 08fb6dc3301..dc81932f923 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -524,6 +524,10 @@ static std::initializer_list Date: Tue, 16 Jul 2024 13:37:34 +0000 Subject: [PATCH 1635/2361] Fix tests --- .../0_stateless/02995_baseline_24_7_2.tsv | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/queries/0_stateless/02995_baseline_24_7_2.tsv b/tests/queries/0_stateless/02995_baseline_24_7_2.tsv index 10b392f3e04..d3a07ecb644 100644 --- a/tests/queries/0_stateless/02995_baseline_24_7_2.tsv +++ b/tests/queries/0_stateless/02995_baseline_24_7_2.tsv @@ -18,7 +18,11 @@ allow_distributed_ddl 1 allow_drop_detached 0 allow_execute_multiif_columnar 1 allow_experimental_alter_materialized_view_structure 1 +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv allow_experimental_analyzer 1 +======= +allow_experimental_analyzer 0 +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv allow_experimental_annoy_index 0 allow_experimental_bigint_types 1 allow_experimental_codecs 0 @@ -159,7 +163,10 @@ cloud_mode 0 cloud_mode_engine 1 cluster_for_parallel_replicas collect_hash_table_stats_during_aggregation 1 +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv collect_hash_table_stats_during_joins 1 +======= +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv column_names_for_schema_inference compatibility compatibility_ignore_auto_increment_in_create_table 0 @@ -329,9 +336,13 @@ format_regexp_escaping_rule Raw format_regexp_skip_unmatched 0 format_schema format_template_resultset +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv format_template_resultset_format format_template_row format_template_row_format +======= +format_template_row +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv format_template_rows_between_delimiter \n format_tsv_null_representation \\N formatdatetime_f_prints_single_zero 0 @@ -388,8 +399,11 @@ iceberg_engine_ignore_schema_evolution 0 idle_connection_timeout 3600 ignore_cold_parts_seconds 0 ignore_data_skipping_indices +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv ignore_drop_queries_probability 0 ignore_materialized_views_with_dropped_target_table 0 +======= +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv ignore_on_cluster_for_replicated_access_entities_queries 0 ignore_on_cluster_for_replicated_named_collections_queries 0 ignore_on_cluster_for_replicated_udf_queries 0 @@ -712,8 +726,13 @@ mutations_execute_subqueries_on_initiator 0 mutations_max_literal_size_to_replace 16384 mutations_sync 0 mysql_datatypes_support_level +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv mysql_map_fixed_string_to_text_in_show_columns 1 mysql_map_string_to_text_in_show_columns 1 +======= +mysql_map_fixed_string_to_text_in_show_columns 0 +mysql_map_string_to_text_in_show_columns 0 +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv mysql_max_rows_to_insert 65536 network_compression_method LZ4 network_zstd_compression_level 1 @@ -780,9 +799,13 @@ 
os_thread_priority 0 output_format_arrow_compression_method lz4_frame output_format_arrow_fixed_string_as_fixed_byte_array 1 output_format_arrow_low_cardinality_as_dictionary 0 +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv output_format_arrow_string_as_string 1 output_format_arrow_use_64_bit_indexes_for_dictionary 0 output_format_arrow_use_signed_indexes_for_dictionary 1 +======= +output_format_arrow_string_as_string 0 +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv output_format_avro_codec output_format_avro_rows_in_file 1 output_format_avro_string_column_pattern @@ -1045,7 +1068,10 @@ totals_mode after_having_exclusive trace_profile_events 0 transfer_overflow_mode throw transform_null_in 0 +<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv traverse_shadow_remote_data_paths 0 +======= +>>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv union_default_mode unknown_packet_in_send_data 0 update_insert_deduplication_token_in_dependent_materialized_views 0 From a8ca5ad50b8998d9e13d81b66bc89434c1364704 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 29 Jul 2024 17:02:11 +0000 Subject: [PATCH 1636/2361] Fixed build and made setting an alias --- src/Client/HedgedConnections.cpp | 4 ++-- src/Client/MultiplexedConnections.cpp | 4 ++-- src/Core/Settings.h | 4 ++-- src/Core/SettingsChangesHistory.cpp | 1 + src/Interpreters/ActionsVisitor.cpp | 2 +- .../ClusterProxy/SelectStreamFactory.cpp | 4 ++-- .../ClusterProxy/executeQuery.cpp | 4 ++-- src/Interpreters/InterpreterCreateQuery.cpp | 4 ++-- src/Interpreters/InterpreterDescribeQuery.cpp | 2 +- src/Interpreters/InterpreterExplainQuery.cpp | 10 +++++----- src/Interpreters/InterpreterFactory.cpp | 6 +++--- src/Interpreters/InterpreterInsertQuery.cpp | 2 +- src/Interpreters/MutationsInterpreter.cpp | 6 +++--- src/Interpreters/executeQuery.cpp | 6 +++--- .../getHeaderForProcessingStage.cpp | 2 +- .../QueryPlan/DistributedCreateLocalPlan.cpp | 2 +- .../Transforms/buildPushingToViewsChain.cpp | 4 ++-- src/Server/TCPHandler.cpp | 6 +++--- src/Storages/AlterCommands.cpp | 2 +- src/Storages/IStorageCluster.cpp | 2 +- src/Storages/LiveView/StorageLiveView.cpp | 10 +++++----- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/MergeTree/RPNBuilder.cpp | 18 ++++++++--------- src/Storages/StorageBuffer.cpp | 2 +- src/Storages/StorageDistributed.cpp | 6 +++--- src/Storages/StorageExecutable.cpp | 2 +- src/Storages/StorageMerge.cpp | 20 +++++++++---------- src/Storages/StorageMergeTree.cpp | 6 +++--- src/Storages/StorageReplicatedMergeTree.cpp | 6 +++--- src/Storages/StorageView.cpp | 2 +- src/Storages/TTLDescription.cpp | 2 +- src/Storages/WindowView/StorageWindowView.cpp | 6 +++--- src/TableFunctions/TableFunctionView.cpp | 2 +- .../TableFunctionViewIfPermitted.cpp | 2 +- 34 files changed, 82 insertions(+), 81 deletions(-) diff --git a/src/Client/HedgedConnections.cpp b/src/Client/HedgedConnections.cpp index 1c7f222aa78..dd8348ea04f 100644 --- a/src/Client/HedgedConnections.cpp +++ b/src/Client/HedgedConnections.cpp @@ -196,11 +196,11 @@ void HedgedConnections::sendQuery( modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset; } - /// FIXME: Remove once we will make `enable_analyzer` obsolete setting. + /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting. /// Make the analyzer being set, so it will be effectively applied on the remote server. 
/// In other words, the initiator always controls whether the analyzer enabled or not for /// all servers involved in the distributed query processing. - modified_settings.set("enable_analyzer", static_cast(modified_settings.enable_analyzer)); + modified_settings.set("allow_experimental_analyzer", static_cast(modified_settings.allow_experimental_analyzer)); replica.connection->sendQuery( timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {}); diff --git a/src/Client/MultiplexedConnections.cpp b/src/Client/MultiplexedConnections.cpp index 7ca22ae4c81..244eccf1ed9 100644 --- a/src/Client/MultiplexedConnections.cpp +++ b/src/Client/MultiplexedConnections.cpp @@ -150,11 +150,11 @@ void MultiplexedConnections::sendQuery( client_info.number_of_current_replica = replica_info->number_of_current_replica; } - /// FIXME: Remove once we will make `enable_analyzer` obsolete setting. + /// FIXME: Remove once we will make `allow_experimental_analyzer` obsolete setting. /// Make the analyzer being set, so it will be effectively applied on the remote server. /// In other words, the initiator always controls whether the analyzer enabled or not for /// all servers involved in the distributed query processing. - modified_settings.set("enable_analyzer", static_cast(modified_settings.enable_analyzer)); + modified_settings.set("allow_experimental_analyzer", static_cast(modified_settings.allow_experimental_analyzer)); const bool enable_offset_parallel_processing = context->canUseOffsetParallelReplicas(); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index d6c0dc223b2..ac24c087946 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -638,7 +638,7 @@ class IColumn; M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \ M(Bool, enable_global_with_statement, true, "Propagate WITH statements to UNION queries and all subqueries", 0) \ M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \ - M(Bool, optimize_syntax_fuse_functions, false, "Allow apply fuse aggregating function. Available only with `enable_analyzer`", 0) \ + M(Bool, optimize_syntax_fuse_functions, false, "Allow apply fuse aggregating function. Available only with `allow_experimental_analyzer`", 0) \ M(Bool, flatten_nested, true, "If true, columns of type Nested will be flatten to separate array columns instead of one array of tuples", 0) \ M(Bool, asterisk_include_materialized_columns, false, "Include MATERIALIZED columns for wildcard query", 0) \ M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \ @@ -943,7 +943,7 @@ class IColumn; \ M(Bool, allow_experimental_join_condition, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y.", 0) \ \ - M(Bool, enable_analyzer, true, "Allow new query analyzer.", IMPORTANT) ALIAS(allow_experimental_analyzer) \ + M(Bool, allow_experimental_analyzer, true, "Allow new query analyzer.", IMPORTANT) ALIAS(enable_analyzer) \ M(Bool, analyzer_compatibility_join_using_top_level_identifier, false, "Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather then `t1.b = t2.b`).", 0) \ \ M(Bool, allow_experimental_live_view, false, "Enable LIVE VIEW. 
Not mature enough.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index dc81932f923..71f7c940e2c 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -528,6 +528,7 @@ static std::initializer_listgetSettingsRef().enable_analyzer && !identifier) + if (data.getContext()->getSettingsRef().allow_experimental_analyzer && !identifier) { /// Here we can be only from mutation interpreter. Normal selects with analyzed use other interpreter. /// This is a hacky way to allow reusing cache for prepared sets. diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 0948f24eca0..e35d31d2350 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -68,7 +68,7 @@ ASTPtr rewriteSelectQuery( // are written into the query context and will be sent by the query pipeline. select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {}); - if (!context->getSettingsRef().enable_analyzer) + if (!context->getSettingsRef().allow_experimental_analyzer) { if (table_function_ptr) select_query.addTableFunction(table_function_ptr); @@ -165,7 +165,7 @@ void SelectStreamFactory::createForShardImpl( auto emplace_remote_stream = [&](bool lazy = false, time_t local_delay = 0) { Block shard_header; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) shard_header = InterpreterSelectQueryAnalyzer::getSampleBlock(query_tree, context, SelectQueryOptions(processed_stage).analyze()); else shard_header = header; diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 6c8ab11bfc9..d04a73e384e 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -300,7 +300,7 @@ void executeQuery( const size_t shards = cluster->getShardCount(); - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { for (size_t i = 0, s = cluster->getShardsInfo().size(); i < s; ++i) { @@ -581,7 +581,7 @@ void executeQueryWithParallelReplicasCustomKey( /// Return directly (with correct header) if no shard to query. 
if (query_info.getCluster()->getShardsInfo().empty()) { - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) return; Pipe pipe(std::make_shared(header)); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index ea631ef01d5..971f90bd3cd 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -834,7 +834,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti Block as_select_sample; - if (getContext()->getSettingsRef().enable_analyzer) + if (getContext()->getSettingsRef().allow_experimental_analyzer) { as_select_sample = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); } @@ -1327,7 +1327,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) { Block input_block; - if (getContext()->getSettingsRef().enable_analyzer) + if (getContext()->getSettingsRef().allow_experimental_analyzer) { input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); } diff --git a/src/Interpreters/InterpreterDescribeQuery.cpp b/src/Interpreters/InterpreterDescribeQuery.cpp index 4a061f02c2b..39fc85a5e23 100644 --- a/src/Interpreters/InterpreterDescribeQuery.cpp +++ b/src/Interpreters/InterpreterDescribeQuery.cpp @@ -129,7 +129,7 @@ void InterpreterDescribeQuery::fillColumnsFromSubquery(const ASTTableExpression auto select_query = table_expression.subquery->children.at(0); auto current_context = getContext(); - if (settings.enable_analyzer) + if (settings.allow_experimental_analyzer) { SelectQueryOptions select_query_options; sample_block = InterpreterSelectQueryAnalyzer(select_query, current_context, select_query_options).getSampleBlock(); diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 2fbfbf3a809..bedd9cb4a80 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -394,9 +394,9 @@ QueryPipeline InterpreterExplainQuery::executeImpl() } case ASTExplainQuery::QueryTree: { - if (!getContext()->getSettingsRef().enable_analyzer) + if (!getContext()->getSettingsRef().allow_experimental_analyzer) throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "EXPLAIN QUERY TREE is only supported with a new analyzer. Set enable_analyzer = 1."); + "EXPLAIN QUERY TREE is only supported with a new analyzer. 
Set allow_experimental_analyzer = 1."); if (ast.getExplainedQuery()->as() == nullptr) throw Exception(ErrorCodes::INCORRECT_QUERY, "Only SELECT is supported for EXPLAIN QUERY TREE query"); @@ -453,7 +453,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() ContextPtr context; - if (getContext()->getSettingsRef().enable_analyzer) + if (getContext()->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), options); context = interpreter.getContext(); @@ -499,7 +499,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() QueryPlan plan; ContextPtr context; - if (getContext()->getSettingsRef().enable_analyzer) + if (getContext()->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), options); context = interpreter.getContext(); @@ -558,7 +558,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() QueryPlan plan; ContextPtr context = getContext(); - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions()); context = interpreter.getContext(); diff --git a/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp index a909c4e602d..12b3b510098 100644 --- a/src/Interpreters/InterpreterFactory.cpp +++ b/src/Interpreters/InterpreterFactory.cpp @@ -118,7 +118,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte if (query->as()) { - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) interpreter_name = "InterpreterSelectQueryAnalyzer"; /// This is internal part of ASTSelectWithUnionQuery. /// Even if there is SELECT without union, it is represented by ASTSelectWithUnionQuery with single ASTSelectQuery as a child. 
@@ -129,7 +129,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte { ProfileEvents::increment(ProfileEvents::SelectQuery); - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) interpreter_name = "InterpreterSelectQueryAnalyzer"; else interpreter_name = "InterpreterSelectWithUnionQuery"; @@ -222,7 +222,7 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte { const auto kind = query->as()->getKind(); if (kind == ASTExplainQuery::ParsedAST || kind == ASTExplainQuery::AnalyzedSyntax) - context->setSetting("enable_analyzer", false); + context->setSetting("allow_experimental_analyzer", false); interpreter_name = "InterpreterExplainQuery"; } diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp index 0213e2a2c42..c97593a1781 100644 --- a/src/Interpreters/InterpreterInsertQuery.cpp +++ b/src/Interpreters/InterpreterInsertQuery.cpp @@ -95,7 +95,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query) Block header_block; auto select_query_options = SelectQueryOptions(QueryProcessingStage::Complete, 1); - if (current_context->getSettingsRef().enable_analyzer) + if (current_context->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter_select(query.select, current_context, select_query_options); header_block = interpreter_select.getSampleBlock(); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index c049dbc9cc1..57ad5caa4c7 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -189,7 +189,7 @@ bool isStorageTouchedByMutations( std::optional interpreter_select_query; BlockIO io; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { auto select_query_tree = prepareQueryAffectedQueryTree(commands, storage.shared_from_this(), context); InterpreterSelectQueryAnalyzer interpreter(select_query_tree, context, SelectQueryOptions().ignoreLimits()); @@ -415,9 +415,9 @@ MutationsInterpreter::MutationsInterpreter( , logger(getLogger("MutationsInterpreter(" + source.getStorage()->getStorageID().getFullTableName() + ")")) { auto new_context = Context::createCopy(context_); - if (new_context->getSettingsRef().enable_analyzer) + if (new_context->getSettingsRef().allow_experimental_analyzer) { - new_context->setSetting("enable_analyzer", false); + new_context->setSetting("allow_experimental_analyzer", false); LOG_DEBUG(logger, "Will use old analyzer to prepare mutation"); } context = std::move(new_context); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 7476915ab8a..ce58f7f922c 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -685,10 +685,10 @@ void validateAnalyzerSettings(ASTPtr ast, bool context_value) if (auto * set_query = node->as()) { - if (auto * value = set_query->changes.tryGet("enable_analyzer")) + if (auto * value = set_query->changes.tryGet("allow_experimental_analyzer")) { if (top_level != value->safeGet()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'enable_analyzer' is changed in the subquery. Top level value: {}", top_level); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'allow_experimental_analyzer' is changed in the subquery. 
Top level value: {}", top_level); } } @@ -912,7 +912,7 @@ static std::tuple executeQueryImpl( /// Interpret SETTINGS clauses as early as possible (before invoking the corresponding interpreter), /// to allow settings to take effect. InterpreterSetQuery::applySettingsFromQuery(ast, context); - validateAnalyzerSettings(ast, context->getSettingsRef().enable_analyzer); + validateAnalyzerSettings(ast, context->getSettingsRef().allow_experimental_analyzer); if (auto * insert_query = ast->as()) insert_query->tail = istr; diff --git a/src/Interpreters/getHeaderForProcessingStage.cpp b/src/Interpreters/getHeaderForProcessingStage.cpp index c4a791e85e1..cf18cbbb54a 100644 --- a/src/Interpreters/getHeaderForProcessingStage.cpp +++ b/src/Interpreters/getHeaderForProcessingStage.cpp @@ -141,7 +141,7 @@ Block getHeaderForProcessingStage( Block result; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { auto storage = std::make_shared(storage_snapshot->storage.getStorageID(), storage_snapshot->getAllColumnsDescription(), diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp index dc4b7fd733b..d8624a1c99b 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.cpp @@ -65,7 +65,7 @@ std::unique_ptr createLocalPlan( .setShardInfo(static_cast(shard_num), static_cast(shard_count)) .ignoreASTOptimizations(); - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { /// For Analyzer, identifier in GROUP BY/ORDER BY/LIMIT BY lists has been resolved to /// ConstantNode in QueryTree if it is an alias of a constant, so we should not replace diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index a2d5ec5d1cb..98d66ed77c3 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -319,7 +319,7 @@ std::optional generateViewChain( Block header; /// Get list of columns we get from select query. - if (select_context->getSettingsRef().enable_analyzer) + if (select_context->getSettingsRef().allow_experimental_analyzer) header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context); else header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock(); @@ -613,7 +613,7 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat QueryPipelineBuilder pipeline; - if (local_context->getSettingsRef().enable_analyzer) + if (local_context->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions().ignoreAccessCheck()); pipeline = interpreter.buildQueryPipeline(); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 4262716b406..c5dfe3e6e5f 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1904,14 +1904,14 @@ void TCPHandler::receiveQuery() /// Settings /// - /// FIXME: Remove when enable_analyzer will become obsolete. + /// FIXME: Remove when allow_experimental_analyzer will become obsolete. /// Analyzer became Beta in 24.3 and started to be enabled by default. /// We have to disable it for ourselves to make sure we don't have different settings on /// different servers. 
if (query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && client_info.getVersionNumber() < VersionNumber(23, 3, 0) - && !passed_settings.enable_analyzer.changed) - passed_settings.set("enable_analyzer", false); + && !passed_settings.allow_experimental_analyzer.changed) + passed_settings.set("allow_experimental_analyzer", false); auto settings_changes = passed_settings.changes(); query_kind = query_context->getClientInfo().query_kind; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 2843ff5a14e..7891042bb96 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -806,7 +806,7 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) metadata.select = SelectQueryDescription::getSelectQueryFromASTForMatView(select, metadata.refresh != nullptr, context); Block as_select_sample; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { as_select_sample = InterpreterSelectQueryAnalyzer::getSampleBlock(select->clone(), context); } diff --git a/src/Storages/IStorageCluster.cpp b/src/Storages/IStorageCluster.cpp index b485ab9cbb5..63467603d16 100644 --- a/src/Storages/IStorageCluster.cpp +++ b/src/Storages/IStorageCluster.cpp @@ -125,7 +125,7 @@ void IStorageCluster::read( Block sample_block; ASTPtr query_to_send = query_info.query; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(query_info.query, context, SelectQueryOptions(processed_stage)); } diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index c93da7ca512..71b1a0a73c9 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -378,7 +378,7 @@ void StorageLiveView::writeBlock(StorageLiveView & live_view, Block && block, Ch QueryPipelineBuilder builder; - if (local_context->getSettingsRef().enable_analyzer) + if (local_context->getSettingsRef().allow_experimental_analyzer) { auto select_description = buildSelectQueryTreeDescription(select_query_description.inner_query, local_context); if (select_description.dependent_table_node) @@ -475,7 +475,7 @@ Block StorageLiveView::getHeader() const if (!sample_block) { - if (live_view_context->getSettingsRef().enable_analyzer) + if (live_view_context->getSettingsRef().allow_experimental_analyzer) { sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(select_query_description.select_query, live_view_context, @@ -519,7 +519,7 @@ ASTPtr StorageLiveView::getInnerBlocksQuery() auto & select_with_union_query = select_query_description.select_query->as(); auto blocks_query = select_with_union_query.list_of_selects->children.at(0)->clone(); - if (!live_view_context->getSettingsRef().enable_analyzer) + if (!live_view_context->getSettingsRef().allow_experimental_analyzer) { /// Rewrite inner query with right aliases for JOIN. 
/// It cannot be done in constructor or startup() because InterpreterSelectQuery may access table, @@ -543,7 +543,7 @@ MergeableBlocksPtr StorageLiveView::collectMergeableBlocks(ContextPtr local_cont QueryPipelineBuilder builder; - if (local_context->getSettingsRef().enable_analyzer) + if (local_context->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(select_query_description.inner_query, local_context, @@ -599,7 +599,7 @@ QueryPipelineBuilder StorageLiveView::completeQuery(Pipes pipes) QueryPipelineBuilder builder; - if (block_context->getSettingsRef().enable_analyzer) + if (block_context->getSettingsRef().allow_experimental_analyzer) { auto select_description = buildSelectQueryTreeDescription(select_query_description.select_query, block_context); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 01ef0a409b0..ce27ad24e10 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -7097,7 +7097,7 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage( SelectQueryInfo &) const { /// with new analyzer, Planner make decision regarding parallel replicas usage, and so about processing stage on reading - if (!query_context->getSettingsRef().enable_analyzer) + if (!query_context->getSettingsRef().allow_experimental_analyzer) { const auto & settings = query_context->getSettingsRef(); if (query_context->canUseParallelReplicasCustomKey()) diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp index fccb20c2b0a..6e963066f39 100644 --- a/src/Storages/MergeTree/RPNBuilder.cpp +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -33,7 +33,7 @@ namespace ErrorCodes namespace { -void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool enable_analyzer, bool legacy = false) +void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & out, bool allow_experimental_analyzer, bool legacy = false) { switch (node.type) { @@ -45,18 +45,18 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o /// If it was created from ASTLiteral, then result_name can be an alias. /// We need to convert value back to string here. 
const auto * column_const = typeid_cast(node.column.get()); - if (column_const && !enable_analyzer) + if (column_const && !allow_experimental_analyzer) writeString(applyVisitor(FieldVisitorToString(), column_const->getField()), out); else writeString(node.result_name, out); break; } case ActionsDAG::ActionType::ALIAS: - appendColumnNameWithoutAlias(*node.children.front(), out, enable_analyzer, legacy); + appendColumnNameWithoutAlias(*node.children.front(), out, allow_experimental_analyzer, legacy); break; case ActionsDAG::ActionType::ARRAY_JOIN: writeCString("arrayJoin(", out); - appendColumnNameWithoutAlias(*node.children.front(), out, enable_analyzer, legacy); + appendColumnNameWithoutAlias(*node.children.front(), out, allow_experimental_analyzer, legacy); writeChar(')', out); break; case ActionsDAG::ActionType::FUNCTION: @@ -75,17 +75,17 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o writeCString(", ", out); first = false; - appendColumnNameWithoutAlias(*arg, out, enable_analyzer, legacy); + appendColumnNameWithoutAlias(*arg, out, allow_experimental_analyzer, legacy); } writeChar(')', out); } } } -String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool enable_analyzer, bool legacy = false) +String getColumnNameWithoutAlias(const ActionsDAG::Node & node, bool allow_experimental_analyzer, bool legacy = false) { WriteBufferFromOwnString out; - appendColumnNameWithoutAlias(node, out, enable_analyzer, legacy); + appendColumnNameWithoutAlias(node, out, allow_experimental_analyzer, legacy); return std::move(out.str()); } @@ -131,7 +131,7 @@ std::string RPNBuilderTreeNode::getColumnName() const if (ast_node) return ast_node->getColumnNameWithoutAlias(); else - return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().enable_analyzer); + return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().allow_experimental_analyzer); } std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const @@ -144,7 +144,7 @@ std::string RPNBuilderTreeNode::getColumnNameWithModuloLegacy() const } else { - return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().enable_analyzer, true /*legacy*/); + return getColumnNameWithoutAlias(*dag_node, getTreeContext().getSettings().allow_experimental_analyzer, true /*legacy*/); } } diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index da427ca4a6a..04e6d6676d1 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -397,7 +397,7 @@ void StorageBuffer::read( /// TODO: Find a way to support projections for StorageBuffer if (processed_stage > QueryProcessingStage::FetchColumns) { - if (local_context->getSettingsRef().enable_analyzer) + if (local_context->getSettingsRef().allow_experimental_analyzer) { auto storage = std::make_shared( getStorageID(), diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index f1fe70b4594..3e38ddf830a 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -833,7 +833,7 @@ void StorageDistributed::read( const auto & settings = local_context->getSettingsRef(); - if (settings.enable_analyzer) + if (settings.allow_experimental_analyzer) { StorageID remote_storage_id = StorageID::createEmpty(); if (!remote_table_function_ptr) @@ -1057,7 +1057,7 @@ static std::optional getFilterFromQuery(const ASTPtr & ast, ContextP QueryPlan plan; SelectQueryOptions options; options.only_analyze = true; - if 
(context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(ast, context, options); plan = std::move(interpreter).extractQueryPlan(); @@ -1611,7 +1611,7 @@ ClusterPtr StorageDistributed::skipUnusedShards( const StorageSnapshotPtr & storage_snapshot, ContextPtr local_context) const { - if (local_context->getSettingsRef().enable_analyzer) + if (local_context->getSettingsRef().allow_experimental_analyzer) return skipUnusedShardsWithAnalyzer(cluster, query_info, storage_snapshot, local_context); const auto & select = query_info.query->as(); diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 27bfa6f854c..0094723e3fd 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -150,7 +150,7 @@ void StorageExecutable::read( for (auto & input_query : input_queries) { QueryPipelineBuilder builder; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) builder = InterpreterSelectQueryAnalyzer(input_query, context, {}).buildQueryPipeline(); else builder = InterpreterSelectWithUnionQuery(input_query, context, {}).buildQueryPipeline(); diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 613317b2564..7c268d36a7b 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -590,7 +590,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ auto modified_query_info = getModifiedQueryInfo(modified_context, table, nested_storage_snaphsot, real_column_names, column_names_as_aliases, aliases); - if (!context->getSettingsRef().enable_analyzer) + if (!context->getSettingsRef().allow_experimental_analyzer) { auto storage_columns = storage_metadata_snapshot->getColumns(); auto syntax_result = TreeRewriter(context).analyzeSelect( @@ -1047,13 +1047,13 @@ void ReadFromMerge::addVirtualColumns( const StorageWithLockAndName & storage_with_lock) const { const auto & [database_name, _, storage, table_name] = storage_with_lock; - bool enable_analyzer = context->getSettingsRef().enable_analyzer; + bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer; /// Add virtual columns if we don't already have them. Block plan_header = child.plan.getCurrentDataStream().header; - if (enable_analyzer) + if (allow_experimental_analyzer) { String table_alias = modified_query_info.query_tree->as()->getJoinTree()->as()->getAlias(); @@ -1133,8 +1133,8 @@ QueryPipelineBuilderPtr ReadFromMerge::buildPipeline( if (!builder->initialized()) return builder; - bool enable_analyzer = context->getSettingsRef().enable_analyzer; - if (processed_stage > child.stage || (enable_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) + bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer; + if (processed_stage > child.stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) { /** Materialization is needed, since from distributed storage the constants come materialized. 
* If you do not do this, different types (Const and non-Const) columns will be produced in different threads, @@ -1168,7 +1168,7 @@ ReadFromMerge::ChildPlan ReadFromMerge::createPlanForTable( modified_select.setFinal(); } - bool enable_analyzer = modified_context->getSettingsRef().enable_analyzer; + bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer; auto storage_stage = storage->getQueryProcessingStage(modified_context, processed_stage, @@ -1201,13 +1201,13 @@ ReadFromMerge::ChildPlan ReadFromMerge::createPlanForTable( row_policy_data_opt->addStorageFilter(source_step_with_filter); } } - else if (processed_stage > storage_stage || enable_analyzer) + else if (processed_stage > storage_stage || allow_experimental_analyzer) { /// Maximum permissible parallelism is streams_num modified_context->setSetting("max_threads", streams_num); modified_context->setSetting("max_streams_to_max_threads_ratio", 1); - if (enable_analyzer) + if (allow_experimental_analyzer) { /// Converting query to AST because types might be different in the source table. /// Need to resolve types again. @@ -1479,7 +1479,7 @@ void ReadFromMerge::convertAndFilterSourceStream( auto storage_sample_block = snapshot->metadata->getSampleBlock(); auto pipe_columns = before_block_header.getNamesAndTypesList(); - if (local_context->getSettingsRef().enable_analyzer) + if (local_context->getSettingsRef().allow_experimental_analyzer) { for (const auto & alias : aliases) { @@ -1522,7 +1522,7 @@ void ReadFromMerge::convertAndFilterSourceStream( ActionsDAG::MatchColumnsMode convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Name; - if (local_context->getSettingsRef().enable_analyzer + if (local_context->getSettingsRef().allow_experimental_analyzer && (child.stage != QueryProcessingStage::FetchColumns || dynamic_cast(&snapshot->storage) != nullptr)) convert_actions_match_columns_mode = ActionsDAG::MatchColumnsMode::Position; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index ebc88993ee4..f55f672fe5e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -208,7 +208,7 @@ void StorageMergeTree::read( const auto & settings = local_context->getSettingsRef(); /// reading step for parallel replicas with new analyzer is built in Planner, so don't do it here if (local_context->canUseParallelReplicasOnInitiator() && settings.parallel_replicas_for_non_replicated_merge_tree - && !settings.enable_analyzer) + && !settings.allow_experimental_analyzer) { ClusterProxy::executeQueryWithParallelReplicas( query_plan, getStorageID(), processed_stage, query_info.query, local_context, query_info.storage_limits); @@ -216,7 +216,7 @@ void StorageMergeTree::read( } if (local_context->canUseParallelReplicasCustomKey() && settings.parallel_replicas_for_non_replicated_merge_tree - && !settings.enable_analyzer && local_context->getClientInfo().distributed_depth == 0) + && !settings.allow_experimental_analyzer && local_context->getClientInfo().distributed_depth == 0) { if (auto cluster = local_context->getClusterForParallelReplicas(); local_context->canUseParallelReplicasCustomKeyForCluster(*cluster)) @@ -244,7 +244,7 @@ void StorageMergeTree::read( const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree - && (!local_context->getSettingsRef().enable_analyzer || 
query_info.current_table_chosen_for_reading_with_parallel_replicas); + && (!local_context->getSettingsRef().allow_experimental_analyzer || query_info.current_table_chosen_for_reading_with_parallel_replicas); if (auto plan = reader.read( column_names, diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a3965e7a6d4..2d826c6c2df 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5480,13 +5480,13 @@ void StorageReplicatedMergeTree::read( return; } /// reading step for parallel replicas with new analyzer is built in Planner, so don't do it here - if (local_context->canUseParallelReplicasOnInitiator() && !settings.enable_analyzer) + if (local_context->canUseParallelReplicasOnInitiator() && !settings.allow_experimental_analyzer) { readParallelReplicasImpl(query_plan, column_names, query_info, local_context, processed_stage); return; } - if (local_context->canUseParallelReplicasCustomKey() && !settings.enable_analyzer + if (local_context->canUseParallelReplicasCustomKey() && !settings.allow_experimental_analyzer && local_context->getClientInfo().distributed_depth == 0) { if (auto cluster = local_context->getClusterForParallelReplicas(); @@ -5555,7 +5555,7 @@ void StorageReplicatedMergeTree::readLocalImpl( const size_t num_streams) { const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && (!local_context->getSettingsRef().enable_analyzer + && (!local_context->getSettingsRef().allow_experimental_analyzer || query_info.current_table_chosen_for_reading_with_parallel_replicas); auto plan = reader.read( diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index dcb5ef2ae77..878998ebf12 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -164,7 +164,7 @@ void StorageView::read( auto options = SelectQueryOptions(QueryProcessingStage::Complete, 0, false, query_info.settings_limit_offset_done); - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context, storage_snapshot), options, column_names); interpreter.addStorageLimits(*query_info.storage_limits); diff --git a/src/Storages/TTLDescription.cpp b/src/Storages/TTLDescription.cpp index 16eccfd7343..d674f054632 100644 --- a/src/Storages/TTLDescription.cpp +++ b/src/Storages/TTLDescription.cpp @@ -172,7 +172,7 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType /// with subqueries it's possible that new analyzer will be enabled in ::read method /// of underlying storage when all other parts of infra are not ready for it /// (built with old analyzer). 
- context_copy->setSetting("enable_analyzer", false); + context_copy->setSetting("allow_experimental_analyzer", false); auto syntax_analyzer_result = TreeRewriter(context_copy).analyze(ast, columns); ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy); auto dag = analyzer.getActionsDAG(false); diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index a2b1704f24b..65bf6768b1b 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1197,7 +1197,7 @@ StorageWindowView::StorageWindowView( , fire_signal_timeout_s(context_->getSettingsRef().wait_for_window_view_fire_signal_timeout.totalSeconds()) , clean_interval_usec(context_->getSettingsRef().window_view_clean_interval.totalMicroseconds()) { - if (context_->getSettingsRef().enable_analyzer) + if (context_->getSettingsRef().allow_experimental_analyzer) disabled_due_to_analyzer = true; if (mode <= LoadingStrictnessLevel::CREATE) @@ -1753,9 +1753,9 @@ StoragePtr StorageWindowView::getTargetTable() const void StorageWindowView::throwIfWindowViewIsDisabled(ContextPtr local_context) const { - if (disabled_due_to_analyzer || (local_context && local_context->getSettingsRef().enable_analyzer)) + if (disabled_due_to_analyzer || (local_context && local_context->getSettingsRef().allow_experimental_analyzer)) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Experimental WINDOW VIEW feature is not supported " - "in the current infrastructure for query analysis (the setting 'enable_analyzer')"); + "in the current infrastructure for query analysis (the setting 'allow_experimental_analyzer')"); } void registerStorageWindowView(StorageFactory & factory) diff --git a/src/TableFunctions/TableFunctionView.cpp b/src/TableFunctions/TableFunctionView.cpp index 02a278cf590..57501df6d4d 100644 --- a/src/TableFunctions/TableFunctionView.cpp +++ b/src/TableFunctions/TableFunctionView.cpp @@ -50,7 +50,7 @@ ColumnsDescription TableFunctionView::getActualTableStructure(ContextPtr context Block sample_block; - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.children[0], context); else sample_block = InterpreterSelectWithUnionQuery::getSampleBlock(create.children[0], context); diff --git a/src/TableFunctions/TableFunctionViewIfPermitted.cpp b/src/TableFunctions/TableFunctionViewIfPermitted.cpp index 7bae2731525..935be6c1987 100644 --- a/src/TableFunctions/TableFunctionViewIfPermitted.cpp +++ b/src/TableFunctions/TableFunctionViewIfPermitted.cpp @@ -114,7 +114,7 @@ bool TableFunctionViewIfPermitted::isPermitted(const ContextPtr & context, const try { - if (context->getSettingsRef().enable_analyzer) + if (context->getSettingsRef().allow_experimental_analyzer) { sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.children[0], context); } From ddcad048de8862392194d649788c56466a432b0e Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 30 Jul 2024 14:44:56 +0000 Subject: [PATCH 1637/2361] Fix build --- src/Core/SettingsChangesHistory.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 71f7c940e2c..bd7330ac6f8 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -500,6 +500,8 @@ static std::initializer_list Date: Tue, 30 Jul 2024 21:29:07 +0000 Subject: [PATCH 
1638/2361] Minor --- src/Core/SettingsChangesHistory.cpp | 4 +--- tests/queries/0_stateless/00202_cross_join.sql | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index bd7330ac6f8..84c0ef2b127 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -528,9 +528,7 @@ static std::initializer_list Date: Wed, 31 Jul 2024 14:21:12 +0000 Subject: [PATCH 1639/2361] Build --- src/Core/SettingsChangesHistory.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 84c0ef2b127..c5d47fcdc4b 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -525,7 +525,6 @@ static std::initializer_list Date: Wed, 31 Jul 2024 15:32:32 +0000 Subject: [PATCH 1640/2361] Remove all changes --- src/Core/SettingsChangesHistory.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c5d47fcdc4b..75bc15358ab 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -525,9 +525,7 @@ static std::initializer_list Date: Wed, 31 Jul 2024 16:00:04 +0000 Subject: [PATCH 1641/2361] Add to settings changes --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 75bc15358ab..893394adb61 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -501,6 +501,7 @@ static std::initializer_list Date: Wed, 31 Jul 2024 22:45:18 +0200 Subject: [PATCH 1642/2361] Validate also alias --- src/Interpreters/executeQuery.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index ce58f7f922c..fe87eed5570 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -690,6 +690,12 @@ void validateAnalyzerSettings(ASTPtr ast, bool context_value) if (top_level != value->safeGet()) throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'allow_experimental_analyzer' is changed in the subquery. Top level value: {}", top_level); } + + if (auto * value = set_query->changes.tryGet("enable_analyzer")) + { + if (top_level != value->safeGet()) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Setting 'enable_analyzer' is changed in the subquery. 
Top level value: {}", top_level); + } } for (auto child : node->children) From ef811fd25c36855e1c39029962999609ef6fb4de Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 1 Aug 2024 12:50:56 +0000 Subject: [PATCH 1643/2361] Fix test --- .../01049_join_low_card_bug_long.reference.j2 | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 b/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 index 872bb448027..341e77b1d78 100644 --- a/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 +++ b/tests/queries/0_stateless/01049_join_low_card_bug_long.reference.j2 @@ -19,17 +19,17 @@ str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -49,17 +49,17 @@ str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r 
str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) String String LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) String String LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -79,17 +79,17 @@ str_r String str_l String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String LowCardinality(String) LowCardinality(String) String String str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String LowCardinality(String) LowCardinality(String) String String str_l str_l -- @@ -109,17 +109,17 @@ 
str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -333,17 +333,17 @@ str_r LowCardinality(String) str_l LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), 
toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str LowCardinality(String) LowCardinality(String) str str -LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) +LowCardinality(String) LowCardinality(String) str_r str_r LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) LowCardinality(String) str_l str_l -- @@ -363,17 +363,17 @@ str_r String str_l String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (x) ORDER BY x, r.lc, l.lc; String String str str LowCardinality(String) LowCardinality(String) str str -String String str_r str_r LowCardinality(String) LowCardinality(String) +String String str_r str_r LowCardinality(String) LowCardinality(String) String String LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String String String String String str_l str_l -- @@ -393,17 +393,17 @@ str_r String str_l String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, 
materialize(l.lc) FROM l RIGHT JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (x) ORDER BY x, r.lc, l.lc; LowCardinality(String) LowCardinality(String) str str String String str str -LowCardinality(String) LowCardinality(String) str_r str_r String String +LowCardinality(String) LowCardinality(String) str_r str_r String String LowCardinality(String) LowCardinality(String) String String str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l FULL JOIN r_lc AS r USING (lc) ORDER BY x, r.lc, l.lc; String String str str String String str str -String String str_r str_r String String +String String str_r str_r String String String String String String str_l str_l -- @@ -423,13 +423,13 @@ str_r Nullable(String) str_l Nullable(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str_r str_r Nullable(String) Nullable(String) \N \N SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (x) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str LowCardinality(String) LowCardinality(String) str str -Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) +Nullable(String) Nullable(String) str_r str_r LowCardinality(String) LowCardinality(String) Nullable(String) Nullable(String) \N \N LowCardinality(String) LowCardinality(String) str_l str_l SELECT toTypeName(r.lc), toTypeName(materialize(r.lc)), r.lc, materialize(r.lc), toTypeName(l.lc), toTypeName(materialize(l.lc)), l.lc, materialize(l.lc) FROM l_lc AS l FULL JOIN nr AS r USING (lc) ORDER BY x, r.lc, l.lc; Nullable(String) Nullable(String) str str Nullable(String) Nullable(String) str str From 0301af99e5c6eda72a379a0d048903a3ecb9e0e0 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 1 Aug 2024 15:53:08 +0000 Subject: [PATCH 1644/2361] Fixed a bunch of tests --- tests/config/users.d/analyzer.xml | 2 +- .../integration/helpers/0_common_enable_old_analyzer.xml | 2 +- tests/integration/test_analyzer_compatibility/test.py | 6 ++++-- .../02998_analyzer_secret_args_tree_node.reference | 8 ++++---- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/tests/config/users.d/analyzer.xml b/tests/config/users.d/analyzer.xml index 
edba8b8578e..4b9764526fa 100644 --- a/tests/config/users.d/analyzer.xml +++ b/tests/config/users.d/analyzer.xml @@ -1,7 +1,7 @@ - 0 + 0 diff --git a/tests/integration/helpers/0_common_enable_old_analyzer.xml b/tests/integration/helpers/0_common_enable_old_analyzer.xml index edba8b8578e..4b9764526fa 100644 --- a/tests/integration/helpers/0_common_enable_old_analyzer.xml +++ b/tests/integration/helpers/0_common_enable_old_analyzer.xml @@ -1,7 +1,7 @@ - 0 + 0 diff --git a/tests/integration/test_analyzer_compatibility/test.py b/tests/integration/test_analyzer_compatibility/test.py index 2c840154eb5..6eeba1f1274 100644 --- a/tests/integration/test_analyzer_compatibility/test.py +++ b/tests/integration/test_analyzer_compatibility/test.py @@ -78,10 +78,12 @@ WHERE initial_query_id = '{query_id}';""" current.query("SYSTEM FLUSH LOGS") backward.query("SYSTEM FLUSH LOGS") + # The old version doesn't know about the alias. + # For this we will ask about the old experimental name. assert ( backward.query( """ -SELECT hostname() AS h, getSetting('enable_analyzer') +SELECT hostname() AS h, getSetting('allow_experimental_analyzer') FROM clusterAllReplicas('test_cluster_mixed', system.one) ORDER BY h;""" ) @@ -92,7 +94,7 @@ ORDER BY h;""" analyzer_enabled = backward.query( f""" SELECT -DISTINCT Settings['enable_analyzer'] +DISTINCT Settings['allow_experimental_analyzer'] FROM clusterAllReplicas('test_cluster_mixed', system.query_log) WHERE initial_query_id = '{query_id}';""" ) diff --git a/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference b/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference index ccd9540cb49..67ef38093d4 100644 --- a/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference +++ b/tests/queries/0_stateless/02998_analyzer_secret_args_tree_node.reference @@ -7,7 +7,7 @@ QUERY id: 0 encrypt(\'aes-256-ofb\', [HIDDEN id: 3], [HIDDEN id: 2]) Nullable(String) PROJECTION LIST id: 1, nodes: 2 - CONSTANT id: 2, constant_value: \'\\n��&\', constant_value_type: Nullable(String) + CONSTANT id: 2, constant_value: \'\\nãì&\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 3, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS @@ -15,7 +15,7 @@ QUERY id: 0 CONSTANT id: 5, constant_value: \'aes-256-ofb\', constant_value_type: String CONSTANT id: 6, constant_value: [HIDDEN id: 1], constant_value_type: Nullable(String) CONSTANT id: 7, constant_value: [HIDDEN id: 2], constant_value_type: String - CONSTANT id: 8, constant_value: \'��\', constant_value_type: Nullable(String) + CONSTANT id: 8, constant_value: \'çø\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 9, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS @@ -33,7 +33,7 @@ QUERY id: 0 encrypt(\'aes-256-ofb\', _subquery_2, \'12345678901234567890123456789012\') Nullable(String) PROJECTION LIST id: 1, nodes: 2 - CONSTANT id: 2, constant_value: \'\\n��&\', constant_value_type: Nullable(String) + CONSTANT id: 2, constant_value: \'\\nãì&\', constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 3, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS @@ -50,7 +50,7 @@ QUERY id: 0 JOIN TREE TABLE id: 10, table_name: system.one CONSTANT id: 11, constant_value: \'12345678901234567890123456789012\', constant_value_type: String - CONSTANT id: 12, constant_value: \'��\', constant_value_type: Nullable(String) + CONSTANT id: 12, constant_value: \'çø\', 
constant_value_type: Nullable(String) EXPRESSION FUNCTION id: 13, function_name: encrypt, function_type: ordinary, result_type: Nullable(String) ARGUMENTS From 330aae951706e4c3fd6ddada231d5d4bc23e37a0 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 1 Aug 2024 16:51:44 +0000 Subject: [PATCH 1645/2361] Even better test --- .../test_analyzer_compatibility/test.py | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_analyzer_compatibility/test.py b/tests/integration/test_analyzer_compatibility/test.py index 6eeba1f1274..505d1629cd2 100644 --- a/tests/integration/test_analyzer_compatibility/test.py +++ b/tests/integration/test_analyzer_compatibility/test.py @@ -1,4 +1,5 @@ import uuid +import time import pytest from helpers.cluster import ClickHouseCluster @@ -51,19 +52,19 @@ def test_two_new_versions(start_cluster): assert ( current.query( """ -SELECT hostname() AS h, getSetting('enable_analyzer') +SELECT hostname() AS h, getSetting('allow_experimental_analyzer') FROM clusterAllReplicas('test_cluster_mixed', system.one) ORDER BY h;""" ) == TSV([["backward", "true"], ["current", "true"]]) ) - # Should be enabled everywhere - analyzer_enabled = current.query( + # Should be enabled explicitly on the old instance. + analyzer_enabled = backward.query( f""" SELECT -DISTINCT Settings['enable_analyzer'] -FROM clusterAllReplicas('test_cluster_mixed', system.query_log) +DISTINCT Settings['allow_experimental_analyzer'] +FROM system.query_log WHERE initial_query_id = '{query_id}';""" ) @@ -100,3 +101,26 @@ WHERE initial_query_id = '{query_id}';""" ) assert TSV(analyzer_enabled) == TSV("0") + + # Only new version knows about the alias + # and it will send the old setting `allow_experimental_analyzer` + # to the remote server. + query_id = str(uuid.uuid4()) + current.query( + "SELECT * FROM clusterAllReplicas('test_cluster_mixed', system.tables) SETTINGS enable_analyzer = 1;", + query_id=query_id, + ) + + current.query("SYSTEM FLUSH LOGS") + backward.query("SYSTEM FLUSH LOGS") + + # Should be enabled explicitly everywhere.
+ analyzer_enabled = current.query( + f""" +SELECT +DISTINCT Settings['allow_experimental_analyzer'] +FROM system.query_log +WHERE initial_query_id = '{query_id}';""" + ) + + assert TSV(analyzer_enabled) == TSV("1") From a2020224750f9861f1571d4aa8e139560b3a1dfc Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 2 Aug 2024 11:29:56 +0000 Subject: [PATCH 1646/2361] Fixed performance tests --- tests/performance/storage_join_direct_join.xml | 2 +- tests/performance/uniq_to_count.xml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/performance/storage_join_direct_join.xml b/tests/performance/storage_join_direct_join.xml index 987500bb4f0..867108ac2b7 100644 --- a/tests/performance/storage_join_direct_join.xml +++ b/tests/performance/storage_join_direct_join.xml @@ -15,5 +15,5 @@ SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null; SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null SETTINGS - enable_analyzer=1 + allow_experimental_analyzer=1 diff --git a/tests/performance/uniq_to_count.xml b/tests/performance/uniq_to_count.xml index 57b0085d8fa..64e4cf1cc0d 100644 --- a/tests/performance/uniq_to_count.xml +++ b/tests/performance/uniq_to_count.xml @@ -3,6 +3,6 @@ select uniq(number) from (select number from numbers(1000000) group by number) - select uniq(number) from (select DISTINCT number from numbers(1000000)) SETTINGS enable_analyzer=1 - select uniq(number) from (select number from numbers(1000000) group by number) SETTINGS enable_analyzer=1 + select uniq(number) from (select DISTINCT number from numbers(1000000)) SETTINGS allow_experimental_analyzer=1 + select uniq(number) from (select number from numbers(1000000) group by number) SETTINGS allow_experimental_analyzer=1 From a9204c5da3179ca7c5e78fe537f4e57bf129dbab Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 2 Aug 2024 11:34:57 +0000 Subject: [PATCH 1647/2361] Delete changes --- .../0_stateless/02995_baseline_24_7_2.tsv | 26 ------------------- 1 file changed, 26 deletions(-) diff --git a/tests/queries/0_stateless/02995_baseline_24_7_2.tsv b/tests/queries/0_stateless/02995_baseline_24_7_2.tsv index d3a07ecb644..10b392f3e04 100644 --- a/tests/queries/0_stateless/02995_baseline_24_7_2.tsv +++ b/tests/queries/0_stateless/02995_baseline_24_7_2.tsv @@ -18,11 +18,7 @@ allow_distributed_ddl 1 allow_drop_detached 0 allow_execute_multiif_columnar 1 allow_experimental_alter_materialized_view_structure 1 -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv allow_experimental_analyzer 1 -======= -allow_experimental_analyzer 0 ->>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv allow_experimental_annoy_index 0 allow_experimental_bigint_types 1 allow_experimental_codecs 0 @@ -163,10 +159,7 @@ cloud_mode 0 cloud_mode_engine 1 cluster_for_parallel_replicas collect_hash_table_stats_during_aggregation 1 -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv collect_hash_table_stats_during_joins 1 -======= ->>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv column_names_for_schema_inference compatibility compatibility_ignore_auto_increment_in_create_table 0 @@ -336,13 +329,9 @@ format_regexp_escaping_rule Raw format_regexp_skip_unmatched 0 format_schema format_template_resultset -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv format_template_resultset_format format_template_row format_template_row_format -======= -format_template_row ->>>>>>> Fix 
tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv format_template_rows_between_delimiter \n format_tsv_null_representation \\N formatdatetime_f_prints_single_zero 0 @@ -399,11 +388,8 @@ iceberg_engine_ignore_schema_evolution 0 idle_connection_timeout 3600 ignore_cold_parts_seconds 0 ignore_data_skipping_indices -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv ignore_drop_queries_probability 0 ignore_materialized_views_with_dropped_target_table 0 -======= ->>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv ignore_on_cluster_for_replicated_access_entities_queries 0 ignore_on_cluster_for_replicated_named_collections_queries 0 ignore_on_cluster_for_replicated_udf_queries 0 @@ -726,13 +712,8 @@ mutations_execute_subqueries_on_initiator 0 mutations_max_literal_size_to_replace 16384 mutations_sync 0 mysql_datatypes_support_level -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv mysql_map_fixed_string_to_text_in_show_columns 1 mysql_map_string_to_text_in_show_columns 1 -======= -mysql_map_fixed_string_to_text_in_show_columns 0 -mysql_map_string_to_text_in_show_columns 0 ->>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv mysql_max_rows_to_insert 65536 network_compression_method LZ4 network_zstd_compression_level 1 @@ -799,13 +780,9 @@ os_thread_priority 0 output_format_arrow_compression_method lz4_frame output_format_arrow_fixed_string_as_fixed_byte_array 1 output_format_arrow_low_cardinality_as_dictionary 0 -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv output_format_arrow_string_as_string 1 output_format_arrow_use_64_bit_indexes_for_dictionary 0 output_format_arrow_use_signed_indexes_for_dictionary 1 -======= -output_format_arrow_string_as_string 0 ->>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv output_format_avro_codec output_format_avro_rows_in_file 1 output_format_avro_string_column_pattern @@ -1068,10 +1045,7 @@ totals_mode after_having_exclusive trace_profile_events 0 transfer_overflow_mode throw transform_null_in 0 -<<<<<<< HEAD:tests/queries/0_stateless/02995_baseline_24_7_2.tsv traverse_shadow_remote_data_paths 0 -======= ->>>>>>> Fix tests:tests/queries/0_stateless/02995_baseline_23_12_1.tsv union_default_mode unknown_packet_in_send_data 0 update_insert_deduplication_token_in_dependent_materialized_views 0 From d998bf55f1a592df93f6c839dd647ff437962076 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Fri, 2 Aug 2024 11:37:08 +0000 Subject: [PATCH 1648/2361] Fix build --- src/Core/SettingsChangesHistory.cpp | 263 +--------------------------- 1 file changed, 1 insertion(+), 262 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 893394adb61..2ff392fcb84 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -78,6 +78,7 @@ static std::initializer_list col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, }; From 011910a59409b3b2ec5430097d88c40e091c6b30 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 5 Aug 2024 15:28:43 +0000 Subject: [PATCH 1649/2361] Fixed the integration test --- ...allel_replicas_crash_after_refactoring.sql | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql diff --git a/tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql b/tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql new file mode 100644 index 00000000000..cae4fa0f0df --- /dev/null +++ b/tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql @@ -0,0 +1,33 @@ +-- Tags: disabled + +DROP TABLE IF EXISTS t1__fuzz_5; + +CREATE TABLE t1__fuzz_5 +( + `k` Int16, + `v` Nullable(UInt8) +) +ENGINE = MergeTree +ORDER BY k +SETTINGS index_granularity = 10; + +INSERT INTO t1__fuzz_5 SELECT + number, + number +FROM numbers(1000); + +INSERT INTO t1__fuzz_5 SELECT + number, + number +FROM numbers(1000, 1000); + +INSERT INTO t1__fuzz_5 SELECT + number, + number +FROM numbers(2000, 1000); + +SET receive_timeout = 10., receive_data_timeout_ms = 10000, allow_suspicious_low_cardinality_types = true, parallel_distributed_insert_select = 2, log_queries = true, table_function_remote_max_addresses = 200, max_execution_time = 10., max_memory_usage = 10000000000, log_comment = '/workspace/ch/tests/queries/0_stateless/02869_parallel_replicas_read_from_several.sql', send_logs_level = 'warning', 
prefer_localhost_replica = false, allow_introspection_functions = true, use_parallel_replicas = 257, max_parallel_replicas = 65535, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_single_task_marks_count_multiplier = -0., parallel_replicas_for_non_replicated_merge_tree = true; + +SELECT max(k) IGNORE NULLS FROM t1__fuzz_5 WITH TOTALS SETTINGS use_parallel_replicas = 257, max_parallel_replicas = 65535, prefer_localhost_replica = 0, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_single_task_marks_count_multiplier = -0; + +DROP TABLE IF EXISTS t1__fuzz_5; From d8aa219783b6715f5424772bf25c092a26be5e2d Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Mon, 5 Aug 2024 17:43:29 +0200 Subject: [PATCH 1650/2361] fix build --- src/Core/SettingsChangesHistory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 03e0f4f0dc8..2080e8fbf0d 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -77,7 +77,7 @@ static std::initializer_list Date: Mon, 5 Aug 2024 16:12:25 +0000 Subject: [PATCH 1651/2361] Update version_date.tsv and changelogs after v24.5.5.78-stable --- docs/changelogs/v24.5.5.78-stable.md | 55 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 56 insertions(+) create mode 100644 docs/changelogs/v24.5.5.78-stable.md diff --git a/docs/changelogs/v24.5.5.78-stable.md b/docs/changelogs/v24.5.5.78-stable.md new file mode 100644 index 00000000000..415ea165101 --- /dev/null +++ b/docs/changelogs/v24.5.5.78-stable.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.5.5.78-stable (0138248cb62) FIXME as compared to v24.5.4.49-stable (63b760955a0) + +#### Improvement +* Backported in [#66768](https://github.com/ClickHouse/ClickHouse/issues/66768): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#66884](https://github.com/ClickHouse/ClickHouse/issues/66884): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66691](https://github.com/ClickHouse/ClickHouse/issues/66691): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). +* Backported in [#67814](https://github.com/ClickHouse/ClickHouse/issues/67814): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67501](https://github.com/ClickHouse/ClickHouse/issues/67501): Fix crash in DistributedAsyncInsert when connection is empty. 
[#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)). +* Backported in [#67850](https://github.com/ClickHouse/ClickHouse/issues/67850): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#65350](https://github.com/ClickHouse/ClickHouse/issues/65350): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#65621](https://github.com/ClickHouse/ClickHouse/issues/65621): Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#65933](https://github.com/ClickHouse/ClickHouse/issues/65933): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#66301](https://github.com/ClickHouse/ClickHouse/issues/66301): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). +* Backported in [#66328](https://github.com/ClickHouse/ClickHouse/issues/66328): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#66155](https://github.com/ClickHouse/ClickHouse/issues/66155): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#66454](https://github.com/ClickHouse/ClickHouse/issues/66454): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66226](https://github.com/ClickHouse/ClickHouse/issues/66226): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). 
+* Backported in [#66680](https://github.com/ClickHouse/ClickHouse/issues/66680): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#66604](https://github.com/ClickHouse/ClickHouse/issues/66604): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Backported in [#66360](https://github.com/ClickHouse/ClickHouse/issues/66360): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66972](https://github.com/ClickHouse/ClickHouse/issues/66972): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66969](https://github.com/ClickHouse/ClickHouse/issues/66969): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66720](https://github.com/ClickHouse/ClickHouse/issues/66720): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66951](https://github.com/ClickHouse/ClickHouse/issues/66951): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66757](https://github.com/ClickHouse/ClickHouse/issues/66757): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66948](https://github.com/ClickHouse/ClickHouse/issues/66948): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67633](https://github.com/ClickHouse/ClickHouse/issues/67633): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#67481](https://github.com/ClickHouse/ClickHouse/issues/67481): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. 
Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)). +* Backported in [#67197](https://github.com/ClickHouse/ClickHouse/issues/67197): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#67379](https://github.com/ClickHouse/ClickHouse/issues/67379): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67576](https://github.com/ClickHouse/ClickHouse/issues/67576): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#66387](https://github.com/ClickHouse/ClickHouse/issues/66387): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)). +* Backported in [#66426](https://github.com/ClickHouse/ClickHouse/issues/66426): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66544](https://github.com/ClickHouse/ClickHouse/issues/66544): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66859](https://github.com/ClickHouse/ClickHouse/issues/66859): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). +* Backported in [#66875](https://github.com/ClickHouse/ClickHouse/issues/66875): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)). +* Backported in [#67059](https://github.com/ClickHouse/ClickHouse/issues/67059): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Backported in [#66945](https://github.com/ClickHouse/ClickHouse/issues/66945): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67252](https://github.com/ClickHouse/ClickHouse/issues/67252): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)). +* Backported in [#67412](https://github.com/ClickHouse/ClickHouse/issues/67412): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index cb6b8f588da..75c10fa67b8 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -2,6 +2,7 @@ v24.7.2.13-stable 2024-08-01 v24.7.1.2915-stable 2024-07-30 v24.6.2.17-stable 2024-07-05 v24.6.1.4423-stable 2024-07-01 +v24.5.5.78-stable 2024-08-05 v24.5.4.49-stable 2024-07-01 v24.5.3.5-stable 2024-06-13 v24.5.2.34-stable 2024-06-13 From 4e8d11c48e9ac0107940d730ccf8d35b7ac3573e Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 5 Aug 2024 16:46:42 +0000 Subject: [PATCH 1652/2361] Add fuzzers to 03208_buffer_over_distributed_type_mismatch --- ...208_buffer_over_distributed_type_mismatch.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql b/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql index 5a7c89074cf..333c445403d 100644 --- a/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql +++ b/tests/queries/0_stateless/03208_buffer_over_distributed_type_mismatch.sql @@ -58,3 +58,19 @@ SELECT amount FROM realtimebuff__fuzz_19 t1 JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount JOIN realtimebuff__fuzz_19 t3 ON t1.amount = t3.amount ; -- { serverError NOT_IMPLEMENTED,AMBIGUOUS_COLUMN_NAME } + + +-- fuzzers: + +SELECT + toLowCardinality(1) + materialize(toLowCardinality(2)) +FROM realtimebuff__fuzz_19 +GROUP BY toLowCardinality(1) +FORMAT Null +; + +SELECT intDivOrZero(intDivOrZero(toLowCardinality(-128), toLowCardinality(-1)) = 0, materialize(toLowCardinality(4))) +FROM realtimebuff__fuzz_19 GROUP BY materialize(toLowCardinality(-127)), intDivOrZero(0, 0) = toLowCardinality(toLowCardinality(0)) +WITH TOTALS ORDER BY ALL DESC NULLS FIRST +FORMAT Null +; From 05395ac7bd93949d07f163bfc6cabf9c3e85adc5 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 5 Aug 2024 17:01:15 +0000 Subject: [PATCH 1653/2361] Un-flake 02524_fuzz_and_fuss_2 (hopefully) https://s3.amazonaws.com/clickhouse-test-reports/0/3221b651da16cd868350a1aff022ba71a1a15f3c/stateless_tests__tsan__s3_storage__[2_5].html https://s3.amazonaws.com/clickhouse-test-reports/0/1fde5b7bfa1b1a6a0d67258be5e7ef855b730559/stateless_tests__tsan__s3_storage__[2_4].html --- tests/queries/0_stateless/02524_fuzz_and_fuss_2.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02524_fuzz_and_fuss_2.sql b/tests/queries/0_stateless/02524_fuzz_and_fuss_2.sql index 7b49378d4da..a38fb0bd471 100644 --- a/tests/queries/0_stateless/02524_fuzz_and_fuss_2.sql +++ b/tests/queries/0_stateless/02524_fuzz_and_fuss_2.sql @@ -9,6 +9,6 @@ ENGINE = Memory; INSERT INTO data_a_02187 SELECT * FROM system.one -SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', 
optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', distributed_foreground_insert = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '3', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', optimize_use_projections = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1'; +SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', 
allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', distributed_foreground_insert = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '9', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', allow_experimental_live_view = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', allow_experimental_object_type = '1', optimize_use_projections = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1'; DROP TABLE data_a_02187; From c8805fbcedcce226a82d78cff79d6afbdddca0a7 Mon Sep 17 00:00:00 2001 From: Alex Katsman Date: Mon, 5 Aug 2024 17:41:47 +0000 
Subject: [PATCH 1654/2361] Misc fixes --- docker/test/fasttest/run.sh | 2 +- src/Functions/bitSlice.cpp | 4 ++-- src/Storages/MergeTree/KeyCondition.h | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 26283afc86a..394d31addb1 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -41,7 +41,7 @@ export FASTTEST_WORKSPACE export FASTTEST_SOURCE export FASTTEST_BUILD export FASTTEST_DATA -export FASTTEST_OUT +export FASTTEST_OUTPUT export PATH function ccache_status diff --git a/src/Functions/bitSlice.cpp b/src/Functions/bitSlice.cpp index f24473351ae..908c534b228 100644 --- a/src/Functions/bitSlice.cpp +++ b/src/Functions/bitSlice.cpp @@ -42,11 +42,11 @@ public: { FunctionArgumentDescriptors mandatory_args{ {"s", static_cast(&isStringOrFixedString), nullptr, "String"}, - {"offset", static_cast(&isNativeNumber), nullptr, "(U)Int8, (U)Int16, (U)Int32, (U)Int64 or Float"}, + {"offset", static_cast(&isNativeNumber), nullptr, "(U)Int8/16/32/64 or Float"}, }; FunctionArgumentDescriptors optional_args{ - {"length", static_cast(&isNativeNumber), nullptr, "(U)Int8, (U)Int16, (U)Int32, (U)Int64 or Float"}, + {"length", static_cast(&isNativeNumber), nullptr, "(U)Int8/16/32/64 or Float"}, }; validateFunctionArguments(*this, arguments, mandatory_args, optional_args); diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index e9343ec08ea..8bbb86aba43 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -6,6 +6,8 @@ #include #include +#include + #include #include @@ -14,7 +16,6 @@ #include #include -#include "DataTypes/Serializations/ISerialization.h" namespace DB From 457686c379cb884a579b86cbfd6a9abb3ec16d1a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 10 Jul 2024 19:39:58 +0200 Subject: [PATCH 1655/2361] Simplify StorageDistributed ctors Signed-off-by: Azat Khuzhin --- src/Storages/StorageDistributed.cpp | 32 ---------------------- src/Storages/StorageDistributed.h | 14 ---------- src/TableFunctions/TableFunctionRemote.cpp | 19 ++----------- 3 files changed, 3 insertions(+), 62 deletions(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 3e38ddf830a..eb9483127fc 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -373,38 +373,6 @@ StorageDistributed::StorageDistributed( } -StorageDistributed::StorageDistributed( - const StorageID & id_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - ASTPtr remote_table_function_ptr_, - const String & cluster_name_, - ContextPtr context_, - const ASTPtr & sharding_key_, - const String & storage_policy_name_, - const String & relative_data_path_, - const DistributedSettings & distributed_settings_, - LoadingStrictnessLevel mode, - ClusterPtr owned_cluster_) - : StorageDistributed( - id_, - columns_, - constraints_, - String{}, - String{}, - String{}, - cluster_name_, - context_, - sharding_key_, - storage_policy_name_, - relative_data_path_, - distributed_settings_, - mode, - std::move(owned_cluster_), - remote_table_function_ptr_) -{ -} - QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage( ContextPtr local_context, QueryProcessingStage::Enum to_stage, diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 85a8de86953..a9e10dd14ba 100644 --- a/src/Storages/StorageDistributed.h +++ 
b/src/Storages/StorageDistributed.h @@ -63,20 +63,6 @@ public: ClusterPtr owned_cluster_ = {}, ASTPtr remote_table_function_ptr_ = {}); - StorageDistributed( - const StorageID & id_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - ASTPtr remote_table_function_ptr_, - const String & cluster_name_, - ContextPtr context_, - const ASTPtr & sharding_key_, - const String & storage_policy_name_, - const String & relative_data_path_, - const DistributedSettings & distributed_settings_, - LoadingStrictnessLevel mode, - ClusterPtr owned_cluster_ = {}); - ~StorageDistributed() override; std::string getName() const override { return "Distributed"; } diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp index e60c31b2d77..5d136993018 100644 --- a/src/TableFunctions/TableFunctionRemote.cpp +++ b/src/TableFunctions/TableFunctionRemote.cpp @@ -306,21 +306,7 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & /*ast_function*/, Con cached_columns = getActualTableStructure(context, is_insert_query); assert(cluster); - StoragePtr res = remote_table_function_ptr - ? std::make_shared( - StorageID(getDatabaseName(), table_name), - cached_columns, - ConstraintsDescription{}, - remote_table_function_ptr, - String{}, - context, - sharding_key, - String{}, - String{}, - DistributedSettings{}, - LoadingStrictnessLevel::CREATE, - cluster) - : std::make_shared( + StoragePtr res = std::make_shared( StorageID(getDatabaseName(), table_name), cached_columns, ConstraintsDescription{}, @@ -334,7 +320,8 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & /*ast_function*/, Con String{}, DistributedSettings{}, LoadingStrictnessLevel::CREATE, - cluster); + cluster, + remote_table_function_ptr); res->startup(); return res; From 9d7710684b91a9a5b0d7c2a34b709232979ebe65 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 10 Jul 2024 19:57:09 +0200 Subject: [PATCH 1656/2361] tests/test_distributed_inter_server_secret: get_query_user_info return list Signed-off-by: Azat Khuzhin --- .../test_distributed_inter_server_secret/test.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 7ecb2cda257..9389b901a30 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -116,10 +116,10 @@ def start_cluster(): cluster.shutdown() -# @return -- [user, initial_user] +# @return -- [[user, initial_user]] def get_query_user_info(node, query_pattern): node.query("SYSTEM FLUSH LOGS") - return ( + lines = ( node.query( """ SELECT user, initial_user @@ -133,8 +133,10 @@ def get_query_user_info(node, query_pattern): ) ) .strip() - .split("\t") + .split("\n") ) + lines = map(lambda x: x.split("\t"), lines) + return list(lines) # @return -- [user, initial_user] @@ -331,19 +333,19 @@ def test_secure_disagree_insert(): def test_user_insecure_cluster(user, password): id_ = "query-dist_insecure-" + user n1.query(f"SELECT *, '{id_}' FROM dist_insecure", user=user, password=password) - assert get_query_user_info(n1, id_) == [ + assert get_query_user_info(n1, id_)[0] == [ user, user, ] # due to prefer_localhost_replica - assert get_query_user_info(n2, id_) == ["default", user] + assert get_query_user_info(n2, id_)[0] == ["default", user] @users def test_user_secure_cluster(user, password): id_ = 
"query-dist_secure-" + user n1.query(f"SELECT *, '{id_}' FROM dist_secure", user=user, password=password) - assert get_query_user_info(n1, id_) == [user, user] - assert get_query_user_info(n2, id_) == [user, user] + assert get_query_user_info(n1, id_)[0] == [user, user] + assert get_query_user_info(n2, id_)[0] == [user, user] @users From 0dccc34a2004b075e290ebfd5ecc9fea3077a205 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 10 Jul 2024 19:23:34 +0200 Subject: [PATCH 1657/2361] Fix cluster() for inter-server secret (preserve initial user as before) The behaviour of cluster() with inter-server secret had been changed in #63013, after it started to use "default" user, and this introduces a regression. The intention of that patch was to adjust only remote(), since it only it accept custom user, which should be ignored. Fixes: https://github.com/ClickHouse/ClickHouse/issues/66287 Fixes: https://github.com/ClickHouse/ClickHouse/issues/66352 Signed-off-by: Azat Khuzhin --- src/Storages/StorageDistributed.cpp | 6 ++++-- src/Storages/StorageDistributed.h | 5 ++++- src/TableFunctions/TableFunctionRemote.cpp | 3 ++- .../test_distributed_inter_server_secret/test.py | 15 ++++++++++++++- 4 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index eb9483127fc..e146e95f89f 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -308,7 +308,8 @@ StorageDistributed::StorageDistributed( const DistributedSettings & distributed_settings_, LoadingStrictnessLevel mode, ClusterPtr owned_cluster_, - ASTPtr remote_table_function_ptr_) + ASTPtr remote_table_function_ptr_, + bool is_remote_function_) : IStorage(id_) , WithContext(context_->getGlobalContext()) , remote_database(remote_database_) @@ -322,6 +323,7 @@ StorageDistributed::StorageDistributed( , relative_data_path(relative_data_path_) , distributed_settings(distributed_settings_) , rng(randomSeed()) + , is_remote_function(is_remote_function_) { if (!distributed_settings.flush_on_detach && distributed_settings.background_insert_batch) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Settings flush_on_detach=0 and background_insert_batch=1 are incompatible"); @@ -869,7 +871,7 @@ void StorageDistributed::read( sharding_key_column_name, distributed_settings, shard_filter_generator, - /* is_remote_function= */ static_cast(owned_cluster)); + is_remote_function); /// This is a bug, it is possible only when there is no shards to query, and this is handled earlier. 
if (!query_plan.isInitialized()) diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index a9e10dd14ba..8a5585e9fd0 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -61,7 +61,8 @@ public: const DistributedSettings & distributed_settings_, LoadingStrictnessLevel mode, ClusterPtr owned_cluster_ = {}, - ASTPtr remote_table_function_ptr_ = {}); + ASTPtr remote_table_function_ptr_ = {}, + bool is_remote_function_ = false); ~StorageDistributed() override; @@ -273,6 +274,8 @@ private: // For random shard index generation mutable std::mutex rng_mutex; pcg64 rng; + + bool is_remote_function; }; } diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp index 5d136993018..8a877ff0802 100644 --- a/src/TableFunctions/TableFunctionRemote.cpp +++ b/src/TableFunctions/TableFunctionRemote.cpp @@ -321,7 +321,8 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & /*ast_function*/, Con DistributedSettings{}, LoadingStrictnessLevel::CREATE, cluster, - remote_table_function_ptr); + remote_table_function_ptr, + !is_cluster_function); res->startup(); return res; diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 9389b901a30..36d7e044f1c 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -418,7 +418,7 @@ def test_per_user_protocol_settings_secure_cluster(user, password): ) -def test_secure_cluster_distributed_over_distributed_different_users(): +def test_secure_cluster_distributed_over_distributed_different_users_remote(): # This works because we will have initial_user='default' n1.query( "SELECT * FROM remote('n1', currentDatabase(), dist_secure)", user="new_user" @@ -433,3 +433,16 @@ def test_secure_cluster_distributed_over_distributed_different_users(): # and stuff). with pytest.raises(QueryRuntimeException): n1.query("SELECT * FROM dist_over_dist_secure", user="new_user") + + +def test_secure_cluster_distributed_over_distributed_different_users_cluster(): + id_ = "cluster-user" + n1.query( + f"SELECT *, '{id_}' FROM cluster(secure, currentDatabase(), dist_secure)", + user="nopass", + settings={ + "prefer_localhost_replica": 0, + }, + ) + assert get_query_user_info(n1, id_) == [["nopass", "nopass"]] * 4 + assert get_query_user_info(n2, id_) == [["nopass", "nopass"]] * 3 From 3593f740a8793339c5657a0bcc6f785e1198e510 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 25 Jul 2024 12:16:23 +0200 Subject: [PATCH 1658/2361] Revert "Remove bad tests @azat" (reverts #66823) This reverts commit 0c2c027af63fcbababffbe3a39ed2631884e1938. 
Signed-off-by: Azat Khuzhin --- ...2_part_log_rmt_fetch_merge_error.reference | 10 +++++ .../03002_part_log_rmt_fetch_merge_error.sql | 35 ++++++++++++++++ ..._part_log_rmt_fetch_mutate_error.reference | 10 +++++ .../03002_part_log_rmt_fetch_mutate_error.sql | 41 +++++++++++++++++++ 4 files changed, 96 insertions(+) create mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference create mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql create mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference create mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference new file mode 100644 index 00000000000..b19d389d8d0 --- /dev/null +++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference @@ -0,0 +1,10 @@ +before +rmt_master NewPart 0 1 +rmt_master MergeParts 0 1 +rmt_slave MergeParts 1 0 +rmt_slave DownloadPart 0 1 +after +rmt_master NewPart 0 1 +rmt_master MergeParts 0 1 +rmt_slave MergeParts 1 0 +rmt_slave DownloadPart 0 2 diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql new file mode 100644 index 00000000000..548a8e5570a --- /dev/null +++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql @@ -0,0 +1,35 @@ +-- Tags: no-replicated-database, no-parallel, no-shared-merge-tree +-- SMT: The merge process is completely different from RMT + +drop table if exists rmt_master; +drop table if exists rmt_slave; + +create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0; +-- always_fetch_merged_part=1, consider this table as a "slave" +create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1; + +insert into rmt_master values (1); + +system sync replica rmt_master; +system sync replica rmt_slave; +system stop replicated sends rmt_master; +optimize table rmt_master final settings alter_sync=1, optimize_throw_if_noop=1; + +select sleep(3) format Null; + +system flush logs; +select 'before'; +select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; + +system start replicated sends rmt_master; +-- sleep few seconds to try rmt_slave to fetch the part and reflect this error +-- in system.part_log +select sleep(3) format Null; +system sync replica rmt_slave; + +system flush logs; +select 'after'; +select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; + +drop table rmt_master; +drop table rmt_slave; diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference new file mode 100644 index 00000000000..aac9e7527d1 --- /dev/null +++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.reference @@ -0,0 +1,10 @@ +before +rmt_master NewPart 0 1 +rmt_master MutatePart 0 1 +rmt_slave DownloadPart 0 1 +rmt_slave MutatePart 1 0 +after +rmt_master NewPart 0 1 +rmt_master MutatePart 0 1 +rmt_slave DownloadPart 0 2 +rmt_slave MutatePart 1 0 diff --git 
a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql
new file mode 100644
index 00000000000..d8b5ebb3148
--- /dev/null
+++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql
@@ -0,0 +1,41 @@
+-- Tags: no-replicated-database, no-parallel, no-shared-merge-tree
+-- SMT: The merge process is completely different from RMT
+
+drop table if exists rmt_master;
+drop table if exists rmt_slave;
+
+create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by tuple() settings always_fetch_merged_part=0, old_parts_lifetime=600;
+-- prefer_fetch_merged_part_*_threshold=0, consider this table as a "slave"
+create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by tuple() settings prefer_fetch_merged_part_time_threshold=0, prefer_fetch_merged_part_size_threshold=0, old_parts_lifetime=600;
+
+insert into rmt_master values (1);
+
+system sync replica rmt_master;
+system sync replica rmt_slave;
+system stop replicated sends rmt_master;
+system stop pulling replication log rmt_slave;
+alter table rmt_master update key=key+100 where 1 settings alter_sync=1;
+
+-- first we need to make the rmt_master execute mutation so that it will have
+-- the part, and rmt_slave will consider it instead of performing mutation on
+-- it's own, otherwise prefer_fetch_merged_part_*_threshold will be simply ignored
+select sleep(3) format Null;
+system start pulling replication log rmt_slave;
+-- and sleep few more seconds to try rmt_slave to fetch the part and reflect
+-- this error in system.part_log
+select sleep(3) format Null;
+
+system flush logs;
+select 'before';
+select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3;
+
+system start replicated sends rmt_master;
+select sleep(3) format Null;
+system sync replica rmt_slave;
+
+system flush logs;
+select 'after';
+select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3;
+
+drop table rmt_master;
+drop table rmt_slave;
From 50aebcfc28c5fa98dca92d92c5233adea92801f4 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Thu, 25 Jul 2024 13:18:53 +0200
Subject: [PATCH 1659/2361] Rewrite 03002_part_log_rmt_fetch_*_error tests from sql to sh with retries
Besides fixing the flakiness of this test, this patch also reduces the execution time from 15.4s to 3.3s (5x!) 
Signed-off-by: Azat Khuzhin --- .../03002_part_log_rmt_fetch_merge_error.sh | 54 ++++++++++++++++ .../03002_part_log_rmt_fetch_merge_error.sql | 35 ---------- .../03002_part_log_rmt_fetch_mutate_error.sh | 64 +++++++++++++++++++ .../03002_part_log_rmt_fetch_mutate_error.sql | 41 ------------ 4 files changed, 118 insertions(+), 76 deletions(-) create mode 100755 tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh delete mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql create mode 100755 tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sh delete mode 100644 tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh new file mode 100755 index 00000000000..25d946b325d --- /dev/null +++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# Tags: no-replicated-database, no-parallel, no-shared-merge-tree +# SMT: The merge process is completely different from RMT + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +set -e + +function wait_until() +{ + local q=$1 && shift + while [ "$($CLICKHOUSE_CLIENT -nm -q "$q")" != "1" ]; do + # too frequent FLUSH LOGS is too costly + sleep 2 + done +} + +$CLICKHOUSE_CLIENT -nm -q " + drop table if exists rmt_master; + drop table if exists rmt_slave; + + create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0; + -- always_fetch_merged_part=1, consider this table as a 'slave' + create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1; + + insert into rmt_master values (1); + + system sync replica rmt_master; + system sync replica rmt_slave; + system stop replicated sends rmt_master; + optimize table rmt_master final settings alter_sync=1, optimize_throw_if_noop=1; +" + +$CLICKHOUSE_CLIENT -nm -q " + system flush logs; + select 'before'; + select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; + + system start replicated sends rmt_master; +" +# wait until rmt_slave will fetch the part and reflect this error in system.part_log +wait_until "system flush logs; select count()>0 from system.part_log where table = 'rmt_slave' and database = '$CLICKHOUSE_DATABASE' and error > 0" +$CLICKHOUSE_CLIENT -nm -q " + system sync replica rmt_slave; + + system flush logs; + select 'after'; + select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; + + drop table rmt_master; + drop table rmt_slave; +" diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql deleted file mode 100644 index 548a8e5570a..00000000000 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sql +++ /dev/null @@ -1,35 +0,0 @@ --- Tags: no-replicated-database, no-parallel, no-shared-merge-tree --- SMT: The merge process is completely different from RMT - -drop table if exists rmt_master; -drop table if exists rmt_slave; - -create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 
'master') order by key settings always_fetch_merged_part=0; --- always_fetch_merged_part=1, consider this table as a "slave" -create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1; - -insert into rmt_master values (1); - -system sync replica rmt_master; -system sync replica rmt_slave; -system stop replicated sends rmt_master; -optimize table rmt_master final settings alter_sync=1, optimize_throw_if_noop=1; - -select sleep(3) format Null; - -system flush logs; -select 'before'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -system start replicated sends rmt_master; --- sleep few seconds to try rmt_slave to fetch the part and reflect this error --- in system.part_log -select sleep(3) format Null; -system sync replica rmt_slave; - -system flush logs; -select 'after'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -drop table rmt_master; -drop table rmt_slave; diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sh b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sh new file mode 100755 index 00000000000..cc8f53aafb9 --- /dev/null +++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# Tags: no-replicated-database, no-parallel, no-shared-merge-tree +# SMT: The merge process is completely different from RMT + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# shellcheck source=./mergetree_mutations.lib +. 
"$CUR_DIR"/mergetree_mutations.lib + +set -e + +function wait_until() +{ + local q=$1 && shift + while [ "$($CLICKHOUSE_CLIENT -nm -q "$q")" != "1" ]; do + # too frequent FLUSH LOGS is too costly + sleep 2 + done +} + +$CLICKHOUSE_CLIENT -nm -q " + drop table if exists rmt_master; + drop table if exists rmt_slave; + + create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by tuple() settings always_fetch_merged_part=0, old_parts_lifetime=600; + -- prefer_fetch_merged_part_*_threshold=0, consider this table as a 'slave' + create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by tuple() settings prefer_fetch_merged_part_time_threshold=0, prefer_fetch_merged_part_size_threshold=0, old_parts_lifetime=600; + + insert into rmt_master values (1); + + system sync replica rmt_master; + system sync replica rmt_slave; + system stop replicated sends rmt_master; + system stop pulling replication log rmt_slave; + alter table rmt_master update key=key+100 where 1 settings alter_sync=1; +" + +# first we need to make the rmt_master execute mutation so that it will have +# the part, and rmt_slave will consider it instead of performing mutation on +# it's own, otherwise prefer_fetch_merged_part_*_threshold will be simply ignored +wait_for_mutation rmt_master 0000000000 +$CLICKHOUSE_CLIENT -nm -q "system start pulling replication log rmt_slave" +# and wait until rmt_slave to fetch the part and reflect this error in system.part_log +wait_until "system flush logs; select count()>0 from system.part_log where table = 'rmt_slave' and database = '$CLICKHOUSE_DATABASE' and error > 0" +$CLICKHOUSE_CLIENT -nm -q " + system flush logs; + select 'before'; + select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; + + system start replicated sends rmt_master; +" +wait_for_mutation rmt_slave 0000000000 +$CLICKHOUSE_CLIENT -nm -q " + system sync replica rmt_slave; + + system flush logs; + select 'after'; + select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; + + drop table rmt_master; + drop table rmt_slave; +" diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql b/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql deleted file mode 100644 index d8b5ebb3148..00000000000 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_mutate_error.sql +++ /dev/null @@ -1,41 +0,0 @@ --- Tags: no-replicated-database, no-parallel, no-shared-merge-tree --- SMT: The merge process is completely different from RMT - -drop table if exists rmt_master; -drop table if exists rmt_slave; - -create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by tuple() settings always_fetch_merged_part=0, old_parts_lifetime=600; --- prefer_fetch_merged_part_*_threshold=0, consider this table as a "slave" -create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by tuple() settings prefer_fetch_merged_part_time_threshold=0, prefer_fetch_merged_part_size_threshold=0, old_parts_lifetime=600; - -insert into rmt_master values (1); - -system sync replica rmt_master; -system sync replica rmt_slave; -system stop replicated sends rmt_master; -system stop pulling replication log rmt_slave; -alter table rmt_master update key=key+100 where 1 settings alter_sync=1; - --- 
first we need to make the rmt_master execute mutation so that it will have --- the part, and rmt_slave will consider it instead of performing mutation on --- it's own, otherwise prefer_fetch_merged_part_*_threshold will be simply ignored -select sleep(3) format Null; -system start pulling replication log rmt_slave; --- and sleep few more seconds to try rmt_slave to fetch the part and reflect --- this error in system.part_log -select sleep(3) format Null; - -system flush logs; -select 'before'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -system start replicated sends rmt_master; -select sleep(3) format Null; -system sync replica rmt_slave; - -system flush logs; -select 'after'; -select table, event_type, error>0, countIf(error=0) from system.part_log where database = currentDatabase() group by 1, 2, 3 order by 1, 2, 3; - -drop table rmt_master; -drop table rmt_slave; From 0310f52d3a73fa3df61ef218ce643e448edfd012 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 1 Aug 2024 10:49:09 +0200 Subject: [PATCH 1660/2361] Fix 01811_storage_buffer_flush_parameters flakiness Signed-off-by: Azat Khuzhin --- .../01811_storage_buffer_flush_parameters.sh | 47 +++++++++++++++++++ .../01811_storage_buffer_flush_parameters.sql | 22 --------- 2 files changed, 47 insertions(+), 22 deletions(-) create mode 100755 tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sh delete mode 100644 tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql diff --git a/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sh b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sh new file mode 100755 index 00000000000..6a5949741ab --- /dev/null +++ b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +function wait_with_limit() +{ + local limit=$1 && shift + local expr=$1 && shift + + for ((i = 0; i < limit; ++i)); do + if eval "$expr"; then + break + fi + sleep 1 + done +} + +$CLICKHOUSE_CLIENT -nm -q " + drop table if exists data_01811; + drop table if exists buffer_01811; + + + create table data_01811 (key Int) Engine=Memory(); + /* Buffer with flush_rows=1000 */ + create table buffer_01811 (key Int) Engine=Buffer(currentDatabase(), data_01811, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6, + /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0 + ); + + insert into buffer_01811 select * from numbers(10); + insert into buffer_01811 select * from numbers(10); +" + +# wait for background buffer flush +wait_with_limit 30 '[[ $($CLICKHOUSE_CLIENT -q "select count() from data_01811") -gt 0 ]]' + +$CLICKHOUSE_CLIENT -nm -q "select count() from data_01811" + +$CLICKHOUSE_CLIENT -nm -q " + drop table buffer_01811; + drop table data_01811; +" diff --git a/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql b/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql deleted file mode 100644 index dac68ad4ae8..00000000000 --- a/tests/queries/0_stateless/01811_storage_buffer_flush_parameters.sql +++ /dev/null @@ -1,22 +0,0 @@ -drop table if exists data_01811; -drop table if exists buffer_01811; - -create table data_01811 (key Int) Engine=Memory(); -/* Buffer with flush_rows=1000 */ -create table buffer_01811 (key Int) Engine=Buffer(currentDatabase(), data_01811, - /* num_layers= */ 1, - /* min_time= */ 1, /* max_time= */ 86400, - /* min_rows= */ 1e9, /* max_rows= */ 1e6, - /* min_bytes= */ 0, /* max_bytes= */ 4e6, - /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0 -); - -insert into buffer_01811 select * from numbers(10); -insert into buffer_01811 select * from numbers(10); - --- wait for background buffer flush -select sleep(3) format Null; -select count() from data_01811; - -drop table buffer_01811; -drop table data_01811; From 618789d1960182ff91d614545a16d1332768e008 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 22 Jun 2024 20:42:27 +0200 Subject: [PATCH 1661/2361] Do not pass logs for keeper explicitly in the image to allow overriding Right now, if you will start keeper like this: $ cat /tmp/local.yaml --- logger: log: "@remove": remove errorlog: "@remove": remove console: 1 $ docker run --name keeper -v /tmp/local.yaml:/etc/clickhouse-keeper/conf.d/local.yaml --rm -it clickhouse/clickhouse-keeper This will still not work, because the --log-file/--errorlog-file passed explicitly. So this patch removes this in case config is valid, but, if keeper is launched with embedded config, this had been left as-is, to keep previous behavior. 
Signed-off-by: Azat Khuzhin --- docker/keeper/entrypoint.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docker/keeper/entrypoint.sh b/docker/keeper/entrypoint.sh index 1390ad9ce74..68bd0ef9d87 100644 --- a/docker/keeper/entrypoint.sh +++ b/docker/keeper/entrypoint.sh @@ -40,8 +40,6 @@ fi DATA_DIR="${CLICKHOUSE_DATA_DIR:-/var/lib/clickhouse}" LOG_DIR="${LOG_DIR:-/var/log/clickhouse-keeper}" -LOG_PATH="${LOG_DIR}/clickhouse-keeper.log" -ERROR_LOG_PATH="${LOG_DIR}/clickhouse-keeper.err.log" COORDINATION_DIR="${DATA_DIR}/coordination" COORDINATION_LOG_DIR="${DATA_DIR}/coordination/log" COORDINATION_SNAPSHOT_DIR="${DATA_DIR}/coordination/snapshots" @@ -84,7 +82,7 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then # There is a config file. It is already tested with gosu (if it is readably by keeper user) if [ -f "$KEEPER_CONFIG" ]; then - exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@" + exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@" fi # There is no config file. Will use embedded one From 9d4b0d2579ef199518707ecb6a2258951466d041 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 5 Aug 2024 18:31:16 +0000 Subject: [PATCH 1662/2361] set max_threads --- .../queries/0_stateless/03161_lightweight_delete_projection.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 28e5612a529..618f3ac0cb8 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,5 +1,5 @@ -SET lightweight_deletes_sync = 2, alter_sync = 2; +SET max_threads = 1, lightweight_deletes_sync = 2, alter_sync = 2; DROP TABLE IF EXISTS users_compact; From 205303c2868645e2aebf76f9b49ab77960579c56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 5 Aug 2024 20:37:43 +0200 Subject: [PATCH 1663/2361] Disable more bad tests --- .../Nodes/tests/gtest_resource_class_fair.cpp | 12 +++++++----- .../Nodes/tests/gtest_resource_class_priority.cpp | 10 ++++++---- .../Nodes/tests/gtest_throttler_constraint.cpp | 2 +- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/Common/Scheduler/Nodes/tests/gtest_resource_class_fair.cpp b/src/Common/Scheduler/Nodes/tests/gtest_resource_class_fair.cpp index 4f0e8c80734..16cce309c2a 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_resource_class_fair.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_resource_class_fair.cpp @@ -8,7 +8,9 @@ using namespace DB; using ResourceTest = ResourceTestClass; -TEST(SchedulerFairPolicy, Factory) +/// Tests disabled because of leaks in the test themselves: https://github.com/ClickHouse/ClickHouse/issues/67678 + +TEST(DISABLED_SchedulerFairPolicy, Factory) { ResourceTest t; @@ -17,7 +19,7 @@ TEST(SchedulerFairPolicy, Factory) EXPECT_TRUE(dynamic_cast(fair.get()) != nullptr); } -TEST(SchedulerFairPolicy, FairnessWeights) +TEST(DISABLED_SchedulerFairPolicy, FairnessWeights) { ResourceTest t; @@ -41,7 +43,7 @@ TEST(SchedulerFairPolicy, FairnessWeights) t.consumed("B", 20); } -TEST(SchedulerFairPolicy, Activation) +TEST(DISABLED_SchedulerFairPolicy, Activation) { ResourceTest t; @@ -77,7 +79,7 @@ TEST(SchedulerFairPolicy, Activation) t.consumed("B", 10); } -TEST(SchedulerFairPolicy, FairnessMaxMin) +TEST(DISABLED_SchedulerFairPolicy, FairnessMaxMin) { 
ResourceTest t; @@ -101,7 +103,7 @@ TEST(SchedulerFairPolicy, FairnessMaxMin) t.consumed("A", 20); } -TEST(SchedulerFairPolicy, HierarchicalFairness) +TEST(DISABLED_SchedulerFairPolicy, HierarchicalFairness) { ResourceTest t; diff --git a/src/Common/Scheduler/Nodes/tests/gtest_resource_class_priority.cpp b/src/Common/Scheduler/Nodes/tests/gtest_resource_class_priority.cpp index a447b7f6780..d3d38aae048 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_resource_class_priority.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_resource_class_priority.cpp @@ -8,7 +8,9 @@ using namespace DB; using ResourceTest = ResourceTestClass; -TEST(SchedulerPriorityPolicy, Factory) +/// Tests disabled because of leaks in the test themselves: https://github.com/ClickHouse/ClickHouse/issues/67678 + +TEST(DISABLED_SchedulerPriorityPolicy, Factory) { ResourceTest t; @@ -17,7 +19,7 @@ TEST(SchedulerPriorityPolicy, Factory) EXPECT_TRUE(dynamic_cast(prio.get()) != nullptr); } -TEST(SchedulerPriorityPolicy, Priorities) +TEST(DISABLED_SchedulerPriorityPolicy, Priorities) { ResourceTest t; @@ -51,7 +53,7 @@ TEST(SchedulerPriorityPolicy, Priorities) t.consumed("C", 0); } -TEST(SchedulerPriorityPolicy, Activation) +TEST(DISABLED_SchedulerPriorityPolicy, Activation) { ResourceTest t; @@ -92,7 +94,7 @@ TEST(SchedulerPriorityPolicy, Activation) t.consumed("C", 0); } -TEST(SchedulerPriorityPolicy, SinglePriority) +TEST(DISABLED_SchedulerPriorityPolicy, SinglePriority) { ResourceTest t; diff --git a/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp b/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp index 363e286c91c..2bc24cdb292 100644 --- a/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp +++ b/src/Common/Scheduler/Nodes/tests/gtest_throttler_constraint.cpp @@ -10,7 +10,7 @@ using namespace DB; using ResourceTest = ResourceTestClass; -/// Test disabled because of leaks in the test themselves: https://github.com/ClickHouse/ClickHouse/issues/67678 +/// Tests disabled because of leaks in the test themselves: https://github.com/ClickHouse/ClickHouse/issues/67678 TEST(DISABLED_SchedulerThrottlerConstraint, LeakyBucketConstraint) { From 529f21c6b8dac22235b92c8a443abfb4b95c0f52 Mon Sep 17 00:00:00 2001 From: Max K Date: Thu, 25 Jul 2024 11:32:59 +0200 Subject: [PATCH 1664/2361] CI: CheckReadyForMerge fixes --- .github/workflows/backport_branches.yml | 7 ++++++- .github/workflows/jepsen.yml | 1 + .github/workflows/merge_queue.yml | 7 ++++++- .github/workflows/nightly.yml | 1 + .github/workflows/pull_request.yml | 6 +++++- .github/workflows/release_branches.yml | 4 +--- tests/ci/ci_config.py | 28 +++++++++++++++++++++++++ tests/ci/ci_utils.py | 9 ++++---- tests/ci/merge_pr.py | 24 ++++++++++++--------- 9 files changed, 67 insertions(+), 20 deletions(-) diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 322946ac77b..23744dc7f8f 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -260,13 +260,18 @@ jobs: - name: Finish label if: ${{ !failure() }} run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF cd "$GITHUB_WORKSPACE/tests/ci" # update mergeable check - python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + python3 merge_pr.py --set-ci-status # update overall ci report python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') 
&& 'failure' || 'success' }} python3 merge_pr.py - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat > "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 035ba2e5b98..2280b1a7305 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -64,6 +64,7 @@ jobs: - name: Check out repository code uses: ClickHouse/checkout@v1 - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml index 64083668719..629cf79770e 100644 --- a/.github/workflows/merge_queue.yml +++ b/.github/workflows/merge_queue.yml @@ -103,9 +103,14 @@ jobs: - name: Check and set merge status if: ${{ needs.StyleCheck.result == 'success' }} run: | + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF cd "$GITHUB_WORKSPACE/tests/ci" - python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + python3 merge_pr.py --set-ci-status - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat > "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index ea9c125db70..36fea39686f 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -52,6 +52,7 @@ jobs: - name: Check out repository code uses: ClickHouse/checkout@v1 - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat > "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 071f0f1e20a..dbc740ebc1b 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -170,7 +170,11 @@ jobs: if: ${{ needs.StyleCheck.result == 'success' }} run: | cd "$GITHUB_WORKSPACE/tests/ci" - python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + python3 merge_pr.py --set-ci-status - name: Check Workflow results uses: ./.github/actions/check_workflow with: diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index b79208b03a6..a5cd6321e8c 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -481,12 +481,10 @@ jobs: - name: Finish label if: ${{ !failure() }} run: | - cd "$GITHUB_WORKSPACE/tests/ci" - # update mergeable check - python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} # update overall ci report python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - name: Check Workflow results + if: ${{ !cancelled() }} run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" cat > "$WORKFLOW_RESULT_FILE" << 'EOF' diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index c031ca9b805..d34405e7692 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -687,6 +687,34 @@ class CI: assert res, f"not a build [{build_name}] or invalid 
JobConfig" return res + @classmethod + def is_workflow_ok(cls) -> bool: + # TODO: temporary method to make Mergeable check working + res = cls.GH.get_workflow_results() + if not res: + print("ERROR: no workflow results found") + return False + for workflow_job, workflow_data in res.items(): + status = workflow_data["result"] + if status in ( + cls.GH.ActionStatuses.SUCCESS, + cls.GH.ActionStatuses.SKIPPED, + ): + print(f"Workflow status for [{workflow_job}] is [{status}] - continue") + elif status in (cls.GH.ActionStatuses.FAILURE,): + if workflow_job in ( + WorkflowStages.TESTS_2, + WorkflowStages.TESTS_2_WW, + ): + print( + f"Failed Workflow status for [{workflow_job}], it's not required - continue" + ) + continue + + print(f"Failed Workflow status for [{workflow_job}]") + return False + return True + if __name__ == "__main__": parser = ArgumentParser( diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index dae1520afb6..0a2dd5e35ba 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -94,9 +94,10 @@ class GH: FAILURE = "failure" PENDING = "pending" SUCCESS = "success" + SKIPPED = "skipped" @classmethod - def _get_workflow_results(cls): + def get_workflow_results(cls): if not Path(Envs.WORKFLOW_RESULT_FILE).exists(): print( f"ERROR: Failed to get workflow results from file [{Envs.WORKFLOW_RESULT_FILE}]" @@ -115,13 +116,13 @@ class GH: @classmethod def print_workflow_results(cls): - res = cls._get_workflow_results() + res = cls.get_workflow_results() results = [f"{job}: {data['result']}" for job, data in res.items()] cls.print_in_group("Workflow results", results) @classmethod def is_workflow_ok(cls) -> bool: - res = cls._get_workflow_results() + res = cls.get_workflow_results() for _job, data in res.items(): if data["result"] == "failure": return False @@ -129,7 +130,7 @@ class GH: @classmethod def get_workflow_job_result(cls, wf_job_name: str) -> Optional[str]: - res = cls._get_workflow_results() + res = cls.get_workflow_results() if wf_job_name in res: return res[wf_job_name]["result"] # type: ignore else: diff --git a/tests/ci/merge_pr.py b/tests/ci/merge_pr.py index 13c7537a84b..ddeb76adf7e 100644 --- a/tests/ci/merge_pr.py +++ b/tests/ci/merge_pr.py @@ -23,7 +23,7 @@ from commit_status_helper import ( from get_robot_token import get_best_robot_token from github_helper import GitHub, NamedUser, PullRequest, Repository from pr_info import PRInfo -from report import SUCCESS, FAILURE +from report import SUCCESS from env_helper import GITHUB_UPSTREAM_REPOSITORY, GITHUB_REPOSITORY from synchronizer_utils import SYNC_BRANCH_PREFIX from ci_config import CI @@ -248,23 +248,27 @@ def main(): repo = gh.get_repo(args.repo) if args.set_ci_status: + CI.GH.print_workflow_results() # set Mergeable check status and exit - assert args.wf_status in (FAILURE, SUCCESS) commit = get_commit(gh, args.pr_info.sha) statuses = get_commit_filtered_statuses(commit) has_failed_statuses = False - has_native_failed_status = False for status in statuses: print(f"Check status [{status.context}], [{status.state}]") - if CI.is_required(status.context) and status.state != SUCCESS: - print(f"WARNING: Failed status [{status.context}], [{status.state}]") + if ( + CI.is_required(status.context) + and status.state != SUCCESS + and status.context != CI.StatusNames.SYNC + ): + print( + f"WARNING: Not success status [{status.context}], [{status.state}]" + ) has_failed_statuses = True - if status.context != CI.StatusNames.SYNC: - has_native_failed_status = True - if args.wf_status == SUCCESS or 
has_failed_statuses: - # set Mergeable check if workflow is successful (green) + workflow_ok = CI.is_workflow_ok() + if workflow_ok or has_failed_statuses: + # set Mergeable Check if workflow is successful (green) # or if we have GH statuses with failures (red) # to avoid false-green on a died runner state = trigger_mergeable_check( @@ -283,7 +287,7 @@ def main(): print( "Workflow failed but no failed statuses found (died runner?) - cannot set Mergeable Check status" ) - if args.wf_status == SUCCESS and not has_native_failed_status: + if workflow_ok and not has_failed_statuses: sys.exit(0) else: sys.exit(1) From bf2e679a51d843e03b52e0dc2853667c8b03978d Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 5 Aug 2024 19:40:25 +0000 Subject: [PATCH 1665/2361] no-msan 00314_sample_factor_virtual_column --- .../0_stateless/00314_sample_factor_virtual_column.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql b/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql index 6e3dc019069..b8ac5e733ed 100644 --- a/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql +++ b/tests/queries/0_stateless/00314_sample_factor_virtual_column.sql @@ -1,3 +1,7 @@ +-- Tags: no-msan +-- ^ +-- makes SELECTs extremely slow sometimes for some reason: "Aggregated. 1000000 to 1 rows (from 7.63 MiB) in 242.829221645 sec." + DROP TABLE IF EXISTS sample_00314_1; DROP TABLE IF EXISTS sample_00314_2; DROP TABLE IF EXISTS sample_merge_00314; From cb6baefa948ef1270ce9454f72075ac10bf6e729 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 5 Aug 2024 21:49:36 +0100 Subject: [PATCH 1666/2361] better --- src/Common/ShellCommand.cpp | 2 +- src/Processors/Sources/ShellCommandSource.cpp | 12 +++--------- tests/integration/test_executable_dictionary/test.py | 2 -- .../test_executable_table_function/test.py | 1 - 4 files changed, 4 insertions(+), 13 deletions(-) diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 79b0d667863..0d41669816c 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -239,7 +239,7 @@ std::unique_ptr ShellCommand::executeImpl( LOG_TRACE( getLogger(), - "Started shell command '{}' with pid {} and file descriptors: read {}, write {}", + "Started shell command '{}' with pid {} and file descriptors: out {}, err {}", filename, pid, res->out.getFD(), diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index 923bdfad8f8..23359367a9b 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -76,13 +76,11 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond { Stopwatch watch; -#if defined(DEBUG_OR_SANITIZER_BUILD) auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; - LOG_TRACE( + LOG_TEST( getLogger("TimeoutReadBufferFromFileDescriptor"), "Polling descriptors: {}", fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", ")); -#endif res = poll(pfds, static_cast(num), static_cast(timeout_milliseconds)); @@ -94,13 +92,11 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond const auto elapsed = watch.elapsedMilliseconds(); if (timeout_milliseconds <= elapsed) { -#if defined(DEBUG_OR_SANITIZER_BUILD) - LOG_TRACE( + LOG_TEST( getLogger("TimeoutReadBufferFromFileDescriptor"), "Timeout exceeded: elapsed={}, 
timeout={}", elapsed, timeout_milliseconds); -#endif break; } timeout_milliseconds -= elapsed; @@ -111,14 +107,12 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond } } -#if defined(DEBUG_OR_SANITIZER_BUILD) auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; - LOG_TRACE( + LOG_TEST( getLogger("TimeoutReadBufferFromFileDescriptor"), "Poll for descriptors: {} returned {}", fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", "), res); -#endif return res; } diff --git a/tests/integration/test_executable_dictionary/test.py b/tests/integration/test_executable_dictionary/test.py index 2a6af75e751..22f3442bb95 100644 --- a/tests/integration/test_executable_dictionary/test.py +++ b/tests/integration/test_executable_dictionary/test.py @@ -235,7 +235,6 @@ def test_executable_implicit_input_signalled_python(started_cluster): ) -@pytest.mark.repeat(50) def test_executable_input_slow_python(started_cluster): skip_test_msan(node) assert node.query_and_get_error( @@ -246,7 +245,6 @@ def test_executable_input_slow_python(started_cluster): ) -@pytest.mark.repeat(50) def test_executable_implicit_input_slow_python(started_cluster): skip_test_msan(node) assert node.query_and_get_error( diff --git a/tests/integration/test_executable_table_function/test.py b/tests/integration/test_executable_table_function/test.py index a79616fc008..801a3c7c14a 100644 --- a/tests/integration/test_executable_table_function/test.py +++ b/tests/integration/test_executable_table_function/test.py @@ -139,7 +139,6 @@ def test_executable_function_input_signalled_python(started_cluster): assert node.query(query.format(source="(SELECT id FROM test_data_table)")) == "" -@pytest.mark.repeat(50) def test_executable_function_input_slow_python(started_cluster): skip_test_msan(node) From 4829ba31d3e8f874beb9e626df801021673008fd Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Mon, 5 Aug 2024 21:18:53 +0000 Subject: [PATCH 1667/2361] Actually fix a test --- .../test_distributed_type_object/test.py | 3 ++ ...allel_replicas_crash_after_refactoring.sql | 33 ------------------- 2 files changed, 3 insertions(+), 33 deletions(-) delete mode 100644 tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql diff --git a/tests/integration/test_distributed_type_object/test.py b/tests/integration/test_distributed_type_object/test.py index e774876bc8b..e274bd6b774 100644 --- a/tests/integration/test_distributed_type_object/test.py +++ b/tests/integration/test_distributed_type_object/test.py @@ -31,6 +31,9 @@ def started_cluster(): def test_distributed_type_object(started_cluster): + node1.query("TRUNCATE TABLE local_table") + node2.query("TRUNCATE TABLE local_table") + node1.query( 'INSERT INTO local_table FORMAT JSONEachRow {"id": 1, "data": {"k1": 10}}' ) diff --git a/tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql b/tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql deleted file mode 100644 index cae4fa0f0df..00000000000 --- a/tests/queries/0_stateless/03215_parallel_replicas_crash_after_refactoring.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Tags: disabled - -DROP TABLE IF EXISTS t1__fuzz_5; - -CREATE TABLE t1__fuzz_5 -( - `k` Int16, - `v` Nullable(UInt8) -) -ENGINE = MergeTree -ORDER BY k -SETTINGS index_granularity = 10; - -INSERT INTO t1__fuzz_5 SELECT - number, - number -FROM numbers(1000); - -INSERT INTO t1__fuzz_5 SELECT - number, - 
number -FROM numbers(1000, 1000); - -INSERT INTO t1__fuzz_5 SELECT - number, - number -FROM numbers(2000, 1000); - -SET receive_timeout = 10., receive_data_timeout_ms = 10000, allow_suspicious_low_cardinality_types = true, parallel_distributed_insert_select = 2, log_queries = true, table_function_remote_max_addresses = 200, max_execution_time = 10., max_memory_usage = 10000000000, log_comment = '/workspace/ch/tests/queries/0_stateless/02869_parallel_replicas_read_from_several.sql', send_logs_level = 'warning', prefer_localhost_replica = false, allow_introspection_functions = true, use_parallel_replicas = 257, max_parallel_replicas = 65535, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_single_task_marks_count_multiplier = -0., parallel_replicas_for_non_replicated_merge_tree = true; - -SELECT max(k) IGNORE NULLS FROM t1__fuzz_5 WITH TOTALS SETTINGS use_parallel_replicas = 257, max_parallel_replicas = 65535, prefer_localhost_replica = 0, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_single_task_marks_count_multiplier = -0; - -DROP TABLE IF EXISTS t1__fuzz_5; From 5cdf12a9277993f7adf97c5083c7e92f914a5ae7 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 4 Aug 2024 22:31:17 +0000 Subject: [PATCH 1668/2361] Automatic style fix From 29f06e8037316d61034700eff8e41c9b8730f317 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 15:17:32 +0200 Subject: [PATCH 1669/2361] Update 00002_log_and_exception_messages_formatting.sql --- .../0_stateless/00002_log_and_exception_messages_formatting.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index b806aff3a00..efd961a0fda 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel, no-fasttest, no-ubsan, no-batch, no-flaky-check -- no-parallel because we want to run this test when most of the other tests already passed +-- This is not a regular test. It is intended to run once after other tests to validate certain statistics about the whole test runs. +-- TODO: I advice to put in inside clickhouse-test instead. -- If this test fails, see the "Top patterns of log messages" diagnostics in the end of run.log From 61aff5efae873bde8631919386ba7f5c9db50af1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 5 Aug 2024 15:17:48 +0200 Subject: [PATCH 1670/2361] Update 00002_log_and_exception_messages_formatting.sql --- .../0_stateless/00002_log_and_exception_messages_formatting.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index efd961a0fda..53321afc94c 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -1,7 +1,7 @@ -- Tags: no-parallel, no-fasttest, no-ubsan, no-batch, no-flaky-check -- no-parallel because we want to run this test when most of the other tests already passed -- This is not a regular test. It is intended to run once after other tests to validate certain statistics about the whole test runs. 
--- TODO: I advice to put in inside clickhouse-test instead. +-- TODO: I advise to put in inside clickhouse-test instead. -- If this test fails, see the "Top patterns of log messages" diagnostics in the end of run.log From cfcd9fe096de7e136093e20ac283569c23721714 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 5 Aug 2024 21:57:23 +0000 Subject: [PATCH 1671/2361] Conflicts --- src/Interpreters/ExpressionActions.h | 2 +- src/Storages/StorageMergeTreeIndex.cpp | 15 +++++++-------- src/Storages/StorageMergeTreeIndex.h | 2 +- src/Storages/System/IStorageSystemOneBlock.cpp | 4 ++-- src/Storages/System/StorageSystemColumns.cpp | 6 +++--- .../System/StorageSystemDataSkippingIndices.cpp | 11 +++++------ src/Storages/System/StorageSystemReplicas.cpp | 11 +++++------ src/Storages/VirtualColumnUtils.h | 7 +++---- 8 files changed, 27 insertions(+), 31 deletions(-) diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index 6ff39ee07f7..7652fe49eab 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -102,7 +102,7 @@ public: /// /// @param allow_duplicates_in_input - actions are allowed to have /// duplicated input (that will refer into the block). This is needed for - /// preliminary query filtering (filterBlockWithDAG()), because they just + /// preliminary query filtering (filterBlockWithExpression()), because they just /// pass available virtual columns, which cannot be moved in case they are /// used multiple times. void execute(Block & block, size_t & num_rows, bool dry_run = false, bool allow_duplicates_in_input = false) const; diff --git a/src/Storages/StorageMergeTreeIndex.cpp b/src/Storages/StorageMergeTreeIndex.cpp index 90d01d356e9..15728290f19 100644 --- a/src/Storages/StorageMergeTreeIndex.cpp +++ b/src/Storages/StorageMergeTreeIndex.cpp @@ -275,7 +275,7 @@ public: private: std::shared_ptr storage; Poco::Logger * log; - ActionsDAGPtr virtual_columns_filter; + ExpressionActionsPtr virtual_columns_filter; }; void ReadFromMergeTreeIndex::applyFilters(ActionDAGNodes added_filter_nodes) @@ -289,10 +289,9 @@ void ReadFromMergeTreeIndex::applyFilters(ActionDAGNodes added_filter_nodes) { {}, std::make_shared(), StorageMergeTreeIndex::part_name_column.name }, }; - virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); - - if (virtual_columns_filter) - VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + if (dag) + virtual_columns_filter = VirtualColumnUtils::buildFilterExpression(std::move(*dag), context); } } @@ -345,7 +344,7 @@ void StorageMergeTreeIndex::read( void ReadFromMergeTreeIndex::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - auto filtered_parts = storage->getFilteredDataParts(virtual_columns_filter, context); + auto filtered_parts = storage->getFilteredDataParts(virtual_columns_filter); LOG_DEBUG(log, "Reading index{}from {} parts of table {}", storage->with_marks ? 
" with marks " : " ", @@ -355,7 +354,7 @@ void ReadFromMergeTreeIndex::initializePipeline(QueryPipelineBuilder & pipeline, pipeline.init(Pipe(std::make_shared(getOutputStream().header, storage->key_sample_block, std::move(filtered_parts), context, storage->with_marks))); } -MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(ActionsDAGPtr virtual_columns_filter, const ContextPtr & context) const +MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(const ExpressionActionsPtr & virtual_columns_filter) const { if (!virtual_columns_filter) return data_parts; @@ -365,7 +364,7 @@ MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(Actio all_part_names->insert(part->name); Block filtered_block{{std::move(all_part_names), std::make_shared(), part_name_column.name}}; - VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, filtered_block, context); + VirtualColumnUtils::filterBlockWithExpression(virtual_columns_filter, filtered_block); if (!filtered_block.rows()) return {}; diff --git a/src/Storages/StorageMergeTreeIndex.h b/src/Storages/StorageMergeTreeIndex.h index 652a2d6eeaf..ed8274d7d92 100644 --- a/src/Storages/StorageMergeTreeIndex.h +++ b/src/Storages/StorageMergeTreeIndex.h @@ -36,7 +36,7 @@ public: private: friend class ReadFromMergeTreeIndex; - MergeTreeData::DataPartsVector getFilteredDataParts(ActionsDAGPtr virtual_columns_filter, const ContextPtr & context) const; + MergeTreeData::DataPartsVector getFilteredDataParts(const ExpressionActionsPtr & virtual_columns_filter) const; StoragePtr source_table; bool with_marks; diff --git a/src/Storages/System/IStorageSystemOneBlock.cpp b/src/Storages/System/IStorageSystemOneBlock.cpp index 308b34510ea..b8f32fcdb83 100644 --- a/src/Storages/System/IStorageSystemOneBlock.cpp +++ b/src/Storages/System/IStorageSystemOneBlock.cpp @@ -45,7 +45,7 @@ public: private: std::shared_ptr storage; std::vector columns_mask; - ActionsDAGPtr filter; + std::optional filter; }; void IStorageSystemOneBlock::read( @@ -106,7 +106,7 @@ void ReadFromSystemOneBlock::applyFilters(ActionDAGNodes added_filter_nodes) /// Must prepare sets here, initializePipeline() would be too late, see comment on FutureSetFromSubquery. if (filter) - VirtualColumnUtils::buildSetsForDAG(filter, context); + VirtualColumnUtils::buildSetsForDAG(*filter, context); } } diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 6b3e0094562..03c569303c5 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -338,7 +338,7 @@ private: std::shared_ptr storage; std::vector columns_mask; const size_t max_block_size; - ActionsDAGPtr virtual_columns_filter; + std::optional virtual_columns_filter; }; void ReadFromSystemColumns::applyFilters(ActionDAGNodes added_filter_nodes) @@ -355,7 +355,7 @@ void ReadFromSystemColumns::applyFilters(ActionDAGNodes added_filter_nodes) /// Must prepare sets here, initializePipeline() would be too late, see comment on FutureSetFromSubquery. if (virtual_columns_filter) - VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + VirtualColumnUtils::buildSetsForDAG(*virtual_columns_filter, context); } } @@ -468,7 +468,7 @@ void ReadFromSystemColumns::initializePipeline(QueryPipelineBuilder & pipeline, /// Filter block with `database` and `table` columns. 
if (virtual_columns_filter) - VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, block_to_filter, context); + VirtualColumnUtils::filterBlockWithPredicate(virtual_columns_filter->getOutputs().at(0), block_to_filter, context); if (!block_to_filter.rows()) { diff --git a/src/Storages/System/StorageSystemDataSkippingIndices.cpp b/src/Storages/System/StorageSystemDataSkippingIndices.cpp index a6bba44e257..a41771df406 100644 --- a/src/Storages/System/StorageSystemDataSkippingIndices.cpp +++ b/src/Storages/System/StorageSystemDataSkippingIndices.cpp @@ -214,7 +214,7 @@ private: std::shared_ptr storage; std::vector columns_mask; const size_t max_block_size; - ActionsDAGPtr virtual_columns_filter; + ExpressionActionsPtr virtual_columns_filter; }; void ReadFromSystemDataSkippingIndices::applyFilters(ActionDAGNodes added_filter_nodes) @@ -228,10 +228,9 @@ void ReadFromSystemDataSkippingIndices::applyFilters(ActionDAGNodes added_filter { ColumnString::create(), std::make_shared(), "database" }, }; - virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); - - if (virtual_columns_filter) - VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + if (dag) + virtual_columns_filter = VirtualColumnUtils::buildFilterExpression(std::move(*dag), context); } } @@ -279,7 +278,7 @@ void ReadFromSystemDataSkippingIndices::initializePipeline(QueryPipelineBuilder /// Condition on "database" in a query acts like an index. Block block { ColumnWithTypeAndName(std::move(column), std::make_shared(), "database") }; if (virtual_columns_filter) - VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, block, context); + VirtualColumnUtils::filterBlockWithExpression(virtual_columns_filter, block); ColumnPtr & filtered_databases = block.getByPosition(0).column; pipeline.init(Pipe(std::make_shared( diff --git a/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp index 9fb4dc5ed6f..724e4bd3f77 100644 --- a/src/Storages/System/StorageSystemReplicas.cpp +++ b/src/Storages/System/StorageSystemReplicas.cpp @@ -285,7 +285,7 @@ private: const bool with_zk_fields; const size_t max_block_size; std::shared_ptr impl; - ActionsDAGPtr virtual_columns_filter; + ExpressionActionsPtr virtual_columns_filter; }; void ReadFromSystemReplicas::applyFilters(ActionDAGNodes added_filter_nodes) @@ -301,10 +301,9 @@ void ReadFromSystemReplicas::applyFilters(ActionDAGNodes added_filter_nodes) { ColumnString::create(), std::make_shared(), "engine" }, }; - virtual_columns_filter = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); - - if (virtual_columns_filter) - VirtualColumnUtils::buildSetsForDAG(virtual_columns_filter, context); + auto dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), &block_to_filter); + if (dag) + virtual_columns_filter = VirtualColumnUtils::buildFilterExpression(std::move(*dag), context); } } @@ -443,7 +442,7 @@ void ReadFromSystemReplicas::initializePipeline(QueryPipelineBuilder & pipeline, }; if (virtual_columns_filter) - VirtualColumnUtils::filterBlockWithDAG(virtual_columns_filter, filtered_block, context); + VirtualColumnUtils::filterBlockWithExpression(virtual_columns_filter, filtered_block); if (!filtered_block.rows()) { diff --git 
a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 3f3f93eccf7..abf46dc23a4 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -20,15 +20,14 @@ namespace VirtualColumnUtils /// The filtering functions are tricky to use correctly. /// There are 2 ways: -/// 1. Call filterBlockWithPredicate() or filterBlockWithDAG() inside SourceStepWithFilter::applyFilters(). +/// 1. Call filterBlockWithPredicate() or filterBlockWithExpression() inside SourceStepWithFilter::applyFilters(). /// 2. Call splitFilterDagForAllowedInputs() and buildSetsForDAG() inside SourceStepWithFilter::applyFilters(). -/// Then call filterBlockWithPredicate() or filterBlockWithDAG() in initializePipeline(). +/// Then call filterBlockWithPredicate() or filterBlockWithExpression() in initializePipeline(). /// /// Otherwise calling filter*() outside applyFilters() will throw "Not-ready Set is passed" /// if there are subqueries. -/// Similar to filterBlockWithQuery, but uses ActionsDAG as a predicate. -/// Basically it is filterBlockWithDAG(splitFilterDagForAllowedInputs). +/// Similar to filterBlockWithExpression(buildFilterExpression(splitFilterDagForAllowedInputs(...))). void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. Block should contain all the required columns. From a05ead3f77d52f6f168c0bb483633a63ffbb3997 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 5 Aug 2024 22:04:53 +0000 Subject: [PATCH 1672/2361] Conflict --- src/Core/SettingsChangesHistory.cpp | 260 ---------------------------- 1 file changed, 260 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index b80e4bf3086..5b94391bade 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,266 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. static std::initializer_list> settings_changes_history_initializer = { - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. 
By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}, - {"restore_replace_external_table_functions_to_null", false, false, "New setting."}, - {"restore_replace_external_engines_to_null", false, false, "New setting."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - 
{"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. 
This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - 
{"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. 
While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number 
which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } From 4e8bd0654cd378fd03985170a4df8f49b583bd01 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 5 Aug 2024 22:06:36 +0000 Subject: [PATCH 1673/2361] Remove from history --- src/Core/SettingsChangesHistory.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 5b94391bade..03de54f2780 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -89,7 +89,6 @@ static std::initializer_list Date: Mon, 5 Aug 2024 22:36:30 +0000 Subject: [PATCH 1674/2361] Apply https://github.com/ClickHouse/zlib-ng/pull/18 --- contrib/zlib-ng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/zlib-ng b/contrib/zlib-ng index c19ba056b7c..a2fbeffdc30 160000 --- a/contrib/zlib-ng +++ b/contrib/zlib-ng @@ -1 +1 @@ -Subproject commit c19ba056b7cc8029bb80f509956090c7ded58032 +Subproject commit a2fbeffdc30a8b0ce6d54ee31208e2688eac4c9f From 67556a594bb12b944ce4c94cacf9164e92965dfe Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 5 Aug 2024 23:07:53 +0000 Subject: [PATCH 1675/2361] Conflict --- src/Core/SettingsChangesHistory.cpp | 263 +--------------------------- 1 file changed, 1 insertion(+), 262 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8a40ad94645..dce51b30382 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,268 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the 
key already exists to prevent duplicate entries. static std::initializer_list> settings_changes_history_initializer = { - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"allow_materialized_view_with_bad_select", true, false, "Stricter validation in CREATE MATERIALIZED VIEW"}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. 
It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}, - {"restore_replace_external_table_functions_to_null", false, false, "New setting."}, - {"restore_replace_external_engines_to_null", false, false, "New setting."} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. 
The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. 
This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", 
false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. 
While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in 
milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } @@ -340,6 +78,7 @@ static std::initializer_list Date: Tue, 6 Aug 2024 03:17:17 +0200 Subject: [PATCH 1676/2361] Revert "Revert "Add a test for #47892"" --- .../02968_full_sorting_join_fuzz.reference | 3 +++ .../0_stateless/02968_full_sorting_join_fuzz.sql | 15 +++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 tests/queries/0_stateless/02968_full_sorting_join_fuzz.reference create mode 100644 tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql diff --git a/tests/queries/0_stateless/02968_full_sorting_join_fuzz.reference b/tests/queries/0_stateless/02968_full_sorting_join_fuzz.reference new file mode 100644 index 00000000000..0e9f5cc1db1 --- /dev/null +++ b/tests/queries/0_stateless/02968_full_sorting_join_fuzz.reference @@ -0,0 +1,3 @@ +[NULL] [] 100 0 + +[NULL] [] 100 0 diff --git a/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql b/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql new file mode 100644 index 00000000000..802eda57df3 --- /dev/null +++ b/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql @@ -0,0 +1,15 @@ +SET max_bytes_in_join = 0, join_algorithm = 'full_sorting_merge', max_block_size = 10240; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`key` UInt32, `s` String) ENGINE = MergeTree ORDER BY key; +CREATE TABLE t2 (`key` UInt32, `s` String) ENGINE = MergeTree ORDER BY key; + +INSERT INTO t1 SELECT (sipHash64(number, 'x') % 10000000) + 1 AS key, concat('val', toString(number)) AS s FROM numbers_mt(10000000); +INSERT INTO t2 SELECT 
(sipHash64(number, 'y') % 1000000) + 1 AS key, concat('val', toString(number)) AS s FROM numbers_mt(1000000); + +SELECT materialize([NULL]), [], 100, count(materialize(NULL)) FROM t1 ALL INNER JOIN t2 ON t1.key = t2.key PREWHERE 10 WHERE t2.key WITH TOTALS; + +DROP TABLE t1; +DROP TABLE t2; From 52f3ad97e86b485419f3f88c5c047d0868d0574a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 6 Aug 2024 03:30:19 +0200 Subject: [PATCH 1677/2361] Fix MaterializedMySQL --- src/Databases/MySQL/DatabaseMaterializedMySQL.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp index 2f5477a6b9d..8b3850c4e0c 100644 --- a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp @@ -46,6 +46,7 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( , settings(std::move(settings_)) , materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get()) { + createDirectories(); } void DatabaseMaterializedMySQL::rethrowExceptionIfNeeded() const From 15a4d13cdad3fe9181703d5c0a993b8dcb8cb263 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 6 Aug 2024 04:33:02 +0200 Subject: [PATCH 1678/2361] Update 02968_full_sorting_join_fuzz.sql --- tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql b/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql index 802eda57df3..85ca740cef2 100644 --- a/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql +++ b/tests/queries/0_stateless/02968_full_sorting_join_fuzz.sql @@ -9,7 +9,7 @@ CREATE TABLE t2 (`key` UInt32, `s` String) ENGINE = MergeTree ORDER BY key; INSERT INTO t1 SELECT (sipHash64(number, 'x') % 10000000) + 1 AS key, concat('val', toString(number)) AS s FROM numbers_mt(10000000); INSERT INTO t2 SELECT (sipHash64(number, 'y') % 1000000) + 1 AS key, concat('val', toString(number)) AS s FROM numbers_mt(1000000); -SELECT materialize([NULL]), [], 100, count(materialize(NULL)) FROM t1 ALL INNER JOIN t2 ON t1.key = t2.key PREWHERE 10 WHERE t2.key WITH TOTALS; +SELECT materialize([NULL]), [], 100, count(materialize(NULL)) FROM t1 ALL INNER JOIN t2 ON t1.key = t2.key PREWHERE 10 WHERE t2.key != 0 WITH TOTALS; DROP TABLE t1; DROP TABLE t2; From e6f566e49d78080a954ca992d8d5e0f5fb1bb1e2 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Tue, 6 Aug 2024 13:23:12 +0800 Subject: [PATCH 1679/2361] Small refactors in ORC output format --- .../Formats/Impl/ORCBlockOutputFormat.cpp | 99 +++++++------------ 1 file changed, 33 insertions(+), 66 deletions(-) diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp index 6f543a05fba..bd89ae0fa86 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -203,25 +204,15 @@ template void ORCBlockOutputFormat::writeNumbers( orc::ColumnVectorBatch & orc_column, const IColumn & column, - const PaddedPODArray * null_bytemap, + const PaddedPODArray * /*null_bytemap*/, ConvertFunc convert) { NumberVectorBatch & number_orc_column = dynamic_cast(orc_column); const auto & number_column = assert_cast &>(column); - number_orc_column.resize(number_column.size()); + 
number_orc_column.data.resize(number_column.size()); for (size_t i = 0; i != number_column.size(); ++i) - { - if (null_bytemap && (*null_bytemap)[i]) - { - number_orc_column.notNull[i] = 0; - continue; - } - - number_orc_column.notNull[i] = 1; number_orc_column.data[i] = convert(number_column.getElement(i)); - } - number_orc_column.numElements = number_column.size(); } template @@ -229,7 +220,7 @@ void ORCBlockOutputFormat::writeDecimals( orc::ColumnVectorBatch & orc_column, const IColumn & column, DataTypePtr & type, - const PaddedPODArray * null_bytemap, + const PaddedPODArray * /*null_bytemap*/, ConvertFunc convert) { DecimalVectorBatch & decimal_orc_column = dynamic_cast(orc_column); @@ -238,71 +229,49 @@ void ORCBlockOutputFormat::writeDecimals( decimal_orc_column.precision = decimal_type->getPrecision(); decimal_orc_column.scale = decimal_type->getScale(); decimal_orc_column.resize(decimal_column.size()); - for (size_t i = 0; i != decimal_column.size(); ++i) - { - if (null_bytemap && (*null_bytemap)[i]) - { - decimal_orc_column.notNull[i] = 0; - continue; - } - decimal_orc_column.notNull[i] = 1; + decimal_orc_column.values.resize(decimal_column.size()); + for (size_t i = 0; i != decimal_column.size(); ++i) decimal_orc_column.values[i] = convert(decimal_column.getElement(i).value); - } - decimal_orc_column.numElements = decimal_column.size(); } template void ORCBlockOutputFormat::writeStrings( orc::ColumnVectorBatch & orc_column, const IColumn & column, - const PaddedPODArray * null_bytemap) + const PaddedPODArray * /*null_bytemap*/) { orc::StringVectorBatch & string_orc_column = dynamic_cast(orc_column); const auto & string_column = assert_cast(column); - string_orc_column.resize(string_column.size()); + string_orc_column.data.resize(string_column.size()); + string_orc_column.length.resize(string_column.size()); for (size_t i = 0; i != string_column.size(); ++i) { - if (null_bytemap && (*null_bytemap)[i]) - { - string_orc_column.notNull[i] = 0; - continue; - } - - string_orc_column.notNull[i] = 1; const std::string_view & string = string_column.getDataAt(i).toView(); string_orc_column.data[i] = const_cast(string.data()); string_orc_column.length[i] = string.size(); } - string_orc_column.numElements = string_column.size(); } template void ORCBlockOutputFormat::writeDateTimes( orc::ColumnVectorBatch & orc_column, const IColumn & column, - const PaddedPODArray * null_bytemap, + const PaddedPODArray * /*null_bytemap*/, GetSecondsFunc get_seconds, GetNanosecondsFunc get_nanoseconds) { orc::TimestampVectorBatch & timestamp_orc_column = dynamic_cast(orc_column); const auto & timestamp_column = assert_cast(column); - timestamp_orc_column.resize(timestamp_column.size()); + timestamp_orc_column.data.resize(timestamp_column.size()); + timestamp_orc_column.nanoseconds.resize(timestamp_column.size()); for (size_t i = 0; i != timestamp_column.size(); ++i) { - if (null_bytemap && (*null_bytemap)[i]) - { - timestamp_orc_column.notNull[i] = 0; - continue; - } - - timestamp_orc_column.notNull[i] = 1; timestamp_orc_column.data[i] = static_cast(get_seconds(timestamp_column.getElement(i))); timestamp_orc_column.nanoseconds[i] = static_cast(get_nanoseconds(timestamp_column.getElement(i))); } - timestamp_orc_column.numElements = timestamp_column.size(); } void ORCBlockOutputFormat::writeColumn( @@ -311,9 +280,19 @@ void ORCBlockOutputFormat::writeColumn( DataTypePtr & type, const PaddedPODArray * null_bytemap) { - orc_column.notNull.resize(column.size()); + orc_column.numElements = column.size(); if 
(null_bytemap) - orc_column.hasNulls = true; + { + orc_column.hasNulls = !memoryIsZero(null_bytemap->data(), 0, null_bytemap->size()); + if (orc_column.hasNulls) + { + orc_column.notNull.resize(null_bytemap->size()); + for (size_t i = 0; i < null_bytemap->size(); ++i) + orc_column.notNull[i] = !(*null_bytemap)[i]; + } + } + else + orc_column.hasNulls = false; /// ORC doesn't have unsigned types, so cast everything to signed and sign-extend to Int64 to /// make the ORC library calculate min and max correctly. @@ -471,6 +450,7 @@ void ORCBlockOutputFormat::writeColumn( } case TypeIndex::Nullable: { + chassert(!null_bytemap); const auto & nullable_column = assert_cast(column); const PaddedPODArray & new_null_bytemap = assert_cast &>(*nullable_column.getNullMapColumnPtr()).getData(); auto nested_type = removeNullable(type); @@ -485,19 +465,15 @@ void ORCBlockOutputFormat::writeColumn( const ColumnArray::Offsets & offsets = list_column.getOffsets(); size_t column_size = list_column.size(); - list_orc_column.resize(column_size); + list_orc_column.offsets.resize(column_size + 1); /// The length of list i in ListVectorBatch is offsets[i+1] - offsets[i]. list_orc_column.offsets[0] = 0; for (size_t i = 0; i != column_size; ++i) - { list_orc_column.offsets[i + 1] = offsets[i]; - list_orc_column.notNull[i] = 1; - } orc::ColumnVectorBatch & nested_orc_column = *list_orc_column.elements; - writeColumn(nested_orc_column, list_column.getData(), nested_type, null_bytemap); - list_orc_column.numElements = column_size; + writeColumn(nested_orc_column, list_column.getData(), nested_type, nullptr); break; } case TypeIndex::Tuple: @@ -505,10 +481,8 @@ void ORCBlockOutputFormat::writeColumn( orc::StructVectorBatch & struct_orc_column = dynamic_cast(orc_column); const auto & tuple_column = assert_cast(column); auto nested_types = assert_cast(type.get())->getElements(); - for (size_t i = 0; i != tuple_column.size(); ++i) - struct_orc_column.notNull[i] = 1; for (size_t i = 0; i != tuple_column.tupleSize(); ++i) - writeColumn(*struct_orc_column.fields[i], tuple_column.getColumn(i), nested_types[i], null_bytemap); + writeColumn(*struct_orc_column.fields[i], tuple_column.getColumn(i), nested_types[i], nullptr); break; } case TypeIndex::Map: @@ -520,25 +494,21 @@ void ORCBlockOutputFormat::writeColumn( size_t column_size = list_column.size(); - map_orc_column.resize(list_column.size()); + map_orc_column.offsets.resize(column_size + 1); /// The length of list i in ListVectorBatch is offsets[i+1] - offsets[i]. 
map_orc_column.offsets[0] = 0; for (size_t i = 0; i != column_size; ++i) - { map_orc_column.offsets[i + 1] = offsets[i]; - map_orc_column.notNull[i] = 1; - } + const auto nested_columns = assert_cast(list_column.getDataPtr().get())->getColumns(); orc::ColumnVectorBatch & keys_orc_column = *map_orc_column.keys; auto key_type = map_type.getKeyType(); - writeColumn(keys_orc_column, *nested_columns[0], key_type, null_bytemap); + writeColumn(keys_orc_column, *nested_columns[0], key_type, nullptr); orc::ColumnVectorBatch & values_orc_column = *map_orc_column.elements; auto value_type = map_type.getValueType(); - writeColumn(values_orc_column, *nested_columns[1], value_type, null_bytemap); - - map_orc_column.numElements = column_size; + writeColumn(values_orc_column, *nested_columns[1], value_type, nullptr); break; } default: @@ -575,10 +545,7 @@ void ORCBlockOutputFormat::consume(Chunk chunk) size_t columns_num = chunk.getNumColumns(); size_t rows_num = chunk.getNumRows(); - /// getMaxColumnSize is needed to write arrays. - /// The size of the batch must be no less than total amount of array elements - /// and no less than the number of rows (ORC writes a null bit for every row). - std::unique_ptr batch = writer->createRowBatch(getMaxColumnSize(chunk)); + std::unique_ptr batch = writer->createRowBatch(chunk.getNumRows()); orc::StructVectorBatch & root = dynamic_cast(*batch); auto columns = chunk.detachColumns(); From 24e1bfdb686bf4846c032eab49bc86c5389865ae Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Tue, 6 Aug 2024 07:37:00 +0000 Subject: [PATCH 1680/2361] Join engine support OPTIMIZE query Signed-off-by: Duc Canh Le --- src/Interpreters/HashJoin/HashJoin.cpp | 28 +++++++------ src/Interpreters/HashJoin/HashJoin.h | 2 +- src/Storages/StorageJoin.cpp | 42 +++++++++++++++++++ src/Storages/StorageJoin.h | 12 ++++++ .../03204_storage_join_optimize.reference | 10 +++++ .../03204_storage_join_optimize.sql | 5 +++ 6 files changed, 86 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/03204_storage_join_optimize.reference create mode 100644 tests/queries/0_stateless/03204_storage_join_optimize.sql diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 0c7cad4360d..a621ce16fb1 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -648,10 +648,8 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED); } -void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join) +void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join, bool force_optimize) { - if (shrink_blocks) - return; /// Already shrunk Int64 current_memory_usage = getCurrentQueryMemoryUsage(); Int64 query_memory_usage_delta = current_memory_usage - memory_usage_before_adding_blocks; @@ -659,15 +657,21 @@ void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join) auto max_total_bytes_in_join = table_join->sizeLimits().max_bytes; - /** If accounted data size is more than half of `max_bytes_in_join` - * or query memory consumption growth from the beginning of adding blocks (estimation of memory consumed by join using memory tracker) - * is bigger than half of all memory available for query, - * then shrink stored blocks to fit. 
- */ - shrink_blocks = (max_total_bytes_in_join && total_bytes_in_join > max_total_bytes_in_join / 2) || - (max_total_bytes_for_query && query_memory_usage_delta > max_total_bytes_for_query / 2); - if (!shrink_blocks) - return; + if (!force_optimize) + { + if (shrink_blocks) + return; /// Already shrunk + + /** If accounted data size is more than half of `max_bytes_in_join` + * or query memory consumption growth from the beginning of adding blocks (estimation of memory consumed by join using memory tracker) + * is bigger than half of all memory available for query, + * then shrink stored blocks to fit. + */ + shrink_blocks = (max_total_bytes_in_join && total_bytes_in_join > max_total_bytes_in_join / 2) || + (max_total_bytes_for_query && query_memory_usage_delta > max_total_bytes_for_query / 2); + if (!shrink_blocks) + return; + } LOG_DEBUG(log, "Shrinking stored blocks, memory consumption is {} {} calculated by join, {} {} by memory tracker", ReadableSize(total_bytes_in_join), max_total_bytes_in_join ? fmt::format("/ {}", ReadableSize(max_total_bytes_in_join)) : "", diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 0b115b9fdbb..00f5ef6d214 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -372,7 +372,7 @@ public: void debugKeys() const; - void shrinkStoredBlocksToFit(size_t & total_bytes_in_join); + void shrinkStoredBlocksToFit(size_t & total_bytes_in_join, bool force_optimize = false); void setMaxJoinedBlockRows(size_t value) { max_joined_block_rows = value; } diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index a0d6cf11b64..695c41d3a62 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -75,6 +75,7 @@ StorageJoin::StorageJoin( table_join = std::make_shared(limits, use_nulls, kind, strictness, key_names); join = std::make_shared(table_join, getRightSampleBlock(), overwrite); restore(); + optimizeUnlocked(); } RWLockImpl::LockHolder StorageJoin::tryLockTimedWithContext(const RWLock & lock, RWLockImpl::Type type, ContextPtr context) const @@ -99,6 +100,47 @@ SinkToStoragePtr StorageJoin::write(const ASTPtr & query, const StorageMetadataP return StorageSetOrJoinBase::write(query, metadata_snapshot, context, /*async_insert=*/false); } +bool StorageJoin::optimize( + const ASTPtr & /*query*/, + const StorageMetadataPtr & /*metadata_snapshot*/, + const ASTPtr & partition, + bool final, + bool deduplicate, + const Names & /* deduplicate_by_columns */, + bool cleanup, + ContextPtr context) +{ + + if (partition) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Partition cannot be specified when optimizing table of type Join"); + + if (final) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "FINAL cannot be specified when optimizing table of type Join"); + + if (deduplicate) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE cannot be specified when optimizing table of type Join"); + + if (cleanup) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CLEANUP cannot be specified when optimizing table of type Join"); + + std::lock_guard mutate_lock(mutate_mutex); + TableLockHolder lock_holder = tryLockTimedWithContext(rwlock, RWLockImpl::Write, context); + + optimizeUnlocked(); + return true; +} + +void StorageJoin::optimizeUnlocked() +{ + size_t current_bytes = join->getTotalByteCount(); + size_t dummy = current_bytes; + join->shrinkStoredBlocksToFit(dummy, true); + + size_t optimized_bytes = 
join->getTotalByteCount(); + if (current_bytes > optimized_bytes) + LOG_INFO(getLogger("StorageJoin"), "Optimized Join storage from {} to {} bytes", current_bytes, optimized_bytes); +} + void StorageJoin::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr context, TableExclusiveLockHolder &) { std::lock_guard mutate_lock(mutate_mutex); diff --git a/src/Storages/StorageJoin.h b/src/Storages/StorageJoin.h index c76df0cb452..10a551b4063 100644 --- a/src/Storages/StorageJoin.h +++ b/src/Storages/StorageJoin.h @@ -61,6 +61,18 @@ public: SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context, bool async_insert) override; + bool optimize( + const ASTPtr & /*query*/, + const StorageMetadataPtr & /*metadata_snapshot*/, + const ASTPtr & /*partition*/, + bool /*final*/, + bool /*deduplicate*/, + const Names & /* deduplicate_by_columns */, + bool /*cleanup*/, + ContextPtr /*context*/) override; + + void optimizeUnlocked(); + Pipe read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, diff --git a/tests/queries/0_stateless/03204_storage_join_optimize.reference b/tests/queries/0_stateless/03204_storage_join_optimize.reference new file mode 100644 index 00000000000..af98bcd6397 --- /dev/null +++ b/tests/queries/0_stateless/03204_storage_join_optimize.reference @@ -0,0 +1,10 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 diff --git a/tests/queries/0_stateless/03204_storage_join_optimize.sql b/tests/queries/0_stateless/03204_storage_join_optimize.sql new file mode 100644 index 00000000000..03a4658ba6c --- /dev/null +++ b/tests/queries/0_stateless/03204_storage_join_optimize.sql @@ -0,0 +1,5 @@ +CREATE TABLE dict_03204 (k UInt64, v UInt64) ENGINE = Join(ANY, LEFT, k); +INSERT INTO dict_03204 SELECT number, number FROM numbers(10); +OPTIMIZE TABLE dict_03204; +SELECT * FROM dict_03204 ORDER BY k; +DROP TABLE dict_03204; From 8e6de033355485a770f86b848e7574728a0eecf4 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 6 Aug 2024 09:39:00 +0200 Subject: [PATCH 1681/2361] Remove unsupported files --- .../queries/0_stateless/03215_parsing_archive_name_s3.sql | 8 ++++++++ tests/queries/0_stateless/data_minio/::03215_archive.csv | 1 - .../0_stateless/data_minio/test.zip::03215_archive.csv | 1 - .../0_stateless/data_minio/test::03215_archive.csv | 1 - 4 files changed, 8 insertions(+), 3 deletions(-) delete mode 100644 tests/queries/0_stateless/data_minio/::03215_archive.csv delete mode 100644 tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv delete mode 100644 tests/queries/0_stateless/data_minio/test::03215_archive.csv diff --git a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql index e34be475c5a..e0d63d313fa 100644 --- a/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql +++ b/tests/queries/0_stateless/03215_parsing_archive_name_s3.sql @@ -1,7 +1,15 @@ -- Tags: no-fasttest -- Tag no-fasttest: Depends on AWS +SET s3_truncate_on_insert=1; + +INSERT INTO FUNCTION s3(s3_conn, filename='::03215_archive.csv') SELECT 1; SELECT _file, _path FROM s3(s3_conn, filename='::03215_archive.csv') ORDER BY (_file, _path); + SELECT _file, _path FROM s3(s3_conn, filename='test :: 03215_archive.csv') ORDER BY (_file, _path); -- { serverError S3_ERROR } + +INSERT INTO FUNCTION s3(s3_conn, filename='test::03215_archive.csv') SELECT 1; SELECT _file, _path FROM s3(s3_conn, filename='test::03215_archive.csv') ORDER BY (_file, _path); + 
+INSERT INTO FUNCTION s3(s3_conn, filename='test.zip::03215_archive.csv') SETTINGS allow_archive_path_syntax=0 SELECT 1; SELECT _file, _path FROM s3(s3_conn, filename='test.zip::03215_archive.csv') ORDER BY (_file, _path) SETTINGS allow_archive_path_syntax=0; diff --git a/tests/queries/0_stateless/data_minio/::03215_archive.csv b/tests/queries/0_stateless/data_minio/::03215_archive.csv deleted file mode 100644 index d00491fd7e5..00000000000 --- a/tests/queries/0_stateless/data_minio/::03215_archive.csv +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv b/tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv deleted file mode 100644 index d00491fd7e5..00000000000 --- a/tests/queries/0_stateless/data_minio/test.zip::03215_archive.csv +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/tests/queries/0_stateless/data_minio/test::03215_archive.csv b/tests/queries/0_stateless/data_minio/test::03215_archive.csv deleted file mode 100644 index d00491fd7e5..00000000000 --- a/tests/queries/0_stateless/data_minio/test::03215_archive.csv +++ /dev/null @@ -1 +0,0 @@ -1 From 11fd263be6e24ee4cdc3a51ac497510c82837fa5 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Thu, 1 Aug 2024 08:35:05 +0000 Subject: [PATCH 1682/2361] implement DROP DETACHED PARTITION ALL Signed-off-by: Duc Canh Le --- src/Storages/MergeTree/MergeTreeData.cpp | 7 +++++-- .../03203_drop_detached_partition_all.reference | 5 +++++ .../0_stateless/03203_drop_detached_partition_all.sql | 8 ++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03203_drop_detached_partition_all.reference create mode 100644 tests/queries/0_stateless/03203_drop_detached_partition_all.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 30a4a7caa0f..3d3ae2e63ea 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6253,10 +6253,13 @@ void MergeTreeData::dropDetached(const ASTPtr & partition, bool part, ContextPtr } else { - String partition_id = getPartitionIDFromQuery(partition, local_context); + String partition_id; + bool all = partition->as()->all; + if (!all) + partition_id = getPartitionIDFromQuery(partition, local_context); DetachedPartsInfo detached_parts = getDetachedParts(); for (const auto & part_info : detached_parts) - if (part_info.valid_name && part_info.partition_id == partition_id + if (part_info.valid_name && (all || part_info.partition_id == partition_id) && part_info.prefix != "attaching" && part_info.prefix != "deleting") renamed_parts.addPart(part_info.dir_name, "deleting_" + part_info.dir_name, part_info.disk); } diff --git a/tests/queries/0_stateless/03203_drop_detached_partition_all.reference b/tests/queries/0_stateless/03203_drop_detached_partition_all.reference new file mode 100644 index 00000000000..c0f52d1d898 --- /dev/null +++ b/tests/queries/0_stateless/03203_drop_detached_partition_all.reference @@ -0,0 +1,5 @@ +1 1 +2 2 +3 3 +3 +0 diff --git a/tests/queries/0_stateless/03203_drop_detached_partition_all.sql b/tests/queries/0_stateless/03203_drop_detached_partition_all.sql new file mode 100644 index 00000000000..e29eb4ae36b --- /dev/null +++ b/tests/queries/0_stateless/03203_drop_detached_partition_all.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t_03203; +CREATE TABLE t_03203 (p UInt64, v UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY v; +INSERT INTO t_03203 VALUES (1, 1), (2, 2), (3, 3); +SELECT * FROM t_03203 ORDER 
BY p, v; +ALTER TABLE t_03203 DETACH PARTITION ALL; +SELECT count() FROM system.detached_parts WHERE database = currentDatabase() AND table = 't_03203'; +ALTER TABLE t_03203 DROP DETACHED PARTITION ALL SETTINGS allow_drop_detached = 1; +SELECT count() FROM system.detached_parts WHERE database = currentDatabase() AND table = 't_03203'; From c6c2fce9d22739b1881d5b4814dbbabe8ab8f09b Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Tue, 6 Aug 2024 07:51:35 +0000 Subject: [PATCH 1683/2361] update document Signed-off-by: Duc Canh Le --- docs/en/sql-reference/statements/alter/partition.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 778816f8934..1bb7817364a 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -9,6 +9,7 @@ The following operations with [partitions](/docs/en/engines/table-engines/merget - [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forget it. - [DROP PARTITION\|PART](#drop-partitionpart) — Deletes a partition or part. +- [DROP DETACHED PARTITION\|PART](#drop-detached-partitionpart) - Delete a part or all parts of a partition from `detached`. - [FORGET PARTITION](#forget-partition) — Deletes a partition metadata from zookeeper if it's empty. - [ATTACH PARTITION\|PART](#attach-partitionpart) — Adds a partition or part from the `detached` directory to the table. - [ATTACH PARTITION FROM](#attach-partition-from) — Copies the data partition from one table to another and adds. @@ -68,7 +69,7 @@ ALTER TABLE mt DROP PART 'all_4_4_0'; ## DROP DETACHED PARTITION\|PART ``` sql -ALTER TABLE table_name [ON CLUSTER cluster] DROP DETACHED PARTITION|PART partition_expr +ALTER TABLE table_name [ON CLUSTER cluster] DROP DETACHED PARTITION|PART ALL|partition_expr ``` Removes the specified part or all parts of the specified partition from `detached`. From 27569815d519c8ad70842798b989874cc04b3271 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 5 Aug 2024 17:01:00 +0200 Subject: [PATCH 1684/2361] Fix query cache randomization in stress tests Right now it fails with [1]: 2024.08.05 12:53:53.659422 [ 3559 ] {123237a2-5f08-47e4-996e-21e20bc4a51a} executeQuery: Code: 704. DB::Exception: The query result was not cached because the query contains a non-deterministic function. Use setting `query_cache_nondeterministic_function_handling = 'save'` or `= 'ignore'` to cache the query result regardless or to omit caching. (QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS) (version 24.8.1.1356) (from [::1]:42534) (in query: SELECT version(), arrayStringConcat(groupArray(value), ' ') FROM system.build_options WHERE name IN ('GIT_HASH', 'GIT_BRANCH') ), Stack trace (when copying this message, always include the lines below): Uexpected exception, will not retry: HTTPError : Code: 500. Code: 704. DB::Exception: The query result was not cached because the query contains a non-deterministic function. Use setting `query_cache_nondeterministic_function_handling = 'save'` or `= 'ignore'` to cache the query result regardless or to omit caching. 
(QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS) (version 24.8.1.1356) [1]: https://s3.amazonaws.com/clickhouse-test-reports/67737/e68c9c8d16f37f6c25739076c9b071ed97952269/stress_test__asan_/stress_test_run_10.txt Signed-off-by: Azat Khuzhin --- tests/ci/stress.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ci/stress.py b/tests/ci/stress.py index 8b8b17dd2a9..a3cdd8b3d6f 100755 --- a/tests/ci/stress.py +++ b/tests/ci/stress.py @@ -47,6 +47,8 @@ def get_options(i: int, upgrade_check: bool) -> str: if i > 0 and random.random() < 1 / 3: client_options.append("use_query_cache=1") + client_options.append("query_cache_nondeterministic_function_handling='ignore'") + client_options.append("query_cache_system_table_handling='ignore'") if i % 5 == 1: client_options.append("memory_tracker_fault_probability=0.001") From b2987e4f4dd267293b8b4f96f86f2202a7671b84 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Tue, 6 Aug 2024 11:42:18 +0200 Subject: [PATCH 1685/2361] Update StorageWindowView.cpp --- src/Storages/WindowView/StorageWindowView.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index bf934ed00d9..5830c844582 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1450,7 +1450,7 @@ void StorageWindowView::writeIntoWindowView( LOG_TRACE(window_view.log, "New max watermark: {}", window_view.max_watermark); } - Pipe pipe(std::make_shared(std::move(block))); + Pipe pipe(std::make_shared(block)); UInt32 lateness_bound = 0; UInt32 t_max_watermark = 0; From 50ed7da27dc1bcdbb03f5cf7e3d80ae95ecf643f Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Tue, 6 Aug 2024 12:02:53 +0200 Subject: [PATCH 1686/2361] squash! fix for parallel execution --- tests/queries/0_stateless/03215_parquet_index.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03215_parquet_index.sql b/tests/queries/0_stateless/03215_parquet_index.sql index 5b176ff70ba..0afccdf6f3b 100644 --- a/tests/queries/0_stateless/03215_parquet_index.sql +++ b/tests/queries/0_stateless/03215_parquet_index.sql @@ -4,12 +4,14 @@ CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet); INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); SELECT col1, col2 FROM test_parquet; + -- Parquet will have indexes in columns. We are not checking that indexes exist here, there is an integration test test_parquet_page_index for that. We just check that a setting doesn't break the SELECT DROP TABLE IF EXISTS test_parquet; CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet) SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=true; INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); SELECT col1, col2 FROM test_parquet; + -- Parquet will not have indexes in columns. 
DROP TABLE IF EXISTS test_parquet; CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet) SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=false; From 4a8be15ca8bffc445a6c66cacb5226946531e105 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 10:25:33 +0000 Subject: [PATCH 1687/2361] Bump rocksdb to v8.11.4 --- contrib/rocksdb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 49ce8a1064d..5c2be544f55 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79 +Subproject commit 5c2be544f5509465957706c955b6d623e889ac4e From 7c15ad3966a982a3597034f8a66d91f2750dd2d0 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 6 Aug 2024 10:15:53 +0000 Subject: [PATCH 1688/2361] do not try to create azure container if not needed --- src/Common/ProfileEvents.cpp | 4 +- .../AzureBlobStorageCommon.cpp | 45 ++++++++++++++++++- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 2b090136e2e..c6cf6c04af3 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -457,6 +457,7 @@ The server successfully detected this situation and will download merged part fr M(AzureDeleteObjects, "Number of Azure blob storage API DeleteObject(s) calls.") \ M(AzureListObjects, "Number of Azure blob storage API ListObjects calls.") \ M(AzureGetProperties, "Number of Azure blob storage API GetProperties calls.") \ + M(AzureCreateContainer, "Number of Azure blob storage API CreateContainer calls.") \ \ M(DiskAzureGetObject, "Number of Disk Azure API GetObject calls.") \ M(DiskAzureUpload, "Number of Disk Azure blob storage API Upload calls") \ @@ -464,8 +465,9 @@ The server successfully detected this situation and will download merged part fr M(DiskAzureCommitBlockList, "Number of Disk Azure blob storage API CommitBlockList calls") \ M(DiskAzureCopyObject, "Number of Disk Azure blob storage API CopyObject calls") \ M(DiskAzureListObjects, "Number of Disk Azure blob storage API ListObjects calls.") \ - M(DiskAzureDeleteObjects, "Number of Azure blob storage API DeleteObject(s) calls.") \ + M(DiskAzureDeleteObjects, "Number of Disk Azure blob storage API DeleteObject(s) calls.") \ M(DiskAzureGetProperties, "Number of Disk Azure blob storage API GetProperties calls.") \ + M(DiskAzureCreateContainer, "Number of Disk Azure blob storage API CreateContainer calls.") \ \ M(ReadBufferFromAzureMicroseconds, "Time spent on reading from Azure.") \ M(ReadBufferFromAzureInitMicroseconds, "Time spent initializing connection to Azure.") \ diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp index 0aa3b9c40b5..9043edd66a0 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp @@ -11,6 +11,14 @@ #include #include +namespace ProfileEvents +{ + extern const Event AzureGetProperties; + extern const Event DiskAzureGetProperties; + extern const Event AzureCreateContainer; + extern const Event DiskAzureCreateContainer; +} + namespace DB { @@ -214,20 +222,53 @@ void processURL(const String & url, const String & container_name, Endpoint & en } } +static bool containerExists(const ContainerClient & client) +{ + 
ProfileEvents::increment(ProfileEvents::AzureGetProperties); + if (client.GetClickhouseOptions().IsClientForDisk) + ProfileEvents::increment(ProfileEvents::DiskAzureGetProperties); + + try + { + client.GetProperties(); + return true; + } + catch (const Azure::Storage::StorageException & e) + { + if (e.StatusCode == Azure::Core::Http::HttpStatusCode::NotFound) + return false; + throw; + } +} + std::unique_ptr getContainerClient(const ConnectionParams & params, bool readonly) { if (params.endpoint.container_already_exists.value_or(false) || readonly) + { return params.createForContainer(); + } + + if (!params.endpoint.container_already_exists.has_value()) + { + auto container_client = params.createForContainer(); + if (containerExists(*container_client)) + return container_client; + } try { auto service_client = params.createForService(); + + ProfileEvents::increment(ProfileEvents::AzureCreateContainer); + if (params.client_options.ClickhouseOptions.IsClientForDisk) + ProfileEvents::increment(ProfileEvents::DiskAzureCreateContainer); + return std::make_unique(service_client->CreateBlobContainer(params.endpoint.container_name).Value); } catch (const Azure::Storage::StorageException & e) { - /// If container_already_exists is not set (in config), ignore already exists error. - /// (Conflict - The specified container already exists) + /// If container_already_exists is not set (in config), ignore already exists error. Conflict - The specified container already exists. + /// To avoid race with creation of container handle this error despite that we have already checked the existence of container. if (!params.endpoint.container_already_exists.has_value() && e.StatusCode == Azure::Core::Http::HttpStatusCode::Conflict) return params.createForContainer(); throw; From aa3d8086c32ce2b5a90fbe4788579cae970ec32f Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 6 Aug 2024 12:30:39 +0200 Subject: [PATCH 1689/2361] fix integration tests --- .../test_storage_azure_blob_storage/test.py | 37 ++++++++++++------- .../test_cluster.py | 21 +++++++---- tests/integration/test_storage_hdfs/test.py | 10 ++--- 3 files changed, 41 insertions(+), 27 deletions(-) diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 15a1f6db2c1..092c124855c 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -635,7 +635,8 @@ def test_simple_write_named_collection_2_table_function(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a') SETTINGS azure_truncate_on_insert=1", + f" container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a')", + settings={"azure_truncate_on_insert": 1}, ) print(get_azure_file_content("test_simple_write_named_2_tf.csv", port)) assert get_azure_file_content("test_simple_write_named_2_tf.csv", port) == '1,"a"\n' @@ -658,7 +659,8 @@ def test_put_get_with_globs_tf(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', 
format='CSV', compression='auto', structure='{table_format}') VALUES {values} SETTINGS azure_truncate_on_insert=1", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) query = ( f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, " @@ -710,9 +712,9 @@ def test_schema_inference_from_globs_tf(cluster): query = ( f"insert into table function azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " - f"container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values} SETTINGS azure_truncate_on_insert=1" + f"container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values}" ) - azure_query(node, query) + azure_query(node, query, settings={"azure_truncate_on_insert": 1}) query = ( f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, " @@ -738,7 +740,8 @@ def test_partition_by_tf(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " f"'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', " - f"'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values} SETTINGS azure_truncate_on_insert=1", + f"'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) assert "1,2,3\n" == get_azure_file_content("test_partition_tf_3.csv", port) @@ -757,7 +760,8 @@ def test_filter_using_file(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', 'cont', '{filename}', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', " - f"'{table_format}') PARTITION BY {partition_by} VALUES {values} SETTINGS azure_truncate_on_insert=1", + f"'{table_format}') PARTITION BY {partition_by} VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) query = ( @@ -855,7 +859,8 @@ def test_function_signatures(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_signature.csv', '{account_name}', '{account_key}', 'CSV', 'auto', 'column1 UInt32') VALUES (1),(2),(3) SETTINGS azure_truncate_on_insert=1", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_signature.csv', '{account_name}', '{account_key}', 'CSV', 'auto', 'column1 UInt32') VALUES (1),(2),(3)", + settings={"azure_truncate_on_insert": 1}, ) # " - connection_string, container_name, blobpath\n" @@ -969,12 +974,14 @@ def test_union_schema_inference_mode(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference1.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'a UInt32') VALUES (1) SETTINGS azure_truncate_on_insert=1", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference1.jsonl', '{account_name}', 
'{account_key}', 'JSONEachRow', 'auto', 'a UInt32') VALUES (1)", + settings={"azure_truncate_on_insert": 1}, ) azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'b UInt32') VALUES (2) SETTINGS azure_truncate_on_insert=1", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'b UInt32') VALUES (2)", + settings={"azure_truncate_on_insert": 1}, ) node.query("system drop schema cache for azure") @@ -1011,7 +1018,8 @@ def test_union_schema_inference_mode(cluster): assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference3.jsonl', '{account_name}', '{account_key}', 'CSV', 'auto', 's String') VALUES ('Error') SETTINGS azure_truncate_on_insert=1", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference3.jsonl', '{account_name}', '{account_key}', 'CSV', 'auto', 's String') VALUES ('Error')", + settings={"azure_truncate_on_insert": 1}, ) error = azure_query( @@ -1505,7 +1513,8 @@ def test_hive_partitioning_with_one_parameter(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values} SETTINGS azure_truncate_on_insert=1", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) query = ( @@ -1542,7 +1551,8 @@ def test_hive_partitioning_with_two_parameters(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2} SETTINGS azure_truncate_on_insert=1", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + settings={"azure_truncate_on_insert": 1}, ) query = ( @@ -1588,7 +1598,8 @@ def test_hive_partitioning_without_setting(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," - f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2} SETTINGS azure_truncate_on_insert=1", + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + settings={"azure_truncate_on_insert": 1}, ) query = ( diff --git a/tests/integration/test_storage_azure_blob_storage/test_cluster.py b/tests/integration/test_storage_azure_blob_storage/test_cluster.py index 4d63016cf9a..04baf007c69 100644 --- a/tests/integration/test_storage_azure_blob_storage/test_cluster.py +++ b/tests/integration/test_storage_azure_blob_storage/test_cluster.py @@ -71,7 +71,8 @@ def test_select_all(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 
'cont', 'test_cluster_select_all.csv', 'devstoreaccount1'," f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', 'key UInt64, data String') " - f"VALUES (1, 'a'), (2, 'b') SETTINGS azure_truncate_on_insert=1", + f"VALUES (1, 'a'), (2, 'b')", + settings={"azure_truncate_on_insert": 1}, ) print(get_azure_file_content("test_cluster_select_all.csv", port)) @@ -100,7 +101,8 @@ def test_count(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_count.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', " - f"'auto', 'key UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", + f"'auto', 'key UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) print(get_azure_file_content("test_cluster_count.csv", port)) @@ -128,7 +130,8 @@ def test_union_all(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_parquet_union_all', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'Parquet', " - f"'auto', 'a Int32, b String') VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd') SETTINGS azure_truncate_on_insert=1", + f"'auto', 'a Int32, b String') VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')", + settings={"azure_truncate_on_insert": 1}, ) pure_azure = azure_query( @@ -179,7 +182,8 @@ def test_skip_unavailable_shards(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_skip_unavailable.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " - f"'auto', 'a UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", + f"'auto', 'a UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) result = azure_query( node, @@ -199,7 +203,8 @@ def test_unset_skip_unavailable_shards(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_unset_skip_unavailable.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " - f"'auto', 'a UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", + f"'auto', 'a UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) result = azure_query( node, @@ -217,7 +222,8 @@ def test_cluster_with_named_collection(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_with_named_collection.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " - f"'auto', 'a UInt64') VALUES (1), (2) SETTINGS azure_truncate_on_insert=1", + f"'auto', 'a UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) pure_azure = azure_query( @@ -248,7 +254,8 @@ def test_partition_parallel_reading_with_cluster(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', '{filename}', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') " - f"PARTITION BY {partition_by} VALUES {values} SETTINGS azure_truncate_on_insert=1", + f"PARTITION BY {partition_by} VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) assert "1,2,3\n" == 
get_azure_file_content("test_tf_3.csv", port) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 856715f28c8..3fef6bc46cf 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -341,7 +341,7 @@ def test_virtual_columns(started_cluster): ) == expected ) - node1.query("DROP TABLE virual_cols") + node1.query("DROP TABLE virtual_cols") def test_read_files_with_spaces(started_cluster): @@ -675,9 +675,7 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')" ) - node1.query( - f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" - ) + node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "parquet_2" @@ -685,9 +683,7 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')" ) - node1.query( - f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" - ) + node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "kek" From 7e87650a001279317ce79c36415160639a83a4da Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 10:31:13 +0000 Subject: [PATCH 1690/2361] Bump rocksdb to v9.0.1 --- contrib/rocksdb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 5c2be544f55..36540d43550 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 5c2be544f5509465957706c955b6d623e889ac4e +Subproject commit 36540d43550da19e96515bd731ee416a8787672c From f0a6ae220530fa9b34d23aa722ed8816842410ce Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 10:32:40 +0000 Subject: [PATCH 1691/2361] Bump rocksdb to v9.1.1 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 36540d43550..6f7cabeac80 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 36540d43550da19e96515bd731ee416a8787672c +Subproject commit 6f7cabeac80a3a6150be2c8a8369fcecb107bf43 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 57c056532c6..0157f31d5d9 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -150,6 +150,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc + ${ROCKSDB_SOURCE_DIR}/db/multi_cf_iterator.cc ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc ${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc From b3f5ddcd658efc8ad84b32481239750e8999ba0a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 10:37:35 +0000 Subject: [PATCH 1692/2361] Bump rocksdb to 9.2.1 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 6f7cabeac80..08f93221f50 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 
@@ -Subproject commit 6f7cabeac80a3a6150be2c8a8369fcecb107bf43 +Subproject commit 08f93221f50700f19f11555fb46abfe708a716d1 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 0157f31d5d9..cff8f832f23 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -88,6 +88,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc + ${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc @@ -104,6 +105,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc ${ROCKSDB_SOURCE_DIR}/db/builder.cc ${ROCKSDB_SOURCE_DIR}/db/c.cc + ${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc ${ROCKSDB_SOURCE_DIR}/db/column_family.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc @@ -150,7 +152,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc - ${ROCKSDB_SOURCE_DIR}/db/multi_cf_iterator.cc ${ROCKSDB_SOURCE_DIR}/db/output_validator.cc ${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc @@ -389,6 +390,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc + ${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc @@ -419,10 +421,12 @@ if(HAS_ARMV8_CRC) endif(HAS_ARMV8_CRC) list(APPEND SOURCES - "${ROCKSDB_SOURCE_DIR}/port/port_posix.cc" - "${ROCKSDB_SOURCE_DIR}/env/env_posix.cc" - "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc" - "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc") + ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc + ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc + ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc + ${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc + ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc + ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc) add_library(_rocksdb ${SOURCES}) add_library(ch_contrib::rocksdb ALIAS _rocksdb) From e7110be48c7b2b752cd51fd72a23fe956248077c Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 10:43:21 +0000 Subject: [PATCH 1693/2361] Bump rocksdb to v9.3.1 --- contrib/rocksdb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index 08f93221f50..c5201abc4a9 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 08f93221f50700f19f11555fb46abfe708a716d1 +Subproject commit c5201abc4a983450f9423435a4405829be23d0a8 From d7803ca621ca511f0bf2abe0fbc332658b5c2919 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Tue, 6 Aug 2024 10:48:10 +0000 Subject: [PATCH 1694/2361] small fix in log Signed-off-by: Duc Canh Le --- src/Storages/StorageJoin.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index 695c41d3a62..9dace45d2ac 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -112,16 +112,16 @@ bool StorageJoin::optimize( { if (partition) - 
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Partition cannot be specified when optimizing table of type EmbeddedRocksDB"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Partition cannot be specified when optimizing table of type Join"); if (final) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "FINAL cannot be specified when optimizing table of type EmbeddedRocksDB"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "FINAL cannot be specified when optimizing table of type Join"); if (deduplicate) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE cannot be specified when optimizing table of type EmbeddedRocksDB"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DEDUPLICATE cannot be specified when optimizing table of type Join"); if (cleanup) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CLEANUP cannot be specified when optimizing table of type EmbeddedRocksDB"); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "CLEANUP cannot be specified when optimizing table of type Join"); std::lock_guard mutate_lock(mutate_mutex); TableLockHolder lock_holder = tryLockTimedWithContext(rwlock, RWLockImpl::Write, context); From 2a2ba4e685022d3687565586c06dc56dfc276f10 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 10:47:03 +0000 Subject: [PATCH 1695/2361] Bump rocksdb to 9.4.0 --- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/contrib/rocksdb b/contrib/rocksdb index c5201abc4a9..5f003e4a22d 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit c5201abc4a983450f9423435a4405829be23d0a8 +Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index cff8f832f23..b39a739d04c 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -126,6 +126,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc + ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc @@ -183,6 +184,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc ${ROCKSDB_SOURCE_DIR}/env/file_system.cc ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc + ${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc ${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc @@ -370,6 +372,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc + ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc @@ -421,10 +424,8 @@ if(HAS_ARMV8_CRC) endif(HAS_ARMV8_CRC) list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc) From 
995187006a8c2500ddb7fa234f3443c75d900be4 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Mon, 5 Aug 2024 20:23:41 +0200 Subject: [PATCH 1696/2361] rework custom table's disk usage --- src/Disks/DiskFomAST.cpp | 191 ++++++++++++++++++ src/Disks/DiskFomAST.h | 15 ++ src/Disks/DiskSelector.h | 4 +- src/Disks/IDisk.h | 9 +- src/Disks/StoragePolicy.h | 2 + src/Disks/getOrCreateDiskFromAST.cpp | 121 ----------- src/Disks/getOrCreateDiskFromAST.h | 18 -- src/Interpreters/Context.cpp | 14 +- src/Interpreters/Context.h | 1 + src/Parsers/FieldFromAST.cpp | 1 - src/Storages/MergeTree/MergeTreeSettings.cpp | 10 +- .../test_storage_policies/configs/disks.xml | 8 - .../integration/test_storage_policies/test.py | 55 ----- ...02963_test_flexible_disk_configuration.sql | 34 +++- 14 files changed, 264 insertions(+), 219 deletions(-) create mode 100644 src/Disks/DiskFomAST.cpp create mode 100644 src/Disks/DiskFomAST.h delete mode 100644 src/Disks/getOrCreateDiskFromAST.cpp delete mode 100644 src/Disks/getOrCreateDiskFromAST.h diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp new file mode 100644 index 00000000000..c8a4f88547f --- /dev/null +++ b/src/Disks/DiskFomAST.cpp @@ -0,0 +1,191 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_DISK; +} + +std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string & serialization, ContextPtr context, bool attach) +{ + Poco::Util::AbstractConfiguration::Keys disk_settings_keys; + config->keys(disk_settings_keys); + + + // Check that no settings are defined when disk from the config is referred. + if (disk_settings_keys.empty()) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk function has no arguments. Invalid disk description."); + + if (disk_settings_keys.size() == 1 && disk_settings_keys.front() == "name" && !attach) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk function `{}` has to have the other arguments which describe the disk. Invalid disk description.", + serialization); + } + + std::string disk_name; + if (config->has("name")) + { + disk_name = config->getString("name"); + } + + if (!disk_name.empty()) + { + if (disk_name.starts_with(DiskSelector::CUSTOM_DISK_PREFIX)) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk name `{}` could not start with `{}`", + disk_name, DiskSelector::CUSTOM_DISK_PREFIX); + + if (auto disk = context->tryGetDisk(disk_name)) + { + /// the disk is defined by config + if (disk->isCustomDisk()) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Disk with name `{}` already exist as a custom disk but the name does not start with `{}`", + disk_name, + DiskSelector::CUSTOM_DISK_PREFIX); + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The disk `{}` is already exist. It is impossible to redefine it.", disk_name); + } + } + + auto disk_settings_hash = sipHash128(serialization.data(), serialization.size()); + + std::string custom_disk_name; + if (disk_name.empty()) + { + /// We need a unique name for a created custom disk, but it needs to be the same + /// after table is reattached or server is restarted, so take a hash of the disk + /// configuration serialized ast as a disk name suffix. 
+ custom_disk_name = toString(DiskSelector::CUSTOM_DISK_PREFIX) + "noname_" + toString(disk_settings_hash); + } + else + { + custom_disk_name = toString(DiskSelector::CUSTOM_DISK_PREFIX) + disk_name; + } + + auto result_disk = context->getOrCreateDisk(custom_disk_name, [&](const DisksMap & disks_map) -> DiskPtr { + auto disk = DiskFactory::instance().create( + disk_name, *config, /* config_path */"", context, disks_map, /* attach */attach, /* custom_disk */true); + /// Mark that disk can be used without storage policy. + disk->markDiskAsCustom(disk_settings_hash); + return disk; + }); + + if (!result_disk->isCustomDisk()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Disk with name `{}` expected to be custom disk", disk_name); + + if (result_disk->getCustomDiskSettings() != disk_settings_hash && !attach) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The disk `{}` is already configured as a custom disk in another table. It can't be redefined with different settings.", + disk_name); + + if (!attach && !result_disk->isRemote()) + { + static constexpr auto custom_local_disks_base_dir_in_config = "custom_local_disks_base_directory"; + auto disk_path_expected_prefix = context->getConfigRef().getString(custom_local_disks_base_dir_in_config, ""); + + if (disk_path_expected_prefix.empty()) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Base path for custom local disks must be defined in config file by `{}`", + custom_local_disks_base_dir_in_config); + + if (!pathStartsWith(result_disk->getPath(), disk_path_expected_prefix)) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Path of the custom local disk must be inside `{}` directory", + disk_path_expected_prefix); + } + + return custom_disk_name; +} + +class DiskConfigurationFlattener +{ +public: + struct Data + { + ContextPtr context; + bool attach; + }; + + static bool needChildVisit(const ASTPtr &, const ASTPtr &) { return true; } + + static void visit(ASTPtr & ast, Data & data) + { + if (isDiskFunction(ast)) + { + const auto * function = ast->as(); + const auto * function_args_expr = assert_cast(function->arguments.get()); + const auto & function_args = function_args_expr->children; + auto config = getDiskConfigurationFromAST(function_args, data.context); + auto disk_setting_string = serializeAST(*function); + auto disk_name = getOrCreateCustomDisk(config, disk_setting_string, data.context, data.attach); + ast = std::make_shared(disk_name); + } + } +}; + + +std::string DiskFomAST::createCustomDisk(const ASTPtr & disk_function_ast, ContextPtr context, bool attach) +{ + if (!isDiskFunction(disk_function_ast)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected a disk function"); + + auto ast = disk_function_ast->clone(); + + using FlattenDiskConfigurationVisitor = InDepthNodeVisitor; + FlattenDiskConfigurationVisitor::Data data{context, attach}; + FlattenDiskConfigurationVisitor{data}.visit(ast); + + auto disk_name = assert_cast(*ast).value.get(); + return disk_name; +} + +std::string DiskFomAST::getConfigDefinedDisk(const std::string &disk_name, ContextPtr context) +{ + if (disk_name.starts_with(DiskSelector::CUSTOM_DISK_PREFIX)) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk name `{}` could not start with `{}`", + disk_name, DiskSelector::CUSTOM_DISK_PREFIX); + + if (auto result = context->tryGetDisk(disk_name)) + return disk_name; + + std::string custom_disk_name = DiskSelector::CUSTOM_DISK_PREFIX + disk_name; + if (auto result = context->tryGetDisk(custom_disk_name)) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk 
name `{}` is a custom disk that is used in other table." + "That disk could not be used by a reference. The custom disk should be fully specified with a disk function.", + disk_name); + + throw Exception(ErrorCodes::UNKNOWN_DISK, "Unknown disk {}", disk_name); +} + +} diff --git a/src/Disks/DiskFomAST.h b/src/Disks/DiskFomAST.h new file mode 100644 index 00000000000..3a70484eda0 --- /dev/null +++ b/src/Disks/DiskFomAST.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include +#include + +namespace DB +{ + +namespace DiskFomAST +{ + std::string getConfigDefinedDisk(const std::string & name, ContextPtr context); + std::string createCustomDisk(const ASTPtr & disk_function, ContextPtr context, bool attach); +} + +} diff --git a/src/Disks/DiskSelector.h b/src/Disks/DiskSelector.h index 49a1be5cf50..0f7424460a2 100644 --- a/src/Disks/DiskSelector.h +++ b/src/Disks/DiskSelector.h @@ -6,6 +6,8 @@ #include #include +#include +#include namespace DB { @@ -18,7 +20,7 @@ using DiskSelectorPtr = std::shared_ptr; class DiskSelector { public: - static constexpr auto TMP_INTERNAL_DISK_PREFIX = "__tmp_internal_"; + static constexpr auto CUSTOM_DISK_PREFIX = "__"; explicit DiskSelector(std::unordered_set skip_types_ = {}) : skip_types(skip_types_) { } DiskSelector(const DiskSelector & from) = default; diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 412ad27e94f..78d5f37e3a7 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -464,9 +464,9 @@ public: virtual void chmod(const String & /*path*/, mode_t /*mode*/) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Disk does not support chmod"); } /// Was disk created to be used without storage configuration? - bool isCustomDisk() const { return is_custom_disk; } - - void markDiskAsCustom() { is_custom_disk = true; } + bool isCustomDisk() const { return custom_disk_settings_hash != 0; } + UInt128 getCustomDiskSettings() const { return custom_disk_settings_hash; } + void markDiskAsCustom(UInt128 settings_hash) { custom_disk_settings_hash = settings_hash; } virtual DiskPtr getDelegateDiskIfExists() const { return nullptr; } @@ -504,7 +504,8 @@ protected: private: ThreadPool copying_thread_pool; - bool is_custom_disk = false; + // 0 means the disk is not custom, the disk is predefined in the config + UInt128 custom_disk_settings_hash = 0; /// Check access to the disk. 
void checkAccess(); diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index 501e033abc3..e23598214b3 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -119,6 +120,7 @@ class StoragePolicySelector { public: static constexpr auto TMP_STORAGE_POLICY_PREFIX = "__"; + static_assert(std::string_view(DiskSelector::CUSTOM_DISK_PREFIX) == std::string_view(TMP_STORAGE_POLICY_PREFIX)); StoragePolicySelector(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, DiskSelectorPtr disks); diff --git a/src/Disks/getOrCreateDiskFromAST.cpp b/src/Disks/getOrCreateDiskFromAST.cpp deleted file mode 100644 index fd43f31a009..00000000000 --- a/src/Disks/getOrCreateDiskFromAST.cpp +++ /dev/null @@ -1,121 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -namespace -{ - std::string getOrCreateDiskFromDiskAST(const ASTFunction & function, ContextPtr context, bool attach) - { - const auto * function_args_expr = assert_cast(function.arguments.get()); - const auto & function_args = function_args_expr->children; - auto config = getDiskConfigurationFromAST(function_args, context); - - std::string disk_name; - if (config->has("name")) - { - disk_name = config->getString("name"); - } - else - { - /// We need a unique name for a created custom disk, but it needs to be the same - /// after table is reattached or server is restarted, so take a hash of the disk - /// configuration serialized ast as a disk name suffix. - auto disk_setting_string = serializeAST(function); - disk_name = DiskSelector::TMP_INTERNAL_DISK_PREFIX - + toString(sipHash128(disk_setting_string.data(), disk_setting_string.size())); - } - - auto result_disk = context->getOrCreateDisk(disk_name, [&](const DisksMap & disks_map) -> DiskPtr { - auto disk = DiskFactory::instance().create( - disk_name, *config, /* config_path */"", context, disks_map, /* attach */attach, /* custom_disk */true); - /// Mark that disk can be used without storage policy. 
- disk->markDiskAsCustom(); - return disk; - }); - - if (!result_disk->isCustomDisk()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk with name `{}` already exist", disk_name); - - if (!attach && !result_disk->isRemote()) - { - static constexpr auto custom_local_disks_base_dir_in_config = "custom_local_disks_base_directory"; - auto disk_path_expected_prefix = context->getConfigRef().getString(custom_local_disks_base_dir_in_config, ""); - - if (disk_path_expected_prefix.empty()) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Base path for custom local disks must be defined in config file by `{}`", - custom_local_disks_base_dir_in_config); - - if (!pathStartsWith(result_disk->getPath(), disk_path_expected_prefix)) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Path of the custom local disk must be inside `{}` directory", - disk_path_expected_prefix); - } - - return disk_name; - } - - class DiskConfigurationFlattener - { - public: - struct Data - { - ContextPtr context; - bool attach; - }; - - static bool needChildVisit(const ASTPtr &, const ASTPtr &) { return true; } - - static void visit(ASTPtr & ast, Data & data) - { - if (isDiskFunction(ast)) - { - auto disk_name = getOrCreateDiskFromDiskAST(*ast->as(), data.context, data.attach); - ast = std::make_shared(disk_name); - } - } - }; - - /// Visits children first. - using FlattenDiskConfigurationVisitor = InDepthNodeVisitor; -} - - -std::string getOrCreateDiskFromDiskAST(const ASTPtr & disk_function, ContextPtr context, bool attach) -{ - if (!isDiskFunction(disk_function)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected a disk function"); - - auto ast = disk_function->clone(); - - FlattenDiskConfigurationVisitor::Data data{context, attach}; - FlattenDiskConfigurationVisitor{data}.visit(ast); - - auto disk_name = assert_cast(*ast).value.get(); - LOG_TRACE(getLogger("getOrCreateDiskFromDiskAST"), "Result disk name: {}", disk_name); - return disk_name; -} - -} diff --git a/src/Disks/getOrCreateDiskFromAST.h b/src/Disks/getOrCreateDiskFromAST.h deleted file mode 100644 index 61e1decbee9..00000000000 --- a/src/Disks/getOrCreateDiskFromAST.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once -#include -#include -#include - -namespace DB -{ - -class ASTFunction; - -/** - * Create a DiskPtr from disk AST function like disk(), - * add it to DiskSelector by a unique (but always the same for given configuration) disk name - * and return this name. 
- */ -std::string getOrCreateDiskFromDiskAST(const ASTPtr & disk_function, ContextPtr context, bool attach); - -} diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 5413b568068..0acbef26805 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -4,6 +4,7 @@ #include #include #include +#include "Common/Logger.h" #include #include #include @@ -4395,6 +4396,15 @@ DiskPtr Context::getDisk(const String & name) const return disk_selector->get(name); } +DiskPtr Context::tryGetDisk(const String & name) const +{ + std::lock_guard lock(shared->storage_policies_mutex); + + auto disk_selector = getDiskSelector(lock); + + return disk_selector->tryGet(name); +} + DiskPtr Context::getOrCreateDisk(const String & name, DiskCreator creator) const { std::lock_guard lock(shared->storage_policies_mutex); @@ -4422,9 +4432,11 @@ StoragePolicyPtr Context::getStoragePolicy(const String & name) const StoragePolicyPtr Context::getStoragePolicyFromDisk(const String & disk_name) const { + LOG_DEBUG(getLogger("StoragePolicy"), "getStoragePolicyFromDisk disk_name {}", disk_name); + std::lock_guard lock(shared->storage_policies_mutex); - const std::string storage_policy_name = StoragePolicySelector::TMP_STORAGE_POLICY_PREFIX + disk_name; + const std::string storage_policy_name = disk_name.starts_with(DiskSelector::CUSTOM_DISK_PREFIX) ? disk_name : StoragePolicySelector::TMP_STORAGE_POLICY_PREFIX + disk_name; auto storage_policy_selector = getStoragePolicySelector(lock); StoragePolicyPtr storage_policy = storage_policy_selector->tryGet(storage_policy_name); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index d5e35c3e4b3..586eff768df 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1186,6 +1186,7 @@ public: /// Provides storage disks DiskPtr getDisk(const String & name) const; + DiskPtr tryGetDisk(const String & name) const; using DiskCreator = std::function; DiskPtr getOrCreateDisk(const String & name, DiskCreator creator) const; diff --git a/src/Parsers/FieldFromAST.cpp b/src/Parsers/FieldFromAST.cpp index ad1eab49eeb..64aeae1b570 100644 --- a/src/Parsers/FieldFromAST.cpp +++ b/src/Parsers/FieldFromAST.cpp @@ -1,5 +1,4 @@ #include -#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index c968ad84936..f72b24e3270 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -64,10 +64,14 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def, ContextPtr conte auto ast = dynamic_cast(custom.getImpl()).ast; if (ast && isDiskFunction(ast)) { - auto disk_name = getOrCreateDiskFromDiskAST(ast, context, is_attach); - LOG_TRACE(getLogger("MergeTreeSettings"), "Created custom disk {}", disk_name); + auto disk_name = DiskFomAST::createCustomDisk(ast, context, is_attach); + LOG_DEBUG(getLogger("MergeTreeSettings"), "Created custom disk {}", disk_name); value = disk_name; } + else + { + value = DiskFomAST::getConfigDefinedDisk(value.safeGet(), context); + } } if (has("storage_policy")) diff --git a/tests/integration/test_storage_policies/configs/disks.xml b/tests/integration/test_storage_policies/configs/disks.xml index dc60d93208c..3331fee4e4f 100644 --- a/tests/integration/test_storage_policies/configs/disks.xml +++ b/tests/integration/test_storage_policies/configs/disks.xml @@ -1,12 +1,4 @@ - - 
-    <blob_storage_log>
-        <database>system</database>
-        <table>blob_storage_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-        <ttl>event_date + INTERVAL 30 DAY</ttl>
-    </blob_storage_log>
- diff --git a/tests/integration/test_storage_policies/test.py b/tests/integration/test_storage_policies/test.py index f65096389af..389146b2171 100644 --- a/tests/integration/test_storage_policies/test.py +++ b/tests/integration/test_storage_policies/test.py @@ -38,58 +38,3 @@ def test_storage_policy_configuration_change(started_cluster): "/etc/clickhouse-server/config.d/disks.xml", ) node.start_clickhouse() - - -def test_disk_is_immutable(started_cluster): - node.query("DROP TABLE IF EXISTS test_1") - - node.query( - """ - create table test_1 (a Int32) - engine = MergeTree() - order by tuple() - settings - disk=disk( - name='not_uniq_disk_name', - type = object_storage, - object_storage_type = local_blob_storage, - path='./03215_data_test_1/') - """ - ) - - node.query("INSERT INTO test_1 VALUES (1)") - node.query("SYSTEM FLUSH LOGS;") - - print(node.query("SELECT 'test_1', * FROM system.blob_storage_log")) - - print(node.query("SELECT 'test_1', * FROM test_1")) - - node.query("DROP TABLE test_1 SYNC") - node.query("DROP TABLE IF EXISTS test_2") - - node.query( - """ - create table test_2 (a Int32) - engine = MergeTree() - order by tuple() - settings - disk=disk( - name='not_uniq_disk_name', - type = object_storage, - object_storage_type = local_blob_storage, - path='./03215_data_test_2/') - """ - ) - - node.query("INSERT INTO test_2 VALUES (1)") - node.query("SYSTEM FLUSH LOGS;") - - print(node.query("SELECT 'test_2', * FROM system.blob_storage_log")) - - print(node.query("SELECT 'test_2', * FROM test_2")) - - node.restart_clickhouse() - - print(node.query("SELECT 'test_2', * FROM system.blob_storage_log")) - - print(node.query("SELECT 'test_2', * FROM test_2")) diff --git a/tests/queries/0_stateless/02963_test_flexible_disk_configuration.sql b/tests/queries/0_stateless/02963_test_flexible_disk_configuration.sql index 7ebef866360..6b285d423e7 100644 --- a/tests/queries/0_stateless/02963_test_flexible_disk_configuration.sql +++ b/tests/queries/0_stateless/02963_test_flexible_disk_configuration.sql @@ -2,13 +2,33 @@ drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() -settings disk=disk(name='test1', type = object_storage, object_storage_type = local_blob_storage, path='./02963_test1/'); +settings disk=disk(name='02963_custom_disk', type = object_storage, object_storage_type = local_blob_storage, path='./02963_test1/'); -drop table test; +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='02963_custom_disk', type = object_storage, object_storage_type = local_blob_storage, path='./02963_test2/'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='02963_custom_disk'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk='02963_custom_disk'; -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='s3_disk_02963'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() settings disk='s3_disk_02963'; -drop table test; +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='s3_disk_02963', type = object_storage, object_storage_type = local_blob_storage, 
path='./02963_test2/'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() settings disk=disk(name='test1', type = object_storage, @@ -17,7 +37,7 @@ settings disk=disk(name='test1', access_key_id = clickhouse, secret_access_key = clickhouse); -drop table test; +drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() settings disk=disk(name='test2', type = object_storage, @@ -27,7 +47,7 @@ settings disk=disk(name='test2', access_key_id = clickhouse, secret_access_key = clickhouse); -drop table test; +drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() settings disk=disk(name='test3', type = object_storage, @@ -37,8 +57,8 @@ settings disk=disk(name='test3', endpoint = 'http://localhost:11111/test/common/', access_key_id = clickhouse, secret_access_key = clickhouse); -drop table test; +drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() settings disk=disk(name='test4', type = object_storage, @@ -48,8 +68,8 @@ settings disk=disk(name='test4', endpoint = 'http://localhost:11111/test/common/', access_key_id = clickhouse, secret_access_key = clickhouse); -drop table test; +drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple() settings disk=disk(name='test5', type = object_storage, From 4e2b6a5e4927da8a02cf15b374d0e351108d1d88 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 6 Aug 2024 13:06:34 +0200 Subject: [PATCH 1697/2361] Fix docs and a test --- .../engines/table-engines/integrations/time-series.md | 10 +++++++--- docs/en/interfaces/prometheus.md | 1 + tests/integration/test_prometheus_protocols/test.py | 5 +++++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/time-series.md b/docs/en/engines/table-engines/integrations/time-series.md index b9e47e8d2c9..2914bf4bf37 100644 --- a/docs/en/engines/table-engines/integrations/time-series.md +++ b/docs/en/engines/table-engines/integrations/time-series.md @@ -8,7 +8,7 @@ sidebar_label: TimeSeries A table engine storing time series, i.e. a set of values associated with timestamps and tags (or labels): -``` +```text metric_name1[tag1=value1, tag2=value2, ...] = {timestamp1: value1, timestamp2: value2, ...} metric_name2[...] = ... ``` @@ -39,6 +39,7 @@ CREATE TABLE my_table ENGINE=TimeSeries ``` Then this table can be used with the following protocols (a port must be assigned in the server configuration): + - [prometheus remote-write](../../../interfaces/prometheus.md#remote-write) - [prometheus remote-read](../../../interfaces/prometheus.md#remote-read) @@ -53,6 +54,7 @@ The target tables can be either specified explicitly in the `CREATE TABLE` query or the `TimeSeries` table engine can generate inner target tables automatically. The target tables are the following: + 1. The _data_ table {#data-table} contains time series associated with some identifier. 
The _data_ table must have columns: @@ -71,7 +73,7 @@ The _tags_ table must have columns: | `metric_name` | [x] | `LowCardinality(String)` | `String` or `LowCardinality(String)` | The name of a metric | | `` | [ ] | `String` | `String` or `LowCardinality(String)` or `LowCardinality(Nullable(String))` | The value of a specific tag, the tag's name and the name of a corresponding column are specified in the [tags_to_columns](#settings) setting | | `tags` | [x] | `Map(LowCardinality(String), String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Map of tags excluding the tag `__name__` containing the name of a metric and excluding tags with names enumerated in the [tags_to_columns](#settings) setting | -| `all_tags` | [ ] | `Map(String, String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Ephemeral column, each row is a map of all the tags excluding only the tag `__name__` containing the name of a metric. The only purpose of that column is to be used while calculating `id` | +| `all_tags` | [ ] | `Map(String, LowCardinality(String))` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Ephemeral column, each row is a map of all the tags excluding only the tag `__name__` containing the name of a metric. The only purpose of that column is to be used while calculating `id` | | `min_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Minimum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | | `max_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Maximum timestamp of time series with that `id`. 
The column is created if [store_min_time_and_max_time](#settings) is `true` | @@ -239,6 +241,7 @@ ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} ## Table engines of inner target tables {#inner-table-engines} By default inner target tables use the following table engines: + - the [data]{#data-table} table uses [MergeTree](../mergetree-family/mergetree); - the [tags]{#tags-table} table uses [AggregatingMergeTree](../mergetree-family/aggregatingmergetree) because the same data is often inserted multiple times to this table so we need a way to remove duplicates, and also because it's required to do aggregation for columns `min_time` and `max_time`; @@ -287,9 +290,10 @@ Here is a list of settings which can be specified while defining a `TimeSeries` | `aggregate_min_time_and_max_time` | Bool | true | When creating an inner target `tags` table, this flag enables using `SimpleAggregateFunction(min, Nullable(DateTime64(3)))` instead of just `Nullable(DateTime64(3))` as the type of the `min_time` column, and the same for the `max_time` column | | `filter_by_min_time_and_max_time` | Bool | true | If set to true then the table will use the `min_time` and `max_time` columns for filtering time series | -# Functions {#functions} +## Functions {#functions} Here is a list of functions supporting a `TimeSeries` table as an argument: + - [timeSeriesData](../../../sql-reference/table-functions/timeSeriesData.md) - [timeSeriesTags](../../../sql-reference/table-functions/timeSeriesTags.md) - [timeSeriesMetrics](../../../sql-reference/table-functions/timeSeriesMetrics.md) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md index 75a68c59219..5eac99f685e 100644 --- a/docs/en/interfaces/prometheus.md +++ b/docs/en/interfaces/prometheus.md @@ -58,6 +58,7 @@ Settings: | `errors` | true | Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../operations/system-tables/errors.md) as well. 
| Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): + ```bash curl 127.0.0.1:9363/metrics ``` diff --git a/tests/integration/test_prometheus_protocols/test.py b/tests/integration/test_prometheus_protocols/test.py index 488c5369742..6adb3da56c3 100644 --- a/tests/integration/test_prometheus_protocols/test.py +++ b/tests/integration/test_prometheus_protocols/test.py @@ -144,6 +144,11 @@ def test_inner_engines(): def test_external_tables(): + node.query("DROP TABLE IF EXISTS mydata") + node.query("DROP TABLE IF EXISTS mytags") + node.query("DROP TABLE IF EXISTS mymetrics") + node.query("DROP TABLE IF EXISTS prometheus") + node.query( "CREATE TABLE mydata (id UUID, timestamp DateTime64(3), value Float64) " "ENGINE=MergeTree ORDER BY (id, timestamp)" From 27cdbb54d73f8f4b82d63850c1e6f6fd5669646e Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Tue, 6 Aug 2024 13:10:03 +0200 Subject: [PATCH 1698/2361] fix black --- tests/integration/test_storage_hdfs/test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 3fef6bc46cf..77921b885b0 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -675,7 +675,9 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')" ) - node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") + node1.query( + f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" + ) result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "parquet_2" @@ -683,7 +685,9 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')" ) - node1.query(f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1") + node1.query( + f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" + ) result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "kek" From df0dac2f5b509438cce28214b78765c46439aa8c Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 11:17:41 +0000 Subject: [PATCH 1699/2361] Enable jemalloc and liburing in rocksdb --- contrib/CMakeLists.txt | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 30 +++++++++++++++++----------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 977efda15ff..eb3afe0ccdf 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -71,7 +71,6 @@ add_contrib (zlib-ng-cmake zlib-ng) add_contrib (bzip2-cmake bzip2) add_contrib (minizip-ng-cmake minizip-ng) add_contrib (snappy-cmake snappy) -add_contrib (rocksdb-cmake rocksdb) add_contrib (thrift-cmake thrift) # parquet/arrow/orc add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion @@ -148,6 +147,7 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro add_contrib (cppkafka-cmake cppkafka) add_contrib (libpqxx-cmake libpqxx) add_contrib (libpq-cmake libpq) +add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing add_contrib (nuraft-cmake NuRaft) add_contrib (fast_float-cmake fast_float) 
add_contrib (idna-cmake idna) diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index b39a739d04c..7e5e9a28d0f 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -5,36 +5,38 @@ if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL return() endif() -# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default -set (CMAKE_CXX_STANDARD 20) - -# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs -option(WITH_JEMALLOC "build with JeMalloc" OFF) - -option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING - # ClickHouse cannot be compiled without snappy, lz4, zlib, zstd option(WITH_SNAPPY "build with SNAPPY" ON) option(WITH_LZ4 "build with lz4" ON) option(WITH_ZLIB "build with zlib" ON) option(WITH_ZSTD "build with zstd" ON) -if(WITH_SNAPPY) +if (ENABLE_JEMALLOC) + add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE) + list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc) +endif () + +if (ENABLE_LIBURING) + add_definitions(-DROCKSDB_IOURING_PRESENT) + list (APPEND THIRDPARTY_LIBS ch_contrib::liburing) +endif () + +if (WITH_SNAPPY) add_definitions(-DSNAPPY) list(APPEND THIRDPARTY_LIBS ch_contrib::snappy) endif() -if(WITH_ZLIB) +if (WITH_ZLIB) add_definitions(-DZLIB) list(APPEND THIRDPARTY_LIBS ch_contrib::zlib) endif() -if(WITH_LZ4) +if (WITH_LZ4) add_definitions(-DLZ4) list(APPEND THIRDPARTY_LIBS ch_contrib::lz4) endif() -if(WITH_ZSTD) +if (WITH_ZSTD) add_definitions(-DZSTD) list(APPEND THIRDPARTY_LIBS ch_contrib::zstd) endif() @@ -433,5 +435,9 @@ add_library(_rocksdb ${SOURCES}) add_library(ch_contrib::rocksdb ALIAS _rocksdb) target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) +# Not in the native build system but useful anyways: +# Make all functions in xxHash.h inline. Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers +target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL) + # SYSTEM is required to overcome some issues target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include") From 97f1cfc232c331504754db4ba4221a282c690bb6 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov Date: Wed, 24 Jul 2024 14:32:35 +0000 Subject: [PATCH 1700/2361] add ** glob to hdfs docs --- docs/en/sql-reference/table-functions/hdfs.md | 1 + docs/ru/sql-reference/table-functions/file.md | 1 + docs/ru/sql-reference/table-functions/hdfs.md | 1 + tests/integration/test_storage_hdfs/test.py | 16 ++++++++++++++++ 4 files changed, 19 insertions(+) diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 28cba5ccc6a..405ac477846 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -44,6 +44,7 @@ LIMIT 2 Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. - `*` — Represents arbitrarily many characters except `/` but including the empty string. +- `**` — Represents all files inside a folder recursively. - `?` — Represents an arbitrary single character. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol. - `{N..M}` — Represents any number `>= N` and `<= M`. 
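A minimal sketch of how the `**` glob documented above might be used with the `hdfs` table function; the URI, directory layout, and column structure here are illustrative assumptions rather than values taken from this patch:

```sql
-- Assumes TSV files scattered across nested directories under /data/;
-- `**` matches them at any depth, whereas `*` does not cross `/`.
SELECT count(*)
FROM hdfs('hdfs://hdfs1:9000/data/**.tsv', 'TSV', 'id UInt32, name String, weight Float64');
```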
diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index 546a674d41a..f3e8b0f46b7 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -81,6 +81,7 @@ SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 U
 Обрабатываться будут те и только те файлы, которые существуют в файловой системе и удовлетворяют всему шаблону пути.

 - `*` — заменяет любое количество любых символов кроме `/`, включая отсутствие символов.
+- `**` — Заменяет любое количество любых символов, включая `/`, то есть осуществляет рекурсивный поиск по вложенным директориям.
 - `?` — заменяет ровно один любой символ.
 - `{some_string,another_string,yet_another_one}` — заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. Эти строки также могут содержать символ `/`.
 - `{N..M}` — заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули).
diff --git a/docs/ru/sql-reference/table-functions/hdfs.md b/docs/ru/sql-reference/table-functions/hdfs.md index 6dcb1a21791..13f1bdc43af 100644 --- a/docs/ru/sql-reference/table-functions/hdfs.md +++ b/docs/ru/sql-reference/table-functions/hdfs.md @@ -47,6 +47,7 @@ LIMIT 2

 - `*` — Заменяет любое количество любых символов (кроме `/`), включая отсутствие символов.
+- `**` — Заменяет любое количество любых символов, включая `/`, то есть осуществляет рекурсивный поиск по вложенным директориям.
 - `?` — Заменяет ровно один любой символ.
 - `{some_string,another_string,yet_another_one}` — Заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`. Эти строки также могут содержать символ `/`.
 - `{N..M}` — Заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули).
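The table-engine form exercised by the integration test below follows the same pattern; a short sketch (the table name, columns, and path mirror the test and are shown only for illustration):

```sql
-- Matches files such as /subdir1/file1.doublestar.tsv at any nesting depth.
CREATE TABLE HDFSStorageWithDoubleAsterisk (id UInt32, name String, weight Float64)
    ENGINE = HDFS('hdfs://hdfs1:9000/**.doublestar.tsv', 'TSV');

SELECT count(*) FROM HDFSStorageWithDoubleAsterisk;
```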
diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index ccd2c7eaf11..9b2dacd3d5d 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -61,6 +61,14 @@ def test_read_write_storage_with_globs(started_cluster): hdfs_api.write_data("/storage" + i, i + "\tMark\t72.53\n") assert hdfs_api.read_data("/storage" + i) == i + "\tMark\t72.53\n" + node1.query( + "create table HDFSStorageWithDoubleAsterisk (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/**.tsv', 'TSV')" + ) + + for i in ["1", "2", "3"]: + hdfs_api.write_data(f"/subdir/file{i}.tsv", f"{i}\tMark\t72.53\n") + assert hdfs_api.read_data(f"/subdir/file{i}.tsv") == f"{i}\tMark\t72.53\n" + assert ( node1.query( "select count(*) from HDFSStorageWithRange settings s3_throw_on_zero_files_match=1" @@ -70,6 +78,7 @@ def test_read_write_storage_with_globs(started_cluster): assert node1.query("select count(*) from HDFSStorageWithEnum") == "3\n" assert node1.query("select count(*) from HDFSStorageWithQuestionMark") == "3\n" assert node1.query("select count(*) from HDFSStorageWithAsterisk") == "3\n" + assert node1.query("select count(*) from HDFSStorageWithDoubleAsterisk") == "3\n" try: node1.query("insert into HDFSStorageWithEnum values (1, 'NEW', 4.2)") @@ -92,6 +101,13 @@ def test_read_write_storage_with_globs(started_cluster): print(ex) assert "in readonly mode" in str(ex) + try: + node1.query("insert into HDFSStorageWithDoubleAsterisk values (1, 'NEW', 4.2)") + assert False, "Exception have to be thrown" + except Exception as ex: + print(ex) + assert "in readonly mode" in str(ex) + def test_storage_with_multidirectory_glob(started_cluster): hdfs_api = started_cluster.hdfs_api From 7d45424d318f84a2d035a01290d263a620859bca Mon Sep 17 00:00:00 2001 From: Andrey Zvonov Date: Fri, 26 Jul 2024 08:41:14 +0000 Subject: [PATCH 1701/2361] specify files in test: --- tests/integration/test_storage_hdfs/test.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 9b2dacd3d5d..51d3eed91df 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -62,12 +62,15 @@ def test_read_write_storage_with_globs(started_cluster): assert hdfs_api.read_data("/storage" + i) == i + "\tMark\t72.53\n" node1.query( - "create table HDFSStorageWithDoubleAsterisk (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/**.tsv', 'TSV')" + "create table HDFSStorageWithDoubleAsterisk (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/**.doublestar.tsv', 'TSV')" ) for i in ["1", "2", "3"]: - hdfs_api.write_data(f"/subdir/file{i}.tsv", f"{i}\tMark\t72.53\n") - assert hdfs_api.read_data(f"/subdir/file{i}.tsv") == f"{i}\tMark\t72.53\n" + hdfs_api.write_data(f"/subdir{i}/file{i}.doublestar.tsv", f"{i}\tMark\t72.53\n") + assert ( + hdfs_api.read_data(f"/subdir{i}/file{i}.doublestar.tsv") + == f"{i}\tMark\t72.53\n" + ) assert ( node1.query( From 523767c0c0725773c074e1ad00229b27865a34f2 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 30 Jul 2024 17:26:28 +0200 Subject: [PATCH 1702/2361] run 01171 test in parallel --- .../01171_mv_select_insert_isolation_long.sh | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh 
b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 2fb58e4cc57..27f063c34d8 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-parallel, no-ordinary-database -# Test is too heavy, avoid parallel run in Flaky Check +# Tags: long, no-ordinary-database # shellcheck disable=SC2119 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) @@ -22,11 +21,6 @@ $CLICKHOUSE_CLIENT --query "CREATE TABLE tmp (x UInt32, nm Int32) ENGINE=MergeTr $CLICKHOUSE_CLIENT --query "INSERT INTO src VALUES (0, 0)" -function get_now() -{ - date +%s -} - is_pid_exist() { local pid=$1 @@ -42,14 +36,14 @@ function run_until_deadline_and_at_least_times() local function_to_run=$1; shift local started_time - started_time=$(get_now) + started_time=$SECONDS local i=0 while true do $function_to_run $i "$@" - [[ $(get_now) -lt $deadline ]] || break + [[ $SECONDS -lt $deadline ]] || break i=$(($i + 1)) done @@ -165,7 +159,7 @@ if [[ $((MAIN_TIME_PART + SECOND_TIME_PART + WAIT_FINISH + LAST_TIME_GAP)) -ge exit 1 fi -START_TIME=$(get_now) +START_TIME=$SECONDS STOP_TIME=$((START_TIME + MAIN_TIME_PART)) SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) MIN_ITERATIONS=20 From 244da490138668882db10770679b940d2c4adadf Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 30 Jul 2024 18:41:44 +0200 Subject: [PATCH 1703/2361] add max iteration --- .../01171_mv_select_insert_isolation_long.sh | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 27f063c34d8..fe5bfe529b7 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -27,28 +27,30 @@ is_pid_exist() ps -p $pid > /dev/null } -function run_until_deadline_and_at_least_times() +function run_until_deadline_with_min_max_iterations() { set -e local deadline=$1; shift local min_iterations=$1; shift + local max_iterations=$1; shift local function_to_run=$1; shift local started_time started_time=$SECONDS - local i=0 + local iteration=0 while true do - $function_to_run $i "$@" + $function_to_run $iteration "$@" [[ $SECONDS -lt $deadline ]] || break + [[ $iteration -lt $max_iterations ]] || break - i=$(($i + 1)) + iteration=$(($iteration + 1)) done - [[ $i -gt $min_iterations ]] || echo "$i/$min_iterations : not enough iterations of $function_to_run has been made from $started_time until $deadline" >&2 + [[ $iteration -gt $min_iterations ]] || echo "$iteration/$min_iterations : not enough iterations of $function_to_run has been made from $started_time until $deadline" >&2 } function insert_commit_action() @@ -163,17 +165,18 @@ START_TIME=$SECONDS STOP_TIME=$((START_TIME + MAIN_TIME_PART)) SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) MIN_ITERATIONS=20 +MAX_ITERATIONS=200 -run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 1 & PID_1=$! -run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_commit_action 2 & PID_2=$! -run_until_deadline_and_at_least_times $STOP_TIME $MIN_ITERATIONS insert_rollback_action 3 & PID_3=$! +run_until_deadline_with_min_max_iterations $STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 1 & PID_1=$! 
+run_until_deadline_with_min_max_iterations $STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 2 & PID_2=$! +run_until_deadline_with_min_max_iterations $STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_rollback_action 3 & PID_3=$! -run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS optimize_action & PID_4=$! -run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_action & PID_5=$! -run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_insert_action & PID_6=$! +run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS optimize_action & PID_4=$! +run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_action & PID_5=$! +run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_insert_action & PID_6=$! sleep 0.$RANDOM -run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_action & PID_7=$! -run_until_deadline_and_at_least_times $SECOND_STOP_TIME $MIN_ITERATIONS select_insert_action & PID_8=$! +run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_action & PID_7=$! +run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_insert_action & PID_8=$! wait $PID_1 || echo "insert_commit_action has failed with status $?" 2>&1 wait $PID_2 || echo "second insert_commit_action has failed with status $?" 2>&1 From c896165e161b2267ae4d6bc255e028d7c0899bd9 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 31 Jul 2024 17:49:39 +0200 Subject: [PATCH 1704/2361] add min time --- .../01171_mv_select_insert_isolation_long.sh | 53 +++++++++++-------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index fe5bfe529b7..ff71d37cb32 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -27,15 +27,20 @@ is_pid_exist() ps -p $pid > /dev/null } -function run_until_deadline_with_min_max_iterations() +function run_with_time_and_iterations_limits() { set -e - local deadline=$1; shift + local min_time=$1; shift + local max_time=$1; shift local min_iterations=$1; shift local max_iterations=$1; shift local function_to_run=$1; shift + # if [ "${1:-X}" = "1" ]; then + # set -x + # fi + local started_time started_time=$SECONDS local iteration=0 @@ -44,13 +49,14 @@ function run_until_deadline_with_min_max_iterations() do $function_to_run $iteration "$@" - [[ $SECONDS -lt $deadline ]] || break - [[ $iteration -lt $max_iterations ]] || break + [[ $SECONDS -lt $max_time ]] || break + [[ $SECONDS -lt $min_time ]] || [[ $iteration -lt $max_iterations ]] || break iteration=$(($iteration + 1)) done - [[ $iteration -gt $min_iterations ]] || echo "$iteration/$min_iterations : not enough iterations of $function_to_run has been made from $started_time until $deadline" >&2 + [[ $iteration -gt $min_iterations ]] || echo "$iteration/$min_iterations : not enough iterations of $function_to_run has been made from $started_time until $max_time" >&2 + set +x } function insert_commit_action() @@ -151,43 +157,44 @@ function select_insert_action() ROLLBACK;" } -MAIN_TIME_PART=400 -SECOND_TIME_PART=30 +MIN_SECONDS=5 +MAX_SECONDS=300 WAIT_FINISH=60 -LAST_TIME_GAP=10 -if [[ 
$((MAIN_TIME_PART + SECOND_TIME_PART + WAIT_FINISH + LAST_TIME_GAP)) -ge 600 ]]; then +if [[ $((MAX_SECONDS + WAIT_FINISH)) -ge 550 ]]; then echo "time sttings are wrong" 2>&1 exit 1 fi START_TIME=$SECONDS -STOP_TIME=$((START_TIME + MAIN_TIME_PART)) -SECOND_STOP_TIME=$((STOP_TIME + SECOND_TIME_PART)) +MIN_TIME=$((START_TIME + MIN_SECONDS)) +MAX_TIME=$((START_TIME + MAX_SECONDS)) MIN_ITERATIONS=20 MAX_ITERATIONS=200 -run_until_deadline_with_min_max_iterations $STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 1 & PID_1=$! -run_until_deadline_with_min_max_iterations $STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 2 & PID_2=$! -run_until_deadline_with_min_max_iterations $STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_rollback_action 3 & PID_3=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 1 & PID_1=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 2 & PID_2=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_rollback_action 3 & PID_3=$! -run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS optimize_action & PID_4=$! -run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_action & PID_5=$! -run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_insert_action & PID_6=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS optimize_action & PID_4=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_action & PID_5=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_insert_action & PID_6=$! sleep 0.$RANDOM -run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_action & PID_7=$! -run_until_deadline_with_min_max_iterations $SECOND_STOP_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_insert_action & PID_8=$! - -wait $PID_1 || echo "insert_commit_action has failed with status $?" 2>&1 -wait $PID_2 || echo "second insert_commit_action has failed with status $?" 2>&1 -wait $PID_3 || echo "insert_rollback_action has failed with status $?" 2>&1 +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_action & PID_7=$! +run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS select_insert_action & PID_8=$! +is_pid_exist $PID_1 || echo "insert_commit_action is not running" 2>&1 +is_pid_exist $PID_2 || echo "second insert_commit_action is not running" 2>&1 +is_pid_exist $PID_3 || echo "insert_rollback_action is not running" 2>&1 is_pid_exist $PID_4 || echo "optimize_action is not running" 2>&1 is_pid_exist $PID_5 || echo "select_action is not running" 2>&1 is_pid_exist $PID_6 || echo "select_insert_action is not running" 2>&1 is_pid_exist $PID_7 || echo "second select_action is not running" 2>&1 is_pid_exist $PID_8 || echo "second select_insert_action is not running" 2>&1 +wait $PID_1 || echo "insert_commit_action has failed with status $?" 2>&1 +wait $PID_2 || echo "second insert_commit_action has failed with status $?" 2>&1 +wait $PID_3 || echo "insert_rollback_action has failed with status $?" 2>&1 wait $PID_4 || echo "optimize_action has failed with status $?" 2>&1 wait $PID_5 || echo "select_action has failed with status $?" 
2>&1 wait $PID_6 || echo "select_insert_action has failed with status $?" 2>&1 From 874a7dbe378e83331d6e4fab6aa82a0409fbfc5d Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 1 Aug 2024 17:39:36 +0200 Subject: [PATCH 1705/2361] adjust settings --- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index ff71d37cb32..ba6fc85ca1a 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -158,7 +158,7 @@ function select_insert_action() } MIN_SECONDS=5 -MAX_SECONDS=300 +MAX_SECONDS=400 WAIT_FINISH=60 if [[ $((MAX_SECONDS + WAIT_FINISH)) -ge 550 ]]; then From 860050eb3d15f26c371a1aeab92fd8c0d166ce09 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 6 Aug 2024 13:30:23 +0200 Subject: [PATCH 1706/2361] Update src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- .../ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp index 9043edd66a0..1a0b6157a86 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.cpp @@ -268,7 +268,7 @@ std::unique_ptr getContainerClient(const ConnectionParams & par catch (const Azure::Storage::StorageException & e) { /// If container_already_exists is not set (in config), ignore already exists error. Conflict - The specified container already exists. - /// To avoid race with creation of container handle this error despite that we have already checked the existence of container. + /// To avoid race with creation of container, handle this error despite that we have already checked the existence of container. if (!params.endpoint.container_already_exists.has_value() && e.StatusCode == Azure::Core::Http::HttpStatusCode::Conflict) return params.createForContainer(); throw; From 851d4d3fdfdb682d0f9c9a8b1945608c6bc53086 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 6 Aug 2024 13:56:37 +0200 Subject: [PATCH 1707/2361] adjust MIN_ITERATIONS --- .../0_stateless/01171_mv_select_insert_isolation_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index ba6fc85ca1a..f6a88c205c1 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -169,7 +169,7 @@ fi START_TIME=$SECONDS MIN_TIME=$((START_TIME + MIN_SECONDS)) MAX_TIME=$((START_TIME + MAX_SECONDS)) -MIN_ITERATIONS=20 +MIN_ITERATIONS=15 MAX_ITERATIONS=200 run_with_time_and_iterations_limits $MIN_TIME $MAX_TIME $MIN_ITERATIONS $MAX_ITERATIONS insert_commit_action 1 & PID_1=$! 
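As a rough, self-contained sketch of the loop pattern above — run an action until a hard wall-clock limit, but allow an early stop once both a minimum runtime and a maximum iteration count have been reached — with a toy action and placeholder limits:

```bash
#!/usr/bin/env bash
# Toy version of the time/iteration-limited loop; limits are absolute values of $SECONDS.

run_with_limits()
{
    local min_time=$1 max_time=$2 min_iterations=$3 max_iterations=$4; shift 4
    local iteration=0
    while true
    do
        "$@" "$iteration"                                     # run the action once per pass
        [[ $SECONDS -lt $max_time ]] || break                 # hard wall-clock limit
        [[ $SECONDS -lt $min_time ]] || [[ $iteration -lt $max_iterations ]] || break
        iteration=$((iteration + 1))
    done
    [[ $iteration -gt $min_iterations ]] || echo "only $iteration/$min_iterations iterations done" >&2
}

toy_action() { echo "pass $1"; sleep 0.2; }

run_with_limits 1 5 3 100 toy_action
```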
From 913b9028649f668020850de91182d975d2a2916a Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 6 Aug 2024 12:51:16 +0200 Subject: [PATCH 1708/2361] CI: Fix for setting Mergeable Check from sync --- .github/workflows/jepsen.yml | 2 +- tests/ci/commit_status_helper.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 035ba2e5b98..d4d676ceead 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -66,7 +66,7 @@ jobs: - name: Check Workflow results run: | export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json" - cat >> "$WORKFLOW_RESULT_FILE" << 'EOF' + cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py index 908ac4a7dca..7e3a0b4965c 100644 --- a/tests/ci/commit_status_helper.py +++ b/tests/ci/commit_status_helper.py @@ -497,9 +497,9 @@ def trigger_mergeable_check( description = format_description(description) if set_from_sync: - # update Mergeable Check from sync WF only if its status already present or its new status is not SUCCESS + # update Mergeable Check from sync WF only if its status already present or its new status is FAILURE # to avoid false-positives - if mergeable_status or state != SUCCESS: + if mergeable_status or state == FAILURE: set_mergeable_check(commit, description, state) elif mergeable_status is None or mergeable_status.description != description: set_mergeable_check(commit, description, state) From 2776a515ba36be6946919bc43ba267797d929cdd Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 12:29:54 +0000 Subject: [PATCH 1709/2361] Cosmetics I --- src/Storages/Statistics/Statistics.cpp | 12 ++++++------ src/Storages/Statistics/StatisticsCountMinSketch.cpp | 4 ++-- src/Storages/Statistics/StatisticsCountMinSketch.h | 4 ++-- src/Storages/Statistics/StatisticsTDigest.cpp | 4 ++-- src/Storages/Statistics/StatisticsTDigest.h | 4 ++-- src/Storages/Statistics/StatisticsUniq.cpp | 4 ++-- src/Storages/Statistics/StatisticsUniq.h | 4 ++-- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index ade3326288a..2a17101478a 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -204,15 +204,15 @@ void MergeTreeStatisticsFactory::registerValidator(StatisticsType stats_type, Va MergeTreeStatisticsFactory::MergeTreeStatisticsFactory() { - registerValidator(StatisticsType::TDigest, tdigestValidator); - registerCreator(StatisticsType::TDigest, tdigestCreator); + registerValidator(StatisticsType::TDigest, tdigestStatisticsValidator); + registerCreator(StatisticsType::TDigest, tdigestStatisticsCreator); - registerValidator(StatisticsType::Uniq, uniqValidator); - registerCreator(StatisticsType::Uniq, uniqCreator); + registerValidator(StatisticsType::Uniq, uniqStatisticsValidator); + registerCreator(StatisticsType::Uniq, uniqStatisticsCreator); #if USE_DATASKETCHES - registerValidator(StatisticsType::CountMinSketch, countMinSketchValidator); - registerCreator(StatisticsType::CountMinSketch, countMinSketchCreator); + registerValidator(StatisticsType::CountMinSketch, countMinSketchStatisticsValidator); + registerCreator(StatisticsType::CountMinSketch, countMinSketchStatisticsCreator); #endif } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp 
b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index e69bbc1515b..50d3b6e515c 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -84,7 +84,7 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) } -void countMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void countMinSketchStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -92,7 +92,7 @@ void countMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr da throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } -StatisticsPtr countMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { return std::make_shared(stat, data_type); } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index 6c8b74f8c35..d10bc78a88e 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -31,8 +31,8 @@ private: }; -void countMinSketchValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr countMinSketchCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void countMinSketchStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr); } diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index 66150e00fdb..7c5ea443201 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -57,7 +57,7 @@ Float64 StatisticsTDigest::estimateEqual(const Field & val) const throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); } -void tdigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void tdigestStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -65,7 +65,7 @@ void tdigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' do not support type {}", data_type->getName()); } -StatisticsPtr tdigestCreator(const SingleStatisticsDescription & stat, DataTypePtr) +StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr) { return std::make_shared(stat); } diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index 614973e5d8b..d41b0648aa4 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -23,7 +23,7 @@ private: QuantileTDigest t_digest; }; -void tdigestValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr tdigestCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void tdigestStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr tdigestStatisticsCreator(const 
SingleStatisticsDescription & stat, DataTypePtr); } diff --git a/src/Storages/Statistics/StatisticsUniq.cpp b/src/Storages/Statistics/StatisticsUniq.cpp index 8f60ffcf0b5..c259f09e0ae 100644 --- a/src/Storages/Statistics/StatisticsUniq.cpp +++ b/src/Storages/Statistics/StatisticsUniq.cpp @@ -52,7 +52,7 @@ UInt64 StatisticsUniq::estimateCardinality() const return column->getUInt(0); } -void uniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void uniqStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -60,7 +60,7 @@ void uniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'uniq' do not support type {}", data_type->getName()); } -StatisticsPtr uniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) { return std::make_shared(stat, data_type); } diff --git a/src/Storages/Statistics/StatisticsUniq.h b/src/Storages/Statistics/StatisticsUniq.h index faabde8d47c..743714e1e1b 100644 --- a/src/Storages/Statistics/StatisticsUniq.h +++ b/src/Storages/Statistics/StatisticsUniq.h @@ -27,7 +27,7 @@ private: }; -void uniqValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr uniqCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); +void uniqStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type); +StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); } From 47e4ea598267d33e5db72623069629a9025b0c0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 14:31:22 +0200 Subject: [PATCH 1710/2361] Make 02984_form_format.sh parallelizable --- tests/queries/0_stateless/02984_form_format.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02984_form_format.sh b/tests/queries/0_stateless/02984_form_format.sh index 471b48e0f68..814fe1a0f2c 100755 --- a/tests/queries/0_stateless/02984_form_format.sh +++ b/tests/queries/0_stateless/02984_form_format.sh @@ -1,6 +1,4 @@ #!/bin/bash -# Tags: no-parallel - CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh @@ -8,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) FILE_NAME="data.tmp" FORM_DATA="${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/${FILE_NAME}" mkdir -p ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ -touch $FORM_DATA +touch $FORM_DATA # Simple tests echo -ne "col1=42&col2=Hello%2C%20World%21" > $FORM_DATA From 5313c9539ef8f988a607da87849eaa1f12e787e1 Mon Sep 17 00:00:00 2001 From: Blargian Date: Tue, 6 Aug 2024 14:34:06 +0200 Subject: [PATCH 1711/2361] update documentation for Float32/64 and variants --- .../functions/type-conversion-functions.md | 420 +++++++++++++++++- 1 file changed, 416 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 1e618b8cdab..77dd1628fe4 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -3045,13 +3045,425 @@ toUInt256OrDefault('abc', CAST('0', 'UInt256')): 0 - [`toUInt256OrZero`](#touint256orzero). - [`toUInt256OrNull`](#touint256ornull). -## toFloat(32\|64) +## toFloat32 -## toFloat(32\|64)OrZero +Converts an input value to a value of type [`Float32`](../data-types/float.md). Throws an exception in case of an error. -## toFloat(32\|64)OrNull +**Syntax** -## toFloat(32\|64)OrDefault +```sql +toFloat32(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- String representations of (U)Int8/16/32/128/256. +- Values of type Float32/64, including `NaN` and `Inf`. +- String representations of Float32/64, including `NaN` and `Inf`. + +Unsupported arguments: +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32('0xc0fe');`. + +**Returned value** + +- 32-bit floating point value. [Float32](../data-types/float.md). + +**Example** + +Query: + +```sql +SELECT + toFloat32(42.7), + toFloat32('42.7'), + toFloat32('NaN') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat32(42.7): 42.7 +toFloat32('42.7'): 42.7 +toFloat32('NaN'): nan +``` + +**See also** + +- [`toFloat32OrZero`](#tofloat32orzero). +- [`toFloat32OrNull`](#tofloat32ornull). +- [`toFloat32OrDefault`](#tofloat32ordefault). + +## toFloat32OrZero + +Like [`toFloat32`](#tofloat32), this function converts an input value to a value of type [Float32](../data-types/float.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toFloat32OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256, Float32/64. + +Unsupported arguments (return `0`): +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32OrZero('0xc0fe');`. + +**Returned value** + +- 32-bit Float value if successful, otherwise `0`. [Float32](../data-types/float.md). + +**Example** + +Query: + +``` sql +SELECT + toFloat32OrZero('42.7'), + toFloat32OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat32OrZero('42.7'): 42.7 +toFloat32OrZero('abc'): 0 +``` + +**See also** + +- [`toFloat32`](#tofloat32). +- [`toFloat32OrNull`](#tofloat32ornull). +- [`toFloat32OrDefault`](#tofloat32ordefault). 
+ +## toFloat32OrNull + +Like [`toFloat32`](#tofloat32), this function converts an input value to a value of type [Float32](../data-types/float.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toFloat32OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256, Float32/64. + +Unsupported arguments (return `\N`): +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32OrNull('0xc0fe');`. + +**Returned value** + +- 32-bit Float value if successful, otherwise `\N`. [Float32](../data-types/float.md). + +**Example** + +Query: + +``` sql +SELECT + toFloat32OrNull('42.7'), + toFloat32OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat32OrNull('42.7'): 42.7 +toFloat32OrNull('abc'): á´ºáµá´¸á´¸ +``` + +**See also** + +- [`toFloat32`](#tofloat32). +- [`toFloat32OrZero`](#tofloat32orzero). +- [`toFloat32OrDefault`](#tofloat32ordefault). + +## toFloat32OrDefault + +Like [`toFloat32`](#tofloat32), this function converts an input value to a value of type [Float32](../data-types/float.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toFloat32OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Float32` is unsuccessful. [Float32](../data-types/float.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- String representations of (U)Int8/16/32/128/256. +- Values of type Float32/64. +- String representations of Float32/64. + +Arguments for which the default value is returned: +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat32OrDefault('0xc0fe', CAST('0', 'Float32'));`. + +**Returned value** + +- 32-bit Float value if successful, otherwise returns the default value if passed or `0` if not. [Float32](../data-types/float.md). + +**Example** + +Query: + +``` sql +SELECT + toFloat32OrDefault('8', CAST('0', 'Float32')), + toFloat32OrDefault('abc', CAST('0', 'Float32')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat32OrDefault('8', CAST('0', 'Float32')): 8 +toFloat32OrDefault('abc', CAST('0', 'Float32')): 0 +``` + +**See also** + +- [`toFloat32`](#tofloat32). +- [`toFloat32OrZero`](#tofloat32orzero). +- [`toFloat32OrNull`](#tofloat32ornull). + +## toFloat64 + +Converts an input value to a value of type [`Float64`](../data-types/float.md). Throws an exception in case of an error. + +**Syntax** + +```sql +toFloat64(expr) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- String representations of (U)Int8/16/32/128/256. +- Values of type Float32/64, including `NaN` and `Inf`. +- String representations of type Float32/64, including `NaN` and `Inf`. + +Unsupported arguments: +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64('0xc0fe');`. + +**Returned value** + +- 64-bit floating point value. [Float64](../data-types/float.md). 
+ +**Example** + +Query: + +```sql +SELECT + toFloat64(42.7), + toFloat64('42.7'), + toFloat64('NaN') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat64(42.7): 42.7 +toFloat64('42.7'): 42.7 +toFloat64('NaN'): nan +``` + +**See also** + +- [`toFloat64OrZero`](#tofloat64orzero). +- [`toFloat64OrNull`](#tofloat64ornull). +- [`toFloat64OrDefault`](#tofloat64ordefault). + +## toFloat64OrZero + +Like [`toFloat64`](#tofloat64), this function converts an input value to a value of type [Float64](../data-types/float.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toFloat64OrZero(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256, Float32/64. + +Unsupported arguments (return `0`): +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrZero('0xc0fe');`. + +**Returned value** + +- 64-bit Float value if successful, otherwise `0`. [Float64](../data-types/float.md). + +**Example** + +Query: + +``` sql +SELECT + toFloat64OrZero('42.7'), + toFloat64OrZero('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat64OrZero('42.7'): 42.7 +toFloat64OrZero('abc'): 0 +``` + +**See also** + +- [`toFloat64`](#tofloat64). +- [`toFloat64OrNull`](#tofloat64ornull). +- [`toFloat64OrDefault`](#tofloat64ordefault). + +## toFloat64OrNull + +Like [`toFloat64`](#tofloat64), this function converts an input value to a value of type [Float64](../data-types/float.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toFloat64OrNull(x) +``` + +**Arguments** + +- `x` — A String representation of a number. [String](../data-types/string.md). + +Supported arguments: +- String representations of (U)Int8/16/32/128/256, Float32/64. + +Unsupported arguments (return `\N`): +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrNull('0xc0fe');`. + +**Returned value** + +- 64-bit Float value if successful, otherwise `\N`. [Float64](../data-types/float.md). + +**Example** + +Query: + +``` sql +SELECT + toFloat64OrNull('42.7'), + toFloat64OrNull('abc') +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat64OrNull('42.7'): 42.7 +toFloat64OrNull('abc'): á´ºáµá´¸á´¸ +``` + +**See also** + +- [`toFloat64`](#tofloat64). +- [`toFloat64OrZero`](#tofloat64orzero). +- [`toFloat64OrDefault`](#tofloat64ordefault). + +## toFloat64OrDefault + +Like [`toFloat64`](#tofloat64), this function converts an input value to a value of type [Float64](../data-types/float.md) but returns the default value in case of an error. +If no `default` value is passed then `0` is returned in case of an error. + +**Syntax** + +```sql +toFloat64OrDefault(expr[, default]) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions) / [String](../data-types/string.md). +- `default` (optional) — The default value to return if parsing to type `Float64` is unsuccessful. [Float64](../data-types/float.md). + +Supported arguments: +- Values of type (U)Int8/16/32/64/128/256. +- String representations of (U)Int8/16/32/128/256. +- Values of type Float32/64. +- String representations of Float32/64. + +Arguments for which the default value is returned: +- String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrDefault('0xc0fe', CAST('0', 'Float64'));`. 
+ +**Returned value** + +- 64-bit Float value if successful, otherwise returns the default value if passed or `0` if not. [Float64](../data-types/float.md). + +**Example** + +Query: + +``` sql +SELECT + toFloat64OrDefault('8', CAST('0', 'Float64')), + toFloat64OrDefault('abc', CAST('0', 'Float64')) +FORMAT vertical; +``` + +Result: + +```response +Row 1: +────── +toFloat64OrDefault('8', CAST('0', 'Float64')): 8 +toFloat64OrDefault('abc', CAST('0', 'Float64')): 0 +``` + +**See also** + +- [`toFloat64`](#tofloat64). +- [`toFloat64OrZero`](#tofloat64orzero). +- [`toFloat64OrNull`](#tofloat64ornull). ## toDate From 996699c78cbc4709a69ff866e487bba640fa0cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 14:34:15 +0200 Subject: [PATCH 1712/2361] Make 01456_modify_column_type_via_add_drop_update parallelizable --- .../01456_modify_column_type_via_add_drop_update.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/01456_modify_column_type_via_add_drop_update.sql b/tests/queries/0_stateless/01456_modify_column_type_via_add_drop_update.sql index b7cbfc92c26..a2e4804f12e 100644 --- a/tests/queries/0_stateless/01456_modify_column_type_via_add_drop_update.sql +++ b/tests/queries/0_stateless/01456_modify_column_type_via_add_drop_update.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel - DROP TABLE IF EXISTS tbl; CREATE TABLE tbl(a String, b UInt32, c Float64, d Int64, e UInt8) ENGINE=MergeTree ORDER BY tuple(); INSERT INTO tbl SELECT number, number * 2, number * 3, number * 4, number * 5 FROM system.numbers LIMIT 10; From d09c82ff76186a93b0c521c67ab30d9101bc4769 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 12:33:18 +0000 Subject: [PATCH 1713/2361] Cosmetics II --- .../mergetree-family/mergetree.md | 2 +- src/Storages/Statistics/Statistics.cpp | 20 ++++++++++--------- .../Statistics/StatisticsCountMinSketch.cpp | 10 +++++----- .../Statistics/StatisticsCountMinSketch.h | 6 +++--- src/Storages/Statistics/StatisticsTDigest.cpp | 10 +++++----- src/Storages/Statistics/StatisticsTDigest.h | 6 +++--- src/Storages/Statistics/StatisticsUniq.cpp | 10 +++++----- src/Storages/Statistics/StatisticsUniq.h | 6 +++--- 8 files changed, 36 insertions(+), 34 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 7ffbd9a5bae..183b94f4641 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1005,7 +1005,7 @@ They can be used for prewhere optimization only if we enable `set allow_statisti ## Column-level Settings {#column-level-settings} -Certain MergeTree settings can be override at column level: +Certain MergeTree settings can be overridden at column level: - `max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. - `min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. 
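A minimal sketch of what such a per-column override looks like in practice (table name and values are illustrative):

```sql
CREATE TABLE tab
(
    id Int64,
    document String SETTINGS (min_compress_block_size = 16777216, max_compress_block_size = 16777216)
)
ENGINE = MergeTree
ORDER BY id;

-- The override can be changed or removed later:
ALTER TABLE tab MODIFY COLUMN document MODIFY SETTING min_compress_block_size = 8192;
ALTER TABLE tab MODIFY COLUMN document RESET SETTING min_compress_block_size;
```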
diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 2a17101478a..771304405a6 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -89,15 +89,17 @@ Float64 IStatistics::estimateLess(const Field & /*val*/) const throw Exception(ErrorCodes::LOGICAL_ERROR, "Less-than estimation is not implemented for this type of statistics"); } -/// ------------------------------------- -/// Implementation of the estimation: -/// Note: Each statistics object supports certain types predicates natively, e.g. -/// - TDigest: '< X' (less-than predicates) -/// - Count-min sketches: '= X' (equal predicates) -/// - Uniq (HyperLogLog): 'count distinct(*)' (column cardinality) -/// If multiple statistics objects are available per column, it is sometimes also possible to combine them in a clever way. -/// For that reason, all estimation are performed in a central place (here), and we don't simply pass the predicate to the first statistics -/// object that supports it natively. +/// Notes: +/// - Statistics object usually only support estimation for certain types of predicates, e.g. +/// - TDigest: '< X' (less-than predicates) +/// - Count-min sketches: '= X' (equal predicates) +/// - Uniq (HyperLogLog): 'count distinct(*)' (column cardinality) +/// +/// If multiple statistics objects in a column support estimating a predicate, we want to try statistics in order of descending accuracy +/// (e.g. MinMax statistics are simpler than TDigest statistics and thus worse for estimating 'less' predicates). +/// +/// Sometimes, it is possible to combine multiple statistics in a clever way. For that reason, all estimation are performed in a central +/// place (here), and we don't simply pass the predicate to the first statistics object that supports it natively. 
Float64 ColumnStatistics::estimateLess(const Field & val) const { diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index 50d3b6e515c..dce5b39ae56 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -25,8 +25,8 @@ extern const int ILLEGAL_STATISTICS; static constexpr auto num_hashes = 7uz; static constexpr auto num_buckets = 2718uz; -StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_) - : IStatistics(stat_) +StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & description, DataTypePtr data_type_) + : IStatistics(description) , sketch(num_hashes, num_buckets) , data_type(data_type_) { @@ -84,7 +84,7 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) } -void countMinSketchStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*description*/, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -92,9 +92,9 @@ void countMinSketchStatisticsValidator(const SingleStatisticsDescription &, Data throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } -StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type) { - return std::make_shared(stat, data_type); + return std::make_shared(description, data_type); } } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h b/src/Storages/Statistics/StatisticsCountMinSketch.h index d10bc78a88e..af01408f2a3 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -14,7 +14,7 @@ namespace DB class StatisticsCountMinSketch : public IStatistics { public: - StatisticsCountMinSketch(const SingleStatisticsDescription & stat_, DataTypePtr data_type_); + StatisticsCountMinSketch(const SingleStatisticsDescription & description, DataTypePtr data_type_); Float64 estimateEqual(const Field & val) const override; @@ -31,8 +31,8 @@ private: }; -void countMinSketchStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void countMinSketchStatisticsValidator(const SingleStatisticsDescription & description, DataTypePtr data_type); +StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type); } diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index 7c5ea443201..73ab6c84b4e 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -10,8 +10,8 @@ extern const int ILLEGAL_STATISTICS; extern const int LOGICAL_ERROR; } -StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & stat_) - : IStatistics(stat_) +StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & description) + : IStatistics(description) { } @@ -57,7 +57,7 @@ Float64 StatisticsTDigest::estimateEqual(const Field & val) const throw 
Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); } -void tdigestStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void tdigestStatisticsValidator(const SingleStatisticsDescription & /*description*/, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -65,9 +65,9 @@ void tdigestStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' do not support type {}", data_type->getName()); } -StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr) +StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr /*data_type*/) { - return std::make_shared(stat); + return std::make_shared(description); } } diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index d41b0648aa4..47d6c93f64c 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -9,7 +9,7 @@ namespace DB class StatisticsTDigest : public IStatistics { public: - explicit StatisticsTDigest(const SingleStatisticsDescription & stat_); + explicit StatisticsTDigest(const SingleStatisticsDescription & description); void update(const ColumnPtr & column) override; @@ -23,7 +23,7 @@ private: QuantileTDigest t_digest; }; -void tdigestStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr); +void tdigestStatisticsValidator(const SingleStatisticsDescription & description, DataTypePtr data_type); +StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type); } diff --git a/src/Storages/Statistics/StatisticsUniq.cpp b/src/Storages/Statistics/StatisticsUniq.cpp index c259f09e0ae..e737f9987a5 100644 --- a/src/Storages/Statistics/StatisticsUniq.cpp +++ b/src/Storages/Statistics/StatisticsUniq.cpp @@ -11,8 +11,8 @@ namespace ErrorCodes extern const int ILLEGAL_STATISTICS; } -StatisticsUniq::StatisticsUniq(const SingleStatisticsDescription & stat_, const DataTypePtr & data_type) - : IStatistics(stat_) +StatisticsUniq::StatisticsUniq(const SingleStatisticsDescription & description, const DataTypePtr & data_type) + : IStatistics(description) { arena = std::make_unique(); AggregateFunctionProperties properties; @@ -52,7 +52,7 @@ UInt64 StatisticsUniq::estimateCardinality() const return column->getUInt(0); } -void uniqStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type) +void uniqStatisticsValidator(const SingleStatisticsDescription & /*description*/, DataTypePtr data_type) { data_type = removeNullable(data_type); data_type = removeLowCardinalityAndNullable(data_type); @@ -60,9 +60,9 @@ void uniqStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr da throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'uniq' do not support type {}", data_type->getName()); } -StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type) +StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type) { - return std::make_shared(stat, data_type); + return std::make_shared(description, data_type); } } diff --git 
a/src/Storages/Statistics/StatisticsUniq.h b/src/Storages/Statistics/StatisticsUniq.h index 743714e1e1b..6b511d4f496 100644 --- a/src/Storages/Statistics/StatisticsUniq.h +++ b/src/Storages/Statistics/StatisticsUniq.h @@ -10,7 +10,7 @@ namespace DB class StatisticsUniq : public IStatistics { public: - StatisticsUniq(const SingleStatisticsDescription & stat_, const DataTypePtr & data_type); + StatisticsUniq(const SingleStatisticsDescription & description, const DataTypePtr & data_type); ~StatisticsUniq() override; void update(const ColumnPtr & column) override; @@ -27,7 +27,7 @@ private: }; -void uniqStatisticsValidator(const SingleStatisticsDescription &, DataTypePtr data_type); -StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & stat, DataTypePtr data_type); +void uniqStatisticsValidator(const SingleStatisticsDescription & description, DataTypePtr data_type); +StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type); } From e2eeb6f1802090c29021977abdbc46d20f22c9a1 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 6 Aug 2024 12:54:27 +0000 Subject: [PATCH 1714/2361] Update version_date.tsv and changelogs after v24.6.3.95-stable --- docs/changelogs/v24.6.3.95-stable.md | 67 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 68 insertions(+) create mode 100644 docs/changelogs/v24.6.3.95-stable.md diff --git a/docs/changelogs/v24.6.3.95-stable.md b/docs/changelogs/v24.6.3.95-stable.md new file mode 100644 index 00000000000..b90b7346e40 --- /dev/null +++ b/docs/changelogs/v24.6.3.95-stable.md @@ -0,0 +1,67 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.6.3.95-stable (8325c920d11) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c) + +#### Improvement +* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) +* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpeced size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)). +* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)). 
+* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). +* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). 
[#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)). +* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)). +* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). 
+* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)). +* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)). +* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. 
[#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)). +* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)). +* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)). +* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)). +* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)). +* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)). 
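One entry above, [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237), concerns detecting the CPU limit via cgroups v2 even when the container runs its process in the root cgroup with an empty name. A minimal sketch of the kernel interface involved, assuming the standard `/sys/fs/cgroup` mount point and the documented `cpu.max` format; this is an illustration, not the server's actual implementation:

```python
# Sketch only: reading a cgroup v2 CPU limit the way a containerized process
# would see it. Not ClickHouse code; the paths and formats are the standard
# kernel interfaces (/proc/self/cgroup and cpu.max in the unified hierarchy).
import os

def cgroup_v2_cpu_limit(mount: str = "/sys/fs/cgroup"):
    # /proc/self/cgroup holds a line like "0::/some/path". In containers the
    # path is often just "/" (the root cgroup), the case the fix above handles.
    path = ""
    with open("/proc/self/cgroup") as f:
        for line in f:
            if line.startswith("0::"):
                path = line[3:].strip()
                break
    try:
        with open(os.path.join(mount, path.lstrip("/"), "cpu.max")) as f:
            quota, period = f.read().split()
    except (FileNotFoundError, ValueError):
        return None
    if quota == "max":
        return None  # no CPU quota configured
    return float(quota) / float(period)  # e.g. "200000 100000" -> 2.0 cores

if __name__ == "__main__":
    print(cgroup_v2_cpu_limit())
```

A `None` result simply means no quota could be read; how that is mapped to a usable core count is up to the caller and is outside this sketch.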
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 75c10fa67b8..e410f31ca5a 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,5 +1,6 @@ v24.7.2.13-stable 2024-08-01 v24.7.1.2915-stable 2024-07-30 +v24.6.3.95-stable 2024-08-06 v24.6.2.17-stable 2024-07-05 v24.6.1.4423-stable 2024-07-01 v24.5.5.78-stable 2024-08-05 From dbfba5ebc4d011d13ab78f91700fae12b46ce58b Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 6 Aug 2024 12:56:52 +0000 Subject: [PATCH 1715/2361] Validate data types in ALTER ADD/MODIFY COLUMN --- src/Storages/AlterCommands.cpp | 5 +++++ ..._type_in_alter_add_modify_column.reference | 0 ...lidate_type_in_alter_add_modify_column.sql | 19 +++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.reference create mode 100644 tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.sql diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 7891042bb96..dfb388ffdb2 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -1316,6 +1317,8 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const throw Exception(ErrorCodes::BAD_ARGUMENTS, "Data type have to be specified for column {} to add", backQuote(column_name)); + validateDataType(command.data_type, DataTypeValidationSettings(context->getSettingsRef())); + /// FIXME: Adding a new column of type Object(JSON) is broken. /// Looks like there is something around default expression for this column (method `getDefault` is not implemented for the data type Object). /// But after ALTER TABLE ADD COLUMN we need to fill existing rows with something (exactly the default value). @@ -1395,6 +1398,8 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const /// So we don't allow to do it for now. 
if (command.data_type) { + validateDataType(command.data_type, DataTypeValidationSettings(context->getSettingsRef())); + const GetColumnsOptions options(GetColumnsOptions::All); const auto old_data_type = all_columns.getColumn(options, column_name).type; diff --git a/tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.reference b/tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.sql b/tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.sql new file mode 100644 index 00000000000..267bc7111f4 --- /dev/null +++ b/tests/queries/0_stateless/03215_validate_type_in_alter_add_modify_column.sql @@ -0,0 +1,19 @@ +set allow_experimental_variant_type = 0; +set allow_experimental_dynamic_type = 0; +set allow_suspicious_low_cardinality_types = 0; +set allow_suspicious_fixed_string_types = 0; + +drop table if exists test; +create table test (id UInt64) engine=MergeTree order by id; +alter table test add column bad Variant(UInt32, String); -- {serverError ILLEGAL_COLUMN} +alter table test add column bad Dynamic; -- {serverError ILLEGAL_COLUMN} +alter table test add column bad LowCardinality(UInt8); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +alter table test add column bad FixedString(10000); -- {serverError ILLEGAL_COLUMN} + +alter table test modify column id Variant(UInt32, String); -- {serverError ILLEGAL_COLUMN} +alter table test modify column id Dynamic; -- {serverError ILLEGAL_COLUMN} +alter table test modify column id LowCardinality(UInt8); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +alter table test modify column id FixedString(10000); -- {serverError ILLEGAL_COLUMN} + +drop table test; + From f3ee25036f9c5796a9018699d575f94bf75a50b5 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Thu, 2 May 2024 17:39:54 +0000 Subject: [PATCH 1716/2361] Building aarch64 builds with '-no-pie' to allow better introspection --- CMakeLists.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f796e6c4616..0d862b23e3a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -428,12 +428,14 @@ if (NOT SANITIZE) set (CMAKE_POSITION_INDEPENDENT_CODE OFF) endif() -if (OS_LINUX AND NOT (ARCH_AARCH64 OR ARCH_S390X) AND NOT SANITIZE) +if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE) # Slightly more efficient code can be generated - # It's disabled for ARM because otherwise ClickHouse cannot run on Android. + # Disabled for Android, because otherwise ClickHouse cannot run on Android. 
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie") +else () + message (WARNING "ClickHouse is built as PIE, system.trace_log will contain invalid addresses after server restart.") endif () if (ENABLE_TESTS) From e0e32b542560aeefa6863195d0207859ec5f338e Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 6 Aug 2024 13:26:12 +0000 Subject: [PATCH 1717/2361] Fix tests --- .../queries/0_stateless/02910_object-json-crash-add-column.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02910_object-json-crash-add-column.sql b/tests/queries/0_stateless/02910_object-json-crash-add-column.sql index b2d64be1676..bda5e958453 100644 --- a/tests/queries/0_stateless/02910_object-json-crash-add-column.sql +++ b/tests/queries/0_stateless/02910_object-json-crash-add-column.sql @@ -1,3 +1,5 @@ +SET allow_experimental_object_type=1; + DROP TABLE IF EXISTS test02910; CREATE TABLE test02910 From f37fcb776800cdb6bff4d89e5814c39bbbc46033 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 6 Aug 2024 14:32:20 +0100 Subject: [PATCH 1718/2361] impl --- src/Compression/fuzzers/CMakeLists.txt | 12 ++++++------ src/Storages/fuzzers/CMakeLists.txt | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Compression/fuzzers/CMakeLists.txt b/src/Compression/fuzzers/CMakeLists.txt index a693faecc14..311f1eb3d35 100644 --- a/src/Compression/fuzzers/CMakeLists.txt +++ b/src/Compression/fuzzers/CMakeLists.txt @@ -5,19 +5,19 @@ # If you want really small size of the resulted binary, just link with fuzz_compression and clickhouse_common_io clickhouse_add_executable (compressed_buffer_fuzzer compressed_buffer_fuzzer.cpp) -target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms) +target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms clickhouse_functions) clickhouse_add_executable (lz4_decompress_fuzzer lz4_decompress_fuzzer.cpp) -target_link_libraries (lz4_decompress_fuzzer PUBLIC dbms ch_contrib::lz4) +target_link_libraries (lz4_decompress_fuzzer PUBLIC dbms ch_contrib::lz4 clickhouse_functions) clickhouse_add_executable (delta_decompress_fuzzer delta_decompress_fuzzer.cpp) -target_link_libraries (delta_decompress_fuzzer PRIVATE dbms) +target_link_libraries (delta_decompress_fuzzer PRIVATE dbms clickhouse_functions) clickhouse_add_executable (double_delta_decompress_fuzzer double_delta_decompress_fuzzer.cpp) -target_link_libraries (double_delta_decompress_fuzzer PRIVATE dbms) +target_link_libraries (double_delta_decompress_fuzzer PRIVATE dbms clickhouse_functions) clickhouse_add_executable (encrypted_decompress_fuzzer encrypted_decompress_fuzzer.cpp) -target_link_libraries (encrypted_decompress_fuzzer PRIVATE dbms) +target_link_libraries (encrypted_decompress_fuzzer PRIVATE dbms clickhouse_functions) clickhouse_add_executable (gcd_decompress_fuzzer gcd_decompress_fuzzer.cpp) -target_link_libraries (gcd_decompress_fuzzer PRIVATE dbms) +target_link_libraries (gcd_decompress_fuzzer PRIVATE dbms clickhouse_functions) diff --git a/src/Storages/fuzzers/CMakeLists.txt b/src/Storages/fuzzers/CMakeLists.txt index e36fccec8df..ec56b853666 100644 --- a/src/Storages/fuzzers/CMakeLists.txt +++ b/src/Storages/fuzzers/CMakeLists.txt @@ -1,7 +1,7 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.cpp) # Look at comment around fuzz_compression target declaration 
-target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms) +target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms clickhouse_functions) clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp) target_link_libraries (columns_description_fuzzer PRIVATE dbms clickhouse_functions) From dbcc5cf1333c6a3d8f5f8f1b7dc67d038055be2e Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 6 Aug 2024 15:39:08 +0200 Subject: [PATCH 1719/2361] CI: Changelog: Critical Bug Fix to Bug Fix --- tests/ci/changelog.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index 929f0f3523a..fe47fe53a9e 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -38,7 +38,7 @@ categories_preferred_order = ( "Experimental Feature", "Performance Improvement", "Improvement", - "Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)", + #"Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)", "Bug Fix (user-visible misbehavior in an official stable release)", "Build/Testing/Packaging Improvement", "Other", @@ -294,7 +294,8 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri r"(?i)bug\Wfix", category, ) - and "Critical Bug Fix" not in category + # Map "Critical Bug Fix" to "Bug fix" category for changelog + #and "Critical Bug Fix" not in category ): category = "Bug Fix (user-visible misbehavior in an official stable release)" From b99c6c1153224b8d17dc663939b7c621dfe77ceb Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 6 Aug 2024 14:02:24 +0000 Subject: [PATCH 1720/2361] fix reading of size column from missed Nested in compact parts --- src/Storages/MergeTree/MergeTreeReaderCompact.cpp | 12 +++++++++++- src/Storages/MergeTree/MergeTreeReaderCompact.h | 3 ++- .../MergeTree/MergeTreeReaderCompactSingleBuffer.cpp | 7 ++++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index ff0311dc1ca..d49ad61feca 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -147,7 +147,8 @@ void MergeTreeReaderCompact::readData( const NameAndTypePair & name_and_type, ColumnPtr & column, size_t rows_to_read, - const InputStreamGetter & getter) + const InputStreamGetter & getter, + ISerialization::SubstreamsCache & cache) { try { @@ -158,6 +159,13 @@ void MergeTreeReaderCompact::readData( deserialize_settings.getter = getter; deserialize_settings.avg_value_size_hint = avg_value_size_hints[name]; + auto it = cache.find(name); + if (it != cache.end() && it->second != nullptr) + { + column = it->second; + return; + } + if (name_and_type.isSubcolumn()) { const auto & type_in_storage = name_and_type.getTypeInStorage(); @@ -181,6 +189,8 @@ void MergeTreeReaderCompact::readData( serialization->deserializeBinaryBulkWithMultipleStreams(column, rows_to_read, deserialize_settings, deserialize_binary_bulk_state_map[name], nullptr); } + cache.emplace(name, column); + size_t read_rows_in_column = column->size() - column_size_before_reading; if (read_rows_in_column != rows_to_read) throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.h b/src/Storages/MergeTree/MergeTreeReaderCompact.h index 22eabd47930..1c6bd1474e3 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.h +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.h @@ -44,7 +44,8 @@ protected: const 
NameAndTypePair & name_and_type, ColumnPtr & column, size_t rows_to_read, - const InputStreamGetter & getter); + const InputStreamGetter & getter, + ISerialization::SubstreamsCache & cache); void readPrefix( const NameAndTypePair & name_and_type, diff --git a/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp b/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp index 2b2cf493bb5..004ba4db028 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp @@ -22,6 +22,10 @@ try checkNumberOfColumns(num_columns); createColumnsForReading(res_columns); + /// Use cache to avoid reading the column with the same name twice. + /// It may happen if there are empty array Nested in the part. + std::unordered_map caches; + while (read_rows < max_rows_to_read) { size_t rows_to_read = data_part_info_for_read->getIndexGranularity().getMarkRows(from_mark); @@ -32,6 +36,7 @@ try continue; auto & column = res_columns[pos]; + auto & cache = caches[columns_to_read[pos].name]; stream->adjustRightMark(current_task_last_mark); /// Must go before seek. stream->seekToMarkAndColumn(from_mark, *column_positions[pos]); @@ -52,7 +57,7 @@ try }; readPrefix(columns_to_read[pos], buffer_getter, buffer_getter_for_prefix, columns_for_offsets[pos]); - readData(columns_to_read[pos], column, rows_to_read, buffer_getter); + readData(columns_to_read[pos], column, rows_to_read, buffer_getter, cache); } ++from_mark; From 99b18d31db32a077678661bd9ba84fb52ff49333 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 14 Jul 2024 15:30:38 +0200 Subject: [PATCH 1721/2361] Update gdb to 15.1 (by compiling from sources) Right now there are couple of gdb bugs that makes CI unstable: - https://sourceware.org/bugzilla/show_bug.cgi?id=29185 - https://bugzilla.redhat.com/show_bug.cgi?id=1492496 But ubuntu 22.04 does not have 14+ anywhere, the ~ubuntu-toolchain-r/test contains only gdb 13, so there is no other options except for compiling it from sources. But there also other reasons to update it - optimizations, looks like older gdb versions does not use index fully - 5.6sec vs 56sec: # 15.1 $ time command gdb -batch -ex 'disas main' clickhouse ... 
real 0m5.692s user 0m29.948s sys 0m1.190s # 12.1 (from ubuntu 22.04) real 0m56.709s user 0m59.307s sys 0m0.585s Also note, that we cannot compile gdb in the fasttest (that contains compiler) since some images does not includes full toolchain, for instance gdb is added in the following images: - test-util -> test-base -> lots of other images (no toolchain) - performance-comparison (no toolchain) - integration-test (no toolchain) - integration-tests-runner (no toolchain) Signed-off-by: Azat Khuzhin --- docker/images.json | 4 +++ docker/packager/binary-builder/Dockerfile | 5 ++++ docker/packager/gdb/Dockerfile | 30 +++++++++++++++++++++++ 3 files changed, 39 insertions(+) create mode 100644 docker/packager/gdb/Dockerfile diff --git a/docker/images.json b/docker/images.json index 716b76ee217..eae4ad30a79 100644 --- a/docker/images.json +++ b/docker/images.json @@ -7,6 +7,10 @@ "name": "clickhouse/cctools", "dependent": [] }, + "docker/packager/gdb": { + "name": "clickhouse/gdb", + "dependent": [] + }, "docker/test/compatibility/centos": { "name": "clickhouse/test-old-centos", "dependent": [] diff --git a/docker/packager/binary-builder/Dockerfile b/docker/packager/binary-builder/Dockerfile index 7d6acdcd856..647ab8758a5 100644 --- a/docker/packager/binary-builder/Dockerfile +++ b/docker/packager/binary-builder/Dockerfile @@ -6,6 +6,11 @@ ENV CXX=clang++-${LLVM_VERSION} # If the cctools is updated, then first build it in the CI, then update here in a different commit COPY --from=clickhouse/cctools:d9e3596e706b /cctools /cctools +# TODO: same for gdb and in other places as well +# +# NOTE: here it will add circular dependency but it will be fixed after [1] +# +# [1]: https://github.com/ClickHouse/ClickHouse/issues/66493 # Rust toolchain and libraries ENV RUSTUP_HOME=/rust/rustup diff --git a/docker/packager/gdb/Dockerfile b/docker/packager/gdb/Dockerfile new file mode 100644 index 00000000000..1b5c3bbcb2e --- /dev/null +++ b/docker/packager/gdb/Dockerfile @@ -0,0 +1,30 @@ +# docker build -t clickhouse/gdb . + +ARG FROM_TAG=latest +FROM clickhouse/fasttest:$FROM_TAG + +ENV CC=clang-${LLVM_VERSION} +ENV CXX=clang++-${LLVM_VERSION} +# ld from binutils is 2.38, which has the following error: +# +# DWARF error: invalid or unhandled FORM value: 0x23 +# +ENV LD=ld.lld-${LLVM_VERSION} + +ARG GDB_VERSION=15.1 + +# gdb dependencies +RUN apt-get update \ + && apt-get install --yes \ + libgmp-dev \ + libmpfr-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* + +RUN wget https://sourceware.org/pub/gdb/releases/gdb-$GDB_VERSION.tar.gz \ + && tar -xvf gdb-$GDB_VERSION.tar.gz \ + && cd gdb-$GDB_VERSION \ + && ./configure --prefix=/usr \ + && make -j $(nproc) \ + && make install \ + && rm -fr gdb-$GDB_VERSION gdb-$GDB_VERSION.tar.gz From 459dd1ff2db90f85d9c1f72329ff2f31117c13a7 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 14 Jul 2024 16:35:49 +0200 Subject: [PATCH 1722/2361] Add a comment of the image name for cctools Signed-off-by: Azat Khuzhin --- docker/packager/cctools/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/packager/cctools/Dockerfile b/docker/packager/cctools/Dockerfile index d986c6a3c86..56a4a65bcc9 100644 --- a/docker/packager/cctools/Dockerfile +++ b/docker/packager/cctools/Dockerfile @@ -1,3 +1,5 @@ +# docker build -t clickhouse/cctools . 
+ # This is a hack to significantly reduce the build time of the clickhouse/binary-builder # It's based on the assumption that we don't care of the cctools version so much # It event does not depend on the clickhouse/fasttest in the `docker/images.json` From 16fcd5b825b50ba88de8ef42b37d40eac1af5596 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 14 Jul 2024 15:48:12 +0200 Subject: [PATCH 1723/2361] Remove .gdb-index (do not give any benefit for gdb 15.1) Signed-off-by: Azat Khuzhin --- CMakeLists.txt | 8 -------- 1 file changed, 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f796e6c4616..f3c6b2abb30 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -187,14 +187,6 @@ else () set(NO_WHOLE_ARCHIVE --no-whole-archive) endif () -if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") - # Can be lld or ld-lld or lld-13 or /path/to/lld. - if (LINKER_NAME MATCHES "lld") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index") - message (STATUS "Adding .gdb-index via --gdb-index linker option.") - endif () -endif() - if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE) AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" From 424f19d6c0d003d2a9e2c03017c7fa7dc9dd9233 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 5 Aug 2024 22:49:27 +0200 Subject: [PATCH 1724/2361] Move gdb into cctools Signed-off-by: Azat Khuzhin --- docker/images.json | 4 ---- docker/packager/cctools/Dockerfile | 24 ++++++++++++++++++++++++ docker/packager/gdb/Dockerfile | 30 ------------------------------ 3 files changed, 24 insertions(+), 34 deletions(-) delete mode 100644 docker/packager/gdb/Dockerfile diff --git a/docker/images.json b/docker/images.json index eae4ad30a79..716b76ee217 100644 --- a/docker/images.json +++ b/docker/images.json @@ -7,10 +7,6 @@ "name": "clickhouse/cctools", "dependent": [] }, - "docker/packager/gdb": { - "name": "clickhouse/gdb", - "dependent": [] - }, "docker/test/compatibility/centos": { "name": "clickhouse/test-old-centos", "dependent": [] diff --git a/docker/packager/cctools/Dockerfile b/docker/packager/cctools/Dockerfile index 56a4a65bcc9..570a42d42d5 100644 --- a/docker/packager/cctools/Dockerfile +++ b/docker/packager/cctools/Dockerfile @@ -32,5 +32,29 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \ && cd ../.. \ && rm -rf cctools-port +# +# GDB +# +# ld from binutils is 2.38, which has the following error: +# +# DWARF error: invalid or unhandled FORM value: 0x23 +# +ENV LD=ld.lld-${LLVM_VERSION} +ARG GDB_VERSION=15.1 +RUN apt-get update \ + && apt-get install --yes \ + libgmp-dev \ + libmpfr-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* +RUN wget https://sourceware.org/pub/gdb/releases/gdb-$GDB_VERSION.tar.gz \ + && tar -xvf gdb-$GDB_VERSION.tar.gz \ + && cd gdb-$GDB_VERSION \ + && ./configure --prefix=/opt/gdb \ + && make -j $(nproc) \ + && make install \ + && rm -fr gdb-$GDB_VERSION gdb-$GDB_VERSION.tar.gz + FROM scratch COPY --from=builder /cctools /cctools +COPY --from=builder /opt/gdb /opt/gdb diff --git a/docker/packager/gdb/Dockerfile b/docker/packager/gdb/Dockerfile deleted file mode 100644 index 1b5c3bbcb2e..00000000000 --- a/docker/packager/gdb/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -# docker build -t clickhouse/gdb . 
- -ARG FROM_TAG=latest -FROM clickhouse/fasttest:$FROM_TAG - -ENV CC=clang-${LLVM_VERSION} -ENV CXX=clang++-${LLVM_VERSION} -# ld from binutils is 2.38, which has the following error: -# -# DWARF error: invalid or unhandled FORM value: 0x23 -# -ENV LD=ld.lld-${LLVM_VERSION} - -ARG GDB_VERSION=15.1 - -# gdb dependencies -RUN apt-get update \ - && apt-get install --yes \ - libgmp-dev \ - libmpfr-dev \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* - -RUN wget https://sourceware.org/pub/gdb/releases/gdb-$GDB_VERSION.tar.gz \ - && tar -xvf gdb-$GDB_VERSION.tar.gz \ - && cd gdb-$GDB_VERSION \ - && ./configure --prefix=/usr \ - && make -j $(nproc) \ - && make install \ - && rm -fr gdb-$GDB_VERSION gdb-$GDB_VERSION.tar.gz From 5c4f2c198573ba03b793e4610f4e9854a61a5543 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 6 Aug 2024 14:33:28 +0000 Subject: [PATCH 1725/2361] Fix DateTime64 parsing after constant folding --- src/Analyzer/ConstantNode.cpp | 22 ++++++++++++++++++- ...03217_datetime64_constant_to_ast.reference | 2 ++ .../03217_datetime64_constant_to_ast.sql | 6 +++++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03217_datetime64_constant_to_ast.reference create mode 100644 tests/queries/0_stateless/03217_datetime64_constant_to_ast.sql diff --git a/src/Analyzer/ConstantNode.cpp b/src/Analyzer/ConstantNode.cpp index 46c1f7fb1ed..c65090f5b55 100644 --- a/src/Analyzer/ConstantNode.cpp +++ b/src/Analyzer/ConstantNode.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -162,6 +163,7 @@ QueryTreeNodePtr ConstantNode::cloneImpl() const ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const { const auto & constant_value_literal = constant_value->getValue(); + const auto & constant_value_type = constant_value->getType(); auto constant_value_ast = std::make_shared(constant_value_literal); if (!options.add_cast_for_constants) @@ -169,7 +171,25 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const if (requiresCastCall()) { - auto constant_type_name_ast = std::make_shared(constant_value->getType()->getName()); + /** Value for DateTime64 is Decimal64, which is serialized as a string literal. + * If we serialize it as is, DateTime64 would be parsed from that string literal, which can be incorrect. + * For example, DateTime64 cannot be parsed from the short value, like '1', while it's a valid Decimal64 value. + * It could also lead to ambiguous parsing because we don't know if the string literal represents a date or a Decimal64 literal. + * For this reason, we use a string literal representing a date instead of a Decimal64 literal. 
+ */ + if (WhichDataType(constant_value_type->getTypeId()).isDateTime64()) + { + const auto * date_time_type = typeid_cast(constant_value_type.get()); + DecimalField decimal_value; + if (constant_value_literal.tryGet>(decimal_value)) + { + WriteBufferFromOwnString ostr; + writeDateTimeText(decimal_value.getValue(), date_time_type->getScale(), ostr, date_time_type->getTimeZone()); + constant_value_ast = std::make_shared(ostr.str()); + } + } + + auto constant_type_name_ast = std::make_shared(constant_value_type->getName()); return makeASTFunction("_CAST", std::move(constant_value_ast), std::move(constant_type_name_ast)); } diff --git a/tests/queries/0_stateless/03217_datetime64_constant_to_ast.reference b/tests/queries/0_stateless/03217_datetime64_constant_to_ast.reference new file mode 100644 index 00000000000..c20baa0d261 --- /dev/null +++ b/tests/queries/0_stateless/03217_datetime64_constant_to_ast.reference @@ -0,0 +1,2 @@ +1970-01-01 00:00:01.000 +1970-01-01 00:00:01.000 diff --git a/tests/queries/0_stateless/03217_datetime64_constant_to_ast.sql b/tests/queries/0_stateless/03217_datetime64_constant_to_ast.sql new file mode 100644 index 00000000000..63334a511c7 --- /dev/null +++ b/tests/queries/0_stateless/03217_datetime64_constant_to_ast.sql @@ -0,0 +1,6 @@ + +SET session_timezone = 'UTC'; + +SELECT toDateTime64('1970-01-01 00:00:01', 3) +FROM remote('127.0.0.{1,2}', system, one) +; From 815fdc43ac333a75adff646fef073ea591494d13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 6 Aug 2024 14:36:02 +0000 Subject: [PATCH 1726/2361] Revert "Merge pull request #67800 from ClickHouse/revert-66510" This reverts commit 45c4a71ccb62bac6728d0e583fd04c0fc4f45a6f, reversing changes made to bb71c1eea8e6019a5a21b6add08c2244764ddea5. 
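The change re-applied by this revert keeps apart two uses of predicate splitting: extracting a filter for generic row pruning, and extracting one that feeds the trivial-count optimization over columns like `_partition_id`. A plain-Python sketch of the reasoning, with made-up rows and column names rather than the real ActionsDAG machinery: a predicate reduced to only the conjuncts over allowed columns selects a superset of the rows, which is safe for pruning candidates but not for reporting an exact count.

```python
# Illustration, not ClickHouse code: a conjunctive predicate split by the set
# of columns that may be evaluated early (e.g. only virtual/partition columns).
rows = [{"p": 0, "row_number": n} for n in range(10)]

full_predicate = [
    ("p", lambda r: r["p"] == 0),                    # uses an allowed column
    ("row_number", lambda r: r["row_number"] == 1),  # depends on execution order
]
allowed_columns = {"p"}

# Keep only the conjuncts whose column is allowed -- the "partial" filter.
partial_predicate = [f for col, f in full_predicate if col in allowed_columns]

full_count = sum(all(f(r) for _, f in full_predicate) for r in rows)
partial_count = sum(all(f(r) for f in partial_predicate) for r in rows)

print(full_count)     # 1  -- what the query must return
print(partial_count)  # 10 -- a superset: fine for pruning, wrong as a count
```

This is why the strict mode in the restored code below returns no predicate at all instead of a silently weakened one.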
--- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- src/Storages/VirtualColumnUtils.cpp | 21 ++++++++++++------- src/Storages/VirtualColumnUtils.h | 10 ++++++++- ..._with_non_deterministic_function.reference | 2 ++ ..._count_with_non_deterministic_function.sql | 4 ++++ 5 files changed, 30 insertions(+), 9 deletions(-) create mode 100644 tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference create mode 100644 tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 49888596fbb..ce27ad24e10 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1146,7 +1146,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false); if (!filter_dag) return {}; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index ba1f4488005..90c2c7f93c1 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -275,7 +275,8 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( const ActionsDAG::Node * node, const Block * allowed_inputs, - ActionsDAG::Nodes & additional_nodes) + ActionsDAG::Nodes & additional_nodes, + bool allow_non_deterministic_functions) { if (node->type == ActionsDAG::ActionType::FUNCTION) { @@ -284,8 +285,14 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto & node_copy = additional_nodes.emplace_back(*node); node_copy.children.clear(); for (const auto * child : node->children) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) node_copy.children.push_back(child_copy); + /// Expression like (now_allowed AND allowed) is not allowed if allow_non_deterministic_functions = true. This is important for + /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is + /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply + /// trivial count. 
+ else if (!allow_non_deterministic_functions) + return nullptr; if (node_copy.children.empty()) return nullptr; @@ -311,7 +318,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { auto & node_copy = additional_nodes.emplace_back(*node); for (auto & child : node_copy.children) - if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes); !child) + if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions); !child) return nullptr; return &node_copy; @@ -325,7 +332,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto index_hint_dag = index_hint->getActions().clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag.getOutputs()) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) + if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) atoms.push_back(child_copy); if (!atoms.empty()) @@ -359,13 +366,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs) +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions) { if (!predicate) return {}; ActionsDAG::Nodes additional_nodes; - const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes); + const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_non_deterministic_functions); if (!res) return {}; @@ -374,7 +381,7 @@ std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context) { - auto dag = splitFilterDagForAllowedInputs(predicate, &block); + auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false); if (dag) filterBlockWithExpression(buildFilterExpression(std::move(*dag), context), block); } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index d75dc70ae44..abf46dc23a4 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -41,7 +41,15 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); +/// When allow_non_deterministic_functions is true then even if the predicate contains non-deterministic +/// functions, we still allow to extract a part of the predicate, otherwise we return nullptr. +/// allow_non_deterministic_functions must be false when we are going to use the result to filter parts in +/// MergeTreeData::totalRowsByPartitionPredicateImp. For example, if the query is +/// `SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1` +/// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is +/// non-deterministic. 
If we still extract the part `_partition_id = '0'` for filtering parts, then trivial +/// count optimization will be mistakenly applied to the query. +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true); /// Extract from the input stream a set of `name` column values template diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql new file mode 100644 index 00000000000..bb3269da597 --- /dev/null +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql @@ -0,0 +1,4 @@ +CREATE TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x; +INSERT INTO t SELECT 0, number FROM numbers(10) SETTINGS max_block_size = 100; +SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 0; +SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 1; From ea1575f60aa41d62c3d22211d8dfb5e187b2194e Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 21:14:04 +0200 Subject: [PATCH 1727/2361] tests: avoid leaving processes leftovers Previously processes cleanup on i.e. SIGINT simply did not work, because the launcher kills only processes in process group, while tests are launched with start_new_session=True for Popen(), which creates own process group. This is needed for killing process group in case of test timeout. So instead, look at the parent pid, and kill the child process groups. Also add some logging to make it more explicit which processes will be killed. Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 99 +++++++++++++++++++++++++++++++------------ 1 file changed, 71 insertions(+), 28 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 907d773337a..5e70b37e232 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -358,14 +358,78 @@ def clickhouse_execute_json( return rows +def kill_process_group(pgid): + print(f"Killing process group {pgid}") + print(f"Processes in process group {pgid}:") + print( + subprocess.check_output( + f"pgrep --pgroup {pgid} -a", shell=True, stderr=subprocess.STDOUT + ).decode("utf-8"), + end="", + ) + try: + # NOTE: this still may leave some processes, that had been + # created by timeout(1), since it also creates new process + # group. But this should not be a problem with default + # options, since the default time for each test is 10min, + # and this is way more bigger then the timeout for each + # timeout(1) invocation. + # + # But as a workaround we are sending SIGTERM first, and + # only after SIGKILL, that way timeout(1) will have an + # ability to terminate childrens (though not always since + # signals are asynchronous). + os.killpg(pgid, signal.SIGTERM) + # This may not be enough, but this is at least something + # (and anyway it is OK to spend 0.1 second more in case of + # test timeout). + sleep(0.1) + os.killpg(pgid, signal.SIGKILL) + except OSError as e: + if e.errno == ESRCH: + print(f"Got ESRCH while killing {pgid}. 
Ignoring.") + else: + raise + print(f"Process group {pgid} should be killed") + + +def cleanup_child_processes(pid): + pgid = os.getpgid(os.getpid()) + print(f"Child processes of {pid}:") + print( + subprocess.check_output( + f"pgrep --parent {pid} -a", shell=True, stderr=subprocess.STDOUT + ).decode("utf-8"), + end="", + ) + # Due to start_new_session=True, it is not enough to kill by PGID, we need + # to look at children processes as well. + # But we are hoping that nobody create session in the tests (though it is + # possible via timeout(), but we assuming that they will be killed by + # timeout). + processes = subprocess.check_output( + f"pgrep --parent {pid}", shell=True, stderr=subprocess.STDOUT + ) + processes = processes.decode("utf-8") + processes = processes.strip() + processes = processes.split("\n") + processes = map(lambda x: int(x.strip()), processes) + processes = list(processes) + for child in processes: + child_pgid = os.getpgid(child) + if child_pgid != pgid: + kill_process_group(child_pgid) + + # SIGKILL should not be sent, since this will kill the script itself + os.killpg(pgid, signal.SIGTERM) + + +# send signal to all processes in group to avoid hung check triggering +# (to avoid terminating clickhouse-test itself, the signal should be ignored) def stop_tests(): - # send signal to all processes in group to avoid hung check triggering - # (to avoid terminating clickhouse-test itself, the signal should be ignored) - print("Sending signals") signal.signal(signal.SIGTERM, signal.SIG_IGN) - os.killpg(os.getpgid(os.getpid()), signal.SIGTERM) - signal.signal(signal.SIGTERM, signal.SIG_DFL) - print("Sending signals DONE") + cleanup_child_processes(os.getpid()) + signal.signal(signal.SIGTERM, signal_handler) def get_db_engine(args, database_name): @@ -1258,28 +1322,7 @@ class TestCase: if proc: if proc.returncode is None: - try: - pgid = os.getpgid(proc.pid) - # NOTE: this still may leave some processes, that had been - # created by timeout(1), since it also creates new process - # group. But this should not be a problem with default - # options, since the default time for each test is 10min, - # and this is way more bigger then the timeout for each - # timeout(1) invocation. - # - # But as a workaround we are sending SIGTERM first, and - # only after SIGKILL, that way timeout(1) will have an - # ability to terminate childrens (though not always since - # signals are asynchronous). - os.killpg(pgid, signal.SIGTERM) - # This may not be enough, but this is at least something - # (and anyway it is OK to spend 0.1 second more in case of - # test timeout). - sleep(0.1) - os.killpg(pgid, signal.SIGKILL) - except OSError as e: - if e.errno != ESRCH: - raise + kill_process_group(os.getpgid(proc.pid)) if stderr: description += stderr From f9dcce6da3b9468abf5cdc27915c4c093cd231d7 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 21:23:37 +0200 Subject: [PATCH 1728/2361] tests: omit python stacktace in case of signals/server died It is simply useless and only create output that only distracts. 
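The cleanup above hinges on the fact that tests are launched with `start_new_session=True`, so every child lives in its own session and process group, and a plain `killpg` on the launcher's group never reaches it; the children have to be found via their parent PID and each child's own group killed. A self-contained sketch of that behaviour on Linux, an illustration rather than part of the test runner, with a `sleep` child standing in for a spawned test:

```python
# Demonstration only (Linux): a child started with start_new_session=True ends
# up in its own process group, so killpg() on the launcher's group misses it.
import os
import signal
import subprocess

child = subprocess.Popen(["sleep", "60"], start_new_session=True)

launcher_pgid = os.getpgid(0)        # our own process group
child_pgid = os.getpgid(child.pid)   # equals child.pid: a brand-new group
print(launcher_pgid, child_pgid, launcher_pgid != child_pgid)

# Killing our own group would leave `sleep` running; the child's group has to
# be targeted explicitly, which is what the cleanup above does per child.
os.killpg(child_pgid, signal.SIGTERM)
child.wait()
```

In the patch the children are discovered with `pgrep --parent` and each child's group is killed in turn, which is exactly the step the demo performs for a single child.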
Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 5e70b37e232..10a537f665d 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -3438,11 +3438,12 @@ def parse_args(): class Terminated(KeyboardInterrupt): - pass + def __init__(self, signal): + self.signal = signal -def signal_handler(sig, frame): - raise Terminated(f"Terminated with {sig} signal") +def signal_handler(signal, frame): + raise Terminated(signal) if __name__ == "__main__": @@ -3594,4 +3595,14 @@ if __name__ == "__main__": if args.replace_replicated_with_shared: args.s3_storage = True - main(args) + try: + main(args) + except ServerDied as e: + print(f"{e}", file=sys.stderr) + sys.exit(1) + except Terminated as e: + print(f"Terminated with {e.signal} signal", file=sys.stderr) + sys.exit(128 + e.signal) + except KeyboardInterrupt: + print("Interrupted") + sys.exit(128 + signal.SIGINT) From a478ad24a96b28a5cab77c01e77d5d510cddfabb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 21:26:15 +0200 Subject: [PATCH 1729/2361] tests: try to catch stacktraces from client in case of test timeouts This is to catch issues like [1]. [1]: https://github.com/ClickHouse/ClickHouse/issues/67736 Signed-off-by: Azat Khuzhin --- src/Common/SignalHandlers.cpp | 1 + tests/clickhouse-test | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/Common/SignalHandlers.cpp b/src/Common/SignalHandlers.cpp index e025e49e0a3..c4358da2453 100644 --- a/src/Common/SignalHandlers.cpp +++ b/src/Common/SignalHandlers.cpp @@ -629,6 +629,7 @@ void HandledSignals::setupTerminateHandler() void HandledSignals::setupCommonDeadlySignalHandlers() { /// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime. + /// NOTE: that it is also used by clickhouse-test wrapper addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, true); #if defined(SANITIZER) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 10a537f665d..20e0ce0b150 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -368,6 +368,13 @@ def kill_process_group(pgid): end="", ) try: + # Let's try to dump stacktrace in client (useful to catch issues there) + os.killpg(pgid, signal.SIGTSTP) + # Wait some time for clickhouse utilities to gather stacktrace + if RELEASE_BUILD: + sleep(0.5) + else: + sleep(5) # NOTE: this still may leave some processes, that had been # created by timeout(1), since it also creates new process # group. But this should not be a problem with default @@ -380,9 +387,8 @@ def kill_process_group(pgid): # ability to terminate childrens (though not always since # signals are asynchronous). os.killpg(pgid, signal.SIGTERM) - # This may not be enough, but this is at least something - # (and anyway it is OK to spend 0.1 second more in case of - # test timeout). 
+ # We need minimal delay to let processes handle SIGTERM - 0.1 (this may + # not be enough, but at least something) sleep(0.1) os.killpg(pgid, signal.SIGKILL) except OSError as e: @@ -2396,7 +2402,13 @@ class BuildFlags: POLYMORPHIC_PARTS = "polymorphic-parts" +# Release and non-sanitizer build +RELEASE_BUILD = False + + def collect_build_flags(args): + global RELEASE_BUILD + result = [] value = clickhouse_execute( @@ -2421,6 +2433,8 @@ def collect_build_flags(args): elif b"RelWithDebInfo" in value or b"Release" in value: result.append(BuildFlags.RELEASE) + RELEASE_BUILD = result == [BuildFlags.RELEASE] + value = clickhouse_execute( args, "SELECT value FROM system.settings WHERE name = 'allow_deprecated_database_ordinary'", From a6ccf1986936b3cd65dd016d7259e25eb35f35d9 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 21:55:03 +0200 Subject: [PATCH 1730/2361] tests: capture stderr/stdout/debuglog after terminating test It was simply wrong before, but now, with capturing stacktrace that can take sometime it is a must. Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 67 +++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 20e0ce0b150..a8c8b3614c8 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -1318,18 +1318,35 @@ class TestCase: return None - def process_result_impl( - self, proc, stdout: str, stderr: str, debug_log: str, total_time: float - ): + def process_result_impl(self, proc, total_time: float): + if proc: + if proc.returncode is None: + kill_process_group(os.getpgid(proc.pid)) + description = "" + debug_log = "" + if os.path.exists(self.testcase_args.debug_log_file): + with open(self.testcase_args.debug_log_file, "rb") as stream: + debug_log += self.testcase_args.debug_log_file + ":\n" + debug_log += str(stream.read(), errors="replace", encoding="utf-8") + debug_log += "\n" + + stdout = "" + if os.path.exists(self.stdout_file): + with open(self.stdout_file, "rb") as stdfd: + stdout = str(stdfd.read(), errors="replace", encoding="utf-8") + + stderr = "" + if os.path.exists(self.stderr_file): + with open(self.stderr_file, "rb") as stdfd: + stderr += str(stdfd.read(), errors="replace", encoding="utf-8") + if debug_log: debug_log = "\n".join(debug_log.splitlines()[:100]) if proc: if proc.returncode is None: - kill_process_group(os.getpgid(proc.pid)) - if stderr: description += stderr if debug_log: @@ -1658,13 +1675,6 @@ class TestCase: # Whether the test timed out will be decided later pass - debug_log = "" - if os.path.exists(self.testcase_args.debug_log_file): - with open(self.testcase_args.debug_log_file, "rb") as stream: - debug_log += self.testcase_args.debug_log_file + ":\n" - debug_log += str(stream.read(), errors="replace", encoding="utf-8") - debug_log += "\n" - total_time = (datetime.now() - start_time).total_seconds() # Normalize randomized database names in stdout, stderr files. 
@@ -1716,17 +1726,7 @@ class TestCase: "https://localhost:8443/", ) - stdout = "" - if os.path.exists(self.stdout_file): - with open(self.stdout_file, "rb") as stdfd: - stdout = str(stdfd.read(), errors="replace", encoding="utf-8") - - stderr = "" - if os.path.exists(self.stderr_file): - with open(self.stderr_file, "rb") as stdfd: - stderr += str(stdfd.read(), errors="replace", encoding="utf-8") - - return proc, stdout, stderr, debug_log, total_time + return proc, total_time def run(self, args, suite, client_options, server_logs_level): start_time = datetime.now() @@ -1758,14 +1758,14 @@ class TestCase: if not is_valid_utf_8(self.case_file) or ( self.reference_file and not is_valid_utf_8(self.reference_file) ): - proc, stdout, stderr, debug_log, total_time = self.run_single_test( + proc, total_time = self.run_single_test( server_logs_level, client_options ) - result = self.process_result_impl( - proc, stdout, stderr, debug_log, total_time + result = self.process_result_impl(proc, total_time) + result.check_if_need_retry( + args, result.description, result.description, self.runs_count ) - result.check_if_need_retry(args, stdout, stderr, self.runs_count) # to avoid breaking CSV parser result.description = result.description.replace("\0", "") else: @@ -1783,17 +1783,16 @@ class TestCase: ): ( proc, - stdout, - stderr, - debug_log, total_time, ) = self.run_single_test(server_logs_level, client_options) - result = self.process_result_impl( - proc, stdout, stderr, debug_log, total_time - ) + result = self.process_result_impl(proc, total_time) + result.check_if_need_retry( - args, stdout, stderr, self.runs_count + args, + result.description, + result.description, + self.runs_count, ) # to avoid breaking CSV parser result.description = result.description.replace("\0", "") From b76fb165d11a7d39b36b8f4e13355c2488ab9e58 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 3 Aug 2024 22:13:29 +0200 Subject: [PATCH 1731/2361] tests: fix pylint issue in clickhouse_execute_http() Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a8c8b3614c8..239375d7fec 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -267,7 +267,7 @@ def clickhouse_execute_http( max_http_retries=5, retry_error_codes=False, ): - if args.secure: + if base_args.secure: client = http.client.HTTPSConnection( host=base_args.tcp_host, port=base_args.http_port, timeout=timeout ) From ef7d12db6625b0a4e2cdf7ae5b6eb192e5d773af Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 5 Aug 2024 19:51:30 +0200 Subject: [PATCH 1732/2361] tests: change the process group earlier to avoid killing self Previously it was possible to have original pgid from the spawned threads, that could lead to killing the caller script and in case of CI it could be init process [1]. [1]: https://s3.amazonaws.com/clickhouse-test-reports/67737/e68c9c8d16f37f6c25739076c9b071ed97952269/stress_test__asan_/stress_test_run_21.txt Repro: $ echo "SELECT '1" > tests/queries/0_stateless/00001_select_1.sql # break the test $ cat /tmp/test.sh ./tests/clickhouse-test 0001_select --test-runs 3 --max-failures-chain 1 --no-random-settings --no-random-merge-tree-settings Before this change: $ /tmp/test.sh Using queries from '/src/ch/worktrees/clickhouse-upstream/tests/queries' directory Connecting to ClickHouse server... 
OK Connected to server 24.8.1.1 @ bef896ce143ea4e0464c9829de6277ba06cc1a53 mt/rename-without-lock-v2 Running 3 stateless tests (MainProcess). 00001_select_1: [ FAIL ] Reason: return code: 62 Code: 62. DB::Exception: Syntax error: failed at position 8 (''1; '): '1; . Single quoted string is not closed: ''1; '. (SYNTAX_ERROR) , result: stdout: Database: test_hz2zwymr Child processes of 13041: 13042 python3 ./tests/clickhouse-test 0001_select --test-runs 3 --max-failures-chain 1 --no-random-settings --no-random-merge-tree-settings Killing process group 13040 Processes in process group 13040: 13040 -bash 13042 python3 ./tests/clickhouse-test 0001_select --test-runs 3 --max-failures-chain 1 --no-random-settings --no-random-merge-tree-settings [2]+ Stopped /tmp/test.sh [1]$ Process group 13040 should be killed Max failures chain [2]+ Killed /tmp/test.sh After: $ /tmp/test.sh Using queries from '/src/ch/worktrees/clickhouse-upstream/tests/queries' directory Connecting to ClickHouse server... OK Connected to server 24.8.1.1 @ bef896ce143ea4e0464c9829de6277ba06cc1a53 mt/rename-without-lock-v2 Running 3 stateless tests (MainProcess). 00001_select_1: [ FAIL ] Reason: return code: 62 Code: 62. DB::Exception: Syntax error: failed at position 8 (''1; '): '1; . Single quoted string is not closed: ''1; '. (SYNTAX_ERROR) , result: stdout: Database: test_urz6rk5z Child processes of 9782: 9785 python3 ./tests/clickhouse-test 0001_select --test-runs 3 --max-failures-chain 1 --no-random-settings --no-random-merge-tree-settings Max failures chain Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 239375d7fec..dfcef86cf7e 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -3460,16 +3460,17 @@ def signal_handler(signal, frame): if __name__ == "__main__": + # Move to a new process group and kill it at exit so that we don't have any + # infinite tests processes left + # (new process group is required to avoid killing some parent processes) + os.setpgid(0, 0) + stop_time = None exit_code = multiprocessing.Value("i", 0) server_died = multiprocessing.Event() multiprocessing_manager = multiprocessing.Manager() restarted_tests = multiprocessing_manager.list() - # Move to a new process group and kill it at exit so that we don't have any - # infinite tests processes left - # (new process group is required to avoid killing some parent processes) - os.setpgid(0, 0) signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGHUP, signal_handler) From 8ce23ff1136c2de03348af92a595620eee703d9a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Aug 2024 16:28:17 +0200 Subject: [PATCH 1733/2361] tests: increase delay co capture client stacktraces for sanitizers build 5 seconds is too small and not enough to print even few frames. 
[1]: https://s3.amazonaws.com/clickhouse-test-reports/67737/9658be5eea8351655dd3ea77b8c1d4717bac7999/stress_test__ubsan_.html Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index dfcef86cf7e..46d0f9e007e 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -374,7 +374,7 @@ def kill_process_group(pgid): if RELEASE_BUILD: sleep(0.5) else: - sleep(5) + sleep(10) # NOTE: this still may leave some processes, that had been # created by timeout(1), since it also creates new process # group. But this should not be a problem with default From 7c366a040fad26f7380adffb38c990f05f629c6d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Aug 2024 16:33:44 +0200 Subject: [PATCH 1734/2361] ci: use bash arrays to pass opts to clickhouse-test for stateless/stateful Signed-off-by: Azat Khuzhin --- docker/test/stateful/run.sh | 24 +++++++++++++++++------- docker/test/stateless/run.sh | 20 +++++++++++++++----- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 8e2f1890f89..1ad1f73395e 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -232,15 +232,25 @@ function run_tests() set +e + TEST_ARGS=( + -j 2 + --testname + --shard + --zookeeper + --check-zookeeper-session + --no-stateless + --hung-check + --print-time + "${ADDITIONAL_OPTIONS[@]}" + "$SKIP_TESTS_OPTION" + ) if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then - clickhouse-test --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \ - --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \ - -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \ - "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt - else - clickhouse-test -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \ - "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt + TEST_ARGS+=( + --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" + --no-parallel-replicas + ) fi + clickhouse-test "${TEST_ARGS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt set -e } diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index ea32df23af0..bcfc2020696 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -264,11 +264,21 @@ function run_tests() TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 
8400 : MAX_RUN_TIME - 800)) START_TIME=${SECONDS} set +e - timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s \ - clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ - --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \ - | ts '%Y-%m-%d %H:%M:%S' \ - | tee -a test_output/test_result.txt + + TEST_ARGS=( + --testname + --shard + --zookeeper + --check-zookeeper-session + --hung-check + --print-time + --no-drop-if-fail + --test-runs "$NUM_TRIES" + "${ADDITIONAL_OPTIONS[@]}" + ) + timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s clickhouse-test "${TEST_ARGS[@]}" 2>&1 \ + | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a test_output/test_result.txt set -e DURATION=$((SECONDS - START_TIME)) From 72bd43a309f8e327b7e252a9866dabd2496c26af Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Aug 2024 16:36:05 +0200 Subject: [PATCH 1735/2361] tests: do not capture client stacktraces in stress tests They are too uncontrollable, and likely will leave some clients [1]. [1]: https://s3.amazonaws.com/clickhouse-test-reports/67737/9658be5eea8351655dd3ea77b8c1d4717bac7999/stress_test__ubsan_.html Signed-off-by: Azat Khuzhin --- docker/test/stateful/run.sh | 1 + docker/test/stateless/run.sh | 1 + tests/clickhouse-test | 28 +++++++++++++++++++++------- 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 1ad1f73395e..3a4f0d97993 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -241,6 +241,7 @@ function run_tests() --no-stateless --hung-check --print-time + --capture-client-stacktrace "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" ) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index bcfc2020696..063195181a8 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -273,6 +273,7 @@ function run_tests() --hung-check --print-time --no-drop-if-fail + --capture-client-stacktrace --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" ) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 46d0f9e007e..88ff6753a8f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -358,7 +358,13 @@ def clickhouse_execute_json( return rows +# Should we capture client's stacktraces via SIGTSTP +CAPTURE_CLIENT_STACKTRACE = False + + def kill_process_group(pgid): + global CAPTURE_CLIENT_STACKTRACE + print(f"Killing process group {pgid}") print(f"Processes in process group {pgid}:") print( @@ -368,13 +374,14 @@ def kill_process_group(pgid): end="", ) try: - # Let's try to dump stacktrace in client (useful to catch issues there) - os.killpg(pgid, signal.SIGTSTP) - # Wait some time for clickhouse utilities to gather stacktrace - if RELEASE_BUILD: - sleep(0.5) - else: - sleep(10) + if CAPTURE_CLIENT_STACKTRACE: + # Let's try to dump stacktrace in client (useful to catch issues there) + os.killpg(pgid, signal.SIGTSTP) + # Wait some time for clickhouse utilities to gather stacktrace + if RELEASE_BUILD: + sleep(0.5) + else: + sleep(10) # NOTE: this still may leave some processes, that had been # created by timeout(1), since it also creates new process # group. 
But this should not be a problem with default @@ -3446,6 +3453,11 @@ def parse_args(): default="./client.fatal.log", help="Path to file for fatal logs from client", ) + parser.add_argument( + "--capture-client-stacktrace", + action="store_true", + help="Capture stacktraces from clickhouse-client/local on errors", + ) return parser.parse_args() @@ -3488,6 +3500,8 @@ if __name__ == "__main__": ) sys.exit(1) + CAPTURE_CLIENT_STACKTRACE = args.capture_client_stacktrace + # Autodetect the directory with queries if not specified if args.queries is None: args.queries = "queries" From 2074485083e8860aafc36ac7886a54a75e144468 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Tue, 6 Aug 2024 14:25:02 +0000 Subject: [PATCH 1736/2361] Fix partial filtering in `filterBlockWithPredicate` --- src/Storages/MergeTree/MergeTreeData.cpp | 3 +- src/Storages/VirtualColumnUtils.cpp | 86 +++++++++---------- src/Storages/VirtualColumnUtils.h | 16 ++-- ...03217_read_rows_in_system_tables.reference | 10 +++ .../03217_read_rows_in_system_tables.sql | 34 ++++++++ 5 files changed, 97 insertions(+), 52 deletions(-) create mode 100644 tests/queries/0_stateless/03217_read_rows_in_system_tables.reference create mode 100644 tests/queries/0_stateless/03217_read_rows_in_system_tables.sql diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index ce27ad24e10..b24d7968b61 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6914,7 +6914,8 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( const auto * predicate = filter_dag->getOutputs().at(0); // Generate valid expressions for filtering - VirtualColumnUtils::filterBlockWithPredicate(predicate, virtual_columns_block, query_context); + VirtualColumnUtils::filterBlockWithPredicate( + predicate, virtual_columns_block, query_context, /*allow_filtering_with_partial_predicate =*/true); rows = virtual_columns_block.rows(); part_name_column = virtual_columns_block.getByName("_part").column; diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 90c2c7f93c1..b40378250bb 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -1,51 +1,46 @@ -#include +#include + #include #include +#include +#include +#include +#include +#include #include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include -#include #include +#include #include +#include #include - -#include #include -#include #include +#include +#include #include #include - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include +#include +#include #include #include +#include #include -#include #include - -#include -#include #include -#include "Functions/FunctionsLogical.h" -#include "Functions/IFunction.h" -#include "Functions/IFunctionAdaptors.h" -#include "Functions/indexHint.h" -#include -#include -#include -#include namespace DB @@ -273,10 +268,7 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) } static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( - const ActionsDAG::Node * node, - const Block * allowed_inputs, - ActionsDAG::Nodes & additional_nodes, - bool allow_non_deterministic_functions) + const ActionsDAG::Node * node, const Block * allowed_inputs, ActionsDAG::Nodes & additional_nodes, bool allow_partial_result) { if (node->type == 
ActionsDAG::ActionType::FUNCTION) { @@ -285,13 +277,14 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto & node_copy = additional_nodes.emplace_back(*node); node_copy.children.clear(); for (const auto * child : node->children) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) + if (const auto * child_copy + = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_partial_result)) node_copy.children.push_back(child_copy); - /// Expression like (now_allowed AND allowed) is not allowed if allow_non_deterministic_functions = true. This is important for + /// Expression like (now_allowed AND allowed) is not allowed if allow_partial_result = true. This is important for /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply /// trivial count. - else if (!allow_non_deterministic_functions) + else if (!allow_partial_result) return nullptr; if (node_copy.children.empty()) @@ -300,7 +293,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( if (node_copy.children.size() == 1) { const ActionsDAG::Node * res = node_copy.children.front(); - /// Expression like (not_allowed AND 256) can't be resuced to (and(256)) because AND requires + /// Expression like (not_allowed AND 256) can't be reduced to (and(256)) because AND requires /// at least two arguments; also it can't be reduced to (256) because result type is different. if (!res->result_type->equals(*node->result_type)) { @@ -318,7 +311,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { auto & node_copy = additional_nodes.emplace_back(*node); for (auto & child : node_copy.children) - if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions); !child) + if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_partial_result); !child) return nullptr; return &node_copy; @@ -332,7 +325,8 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto index_hint_dag = index_hint->getActions().clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag.getOutputs()) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_non_deterministic_functions)) + if (const auto * child_copy + = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_partial_result)) atoms.push_back(child_copy); if (!atoms.empty()) @@ -366,22 +360,24 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions) +std::optional +splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_partial_result) { if (!predicate) return {}; ActionsDAG::Nodes additional_nodes; - const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_non_deterministic_functions); + const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_partial_result); if (!res) return {}; return ActionsDAG::cloneSubDAG({res}, true); } -void filterBlockWithPredicate(const ActionsDAG::Node * predicate, 
Block & block, ContextPtr context) +void filterBlockWithPredicate( + const ActionsDAG::Node * predicate, Block & block, ContextPtr context, bool allow_filtering_with_partial_predicate) { - auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false); + auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_partial_result=*/allow_filtering_with_partial_predicate); if (dag) filterBlockWithExpression(buildFilterExpression(std::move(*dag), context), block); } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index abf46dc23a4..f76cf2cad76 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -26,9 +26,13 @@ namespace VirtualColumnUtils /// /// Otherwise calling filter*() outside applyFilters() will throw "Not-ready Set is passed" /// if there are subqueries. +/// +/// Similar to filterBlockWithExpression(buildFilterExpression(splitFilterDagForAllowedInputs(...)))./// Similar to filterBlockWithQuery, but uses ActionsDAG as a predicate. +/// Basically it is filterBlockWithDAG(splitFilterDagForAllowedInputs). +/// If allow_filtering_with_partial_predicate is true, then the filtering will be done even if some part of the predicate +/// cannot be evaluated using the columns from the block. +void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context, bool allow_filtering_with_partial_predicate = true); -/// Similar to filterBlockWithExpression(buildFilterExpression(splitFilterDagForAllowedInputs(...))). -void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. Block should contain all the required columns. ExpressionActionsPtr buildFilterExpression(ActionsDAG dag, ContextPtr context); @@ -41,15 +45,15 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. -/// When allow_non_deterministic_functions is true then even if the predicate contains non-deterministic -/// functions, we still allow to extract a part of the predicate, otherwise we return nullptr. -/// allow_non_deterministic_functions must be false when we are going to use the result to filter parts in +/// When allow_partial_result is false, then the result will be empty if any part of if cannot be evaluated deterministically +/// on the given inputs. +/// allow_partial_result must be false when we are going to use the result to filter parts in /// MergeTreeData::totalRowsByPartitionPredicateImp. For example, if the query is /// `SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1` /// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is /// non-deterministic. If we still extract the part `_partition_id = '0'` for filtering parts, then trivial /// count optimization will be mistakenly applied to the query. 
-std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true); +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_partial_result = true); /// Extract from the input stream a set of `name` column values template diff --git a/tests/queries/0_stateless/03217_read_rows_in_system_tables.reference b/tests/queries/0_stateless/03217_read_rows_in_system_tables.reference new file mode 100644 index 00000000000..b21ead49b1e --- /dev/null +++ b/tests/queries/0_stateless/03217_read_rows_in_system_tables.reference @@ -0,0 +1,10 @@ +information_schema tables +default test_replica_1 r1 +Expression ((Project names + Projection)) + Aggregating + Expression (Before GROUP BY) + ReadFromMerge + Filter (( + ( + ))) + ReadFromMergeTree (default.test_replica_1) +1 1 +1 1 diff --git a/tests/queries/0_stateless/03217_read_rows_in_system_tables.sql b/tests/queries/0_stateless/03217_read_rows_in_system_tables.sql new file mode 100644 index 00000000000..3bea04ccccf --- /dev/null +++ b/tests/queries/0_stateless/03217_read_rows_in_system_tables.sql @@ -0,0 +1,34 @@ +SELECT database, table FROM system.tables WHERE database = 'information_schema' AND table = 'tables'; + +-- To verify StorageSystemReplicas applies the filter properly +CREATE TABLE test_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217/test_replica', 'r1') + ORDER BY x; +CREATE TABLE test_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217/test_replica', 'r2') + ORDER BY x; + +SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = 'test_replica_1' AND replica_name = 'r1'; + + +-- To verify StorageMerge +CREATE TABLE all_replicas (x UInt32) + ENGINE = Merge(currentDatabase(), 'test_replica_*'); + +INSERT INTO test_replica_1 SELECT number AS x FROM numbers(10); +SYSTEM SYNC REPLICA test_replica_2; +-- If the filter not applied, then the plan will show both replicas +EXPLAIN SELECT _table, count() FROM all_replicas WHERE _table = 'test_replica_1' AND x >= 0 GROUP BY _table; + +SYSTEM FLUSH LOGS; +-- argMin-argMax make the test repeatable + +-- StorageSystemTables +SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND query LIKE '%SELECT database, table FROM system.tables WHERE database = \'information_schema\' AND table = \'tables\';' + AND type = 'QueryFinish'; + +-- StorageSystemReplicas +SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND query LIKE '%SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = \'test_replica_1\' AND replica_name = \'r1\';' + AND type = 'QueryFinish'; From f20cfdb54ea9ee577f9747e1e2d99af2c0e9e250 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 14:49:20 +0000 Subject: [PATCH 1737/2361] Cosmetics III --- src/Storages/Statistics/Statistics.cpp | 2 +- src/Storages/Statistics/Statistics.h | 6 +++--- src/Storages/Statistics/StatisticsCountMinSketch.cpp | 12 ++++++------ src/Storages/Statistics/StatisticsCountMinSketch.h | 6 +++--- src/Storages/Statistics/StatisticsTDigest.cpp | 10 +++++----- src/Storages/Statistics/StatisticsTDigest.h | 4 ++-- src/Storages/Statistics/StatisticsUniq.cpp | 10 +++++----- 
src/Storages/Statistics/StatisticsUniq.h | 4 ++-- 8 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 771304405a6..e3f9fcc8192 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -224,7 +224,7 @@ MergeTreeStatisticsFactory & MergeTreeStatisticsFactory::instance() return instance; } -void MergeTreeStatisticsFactory::validate(const ColumnStatisticsDescription & stats, DataTypePtr data_type) const +void MergeTreeStatisticsFactory::validate(const ColumnStatisticsDescription & stats, const DataTypePtr & data_type) const { for (const auto & [type, desc] : stats.types_to_desc) { diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index 16f0c67eabd..c6a45e68aa6 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -87,10 +87,10 @@ class MergeTreeStatisticsFactory : private boost::noncopyable public: static MergeTreeStatisticsFactory & instance(); - void validate(const ColumnStatisticsDescription & stats, DataTypePtr data_type) const; + void validate(const ColumnStatisticsDescription & stats, const DataTypePtr & data_type) const; - using Validator = std::function; - using Creator = std::function; + using Validator = std::function; + using Creator = std::function; ColumnStatisticsPtr get(const ColumnStatisticsDescription & stats) const; ColumnsStatistics getMany(const ColumnsDescription & columns) const; diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index dce5b39ae56..0dc01f5fcf0 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -25,7 +25,7 @@ extern const int ILLEGAL_STATISTICS; static constexpr auto num_hashes = 7uz; static constexpr auto num_buckets = 2718uz; -StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & description, DataTypePtr data_type_) +StatisticsCountMinSketch::StatisticsCountMinSketch(const SingleStatisticsDescription & description, const DataTypePtr & data_type_) : IStatistics(description) , sketch(num_hashes, num_buckets) , data_type(data_type_) @@ -84,15 +84,15 @@ void StatisticsCountMinSketch::deserialize(ReadBuffer & buf) } -void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*description*/, DataTypePtr data_type) +void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*description*/, const DataTypePtr & data_type) { - data_type = removeNullable(data_type); - data_type = removeLowCardinalityAndNullable(data_type); - if (!data_type->isValueRepresentedByNumber() && !isStringOrFixedString(data_type)) + DataTypePtr inner_data_type = removeNullable(data_type); + inner_data_type = removeLowCardinalityAndNullable(inner_data_type); + if (!inner_data_type->isValueRepresentedByNumber() && !isStringOrFixedString(inner_data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); } -StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type) +StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type) { return std::make_shared(description, data_type); } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.h 
b/src/Storages/Statistics/StatisticsCountMinSketch.h index af01408f2a3..d1de1a3aea5 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.h +++ b/src/Storages/Statistics/StatisticsCountMinSketch.h @@ -14,7 +14,7 @@ namespace DB class StatisticsCountMinSketch : public IStatistics { public: - StatisticsCountMinSketch(const SingleStatisticsDescription & description, DataTypePtr data_type_); + StatisticsCountMinSketch(const SingleStatisticsDescription & description, const DataTypePtr & data_type_); Float64 estimateEqual(const Field & val) const override; @@ -31,8 +31,8 @@ private: }; -void countMinSketchStatisticsValidator(const SingleStatisticsDescription & description, DataTypePtr data_type); -StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type); +void countMinSketchStatisticsValidator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); +StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); } diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index 73ab6c84b4e..1cf92fea24b 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -57,15 +57,15 @@ Float64 StatisticsTDigest::estimateEqual(const Field & val) const throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); } -void tdigestStatisticsValidator(const SingleStatisticsDescription & /*description*/, DataTypePtr data_type) +void tdigestStatisticsValidator(const SingleStatisticsDescription & /*description*/, const DataTypePtr & data_type) { - data_type = removeNullable(data_type); - data_type = removeLowCardinalityAndNullable(data_type); - if (!data_type->isValueRepresentedByNumber()) + DataTypePtr inner_data_type = removeNullable(data_type); + inner_data_type = removeLowCardinalityAndNullable(inner_data_type); + if (!inner_data_type->isValueRepresentedByNumber()) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' do not support type {}", data_type->getName()); } -StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr /*data_type*/) +StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & /*data_type*/) { return std::make_shared(description); } diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index 47d6c93f64c..2b37799d07b 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -23,7 +23,7 @@ private: QuantileTDigest t_digest; }; -void tdigestStatisticsValidator(const SingleStatisticsDescription & description, DataTypePtr data_type); -StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type); +void tdigestStatisticsValidator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); +StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); } diff --git a/src/Storages/Statistics/StatisticsUniq.cpp b/src/Storages/Statistics/StatisticsUniq.cpp index e737f9987a5..07311b5b86d 100644 --- a/src/Storages/Statistics/StatisticsUniq.cpp +++ b/src/Storages/Statistics/StatisticsUniq.cpp @@ -52,15 +52,15 @@ UInt64 
StatisticsUniq::estimateCardinality() const return column->getUInt(0); } -void uniqStatisticsValidator(const SingleStatisticsDescription & /*description*/, DataTypePtr data_type) +void uniqStatisticsValidator(const SingleStatisticsDescription & /*description*/, const DataTypePtr & data_type) { - data_type = removeNullable(data_type); - data_type = removeLowCardinalityAndNullable(data_type); - if (!data_type->isValueRepresentedByNumber()) + DataTypePtr inner_data_type = removeNullable(data_type); + inner_data_type = removeLowCardinalityAndNullable(inner_data_type); + if (!inner_data_type->isValueRepresentedByNumber()) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'uniq' do not support type {}", data_type->getName()); } -StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type) +StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type) { return std::make_shared(description, data_type); } diff --git a/src/Storages/Statistics/StatisticsUniq.h b/src/Storages/Statistics/StatisticsUniq.h index 6b511d4f496..1fdcab8bd89 100644 --- a/src/Storages/Statistics/StatisticsUniq.h +++ b/src/Storages/Statistics/StatisticsUniq.h @@ -27,7 +27,7 @@ private: }; -void uniqStatisticsValidator(const SingleStatisticsDescription & description, DataTypePtr data_type); -StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & description, DataTypePtr data_type); +void uniqStatisticsValidator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); +StatisticsPtr uniqStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); } From f2591bd1a6401337de0e3bf8d5c1fa93fd1b9394 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 6 Aug 2024 16:55:04 +0200 Subject: [PATCH 1738/2361] CI: CiBuddy with channel dispatcher --- tests/ci/ci_buddy.py | 102 ++++++++++++++++++++++++++++--------- tests/ci/create_release.py | 4 +- 2 files changed, 81 insertions(+), 25 deletions(-) diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index f0e73e925fe..07f318207a4 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -1,7 +1,7 @@ import argparse import json import os -from typing import Union, Dict +from typing import Union, Dict, List import boto3 import requests @@ -9,20 +9,44 @@ from botocore.exceptions import ClientError from pr_info import PRInfo from ci_config import CI +from ci_utils import WithIter + + +class Channels(metaclass=WithIter): + # Channel names must match json keys in ParameterStore + ALERTS = "alerts-channel" + INFO = "info-channel" + DRY_RUN = "dry-ryn-channel" + DEFAULT = "default" class CIBuddy: + Channels = Channels _HEADERS = {"Content-Type": "application/json"} def __init__(self, dry_run=False): self.repo = os.getenv("GITHUB_REPOSITORY", "") self.dry_run = dry_run res = self._get_webhooks() - self.test_channel = "" - self.dev_ci_channel = "" + self.channels = {} if res: - self.test_channel = json.loads(res)["test_channel"] - self.dev_ci_channel = json.loads(res)["ci_channel"] + channels = json.loads(res) + for channel in Channels: + if channel in channels: + self.channels[channel] = channels[channel] + + for channel in Channels: + if channel not in self.channels: + if Channels.DEFAULT in self.channels: + print( + f"ERROR: missing config for channel [{channel}] - will use default channel instead" + ) + self.channels[channel] = self.channels[Channels.DEFAULT] + else: + print( + f"ERROR: missing 
config for channel [{channel}] - will disable notification" + ) + self.channels[channel] = "" self.job_name = os.getenv("CHECK_NAME", "unknown") pr_info = PRInfo() self.pr_number = pr_info.number @@ -63,22 +87,33 @@ class CIBuddy: return json_string - def post(self, message, dry_run=None): - if dry_run is None: - dry_run = self.dry_run - print(f"Posting slack message, dry_run [{dry_run}]") - if dry_run: - url = self.test_channel + def post(self, message: str, channels: List[str]) -> None: + print(f"Posting slack message, dry_run [{self.dry_run}]") + if self.dry_run: + urls = [self.channels[Channels.DRY_RUN]] else: - url = self.dev_ci_channel + urls = [] + for channel in channels: + url = self.channels[channel] + if url: + urls.append(url) + else: + print(f"WARNING: no channel config for [{channel}] - skip") data = {"text": message} try: - requests.post(url, headers=self._HEADERS, data=json.dumps(data), timeout=10) + for url in urls: + requests.post( + url, headers=self._HEADERS, data=json.dumps(data), timeout=10 + ) except Exception as e: print(f"ERROR: Failed to post message, ex {e}") def _post_formatted( - self, title: str, body: Union[Dict, str], with_wf_link: bool + self, + title: str, + body: Union[Dict, str], + with_wf_link: bool, + channels: Union[List[str], str], ) -> None: message = title if isinstance(body, dict): @@ -96,31 +131,49 @@ class CIBuddy: run_id = os.getenv("GITHUB_RUN_ID", "") if with_wf_link and run_id: message += f" *workflow*: \n" - self.post(message) + self.post( + message, channels=[channels] if isinstance(channels, str) else channels + ) def post_info( - self, title: str, body: Union[Dict, str], with_wf_link: bool = True + self, + title: str, + body: Union[Dict, str], + with_wf_link: bool = True, + channels: Union[List[str], str] = Channels.INFO, ) -> None: title_extended = f":white_circle: *{title}*\n\n" - self._post_formatted(title_extended, body, with_wf_link) + self._post_formatted(title_extended, body, with_wf_link, channels=channels) def post_done( - self, title: str, body: Union[Dict, str], with_wf_link: bool = True + self, + title: str, + body: Union[Dict, str], + with_wf_link: bool = True, + channels: Union[List[str], str] = Channels.INFO, ) -> None: title_extended = f":white_check_mark: *{title}*\n\n" - self._post_formatted(title_extended, body, with_wf_link) + self._post_formatted(title_extended, body, with_wf_link, channels=channels) def post_warning( - self, title: str, body: Union[Dict, str], with_wf_link: bool = True + self, + title: str, + body: Union[Dict, str], + with_wf_link: bool = True, + channels: Union[List[str], str] = Channels.ALERTS, ) -> None: title_extended = f":warning: *{title}*\n\n" - self._post_formatted(title_extended, body, with_wf_link) + self._post_formatted(title_extended, body, with_wf_link, channels=channels) def post_critical( - self, title: str, body: Union[Dict, str], with_wf_link: bool = True + self, + title: str, + body: Union[Dict, str], + with_wf_link: bool = True, + channels: Union[List[str], str] = Channels.ALERTS, ) -> None: title_extended = f":black_circle: *{title}*\n\n" - self._post_formatted(title_extended, body, with_wf_link) + self._post_formatted(title_extended, body, with_wf_link, channels=channels) def post_job_error( self, @@ -129,6 +182,7 @@ class CIBuddy: with_instance_info: bool = True, with_wf_link: bool = True, critical: bool = False, + channel: Union[List[str], str] = Channels.ALERTS, ) -> None: instance_id, instance_type = "unknown", "unknown" if with_instance_info: @@ -159,7 +213,7 @@ class 
CIBuddy: run_id = os.getenv("GITHUB_RUN_ID", "") if with_wf_link and run_id: message += f" *workflow*: \n" - self.post(message) + self.post(message, channels=[channel] if isinstance(channel, str) else channel) def parse_args(): diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index b4e08f29dbe..27eba273ce0 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -821,7 +821,9 @@ if __name__ == "__main__": else: title = "Failed: " + title CIBuddy(dry_run=args.dry_run).post_critical( - title, dataclasses.asdict(release_info) + title, + dataclasses.asdict(release_info), + channels=[CIBuddy.Channels.ALERTS, CIBuddy.Channels.INFO], ) if args.set_progress_started: From 72ead6e8432daa1e643a5b0cc8559a4ff4d9efd0 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 6 Aug 2024 14:56:42 +0000 Subject: [PATCH 1739/2361] Cleanup. --- src/Storages/IStorage.h | 6 ++-- src/Storages/MergeTree/MutateTask.cpp | 34 +++++++++---------- ...61_lightweight_delete_projection.reference | 4 +-- .../03161_lightweight_delete_projection.sql | 4 +-- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index d2cdc5af34f..0477a08b0d2 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -259,12 +259,12 @@ public: /// Return true if there is at least one part containing lightweight deleted mask. virtual bool hasLightweightDeletedMask() const { return false; } - /// Return true if storage has any projection. - virtual bool hasProjection() const { return false; } - /// Return true if storage can execute lightweight delete mutations. virtual bool supportsLightweightDelete() const { return false; } + /// Return true if storage has any projection. + virtual bool hasProjection() const { return false; } + /// Return true if storage can execute 'DELETE FROM' mutations. This is different from lightweight delete /// because those are internally translated into 'ALTER UDPATE' mutations. 
virtual bool supportsDelete() const { return false; } diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 8b5829eb058..3d9f49c9a7a 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -659,10 +659,8 @@ static NameSet collectFilesToSkip( const Block & updated_header, const std::set & indices_to_recalc, const String & mrk_extension, - const std::set & projections_to_recalc, - const std::set & stats_to_recalc, - const StorageMetadataPtr & metadata_snapshot, - bool skip_all_projections) + const std::set & projections_to_skip, + const std::set & stats_to_recalc) { NameSet files_to_skip = source_part->getFileNamesWithoutChecksums(); @@ -686,16 +684,8 @@ static NameSet collectFilesToSkip( } } - if (skip_all_projections) - { - for (const auto & projection : metadata_snapshot->getProjections()) - files_to_skip.insert(projection.getDirectoryName()); - } - else - { - for (const auto & projection : projections_to_recalc) - files_to_skip.insert(projection->getDirectoryName()); - } + for (const auto & projection : projections_to_skip) + files_to_skip.insert(projection->getDirectoryName()); for (const auto & stat : stats_to_recalc) files_to_skip.insert(stat->getFileName() + STATS_FILE_SUFFIX); @@ -2325,6 +2315,9 @@ bool MutateTask::prepare() lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP || lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW; + std::set projections_to_skip_container; + auto * projections_to_skip = &projections_to_skip_container; + bool should_create_projections = !(lightweight_delete_mode && lightweight_delete_drops_projections); /// Under lightweight delete mode, if option is drop, projections_to_recalc should be empty. 
if (should_create_projections) @@ -2333,6 +2326,13 @@ bool MutateTask::prepare() ctx->source_part, ctx->metadata_snapshot, ctx->materialized_projections); + + projections_to_skip = &ctx->projections_to_recalc; + } + else + { + for (const auto & projection : ctx->metadata_snapshot->getProjections()) + projections_to_skip->insert(&projection); } ctx->stats_to_recalc = MutationHelpers::getStatisticsToRecalculate(ctx->metadata_snapshot, ctx->materialized_statistics); @@ -2343,10 +2343,8 @@ bool MutateTask::prepare() ctx->updated_header, ctx->indices_to_recalc, ctx->mrk_extension, - ctx->projections_to_recalc, - ctx->stats_to_recalc, - ctx->metadata_snapshot, - !should_create_projections); + *projections_to_skip, + ctx->stats_to_recalc); ctx->files_to_rename = MutationHelpers::collectFilesForRenames( ctx->source_part, diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index eef0c5a41b5..8edf541c2a0 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -40,7 +40,7 @@ all_3_3_0_4 SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; p1 all_3_3_0_4 p2 all_3_3_0_4 wide part @@ -85,6 +85,6 @@ all_3_3_0_4 SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; p1 all_3_3_0_4 p2 all_3_3_0_4 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 618f3ac0cb8..0b05326e2c1 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -67,7 +67,7 @@ WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; -- { echoOff } @@ -136,7 +136,7 @@ WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = SELECT name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; -- { echoOff } From e1e298f14e34a519d59844417e31b7a532b0a157 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 6 Aug 2024 14:56:54 +0000 Subject: [PATCH 1740/2361] Automatic style fix --- tests/ci/changelog.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index fe47fe53a9e..39e426945d3 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -38,7 +38,7 @@ categories_preferred_order = ( "Experimental Feature", "Performance Improvement", "Improvement", - #"Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)", + # "Critical Bug Fix (crash, LOGICAL_ERROR, data loss, 
RBAC)", "Bug Fix (user-visible misbehavior in an official stable release)", "Build/Testing/Packaging Improvement", "Other", @@ -295,7 +295,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri category, ) # Map "Critical Bug Fix" to "Bug fix" category for changelog - #and "Critical Bug Fix" not in category + # and "Critical Bug Fix" not in category ): category = "Bug Fix (user-visible misbehavior in an official stable release)" From bdfaffa9d74b0405ea619330215e6c71f0b6976a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Aug 2024 16:59:23 +0200 Subject: [PATCH 1741/2361] tests: make test_distributed_inter_server_secret idempotent Signed-off-by: Azat Khuzhin --- .../test.py | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 36d7e044f1c..d74cf97e5c6 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -46,6 +46,10 @@ users = pytest.mark.parametrize( ) +def generate_query_id(): + return str(uuid.uuid4()) + + def bootstrap(): for n in list(cluster.instances.values()): n.query("DROP TABLE IF EXISTS data") @@ -268,13 +272,13 @@ def test_secure_insert_buffer_async(): n1.query("SYSTEM RELOAD CONFIG") # ensure that SELECT creates new connection (we need separate table for # this, so that separate distributed pool will be used) - query_id = uuid.uuid4().hex + query_id = generate_query_id() n1.query("SELECT * FROM dist_secure_from_buffer", user="ro", query_id=query_id) assert n1.contains_in_log( "{" + query_id + "} Connection (n2:9000): Connecting." ) - query_id = uuid.uuid4().hex + query_id = generate_query_id() n1.query( "INSERT INTO dist_secure_buffer SELECT * FROM numbers(2)", query_id=query_id ) @@ -331,7 +335,7 @@ def test_secure_disagree_insert(): @users def test_user_insecure_cluster(user, password): - id_ = "query-dist_insecure-" + user + id_ = "query-dist_insecure-" + user + "-" + generate_query_id() n1.query(f"SELECT *, '{id_}' FROM dist_insecure", user=user, password=password) assert get_query_user_info(n1, id_)[0] == [ user, @@ -342,7 +346,7 @@ def test_user_insecure_cluster(user, password): @users def test_user_secure_cluster(user, password): - id_ = "query-dist_secure-" + user + id_ = "query-dist_secure-" + user + "-" + generate_query_id() n1.query(f"SELECT *, '{id_}' FROM dist_secure", user=user, password=password) assert get_query_user_info(n1, id_)[0] == [user, user] assert get_query_user_info(n2, id_)[0] == [user, user] @@ -350,7 +354,7 @@ def test_user_secure_cluster(user, password): @users def test_per_user_inline_settings_insecure_cluster(user, password): - id_ = "query-ddl-settings-dist_insecure-" + user + id_ = "query-ddl-settings-dist_insecure-" + user + "-" + generate_query_id() n1.query( f""" SELECT *, '{id_}' FROM dist_insecure @@ -367,7 +371,7 @@ def test_per_user_inline_settings_insecure_cluster(user, password): @users def test_per_user_inline_settings_secure_cluster(user, password): - id_ = "query-ddl-settings-dist_secure-" + user + id_ = "query-ddl-settings-dist_secure-" + user + "-" + generate_query_id() n1.query( f""" SELECT *, '{id_}' FROM dist_secure @@ -386,7 +390,7 @@ def test_per_user_inline_settings_secure_cluster(user, password): @users def test_per_user_protocol_settings_insecure_cluster(user, password): - id_ = "query-protocol-settings-dist_insecure-" + user 
+ id_ = "query-protocol-settings-dist_insecure-" + user + "-" + generate_query_id() n1.query( f"SELECT *, '{id_}' FROM dist_insecure", user=user, @@ -402,7 +406,7 @@ def test_per_user_protocol_settings_insecure_cluster(user, password): @users def test_per_user_protocol_settings_secure_cluster(user, password): - id_ = "query-protocol-settings-dist_secure-" + user + id_ = "query-protocol-settings-dist_secure-" + user + "-" + generate_query_id() n1.query( f"SELECT *, '{id_}' FROM dist_secure", user=user, @@ -436,7 +440,7 @@ def test_secure_cluster_distributed_over_distributed_different_users_remote(): def test_secure_cluster_distributed_over_distributed_different_users_cluster(): - id_ = "cluster-user" + id_ = "cluster-user" + "-" + generate_query_id() n1.query( f"SELECT *, '{id_}' FROM cluster(secure, currentDatabase(), dist_secure)", user="nopass", From df2675fad0d1bcb79c8a2d7edd0c08b1da49a945 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Thu, 18 Jul 2024 12:32:16 +0200 Subject: [PATCH 1742/2361] [resubmit] add replication lag and recovery time metrics --- src/Databases/DatabaseReplicated.cpp | 45 ++++++++++---- src/Databases/DatabaseReplicated.h | 12 +++- src/Databases/DatabaseReplicatedWorker.cpp | 21 +++++++ src/Databases/DatabaseReplicatedWorker.h | 5 ++ src/Storages/System/StorageSystemClusters.cpp | 49 +++++++++++++-- src/Storages/System/StorageSystemClusters.h | 2 +- .../test_recovery_time_metric/__init__.py | 0 .../configs/config.xml | 41 +++++++++++++ .../test_recovery_time_metric/test.py | 61 +++++++++++++++++++ .../02117_show_create_table_system.reference | 2 + .../03206_replication_lag_metric.reference | 4 ++ .../03206_replication_lag_metric.sql | 11 ++++ 12 files changed, 234 insertions(+), 19 deletions(-) create mode 100644 tests/integration/test_recovery_time_metric/__init__.py create mode 100644 tests/integration/test_recovery_time_metric/configs/config.xml create mode 100644 tests/integration/test_recovery_time_metric/test.py create mode 100644 tests/queries/0_stateless/03206_replication_lag_metric.reference create mode 100644 tests/queries/0_stateless/03206_replication_lag_metric.sql diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index f127ccbc224..213c94d4d94 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -338,9 +339,12 @@ ClusterPtr DatabaseReplicated::getClusterImpl(bool all_groups) const return std::make_shared(getContext()->getSettingsRef(), shards, params); } -std::vector DatabaseReplicated::tryGetAreReplicasActive(const ClusterPtr & cluster_) const +ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) const { Strings paths; + + paths.emplace_back(fs::path(zookeeper_path) / "max_log_ptr"); + const auto & addresses_with_failover = cluster_->getShardsAddresses(); const auto & shards_info = cluster_->getShardsInfo(); for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) @@ -349,31 +353,50 @@ std::vector DatabaseReplicated::tryGetAreReplicasActive(const ClusterPtr { String full_name = getFullReplicaName(replica.database_shard_name, replica.database_replica_name); paths.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "active"); + paths.emplace_back(fs::path(zookeeper_path) / "replicas" / full_name / "log_ptr"); } } try { auto current_zookeeper = getZooKeeper(); - auto res = current_zookeeper->exists(paths); + auto zk_res = 
current_zookeeper->tryGet(paths); - std::vector statuses; - statuses.resize(paths.size()); + auto max_log_ptr_zk = zk_res[0]; + if (max_log_ptr_zk.error != Coordination::Error::ZOK) + throw Coordination::Exception(max_log_ptr_zk.error); - for (size_t i = 0; i < res.size(); ++i) - if (res[i].error == Coordination::Error::ZOK) - statuses[i] = 1; + UInt32 max_log_ptr = parse(max_log_ptr_zk.data); - return statuses; - } - catch (...) + ReplicasInfo replicas_info; + replicas_info.resize((zk_res.size() - 1) / 2); + + size_t global_replica_index = 0; + for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) + { + for (const auto & replica : addresses_with_failover[shard_index]) + { + auto replica_active = zk_res[2 * global_replica_index + 1]; + auto replica_log_ptr = zk_res[2 * global_replica_index + 2]; + + replicas_info[global_replica_index] = ReplicaInfo{ + .is_active = replica_active.error == Coordination::Error::ZOK, + .replication_lag = replica_log_ptr.error != Coordination::Error::ZNONODE ? std::optional(max_log_ptr - parse(replica_log_ptr.data)) : std::nullopt, + .recovery_time = replica.is_local && ddl_worker ? ddl_worker->getCurrentInitializationDurationMs() : 0, + }; + + ++global_replica_index; + } + } + + return replicas_info; + } catch (...) { tryLogCurrentException(log); return {}; } } - void DatabaseReplicated::fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config_ref) { const auto & config_prefix = fmt::format("named_collections.{}", collection_name); diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index 27ab262d1f1..db683be8f36 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -1,5 +1,7 @@ #pragma once +#include + #include #include #include @@ -17,6 +19,14 @@ using ZooKeeperPtr = std::shared_ptr; class Cluster; using ClusterPtr = std::shared_ptr; +struct ReplicaInfo +{ + bool is_active; + std::optional replication_lag; + UInt64 recovery_time; +}; +using ReplicasInfo = std::vector; + class DatabaseReplicated : public DatabaseAtomic { public: @@ -84,7 +94,7 @@ public: static void dropReplica(DatabaseReplicated * database, const String & database_zookeeper_path, const String & shard, const String & replica, bool throw_if_noop); - std::vector tryGetAreReplicasActive(const ClusterPtr & cluster_) const; + ReplicasInfo tryGetReplicasInfo(const ClusterPtr & cluster_) const; void renameDatabase(ContextPtr query_context, const String & new_name) override; diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index 1ef88dc03bc..4e7408aa96e 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -32,6 +32,12 @@ DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db bool DatabaseReplicatedDDLWorker::initializeMainThread() { + { + std::lock_guard lock(initialization_duration_timer_mutex); + initialization_duration_timer.emplace(); + initialization_duration_timer->start(); + } + while (!stop_flag) { try @@ -69,6 +75,10 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() initializeReplication(); initialized = true; + { + std::lock_guard lock(initialization_duration_timer_mutex); + initialization_duration_timer.reset(); + } return true; } catch (...) 
@@ -78,6 +88,11 @@ bool DatabaseReplicatedDDLWorker::initializeMainThread() } } + { + std::lock_guard lock(initialization_duration_timer_mutex); + initialization_duration_timer.reset(); + } + return false; } @@ -459,4 +474,10 @@ UInt32 DatabaseReplicatedDDLWorker::getLogPointer() const return max_id.load(); } +UInt64 DatabaseReplicatedDDLWorker::getCurrentInitializationDurationMs() const +{ + std::lock_guard lock(initialization_duration_timer_mutex); + return initialization_duration_timer ? initialization_duration_timer->elapsedMilliseconds() : 0; +} + } diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index 41edf2221b8..2309c831839 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -36,6 +36,8 @@ public: DatabaseReplicated * const database, bool committed = false); /// NOLINT UInt32 getLogPointer() const; + + UInt64 getCurrentInitializationDurationMs() const; private: bool initializeMainThread() override; void initializeReplication(); @@ -56,6 +58,9 @@ private: ZooKeeperPtr active_node_holder_zookeeper; /// It will remove "active" node when database is detached zkutil::EphemeralNodeHolderPtr active_node_holder; + + std::optional initialization_duration_timer; + mutable std::mutex initialization_duration_timer_mutex; }; } diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index 9c5c07ae49f..db1955c2e99 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -31,6 +32,8 @@ ColumnsDescription StorageSystemClusters::getColumnsDescription() {"database_shard_name", std::make_shared(), "The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database)."}, {"database_replica_name", std::make_shared(), "The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database)."}, {"is_active", std::make_shared(std::make_shared()), "The status of the Replicated database replica (for clusters that belong to a Replicated database): 1 means 'replica is online', 0 means 'replica is offline', NULL means 'unknown'."}, + {"replication_lag", std::make_shared(std::make_shared()), "The replication lag of the `Replicated` database replica (for clusters that belong to a Replicated database)."}, + {"recovery_time", std::make_shared(std::make_shared()), "The recovery time of the `Replicated` database replica (for clusters that belong to a Replicated database), in milliseconds."}, }; description.setAliases({ @@ -67,6 +70,10 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std const auto & shards_info = cluster->getShardsInfo(); const auto & addresses_with_failover = cluster->getShardsAddresses(); + ReplicasInfo replicas_info; + if (replicated) + replicas_info = replicated->tryGetReplicasInfo(name_and_cluster.second); + size_t replica_idx = 0; for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index) { @@ -114,17 +121,47 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std res_columns[res_index++]->insert(address.database_shard_name); if (columns_mask[src_index++]) res_columns[res_index++]->insert(address.database_replica_name); + if (columns_mask[src_index++]) { - std::vector is_active; - if (replicated) - is_active = replicated->tryGetAreReplicasActive(name_and_cluster.second); - - if (is_active.empty()) + if 
(replicas_info.empty()) res_columns[res_index++]->insertDefault(); else - res_columns[res_index++]->insert(is_active[replica_idx++]); + { + const auto & replica_info = replicas_info[replica_idx]; + res_columns[res_index++]->insert(replica_info.is_active); + } } + + if (columns_mask[src_index++]) + { + if (replicas_info.empty()) + res_columns[res_index++]->insertDefault(); + else + { + const auto & replica_info = replicas_info[replica_idx]; + if (replica_info.replication_lag != std::nullopt) + res_columns[res_index++]->insert(*replica_info.replication_lag); + else + res_columns[res_index++]->insertDefault(); + } + } + + if (columns_mask[src_index++]) + { + if (replicas_info.empty()) + res_columns[res_index++]->insertDefault(); + else + { + const auto & replica_info = replicas_info[replica_idx]; + if (replica_info.recovery_time != 0) + res_columns[res_index++]->insert(replica_info.recovery_time); + else + res_columns[res_index++]->insertDefault(); + } + } + + ++replica_idx; } } } diff --git a/src/Storages/System/StorageSystemClusters.h b/src/Storages/System/StorageSystemClusters.h index f6adb902f43..a5f6d551ca1 100644 --- a/src/Storages/System/StorageSystemClusters.h +++ b/src/Storages/System/StorageSystemClusters.h @@ -1,10 +1,10 @@ #pragma once +#include #include #include #include - namespace DB { diff --git a/tests/integration/test_recovery_time_metric/__init__.py b/tests/integration/test_recovery_time_metric/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_recovery_time_metric/configs/config.xml b/tests/integration/test_recovery_time_metric/configs/config.xml new file mode 100644 index 00000000000..bad9b1fa9ea --- /dev/null +++ b/tests/integration/test_recovery_time_metric/configs/config.xml @@ -0,0 +1,41 @@ + + 9000 + + + + + + + + + default + + + + + + 2181 + 1 + /var/lib/clickhouse/coordination/log + /var/lib/clickhouse/coordination/snapshots + + 20000 + + + + 1 + localhost + 9444 + + + + + + + localhost + 2181 + + 20000 + + + diff --git a/tests/integration/test_recovery_time_metric/test.py b/tests/integration/test_recovery_time_metric/test.py new file mode 100644 index 00000000000..6fcf2fad423 --- /dev/null +++ b/tests/integration/test_recovery_time_metric/test.py @@ -0,0 +1,61 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/config.xml"], + stay_alive=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_recovery_time_metric(start_cluster): + node.query( + """ + DROP DATABASE IF EXISTS rdb; + CREATE DATABASE rdb + ENGINE = Replicated('/test/test_recovery_time_metric', 'shard1', 'replica1') + """ + ) + + node.query( + """ + DROP TABLE IF EXISTS rdb.t; + CREATE TABLE rdb.t + ( + `x` UInt32 + ) + ENGINE = MergeTree + ORDER BY x + """ + ) + + node.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/rdb/t.sql"]) + + node.restart_clickhouse() + + ret = int( + node.query( + """ + SELECT recovery_time + FROM system.clusters + WHERE cluster = 'rdb' + """ + ).strip() + ) + assert ret > 0 + + node.query( + """ + DROP DATABASE rdb + """ + ) diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index cfae4fee6c2..32e8b2f4312 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ 
b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -52,6 +52,8 @@ CREATE TABLE system.clusters `database_shard_name` String, `database_replica_name` String, `is_active` Nullable(UInt8), + `replication_lag` Nullable(UInt32), + `recovery_time` Nullable(UInt64), `name` String ALIAS cluster ) ENGINE = SystemClusters diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.reference b/tests/queries/0_stateless/03206_replication_lag_metric.reference new file mode 100644 index 00000000000..02f4a7264b1 --- /dev/null +++ b/tests/queries/0_stateless/03206_replication_lag_metric.reference @@ -0,0 +1,4 @@ +0 +2 +0 +2 diff --git a/tests/queries/0_stateless/03206_replication_lag_metric.sql b/tests/queries/0_stateless/03206_replication_lag_metric.sql new file mode 100644 index 00000000000..998c332a11c --- /dev/null +++ b/tests/queries/0_stateless/03206_replication_lag_metric.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel + +CREATE DATABASE rdb1 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica1'); +CREATE DATABASE rdb2 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica2'); + +SET distributed_ddl_task_timeout = 0; +CREATE TABLE rdb1.t (id UInt32) ENGINE = ReplicatedMergeTree ORDER BY id; +SELECT replication_lag FROM system.clusters WHERE cluster IN ('rdb1', 'rdb2') ORDER BY cluster ASC, replica_num ASC; + +DROP DATABASE rdb1; +DROP DATABASE rdb2; From 86e3b35f24a449b63172015a0f768434f9f203c6 Mon Sep 17 00:00:00 2001 From: Jacob Reckhard Date: Tue, 6 Aug 2024 09:07:17 -0600 Subject: [PATCH 1743/2361] spelling fixes --- src/DataTypes/DataTypeCustomGeo.cpp | 2 +- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeCustomGeo.cpp b/src/DataTypes/DataTypeCustomGeo.cpp index d72787647c3..f90788ec403 100644 --- a/src/DataTypes/DataTypeCustomGeo.cpp +++ b/src/DataTypes/DataTypeCustomGeo.cpp @@ -24,7 +24,7 @@ void registerDataTypeDomainGeo(DataTypeFactory & factory) std::make_unique(std::make_unique())); }); - // Custom type for mulitple lines stored as Array(LineString) + // Custom type for multiple lines stored as Array(LineString) factory.registerSimpleDataTypeCustom("MultiLineString", [] { return std::make_pair(DataTypeFactory::instance().get("Array(LineString)"), diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 3d7e77f213d..71f5efca893 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -2086,6 +2086,7 @@ multiSearchFirstPositionUTF multibyte multidirectory multiline +multilinestring multiplyDecimal multipolygon multisearchany From 47953da08dbc791d521b4e722c08af08b9072b89 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 6 Aug 2024 15:08:40 +0000 Subject: [PATCH 1744/2361] fix reading of size column from missed Nested in compact parts --- src/Storages/MergeTree/MergeTreeReaderCompact.cpp | 2 +- .../MergeTree/MergeTreeReaderCompactSingleBuffer.cpp | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index d49ad61feca..69dc2e4b2bb 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -189,7 +189,7 @@ void MergeTreeReaderCompact::readData( serialization->deserializeBinaryBulkWithMultipleStreams(column, 
rows_to_read, deserialize_settings, deserialize_binary_bulk_state_map[name], nullptr); } - cache.emplace(name, column); + cache[name] = column; size_t read_rows_in_column = column->size() - column_size_before_reading; if (read_rows_in_column != rows_to_read) diff --git a/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp b/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp index 004ba4db028..649bcce1188 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompactSingleBuffer.cpp @@ -22,21 +22,20 @@ try checkNumberOfColumns(num_columns); createColumnsForReading(res_columns); - /// Use cache to avoid reading the column with the same name twice. - /// It may happen if there are empty array Nested in the part. - std::unordered_map caches; - while (read_rows < max_rows_to_read) { size_t rows_to_read = data_part_info_for_read->getIndexGranularity().getMarkRows(from_mark); + /// Use cache to avoid reading the column with the same name twice. + /// It may happen if there are empty array Nested in the part. + ISerialization::SubstreamsCache cache; + for (size_t pos = 0; pos < num_columns; ++pos) { if (!res_columns[pos]) continue; auto & column = res_columns[pos]; - auto & cache = caches[columns_to_read[pos].name]; stream->adjustRightMark(current_task_last_mark); /// Must go before seek. stream->seekToMarkAndColumn(from_mark, *column_positions[pos]); From 0bb8d1a8ede4906a1cdc75af2dbf1e190ff355fc Mon Sep 17 00:00:00 2001 From: Blargian Date: Tue, 6 Aug 2024 17:10:34 +0200 Subject: [PATCH 1745/2361] optimize formatting by 3% --- .../functions/type-conversion-functions.md | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 77dd1628fe4..a2b6e496319 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -3063,7 +3063,7 @@ Supported arguments: - Values of type (U)Int8/16/32/64/128/256. - String representations of (U)Int8/16/32/128/256. - Values of type Float32/64, including `NaN` and `Inf`. -- String representations of Float32/64, including `NaN` and `Inf`. +- String representations of Float32/64, including `NaN` and `Inf` (case-insensitive). Unsupported arguments: - String representations of binary and hexadecimal values, e.g. `SELECT toFloat32('0xc0fe');`. @@ -3081,7 +3081,7 @@ SELECT toFloat32(42.7), toFloat32('42.7'), toFloat32('NaN') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3128,11 +3128,11 @@ Unsupported arguments (return `0`): Query: -``` sql +```sql SELECT toFloat32OrZero('42.7'), toFloat32OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3178,11 +3178,11 @@ Unsupported arguments (return `\N`): Query: -``` sql +```sql SELECT toFloat32OrNull('42.7'), toFloat32OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3219,8 +3219,8 @@ toFloat32OrDefault(expr[, default]) Supported arguments: - Values of type (U)Int8/16/32/64/128/256. - String representations of (U)Int8/16/32/128/256. -- Values of type Float32/64. -- String representations of Float32/64. +- Values of type Float32/64, including `NaN` and `Inf`. +- String representations of Float32/64, including `NaN` and `Inf` (case-insensitive). Arguments for which the default value is returned: - String representations of binary and hexadecimal values, e.g. 
`SELECT toFloat32OrDefault('0xc0fe', CAST('0', 'Float32'));`. @@ -3233,11 +3233,11 @@ Arguments for which the default value is returned: Query: -``` sql +```sql SELECT toFloat32OrDefault('8', CAST('0', 'Float32')), toFloat32OrDefault('abc', CAST('0', 'Float32')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3273,7 +3273,7 @@ Supported arguments: - Values of type (U)Int8/16/32/64/128/256. - String representations of (U)Int8/16/32/128/256. - Values of type Float32/64, including `NaN` and `Inf`. -- String representations of type Float32/64, including `NaN` and `Inf`. +- String representations of type Float32/64, including `NaN` and `Inf` (case-insensitive). Unsupported arguments: - String representations of binary and hexadecimal values, e.g. `SELECT toFloat64('0xc0fe');`. @@ -3291,7 +3291,7 @@ SELECT toFloat64(42.7), toFloat64('42.7'), toFloat64('NaN') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3338,11 +3338,11 @@ Unsupported arguments (return `0`): Query: -``` sql +```sql SELECT toFloat64OrZero('42.7'), toFloat64OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3388,11 +3388,11 @@ Unsupported arguments (return `\N`): Query: -``` sql +```sql SELECT toFloat64OrNull('42.7'), toFloat64OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3429,8 +3429,8 @@ toFloat64OrDefault(expr[, default]) Supported arguments: - Values of type (U)Int8/16/32/64/128/256. - String representations of (U)Int8/16/32/128/256. -- Values of type Float32/64. -- String representations of Float32/64. +- Values of type Float32/64, including `NaN` and `Inf`. +- String representations of Float32/64, including `NaN` and `Inf` (case-insensitive). Arguments for which the default value is returned: - String representations of binary and hexadecimal values, e.g. `SELECT toFloat64OrDefault('0xc0fe', CAST('0', 'Float64'));`. 
@@ -3443,11 +3443,11 @@ Arguments for which the default value is returned: Query: -``` sql +```sql SELECT toFloat64OrDefault('8', CAST('0', 'Float64')), toFloat64OrDefault('abc', CAST('0', 'Float64')) -FORMAT vertical; +FORMAT Vertical; ``` Result: From 5390d1b108956907bf4b038a56fdb2ed8e584308 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Aug 2024 16:12:44 +0200 Subject: [PATCH 1746/2361] docker: use self-compiled gdb 15 from cctools Signed-off-by: Azat Khuzhin --- docker/packager/binary-builder/Dockerfile | 5 ----- docker/test/fasttest/Dockerfile | 2 +- docker/test/integration/base/Dockerfile | 4 +++- docker/test/integration/runner/Dockerfile | 3 ++- docker/test/performance-comparison/Dockerfile | 4 +++- docker/test/util/Dockerfile | 4 +++- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/docker/packager/binary-builder/Dockerfile b/docker/packager/binary-builder/Dockerfile index 647ab8758a5..7d6acdcd856 100644 --- a/docker/packager/binary-builder/Dockerfile +++ b/docker/packager/binary-builder/Dockerfile @@ -6,11 +6,6 @@ ENV CXX=clang++-${LLVM_VERSION} # If the cctools is updated, then first build it in the CI, then update here in a different commit COPY --from=clickhouse/cctools:d9e3596e706b /cctools /cctools -# TODO: same for gdb and in other places as well -# -# NOTE: here it will add circular dependency but it will be fixed after [1] -# -# [1]: https://github.com/ClickHouse/ClickHouse/issues/66493 # Rust toolchain and libraries ENV RUSTUP_HOME=/rust/rustup diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 4cac2ee6135..5d311c673a4 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -83,7 +83,7 @@ RUN arch=${TARGETARCH:-amd64} \ # Give suid to gdb to grant it attach permissions # chmod 777 to make the container user independent -RUN chmod u+s /usr/bin/gdb \ +RUN chmod u+s /opt/gdb/bin/gdb \ && mkdir -p /var/lib/clickhouse \ && chmod 777 /var/lib/clickhouse diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 469251f648c..dc4d470a262 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update \ curl \ default-jre \ g++ \ - gdb \ iproute2 \ krb5-user \ libicu-dev \ @@ -73,3 +72,6 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \ ENV TZ=Etc/UTC RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index d250b746e7d..d62009f1be3 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -30,7 +30,6 @@ RUN apt-get update \ luajit \ libssl-dev \ libcurl4-openssl-dev \ - gdb \ default-jdk \ software-properties-common \ libkrb5-dev \ @@ -87,6 +86,8 @@ COPY modprobe.sh /usr/local/bin/modprobe COPY dockerd-entrypoint.sh /usr/local/bin/ COPY misc/ /misc/ +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" # Same options as in test/base/Dockerfile # (in case you need to override them in tests) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index c68a39f6f70..f7139275282 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -9,7 +9,6 @@ RUN apt-get 
update \ curl \ dmidecode \ g++ \ - gdb \ git \ gnuplot \ imagemagick \ @@ -42,6 +41,9 @@ RUN pip3 --no-cache-dir install -r requirements.txt COPY run.sh / +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" + CMD ["bash", "/run.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index dc928ba7195..8b949ed95db 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -44,7 +44,6 @@ RUN apt-get update \ bash \ bsdmainutils \ build-essential \ - gdb \ git \ gperf \ moreutils \ @@ -58,3 +57,6 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* COPY process_functional_tests_result.py / + +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" From 40e763dd8b79b20a56b9a5cbff9571d7d9ed8869 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Tue, 6 Aug 2024 17:17:02 +0200 Subject: [PATCH 1747/2361] more logs to debug logical error from async inserts --- src/Storages/MergeTree/MergeTreeDataWriter.cpp | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index ee3ac4207cc..cb02f1cf5f2 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -240,6 +240,14 @@ std::vector scatterAsyncInsertInfoBySelector(AsyncInsertInfo ++offset_idx; } } + if (offset_idx != async_insert_info->offsets.size()) + { + LOG_ERROR( + getLogger("MergeTreeDataWriter"), + "ChunkInfo of async insert offsets doesn't match the selector size {}. Offsets content is ({})", + selector.size(), fmt::join(async_insert_info->offsets.begin(), async_insert_info->offsets.end(), ",")); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected error for async deduplicated insert, please check error logs"); + } return result; } @@ -317,8 +325,10 @@ BlocksWithPartition MergeTreeDataWriter::splitBlockIntoParts( { if (async_insert_info_with_partition[i] == nullptr) { - LOG_ERROR(getLogger("MergeTreeDataWriter"), "The {}th element in async_insert_info_with_partition is nullptr. There are totally {} partitions in the insert. Selector content is {}", - i, partitions_count, fmt::join(selector.begin(), selector.end(), ",")); + LOG_ERROR( + getLogger("MergeTreeDataWriter"), + "The {}th element in async_insert_info_with_partition is nullptr. There are totally {} partitions in the insert. Selector content is ({}). 
Offsets content is ({})", + i, partitions_count, fmt::join(selector.begin(), selector.end(), ","), fmt::join(async_insert_info->offsets.begin(), async_insert_info->offsets.end(), ",")); throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected error for async deduplicated insert, please check error logs"); } result[i].offsets = std::move(async_insert_info_with_partition[i]->offsets); From 3ebc3852f404a1ce392e9b66b8356fa9da701097 Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Mon, 22 Jul 2024 15:28:29 +0000 Subject: [PATCH 1748/2361] Allow filtering ip addresses by ip family in DNS resolver --- programs/server/Server.cpp | 3 + src/Common/DNSResolver.cpp | 93 ++++++-- src/Common/DNSResolver.h | 9 + src/Core/ServerSettings.h | 2 + src/Core/SettingsChangesHistory.cpp | 261 +++++++++++++++++++++++ tests/integration/test_dns_cache/test.py | 67 +++++- 6 files changed, 418 insertions(+), 17 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 7800ee9ff00..3126f65ef09 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1768,6 +1768,9 @@ try new_server_settings.http_connections_store_limit, }); + DNSResolver::instance().setFilterSettings(new_server_settings.dns_allow_resolve_names_to_ipv4, new_server_settings.dns_allow_resolve_names_to_ipv6); + + if (global_context->isServerCompletelyStarted()) CannotAllocateThreadFaultInjector::setFaultProbability(new_server_settings.cannot_allocate_thread_fault_injection_probability); diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 4b577a251af..051e6e63091 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include "DNSPTRResolverProvider.h" @@ -139,12 +141,6 @@ DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host) return addresses; } -DNSResolver::IPAddresses resolveIPAddressWithCache(CacheBase & cache, const std::string & host) -{ - auto [result, _ ] = cache.getOrSet(host, [&host]() {return std::make_shared(resolveIPAddressImpl(host), std::chrono::system_clock::now());}); - return result->addresses; -} - std::unordered_set reverseResolveImpl(const Poco::Net::IPAddress & address) { auto ptr_resolver = DB::DNSPTRResolverProvider::get(); @@ -198,21 +194,90 @@ struct DNSResolver::Impl std::atomic disable_cache{false}; }; +struct DNSResolver::AddressFilter +{ + struct DNSFilterSettings + { + std::atomic dns_allow_resolve_names_to_ipv4{true}; + std::atomic dns_allow_resolve_names_to_ipv6{true}; + }; -DNSResolver::DNSResolver() : impl(std::make_unique()), log(getLogger("DNSResolver")) {} + void performAddressFiltering(DNSResolver::IPAddresses & addresses) + { + bool dns_resolve_ipv4 = settings.dns_allow_resolve_names_to_ipv4; + bool dns_resolve_ipv6 = settings.dns_allow_resolve_names_to_ipv6; + + if (dns_resolve_ipv4 && dns_resolve_ipv6) + { + return; + } + if (!dns_resolve_ipv4 && !dns_resolve_ipv6) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "DNS can't resolve any address, because dns_resolve_ipv6_interfaces and dns_resolve_ipv4_interfaces both are disabled"); + } + addresses.erase( + std::remove_if(addresses.begin(), addresses.end(), + [dns_resolve_ipv6, dns_resolve_ipv4](const Poco::Net::IPAddress& address) + { + return (address.family() == Poco::Net::IPAddress::IPv6 && !dns_resolve_ipv6) + || (address.family() == Poco::Net::IPAddress::IPv4 && !dns_resolve_ipv4); + }), + addresses.end() + ); + } + + void setSettings(bool dns_allow_resolve_names_to_ipv4_, bool 
dns_allow_resolve_names_to_ipv6_) + { + settings.dns_allow_resolve_names_to_ipv4 = dns_allow_resolve_names_to_ipv4_; + settings.dns_allow_resolve_names_to_ipv6 = dns_allow_resolve_names_to_ipv6_; + } + + DNSFilterSettings settings; +}; + + +DNSResolver::DNSResolver() + : impl(std::make_unique()) + , addressFilter(std::make_unique()) + , log(getLogger("DNSResolver")) {} + + +DNSResolver::IPAddresses DNSResolver::getResolvedIPAdressessWithFiltering(const std::string & host) +{ + auto addresses = resolveIPAddressImpl(host); + addressFilter->performAddressFiltering(addresses); + + if (addresses.empty()) + { + ProfileEvents::increment(ProfileEvents::DNSError); + throw DB::NetException(ErrorCodes::DNS_ERROR, "After filtering there are no resolved address for host({}).", host); + } + return addresses; +} + +DNSResolver::IPAddresses DNSResolver::resolveIPAddressWithCache(const std::string & host) +{ + auto [result, _ ] = impl->cache_host.getOrSet(host, [&host, this]() {return std::make_shared(getResolvedIPAdressessWithFiltering(host), std::chrono::system_clock::now());}); + return result->addresses; +} Poco::Net::IPAddress DNSResolver::resolveHost(const std::string & host) { return pickAddress(resolveHostAll(host)); // random order -> random pick } +void DNSResolver::setFilterSettings(bool dns_allow_resolve_names_to_ipv4_, bool dns_allow_resolve_names_to_ipv6_) +{ + addressFilter->setSettings(dns_allow_resolve_names_to_ipv4_, dns_allow_resolve_names_to_ipv6_); +} + DNSResolver::IPAddresses DNSResolver::resolveHostAllInOriginOrder(const std::string & host) { if (impl->disable_cache) - return resolveIPAddressImpl(host); + return getResolvedIPAdressessWithFiltering(host); addToNewHosts(host); - return resolveIPAddressWithCache(impl->cache_host, host); + return resolveIPAddressWithCache(host); } DNSResolver::IPAddresses DNSResolver::resolveHostAll(const std::string & host) @@ -232,7 +297,7 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host_an splitHostAndPort(host_and_port, host, port); addToNewHosts(host); - return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(impl->cache_host, host)), port); + return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(host)), port); } Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, UInt16 port) @@ -241,7 +306,7 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, U return Poco::Net::SocketAddress(host, port); addToNewHosts(host); - return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(impl->cache_host, host)), port); + return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(host)), port); } std::vector DNSResolver::resolveAddressList(const std::string & host, UInt16 port) @@ -254,7 +319,7 @@ std::vector DNSResolver::resolveAddressList(const std: if (!impl->disable_cache) addToNewHosts(host); - std::vector ips = impl->disable_cache ? hostByName(host) : resolveIPAddressWithCache(impl->cache_host, host); + std::vector ips = impl->disable_cache ? 
hostByName(host) : resolveIPAddressWithCache(host); auto ips_end = std::unique(ips.begin(), ips.end()); addresses.reserve(ips_end - ips.begin()); @@ -419,8 +484,8 @@ bool DNSResolver::updateCache(UInt32 max_consecutive_failures) bool DNSResolver::updateHost(const String & host) { - const auto old_value = resolveIPAddressWithCache(impl->cache_host, host); - auto new_value = resolveIPAddressImpl(host); + const auto old_value = resolveIPAddressWithCache(host); + auto new_value = getResolvedIPAdressessWithFiltering(host); const bool result = old_value != new_value; impl->cache_host.set(host, std::make_shared(std::move(new_value), std::chrono::system_clock::now())); return result; diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h index 1ddd9d3b991..b35f55dfcd2 100644 --- a/src/Common/DNSResolver.h +++ b/src/Common/DNSResolver.h @@ -68,6 +68,8 @@ public: /// Returns true if IP of any host has been changed or an element was dropped (too many failures) bool updateCache(UInt32 max_consecutive_failures); + void setFilterSettings(bool dns_allow_resolve_names_to_ipv4, bool dns_allow_resolve_names_to_ipv6); + /// Returns a copy of cache entries std::vector> cacheEntries() const; @@ -86,6 +88,10 @@ private: struct Impl; std::unique_ptr impl; + + struct AddressFilter; + std::unique_ptr addressFilter; + LoggerPtr log; /// Updates cached value and returns true it has been changed. @@ -94,6 +100,9 @@ private: void addToNewHosts(const String & host); void addToNewAddresses(const Poco::Net::IPAddress & address); + + IPAddresses resolveIPAddressWithCache(const std::string & host); + IPAddresses getResolvedIPAdressessWithFiltering(const std::string & host); }; } diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index d13e6251ca9..6c23e3b95f6 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -106,6 +106,8 @@ namespace DB M(UInt64, dns_cache_max_entries, 10000, "Internal DNS cache max entries.", 0) \ M(Int32, dns_cache_update_period, 15, "Internal DNS cache update period in seconds.", 0) \ M(UInt32, dns_max_consecutive_failures, 10, "Max DNS resolve failures of a hostname before dropping the hostname from ClickHouse DNS cache.", 0) \ + M(Bool, dns_allow_resolve_names_to_ipv4, true, "Allows resolve names to ipv4 addresses.", 0) \ + M(Bool, dns_allow_resolve_names_to_ipv6, true, "Allows resolve names to ipv6 addresses.", 0) \ \ M(UInt64, max_table_size_to_drop, 50000000000lu, "If size of a table is greater than this value (in bytes) than table could not be dropped with any DROP query.", 0) \ M(UInt64, max_partition_size_to_drop, 50000000000lu, "Same as max_table_size_to_drop, but for the partitions.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 0ccbd874a3d..a76e214b1fc 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -500,6 +500,267 @@ static std::initializer_list col >= '2023-01-01' AND col <= '2023-12-31')"}, + {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, + {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, + {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, + {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, + {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, + {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, + }}, + {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, + {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, + {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, + {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, + {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, + {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, + {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, + {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, + {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, + {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, + {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, + {"enable_vertical_final", false, true, "Use vertical final by default"}, + {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, + {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, + {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, + {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, + {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, + {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, + {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, + {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, + {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, + {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, + {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, + {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, + {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, + {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, + {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, + {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, + {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, + {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, + {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, + {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, + {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, + {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, + {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, + {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, + {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, + {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, + {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, + {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, + {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, + {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, + {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, + {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, + {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, + {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, + {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, + {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, + {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, + {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, + {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, + {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, + {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, + {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, + {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, + {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, + {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, + {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, + {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, + {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, + {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, + {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, + {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, + {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, + {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, + {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, + {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, + {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, + {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, + {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, + {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, + {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, + {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, + {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, + {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, + {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, + {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, + {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, + {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, + {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, + {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, + {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, + {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, + {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, + {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, + {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, + {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, + {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, + {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, + {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, + {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, + {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, + {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, + {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, + {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, + {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, + {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, + {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, + {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, + {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, + {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, + {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, + {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, + {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, + {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, }; diff --git a/tests/integration/test_dns_cache/test.py b/tests/integration/test_dns_cache/test.py index a6db26c8575..5e120dc42aa 100644 --- a/tests/integration/test_dns_cache/test.py +++ b/tests/integration/test_dns_cache/test.py @@ -32,6 +32,7 @@ node2 = cluster.add_instance( main_configs=["configs/listen_host.xml", "configs/dns_update_long.xml"], with_zookeeper=True, ipv6_address="2001:3984:3989::1:1112", + ipv4_address="10.5.95.11", ) @@ -39,9 +40,6 @@ node2 = cluster.add_instance( def cluster_without_dns_cache_update(): try: cluster.start() - - _fill_nodes([node1, node2], "test_table_drop") - yield cluster except Exception as ex: @@ -59,6 +57,8 @@ def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): # In this case we should manually set up the static DNS entries on the source host # to exclude resplving addresses automatically added by docker. # We use ipv6 for hosts, but resolved DNS entries may contain an unexpected ipv4 address. 
+ _fill_nodes([node1, node2], "test_table_drop") + node2.set_hosts([("2001:3984:3989::1:1111", "node1")]) # drop DNS cache node2.query("SYSTEM DROP DNS CACHE") @@ -98,6 +98,67 @@ def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): assert_eq_with_retry(node2, "SELECT count(*) from test_table_drop", "7") +def _render_filter_config(allow_ipv4, allow_ipv6): + config = f""" + + {int(allow_ipv4)} + {int(allow_ipv6)} + + """ + return config + + +@pytest.mark.parametrize( + "allow_ipv4, allow_ipv6", + [ + (True, False), + (False, True), + (False, False), + ], +) +def test_dns_resolver_filter(cluster_without_dns_cache_update, allow_ipv4, allow_ipv6): + host_ipv6 = node2.ipv6_address + host_ipv4 = node2.ipv4_address + + node2.set_hosts( + [ + (host_ipv6, "test_host"), + (host_ipv4, "test_host"), + ] + ) + node2.replace_config( + "/etc/clickhouse-server/config.d/dns_filter.xml", + _render_filter_config(allow_ipv4, allow_ipv6), + ) + + node2.query("SYSTEM DROP DNS CACHE") + node2.query("SYSTEM DROP CONNECTIONS CACHE") + node2.query("SYSTEM RELOAD CONFIG") + + if not allow_ipv4 and not allow_ipv6: + with pytest.raises(QueryRuntimeException): + node4.query("SELECT * FROM remote('lost_host', 'system', 'one')") + else: + node2.query("SELECT * FROM remote('test_host', system, one)") + assert ( + node2.query( + "SELECT ip_address FROM system.dns_cache WHERE hostname='test_host'" + ) + == f"{host_ipv4 if allow_ipv4 else host_ipv6}\n" + ) + + node2.exec_in_container( + [ + "bash", + "-c", + "rm /etc/clickhouse-server/config.d/dns_filter.xml", + ], + privileged=True, + user="root", + ) + node2.query("SYSTEM RELOAD CONFIG") + + node3 = cluster.add_instance( "node3", main_configs=["configs/listen_host.xml"], From 0d5cb9f75a527e90aed18860efbd5ed1f9dcd775 Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Tue, 23 Jul 2024 09:25:02 +0000 Subject: [PATCH 1749/2361] Review fixes --- src/Common/DNSResolver.cpp | 34 ++++++++++++++--------------- src/Core/SettingsChangesHistory.cpp | 2 +- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 051e6e63091..08111d7f2af 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -12,8 +12,7 @@ #include #include #include -#include -#include +#include "Common/MultiVersion.h" #include #include "DNSPTRResolverProvider.h" @@ -198,14 +197,17 @@ struct DNSResolver::AddressFilter { struct DNSFilterSettings { - std::atomic dns_allow_resolve_names_to_ipv4{true}; - std::atomic dns_allow_resolve_names_to_ipv6{true}; + bool dns_allow_resolve_names_to_ipv4{true}; + bool dns_allow_resolve_names_to_ipv6{true}; }; + AddressFilter() : settings(std::make_unique()) {} + void performAddressFiltering(DNSResolver::IPAddresses & addresses) { - bool dns_resolve_ipv4 = settings.dns_allow_resolve_names_to_ipv4; - bool dns_resolve_ipv6 = settings.dns_allow_resolve_names_to_ipv6; + const auto current_settings = settings.get(); + bool dns_resolve_ipv4 = current_settings->dns_allow_resolve_names_to_ipv4; + bool dns_resolve_ipv6 = current_settings->dns_allow_resolve_names_to_ipv6; if (dns_resolve_ipv4 && dns_resolve_ipv6) { @@ -215,24 +217,20 @@ struct DNSResolver::AddressFilter { throw Exception(ErrorCodes::BAD_ARGUMENTS, "DNS can't resolve any address, because dns_resolve_ipv6_interfaces and dns_resolve_ipv4_interfaces both are disabled"); } - addresses.erase( - std::remove_if(addresses.begin(), addresses.end(), - [dns_resolve_ipv6, dns_resolve_ipv4](const Poco::Net::IPAddress& address) - { - 
return (address.family() == Poco::Net::IPAddress::IPv6 && !dns_resolve_ipv6) - || (address.family() == Poco::Net::IPAddress::IPv4 && !dns_resolve_ipv4); - }), - addresses.end() - ); + + std::erase_if(addresses, [dns_resolve_ipv6, dns_resolve_ipv4](const Poco::Net::IPAddress& address) + { + return (address.family() == Poco::Net::IPAddress::IPv6 && !dns_resolve_ipv6) + || (address.family() == Poco::Net::IPAddress::IPv4 && !dns_resolve_ipv4); + }); } void setSettings(bool dns_allow_resolve_names_to_ipv4_, bool dns_allow_resolve_names_to_ipv6_) { - settings.dns_allow_resolve_names_to_ipv4 = dns_allow_resolve_names_to_ipv4_; - settings.dns_allow_resolve_names_to_ipv6 = dns_allow_resolve_names_to_ipv6_; + settings.set(std::make_unique(dns_allow_resolve_names_to_ipv4_, dns_allow_resolve_names_to_ipv6_)); } - DNSFilterSettings settings; + MultiVersion settings; }; diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index a76e214b1fc..01b9bca795f 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -523,7 +523,7 @@ static std::initializer_list Date: Tue, 23 Jul 2024 12:58:50 +0000 Subject: [PATCH 1750/2361] Fix test --- src/Core/SettingsChangesHistory.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 01b9bca795f..ac427e2e03e 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -523,9 +523,7 @@ static std::initializer_list Date: Tue, 23 Jul 2024 16:56:29 +0000 Subject: [PATCH 1751/2361] Fix tidy build --- src/Common/DNSResolver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 08111d7f2af..bbee7d259f0 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -203,7 +203,7 @@ struct DNSResolver::AddressFilter AddressFilter() : settings(std::make_unique()) {} - void performAddressFiltering(DNSResolver::IPAddresses & addresses) + void performAddressFiltering(DNSResolver::IPAddresses & addresses) const { const auto current_settings = settings.get(); bool dns_resolve_ipv4 = current_settings->dns_allow_resolve_names_to_ipv4; From 304d01e2c36ae11cd1c703135c493ee885bc3062 Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Mon, 29 Jul 2024 19:18:14 +0000 Subject: [PATCH 1752/2361] Review fix --- programs/server/Server.cpp | 1 - src/Common/DNSResolver.cpp | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 3126f65ef09..aa7c3d75163 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1770,7 +1770,6 @@ try DNSResolver::instance().setFilterSettings(new_server_settings.dns_allow_resolve_names_to_ipv4, new_server_settings.dns_allow_resolve_names_to_ipv6); - if (global_context->isServerCompletelyStarted()) CannotAllocateThreadFaultInjector::setFaultProbability(new_server_settings.cannot_allocate_thread_fault_injection_probability); diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index bbee7d259f0..68a8fa7d74c 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -225,9 +225,9 @@ struct DNSResolver::AddressFilter }); } - void setSettings(bool dns_allow_resolve_names_to_ipv4_, bool dns_allow_resolve_names_to_ipv6_) + void setSettings(bool dns_allow_resolve_names_to_ipv4, bool dns_allow_resolve_names_to_ipv6) { - 
settings.set(std::make_unique(dns_allow_resolve_names_to_ipv4_, dns_allow_resolve_names_to_ipv6_)); + settings.set(std::make_unique(dns_allow_resolve_names_to_ipv4, dns_allow_resolve_names_to_ipv6)); } MultiVersion settings; @@ -264,9 +264,9 @@ Poco::Net::IPAddress DNSResolver::resolveHost(const std::string & host) return pickAddress(resolveHostAll(host)); // random order -> random pick } -void DNSResolver::setFilterSettings(bool dns_allow_resolve_names_to_ipv4_, bool dns_allow_resolve_names_to_ipv6_) +void DNSResolver::setFilterSettings(bool dns_allow_resolve_names_to_ipv4, bool dns_allow_resolve_names_to_ipv6) { - addressFilter->setSettings(dns_allow_resolve_names_to_ipv4_, dns_allow_resolve_names_to_ipv6_); + addressFilter->setSettings(dns_allow_resolve_names_to_ipv4, dns_allow_resolve_names_to_ipv6); } DNSResolver::IPAddresses DNSResolver::resolveHostAllInOriginOrder(const std::string & host) From 4143eea587b808086cceb98025906646fa78c96a Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Wed, 31 Jul 2024 08:35:38 +0000 Subject: [PATCH 1753/2361] Add test to skip parallel --- tests/integration/parallel_skip.json | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 99fa626bd1e..6689572aeb7 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -1,6 +1,7 @@ [ "test_dns_cache/test.py::test_dns_cache_update", "test_dns_cache/test.py::test_ip_change_drop_dns_cache", + "test_dns_cache/test.py::test_dns_resolver_filter", "test_dns_cache/test.py::test_ip_change_update_dns_cache", "test_dns_cache/test.py::test_user_access_ip_change[node0]", "test_dns_cache/test.py::test_user_access_ip_change[node1]", From 54ba7703b1ccd116dceefe8b0e9c6aca5c24d212 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 15:16:43 +0000 Subject: [PATCH 1754/2361] Fix #67742 --- .../ConditionSelectivityEstimator.cpp | 38 ++++++---------- .../ConditionSelectivityEstimator.h | 10 ++--- src/Storages/Statistics/Statistics.cpp | 40 ++--------------- src/Storages/Statistics/Statistics.h | 7 --- src/Storages/Statistics/StatisticsTDigest.cpp | 43 ++++++++++--------- src/Storages/Statistics/StatisticsTDigest.h | 3 +- .../02864_statistics_bugs.reference | 1 + .../0_stateless/02864_statistics_bugs.sql | 9 ++++ 8 files changed, 54 insertions(+), 97 deletions(-) create mode 100644 tests/queries/0_stateless/02864_statistics_bugs.reference create mode 100644 tests/queries/0_stateless/02864_statistics_bugs.sql diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp index 57dff958b9a..432659f51f8 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.cpp +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.cpp @@ -19,7 +19,7 @@ void ConditionSelectivityEstimator::ColumnSelectivityEstimator::merge(String par Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateLess(const Field & val, Float64 rows) const { if (part_statistics.empty()) - return default_normal_cond_factor * rows; + return default_cond_range_factor * rows; Float64 result = 0; Float64 part_rows = 0; for (const auto & [key, estimator] : part_statistics) @@ -39,13 +39,7 @@ Float64 ConditionSelectivityEstimator::ColumnSelectivityEstimator::estimateEqual { if (part_statistics.empty()) { - auto float_val = StatisticsUtils::tryConvertToFloat64(val); - if (!float_val) - return default_unknown_cond_factor * rows; - else if 
(float_val.value() < - threshold || float_val.value() > threshold) - return default_normal_cond_factor * rows; - else - return default_good_cond_factor * rows; + return default_cond_equal_factor * rows; } Float64 result = 0; Float64 partial_cnt = 0; @@ -149,30 +143,22 @@ Float64 ConditionSelectivityEstimator::estimateRowCount(const RPNBuilderTreeNode auto [op, val] = extractBinaryOp(node, col); + if (dummy) + { + if (op == "equals") + return default_cond_equal_factor * total_rows; + else if (op == "less" || op == "lessOrEquals" || op == "greater" || op == "greaterOrEquals") + return default_cond_range_factor * total_rows; + else + return default_unknown_cond_factor * total_rows; + } + if (op == "equals") - { - if (dummy) - { - auto float_val = StatisticsUtils::tryConvertToFloat64(val); - if (!float_val || (float_val < - threshold || float_val > threshold)) - return default_normal_cond_factor * total_rows; - else - return default_good_cond_factor * total_rows; - } return estimator.estimateEqual(val, total_rows); - } else if (op == "less" || op == "lessOrEquals") - { - if (dummy) - return default_normal_cond_factor * total_rows; return estimator.estimateLess(val, total_rows); - } else if (op == "greater" || op == "greaterOrEquals") - { - if (dummy) - return default_normal_cond_factor * total_rows; return estimator.estimateGreater(val, total_rows); - } else return default_unknown_cond_factor * total_rows; } diff --git a/src/Storages/Statistics/ConditionSelectivityEstimator.h b/src/Storages/Statistics/ConditionSelectivityEstimator.h index ce7fdd12e92..269ee9ac6cb 100644 --- a/src/Storages/Statistics/ConditionSelectivityEstimator.h +++ b/src/Storages/Statistics/ConditionSelectivityEstimator.h @@ -38,12 +38,10 @@ private: std::pair extractBinaryOp(const RPNBuilderTreeNode & node, const String & column_name) const; - static constexpr auto default_good_cond_factor = 0.1; - static constexpr auto default_normal_cond_factor = 0.5; - static constexpr auto default_unknown_cond_factor = 1.0; - /// Conditions like "x = N" are considered good if abs(N) > threshold. - /// This is used to assume that condition is likely to have good selectivity. - static constexpr auto threshold = 2; + /// Used to estimate the selectivity of a condition when there is no statistics. 
+ static constexpr auto default_cond_range_factor = 0.5; + static constexpr auto default_cond_equal_factor = 0.01; + static constexpr auto default_unknown_cond_factor = 1; UInt64 total_rows = 0; std::map column_estimators; diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index e3f9fcc8192..52eec437ac2 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -27,36 +27,6 @@ enum StatisticsFileVersion : UInt16 V0 = 0, }; -std::optional StatisticsUtils::tryConvertToFloat64(const Field & field) -{ - switch (field.getType()) - { - case Field::Types::Int64: - return field.get(); - case Field::Types::UInt64: - return field.get(); - case Field::Types::Float64: - return field.get(); - case Field::Types::Int128: - return field.get(); - case Field::Types::UInt128: - return field.get(); - case Field::Types::Int256: - return field.get(); - case Field::Types::UInt256: - return field.get(); - default: - return {}; - } -} - -std::optional StatisticsUtils::tryConvertToString(const DB::Field & field) -{ - if (field.getType() == Field::Types::String) - return field.get(); - return {}; -} - IStatistics::IStatistics(const SingleStatisticsDescription & stat_) : stat(stat_) { @@ -105,7 +75,7 @@ Float64 ColumnStatistics::estimateLess(const Field & val) const { if (stats.contains(StatisticsType::TDigest)) return stats.at(StatisticsType::TDigest)->estimateLess(val); - return rows * ConditionSelectivityEstimator::default_normal_cond_factor; + return rows * ConditionSelectivityEstimator::default_cond_range_factor; } Float64 ColumnStatistics::estimateGreater(const Field & val) const @@ -115,8 +85,7 @@ Float64 ColumnStatistics::estimateGreater(const Field & val) const Float64 ColumnStatistics::estimateEqual(const Field & val) const { - auto float_val = StatisticsUtils::tryConvertToFloat64(val); - if (float_val.has_value() && stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) + if (stats_desc.data_type->isValueRepresentedByNumber() && stats.contains(StatisticsType::Uniq) && stats.contains(StatisticsType::TDigest)) { /// 2048 is the default number of buckets in TDigest. In this case, TDigest stores exactly one value (with many rows) for every bucket. 
if (stats.at(StatisticsType::Uniq)->estimateCardinality() < 2048) @@ -126,10 +95,7 @@ Float64 ColumnStatistics::estimateEqual(const Field & val) const if (stats.contains(StatisticsType::CountMinSketch)) return stats.at(StatisticsType::CountMinSketch)->estimateEqual(val); #endif - if (!float_val.has_value() && (float_val < - ConditionSelectivityEstimator::threshold || float_val > ConditionSelectivityEstimator::threshold)) - return rows * ConditionSelectivityEstimator::default_normal_cond_factor; - else - return rows * ConditionSelectivityEstimator::default_good_cond_factor; + return rows * ConditionSelectivityEstimator::default_cond_equal_factor; } /// ------------------------------------- diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index c6a45e68aa6..593ac20edb5 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -15,13 +15,6 @@ constexpr auto STATS_FILE_PREFIX = "statistics_"; constexpr auto STATS_FILE_SUFFIX = ".stats"; -struct StatisticsUtils -{ - /// Returns std::nullopt if input Field cannot be converted to a concrete value - static std::optional tryConvertToFloat64(const Field & field); - static std::optional tryConvertToString(const Field & field); -}; - /// Statistics describe properties of the values in the column, /// e.g. how many unique values exist, /// what are the N most frequent values, diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index 1cf92fea24b..b0c4bfda27d 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -1,6 +1,8 @@ #include -#include +#include #include +#include +#include namespace DB { @@ -10,24 +12,21 @@ extern const int ILLEGAL_STATISTICS; extern const int LOGICAL_ERROR; } -StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & description) +StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & description, const DataTypePtr & data_type_) : IStatistics(description) + , data_type(data_type_) { } void StatisticsTDigest::update(const ColumnPtr & column) { - size_t rows = column->size(); - for (size_t row = 0; row < rows; ++row) + for (size_t row = 0; row < column->size(); ++row) { - Field field; - column->get(row, field); - - if (field.isNull()) + if (column->isNullAt(row)) continue; - if (auto field_as_float = StatisticsUtils::tryConvertToFloat64(field)) - t_digest.add(*field_as_float, 1); + auto data = column->getFloat64(row); + t_digest.add(data, 1); } } @@ -43,18 +42,22 @@ void StatisticsTDigest::deserialize(ReadBuffer & buf) Float64 StatisticsTDigest::estimateLess(const Field & val) const { - auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); - if (val_as_float) - return t_digest.getCountLessThan(*val_as_float); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", val.getTypeName()); + Field val_converted = convertFieldToType(val, *data_type); + if (val_converted.isNull()) + return 0; + + auto val_as_float = applyVisitor(FieldVisitorConvertToNumber(), val_converted); + return t_digest.getCountLessThan(val_as_float); } Float64 StatisticsTDigest::estimateEqual(const Field & val) const { - auto val_as_float = StatisticsUtils::tryConvertToFloat64(val); - if (val_as_float) - return t_digest.getCountEqual(*val_as_float); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'tdigest' does not support estimating value of type {}", 
val.getTypeName()); + Field val_converted = convertFieldToType(val, *data_type); + if (val_converted.isNull()) + return 0; + + auto val_as_float = applyVisitor(FieldVisitorConvertToNumber(), val_converted); + return t_digest.getCountEqual(val_as_float); } void tdigestStatisticsValidator(const SingleStatisticsDescription & /*description*/, const DataTypePtr & data_type) @@ -65,9 +68,9 @@ void tdigestStatisticsValidator(const SingleStatisticsDescription & /*descriptio throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'tdigest' do not support type {}", data_type->getName()); } -StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & /*data_type*/) +StatisticsPtr tdigestStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type) { - return std::make_shared(description); + return std::make_shared(description, data_type); } } diff --git a/src/Storages/Statistics/StatisticsTDigest.h b/src/Storages/Statistics/StatisticsTDigest.h index 2b37799d07b..5e744fee2ce 100644 --- a/src/Storages/Statistics/StatisticsTDigest.h +++ b/src/Storages/Statistics/StatisticsTDigest.h @@ -9,7 +9,7 @@ namespace DB class StatisticsTDigest : public IStatistics { public: - explicit StatisticsTDigest(const SingleStatisticsDescription & description); + explicit StatisticsTDigest(const SingleStatisticsDescription & description, const DataTypePtr & data_type_); void update(const ColumnPtr & column) override; @@ -21,6 +21,7 @@ public: private: QuantileTDigest t_digest; + DataTypePtr data_type; }; void tdigestStatisticsValidator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); diff --git a/tests/queries/0_stateless/02864_statistics_bugs.reference b/tests/queries/0_stateless/02864_statistics_bugs.reference new file mode 100644 index 00000000000..f599e28b8ab --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_bugs.reference @@ -0,0 +1 @@ +10 diff --git a/tests/queries/0_stateless/02864_statistics_bugs.sql b/tests/queries/0_stateless/02864_statistics_bugs.sql new file mode 100644 index 00000000000..ef1735550e6 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_bugs.sql @@ -0,0 +1,9 @@ +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; + +DROP TABLE IF EXISTS bug_67742; +CREATE TABLE bug_67742 (a Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO bug_67742 SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM bug_67742 WHERE a < '10'; +DROP TABLE bug_67742; From 012ea3cc6d09c50f29fe4d7964aa18ee038c35b2 Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Wed, 31 Jul 2024 13:29:36 +0000 Subject: [PATCH 1755/2361] Rebase --- src/Core/SettingsChangesHistory.cpp | 259 ----------------------- tests/integration/test_dns_cache/test.py | 138 ++++++------ 2 files changed, 74 insertions(+), 323 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ac427e2e03e..0ccbd874a3d 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -500,265 +500,6 @@ static std::initializer_list col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, }; diff --git a/tests/integration/test_dns_cache/test.py b/tests/integration/test_dns_cache/test.py index 5e120dc42aa..36401517429 100644 --- a/tests/integration/test_dns_cache/test.py +++ b/tests/integration/test_dns_cache/test.py @@ -32,7 +32,6 @@ node2 = cluster.add_instance( main_configs=["configs/listen_host.xml", "configs/dns_update_long.xml"], with_zookeeper=True, ipv6_address="2001:3984:3989::1:1112", - ipv4_address="10.5.95.11", ) @@ -40,6 +39,9 @@ node2 = cluster.add_instance( def cluster_without_dns_cache_update(): try: cluster.start() + + _fill_nodes([node1, node2], "test_table_drop") + yield cluster except Exception as ex: @@ -57,8 +59,6 @@ def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): # In this case we should manually set up the static DNS entries on the source host # to exclude resplving addresses automatically added by docker. # We use ipv6 for hosts, but resolved DNS entries may contain an unexpected ipv4 address. 
- _fill_nodes([node1, node2], "test_table_drop") - node2.set_hosts([("2001:3984:3989::1:1111", "node1")]) # drop DNS cache node2.query("SYSTEM DROP DNS CACHE") @@ -98,67 +98,6 @@ def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): assert_eq_with_retry(node2, "SELECT count(*) from test_table_drop", "7") -def _render_filter_config(allow_ipv4, allow_ipv6): - config = f""" - - {int(allow_ipv4)} - {int(allow_ipv6)} - - """ - return config - - -@pytest.mark.parametrize( - "allow_ipv4, allow_ipv6", - [ - (True, False), - (False, True), - (False, False), - ], -) -def test_dns_resolver_filter(cluster_without_dns_cache_update, allow_ipv4, allow_ipv6): - host_ipv6 = node2.ipv6_address - host_ipv4 = node2.ipv4_address - - node2.set_hosts( - [ - (host_ipv6, "test_host"), - (host_ipv4, "test_host"), - ] - ) - node2.replace_config( - "/etc/clickhouse-server/config.d/dns_filter.xml", - _render_filter_config(allow_ipv4, allow_ipv6), - ) - - node2.query("SYSTEM DROP DNS CACHE") - node2.query("SYSTEM DROP CONNECTIONS CACHE") - node2.query("SYSTEM RELOAD CONFIG") - - if not allow_ipv4 and not allow_ipv6: - with pytest.raises(QueryRuntimeException): - node4.query("SELECT * FROM remote('lost_host', 'system', 'one')") - else: - node2.query("SELECT * FROM remote('test_host', system, one)") - assert ( - node2.query( - "SELECT ip_address FROM system.dns_cache WHERE hostname='test_host'" - ) - == f"{host_ipv4 if allow_ipv4 else host_ipv6}\n" - ) - - node2.exec_in_container( - [ - "bash", - "-c", - "rm /etc/clickhouse-server/config.d/dns_filter.xml", - ], - privileged=True, - user="root", - ) - node2.query("SYSTEM RELOAD CONFIG") - - node3 = cluster.add_instance( "node3", main_configs=["configs/listen_host.xml"], @@ -378,3 +317,74 @@ def test_host_is_drop_from_cache_after_consecutive_failures( assert node4.wait_for_log_line( "Cached hosts dropped:.*InvalidHostThatDoesNotExist.*" ) + + +node7 = cluster.add_instance( + "node7", + main_configs=["configs/listen_host.xml", "configs/dns_update_long.xml"], + with_zookeeper=True, + ipv6_address="2001:3984:3989::1:1117", + ipv4_address="10.5.95.17", +) + + +def _render_filter_config(allow_ipv4, allow_ipv6): + config = f""" + + {int(allow_ipv4)} + {int(allow_ipv6)} + + """ + return config + + +@pytest.mark.parametrize( + "allow_ipv4, allow_ipv6", + [ + (True, False), + (False, True), + (False, False), + ], +) +def test_dns_resolver_filter(cluster_without_dns_cache_update, allow_ipv4, allow_ipv6): + node = node7 + host_ipv6 = node.ipv6_address + host_ipv4 = node.ipv4_address + + node.set_hosts( + [ + (host_ipv6, "test_host"), + (host_ipv4, "test_host"), + ] + ) + node.replace_config( + "/etc/clickhouse-server/config.d/dns_filter.xml", + _render_filter_config(allow_ipv4, allow_ipv6), + ) + + node.query("SYSTEM RELOAD CONFIG") + node.query("SYSTEM DROP DNS CACHE") + node.query("SYSTEM DROP CONNECTIONS CACHE") + + if not allow_ipv4 and not allow_ipv6: + with pytest.raises(QueryRuntimeException): + node.query("SELECT * FROM remote('lost_host', 'system', 'one')") + else: + node.query("SELECT * FROM remote('test_host', system, one)") + assert ( + node.query( + "SELECT ip_address FROM system.dns_cache WHERE hostname='test_host'" + ) + == f"{host_ipv4 if allow_ipv4 else host_ipv6}\n" + ) + + node.exec_in_container( + [ + "bash", + "-c", + "rm /etc/clickhouse-server/config.d/dns_filter.xml", + ], + privileged=True, + user="root", + ) + node.query("SYSTEM RELOAD CONFIG") From 2d93910bd601f7a4f8cc9385a2d191ce806453e3 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Tue, 
6 Aug 2024 17:44:29 +0200 Subject: [PATCH 1756/2361] added tags no-fasttest --- tests/queries/0_stateless/03215_parquet_index.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03215_parquet_index.sql b/tests/queries/0_stateless/03215_parquet_index.sql index 0afccdf6f3b..5766f40f021 100644 --- a/tests/queries/0_stateless/03215_parquet_index.sql +++ b/tests/queries/0_stateless/03215_parquet_index.sql @@ -1,3 +1,5 @@ +-- Tags: no-fasttest + -- default settings. DROP TABLE IF EXISTS test_parquet; CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet); From 1e631472f0aabcb1b856942fb7409c5c010315a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 18:07:49 +0200 Subject: [PATCH 1757/2361] Make 01685_ssd_cache_dictionary_complex_key parallelizable --- .../01685_ssd_cache_dictionary_complex_key.sh | 81 +++++++++---------- 1 file changed, 38 insertions(+), 43 deletions(-) diff --git a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh index 5583a9dd5e7..55061b9a643 100755 --- a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh +++ b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh @@ -1,15 +1,11 @@ #!/usr/bin/env bash -# Tags: no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -n --query=" - DROP DATABASE IF EXISTS 01685_database_for_cache_dictionary; - CREATE DATABASE 01685_database_for_cache_dictionary; - - CREATE TABLE 01685_database_for_cache_dictionary.complex_key_simple_attributes_source_table + CREATE TABLE complex_key_simple_attributes_source_table ( id UInt64, id_key String, @@ -18,11 +14,11 @@ $CLICKHOUSE_CLIENT -n --query=" ) ENGINE = TinyLog; - INSERT INTO 01685_database_for_cache_dictionary.complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); - INSERT INTO 01685_database_for_cache_dictionary.complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1'); - INSERT INTO 01685_database_for_cache_dictionary.complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + INSERT INTO complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); + INSERT INTO complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1'); + INSERT INTO complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); - CREATE DICTIONARY 01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes + CREATE DICTIONARY cache_dictionary_complex_key_simple_attributes ( id UInt64, id_key String, @@ -30,32 +26,32 @@ $CLICKHOUSE_CLIENT -n --query=" value_second String DEFAULT 'value_second_default' ) PRIMARY KEY id, id_key - SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_simple_attributes_source_table' DB '01685_database_for_cache_dictionary')) + SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_simple_attributes_source_table' DB '${CLICKHOUSE_DATABASE}')) LIFETIME(MIN 1 MAX 1000) - LAYOUT(COMPLEX_KEY_SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d')); + LAYOUT(COMPLEX_KEY_SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 8192 PATH '$USER_FILES_PATH/${CLICKHOUSE_DATABASE}_dic')); SELECT 'Dictionary 
cache_dictionary_complex_key_simple_attributes'; SELECT 'dictGet existing value'; - SELECT dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, - dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; + SELECT dictGet('cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; SELECT 'dictGet with non existing value'; - SELECT dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, - dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; + SELECT dictGet('cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; SELECT 'dictGetOrDefault existing value'; - SELECT dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, - dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; + SELECT dictGetOrDefault('cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; SELECT 'dictGetOrDefault non existing value'; - SELECT dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, - dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; + SELECT dictGetOrDefault('cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; SELECT 'dictHas'; - SELECT dictHas('01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; + SELECT dictHas('cache_dictionary_complex_key_simple_attributes', (number, concat('id_key_', toString(number)))) 
FROM system.numbers LIMIT 4; SELECT 'select all values as input stream'; - SELECT * FROM 01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes ORDER BY id; + SELECT * FROM cache_dictionary_complex_key_simple_attributes ORDER BY id; - DROP DICTIONARY 01685_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes; - DROP TABLE 01685_database_for_cache_dictionary.complex_key_simple_attributes_source_table; + DROP DICTIONARY cache_dictionary_complex_key_simple_attributes; + DROP TABLE complex_key_simple_attributes_source_table; - CREATE TABLE 01685_database_for_cache_dictionary.complex_key_complex_attributes_source_table + CREATE TABLE complex_key_complex_attributes_source_table ( id UInt64, id_key String, @@ -64,11 +60,11 @@ $CLICKHOUSE_CLIENT -n --query=" ) ENGINE = TinyLog; - INSERT INTO 01685_database_for_cache_dictionary.complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); - INSERT INTO 01685_database_for_cache_dictionary.complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL); - INSERT INTO 01685_database_for_cache_dictionary.complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + INSERT INTO complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); + INSERT INTO complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL); + INSERT INTO complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); - CREATE DICTIONARY 01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes + CREATE DICTIONARY cache_dictionary_complex_key_complex_attributes ( id UInt64, id_key String, @@ -77,29 +73,28 @@ $CLICKHOUSE_CLIENT -n --query=" value_second Nullable(String) DEFAULT 'value_second_default' ) PRIMARY KEY id, id_key - SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_complex_attributes_source_table' DB '01685_database_for_cache_dictionary')) + SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_complex_attributes_source_table' DB '${CLICKHOUSE_DATABASE}')) LIFETIME(MIN 1 MAX 1000) LAYOUT(COMPLEX_KEY_SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 8192 PATH '$USER_FILES_PATH/1d')); SELECT 'Dictionary cache_dictionary_complex_key_complex_attributes'; SELECT 'dictGet existing value'; - SELECT dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, - dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; + SELECT dictGet('cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; SELECT 'dictGet with non existing value'; - SELECT dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, - dictGet('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM 
system.numbers LIMIT 4; + SELECT dictGet('cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; SELECT 'dictGetOrDefault existing value'; - SELECT dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, - dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; + SELECT dictGetOrDefault('cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; SELECT 'dictGetOrDefault non existing value'; - SELECT dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, - dictGetOrDefault('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; + SELECT dictGetOrDefault('cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; SELECT 'dictHas'; - SELECT dictHas('01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; + SELECT dictHas('cache_dictionary_complex_key_complex_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; SELECT 'select all values as input stream'; - SELECT * FROM 01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes ORDER BY id; + SELECT * FROM cache_dictionary_complex_key_complex_attributes ORDER BY id; - DROP DICTIONARY 01685_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes; - DROP TABLE 01685_database_for_cache_dictionary.complex_key_complex_attributes_source_table; - - DROP DATABASE 01685_database_for_cache_dictionary;" + DROP DICTIONARY cache_dictionary_complex_key_complex_attributes; + DROP TABLE complex_key_complex_attributes_source_table; +" From 38d891c6befdacfa835005b522f104e9428a270b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 18:18:09 +0200 Subject: [PATCH 1758/2361] Make 02015_column_default_dict_get_identifier parallelizable --- ...015_column_default_dict_get_identifier.sql | 29 ++++++++----------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql b/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql index 
24a3b631388..046d0c42180 100644 --- a/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql +++ b/tests/queries/0_stateless/02015_column_default_dict_get_identifier.sql @@ -1,9 +1,6 @@ -- Tags: no-parallel -DROP DATABASE IF EXISTS 02015_db; -CREATE DATABASE 02015_db; - -CREATE TABLE 02015_db.test_table +CREATE TABLE test_table ( key_column UInt64, data_column_1 UInt64, @@ -12,9 +9,9 @@ CREATE TABLE 02015_db.test_table ENGINE = MergeTree ORDER BY key_column; -INSERT INTO 02015_db.test_table VALUES (0, 0, 0); +INSERT INTO test_table VALUES (0, 0, 0); -CREATE DICTIONARY 02015_db.test_dictionary +CREATE DICTIONARY test_dictionary ( key_column UInt64 DEFAULT 0, data_column_1 UInt64 DEFAULT 1, @@ -22,20 +19,18 @@ CREATE DICTIONARY 02015_db.test_dictionary ) PRIMARY KEY key_column LAYOUT(DIRECT()) -SOURCE(CLICKHOUSE(DB '02015_db' TABLE 'test_table')); +SOURCE(CLICKHOUSE(DB currentDatabase() TABLE 'test_table')); -CREATE TABLE 02015_db.test_table_default +CREATE TABLE test_table_default ( - data_1 DEFAULT dictGetUInt64('02015_db.test_dictionary', 'data_column_1', toUInt64(0)), - data_2 DEFAULT dictGet(02015_db.test_dictionary, 'data_column_2', toUInt64(0)) + data_1 DEFAULT dictGetUInt64('test_dictionary', 'data_column_1', toUInt64(0)), + data_2 DEFAULT dictGet(test_dictionary, 'data_column_2', toUInt64(0)) ) ENGINE=TinyLog; -INSERT INTO 02015_db.test_table_default(data_1) VALUES (5); -SELECT * FROM 02015_db.test_table_default; +INSERT INTO test_table_default(data_1) VALUES (5); +SELECT * FROM test_table_default; -DROP TABLE 02015_db.test_table_default; -DROP DICTIONARY 02015_db.test_dictionary; -DROP TABLE 02015_db.test_table; - -DROP DATABASE 02015_db; +DROP TABLE test_table_default; +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; From 425fb0f485b1936ebfa46f0ae9db1e7ea134272f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 18:20:09 +0200 Subject: [PATCH 1759/2361] Make 01600_multiple_left_join_with_aliases parallelizable --- .../01600_multiple_left_join_with_aliases.sql | 25 +++++++------------ 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/tests/queries/0_stateless/01600_multiple_left_join_with_aliases.sql b/tests/queries/0_stateless/01600_multiple_left_join_with_aliases.sql index ae296e18560..2945622fbed 100644 --- a/tests/queries/0_stateless/01600_multiple_left_join_with_aliases.sql +++ b/tests/queries/0_stateless/01600_multiple_left_join_with_aliases.sql @@ -1,9 +1,4 @@ --- Tags: no-parallel - -drop database if exists test_01600; -create database test_01600; - -CREATE TABLE test_01600.base +CREATE TABLE base ( `id` UInt64, `id2` UInt64, @@ -14,7 +9,7 @@ ENGINE=MergeTree() PARTITION BY d ORDER BY (id,id2,d); -CREATE TABLE test_01600.derived1 +CREATE TABLE derived1 ( `id1` UInt64, `d1` UInt64, @@ -25,7 +20,7 @@ PARTITION BY d1 ORDER BY (id1, d1) ; -CREATE TABLE test_01600.derived2 +CREATE TABLE derived2 ( `id2` UInt64, `d2` UInt64, @@ -36,19 +31,17 @@ PARTITION BY d2 ORDER BY (id2, d2) ; -select +select base.id as `base.id`, derived2.value2 as `derived2.value2`, derived1.value1 as `derived1.value1` -from test_01600.base as base -left join test_01600.derived2 as derived2 on base.id2 = derived2.id2 -left join test_01600.derived1 as derived1 on base.id = derived1.id1; +from base as base +left join derived2 as derived2 on base.id2 = derived2.id2 +left join derived1 as derived1 on base.id = derived1.id1; SELECT base.id AS `base.id`, derived1.value1 AS `derived1.value1` -FROM test_01600.base AS base -LEFT JOIN 
test_01600.derived1 AS derived1 ON base.id = derived1.id1; - -drop database test_01600; +FROM base AS base +LEFT JOIN derived1 AS derived1 ON base.id = derived1.id1; From 078e4ca36db15a4f3bab27141d24fe00f89a5a1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 18:21:58 +0200 Subject: [PATCH 1760/2361] Make 02950_dictionary_ssd_cache_short_circuit parallelizable --- ...2950_dictionary_ssd_cache_short_circuit.sh | 23 ++++++++----------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh b/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh index 3d2fe5d664d..daa9c571a5d 100755 --- a/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh +++ b/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh @@ -1,15 +1,11 @@ #!/usr/bin/env bash -# Tags: no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -n --query=" - DROP DATABASE IF EXISTS 02950_database_for_ssd_cache_dictionary; - CREATE DATABASE 02950_database_for_ssd_cache_dictionary; - - CREATE TABLE 02950_database_for_ssd_cache_dictionary.source_table + CREATE TABLE source_table ( id UInt64, v1 String, @@ -18,9 +14,9 @@ $CLICKHOUSE_CLIENT -n --query=" ) ENGINE = TinyLog; - INSERT INTO 02950_database_for_ssd_cache_dictionary.source_table VALUES (0, 'zero', 'zero', 0), (1, 'one', NULL, 1); + INSERT INTO source_table VALUES (0, 'zero', 'zero', 0), (1, 'one', NULL, 1); - CREATE DICTIONARY 02950_database_for_ssd_cache_dictionary.ssd_cache_dictionary + CREATE DICTIONARY ssd_cache_dictionary ( id UInt64, v1 String, @@ -30,12 +26,11 @@ $CLICKHOUSE_CLIENT -n --query=" PRIMARY KEY id SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'source_table')) LIFETIME(MIN 1 MAX 1000) - LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 8192 PATH '$CLICKHOUSE_USER_FILES/0d')); + LAYOUT(SSD_CACHE(BLOCK_SIZE 4096 FILE_SIZE 8192 PATH '$CLICKHOUSE_USER_FILES/${CLICKHOUSE_DATABASE}_ssd_dic')); - SELECT dictGetOrDefault('02950_database_for_ssd_cache_dictionary.ssd_cache_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) FROM 02950_database_for_ssd_cache_dictionary.source_table; - SELECT dictGetOrDefault('02950_database_for_ssd_cache_dictionary.ssd_cache_dictionary', 'v2', id+1, intDiv(NULL, id)) FROM 02950_database_for_ssd_cache_dictionary.source_table; - SELECT dictGetOrDefault('02950_database_for_ssd_cache_dictionary.ssd_cache_dictionary', 'v3', id+1, intDiv(NULL, id)) FROM 02950_database_for_ssd_cache_dictionary.source_table; + SELECT dictGetOrDefault('ssd_cache_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) FROM source_table; + SELECT dictGetOrDefault('ssd_cache_dictionary', 'v2', id+1, intDiv(NULL, id)) FROM source_table; + SELECT dictGetOrDefault('ssd_cache_dictionary', 'v3', id+1, intDiv(NULL, id)) FROM source_table; - DROP DICTIONARY 02950_database_for_ssd_cache_dictionary.ssd_cache_dictionary; - DROP TABLE 02950_database_for_ssd_cache_dictionary.source_table; - DROP DATABASE 02950_database_for_ssd_cache_dictionary;" + DROP DICTIONARY ssd_cache_dictionary; + DROP TABLE source_table;" From c58643897a95d1e58bf25d65dfaad52aa0eddefc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 18:26:31 +0200 Subject: [PATCH 1761/2361] Make 02002_row_level_filter_bug parallelizable --- .../0_stateless/02002_row_level_filter_bug.sh | 31 
+++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/02002_row_level_filter_bug.sh b/tests/queries/0_stateless/02002_row_level_filter_bug.sh index d15a26f48f5..557aa738217 100755 --- a/tests/queries/0_stateless/02002_row_level_filter_bug.sh +++ b/tests/queries/0_stateless/02002_row_level_filter_bug.sh @@ -1,11 +1,12 @@ #!/usr/bin/env bash -# Tags: no-parallel -# Tag no-parallel: create user CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +TEST_ROLE="${CLICKHOUSE_DATABASE}_role" +TEST_USER="${CLICKHOUSE_DATABASE}_user" +TEST_POLICY="${CLICKHOUSE_DATABASE}_policy" $CLICKHOUSE_CLIENT --query "drop table if exists test_table" $CLICKHOUSE_CLIENT --query "CREATE TABLE test_table @@ -37,26 +38,26 @@ arrayJoin(['AWD','ZZZ']) as team, arrayJoin([3183,3106,0,3130,3108,3126,3109,3107,3182,3180,3129,3128,3125,3266]) as a FROM numbers(600);" -$CLICKHOUSE_CLIENT --query "DROP ROLE IF exists AWD;" -$CLICKHOUSE_CLIENT --query "create role AWD;" -$CLICKHOUSE_CLIENT --query "REVOKE ALL ON *.* FROM AWD;" +$CLICKHOUSE_CLIENT --query "DROP ROLE IF EXISTS ${TEST_ROLE};" +$CLICKHOUSE_CLIENT --query "create role ${TEST_ROLE};" +$CLICKHOUSE_CLIENT --query "REVOKE ALL ON *.* FROM ${TEST_ROLE};" -$CLICKHOUSE_CLIENT --query "DROP USER IF EXISTS AWD_user;" -$CLICKHOUSE_CLIENT --query "CREATE USER AWD_user IDENTIFIED WITH plaintext_password BY 'AWD_pwd' DEFAULT ROLE AWD;" +$CLICKHOUSE_CLIENT --query "DROP USER IF EXISTS ${TEST_USER};" +$CLICKHOUSE_CLIENT --query "CREATE USER ${TEST_USER} IDENTIFIED WITH plaintext_password BY 'AWD_pwd' DEFAULT ROLE ${TEST_ROLE};" -$CLICKHOUSE_CLIENT --query "GRANT SELECT ON test_table TO AWD;" +$CLICKHOUSE_CLIENT --query "GRANT SELECT ON test_table TO ${TEST_ROLE};" -$CLICKHOUSE_CLIENT --query "DROP ROW POLICY IF EXISTS ttt_bu_test_table_AWD ON test_table;" -$CLICKHOUSE_CLIENT --query "CREATE ROW POLICY ttt_bu_test_table_AWD ON test_table FOR SELECT USING team = 'AWD' TO AWD;" +$CLICKHOUSE_CLIENT --query "DROP ROW POLICY IF EXISTS ${TEST_POLICY} ON test_table;" +$CLICKHOUSE_CLIENT --query "CREATE ROW POLICY ${TEST_POLICY} ON test_table FOR SELECT USING team = 'AWD' TO ${TEST_ROLE};" -$CLICKHOUSE_CLIENT --user=AWD_user --password=AWD_pwd --query " +$CLICKHOUSE_CLIENT --user=${TEST_USER} --password=AWD_pwd --query " SELECT count() AS count FROM test_table WHERE t_date = '2021-07-15' AND c = 'aur' AND a=3130; " -$CLICKHOUSE_CLIENT --user=AWD_user --password=AWD_pwd --query " +$CLICKHOUSE_CLIENT --user=${TEST_USER} --password=AWD_pwd --query " SELECT team, a, @@ -70,8 +71,12 @@ GROUP BY t_date; " -$CLICKHOUSE_CLIENT --user=AWD_user --password=AWD_pwd --query " +$CLICKHOUSE_CLIENT --user=${TEST_USER} --password=AWD_pwd --query " SELECT count() AS count FROM test_table WHERE (t_date = '2021-07-15') AND (c = 'aur') AND (a = 313) " + +$CLICKHOUSE_CLIENT --query "DROP ROLE IF EXISTS ${TEST_ROLE};" +$CLICKHOUSE_CLIENT --query "DROP USER IF EXISTS ${TEST_USER};" +$CLICKHOUSE_CLIENT --query "DROP ROW POLICY IF EXISTS ${TEST_POLICY} ON test_table;" From 57b6d461418d4ad8e2d5cb579d5671abc077117e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 18:28:01 +0200 Subject: [PATCH 1762/2361] Make 01764_table_function_dictionary parallelizable --- tests/queries/0_stateless/01764_table_function_dictionary.sql | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01764_table_function_dictionary.sql 
b/tests/queries/0_stateless/01764_table_function_dictionary.sql index 76e7213b367..e37f8d2a290 100644 --- a/tests/queries/0_stateless/01764_table_function_dictionary.sql +++ b/tests/queries/0_stateless/01764_table_function_dictionary.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel - DROP TABLE IF EXISTS table_function_dictionary_source_table; CREATE TABLE table_function_dictionary_source_table ( @@ -18,7 +16,7 @@ CREATE DICTIONARY table_function_dictionary_test_dictionary value UInt64 DEFAULT 0 ) PRIMARY KEY id -SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_function_dictionary_source_table')) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' DATABASE currentDatabase() TABLE 'table_function_dictionary_source_table')) LAYOUT(DIRECT()); SELECT * FROM dictionary('table_function_dictionary_test_dictionary'); From 5c4f4f85036838a7f145139f36949592720a1289 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 6 Aug 2024 18:52:29 +0200 Subject: [PATCH 1763/2361] do not add to custom disk names --- src/Disks/DiskFomAST.cpp | 88 +++++++++++------------------------- src/Disks/DiskSelector.h | 2 +- src/Disks/StoragePolicy.h | 1 - src/Interpreters/Context.cpp | 5 +- 4 files changed, 29 insertions(+), 67 deletions(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index c8a4f88547f..2a5e7368de9 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -18,7 +18,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; extern const int UNKNOWN_DISK; } @@ -27,8 +26,6 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string { Poco::Util::AbstractConfiguration::Keys disk_settings_keys; config->keys(disk_settings_keys); - - // Check that no settings are defined when disk from the config is referred. if (disk_settings_keys.empty()) throw Exception( @@ -36,74 +33,48 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string "Disk function has no arguments. Invalid disk description."); if (disk_settings_keys.size() == 1 && disk_settings_keys.front() == "name" && !attach) - { throw Exception( ErrorCodes::BAD_ARGUMENTS, "Disk function `{}` has to have the other arguments which describe the disk. Invalid disk description.", serialization); - } + + auto disk_settings_hash = sipHash128(serialization.data(), serialization.size()); std::string disk_name; if (config->has("name")) { disk_name = config->getString("name"); } - - if (!disk_name.empty()) - { - if (disk_name.starts_with(DiskSelector::CUSTOM_DISK_PREFIX)) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Disk name `{}` could not start with `{}`", - disk_name, DiskSelector::CUSTOM_DISK_PREFIX); - - if (auto disk = context->tryGetDisk(disk_name)) - { - /// the disk is defined by config - if (disk->isCustomDisk()) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Disk with name `{}` already exist as a custom disk but the name does not start with `{}`", - disk_name, - DiskSelector::CUSTOM_DISK_PREFIX); - - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The disk `{}` is already exist. 
It is impossible to redefine it.", disk_name); - } - } - - auto disk_settings_hash = sipHash128(serialization.data(), serialization.size()); - - std::string custom_disk_name; - if (disk_name.empty()) + else { /// We need a unique name for a created custom disk, but it needs to be the same /// after table is reattached or server is restarted, so take a hash of the disk /// configuration serialized ast as a disk name suffix. - custom_disk_name = toString(DiskSelector::CUSTOM_DISK_PREFIX) + "noname_" + toString(disk_settings_hash); - } - else - { - custom_disk_name = toString(DiskSelector::CUSTOM_DISK_PREFIX) + disk_name; + disk_name = DiskSelector::TMP_INTERNAL_DISK_PREFIX + toString(disk_settings_hash); } - auto result_disk = context->getOrCreateDisk(custom_disk_name, [&](const DisksMap & disks_map) -> DiskPtr { - auto disk = DiskFactory::instance().create( + + auto disk = context->getOrCreateDisk(disk_name, [&](const DisksMap & disks_map) -> DiskPtr { + auto result = DiskFactory::instance().create( disk_name, *config, /* config_path */"", context, disks_map, /* attach */attach, /* custom_disk */true); /// Mark that disk can be used without storage policy. - disk->markDiskAsCustom(disk_settings_hash); - return disk; + result->markDiskAsCustom(disk_settings_hash); + return result; }); - if (!result_disk->isCustomDisk()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Disk with name `{}` expected to be custom disk", disk_name); + if (!disk->isCustomDisk()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The disk `{}` is already exist and described by the config." + " It is impossible to redefine it.", + disk_name); - if (result_disk->getCustomDiskSettings() != disk_settings_hash && !attach) + if (disk->getCustomDiskSettings() != disk_settings_hash && !attach) throw Exception( ErrorCodes::BAD_ARGUMENTS, "The disk `{}` is already configured as a custom disk in another table. It can't be redefined with different settings.", disk_name); - if (!attach && !result_disk->isRemote()) + if (!attach && !disk->isRemote()) { static constexpr auto custom_local_disks_base_dir_in_config = "custom_local_disks_base_directory"; auto disk_path_expected_prefix = context->getConfigRef().getString(custom_local_disks_base_dir_in_config, ""); @@ -114,14 +85,14 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string "Base path for custom local disks must be defined in config file by `{}`", custom_local_disks_base_dir_in_config); - if (!pathStartsWith(result_disk->getPath(), disk_path_expected_prefix)) + if (!pathStartsWith(disk->getPath(), disk_path_expected_prefix)) throw Exception( ErrorCodes::BAD_ARGUMENTS, "Path of the custom local disk must be inside `{}` directory", disk_path_expected_prefix); } - return custom_disk_name; + return disk_name; } class DiskConfigurationFlattener @@ -168,22 +139,17 @@ std::string DiskFomAST::createCustomDisk(const ASTPtr & disk_function_ast, Conte std::string DiskFomAST::getConfigDefinedDisk(const std::string &disk_name, ContextPtr context) { - if (disk_name.starts_with(DiskSelector::CUSTOM_DISK_PREFIX)) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Disk name `{}` could not start with `{}`", - disk_name, DiskSelector::CUSTOM_DISK_PREFIX); - if (auto result = context->tryGetDisk(disk_name)) - return disk_name; + { + if (result->isCustomDisk()) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk name `{}` is a custom disk that is used in other table." + "That disk could not be used by a reference by other tables. 
The custom disk should be fully specified with a disk function.", + disk_name); - std::string custom_disk_name = DiskSelector::CUSTOM_DISK_PREFIX + disk_name; - if (auto result = context->tryGetDisk(custom_disk_name)) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Disk name `{}` is a custom disk that is used in other table." - "That disk could not be used by a reference. The custom disk should be fully specified with a disk function.", - disk_name); + return disk_name; + } throw Exception(ErrorCodes::UNKNOWN_DISK, "Unknown disk {}", disk_name); } diff --git a/src/Disks/DiskSelector.h b/src/Disks/DiskSelector.h index 0f7424460a2..e6e2c257911 100644 --- a/src/Disks/DiskSelector.h +++ b/src/Disks/DiskSelector.h @@ -20,7 +20,7 @@ using DiskSelectorPtr = std::shared_ptr; class DiskSelector { public: - static constexpr auto CUSTOM_DISK_PREFIX = "__"; + static constexpr auto TMP_INTERNAL_DISK_PREFIX = "__tmp_internal_"; explicit DiskSelector(std::unordered_set skip_types_ = {}) : skip_types(skip_types_) { } DiskSelector(const DiskSelector & from) = default; diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index e23598214b3..ccf2e2071b2 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -120,7 +120,6 @@ class StoragePolicySelector { public: static constexpr auto TMP_STORAGE_POLICY_PREFIX = "__"; - static_assert(std::string_view(DiskSelector::CUSTOM_DISK_PREFIX) == std::string_view(TMP_STORAGE_POLICY_PREFIX)); StoragePolicySelector(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, DiskSelectorPtr disks); diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 0acbef26805..30f77f799e9 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -4,7 +4,6 @@ #include #include #include -#include "Common/Logger.h" #include #include #include @@ -4432,11 +4431,9 @@ StoragePolicyPtr Context::getStoragePolicy(const String & name) const StoragePolicyPtr Context::getStoragePolicyFromDisk(const String & disk_name) const { - LOG_DEBUG(getLogger("StoragePolicy"), "getStoragePolicyFromDisk disk_name {}", disk_name); - std::lock_guard lock(shared->storage_policies_mutex); - const std::string storage_policy_name = disk_name.starts_with(DiskSelector::CUSTOM_DISK_PREFIX) ? 
disk_name : StoragePolicySelector::TMP_STORAGE_POLICY_PREFIX + disk_name; + const std::string storage_policy_name = StoragePolicySelector::TMP_STORAGE_POLICY_PREFIX + disk_name; auto storage_policy_selector = getStoragePolicySelector(lock); StoragePolicyPtr storage_policy = storage_policy_selector->tryGet(storage_policy_name); From f0fdba3bbeb8687e38f89d8379e70cfd39db1252 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 6 Aug 2024 19:02:49 +0200 Subject: [PATCH 1764/2361] CI: Strict job timeout 1.5h for tests, 2h for builds --- tests/ci/ci_config.py | 1 - tests/ci/ci_definitions.py | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 7a19eb6f827..f578cd8b559 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -510,7 +510,6 @@ class CI: JobNames.LIBFUZZER_TEST: JobConfig( required_builds=[BuildNames.FUZZERS], run_by_label=Tags.libFuzzer, - timeout=10800, run_command='libfuzzer_test_check.py "$CHECK_NAME"', runner_type=Runners.STYLE_CHECKER, ), diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 48847b0d7a6..69e7ed259d5 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -331,7 +331,7 @@ class JobConfig: # will be triggered for the job if omitted in CI workflow yml run_command: str = "" # job timeout, seconds - timeout: Optional[int] = None + timeout: Optional[int] = 5400 # sets number of batches for a multi-batch job num_batches: int = 1 # label that enables job in CI, if set digest isn't used @@ -420,7 +420,6 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=9000, ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", @@ -531,7 +530,6 @@ class CommonJobConfigs: docker=["clickhouse/sqllogic-test"], ), run_command="sqllogic_test.py", - timeout=10800, release_only=True, runner_type=Runners.FUNC_TESTER, ) @@ -543,7 +541,6 @@ class CommonJobConfigs: docker=["clickhouse/sqltest"], ), run_command="sqltest.py", - timeout=10800, release_only=True, runner_type=Runners.FUZZER_UNIT_TESTER, ) @@ -613,6 +610,7 @@ class CommonJobConfigs: docker=["clickhouse/binary-builder"], git_submodules=True, ), + timeout=7200, run_command="build_check.py $BUILD_NAME", runner_type=Runners.BUILDER, ) From 586b1df3ee3f683e61c92e55d85e31f163eaa5af Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Tue, 6 Aug 2024 19:38:54 +0200 Subject: [PATCH 1765/2361] Add keeper error description to the message --- src/Storages/StorageReplicatedMergeTree.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 2d826c6c2df..b85c7145a91 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -800,7 +800,8 @@ void StorageReplicatedMergeTree::createNewZooKeeperNodes() { auto res = future.get(); if (res.error != Coordination::Error::ZOK && res.error != Coordination::Error::ZNODEEXISTS) - throw Coordination::Exception(res.error, "Failed to create new nodes {} at {}", res.path_created, zookeeper_path); + throw Coordination::Exception(res.error, "Failed to create new nodes {} at {} with error {}", + res.path_created, zookeeper_path, Coordination::errorMessage(res.error)); } } From 9c92c26edcb8ac83191f8a814196aa05a5600730 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 6 Aug 2024 17:51:26 +0000 Subject: [PATCH 1766/2361] Simplify code. 
--- src/Storages/MergeTree/MergeTreeData.cpp | 9 +++--- .../MergeTree/registerStorageMergeTree.cpp | 13 +++++++++ src/Storages/StorageFactory.cpp | 28 ------------------- 3 files changed, 17 insertions(+), 33 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 98e1cddcf4b..dbb3e39c12e 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3216,13 +3216,12 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context /// Block the case of alter table add projection for special merge trees. if (std::any_of(commands.begin(), commands.end(), [](const AlterCommand & c) { return c.type == AlterCommand::ADD_PROJECTION; })) { - const std::unordered_set allowed_storages{"MergeTree", "ReplicatedMergeTree", "SharedMergeTree"}; - if (auto storage_name = getName(); !allowed_storages.contains(storage_name) + if (merging_params.mode != MergingParams::Mode::Ordinary && settings_from_storage->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Projection is fully supported in (Replictaed, Shared)MergeTree, but also allowed in non-throw mode with other" - " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode." - " Current storage name is {}.", storage_name); + "Projection is fully supported in {} with deduplicate_merge_projection_mode = throw. " + "Use 'drop' or 'rebuild' option of deduplicate_merge_projection_mode.", + getName()); } commands.apply(new_metadata, local_context); diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 3f0603f6900..b7887c35590 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -34,6 +34,7 @@ namespace ErrorCodes extern const int UNKNOWN_STORAGE; extern const int NO_REPLICA_NAME_GIVEN; extern const int CANNOT_EXTRACT_TABLE_STRUCTURE; + extern const int SUPPORT_IS_DISABLED; } @@ -829,6 +830,18 @@ static StoragePtr create(const StorageFactory::Arguments & args) "Floating point partition key is not supported: {}", metadata.partition_key.column_names[i]); } + if (metadata.hasProjections() && args.mode == LoadingStrictnessLevel::CREATE) + { + /// Now let's handle the merge tree family. Note we only handle in the mode of CREATE due to backward compatibility. + /// Otherwise, it would fail to start in the case of existing projections with special mergetree. + if (merging_params.mode != MergeTreeData::MergingParams::Mode::Ordinary + && storage_settings->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "Projection is fully supported in {}MergeTree with deduplicate_merge_projection_mode = throw. 
" + "Use 'drop' or 'rebuild' option of deduplicate_merge_projection_mode.", + merging_params.getModeName()); + } + if (arg_num != arg_cnt) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong number of engine arguments."); diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 557f53a9ada..25cb81fa5fa 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -20,7 +20,6 @@ namespace ErrorCodes extern const int FUNCTION_CANNOT_HAVE_PARAMETERS; extern const int BAD_ARGUMENTS; extern const int DATA_TYPE_CANNOT_BE_USED_IN_TABLES; - extern const int SUPPORT_IS_DISABLED; } @@ -201,33 +200,6 @@ StoragePtr StorageFactory::get( check_feature( "projections", [](StorageFeatures features) { return features.supports_projections; }); - - /// Now let's handle the merge tree family. Note we only handle in the mode of CREATE due to backward compatibility. - /// Otherwise, it would fail to start in the case of existing projections with special mergetree. - chassert(query.storage->engine); - const std::unordered_set allowed_engines{"MergeTree", "ReplicatedMergeTree", "SharedMergeTree"}; - if (auto engine_name(query.storage->engine->name); mode == LoadingStrictnessLevel::CREATE - && !allowed_engines.contains(engine_name)) - { - /// default throw mode in deduplicate_merge_projection_mode - bool projection_allowed = false; - if (auto * setting = query.storage->settings; setting != nullptr) - { - for (const auto & change : setting->changes) - { - if (change.name == "deduplicate_merge_projection_mode" && change.value != Field("throw")) - { - projection_allowed = true; - break; - } - } - } - if (!projection_allowed) - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, - "Projection is fully supported in (Replictaed, Shared)MergeTree, but also allowed in non-throw mode with other" - " mergetree family members. Consider drop or rebuild option of deduplicate_merge_projection_mode." - " Current storage name is {}.", engine_name); - } } } } From c74460b47e23ced4562d6f5123d61042c42d2896 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 6 Aug 2024 17:53:23 +0000 Subject: [PATCH 1767/2361] Cleanup. 
--- src/Storages/StorageFactory.cpp | 2 -- .../0_stateless/03206_projection_merge_special_mergetree.sql | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 25cb81fa5fa..060b271d8f4 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -196,11 +196,9 @@ StoragePtr StorageFactory::get( [](StorageFeatures features) { return features.supports_skipping_indices; }); if (query.columns_list && query.columns_list->projections && !query.columns_list->projections->children.empty()) - { check_feature( "projections", [](StorageFeatures features) { return features.supports_projections; }); - } } } diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index d3448138396..82684f754b6 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -103,4 +103,4 @@ SELECT FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); -DROP TABLE tp; \ No newline at end of file +DROP TABLE tp; From bf33aabec412aa2729bfd58f3e717c5b8285acb8 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 17 Jun 2024 10:39:10 +0200 Subject: [PATCH 1768/2361] Add documentation. (cherry picked from commit 083fff6ed6ccff44b678ae3ea6af75501d9359fb) --- docs/en/engines/table-engines/index.md | 1 + .../table-engines/integrations/time-series.md | 295 ++++++++++++++++++ docs/en/interfaces/prometheus.md | 160 ++++++++++ .../settings.md | 42 --- docs/en/operations/settings/settings.md | 11 + .../table-functions/timeSeriesData.md | 28 ++ .../table-functions/timeSeriesMetrics.md | 28 ++ .../table-functions/timeSeriesTags.md | 28 ++ .../aspell-ignore/en/aspell-dict.txt | 6 + 9 files changed, 557 insertions(+), 42 deletions(-) create mode 100644 docs/en/engines/table-engines/integrations/time-series.md create mode 100644 docs/en/interfaces/prometheus.md create mode 100644 docs/en/sql-reference/table-functions/timeSeriesData.md create mode 100644 docs/en/sql-reference/table-functions/timeSeriesMetrics.md create mode 100644 docs/en/sql-reference/table-functions/timeSeriesTags.md diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index 5e81eacc937..20c7c511aa9 100644 --- a/docs/en/engines/table-engines/index.md +++ b/docs/en/engines/table-engines/index.md @@ -61,6 +61,7 @@ Engines in the family: - [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md) - [PostgreSQL](../../engines/table-engines/integrations/postgresql.md) - [S3Queue](../../engines/table-engines/integrations/s3queue.md) +- [TimeSeries](../../engines/table-engines/integrations/time-series.md) ### Special Engines {#special-engines} diff --git a/docs/en/engines/table-engines/integrations/time-series.md b/docs/en/engines/table-engines/integrations/time-series.md new file mode 100644 index 00000000000..4830fd61d27 --- /dev/null +++ b/docs/en/engines/table-engines/integrations/time-series.md @@ -0,0 +1,295 @@ +--- +slug: /en/engines/table-engines/special/time_series +sidebar_position: 60 +sidebar_label: TimeSeries +--- + +# TimeSeries Engine [Experimental] + +A table engine storing time series, i.e. a set of values associated with timestamps and tags (or labels): + +``` +metric_name1[tag1=value1, tag2=value2, ...] 
= {timestamp1: value1, timestamp2: value2, ...} +metric_name2[...] = ... +``` + +:::info +This is an experimental feature that may change in backwards-incompatible ways in the future releases. +Enable usage of the TimeSeries table engine +with [allow_experimental_time_series_table](../../../operations/settings/settings.md#allow-experimental-time-series-table) setting. +Input the command `set allow_experimental_time_series_table = 1`. +::: + +## Syntax {#syntax} + +``` sql +CREATE TABLE name [(columns)] ENGINE=TimeSeries +[SETTINGS var1=value1, ...] +[DATA db.data_table_name | DATA ENGINE data_table_engine(arguments)] +[TAGS db.tags_table_name | TAGS ENGINE tags_table_engine(arguments)] +[METRICS db.metrics_table_name | METRICS ENGINE metrics_table_engine(arguments)] +``` + +## Usage {#usage} + +It's easier to start with everything set by default (it's allowed to create a `TimeSeries` table without specifying a list of columns): + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries +``` + +Then this table can be used with the following protocols (a port must be assigned in the server configuration): +- [prometheus remote-write](../../../interfaces/prometheus.md#remote-write) +- [prometheus remote-read](../../../interfaces/prometheus.md#remote-read) + +## Target tables {#target-tables} + +A `TimeSeries` table doesn't have its own data, everything is stored in its target tables. +This is similar to how a [materialized view](../../../sql-reference/statements/create/view#materialized-view) works, +with the difference that a materialized view has one target table +whereas a `TimeSeries` table has three target tables named [data]{#data-table}, [tags]{#tags-table], and [metrics]{#metrics-table}. + +The target tables can be either specified explicitly in the `CREATE TABLE` query +or the `TimeSeries` table engine can generate inner target tables automatically. + +The target tables are the following: +1. The _data_ table {#data-table} contains time series associated with some identifier. +The _data_ table must have columns: + +| Name | Mandatory? | Default type | Possible types | Description | +|---|---|---|---|---| +| `id` | [x] | `UUID` | any | Identifies a combination of a metric names and tags | +| `timestamp` | [x] | `DateTime64(3)` | `DateTime64(X)` | A time point | +| `value` | [x] | `Float64` | `Float32` or `Float64` | A value associated with the `timestamp` | + +2. The _tags_ table {#tags-table} contains identifiers calculated for each combination of a metric name and tags. +The _tags_ table must have columns: + +| Name | Mandatory? | Default type | Possible types | Description | +|---|---|---|---|---| +| `id` | [x] | `UUID` | any (must match the type of `id` in the [data]{#data-table} table) | An `id` identifies a combination of a metric name and tags. 
The DEFAULT expression specifies how to calculate such an identifier | +| `metric_name` | [x] | `LowCardinality(String)` | `String` or `LowCardinality(String)` | The name of a metric | +| `` | [ ] | `String` | `String` or `LowCardinality(String)` or `LowCardinality(Nullable(String))` | The value of a specific tag, the tag's name and the name of a corresponding column are specified in the [tags_to_columns](#settings) setting | +| `tags` | [x] | `Map(LowCardinality(String), String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Map of tags excluding the tag `__name__` containing the name of a metric and excluding tags with names enumerated in the [tags_to_columns](#settings) setting | +| `all_tags` | [ ] | `Map(String, String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Ephemeral column, each row is a map of all the tags excluding only the tag `__name__` containing the name of a metric. The only purpose of that column is to be used while calculating `id` | +| `min_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Minimum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | +| `max_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Maximum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | + +3. The _metrics_ table {#metrics-table} contains some information about metrics been collected, the types of those metrics and their descriptions. +The _metrics_ table must have columns: + +| Name | Mandatory? | Default type | Possible types | Description | +|---|---|---|---|---| +| `metric_family_name` | [x] | `String` | `String` or `LowCardinality(String)` | The name of a metric family | +| `type` | [x] | `String` | `String` or `LowCardinality(String)` | The type of a metric family, one of "counter", "gauge", "summary", "stateset", "histogram", "gaugehistogram" | +| `unit` | [x] | `String` | `String` or `LowCardinality(String)` | The unit used in a metric | +| `help` | [x] | `String` | `String` or `LowCardinality(String)` | The description of a metric | + +Any row inserted into a `TimeSeries` table will be in fact stored in those three target tables. +A `TimeSeries` table contains all those columns from the [data]{#data-table}, [tags]{#tags-table}, [metrics]{#metrics-table} tables. + +## Creation {#creation} + +There are multiple ways to create a table with the `TimeSeries` table engine. 
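Before the creation statements below, a short illustration (not part of the patch) of how those three target tables can be inspected once a `TimeSeries` table exists; the `timeSeries*` table functions used here are documented later in this series, and the column names follow the tables described above.

```sql
-- Assumes an existing table `my_table` with ENGINE = TimeSeries
-- and allow_experimental_time_series_table = 1.
SELECT count() FROM timeSeriesData(my_table);        -- raw (id, timestamp, value) samples

SELECT metric_name, tags, min_time, max_time         -- one row per time series
FROM timeSeriesTags(my_table)
LIMIT 10;

SELECT metric_family_name, type, unit, help          -- metric metadata
FROM timeSeriesMetrics(my_table);
```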
+The simplest statement + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries +``` + +will actually create the following table (you can see that by executing `SHOW CREATE TABLE my_table`): + +``` sql +CREATE TABLE my_table +( + `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), + `timestamp` DateTime64(3), + `value` Float64, + `metric_name` LowCardinality(String), + `tags` Map(LowCardinality(String), String), + `all_tags` Map(String, String), + `min_time` Nullable(DateTime64(3)), + `max_time` Nullable(DateTime64(3)), + `metric_family_name` String, + `type` String, + `unit` String, + `help` String +) +ENGINE = TimeSeries +DATA ENGINE = MergeTree ORDER BY (id, timestamp) +DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +TAGS ENGINE = AggregatingMergeTree PRIMARY KEY metric_name ORDER BY (metric_name, id) +TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +METRICS ENGINE = ReplacingMergeTree ORDER BY metric_family_name +METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +So the columns were generated automatically and also there are three inner UUIDs in this statement - +one per each inner target table that was created. +(Inner UUIDs are not shown normally until setting +[show_table_uuid_in_table_create_query_if_not_nil](../../../operations/settings/settings#show_table_uuid_in_table_create_query_if_not_nil) +is set.) + +Inner target tables have names like `.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, +`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, `.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +and each target table has columns which is a subset of the columns of the main `TimeSeries` table: + +``` sql +CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `id` UUID, + `timestamp` DateTime64(3), + `value` Float64 +) +ENGINE = MergeTree +ORDER BY (id, timestamp) +``` + +``` sql +CREATE TABLE default.`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), + `metric_name` LowCardinality(String), + `tags` Map(LowCardinality(String), String), + `all_tags` Map(String, String) EPHEMERAL, + `min_time` SimpleAggregateFunction(min, Nullable(DateTime64(3))), + `max_time` SimpleAggregateFunction(max, Nullable(DateTime64(3))) +) +ENGINE = AggregatingMergeTree +PRIMARY KEY metric_name +ORDER BY (metric_name, id) +``` + +``` sql +CREATE TABLE default.`.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `metric_family_name` String, + `type` String, + `unit` String, + `help` String +) +ENGINE = ReplacingMergeTree +ORDER BY metric_family_name +``` + +## Adjusting types of columns {#adjusting-column-types} + +You can adjust the types of almost any column of the inner target tables by specifying them explicitly +while defining the main table. For example, + +``` sql +CREATE TABLE my_table +( + timestamp DateTime64(6) +) ENGINE=TimeSeries +``` + +will make the inner [data]{#data-table} table store timestamp in microseconds instead of milliseconds: + +``` sql +CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +( + `id` UUID, + `timestamp` DateTime64(6), + `value` Float64 +) +ENGINE = MergeTree +ORDER BY (id, timestamp) +``` + +## The `id` column {#id-column} + +The `id` column contains identifiers, every identifier is calculated for a combination of a metric name and tags. +The DEFAULT expression for the `id` column is an expression which will be used to calculate such identifiers. 
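As a rough standalone illustration (not part of the patch) of what the generated default expression `reinterpretAsUUID(sipHash128(metric_name, all_tags))` produces for a single series:

```sql
-- The identifier is a 128-bit hash of the metric name plus the full tag map,
-- reinterpreted as a UUID; the literal metric name and tags are made up for this example.
SELECT reinterpretAsUUID(sipHash128('http_requests_total', map('job', 'api', 'instance', 'host1'))) AS id;
```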
+Both the type of the `id` column and that expression can be adjusted by specifying them explicitly: + +``` sql +CREATE TABLE my_table +( + id UInt64 DEFAULT sipHash64(metric_name, all_tags) +) ENGINE=TimeSeries +``` + +## The `tags` and `all_tags` columns {#tags-and-all-tags} + +There are two columns containing maps of tags - `tags` and `all_tags`. In this example they mean the same, however they can be different +if setting `tags_to_columns` is used. This setting allows to specify that a specific tag should be stored in a separate column instead of storing +in a map inside the `tags` column: + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} +``` + +This statement will add columns +``` + `instance` String, + `job` String +``` +to the definition of both `my_table` and its inner [tags]{#tags-table} target table. In this case the `tags` column will not contain tags `instance` and `job`, +but the `all_tags` column will contain them. The `all_tags` column is ephemeral and its only purpose to be used in the DEFAULT expression +for the `id` column. + +The types of columns can be adjusted by specifying them explicitly: + +``` sql +CREATE TABLE my_table (instance LowCardinality(String), job LowCardinality(Nullable(String))) +ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} +``` + +## Table engines of inner target tables {#inner-table-engines} + +By default inner target tables use the following table engines: +- the [data]{#data-table} table uses [MergeTree](../mergetree-family/mergetree); +- the [tags]{#tags-table} table uses [AggregatingMergeTree](../mergetree-family/aggregatingmergetree) because the same data is often inserted multiple times to this table so we need a way +to remove duplicates, and also because it's required to do aggregation for columns `min_time` and `max_time`; +- the [metrics]{#metrics-table} table uses [ReplacingMergeTree](../mergetree-family/replacingmergetree) because the same data is often inserted multiple times to this table so we need a way +to remove duplicates. + +Other table engines also can be used for inner target tables if it's specified so: + +``` sql +CREATE TABLE my_table ENGINE=TimeSeries +DATA ENGINE=ReplicatedMergeTree +TAGS ENGINE=ReplicatedAggregatingMergeTree +METRICS ENGINE=ReplicatedReplacingMergeTree +``` + +## External target tables {#external-target-tables} + +It's possible to make a `TimeSeries` table use a manually created table: + +``` sql +CREATE TABLE data_for_my_table +( + `id` UUID, + `timestamp` DateTime64(3), + `value` Float64 +) +ENGINE = MergeTree +ORDER BY (id, timestamp); + +CREATE TABLE tags_for_my_table ... + +CREATE TABLE metrics_for_my_table ... + +CREATE TABLE my_table ENGINE=TimeSeries DATA data_for_my_table TAGS tags_for_my_table METRICS metrics_for_my_table; +``` + +## Settings {#settings} + +Here is a list of settings which can be specified while defining a `TimeSeries` table: + +| Name | Type | Default | Description | +|---|---|---|---| +| `tags_to_columns` | Map | {} | Map specifying which tags should be put to separate columns in the [tags]{#tags-table} table. 
Syntax: `{'tag1': 'column1', 'tag2' : column2, ...}` | +| `use_all_tags_column_to_generate_id` | Bool | true | When generating an expression to calculate an identifier of a time series, this flag enables using the `all_tags` column in that calculation | +| `store_min_time_and_max_time` | Bool | true | If set to true then the table will store `min_time` and `max_time` for each time series | +| `aggregate_min_time_and_max_time` | Bool | true | When creating an inner target `tags` table, this flag enables using `SimpleAggregateFunction(min, Nullable(DateTime64(3)))` instead of just `Nullable(DateTime64(3))` as the type of the `min_time` column, and the same for the `max_time` column | +| `filter_by_min_time_and_max_time` | Bool | true | If set to true then the table will use the `min_time` and `max_time` columns for filtering time series | + +# Functions {#functions} + +Here is a list of functions supporting a `TimeSeries` table as an argument: +- [timeSeriesData](../../../sql-reference/table-functions/timeSeriesData.md) +- [timeSeriesTags](../../../sql-reference/table-functions/timeSeriesTags.md) +- [timeSeriesMetrics](../../../sql-reference/table-functions/timeSeriesMetrics.md) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md new file mode 100644 index 00000000000..75a68c59219 --- /dev/null +++ b/docs/en/interfaces/prometheus.md @@ -0,0 +1,160 @@ +--- +slug: /en/interfaces/prometheus +sidebar_position: 19 +sidebar_label: Prometheus protocols +--- + +# Prometheus protocols + +## Exposing metrics {#expose} + +:::note +ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. +::: + +ClickHouse can expose its own metrics for scraping from Prometheus: + +```xml + + 9363 + /metrics + true + true + true + true + + +Section `` can be used to make more extended handlers. +This section is similar to [](http.md) but works for prometheus protocols: + +```xml + + 9363 + + + /metrics + + expose_metrics + true + true + true + true + + + + +``` + +Settings: + +| Name | Default | Description | +|---|---|---|---| +| `port` | none | Port for serving the exposing metrics protocol. | +| `endpoint` | `/metrics` | HTTP endpoint for scraping metrics by prometheus server. Starts with `/`. Should not be used with the `` section. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | +| `metrics` | true | Expose metrics from the [system.metrics](../operations/system-tables/metrics.md) table. | +| `asynchronous_metrics` | true | Expose current metrics values from the [system.asynchronous_metrics](../operations/system-tables/asynchronous_metrics.md) table. | +| `events` | true | Expose metrics from the [system.events](../operations/system-tables/events.md) table. | +| `errors` | true | Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../operations/system-tables/errors.md) as well. | + +Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): +```bash +curl 127.0.0.1:9363/metrics +``` + +## Remote-write protocol {#remote-write} + +ClickHouse supports the [remote-write](https://prometheus.io/docs/specs/remote_write_spec/) protocol. 
+
+Data are received by this protocol and written to a [TimeSeries](../engines/table-engines/integrations/time-series.md) table
+(which should be created beforehand).
+
+```xml
+<prometheus>
+    <port>9363</port>
+    <handlers>
+        <my_rule_1>
+            <url>/write</url>
+            <handler>
+                <type>remote_write</type>
+                <database>db_name</database>
+                <table>time_series_table</table>
+            </handler>
+        </my_rule_1>
+    </handlers>
+</prometheus>
+```
+
+Settings:
+
+| Name | Default | Description |
+|---|---|---|
+| `port` | none | Port for serving the `remote-write` protocol. |
+| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. |
+| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to write data received by the `remote-write` protocol. This name can optionally contain the name of a database too. |
+| `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. |
+
+## Remote-read protocol {#remote-read}
+
+ClickHouse supports the [remote-read](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/) protocol.
+Data are read from a [TimeSeries](../engines/table-engines/integrations/time-series.md) table and sent via this protocol.
+
+```xml
+<prometheus>
+    <port>9363</port>
+    <handlers>
+        <my_rule_1>
+            <url>/read</url>
+            <handler>
+                <type>remote_read</type>
+                <database>db_name</database>
+                <table>time_series_table</table>
+            </handler>
+        </my_rule_1>
+    </handlers>
+</prometheus>
+``` + +Settings: + +| Name | Default | Description | +|---|---|---|---| +| `port` | none | Port for serving the `remote-read` protocol. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | +| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to read data to send by the `remote-read` protocol. This name can optionally contain the name of a database too. | +| `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | + +## Configuration for multiple protocols {#multiple-protocols} + +Multiple protocols can be specified together in one place: + +```xml + + 9363 + + + /metrics + + expose_metrics + true + true + true + true + + + + /write + + remote_writedb_name.time_series_table + + + + /read + + remote_readdb_name.time_series_table + + + + +``` diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index a1e3c292b04..68f61650e00 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -2112,48 +2112,6 @@ The trailing slash is mandatory. /var/lib/clickhouse/ ``` -## Prometheus {#prometheus} - -:::note -ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. -::: - -Exposing metrics data for scraping from [Prometheus](https://prometheus.io). - -Settings: - -- `endpoint` – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. -- `port` – Port for `endpoint`. -- `metrics` – Expose metrics from the [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) table. -- `events` – Expose metrics from the [system.events](../../operations/system-tables/events.md#system_tables-events) table. -- `asynchronous_metrics` – Expose current metrics values from the [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) table. -- `errors` - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../../operations/system-tables/asynchronous_metrics.md#system_tables-errors) as well. - -**Example** - -``` xml - - 0.0.0.0 - 8123 - 9000 - - - /metrics - 9363 - true - true - true - true - - - -``` - -Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): -```bash -curl 127.0.0.1:9363/metrics -``` - ## query_log {#query-log} Setting for logging queries received with the [log_queries=1](../../operations/settings/settings.md) setting. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 35547c3a9a6..feac12f9c99 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5614,3 +5614,14 @@ Default value: `1GiB`. Disable all insert and mutations (alter table update / alter table delete / alter table drop partition). Set to true, can make this node focus on reading queries. Default value: `false`. 
+ +## allow_experimental_time_series_table {#allow-experimental-time-series-table} + +Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine. + +Possible values: + +- 0 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is disabled. +- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled. + +Default value: `0`. diff --git a/docs/en/sql-reference/table-functions/timeSeriesData.md b/docs/en/sql-reference/table-functions/timeSeriesData.md new file mode 100644 index 00000000000..aa7a9d30c2a --- /dev/null +++ b/docs/en/sql-reference/table-functions/timeSeriesData.md @@ -0,0 +1,28 @@ +--- +slug: /en/sql-reference/table-functions/timeSeriesData +sidebar_position: 145 +sidebar_label: timeSeriesData +--- + +# timeSeriesData + +`timeSeriesData(db_name.time_series_table)` - Returns the [data](../../engines/table-engines/integrations/time-series.md#data-table) table +used by table `db_name.time_series_table` which table engine is [TimeSeries](../../engines/table-engines/integrations/time-series.md): + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries DATA data_table +``` + +The function also works if the _data_ table is inner: + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +The following queries are equivalent: + +``` sql +SELECT * FROM timeSeriesData(db_name.time_series_table); +SELECT * FROM timeSeriesData('db_name.time_series_table'); +SELECT * FROM timeSeriesData('db_name', 'time_series_table'); +``` diff --git a/docs/en/sql-reference/table-functions/timeSeriesMetrics.md b/docs/en/sql-reference/table-functions/timeSeriesMetrics.md new file mode 100644 index 00000000000..913f1185bca --- /dev/null +++ b/docs/en/sql-reference/table-functions/timeSeriesMetrics.md @@ -0,0 +1,28 @@ +--- +slug: /en/sql-reference/table-functions/timeSeriesMetrics +sidebar_position: 145 +sidebar_label: timeSeriesMetrics +--- + +# timeSeriesMetrics + +`timeSeriesMetrics(db_name.time_series_table)` - Returns the [metrics](../../engines/table-engines/integrations/time-series.md#metrics-table) table +used by table `db_name.time_series_table` which table engine is [TimeSeries](../../engines/table-engines/integrations/time-series.md): + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries METRICS metrics_table +``` + +The function also works if the _metrics_ table is inner: + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +The following queries are equivalent: + +``` sql +SELECT * FROM timeSeriesMetrics(db_name.time_series_table); +SELECT * FROM timeSeriesMetrics('db_name.time_series_table'); +SELECT * FROM timeSeriesMetrics('db_name', 'time_series_table'); +``` diff --git a/docs/en/sql-reference/table-functions/timeSeriesTags.md b/docs/en/sql-reference/table-functions/timeSeriesTags.md new file mode 100644 index 00000000000..663a7dc6ac8 --- /dev/null +++ b/docs/en/sql-reference/table-functions/timeSeriesTags.md @@ -0,0 +1,28 @@ +--- +slug: /en/sql-reference/table-functions/timeSeriesTags +sidebar_position: 145 +sidebar_label: timeSeriesTags +--- + +# timeSeriesTags + +`timeSeriesTags(db_name.time_series_table)` - Returns the [tags](../../engines/table-engines/integrations/time-series.md#tags-table) table +used by table `db_name.time_series_table` which table engine is 
[TimeSeries](../../engines/table-engines/integrations/time-series.md): + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries TAGS tags_table +``` + +The function also works if the _tags_ table is inner: + +``` sql +CREATE TABLE db_name.time_series_table ENGINE=TimeSeries TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' +``` + +The following queries are equivalent: + +``` sql +SELECT * FROM timeSeriesTags(db_name.time_series_table); +SELECT * FROM timeSeriesTags('db_name.time_series_table'); +SELECT * FROM timeSeriesTags('db_name', 'time_series_table'); +``` diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 8a9a8d2e76c..382e64f343c 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1672,6 +1672,7 @@ fuzzQuery fuzzer fuzzers gRPC +gaugehistogram gccMurmurHash gcem generateRandom @@ -2556,6 +2557,7 @@ startsWithUTF startswith statbox stateful +stateset stddev stddevPop stddevPopStable @@ -2687,6 +2689,10 @@ themself threadpool throwIf timeDiff +TimeSeries +timeSeriesData +timeSeriesMetrics +timeSeriesTags timeSlot timeSlots timeZone From 60175f80a9bb3e4fea46a2f8dc8d15595d987b85 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 6 Aug 2024 20:19:15 +0200 Subject: [PATCH 1769/2361] Revert "Add documentation." This reverts commit 083fff6ed6ccff44b678ae3ea6af75501d9359fb. --- docs/en/engines/table-engines/index.md | 1 - .../table-engines/integrations/time-series.md | 299 ------------------ docs/en/interfaces/prometheus.md | 161 ---------- .../settings.md | 42 +++ docs/en/operations/settings/settings.md | 11 - .../table-functions/timeSeriesData.md | 28 -- .../table-functions/timeSeriesMetrics.md | 28 -- .../table-functions/timeSeriesTags.md | 28 -- .../aspell-ignore/en/aspell-dict.txt | 6 - 9 files changed, 42 insertions(+), 562 deletions(-) delete mode 100644 docs/en/engines/table-engines/integrations/time-series.md delete mode 100644 docs/en/interfaces/prometheus.md delete mode 100644 docs/en/sql-reference/table-functions/timeSeriesData.md delete mode 100644 docs/en/sql-reference/table-functions/timeSeriesMetrics.md delete mode 100644 docs/en/sql-reference/table-functions/timeSeriesTags.md diff --git a/docs/en/engines/table-engines/index.md b/docs/en/engines/table-engines/index.md index 20c7c511aa9..5e81eacc937 100644 --- a/docs/en/engines/table-engines/index.md +++ b/docs/en/engines/table-engines/index.md @@ -61,7 +61,6 @@ Engines in the family: - [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md) - [PostgreSQL](../../engines/table-engines/integrations/postgresql.md) - [S3Queue](../../engines/table-engines/integrations/s3queue.md) -- [TimeSeries](../../engines/table-engines/integrations/time-series.md) ### Special Engines {#special-engines} diff --git a/docs/en/engines/table-engines/integrations/time-series.md b/docs/en/engines/table-engines/integrations/time-series.md deleted file mode 100644 index 2914bf4bf37..00000000000 --- a/docs/en/engines/table-engines/integrations/time-series.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -slug: /en/engines/table-engines/special/time_series -sidebar_position: 60 -sidebar_label: TimeSeries ---- - -# TimeSeries Engine [Experimental] - -A table engine storing time series, i.e. a set of values associated with timestamps and tags (or labels): - -```text -metric_name1[tag1=value1, tag2=value2, ...] = {timestamp1: value1, timestamp2: value2, ...} -metric_name2[...] = ... 
-``` - -:::info -This is an experimental feature that may change in backwards-incompatible ways in the future releases. -Enable usage of the TimeSeries table engine -with [allow_experimental_time_series_table](../../../operations/settings/settings.md#allow-experimental-time-series-table) setting. -Input the command `set allow_experimental_time_series_table = 1`. -::: - -## Syntax {#syntax} - -``` sql -CREATE TABLE name [(columns)] ENGINE=TimeSeries -[SETTINGS var1=value1, ...] -[DATA db.data_table_name | DATA ENGINE data_table_engine(arguments)] -[TAGS db.tags_table_name | TAGS ENGINE tags_table_engine(arguments)] -[METRICS db.metrics_table_name | METRICS ENGINE metrics_table_engine(arguments)] -``` - -## Usage {#usage} - -It's easier to start with everything set by default (it's allowed to create a `TimeSeries` table without specifying a list of columns): - -``` sql -CREATE TABLE my_table ENGINE=TimeSeries -``` - -Then this table can be used with the following protocols (a port must be assigned in the server configuration): - -- [prometheus remote-write](../../../interfaces/prometheus.md#remote-write) -- [prometheus remote-read](../../../interfaces/prometheus.md#remote-read) - -## Target tables {#target-tables} - -A `TimeSeries` table doesn't have its own data, everything is stored in its target tables. -This is similar to how a [materialized view](../../../sql-reference/statements/create/view#materialized-view) works, -with the difference that a materialized view has one target table -whereas a `TimeSeries` table has three target tables named [data]{#data-table}, [tags]{#tags-table], and [metrics]{#metrics-table}. - -The target tables can be either specified explicitly in the `CREATE TABLE` query -or the `TimeSeries` table engine can generate inner target tables automatically. - -The target tables are the following: - -1. The _data_ table {#data-table} contains time series associated with some identifier. -The _data_ table must have columns: - -| Name | Mandatory? | Default type | Possible types | Description | -|---|---|---|---|---| -| `id` | [x] | `UUID` | any | Identifies a combination of a metric names and tags | -| `timestamp` | [x] | `DateTime64(3)` | `DateTime64(X)` | A time point | -| `value` | [x] | `Float64` | `Float32` or `Float64` | A value associated with the `timestamp` | - -2. The _tags_ table {#tags-table} contains identifiers calculated for each combination of a metric name and tags. -The _tags_ table must have columns: - -| Name | Mandatory? | Default type | Possible types | Description | -|---|---|---|---|---| -| `id` | [x] | `UUID` | any (must match the type of `id` in the [data]{#data-table} table) | An `id` identifies a combination of a metric name and tags. 
The DEFAULT expression specifies how to calculate such an identifier | -| `metric_name` | [x] | `LowCardinality(String)` | `String` or `LowCardinality(String)` | The name of a metric | -| `` | [ ] | `String` | `String` or `LowCardinality(String)` or `LowCardinality(Nullable(String))` | The value of a specific tag, the tag's name and the name of a corresponding column are specified in the [tags_to_columns](#settings) setting | -| `tags` | [x] | `Map(LowCardinality(String), String)` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Map of tags excluding the tag `__name__` containing the name of a metric and excluding tags with names enumerated in the [tags_to_columns](#settings) setting | -| `all_tags` | [ ] | `Map(String, LowCardinality(String))` | `Map(String, String)` or `Map(LowCardinality(String), String)` or `Map(LowCardinality(String), LowCardinality(String))` | Ephemeral column, each row is a map of all the tags excluding only the tag `__name__` containing the name of a metric. The only purpose of that column is to be used while calculating `id` | -| `min_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Minimum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | -| `max_time` | [ ] | `Nullable(DateTime64(3))` | `DateTime64(X)` or `Nullable(DateTime64(X))` | Maximum timestamp of time series with that `id`. The column is created if [store_min_time_and_max_time](#settings) is `true` | - -3. The _metrics_ table {#metrics-table} contains some information about metrics been collected, the types of those metrics and their descriptions. -The _metrics_ table must have columns: - -| Name | Mandatory? | Default type | Possible types | Description | -|---|---|---|---|---| -| `metric_family_name` | [x] | `String` | `String` or `LowCardinality(String)` | The name of a metric family | -| `type` | [x] | `String` | `String` or `LowCardinality(String)` | The type of a metric family, one of "counter", "gauge", "summary", "stateset", "histogram", "gaugehistogram" | -| `unit` | [x] | `String` | `String` or `LowCardinality(String)` | The unit used in a metric | -| `help` | [x] | `String` | `String` or `LowCardinality(String)` | The description of a metric | - -Any row inserted into a `TimeSeries` table will be in fact stored in those three target tables. -A `TimeSeries` table contains all those columns from the [data]{#data-table}, [tags]{#tags-table}, [metrics]{#metrics-table} tables. - -## Creation {#creation} - -There are multiple ways to create a table with the `TimeSeries` table engine. 
-The simplest statement - -``` sql -CREATE TABLE my_table ENGINE=TimeSeries -``` - -will actually create the following table (you can see that by executing `SHOW CREATE TABLE my_table`): - -``` sql -CREATE TABLE my_table -( - `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), - `timestamp` DateTime64(3), - `value` Float64, - `metric_name` LowCardinality(String), - `tags` Map(LowCardinality(String), String), - `all_tags` Map(String, String), - `min_time` Nullable(DateTime64(3)), - `max_time` Nullable(DateTime64(3)), - `metric_family_name` String, - `type` String, - `unit` String, - `help` String -) -ENGINE = TimeSeries -DATA ENGINE = MergeTree ORDER BY (id, timestamp) -DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -TAGS ENGINE = AggregatingMergeTree PRIMARY KEY metric_name ORDER BY (metric_name, id) -TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -METRICS ENGINE = ReplacingMergeTree ORDER BY metric_family_name -METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -``` - -So the columns were generated automatically and also there are three inner UUIDs in this statement - -one per each inner target table that was created. -(Inner UUIDs are not shown normally until setting -[show_table_uuid_in_table_create_query_if_not_nil](../../../operations/settings/settings#show_table_uuid_in_table_create_query_if_not_nil) -is set.) - -Inner target tables have names like `.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, -`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, `.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -and each target table has columns which is a subset of the columns of the main `TimeSeries` table: - -``` sql -CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `id` UUID, - `timestamp` DateTime64(3), - `value` Float64 -) -ENGINE = MergeTree -ORDER BY (id, timestamp) -``` - -``` sql -CREATE TABLE default.`.inner_id.tags.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `id` UUID DEFAULT reinterpretAsUUID(sipHash128(metric_name, all_tags)), - `metric_name` LowCardinality(String), - `tags` Map(LowCardinality(String), String), - `all_tags` Map(String, String) EPHEMERAL, - `min_time` SimpleAggregateFunction(min, Nullable(DateTime64(3))), - `max_time` SimpleAggregateFunction(max, Nullable(DateTime64(3))) -) -ENGINE = AggregatingMergeTree -PRIMARY KEY metric_name -ORDER BY (metric_name, id) -``` - -``` sql -CREATE TABLE default.`.inner_id.metrics.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `metric_family_name` String, - `type` String, - `unit` String, - `help` String -) -ENGINE = ReplacingMergeTree -ORDER BY metric_family_name -``` - -## Adjusting types of columns {#adjusting-column-types} - -You can adjust the types of almost any column of the inner target tables by specifying them explicitly -while defining the main table. For example, - -``` sql -CREATE TABLE my_table -( - timestamp DateTime64(6) -) ENGINE=TimeSeries -``` - -will make the inner [data]{#data-table} table store timestamp in microseconds instead of milliseconds: - -``` sql -CREATE TABLE default.`.inner_id.data.xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -( - `id` UUID, - `timestamp` DateTime64(6), - `value` Float64 -) -ENGINE = MergeTree -ORDER BY (id, timestamp) -``` - -## The `id` column {#id-column} - -The `id` column contains identifiers, every identifier is calculated for a combination of a metric name and tags. -The DEFAULT expression for the `id` column is an expression which will be used to calculate such identifiers. 
-Both the type of the `id` column and that expression can be adjusted by specifying them explicitly: - -``` sql -CREATE TABLE my_table -( - id UInt64 DEFAULT sipHash64(metric_name, all_tags) -) ENGINE=TimeSeries -``` - -## The `tags` and `all_tags` columns {#tags-and-all-tags} - -There are two columns containing maps of tags - `tags` and `all_tags`. In this example they mean the same, however they can be different -if setting `tags_to_columns` is used. This setting allows you to specify that a specific tag should be stored in a separate column instead of storing -it in a map inside the `tags` column: - -``` sql -CREATE TABLE my_table ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} -``` - -This statement will add columns -``` - `instance` String, - `job` String -``` -to the definition of both `my_table` and its inner [tags]{#tags-table} target table. In this case the `tags` column will not contain tags `instance` and `job`, -but the `all_tags` column will contain them. The `all_tags` column is ephemeral and its only purpose to be used in the DEFAULT expression -for the `id` column. - -The types of columns can be adjusted by specifying them explicitly: - -``` sql -CREATE TABLE my_table (instance LowCardinality(String), job LowCardinality(Nullable(String))) -ENGINE=TimeSeries SETTINGS = {'instance': 'instance', 'job': 'job'} -``` - -## Table engines of inner target tables {#inner-table-engines} - -By default inner target tables use the following table engines: - -- the [data]{#data-table} table uses [MergeTree](../mergetree-family/mergetree); -- the [tags]{#tags-table} table uses [AggregatingMergeTree](../mergetree-family/aggregatingmergetree) because the same data is often inserted multiple times to this table so we need a way -to remove duplicates, and also because it's required to do aggregation for columns `min_time` and `max_time`; -- the [metrics]{#metrics-table} table uses [ReplacingMergeTree](../mergetree-family/replacingmergetree) because the same data is often inserted multiple times to this table so we need a way -to remove duplicates. - -Other table engines also can be used for inner target tables if it's specified so: - -``` sql -CREATE TABLE my_table ENGINE=TimeSeries -DATA ENGINE=ReplicatedMergeTree -TAGS ENGINE=ReplicatedAggregatingMergeTree -METRICS ENGINE=ReplicatedReplacingMergeTree -``` - -## External target tables {#external-target-tables} - -It's possible to make a `TimeSeries` table use a manually created table: - -``` sql -CREATE TABLE data_for_my_table -( - `id` UUID, - `timestamp` DateTime64(3), - `value` Float64 -) -ENGINE = MergeTree -ORDER BY (id, timestamp); - -CREATE TABLE tags_for_my_table ... - -CREATE TABLE metrics_for_my_table ... - -CREATE TABLE my_table ENGINE=TimeSeries DATA data_for_my_table TAGS tags_for_my_table METRICS metrics_for_my_table; -``` - -## Settings {#settings} - -Here is a list of settings which can be specified while defining a `TimeSeries` table: - -| Name | Type | Default | Description | -|---|---|---|---| -| `tags_to_columns` | Map | {} | Map specifying which tags should be put to separate columns in the [tags]{#tags-table} table. 
Syntax: `{'tag1': 'column1', 'tag2' : column2, ...}` | -| `use_all_tags_column_to_generate_id` | Bool | true | When generating an expression to calculate an identifier of a time series, this flag enables using the `all_tags` column in that calculation | -| `store_min_time_and_max_time` | Bool | true | If set to true then the table will store `min_time` and `max_time` for each time series | -| `aggregate_min_time_and_max_time` | Bool | true | When creating an inner target `tags` table, this flag enables using `SimpleAggregateFunction(min, Nullable(DateTime64(3)))` instead of just `Nullable(DateTime64(3))` as the type of the `min_time` column, and the same for the `max_time` column | -| `filter_by_min_time_and_max_time` | Bool | true | If set to true then the table will use the `min_time` and `max_time` columns for filtering time series | - -## Functions {#functions} - -Here is a list of functions supporting a `TimeSeries` table as an argument: - -- [timeSeriesData](../../../sql-reference/table-functions/timeSeriesData.md) -- [timeSeriesTags](../../../sql-reference/table-functions/timeSeriesTags.md) -- [timeSeriesMetrics](../../../sql-reference/table-functions/timeSeriesMetrics.md) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md deleted file mode 100644 index 5eac99f685e..00000000000 --- a/docs/en/interfaces/prometheus.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -slug: /en/interfaces/prometheus -sidebar_position: 19 -sidebar_label: Prometheus protocols ---- - -# Prometheus protocols - -## Exposing metrics {#expose} - -:::note -ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. -::: - -ClickHouse can expose its own metrics for scraping from Prometheus: - -```xml - - 9363 - /metrics - true - true - true - true - - -Section `` can be used to make more extended handlers. -This section is similar to [](http.md) but works for prometheus protocols: - -```xml - - 9363 - - - /metrics - - expose_metrics - true - true - true - true - - - - -``` - -Settings: - -| Name | Default | Description | -|---|---|---|---| -| `port` | none | Port for serving the exposing metrics protocol. | -| `endpoint` | `/metrics` | HTTP endpoint for scraping metrics by prometheus server. Starts with `/`. Should not be used with the `` section. | -| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | -| `metrics` | true | Expose metrics from the [system.metrics](../operations/system-tables/metrics.md) table. | -| `asynchronous_metrics` | true | Expose current metrics values from the [system.asynchronous_metrics](../operations/system-tables/asynchronous_metrics.md) table. | -| `events` | true | Expose metrics from the [system.events](../operations/system-tables/events.md) table. | -| `errors` | true | Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../operations/system-tables/errors.md) as well. | - -Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): - -```bash -curl 127.0.0.1:9363/metrics -``` - -## Remote-write protocol {#remote-write} - -ClickHouse supports the [remote-write](https://prometheus.io/docs/specs/remote_write_spec/) protocol. 
-Data are received by this protocol and written to a [TimeSeries](../engines/table-engines/integrations/time-series.md) table -(which should be created beforehand). - -```xml - - 9363 - - - /write - - remote_writedb_name - time_series_table
-
-
-
-
-``` - -Settings: - -| Name | Default | Description | -|---|---|---|---| -| `port` | none | Port for serving the `remote-write` protocol. | -| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | -| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to write data received by the `remote-write` protocol. This name can optionally contain the name of a database too. | -| `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | - -## Remote-read protocol {#remote-read} - -ClickHouse supports the [remote-read](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/) protocol. -Data are read from a [TimeSeries](../engines/table-engines/integrations/time-series.md) table and sent via this protocol. - -```xml - - 9363 - - - /read - - remote_readdb_name - time_series_table
-
-
-
-
-``` - -Settings: - -| Name | Default | Description | -|---|---|---|---| -| `port` | none | Port for serving the `remote-read` protocol. | -| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | -| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to read data to send by the `remote-read` protocol. This name can optionally contain the name of a database too. | -| `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | - -## Configuration for multiple protocols {#multiple-protocols} - -Multiple protocols can be specified together in one place: - -```xml - - 9363 - - - /metrics - - expose_metrics - true - true - true - true - - - - /write - - remote_writedb_name.time_series_table - - - - /read - - remote_readdb_name.time_series_table - - - - -``` diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 68f61650e00..a1e3c292b04 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -2112,6 +2112,48 @@ The trailing slash is mandatory. /var/lib/clickhouse/ ``` +## Prometheus {#prometheus} + +:::note +ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. +::: + +Exposing metrics data for scraping from [Prometheus](https://prometheus.io). + +Settings: + +- `endpoint` – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. +- `port` – Port for `endpoint`. +- `metrics` – Expose metrics from the [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) table. +- `events` – Expose metrics from the [system.events](../../operations/system-tables/events.md#system_tables-events) table. +- `asynchronous_metrics` – Expose current metrics values from the [system.asynchronous_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) table. +- `errors` - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../../operations/system-tables/asynchronous_metrics.md#system_tables-errors) as well. + +**Example** + +``` xml + + 0.0.0.0 + 8123 + 9000 + + + /metrics + 9363 + true + true + true + true + + + +``` + +Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): +```bash +curl 127.0.0.1:9363/metrics +``` + ## query_log {#query-log} Setting for logging queries received with the [log_queries=1](../../operations/settings/settings.md) setting. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index feac12f9c99..35547c3a9a6 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5614,14 +5614,3 @@ Default value: `1GiB`. Disable all insert and mutations (alter table update / alter table delete / alter table drop partition). Set to true, can make this node focus on reading queries. Default value: `false`. 
- -## allow_experimental_time_series_table {#allow-experimental-time-series-table} - -Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine. - -Possible values: - -- 0 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is disabled. -- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled. - -Default value: `0`. diff --git a/docs/en/sql-reference/table-functions/timeSeriesData.md b/docs/en/sql-reference/table-functions/timeSeriesData.md deleted file mode 100644 index aa7a9d30c2a..00000000000 --- a/docs/en/sql-reference/table-functions/timeSeriesData.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -slug: /en/sql-reference/table-functions/timeSeriesData -sidebar_position: 145 -sidebar_label: timeSeriesData ---- - -# timeSeriesData - -`timeSeriesData(db_name.time_series_table)` - Returns the [data](../../engines/table-engines/integrations/time-series.md#data-table) table -used by table `db_name.time_series_table` which table engine is [TimeSeries](../../engines/table-engines/integrations/time-series.md): - -``` sql -CREATE TABLE db_name.time_series_table ENGINE=TimeSeries DATA data_table -``` - -The function also works if the _data_ table is inner: - -``` sql -CREATE TABLE db_name.time_series_table ENGINE=TimeSeries DATA INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -``` - -The following queries are equivalent: - -``` sql -SELECT * FROM timeSeriesData(db_name.time_series_table); -SELECT * FROM timeSeriesData('db_name.time_series_table'); -SELECT * FROM timeSeriesData('db_name', 'time_series_table'); -``` diff --git a/docs/en/sql-reference/table-functions/timeSeriesMetrics.md b/docs/en/sql-reference/table-functions/timeSeriesMetrics.md deleted file mode 100644 index 913f1185bca..00000000000 --- a/docs/en/sql-reference/table-functions/timeSeriesMetrics.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -slug: /en/sql-reference/table-functions/timeSeriesMetrics -sidebar_position: 145 -sidebar_label: timeSeriesMetrics ---- - -# timeSeriesMetrics - -`timeSeriesMetrics(db_name.time_series_table)` - Returns the [metrics](../../engines/table-engines/integrations/time-series.md#metrics-table) table -used by table `db_name.time_series_table` which table engine is [TimeSeries](../../engines/table-engines/integrations/time-series.md): - -``` sql -CREATE TABLE db_name.time_series_table ENGINE=TimeSeries METRICS metrics_table -``` - -The function also works if the _metrics_ table is inner: - -``` sql -CREATE TABLE db_name.time_series_table ENGINE=TimeSeries METRICS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -``` - -The following queries are equivalent: - -``` sql -SELECT * FROM timeSeriesMetrics(db_name.time_series_table); -SELECT * FROM timeSeriesMetrics('db_name.time_series_table'); -SELECT * FROM timeSeriesMetrics('db_name', 'time_series_table'); -``` diff --git a/docs/en/sql-reference/table-functions/timeSeriesTags.md b/docs/en/sql-reference/table-functions/timeSeriesTags.md deleted file mode 100644 index 663a7dc6ac8..00000000000 --- a/docs/en/sql-reference/table-functions/timeSeriesTags.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -slug: /en/sql-reference/table-functions/timeSeriesTags -sidebar_position: 145 -sidebar_label: timeSeriesTags ---- - -# timeSeriesTags - -`timeSeriesTags(db_name.time_series_table)` - Returns the [tags](../../engines/table-engines/integrations/time-series.md#tags-table) table -used by table `db_name.time_series_table` which table engine is 
[TimeSeries](../../engines/table-engines/integrations/time-series.md): - -``` sql -CREATE TABLE db_name.time_series_table ENGINE=TimeSeries TAGS tags_table -``` - -The function also works if the _tags_ table is inner: - -``` sql -CREATE TABLE db_name.time_series_table ENGINE=TimeSeries TAGS INNER UUID '01234567-89ab-cdef-0123-456789abcdef' -``` - -The following queries are equivalent: - -``` sql -SELECT * FROM timeSeriesTags(db_name.time_series_table); -SELECT * FROM timeSeriesTags('db_name.time_series_table'); -SELECT * FROM timeSeriesTags('db_name', 'time_series_table'); -``` diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 382e64f343c..8a9a8d2e76c 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1672,7 +1672,6 @@ fuzzQuery fuzzer fuzzers gRPC -gaugehistogram gccMurmurHash gcem generateRandom @@ -2557,7 +2556,6 @@ startsWithUTF startswith statbox stateful -stateset stddev stddevPop stddevPopStable @@ -2689,10 +2687,6 @@ themself threadpool throwIf timeDiff -TimeSeries -timeSeriesData -timeSeriesMetrics -timeSeriesTags timeSlot timeSlots timeZone From d09eaac0dae0c937fa652a4973263f6c1dc18028 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 19:34:56 +0200 Subject: [PATCH 1770/2361] Make 00965_shard_unresolvable_addresses.sql faster and parallelizable --- .../0_stateless/00965_shard_unresolvable_addresses.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql index 16b62c37d80..41bf4d261f6 100644 --- a/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql +++ b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql @@ -1,9 +1,9 @@ --- Tags: shard, no-parallel +-- Tags: shard SET prefer_localhost_replica = 1; +SET connections_with_failover_max_tries=1; +SET connect_timeout_with_failover_ms=2000; +SET connect_timeout_with_failover_secure_ms=2000; SELECT count() FROM remote('127.0.0.1,localhos', system.one); -- { serverError ALL_CONNECTION_TRIES_FAILED } SELECT count() FROM remote('127.0.0.1|localhos', system.one); - --- Clear cache to avoid future errors in the logs -SYSTEM DROP DNS CACHE From 71d47d2d07c29e99193f2c5d454ba6967a43ec48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 19:35:15 +0200 Subject: [PATCH 1771/2361] Disable 02434_cancel_insert_when_client_dies in fast tests --- .../0_stateless/02434_cancel_insert_when_client_dies.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh index dca8dae22c3..1548bef857f 100755 --- a/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh +++ b/tests/queries/0_stateless/02434_cancel_insert_when_client_dies.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-random-settings, no-asan, no-msan, no-tsan, no-debug +# Tags: no-random-settings, no-asan, no-msan, no-tsan, no-debug, no-fasttest +# no-fasttest: The test runs for 40 seconds # shellcheck disable=SC2009 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) From ebc5b260abbfb0b87d1c551d4c8c67b010858c0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 19:35:32 +0200 Subject: [PATCH 1772/2361] Disable 
02447_drop_database_replica in slow tests --- tests/queries/0_stateless/02447_drop_database_replica.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02447_drop_database_replica.sh b/tests/queries/0_stateless/02447_drop_database_replica.sh index c6bf298f944..abe99398a56 100755 --- a/tests/queries/0_stateless/02447_drop_database_replica.sh +++ b/tests/queries/0_stateless/02447_drop_database_replica.sh @@ -1,8 +1,9 @@ #!/usr/bin/env bash -# Tags: no-parallel +# Tags: no-parallel, no-fasttest # no-parallel: This test is not parallel because when we execute system-wide SYSTEM DROP REPLICA, # other tests might shut down the storage in parallel and the test will fail. +# no-fasttest: It has several tests with timeouts for inactive replicas CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 64e33c510a7cc80bb771597a0ddcc11b06fa4ac0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 19:35:52 +0200 Subject: [PATCH 1773/2361] Disable 02998_primary_key_skip_columns in fast tests --- tests/queries/0_stateless/02998_primary_key_skip_columns.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql index ee558996b52..1abe692a7a4 100644 --- a/tests/queries/0_stateless/02998_primary_key_skip_columns.sql +++ b/tests/queries/0_stateless/02998_primary_key_skip_columns.sql @@ -1,4 +1,5 @@ --- Tags: no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-debug +-- Tags: no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-debug, no-fasttest +-- no-fasttest: Low index granularity and too many parts makes the test slow DROP TABLE IF EXISTS test; From 401d4348b5ed5666a93dfb9cfe36d093bce9c908 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 19:49:06 +0200 Subject: [PATCH 1774/2361] Remove waiting for mutations from fast tests --- .../0_stateless/00834_kill_mutation_replicated_zookeeper.sh | 3 ++- .../0_stateless/01414_mutations_and_errors_zookeeper.sh | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh index 16ad08deeb2..3e6b339cb57 100755 --- a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: replica, no-debug +# Tags: replica, no-debug, no-fasttest +# no-fasttest: Waiting for failed mutations is slow: https://github.com/ClickHouse/ClickHouse/issues/67936 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh index 31e2cc395aa..5cdd6057050 100755 --- a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh +++ b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: zookeeper, no-parallel +# Tags: zookeeper, no-parallel, no-fasttest +# no-fasttest: Waiting for failed mutations is slow: https://github.com/ClickHouse/ClickHouse/issues/67936 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 
015691058073c4e7587e053efbc880dc88190586 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 20:14:09 +0200 Subject: [PATCH 1775/2361] Disable another slow test in fasttest --- tests/queries/0_stateless/01030_storage_url_syntax.sql | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/01030_storage_url_syntax.sql b/tests/queries/0_stateless/01030_storage_url_syntax.sql index 0eb89af8462..084486b61ee 100644 --- a/tests/queries/0_stateless/01030_storage_url_syntax.sql +++ b/tests/queries/0_stateless/01030_storage_url_syntax.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: Timeout for the first query (CANNOT_DETECT_FORMAT) is too slow: https://github.com/ClickHouse/ClickHouse/issues/67939 + drop table if exists test_table_url_syntax ; create table test_table_url_syntax (id UInt32) ENGINE = URL('') From 6061f01dc0fabd8d737a7d5e13b3828f2b9c0ab7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 20:21:36 +0200 Subject: [PATCH 1776/2361] Disable more slow tests --- .../0_stateless/02044_url_glob_parallel_connection_refused.sh | 3 ++- tests/queries/0_stateless/02435_rollback_cancelled_queries.sh | 3 ++- .../queries/0_stateless/02445_replicated_db_alter_partition.sh | 3 +++ .../02581_share_big_sets_between_mutation_tasks.sql | 3 ++- tests/queries/0_stateless/02703_max_local_read_bandwidth.sh | 3 ++- .../queries/0_stateless/02805_distributed_queries_timeouts.sql | 2 ++ .../0_stateless/02994_merge_tree_mutations_cleanup.sql.j2 | 2 ++ tests/queries/0_stateless/02995_forget_partition.sh | 3 ++- 8 files changed, 17 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh b/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh index b4b0ee8a023..d70845f52eb 100755 --- a/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh +++ b/tests/queries/0_stateless/02044_url_glob_parallel_connection_refused.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: distributed +# Tags: distributed, no-fasttest +# no-fasttest: Slow wait and retries CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh b/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh index ba652013a57..1bc7d4cd1d3 100755 --- a/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh +++ b/tests/queries/0_stateless/02435_rollback_cancelled_queries.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-random-settings, no-ordinary-database +# Tags: no-random-settings, no-ordinary-database, no-fasttest +# no-fasttest: The test is slow (too many small blocks) # shellcheck disable=SC2009 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/02445_replicated_db_alter_partition.sh b/tests/queries/0_stateless/02445_replicated_db_alter_partition.sh index 4d9048354a1..f716a6aa779 100755 --- a/tests/queries/0_stateless/02445_replicated_db_alter_partition.sh +++ b/tests/queries/0_stateless/02445_replicated_db_alter_partition.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash +# Tags: no-fasttest +# no-fasttest: Slow timeouts + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks.sql b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks.sql index ea1452fc372..ad8bef7fbb7 100644 --- a/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks.sql +++ b/tests/queries/0_stateless/02581_share_big_sets_between_mutation_tasks.sql @@ -1,4 +1,5 @@ --- Tags: no-tsan, no-asan, no-ubsan, no-msan +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-fasttest +-- no-fasttest: Slow test -- no sanitizers: too slow sometimes DROP TABLE IF EXISTS 02581_trips; diff --git a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh index 6f43c1ae869..03e0f363d71 100755 --- a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-object-storage, no-random-settings, no-random-merge-tree-settings +# Tags: no-object-storage, no-random-settings, no-random-merge-tree-settings, no-fasttest +# no-fasttest: The test is slow CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql index f6bccc99977..98aeac36243 100644 --- a/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql +++ b/tests/queries/0_stateless/02805_distributed_queries_timeouts.sql @@ -1,3 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: Timeouts are slow create table dist as system.one engine=Distributed(test_shard_localhost, system, one); select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=0, use_hedged_requests=1 format Null; select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=1, use_hedged_requests=0 format Null; diff --git a/tests/queries/0_stateless/02994_merge_tree_mutations_cleanup.sql.j2 b/tests/queries/0_stateless/02994_merge_tree_mutations_cleanup.sql.j2 index 1b9be79dbe4..65601cd39be 100644 --- a/tests/queries/0_stateless/02994_merge_tree_mutations_cleanup.sql.j2 +++ b/tests/queries/0_stateless/02994_merge_tree_mutations_cleanup.sql.j2 @@ -1,3 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: Slow wait drop table if exists data_rmt; drop table if exists data_mt; diff --git a/tests/queries/0_stateless/02995_forget_partition.sh b/tests/queries/0_stateless/02995_forget_partition.sh index 6fa0b96e90d..e9d4590cce6 100755 --- a/tests/queries/0_stateless/02995_forget_partition.sh +++ b/tests/queries/0_stateless/02995_forget_partition.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: zookeeper, no-replicated-database +# Tags: zookeeper, no-replicated-database, no-fasttest +# no-fasttest: Slow wait CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From c4fda6cd4c5b7cfa40792c742eab98aa1857fd7d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 18:26:22 +0000 Subject: [PATCH 1777/2361] Fix style --- src/Storages/Statistics/StatisticsTDigest.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index b0c4bfda27d..fd9b922ffc8 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ 
b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -9,7 +9,6 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_STATISTICS; -extern const int LOGICAL_ERROR; } StatisticsTDigest::StatisticsTDigest(const SingleStatisticsDescription & description, const DataTypePtr & data_type_) From 8cfcf341aeaedd4defb5ff1b2dbb05578aceb1d6 Mon Sep 17 00:00:00 2001 From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com> Date: Tue, 6 Aug 2024 18:26:52 +0000 Subject: [PATCH 1778/2361] fix --- tests/integration/test_storage_hdfs/test.py | 23 ++++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 4aac0142026..7597fdcd229 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -610,44 +610,48 @@ def test_format_detection(started_cluster): def test_schema_inference_with_globs(started_cluster): + fs = HdfsClient(hosts=started_cluster.hdfs_ip) + dir = "/test_schema_inference_with_globs" + fs.mkdirs(dir) node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + f"insert into table function hdfs('hdfs://hdfs1:9000{dir}/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" ) node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" + f"insert into table function hdfs('hdfs://hdfs1:9000{dir}/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" ) result = node1.query( - f"desc hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" + f"desc hdfs('hdfs://hdfs1:9000{dir}/data*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" ) assert result.strip() == "c1\tNullable(Int64)" result = node1.query( - f"select * from hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" + f"select * from hdfs('hdfs://hdfs1:9000{dir}/data*.jsoncompacteachrow') settings input_format_json_infer_incomplete_types_as_strings=0" ) assert sorted(result.split()) == ["0", "\\N"] node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + f"insert into table function hdfs('hdfs://hdfs1:9000{dir}/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" ) filename = "data{1,3}.jsoncompacteachrow" result = node1.query_and_get_error( - f"desc hdfs('hdfs://hdfs1:9000/{filename}') settings schema_inference_use_cache_for_hdfs=0, input_format_json_infer_incomplete_types_as_strings=0" + f"desc hdfs('hdfs://hdfs1:9000{dir}/{filename}') settings schema_inference_use_cache_for_hdfs=0, input_format_json_infer_incomplete_types_as_strings=0" ) assert "All attempts to extract table structure from files failed" in result node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'" + f"insert into table function hdfs('hdfs://hdfs1:9000{dir}/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'" ) result = node1.query_and_get_error( - f"desc hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow') settings schema_inference_use_cache_for_hdfs=0, input_format_json_infer_incomplete_types_as_strings=0" 
+ f"desc hdfs('hdfs://hdfs1:9000{dir}/data*.jsoncompacteachrow') settings schema_inference_use_cache_for_hdfs=0, input_format_json_infer_incomplete_types_as_strings=0" ) assert "CANNOT_EXTRACT_TABLE_STRUCTURE" in result + fs.delete(dir, recursive=True) def test_insert_select_schema_inference(started_cluster): @@ -694,6 +698,7 @@ def test_cluster_macro(started_cluster): def test_virtual_columns_2(started_cluster): hdfs_api = started_cluster.hdfs_api + fs = HdfsClient(hosts=started_cluster.hdfs_ip) table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')" @@ -710,6 +715,8 @@ def test_virtual_columns_2(started_cluster): result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "kek" + fs.delete("/parquet_2") + fs.delete("/parquet_3") def check_profile_event_for_query(node, file, profile_event, amount=1): From 302bd5fdc6f077acf05a17c5e700cb44b2e609ab Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 18:37:26 +0000 Subject: [PATCH 1779/2361] Remove usearch submodule --- .gitmodules | 3 --- contrib/usearch | 1 - 2 files changed, 4 deletions(-) delete mode 160000 contrib/usearch diff --git a/.gitmodules b/.gitmodules index 7e0b4df4ad1..c4c93822711 100644 --- a/.gitmodules +++ b/.gitmodules @@ -339,9 +339,6 @@ [submodule "contrib/incbin"] path = contrib/incbin url = https://github.com/graphitemaster/incbin.git -[submodule "contrib/usearch"] - path = contrib/usearch - url = https://github.com/unum-cloud/usearch.git [submodule "contrib/SimSIMD"] path = contrib/SimSIMD url = https://github.com/ashvardanian/SimSIMD.git diff --git a/contrib/usearch b/contrib/usearch deleted file mode 160000 index 955c6f9c11a..00000000000 --- a/contrib/usearch +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f From 7a5b30d955b85a164097b4c7dd5ce957a47059e5 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 18:39:18 +0000 Subject: [PATCH 1780/2361] Re-add forked usearch repo --- .gitmodules | 3 +++ contrib/usearch | 1 + 2 files changed, 4 insertions(+) create mode 160000 contrib/usearch diff --git a/.gitmodules b/.gitmodules index c4c93822711..7fdfb1103c5 100644 --- a/.gitmodules +++ b/.gitmodules @@ -339,6 +339,9 @@ [submodule "contrib/incbin"] path = contrib/incbin url = https://github.com/graphitemaster/incbin.git +[submodule "contrib/usearch"] + path = contrib/usearch + url = https://github.com/ClickHouse/usearch.git [submodule "contrib/SimSIMD"] path = contrib/SimSIMD url = https://github.com/ashvardanian/SimSIMD.git diff --git a/contrib/usearch b/contrib/usearch new file mode 160000 index 00000000000..955c6f9c11a --- /dev/null +++ b/contrib/usearch @@ -0,0 +1 @@ +Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f From 9594a9baffb125e21b28cd421b511d26febfc900 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 18:43:44 +0000 Subject: [PATCH 1781/2361] Fix memory corruption in usearch --- contrib/usearch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/usearch b/contrib/usearch index 955c6f9c11a..30810452bec 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 955c6f9c11adfd89c912e0d1643d160b4e9e543f +Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356 From 36b6adbb30efdc0a96a274bdcbaeec2d961a8bed Mon Sep 17 00:00:00 2001 From: "Zhukova, Maria" Date: Tue, 6 Aug 2024 12:15:23 -0700 Subject: [PATCH 1782/2361] qpl-cmake: Better wording on ISA-L copy and removed non-required linking --- 
contrib/qpl-cmake/CMakeLists.txt | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt index b2f263252c2..e62612cff5a 100644 --- a/contrib/qpl-cmake/CMakeLists.txt +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -24,7 +24,9 @@ message(STATUS "Intel QPL version: ${QPL_VERSION}") # which are then combined into static or shared qpl. # Output ch_contrib::qpl by linking with 8 library targets. -# Note, qpl submodule comes with its own version of isal that is not compatible with upstream isal (e.g., ch_contrib::isal). +# Note, QPL has integrated a customized version of ISA-L to meet specific needs. +# This version has been significantly modified and there are no plans to maintain compatibility with the upstream version +# or upgrade the current copy. ## cmake/CompileOptions.cmake and automatic wrappers generation @@ -733,10 +735,6 @@ target_compile_definitions(_qpl target_link_libraries(_qpl PRIVATE ch_contrib::accel-config) -# C++ filesystem library requires additional linking for older GNU/Clang -target_link_libraries(_qpl PRIVATE $<$,$,9.1>>:stdc++fs>) -target_link_libraries(_qpl PRIVATE $<$,$,9.0>>:c++fs>) - target_include_directories(_qpl SYSTEM BEFORE PUBLIC "${QPL_PROJECT_DIR}/include" PUBLIC ${UUID_DIR}) From 45b55c4d6ed5cb5f023855ac78f4097dafba7fec Mon Sep 17 00:00:00 2001 From: filimonov <1549571+filimonov@users.noreply.github.com> Date: Tue, 6 Aug 2024 22:25:08 +0200 Subject: [PATCH 1783/2361] Update comment --- CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0d862b23e3a..2e4be09f5d3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -430,7 +430,10 @@ endif() if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE) # Slightly more efficient code can be generated - # Disabled for Android, because otherwise ClickHouse cannot run on Android. + # Using '-no-pie' builds executables with fixed addresses, resulting in slightly more efficient code + # and keeping binary addresses constant even with ASLR enabled. + # Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5 + # Disabled on IBM S390X due to build issues with 'no-pie' set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie") From db2d732b2b6ee7e61a2fe5c644db7ccb718e9f0c Mon Sep 17 00:00:00 2001 From: filimonov <1549571+filimonov@users.noreply.github.com> Date: Tue, 6 Aug 2024 22:25:47 +0200 Subject: [PATCH 1784/2361] Update comment2 --- CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e4be09f5d3..afab666a733 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -429,7 +429,6 @@ if (NOT SANITIZE) endif() if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE) - # Slightly more efficient code can be generated # Using '-no-pie' builds executables with fixed addresses, resulting in slightly more efficient code # and keeping binary addresses constant even with ASLR enabled. 
# Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5 From 139f5e55d34092969e49e4fd191404cac0ddd05b Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 6 Aug 2024 20:31:41 +0000 Subject: [PATCH 1785/2361] Unflake 02099_tsv_raw_format.sh --- ...rence => 02099_tsv_raw_format_1.reference} | 2 -- ...aw_format.sh => 02099_tsv_raw_format_1.sh} | 13 ------------- .../02099_tsv_raw_format_2.reference | 2 ++ .../0_stateless/02099_tsv_raw_format_2.sh | 19 +++++++++++++++++++ 4 files changed, 21 insertions(+), 15 deletions(-) rename tests/queries/0_stateless/{02099_tsv_raw_format.reference => 02099_tsv_raw_format_1.reference} (94%) rename tests/queries/0_stateless/{02099_tsv_raw_format.sh => 02099_tsv_raw_format_1.sh} (69%) create mode 100644 tests/queries/0_stateless/02099_tsv_raw_format_2.reference create mode 100755 tests/queries/0_stateless/02099_tsv_raw_format_2.sh diff --git a/tests/queries/0_stateless/02099_tsv_raw_format.reference b/tests/queries/0_stateless/02099_tsv_raw_format_1.reference similarity index 94% rename from tests/queries/0_stateless/02099_tsv_raw_format.reference rename to tests/queries/0_stateless/02099_tsv_raw_format_1.reference index de46cf8dff7..3ac175e51f6 100644 --- a/tests/queries/0_stateless/02099_tsv_raw_format.reference +++ b/tests/queries/0_stateless/02099_tsv_raw_format_1.reference @@ -109,5 +109,3 @@ UInt64 String Date 2 \N nSome text -b1cad4eb4be08a40387c9de70d02fcc2 - -b1cad4eb4be08a40387c9de70d02fcc2 - diff --git a/tests/queries/0_stateless/02099_tsv_raw_format.sh b/tests/queries/0_stateless/02099_tsv_raw_format_1.sh similarity index 69% rename from tests/queries/0_stateless/02099_tsv_raw_format.sh rename to tests/queries/0_stateless/02099_tsv_raw_format_1.sh index a69c96ab613..a3468f46ca0 100755 --- a/tests/queries/0_stateless/02099_tsv_raw_format.sh +++ b/tests/queries/0_stateless/02099_tsv_raw_format_1.sh @@ -46,16 +46,3 @@ echo 'nSome text' | $CLICKHOUSE_CLIENT -q "INSERT INTO test_nullable_string_0209 $CLICKHOUSE_CLIENT -q "SELECT * FROM test_nullable_string_02099" $CLICKHOUSE_CLIENT -q "DROP TABLE test_nullable_string_02099" - - -$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_parallel_parsing_02099" -$CLICKHOUSE_CLIENT -q "CREATE TABLE test_parallel_parsing_02099 (x UInt64, a Array(UInt64), s String) ENGINE=Memory()"; -$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(1000000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=0 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" -$CLICKHOUSE_CLIENT -q "SELECT * FROM test_parallel_parsing_02099 ORDER BY x" | md5sum - -$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_parallel_parsing_02099" - -$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(1000000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=1 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" -$CLICKHOUSE_CLIENT -q "SELECT * FROM test_parallel_parsing_02099 ORDER BY x" | md5sum - -$CLICKHOUSE_CLIENT -q "DROP TABLE test_parallel_parsing_02099" diff --git a/tests/queries/0_stateless/02099_tsv_raw_format_2.reference b/tests/queries/0_stateless/02099_tsv_raw_format_2.reference new file mode 100644 index 00000000000..4682749c21f --- /dev/null +++ b/tests/queries/0_stateless/02099_tsv_raw_format_2.reference @@ -0,0 +1,2 @@ +c8ff17885084035ea1aebd95fee2efb6 - +c8ff17885084035ea1aebd95fee2efb6 - diff --git a/tests/queries/0_stateless/02099_tsv_raw_format_2.sh 
b/tests/queries/0_stateless/02099_tsv_raw_format_2.sh new file mode 100755 index 00000000000..d6034a0616f --- /dev/null +++ b/tests/queries/0_stateless/02099_tsv_raw_format_2.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Tags: long + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_parallel_parsing_02099" +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_parallel_parsing_02099 (x UInt64, a Array(UInt64), s String) ENGINE=Memory()"; +$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(100000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=0 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_parallel_parsing_02099 ORDER BY x" | md5sum + +$CLICKHOUSE_CLIENT -q "TRUNCATE TABLE test_parallel_parsing_02099" + +$CLICKHOUSE_CLIENT -q "SELECT number AS x, range(number % 50) AS a, toString(a) AS s FROM numbers(100000) FORMAT TSVRaw" | $CLICKHOUSE_CLIENT --input_format_parallel_parsing=1 -q "INSERT INTO test_parallel_parsing_02099 FORMAT TSVRaw" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_parallel_parsing_02099 ORDER BY x" | md5sum + +$CLICKHOUSE_CLIENT -q "DROP TABLE test_parallel_parsing_02099" + From 340a2bcd2582c563c0f2eaeda0da1f32269b5253 Mon Sep 17 00:00:00 2001 From: filimonov <1549571+filimonov@users.noreply.github.com> Date: Tue, 6 Aug 2024 22:36:16 +0200 Subject: [PATCH 1786/2361] Update comment3 --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index afab666a733..7b4e0484ab1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -433,6 +433,7 @@ if (NOT OS_ANDROID AND OS_LINUX AND NOT ARCH_S390X AND NOT SANITIZE) # and keeping binary addresses constant even with ASLR enabled. 
# Disabled on Android as it requires PIE: https://source.android.com/docs/security/enhancements#android-5 # Disabled on IBM S390X due to build issues with 'no-pie' + # Disabled with sanitizers to avoid issues with maximum relocation size: https://github.com/ClickHouse/ClickHouse/pull/49145 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie") set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie") From 04438784e2178820537c65b66b5a8341f3d63b8d Mon Sep 17 00:00:00 2001 From: pufit Date: Tue, 6 Aug 2024 16:45:46 -0400 Subject: [PATCH 1787/2361] add a stateless test for `grant current grants` --- .../03215_grant_current_grants.reference | 2 ++ .../0_stateless/03215_grant_current_grants.sh | 26 +++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 tests/queries/0_stateless/03215_grant_current_grants.reference create mode 100755 tests/queries/0_stateless/03215_grant_current_grants.sh diff --git a/tests/queries/0_stateless/03215_grant_current_grants.reference b/tests/queries/0_stateless/03215_grant_current_grants.reference new file mode 100644 index 00000000000..e4f6850b806 --- /dev/null +++ b/tests/queries/0_stateless/03215_grant_current_grants.reference @@ -0,0 +1,2 @@ +GRANT SELECT, CREATE TABLE, CREATE VIEW ON default.* +GRANT SELECT ON default.* diff --git a/tests/queries/0_stateless/03215_grant_current_grants.sh b/tests/queries/0_stateless/03215_grant_current_grants.sh new file mode 100755 index 00000000000..68af4a62bba --- /dev/null +++ b/tests/queries/0_stateless/03215_grant_current_grants.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +user1="user03215_1_${CLICKHOUSE_DATABASE}_$RANDOM" +user2="user03215_2_${CLICKHOUSE_DATABASE}_$RANDOM" +user3="user03215_3_${CLICKHOUSE_DATABASE}_$RANDOM" +db=${CLICKHOUSE_DATABASE} + + +${CLICKHOUSE_CLIENT} --query "CREATE USER $user1, $user2, $user3;"; +${CLICKHOUSE_CLIENT} --query "GRANT SELECT, CREATE TABLE, CREATE VIEW ON $db.* TO $user1 WITH GRANT OPTION;"; + +${CLICKHOUSE_CLIENT} --query "GRANT CURRENT GRANTS ON $db.* TO $user2" --user $user1; +${CLICKHOUSE_CLIENT} --query "GRANT CURRENT GRANTS ON $db.* TO $user3" --user $user2; + +${CLICKHOUSE_CLIENT} --query "SHOW GRANTS FOR $user2" | sed 's/ TO.*//'; +${CLICKHOUSE_CLIENT} --query "SHOW GRANTS FOR $user3" | sed 's/ TO.*//'; + +${CLICKHOUSE_CLIENT} --query "GRANT CURRENT GRANTS(SELECT ON $db.*) TO $user3" --user $user1; +${CLICKHOUSE_CLIENT} --query "SHOW GRANTS FOR $user3" | sed 's/ TO.*//'; + +${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS $user1, $user2, $user3"; From 5ae5cd35b5b263d14bdd62aa5cbaa1e22219208a Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 6 Aug 2024 21:50:31 +0100 Subject: [PATCH 1788/2361] update --- base/poco/Net/include/Poco/Net/HTTPServerSession.h | 4 ++-- src/Server/HTTP/sendExceptionToHTTPClient.cpp | 2 +- .../0_stateless/00408_http_keep_alive.reference | 6 +++--- tests/queries/0_stateless/00408_http_keep_alive.sh | 7 ++++--- tests/queries/0_stateless/00501_http_head.re | 12 ++++++++++++ tests/queries/0_stateless/00501_http_head.reference | 4 ++-- tests/queries/0_stateless/00501_http_head.sh | 5 +++-- 7 files changed, 27 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/00501_http_head.re diff --git a/base/poco/Net/include/Poco/Net/HTTPServerSession.h b/base/poco/Net/include/Poco/Net/HTTPServerSession.h index 93f31012336..54e7f2c8c50 100644 --- a/base/poco/Net/include/Poco/Net/HTTPServerSession.h +++ b/base/poco/Net/include/Poco/Net/HTTPServerSession.h @@ -57,10 +57,10 @@ namespace Net /// Returns the server's address. void setKeepAliveTimeout(Poco::Timespan keepAliveTimeout); - + size_t getKeepAliveTimeout() const { return _keepAliveTimeout.totalSeconds(); } - size_t getMaxKeepAliveRequests() const { return _maxKeepAliveRequests; } + size_t getMaxKeepAliveRequests() const { return _maxKeepAliveRequests; } private: bool _firstRequest; diff --git a/src/Server/HTTP/sendExceptionToHTTPClient.cpp b/src/Server/HTTP/sendExceptionToHTTPClient.cpp index 022a763a9a2..658b7a4707a 100644 --- a/src/Server/HTTP/sendExceptionToHTTPClient.cpp +++ b/src/Server/HTTP/sendExceptionToHTTPClient.cpp @@ -29,7 +29,7 @@ void sendExceptionToHTTPClient( if (!out) { /// If nothing was sent yet. - WriteBufferFromHTTPServerResponse out_for_message{response, request.getMethod() == HTTPRequest::HTTP_HEAD, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT}; + WriteBufferFromHTTPServerResponse out_for_message{response, request.getMethod() == HTTPRequest::HTTP_HEAD}; out_for_message.writeln(exception_message); out_for_message.finalize(); diff --git a/tests/queries/0_stateless/00408_http_keep_alive.reference b/tests/queries/0_stateless/00408_http_keep_alive.reference index d5d7dacce9e..5402036bfd7 100644 --- a/tests/queries/0_stateless/00408_http_keep_alive.reference +++ b/tests/queries/0_stateless/00408_http_keep_alive.reference @@ -1,6 +1,6 @@ < Connection: Keep-Alive -< Keep-Alive: timeout=10, max=10000 +< Keep-Alive: timeout=10, max=? < Connection: Keep-Alive -< Keep-Alive: timeout=10, max=10000 +< Keep-Alive: timeout=10, max=? 
< Connection: Keep-Alive -< Keep-Alive: timeout=10, max=10000 +< Keep-Alive: timeout=10, max=? diff --git a/tests/queries/0_stateless/00408_http_keep_alive.sh b/tests/queries/0_stateless/00408_http_keep_alive.sh index 4bd0e494eb8..4a1cb4ed712 100755 --- a/tests/queries/0_stateless/00408_http_keep_alive.sh +++ b/tests/queries/0_stateless/00408_http_keep_alive.sh @@ -6,9 +6,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) URL="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/" -${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< "SELECT 1" 2>&1 | perl -lnE 'print if /Keep-Alive/'; -${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< " error here " 2>&1 | perl -lnE 'print if /Keep-Alive/'; -${CLICKHOUSE_CURL} -vsS "${URL}"ping 2>&1 | perl -lnE 'print if /Keep-Alive/'; +# the sed command here replaces the real number of left requests with a question mark, because it can vary and we don't really have control over it +${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< "SELECT 1" 2>&1 | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -i 'keep-alive'; +${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< " error here " 2>&1 | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -i 'keep-alive'; +${CLICKHOUSE_CURL} -vsS "${URL}"ping 2>&1 | perl -lnE 'print if /Keep-Alive/' | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -i 'keep-alive'; # no keep-alive: ${CLICKHOUSE_CURL} -vsS "${URL}"404/not/found/ 2>&1 | perl -lnE 'print if /Keep-Alive/'; diff --git a/tests/queries/0_stateless/00501_http_head.re b/tests/queries/0_stateless/00501_http_head.re new file mode 100644 index 00000000000..807bcd4922e --- /dev/null +++ b/tests/queries/0_stateless/00501_http_head.re @@ -0,0 +1,12 @@ +HTTP/1.1 200 OK +Connection: Keep-Alive +Content-Type: text/tab-separated-values; charset=UTF-8 +Transfer-Encoding: chunked +Keep-Alive: timeout=10, max=? + +HTTP/1.1 200 OK +Connection: Keep-Alive +Content-Type: text/tab-separated-values; charset=UTF-8 +Transfer-Encoding: chunked +Keep-Alive: timeout=10, max=? + diff --git a/tests/queries/0_stateless/00501_http_head.reference b/tests/queries/0_stateless/00501_http_head.reference index db82132b145..807bcd4922e 100644 --- a/tests/queries/0_stateless/00501_http_head.reference +++ b/tests/queries/0_stateless/00501_http_head.reference @@ -2,11 +2,11 @@ HTTP/1.1 200 OK Connection: Keep-Alive Content-Type: text/tab-separated-values; charset=UTF-8 Transfer-Encoding: chunked -Keep-Alive: timeout=10, max=10000 +Keep-Alive: timeout=10, max=? HTTP/1.1 200 OK Connection: Keep-Alive Content-Type: text/tab-separated-values; charset=UTF-8 Transfer-Encoding: chunked -Keep-Alive: timeout=10, max=10000 +Keep-Alive: timeout=10, max=? diff --git a/tests/queries/0_stateless/00501_http_head.sh b/tests/queries/0_stateless/00501_http_head.sh index 60283f26833..30da64c31f0 100755 --- a/tests/queries/0_stateless/00501_http_head.sh +++ b/tests/queries/0_stateless/00501_http_head.sh @@ -4,8 +4,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -( ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=SELECT%201"; - ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=select+*+from+system.numbers+limit+1000000" ) | grep -v "Date:" | grep -v "X-ClickHouse-Server-Display-Name:" | grep -v "X-ClickHouse-Query-Id:" | grep -v "X-ClickHouse-Format:" | grep -v "X-ClickHouse-Timezone:" +# the sed command here replaces the real number of left requests with a question mark, because it can vary and we don't really have control over it +( ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=SELECT%201" | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I'; + ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=select+*+from+system.numbers+limit+1000000" ) | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -v "Date:" | grep -v "X-ClickHouse-Server-Display-Name:" | grep -v "X-ClickHouse-Query-Id:" | grep -v "X-ClickHouse-Format:" | grep -v "X-ClickHouse-Timezone:" if [[ $(${CLICKHOUSE_CURL} -sS -X POST -I "${CLICKHOUSE_URL}&query=SELECT+1" | grep -c '411 Length Required') -ne 1 ]]; then echo FAIL From 1f5c4101b2d74d7ccf798621083fb536bf35de18 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 6 Aug 2024 21:54:15 +0100 Subject: [PATCH 1789/2361] rm redundant file --- tests/queries/0_stateless/00501_http_head.re | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 tests/queries/0_stateless/00501_http_head.re diff --git a/tests/queries/0_stateless/00501_http_head.re b/tests/queries/0_stateless/00501_http_head.re deleted file mode 100644 index 807bcd4922e..00000000000 --- a/tests/queries/0_stateless/00501_http_head.re +++ /dev/null @@ -1,12 +0,0 @@ -HTTP/1.1 200 OK -Connection: Keep-Alive -Content-Type: text/tab-separated-values; charset=UTF-8 -Transfer-Encoding: chunked -Keep-Alive: timeout=10, max=? - -HTTP/1.1 200 OK -Connection: Keep-Alive -Content-Type: text/tab-separated-values; charset=UTF-8 -Transfer-Encoding: chunked -Keep-Alive: timeout=10, max=? - From 9dec9be1b5254b17f5146e3aaabe3c66f763900d Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Tue, 6 Aug 2024 21:58:55 +0000 Subject: [PATCH 1790/2361] Fixed --- .../queries/0_stateless/02558_system_processes_elapsed.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02558_system_processes_elapsed.sh b/tests/queries/0_stateless/02558_system_processes_elapsed.sh index 891ac3cf7bc..8d2615541fd 100755 --- a/tests/queries/0_stateless/02558_system_processes_elapsed.sh +++ b/tests/queries/0_stateless/02558_system_processes_elapsed.sh @@ -9,7 +9,12 @@ while :; do pid=$! sleep 1.5 duration="$($CLICKHOUSE_CLIENT -q "select floor(elapsed) from system.processes where current_database = currentDatabase() and query not like '%system.processes%'")" - kill -INT $pid + # The process might not exist at this point in some exception situations + # maybe it was killed by OOM? + # It safe to skip this iteration. + if ! 
kill -INT $pid > /dev/null 2>&1; then + continue + fi wait $CLICKHOUSE_CLIENT -q "kill query where current_database = currentDatabase() sync format Null" if [[ $duration -eq 1 ]]; then From a43ed76ae84c7fe68bc82f8a726acf1299fcaec3 Mon Sep 17 00:00:00 2001 From: Alexey Gerasimchuck Date: Tue, 6 Aug 2024 22:20:12 +0000 Subject: [PATCH 1791/2361] Fixed session log parallel/sequenced test work within a single fixture --- tests/integration/parallel_skip.json | 5 - .../test_session_log/configs/users.xml | 12 -- tests/integration/test_session_log/test.py | 135 ++++++++---------- 3 files changed, 61 insertions(+), 91 deletions(-) diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 9b8109f3f17..99fa626bd1e 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -94,11 +94,6 @@ "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_setting_in_query", "test_profile_max_sessions_for_user/test.py::test_profile_max_sessions_for_user_client_suggestions_load", - "test_session_log/test.py::test_grpc_session", - "test_session_log/test.py::test_mysql_session", - "test_session_log/test.py::test_postgres_session", - "test_session_log/test.py::test_parallel_sessions", - "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_stop_moves_query", "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_table_detach", "test_ttl_move/test.py::TestCancelBackgroundMoving::test_cancel_background_moving_on_zookeeper_disconnect", diff --git a/tests/integration/test_session_log/configs/users.xml b/tests/integration/test_session_log/configs/users.xml index 0416dfadc8a..766fdbcf00f 100644 --- a/tests/integration/test_session_log/configs/users.xml +++ b/tests/integration/test_session_log/configs/users.xml @@ -7,17 +7,5 @@ - - pass - - - pass - - - pass - - - pass -
\ No newline at end of file diff --git a/tests/integration/test_session_log/test.py b/tests/integration/test_session_log/test.py index 5e424610ba2..0eb614f7aa9 100644 --- a/tests/integration/test_session_log/test.py +++ b/tests/integration/test_session_log/test.py @@ -60,6 +60,19 @@ def next_session_id(): return str(session_id) +user_counter = 0 + + +def create_unique_user(prefix): + global user_counter + user_counter += 1 + user_name = f"{prefix}_{os.getppid()}_{user_counter}" + instance.query( + f"CREATE USER {user_name} IDENTIFIED WITH plaintext_password BY 'pass'" + ) + return user_name + + def grpc_query(query, user_, pass_, raise_exception): try: query_info = clickhouse_grpc_pb2.QueryInfo( @@ -131,6 +144,37 @@ def wait_for_corresponding_login_success_and_logout(user, expected_login_count): logins_and_logouts = instance.query(sql) +def check_session_log(user): + instance.query("SYSTEM FLUSH LOGS") + login_success_records = instance.query( + f"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='{user}' AND type = 'LoginSuccess'" + ) + assert login_success_records == f"{user}\t1\t1\n" + logout_records = instance.query( + f"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='{user}' AND type = 'Logout'" + ) + assert logout_records == f"{user}\t1\t1\n" + login_failure_records = instance.query( + f"SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='{user}' AND type = 'LoginFailure'" + ) + assert login_failure_records == f"{user}\t1\t1\n" + + wait_for_corresponding_login_success_and_logout(user, 1) + + +def session_log_test(prefix, query_function): + user = create_unique_user(prefix) + wrong_user = "wrong_" + user + + query_function("SELECT 1", user, "pass", False) + query_function("SELECT 2", user, "wrong_pass", True) + query_function("SELECT 3", wrong_user, "pass", True) + + check_session_log(user) + + instance.query(f"DROP USER {user}") + + @pytest.fixture(scope="module") def started_cluster(): try: @@ -145,78 +189,21 @@ def started_cluster(): def test_grpc_session(started_cluster): - grpc_query("SELECT 1", "grpc_user", "pass", False) - grpc_query("SELECT 2", "grpc_user", "wrong_pass", True) - grpc_query("SELECT 3", "wrong_grpc_user", "pass", True) - - instance.query("SYSTEM FLUSH LOGS") - login_success_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'LoginSuccess'" - ) - assert login_success_records == "grpc_user\t1\t1\n" - logout_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'Logout'" - ) - assert logout_records == "grpc_user\t1\t1\n" - login_failure_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='grpc_user' AND type = 'LoginFailure'" - ) - assert login_failure_records == "grpc_user\t1\t1\n" - logins_and_logouts = instance.query( - f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'grpc_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'grpc_user' AND type = 'Logout')" - ) - assert logins_and_logouts == "1\n" + session_log_test("grpc", grpc_query) def test_mysql_session(started_cluster): - mysql_query("SELECT 1", "mysql_user", "pass", False) - 
mysql_query("SELECT 2", "mysql_user", "wrong_pass", True) - mysql_query("SELECT 3", "wrong_mysql_user", "pass", True) - - instance.query("SYSTEM FLUSH LOGS") - login_success_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'LoginSuccess'" - ) - assert login_success_records == "mysql_user\t1\t1\n" - logout_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'Logout'" - ) - assert logout_records == "mysql_user\t1\t1\n" - login_failure_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='mysql_user' AND type = 'LoginFailure'" - ) - assert login_failure_records == "mysql_user\t1\t1\n" - logins_and_logouts = instance.query( - f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'mysql_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'mysql_user' AND type = 'Logout')" - ) - assert logins_and_logouts == "1\n" + session_log_test("mysql", mysql_query) def test_postgres_session(started_cluster): - postgres_query("SELECT 1", "postgres_user", "pass", False) - postgres_query("SELECT 2", "postgres_user", "wrong_pass", True) - postgres_query("SELECT 3", "wrong_postgres_user", "pass", True) - - instance.query("SYSTEM FLUSH LOGS") - login_success_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'LoginSuccess'" - ) - assert login_success_records == "postgres_user\t1\t1\n" - logout_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'Logout'" - ) - assert logout_records == "postgres_user\t1\t1\n" - login_failure_records = instance.query( - "SELECT user, client_port <> 0, client_address <> toIPv6('::') FROM system.session_log WHERE user='postgres_user' AND type = 'LoginFailure'" - ) - assert login_failure_records == "postgres_user\t1\t1\n" - logins_and_logouts = instance.query( - f"SELECT COUNT(*) FROM (SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'postgres_user' AND type = 'LoginSuccess' INTERSECT SELECT {SESSION_LOG_MATCHING_FIELDS} FROM system.session_log WHERE user = 'postgres_user' AND type = 'Logout')" - ) - assert logins_and_logouts == "1\n" + session_log_test("postgres", postgres_query) def test_parallel_sessions(started_cluster): + user = create_unique_user("parallel") + wrong_user = "wrong_" + user + thread_list = [] for _ in range(10): # Sleep time does not significantly matter here, @@ -226,7 +213,7 @@ def test_parallel_sessions(started_cluster): target=function, args=( f"SELECT sleep({random.uniform(0.03, 0.04)})", - "parallel_user", + user, "pass", False, ), @@ -237,7 +224,7 @@ def test_parallel_sessions(started_cluster): target=function, args=( f"SELECT sleep({random.uniform(0.03, 0.04)})", - "parallel_user", + user, "wrong_pass", True, ), @@ -248,7 +235,7 @@ def test_parallel_sessions(started_cluster): target=function, args=( f"SELECT sleep({random.uniform(0.03, 0.04)})", - "wrong_parallel_user", + wrong_user, "pass", True, ), @@ -261,38 +248,38 @@ def test_parallel_sessions(started_cluster): instance.query("SYSTEM FLUSH LOGS") port_0_sessions = instance.query( - f"SELECT COUNT(*) FROM 
system.session_log WHERE user = 'parallel_user'" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}'" ) assert port_0_sessions == "90\n" port_0_sessions = instance.query( - f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND client_port = 0" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND client_port = 0" ) assert port_0_sessions == "0\n" address_0_sessions = instance.query( - f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND client_address = toIPv6('::')" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND client_address = toIPv6('::')" ) assert address_0_sessions == "0\n" grpc_sessions = instance.query( - f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'gRPC'" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND interface = 'gRPC'" ) assert grpc_sessions == "30\n" mysql_sessions = instance.query( - f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'MySQL'" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND interface = 'MySQL'" ) assert mysql_sessions == "30\n" postgres_sessions = instance.query( - f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND interface = 'PostgreSQL'" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND interface = 'PostgreSQL'" ) assert postgres_sessions == "30\n" - wait_for_corresponding_login_success_and_logout("parallel_user", 30) + wait_for_corresponding_login_success_and_logout(user, 30) logout_failure_sessions = instance.query( - f"SELECT COUNT(*) FROM system.session_log WHERE user = 'parallel_user' AND type = 'LoginFailure'" + f"SELECT COUNT(*) FROM system.session_log WHERE user = '{user}' AND type = 'LoginFailure'" ) assert logout_failure_sessions == "30\n" From 5b3692b4f02421d56692107365ad4cc7a3297418 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 7 Aug 2024 00:29:19 +0200 Subject: [PATCH 1792/2361] Added some useful debug logs --- src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp index 1e8164152a5..702d058ee79 100644 --- a/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteWriteProtocol.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -529,6 +530,8 @@ namespace ContextMutablePtr insert_context = Context::createCopy(context); insert_context->setCurrentQueryId(context->getCurrentQueryId() + ":" + String{toString(table_kind)}); + LOG_TEST(log, "{}: Executing query: {}", time_series_storage_id.getNameForLogs(), queryToString(insert_query)); + InterpreterInsertQuery interpreter( insert_query, insert_context, From 20c2d346a5db550d38954b5c5b1de2d1a09a884c Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 7 Aug 2024 00:35:25 +0000 Subject: [PATCH 1793/2361] just add test --- .../MergeTree/MergeTreeIndexBloomFilter.cpp | 26 +------------------ .../00945_bloom_filter_index.reference | 2 ++ .../0_stateless/00945_bloom_filter_index.sql | 11 ++++++++ ..._bloom_filter_not_supported_func.reference | 2 -- .../03215_bloom_filter_not_supported_func.sql | 14 ---------- 5 files changed, 14 insertions(+), 41 deletions(-) delete mode 100644 tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference delete mode 100644 
tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index 0a4eda3be69..dc314ce53d4 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -366,31 +366,7 @@ bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTre } } - if (node.isFunction()) - { - /// Similar to the logic of KeyCondition, restrict the usage of bloom filter, in case of func like cast(c=1 or c=9999 as Bool). - const std::unordered_set atom_map - { - "equals", - "notEquals", - "has", - "mapContains", - "indexOf", - "hasAny", - "hasAll", - "in", - "notIn", - "globalIn", - "globalNotIn" - }; - - auto func_name = node.toFunctionNode().getFunctionName(); - if (atom_map.find(func_name) == std::end(atom_map)) - return false; - } - - bool res = traverseFunction(node, out, nullptr /*parent*/); - return res; + return traverseFunction(node, out, nullptr /*parent*/); } bool MergeTreeIndexConditionBloomFilter::traverseFunction(const RPNBuilderTreeNode & node, RPNElement & out, const RPNBuilderTreeNode * parent) diff --git a/tests/queries/0_stateless/00945_bloom_filter_index.reference b/tests/queries/0_stateless/00945_bloom_filter_index.reference index e6751fe4762..9d9b49b29c9 100644 --- a/tests/queries/0_stateless/00945_bloom_filter_index.reference +++ b/tests/queries/0_stateless/00945_bloom_filter_index.reference @@ -227,3 +227,5 @@ 1 value1 1 value2 2 value3 +1 +1 diff --git a/tests/queries/0_stateless/00945_bloom_filter_index.sql b/tests/queries/0_stateless/00945_bloom_filter_index.sql index 2b7feacbd98..71109df79e7 100644 --- a/tests/queries/0_stateless/00945_bloom_filter_index.sql +++ b/tests/queries/0_stateless/00945_bloom_filter_index.sql @@ -374,3 +374,14 @@ SELECT id, ary[indexOf(ary, 'value2')] FROM test_bf_indexOf WHERE ary[indexOf(ar SELECT id, ary[indexOf(ary, 'value3')] FROM test_bf_indexOf WHERE ary[indexOf(ary, 'value3')] = 'value3' ORDER BY id FORMAT TSV; DROP TABLE IF EXISTS test_bf_indexOf; + +-- expecting cast function to be unknown +DROP TABLE IF EXISTS test_bf_cast; + +CREATE TABLE test_bf_cast (c Int32, INDEX x1 (c) type bloom_filter) ENGINE = MergeTree ORDER BY c as select 1; + +SELECT count() FROM test_bf_cast WHERE cast(c=1 or c=9999 as Bool) settings use_skip_indexes=0; + +SELECT count() FROM test_bf_cast WHERE cast(c=1 or c=9999 as Bool) settings use_skip_indexes=1; + +DROP TABLE test_bf_cast; \ No newline at end of file diff --git a/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference b/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference deleted file mode 100644 index 6ed281c757a..00000000000 --- a/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.reference +++ /dev/null @@ -1,2 +0,0 @@ -1 -1 diff --git a/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql b/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql deleted file mode 100644 index 3d094244892..00000000000 --- a/tests/queries/0_stateless/03215_bloom_filter_not_supported_func.sql +++ /dev/null @@ -1,14 +0,0 @@ -drop table if exists t; - -create table t ( - c Int32, - index x1 (c) type bloom_filter -) engine=MergeTree order by c as select 1; - -SELECT count() FROM t WHERE cast(c=1 or c=9999 as Bool) -settings use_skip_indexes=0; - -SELECT count() FROM t WHERE cast(c=1 or c=9999 as Bool) -settings use_skip_indexes=1; - -drop table t; \ 
No newline at end of file From 25f557667a90f9805bc22796a8c799f3203019fa Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 7 Aug 2024 02:47:53 +0200 Subject: [PATCH 1794/2361] Change log level in clickhouse-local --- src/Client/LocalConnection.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index 072184e0a66..7595a29912b 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -365,7 +365,7 @@ bool LocalConnection::poll(size_t) { while (pollImpl()) { - LOG_DEBUG(&Poco::Logger::get("LocalConnection"), "Executor timeout encountered, will retry"); + LOG_TEST(&Poco::Logger::get("LocalConnection"), "Executor timeout encountered, will retry"); if (needSendProgressOrMetrics()) return true; From 2a5a8f15f4ab5d4322fb09d41ca1e8279197abe4 Mon Sep 17 00:00:00 2001 From: pufit Date: Tue, 6 Aug 2024 20:51:44 -0400 Subject: [PATCH 1795/2361] Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. --- src/Storages/MergeTree/MergeTreeData.cpp | 4 ++++ .../02884_create_view_with_sql_security_option.reference | 1 + .../0_stateless/02884_create_view_with_sql_security_option.sh | 2 ++ 3 files changed, 7 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 49888596fbb..5d63f6c94d3 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3348,6 +3348,10 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ALTER MODIFY REFRESH is not supported by MergeTree engines family"); + if (command.type == AlterCommand::MODIFY_SQL_SECURITY) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "ALTER MODIFY SQL SECURITY is not supported by MergeTree engines family"); + if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned) { throw Exception(ErrorCodes::BAD_ARGUMENTS, diff --git a/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference index a03343c8cb3..39e7aad87e0 100644 --- a/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference +++ b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference @@ -27,6 +27,7 @@ OK OK 100 100 +OK ===== TestGrants ===== OK OK diff --git a/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh index cc4e76a9ed9..fadbbff7f34 100755 --- a/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh +++ b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh @@ -199,6 +199,8 @@ ${CLICKHOUSE_CLIENT} --user $user2 --query "INSERT INTO source SELECT * FROM gen ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM destination1" ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM destination2" +(( $(${CLICKHOUSE_CLIENT} --query "ALTER TABLE test_table MODIFY SQL SECURITY INVOKER" 2>&1 | grep -c "is not supported") >= 1 )) && echo "OK" || echo "UNEXPECTED" + echo "===== TestGrants =====" ${CLICKHOUSE_CLIENT} --query "GRANT CREATE ON *.* TO $user1" ${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_table TO $user1, $user2" From 114284bdcea7e4a769f2b9c004c5092cbb323550 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Fri, 2 Aug 2024 15:20:49 +0800 Subject: [PATCH 1796/2361] fixed --- 
src/AggregateFunctions/WindowFunction.h | 117 ++ src/Planner/PlannerActionsVisitor.cpp | 75 +- src/Planner/PlannerActionsVisitor.h | 6 + src/Planner/PlannerWindowFunctions.cpp | 40 +- src/Processors/Transforms/WindowTransform.cpp | 1018 +++++++---------- src/Processors/Transforms/WindowTransform.h | 25 +- 6 files changed, 615 insertions(+), 666 deletions(-) create mode 100644 src/AggregateFunctions/WindowFunction.h diff --git a/src/AggregateFunctions/WindowFunction.h b/src/AggregateFunctions/WindowFunction.h new file mode 100644 index 00000000000..f7fbd7389ea --- /dev/null +++ b/src/AggregateFunctions/WindowFunction.h @@ -0,0 +1,117 @@ +#pragma once +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ +extern const int BAD_ARGUMENTS; +} +class WindowTransform; + + +// Interface for true window functions. It's not much of an interface, they just +// accept the guts of WindowTransform and do 'something'. Given a small number of +// true window functions, and the fact that the WindowTransform internals are +// pretty much well-defined in domain terms (e.g. frame boundaries), this is +// somewhat acceptable. +class IWindowFunction +{ +public: + virtual ~IWindowFunction() = default; + + // Must insert the result for current_row. + virtual void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const = 0; + + virtual std::optional getDefaultFrame() const { return {}; } + + virtual ColumnPtr castColumn(const Columns &, const std::vector &) { return nullptr; } + + /// Is the frame type supported by this function. + virtual bool checkWindowFrameType(const WindowTransform * /*transform*/) const { return true; } +}; + +// Runtime data for computing one window function. +struct WindowFunctionWorkspace +{ + AggregateFunctionPtr aggregate_function; + + // Cached value of aggregate function isState virtual method + bool is_aggregate_function_state = false; + + // This field is set for pure window functions. When set, we ignore the + // window_function.aggregate_function, and work through this interface + // instead. + IWindowFunction * window_function_impl = nullptr; + + std::vector argument_column_indices; + + // Will not be initialized for a pure window function. + mutable AlignedBuffer aggregate_function_state; + + // Argument columns. Be careful, this is a per-block cache. + std::vector argument_columns; + UInt64 cached_block_number = std::numeric_limits::max(); +}; + +// A basic implementation for a true window function. It pretends to be an +// aggregate function, but refuses to work as such. 
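// Illustrative sketch only, not part of this patch: a minimal IWindowFunction
// override. It assumes WindowFrame keeps the member names used elsewhere in
// this patch (is_default, type, end_type). A function that needs to see the
// whole partition (ntile, later in this patch, is the real example) returns
// such a frame from getDefaultFrame(); the planner falls back to the frame
// spelled out in the window definition when the optional is empty.
struct ExampleWholePartitionWindowFunction : public IWindowFunction
{
    // A real implementation would write one value per row here; it is left
    // empty in this sketch because WindowTransform is only forward-declared.
    void windowInsertResultInto(const WindowTransform *, size_t) const override {}

    std::optional<WindowFrame> getDefaultFrame() const override
    {
        WindowFrame frame;
        frame.is_default = false;
        frame.type = WindowFrame::FrameType::ROWS;
        frame.end_type = WindowFrame::BoundaryType::Unbounded;
        return frame;
    }
};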
+struct WindowFunction : public IAggregateFunctionHelper, public IWindowFunction +{ + std::string name; + + WindowFunction( + const std::string & name_, const DataTypes & argument_types_, const Array & parameters_, const DataTypePtr & result_type_) + : IAggregateFunctionHelper(argument_types_, parameters_, result_type_), name(name_) + { + } + + bool isOnlyWindowFunction() const override { return true; } + + [[noreturn]] void fail() const + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "The function '{}' can only be used as a window function, not as an aggregate function", getName()); + } + + String getName() const override { return name; } + void create(AggregateDataPtr __restrict) const override { } + void destroy(AggregateDataPtr __restrict) const noexcept override { } + bool hasTrivialDestructor() const override { return true; } + size_t sizeOfData() const override { return 0; } + size_t alignOfData() const override { return 1; } + void add(AggregateDataPtr __restrict, const IColumn **, size_t, Arena *) const override { fail(); } + void merge(AggregateDataPtr __restrict, ConstAggregateDataPtr, Arena *) const override { fail(); } + void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional) const override { fail(); } + void deserialize(AggregateDataPtr __restrict, ReadBuffer &, std::optional, Arena *) const override { fail(); } + void insertResultInto(AggregateDataPtr __restrict, IColumn &, Arena *) const override { fail(); } +}; + +template +struct StatefulWindowFunction : public WindowFunction +{ + StatefulWindowFunction( + const std::string & name_, const DataTypes & argument_types_, const Array & parameters_, const DataTypePtr & result_type_) + : WindowFunction(name_, argument_types_, parameters_, result_type_) + { + } + + size_t sizeOfData() const override { return sizeof(State); } + size_t alignOfData() const override { return 1; } + + void create(AggregateDataPtr __restrict place) const override { new (place) State(); } + + void destroy(AggregateDataPtr __restrict place) const noexcept override { reinterpret_cast(place)->~State(); } + + bool hasTrivialDestructor() const override { return std::is_trivially_destructible_v; } + + State & getState(const WindowFunctionWorkspace & workspace) const + { + return *reinterpret_cast(workspace.aggregate_function_state.data()); + } +}; + +} diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 57457493844..99b9c3f7482 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -236,8 +236,16 @@ public: if (function_node.isWindowFunction()) { + auto get_window_frame = [&]() -> std::optional + { + auto & window_node = function_node.getWindowNode()->as(); + auto & window_frame = window_node.getWindowFrame(); + if (!window_frame.is_default) + return window_frame; + return {}; + }; buffer << " OVER ("; - buffer << calculateWindowNodeActionName(function_node.getWindowNode()); + buffer << calculateWindowNodeActionName(function_node.getWindowNode(), get_window_frame); buffer << ')'; } @@ -298,7 +306,7 @@ public: return calculateConstantActionNodeName(constant_literal, applyVisitor(FieldToDataType(), constant_literal)); } - String calculateWindowNodeActionName(const QueryTreeNodePtr & node) + String calculateWindowNodeActionName(const QueryTreeNodePtr & node, std::function()> get_window_frame) { auto & window_node = node->as(); WriteBufferFromOwnString buffer; @@ -364,44 +372,14 @@ public: } } - auto & window_frame = window_node.getWindowFrame(); - 
if (!window_frame.is_default) + auto window_frame_opt = get_window_frame(); + if (window_frame_opt) { + auto & window_frame = *window_frame_opt; if (window_node.hasPartitionBy() || window_node.hasOrderBy()) buffer << ' '; - buffer << window_frame.type << " BETWEEN "; - if (window_frame.begin_type == WindowFrame::BoundaryType::Current) - { - buffer << "CURRENT ROW"; - } - else if (window_frame.begin_type == WindowFrame::BoundaryType::Unbounded) - { - buffer << "UNBOUNDED"; - buffer << " " << (window_frame.begin_preceding ? "PRECEDING" : "FOLLOWING"); - } - else - { - buffer << calculateActionNodeName(window_node.getFrameBeginOffsetNode()); - buffer << " " << (window_frame.begin_preceding ? "PRECEDING" : "FOLLOWING"); - } - - buffer << " AND "; - - if (window_frame.end_type == WindowFrame::BoundaryType::Current) - { - buffer << "CURRENT ROW"; - } - else if (window_frame.end_type == WindowFrame::BoundaryType::Unbounded) - { - buffer << "UNBOUNDED"; - buffer << " " << (window_frame.end_preceding ? "PRECEDING" : "FOLLOWING"); - } - else - { - buffer << calculateActionNodeName(window_node.getFrameEndOffsetNode()); - buffer << " " << (window_frame.end_preceding ? "PRECEDING" : "FOLLOWING"); - } + window_frame.toString(buffer); } return buffer.str(); @@ -1062,14 +1040,35 @@ String calculateWindowNodeActionName(const QueryTreeNodePtr & node, bool use_column_identifier_as_action_node_name) { ActionNodeNameHelper helper(node_to_name, planner_context, use_column_identifier_as_action_node_name); - return helper.calculateWindowNodeActionName(node); + auto get_window_frame = [&]()-> std::optional{ + auto & window_node = node->as(); + auto & window_frame = window_node.getWindowFrame(); + if (!window_frame.is_default) + return window_frame; + return {}; + }; + return helper.calculateWindowNodeActionName(node, get_window_frame); } String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, bool use_column_identifier_as_action_node_name) { QueryTreeNodeToName empty_map; ActionNodeNameHelper helper(empty_map, planner_context, use_column_identifier_as_action_node_name); - return helper.calculateWindowNodeActionName(node); + auto get_window_frame = [&]()-> std::optional{ + auto & window_node = node->as(); + auto & window_frame = window_node.getWindowFrame(); + if (!window_frame.is_default) + return window_frame; + return {}; + }; + return helper.calculateWindowNodeActionName(node, get_window_frame); +} + +String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, bool use_column_identifier_as_action_node_name) +{ + QueryTreeNodeToName empty_map; + ActionNodeNameHelper helper(empty_map, planner_context, use_column_identifier_as_action_node_name); + return helper.calculateWindowNodeActionName(node, get_window_frame); } } diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 6bb32047327..78d7c69357a 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include @@ -8,6 +9,7 @@ #include #include +#include namespace DB { @@ -85,5 +87,9 @@ String calculateWindowNodeActionName(const QueryTreeNodePtr & node, String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, bool use_column_identifier_as_action_node_name = true); +String calculateWindowNodeActionName(const QueryTreeNodePtr & node, + const PlannerContext & 
planner_context, + std::function()> get_window_frame, + bool use_column_identifier_as_action_node_name = true); } diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index 225852de5a7..7d0fc3a85b3 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -1,5 +1,7 @@ +#include #include +#include #include #include #include @@ -8,41 +10,60 @@ #include -#include #include +#include namespace DB { namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; +extern const int NOT_IMPLEMENTED; } namespace { -WindowDescription extractWindowDescriptionFromWindowNode(const QueryTreeNodePtr & node, const PlannerContext & planner_context) +//WindowDescription extractWindowDescriptionFromWindowNode(const QueryTreeNodePtr & node, const PlannerContext & planner_context) +WindowDescription extractWindowDescriptionFromWindowNode(const FunctionNode & func_node, const PlannerContext & planner_context) { + auto node = func_node.getWindowNode(); auto & window_node = node->as(); + auto get_window_frame = [&]() -> std::optional + { + auto frame = window_node.getWindowFrame(); + if (!frame.is_default) + return frame; + auto aggregate_function = func_node.getAggregateFunction(); + if (const auto * win_func = dynamic_cast(aggregate_function.get())) + { + return win_func->getDefaultFrame(); + } + return {}; + }; + WindowDescription window_description; - window_description.window_name = calculateWindowNodeActionName(node, planner_context); + window_description.window_name = calculateWindowNodeActionName(node, planner_context, get_window_frame); for (const auto & partition_by_node : window_node.getPartitionBy().getNodes()) { auto partition_by_node_action_name = calculateActionNodeName(partition_by_node, planner_context); - auto partition_by_sort_column_description = SortColumnDescription(partition_by_node_action_name, 1 /* direction */, 1 /* nulls_direction */); + auto partition_by_sort_column_description + = SortColumnDescription(partition_by_node_action_name, 1 /* direction */, 1 /* nulls_direction */); window_description.partition_by.push_back(std::move(partition_by_sort_column_description)); } window_description.order_by = extractSortDescription(window_node.getOrderByNode(), planner_context); window_description.full_sort_description = window_description.partition_by; - window_description.full_sort_description.insert(window_description.full_sort_description.end(), window_description.order_by.begin(), window_description.order_by.end()); + window_description.full_sort_description.insert( + window_description.full_sort_description.end(), window_description.order_by.begin(), window_description.order_by.end()); /// WINDOW frame is validated during query analysis stage - window_description.frame = window_node.getWindowFrame(); + auto window_frame = get_window_frame(); + window_description.frame = window_frame ? 
*window_frame : window_node.getWindowFrame(); + auto node_frame = window_node.getWindowFrame(); const auto & query_context = planner_context.getQueryContext(); const auto & query_context_settings = query_context->getSettingsRef(); @@ -64,7 +85,8 @@ WindowDescription extractWindowDescriptionFromWindowNode(const QueryTreeNodePtr } -std::vector extractWindowDescriptions(const QueryTreeNodes & window_function_nodes, const PlannerContext & planner_context) +std::vector +extractWindowDescriptions(const QueryTreeNodes & window_function_nodes, const PlannerContext & planner_context) { std::unordered_map window_name_to_description; @@ -72,7 +94,7 @@ std::vector extractWindowDescriptions(const QueryTreeNodes & { auto & window_function_node_typed = window_function_node->as(); - auto function_window_description = extractWindowDescriptionFromWindowNode(window_function_node_typed.getWindowNode(), planner_context); + auto function_window_description = extractWindowDescriptionFromWindowNode(window_function_node_typed, planner_context); auto frame_type = function_window_description.frame.type; if (frame_type != WindowFrame::FrameType::ROWS && frame_type != WindowFrame::FrameType::RANGE) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 8de248a9c95..ae9a94bff90 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -6,9 +6,12 @@ #include #include #include +#include #include #include +#include #include +#include #include #include #include @@ -16,9 +19,6 @@ #include #include #include -#include -#include -#include #include #include @@ -57,52 +57,31 @@ struct Settings; namespace ErrorCodes { - extern const int BAD_ARGUMENTS; - extern const int NOT_IMPLEMENTED; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; - extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; +extern const int BAD_ARGUMENTS; +extern const int NOT_IMPLEMENTED; +extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int ILLEGAL_TYPE_OF_ARGUMENT; +extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; +extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; } -// Interface for true window functions. It's not much of an interface, they just -// accept the guts of WindowTransform and do 'something'. Given a small number of -// true window functions, and the fact that the WindowTransform internals are -// pretty much well-defined in domain terms (e.g. frame boundaries), this is -// somewhat acceptable. -class IWindowFunction -{ -public: - virtual ~IWindowFunction() = default; - - // Must insert the result for current_row. - virtual void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const = 0; - - virtual std::optional getDefaultFrame() const { return {}; } - - virtual ColumnPtr castColumn(const Columns &, const std::vector &) { return nullptr; } - - /// Is the frame type supported by this function. - virtual bool checkWindowFrameType(const WindowTransform * /*transform*/) const { return true; } -}; // Compares ORDER BY column values at given rows to find the boundaries of frame: // [compared] with [reference] +/- offset. Return value is -1/0/+1, like in // sorting predicates -- -1 means [compared] is less than [reference] +/- offset. 
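// Illustrative sketch only, not part of this patch: the contract described
// above, on plain integers and without the overflow checks the real
// implementation performs. The reference value is shifted by the frame offset
// first and the compared value is then ordered against it.
static int compareValuesWithOffsetExample(Int64 compared, Int64 reference, Int64 offset, bool offset_is_preceding)
{
    const Int64 shifted_reference = offset_is_preceding ? reference - offset : reference + offset;
    return compared < shifted_reference ? -1 : compared == shifted_reference ? 0 : 1;
}
// E.g. with an ascending ORDER BY and "RANGE BETWEEN 2 PRECEDING AND CURRENT ROW":
// compared = 5, reference = 7, offset = 2, offset_is_preceding = true gives a
// shifted reference of 5 and a result of 0, so the row with value 5 is the
// first row inside the frame. The real, type-dispatched implementation follows.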
template -static int compareValuesWithOffset(const IColumn * _compared_column, - size_t compared_row, const IColumn * _reference_column, +static int compareValuesWithOffset( + const IColumn * _compared_column, + size_t compared_row, + const IColumn * _reference_column, size_t reference_row, const Field & _offset, bool offset_is_preceding) { // Casting the columns to the known type here makes it faster, probably // because the getData call can be devirtualized. - const auto * compared_column = assert_cast( - _compared_column); - const auto * reference_column = assert_cast( - _reference_column); + const auto * compared_column = assert_cast(_compared_column); + const auto * reference_column = assert_cast(_reference_column); using ValueType = typename ColumnType::ValueType; // Note that the storage type of offset returned by get<> is different, so @@ -112,13 +91,11 @@ static int compareValuesWithOffset(const IColumn * _compared_column, const auto compared_value_data = compared_column->getDataAt(compared_row); assert(compared_value_data.size == sizeof(ValueType)); - auto compared_value = unalignedLoad( - compared_value_data.data); + auto compared_value = unalignedLoad(compared_value_data.data); const auto reference_value_data = reference_column->getDataAt(reference_row); assert(reference_value_data.size == sizeof(ValueType)); - auto reference_value = unalignedLoad( - reference_value_data.data); + auto reference_value = unalignedLoad(reference_value_data.data); bool is_overflow; if (offset_is_preceding) @@ -143,37 +120,34 @@ static int compareValuesWithOffset(const IColumn * _compared_column, else { // No overflow, compare normally. - return compared_value < reference_value ? -1 - : compared_value == reference_value ? 0 : 1; + return compared_value < reference_value ? -1 : compared_value == reference_value ? 0 : 1; } } // A specialization of compareValuesWithOffset for floats. template -static int compareValuesWithOffsetFloat(const IColumn * _compared_column, - size_t compared_row, const IColumn * _reference_column, +static int compareValuesWithOffsetFloat( + const IColumn * _compared_column, + size_t compared_row, + const IColumn * _reference_column, size_t reference_row, const Field & _offset, bool offset_is_preceding) { // Casting the columns to the known type here makes it faster, probably // because the getData call can be devirtualized. - const auto * compared_column = assert_cast( - _compared_column); - const auto * reference_column = assert_cast( - _reference_column); + const auto * compared_column = assert_cast(_compared_column); + const auto * reference_column = assert_cast(_reference_column); const auto offset = _offset.get(); chassert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); assert(compared_value_data.size == sizeof(typename ColumnType::ValueType)); - auto compared_value = unalignedLoad( - compared_value_data.data); + auto compared_value = unalignedLoad(compared_value_data.data); const auto reference_value_data = reference_column->getDataAt(reference_row); assert(reference_value_data.size == sizeof(typename ColumnType::ValueType)); - auto reference_value = unalignedLoad( - reference_value_data.data); + auto reference_value = unalignedLoad(reference_value_data.data); /// Floats overflow to Inf and the comparison will work normally, so we don't have to do anything. 
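    /// Illustrative numbers, not from this patch: with Float32, reference = 3.0e38
    /// and offset = 1.0e38 overflow to +Inf when the offset is added below, and
    /// every finite compared value is then correctly reported as smaller, so no
    /// extra clamping is needed.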
if (offset_is_preceding) @@ -181,58 +155,58 @@ static int compareValuesWithOffsetFloat(const IColumn * _compared_column, else reference_value += static_cast(offset); - const auto result = compared_value < reference_value ? -1 - : (compared_value == reference_value ? 0 : 1); + const auto result = compared_value < reference_value ? -1 : (compared_value == reference_value ? 0 : 1); return result; } // Helper macros to dispatch on type of the ORDER BY column #define APPLY_FOR_ONE_NEST_TYPE(FUNCTION, TYPE) \ -else if (typeid_cast(nest_compared_column.get())) \ -{ \ - /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ - nest_compare_function = FUNCTION; /* NOLINT */ \ -} + else if (typeid_cast(nest_compared_column.get())) \ + { \ + /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ + nest_compare_function = FUNCTION; /* NOLINT */ \ + } #define APPLY_FOR_NEST_TYPES(FUNCTION) \ -if (false) /* NOLINT */ \ -{ \ - /* Do nothing, a starter condition. */ \ -} \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + if (false) /* NOLINT */ \ + { \ + /* Do nothing, a starter condition. */ \ + } \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ -APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ + APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ \ -else \ -{ \ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, \ - "The RANGE OFFSET frame for '{}' ORDER BY nest column is not implemented", \ - demangle(typeid(nest_compared_column).name())); \ -} + else \ + { \ + throw Exception( \ + ErrorCodes::NOT_IMPLEMENTED, \ + "The RANGE OFFSET frame for '{}' ORDER BY nest column is not implemented", \ + demangle(typeid(nest_compared_column).name())); \ + } // A specialization of compareValuesWithOffset for nullable. 
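// Illustrative sketch only, not part of this patch: the NULL ordering that the
// specialization below implements, shown on std::optional values, with the
// offset handling (delegated to the nested column comparison) omitted. A NULL
// orders before every value and two NULLs are peers.
static int compareWithNullsExample(std::optional<Int64> compared, std::optional<Int64> reference)
{
    if (!compared && !reference)
        return 0;
    if (!compared)
        return -1;
    if (!reference)
        return 1;
    return *compared < *reference ? -1 : *compared == *reference ? 0 : 1;
}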
template -static int compareValuesWithOffsetNullable(const IColumn * _compared_column, - size_t compared_row, const IColumn * _reference_column, +static int compareValuesWithOffsetNullable( + const IColumn * _compared_column, + size_t compared_row, + const IColumn * _reference_column, size_t reference_row, const Field & _offset, bool offset_is_preceding) { - const auto * compared_column = assert_cast( - _compared_column); - const auto * reference_column = assert_cast( - _reference_column); + const auto * compared_column = assert_cast(_compared_column); + const auto * reference_column = assert_cast(_reference_column); if (compared_column->isNullAt(compared_row) && !reference_column->isNullAt(reference_row)) { @@ -251,54 +225,59 @@ static int compareValuesWithOffsetNullable(const IColumn * _compared_column, ColumnPtr nest_reference_column = reference_column->getNestedColumnPtr(); std::function nest_compare_function; + bool offset_is_preceding)> + nest_compare_function; APPLY_FOR_NEST_TYPES(compareValuesWithOffset) - return nest_compare_function(nest_compared_column.get(), compared_row, - nest_reference_column.get(), reference_row, _offset, offset_is_preceding); + return nest_compare_function( + nest_compared_column.get(), compared_row, nest_reference_column.get(), reference_row, _offset, offset_is_preceding); } // Helper macros to dispatch on type of the ORDER BY column #define APPLY_FOR_ONE_TYPE(FUNCTION, TYPE) \ -else if (typeid_cast(column)) \ -{ \ - /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ - compare_values_with_offset = FUNCTION; /* NOLINT */ \ -} + else if (typeid_cast(column)) \ + { \ + /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ + compare_values_with_offset = FUNCTION; /* NOLINT */ \ + } #define APPLY_FOR_TYPES(FUNCTION) \ -if (false) /* NOLINT */ \ -{ \ - /* Do nothing, a starter condition. */ \ -} \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + if (false) /* NOLINT */ \ + { \ + /* Do nothing, a starter condition. 
*/ \ + } \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ \ -APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ -APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ + APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ \ -APPLY_FOR_ONE_TYPE(FUNCTION##Nullable, ColumnNullable) \ -else \ -{ \ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, \ - "The RANGE OFFSET frame for '{}' ORDER BY column is not implemented", \ - demangle(typeid(*column).name())); \ -} + APPLY_FOR_ONE_TYPE(FUNCTION##Nullable, ColumnNullable) \ + else \ + { \ + throw Exception( \ + ErrorCodes::NOT_IMPLEMENTED, \ + "The RANGE OFFSET frame for '{}' ORDER BY column is not implemented", \ + demangle(typeid(*column).name())); \ + } -WindowTransform::WindowTransform(const Block & input_header_, - const Block & output_header_, - const WindowDescription & window_description_, - const std::vector & functions) +WindowTransform::WindowTransform( + const Block & input_header_, + const Block & output_header_, + const WindowDescription & window_description_, + const std::vector & functions) : IProcessor({input_header_}, {output_header_}) , input(inputs.front()) , output(outputs.front()) @@ -329,8 +308,7 @@ WindowTransform::WindowTransform(const Block & input_header_, workspace.argument_column_indices.reserve(f.argument_names.size()); for (const auto & argument_name : f.argument_names) { - workspace.argument_column_indices.push_back( - input_header.getPositionByName(argument_name)); + workspace.argument_column_indices.push_back(input_header.getPositionByName(argument_name)); } workspace.argument_columns.assign(f.argument_names.size(), nullptr); @@ -347,9 +325,7 @@ WindowTransform::WindowTransform(const Block & input_header_, } workspace.is_aggregate_function_state = workspace.aggregate_function->isState(); - workspace.aggregate_function_state.reset( - aggregate_function->sizeOfData(), - aggregate_function->alignOfData()); + workspace.aggregate_function_state.reset(aggregate_function->sizeOfData(), aggregate_function->alignOfData()); aggregate_function->create(workspace.aggregate_function_state.data()); workspaces.push_back(std::move(workspace)); @@ -358,24 +334,20 @@ WindowTransform::WindowTransform(const Block & input_header_, partition_by_indices.reserve(window_description.partition_by.size()); for (const auto & column : window_description.partition_by) { - partition_by_indices.push_back( - input_header.getPositionByName(column.column_name)); + partition_by_indices.push_back(input_header.getPositionByName(column.column_name)); } order_by_indices.reserve(window_description.order_by.size()); for (const auto & column : window_description.order_by) { - order_by_indices.push_back( - input_header.getPositionByName(column.column_name)); + order_by_indices.push_back(input_header.getPositionByName(column.column_name)); } // Choose a row comparison function for RANGE OFFSET frame based on the // 
type of the ORDER BY column. if (window_description.frame.type == WindowFrame::FrameType::RANGE - && (window_description.frame.begin_type - == WindowFrame::BoundaryType::Offset - || window_description.frame.end_type - == WindowFrame::BoundaryType::Offset)) + && (window_description.frame.begin_type == WindowFrame::BoundaryType::Offset + || window_description.frame.end_type == WindowFrame::BoundaryType::Offset)) { assert(order_by_indices.size() == 1); const auto & entry = input_header.getByPosition(order_by_indices[0]); @@ -385,32 +357,26 @@ WindowTransform::WindowTransform(const Block & input_header_, // Convert the offsets to the ORDER BY column type. We can't just check // that the type matches, because e.g. the int literals are always // (U)Int64, but the column might be Int8 and so on. - if (window_description.frame.begin_type - == WindowFrame::BoundaryType::Offset) + if (window_description.frame.begin_type == WindowFrame::BoundaryType::Offset) { - window_description.frame.begin_offset = convertFieldToTypeOrThrow( - window_description.frame.begin_offset, - *entry.type); + window_description.frame.begin_offset = convertFieldToTypeOrThrow(window_description.frame.begin_offset, *entry.type); - if (applyVisitor(FieldVisitorAccurateLess{}, - window_description.frame.begin_offset, Field(0))) + if (applyVisitor(FieldVisitorAccurateLess{}, window_description.frame.begin_offset, Field(0))) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Window frame start offset must be nonnegative, {} given", window_description.frame.begin_offset); } } - if (window_description.frame.end_type - == WindowFrame::BoundaryType::Offset) + if (window_description.frame.end_type == WindowFrame::BoundaryType::Offset) { - window_description.frame.end_offset = convertFieldToTypeOrThrow( - window_description.frame.end_offset, - *entry.type); + window_description.frame.end_offset = convertFieldToTypeOrThrow(window_description.frame.end_offset, *entry.type); - if (applyVisitor(FieldVisitorAccurateLess{}, - window_description.frame.end_offset, Field(0))) + if (applyVisitor(FieldVisitorAccurateLess{}, window_description.frame.end_offset, Field(0))) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Window frame start offset must be nonnegative, {} given", window_description.frame.end_offset); } @@ -423,11 +389,10 @@ WindowTransform::WindowTransform(const Block & input_header_, { if (!workspace.window_function_impl->checkWindowFrameType(this)) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported window frame type for function '{}'", - workspace.aggregate_function->getName()); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Unsupported window frame type for function '{}'", workspace.aggregate_function->getName()); } } - } } @@ -436,8 +401,7 @@ WindowTransform::~WindowTransform() // Some states may be not created yet if the creation failed. 
for (auto & ws : workspaces) { - ws.aggregate_function->destroy( - ws.aggregate_function_state.data()); + ws.aggregate_function->destroy(ws.aggregate_function_state.data()); } } @@ -511,14 +475,10 @@ void WindowTransform::advancePartitionEnd() size_t i = 0; for (; i < partition_by_columns; ++i) { - const auto * reference_column - = inputAt(prev_frame_start)[partition_by_indices[i]].get(); - const auto * compared_column - = inputAt(partition_end)[partition_by_indices[i]].get(); + const auto * reference_column = inputAt(prev_frame_start)[partition_by_indices[i]].get(); + const auto * compared_column = inputAt(partition_end)[partition_by_indices[i]].get(); - if (compared_column->compareAt(partition_end.row, - prev_frame_start.row, *reference_column, - 1 /* nan_direction_hint */) != 0) + if (compared_column->compareAt(partition_end.row, prev_frame_start.row, *reference_column, 1 /* nan_direction_hint */) != 0) { break; } @@ -630,9 +590,8 @@ auto WindowTransform::moveRowNumber(const RowNumber & original_row_number, Int64 void WindowTransform::advanceFrameStartRowsOffset() { // Just recalculate it each time by walking blocks. - const auto [moved_row, offset_left] = moveRowNumber(current_row, - window_description.frame.begin_offset.get() - * (window_description.frame.begin_preceding ? -1 : 1)); + const auto [moved_row, offset_left] = moveRowNumber( + current_row, window_description.frame.begin_offset.get() * (window_description.frame.begin_preceding ? -1 : 1)); frame_start = moved_row; @@ -669,21 +628,17 @@ void WindowTransform::advanceFrameStartRangeOffset() { // See the comment for advanceFrameEndRangeOffset(). const int direction = window_description.order_by[0].direction; - const bool preceding = window_description.frame.begin_preceding - == (direction > 0); - const auto * reference_column - = inputAt(current_row)[order_by_indices[0]].get(); + const bool preceding = window_description.frame.begin_preceding == (direction > 0); + const auto * reference_column = inputAt(current_row)[order_by_indices[0]].get(); for (; frame_start < partition_end; advanceRowNumber(frame_start)) { // The first frame value is [current_row] with offset, so we advance // while [frames_start] < [current_row] with offset. 
- const auto * compared_column - = inputAt(frame_start)[order_by_indices[0]].get(); - if (compare_values_with_offset(compared_column, frame_start.row, - reference_column, current_row.row, - window_description.frame.begin_offset, - preceding) - * direction >= 0) + const auto * compared_column = inputAt(frame_start)[order_by_indices[0]].get(); + if (compare_values_with_offset( + compared_column, frame_start.row, reference_column, current_row.row, window_description.frame.begin_offset, preceding) + * direction + >= 0) { frame_started = true; return; @@ -728,7 +683,8 @@ void WindowTransform::advanceFrameStart() advanceFrameStartRangeOffset(); break; default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Frame start type '{}' for frame '{}' is not implemented", window_description.frame.begin_type, window_description.frame.type); @@ -788,8 +744,7 @@ bool WindowTransform::arePeers(const RowNumber & x, const RowNumber & y) const { const auto * column_x = inputAt(x)[order_by_indices[i]].get(); const auto * column_y = inputAt(y)[order_by_indices[i]].get(); - if (column_x->compareAt(x.row, y.row, *column_y, - 1 /* nan_direction_hint */) != 0) + if (column_x->compareAt(x.row, y.row, *column_y, 1 /* nan_direction_hint */) != 0) { return false; } @@ -806,8 +761,7 @@ void WindowTransform::advanceFrameEndCurrentRow() // (only loop over rows and not over blocks), that should hopefully be more // efficient. // partition_end is either in this new block or past-the-end. - assert(frame_end.block == partition_end.block - || frame_end.block + 1 == partition_end.block); + assert(frame_end.block == partition_end.block || frame_end.block + 1 == partition_end.block); if (frame_end == partition_end) { @@ -869,10 +823,8 @@ void WindowTransform::advanceFrameEndRowsOffset() { // Walk the specified offset from the current row. The "+1" is needed // because the frame_end is a past-the-end pointer. - const auto [moved_row, offset_left] = moveRowNumber(current_row, - window_description.frame.end_offset.get() - * (window_description.frame.end_preceding ? -1 : 1) - + 1); + const auto [moved_row, offset_left] = moveRowNumber( + current_row, window_description.frame.end_offset.get() * (window_description.frame.end_preceding ? -1 : 1) + 1); if (partition_end <= moved_row) { @@ -905,22 +857,18 @@ void WindowTransform::advanceFrameEndRangeOffset() // PRECEDING/FOLLOWING change direction for DESC order. // See CD 9075-2:201?(E) 7.14 p. 429. const int direction = window_description.order_by[0].direction; - const bool preceding = window_description.frame.end_preceding - == (direction > 0); - const auto * reference_column - = inputAt(current_row)[order_by_indices[0]].get(); + const bool preceding = window_description.frame.end_preceding == (direction > 0); + const auto * reference_column = inputAt(current_row)[order_by_indices[0]].get(); for (; frame_end < partition_end; advanceRowNumber(frame_end)) { // The last frame value is current_row with offset, and we need a // past-the-end pointer, so we advance while // [frame_end] <= [current_row] with offset. 
- const auto * compared_column - = inputAt(frame_end)[order_by_indices[0]].get(); - if (compare_values_with_offset(compared_column, frame_end.row, - reference_column, current_row.row, - window_description.frame.end_offset, - preceding) - * direction > 0) + const auto * compared_column = inputAt(frame_end)[order_by_indices[0]].get(); + if (compare_values_with_offset( + compared_column, frame_end.row, reference_column, current_row.row, window_description.frame.end_offset, preceding) + * direction + > 0) { frame_ended = true; return; @@ -955,9 +903,8 @@ void WindowTransform::advanceFrameEnd() advanceFrameEndRangeOffset(); break; default: - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "The frame end type '{}' is not implemented", - window_description.frame.end_type); + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "The frame end type '{}' is not implemented", window_description.frame.end_type); } break; } @@ -1028,13 +975,9 @@ void WindowTransform::updateAggregationState() // rows manually, instead of using advanceRowNumber(). // For this purpose, the past-the-end block can be different than the // block of the past-the-end row (it's usually the next block). - const auto past_the_end_block = rows_to_add_end.row == 0 - ? rows_to_add_end.block - : rows_to_add_end.block + 1; + const auto past_the_end_block = rows_to_add_end.row == 0 ? rows_to_add_end.block : rows_to_add_end.block + 1; - for (auto block_number = rows_to_add_start.block; - block_number < past_the_end_block; - ++block_number) + for (auto block_number = rows_to_add_start.block; block_number < past_the_end_block; ++block_number) { auto & block = blockAt(block_number); @@ -1042,18 +985,15 @@ void WindowTransform::updateAggregationState() { for (size_t i = 0; i < ws.argument_column_indices.size(); ++i) { - ws.argument_columns[i] = block.input_columns[ - ws.argument_column_indices[i]].get(); + ws.argument_columns[i] = block.input_columns[ws.argument_column_indices[i]].get(); } ws.cached_block_number = block_number; } // First and last blocks may be processed partially, and other blocks // are processed in full. - const auto first_row = block_number == rows_to_add_start.block - ? rows_to_add_start.row : 0; - const auto past_the_end_row = block_number == rows_to_add_end.block - ? rows_to_add_end.row : block.rows; + const auto first_row = block_number == rows_to_add_start.block ? rows_to_add_start.row : 0; + const auto past_the_end_row = block_number == rows_to_add_end.block ? rows_to_add_end.row : block.rows; // We should add an addBatch analog that can accept a starting offset. // For now, add the values one by one. 
@@ -1101,8 +1041,7 @@ void WindowTransform::writeOutCurrentRow() } } -static void assertSameColumns(const Columns & left_all, - const Columns & right_all) +static void assertSameColumns(const Columns & left_all, const Columns & right_all) { assert(left_all.size() == right_all.size()); @@ -1120,8 +1059,7 @@ static void assertSameColumns(const Columns & left_all, if (const auto * right_lc = typeid_cast(right_column)) right_column = right_lc->getDictionary().getNestedColumn().get(); - assert(typeid(*left_column).hash_code() - == typeid(*right_column).hash_code()); + assert(typeid(*left_column).hash_code() == typeid(*right_column).hash_code()); if (isColumnConst(*left_column)) { @@ -1182,8 +1120,7 @@ void WindowTransform::appendChunk(Chunk & chunk) if (ws.window_function_impl) block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); - block.output_columns.push_back(ws.aggregate_function->getResultType() - ->createColumn()); + block.output_columns.push_back(ws.aggregate_function->getResultType()->createColumn()); block.output_columns.back()->reserve(block.rows); } @@ -1404,6 +1341,13 @@ IProcessor::Status WindowTransform::prepare() { // Output the ready block. const auto i = next_output_block_number - first_block_number; + LOG_ERROR( + getLogger("WindowTransform"), + "xxx {} output block: {}, next_output_block_number: {} first_not_ready_row.block: {}", + fmt::ptr(this), + i, + next_output_block_number, + first_not_ready_row.block); auto & block = blocks[i]; auto columns = block.original_input_columns; for (auto & res : block.output_columns) @@ -1507,12 +1451,10 @@ void WindowTransform::work() // that the frame start can be further than current row for some frame specs // (e.g. EXCLUDE CURRENT ROW), so we have to check both. assert(prev_frame_start <= frame_start); - const auto first_used_block = std::min(next_output_block_number, - std::min(prev_frame_start.block, current_row.block)); + const auto first_used_block = std::min(next_output_block_number, std::min(prev_frame_start.block, current_row.block)); if (first_block_number < first_used_block) { - blocks.erase(blocks.begin(), - blocks.begin() + (first_used_block - first_block_number)); + blocks.erase(blocks.begin(), blocks.begin() + (first_used_block - first_block_number)); first_block_number = first_used_block; assert(next_output_block_number >= first_block_number); @@ -1523,118 +1465,82 @@ void WindowTransform::work() } } -// A basic implementation for a true window function. It pretends to be an -// aggregate function, but refuses to work as such. 
-struct WindowFunction - : public IAggregateFunctionHelper - , public IWindowFunction -{ - std::string name; - - WindowFunction(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_, const DataTypePtr & result_type_) - : IAggregateFunctionHelper(argument_types_, parameters_, result_type_) - , name(name_) - {} - - bool isOnlyWindowFunction() const override { return true; } - - [[noreturn]] void fail() const - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The function '{}' can only be used as a window function, not as an aggregate function", - getName()); - } - - String getName() const override { return name; } - void create(AggregateDataPtr __restrict) const override {} - void destroy(AggregateDataPtr __restrict) const noexcept override {} - bool hasTrivialDestructor() const override { return true; } - size_t sizeOfData() const override { return 0; } - size_t alignOfData() const override { return 1; } - void add(AggregateDataPtr __restrict, const IColumn **, size_t, Arena *) const override { fail(); } - void merge(AggregateDataPtr __restrict, ConstAggregateDataPtr, Arena *) const override { fail(); } - void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional) const override { fail(); } - void deserialize(AggregateDataPtr __restrict, ReadBuffer &, std::optional, Arena *) const override { fail(); } - void insertResultInto(AggregateDataPtr __restrict, IColumn &, Arena *) const override { fail(); } -}; struct WindowFunctionRank final : public WindowFunction { - WindowFunctionRank(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionRank(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) - {} + { + } bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { - IColumn & to = *transform->blockAt(transform->current_row) - .output_columns[function_index]; - assert_cast(to).getData().push_back( - transform->peer_group_start_row_number); + IColumn & to = *transform->blockAt(transform->current_row).output_columns[function_index]; + assert_cast(to).getData().push_back(transform->peer_group_start_row_number); } }; struct WindowFunctionDenseRank final : public WindowFunction { - WindowFunctionDenseRank(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionDenseRank(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) - {} + { + } bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { - IColumn & to = *transform->blockAt(transform->current_row) - .output_columns[function_index]; - assert_cast(to).getData().push_back( - transform->peer_group_number); + IColumn & to = *transform->blockAt(transform->current_row).output_columns[function_index]; + assert_cast(to).getData().push_back(transform->peer_group_number); } }; namespace recurrent_detail { - template T getValue(const WindowTransform * /*transform*/, 
size_t /*function_index*/, size_t /*column_index*/, RowNumber /*row*/) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::getValue() is not implemented for {} type", typeid(T).name()); - } +template +T getValue(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*column_index*/, RowNumber /*row*/) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::getValue() is not implemented for {} type", typeid(T).name()); +} - template<> Float64 getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row) - { - const auto & workspace = transform->workspaces[function_index]; - const auto & column = transform->blockAt(row.block).input_columns[workspace.argument_column_indices[column_index]]; - return column->getFloat64(row.row); - } +template <> +Float64 getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row) +{ + const auto & workspace = transform->workspaces[function_index]; + const auto & column = transform->blockAt(row.block).input_columns[workspace.argument_column_indices[column_index]]; + return column->getFloat64(row.row); +} - template void setValueToOutputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, T /*value*/) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "recurrent_detail::setValueToOutputColumn() is not implemented for {} type", typeid(T).name()); - } +template +void setValueToOutputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, T /*value*/) +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::setValueToOutputColumn() is not implemented for {} type", typeid(T).name()); +} - template<> void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, Float64 value) - { - auto current_row = transform->current_row; - const auto & current_block = transform->blockAt(current_row); - IColumn & to = *current_block.output_columns[function_index]; +template <> +void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, Float64 value) +{ + auto current_row = transform->current_row; + const auto & current_block = transform->blockAt(current_row); + IColumn & to = *current_block.output_columns[function_index]; - assert_cast(to).getData().push_back(value); - } + assert_cast(to).getData().push_back(value); +} } struct WindowFunctionHelpers { - template + template static T getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row) { return recurrent_detail::getValue(transform, function_index, column_index, row); } - template + template static void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, T value) { recurrent_detail::setValueToOutputColumn(transform, function_index, value); @@ -1669,35 +1575,6 @@ struct WindowFunctionHelpers } }; -template -struct StatefulWindowFunction : public WindowFunction -{ - StatefulWindowFunction(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_, const DataTypePtr & result_type_) - : WindowFunction(name_, argument_types_, parameters_, result_type_) - { - } - - size_t sizeOfData() const override { return sizeof(State); } - size_t alignOfData() const override { return 1; } - - void create(AggregateDataPtr __restrict place) const override - { - new (place) State(); - } - - void destroy(AggregateDataPtr __restrict place) const noexcept override - { - reinterpret_cast(place)->~State(); - } - - bool 
hasTrivialDestructor() const override { return std::is_trivially_destructible_v; } - - State & getState(const WindowFunctionWorkspace & workspace) const - { - return *reinterpret_cast(workspace.aggregate_function_state.data()); - } -}; struct ExponentialTimeDecayedSumState { @@ -1721,34 +1598,34 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedSum(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedSum(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } if (!isNumber(argument_types[ARGUMENT_VALUE])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Argument {} must be a number, '{}' given", ARGUMENT_VALUE, argument_types[ARGUMENT_VALUE]->getName()); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) + && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1757,8 +1634,7 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -1770,8 +1646,7 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc RowNumber frame_back = transform->prevRowNumber(transform->frame_end); Float64 back_t = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIME, frame_back); - if (transform->prev_frame_start <= transform->frame_start - && transform->frame_start < transform->prev_frame_end + if (transform->prev_frame_start <= transform->frame_start && transform->frame_start < transform->prev_frame_end && transform->prev_frame_end <= transform->frame_end) { for (RowNumber i = transform->prev_frame_start; i < transform->frame_start; transform->advanceRowNumber(i)) @@ -1805,8 +1680,8 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } - private: - const Float64 decay_length; +private: + const Float64 
decay_length; }; struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction @@ -1818,34 +1693,34 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedMax(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedMax(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } if (!isNumber(argument_types[ARGUMENT_VALUE])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Argument {} must be a number, '{}' given", ARGUMENT_VALUE, argument_types[ARGUMENT_VALUE]->getName()); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) + && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1854,8 +1729,7 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { Float64 result = std::numeric_limits::quiet_NaN(); @@ -1881,8 +1755,8 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } - private: - const Float64 decay_length; +private: + const Float64 decay_length; }; struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFunction @@ -1893,26 +1767,25 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedCount(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedCount(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , 
decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly one argument", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one argument", name_); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) + && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1921,8 +1794,7 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -1934,8 +1806,7 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu RowNumber frame_back = transform->prevRowNumber(transform->frame_end); Float64 back_t = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIME, frame_back); - if (transform->prev_frame_start <= transform->frame_start - && transform->frame_start < transform->prev_frame_end + if (transform->prev_frame_start <= transform->frame_start && transform->frame_start < transform->prev_frame_end && transform->prev_frame_end <= transform->frame_end) { for (RowNumber i = transform->prev_frame_start; i < transform->frame_start; transform->advanceRowNumber(i)) @@ -1966,8 +1837,8 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } - private: - const Float64 decay_length; +private: + const Float64 decay_length; }; struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunction @@ -1979,34 +1850,34 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedAvg(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedAvg(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } if (!isNumber(argument_types[ARGUMENT_VALUE])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + 
ErrorCodes::BAD_ARGUMENTS, "Argument {} must be a number, '{}' given", ARGUMENT_VALUE, argument_types[ARGUMENT_VALUE]->getName()); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) + && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -2015,8 +1886,7 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -2030,8 +1900,7 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc RowNumber frame_back = transform->prevRowNumber(transform->frame_end); Float64 back_t = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIME, frame_back); - if (transform->prev_frame_start <= transform->frame_start - && transform->frame_start < transform->prev_frame_end + if (transform->prev_frame_start <= transform->frame_start && transform->frame_start < transform->prev_frame_end && transform->prev_frame_end <= transform->frame_end) { for (RowNumber i = transform->prev_frame_start; i < transform->frame_start; transform->advanceRowNumber(i)) @@ -2074,56 +1943,49 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc state.previous_count = count; state.previous_time = back_t; - result = sum/count; + result = sum / count; } WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } - private: - const Float64 decay_length; +private: + const Float64 decay_length; }; struct WindowFunctionRowNumber final : public WindowFunction { - WindowFunctionRowNumber(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionRowNumber(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) - {} + { + } bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { - IColumn & to = *transform->blockAt(transform->current_row) - .output_columns[function_index]; - assert_cast(to).getData().push_back( - transform->current_row_number); + IColumn & to = *transform->blockAt(transform->current_row).output_columns[function_index]; + assert_cast(to).getData().push_back(transform->current_row_number); } }; namespace { - struct NtileState - { - UInt64 buckets = 0; - RowNumber start_row; - UInt64 current_partition_rows = 0; - UInt64 current_partition_inserted_row = 0; +struct NtileState +{ + UInt64 buckets = 0; + RowNumber start_row; + UInt64 current_partition_rows = 0; + UInt64 current_partition_inserted_row = 0; - void windowInsertResultInto( - const WindowTransform * transform, - size_t 
function_index, - const DataTypes & argument_types); - }; + void windowInsertResultInto(const WindowTransform * transform, size_t function_index, const DataTypes & argument_types); +}; } // Usage: ntile(n). n is the number of buckets. struct WindowFunctionNtile final : public StatefulWindowFunction { - WindowFunctionNtile(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionNtile(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) { if (argument_types.size() != 1) @@ -2131,7 +1993,11 @@ struct WindowFunctionNtile final : public StatefulWindowFunction auto type_id = argument_types[0]->getTypeId(); if (type_id != TypeIndex::UInt8 && type_id != TypeIndex::UInt16 && type_id != TypeIndex::UInt32 && type_id != TypeIndex::UInt64) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "'{}' argument type must be an unsigned integer (not larger than 64-bit), got {}", name_, argument_types[0]->getName()); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "'{}' argument type must be an unsigned integer (not larger than 64-bit), got {}", + name_, + argument_types[0]->getName()); } bool allocatesMemoryInArena() const override { return false; } @@ -2162,13 +2028,13 @@ struct WindowFunctionNtile final : public StatefulWindowFunction std::optional getDefaultFrame() const override { WindowFrame frame; + frame.is_default = false; frame.type = WindowFrame::FrameType::ROWS; frame.end_type = WindowFrame::BoundaryType::Unbounded; return frame; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -2178,83 +2044,80 @@ struct WindowFunctionNtile final : public StatefulWindowFunction namespace { - void NtileState::windowInsertResultInto( - const WindowTransform * transform, - size_t function_index, - const DataTypes & argument_types) +void NtileState::windowInsertResultInto(const WindowTransform * transform, size_t function_index, const DataTypes & argument_types) +{ + if (!buckets) [[unlikely]] { - if (!buckets) [[unlikely]] + const auto & current_block = transform->blockAt(transform->current_row); + const auto & workspace = transform->workspaces[function_index]; + const auto & arg_col = *current_block.original_input_columns[workspace.argument_column_indices[0]]; + if (!isColumnConst(arg_col)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be a constant"); + auto type_id = argument_types[0]->getTypeId(); + if (type_id == TypeIndex::UInt8) + buckets = arg_col[transform->current_row.row].get(); + else if (type_id == TypeIndex::UInt16) + buckets = arg_col[transform->current_row.row].get(); + else if (type_id == TypeIndex::UInt32) + buckets = arg_col[transform->current_row.row].get(); + else if (type_id == TypeIndex::UInt64) + buckets = arg_col[transform->current_row.row].get(); + + if (!buckets) { - const auto & current_block = transform->blockAt(transform->current_row); - const auto & workspace = transform->workspaces[function_index]; - const auto & arg_col = *current_block.original_input_columns[workspace.argument_column_indices[0]]; - if (!isColumnConst(arg_col)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be a constant"); - auto 
type_id = argument_types[0]->getTypeId(); - if (type_id == TypeIndex::UInt8) - buckets = arg_col[transform->current_row.row].get(); - else if (type_id == TypeIndex::UInt16) - buckets = arg_col[transform->current_row.row].get(); - else if (type_id == TypeIndex::UInt32) - buckets = arg_col[transform->current_row.row].get(); - else if (type_id == TypeIndex::UInt64) - buckets = arg_col[transform->current_row.row].get(); - - if (!buckets) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be greater than zero"); - } - } - // new partition - if (WindowFunctionHelpers::checkPartitionEnterFirstRow(transform)) [[unlikely]] - { - current_partition_rows = 0; - current_partition_inserted_row = 0; - start_row = transform->current_row; - } - current_partition_rows++; - - // Only do the action when we meet the last row in this partition. - if (!WindowFunctionHelpers::checkPartitionEnterLastRow(transform)) - return; - - auto bucket_capacity = current_partition_rows / buckets; - auto capacity_diff = current_partition_rows - bucket_capacity * buckets; - - // bucket number starts from 1. - UInt64 bucket_num = 1; - while (current_partition_inserted_row < current_partition_rows) - { - auto current_bucket_capacity = bucket_capacity; - if (capacity_diff > 0) - { - current_bucket_capacity += 1; - capacity_diff--; - } - auto left_rows = current_bucket_capacity; - while (left_rows) - { - auto available_block_rows = transform->blockRowsNumber(start_row) - start_row.row; - IColumn & to = *transform->blockAt(start_row).output_columns[function_index]; - auto & pod_array = assert_cast(to).getData(); - if (left_rows < available_block_rows) - { - pod_array.resize_fill(pod_array.size() + left_rows, bucket_num); - start_row.row += left_rows; - left_rows = 0; - } - else - { - pod_array.resize_fill(pod_array.size() + available_block_rows, bucket_num); - left_rows -= available_block_rows; - start_row.block++; - start_row.row = 0; - } - } - current_partition_inserted_row += current_bucket_capacity; - bucket_num += 1; + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be greater than zero"); } } + // new partition + if (WindowFunctionHelpers::checkPartitionEnterFirstRow(transform)) [[unlikely]] + { + current_partition_rows = 0; + current_partition_inserted_row = 0; + start_row = transform->current_row; + } + current_partition_rows++; + + // Only do the action when we meet the last row in this partition. + if (!WindowFunctionHelpers::checkPartitionEnterLastRow(transform)) + return; + + auto bucket_capacity = current_partition_rows / buckets; + auto capacity_diff = current_partition_rows - bucket_capacity * buckets; + + // bucket number starts from 1. 
+ UInt64 bucket_num = 1; + while (current_partition_inserted_row < current_partition_rows) + { + auto current_bucket_capacity = bucket_capacity; + if (capacity_diff > 0) + { + current_bucket_capacity += 1; + capacity_diff--; + } + auto left_rows = current_bucket_capacity; + while (left_rows) + { + auto available_block_rows = transform->blockRowsNumber(start_row) - start_row.row; + IColumn & to = *transform->blockAt(start_row).output_columns[function_index]; + auto & pod_array = assert_cast(to).getData(); + if (left_rows < available_block_rows) + { + pod_array.resize_fill(pod_array.size() + left_rows, bucket_num); + start_row.row += left_rows; + left_rows = 0; + } + else + { + pod_array.resize_fill(pod_array.size() + available_block_rows, bucket_num); + left_rows -= available_block_rows; + start_row.block++; + start_row.row = 0; + } + } + current_partition_inserted_row += current_bucket_capacity; + bucket_num += 1; + } +} } namespace @@ -2269,23 +2132,21 @@ struct PercentRankState struct WindowFunctionPercentRank final : public StatefulWindowFunction { public: - WindowFunctionPercentRank(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionPercentRank(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) - {} + { + } bool allocatesMemoryInArena() const override { return false; } bool checkWindowFrameType(const WindowTransform * transform) const override { - if (transform->window_description.frame.type != WindowFrame::FrameType::RANGE - || transform->window_description.frame.begin_type != WindowFrame::BoundaryType::Unbounded - || transform->window_description.frame.end_type != WindowFrame::BoundaryType::Current) - { - LOG_ERROR( - getLogger("WindowFunctionPercentRank"), - "Window frame for function 'percent_rank' should be 'RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT'"); - return false; + if (transform->window_description.frame != getDefaultFrame()) + { + LOG_ERROR( + getLogger("WindowFunctionPercentRank"), + "Window frame for function 'percent_rank' should be '{}'", getDefaultFrame()->toString()); + return false; } return true; } @@ -2293,9 +2154,10 @@ public: std::optional getDefaultFrame() const override { WindowFrame frame; + frame.is_default = false; frame.type = WindowFrame::FrameType::RANGE; frame.begin_type = WindowFrame::BoundaryType::Unbounded; - frame.end_type = WindowFrame::BoundaryType::Current; + frame.end_type = WindowFrame::BoundaryType::Unbounded; return frame; } @@ -2371,14 +2233,12 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { FunctionBasePtr func_cast = nullptr; - WindowFunctionLagLeadInFrame(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionLagLeadInFrame(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, createResultType(argument_types_, name_)) { if (!parameters.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Function {} cannot be parameterized", name_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} cannot be parameterized", name_); } if (argument_types.size() == 1) @@ -2388,9 +2248,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (!isInt64OrUInt64FieldType(argument_types[1]->getDefault().getType())) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Offset must be an integer, '{}' 
given", - argument_types[1]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Offset must be an integer, '{}' given", argument_types[1]->getName()); } if (argument_types.size() == 2) @@ -2400,9 +2258,11 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (argument_types.size() > 3) { - throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, + throw Exception( + ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Function '{}' accepts at most 3 arguments, {} given", - name, argument_types.size()); + name, + argument_types.size()); } if (argument_types[0]->equals(*argument_types[2])) @@ -2411,14 +2271,16 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction const auto supertype = tryGetLeastSupertype(DataTypes{argument_types[0], argument_types[2]}); if (!supertype) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "There is no supertype for the argument type '{}' and the default value type '{}'", argument_types[0]->getName(), argument_types[2]->getName()); } if (!argument_types[0]->equals(*supertype)) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "The supertype '{}' for the argument type '{}' and the default value type '{}' is not the same as the argument type", supertype->getName(), argument_types[0]->getName(), @@ -2427,15 +2289,8 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction const auto from_name = argument_types[2]->getName(); const auto to_name = argument_types[0]->getName(); - ColumnsWithTypeAndName arguments - { - { argument_types[2], "" }, - { - DataTypeString().createColumnConst(0, to_name), - std::make_shared(), - "" - } - }; + ColumnsWithTypeAndName arguments{ + {argument_types[2], ""}, {DataTypeString().createColumnConst(0, to_name), std::make_shared(), ""}}; auto get_cast_func = [&arguments] { @@ -2444,7 +2299,6 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction }; func_cast = get_cast_func(); - } ColumnPtr castColumn(const Columns & columns, const std::vector & idx) override @@ -2452,15 +2306,11 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (!func_cast) return nullptr; - ColumnsWithTypeAndName arguments - { - { columns[idx[2]], argument_types[2], "" }, - { - DataTypeString().createColumnConst(columns[idx[2]]->size(), argument_types[0]->getName()), - std::make_shared(), - "" - } - }; + ColumnsWithTypeAndName arguments{ + {columns[idx[2]], argument_types[2], ""}, + {DataTypeString().createColumnConst(columns[idx[2]]->size(), argument_types[0]->getName()), + std::make_shared(), + ""}}; return func_cast->execute(arguments, argument_types[0], columns[idx[2]]->size()); } @@ -2469,8 +2319,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { if (argument_types_.empty()) { - throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, - "Function {} takes at least one argument", name_); + throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} takes at least one argument", name_); } return argument_types_[0]; @@ -2478,8 +2327,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & current_block = transform->blockAt(transform->current_row); IColumn & to = 
*current_block.output_columns[function_index]; @@ -2488,34 +2336,27 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction Int64 offset = 1; if (argument_types.size() > 1) { - offset = (*current_block.input_columns[ - workspace.argument_column_indices[1]])[ - transform->current_row.row].get(); + offset = (*current_block.input_columns[workspace.argument_column_indices[1]])[transform->current_row.row].get(); /// Either overflow or really negative value, both is not acceptable. if (offset < 0) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The offset for function {} must be in (0, {}], {} given", - getName(), INT64_MAX, offset); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "The offset for function {} must be in (0, {}], {} given", getName(), INT64_MAX, offset); } } - const auto [target_row, offset_left] = transform->moveRowNumber( - transform->current_row, offset * (is_lead ? 1 : -1)); + const auto [target_row, offset_left] = transform->moveRowNumber(transform->current_row, offset * (is_lead ? 1 : -1)); - if (offset_left != 0 - || target_row < transform->frame_start - || transform->frame_end <= target_row) + if (offset_left != 0 || target_row < transform->frame_start || transform->frame_end <= target_row) { // Offset is outside the frame. if (argument_types.size() > 2) { // Column with default values is specified. - const IColumn & default_column = - current_block.casted_columns[function_index] ? - *current_block.casted_columns[function_index].get() : - *current_block.input_columns[workspace.argument_column_indices[2]].get(); + const IColumn & default_column = current_block.casted_columns[function_index] + ? *current_block.casted_columns[function_index].get() + : *current_block.input_columns[workspace.argument_column_indices[2]].get(); to.insert(default_column[transform->current_row.row]); } @@ -2527,30 +2368,24 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction else { // Offset is inside the frame. 
- to.insertFrom(*transform->blockAt(target_row).input_columns[ - workspace.argument_column_indices[0]], - target_row.row); + to.insertFrom(*transform->blockAt(target_row).input_columns[workspace.argument_column_indices[0]], target_row.row); } } }; struct WindowFunctionNthValue final : public WindowFunction { - WindowFunctionNthValue(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionNthValue(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, createResultType(name_, argument_types_)) { if (!parameters.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Function {} cannot be parameterized", name_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} cannot be parameterized", name_); } if (!isInt64OrUInt64FieldType(argument_types[1]->getDefault().getType())) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Offset must be an integer, '{}' given", - argument_types[1]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Offset must be an integer, '{}' given", argument_types[1]->getName()); } } @@ -2558,8 +2393,7 @@ struct WindowFunctionNthValue final : public WindowFunction { if (argument_types_.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); } return argument_types_[0]; @@ -2567,30 +2401,24 @@ struct WindowFunctionNthValue final : public WindowFunction bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & current_block = transform->blockAt(transform->current_row); IColumn & to = *current_block.output_columns[function_index]; const auto & workspace = transform->workspaces[function_index]; - Int64 offset = (*current_block.input_columns[ - workspace.argument_column_indices[1]])[ - transform->current_row.row].get(); + Int64 offset = (*current_block.input_columns[workspace.argument_column_indices[1]])[transform->current_row.row].get(); /// Either overflow or really negative value, both is not acceptable. if (offset <= 0) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The offset for function {} must be in (0, {}], {} given", - getName(), INT64_MAX, offset); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "The offset for function {} must be in (0, {}], {} given", getName(), INT64_MAX, offset); } --offset; const auto [target_row, offset_left] = transform->moveRowNumber(transform->frame_start, offset); - if (offset_left != 0 - || target_row < transform->frame_start - || transform->frame_end <= target_row) + if (offset_left != 0 || target_row < transform->frame_start || transform->frame_end <= target_row) { // Offset is outside the frame. to.insertDefault(); @@ -2598,9 +2426,7 @@ struct WindowFunctionNthValue final : public WindowFunction else { // Offset is inside the frame. 
- to.insertFrom(*transform->blockAt(target_row).input_columns[ - workspace.argument_column_indices[0]], - target_row.row); + to.insertFrom(*transform->blockAt(target_row).input_columns[workspace.argument_column_indices[0]], target_row.row); } } }; @@ -2621,35 +2447,34 @@ struct NonNegativeDerivativeParams bool interval_specified = false; Int64 ts_scale_multiplier = 0; - NonNegativeDerivativeParams( - const std::string & name_, const DataTypes & argument_types, const Array & parameters) + NonNegativeDerivativeParams(const std::string & name_, const DataTypes & argument_types, const Array & parameters) { if (!parameters.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Function {} cannot be parameterized", name_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} cannot be parameterized", name_); } if (argument_types.size() != 2 && argument_types.size() != 3) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Function {} takes 2 or 3 arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes 2 or 3 arguments", name_); } if (!isNumber(argument_types[ARGUMENT_METRIC])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Argument {} must be a number, '{}' given", - ARGUMENT_METRIC, - argument_types[ARGUMENT_METRIC]->getName()); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Argument {} must be a number, '{}' given", + ARGUMENT_METRIC, + argument_types[ARGUMENT_METRIC]->getName()); } if (!isDateTime(argument_types[ARGUMENT_TIMESTAMP]) && !isDateTime64(argument_types[ARGUMENT_TIMESTAMP])) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Argument {} must be DateTime or DateTime64, '{}' given", - ARGUMENT_TIMESTAMP, - argument_types[ARGUMENT_TIMESTAMP]->getName()); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Argument {} must be DateTime or DateTime64, '{}' given", + ARGUMENT_TIMESTAMP, + argument_types[ARGUMENT_TIMESTAMP]->getName()); } if (isDateTime64(argument_types[ARGUMENT_TIMESTAMP])) @@ -2683,27 +2508,28 @@ struct NonNegativeDerivativeParams }; // nonNegativeDerivative(metric_column, timestamp_column[, INTERVAL 1 SECOND]) -struct WindowFunctionNonNegativeDerivative final : public StatefulWindowFunction, public NonNegativeDerivativeParams +struct WindowFunctionNonNegativeDerivative final : public StatefulWindowFunction, + public NonNegativeDerivativeParams { using Params = NonNegativeDerivativeParams; - WindowFunctionNonNegativeDerivative(const std::string & name_, - const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionNonNegativeDerivative(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , NonNegativeDerivativeParams(name, argument_types, parameters) - {} + { + } bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, - size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override { const auto & current_block = transform->blockAt(transform->current_row); const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); - auto interval_duration = interval_specified ? interval_length * - (*current_block.input_columns[workspace.argument_column_indices[ARGUMENT_INTERVAL]]).getFloat64(0) : 1; + auto interval_duration = interval_specified + ? 
interval_length * (*current_block.input_columns[workspace.argument_column_indices[ARGUMENT_INTERVAL]]).getFloat64(0) + : 1; Float64 curr_metric = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_METRIC, transform->current_row); Float64 metric_diff = curr_metric - state.previous_metric; @@ -2711,16 +2537,18 @@ struct WindowFunctionNonNegativeDerivative final : public StatefulWindowFunction if (ts_scale_multiplier) { - const auto & column = transform->blockAt(transform->current_row.block).input_columns[workspace.argument_column_indices[ARGUMENT_TIMESTAMP]]; + const auto & column + = transform->blockAt(transform->current_row.block).input_columns[workspace.argument_column_indices[ARGUMENT_TIMESTAMP]]; const auto & curr_timestamp = checkAndGetColumn(*column).getInt(transform->current_row.row); Float64 time_elapsed = curr_timestamp - state.previous_timestamp; - result = (time_elapsed > 0) ? (metric_diff * ts_scale_multiplier / time_elapsed * interval_duration) : 0; + result = (time_elapsed > 0) ? (metric_diff * ts_scale_multiplier / time_elapsed * interval_duration) : 0; state.previous_timestamp = curr_timestamp; } else { - Float64 curr_timestamp = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIMESTAMP, transform->current_row); + Float64 curr_timestamp + = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIMESTAMP, transform->current_row); Float64 time_elapsed = curr_timestamp - state.previous_timestamp; result = (time_elapsed > 0) ? (metric_diff / time_elapsed * interval_duration) : 0; state.previous_timestamp = curr_timestamp; diff --git a/src/Processors/Transforms/WindowTransform.h b/src/Processors/Transforms/WindowTransform.h index fe4f79e997c..cb672ad6841 100644 --- a/src/Processors/Transforms/WindowTransform.h +++ b/src/Processors/Transforms/WindowTransform.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -21,30 +22,6 @@ using ExpressionActionsPtr = std::shared_ptr; class Arena; -class IWindowFunction; - -// Runtime data for computing one window function. -struct WindowFunctionWorkspace -{ - AggregateFunctionPtr aggregate_function; - - // Cached value of aggregate function isState virtual method - bool is_aggregate_function_state = false; - - // This field is set for pure window functions. When set, we ignore the - // window_function.aggregate_function, and work through this interface - // instead. - IWindowFunction * window_function_impl = nullptr; - - std::vector argument_column_indices; - - // Will not be initialized for a pure window function. - mutable AlignedBuffer aggregate_function_state; - - // Argument columns. Be careful, this is a per-block cache. 
- std::vector argument_columns; - UInt64 cached_block_number = std::numeric_limits::max(); -}; struct WindowTransformBlock { From b35ff7e41784c50c04ad02df94b7a3ecd947789c Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Fri, 2 Aug 2024 15:33:31 +0800 Subject: [PATCH 1797/2361] update test --- src/Processors/Transforms/WindowTransform.cpp | 8 +------- .../0_stateless/01592_window_functions.reference | 11 +++++++++++ tests/queries/0_stateless/01592_window_functions.sql | 11 +++++++++++ 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index ae9a94bff90..a003c9a8e56 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -1341,13 +1341,6 @@ IProcessor::Status WindowTransform::prepare() { // Output the ready block. const auto i = next_output_block_number - first_block_number; - LOG_ERROR( - getLogger("WindowTransform"), - "xxx {} output block: {}, next_output_block_number: {} first_not_ready_row.block: {}", - fmt::ptr(this), - i, - next_output_block_number, - first_not_ready_row.block); auto & block = blocks[i]; auto columns = block.original_input_columns; for (auto & res : block.output_columns) @@ -2158,6 +2151,7 @@ public: frame.type = WindowFrame::FrameType::RANGE; frame.begin_type = WindowFrame::BoundaryType::Unbounded; frame.end_type = WindowFrame::BoundaryType::Unbounded; + //frame.end_type = WindowFrame::BoundaryType::Current; return frame; } diff --git a/tests/queries/0_stateless/01592_window_functions.reference b/tests/queries/0_stateless/01592_window_functions.reference index 06ec67ee82d..558f643c281 100644 --- a/tests/queries/0_stateless/01592_window_functions.reference +++ b/tests/queries/0_stateless/01592_window_functions.reference @@ -92,3 +92,14 @@ Kindle Fire Tablet 150 1 0 Samsung Galaxy Tab Tablet 200 2 0.5 iPad Tablet 700 3 1 Others Unknow 200 1 0 +---- Q9 ---- +0 1 0 +1 2 1 +2 3 2 +3 4 3 +4 5 4 +5 6 5 +6 7 6 +7 8 7 +8 9 8 +9 10 9 diff --git a/tests/queries/0_stateless/01592_window_functions.sql b/tests/queries/0_stateless/01592_window_functions.sql index a660fcca7b2..32c53763e40 100644 --- a/tests/queries/0_stateless/01592_window_functions.sql +++ b/tests/queries/0_stateless/01592_window_functions.sql @@ -124,3 +124,14 @@ ORDER BY drop table product_groups; drop table products; + +select '---- Q9 ----'; +select number, row_number, cast(percent_rank * 10000 as Int32) as percent_rank +from ( + select number, row_number() over () as row_number, percent_rank() over (order by number) as percent_rank + from numbers(10000) + order by number + limit 10 +) +settings max_block_size=100; + From 632ab91bbb389f7904296f6c9a3c89e43ff8a4df Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Fri, 2 Aug 2024 16:01:29 +0800 Subject: [PATCH 1798/2361] revert format --- src/Planner/PlannerActionsVisitor.cpp | 6 + src/Planner/PlannerWindowFunctions.cpp | 1 - src/Processors/Transforms/WindowTransform.cpp | 915 ++++++++++-------- 3 files changed, 508 insertions(+), 414 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 99b9c3f7482..f6c2c92cbb4 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -242,6 +243,11 @@ public: auto & window_frame = window_node.getWindowFrame(); if (!window_frame.is_default) return window_frame; + auto aggregate_function = 
function_node.getAggregateFunction(); + if (const auto * win_func = dynamic_cast(aggregate_function.get())) + { + return win_func->getDefaultFrame(); + } return {}; }; buffer << " OVER ("; diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index 7d0fc3a85b3..2a28787ba96 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -24,7 +24,6 @@ extern const int NOT_IMPLEMENTED; namespace { -//WindowDescription extractWindowDescriptionFromWindowNode(const QueryTreeNodePtr & node, const PlannerContext & planner_context) WindowDescription extractWindowDescriptionFromWindowNode(const FunctionNode & func_node, const PlannerContext & planner_context) { auto node = func_node.getWindowNode(); diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index a003c9a8e56..c26cd7cc8c3 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -6,12 +6,9 @@ #include #include #include -#include #include #include -#include #include -#include #include #include #include @@ -19,6 +16,9 @@ #include #include #include +#include +#include +#include #include #include @@ -57,31 +57,30 @@ struct Settings; namespace ErrorCodes { -extern const int BAD_ARGUMENTS; -extern const int NOT_IMPLEMENTED; -extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -extern const int ILLEGAL_TYPE_OF_ARGUMENT; -extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; -extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; + extern const int BAD_ARGUMENTS; + extern const int NOT_IMPLEMENTED; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION; + extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION; } - // Compares ORDER BY column values at given rows to find the boundaries of frame: // [compared] with [reference] +/- offset. Return value is -1/0/+1, like in // sorting predicates -- -1 means [compared] is less than [reference] +/- offset. template -static int compareValuesWithOffset( - const IColumn * _compared_column, - size_t compared_row, - const IColumn * _reference_column, +static int compareValuesWithOffset(const IColumn * _compared_column, + size_t compared_row, const IColumn * _reference_column, size_t reference_row, const Field & _offset, bool offset_is_preceding) { // Casting the columns to the known type here makes it faster, probably // because the getData call can be devirtualized. 
- const auto * compared_column = assert_cast(_compared_column); - const auto * reference_column = assert_cast(_reference_column); + const auto * compared_column = assert_cast( + _compared_column); + const auto * reference_column = assert_cast( + _reference_column); using ValueType = typename ColumnType::ValueType; // Note that the storage type of offset returned by get<> is different, so @@ -91,11 +90,13 @@ static int compareValuesWithOffset( const auto compared_value_data = compared_column->getDataAt(compared_row); assert(compared_value_data.size == sizeof(ValueType)); - auto compared_value = unalignedLoad(compared_value_data.data); + auto compared_value = unalignedLoad( + compared_value_data.data); const auto reference_value_data = reference_column->getDataAt(reference_row); assert(reference_value_data.size == sizeof(ValueType)); - auto reference_value = unalignedLoad(reference_value_data.data); + auto reference_value = unalignedLoad( + reference_value_data.data); bool is_overflow; if (offset_is_preceding) @@ -120,34 +121,37 @@ static int compareValuesWithOffset( else { // No overflow, compare normally. - return compared_value < reference_value ? -1 : compared_value == reference_value ? 0 : 1; + return compared_value < reference_value ? -1 + : compared_value == reference_value ? 0 : 1; } } // A specialization of compareValuesWithOffset for floats. template -static int compareValuesWithOffsetFloat( - const IColumn * _compared_column, - size_t compared_row, - const IColumn * _reference_column, +static int compareValuesWithOffsetFloat(const IColumn * _compared_column, + size_t compared_row, const IColumn * _reference_column, size_t reference_row, const Field & _offset, bool offset_is_preceding) { // Casting the columns to the known type here makes it faster, probably // because the getData call can be devirtualized. - const auto * compared_column = assert_cast(_compared_column); - const auto * reference_column = assert_cast(_reference_column); + const auto * compared_column = assert_cast( + _compared_column); + const auto * reference_column = assert_cast( + _reference_column); const auto offset = _offset.get(); chassert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); assert(compared_value_data.size == sizeof(typename ColumnType::ValueType)); - auto compared_value = unalignedLoad(compared_value_data.data); + auto compared_value = unalignedLoad( + compared_value_data.data); const auto reference_value_data = reference_column->getDataAt(reference_row); assert(reference_value_data.size == sizeof(typename ColumnType::ValueType)); - auto reference_value = unalignedLoad(reference_value_data.data); + auto reference_value = unalignedLoad( + reference_value_data.data); /// Floats overflow to Inf and the comparison will work normally, so we don't have to do anything. if (offset_is_preceding) @@ -155,58 +159,58 @@ static int compareValuesWithOffsetFloat( else reference_value += static_cast(offset); - const auto result = compared_value < reference_value ? -1 : (compared_value == reference_value ? 0 : 1); + const auto result = compared_value < reference_value ? -1 + : (compared_value == reference_value ? 0 : 1); return result; } // Helper macros to dispatch on type of the ORDER BY column #define APPLY_FOR_ONE_NEST_TYPE(FUNCTION, TYPE) \ - else if (typeid_cast(nest_compared_column.get())) \ - { \ - /* clang-tidy you're dumb, I can't put FUNCTION in braces here. 
*/ \ - nest_compare_function = FUNCTION; /* NOLINT */ \ - } +else if (typeid_cast(nest_compared_column.get())) \ +{ \ + /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ + nest_compare_function = FUNCTION; /* NOLINT */ \ +} #define APPLY_FOR_NEST_TYPES(FUNCTION) \ - if (false) /* NOLINT */ \ - { \ - /* Do nothing, a starter condition. */ \ - } \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +if (false) /* NOLINT */ \ +{ \ + /* Do nothing, a starter condition. */ \ +} \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION, ColumnVector) \ \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ - APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ +APPLY_FOR_ONE_NEST_TYPE(FUNCTION##Float, ColumnVector) \ \ - else \ - { \ - throw Exception( \ - ErrorCodes::NOT_IMPLEMENTED, \ - "The RANGE OFFSET frame for '{}' ORDER BY nest column is not implemented", \ - demangle(typeid(nest_compared_column).name())); \ - } +else \ +{ \ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, \ + "The RANGE OFFSET frame for '{}' ORDER BY nest column is not implemented", \ + demangle(typeid(nest_compared_column).name())); \ +} // A specialization of compareValuesWithOffset for nullable. 
template -static int compareValuesWithOffsetNullable( - const IColumn * _compared_column, - size_t compared_row, - const IColumn * _reference_column, +static int compareValuesWithOffsetNullable(const IColumn * _compared_column, + size_t compared_row, const IColumn * _reference_column, size_t reference_row, const Field & _offset, bool offset_is_preceding) { - const auto * compared_column = assert_cast(_compared_column); - const auto * reference_column = assert_cast(_reference_column); + const auto * compared_column = assert_cast( + _compared_column); + const auto * reference_column = assert_cast( + _reference_column); if (compared_column->isNullAt(compared_row) && !reference_column->isNullAt(reference_row)) { @@ -225,59 +229,54 @@ static int compareValuesWithOffsetNullable( ColumnPtr nest_reference_column = reference_column->getNestedColumnPtr(); std::function - nest_compare_function; + bool offset_is_preceding)> nest_compare_function; APPLY_FOR_NEST_TYPES(compareValuesWithOffset) - return nest_compare_function( - nest_compared_column.get(), compared_row, nest_reference_column.get(), reference_row, _offset, offset_is_preceding); + return nest_compare_function(nest_compared_column.get(), compared_row, + nest_reference_column.get(), reference_row, _offset, offset_is_preceding); } // Helper macros to dispatch on type of the ORDER BY column #define APPLY_FOR_ONE_TYPE(FUNCTION, TYPE) \ - else if (typeid_cast(column)) \ - { \ - /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ - compare_values_with_offset = FUNCTION; /* NOLINT */ \ - } +else if (typeid_cast(column)) \ +{ \ + /* clang-tidy you're dumb, I can't put FUNCTION in braces here. */ \ + compare_values_with_offset = FUNCTION; /* NOLINT */ \ +} #define APPLY_FOR_TYPES(FUNCTION) \ - if (false) /* NOLINT */ \ - { \ - /* Do nothing, a starter condition. */ \ - } \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +if (false) /* NOLINT */ \ +{ \ + /* Do nothing, a starter condition. 
*/ \ +} \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION, ColumnVector) \ \ - APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ - APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ +APPLY_FOR_ONE_TYPE(FUNCTION##Float, ColumnVector) \ \ - APPLY_FOR_ONE_TYPE(FUNCTION##Nullable, ColumnNullable) \ - else \ - { \ - throw Exception( \ - ErrorCodes::NOT_IMPLEMENTED, \ - "The RANGE OFFSET frame for '{}' ORDER BY column is not implemented", \ - demangle(typeid(*column).name())); \ - } +APPLY_FOR_ONE_TYPE(FUNCTION##Nullable, ColumnNullable) \ +else \ +{ \ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, \ + "The RANGE OFFSET frame for '{}' ORDER BY column is not implemented", \ + demangle(typeid(*column).name())); \ +} -WindowTransform::WindowTransform( - const Block & input_header_, - const Block & output_header_, - const WindowDescription & window_description_, - const std::vector & functions) +WindowTransform::WindowTransform(const Block & input_header_, + const Block & output_header_, + const WindowDescription & window_description_, + const std::vector & functions) : IProcessor({input_header_}, {output_header_}) , input(inputs.front()) , output(outputs.front()) @@ -308,7 +307,8 @@ WindowTransform::WindowTransform( workspace.argument_column_indices.reserve(f.argument_names.size()); for (const auto & argument_name : f.argument_names) { - workspace.argument_column_indices.push_back(input_header.getPositionByName(argument_name)); + workspace.argument_column_indices.push_back( + input_header.getPositionByName(argument_name)); } workspace.argument_columns.assign(f.argument_names.size(), nullptr); @@ -325,7 +325,9 @@ WindowTransform::WindowTransform( } workspace.is_aggregate_function_state = workspace.aggregate_function->isState(); - workspace.aggregate_function_state.reset(aggregate_function->sizeOfData(), aggregate_function->alignOfData()); + workspace.aggregate_function_state.reset( + aggregate_function->sizeOfData(), + aggregate_function->alignOfData()); aggregate_function->create(workspace.aggregate_function_state.data()); workspaces.push_back(std::move(workspace)); @@ -334,20 +336,24 @@ WindowTransform::WindowTransform( partition_by_indices.reserve(window_description.partition_by.size()); for (const auto & column : window_description.partition_by) { - partition_by_indices.push_back(input_header.getPositionByName(column.column_name)); + partition_by_indices.push_back( + input_header.getPositionByName(column.column_name)); } order_by_indices.reserve(window_description.order_by.size()); for (const auto & column : window_description.order_by) { - order_by_indices.push_back(input_header.getPositionByName(column.column_name)); + order_by_indices.push_back( + input_header.getPositionByName(column.column_name)); } // Choose a row comparison function for RANGE OFFSET frame based on the // type of the ORDER BY column. 
if (window_description.frame.type == WindowFrame::FrameType::RANGE - && (window_description.frame.begin_type == WindowFrame::BoundaryType::Offset - || window_description.frame.end_type == WindowFrame::BoundaryType::Offset)) + && (window_description.frame.begin_type + == WindowFrame::BoundaryType::Offset + || window_description.frame.end_type + == WindowFrame::BoundaryType::Offset)) { assert(order_by_indices.size() == 1); const auto & entry = input_header.getByPosition(order_by_indices[0]); @@ -357,26 +363,32 @@ WindowTransform::WindowTransform( // Convert the offsets to the ORDER BY column type. We can't just check // that the type matches, because e.g. the int literals are always // (U)Int64, but the column might be Int8 and so on. - if (window_description.frame.begin_type == WindowFrame::BoundaryType::Offset) + if (window_description.frame.begin_type + == WindowFrame::BoundaryType::Offset) { - window_description.frame.begin_offset = convertFieldToTypeOrThrow(window_description.frame.begin_offset, *entry.type); + window_description.frame.begin_offset = convertFieldToTypeOrThrow( + window_description.frame.begin_offset, + *entry.type); - if (applyVisitor(FieldVisitorAccurateLess{}, window_description.frame.begin_offset, Field(0))) + if (applyVisitor(FieldVisitorAccurateLess{}, + window_description.frame.begin_offset, Field(0))) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Window frame start offset must be nonnegative, {} given", window_description.frame.begin_offset); } } - if (window_description.frame.end_type == WindowFrame::BoundaryType::Offset) + if (window_description.frame.end_type + == WindowFrame::BoundaryType::Offset) { - window_description.frame.end_offset = convertFieldToTypeOrThrow(window_description.frame.end_offset, *entry.type); + window_description.frame.end_offset = convertFieldToTypeOrThrow( + window_description.frame.end_offset, + *entry.type); - if (applyVisitor(FieldVisitorAccurateLess{}, window_description.frame.end_offset, Field(0))) + if (applyVisitor(FieldVisitorAccurateLess{}, + window_description.frame.end_offset, Field(0))) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Window frame start offset must be nonnegative, {} given", window_description.frame.end_offset); } @@ -389,10 +401,11 @@ WindowTransform::WindowTransform( { if (!workspace.window_function_impl->checkWindowFrameType(this)) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, "Unsupported window frame type for function '{}'", workspace.aggregate_function->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported window frame type for function '{}'", + workspace.aggregate_function->getName()); } } + } } @@ -401,7 +414,8 @@ WindowTransform::~WindowTransform() // Some states may be not created yet if the creation failed. 
for (auto & ws : workspaces) { - ws.aggregate_function->destroy(ws.aggregate_function_state.data()); + ws.aggregate_function->destroy( + ws.aggregate_function_state.data()); } } @@ -475,10 +489,14 @@ void WindowTransform::advancePartitionEnd() size_t i = 0; for (; i < partition_by_columns; ++i) { - const auto * reference_column = inputAt(prev_frame_start)[partition_by_indices[i]].get(); - const auto * compared_column = inputAt(partition_end)[partition_by_indices[i]].get(); + const auto * reference_column + = inputAt(prev_frame_start)[partition_by_indices[i]].get(); + const auto * compared_column + = inputAt(partition_end)[partition_by_indices[i]].get(); - if (compared_column->compareAt(partition_end.row, prev_frame_start.row, *reference_column, 1 /* nan_direction_hint */) != 0) + if (compared_column->compareAt(partition_end.row, + prev_frame_start.row, *reference_column, + 1 /* nan_direction_hint */) != 0) { break; } @@ -590,8 +608,9 @@ auto WindowTransform::moveRowNumber(const RowNumber & original_row_number, Int64 void WindowTransform::advanceFrameStartRowsOffset() { // Just recalculate it each time by walking blocks. - const auto [moved_row, offset_left] = moveRowNumber( - current_row, window_description.frame.begin_offset.get() * (window_description.frame.begin_preceding ? -1 : 1)); + const auto [moved_row, offset_left] = moveRowNumber(current_row, + window_description.frame.begin_offset.get() + * (window_description.frame.begin_preceding ? -1 : 1)); frame_start = moved_row; @@ -628,17 +647,21 @@ void WindowTransform::advanceFrameStartRangeOffset() { // See the comment for advanceFrameEndRangeOffset(). const int direction = window_description.order_by[0].direction; - const bool preceding = window_description.frame.begin_preceding == (direction > 0); - const auto * reference_column = inputAt(current_row)[order_by_indices[0]].get(); + const bool preceding = window_description.frame.begin_preceding + == (direction > 0); + const auto * reference_column + = inputAt(current_row)[order_by_indices[0]].get(); for (; frame_start < partition_end; advanceRowNumber(frame_start)) { // The first frame value is [current_row] with offset, so we advance // while [frames_start] < [current_row] with offset. 
- const auto * compared_column = inputAt(frame_start)[order_by_indices[0]].get(); - if (compare_values_with_offset( - compared_column, frame_start.row, reference_column, current_row.row, window_description.frame.begin_offset, preceding) - * direction - >= 0) + const auto * compared_column + = inputAt(frame_start)[order_by_indices[0]].get(); + if (compare_values_with_offset(compared_column, frame_start.row, + reference_column, current_row.row, + window_description.frame.begin_offset, + preceding) + * direction >= 0) { frame_started = true; return; @@ -683,8 +706,7 @@ void WindowTransform::advanceFrameStart() advanceFrameStartRangeOffset(); break; default: - throw Exception( - ErrorCodes::NOT_IMPLEMENTED, + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Frame start type '{}' for frame '{}' is not implemented", window_description.frame.begin_type, window_description.frame.type); @@ -744,7 +766,8 @@ bool WindowTransform::arePeers(const RowNumber & x, const RowNumber & y) const { const auto * column_x = inputAt(x)[order_by_indices[i]].get(); const auto * column_y = inputAt(y)[order_by_indices[i]].get(); - if (column_x->compareAt(x.row, y.row, *column_y, 1 /* nan_direction_hint */) != 0) + if (column_x->compareAt(x.row, y.row, *column_y, + 1 /* nan_direction_hint */) != 0) { return false; } @@ -761,7 +784,8 @@ void WindowTransform::advanceFrameEndCurrentRow() // (only loop over rows and not over blocks), that should hopefully be more // efficient. // partition_end is either in this new block or past-the-end. - assert(frame_end.block == partition_end.block || frame_end.block + 1 == partition_end.block); + assert(frame_end.block == partition_end.block + || frame_end.block + 1 == partition_end.block); if (frame_end == partition_end) { @@ -823,8 +847,10 @@ void WindowTransform::advanceFrameEndRowsOffset() { // Walk the specified offset from the current row. The "+1" is needed // because the frame_end is a past-the-end pointer. - const auto [moved_row, offset_left] = moveRowNumber( - current_row, window_description.frame.end_offset.get() * (window_description.frame.end_preceding ? -1 : 1) + 1); + const auto [moved_row, offset_left] = moveRowNumber(current_row, + window_description.frame.end_offset.get() + * (window_description.frame.end_preceding ? -1 : 1) + + 1); if (partition_end <= moved_row) { @@ -857,18 +883,22 @@ void WindowTransform::advanceFrameEndRangeOffset() // PRECEDING/FOLLOWING change direction for DESC order. // See CD 9075-2:201?(E) 7.14 p. 429. const int direction = window_description.order_by[0].direction; - const bool preceding = window_description.frame.end_preceding == (direction > 0); - const auto * reference_column = inputAt(current_row)[order_by_indices[0]].get(); + const bool preceding = window_description.frame.end_preceding + == (direction > 0); + const auto * reference_column + = inputAt(current_row)[order_by_indices[0]].get(); for (; frame_end < partition_end; advanceRowNumber(frame_end)) { // The last frame value is current_row with offset, and we need a // past-the-end pointer, so we advance while // [frame_end] <= [current_row] with offset. 
- const auto * compared_column = inputAt(frame_end)[order_by_indices[0]].get(); - if (compare_values_with_offset( - compared_column, frame_end.row, reference_column, current_row.row, window_description.frame.end_offset, preceding) - * direction - > 0) + const auto * compared_column + = inputAt(frame_end)[order_by_indices[0]].get(); + if (compare_values_with_offset(compared_column, frame_end.row, + reference_column, current_row.row, + window_description.frame.end_offset, + preceding) + * direction > 0) { frame_ended = true; return; @@ -903,8 +933,9 @@ void WindowTransform::advanceFrameEnd() advanceFrameEndRangeOffset(); break; default: - throw Exception( - ErrorCodes::NOT_IMPLEMENTED, "The frame end type '{}' is not implemented", window_description.frame.end_type); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "The frame end type '{}' is not implemented", + window_description.frame.end_type); } break; } @@ -975,9 +1006,13 @@ void WindowTransform::updateAggregationState() // rows manually, instead of using advanceRowNumber(). // For this purpose, the past-the-end block can be different than the // block of the past-the-end row (it's usually the next block). - const auto past_the_end_block = rows_to_add_end.row == 0 ? rows_to_add_end.block : rows_to_add_end.block + 1; + const auto past_the_end_block = rows_to_add_end.row == 0 + ? rows_to_add_end.block + : rows_to_add_end.block + 1; - for (auto block_number = rows_to_add_start.block; block_number < past_the_end_block; ++block_number) + for (auto block_number = rows_to_add_start.block; + block_number < past_the_end_block; + ++block_number) { auto & block = blockAt(block_number); @@ -985,15 +1020,18 @@ void WindowTransform::updateAggregationState() { for (size_t i = 0; i < ws.argument_column_indices.size(); ++i) { - ws.argument_columns[i] = block.input_columns[ws.argument_column_indices[i]].get(); + ws.argument_columns[i] = block.input_columns[ + ws.argument_column_indices[i]].get(); } ws.cached_block_number = block_number; } // First and last blocks may be processed partially, and other blocks // are processed in full. - const auto first_row = block_number == rows_to_add_start.block ? rows_to_add_start.row : 0; - const auto past_the_end_row = block_number == rows_to_add_end.block ? rows_to_add_end.row : block.rows; + const auto first_row = block_number == rows_to_add_start.block + ? rows_to_add_start.row : 0; + const auto past_the_end_row = block_number == rows_to_add_end.block + ? rows_to_add_end.row : block.rows; // We should add an addBatch analog that can accept a starting offset. // For now, add the values one by one. 
@@ -1041,7 +1079,8 @@ void WindowTransform::writeOutCurrentRow() } } -static void assertSameColumns(const Columns & left_all, const Columns & right_all) +static void assertSameColumns(const Columns & left_all, + const Columns & right_all) { assert(left_all.size() == right_all.size()); @@ -1059,7 +1098,8 @@ static void assertSameColumns(const Columns & left_all, const Columns & right_al if (const auto * right_lc = typeid_cast(right_column)) right_column = right_lc->getDictionary().getNestedColumn().get(); - assert(typeid(*left_column).hash_code() == typeid(*right_column).hash_code()); + assert(typeid(*left_column).hash_code() + == typeid(*right_column).hash_code()); if (isColumnConst(*left_column)) { @@ -1120,7 +1160,8 @@ void WindowTransform::appendChunk(Chunk & chunk) if (ws.window_function_impl) block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); - block.output_columns.push_back(ws.aggregate_function->getResultType()->createColumn()); + block.output_columns.push_back(ws.aggregate_function->getResultType() + ->createColumn()); block.output_columns.back()->reserve(block.rows); } @@ -1444,10 +1485,12 @@ void WindowTransform::work() // that the frame start can be further than current row for some frame specs // (e.g. EXCLUDE CURRENT ROW), so we have to check both. assert(prev_frame_start <= frame_start); - const auto first_used_block = std::min(next_output_block_number, std::min(prev_frame_start.block, current_row.block)); + const auto first_used_block = std::min(next_output_block_number, + std::min(prev_frame_start.block, current_row.block)); if (first_block_number < first_used_block) { - blocks.erase(blocks.begin(), blocks.begin() + (first_used_block - first_block_number)); + blocks.erase(blocks.begin(), + blocks.begin() + (first_used_block - first_block_number)); first_block_number = first_used_block; assert(next_output_block_number >= first_block_number); @@ -1458,82 +1501,83 @@ void WindowTransform::work() } } - struct WindowFunctionRank final : public WindowFunction { - WindowFunctionRank(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionRank(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) - { - } + {} bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { - IColumn & to = *transform->blockAt(transform->current_row).output_columns[function_index]; - assert_cast(to).getData().push_back(transform->peer_group_start_row_number); + IColumn & to = *transform->blockAt(transform->current_row) + .output_columns[function_index]; + assert_cast(to).getData().push_back( + transform->peer_group_start_row_number); } }; struct WindowFunctionDenseRank final : public WindowFunction { - WindowFunctionDenseRank(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionDenseRank(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) - { - } + {} bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const 
override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { - IColumn & to = *transform->blockAt(transform->current_row).output_columns[function_index]; - assert_cast(to).getData().push_back(transform->peer_group_number); + IColumn & to = *transform->blockAt(transform->current_row) + .output_columns[function_index]; + assert_cast(to).getData().push_back( + transform->peer_group_number); } }; namespace recurrent_detail { -template -T getValue(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*column_index*/, RowNumber /*row*/) -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::getValue() is not implemented for {} type", typeid(T).name()); -} + template T getValue(const WindowTransform * /*transform*/, size_t /*function_index*/, size_t /*column_index*/, RowNumber /*row*/) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::getValue() is not implemented for {} type", typeid(T).name()); + } -template <> -Float64 getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row) -{ - const auto & workspace = transform->workspaces[function_index]; - const auto & column = transform->blockAt(row.block).input_columns[workspace.argument_column_indices[column_index]]; - return column->getFloat64(row.row); -} + template<> Float64 getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row) + { + const auto & workspace = transform->workspaces[function_index]; + const auto & column = transform->blockAt(row.block).input_columns[workspace.argument_column_indices[column_index]]; + return column->getFloat64(row.row); + } -template -void setValueToOutputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, T /*value*/) -{ - throw Exception( - ErrorCodes::NOT_IMPLEMENTED, "recurrent_detail::setValueToOutputColumn() is not implemented for {} type", typeid(T).name()); -} + template void setValueToOutputColumn(const WindowTransform * /*transform*/, size_t /*function_index*/, T /*value*/) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "recurrent_detail::setValueToOutputColumn() is not implemented for {} type", typeid(T).name()); + } -template <> -void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, Float64 value) -{ - auto current_row = transform->current_row; - const auto & current_block = transform->blockAt(current_row); - IColumn & to = *current_block.output_columns[function_index]; + template<> void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, Float64 value) + { + auto current_row = transform->current_row; + const auto & current_block = transform->blockAt(current_row); + IColumn & to = *current_block.output_columns[function_index]; - assert_cast(to).getData().push_back(value); -} + assert_cast(to).getData().push_back(value); + } } struct WindowFunctionHelpers { - template + template static T getValue(const WindowTransform * transform, size_t function_index, size_t column_index, RowNumber row) { return recurrent_detail::getValue(transform, function_index, column_index, row); } - template + template static void setValueToOutputColumn(const WindowTransform * transform, size_t function_index, T value) { recurrent_detail::setValueToOutputColumn(transform, function_index, value); @@ -1568,7 +1612,6 @@ struct WindowFunctionHelpers } }; - struct ExponentialTimeDecayedSumState { Float64 previous_time; @@ -1591,34 +1634,34 @@ struct 
WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedSum(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedSum(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly two arguments", name_); } if (!isNumber(argument_types[ARGUMENT_VALUE])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be a number, '{}' given", ARGUMENT_VALUE, argument_types[ARGUMENT_VALUE]->getName()); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) - && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1627,7 +1670,8 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -1639,7 +1683,8 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc RowNumber frame_back = transform->prevRowNumber(transform->frame_end); Float64 back_t = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIME, frame_back); - if (transform->prev_frame_start <= transform->frame_start && transform->frame_start < transform->prev_frame_end + if (transform->prev_frame_start <= transform->frame_start + && transform->frame_start < transform->prev_frame_end && transform->prev_frame_end <= transform->frame_end) { for (RowNumber i = transform->prev_frame_start; i < transform->frame_start; transform->advanceRowNumber(i)) @@ -1673,8 +1718,8 @@ struct WindowFunctionExponentialTimeDecayedSum final : public StatefulWindowFunc WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } -private: - const Float64 decay_length; + private: + const Float64 decay_length; }; struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction @@ -1686,34 +1731,34 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly 
one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedMax(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedMax(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly two arguments", name_); } if (!isNumber(argument_types[ARGUMENT_VALUE])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be a number, '{}' given", ARGUMENT_VALUE, argument_types[ARGUMENT_VALUE]->getName()); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) - && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1722,7 +1767,8 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { Float64 result = std::numeric_limits::quiet_NaN(); @@ -1748,8 +1794,8 @@ struct WindowFunctionExponentialTimeDecayedMax final : public WindowFunction WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } -private: - const Float64 decay_length; + private: + const Float64 decay_length; }; struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFunction @@ -1760,25 +1806,26 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedCount(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedCount(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one argument", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly one argument", name_); } - if 
(!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) - && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1787,7 +1834,8 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -1799,7 +1847,8 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu RowNumber frame_back = transform->prevRowNumber(transform->frame_end); Float64 back_t = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIME, frame_back); - if (transform->prev_frame_start <= transform->frame_start && transform->frame_start < transform->prev_frame_end + if (transform->prev_frame_start <= transform->frame_start + && transform->frame_start < transform->prev_frame_end && transform->prev_frame_end <= transform->frame_end) { for (RowNumber i = transform->prev_frame_start; i < transform->frame_start; transform->advanceRowNumber(i)) @@ -1830,8 +1879,8 @@ struct WindowFunctionExponentialTimeDecayedCount final : public StatefulWindowFu WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } -private: - const Float64 decay_length; + private: + const Float64 decay_length; }; struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunction @@ -1843,34 +1892,34 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc { if (parameters_.size() != 1) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly one parameter", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly one parameter", name_); } return applyVisitor(FieldVisitorConvertToNumber(), parameters_[0]); } - WindowFunctionExponentialTimeDecayedAvg(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionExponentialTimeDecayedAvg(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , decay_length(getDecayLength(parameters_, name_)) { if (argument_types.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly two arguments", name_); } if (!isNumber(argument_types[ARGUMENT_VALUE])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be a number, '{}' given", ARGUMENT_VALUE, argument_types[ARGUMENT_VALUE]->getName()); } - if (!isNumber(argument_types[ARGUMENT_TIME]) && !isDateTime(argument_types[ARGUMENT_TIME]) - && !isDateTime64(argument_types[ARGUMENT_TIME])) + if (!isNumber(argument_types[ARGUMENT_TIME]) 
&& !isDateTime(argument_types[ARGUMENT_TIME]) && !isDateTime64(argument_types[ARGUMENT_TIME])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument {} must be DateTime, DateTime64 or a number, '{}' given", ARGUMENT_TIME, argument_types[ARGUMENT_TIME]->getName()); @@ -1879,7 +1928,8 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -1893,7 +1943,8 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc RowNumber frame_back = transform->prevRowNumber(transform->frame_end); Float64 back_t = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIME, frame_back); - if (transform->prev_frame_start <= transform->frame_start && transform->frame_start < transform->prev_frame_end + if (transform->prev_frame_start <= transform->frame_start + && transform->frame_start < transform->prev_frame_end && transform->prev_frame_end <= transform->frame_end) { for (RowNumber i = transform->prev_frame_start; i < transform->frame_start; transform->advanceRowNumber(i)) @@ -1936,49 +1987,56 @@ struct WindowFunctionExponentialTimeDecayedAvg final : public StatefulWindowFunc state.previous_count = count; state.previous_time = back_t; - result = sum / count; + result = sum/count; } WindowFunctionHelpers::setValueToOutputColumn(transform, function_index, result); } -private: - const Float64 decay_length; + private: + const Float64 decay_length; }; struct WindowFunctionRowNumber final : public WindowFunction { - WindowFunctionRowNumber(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionRowNumber(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, std::make_shared()) - { - } + {} bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { - IColumn & to = *transform->blockAt(transform->current_row).output_columns[function_index]; - assert_cast(to).getData().push_back(transform->current_row_number); + IColumn & to = *transform->blockAt(transform->current_row) + .output_columns[function_index]; + assert_cast(to).getData().push_back( + transform->current_row_number); } }; namespace { -struct NtileState -{ - UInt64 buckets = 0; - RowNumber start_row; - UInt64 current_partition_rows = 0; - UInt64 current_partition_inserted_row = 0; + struct NtileState + { + UInt64 buckets = 0; + RowNumber start_row; + UInt64 current_partition_rows = 0; + UInt64 current_partition_inserted_row = 0; - void windowInsertResultInto(const WindowTransform * transform, size_t function_index, const DataTypes & argument_types); -}; + void windowInsertResultInto( + const WindowTransform * transform, + size_t function_index, + const DataTypes & argument_types); + }; } // Usage: ntile(n). n is the number of buckets. 
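The WindowFunctionNtile code that follows writes its result only once the last row of a partition has been seen: each of the `buckets` groups receives partition_rows / buckets rows, and the first partition_rows % buckets groups absorb one extra row each. A standalone sketch of just that distribution rule is below; ntileBucketSizes is a made-up helper for illustration and is not part of the patch.

#include <cstdint>
#include <iostream>
#include <vector>

// Standalone sketch of the bucket-size rule used by WindowFunctionNtile below.
// ntileBucketSizes is a hypothetical helper, not ClickHouse code.
std::vector<uint64_t> ntileBucketSizes(uint64_t partition_rows, uint64_t buckets)
{
    const uint64_t bucket_capacity = partition_rows / buckets;
    uint64_t capacity_diff = partition_rows - bucket_capacity * buckets;

    std::vector<uint64_t> sizes;
    for (uint64_t bucket = 0; bucket < buckets; ++bucket)
    {
        uint64_t current_bucket_capacity = bucket_capacity;
        if (capacity_diff > 0)
        {
            ++current_bucket_capacity; // the first `capacity_diff` buckets take one extra row
            --capacity_diff;
        }
        sizes.push_back(current_bucket_capacity);
    }
    return sizes;
}

int main()
{
    // ntile(4) over a partition of 10 rows -> bucket sizes 3 3 2 2.
    for (uint64_t size : ntileBucketSizes(10, 4))
        std::cout << size << ' ';
    std::cout << '\n';
}

For example, ntile(4) over a 10-row partition yields bucket sizes 3, 3, 2, 2, which is what the resize_fill calls in the implementation below write out as bucket numbers.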
struct WindowFunctionNtile final : public StatefulWindowFunction { - WindowFunctionNtile(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionNtile(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) { if (argument_types.size() != 1) @@ -1986,11 +2044,7 @@ struct WindowFunctionNtile final : public StatefulWindowFunction auto type_id = argument_types[0]->getTypeId(); if (type_id != TypeIndex::UInt8 && type_id != TypeIndex::UInt16 && type_id != TypeIndex::UInt32 && type_id != TypeIndex::UInt64) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "'{}' argument type must be an unsigned integer (not larger than 64-bit), got {}", - name_, - argument_types[0]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "'{}' argument type must be an unsigned integer (not larger than 64-bit), got {}", name_, argument_types[0]->getName()); } bool allocatesMemoryInArena() const override { return false; } @@ -2021,13 +2075,13 @@ struct WindowFunctionNtile final : public StatefulWindowFunction std::optional getDefaultFrame() const override { WindowFrame frame; - frame.is_default = false; frame.type = WindowFrame::FrameType::ROWS; frame.end_type = WindowFrame::BoundaryType::Unbounded; return frame; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); @@ -2037,81 +2091,84 @@ struct WindowFunctionNtile final : public StatefulWindowFunction namespace { -void NtileState::windowInsertResultInto(const WindowTransform * transform, size_t function_index, const DataTypes & argument_types) -{ - if (!buckets) [[unlikely]] + void NtileState::windowInsertResultInto( + const WindowTransform * transform, + size_t function_index, + const DataTypes & argument_types) { - const auto & current_block = transform->blockAt(transform->current_row); - const auto & workspace = transform->workspaces[function_index]; - const auto & arg_col = *current_block.original_input_columns[workspace.argument_column_indices[0]]; - if (!isColumnConst(arg_col)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be a constant"); - auto type_id = argument_types[0]->getTypeId(); - if (type_id == TypeIndex::UInt8) - buckets = arg_col[transform->current_row.row].get(); - else if (type_id == TypeIndex::UInt16) - buckets = arg_col[transform->current_row.row].get(); - else if (type_id == TypeIndex::UInt32) - buckets = arg_col[transform->current_row.row].get(); - else if (type_id == TypeIndex::UInt64) - buckets = arg_col[transform->current_row.row].get(); - - if (!buckets) + if (!buckets) [[unlikely]] { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be greater than zero"); - } - } - // new partition - if (WindowFunctionHelpers::checkPartitionEnterFirstRow(transform)) [[unlikely]] - { - current_partition_rows = 0; - current_partition_inserted_row = 0; - start_row = transform->current_row; - } - current_partition_rows++; + const auto & current_block = transform->blockAt(transform->current_row); + const auto & workspace = transform->workspaces[function_index]; + const auto & arg_col = *current_block.original_input_columns[workspace.argument_column_indices[0]]; + if 
(!isColumnConst(arg_col)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be a constant"); + auto type_id = argument_types[0]->getTypeId(); + if (type_id == TypeIndex::UInt8) + buckets = arg_col[transform->current_row.row].get(); + else if (type_id == TypeIndex::UInt16) + buckets = arg_col[transform->current_row.row].get(); + else if (type_id == TypeIndex::UInt32) + buckets = arg_col[transform->current_row.row].get(); + else if (type_id == TypeIndex::UInt64) + buckets = arg_col[transform->current_row.row].get(); - // Only do the action when we meet the last row in this partition. - if (!WindowFunctionHelpers::checkPartitionEnterLastRow(transform)) - return; - - auto bucket_capacity = current_partition_rows / buckets; - auto capacity_diff = current_partition_rows - bucket_capacity * buckets; - - // bucket number starts from 1. - UInt64 bucket_num = 1; - while (current_partition_inserted_row < current_partition_rows) - { - auto current_bucket_capacity = bucket_capacity; - if (capacity_diff > 0) - { - current_bucket_capacity += 1; - capacity_diff--; - } - auto left_rows = current_bucket_capacity; - while (left_rows) - { - auto available_block_rows = transform->blockRowsNumber(start_row) - start_row.row; - IColumn & to = *transform->blockAt(start_row).output_columns[function_index]; - auto & pod_array = assert_cast(to).getData(); - if (left_rows < available_block_rows) + if (!buckets) { - pod_array.resize_fill(pod_array.size() + left_rows, bucket_num); - start_row.row += left_rows; - left_rows = 0; - } - else - { - pod_array.resize_fill(pod_array.size() + available_block_rows, bucket_num); - left_rows -= available_block_rows; - start_row.block++; - start_row.row = 0; + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be greater than zero"); } } - current_partition_inserted_row += current_bucket_capacity; - bucket_num += 1; + // new partition + if (WindowFunctionHelpers::checkPartitionEnterFirstRow(transform)) [[unlikely]] + { + current_partition_rows = 0; + current_partition_inserted_row = 0; + start_row = transform->current_row; + } + current_partition_rows++; + + // Only do the action when we meet the last row in this partition. + if (!WindowFunctionHelpers::checkPartitionEnterLastRow(transform)) + return; + + auto bucket_capacity = current_partition_rows / buckets; + auto capacity_diff = current_partition_rows - bucket_capacity * buckets; + + // bucket number starts from 1. 
+ UInt64 bucket_num = 1; + while (current_partition_inserted_row < current_partition_rows) + { + auto current_bucket_capacity = bucket_capacity; + if (capacity_diff > 0) + { + current_bucket_capacity += 1; + capacity_diff--; + } + auto left_rows = current_bucket_capacity; + while (left_rows) + { + auto available_block_rows = transform->blockRowsNumber(start_row) - start_row.row; + IColumn & to = *transform->blockAt(start_row).output_columns[function_index]; + auto & pod_array = assert_cast(to).getData(); + if (left_rows < available_block_rows) + { + pod_array.resize_fill(pod_array.size() + left_rows, bucket_num); + start_row.row += left_rows; + left_rows = 0; + } + else + { + pod_array.resize_fill(pod_array.size() + available_block_rows, bucket_num); + left_rows -= available_block_rows; + start_row.block++; + start_row.row = 0; + } + } + current_partition_inserted_row += current_bucket_capacity; + bucket_num += 1; + } } } -} namespace { @@ -2125,20 +2182,21 @@ struct PercentRankState struct WindowFunctionPercentRank final : public StatefulWindowFunction { public: - WindowFunctionPercentRank(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionPercentRank(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) - { - } + {} bool allocatesMemoryInArena() const override { return false; } bool checkWindowFrameType(const WindowTransform * transform) const override { - if (transform->window_description.frame != getDefaultFrame()) + auto default_window_frame = getDefaultFrame(); + if (transform->window_description.frame != default_window_frame) { LOG_ERROR( getLogger("WindowFunctionPercentRank"), - "Window frame for function 'percent_rank' should be '{}'", getDefaultFrame()->toString()); + "Window frame for function 'percent_rank' should be '{}'", default_window_frame->toString()); return false; } return true; @@ -2147,11 +2205,9 @@ public: std::optional getDefaultFrame() const override { WindowFrame frame; - frame.is_default = false; frame.type = WindowFrame::FrameType::RANGE; frame.begin_type = WindowFrame::BoundaryType::Unbounded; frame.end_type = WindowFrame::BoundaryType::Unbounded; - //frame.end_type = WindowFrame::BoundaryType::Current; return frame; } @@ -2227,12 +2283,14 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { FunctionBasePtr func_cast = nullptr; - WindowFunctionLagLeadInFrame(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionLagLeadInFrame(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, createResultType(argument_types_, name_)) { if (!parameters.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} cannot be parameterized", name_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Function {} cannot be parameterized", name_); } if (argument_types.size() == 1) @@ -2242,7 +2300,9 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (!isInt64OrUInt64FieldType(argument_types[1]->getDefault().getType())) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Offset must be an integer, '{}' given", argument_types[1]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Offset must be an integer, '{}' given", + argument_types[1]->getName()); } if (argument_types.size() == 2) @@ -2252,11 +2312,9 @@ struct 
WindowFunctionLagLeadInFrame final : public WindowFunction if (argument_types.size() > 3) { - throw Exception( - ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, + throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Function '{}' accepts at most 3 arguments, {} given", - name, - argument_types.size()); + name, argument_types.size()); } if (argument_types[0]->equals(*argument_types[2])) @@ -2265,16 +2323,14 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction const auto supertype = tryGetLeastSupertype(DataTypes{argument_types[0], argument_types[2]}); if (!supertype) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no supertype for the argument type '{}' and the default value type '{}'", argument_types[0]->getName(), argument_types[2]->getName()); } if (!argument_types[0]->equals(*supertype)) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The supertype '{}' for the argument type '{}' and the default value type '{}' is not the same as the argument type", supertype->getName(), argument_types[0]->getName(), @@ -2283,8 +2339,15 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction const auto from_name = argument_types[2]->getName(); const auto to_name = argument_types[0]->getName(); - ColumnsWithTypeAndName arguments{ - {argument_types[2], ""}, {DataTypeString().createColumnConst(0, to_name), std::make_shared(), ""}}; + ColumnsWithTypeAndName arguments + { + { argument_types[2], "" }, + { + DataTypeString().createColumnConst(0, to_name), + std::make_shared(), + "" + } + }; auto get_cast_func = [&arguments] { @@ -2293,6 +2356,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction }; func_cast = get_cast_func(); + } ColumnPtr castColumn(const Columns & columns, const std::vector & idx) override @@ -2300,11 +2364,15 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction if (!func_cast) return nullptr; - ColumnsWithTypeAndName arguments{ - {columns[idx[2]], argument_types[2], ""}, - {DataTypeString().createColumnConst(columns[idx[2]]->size(), argument_types[0]->getName()), - std::make_shared(), - ""}}; + ColumnsWithTypeAndName arguments + { + { columns[idx[2]], argument_types[2], "" }, + { + DataTypeString().createColumnConst(columns[idx[2]]->size(), argument_types[0]->getName()), + std::make_shared(), + "" + } + }; return func_cast->execute(arguments, argument_types[0], columns[idx[2]]->size()); } @@ -2313,7 +2381,8 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { if (argument_types_.empty()) { - throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} takes at least one argument", name_); + throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, + "Function {} takes at least one argument", name_); } return argument_types_[0]; @@ -2321,7 +2390,8 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & current_block = transform->blockAt(transform->current_row); IColumn & to = *current_block.output_columns[function_index]; @@ -2330,27 +2400,34 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction Int64 offset = 1; if (argument_types.size() > 1) { - offset = 
(*current_block.input_columns[workspace.argument_column_indices[1]])[transform->current_row.row].get(); + offset = (*current_block.input_columns[ + workspace.argument_column_indices[1]])[ + transform->current_row.row].get(); /// Either overflow or really negative value, both is not acceptable. if (offset < 0) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, "The offset for function {} must be in (0, {}], {} given", getName(), INT64_MAX, offset); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The offset for function {} must be in (0, {}], {} given", + getName(), INT64_MAX, offset); } } - const auto [target_row, offset_left] = transform->moveRowNumber(transform->current_row, offset * (is_lead ? 1 : -1)); + const auto [target_row, offset_left] = transform->moveRowNumber( + transform->current_row, offset * (is_lead ? 1 : -1)); - if (offset_left != 0 || target_row < transform->frame_start || transform->frame_end <= target_row) + if (offset_left != 0 + || target_row < transform->frame_start + || transform->frame_end <= target_row) { // Offset is outside the frame. if (argument_types.size() > 2) { // Column with default values is specified. - const IColumn & default_column = current_block.casted_columns[function_index] - ? *current_block.casted_columns[function_index].get() - : *current_block.input_columns[workspace.argument_column_indices[2]].get(); + const IColumn & default_column = + current_block.casted_columns[function_index] ? + *current_block.casted_columns[function_index].get() : + *current_block.input_columns[workspace.argument_column_indices[2]].get(); to.insert(default_column[transform->current_row.row]); } @@ -2362,24 +2439,30 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction else { // Offset is inside the frame. - to.insertFrom(*transform->blockAt(target_row).input_columns[workspace.argument_column_indices[0]], target_row.row); + to.insertFrom(*transform->blockAt(target_row).input_columns[ + workspace.argument_column_indices[0]], + target_row.row); } } }; struct WindowFunctionNthValue final : public WindowFunction { - WindowFunctionNthValue(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionNthValue(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : WindowFunction(name_, argument_types_, parameters_, createResultType(name_, argument_types_)) { if (!parameters.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} cannot be parameterized", name_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Function {} cannot be parameterized", name_); } if (!isInt64OrUInt64FieldType(argument_types[1]->getDefault().getType())) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Offset must be an integer, '{}' given", argument_types[1]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Offset must be an integer, '{}' given", + argument_types[1]->getName()); } } @@ -2387,7 +2470,8 @@ struct WindowFunctionNthValue final : public WindowFunction { if (argument_types_.size() != 2) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes exactly two arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes exactly two arguments", name_); } return argument_types_[0]; @@ -2395,24 +2479,30 @@ struct WindowFunctionNthValue final : public WindowFunction bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) 
const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & current_block = transform->blockAt(transform->current_row); IColumn & to = *current_block.output_columns[function_index]; const auto & workspace = transform->workspaces[function_index]; - Int64 offset = (*current_block.input_columns[workspace.argument_column_indices[1]])[transform->current_row.row].get(); + Int64 offset = (*current_block.input_columns[ + workspace.argument_column_indices[1]])[ + transform->current_row.row].get(); /// Either overflow or really negative value, both is not acceptable. if (offset <= 0) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, "The offset for function {} must be in (0, {}], {} given", getName(), INT64_MAX, offset); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "The offset for function {} must be in (0, {}], {} given", + getName(), INT64_MAX, offset); } --offset; const auto [target_row, offset_left] = transform->moveRowNumber(transform->frame_start, offset); - if (offset_left != 0 || target_row < transform->frame_start || transform->frame_end <= target_row) + if (offset_left != 0 + || target_row < transform->frame_start + || transform->frame_end <= target_row) { // Offset is outside the frame. to.insertDefault(); @@ -2420,7 +2510,9 @@ struct WindowFunctionNthValue final : public WindowFunction else { // Offset is inside the frame. - to.insertFrom(*transform->blockAt(target_row).input_columns[workspace.argument_column_indices[0]], target_row.row); + to.insertFrom(*transform->blockAt(target_row).input_columns[ + workspace.argument_column_indices[0]], + target_row.row); } } }; @@ -2441,34 +2533,35 @@ struct NonNegativeDerivativeParams bool interval_specified = false; Int64 ts_scale_multiplier = 0; - NonNegativeDerivativeParams(const std::string & name_, const DataTypes & argument_types, const Array & parameters) + NonNegativeDerivativeParams( + const std::string & name_, const DataTypes & argument_types, const Array & parameters) { if (!parameters.empty()) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function {} cannot be parameterized", name_); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Function {} cannot be parameterized", name_); } if (argument_types.size() != 2 && argument_types.size() != 3) { - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes 2 or 3 arguments", name_); + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Function {} takes 2 or 3 arguments", name_); } if (!isNumber(argument_types[ARGUMENT_METRIC])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Argument {} must be a number, '{}' given", - ARGUMENT_METRIC, - argument_types[ARGUMENT_METRIC]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Argument {} must be a number, '{}' given", + ARGUMENT_METRIC, + argument_types[ARGUMENT_METRIC]->getName()); } if (!isDateTime(argument_types[ARGUMENT_TIMESTAMP]) && !isDateTime64(argument_types[ARGUMENT_TIMESTAMP])) { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Argument {} must be DateTime or DateTime64, '{}' given", - ARGUMENT_TIMESTAMP, - argument_types[ARGUMENT_TIMESTAMP]->getName()); + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Argument {} must be DateTime or DateTime64, '{}' given", + ARGUMENT_TIMESTAMP, + argument_types[ARGUMENT_TIMESTAMP]->getName()); } if (isDateTime64(argument_types[ARGUMENT_TIMESTAMP])) @@ -2502,28 +2595,27 @@ struct NonNegativeDerivativeParams }; // nonNegativeDerivative(metric_column, timestamp_column[, 
INTERVAL 1 SECOND]) -struct WindowFunctionNonNegativeDerivative final : public StatefulWindowFunction, - public NonNegativeDerivativeParams +struct WindowFunctionNonNegativeDerivative final : public StatefulWindowFunction, public NonNegativeDerivativeParams { using Params = NonNegativeDerivativeParams; - WindowFunctionNonNegativeDerivative(const std::string & name_, const DataTypes & argument_types_, const Array & parameters_) + WindowFunctionNonNegativeDerivative(const std::string & name_, + const DataTypes & argument_types_, const Array & parameters_) : StatefulWindowFunction(name_, argument_types_, parameters_, std::make_shared()) , NonNegativeDerivativeParams(name, argument_types, parameters) - { - } + {} bool allocatesMemoryInArena() const override { return false; } - void windowInsertResultInto(const WindowTransform * transform, size_t function_index) const override + void windowInsertResultInto(const WindowTransform * transform, + size_t function_index) const override { const auto & current_block = transform->blockAt(transform->current_row); const auto & workspace = transform->workspaces[function_index]; auto & state = getState(workspace); - auto interval_duration = interval_specified - ? interval_length * (*current_block.input_columns[workspace.argument_column_indices[ARGUMENT_INTERVAL]]).getFloat64(0) - : 1; + auto interval_duration = interval_specified ? interval_length * + (*current_block.input_columns[workspace.argument_column_indices[ARGUMENT_INTERVAL]]).getFloat64(0) : 1; Float64 curr_metric = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_METRIC, transform->current_row); Float64 metric_diff = curr_metric - state.previous_metric; @@ -2531,18 +2623,16 @@ struct WindowFunctionNonNegativeDerivative final : public StatefulWindowFunction if (ts_scale_multiplier) { - const auto & column - = transform->blockAt(transform->current_row.block).input_columns[workspace.argument_column_indices[ARGUMENT_TIMESTAMP]]; + const auto & column = transform->blockAt(transform->current_row.block).input_columns[workspace.argument_column_indices[ARGUMENT_TIMESTAMP]]; const auto & curr_timestamp = checkAndGetColumn(*column).getInt(transform->current_row.row); Float64 time_elapsed = curr_timestamp - state.previous_timestamp; - result = (time_elapsed > 0) ? (metric_diff * ts_scale_multiplier / time_elapsed * interval_duration) : 0; + result = (time_elapsed > 0) ? (metric_diff * ts_scale_multiplier / time_elapsed * interval_duration) : 0; state.previous_timestamp = curr_timestamp; } else { - Float64 curr_timestamp - = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIMESTAMP, transform->current_row); + Float64 curr_timestamp = WindowFunctionHelpers::getValue(transform, function_index, ARGUMENT_TIMESTAMP, transform->current_row); Float64 time_elapsed = curr_timestamp - state.previous_timestamp; result = (time_elapsed > 0) ? 
(metric_diff / time_elapsed * interval_duration) : 0; state.previous_timestamp = curr_timestamp; @@ -2682,5 +2772,4 @@ void registerWindowFunctions(AggregateFunctionFactory & factory) name, argument_types, parameters); }, properties}); } - } From 6e7bffa6ea100ad0f966d3e608eaf2462f6771f8 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Fri, 2 Aug 2024 16:13:09 +0800 Subject: [PATCH 1799/2361] remove unused codes --- src/Planner/PlannerActionsVisitor.cpp | 30 --------------------------- src/Planner/PlannerActionsVisitor.h | 12 ----------- 2 files changed, 42 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index f6c2c92cbb4..071da91839b 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -1040,36 +1040,6 @@ String calculateConstantActionNodeName(const Field & constant_literal) return ActionNodeNameHelper::calculateConstantActionNodeName(constant_literal); } -String calculateWindowNodeActionName(const QueryTreeNodePtr & node, - const PlannerContext & planner_context, - QueryTreeNodeToName & node_to_name, - bool use_column_identifier_as_action_node_name) -{ - ActionNodeNameHelper helper(node_to_name, planner_context, use_column_identifier_as_action_node_name); - auto get_window_frame = [&]()-> std::optional{ - auto & window_node = node->as(); - auto & window_frame = window_node.getWindowFrame(); - if (!window_frame.is_default) - return window_frame; - return {}; - }; - return helper.calculateWindowNodeActionName(node, get_window_frame); -} - -String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, bool use_column_identifier_as_action_node_name) -{ - QueryTreeNodeToName empty_map; - ActionNodeNameHelper helper(empty_map, planner_context, use_column_identifier_as_action_node_name); - auto get_window_frame = [&]()-> std::optional{ - auto & window_node = node->as(); - auto & window_frame = window_node.getWindowFrame(); - if (!window_frame.is_default) - return window_frame; - return {}; - }; - return helper.calculateWindowNodeActionName(node, get_window_frame); -} - String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, bool use_column_identifier_as_action_node_name) { QueryTreeNodeToName empty_map; diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 78d7c69357a..a7f95a38169 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -75,18 +75,6 @@ String calculateConstantActionNodeName(const Field & constant_literal); * Window node action name can only be part of window function action name. * For column node column node identifier from planner context is used, if use_column_identifier_as_action_node_name = true. */ -String calculateWindowNodeActionName(const QueryTreeNodePtr & node, - const PlannerContext & planner_context, - QueryTreeNodeToName & node_to_name, - bool use_column_identifier_as_action_node_name = true); - -/** Calculate action node name for window node. - * Window node action name can only be part of window function action name. - * For column node column node identifier from planner context is used, if use_column_identifier_as_action_node_name = true. 
- */ -String calculateWindowNodeActionName(const QueryTreeNodePtr & node, - const PlannerContext & planner_context, - bool use_column_identifier_as_action_node_name = true); String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, From b35dd3bc02a2082f9a3d6a6e507805d9e1bd87a9 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Fri, 2 Aug 2024 16:22:36 +0800 Subject: [PATCH 1800/2361] simplify codes --- src/Planner/PlannerActionsVisitor.cpp | 30 +++++++++++++++----------- src/Planner/PlannerActionsVisitor.h | 1 + src/Planner/PlannerWindowFunctions.cpp | 13 +---------- 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 071da91839b..288669e7050 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -237,19 +237,7 @@ public: if (function_node.isWindowFunction()) { - auto get_window_frame = [&]() -> std::optional - { - auto & window_node = function_node.getWindowNode()->as(); - auto & window_frame = window_node.getWindowFrame(); - if (!window_frame.is_default) - return window_frame; - auto aggregate_function = function_node.getAggregateFunction(); - if (const auto * win_func = dynamic_cast(aggregate_function.get())) - { - return win_func->getDefaultFrame(); - } - return {}; - }; + auto get_window_frame = [&]() { return extractWindowFrame(function_node); }; buffer << " OVER ("; buffer << calculateWindowNodeActionName(function_node.getWindowNode(), get_window_frame); buffer << ')'; @@ -1040,6 +1028,22 @@ String calculateConstantActionNodeName(const Field & constant_literal) return ActionNodeNameHelper::calculateConstantActionNodeName(constant_literal); } +std::optional extractWindowFrame(const FunctionNode & node) +{ + if (!node.isWindowFunction()) + return {}; + auto & window_node = node.getWindowNode()->as(); + const auto & window_frame = window_node.getWindowFrame(); + if (!window_frame.is_default) + return window_frame; + auto aggregate_function = node.getAggregateFunction(); + if (const auto * win_func = dynamic_cast(aggregate_function.get())) + { + return win_func->getDefaultFrame(); + } + return {}; +} + String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, bool use_column_identifier_as_action_node_name) { QueryTreeNodeToName empty_map; diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index a7f95a38169..71b8accb2a0 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -71,6 +71,7 @@ String calculateConstantActionNodeName(const Field & constant_literal, const Dat /// Calculate action node name for constant, data type will be derived from constant literal value String calculateConstantActionNodeName(const Field & constant_literal); +std::optional extractWindowFrame(const FunctionNode & node); /** Calculate action node name for window node. * Window node action name can only be part of window function action name. * For column node column node identifier from planner context is used, if use_column_identifier_as_action_node_name = true. 
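The two patches above ("remove unused codes" and "simplify codes") consolidate the duplicated get_window_frame lambdas into a single extractWindowFrame(const FunctionNode &), and the hunks that follow apply the same helper in PlannerWindowFunctions.cpp. The precedence the helper ends up with is: a frame written explicitly in the OVER clause always wins; otherwise the window function may supply its own default frame, as percent_rank and ntile do via getDefaultFrame(); if neither exists, the caller keeps the WindowNode default. A standalone sketch of that precedence follows; Frame, Function and extractFrame here are simplified stand-ins, not the real ClickHouse classes.

#include <iostream>
#include <optional>
#include <string>

// Standalone sketch of the frame-selection precedence introduced above.
// Frame, Function and extractFrame are toy stand-ins, not ClickHouse classes.
struct Frame
{
    bool is_default = true;
    std::string text = "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW";
};

struct Function
{
    Frame frame_from_sql;                  // what the OVER (...) clause specified
    std::optional<Frame> function_default; // e.g. percent_rank() supplies one via getDefaultFrame()
};

std::optional<Frame> extractFrame(const Function & func)
{
    if (!func.frame_from_sql.is_default)
        return func.frame_from_sql; // an explicit frame written in SQL always wins
    return func.function_default;   // may be empty: the caller then keeps the window's default
}

int main()
{
    Function percent_rank;
    percent_rank.function_default = Frame{false, "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING"};

    const auto frame = extractFrame(percent_rank);
    std::cout << (frame ? frame->text : std::string("use the WindowNode default")) << '\n';
}

Keeping the fallback in one place is what lets percent_rank pick up its unbounded RANGE frame without every caller re-implementing the check.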
diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index 2a28787ba96..a69dd95a650 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -29,18 +29,7 @@ WindowDescription extractWindowDescriptionFromWindowNode(const FunctionNode & fu auto node = func_node.getWindowNode(); auto & window_node = node->as(); - auto get_window_frame = [&]() -> std::optional - { - auto frame = window_node.getWindowFrame(); - if (!frame.is_default) - return frame; - auto aggregate_function = func_node.getAggregateFunction(); - if (const auto * win_func = dynamic_cast(aggregate_function.get())) - { - return win_func->getDefaultFrame(); - } - return {}; - }; + auto get_window_frame = [&]() { return extractWindowFrame(func_node); }; WindowDescription window_description; window_description.window_name = calculateWindowNodeActionName(node, planner_context, get_window_frame); From 2e521e17edcb273cfcb59c2224280a6f8e9f73da Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Sat, 3 Aug 2024 06:02:50 +0800 Subject: [PATCH 1801/2361] fixed --- src/Planner/PlannerActionsVisitor.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 288669e7050..7758d0c129f 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -1039,9 +1039,11 @@ std::optional extractWindowFrame(const FunctionNode & node) auto aggregate_function = node.getAggregateFunction(); if (const auto * win_func = dynamic_cast(aggregate_function.get())) { - return win_func->getDefaultFrame(); + auto function_default_window_frame = win_func->getDefaultFrame(); + if (function_default_window_frame) + return function_default_window_frame; } - return {}; + return window_frame; } String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, bool use_column_identifier_as_action_node_name) From 682c735fa6c128a9f5df82f1a776186ed3ea1065 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Sat, 3 Aug 2024 06:14:52 +0800 Subject: [PATCH 1802/2361] update tests --- .../01592_window_functions.reference | 24 --------- .../0_stateless/01592_window_functions.sql | 30 ----------- .../0_stateless/03037_precent_rank.reference | 22 ++++++++ .../0_stateless/03037_precent_rank.sql | 53 +++++++++++++++++++ 4 files changed, 75 insertions(+), 54 deletions(-) create mode 100644 tests/queries/0_stateless/03037_precent_rank.reference create mode 100644 tests/queries/0_stateless/03037_precent_rank.sql diff --git a/tests/queries/0_stateless/01592_window_functions.reference b/tests/queries/0_stateless/01592_window_functions.reference index 558f643c281..ec957dd7a02 100644 --- a/tests/queries/0_stateless/01592_window_functions.reference +++ b/tests/queries/0_stateless/01592_window_functions.reference @@ -79,27 +79,3 @@ iPhone 900 Smartphone 500 500 Kindle Fire 150 Tablet 150 350 Samsung Galaxy Tab 200 Tablet 175 350 iPad 700 Tablet 350 350 ----- Q8 ---- -Lenovo Thinkpad Laptop 700 1 0 -Sony VAIO Laptop 700 1 0 -Dell Vostro Laptop 800 3 0.6666666666666666 -HP Elite Laptop 1200 4 1 -Microsoft Lumia Smartphone 200 1 0 -HTC One Smartphone 400 2 0.3333333333333333 -Nexus Smartphone 500 3 0.6666666666666666 -iPhone Smartphone 900 4 1 -Kindle Fire Tablet 150 1 0 -Samsung Galaxy Tab Tablet 200 2 0.5 -iPad Tablet 700 3 1 -Others Unknow 200 1 0 ----- Q9 ---- -0 1 0 -1 2 1 -2 3 2 -3 4 3 -4 5 4 -5 6 5 -6 7 6 -7 8 7 
-8 9 8 -9 10 9 diff --git a/tests/queries/0_stateless/01592_window_functions.sql b/tests/queries/0_stateless/01592_window_functions.sql index 32c53763e40..c6bb23bc7cf 100644 --- a/tests/queries/0_stateless/01592_window_functions.sql +++ b/tests/queries/0_stateless/01592_window_functions.sql @@ -101,37 +101,7 @@ SELECT FROM products INNER JOIN product_groups USING (group_id)) t order by group_name, product_name, price; -select '---- Q8 ----'; -INSERT INTO product_groups VALUES (4, 'Unknow'); -INSERT INTO products (product_id,product_name, group_id,price) VALUES (12, 'Others', 4, 200); - -SELECT * -FROM -( - SELECT - product_name, - group_name, - price, - rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS rank, - percent_rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS percent - FROM products - INNER JOIN product_groups USING (group_id) -) AS t -ORDER BY - group_name ASC, - price ASC, - product_name ASC; - drop table product_groups; drop table products; -select '---- Q9 ----'; -select number, row_number, cast(percent_rank * 10000 as Int32) as percent_rank -from ( - select number, row_number() over () as row_number, percent_rank() over (order by number) as percent_rank - from numbers(10000) - order by number - limit 10 -) -settings max_block_size=100; diff --git a/tests/queries/0_stateless/03037_precent_rank.reference b/tests/queries/0_stateless/03037_precent_rank.reference new file mode 100644 index 00000000000..6a23f3884cd --- /dev/null +++ b/tests/queries/0_stateless/03037_precent_rank.reference @@ -0,0 +1,22 @@ +Lenovo Thinkpad Laptop 700 1 0 +Sony VAIO Laptop 700 1 0 +Dell Vostro Laptop 800 3 0.6666666666666666 +HP Elite Laptop 1200 4 1 +Microsoft Lumia Smartphone 200 1 0 +HTC One Smartphone 400 2 0.3333333333333333 +Nexus Smartphone 500 3 0.6666666666666666 +iPhone Smartphone 900 4 1 +Kindle Fire Tablet 150 1 0 +Samsung Galaxy Tab Tablet 200 2 0.5 +iPad Tablet 700 3 1 +Others Unknow 200 1 0 +0 1 0 +1 2 1 +2 3 2 +3 4 3 +4 5 4 +5 6 5 +6 7 6 +7 8 7 +8 9 8 +9 10 9 diff --git a/tests/queries/0_stateless/03037_precent_rank.sql b/tests/queries/0_stateless/03037_precent_rank.sql new file mode 100644 index 00000000000..54880de53bc --- /dev/null +++ b/tests/queries/0_stateless/03037_precent_rank.sql @@ -0,0 +1,53 @@ +set allow_experimental_analyzer=1; +drop table if exists product_groups; +drop table if exists products; + +CREATE TABLE product_groups ( + group_id Int64, + group_name String +) Engine = Memory; + + +CREATE TABLE products ( + product_id Int64, + product_name String, + price DECIMAL(11, 2), + group_id Int64 +) Engine = Memory; + +INSERT INTO product_groups VALUES (1, 'Smartphone'),(2, 'Laptop'),(3, 'Tablet'); + +INSERT INTO products (product_id,product_name, group_id,price) VALUES (1, 'Microsoft Lumia', 1, 200), (2, 'HTC One', 1, 400), (3, 'Nexus', 1, 500), (4, 'iPhone', 1, 900),(5, 'HP Elite', 2, 1200),(6, 'Lenovo Thinkpad', 2, 700),(7, 'Sony VAIO', 2, 700),(8, 'Dell Vostro', 2, 800),(9, 'iPad', 3, 700),(10, 'Kindle Fire', 3, 150),(11, 'Samsung Galaxy Tab', 3, 200); + +INSERT INTO product_groups VALUES (4, 'Unknow'); +INSERT INTO products (product_id,product_name, group_id,price) VALUES (12, 'Others', 4, 200); + +SELECT * +FROM +( + SELECT + product_name, + group_name, + price, + rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS rank, + percent_rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS percent + FROM products + INNER JOIN product_groups USING (group_id) +) AS t +ORDER BY + group_name ASC, + price ASC, + product_name ASC; + +drop table 
product_groups; +drop table products; + +select number, row_number, cast(percent_rank * 10000 as Int32) as percent_rank +from ( + select number, row_number() over () as row_number, percent_rank() over (order by number) as percent_rank + from numbers(10000) + order by number + limit 10 +) +settings max_block_size=100; + From 8f5cf70aab732121072bf30a74e28d3f213f29ac Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Sat, 3 Aug 2024 06:29:22 +0800 Subject: [PATCH 1803/2361] add some comments --- src/Planner/PlannerActionsVisitor.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 71b8accb2a0..7b6b65eeb58 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -71,7 +71,11 @@ String calculateConstantActionNodeName(const Field & constant_literal, const Dat /// Calculate action node name for constant, data type will be derived from constant literal value String calculateConstantActionNodeName(const Field & constant_literal); +/// If the window frame is not set in sql, try to use the default frame from window function +/// if it have any one. Otherwise use the default window frame from `WindowNode`. +/// If the window frame is set in sql, use it anyway. std::optional extractWindowFrame(const FunctionNode & node); + /** Calculate action node name for window node. * Window node action name can only be part of window function action name. * For column node column node identifier from planner context is used, if use_column_identifier_as_action_node_name = true. From 08c48cf9444143b4642879f45b7e16bda5b9ccf3 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Sat, 3 Aug 2024 06:33:10 +0800 Subject: [PATCH 1804/2361] update --- src/Planner/PlannerActionsVisitor.cpp | 6 ++---- src/Planner/PlannerActionsVisitor.h | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 7758d0c129f..288669e7050 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -1039,11 +1039,9 @@ std::optional extractWindowFrame(const FunctionNode & node) auto aggregate_function = node.getAggregateFunction(); if (const auto * win_func = dynamic_cast(aggregate_function.get())) { - auto function_default_window_frame = win_func->getDefaultFrame(); - if (function_default_window_frame) - return function_default_window_frame; + return win_func->getDefaultFrame(); } - return window_frame; + return {}; } String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, bool use_column_identifier_as_action_node_name) diff --git a/src/Planner/PlannerActionsVisitor.h b/src/Planner/PlannerActionsVisitor.h index 7b6b65eeb58..17cce39f2a0 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -72,7 +72,7 @@ String calculateConstantActionNodeName(const Field & constant_literal, const Dat String calculateConstantActionNodeName(const Field & constant_literal); /// If the window frame is not set in sql, try to use the default frame from window function -/// if it have any one. Otherwise use the default window frame from `WindowNode`. +/// if it have any one. Otherwise return empty. /// If the window frame is set in sql, use it anyway. 
std::optional extractWindowFrame(const FunctionNode & node); From b8c6beeb7a86b823719631851477f58898a6b871 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Sat, 3 Aug 2024 07:21:31 +0800 Subject: [PATCH 1805/2361] update doc --- docs/en/sql-reference/window-functions/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/window-functions/index.md b/docs/en/sql-reference/window-functions/index.md index 27d4bd763c7..668d831f4b1 100644 --- a/docs/en/sql-reference/window-functions/index.md +++ b/docs/en/sql-reference/window-functions/index.md @@ -24,7 +24,7 @@ ClickHouse supports the standard grammar for defining windows and window functio | `GROUPS` frame | ⌠| | Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) | | `rank()`, `dense_rank()`, `row_number()` | ✅
Alias: `denseRank()` | -| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`
Alias: `percentRank()`| +| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`. Need to use the new analyzer, set allow_experimental_analyzer=1.
Alias: `percentRank()`| | `lag/lead(value, offset)` | ❌<br/>
You can use one of the following workarounds:
1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead`<br/>
2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` | | ntile(buckets) | ✅
Specify window like, (partition by x order by y rows between unbounded preceding and unrounded following). | From 9845aeac0f71222d48a5be28e75df071d9d500c2 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Mon, 5 Aug 2024 10:18:28 +0800 Subject: [PATCH 1806/2361] support percent_rank in old analyzer --- .../sql-reference/window-functions/index.md | 2 +- src/Interpreters/ExpressionAnalyzer.cpp | 22 ++++++++++++++++--- src/Interpreters/ExpressionAnalyzer.h | 7 +++++- .../0_stateless/03037_precent_rank.sql | 1 - 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/window-functions/index.md b/docs/en/sql-reference/window-functions/index.md index 668d831f4b1..27d4bd763c7 100644 --- a/docs/en/sql-reference/window-functions/index.md +++ b/docs/en/sql-reference/window-functions/index.md @@ -24,7 +24,7 @@ ClickHouse supports the standard grammar for defining windows and window functio | `GROUPS` frame | ⌠| | Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) | | `rank()`, `dense_rank()`, `row_number()` | ✅
Alias: `denseRank()` | -| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`. Need to use the new analyzer, set allow_experimental_analyzer=1.
Alias: `percentRank()`| +| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)`
Alias: `percentRank()`| | `lag/lead(value, offset)` | ❌<br/>
You can use one of the following workarounds:
1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead`<br/>
2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` | | ntile(buckets) | ✅
Specify window like, (partition by x order by y rows between unbounded preceding and unrounded following). | diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index c767aeb2ec2..7063b2162a0 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -38,6 +38,7 @@ #include #include +#include #include #include @@ -590,6 +591,7 @@ void ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAG & actions, Aggrega void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_, const WindowDescriptions & existing_descriptions, + AggregateFunctionPtr aggregate_function, WindowDescription & desc, const IAST * ast) { const auto & definition = ast->as(); @@ -698,7 +700,21 @@ void ExpressionAnalyzer::makeWindowDescriptionFromAST(const Context & context_, ast->formatForErrorMessage()); } + const auto * window_function = aggregate_function ? dynamic_cast(aggregate_function.get()) : nullptr; desc.frame.is_default = definition.frame_is_default; + if (desc.frame.is_default && window_function) + { + auto default_window_frame_opt = window_function->getDefaultFrame(); + if (default_window_frame_opt) + { + desc.frame = *default_window_frame_opt; + /// Append the default frame description to window_name, make sure it will be put into + /// a proper window description. + desc.window_name += " " + desc.frame.toString(); + return; + } + } + desc.frame.type = definition.frame_type; desc.frame.begin_type = definition.frame_begin_type; desc.frame.begin_preceding = definition.frame_begin_preceding; @@ -734,7 +750,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAG & actions) WindowDescription desc; desc.window_name = elem.name; makeWindowDescriptionFromAST(*current_context, window_descriptions, - desc, elem.definition.get()); + nullptr, desc, elem.definition.get()); auto [it, inserted] = window_descriptions.insert( {elem.name, std::move(desc)}); @@ -821,12 +837,12 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAG & actions) WindowDescription desc; desc.window_name = default_window_name; makeWindowDescriptionFromAST(*current_context, window_descriptions, - desc, &definition); + window_function.aggregate_function, desc, &definition); auto full_sort_description = desc.full_sort_description; auto [it, inserted] = window_descriptions.insert( - {default_window_name, std::move(desc)}); + {desc.window_name, std::move(desc)}); if (!inserted) { diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 0c00247df85..dc038e10594 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -135,7 +135,12 @@ public: /// A list of windows for window functions. const WindowDescriptions & windowDescriptions() const { return window_descriptions; } - void makeWindowDescriptionFromAST(const Context & context, const WindowDescriptions & existing_descriptions, WindowDescription & desc, const IAST * ast); + void makeWindowDescriptionFromAST( + const Context & context, + const WindowDescriptions & existing_descriptions, + AggregateFunctionPtr aggregate_function, + WindowDescription & desc, + const IAST * ast); void makeWindowDescriptions(ActionsDAG & actions); /** Checks if subquery is not a plain StorageSet. 
diff --git a/tests/queries/0_stateless/03037_precent_rank.sql b/tests/queries/0_stateless/03037_precent_rank.sql index 54880de53bc..b0f83fa3340 100644 --- a/tests/queries/0_stateless/03037_precent_rank.sql +++ b/tests/queries/0_stateless/03037_precent_rank.sql @@ -1,4 +1,3 @@ -set allow_experimental_analyzer=1; drop table if exists product_groups; drop table if exists products; From 81b9d6d3f51192bee509cb76a8c731ac1aa2b388 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Mon, 5 Aug 2024 11:19:21 +0800 Subject: [PATCH 1807/2361] add doc --- .../window-functions/lagInFrame.md | 2 +- .../window-functions/leadInFrame.md | 2 +- .../window-functions/percent_rank.md | 72 +++++++++++++++++++ 3 files changed, 74 insertions(+), 2 deletions(-) create mode 100644 docs/en/sql-reference/window-functions/percent_rank.md diff --git a/docs/en/sql-reference/window-functions/lagInFrame.md b/docs/en/sql-reference/window-functions/lagInFrame.md index de6e9005baa..01bf809e76e 100644 --- a/docs/en/sql-reference/window-functions/lagInFrame.md +++ b/docs/en/sql-reference/window-functions/lagInFrame.md @@ -1,7 +1,7 @@ --- slug: /en/sql-reference/window-functions/lagInFrame sidebar_label: lagInFrame -sidebar_position: 8 +sidebar_position: 9 --- # lagInFrame diff --git a/docs/en/sql-reference/window-functions/leadInFrame.md b/docs/en/sql-reference/window-functions/leadInFrame.md index 4a82c03f6e6..dae4353b582 100644 --- a/docs/en/sql-reference/window-functions/leadInFrame.md +++ b/docs/en/sql-reference/window-functions/leadInFrame.md @@ -1,7 +1,7 @@ --- slug: /en/sql-reference/window-functions/leadInFrame sidebar_label: leadInFrame -sidebar_position: 9 +sidebar_position: 10 --- # leadInFrame diff --git a/docs/en/sql-reference/window-functions/percent_rank.md b/docs/en/sql-reference/window-functions/percent_rank.md new file mode 100644 index 00000000000..4b260f667b9 --- /dev/null +++ b/docs/en/sql-reference/window-functions/percent_rank.md @@ -0,0 +1,72 @@ +--- +slug: /en/sql-reference/window-functions/percent_rank +sidebar_label: percent_rank +sidebar_position: 8 +--- + +# percent_rank + +returns the relative rank (i.e. percentile) of rows within a window partition. + +**Syntax** + +Alias: `percentRank` (case-sensitive) + +```sql +percent_rank (column_name) + OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column] + [RANGE RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING]] | [window_name]) +FROM table_name +WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column] RANGE RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +``` + +The default and required window frame definition is `RANGE RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING`. + +For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax). 
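A rough sketch of the equivalence noted in the window functions overview: `percent_rank()` should match the manual rank-based formula `ifNull((rank() - 1) / nullif(count(1) - 1, 0), 0)`. The `numbers(5)` source below is only an illustrative choice:

```sql
-- Both computed columns should read 0, 0.25, 0.5, 0.75, 1.
SELECT
    number,
    percent_rank() OVER (ORDER BY number) AS pr,
    ifNull((rank() OVER (ORDER BY number) - 1) / nullif(count(1) OVER () - 1, 0), 0) AS manual_pr
FROM numbers(5);
```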
+ +**Example** + + +Query: + +```sql +CREATE TABLE salaries +( + `team` String, + `player` String, + `salary` UInt32, + `position` String +) +Engine = Memory; + +INSERT INTO salaries FORMAT Values + ('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'), + ('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'), + ('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'), + ('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'), + ('Port Elizabeth Barbarians', 'Robert George', 195000, 'M'), + ('South Hampton Seagulls', 'Douglas Benson', 150000, 'M'), + ('South Hampton Seagulls', 'James Henderson', 140000, 'M'); +``` + +```sql +SELECT player, salary, + percent_rank() OVER (ORDER BY salary DESC) AS percent_rank +FROM salaries; +``` + +Result: + +```response + + ┌─player──────────┬─salary─┬───────percent_rank─┠+1. │ Gary Chen │ 195000 │ 0 │ +2. │ Robert George │ 195000 │ 0 │ +3. │ Charles Juarez │ 190000 │ 0.3333333333333333 │ +4. │ Michael Stanley │ 150000 │ 0.5 │ +5. │ Scott Harrison │ 150000 │ 0.5 │ +6. │ Douglas Benson │ 150000 │ 0.5 │ +7. │ James Henderson │ 140000 │ 1 │ + └─────────────────┴────────┴────────────────────┘ + +``` From deb58b4ede1cabff8e4490fb39b2627ee94a347a Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Wed, 3 Jul 2024 18:23:38 +0800 Subject: [PATCH 1808/2361] any/anti/semi join support mixed join conditions --- src/Interpreters/HashJoin/HashJoin.cpp | 61 +++-- src/Interpreters/HashJoin/HashJoinMethods.h | 139 ++++++++---- src/Interpreters/HashJoin/JoinFeatures.h | 7 +- src/Interpreters/HashJoin/JoinUsedFlags.h | 33 ++- src/Interpreters/HashJoin/LeftHashJoin.cpp | 3 + src/Interpreters/joinDispatch.h | 108 +++++---- src/Storages/StorageJoin.cpp | 6 +- ..._join_on_inequal_expression_fast.reference | 214 ++++++++++++++++++ ...006_join_on_inequal_expression_fast.sql.j2 | 11 + 9 files changed, 469 insertions(+), 113 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 0c7cad4360d..769cb574ed7 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -291,12 +291,13 @@ void HashJoin::dataMapInit(MapsVariant & map) { if (kind == JoinKind::Cross) return; - joinDispatchInit(kind, strictness, map); - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { map_.create(data->type); }); + auto prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; + joinDispatchInit(kind, strictness, map, prefer_use_maps_all); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { map_.create(data->type); }); if (reserve_num) { - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { map_.reserve(data->type, reserve_num); }); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { map_.reserve(data->type, reserve_num); }); } if (!data) @@ -327,9 +328,10 @@ size_t HashJoin::getTotalRowCount() const } else { + auto prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; for (const auto & map : data->maps) { - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { res += map_.getTotalRowCount(data->type); }); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { res += map_.getTotalRowCount(data->type); }); } } @@ -367,9 +369,10 @@ size_t HashJoin::getTotalByteCount() const if (data->type != Type::CROSS) { + auto prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; for (const auto & map : data->maps) { - 
joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { res += map_.getTotalByteCountImpl(data->type); }); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { res += map_.getTotalByteCountImpl(data->type); }); } } return res; @@ -520,6 +523,8 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) return true; } + bool prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; + size_t total_rows = 0; size_t total_bytes = 0; { @@ -592,7 +597,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) bool is_inserted = false; if (kind != JoinKind::Cross) { - joinDispatch(kind, strictness, data->maps[onexpr_idx], [&](auto kind_, auto strictness_, auto & map) + joinDispatch(kind, strictness, data->maps[onexpr_idx], prefer_use_maps_all, [&](auto kind_, auto strictness_, auto & map) { size_t size = HashJoinMethods>::insertFromBlockImpl( *this, @@ -608,10 +613,10 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) is_inserted); if (flag_per_row) - used_flags->reinit(stored_block); + used_flags->reinit, MapsAll>>(stored_block); else if (is_inserted) /// Number of buckets + 1 value from zero storage - used_flags->reinit(size + 1); + used_flags->reinit, MapsAll>>(size + 1); }); } @@ -869,7 +874,7 @@ ColumnWithTypeAndName HashJoin::joinGet(const Block & block, const Block & block keys.insert(std::move(key)); } - static_assert(!MapGetter::flagged, + static_assert(!MapGetter::flagged, "joinGet are not protected from hash table changes between block processing"); std::vector maps_vector; @@ -910,16 +915,34 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) materializeBlockInplace(block); } + bool prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; { std::vectormaps[0])> * > maps_vector; for (size_t i = 0; i < table_join->getClauses().size(); ++i) maps_vector.push_back(&data->maps[i]); - if (joinDispatch(kind, strictness, maps_vector, [&](auto kind_, auto strictness_, auto & maps_vector_) + if (joinDispatch(kind, strictness, maps_vector, prefer_use_maps_all, [&](auto kind_, auto strictness_, auto & maps_vector_) { - using MapType = typename MapGetter::Map; - Block remaining_block = HashJoinMethods::joinBlockImpl( - *this, block, sample_block_with_columns_to_add, maps_vector_); + Block remaining_block; + if constexpr (std::is_same_v, std::vector>) + { + remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + } + else if constexpr (std::is_same_v, std::vector>) + { + remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + } + else if constexpr (std::is_same_v, std::vector>) + { + remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown maps type"); + } if (remaining_block.rows()) not_processed = std::make_shared(ExtraBlock{std::move(remaining_block)}); else @@ -1019,7 +1042,8 @@ public: rows_added = fillColumnsFromMap(map, columns_right); }; - if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps.front(), fill_callback)) + bool prefer_use_maps_all = parent.table_join->getMixedJoinExpression() != nullptr; + if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps.front(), prefer_use_maps_all, fill_callback)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown 
JOIN strictness '{}' (must be on of: ANY, ALL, ASOF)", parent.strictness); } @@ -1216,11 +1240,12 @@ void HashJoin::reuseJoinedData(const HashJoin & join) if (flag_per_row) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "StorageJoin with ORs is not supported"); + bool prefer_use_maps_all = join.table_join->getMixedJoinExpression() != nullptr; for (auto & map : data->maps) { - joinDispatch(kind, strictness, map, [this](auto kind_, auto strictness_, auto & map_) + joinDispatch(kind, strictness, map, prefer_use_maps_all, [this](auto kind_, auto strictness_, auto & map_) { - used_flags->reinit(map_.getBufferSizeInCells(data->type) + 1); + used_flags->reinit, MapsAll>>(map_.getBufferSizeInCells(data->type) + 1); }); } } @@ -1300,7 +1325,9 @@ void HashJoin::validateAdditionalFilterExpression(ExpressionActionsPtr additiona additional_filter_expression->dumpActions()); } - bool is_supported = (strictness == JoinStrictness::All) && (isInnerOrLeft(kind) || isRightOrFull(kind)); + bool is_supported = ((strictness == JoinStrictness::All) && (isInnerOrLeft(kind) || isRightOrFull(kind))) + || ((strictness == JoinStrictness::Semi || strictness == JoinStrictness::Any || strictness == JoinStrictness::Anti) + && (isLeft(kind) || isRight(kind))); if (!is_supported) { throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 0dfafa94efc..36785c08845 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -112,7 +112,7 @@ public: const MapsTemplateVector & maps_, bool is_join_get = false) { - constexpr JoinFeatures join_features; + constexpr JoinFeatures join_features; std::vector join_on_keys; const auto & onexprs = join.table_join->getClauses(); @@ -358,22 +358,20 @@ private: AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags) { - constexpr JoinFeatures join_features; - if constexpr (join_features.is_all_join) + constexpr JoinFeatures join_features; + if constexpr (join_features.is_maps_all) { - if (added_columns.additional_filter_expression) - { - bool mark_per_row_used = join_features.right || join_features.full || mapv.size() > 1; - return joinRightColumnsWithAddtitionalFilter( - std::forward>(key_getter_vector), - mapv, - added_columns, - used_flags, - need_filter, - join_features.need_flags, - join_features.add_missing, - mark_per_row_used); - } + if (added_columns.additional_filter_expression) + { + bool mark_per_row_used = join_features.right || join_features.full || mapv.size() > 1; + return joinRightColumnsWithAddtitionalFilter( + std::forward>(key_getter_vector), + mapv, + added_columns, + used_flags, + need_filter, + mark_per_row_used); + } } if (added_columns.additional_filter_expression) @@ -394,7 +392,7 @@ private: AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags) { - constexpr JoinFeatures join_features; + constexpr JoinFeatures join_features; size_t rows = added_columns.rows_to_add; if constexpr (need_filter) @@ -474,7 +472,7 @@ private: mapped, added_columns, current_offset, known_rows, used_flags_opt); } } - else if constexpr (join_features.is_any_join && KIND == JoinKind::Inner) + else if constexpr (join_features.is_any_join && join_features.inner) { bool used_once = used_flags.template setUsedOnce(find_result); @@ -655,24 +653,23 @@ private: } /// First to collect all matched rows refs by join keys, then filter out rows which are not true in additional filter expression. 
- template + template static size_t joinRightColumnsWithAddtitionalFilter( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags [[maybe_unused]], bool need_filter [[maybe_unused]], - bool need_flags [[maybe_unused]], - bool add_missing [[maybe_unused]], bool flag_per_row [[maybe_unused]]) { + constexpr JoinFeatures join_features; size_t left_block_rows = added_columns.rows_to_add; if (need_filter) added_columns.filter = IColumn::Filter(left_block_rows, 0); std::unique_ptr pool; - if constexpr (need_replication) + if constexpr (join_features.need_replication) added_columns.offsets_to_replicate = std::make_unique(left_block_rows); std::vector row_replicate_offset; @@ -699,7 +696,7 @@ private: selected_rows.clear(); for (; left_row_iter < left_block_rows; ++left_row_iter) { - if constexpr (need_replication) + if constexpr (join_features.need_replication) { if (unlikely(total_added_rows + current_added_rows >= max_joined_block_rows)) { @@ -743,7 +740,7 @@ private: for (size_t i = 1, n = row_replicate_offset.size(); i < n; ++i) { bool any_matched = false; - /// For all right join, flag_per_row is true, we need mark used flags for each row. + /// For right join, flag_per_row is true, we need mark used flags for each row. if (flag_per_row) { for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) @@ -751,10 +748,26 @@ private: if (filter_flags[replicated_row]) { any_matched = true; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, add_missing); - total_added_rows += 1; - if (need_flags) - used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + if constexpr (join_features.is_semi_join || join_features.is_any_join) + { + auto used_once = used_flags.template setUsedOnce(selected_right_row_it->block, selected_right_row_it->row_num, 0); + if (used_once) + { + total_added_rows += 1; + added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + } + } + else if constexpr (join_features.is_anti_join) + { + if constexpr (join_features.right && join_features.need_flags) + used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + } + else + { + total_added_rows += 1; + added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + } } ++selected_right_row_it; } @@ -763,34 +776,66 @@ private: { for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) { - if (filter_flags[replicated_row]) + if constexpr (join_features.is_anti_join) { - any_matched = true; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, add_missing); - total_added_rows += 1; + any_matched |= filter_flags[replicated_row]; + } + else if constexpr (join_features.need_replication) + { + if (filter_flags[replicated_row]) + { + any_matched = true; + added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + total_added_rows += 1; + } + ++selected_right_row_it; + } + else + { + if (filter_flags[replicated_row]) + { + any_matched = true; + added_columns.appendFromBlock(*selected_right_row_it->block, 
selected_right_row_it->row_num, join_features.add_missing); + total_added_rows += 1; + selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row; + break; + } + else + ++selected_right_row_it; } - ++selected_right_row_it; } } - if (!any_matched) + + + if constexpr (join_features.is_anti_join) { - if (add_missing) - addNotFoundRow(added_columns, total_added_rows); - else - addNotFoundRow(added_columns, total_added_rows); + if (!any_matched) + { + if constexpr (join_features.left) + if (need_filter) + setUsed(added_columns.filter, left_start_row + i - 1); + addNotFoundRow(added_columns, total_added_rows); + } } else { - if (!flag_per_row && need_flags) - used_flags.template setUsed(find_results[find_result_index]); - if (need_filter) - setUsed(added_columns.filter, left_start_row + i - 1); - if (add_missing) - added_columns.applyLazyDefaults(); + if (!any_matched) + { + addNotFoundRow(added_columns, total_added_rows); + } + else + { + if (!flag_per_row) + used_flags.template setUsed(find_results[find_result_index]); + if (need_filter) + setUsed(added_columns.filter, left_start_row + i - 1); + if constexpr (join_features.add_missing) + added_columns.applyLazyDefaults(); + } } find_result_index += (prev_replicated_row != row_replicate_offset[i]); - if constexpr (need_replication) + if constexpr (join_features.need_replication) { (*added_columns.offsets_to_replicate)[left_start_row + i - 1] = total_added_rows; } @@ -817,7 +862,7 @@ private: auto filter_col = buildAdditionalFilter(left_start_row, selected_rows, row_replicate_offset, added_columns); copy_final_matched_rows(left_start_row, filter_col); - if constexpr (need_replication) + if constexpr (join_features.need_replication) { // Add a check for current_added_rows to avoid run the filter expression on too small size batch. 
if (total_added_rows >= max_joined_block_rows || current_added_rows < 1024) @@ -825,7 +870,7 @@ private: } } - if constexpr (need_replication) + if constexpr (join_features.need_replication) { added_columns.offsets_to_replicate->resize_assume_reserved(left_row_iter); added_columns.filter.resize_assume_reserved(left_row_iter); diff --git a/src/Interpreters/HashJoin/JoinFeatures.h b/src/Interpreters/HashJoin/JoinFeatures.h index 2f2bd1e29a2..a530179f0b4 100644 --- a/src/Interpreters/HashJoin/JoinFeatures.h +++ b/src/Interpreters/HashJoin/JoinFeatures.h @@ -3,15 +3,15 @@ #include namespace DB { -template +template struct JoinFeatures { static constexpr bool is_any_join = STRICTNESS == JoinStrictness::Any; - static constexpr bool is_any_or_semi_join = STRICTNESS == JoinStrictness::Any || STRICTNESS == JoinStrictness::RightAny || (STRICTNESS == JoinStrictness::Semi && KIND == JoinKind::Left); static constexpr bool is_all_join = STRICTNESS == JoinStrictness::All; static constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; static constexpr bool is_semi_join = STRICTNESS == JoinStrictness::Semi; static constexpr bool is_anti_join = STRICTNESS == JoinStrictness::Anti; + static constexpr bool is_any_or_semi_join = is_any_join || STRICTNESS == JoinStrictness::RightAny || (is_semi_join && KIND == JoinKind::Left); static constexpr bool left = KIND == JoinKind::Left; static constexpr bool right = KIND == JoinKind::Right; @@ -22,7 +22,8 @@ struct JoinFeatures static constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); static constexpr bool add_missing = (left || full) && !is_semi_join; - static constexpr bool need_flags = MapGetter::flagged; + static constexpr bool need_flags = MapGetter, HashJoin::MapsOne>>::flagged; + static constexpr bool is_maps_all = std::is_same_v, HashJoin::MapsAll>; }; } diff --git a/src/Interpreters/HashJoin/JoinUsedFlags.h b/src/Interpreters/HashJoin/JoinUsedFlags.h index bd41ba2073f..c84c6ec3fea 100644 --- a/src/Interpreters/HashJoin/JoinUsedFlags.h +++ b/src/Interpreters/HashJoin/JoinUsedFlags.h @@ -26,10 +26,10 @@ public: /// Update size for vector with flags. /// Calling this method invalidates existing flags. /// It can be called several times, but all of them should happen before using this structure. 
- template + template void reinit(size_t size) { - if constexpr (MapGetter::flagged) + if constexpr (MapGetter::flagged) { assert(flags[nullptr].size() <= size); need_flags = true; @@ -43,10 +43,10 @@ public: } } - template + template void reinit(const Block * block_ptr) { - if constexpr (MapGetter::flagged) + if constexpr (MapGetter::flagged) { assert(flags[block_ptr].size() <= block_ptr->rows()); need_flags = true; @@ -148,6 +148,31 @@ public: } } + template + bool setUsedOnce(const Block * block, size_t row_num, size_t offset) + { + if constexpr (!use_flags) + return true; + + if constexpr (flag_per_row) + { + /// fast check to prevent heavy CAS with seq_cst order + if (flags[block][row_num].load(std::memory_order_relaxed)) + return false; + + bool expected = false; + return flags[block][row_num].compare_exchange_strong(expected, true); + } + else + { + /// fast check to prevent heavy CAS with seq_cst order + if (flags[nullptr][offset].load(std::memory_order_relaxed)) + return false; + + bool expected = false; + return flags[nullptr][offset].compare_exchange_strong(expected, true); + } + } }; } diff --git a/src/Interpreters/HashJoin/LeftHashJoin.cpp b/src/Interpreters/HashJoin/LeftHashJoin.cpp index 69e17ff70bd..a53ffaac0b5 100644 --- a/src/Interpreters/HashJoin/LeftHashJoin.cpp +++ b/src/Interpreters/HashJoin/LeftHashJoin.cpp @@ -4,8 +4,11 @@ namespace DB { template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; } diff --git a/src/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h index 54c5c7dc83a..982c56e8210 100644 --- a/src/Interpreters/joinDispatch.h +++ b/src/Interpreters/joinDispatch.h @@ -12,38 +12,41 @@ namespace DB { -template +template struct MapGetter; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; 
static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; /// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; /// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. 
-template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template -struct MapGetter { using Map = HashJoin::MapsAsof; static constexpr bool flagged = false; }; +template +struct MapGetter { using Map = HashJoin::MapsAsof; static constexpr bool flagged = false; }; static constexpr std::array STRICTNESSES = { JoinStrictness::RightAny, @@ -62,7 +65,7 @@ static constexpr std::array KINDS = { }; /// Init specified join map -inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin::MapsVariant & maps) +inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin::MapsVariant & maps, bool prefer_use_maps_all = false) { return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) { @@ -70,7 +73,10 @@ inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin: constexpr auto j = ij % STRICTNESSES.size(); if (kind == KINDS[i] && strictness == STRICTNESSES[j]) { - maps = typename MapGetter::Map(); + if (prefer_use_maps_all) + maps = typename MapGetter::Map(); + else + maps = typename MapGetter::Map(); return true; } return false; @@ -79,7 +85,7 @@ inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin: /// Call function on specified join map template -inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & maps, Func && func) +inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & maps, bool prefer_use_maps_all, Func && func) { return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) { @@ -89,10 +95,16 @@ inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & constexpr auto j = ij % STRICTNESSES.size(); if (kind == KINDS[i] && strictness == STRICTNESSES[j]) { - func( - std::integral_constant(), - std::integral_constant(), - std::get::Map>(maps)); + if (prefer_use_maps_all) + func( + std::integral_constant(), + std::integral_constant(), + std::get::Map>(maps)); + else + func( + std::integral_constant(), + std::integral_constant(), + std::get::Map>(maps)); return true; } return false; @@ -101,7 +113,7 @@ inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & /// Call function on specified join map template -inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, std::vector & mapsv, Func && func) +inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, std::vector & mapsv, bool prefer_use_maps_all, Func && func) { return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) { @@ -111,17 +123,31 @@ inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, std::vector::Map; - std::vector v; - 
v.reserve(mapsv.size()); - for (const auto & el : mapsv) - v.push_back(&std::get(*el)); + if (prefer_use_maps_all) + { + using MapType = typename MapGetter::Map; + std::vector v; + v.reserve(mapsv.size()); + for (const auto & el : mapsv) + v.push_back(&std::get(*el)); - func( - std::integral_constant(), - std::integral_constant(), - v - /*std::get::Map>(maps)*/); + func( + std::integral_constant(), std::integral_constant(), v + /*std::get::Map>(maps)*/); + } + else + { + using MapType = typename MapGetter::Map; + std::vector v; + v.reserve(mapsv.size()); + for (const auto & el : mapsv) + v.push_back(&std::get(*el)); + + func( + std::integral_constant(), std::integral_constant(), v + /*std::get::Map>(maps)*/); + + } return true; } return false; diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index a0d6cf11b64..974df940e91 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -504,7 +504,11 @@ protected: return {}; Chunk chunk; - if (!joinDispatch(join->kind, join->strictness, join->data->maps.front(), + if (!joinDispatch( + join->kind, + join->strictness, + join->data->maps.front(), + join->table_join->getMixedJoinExpression() != nullptr, [&](auto kind, auto strictness, auto & map) { chunk = createChunk(map); })) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown JOIN strictness"); return chunk; diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference index 46f24f73356..3e413afd98e 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference @@ -382,6 +382,220 @@ key1 e 5 5 5 key1 C 3 4 5 key2 a2 1 1 1 0 0 \N key4 f 2 3 4 key4 F 1 1 1 SET join_algorithm='hash'; +SELECT t1.*, t2.* FROM t1 LEFT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +key1 d 4 7 2 0 0 \N +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON 
t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 d 4 7 2 key1 0 0 \N +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SELECT t1.*, t2.* FROM t1 RIGHT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key 
and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SET join_algorithm='grace_hash'; +SELECT t1.*, t2.* FROM t1 LEFT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +key1 d 4 7 2 0 0 \N +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 d 4 7 2 key1 0 0 \N +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SELECT t1.*, t2.* FROM t1 RIGHT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.b + 
t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SET join_algorithm='hash'; SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY (t1.key, t1.attr, t2.key, t2.attr); key1 a 1 1 2 key1 A 1 2 1 key1 a 1 1 2 key1 B 2 1 2 diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 index 61ad5ec0bf1..1bf5a7870e7 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 @@ -22,6 +22,17 @@ SELECT t1.*, t2.* FROM t1 {{ join_type }} JOIN t2 ON t1.key = t2.key AND (t1.a=2 {% endfor -%} {% endfor -%} +{% for algorithm in ['hash', 'grace_hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['LEFT', 'RIGHT'] -%} +{% for join_strictness in ['ANY', 'SEMI', 'ANTI'] -%} +SELECT t1.*, t2.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* 
from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 {{ join_type }} {{ join_strictness }} JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +{% endfor -%} +{% endfor -%} +{% endfor -%} {% for algorithm in ['hash'] -%} SET join_algorithm='{{ algorithm }}'; From 413834d04920072dd9a38b56902d799f71a476a3 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Thu, 4 Jul 2024 10:06:11 +0800 Subject: [PATCH 1809/2361] instance template classes --- src/Interpreters/HashJoin/HashJoinMethods.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 36785c08845..17c11b202ca 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -970,9 +970,12 @@ private: /// Instantiate template class ahead in different .cpp files to avoid `too large translation unit`. extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; From 377eed20fca56c74e4987fe4038fca6f0e019090 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Thu, 4 Jul 2024 15:05:24 +0800 Subject: [PATCH 1810/2361] reduce the size of HashJoin.cpp.o --- src/Interpreters/HashJoin/FullHashJoin.cpp | 2 +- src/Interpreters/HashJoin/HashJoinMethods.h | 834 +--------------- .../HashJoin/HashJoinMethodsImpl.h | 912 ++++++++++++++++++ src/Interpreters/HashJoin/InnerHashJoin.cpp | 2 +- src/Interpreters/HashJoin/LeftHashJoin.cpp | 2 +- src/Interpreters/HashJoin/RightHashJoin.cpp | 2 +- 6 files changed, 932 insertions(+), 822 deletions(-) create mode 100644 src/Interpreters/HashJoin/HashJoinMethodsImpl.h diff --git a/src/Interpreters/HashJoin/FullHashJoin.cpp b/src/Interpreters/HashJoin/FullHashJoin.cpp index 5d058d10fc2..4cdb2e757a4 100644 --- a/src/Interpreters/HashJoin/FullHashJoin.cpp +++ b/src/Interpreters/HashJoin/FullHashJoin.cpp @@ -1,4 +1,4 @@ -#include +#include namespace DB { diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 17c11b202ca..e3b8fbc1737 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -12,15 +12,8 @@ #include #include - namespace DB { -namespace ErrorCodes -{ - extern const int UNSUPPORTED_JOIN_KEYS; - extern const int LOGICAL_ERROR; -} - /// Inserting an element into a hash table of the form `key -> reference to a string`, which will then be used by JOIN. template struct Inserter @@ -64,7 +57,6 @@ struct Inserter } }; - /// MapsTemplate is one of MapsOne, MapsAll and MapsAsof template class HashJoinMethods @@ -81,27 +73,7 @@ public: ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, - bool & is_inserted) - { - switch (type) - { - case HashJoin::Type::EMPTY: - [[fallthrough]]; - case HashJoin::Type::CROSS: - /// Do nothing. 
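The join changes above extend the hash join so that ANY, SEMI and ANTI strictness also accept a mixed ON clause (hash equality keys plus an extra inequality condition), which was previously only allowed for ALL joins. A minimal sketch of the query shape, using the `t1`/`t2` tables from the test above:

```sql
SET join_algorithm = 'hash';

-- Equality key on `key` plus an inequality residual on the payload columns.
SELECT t1.*, t2.*
FROM t1
LEFT ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a
ORDER BY (t1.key, t1.attr, t2.key, t2.attr);
```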
We will only save block, and it is enough - is_inserted = true; - return 0; - - #define M(TYPE) \ - case HashJoin::Type::TYPE: \ - return insertFromBlockImplTypeCase>::Type>(\ - join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted); \ - break; - - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - } - } + bool & is_inserted); using MapsTemplateVector = std::vector; @@ -110,278 +82,36 @@ public: Block & block, const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, - bool is_join_get = false) - { - constexpr JoinFeatures join_features; - - std::vector join_on_keys; - const auto & onexprs = join.table_join->getClauses(); - for (size_t i = 0; i < onexprs.size(); ++i) - { - const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; - join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); - } - size_t existing_columns = block.columns(); - - /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. - * Because if they are constants, then in the "not joined" rows, they may have different values - * - default values, which can differ from the values of these constants. - */ - if constexpr (join_features.right || join_features.full) - { - materializeBlockInplace(block); - } - - /** For LEFT/INNER JOIN, the saved blocks do not contain keys. - * For FULL/RIGHT JOIN, the saved blocks contain keys; - * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. - * For ASOF, the last column is used as the ASOF column - */ - AddedColumns added_columns( - block, - block_with_columns_to_add, - join.savedBlockSample(), - join, - std::move(join_on_keys), - join.table_join->getMixedJoinExpression(), - join_features.is_asof_join, - is_join_get); - - bool has_required_right_keys = (join.required_right_keys.columns() != 0); - added_columns.need_filter = join_features.need_filter || has_required_right_keys; - added_columns.max_joined_block_rows = join.max_joined_block_rows; - if (!added_columns.max_joined_block_rows) - added_columns.max_joined_block_rows = std::numeric_limits::max(); - else - added_columns.reserve(join_features.need_replication); - - size_t num_joined = switchJoinRightColumns(maps_, added_columns, join.data->type, *join.used_flags); - /// Do not hold memory for join_on_keys anymore - added_columns.join_on_keys.clear(); - Block remaining_block = sliceBlock(block, num_joined); - - added_columns.buildOutput(); - for (size_t i = 0; i < added_columns.size(); ++i) - block.insert(added_columns.moveColumn(i)); - - std::vector right_keys_to_replicate [[maybe_unused]]; - - if constexpr (join_features.need_filter) - { - /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. - for (size_t i = 0; i < existing_columns; ++i) - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(added_columns.filter, -1); - - /// Add join key columns from right block if needed using value from left table because of equality - for (size_t i = 0; i < join.required_right_keys.columns(); ++i) - { - const auto & right_key = join.required_right_keys.getByPosition(i); - /// asof column is already in block. 
- if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) - continue; - - const auto & left_column = block.getByName(join.required_right_keys_sources[i]); - const auto & right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); - auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); - block.insert(std::move(right_col)); - } - } - else if (has_required_right_keys) - { - /// Add join key columns from right block if needed. - for (size_t i = 0; i < join.required_right_keys.columns(); ++i) - { - const auto & right_key = join.required_right_keys.getByPosition(i); - auto right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); - /// asof column is already in block. - if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) - continue; - - const auto & left_column = block.getByName(join.required_right_keys_sources[i]); - auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); - block.insert(std::move(right_col)); - - if constexpr (join_features.need_replication) - right_keys_to_replicate.push_back(block.getPositionByName(right_col_name)); - } - } - - if constexpr (join_features.need_replication) - { - std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; - - /// If ALL ... JOIN - we replicate all the columns except the new ones. - for (size_t i = 0; i < existing_columns; ++i) - { - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); - } - - /// Replicate additional right keys - for (size_t pos : right_keys_to_replicate) - { - block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); - } - } - - return remaining_block; - } - + bool is_join_get = false); private: template - static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes) - { - if constexpr (is_asof_join) - { - auto key_column_copy = key_columns; - auto key_size_copy = key_sizes; - key_column_copy.pop_back(); - key_size_copy.pop_back(); - return KeyGetter(key_column_copy, key_size_copy, nullptr); - } - else - return KeyGetter(key_columns, key_sizes, nullptr); - } + static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes); template - static size_t NO_INLINE insertFromBlockImplTypeCase( + static size_t insertFromBlockImplTypeCase( HashJoin & join, HashMap & map, size_t rows, const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted) - { - [[maybe_unused]] constexpr bool mapped_one = std::is_same_v; - constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; - - const IColumn * asof_column [[maybe_unused]] = nullptr; - if constexpr (is_asof_join) - asof_column = key_columns.back(); - - auto key_getter = createKeyGetter(key_columns, key_sizes); - - /// For ALL and ASOF join always insert values - is_inserted = !mapped_one || is_asof_join; - - for (size_t i = 0; i < rows; ++i) - { - if (null_map && (*null_map)[i]) - { - /// nulls are not inserted into hash table, - /// keep them for RIGHT and FULL joins - is_inserted = true; - continue; - } - - /// Check condition for right table from ON section - if (join_mask && !(*join_mask)[i]) - continue; - - if constexpr (is_asof_join) - 
Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, *asof_column); - else if constexpr (mapped_one) - is_inserted |= Inserter::insertOne(join, map, key_getter, stored_block, i, pool); - else - Inserter::insertAll(join, map, key_getter, stored_block, i, pool); - } - return map.getBufferSizeInCells(); - } + const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted); template static size_t switchJoinRightColumns( const std::vector & mapv, AddedColumns & added_columns, HashJoin::Type type, - JoinStuff::JoinUsedFlags & used_flags) - { - constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; - switch (type) - { - case HashJoin::Type::EMPTY: { - if constexpr (!is_asof_join) - { - using KeyGetter = KeyGetterEmpty; - std::vector key_getter_vector; - key_getter_vector.emplace_back(); - - using MapTypeVal = typename KeyGetter::MappedType; - std::vector a_map_type_vector; - a_map_type_vector.emplace_back(); - return joinRightColumnsSwitchNullability( - std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); - } - throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys. Type: {}", type); - } - #define M(TYPE) \ - case HashJoin::Type::TYPE: \ - { \ - using MapTypeVal = const typename std::remove_reference_t::element_type; \ - using KeyGetter = typename KeyGetterForType::Type; \ - std::vector a_map_type_vector(mapv.size()); \ - std::vector key_getter_vector; \ - for (size_t d = 0; d < added_columns.join_on_keys.size(); ++d) \ - { \ - const auto & join_on_key = added_columns.join_on_keys[d]; \ - a_map_type_vector[d] = mapv[d]->TYPE.get(); \ - key_getter_vector.push_back(std::move(createKeyGetter(join_on_key.key_columns, join_on_key.key_sizes))); \ - } \ - return joinRightColumnsSwitchNullability( \ - std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); \ - } - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - - default: - throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", type); - } - } + JoinStuff::JoinUsedFlags & used_flags); template static size_t joinRightColumnsSwitchNullability( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) - { - if (added_columns.need_filter) - { - return joinRightColumnsSwitchMultipleDisjuncts( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); - } - else - { - return joinRightColumnsSwitchMultipleDisjuncts( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); - } - } + JoinStuff::JoinUsedFlags & used_flags); template static size_t joinRightColumnsSwitchMultipleDisjuncts( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) - { - constexpr JoinFeatures join_features; - if constexpr (join_features.is_maps_all) - { - if (added_columns.additional_filter_expression) - { - bool mark_per_row_used = join_features.right || join_features.full || mapv.size() > 1; - return joinRightColumnsWithAddtitionalFilter( - std::forward>(key_getter_vector), - mapv, - added_columns, - used_flags, - need_filter, - mark_per_row_used); - } - } - - if (added_columns.additional_filter_expression) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Additional filter expression is not supported for this JOIN"); - - return mapv.size() > 1 ? 
joinRightColumns( - std::forward>(key_getter_vector), mapv, added_columns, used_flags) - : joinRightColumns( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); - } + JoinStuff::JoinUsedFlags & used_flags); /// Joins right table columns which indexes are present in right_indexes using specified map. /// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). @@ -390,267 +120,17 @@ private: std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) - { - constexpr JoinFeatures join_features; - - size_t rows = added_columns.rows_to_add; - if constexpr (need_filter) - added_columns.filter = IColumn::Filter(rows, 0); - - Arena pool; - - if constexpr (join_features.need_replication) - added_columns.offsets_to_replicate = std::make_unique(rows); - - IColumn::Offset current_offset = 0; - size_t max_joined_block_rows = added_columns.max_joined_block_rows; - size_t i = 0; - for (; i < rows; ++i) - { - if constexpr (join_features.need_replication) - { - if (unlikely(current_offset >= max_joined_block_rows)) - { - added_columns.offsets_to_replicate->resize_assume_reserved(i); - added_columns.filter.resize_assume_reserved(i); - break; - } - } - - bool right_row_found = false; - - KnownRowsHolder known_rows; - for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx) - { - const auto & join_keys = added_columns.join_on_keys[onexpr_idx]; - if (join_keys.null_map && (*join_keys.null_map)[i]) - continue; - - bool row_acceptable = !join_keys.isRowFiltered(i); - using FindResult = typename KeyGetter::FindResult; - auto find_result = row_acceptable ? key_getter_vector[onexpr_idx].findKey(*(mapv[onexpr_idx]), i, pool) : FindResult(); - - if (find_result.isFound()) - { - right_row_found = true; - auto & mapped = find_result.getMapped(); - if constexpr (join_features.is_asof_join) - { - const IColumn & left_asof_key = added_columns.leftAsofKey(); - - auto row_ref = mapped->findAsof(left_asof_key, i); - if (row_ref.block) - { - setUsed(added_columns.filter, i); - if constexpr (flag_per_row) - used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); - else - used_flags.template setUsed(find_result); - - added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing); - } - else - addNotFoundRow(added_columns, current_offset); - } - else if constexpr (join_features.is_all_join) - { - setUsed(added_columns.filter, i); - used_flags.template setUsed(find_result); - auto used_flags_opt = join_features.need_flags ? &used_flags : nullptr; - addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); - } - else if constexpr ((join_features.is_any_join || join_features.is_semi_join) && join_features.right) - { - /// Use first appeared left key + it needs left columns replication - bool used_once = used_flags.template setUsedOnce(find_result); - if (used_once) - { - auto used_flags_opt = join_features.need_flags ? 
&used_flags : nullptr; - setUsed(added_columns.filter, i); - addFoundRowAll( - mapped, added_columns, current_offset, known_rows, used_flags_opt); - } - } - else if constexpr (join_features.is_any_join && join_features.inner) - { - bool used_once = used_flags.template setUsedOnce(find_result); - - /// Use first appeared left key only - if (used_once) - { - setUsed(added_columns.filter, i); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); - } - - break; - } - else if constexpr (join_features.is_any_join && join_features.full) - { - /// TODO - } - else if constexpr (join_features.is_anti_join) - { - if constexpr (join_features.right && join_features.need_flags) - used_flags.template setUsed(find_result); - } - else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) - { - setUsed(added_columns.filter, i); - used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); - - if (join_features.is_any_or_semi_join) - { - break; - } - } - } - } - - if (!right_row_found) - { - if constexpr (join_features.is_anti_join && join_features.left) - setUsed(added_columns.filter, i); - addNotFoundRow(added_columns, current_offset); - } - - if constexpr (join_features.need_replication) - { - (*added_columns.offsets_to_replicate)[i] = current_offset; - } - } - - added_columns.applyLazyDefaults(); - return i; - } + JoinStuff::JoinUsedFlags & used_flags); template - static void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]) - { - if constexpr (need_filter) - filter[pos] = 1; - } + static void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]); template static ColumnPtr buildAdditionalFilter( size_t left_start_row, const std::vector & selected_rows, const std::vector & row_replicate_offset, - AddedColumns & added_columns) - { - ColumnPtr result_column; - do - { - if (selected_rows.empty()) - { - result_column = ColumnUInt8::create(); - break; - } - const Block & sample_right_block = *selected_rows.begin()->block; - if (!sample_right_block || !added_columns.additional_filter_expression) - { - auto filter = ColumnUInt8::create(); - filter->insertMany(1, selected_rows.size()); - result_column = std::move(filter); - break; - } - - auto required_cols = added_columns.additional_filter_expression->getRequiredColumnsWithTypes(); - if (required_cols.empty()) - { - Block block; - added_columns.additional_filter_expression->execute(block); - result_column = block.getByPosition(0).column->cloneResized(selected_rows.size()); - break; - } - NameSet required_column_names; - for (auto & col : required_cols) - required_column_names.insert(col.name); - - Block executed_block; - size_t right_col_pos = 0; - for (const auto & col : sample_right_block.getColumnsWithTypeAndName()) - { - if (required_column_names.contains(col.name)) - { - auto new_col = col.column->cloneEmpty(); - for (const auto & selected_row : selected_rows) - { - const auto & src_col = selected_row.block->getByPosition(right_col_pos); - new_col->insertFrom(*src_col.column, selected_row.row_num); - } - executed_block.insert({std::move(new_col), col.type, col.name}); - } - right_col_pos += 1; - } - if (!executed_block) - { - result_column = ColumnUInt8::create(); - break; - } - - for (const auto & col_name : required_column_names) - { - const auto * src_col = added_columns.left_block.findByName(col_name); - if (!src_col) - continue; - auto new_col = src_col->column->cloneEmpty(); - size_t 
prev_left_offset = 0; - for (size_t i = 1; i < row_replicate_offset.size(); ++i) - { - const size_t & left_offset = row_replicate_offset[i]; - size_t rows = left_offset - prev_left_offset; - if (rows) - new_col->insertManyFrom(*src_col->column, left_start_row + i - 1, rows); - prev_left_offset = left_offset; - } - executed_block.insert({std::move(new_col), src_col->type, col_name}); - } - if (!executed_block) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "required columns: [{}], but not found any in left/right table. right table: {}, left table: {}", - required_cols.toString(), - sample_right_block.dumpNames(), - added_columns.left_block.dumpNames()); - } - - for (const auto & col : executed_block.getColumnsWithTypeAndName()) - if (!col.column || !col.type) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal nullptr column in input block: {}", executed_block.dumpStructure()); - - added_columns.additional_filter_expression->execute(executed_block); - result_column = executed_block.getByPosition(0).column->convertToFullColumnIfConst(); - executed_block.clear(); - } while (false); - - result_column = result_column->convertToFullIfNeeded(); - if (result_column->isNullable()) - { - /// Convert Nullable(UInt8) to UInt8 ensuring that nulls are zeros - /// Trying to avoid copying data, since we are the only owner of the column. - ColumnPtr mask_column = assert_cast(*result_column).getNullMapColumnPtr(); - - MutableColumnPtr mutable_column; - { - ColumnPtr nested_column = assert_cast(*result_column).getNestedColumnPtr(); - result_column.reset(); - mutable_column = IColumn::mutate(std::move(nested_column)); - } - - auto & column_data = assert_cast(*mutable_column).getData(); - const auto & mask_column_data = assert_cast(*mask_column).getData(); - for (size_t i = 0; i < column_data.size(); ++i) - { - if (mask_column_data[i]) - column_data[i] = 0; - } - return mutable_column; - } - return result_column; - } + AddedColumns & added_columns); /// First to collect all matched rows refs by join keys, then filter out rows which are not true in additional filter expression. 
template @@ -660,241 +140,10 @@ private: AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags [[maybe_unused]], bool need_filter [[maybe_unused]], - bool flag_per_row [[maybe_unused]]) - { - constexpr JoinFeatures join_features; - size_t left_block_rows = added_columns.rows_to_add; - if (need_filter) - added_columns.filter = IColumn::Filter(left_block_rows, 0); - - std::unique_ptr pool; - - if constexpr (join_features.need_replication) - added_columns.offsets_to_replicate = std::make_unique(left_block_rows); - - std::vector row_replicate_offset; - row_replicate_offset.reserve(left_block_rows); - - using FindResult = typename KeyGetter::FindResult; - size_t max_joined_block_rows = added_columns.max_joined_block_rows; - size_t left_row_iter = 0; - PreSelectedRows selected_rows; - selected_rows.reserve(left_block_rows); - std::vector find_results; - find_results.reserve(left_block_rows); - bool exceeded_max_block_rows = false; - IColumn::Offset total_added_rows = 0; - IColumn::Offset current_added_rows = 0; - - auto collect_keys_matched_rows_refs = [&]() - { - pool = std::make_unique(); - find_results.clear(); - row_replicate_offset.clear(); - row_replicate_offset.push_back(0); - current_added_rows = 0; - selected_rows.clear(); - for (; left_row_iter < left_block_rows; ++left_row_iter) - { - if constexpr (join_features.need_replication) - { - if (unlikely(total_added_rows + current_added_rows >= max_joined_block_rows)) - { - break; - } - } - KnownRowsHolder all_flag_known_rows; - KnownRowsHolder single_flag_know_rows; - for (size_t join_clause_idx = 0; join_clause_idx < added_columns.join_on_keys.size(); ++join_clause_idx) - { - const auto & join_keys = added_columns.join_on_keys[join_clause_idx]; - if (join_keys.null_map && (*join_keys.null_map)[left_row_iter]) - continue; - - bool row_acceptable = !join_keys.isRowFiltered(left_row_iter); - auto find_result = row_acceptable - ? key_getter_vector[join_clause_idx].findKey(*(mapv[join_clause_idx]), left_row_iter, *pool) - : FindResult(); - - if (find_result.isFound()) - { - auto & mapped = find_result.getMapped(); - find_results.push_back(find_result); - if (flag_per_row) - addFoundRowAll(mapped, selected_rows, current_added_rows, all_flag_known_rows, nullptr); - else - addFoundRowAll(mapped, selected_rows, current_added_rows, single_flag_know_rows, nullptr); - } - } - row_replicate_offset.push_back(current_added_rows); - } - }; - - auto copy_final_matched_rows = [&](size_t left_start_row, ColumnPtr filter_col) - { - const PaddedPODArray & filter_flags = assert_cast(*filter_col).getData(); - - size_t prev_replicated_row = 0; - auto selected_right_row_it = selected_rows.begin(); - size_t find_result_index = 0; - for (size_t i = 1, n = row_replicate_offset.size(); i < n; ++i) - { - bool any_matched = false; - /// For right join, flag_per_row is true, we need mark used flags for each row. 
- if (flag_per_row) - { - for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) - { - if (filter_flags[replicated_row]) - { - any_matched = true; - if constexpr (join_features.is_semi_join || join_features.is_any_join) - { - auto used_once = used_flags.template setUsedOnce(selected_right_row_it->block, selected_right_row_it->row_num, 0); - if (used_once) - { - total_added_rows += 1; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); - } - } - else if constexpr (join_features.is_anti_join) - { - if constexpr (join_features.right && join_features.need_flags) - used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); - } - else - { - total_added_rows += 1; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); - used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); - } - } - ++selected_right_row_it; - } - } - else - { - for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) - { - if constexpr (join_features.is_anti_join) - { - any_matched |= filter_flags[replicated_row]; - } - else if constexpr (join_features.need_replication) - { - if (filter_flags[replicated_row]) - { - any_matched = true; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); - total_added_rows += 1; - } - ++selected_right_row_it; - } - else - { - if (filter_flags[replicated_row]) - { - any_matched = true; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); - total_added_rows += 1; - selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row; - break; - } - else - ++selected_right_row_it; - } - } - } - - - if constexpr (join_features.is_anti_join) - { - if (!any_matched) - { - if constexpr (join_features.left) - if (need_filter) - setUsed(added_columns.filter, left_start_row + i - 1); - addNotFoundRow(added_columns, total_added_rows); - } - } - else - { - if (!any_matched) - { - addNotFoundRow(added_columns, total_added_rows); - } - else - { - if (!flag_per_row) - used_flags.template setUsed(find_results[find_result_index]); - if (need_filter) - setUsed(added_columns.filter, left_start_row + i - 1); - if constexpr (join_features.add_missing) - added_columns.applyLazyDefaults(); - } - } - find_result_index += (prev_replicated_row != row_replicate_offset[i]); - - if constexpr (join_features.need_replication) - { - (*added_columns.offsets_to_replicate)[left_start_row + i - 1] = total_added_rows; - } - prev_replicated_row = row_replicate_offset[i]; - } - }; - - while (left_row_iter < left_block_rows && !exceeded_max_block_rows) - { - auto left_start_row = left_row_iter; - collect_keys_matched_rows_refs(); - if (selected_rows.size() != current_added_rows || row_replicate_offset.size() != left_row_iter - left_start_row + 1) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Sizes are mismatched. 
selected_rows.size:{}, current_added_rows:{}, row_replicate_offset.size:{}, left_row_iter: {}, " - "left_start_row: {}", - selected_rows.size(), - current_added_rows, - row_replicate_offset.size(), - left_row_iter, - left_start_row); - } - auto filter_col = buildAdditionalFilter(left_start_row, selected_rows, row_replicate_offset, added_columns); - copy_final_matched_rows(left_start_row, filter_col); - - if constexpr (join_features.need_replication) - { - // Add a check for current_added_rows to avoid run the filter expression on too small size batch. - if (total_added_rows >= max_joined_block_rows || current_added_rows < 1024) - exceeded_max_block_rows = true; - } - } - - if constexpr (join_features.need_replication) - { - added_columns.offsets_to_replicate->resize_assume_reserved(left_row_iter); - added_columns.filter.resize_assume_reserved(left_row_iter); - } - added_columns.applyLazyDefaults(); - return left_row_iter; - } + bool flag_per_row [[maybe_unused]]); /// Cut first num_rows rows from block in place and returns block with remaining rows - static Block sliceBlock(Block & block, size_t num_rows) - { - size_t total_rows = block.rows(); - if (num_rows >= total_rows) - return {}; - size_t remaining_rows = total_rows - num_rows; - Block remaining_block = block.cloneEmpty(); - for (size_t i = 0; i < block.columns(); ++i) - { - auto & col = block.getByPosition(i); - remaining_block.getByPosition(i).column = col.column->cut(num_rows, remaining_rows); - col.column = col.column->cut(0, num_rows); - } - return remaining_block; - } + static Block sliceBlock(Block & block, size_t num_rows); /** Since we do not store right key columns, * this function is used to copy left key columns to right key columns. @@ -909,62 +158,11 @@ private: const DataTypePtr & right_key_type, const String & renamed_right_column, const ColumnWithTypeAndName & left_column, - const IColumn::Filter * null_map_filter = nullptr) - { - ColumnWithTypeAndName right_column = left_column; - right_column.name = renamed_right_column; + const IColumn::Filter * null_map_filter = nullptr); - if (null_map_filter) - right_column.column = JoinCommon::filterWithBlanks(right_column.column, *null_map_filter); + static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable); - bool should_be_nullable = isNullableOrLowCardinalityNullable(right_key_type); - if (null_map_filter) - correctNullabilityInplace(right_column, should_be_nullable, *null_map_filter); - else - correctNullabilityInplace(right_column, should_be_nullable); - - if (!right_column.type->equals(*right_key_type)) - { - right_column.column = castColumnAccurate(right_column, right_key_type); - right_column.type = right_key_type; - } - - right_column.column = right_column.column->convertToFullColumnIfConst(); - return right_column; - } - - static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable) - { - if (nullable) - { - JoinCommon::convertColumnToNullable(column); - } - else - { - /// We have to replace values masked by NULLs with defaults. 
- if (column.column) - if (const auto * nullable_column = checkAndGetColumn(&*column.column)) - column.column = JoinCommon::filterWithBlanks(column.column, nullable_column->getNullMapColumn().getData(), true); - - JoinCommon::removeColumnNullability(column); - } - } - - static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) - { - if (nullable) - { - JoinCommon::convertColumnToNullable(column); - if (column.type->isNullable() && !negative_null_map.empty()) - { - MutableColumnPtr mutable_column = IColumn::mutate(std::move(column.column)); - assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); - column.column = std::move(mutable_column); - } - } - else - JoinCommon::removeColumnNullability(column); - } + static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map); }; /// Instantiate template class ahead in different .cpp files to avoid `too large translation unit`. diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h new file mode 100644 index 00000000000..2bf5f6aef4a --- /dev/null +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -0,0 +1,912 @@ +#pragma once +#include +namespace DB +{ +namespace ErrorCodes +{ + extern const int UNSUPPORTED_JOIN_KEYS; + extern const int LOGICAL_ERROR; +} +template +size_t HashJoinMethods::insertFromBlockImpl( + HashJoin & join, + HashJoin::Type type, + MapsTemplate & maps, + size_t rows, + const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, + Block * stored_block, + ConstNullMapPtr null_map, + UInt8ColumnDataPtr join_mask, + Arena & pool, + bool & is_inserted) +{ + switch (type) + { + case HashJoin::Type::EMPTY: + [[fallthrough]]; + case HashJoin::Type::CROSS: + /// Do nothing. We will only save block, and it is enough + is_inserted = true; + return 0; + +#define M(TYPE) \ + case HashJoin::Type::TYPE: \ + return insertFromBlockImplTypeCase< \ + typename KeyGetterForType>::Type>( \ + join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted); \ + break; + + APPLY_FOR_JOIN_VARIANTS(M) +#undef M + } +} + +template +Block HashJoinMethods::joinBlockImpl( + const HashJoin & join, Block & block, const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, bool is_join_get) +{ + constexpr JoinFeatures join_features; + + std::vector join_on_keys; + const auto & onexprs = join.table_join->getClauses(); + for (size_t i = 0; i < onexprs.size(); ++i) + { + const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; + join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); + } + size_t existing_columns = block.columns(); + + /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. + * Because if they are constants, then in the "not joined" rows, they may have different values + * - default values, which can differ from the values of these constants. + */ + if constexpr (join_features.right || join_features.full) + { + materializeBlockInplace(block); + } + + /** For LEFT/INNER JOIN, the saved blocks do not contain keys. + * For FULL/RIGHT JOIN, the saved blocks contain keys; + * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. 
+ * For ASOF, the last column is used as the ASOF column + */ + AddedColumns added_columns( + block, + block_with_columns_to_add, + join.savedBlockSample(), + join, + std::move(join_on_keys), + join.table_join->getMixedJoinExpression(), + join_features.is_asof_join, + is_join_get); + + bool has_required_right_keys = (join.required_right_keys.columns() != 0); + added_columns.need_filter = join_features.need_filter || has_required_right_keys; + added_columns.max_joined_block_rows = join.max_joined_block_rows; + if (!added_columns.max_joined_block_rows) + added_columns.max_joined_block_rows = std::numeric_limits::max(); + else + added_columns.reserve(join_features.need_replication); + + size_t num_joined = switchJoinRightColumns(maps_, added_columns, join.data->type, *join.used_flags); + /// Do not hold memory for join_on_keys anymore + added_columns.join_on_keys.clear(); + Block remaining_block = sliceBlock(block, num_joined); + + added_columns.buildOutput(); + for (size_t i = 0; i < added_columns.size(); ++i) + block.insert(added_columns.moveColumn(i)); + + std::vector right_keys_to_replicate [[maybe_unused]]; + + if constexpr (join_features.need_filter) + { + /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. + for (size_t i = 0; i < existing_columns; ++i) + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(added_columns.filter, -1); + + /// Add join key columns from right block if needed using value from left table because of equality + for (size_t i = 0; i < join.required_right_keys.columns(); ++i) + { + const auto & right_key = join.required_right_keys.getByPosition(i); + /// asof column is already in block. + if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) + continue; + + const auto & left_column = block.getByName(join.required_right_keys_sources[i]); + const auto & right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); + auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); + block.insert(std::move(right_col)); + } + } + else if (has_required_right_keys) + { + /// Add join key columns from right block if needed. + for (size_t i = 0; i < join.required_right_keys.columns(); ++i) + { + const auto & right_key = join.required_right_keys.getByPosition(i); + auto right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); + /// asof column is already in block. + if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) + continue; + + const auto & left_column = block.getByName(join.required_right_keys_sources[i]); + auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); + block.insert(std::move(right_col)); + + if constexpr (join_features.need_replication) + right_keys_to_replicate.push_back(block.getPositionByName(right_col_name)); + } + } + + if constexpr (join_features.need_replication) + { + std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; + + /// If ALL ... JOIN - we replicate all the columns except the new ones. 
+ for (size_t i = 0; i < existing_columns; ++i) + { + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); + } + + /// Replicate additional right keys + for (size_t pos : right_keys_to_replicate) + { + block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); + } + } + + return remaining_block; +} + +template +template +KeyGetter HashJoinMethods::createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes) +{ + if constexpr (is_asof_join) + { + auto key_column_copy = key_columns; + auto key_size_copy = key_sizes; + key_column_copy.pop_back(); + key_size_copy.pop_back(); + return KeyGetter(key_column_copy, key_size_copy, nullptr); + } + else + return KeyGetter(key_columns, key_sizes, nullptr); +} + +template +template +size_t HashJoinMethods::insertFromBlockImplTypeCase( + HashJoin & join, + HashMap & map, + size_t rows, + const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, + Block * stored_block, + ConstNullMapPtr null_map, + UInt8ColumnDataPtr join_mask, + Arena & pool, + bool & is_inserted) +{ + [[maybe_unused]] constexpr bool mapped_one = std::is_same_v; + constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; + + const IColumn * asof_column [[maybe_unused]] = nullptr; + if constexpr (is_asof_join) + asof_column = key_columns.back(); + + auto key_getter = createKeyGetter(key_columns, key_sizes); + + /// For ALL and ASOF join always insert values + is_inserted = !mapped_one || is_asof_join; + + for (size_t i = 0; i < rows; ++i) + { + if (null_map && (*null_map)[i]) + { + /// nulls are not inserted into hash table, + /// keep them for RIGHT and FULL joins + is_inserted = true; + continue; + } + + /// Check condition for right table from ON section + if (join_mask && !(*join_mask)[i]) + continue; + + if constexpr (is_asof_join) + Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, *asof_column); + else if constexpr (mapped_one) + is_inserted |= Inserter::insertOne(join, map, key_getter, stored_block, i, pool); + else + Inserter::insertAll(join, map, key_getter, stored_block, i, pool); + } + return map.getBufferSizeInCells(); +} + +template +template +size_t HashJoinMethods::switchJoinRightColumns( + const std::vector & mapv, + AddedColumns & added_columns, + HashJoin::Type type, + JoinStuff::JoinUsedFlags & used_flags) +{ + constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; + switch (type) + { + case HashJoin::Type::EMPTY: { + if constexpr (!is_asof_join) + { + using KeyGetter = KeyGetterEmpty; + std::vector key_getter_vector; + key_getter_vector.emplace_back(); + + using MapTypeVal = typename KeyGetter::MappedType; + std::vector a_map_type_vector; + a_map_type_vector.emplace_back(); + return joinRightColumnsSwitchNullability( + std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); + } + throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys. 
Type: {}", type); + } +#define M(TYPE) \ + case HashJoin::Type::TYPE: { \ + using MapTypeVal = const typename std::remove_reference_t::element_type; \ + using KeyGetter = typename KeyGetterForType::Type; \ + std::vector a_map_type_vector(mapv.size()); \ + std::vector key_getter_vector; \ + for (size_t d = 0; d < added_columns.join_on_keys.size(); ++d) \ + { \ + const auto & join_on_key = added_columns.join_on_keys[d]; \ + a_map_type_vector[d] = mapv[d]->TYPE.get(); \ + key_getter_vector.push_back( \ + std::move(createKeyGetter(join_on_key.key_columns, join_on_key.key_sizes))); \ + } \ + return joinRightColumnsSwitchNullability(std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); \ + } + APPLY_FOR_JOIN_VARIANTS(M) +#undef M + + default: + throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", type); + } +} + +template +template +size_t HashJoinMethods::joinRightColumnsSwitchNullability( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags) +{ + if (added_columns.need_filter) + { + return joinRightColumnsSwitchMultipleDisjuncts( + std::forward>(key_getter_vector), mapv, added_columns, used_flags); + } + else + { + return joinRightColumnsSwitchMultipleDisjuncts( + std::forward>(key_getter_vector), mapv, added_columns, used_flags); + } +} + +template +template +size_t HashJoinMethods::joinRightColumnsSwitchMultipleDisjuncts( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags) +{ + constexpr JoinFeatures join_features; + if constexpr (join_features.is_maps_all) + { + if (added_columns.additional_filter_expression) + { + bool mark_per_row_used = join_features.right || join_features.full || mapv.size() > 1; + return joinRightColumnsWithAddtitionalFilter( + std::forward>(key_getter_vector), mapv, added_columns, used_flags, need_filter, mark_per_row_used); + } + } + + if (added_columns.additional_filter_expression) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Additional filter expression is not supported for this JOIN"); + + return mapv.size() > 1 ? joinRightColumns( + std::forward>(key_getter_vector), mapv, added_columns, used_flags) + : joinRightColumns( + std::forward>(key_getter_vector), mapv, added_columns, used_flags); +} + + +/// Joins right table columns which indexes are present in right_indexes using specified map. +/// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
+template +template +size_t HashJoinMethods::joinRightColumns( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags) +{ + constexpr JoinFeatures join_features; + + size_t rows = added_columns.rows_to_add; + if constexpr (need_filter) + added_columns.filter = IColumn::Filter(rows, 0); + + Arena pool; + + if constexpr (join_features.need_replication) + added_columns.offsets_to_replicate = std::make_unique(rows); + + IColumn::Offset current_offset = 0; + size_t max_joined_block_rows = added_columns.max_joined_block_rows; + size_t i = 0; + for (; i < rows; ++i) + { + if constexpr (join_features.need_replication) + { + if (unlikely(current_offset >= max_joined_block_rows)) + { + added_columns.offsets_to_replicate->resize_assume_reserved(i); + added_columns.filter.resize_assume_reserved(i); + break; + } + } + + bool right_row_found = false; + + KnownRowsHolder known_rows; + for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx) + { + const auto & join_keys = added_columns.join_on_keys[onexpr_idx]; + if (join_keys.null_map && (*join_keys.null_map)[i]) + continue; + + bool row_acceptable = !join_keys.isRowFiltered(i); + using FindResult = typename KeyGetter::FindResult; + auto find_result = row_acceptable ? key_getter_vector[onexpr_idx].findKey(*(mapv[onexpr_idx]), i, pool) : FindResult(); + + if (find_result.isFound()) + { + right_row_found = true; + auto & mapped = find_result.getMapped(); + if constexpr (join_features.is_asof_join) + { + const IColumn & left_asof_key = added_columns.leftAsofKey(); + + auto row_ref = mapped->findAsof(left_asof_key, i); + if (row_ref.block) + { + setUsed(added_columns.filter, i); + if constexpr (flag_per_row) + used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); + else + used_flags.template setUsed(find_result); + + added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing); + } + else + addNotFoundRow(added_columns, current_offset); + } + else if constexpr (join_features.is_all_join) + { + setUsed(added_columns.filter, i); + used_flags.template setUsed(find_result); + auto used_flags_opt = join_features.need_flags ? &used_flags : nullptr; + addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); + } + else if constexpr ((join_features.is_any_join || join_features.is_semi_join) && join_features.right) + { + /// Use first appeared left key + it needs left columns replication + bool used_once = used_flags.template setUsedOnce(find_result); + if (used_once) + { + auto used_flags_opt = join_features.need_flags ? 
&used_flags : nullptr; + setUsed(added_columns.filter, i); + addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); + } + } + else if constexpr (join_features.is_any_join && join_features.inner) + { + bool used_once = used_flags.template setUsedOnce(find_result); + + /// Use first appeared left key only + if (used_once) + { + setUsed(added_columns.filter, i); + added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + } + + break; + } + else if constexpr (join_features.is_any_join && join_features.full) + { + /// TODO + } + else if constexpr (join_features.is_anti_join) + { + if constexpr (join_features.right && join_features.need_flags) + used_flags.template setUsed(find_result); + } + else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) + { + setUsed(added_columns.filter, i); + used_flags.template setUsed(find_result); + added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + + if (join_features.is_any_or_semi_join) + { + break; + } + } + } + } + + if (!right_row_found) + { + if constexpr (join_features.is_anti_join && join_features.left) + setUsed(added_columns.filter, i); + addNotFoundRow(added_columns, current_offset); + } + + if constexpr (join_features.need_replication) + { + (*added_columns.offsets_to_replicate)[i] = current_offset; + } + } + + added_columns.applyLazyDefaults(); + return i; +} + +template +template +void HashJoinMethods::setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]) +{ + if constexpr (need_filter) + filter[pos] = 1; +} + +template +template +ColumnPtr HashJoinMethods::buildAdditionalFilter( + size_t left_start_row, + const std::vector & selected_rows, + const std::vector & row_replicate_offset, + AddedColumns & added_columns) +{ + ColumnPtr result_column; + do + { + if (selected_rows.empty()) + { + result_column = ColumnUInt8::create(); + break; + } + const Block & sample_right_block = *selected_rows.begin()->block; + if (!sample_right_block || !added_columns.additional_filter_expression) + { + auto filter = ColumnUInt8::create(); + filter->insertMany(1, selected_rows.size()); + result_column = std::move(filter); + break; + } + + auto required_cols = added_columns.additional_filter_expression->getRequiredColumnsWithTypes(); + if (required_cols.empty()) + { + Block block; + added_columns.additional_filter_expression->execute(block); + result_column = block.getByPosition(0).column->cloneResized(selected_rows.size()); + break; + } + NameSet required_column_names; + for (auto & col : required_cols) + required_column_names.insert(col.name); + + Block executed_block; + size_t right_col_pos = 0; + for (const auto & col : sample_right_block.getColumnsWithTypeAndName()) + { + if (required_column_names.contains(col.name)) + { + auto new_col = col.column->cloneEmpty(); + for (const auto & selected_row : selected_rows) + { + const auto & src_col = selected_row.block->getByPosition(right_col_pos); + new_col->insertFrom(*src_col.column, selected_row.row_num); + } + executed_block.insert({std::move(new_col), col.type, col.name}); + } + right_col_pos += 1; + } + if (!executed_block) + { + result_column = ColumnUInt8::create(); + break; + } + + for (const auto & col_name : required_column_names) + { + const auto * src_col = added_columns.left_block.findByName(col_name); + if (!src_col) + continue; + auto new_col = src_col->column->cloneEmpty(); + size_t prev_left_offset = 0; + for (size_t i = 1; i < row_replicate_offset.size(); ++i) + { + const size_t & 
left_offset = row_replicate_offset[i]; + size_t rows = left_offset - prev_left_offset; + if (rows) + new_col->insertManyFrom(*src_col->column, left_start_row + i - 1, rows); + prev_left_offset = left_offset; + } + executed_block.insert({std::move(new_col), src_col->type, col_name}); + } + if (!executed_block) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "required columns: [{}], but not found any in left/right table. right table: {}, left table: {}", + required_cols.toString(), + sample_right_block.dumpNames(), + added_columns.left_block.dumpNames()); + } + + for (const auto & col : executed_block.getColumnsWithTypeAndName()) + if (!col.column || !col.type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal nullptr column in input block: {}", executed_block.dumpStructure()); + + added_columns.additional_filter_expression->execute(executed_block); + result_column = executed_block.getByPosition(0).column->convertToFullColumnIfConst(); + executed_block.clear(); + } while (false); + + result_column = result_column->convertToFullIfNeeded(); + if (result_column->isNullable()) + { + /// Convert Nullable(UInt8) to UInt8 ensuring that nulls are zeros + /// Trying to avoid copying data, since we are the only owner of the column. + ColumnPtr mask_column = assert_cast(*result_column).getNullMapColumnPtr(); + + MutableColumnPtr mutable_column; + { + ColumnPtr nested_column = assert_cast(*result_column).getNestedColumnPtr(); + result_column.reset(); + mutable_column = IColumn::mutate(std::move(nested_column)); + } + + auto & column_data = assert_cast(*mutable_column).getData(); + const auto & mask_column_data = assert_cast(*mask_column).getData(); + for (size_t i = 0; i < column_data.size(); ++i) + { + if (mask_column_data[i]) + column_data[i] = 0; + } + return mutable_column; + } + return result_column; +} + +template +template +size_t HashJoinMethods::joinRightColumnsWithAddtitionalFilter( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags [[maybe_unused]], + bool need_filter [[maybe_unused]], + bool flag_per_row [[maybe_unused]]) +{ + constexpr JoinFeatures join_features; + size_t left_block_rows = added_columns.rows_to_add; + if (need_filter) + added_columns.filter = IColumn::Filter(left_block_rows, 0); + + std::unique_ptr pool; + + if constexpr (join_features.need_replication) + added_columns.offsets_to_replicate = std::make_unique(left_block_rows); + + std::vector row_replicate_offset; + row_replicate_offset.reserve(left_block_rows); + + using FindResult = typename KeyGetter::FindResult; + size_t max_joined_block_rows = added_columns.max_joined_block_rows; + size_t left_row_iter = 0; + PreSelectedRows selected_rows; + selected_rows.reserve(left_block_rows); + std::vector find_results; + find_results.reserve(left_block_rows); + bool exceeded_max_block_rows = false; + IColumn::Offset total_added_rows = 0; + IColumn::Offset current_added_rows = 0; + + auto collect_keys_matched_rows_refs = [&]() + { + pool = std::make_unique(); + find_results.clear(); + row_replicate_offset.clear(); + row_replicate_offset.push_back(0); + current_added_rows = 0; + selected_rows.clear(); + for (; left_row_iter < left_block_rows; ++left_row_iter) + { + if constexpr (join_features.need_replication) + { + if (unlikely(total_added_rows + current_added_rows >= max_joined_block_rows)) + { + break; + } + } + KnownRowsHolder all_flag_known_rows; + KnownRowsHolder single_flag_know_rows; + for (size_t join_clause_idx = 0; 
join_clause_idx < added_columns.join_on_keys.size(); ++join_clause_idx) + { + const auto & join_keys = added_columns.join_on_keys[join_clause_idx]; + if (join_keys.null_map && (*join_keys.null_map)[left_row_iter]) + continue; + + bool row_acceptable = !join_keys.isRowFiltered(left_row_iter); + auto find_result = row_acceptable + ? key_getter_vector[join_clause_idx].findKey(*(mapv[join_clause_idx]), left_row_iter, *pool) + : FindResult(); + + if (find_result.isFound()) + { + auto & mapped = find_result.getMapped(); + find_results.push_back(find_result); + if (flag_per_row) + addFoundRowAll(mapped, selected_rows, current_added_rows, all_flag_known_rows, nullptr); + else + addFoundRowAll(mapped, selected_rows, current_added_rows, single_flag_know_rows, nullptr); + } + } + row_replicate_offset.push_back(current_added_rows); + } + }; + + auto copy_final_matched_rows = [&](size_t left_start_row, ColumnPtr filter_col) + { + const PaddedPODArray & filter_flags = assert_cast(*filter_col).getData(); + + size_t prev_replicated_row = 0; + auto selected_right_row_it = selected_rows.begin(); + size_t find_result_index = 0; + for (size_t i = 1, n = row_replicate_offset.size(); i < n; ++i) + { + bool any_matched = false; + /// For right join, flag_per_row is true, we need mark used flags for each row. + if (flag_per_row) + { + for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) + { + if (filter_flags[replicated_row]) + { + any_matched = true; + if constexpr (join_features.is_semi_join || join_features.is_any_join) + { + auto used_once = used_flags.template setUsedOnce( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + if (used_once) + { + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + } + } + else if constexpr (join_features.is_anti_join) + { + if constexpr (join_features.right && join_features.need_flags) + used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + } + else + { + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + used_flags.template setUsed( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + } + } + ++selected_right_row_it; + } + } + else + { + for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) + { + if constexpr (join_features.is_anti_join) + { + any_matched |= filter_flags[replicated_row]; + } + else if constexpr (join_features.need_replication) + { + if (filter_flags[replicated_row]) + { + any_matched = true; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + total_added_rows += 1; + } + ++selected_right_row_it; + } + else + { + if (filter_flags[replicated_row]) + { + any_matched = true; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + total_added_rows += 1; + selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row; + break; + } + else + ++selected_right_row_it; + } + } + } + + + if constexpr (join_features.is_anti_join) + { + if (!any_matched) + { + if constexpr (join_features.left) + if (need_filter) + setUsed(added_columns.filter, left_start_row + i - 1); + addNotFoundRow(added_columns, total_added_rows); + 
} + } + else + { + if (!any_matched) + { + addNotFoundRow(added_columns, total_added_rows); + } + else + { + if (!flag_per_row) + used_flags.template setUsed(find_results[find_result_index]); + if (need_filter) + setUsed(added_columns.filter, left_start_row + i - 1); + if constexpr (join_features.add_missing) + added_columns.applyLazyDefaults(); + } + } + find_result_index += (prev_replicated_row != row_replicate_offset[i]); + + if constexpr (join_features.need_replication) + { + (*added_columns.offsets_to_replicate)[left_start_row + i - 1] = total_added_rows; + } + prev_replicated_row = row_replicate_offset[i]; + } + }; + + while (left_row_iter < left_block_rows && !exceeded_max_block_rows) + { + auto left_start_row = left_row_iter; + collect_keys_matched_rows_refs(); + if (selected_rows.size() != current_added_rows || row_replicate_offset.size() != left_row_iter - left_start_row + 1) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Sizes are mismatched. selected_rows.size:{}, current_added_rows:{}, row_replicate_offset.size:{}, left_row_iter: {}, " + "left_start_row: {}", + selected_rows.size(), + current_added_rows, + row_replicate_offset.size(), + left_row_iter, + left_start_row); + } + auto filter_col = buildAdditionalFilter(left_start_row, selected_rows, row_replicate_offset, added_columns); + copy_final_matched_rows(left_start_row, filter_col); + + if constexpr (join_features.need_replication) + { + // Add a check for current_added_rows to avoid run the filter expression on too small size batch. + if (total_added_rows >= max_joined_block_rows || current_added_rows < 1024) + exceeded_max_block_rows = true; + } + } + + if constexpr (join_features.need_replication) + { + added_columns.offsets_to_replicate->resize_assume_reserved(left_row_iter); + added_columns.filter.resize_assume_reserved(left_row_iter); + } + added_columns.applyLazyDefaults(); + return left_row_iter; +} + +template +Block HashJoinMethods::sliceBlock(Block & block, size_t num_rows) +{ + size_t total_rows = block.rows(); + if (num_rows >= total_rows) + return {}; + size_t remaining_rows = total_rows - num_rows; + Block remaining_block = block.cloneEmpty(); + for (size_t i = 0; i < block.columns(); ++i) + { + auto & col = block.getByPosition(i); + remaining_block.getByPosition(i).column = col.column->cut(num_rows, remaining_rows); + col.column = col.column->cut(0, num_rows); + } + return remaining_block; +} + +template +ColumnWithTypeAndName HashJoinMethods::copyLeftKeyColumnToRight( + const DataTypePtr & right_key_type, + const String & renamed_right_column, + const ColumnWithTypeAndName & left_column, + const IColumn::Filter * null_map_filter) +{ + ColumnWithTypeAndName right_column = left_column; + right_column.name = renamed_right_column; + + if (null_map_filter) + right_column.column = JoinCommon::filterWithBlanks(right_column.column, *null_map_filter); + + bool should_be_nullable = isNullableOrLowCardinalityNullable(right_key_type); + if (null_map_filter) + correctNullabilityInplace(right_column, should_be_nullable, *null_map_filter); + else + correctNullabilityInplace(right_column, should_be_nullable); + + if (!right_column.type->equals(*right_key_type)) + { + right_column.column = castColumnAccurate(right_column, right_key_type); + right_column.type = right_key_type; + } + + right_column.column = right_column.column->convertToFullColumnIfConst(); + return right_column; +} + +template +void HashJoinMethods::correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable) +{ + if (nullable) + { + 
JoinCommon::convertColumnToNullable(column); + } + else + { + /// We have to replace values masked by NULLs with defaults. + if (column.column) + if (const auto * nullable_column = checkAndGetColumn(&*column.column)) + column.column = JoinCommon::filterWithBlanks(column.column, nullable_column->getNullMapColumn().getData(), true); + + JoinCommon::removeColumnNullability(column); + } +} + +template +void HashJoinMethods::correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) +{ + if (nullable) + { + JoinCommon::convertColumnToNullable(column); + if (column.type->isNullable() && !negative_null_map.empty()) + { + MutableColumnPtr mutable_column = IColumn::mutate(std::move(column.column)); + assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); + column.column = std::move(mutable_column); + } + } + else + JoinCommon::removeColumnNullability(column); +} +} + diff --git a/src/Interpreters/HashJoin/InnerHashJoin.cpp b/src/Interpreters/HashJoin/InnerHashJoin.cpp index 85aedf3a8e5..258e3143996 100644 --- a/src/Interpreters/HashJoin/InnerHashJoin.cpp +++ b/src/Interpreters/HashJoin/InnerHashJoin.cpp @@ -1,5 +1,5 @@ -#include +#include namespace DB { diff --git a/src/Interpreters/HashJoin/LeftHashJoin.cpp b/src/Interpreters/HashJoin/LeftHashJoin.cpp index a53ffaac0b5..4e06789570e 100644 --- a/src/Interpreters/HashJoin/LeftHashJoin.cpp +++ b/src/Interpreters/HashJoin/LeftHashJoin.cpp @@ -1,4 +1,4 @@ -#include +#include namespace DB { diff --git a/src/Interpreters/HashJoin/RightHashJoin.cpp b/src/Interpreters/HashJoin/RightHashJoin.cpp index 8e304754f5c..d9d41d7d63c 100644 --- a/src/Interpreters/HashJoin/RightHashJoin.cpp +++ b/src/Interpreters/HashJoin/RightHashJoin.cpp @@ -1,4 +1,4 @@ -#include +#include namespace DB { From bd4d648f63d56d11c729f719f864a8b9985c43c3 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Tue, 23 Jul 2024 10:41:57 +0800 Subject: [PATCH 1811/2361] update doc --- docs/en/sql-reference/statements/select/join.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 96d9d26977d..b228f7025c4 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -186,7 +186,7 @@ Otherwise, you'll get `INVALID_JOIN_ON_EXPRESSION`. ::: -Clickhouse currently supports `ALL INNER/LEFT/RIGHT/FULL JOIN` with inequality conditions in addition to equality conditions. The inequality conditions are supported only for `hash` and `grace_hash` join algorithms. The inequality conditions are not supported with `join_use_nulls`. +Clickhouse currently supports `ALL/ANY/SEMI/ANTI INNER/LEFT/RIGHT/FULL JOIN` with inequality conditions in addition to equality conditions. The inequality conditions are supported only for `hash` and `grace_hash` join algorithms. The inequality conditions are not supported with `join_use_nulls`. 
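As a concrete illustration of the paragraph above (a sketch only: the CREATE TABLE statements are hypothetical and are not taken from this patch's test fixtures), a join that mixes an equality and an inequality condition in the ON clause looks like this:

CREATE TABLE t1 (key UInt64, attr String, a UInt64, b UInt64) ENGINE = Memory;
CREATE TABLE t2 (key UInt64, attr String, a UInt64, b UInt64) ENGINE = Memory;

-- Inequality conditions require the hash or grace_hash join algorithm.
SET join_algorithm = 'hash';

SELECT t1.*, t2.*
FROM t1
LEFT JOIN t2 ON t1.key = t2.key AND t1.a < t2.a
ORDER BY (t1.key, t1.attr, t2.key, t2.attr);
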
**Example** From 58b7ac2264eb6d2ba83d634e1d52b874ca54d9c7 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Wed, 7 Aug 2024 11:36:01 +0800 Subject: [PATCH 1812/2361] update --- src/Interpreters/HashJoin/HashJoin.cpp | 2 +- src/Interpreters/HashJoin/HashJoinMethods.h | 1 + src/Interpreters/HashJoin/InnerHashJoin.cpp | 1 + src/Interpreters/joinDispatch.h | 20 +++-- ..._join_on_inequal_expression_fast.reference | 78 +++++++++++++++++++ ...006_join_on_inequal_expression_fast.sql.j2 | 31 ++++++++ 6 files changed, 127 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 769cb574ed7..4033d1e3035 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -1327,7 +1327,7 @@ void HashJoin::validateAdditionalFilterExpression(ExpressionActionsPtr additiona bool is_supported = ((strictness == JoinStrictness::All) && (isInnerOrLeft(kind) || isRightOrFull(kind))) || ((strictness == JoinStrictness::Semi || strictness == JoinStrictness::Any || strictness == JoinStrictness::Anti) - && (isLeft(kind) || isRight(kind))); + && (isLeft(kind) || isRight(kind))) || (strictness == JoinStrictness::Any && (isInner(kind))); if (!is_supported) { throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index e3b8fbc1737..3b7a67467e3 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -185,6 +185,7 @@ extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; diff --git a/src/Interpreters/HashJoin/InnerHashJoin.cpp b/src/Interpreters/HashJoin/InnerHashJoin.cpp index 258e3143996..69f4c620cb8 100644 --- a/src/Interpreters/HashJoin/InnerHashJoin.cpp +++ b/src/Interpreters/HashJoin/InnerHashJoin.cpp @@ -5,6 +5,7 @@ namespace DB { template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; template class HashJoinMethods; template class HashJoinMethods; diff --git a/src/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h index 982c56e8210..5d4bd8f92e5 100644 --- a/src/Interpreters/joinDispatch.h +++ b/src/Interpreters/joinDispatch.h @@ -12,6 +12,15 @@ namespace DB { +/// HashJoin::MapsOne is more efficient, it only store one row for each key in the map. It is recommended to use it whenever possible. +/// When only need to match only one row from right table, use HashJoin::MapsOne. For example, LEFT ANY/SEMI/ANTI. +/// +/// HashJoin::MapsAll will store all rows for each key in the map. It is used when need to match multiple rows from right table. +/// For example, RIGHT ANY/ALL, FULL JOIN, INNER JOIN. +/// +/// prefer_use_maps_all is true when there is mixed inequal condition in the join condition. For example, `t1.a = t2.a AND t1.b > t2.b`. +/// In this case, we need to use HashJoin::MapsAll to store all rows for each key in the map. We will select all matched rows from the map +/// and filter them by `t1.b > t2.b`. 
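To make the dispatch rule from this comment concrete, a compile-time sketch could look like the following (illustrative stand-in types and a single `prefer_use_maps_all` flag; the real `MapGetter` specializations, which also take the join kind into account, follow just below):

```cpp
#include <cstddef>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

/// Stand-ins for HashJoin::MapsOne / HashJoin::MapsAll:
/// MapsOne keeps a single right-table row per key, MapsAll keeps all of them.
using MapsOne = std::unordered_map<std::string, std::size_t>;
using MapsAll = std::unordered_map<std::string, std::vector<std::size_t>>;

enum class Strictness { Any, All };

/// ANY can normally use the cheaper MapsOne, but a mixed inequality condition
/// (prefer_use_maps_all == true) forces MapsAll, because every key-matched row
/// must be kept and filtered by the residual condition afterwards.
template <Strictness strictness, bool prefer_use_maps_all>
struct SimplifiedMapGetter
{
    using Map = std::conditional_t<strictness == Strictness::Any && !prefer_use_maps_all, MapsOne, MapsAll>;
};

static_assert(std::is_same_v<SimplifiedMapGetter<Strictness::Any, false>::Map, MapsOne>);
static_assert(std::is_same_v<SimplifiedMapGetter<Strictness::Any, true>::Map, MapsAll>);
static_assert(std::is_same_v<SimplifiedMapGetter<Strictness::All, false>::Map, MapsAll>);
```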
template struct MapGetter; @@ -21,8 +30,9 @@ template struct MapGetter struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; -template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; @@ -33,14 +43,14 @@ template struct MapGetter struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -/// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. +/// Only ANTI LEFT and ANTI RIGHT are valid. INNER and FULL are here for templates instantiation. template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference index 3e413afd98e..11ac01d24d5 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference @@ -1,3 +1,4 @@ +03006_join_on_inequal_expression_fast.sql -- { echoOn } SET join_algorithm='hash'; SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); @@ -596,6 +597,39 @@ SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2. 
0 0 \N key4 F 1 1 1 SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); SET join_algorithm='hash'; +SELECT t1.* FROM t1 LEFT ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 b 2 3 2 +key1 c 3 2 1 +key1 d 4 7 2 +key1 e 5 5 5 +key2 a2 1 1 1 +key4 f 2 3 4 +SELECT t1.* FROM t1 LEFT SEMI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 b 2 3 2 +key1 c 3 2 1 +key1 d 4 7 2 +key2 a2 1 1 1 +key4 f 2 3 4 +SELECT t1.* FROM t1 LEFT ANTI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 e 5 5 5 +SELECT t1.* FROM t1 RIGHT ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +SELECT t1.* FROM t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +SELECT t1.* FROM t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +SET join_algorithm='hash'; SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY (t1.key, t1.attr, t2.key, t2.attr); key1 a 1 1 2 key1 A 1 2 1 key1 a 1 1 2 key1 B 2 1 2 @@ -666,3 +700,47 @@ key2 a2 1 1 1 key1 A 1 2 1 key2 a2 1 1 1 key3 a3 1 1 1 key2 a2 1 1 1 key4 F 1 1 1 key4 f 2 3 4 key1 B 2 1 2 +SET join_algorithm='hash'; +SELECT t1.*, t2.* FROM t1 INNER ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 INNER ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SET join_algorithm='grace_hash'; +SELECT t1.*, t2.* FROM t1 INNER ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 INNER ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SET join_algorithm='hash'; +SELECT t1.* FROM t1 INNER ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; 
+key1 a 1 1 2 +key1 b 2 3 2 +key1 c 3 2 1 +key1 d 4 7 2 +key2 a2 1 1 1 +key4 f 2 3 4 diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 index 1bf5a7870e7..b300881c562 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 @@ -34,12 +34,43 @@ SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 {{ join_type }} {{ join_stri {% endfor -%} {% endfor -%} +{% for algorithm in ['hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['LEFT', 'RIGHT'] -%} +{% for join_strictness in ['ANY', 'SEMI', 'ANTI'] -%} +SELECT t1.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +{% endfor -%} +{% endfor -%} +{% endfor -%} + {% for algorithm in ['hash'] -%} SET join_algorithm='{{ algorithm }}'; {% for join_type in ['LEFT', 'INNER', 'RIGHT', 'FULL'] -%} SELECT t1.*, t2.* FROM t1 {{ join_type }} JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY (t1.key, t1.attr, t2.key, t2.attr); {% endfor -%} {% endfor -%} + +{% for algorithm in ['hash', 'grace_hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['INNER'] -%} +{% for join_strictness in ['ANY'] -%} +SELECT t1.*, t2.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 {{ join_type }} {{ join_strictness }} JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +{% endfor -%} +{% endfor -%} +{% endfor -%} + +{% for algorithm in ['hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['INNER'] -%} +{% for join_strictness in ['ANY'] -%} +SELECT t1.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +{% endfor -%} +{% endfor -%} +{% endfor -%} + -- { echoOff } -- test error messages From ead9dc42d8a75ac99a0e4f538764cba206ad59a7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 7 Aug 2024 08:31:49 +0000 Subject: [PATCH 1813/2361] Make it worse --- ...02310_clickhouse_local_INSERT_progress_profile_events.expect | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02310_clickhouse_local_INSERT_progress_profile_events.expect b/tests/queries/0_stateless/02310_clickhouse_local_INSERT_progress_profile_events.expect index d5b2a278180..5c731ea6d89 100755 --- a/tests/queries/0_stateless/02310_clickhouse_local_INSERT_progress_profile_events.expect +++ b/tests/queries/0_stateless/02310_clickhouse_local_INSERT_progress_profile_events.expect @@ -1,5 +1,5 @@ #!/usr/bin/expect -f -# Tags: no-debug, no-tsan, no-msan, no-asan, no-ubsan, no-s3-storage +# Tags: no-debug, no-tsan, no-msan, no-asan, no-ubsan, no-s3-storage, no-cpu-aarch64 # ^ it can be slower than 60 seconds # This is the regression for the concurrent access in ProgressIndication, From 5eb896b9f1976feaa423071919e65d22e09da4ea Mon Sep 17 00:00:00 
2001 From: Blargian Date: Wed, 7 Aug 2024 10:43:41 +0200 Subject: [PATCH 1814/2361] Add documentation for toDecimal32 and variants --- .../functions/type-conversion-functions.md | 471 ++++++++++++------ 1 file changed, 305 insertions(+), 166 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 1e618b8cdab..24055bb99b7 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -95,7 +95,7 @@ SELECT toInt8(-8), toInt8(-8.8), toInt8('-8') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -156,7 +156,7 @@ Query: SELECT toInt8OrZero('-8'), toInt8OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -216,7 +216,7 @@ Query: SELECT toInt8OrNull('-8'), toInt8OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -281,7 +281,7 @@ Query: SELECT toInt8OrDefault('-8', CAST('-1', 'Int8')), toInt8OrDefault('abc', CAST('-1', 'Int8')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -345,7 +345,7 @@ SELECT toInt16(-16), toInt16(-16.16), toInt16('-16') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -406,7 +406,7 @@ Query: SELECT toInt16OrZero('-16'), toInt16OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -466,7 +466,7 @@ Query: SELECT toInt16OrNull('-16'), toInt16OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -531,7 +531,7 @@ Query: SELECT toInt16OrDefault('-16', CAST('-1', 'Int16')), toInt16OrDefault('abc', CAST('-1', 'Int16')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -595,7 +595,7 @@ SELECT toInt32(-32), toInt32(-32.32), toInt32('-32') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -656,7 +656,7 @@ Query: SELECT toInt32OrZero('-32'), toInt32OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -715,7 +715,7 @@ Query: SELECT toInt32OrNull('-32'), toInt32OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -780,7 +780,7 @@ Query: SELECT toInt32OrDefault('-32', CAST('-1', 'Int32')), toInt32OrDefault('abc', CAST('-1', 'Int32')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -844,7 +844,7 @@ SELECT toInt64(-64), toInt64(-64.64), toInt64('-64') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -905,7 +905,7 @@ Query: SELECT toInt64OrZero('-64'), toInt64OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -965,7 +965,7 @@ Query: SELECT toInt64OrNull('-64'), toInt64OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1030,7 +1030,7 @@ Query: SELECT toInt64OrDefault('-64', CAST('-1', 'Int64')), toInt64OrDefault('abc', CAST('-1', 'Int64')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1093,7 +1093,7 @@ SELECT toInt128(-128), toInt128(-128.8), toInt128('-128') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1154,7 +1154,7 @@ Query: SELECT toInt128OrZero('-128'), toInt128OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1214,7 +1214,7 @@ Query: SELECT toInt128OrNull('-128'), toInt128OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1279,7 +1279,7 @@ Query: SELECT toInt128OrDefault('-128', CAST('-1', 'Int128')), toInt128OrDefault('abc', CAST('-1', 'Int128')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1342,7 +1342,7 @@ SELECT toInt256(-256), toInt256(-256.256), toInt256('-256') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1403,7 +1403,7 @@ Query: SELECT toInt256OrZero('-256'), toInt256OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ 
-1463,7 +1463,7 @@ Query: SELECT toInt256OrNull('-256'), toInt256OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1528,7 +1528,7 @@ Query: SELECT toInt256OrDefault('-256', CAST('-1', 'Int256')), toInt256OrDefault('abc', CAST('-1', 'Int256')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1592,7 +1592,7 @@ SELECT toUInt8(8), toUInt8(8.8), toUInt8('8') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1653,7 +1653,7 @@ Query: SELECT toUInt8OrZero('-8'), toUInt8OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1713,7 +1713,7 @@ Query: SELECT toUInt8OrNull('8'), toUInt8OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1778,7 +1778,7 @@ Query: SELECT toUInt8OrDefault('8', CAST('0', 'UInt8')), toUInt8OrDefault('abc', CAST('0', 'UInt8')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1842,7 +1842,7 @@ SELECT toUInt16(16), toUInt16(16.16), toUInt16('16') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1903,7 +1903,7 @@ Query: SELECT toUInt16OrZero('16'), toUInt16OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1963,7 +1963,7 @@ Query: SELECT toUInt16OrNull('16'), toUInt16OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2028,7 +2028,7 @@ Query: SELECT toUInt16OrDefault('16', CAST('0', 'UInt16')), toUInt16OrDefault('abc', CAST('0', 'UInt16')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2092,7 +2092,7 @@ SELECT toUInt32(32), toUInt32(32.32), toUInt32('32') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2154,7 +2154,7 @@ Query: SELECT toUInt32OrZero('32'), toUInt32OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2214,7 +2214,7 @@ Query: SELECT toUInt32OrNull('32'), toUInt32OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2279,7 +2279,7 @@ Query: SELECT toUInt32OrDefault('32', CAST('0', 'UInt32')), toUInt32OrDefault('abc', CAST('0', 'UInt32')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2343,7 +2343,7 @@ SELECT toUInt64(64), toUInt64(64.64), toUInt64('64') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2404,7 +2404,7 @@ Query: SELECT toUInt64OrZero('64'), toUInt64OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2464,7 +2464,7 @@ Query: SELECT toUInt64OrNull('64'), toUInt64OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2529,7 +2529,7 @@ Query: SELECT toUInt64OrDefault('64', CAST('0', 'UInt64')), toUInt64OrDefault('abc', CAST('0', 'UInt64')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2592,7 +2592,7 @@ SELECT toUInt128(128), toUInt128(128.8), toUInt128('128') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2653,7 +2653,7 @@ Query: SELECT toUInt128OrZero('128'), toUInt128OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2713,7 +2713,7 @@ Query: SELECT toUInt128OrNull('128'), toUInt128OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2778,7 +2778,7 @@ Query: SELECT toUInt128OrDefault('128', CAST('0', 'UInt128')), toUInt128OrDefault('abc', CAST('0', 'UInt128')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2841,7 +2841,7 @@ SELECT toUInt256(256), toUInt256(256.256), toUInt256('256') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2902,7 +2902,7 @@ Query: SELECT toUInt256OrZero('256'), toUInt256OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2962,7 +2962,7 @@ Query: SELECT toUInt256OrNull('256'), toUInt256OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3027,7 +3027,7 @@ Query: SELECT toUInt256OrDefault('-256', CAST('0', 
'UInt256')), toUInt256OrDefault('abc', CAST('0', 'UInt256')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3542,173 +3542,312 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN ## toDateTime64OrDefault -## toDecimal(32\|64\|128\|256) +## toDecimal32 -Converts `value` to the [Decimal](../data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places. +Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error. -- `toDecimal32(value, S)` -- `toDecimal64(value, S)` -- `toDecimal128(value, S)` -- `toDecimal256(value, S)` +**Syntax** -## toDecimal(32\|64\|128\|256)OrNull - -Converts an input string to a [Nullable(Decimal(P,S))](../data-types/decimal.md) data type value. This family of functions includes: - -- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` data type. -- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` data type. -- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` data type. -- `toDecimal256OrNull(expr, S)` — Results in `Nullable(Decimal256(S))` data type. - -These functions should be used instead of `toDecimal*()` functions, if you prefer to get a `NULL` value instead of an exception in the event of an input value parsing error. +```sql +toDecimal32(expr, S) +``` **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions), returns a value in the [String](../data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. +- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32('0xc0fe', 1);`. + +:::note +Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: **Returned value** -A value in the `Nullable(Decimal(P,S))` data type. The value contains: +- Value of type `Decimal(9, S)`. [Decimal32(S)](../data-types/int-uint.md). -- Number with `S` decimal places, if ClickHouse interprets the input string as a number. -- `NULL`, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places. 
- -**Examples** +**Example** Query: -``` sql -SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val); +```sql +SELECT + toDecimal32(2, 1) AS a, toTypeName(a) AS type_a, + toDecimal32(4.2, 2) AS b, toTypeName(b) AS type_b, + toDecimal32('4.2', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; ``` Result: ```response -┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┠-│ -1.111 │ Nullable(Decimal(9, 5)) │ -└────────┴────────────────────────────────────────────────────┘ +Row 1: +────── +a: 2 +type_a: Decimal(9, 1) +b: 4.2 +type_b: Decimal(9, 2) +c: 4.2 +type_c: Decimal(9, 3) ``` -Query: +**See also** -``` sql -SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val); +- [`toDecimal32OrZero`](#todecimal32orzero). +- [`toDecimal32OrNull`](#todecimal32ornull). +- [`toDecimal32OrDefault`](#todecimal32ordefault). + +## toDecimal32OrZero + +Like [`toDecimal32`](#todecimal32), this function converts an input value to a value of type [Decimal(9, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal32OrZero(expr, S) ``` -Result: - -```response -┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┠-│ á´ºáµá´¸á´¸ │ Nullable(Decimal(9, 2)) │ -└──────┴────────────────────────────────────────────────────┘ -``` - - -## toDecimal(32\|64\|128\|256)OrDefault - -Converts an input string to a [Decimal(P,S)](../data-types/decimal.md) data type value. This family of functions includes: - -- `toDecimal32OrDefault(expr, S)` — Results in `Decimal32(S)` data type. -- `toDecimal64OrDefault(expr, S)` — Results in `Decimal64(S)` data type. -- `toDecimal128OrDefault(expr, S)` — Results in `Decimal128(S)` data type. -- `toDecimal256OrDefault(expr, S)` — Results in `Decimal256(S)` data type. - -These functions should be used instead of `toDecimal*()` functions, if you prefer to get a default value instead of an exception in the event of an input value parsing error. - **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions), returns a value in the [String](../data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrZero('0xc0fe', 1);`. + +:::note +Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: **Returned value** -A value in the `Decimal(P,S)` data type. The value contains: - -- Number with `S` decimal places, if ClickHouse interprets the input string as a number. -- Default `Decimal(P,S)` data type value, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places. 
- -**Examples** - -Query: - -``` sql -SELECT toDecimal32OrDefault(toString(-1.111), 5) AS val, toTypeName(val); -``` - -Result: - -```response -┌────val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 5))─┠-│ -1.111 │ Decimal(9, 5) │ -└────────┴───────────────────────────────────────────────────────┘ -``` - -Query: - -``` sql -SELECT toDecimal32OrDefault(toString(-1.111), 2) AS val, toTypeName(val); -``` - -Result: - -```response -┌─val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 2))─┠-│ 0 │ Decimal(9, 2) │ -└─────┴───────────────────────────────────────────────────────┘ -``` - -## toDecimal(32\|64\|128\|256)OrZero - -Converts an input value to the [Decimal(P,S)](../data-types/decimal.md) data type. This family of functions includes: - -- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` data type. -- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` data type. -- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` data type. -- `toDecimal256OrZero( expr, S)` — Results in `Decimal256(S)` data type. - -These functions should be used instead of `toDecimal*()` functions, if you prefer to get a `0` value instead of an exception in the event of an input value parsing error. - -**Arguments** - -- `expr` — [Expression](../syntax.md/#syntax-expressions), returns a value in the [String](../data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. - -**Returned value** - -A value in the `Nullable(Decimal(P,S))` data type. The value contains: - -- Number with `S` decimal places, if ClickHouse interprets the input string as a number. -- 0 with `S` decimal places, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places. +- Value of type `Decimal(9, S)` if successful, otherwise `0` with `S` decimal places. [Decimal32(S)](../data-types/decimal.md). **Example** Query: ``` sql -SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val); +SELECT + toDecimal32OrZero(toString(-1.111), 5) AS val, + toTypeName(val) +FORMAT Vertical; ``` Result: ```response -┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┠-│ -1.111 │ Decimal(9, 5) │ -└────────┴────────────────────────────────────────────────────┘ +Row 1: +────── +val: -1.111 +toTypeName(val): Decimal(9, 5) ``` Query: ``` sql -SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val); +SELECT + toDecimal32OrZero(toString(-1.111), 2) AS val, + toTypeName(val) +FORMAT Vertical; ``` Result: ```response -┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┠-│ 0.00 │ Decimal(9, 2) │ -└──────┴────────────────────────────────────────────────────┘ +Row 1: +────── +val: -1.11 +toTypeName(val): Decimal(9, 2) +``` + +**See also** + +- [`toDecimal32`](#todecimal32). +- [`toDecimal32OrNull`](#todecimal32ornull). +- [`toDecimal32OrDefault`](#todecimal32ordefault). + +## toDecimal32OrNull + +Like [`toDecimal32`](#todecimal32), this function converts an input value to a value of type [Nullable(Decimal(9, S))](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal32OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). 
+ +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrNull('0xc0fe', 1);`. + +:::note +Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(9, S))` if successful, otherwise value `NULL` of the same type. [Decimal32(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal32OrNull(toString(-1.111), 5) AS val, + toTypeName(val) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +val: -1.111 +toTypeName(val): Nullable(Decimal(9, 5)) +``` + +Query: + +``` sql +SELECT + toDecimal32OrNull(toString(-1.111), 2) AS val, + toTypeName(val) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +val: -1.11 +toTypeName(val): Nullable(Decimal(9, 2)) +``` + +**See also** + +- [`toDecimal32`](#todecimal32). +- [`toDecimal32OrZero`](#todecimal32orzero). +- [`toDecimal32OrDefault`](#todecimal32ordefault). + +## toDecimal32OrDefault + +Like [`toDecimal32`](#todecimal32), this function converts an input value to a value of type [Decimal(9, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal32OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal32(S)` is unsuccessful. [Decimal32(S](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrDefault('0xc0fe', 1);`. + +:::note +Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(9, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal32(S)](../data-types/decimal.md). 
+ +**Examples** + +Query: + +``` sql +SELECT + toDecimal32OrDefault(toString(-1.111), 5) AS val, + toTypeName(val) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +val: -1.111 +toTypeName(val): Decimal(9, 5) +``` + +Query: + +``` sql +SELECT + toDecimal32OrDefault(toString(-1.111), 2) AS val, + toTypeName(val) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +val: -1.11 +toTypeName(val): Decimal(9, 2) +``` + +Query: + +``` sql +SELECT + toDecimal32OrDefault('Inf', 2, CAST('0', 'Decimal32(2)')) AS val, + toTypeName(val) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +val: 0 +toTypeName(val): Decimal(9, 2) ``` ## toString From c933a38955e288afbef5c246fed9640878b0a68f Mon Sep 17 00:00:00 2001 From: khodyrevyurii Date: Tue, 6 Aug 2024 22:53:24 +0500 Subject: [PATCH 1815/2361] change std::thread::hardware_concurrency on container friendly method getNumberOfPhysicalCPUCores --- programs/server/Server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 7800ee9ff00..46bbc235fee 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1623,7 +1623,7 @@ try concurrent_threads_soft_limit = new_server_settings.concurrent_threads_soft_limit_num; if (new_server_settings.concurrent_threads_soft_limit_ratio_to_cores > 0) { - auto value = new_server_settings.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency(); + auto value = new_server_settings.concurrent_threads_soft_limit_ratio_to_cores * getNumberOfPhysicalCPUCores(); if (value > 0 && value < concurrent_threads_soft_limit) concurrent_threads_soft_limit = value; } From 00b62b1c0dccd16e45cef445cc8bf717b2da6486 Mon Sep 17 00:00:00 2001 From: khodyrevyurii Date: Wed, 7 Aug 2024 01:26:25 +0500 Subject: [PATCH 1816/2361] Minor clarifycation for method getNumberOfPhysicalCPUCores --- programs/server/Server.cpp | 7 ++++--- src/Common/getNumberOfPhysicalCPUCores.h | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 46bbc235fee..618bd2b011c 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -814,10 +814,11 @@ try const size_t physical_server_memory = getMemoryAmount(); - LOG_INFO(log, "Available RAM: {}; physical cores: {}; logical cores: {}.", + LOG_INFO(log, "Available RAM: {}; logical cores: {}; used cores: {}.", formatReadableSizeWithBinarySuffix(physical_server_memory), - getNumberOfPhysicalCPUCores(), // on ARM processors it can show only enabled at current moment cores - std::thread::hardware_concurrency()); + std::thread::hardware_concurrency(), + getNumberOfPhysicalCPUCores() // on ARM processors it can show only enabled at current moment cores + ); #if defined(__x86_64__) String cpu_info; diff --git a/src/Common/getNumberOfPhysicalCPUCores.h b/src/Common/getNumberOfPhysicalCPUCores.h index 827e95e1bea..9e3412fdcba 100644 --- a/src/Common/getNumberOfPhysicalCPUCores.h +++ b/src/Common/getNumberOfPhysicalCPUCores.h @@ -1,4 +1,5 @@ #pragma once /// Get number of CPU cores without hyper-threading. +/// The calculation respects possible cgroups limits. 
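The motivation for preferring `getNumberOfPhysicalCPUCores()` over `std::thread::hardware_concurrency()` here is containers: the latter reports the host's logical CPUs, while a cgroup CPU quota may allow far fewer. Below is a simplified sketch of taking a cgroup v2 quota into account; it is an illustration only, not the actual `getNumberOfPhysicalCPUCores()` implementation (which, as the header comment notes, also excludes hyper-threaded siblings):

```cpp
#include <algorithm>
#include <cmath>
#include <fstream>
#include <string>
#include <thread>

/// Simplified sketch: derive an effective core count from the cgroup v2 quota.
/// "/sys/fs/cgroup/cpu.max" contains "<quota> <period>" or "max <period>";
/// quota / period approximates the number of CPUs the container may actually use.
unsigned effectiveCPUCoresSketch()
{
    const unsigned logical = std::max(1u, std::thread::hardware_concurrency());

    std::ifstream cpu_max("/sys/fs/cgroup/cpu.max");
    if (!cpu_max.is_open())
        return logical;                                   /// no cgroup v2 controller visible

    std::string quota_str;
    long long period = 0;
    cpu_max >> quota_str >> period;

    if (quota_str.empty() || quota_str == "max" || period <= 0)
        return logical;                                   /// unlimited quota

    const long long quota = std::stoll(quota_str);
    if (quota <= 0)
        return logical;

    const auto limited = static_cast<unsigned>(std::ceil(static_cast<double>(quota) / static_cast<double>(period)));
    return std::clamp(limited, 1u, logical);
}
```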
unsigned getNumberOfPhysicalCPUCores(); From 4f2b1c36b7115143a23462282dc5474ed5b90afd Mon Sep 17 00:00:00 2001 From: Blargian Date: Wed, 7 Aug 2024 11:12:09 +0200 Subject: [PATCH 1817/2361] Fix typo from previous PR --- docs/en/sql-reference/functions/type-conversion-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 24055bb99b7..5db44da3e2d 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -2362,7 +2362,7 @@ toUInt64('64'): 64 - [`toUInt64OrNull`](#touint64ornull). - [`toUInt64OrDefault`](#touint64ordefault). -## toInt64OrZero +## toUInt64OrZero Like [`toUInt64`](#touint64), this function converts an input value to a value of type [UInt64](../data-types/int-uint.md) but returns `0` in case of an error. From b76e4acbc0a260f5222249a250066c77d2fcaff8 Mon Sep 17 00:00:00 2001 From: Blargian Date: Wed, 7 Aug 2024 11:13:56 +0200 Subject: [PATCH 1818/2361] fix another typo --- docs/en/sql-reference/functions/type-conversion-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 5db44da3e2d..8e72fea7fdb 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -1546,7 +1546,7 @@ toInt256OrDefault('abc', CAST('-1', 'Int256')): -1 - [`toInt256OrZero`](#toint256orzero). - [`toInt256OrNull`](#toint256ornull). -# toUInt8 +## toUInt8 Converts an input value to a value of type [`UInt8`](../data-types/int-uint.md). Throws an exception in case of an error. From e4134f5a51a1ad6d46c60337b9a3b5f8695d8020 Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Wed, 7 Aug 2024 09:16:19 +0000 Subject: [PATCH 1819/2361] catch exception in destructor of `LocalFileHolder` Signed-off-by: Duc Canh Le --- src/Storages/Cache/ExternalDataSourceCache.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Storages/Cache/ExternalDataSourceCache.cpp b/src/Storages/Cache/ExternalDataSourceCache.cpp index cffb1dc9ca3..8c778fd511a 100644 --- a/src/Storages/Cache/ExternalDataSourceCache.cpp +++ b/src/Storages/Cache/ExternalDataSourceCache.cpp @@ -57,8 +57,15 @@ LocalFileHolder::~LocalFileHolder() { if (original_readbuffer) { - assert_cast(original_readbuffer.get())->seek(0, SEEK_SET); - file_cache_controller->value().startBackgroundDownload(std::move(original_readbuffer), *thread_pool); + try + { + assert_cast(original_readbuffer.get())->seek(0, SEEK_SET); + file_cache_controller->value().startBackgroundDownload(std::move(original_readbuffer), *thread_pool); + } + catch (...) 
+ { + tryLogCurrentException(getLogger("LocalFileHolder"), "Exception during destructor of LocalFileHolder."); + } } } From 6172c56c1fd27f39d11914542f9e2dcb94fffd36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 09:48:38 +0000 Subject: [PATCH 1820/2361] Split tests to separate vaguely correlated tests --- ...3217_filtering_in_storage_merge.reference} | 6 +---- .../03217_filtering_in_storage_merge.sql | 16 +++++++++++ ...03217_filtering_in_system_tables.reference | 4 +++ ...l => 03217_filtering_in_system_tables.sql} | 27 +++++++------------ 4 files changed, 30 insertions(+), 23 deletions(-) rename tests/queries/0_stateless/{03217_read_rows_in_system_tables.reference => 03217_filtering_in_storage_merge.reference} (54%) create mode 100644 tests/queries/0_stateless/03217_filtering_in_storage_merge.sql create mode 100644 tests/queries/0_stateless/03217_filtering_in_system_tables.reference rename tests/queries/0_stateless/{03217_read_rows_in_system_tables.sql => 03217_filtering_in_system_tables.sql} (55%) diff --git a/tests/queries/0_stateless/03217_read_rows_in_system_tables.reference b/tests/queries/0_stateless/03217_filtering_in_storage_merge.reference similarity index 54% rename from tests/queries/0_stateless/03217_read_rows_in_system_tables.reference rename to tests/queries/0_stateless/03217_filtering_in_storage_merge.reference index b21ead49b1e..d366ad04c39 100644 --- a/tests/queries/0_stateless/03217_read_rows_in_system_tables.reference +++ b/tests/queries/0_stateless/03217_filtering_in_storage_merge.reference @@ -1,10 +1,6 @@ -information_schema tables -default test_replica_1 r1 Expression ((Project names + Projection)) Aggregating Expression (Before GROUP BY) ReadFromMerge Filter (( + ( + ))) - ReadFromMergeTree (default.test_replica_1) -1 1 -1 1 + ReadFromMergeTree (default.test_03217_merge_replica_1) diff --git a/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql b/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql new file mode 100644 index 00000000000..5ecc1e7c672 --- /dev/null +++ b/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql @@ -0,0 +1,16 @@ +CREATE TABLE test_03217_merge_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_merge_replica', 'r1') + ORDER BY x; +CREATE TABLE test_03217_merge_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_merge_replica', 'r2') + ORDER BY x; + + +CREATE TABLE test_03217_all_replicas (x UInt32) + ENGINE = Merge(currentDatabase(), 'test_03217_merge_replica_*'); + +INSERT INTO test_03217_merge_replica_1 SELECT number AS x FROM numbers(10); +SYSTEM SYNC REPLICA test_03217_merge_replica_2; + +-- If the filter on _table is not applied, then the plan will show both replicas +EXPLAIN SELECT _table, count() FROM test_03217_all_replicas WHERE _table = 'test_03217_merge_replica_1' AND x >= 0 GROUP BY _table; diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.reference b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference new file mode 100644 index 00000000000..218fddf92e0 --- /dev/null +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference @@ -0,0 +1,4 @@ +information_schema tables +default test_03217_system_tables_replica_1 r1 +1 1 +1 1 diff --git a/tests/queries/0_stateless/03217_read_rows_in_system_tables.sql b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql similarity index 55% rename from 
tests/queries/0_stateless/03217_read_rows_in_system_tables.sql rename to tests/queries/0_stateless/03217_filtering_in_system_tables.sql index 3bea04ccccf..bbc755e478d 100644 --- a/tests/queries/0_stateless/03217_read_rows_in_system_tables.sql +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql @@ -1,27 +1,18 @@ +-- If filtering is not done correctly on databases, then this query report to read 3 rows, which are: `system.tables`, `information_schema.tables` and `INFORMATION_SCHEMA.tables` SELECT database, table FROM system.tables WHERE database = 'information_schema' AND table = 'tables'; --- To verify StorageSystemReplicas applies the filter properly -CREATE TABLE test_replica_1(x UInt32) - ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217/test_replica', 'r1') +CREATE TABLE test_03217_system_tables_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r1') ORDER BY x; -CREATE TABLE test_replica_2(x UInt32) - ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217/test_replica', 'r2') +CREATE TABLE test_03217_system_tables_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r2') ORDER BY x; -SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = 'test_replica_1' AND replica_name = 'r1'; - - --- To verify StorageMerge -CREATE TABLE all_replicas (x UInt32) - ENGINE = Merge(currentDatabase(), 'test_replica_*'); - -INSERT INTO test_replica_1 SELECT number AS x FROM numbers(10); -SYSTEM SYNC REPLICA test_replica_2; --- If the filter not applied, then the plan will show both replicas -EXPLAIN SELECT _table, count() FROM all_replicas WHERE _table = 'test_replica_1' AND x >= 0 GROUP BY _table; +-- If filtering is not done correctly on database-table column, then this query report to read 2 rows, which are the above tables +SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name = 'r1'; SYSTEM FLUSH LOGS; --- argMin-argMax make the test repeatable +-- argMin-argMax is necessary to make the test repeatable -- StorageSystemTables SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 @@ -30,5 +21,5 @@ SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_ -- StorageSystemReplicas SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 - AND query LIKE '%SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = \'test_replica_1\' AND replica_name = \'r1\';' + AND query LIKE '%SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name = \'r1\';' AND type = 'QueryFinish'; From de41ffa18f5c2d3533fab3222aa401c193a0baac Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Wed, 7 Aug 2024 12:06:14 +0200 Subject: [PATCH 1821/2361] fix docks for groupConcat function documentation had wrong usage of a function. 
Examples of usage - https://github.com/ClickHouse/ClickHouse/blob/763952bf36d0b55f1b33ff11c693267574aa9666/tests/queries/0_stateless/03156_group_concat.sql --- .../aggregate-functions/reference/groupconcat.md | 6 +++--- tests/instructions/easy_tasks_sorted_ru.md | 4 ---- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md index 072252de8c9..bfa6160bbcc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md @@ -10,7 +10,7 @@ Calculates a concatenated string from a group of strings, optionally separated b **Syntax** ``` sql -groupConcat(expression [, delimiter] [, limit]); +groupConcat[(delimiter [, limit])](expression); ``` **Arguments** @@ -20,7 +20,7 @@ groupConcat(expression [, delimiter] [, limit]); - `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional. :::note -If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit. +If delimiter is specified without limit, it must be the first parameter. If both delimiter and limit are specified, delimiter must precede limit. ::: **Returned value** @@ -61,7 +61,7 @@ This concatenates all names into one continuous string without any separator. Query: ``` sql -SELECT groupConcat(Name, ', ', 2) FROM Employees; +SELECT groupConcat(', ')(Name) FROM Employees; ``` Result: diff --git a/tests/instructions/easy_tasks_sorted_ru.md b/tests/instructions/easy_tasks_sorted_ru.md index fbd86ebf08f..e0607126ecc 100644 --- a/tests/instructions/easy_tasks_sorted_ru.md +++ b/tests/instructions/easy_tasks_sorted_ru.md @@ -97,10 +97,6 @@ Upd: Ñделали по-другому: теперь вÑÑ‘ безопаÑно. Возвращает инкрементальное чиÑло Ð´Ð»Ñ Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð¾ вÑтречающихÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ð¹ key. -## ÐÐ³Ñ€ÐµÐ³Ð°Ñ‚Ð½Ð°Ñ Ñ„ÑƒÐ½ÐºÑ†Ð¸Ñ groupConcat. - -`groupConcat(x, ',')` - Ñобрать из переданных значений x Ñтроку, разделённую запÑтыми. - ## Функции DATE_ADD, DATE_SUB как Ñинонимы Ð´Ð»Ñ ÑовмеÑтимоÑти Ñ SQL. 
https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-add From a9c284dd8efb439ff06cf0f95e2a9920a26fdf5d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 7 Aug 2024 10:07:27 +0000 Subject: [PATCH 1822/2361] Include fixes aafe498 and cfaa852 --- src/Storages/Statistics/Statistics.cpp | 30 +++++++++++++++++-- src/Storages/Statistics/Statistics.h | 6 ++++ src/Storages/Statistics/StatisticsTDigest.cpp | 18 ++++------- .../02864_statistics_bugs.reference | 2 ++ .../0_stateless/02864_statistics_bugs.sql | 18 +++++++++++ 5 files changed, 60 insertions(+), 14 deletions(-) diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 52eec437ac2..fd686c5f0aa 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -1,14 +1,17 @@ #include +#include +#include +#include +#include #include #include +#include #include #include #include #include #include #include -#include -#include #include "config.h" /// USE_DATASKETCHES @@ -27,6 +30,29 @@ enum StatisticsFileVersion : UInt16 V0 = 0, }; +std::optional StatisticsUtils::tryConvertToFloat64(const Field & value, const DataTypePtr & data_type) +{ + if (data_type->isValueRepresentedByNumber()) + { + Field value_converted; + + if (isInteger(data_type) && (value.getType() == Field::Types::Float64 || value.getType() == Field::Types::String)) + /// For case val_int32 < 10.5 or val_int32 < '10.5' we should convert 10.5 to Float64. + value_converted = convertFieldToType(value, *DataTypeFactory::instance().get("Float64")); + else + /// We should convert value to the real column data type and then translate it to Float64. + /// For example for expression col_date > '2024-08-07', if we directly convert '2024-08-07' to Float64, we will get null. + value_converted = convertFieldToType(value, *data_type); + + if (value_converted.isNull()) + return {}; + + Float64 value_as_float = applyVisitor(FieldVisitorConvertToNumber(), value_converted); + return value_as_float; + } + return {}; +} + IStatistics::IStatistics(const SingleStatisticsDescription & stat_) : stat(stat_) { diff --git a/src/Storages/Statistics/Statistics.h b/src/Storages/Statistics/Statistics.h index 593ac20edb5..2a30c0de315 100644 --- a/src/Storages/Statistics/Statistics.h +++ b/src/Storages/Statistics/Statistics.h @@ -14,6 +14,12 @@ namespace DB constexpr auto STATS_FILE_PREFIX = "statistics_"; constexpr auto STATS_FILE_SUFFIX = ".stats"; +struct StatisticsUtils +{ + /// Returns std::nullopt if input Field cannot be converted to a concrete value + /// - `data_type` is the type of the column on which the statistics object was build on + static std::optional tryConvertToFloat64(const Field & value, const DataTypePtr & data_type); +}; /// Statistics describe properties of the values in the column, /// e.g. 
how many unique values exist, diff --git a/src/Storages/Statistics/StatisticsTDigest.cpp b/src/Storages/Statistics/StatisticsTDigest.cpp index fd9b922ffc8..285b779036f 100644 --- a/src/Storages/Statistics/StatisticsTDigest.cpp +++ b/src/Storages/Statistics/StatisticsTDigest.cpp @@ -1,8 +1,6 @@ #include -#include #include #include -#include namespace DB { @@ -41,22 +39,18 @@ void StatisticsTDigest::deserialize(ReadBuffer & buf) Float64 StatisticsTDigest::estimateLess(const Field & val) const { - Field val_converted = convertFieldToType(val, *data_type); - if (val_converted.isNull()) + auto val_as_float = StatisticsUtils::tryConvertToFloat64(val, data_type); + if (!val_as_float.has_value()) return 0; - - auto val_as_float = applyVisitor(FieldVisitorConvertToNumber(), val_converted); - return t_digest.getCountLessThan(val_as_float); + return t_digest.getCountLessThan(*val_as_float); } Float64 StatisticsTDigest::estimateEqual(const Field & val) const { - Field val_converted = convertFieldToType(val, *data_type); - if (val_converted.isNull()) + auto val_as_float = StatisticsUtils::tryConvertToFloat64(val, data_type); + if (!val_as_float.has_value()) return 0; - - auto val_as_float = applyVisitor(FieldVisitorConvertToNumber(), val_converted); - return t_digest.getCountEqual(val_as_float); + return t_digest.getCountEqual(*val_as_float); } void tdigestStatisticsValidator(const SingleStatisticsDescription & /*description*/, const DataTypePtr & data_type) diff --git a/tests/queries/0_stateless/02864_statistics_bugs.reference b/tests/queries/0_stateless/02864_statistics_bugs.reference index f599e28b8ab..a7eeae9def6 100644 --- a/tests/queries/0_stateless/02864_statistics_bugs.reference +++ b/tests/queries/0_stateless/02864_statistics_bugs.reference @@ -1 +1,3 @@ 10 +11 +0 diff --git a/tests/queries/0_stateless/02864_statistics_bugs.sql b/tests/queries/0_stateless/02864_statistics_bugs.sql index ef1735550e6..01bbe221b0f 100644 --- a/tests/queries/0_stateless/02864_statistics_bugs.sql +++ b/tests/queries/0_stateless/02864_statistics_bugs.sql @@ -7,3 +7,21 @@ CREATE TABLE bug_67742 (a Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDE INSERT INTO bug_67742 SELECT number FROM system.numbers LIMIT 10000; SELECT count(*) FROM bug_67742 WHERE a < '10'; DROP TABLE bug_67742; + +DROP TABLE IF EXISTS bug_67742; +CREATE TABLE bug_67742 (a Int32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO bug_67742 SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM bug_67742 WHERE a < '10.5'; -- { serverError TYPE_MISMATCH } +DROP TABLE bug_67742; + +DROP TABLE IF EXISTS bug_67742; +CREATE TABLE bug_67742 (a Int32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO bug_67742 SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM bug_67742 WHERE a < 10.5; +DROP TABLE bug_67742; + +DROP TABLE IF EXISTS bug_67742; +CREATE TABLE bug_67742 (a Int16 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO bug_67742 SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM bug_67742 WHERE a < '9999999999999999999999999'; +DROP TABLE bug_67742; From a9735f470c195dadbfffe02545b0979bfa9bd778 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Wed, 7 Aug 2024 12:09:26 +0200 Subject: [PATCH 1823/2361] squash! fix docks for groupConcat function documentation had wrong usage of a function. 
Examples of usage - https://github.com/ClickHouse/ClickHouse/blob/763952bf36d0b55f1b33ff11c693267574aa9666/tests/queries/0_stateless/03156_group_concat.sql --- .../sql-reference/aggregate-functions/reference/groupconcat.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md index bfa6160bbcc..6a24aa244bf 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md +++ b/docs/en/sql-reference/aggregate-functions/reference/groupconcat.md @@ -78,7 +78,7 @@ This output shows the names separated by a comma followed by a space. Query: ``` sql -SELECT groupConcat(Name, ', ', 2) FROM Employees; +SELECT groupConcat(', ', 2)(Name) FROM Employees; ``` Result: From 3c531d314d0b0cfb64fe21fc7bc910ce3327cc79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 10:09:35 +0000 Subject: [PATCH 1824/2361] Fix build --- src/Storages/Kafka/KafkaConfigLoader.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/Kafka/KafkaConfigLoader.cpp b/src/Storages/Kafka/KafkaConfigLoader.cpp index 000e08e2276..df6ccec4b7f 100644 --- a/src/Storages/Kafka/KafkaConfigLoader.cpp +++ b/src/Storages/Kafka/KafkaConfigLoader.cpp @@ -356,7 +356,7 @@ void updateGlobalConfiguration( } #else // USE_KRB5 if (kafka_config.has_property("sasl.kerberos.keytab") || kafka_config.has_property("sasl.kerberos.principal")) - LOG_WARNING(log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); + LOG_WARNING(params.log, "Ignoring Kerberos-related parameters because ClickHouse was built without krb5 library support."); #endif // USE_KRB5 // No need to add any prefix, messages can be distinguished kafka_config.set_log_callback( From bcc5201c99e00998beab8088a988a66f921415b6 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 30 Jul 2024 17:17:07 +0200 Subject: [PATCH 1825/2361] init --- src/Common/SystemLogBase.cpp | 138 ++++++++++++-------- src/Common/SystemLogBase.h | 58 +++++--- src/Interpreters/InterpreterSystemQuery.cpp | 9 +- src/Interpreters/SystemLog.cpp | 38 +++--- 4 files changed, 148 insertions(+), 95 deletions(-) diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index 7d2c15714e2..748cf4744ae 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -27,12 +27,15 @@ #include #include +#define LOGICAL_IF_THEN(A, B) (!(A) || !!(B)) + namespace DB { namespace ErrorCodes { extern const int TIMEOUT_EXCEEDED; + extern const int ABORTED; } ISystemLog::~ISystemLog() = default; @@ -86,9 +89,8 @@ void SystemLogQueue::push(LogElement&& element) // by one, under exclusive lock, so we will see each message count. // It is enough to only wake the flushing thread once, after the message // count increases past half available size. 
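The rewrite below, together with the new `waitFlush()`, `confirm()` and `pop()` further down, replaces the old `requested_flush_up_to`/`flushed_up_to` bookkeeping with an explicit index handshake between producers, waiters and the flushing thread. A condensed sketch of that pattern (simplified names, no shutdown or table-preparation handling) is:

```cpp
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <stdexcept>

/// Condensed sketch of the index-based flush handshake:
/// producers bump requested_index and wake the flusher through flush_event;
/// the flusher publishes flushed_index and wakes waiters through confirm_event.
struct FlushHandshakeSketch
{
    std::mutex mutex;
    std::condition_variable flush_event;    /// producers and waiters -> flushing thread
    std::condition_variable confirm_event;  /// flushing thread -> waiters
    uint64_t requested_index = 0;           /// highest index somebody asked to flush
    uint64_t flushed_index = 0;             /// highest index already written out

    void requestFlush(uint64_t up_to)       /// what push()/notifyFlush() do
    {
        std::lock_guard lock(mutex);
        requested_index = std::max(requested_index, up_to);
        flush_event.notify_all();
    }

    void waitFlush(uint64_t up_to, std::chrono::seconds timeout)  /// what a waiter does
    {
        std::unique_lock lock(mutex);
        requested_index = std::max(requested_index, up_to);
        flush_event.notify_all();
        if (!confirm_event.wait_for(lock, timeout, [&] { return flushed_index >= up_to; }))
            throw std::runtime_error("flush timed out");
    }

    void confirm(uint64_t up_to)            /// what the flushing thread does after a write
    {
        std::lock_guard lock(mutex);
        flushed_index = std::max(flushed_index, up_to);
        confirm_event.notify_all();
    }
};
```

Keeping a separate `confirm_event` means a waiter is only woken once the flushing thread has actually advanced the flushed index, rather than on every producer notification.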
- const uint64_t queue_end = queue_front_index + queue.size(); - requested_flush_up_to = std::max(requested_flush_up_to, queue_end); - + const auto last_log_index = queue_front_index + queue.size(); + requested_flush_index = std::max(requested_flush_index, last_log_index); flush_event.notify_all(); } @@ -127,20 +129,46 @@ template void SystemLogQueue::handleCrash() { if (settings.notify_flush_on_crash) - notifyFlush(/* force */ true); + { + notifyFlush(getLastLogIndex(), /* should_prepare_tables_anyway */ true); + } } template -void SystemLogQueue::waitFlush(uint64_t expected_flushed_up_to) +void SystemLogQueue::notifyFlush(SystemLogQueue::Index expected_flushed_index, bool should_prepare_tables_anyway) { + std::unique_lock lock(mutex); + // Publish our flush request, taking care not to overwrite the requests + // made by other threads. + force_prepare_tables_requested |= should_prepare_tables_anyway; + requested_flush_index = std::max(requested_flush_index, expected_flushed_index); + flush_event.notify_all(); +} + +template +void SystemLogQueue::waitFlush(SystemLogQueue::Index expected_flushed_index, bool should_prepare_tables_anyway) +{ + LOG_DEBUG(log, "Requested flush up to offset {}", expected_flushed_index); + // Use an arbitrary timeout to avoid endless waiting. 60s proved to be // too fast for our parallel functional tests, probably because they // heavily load the disk. const int timeout_seconds = 180; + std::unique_lock lock(mutex); - bool result = flush_event.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] + + // there is no obligation to call notifyFlush before waitFlush, than we have to be sure that flush_event has been triggered + force_prepare_tables_requested |= should_prepare_tables_anyway; + if (requested_flush_index < expected_flushed_index) { - return flushed_up_to >= expected_flushed_up_to && !is_force_prepare_tables; + requested_flush_index = expected_flushed_index; + flush_event.notify_all(); + } + + auto result = confirm_event.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] + { + const bool if_should_prepare_then_it_is_done = LOGICAL_IF_THEN(should_prepare_tables_anyway, prepare_tables_done); + return (flushed_index >= expected_flushed_index && if_should_prepare_then_it_is_done) || is_shutdown; }); if (!result) @@ -148,67 +176,54 @@ void SystemLogQueue::waitFlush(uint64_t expected_flushed_up_to) throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Timeout exceeded ({} s) while flushing system log '{}'.", toString(timeout_seconds), demangle(typeid(*this).name())); } -} - -template -uint64_t SystemLogQueue::notifyFlush(bool should_prepare_tables_anyway) -{ - uint64_t this_thread_requested_offset; + if (is_shutdown) { - std::lock_guard lock(mutex); - if (is_shutdown) - return uint64_t(-1); - - this_thread_requested_offset = queue_front_index + queue.size(); - - // Publish our flush request, taking care not to overwrite the requests - // made by other threads. - is_force_prepare_tables |= should_prepare_tables_anyway; - requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset); - - flush_event.notify_all(); + throw Exception(ErrorCodes::ABORTED, "Shutdown has been called while flushing system log '{}'. 
Aborting.", + demangle(typeid(*this).name())); } - - LOG_DEBUG(log, "Requested flush up to offset {}", this_thread_requested_offset); - return this_thread_requested_offset; } template -void SystemLogQueue::confirm(uint64_t to_flush_end) +SystemLogQueue::Index SystemLogQueue::getLastLogIndex() { std::lock_guard lock(mutex); - flushed_up_to = to_flush_end; - is_force_prepare_tables = false; - flush_event.notify_all(); + return queue_front_index + queue.size(); } template -typename SystemLogQueue::Index SystemLogQueue::pop(std::vector & output, - bool & should_prepare_tables_anyway, - bool & exit_this_thread) +void SystemLogQueue::confirm(SystemLogQueue::Index last_flashed_index) { - /// Call dtors and deallocate strings without holding the global lock - output.resize(0); + std::lock_guard lock(mutex); + prepare_tables_done = true; + flushed_index = std::max(flushed_index, last_flashed_index); + confirm_event.notify_all(); +} +template +typename SystemLogQueue::PopResult SystemLogQueue::pop() +{ std::unique_lock lock(mutex); - flush_event.wait_for(lock, - std::chrono::milliseconds(settings.flush_interval_milliseconds), - [&] () - { - return requested_flush_up_to > flushed_up_to || is_shutdown || is_force_prepare_tables; - } - ); + + flush_event.wait_for(lock, std::chrono::milliseconds(settings.flush_interval_milliseconds), [&] () + { + const bool if_prepare_requested_and_it_is_not_done = force_prepare_tables_requested && !prepare_tables_done; + return requested_flush_index > flushed_index || if_prepare_requested_and_it_is_not_done || is_shutdown; + }); + + if (is_shutdown) + return PopResult{.is_shutdown = true}; queue_front_index += queue.size(); - // Swap with existing array from previous flush, to save memory - // allocations. - queue.swap(output); - should_prepare_tables_anyway = is_force_prepare_tables; + PopResult result; + result.logs_index = queue_front_index; + result.logs_elemets.swap(queue); - exit_this_thread = is_shutdown; - return queue_front_index; + const bool if_prepare_requested_and_it_is_not_done = force_prepare_tables_requested && !prepare_tables_done; + result.create_table_force = if_prepare_requested_and_it_is_not_done; + + return result; } template @@ -229,13 +244,21 @@ SystemLogBase::SystemLogBase( } template -void SystemLogBase::flush(bool force) +SystemLogBase::Index SystemLogBase::getLastLogIndex() { - uint64_t this_thread_requested_offset = queue->notifyFlush(force); - if (this_thread_requested_offset == uint64_t(-1)) - return; + return queue->getLastLogIndex(); +} - queue->waitFlush(this_thread_requested_offset); +template +void SystemLogBase::notifyFlush(Index expected_flushed_index) +{ + queue->notifyFlush(expected_flushed_index, /* should_prepare_tables_anyway */ true); +} + +template +void SystemLogBase::flush(Index expected_flushed_index) +{ + queue->waitFlush(expected_flushed_index, /* should_prepare_tables_anyway */ true); } template @@ -257,9 +280,6 @@ void SystemLogBase::add(LogElement element) queue->push(std::move(element)); } -template -void SystemLogBase::notifyFlush(bool force) { queue->notifyFlush(force); } - #define INSTANTIATE_SYSTEM_LOG_BASE(ELEMENT) template class SystemLogBase; SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE) @@ -267,3 +287,5 @@ SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE) SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_QUEUE) } + +#undef LOGICAL_IF_THEN diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h index b87fcf419d3..6c60ffafc4c 100644 --- a/src/Common/SystemLogBase.h +++ b/src/Common/SystemLogBase.h @@ 
-54,10 +54,19 @@ struct StorageID; class ISystemLog { public: + using Index = uint64_t; + virtual String getName() const = 0; - //// force -- force table creation (used for SYSTEM FLUSH LOGS) - virtual void flush(bool force = false) = 0; /// NOLINT + /// Return the index of the lastest added log element. That index no less than the flashed index. + /// The flashed index is the index of the last log element which has been flushed successfully. + /// Thereby all the records whose index is less than the flashed index are flushed already. + virtual Index getLastLogIndex() = 0; + /// Call this method to wake up the flush thread and flush the data in the background. It is non blocking call + virtual void notifyFlush(Index expected_flushed_index) = 0; + /// Call this method to wait intill the logs are flushed up to expected_flushed_index. It is blocking call. + virtual void flush(Index expected_flushed_index) = 0; + virtual void prepareTable() = 0; /// Start the background thread. @@ -97,24 +106,34 @@ struct SystemLogQueueSettings template class SystemLogQueue { - using Index = uint64_t; - public: + using Index = ISystemLog::Index; + explicit SystemLogQueue(const SystemLogQueueSettings & settings_); void shutdown(); // producer methods void push(LogElement && element); - Index notifyFlush(bool should_prepare_tables_anyway); - void waitFlush(Index expected_flushed_up_to); + + Index getLastLogIndex(); + void notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway); + void waitFlush(Index expected_flushed_index, bool should_prepare_tables_anyway); /// Handles crash, flushes log without blocking if notify_flush_on_crash is set void handleCrash(); + struct PopResult + { + Index logs_index = 0; + std::vector logs_elemets = {}; + bool create_table_force = false; + bool is_shutdown = false; + }; + // consumer methods - Index pop(std::vector& output, bool & should_prepare_tables_anyway, bool & exit_this_thread); - void confirm(Index to_flush_end); + PopResult pop(); + void confirm(Index last_flashed_index); private: /// Data shared between callers of add()/flush()/shutdown(), and the saving thread @@ -124,22 +143,29 @@ private: // Queue is bounded. But its size is quite large to not block in all normal cases. std::vector queue; + // An always-incrementing index of the first message currently in the queue. // We use it to give a global sequential index to every message, so that we // can wait until a particular message is flushed. This is used to implement // synchronous log flushing for SYSTEM FLUSH LOGS. Index queue_front_index = 0; + // A flag that says we must create the tables even if the queue is empty. 
- bool is_force_prepare_tables = false; + bool force_prepare_tables_requested = false; + bool prepare_tables_done = false; + // Requested to flush logs up to this index, exclusive - Index requested_flush_up_to = 0; + Index requested_flush_index = 0; + // Flushed log up to this index, exclusive - Index flushed_up_to = 0; + Index flushed_index = 0; + // Logged overflow message at this queue front index Index logged_queue_full_at_index = -1; bool is_shutdown = false; + std::condition_variable confirm_event; std::condition_variable flush_event; const SystemLogQueueSettings settings; @@ -150,6 +176,7 @@ template class SystemLogBase : public ISystemLog { public: + using Index = ISystemLog::Index; using Self = SystemLogBase; explicit SystemLogBase( @@ -163,15 +190,16 @@ public: */ void add(LogElement element); + Index getLastLogIndex() override; + + void notifyFlush(Index expected_flushed_index) override; + /// Flush data in the buffer to disk. Block the thread until the data is stored on disk. - void flush(bool force) override; + void flush(Index expected_flushed_index) override; /// Handles crash, flushes log without blocking if notify_flush_on_crash is set void handleCrash() override; - /// Non-blocking flush data in the buffer to disk. - void notifyFlush(bool force); - String getName() const override { return LogElement::name(); } static const char * getDefaultOrderBy() { return "event_date, event_time"; } diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index c284acfa308..9b483bac25c 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -712,11 +712,18 @@ BlockIO InterpreterSystemQuery::execute() getContext()->checkAccess(AccessType::SYSTEM_FLUSH_LOGS); auto logs = getContext()->getSystemLogs(); + std::vector> commands; commands.reserve(logs.size()); for (auto * system_log : logs) - commands.emplace_back([system_log] { system_log->flush(true); }); + { + auto current_index = system_log->getLastLogIndex(); + /// The data is started to being flushed in the background after notifyFlush call + system_log->notifyFlush(current_index); + commands.emplace_back([system_log, current_index] { system_log->flush(current_index); }); + } + /// The data is flashing in the background, we need to wait until it is done executeCommandsAndThrowIfError(commands); break; } diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 572481e6b12..7042564799a 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -1,6 +1,7 @@ #include #include +#include "Common/SystemLogBase.h" #include #include #include @@ -462,33 +463,26 @@ void SystemLog::savingThreadFunction() { setThreadName("SystemLogFlush"); - std::vector to_flush; - bool exit_this_thread = false; - while (!exit_this_thread) + while (true) { try { - // The end index (exclusive, like std end()) of the messages we are - // going to flush. - uint64_t to_flush_end = 0; - // Should we prepare table even if there are no new messages. 
- bool should_prepare_tables_anyway = false; + auto result = queue->pop(); - to_flush_end = queue->pop(to_flush, should_prepare_tables_anyway, exit_this_thread); - - if (to_flush.empty()) + if (result.is_shutdown) { - if (should_prepare_tables_anyway) - { - prepareTable(); - LOG_TRACE(log, "Table created (force)"); - - queue->confirm(to_flush_end); - } + LOG_TRACE(log, "Terminating"); + return; } - else + + if (!result.logs_elemets.empty()) { - flushImpl(to_flush, to_flush_end); + flushImpl(result.logs_elemets, result.logs_index); + } + else if (result.create_table_force) + { + prepareTable(); + queue->confirm(/* last_flashed_index */ 0); } } catch (...) @@ -496,7 +490,6 @@ void SystemLog::savingThreadFunction() tryLogCurrentException(__PRETTY_FUNCTION__); } } - LOG_TRACE(log, "Terminating"); } @@ -579,6 +572,9 @@ StoragePtr SystemLog::getStorage() const template void SystemLog::prepareTable() { + if (is_prepared) + return; + String description = table_id.getNameForLogs(); auto table = getStorage(); From aa42ccf0531aa416c5525b6d8c01c057f781b0e3 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 30 Jul 2024 19:04:22 +0200 Subject: [PATCH 1826/2361] move LOGICAL_IF_THEN to base/defines.h --- base/base/defines.h | 2 ++ src/Common/SystemLogBase.cpp | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 5685a6d9833..7860cebd359 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -164,3 +164,5 @@ template constexpr void UNUSED(Args &&... args [[maybe_unused]]) // NOLINT(cppcoreguidelines-missing-std-forward) { } + +#define LOGICAL_IF_THEN(A, B) (!(A) || !!(B)) diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index 748cf4744ae..6840c461ce6 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -27,7 +27,6 @@ #include #include -#define LOGICAL_IF_THEN(A, B) (!(A) || !!(B)) namespace DB { @@ -287,5 +286,3 @@ SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE) SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_QUEUE) } - -#undef LOGICAL_IF_THEN From abd5dfe1d0c7b88212a2f0dc4ed1a8b470dcedb1 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 30 Jul 2024 19:12:13 +0200 Subject: [PATCH 1827/2361] fix typo --- src/Common/SystemLogBase.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h index 6c60ffafc4c..3915b99f8aa 100644 --- a/src/Common/SystemLogBase.h +++ b/src/Common/SystemLogBase.h @@ -58,7 +58,7 @@ public: virtual String getName() const = 0; - /// Return the index of the lastest added log element. That index no less than the flashed index. + /// Return the index of the latest added log element. That index no less than the flashed index. /// The flashed index is the index of the last log element which has been flushed successfully. /// Thereby all the records whose index is less than the flashed index are flushed already. 
virtual Index getLastLogIndex() = 0; From 8e5577ad8f8cb4e4d3ccb977af8536957088b8ca Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 30 Jul 2024 19:18:03 +0200 Subject: [PATCH 1828/2361] fix includes --- src/Interpreters/SystemLog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 7042564799a..c236b524c60 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -1,7 +1,7 @@ #include #include -#include "Common/SystemLogBase.h" +#include #include #include #include From 86267418f9a74915d5089770d658b328684b9189 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 31 Jul 2024 23:28:03 +0200 Subject: [PATCH 1829/2361] fix tests, rework recreation tables conditions, add log about ignored logs --- base/base/defines.h | 2 - src/Backups/BackupsWorker.cpp | 2 + src/Common/SystemLogBase.cpp | 104 +++++++++--------- src/Common/SystemLogBase.h | 28 +++-- src/Interpreters/SystemLog.cpp | 9 +- .../test_system_flush_logs/test.py | 25 ++++- .../test_system_logs_recreate/test.py | 13 ++- 7 files changed, 105 insertions(+), 78 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 7860cebd359..5685a6d9833 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -164,5 +164,3 @@ template constexpr void UNUSED(Args &&... args [[maybe_unused]]) // NOLINT(cppcoreguidelines-missing-std-forward) { } - -#define LOGICAL_IF_THEN(A, B) (!(A) || !!(B)) diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 0b93ae6d547..363aaae9c8d 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -1171,6 +1171,8 @@ void BackupsWorker::waitAll() for (const auto & id : current_operations) wait(id, /* rethrow_exception= */ false); + backup_log->flush(backup_log->getLastLogIndex()); + LOG_INFO(log, "Backups and restores finished"); } diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index 6840c461ce6..a35a46c49cc 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -88,31 +88,18 @@ void SystemLogQueue::push(LogElement&& element) // by one, under exclusive lock, so we will see each message count. // It is enough to only wake the flushing thread once, after the message // count increases past half available size. + const auto last_log_index = queue_front_index + queue.size(); - requested_flush_index = std::max(requested_flush_index, last_log_index); - flush_event.notify_all(); + notifyFlushUnlocked(last_log_index, /* should_prepare_tables_anyway */ false); } if (queue.size() >= settings.max_size_rows) { + chassert(queue.size() == settings.max_size_rows); + // Ignore all further entries until the queue is flushed. - // Log a message about that. Don't spam it -- this might be especially - // problematic in case of trace log. Remember what the front index of the - // queue was when we last logged the message. If it changed, it means the - // queue was flushed, and we can log again. - if (queue_front_index != logged_queue_full_at_index) - { - logged_queue_full_at_index = queue_front_index; - - // TextLog sets its logger level to 0, so this log is a noop and - // there is no recursive logging. - lock.unlock(); - LOG_ERROR(log, "Queue is full for system log '{}' at {}. 
max_size_rows {}", - demangle(typeid(*this).name()), - queue_front_index, - settings.max_size_rows); - } - + // To the next batch we add a log message about how much we have lost + ++ignored_logs; return; } @@ -133,15 +120,22 @@ void SystemLogQueue::handleCrash() } } +template +void SystemLogQueue::notifyFlushUnlocked(Index expected_flushed_index, bool should_prepare_tables_anyway) +{ + if (should_prepare_tables_anyway) + requested_prepare_tables = std::max(requested_prepare_tables, expected_flushed_index); + + requested_flush_index = std::max(requested_flush_index, expected_flushed_index); + + flush_event.notify_all(); +} + template void SystemLogQueue::notifyFlush(SystemLogQueue::Index expected_flushed_index, bool should_prepare_tables_anyway) { - std::unique_lock lock(mutex); - // Publish our flush request, taking care not to overwrite the requests - // made by other threads. - force_prepare_tables_requested |= should_prepare_tables_anyway; - requested_flush_index = std::max(requested_flush_index, expected_flushed_index); - flush_event.notify_all(); + std::lock_guard lock(mutex); + notifyFlushUnlocked(expected_flushed_index, should_prepare_tables_anyway); } template @@ -156,18 +150,15 @@ void SystemLogQueue::waitFlush(SystemLogQueue::Index exp std::unique_lock lock(mutex); - // there is no obligation to call notifyFlush before waitFlush, than we have to be sure that flush_event has been triggered - force_prepare_tables_requested |= should_prepare_tables_anyway; - if (requested_flush_index < expected_flushed_index) - { - requested_flush_index = expected_flushed_index; - flush_event.notify_all(); - } + // there is no obligation to call notifyFlush before waitFlush, than we have to be sure that flush_event has been triggered before we wait the result + notifyFlushUnlocked(expected_flushed_index, should_prepare_tables_anyway); auto result = confirm_event.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] { - const bool if_should_prepare_then_it_is_done = LOGICAL_IF_THEN(should_prepare_tables_anyway, prepare_tables_done); - return (flushed_index >= expected_flushed_index && if_should_prepare_then_it_is_done) || is_shutdown; + if (should_prepare_tables_anyway) + return (flushed_index >= expected_flushed_index && prepared_tables >= requested_prepare_tables) || is_shutdown; + else + return (flushed_index >= expected_flushed_index) || is_shutdown; }); if (!result) @@ -194,7 +185,7 @@ template void SystemLogQueue::confirm(SystemLogQueue::Index last_flashed_index) { std::lock_guard lock(mutex); - prepare_tables_done = true; + prepared_tables = std::max(prepared_tables, last_flashed_index); flushed_index = std::max(flushed_index, last_flashed_index); confirm_event.notify_all(); } @@ -202,25 +193,34 @@ void SystemLogQueue::confirm(SystemLogQueue::Index last_ template typename SystemLogQueue::PopResult SystemLogQueue::pop() { - std::unique_lock lock(mutex); - - flush_event.wait_for(lock, std::chrono::milliseconds(settings.flush_interval_milliseconds), [&] () - { - const bool if_prepare_requested_and_it_is_not_done = force_prepare_tables_requested && !prepare_tables_done; - return requested_flush_index > flushed_index || if_prepare_requested_and_it_is_not_done || is_shutdown; - }); - - if (is_shutdown) - return PopResult{.is_shutdown = true}; - - queue_front_index += queue.size(); - PopResult result; - result.logs_index = queue_front_index; - result.logs_elemets.swap(queue); + size_t prev_ignored_logs = 0; - const bool if_prepare_requested_and_it_is_not_done = force_prepare_tables_requested && 
!prepare_tables_done; - result.create_table_force = if_prepare_requested_and_it_is_not_done; + { + std::unique_lock lock(mutex); + + flush_event.wait_for(lock, std::chrono::milliseconds(settings.flush_interval_milliseconds), [&] () + { + return requested_flush_index > flushed_index || requested_prepare_tables > prepared_tables || is_shutdown; + }); + + if (is_shutdown) + return PopResult{.is_shutdown = true}; + + queue_front_index += queue.size(); + prev_ignored_logs = ignored_logs; + ignored_logs = 0; + + result.last_log_index = queue_front_index; + result.logs.swap(queue); + result.create_table_force = requested_prepare_tables > prepared_tables; + } + + if (prev_ignored_logs) + LOG_ERROR(log, "Queue had been full at {}, accepted {} logs, ignored {} logs.", + result.last_log_index - result.logs.size(), + result.logs.size(), + prev_ignored_logs); return result; } diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h index 3915b99f8aa..c359287a73f 100644 --- a/src/Common/SystemLogBase.h +++ b/src/Common/SystemLogBase.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -54,7 +55,7 @@ struct StorageID; class ISystemLog { public: - using Index = uint64_t; + using Index = int64_t; virtual String getName() const = 0; @@ -125,8 +126,8 @@ public: struct PopResult { - Index logs_index = 0; - std::vector logs_elemets = {}; + Index last_log_index = 0; + std::vector logs = {}; bool create_table_force = false; bool is_shutdown = false; }; @@ -136,6 +137,8 @@ public: void confirm(Index last_flashed_index); private: + void notifyFlushUnlocked(Index expected_flushed_index, bool should_prepare_tables_anyway); + /// Data shared between callers of add()/flush()/shutdown(), and the saving thread std::mutex mutex; @@ -150,18 +153,21 @@ private: // synchronous log flushing for SYSTEM FLUSH LOGS. Index queue_front_index = 0; - // A flag that says we must create the tables even if the queue is empty. 
- bool force_prepare_tables_requested = false; - bool prepare_tables_done = false; - // Requested to flush logs up to this index, exclusive - Index requested_flush_index = 0; - + Index requested_flush_index = std::numeric_limits::min(); // Flushed log up to this index, exclusive Index flushed_index = 0; - // Logged overflow message at this queue front index - Index logged_queue_full_at_index = -1; + // The same logic for the prepare tables: if requested_prepar_tables > prepared_tables we need to do prepare + // except that initial prepared_tables is -1 + // it is due to the difference: when no logs have been written and we call flush logs + // it becomes in the state: requested_flush_index = 0 and flushed_index = 0 -- we do not want to do anything + // but if we need to prepare tables it becomes requested_prepare_tables = 0 and prepared_tables = -1 + // we trigger background thread and do prepare + Index requested_prepare_tables = std::numeric_limits::min(); + Index prepared_tables = -1; + + size_t ignored_logs = 0; bool is_shutdown = false; diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index c236b524c60..9d07184a0e5 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -475,14 +475,14 @@ void SystemLog::savingThreadFunction() return; } - if (!result.logs_elemets.empty()) + if (!result.logs.empty()) { - flushImpl(result.logs_elemets, result.logs_index); + flushImpl(result.logs, result.last_log_index); } else if (result.create_table_force) { prepareTable(); - queue->confirm(/* last_flashed_index */ 0); + queue->confirm(result.last_log_index); } } catch (...) @@ -572,9 +572,6 @@ StoragePtr SystemLog::getStorage() const template void SystemLog::prepareTable() { - if (is_prepared) - return; - String description = table_id.getNameForLogs(); auto table = getStorage(); diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index 713b327eb76..0399122406a 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -4,7 +4,7 @@ import pytest from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry +from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_retry, TSV cluster = ClickHouseCluster(__file__) node = cluster.add_instance( @@ -75,6 +75,8 @@ def test_system_suspend(): def test_log_max_size(start_cluster): + # we do misconfiguration here: buffer_size_rows_flush_threshold > max_size_rows, flush_interval_milliseconds is huge + # no auto flush by size not by time has a chance node.exec_in_container( [ "bash", @@ -83,6 +85,7 @@ def test_log_max_size(start_cluster): 1000000 + 1000000 10 10 @@ -91,11 +94,23 @@ def test_log_max_size(start_cluster): """, ] ) - node.restart_clickhouse() - for i in range(10): - node.query(f"select {i}") - assert node.query("select count() >= 10 from system.query_log") == "1\n" + node.query(f"TRUNCATE TABLE IF EXISTS system.query_log") + node.restart_clickhouse() + + # all logs records above max_size_rows are lost + # The accepted logs records are never flushed until system flush logs is called by us + for i in range(21): + node.query(f"select {i}") + node.query("system flush logs") + + assert_logs_contain_with_retry( + node, "Queue had been full at 0, accepted 10 logs, ignored 34 logs." 
+ ) + assert node.query( + "select count() >= 10, count() < 20 from system.query_log" + ) == TSV([[1, 1]]) + node.exec_in_container( ["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"] ) diff --git a/tests/integration/test_system_logs_recreate/test.py b/tests/integration/test_system_logs_recreate/test.py index 1bdb1fe3261..1a4ed31278d 100644 --- a/tests/integration/test_system_logs_recreate/test.py +++ b/tests/integration/test_system_logs_recreate/test.py @@ -173,11 +173,20 @@ def test_drop_system_log(): node.query("system flush logs") node.query("select 2") node.query("system flush logs") - assert node.query("select count() > 0 from system.query_log") == "1\n" + assert node.query("select count() >= 2 from system.query_log") == "1\n" + node.query("drop table system.query_log sync") node.query("select 3") node.query("system flush logs") - assert node.query("select count() > 0 from system.query_log") == "1\n" + assert node.query("select count() >= 1 from system.query_log") == "1\n" + + node.query("drop table system.query_log sync") + node.restart_clickhouse() + node.query("system flush logs") + assert ( + node.query("select count() >= 0 from system.query_log") == "1\n" + ) # we check that query_log just exists + node.exec_in_container( ["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"] ) From 00efd5fc0da7124cb1671487250476185101a488 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 31 Jul 2024 22:23:16 +0000 Subject: [PATCH 1830/2361] Automatic style fix --- tests/integration/test_system_logs_recreate/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_system_logs_recreate/test.py b/tests/integration/test_system_logs_recreate/test.py index 1a4ed31278d..d5347ae7dea 100644 --- a/tests/integration/test_system_logs_recreate/test.py +++ b/tests/integration/test_system_logs_recreate/test.py @@ -185,7 +185,7 @@ def test_drop_system_log(): node.query("system flush logs") assert ( node.query("select count() >= 0 from system.query_log") == "1\n" - ) # we check that query_log just exists + ) # we check that query_log just exists node.exec_in_container( ["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"] From e3290c782066aaf7d1891d865547706387bf773c Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 1 Aug 2024 16:58:17 +0200 Subject: [PATCH 1831/2361] rework Context::getSystemLogs, add system logs flush at shutdown --- src/Backups/BackupsWorker.cpp | 4 +- src/Common/SystemLogBase.cpp | 8 +- src/Common/SystemLogBase.h | 8 +- src/Interpreters/Context.cpp | 7 +- src/Interpreters/Context.h | 4 +- src/Interpreters/InterpreterSystemQuery.cpp | 17 +--- src/Interpreters/SystemLog.cpp | 86 ++++++++-------- src/Interpreters/SystemLog.h | 104 +++++++++----------- 8 files changed, 109 insertions(+), 129 deletions(-) diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 363aaae9c8d..106aa89082d 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -1171,7 +1171,7 @@ void BackupsWorker::waitAll() for (const auto & id : current_operations) wait(id, /* rethrow_exception= */ false); - backup_log->flush(backup_log->getLastLogIndex()); + backup_log->flush(backup_log->getLastLogIndex(), /* should_prepare_tables_anyway */ false); LOG_INFO(log, "Backups and restores finished"); } @@ -1223,6 +1223,8 @@ void BackupsWorker::cancelAll(bool wait_) for (const auto & id : current_operations) wait(id, /* rethrow_exception= */ false); + 
backup_log->flush(backup_log->getLastLogIndex(), /* should_prepare_tables_anyway */ false); + LOG_INFO(log, "Backups and restores finished or stopped"); } diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index a35a46c49cc..127c8862a35 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -249,15 +249,15 @@ SystemLogBase::Index SystemLogBase::getLastLogIndex() } template -void SystemLogBase::notifyFlush(Index expected_flushed_index) +void SystemLogBase::notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway) { - queue->notifyFlush(expected_flushed_index, /* should_prepare_tables_anyway */ true); + queue->notifyFlush(expected_flushed_index, should_prepare_tables_anyway); } template -void SystemLogBase::flush(Index expected_flushed_index) +void SystemLogBase::flush(Index expected_flushed_index, bool should_prepare_tables_anyway) { - queue->waitFlush(expected_flushed_index, /* should_prepare_tables_anyway */ true); + queue->waitFlush(expected_flushed_index, should_prepare_tables_anyway); } template diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h index c359287a73f..0d7b04d5c57 100644 --- a/src/Common/SystemLogBase.h +++ b/src/Common/SystemLogBase.h @@ -64,9 +64,9 @@ public: /// Thereby all the records whose index is less than the flashed index are flushed already. virtual Index getLastLogIndex() = 0; /// Call this method to wake up the flush thread and flush the data in the background. It is non blocking call - virtual void notifyFlush(Index expected_flushed_index) = 0; + virtual void notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway) = 0; /// Call this method to wait intill the logs are flushed up to expected_flushed_index. It is blocking call. - virtual void flush(Index expected_flushed_index) = 0; + virtual void flush(Index expected_flushed_index, bool should_prepare_tables_anyway) = 0; virtual void prepareTable() = 0; @@ -198,10 +198,10 @@ public: Index getLastLogIndex() override; - void notifyFlush(Index expected_flushed_index) override; + void notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway) override; /// Flush data in the buffer to disk. Block the thread until the data is stored on disk. - void flush(Index expected_flushed_index) override; + void flush(Index expected_flushed_index, bool should_prepare_tables_anyway) override; /// Handles crash, flushes log without blocking if notify_flush_on_crash is set void handleCrash() override; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 5413b568068..3051ed3e567 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -99,6 +99,7 @@ #include #include #include +#include #include #include #include @@ -618,7 +619,7 @@ struct ContextSharedPart : boost::noncopyable /** After system_logs have been shut down it is guaranteed that no system table gets created or written to. * Note that part changes at shutdown won't be logged to part log. 
*/ - SHUTDOWN(log, "system logs", system_logs, shutdown()); + SHUTDOWN(log, "system logs", system_logs, flushAndShutdown()); LOG_TRACE(log, "Shutting down database catalog"); DatabaseCatalog::shutdown(); @@ -4312,13 +4313,13 @@ std::shared_ptr Context::getBlobStorageLog() const return shared->system_logs->blob_storage_log; } -std::vector Context::getSystemLogs() const +SystemLogs Context::getSystemLogs() const { SharedLockGuard lock(shared->mutex); if (!shared->system_logs) return {}; - return shared->system_logs->logs; + return *shared->system_logs; } std::optional Context::getDashboards() const diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index d5e35c3e4b3..3da4f124553 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -48,6 +48,8 @@ namespace DB class ASTSelectQuery; +class SystemLogs; + struct ContextSharedPart; class ContextAccess; class ContextAccessWrapper; @@ -1150,7 +1152,7 @@ public: std::shared_ptr getBackupLog() const; std::shared_ptr getBlobStorageLog() const; - std::vector getSystemLogs() const; + SystemLogs getSystemLogs() const; using Dashboards = std::vector>; std::optional getDashboards() const; diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 9b483bac25c..ef6d1040c5e 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -710,21 +710,8 @@ BlockIO InterpreterSystemQuery::execute() case Type::FLUSH_LOGS: { getContext()->checkAccess(AccessType::SYSTEM_FLUSH_LOGS); - - auto logs = getContext()->getSystemLogs(); - - std::vector> commands; - commands.reserve(logs.size()); - for (auto * system_log : logs) - { - auto current_index = system_log->getLastLogIndex(); - /// The data is started to being flushed in the background after notifyFlush call - system_log->notifyFlush(current_index); - commands.emplace_back([system_log, current_index] { system_log->flush(current_index); }); - } - - /// The data is flashing in the background, we need to wait until it is done - executeCommandsAndThrowIfError(commands); + auto system_logs = getContext()->getSystemLogs(); + system_logs.flush(true); break; } case Type::STOP_LISTEN: diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 9d07184a0e5..9b58da3f545 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -50,6 +50,7 @@ #include + namespace DB { @@ -312,56 +313,13 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf azure_queue_log = createSystemLog(global_context, "system", "azure_queue_log", config, "azure_queue_log", "Contains logging entries with the information files processes by S3Queue engine."); blob_storage_log = createSystemLog(global_context, "system", "blob_storage_log", config, "blob_storage_log", "Contains logging entries with information about various blob storage operations such as uploads and deletes."); - if (query_log) - logs.emplace_back(query_log.get()); - if (query_thread_log) - logs.emplace_back(query_thread_log.get()); - if (part_log) - logs.emplace_back(part_log.get()); - if (trace_log) - logs.emplace_back(trace_log.get()); - if (crash_log) - logs.emplace_back(crash_log.get()); - if (text_log) - logs.emplace_back(text_log.get()); - if (metric_log) - logs.emplace_back(metric_log.get()); - if (error_log) - logs.emplace_back(error_log.get()); - if (asynchronous_metric_log) - logs.emplace_back(asynchronous_metric_log.get()); - if (opentelemetry_span_log) - 
logs.emplace_back(opentelemetry_span_log.get()); - if (query_views_log) - logs.emplace_back(query_views_log.get()); - if (zookeeper_log) - logs.emplace_back(zookeeper_log.get()); if (session_log) - { - logs.emplace_back(session_log.get()); global_context->addWarningMessage("Table system.session_log is enabled. It's unreliable and may contain garbage. Do not use it for any kind of security monitoring."); - } - if (transactions_info_log) - logs.emplace_back(transactions_info_log.get()); - if (processors_profile_log) - logs.emplace_back(processors_profile_log.get()); - if (filesystem_cache_log) - logs.emplace_back(filesystem_cache_log.get()); - if (filesystem_read_prefetches_log) - logs.emplace_back(filesystem_read_prefetches_log.get()); - if (asynchronous_insert_log) - logs.emplace_back(asynchronous_insert_log.get()); - if (backup_log) - logs.emplace_back(backup_log.get()); - if (s3_queue_log) - logs.emplace_back(s3_queue_log.get()); - if (blob_storage_log) - logs.emplace_back(blob_storage_log.get()); bool should_prepare = global_context->getServerSettings().prepare_system_log_tables_on_startup; try { - for (auto & log : logs) + for (auto & log : getAllLogs()) { log->startup(); if (should_prepare) @@ -395,20 +353,56 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf } } - -SystemLogs::~SystemLogs() +std::vector SystemLogs::getAllLogs() const { +/// NOLINTBEGIN(bugprone-macro-parentheses) +#define GET_RAW_POINTERS(log_type, member, descr) \ + member.get(), \ + + std::vector result = { + LIST_OF_ALL_SYSTEM_LOGS(GET_RAW_POINTERS) + }; +#undef GET_RAW_POINTERS +/// NOLINTEND(bugprone-macro-parentheses) + + auto last_it = std::remove(result.begin(), result.end(), nullptr); + result.erase(last_it, result.end()); + + return result; +} + +void SystemLogs::flush(bool should_prepare_tables_anyway) +{ + auto logs = getAllLogs(); + std::vector logs_indexes(logs.size(), 0); + + for (size_t i = 0; i < logs.size(); ++i) + { + auto last_log_index = logs[i]->getLastLogIndex(); + logs_indexes[i] = last_log_index; + logs[i]->notifyFlush(last_log_index, should_prepare_tables_anyway); + } + + for (size_t i = 0; i < logs.size(); ++i) + logs[i]->flush(logs_indexes[i], should_prepare_tables_anyway); +} + +void SystemLogs::flushAndShutdown() +{ + flush(/* should_prepare_tables_anyway */ false); shutdown(); } void SystemLogs::shutdown() { + auto logs = getAllLogs(); for (auto & log : logs) log->shutdown(); } void SystemLogs::handleCrash() { + auto logs = getAllLogs(); for (auto & log : logs) log->handleCrash(); } diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 0ac468b15ec..093be203282 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -5,6 +5,32 @@ #include #include +#include + +#define LIST_OF_ALL_SYSTEM_LOGS(M) \ + M(QueryLog, query_log, "Used to log queries.") \ + M(QueryThreadLog, query_thread_log, "Used to log query threads.") \ + M(PartLog, part_log, "Used to log operations with parts.") \ + M(TraceLog, trace_log, "Used to log traces from query profiler.") \ + M(CrashLog, crash_log, "Used to log server crashes.") \ + M(TextLog, text_log, "Used to log all text messages.") \ + M(MetricLog, metric_log, "Used to log all metrics.") \ + M(ErrorLog, error_log, "Used to log errors.") \ + M(FilesystemCacheLog, filesystem_cache_log, "") \ + M(FilesystemReadPrefetchesLog, filesystem_read_prefetches_log, "") \ + M(ObjectStorageQueueLog, s3_queue_log, "") \ + M(ObjectStorageQueueLog, azure_queue_log, "") \ + M(AsynchronousMetricLog, 
asynchronous_metric_log, "Metrics from system.asynchronous_metrics") \ + M(OpenTelemetrySpanLog, opentelemetry_span_log, "OpenTelemetry trace spans.") \ + M(QueryViewsLog, query_views_log, "Used to log queries of materialized and live views.") \ + M(ZooKeeperLog, zookeeper_log, "Used to log all actions of ZooKeeper client.") \ + M(SessionLog, session_log, "Login, LogOut and Login failure events.") \ + M(TransactionsInfoLog, transactions_info_log, "Events related to transactions.") \ + M(ProcessorsProfileLog, processors_profile_log, "Used to log processors profiling") \ + M(AsynchronousInsertLog, asynchronous_insert_log, "") \ + M(BackupLog, backup_log, "Backup and restore events") \ + M(BlobStorageLog, blob_storage_log, "Log blob storage operations") \ + namespace DB { @@ -34,71 +60,39 @@ namespace DB }; */ -class QueryLog; -class QueryThreadLog; -class PartLog; -class TextLog; -class TraceLog; -class CrashLog; -class ErrorLog; -class MetricLog; -class AsynchronousMetricLog; -class OpenTelemetrySpanLog; -class QueryViewsLog; -class ZooKeeperLog; -class SessionLog; -class TransactionsInfoLog; -class ProcessorsProfileLog; -class FilesystemCacheLog; -class FilesystemReadPrefetchesLog; -class AsynchronousInsertLog; -class BackupLog; -class ObjectStorageQueueLog; -class BlobStorageLog; +/// NOLINTBEGIN(bugprone-macro-parentheses) +#define FORWARD_DECLARATION(log_type, member, descr) \ + class log_type; \ + +LIST_OF_ALL_SYSTEM_LOGS(FORWARD_DECLARATION) +#undef FORWARD_DECLARATION +/// NOLINTEND(bugprone-macro-parentheses) + /// System logs should be destroyed in destructor of the last Context and before tables, /// because SystemLog destruction makes insert query while flushing data into underlying tables -struct SystemLogs +class SystemLogs { +public: + SystemLogs() = default; SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConfiguration & config); - ~SystemLogs(); + SystemLogs(const SystemLogs & other) = default; + void flush(bool should_prepare_tables_anyway); + void flushAndShutdown(); void shutdown(); void handleCrash(); - std::shared_ptr query_log; /// Used to log queries. - std::shared_ptr query_thread_log; /// Used to log query threads. - std::shared_ptr part_log; /// Used to log operations with parts - std::shared_ptr trace_log; /// Used to log traces from query profiler - std::shared_ptr crash_log; /// Used to log server crashes. - std::shared_ptr text_log; /// Used to log all text messages. - std::shared_ptr metric_log; /// Used to log all metrics. - std::shared_ptr error_log; /// Used to log errors. - std::shared_ptr filesystem_cache_log; - std::shared_ptr filesystem_read_prefetches_log; - std::shared_ptr s3_queue_log; - std::shared_ptr azure_queue_log; - /// Metrics from system.asynchronous_metrics. - std::shared_ptr asynchronous_metric_log; - /// OpenTelemetry trace spans. 
- std::shared_ptr opentelemetry_span_log; - /// Used to log queries of materialized and live views - std::shared_ptr query_views_log; - /// Used to log all actions of ZooKeeper client - std::shared_ptr zookeeper_log; - /// Login, LogOut and Login failure events - std::shared_ptr session_log; - /// Events related to transactions - std::shared_ptr transactions_info_log; - /// Used to log processors profiling - std::shared_ptr processors_profile_log; - std::shared_ptr asynchronous_insert_log; - /// Backup and restore events - std::shared_ptr backup_log; - /// Log blob storage operations - std::shared_ptr blob_storage_log; +/// NOLINTBEGIN(bugprone-macro-parentheses) +#define PUBLIC_MEMBERS(log_type, member, descr) \ + std::shared_ptr member; \ - std::vector logs; + LIST_OF_ALL_SYSTEM_LOGS(PUBLIC_MEMBERS) +#undef PUBLIC_MEMBERS +/// NOLINTEND(bugprone-macro-parentheses) + +private: + std::vector getAllLogs() const; }; struct SystemLogSettings From 633f700df60b878e59fe17d6204faf4e984b1009 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 2 Aug 2024 12:14:58 +0200 Subject: [PATCH 1832/2361] adjust tests --- .../test_system_flush_logs/test.py | 63 +++-- .../test_system_logs_recreate/test.py | 225 +++++++++--------- 2 files changed, 146 insertions(+), 142 deletions(-) diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index 0399122406a..44269883d1b 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -12,18 +12,6 @@ node = cluster.add_instance( stay_alive=True, ) -system_logs = [ - # enabled by default - ("system.text_log", 1), - ("system.query_log", 1), - ("system.query_thread_log", 1), - ("system.part_log", 1), - ("system.trace_log", 1), - ("system.metric_log", 1), - ("system.error_log", 1), -] - - @pytest.fixture(scope="module", autouse=True) def start_cluster(): try: @@ -33,22 +21,29 @@ def start_cluster(): cluster.shutdown() -@pytest.fixture(scope="function") -def flush_logs(): +def test_system_logs_exists(): + system_logs = [ + # disabled by default + ("system.text_log", 0), + ("system.query_log", 1), + ("system.query_thread_log", 1), + ("system.part_log", 1), + ("system.trace_log", 1), + ("system.metric_log", 1), + ("system.error_log", 1), + ] + node.query("SYSTEM FLUSH LOGS") - - -@pytest.mark.parametrize("table,exists", system_logs) -def test_system_logs(flush_logs, table, exists): - q = "SELECT * FROM {}".format(table) - if exists: - node.query(q) - else: - response = node.query_and_get_error(q) - assert ( - "Table {} does not exist".format(table) in response - or "Unknown table expression identifier '{}'".format(table) in response - ) + for table, exists in system_logs: + q = "SELECT * FROM {}".format(table) + if exists: + node.query(q) + else: + response = node.query_and_get_error(q) + assert ( + "Table {} does not exist".format(table) in response + or "Unknown table expression identifier '{}'".format(table) in response + ) # Logic is tricky, let's check that there is no hang in case of message queue @@ -67,11 +62,14 @@ def test_system_logs_non_empty_queue(): def test_system_suspend(): - node.query("CREATE TABLE t (x DateTime) ENGINE=Memory;") - node.query("INSERT INTO t VALUES (now());") - node.query("SYSTEM SUSPEND FOR 1 SECOND;") - node.query("INSERT INTO t VALUES (now());") - assert "1\n" == node.query("SELECT max(x) - min(x) >= 1 FROM t;") + try: + node.query("CREATE TABLE t (x DateTime) ENGINE=Memory;") + node.query("INSERT INTO t VALUES 
(now());") + node.query("SYSTEM SUSPEND FOR 1 SECOND;") + node.query("INSERT INTO t VALUES (now());") + assert "1\n" == node.query("SELECT max(x) - min(x) >= 1 FROM t;") + finally: + node.query("DROP TABLE IF EXISTS t;") def test_log_max_size(start_cluster): @@ -95,6 +93,7 @@ def test_log_max_size(start_cluster): ] ) + node.query("SYSTEM FLUSH LOGS") node.query(f"TRUNCATE TABLE IF EXISTS system.query_log") node.restart_clickhouse() diff --git a/tests/integration/test_system_logs_recreate/test.py b/tests/integration/test_system_logs_recreate/test.py index d5347ae7dea..c6d5861904c 100644 --- a/tests/integration/test_system_logs_recreate/test.py +++ b/tests/integration/test_system_logs_recreate/test.py @@ -33,124 +33,129 @@ def test_system_logs_recreate(): "error_log", ] - node.query("SYSTEM FLUSH LOGS") - for table in system_logs: - assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") - assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + try: + node.query("SYSTEM FLUSH LOGS") + for table in system_logs: + assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") + assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 1 ) - == 1 - ) - # NOTE: we use zzz- prefix to make it the last file, - # so that it will be applied last. - for table in system_logs: - node.exec_in_container( - [ - "bash", - "-c", - f"""echo " - - <{table}> - ENGINE = Null - - - - " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml - """, - ] - ) - - node.restart_clickhouse() - node.query("SYSTEM FLUSH LOGS") - for table in system_logs: - assert "ENGINE = MergeTree" not in node.query( - f"SHOW CREATE TABLE system.{table}" - ) - assert "ENGINE = Null" in node.query(f"SHOW CREATE TABLE system.{table}") - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + # NOTE: we use zzz- prefix to make it the last file, + # so that it will be applied last. 
+ for table in system_logs: + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " + + <{table}> + ENGINE = Null + + + + " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml + """, + ] ) - == 2 - ) - # apply only storage_policy for all system tables - for table in system_logs: - node.exec_in_container( - [ - "bash", - "-c", - f"""echo " - - <{table}> - system_tables - - - " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml - """, - ] - ) - node.restart_clickhouse() - node.query("SYSTEM FLUSH LOGS") - import logging - - for table in system_logs: - create_table_sql = node.query(f"SHOW CREATE TABLE system.{table} FORMAT TSVRaw") - logging.debug( - "With storage policy, SHOW CREATE TABLE system.%s is: %s", - table, - create_table_sql, - ) - assert "ENGINE = MergeTree" in create_table_sql - assert "ENGINE = Null" not in create_table_sql - assert "SETTINGS storage_policy = 'system_tables'" in create_table_sql - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + node.restart_clickhouse() + node.query("SYSTEM FLUSH LOGS") + for table in system_logs: + assert "ENGINE = MergeTree" not in node.query( + f"SHOW CREATE TABLE system.{table}" ) - == 3 - ) - - for table in system_logs: - node.exec_in_container( - ["rm", f"/etc/clickhouse-server/config.d/zzz-override-{table}.xml"] - ) - - node.restart_clickhouse() - node.query("SYSTEM FLUSH LOGS") - for table in system_logs: - assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") - assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + assert "ENGINE = Null" in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 2 ) - == 4 - ) - node.query("SYSTEM FLUSH LOGS") - # Ensure that there was no superfluous RENAME's - # IOW that the table created only when the structure is indeed different. 
- for table in system_logs: - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + # apply only storage_policy for all system tables + for table in system_logs: + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " + + <{table}> + system_tables + + + " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml + """, + ] ) - == 4 - ) + node.restart_clickhouse() + node.query("SYSTEM FLUSH LOGS") + import logging + + for table in system_logs: + create_table_sql = node.query(f"SHOW CREATE TABLE system.{table} FORMAT TSVRaw") + logging.debug( + "With storage policy, SHOW CREATE TABLE system.%s is: %s", + table, + create_table_sql, + ) + assert "ENGINE = MergeTree" in create_table_sql + assert "ENGINE = Null" not in create_table_sql + assert "SETTINGS storage_policy = 'system_tables'" in create_table_sql + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 3 + ) + + for table in system_logs: + node.exec_in_container( + ["rm", f"/etc/clickhouse-server/config.d/zzz-override-{table}.xml"] + ) + + node.restart_clickhouse() + node.query("SYSTEM FLUSH LOGS") + for table in system_logs: + assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") + assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 4 + ) + + node.query("SYSTEM FLUSH LOGS") + # Ensure that there was no superfluous RENAME's + # IOW that the table created only when the structure is indeed different. + for table in system_logs: + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 4 + ) + finally: + for table in system_logs: + for syffix in range(3): + node.query(f"DROP TABLE IF EXISTS system.{table}_{syffix} sync") def test_drop_system_log(): From 08f8d94856841254957e1e746685d32de2074dc6 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 2 Aug 2024 12:16:44 +0200 Subject: [PATCH 1833/2361] no flush backup logs at shutdown, flush all logs --- src/Backups/BackupsWorker.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 106aa89082d..0b93ae6d547 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -1171,8 +1171,6 @@ void BackupsWorker::waitAll() for (const auto & id : current_operations) wait(id, /* rethrow_exception= */ false); - backup_log->flush(backup_log->getLastLogIndex(), /* should_prepare_tables_anyway */ false); - LOG_INFO(log, "Backups and restores finished"); } @@ -1223,8 +1221,6 @@ void BackupsWorker::cancelAll(bool wait_) for (const auto & id : current_operations) wait(id, /* rethrow_exception= */ false); - backup_log->flush(backup_log->getLastLogIndex(), /* should_prepare_tables_anyway */ false); - LOG_INFO(log, "Backups and restores finished or stopped"); } From 3cd3b2857c8104933eda67f901d8b098082c1049 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 12:33:04 +0200 Subject: [PATCH 1834/2361] 00965_shard_unresolvable_addresses is still slow --- .../queries/0_stateless/00965_shard_unresolvable_addresses.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql index 41bf4d261f6..f2afb974a06 100644 --- 
a/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql +++ b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql @@ -1,4 +1,5 @@ --- Tags: shard +-- Tags: shard, no-fasttest +-- no-fasttest: Slow timeouts SET prefer_localhost_replica = 1; SET connections_with_failover_max_tries=1; From e8bf5129c03dd88712829e9e187145e248ba4f04 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Wed, 7 Aug 2024 12:44:31 +0200 Subject: [PATCH 1835/2361] fix docs for clickhouse-keeper-client Starting with 24.7, paths are not accepted as bare strings, only as string literals - https://github.com/ClickHouse/ClickHouse/pull/65494 --- .../utilities/clickhouse-keeper-client.md | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/en/operations/utilities/clickhouse-keeper-client.md b/docs/en/operations/utilities/clickhouse-keeper-client.md index 6407c66783b..a66ecbc1372 100644 --- a/docs/en/operations/utilities/clickhouse-keeper-client.md +++ b/docs/en/operations/utilities/clickhouse-keeper-client.md @@ -28,39 +28,39 @@ A client application to interact with clickhouse-keeper by its native protocol.
Confirmation required - `flwc ` -- Executes four-letter-word command - `help` -- Prints this message -- `get_direct_children_number [path]` -- Get numbers of direct children nodes under a specific path -- `get_all_children_number [path]` -- Get all numbers of children nodes under a specific path -- `get_stat [path]` -- Returns the node's stat (default `.`) -- `find_super_nodes [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`) +- `get_direct_children_number '[path]'` -- Get numbers of direct children nodes under a specific path +- `get_all_children_number '[path]'` -- Get all numbers of children nodes under a specific path +- `get_stat '[path]'` -- Returns the node's stat (default `.`) +- `find_super_nodes '[path]'` -- Finds nodes with number of children larger than some threshold for the given path (default `.`) - `delete_stale_backups` -- Deletes ClickHouse nodes used for backups that are now inactive - `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10) -- `sync ` -- Synchronizes node between processes and leader +- `sync ''` -- Synchronizes node between processes and leader - `reconfig "" [version]` -- Reconfigure Keeper cluster. See https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper#reconfiguration From e2e9ae776d0e0795a565fa7ed6a29671fcda377e Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 7 Aug 2024 09:03:29 +0000 Subject: [PATCH 1836/2361] Follow-up to #63898, pt. II --- .../0_stateless/00366_multi_statements.sh | 20 +++--- .../00443_preferred_block_size_bytes.sh | 4 +- ...ess_to_temporary_table_in_readonly_mode.sh | 20 +++--- ...4_performance_introspection_and_logging.sh | 4 +- ..._fetch_merged_or_mutated_part_zookeeper.sh | 6 +- ...d_optimize_skip_select_on_unused_shards.sh | 30 ++++----- ...p_select_on_unused_shards_with_prewhere.sh | 26 ++++---- ...ated_minimalistic_part_header_zookeeper.sh | 4 +- .../queries/0_stateless/00837_minmax_index.sh | 2 +- .../queries/0_stateless/00838_unique_index.sh | 2 +- .../0_stateless/00907_set_index_max_rows.sh | 2 +- .../0_stateless/00908_bloom_filter_index.sh | 10 +-- .../queries/0_stateless/00942_mutate_index.sh | 2 +- .../0_stateless/00943_materialize_index.sh | 4 +- .../00944_clear_index_in_partition.sh | 2 +- .../00964_bloom_index_string_functions.sh | 2 +- .../00965_set_index_string_functions.sh | 2 +- .../00974_primary_key_for_lowCardinality.sh | 4 +- tests/queries/0_stateless/00990_hasToken.sh | 2 +- .../01013_sync_replica_timeout_zookeeper.sh | 6 +- ...th_nondeterministic_functions_zookeeper.sh | 4 +- .../01037_polygon_dicts_correctness_all.sh | 6 +- .../01037_polygon_dicts_correctness_fast.sh | 6 +- .../01037_polygon_dicts_simple_functions.sh | 6 +- .../01055_minmax_index_compact_parts.sh | 2 +- .../01077_mutations_index_consistency.sh | 10 +-- .../01187_set_profile_as_setting.sh | 8 +-- ..._block_size_rows_for_materialized_views.sh | 2 +- .../01307_multiple_leaders_zookeeper.sh | 10 +-- .../0_stateless/01415_sticking_mutations.sh | 2 +- tests/queries/0_stateless/01451_dist_logs.sh | 2 +- .../01459_manual_write_to_replicas.sh | 4 +- .../01459_manual_write_to_replicas_quorum.sh | 4 +- ..._write_to_replicas_quorum_detach_attach.sh | 4 +- ...house_server_start_with_embedded_config.sh | 2 +- .../0_stateless/01508_format_regexp_raw.sh | 4 +- .../01509_dictionary_preallocate.sh | 2 +- ...01510_format_regexp_raw_low_cardinality.sh | 4 +- .../0_stateless/01526_initial_query_id.sh | 2 +- 
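A minimal sketch of the keeper-client change documented in the patch above, mirroring the session from the updated page and assuming the same `/keeper/api_version` node; the 24.7 version boundary is taken from the commit message, not verified here.

Before 24.7 (bare path, as in the old wording):

    / :) get keeper/api_version
    2

From 24.7 on (the path must be passed as a quoted string literal):

    / :) get 'keeper/api_version'
    2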
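The remaining hunks of this patch apply one mechanical substitution across the stateless test scripts: the `-n` flag is dropped from clickhouse-client invocations, so `-n --query` becomes `--query`, `-nm -q` becomes `-m -q`, and a bare `-n` simply disappears, presumably because a separate multi-query flag is no longer needed after #63898. A representative before/after, condensed from the hunks that follow (the statements shown are taken from those tests and stand in for any query text):

    # before
    $CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2;"
    $CLICKHOUSE_CLIENT -nm -q "DROP TABLE IF EXISTS t; SELECT count() FROM system.one;"

    # after
    $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;"
    $CLICKHOUSE_CLIENT -m -q "DROP TABLE IF EXISTS t; SELECT count() FROM system.one;"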
.../01599_mutation_query_params.sh | 2 +- .../01600_quota_by_forwarded_ip.sh | 4 +- .../01684_ssd_cache_dictionary_simple_key.sh | 2 +- .../01685_ssd_cache_dictionary_complex_key.sh | 2 +- .../01691_parser_data_type_exponential.sh | 2 +- ...ojections_optimize_aggregation_in_order.sh | 2 +- ...s_partial_optimize_aggregation_in_order.sh | 2 +- .../01753_optimize_aggregation_in_order.sh | 2 +- .../01758_optimize_skip_unused_shards_once.sh | 2 +- ...91_dist_INSERT_block_structure_mismatch.sh | 2 +- .../01814_distributed_push_down_limit.sh | 6 +- .../01853_dictionary_cache_duplicates.sh | 8 +-- .../01872_initial_query_start_time.sh | 2 +- ...75_ssd_cache_dictionary_decimal256_type.sh | 2 +- .../01890_materialized_distributed_join.sh | 2 +- .../01903_ssd_cache_dictionary_array_type.sh | 2 +- ..._cache_dictionary_default_nullable_type.sh | 2 +- ...1927_query_views_log_matview_exceptions.sh | 4 +- .../0_stateless/01947_multiple_pipe_read.sh | 4 +- .../02003_memory_limit_in_client.sh | 32 ++++----- .../02021_create_database_with_comment.sh | 2 +- .../02050_client_profile_events.sh | 8 +-- .../02221_parallel_replicas_bug.sh | 2 +- .../02221_system_zookeeper_unrestricted.sh | 4 +- ...2221_system_zookeeper_unrestricted_like.sh | 4 +- ...parallel_distributed_insert_select_view.sh | 6 +- ...arallel_reading_from_replicas_benchmark.sh | 4 +- .../02232_allow_only_replicated_engine.sh | 6 +- .../0_stateless/02250_ON_CLUSTER_grant.sh | 4 +- tests/queries/0_stateless/02262_column_ttl.sh | 4 +- .../0_stateless/02286_parallel_final.sh | 4 +- ..._distinct_in_order_optimization_explain.sh | 66 +++++++++---------- ..._column_ttl_expired_column_optimization.sh | 2 +- .../0_stateless/02361_fsync_profile_events.sh | 4 +- ...7_extend_protocol_with_query_parameters.sh | 10 +-- ...ting_by_input_stream_properties_explain.sh | 2 +- .../0_stateless/02417_load_marks_async.sh | 2 +- 76 files changed, 229 insertions(+), 229 deletions(-) diff --git a/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh index 0b2e80fe457..8546e547581 100755 --- a/tests/queries/0_stateless/00366_multi_statements.sh +++ b/tests/queries/0_stateless/00366_multi_statements.sh @@ -14,22 +14,22 @@ $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2" -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2;" -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES (1),(2),(3);" -$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT 
-n --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" -$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" +$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1;" @@ -48,4 +48,4 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=INSERT+INTO+t_00366+VALUES" -d "(7),(8),(9)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="DROP TABLE t_00366;" +$CLICKHOUSE_CLIENT --query="DROP TABLE t_00366;" diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index 27b9f5c00c7..0635fbc2a57 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -43,10 +43,10 @@ popd > /dev/null #SCRIPTDIR=`dirname "$SCRIPTPATH"` SCRIPTDIR=$SCRIPTPATH -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED rm "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout diff --git a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh index 560b97a1d1b..5550fa69d3d 100755 --- a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh +++ b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_readonly; CREATE TABLE test_readonly ( ID Int @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT -n --query=" ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; CREATE TEMPORARY TABLE readonly ( ID Int @@ -26,7 +26,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -34,7 +34,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; DROP TABLE test_readonly; " 2> /dev/null; @@ -46,7 +46,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; CREATE TEMPORARY TABLE readonly ( ID Int @@ -58,7 +58,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -66,7 +66,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; DROP TABLE test_readonly; " 2> /dev/null; @@ -78,7 +78,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; CREATE TEMPORARY TABLE readonly ( ID Int @@ -90,7 +90,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -98,7 +98,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; DROP TABLE test_readonly; " 2> /dev/null; diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index 93fd0c4a977..e9a4369a5bf 100755 --- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -19,13 +19,13 @@ settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_event # Test insert logging on each block and checkPacket() method -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " DROP TABLE IF EXISTS null_00634; CREATE TABLE null_00634 (i UInt8) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple();" head -c 1000 /dev/zero | $CLICKHOUSE_CLIENT $settings --max_insert_block_size=10 --min_insert_block_size_rows=1 --min_insert_block_size_bytes=1 -q "INSERT INTO null_00634 FORMAT RowBinary" -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " SELECT count() FROM null_00634; DROP TABLE null_00634;" diff --git a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh 
b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh index 96d5764780f..d69e14bdbb9 100755 --- a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh +++ b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/mergetree_mutations.lib -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS fetches_r1 SYNC; DROP TABLE IF EXISTS fetches_r2 SYNC" @@ -17,7 +17,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r2(x UInt32) ENGINE Replicate SETTINGS prefer_fetch_merged_part_time_threshold=0, \ prefer_fetch_merged_part_size_threshold=0" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET insert_keeper_fault_injection_probability=0; INSERT INTO fetches_r1 VALUES (1); INSERT INTO fetches_r1 VALUES (2); @@ -51,6 +51,6 @@ ${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA fetches_r2" ${CLICKHOUSE_CLIENT} --query="SELECT '*** Check data after fetch/clone of mutated part ***'" ${CLICKHOUSE_CLIENT} --query="SELECT _part, * FROM fetches_r2 ORDER BY x" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE fetches_r1 SYNC; DROP TABLE fetches_r2 SYNC" diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh index 09f20284402..989096a26d6 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh @@ -25,83 +25,83 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed WHERE a = 0 AND b | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complext expressions for constant folding - all should pass. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 1 AND a = 0 AND b = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a IN (0, 1) AND b IN (0, 1); " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 1 AND b = 1; " # TODO: should pass one day. 
-#${CLICKHOUSE_CLIENT} -n --query=" +#${CLICKHOUSE_CLIENT} --query=" # SET optimize_skip_unused_shards = 1; # SELECT count(*) FROM distributed WHERE a = 0 AND b >= 0 AND b <= 1; #" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c != 10; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND (a+b)*b != 12; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE (a = 0 OR a = 1) AND (b = 0 OR b = 1); " # These ones should fail. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh index 035907bddd7..b3dff2ea69a 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh @@ -30,73 +30,73 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed_00754 PREWHERE a | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complex expressions for constant folding - all should pass. 
-${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 AND a = 0 WHERE b = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 WHERE b = 1 AND length(c) = 5; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) AND b IN (0, 1) WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) WHERE b IN (0, 1) AND c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 1 AND b = 1 WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE (a = 0 OR a = 1) WHERE (b = 0 OR b = 1); " # These should fail. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 WHERE c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index 12d889a7137..8f7a1a9ae98 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS part_header_r1; DROP TABLE IF EXISTS part_header_r2; @@ -62,7 +62,7 @@ do [[ $count1 == 1 && $count2 == 1 ]] && break done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SELECT '*** Test part removal ***'; SELECT '*** replica 1 ***'; diff --git a/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh index 
e4de0b9ebfc..ff487f50ee0 100755 --- a/tests/queries/0_stateless/00837_minmax_index.sh +++ b/tests/queries/0_stateless/00837_minmax_index.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh index b267b6a8eb3..a3aba4f26b6 100755 --- a/tests/queries/0_stateless/00838_unique_index.sh +++ b/tests/queries/0_stateless/00838_unique_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh index 3707aaf2ca6..bdd0f36346f 100755 --- a/tests/queries/0_stateless/00907_set_index_max_rows.sh +++ b/tests/queries/0_stateless/00907_set_index_max_rows.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh index 25a6567b894..3bd169dd6df 100755 --- a/tests/queries/0_stateless/00908_bloom_filter_index.sh +++ b/tests/queries/0_stateless/00908_bloom_filter_index.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx3;" # NGRAM BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx ( k UInt64, @@ -22,7 +22,7 @@ CREATE TABLE bloom_filter_idx ORDER BY k SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi';" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx2 ( k UInt64, @@ -109,7 +109,7 @@ $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom # TOKEN BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx3 ( k UInt64, @@ -147,7 +147,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx2" $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx3" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx_na;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx_na ( na Array(Array(String)), @@ -156,7 +156,7 @@ CREATE TABLE bloom_filter_idx_na ORDER BY na" 2>&1 | grep -c 'DB::Exception: Unexpected type Array(Array(String)) of bloom filter index' # NGRAM BF with IPv6 -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_ipv6_idx ( foo IPv6, diff --git a/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh index 6ebb30c25b9..e1e23639e85 100755 --- a/tests/queries/0_stateless/00942_mutate_index.sh +++ b/tests/queries/0_stateless/00942_mutate_index.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh index 6ff7d34a9d7..e4a585fce97 
100755 --- a/tests/queries/0_stateless/00943_materialize_index.sh +++ b/tests/queries/0_stateless/00943_materialize_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, @@ -34,7 +34,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO minmax_idx VALUES $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0 FORMAT JSON" | grep "rows_read" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" ALTER TABLE minmax_idx ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 SETTINGS mutations_sync = 2;" $CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 1 SETTINGS mutations_sync = 2;" diff --git a/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh index 4655077960f..a12536da239 100755 --- a/tests/queries/0_stateless/00944_clear_index_in_partition.sh +++ b/tests/queries/0_stateless/00944_clear_index_in_partition.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh index e2ec7fd42e4..9e410f09b13 100755 --- a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh +++ b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx;" # NGRAM BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh index 8892fb11752..0f29c3dd2f2 100755 --- a/tests/queries/0_stateless/00965_set_index_string_functions.sh +++ b/tests/queries/0_stateless/00965_set_index_string_functions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh index 389d433c7e2..ba260042f47 100755 --- a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh +++ b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS lowString;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS string;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" create table lowString ( a LowCardinality(String), @@ -18,7 +18,7 @@ ENGINE = MergeTree() PARTITION BY toYYYYMM(b) ORDER BY (a)" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" create table string 
( a String, diff --git a/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh index 6a1d4ff5ccf..d79472aa5a5 100755 --- a/tests/queries/0_stateless/00990_hasToken.sh +++ b/tests/queries/0_stateless/00990_hasToken.sh @@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We should have correct env vars from shell_config.sh to run this test -python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -nm +python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -m diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh index 55bbfb3ff11..54f1bbe29dc 100755 --- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh +++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) R1=table_1013_1 R2=table_1013_2 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -19,13 +19,13 @@ ${CLICKHOUSE_CLIENT} -n -q " INSERT INTO $R1 VALUES (1) " -timeout 10s ${CLICKHOUSE_CLIENT} -n -q " +timeout 10s ${CLICKHOUSE_CLIENT} -q " SET receive_timeout=1; SYSTEM SYNC REPLICA $R2 " 2>&1 | grep -F -q "Code: 159. DB::Exception" && echo 'OK' || echo 'Failed!' # By dropping tables all related SYNC REPLICA queries would be terminated as well -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; " diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index 4f35b69da0b..053fd9d9d49 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -9,7 +9,7 @@ R1=table_1017_1 R2=table_1017_2 T1=table_1017_merge -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -68,7 +68,7 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" --allow_nondeterministic_mutations=1 2>&1 \ && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh index 9a26f78a8ee..5c67fe08fbf 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON tar -xf "${CURDIR}"/01037_test_data_search.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS points; CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --max_insert_block_si rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ 
-43,7 +43,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh index 47f7a5c1c4f..591978d1129 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") tar -xf "${CURDIR}"/01037_test_data_perf.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -22,7 +22,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --min_chunk_bytes_for rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ -42,7 +42,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh index d1ee3f283bc..ac033ff4eb8 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TMP_DIR=${CLICKHOUSE_TMP}/tmp mkdir -p $TMP_DIR -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory; @@ -53,7 +53,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array ( @@ -106,7 +106,7 @@ do diff -q "${CURDIR}/01037_polygon_dicts_simple_functions.ans" "$outputFile" done -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DICTIONARY dict_array; DROP DICTIONARY dict_tuple; DROP TABLE polygons_array; diff --git a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh index 0b14ef8f6fa..29ce4da02ed 100755 --- a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh +++ b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh index ffbe3692b64..f103692de56 100755 --- a/tests/queries/0_stateless/01077_mutations_index_consistency.sh +++ b/tests/queries/0_stateless/01077_mutations_index_consistency.sh @@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS movement" -$CLICKHOUSE_CLIENT -n --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = 
MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" +$CLICKHOUSE_CLIENT --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" $CLICKHOUSE_CLIENT --query "insert into movement select toDateTime('2020-01-22 00:00:00', 'Asia/Istanbul') + number%(23*3600) from numbers(1000000);" $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE movement FINAL" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -26,7 +26,7 @@ ORDER BY Hour DESC $CLICKHOUSE_CLIENT --query "alter table movement delete where date >= toDateTime('2020-01-22T16:00:00', 'Asia/Istanbul') and date < toDateTime('2020-01-22T17:00:00', 'Asia/Istanbul') SETTINGS mutations_sync = 2" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -37,7 +37,7 @@ ORDER BY Hour DESC " | grep "16:00:00" | wc -l -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -48,7 +48,7 @@ ORDER BY Hour DESC " | grep "22:00:00" | cut -f1 -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour diff --git a/tests/queries/0_stateless/01187_set_profile_as_setting.sh b/tests/queries/0_stateless/01187_set_profile_as_setting.sh index 42f596c45d6..f6c6fd0be34 100755 --- a/tests/queries/0_stateless/01187_set_profile_as_setting.sh +++ b/tests/queries/0_stateless/01187_set_profile_as_setting.sh @@ -7,11 +7,11 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -m -q "select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -n -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -m -q "select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=fatal/g') -$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=select+value,changed+from+system.settings+where+name='readonly'" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&profile=default&query=select+value,changed+from+system.settings+where+name='readonly'" diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 5f82731c54e..1ec53399958 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ 
b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -10,7 +10,7 @@ set -o pipefail # shellcheck disable=SC2120 function execute() { - ${CLICKHOUSE_CLIENT} -n "$@" + ${CLICKHOUSE_CLIENT} "$@" } # diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index db986e74b6b..02aa0f76be5 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -12,8 +12,8 @@ DATA_SIZE=200 SEQ=$(seq 0 $(($NUM_REPLICAS - 1))) -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE IF EXISTS r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done function thread() { @@ -30,6 +30,6 @@ done wait -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SYSTEM SYNC REPLICA r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SELECT count(), sum(x) FROM r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SELECT count(), sum(x) FROM r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE r$REPLICA"; done diff --git a/tests/queries/0_stateless/01415_sticking_mutations.sh b/tests/queries/0_stateless/01415_sticking_mutations.sh index b7c8768a65d..97467c3ce9d 100755 --- a/tests/queries/0_stateless/01415_sticking_mutations.sh +++ b/tests/queries/0_stateless/01415_sticking_mutations.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations" function check_sticky_mutations() { - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE sticking_mutations ( + $CLICKHOUSE_CLIENT --query "CREATE TABLE sticking_mutations ( date Date, key UInt64, value1 String, diff --git a/tests/queries/0_stateless/01451_dist_logs.sh b/tests/queries/0_stateless/01451_dist_logs.sh index 23dee7a827d..e281e232bb5 100755 --- a/tests/queries/0_stateless/01451_dist_logs.sh +++ b/tests/queries/0_stateless/01451_dist_logs.sh @@ -10,4 +10,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # triggered not for the first query for _ in {1..20}; do echo "select * from remote('127.{2,3}', system.numbers) where number = 10 limit 1;" -done | ${CLICKHOUSE_CLIENT} -n 2>/dev/null +done | ${CLICKHOUSE_CLIENT} 2>/dev/null diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index 56620d848a3..cc574557438 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -31,7 +31,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh index 91a73471557..24ea3ba3835 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh @@ -16,7 +16,7 @@ unset CLICKHOUSE_WRITE_COVERAGE NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -39,7 +39,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh index 1f76a2efc6b..a2ef0d52328 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=6 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -36,7 +36,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh index 29593ea4fb5..6954fef7314 100755 --- a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh +++ b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh @@ -34,7 +34,7 @@ done # Check access rights -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; diff --git a/tests/queries/0_stateless/01508_format_regexp_raw.sh b/tests/queries/0_stateless/01508_format_regexp_raw.sh index 8cf1bd73566..52613c28b2f 100755 --- a/tests/queries/0_stateless/01508_format_regexp_raw.sh +++ b/tests/queries/0_stateless/01508_format_regexp_raw.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b String) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b String) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' 
-${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01509_dictionary_preallocate.sh b/tests/queries/0_stateless/01509_dictionary_preallocate.sh index 2a22a307a08..0459f69b0ad 100755 --- a/tests/queries/0_stateless/01509_dictionary_preallocate.sh +++ b/tests/queries/0_stateless/01509_dictionary_preallocate.sh @@ -15,7 +15,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # PREALLOCATE attribute (and also for the history/greppability, that it was # such). -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS data_01509; DROP DICTIONARY IF EXISTS dict_01509; CREATE TABLE data_01509 diff --git a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh index 594caca7d04..dc178d081bf 100755 --- a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh +++ b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01526_initial_query_id.sh b/tests/queries/0_stateless/01526_initial_query_id.sh index e77764ee34e..8ba27a04d60 100755 --- a/tests/queries/0_stateless/01526_initial_query_id.sh +++ b/tests/queries/0_stateless/01526_initial_query_id.sh @@ -15,7 +15,7 @@ ${CLICKHOUSE_CURL} \ --get \ --data-urlencode "query=select 1 format Null" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " system flush logs; select interface, initial_query_id = query_id from system.query_log diff --git a/tests/queries/0_stateless/01599_mutation_query_params.sh b/tests/queries/0_stateless/01599_mutation_query_params.sh index 52b0131a9c2..5b604c96028 100755 --- a/tests/queries/0_stateless/01599_mutation_query_params.sh +++ b/tests/queries/0_stateless/01599_mutation_query_params.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS test; CREATE TABLE test diff --git a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh index 1d768c8b027..834eba8f25c 100755 --- a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh +++ b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " CREATE USER quoted_by_ip_${CLICKHOUSE_DATABASE}; CREATE USER quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}; @@ -57,7 +57,7 @@ ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 5.6.7.8, 1.2.3.4' -sS "${CLICKHOUSE_URL} ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 1.2.3.4, 5.6.7.8' -sS "${CLICKHOUSE_URL}&user=quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}" -d "SELECT count() FROM numbers(10)" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP QUOTA IF EXISTS quota_by_ip_${CLICKHOUSE_DATABASE}; DROP QUOTA IF EXISTS quota_by_forwarded_ip; diff --git a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh index 0e5c2862066..6a7eb975c87 100755 --- a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh +++ b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DATABASE IF EXISTS 01684_database_for_cache_dictionary; CREATE DATABASE 01684_database_for_cache_dictionary; diff --git a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh index 55061b9a643..c2d222a86ea 100755 --- a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh +++ b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE complex_key_simple_attributes_source_table ( id UInt64, diff --git a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh index f8004f9350d..5d115e09a79 100755 --- a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh +++ b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # Check that DataType parser does not have exponential complexity in the case found by fuzzer. 
-for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 
222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done +for _ in {1..10}; do ${CLICKHOUSE_CLIENT} --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 
222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done diff --git a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh index a166837e01a..f38e53f898a 100755 --- a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS in_order_agg_01710; CREATE TABLE in_order_agg_01710 diff --git a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh index ee73974e8a4..01537524730 100755 --- a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS in_order_agg_partial_01710; CREATE TABLE in_order_agg_partial_01710 diff --git a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh index 2a7345f4865..f9681ebe4f5 100755 --- a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -nm -q " +$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -m -q " drop table if exists data_01753; create table data_01753 (key Int) engine=MergeTree() order by key as select * from numbers(8); select * from data_01753 group by key settings max_block_size=1; diff --git a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh index b963f3a618f..3c9e12f780b 100755 --- a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh +++ b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -nm -q " +$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -m -q " create table dist_01758 as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); select * from dist_01758 where dummy = 0 format Null; " |& grep -o "StorageDistributed (dist_01758).*" diff --git a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh index 9c51b82282c..ee46f8194b9 100755 --- a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh +++ b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -nm -q " +$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -m -q " DROP TABLE IF EXISTS tmp_01683; DROP TABLE IF EXISTS dist_01683; diff --git a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh index 4b75102e9cf..f3e8ceffff6 100755 --- a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh +++ b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh @@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function setup() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table if exists data_01814; drop table if exists dist_01814; @@ -24,7 +24,7 @@ function setup() function cleanup() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table data_01814; drop table dist_01814; " @@ -67,7 +67,7 @@ function test_distributed_push_down_limit_with_query_log() $CLICKHOUSE_CLIENT "${settings_and_opts[@]}" -q "select * from $table group by key limit $offset, 10" - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system flush logs; select read_rows from system.query_log where diff --git a/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh b/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh index 218320772c9..2b122debff5 100755 --- a/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh +++ b/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function run_test_once() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS simple_key_source_table_01863; CREATE TABLE simple_key_source_table_01863 ( @@ -29,13 +29,13 @@ function run_test_once() LIFETIME(MIN 0 MAX 1000); " - prev=$($CLICKHOUSE_CLIENT -nm -q "SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1") - curr=$($CLICKHOUSE_CLIENT -nm -q " + prev=$($CLICKHOUSE_CLIENT -m -q "SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1") + curr=$($CLICKHOUSE_CLIENT -m -q " SELECT toUInt64(1) as key, dictGet('simple_key_cache_dictionary_01863', 'value', key) FORMAT Null; SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1 ") - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " DROP DICTIONARY simple_key_cache_dictionary_01863; DROP TABLE simple_key_source_table_01863; " diff --git a/tests/queries/0_stateless/01872_initial_query_start_time.sh b/tests/queries/0_stateless/01872_initial_query_start_time.sh index 6a935602ea4..ff3e1954c75 100755 --- a/tests/queries/0_stateless/01872_initial_query_start_time.sh +++ b/tests/queries/0_stateless/01872_initial_query_start_time.sh @@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} -q "create table m (dummy UInt8) ENGINE = Distributed('test query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") ${CLICKHOUSE_CLIENT} -q "select * from m format Null" "--query_id=$query_id" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " system flush logs; select anyIf(initial_query_start_time, is_initial_query) = anyIf(initial_query_start_time, not is_initial_query), diff --git a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh index 
8336229a643..1294ba53e82 100755 --- a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh +++ b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET allow_experimental_bigint_types = 1; DROP TABLE IF EXISTS dictionary_decimal_source_table; diff --git a/tests/queries/0_stateless/01890_materialized_distributed_join.sh b/tests/queries/0_stateless/01890_materialized_distributed_join.sh index 88f7dcf9a69..5c04ee8b214 100755 --- a/tests/queries/0_stateless/01890_materialized_distributed_join.sh +++ b/tests/queries/0_stateless/01890_materialized_distributed_join.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists test_distributed; drop table if exists test_source; drop table if exists test_shard; diff --git a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh index 853445daf3f..a44106414ea 100755 --- a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh +++ b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS dictionary_array_source_table; CREATE TABLE dictionary_array_source_table ( diff --git a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh index 0b555cf82c2..a5c65ca87a7 100755 --- a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh +++ b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS dictionary_nullable_source_table; CREATE TABLE dictionary_nullable_source_table ( diff --git a/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh b/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh index 47d5e733480..608107c76d6 100755 --- a/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh +++ b/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - ${CLICKHOUSE_CLIENT} -n -q " + ${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS matview_exception_a_to_c; DROP TABLE IF EXISTS matview_exception_a_to_b; DROP TABLE IF EXISTS table_exception_c; @@ -17,7 +17,7 @@ function cleanup() function setup() { - ${CLICKHOUSE_CLIENT} -n -q " + ${CLICKHOUSE_CLIENT} -q " CREATE TABLE table_exception_a (a String, b Int64) ENGINE = MergeTree ORDER BY b; CREATE TABLE table_exception_b (a Float64, b Int64) ENGINE = MergeTree ORDER BY tuple(); CREATE TABLE table_exception_c (a Float64) ENGINE = MergeTree ORDER BY a; diff --git a/tests/queries/0_stateless/01947_multiple_pipe_read.sh b/tests/queries/0_stateless/01947_multiple_pipe_read.sh index 06a18a55e6e..51709eb574e 100755 --- a/tests/queries/0_stateless/01947_multiple_pipe_read.sh +++ b/tests/queries/0_stateless/01947_multiple_pipe_read.sh @@ -11,7 +11,7 @@ cat "$SAMPLE_FILE" echo '******************' echo 'Read twice from a regular file' -${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -n -q 'select * from table; select * from table;' --file "$SAMPLE_FILE" +${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table; select * from table;' --file "$SAMPLE_FILE" echo '---' ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table WHERE x IN (select x from table);' --file "$SAMPLE_FILE" echo '---' @@ -19,7 +19,7 @@ ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table UNI echo '******************' echo 'Read twice from file descriptor that corresponds to a regular file' -${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -n -q 'select * from table; select * from table;' < "$SAMPLE_FILE" +${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table; select * from table;' < "$SAMPLE_FILE" echo '---' ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table WHERE x IN (select x from table);' < "$SAMPLE_FILE" echo '---' diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.sh b/tests/queries/0_stateless/02003_memory_limit_in_client.sh index 96028f4847a..15cacbff8c5 100755 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.sh +++ b/tests/queries/0_stateless/02003_memory_limit_in_client.sh @@ -4,21 +4,21 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE 
num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" diff --git a/tests/queries/0_stateless/02021_create_database_with_comment.sh b/tests/queries/0_stateless/02021_create_database_with_comment.sh index f77397dc482..d87b0794c91 100755 --- a/tests/queries/0_stateless/02021_create_database_with_comment.sh +++ b/tests/queries/0_stateless/02021_create_database_with_comment.sh @@ -20,7 +20,7 @@ function test_db_comments() local ENGINE_NAME="$1" echo "engine : ${ENGINE_NAME}" - $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -nm <& /dev/null +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' >& /dev/null echo $? 
echo 'regression test for overlap profile events snapshots between queries' -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'regression test for overlap profile events snapshots between queries (clickhouse-local)' -$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'print everything' profile_events="$( @@ -35,5 +35,5 @@ profile_events="$( test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" echo 'check that ProfileEvents is new for each query' -sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') +sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') test "$sleep_function_calls" -eq 1 && echo OK || echo "FAIL ($sleep_function_calls)" diff --git a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh index 3c44a2a7ba7..a382b3859f3 100755 --- a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh +++ b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh @@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -nm < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null +${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -m < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh index db94c59d2de..e23a272a4e8 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2" -${CLICKHOUSE_CLIENT} -n -q" +${CLICKHOUSE_CLIENT} -q" CREATE TABLE sample_table ( key UInt64 ) @@ -16,7 +16,7 @@ ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_ ORDER BY tuple(); " -${CLICKHOUSE_CLIENT} -n -q" +${CLICKHOUSE_CLIENT} -q" CREATE TABLE sample_table_2 ( key UInt64 ) diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh index c62ec14b340..6381d811d5d 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2;" 
-${CLICKHOUSE_CLIENT} -n --query="CREATE TABLE sample_table ( +${CLICKHOUSE_CLIENT} --query="CREATE TABLE sample_table ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like', '1') @@ -16,7 +16,7 @@ ORDER BY tuple(); DROP TABLE IF EXISTS sample_table SYNC;" -${CLICKHOUSE_CLIENT} -n --query "CREATE TABLE sample_table_2 ( +${CLICKHOUSE_CLIENT} --query "CREATE TABLE sample_table_2 ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like_2', '1') diff --git a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh index 376a49fd820..63111cc32e4 100755 --- a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh +++ b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists dst_02225; drop table if exists src_02225; create table dst_02225 (key Int) engine=Memory(); @@ -14,7 +14,7 @@ create table src_02225 (key Int) engine=Memory(); insert into src_02225 values (1); " -$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -nm -q " +$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -m -q " truncate table dst_02225; insert into function remote('127.{1,2}', currentDatabase(), dst_02225, key) select * from remote('127.{1,2}', view(select * from {database:Identifier}.src_02225), key) @@ -29,7 +29,7 @@ settings parallel_distributed_insert_select=2, max_distributed_depth=1; select * from dst_02225; " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table src_02225; drop table dst_02225; " diff --git a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh index bc90f4b2c11..177b373641f 100755 --- a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh +++ b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_02226; create table data_02226 (key Int) engine=MergeTree() order by key as select * from numbers(1); @@ -24,7 +24,7 @@ opts=( $CLICKHOUSE_BENCHMARK --query "select * from remote('127.1', $CLICKHOUSE_DATABASE, data_02226)" "${opts[@]}" >& /dev/null ret=$? 
-$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table data_02226; " diff --git a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh index d1a3825d286..e47a3033681 100755 --- a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh +++ b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh @@ -12,9 +12,9 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON Memory, TABLE ENGINE ON MergeTree, TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_memory (x UInt32) engine = Memory;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh index 66417e9694a..09f9c0c8a98 100755 --- a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh +++ b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - $CLICKHOUSE_CLIENT -nmq " + $CLICKHOUSE_CLIENT -mq " DROP USER IF EXISTS with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP USER IF EXISTS without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP DATABASE IF EXISTS db_with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; @@ -15,7 +15,7 @@ function cleanup() cleanup trap cleanup EXIT -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " CREATE USER with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; CREATE USER without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh index b5e29c9b2a1..c620d3b6d9c 100755 --- a/tests/queries/0_stateless/02262_column_ttl.sh +++ b/tests/queries/0_stateless/02262_column_ttl.sh @@ -14,7 +14,7 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # note, that this should be written in .sh since we need $CLICKHOUSE_DATABASE # not 'default' to catch text_log -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists ttl_02262; drop table if exists this_text_log; @@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -nm -q " ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = '$CLICKHOUSE_DATABASE' and name = 'ttl_02262'") -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " -- OPTIMIZE TABLE x FINAL will be done in background -- attach to it's log, via table UUID in query_id (see merger/mutator code). create materialized view this_text_log engine=Memory() as diff --git a/tests/queries/0_stateless/02286_parallel_final.sh b/tests/queries/0_stateless/02286_parallel_final.sh index 0ac510208f3..47dfad42e11 100755 --- a/tests/queries/0_stateless/02286_parallel_final.sh +++ b/tests/queries/0_stateless/02286_parallel_final.sh @@ -9,7 +9,7 @@ echo "Test intersecting ranges" test_random_values() { layers=$1 - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " drop table if exists tbl_8parts_${layers}granules_rnd; create table tbl_8parts_${layers}granules_rnd (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 % 8); insert into tbl_8parts_${layers}granules_rnd select number, 1 from numbers_mt($((layers * 8 * 8192))); @@ -29,7 +29,7 @@ echo "Test non intersecting ranges" test_sequential_values() { layers=$1 - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " drop table if exists tbl_8parts_${layers}granules_seq; create table tbl_8parts_${layers}granules_seq (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 / $((layers * 8192)))::UInt64; insert into tbl_8parts_${layers}granules_seq select number, 1 from numbers_mt($((layers * 8 * 8192))); diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh index bd7e6be3987..953485c3a1f 100755 --- a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh +++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh @@ -23,99 +23,99 @@ $CLICKHOUSE_CLIENT -q "insert into distinct_in_order_explain select number % num $CLICKHOUSE_CLIENT -q "select '-- disable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct all primary key columns -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- enable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct with all primary key columns -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from 
distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by the same columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by columns are prefix of distinct columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct but non-primary key prefix -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column _not_ in distinct -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column in distinct -> final distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column _not_ in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval 
$FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by _const_ column in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT echo "-- Check reading in order for distinct" echo "-- disabled, distinct columns match sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns match sorting key" # read_in_order_two_level_merge_threshold is set here to avoid repeating MergeTreeInOrder in output -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns DON't form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns contains constant columns, non-const columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns contains constant columns, non-const columns match prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, only part of distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, 
c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "=== disable new analyzer ===" DISABLE_ANALYZER="set enable_analyzer=0" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES echo "-- check that reading in order optimization for ORDER BY and DISTINCT applied correctly in the same query" ENABLE_READ_IN_ORDER="set optimize_read_in_order=1" echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | 
eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES echo "=== enable new analyzer ===" ENABLE_ANALYZER="set enable_analyzer=1" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. 
it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES $CLICKHOUSE_CLIENT -q "drop table if exists distinct_in_order_explain sync" diff --git a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh index 96f80d65878..490f8361682 100755 --- a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh +++ b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) data_path="$CLICKHOUSE_TMP/local" -$CLICKHOUSE_LOCAL --path "$data_path" -nm -q " +$CLICKHOUSE_LOCAL --path "$data_path" -m -q " create table ttl_02335 ( date Date, key Int, diff --git 
a/tests/queries/0_stateless/02361_fsync_profile_events.sh b/tests/queries/0_stateless/02361_fsync_profile_events.sh index 98c9cf9b7b4..73bf3fa120a 100755 --- a/tests/queries/0_stateless/02361_fsync_profile_events.sh +++ b/tests/queries/0_stateless/02361_fsync_profile_events.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_fsync_pe; create table data_fsync_pe (key Int) engine=MergeTree() @@ -27,7 +27,7 @@ for i in {1..100}; do $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data_fsync_pe values (1)" read -r FileSync FileOpen DirectorySync FileSyncElapsedMicroseconds DirectorySyncElapsedMicroseconds <<<"$( - $CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " + $CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " system flush logs; select diff --git a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh index 71e3b6961f8..46396d38747 100755 --- a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh +++ b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh @@ -24,7 +24,7 @@ $CLICKHOUSE_CLIENT \ table_name="t_02377_extend_protocol_with_query_parameters_$RANDOM$RANDOM" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " create table $table_name( id Int64, arr Array(UInt8), @@ -57,17 +57,17 @@ $CLICKHOUSE_CLIENT \ # it is possible to set parameter for the current session -$CLICKHOUSE_CLIENT -n -q "set param_n = 42; select {n: UInt8}" +$CLICKHOUSE_CLIENT -q "set param_n = 42; select {n: UInt8}" # and it will not be visible to other sessions -$CLICKHOUSE_CLIENT -n -q "select {n: UInt8} -- { serverError 456 }" +$CLICKHOUSE_CLIENT -q "select {n: UInt8} -- { serverError 456 }" # the same parameter could be set multiple times within one session (new value overrides the previous one) -$CLICKHOUSE_CLIENT -n -q "set param_n = 12; set param_n = 13; select {n: UInt8}" +$CLICKHOUSE_CLIENT -q "set param_n = 12; set param_n = 13; select {n: UInt8}" # multiple different parameters could be defined within each session -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " set param_a = 13, param_b = 'str'; set param_c = '2022-08-04 18:30:53'; set param_d = '{\'10\': [11, 12], \'13\': [14, 15]}'; diff --git a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh index 4b9793da5bb..974f10e2f24 100755 --- a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh +++ b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh @@ -15,7 +15,7 @@ FIND_SORTMODE="$GREP_SORTMODE | $TRIM_LEADING_SPACES" function explain_sorting { echo "-- QUERY: "$1 - $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTING + $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -q "$1" | eval $FIND_SORTING } function explain_sortmode { diff --git a/tests/queries/0_stateless/02417_load_marks_async.sh b/tests/queries/0_stateless/02417_load_marks_async.sh index 950656e7ab6..bcede9e4f5e 100755 --- a/tests/queries/0_stateless/02417_load_marks_async.sh +++ 
b/tests/queries/0_stateless/02417_load_marks_async.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test;" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE test ( n0 UInt64, From b416764585e7e03382f66c99636b4bf0b51bd79f Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 7 Aug 2024 13:02:38 +0200 Subject: [PATCH 1837/2361] put description of system log table in one place --- src/Interpreters/SystemLog.cpp | 35 ++++------------------ src/Interpreters/SystemLog.h | 54 ++++++++++++++++------------------ 2 files changed, 30 insertions(+), 59 deletions(-) diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 9b58da3f545..d4403b72583 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -284,34 +284,11 @@ ASTPtr getCreateTableQueryClean(const StorageID & table_id, ContextPtr context) SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConfiguration & config) { - query_log = createSystemLog(global_context, "system", "query_log", config, "query_log", "Contains information about executed queries, for example, start time, duration of processing, error messages."); - query_thread_log = createSystemLog(global_context, "system", "query_thread_log", config, "query_thread_log", "Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing."); - part_log = createSystemLog(global_context, "system", "part_log", config, "part_log", "This table contains information about events that occurred with data parts in the MergeTree family tables, such as adding or merging data."); - trace_log = createSystemLog(global_context, "system", "trace_log", config, "trace_log", "Contains stack traces collected by the sampling query profiler."); - crash_log = createSystemLog(global_context, "system", "crash_log", config, "crash_log", "Contains information about stack traces for fatal errors. 
The table does not exist in the database by default, it is created only when fatal errors occur."); - text_log = createSystemLog(global_context, "system", "text_log", config, "text_log", "Contains logging entries which are normally written to a log file or to stdout."); - metric_log = createSystemLog(global_context, "system", "metric_log", config, "metric_log", "Contains history of metrics values from tables system.metrics and system.events, periodically flushed to disk."); - error_log = createSystemLog(global_context, "system", "error_log", config, "error_log", "Contains history of error values from table system.errors, periodically flushed to disk."); - filesystem_cache_log = createSystemLog(global_context, "system", "filesystem_cache_log", config, "filesystem_cache_log", "Contains a history of all events occurred with filesystem cache for objects on a remote filesystem."); - filesystem_read_prefetches_log = createSystemLog( - global_context, "system", "filesystem_read_prefetches_log", config, "filesystem_read_prefetches_log", "Contains a history of all prefetches done during reading from MergeTables backed by a remote filesystem."); - asynchronous_metric_log = createSystemLog( - global_context, "system", "asynchronous_metric_log", config, - "asynchronous_metric_log", "Contains the historical values for system.asynchronous_metrics, once per time interval (one second by default)."); - opentelemetry_span_log = createSystemLog( - global_context, "system", "opentelemetry_span_log", config, - "opentelemetry_span_log", "Contains information about trace spans for executed queries."); - query_views_log = createSystemLog(global_context, "system", "query_views_log", config, "query_views_log", "Contains information about the dependent views executed when running a query, for example, the view type or the execution time."); - zookeeper_log = createSystemLog(global_context, "system", "zookeeper_log", config, "zookeeper_log", "This table contains information about the parameters of the request to the ZooKeeper server and the response from it."); - session_log = createSystemLog(global_context, "system", "session_log", config, "session_log", "Contains information about all successful and failed login and logout events."); - transactions_info_log = createSystemLog( - global_context, "system", "transactions_info_log", config, "transactions_info_log", "Contains information about all transactions executed on a current server."); - processors_profile_log = createSystemLog(global_context, "system", "processors_profile_log", config, "processors_profile_log", "Contains profiling information on processors level (building blocks for a pipeline for query execution."); - asynchronous_insert_log = createSystemLog(global_context, "system", "asynchronous_insert_log", config, "asynchronous_insert_log", "Contains a history for all asynchronous inserts executed on current server."); - backup_log = createSystemLog(global_context, "system", "backup_log", config, "backup_log", "Contains logging entries with the information about BACKUP and RESTORE operations."); - s3_queue_log = createSystemLog(global_context, "system", "s3queue_log", config, "s3queue_log", "Contains logging entries with the information files processes by S3Queue engine."); - azure_queue_log = createSystemLog(global_context, "system", "azure_queue_log", config, "azure_queue_log", "Contains logging entries with the information files processes by S3Queue engine."); - blob_storage_log = createSystemLog(global_context, "system", "blob_storage_log", config, 
"blob_storage_log", "Contains logging entries with information about various blob storage operations such as uploads and deletes."); +#define CREATE_PUBLIC_MEMBERS(log_type, member, descr) \ + member = createSystemLog(global_context, "system", #member, config, #member, descr); \ + + LIST_OF_ALL_SYSTEM_LOGS(CREATE_PUBLIC_MEMBERS) +#undef CREATE_PUBLIC_MEMBERS if (session_log) global_context->addWarningMessage("Table system.session_log is enabled. It's unreliable and may contain garbage. Do not use it for any kind of security monitoring."); @@ -355,7 +332,6 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf std::vector SystemLogs::getAllLogs() const { -/// NOLINTBEGIN(bugprone-macro-parentheses) #define GET_RAW_POINTERS(log_type, member, descr) \ member.get(), \ @@ -363,7 +339,6 @@ std::vector SystemLogs::getAllLogs() const LIST_OF_ALL_SYSTEM_LOGS(GET_RAW_POINTERS) }; #undef GET_RAW_POINTERS -/// NOLINTEND(bugprone-macro-parentheses) auto last_it = std::remove(result.begin(), result.end(), nullptr); result.erase(last_it, result.end()); diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 093be203282..6682829c0c6 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -8,28 +8,28 @@ #include #define LIST_OF_ALL_SYSTEM_LOGS(M) \ - M(QueryLog, query_log, "Used to log queries.") \ - M(QueryThreadLog, query_thread_log, "Used to log query threads.") \ - M(PartLog, part_log, "Used to log operations with parts.") \ - M(TraceLog, trace_log, "Used to log traces from query profiler.") \ - M(CrashLog, crash_log, "Used to log server crashes.") \ - M(TextLog, text_log, "Used to log all text messages.") \ - M(MetricLog, metric_log, "Used to log all metrics.") \ - M(ErrorLog, error_log, "Used to log errors.") \ - M(FilesystemCacheLog, filesystem_cache_log, "") \ - M(FilesystemReadPrefetchesLog, filesystem_read_prefetches_log, "") \ - M(ObjectStorageQueueLog, s3_queue_log, "") \ - M(ObjectStorageQueueLog, azure_queue_log, "") \ - M(AsynchronousMetricLog, asynchronous_metric_log, "Metrics from system.asynchronous_metrics") \ - M(OpenTelemetrySpanLog, opentelemetry_span_log, "OpenTelemetry trace spans.") \ - M(QueryViewsLog, query_views_log, "Used to log queries of materialized and live views.") \ - M(ZooKeeperLog, zookeeper_log, "Used to log all actions of ZooKeeper client.") \ - M(SessionLog, session_log, "Login, LogOut and Login failure events.") \ - M(TransactionsInfoLog, transactions_info_log, "Events related to transactions.") \ - M(ProcessorsProfileLog, processors_profile_log, "Used to log processors profiling") \ - M(AsynchronousInsertLog, asynchronous_insert_log, "") \ - M(BackupLog, backup_log, "Backup and restore events") \ - M(BlobStorageLog, blob_storage_log, "Log blob storage operations") \ + M(QueryLog, query_log, "Contains information about executed queries, for example, start time, duration of processing, error messages.") \ + M(QueryThreadLog, query_thread_log, "Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing.") \ + M(PartLog, part_log, "This table contains information about events that occurred with data parts in the MergeTree family tables, such as adding or merging data.") \ + M(TraceLog, trace_log, "Contains stack traces collected by the sampling query profiler.") \ + M(CrashLog, crash_log, "Contains information about stack traces for fatal errors. 
The table does not exist in the database by default, it is created only when fatal errors occur.") \ + M(TextLog, text_log, "Contains logging entries which are normally written to a log file or to stdout.") \ + M(MetricLog, metric_log, "Contains history of metrics values from tables system.metrics and system.events, periodically flushed to disk.") \ + M(ErrorLog, error_log, "Contains history of error values from table system.errors, periodically flushed to disk.") \ + M(FilesystemCacheLog, filesystem_cache_log, "Contains a history of all events occurred with filesystem cache for objects on a remote filesystem.") \ + M(FilesystemReadPrefetchesLog, filesystem_read_prefetches_log, "Contains a history of all prefetches done during reading from MergeTables backed by a remote filesystem.") \ + M(ObjectStorageQueueLog, s3_queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ + M(ObjectStorageQueueLog, azure_queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ + M(AsynchronousMetricLog, asynchronous_metric_log, "Contains the historical values for system.asynchronous_metrics, once per time interval (one second by default).") \ + M(OpenTelemetrySpanLog, opentelemetry_span_log, "Contains information about trace spans for executed queries.") \ + M(QueryViewsLog, query_views_log, "Contains information about the dependent views executed when running a query, for example, the view type or the execution time.") \ + M(ZooKeeperLog, zookeeper_log, "This table contains information about the parameters of the request to the ZooKeeper server and the response from it.") \ + M(SessionLog, session_log, "Contains information about all successful and failed login and logout events.") \ + M(TransactionsInfoLog, transactions_info_log, "Contains information about all transactions executed on a current server.") \ + M(ProcessorsProfileLog, processors_profile_log, "Contains profiling information on processors level (building blocks for a pipeline for query execution.") \ + M(AsynchronousInsertLog, asynchronous_insert_log, "Contains a history for all asynchronous inserts executed on current server.") \ + M(BackupLog, backup_log, "Contains logging entries with the information about BACKUP and RESTORE operations.") \ + M(BlobStorageLog, blob_storage_log, "Contains logging entries with information about various blob storage operations such as uploads and deletes.") \ namespace DB @@ -60,13 +60,11 @@ namespace DB }; */ -/// NOLINTBEGIN(bugprone-macro-parentheses) #define FORWARD_DECLARATION(log_type, member, descr) \ class log_type; \ LIST_OF_ALL_SYSTEM_LOGS(FORWARD_DECLARATION) #undef FORWARD_DECLARATION -/// NOLINTEND(bugprone-macro-parentheses) /// System logs should be destroyed in destructor of the last Context and before tables, @@ -83,13 +81,11 @@ public: void shutdown(); void handleCrash(); -/// NOLINTBEGIN(bugprone-macro-parentheses) -#define PUBLIC_MEMBERS(log_type, member, descr) \ +#define DECLARE_PUBLIC_MEMBERS(log_type, member, descr) \ std::shared_ptr member; \ - LIST_OF_ALL_SYSTEM_LOGS(PUBLIC_MEMBERS) -#undef PUBLIC_MEMBERS -/// NOLINTEND(bugprone-macro-parentheses) + LIST_OF_ALL_SYSTEM_LOGS(DECLARE_PUBLIC_MEMBERS) +#undef DECLARE_PUBLIC_MEMBERS private: std::vector getAllLogs() const; From b9f564f6f4fa98cc385acd6ce3c8e9f736bc55e0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 7 Aug 2024 11:12:11 +0000 Subject: [PATCH 1838/2361] Automatic style fix --- .../test_system_flush_logs/test.py | 1 + 
.../test_system_logs_recreate/test.py | 20 ++++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index 44269883d1b..dd48ef055f5 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -12,6 +12,7 @@ node = cluster.add_instance( stay_alive=True, ) + @pytest.fixture(scope="module", autouse=True) def start_cluster(): try: diff --git a/tests/integration/test_system_logs_recreate/test.py b/tests/integration/test_system_logs_recreate/test.py index c6d5861904c..8b84734ed02 100644 --- a/tests/integration/test_system_logs_recreate/test.py +++ b/tests/integration/test_system_logs_recreate/test.py @@ -36,8 +36,12 @@ def test_system_logs_recreate(): try: node.query("SYSTEM FLUSH LOGS") for table in system_logs: - assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") - assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") + assert "ENGINE = MergeTree" in node.query( + f"SHOW CREATE TABLE system.{table}" + ) + assert "ENGINE = Null" not in node.query( + f"SHOW CREATE TABLE system.{table}" + ) assert ( len( node.query(f"SHOW TABLES FROM system LIKE '{table}%'") @@ -103,7 +107,9 @@ def test_system_logs_recreate(): import logging for table in system_logs: - create_table_sql = node.query(f"SHOW CREATE TABLE system.{table} FORMAT TSVRaw") + create_table_sql = node.query( + f"SHOW CREATE TABLE system.{table} FORMAT TSVRaw" + ) logging.debug( "With storage policy, SHOW CREATE TABLE system.%s is: %s", table, @@ -129,8 +135,12 @@ def test_system_logs_recreate(): node.restart_clickhouse() node.query("SYSTEM FLUSH LOGS") for table in system_logs: - assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") - assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") + assert "ENGINE = MergeTree" in node.query( + f"SHOW CREATE TABLE system.{table}" + ) + assert "ENGINE = Null" not in node.query( + f"SHOW CREATE TABLE system.{table}" + ) assert ( len( node.query(f"SHOW TABLES FROM system LIKE '{table}%'") From 49871bacc1d56fb82b78c70dbfc92d52003e2e99 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 7 Aug 2024 12:37:39 +0100 Subject: [PATCH 1839/2361] fix test --- .../poco/Net/include/Poco/Net/HTTPServerSession.h | 1 - base/poco/Net/src/HTTPServerSession.cpp | 1 - tests/integration/test_server_keep_alive/test.py | 15 ++++++++++++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/base/poco/Net/include/Poco/Net/HTTPServerSession.h b/base/poco/Net/include/Poco/Net/HTTPServerSession.h index 54e7f2c8c50..b0659ca405c 100644 --- a/base/poco/Net/include/Poco/Net/HTTPServerSession.h +++ b/base/poco/Net/include/Poco/Net/HTTPServerSession.h @@ -66,7 +66,6 @@ namespace Net bool _firstRequest; Poco::Timespan _keepAliveTimeout; int _maxKeepAliveRequests; - HTTPServerParams::Ptr _params; }; diff --git a/base/poco/Net/src/HTTPServerSession.cpp b/base/poco/Net/src/HTTPServerSession.cpp index 3093f215952..8eec3e14872 100644 --- a/base/poco/Net/src/HTTPServerSession.cpp +++ b/base/poco/Net/src/HTTPServerSession.cpp @@ -24,7 +24,6 @@ HTTPServerSession::HTTPServerSession(const StreamSocket & socket, HTTPServerPara , _firstRequest(true) , _keepAliveTimeout(pParams->getKeepAliveTimeout()) , _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) - , _params(pParams) { setTimeout(pParams->getTimeout()); } diff --git 
a/tests/integration/test_server_keep_alive/test.py b/tests/integration/test_server_keep_alive/test.py index 96f08a37adb..e550319b6df 100644 --- a/tests/integration/test_server_keep_alive/test.py +++ b/tests/integration/test_server_keep_alive/test.py @@ -1,5 +1,6 @@ import logging import pytest +import random import requests from helpers.cluster import ClickHouseCluster @@ -24,19 +25,27 @@ def test_max_keep_alive_requests_on_user_side(start_cluster): # In this test we have `keep_alive_timeout` set to one hour to never trigger connection reset by timeout, `max_keep_alive_requests` is set to 5. # We expect server to close connection after each 5 requests. We detect connection reset by change in src port. # So the first 5 requests should come from the same port, the following 5 requests should come from another port. + + log_comments = [] + for _ in range(10): + rand_id = random.randint(0, 1000000) + log_comment = f"test_requests_with_keep_alive_{rand_id}" + log_comments.append(log_comment) + log_comments = sorted(log_comments) + session = requests.Session() for i in range(10): session.get( - f"http://{node.ip_address}:8123/?query=select%201&log_comment=test_requests_with_keep_alive_{i}" + f"http://{node.ip_address}:8123/?query=select%201&log_comment={log_comments[i]}" ) ports = node.query( - """ + f""" SYSTEM FLUSH LOGS; SELECT port FROM system.query_log - WHERE log_comment like 'test_requests_with_keep_alive_%' AND type = 'QueryFinish' + WHERE log_comment IN ({", ".join(f"'{comment}'" for comment in log_comments)}) AND type = 'QueryFinish' ORDER BY log_comment """ ).split("\n")[:-1] From 016d1fea6d72c25179633f72f8ca8338dc59338f Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Wed, 7 Aug 2024 13:58:03 +0200 Subject: [PATCH 1840/2361] CI: Integration tests uncover some logging --- tests/ci/integration_tests_runner.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 2b348be8b51..6165b1b9aaa 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -20,6 +20,7 @@ from typing import Any, Dict from env_helper import IS_CI from integration_test_images import IMAGES +from tee_popen import TeePopen MAX_RETRY = 1 NUM_WORKERS = 5 @@ -356,20 +357,13 @@ class ClickhouseIntegrationTestsRunner: logging.info("Package found in %s", full_path) log_name = "install_" + f + ".log" log_path = os.path.join(str(self.path()), log_name) - with open(log_path, "w", encoding="utf-8") as log: - cmd = f"dpkg -x {full_path} ." - logging.info("Executing installation cmd %s", cmd) - with subprocess.Popen( - cmd, shell=True, stderr=log, stdout=log - ) as proc: - if proc.wait() == 0: - logging.info( - "Installation of %s successfull", full_path - ) - else: - raise RuntimeError( - f"Installation of {full_path} failed" - ) + cmd = f"dpkg -x {full_path} ." 
+ logging.info("Executing installation cmd %s", cmd) + with TeePopen(cmd, log_file=log_path) as proc: + if proc.wait() == 0: + logging.info("Installation of %s successfull", full_path) + else: + raise RuntimeError(f"Installation of {full_path} failed") break else: raise FileNotFoundError(f"Package with {package} not found") From ddc058aa6ff6780fa67bc5c59d9d7ff9a71d4ee1 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 7 Aug 2024 12:51:06 +0200 Subject: [PATCH 1841/2361] Update minio in stateless tests --- docker/test/stateless/Dockerfile | 4 ++-- docker/test/stateless/setup_minio.sh | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index a0e5513a3a2..d8eb072328f 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -69,8 +69,8 @@ ENV MAX_RUN_TIME=0 # Unrelated to vars in setup_minio.sh, but should be the same there # to have the same binaries for local running scenario -ARG MINIO_SERVER_VERSION=2022-01-03T18-22-58Z -ARG MINIO_CLIENT_VERSION=2022-01-05T23-52-51Z +ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z +ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z ARG TARGETARCH # Download Minio-related binaries diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index 2b9433edd20..d8310d072b8 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -59,8 +59,8 @@ find_os() { download_minio() { local os local arch - local minio_server_version=${MINIO_SERVER_VERSION:-2022-09-07T22-25-02Z} - local minio_client_version=${MINIO_CLIENT_VERSION:-2022-08-28T20-08-11Z} + local minio_server_version=${MINIO_SERVER_VERSION:-2024-08-03T04-33-23Z} + local minio_client_version=${MINIO_CLIENT_VERSION:-2024-07-31T15-58-33Z} os=$(find_os) arch=$(find_arch) @@ -82,10 +82,10 @@ setup_minio() { local test_type=$1 ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse ./mc admin user add clickminio test testtest - ./mc admin policy set clickminio readwrite user=test + ./mc admin policy attach clickminio readwrite --user=test ./mc mb --ignore-existing clickminio/test if [ "$test_type" = "stateless" ]; then - ./mc policy set public clickminio/test + ./mc anonymous set public clickminio/test fi } @@ -148,4 +148,4 @@ main() { setup_aws_credentials } -main "$@" \ No newline at end of file +main "$@" From c39cdcffaff5917510b27f457e35a63dddeed0b5 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Wed, 7 Aug 2024 14:03:53 +0200 Subject: [PATCH 1842/2361] docs for lightweight_mutation_projection_mode option starting with 24.7 we can lightweight delete in tables with projections. Fixed docs stating it is not possible --- docs/en/sql-reference/statements/delete.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index a52b7204c30..8745a06c124 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -36,9 +36,10 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/ ## Limitations of lightweight `DELETE` -### Lightweight `DELETE`s do not work with projections +### Lightweight `DELETE`s with projections -Currently, `DELETE` does not work for tables with projections. 
This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance. +By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance. +However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'` will work with projections. ## Performance considerations when using lightweight `DELETE` From 37a6bd25f6d6dd2bfe3913639a7650c313642fb5 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Wed, 7 Aug 2024 14:06:02 +0200 Subject: [PATCH 1843/2361] squash! docs for lightweight_mutation_projection_mode option starting with 24.7 we can lightweight delete in tables with projections. Fixed docs stating it is not possible --- docs/en/sql-reference/statements/delete.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index 8745a06c124..88a9c933519 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -39,7 +39,7 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/ ### Lightweight `DELETE`s with projections By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance. -However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'` will work with projections. +However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'`, deletes will work with projections. 
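A minimal sketch of the behaviour described above, using hypothetical table and projection names (`events`, `by_user`); it assumes a server recent enough to expose the `lightweight_mutation_projection_mode` setting:

```sql
-- Hypothetical example: a MergeTree table with an aggregate projection.
CREATE TABLE events
(
    id UInt64,
    user_id UInt64,
    value UInt64,
    PROJECTION by_user (SELECT user_id, sum(value) GROUP BY user_id)
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO events SELECT number, number % 10, number FROM numbers(1000);

-- With the default mode the lightweight DELETE is rejected because the table
-- has a projection; switching the mode to 'drop' lets it proceed by dropping
-- the projection from the data parts touched by the delete.
SET lightweight_mutation_projection_mode = 'drop';
DELETE FROM events WHERE user_id = 3;
```
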
## Performance considerations when using lightweight `DELETE` From 6afff5824e3bd3e4eca2c50d7cce10dda5678433 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 7 Aug 2024 12:14:05 +0000 Subject: [PATCH 1844/2361] Fix 03130_convert_outer_join_to_inner_join --- .../03130_convert_outer_join_to_inner_join.sql | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql index 28362f1f469..168177a06d7 100644 --- a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql +++ b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql @@ -6,14 +6,18 @@ CREATE TABLE test_table_1 ( id UInt64, value String -) ENGINE=MergeTree ORDER BY id; +) ENGINE=MergeTree ORDER BY id +SETTINGS index_granularity = 16 # We have number of granules in the `EXPLAIN` output in reference file +; DROP TABLE IF EXISTS test_table_2; CREATE TABLE test_table_2 ( id UInt64, value String -) ENGINE=MergeTree ORDER BY id; +) ENGINE=MergeTree ORDER BY id +SETTINGS index_granularity = 16 +; INSERT INTO test_table_1 VALUES (1, 'Value_1'), (2, 'Value_2'); INSERT INTO test_table_2 VALUES (2, 'Value_2'), (3, 'Value_3'); From 4a537874cad725227c847850b2da42d0ed86ccaf Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 7 Aug 2024 14:35:05 +0200 Subject: [PATCH 1845/2361] adjust tests --- src/Disks/DiskFomAST.cpp | 2 +- src/Storages/MergeTree/MergeTreeSettings.cpp | 22 +++++++++---------- ...2808_custom_disk_with_user_defined_name.sh | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 2a5e7368de9..35cb124acfd 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -144,7 +144,7 @@ std::string DiskFomAST::getConfigDefinedDisk(const std::string &disk_name, Conte if (result->isCustomDisk()) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Disk name `{}` is a custom disk that is used in other table." + "Disk name `{}` is a custom disk that is used in other table. " "That disk could not be used by a reference by other tables. 
The custom disk should be fully specified with a disk function.", disk_name); diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index f72b24e3270..e11af43ed23 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -59,19 +59,19 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def, ContextPtr conte CustomType custom; if (name == "disk") { + ASTPtr value_as_custom_ast = nullptr; if (value.tryGet(custom) && 0 == strcmp(custom.getTypeName(), "AST")) + value_as_custom_ast = dynamic_cast(custom.getImpl()).ast; + + if (value_as_custom_ast && isDiskFunction(value_as_custom_ast)) { - auto ast = dynamic_cast(custom.getImpl()).ast; - if (ast && isDiskFunction(ast)) - { - auto disk_name = DiskFomAST::createCustomDisk(ast, context, is_attach); - LOG_DEBUG(getLogger("MergeTreeSettings"), "Created custom disk {}", disk_name); - value = disk_name; - } - else - { - value = DiskFomAST::getConfigDefinedDisk(value.safeGet(), context); - } + auto disk_name = DiskFomAST::createCustomDisk(value_as_custom_ast, context, is_attach); + LOG_DEBUG(getLogger("MergeTreeSettings"), "Created custom disk {}", disk_name); + value = disk_name; + } + else + { + value = DiskFomAST::getConfigDefinedDisk(value.safeGet(), context); } if (has("storage_policy")) diff --git a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh index 333bc1bc25d..b62adea5683 100755 --- a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh +++ b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh @@ -13,7 +13,7 @@ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk); -""" 2>&1 | grep -q "Disk with name \`s3_disk\` already exist" && echo 'OK' || echo 'FAIL' +""" 2>&1 | grep -q "The disk \`s3_disk\` is already exist and described by the config" && echo 'OK' || echo 'FAIL' disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" From 139fe624c2cd8e7e0af1e779f81113f1b132e9f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 13:16:47 +0000 Subject: [PATCH 1846/2361] Fix typos --- tests/integration/test_storage_kafka/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index b1d4f1f26b3..c1a66934f43 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -2242,16 +2242,16 @@ def test_kafka_virtual_columns_with_materialized_view( ) -def insert_with_retry(instance, values, table_name="kafka", max_try_couunt=5): +def insert_with_retry(instance, values, table_name="kafka", max_try_count=5): try_count = 0 while True: logging.debug(f"Inserting, try_count is {try_count}") try: try_count += 1 - instance.query("INSERT INTO test.kafka VALUES {}".format(values)) + instance.query(f"INSERT INTO test.{table_name} VALUES {values}") break except QueryRuntimeException as e: - if "Local: Timed out." in str(e) and try_count < max_try_couunt: + if "Local: Timed out." 
in str(e) and try_count < max_try_count: continue else: raise From a9b22a454d42ce81da8e711e8abd1e9908443ebc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 15:11:21 +0200 Subject: [PATCH 1847/2361] Make 03172_system_detached_tables parallelizable --- .../03172_system_detached_tables.reference | 14 ++-- .../03172_system_detached_tables.sh | 68 +++++++++++++++++++ .../03172_system_detached_tables.sql | 53 --------------- 3 files changed, 75 insertions(+), 60 deletions(-) create mode 100755 tests/queries/0_stateless/03172_system_detached_tables.sh delete mode 100644 tests/queries/0_stateless/03172_system_detached_tables.sql diff --git a/tests/queries/0_stateless/03172_system_detached_tables.reference b/tests/queries/0_stateless/03172_system_detached_tables.reference index 83d1ff13942..2fce89e5980 100644 --- a/tests/queries/0_stateless/03172_system_detached_tables.reference +++ b/tests/queries/0_stateless/03172_system_detached_tables.reference @@ -1,11 +1,11 @@ database atomic tests -test03172_system_detached_tables test_table 0 -test03172_system_detached_tables test_table_perm 1 -test03172_system_detached_tables test_table 0 -test03172_system_detached_tables test_table_perm 1 -test03172_system_detached_tables test_table 0 +default_atomic test_table 0 +default_atomic test_table_perm 1 +default_atomic test_table 0 +default_atomic test_table_perm 1 +default_atomic test_table 0 ----------------------- database lazy tests -before attach test03172_system_detached_tables_lazy test_table 0 -before attach test03172_system_detached_tables_lazy test_table_perm 1 +before attach default_lazy test_table 0 +before attach default_lazy test_table_perm 1 DROP TABLE diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sh b/tests/queries/0_stateless/03172_system_detached_tables.sh new file mode 100755 index 00000000000..47775abcc45 --- /dev/null +++ b/tests/queries/0_stateless/03172_system_detached_tables.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +DATABASE_ATOMIC="${CLICKHOUSE_DATABASE}_atomic" +DATABASE_LAZY="${CLICKHOUSE_DATABASE}_lazy" + +$CLICKHOUSE_CLIENT --multiquery " + +SELECT 'database atomic tests'; +DROP DATABASE IF EXISTS ${DATABASE_ATOMIC}; +CREATE DATABASE IF NOT EXISTS ${DATABASE_ATOMIC} ENGINE=Atomic; + +CREATE TABLE ${DATABASE_ATOMIC}.test_table (n Int64) ENGINE=MergeTree ORDER BY n; +SELECT * FROM system.detached_tables WHERE database='${DATABASE_ATOMIC}'; + +DETACH TABLE ${DATABASE_ATOMIC}.test_table; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='${DATABASE_ATOMIC}'; + +ATTACH TABLE ${DATABASE_ATOMIC}.test_table; + +CREATE TABLE ${DATABASE_ATOMIC}.test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n; +SELECT * FROM system.detached_tables WHERE database='${DATABASE_ATOMIC}'; + +DETACH TABLE ${DATABASE_ATOMIC}.test_table_perm PERMANENTLY; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='${DATABASE_ATOMIC}'; + +DETACH TABLE ${DATABASE_ATOMIC}.test_table SYNC; +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='${DATABASE_ATOMIC}'; + +SELECT database, table, is_permanently FROM system.detached_tables WHERE database='${DATABASE_ATOMIC}' AND table='test_table'; + +DROP DATABASE ${DATABASE_ATOMIC} SYNC; + +" + +$CLICKHOUSE_CLIENT --multiquery " + +SELECT '-----------------------'; +SELECT 'database lazy tests'; + +DROP DATABASE IF EXISTS ${DATABASE_LAZY}; +CREATE DATABASE ${DATABASE_LAZY} Engine=Lazy(10); + +CREATE TABLE ${DATABASE_LAZY}.test_table (number UInt64) engine=Log; +INSERT INTO ${DATABASE_LAZY}.test_table SELECT * FROM numbers(100); +DETACH TABLE ${DATABASE_LAZY}.test_table; + +CREATE TABLE ${DATABASE_LAZY}.test_table_perm (number UInt64) engine=Log; +INSERT INTO ${DATABASE_LAZY}.test_table_perm SELECT * FROM numbers(100); +DETACH table ${DATABASE_LAZY}.test_table_perm PERMANENTLY; + +SELECT 'before attach', database, table, is_permanently FROM system.detached_tables WHERE database='${DATABASE_LAZY}'; + +ATTACH TABLE ${DATABASE_LAZY}.test_table; +ATTACH TABLE ${DATABASE_LAZY}.test_table_perm; + +SELECT 'after attach', database, table, is_permanently FROM system.detached_tables WHERE database='${DATABASE_LAZY}'; + +SELECT 'DROP TABLE'; +DROP TABLE ${DATABASE_LAZY}.test_table SYNC; +DROP TABLE ${DATABASE_LAZY}.test_table_perm SYNC; + +DROP DATABASE ${DATABASE_LAZY} SYNC; + +" diff --git a/tests/queries/0_stateless/03172_system_detached_tables.sql b/tests/queries/0_stateless/03172_system_detached_tables.sql deleted file mode 100644 index 1a3c2d7cc0f..00000000000 --- a/tests/queries/0_stateless/03172_system_detached_tables.sql +++ /dev/null @@ -1,53 +0,0 @@ --- Tags: no-parallel - -SELECT 'database atomic tests'; -DROP DATABASE IF EXISTS test03172_system_detached_tables; -CREATE DATABASE IF NOT EXISTS test03172_system_detached_tables ENGINE=Atomic; - -CREATE TABLE test03172_system_detached_tables.test_table (n Int64) ENGINE=MergeTree ORDER BY n; -SELECT * FROM system.detached_tables WHERE database='test03172_system_detached_tables'; - -DETACH TABLE test03172_system_detached_tables.test_table; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables'; - -ATTACH TABLE test03172_system_detached_tables.test_table; - -CREATE TABLE test03172_system_detached_tables.test_table_perm (n Int64) ENGINE=MergeTree ORDER BY n; -SELECT * FROM system.detached_tables WHERE database='test03172_system_detached_tables'; - -DETACH TABLE 
test03172_system_detached_tables.test_table_perm PERMANENTLY; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables'; - -DETACH TABLE test03172_system_detached_tables.test_table SYNC; -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables'; - -SELECT database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables' AND table='test_table'; - -DROP DATABASE test03172_system_detached_tables SYNC; - -SELECT '-----------------------'; -SELECT 'database lazy tests'; - -DROP DATABASE IF EXISTS test03172_system_detached_tables_lazy; -CREATE DATABASE test03172_system_detached_tables_lazy Engine=Lazy(10); - -CREATE TABLE test03172_system_detached_tables_lazy.test_table (number UInt64) engine=Log; -INSERT INTO test03172_system_detached_tables_lazy.test_table SELECT * FROM numbers(100); -DETACH TABLE test03172_system_detached_tables_lazy.test_table; - -CREATE TABLE test03172_system_detached_tables_lazy.test_table_perm (number UInt64) engine=Log; -INSERT INTO test03172_system_detached_tables_lazy.test_table_perm SELECT * FROM numbers(100); -DETACH table test03172_system_detached_tables_lazy.test_table_perm PERMANENTLY; - -SELECT 'before attach', database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; - -ATTACH TABLE test03172_system_detached_tables_lazy.test_table; -ATTACH TABLE test03172_system_detached_tables_lazy.test_table_perm; - -SELECT 'after attach', database, table, is_permanently FROM system.detached_tables WHERE database='test03172_system_detached_tables_lazy'; - -SELECT 'DROP TABLE'; -DROP TABLE test03172_system_detached_tables_lazy.test_table SYNC; -DROP TABLE test03172_system_detached_tables_lazy.test_table_perm SYNC; - -DROP DATABASE test03172_system_detached_tables_lazy SYNC; From 3a564255de9f6f827ff033392cc6b15ca260c6fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 15:16:15 +0200 Subject: [PATCH 1848/2361] Make 01764_table_function_dictionary parallelizable --- tests/queries/0_stateless/01764_table_function_dictionary.sql | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01764_table_function_dictionary.sql b/tests/queries/0_stateless/01764_table_function_dictionary.sql index 76e7213b367..e37f8d2a290 100644 --- a/tests/queries/0_stateless/01764_table_function_dictionary.sql +++ b/tests/queries/0_stateless/01764_table_function_dictionary.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel - DROP TABLE IF EXISTS table_function_dictionary_source_table; CREATE TABLE table_function_dictionary_source_table ( @@ -18,7 +16,7 @@ CREATE DICTIONARY table_function_dictionary_test_dictionary value UInt64 DEFAULT 0 ) PRIMARY KEY id -SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_function_dictionary_source_table')) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' DATABASE currentDatabase() TABLE 'table_function_dictionary_source_table')) LAYOUT(DIRECT()); SELECT * FROM dictionary('table_function_dictionary_test_dictionary'); From 1dd5af578871d00602b427c3eec9fe8308e7cbd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 15:17:31 +0200 Subject: [PATCH 1849/2361] Make 01760_ddl_dictionary_use_current_database_name parallelizable --- .../01760_ddl_dictionary_use_current_database_name.sql | 2 -- 1 file changed, 2 
deletions(-) diff --git a/tests/queries/0_stateless/01760_ddl_dictionary_use_current_database_name.sql b/tests/queries/0_stateless/01760_ddl_dictionary_use_current_database_name.sql index a7f04921f1f..c6bccde8590 100644 --- a/tests/queries/0_stateless/01760_ddl_dictionary_use_current_database_name.sql +++ b/tests/queries/0_stateless/01760_ddl_dictionary_use_current_database_name.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel - DROP TABLE IF EXISTS ddl_dictonary_test_source; CREATE TABLE ddl_dictonary_test_source ( From 76ce1fc1ee82be806a3472108ff104cb440a1aed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 6 Aug 2024 15:20:22 +0200 Subject: [PATCH 1850/2361] Make 01945_system_warnings parallelizable --- tests/queries/0_stateless/01945_system_warnings.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/01945_system_warnings.sh b/tests/queries/0_stateless/01945_system_warnings.sh index 249c3218bcc..63403ce2893 100755 --- a/tests/queries/0_stateless/01945_system_warnings.sh +++ b/tests/queries/0_stateless/01945_system_warnings.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From b3c30b05d6ea7c436ddc21f31016122c10de1ec7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 13:22:22 +0000 Subject: [PATCH 1851/2361] Make test_kafka_insert repeatable --- tests/integration/test_storage_kafka/test.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index c1a66934f43..3f71866c913 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -2280,11 +2280,12 @@ def test_kafka_insert(kafka_cluster, create_query_generator): values.append("({i}, {i})".format(i=i)) values = ",".join(values) - insert_with_retry(instance, values) + with existing_kafka_topic(get_admin_client(kafka_cluster), topic_name): + insert_with_retry(instance, values) - messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count) - result = "\n".join(messages) - kafka_check_result(result, True) + messages = kafka_consume_with_retry(kafka_cluster, topic_name, message_count) + result = "\n".join(messages) + kafka_check_result(result, True) @pytest.mark.parametrize( From ecba21bfe78fabf76eb7207f40e3fe53ecb70eaf Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Wed, 7 Aug 2024 15:23:19 +0200 Subject: [PATCH 1852/2361] Fix documentation for memory overcommit --- docs/en/operations/settings/settings.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 2b11c836fc1..968481062e9 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4629,8 +4629,8 @@ Default Value: 5. ## memory_overcommit_ratio_denominator {#memory_overcommit_ratio_denominator} -It represents soft memory limit in case when hard limit is reached on user level. -This value is used to compute overcommit ratio for the query. +It represents the soft memory limit when the hard limit is reached on the global level. +This value is used to compute the overcommit ratio for the query. Zero means skip the query. Read more about [memory overcommit](memory-overcommit.md). @@ -4646,8 +4646,8 @@ Default value: `5000000`. 
## memory_overcommit_ratio_denominator_for_user {#memory_overcommit_ratio_denominator_for_user} -It represents soft memory limit in case when hard limit is reached on global level. -This value is used to compute overcommit ratio for the query. +It represents the soft memory limit when the hard limit is reached on the user level. +This value is used to compute the overcommit ratio for the query. Zero means skip the query. Read more about [memory overcommit](memory-overcommit.md). From ac5aab17584ab66e81a0124b9b87590e8d6ad43d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 13:30:28 +0000 Subject: [PATCH 1853/2361] Handle kafka null messages --- src/Storages/Kafka/KafkaConsumer2.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index 8581398aa90..f6ef85da317 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -358,8 +358,9 @@ ReadBufferPtr KafkaConsumer2::getNextMessage() size_t size = current->get_payload().get_size(); ++current; - chassert(data != nullptr); - return std::make_shared(data, size); + // `data` can be nullptr on case of the Kafka message has empty payload + if (data) + return std::make_shared(data, size); } return nullptr; From 28c8d158635ef2564a60c25a6f971aa324030dcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 13:37:56 +0000 Subject: [PATCH 1854/2361] Fix style --- tests/queries/0_stateless/03217_filtering_in_system_tables.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql index bbc755e478d..72ca7c8684d 100644 --- a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql @@ -16,10 +16,12 @@ SYSTEM FLUSH LOGS; -- StorageSystemTables SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND current_database = currentDatabase() AND query LIKE '%SELECT database, table FROM system.tables WHERE database = \'information_schema\' AND table = \'tables\';' AND type = 'QueryFinish'; -- StorageSystemReplicas SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND current_database = currentDatabase() AND query LIKE '%SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name = \'r1\';' AND type = 'QueryFinish'; From 9a812fc53502cbd3bb69964d8e8b1afb186e6b46 Mon Sep 17 00:00:00 2001 From: "Max K." 
Date: Wed, 7 Aug 2024 15:55:03 +0200 Subject: [PATCH 1855/2361] Revert "CI: Strict job timeout 1.5h for tests, 2h for builds" --- tests/ci/ci_config.py | 1 + tests/ci/ci_definitions.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index f578cd8b559..7a19eb6f827 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -510,6 +510,7 @@ class CI: JobNames.LIBFUZZER_TEST: JobConfig( required_builds=[BuildNames.FUZZERS], run_by_label=Tags.libFuzzer, + timeout=10800, run_command='libfuzzer_test_check.py "$CHECK_NAME"', runner_type=Runners.STYLE_CHECKER, ), diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 69e7ed259d5..48847b0d7a6 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -331,7 +331,7 @@ class JobConfig: # will be triggered for the job if omitted in CI workflow yml run_command: str = "" # job timeout, seconds - timeout: Optional[int] = 5400 + timeout: Optional[int] = None # sets number of batches for a multi-batch job num_batches: int = 1 # label that enables job in CI, if set digest isn't used @@ -420,6 +420,7 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, + timeout=9000, ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", @@ -530,6 +531,7 @@ class CommonJobConfigs: docker=["clickhouse/sqllogic-test"], ), run_command="sqllogic_test.py", + timeout=10800, release_only=True, runner_type=Runners.FUNC_TESTER, ) @@ -541,6 +543,7 @@ class CommonJobConfigs: docker=["clickhouse/sqltest"], ), run_command="sqltest.py", + timeout=10800, release_only=True, runner_type=Runners.FUZZER_UNIT_TESTER, ) @@ -610,7 +613,6 @@ class CommonJobConfigs: docker=["clickhouse/binary-builder"], git_submodules=True, ), - timeout=7200, run_command="build_check.py $BUILD_NAME", runner_type=Runners.BUILDER, ) From 364e973ef7aaa561780c50cd795b9edbcae51a41 Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 7 Aug 2024 16:23:47 +0200 Subject: [PATCH 1856/2361] Ping CI From 3485e87d8ac635ec42e0f55f0d11dbb07ae03dba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 15:18:55 +0000 Subject: [PATCH 1857/2361] Really handle null messages --- src/Storages/Kafka/KafkaConsumer2.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index f6ef85da317..dc71086db3b 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -352,8 +352,7 @@ void KafkaConsumer2::subscribeIfNotSubscribedYet() ReadBufferPtr KafkaConsumer2::getNextMessage() { - if (current != messages.end()) - { + while (current != messages.end()) { const auto * data = current->get_payload().get_data(); size_t size = current->get_payload().get_size(); ++current; From d2e9833dba4038999e7aeb540ea6b56cc1c59449 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 7 Aug 2024 17:00:41 +0200 Subject: [PATCH 1858/2361] Add minio audit logs --- docker/test/stateless/run.sh | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index ea32df23af0..b33c261dacc 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -176,6 +176,25 @@ done setup_logs_replication attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01 +# create minio log webhooks for both audit and server 
logs +clickhouse-client --query "CREATE TABLE minio_audit_logs +( + log String, + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(substring(JSONExtractRaw(log, 'time'), 2, 29), 9, 'UTC') +) +ENGINE = MergeTree +ORDER BY tuple()" +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" + +clickhouse-client --query "CREATE TABLE minio_server_logs +( + log String, + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(substring(JSONExtractRaw(log, 'time'), 2, 29), 9, 'UTC') +) +ENGINE = MergeTree +ORDER BY tuple()" +./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" + function fn_exists() { declare -F "$1" > /dev/null; } @@ -328,6 +347,11 @@ do fi done + +# collect minio audit and server logs +clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" + # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? # Because it's the simplest way to read it when server has crashed. From bf111b65fdcaa333680cf64ad9d32fe9493b182e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 17:48:46 +0200 Subject: [PATCH 1859/2361] 03201_avro_negative_block_size_arrays is parallelizable --- .../0_stateless/03201_avro_negative_block_size_arrays.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03201_avro_negative_block_size_arrays.sh b/tests/queries/0_stateless/03201_avro_negative_block_size_arrays.sh index dcecd7b3bea..f7101989377 100755 --- a/tests/queries/0_stateless/03201_avro_negative_block_size_arrays.sh +++ b/tests/queries/0_stateless/03201_avro_negative_block_size_arrays.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-parallel, no-fasttest +# Tags: no-fasttest +# no-fasttest: Requires libraries set -e From ff2e8b65bd5b45112e4aeaf03de089d68019a90a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 17:58:44 +0200 Subject: [PATCH 1860/2361] 03171_hashed_dictionary_short_circuit_bug_fix is parallelizable --- .../03171_hashed_dictionary_short_circuit_bug_fix.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql b/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql index e1b5531a442..6d3a63dbadb 100644 --- a/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql +++ b/tests/queries/0_stateless/03171_hashed_dictionary_short_circuit_bug_fix.sql @@ -1,5 +1,3 @@ --- Tags: no-parallel - CREATE TABLE x ( hash_id UInt64, user_result Decimal(3, 2) ) ENGINE = Memory(); CREATE TABLE y ( hash_id UInt64, user_result DECIMAL(18, 6) ) ENGINE = Memory(); From db9ba0188622ddd0677a99ab9d8e0765cb5a6c55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 17:58:54 +0200 Subject: [PATCH 1861/2361] Leave some notes --- tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql 
b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql index bbe701f022b..7bbe2d3c533 100644 --- a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql +++ b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql @@ -1,4 +1,5 @@ -- Tags: no-random-settings, no-object-storage, no-parallel +-- no-parallel: Running `DROP MARK CACHE` can have a big impact on other concurrent tests -- Tag no-object-storage: this test relies on the number of opened files in MergeTree that can differ in object storages SET allow_experimental_dynamic_type = 1; From 09964cee569eebea42e4efd52b2228062f8e8331 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 18:09:06 +0200 Subject: [PATCH 1862/2361] Fix 03168_query_log_privileges_not_empty --- ...8_query_log_privileges_not_empty.reference | 1 - .../03168_query_log_privileges_not_empty.sh | 36 +++++++++---------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/tests/queries/0_stateless/03168_query_log_privileges_not_empty.reference b/tests/queries/0_stateless/03168_query_log_privileges_not_empty.reference index e3ac97f9945..f4ada41d77d 100644 --- a/tests/queries/0_stateless/03168_query_log_privileges_not_empty.reference +++ b/tests/queries/0_stateless/03168_query_log_privileges_not_empty.reference @@ -1,4 +1,3 @@ -1 3168 8613 [] ['SELECT(a, b) ON default.d_03168_query_log'] [] [] diff --git a/tests/queries/0_stateless/03168_query_log_privileges_not_empty.sh b/tests/queries/0_stateless/03168_query_log_privileges_not_empty.sh index 9abc635a874..b817052e355 100755 --- a/tests/queries/0_stateless/03168_query_log_privileges_not_empty.sh +++ b/tests/queries/0_stateless/03168_query_log_privileges_not_empty.sh @@ -1,32 +1,28 @@ #!/usr/bin/env bash -# Tags: no-parallel CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -user_name="u_03168_query_log" -table_name="default.d_03168_query_log" +user_name="u_03168_query_log_${CLICKHOUSE_DATABASE}" +table_name="d_03168_query_log" test_query="select a, b from ${table_name}" -${CLICKHOUSE_CLIENT_BINARY} --query "drop user if exists ${user_name}" -${CLICKHOUSE_CLIENT_BINARY} --query "create user ${user_name}" -${CLICKHOUSE_CLIENT_BINARY} --query "drop table if exists ${table_name}" -${CLICKHOUSE_CLIENT_BINARY} --query "create table ${table_name} (a UInt64, b UInt64) order by a" +${CLICKHOUSE_CLIENT} --query "drop user if exists ${user_name}" +${CLICKHOUSE_CLIENT} --query "create user ${user_name}" +${CLICKHOUSE_CLIENT} --query "drop table if exists ${table_name}" +${CLICKHOUSE_CLIENT} --query "create table ${table_name} (a UInt64, b UInt64) order by a" +${CLICKHOUSE_CLIENT} --query "insert into table ${table_name} values (3168, 8613)" -${CLICKHOUSE_CLIENT_BINARY} --query "insert into table ${table_name} values (3168, 8613)" +${CLICKHOUSE_CLIENT} --user ${user_name} --query "${test_query}" 2>&1 >/dev/null | (grep -q "ACCESS_DENIED" || echo "Expected ACCESS_DENIED error not found") -error="$(${CLICKHOUSE_CLIENT_BINARY} --user ${user_name} --query "${test_query}" 2>&1 >/dev/null)" -echo "${error}" | grep -Fc "ACCESS_DENIED" +${CLICKHOUSE_CLIENT} --query "grant select(a, b) on ${table_name} to ${user_name}" +${CLICKHOUSE_CLIENT} --user ${user_name} --query "${test_query}" -${CLICKHOUSE_CLIENT_BINARY} --query "grant select(a, b) on ${table_name} to ${user_name}" +${CLICKHOUSE_CLIENT} --query "system flush logs" +${CLICKHOUSE_CLIENT} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'ExceptionBeforeStart' and current_database = currentDatabase() order by event_time desc limit 1" +${CLICKHOUSE_CLIENT} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryStart' and current_database = currentDatabase() order by event_time desc limit 1" +${CLICKHOUSE_CLIENT} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryFinish' and current_database = currentDatabase() order by event_time desc limit 1" -${CLICKHOUSE_CLIENT_BINARY} --user ${user_name} --query "${test_query}" - -${CLICKHOUSE_CLIENT_BINARY} --query "system flush logs" -${CLICKHOUSE_CLIENT_BINARY} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'ExceptionBeforeStart' and current_database = currentDatabase() order by event_time desc limit 1" -${CLICKHOUSE_CLIENT_BINARY} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryStart' and current_database = currentDatabase() order by event_time desc limit 1" -${CLICKHOUSE_CLIENT_BINARY} --query "select used_privileges, missing_privileges from system.query_log where query = '${test_query}' and type = 'QueryFinish' and current_database = currentDatabase() order by event_time desc limit 1" - -${CLICKHOUSE_CLIENT_BINARY} --query "drop table ${table_name}" -${CLICKHOUSE_CLIENT_BINARY} --query "drop user ${user_name}" +${CLICKHOUSE_CLIENT} --query "drop table ${table_name}" +${CLICKHOUSE_CLIENT} --query "drop user ${user_name}" From c7b94ca43a4f5051e0b84be5cb0e2bc94a6137e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 18:11:51 +0200 Subject: [PATCH 1863/2361] 
03164_adapting_parquet_reader_output_size is parallelizable --- .../0_stateless/03164_adapting_parquet_reader_output_size.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql b/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql index fa098b64702..e6b13510301 100644 --- a/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql +++ b/tests/queries/0_stateless/03164_adapting_parquet_reader_output_size.sql @@ -1,4 +1,4 @@ --- Tags: no-fasttest, no-parallel, no-random-settings +-- Tags: no-fasttest, no-random-settings set max_insert_threads=1; @@ -22,4 +22,4 @@ CREATE TABLE test_parquet (col1 String, col2 String, col3 String, col4 String, c INSERT INTO test_parquet SELECT rand(),rand(),rand(),rand(),rand(),rand(),rand() FROM numbers(100000); SELECT max(blockSize()) FROM test_parquet; -DROP TABLE IF EXISTS test_parquet; \ No newline at end of file +DROP TABLE IF EXISTS test_parquet; From 58b15c71d9811315666e0b563870aa404682294e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 18:15:19 +0200 Subject: [PATCH 1864/2361] 03156_default_multiquery_split is parallelizable --- tests/queries/0_stateless/03156_default_multiquery_split.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03156_default_multiquery_split.sh b/tests/queries/0_stateless/03156_default_multiquery_split.sh index ac64c2d093d..8ba2f46b786 100755 --- a/tests/queries/0_stateless/03156_default_multiquery_split.sh +++ b/tests/queries/0_stateless/03156_default_multiquery_split.sh @@ -1,11 +1,10 @@ #!/usr/bin/env bash -# Tags: no-parallel, no-ordinary-database CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -SQL_FILE_NAME=$"03156_default_multiquery_split_$$.sql" +SQL_FILE_NAME=$"03156_default_multiquery_split_${CLICKHOUSE_DATABASE}.sql" # The old multiquery implementation uses '\n' to split INSERT query segmentation # this case is mainly to test the following situations From a109e78776fd435b9dfb464bf8d229ef319ec0e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 18:17:08 +0200 Subject: [PATCH 1865/2361] 03148_async_queries_in_query_log_errors is parallelizable --- .../0_stateless/03148_async_queries_in_query_log_errors.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh b/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh index 2b4b96a9cbf..9c290133bf9 100755 --- a/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh +++ b/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 35f827e55a7b1583f3424ac4692203aa851a1818 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 7 Aug 2024 18:19:15 +0200 Subject: [PATCH 1866/2361] Make 03147_table_function_loop parallelizable --- .../queries/0_stateless/03147_table_function_loop.sql | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/03147_table_function_loop.sql b/tests/queries/0_stateless/03147_table_function_loop.sql index aa3c8e2def5..e10155fe8b3 100644 --- a/tests/queries/0_stateless/03147_table_function_loop.sql +++ b/tests/queries/0_stateless/03147_table_function_loop.sql @@ -3,14 +3,11 @@ SELECT * FROM loop(numbers(3)) LIMIT 10; SELECT * FROM loop (numbers(3)) LIMIT 10 settings max_block_size = 1; -DROP DATABASE IF EXISTS 03147_db; -CREATE DATABASE IF NOT EXISTS 03147_db; -CREATE TABLE 03147_db.t (n Int8) ENGINE=MergeTree ORDER BY n; -INSERT INTO 03147_db.t SELECT * FROM numbers(10); -USE 03147_db; +CREATE TABLE t (n Int8) ENGINE=MergeTree ORDER BY n; +INSERT INTO t SELECT * FROM numbers(10); -SELECT * FROM loop(03147_db.t) LIMIT 15; +SELECT * FROM loop({CLICKHOUSE_DATABASE:Identifier}.t) LIMIT 15; SELECT * FROM loop(t) LIMIT 15; -SELECT * FROM loop(03147_db, t) LIMIT 15; +SELECT * FROM loop({CLICKHOUSE_DATABASE:Identifier}, t) LIMIT 15; SELECT * FROM loop('', '') -- { serverError UNKNOWN_TABLE } From ad678cb5a8d4533a88273b8244dc0844c83e641c Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 7 Aug 2024 18:24:03 +0200 Subject: [PATCH 1867/2361] Ignore disappeared projections on start --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 33 +++++++++++- .../MergeTree/MergeTreeDataPartChecksum.cpp | 6 --- .../MergeTree/MergeTreeDataPartChecksum.h | 3 -- .../test_broken_projections/test.py | 50 +++++++++++++++++++ 4 files changed, 81 insertions(+), 11 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 3a44359b537..918a4cda714 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -72,6 +72,7 @@ namespace ErrorCodes extern const int BAD_TTL_FILE; extern const int NOT_IMPLEMENTED; extern const int NO_SUCH_COLUMN_IN_TABLE; + extern const int FILE_DOESNT_EXIST; } @@ -749,8 +750,16 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks /// Probably there is something wrong with files 
of this part. /// So it can be helpful to add to the error message some information about those files. String files_in_part; + for (auto it = getDataPartStorage().iterate(); it->isValid(); it->next()) - files_in_part += fmt::format("{}{} ({} bytes)", (files_in_part.empty() ? "" : ", "), it->name(), getDataPartStorage().getFileSize(it->name())); + { + std::string file_info; + if (!getDataPartStorage().isDirectory(it->name())) + file_info = fmt::format(" ({} bytes)", getDataPartStorage().getFileSize(it->name())); + + files_in_part += fmt::format("{}{}{}", (files_in_part.empty() ? "" : ", "), it->name(), file_info); + + } if (!files_in_part.empty()) e->addMessage("Part contains files: {}", files_in_part); if (isEmpty()) @@ -2141,7 +2150,27 @@ void IMergeTreeDataPart::checkConsistencyBase() const } } - checksums.checkSizes(getDataPartStorage()); + const auto & data_part_storage = getDataPartStorage(); + for (const auto & [filename, checksum] : checksums.files) + { + try + { + checksum.checkSize(data_part_storage, filename); + } + catch (const Exception & ex) + { + /// For projection parts check will mark them broken in loadProjections + if (!parent_part && filename.ends_with(".proj")) + { + std::string projection_name = fs::path(filename).stem(); + LOG_INFO(storage.log, "Projection {} doesn't exist on start for part {}, marking it as broken", projection_name, name); + if (hasProjection(projection_name)) + markProjectionPartAsBroken(projection_name, ex.message(), ex.code()); + } + else + throw; + } + } } else { diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp index b327480fa92..3ef36ce364c 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp @@ -100,12 +100,6 @@ void MergeTreeDataPartChecksums::checkEqual(const MergeTreeDataPartChecksums & r } } -void MergeTreeDataPartChecksums::checkSizes(const IDataPartStorage & storage) const -{ - for (const auto & [name, checksum] : files) - checksum.checkSize(storage, name); -} - UInt64 MergeTreeDataPartChecksums::getTotalSizeOnDisk() const { UInt64 res = 0; diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.h b/src/Storages/MergeTree/MergeTreeDataPartChecksum.h index 05178dc3a60..dc52f1ada2b 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.h +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.h @@ -65,9 +65,6 @@ struct MergeTreeDataPartChecksums static bool isBadChecksumsErrorCode(int code); - /// Checks that the directory contains all the needed files of the correct size. Does not check the checksum. - void checkSizes(const IDataPartStorage & storage) const; - /// Returns false if the checksum is too old. 
bool read(ReadBuffer & in); /// Assume that header with version (the first line) is read diff --git a/tests/integration/test_broken_projections/test.py b/tests/integration/test_broken_projections/test.py index 162c0dbaa2f..578ff42369c 100644 --- a/tests/integration/test_broken_projections/test.py +++ b/tests/integration/test_broken_projections/test.py @@ -4,6 +4,7 @@ import logging import string import random from helpers.cluster import ClickHouseCluster +from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) @@ -18,6 +19,12 @@ def cluster(): stay_alive=True, with_zookeeper=True, ) + cluster.add_instance( + "node_restart", + main_configs=["config.d/dont_start_broken.xml"], + stay_alive=True, + with_zookeeper=True, + ) logging.info("Starting cluster...") cluster.start() @@ -632,6 +639,49 @@ def test_broken_on_start(cluster): check(node, table_name, 0) +def test_disappeared_projection_on_start(cluster): + node = cluster.instances["node_restart"] + + table_name = "test_disapperead_projection" + create_table(node, table_name, 1) + + node.query(f"SYSTEM STOP MERGES {table_name}") + + insert(node, table_name, 0, 5) + insert(node, table_name, 5, 5) + insert(node, table_name, 10, 5) + insert(node, table_name, 15, 5) + + assert ["all_0_0_0", "all_1_1_0", "all_2_2_0", "all_3_3_0"] == get_parts( + node, table_name + ) + + def drop_projection(): + node.query( + f"ALTER TABLE {table_name} DROP PROJECTION proj2", + settings={"mutations_sync": "0"}, + ) + + p = Pool(2) + p.apply_async(drop_projection) + + for i in range(30): + create_query = node.query(f"SHOW CREATE TABLE {table_name}") + if "proj2" not in create_query: + break + time.sleep(0.5) + + assert "proj2" not in create_query + + # Remove 'proj2' for part all_2_2_0 + break_projection(node, table_name, "proj2", "all_2_2_0", "part") + + node.restart_clickhouse() + + # proj2 is not broken, it doesn't exist, but ok + check(node, table_name, 0, expect_broken_part="proj2", do_check_command=0) + + def test_mutation_with_broken_projection(cluster): node = cluster.instances["node"] From 0dc4d773edd530494c1ab514d104a45665bdd16a Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 7 Aug 2024 18:46:34 +0200 Subject: [PATCH 1868/2361] Fxi style --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 918a4cda714..93904c1a838 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -72,7 +72,6 @@ namespace ErrorCodes extern const int BAD_TTL_FILE; extern const int NOT_IMPLEMENTED; extern const int NO_SUCH_COLUMN_IN_TABLE; - extern const int FILE_DOESNT_EXIST; } From 4fb1febe4859368de7ff3c8b73f2cc16d398c089 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 7 Aug 2024 18:51:24 +0200 Subject: [PATCH 1869/2361] Update table.md --- docs/en/sql-reference/statements/create/table.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 9c8984d698f..7428e6cd6ca 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -241,12 +241,12 @@ CREATE OR REPLACE TABLE test ( id UInt64, size_bytes Int64, - size String Alias formatReadableSize(size_bytes) + size String ALIAS formatReadableSize(size_bytes) ) ENGINE = MergeTree ORDER BY 
id; -INSERT INTO test Values (1, 4678899); +INSERT INTO test VALUES (1, 4678899); SELECT id, size_bytes, size FROM test; ┌─id─┬─size_bytes─┬─size─────┠@@ -497,7 +497,7 @@ If you perform a SELECT query mentioning a specific value in an encrypted column ```sql CREATE TABLE mytable ( - x String Codec(AES_128_GCM_SIV) + x String CODEC(AES_128_GCM_SIV) ) ENGINE = MergeTree ORDER BY x; ``` From 3b48a1a92c1294cae71d6287a8adbc49e0b9890d Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Wed, 7 Aug 2024 14:01:40 -0300 Subject: [PATCH 1870/2361] Update http.md --- docs/en/interfaces/http.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index f5b6326fa96..03fdfa048c8 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -379,7 +379,7 @@ You can mitigate this problem by enabling `wait_end_of_query=1` ([Response Buffe However, this does not completely solve the problem because the result must still fit within the `http_response_buffer_size`, and other settings like `send_progress_in_http_headers` can interfere with the delay of the header. The only way to catch all errors is to analyze the HTTP body before parsing it using the required format. -### Queries with Parameters {#cli-queries-with-parameters} +## Queries with Parameters {#cli-queries-with-parameters} You can create a query with parameters and pass values for them from the corresponding HTTP request parameters. For more information, see [Queries with Parameters for CLI](../interfaces/cli.md#cli-queries-with-parameters). From 06d154055f9e233180f13585e43e2992ae5ccfdf Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 7 Aug 2024 19:19:33 +0200 Subject: [PATCH 1871/2361] adjust tests --- tests/integration/test_disk_configuration/test.py | 2 +- tests/queries/0_stateless/03008_s3_plain_rewritable.sh | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_disk_configuration/test.py b/tests/integration/test_disk_configuration/test.py index afc5303298c..f297c665dc5 100644 --- a/tests/integration/test_disk_configuration/test.py +++ b/tests/integration/test_disk_configuration/test.py @@ -373,7 +373,7 @@ def test_merge_tree_setting_override(start_cluster): CREATE TABLE {TABLE_NAME} (a Int32) ENGINE = MergeTree() ORDER BY tuple() - SETTINGS disk = 'kek', storage_policy = 's3'; + SETTINGS disk = 's3', storage_policy = 's3'; """ ) ) diff --git a/tests/queries/0_stateless/03008_s3_plain_rewritable.sh b/tests/queries/0_stateless/03008_s3_plain_rewritable.sh index 4d5989f6f12..8eea7940774 100755 --- a/tests/queries/0_stateless/03008_s3_plain_rewritable.sh +++ b/tests/queries/0_stateless/03008_s3_plain_rewritable.sh @@ -46,7 +46,12 @@ ${CLICKHOUSE_CLIENT} --query "drop table if exists test_s3_mt_dst" ${CLICKHOUSE_CLIENT} -m --query " create table test_s3_mt_dst (a Int32, b Int64, c Int64) engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b) -settings disk = '03008_s3_plain_rewritable' +settings disk = disk( + name = 03008_s3_plain_rewritable, + type = s3_plain_rewritable, + endpoint = 'http://localhost:11111/test/03008_test_s3_mt/', + access_key_id = clickhouse, + secret_access_key = clickhouse); " ${CLICKHOUSE_CLIENT} -m --query " From d992431f96f4374d7bb7d15be51c3569dedcb4d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 17:27:24 +0000 Subject: [PATCH 1872/2361] Make tests more sturdier for flaky test check --- 
tests/integration/test_storage_kafka/test.py | 33 +++++++++++++++----- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 3f71866c913..39796fa3ba5 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1161,8 +1161,27 @@ def kafka_cluster(): @pytest.fixture(autouse=True) def kafka_setup_teardown(): - instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") - # logging.debug("kafka is available - running test") + instance.query("DROP DATABASE IF EXISTS test SYNC; CREATE DATABASE test;") + admin_client = get_admin_client(cluster) + def get_topics_to_delete(): + return [t for t in admin_client.list_topics() if not t.startswith("_")] + topics = get_topics_to_delete() + logging.debug(f"Deleting topics: {topics}") + result = admin_client.delete_topics(topics) + for topic, error in result.topic_error_codes: + if error != 0: + logging.warning(f"Received error {error} while deleting topic {topic}") + else: + logging.info(f"Deleted topic {topic}") + + retries = 0 + topics = get_topics_to_delete() + while (len(topics) != 0): + logging.info(f"Existing topics: {topics}") + if retries >= 5: + raise Exception(f"Failed to delete topics {topics}") + retries += 1 + time.sleep(0.5) yield # run test @@ -3286,7 +3305,7 @@ def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster, create_query_gen ) instance.query( f""" - DROP TABLE IF EXISTS test.view; + DROP TABLE IF EXISTS test.view SYNC; DROP TABLE IF EXISTS test.consumer; {create_query}; @@ -3329,7 +3348,7 @@ def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster, create_query_gen instance.query( """ DROP TABLE test.consumer; - DROP TABLE test.view; + DROP TABLE test.view SYNC; """ ) @@ -5381,7 +5400,7 @@ def test_multiple_read_in_materialized_views(kafka_cluster, create_query_generat ) instance.query( f""" - DROP TABLE IF EXISTS test.kafka_multiple_read_input; + DROP TABLE IF EXISTS test.kafka_multiple_read_input SYNC; DROP TABLE IF EXISTS test.kafka_multiple_read_table; DROP TABLE IF EXISTS test.kafka_multiple_read_mv; @@ -5496,9 +5515,9 @@ def test_kafka_null_message(kafka_cluster, create_query_generator): instance.query( """ - DROP TABLE test.null_message_consumer; + DROP TABLE test.null_message_consumer SYNC; DROP TABLE test.null_message_view; - DROP TABLE test.null_message_kafka; + DROP TABLE test.null_message_kafka SYNC; """ ) From ec3a248e70e16d2cb4db1ec5ff17e95cc11dedae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 17:37:54 +0000 Subject: [PATCH 1873/2361] Fix clang-tidy --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index b24d7968b61..5ee0bd328e0 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1146,7 +1146,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr, 
/*allow_partial_result=*/ false); if (!filter_dag) return {}; From 47270449dcdbd056432aa4d837ccb33917fc1a9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 7 Aug 2024 17:39:09 +0000 Subject: [PATCH 1874/2361] Style fix --- src/Storages/Kafka/KafkaConsumer2.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/Kafka/KafkaConsumer2.cpp b/src/Storages/Kafka/KafkaConsumer2.cpp index dc71086db3b..60626dfa402 100644 --- a/src/Storages/Kafka/KafkaConsumer2.cpp +++ b/src/Storages/Kafka/KafkaConsumer2.cpp @@ -352,7 +352,8 @@ void KafkaConsumer2::subscribeIfNotSubscribedYet() ReadBufferPtr KafkaConsumer2::getNextMessage() { - while (current != messages.end()) { + while (current != messages.end()) + { const auto * data = current->get_payload().get_data(); size_t size = current->get_payload().get_size(); ++current; From 94398996b2a002f5f08e3f97c2544e7a0f712f59 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 7 Aug 2024 17:47:15 +0000 Subject: [PATCH 1875/2361] Automatic style fix --- tests/integration/test_storage_kafka/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 39796fa3ba5..4b6c9922d74 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1163,8 +1163,10 @@ def kafka_cluster(): def kafka_setup_teardown(): instance.query("DROP DATABASE IF EXISTS test SYNC; CREATE DATABASE test;") admin_client = get_admin_client(cluster) + def get_topics_to_delete(): return [t for t in admin_client.list_topics() if not t.startswith("_")] + topics = get_topics_to_delete() logging.debug(f"Deleting topics: {topics}") result = admin_client.delete_topics(topics) @@ -1176,7 +1178,7 @@ def kafka_setup_teardown(): retries = 0 topics = get_topics_to_delete() - while (len(topics) != 0): + while len(topics) != 0: logging.info(f"Existing topics: {topics}") if retries >= 5: raise Exception(f"Failed to delete topics {topics}") From cf8ddbc15e0bb1143ff1737aec80171b518b24bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mi=D1=81hael=20Stetsyuk?= <59827607+mstetsyuk@users.noreply.github.com> Date: Wed, 7 Aug 2024 19:03:20 +0100 Subject: [PATCH 1876/2361] Update src/Databases/DatabaseReplicated.cpp Co-authored-by: Alexander Tokmakov --- src/Databases/DatabaseReplicated.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 213c94d4d94..09dd2065b19 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -390,7 +390,8 @@ ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) } return replicas_info; - } catch (...) + } + catch (...) 
{ tryLogCurrentException(log); return {}; From 7341dcefd44a1ee38a68e11604db7dfae8a32882 Mon Sep 17 00:00:00 2001 From: Ilya Yatsishin <2159081+qoega@users.noreply.github.com> Date: Wed, 7 Aug 2024 20:36:23 +0200 Subject: [PATCH 1877/2361] Follow up for #67843 --- tests/integration/test_access_for_functions/test.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integration/test_access_for_functions/test.py b/tests/integration/test_access_for_functions/test.py index 52777c60729..82125b35b49 100644 --- a/tests/integration/test_access_for_functions/test.py +++ b/tests/integration/test_access_for_functions/test.py @@ -80,5 +80,3 @@ EOF""", instance.query(f"SHOW GRANTS FOR `{user_id}`") == f"GRANT SELECT ON mydb.* TO `{user_id}`\n" ) - instance.stop_clickhouse() - instance.start_clickhouse() From 466944683bf9c5e1d9dcb3d91c24ab9bf896a791 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 7 Aug 2024 18:50:19 +0000 Subject: [PATCH 1878/2361] fix for multiple WITH --- src/Interpreters/AddDefaultDatabaseVisitor.h | 7 +++-- .../03215_view_with_recursive.reference | 1 + .../0_stateless/03215_view_with_recursive.sql | 28 +++++++++++++++++++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 5e46a653efa..a28c7c1bff3 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -101,7 +101,7 @@ private: const String database_name; std::set external_tables; - mutable String with_alias; + mutable std::unordered_set with_aliases; bool only_replace_current_database_function = false; bool only_replace_in_join = false; @@ -120,7 +120,8 @@ private: void visit(ASTSelectQuery & select, ASTPtr &) const { if (select.recursive_with) - with_alias = select.with()->children[0]->as()->name; + for (const auto & child : select.with()->children) + with_aliases.insert(child->as()->name); if (select.tables()) tryVisit(select.refTables()); @@ -171,7 +172,7 @@ private: if (external_tables.contains(identifier.shortName())) return; /// This is WITH RECURSIVE alias. 
- if (!with_alias.empty() && identifier.name() == with_alias) + if (with_aliases.contains(identifier.name())) return; auto qualified_identifier = std::make_shared(database_name, identifier.name()); diff --git a/tests/queries/0_stateless/03215_view_with_recursive.reference b/tests/queries/0_stateless/03215_view_with_recursive.reference index c3ac783e702..c3ca8065a70 100644 --- a/tests/queries/0_stateless/03215_view_with_recursive.reference +++ b/tests/queries/0_stateless/03215_view_with_recursive.reference @@ -1 +1,2 @@ 5050 +8 diff --git a/tests/queries/0_stateless/03215_view_with_recursive.sql b/tests/queries/0_stateless/03215_view_with_recursive.sql index ef7908612af..5d93ccc5438 100644 --- a/tests/queries/0_stateless/03215_view_with_recursive.sql +++ b/tests/queries/0_stateless/03215_view_with_recursive.sql @@ -13,3 +13,31 @@ SELECT sum(number) FROM test_table; SELECT * FROM 03215_test_v; + +CREATE VIEW 03215_multi_v +AS WITH RECURSIVE + task AS + ( + SELECT + number AS task_id, + number - 1 AS parent_id + FROM numbers(10) + ), + rtq AS + ( + SELECT + task_id, + parent_id + FROM task AS t + WHERE t.parent_id = 1 + UNION ALL + SELECT + t.task_id, + t.parent_id + FROM task AS t, rtq AS r + WHERE t.parent_id = r.task_id + ) +SELECT count() +FROM rtq; + +SELECT * FROM 03215_multi_v; From 1082792950ca7b962c1288ab49bb8ff3ca855bbe Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 7 Aug 2024 20:21:50 +0100 Subject: [PATCH 1879/2361] fix test --- .../test_async_metrics_in_cgroup/test.py | 98 +++++++++---------- 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/tests/integration/test_async_metrics_in_cgroup/test.py b/tests/integration/test_async_metrics_in_cgroup/test.py index 00951c95a0e..d9f2e3aaaed 100644 --- a/tests/integration/test_async_metrics_in_cgroup/test.py +++ b/tests/integration/test_async_metrics_in_cgroup/test.py @@ -1,11 +1,10 @@ import pytest -import subprocess -import time from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node") +node1 = cluster.add_instance("node1", stay_alive=True) +node2 = cluster.add_instance("node2", stay_alive=True) @pytest.fixture(scope="module") @@ -17,61 +16,54 @@ def start_cluster(): cluster.shutdown() -def test_user_cpu_accounting(start_cluster): - if node.is_built_with_sanitizer(): - pytest.skip("Disabled for sanitizers") - - # check that our metrics sources actually exist - assert ( - subprocess.Popen("test -f /sys/fs/cgroup/cpu.stat".split(" ")).wait() == 0 - or subprocess.Popen( - "test -f /sys/fs/cgroup/cpuacct/cpuacct.stat".split(" ") - ).wait() - == 0 - ) - - # first let's spawn some cpu-intensive process outside of the container and check that it doesn't accounted by ClickHouse server - proc = subprocess.Popen( - "openssl speed -multi 8".split(" "), - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - - time.sleep(5) - - metric = node.query( - """ - SELECT max(value) - FROM ( - SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value - FROM system.asynchronous_metric_log - WHERE event_time >= now() - 60 AND metric = 'OSUserTime' - GROUP BY t - ) - """ - ).strip("\n") - - assert float(metric) < 2 - - proc.kill() - - # then let's test that we will account cpu time spent by the server itself +def run_cpu_intensive_task(node): node.query( - "SELECT cityHash64(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=10", + "SELECT sum(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=10", 
ignore_error=True, ) - metric = node.query( + +def get_async_metric(node, metric): + node.query("SYSTEM FLUSH LOGS") + return node.query( + f""" + SELECT max(value) + FROM ( + SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = '{metric}' + GROUP BY t + ) + SETTINGS max_threads = 1 """ - SELECT max(value) - FROM ( - SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value - FROM system.asynchronous_metric_log - WHERE event_time >= now() - 60 AND metric = 'OSUserTime' - GROUP BY t - ) - """ ).strip("\n") + +def test_user_cpu_accounting(start_cluster): + if node1.is_built_with_sanitizer(): + pytest.skip("Disabled for sanitizers") + + # run query on the other node, its usage shouldn't be accounted by node1 + run_cpu_intensive_task(node2) + + node1_cpu_time = get_async_metric(node1, "OSUserTime") + assert float(node1_cpu_time) < 2 + + # then let's test that we will account cpu time spent by the server itself + node2_cpu_time = get_async_metric(node2, "OSUserTime") # this check is really weak, but CI is tough place and we cannot guarantee that test process will get many cpu time - assert float(metric) > 1 + assert float(node2_cpu_time) > 2 + + +def test_normalized_user_cpu(start_cluster): + if node1.is_built_with_sanitizer(): + pytest.skip("Disabled for sanitizers") + + # run query on the other node, its usage shouldn't be accounted by node1 + run_cpu_intensive_task(node2) + + node1_cpu_time = get_async_metric(node1, "OSUserTimeNormalized") + assert float(node1_cpu_time) < 1.01 + + node2_cpu_time = get_async_metric(node2, "OSUserTimeNormalized") + assert float(node2_cpu_time) < 1.01 From d81b5239debaf01b74521511db44d6cb4cd419c4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 7 Aug 2024 21:37:01 +0200 Subject: [PATCH 1880/2361] Remove unused CLI option --- src/Client/ClientApplicationBase.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 9f133616d2e..71d13ad4f53 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -200,8 +200,6 @@ void ClientApplicationBase::init(int argc, char ** argv) ("pager", po::value(), "Pipe all output into this command (less or similar)") ("max_memory_usage_in_client", po::value(), "Set memory limit in client/local server") - ("fuzzer-args", po::value(), "Command line arguments for the LLVM's libFuzzer driver. Only relevant if the application is compiled with libFuzzer.") - ("client_logs_file", po::value(), "Path to a file for writing client logs. 
Currently we only have fatal logs (when the client crashes)") ; From f08cb90fe3f017d44e4dd58c7e696396d6bf5ac0 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Wed, 7 Aug 2024 19:54:55 +0000 Subject: [PATCH 1881/2361] fxs --- src/Databases/DatabaseReplicated.cpp | 9 ++++++++- src/Storages/System/StorageSystemClusters.cpp | 6 +++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 09dd2065b19..fe00c1c60aa 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -379,10 +379,17 @@ ReplicasInfo DatabaseReplicated::tryGetReplicasInfo(const ClusterPtr & cluster_) auto replica_active = zk_res[2 * global_replica_index + 1]; auto replica_log_ptr = zk_res[2 * global_replica_index + 2]; + UInt64 recovery_time = 0; + { + std::lock_guard lock(ddl_worker_mutex); + if (replica.is_local && ddl_worker) + recovery_time = ddl_worker->getCurrentInitializationDurationMs(); + } + replicas_info[global_replica_index] = ReplicaInfo{ .is_active = replica_active.error == Coordination::Error::ZOK, .replication_lag = replica_log_ptr.error != Coordination::Error::ZNONODE ? std::optional(max_log_ptr - parse(replica_log_ptr.data)) : std::nullopt, - .recovery_time = replica.is_local && ddl_worker ? ddl_worker->getCurrentInitializationDurationMs() : 0, + .recovery_time = recovery_time, }; ++global_replica_index; diff --git a/src/Storages/System/StorageSystemClusters.cpp b/src/Storages/System/StorageSystemClusters.cpp index db1955c2e99..9493d2c97ab 100644 --- a/src/Storages/System/StorageSystemClusters.cpp +++ b/src/Storages/System/StorageSystemClusters.cpp @@ -70,8 +70,9 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std const auto & shards_info = cluster->getShardsInfo(); const auto & addresses_with_failover = cluster->getShardsAddresses(); + size_t recovery_time_column_idx = columns_mask.size() - 1, replication_lag_column_idx = columns_mask.size() - 2, is_active_column_idx = columns_mask.size() - 3; ReplicasInfo replicas_info; - if (replicated) + if (replicated && (columns_mask[recovery_time_column_idx] || columns_mask[replication_lag_column_idx] || columns_mask[is_active_column_idx])) replicas_info = replicated->tryGetReplicasInfo(name_and_cluster.second); size_t replica_idx = 0; @@ -122,6 +123,7 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std if (columns_mask[src_index++]) res_columns[res_index++]->insert(address.database_replica_name); + /// make sure these three columns remain the last ones if (columns_mask[src_index++]) { if (replicas_info.empty()) @@ -132,7 +134,6 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std res_columns[res_index++]->insert(replica_info.is_active); } } - if (columns_mask[src_index++]) { if (replicas_info.empty()) @@ -146,7 +147,6 @@ void StorageSystemClusters::writeCluster(MutableColumns & res_columns, const std res_columns[res_index++]->insertDefault(); } } - if (columns_mask[src_index++]) { if (replicas_info.empty()) From 9fdc746c44499e819649a0298755cae7b02c23e0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 7 Aug 2024 22:23:21 +0200 Subject: [PATCH 1882/2361] Fix test `02845_threads_count_in_distributed_queries` --- .../02845_threads_count_in_distributed_queries.sql.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 
b/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 index ffdd4e3400e..7d751eb8f17 100644 --- a/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 +++ b/tests/queries/0_stateless/02845_threads_count_in_distributed_queries.sql.j2 @@ -1,3 +1,7 @@ +-- Tags: no-parallel, no-fasttest +-- ^ because query_thread_log is not guaranteed to be written under high load +-- (when the queue is full, events are silently dropped) + -- enforce some defaults to be sure that the env settings will not affect the test SET max_threads=5, async_socket_for_remote=1, prefer_localhost_replica=1, optimize_read_in_order=1, load_marks_asynchronously=0, local_filesystem_read_method='pread', remote_filesystem_read_method='read'; From 725640613b0d1cf47515697b5856a85953b73483 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 7 Aug 2024 22:35:57 +0200 Subject: [PATCH 1883/2361] Add annotations --- .../0_stateless/00111_shard_external_sort_distributed.sql | 3 ++- .../0_stateless/00376_shard_group_uniq_array_of_int_array.sql | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql index 93efc317bfa..9e06654195d 100644 --- a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql +++ b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql @@ -1,4 +1,5 @@ --- Tags: distributed, long +-- Tags: distributed, long, no-flaky-check +-- ^ no-flaky-check - sometimes longer than 600s with ThreadFuzzer. SET max_memory_usage = 150000000; SET max_bytes_before_external_sort = 10000000; diff --git a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql index 24b7f1c30a6..4453c26283c 100644 --- a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql +++ b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql @@ -1,4 +1,4 @@ --- Tags: shard +-- Tags: long SET max_rows_to_read = '55M'; From b2722d883282eaea7f5d57d962b8f9acc884ce05 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Wed, 7 Aug 2024 20:54:40 +0000 Subject: [PATCH 1884/2361] Disallow LowCardinality type for external tables --- src/Storages/StorageTimeSeries.cpp | 13 ++++++++++++- tests/integration/test_prometheus_protocols/test.py | 10 +++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageTimeSeries.cpp b/src/Storages/StorageTimeSeries.cpp index f5a2c0c59a2..3ff57aaf3e5 100644 --- a/src/Storages/StorageTimeSeries.cpp +++ b/src/Storages/StorageTimeSeries.cpp @@ -155,7 +155,18 @@ StorageTimeSeries::StorageTimeSeries( auto & target = targets.emplace_back(); target.kind = target_kind; target.table_id = initTarget(target_kind, target_info, local_context, getStorageID(), columns, *storage_settings, mode); - target.is_inner_table = target_info->table_id.empty(); + target.is_inner_table = target_info && target_info->table_id.empty(); + + if (target_kind == ViewTarget::Metrics && !target.is_inner_table) + { + auto table = DatabaseCatalog::instance().tryGetTable(target.table_id, getContext()); + auto metadata = table->getInMemoryMetadataPtr(); + + for (const auto & column : metadata->columns) + if (column.type->lowCardinality()) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "External metrics table cannot have LowCardnality columns for now."); + } + has_inner_tables |= 
target.is_inner_table; } } diff --git a/tests/integration/test_prometheus_protocols/test.py b/tests/integration/test_prometheus_protocols/test.py index 6adb3da56c3..0c75a8194c7 100644 --- a/tests/integration/test_prometheus_protocols/test.py +++ b/tests/integration/test_prometheus_protocols/test.py @@ -60,19 +60,19 @@ def show_query_result(query): def compare_query(query): - timeout = 30 + timeout = 60 start_time = time.time() evaluation_time = start_time print(f"Evaluating query: {query}") print(f"Evaluation time: {evaluation_time}") while time.time() < start_time + timeout: result_from_writer = execute_query_on_prometheus_writer(query, evaluation_time) + time.sleep(1) result_from_reader = execute_query_on_prometheus_reader(query, evaluation_time) print(f"Result from prometheus_writer: {result_from_writer}") print(f"Result from prometheus_reader: {result_from_reader}") if result_from_writer == result_from_reader: return - time.sleep(1) raise Exception( f"Got different results from prometheus_writer and prometheus_reader" ) @@ -162,8 +162,12 @@ def test_external_tables(): "max_time SimpleAggregateFunction(max, Nullable(DateTime64(3)))) " "ENGINE=AggregatingMergeTree ORDER BY (metric_name, id)" ) + + # FIXME: The table structure should be: + # "CREATE TABLE mymetrics (metric_family_name String, type LowCardinality(String), unit LowCardinality(String), help String)" + # Renamed it because of the bug and potential type mismatch. node.query( - "CREATE TABLE mymetrics (metric_family_name String, type LowCardinality(String), unit LowCardinality(String), help String) " + "CREATE TABLE mymetrics (metric_family_name String, type String, unit String, help String) " "ENGINE=ReplacingMergeTree ORDER BY metric_family_name" ) node.query( From 85427030aaac223559c0ce6ca06dfa01d4345c21 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 7 Aug 2024 10:01:41 +0200 Subject: [PATCH 1885/2361] tests: fix 03002_part_log_rmt_fetch_merge_error flakiness CI found [1]: --- /usr/share/clickhouse-test/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.reference 2024-08-07 05:09:42.058643403 +0200 +++ /tmp/clickhouse-test/0_stateless/03002_part_log_rmt_fetch_merge_error.stdout 2024-08-07 05:54:45.514083455 +0200 @@ -6,5 +6,7 @@ after rmt_master NewPart 0 1 rmt_master MergeParts 0 1 +rmt_master RemovePart 0 1 rmt_slave MergeParts 1 0 rmt_slave DownloadPart 0 2 +rmt_slave RemovePart 0 1 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 1.0 --prefer_fetch_merged_part_size_threshold 3517855074 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 100 --allow_vertical_merges_from_compact_to_wide_parts 0 --min_merge_bytes_to_use_direct_io 10737418240 --index_granularity_bytes 7659983 --merge_max_block_size 17667 --index_granularity 48465 --min_bytes_for_wide_part 1073741824 --marks_compress_block_size 58048 --primary_key_compress_block_size 18342 --replace_long_file_name_to_hash 0 --max_file_name_length 36 --min_bytes_for_full_part_storage 536870912 --compact_parts_max_bytes_to_buffer 148846831 --compact_parts_max_granules_to_buffer 140 --compact_parts_merge_max_bytes_to_prefetch_part 4513530 --cache_populated_by_fetch 1 --concurrent_part_removal_threshold 8 --old_parts_lifetime 10 The reason is old_parts_lifetime=10 [1]: https://s3.amazonaws.com/clickhouse-test-reports/67511/881d57a7644057b586e4cdb95ebb8785d912d4c5/stateless_tests__msan__%5B3_4%5D.html Signed-off-by: Azat Khuzhin --- 
.../0_stateless/03002_part_log_rmt_fetch_merge_error.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh index 25d946b325d..e58c542b8ac 100755 --- a/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh +++ b/tests/queries/0_stateless/03002_part_log_rmt_fetch_merge_error.sh @@ -21,9 +21,9 @@ $CLICKHOUSE_CLIENT -nm -q " drop table if exists rmt_master; drop table if exists rmt_slave; - create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0; + create table rmt_master (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'master') order by key settings always_fetch_merged_part=0, old_parts_lifetime=600; -- always_fetch_merged_part=1, consider this table as a 'slave' - create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1; + create table rmt_slave (key Int) engine=ReplicatedMergeTree('/clickhouse/{database}', 'slave') order by key settings always_fetch_merged_part=1, old_parts_lifetime=600; insert into rmt_master values (1); From 1dece979fe317a04f98d2b8008619c47fb72edb1 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Wed, 7 Aug 2024 23:10:40 +0200 Subject: [PATCH 1886/2361] CI: pass job timout into tests --- tests/ci/ci.py | 1 + tests/ci/ci_definitions.py | 2 +- tests/ci/functional_test_check.py | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 8d0414ce7a8..49b597333dc 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -985,6 +985,7 @@ def _run_test(job_name: str, run_command: str) -> int: else: print("Use run command from the workflow") env["CHECK_NAME"] = job_name + env["MAX_RUN_TIME"] = str(timeout or 0) print(f"Going to start run command [{run_command}]") stopwatch = Stopwatch() job_log = Path(TEMP_PATH) / "job_log.txt" diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 48847b0d7a6..592cb2f4879 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -420,7 +420,7 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=9000, + timeout=1000, # test ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 52970404d2d..3aff97643c3 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -114,6 +114,9 @@ def get_run_command( if flaky_check: envs.append("-e NUM_TRIES=50") envs.append("-e MAX_RUN_TIME=2800") + else: + max_run_time = os.getenv("MAX_RUN_TIME", 0) + envs.append(f"-e MAX_RUN_TIME={max_run_time}") envs += [f"-e {e}" for e in additional_envs] From 086b3d240dd696c483f136d79db4587a83bb0a14 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Wed, 7 Aug 2024 23:34:36 +0200 Subject: [PATCH 1887/2361] CI: push CI --- tests/ci/ci_definitions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 592cb2f4879..b62d2e0aa8e 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -420,7 +420,7 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=1000, # test + timeout=1001, # test ) 
STATEFUL_TEST = JobConfig( job_name_keyword="stateful", From c6c0a44b93c382b384eb3ef83cf9da5102629de8 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Wed, 7 Aug 2024 23:57:19 +0200 Subject: [PATCH 1888/2361] fix flaky tests --- .../test_storage_azure_blob_storage/test.py | 2 +- tests/integration/test_storage_hdfs/test.py | 51 +++++++------------ 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 092c124855c..fbdc7f29f98 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -1272,7 +1272,7 @@ def test_filtering_by_file_or_path(cluster): node.query("SYSTEM FLUSH LOGS") result = node.query( - f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query ilike '%select%azure%test_filter%' AND type='QueryFinish'" + f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query ilike '%select%azure%test_filter%' AND type='QueryFinish' ORDER BY event_time_microseconds DESC LIMIT 1" ) assert int(result) == 1 diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 77921b885b0..c52e99b800e 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -41,7 +41,6 @@ def test_read_write_storage(started_cluster): node1.query("insert into SimpleHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n" assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n" - node1.query("drop table if exists SimpleHDFSStorage") def test_read_write_storage_with_globs(started_cluster): @@ -95,11 +94,6 @@ def test_read_write_storage_with_globs(started_cluster): print(ex) assert "in readonly mode" in str(ex) - node1.query("DROP TABLE HDFSStorageWithRange") - node1.query("DROP TABLE HDFSStorageWithEnum") - node1.query("DROP TABLE HDFSStorageWithQuestionMark") - node1.query("DROP TABLE HDFSStorageWithAsterisk") - def test_storage_with_multidirectory_glob(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -341,7 +335,6 @@ def test_virtual_columns(started_cluster): ) == expected ) - node1.query("DROP TABLE virtual_cols") def test_read_files_with_spaces(started_cluster): @@ -363,7 +356,6 @@ def test_read_files_with_spaces(started_cluster): ) assert node1.query("select * from test order by id") == "1\n2\n3\n" fs.delete(dir, recursive=True) - node1.query("DROP TABLE test") def test_truncate_table(started_cluster): @@ -435,7 +427,7 @@ def test_seekable_formats(started_cluster): f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')" ) node1.query( - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" ) result = node1.query(f"SELECT count() FROM {table_function}") @@ -443,7 +435,7 @@ def test_seekable_formats(started_cluster): table_function = f"hdfs('hdfs://hdfs1:9000/orc', 'ORC', 'a Int32, b String')" node1.query( - f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function {table_function} SELECT number, randomString(100) FROM 
numbers(5000000)" ) result = node1.query(f"SELECT count() FROM {table_function}") assert int(result) == 5000000 @@ -467,7 +459,7 @@ def test_read_table_with_default(started_cluster): def test_schema_inference(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000)" ) result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/native', 'Native')") @@ -520,7 +512,6 @@ def test_hdfs_directory_not_exist(started_cluster): assert "" == node1.query( "select * from HDFSStorageWithNotExistDir settings hdfs_ignore_file_doesnt_exist=1" ) - node1.query("DROP TABLE HDFSStorageWithNotExistDir") def test_overwrite(started_cluster): @@ -540,7 +531,6 @@ def test_overwrite(started_cluster): result = node1.query(f"select count() from test_overwrite") assert int(result) == 10 - node1.query(f"DROP TABLE test_overwrite") def test_multiple_inserts(started_cluster): @@ -577,7 +567,6 @@ def test_multiple_inserts(started_cluster): result = node1.query(f"select count() from test_multiple_inserts") assert int(result) == 60 - node1.query(f"DROP TABLE test_multiple_inserts") def test_format_detection(started_cluster): @@ -591,10 +580,10 @@ def test_format_detection(started_cluster): def test_schema_inference_with_globs(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" ) node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0 SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" ) result = node1.query( @@ -608,7 +597,7 @@ def test_schema_inference_with_globs(started_cluster): assert sorted(result.split()) == ["0", "\\N"] node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" ) filename = "data{1,3}.jsoncompacteachrow" @@ -620,7 +609,7 @@ def test_schema_inference_with_globs(started_cluster): assert "All attempts to extract table structure from files failed" in result node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]' SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'" ) result = node1.query_and_get_error( @@ -632,7 +621,7 @@ def test_schema_inference_with_globs(started_cluster): def test_insert_select_schema_inference(started_cluster): node1.query( - f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x SETTINGS hdfs_truncate_on_insert=1" + f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x" ) result = 
node1.query(f"desc hdfs('hdfs://hdfs1:9000/test.native.zst')") @@ -675,9 +664,7 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')" ) - node1.query( - f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" - ) + node1.query(f"insert into table function {table_function} SELECT 1, 'kek'") result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "parquet_2" @@ -685,9 +672,7 @@ def test_virtual_columns_2(started_cluster): table_function = ( f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')" ) - node1.query( - f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1" - ) + node1.query(f"insert into table function {table_function} SELECT 1, 'kek'") result = node1.query(f"SELECT _path FROM {table_function}") assert result.strip() == "kek" @@ -984,11 +969,11 @@ def test_read_subcolumns(started_cluster): node = started_cluster.instances["node1"] node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS hdfs_truncate_on_insert=1" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" ) node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS hdfs_truncate_on_insert=1" + f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)" ) res = node.query( @@ -1034,11 +1019,11 @@ def test_union_schema_inference_mode(started_cluster): node = started_cluster.instances["node1"] node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a SETTINGS hdfs_truncate_on_insert=1" + "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a" ) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b SETTINGS hdfs_truncate_on_insert=1" + "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b" ) node.query("system drop schema cache for hdfs") @@ -1070,7 +1055,7 @@ def test_union_schema_inference_mode(started_cluster): ) assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n" node.query( - f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error' SETTINGS hdfs_truncate_on_insert=1" + f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error'" ) error = node.query_and_get_error( @@ -1083,11 +1068,11 @@ def test_format_detection(started_cluster): node = started_cluster.instances["node1"] node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0) SETTINGS hdfs_truncate_on_insert=1" + "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0)" ) node.query( - "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10) SETTINGS 
hdfs_truncate_on_insert=1" + "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10)" ) expected_desc_result = node.query( @@ -1151,7 +1136,7 @@ def test_write_to_globbed_partitioned_path(started_cluster): node = started_cluster.instances["node1"] error = node.query_and_get_error( - "insert into function hdfs('hdfs://hdfs1:9000/test_data_*_{_partition_id}.csv') partition by 42 select 42 SETTINGS hdfs_truncate_on_insert=1" + "insert into function hdfs('hdfs://hdfs1:9000/test_data_*_{_partition_id}.csv') partition by 42 select 42" ) assert "DATABASE_ACCESS_DENIED" in error From 55ad7d30946d609159fe5ae9156f02f5b160585a Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Thu, 8 Aug 2024 00:08:12 +0200 Subject: [PATCH 1889/2361] Fix stylelint --- tests/ci/functional_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 3aff97643c3..b7391eff01b 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -115,7 +115,7 @@ def get_run_command( envs.append("-e NUM_TRIES=50") envs.append("-e MAX_RUN_TIME=2800") else: - max_run_time = os.getenv("MAX_RUN_TIME", 0) + max_run_time = os.getenv("MAX_RUN_TIME", "0") envs.append(f"-e MAX_RUN_TIME={max_run_time}") envs += [f"-e {e}" for e in additional_envs] From ff8ce505d752eff1c867d73b47e39a03f0f13622 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 8 Aug 2024 00:20:16 +0200 Subject: [PATCH 1890/2361] Revert "Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing" --- contrib/CMakeLists.txt | 2 +- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 44 ++++++++++------------------ 3 files changed, 18 insertions(+), 30 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index eb3afe0ccdf..977efda15ff 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -71,6 +71,7 @@ add_contrib (zlib-ng-cmake zlib-ng) add_contrib (bzip2-cmake bzip2) add_contrib (minizip-ng-cmake minizip-ng) add_contrib (snappy-cmake snappy) +add_contrib (rocksdb-cmake rocksdb) add_contrib (thrift-cmake thrift) # parquet/arrow/orc add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion @@ -147,7 +148,6 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro add_contrib (cppkafka-cmake cppkafka) add_contrib (libpqxx-cmake libpqxx) add_contrib (libpq-cmake libpq) -add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing add_contrib (nuraft-cmake NuRaft) add_contrib (fast_float-cmake fast_float) add_contrib (idna-cmake idna) diff --git a/contrib/rocksdb b/contrib/rocksdb index 5f003e4a22d..49ce8a1064d 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24 +Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 7e5e9a28d0f..57c056532c6 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -5,38 +5,36 @@ if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL return() endif() +# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default +set (CMAKE_CXX_STANDARD 20) + +# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs 
+option(WITH_JEMALLOC "build with JeMalloc" OFF) + +option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING + # ClickHouse cannot be compiled without snappy, lz4, zlib, zstd option(WITH_SNAPPY "build with SNAPPY" ON) option(WITH_LZ4 "build with lz4" ON) option(WITH_ZLIB "build with zlib" ON) option(WITH_ZSTD "build with zstd" ON) -if (ENABLE_JEMALLOC) - add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE) - list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc) -endif () - -if (ENABLE_LIBURING) - add_definitions(-DROCKSDB_IOURING_PRESENT) - list (APPEND THIRDPARTY_LIBS ch_contrib::liburing) -endif () - -if (WITH_SNAPPY) +if(WITH_SNAPPY) add_definitions(-DSNAPPY) list(APPEND THIRDPARTY_LIBS ch_contrib::snappy) endif() -if (WITH_ZLIB) +if(WITH_ZLIB) add_definitions(-DZLIB) list(APPEND THIRDPARTY_LIBS ch_contrib::zlib) endif() -if (WITH_LZ4) +if(WITH_LZ4) add_definitions(-DLZ4) list(APPEND THIRDPARTY_LIBS ch_contrib::lz4) endif() -if (WITH_ZSTD) +if(WITH_ZSTD) add_definitions(-DZSTD) list(APPEND THIRDPARTY_LIBS ch_contrib::zstd) endif() @@ -90,7 +88,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc ${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc - ${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc @@ -107,7 +104,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc ${ROCKSDB_SOURCE_DIR}/db/builder.cc ${ROCKSDB_SOURCE_DIR}/db/c.cc - ${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc ${ROCKSDB_SOURCE_DIR}/db/column_family.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc @@ -128,7 +124,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc - ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc @@ -186,7 +181,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc ${ROCKSDB_SOURCE_DIR}/env/file_system.cc ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc - ${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc ${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc @@ -374,7 +368,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc - ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc @@ -395,7 +388,6 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc - ${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc 
${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc @@ -426,18 +418,14 @@ if(HAS_ARMV8_CRC) endif(HAS_ARMV8_CRC) list(APPEND SOURCES - ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc - ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc) + "${ROCKSDB_SOURCE_DIR}/port/port_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/env_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc" + "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc") add_library(_rocksdb ${SOURCES}) add_library(ch_contrib::rocksdb ALIAS _rocksdb) target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) -# Not in the native build system but useful anyways: -# Make all functions in xxHash.h inline. Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers -target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL) - # SYSTEM is required to overcome some issues target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include") From c19ee360d1a4cf0bc7607923505ba1e2a3848132 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Thu, 8 Aug 2024 00:44:42 +0200 Subject: [PATCH 1891/2361] Update StorageObjectStorageSource.cpp --- src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 810bad4788b..d8e26977e75 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -208,7 +208,6 @@ Chunk StorageObjectStorageSource::generate() .filename = &filename, .last_modified = object_info->metadata->last_modified, .etag = &(object_info->metadata->etag) - .last_modified = object_info->metadata->last_modified, }, getContext(), read_from_format_info.columns_description); const auto & partition_columns = configuration->getPartitionColumns(); From 8426e0d5e5d7f102fd57a45c82ae6acccda65369 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Wed, 7 Aug 2024 16:44:10 +0800 Subject: [PATCH 1892/2361] fix crash --- src/Interpreters/HashJoin/HashJoinMethodsImpl.h | 3 +-- src/Interpreters/HashJoin/JoinFeatures.h | 2 +- .../03006_join_on_inequal_expression_fast.reference | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index 2bf5f6aef4a..5fefe53d145 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -361,7 +361,6 @@ size_t HashJoinMethods::joinRightColumns( } bool right_row_found = false; - KnownRowsHolder known_rows; for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx) { @@ -693,7 +692,7 @@ size_t HashJoinMethods::joinRightColumnsWithAddti any_matched = true; if constexpr (join_features.is_semi_join || join_features.is_any_join) { - auto used_once = used_flags.template setUsedOnce( + auto used_once = used_flags.template setUsedOnce( selected_right_row_it->block, selected_right_row_it->row_num, 0); if (used_once) { diff --git a/src/Interpreters/HashJoin/JoinFeatures.h b/src/Interpreters/HashJoin/JoinFeatures.h index a530179f0b4..b8de606c51e 100644 --- a/src/Interpreters/HashJoin/JoinFeatures.h +++ b/src/Interpreters/HashJoin/JoinFeatures.h @@ -22,7 +22,7 @@ struct JoinFeatures static constexpr bool 
need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); static constexpr bool add_missing = (left || full) && !is_semi_join; - static constexpr bool need_flags = MapGetter, HashJoin::MapsOne>>::flagged; + static constexpr bool need_flags = MapGetter, HashJoin::MapsAll>>::flagged; static constexpr bool is_maps_all = std::is_same_v, HashJoin::MapsAll>; }; diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference index 11ac01d24d5..aa8d4103db2 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference @@ -1,4 +1,3 @@ -03006_join_on_inequal_expression_fast.sql -- { echoOn } SET join_algorithm='hash'; SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); From daf62e16824bb3af1137dba181a72ada11b367ad Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Tue, 6 Aug 2024 18:07:42 +0800 Subject: [PATCH 1893/2361] update --- .../window-functions/percent_rank.md | 6 ++-- src/Planner/PlannerActionsVisitor.cpp | 36 ++++++------------- src/Planner/PlannerActionsVisitor.h | 9 ++--- src/Planner/PlannerWindowFunctions.cpp | 14 ++++---- src/Planner/Utils.cpp | 19 ++++++++++ src/Planner/Utils.h | 7 ++++ 6 files changed, 48 insertions(+), 43 deletions(-) diff --git a/docs/en/sql-reference/window-functions/percent_rank.md b/docs/en/sql-reference/window-functions/percent_rank.md index 4b260f667b9..2e348f2a333 100644 --- a/docs/en/sql-reference/window-functions/percent_rank.md +++ b/docs/en/sql-reference/window-functions/percent_rank.md @@ -15,12 +15,12 @@ Alias: `percentRank` (case-sensitive) ```sql percent_rank (column_name) OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column] - [RANGE RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING]] | [window_name]) + [RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING]] | [window_name]) FROM table_name -WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column] RANGE RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column] RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) ``` -The default and required window frame definition is `RANGE RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING`. +The default and required window frame definition is `RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING`. For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax). 
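To make the corrected frame definition above concrete, here is a minimal usage sketch. It is not part of the patch itself; the `salaries` table, its data, and the column names are assumed purely for illustration.

```sql
-- Hypothetical table and data, assumed only for this illustration.
CREATE TABLE salaries (player String, salary UInt32) ENGINE = Memory;
INSERT INTO salaries VALUES ('A', 100), ('B', 250), ('C', 400);

-- percent_rank with the default (and required) frame written out explicitly.
SELECT
    player,
    salary,
    percent_rank() OVER (ORDER BY salary
        RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS pr
FROM salaries;
```

With this data the query returns `pr` values 0, 0.5 and 1, since `percent_rank` is `(rank - 1) / (rows - 1)` within the window.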
diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index 288669e7050..43177fc73c0 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -237,9 +237,8 @@ public: if (function_node.isWindowFunction()) { - auto get_window_frame = [&]() { return extractWindowFrame(function_node); }; buffer << " OVER ("; - buffer << calculateWindowNodeActionName(function_node.getWindowNode(), get_window_frame); + buffer << calculateWindowNodeActionName(node, function_node.getWindowNode()); buffer << ')'; } @@ -300,21 +299,22 @@ public: return calculateConstantActionNodeName(constant_literal, applyVisitor(FieldToDataType(), constant_literal)); } - String calculateWindowNodeActionName(const QueryTreeNodePtr & node, std::function()> get_window_frame) + String calculateWindowNodeActionName(const QueryTreeNodePtr & function_nodew_node_, const QueryTreeNodePtr & window_node_) { - auto & window_node = node->as(); + const auto & function_node = function_nodew_node_->as(); + const auto & window_node = window_node_->as(); WriteBufferFromOwnString buffer; if (window_node.hasPartitionBy()) { buffer << "PARTITION BY "; - auto & partition_by_nodes = window_node.getPartitionBy().getNodes(); + const auto & partition_by_nodes = window_node.getPartitionBy().getNodes(); size_t partition_by_nodes_size = partition_by_nodes.size(); for (size_t i = 0; i < partition_by_nodes_size; ++i) { - auto & partition_by_node = partition_by_nodes[i]; + const auto & partition_by_node = partition_by_nodes[i]; buffer << calculateActionNodeName(partition_by_node); if (i + 1 != partition_by_nodes_size) buffer << ", "; @@ -328,7 +328,7 @@ public: buffer << "ORDER BY "; - auto & order_by_nodes = window_node.getOrderBy().getNodes(); + const auto & order_by_nodes = window_node.getOrderBy().getNodes(); size_t order_by_nodes_size = order_by_nodes.size(); for (size_t i = 0; i < order_by_nodes_size; ++i) @@ -366,7 +366,7 @@ public: } } - auto window_frame_opt = get_window_frame(); + auto window_frame_opt = extractWindowFrame(function_node); if (window_frame_opt) { auto & window_frame = *window_frame_opt; @@ -1028,27 +1028,11 @@ String calculateConstantActionNodeName(const Field & constant_literal) return ActionNodeNameHelper::calculateConstantActionNodeName(constant_literal); } -std::optional extractWindowFrame(const FunctionNode & node) -{ - if (!node.isWindowFunction()) - return {}; - auto & window_node = node.getWindowNode()->as(); - const auto & window_frame = window_node.getWindowFrame(); - if (!window_frame.is_default) - return window_frame; - auto aggregate_function = node.getAggregateFunction(); - if (const auto * win_func = dynamic_cast(aggregate_function.get())) - { - return win_func->getDefaultFrame(); - } - return {}; -} - -String calculateWindowNodeActionName(const QueryTreeNodePtr & node, const PlannerContext & planner_context, std::function()> get_window_frame, bool use_column_identifier_as_action_node_name) +String calculateWindowNodeActionName(const QueryTreeNodePtr & function_node, const QueryTreeNodePtr & window_node, const PlannerContext & planner_context, bool use_column_identifier_as_action_node_name) { QueryTreeNodeToName empty_map; ActionNodeNameHelper helper(empty_map, planner_context, use_column_identifier_as_action_node_name); - return helper.calculateWindowNodeActionName(node, get_window_frame); + return helper.calculateWindowNodeActionName(function_node, window_node); } } diff --git a/src/Planner/PlannerActionsVisitor.h 
b/src/Planner/PlannerActionsVisitor.h index 17cce39f2a0..4f608ad3f7b 100644 --- a/src/Planner/PlannerActionsVisitor.h +++ b/src/Planner/PlannerActionsVisitor.h @@ -71,18 +71,13 @@ String calculateConstantActionNodeName(const Field & constant_literal, const Dat /// Calculate action node name for constant, data type will be derived from constant literal value String calculateConstantActionNodeName(const Field & constant_literal); -/// If the window frame is not set in sql, try to use the default frame from window function -/// if it have any one. Otherwise return empty. -/// If the window frame is set in sql, use it anyway. -std::optional extractWindowFrame(const FunctionNode & node); - /** Calculate action node name for window node. * Window node action name can only be part of window function action name. * For column node column node identifier from planner context is used, if use_column_identifier_as_action_node_name = true. */ -String calculateWindowNodeActionName(const QueryTreeNodePtr & node, +String calculateWindowNodeActionName(const QueryTreeNodePtr & function_node, + const QueryTreeNodePtr & window_node, const PlannerContext & planner_context, - std::function()> get_window_frame, bool use_column_identifier_as_action_node_name = true); } diff --git a/src/Planner/PlannerWindowFunctions.cpp b/src/Planner/PlannerWindowFunctions.cpp index a69dd95a650..f91cf644cf0 100644 --- a/src/Planner/PlannerWindowFunctions.cpp +++ b/src/Planner/PlannerWindowFunctions.cpp @@ -12,27 +12,27 @@ #include #include +#include namespace DB { namespace ErrorCodes { -extern const int NOT_IMPLEMENTED; + extern const int NOT_IMPLEMENTED; } namespace { -WindowDescription extractWindowDescriptionFromWindowNode(const FunctionNode & func_node, const PlannerContext & planner_context) +WindowDescription extractWindowDescriptionFromWindowNode(const QueryTreeNodePtr & func_node_, const PlannerContext & planner_context) { + const auto & func_node = func_node_->as(); auto node = func_node.getWindowNode(); auto & window_node = node->as(); - auto get_window_frame = [&]() { return extractWindowFrame(func_node); }; - WindowDescription window_description; - window_description.window_name = calculateWindowNodeActionName(node, planner_context, get_window_frame); + window_description.window_name = calculateWindowNodeActionName(func_node_, node, planner_context); for (const auto & partition_by_node : window_node.getPartitionBy().getNodes()) { @@ -49,7 +49,7 @@ WindowDescription extractWindowDescriptionFromWindowNode(const FunctionNode & fu window_description.full_sort_description.end(), window_description.order_by.begin(), window_description.order_by.end()); /// WINDOW frame is validated during query analysis stage - auto window_frame = get_window_frame(); + auto window_frame = extractWindowFrame(func_node); window_description.frame = window_frame ? 
*window_frame : window_node.getWindowFrame(); auto node_frame = window_node.getWindowFrame();
@@ -82,7 +82,7 @@ extractWindowDescriptions(const QueryTreeNodes & window_function_nodes, const Pl { auto & window_function_node_typed = window_function_node->as();
- auto function_window_description = extractWindowDescriptionFromWindowNode(window_function_node_typed, planner_context);
+ auto function_window_description = extractWindowDescriptionFromWindowNode(window_function_node, planner_context);
 auto frame_type = function_window_description.frame.type; if (frame_type != WindowFrame::FrameType::ROWS && frame_type != WindowFrame::FrameType::RANGE)
diff --git a/src/Planner/Utils.cpp b/src/Planner/Utils.cpp index a6e94a124e6..822a3e9465e 100644 --- a/src/Planner/Utils.cpp +++ b/src/Planner/Utils.cpp
@@ -22,6 +22,8 @@ #include +#include + #include #include #include
@@ -34,6 +36,7 @@ #include #include #include +#include #include
@@ -507,4 +510,20 @@ void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets) } }
+std::optional extractWindowFrame(const FunctionNode & node)
+{
+    if (!node.isWindowFunction())
+        return {};
+    auto & window_node = node.getWindowNode()->as();
+    const auto & window_frame = window_node.getWindowFrame();
+    if (!window_frame.is_default)
+        return window_frame;
+    auto aggregate_function = node.getAggregateFunction();
+    if (const auto * win_func = dynamic_cast(aggregate_function.get()))
+    {
+        return win_func->getDefaultFrame();
+    }
+    return {};
+}
+
 }
diff --git a/src/Planner/Utils.h b/src/Planner/Utils.h index ae60976a8d6..254b8f4eae1 100644 --- a/src/Planner/Utils.h +++ b/src/Planner/Utils.h
@@ -19,6 +19,8 @@ #include +#include + namespace DB {
@@ -91,4 +93,9 @@ ASTPtr parseAdditionalResultFilter(const Settings & settings); using UsefulSets = std::unordered_set; void appendSetsFromActionsDAG(const ActionsDAG & dag, UsefulSets & useful_sets);
+/// If the window frame is not set in SQL, try to use the default frame from the window function
+/// if it has one. Otherwise return empty.
+/// If the window frame is set in SQL, use it anyway.
+std::optional extractWindowFrame(const FunctionNode & node); + } From 42aa967311a55d3da0e1230595b0e0ca9928e777 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Aug 2024 00:38:05 +0000 Subject: [PATCH 1894/2361] add profile events for merges --- src/Common/ProfileEvents.cpp | 25 +++++- .../Merges/AggregatingSortedTransform.h | 10 +++ .../Algorithms/AggregatingSortedAlgorithm.h | 2 + .../FinishAggregatingInOrderAlgorithm.cpp | 3 + .../FinishAggregatingInOrderAlgorithm.h | 5 ++ .../GraphiteRollupSortedAlgorithm.h | 2 + .../Merges/Algorithms/IMergingAlgorithm.h | 11 ++- .../IMergingAlgorithmWithSharedChunks.h | 2 + src/Processors/Merges/Algorithms/MergedData.h | 2 + .../Algorithms/MergingSortedAlgorithm.h | 2 +- .../Algorithms/SummingSortedAlgorithm.h | 2 + .../Merges/CollapsingSortedTransform.h | 10 +++ src/Processors/Merges/IMergingTransform.h | 35 +++++++- .../Merges/MergingSortedTransform.cpp | 26 ++---- .../Merges/MergingSortedTransform.h | 4 - .../Merges/ReplacingSortedTransform.h | 9 ++ .../Merges/SummingSortedTransform.h | 10 +++ .../Merges/VersionedCollapsingTransform.h | 9 ++ .../Transforms/ColumnGathererTransform.cpp | 57 ++++++------- .../Transforms/ColumnGathererTransform.h | 11 ++- .../Transforms/MergeJoinTransform.cpp | 12 ++- .../Transforms/MergeJoinTransform.h | 2 + .../Transforms/MergeSortingTransform.cpp | 2 - .../Transforms/PasteJoinTransform.cpp | 10 +++ .../Transforms/PasteJoinTransform.h | 3 +- .../gtest_blocks_size_merging_streams.cpp | 4 +- src/Storages/MergeTree/MergeList.h | 1 + src/Storages/MergeTree/MergeProgress.h | 27 +++--- src/Storages/MergeTree/MergeTask.cpp | 84 +++++++++++++++---- src/Storages/MergeTree/MergeTask.h | 20 ++++- src/Storages/MergeTree/MutateTask.cpp | 10 ++- 31 files changed, 308 insertions(+), 104 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ccdce7ff584..857a08d8a5d 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -210,7 +210,29 @@ M(Merge, "Number of launched background merges.") \ M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \ M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for background merges. 
This is the number before merge.") \ - M(MergesTimeMilliseconds, "Total time spent for background merges.")\ + M(MergeTotalMilliseconds, "Total time spent for background merges") \ + M(MergeExecuteMilliseconds, "Total busy time spent for execution of background merges") \ + M(MergeHorizontalStageTotalMilliseconds, "Total time spent for horizontal stage of background merges") \ + M(MergeHorizontalStageExecuteMilliseconds, "Total busy time spent for execution of horizontal stage of background merges") \ + M(MergeVerticalStageTotalMilliseconds, "Total time spent for vertical stage of background merges") \ + M(MergeVerticalStageExecuteMilliseconds, "Total busy time spent for execution of vertical stage of background merges") \ + M(MergeProjectionStageTotalMilliseconds, "Total time spent for projection stage of background merges") \ + M(MergeProjectionStageExecuteMilliseconds, "Total busy time spent for execution of projection stage of background merges") \ + \ + M(MergingSortedMilliseconds, "Total time spent while merging sorted columns") \ + M(AggregatingSortedMilliseconds, "Total time spent while aggregating sorted columns") \ + M(CollapsingSortedMilliseconds, "Total time spent while collapsing sorted columns") \ + M(ReplacingSortedMilliseconds, "Total time spent while replacing sorted columns") \ + M(SummingSortedMilliseconds, "Total time spent while summing sorted columns") \ + M(VersionedCollapsingSortedMilliseconds, "Total time spent while version collapsing sorted columns") \ + M(GatheringColumnMilliseconds, "Total time spent while gathering columns for vertical merge") \ + \ + M(MutationTotalParts, "Number of total parts for which mutations tried to be applied") \ + M(MutationUntouchedParts, "Number of total parts for which mutations tried to be applied but which was completely skipped according to predicate") \ + M(MutatedRows, "Rows read for mutations. This is the number of rows before mutation") \ + M(MutatedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for mutations. 
This is the number before mutation.") \ + M(MutationTimeMilliseconds, "Total time spent for mutations.") \ + M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \ \ M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \ M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \ @@ -225,7 +247,6 @@ M(MergeTreeDataWriterProjectionsCalculationMicroseconds, "Time spent calculating projections") \ M(MergeTreeDataProjectionWriterSortingBlocksMicroseconds, "Time spent sorting blocks (for projection it might be a key different from table's sorting key)") \ M(MergeTreeDataProjectionWriterMergingBlocksMicroseconds, "Time spent merging blocks") \ - M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \ \ M(InsertedWideParts, "Number of parts inserted in Wide format.") \ M(InsertedCompactParts, "Number of parts inserted in Compact format.") \ diff --git a/src/Processors/Merges/AggregatingSortedTransform.h b/src/Processors/Merges/AggregatingSortedTransform.h index c6d7e844c65..c96ad3db525 100644 --- a/src/Processors/Merges/AggregatingSortedTransform.h +++ b/src/Processors/Merges/AggregatingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event AggregatingSortedMilliseconds; +} + namespace DB { @@ -29,6 +34,11 @@ public: } String getName() const override { return "AggregatingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::AggregatingSortedMilliseconds, "Aggregated sorted", getLogger("AggregatingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h index 53c103e7038..908994e1851 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h @@ -30,6 +30,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } + /// Stores information for aggregation of SimpleAggregateFunction columns struct SimpleAggregateDescription { diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp index 86675bcb237..477566d8a94 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp @@ -126,6 +126,9 @@ IMergingAlgorithm::Status FinishAggregatingInOrderAlgorithm::merge() Chunk FinishAggregatingInOrderAlgorithm::prepareToMerge() { + total_merged_rows += accumulated_rows; + total_merged_bytes += accumulated_bytes; + accumulated_rows = 0; accumulated_bytes = 0; diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h index cc6578e79be..39171c5a978 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h @@ -50,6 +50,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; } + private: Chunk prepareToMerge(); void 
addToAggregation(); @@ -92,6 +94,9 @@ private: UInt64 chunk_num = 0; size_t accumulated_rows = 0; size_t accumulated_bytes = 0; + + size_t total_merged_rows = 0; + size_t total_merged_bytes = 0; }; } diff --git a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h index aaa3859efb6..cb2775c968d 100644 --- a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h @@ -33,6 +33,8 @@ public: const char * getName() const override { return "GraphiteRollupSortedAlgorithm"; } Status merge() override; + MergedStats getMergedStats() const override { return merged_data->getMergedStats(); } + struct ColumnsDefinition { size_t path_column_num; diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithm.h b/src/Processors/Merges/Algorithms/IMergingAlgorithm.h index 9a1c7c24270..83f11232b71 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithm.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB { @@ -65,6 +65,15 @@ public: IMergingAlgorithm() = default; virtual ~IMergingAlgorithm() = default; + + struct MergedStats + { + UInt64 bytes = 0; + UInt64 rows = 0; + UInt64 blocks = 0; + }; + + virtual MergedStats getMergedStats() const = 0; }; // TODO: use when compile with clang which could support it diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h index bc1aafe93f7..1725108ac5d 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h @@ -16,6 +16,8 @@ public: void initialize(Inputs inputs) override; void consume(Input & input, size_t source_num) override; + MergedStats getMergedStats() const override { return merged_data->getMergedStats(); } + private: Block header; SortDescription description; diff --git a/src/Processors/Merges/Algorithms/MergedData.h b/src/Processors/Merges/Algorithms/MergedData.h index c5bb074bb0c..8f47f89d8ee 100644 --- a/src/Processors/Merges/Algorithms/MergedData.h +++ b/src/Processors/Merges/Algorithms/MergedData.h @@ -183,6 +183,8 @@ public: UInt64 totalAllocatedBytes() const { return total_allocated_bytes; } UInt64 maxBlockSize() const { return max_block_size; } + IMergingAlgorithm::MergedStats getMergedStats() const { return {.bytes = total_allocated_bytes, .rows = total_merged_rows, .blocks = total_chunks}; } + virtual ~MergedData() = default; protected: diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index bcb111baadf..c889668a38e 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -31,7 +31,7 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - const MergedData & getMergedData() const { return merged_data; } + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } private: Block header; diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h index 664b171c4b9..74b4e397831 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h @@ -30,6 +30,8 
@@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } + struct AggregateDescription; struct MapDescription; diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 4479ac82f66..99fb700abf1 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event CollapsingSortedMilliseconds; +} + namespace DB { @@ -36,6 +41,11 @@ public: } String getName() const override { return "CollapsingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::CollapsingSortedMilliseconds, "Collapsed sorted", getLogger("CollapsingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/IMergingTransform.h b/src/Processors/Merges/IMergingTransform.h index be629271736..fba5b038618 100644 --- a/src/Processors/Merges/IMergingTransform.h +++ b/src/Processors/Merges/IMergingTransform.h @@ -2,7 +2,10 @@ #include #include +#include #include +#include +#include namespace DB { @@ -110,6 +113,8 @@ public: void work() override { + Stopwatch watch; + if (!state.init_chunks.empty()) algorithm.initialize(std::move(state.init_chunks)); @@ -147,6 +152,8 @@ public: // std::cerr << "Finished" << std::endl; state.is_finished = true; } + + merging_elapsed_ns += watch.elapsedNanoseconds(); } protected: @@ -156,7 +163,33 @@ protected: Algorithm algorithm; /// Profile info. - Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; + UInt64 merging_elapsed_ns = 0; + + void logMergedStats(ProfileEvents::Event elapsed_ms_event, std::string_view transform_message, LoggerPtr log) const + { + auto stats = algorithm.getMergedStats(); + + UInt64 elapsed_ms = merging_elapsed_ns / 1000000LL; + ProfileEvents::increment(elapsed_ms_event, elapsed_ms); + + /// Don't print info for small parts (< 1M rows) + if (stats.rows < 1000000) + return; + + double seconds = static_cast(merging_elapsed_ns) / 1000000000ULL; + + if (seconds == 0.0) + { + LOG_DEBUG(log, "{}: {} blocks, {} rows, {} bytes in 0 sec.", + transform_message, stats.blocks, stats.rows, stats.bytes); + } + else + { + LOG_DEBUG(log, "{}: {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.", + transform_message, stats.blocks, stats.rows, stats.bytes, + seconds, stats.rows / seconds, ReadableSize(stats.bytes / seconds)); + } + } private: using IMergingTransformBase::state; diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 338b1ff7935..d2895a2a2e9 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -1,9 +1,12 @@ #include #include #include - #include -#include + +namespace ProfileEvents +{ + extern const Event MergingSortedMilliseconds; +} namespace DB { @@ -18,7 +21,6 @@ MergingSortedTransform::MergingSortedTransform( UInt64 limit_, bool always_read_till_end_, WriteBuffer * out_row_sources_buf_, - bool quiet_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( @@ -37,7 +39,6 @@ MergingSortedTransform::MergingSortedTransform( limit_, out_row_sources_buf_, use_average_block_sizes) - , quiet(quiet_) { } @@ -48,22 +49,7 @@ void MergingSortedTransform::onNewInput() void MergingSortedTransform::onFinish() { - if (quiet) - return; - - const auto & merged_data = 
algorithm.getMergedData(); - - auto log = getLogger("MergingSortedTransform"); - - double seconds = total_stopwatch.elapsedSeconds(); - - if (seconds == 0.0) - LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in 0 sec.", merged_data.totalChunks(), merged_data.totalMergedRows()); - else - LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in {} sec., {} rows/sec., {}/sec", - merged_data.totalChunks(), merged_data.totalMergedRows(), seconds, - merged_data.totalMergedRows() / seconds, - ReadableSize(merged_data.totalAllocatedBytes() / seconds)); + logMergedStats(ProfileEvents::MergingSortedMilliseconds, "Merged sorted", getLogger("MergingSortedTransform")); } } diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 2b53939f309..6e52450efa7 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -21,7 +21,6 @@ public: UInt64 limit_ = 0, bool always_read_till_end_ = false, WriteBuffer * out_row_sources_buf_ = nullptr, - bool quiet_ = false, bool use_average_block_sizes = false, bool have_all_inputs_ = true); @@ -30,9 +29,6 @@ public: protected: void onNewInput() override; void onFinish() override; - -private: - bool quiet = false; }; } diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index 2657987f161..dc262aab9ee 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -3,6 +3,10 @@ #include #include +namespace ProfileEvents +{ + extern const Event ReplacingSortedMilliseconds; +} namespace DB { @@ -38,6 +42,11 @@ public: } String getName() const override { return "ReplacingSorted"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::ReplacingSortedMilliseconds, "Replaced sorted", getLogger("ReplacingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/SummingSortedTransform.h b/src/Processors/Merges/SummingSortedTransform.h index 70ddebfea95..d7c20223d7e 100644 --- a/src/Processors/Merges/SummingSortedTransform.h +++ b/src/Processors/Merges/SummingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event SummingSortedMilliseconds; +} + namespace DB { @@ -33,6 +38,11 @@ public: } String getName() const override { return "SummingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::SummingSortedMilliseconds, "Summed sorted", getLogger("SummingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 18244469bd7..32b5d7bf343 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -3,6 +3,10 @@ #include #include +namespace ProfileEvents +{ + extern const Event VersionedCollapsingSortedMilliseconds; +} namespace DB { @@ -33,6 +37,11 @@ public: } String getName() const override { return "VersionedCollapsingTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::VersionedCollapsingSortedMilliseconds, "Versioned collapsed sorted", getLogger("VersionedCollapsingTransform")); + } }; } diff --git a/src/Processors/Transforms/ColumnGathererTransform.cpp b/src/Processors/Transforms/ColumnGathererTransform.cpp index 15f8355bdc7..52fa42fdb51 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.cpp +++ b/src/Processors/Transforms/ColumnGathererTransform.cpp @@ -1,11 +1,15 
@@ #include +#include #include #include #include #include #include -#include +namespace ProfileEvents +{ + extern const Event GatheringColumnMilliseconds; +} namespace DB { @@ -33,6 +37,13 @@ ColumnGathererStream::ColumnGathererStream( throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "There are no streams to gather"); } +void ColumnGathererStream::updateStats(const IColumn & column) +{ + merged_rows += column.size(); + merged_bytes += column.allocatedBytes(); + ++merged_blocks; +} + void ColumnGathererStream::initialize(Inputs inputs) { Columns source_columns; @@ -82,7 +93,9 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() { res.addColumn(source_to_fully_copy->column); } - merged_rows += source_to_fully_copy->size; + + updateStats(*source_to_fully_copy->column); + source_to_fully_copy->pos = source_to_fully_copy->size; source_to_fully_copy = nullptr; return Status(std::move(res)); @@ -96,8 +109,7 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() { next_required_source = 0; Chunk res; - merged_rows += sources.front().column->size(); - merged_bytes += sources.front().column->allocatedBytes(); + updateStats(*sources.front().column); res.addColumn(std::move(sources.front().column)); sources.front().pos = sources.front().size = 0; return Status(std::move(res)); @@ -123,8 +135,8 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() if (source_to_fully_copy && result_column->empty()) { Chunk res; - merged_rows += source_to_fully_copy->column->size(); - merged_bytes += source_to_fully_copy->column->allocatedBytes(); + updateStats(*source_to_fully_copy->column); + if (result_column->hasDynamicStructure()) { auto col = result_column->cloneEmpty(); @@ -140,13 +152,13 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() return Status(std::move(res)); } - auto col = result_column->cloneEmpty(); - result_column.swap(col); + auto return_column = result_column->cloneEmpty(); + result_column.swap(return_column); Chunk res; - merged_rows += col->size(); - merged_bytes += col->allocatedBytes(); - res.addColumn(std::move(col)); + updateStats(*return_column); + + res.addColumn(std::move(return_column)); return Status(std::move(res), row_sources_buf.eof() && !source_to_fully_copy); } @@ -185,31 +197,10 @@ ColumnGathererTransform::ColumnGathererTransform( toString(header.columns())); } -void ColumnGathererTransform::work() -{ - Stopwatch stopwatch; - IMergingTransform::work(); - elapsed_ns += stopwatch.elapsedNanoseconds(); -} - void ColumnGathererTransform::onFinish() { - auto merged_rows = algorithm.getMergedRows(); - auto merged_bytes = algorithm.getMergedRows(); - /// Don't print info for small parts (< 10M rows) - if (merged_rows < 10000000) - return; - - double seconds = static_cast(elapsed_ns) / 1000000000ULL; const auto & column_name = getOutputPort().getHeader().getByPosition(0).name; - - if (seconds == 0.0) - LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in 0 sec.", - column_name, static_cast(merged_bytes) / merged_rows); - else - LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) 
in {} sec., {} rows/sec., {}/sec.", - column_name, static_cast(merged_bytes) / merged_rows, seconds, - merged_rows / seconds, ReadableSize(merged_bytes / seconds)); + logMergedStats(ProfileEvents::GatheringColumnMilliseconds, fmt::format("Gathered column {}", column_name), log); } } diff --git a/src/Processors/Transforms/ColumnGathererTransform.h b/src/Processors/Transforms/ColumnGathererTransform.h index ec5691316ce..a535b2669d0 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.h +++ b/src/Processors/Transforms/ColumnGathererTransform.h @@ -2,6 +2,7 @@ #include #include +#include "base/types.h" #include #include @@ -72,10 +73,11 @@ public: template void gather(Column & column_res); - UInt64 getMergedRows() const { return merged_rows; } - UInt64 getMergedBytes() const { return merged_bytes; } + MergedStats getMergedStats() const override { return {.bytes = merged_bytes, .rows = merged_rows, .blocks = merged_blocks}; } private: + void updateStats(const IColumn & column); + /// Cache required fields struct Source { @@ -105,6 +107,7 @@ private: ssize_t next_required_source = -1; UInt64 merged_rows = 0; UInt64 merged_bytes = 0; + UInt64 merged_blocks = 0; }; class ColumnGathererTransform final : public IMergingTransform @@ -120,12 +123,8 @@ public: String getName() const override { return "ColumnGathererTransform"; } - void work() override; - protected: void onFinish() override; - UInt64 elapsed_ns = 0; - LoggerPtr log; }; diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index e96a75d277b..26601207da8 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -511,6 +511,16 @@ void MergeJoinAlgorithm::logElapsed(double seconds) stat.max_blocks_loaded); } +IMergingAlgorithm::MergedStats MergeJoinAlgorithm::getMergedStats() const +{ + return + { + .bytes = 0, + .rows = stat.num_rows[0] + stat.num_rows[1], + .blocks = stat.num_blocks[0] + stat.num_blocks[1], + }; +} + static void prepareChunk(Chunk & chunk) { if (!chunk) @@ -1271,7 +1281,7 @@ MergeJoinTransform::MergeJoinTransform( void MergeJoinTransform::onFinish() { - algorithm.logElapsed(total_stopwatch.elapsedSeconds()); + algorithm.logElapsed(merging_elapsed_ns / 1000000000ULL); } } diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index d37a0b9f3ae..841a3f15a92 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -245,6 +245,8 @@ public: void setAsofInequality(ASOFJoinInequality asof_inequality_); void logElapsed(double seconds); + MergedStats getMergedStats() const override; + private: std::optional handleAnyJoinState(); Status anyJoin(); diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index ede13b29219..c45192e7118 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -185,7 +185,6 @@ void MergeSortingTransform::consume(Chunk chunk) if (!external_merging_sorted) { - bool quiet = false; bool have_all_inputs = false; bool use_average_block_sizes = false; @@ -199,7 +198,6 @@ void MergeSortingTransform::consume(Chunk chunk) limit, /*always_read_till_end_=*/ false, nullptr, - quiet, use_average_block_sizes, have_all_inputs); diff --git a/src/Processors/Transforms/PasteJoinTransform.cpp b/src/Processors/Transforms/PasteJoinTransform.cpp index 
d2fa7eed256..ad01b721726 100644 --- a/src/Processors/Transforms/PasteJoinTransform.cpp +++ b/src/Processors/Transforms/PasteJoinTransform.cpp @@ -58,6 +58,16 @@ static void prepareChunk(Chunk & chunk) chunk.setColumns(std::move(columns), num_rows); } +IMergingAlgorithm::MergedStats PasteJoinAlgorithm::getMergedStats() const +{ + return + { + .bytes = 0, + .rows = stat.num_rows[0] + stat.num_rows[1], + .blocks = stat.num_blocks[0] + stat.num_blocks[1], + }; +} + void PasteJoinAlgorithm::initialize(Inputs inputs) { if (inputs.size() != 2) diff --git a/src/Processors/Transforms/PasteJoinTransform.h b/src/Processors/Transforms/PasteJoinTransform.h index 6a7e65ee27c..fbe85f6993b 100644 --- a/src/Processors/Transforms/PasteJoinTransform.h +++ b/src/Processors/Transforms/PasteJoinTransform.h @@ -35,8 +35,7 @@ public: void initialize(Inputs inputs) override; void consume(Input & input, size_t source_num) override; Status merge() override; - - void logElapsed(double seconds); + MergedStats getMergedStats() const override; private: Chunk createBlockWithDefaults(size_t source_num); diff --git a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp index bc22f249f97..f41a447049c 100644 --- a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp +++ b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp @@ -83,7 +83,7 @@ TEST(MergingSortedTest, SimpleBlockSizeTest) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true); + 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true); pipe.addTransform(std::move(transform)); @@ -125,7 +125,7 @@ TEST(MergingSortedTest, MoreInterestingBlockSizes) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true); + 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true); pipe.addTransform(std::move(transform)); diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index d40af6abf43..3a96ba0abae 100644 --- a/src/Storages/MergeTree/MergeList.h +++ b/src/Storages/MergeTree/MergeList.h @@ -6,6 +6,7 @@ #include #include #include +#include "base/types.h" #include #include #include diff --git a/src/Storages/MergeTree/MergeProgress.h b/src/Storages/MergeTree/MergeProgress.h index dd4922051b5..8562e81e761 100644 --- a/src/Storages/MergeTree/MergeProgress.h +++ b/src/Storages/MergeTree/MergeProgress.h @@ -8,10 +8,10 @@ namespace ProfileEvents { - extern const Event MergesTimeMilliseconds; extern const Event MergedUncompressedBytes; extern const Event MergedRows; - extern const Event Merge; + extern const Event MutatedRows; + extern const Event MutatedUncompressedBytes; } namespace DB @@ -63,18 +63,17 @@ public: void updateWatch() { UInt64 watch_curr_elapsed = merge_list_element_ptr->watch.elapsed(); - ProfileEvents::increment(ProfileEvents::MergesTimeMilliseconds, (watch_curr_elapsed - watch_prev_elapsed) / 1000000); watch_prev_elapsed = watch_curr_elapsed; } - void operator() (const Progress & value) + void operator()(const Progress & value) { - ProfileEvents::increment(ProfileEvents::MergedUncompressedBytes, value.read_bytes); - if (stage.is_first) - { - 
ProfileEvents::increment(ProfileEvents::MergedRows, value.read_rows); - ProfileEvents::increment(ProfileEvents::Merge); - } + if (merge_list_element_ptr->is_mutation) + updateProfileEvents(value, ProfileEvents::MutatedRows, ProfileEvents::MutatedUncompressedBytes); + else + updateProfileEvents(value, ProfileEvents::MergedRows, ProfileEvents::MergedUncompressedBytes); + + updateWatch(); merge_list_element_ptr->bytes_read_uncompressed += value.read_bytes; @@ -90,6 +89,14 @@ public: std::memory_order_relaxed); } } + +private: + void updateProfileEvents(const Progress & value, ProfileEvents::Event rows_event, ProfileEvents::Event bytes_event) const + { + ProfileEvents::increment(bytes_event, value.read_bytes); + if (stage.is_first) + ProfileEvents::increment(rows_event, value.read_rows); + } }; } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index ce06adf110c..5f178f08ec3 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -5,9 +5,13 @@ #include #include +#include "Common/ElapsedTimeProfileEventIncrement.h" +#include "Common/Logger.h" +#include "Common/Stopwatch.h" #include #include #include +#include #include #include #include @@ -39,6 +43,16 @@ #include #include +namespace ProfileEvents +{ + extern const Event Merge; + extern const Event MergeTotalMilliseconds; + extern const Event MergeExecuteMilliseconds; + extern const Event MergeHorizontalStageExecuteMilliseconds; + extern const Event MergeVerticalStageExecuteMilliseconds; + extern const Event MergeProjectionStageExecuteMilliseconds; +} + namespace DB { @@ -186,6 +200,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() if (isTTLMergeType(global_ctx->future_part->merge_type) && global_ctx->ttl_merges_blocker->isCancelled()) throw Exception(ErrorCodes::ABORTED, "Cancelled merging parts with TTL"); + ProfileEvents::increment(ProfileEvents::Merge); + LOG_DEBUG(ctx->log, "Merging {} parts: from {} to {} into {} with storage {}", global_ctx->future_part->parts.size(), global_ctx->future_part->parts.front()->name, @@ -446,6 +462,9 @@ void MergeTask::addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const Str MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::getContextForNextStage() { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeHorizontalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + auto new_ctx = std::make_shared(); new_ctx->rows_sources_write_buf = std::move(ctx->rows_sources_write_buf); @@ -463,8 +482,10 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNextStage() { - auto new_ctx = std::make_shared(); + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeVerticalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + auto new_ctx = std::make_shared(); new_ctx->need_sync = std::move(ctx->need_sync); ctx.reset(); @@ -474,9 +495,14 @@ MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNe bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = 
(this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -534,7 +560,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const { - /// No need to execute this part if it is horizontal merge. + /// No need to execute this part if it is horizontal merge. if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; @@ -906,12 +932,24 @@ bool MergeTask::MergeProjectionsStage::finalizeProjectionsAndWholeMerge() const return false; } +MergeTask::StageRuntimeContextPtr MergeTask::MergeProjectionsStage::getContextForNextStage() +{ + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeProjectionStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + + return nullptr; +} bool MergeTask::VerticalMergeStage::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -920,9 +958,14 @@ bool MergeTask::VerticalMergeStage::execute() bool MergeTask::MergeProjectionsStage::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -969,12 +1012,22 @@ bool MergeTask::VerticalMergeStage::executeVerticalMergeForAllColumns() const bool MergeTask::execute() { - assert(stages_iterator != stages.end()); - if ((*stages_iterator)->execute()) + chassert(stages_iterator != stages.end()); + const auto & current_stage = *stages_iterator; + + if (current_stage->execute()) return true; - /// Stage is finished, need initialize context for the next stage - auto next_stage_context = (*stages_iterator)->getContextForNextStage(); + /// Stage is finished, need to initialize context for the next stage and update profile events. 
+ + UInt64 current_elapsed_ms = global_ctx->merge_list_element_ptr->watch.elapsedMilliseconds(); + UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapesed_ms; + global_ctx->prev_elapesed_ms = current_elapsed_ms; + + ProfileEvents::increment(current_stage->getTotalTimeProfileEvent(), stage_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); + + auto next_stage_context = current_stage->getContextForNextStage(); /// Move to the next stage in an array of stages ++stages_iterator; @@ -1099,7 +1152,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() /* limit_= */0, /* always_read_till_end_= */false, ctx->rows_sources_write_buf.get(), - true, ctx->blocks_are_granules_size); break; diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 8b0f2130e8e..979c85482e5 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -26,6 +27,12 @@ #include #include +namespace ProfileEvents +{ + extern const Event MergeHorizontalStageTotalMilliseconds; + extern const Event MergeVerticalStageTotalMilliseconds; + extern const Event MergeProjectionStageTotalMilliseconds; +} namespace DB { @@ -134,6 +141,7 @@ private: { virtual void setRuntimeContext(StageRuntimeContextPtr local, StageRuntimeContextPtr global) = 0; virtual StageRuntimeContextPtr getContextForNextStage() = 0; + virtual ProfileEvents::Event getTotalTimeProfileEvent() const = 0; virtual bool execute() = 0; virtual ~IStage() = default; }; @@ -195,6 +203,7 @@ private: bool need_prefix; scope_guard temporary_directory_lock; + UInt64 prev_elapesed_ms{0}; }; using GlobalRuntimeContextPtr = std::shared_ptr; @@ -233,6 +242,7 @@ private: /// Dependencies for next stages std::list::const_iterator it_name_and_type; bool need_sync{false}; + UInt64 elapsed_execute_ns{0}; }; using ExecuteAndFinalizeHorizontalPartRuntimeContextPtr = std::shared_ptr; @@ -256,7 +266,6 @@ private: ExecuteAndFinalizeHorizontalPartSubtasks::const_iterator subtasks_iterator = subtasks.begin(); - MergeAlgorithm chooseMergeAlgorithm() const; void createMergedStream(); void extractMergingAndGatheringColumns() const; @@ -268,6 +277,7 @@ private: } StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeHorizontalStageTotalMilliseconds; } ExecuteAndFinalizeHorizontalPartRuntimeContextPtr ctx; GlobalRuntimeContextPtr global_ctx; @@ -307,6 +317,7 @@ private: QueryPipeline column_parts_pipeline; std::unique_ptr executor; std::unique_ptr rows_sources_read_buf{nullptr}; + UInt64 elapsed_execute_ns{0}; }; using VerticalMergeRuntimeContextPtr = std::shared_ptr; @@ -321,6 +332,7 @@ private: global_ctx = static_pointer_cast(global); } StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeVerticalStageTotalMilliseconds; } bool prepareVerticalMergeForAllColumns() const; bool executeVerticalMergeForAllColumns() const; @@ -361,6 +373,7 @@ private: MergeTasks::iterator projections_iterator; LoggerPtr log{getLogger("MergeTask::MergeProjectionsStage")}; + UInt64 elapsed_execute_ns{0}; }; using MergeProjectionsRuntimeContextPtr = std::shared_ptr; @@ -368,12 +381,15 @@ private: struct MergeProjectionsStage : public IStage { bool execute() override; + void setRuntimeContext(StageRuntimeContextPtr local, 
StageRuntimeContextPtr global) override { ctx = static_pointer_cast(local); global_ctx = static_pointer_cast(global); } - StageRuntimeContextPtr getContextForNextStage() override { return nullptr; } + + StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeProjectionStageTotalMilliseconds; } bool mergeMinMaxIndexAndPrepareProjections() const; bool executeProjections() const; diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 9a775db73e2..fe78964a241 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -38,7 +38,10 @@ namespace ProfileEvents { -extern const Event MutateTaskProjectionsCalculationMicroseconds; + extern const Event MutationTotalParts; + extern const Event MutationUntouchedParts; + extern const Event MutationTimeMilliseconds; + extern const Event MutateTaskProjectionsCalculationMicroseconds; } namespace CurrentMetrics @@ -2034,6 +2037,9 @@ bool MutateTask::execute() if (task->executeStep()) return true; + auto total_elapsed_ms = (*ctx->mutate_entry)->watch.elapsedMilliseconds(); + ProfileEvents::increment(ProfileEvents::MutationTimeMilliseconds, total_elapsed_ms); + // The `new_data_part` is a shared pointer and must be moved to allow // part deletion in case it is needed in `MutateFromLogEntryTask::finalize`. // @@ -2118,6 +2124,7 @@ bool MutateTask::prepare() throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to mutate {} parts, not one. " "This is a bug.", ctx->future_part->parts.size()); + ProfileEvents::increment(ProfileEvents::MutationTotalParts); ctx->num_mutations = std::make_unique(CurrentMetrics::PartMutation); auto context_for_reading = Context::createCopy(ctx->context); @@ -2174,6 +2181,7 @@ bool MutateTask::prepare() ctx->temporary_directory_lock = std::move(lock); } + ProfileEvents::increment(ProfileEvents::MutationUntouchedParts); promise.set_value(std::move(part)); return false; } From c42725331d2a8bcafad804afc4a9f610630abbc4 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Mon, 5 Aug 2024 21:41:41 +0200 Subject: [PATCH 1895/2361] Fix --- src/Common/FailPoint.cpp | 1 + src/Functions/sleep.h | 8 ++++++++ ...date_tricky_expression_and_replication.reference} | 0 ...umn_update_tricky_expression_and_replication.sql} | 12 ++++++------ 4 files changed, 15 insertions(+), 6 deletions(-) rename tests/queries/0_stateless/{02597_column_update_tricy_expression_and_replication.reference => 02597_column_update_tricky_expression_and_replication.reference} (100%) rename tests/queries/0_stateless/{02597_column_update_tricy_expression_and_replication.sql => 02597_column_update_tricky_expression_and_replication.sql} (74%) diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index f5ec8cf0356..0b1ec552d43 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -60,6 +60,7 @@ static struct InitFiu ONCE(receive_timeout_on_table_status_response) \ REGULAR(keepermap_fail_drop_data) \ REGULAR(lazy_pipe_fds_fail_close) \ + PAUSEABLE(infinite_sleep) \ namespace FailPoints diff --git a/src/Functions/sleep.h b/src/Functions/sleep.h index 62ee19fa904..b6e4b36ee64 100644 --- a/src/Functions/sleep.h +++ b/src/Functions/sleep.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,11 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } +namespace FailPoints +{ + extern const char infinite_sleep[]; +} + /** sleep(seconds) - the 
specified number of seconds sleeps each columns. */ @@ -107,6 +113,8 @@ public: { /// When sleeping, the query cannot be cancelled. For ability to cancel query, we limit sleep time. UInt64 microseconds = static_cast(seconds * 1e6); + FailPointInjection::pauseFailPoint(FailPoints::infinite_sleep); + if (max_microseconds && microseconds > max_microseconds) throw Exception(ErrorCodes::TOO_SLOW, "The maximum sleep time is {} microseconds. Requested: {} microseconds", max_microseconds, microseconds); diff --git a/tests/queries/0_stateless/02597_column_update_tricy_expression_and_replication.reference b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.reference similarity index 100% rename from tests/queries/0_stateless/02597_column_update_tricy_expression_and_replication.reference rename to tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.reference diff --git a/tests/queries/0_stateless/02597_column_update_tricy_expression_and_replication.sql b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql similarity index 74% rename from tests/queries/0_stateless/02597_column_update_tricy_expression_and_replication.sql rename to tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql index 34f88b19b7e..12901c1e33d 100644 --- a/tests/queries/0_stateless/02597_column_update_tricy_expression_and_replication.sql +++ b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql @@ -6,23 +6,23 @@ CREATE TABLE test ( ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') ORDER BY (c_id, p_id); -INSERT INTO test SELECT '1', '11', '111' FROM numbers(3); +INSERT INTO test SELECT '1', '11', '111' FROM numbers(5); -INSERT INTO test SELECT '2', '22', '22' FROM numbers(3); - -set mutations_sync=0; +SET mutations_sync=0; +SYSTEM ENABLE FAILPOINT infinite_sleep; ALTER TABLE test UPDATE d = d + sleepEachRow(0.3) where 1; ALTER TABLE test ADD COLUMN x UInt32 default 0; ALTER TABLE test UPDATE d = x + 1 where 1; + +SYSTEM DISABLE FAILPOINT infinite_sleep; ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; --{serverError BAD_ARGUMENTS} ALTER TABLE test UPDATE x = x + 1 where 1 SETTINGS mutations_sync = 2; - ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; -select * from test format Null; +SELECT * from test format Null; DROP TABLE test; From 5329fba9a25409e427f08ec7dc1e8e6d134e21d8 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Tue, 6 Aug 2024 20:21:31 +0200 Subject: [PATCH 1896/2361] Fix harder --- ...e_tricky_expression_and_replication.python | 61 +++++++++++++++++++ ...pdate_tricky_expression_and_replication.sh | 9 +++ ...date_tricky_expression_and_replication.sql | 28 --------- .../0_stateless/helpers/pure_http_client.py | 7 ++- 4 files changed, 74 insertions(+), 31 deletions(-) create mode 100644 tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python create mode 100755 tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sh delete mode 100644 tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql diff --git a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python new file mode 100644 index 00000000000..835cc7bfa51 --- /dev/null +++ 
b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 + +import os +import sys +from threading import Thread +from queue import Queue + +CURDIR = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) + +from pure_http_client import ClickHouseClient + + +client = ClickHouseClient() + + +client.query("DROP TABLE IF EXISTS test SYNC") +client.query(""" +CREATE TABLE test +( + c_id String, + p_id String, + d UInt32, +) +Engine = ReplicatedMergeTree() +ORDER BY (c_id, p_id) +""") + + +def attempt_mutation(q): + try: + client.query( + "ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync=2", + with_retries=False, + ) + except ValueError as e: + assert "BAD_ARGUMENTS" in str(e) + q.put("OK") + + +client.query("INSERT INTO test SELECT '1', '11', '111' FROM numbers(5)") +client.query("SYSTEM ENABLE FAILPOINT infinite_sleep") +client.query( + "ALTER TABLE test UPDATE d = d + sleepEachRow(0.3) where 1 SETTINGS mutations_sync=0" +) +client.query("ALTER TABLE test ADD COLUMN x UInt32 default 0 SETTINGS mutations_sync=0") +client.query("ALTER TABLE test UPDATE d = x + 1 where 1 SETTINGS mutations_sync=0") + +q = Queue() +t = Thread(target=attempt_mutation, args=(q,)) +t.start() +t.join() +assert not q.empty() +assert q.get() == "OK" + +client.query("SYSTEM DISABLE FAILPOINT infinite_sleep") + +client.query("ALTER TABLE test UPDATE x = x + 1 where 1 SETTINGS mutations_sync=2") +client.query("ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync=2") +client.query("SELECT * from test format Null") +client.query("DROP TABLE test") diff --git a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sh b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sh new file mode 100755 index 00000000000..5be04d99204 --- /dev/null +++ b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: zookeeper, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +# We should have correct env vars from shell_config.sh to run this test +python3 "$CURDIR"/02597_column_update_tricky_expression_and_replication.python diff --git a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql deleted file mode 100644 index 12901c1e33d..00000000000 --- a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.sql +++ /dev/null @@ -1,28 +0,0 @@ -CREATE TABLE test ( - `c_id` String, - `p_id` String, - `d` UInt32 -) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') -ORDER BY (c_id, p_id); - -INSERT INTO test SELECT '1', '11', '111' FROM numbers(5); - -SET mutations_sync=0; -SYSTEM ENABLE FAILPOINT infinite_sleep; - -ALTER TABLE test UPDATE d = d + sleepEachRow(0.3) where 1; - -ALTER TABLE test ADD COLUMN x UInt32 default 0; -ALTER TABLE test UPDATE d = x + 1 where 1; - -SYSTEM DISABLE FAILPOINT infinite_sleep; -ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; --{serverError BAD_ARGUMENTS} - -ALTER TABLE test UPDATE x = x + 1 where 1 SETTINGS mutations_sync = 2; -ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; - -SELECT * from test format Null; - -DROP TABLE test; - diff --git a/tests/queries/0_stateless/helpers/pure_http_client.py b/tests/queries/0_stateless/helpers/pure_http_client.py index a31a91e0550..360a1eaa486 100644 --- a/tests/queries/0_stateless/helpers/pure_http_client.py +++ b/tests/queries/0_stateless/helpers/pure_http_client.py @@ -19,9 +19,9 @@ class ClickHouseClient: self.host = host def query( - self, query, connection_timeout=500, settings=dict(), binary_result=False + self, query, connection_timeout=500, settings=dict(), binary_result=False, with_retries=True ): - NUMBER_OF_TRIES = 30 + NUMBER_OF_TRIES = 30 if with_retries else 1 DELAY = 10 params = { @@ -40,7 +40,8 @@ class ClickHouseClient: if r.status_code == 200: return r.content if binary_result else r.text else: - print("ATTENTION: try #%d failed" % i) + if with_retries: + print("ATTENTION: try #%d failed" % i) if i != (NUMBER_OF_TRIES - 1): print(query) print(r.text) From f4e1bde9b6c1b4e4477368408f955efc9bb10df5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 6 Aug 2024 18:37:21 +0000 Subject: [PATCH 1897/2361] Automatic style fix --- ..._column_update_tricky_expression_and_replication.python | 6 ++++-- tests/queries/0_stateless/helpers/pure_http_client.py | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python index 835cc7bfa51..8bd5783c011 100644 --- a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python +++ b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python @@ -15,7 +15,8 @@ client = ClickHouseClient() client.query("DROP TABLE IF EXISTS test SYNC") -client.query(""" +client.query( + """ CREATE TABLE test ( c_id String, @@ -24,7 +25,8 @@ CREATE TABLE test ) Engine = ReplicatedMergeTree() ORDER BY (c_id, p_id) -""") +""" +) def attempt_mutation(q): diff --git a/tests/queries/0_stateless/helpers/pure_http_client.py b/tests/queries/0_stateless/helpers/pure_http_client.py index 360a1eaa486..c3c4109ce5b 100644 --- a/tests/queries/0_stateless/helpers/pure_http_client.py +++ 
b/tests/queries/0_stateless/helpers/pure_http_client.py @@ -19,7 +19,12 @@ class ClickHouseClient: self.host = host def query( - self, query, connection_timeout=500, settings=dict(), binary_result=False, with_retries=True + self, + query, + connection_timeout=500, + settings=dict(), + binary_result=False, + with_retries=True, ): NUMBER_OF_TRIES = 30 if with_retries else 1 DELAY = 10 From b4cb5dcdd3c4d572e6ad73836569c09a7a69f526 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Wed, 7 Aug 2024 18:46:11 +0200 Subject: [PATCH 1898/2361] Typo --- ...02597_column_update_tricky_expression_and_replication.python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python index 8bd5783c011..eb0cab9d56f 100644 --- a/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python +++ b/tests/queries/0_stateless/02597_column_update_tricky_expression_and_replication.python @@ -23,7 +23,7 @@ CREATE TABLE test p_id String, d UInt32, ) -Engine = ReplicatedMergeTree() +Engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') ORDER BY (c_id, p_id) """ ) From 0882b810a753db4ed7643a2fed3f94fea785ae4d Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Aug 2024 01:04:37 +0000 Subject: [PATCH 1899/2361] try to make bigger steps for execution of merges --- src/Storages/MergeTree/MergeTask.cpp | 54 ++++++++++++++-------- src/Storages/MergeTree/MergeTask.h | 1 + src/Storages/MergeTree/MergeTreeSettings.h | 1 + 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 5f178f08ec3..3203120c6eb 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -12,6 +12,7 @@ #include #include #include +#include "base/types.h" #include #include #include @@ -512,11 +513,20 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute() bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() { - Block block; - if (!ctx->is_cancelled() && (global_ctx->merging_executor->pull(block))) - { - global_ctx->rows_written += block.rows(); + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + UInt64 step_time_ms = global_ctx->data->getSettings()->merge_preferred_step_execution_time_ms.totalMilliseconds(); + do + { + Block block; + + if (ctx->is_cancelled() || !global_ctx->merging_executor->pull(block)) + { + finalize(); + return false; + } + + global_ctx->rows_written += block.rows(); const_cast(*global_ctx->to).write(block); UInt64 result_rows = 0; @@ -536,11 +546,14 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() global_ctx->space_reservation->update(static_cast((1. 
- progress) * ctx->initial_reservation)); } + } while (watch.elapsedMilliseconds() < step_time_ms); - /// Need execute again - return true; - } + /// Need execute again + return true; +} +void MergeTask::ExecuteAndFinalizeHorizontalPart::finalize() const +{ global_ctx->merging_executor.reset(); global_ctx->merged_pipeline.reset(); @@ -550,14 +563,10 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() if (ctx->need_remove_expired_values && global_ctx->ttl_merges_blocker->isCancelled()) throw Exception(ErrorCodes::ABORTED, "Cancelled merging parts with expired TTL"); - const auto data_settings = global_ctx->data->getSettings(); const size_t sum_compressed_bytes_upper_bound = global_ctx->merge_list_element_ptr->total_size_bytes_compressed; - ctx->need_sync = needSyncPart(ctx->sum_input_rows_upper_bound, sum_compressed_bytes_upper_bound, *data_settings); - - return false; + ctx->need_sync = needSyncPart(ctx->sum_input_rows_upper_bound, sum_compressed_bytes_upper_bound, *global_ctx->data->getSettings()); } - bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const { /// No need to execute this part if it is horizontal merge. @@ -734,17 +743,24 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const bool MergeTask::VerticalMergeStage::executeVerticalMergeForOneColumn() const { - Block block; - if (!global_ctx->merges_blocker->isCancelled() && !global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed) - && ctx->executor->pull(block)) + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + UInt64 step_time_ms = global_ctx->data->getSettings()->merge_preferred_step_execution_time_ms.totalMilliseconds(); + + do { + Block block; + + if (global_ctx->merges_blocker->isCancelled() + || global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed) + || !ctx->executor->pull(block)) + return false; + ctx->column_elems_written += block.rows(); ctx->column_to->write(block); + } while (watch.elapsedMilliseconds() < step_time_ms); - /// Need execute again - return true; - } - return false; + /// Need execute again + return true; } diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 979c85482e5..24917a4cb0e 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -254,6 +254,7 @@ private: bool prepare(); bool executeImpl(); + void finalize() const; /// NOTE: Using pointer-to-member instead of std::function and lambda makes stacktraces much more concise and readable using ExecuteAndFinalizeHorizontalPartSubtasks = std::array; diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 5ba1988cc5d..e56bb15f7b0 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -84,6 +84,7 @@ struct Settings; M(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ M(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \ M(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ + M(Milliseconds, merge_preferred_step_execution_time_ms, 100, "Target time for execution of one step of a merge. Can be exceeded if one step takes longer", 0) \ \ /** Inserts settings. 
*/ \ M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ From 142e9528f0f1f7daa68f296181f1c0e5f7b6108b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 03:10:45 +0200 Subject: [PATCH 1900/2361] Add a test for #57420 --- .../03218_materialize_msan.reference | 1 + .../0_stateless/03218_materialize_msan.sql | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 tests/queries/0_stateless/03218_materialize_msan.reference create mode 100644 tests/queries/0_stateless/03218_materialize_msan.sql diff --git a/tests/queries/0_stateless/03218_materialize_msan.reference b/tests/queries/0_stateless/03218_materialize_msan.reference new file mode 100644 index 00000000000..eb84f35f9f4 --- /dev/null +++ b/tests/queries/0_stateless/03218_materialize_msan.reference @@ -0,0 +1 @@ +[(NULL,'11\01111111\011111','1111')] -2147483648 \N diff --git a/tests/queries/0_stateless/03218_materialize_msan.sql b/tests/queries/0_stateless/03218_materialize_msan.sql new file mode 100644 index 00000000000..b41300ea1e3 --- /dev/null +++ b/tests/queries/0_stateless/03218_materialize_msan.sql @@ -0,0 +1,21 @@ +SELECT + materialize([(NULL, '11\01111111\011111', '1111')]) AS t, + (t[1048576]).2, + materialize(-2147483648), + (t[-2147483648]).1 +GROUP BY + materialize([(NULL, '1')]), + '', + (materialize((t[1023]).2), (materialize(''), (t[2147483647]).1, materialize(9223372036854775807)), (materialize(''), materialize(NULL, 2147483647, t[65535], 256)), materialize(NULL)) +; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT + materialize([(NULL, '11\01111111\011111', '1111')]) AS t, + (t[1048576]).2, + materialize(-2147483648), + (t[-2147483648]).1 +GROUP BY + materialize([(NULL, '1')]), + '', + (materialize((t[1023]).2), (materialize(''), (t[2147483647]).1, materialize(9223372036854775807)), (materialize(''), materialize(NULL), materialize(2147483647), materialize(t[65535]), materialize(256)), materialize(NULL)) +; From da6378752fc562f7b8df487f86fe2257215eb96a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 04:07:43 +0200 Subject: [PATCH 1901/2361] Add annotations --- tests/queries/0_stateless/01304_direct_io_long.sh | 3 ++- tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01304_direct_io_long.sh b/tests/queries/0_stateless/01304_direct_io_long.sh index 6ab25eebaf7..867c37667fe 100755 --- a/tests/queries/0_stateless/01304_direct_io_long.sh +++ b/tests/queries/0_stateless/01304_direct_io_long.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: long, no-object-storage-with-slow-build +# Tags: long, no-object-storage-with-slow-build, no-flaky-check +# It can be too long with ThreadFuzzer CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 b/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 index 47940356302..7df77595347 100644 --- a/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 +++ b/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 @@ -1,4 +1,5 @@ --- Tags: long +-- Tags: long, no-flaky-check +-- It can be too long with ThreadFuzzer DROP TABLE IF EXISTS left; DROP TABLE IF EXISTS right; From 4fac40a3cb4823f0014a2c5324593b6ef8a6b6ac Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 04:41:22 
+0200 Subject: [PATCH 1902/2361] Step back --- programs/client/Client.cpp | 10 ++++++++++ programs/local/LocalServer.cpp | 3 +++ src/Client/ClientBase.h | 3 +++ src/Client/LocalConnection.cpp | 1 + src/Client/LocalConnection.h | 2 ++ src/Client/Suggest.cpp | 4 ---- 6 files changed, 19 insertions(+), 4 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 0f136664de8..1d99d223ee9 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -327,6 +328,7 @@ int Client::main(const std::vector & /*args*/) try { UseSSL use_ssl; + auto & thread_status = MainThreadStatus::getInstance(); setupSignalHandler(); std::cout << std::fixed << std::setprecision(3); @@ -341,6 +343,14 @@ try initTTYBuffer(toProgressOption(config().getString("progress", "default"))); ASTAlterCommand::setFormatAlterCommandsWithParentheses(true); + { + // All that just to set DB::CurrentThread::get().getGlobalContext() + // which is required for client timezone (pushed from server) to work. + auto thread_group = std::make_shared(); + const_cast(thread_group->global_context) = global_context; + thread_status.attachToGroup(thread_group, false); + } + /// Includes delayed_interactive. if (is_interactive) { diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index cb89c6c5510..0d731ed0e14 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -29,8 +29,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -461,6 +463,7 @@ int LocalServer::main(const std::vector & /*args*/) try { UseSSL use_ssl; + thread_status.emplace(); StackTrace::setShowAddresses(server_settings.show_addresses_in_stack_traces); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 26deb1eda26..1a23b6b1363 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -296,6 +296,9 @@ protected: Settings cmd_settings; MergeTreeSettings cmd_merge_tree_settings; + /// thread status should be destructed before shared context because it relies on process list. + std::optional thread_status; + ServerConnectionPtr connection; ConnectionParameters connection_parameters; diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index b0a5ef99253..072184e0a66 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -128,6 +128,7 @@ void LocalConnection::sendQuery( state->query_id = query_id; state->query = query; + state->query_scope_holder = std::make_unique(query_context); state->stage = QueryProcessingStage::Enum(stage); state->profile_queue = std::make_shared(std::numeric_limits::max()); CurrentThread::attachInternalProfileEventsQueue(state->profile_queue); diff --git a/src/Client/LocalConnection.h b/src/Client/LocalConnection.h index 5cc3d0b30ec..b424c5b5aa3 100644 --- a/src/Client/LocalConnection.h +++ b/src/Client/LocalConnection.h @@ -61,6 +61,8 @@ struct LocalQueryState /// Time after the last check to stop the request and send the progress. 
Stopwatch after_send_progress; Stopwatch after_send_profile_events; + + std::unique_ptr query_scope_holder; }; diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index c1f163939e8..0188ebc8173 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -96,10 +96,6 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p loading_thread = std::thread([my_context = Context::createCopy(context), connection_parameters, suggestion_limit, this] { ThreadStatus thread_status; - my_context->makeQueryContext(); - auto group = ThreadGroup::createForQuery(my_context); - CurrentThread::attachToGroup(group); - for (size_t retry = 0; retry < 10; ++retry) { try From ca9bd647fbd97fd36de5e30778c824ae522e03c3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 04:52:18 +0200 Subject: [PATCH 1903/2361] Simplification --- src/Client/ClientBase.cpp | 43 +++++++-------------------------------- 1 file changed, 7 insertions(+), 36 deletions(-) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 62a008bc88c..a305278fb4d 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1877,48 +1877,19 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin profile_events.watch.restart(); { - /// Temporarily apply query settings to context. - std::optional old_settings; - SCOPE_EXIT_SAFE({ - if (old_settings) - client_context->setSettings(*old_settings); + /// Temporarily apply query settings to the context. + Settings old_settings = client_context->getSettingsCopy(); + SCOPE_EXIT_SAFE( + { + client_context->setSettings(old_settings); }); - - auto apply_query_settings = [&](const IAST & settings_ast) - { - if (!old_settings) - old_settings.emplace(client_context->getSettingsRef()); - client_context->applySettingsChanges(settings_ast.as()->changes); - client_context->resetSettingsToDefaultValue(settings_ast.as()->default_settings); - }; - - const auto * insert = parsed_query->as(); - if (const auto * select = parsed_query->as(); select && select->settings()) - apply_query_settings(*select->settings()); - else if (const auto * select_with_union = parsed_query->as()) - { - const ASTs & children = select_with_union->list_of_selects->children; - if (!children.empty()) - { - // On the client it is enough to apply settings only for the - // last SELECT, since the only thing that is important to apply - // on the client is format settings. 
- const auto * last_select = children.back()->as(); - if (last_select && last_select->settings()) - { - apply_query_settings(*last_select->settings()); - } - } - } - else if (const auto * query_with_output = parsed_query->as(); query_with_output && query_with_output->settings_ast) - apply_query_settings(*query_with_output->settings_ast); - else if (insert && insert->settings_ast) - apply_query_settings(*insert->settings_ast); + InterpreterSetQuery::applySettingsFromQuery(parsed_query, client_context); if (!connection->checkConnected(connection_parameters.timeouts)) connect(); ASTPtr input_function; + const auto * insert = parsed_query->as(); if (insert && insert->select) insert->tryFindInputFunction(input_function); From 013680e397dcd74f807c0e90efa9a643f9bae1a1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 8 Aug 2024 03:00:51 +0000 Subject: [PATCH 1904/2361] empty commit From 301ac5dab7222035cdcc4fa32d061eb9f2294c05 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 9 Jul 2024 17:51:48 +0200 Subject: [PATCH 1905/2361] Fix possible data-race StorageKafka with statistics_interval_ms>0 The problem here is that the ignorelist did not work for some reason: if I look at the ignored functions, they should not contain any TSan interception code, while they do: $ lldb-13 clickhouse (lldb) target create "clickhouse" disas -n rd_avg_rollover Current executable set to '/home/azat/ch/tmp/tsan-test/clickhouse' (x86_64). (lldb) disas -n rd_avg_rollover clickhouse`rd_kafka_stats_emit_avg: clickhouse[0x1cbf84a7] <+39>: leaq 0x30(%r15), %r12 clickhouse[0x1cbf84ab] <+43>: movq %r12, %rdi clickhouse[0x1cbf84ae] <+46>: callq 0x1ccdad40 ; rdk_thread_mutex_lock at tinycthread.c:111 clickhouse[0x1cbf84b3] <+51>: leaq 0x58(%r15), %rdi clickhouse[0x1cbf84b7] <+55>: callq 0x71b5390 ; __tsan_read4 clickhouse[0x1cbf84bc] <+60>: cmpl $0x0, 0x58(%r15) clickhouse[0x1cbf84c1] <+65>: je 0x1cbf8595 ; <+277> [inlined] rd_avg_rollover + 238 at rdavg.h clickhouse[0x1cbf84c7] <+71>: leaq -0xc8(%rbp), %rdi clickhouse[0x1cbf84ce] <+78>: xorl %esi, %esi clickhouse[0x1cbf84d0] <+80>: callq 0x1ccdac80 ; rdk_thread_mutex_init at tinycthread.c:62 clickhouse[0x1cbf84d5] <+85>: leaq 0x5c(%r15), %rdi clickhouse[0x1cbf84d9] <+89>: callq 0x71b5390 ; __tsan_read4 (lldb) disas -n rd_avg_calc clickhouse`rd_kafka_broker_ops_io_serve: clickhouse[0x1cbdf086] <+1990>: leaq 0x5a4(%rbx), %rdi clickhouse[0x1cbdf08d] <+1997>: callq 0x71b5390 ; __tsan_read4 clickhouse[0x1cbdf092] <+2002>: cmpl $0x0, 0x5a4(%rbx) clickhouse[0x1cbdf099] <+2009>: je 0x1cbdf12b ; <+2155> [inlined] rd_kafka_broker_timeout_scan + 719 at rdkafka_broker.c I guess the reason is that they had been inlined. So now rd_avg_calc() is guarded with a mutex. 
Refs: https://github.com/ClickHouse/librdkafka/pull/11 Fixes: https://github.com/ClickHouse/ClickHouse/issues/60939 Signed-off-by: Azat Khuzhin --- contrib/librdkafka | 2 +- tests/tsan_ignorelist.txt | 4 +--- tests/ubsan_ignorelist.txt | 1 + 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/contrib/librdkafka b/contrib/librdkafka index 2d2aab6f5b7..39d4ed49ccf 160000 --- a/contrib/librdkafka +++ b/contrib/librdkafka @@ -1 +1 @@ -Subproject commit 2d2aab6f5b79db1cfca15d7bf0dee75d00d82082 +Subproject commit 39d4ed49ccf3406e2bf825d5d7b0903b5a290782 diff --git a/tests/tsan_ignorelist.txt b/tests/tsan_ignorelist.txt index 96bf6e4251f..2a31fc9bc15 100644 --- a/tests/tsan_ignorelist.txt +++ b/tests/tsan_ignorelist.txt @@ -5,11 +5,9 @@ # # Caveats for generic entry "fun": # - does not work for __attribute__((__always_inline__)) +# - and may not work for functions that had been inlined # - requires asterisk at the beginning *and* end for static functions # [thread] # https://github.com/ClickHouse/ClickHouse/issues/55629 fun:rd_kafka_broker_set_nodename -# https://github.com/ClickHouse/ClickHouse/issues/60443 -fun:*rd_avg_calc* -fun:*rd_avg_rollover* diff --git a/tests/ubsan_ignorelist.txt b/tests/ubsan_ignorelist.txt index 57d6598afa6..b75819b3f4b 100644 --- a/tests/ubsan_ignorelist.txt +++ b/tests/ubsan_ignorelist.txt @@ -9,6 +9,7 @@ # # Caveats for generic entry "fun": # - does not work for __attribute__((__always_inline__)) +# - and may not work for functions that had been inlined # - requires asterisk at the beginning *and* end for static functions # [undefined] From d6ecabb41dff64a1fb8ac5a77ffc1a8bed15162b Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Mon, 5 Aug 2024 00:40:23 +0000 Subject: [PATCH 1906/2361] Fix flaky test_storage_s3_queue/test.py::test_multiple_tables_streaming_sync_distributed Disable parallel processing for the Ordered mode for the test_storage_s3_queue/test.py::test_multiple_tables_streaming_sync_distributed test. The reason for this is that the load between the processing nodes is too uneven when s3queue_processing_threads_num != 1, e.g.: ``` $ grep res1 pytest.log 2024-08-07 07:15:58 [ 575 ] DEBUG : res1 size: 13300, res2 size: 1700, total_rows: 15000 (test.py:813, test_multiple_tables_streaming_sync_distributed) ``` In the CI environment, there are rare cases when one of the processors handles all the workload while the other is busy-waiting, and the test fails on the assert. When s3queue_processing_threads_num == 1, the workload is evenly distributed: ``` $ grep res1 pytest.log 2024-08-07 07:26:52 [ 586 ] DEBUG : res1 size: 7200, res2 size: 7800, total_rows: 15000 (test.py:813, test_multiple_tables_streaming_sync_distributed) ``` This change only fixes test flakiness. Further investigation of the Ordered mode parallelism is required. 
--- tests/integration/test_storage_s3_queue/test.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 92d6f181464..8f197e09e61 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -771,7 +771,11 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): table_name, mode, files_path, - additional_settings={"keeper_path": keeper_path, "s3queue_buckets": 2}, + additional_settings={ + "keeper_path": keeper_path, + "s3queue_buckets": 2, + **({"s3queue_processing_threads_num": 1} if mode == "ordered" else {}), + }, ) for instance in [node, node_2]: @@ -806,6 +810,10 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): list(map(int, l.split())) for l in run_query(node_2, get_query).splitlines() ] + logging.debug( + f"res1 size: {len(res1)}, res2 size: {len(res2)}, total_rows: {total_rows}" + ) + assert len(res1) + len(res2) == total_rows # Checking that all engines have made progress From bd3674e6e982b66f571248a717628649b1482fe5 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 7 Aug 2024 20:13:50 +0200 Subject: [PATCH 1907/2361] Add restart --- docker/test/stateless/run.sh | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index b33c261dacc..19f2cfca43f 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -54,8 +54,6 @@ source /utils.lib /usr/share/clickhouse-test/config/install.sh ./setup_minio.sh stateless -./mc admin trace clickminio > /test_output/minio.log & -MC_ADMIN_PID=$! ./setup_hdfs_minicluster.sh @@ -176,7 +174,7 @@ done setup_logs_replication attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01 -# create minio log webhooks for both audit and server logs +# create tables for minio log webhooks clickhouse-client --query "CREATE TABLE minio_audit_logs ( log String, @@ -184,7 +182,6 @@ clickhouse-client --query "CREATE TABLE minio_audit_logs ) ENGINE = MergeTree ORDER BY tuple()" -./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" clickhouse-client --query "CREATE TABLE minio_server_logs ( @@ -193,7 +190,36 @@ clickhouse-client --query "CREATE TABLE minio_server_logs ) ENGINE = MergeTree ORDER BY tuple()" + +# create minio log webhooks for both audit and server logs ./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" +max_retries=100 +retry=1 + +while [ $retry -le $max_retries ]; do + echo "clickminio restart attempt $retry:" + + output=$(mc admin service restart clickminio 2>&1) + echo "$output" + + if echo "$output" | grep -q "Restarted \`clickminio\` successfully in 1 seconds"; then + echo "Restarted clickminio successfully." + break + fi + + sleep 1 + + retry=$((retry + 1)) +done + +if [ $retry -gt $max_retries ]; then + echo "Failed to restart clickminio after $max_retries attempts." 
+fi + +./mc admin service restart clickminio +./mc admin trace clickminio > /test_output/minio.log & +MC_ADMIN_PID=$! function fn_exists() { declare -F "$1" > /dev/null; From 5c97205742ff12f28cab2b853a9473cbb59edfe3 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 07:23:10 +0000 Subject: [PATCH 1908/2361] Reapply "Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing" This reverts commit ff8ce505d752eff1c867d73b47e39a03f0f13622. --- contrib/CMakeLists.txt | 2 +- contrib/rocksdb | 2 +- contrib/rocksdb-cmake/CMakeLists.txt | 44 ++++++++++++++++++---------- 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 977efda15ff..eb3afe0ccdf 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -71,7 +71,6 @@ add_contrib (zlib-ng-cmake zlib-ng) add_contrib (bzip2-cmake bzip2) add_contrib (minizip-ng-cmake minizip-ng) add_contrib (snappy-cmake snappy) -add_contrib (rocksdb-cmake rocksdb) add_contrib (thrift-cmake thrift) # parquet/arrow/orc add_contrib (arrow-cmake arrow) # requires: snappy, thrift, double-conversion @@ -148,6 +147,7 @@ add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift, avro, arro add_contrib (cppkafka-cmake cppkafka) add_contrib (libpqxx-cmake libpqxx) add_contrib (libpq-cmake libpq) +add_contrib (rocksdb-cmake rocksdb) # requires: jemalloc, snappy, zlib, lz4, zstd, liburing add_contrib (nuraft-cmake NuRaft) add_contrib (fast_float-cmake fast_float) add_contrib (idna-cmake idna) diff --git a/contrib/rocksdb b/contrib/rocksdb index 49ce8a1064d..5f003e4a22d 160000 --- a/contrib/rocksdb +++ b/contrib/rocksdb @@ -1 +1 @@ -Subproject commit 49ce8a1064dd1ad89117899839bf136365e49e79 +Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24 diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 57c056532c6..7e5e9a28d0f 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -5,36 +5,38 @@ if (NOT ENABLE_ROCKSDB OR NO_SSE3_OR_HIGHER) # assumes SSE4.2 and PCLMUL return() endif() -# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default -set (CMAKE_CXX_STANDARD 20) - -# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs -option(WITH_JEMALLOC "build with JeMalloc" OFF) - -option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING - # ClickHouse cannot be compiled without snappy, lz4, zlib, zstd option(WITH_SNAPPY "build with SNAPPY" ON) option(WITH_LZ4 "build with lz4" ON) option(WITH_ZLIB "build with zlib" ON) option(WITH_ZSTD "build with zstd" ON) -if(WITH_SNAPPY) +if (ENABLE_JEMALLOC) + add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE) + list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc) +endif () + +if (ENABLE_LIBURING) + add_definitions(-DROCKSDB_IOURING_PRESENT) + list (APPEND THIRDPARTY_LIBS ch_contrib::liburing) +endif () + +if (WITH_SNAPPY) add_definitions(-DSNAPPY) list(APPEND THIRDPARTY_LIBS ch_contrib::snappy) endif() -if(WITH_ZLIB) +if (WITH_ZLIB) add_definitions(-DZLIB) list(APPEND THIRDPARTY_LIBS ch_contrib::zlib) endif() -if(WITH_LZ4) +if (WITH_LZ4) add_definitions(-DLZ4) list(APPEND THIRDPARTY_LIBS ch_contrib::lz4) endif() -if(WITH_ZSTD) +if (WITH_ZSTD) add_definitions(-DZSTD) list(APPEND THIRDPARTY_LIBS ch_contrib::zstd) endif() @@ -88,6 +90,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc 
${ROCKSDB_SOURCE_DIR}/cache/tiered_secondary_cache.cc ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc + ${ROCKSDB_SOURCE_DIR}/db/attribute_group_iterator_impl.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc @@ -104,6 +107,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc ${ROCKSDB_SOURCE_DIR}/db/builder.cc ${ROCKSDB_SOURCE_DIR}/db/c.cc + ${ROCKSDB_SOURCE_DIR}/db/coalescing_iterator.cc ${ROCKSDB_SOURCE_DIR}/db/column_family.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc @@ -124,6 +128,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc + ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_follower.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc @@ -181,6 +186,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc ${ROCKSDB_SOURCE_DIR}/env/file_system.cc ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc + ${ROCKSDB_SOURCE_DIR}/env/fs_on_demand.cc ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc ${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc @@ -368,6 +374,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc + ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_for_tiering_collector.cc ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc ${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc @@ -388,6 +395,7 @@ set(SOURCES ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc + ${ROCKSDB_SOURCE_DIR}/utilities/types_util.cc ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc ${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc @@ -418,14 +426,18 @@ if(HAS_ARMV8_CRC) endif(HAS_ARMV8_CRC) list(APPEND SOURCES - "${ROCKSDB_SOURCE_DIR}/port/port_posix.cc" - "${ROCKSDB_SOURCE_DIR}/env/env_posix.cc" - "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc" - "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc") + ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc + ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc + ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc + ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc) add_library(_rocksdb ${SOURCES}) add_library(ch_contrib::rocksdb ALIAS _rocksdb) target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) +# Not in the native build system but useful anyways: +# Make all functions in xxHash.h inline. 
Beneficial for performance: https://github.com/Cyan4973/xxHash/tree/v0.8.2#build-modifiers +target_compile_definitions (_rocksdb PRIVATE XXH_INLINE_ALL) + # SYSTEM is required to overcome some issues target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include") From 5a17d93bf3968c9d7b19935ca4270d29b8eebef9 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 07:29:20 +0000 Subject: [PATCH 1909/2361] Fix freebsd build --- contrib/rocksdb-cmake/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 7e5e9a28d0f..44aa7494607 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -11,7 +11,7 @@ option(WITH_LZ4 "build with lz4" ON) option(WITH_ZLIB "build with zlib" ON) option(WITH_ZSTD "build with zstd" ON) -if (ENABLE_JEMALLOC) +if (ENABLE_JEMALLOC AND OS_LINUX) # gives compile errors with jemalloc enabled for rocksdb on non-Linux add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE) list (APPEND THIRDPARTY_LIBS ch_contrib::jemalloc) endif () From 9d30b45dbeddec977e2b5ef1ca8286c35c012129 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 8 Aug 2024 08:43:49 +0100 Subject: [PATCH 1910/2361] Fix --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 19f2cfca43f..4d86afa4bac 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -200,7 +200,7 @@ retry=1 while [ $retry -le $max_retries ]; do echo "clickminio restart attempt $retry:" - output=$(mc admin service restart clickminio 2>&1) + output=$(./mc admin service restart clickminio 2>&1) echo "$output" if echo "$output" | grep -q "Restarted \`clickminio\` successfully in 1 seconds"; then From 71a761232c36f5ec3c30df7f3c4c3294641e414f Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Thu, 8 Aug 2024 08:45:42 +0100 Subject: [PATCH 1911/2361] empty From 29d701aea7f429b884715e08c9e2f60851156963 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov <32552679+zvonand@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:51:57 +0300 Subject: [PATCH 1912/2361] Forgot to drop table in test --- tests/integration/test_storage_hdfs/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 035fe45797d..aef5ddb3675 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -118,6 +118,7 @@ def test_read_write_storage_with_globs(started_cluster): node1.query("drop table HDFSStorageWithEnum") node1.query("drop table HDFSStorageWithQuestionMark") node1.query("drop table HDFSStorageWithAsterisk") + node1.query("drop table HDFSStorageWithDoubleAsterisk") def test_storage_with_multidirectory_glob(started_cluster): From 59a63cd110d42ac313384481af1c6bf836dfef06 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Thu, 8 Aug 2024 09:48:12 +0200 Subject: [PATCH 1913/2361] CI: Fix for filtering jobs in PRs --- tests/ci/ci_cache.py | 3 ++- tests/ci/test_ci_config.py | 43 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index a59fd3e5a29..9eeda7161ee 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -731,7 +731,8 @@ class CiCache: job_config=reference_config, ): 
remove_from_workflow.append(job_name) - has_test_jobs_to_skip = True + if job_name != CI.JobNames.DOCS_CHECK: + has_test_jobs_to_skip = True else: required_builds += ( job_config.required_builds if job_config.required_builds else [] diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 6ffedfdecd4..525b3bf367b 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -642,7 +642,7 @@ class TestCIConfig(unittest.TestCase): release_branch=True, ) for record_t_, records_ in ci_cache.records.items(): - if record_t_.value == CiCache.RecordType.FAILED.value: + if record_t_.value == record.record_type.value: records_[record.to_str_key()] = record ci_cache.filter_out_not_affected_jobs() @@ -716,7 +716,7 @@ class TestCIConfig(unittest.TestCase): release_branch=True, ) for record_t_, records_ in ci_cache.records.items(): - if record_t_.value == CiCache.RecordType.FAILED.value: + if record_t_.value == record.record_type.value: records_[record.to_str_key()] = record ci_cache.filter_out_not_affected_jobs() @@ -726,3 +726,42 @@ class TestCIConfig(unittest.TestCase): MOCK_REQUIRED_BUILDS, ) self.assertCountEqual(list(ci_cache.jobs_to_do), expected_to_do) + + def test_ci_py_filters_not_affected_jobs_in_prs_docs_check(self): + """ + checks ci.py filters not affected jobs in PRs, + Docs Check is special from ci_cache perspective - + check it ci pr pipline is filtered properly when only docs check is to be skipped + """ + settings = CiSettings() + settings.no_ci_cache = True + pr_info = PRInfo(github_event=_TEST_EVENT_JSON) + pr_info.event_type = EventType.PULL_REQUEST + pr_info.number = 123 + assert pr_info.is_pr + ci_cache = CIPY._configure_jobs( + S3Helper(), pr_info, settings, skip_jobs=False, dry_run=True + ) + self.assertTrue(not ci_cache.jobs_to_skip, "Must be no jobs in skip list") + assert not ci_cache.jobs_to_wait + assert not ci_cache.jobs_to_skip + + job_config = ci_cache.jobs_to_do[CI.JobNames.DOCS_CHECK] + for batch in range(job_config.num_batches): + # add any record into cache + record = CiCache.Record( + record_type=CiCache.RecordType.PENDING, + job_name=CI.JobNames.DOCS_CHECK, + job_digest=ci_cache.job_digests[CI.JobNames.DOCS_CHECK], + batch=batch, + num_batches=job_config.num_batches, + release_branch=True, + ) + for record_t_, records_ in ci_cache.records.items(): + if record_t_.value == record.record_type.value: + records_[record.to_str_key()] = record + + expected_jobs = list(ci_cache.jobs_to_do) + expected_jobs.remove(CI.JobNames.DOCS_CHECK) + ci_cache.filter_out_not_affected_jobs() + self.assertCountEqual(list(ci_cache.jobs_to_do), expected_jobs) From 6a2ebfc95b29af7b9df5166f04b794a2db8fdff0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 10:07:14 +0200 Subject: [PATCH 1914/2361] Revert "Use `Atomic` database by default in `clickhouse-local`" --- programs/local/LocalServer.cpp | 21 ++++----- src/Databases/DatabaseAtomic.cpp | 24 ++-------- src/Databases/DatabaseAtomic.h | 3 -- src/Databases/DatabaseLazy.cpp | 3 +- src/Databases/DatabaseLazy.h | 2 +- src/Databases/DatabaseOnDisk.cpp | 28 +++-------- src/Databases/DatabaseOnDisk.h | 7 +-- src/Databases/DatabaseOrdinary.cpp | 4 +- src/Databases/DatabasesOverlay.cpp | 47 ------------------- src/Databases/DatabasesOverlay.h | 9 ---- src/Databases/IDatabase.h | 1 - .../MySQL/DatabaseMaterializedMySQL.cpp | 1 - src/Interpreters/StorageID.h | 1 + .../0_stateless/01191_rename_dictionary.sql | 1 - ...ickhouse_local_interactive_table.reference | 4 +- 
...2141_clickhouse_local_interactive_table.sh | 4 +- .../03199_atomic_clickhouse_local.reference | 6 --- .../03199_atomic_clickhouse_local.sh | 24 ---------- 18 files changed, 29 insertions(+), 161 deletions(-) delete mode 100644 tests/queries/0_stateless/03199_atomic_clickhouse_local.reference delete mode 100755 tests/queries/0_stateless/03199_atomic_clickhouse_local.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 0d731ed0e14..6b0b8fc5b50 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -51,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -216,12 +216,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str return system_database; } -static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context) +static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) { - auto overlay = std::make_shared(name_, context); - overlay->registerNextDatabase(std::make_shared(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context)); - overlay->registerNextDatabase(std::make_shared(name_, "", context)); - return overlay; + auto databaseCombiner = std::make_shared(name_, context_); + databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); + databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); + return databaseCombiner; } /// If path is specified and not empty, will try to setup server environment and load existing metadata @@ -367,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery() else table_structure = "(" + table_structure + ")"; - return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});", + return fmt::format("CREATE TABLE {} {} ENGINE = File({}, {});", table_name, table_structure, data_format, table_file); } @@ -761,12 +761,7 @@ void LocalServer::processConfig() DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); std::string default_database = server_settings.default_database; - { - DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context); - if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil) - DatabaseCatalog::instance().addUUIDMapping(uuid); - DatabaseCatalog::instance().attachDatabase(default_database, database); - } + DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context)); global_context->setCurrentDatabase(default_database); if (getClientConfiguration().has("path")) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 83b82976e4f..d86e29ca915 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -53,6 +53,9 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c , db_uuid(uuid) { assert(db_uuid != UUIDHelpers::Nil); + fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); + fs::create_directories(path_to_table_symlinks); + tryCreateMetadataSymlink(); } DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_) @@ -60,16 +63,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C { } -void DatabaseAtomic::createDirectories() -{ - if (database_atomic_directories_created.test_and_set()) - return; - 
DatabaseOnDisk::createDirectories(); - fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); - fs::create_directories(path_to_table_symlinks); - tryCreateMetadataSymlink(); -} - String DatabaseAtomic::getTableDataPath(const String & table_name) const { std::lock_guard lock(mutex); @@ -106,7 +99,6 @@ void DatabaseAtomic::drop(ContextPtr) void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); - createDirectories(); DetachedTables not_in_use; std::lock_guard lock(mutex); not_in_use = cleanupDetachedTables(); @@ -208,15 +200,11 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_ if (exchange && !supportsAtomicRename()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported"); - createDirectories(); waitDatabaseStarted(); auto & other_db = dynamic_cast(to_database); bool inside_database = this == &other_db; - if (!inside_database) - other_db.createDirectories(); - String old_metadata_path = getObjectMetadataPath(table_name); String new_metadata_path = to_database.getObjectMetadataPath(to_table_name); @@ -337,7 +325,6 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora const String & table_metadata_tmp_path, const String & table_metadata_path, ContextPtr query_context) { - createDirectories(); DetachedTables not_in_use; auto table_data_path = getTableDataPath(query); try @@ -474,9 +461,6 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin if (mode < LoadingStrictnessLevel::FORCE_RESTORE) return; - if (!fs::exists(path_to_table_symlinks)) - return; - /// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks)) { @@ -604,7 +588,6 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new { /// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard - createDirectories(); waitDatabaseStarted(); bool check_ref_deps = query_context->getSettingsRef().check_referential_table_dependencies; @@ -696,5 +679,4 @@ void registerDatabaseAtomic(DatabaseFactory & factory) }; factory.registerDatabase("Atomic", create_fn); } - } diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index ca24494f600..4a4ccfa2573 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -76,9 +76,6 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); - std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT; - void createDirectories(); - void tryCreateMetadataSymlink(); virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; } diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index e43adfc5d37..3fb6d30fcb8 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -47,13 +47,12 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, : DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_) , expiration_time(expiration_time_) { - createDirectories(); } void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, 
LoadingStrictnessLevel /*mode*/) { - iterateMetadataFiles([this, &local_context](const String & file_name) + iterateMetadataFiles(local_context, [this, &local_context](const String & file_name) { const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4)); diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index aeac130594f..41cfb751141 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -12,7 +12,7 @@ class DatabaseLazyIterator; class Context; /** Lazy engine of databases. - * Works like DatabaseOrdinary, but stores only recently accessed tables in memory. + * Works like DatabaseOrdinary, but stores in memory only the cache. * Can be used only with *Log engines. */ class DatabaseLazy final : public DatabaseOnDisk diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 82a81b0b32d..734f354d9a5 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -172,14 +172,7 @@ DatabaseOnDisk::DatabaseOnDisk( , metadata_path(metadata_path_) , data_path(data_path_) { -} - - -void DatabaseOnDisk::createDirectories() -{ - if (directories_created.test_and_set()) - return; - fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); + fs::create_directories(local_context->getPath() + data_path); fs::create_directories(metadata_path); } @@ -197,8 +190,6 @@ void DatabaseOnDisk::createTable( const StoragePtr & table, const ASTPtr & query) { - createDirectories(); - const auto & settings = local_context->getSettingsRef(); const auto & create = query->as(); assert(table_name == create.getTable()); @@ -266,6 +257,7 @@ void DatabaseOnDisk::createTable( } commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context); + removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false); } @@ -293,8 +285,6 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora { try { - createDirectories(); - /// Add a table to the map of known tables. 
attachTable(query_context, query.getTable(), table, getTableDataPath(query)); @@ -430,7 +420,6 @@ void DatabaseOnDisk::renameTable( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported"); } - createDirectories(); waitDatabaseStarted(); auto table_data_relative_path = getTableDataPath(table_name); @@ -579,14 +568,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context) assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty()); if (local_context->getSettingsRef().force_remove_data_recursively_on_drop) { - (void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path); + (void)fs::remove_all(local_context->getPath() + getDataPath()); (void)fs::remove_all(getMetadataPath()); } else { try { - (void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path); + (void)fs::remove(local_context->getPath() + getDataPath()); (void)fs::remove(getMetadataPath()); } catch (const fs::filesystem_error & e) @@ -624,18 +613,15 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n } } -void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const +void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const { - if (!fs::exists(metadata_path)) - return; - auto process_tmp_drop_metadata_file = [&](const String & file_name) { assert(getUUID() == UUIDHelpers::Nil); static const char * tmp_drop_ext = ".sql.tmp_drop"; const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext)); - if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name)) + if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name)) { fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql"); LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name)); @@ -652,7 +638,7 @@ void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_meta std::vector> metadata_files; fs::directory_iterator dir_end; - for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it) + for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it) { String file_name = dir_it->path().filename(); /// For '.svn', '.gitignore' directory and similar. 
diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 0c0ecf76a26..12656068643 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -64,7 +64,7 @@ public: time_t getObjectMetadataModificationTime(const String & object_name) const override; String getDataPath() const override { return data_path; } - String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; } + String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } @@ -83,7 +83,7 @@ protected: using IteratingFunction = std::function; - void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const; + void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const; ASTPtr getCreateTableQueryImpl( const String & table_name, @@ -99,9 +99,6 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} - std::atomic_flag directories_created = ATOMIC_FLAG_INIT; - void createDirectories(); - const String metadata_path; const String data_path; }; diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index dd8a3f42ea8..8808261654f 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -55,7 +55,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated"; DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_) - : DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_) + : DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", context_) { } @@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables } }; - iterateMetadataFiles(process_metadata); + iterateMetadataFiles(local_context, process_metadata); size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count; size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries; diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 495733e15fd..801356b3dd7 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,8 +14,6 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; - extern const int BAD_ARGUMENTS; - extern const int UNKNOWN_TABLE; } DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) @@ -126,39 +124,6 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab getEngineName()); } -void DatabasesOverlay::renameTable( - ContextPtr current_context, - const String & name, - IDatabase & to_database, - const String & to_name, - bool exchange, - bool dictionary) -{ - for (auto & db : databases) - { - if (db->isTableExist(name, current_context)) - { - if (DatabasesOverlay * 
to_overlay_database = typeid_cast(&to_database)) - { - /// Renaming from Overlay database inside itself or into another Overlay database. - /// Just use the first database in the overlay as a destination. - if (to_overlay_database->databases.empty()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName()); - - db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary); - } - else - { - /// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database. - db->renameTable(current_context, name, to_database, to_name, exchange, dictionary); - } - - return; - } - } - throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name)); -} - ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const { ASTPtr result = nullptr; @@ -213,18 +178,6 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const return result; } -UUID DatabasesOverlay::getUUID() const -{ - UUID result = UUIDHelpers::Nil; - for (const auto & db : databases) - { - result = db->getUUID(); - if (result != UUIDHelpers::Nil) - break; - } - return result; -} - UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const { UUID result = UUIDHelpers::Nil; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index 40c653e5cb5..b0c7e7e4032 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -35,21 +35,12 @@ public: StoragePtr detachTable(ContextPtr context, const String & table_name) override; - void renameTable( - ContextPtr current_context, - const String & name, - IDatabase & to_database, - const String & to_name, - bool exchange, - bool dictionary) override; - ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; ASTPtr getCreateDatabaseQuery() const override; String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; - UUID getUUID() const override; UUID tryGetTableUUID(const String & table_name) const override; void drop(ContextPtr context) override; diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index 02418abb2b0..f94326d220e 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -416,7 +416,6 @@ public: std::lock_guard lock{mutex}; return database_name; } - /// Get UUID of database. 
virtual UUID getUUID() const { return UUIDHelpers::Nil; } diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp index 8b3850c4e0c..2f5477a6b9d 100644 --- a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp @@ -46,7 +46,6 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( , settings(std::move(settings_)) , materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get()) { - createDirectories(); } void DatabaseMaterializedMySQL::rethrowExceptionIfNeeded() const diff --git a/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h index ad55d16e284..f9afbc7b98d 100644 --- a/src/Interpreters/StorageID.h +++ b/src/Interpreters/StorageID.h @@ -27,6 +27,7 @@ class ASTQueryWithTableAndOutput; class ASTTableIdentifier; class Context; +// TODO(ilezhankin): refactor and merge |ASTTableIdentifier| struct StorageID { String database_name; diff --git a/tests/queries/0_stateless/01191_rename_dictionary.sql b/tests/queries/0_stateless/01191_rename_dictionary.sql index be95e5a7d4b..c5012dabc81 100644 --- a/tests/queries/0_stateless/01191_rename_dictionary.sql +++ b/tests/queries/0_stateless/01191_rename_dictionary.sql @@ -27,7 +27,6 @@ RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QU DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} DROP TABLE test_01191.t; -DROP DATABASE IF EXISTS dummy_db; CREATE DATABASE dummy_db ENGINE=Atomic; RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 0e74c0a083e..0bb8966cbe4 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') -CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') +CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh index 3a95e59416a..934d87616ac 100755 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' -$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' +$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' +$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference deleted file mode 100644 index 1975397394b..00000000000 --- a/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference +++ /dev/null @@ -1,6 +0,0 @@ -123 -Hello -['Hello','world'] -Hello -Hello -['Hello','world'] diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh deleted file mode 100755 index edaa83b8f95..00000000000 --- a/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - -${CLICKHOUSE_LOCAL} -n " -CREATE TABLE test (x UInt8) ORDER BY x; -INSERT INTO test VALUES (123); -SELECT * FROM test; -CREATE OR REPLACE TABLE test (s String) ORDER BY s; -INSERT INTO test VALUES ('Hello'); -SELECT * FROM test; -RENAME TABLE test TO test2; -CREATE OR REPLACE TABLE test (s Array(String)) ORDER BY s; -INSERT INTO test VALUES (['Hello', 'world']); -SELECT * FROM test; -SELECT * FROM test2; -EXCHANGE TABLES test AND test2; -SELECT * FROM test; -SELECT * FROM test2; -DROP TABLE test; -DROP TABLE test2; -" From f4aac7bbd9431e4d95eadfea31239b331ea18d77 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 8 Aug 2024 09:19:45 +0100 Subject: [PATCH 1915/2361] Another fix --- docker/test/stateless/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 4d86afa4bac..830a02a64a3 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -203,7 +203,7 @@ while [ $retry -le $max_retries ]; do output=$(./mc admin service restart clickminio 2>&1) echo "$output" - if echo "$output" | grep -q "Restarted \`clickminio\` successfully in 1 seconds"; then + if echo "$output" | grep -q "Restarted \`clickminio\` successfully"; then echo "Restarted clickminio successfully." 
break fi From d0f35ce6a60e13b8aff9687a45e293ce89693241 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 8 Aug 2024 10:29:01 +0200 Subject: [PATCH 1916/2361] Fix setting prefix --- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 433a0e96d2e..7205b5b3294 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -305,7 +305,8 @@ void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMet S3::ListObjectsV2Request request; request.SetBucket(uri.bucket); - request.SetPrefix(path); + if (path != "/") + request.SetPrefix(path); if (max_keys) request.SetMaxKeys(static_cast(max_keys)); else From effaeeeeac72673aa6ff5a84b73bbbd79a2067d4 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 08:43:39 +0000 Subject: [PATCH 1917/2361] Update 3rd party lib guide --- docs/en/development/contrib.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index db3eabaecfc..a4353450957 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -27,23 +27,23 @@ Avoid dumping copies of external code into the library directory. Instead create a Git submodule to pull third-party code from an external upstream repository. All submodules used by ClickHouse are listed in the `.gitmodule` file. -If the library can be used as-is (the default case), you can reference the upstream repository directly. -If the library needs patching, create a fork of the upstream repository in the [ClickHouse organization on GitHub](https://github.com/ClickHouse). +- If the library can be used as-is (the default case), you can reference the upstream repository directly. +- If the library needs patching, create a fork of the upstream repository in the [ClickHouse organization on GitHub](https://github.com/ClickHouse). In the latter case, we aim to isolate custom patches as much as possible from upstream commits. -To that end, create a branch with prefix `clickhouse/` from the branch or tag you want to integrate, e.g. `clickhouse/master` (for branch `master`) or `clickhouse/release/vX.Y.Z` (for tag `release/vX.Y.Z`). -This ensures that pulls from the upstream repository into the fork will leave custom `clickhouse/` branches unaffected. -Submodules in `contrib/` must only track `clickhouse/` branches of forked third-party repositories. +To that end, create a branch with prefix `ClickHouse/` from the branch or tag you want to integrate, e.g. `ClickHouse/2024_2` (for branch `2024_2`) or `ClickHouse/release/vX.Y.Z` (for tag `release/vX.Y.Z`). +Avoid following upstream development branches `master`/ `main` / `dev` (i.e., prefix branches `ClickHouse/master` / `ClickHouse/main` / `ClickHouse/dev` in the fork repository). +Such branches are moving targets which make proper versioning harder. +"Prefix branches" ensure that pulls from the upstream repository into the fork will leave custom `ClickHouse/` branches unaffected. +Submodules in `contrib/` must only track `ClickHouse/` branches of forked third-party repositories. -Patches are only applied against `clickhouse/` branches of external libraries. -For that, push the patch as a branch with `clickhouse/`, e.g. `clickhouse/fix-some-desaster`. 
-Then create a PR from the new branch against the custom tracking branch with `clickhouse/` prefix, (e.g. `clickhouse/master` or `clickhouse/release/vX.Y.Z`) and merge the patch. +Patches are only applied against `ClickHouse/` branches of external libraries. + +There are two ways to do that: +- you like to make a new fix against a `ClickHouse/`-prefix branch in the forked repository, e.g. a sanitizer fix. In that case, push the fix as a branch with `ClickHouse/` prefix, e.g. `ClickHouse/fix-sanitizer-disaster`. Then create a PR from the new branch against the custom tracking branch, e.g. `ClickHouse/2024_2 <-- ClickHouse/fix-sanitizer-disaster` and merge the PR. +- you update the submodule and need to forward earlier patches. Re-creating old PRs is overkill in this case. Instead, simply cherry-pick older commits into the new `ClickHouse/` branch (corresponding to the new version). Feel free to squash commits of PRs that had multiple commits. + +Once the submodule has been updated, bump the submodule in ClickHouse to point to the new hash in the fork. Create patches of third-party libraries with the official repository in mind and consider contributing the patch back to the upstream repository. This makes sure that others will also benefit from the patch and it will not be a maintenance burden for the ClickHouse team. - -To pull upstream changes into the submodule, you can use two methods: -- (less work but less clean): merge upstream `master` into the corresponding `clickhouse/` tracking branch in the forked repository. You will need to resolve merge conflicts with previous custom patches. This method can be used when the `clickhouse/` branch tracks an upstream development branch like `master`, `main`, `dev`, etc. -- (more work but cleaner): create a new branch with `clickhouse/` prefix from the upstream commit or tag you like to integrate. Then re-apply all existing patches using new PRs (or squash them into a single PR). This method can be used when the `clickhouse/` branch tracks a specific upstream version branch or tag. It is cleaner in the sense that custom patches and upstream changes are better isolated from each other. - -Once the submodule has been updated, bump the submodule in ClickHouse to point to the new hash in the fork. From 76b8bcd97a40a5e05e43c3940be20d3e42c4f532 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 08:46:46 +0000 Subject: [PATCH 1918/2361] Update --- docs/en/development/contrib.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/contrib.md b/docs/en/development/contrib.md index a4353450957..c49492c1cb4 100644 --- a/docs/en/development/contrib.md +++ b/docs/en/development/contrib.md @@ -41,7 +41,7 @@ Patches are only applied against `ClickHouse/` branches of external libraries. There are two ways to do that: - you like to make a new fix against a `ClickHouse/`-prefix branch in the forked repository, e.g. a sanitizer fix. In that case, push the fix as a branch with `ClickHouse/` prefix, e.g. `ClickHouse/fix-sanitizer-disaster`. Then create a PR from the new branch against the custom tracking branch, e.g. `ClickHouse/2024_2 <-- ClickHouse/fix-sanitizer-disaster` and merge the PR. -- you update the submodule and need to forward earlier patches. Re-creating old PRs is overkill in this case. Instead, simply cherry-pick older commits into the new `ClickHouse/` branch (corresponding to the new version). Feel free to squash commits of PRs that had multiple commits. 
+- you update the submodule and need to re-apply earlier patches. In this case, re-creating old PRs is overkill. Instead, simply cherry-pick older commits into the new `ClickHouse/` branch (corresponding to the new version). Feel free to squash commits of PRs that had multiple commits. In the best case, we did contribute custom patches back to upstream and can omit patches in the new version. Once the submodule has been updated, bump the submodule in ClickHouse to point to the new hash in the fork. From 59b737c9ac045ca0ec48eb2b8893c5e54646003a Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Thu, 8 Aug 2024 10:50:14 +0200 Subject: [PATCH 1919/2361] CI: set correct timeout for stateless tests --- tests/ci/ci_definitions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index b62d2e0aa8e..48847b0d7a6 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -420,7 +420,7 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=1001, # test + timeout=9000, ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", From dbe3035b6d96e40ae78204ba53cc91296b2af765 Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:32:42 +0200 Subject: [PATCH 1920/2361] Update src/Disks/DiskFomAST.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Disks/DiskFomAST.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 35cb124acfd..9f83b09c8a2 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -26,7 +26,7 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string { Poco::Util::AbstractConfiguration::Keys disk_settings_keys; config->keys(disk_settings_keys); - // Check that no settings are defined when disk from the config is referred. + /// Check that no settings are defined when disk from the config is referred. if (disk_settings_keys.empty()) throw Exception( ErrorCodes::BAD_ARGUMENTS, From 376d643e39b82011d8135a23c926ae22fee4d68a Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:32:55 +0200 Subject: [PATCH 1921/2361] Update src/Disks/DiskFomAST.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Disks/DiskFomAST.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 9f83b09c8a2..6af7ad21366 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -30,7 +30,7 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string if (disk_settings_keys.empty()) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Disk function has no arguments. Invalid disk description."); + "Disk function must have arguments. 
Invalid disk description."); if (disk_settings_keys.size() == 1 && disk_settings_keys.front() == "name" && !attach) throw Exception( From 8a93b1c7cff3c42f03642b4f7722c80c95937062 Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:33:10 +0200 Subject: [PATCH 1922/2361] Update src/Disks/DiskFomAST.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Disks/DiskFomAST.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 6af7ad21366..5c9ece699c4 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -35,7 +35,7 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string if (disk_settings_keys.size() == 1 && disk_settings_keys.front() == "name" && !attach) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Disk function `{}` has to have the other arguments which describe the disk. Invalid disk description.", + "Disk function `{}` must have other arguments apart from `name`, which describe disk configuration. Invalid disk description.", serialization); auto disk_settings_hash = sipHash128(serialization.data(), serialization.size()); From b79e701d8f7fda40bfb647c3c22bd637aef93fce Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:33:36 +0200 Subject: [PATCH 1923/2361] Update src/Disks/DiskFomAST.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Disks/DiskFomAST.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 5c9ece699c4..6d0a4ec2ea3 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -137,7 +137,7 @@ std::string DiskFomAST::createCustomDisk(const ASTPtr & disk_function_ast, Conte return disk_name; } -std::string DiskFomAST::getConfigDefinedDisk(const std::string &disk_name, ContextPtr context) +std::string DiskFomAST::getConfigDefinedDisk(const std::string & disk_name, ContextPtr context) { if (auto result = context->tryGetDisk(disk_name)) { From 2e5b71cdb17625480d7ed281dc3d7fa2315bfac5 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 8 Aug 2024 12:27:03 +0200 Subject: [PATCH 1924/2361] Update trace_log.md --- docs/en/operations/system-tables/trace_log.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md index 5adc33de37f..a60de2a08d1 100644 --- a/docs/en/operations/system-tables/trace_log.md +++ b/docs/en/operations/system-tables/trace_log.md @@ -3,7 +3,7 @@ slug: /en/operations/system-tables/trace_log --- # trace_log -Contains stack traces collected by the sampling query profiler. +Contains stack traces collected by the [sampling query profiler](../../operations/optimizing-performance/sampling-query-profiler.md). ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. 
Also see settings: [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns), [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns), [memory_profiler_step](../../operations/settings/settings.md#memory_profiler_step), [memory_profiler_sample_probability](../../operations/settings/settings.md#memory_profiler_sample_probability), [trace_profile_events](../../operations/settings/settings.md#trace_profile_events). From df23a3456f66f50397091f3d84c6f07a6a1d8d81 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 8 Aug 2024 12:28:26 +0200 Subject: [PATCH 1925/2361] work with review --- src/Disks/DiskFomAST.cpp | 24 ++++++++------------ src/Disks/DiskFomAST.h | 2 +- src/Disks/StoragePolicy.h | 2 -- src/Interpreters/Context.cpp | 9 -------- src/Interpreters/Context.h | 1 - src/Storages/MergeTree/MergeTreeSettings.cpp | 2 +- 6 files changed, 11 insertions(+), 29 deletions(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 6d0a4ec2ea3..638161c5c16 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -133,25 +133,19 @@ std::string DiskFomAST::createCustomDisk(const ASTPtr & disk_function_ast, Conte FlattenDiskConfigurationVisitor::Data data{context, attach}; FlattenDiskConfigurationVisitor{data}.visit(ast); - auto disk_name = assert_cast(*ast).value.get(); - return disk_name; + return assert_cast(*ast).value.get(); } -std::string DiskFomAST::getConfigDefinedDisk(const std::string & disk_name, ContextPtr context) +void DiskFomAST::ensureDiskIsNotCustom(const std::string & disk_name, ContextPtr context) { - if (auto result = context->tryGetDisk(disk_name)) - { - if (result->isCustomDisk()) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Disk name `{}` is a custom disk that is used in other table. " - "That disk could not be used by a reference by other tables. The custom disk should be fully specified with a disk function.", - disk_name); + auto disk = context->getDisk(disk_name); - return disk_name; - } - - throw Exception(ErrorCodes::UNKNOWN_DISK, "Unknown disk {}", disk_name); + if (disk->isCustomDisk()) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Disk name `{}` is a custom disk that is used in other table. " + "That disk could not be used by a reference by other tables. 
The custom disk should be fully specified with a disk function.", + disk_name); } } diff --git a/src/Disks/DiskFomAST.h b/src/Disks/DiskFomAST.h index 3a70484eda0..0a30834533e 100644 --- a/src/Disks/DiskFomAST.h +++ b/src/Disks/DiskFomAST.h @@ -8,7 +8,7 @@ namespace DB namespace DiskFomAST { - std::string getConfigDefinedDisk(const std::string & name, ContextPtr context); + void ensureDiskIsNotCustom(const std::string & name, ContextPtr context); std::string createCustomDisk(const ASTPtr & disk_function, ContextPtr context, bool attach); } diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index ccf2e2071b2..8e49ed910e3 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -12,8 +12,6 @@ #include #include -#include -#include #include #include #include diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 30f77f799e9..5413b568068 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -4395,15 +4395,6 @@ DiskPtr Context::getDisk(const String & name) const return disk_selector->get(name); } -DiskPtr Context::tryGetDisk(const String & name) const -{ - std::lock_guard lock(shared->storage_policies_mutex); - - auto disk_selector = getDiskSelector(lock); - - return disk_selector->tryGet(name); -} - DiskPtr Context::getOrCreateDisk(const String & name, DiskCreator creator) const { std::lock_guard lock(shared->storage_policies_mutex); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 586eff768df..d5e35c3e4b3 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1186,7 +1186,6 @@ public: /// Provides storage disks DiskPtr getDisk(const String & name) const; - DiskPtr tryGetDisk(const String & name) const; using DiskCreator = std::function; DiskPtr getOrCreateDisk(const String & name, DiskCreator creator) const; diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index e11af43ed23..dabb6991b0b 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -71,7 +71,7 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def, ContextPtr conte } else { - value = DiskFomAST::getConfigDefinedDisk(value.safeGet(), context); + DiskFomAST::ensureDiskIsNotCustom(value.safeGet(), context); } if (has("storage_policy")) From ec145c86f5b4c4c716d4b27334381eeae83f99a8 Mon Sep 17 00:00:00 2001 From: Sema Checherinda <104093494+CheSema@users.noreply.github.com> Date: Thu, 8 Aug 2024 12:28:52 +0200 Subject: [PATCH 1926/2361] Update src/Disks/DiskFomAST.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Disks/DiskFomAST.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index 638161c5c16..bb2fcda68cb 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -64,7 +64,7 @@ std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string if (!disk->isCustomDisk()) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "The disk `{}` is already exist and described by the config." + "Disk `{}` already exists and is described by the config." 
" It is impossible to redefine it.", disk_name); From 69ac203c9fce9972b89082ca653894fc8709f2fd Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 8 Aug 2024 12:56:33 +0200 Subject: [PATCH 1927/2361] fix tests --- src/Interpreters/Context.cpp | 2 +- src/Interpreters/SystemLog.h | 2 +- tests/integration/test_system_flush_logs/test.py | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 3051ed3e567..4a08fd5fe5b 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -4256,7 +4256,7 @@ std::shared_ptr Context::getS3QueueLog() const if (!shared->system_logs) return {}; - return shared->system_logs->s3_queue_log; + return shared->system_logs->s3queue_log; } std::shared_ptr Context::getAzureQueueLog() const diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 6682829c0c6..24ef6a18eb8 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -18,7 +18,7 @@ M(ErrorLog, error_log, "Contains history of error values from table system.errors, periodically flushed to disk.") \ M(FilesystemCacheLog, filesystem_cache_log, "Contains a history of all events occurred with filesystem cache for objects on a remote filesystem.") \ M(FilesystemReadPrefetchesLog, filesystem_read_prefetches_log, "Contains a history of all prefetches done during reading from MergeTables backed by a remote filesystem.") \ - M(ObjectStorageQueueLog, s3_queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ + M(ObjectStorageQueueLog, s3queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ M(ObjectStorageQueueLog, azure_queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ M(AsynchronousMetricLog, asynchronous_metric_log, "Contains the historical values for system.asynchronous_metrics, once per time interval (one second by default).") \ M(OpenTelemetrySpanLog, opentelemetry_span_log, "Contains information about trace spans for executed queries.") \ diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index dd48ef055f5..cfecea5b3d6 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -24,8 +24,7 @@ def start_cluster(): def test_system_logs_exists(): system_logs = [ - # disabled by default - ("system.text_log", 0), + ("system.text_log", 1), ("system.query_log", 1), ("system.query_thread_log", 1), ("system.part_log", 1), From e90487fd54a5c87eb0875191e7547fc2b7f2e229 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 8 Aug 2024 12:57:50 +0200 Subject: [PATCH 1928/2361] tests/clickhouse-test: remove superior global Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 88ff6753a8f..480dc553247 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -363,8 +363,6 @@ CAPTURE_CLIENT_STACKTRACE = False def kill_process_group(pgid): - global CAPTURE_CLIENT_STACKTRACE - print(f"Killing process group {pgid}") print(f"Processes in process group {pgid}:") print( From 420f97c8506d7c9d4216c868a75547c48a408de2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 8 Aug 2024 12:58:40 +0200 Subject: [PATCH 1929/2361] tests/clickhouse-test: update return type hint in run_single_test() Signed-off-by: Azat Khuzhin --- 
tests/clickhouse-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 480dc553247..2da7550158b 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -1603,7 +1603,7 @@ class TestCase: def run_single_test( self, server_logs_level, client_options - ) -> Tuple[Optional[Popen], str, str, str, float]: + ) -> Tuple[Optional[Popen], float]: args = self.testcase_args client = args.testcase_client start_time = args.testcase_start_time From 117fedd3bbad038ba449e3ff69c85cd937388e80 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 8 Aug 2024 12:59:32 +0200 Subject: [PATCH 1930/2361] fix style --- src/Disks/DiskFomAST.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index bb2fcda68cb..b2f1280c507 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -19,7 +19,6 @@ namespace DB namespace ErrorCodes { extern const int BAD_ARGUMENTS; - extern const int UNKNOWN_DISK; } std::string getOrCreateCustomDisk(DiskConfigurationPtr config, const std::string & serialization, ContextPtr context, bool attach) From 979f93df12b8901df5af56f019861f910c7637cc Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 8 Aug 2024 13:00:07 +0200 Subject: [PATCH 1931/2361] tests/clickhouse-test: better english in comment Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 2da7550158b..a70d706c7e7 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -415,8 +415,8 @@ def cleanup_child_processes(pid): ) # Due to start_new_session=True, it is not enough to kill by PGID, we need # to look at children processes as well. - # But we are hoping that nobody create session in the tests (though it is - # possible via timeout(), but we assuming that they will be killed by + # But we are hoping that nobody creates session in the tests (though it is + # possible via timeout(), but we are assuming that they will be killed by # timeout). 
processes = subprocess.check_output( f"pgrep --parent {pid}", shell=True, stderr=subprocess.STDOUT From bc2740aa7000694a20f3db5f13538d030248b4e0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 8 Aug 2024 13:00:37 +0200 Subject: [PATCH 1932/2361] tests/clickhouse-test: s/RELEASE_BUILD/RELEASE_NON_SANITIZED/g Signed-off-by: Azat Khuzhin --- tests/clickhouse-test | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a70d706c7e7..a3d7e0e922d 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -376,7 +376,7 @@ def kill_process_group(pgid): # Let's try to dump stacktrace in client (useful to catch issues there) os.killpg(pgid, signal.SIGTSTP) # Wait some time for clickhouse utilities to gather stacktrace - if RELEASE_BUILD: + if RELEASE_NON_SANITIZED: sleep(0.5) else: sleep(10) @@ -2407,11 +2407,11 @@ class BuildFlags: # Release and non-sanitizer build -RELEASE_BUILD = False +RELEASE_NON_SANITIZED = False def collect_build_flags(args): - global RELEASE_BUILD + global RELEASE_NON_SANITIZED result = [] @@ -2437,7 +2437,7 @@ def collect_build_flags(args): elif b"RelWithDebInfo" in value or b"Release" in value: result.append(BuildFlags.RELEASE) - RELEASE_BUILD = result == [BuildFlags.RELEASE] + RELEASE_NON_SANITIZED = result == [BuildFlags.RELEASE] value = clickhouse_execute( args, From b0ba53788ac758ac1405ceefacd91bb0418b5834 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 7 Aug 2024 16:29:07 +0000 Subject: [PATCH 1933/2361] Refactor tests for (experimental) statistics --- docs/en/development/tests.md | 4 +- .../statements/alter/statistics.md | 16 +- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- ...2864_statistics_count_min_sketch.reference | 14 -- .../02864_statistics_count_min_sketch.sql | 70 ------ .../02864_statistics_ddl.reference | 37 +-- .../0_stateless/02864_statistics_ddl.sql | 234 ++++++++++++++---- ...delayed_materialization_in_merge.reference | 12 + ...stics_delayed_materialization_in_merge.sql | 36 +++ .../02864_statistics_exception.reference | 0 .../02864_statistics_exception.sql | 55 ---- ..._statistics_materialize_in_merge.reference | 10 - .../02864_statistics_materialize_in_merge.sql | 52 ---- .../02864_statistics_predicates.reference | 92 +++++++ .../02864_statistics_predicates.sql | 214 ++++++++++++++++ .../02864_statistics_uniq.reference | 35 --- .../0_stateless/02864_statistics_uniq.sql | 73 ------ .../02864_statistics_usage.reference | 20 ++ .../0_stateless/02864_statistics_usage.sql | 42 ++++ 19 files changed, 619 insertions(+), 399 deletions(-) delete mode 100644 tests/queries/0_stateless/02864_statistics_count_min_sketch.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_count_min_sketch.sql create mode 100644 tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference create mode 100644 tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_exception.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_exception.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql create mode 100644 tests/queries/0_stateless/02864_statistics_predicates.reference create mode 100644 tests/queries/0_stateless/02864_statistics_predicates.sql delete mode 100644 
tests/queries/0_stateless/02864_statistics_uniq.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_uniq.sql create mode 100644 tests/queries/0_stateless/02864_statistics_usage.reference create mode 100644 tests/queries/0_stateless/02864_statistics_usage.sql diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 269995a1a96..6cb36e2049b 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -14,7 +14,7 @@ Each functional test sends one or multiple queries to the running ClickHouse ser Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from ClickHouse and it is available to general public. -Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. +Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. :::note A common mistake when testing data types `DateTime` and `DateTime64` is assuming that the server uses a specific time zone (e.g. "UTC"). This is not the case, time zones in CI test runs @@ -38,7 +38,7 @@ For more options, see `tests/clickhouse-test --help`. You can simply run all tes ### Adding a New Test -To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client --multiquery < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 6880cef0e5c..7a1774a01b5 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -8,26 +8,28 @@ sidebar_label: STATISTICS The following operations are available: -- `ALTER TABLE [db].table ADD STATISTICS (columns list) TYPE (type list)` - Adds statistic description to tables metadata. +- `ALTER TABLE [db].table ADD STATISTICS [IF NOT EXISTS] (column list) TYPE (type list)` - Adds statistic description to tables metadata. -- `ALTER TABLE [db].table MODIFY STATISTICS (columns list) TYPE (type list)` - Modifies statistic description to tables metadata. 
+- `ALTER TABLE [db].table MODIFY STATISTICS (column list) TYPE (type list)` - Modifies statistic description to tables metadata.

-- `ALTER TABLE [db].table DROP STATISTICS (columns list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns.
+- `ALTER TABLE [db].table DROP STATISTICS [IF EXISTS] (column list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns.

-- `ALTER TABLE [db].table CLEAR STATISTICS (columns list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuild using `ALTER TABLE MATERIALIZE STATISTICS`.
+- `ALTER TABLE [db].table CLEAR STATISTICS [IF EXISTS] (column list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuilt using `ALTER TABLE MATERIALIZE STATISTICS`.

-- `ALTER TABLE [db.]table MATERIALIZE STATISTICS (columns list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
+- `ALTER TABLE [db.]table MATERIALIZE STATISTICS [IF EXISTS] (column list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).

The first two commands are lightweight in a sense that they only change metadata or remove files.

Also, they are replicated, syncing statistics metadata via ZooKeeper.

-There is an example adding two statistics types to two columns:
+## Example:
+
+Adding two statistics types to two columns:

```
ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq;
```

:::note
-Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
+Statistics are supported only for [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
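For reference, a minimal sketch that ties the commands above together, using a hypothetical `MergeTree` table `tab` and the experimental-statistics settings that the tests in this patch rely on (illustrative only, not part of the patch):

```sql
-- Illustrative lifecycle of column statistics on a hypothetical table
SET allow_experimental_statistics = 1;  -- statistics are experimental
SET allow_statistics_optimize = 1;      -- allow the optimizer to use them

CREATE TABLE tab
(
    a Float64 STATISTICS(tdigest, uniq),
    b Int64,
    pk String
)
ENGINE = MergeTree
ORDER BY pk;

ALTER TABLE tab ADD STATISTICS IF NOT EXISTS b TYPE tdigest;  -- add to an existing column
ALTER TABLE tab MATERIALIZE STATISTICS b;                     -- rebuild for existing parts (a mutation)
ALTER TABLE tab DROP STATISTICS IF EXISTS b;                  -- remove metadata and statistics objects
```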
::: diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 051d52a71cd..fe4857e9449 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3513,7 +3513,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context const auto & new_column = new_metadata.getColumns().get(command.column_name); if (!old_column.type->equals(*new_column.type)) throw Exception(ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN, - "ALTER types of column {} with statistics is not not safe " + "ALTER types of column {} with statistics is not safe " "because it can change the representation of statistics", backQuoteIfNeed(command.column_name)); } diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference deleted file mode 100644 index 02c41656a36..00000000000 --- a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference +++ /dev/null @@ -1,14 +0,0 @@ -CREATE TABLE default.tab\n(\n `a` String,\n `b` UInt64,\n `c` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -Test statistics count_min: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) -Test statistics multi-types: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) -Test LowCardinality and Nullable data type: -tab2 diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql deleted file mode 100644 index c730aa7b4a7..00000000000 --- a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql +++ /dev/null @@ -1,70 +0,0 @@ --- Tags: no-fasttest - -DROP TABLE IF EXISTS tab SYNC; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET allow_suspicious_low_cardinality_types=1; -SET mutations_sync = 2; - -CREATE TABLE tab -( - a String, - b UInt64, - c Int64, - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE tab; - -INSERT INTO tab select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'Test statistics count_min:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min; -ALTER TABLE tab ADD STATISTICS c TYPE count_min; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - - -SELECT 'Test statistics multi-types:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; -ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) -WHERE explain LIKE 
'%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - -DROP TABLE IF EXISTS tab SYNC; - - -SELECT 'Test LowCardinality and Nullable data type:'; -DROP TABLE IF EXISTS tab2 SYNC; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE tab2 -( - a LowCardinality(Int64) STATISTICS(count_min), - b Nullable(Int64) STATISTICS(count_min), - c LowCardinality(Nullable(Int64)) STATISTICS(count_min), - pk String, -) Engine = MergeTree() ORDER BY pk; - -select name from system.tables where name = 'tab2' and database = currentDatabase(); - -DROP TABLE IF EXISTS tab2 SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_ddl.reference b/tests/queries/0_stateless/02864_statistics_ddl.reference index a7ff5caa0b0..0e453b0ee8a 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.reference +++ b/tests/queries/0_stateless/02864_statistics_ddl.reference @@ -1,31 +1,6 @@ -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -10 -0 -After drop statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(b, 10), less(a, 10)) (removed) -10 -CREATE TABLE default.tab\n(\n `a` Float64,\n `b` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After add statistic -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After materialize statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -20 -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -20 -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After rename - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, 10)) (removed) -20 +CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` 
String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql index fe612efe2ac..32b56a842b7 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.sql +++ b/tests/queries/0_stateless/02864_statistics_ddl.sql @@ -1,59 +1,195 @@ --- Tests that various DDL statements create/drop/materialize statistics +-- Tags: no-fasttest +-- no-fasttest: 'count_min' sketches need a 3rd party library + +-- Tests that DDL statements which create / drop / materialize statistics + +SET mutations_sync = 1; DROP TABLE IF EXISTS tab; +-- Error case: Can't create statistics when allow_experimental_statistics = 0 +CREATE TABLE tab (col Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; + +-- Error case: Unknown statistics types are rejected +CREATE TABLE tab (col Float64 STATISTICS(no_statistics_type)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +-- Error case: The same statistics type can't exist more than once on a column +CREATE TABLE tab (col Float64 STATISTICS(tdigest, tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SET allow_suspicious_low_cardinality_types = 1; + +-- Statistics can only be created on columns of specific data types (depending on the statistics kind), (*) + +-- tdigest requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE 
tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- uniq requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String +-- These types work: +CREATE TABLE tab (col UInt8 
STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- CREATE TABLE was easy, ALTER is more fun CREATE TABLE tab ( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f32 Float32, + s String, + a Array(Float64) +) +Engine = MergeTree() +ORDER BY tuple(); +-- Error case: Unknown statistics types are rejected +-- (relevant for ADD and MODIFY) +ALTER TABLE tab ADD STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +-- for some reason, ALTER TABLE tab MODIFY STATISTICS IF EXISTS is not supported + +-- Error case: The same statistics type can't exist more than once on a column +-- (relevant for ADD and MODIFY) +-- Create the same statistics object twice +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE tdigest, tdigest; 
-- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +-- Create a statistics object which exists already +ALTER TABLE tab ADD STATISTICS f64_tdigest TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64_tdigest TYPE tdigest; -- no-op +ALTER TABLE tab MODIFY STATISTICS f64_tdigest TYPE tdigest; -- no-op + +-- Error case: Column does not exist +-- (relevant for ADD, MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab ADD STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab CLEAR STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS no_such_column; -- { serverError ILLEGAL_STATISTICS } + +-- Error case: Column exists but has no statistics +-- (relevant for MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab MODIFY STATISTICS s TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab CLEAR STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS s; -- { serverError ILLEGAL_STATISTICS } + +-- We don't systematically check that statistics can only be created via ALTER ADD STATISTICS on columns of specific data types (the +-- internal type validation code is already tested above, (*)). Only do a rudimentary check for each statistics type with a data type that +-- works and one that doesn't work.
+-- tdigest +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +-- uniq +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +-- count_min +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } + +-- Any data type changes on columns with statistics are disallowed, for simplicity even if the new data type is compatible with all existing +-- statistics objects (e.g. tdigest can be created on Float64 and UInt64) +ALTER TABLE tab MODIFY COLUMN f64_tdigest UInt64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- Finally, do a full-circle test of a good case. Print table definition after each step. +-- Intentionally specifying _two_ columns and _two_ statistics types to have that also tested. +SHOW CREATE TABLE tab; +ALTER TABLE tab ADD STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab MODIFY STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab CLEAR STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab MATERIALIZE STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab DROP STATISTICS f64, f32; SHOW CREATE TABLE tab; -INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; -SELECT count(*) FROM tab WHERE b < NULL and a < '10'; - -ALTER TABLE tab DROP STATISTICS a, b; - -SELECT 'After drop statistic'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -SHOW CREATE TABLE tab; - -ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; - -SELECT 'After add statistic'; - -SHOW CREATE TABLE tab; - -ALTER TABLE tab MATERIALIZE STATISTICS a, b; -INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'After materialize statistic'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -OPTIMIZE TABLE tab FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT 
count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -ALTER TABLE tab RENAME COLUMN b TO c; -SHOW CREATE TABLE tab; - -SELECT 'After rename'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE c < 10 and a < 10; - -DROP TABLE IF EXISTS tab; +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference new file mode 100644 index 00000000000..eb5e685597c --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference @@ -0,0 +1,12 @@ +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After truncate, insert, and materialize + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql new file mode 100644 index 00000000000..33a5f9052ba --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql @@ -0,0 +1,36 @@ +-- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) + +DROP TABLE IF EXISTS tab; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET allow_analyzer = 1; + +SET materialize_statistics_on_insert = 0; + +CREATE TABLE tab +( + a Int64 STATISTICS(tdigest), + b Int16 STATISTICS(tdigest), +) ENGINE = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. 
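+-- With 'materialize_statistics_on_insert = 0' (set above), the INSERT below writes parts without statistics;
+-- they are only built later, when the parts are merged by OPTIMIZE TABLE ... FINAL (or by an explicit MATERIALIZE STATISTICS).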
+ +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +TRUNCATE TABLE tab; +SET mutations_sync = 2; +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +SELECT 'After truncate, insert, and materialize'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_exception.reference b/tests/queries/0_stateless/02864_statistics_exception.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/02864_statistics_exception.sql b/tests/queries/0_stateless/02864_statistics_exception.sql deleted file mode 100644 index 289ffee6600..00000000000 --- a/tests/queries/0_stateless/02864_statistics_exception.sql +++ /dev/null @@ -1,55 +0,0 @@ --- Tests creating/dropping/materializing statistics produces the right exceptions. - -DROP TABLE IF EXISTS tab; - --- Can't create statistics when allow_experimental_statistics = 0 -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - -SET allow_experimental_statistics = 1; - --- The same type of statistics can't exist more than once on a column -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest, tdigest) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- Unknown statistics types are rejected -CREATE TABLE tab -( - a Float64 STATISTICS(no_statistics_type) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- tDigest statistics can only be created on numeric columns -CREATE TABLE tab -( - a String STATISTICS(tdigest), -) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } - -CREATE TABLE tab -( - a Float64, - b String -) Engine = MergeTree() ORDER BY tuple(); - -ALTER TABLE tab ADD STATISTICS a TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS a TYPE tdigest; -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; --- Statistics can be created only on integer columns -ALTER TABLE tab ADD STATISTICS b TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS b; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS a; -ALTER TABLE tab DROP STATISTICS IF EXISTS a; -ALTER TABLE tab CLEAR STATISTICS a; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab CLEAR STATISTICS IF EXISTS a; -ALTER TABLE tab MATERIALIZE STATISTICS b; -- { serverError ILLEGAL_STATISTICS } - -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -ALTER TABLE tab MODIFY COLUMN a Float64 TTL toDateTime(b) + INTERVAL 1 MONTH; -ALTER TABLE tab MODIFY COLUMN a Int64; -- { serverError 
ALTER_OF_COLUMN_IS_FORBIDDEN } - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference deleted file mode 100644 index 5e969cf41cb..00000000000 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference +++ /dev/null @@ -1,10 +0,0 @@ -10 -10 -10 -statistics not used Condition less(b, 10_UInt8) moved to PREWHERE -statistics not used Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after merge Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after merge Condition less(b, 10_UInt8) moved to PREWHERE -statistics used after materialize Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after materialize Condition less(b, 10_UInt8) moved to PREWHERE -2 0 diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql deleted file mode 100644 index 6606cff263f..00000000000 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql +++ /dev/null @@ -1,52 +0,0 @@ --- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). - -DROP TABLE IF EXISTS tab; - -SET enable_analyzer = 1; -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; - -SET materialize_statistics_on_insert = 0; - -CREATE TABLE tab -( - a Int64 STATISTICS(tdigest), - b Int16 STATISTICS(tdigest), -) ENGINE = MergeTree() ORDER BY tuple() -SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. - -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics not used'; - -OPTIMIZE TABLE tab FINAL; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after merge'; - -TRUNCATE TABLE tab; -SET mutations_sync = 2; - -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; -ALTER TABLE tab MATERIALIZE STATISTICS a, b; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after materialize'; - -DROP TABLE tab; - -SYSTEM FLUSH LOGS; - -SELECT log_comment, message FROM system.text_log JOIN -( - SELECT Settings['log_comment'] AS log_comment, query_id FROM system.query_log - WHERE current_database = currentDatabase() - AND query LIKE 'SELECT count(*) FROM tab%' - AND type = 'QueryFinish' -) AS query_log USING (query_id) -WHERE message LIKE '%moved to PREWHERE%' -ORDER BY event_time_microseconds; - -SELECT count(), sum(ProfileEvents['MergeTreeDataWriterStatisticsCalculationMicroseconds']) -FROM system.query_log -WHERE current_database = currentDatabase() - AND query LIKE 'INSERT INTO tab SELECT%' - AND type = 'QueryFinish'; diff --git a/tests/queries/0_stateless/02864_statistics_predicates.reference b/tests/queries/0_stateless/02864_statistics_predicates.reference new file mode 100644 index 00000000000..1c2abd47aaf --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_predicates.reference @@ -0,0 +1,92 @@ +u64 and = +10 +10 +10 +10 +0 +0 +0 +0 +10 +10 +10 +10 +u64 and < +70 +70 +70 +70 +80 +80 +80 +80 +70 +70 +70 +f64 and = +10 +10 +10 +10 +0 +0 +0 +0 +10 +10 +10 +0 +0 +0 +f64 and < +70 +70 +70 +70 +80 +80 +80 +80 +70 +70 +70 +80 +80 +80 +dt and = +0 +0 +0 +0 +10 +10 +10 +10 +dt and < +10000 +10000 
+10000 +70 +70 +70 +70 +b and = +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +0 +0 +0 +0 +s and = +10 +10 diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql new file mode 100644 index 00000000000..3e754dfb1de --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_predicates.sql @@ -0,0 +1,214 @@ +-- Tags: no-fasttest +-- no-fasttest: 'count_min' sketches need a 3rd party library + +-- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types. + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + u64 UInt64, + u64_tdigest UInt64 STATISTICS(tdigest), + u64_count_min UInt64 STATISTICS(count_min), + u64_uniq UInt64 STATISTICS(uniq), + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f64_count_min Float64 STATISTICS(count_min), + f64_uniq Float64 STATISTICS(uniq), + dt DateTime, + dt_tdigest DateTime STATISTICS(tdigest), + dt_count_min DateTime STATISTICS(count_min), + dt_uniq DateTime STATISTICS(uniq), + b Bool, + b_tdigest Bool STATISTICS(tdigest), + b_count_min Bool STATISTICS(count_min), + b_uniq Bool STATISTICS(uniq), + s String, + -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest + s_count_min String STATISTICS(count_min) + -- s_uniq String STATISTICS(uniq), -- not supported by uniq +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab +-- SELECT number % 10000, number % 1000, -(number % 100) FROM system.numbers LIMIT 10000; +SELECT number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 2, + number % 2, + number % 2, + number % 2, + toString(number % 1000), + toString(number % 1000) +FROM system.numbers LIMIT 10000; + +-- u64 ---------------------------------------------------- + +SELECT 'u64 and ='; + +SELECT count(*) FROM tab WHERE u64 = 7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7; +SELECT count(*) FROM tab WHERE u64_count_min = 7; +SELECT count(*) FROM tab WHERE u64_uniq = 7; + +SELECT count(*) FROM tab WHERE u64 = 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE u64_count_min = 7.7; +SELECT count(*) FROM tab WHERE u64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE u64 = '7'; +SELECT count(*) FROM tab WHERE u64_tdigest = '7'; +SELECT count(*) FROM tab WHERE u64_count_min = '7'; +SELECT count(*) FROM tab WHERE u64_uniq = '7'; + +SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } + +SELECT 'u64 and <'; + +SELECT count(*) FROM tab WHERE u64 < 7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7; +SELECT count(*) FROM tab WHERE u64_count_min < 7; +SELECT count(*) FROM tab WHERE u64_uniq < 7; + +SELECT count(*) FROM tab WHERE u64 < 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE u64_count_min < 7.7; +SELECT count(*) FROM tab WHERE u64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE u64 < '7'; +-- SELECT count(*) FROM tab WHERE u64_tdigest < '7'; -- BOOM (#67742) 
+SELECT count(*) FROM tab WHERE u64_count_min < '7'; +SELECT count(*) FROM tab WHERE u64_uniq < '7'; + +SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } + +-- f64 ---------------------------------------------------- + +SELECT 'f64 and ='; + +SELECT count(*) FROM tab WHERE f64 = 7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7; +SELECT count(*) FROM tab WHERE f64_count_min = 7; +SELECT count(*) FROM tab WHERE f64_uniq = 7; + +SELECT count(*) FROM tab WHERE f64 = 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE f64_count_min = 7.7; +SELECT count(*) FROM tab WHERE f64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE f64 = '7'; +-- SELECT count(*) FROM tab WHERE f64_tdigest = '7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_count_min = '7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7'; + +SELECT count(*) FROM tab WHERE f64 = '7.7'; +-- SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; + +SELECT 'f64 and <'; + +SELECT count(*) FROM tab WHERE f64 < 7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7; +SELECT count(*) FROM tab WHERE f64_count_min < 7; +SELECT count(*) FROM tab WHERE f64_uniq < 7; + +SELECT count(*) FROM tab WHERE f64 < 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE f64_count_min < 7.7; +SELECT count(*) FROM tab WHERE f64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE f64 < '7'; +-- SELECT count(*) FROM tab WHERE f64_tdigest < '7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_count_min < '7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7'; + +SELECT count(*) FROM tab WHERE f64 < '7.7'; +-- SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; + +-- dt ---------------------------------------------------- + +SELECT 'dt and ='; + +SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt = 7; +SELECT count(*) FROM tab WHERE dt_tdigest = 7; +SELECT count(*) FROM tab WHERE dt_count_min = 7; +SELECT count(*) FROM tab WHERE dt_uniq = 7; + +SELECT 'dt and <'; + +SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; +-- SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt < 7; +SELECT count(*) FROM tab WHERE dt_tdigest < 7; +SELECT count(*) FROM tab WHERE dt_count_min < 7; +SELECT count(*) FROM tab WHERE dt_uniq < 7; + +-- b ---------------------------------------------------- + +SELECT 'b and ='; + +SELECT count(*) FROM tab WHERE b = true; +SELECT count(*) FROM tab WHERE b_tdigest = true; +SELECT count(*) FROM tab WHERE b_count_min = true; +SELECT count(*) FROM tab WHERE b_uniq = true; + +SELECT count(*) FROM tab WHERE b = 'true'; 
+SELECT count(*) FROM tab WHERE b_tdigest = 'true'; +SELECT count(*) FROM tab WHERE b_count_min = 'true'; +SELECT count(*) FROM tab WHERE b_uniq = 'true'; + +SELECT count(*) FROM tab WHERE b = 1; +SELECT count(*) FROM tab WHERE b_tdigest = 1; +SELECT count(*) FROM tab WHERE b_count_min = 1; +SELECT count(*) FROM tab WHERE b_uniq = 1; + +SELECT count(*) FROM tab WHERE b = 1.1; +SELECT count(*) FROM tab WHERE b_tdigest = 1.1; +SELECT count(*) FROM tab WHERE b_count_min = 1.1; +SELECT count(*) FROM tab WHERE b_uniq = 1.1; + +-- s ---------------------------------------------------- + +SELECT 's and ='; + +SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE } +-- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported +SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE } +-- SELECT count(*) FROM tab WHERE s_uniq = 7; -- not supported + +SELECT count(*) FROM tab WHERE s = '7'; +-- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported +SELECT count(*) FROM tab WHERE s_count_min = '7'; +-- SELECT count(*) FROM tab WHERE s_uniq = '7'; -- not supported + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference deleted file mode 100644 index 77786dbdd8c..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.reference +++ /dev/null @@ -1,35 +0,0 @@ -CREATE TABLE default.t1\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest, uniq),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After modify TDigest - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(c, -1), less(a, 10), less(b, 10)) (removed) -After drop - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql deleted file mode 100644 index 0f5f353c045..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.sql +++ /dev/null @@ -1,73 +0,0 @@ -DROP TABLE IF EXISTS t1; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET mutations_sync = 1; - -CREATE TABLE t1 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Int64 STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE t1; - -INSERT INTO t1 select number, -number, 
number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; -INSERT INTO t1 select 0, 0, 11, generateUUIDv4(); - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -OPTIMIZE TABLE t1 FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT 'After modify TDigest'; -ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; -ALTER TABLE t1 MATERIALIZE STATISTICS c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - - -ALTER TABLE t1 DROP STATISTICS c; - -SELECT 'After drop'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t1; -DROP TABLE IF EXISTS t2; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c LowCardinality(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t2; -DROP TABLE IF EXISTS t3; - -CREATE TABLE t3 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Nullable(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t3; - diff --git a/tests/queries/0_stateless/02864_statistics_usage.reference b/tests/queries/0_stateless/02864_statistics_usage.reference new file mode 100644 index 00000000000..a9f669b88c1 --- 
/dev/null +++ b/tests/queries/0_stateless/02864_statistics_usage.reference @@ -0,0 +1,20 @@ +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After drop statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) +After add and materialize statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After rename + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(c, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_usage.sql b/tests/queries/0_stateless/02864_statistics_usage.sql new file mode 100644 index 00000000000..f936854df44 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_usage.sql @@ -0,0 +1,42 @@ +-- Test that the optimizer picks up column statistics +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; +SET allow_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest) +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab DROP STATISTICS a, b; +SELECT 'After drop statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After add and materialize statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab RENAME COLUMN b TO c; +SELECT 'After rename'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then c (statistics used) + +DROP TABLE IF EXISTS tab; From 4b2234f87d7b7ff2033327bd1c03278735438f1a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 12:06:32 +0000 Subject: [PATCH 1934/2361] Minor fixups --- .../0_stateless/00945_bloom_filter_index.sql | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/00945_bloom_filter_index.sql 
b/tests/queries/0_stateless/00945_bloom_filter_index.sql index 71109df79e7..6e3819e74d3 100644 --- a/tests/queries/0_stateless/00945_bloom_filter_index.sql +++ b/tests/queries/0_stateless/00945_bloom_filter_index.sql @@ -375,13 +375,9 @@ SELECT id, ary[indexOf(ary, 'value3')] FROM test_bf_indexOf WHERE ary[indexOf(ar DROP TABLE IF EXISTS test_bf_indexOf; --- expecting cast function to be unknown +-- Test for bug #65597 DROP TABLE IF EXISTS test_bf_cast; - -CREATE TABLE test_bf_cast (c Int32, INDEX x1 (c) type bloom_filter) ENGINE = MergeTree ORDER BY c as select 1; - -SELECT count() FROM test_bf_cast WHERE cast(c=1 or c=9999 as Bool) settings use_skip_indexes=0; - -SELECT count() FROM test_bf_cast WHERE cast(c=1 or c=9999 as Bool) settings use_skip_indexes=1; - -DROP TABLE test_bf_cast; \ No newline at end of file +CREATE TABLE test_bf_cast (c Int32, INDEX x1 (c) type bloom_filter) ENGINE = MergeTree ORDER BY c AS SELECT 1; +SELECT count() FROM test_bf_cast WHERE cast(c = 1 OR c = 9999 AS Bool) SETTINGS use_skip_indexes=0; +SELECT count() FROM test_bf_cast WHERE cast(c = 1 OR c = 9999 AS Bool) SETTINGS use_skip_indexes=1; +DROP TABLE test_bf_cast; From eca6bba0388f21c67d0caf24e56032bc6b7cd339 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 12:50:45 +0000 Subject: [PATCH 1935/2361] Split 00284_external_aggregation --- .../00284_external_aggregation.reference | 20 ----------------- .../00284_external_aggregation.sql | 19 +++------------- .../00284_external_aggregation_2.reference | 20 +++++++++++++++++ .../00284_external_aggregation_2.sql | 22 +++++++++++++++++++ .../0_stateless/02099_tsv_raw_format_1.sh | 3 +++ .../0_stateless/02099_tsv_raw_format_2.sh | 3 +++ 6 files changed, 51 insertions(+), 36 deletions(-) create mode 100644 tests/queries/0_stateless/00284_external_aggregation_2.reference create mode 100644 tests/queries/0_stateless/00284_external_aggregation_2.sql diff --git a/tests/queries/0_stateless/00284_external_aggregation.reference b/tests/queries/0_stateless/00284_external_aggregation.reference index be0db217a97..48e30e781e0 100644 --- a/tests/queries/0_stateless/00284_external_aggregation.reference +++ b/tests/queries/0_stateless/00284_external_aggregation.reference @@ -1,22 +1,2 @@ 49999995000000 10000000 499999500000 1000000 15 -100033 2 -100034 2 -100035 2 -100036 2 -100037 2 -100038 2 -100039 2 -10004 2 -100040 2 -100041 2 -100033 2 -100034 2 -100035 2 -100036 2 -100037 2 -100038 2 -100039 2 -10004 2 -100040 2 -100041 2 diff --git a/tests/queries/0_stateless/00284_external_aggregation.sql b/tests/queries/0_stateless/00284_external_aggregation.sql index c1140faaa28..cdc31ff68c8 100644 --- a/tests/queries/0_stateless/00284_external_aggregation.sql +++ b/tests/queries/0_stateless/00284_external_aggregation.sql @@ -1,5 +1,8 @@ -- Tags: long +-- This test was split in two due to long runtimes in sanitizers. +-- The other part is 00284_external_aggregation_2. 
+ SET max_bytes_before_external_group_by = 100000000; SET max_memory_usage = 410000000; SET group_by_two_level_threshold = 100000; @@ -7,19 +10,3 @@ SET group_by_two_level_threshold_bytes = 50000000; SELECT sum(k), sum(c) FROM (SELECT number AS k, count() AS c FROM (SELECT * FROM system.numbers LIMIT 10000000) GROUP BY k); SELECT sum(k), sum(c), max(u) FROM (SELECT number AS k, count() AS c, uniqArray(range(number % 16)) AS u FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k); - -SET max_memory_usage = 0; -SET group_by_two_level_threshold = 100000; -SET max_bytes_before_external_group_by = '1Mi'; - --- method: key_string & key_string_two_level -CREATE TABLE t_00284_str(s String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -INSERT INTO t_00284_str SELECT toString(number) FROM numbers_mt(1e6); -INSERT INTO t_00284_str SELECT toString(number) FROM numbers_mt(1e6); -SELECT s, count() FROM t_00284_str GROUP BY s ORDER BY s LIMIT 10 OFFSET 42; - --- method: low_cardinality_key_string & low_cardinality_key_string_two_level -CREATE TABLE t_00284_lc_str(s LowCardinality(String)) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; -INSERT INTO t_00284_lc_str SELECT toString(number) FROM numbers_mt(1e6); -INSERT INTO t_00284_lc_str SELECT toString(number) FROM numbers_mt(1e6); -SELECT s, count() FROM t_00284_lc_str GROUP BY s ORDER BY s LIMIT 10 OFFSET 42; diff --git a/tests/queries/0_stateless/00284_external_aggregation_2.reference b/tests/queries/0_stateless/00284_external_aggregation_2.reference new file mode 100644 index 00000000000..71d2e96d4b0 --- /dev/null +++ b/tests/queries/0_stateless/00284_external_aggregation_2.reference @@ -0,0 +1,20 @@ +100033 2 +100034 2 +100035 2 +100036 2 +100037 2 +100038 2 +100039 2 +10004 2 +100040 2 +100041 2 +100033 2 +100034 2 +100035 2 +100036 2 +100037 2 +100038 2 +100039 2 +10004 2 +100040 2 +100041 2 diff --git a/tests/queries/0_stateless/00284_external_aggregation_2.sql b/tests/queries/0_stateless/00284_external_aggregation_2.sql new file mode 100644 index 00000000000..7960e3894d0 --- /dev/null +++ b/tests/queries/0_stateless/00284_external_aggregation_2.sql @@ -0,0 +1,22 @@ +-- Tags: long + +-- This test was split in two due to long runtimes in sanitizers. +-- The other part is 00284_external_aggregation. 
+ +SET group_by_two_level_threshold_bytes = 50000000; +SET max_memory_usage = 0; +SET group_by_two_level_threshold = 100000; +SET max_bytes_before_external_group_by = '1Mi'; + +-- method: key_string & key_string_two_level +CREATE TABLE t_00284_str(s String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t_00284_str SELECT toString(number) FROM numbers_mt(1e6); +INSERT INTO t_00284_str SELECT toString(number) FROM numbers_mt(1e6); +SELECT s, count() FROM t_00284_str GROUP BY s ORDER BY s LIMIT 10 OFFSET 42; + +-- method: low_cardinality_key_string & low_cardinality_key_string_two_level +CREATE TABLE t_00284_lc_str(s LowCardinality(String)) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t_00284_lc_str SELECT toString(number) FROM numbers_mt(1e6); +INSERT INTO t_00284_lc_str SELECT toString(number) FROM numbers_mt(1e6); +SELECT s, count() FROM t_00284_lc_str GROUP BY s ORDER BY s LIMIT 10 OFFSET 42; + diff --git a/tests/queries/0_stateless/02099_tsv_raw_format_1.sh b/tests/queries/0_stateless/02099_tsv_raw_format_1.sh index a3468f46ca0..bd1f8731717 100755 --- a/tests/queries/0_stateless/02099_tsv_raw_format_1.sh +++ b/tests/queries/0_stateless/02099_tsv_raw_format_1.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # Tags: long +# This test was split in two due to long runtimes in sanitizers. +# The other part is 02099_tsv_raw_format_2.sh. + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02099_tsv_raw_format_2.sh b/tests/queries/0_stateless/02099_tsv_raw_format_2.sh index d6034a0616f..9f57eea42f2 100755 --- a/tests/queries/0_stateless/02099_tsv_raw_format_2.sh +++ b/tests/queries/0_stateless/02099_tsv_raw_format_2.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # Tags: long +# This test was split in two due to long runtimes in sanitizers. +# The other part is 02099_tsv_raw_format_1.sh. +# CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh From 9cf60214e13da94ca2dcee82491d6795c784aaa5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 8 Aug 2024 13:43:12 +0000 Subject: [PATCH 1936/2361] Update version_date.tsv and changelogs after v24.7.3.42-stable --- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.7.3.42-stable.md | 37 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 5 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 docs/changelogs/v24.7.3.42-stable.md diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 94603763572..a44664259fb 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.7.2.13" +ARG VERSION="24.7.3.42" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index f40118c7b06..2565828c846 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.7.2.13" +ARG VERSION="24.7.3.42" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 032aa862e4a..5ac8a58afea 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.7.2.13" +ARG VERSION="24.7.3.42" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docs/changelogs/v24.7.3.42-stable.md b/docs/changelogs/v24.7.3.42-stable.md new file mode 100644 index 00000000000..48f6e301f3c --- /dev/null +++ b/docs/changelogs/v24.7.3.42-stable.md @@ -0,0 +1,37 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.7.3.42-stable (63730bc4293) FIXME as compared to v24.7.2.13-stable (6e41f601b2f) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#67969](https://github.com/ClickHouse/ClickHouse/issues/67969): Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#67637](https://github.com/ClickHouse/ClickHouse/issues/67637): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#67820](https://github.com/ClickHouse/ClickHouse/issues/67820): Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#67881](https://github.com/ClickHouse/ClickHouse/issues/67881): Correctly parse file name/URI containing `::` if it's not an archive. 
[#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67713](https://github.com/ClickHouse/ClickHouse/issues/67713): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67995](https://github.com/ClickHouse/ClickHouse/issues/67995): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC) + +* Backported in [#67818](https://github.com/ClickHouse/ClickHouse/issues/67818): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67766](https://github.com/ClickHouse/ClickHouse/issues/67766): Fix crash of `uniq` and `uniqTheta ` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)). +* Backported in [#67854](https://github.com/ClickHouse/ClickHouse/issues/67854): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#67840](https://github.com/ClickHouse/ClickHouse/issues/67840): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#67518](https://github.com/ClickHouse/ClickHouse/issues/67518): Split slow test 03036_dynamic_read_subcolumns. [#66954](https://github.com/ClickHouse/ClickHouse/pull/66954) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#67516](https://github.com/ClickHouse/ClickHouse/issues/67516): Split 01508_partition_pruning_long. [#66983](https://github.com/ClickHouse/ClickHouse/pull/66983) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#67529](https://github.com/ClickHouse/ClickHouse/issues/67529): Reduce max time of 00763_long_lock_buffer_alter_destination_table. [#67185](https://github.com/ClickHouse/ClickHouse/pull/67185) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#67643](https://github.com/ClickHouse/ClickHouse/issues/67643): [Green CI] Fix potentially flaky test_mask_sensitive_info integration test. [#67506](https://github.com/ClickHouse/ClickHouse/pull/67506) ([Alexey Katsman](https://github.com/alexkats)). +* Backported in [#67609](https://github.com/ClickHouse/ClickHouse/issues/67609): Fix test_zookeeper_config_load_balancing after adding the xdist worker name to the instance. [#67590](https://github.com/ClickHouse/ClickHouse/pull/67590) ([Pablo Marcos](https://github.com/pamarcos)). 
+* Backported in [#67871](https://github.com/ClickHouse/ClickHouse/issues/67871): Fix 02434_cancel_insert_when_client_dies. [#67600](https://github.com/ClickHouse/ClickHouse/pull/67600) ([vdimir](https://github.com/vdimir)). +* Backported in [#67704](https://github.com/ClickHouse/ClickHouse/issues/67704): Fix 02910_bad_logs_level_in_local in fast tests. [#67603](https://github.com/ClickHouse/ClickHouse/pull/67603) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#67689](https://github.com/ClickHouse/ClickHouse/issues/67689): Fix 01605_adaptive_granularity_block_borders. [#67605](https://github.com/ClickHouse/ClickHouse/pull/67605) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#67827](https://github.com/ClickHouse/ClickHouse/issues/67827): Try fix 03143_asof_join_ddb_long. [#67620](https://github.com/ClickHouse/ClickHouse/pull/67620) ([Nikita Taranov](https://github.com/nickitat)). +* Backported in [#67892](https://github.com/ClickHouse/ClickHouse/issues/67892): Revert "Merge pull request [#66510](https://github.com/ClickHouse/ClickHouse/issues/66510) from canhld94/fix_trivial_count_non_deterministic_func". [#67800](https://github.com/ClickHouse/ClickHouse/pull/67800) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index e410f31ca5a..f46353277e2 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v24.7.3.42-stable 2024-08-08 v24.7.2.13-stable 2024-08-01 v24.7.1.2915-stable 2024-07-30 v24.6.3.95-stable 2024-08-06 From cab274e1b696e8e355066cce3b05d4337c486157 Mon Sep 17 00:00:00 2001 From: kruglov Date: Fri, 2 Aug 2024 10:46:56 +0300 Subject: [PATCH 1937/2361] Fixed error on generated columns in MaterializedPostgreSQL --- .../fetchPostgreSQLTableStructure.cpp | 34 +++++++++----- .../fetchPostgreSQLTableStructure.h | 1 + .../test.py | 44 ++++++++++++++++++- 3 files changed, 67 insertions(+), 12 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 943f3ae502e..e2f2358c892 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -196,7 +196,7 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( } else { - std::tuple row; + std::tuple row; while (stream >> row) { const auto column_name = std::get<0>(row); @@ -206,13 +206,14 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( std::get<3>(row)); columns.push_back(NameAndTypePair(column_name, data_type)); - auto attgenerated = std::get<6>(row); + auto attgenerated = std::get<7>(row); attributes.emplace( column_name, PostgreSQLTableStructure::PGAttribute{ .atttypid = parse(std::get<4>(row)), .atttypmod = parse(std::get<5>(row)), + .attnum = parse(std::get<6>(row)), .atthasdef = false, .attgenerated = attgenerated.empty() ? 
char{} : char(attgenerated[0]), .attr_def = {} @@ -308,6 +309,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "attndims AS dims, " /// array dimensions "atttypid as type_id, " "atttypmod as type_modifier, " + "attnum as att_num, " "attgenerated as generated " /// if column has GENERATED "FROM pg_attribute " "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) " @@ -338,17 +340,29 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "WHERE adrelid = (SELECT oid FROM pg_class WHERE {});", where); pqxx::result result{tx.exec(attrdef_query)}; - for (const auto row : result) + if (static_cast(result.size()) > table.physical_columns->names.size()) { - size_t adnum = row[0].as(); - if (!adnum || adnum > table.physical_columns->names.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Received {} attrdef, but currently fetched columns list has {} columns", + result.size(), table.physical_columns->attributes.size()); + } + + for (const auto & column_attrs : table.physical_columns->attributes) + { + if (column_attrs.second.attgenerated != 's') { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Received adnum {}, but currently fetched columns list has {} columns", - adnum, table.physical_columns->attributes.size()); + continue; + } + + for (const auto row : result) + { + int adnum = row[0].as(); + if (column_attrs.second.attnum == adnum) + { + table.physical_columns->attributes.at(column_attrs.first).attr_def = row[1].as(); + break; + } } - const auto column_name = table.physical_columns->names[adnum - 1]; - table.physical_columns->attributes.at(column_name).attr_def = row[1].as(); } } diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 81bf7b278fc..25ece6909fd 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -16,6 +16,7 @@ struct PostgreSQLTableStructure { Int32 atttypid; Int32 atttypmod; + Int32 attnum; bool atthasdef; char attgenerated; std::string attr_def; diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 406b50bc486..75edb22aab1 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -953,12 +953,14 @@ def test_generated_columns(started_cluster): "", f"""CREATE TABLE {table} ( key integer PRIMARY KEY, - x integer, + x integer DEFAULT 0, + temp integer DEFAULT 0, y integer GENERATED ALWAYS AS (x*2) STORED, - z text); + z text DEFAULT 'z'); """, ) + pg_manager.execute(f"alter table {table} drop column temp;") pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") @@ -991,6 +993,44 @@ def test_generated_columns(started_cluster): ) +def test_generated_columns_with_sequence(started_cluster): + table = "test_generated_columns_with_sequence" + + pg_manager.create_postgres_table( + table, + "", + f"""CREATE TABLE {table} ( + key integer PRIMARY KEY, + x integer, + y integer GENERATED ALWAYS AS (x*2) STORED, + z text); + """, + ) + + pg_manager.execute( + f"create sequence {table}_id_seq increment by 1 minvalue 1 start 1;" + ) + pg_manager.execute( + f"alter table {table} alter key set default nextval('{table}_id_seq');" + ) + pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") + 
pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") + + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_tables_list = '{table}'", + "materialized_postgresql_backoff_min_ms = 100", + "materialized_postgresql_backoff_max_ms = 100", + ], + ) + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + def test_default_columns(started_cluster): table = "test_default_columns" From 72bc5cd2e99cee09d0e003fb75192c0bb3114bad Mon Sep 17 00:00:00 2001 From: Kruglov Kirill Date: Mon, 5 Aug 2024 16:10:27 +0300 Subject: [PATCH 1938/2361] Update src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index e2f2358c892..b9fd9c325f8 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -349,7 +349,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( for (const auto & column_attrs : table.physical_columns->attributes) { - if (column_attrs.second.attgenerated != 's') + if (column_attrs.second.attgenerated != 's') /// e.g. not a generated column { continue; } From 55b2000d38e0bc6282714fdb1204d450437433ec Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 13:58:43 +0000 Subject: [PATCH 1939/2361] Fix fasttest --- .../02864_statistics_delayed_materialization_in_merge.sql | 2 +- tests/queries/0_stateless/02864_statistics_usage.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql index 33a5f9052ba..d469a4c2036 100644 --- a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql @@ -7,7 +7,7 @@ DROP TABLE IF EXISTS tab; SET allow_experimental_statistics = 1; SET allow_statistics_optimize = 1; -SET allow_analyzer = 1; +SET enable_analyzer = 1; SET materialize_statistics_on_insert = 0; diff --git a/tests/queries/0_stateless/02864_statistics_usage.sql b/tests/queries/0_stateless/02864_statistics_usage.sql index f936854df44..4956bd27e87 100644 --- a/tests/queries/0_stateless/02864_statistics_usage.sql +++ b/tests/queries/0_stateless/02864_statistics_usage.sql @@ -6,7 +6,7 @@ SET allow_experimental_statistics = 1; SET allow_statistics_optimize = 1; SET mutations_sync = 1; -SET allow_analyzer = 1; +SET enable_analyzer = 1; DROP TABLE IF EXISTS tab; From 05febdfb2bdfa78f2d017758ce2261fb554e9546 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Aug 2024 13:47:44 +0000 Subject: [PATCH 1940/2361] add more events and add tests --- src/Common/ProfileEvents.cpp | 9 +- .../Transforms/ColumnGathererTransform.h | 1 - .../Transforms/MergeJoinTransform.cpp | 3 +- .../Transforms/MergeJoinTransform.h | 1 + .../Transforms/PasteJoinTransform.cpp | 2 +- .../Transforms/PasteJoinTransform.h | 1 + src/Storages/MergeTree/MergeList.h | 1 - src/Storages/MergeTree/MergeTask.cpp | 16 ++-- src/Storages/MergeTree/MergeTask.h | 2 +- 
.../MergeTree/MutateFromLogEntryTask.cpp | 2 + .../MergeTree/MutatePlainMergeTreeTask.cpp | 2 + src/Storages/MergeTree/MutateTask.cpp | 25 ++++-- src/Storages/MergeTree/MutateTask.h | 1 + .../02378_part_log_profile_events.sql | 2 +- .../03221_merge_profile_events.reference | 3 + .../03221_merge_profile_events.sql | 88 +++++++++++++++++++ .../03221_mutate_profile_events.reference | 2 + .../03221_mutate_profile_events.sql | 33 +++++++ 18 files changed, 174 insertions(+), 20 deletions(-) create mode 100644 tests/queries/0_stateless/03221_merge_profile_events.reference create mode 100644 tests/queries/0_stateless/03221_merge_profile_events.sql create mode 100644 tests/queries/0_stateless/03221_mutate_profile_events.reference create mode 100644 tests/queries/0_stateless/03221_mutate_profile_events.sql diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 857a08d8a5d..d43d9fdcea8 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -209,6 +209,8 @@ \ M(Merge, "Number of launched background merges.") \ M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \ + M(MergedColumns, "Number of columns merged during the horizontal stage of merges.") \ + M(GatheredColumns, "Number of columns gathered during the vertical stage of merges.") \ M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge.") \ M(MergeTotalMilliseconds, "Total time spent for background merges") \ M(MergeExecuteMilliseconds, "Total busy time spent for execution of background merges") \ @@ -231,8 +233,11 @@ M(MutationUntouchedParts, "Number of total parts for which mutations tried to be applied but which was completely skipped according to predicate") \ M(MutatedRows, "Rows read for mutations. This is the number of rows before mutation") \ M(MutatedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for mutations. 
This is the number before mutation.") \ - M(MutationTimeMilliseconds, "Total time spent for mutations.") \ - M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \ + M(MutationTotalMilliseconds, "Total time spent for mutations.") \ + M(MutationExecuteMilliseconds, "Total busy time spent for execution of mutations.") \ + M(MutationAllPartColumns, "Number of times when task to mutate all columns in part was created") \ + M(MutationSomePartColumns, "Number of times when task to mutate some columns in part was created") \ + M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections in mutations.") \ \ M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \ M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \ diff --git a/src/Processors/Transforms/ColumnGathererTransform.h b/src/Processors/Transforms/ColumnGathererTransform.h index a535b2669d0..fbc9a6bfcc6 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.h +++ b/src/Processors/Transforms/ColumnGathererTransform.h @@ -2,7 +2,6 @@ #include #include -#include "base/types.h" #include #include diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 26601207da8..ec7f567ea57 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -515,7 +515,7 @@ IMergingAlgorithm::MergedStats MergeJoinAlgorithm::getMergedStats() const { return { - .bytes = 0, + .bytes = stat.num_bytes[0] + stat.num_bytes[1], .rows = stat.num_rows[0] + stat.num_rows[1], .blocks = stat.num_blocks[0] + stat.num_blocks[1], }; @@ -557,6 +557,7 @@ void MergeJoinAlgorithm::consume(Input & input, size_t source_num) { stat.num_blocks[source_num] += 1; stat.num_rows[source_num] += input.chunk.getNumRows(); + stat.num_bytes[source_num] += input.chunk.allocatedBytes(); } prepareChunk(input.chunk); diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index 841a3f15a92..8f74974af0f 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -282,6 +282,7 @@ private: { size_t num_blocks[2] = {0, 0}; size_t num_rows[2] = {0, 0}; + size_t num_bytes[2] = {0, 0}; size_t max_blocks_loaded = 0; }; diff --git a/src/Processors/Transforms/PasteJoinTransform.cpp b/src/Processors/Transforms/PasteJoinTransform.cpp index ad01b721726..982a347a70f 100644 --- a/src/Processors/Transforms/PasteJoinTransform.cpp +++ b/src/Processors/Transforms/PasteJoinTransform.cpp @@ -62,7 +62,7 @@ IMergingAlgorithm::MergedStats PasteJoinAlgorithm::getMergedStats() const { return { - .bytes = 0, + .bytes = stat.num_bytes[0] + stat.num_bytes[1], .rows = stat.num_rows[0] + stat.num_rows[1], .blocks = stat.num_blocks[0] + stat.num_blocks[1], }; diff --git a/src/Processors/Transforms/PasteJoinTransform.h b/src/Processors/Transforms/PasteJoinTransform.h index fbe85f6993b..c184f20362d 100644 --- a/src/Processors/Transforms/PasteJoinTransform.h +++ b/src/Processors/Transforms/PasteJoinTransform.h @@ -54,6 +54,7 @@ private: { size_t num_blocks[2] = {0, 0}; size_t num_rows[2] = {0, 0}; + size_t num_bytes[2] = {0, 0}; size_t max_blocks_loaded = 0; }; diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index 3a96ba0abae..d40af6abf43 100644 --- a/src/Storages/MergeTree/MergeList.h +++ 
b/src/Storages/MergeTree/MergeList.h @@ -6,7 +6,6 @@ #include #include #include -#include "base/types.h" #include #include #include diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 5f178f08ec3..39bac8f7c24 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -5,9 +5,6 @@ #include #include -#include "Common/ElapsedTimeProfileEventIncrement.h" -#include "Common/Logger.h" -#include "Common/Stopwatch.h" #include #include #include @@ -46,6 +43,8 @@ namespace ProfileEvents { extern const Event Merge; + extern const Event MergedColumns; + extern const Event GatheredColumns; extern const Event MergeTotalMilliseconds; extern const Event MergeExecuteMilliseconds; extern const Event MergeHorizontalStageExecuteMilliseconds; @@ -183,6 +182,8 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColu bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() { + ProfileEvents::increment(ProfileEvents::Merge); + String local_tmp_prefix; if (global_ctx->need_prefix) { @@ -200,8 +201,6 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() if (isTTLMergeType(global_ctx->future_part->merge_type) && global_ctx->ttl_merges_blocker->isCancelled()) throw Exception(ErrorCodes::ABORTED, "Cancelled merging parts with TTL"); - ProfileEvents::increment(ProfileEvents::Merge); - LOG_DEBUG(ctx->log, "Merging {} parts: from {} to {} into {} with storage {}", global_ctx->future_part->parts.size(), global_ctx->future_part->parts.front()->name, @@ -810,6 +809,9 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c /// Print overall profiling info. NOTE: it may duplicates previous messages { + ProfileEvents::increment(ProfileEvents::MergedColumns, global_ctx->merging_columns.size()); + ProfileEvents::increment(ProfileEvents::GatheredColumns, global_ctx->gathering_columns.size()); + double elapsed_seconds = global_ctx->merge_list_element_ptr->watch.elapsedSeconds(); LOG_DEBUG(ctx->log, "Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.", @@ -1021,8 +1023,8 @@ bool MergeTask::execute() /// Stage is finished, need to initialize context for the next stage and update profile events. 
UInt64 current_elapsed_ms = global_ctx->merge_list_element_ptr->watch.elapsedMilliseconds(); - UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapesed_ms; - global_ctx->prev_elapesed_ms = current_elapsed_ms; + UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapsed_ms; + global_ctx->prev_elapsed_ms = current_elapsed_ms; ProfileEvents::increment(current_stage->getTotalTimeProfileEvent(), stage_elapsed_ms); ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 979c85482e5..38ccc287187 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -203,7 +203,7 @@ private: bool need_prefix; scope_guard temporary_directory_lock; - UInt64 prev_elapesed_ms{0}; + UInt64 prev_elapsed_ms{0}; }; using GlobalRuntimeContextPtr = std::shared_ptr; diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 73084f487b9..56f68fd265a 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -254,6 +254,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit LOG_ERROR(log, "{}. Data after mutation is not byte-identical to data on another replicas. " "We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); + mutate_task->updateProfileEvents(); write_part_log(ExecutionStatus::fromCurrentException("", true)); if (storage.getSettings()->detach_not_byte_identical_parts) @@ -281,6 +282,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit */ finish_callback = [storage_ptr = &storage]() { storage_ptr->merge_selecting_task->schedule(); }; ProfileEvents::increment(ProfileEvents::ReplicatedPartMutations); + mutate_task->updateProfileEvents(); write_part_log({}); return true; diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 9aec074deae..10461eb5942 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -102,6 +102,7 @@ bool MutatePlainMergeTreeTask::executeStep() transaction.commit(); storage.updateMutationEntriesErrors(future_part, true, ""); + mutate_task->updateProfileEvents(); write_part_log({}); state = State::NEED_FINISH; @@ -114,6 +115,7 @@ bool MutatePlainMergeTreeTask::executeStep() PreformattedMessage exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false); LOG_ERROR(getLogger("MutatePlainMergeTreeTask"), exception_message); storage.updateMutationEntriesErrors(future_part, false, exception_message.text); + mutate_task->updateProfileEvents(); write_part_log(ExecutionStatus::fromCurrentException("", true)); tryLogCurrentException(__PRETTY_FUNCTION__); return false; diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index fe78964a241..f4af38d3b67 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -40,7 +40,10 @@ namespace ProfileEvents { extern const Event MutationTotalParts; extern const Event MutationUntouchedParts; - extern const Event MutationTimeMilliseconds; + extern const Event MutationTotalMilliseconds; + extern const Event MutationExecuteMilliseconds; + extern const Event MutationAllPartColumns; + extern const Event 
MutationSomePartColumns; extern const Event MutateTaskProjectionsCalculationMicroseconds; } @@ -1049,6 +1052,7 @@ struct MutationContext /// Whether we need to count lightweight delete rows in this mutation bool count_lightweight_deleted_rows; + UInt64 execute_elapsed_ns = 0; }; using MutationContextPtr = std::shared_ptr; @@ -2020,6 +2024,9 @@ MutateTask::MutateTask( bool MutateTask::execute() { + Stopwatch watch; + SCOPE_EXIT({ ctx->execute_elapsed_ns += watch.elapsedNanoseconds(); }); + switch (state) { case State::NEED_PREPARE: @@ -2037,9 +2044,6 @@ bool MutateTask::execute() if (task->executeStep()) return true; - auto total_elapsed_ms = (*ctx->mutate_entry)->watch.elapsedMilliseconds(); - ProfileEvents::increment(ProfileEvents::MutationTimeMilliseconds, total_elapsed_ms); - // The `new_data_part` is a shared pointer and must be moved to allow // part deletion in case it is needed in `MutateFromLogEntryTask::finalize`. // @@ -2056,6 +2060,15 @@ bool MutateTask::execute() return false; } +void MutateTask::updateProfileEvents() const +{ + UInt64 total_elapsed_ms = (*ctx->mutate_entry)->watch.elapsedMilliseconds(); + UInt64 execute_elapsed_ms = ctx->execute_elapsed_ns / 1000000UL; + + ProfileEvents::increment(ProfileEvents::MutationTotalMilliseconds, total_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MutationExecuteMilliseconds, execute_elapsed_ms); +} + static bool canSkipConversionToNullable(const MergeTreeDataPartPtr & part, const MutationCommand & command) { if (command.type != MutationCommand::READ_COLUMN) @@ -2118,13 +2131,13 @@ static bool canSkipMutationCommandForPart(const MergeTreeDataPartPtr & part, con bool MutateTask::prepare() { + ProfileEvents::increment(ProfileEvents::MutationTotalParts); MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry); if (ctx->future_part->parts.size() != 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to mutate {} parts, not one. " "This is a bug.", ctx->future_part->parts.size()); - ProfileEvents::increment(ProfileEvents::MutationTotalParts); ctx->num_mutations = std::make_unique(CurrentMetrics::PartMutation); auto context_for_reading = Context::createCopy(ctx->context); @@ -2291,6 +2304,7 @@ bool MutateTask::prepare() ctx->new_data_part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::REMOVE_BLOBS; task = std::make_unique(ctx); + ProfileEvents::increment(ProfileEvents::MutationAllPartColumns); } else /// TODO: check that we modify only non-key columns in this case. 
{ @@ -2330,6 +2344,7 @@ bool MutateTask::prepare() ctx->new_data_part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::ASK_KEEPER; task = std::make_unique(ctx); + ProfileEvents::increment(ProfileEvents::MutationSomePartColumns); } return true; diff --git a/src/Storages/MergeTree/MutateTask.h b/src/Storages/MergeTree/MutateTask.h index dc22b90f0e9..08427bff6d8 100644 --- a/src/Storages/MergeTree/MutateTask.h +++ b/src/Storages/MergeTree/MutateTask.h @@ -39,6 +39,7 @@ public: bool need_prefix_); bool execute(); + void updateProfileEvents() const; std::future getFuture() { diff --git a/tests/queries/0_stateless/02378_part_log_profile_events.sql b/tests/queries/0_stateless/02378_part_log_profile_events.sql index 38d3f8b4c05..eec76d6f50e 100644 --- a/tests/queries/0_stateless/02378_part_log_profile_events.sql +++ b/tests/queries/0_stateless/02378_part_log_profile_events.sql @@ -39,7 +39,7 @@ SYSTEM FLUSH LOGS; SELECT if(count() == 2, 'Ok', 'Error: ' || toString(count())), - if(SUM(ProfileEvents['MergedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows']))), + if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))), if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen']))) FROM system.part_log WHERE event_time > now() - INTERVAL 10 MINUTE diff --git a/tests/queries/0_stateless/03221_merge_profile_events.reference b/tests/queries/0_stateless/03221_merge_profile_events.reference new file mode 100644 index 00000000000..729e53eae79 --- /dev/null +++ b/tests/queries/0_stateless/03221_merge_profile_events.reference @@ -0,0 +1,3 @@ +Horizontal 1 20000 3 0 480000 1 1 1 1 +Vertical 1 20000 1 2 480000 1 1 1 1 1 1 +Vertical 2 20020 4 2 480660 1 1 1 1 1 1 1 1 diff --git a/tests/queries/0_stateless/03221_merge_profile_events.sql b/tests/queries/0_stateless/03221_merge_profile_events.sql new file mode 100644 index 00000000000..787aff93ffc --- /dev/null +++ b/tests/queries/0_stateless/03221_merge_profile_events.sql @@ -0,0 +1,88 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +CREATE TABLE t_merge_profile_events_1 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_1 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0 +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_1' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +CREATE TABLE t_merge_profile_events_2 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO 
t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_2 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_2' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +DROP TABLE IF EXISTS t_merge_profile_events_3; + +CREATE TABLE t_merge_profile_events_3 (id UInt64, v1 UInt64, v2 UInt64, PROJECTION p (SELECT sum(v1), sum(v2) GROUP BY id % 10)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_3 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageTotalMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageExecuteMilliseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_3' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_3; diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.reference b/tests/queries/0_stateless/03221_mutate_profile_events.reference new file mode 100644 index 00000000000..d094c37ff88 --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.reference @@ -0,0 +1,2 @@ +3 2 1 10000 160000 0 1 1 1 +4 2 1 10000 320000 1 0 1 1 diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.sql b/tests/queries/0_stateless/03221_mutate_profile_events.sql new file mode 100644 index 00000000000..e9f7f9670bd --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.sql @@ -0,0 +1,33 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_mutate_profile_events; + +CREATE TABLE t_mutate_profile_events (key UInt64, id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id PARTITION BY key +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_mutate_profile_events SELECT 1, number, number, number FROM numbers(10000); +INSERT INTO t_mutate_profile_events SELECT 2, number, number, number FROM 
numbers(10000); + +SET mutations_sync = 2; + +ALTER TABLE t_mutate_profile_events UPDATE v1 = 1000 WHERE key = 1; +ALTER TABLE t_mutate_profile_events DELETE WHERE key = 2 AND v2 % 10 = 0; + +SYSTEM FLUSH LOGS; + +SELECT + splitByChar('_', part_name)[-1] AS version, + sum(ProfileEvents['MutationTotalParts']), + sum(ProfileEvents['MutationUntouchedParts']), + sum(ProfileEvents['MutatedRows']), + sum(ProfileEvents['MutatedUncompressedBytes']), + sum(ProfileEvents['MutationAllPartColumns']), + sum(ProfileEvents['MutationSomePartColumns']), + sum(ProfileEvents['MutationTotalMilliseconds']) > 0, + sum(ProfileEvents['MutationExecuteMilliseconds']) > 0, +FROM system.part_log +WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart' +GROUP BY version ORDER BY version; + +DROP TABLE IF EXISTS t_mutate_profile_events From 34bbf3d9c49f0f563eee8973ac3c3bae0e64c6de Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 8 Aug 2024 17:12:27 +0200 Subject: [PATCH 1941/2361] Use async inserts --- docker/test/stateless/run.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 830a02a64a3..c81f33ace01 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -192,8 +192,9 @@ ENGINE = MergeTree ORDER BY tuple()" # create minio log webhooks for both audit and server logs -./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" -./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" +# use async inserts to avoid creating too many parts +./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" max_retries=100 retry=1 @@ -375,6 +376,12 @@ done # collect minio audit and server logs +has_async_inserts=$(clickhouse-client -q "SELECT count() FROM system.asynchronous_inserts WHERE table = 'minio_audit_logs' OR table = 'minio_server_logs'") +if [[ has_async_inserts -eq 1 ]]; then + echo "Waiting for async inserts to flush" + sleep 5 +fi + clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" From 33ba78ee42bc85690dce69c82fd51d723a6d2eab Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 8 Aug 2024 17:47:12 +0200 Subject: [PATCH 1942/2361] Update test.py --- tests/integration/test_drop_is_lock_free/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_drop_is_lock_free/test.py 
b/tests/integration/test_drop_is_lock_free/test.py index 1bb8767a9a0..3855bc21f90 100644 --- a/tests/integration/test_drop_is_lock_free/test.py +++ b/tests/integration/test_drop_is_lock_free/test.py @@ -176,7 +176,7 @@ def test_query_is_permanent(transaction, permanent, exclusive_table): select_handler = node.get_query_request( f""" - SELECT sleepEachRow(3) FROM {exclusive_table} SETTINGS function_sleep_max_microseconds_per_block = 0; + SELECT sleepEachRow(3) FROM {exclusive_table} SETTINGS function_sleep_max_microseconds_per_block = 0, max_threads=1; """, query_id=query_id, ) From e264ecd2011c7860c4898208a53555d676222bbb Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Aug 2024 16:10:46 +0000 Subject: [PATCH 1943/2361] fix skip of parts in mutation with analyzer --- src/Interpreters/MutationsInterpreter.cpp | 11 +++++----- src/Interpreters/MutationsInterpreter.h | 1 - src/Storages/MergeTree/MutateTask.cpp | 2 +- ...3221_mutation_analyzer_skip_part.reference | 4 ++++ .../03221_mutation_analyzer_skip_part.sql | 21 +++++++++++++++++++ 5 files changed, 31 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/03221_mutation_analyzer_skip_part.reference create mode 100644 tests/queries/0_stateless/03221_mutation_analyzer_skip_part.sql diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 57ad5caa4c7..5de33b3ed86 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -146,7 +146,6 @@ ColumnDependencies getAllColumnDependencies( bool isStorageTouchedByMutations( - MergeTreeData & storage, MergeTreeData::DataPartPtr source_part, const StorageMetadataPtr & metadata_snapshot, const std::vector & commands, @@ -155,7 +154,9 @@ bool isStorageTouchedByMutations( if (commands.empty()) return false; + auto storage_from_part = std::make_shared(source_part); bool all_commands_can_be_skipped = true; + for (const auto & command : commands) { if (command.type == MutationCommand::APPLY_DELETED_MASK) @@ -170,7 +171,7 @@ bool isStorageTouchedByMutations( if (command.partition) { - const String partition_id = storage.getPartitionIDFromQuery(command.partition, context); + const String partition_id = storage_from_part->getPartitionIDFromQuery(command.partition, context); if (partition_id == source_part->info.partition_id) all_commands_can_be_skipped = false; } @@ -184,20 +185,18 @@ bool isStorageTouchedByMutations( if (all_commands_can_be_skipped) return false; - auto storage_from_part = std::make_shared(source_part); - std::optional interpreter_select_query; BlockIO io; if (context->getSettingsRef().allow_experimental_analyzer) { - auto select_query_tree = prepareQueryAffectedQueryTree(commands, storage.shared_from_this(), context); + auto select_query_tree = prepareQueryAffectedQueryTree(commands, storage_from_part, context); InterpreterSelectQueryAnalyzer interpreter(select_query_tree, context, SelectQueryOptions().ignoreLimits()); io = interpreter.execute(); } else { - ASTPtr select_query = prepareQueryAffectedAST(commands, storage.shared_from_this(), context); + ASTPtr select_query = prepareQueryAffectedAST(commands, storage_from_part, context); /// Interpreter must be alive, when we use result of execute() method. /// For some reason it may copy context and give it into ExpressionTransform /// after that we will use context from destroyed stack frame in our stream. 
diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 6aaa233cda3..57863e9ae73 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -19,7 +19,6 @@ using QueryPipelineBuilderPtr = std::unique_ptr; /// Return false if the data isn't going to be changed by mutations. bool isStorageTouchedByMutations( - MergeTreeData & storage, MergeTreeData::DataPartPtr source_part, const StorageMetadataPtr & metadata_snapshot, const std::vector & commands, diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 9a775db73e2..5fcf699de59 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2134,7 +2134,7 @@ bool MutateTask::prepare() ctx->commands_for_part.emplace_back(command); if (ctx->source_part->isStoredOnDisk() && !isStorageTouchedByMutations( - *ctx->data, ctx->source_part, ctx->metadata_snapshot, ctx->commands_for_part, context_for_reading)) + ctx->source_part, ctx->metadata_snapshot, ctx->commands_for_part, context_for_reading)) { NameSet files_to_copy_instead_of_hardlinks; auto settings_ptr = ctx->data->getSettings(); diff --git a/tests/queries/0_stateless/03221_mutation_analyzer_skip_part.reference b/tests/queries/0_stateless/03221_mutation_analyzer_skip_part.reference new file mode 100644 index 00000000000..68f8708dbaf --- /dev/null +++ b/tests/queries/0_stateless/03221_mutation_analyzer_skip_part.reference @@ -0,0 +1,4 @@ +1_1_1_0_3 10000 +1_1_1_0_4 0 +2_2_2_0_3 0 +2_2_2_0_4 10000 diff --git a/tests/queries/0_stateless/03221_mutation_analyzer_skip_part.sql b/tests/queries/0_stateless/03221_mutation_analyzer_skip_part.sql new file mode 100644 index 00000000000..bf9a10e2af4 --- /dev/null +++ b/tests/queries/0_stateless/03221_mutation_analyzer_skip_part.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t_mutate_skip_part; + +CREATE TABLE t_mutate_skip_part (key UInt64, id UInt64, v1 UInt64, v2 UInt64) ENGINE = MergeTree ORDER BY id PARTITION BY key; + +INSERT INTO t_mutate_skip_part SELECT 1, number, number, number FROM numbers(10000); +INSERT INTO t_mutate_skip_part SELECT 2, number, number, number FROM numbers(10000); + +SET mutations_sync = 2; + +ALTER TABLE t_mutate_skip_part UPDATE v1 = 1000 WHERE key = 1; +ALTER TABLE t_mutate_skip_part DELETE WHERE key = 2 AND v2 % 10 = 0; + +SYSTEM FLUSH LOGS; + +-- If part is skipped in mutation and hardlinked then read_rows must be 0. +SELECT part_name, read_rows +FROM system.part_log +WHERE database = currentDatabase() AND table = 't_mutate_skip_part' AND event_type = 'MutatePart' +ORDER BY part_name; + +DROP TABLE IF EXISTS t_mutate_skip_part; From f9f13a8e415e9f2130281d069c28dd6e9a68be75 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 8 Aug 2024 16:27:25 +0000 Subject: [PATCH 1944/2361] enable setting optimize_functions_to_subcolumns by default --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index f04c696645a..f0a8d0c2647 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -605,7 +605,7 @@ class IColumn; M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. 
Currently it's not beneficial for numeric types.", 0) \ M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \ M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \ - M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ + M(Bool, optimize_functions_to_subcolumns, true, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \ M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \ M(Bool, optimize_append_index, false, "Use constraints in order to append index condition (indexHint)", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 41e4ac2e154..b00d0964e01 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -83,6 +83,7 @@ static std::initializer_list Date: Thu, 8 Aug 2024 18:31:24 +0200 Subject: [PATCH 1945/2361] Fix --- ...ckhouse_local_interactive_exception.expect | 29 ----- ...ckhouse_local_interactive_exception.python | 110 ++++++++++++++++++ ...ouse_local_interactive_exception.reference | 1 + 3 files changed, 111 insertions(+), 29 deletions(-) delete mode 100755 tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.expect create mode 100644 tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python diff --git a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.expect b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.expect deleted file mode 100755 index add977c4fce..00000000000 --- a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.expect +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/expect -f - -set basedir [file dirname $argv0] -set basename [file tail $argv0] -if {[info exists env(CLICKHOUSE_TMP)]} { - set CLICKHOUSE_TMP $env(CLICKHOUSE_TMP) -} else { - set CLICKHOUSE_TMP "." 
-} -exp_internal -f $CLICKHOUSE_TMP/$basename.debuglog 0 - -log_user 0 -set timeout 20 -match_max 100000 - -expect_after { - -i $any_spawn_id eof { exp_continue } - -i $any_spawn_id timeout { exit 1 } -} - -spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_LOCAL --disable_suggestion" - -expect ":) " -send -- "insert into table function null() format TSV some trash here 123 \n 456\r" -expect "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -expect ":) " - -send -- "" -expect eof diff --git a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python new file mode 100644 index 00000000000..03f8d493ec2 --- /dev/null +++ b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python @@ -0,0 +1,110 @@ +import pty +import os +import shlex +import time +import multiprocessing + +COMPLETION_TIMEOUT_SECONDS = 30 +DEBUG_LOG = os.path.join( + os.environ["CLICKHOUSE_TMP"], + os.path.basename(os.path.abspath(__file__)).strip(".python") + ".debuglog", +) + +STATE_MAP = { + -1: "process did not start", + 0: "all good", + 1: "process started and said ':)'", + 2: "prompt search was started", + 3: "prompt is missing", +} + + +def run_with_timeout(func, args, timeout): + for _ in range(5): + state = multiprocessing.Value("i", -1) + process = multiprocessing.Process( + target=func, args=args, kwargs={"state": state} + ) + process.start() + process.join(timeout) + + if state.value in (0, 3): + return + + if process.is_alive(): + process.terminate() + + if state.value == -1: + continue + + print(f"Timeout, state: {STATE_MAP[state.value]}") + return + + +def test_completion(program, argv, prompt, state=None): + shell_pid, master = pty.fork() + if shell_pid == 0: + os.execv(program, argv) + else: + try: + debug_log_fd = open(DEBUG_LOG, "a") + + output_b = os.read(master, 4096) + output = output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + while not ":)" in output: + output_b = os.read(master, 4096) + output += output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + + state.value = 1 + + os.write(master, bytes(prompt.encode())) + output_b = os.read(master, 4096) + output = output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + while not prompt[:-10] in output: + output_b = os.read(master, 4096) + output += output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + + time.sleep(0.01) + os.write(master, b"\r") + + state.value = 2 + + output_b = os.read(master, 4096) + output = output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + while not "CANNOT_PARSE_INPUT_ASSERTION_FAILED" in output: + output_b = os.read(master, 4096) + output += output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + + while not ":)" in output: + output_b = os.read(master, 4096) + output += output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + + print("OK") + state.value = 0 + finally: + os.close(master) + debug_log_fd.close() + + +if __name__ == "__main__": + clickhouse_local = os.environ["CLICKHOUSE_LOCAL"] + args = shlex.split(clickhouse_local) + args.append("--disable_suggestion") + args.append("--highlight=0") + run_with_timeout( + test_completion, [args[0], args, "insert into table function null() format TSV some trash here 123 \n 456"], COMPLETION_TIMEOUT_SECONDS + ) diff --git 
a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.reference b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.reference index e69de29bb2d..d86bac9de59 100644 --- a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.reference +++ b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.reference @@ -0,0 +1 @@ +OK From d52d599af4db1c779551b6788a5505e521fe3c31 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 18:34:02 +0200 Subject: [PATCH 1946/2361] Annotations --- ...ter_skip_virtual_columns_with_non_deterministic_functions.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql index 6ef8c5a8656..6714a069246 100644 --- a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql +++ b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql @@ -1,3 +1,4 @@ +-- Tags: long SET max_rows_to_read = 0; create table test (number UInt64) engine=MergeTree order by number; insert into test select * from numbers(50000000); From 34d2c71eadfefd79e3cfb62d46d156cdbdaab681 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 8 Aug 2024 18:50:16 +0200 Subject: [PATCH 1947/2361] Cleanup --- ...ckhouse_local_interactive_exception.python | 44 +++++++------------ 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python index 03f8d493ec2..9527991c36e 100644 --- a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python +++ b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python @@ -40,6 +40,18 @@ def run_with_timeout(func, args, timeout): print(f"Timeout, state: {STATE_MAP[state.value]}") return +def expect(text, master, debug_log_fd): + output_b = os.read(master, 4096) + output = output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + while not text in output: + output_b = os.read(master, 4096) + output += output_b.decode() + debug_log_fd.write(repr(output_b) + "\n") + debug_log_fd.flush() + + return output def test_completion(program, argv, prompt, state=None): shell_pid, master = pty.fork() @@ -49,43 +61,17 @@ def test_completion(program, argv, prompt, state=None): try: debug_log_fd = open(DEBUG_LOG, "a") - output_b = os.read(master, 4096) - output = output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() - while not ":)" in output: - output_b = os.read(master, 4096) - output += output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() + expect(":)", master, debug_log_fd) state.value = 1 - os.write(master, bytes(prompt.encode())) - output_b = os.read(master, 4096) - output = output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() - while not prompt[:-10] in output: - output_b = os.read(master, 4096) - output += output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() + expect(prompt[:-10], master, debug_log_fd) time.sleep(0.01) os.write(master, b"\r") - state.value = 2 - output_b = os.read(master, 4096) - output = output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - 
debug_log_fd.flush() - while not "CANNOT_PARSE_INPUT_ASSERTION_FAILED" in output: - output_b = os.read(master, 4096) - output += output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() + output = expect("CANNOT_PARSE_INPUT_ASSERTION_FAILED", master, debug_log_fd) while not ":)" in output: output_b = os.read(master, 4096) From cd69fa5a4c6f7128c473c780c797d60dc314b2ee Mon Sep 17 00:00:00 2001 From: Jacob Reckhard Date: Thu, 8 Aug 2024 11:20:55 -0600 Subject: [PATCH 1948/2361] fixed typos --- docs/en/sql-reference/functions/geo/polygon.md | 2 +- src/Functions/geometryConverters.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/functions/geo/polygon.md b/docs/en/sql-reference/functions/geo/polygon.md index c054e05d39c..be9e9810626 100644 --- a/docs/en/sql-reference/functions/geo/polygon.md +++ b/docs/en/sql-reference/functions/geo/polygon.md @@ -197,7 +197,7 @@ The function returns a ClickHouse internal representation of the multilinestring ### Example ```sql -SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))')); +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))'); ``` ```response diff --git a/src/Functions/geometryConverters.h b/src/Functions/geometryConverters.h index bf975017a6d..f1156d81f01 100644 --- a/src/Functions/geometryConverters.h +++ b/src/Functions/geometryConverters.h @@ -471,7 +471,7 @@ static void callOnGeometryDataType(DataTypePtr type, F && f) return f(ConverterType>()); /// We should take the name into consideration to avoid ambiguity. - /// Because for example both MultiLineString and Polygon are resolved to Array(Tuple(Point)). + /// Because for example both MultiLineString and Polygon are resolved to Array(Array(Point)). 
else if (factory.get("MultiLineString")->equals(*type) && type->getCustomName() && type->getCustomName()->getName() == "MultiLineString") return f(ConverterType>()); From e55d81fd427af0a62e3534d838d64eb847419248 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Thu, 8 Aug 2024 19:38:48 +0200 Subject: [PATCH 1949/2361] fix test --- .../0_stateless/02808_custom_disk_with_user_defined_name.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh index b62adea5683..63fa60bd548 100755 --- a/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh +++ b/tests/queries/0_stateless/02808_custom_disk_with_user_defined_name.sh @@ -13,7 +13,7 @@ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS disk = disk(name = 's3_disk', type = cache, max_size = '100Ki', path = ${CLICKHOUSE_TEST_UNIQUE_NAME}, disk = s3_disk); -""" 2>&1 | grep -q "The disk \`s3_disk\` is already exist and described by the config" && echo 'OK' || echo 'FAIL' +""" 2>&1 | grep -q "Disk \`s3_disk\` already exists and is described by the config" && echo 'OK' || echo 'FAIL' disk_name="${CLICKHOUSE_TEST_UNIQUE_NAME}" From 2bd7606280b0714febc1d034233ee0fb9a3532a6 Mon Sep 17 00:00:00 2001 From: Jacob Reckhard Date: Thu, 8 Aug 2024 11:39:06 -0600 Subject: [PATCH 1950/2361] added additional tests --- .../03215_multilinestring_geometry.reference | 10 ++++++++++ .../0_stateless/03215_multilinestring_geometry.sql | 12 ++++++++++++ 2 files changed, 22 insertions(+) diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.reference b/tests/queries/0_stateless/03215_multilinestring_geometry.reference index f4c5774018e..f20e21d86f6 100644 --- a/tests/queries/0_stateless/03215_multilinestring_geometry.reference +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.reference @@ -15,3 +15,13 @@ MULTILINESTRING((1 1,2 2,3 3,1 1),(1 0,2 0,3 0)) WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); POLYGON((1 1,2 2,3 3,1 1)) String [[(1,1),(2,2),(3,3),(1,1)]] Polygon +-- Non constant tests + +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String) Engine = Memory; +INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. 
+-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t; +POLYGON((1 1,2 2,3 3,1 1)) [[(1,1),(2,2),(3,3),(1,1)]] 1 +POLYGON((1 1,2 2,3 3,1 1),(1 0,2 0,3 0,1 0)) [[(1,1),(2,2),(3,3),(1,1)],[(1,0),(2,0),(3,0)]] 1 diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.sql b/tests/queries/0_stateless/03215_multilinestring_geometry.sql index 71344920c52..6081e081fb3 100644 --- a/tests/queries/0_stateless/03215_multilinestring_geometry.sql +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.sql @@ -10,3 +10,15 @@ SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, -- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); + +-- Non constant tests + +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String) Engine = Memory; +INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); + +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. +-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t; + + From b0b150c599052d596b8c746a31ba50019601b6a0 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 18:29:20 +0000 Subject: [PATCH 1951/2361] Remove workarounds for solved bugs --- .../02864_statistics_predicates.reference | 6 ++++++ .../0_stateless/02864_statistics_predicates.sql | 12 ++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02864_statistics_predicates.reference b/tests/queries/0_stateless/02864_statistics_predicates.reference index 1c2abd47aaf..ffbd7269e05 100644 --- a/tests/queries/0_stateless/02864_statistics_predicates.reference +++ b/tests/queries/0_stateless/02864_statistics_predicates.reference @@ -23,6 +23,7 @@ u64 and < 70 70 70 +70 f64 and = 10 10 @@ -35,6 +36,8 @@ f64 and = 10 10 10 +10 +0 0 0 0 @@ -50,6 +53,8 @@ f64 and < 70 70 70 +70 +80 80 80 80 @@ -66,6 +71,7 @@ dt and < 10000 10000 10000 +10000 70 70 70 diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql index 3e754dfb1de..779116cf19a 100644 --- a/tests/queries/0_stateless/02864_statistics_predicates.sql +++ b/tests/queries/0_stateless/02864_statistics_predicates.sql @@ -92,7 +92,7 @@ SELECT count(*) FROM tab WHERE u64_count_min < 7.7; SELECT count(*) FROM tab WHERE u64_uniq < 7.7; SELECT count(*) FROM tab WHERE u64 < '7'; --- SELECT count(*) FROM tab WHERE u64_tdigest < '7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE u64_tdigest < '7'; SELECT count(*) FROM tab WHERE u64_count_min < '7'; SELECT count(*) FROM tab WHERE u64_uniq < '7'; @@ -116,12 +116,12 @@ SELECT count(*) FROM tab WHERE f64_count_min = 7.7; SELECT count(*) FROM tab WHERE f64_uniq = 7.7; SELECT count(*) FROM tab WHERE f64 = '7'; --- SELECT count(*) FROM tab WHERE f64_tdigest = '7'; -- BOOM 
(#67742) +SELECT count(*) FROM tab WHERE f64_tdigest = '7'; SELECT count(*) FROM tab WHERE f64_count_min = '7'; SELECT count(*) FROM tab WHERE f64_uniq = '7'; SELECT count(*) FROM tab WHERE f64 = '7.7'; --- SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; @@ -138,12 +138,12 @@ SELECT count(*) FROM tab WHERE f64_count_min < 7.7; SELECT count(*) FROM tab WHERE f64_uniq < 7.7; SELECT count(*) FROM tab WHERE f64 < '7'; --- SELECT count(*) FROM tab WHERE f64_tdigest < '7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_tdigest < '7'; SELECT count(*) FROM tab WHERE f64_count_min < '7'; SELECT count(*) FROM tab WHERE f64_uniq < '7'; SELECT count(*) FROM tab WHERE f64 < '7.7'; --- SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; @@ -164,7 +164,7 @@ SELECT count(*) FROM tab WHERE dt_uniq = 7; SELECT 'dt and <'; SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; --- SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; -- BOOM (#67742) +SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; From 3062f221eb537a49f61bdc4ee058e011ed4d2246 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 8 Aug 2024 18:37:31 +0000 Subject: [PATCH 1952/2361] Automatic style fix --- ...02164_clickhouse_local_interactive_exception.python | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python index 9527991c36e..5ca7ac4e286 100644 --- a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python +++ b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python @@ -40,6 +40,7 @@ def run_with_timeout(func, args, timeout): print(f"Timeout, state: {STATE_MAP[state.value]}") return + def expect(text, master, debug_log_fd): output_b = os.read(master, 4096) output = output_b.decode() @@ -53,6 +54,7 @@ def expect(text, master, debug_log_fd): return output + def test_completion(program, argv, prompt, state=None): shell_pid, master = pty.fork() if shell_pid == 0: @@ -92,5 +94,11 @@ if __name__ == "__main__": args.append("--disable_suggestion") args.append("--highlight=0") run_with_timeout( - test_completion, [args[0], args, "insert into table function null() format TSV some trash here 123 \n 456"], COMPLETION_TIMEOUT_SECONDS + test_completion, + [ + args[0], + args, + "insert into table function null() format TSV some trash here 123 \n 456", + ], + COMPLETION_TIMEOUT_SECONDS, ) From 343accc92d86748b975f845e1154837d585dcd54 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 20:41:11 +0200 Subject: [PATCH 1953/2361] Disable randomization of `trace_profile_events` in clickhouse-test --- tests/clickhouse-test | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index ffdd6169777..907d773337a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -836,7 +836,6 @@ class SettingsRandomizer: "cross_join_min_bytes_to_compress": lambda: 
random.choice([0, 1, 100000000]), "min_external_table_block_size_bytes": lambda: random.choice([0, 1, 100000000]), "max_parsing_threads": lambda: random.choice([0, 1, 10]), - "trace_profile_events": lambda: random.randint(0, 1), "optimize_functions_to_subcolumns": lambda: random.randint(0, 1), } From fe234bd88f3eb1dca8cdb8b217606abfbcea1d54 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 20:56:51 +0200 Subject: [PATCH 1954/2361] Fix test --- tests/queries/0_stateless/03218_materialize_msan.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03218_materialize_msan.sql b/tests/queries/0_stateless/03218_materialize_msan.sql index b41300ea1e3..7e7043e687b 100644 --- a/tests/queries/0_stateless/03218_materialize_msan.sql +++ b/tests/queries/0_stateless/03218_materialize_msan.sql @@ -1,3 +1,5 @@ +SET enable_analyzer = 1; + SELECT materialize([(NULL, '11\01111111\011111', '1111')]) AS t, (t[1048576]).2, From 91ff9f40a2cea46d2ddf14628b16a0ddf923a3cc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 21:04:10 +0200 Subject: [PATCH 1955/2361] Misc --- tests/queries/0_stateless/01603_read_with_backoff_bug.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql index 278817b1d48..8a6fa9b7845 100644 --- a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql +++ b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql @@ -4,6 +4,7 @@ set enable_filesystem_cache=0; set enable_filesystem_cache_on_write_operations=0; set max_rows_to_read = '30M'; + drop table if exists t; create table t (x UInt64, s String) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; From bc20b637eae8bba72ecfab5256c7eace40586976 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 8 Aug 2024 21:47:23 +0200 Subject: [PATCH 1956/2361] Add missing file --- .../test_broken_projections/config.d/dont_start_broken.xml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 tests/integration/test_broken_projections/config.d/dont_start_broken.xml diff --git a/tests/integration/test_broken_projections/config.d/dont_start_broken.xml b/tests/integration/test_broken_projections/config.d/dont_start_broken.xml new file mode 100644 index 00000000000..9603cdc7e3e --- /dev/null +++ b/tests/integration/test_broken_projections/config.d/dont_start_broken.xml @@ -0,0 +1,6 @@ + + + + 0 + + From 6e3df43ae3c13c037fa8e88ae7d102f94bb7398a Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 8 Aug 2024 22:09:58 +0200 Subject: [PATCH 1957/2361] Cleanup --- ...64_clickhouse_local_interactive_exception.python | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python index 5ca7ac4e286..4c2df9556a1 100644 --- a/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python +++ b/tests/queries/0_stateless/02164_clickhouse_local_interactive_exception.python @@ -41,11 +41,7 @@ def run_with_timeout(func, args, timeout): return -def expect(text, master, debug_log_fd): - output_b = os.read(master, 4096) - output = output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() +def expect(text, master, debug_log_fd, output=""): while not text in output: output_b = os.read(master, 4096) output += 
output_b.decode() @@ -74,12 +70,7 @@ def test_completion(program, argv, prompt, state=None): state.value = 2 output = expect("CANNOT_PARSE_INPUT_ASSERTION_FAILED", master, debug_log_fd) - - while not ":)" in output: - output_b = os.read(master, 4096) - output += output_b.decode() - debug_log_fd.write(repr(output_b) + "\n") - debug_log_fd.flush() + expect(":)", master, debug_log_fd, output) print("OK") state.value = 0 From 6f346c161463330c31d0bce2566df6875cf1d614 Mon Sep 17 00:00:00 2001 From: Jacob Reckhard Date: Thu, 8 Aug 2024 15:20:07 -0600 Subject: [PATCH 1958/2361] added order for repeatablility of test --- .../03215_multilinestring_geometry.reference | 11 +++++++---- .../0_stateless/03215_multilinestring_geometry.sql | 10 ++++++---- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.reference b/tests/queries/0_stateless/03215_multilinestring_geometry.reference index f20e21d86f6..9702dd6d6f8 100644 --- a/tests/queries/0_stateless/03215_multilinestring_geometry.reference +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.reference @@ -17,11 +17,14 @@ SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); POLYGON((1 1,2 2,3 3,1 1)) String [[(1,1),(2,2),(3,3),(1,1)]] Polygon -- Non constant tests -CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String) Engine = Memory; -INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); -INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +DROP TABLE IF EXISTS t; +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String, ord Float64) Engine = Memory; +INSERT INTO t (ord, shape, wkt_string) VALUES (1, [[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (2, [[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (3, [[(1, 0), (2, 1), (3, 0), (4, 1), (5, 0), (6, 1), (7, 0), (8, 1), (9, 0), (10, 1)]], 'MULTILINESTRING ((1 0, 2 1, 3 0, 4 1, 5 0, 6 1, 7 0, 8 1, 9 0, 10 1))'); -- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. 
-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) -select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t; +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t order by ord; POLYGON((1 1,2 2,3 3,1 1)) [[(1,1),(2,2),(3,3),(1,1)]] 1 POLYGON((1 1,2 2,3 3,1 1),(1 0,2 0,3 0,1 0)) [[(1,1),(2,2),(3,3),(1,1)],[(1,0),(2,0),(3,0)]] 1 +POLYGON((1 0,2 1,3 0,4 1,5 0,6 1,7 0,8 1,9 0,10 1,1 0)) [[(1,0),(2,1),(3,0),(4,1),(5,0),(6,1),(7,0),(8,1),(9,0),(10,1)]] 1 diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.sql b/tests/queries/0_stateless/03215_multilinestring_geometry.sql index 6081e081fb3..cf4ef15f63d 100644 --- a/tests/queries/0_stateless/03215_multilinestring_geometry.sql +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.sql @@ -13,12 +13,14 @@ SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); -- Non constant tests -CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String) Engine = Memory; -INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); -INSERT INTO t (shape, wkt_string) VALUES ([[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +DROP TABLE IF EXISTS t; +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String, ord Float64) Engine = Memory; +INSERT INTO t (ord, shape, wkt_string) VALUES (1, [[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (2, [[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (3, [[(1, 0), (2, 1), (3, 0), (4, 1), (5, 0), (6, 1), (7, 0), (8, 1), (9, 0), (10, 1)]], 'MULTILINESTRING ((1 0, 2 1, 3 0, 4 1, 5 0, 6 1, 7 0, 8 1, 9 0, 10 1))'); -- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. 
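-- A minimal standalone sketch of the round-trip exercised by this test (it only uses functions
-- and a WKT literal that already appear above; the result follows the reference file):
--   SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))') AS shape, wkt(shape);
-- As the surrounding comments note, wkt(shape) comes back in POLYGON form, not MULTILINESTRING.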
-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) -select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t; +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t order by ord; From 8d0c8318ea269ea30707669889932c2b4b66d612 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 9 Aug 2024 00:05:26 +0200 Subject: [PATCH 1959/2361] Apply suggestions from code review --- src/QueryPipeline/QueryPipeline.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp index 844b9e3b039..c9c0bad7553 100644 --- a/src/QueryPipeline/QueryPipeline.cpp +++ b/src/QueryPipeline/QueryPipeline.cpp @@ -283,7 +283,7 @@ static void initRowsBeforeAggregation(std::shared_ptr processors, IO if (!processors->empty()) { RowsBeforeStepCounterPtr rows_before_aggregation = std::make_shared(); - for (auto processor : *processors) + for (const auto & processor : *processors) { if (typeid_cast(processor.get()) || typeid_cast(processor.get())) { @@ -545,7 +545,7 @@ void QueryPipeline::complete(std::shared_ptr format) extremes = nullptr; initRowsBeforeLimit(format.get()); - for (const auto context : resources.interpreter_context) + for (const auto & context : resources.interpreter_context) { if (context->getSettingsRef().rows_before_aggregation) { From eb4ea0757730f8b8e59b9230d72aea3ca4bb1ff3 Mon Sep 17 00:00:00 2001 From: justindeguzman Date: Thu, 8 Aug 2024 16:55:39 -0700 Subject: [PATCH 1960/2361] [Docs] Fix broken links --- docs/en/interfaces/prometheus.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md index 75a68c59219..bf541901b34 100644 --- a/docs/en/interfaces/prometheus.md +++ b/docs/en/interfaces/prometheus.md @@ -25,7 +25,7 @@ ClickHouse can expose its own metrics for scraping from Prometheus: Section `` can be used to make more extended handlers. -This section is similar to [](http.md) but works for prometheus protocols: +This section is similar to [](/en/interfaces/http) but works for prometheus protocols: ```xml @@ -51,11 +51,11 @@ Settings: |---|---|---|---| | `port` | none | Port for serving the exposing metrics protocol. | | `endpoint` | `/metrics` | HTTP endpoint for scraping metrics by prometheus server. Starts with `/`. Should not be used with the `` section. | -| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | -| `metrics` | true | Expose metrics from the [system.metrics](../operations/system-tables/metrics.md) table. | -| `asynchronous_metrics` | true | Expose current metrics values from the [system.asynchronous_metrics](../operations/system-tables/asynchronous_metrics.md) table. | -| `events` | true | Expose metrics from the [system.events](../operations/system-tables/events.md) table. | -| `errors` | true | Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](../operations/system-tables/errors.md) as well. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](/en/interfaces/http) section. 
| +| `metrics` | true | Expose metrics from the [system.metrics](/en/operations/system-tables/metrics) table. | +| `asynchronous_metrics` | true | Expose current metrics values from the [system.asynchronous_metrics](/en/operations/system-tables/asynchronous_metrics) table. | +| `events` | true | Expose metrics from the [system.events](/en/operations/system-tables/events) table. | +| `errors` | true | Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the [system.errors](/en/operations/system-tables/errors) as well. | Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): ```bash @@ -65,7 +65,7 @@ curl 127.0.0.1:9363/metrics ## Remote-write protocol {#remote-write} ClickHouse supports the [remote-write](https://prometheus.io/docs/specs/remote_write_spec/) protocol. -Data are received by this protocol and written to a [TimeSeries](../engines/table-engines/integrations/time-series.md) table +Data are received by this protocol and written to a [TimeSeries](/en/engines/table-engines/special/time_series) table (which should be created beforehand). ```xml @@ -89,14 +89,14 @@ Settings: | Name | Default | Description | |---|---|---|---| | `port` | none | Port for serving the `remote-write` protocol. | -| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | -| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to write data received by the `remote-write` protocol. This name can optionally contain the name of a database too. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](/en/interfaces/http) section. | +| `table` | none | The name of a [TimeSeries](/en/engines/table-engines/special/time_series) table to write data received by the `remote-write` protocol. This name can optionally contain the name of a database too. | | `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | ## Remote-read protocol {#remote-read} ClickHouse supports the [remote-read](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/) protocol. -Data are read from a [TimeSeries](../engines/table-engines/integrations/time-series.md) table and sent via this protocol. +Data are read from a [TimeSeries](/en/engines/table-engines/special/time_series) table and sent via this protocol. ```xml @@ -119,8 +119,8 @@ Settings: | Name | Default | Description | |---|---|---|---| | `port` | none | Port for serving the `remote-read` protocol. | -| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](http.md) section. | -| `table` | none | The name of a [TimeSeries](../engines/table-engines/integrations/time-series.md) table to read data to send by the `remote-read` protocol. This name can optionally contain the name of a database too. | +| `url` / `headers` / `method` | none | Filters used to find a matching handler for a request. Similar to the fields with the same names in the [](/en/interfaces/http) section. | +| `table` | none | The name of a [TimeSeries](/en/engines/table-engines/special/time_series) table to read data to send by the `remote-read` protocol. 
This name can optionally contain the name of a database too. | | `database` | none | The name of a database where the table specified in the `table` setting is located if it's not specified in the `table` setting. | ## Configuration for multiple protocols {#multiple-protocols} From d7442e0670b1900e73299341d44287d21eafd0ad Mon Sep 17 00:00:00 2001 From: pufit Date: Thu, 8 Aug 2024 20:45:43 -0400 Subject: [PATCH 1961/2361] Fix flacky 02572_query_views_log_background_thread --- .../02572_query_views_log_background_thread.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh index a3e428e75c8..509cd03f6c2 100755 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh @@ -13,13 +13,16 @@ ${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "drop table if exists b ${CLICKHOUSE_CLIENT} --query="create table copy_02572 (key Int) engine=Memory();" ${CLICKHOUSE_CLIENT} --query="create table data_02572 (key Int) engine=Memory();" -${CLICKHOUSE_CLIENT} --query="create table buffer_02572 (key Int) engine=Buffer(currentDatabase(), data_02572, 1, 3, 3, 1, 1e9, 1, 1e9);" +${CLICKHOUSE_CLIENT} --query="create table buffer_02572 (key Int) engine=Buffer(currentDatabase(), data_02572, 1, 8, 8, 1, 1e9, 1, 1e9);" ${CLICKHOUSE_CLIENT} --query="create materialized view mv_02572 to copy_02572 as select * from data_02572;" +start=$(date +%s) ${CLICKHOUSE_CLIENT} --query="insert into buffer_02572 values (1);" -# ensure that the flush was not direct -${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" +if [ $(( $(date +%s) - start )) -gt 6 ]; then # clickhouse test cluster is overloaded, will skip + # ensure that the flush was not direct + ${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" +fi # we cannot use OPTIMIZE, this will attach query context, so let's wait for _ in {1..100}; do From 6f0f27838745d427966a3c949316a541d9fda7ac Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 03:45:07 +0200 Subject: [PATCH 1962/2361] Fix trash (low-quality code) in AWS S3 --- src/IO/S3/URI.cpp | 29 ++++++++++++----------------- src/IO/S3/URI.h | 8 ++++---- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index fead18315d8..446c2aa355b 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -1,8 +1,8 @@ #include -#include -#include -#include "Common/Macros.h" + #if USE_AWS_S3 +#include +#include #include #include #include @@ -10,6 +10,7 @@ #include + namespace DB { @@ -47,14 +48,6 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access static const RE2 path_style_pattern("^/([^/]*)/(.*)"); - static constexpr auto S3 = "S3"; - static constexpr auto S3EXPRESS = "S3EXPRESS"; - static constexpr auto COSN = "COSN"; - static constexpr auto COS = "COS"; - static constexpr auto OBS = "OBS"; - static constexpr auto OSS = "OSS"; - static constexpr auto EOS = "EOS"; - if (allow_archive_path_syntax) std::tie(uri_str, archive_pattern) = getURIAndArchivePattern(uri_); else @@ -85,7 +78,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) URIConverter::modifyURI(uri, mapper); } 
- storage_name = S3; + storage_name = "S3"; if (uri.getHost().empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host is empty in S3 URI."); @@ -93,11 +86,13 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// Extract object version ID from query string. bool has_version_id = false; for (const auto & [query_key, query_value] : uri.getQueryParameters()) + { if (query_key == "versionId") { version_id = query_value; has_version_id = true; } + } /// Poco::URI will ignore '?' when parsing the path, but if there is a versionId in the http parameter, /// '?' can not be used as a wildcard, otherwise it will be ambiguous. @@ -130,14 +125,14 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) boost::to_upper(name); /// For S3Express it will look like s3express-eun1-az1, i.e. contain region and AZ info - if (name != S3 && !name.starts_with(S3EXPRESS) && name != COS && name != OBS && name != OSS && name != EOS) + if (name != "S3" && !name.starts_with("S3EXPRESS") && name != "COS" && name != "OBS" && name != "OSS" && name != "EOS") throw Exception( ErrorCodes::BAD_ARGUMENTS, "Object storage system name is unrecognized in virtual hosted style S3 URI: {}", quoteString(name)); - if (name == COS) - storage_name = COSN; + if (name == "COS") + storage_name = "COSN"; else storage_name = name; } @@ -153,8 +148,8 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) void URI::addRegionToURI(const std::string ®ion) { - if (auto pos = endpoint.find("amazonaws.com"); pos != std::string::npos) - endpoint = endpoint.substr(0, pos) + region + "." + endpoint.substr(pos); + if (auto pos = endpoint.find(".amazonaws.com"); pos != std::string::npos) + endpoint = endpoint.substr(0, pos) + "." + region + endpoint.substr(pos); } void URI::validateBucket(const String & bucket, const Poco::URI & uri) diff --git a/src/IO/S3/URI.h b/src/IO/S3/URI.h index 80e2da96cd4..c8d0b28cd15 100644 --- a/src/IO/S3/URI.h +++ b/src/IO/S3/URI.h @@ -1,14 +1,14 @@ #pragma once -#include -#include - #include "config.h" #if USE_AWS_S3 +#include +#include #include + namespace DB::S3 { @@ -23,7 +23,7 @@ namespace DB::S3 struct URI { Poco::URI uri; - // Custom endpoint if URI scheme is not S3. + // Custom endpoint if URI scheme, if not S3. 
std::string endpoint; std::string bucket; std::string key; From 248da0341aca537104486243b84ca230e9c4f9fb Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Thu, 8 Aug 2024 15:54:56 +0800 Subject: [PATCH 1963/2361] fixed --- .../HashJoin/HashJoinMethodsImpl.h | 53 ++++++++++++++----- src/Interpreters/joinDispatch.h | 8 +-- ..._join_on_inequal_expression_fast.reference | 1 - 3 files changed, 44 insertions(+), 18 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index 5fefe53d145..aedd24630d1 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -1,11 +1,12 @@ #pragma once #include + namespace DB { namespace ErrorCodes { - extern const int UNSUPPORTED_JOIN_KEYS; - extern const int LOGICAL_ERROR; +extern const int UNSUPPORTED_JOIN_KEYS; +extern const int LOGICAL_ERROR; } template size_t HashJoinMethods::insertFromBlockImpl( @@ -156,7 +157,6 @@ Block HashJoinMethods::joinBlockImpl( block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); } } - return remaining_block; } @@ -596,7 +596,7 @@ ColumnPtr HashJoinMethods::buildAdditionalFilter template template -size_t HashJoinMethods::joinRightColumnsWithAddtitionalFilter( +size_t HashJoinMethods::joinRightColumnsWithAddtitionalFilter( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, @@ -662,6 +662,8 @@ size_t HashJoinMethods::joinRightColumnsWithAddti { auto & mapped = find_result.getMapped(); find_results.push_back(find_result); + /// We don't add missing in addFoundRowAll here. we will add it after filter is applied. + /// it's different from `joinRightColumns`. if (flag_per_row) addFoundRowAll(mapped, selected_rows, current_added_rows, all_flag_known_rows, nullptr); else @@ -682,32 +684,54 @@ size_t HashJoinMethods::joinRightColumnsWithAddti for (size_t i = 1, n = row_replicate_offset.size(); i < n; ++i) { bool any_matched = false; - /// For right join, flag_per_row is true, we need mark used flags for each row. + /// right/full join or multiple disjuncts, we need to mark used flags for each row. if (flag_per_row) { for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) { if (filter_flags[replicated_row]) { - any_matched = true; if constexpr (join_features.is_semi_join || join_features.is_any_join) { - auto used_once = used_flags.template setUsedOnce( - selected_right_row_it->block, selected_right_row_it->row_num, 0); - if (used_once) + /// For LEFT/INNER SEMI/ANY JOIN, we need to add only first appeared row from left, + if constexpr (join_features.left || join_features.inner) { - total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + if (!any_matched) + { + // For inner join, we need mark each right row'flag, because we only use each right row once. 
+ auto used_once = used_flags.template setUsedOnce( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + if (used_once) + { + any_matched = true; + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + } + } + } + else + { + auto used_once = used_flags.template setUsedOnce( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + if (used_once) + { + any_matched = true; + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + } } } else if constexpr (join_features.is_anti_join) { + any_matched = true; if constexpr (join_features.right && join_features.need_flags) used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); } else { + any_matched = true; total_added_rows += 1; added_columns.appendFromBlock( *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); @@ -715,6 +739,7 @@ size_t HashJoinMethods::joinRightColumnsWithAddti selected_right_row_it->block, selected_right_row_it->row_num, 0); } } + ++selected_right_row_it; } } @@ -892,7 +917,8 @@ void HashJoinMethods::correctNullabilityInplace( } template -void HashJoinMethods::correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) +void HashJoinMethods::correctNullabilityInplace( + ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) { if (nullable) { @@ -908,4 +934,3 @@ void HashJoinMethods::correctNullabilityInplace( JoinCommon::removeColumnNullability(column); } } - diff --git a/src/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h index 5d4bd8f92e5..4aabc49c29b 100644 --- a/src/Interpreters/joinDispatch.h +++ b/src/Interpreters/joinDispatch.h @@ -16,11 +16,13 @@ namespace DB /// When only need to match only one row from right table, use HashJoin::MapsOne. For example, LEFT ANY/SEMI/ANTI. /// /// HashJoin::MapsAll will store all rows for each key in the map. It is used when need to match multiple rows from right table. -/// For example, RIGHT ANY/ALL, FULL JOIN, INNER JOIN. +/// For example, LEFT ALL, INNER ALL, RIGHT ALL/ANY. /// /// prefer_use_maps_all is true when there is mixed inequal condition in the join condition. For example, `t1.a = t2.a AND t1.b > t2.b`. /// In this case, we need to use HashJoin::MapsAll to store all rows for each key in the map. We will select all matched rows from the map /// and filter them by `t1.b > t2.b`. +/// +/// flagged indicates whether we need to store flags for each row whether it has been used in the join. See JoinUsedFlags.h. 
template struct MapGetter; @@ -30,7 +32,7 @@ template struct MapGetter struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; @@ -43,7 +45,7 @@ template struct MapGetter struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference index aa8d4103db2..a70e70ef7e9 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference @@ -742,4 +742,3 @@ key1 b 2 3 2 key1 c 3 2 1 key1 d 4 7 2 key2 a2 1 1 1 -key4 f 2 3 4 From d0a1ee821b609856c2692abc01d132f8d7a8b88f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 03:51:12 +0200 Subject: [PATCH 1964/2361] You don't know regular expressions --- src/IO/S3/URI.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index 446c2aa355b..eea73474c44 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -41,7 +41,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// Case when AWS Private Link Interface is being used /// E.g. (bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/bucket-name/key) /// https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html - static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce.amazonaws.com(:\d{1,5})?)"); + static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce\.amazonaws\.com(:\d{1,5})?)"); /// Case when bucket name and key represented in path of S3 URL. /// E.g. 
(https://s3.region.amazonaws.com/bucket-name/key) From 978d36f9fe0e8cc34b6529276b7435b56a16bbb7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 03:57:47 +0200 Subject: [PATCH 1965/2361] It's strange that we cared about this --- src/IO/S3/URI.cpp | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index eea73474c44..64962f63edb 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -43,7 +43,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce\.amazonaws\.com(:\d{1,5})?)"); - /// Case when bucket name and key represented in path of S3 URL. + /// Case when bucket name and key represented in the path of S3 URL. /// E.g. (https://s3.region.amazonaws.com/bucket-name/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access static const RE2 path_style_pattern("^/([^/]*)/(.*)"); @@ -124,13 +124,6 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) } boost::to_upper(name); - /// For S3Express it will look like s3express-eun1-az1, i.e. contain region and AZ info - if (name != "S3" && !name.starts_with("S3EXPRESS") && name != "COS" && name != "OBS" && name != "OSS" && name != "EOS") - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Object storage system name is unrecognized in virtual hosted style S3 URI: {}", - quoteString(name)); - if (name == "COS") storage_name = "COSN"; else From dbd4a6d551a9c242c0ce8025ebcdb1304f0d448c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 04:41:06 +0200 Subject: [PATCH 1966/2361] Speed up from 4 sec to 2 sec #52771 --- src/IO/S3/Credentials.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index dfb7727fca4..11779c4dbd5 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp @@ -145,12 +145,16 @@ Aws::String AWSEC2MetadataClient::getDefaultCredentialsSecurely() const { String user_agent_string = awsComputeUserAgentString(); auto [new_token, response_code] = getEC2MetadataToken(user_agent_string); - if (response_code == Aws::Http::HttpResponseCode::BAD_REQUEST) + if (response_code == Aws::Http::HttpResponseCode::BAD_REQUEST + || response_code == Aws::Http::HttpResponseCode::REQUEST_NOT_MADE) + { + /// At least the host should be available and reply, otherwise neither IMDSv2 nor IMDSv1 are usable. return {}; + } else if (response_code != Aws::Http::HttpResponseCode::OK || new_token.empty()) { LOG_TRACE(logger, "Calling EC2MetadataService to get token failed, " - "falling back to less secure way. HTTP response code: {}", response_code); + "falling back to a less secure way. 
HTTP response code: {}", response_code); return getDefaultCredentials(); } From 8bfe4ee23f8b37ff8780e44ede9dc785ac563f75 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 04:57:18 +0200 Subject: [PATCH 1967/2361] Speed up requests when IMDS is not available --- src/IO/S3/Credentials.cpp | 6 +++--- src/IO/S3/Credentials.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index 11779c4dbd5..9c5f6547933 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp @@ -251,7 +251,7 @@ static Aws::String getAWSMetadataEndpoint() return ec2_metadata_service_endpoint; } -std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration) +std::shared_ptr createEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration) { auto endpoint = getAWSMetadataEndpoint(); return std::make_shared(client_configuration, endpoint.c_str()); @@ -785,11 +785,11 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( /// EC2MetadataService throttles by delaying the response so the service client should set a large read timeout. /// EC2MetadataService delay is in order of seconds so it only make sense to retry after a couple of seconds. - aws_client_configuration.connectTimeoutMs = 1000; + aws_client_configuration.connectTimeoutMs = 10; aws_client_configuration.requestTimeoutMs = 1000; aws_client_configuration.retryStrategy = std::make_shared(1, 1000); - auto ec2_metadata_client = InitEC2MetadataClient(aws_client_configuration); + auto ec2_metadata_client = createEC2MetadataClient(aws_client_configuration); auto config_loader = std::make_shared(ec2_metadata_client, !credentials_configuration.use_insecure_imds_request); AddProvider(std::make_shared(config_loader)); diff --git a/src/IO/S3/Credentials.h b/src/IO/S3/Credentials.h index 95297ab0538..042c48ec15a 100644 --- a/src/IO/S3/Credentials.h +++ b/src/IO/S3/Credentials.h @@ -70,7 +70,7 @@ private: LoggerPtr logger; }; -std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration); +std::shared_ptr createEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration); class AWSEC2InstanceProfileConfigLoader : public Aws::Config::AWSProfileConfigLoader { From 9470ceb34d6519c820f7c7ddccbe27b48a046f2a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 05:41:10 +0200 Subject: [PATCH 1968/2361] Minor changes --- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 2 +- src/IO/S3/PocoHTTPClient.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 433a0e96d2e..9854bada9ec 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -63,7 +63,7 @@ void throwIfError(const Aws::Utils::Outcome & response) { const auto & err = response.GetError(); throw S3Exception( - fmt::format("{} (Code: {}, s3 exception: {})", + fmt::format("{} (Code: {}, S3 exception: '{}')", err.GetMessage(), static_cast(err.GetErrorType()), err.GetExceptionName()), err.GetErrorType()); } diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index aab7a39534d..de43f34d838 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -128,7 +128,7 @@ void PocoHTTPClientConfiguration::updateSchemeAndRegion() } else { - /// In global mode AWS C++ SDK send `us-east-1` 
but accept switching to another one if being suggested. + /// In global mode AWS C++ SDK sends `us-east-1` but accepts switching to another one if being suggested. region = Aws::Region::AWS_GLOBAL; } } From 54cd980e13d70e84e2c0aad51420a2dd31217131 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 09:44:30 +0200 Subject: [PATCH 1969/2361] Increase queue size and fix tests --- docker/test/stateless/run.sh | 11 +++-------- .../0_stateless/02726_async_insert_flush_queue.sql | 4 +++- .../0_stateless/02726_async_insert_flush_stress.sh | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index c81f33ace01..cd2a61cce4e 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -193,8 +193,8 @@ ORDER BY tuple()" # create minio log webhooks for both audit and server logs # use async inserts to avoid creating too many parts -./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" -./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" +./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=300000 +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=300000 max_retries=100 retry=1 @@ -376,12 +376,7 @@ done # collect minio audit and server logs -has_async_inserts=$(clickhouse-client -q "SELECT count() FROM system.asynchronous_inserts WHERE table = 'minio_audit_logs' OR table = 'minio_server_logs'") -if [[ has_async_inserts -eq 1 ]]; then - echo "Waiting for async inserts to flush" - sleep 5 -fi - +clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" diff --git a/tests/queries/0_stateless/02726_async_insert_flush_queue.sql b/tests/queries/0_stateless/02726_async_insert_flush_queue.sql index 97d644fa4d6..5d941adcb81 100644 --- a/tests/queries/0_stateless/02726_async_insert_flush_queue.sql +++ b/tests/queries/0_stateless/02726_async_insert_flush_queue.sql @@ -30,7 +30,9 @@ SELECT count() FROM t_async_inserts_flush; SYSTEM FLUSH ASYNC INSERT QUEUE; -SELECT count() FROM system.asynchronous_inserts; +SELECT count() FROM 
system.asynchronous_inserts +WHERE database = currentDatabase() AND table = 't_async_inserts_flush'; + SELECT count() FROM t_async_inserts_flush; DROP TABLE t_async_inserts_flush; diff --git a/tests/queries/0_stateless/02726_async_insert_flush_stress.sh b/tests/queries/0_stateless/02726_async_insert_flush_stress.sh index 876766d0780..61bbbd620f0 100755 --- a/tests/queries/0_stateless/02726_async_insert_flush_stress.sh +++ b/tests/queries/0_stateless/02726_async_insert_flush_stress.sh @@ -91,5 +91,5 @@ flush1 $TIMEOUT & wait ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH ASYNC INSERT QUEUE" -${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.asynchronous_inserts" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.asynchronous_inserts WHERE database = currentDatabase() AND table = 'async_inserts'" ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS async_inserts"; From cfc10961ed237eac079db6f113330db5391adc1f Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 9 Aug 2024 15:56:36 +0800 Subject: [PATCH 1970/2361] fix getName() style for columnlowcardinality and columnunique --- src/Columns/ColumnLowCardinality.h | 2 +- src/Columns/ColumnUnique.h | 2 ++ src/Columns/IColumnUnique.h | 2 +- src/Columns/tests/gtest_column_dump_structure.cpp | 2 +- .../02313_dump_column_structure_low_cardinality.reference | 2 +- tests/queries/0_stateless/02355_column_type_name_lc.reference | 2 +- 6 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h index 5a23853e961..d9c90e7dc21 100644 --- a/src/Columns/ColumnLowCardinality.h +++ b/src/Columns/ColumnLowCardinality.h @@ -46,7 +46,7 @@ public: return Base::create(std::move(column_unique), std::move(indexes), is_shared); } - std::string getName() const override { return "LowCardinality(" + getDictionaryPtr()->getName() + ")"; } + std::string getName() const override { return "LowCardinality(" + getDictionary().getNestedColumn()->getName() + ")"; } const char * getFamilyName() const override { return "LowCardinality"; } TypeIndex getDataType() const override { return TypeIndex::LowCardinality; } diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index d6cb75679be..8a66f4e02ed 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -48,6 +48,8 @@ private: ColumnUnique(const ColumnUnique & other); public: + std::string getName() const override { return "Unique(" + getNestedColumn()->getName() + ")"; } + MutableColumnPtr cloneEmpty() const override; const ColumnPtr & getNestedColumn() const override; diff --git a/src/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h index a8e10e5e2b2..52b1bef3009 100644 --- a/src/Columns/IColumnUnique.h +++ b/src/Columns/IColumnUnique.h @@ -73,7 +73,7 @@ public: /// Returns dictionary hash which is SipHash is applied to each row of nested column. 
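    /// Net effect of the renames in this commit, with both strings taken from the updated unit test
    /// below: dumping the structure of an empty LowCardinality(String) column now prints
    ///   LowCardinality(size = 0, UInt8(size = 0), Unique(size = 1, String(size = 1)))
    /// where the previous spelling was ColumnLowCardinality(..., ColumnUnique(...)).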
virtual UInt128 getHash() const = 0; - const char * getFamilyName() const override { return "ColumnUnique"; } + const char * getFamilyName() const override { return "Unique"; } TypeIndex getDataType() const override { return getNestedColumn()->getDataType(); } void insert(const Field &) override diff --git a/src/Columns/tests/gtest_column_dump_structure.cpp b/src/Columns/tests/gtest_column_dump_structure.cpp index e00c77798c8..d9647147157 100644 --- a/src/Columns/tests/gtest_column_dump_structure.cpp +++ b/src/Columns/tests/gtest_column_dump_structure.cpp @@ -10,7 +10,7 @@ TEST(IColumn, dumpStructure) { auto type_lc = std::make_shared(std::make_shared()); ColumnPtr column_lc = type_lc->createColumn(); - String expected_structure = "ColumnLowCardinality(size = 0, UInt8(size = 0), ColumnUnique(size = 1, String(size = 1)))"; + String expected_structure = "LowCardinality(size = 0, UInt8(size = 0), Unique(size = 1, String(size = 1)))"; std::vector threads; for (size_t i = 0; i < 6; ++i) diff --git a/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference index fa7f1799c31..6b1e4743867 100644 --- a/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference +++ b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference @@ -1 +1 @@ -Array(LowCardinality(String)), Const(size = 1, Array(size = 1, UInt64(size = 1), ColumnLowCardinality(size = 2, UInt8(size = 2), ColumnUnique(size = 3, String(size = 3))))) +Array(LowCardinality(String)), Const(size = 1, Array(size = 1, UInt64(size = 1), LowCardinality(size = 2, UInt8(size = 2), Unique(size = 3, String(size = 3))))) diff --git a/tests/queries/0_stateless/02355_column_type_name_lc.reference b/tests/queries/0_stateless/02355_column_type_name_lc.reference index 234a072299f..50c25a86b2f 100644 --- a/tests/queries/0_stateless/02355_column_type_name_lc.reference +++ b/tests/queries/0_stateless/02355_column_type_name_lc.reference @@ -1 +1 @@ -ColumnLowCardinality +LowCardinality(String) From 47f429a52435d84b3e1cee06202e743c11bcba0d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 20:54:29 +0000 Subject: [PATCH 1971/2361] Proper CMake for libfiu --- CMakeLists.txt | 2 +- contrib/libfiu-cmake/CMakeLists.txt | 27 ++++++++++++++------------- src/CMakeLists.txt | 4 ++-- src/Common/FailPoint.cpp | 6 ++++-- src/Common/FailPoint.h | 9 ++++----- src/Common/config.h.in | 2 +- src/configure_config.cmake | 4 ++-- 7 files changed, 28 insertions(+), 26 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b4e0484ab1..134f3afd727 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -402,7 +402,7 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") set(ENABLE_GWP_ASAN OFF) endif () -option (ENABLE_FIU "Enable Fiu" ON) +option (ENABLE_LIBFIU "Enable libfiu" ON) option(WERROR "Enable -Werror compiler option" ON) diff --git a/contrib/libfiu-cmake/CMakeLists.txt b/contrib/libfiu-cmake/CMakeLists.txt index e805491edbb..eab55087c98 100644 --- a/contrib/libfiu-cmake/CMakeLists.txt +++ b/contrib/libfiu-cmake/CMakeLists.txt @@ -1,20 +1,21 @@ -if (NOT ENABLE_FIU) - message (STATUS "Not using fiu") +if (NOT ENABLE_LIBFIU) + message (STATUS "Not using libfiu") return () endif () -set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/") +set(LIBFIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/") -set(FIU_SOURCES - ${FIU_DIR}/libfiu/fiu.c - ${FIU_DIR}/libfiu/fiu-rc.c - 
${FIU_DIR}/libfiu/backtrace.c - ${FIU_DIR}/libfiu/wtable.c +set(LIBFIU_SOURCES + ${LIBFIU_DIR}/libfiu/fiu.c + ${LIBFIU_DIR}/libfiu/fiu-rc.c + ${LIBFIU_DIR}/libfiu/backtrace.c + ${LIBFIU_DIR}/libfiu/wtable.c ) -set(FIU_HEADERS "${FIU_DIR}/libfiu") +set(LIBFIU_HEADERS "${LIBFIU_DIR}/libfiu") -add_library(_fiu ${FIU_SOURCES}) -target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE) -target_include_directories(_fiu PUBLIC ${FIU_HEADERS}) -add_library(ch_contrib::fiu ALIAS _fiu) +add_library(_libfiu ${LIBFIU_SOURCES}) +target_compile_definitions(_libfiu PUBLIC DUMMY_BACKTRACE) +target_compile_definitions(_libfiu PUBLIC FIU_ENABLE) +target_include_directories(_libfiu PUBLIC ${LIBFIU_HEADERS}) +add_library(ch_contrib::libfiu ALIAS _libfiu) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 98dd0601a1b..db3ef0f489f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -353,8 +353,8 @@ target_link_libraries(clickhouse_common_io Poco::Foundation ) -if (TARGET ch_contrib::fiu) - target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::fiu) +if (TARGET ch_contrib::libfiu) + target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::libfiu) endif() if (TARGET ch_contrib::cpuid) diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index 0b1ec552d43..b2fcbc77c56 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -7,6 +7,8 @@ #include #include +#include "config.h" + namespace DB { @@ -15,7 +17,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; }; -#if FIU_ENABLE +#if USE_LIBFIU static struct InitFiu { InitFiu() @@ -135,7 +137,7 @@ void FailPointInjection::pauseFailPoint(const String & fail_point_name) void FailPointInjection::enableFailPoint(const String & fail_point_name) { -#if FIU_ENABLE +#if USE_LIBFIU #define SUB_M(NAME, flags, pause) \ if (fail_point_name == FailPoints::NAME) \ { \ diff --git a/src/Common/FailPoint.h b/src/Common/FailPoint.h index b3e1214d597..1af13d08553 100644 --- a/src/Common/FailPoint.h +++ b/src/Common/FailPoint.h @@ -1,17 +1,16 @@ #pragma once -#include "config.h" #include #include #include +#include "config.h" + #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdocumentation" #pragma clang diagnostic ignored "-Wreserved-macro-identifier" - -#include -#include - +# include +# include #pragma clang diagnostic pop #include diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 6a0090130a3..56a067b06e8 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -59,7 +59,7 @@ #cmakedefine01 USE_SKIM #cmakedefine01 USE_PRQL #cmakedefine01 USE_ULID -#cmakedefine01 FIU_ENABLE +#cmakedefine01 USE_LIBFIU #cmakedefine01 USE_BCRYPT #cmakedefine01 USE_LIBARCHIVE #cmakedefine01 USE_POCKETFFT diff --git a/src/configure_config.cmake b/src/configure_config.cmake index d22bf674df4..721041bc11b 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -161,8 +161,8 @@ endif() if (TARGET ch_contrib::ssh) set(USE_SSH 1) endif() -if (TARGET ch_contrib::fiu) - set(FIU_ENABLE 1) +if (TARGET ch_contrib::libfiu) + set(USE_LIBFIU 1) endif() if (TARGET ch_contrib::libarchive) set(USE_LIBARCHIVE 1) From 30d8e407723f45ed79dc960604d77e8ee02f3edb Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 21:02:04 +0000 Subject: [PATCH 1972/2361] Fix referenced variable for vectorscan in system.build_options --- src/Storages/System/StorageSystemBuildOptions.cpp.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in 
b/src/Storages/System/StorageSystemBuildOptions.cpp.in index a81bcb08bfc..3f84a7468fd 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -36,7 +36,7 @@ const char * auto_config_build[] "USE_SSL", "@USE_SSL@", "OPENSSL_VERSION", "@OPENSSL_VERSION@", "OPENSSL_IS_BORING_SSL", "@OPENSSL_IS_BORING_SSL@", - "USE_VECTORSCAN", "@ENABLE_VECTORSCAN@", + "USE_VECTORSCAN", "@USE_VECTORSCAN@", "USE_SIMDJSON", "@USE_SIMDJSON@", "USE_ODBC", "@USE_ODBC@", "USE_GRPC", "@USE_GRPC@", From b242a129f811c9838167d98df56d060ceac24b85 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 8 Aug 2024 21:03:42 +0000 Subject: [PATCH 1973/2361] Fix referenced variable for jemalloc in system.build_options --- src/Storages/System/StorageSystemBuildOptions.cpp.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in b/src/Storages/System/StorageSystemBuildOptions.cpp.in index 3f84a7468fd..6cecef5a7ad 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -21,7 +21,7 @@ const char * auto_config_build[] "BUILD_COMPILE_DEFINITIONS", "@BUILD_COMPILE_DEFINITIONS@", "USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@", "USE_GLIBC_COMPATIBILITY", "@GLIBC_COMPATIBILITY@", - "USE_JEMALLOC", "@ENABLE_JEMALLOC@", + "USE_JEMALLOC", "@USE_JEMALLOC@", "USE_ICU", "@USE_ICU@", "USE_H3", "@USE_H3@", "USE_MYSQL", "@USE_MYSQL@", From 1e2eea9f6333b165b1b15acef5f489ad067a57f3 Mon Sep 17 00:00:00 2001 From: kruglov Date: Fri, 9 Aug 2024 10:16:15 +0300 Subject: [PATCH 1974/2361] Fixed errors when publication name contents symbols except [a-z_] --- src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index f632e553a0d..01f78673ed8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -659,7 +659,7 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) { - std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); + std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", doubleQuoteString(publication_name)); tx.exec(query_str); LOG_DEBUG(log, "Dropped publication: {}", publication_name); } @@ -667,7 +667,7 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) void PostgreSQLReplicationHandler::addTableToPublication(pqxx::nontransaction & ntx, const String & table_name) { - std::string query_str = fmt::format("ALTER PUBLICATION {} ADD TABLE ONLY {}", publication_name, doubleQuoteWithSchema(table_name)); + std::string query_str = fmt::format("ALTER PUBLICATION {} ADD TABLE ONLY {}", doubleQuoteString(publication_name), doubleQuoteWithSchema(table_name)); ntx.exec(query_str); LOG_TRACE(log, "Added table {} to publication `{}`", doubleQuoteWithSchema(table_name), publication_name); } From a497a2391455de21dec19b365cc939defdc56b1e Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Aug 2024 08:33:11 +0000 Subject: [PATCH 1975/2361] Fix CMake for QPL --- contrib/qpl-cmake/CMakeLists.txt | 4 ---- src/CMakeLists.txt | 2 ++ src/Common/config.h.in | 1 + .../CompressionCodecDeflateQpl.cpp | 8 +++---- 
src/Compression/CompressionCodecDeflateQpl.h | 6 +++++ src/Compression/CompressionFactory.cpp | 22 +++++++++---------- .../System/StorageSystemBuildOptions.cpp.in | 2 +- src/configure_config.cmake | 3 +++ 8 files changed, 28 insertions(+), 20 deletions(-) diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt index e62612cff5a..89332ae0f7a 100644 --- a/contrib/qpl-cmake/CMakeLists.txt +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -728,10 +728,6 @@ add_library(_qpl STATIC ${LIB_DEPS}) target_include_directories(_qpl PUBLIC $ $) - -target_compile_definitions(_qpl - PUBLIC -DENABLE_QPL_COMPRESSION) - target_link_libraries(_qpl PRIVATE ch_contrib::accel-config) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index db3ef0f489f..eba04d93df5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -556,6 +556,8 @@ target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4) if (TARGET ch_contrib::qpl) dbms_target_link_libraries(PUBLIC ch_contrib::qpl) + target_link_libraries (clickhouse_compression PUBLIC ch_contrib::qpl) + target_link_libraries (clickhouse_compression PUBLIC ch_contrib::accel-config) endif () if (TARGET ch_contrib::accel-config) diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 56a067b06e8..1680cde22a2 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -32,6 +32,7 @@ #cmakedefine01 USE_IDNA #cmakedefine01 USE_NLP #cmakedefine01 USE_VECTORSCAN +#cmakedefine01 USE_QPL #cmakedefine01 USE_LIBURING #cmakedefine01 USE_AVRO #cmakedefine01 USE_CAPNP diff --git a/src/Compression/CompressionCodecDeflateQpl.cpp b/src/Compression/CompressionCodecDeflateQpl.cpp index f1b5b24e866..c82ee861a6f 100644 --- a/src/Compression/CompressionCodecDeflateQpl.cpp +++ b/src/Compression/CompressionCodecDeflateQpl.cpp @@ -1,7 +1,3 @@ -#ifdef ENABLE_QPL_COMPRESSION - -#include -#include #include #include #include @@ -11,6 +7,10 @@ #include #include #include +#include +#include + +#if USE_QPL #include "libaccel_config.h" diff --git a/src/Compression/CompressionCodecDeflateQpl.h b/src/Compression/CompressionCodecDeflateQpl.h index 86fd9051bd8..d9abc0fb7e0 100644 --- a/src/Compression/CompressionCodecDeflateQpl.h +++ b/src/Compression/CompressionCodecDeflateQpl.h @@ -4,6 +4,11 @@ #include #include #include + +#include "config.h" + +#if USE_QPL + #include namespace Poco @@ -117,3 +122,4 @@ private: }; } +#endif diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index 2e7aa0d086f..fbb5664d441 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -1,20 +1,20 @@ -#include "config.h" - #include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include #include -#include -#include -#include +#include +#include +#include #include +#include "config.h" + namespace DB { @@ -179,7 +179,7 @@ void registerCodecZSTD(CompressionCodecFactory & factory); void registerCodecZSTDQAT(CompressionCodecFactory & factory); #endif void registerCodecMultiple(CompressionCodecFactory & factory); -#ifdef ENABLE_QPL_COMPRESSION +#if USE_QPL void registerCodecDeflateQpl(CompressionCodecFactory & factory); #endif @@ -209,7 +209,7 @@ CompressionCodecFactory::CompressionCodecFactory() registerCodecGorilla(*this); registerCodecEncrypted(*this); registerCodecFPC(*this); -#ifdef ENABLE_QPL_COMPRESSION +#if USE_QPL registerCodecDeflateQpl(*this); #endif registerCodecGCD(*this); diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in 
b/src/Storages/System/StorageSystemBuildOptions.cpp.in index 6cecef5a7ad..f7edfb17542 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -62,7 +62,7 @@ const char * auto_config_build[] "USE_ARROW", "@USE_ARROW@", "USE_ORC", "@USE_ORC@", "USE_MSGPACK", "@USE_MSGPACK@", - "USE_QPL", "@ENABLE_QPL@", + "USE_QPL", "@USE_QPL@", "USE_QAT", "@ENABLE_QATLIB@", "GIT_HASH", "@GIT_HASH@", "GIT_BRANCH", R"IRjaNsZIL9Yh7FQ4(@GIT_BRANCH@)IRjaNsZIL9Yh7FQ4", diff --git a/src/configure_config.cmake b/src/configure_config.cmake index 721041bc11b..6782cc6d824 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -135,6 +135,9 @@ endif() if (TARGET ch_contrib::vectorscan) set(USE_VECTORSCAN 1) endif() +if (TARGET ch_contrib::qpl) + set(USE_QPL 1) +endif() if (TARGET ch_contrib::avrocpp) set(USE_AVRO 1) endif() From eec5fe087c273aba443d97824da0e7aadfd52cdd Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Aug 2024 08:50:52 +0000 Subject: [PATCH 1976/2361] Fix CMake for QATlib --- contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt | 4 ++-- src/CMakeLists.txt | 7 ++----- src/Common/config.h.in | 1 + src/Compression/CompressionCodecZSTDQAT.cpp | 5 ++++- src/Compression/CompressionFactory.cpp | 4 ++-- src/Storages/System/StorageSystemBuildOptions.cpp.in | 2 +- src/configure_config.cmake | 3 +++ 7 files changed, 15 insertions(+), 11 deletions(-) diff --git a/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt b/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt index 72d21a8572b..fc18092f574 100644 --- a/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt +++ b/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt @@ -27,7 +27,7 @@ if (ENABLE_QAT_OUT_OF_TREE_BUILD) ${QAT_AL_INCLUDE_DIR} ${QAT_USDM_INCLUDE_DIR} ${ZSTD_LIBRARY_DIR}) - target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC) + target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0) add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin) else () # In-tree build message(STATUS "Intel QATZSTD in-tree build") @@ -78,7 +78,7 @@ else () # In-tree build ${QAT_USDM_INCLUDE_DIR} ${ZSTD_LIBRARY_DIR} ${LIBQAT_HEADER_DIR}) - target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE) + target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DINTREE) target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $ $) add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin) endif () diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index eba04d93df5..43092d10dd2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -560,12 +560,9 @@ if (TARGET ch_contrib::qpl) target_link_libraries (clickhouse_compression PUBLIC ch_contrib::accel-config) endif () -if (TARGET ch_contrib::accel-config) - dbms_target_link_libraries(PUBLIC ch_contrib::accel-config) -endif () - -if (TARGET ch_contrib::qatzstd_plugin) +if (TARGET ch_contrib::accel-config AND TARGET ch_contrib::qatzstd_plugin) dbms_target_link_libraries(PUBLIC ch_contrib::qatzstd_plugin) + dbms_target_link_libraries(PUBLIC ch_contrib::accel-config) target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::qatzstd_plugin) endif () diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 1680cde22a2..e3f8882850f 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -33,6 +33,7 @@ #cmakedefine01 USE_NLP #cmakedefine01 USE_VECTORSCAN #cmakedefine01 USE_QPL +#cmakedefine01 USE_QATLIB #cmakedefine01 
USE_LIBURING #cmakedefine01 USE_AVRO #cmakedefine01 USE_CAPNP diff --git a/src/Compression/CompressionCodecZSTDQAT.cpp b/src/Compression/CompressionCodecZSTDQAT.cpp index 5a4ef70a30a..e19b7e4a001 100644 --- a/src/Compression/CompressionCodecZSTDQAT.cpp +++ b/src/Compression/CompressionCodecZSTDQAT.cpp @@ -1,4 +1,6 @@ -#ifdef ENABLE_ZSTD_QAT_CODEC +#include "config.h" + +#if USE_QATLIB #include #include @@ -6,6 +8,7 @@ #include #include #include + #include #include diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index fbb5664d441..ac00f571568 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -175,7 +175,7 @@ void registerCodecNone(CompressionCodecFactory & factory); void registerCodecLZ4(CompressionCodecFactory & factory); void registerCodecLZ4HC(CompressionCodecFactory & factory); void registerCodecZSTD(CompressionCodecFactory & factory); -#ifdef ENABLE_ZSTD_QAT_CODEC +#if USE_QATLIB void registerCodecZSTDQAT(CompressionCodecFactory & factory); #endif void registerCodecMultiple(CompressionCodecFactory & factory); @@ -198,7 +198,7 @@ CompressionCodecFactory::CompressionCodecFactory() registerCodecNone(*this); registerCodecLZ4(*this); registerCodecZSTD(*this); -#ifdef ENABLE_ZSTD_QAT_CODEC +#if USE_QATLIB registerCodecZSTDQAT(*this); #endif registerCodecLZ4HC(*this); diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in b/src/Storages/System/StorageSystemBuildOptions.cpp.in index f7edfb17542..9e5adbfe825 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -63,7 +63,7 @@ const char * auto_config_build[] "USE_ORC", "@USE_ORC@", "USE_MSGPACK", "@USE_MSGPACK@", "USE_QPL", "@USE_QPL@", - "USE_QAT", "@ENABLE_QATLIB@", + "USE_QATLIB", "@USE_QATLIB@", "GIT_HASH", "@GIT_HASH@", "GIT_BRANCH", R"IRjaNsZIL9Yh7FQ4(@GIT_BRANCH@)IRjaNsZIL9Yh7FQ4", "GIT_DATE", "@GIT_DATE@", diff --git a/src/configure_config.cmake b/src/configure_config.cmake index 6782cc6d824..5b24f79ef6e 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -138,6 +138,9 @@ endif() if (TARGET ch_contrib::qpl) set(USE_QPL 1) endif() +if (TARGET ch_contrib::qatlib) + set(USE_QATLIB 1) +endif() if (TARGET ch_contrib::avrocpp) set(USE_AVRO 1) endif() From 759299910c2892b809735caaf663fe374c315c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 9 Aug 2024 09:14:00 +0000 Subject: [PATCH 1977/2361] Force new analyzer for test --- tests/queries/0_stateless/03217_filtering_in_storage_merge.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql b/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql index 5ecc1e7c672..42d31e95f9c 100644 --- a/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql +++ b/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql @@ -13,4 +13,4 @@ INSERT INTO test_03217_merge_replica_1 SELECT number AS x FROM numbers(10); SYSTEM SYNC REPLICA test_03217_merge_replica_2; -- If the filter on _table is not applied, then the plan will show both replicas -EXPLAIN SELECT _table, count() FROM test_03217_all_replicas WHERE _table = 'test_03217_merge_replica_1' AND x >= 0 GROUP BY _table; +EXPLAIN SELECT _table, count() FROM test_03217_all_replicas WHERE _table = 'test_03217_merge_replica_1' AND x >= 0 GROUP BY _table SETTINGS allow_experimental_analyzer=1; From 
dc64550536ff249b1c12070ed646bc4321bc68bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 9 Aug 2024 09:14:13 +0000 Subject: [PATCH 1978/2361] Remove wrong check from test --- .../0_stateless/03217_filtering_in_system_tables.reference | 4 ++-- .../0_stateless/03217_filtering_in_system_tables.sql | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.reference b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference index 218fddf92e0..d7ccd989f53 100644 --- a/tests/queries/0_stateless/03217_filtering_in_system_tables.reference +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference @@ -1,4 +1,4 @@ information_schema tables default test_03217_system_tables_replica_1 r1 -1 1 -1 1 +1 +1 diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql index 72ca7c8684d..0db846bc500 100644 --- a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql @@ -10,18 +10,17 @@ CREATE TABLE test_03217_system_tables_replica_2(x UInt32) -- If filtering is not done correctly on database-table column, then this query report to read 2 rows, which are the above tables SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name = 'r1'; - SYSTEM FLUSH LOGS; --- argMin-argMax is necessary to make the test repeatable +-- argMax is necessary to make the test repeatable -- StorageSystemTables -SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 +SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 AND current_database = currentDatabase() AND query LIKE '%SELECT database, table FROM system.tables WHERE database = \'information_schema\' AND table = \'tables\';' AND type = 'QueryFinish'; -- StorageSystemReplicas -SELECT argMin(read_rows, event_time_microseconds), argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 +SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 AND current_database = currentDatabase() AND query LIKE '%SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name = \'r1\';' AND type = 'QueryFinish'; From 6360687b307ea2ea7c5cf5746d83655b72a73a75 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 11:28:18 +0200 Subject: [PATCH 1979/2361] Try fix flaky 02675_profile_events_from_query_log_and_client --- ...events_from_query_log_and_client.reference | 14 ++++++------ ...rofile_events_from_query_log_and_client.sh | 22 +++++++++++++++---- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference index babcecf7004..9dbac8d34f2 100644 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference @@ -1,11 +1,11 @@ INSERT TO S3 - [ 0 ] S3Clients: 1 - [ 0 ] S3CompleteMultipartUpload: 1 - [ 0 ] S3CreateMultipartUpload: 1 - [ 0 ] S3HeadObject: 2 - [ 0 ] S3ReadRequestsCount: 2 - 
[ 0 ] S3UploadPart: 1 - [ 0 ] S3WriteRequestsCount: 3 +Successful write requests 3 +S3Clients 1 +S3CompleteMultipartUpload 1 +S3CreateMultipartUpload 1 +S3HeadObject 2 +S3ReadRequestsCount 2 +S3UploadPart 1 CHECK WITH query_log QueryFinish S3CreateMultipartUpload 1 S3UploadPart 1 S3CompleteMultipartUpload 1 S3PutObject 0 CREATE diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index e346d9893a7..cae20be79dc 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -9,7 +9,21 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "INSERT TO S3" $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/profile_events.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(1000000) SETTINGS s3_max_single_part_upload_size = 10, s3_truncate_on_insert = 1; -" 2>&1 | grep -o -e '\ \[\ .*\ \]\ S3.*:\ .*\ ' | grep -v 'Microseconds' | grep -v 'S3DiskConnections' | grep -v 'S3DiskAddresses' | sort +" 2>&1 | $CLICKHOUSE_LOCAL -q " +WITH '(\\w+): (\\d+)' AS pattern, + (SELECT (groupArray(regexpExtract(line, pattern, 1)), + groupArray(regexpExtract(line, pattern, 2)::UInt64))::Map(String, UInt64) + FROM file(stdin, 'LineAsString', 'line String') + WHERE line LIKE '% S3%' + AND line NOT LIKE '%Microseconds%' + AND line NOT LIKE '%S3DiskConnections%' + AND line NOT LIKE '%S3DiskAddresses') AS pe_map +SELECT untuple(arrayJoin(pe_map) AS pe) +WHERE tupleElement(pe, 1) not like '%WriteRequests%' +UNION ALL +SELECT 'Successful write requests', + (pe_map['S3WriteRequestsCount'] - pe_map['S3WriteRequestsErrors'])::UInt64 +" echo "CHECK WITH query_log" $CLICKHOUSE_CLIENT -nq " @@ -40,19 +54,19 @@ CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t echo "INSERT" $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; -" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ ' +" 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "READ" $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; -" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ ' +" 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "INSERT and READ INSERT" $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq " INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; -" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ ' +" 2>&1 | grep -o -e ' \[ .* \] FileOpen: .* ' echo "DROP" $CLICKHOUSE_CLIENT -nq " From e4903858c8ae108b87b726a8056acab10dd6b851 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 9 Aug 2024 09:31:33 +0000 Subject: [PATCH 1980/2361] Add extra check to make sure both replicas are present in system.replicas --- .../0_stateless/03217_filtering_in_system_tables.reference | 2 ++ tests/queries/0_stateless/03217_filtering_in_system_tables.sql | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.reference 
b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference index d7ccd989f53..c0761c3f689 100644 --- a/tests/queries/0_stateless/03217_filtering_in_system_tables.reference +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference @@ -1,4 +1,6 @@ information_schema tables +both default test_03217_system_tables_replica_1 r1 +both default test_03217_system_tables_replica_2 r2 default test_03217_system_tables_replica_1 r1 1 1 diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql index 0db846bc500..2ce63559b99 100644 --- a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql @@ -8,6 +8,8 @@ CREATE TABLE test_03217_system_tables_replica_2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r2') ORDER BY x; +-- Make sure we can read both replicas +SELECT 'both', database, table, replica_name FROM system.replicas WHERE database = currentDatabase(); -- If filtering is not done correctly on database-table column, then this query report to read 2 rows, which are the above tables SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name = 'r1'; SYSTEM FLUSH LOGS; From da907d535623422b7beb1d8cf3e7389698567e28 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 10:37:48 +0100 Subject: [PATCH 1981/2361] Better parsing --- docker/test/stateless/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index cd2a61cce4e..c59d36114ae 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -178,7 +178,7 @@ attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 202 clickhouse-client --query "CREATE TABLE minio_audit_logs ( log String, - event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(substring(JSONExtractRaw(log, 'time'), 2, 29), 9, 'UTC') + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') ) ENGINE = MergeTree ORDER BY tuple()" @@ -186,7 +186,7 @@ ORDER BY tuple()" clickhouse-client --query "CREATE TABLE minio_server_logs ( log String, - event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(substring(JSONExtractRaw(log, 'time'), 2, 29), 9, 'UTC') + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') ) ENGINE = MergeTree ORDER BY tuple()" From a5e06c7c311f142aa8d790cf398af19d86cbce4f Mon Sep 17 00:00:00 2001 From: Salvatore Mesoraca Date: Fri, 9 Aug 2024 11:06:11 +0200 Subject: [PATCH 1982/2361] Fix UB in hopEnd and hopStart This was causing segfaults because of a NULL pointer dereference --- src/Functions/FunctionsTimeWindow.cpp | 7 ++++++- .../01049_window_view_window_functions.reference | 2 ++ .../0_stateless/01049_window_view_window_functions.sql | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index 77d740803be..faea9c6ba58 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -622,7 +622,12 @@ struct TimeWindowImpl { auto type = WhichDataType(arguments[0].type); if (type.isTuple()) - return 
std::static_pointer_cast(arguments[0].type)->getElement(0); + { + const auto & tuple_elems = std::static_pointer_cast(arguments[0].type)->getElements(); + if (tuple_elems.empty()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Tuple passed to {} should not be empty", function_name); + return tuple_elems[0]; + } else if (type.isUInt32()) return std::make_shared(); else diff --git a/tests/queries/0_stateless/01049_window_view_window_functions.reference b/tests/queries/0_stateless/01049_window_view_window_functions.reference index 2d49664b280..47d1ccc57dd 100644 --- a/tests/queries/0_stateless/01049_window_view_window_functions.reference +++ b/tests/queries/0_stateless/01049_window_view_window_functions.reference @@ -67,3 +67,5 @@ SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 2020-01-10 00:00:00 SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa')); 2019-01-10 00:00:00 +SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/01049_window_view_window_functions.sql b/tests/queries/0_stateless/01049_window_view_window_functions.sql index 617019bd2c6..3638dd1a3b2 100644 --- a/tests/queries/0_stateless/01049_window_view_window_functions.sql +++ b/tests/queries/0_stateless/01049_window_view_window_functions.sql @@ -36,3 +36,6 @@ SELECT hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, I SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa')); + +SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } From 2657e2b3ef2f98802fd2b8ebcd359fe756b709c6 Mon Sep 17 00:00:00 2001 From: Graham Campbell Date: Fri, 9 Aug 2024 11:08:41 +0100 Subject: [PATCH 1983/2361] Do not apply redundant sorting removal when there's an offset --- .../Optimizations/removeRedundantSorting.cpp | 8 ++-- .../02496_remove_redundant_sorting.reference | 37 +++++++++++++++++ .../02496_remove_redundant_sorting.sh | 22 ++++++++++ ...emove_redundant_sorting_analyzer.reference | 41 +++++++++++++++++++ 4 files changed, 105 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp index 7cac7bee6ec..f0094f0f8d2 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -59,9 +60,10 @@ public: if (typeid_cast(current_step) || typeid_cast(current_step) /// (1) if there are LIMITs on top of ORDER BY, the ORDER BY is non-removable - || typeid_cast(current_step) /// (2) if ORDER BY is with FILL WITH, it is non-removable - || typeid_cast(current_step) /// (3) ORDER BY will change order of previous sorting - || typeid_cast(current_step)) /// (4) aggregation change order + || typeid_cast(current_step) /// (2) OFFSET on top of ORDER BY, the ORDER BY is non-removable + || typeid_cast(current_step) /// (3) if ORDER BY is with FILL WITH, it is non-removable + || 
typeid_cast(current_step) /// (4) ORDER BY will change order of previous sorting + || typeid_cast(current_step)) /// (5) aggregation change order { logStep("nodes_affect_order/push", current_node); nodes_affect_order.push_back(current_node); diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index 77ef213b36d..a0a1fd60812 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -527,3 +527,40 @@ Expression (Projection) 2 4 1 3 0 2 +-- presence of an inner OFFSET retains the ORDER BY +-- query +WITH + t1 AS ( + SELECT SUM(a) AS a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + GROUP BY 2 + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2 +-- explain +Expression (Projection) + Expression (Before ORDER BY) + Aggregating + Expression (Before GROUP BY) + Offset + Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Expression (Projection) + Expression (Before ORDER BY) + Aggregating + Expression (Before GROUP BY) + ReadFromStorage (Values) +-- execute +0 diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index 646e2501a99..d59b4387101 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -329,3 +329,25 @@ FROM ORDER BY number DESC )" run_query "$query" + +echo "-- presence of an inner OFFSET retains the ORDER BY" +query="WITH + t1 AS ( + SELECT SUM(a) AS a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + GROUP BY 2 + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2" +run_query "$query" diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference index b6a2e3182df..58441de5f22 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference @@ -533,3 +533,44 @@ Expression (Project names) 2 4 1 3 0 2 +-- presence of an inner OFFSET retains the ORDER BY +-- query +WITH + t1 AS ( + SELECT SUM(a) AS a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + GROUP BY 2 + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2 +-- explain +Expression (Project names) + Expression (Projection) + Aggregating + Expression (Before GROUP BY) + Expression (Change column names to column identifiers) + Expression (Project names) + Offset + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + Expression (Projection) + Expression (Change column names to column identifiers) + Expression (Project names) + Expression (Projection) + Aggregating + Expression (Before GROUP BY) + Expression (Change column names to column identifiers) + ReadFromStorage (Values) +-- execute +0 From dccb6bdd88ef26244ddb1c9de8d1232140036294 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 9 Aug 2024 18:33:05 +0800 Subject: [PATCH 1984/2361] fix failed uts --- .../Formats/Impl/ORCBlockOutputFormat.cpp | 47 +++++++------------ .../Formats/Impl/ORCBlockOutputFormat.h | 5 -- 2 files changed, 17 insertions(+), 35 deletions(-) diff --git 
a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp index bd89ae0fa86..4a7a23158ff 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp @@ -280,20 +280,28 @@ void ORCBlockOutputFormat::writeColumn( DataTypePtr & type, const PaddedPODArray * null_bytemap) { - orc_column.numElements = column.size(); + size_t rows = column.size(); + orc_column.resize(rows); + orc_column.numElements = rows; + + /// Calculate orc_column.hasNulls if (null_bytemap) - { orc_column.hasNulls = !memoryIsZero(null_bytemap->data(), 0, null_bytemap->size()); - if (orc_column.hasNulls) - { - orc_column.notNull.resize(null_bytemap->size()); - for (size_t i = 0; i < null_bytemap->size(); ++i) - orc_column.notNull[i] = !(*null_bytemap)[i]; - } - } else orc_column.hasNulls = false; + /// Fill orc_column.notNull + if (orc_column.hasNulls) + { + for (size_t i = 0; i < rows; ++i) + orc_column.notNull[i] = !(*null_bytemap)[i]; + } + else + { + for (size_t i = 0; i < rows; ++i) + orc_column.notNull[i] = 1; + } + /// ORC doesn't have unsigned types, so cast everything to signed and sign-extend to Int64 to /// make the ORC library calculate min and max correctly. switch (type->getTypeId()) @@ -516,27 +524,6 @@ void ORCBlockOutputFormat::writeColumn( } } -size_t ORCBlockOutputFormat::getColumnSize(const IColumn & column, DataTypePtr & type) -{ - if (type->getTypeId() == TypeIndex::Array) - { - auto nested_type = assert_cast(*type).getNestedType(); - const IColumn & nested_column = assert_cast(column).getData(); - return std::max(column.size(), getColumnSize(nested_column, nested_type)); - } - - return column.size(); -} - -size_t ORCBlockOutputFormat::getMaxColumnSize(Chunk & chunk) -{ - size_t columns_num = chunk.getNumColumns(); - size_t max_column_size = 0; - for (size_t i = 0; i != columns_num; ++i) - max_column_size = std::max(max_column_size, getColumnSize(*chunk.getColumns()[i], data_types[i])); - return max_column_size; -} - void ORCBlockOutputFormat::consume(Chunk chunk) { if (!writer) diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.h b/src/Processors/Formats/Impl/ORCBlockOutputFormat.h index 28837193d1a..06ecac9b820 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.h @@ -69,11 +69,6 @@ private: void writeColumn(orc::ColumnVectorBatch & orc_column, const IColumn & column, DataTypePtr & type, const PaddedPODArray * null_bytemap); - /// These two functions are needed to know maximum nested size of arrays to - /// create an ORC Batch with the appropriate size - size_t getColumnSize(const IColumn & column, DataTypePtr & type); - size_t getMaxColumnSize(Chunk & chunk); - void prepareWriter(); const FormatSettings format_settings; From 4ced1f37e8e478ef806bd623a411916c18169dfe Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 11:56:53 +0100 Subject: [PATCH 1985/2361] Escape quote --- docker/test/stateless/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index c59d36114ae..acdcda753d7 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -178,7 +178,7 @@ attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 202 clickhouse-client --query "CREATE TABLE minio_audit_logs ( log String, - event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '"' FROM 
JSONExtractRaw(log, 'time')), 9, 'UTC') + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '\"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') ) ENGINE = MergeTree ORDER BY tuple()" @@ -186,7 +186,7 @@ ORDER BY tuple()" clickhouse-client --query "CREATE TABLE minio_server_logs ( log String, - event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '\"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') ) ENGINE = MergeTree ORDER BY tuple()" From ade1228b9578d5c0d7124a9d5c40ac3207e48074 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 13:01:00 +0200 Subject: [PATCH 1986/2361] Fix order --- ...rofile_events_from_query_log_and_client.reference | 2 +- ...02675_profile_events_from_query_log_and_client.sh | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference index 9dbac8d34f2..448eca3e5b1 100644 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference @@ -1,11 +1,11 @@ INSERT TO S3 -Successful write requests 3 S3Clients 1 S3CompleteMultipartUpload 1 S3CreateMultipartUpload 1 S3HeadObject 2 S3ReadRequestsCount 2 S3UploadPart 1 +Successful write requests 3 CHECK WITH query_log QueryFinish S3CreateMultipartUpload 1 S3UploadPart 1 S3CompleteMultipartUpload 1 S3PutObject 0 CREATE diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index cae20be79dc..6d770b308b5 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -18,11 +18,13 @@ WITH '(\\w+): (\\d+)' AS pattern, AND line NOT LIKE '%Microseconds%' AND line NOT LIKE '%S3DiskConnections%' AND line NOT LIKE '%S3DiskAddresses') AS pe_map -SELECT untuple(arrayJoin(pe_map) AS pe) -WHERE tupleElement(pe, 1) not like '%WriteRequests%' -UNION ALL -SELECT 'Successful write requests', - (pe_map['S3WriteRequestsCount'] - pe_map['S3WriteRequestsErrors'])::UInt64 +SELECT * FROM ( + SELECT untuple(arrayJoin(pe_map) AS pe) + WHERE tupleElement(pe, 1) not like '%WriteRequests%' + UNION ALL + SELECT 'Successful write requests', + (pe_map['S3WriteRequestsCount'] - pe_map['S3WriteRequestsErrors'])::UInt64 +) ORDER BY 1 " echo "CHECK WITH query_log" From 36c0c4562b8622b84012a12e29175f272bda2b0b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 13:14:30 +0200 Subject: [PATCH 1987/2361] Fix race in WithRetries --- src/Backups/WithRetries.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/Backups/WithRetries.cpp b/src/Backups/WithRetries.cpp index 181e6331ac9..9f22085f5a9 100644 --- a/src/Backups/WithRetries.cpp +++ b/src/Backups/WithRetries.cpp @@ -68,13 +68,19 @@ const WithRetries::KeeperSettings & WithRetries::getKeeperSettings() const WithRetries::FaultyKeeper WithRetries::getFaultyZooKeeper() const { - /// We need to create new instance of ZooKeeperWithFaultInjection each time a copy a pointer to ZooKeeper client there + zkutil::ZooKeeperPtr current_zookeeper; + { + std::lock_guard 
lock(zookeeper_mutex); + current_zookeeper = zookeeper; + } + + /// We need to create new instance of ZooKeeperWithFaultInjection each time and copy a pointer to ZooKeeper client there /// The reason is that ZooKeeperWithFaultInjection may reset the underlying pointer and there could be a race condition /// when the same object is used from multiple threads. auto faulty_zookeeper = ZooKeeperWithFaultInjection::createInstance( settings.keeper_fault_injection_probability, settings.keeper_fault_injection_seed, - zookeeper, + current_zookeeper, log->name(), log); From 1e3ccbc3ec81c5b9d79a034159181f1f6bdb195c Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 9 Aug 2024 11:22:44 +0000 Subject: [PATCH 1988/2361] add perf test for subcolumns --- .../optimize_functions_to_subcolumns.xml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 tests/performance/optimize_functions_to_subcolumns.xml diff --git a/tests/performance/optimize_functions_to_subcolumns.xml b/tests/performance/optimize_functions_to_subcolumns.xml new file mode 100644 index 00000000000..a246aae7950 --- /dev/null +++ b/tests/performance/optimize_functions_to_subcolumns.xml @@ -0,0 +1,27 @@ + + + 1 + 4 + + + + CREATE TABLE t_subcolumns (a Array(UInt64), s Nullable(String), m Map(String, UInt64)) ENGINE = MergeTree ORDER BY tuple() + + + + INSERT INTO t_subcolumns SELECT range(number % 20), toString(number), mapFromArrays(range(number % 20), range(number % 20)) FROM numbers_mt(50000000) + + + + OPTIMIZE TABLE t_subcolumns FINAL + + + SELECT count() FROM t_subcolumns WHERE NOT ignore(length(a)) + SELECT count() FROM t_subcolumns WHERE notEmpty(a) + SELECT count() FROM t_subcolumns WHERE NOT ignore(length(m)) + SELECT count() FROM t_subcolumns WHERE notEmpty(m) + SELECT count() FROM t_subcolumns WHERE isNotNull(s) + SELECT count(s) FROM t_subcolumns + + DROP TABLE t_subcolumns + From e8f2f65e62c65878463879396b7ebdceed48c5e3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 9 Aug 2024 11:51:23 +0000 Subject: [PATCH 1989/2361] Avoid converting type to string and back in _CAST --- src/Functions/CastOverloadResolver.cpp | 23 +++++++++++++++---- src/Functions/CastOverloadResolver.h | 6 ++++- src/Functions/toBool.cpp | 3 +-- src/Interpreters/ActionsDAG.cpp | 12 +++++----- src/Interpreters/castColumn.cpp | 6 ++--- .../optimizeUseAggregateProjection.cpp | 21 ++++++----------- src/Processors/Transforms/WindowTransform.cpp | 17 ++------------ src/Storages/MergeTree/KeyCondition.cpp | 7 ++---- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 20 +++++----------- ...rojection_with_normalized_states.reference | 1 + ...gate_projection_with_normalized_states.sql | 2 ++ 11 files changed, 52 insertions(+), 66 deletions(-) diff --git a/src/Functions/CastOverloadResolver.cpp b/src/Functions/CastOverloadResolver.cpp index 49f63073aaf..6cb4d492fd8 100644 --- a/src/Functions/CastOverloadResolver.cpp +++ b/src/Functions/CastOverloadResolver.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -35,7 +36,7 @@ FunctionBasePtr createFunctionBaseCast( class CastOverloadResolverImpl : public IFunctionOverloadResolver { public: - const char * getNameImpl() const + static const char * getNameImpl(CastType cast_type, bool internal) { if (cast_type == CastType::accurate) return "accurateCast"; @@ -49,7 +50,7 @@ public: String getName() const override { - return getNameImpl(); + return getNameImpl(cast_type, internal); } size_t getNumberOfArguments() const override { return 2; } @@ -79,10 +80,22 @@ 
public: } } + static FunctionBasePtr createInternalCast(ColumnWithTypeAndName from, DataTypePtr to, CastType cast_type, std::optional diagnostic) + { + if (cast_type == CastType::accurateOrNull && !isVariant(to)) + to = makeNullable(to); + + ColumnsWithTypeAndName arguments; + arguments.emplace_back(std::move(from)); + arguments.emplace_back().type = std::make_unique(); + + return createFunctionBaseCast(nullptr, getNameImpl(cast_type, true), arguments, to, diagnostic, cast_type); + } + protected: FunctionBasePtr buildImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type) const override { - return createFunctionBaseCast(context, getNameImpl(), arguments, return_type, diagnostic, cast_type); + return createFunctionBaseCast(context, getNameImpl(cast_type, internal), arguments, return_type, diagnostic, cast_type); } DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override @@ -130,9 +143,9 @@ private: }; -FunctionOverloadResolverPtr createInternalCastOverloadResolver(CastType type, std::optional diagnostic) +FunctionBasePtr createInternalCast(ColumnWithTypeAndName from, DataTypePtr to, CastType cast_type, std::optional diagnostic) { - return CastOverloadResolverImpl::create(ContextPtr{}, type, true, diagnostic); + return CastOverloadResolverImpl::createInternalCast(std::move(from), std::move(to), cast_type, std::move(diagnostic)); } REGISTER_FUNCTION(CastOverloadResolvers) diff --git a/src/Functions/CastOverloadResolver.h b/src/Functions/CastOverloadResolver.h index 7d98f774812..66f9d6cfcaf 100644 --- a/src/Functions/CastOverloadResolver.h +++ b/src/Functions/CastOverloadResolver.h @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB @@ -11,6 +12,9 @@ namespace DB class IFunctionOverloadResolver; using FunctionOverloadResolverPtr = std::shared_ptr; +class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + enum class CastType : uint8_t { nonAccurate, @@ -24,6 +28,6 @@ struct CastDiagnostic std::string column_to; }; -FunctionOverloadResolverPtr createInternalCastOverloadResolver(CastType type, std::optional diagnostic); +FunctionBasePtr createInternalCast(ColumnWithTypeAndName from, DataTypePtr to, CastType cast_type, std::optional diagnostic); } diff --git a/src/Functions/toBool.cpp b/src/Functions/toBool.cpp index 6f2c436c1ea..ac595d313e3 100644 --- a/src/Functions/toBool.cpp +++ b/src/Functions/toBool.cpp @@ -54,8 +54,7 @@ namespace } }; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); - auto func_cast = func_builder_cast->build(cast_args); + auto func_cast = createInternalCast(arguments[0], result_type, CastType::nonAccurate, {}); return func_cast->execute(cast_args, result_type, arguments[0].column->size()); } }; diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index df1c0aa1f2a..2a594839c6a 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -301,11 +301,11 @@ const ActionsDAG::Node & ActionsDAG::addCast(const Node & node_to_cast, const Da column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); column.type = std::make_shared(); - const auto * cast_type_constant_node = &addColumn(std::move(column)); + const auto * cast_type_constant_node = &addColumn(column); ActionsDAG::NodeRawConstPtrs children = {&node_to_cast, cast_type_constant_node}; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); + auto 
func_base_cast = createInternalCast(ColumnWithTypeAndName{node_to_cast.result_type, node_to_cast.result_name}, cast_type, CastType::nonAccurate, {}); - return addFunction(func_builder_cast, std::move(children), result_name); + return addFunction(func_base_cast, std::move(children), result_name); } const ActionsDAG::Node & ActionsDAG::addFunctionImpl( @@ -1547,11 +1547,11 @@ ActionsDAG ActionsDAG::makeConvertingActions( const auto * left_arg = dst_node; CastDiagnostic diagnostic = {dst_node->result_name, res_elem.name}; - FunctionOverloadResolverPtr func_builder_cast - = createInternalCastOverloadResolver(CastType::nonAccurate, std::move(diagnostic)); + ColumnWithTypeAndName left_column{nullptr, dst_node->result_type, {}}; + auto func_base_cast = createInternalCast(std::move(left_column), res_elem.type, CastType::nonAccurate, std::move(diagnostic)); NodeRawConstPtrs children = { left_arg, right_arg }; - dst_node = &actions_dag.addFunction(func_builder_cast, std::move(children), {}); + dst_node = &actions_dag.addFunction(func_base_cast, std::move(children), {}); } if (dst_node->column && isColumnConst(*dst_node->column) && !(res_elem.column && isColumnConst(*res_elem.column))) diff --git a/src/Interpreters/castColumn.cpp b/src/Interpreters/castColumn.cpp index 906dfb84b14..a779c9bc34d 100644 --- a/src/Interpreters/castColumn.cpp +++ b/src/Interpreters/castColumn.cpp @@ -26,11 +26,9 @@ static ColumnPtr castColumn(CastType cast_type, const ColumnWithTypeAndName & ar "" } }; - auto get_cast_func = [cast_type, &arguments] + auto get_cast_func = [from = arg, to = type, cast_type] { - - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(cast_type, {}); - return func_builder_cast->build(arguments); + return createInternalCast(from, to, cast_type, {}); }; FunctionBasePtr func_cast = cache ? cache->getOrSet(cast_type, from_name, to_name, std::move(get_cast_func)) : get_cast_func(); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 52d1931c51e..b31ee7ea53c 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -255,20 +255,13 @@ static void appendAggregateFunctions( const auto * node = input; - if (node->result_name != aggregate.column_name) - { - if (DataTypeAggregateFunction::strictEquals(type, node->result_type)) - { - node = &proj_dag.addAlias(*node, aggregate.column_name); - } - else - { - /// Cast to aggregate types specified in query if it's not - /// strictly the same as the one specified in projection. This - /// is required to generate correct results during finalization. - node = &proj_dag.addCast(*node, type, aggregate.column_name); - } - } + if (!DataTypeAggregateFunction::strictEquals(type, node->result_type)) + /// Cast to aggregate types specified in query if it's not + /// strictly the same as the one specified in projection. This + /// is required to generate correct results during finalization. 
+ node = &proj_dag.addCast(*node, type, aggregate.column_name); + else if (node->result_name != aggregate.column_name) + node = &proj_dag.addAlias(*node, aggregate.column_name); proj_dag_outputs.push_back(node); } diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index c26cd7cc8c3..c27c230c741 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -2337,22 +2337,9 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction argument_types[2]->getName()); } - const auto from_name = argument_types[2]->getName(); - const auto to_name = argument_types[0]->getName(); - ColumnsWithTypeAndName arguments + auto get_cast_func = [from = argument_types[2], to = argument_types[0]] { - { argument_types[2], "" }, - { - DataTypeString().createColumnConst(0, to_name), - std::make_shared(), - "" - } - }; - - auto get_cast_func = [&arguments] - { - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::accurate, {}); - return func_builder_cast->build(arguments); + return createInternalCast({from, {}}, to, CastType::accurate, {}); }; func_cast = get_cast_func(); diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index dfb43c4e75d..aa7a498d5a3 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1956,11 +1956,8 @@ bool KeyCondition::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNEleme auto common_type_maybe_nullable = (key_expr_type_is_nullable && !common_type->isNullable()) ? DataTypePtr(std::make_shared(common_type)) : common_type; - ColumnsWithTypeAndName arguments{ - {nullptr, key_expr_type, ""}, - {DataTypeString().createColumnConst(1, common_type_maybe_nullable->getName()), common_type_maybe_nullable, ""}}; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); - auto func_cast = func_builder_cast->build(arguments); + + auto func_cast = createInternalCast({key_expr_type, {}}, common_type_maybe_nullable, CastType::nonAccurate, {}); /// If we know the given range only contains one value, then we treat all functions as positive monotonic. 
if (!single_point && !func_cast->hasInformationAboutMonotonicity()) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 36ff6c0a4bd..9c82817e8cb 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -152,23 +152,15 @@ const ActionsDAG::Node & addFunction( const ActionsDAG::Node & addCast( const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, - const String & type_name, + const DataTypePtr & to_type, OriginalToNewNodeMap & node_remap) { - if (node_to_cast.result_type->getName() == type_name) + if (!node_to_cast.result_type->equals(*to_type)) return node_to_cast; - Field cast_type_constant_value(type_name); - - ColumnWithTypeAndName column; - column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); - column.type = std::make_shared(); - - const auto * cast_type_constant_node = &dag->addColumn(std::move(column)); - ActionsDAG::NodeRawConstPtrs children = {&node_to_cast, cast_type_constant_node}; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); - - return addFunction(dag, func_builder_cast, std::move(children), node_remap); + const auto & new_node = dag->addCast(node_to_cast, to_type, {}); + node_remap[new_node.result_name] = {dag.get(), &new_node}; + return new_node; } /// Normalizes the filter node by adding AND with a constant true. @@ -332,7 +324,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Build AND(last_step_result_node, true) const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node_info.node, node_remap); /// Build CAST(and_node, type of PREWHERE column) - const auto & cast_node = addCast(last_step_dag, and_node, output->result_type->getName(), node_remap); + const auto & cast_node = addCast(last_step_dag, and_node, output->result_type, node_remap); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); last_step_dag->addOrReplaceInOutputs(prewhere_result_node); diff --git a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference index 25aa9dc5dec..37993873983 100644 --- a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference +++ b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference @@ -1,2 +1,3 @@ 3 950 990 500 2000 +[950] [999] diff --git a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql index 5375823aa8e..956bf3711a2 100644 --- a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql +++ b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql @@ -29,4 +29,6 @@ FROM cluster('test_cluster_two_shards', currentDatabase(), r) WHERE a = 'x' settings prefer_localhost_replica=0; +SELECT quantilesTimingMerge(0.95)(q), quantilesTimingMerge(toInt64(1))(q) FROM remote('127.0.0.{1,2}', currentDatabase(), r); + DROP TABLE r; From 3d850f8ceb0ca5cfae26e8faa7c4d900cc4e8fda Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 13:58:02 +0200 Subject: [PATCH 1990/2361] fix --- 
src/Processors/Sources/ShellCommandSource.cpp | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index 23359367a9b..f55a3713215 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -70,17 +70,16 @@ static void makeFdBlocking(int fd) static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_milliseconds) { + auto logger = getLogger("TimeoutReadBufferFromFileDescriptor"); + auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; + int res; while (true) { Stopwatch watch; - auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; - LOG_TEST( - getLogger("TimeoutReadBufferFromFileDescriptor"), - "Polling descriptors: {}", - fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", ")); + LOG_TEST(logger, "Polling descriptors: {}", fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", ")); res = poll(pfds, static_cast(num), static_cast(timeout_milliseconds)); @@ -92,11 +91,7 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond const auto elapsed = watch.elapsedMilliseconds(); if (timeout_milliseconds <= elapsed) { - LOG_TEST( - getLogger("TimeoutReadBufferFromFileDescriptor"), - "Timeout exceeded: elapsed={}, timeout={}", - elapsed, - timeout_milliseconds); + LOG_TEST(logger, "Timeout exceeded: elapsed={}, timeout={}", elapsed, timeout_milliseconds); break; } timeout_milliseconds -= elapsed; @@ -107,9 +102,8 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond } } - auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; LOG_TEST( - getLogger("TimeoutReadBufferFromFileDescriptor"), + logger, "Poll for descriptors: {} returned {}", fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", "), res); From 1875e8d9cd581871c247e3a4cc58f1f57ffa0659 Mon Sep 17 00:00:00 2001 From: Salvatore Mesoraca Date: Fri, 9 Aug 2024 14:06:52 +0200 Subject: [PATCH 1991/2361] Fix UB in tumbleEnd and tumbleStart This was causing segfaults because of a NULL pointer dereference --- src/Functions/FunctionsTimeWindow.cpp | 7 ++++++- .../01049_window_view_window_functions.reference | 2 ++ .../0_stateless/01049_window_view_window_functions.sql | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index faea9c6ba58..88b85c48326 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -267,7 +267,12 @@ struct TimeWindowImpl { auto type = WhichDataType(arguments[0].type); if (type.isTuple()) - return std::static_pointer_cast(arguments[0].type)->getElement(0); + { + const auto & tuple_elems = std::static_pointer_cast(arguments[0].type)->getElements(); + if (tuple_elems.empty()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Tuple passed to {} should not be empty", function_name); + return tuple_elems[0]; + } else if (type.isUInt32()) return std::make_shared(); else diff --git a/tests/queries/0_stateless/01049_window_view_window_functions.reference b/tests/queries/0_stateless/01049_window_view_window_functions.reference index 47d1ccc57dd..073301104d2 100644 --- 
a/tests/queries/0_stateless/01049_window_view_window_functions.reference +++ b/tests/queries/0_stateless/01049_window_view_window_functions.reference @@ -69,3 +69,5 @@ SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DA 2019-01-10 00:00:00 SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleEnd(tuple()); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/01049_window_view_window_functions.sql b/tests/queries/0_stateless/01049_window_view_window_functions.sql index 3638dd1a3b2..fb2b4b4949a 100644 --- a/tests/queries/0_stateless/01049_window_view_window_functions.sql +++ b/tests/queries/0_stateless/01049_window_view_window_functions.sql @@ -39,3 +39,5 @@ SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DA SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleEnd(tuple()); -- { serverError ILLEGAL_COLUMN } From 20563bc6cbc6e70eb6926c5f21fded356270f40f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 9 Aug 2024 12:19:44 +0000 Subject: [PATCH 1992/2361] Make test work with ReplicatedDatabase in test --- .../0_stateless/03217_filtering_in_system_tables.sql | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql index 2ce63559b99..eb506dfe39a 100644 --- a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql @@ -9,9 +9,11 @@ CREATE TABLE test_03217_system_tables_replica_2(x UInt32) ORDER BY x; -- Make sure we can read both replicas -SELECT 'both', database, table, replica_name FROM system.replicas WHERE database = currentDatabase(); +-- The replica name might be altered because of `_functional_tests_helper_database_replicated_replace_args_macros`, +-- thus we need to use `left` +SELECT 'both', database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase(); -- If filtering is not done correctly on database-table column, then this query report to read 2 rows, which are the above tables -SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name = 'r1'; +SELECT database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name LIKE 'r1%'; SYSTEM FLUSH LOGS; -- argMax is necessary to make the test repeatable @@ -24,5 +26,5 @@ SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 -- StorageSystemReplicas SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 AND current_database = currentDatabase() - AND query LIKE '%SELECT database, table, replica_name FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name = \'r1\';' + AND query LIKE '%SELECT database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name LIKE \'r1\%\';' AND type = 'QueryFinish'; From 
96b54df1638043e27887099df5f7cf310d1e7fa4 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Fri, 9 Aug 2024 15:04:03 +0200 Subject: [PATCH 1993/2361] fix bugprone-macro-parentheses --- src/Interpreters/SystemLog.cpp | 4 +++- src/Interpreters/SystemLog.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index d4403b72583..832c39bfaf8 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -284,11 +284,13 @@ ASTPtr getCreateTableQueryClean(const StorageID & table_id, ContextPtr context) SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConfiguration & config) { +/// NOLINTBEGIN(bugprone-macro-parentheses) #define CREATE_PUBLIC_MEMBERS(log_type, member, descr) \ member = createSystemLog(global_context, "system", #member, config, #member, descr); \ LIST_OF_ALL_SYSTEM_LOGS(CREATE_PUBLIC_MEMBERS) #undef CREATE_PUBLIC_MEMBERS +/// NOLINTEND(bugprone-macro-parentheses) if (session_log) global_context->addWarningMessage("Table system.session_log is enabled. It's unreliable and may contain garbage. Do not use it for any kind of security monitoring."); @@ -333,7 +335,7 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf std::vector SystemLogs::getAllLogs() const { #define GET_RAW_POINTERS(log_type, member, descr) \ - member.get(), \ + (member).get(), \ std::vector result = { LIST_OF_ALL_SYSTEM_LOGS(GET_RAW_POINTERS) diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 24ef6a18eb8..9e1af3578bd 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -60,11 +60,13 @@ namespace DB }; */ +/// NOLINTBEGIN(bugprone-macro-parentheses) #define FORWARD_DECLARATION(log_type, member, descr) \ class log_type; \ LIST_OF_ALL_SYSTEM_LOGS(FORWARD_DECLARATION) #undef FORWARD_DECLARATION +/// NOLINTEND(bugprone-macro-parentheses) /// System logs should be destroyed in destructor of the last Context and before tables, From 21be195dd8f841c3cfaf453bb7faba02627b9c0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 9 Aug 2024 13:14:09 +0000 Subject: [PATCH 1994/2361] Revert unnecessary change --- .../clickhouse_path/format_schemas/test.capnp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp index 247e7b9ceca..44f1961205b 100644 --- a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/test.capnp @@ -7,4 +7,4 @@ struct TestRecordStruct val1 @2 : Text; val2 @3 : Float32; val3 @4 : UInt8; -} +} \ No newline at end of file From 8cf5f6d6168342a69b188b17588566a4ac85fa69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Fri, 9 Aug 2024 13:20:05 +0000 Subject: [PATCH 1995/2361] Add empty cell to reports when time is missing --- tests/ci/report.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/ci/report.py b/tests/ci/report.py index 0b6c818aed0..15b1512896a 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -770,10 +770,12 @@ def create_test_html_report( row.append(f'{test_result.status}') colspan += 1 + row.append("") if test_result.time is not None: has_test_time = True - row.append(f"{test_result.time}") - colspan += 1 + 
row.append(str(test_result.time)) + row.append("") + colspan += 1 if test_result.log_urls is not None: has_log_urls = True From b757522fc4ac545451acc398ab230323fb7c0fd3 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 14:20:57 +0100 Subject: [PATCH 1996/2361] fix build --- programs/keeper/Keeper.cpp | 2 +- src/Server/HTTPHandlerFactory.cpp | 41 ++----------------------- src/Server/PrometheusRequestHandler.cpp | 4 +-- src/Server/PrometheusRequestHandler.h | 7 +++-- 4 files changed, 10 insertions(+), 44 deletions(-) diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index ae51a62ff9c..a447a9e50f6 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -515,7 +515,7 @@ try "Prometheus: http://" + address.toString(), std::make_unique( std::move(my_http_context), - createKeeperPrometheusHandlerFactory(config_getter(), async_metrics, "PrometheusHandler-factory"), + createKeeperPrometheusHandlerFactory(*this, config_getter(), async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params)); diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 0ee45783d52..fc31ad2874e 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -122,7 +122,8 @@ static inline auto createHandlersFactoryFromConfig( } else if (handler_type == "prometheus") { - main_handler_factory->addHandler(createPrometheusHandlerFactoryForHTTPRule(config, prefix + "." + key, async_metrics)); + main_handler_factory->addHandler( + createPrometheusHandlerFactoryForHTTPRule(server, config, prefix + "." + key, async_metrics)); } else if (handler_type == "replicas_status") { @@ -199,19 +200,7 @@ HTTPRequestHandlerFactoryPtr createHandlerFactory(IServer & server, const Poco:: else if (name == "InterserverIOHTTPHandler-factory" || name == "InterserverIOHTTPSHandler-factory") return createInterserverHTTPHandlerFactory(server, name); else if (name == "PrometheusHandler-factory") -<<<<<<< HEAD - { - auto metrics_writer = std::make_shared(config, "prometheus", async_metrics); - return createPrometheusMainHandlerFactory(config, metrics_writer, name); - } -||||||| 02b8d563e3a - { - auto metrics_writer = std::make_shared(config, "prometheus", async_metrics); - return createPrometheusMainHandlerFactory(server, config, metrics_writer, name); - } -======= return createPrometheusHandlerFactory(server, config, async_metrics, name); ->>>>>>> master throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown HTTP handler factory name."); } @@ -298,34 +287,8 @@ void addDefaultHandlersFactory( ); factory.addHandler(query_handler); -<<<<<<< HEAD - /// We check that prometheus handler will be served on current (default) port. - /// Otherwise it will be created separately, see createHandlerFactory(...). - if (config.has("prometheus") && config.getInt("prometheus.port", 0) == 0) - { - auto writer = std::make_shared(config, "prometheus", async_metrics); - auto creator - = [writer]() -> std::unique_ptr { return std::make_unique(writer); }; - auto prometheus_handler = std::make_shared>(std::move(creator)); - prometheus_handler->attachStrictPath(config.getString("prometheus.endpoint", "/metrics")); - prometheus_handler->allowGetAndHeadRequest(); -||||||| 02b8d563e3a - /// We check that prometheus handler will be served on current (default) port. - /// Otherwise it will be created separately, see createHandlerFactory(...). 
- if (config.has("prometheus") && config.getInt("prometheus.port", 0) == 0) - { - auto writer = std::make_shared(config, "prometheus", async_metrics); - auto creator = [&server, writer] () -> std::unique_ptr - { - return std::make_unique(server, writer); - }; - auto prometheus_handler = std::make_shared>(std::move(creator)); - prometheus_handler->attachStrictPath(config.getString("prometheus.endpoint", "/metrics")); - prometheus_handler->allowGetAndHeadRequest(); -======= /// createPrometheusHandlerFactoryForHTTPRuleDefaults() can return nullptr if prometheus protocols must not be served on http port. if (auto prometheus_handler = createPrometheusHandlerFactoryForHTTPRuleDefaults(server, config, async_metrics)) ->>>>>>> master factory.addHandler(prometheus_handler); } diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 52cda92d9b4..ae1fb6d629e 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -95,7 +95,7 @@ public: class PrometheusRequestHandler::ImplWithContext : public Impl { public: - explicit ImplWithContext(PrometheusRequestHandler & parent) : Impl(parent), default_settings(parent.server.context()->getSettingsRef()) { } + explicit ImplWithContext(PrometheusRequestHandler & parent) : Impl(parent), default_settings(server().context()->getSettingsRef()) { } virtual void handlingRequestWithContext(HTTPServerRequest & request, HTTPServerResponse & response) = 0; @@ -353,7 +353,7 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe if (request.getVersion() == HTTPServerRequest::HTTP_1_1) response.setChunkedTransferEncoding(true); - setResponseDefaultHeaders(response); + setResponseDefaultHeaders(response); impl->beforeHandlingRequest(request); impl->handleRequest(request, response); diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index 7aeed11d6b8..281ecf5260e 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -15,8 +15,11 @@ class WriteBufferFromHTTPServerResponse; class PrometheusRequestHandler : public HTTPRequestHandler { public: - PrometheusRequestHandler(const PrometheusRequestHandlerConfig & config_, - const AsynchronousMetrics & async_metrics_, std::shared_ptr metrics_writer_); + PrometheusRequestHandler( + IServer & server_, + const PrometheusRequestHandlerConfig & config_, + const AsynchronousMetrics & async_metrics_, + std::shared_ptr metrics_writer_); ~PrometheusRequestHandler() override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event_) override; From 65ebcd6f21b26144cb47e6b71c939517b1fb38a2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 9 Aug 2024 13:55:47 +0000 Subject: [PATCH 1997/2361] Fixing test. 
--- .../0_stateless/01656_test_query_log_factories_info.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01656_test_query_log_factories_info.reference b/tests/queries/0_stateless/01656_test_query_log_factories_info.reference index 47b3133ceca..44531c19ab7 100644 --- a/tests/queries/0_stateless/01656_test_query_log_factories_info.reference +++ b/tests/queries/0_stateless/01656_test_query_log_factories_info.reference @@ -17,7 +17,7 @@ used_functions ['repeat'] arraySort(used_data_type_families) -['Array','Int32','Nullable','String'] +['Int32','Nullable','String'] used_database_engines ['Atomic'] From a74cc601cd11ab32df61c46f9950c051fd8bb4da Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 16:04:27 +0200 Subject: [PATCH 1998/2361] A catch-all URL style for S3 --- src/IO/S3/URI.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index 64962f63edb..9c80b377661 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -136,7 +136,16 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) validateBucket(bucket, uri); } else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket or key name are invalid in S3 URI."); + { + /// Custom endpoint, e.g. a public domain of Cloudflare R2, + /// which could be served by a custom server-side code. + storage_name = "S3"; + bucket = "default"; + is_virtual_hosted_style = false; + endpoint = uri.getScheme() + "://" + uri.getAuthority(); + if (!uri.getPath().empty()) + key = uri.getPath().substr(1); + } } void URI::addRegionToURI(const std::string ®ion) From 43a38fb5f0563f50d38cf5d988db3b181b64f606 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 9 Aug 2024 15:11:08 +0100 Subject: [PATCH 1999/2361] rm redundant file --- programs/server/config.d/listen.xml | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 programs/server/config.d/listen.xml diff --git a/programs/server/config.d/listen.xml b/programs/server/config.d/listen.xml deleted file mode 100644 index f94e5c88568..00000000000 --- a/programs/server/config.d/listen.xml +++ /dev/null @@ -1,3 +0,0 @@ - - :: - From 9a6d98cd203ee9a6cdde4450aaf72d109a9719c7 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 16:15:28 +0200 Subject: [PATCH 2000/2361] Update test --- .../integration/test_odbc_interaction/test.py | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py index 0d0d7a0afb1..9d4ca5ad49f 100644 --- a/tests/integration/test_odbc_interaction/test.py +++ b/tests/integration/test_odbc_interaction/test.py @@ -51,9 +51,9 @@ create_table_sql_nullable_template = """ """ -def skip_test_msan(instance): - if instance.is_built_with_memory_sanitizer(): - pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") +def skip_test_sanitizers(instance): + if instance.is_built_with_sanitizer(): + pytest.skip("Sanitizers cannot work with third-party shared libraries") def get_mysql_conn(): @@ -208,7 +208,7 @@ def started_cluster(): def test_mysql_odbc_select_nullable(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) mysql_setup = node1.odbc_drivers["MySQL"] table_name = "test_insert_nullable_select" @@ -248,7 +248,7 @@ def test_mysql_odbc_select_nullable(started_cluster): def test_mysql_simple_select_works(started_cluster): - skip_test_msan(node1) + 
skip_test_sanitizers(node1) mysql_setup = node1.odbc_drivers["MySQL"] @@ -331,7 +331,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nulla def test_mysql_insert(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) mysql_setup = node1.odbc_drivers["MySQL"] table_name = "test_insert" @@ -374,7 +374,7 @@ def test_mysql_insert(started_cluster): def test_sqlite_simple_select_function_works(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] @@ -438,7 +438,7 @@ def test_sqlite_simple_select_function_works(started_cluster): def test_sqlite_table_function(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] @@ -470,7 +470,7 @@ def test_sqlite_table_function(started_cluster): def test_sqlite_simple_select_storage_works(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] @@ -503,7 +503,7 @@ def test_sqlite_simple_select_storage_works(started_cluster): def test_sqlite_odbc_hashed_dictionary(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] node1.exec_in_container( @@ -586,7 +586,7 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster): def test_sqlite_odbc_cached_dictionary(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] node1.exec_in_container( @@ -635,7 +635,7 @@ def test_sqlite_odbc_cached_dictionary(started_cluster): def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -663,7 +663,7 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -685,7 +685,7 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): def test_no_connection_pooling(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -717,7 +717,7 @@ def test_no_connection_pooling(started_cluster): def test_postgres_insert(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) @@ -754,7 +754,7 @@ def test_postgres_insert(started_cluster): def test_odbc_postgres_date_data_type(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -783,7 +783,7 @@ def test_odbc_postgres_date_data_type(started_cluster): def test_odbc_postgres_conversions(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -841,7 +841,7 @@ def test_odbc_postgres_conversions(started_cluster): def test_odbc_cyrillic_with_varchar(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -868,7 +868,7 @@ def test_odbc_cyrillic_with_varchar(started_cluster): def test_many_connections(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = 
get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -894,7 +894,7 @@ def test_many_connections(started_cluster): def test_concurrent_queries(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -948,7 +948,7 @@ def test_concurrent_queries(started_cluster): def test_odbc_long_column_names(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -986,7 +986,7 @@ def test_odbc_long_column_names(started_cluster): def test_odbc_long_text(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() From 9d91b600caa0d5cddcf4d13a8c80ac85c9077fca Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 16:27:35 +0200 Subject: [PATCH 2001/2361] Add a test --- tests/integration/test_s3_imds/test_simple.py | 2 +- .../03221_s3_imds_decent_timeout.reference | 1 + .../0_stateless/03221_s3_imds_decent_timeout.sh | 16 ++++++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference create mode 100755 tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh diff --git a/tests/integration/test_s3_imds/test_simple.py b/tests/integration/test_s3_imds/test_simple.py index 0dacac2b0b9..4884c824f99 100644 --- a/tests/integration/test_s3_imds/test_simple.py +++ b/tests/integration/test_s3_imds/test_simple.py @@ -56,7 +56,7 @@ def test_credentials_from_metadata(): ) expected_logs = [ - "Calling EC2MetadataService to get token failed, falling back to less secure way", + "Calling EC2MetadataService to get token failed, falling back to a less secure way", "Getting default credentials for ec2 instance from resolver:8080", "Calling EC2MetadataService resource, /latest/meta-data/iam/security-credentials returned credential string myrole", "Calling EC2MetadataService resource /latest/meta-data/iam/security-credentials/myrole", diff --git a/tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh new file mode 100755 index 00000000000..fb55539d04a --- /dev/null +++ b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# ^ requires S3 + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Inaccessible IMDS should not introduce large delays, so this query should reply quickly at least sometimes: +while true +do + # This host (likely) drops packets sent to it (does not reply), so it is good for testing timeouts. + # At the same time, we expect that google.com does not drop packets and quickly replies with 404, which is a non-retriable error for S3. 
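+    # What the pipeline below does: `--time` makes clickhouse-local report the elapsed time,
+    # the expected 404 error line is filtered out, and the second clickhouse-local parses the
+    # remaining value and prints 1 when it is below one second, which ends the retry loop.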
+ AWS_EC2_METADATA_SERVICE_ENDPOINT='https://10.255.255.255/' ${CLICKHOUSE_LOCAL} --time --query "SELECT * FROM s3('https://google.com/test')" |& grep -v -F 404 | + ${CLICKHOUSE_LOCAL} --input-format TSV "SELECT c1::Float64 < 1 FROM table" | grep 1 && break +done From 79cfffbaf88c3c34213d5144fd20f7888b6cabe0 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 9 Aug 2024 15:57:16 +0100 Subject: [PATCH 2002/2361] Even bigger queue --- docker/test/stateless/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index acdcda753d7..a030be92506 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -193,8 +193,8 @@ ORDER BY tuple()" # create minio log webhooks for both audit and server logs # use async inserts to avoid creating too many parts -./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=300000 -./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=300000 +./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=1000000 +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=1000000 max_retries=100 retry=1 From e3fded6c94b7fe81af750a6976d704dbeea7a4a5 Mon Sep 17 00:00:00 2001 From: Aleksei Filatov Date: Fri, 9 Aug 2024 15:13:55 +0000 Subject: [PATCH 2003/2361] Log peration error for zk multi request --- src/Common/ZooKeeper/ZooKeeper.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 064ac2261ec..1a9ed4f1ee7 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -1570,7 +1570,7 @@ size_t getFailedOpIndex(Coordination::Error exception_code, const Coordination:: KeeperMultiException::KeeperMultiException(Coordination::Error exception_code, size_t failed_op_index_, const Coordination::Requests & requests_, const Coordination::Responses & responses_) - : KeeperException(exception_code, "Transaction failed: Op #{}, path", failed_op_index_), + : KeeperException(exception_code, "Transaction failed ({}): Op #{}, path", exception_code, failed_op_index_), requests(requests_), responses(responses_), failed_op_index(failed_op_index_) { addMessage(getPathForFirstFailedOp()); From 97eded0ac7aa41a9320729b418c8ab2ff1821202 Mon Sep 17 00:00:00 2001 From: kruglov 
Date: Fri, 9 Aug 2024 17:38:24 +0300 Subject: [PATCH 2004/2361] Fixed test_dependent_loading. event_time_microseconds has two dates connected with "\n" --- .../test_postgresql_replica_database_engine_2/test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 75edb22aab1..7fdd17625a9 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -1127,9 +1127,13 @@ def test_dependent_loading(started_cluster): nested_time = instance.query( f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{uuid}_nested' and message not like '%like%'" ).strip() - time = instance.query( - f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" - ).strip() + time = ( + instance.query( + f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" + ) + .strip() + .split("\n")[-1] + ) instance.query( f"SELECT toDateTime64('{nested_time}', 6) < toDateTime64('{time}', 6)" ) From c13d348d1e8a467b9d16fc83214ef574752092e0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 19:56:50 +0200 Subject: [PATCH 2005/2361] Fix test `00900_long_parquet_load` --- tests/queries/0_stateless/00900_long_parquet_load.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00900_long_parquet_load.sh b/tests/queries/0_stateless/00900_long_parquet_load.sh index 1bafb033f56..3a7022ac0cf 100755 --- a/tests/queries/0_stateless/00900_long_parquet_load.sh +++ b/tests/queries/0_stateless/00900_long_parquet_load.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-fasttest, no-debug +# Tags: long, no-fasttest, no-debug, no-asan, no-msan, no-tsan # # Load all possible .parquet files found in submodules. From a25accdae3cf420beaeb8ca25a9ac32070c397b8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 20:20:09 +0200 Subject: [PATCH 2006/2361] Fix a test --- programs/client/Client.cpp | 3 +++ src/Client/ClientBase.cpp | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 1d99d223ee9..631914dee5c 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1164,6 +1164,9 @@ void Client::processOptions(const OptionsDescription & options_description, /// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.) client_context = global_context; initClientContext(); + + /// Allow to pass-through unknown settings to the server. + client_context->getAccessControl().allowAllSettings(); } diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index a00a9499237..473db8e9678 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -689,9 +689,6 @@ void ClientBase::initClientContext() client_context->setQueryKindInitial(); client_context->setQueryKind(query_kind); client_context->setQueryParameters(query_parameters); - - /// Allow to pass-through unknown settings to the server. 
- client_context->getAccessControl().allowAllSettings(); } bool ClientBase::isRegularFile(int fd) From 776eb7ca75b50ee11da5b26d9dd0bb7544916151 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 20:38:24 +0200 Subject: [PATCH 2007/2361] Fix unit test --- src/Core/MySQL/MySQLGtid.cpp | 2 -- src/Core/MySQL/tests/gtest_MySQLGtid.cpp | 17 ++++++++--------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/Core/MySQL/MySQLGtid.cpp b/src/Core/MySQL/MySQLGtid.cpp index 7916f882979..28b583a0cfe 100644 --- a/src/Core/MySQL/MySQLGtid.cpp +++ b/src/Core/MySQL/MySQLGtid.cpp @@ -24,9 +24,7 @@ void GTIDSet::tryMerge(size_t i) void GTIDSets::parse(String gtid_format) { if (gtid_format.empty()) - { return; - } std::vector gtid_sets; boost::split(gtid_sets, gtid_format, [](char c) { return c == ','; }); diff --git a/src/Core/MySQL/tests/gtest_MySQLGtid.cpp b/src/Core/MySQL/tests/gtest_MySQLGtid.cpp index e31a87aaa39..e5a2fe44e5c 100644 --- a/src/Core/MySQL/tests/gtest_MySQLGtid.cpp +++ b/src/Core/MySQL/tests/gtest_MySQLGtid.cpp @@ -10,20 +10,19 @@ GTEST_TEST(GTIDSetsContains, Tests) contained1, contained2, contained3, contained4, contained5, not_contained1, not_contained2, not_contained3, not_contained4, not_contained5, not_contained6; - gtid_set.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60"); - contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60"); + gtid_set.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60"); + contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60"); contained2.parse("2174B383-5441-11E8-B90A-C80AA9429562:2-3:11:47-49"); contained3.parse("2174B383-5441-11E8-B90A-C80AA9429562:11"); - contained4.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:2-16:47-49:60"); - contained5.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:60"); + contained4.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:2-16:47-49:60"); + contained5.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:60"); - not_contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-50, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60"); + not_contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-50, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60"); not_contained2.parse("2174B383-5441-11E8-B90A-C80AA9429562:0-3:11:47-49"); not_contained3.parse("2174B383-5441-11E8-B90A-C80AA9429562:99"); - not_contained4.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:2-16:46-49:60"); - not_contained5.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:99"); - not_contained6.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60, 00000000-0000-0000-0000-000000000000"); - + not_contained4.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:2-16:46-49:60"); + not_contained5.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:99"); + not_contained6.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60, 00000000-0000-0000-0000-000000000000"); ASSERT_TRUE(gtid_set.contains(contained1)); ASSERT_TRUE(gtid_set.contains(contained2)); From c65c3768baad44a7f8a58913385acb6cfd369735 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 21:15:18 +0200 Subject: [PATCH 2008/2361] Fix .NET --- src/IO/ReadHelpers.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/IO/ReadHelpers.h 
b/src/IO/ReadHelpers.h index 753560620e3..580aa51b238 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -258,6 +258,14 @@ inline void readBoolText(bool & x, ReadBuffer & buf) char tmp = '0'; readChar(tmp, buf); x = tmp != '0'; + + if (!buf.eof() && isAlphaASCII(*buf.position())) + { + if (tmp == 't') + assertString("rue", buf); + else if (tmp == 'T') + assertString("RUE", buf); + } } template From b42159479145664628cf876cda61012e2f56b21d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 21:15:27 +0200 Subject: [PATCH 2009/2361] Fix backups --- src/Access/AccessBackup.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Access/AccessBackup.cpp b/src/Access/AccessBackup.cpp index 90effdab70f..d9ee89b45ce 100644 --- a/src/Access/AccessBackup.cpp +++ b/src/Access/AccessBackup.cpp @@ -93,7 +93,7 @@ namespace break; } - UUID id = parse(line); + UUID id = parse(line.substr(0, line.find('\t'))); line.clear(); String queries; From ae5982f92affaaa664899c31b3507d893b8773aa Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 21:42:16 +0200 Subject: [PATCH 2010/2361] Fix crap --- src/Common/parseRemoteDescription.cpp | 15 ++++++++----- src/Storages/StorageExternalDistributed.cpp | 24 ++++++++++----------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp index df3820b11f9..6a53098362d 100644 --- a/src/Common/parseRemoteDescription.cpp +++ b/src/Common/parseRemoteDescription.cpp @@ -79,11 +79,16 @@ std::vector parseRemoteDescription( /// Look for the corresponding closing bracket for (m = i + 1; m < r; ++m) { - if (description[m] == '{') ++cnt; - if (description[m] == '}') --cnt; - if (description[m] == '.' && description[m-1] == '.') last_dot = m; - if (description[m] == separator) have_splitter = true; - if (cnt == 0) break; + if (description[m] == '{') + ++cnt; + if (description[m] == '}') + --cnt; + if (description[m] == '.' 
&& description[m-1] == '.') + last_dot = m; + if (description[m] == separator) + have_splitter = true; + if (cnt == 0) + break; } if (cnt != 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function '{}': incorrect brace sequence in first argument", func_name); diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index 951c87807bb..4277387bc5d 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -111,15 +111,17 @@ void registerStorageExternalDistributed(StorageFactory & factory) std::unordered_set shards; ASTs inner_engine_args(engine_args.begin() + 1, engine_args.end()); + String addresses_expr = checkAndGetLiteralArgument(engine_args[1], "addresses"); + Strings shards_addresses = get_addresses(addresses_expr); auto engine_name = checkAndGetLiteralArgument(engine_args[0], "engine_name"); if (engine_name == "URL") { - auto configuration = StorageURL::getConfiguration(inner_engine_args, context); - auto shards_addresses = get_addresses(configuration.addresses_expr); auto format_settings = StorageURL::getFormatSettingsFromArgs(args); for (const auto & shard_address : shards_addresses) { + inner_engine_args.at(0) = std::make_shared(shard_address); + auto configuration = StorageURL::getConfiguration(inner_engine_args, context); auto uri_options = parseRemoteDescription(shard_address, 0, shard_address.size(), '|', max_addresses); if (uri_options.size() > 1) { @@ -140,13 +142,12 @@ void registerStorageExternalDistributed(StorageFactory & factory) else if (engine_name == "MySQL") { MySQLSettings mysql_settings; - auto configuration = StorageMySQL::getConfiguration(inner_engine_args, context, mysql_settings); - auto shards_addresses = get_addresses(configuration.addresses_expr); for (const auto & shard_address : shards_addresses) { - auto current_configuration{configuration}; - current_configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 3306); - auto pool = createMySQLPoolWithFailover(current_configuration, mysql_settings); + inner_engine_args.at(0) = std::make_shared(shard_address); + auto configuration = StorageMySQL::getConfiguration(inner_engine_args, context, mysql_settings); + configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 3306); + auto pool = createMySQLPoolWithFailover(configuration, mysql_settings); shards.insert(std::make_shared( args.table_id, std::move(pool), configuration.database, configuration.table, /* replace_query = */ false, /* on_duplicate_clause = */ "", @@ -157,14 +158,13 @@ void registerStorageExternalDistributed(StorageFactory & factory) #if USE_LIBPQXX else if (engine_name == "PostgreSQL") { - auto configuration = StoragePostgreSQL::getConfiguration(inner_engine_args, context); - auto shards_addresses = get_addresses(configuration.addresses_expr); for (const auto & shard_address : shards_addresses) { - auto current_configuration{configuration}; - current_configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 5432); + inner_engine_args.at(0) = std::make_shared(shard_address); + auto configuration = StoragePostgreSQL::getConfiguration(inner_engine_args, context); + configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 5432); auto pool = std::make_shared( - current_configuration, + configuration, settings.postgresql_connection_pool_size, settings.postgresql_connection_pool_wait_timeout, 
settings.postgresql_connection_pool_retries, From f4173546a93ab72986d1239a812ac0bc7fda11cd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 9 Aug 2024 21:47:01 +0200 Subject: [PATCH 2011/2361] Remove obsolete test --- src/IO/tests/gtest_s3_uri.cpp | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp index 0ec28f80072..7216c8077e3 100644 --- a/src/IO/tests/gtest_s3_uri.cpp +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -206,11 +206,6 @@ TEST(S3UriTest, validPatterns) } } -TEST_P(S3UriTest, invalidPatterns) -{ - ASSERT_ANY_THROW(S3::URI new_uri(GetParam())); -} - TEST(S3UriTest, versionIdChecks) { for (const auto& test_case : TestCases) @@ -223,19 +218,4 @@ TEST(S3UriTest, versionIdChecks) } } -INSTANTIATE_TEST_SUITE_P( - S3, - S3UriTest, - testing::Values( - "https:///", - "https://.s3.amazonaws.com/key", - "https://s3.amazonaws.com/key", - "https://jokserfn.s3amazonaws.com/key", - "https://s3.amazonaws.com//", - "https://amazonaws.com/", - "https://amazonaws.com//", - "https://amazonaws.com//key")); - -} - #endif From b5afddb1af0a9aeb4738cf3fb7b7242361469028 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Fri, 9 Aug 2024 22:56:25 +0200 Subject: [PATCH 2012/2361] Update optimize_functions_to_subcolumns.xml --- tests/performance/optimize_functions_to_subcolumns.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/performance/optimize_functions_to_subcolumns.xml b/tests/performance/optimize_functions_to_subcolumns.xml index a246aae7950..146af1605c4 100644 --- a/tests/performance/optimize_functions_to_subcolumns.xml +++ b/tests/performance/optimize_functions_to_subcolumns.xml @@ -1,6 +1,5 @@ - 1 4 From 61befa33a40d4ec78ba124107e700d14f9be9599 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 9 Aug 2024 21:10:05 +0000 Subject: [PATCH 2013/2361] Fix crash on parquet column type mismatch --- .../Formats/Impl/ParquetBlockInputFormat.cpp | 108 ++++++++++++------ .../02841_parquet_filter_pushdown.reference | 2 + .../02841_parquet_filter_pushdown.sql | 6 + 3 files changed, 83 insertions(+), 33 deletions(-) diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index bc5e8292192..1268f1df5f6 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -25,6 +25,7 @@ #include #include #include +#include namespace CurrentMetrics { @@ -54,7 +55,7 @@ namespace ErrorCodes } \ } while (false) -/// Decode min/max value from column chunk statistics. +/// Decode min/max value from column chunk statistics. Returns Null if missing or unsupported. /// /// There are two questionable decisions in this implementation: /// * We parse the value from the encoded byte string instead of casting the parquet::Statistics @@ -62,7 +63,7 @@ namespace ErrorCodes /// * We dispatch based on the parquet logical+converted+physical type instead of the ClickHouse type. /// The idea is that this is similar to what we'll have to do when reimplementing Parquet parsing in /// ClickHouse instead of using Arrow (for speed). So, this is an exercise in parsing Parquet manually. 
-static std::optional decodePlainParquetValueSlow(const std::string & data, parquet::Type::type physical_type, const parquet::ColumnDescriptor & descr) +static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type::type physical_type, const parquet::ColumnDescriptor & descr) { using namespace parquet; @@ -118,8 +119,6 @@ static std::optional decodePlainParquetValueSlow(const std::string & data if (data.size() != size || size < 1 || size > 32) throw Exception(ErrorCodes::CANNOT_PARSE_NUMBER, "Unexpected decimal size: {} (actual {})", size, data.size()); - /// For simplicity, widen all decimals to 256-bit. It should compare correctly with values - /// of different bitness. Int256 val = 0; memcpy(&val, data.data(), size); if (big_endian) @@ -128,7 +127,20 @@ static std::optional decodePlainParquetValueSlow(const std::string & data if (size < 32 && (val >> (size * 8 - 1)) != 0) val |= ~((Int256(1) << (size * 8)) - 1); - return Field(DecimalField(Decimal256(val), static_cast(scale))); + auto narrow = [&]() -> Field { + using T = typename D::NativeType; + T x = 0; + memcpy(&x, &val, sizeof(T)); + return Field(DecimalField(D(x), static_cast(scale))); + }; + if (size <= 4) + return narrow.template operator()(); + else if (size <= 8) + return narrow.template operator()(); + else if (size <= 16) + return narrow.template operator()(); + else + return narrow.template operator()(); } while (false); @@ -213,14 +225,15 @@ static std::optional decodePlainParquetValueSlow(const std::string & data /// TODO: Remove this workaround either when we implement our own Parquet decoder that /// doesn't have this bug, or if it's fixed in Arrow. if (data.empty()) - return std::nullopt; + return Field(); return Field(data); } - /// This one's deprecated in Parquet. + /// This type is deprecated in Parquet. + /// TODO: But turns out it's still used in practice, we should support it. if (physical_type == Type::type::INT96) - throw Exception(ErrorCodes::CANNOT_PARSE_NUMBER, "Parquet INT96 type is deprecated and not supported"); + return Field(); /// Integers. @@ -283,15 +296,12 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa continue; auto stats = it->second; - auto default_value = [&]() -> Field - { - DataTypePtr type = header.getByPosition(idx).type; - if (type->lowCardinality()) - type = assert_cast(*type).getDictionaryType(); - if (type->isNullable()) - type = assert_cast(*type).getNestedType(); - return type->getDefault(); - }; + DataTypePtr type = header.getByPosition(idx).type; + if (type->lowCardinality()) + type = assert_cast(*type).getDictionaryType(); + if (type->isNullable()) + type = assert_cast(*type).getNestedType(); + Field default_value = type->getDefault(); /// Only primitive fields are supported, not arrays, maps, tuples, or Nested. /// Arrays, maps, and Nested can't be meaningfully supported because Parquet only has min/max @@ -299,8 +309,8 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa /// Same limitation for tuples, but maybe it would make sense to have some kind of tuple /// expansion in KeyCondition to accept ranges per element instead of whole tuple. - std::optional min; - std::optional max; + Field min; + Field max; if (stats->HasMinMax()) { try @@ -315,6 +325,39 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa } } + /// If the data type in parquet file substantially differs from the requested data type, + /// it's sometimes correct to just typecast the min/max values. 
+ /// Other times it's incorrect, e.g.: + /// INSERT INTO FUNCTION file('t.parquet', Parquet, 'x String') VALUES ('1'), ('100'), ('2'); + /// SELECT * FROM file('t.parquet', Parquet, 'x Int64') WHERE x >= 3; + /// If we just typecast min/max from string to integer, this query will incorrectly return empty result. + /// Allow conversion in some simple cases and ignore the min/max values otherwise. + auto min_type = min.getType(); + auto max_type = max.getType(); + min = convertFieldToType(min, *type); + max = convertFieldToType(max, *type); + auto ok_cast = [&](Field::Types::Which from, Field::Types::Which to) -> bool + { + if (from == to) + return true; + /// Decimal -> wider decimal. + if (Field::isDecimal(from) || Field::isDecimal(to)) + return Field::isDecimal(from) && Field::isDecimal(to) && to >= from; + /// Integer -> IP. + if (to == Field::Types::IPv4 || to == Field::Types::IPv6) + return from == Field::Types::UInt64 || from == Field::Types::Int64; + /// Disable index for everything else, especially string <-> number. + return false; + }; + if (!(ok_cast(min_type, min.getType()) && ok_cast(max_type, max.getType())) && + !(min == max) && + !(min_type == Field::Types::Int64 && min.getType() == Field::Types::UInt64 && min.get() >= 0) && + !(max_type == Field::Types::UInt64 && max.getType() == Field::Types::Int64 && max.get() <= UInt64(INT64_MAX))) + { + min = Field(); + max = Field(); + } + /// In Range, NULL is represented as positive or negative infinity (represented by a special /// kind of Field, different from floating-point infinities). @@ -328,7 +371,7 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa { /// Single-point range containing either the default value of one of the infinities. if (null_as_default) - hyperrectangle[idx].right = hyperrectangle[idx].left = default_value(); + hyperrectangle[idx].right = hyperrectangle[idx].left = default_value; else hyperrectangle[idx].right = hyperrectangle[idx].left; continue; @@ -339,32 +382,31 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa if (null_as_default) { /// Make sure the range contains the default value. - Field def = default_value(); - if (min.has_value() && applyVisitor(FieldVisitorAccurateLess(), def, *min)) - min = def; - if (max.has_value() && applyVisitor(FieldVisitorAccurateLess(), *max, def)) - max = def; + if (!min.isNull() && applyVisitor(FieldVisitorAccurateLess(), default_value, min)) + min = default_value; + if (!max.isNull() && applyVisitor(FieldVisitorAccurateLess(), max, default_value)) + max = default_value; } else { /// Make sure the range reaches infinity on at least one side. - if (min.has_value() && max.has_value()) - min.reset(); + if (!min.isNull() && !max.isNull()) + min = Field(); } } else { /// If the column doesn't have nulls, exclude both infinities. 
- if (!min.has_value()) + if (min.isNull()) hyperrectangle[idx].left_included = false; - if (!max.has_value()) + if (max.isNull()) hyperrectangle[idx].right_included = false; } - if (min.has_value()) - hyperrectangle[idx].left = std::move(min.value()); - if (max.has_value()) - hyperrectangle[idx].right = std::move(max.value()); + if (!min.isNull()) + hyperrectangle[idx].left = std::move(min); + if (!max.isNull()) + hyperrectangle[idx].right = std::move(max); } return hyperrectangle; diff --git a/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference b/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference index 4adf418bcc7..8003b9cb626 100644 --- a/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference +++ b/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference @@ -71,3 +71,5 @@ d256 Nullable(Decimal(76, 40)) 500 244750 500 244750 500 244750 +42 +100 diff --git a/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql b/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql index 950485d53f0..52caee50b32 100644 --- a/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql +++ b/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql @@ -131,3 +131,9 @@ select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, select count(), sum(number) from file('02841.parquet') where indexHint(string_or_null == ''); -- quirk with infinities select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, string_or_null String') where indexHint(string_or_null == ''); select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, nEgAtIvE_oR_nUlL Int64') where indexHint(nEgAtIvE_oR_nUlL > -50) settings input_format_parquet_case_insensitive_column_matching = 1; + +-- Bad type conversions. 
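+-- The queries below read columns under a type that differs from the one stored in the file; row-group
+-- min/max statistics must be ignored in that case rather than typecast, otherwise the lexicographic
+-- String min/max ('1' and '2') would wrongly prune the row group that actually contains the value 100.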
+insert into function file('02841.parquet') select 42 as x; +select * from file('02841.parquet', Parquet, 'x Nullable(String)') where x not in (1); +insert into function file('t.parquet', Parquet, 'x String') values ('1'), ('100'), ('2'); +select * from file('t.parquet', Parquet, 'x Int64') where x >= 3; From c61eef4a7659e4856cc3266d8d7dd28b4e095d2b Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 9 Aug 2024 21:17:49 +0000 Subject: [PATCH 2014/2361] Reimplement Dynamic type --- docs/en/sql-reference/data-types/dynamic.md | 123 +- src/Columns/ColumnDynamic.cpp | 881 ++++-- src/Columns/ColumnDynamic.h | 164 +- src/Columns/ColumnVariant.cpp | 29 +- src/Columns/ColumnVariant.h | 11 +- src/Columns/tests/gtest_column_dynamic.cpp | 428 ++- src/DataTypes/DataTypeDynamic.cpp | 79 +- src/DataTypes/DataTypeFactory.cpp | 6 + src/DataTypes/DataTypeFactory.h | 1 + src/DataTypes/DataTypeTuple.cpp | 13 +- src/DataTypes/DataTypesBinaryEncoding.cpp | 2 +- .../Serializations/SerializationDynamic.cpp | 361 ++- .../Serializations/SerializationDynamic.h | 8 +- .../SerializationDynamicElement.cpp | 127 +- .../SerializationDynamicElement.h | 8 +- .../SerializationVariantElement.cpp | 41 +- .../SerializationVariantElement.h | 6 +- src/Formats/JSONExtractTree.cpp | 31 +- src/Functions/FunctionsConversion.cpp | 326 +-- src/Functions/dynamicType.cpp | 91 +- tests/queries/0_stateless/00000_test.sql | 43 + ...03033_dynamic_text_serialization.reference | 26 +- .../03033_dynamic_text_serialization.sql | 4 +- .../03034_dynamic_conversions.reference | 18 +- .../0_stateless/03034_dynamic_conversions.sql | 6 +- .../03035_dynamic_sorting.reference | 715 +++-- .../0_stateless/03035_dynamic_sorting.sql | 67 +- ...ed_subcolumns_compact_merge_tree.reference | 20 + ...d_shared_subcolumns_compact_merge_tree.sql | 43 + ...ic_read_shared_subcolumns_memory.reference | 20 + ..._dynamic_read_shared_subcolumns_memory.sql | 43 + ..._read_shared_subcolumns_small.reference.j2 | 2460 +++++++++++++++++ ...ynamic_read_shared_subcolumns_small.sql.j2 | 46 + ...hared_subcolumns_wide_merge_tree.reference | 20 + ...read_shared_subcolumns_wide_merge_tree.sql | 43 + ..._1_horizontal_compact_merge_tree.reference | 94 +- ...merges_1_horizontal_compact_merge_tree.sql | 29 +- ...s_1_horizontal_compact_wide_tree.reference | 94 +- ..._merges_1_horizontal_compact_wide_tree.sql | 28 +- ...es_1_vertical_compact_merge_tree.reference | 94 +- ...c_merges_1_vertical_compact_merge_tree.sql | 31 +- ...erges_1_vertical_wide_merge_tree.reference | 94 +- ...amic_merges_1_vertical_wide_merge_tree.sql | 28 +- .../03037_dynamic_merges_small.reference.j2 | 376 ++- .../03037_dynamic_merges_small.sql.j2 | 28 +- ...ynamic_merges_compact_horizontal.reference | 84 +- ...sted_dynamic_merges_compact_horizontal.sql | 33 +- ..._dynamic_merges_compact_vertical.reference | 84 +- ...nested_dynamic_merges_compact_vertical.sql | 33 +- ...8_nested_dynamic_merges_small.reference.j2 | 340 ++- .../03038_nested_dynamic_merges_small.sql.j2 | 36 +- ...d_dynamic_merges_wide_horizontal.reference | 84 +- ..._nested_dynamic_merges_wide_horizontal.sql | 33 +- ...ted_dynamic_merges_wide_vertical.reference | 84 +- ...38_nested_dynamic_merges_wide_vertical.sql | 33 +- ...type_alters_1_compact_merge_tree.reference | 123 +- ...namic_type_alters_1_compact_merge_tree.sql | 20 +- ...040_dynamic_type_alters_1_memory.reference | 94 +- ...ic_type_alters_1_wide_merge_tree.reference | 157 +- .../03041_dynamic_type_check_table.sh | 2 +- .../03150_dynamic_type_mv_insert.reference | 50 +- 
.../03150_dynamic_type_mv_insert.sql | 7 + ...151_dynamic_type_scale_max_types.reference | 48 +- .../03151_dynamic_type_scale_max_types.sql | 8 +- .../03152_dynamic_type_simple.reference | 2 +- .../0_stateless/03152_dynamic_type_simple.sql | 8 + .../0_stateless/03153_dynamic_type_empty.sql | 2 + .../03159_dynamic_type_all_types.sql | 4 +- .../03172_dynamic_binary_serialization.sh | 4 +- ...3200_memory_engine_alter_dynamic.reference | 20 +- .../03200_memory_engine_alter_dynamic.sql | 4 +- 71 files changed, 6725 insertions(+), 1878 deletions(-) create mode 100644 tests/queries/0_stateless/00000_test.sql create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference create mode 100644 tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql diff --git a/docs/en/sql-reference/data-types/dynamic.md b/docs/en/sql-reference/data-types/dynamic.md index 8be81471377..f9befd166fe 100644 --- a/docs/en/sql-reference/data-types/dynamic.md +++ b/docs/en/sql-reference/data-types/dynamic.md @@ -14,7 +14,7 @@ To declare a column of `Dynamic` type, use the following syntax: Dynamic(max_types=N) ``` -Where `N` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_types` is `32`. +Where `N` is an optional parameter between `0` and `254` indicating how many different data types can be stored as separate subcolumns inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all values with new types will be stored together in a special shared data structure in binary form. Default value of `max_types` is `32`. :::note The Dynamic data type is an experimental feature. To use it, set `allow_experimental_dynamic_type = 1`. 
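For illustration only, a minimal sketch of declaring a column that uses this parameter (the table name, engine and inserted values here are arbitrary, and the experimental setting from the note above is assumed to be enabled):

```sql
SET allow_experimental_dynamic_type = 1;
CREATE TABLE example (d Dynamic(max_types=8)) ENGINE = Memory;
INSERT INTO example VALUES (42), ('Hello, World!'), ([1, 2, 3]);
```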
@@ -224,41 +224,43 @@ SELECT d::Dynamic(max_types=5) as d2, dynamicType(d2) FROM test; └───────┴────────────────┘ ``` -If `K < N`, then the values with the rarest types are converted to `String`: +If `K < N`, then the values with the rarest types will be inserted into a single special subcolumn, but still will be accessible: ```text CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory; INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]); -SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2) FROM test; +SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test; ``` ```text -┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┠-│ á´ºáµá´¸á´¸ │ None │ á´ºáµá´¸á´¸ │ None │ -│ 42 │ Int64 │ 42 │ Int64 │ -│ 43 │ Int64 │ 43 │ Int64 │ -│ 42.42 │ String │ 42.42 │ String │ -│ true │ Bool │ true │ String │ -│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String │ -└─────────┴────────────────┴─────────┴─────────────────┘ +┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─┠+│ á´ºáµá´¸á´¸ │ None │ á´ºáµá´¸á´¸ │ None │ false │ +│ 42 │ Int64 │ 42 │ Int64 │ false │ +│ 43 │ Int64 │ 43 │ Int64 │ false │ +│ 42.42 │ String │ 42.42 │ String │ false │ +│ true │ Bool │ true │ Bool │ true │ +│ [1,2,3] │ Array(Int64) │ [1,2,3] │ Array(Int64) │ true │ +└─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────┘ ``` -If `K=1`, all types are converted to `String`: +Functions `isDynamicElementInSharedData` returns `true` for rows that are stored in a special shared data structure inside `Dynamic` and as we can see, resulting column contains only 2 types that are not stored in shared data structure. + +If `K=0`, all types will be inserted into single special subcolumn: ```text CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory; INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]); -SELECT d, dynamicType(d), d::Dynamic(max_types=1) as d2, dynamicType(d2) FROM test; +SELECT d, dynamicType(d), d::Dynamic(max_types=0) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test; ``` ```text -┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┠-│ á´ºáµá´¸á´¸ │ None │ á´ºáµá´¸á´¸ │ None │ -│ 42 │ Int64 │ 42 │ String │ -│ 43 │ Int64 │ 43 │ String │ -│ 42.42 │ String │ 42.42 │ String │ -│ true │ Bool │ true │ String │ -│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String │ -└─────────┴────────────────┴─────────┴─────────────────┘ +┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─┠+│ á´ºáµá´¸á´¸ │ None │ á´ºáµá´¸á´¸ │ None │ false │ +│ 42 │ Int64 │ 42 │ Int64 │ true │ +│ 43 │ Int64 │ 43 │ Int64 │ true │ +│ 42.42 │ String │ 42.42 │ String │ true │ +│ true │ Bool │ true │ Bool │ true │ +│ [1,2,3] │ Array(Int64) │ [1,2,3] │ Array(Int64) │ true │ +└─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────┘ ``` ## Reading Dynamic type from the data @@ -411,17 +413,17 @@ SELECT d, dynamicType(d) FROM test ORDER by d; ## Reaching the limit in number of different data types stored inside Dynamic -`Dynamic` data type can store only limited number of different data types inside. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 1 and 255 (due to implementation details, it's impossible to have more than 255 different data types inside Dynamic). 
-When the limit is reached, all new data types inserted to `Dynamic` column will be casted to `String` and stored as `String` values.
+The `Dynamic` data type can store only a limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in the type declaration using the syntax `Dynamic(max_types=N)`, where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types that can be stored as separate subcolumns inside Dynamic).
+When the limit is reached, all new data types inserted into the `Dynamic` column will be inserted into a single shared data structure that stores values with different data types in binary form.
 
 Let's see what happens when the limit is reached in different scenarios.
 
 ### Reaching the limit during data parsing
 
-During parsing of `Dynamic` values from the data, when the limit is reached for current block of data, all new values will be inserted as `String` values:
+During parsing of `Dynamic` values from the data, when the limit is reached for the current block of data, all new values will be inserted into the shared data structure:
 
 ```sql
-SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
+SELECT d, dynamicType(d), isDynamicElementInSharedData(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', '
 {"d" : 42}
 {"d" : [1, 2, 3]}
 {"d" : "Hello, World!"}
@@ -432,22 +434,22 @@
 ```
 
 ```text
-┌─d──────────────────────────┬─dynamicType(d)─┐
-│ 42                         │ Int64          │
-│ [1,2,3]                    │ Array(Int64)   │
-│ Hello, World!              │ String         │
-│ 2020-01-01                 │ String         │
-│ ["str1", "str2", "str3"]   │ String         │
-│ {"a" : 1, "b" : [1, 2, 3]} │ String         │
-└────────────────────────────┴────────────────┘
+┌─d──────────────────────┬─dynamicType(d)─────────────────┬─isDynamicElementInSharedData(d)─┐
+│ 42                     │ Int64                          │ false                           │
+│ [1,2,3]                │ Array(Int64)                   │ false                           │
+│ Hello, World!          │ String                         │ false                           │
+│ 2020-01-01             │ Date                           │ true                            │
+│ ['str1','str2','str3'] │ Array(String)                  │ true                            │
+│ (1,[1,2,3])            │ Tuple(a Int64, b Array(Int64)) │ true                            │
+└────────────────────────┴────────────────────────────────┴─────────────────────────────────┘
 ```
 
-As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String` all new types were converted to `String`.
+As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String`, all new types were inserted into the special shared data structure.
 
 ### During merges of data parts in MergeTree table engines
 
-During merge of several data parts in MergeTree table the `Dynamic` column in the resulting data part can reach the limit of different data types inside and won't be able to store all types from source parts.
-In this case ClickHouse chooses what types will remain after merge and what types will be casted to `String`. In most cases ClickHouse tries to keep the most frequent types and cast the rarest types to `String`, but it depends on the implementation.
+During a merge of several data parts in a MergeTree table, the `Dynamic` column in the resulting data part can reach the limit of different data types that can be stored as separate subcolumns and won't be able to store all types from the source parts as subcolumns.
+In this case ClickHouse chooses which types will remain as separate subcolumns after the merge and which types will be inserted into the shared data structure.
In most cases ClickHouse tries to keep the most frequent types and store the rarest types in the shared data structure, but it depends on the implementation.
 
 Let's see an example of such a merge. First, let's create a table with a `Dynamic` column, set the limit of different data types to `3` and insert values with `5` different types:
 
@@ -463,17 +465,17 @@ INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(1);
 
 Each insert will create a separate data part with a `Dynamic` column containing a single type:
 
 ```sql
-SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part;
+SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count();
 ```
 
 ```text
-┌─count()─┬─dynamicType(d)──────┬─_part─────┐
-│       5 │ UInt64              │ all_1_1_0 │
-│       4 │ Array(UInt64)       │ all_2_2_0 │
-│       3 │ Date                │ all_3_3_0 │
-│       2 │ Map(UInt64, UInt64) │ all_4_4_0 │
-│       1 │ String              │ all_5_5_0 │
-└─────────┴─────────────────────┴───────────┘
+┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐
+│       5 │ UInt64              │ false                           │ all_1_1_0 │
+│       4 │ Array(UInt64)       │ false                           │ all_2_2_0 │
+│       3 │ Date                │ false                           │ all_3_3_0 │
+│       2 │ Map(UInt64, UInt64) │ false                           │ all_4_4_0 │
+│       1 │ String              │ false                           │ all_5_5_0 │
+└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘
 ```
 
 Now, let's merge all parts into one and see what will happen:
 
 ```sql
 SYSTEM START MERGES test;
 OPTIMIZE TABLE test FINAL;
-SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part;
+SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count() desc;
 ```
 
 ```text
-┌─count()─┬─dynamicType(d)─┬─_part─────┐
-│       5 │ UInt64         │ all_1_5_2 │
-│       6 │ String         │ all_1_5_2 │
-│       4 │ Array(UInt64)  │ all_1_5_2 │
-└─────────┴────────────────┴───────────┘
+┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐
+│       5 │ UInt64              │ false                           │ all_1_5_2 │
+│       4 │ Array(UInt64)       │ false                           │ all_1_5_2 │
+│       3 │ Date                │ false                           │ all_1_5_2 │
+│       2 │ Map(UInt64, UInt64) │ true                            │ all_1_5_2 │
+│       1 │ String              │ true                            │ all_1_5_2 │
+└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘
 ```
 
-As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.
+As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` as subcolumns and inserted all other types into shared data.
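As noted above, values that end up in the shared data structure are still accessible. A rough sketch of reading them back (assuming the same `test` table as in the example above, and using the typed-subcolumn syntax where the subcolumn name matches the type name reported by `dynamicType`):

```sql
SELECT d.UInt64, d.Date, d.`Map(UInt64, UInt64)` FROM test;
```

Rows whose value has a different type return NULL for that subcolumn.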
## JSONExtract functions with Dynamic @@ -509,22 +513,23 @@ SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType( ``` ```sql -SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types``` +SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Dynamic)') AS map_of_dynamics, mapApply((k, v) -> (k, dynamicType(v)), map_of_dynamics) AS map_of_dynamic_types +``` ```text -┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┠-│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │ -└──────────────────────────────────┴─────────────────────────────────────────────────┘ +┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────────────┠+│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'Int64','b':'String','c':'Array(Nullable(Int64))'} │ +└──────────────────────────────────┴─────────────────────────────────────────────────────────┘ ``` ```sql -SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types``` +SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Dynamic') AS dynamics, arrayMap(x -> (x.1, dynamicType(x.2)), dynamics) AS dynamic_types``` ``` ```text -┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┠-│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │ -└────────────────────────────────────────┴───────────────────────────────────────────────────────┘ +┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────────────┠+│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','Int64'),('b','String'),('c','Array(Nullable(Int64))')] │ +└────────────────────────────────────────┴───────────────────────────────────────────────────────────────┘ ``` ### Binary output format diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index a92d54dd675..454f7956f48 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -1,16 +1,21 @@ #include #include +#include #include #include #include +#include #include #include #include #include #include #include -#include +#include +#include +#include +#include #include namespace DB @@ -22,31 +27,77 @@ namespace ErrorCodes extern const int PARAMETER_OUT_OF_BOUND; } - -ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_dynamic_types_) +namespace { - /// Create empty Variant. - variant_info.variant_type = std::make_shared(DataTypes{}); - variant_info.variant_name = variant_info.variant_type->getName(); - variant_column = variant_info.variant_type->createColumn(); + +/// Static default format settings to avoid creating it every time. +const FormatSettings & getFormatSettings() +{ + static const FormatSettings settings; + return settings; +} + +} + +/// Shared variant will contain String values but we cannot use usual String type +/// because we can have regular variant with type String. +/// To solve it, we use String type with custom name for shared variant. 
+DataTypePtr ColumnDynamic::getSharedVariantDataType() +{ + return DataTypeFactory::instance().getCustom("String", std::make_unique(std::make_unique(getSharedVariantTypeName()))); +} + +ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_dynamic_types_), global_max_dynamic_types(max_dynamic_types) +{ + /// Create Variant with shared variant. + setVariantType(std::make_shared(DataTypes{getSharedVariantDataType()})); } ColumnDynamic::ColumnDynamic( - MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_) + MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) + : variant_column(std::move(variant_column_)) + , max_dynamic_types(max_dynamic_types_) + , global_max_dynamic_types(global_max_dynamic_types_) + , statistics(statistics_) +{ + createVariantInfo(variant_type_); +} + +ColumnDynamic::ColumnDynamic( + MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) : variant_column(std::move(variant_column_)) , variant_info(variant_info_) , max_dynamic_types(max_dynamic_types_) + , global_max_dynamic_types(global_max_dynamic_types_) , statistics(statistics_) { } -ColumnDynamic::MutablePtr ColumnDynamic::create(MutableColumnPtr variant_column, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_) +void ColumnDynamic::setVariantType(const DataTypePtr & variant_type) +{ + if (variant_column && !empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific variant type is allowed only for empty dynamic column"); + + variant_column = variant_type->createColumn(); + createVariantInfo(variant_type); +} + +void ColumnDynamic::setMaxDynamicPaths(size_t max_dynamic_type_) +{ + if (variant_column && !empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific max_dynamic_type parameter is allowed only for empty dynamic column"); + + max_dynamic_types = max_dynamic_type_; +} + +void ColumnDynamic::createVariantInfo(const DataTypePtr & variant_type) { - VariantInfo variant_info; variant_info.variant_type = variant_type; variant_info.variant_name = variant_type->getName(); const auto & variants = assert_cast(*variant_type).getVariants(); + variant_info.variant_names.clear(); variant_info.variant_names.reserve(variants.size()); + variant_info.variant_name_to_discriminator.clear(); variant_info.variant_name_to_discriminator.reserve(variants.size()); for (ColumnVariant::Discriminator discr = 0; discr != variants.size(); ++discr) { @@ -54,30 +105,26 @@ ColumnDynamic::MutablePtr ColumnDynamic::create(MutableColumnPtr variant_column, variant_info.variant_name_to_discriminator[variant_name] = discr; } - return create(std::move(variant_column), variant_info, max_dynamic_types_, statistics_); + if (!variant_info.variant_name_to_discriminator.contains(getSharedVariantTypeName())) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Variant in Dynamic column doesn't contain shared variant"); } -bool ColumnDynamic::addNewVariant(const DB::DataTypePtr & new_variant) +bool ColumnDynamic::addNewVariant(const DataTypePtr & new_variant, const String & new_variant_name) { /// Check if we already have such variant. 
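`createVariantInfo` above rebuilds both the ordered name list and the name-to-discriminator map and insists that the shared variant is always present. A standalone sketch of that bookkeeping, assuming variants are identified by name and using the placeholder name `SharedVariant` (the real code operates on `DataTypeVariant` and its custom-named String type):

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

using Discriminator = uint8_t;

struct VariantInfoSketch
{
    std::vector<std::string> variant_names;
    std::unordered_map<std::string, Discriminator> variant_name_to_discriminator;
};

VariantInfoSketch makeVariantInfo(const std::vector<std::string> & variant_names)
{
    VariantInfoSketch info;
    info.variant_names = variant_names;
    info.variant_name_to_discriminator.reserve(variant_names.size());
    for (size_t i = 0; i != variant_names.size(); ++i)
        info.variant_name_to_discriminator[variant_names[i]] = static_cast<Discriminator>(i);

    /// The shared variant must always be one of the variants.
    if (!info.variant_name_to_discriminator.count("SharedVariant"))
        throw std::logic_error("Variant in Dynamic column doesn't contain shared variant");
    return info;
}
```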
- if (variant_info.variant_name_to_discriminator.contains(new_variant->getName())) + if (variant_info.variant_name_to_discriminator.contains(new_variant_name)) return true; - /// Check if we reached maximum number of variants. - if (variant_info.variant_names.size() >= max_dynamic_types) + /// Check if we reached maximum number of variants (don't count shared variant). + if (variant_info.variant_names.size() - 1 == max_dynamic_types) { - /// ColumnDynamic can have max_dynamic_types number of variants only when it has String as a variant. - /// Otherwise we won't be able to cast new variants to Strings. - if (!variant_info.variant_name_to_discriminator.contains("String")) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Maximum number of variants reached, but no String variant exists"); + /// Dynamic column should always have shared variant. + if (!variant_info.variant_name_to_discriminator.contains(getSharedVariantTypeName())) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Maximum number of variants reached, but no shared variant exists"); return false; } - /// If we have (max_dynamic_types - 1) number of variants and don't have String variant, we can add only String variant. - if (variant_info.variant_names.size() == max_dynamic_types - 1 && new_variant->getName() != "String" && !variant_info.variant_name_to_discriminator.contains("String")) - return false; - const DataTypes & current_variants = assert_cast(*variant_info.variant_type).getVariants(); DataTypes all_variants = current_variants; all_variants.push_back(new_variant); @@ -86,21 +133,15 @@ bool ColumnDynamic::addNewVariant(const DB::DataTypePtr & new_variant) return true; } -void ColumnDynamic::addStringVariant() +void extendVariantColumn( + IColumn & variant_column, + const DataTypePtr & old_variant_type, + const DataTypePtr & new_variant_type, + std::unordered_map old_variant_name_to_discriminator) { - if (!addNewVariant(std::make_shared())) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add String variant to Dynamic column, it's a bug"); -} - -void ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DB::DataTypePtr & new_variant_type) -{ - const DataTypes & current_variants = assert_cast(variant_info.variant_type.get())->getVariants(); + const DataTypes & current_variants = assert_cast(old_variant_type.get())->getVariants(); const DataTypes & new_variants = assert_cast(new_variant_type.get())->getVariants(); - Names new_variant_names; - new_variant_names.reserve(new_variants.size()); - std::unordered_map new_variant_name_to_discriminator; - new_variant_name_to_discriminator.reserve(new_variants.size()); std::vector> new_variant_columns_and_discriminators_to_add; new_variant_columns_and_discriminators_to_add.reserve(new_variants.size() - current_variants.size()); std::vector current_to_new_discriminators; @@ -108,26 +149,26 @@ void ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DB::DataTypePt for (ColumnVariant::Discriminator discr = 0; discr != new_variants.size(); ++discr) { - const auto & name = new_variant_names.emplace_back(new_variants[discr]->getName()); - new_variant_name_to_discriminator[name] = discr; - - auto current_it = variant_info.variant_name_to_discriminator.find(name); - if (current_it == variant_info.variant_name_to_discriminator.end()) + auto current_it = old_variant_name_to_discriminator.find(new_variants[discr]->getName()); + if (current_it == old_variant_name_to_discriminator.end()) new_variant_columns_and_discriminators_to_add.emplace_back(new_variants[discr]->createColumn(), 
discr); else current_to_new_discriminators[current_it->second] = discr; } - variant_info.variant_type = new_variant_type; - variant_info.variant_name = new_variant_type->getName(); - variant_info.variant_names = new_variant_names; - variant_info.variant_name_to_discriminator = new_variant_name_to_discriminator; - assert_cast(*variant_column).extend(current_to_new_discriminators, std::move(new_variant_columns_and_discriminators_to_add)); + assert_cast(variant_column).extend(current_to_new_discriminators, std::move(new_variant_columns_and_discriminators_to_add)); +} + +void ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DataTypePtr & new_variant_type) +{ + extendVariantColumn(*variant_column, variant_info.variant_type, new_variant_type, variant_info.variant_name_to_discriminator); + createVariantInfo(new_variant_type); + /// Clear mappings cache because now with new Variant we will have new mappings. variant_mappings_cache.clear(); } -std::vector * ColumnDynamic::combineVariants(const DB::ColumnDynamic::VariantInfo & other_variant_info) +std::vector * ColumnDynamic::combineVariants(const ColumnDynamic::VariantInfo & other_variant_info) { /// Check if we already have global discriminators mapping for other Variant in cache. /// It's used to not calculate the same mapping each call of insertFrom with the same columns. @@ -153,21 +194,14 @@ std::vector * ColumnDynamic::combineVariants(const { const DataTypes & current_variants = assert_cast(*variant_info.variant_type).getVariants(); - /// We cannot combine Variants if total number of variants exceeds max_dynamic_types. - if (current_variants.size() + num_new_variants > max_dynamic_types) + /// We cannot combine Variants if total number of variants exceeds max_dynamic_types (don't count shared variant). + if (current_variants.size() + num_new_variants - 1 > max_dynamic_types) { /// Remember that we cannot combine our variant with this one, so we will not try to do it again. variants_with_failed_combination.insert(other_variant_info.variant_name); return nullptr; } - /// We cannot combine Variants if total number of variants reaches max_dynamic_types and we don't have String variant. - if (current_variants.size() + num_new_variants == max_dynamic_types && !variant_info.variant_name_to_discriminator.contains("String") && !other_variant_info.variant_name_to_discriminator.contains("String")) - { - variants_with_failed_combination.insert(other_variant_info.variant_name); - return nullptr; - } - DataTypes all_variants = current_variants; all_variants.insert(all_variants.end(), other_variants.begin(), other_variants.end()); auto new_variant_type = std::make_shared(all_variants); @@ -185,40 +219,93 @@ std::vector * ColumnDynamic::combineVariants(const return &it->second; } -void ColumnDynamic::insert(const DB::Field & x) +void ColumnDynamic::insert(const Field & x) { - /// Check if we can insert field without Variant extension. - if (variant_column->tryInsert(x)) + if (x.isNull()) + { + insertDefault(); return; + } + + auto & variant_col = getVariantColumn(); + auto shared_variant_discr = getSharedVariantDiscriminator(); + /// Check if we can insert field into existing variants and avoid Variant extension. 
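`extendVariantColumn` needs to know, for every existing global discriminator, where the same type ends up in the extended Variant, because recreating the Variant type can reorder its variants. A minimal standalone sketch of that old-to-new mapping, assuming types are matched purely by name:

```cpp
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

using Discriminator = uint8_t;

/// For each old global discriminator, find the discriminator of the same type
/// in the extended Variant.
std::vector<Discriminator> oldToNewDiscriminators(
    const std::vector<std::string> & old_names, const std::vector<std::string> & new_names)
{
    std::unordered_map<std::string, Discriminator> new_positions;
    for (size_t i = 0; i != new_names.size(); ++i)
        new_positions[new_names[i]] = static_cast<Discriminator>(i);

    std::vector<Discriminator> mapping(old_names.size());
    for (size_t i = 0; i != old_names.size(); ++i)
        mapping[i] = new_positions.at(old_names[i]);
    return mapping;
}
```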
+ for (size_t i = 0; i != variant_col.getNumVariants(); ++i) + { + if (i != shared_variant_discr && variant_col.getVariantByGlobalDiscriminator(i).tryInsert(x)) + { + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(i)); + variant_col.getOffsets().push_back(variant_col.getVariantByGlobalDiscriminator(i).size() - 1); + return; + } + } /// If we cannot insert field into current variant column, extend it with new variant for this field from its type. - if (addNewVariant(applyVisitor(FieldToDataType(), x))) + auto field_data_type = applyVisitor(FieldToDataType(), x); + auto field_data_type_name = field_data_type->getName(); + if (addNewVariant(field_data_type, field_data_type_name)) { - /// Now we should be able to insert this field into extended variant column. - variant_column->insert(x); + /// Insert this field into newly added variant. + auto discr = variant_info.variant_name_to_discriminator[field_data_type_name]; + variant_col.getVariantByGlobalDiscriminator(discr).insert(x); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(discr)); + variant_col.getOffsets().push_back(variant_col.getVariantByGlobalDiscriminator(discr).size() - 1); } else { /// We reached maximum number of variants and couldn't add new variant. - /// This case should be really rare in real use cases. - /// We should always be able to add String variant and cast inserted value to String. - addStringVariant(); - variant_column->insert(toString(x)); + /// In this case we add the value of this new variant into special shared variant. + /// We store values in shared variant in binary form with binary encoded type. + auto & shared_variant = getSharedVariant(); + auto & chars = shared_variant.getChars(); + WriteBufferFromVector value_buf(chars, AppendModeTag()); + encodeDataType(field_data_type, value_buf); + getVariantSerialization(field_data_type, field_data_type_name)->serializeBinary(x, value_buf, getFormatSettings()); + value_buf.finalize(); + chars.push_back(0); + shared_variant.getOffsets().push_back(chars.size()); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(shared_variant_discr)); + variant_col.getOffsets().push_back(shared_variant.size() - 1); } } -bool ColumnDynamic::tryInsert(const DB::Field & x) +bool ColumnDynamic::tryInsert(const Field & x) { /// We can insert any value into Dynamic column. insert(x); return true; } +Field ColumnDynamic::operator[](size_t n) const +{ + Field res; + get(n, res); + return res; +} + +void ColumnDynamic::get(size_t n, Field & res) const +{ + const auto & variant_col = getVariantColumn(); + /// Check if value is not in shared variant. + if (variant_col.globalDiscriminatorAt(n) != getSharedVariantDiscriminator()) + { + variant_col.get(n, res); + return; + } + + /// We should deeserialize value from shared variant. 
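Appending to the shared variant reuses the usual `ColumnString` layout: serialized bytes go into a flat `chars` buffer, a zero terminator follows, and the per-row offset records the end position. A simplified standalone model of that append/read path, using std containers only instead of the real buffer classes:

```cpp
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>

/// Minimal model of the shared variant's string storage: a flat byte buffer plus
/// per-row end offsets, with every value followed by a single 0 byte.
struct SharedVariantSketch
{
    std::vector<char> chars;
    std::vector<uint64_t> offsets;

    void insertData(std::string_view value)
    {
        chars.insert(chars.end(), value.begin(), value.end());
        chars.push_back(0);
        offsets.push_back(chars.size());
    }

    std::string_view getDataAt(size_t row) const
    {
        const size_t begin = row == 0 ? 0 : static_cast<size_t>(offsets[row - 1]);
        const size_t end = static_cast<size_t>(offsets[row]);
        return std::string_view(chars.data() + begin, end - begin - 1); /// minus the 0 terminator
    }
};
```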
+ const auto & shared_variant = getSharedVariant(); + auto value_data = shared_variant.getDataAt(variant_col.offsetAt(n)); + ReadBufferFromMemory buf(value_data.data, value_data.size); + auto type = decodeDataType(buf); + getVariantSerialization(type)->deserializeBinary(res, buf, getFormatSettings()); +} + #if !defined(DEBUG_OR_SANITIZER_BUILD) -void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n) +void ColumnDynamic::insertFrom(const IColumn & src_, size_t n) #else -void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) +void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) #endif { const auto & dynamic_src = assert_cast(src_); @@ -231,6 +318,28 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) } auto & variant_col = assert_cast(*variant_column); + const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); + auto src_offset = src_variant_col.offsetAt(n); + + /// Check if we insert from shared variant and process it separately. + if (src_global_discr == dynamic_src.getSharedVariantDiscriminator()) + { + auto & src_shared_variant = dynamic_src.getSharedVariant(); + auto value = src_shared_variant.getDataAt(src_offset); + /// Decode data type of this value. + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have this variant and deserialize value into variant from shared variant data. + if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + variant_col.deserializeBinaryIntoVariant(it->second, getVariantSerialization(type, type_name), buf, getFormatSettings()); + /// Otherwise just insert it into our shared variant. + else + variant_col.insertIntoVariantFrom(getSharedVariantDiscriminator(), src_shared_variant, src_offset); + + return; + } /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) @@ -241,8 +350,6 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) /// We cannot combine 2 Variant types as total number of variants exceeds the limit. /// We need to insert single value, try to add only corresponding variant. - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); - auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); /// NULL doesn't require Variant extension. if (src_global_discr == ColumnVariant::NULL_DISCRIMINATOR) @@ -260,19 +367,18 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) } /// We reached maximum number of variants and couldn't add new variant. - /// We should always be able to add String variant and cast inserted value to String. - addStringVariant(); - auto tmp_variant_column = src_variant_col.getVariantByGlobalDiscriminator(src_global_discr).cloneEmpty(); - tmp_variant_column->insertFrom(src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); - auto tmp_string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - auto string_variant_discr = variant_info.variant_name_to_discriminator["String"]; - variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0); + /// Insert this value into shared variant. 
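`insertFrom` treats a source row that lives in shared data specially: it decodes the type stored with the value and either deserializes it into a matching typed variant or copies the raw record into its own shared variant. A small sketch of just that routing decision (the return type and names here are illustrative):

```cpp
#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

using Discriminator = uint8_t;

/// Returns the discriminator of a matching typed variant, or nullopt if the value
/// has to stay in the shared variant because no such variant exists in this column.
std::optional<Discriminator> routeValueFromSharedData(
    const std::string & value_type_name,
    const std::unordered_map<std::string, Discriminator> & variant_name_to_discriminator)
{
    if (auto it = variant_name_to_discriminator.find(value_type_name); it != variant_name_to_discriminator.end())
        return it->second;
    return std::nullopt;
}
```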
+ insertValueIntoSharedVariant( + src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), + variant_type, + dynamic_src.variant_info.variant_names[src_global_discr], + src_offset); } #if !defined(DEBUG_OR_SANITIZER_BUILD) -void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size_t length) +void ColumnDynamic::insertRangeFrom(const IColumn & src_, size_t start, size_t length) #else -void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, size_t length) +void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) #endif { if (start + length > src_.size()) @@ -293,156 +399,206 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping); + size_t prev_size = variant_col.size(); + auto shared_variant_discr = getSharedVariantDiscriminator(); + variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping, shared_variant_discr); + + /// We should process insertion from srs shared variant separately, because it can contain + /// values that should be extracted into our variants. insertRangeFrom above didn't insert + /// values into our shared variant (we specified shared_variant_discr as special skip discriminator). + + /// Check if srs shared variant is empty, nothing to do in this case. + if (dynamic_src.getSharedVariant().empty()) + return; + + /// Iterate over src discriminators and process insertion from src shared variant. + const auto & src_variant_column = dynamic_src.getVariantColumn(); + const auto src_shared_variant_discr = dynamic_src.getSharedVariantDiscriminator(); + const auto src_shared_variant_local_discr = src_variant_column.localDiscriminatorByGlobal(src_shared_variant_discr); + const auto & src_local_discriminators = src_variant_column.getLocalDiscriminators(); + const auto & src_offsets = src_variant_column.getOffsets(); + const auto & src_shared_variant = assert_cast(src_variant_column.getVariantByLocalDiscriminator(src_shared_variant_local_discr)); + + auto & local_discriminators = variant_col.getLocalDiscriminators(); + auto & offsets = variant_col.getOffsets(); + const auto shared_variant_local_discr = variant_col.localDiscriminatorByGlobal(shared_variant_discr); + auto & shared_variant = assert_cast(variant_col.getVariantByLocalDiscriminator(shared_variant_local_discr)); + for (size_t i = 0; i != length; ++i) + { + if (src_local_discriminators[start + i] == src_shared_variant_local_discr) + { + chassert(local_discriminators[prev_size + i] == shared_variant_local_discr); + auto value = src_shared_variant.getDataAt(src_offsets[start + i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have variant with this type. In this case we should extract + /// the value from src shared variant and insert it into this variant. 
+ if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + { + auto local_discr = variant_col.localDiscriminatorByGlobal(it->second); + auto & variant = variant_col.getVariantByLocalDiscriminator(local_discr); + getVariantSerialization(type, type_name)->deserializeBinary(variant, buf, getFormatSettings()); + /// Local discriminators were already filled in ColumnVariant::insertRangeFrom and this row should contain + /// shared_variant_local_discr. Change it to local discriminator of the found variant and update offsets. + local_discriminators[prev_size + i] = local_discr; + offsets[prev_size + i] = variant.size() - 1; + } + /// Otherwise, insert this value into shared variant. + else + { + shared_variant.insertData(value.data, value.size); + /// Update variant offset. + offsets[prev_size + i] = shared_variant.size() - 1; + } + } + } + return; } /// We cannot combine 2 Variant types as total number of variants exceeds the limit. - /// In this case we will add most frequent variants from this range and insert them as usual, - /// all other variants will be converted to String. - /// TODO: instead of keeping all current variants and just adding new most frequent variants - /// from source columns we can also try to replace rarest existing variants with frequent - /// variants from source column (so we will avoid casting new frequent variants to String - /// and keeping rare existing ones). It will require rewriting of existing data in Variant - /// column but will improve usability of Dynamic column for example during squashing blocks - /// during insert. - - const auto & src_variant_column = dynamic_src.getVariantColumn(); - - /// Calculate ranges for each variant in current range. - std::vector> variants_ranges(dynamic_src.variant_info.variant_names.size(), {0, 0}); - /// If we insert the whole column, no need to iterate through the range, we can just take variant sizes. - if (start == 0 && length == dynamic_src.size()) - { - for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) - variants_ranges[i] = {0, src_variant_column.getVariantByGlobalDiscriminator(i).size()}; - } - /// Otherwise we need to iterate through discriminators and calculate the range for each variant. - else - { - const auto & local_discriminators = src_variant_column.getLocalDiscriminators(); - const auto & offsets = src_variant_column.getOffsets(); - size_t end = start + length; - for (size_t i = start; i != end; ++i) - { - auto discr = src_variant_column.globalDiscriminatorByLocal(local_discriminators[i]); - if (discr != ColumnVariant::NULL_DISCRIMINATOR) - { - if (!variants_ranges[discr].second) - variants_ranges[discr].first = offsets[i]; - ++variants_ranges[discr].second; - } - } - } - + /// In this case we will add most frequent variants and insert them as usual, + /// all other variants will be inserted into shared variant. const auto & src_variants = assert_cast(*dynamic_src.variant_info.variant_type).getVariants(); - /// List of variants that will be converted to String. - std::vector variants_to_convert_to_string; /// Mapping from global discriminators of src_variant to the new variant we will create. std::vector other_to_new_discriminators; other_to_new_discriminators.reserve(dynamic_src.variant_info.variant_names.size()); - /// Check if we cannot add any more new variants. In this case we will convert all new variants to String. 
- if (variant_info.variant_names.size() == max_dynamic_types || (variant_info.variant_names.size() == max_dynamic_types - 1 && !variant_info.variant_name_to_discriminator.contains("String"))) + /// Check if we cannot add any more new variants. In this case we will insert all new variants into shared variant. + if (variant_info.variant_names.size() - 1 == max_dynamic_types) { - addStringVariant(); + auto shared_variant_discr = getSharedVariantDiscriminator(); for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) { auto it = variant_info.variant_name_to_discriminator.find(dynamic_src.variant_info.variant_names[i]); if (it == variant_info.variant_name_to_discriminator.end()) - { - variants_to_convert_to_string.push_back(i); - other_to_new_discriminators.push_back(variant_info.variant_name_to_discriminator["String"]); - } + other_to_new_discriminators.push_back(shared_variant_discr); else - { other_to_new_discriminators.push_back(it->second); - } } } - /// We still can add some new variants, but not all of them. Let's choose the most frequent variants in specified range. + /// We still can add some new variants, but not all of them. Let's choose the most frequent variants. else { + /// Create list of pairs and sort it. std::vector> new_variants_with_sizes; new_variants_with_sizes.reserve(dynamic_src.variant_info.variant_names.size()); - for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) + const auto & src_variant_column = dynamic_src.getVariantColumn(); + for (const auto & [name, discr] : dynamic_src.variant_info.variant_name_to_discriminator) { - const auto & variant_name = dynamic_src.variant_info.variant_names[i]; - if (variant_name != "String" && !variant_info.variant_name_to_discriminator.contains(variant_name)) - new_variants_with_sizes.emplace_back(variants_ranges[i].second, i); + if (!variant_info.variant_name_to_discriminator.contains(name)) + new_variants_with_sizes.emplace_back(src_variant_column.getVariantByGlobalDiscriminator(discr).size(), discr); } std::sort(new_variants_with_sizes.begin(), new_variants_with_sizes.end(), std::greater()); DataTypes new_variants = assert_cast(*variant_info.variant_type).getVariants(); - if (!variant_info.variant_name_to_discriminator.contains("String")) - new_variants.push_back(std::make_shared()); - + /// Add new variants from sorted list until we reach max_dynamic_types. for (const auto & [_, discr] : new_variants_with_sizes) { - if (new_variants.size() != max_dynamic_types) - new_variants.push_back(src_variants[discr]); - else - variants_to_convert_to_string.push_back(discr); + if (new_variants.size() - 1 == max_dynamic_types) + break; + new_variants.push_back(src_variants[discr]); } auto new_variant_type = std::make_shared(new_variants); updateVariantInfoAndExpandVariantColumn(new_variant_type); - auto string_variant_discriminator = variant_info.variant_name_to_discriminator.at("String"); + auto shared_variant_discr = getSharedVariantDiscriminator(); for (const auto & variant_name : dynamic_src.variant_info.variant_names) { auto it = variant_info.variant_name_to_discriminator.find(variant_name); if (it == variant_info.variant_name_to_discriminator.end()) - other_to_new_discriminators.push_back(string_variant_discriminator); + other_to_new_discriminators.push_back(shared_variant_discr); else other_to_new_discriminators.push_back(it->second); } } - /// Convert to String all variants that couldn't be added. 
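When two Dynamic columns cannot be combined because the total would exceed `max_dynamic_types`, the new code keeps the candidate variants with the most rows and leaves the rest for the shared variant. A standalone sketch of that size-based selection, assuming candidates are given as (row count, type name) pairs:

```cpp
#include <algorithm>
#include <cstddef>
#include <functional>
#include <string>
#include <utility>
#include <vector>

/// Pick at most `limit` new variant names, preferring the ones with the most rows.
/// Everything that does not fit is expected to go to the shared variant.
std::vector<std::string> chooseMostFrequent(
    std::vector<std::pair<size_t /*rows*/, std::string /*name*/>> candidates, size_t limit)
{
    std::sort(candidates.begin(), candidates.end(), std::greater<>());
    std::vector<std::string> selected;
    for (const auto & [rows, name] : candidates)
    {
        if (selected.size() == limit)
            break;
        selected.push_back(name);
    }
    return selected;
}
```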
- std::unordered_map variants_converted_to_string; - variants_converted_to_string.reserve(variants_to_convert_to_string.size()); - for (auto discr : variants_to_convert_to_string) - { - auto [variant_start, variant_length] = variants_ranges[discr]; - const auto & variant = src_variant_column.getVariantPtrByGlobalDiscriminator(discr); - if (variant_start == 0 && variant_length == variant->size()) - variants_converted_to_string[discr] = castColumn(ColumnWithTypeAndName(variant, src_variants[discr], ""), std::make_shared()); - else - variants_converted_to_string[discr] = castColumn(ColumnWithTypeAndName(variant->cut(variant_start, variant_length), src_variants[discr], ""), std::make_shared()); - } - + /// Iterate over the range and perform insertion. + const auto & src_variant_column = dynamic_src.getVariantColumn(); const auto & src_local_discriminators = src_variant_column.getLocalDiscriminators(); const auto & src_offsets = src_variant_column.getOffsets(); const auto & src_variant_columns = src_variant_column.getVariants(); + const auto src_shared_variant_discr = dynamic_src.getSharedVariantDiscriminator(); + const auto src_shared_variant_local_discr = src_variant_column.localDiscriminatorByGlobal(src_shared_variant_discr); + const auto & src_shared_variant = assert_cast(*src_variant_columns[src_shared_variant_local_discr]); + auto & local_discriminators = variant_col.getLocalDiscriminators(); + local_discriminators.reserve(local_discriminators.size() + length); + auto & offsets = variant_col.getOffsets(); + offsets.reserve(offsets.size() + length); + auto & variant_columns = variant_col.getVariants(); + const auto shared_variant_discr = getSharedVariantDiscriminator(); + const auto shared_variant_local_discr = variant_col.localDiscriminatorByGlobal(shared_variant_discr); + auto & shared_variant = assert_cast(*variant_columns[shared_variant_local_discr]); size_t end = start + length; for (size_t i = start; i != end; ++i) { - auto local_discr = src_local_discriminators[i]; - if (local_discr == ColumnVariant::NULL_DISCRIMINATOR) + auto src_local_discr = src_local_discriminators[i]; + auto src_offset = src_offsets[i]; + if (src_local_discr == ColumnVariant::NULL_DISCRIMINATOR) { - variant_col.insertDefault(); + local_discriminators.push_back(ColumnVariant::NULL_DISCRIMINATOR); + offsets.emplace_back(); } else { - auto global_discr = src_variant_column.globalDiscriminatorByLocal(local_discr); - auto to_global_discr = other_to_new_discriminators[global_discr]; - auto it = variants_converted_to_string.find(global_discr); - if (it == variants_converted_to_string.end()) + /// Process insertion from src shared variant separately. + if (src_local_discr == src_shared_variant_local_discr) { - variant_col.insertIntoVariantFrom(to_global_discr, *src_variant_columns[local_discr], src_offsets[i]); + auto value = src_shared_variant.getDataAt(src_offset); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have variant with this type. In this case we should extract + /// the value from src shared variant and insert it into this variant. 
+ if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + { + auto local_discr = variant_col.localDiscriminatorByGlobal(it->second); + getVariantSerialization(type, type_name)->deserializeBinary(*variant_columns[local_discr], buf, getFormatSettings()); + local_discriminators.push_back(local_discr); + offsets.push_back(variant_columns[local_discr]->size() - 1); + } + /// Otherwise, insert this value into shared variant. + else + { + shared_variant.insertData(value.data, value.size); + local_discriminators.push_back(shared_variant_local_discr); + offsets.push_back(shared_variant.size() - 1); + } } + /// Insertion from usual variant. else { - variant_col.insertIntoVariantFrom(to_global_discr, *it->second, src_offsets[i] - variants_ranges[global_discr].first); + auto src_global_discr = src_variant_column.globalDiscriminatorByLocal(src_local_discr); + auto global_discr = other_to_new_discriminators[src_global_discr]; + /// Check if we need to insert this value into shared variant. + if (global_discr == shared_variant_discr) + { + serializeValueIntoSharedVariant( + shared_variant, + *src_variant_columns[src_local_discr], + src_variants[src_global_discr], + getVariantSerialization(src_variants[src_global_discr], dynamic_src.variant_info.variant_names[src_global_discr]), + src_offset); + local_discriminators.push_back(shared_variant_local_discr); + offsets.push_back(shared_variant.size() - 1); + } + else + { + auto local_discr = variant_col.localDiscriminatorByGlobal(global_discr); + variant_columns[local_discr]->insertFrom(*src_variant_columns[src_local_discr], src_offset); + local_discriminators.push_back(local_discr); + offsets.push_back(variant_columns[local_discr]->size() - 1); + } } } } } #if !defined(DEBUG_OR_SANITIZER_BUILD) -void ColumnDynamic::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length) +void ColumnDynamic::insertManyFrom(const IColumn & src_, size_t position, size_t length) #else -void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length) +void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size_t length) #endif { const auto & dynamic_src = assert_cast(src_); @@ -455,6 +611,36 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, } auto & variant_col = assert_cast(*variant_column); + const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + auto src_global_discr = src_variant_col.globalDiscriminatorAt(position); + auto src_offset = src_variant_col.offsetAt(position); + + /// Check if we insert from shared variant and process it separately. + if (src_global_discr == dynamic_src.getSharedVariantDiscriminator()) + { + auto & src_shared_variant = dynamic_src.getSharedVariant(); + auto value = src_shared_variant.getDataAt(src_offset); + /// Decode data type of this value. + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have this variant and deserialize value into variant from shared variant data. + if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + { + /// Deserialize value into temporary column and use it in insertManyIntoVariantFrom. 
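`insertManyFrom` replicates one source row `length` times, and for a row held in shared data the value is decoded once into a temporary column and then inserted repeatedly rather than being decoded per copy. A stripped-down sketch of that decode-once, replicate-N pattern, where the `decode` callback stands in for the real binary deserialization:

```cpp
#include <cstddef>
#include <string>
#include <vector>

/// Decode a stored value once and append `length` copies to the destination,
/// mirroring the "temporary column + insertManyIntoVariantFrom" pattern.
template <typename Decoded, typename DecodeFn>
void insertManyDecodedOnce(std::vector<Decoded> & destination, const std::string & stored, size_t length, DecodeFn decode)
{
    const Decoded value = decode(stored); /// decode exactly once
    destination.insert(destination.end(), length, value);
}
```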
+ auto tmp_column = type->createColumn(); + tmp_column->reserve(1); + getVariantSerialization(type, type_name)->deserializeBinary(*tmp_column, buf, getFormatSettings()); + variant_col.insertManyIntoVariantFrom(it->second, *tmp_column, 0, length); + } + /// Otherwise just insert it into our shared variant. + else + { + variant_col.insertManyIntoVariantFrom(getSharedVariantDiscriminator(), src_shared_variant, src_offset, length); + } + + return; + } /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) @@ -465,8 +651,6 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, /// We cannot combine 2 Variant types as total number of variants exceeds the limit. /// We need to insert single value, try to add only corresponding variant. - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); - auto src_global_discr = src_variant_col.globalDiscriminatorAt(position); if (src_global_discr == ColumnVariant::NULL_DISCRIMINATOR) { insertDefault(); @@ -481,21 +665,51 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, return; } - addStringVariant(); - auto tmp_variant_column = src_variant_col.getVariantByGlobalDiscriminator(src_global_discr).cloneEmpty(); - tmp_variant_column->insertFrom(src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position)); - auto tmp_string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - auto string_variant_discr = variant_info.variant_name_to_discriminator["String"]; - variant_col.insertManyIntoVariantFrom(string_variant_discr, *tmp_string_column, 0, length); + /// We reached maximum number of variants and couldn't add new variant. + /// Insert this value into shared variant. + /// Create temporary string column, serialize value into it and use it in insertManyIntoVariantFrom. 
+ auto tmp_shared_variant = ColumnString::create(); + serializeValueIntoSharedVariant( + *tmp_shared_variant, + src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), + variant_type, + getVariantSerialization(variant_type, dynamic_src.variant_info.variant_names[src_global_discr]), + src_offset); + + variant_col.insertManyIntoVariantFrom(getSharedVariantDiscriminator(), *tmp_shared_variant, 0, length); } +void ColumnDynamic::insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n) +{ + auto & variant_col = getVariantColumn(); + auto & shared_variant = getSharedVariant(); + serializeValueIntoSharedVariant(shared_variant, src, type, getVariantSerialization(type, type_name), n); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(getSharedVariantDiscriminator())); + variant_col.getOffsets().push_back(shared_variant.size() - 1); +} -StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, const char *& begin) const +void ColumnDynamic::serializeValueIntoSharedVariant( + ColumnString & shared_variant, + const IColumn & src, + const DataTypePtr & type, + const SerializationPtr & serialization, + size_t n) +{ + auto & chars = shared_variant.getChars(); + WriteBufferFromVector value_buf(chars, AppendModeTag()); + encodeDataType(type, value_buf); + serialization->serializeBinary(src, n, value_buf, getFormatSettings()); + value_buf.finalize(); + chars.push_back(0); + shared_variant.getOffsets().push_back(chars.size()); +} + +StringRef ColumnDynamic::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const { /// We cannot use Variant serialization here as it serializes discriminator + value, /// but Dynamic doesn't have fixed mapping discriminator <-> variant type /// as different Dynamic column can have different Variants. - /// Instead, we serialize null bit + variant type in binary format (size + bytes) + value. + /// Instead, we serialize null bit + variant type and value in binary format (size + data). const auto & variant_col = assert_cast(*variant_column); auto discr = variant_col.globalDiscriminatorAt(n); StringRef res; @@ -509,19 +723,29 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co return res; } - const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(discr); - String variant_type_binary_data = encodeDataType(variant_type); - size_t variant_type_binary_data_size = variant_type_binary_data.size(); - char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(), begin); - memcpy(pos, &null_bit, sizeof(UInt8)); - memcpy(pos + sizeof(UInt8), &variant_type_binary_data_size, sizeof(size_t)); - memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_type_binary_data.data(), variant_type_binary_data.size()); - res.data = pos; - res.size = sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(); + WriteBufferFromOwnString buf; + StringRef type_and_value; + /// If we have value from shared variant, it's already stored in the desired format. + if (discr == getSharedVariantDiscriminator()) + { + type_and_value = getSharedVariant().getDataAt(variant_col.offsetAt(n)); + } + /// For regular variants serialize its type and value in binary format. 
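`serializeValueIntoArena` now emits a flat record: a null byte, the size of the type-and-value blob as a `size_t`, then the blob itself (reused verbatim for shared-variant rows). A standalone sketch of packing and skipping that layout with `memcpy`, assuming a plain `std::string` buffer instead of an `Arena`:

```cpp
#include <cstdint>
#include <cstring>
#include <string>

/// Record layout: [UInt8 null flag][size_t blob size][type-and-value blob].
std::string packRecord(bool is_null, const std::string & type_and_value)
{
    const uint8_t null_bit = is_null ? 1 : 0;
    if (is_null)
        return std::string(1, static_cast<char>(null_bit));

    const size_t size = type_and_value.size();
    std::string record(sizeof(uint8_t) + sizeof(size_t) + size, '\0');
    std::memcpy(record.data(), &null_bit, sizeof(uint8_t));
    std::memcpy(record.data() + sizeof(uint8_t), &size, sizeof(size_t));
    std::memcpy(record.data() + sizeof(uint8_t) + sizeof(size_t), type_and_value.data(), size);
    return record;
}

/// Skipping a record needs only the fixed-size header, as in skipSerializedInArena.
const char * skipRecord(const char * pos)
{
    uint8_t null_bit;
    std::memcpy(&null_bit, pos, sizeof(uint8_t));
    pos += sizeof(uint8_t);
    if (null_bit)
        return pos;

    size_t size;
    std::memcpy(&size, pos, sizeof(size_t));
    return pos + sizeof(size_t) + size;
}
```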
+ else + { + const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(discr); + encodeDataType(variant_type, buf); + getVariantSerialization(variant_type, variant_info.variant_names[discr]) + ->serializeBinary(variant_col.getVariantByGlobalDiscriminator(discr), variant_col.offsetAt(n), buf, getFormatSettings()); + type_and_value = buf.str(); + } - auto value_ref = variant_col.getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_col.offsetAt(n), arena, begin); - res.data = value_ref.data - res.size; - res.size += value_ref.size; + char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + type_and_value.size, begin); + memcpy(pos, &null_bit, sizeof(UInt8)); + memcpy(pos + sizeof(UInt8), &type_and_value.size, sizeof(size_t)); + memcpy(pos + sizeof(UInt8) + sizeof(size_t), type_and_value.data, type_and_value.size); + res.data = pos; + res.size = sizeof(UInt8) + sizeof(size_t) + type_and_value.size; return res; } @@ -536,39 +760,36 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos) return pos; } - /// Read variant type in binary format. - const size_t variant_type_binary_data_size = unalignedLoad(pos); - pos += sizeof(variant_type_binary_data_size); - String variant_type_binary_data; - variant_type_binary_data.resize(variant_type_binary_data_size); - memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size); - pos += variant_type_binary_data_size; - auto variant_type = decodeDataType(variant_type_binary_data); + /// Read variant type and value in binary format. + const size_t type_and_value_size = unalignedLoad(pos); + pos += sizeof(type_and_value_size); + std::string_view type_and_value(pos, type_and_value_size); + pos += type_and_value_size; + + ReadBufferFromMemory buf(type_and_value.data(), type_and_value.size()); + auto variant_type = decodeDataType(buf); auto variant_name = variant_type->getName(); /// If we already have such variant, just deserialize it into corresponding variant column. auto it = variant_info.variant_name_to_discriminator.find(variant_name); if (it != variant_info.variant_name_to_discriminator.end()) { - auto discr = it->second; - return variant_col.deserializeVariantAndInsertFromArena(discr, pos); + variant_col.deserializeBinaryIntoVariant(it->second, getVariantSerialization(variant_type, variant_name), buf, getFormatSettings()); } - - /// If we don't have such variant, add it. - if (likely(addNewVariant(variant_type))) + /// If we don't have such variant, try to add it. + else if (likely(addNewVariant(variant_type))) { auto discr = variant_info.variant_name_to_discriminator[variant_name]; - return variant_col.deserializeVariantAndInsertFromArena(discr, pos); + variant_col.deserializeBinaryIntoVariant(discr, getVariantSerialization(variant_type, variant_name), buf, getFormatSettings()); + } + /// Otherwise insert this value into shared variant. + else + { + auto & shared_variant = getSharedVariant(); + shared_variant.insertData(type_and_value.data(), type_and_value.size()); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(getSharedVariantDiscriminator())); + variant_col.getOffsets().push_back(shared_variant.size() - 1); } - /// We reached maximum number of variants and couldn't add new variant. - /// We should always be able to add String variant and cast inserted value to String. - addStringVariant(); - /// Create temporary column of this variant type and deserialize value into it. 
- auto tmp_variant_column = variant_type->createColumn(); - pos = tmp_variant_column->deserializeAndInsertFromArena(pos); - /// Cast temporary column to String and insert this value into String variant. - auto str_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - variant_col.insertIntoVariantFrom(variant_info.variant_name_to_discriminator["String"], *str_column, 0); return pos; } @@ -579,14 +800,10 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const if (null_bit) return pos; - const size_t variant_type_binary_data_size = unalignedLoad(pos); - pos += sizeof(variant_type_binary_data_size); - String variant_type_binary_data; - variant_type_binary_data.resize(variant_type_binary_data_size); - memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size); - pos += variant_type_binary_data_size; - auto tmp_variant_column = decodeDataType(variant_type_binary_data)->createColumn(); - return tmp_variant_column->skipSerializedInArena(pos); + const size_t type_and_value_size = unalignedLoad(pos); + pos += sizeof(type_and_value_size); + pos += type_and_value_size; + return pos; } void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const @@ -604,9 +821,9 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const } #if !defined(DEBUG_OR_SANITIZER_BUILD) -int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const +int ColumnDynamic::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else -int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const +int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #endif { const auto & left_variant = assert_cast(*variant_column); @@ -614,7 +831,9 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int const auto & right_variant = assert_cast(*right_dynamic.variant_column); auto left_discr = left_variant.globalDiscriminatorAt(n); + auto left_shared_variant_discr = getSharedVariantDiscriminator(); auto right_discr = right_variant.globalDiscriminatorAt(m); + auto right_shared_variant_discr = right_dynamic.getSharedVariantDiscriminator(); /// Check if we have NULLs and return result based on nan_direction_hint. if (left_discr == ColumnVariant::NULL_DISCRIMINATOR && right_discr == ColumnVariant::NULL_DISCRIMINATOR) @@ -624,12 +843,125 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int else if (right_discr == ColumnVariant::NULL_DISCRIMINATOR) return -nan_direction_hint; - /// If rows have different types, we compare type names. - if (variant_info.variant_names[left_discr] != right_dynamic.variant_info.variant_names[right_discr]) - return variant_info.variant_names[left_discr] < right_dynamic.variant_info.variant_names[right_discr] ? -1 : 1; + /// Check if both values are in shared variant. + if (left_discr == left_shared_variant_discr && right_discr == right_shared_variant_discr) + { + /// Extract type names from both values. + auto left_value = getSharedVariant().getDataAt(left_variant.offsetAt(n)); + ReadBufferFromMemory buf_left(left_value.data, left_value.size); + auto left_data_type = decodeDataType(buf_left); + auto left_data_type_name = left_data_type->getName(); - /// If rows have the same types, compare actual values from corresponding variants. 
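The comparison rule is unchanged by where a value is stored: rows of different types are ordered by type name, rows of the same type by value. A tiny sketch of that two-level ordering, assuming both values are already materialized as comparable strings (the real code compares typed columns):

```cpp
#include <string>

/// Returns -1, 0 or 1, like IColumn::compareAt: order by type name first, then by value.
int compareDynamicValues(
    const std::string & left_type, const std::string & left_value,
    const std::string & right_type, const std::string & right_value)
{
    if (left_type != right_type)
        return left_type < right_type ? -1 : 1;
    if (left_value != right_value)
        return left_value < right_value ? -1 : 1;
    return 0;
}
```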
- return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint); + auto right_value = right_dynamic.getSharedVariant().getDataAt(right_variant.offsetAt(m)); + ReadBufferFromMemory buf_right(right_value.data, right_value.size); + auto right_data_type = decodeDataType(buf_right); + auto right_data_type_name = right_data_type->getName(); + + /// If rows have different types, we compare type names. + if (left_data_type_name != right_data_type_name) + return left_data_type_name < right_data_type_name ? -1 : 1; + + /// If rows have the same type, we compare actual values. + /// We have both values serialized in binary format, so we need to + /// create temporary column, insert both values into it and compare. + auto tmp_column = left_data_type->createColumn(); + const auto & serialization = getVariantSerialization(left_data_type, left_data_type_name); + serialization->deserializeBinary(*tmp_column, buf_left, getFormatSettings()); + serialization->deserializeBinary(*tmp_column, buf_right, getFormatSettings()); + return tmp_column->compareAt(0, 1, *tmp_column, nan_direction_hint); + } + /// Check if only left value is in shared data. + else if (left_discr == left_shared_variant_discr) + { + /// Extract left type name from the value. + auto left_value = getSharedVariant().getDataAt(left_variant.offsetAt(n)); + ReadBufferFromMemory buf_left(left_value.data, left_value.size); + auto left_data_type = decodeDataType(buf_left); + auto left_data_type_name = left_data_type->getName(); + + /// If rows have different types, we compare type names. + if (left_data_type_name != right_dynamic.variant_info.variant_names[right_discr]) + return left_data_type_name < right_dynamic.variant_info.variant_names[right_discr] ? -1 : 1; + + /// If rows have the same type, we compare actual values. + /// We have left value serialized in binary format, we need to + /// create temporary column, insert the value into it and compare. + auto tmp_column = left_data_type->createColumn(); + getVariantSerialization(left_data_type, left_data_type_name)->deserializeBinary(*tmp_column, buf_left, getFormatSettings()); + return tmp_column->compareAt(0, right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint); + } + /// Check if only right value is in shared data. + else if (right_discr == right_shared_variant_discr) + { + /// Extract right type name from the value. + auto right_value = right_dynamic.getSharedVariant().getDataAt(right_variant.offsetAt(m)); + ReadBufferFromMemory buf_right(right_value.data, right_value.size); + auto right_data_type = decodeDataType(buf_right); + auto right_data_type_name = right_data_type->getName(); + + /// If rows have different types, we compare type names. + if (variant_info.variant_names[left_discr] != right_data_type_name) + return variant_info.variant_names[left_discr] < right_data_type_name ? -1 : 1; + + /// If rows have the same type, we compare actual values. + /// We have right value serialized in binary format, we need to + /// create temporary column, insert the value into it and compare. 
+ auto tmp_column = right_data_type->createColumn(); + getVariantSerialization(right_data_type, right_data_type_name)->deserializeBinary(*tmp_column, buf_right, getFormatSettings()); + return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), 0, *tmp_column, nan_direction_hint); + } + /// Otherwise both values are regular variants. + else + { + /// If rows have different types, we compare type names. + if (variant_info.variant_names[left_discr] != right_dynamic.variant_info.variant_names[right_discr]) + return variant_info.variant_names[left_discr] < right_dynamic.variant_info.variant_names[right_discr] ? -1 : 1; + + /// If rows have the same types, compare actual values from corresponding variants. + return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint); + } +} + +struct ColumnDynamic::ComparatorBase +{ + const ColumnDynamic & parent; + int nan_direction_hint; + + ComparatorBase(const ColumnDynamic & parent_, int nan_direction_hint_) + : parent(parent_), nan_direction_hint(nan_direction_hint_) + { + } + + ALWAYS_INLINE int compare(size_t lhs, size_t rhs) const + { + return parent.compareAt(lhs, rhs, parent, nan_direction_hint); + } +}; + +void ColumnDynamic::getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const +{ + if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Unstable) + getPermutationImpl(limit, res, ComparatorAscendingUnstable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Stable) + getPermutationImpl(limit, res, ComparatorAscendingStable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == IColumn::PermutationSortStability::Unstable) + getPermutationImpl(limit, res, ComparatorDescendingUnstable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == IColumn::PermutationSortStability::Stable) + getPermutationImpl(limit, res, ComparatorDescendingStable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); +} + +void ColumnDynamic::updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, DB::EqualRanges & equal_ranges) const +{ + auto comparator_equal = ComparatorEqual(*this, nan_direction_hint); + + if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Unstable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorAscendingUnstable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Stable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorAscendingStable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == 
IColumn::PermutationSortStability::Unstable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorDescendingUnstable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == IColumn::PermutationSortStability::Stable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorDescendingStable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); } ColumnPtr ColumnDynamic::compress() const @@ -637,14 +969,16 @@ ColumnPtr ColumnDynamic::compress() const ColumnPtr variant_compressed = variant_column->compress(); size_t byte_size = variant_compressed->byteSize(); return ColumnCompressed::create(size(), byte_size, - [my_variant_compressed = std::move(variant_compressed), my_variant_info = variant_info, my_max_dynamic_types = max_dynamic_types, my_statistics = statistics]() mutable + [my_variant_compressed = std::move(variant_compressed), my_variant_info = variant_info, my_max_dynamic_types = max_dynamic_types, my_global_max_dynamic_types = global_max_dynamic_types, my_statistics = statistics]() mutable { - return ColumnDynamic::create(my_variant_compressed->decompress(), my_variant_info, my_max_dynamic_types, my_statistics); + return ColumnDynamic::create(my_variant_compressed->decompress(), my_variant_info, my_max_dynamic_types, my_global_max_dynamic_types, my_statistics); }); } void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { + LOG_DEBUG(getLogger("ColumnDynamic"), "takeDynamicStructureFromSourceColumns"); + if (!empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "takeDynamicStructureFromSourceColumns should be called only on empty Dynamic column"); @@ -663,6 +997,9 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source /// First, collect all variants from all source columns and calculate total sizes. std::unordered_map total_sizes; DataTypes all_variants; + /// Add shared variant type in advance; + all_variants.push_back(getSharedVariantDataType()); + total_sizes[getSharedVariantTypeName()] = 0; for (const auto & source_column : source_columns) { @@ -671,7 +1008,7 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source const auto & source_variant_info = source_dynamic.getVariantInfo(); const auto & source_variants = assert_cast(*source_variant_info.variant_type).getVariants(); /// During deserialization from MergeTree we will have variant sizes statistics from the whole data part. - const auto & source_statistics = source_dynamic.getStatistics(); + const auto & source_statistics = source_dynamic.getStatistics(); for (size_t i = 0; i != source_variants.size(); ++i) { const auto & variant_name = source_variant_info.variant_names[i]; @@ -682,37 +1019,67 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source all_variants.push_back(source_variants[i]); it = total_sizes.emplace(variant_name, 0).first; } - auto statistics_it = source_statistics.data.find(variant_name); - size_t size = statistics_it == source_statistics.data.end() ? 
source_variant_column.getVariantByGlobalDiscriminator(i).size() : statistics_it->second; + size_t size = source_variant_column.getVariantByGlobalDiscriminator(i).size(); + if (source_statistics) + { + auto statistics_it = source_statistics->variants_statistics.find(variant_name); + if (statistics_it != source_statistics->variants_statistics.end()) + size = statistics_it->second; + } + it->second += size; } + + /// Use add variants from shared variant statistics. It can help extracting + /// frequent variants from shared variant to usual variants. + if (source_statistics) + { + for (const auto & [variant_name, size] : source_statistics->shared_variants_statistics) + { + auto it = total_sizes.find(variant_name); + /// Add this variant to the list of all variants if we didn't see it yet. + if (it == total_sizes.end()) + { + all_variants.push_back(DataTypeFactory::instance().get(variant_name)); + it = total_sizes.emplace(variant_name, 0).first; + } + it->second += size; + } + } } DataTypePtr result_variant_type; - /// Check if the number of all variants exceeds the limit. - if (all_variants.size() > max_dynamic_types || (all_variants.size() == max_dynamic_types && !total_sizes.contains("String"))) + Statistics new_statistics(Statistics::Source::MERGE); + /// Check if the number of all dynamic types exceeds the limit. + if (all_variants.size() - 1 > global_max_dynamic_types) { /// Create list of variants with their sizes and sort it. std::vector> variants_with_sizes; variants_with_sizes.reserve(all_variants.size()); for (const auto & variant : all_variants) - variants_with_sizes.emplace_back(total_sizes[variant->getName()], variant); + { + if (variant->getName() != getSharedVariantTypeName()) + variants_with_sizes.emplace_back(total_sizes[variant->getName()], variant); + } std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater()); - /// Take first max_dynamic_types variants from sorted list. + /// Take first max_dynamic_types variants from sorted list and fill shared_variants_statistics with the rest. DataTypes result_variants; - result_variants.reserve(max_dynamic_types); - /// Add String variant in advance. - result_variants.push_back(std::make_shared()); - for (const auto & [_, variant] : variants_with_sizes) + result_variants.reserve(global_max_dynamic_types + 1); + for (const auto & [size, variant] : variants_with_sizes) { - if (result_variants.size() == max_dynamic_types) - break; - - if (variant->getName() != "String") + /// Add variant to the resulting variants list until we reach max_dynamic_types. + if (result_variants.size() < global_max_dynamic_types) result_variants.push_back(variant); + /// Add all remaining variants into shared_variants_statistics until we reach its max size. + else if (new_statistics.shared_variants_statistics.size() < Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) + new_statistics.shared_variants_statistics[variant->getName()] = size; + else + break; } + /// Add shared variant. + result_variants.push_back(getSharedVariantDataType()); result_variant_type = std::make_shared(result_variants); } else @@ -720,26 +1087,16 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source result_variant_type = std::make_shared(all_variants); } - /// Now we have resulting Variant and can fill variant info. 
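`takeDynamicStructureFromSourceColumns` sums per-variant row counts over all source columns, preferring part-level statistics when present and folding in shared-variant statistics so that frequent types can be promoted out of shared data. A standalone sketch of just the accumulation step, assuming sizes arrive as plain name-to-count maps:

```cpp
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

using SizeByVariant = std::unordered_map<std::string, size_t>;

/// Sum sizes of every variant, including types seen only inside shared data, across parts.
SizeByVariant accumulateTotalSizes(
    const std::vector<SizeByVariant> & per_part_variant_sizes,
    const std::vector<SizeByVariant> & per_part_shared_variant_sizes)
{
    SizeByVariant total_sizes;
    for (const auto & part : per_part_variant_sizes)
        for (const auto & [name, size] : part)
            total_sizes[name] += size;
    for (const auto & part : per_part_shared_variant_sizes)
        for (const auto & [name, size] : part)
            total_sizes[name] += size;
    return total_sizes;
}
```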
- variant_info.variant_type = result_variant_type; - variant_info.variant_name = result_variant_type->getName(); - const auto & result_variants = assert_cast(*result_variant_type).getVariants(); - variant_info.variant_names.clear(); - variant_info.variant_names.reserve(result_variants.size()); - variant_info.variant_name_to_discriminator.clear(); - variant_info.variant_name_to_discriminator.reserve(result_variants.size()); - statistics.data.clear(); - statistics.data.reserve(result_variants.size()); - statistics.source = Statistics::Source::MERGE; - for (size_t i = 0; i != result_variants.size(); ++i) - { - auto variant_name = result_variants[i]->getName(); - variant_info.variant_names.push_back(variant_name); - variant_info.variant_name_to_discriminator[variant_name] = i; - statistics.data[variant_name] = total_sizes[variant_name]; - } + /// Now we have resulting Variant and can fill variant info and create merge statistics. + setVariantType(result_variant_type); + new_statistics.variants_statistics.reserve(variant_info.variant_names.size()); + for (const auto & variant_name : variant_info.variant_names) + new_statistics.variants_statistics[variant_name] = total_sizes[variant_name]; + statistics = std::make_shared(std::move(new_statistics)); - variant_column = variant_info.variant_type->createColumn(); + /// Reduce max_dynamic_types to the number of selected variants (without shared variant), so there will be no possibility + /// to extend selected variants on inserts into this column during merges. + max_dynamic_types = variant_info.variant_names.size() - 1; /// Now we have the resulting Variant that will be used in all merged columns. /// Variants can also contain Dynamic columns inside, we should collect diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index e92cabd3db9..8b815e2b015 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -19,11 +20,15 @@ namespace DB * * When new values are inserted into Dynamic column, the internal Variant * type and column are extended if the inserted value has new type. + * When the limit on number of dynamic types is exceeded, all values + * with new types are inserted into a special shared variant with type String + * that contains values and their types in binary format. */ class ColumnDynamic final : public COWHelper, ColumnDynamic> { public: - /// + static constexpr const char * SHARED_VARIANT_TYPE_NAME = "SharedVariant"; + struct Statistics { enum class Source @@ -32,12 +37,27 @@ public: MERGE, /// Statistics were calculated during merge of several MergeTree parts. }; + Statistics(Source source_) : source(source_) {} + /// Source of the statistics. Source source; - /// Statistics data: (variant name) -> (total variant size in data part). - std::unordered_map data; + /// Statistics data for usual variants: (variant name) -> (total variant size in data part). + std::unordered_map variants_statistics; + /// Statistics data for variants from shared variant: (variant name) -> (total variant size in data part). + /// For shared variant we store statistics only for first 256 variants (should cover almost all cases and it's not expensive).
+ static constexpr const size_t MAX_SHARED_VARIANT_STATISTICS_SIZE = 256; + std::unordered_map shared_variants_statistics; }; + using StatisticsPtr = std::shared_ptr; + + struct ComparatorBase; + using ComparatorAscendingUnstable = ComparatorAscendingUnstableImpl; + using ComparatorAscendingStable = ComparatorAscendingStableImpl; + using ComparatorDescendingUnstable = ComparatorDescendingUnstableImpl; + using ComparatorDescendingStable = ComparatorDescendingStableImpl; + using ComparatorEqual = ComparatorEqualImpl; + private: friend class COWHelper, ColumnDynamic>; @@ -54,28 +74,32 @@ private: }; explicit ColumnDynamic(size_t max_dynamic_types_); - ColumnDynamic(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {}); + ColumnDynamic(MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}); + ColumnDynamic(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}); public: /** Create immutable column using immutable arguments. This arguments may be shared with other columns. * Use IColumn::mutate in order to make mutable column and mutate shared nested columns. */ using Base = COWHelper, ColumnDynamic>; - static Ptr create(const ColumnPtr & variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {}) + static Ptr create(const ColumnPtr & variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) { - return ColumnDynamic::create(variant_column_->assumeMutable(), variant_info_, max_dynamic_types_, statistics_); + return ColumnDynamic::create(variant_column_->assumeMutable(), variant_info_, max_dynamic_types_, global_max_dynamic_types_, statistics_); } - static MutablePtr create(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {}) + static MutablePtr create(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) { - return Base::create(std::move(variant_column_), variant_info_, max_dynamic_types_, statistics_); + return Base::create(std::move(variant_column_), variant_info_, max_dynamic_types_, global_max_dynamic_types_, statistics_); } - static MutablePtr create(MutableColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_ = {}); - - static ColumnPtr create(ColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_ = {}) + static MutablePtr create(MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) { - return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, statistics_); + return Base::create(std::move(variant_column_), variant_type_, max_dynamic_types_, global_max_dynamic_types_, statistics_); + } + + static ColumnPtr create(ColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) + { + return 
create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, global_max_dynamic_types_, statistics_); } static MutablePtr create(size_t max_dynamic_types_) @@ -83,7 +107,7 @@ public: return Base::create(max_dynamic_types_); } - std::string getName() const override { return "Dynamic(max_types=" + std::to_string(max_dynamic_types) + ")"; } + std::string getName() const override { return "Dynamic(max_types=" + std::to_string(global_max_dynamic_types) + ")"; } const char * getFamilyName() const override { @@ -98,12 +122,12 @@ public: MutableColumnPtr cloneEmpty() const override { /// Keep current dynamic structure - return Base::create(variant_column->cloneEmpty(), variant_info, max_dynamic_types, statistics); + return Base::create(variant_column->cloneEmpty(), variant_info, max_dynamic_types, global_max_dynamic_types, statistics); } MutableColumnPtr cloneResized(size_t size) const override { - return Base::create(variant_column->cloneResized(size), variant_info, max_dynamic_types, statistics); + return Base::create(variant_column->cloneResized(size), variant_info, max_dynamic_types, global_max_dynamic_types, statistics); } size_t size() const override @@ -111,15 +135,9 @@ public: return variant_column->size(); } - Field operator[](size_t n) const override - { - return (*variant_column)[n]; - } + Field operator[](size_t n) const override; - void get(size_t n, Field & res) const override - { - variant_column->get(n, res); - } + void get(size_t n, Field & res) const override; bool isDefaultAt(size_t n) const override { @@ -187,7 +205,7 @@ public: ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override { - return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types); + return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types, global_max_dynamic_types); } void expand(const Filter & mask, bool inverted) override @@ -197,17 +215,17 @@ public: ColumnPtr permute(const Permutation & perm, size_t limit) const override { - return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types); + return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types, global_max_dynamic_types); } ColumnPtr index(const IColumn & indexes, size_t limit) const override { - return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types); + return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types, global_max_dynamic_types); } ColumnPtr replicate(const Offsets & replicate_offsets) const override { - return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types); + return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types, global_max_dynamic_types); } MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override @@ -216,7 +234,7 @@ public: MutableColumns scattered_columns; scattered_columns.reserve(num_columns); for (auto & scattered_variant_column : scattered_variant_columns) - scattered_columns.emplace_back(create(std::move(scattered_variant_column), variant_info, max_dynamic_types)); + scattered_columns.emplace_back(create(std::move(scattered_variant_column), variant_info, max_dynamic_types, global_max_dynamic_types)); return scattered_columns; } @@ -238,16 +256,10 @@ public: } void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, - size_t limit, int nan_direction_hint, 
IColumn::Permutation & res) const override - { - variant_column->getPermutation(direction, stability, limit, nan_direction_hint, res); - } + size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override; void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, - size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override - { - variant_column->updatePermutation(direction, stability, limit, nan_direction_hint, res, equal_ranges); - } + size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override { @@ -293,7 +305,7 @@ public: bool structureEquals(const IColumn & rhs) const override { if (const auto * rhs_concrete = typeid_cast(&rhs)) - return max_dynamic_types == rhs_concrete->max_dynamic_types; + return global_max_dynamic_types == rhs_concrete->global_max_dynamic_types; return false; } @@ -336,17 +348,67 @@ public: const ColumnVariant & getVariantColumn() const { return assert_cast(*variant_column); } ColumnVariant & getVariantColumn() { return assert_cast(*variant_column); } - bool addNewVariant(const DataTypePtr & new_variant); - void addStringVariant(); + bool addNewVariant(const DataTypePtr & new_variant, const String & new_variant_name); + bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } bool hasDynamicStructure() const override { return true; } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; - const Statistics & getStatistics() const { return statistics; } + const StatisticsPtr & getStatistics() const { return statistics; } + void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } size_t getMaxDynamicTypes() const { return max_dynamic_types; } + void setVariantType(const DataTypePtr & variant_type); + void setMaxDynamicPaths(size_t max_dynamic_type_); + + static const String & getSharedVariantTypeName() + { + static const String name = SHARED_VARIANT_TYPE_NAME; + return name; + } + + static DataTypePtr getSharedVariantDataType(); + + ColumnVariant::Discriminator getSharedVariantDiscriminator() const + { + return variant_info.variant_name_to_discriminator.at(getSharedVariantTypeName()); + } + + ColumnString & getSharedVariant() + { + return assert_cast(getVariantColumn().getVariantByGlobalDiscriminator(getSharedVariantDiscriminator())); + } + + const ColumnString & getSharedVariant() const + { + return assert_cast(getVariantColumn().getVariantByGlobalDiscriminator(getSharedVariantDiscriminator())); + } + + /// Serializes type and value in binary format into provided shared variant. Doesn't update Variant discriminators and offsets. + static void serializeValueIntoSharedVariant(ColumnString & shared_variant, const IColumn & src, const DataTypePtr & type, const SerializationPtr & serialization, size_t n); + + /// Insert value into shared variant. Also updates Variant discriminators and offsets. + void insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n); + + const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name) const + { + /// Get serialization for provided data type. + /// To avoid calling type->getDefaultSerialization() every time we use simple cache with max size. + /// When max size is reached, just clear the cache. 
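// Illustrative sketch of the "clear when full" caching policy described in the
// comment above, as a standalone helper with hypothetical names; the real
// serialization cache follows right below. Dropping the whole map on overflow
// avoids LRU bookkeeping for a small cache whose entries are cheap to recreate.
#include <cstddef>
#include <string>
#include <unordered_map>

template <typename Value, typename Factory>
const Value & getOrCreateCached(std::unordered_map<std::string, Value> & cache,
                                const std::string & key,
                                std::size_t max_size,
                                Factory && factory)
{
    if (cache.size() == max_size)
        cache.clear(); /// reset the whole cache instead of evicting single entries

    if (auto it = cache.find(key); it != cache.end())
        return it->second;

    return cache.emplace(key, factory()).first->second;
}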
+ if (serialization_cache.size() == SERIALIZATION_CACHE_MAX_SIZE) + serialization_cache.clear(); + + if (auto it = serialization_cache.find(variant_name); it != serialization_cache.end()) + return it->second; + + return serialization_cache.emplace(variant_name, variant_type->getDefaultSerialization()).first->second; + } + + const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) const { return getVariantSerialization(variant_type, variant_type->getName()); } private: + void createVariantInfo(const DataTypePtr & variant_type); + /// Combine current variant with the other variant and return global discriminators mapping /// from other variant to the combined one. It's used for inserting from /// different variants. @@ -359,12 +421,19 @@ private: /// Store the type of current variant with some additional information. VariantInfo variant_info; /// The maximum number of different types that can be stored in this Dynamic column. - /// If exceeded, all new variants will be converted to String. + /// If exceeded, all new variants will be added to a special shared variant with type String + /// in binary format. This limit can be different for different instances of Dynamic column. + /// When max_dynamic_types = 0, we will have only shared variant and insert all values into it. size_t max_dynamic_types; + /// The types limit specified in the data type by the user Dynamic(max_types=N). + /// max_dynamic_types in all column instances of this Dynamic type can be only smaller + /// (for example, max_dynamic_types can be reduced in takeDynamicStructureFromSourceColumns + /// before merge of different Dynamic columns). + size_t global_max_dynamic_types; /// Size statistics of each variants from MergeTree data part. /// Used in takeDynamicStructureFromSourceColumns and set during deserialization. - Statistics statistics; + StatisticsPtr statistics; /// Cache (Variant name) -> (global discriminators mapping from this variant to current variant in Dynamic column). /// Used to avoid mappings recalculation in combineVariants for the same Variant types. @@ -372,6 +441,17 @@ private: /// Cache of Variant types that couldn't be combined with current variant in Dynamic column. /// Used to avoid checking if combination is possible for the same Variant types. std::unordered_set variants_with_failed_combination; + + /// We can use serializations of different data types to serialize values into shared variant. + /// To avoid creating the same serialization multiple times, use simple cache. 
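// Rough illustration of the "type name plus value bytes in one String cell" idea
// used by the shared variant (see serializeValueIntoSharedVariant above). The
// length-prefixed layout below is only an assumption for readability, not the
// actual ClickHouse binary encoding; all names here are hypothetical.
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>

inline std::string packSharedValue(const std::string & type_name, const std::string & value_bytes)
{
    std::string cell;
    const auto type_len = static_cast<std::uint32_t>(type_name.size());
    cell.append(reinterpret_cast<const char *>(&type_len), sizeof(type_len));
    cell.append(type_name);
    cell.append(value_bytes); /// value is assumed to be already serialized by its type
    return cell;
}

inline std::pair<std::string, std::string> unpackSharedValue(const std::string & cell)
{
    std::uint32_t type_len = 0;
    std::memcpy(&type_len, cell.data(), sizeof(type_len));
    const std::string type_name = cell.substr(sizeof(type_len), type_len);
    const std::string value_bytes = cell.substr(sizeof(type_len) + type_len);
    return {type_name, value_bytes};
}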
+ static const size_t SERIALIZATION_CACHE_MAX_SIZE = 256; + mutable std::unordered_map serialization_cache; }; +void extendVariantColumn( + IColumn & variant_column, + const DataTypePtr & old_variant_type, + const DataTypePtr & new_variant_type, + std::unordered_map old_variant_name_to_discriminator); + } diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index de7efb41d19..7531e976926 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -476,7 +476,7 @@ void ColumnVariant::insertFromImpl(const DB::IColumn & src_, size_t n, const std } } -void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping) +void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, Discriminator * skip_discriminator) { const size_t num_variants = variants.size(); const auto & src = assert_cast(src_); @@ -557,9 +557,12 @@ void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, Discriminator global_discr = src_global_discr; if (global_discriminators_mapping && src_global_discr != NULL_DISCRIMINATOR) global_discr = (*global_discriminators_mapping)[src_global_discr]; - Discriminator local_discr = localDiscriminatorByGlobal(global_discr); - if (nested_length) - variants[local_discr]->insertRangeFrom(*src.variants[src_local_discr], nested_start, nested_length); + if (!skip_discriminator || global_discr != *skip_discriminator) + { + Discriminator local_discr = localDiscriminatorByGlobal(global_discr); + if (nested_length) + variants[local_discr]->insertRangeFrom(*src.variants[src_local_discr], nested_start, nested_length); + } } } @@ -610,7 +613,7 @@ void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t l void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) #endif { - insertRangeFromImpl(src_, start, length, nullptr); + insertRangeFromImpl(src_, start, length, nullptr, nullptr); } #if !defined(DEBUG_OR_SANITIZER_BUILD) @@ -627,9 +630,9 @@ void ColumnVariant::insertFrom(const DB::IColumn & src_, size_t n, const std::ve insertFromImpl(src_, n, &global_discriminators_mapping); } -void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping) +void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping, Discriminator skip_discriminator) { - insertRangeFromImpl(src_, start, length, &global_discriminators_mapping); + insertRangeFromImpl(src_, start, length, &global_discriminators_mapping, &skip_discriminator); } void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length, const std::vector & global_discriminators_mapping) @@ -673,6 +676,14 @@ void ColumnVariant::insertManyIntoVariantFrom(DB::ColumnVariant::Discriminator g variants[local_discr]->insertManyFrom(src_, position, length); } +void ColumnVariant::deserializeBinaryIntoVariant(ColumnVariant::Discriminator global_discr, const SerializationPtr & serialization, ReadBuffer & buf, const FormatSettings & format_settings) +{ + auto local_discr = localDiscriminatorByGlobal(global_discr); + serialization->deserializeBinary(*variants[local_discr], buf, format_settings); + getLocalDiscriminators().push_back(local_discr); + getOffsets().push_back(variants[local_discr]->size() - 1); +} + void 
ColumnVariant::insertDefault() { getLocalDiscriminators().push_back(NULL_DISCRIMINATOR); @@ -1213,9 +1224,7 @@ struct ColumnVariant::ComparatorBase ALWAYS_INLINE int compare(size_t lhs, size_t rhs) const { - int res = parent.compareAt(lhs, rhs, parent, nan_direction_hint); - - return res; + return parent.compareAt(lhs, rhs, parent, nan_direction_hint); } }; diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 34c24b5428d..571a843d113 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -2,6 +2,8 @@ #include #include +#include +#include namespace DB @@ -196,13 +198,15 @@ public: /// Methods for insertion from another Variant but with known mapping between global discriminators. void insertFrom(const IColumn & src_, size_t n, const std::vector & global_discriminators_mapping); - void insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping); + /// Don't insert data into variant with skip_discriminator global discriminator, it will be processed separately. + void insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping, Discriminator skip_discriminator); void insertManyFrom(const IColumn & src_, size_t position, size_t length, const std::vector & global_discriminators_mapping); /// Methods for insertion into a specific variant. void insertIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t n); void insertRangeIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t start, size_t length); void insertManyIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t position, size_t length); + void deserializeBinaryIntoVariant(Discriminator global_discr, const SerializationPtr & serialization, ReadBuffer & buf, const FormatSettings & format_settings); void insertDefault() override; void insertManyDefaults(size_t length) override; @@ -263,6 +267,7 @@ public: ColumnPtr & getVariantPtrByGlobalDiscriminator(size_t discr) { return variants[global_to_local_discriminators.at(discr)]; } const NestedColumns & getVariants() const { return variants; } + NestedColumns & getVariants() { return variants; } const IColumn & getLocalDiscriminatorsColumn() const { return *local_discriminators; } IColumn & getLocalDiscriminatorsColumn() { return *local_discriminators; } @@ -302,6 +307,8 @@ public: return true; } + std::vector getLocalToGlobalDiscriminatorsMapping() const { return local_to_global_discriminators; } + /// Check if we have only 1 non-empty variant and no NULL values, /// and if so, return the discriminator of this non-empty column. 
std::optional getLocalDiscriminatorOfOneNoneEmptyVariantNoNulls() const; @@ -322,7 +329,7 @@ public: private: void insertFromImpl(const IColumn & src_, size_t n, const std::vector * global_discriminators_mapping); - void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping); + void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, Discriminator * skip_discriminator); void insertManyFromImpl(const IColumn & src_, size_t position, size_t length, const std::vector * global_discriminators_mapping); void initIdentityGlobalToLocalDiscriminatorsMapping(); diff --git a/src/Columns/tests/gtest_column_dynamic.cpp b/src/Columns/tests/gtest_column_dynamic.cpp index a2862b09de1..5445bd525d9 100644 --- a/src/Columns/tests/gtest_column_dynamic.cpp +++ b/src/Columns/tests/gtest_column_dynamic.cpp @@ -9,9 +9,12 @@ TEST(ColumnDynamic, CreateEmpty) { auto column = ColumnDynamic::create(255); ASSERT_TRUE(column->empty()); - ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant()"); - ASSERT_TRUE(column->getVariantInfo().variant_names.empty()); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.empty()); + ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)"); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_names[0], "SharedVariant"); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0); + ASSERT_TRUE(column->getVariantColumn().getVariantByGlobalDiscriminator(0).empty()); } TEST(ColumnDynamic, InsertDefault) @@ -19,9 +22,12 @@ TEST(ColumnDynamic, InsertDefault) auto column = ColumnDynamic::create(255); column->insertDefault(); ASSERT_TRUE(column->size() == 1); - ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant()"); - ASSERT_TRUE(column->getVariantInfo().variant_names.empty()); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.empty()); + ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)"); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_names[0], "SharedVariant"); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0); + ASSERT_TRUE(column->getVariantColumn().getVariantByGlobalDiscriminator(0).empty()); ASSERT_TRUE(column->isNullAt(0)); ASSERT_EQ((*column)[0], Field(Null())); } @@ -41,10 +47,10 @@ TEST(ColumnDynamic, InsertFields) column->insert(Field(43.43)); ASSERT_TRUE(column->size() == 10); - ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, String)"); - std::vector expected_names = {"Float64", "Int8", "String"}; + ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, SharedVariant, String)"); + std::vector expected_names = {"Float64", "Int8", "SharedVariant", "String"}; ASSERT_EQ(column->getVariantInfo().variant_names, expected_names); - std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"String", 2}}; + std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}; 
ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator == expected_variant_name_to_discriminator); } @@ -66,56 +72,66 @@ TEST(ColumnDynamic, InsertFieldsOverflow1) { auto column = getDynamicWithManyVariants(253); - ASSERT_EQ(column->getVariantInfo().variant_names.size(), 253); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254); column->insert(Field(42.42)); - ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column->size(), 254); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("Float64")); column->insert(Field(42)); + ASSERT_EQ(column->size(), 255); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 1); Field field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "42"); + ASSERT_EQ(field, 42); column->insert(Field(43)); + ASSERT_EQ(column->size(), 256); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 2); field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "43"); + ASSERT_EQ(field, 43); column->insert(Field("str1")); + ASSERT_EQ(column->size(), 257); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 3); field = (*column)[column->size() - 1]; ASSERT_EQ(field, "str1"); column->insert(Field(Array({Field(42), Field(43)}))); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 4); field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "[42, 43]"); + ASSERT_EQ(field, Field(Array({Field(42), Field(43)}))); } TEST(ColumnDynamic, InsertFieldsOverflow2) { auto column = getDynamicWithManyVariants(254); - ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); column->insert(Field("str1")); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 1); + Field field = (*column)[column->size() - 1]; + ASSERT_EQ(field, "str1"); column->insert(Field(42)); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); - Field field = (*column)[column->size() - 1]; - 
ASSERT_EQ(field, "42"); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 2); + field = (*column)[column->size() - 1]; + ASSERT_EQ(field, 42); } ColumnDynamic::MutablePtr getInsertFromColumn(size_t num = 1) @@ -155,7 +171,7 @@ void checkInsertFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynami TEST(ColumnDynamic, InsertFrom1) { auto column_to = ColumnDynamic::create(255); - checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertFrom2) @@ -165,7 +181,7 @@ TEST(ColumnDynamic, InsertFrom2) column_to->insert(Field(42.42)); column_to->insert(Field("str")); - checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertFrom3) @@ -176,7 +192,7 @@ TEST(ColumnDynamic, InsertFrom3) column_to->insert(Field("str")); column_to->insert(Array({Field(42)})); - checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}}); + checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}}); } TEST(ColumnDynamic, InsertFromOverflow1) @@ -188,7 +204,7 @@ TEST(ColumnDynamic, InsertFromOverflow1) auto column_to = getDynamicWithManyVariants(253); column_to->insertFrom(*column_from, 0); - ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); auto field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, 42); @@ -196,13 +212,15 @@ TEST(ColumnDynamic, InsertFromOverflow1) column_to->insertFrom(*column_from, 1); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); column_to->insertFrom(*column_from, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, "str"); } @@ 
-221,9 +239,32 @@ TEST(ColumnDynamic, InsertFromOverflow2) column_to->insertFrom(*column_from, 1); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); +} + +TEST(ColumnDynamic, InsertFromOverflow3) +{ + auto column_from = ColumnDynamic::create(1); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + + auto column_to = ColumnDynamic::create(255); + column_to->insert(Field(41)); + + column_to->insertFrom(*column_from, 0); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); + auto field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42); + + column_to->insertFrom(*column_from, 1); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42.42); } void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynamic::MutablePtr & column_to, const std::string & expected_variant, const std::vector & expected_names, const std::unordered_map & expected_variant_name_to_discriminator) @@ -257,7 +298,7 @@ void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDy TEST(ColumnDynamic, InsertManyFrom1) { auto column_to = ColumnDynamic::create(255); - checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertManyFrom2) @@ -267,7 +308,7 @@ TEST(ColumnDynamic, InsertManyFrom2) column_to->insert(Field(42.42)); column_to->insert(Field("str")); - checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertManyFrom3) @@ -278,7 +319,7 @@ TEST(ColumnDynamic, InsertManyFrom3) column_to->insert(Field("str")); column_to->insert(Array({Field(42)})); - checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}}); + checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}}); } TEST(ColumnDynamic, InsertManyFromOverflow1) @@ -290,8 +331,9 @@ TEST(ColumnDynamic, InsertManyFromOverflow1) auto column_to = getDynamicWithManyVariants(253); column_to->insertManyFrom(*column_from, 0, 
2); - ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); auto field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, 42); field = (*column_to)[column_to->size() - 1]; @@ -300,15 +342,17 @@ TEST(ColumnDynamic, InsertManyFromOverflow1) column_to->insertManyFrom(*column_from, 1, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); column_to->insertManyFrom(*column_from, 2, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, "str"); field = (*column_to)[column_to->size() - 2]; @@ -323,8 +367,9 @@ TEST(ColumnDynamic, InsertManyFromOverflow2) auto column_to = getDynamicWithManyVariants(253); column_to->insertManyFrom(*column_from, 0, 2); - ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); auto field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, 42); field = (*column_to)[column_to->size() - 1]; @@ -333,11 +378,39 @@ TEST(ColumnDynamic, InsertManyFromOverflow2) column_to->insertManyFrom(*column_from, 1, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); +} + + +TEST(ColumnDynamic, InsertManyFromOverflow3) +{ + auto column_from = ColumnDynamic::create(1); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + + auto column_to = ColumnDynamic::create(255); + column_to->insert(Field(41)); + + column_to->insertManyFrom(*column_from, 0, 2); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); + auto field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, 42); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42); + + column_to->insertManyFrom(*column_from, 1, 2); + 
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, 42.42); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42.42); } void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynamic::MutablePtr & column_to, const std::string & expected_variant, const std::vector & expected_names, const std::unordered_map & expected_variant_name_to_discriminator) @@ -369,7 +442,7 @@ void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnD TEST(ColumnDynamic, InsertRangeFrom1) { auto column_to = ColumnDynamic::create(255); - checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertRangeFrom2) @@ -379,7 +452,7 @@ TEST(ColumnDynamic, InsertRangeFrom2) column_to->insert(Field(42.42)); column_to->insert(Field("str1")); - checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertRangeFrom3) @@ -390,7 +463,7 @@ TEST(ColumnDynamic, InsertRangeFrom3) column_to->insert(Field("str1")); column_to->insert(Array({Field(42)})); - checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}}); + checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}}); } TEST(ColumnDynamic, InsertRangeFromOverflow1) @@ -403,16 +476,18 @@ TEST(ColumnDynamic, InsertRangeFromOverflow1) auto column_to = getDynamicWithManyVariants(253); column_to->insertRangeFrom(*column_from, 0, 4); + ASSERT_EQ(column_to->size(), 257); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); auto field = (*column_to)[column_to->size() - 4]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, Field("str")); } @@ -428,14 +503,15 @@ TEST(ColumnDynamic, InsertRangeFromOverflow2) column_to->insertRangeFrom(*column_from, 0, 3); 
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); auto field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); } TEST(ColumnDynamic, InsertRangeFromOverflow3) @@ -449,15 +525,16 @@ TEST(ColumnDynamic, InsertRangeFromOverflow3) column_to->insert(Field("Str")); column_to->insertRangeFrom(*column_from, 0, 3); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 3); auto field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); } TEST(ColumnDynamic, InsertRangeFromOverflow4) @@ -471,12 +548,13 @@ TEST(ColumnDynamic, InsertRangeFromOverflow4) column_to->insertRangeFrom(*column_from, 0, 3); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 3); auto field = (*column_to)[column_to->size() - 3]; - ASSERT_EQ(field, Field("42")); + ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, Field("str")); } @@ -493,15 +571,16 @@ TEST(ColumnDynamic, InsertRangeFromOverflow5) column_to->insert(Field("str")); column_to->insertRangeFrom(*column_from, 0, 4); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 3); auto field = (*column_to)[column_to->size() - 4]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, Field("42.42")); + 
ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, Field("str")); } @@ -520,13 +599,14 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6) auto column_to = getDynamicWithManyVariants(253); column_to->insertRangeFrom(*column_from, 2, 5); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); - ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); auto field = (*column_to)[column_to->size() - 5]; - ASSERT_EQ(field, Field("44")); + ASSERT_EQ(field, Field(44)); field = (*column_to)[column_to->size() - 4]; ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 3]; @@ -534,7 +614,131 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6) field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, Field("str")); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, Field("[42]")); + ASSERT_EQ(field, Field(Array({Field(42)}))); +} + +TEST(ColumnDynamic, InsertRangeFromOverflow7) +{ + auto column_from = ColumnDynamic::create(3); + column_from->insert(Field(42.42)); + column_from->insert(Field("str1")); + column_from->insert(Field(42)); + column_from->insert(Field(43.43)); + column_from->insert(Field(Array({Field(41)}))); + column_from->insert(Field(43)); + column_from->insert(Field("str2")); + column_from->insert(Field(Array({Field(42)}))); + + auto column_to = ColumnDynamic::create(255); + column_to->insert(Field(42)); + + column_to->insertRangeFrom(*column_from, 0, 8); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 4); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); + auto field = (*column_to)[column_to->size() - 8]; + ASSERT_EQ(field, Field(42.42)); + field = (*column_to)[column_to->size() - 7]; + ASSERT_EQ(field, Field("str1")); + field = (*column_to)[column_to->size() - 6]; + ASSERT_EQ(field, Field(42)); + field = (*column_to)[column_to->size() - 5]; + ASSERT_EQ(field, Field(43.43)); + field = (*column_to)[column_to->size() - 4]; + ASSERT_EQ(field, Field(Array({Field(41)}))); + field = (*column_to)[column_to->size() - 3]; + ASSERT_EQ(field, Field(43)); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, Field(Array({Field(42)}))); +} + +TEST(ColumnDynamic, InsertRangeFromOverflow8) +{ + auto column_from = ColumnDynamic::create(3); + column_from->insert(Field(42.42)); + column_from->insert(Field("str1")); + column_from->insert(Field(42)); + column_from->insert(Field(43.43)); + 
column_from->insert(Field(Array({Field(41)}))); + column_from->insert(Field(43)); + column_from->insert(Field("str2")); + column_from->insert(Field(Array({Field(42)}))); + + auto column_to = ColumnDynamic::create(3); + column_to->insert(Field(42)); + column_from->insert(Field("str1")); + + column_to->insertRangeFrom(*column_from, 0, 8); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 3); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); + auto field = (*column_to)[column_to->size() - 8]; + ASSERT_EQ(field, Field(42.42)); + field = (*column_to)[column_to->size() - 7]; + ASSERT_EQ(field, Field("str1")); + field = (*column_to)[column_to->size() - 6]; + ASSERT_EQ(field, Field(42)); + field = (*column_to)[column_to->size() - 5]; + ASSERT_EQ(field, Field(43.43)); + field = (*column_to)[column_to->size() - 4]; + ASSERT_EQ(field, Field(Array({Field(41)}))); + field = (*column_to)[column_to->size() - 3]; + ASSERT_EQ(field, Field(43)); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, Field(Array({Field(42)}))); +} + +TEST(ColumnDynamic, InsertRangeFromOverflow9) +{ + auto column_from = ColumnDynamic::create(3); + column_from->insert(Field("str1")); + column_from->insert(Field(42.42)); + column_from->insert(Field("str2")); + column_from->insert(Field(42)); + column_from->insert(Field(43.43)); + column_from->insert(Field(Array({Field(41)}))); + column_from->insert(Field(43)); + column_from->insert(Field("str2")); + column_from->insert(Field(Array({Field(42)}))); + + auto column_to = ColumnDynamic::create(3); + column_to->insert(Field(42)); + + column_to->insertRangeFrom(*column_from, 0, 9); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 3); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); + auto field = (*column_to)[column_to->size() - 9]; + ASSERT_EQ(field, Field("str1")); + field = (*column_to)[column_to->size() - 8]; + ASSERT_EQ(field, Field(42.42)); + field = (*column_to)[column_to->size() - 7]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 6]; + ASSERT_EQ(field, Field(42)); + field = (*column_to)[column_to->size() - 5]; + ASSERT_EQ(field, Field(43.43)); + field = (*column_to)[column_to->size() - 4]; + ASSERT_EQ(field, Field(Array({Field(41)}))); + field = (*column_to)[column_to->size() - 3]; + ASSERT_EQ(field, Field(43)); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, Field(Array({Field(42)}))); } TEST(ColumnDynamic, SerializeDeserializeFromArena1) @@ -583,18 +787,18 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena2) pos = column_to->deserializeAndInsertFromArena(pos); 
column_to->deserializeAndInsertFromArena(pos); - ASSERT_EQ((*column_from)[column_from->size() - 4], 42); - ASSERT_EQ((*column_from)[column_from->size() - 3], 42.42); - ASSERT_EQ((*column_from)[column_from->size() - 2], "str"); - ASSERT_EQ((*column_from)[column_from->size() - 1], Null()); - ASSERT_EQ(column_to->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, String)"); - std::vector expected_names = {"Float64", "Int8", "String"}; + ASSERT_EQ((*column_to)[column_to->size() - 4], 42); + ASSERT_EQ((*column_to)[column_to->size() - 3], 42.42); + ASSERT_EQ((*column_to)[column_to->size() - 2], "str"); + ASSERT_EQ((*column_to)[column_to->size() - 1], Null()); + ASSERT_EQ(column_to->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, SharedVariant, String)"); + std::vector expected_names = {"Float64", "Int8", "SharedVariant", "String"}; ASSERT_EQ(column_to->getVariantInfo().variant_names, expected_names); - std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"String", 2}}; + std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}; ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator == expected_variant_name_to_discriminator); } -TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow) +TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow1) { auto column_from = ColumnDynamic::create(255); column_from->insert(Field(42)); @@ -615,18 +819,56 @@ TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow) pos = column_to->deserializeAndInsertFromArena(pos); column_to->deserializeAndInsertFromArena(pos); - ASSERT_EQ((*column_from)[column_from->size() - 4], 42); - ASSERT_EQ((*column_from)[column_from->size() - 3], 42.42); - ASSERT_EQ((*column_from)[column_from->size() - 2], "str"); - ASSERT_EQ((*column_from)[column_from->size() - 1], Null()); + ASSERT_EQ((*column_to)[column_to->size() - 4], 42); + ASSERT_EQ((*column_to)[column_to->size() - 3], 42.42); + ASSERT_EQ((*column_to)[column_to->size() - 2], "str"); + ASSERT_EQ((*column_to)[column_to->size() - 1], Null()); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); +} + +TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow2) +{ + auto column_from = ColumnDynamic::create(2); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + column_from->insert(Field("str")); + column_from->insert(Field(Null())); + column_from->insert(Field(Array({Field(42)}))); + + Arena arena; + const char * pos = nullptr; + auto ref1 = column_from->serializeValueIntoArena(0, arena, pos); + column_from->serializeValueIntoArena(1, arena, pos); + column_from->serializeValueIntoArena(2, arena, pos); + column_from->serializeValueIntoArena(3, arena, pos); + column_from->serializeValueIntoArena(4, arena, pos); + + auto column_to = ColumnDynamic::create(3); + column_to->insert(Field(42.42)); + pos = column_to->deserializeAndInsertFromArena(ref1.data); + pos = column_to->deserializeAndInsertFromArena(pos); + pos = column_to->deserializeAndInsertFromArena(pos); + pos = column_to->deserializeAndInsertFromArena(pos); + 
column_to->deserializeAndInsertFromArena(pos); + + ASSERT_EQ((*column_to)[column_to->size() - 5], 42); + ASSERT_EQ((*column_to)[column_to->size() - 4], 42.42); + ASSERT_EQ((*column_to)[column_to->size() - 3], "str"); + ASSERT_EQ((*column_to)[column_to->size() - 2], Null()); + ASSERT_EQ((*column_to)[column_to->size() - 1], Field(Array({Field(42)}))); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); } TEST(ColumnDynamic, skipSerializedInArena) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(3); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -647,6 +889,34 @@ TEST(ColumnDynamic, skipSerializedInArena) pos = column_to->skipSerializedInArena(pos); ASSERT_EQ(pos, end); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.empty()); - ASSERT_TRUE(column_to->getVariantInfo().variant_names.empty()); + ASSERT_EQ(column_to->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0); + ASSERT_EQ(column_to->getVariantInfo().variant_names, Names{"SharedVariant"}); +} + +TEST(ColumnDynamic, compare) +{ + auto column_from = ColumnDynamic::create(3); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + column_from->insert(Field("str")); + column_from->insert(Field(Null())); + column_from->insert(Field(Array({Field(42)}))); + + ASSERT_EQ(column_from->compareAt(0, 0, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(0, 1, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(1, 1, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(0, 2, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(2, 0, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(2, 4, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(4, 2, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(4, 4, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(0, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(1, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(2, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(3, 3, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(4, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(3, 0, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(3, 1, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(3, 2, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(3, 4, *column_from, -1), -1); } diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index a1b1f8325f0..e00638a50ab 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include +#include namespace DB { @@ -71,8 +73,8 @@ static DataTypePtr create(const ASTPtr & arguments) auto * literal = argument->arguments->children[1]->as(); - if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() == 0 || literal->value.get() > ColumnVariant::MAX_NESTED_COLUMNS) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, 
"'max_types' argument for Dynamic type should be a positive integer between 1 and 255"); + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() > ColumnVariant::MAX_NESTED_COLUMNS - 1) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 0 and 254"); return std::make_shared(literal->value.get()); } @@ -84,30 +86,72 @@ void registerDataTypeDynamic(DataTypeFactory & factory) std::unique_ptr DataTypeDynamic::getDynamicSubcolumnData(std::string_view subcolumn_name, const DB::IDataType::SubstreamData & data, bool throw_if_null) const { - auto [subcolumn_type_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name); + auto [type_subcolumn_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name); /// Check if requested subcolumn is a valid data type. - auto subcolumn_type = DataTypeFactory::instance().tryGet(String(subcolumn_type_name)); + auto subcolumn_type = DataTypeFactory::instance().tryGet(String(type_subcolumn_name)); if (!subcolumn_type) { if (throw_if_null) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", subcolumn_type_name); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", type_subcolumn_name); return nullptr; } std::unique_ptr res = std::make_unique(subcolumn_type->getDefaultSerialization()); res->type = subcolumn_type; std::optional discriminator; + ColumnPtr null_map_for_variant_from_shared_variant; if (data.column) { /// If column was provided, we should extract subcolumn from Dynamic column. const auto & dynamic_column = assert_cast(*data.column); const auto & variant_info = dynamic_column.getVariantInfo(); + const auto & variant_column = dynamic_column.getVariantColumn(); + const auto & shared_variant = dynamic_column.getSharedVariant(); /// Check if provided Dynamic column has subcolumn of this type. - auto it = variant_info.variant_name_to_discriminator.find(subcolumn_type->getName()); + String subcolumn_type_name = subcolumn_type->getName(); + auto it = variant_info.variant_name_to_discriminator.find(subcolumn_type_name); if (it != variant_info.variant_name_to_discriminator.end()) { discriminator = it->second; - res->column = dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(*discriminator); + res->column = variant_column.getVariantPtrByGlobalDiscriminator(*discriminator); + } + /// Otherwise if there is data in shared variant try to find requested type there. + else if (!shared_variant.empty()) + { + /// Create null map for resulting subcolumn to make it Nullable. 
+ auto null_map_column = ColumnUInt8::create(); + NullMap & null_map = assert_cast(*null_map_column).getData(); + null_map.reserve(variant_column.size()); + auto subcolumn = subcolumn_type->createColumn(); + auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(dynamic_column.getSharedVariantDiscriminator()); + const auto & local_discriminators = variant_column.getLocalDiscriminators(); + const auto & offsets = variant_column.getOffsets(); + const FormatSettings format_settings; + for (size_t i = 0; i != local_discriminators.size(); ++i) + { + if (local_discriminators[i] == shared_variant_local_discr) + { + auto value = shared_variant.getDataAt(offsets[i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + if (type->getName() == subcolumn_type_name) + { + dynamic_column.getVariantSerialization(subcolumn_type, subcolumn_type_name)->deserializeBinary(*subcolumn, buf, format_settings); + null_map.push_back(0); + } + else + { + null_map.push_back(1); + } + } + else + { + null_map.push_back(1); + } + } + + res->column = std::move(subcolumn); + null_map_for_variant_from_shared_variant = std::move(null_map_column); } } @@ -125,7 +169,7 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa return nullptr; } - res->serialization = std::make_shared(res->serialization, subcolumn_type->getName(), is_null_map_subcolumn); + res->serialization = std::make_shared(res->serialization, subcolumn_type->getName(), String(subcolumn_nested_name), is_null_map_subcolumn); /// Make resulting subcolumn Nullable only if type subcolumn can be inside Nullable or can be LowCardinality(Nullable()). bool make_subcolumn_nullable = subcolumn_type->canBeInsideNullable() || subcolumn_type->lowCardinality(); if (!is_null_map_subcolumn && make_subcolumn_nullable) @@ -133,10 +177,10 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa if (data.column) { + /// Check if provided Dynamic column has subcolumn of this type. In this case we should use VariantSubcolumnCreator/VariantNullMapSubcolumnCreator to + /// create full subcolumn from variant according to discriminators. if (discriminator) { - /// Provided Dynamic column has subcolumn of this type, we should use VariantSubcolumnCreator/VariantNullMapSubcolumnCreator to - /// create full subcolumn from variant according to discriminators. const auto & variant_column = assert_cast(*data.column).getVariantColumn(); std::unique_ptr creator; if (is_null_map_subcolumn) @@ -154,6 +198,21 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa make_subcolumn_nullable); res->column = creator->create(res->column); } + /// Check if requested type was extracted from shared variant. In this case we should use + /// VariantSubcolumnCreator to create full subcolumn from variant according to created null map. + else if (null_map_for_variant_from_shared_variant) + { + if (is_null_map_subcolumn) + { + res->column = null_map_for_variant_from_shared_variant; + } + else + { + SerializationVariantElement::VariantSubcolumnCreator creator( + null_map_for_variant_from_shared_variant, "", 0, 0, make_subcolumn_nullable, null_map_for_variant_from_shared_variant); + res->column = creator.create(res->column); + } + } /// Provided Dynamic column doesn't have subcolumn of this type, just create column filled with default values. 
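To make the shared-variant branch of getDynamicSubcolumnData easier to follow, here is a standalone model of the same idea: walk the rows, decode the type tag of every shared-variant value, and produce a value vector plus a null map in which every non-matching row becomes NULL. The SharedRow representation and the extractSubcolumn helper are illustrative only, not the real column interfaces.

#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

/// One row of a model "Dynamic" column: either empty (value lives in a dedicated
/// variant or is NULL) or a (type name, serialized value) pair from the shared variant.
using SharedRow = std::optional<std::pair<std::string, std::string>>;

struct Subcolumn
{
    std::vector<std::string> values;   /// decoded values of the requested type
    std::vector<uint8_t> null_map;     /// 1 = NULL in the resulting Nullable subcolumn
};

/// Model of extracting a typed subcolumn when the type is stored in the shared variant.
Subcolumn extractSubcolumn(const std::vector<SharedRow> & rows, const std::string & requested_type)
{
    Subcolumn res;
    for (const auto & row : rows)
    {
        if (row && row->first == requested_type)
        {
            res.values.push_back(row->second);   /// "deserialize" the matching value
            res.null_map.push_back(0);
        }
        else
        {
            res.null_map.push_back(1);           /// any other type (or other variant) becomes NULL
        }
    }
    return res;
}

int main()
{
    std::vector<SharedRow> rows = {
        std::pair{std::string("Int64"), std::string("42")},
        std::nullopt,                                         /// row stored in a dedicated variant
        std::pair{std::string("String"), std::string("hi")},
        std::pair{std::string("Int64"), std::string("43")},
    };
    auto sub = extractSubcolumn(rows, "Int64");
    assert((sub.null_map == std::vector<uint8_t>{0, 1, 1, 0}));
    assert(sub.values.size() == 2);
}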
else if (is_null_map_subcolumn) { diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index 6f7dcd65b83..ca2ebdfbdbb 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -150,6 +150,12 @@ DataTypePtr DataTypeFactory::getCustom(DataTypeCustomDescPtr customization) cons return type; } +DataTypePtr DataTypeFactory::getCustom(const String & base_name, DataTypeCustomDescPtr customization) const +{ + auto type = get(base_name); + type->setCustomization(std::move(customization)); + return type; +} void DataTypeFactory::registerDataType(const String & family_name, Value creator, Case case_sensitiveness) { diff --git a/src/DataTypes/DataTypeFactory.h b/src/DataTypes/DataTypeFactory.h index edba9886d1c..a8324341691 100644 --- a/src/DataTypes/DataTypeFactory.h +++ b/src/DataTypes/DataTypeFactory.h @@ -34,6 +34,7 @@ public: DataTypePtr get(const String & family_name, const ASTPtr & parameters) const; DataTypePtr get(const ASTPtr & ast) const; DataTypePtr getCustom(DataTypeCustomDescPtr customization) const; + DataTypePtr getCustom(const String & base_name, DataTypeCustomDescPtr customization) const; /// Return nullptr in case of error. DataTypePtr tryGet(const String & full_name) const; diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index e96937d522d..67b4a0a5e31 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -192,17 +192,12 @@ MutableColumnPtr DataTypeTuple::createColumn() const MutableColumnPtr DataTypeTuple::createColumn(const ISerialization & serialization) const { - /// If we read Tuple as Variant subcolumn, it may be wrapped to SerializationVariantElement. - /// Here we don't need it, so we drop this wrapper. - const auto * current_serialization = &serialization; - while (const auto * serialization_variant_element = typeid_cast(current_serialization)) - current_serialization = serialization_variant_element->getNested().get(); - - /// If we read subcolumn of nested Tuple, it may be wrapped to SerializationNamed + /// If we read subcolumn of nested Tuple or this Tuple is a subcolumn, it may be wrapped to SerializationWrapper /// several times to allow to reconstruct the substream path name. /// Here we don't need substream path name, so we drop first several wrapper serializations. - while (const auto * serialization_named = typeid_cast(current_serialization)) - current_serialization = serialization_named->getNested().get(); + const auto * current_serialization = &serialization; + while (const auto * serialization_wrapper = dynamic_cast(current_serialization)) + current_serialization = serialization_wrapper->getNested().get(); const auto * serialization_tuple = typeid_cast(current_serialization); if (!serialization_tuple) diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index bd994e313ba..610f246265e 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -444,7 +444,7 @@ void encodeDataType(const DataTypePtr & type, WriteBuffer & buf) case BinaryTypeIndex::Dynamic: { const auto & dynamic_type = assert_cast(*type); - /// Maximum number of dynamic types is 255, we can write it as 1 byte. + /// Maximum number of dynamic types is 254, we can write it as 1 byte. 
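The updated comment reflects the new invariant: a user-visible max_types of at most 254 plus the implicit SharedVariant keeps the underlying Variant within 255 nested columns, so the parameter still fits into a single byte. Below is a small self-contained check of that arithmetic; the constant names mirror the ones used in the patch, but the code is a standalone illustration rather than the real implementation.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>

/// Illustrative constants mirroring the invariant described above.
constexpr size_t MAX_NESTED_COLUMNS = 255;                      /// limit of the underlying Variant
constexpr size_t MAX_DYNAMIC_TYPES = MAX_NESTED_COLUMNS - 1;    /// one slot is reserved for SharedVariant

uint8_t encodeMaxDynamicTypes(size_t max_types)
{
    if (max_types > MAX_DYNAMIC_TYPES)
        throw std::invalid_argument("max_types must be between 0 and 254");
    /// 0..254 always fits into one byte.
    static_assert(MAX_DYNAMIC_TYPES <= std::numeric_limits<uint8_t>::max());
    return static_cast<uint8_t>(max_types);
}

int main()
{
    assert(encodeMaxDynamicTypes(0) == 0);
    assert(encodeMaxDynamicTypes(254) == 254);
    try { encodeMaxDynamicTypes(255); assert(false); } catch (const std::invalid_argument &) {}
}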
writeBinary(UInt8(dynamic_type.getMaxDynamicTypes()), buf); break; } diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 7609ffc91ca..67b29750948 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -27,15 +27,21 @@ namespace ErrorCodes struct SerializeBinaryBulkStateDynamic : public ISerialization::SerializeBinaryBulkState { SerializationDynamic::DynamicStructureSerializationVersion structure_version; + size_t max_dynamic_types; DataTypePtr variant_type; Names variant_names; SerializationPtr variant_serialization; ISerialization::SerializeBinaryBulkStatePtr variant_state; - /// Variants statistics. Map (Variant name) -> (Variant size). - ColumnDynamic::Statistics statistics = { .source = ColumnDynamic::Statistics::Source::READ, .data = {} }; + /// Variants statistics. + ColumnDynamic::Statistics statistics; + /// If true, statistics will be recalculated during serialization. + bool recalculate_statistics = false; - explicit SerializeBinaryBulkStateDynamic(UInt64 structure_version_) : structure_version(structure_version_) {} + explicit SerializeBinaryBulkStateDynamic(UInt64 structure_version_) + : structure_version(structure_version_), statistics(ColumnDynamic::Statistics::Source::READ) + { + } }; struct DeserializeBinaryBulkStateDynamic : public ISerialization::DeserializeBinaryBulkState @@ -106,20 +112,41 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( writeBinaryLittleEndian(structure_version, *stream); auto dynamic_state = std::make_shared(structure_version); + dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); + /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types + /// that is specified in the Dynamic type (we could decrease it before merge). + writeBinaryLittleEndian(dynamic_state->max_dynamic_types, *stream); + dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; const auto & variant_column = column_dynamic.getVariantColumn(); - /// Write internal Variant type name. + /// Write information about variants. + size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. + writeBinaryLittleEndian(num_variants, *stream); if (settings.data_types_binary_encoding) - encodeDataType(dynamic_state->variant_type, *stream); + { + const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); + for (const auto & variant: variants) + { + if (variant->getName() != ColumnDynamic::getSharedVariantTypeName()) + encodeDataType(dynamic_state->variant_type, *stream); + } + } else - writeStringBinary(dynamic_state->variant_type->getName(), *stream); + { + for (const auto & name : dynamic_state->variant_names) + { + if (name != ColumnDynamic::getSharedVariantTypeName()) + writeStringBinary(name, *stream); + } + } /// Write statistics in prefix if needed. if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX) { const auto & statistics = column_dynamic.getStatistics(); + /// First, write statistics for usual variants. for (size_t i = 0; i != variant_info.variant_names.size(); ++i) { size_t size = 0; @@ -129,13 +156,55 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( /// - statistics read from the data part during deserialization of Dynamic column (Statistics::Source::READ). 
/// We can rely only on statistics calculated during the merge, because column with statistics that was read /// during deserialization from some data part could be filtered/limited/transformed/etc and so the statistics can be outdated. - if (!statistics.data.empty() && statistics.source == ColumnDynamic::Statistics::Source::MERGE) - size = statistics.data.at(variant_info.variant_names[i]); + if (statistics && statistics->source == ColumnDynamic::Statistics::Source::MERGE) + size = statistics->variants_statistics.at(variant_info.variant_names[i]); /// Otherwise we can use only variant sizes from current column. else size = variant_column.getVariantByGlobalDiscriminator(i).size(); writeVarUInt(size, *stream); } + + /// Second, write statistics for variants in shared variant. + /// Check if we have statistics calculated during merge of some data parts (Statistics::Source::MERGE). + if (statistics && statistics->source == ColumnDynamic::Statistics::Source::MERGE) + { + writeVarUInt(statistics->shared_variants_statistics.size(), *stream); + for (const auto & [variant_name, size] : statistics->shared_variants_statistics) + { + writeStringBinary(variant_name, *stream); + writeVarUInt(size, *stream); + } + } + /// If we don't have statistics for shared variants from merge, calculate it from the column. + else + { + std::unordered_map shared_variants_statistics; + const auto & shared_variant = column_dynamic.getSharedVariant(); + for (size_t i = 0; i != shared_variant.size(); ++i) + { + auto value = shared_variant.getDataAt(i); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + if (auto it = shared_variants_statistics.find(type_name); it != shared_variants_statistics.end()) + ++it->second; + else if (shared_variants_statistics.size() < ColumnDynamic::Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) + shared_variants_statistics.emplace(type_name, 1); + } + + writeVarUInt(shared_variants_statistics.size(), *stream); + for (const auto & [variant_name, size] : shared_variants_statistics) + { + writeStringBinary(variant_name, *stream); + writeVarUInt(size, *stream); + } + } + } + /// Otherwise statistics will be written in the suffix, in this case we will recalculate + /// statistics during serialization to make it more precise. + else + { + dynamic_state->recalculate_statistics = true; } dynamic_state->variant_serialization = dynamic_state->variant_type->getDefaultSerialization(); @@ -182,33 +251,58 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD UInt64 structure_version; readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); - /// Read internal Variant type name. + /// Read max_dynamic_types parameter. + readBinaryLittleEndian(structure_state->max_dynamic_types, *structure_stream); + /// Read information about variants. + DataTypes variants; + size_t num_variants; + readBinaryLittleEndian(num_variants, *structure_stream); + variants.reserve(num_variants + 1); /// +1 for shared variant. 
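The structure prefix handled above consists of the structure version, the max_dynamic_types parameter, the number of variants without the shared variant, the variant names (or encoded types), and the statistics. The sketch below round-trips a simplified prefix through a std::string to show the layout end to end; the encoding here (fixed-width integers, length-prefixed strings) is deliberately simplified and is not the exact ClickHouse wire format.

#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

struct DynamicPrefix
{
    uint8_t max_dynamic_types = 0;
    std::vector<std::string> variant_names;   /// without SharedVariant
    std::vector<uint64_t> variant_sizes;      /// statistics, one entry per variant
};

static void writeString(std::ostream & out, const std::string & s)
{
    uint64_t len = s.size();
    out.write(reinterpret_cast<const char *>(&len), sizeof(len));
    out.write(s.data(), static_cast<std::streamsize>(s.size()));
}

static std::string readString(std::istream & in)
{
    uint64_t len = 0;
    in.read(reinterpret_cast<char *>(&len), sizeof(len));
    std::string s(len, '\0');
    in.read(s.data(), static_cast<std::streamsize>(len));
    return s;
}

std::string writePrefix(const DynamicPrefix & p)
{
    std::ostringstream out;
    out.put(static_cast<char>(p.max_dynamic_types));
    uint64_t num_variants = p.variant_names.size();
    out.write(reinterpret_cast<const char *>(&num_variants), sizeof(num_variants));
    for (const auto & name : p.variant_names)
        writeString(out, name);
    for (uint64_t size : p.variant_sizes)
        out.write(reinterpret_cast<const char *>(&size), sizeof(size));
    return out.str();
}

DynamicPrefix readPrefix(const std::string & data)
{
    std::istringstream in(data);
    DynamicPrefix p;
    p.max_dynamic_types = static_cast<uint8_t>(in.get());
    uint64_t num_variants = 0;
    in.read(reinterpret_cast<char *>(&num_variants), sizeof(num_variants));
    for (uint64_t i = 0; i != num_variants; ++i)
        p.variant_names.push_back(readString(in));
    for (uint64_t i = 0; i != num_variants; ++i)
    {
        uint64_t size = 0;
        in.read(reinterpret_cast<char *>(&size), sizeof(size));
        p.variant_sizes.push_back(size);
    }
    return p;
}

int main()
{
    DynamicPrefix p{3, {"Int64", "String"}, {10, 7}};
    auto roundtrip = readPrefix(writePrefix(p));
    assert(roundtrip.max_dynamic_types == 3);
    assert(roundtrip.variant_names == p.variant_names);
    assert(roundtrip.variant_sizes == p.variant_sizes);
    /// The reader would then append SharedVariant to the variant list, as the patch above does.
}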
if (settings.data_types_binary_encoding) { - structure_state->variant_type = decodeDataType(*structure_stream); + for (size_t i = 0; i != num_variants; ++i) + variants.push_back(decodeDataType(*structure_stream)); } else { String data_type_name; - readStringBinary(data_type_name, *structure_stream); - structure_state->variant_type = DataTypeFactory::instance().get(data_type_name); + for (size_t i = 0; i != num_variants; ++i) + { + readStringBinary(data_type_name, *structure_stream); + variants.push_back(DataTypeFactory::instance().get(data_type_name)); + } } - const auto * variant_type = typeid_cast(structure_state->variant_type.get()); - if (!variant_type) - throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type of Dynamic nested column, expected Variant, got {}", structure_state->variant_type->getName()); + /// Add shared variant, Dynamic column should always have it. + variants.push_back(ColumnDynamic::getSharedVariantDataType()); + auto variant_type = std::make_shared(variants); /// Read statistics. if (settings.dynamic_read_statistics) { - const auto & variants = variant_type->getVariants(); + ColumnDynamic::Statistics statistics(ColumnDynamic::Statistics::Source::READ); + /// First, read statistics for usual variants. size_t variant_size; - for (const auto & variant : variants) + for (const auto & variant : variant_type->getVariants()) { readVarUInt(variant_size, *structure_stream); - structure_state->statistics.data[variant->getName()] = variant_size; + statistics.variants_statistics[variant->getName()] = variant_size; } + + /// Second, read statistics for shared variants. + size_t statistics_size; + readVarUInt(statistics_size, *structure_stream); + String variant_name; + for (size_t i = 0; i != statistics_size; ++i) + { + readStringBinary(variant_name, *structure_stream); + readVarUInt(variant_size, *structure_stream); + statistics.shared_variants_statistics[variant_name] = variant_size; + } + + structure_state->statistics = std::make_shared(std::move(statistics)); } + structure_state->variant_type = std::move(variant_type); state = structure_state; addToSubstreamsDeserializeStatesCache(cache, settings.path, state); } @@ -231,8 +325,16 @@ void SerializationDynamic::serializeBinaryBulkStateSuffix( /// Write statistics in suffix if needed. if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX) { + /// First, write statistics for usual variants. for (const auto & variant_name : dynamic_state->variant_names) - writeVarUInt(dynamic_state->statistics.data[variant_name], *stream); + writeVarUInt(dynamic_state->statistics.variants_statistics[variant_name], *stream); + /// Second, write statistics for shared variants. + writeVarUInt(dynamic_state->statistics.shared_variants_statistics.size(), *stream); + for (const auto & [variant_name, size] : dynamic_state->statistics.shared_variants_statistics) + { + writeStringBinary(variant_name, *stream); + writeVarUInt(size, *stream); + } } settings.path.push_back(Substream::DynamicData); @@ -255,9 +357,42 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( if (!variant_info.variant_type->equals(*dynamic_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. 
Expected: {}, Got: {}", dynamic_state->variant_type->getName(), variant_info.variant_type->getName()); + if (column_dynamic.getMaxDynamicTypes() != dynamic_state->max_dynamic_types) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes()); + settings.path.push_back(Substream::DynamicData); - assert_cast(*dynamic_state->variant_serialization) - .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.data); + if (dynamic_state->recalculate_statistics) + { + assert_cast(*dynamic_state->variant_serialization) + .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.variants_statistics); + /// Calculate statistics for shared variants. + const auto & shared_variant = column_dynamic.getSharedVariant(); + if (!shared_variant.empty()) + { + const auto & local_discriminators = variant_column->getLocalDiscriminators(); + const auto & offsets = variant_column->getOffsets(); + const auto shared_variant_discr = variant_column->localDiscriminatorByGlobal(column_dynamic.getSharedVariantDiscriminator()); + size_t end = limit == 0 || offset + limit > local_discriminators.size() ? local_discriminators.size() : offset + limit; + for (size_t i = offset; i != end; ++i) + { + if (local_discriminators[i] == shared_variant_discr) + { + auto value = shared_variant.getDataAt(offsets[i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + if (auto it = dynamic_state->statistics.shared_variants_statistics.find(type_name); it != dynamic_state->statistics.shared_variants_statistics.end()) + ++it->second; + else if (dynamic_state->statistics.shared_variants_statistics.size() < ColumnDynamic::Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) + dynamic_state->statistics.shared_variants_statistics.emplace(type_name, 1); + } + } + } + } + else + { + assert_cast(*dynamic_state->variant_serialization).serializeBinaryBulkWithMultipleStreams(*variant_column, offset, limit, settings, dynamic_state->variant_state); + } settings.path.pop_back(); } @@ -272,13 +407,17 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams( return; auto mutable_column = column->assumeMutable(); + auto & column_dynamic = assert_cast(*mutable_column); auto * dynamic_state = checkAndGetState(state); auto * structure_state = checkAndGetState(dynamic_state->structure_state); if (mutable_column->empty()) - mutable_column = ColumnDynamic::create(structure_state->variant_type->createColumn(), structure_state->variant_type, max_dynamic_types, structure_state->statistics); + { + column_dynamic.setMaxDynamicPaths(structure_state->max_dynamic_types); + column_dynamic.setVariantType(structure_state->variant_type); + column_dynamic.setStatistics(structure_state->statistics); + } - auto & column_dynamic = assert_cast(*mutable_column); const auto & variant_info = column_dynamic.getVariantInfo(); if (!variant_info.variant_type->equals(*structure_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. 
Expected: {}, Got: {}", structure_state->variant_type->getName(), variant_info.variant_type->getName()); @@ -329,24 +468,42 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu encodeDataType(std::make_shared(), ostr); return; } + /// Check if this value is in shared variant. In this case it's already + /// in desired binary format. + else if (global_discr == dynamic_column.getSharedVariantDiscriminator()) + { + auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(row_num)); + ostr.write(value.data, value.size); + return; + } const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(global_discr); + const auto & variant_type_name = variant_info.variant_names[global_discr]; encodeDataType(variant_type, ostr); - variant_type->getDefaultSerialization()->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings); + dynamic_column.getVariantSerialization(variant_type, variant_type_name)->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings); } -template -static void deserializeVariant( +template +static ReturnType deserializeVariant( ColumnVariant & variant_column, - const DataTypePtr & variant_type, + const SerializationPtr & variant_serialization, ColumnVariant::Discriminator global_discr, ReadBuffer & istr, DeserializeFunc deserialize) { auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discr); - deserialize(*variant_type->getDefaultSerialization(), variant, istr); + if constexpr (std::is_same_v) + { + if (!deserialize(*variant_serialization, variant, istr)) + return ReturnType(false); + } + else + { + deserialize(*variant_serialization, variant, istr); + } variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(global_discr)); variant_column.getOffsets().push_back(variant.size() - 1); + return ReturnType(true); } void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -360,11 +517,12 @@ void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr } auto variant_type_name = variant_type->getName(); + const auto & variant_serialization = dynamic_column.getVariantSerialization(variant_type, variant_type_name); const auto & variant_info = dynamic_column.getVariantInfo(); auto it = variant_info.variant_name_to_discriminator.find(variant_type_name); if (it != variant_info.variant_name_to_discriminator.end()) { - deserializeVariant(dynamic_column.getVariantColumn(), variant_type, it->second, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); + deserializeVariant(dynamic_column.getVariantColumn(), variant_serialization, it->second, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); return; } @@ -372,25 +530,15 @@ void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr if (dynamic_column.addNewVariant(variant_type)) { auto discr = variant_info.variant_name_to_discriminator.at(variant_type_name); - deserializeVariant(dynamic_column.getVariantColumn(), variant_type, discr, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, 
settings); }); + deserializeVariant(dynamic_column.getVariantColumn(), variant_serialization, discr, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); return; } /// We reached maximum number of variants and couldn't add new variant. - /// This case should be really rare in real use cases. - /// We should always be able to add String variant and insert value as String. - dynamic_column.addStringVariant(); + /// In this case we insert this value into shared variant in binary form. auto tmp_variant_column = variant_type->createColumn(); - variant_type->getDefaultSerialization()->deserializeBinary(*tmp_variant_column, istr, settings); - auto string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - auto & variant_column = dynamic_column.getVariantColumn(); - variant_column.insertIntoVariantFrom(variant_info.variant_name_to_discriminator.at("String"), *string_column, 0); -} - -void SerializationDynamic::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextCSV(dynamic_column.getVariantColumn(), row_num, ostr, settings); + variant_serialization->deserializeBinary(*tmp_variant_column, istr, settings); + dynamic_column.insertValueIntoSharedVariant(*tmp_variant_column, variant_type, variant_type_name, 0); } template @@ -406,6 +554,7 @@ static void deserializeTextImpl( auto & dynamic_column = assert_cast(column); auto & variant_column = dynamic_column.getVariantColumn(); const auto & variant_info = dynamic_column.getVariantInfo(); + const auto & variant_types = assert_cast(*variant_info.variant_type).getVariants(); String field = read_field(istr); auto field_buf = std::make_unique(field); JSONInferenceInfo json_info; @@ -413,27 +562,81 @@ static void deserializeTextImpl( if (escaping_rule == FormatSettings::EscapingRule::JSON) transformFinalInferredJSONTypeIfNeeded(variant_type, settings, &json_info); - if (checkIfTypeIsComplete(variant_type) && dynamic_column.addNewVariant(variant_type)) + /// If inferred type is not complete, we cannot add it as a new variant. + /// Let's try to deserialize this field into existing variants. + /// If failed, insert this value as String. + if (!checkIfTypeIsComplete(variant_type)) + { + size_t shared_variant_discr = dynamic_column.getSharedVariantDiscriminator(); + for (size_t i = 0; i != variant_types.size(); ++i) + { + field_buf = std::make_unique(field); + if (i != shared_variant_discr + && deserializeVariant( + variant_column, + dynamic_column.getVariantSerialization(variant_types[i], variant_info.variant_names[i]), + i, + *field_buf, + try_deserialize_variant)) + return; + } + + variant_type = std::make_shared(); + /// To be able to deserialize field as String with Quoted escaping rule, it should be quoted. 
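The control flow of deserializeTextImpl amounts to a three-step insertion strategy: reuse an existing variant if one accepts the value, otherwise register a new variant while capacity remains, otherwise store the value in the shared variant. The standalone model below shows only that ordering; the names and return values are illustrative, not the real API.

#include <cassert>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

struct TextInsertModel
{
    size_t max_types;
    std::unordered_set<std::string> variants;                  /// dedicated variant types
    std::vector<std::pair<std::string, std::string>> shared;   /// fallback storage

    /// Returns where the value ended up: "variant", "new variant" or "shared".
    std::string insert(const std::string & inferred_type, const std::string & field)
    {
        if (variants.count(inferred_type))
            return "variant";                      /// 1. an existing variant accepts the value
        if (variants.size() < max_types)
        {
            variants.insert(inferred_type);        /// 2. still room for a new dedicated variant
            return "new variant";
        }
        shared.emplace_back(inferred_type, field); /// 3. overflow into the shared variant
        return "shared";
    }
};

int main()
{
    TextInsertModel m{2};
    assert(m.insert("Int64", "42") == "new variant");
    assert(m.insert("String", "abc") == "new variant");
    assert(m.insert("Int64", "43") == "variant");
    assert(m.insert("Float64", "1.5") == "shared");   /// no capacity left
}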
+ if (escaping_rule == FormatSettings::EscapingRule::Quoted && (field.size() < 2 || field.front() != '\'' || field.back() != '\'')) + field = "'" + field + "'"; + } + else if (dynamic_column.addNewVariant(variant_type, variant_type->getName())) { auto discr = variant_info.variant_name_to_discriminator.at(variant_type->getName()); - deserializeVariant(dynamic_column.getVariantColumn(), variant_type, discr, *field_buf, deserialize_variant); + deserializeVariant(dynamic_column.getVariantColumn(), dynamic_column.getVariantSerialization(variant_type), discr, *field_buf, deserialize_variant); return; } - /// We couldn't infer type or add new variant. Try to insert field into current variants. + /// We couldn't infer type or add new variant. Insert it into shared variant. + auto tmp_variant_column = variant_type->createColumn(); field_buf = std::make_unique(field); - if (try_deserialize_variant(*variant_info.variant_type->getDefaultSerialization(), variant_column, *field_buf)) - return; + auto variant_type_name = variant_type->getName(); + deserialize_variant(*dynamic_column.getVariantSerialization(variant_type, variant_type_name), *tmp_variant_column, *field_buf); + dynamic_column.insertValueIntoSharedVariant(*tmp_variant_column, variant_type, variant_type_name, 0); +} - /// We couldn't insert field into any existing variant, add String variant and read value as String. - dynamic_column.addStringVariant(); +template +static void serializeTextImpl( + const IColumn & column, + size_t row_num, + WriteBuffer & ostr, + const FormatSettings & settings, + NestedSerialize nested_serialize) +{ + const auto & dynamic_column = assert_cast(column); + const auto & variant_column = dynamic_column.getVariantColumn(); + /// Check if this row has value in shared variant. In this case we should first deserialize it from binary format. + if (variant_column.globalDiscriminatorAt(row_num) == dynamic_column.getSharedVariantDiscriminator()) + { + auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(row_num)); + ReadBufferFromMemory buf(value.data, value.size); + auto variant_type = decodeDataType(buf); + auto tmp_variant_column = variant_type->createColumn(); + auto variant_serialization = dynamic_column.getVariantSerialization(variant_type); + variant_serialization->deserializeBinary(*tmp_variant_column, buf, settings); + nested_serialize(*variant_serialization, *tmp_variant_column, 0, ostr); + } + /// Otherwise just use serialization for Variant. 
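serializeTextCSV, serializeTextEscaped, serializeTextQuoted, serializeTextJSON, serializeTextRaw and serializeTextXML below all collapse into serializeTextImpl plus a small lambda that performs the format-specific step. Here is a minimal standalone illustration of that dispatch pattern, with made-up formatting rules rather than the real escaping logic.

#include <cassert>
#include <functional>
#include <string>

/// Generic "serialize one value as text" implementation: shared preparation first,
/// then the format-specific step is delegated to a callable, exactly one per output format.
std::string serializeTextImpl(const std::string & value, const std::function<std::string(const std::string &)> & nested_serialize)
{
    /// Shared work (in the patch: possibly decoding the value from the shared variant first).
    std::string prepared = value;
    return nested_serialize(prepared);
}

std::string serializeTextCSV(const std::string & value)
{
    return serializeTextImpl(value, [](const std::string & v) { return "\"" + v + "\""; });
}

std::string serializeTextQuoted(const std::string & value)
{
    return serializeTextImpl(value, [](const std::string & v) { return "'" + v + "'"; });
}

int main()
{
    assert(serializeTextCSV("abc") == "\"abc\"");
    assert(serializeTextQuoted("abc") == "'abc'");
}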
+ else + { + nested_serialize(*dynamic_column.getVariantInfo().variant_type->getDefaultSerialization(), variant_column, row_num, ostr); + } +} - if (escaping_rule == FormatSettings::EscapingRule::Quoted && (field.size() < 2 || field.front() != '\'' || field.back() != '\'')) - field = "'" + field + "'"; +void SerializationDynamic::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextCSV(col, row, buf, settings); + }; - field_buf = std::make_unique(field); - auto string_discr = variant_info.variant_name_to_discriminator.at("String"); - deserializeVariant(dynamic_column.getVariantColumn(), std::make_shared(), string_discr, *field_buf, deserialize_variant); + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -466,8 +669,12 @@ bool SerializationDynamic::tryDeserializeTextCSV(DB::IColumn & column, DB::ReadB void SerializationDynamic::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextEscaped(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextEscaped(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -500,8 +707,12 @@ bool SerializationDynamic::tryDeserializeTextEscaped(DB::IColumn & column, DB::R void SerializationDynamic::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextQuoted(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextQuoted(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -534,8 +745,12 @@ bool SerializationDynamic::tryDeserializeTextQuoted(DB::IColumn & column, DB::Re void SerializationDynamic::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextJSON(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextJSON(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void 
SerializationDynamic::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -568,8 +783,12 @@ bool SerializationDynamic::tryDeserializeTextJSON(DB::IColumn & column, DB::Read void SerializationDynamic::serializeTextRaw(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextRaw(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextRaw(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -602,8 +821,12 @@ bool SerializationDynamic::tryDeserializeTextRaw(DB::IColumn & column, DB::ReadB void SerializationDynamic::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeText(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeText(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -636,8 +859,12 @@ bool SerializationDynamic::tryDeserializeWholeText(DB::IColumn & column, DB::Rea void SerializationDynamic::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextXML(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextXML(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } } diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index 001a3cf87ce..3dbf311fb6c 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -105,9 +105,13 @@ private: { DynamicStructureSerializationVersion structure_version; DataTypePtr variant_type; - ColumnDynamic::Statistics statistics = {.source = ColumnDynamic::Statistics::Source::READ, .data = {}}; + size_t max_dynamic_types; + ColumnDynamic::StatisticsPtr statistics; - explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) : structure_version(structure_version_) {} + explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) + : structure_version(structure_version_) + { + } }; size_t max_dynamic_types; diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.cpp b/src/DataTypes/Serializations/SerializationDynamicElement.cpp index 211f0ac9377..cffca14bca5 100644 
--- a/src/DataTypes/Serializations/SerializationDynamicElement.cpp +++ b/src/DataTypes/Serializations/SerializationDynamicElement.cpp @@ -4,7 +4,10 @@ #include #include #include +#include #include +#include +#include #include namespace DB @@ -21,6 +24,8 @@ struct DeserializeBinaryBulkStateDynamicElement : public ISerialization::Deseria ISerialization::DeserializeBinaryBulkStatePtr structure_state; SerializationPtr variant_serialization; ISerialization::DeserializeBinaryBulkStatePtr variant_element_state; + bool read_from_shared_variant; + ColumnPtr shared_variant; }; void SerializationDynamicElement::enumerateStreams( @@ -73,9 +78,10 @@ void SerializationDynamicElement::deserializeBinaryBulkStatePrefix( auto dynamic_element_state = std::make_shared(); dynamic_element_state->structure_state = std::move(structure_state); - const auto & variant_type = checkAndGetState(dynamic_element_state->structure_state)->variant_type; + const auto & variant_type = assert_cast( + *checkAndGetState(dynamic_element_state->structure_state)->variant_type); /// Check if we actually have required element in the Variant. - if (auto global_discr = assert_cast(*variant_type).tryGetVariantDiscriminator(dynamic_element_name)) + if (auto global_discr = variant_type.tryGetVariantDiscriminator(dynamic_element_name)) { settings.path.push_back(Substream::DynamicData); if (is_null_map_subcolumn) @@ -83,6 +89,21 @@ void SerializationDynamicElement::deserializeBinaryBulkStatePrefix( else dynamic_element_state->variant_serialization = std::make_shared(nested_serialization, dynamic_element_name, *global_discr); dynamic_element_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_element_state->variant_element_state, cache); + dynamic_element_state->read_from_shared_variant = false; + settings.path.pop_back(); + } + /// If we don't have this element in the Variant, we will read shared variant and try to find it there. + else + { + auto shared_variant_global_discr = variant_type.tryGetVariantDiscriminator(ColumnDynamic::getSharedVariantTypeName()); + chassert(shared_variant_global_discr.has_value()); + settings.path.push_back(Substream::DynamicData); + dynamic_element_state->variant_serialization = std::make_shared( + ColumnDynamic::getSharedVariantDataType()->getDefaultSerialization(), + ColumnDynamic::getSharedVariantTypeName(), + *shared_variant_global_discr); + dynamic_element_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_element_state->variant_element_state, cache); + dynamic_element_state->read_from_shared_variant = true; settings.path.pop_back(); } @@ -115,23 +136,103 @@ void SerializationDynamicElement::deserializeBinaryBulkWithMultipleStreams( auto * dynamic_element_state = checkAndGetState(state); - if (dynamic_element_state->variant_serialization) + /// Check if this subcolumn should not be read from shared variant. + /// In this case just read data from the corresponding variant. 
+ if (!dynamic_element_state->read_from_shared_variant) { settings.path.push_back(Substream::DynamicData); - dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams(result_column, limit, settings, dynamic_element_state->variant_element_state, cache); + dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams( + result_column, limit, settings, dynamic_element_state->variant_element_state, cache); settings.path.pop_back(); } - else if (is_null_map_subcolumn) - { - auto mutable_column = result_column->assumeMutable(); - auto & data = assert_cast(*mutable_column).getData(); - data.resize_fill(data.size() + limit, 1); - } + /// Otherwise, read the shared variant column and extract the requested type from it. else { - auto mutable_column = result_column->assumeMutable(); - mutable_column->insertManyDefaults(limit); - result_column = std::move(mutable_column); + settings.path.push_back(Substream::DynamicData); + /// Initialize shared_variant column if needed. + if (result_column->empty()) + dynamic_element_state->shared_variant = makeNullable(ColumnDynamic::getSharedVariantDataType()->createColumn()); + size_t prev_size = result_column->size(); + dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams( + dynamic_element_state->shared_variant, limit, settings, dynamic_element_state->variant_element_state, cache); + settings.path.pop_back(); + + /// If we need to read a subcolumn from variant column, create an empty variant column, fill it and extract subcolumn. + auto variant_type = DataTypeFactory::instance().get(dynamic_element_name); + auto result_type = makeNullableOrLowCardinalityNullableSafe(variant_type); + MutableColumnPtr variant_column = nested_subcolumn.empty() || is_null_map_subcolumn ? result_column->assumeMutable() : result_type->createColumn(); + variant_column->reserve(variant_column->size() + limit); + MutableColumnPtr non_nullable_variant_column = variant_column->assumeMutable(); + NullMap * null_map = nullptr; + bool is_low_cardinality_nullable = isColumnLowCardinalityNullable(*variant_column); + /// Resulting subcolumn can be Nullable, but value is serialized in shared variant as non-Nullable. + /// Extract non-nullable column and remember the null map to fill it during deserialization. + if (isColumnNullable(*variant_column)) + { + auto & nullable_variant_column = assert_cast(*variant_column); + non_nullable_variant_column = nullable_variant_column.getNestedColumnPtr()->assumeMutable(); + null_map = &nullable_variant_column.getNullMapData(); + } + else if (is_null_map_subcolumn) + { + null_map = &assert_cast(*variant_column).getData(); + } + + auto variant_serialization = variant_type->getDefaultSerialization(); + + const auto & nullable_shared_variant = assert_cast(*dynamic_element_state->shared_variant); + const auto & shared_null_map = nullable_shared_variant.getNullMapData(); + const auto & shared_variant = assert_cast(nullable_shared_variant.getNestedColumn()); + const FormatSettings format_settings; + for (size_t i = prev_size; i != shared_variant.size(); ++i) + { + if (!shared_null_map[i]) + { + auto value = shared_variant.getDataAt(i); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + if (type->getName() == dynamic_element_name) + { + /// When requested type is LowCardinality the subcolumn type name will be LowCardinality(Nullable). 
+ /// Value in shared variant is serialized as LowCardinality and we cannot simply deserialize it + /// inside LowCardinality(Nullable) column (it will try to deserialize null bit). In this case we + /// have to create a temporary LowCardinality column, deserialize value into it and insert it into + /// resulting LowCardinality(Nullable) (insertion from LowCardinality column to LowCardinality(Nullable) + /// column is allowed). + if (is_low_cardinality_nullable) + { + auto tmp_column = variant_type->createColumn(); + variant_serialization->deserializeBinary(*tmp_column, buf, format_settings); + non_nullable_variant_column->insertFrom(*tmp_column, 0); + } + else if (is_null_map_subcolumn) + { + null_map->push_back(0); + } + else + { + variant_serialization->deserializeBinary(*non_nullable_variant_column, buf, format_settings); + if (null_map) + null_map->push_back(0); + } + } + else + { + variant_column->insertDefault(); + } + } + else + { + variant_column->insertDefault(); + } + } + + /// Extract nested subcolumn if needed. + if (!nested_subcolumn.empty() && !is_null_map_subcolumn) + { + auto subcolumn = result_type->getSubcolumn(nested_subcolumn, variant_column->getPtr()); + result_column->assumeMutable()->insertRangeFrom(*subcolumn, 0, subcolumn->size()); + } } } diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.h b/src/DataTypes/Serializations/SerializationDynamicElement.h index 127d14a55e0..c674cf479ae 100644 --- a/src/DataTypes/Serializations/SerializationDynamicElement.h +++ b/src/DataTypes/Serializations/SerializationDynamicElement.h @@ -13,11 +13,15 @@ private: /// To be able to deserialize Dynamic element as a subcolumn /// we need its type name and global discriminator. String dynamic_element_name; + /// Nested subcolumn of the requested dynamic element type. For example, for the `Tuple(a UInt32)`.a + /// subcolumn, dynamic_element_name = 'Tuple(a UInt32)' and nested_subcolumn = 'a'. + /// Needed to extract nested subcolumn from values in shared variant. 
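The new nested_subcolumn member exists because a Dynamic subcolumn name such as `Tuple(a UInt32).a` carries two parts: the element type (`Tuple(a UInt32)`) and the path inside that type (`a`). A simplified standalone splitter is shown below; it only illustrates the idea and does not reproduce the real name-splitting code.

#include <cassert>
#include <string>
#include <utility>

/// Split "Tuple(a UInt32).a" into {"Tuple(a UInt32)", "a"}: the first '.' that is not
/// nested inside parentheses separates the element type name from the nested subcolumn path.
std::pair<std::string, std::string> splitDynamicSubcolumnName(const std::string & name)
{
    int depth = 0;
    for (size_t i = 0; i != name.size(); ++i)
    {
        if (name[i] == '(')
            ++depth;
        else if (name[i] == ')')
            --depth;
        else if (name[i] == '.' && depth == 0)
            return {name.substr(0, i), name.substr(i + 1)};
    }
    return {name, ""};   /// no nested part, e.g. plain "UInt32"
}

int main()
{
    auto [type_name, nested] = splitDynamicSubcolumnName("Tuple(a UInt32).a");
    assert(type_name == "Tuple(a UInt32)");
    assert(nested == "a");
    assert(splitDynamicSubcolumnName("UInt32").second.empty());
}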
+ String nested_subcolumn; bool is_null_map_subcolumn; public: - SerializationDynamicElement(const SerializationPtr & nested_, const String & dynamic_element_name_, bool is_null_map_subcolumn_ = false) - : SerializationWrapper(nested_), dynamic_element_name(dynamic_element_name_), is_null_map_subcolumn(is_null_map_subcolumn_) + SerializationDynamicElement(const SerializationPtr & nested_, const String & dynamic_element_name_, const String & nested_subcolumn_, bool is_null_map_subcolumn_ = false) + : SerializationWrapper(nested_), dynamic_element_name(dynamic_element_name_), nested_subcolumn(nested_subcolumn_), is_null_map_subcolumn(is_null_map_subcolumn_) { } diff --git a/src/DataTypes/Serializations/SerializationVariantElement.cpp b/src/DataTypes/Serializations/SerializationVariantElement.cpp index 03b5d9584e0..36dc85f60ee 100644 --- a/src/DataTypes/Serializations/SerializationVariantElement.cpp +++ b/src/DataTypes/Serializations/SerializationVariantElement.cpp @@ -305,8 +305,10 @@ SerializationVariantElement::VariantSubcolumnCreator::VariantSubcolumnCreator( const String & variant_element_name_, ColumnVariant::Discriminator global_variant_discriminator_, ColumnVariant::Discriminator local_variant_discriminator_, - bool make_nullable_) + bool make_nullable_, + const ColumnPtr & null_map_) : local_discriminators(local_discriminators_) + , null_map(null_map_) , variant_element_name(variant_element_name_) , global_variant_discriminator(global_variant_discriminator_) , local_variant_discriminator(local_variant_discriminator_) @@ -314,12 +316,13 @@ SerializationVariantElement::VariantSubcolumnCreator::VariantSubcolumnCreator( { } -DataTypePtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB::DataTypePtr & prev) const + +DataTypePtr SerializationVariantElement::VariantSubcolumnCreator::create(const DataTypePtr & prev) const { return make_nullable ? makeNullableOrLowCardinalityNullableSafe(prev) : prev; } -SerializationPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB::SerializationPtr & prev) const +SerializationPtr SerializationVariantElement::VariantSubcolumnCreator::create(const SerializationPtr & prev) const { return std::make_shared(prev, variant_element_name, global_variant_discriminator); } @@ -339,12 +342,16 @@ ColumnPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB: return res; } - /// In general case we should iterate through discriminators and create null-map for our variant. - NullMap null_map; - null_map.reserve(local_discriminators->size()); - const auto & local_discriminators_data = assert_cast(*local_discriminators).getData(); - for (auto local_discr : local_discriminators_data) - null_map.push_back(local_discr != local_variant_discriminator); + /// In general case we should iterate through discriminators and create null-map for our variant if we don't already have it. + std::optional null_map_from_discriminators; + if (!null_map) + { + null_map_from_discriminators = NullMap(); + null_map_from_discriminators->reserve(local_discriminators->size()); + const auto & local_discriminators_data = assert_cast(*local_discriminators).getData(); + for (auto local_discr : local_discriminators_data) + null_map_from_discriminators->push_back(local_discr != local_variant_discriminator); + } /// Now we can create new column from null-map and variant column using IColumn::expand. 
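The expand step mentioned in the comment above is the core of VariantSubcolumnCreator::create: the compact variant column holds only the rows of one variant, and a null map (either supplied by the caller or derived from the local discriminators) says where those rows belong in the full column. Below is a standalone model of that expansion, using strings in place of real columns; it is an illustration, not the actual IColumn::expand implementation.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

/// Model of IColumn::expand(null_map, /*inverted=*/true): the compact vector holds only the
/// rows of one variant; the null map says, for every row of the full column, whether the row
/// belongs to that variant (0) or not (1). Rows of other variants get a default value.
std::vector<std::string> expandByNullMap(const std::vector<std::string> & compact, const std::vector<uint8_t> & null_map)
{
    std::vector<std::string> full;
    full.reserve(null_map.size());
    size_t next = 0;
    for (uint8_t is_null : null_map)
    {
        if (is_null)
            full.emplace_back();            /// default value, will read as NULL through the null map
        else
            full.push_back(compact.at(next++));
    }
    assert(next == compact.size());
    return full;
}

int main()
{
    /// Discriminators 0,1,0,2 for a variant with discriminator 1 give null map 1,0,1,1.
    std::vector<uint8_t> null_map = {1, 0, 1, 1};
    auto full = expandByNullMap({"str"}, null_map);
    assert((full == std::vector<std::string>{"", "str", "", ""}));
}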
auto res_column = IColumn::mutate(prev); @@ -356,13 +363,21 @@ ColumnPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB: if (make_nullable && prev->lowCardinality()) res_column = assert_cast(*res_column).cloneNullable(); - res_column->expand(null_map, /*inverted = */ true); + if (null_map_from_discriminators) + res_column->expand(*null_map_from_discriminators, /*inverted = */ true); + else + res_column->expand(assert_cast(*null_map).getData(), /*inverted = */ true); if (make_nullable && prev->canBeInsideNullable()) { - auto null_map_col = ColumnUInt8::create(); - null_map_col->getData() = std::move(null_map); - return ColumnNullable::create(std::move(res_column), std::move(null_map_col)); + if (null_map_from_discriminators) + { + auto null_map_col = ColumnUInt8::create(); + null_map_col->getData() = std::move(*null_map_from_discriminators); + return ColumnNullable::create(std::move(res_column), std::move(null_map_col)); + } + + return ColumnNullable::create(std::move(res_column), null_map->assumeMutable()); } return res_column; diff --git a/src/DataTypes/Serializations/SerializationVariantElement.h b/src/DataTypes/Serializations/SerializationVariantElement.h index 69101aea0f5..64f86eb2190 100644 --- a/src/DataTypes/Serializations/SerializationVariantElement.h +++ b/src/DataTypes/Serializations/SerializationVariantElement.h @@ -63,18 +63,22 @@ public: struct VariantSubcolumnCreator : public ISubcolumnCreator { + private: const ColumnPtr local_discriminators; + const ColumnPtr null_map; /// optional const String variant_element_name; const ColumnVariant::Discriminator global_variant_discriminator; const ColumnVariant::Discriminator local_variant_discriminator; bool make_nullable; + public: VariantSubcolumnCreator( const ColumnPtr & local_discriminators_, const String & variant_element_name_, ColumnVariant::Discriminator global_variant_discriminator_, ColumnVariant::Discriminator local_variant_discriminator_, - bool make_nullable_); + bool make_nullable_, + const ColumnPtr & null_map_ = nullptr); DataTypePtr create(const DataTypePtr & prev) const override; ColumnPtr create(const ColumnPtr & prev) const override; diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 242d2dc9f80..86fde3852b8 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1362,13 +1362,14 @@ public: } auto & variant_column = column_dynamic.getVariantColumn(); - auto variant_info = column_dynamic.getVariantInfo(); + const auto & variant_info = column_dynamic.getVariantInfo(); /// Second, infer ClickHouse type for this element and add it as a new variant. auto element_type = elementToDataType(element, format_settings); - if (column_dynamic.addNewVariant(element_type)) + auto element_type_name = element_type->getName(); + if (column_dynamic.addNewVariant(element_type, element_type_name)) { auto node = buildJSONExtractTree(element_type, "Dynamic inference"); - auto global_discriminator = variant_info.variant_name_to_discriminator[element_type->getName()]; + auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type_name); auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discriminator); if (!node->insertResultToColumn(variant, element, insert_settings, format_settings, error)) return false; @@ -1377,29 +1378,15 @@ public: return true; } - /// We couldn't add new variant. Try to insert element into current variants. 
- auto variant_node = buildJSONExtractTree(variant_info.variant_type, "Dynamic inference"); - if (variant_node->insertResultToColumn(variant_column, element, insert_settings, format_settings, error)) - return true; - - /// We couldn't insert element into any existing variant, add String variant and read value as String. - column_dynamic.addStringVariant(); - auto string_global_discriminator = variant_info.variant_name_to_discriminator["String"]; - auto & string_column = variant_column.getVariantByGlobalDiscriminator(string_global_discriminator); - if (!getStringNode()->insertResultToColumn(string_column, element, insert_settings, format_settings, error)) + /// We couldn't add this variant, insert it into shared variant. + auto tmp_variant_column = element_type->createColumn(); + auto node = buildJSONExtractTree(element_type, "Dynamic inference"); + if (!node->insertResultToColumn(*tmp_variant_column, element, insert_settings, format_settings, error)) return false; - variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(string_global_discriminator)); - variant_column.getOffsets().push_back(string_column.size() - 1); + column_dynamic.insertValueIntoSharedVariant(*tmp_variant_column, element_type, element_type_name, 0); return true; } - static const std::unique_ptr> & getStringNode() - { - static const std::unique_ptr> string_node - = buildJSONExtractTree(std::make_shared(), "Dynamic inference"); - return string_node; - } - static DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings) { JSONInferenceInfo json_inference_info; diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 675283d011e..21b98cf505c 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -4287,13 +4288,98 @@ private: WrapperType createDynamicToColumnWrapper(const DataTypePtr &) const { return [this] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr { + /// When casting Dynamic to regular column we should cast all variants from current Dynamic column + /// and construct the result based on discriminators. const auto & column_dynamic = assert_cast(*arguments.front().column.get()); + const auto & variant_column = column_dynamic.getVariantColumn(); const auto & variant_info = column_dynamic.getVariantInfo(); - auto variant_wrapper = createVariantToColumnWrapper(assert_cast(*variant_info.variant_type), result_type); - ColumnsWithTypeAndName args = {ColumnWithTypeAndName(column_dynamic.getVariantColumnPtr(), variant_info.variant_type, "")}; - return variant_wrapper(args, result_type, col_nullable, input_rows_count); + + /// First, cast usual variants to result type. 
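
The JSONExtractTree change above means that, during JSON inference, a value whose type no longer fits under the declared limit is serialized into the shared variant instead of being downgraded to String. The updated 03033 test below exercises this; a condensed version of that query, using only functions and settings that appear in this patch:

set allow_experimental_dynamic_type = 1;
select d, dynamicType(d), isDynamicElementInSharedData(d)
from format(JSONEachRow, 'd Dynamic(max_types=2)', $$
{"d" : 42}
{"d" : 42.42}
{"d" : "str"}
{"d" : null}
{"d" : true}
$$);
-- Int64 and Float64 arrive first and get dedicated variants; String and Bool
-- are expected to land in the shared variant (flag = true), NULL stays false.
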
+ const auto & variant_types = assert_cast(*variant_info.variant_type).getVariants(); + std::vector casted_variant_columns; + casted_variant_columns.reserve(variant_types.size()); + for (size_t i = 0; i != variant_types.size(); ++i) + { + const auto & variant_col = variant_column.getVariantPtrByGlobalDiscriminator(i); + ColumnsWithTypeAndName variant = {{variant_col, variant_types[i], ""}}; + auto variant_wrapper = prepareUnpackDictionaries(variant_types[i], result_type); + casted_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_col->size())); + } + + /// Second, collect all variants stored in shared variant and cast them to result type. + std::vector variant_columns_from_shared_variant; + DataTypes variant_types_from_shared_variant; + /// We will need to know what variant to use when we see discriminator of a shared variant. + /// To do it, we remember what variant was extracted from each row and what was it's offset. + PaddedPODArray shared_variant_indexes; + PaddedPODArray shared_variant_offsets; + std::unordered_map shared_variant_to_index; + const auto & shared_variant = column_dynamic.getSharedVariant(); + const auto shared_variant_discr = column_dynamic.getSharedVariantDiscriminator(); + const auto & local_discriminators = variant_column.getLocalDiscriminators(); + const auto & offsets = variant_column.getOffsets(); + if (!shared_variant.empty()) + { + shared_variant_indexes.reserve(input_rows_count); + shared_variant_offsets.reserve(input_rows_count); + FormatSettings format_settings; + const auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(shared_variant_discr); + for (size_t i = 0; i != input_rows_count; ++i) + { + if (local_discriminators[i] == shared_variant_local_discr) + { + auto value = shared_variant.getDataAt(offsets[i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + auto it = shared_variant_to_index.find(type_name); + /// Check if didn't created column for this variant yet. + if (it == shared_variant_to_index.end()) + { + it = shared_variant_to_index.emplace(type_name, variant_columns_from_shared_variant.size()).first; + variant_columns_from_shared_variant.push_back(type->createColumn()); + variant_types_from_shared_variant.push_back(type); + } + + shared_variant_indexes.push_back(it->second); + shared_variant_offsets.push_back(variant_columns_from_shared_variant[it->second]->size()); + type->getDefaultSerialization()->deserializeBinary(*variant_columns_from_shared_variant[it->second], buf, format_settings); + } + else + { + shared_variant_indexes.emplace_back(); + shared_variant_offsets.emplace_back(); + } + } + } + + /// Cast all extracted variants into result type. + std::vector casted_shared_variant_columns; + casted_shared_variant_columns.reserve(variant_types_from_shared_variant.size()); + for (size_t i = 0; i != variant_types_from_shared_variant.size(); ++i) + { + ColumnsWithTypeAndName variant = {{variant_columns_from_shared_variant[i]->getPtr(), variant_types_from_shared_variant[i], ""}}; + auto variant_wrapper = prepareUnpackDictionaries(variant_types_from_shared_variant[i], result_type); + casted_shared_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_columns_from_shared_variant[i]->size())); + } + + /// Construct result column from all casted variants. 
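
The loop above decodes each shared-variant value with decodeDataType and routes it through the same per-type cast as the ordinary variants, so rows kept in shared data should cast exactly like rows with a dedicated variant. A rough check, assuming that max_types=0 (as used in the updated 03034 test below) forces every value into the shared variant:

set allow_experimental_dynamic_type = 1;
-- Every row should still report its real type, and the per-row cast to String
-- is expected to go through the shared-variant branch sketched above.
select d, dynamicType(d), isDynamicElementInSharedData(d), d::String as s
from (select number::Dynamic(max_types=0) as d from numbers(3));
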
+ auto res = result_type->createColumn(); + res->reserve(input_rows_count); + for (size_t i = 0; i != input_rows_count; ++i) + { + auto global_discr = variant_column.globalDiscriminatorByLocal(local_discriminators[i]); + if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) + res->insertDefault(); + else if (global_discr == shared_variant_discr) + res->insertFrom(*casted_shared_variant_columns[shared_variant_indexes[i]], shared_variant_offsets[i]); + else + res->insertFrom(*casted_variant_columns[global_discr], offsets[i]); + } + + return res; }; } @@ -4320,200 +4406,51 @@ private: }; } - std::pair getReducedVariant( - const ColumnVariant & variant_column, - const DataTypePtr & variant_type, - const std::unordered_map & variant_name_to_discriminator, - size_t max_result_num_variants, - const ColumnDynamic::Statistics & statistics = {}) const + WrapperType createVariantToDynamicWrapper(const DataTypeVariant & from_variant_type, const DataTypeDynamic & dynamic_type) const { - const auto & variant_types = assert_cast(*variant_type).getVariants(); - /// First check if we don't exceed the limit in current Variant column. - if (variant_types.size() < max_result_num_variants || (variant_types.size() == max_result_num_variants && variant_name_to_discriminator.contains("String"))) - return {variant_column.getPtr(), variant_type}; - - /// We want to keep the most frequent variants and convert to string the rarest. - std::vector> variant_sizes; - variant_sizes.reserve(variant_types.size()); - std::optional old_string_discriminator; - /// List of variants that should be converted to a single String variant. - std::vector variants_to_convert_to_string; - for (size_t i = 0; i != variant_types.size(); ++i) + /// First create extended Variant with shared variant type and cast this Variant to it. + auto variants_for_dynamic = from_variant_type.getVariants(); + size_t number_of_variants = variants_for_dynamic.size(); + variants_for_dynamic.push_back(ColumnDynamic::getSharedVariantDataType()); + const auto & variant_type_for_dynamic = std::make_shared(variants_for_dynamic); + auto old_to_new_variant_wrapper = createVariantToVariantWrapper(from_variant_type, *variant_type_for_dynamic); + auto max_dynamic_types = dynamic_type.getMaxDynamicTypes(); + return [old_to_new_variant_wrapper, variant_type_for_dynamic, number_of_variants, max_dynamic_types] + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr { - /// String variant won't be removed. - String variant_name = variant_types[i]->getName(); + auto variant_column_for_dynamic = old_to_new_variant_wrapper(arguments, result_type, col_nullable, input_rows_count); + /// If resulting Dynamic column can contain all variants from this Variant column, just create Dynamic column from it. + if (max_dynamic_types >= number_of_variants) + return ColumnDynamic::create(variant_column_for_dynamic, variant_type_for_dynamic, max_dynamic_types, max_dynamic_types); - if (variant_name == "String") - { - old_string_discriminator = i; - /// For simplicity, add this variant to the list that will be converted to string, - /// so we will process it with other variants when constructing the new String variant. 
- variants_to_convert_to_string.push_back(i); - } - else - { - size_t size = 0; - if (statistics.data.empty()) - size = variant_column.getVariantByGlobalDiscriminator(i).size(); - else - size = statistics.data.at(variant_name); - variant_sizes.emplace_back(size, i); - } - } - - /// Sort variants by sizes, so we will keep the most frequent. - std::sort(variant_sizes.begin(), variant_sizes.end(), std::greater()); - - DataTypes remaining_variants; - remaining_variants.reserve(max_result_num_variants); - /// Add String variant in advance. - remaining_variants.push_back(std::make_shared()); - for (auto [_, discr] : variant_sizes) - { - if (remaining_variants.size() != max_result_num_variants) - remaining_variants.push_back(variant_types[discr]); - else - variants_to_convert_to_string.push_back(discr); - } - - auto reduced_variant = std::make_shared(remaining_variants); - const auto & new_variants = reduced_variant->getVariants(); - /// To construct reduced variant column we will need mapping from old to new discriminators. - std::vector old_to_new_discriminators_mapping; - old_to_new_discriminators_mapping.resize(variant_types.size()); - ColumnVariant::Discriminator string_variant_discriminator = 0; - for (size_t i = 0; i != new_variants.size(); ++i) - { - String variant_name = new_variants[i]->getName(); - if (variant_name == "String") - { - string_variant_discriminator = i; - for (auto discr : variants_to_convert_to_string) - old_to_new_discriminators_mapping[discr] = i; - } - else - { - auto old_discr = variant_name_to_discriminator.at(variant_name); - old_to_new_discriminators_mapping[old_discr] = i; - } - } - - /// Convert all reduced variants to String. - std::unordered_map variants_converted_to_string; - variants_converted_to_string.reserve(variants_to_convert_to_string.size()); - size_t string_variant_size = 0; - for (auto discr : variants_to_convert_to_string) - { - auto string_type = std::make_shared(); - auto string_wrapper = prepareUnpackDictionaries(variant_types[discr], string_type); - auto column_to_convert = ColumnWithTypeAndName(variant_column.getVariantPtrByGlobalDiscriminator(discr), variant_types[discr], ""); - ColumnsWithTypeAndName args = {column_to_convert}; - auto variant_string_column = string_wrapper(args, string_type, nullptr, column_to_convert.column->size()); - string_variant_size += variant_string_column->size(); - variants_converted_to_string[discr] = variant_string_column; - } - - /// Create new discriminators and offsets and fill new String variant according to old discriminators. 
- auto string_variant = ColumnString::create(); - string_variant->reserve(string_variant_size); - auto new_discriminators_column = variant_column.getLocalDiscriminatorsPtr()->cloneEmpty(); - auto & new_discriminators_data = assert_cast(*new_discriminators_column).getData(); - new_discriminators_data.reserve(variant_column.size()); - auto new_offsets = variant_column.getOffsetsPtr()->cloneEmpty(); - auto & new_offsets_data = assert_cast(*new_offsets).getData(); - new_offsets_data.reserve(variant_column.size()); - const auto & old_local_discriminators = variant_column.getLocalDiscriminators(); - const auto & old_offsets = variant_column.getOffsets(); - for (size_t i = 0; i != old_local_discriminators.size(); ++i) - { - auto old_discr = variant_column.globalDiscriminatorByLocal(old_local_discriminators[i]); - - if (old_discr == ColumnVariant::NULL_DISCRIMINATOR) - { - new_discriminators_data.push_back(ColumnVariant::NULL_DISCRIMINATOR); - new_offsets_data.push_back(0); - continue; - } - - auto new_discr = old_to_new_discriminators_mapping[old_discr]; - new_discriminators_data.push_back(new_discr); - if (new_discr != string_variant_discriminator) - { - new_offsets_data.push_back(old_offsets[i]); - } - else - { - new_offsets_data.push_back(string_variant->size()); - string_variant->insertFrom(*variants_converted_to_string[old_discr], old_offsets[i]); - } - } - - /// Create new list of variant columns. - Columns new_variant_columns; - new_variant_columns.resize(new_variants.size()); - for (size_t i = 0; i != variant_types.size(); ++i) - { - auto new_discr = old_to_new_discriminators_mapping[i]; - if (new_discr != string_variant_discriminator) - new_variant_columns[new_discr] = variant_column.getVariantPtrByGlobalDiscriminator(i); - } - new_variant_columns[string_variant_discriminator] = std::move(string_variant); - return {ColumnVariant::create(std::move(new_discriminators_column), std::move(new_offsets), new_variant_columns), reduced_variant}; - } - - WrapperType createVariantToDynamicWrapper(const DataTypePtr & from_type, const DataTypeDynamic & dynamic_type) const - { - const auto & from_variant_type = assert_cast(*from_type); - size_t max_dynamic_types = dynamic_type.getMaxDynamicTypes(); - const auto & variants = from_variant_type.getVariants(); - std::unordered_map variant_name_to_discriminator; - variant_name_to_discriminator.reserve(variants.size()); - for (size_t i = 0; i != variants.size(); ++i) - variant_name_to_discriminator[variants[i]->getName()] = i; - - return [from_type, max_dynamic_types, variant_name_to_discriminator, this] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr - { - const auto & variant_column = assert_cast(*arguments.front().column); - auto [reduced_variant_column, reduced_variant_type] = getReducedVariant(variant_column, from_type, variant_name_to_discriminator, max_dynamic_types); - return ColumnDynamic::create(reduced_variant_column, reduced_variant_type, max_dynamic_types); + /// Otherwise some variants should go to the shared variant. Create temporary Dynamic column from this Variant and insert + /// all data to the resulting Dynamic column, this insertion will do all the logic with shared variant. 
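
Unlike the removed getReducedVariant above, the new Variant to Dynamic path never rewrites rare variants as String: when the target limit is too small, whole values move into the shared variant and keep their original type. A sketch built from the expression used in the updated 03034 test below:

set allow_experimental_variant_type = 1;
set use_variant_as_common_type = 1;
set allow_experimental_dynamic_type = 1;
-- dynamicType(d) should stay UInt64 / String / Array(UInt64) rather than being
-- converted to String; which of them end up shared depends on the limit of 1.
select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=1) as d,
       dynamicType(d), isDynamicElementInSharedData(d)
from numbers(6);
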
+ auto tmp_dynamic_column = ColumnDynamic::create(variant_column_for_dynamic, variant_type_for_dynamic, number_of_variants, number_of_variants); + auto result_dynamic_column = ColumnDynamic::create(max_dynamic_types); + result_dynamic_column->insertRangeFrom(*tmp_dynamic_column, 0, tmp_dynamic_column->size()); + return result_dynamic_column; }; } WrapperType createColumnToDynamicWrapper(const DataTypePtr & from_type, const DataTypeDynamic & dynamic_type) const { if (const auto * variant_type = typeid_cast(from_type.get())) - return createVariantToDynamicWrapper(from_type, dynamic_type); - - if (dynamic_type.getMaxDynamicTypes() == 1) - { - DataTypePtr string_type = std::make_shared(); - if (from_type->isNullable()) - string_type = makeNullable(string_type); - auto string_wrapper = prepareUnpackDictionaries(from_type, string_type); - auto variant_type = std::make_shared(DataTypes{removeNullable(string_type)}); - auto variant_wrapper = createColumnToVariantWrapper(string_type, *variant_type); - return [string_wrapper, variant_wrapper, string_type, variant_type, max_dynamic_types=dynamic_type.getMaxDynamicTypes()] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr - { - auto string_column = string_wrapper(arguments, string_type, col_nullable, input_rows_count); - auto column = ColumnWithTypeAndName(string_column, string_type, ""); - ColumnsWithTypeAndName args = {column}; - auto variant_column = variant_wrapper(args, variant_type, nullptr, string_column->size()); - return ColumnDynamic::create(variant_column, variant_type, max_dynamic_types); - }; - } + return createVariantToDynamicWrapper(*variant_type, dynamic_type); if (context && context->getSettingsRef().cast_string_to_dynamic_use_inference && isStringOrFixedString(removeNullable(removeLowCardinality(from_type)))) return createStringToDynamicThroughParsingWrapper(); + /// First, cast column to Variant with 2 variants - the type of the column we cast and shared variant type. auto variant_type = std::make_shared(DataTypes{removeNullableOrLowCardinalityNullable(from_type)}); - auto variant_wrapper = createColumnToVariantWrapper(from_type, *variant_type); - return [variant_wrapper, variant_type, max_dynamic_types=dynamic_type.getMaxDynamicTypes()] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr + auto column_to_variant_wrapper = createColumnToVariantWrapper(from_type, *variant_type); + /// Second, cast this Variant to Dynamic. 
+ auto variant_to_dynamic_wrapper = createVariantToDynamicWrapper(*variant_type, dynamic_type); + return [column_to_variant_wrapper, variant_to_dynamic_wrapper, variant_type] + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr { - auto variant_res = variant_wrapper(arguments, variant_type, col_nullable, input_rows_count); - return ColumnDynamic::create(variant_res, variant_type, max_dynamic_types); + auto variant_res = column_to_variant_wrapper(arguments, variant_type, col_nullable, input_rows_count); + ColumnsWithTypeAndName args = {{variant_res, variant_type, ""}}; + return variant_to_dynamic_wrapper(args, result_type, nullptr, input_rows_count); }; } @@ -4530,21 +4467,26 @@ private: (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr { const auto & column_dynamic = assert_cast(*arguments[0].column); - return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), to_max_types); + /// We should use the same limit as already used in column and change only global limit. + /// It's needed because shared variant should contain values only when limit is exceeded, + /// so if there are already some data, we cannot increase the limit. + return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), column_dynamic.getMaxDynamicTypes(), to_max_types); }; } - return [to_max_types, this] + return [to_max_types] (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr { const auto & column_dynamic = assert_cast(*arguments[0].column); - auto [reduced_variant_column, reduced_variant_type] = getReducedVariant( - column_dynamic.getVariantColumn(), - column_dynamic.getVariantInfo().variant_type, - column_dynamic.getVariantInfo().variant_name_to_discriminator, - to_max_types, - column_dynamic.getStatistics()); - return ColumnDynamic::create(reduced_variant_column, reduced_variant_type, to_max_types); + /// If real limit in the column is not greater than desired, just use the same variant column. + if (column_dynamic.getMaxDynamicTypes() <= to_max_types) + return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), column_dynamic.getMaxDynamicTypes(), to_max_types); + + /// Otherwise some variants should go to the shared variant. In this case we can just insert all + /// the data into resulting column and it will do all the logic with shared variant. 
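
These limit rules are easiest to observe on Dynamic to Dynamic casts, as in the updated 03034 test below: raising max_types only changes the declared limit (existing data keeps its old effective limit), while lowering it sends the excess variants into the shared variant of the new column. A hedged illustration reusing the test's expressions:

set allow_experimental_variant_type = 1;
set use_variant_as_common_type = 1;
set allow_experimental_dynamic_type = 1;
-- Raising the limit: no data is moved, only the declared limit changes.
select number::Dynamic(max_types=2)::Dynamic(max_types=3) as d, dynamicType(d) from numbers(3);
-- Lowering the limit: the reported types stay the same, but some rows are
-- expected to switch to the shared variant of the narrower column.
select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=2)::Dynamic(max_types=1) as d,
       dynamicType(d), isDynamicElementInSharedData(d)
from numbers(6);
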
+ auto result_dynamic_column = ColumnDynamic::create(to_max_types); + result_dynamic_column->insertRangeFrom(column_dynamic, 0, column_dynamic.size()); + return result_dynamic_column; }; } diff --git a/src/Functions/dynamicType.cpp b/src/Functions/dynamicType.cpp index e8ca73597d6..327cdfe1616 100644 --- a/src/Functions/dynamicType.cpp +++ b/src/Functions/dynamicType.cpp @@ -2,10 +2,14 @@ #include #include #include +#include +#include #include #include #include #include +#include +#include #include @@ -65,11 +69,15 @@ public: const auto & variant_column = dynamic_column->getVariantColumn(); auto res = result_type->createColumn(); String element_type; + auto shared_variant_discr = dynamic_column->getSharedVariantDiscriminator(); + const auto & shared_variant = dynamic_column->getSharedVariant(); for (size_t i = 0; i != input_rows_count; ++i) { auto global_discr = variant_column.globalDiscriminatorAt(i); if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) element_type = name_for_null; + else if (global_discr == shared_variant_discr) + element_type = getTypeNameFromSharedVariantValue(shared_variant.getDataAt(variant_column.offsetAt(i))); else element_type = variant_info.variant_names[global_discr]; @@ -78,6 +86,63 @@ public: return res; } + + String getTypeNameFromSharedVariantValue(StringRef value) const + { + ReadBufferFromMemory buf(value.data, value.size); + return decodeDataType(buf)->getName(); + } +}; + +class FunctionIsDynamicElementInSharedData : public IFunction +{ +public: + static constexpr auto name = "isDynamicElementInSharedData"; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + String getName() const override { return name; } + size_t getNumberOfArguments() const override { return 1; } + bool useDefaultImplementationForConstants() const override { return true; } + bool useDefaultImplementationForNulls() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + if (arguments.empty() || arguments.size() > 1) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1", + getName(), arguments.empty()); + + if (!isDynamic(arguments[0].type.get())) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be Dynamic, got {} instead", + getName(), arguments[0].type->getName()); + + return DataTypeFactory::instance().get("Bool"); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + const ColumnDynamic * dynamic_column = checkAndGetColumn(arguments[0].column.get()); + if (!dynamic_column) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be Dynamic, got {} instead", + getName(), arguments[0].type->getName()); + + const auto & variant_column = dynamic_column->getVariantColumn(); + const auto & local_discriminators = variant_column.getLocalDiscriminators(); + auto res = result_type->createColumn(); + auto & res_data = assert_cast(*res).getData(); + res_data.reserve(dynamic_column->size()); + auto shared_variant_local_discr = 
variant_column.localDiscriminatorByGlobal(dynamic_column->getSharedVariantDiscriminator()); + for (size_t i = 0; i != input_rows_count; ++i) + res_data.push_back(local_discriminators[i] == shared_variant_local_discr); + + return res; + } }; } @@ -88,7 +153,7 @@ REGISTER_FUNCTION(DynamicType) .description = R"( Returns the variant type name for each row of `Dynamic` column. If row contains NULL, it returns 'None' for it. )", - .syntax = {"dynamicType(variant)"}, + .syntax = {"dynamicType(dynamic)"}, .arguments = {{"dynamic", "Dynamic column"}}, .examples = {{{ "Example", @@ -104,6 +169,30 @@ SELECT d, dynamicType(d) FROM test; │ Hello, World! │ String │ │ [1,2,3] │ Array(Int64) │ └───────────────┴────────────────┘ +)"}}}, + .categories{"Variant"}, + }); + + factory.registerFunction(FunctionDocumentation{ + .description = R"( +Returns true for rows in Dynamic column that are not separated into subcolumns and stored inside shared variant in binary form. +)", + .syntax = {"isDynamicElementInSharedData(dynamic)"}, + .arguments = {{"dynamic", "Dynamic column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (d Dynamic(max_types=2)) ENGINE = Memory; +INSERT INTO test VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]); +SELECT d, isDynamicElementInSharedData(d) FROM test; +)", + R"( +┌─d─────────────┬─isDynamicElementInSharedData(d)─┠+│ á´ºáµá´¸á´¸ │ false │ +│ 42 │ false │ +│ Hello, World! │ true │ +│ [1,2,3] │ true │ +└───────────────┴────────────────────┘ )"}}}, .categories{"Variant"}, }); diff --git a/tests/queries/0_stateless/00000_test.sql b/tests/queries/0_stateless/00000_test.sql new file mode 100644 index 00000000000..db9dd774484 --- /dev/null +++ b/tests/queries/0_stateless/00000_test.sql @@ -0,0 +1,43 @@ +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set enable_named_columns_in_function_tuple = 0; +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; + + +system stop merges test; +insert into test select number, number from numbers(10); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(10); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(5); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=3)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(10); + +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(5); +insert into test 
select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(20); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=3)) from numbers(4); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; + +drop table test; + diff --git a/tests/queries/0_stateless/03033_dynamic_text_serialization.reference b/tests/queries/0_stateless/03033_dynamic_text_serialization.reference index d965245266c..9fc356cc5e6 100644 --- a/tests/queries/0_stateless/03033_dynamic_text_serialization.reference +++ b/tests/queries/0_stateless/03033_dynamic_text_serialization.reference @@ -11,11 +11,11 @@ JSON {"d":["1","str",["1","2","3"]],"dynamicType(d)":"Tuple(Int64, String, Array(Int64))"} {"d":null,"dynamicType(d)":"None"} {"d":true,"dynamicType(d)":"Bool"} -{"d":"42","dynamicType(d)":"Int64"} -{"d":"42.42","dynamicType(d)":"String"} -{"d":"str","dynamicType(d)":"String"} -{"d":null,"dynamicType(d)":"None"} -{"d":"1","dynamicType(d)":"Int64"} +{"d":"42","dynamicType(d)":"Int64","isDynamicElementInSharedData(d)":false} +{"d":42.42,"dynamicType(d)":"Float64","isDynamicElementInSharedData(d)":false} +{"d":"str","dynamicType(d)":"String","isDynamicElementInSharedData(d)":true} +{"d":null,"dynamicType(d)":"None","isDynamicElementInSharedData(d)":false} +{"d":true,"dynamicType(d)":"Bool","isDynamicElementInSharedData(d)":true} CSV 42,"Int64" 42.42,"Float64" @@ -44,12 +44,12 @@ Cast using parsing [1,2,3] Array(Int64) 2020-01-01 Date 2020-01-01 10:00:00.000000000 DateTime64(9) -\N None +NULL String true Bool -42 Int64 -42.42 Float64 -[1, 2, 3] String -2020-01-01 String -2020-01-01 10:00:00 String -\N None -true String +42 Int64 false +42.42 Float64 false +[1,2,3] Array(Int64) false +2020-01-01 Date true +2020-01-01 10:00:00.000000000 DateTime64(9) true +NULL String true +true Bool true diff --git a/tests/queries/0_stateless/03033_dynamic_text_serialization.sql b/tests/queries/0_stateless/03033_dynamic_text_serialization.sql index d12d110fe28..45539cb13eb 100644 --- a/tests/queries/0_stateless/03033_dynamic_text_serialization.sql +++ b/tests/queries/0_stateless/03033_dynamic_text_serialization.sql @@ -16,7 +16,7 @@ select d, dynamicType(d) from format(JSONEachRow, 'd Dynamic', $$ {"d" : true} $$) format JSONEachRow; -select d, dynamicType(d) from format(JSONEachRow, 'd Dynamic(max_types=2)', $$ +select d, dynamicType(d), 
isDynamicElementInSharedData(d) from format(JSONEachRow, 'd Dynamic(max_types=2)', $$ {"d" : 42} {"d" : 42.42} {"d" : "str"} @@ -69,6 +69,6 @@ create table test (s String) engine=Memory; insert into test values ('42'), ('42.42'), ('[1, 2, 3]'), ('2020-01-01'), ('2020-01-01 10:00:00'), ('NULL'), ('true'); set cast_string_to_dynamic_use_inference=1; select s::Dynamic as d, dynamicType(d) from test; -select s::Dynamic(max_types=3) as d, dynamicType(d) from test; +select s::Dynamic(max_types=3) as d, dynamicType(d), isDynamicElementInSharedData(d) from test; drop table test; diff --git a/tests/queries/0_stateless/03034_dynamic_conversions.reference b/tests/queries/0_stateless/03034_dynamic_conversions.reference index 45f94f7ecc4..e22b64701a3 100644 --- a/tests/queries/0_stateless/03034_dynamic_conversions.reference +++ b/tests/queries/0_stateless/03034_dynamic_conversions.reference @@ -1,9 +1,9 @@ 0 UInt64 1 UInt64 2 UInt64 -0 String -1 String -2 String +0 UInt64 +1 UInt64 +2 UInt64 0 1 2 @@ -25,15 +25,15 @@ str_1 String \N None 4 UInt64 str_5 String -0 String +0 UInt64 str_1 String -[0,1] String +[0,1] Array(UInt64) \N None -4 String +4 UInt64 str_5 String 0 UInt64 str_1 String -[0,1] String +[0,1] Array(UInt64) \N None 4 UInt64 str_5 String @@ -51,13 +51,13 @@ str_5 String 2 0 UInt64 str_1 String -[0,1] String +[0,1] Array(UInt64) \N None 4 UInt64 str_5 String 0 UInt64 1970-01-02 Date -[0,1] String +[0,1] Array(UInt64) \N None 4 UInt64 1970-01-06 Date diff --git a/tests/queries/0_stateless/03034_dynamic_conversions.sql b/tests/queries/0_stateless/03034_dynamic_conversions.sql index ed75fbf2377..c0b470f29c5 100644 --- a/tests/queries/0_stateless/03034_dynamic_conversions.sql +++ b/tests/queries/0_stateless/03034_dynamic_conversions.sql @@ -3,7 +3,7 @@ set allow_experimental_variant_type=1; set use_variant_as_common_type=1; select number::Dynamic as d, dynamicType(d) from numbers(3); -select number::Dynamic(max_types=1) as d, dynamicType(d) from numbers(3); +select number::Dynamic(max_types=0) as d, dynamicType(d) from numbers(3); select number::Dynamic::UInt64 as v from numbers(3); select number::Dynamic::String as v from numbers(3); select number::Dynamic::Date as v from numbers(3); @@ -12,13 +12,13 @@ select number::Dynamic::Variant(UInt64, String) as v, variantType(v) from number select (number % 2 ? 
NULL : number)::Dynamic as d, dynamicType(d) from numbers(3); select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic as d, dynamicType(d) from numbers(6); +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=0) as d, dynamicType(d) from numbers(6); select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=1) as d, dynamicType(d) from numbers(6); select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=2) as d, dynamicType(d) from numbers(6); -select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=3) as d, dynamicType(d) from numbers(6); select number::Dynamic(max_types=2)::Dynamic(max_types=3) as d from numbers(3); select number::Dynamic(max_types=2)::Dynamic(max_types=1) as d from numbers(3); -select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=3)::Dynamic(max_types=2) as d, dynamicType(d) from numbers(6); +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=2)::Dynamic(max_types=1) as d, dynamicType(d) from numbers(6); select multiIf(number % 4 == 0, number, number % 4 == 1, toDate(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=4)::Dynamic(max_types=3) as d, dynamicType(d) from numbers(6); diff --git a/tests/queries/0_stateless/03035_dynamic_sorting.reference b/tests/queries/0_stateless/03035_dynamic_sorting.reference index 9b8df11c7a9..f253c34ce8a 100644 --- a/tests/queries/0_stateless/03035_dynamic_sorting.reference +++ b/tests/queries/0_stateless/03035_dynamic_sorting.reference @@ -1,299 +1,442 @@ order by d1 nulls first -\N None -\N None -\N None -\N None -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,4] Array(Int64) -42 Int64 -42 Int64 -42 Int64 -42 Int64 -42 Int64 -43 Int64 -abc String -abc String -abc String -abc String -abc String -abd String +\N None false +\N None false +\N None false +\N None false +\N None false +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,4] Array(Int64) true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-02 Date true +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +43 Int64 false +abc String false +abc String false +abc String false +abc String false +abc String false +abc String false +abd String false order by d1 nulls last -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,4] Array(Int64) -42 Int64 -42 Int64 -42 Int64 -42 Int64 -42 Int64 -43 Int64 -abc String -abc String -abc String -abc String -abc String -abd String -\N None -\N None -\N None -\N None +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,4] Array(Int64) true +2020-01-01 
Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-02 Date true +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +43 Int64 false +abc String false +abc String false +abc String false +abc String false +abc String false +abc String false +abd String false +\N None false +\N None false +\N None false +\N None false +\N None false order by d2 nulls first -\N None -\N None -\N None -\N None -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,4] Array(Int64) -42 Int64 -42 Int64 -42 Int64 -42 Int64 -42 Int64 -43 Int64 -abc String -abc String -abc String -abc String -abc String -abd String +\N None false +\N None false +\N None false +\N None false +\N None false +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,4] Array(Int64) true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-02 Date true +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +43 Int64 false +abc String false +abc String false +abc String false +abc String false +abc String false +abc String false +abd String false order by d2 nulls last -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,3] Array(Int64) -[1,2,4] Array(Int64) -42 Int64 -42 Int64 -42 Int64 -42 Int64 -42 Int64 -43 Int64 -abc String -abc String -abc String -abc String -abc String -abd String -\N None -\N None -\N None -\N None +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,3] Array(Int64) true +[1,2,4] Array(Int64) true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-01 Date true +2020-01-02 Date true +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +42 Int64 false +43 Int64 false +abc String false +abc String false +abc String false +abc String false +abc String false +abc String false +abd String false +\N None false +\N None false +\N None false +\N None false +\N None false order by d1, d2 nulls first -[1,2,3] \N Array(Int64) None -[1,2,3] [1,2,3] Array(Int64) Array(Int64) -[1,2,3] [1,2,4] Array(Int64) Array(Int64) -[1,2,3] 42 Array(Int64) Int64 -[1,2,3] abc Array(Int64) String -[1,2,4] [1,2,3] Array(Int64) Array(Int64) -42 \N Int64 None -42 [1,2,3] Int64 Array(Int64) -42 42 Int64 Int64 -42 43 Int64 Int64 -42 abc Int64 String -43 42 Int64 Int64 -abc \N String None -abc [1,2,3] String Array(Int64) -abc 42 String Int64 -abc abc String String -abc abd String String -abd abc String String -\N \N None None -\N [1,2,3] None Array(Int64) -\N 42 None Int64 -\N abc None String +[1,2,3] \N Array(Int64) true None false +[1,2,3] [1,2,3] Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 Array(Int64) true Date true +[1,2,3] 42 Array(Int64) true Int64 false +[1,2,3] abc Array(Int64) true String false +[1,2,4] [1,2,3] Array(Int64) true Array(Int64) true +2020-01-01 \N Date true None false +2020-01-01 [1,2,3] Date true Array(Int64) true +2020-01-01 2020-01-01 Date true Date true +2020-01-01 2020-01-02 Date true Date true +2020-01-01 42 Date true Int64 
false +2020-01-01 abc Date true String false +2020-01-02 2020-01-01 Date true Date true +42 \N Int64 false None false +42 [1,2,3] Int64 false Array(Int64) true +42 2020-01-01 Int64 false Date true +42 42 Int64 false Int64 false +42 43 Int64 false Int64 false +42 abc Int64 false String false +43 42 Int64 false Int64 false +abc \N String false None false +abc [1,2,3] String false Array(Int64) true +abc 2020-01-01 String false Date true +abc 42 String false Int64 false +abc abc String false String false +abc abd String false String false +abd abc String false String false +\N \N None false None false +\N [1,2,3] None false Array(Int64) true +\N 2020-01-01 None false Date true +\N 42 None false Int64 false +\N abc None false String false order by d1, d2 nulls last -[1,2,3] [1,2,3] Array(Int64) Array(Int64) -[1,2,3] [1,2,4] Array(Int64) Array(Int64) -[1,2,3] 42 Array(Int64) Int64 -[1,2,3] abc Array(Int64) String -[1,2,3] \N Array(Int64) None -[1,2,4] [1,2,3] Array(Int64) Array(Int64) -42 [1,2,3] Int64 Array(Int64) -42 42 Int64 Int64 -42 43 Int64 Int64 -42 abc Int64 String -42 \N Int64 None -43 42 Int64 Int64 -abc [1,2,3] String Array(Int64) -abc 42 String Int64 -abc abc String String -abc abd String String -abc \N String None -abd abc String String -\N [1,2,3] None Array(Int64) -\N 42 None Int64 -\N abc None String -\N \N None None +[1,2,3] [1,2,3] Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 Array(Int64) true Date true +[1,2,3] 42 Array(Int64) true Int64 false +[1,2,3] abc Array(Int64) true String false +[1,2,3] \N Array(Int64) true None false +[1,2,4] [1,2,3] Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] Date true Array(Int64) true +2020-01-01 2020-01-01 Date true Date true +2020-01-01 2020-01-02 Date true Date true +2020-01-01 42 Date true Int64 false +2020-01-01 abc Date true String false +2020-01-01 \N Date true None false +2020-01-02 2020-01-01 Date true Date true +42 [1,2,3] Int64 false Array(Int64) true +42 2020-01-01 Int64 false Date true +42 42 Int64 false Int64 false +42 43 Int64 false Int64 false +42 abc Int64 false String false +42 \N Int64 false None false +43 42 Int64 false Int64 false +abc [1,2,3] String false Array(Int64) true +abc 2020-01-01 String false Date true +abc 42 String false Int64 false +abc abc String false String false +abc abd String false String false +abc \N String false None false +abd abc String false String false +\N [1,2,3] None false Array(Int64) true +\N 2020-01-01 None false Date true +\N 42 None false Int64 false +\N abc None false String false +\N \N None false None false order by d2, d1 nulls first -\N [1,2,3] None Array(Int64) -[1,2,3] [1,2,3] Array(Int64) Array(Int64) -[1,2,4] [1,2,3] Array(Int64) Array(Int64) -42 [1,2,3] Int64 Array(Int64) -abc [1,2,3] String Array(Int64) -[1,2,3] [1,2,4] Array(Int64) Array(Int64) -\N 42 None Int64 -[1,2,3] 42 Array(Int64) Int64 -42 42 Int64 Int64 -43 42 Int64 Int64 -abc 42 String Int64 -42 43 Int64 Int64 -\N abc None String -[1,2,3] abc Array(Int64) String -42 abc Int64 String -abc abc String String -abd abc String String -abc abd String String -\N \N None None -[1,2,3] \N Array(Int64) None -42 \N Int64 None -abc \N String None +\N [1,2,3] None false Array(Int64) true +[1,2,3] [1,2,3] Array(Int64) true Array(Int64) true +[1,2,4] [1,2,3] Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] Date true Array(Int64) true +42 [1,2,3] Int64 false Array(Int64) true +abc [1,2,3] String false Array(Int64) true +[1,2,3] [1,2,4] Array(Int64) true 
Array(Int64) true +\N 2020-01-01 None false Date true +[1,2,3] 2020-01-01 Array(Int64) true Date true +2020-01-01 2020-01-01 Date true Date true +2020-01-02 2020-01-01 Date true Date true +42 2020-01-01 Int64 false Date true +abc 2020-01-01 String false Date true +2020-01-01 2020-01-02 Date true Date true +\N 42 None false Int64 false +[1,2,3] 42 Array(Int64) true Int64 false +2020-01-01 42 Date true Int64 false +42 42 Int64 false Int64 false +43 42 Int64 false Int64 false +abc 42 String false Int64 false +42 43 Int64 false Int64 false +\N abc None false String false +[1,2,3] abc Array(Int64) true String false +2020-01-01 abc Date true String false +42 abc Int64 false String false +abc abc String false String false +abd abc String false String false +abc abd String false String false +\N \N None false None false +[1,2,3] \N Array(Int64) true None false +2020-01-01 \N Date true None false +42 \N Int64 false None false +abc \N String false None false order by d2, d1 nulls last -[1,2,3] [1,2,3] Array(Int64) Array(Int64) -[1,2,4] [1,2,3] Array(Int64) Array(Int64) -42 [1,2,3] Int64 Array(Int64) -abc [1,2,3] String Array(Int64) -\N [1,2,3] None Array(Int64) -[1,2,3] [1,2,4] Array(Int64) Array(Int64) -[1,2,3] 42 Array(Int64) Int64 -42 42 Int64 Int64 -43 42 Int64 Int64 -abc 42 String Int64 -\N 42 None Int64 -42 43 Int64 Int64 -[1,2,3] abc Array(Int64) String -42 abc Int64 String -abc abc String String -abd abc String String -\N abc None String -abc abd String String -[1,2,3] \N Array(Int64) None -42 \N Int64 None -abc \N String None -\N \N None None +[1,2,3] [1,2,3] Array(Int64) true Array(Int64) true +[1,2,4] [1,2,3] Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] Date true Array(Int64) true +42 [1,2,3] Int64 false Array(Int64) true +abc [1,2,3] String false Array(Int64) true +\N [1,2,3] None false Array(Int64) true +[1,2,3] [1,2,4] Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 Array(Int64) true Date true +2020-01-01 2020-01-01 Date true Date true +2020-01-02 2020-01-01 Date true Date true +42 2020-01-01 Int64 false Date true +abc 2020-01-01 String false Date true +\N 2020-01-01 None false Date true +2020-01-01 2020-01-02 Date true Date true +[1,2,3] 42 Array(Int64) true Int64 false +2020-01-01 42 Date true Int64 false +42 42 Int64 false Int64 false +43 42 Int64 false Int64 false +abc 42 String false Int64 false +\N 42 None false Int64 false +42 43 Int64 false Int64 false +[1,2,3] abc Array(Int64) true String false +2020-01-01 abc Date true String false +42 abc Int64 false String false +abc abc String false String false +abd abc String false String false +\N abc None false String false +abc abd String false String false +[1,2,3] \N Array(Int64) true None false +2020-01-01 \N Date true None false +42 \N Int64 false None false +abc \N String false None false +\N \N None false None false d1 = d2 -[1,2,3] [1,2,3] 1 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 0 Array(Int64) Array(Int64) -[1,2,3] 42 0 Array(Int64) Int64 -[1,2,3] abc 0 Array(Int64) String -[1,2,3] \N 0 Array(Int64) None -[1,2,4] [1,2,3] 0 Array(Int64) Array(Int64) -42 [1,2,3] 0 Int64 Array(Int64) -42 42 1 Int64 Int64 -42 43 0 Int64 Int64 -42 abc 0 Int64 String -42 \N 0 Int64 None -43 42 0 Int64 Int64 -abc [1,2,3] 0 String Array(Int64) -abc 42 0 String Int64 -abc abc 1 String String -abc abd 0 String String -abc \N 0 String None -abd abc 0 String String -\N [1,2,3] 0 None Array(Int64) -\N 42 0 None Int64 -\N abc 0 None String -\N \N 1 None None +[1,2,3] [1,2,3] 1 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 0 
Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 0 Array(Int64) true Date true +[1,2,3] 42 0 Array(Int64) true Int64 false +[1,2,3] abc 0 Array(Int64) true String false +[1,2,3] \N 0 Array(Int64) true None false +[1,2,4] [1,2,3] 0 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 0 Date true Array(Int64) true +2020-01-01 2020-01-01 1 Date true Date true +2020-01-01 2020-01-02 0 Date true Date true +2020-01-01 42 0 Date true Int64 false +2020-01-01 abc 0 Date true String false +2020-01-01 \N 0 Date true None false +2020-01-02 2020-01-01 0 Date true Date true +42 [1,2,3] 0 Int64 false Array(Int64) true +42 2020-01-01 0 Int64 false Date true +42 42 1 Int64 false Int64 false +42 43 0 Int64 false Int64 false +42 abc 0 Int64 false String false +42 \N 0 Int64 false None false +43 42 0 Int64 false Int64 false +abc [1,2,3] 0 String false Array(Int64) true +abc 2020-01-01 0 String false Date true +abc 42 0 String false Int64 false +abc abc 1 String false String false +abc abd 0 String false String false +abc \N 0 String false None false +abd abc 0 String false String false +\N [1,2,3] 0 None false Array(Int64) true +\N 2020-01-01 0 None false Date true +\N 42 0 None false Int64 false +\N abc 0 None false String false +\N \N 1 None false None false d1 < d2 -[1,2,3] [1,2,3] 0 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 1 Array(Int64) Array(Int64) -[1,2,3] 42 1 Array(Int64) Int64 -[1,2,3] abc 1 Array(Int64) String -[1,2,3] \N 1 Array(Int64) None -[1,2,4] [1,2,3] 0 Array(Int64) Array(Int64) -42 [1,2,3] 0 Int64 Array(Int64) -42 42 0 Int64 Int64 -42 43 1 Int64 Int64 -42 abc 1 Int64 String -42 \N 1 Int64 None -43 42 0 Int64 Int64 -abc [1,2,3] 0 String Array(Int64) -abc 42 0 String Int64 -abc abc 0 String String -abc abd 1 String String -abc \N 1 String None -abd abc 0 String String -\N [1,2,3] 0 None Array(Int64) -\N 42 0 None Int64 -\N abc 0 None String -\N \N 0 None None +[1,2,3] [1,2,3] 0 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 1 Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 1 Array(Int64) true Date true +[1,2,3] 42 1 Array(Int64) true Int64 false +[1,2,3] abc 1 Array(Int64) true String false +[1,2,3] \N 1 Array(Int64) true None false +[1,2,4] [1,2,3] 0 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 0 Date true Array(Int64) true +2020-01-01 2020-01-01 0 Date true Date true +2020-01-01 2020-01-02 1 Date true Date true +2020-01-01 42 1 Date true Int64 false +2020-01-01 abc 1 Date true String false +2020-01-01 \N 1 Date true None false +2020-01-02 2020-01-01 0 Date true Date true +42 [1,2,3] 0 Int64 false Array(Int64) true +42 2020-01-01 0 Int64 false Date true +42 42 0 Int64 false Int64 false +42 43 1 Int64 false Int64 false +42 abc 1 Int64 false String false +42 \N 1 Int64 false None false +43 42 0 Int64 false Int64 false +abc [1,2,3] 0 String false Array(Int64) true +abc 2020-01-01 0 String false Date true +abc 42 0 String false Int64 false +abc abc 0 String false String false +abc abd 1 String false String false +abc \N 1 String false None false +abd abc 0 String false String false +\N [1,2,3] 0 None false Array(Int64) true +\N 2020-01-01 0 None false Date true +\N 42 0 None false Int64 false +\N abc 0 None false String false +\N \N 0 None false None false d1 <= d2 -[1,2,3] [1,2,3] 1 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 1 Array(Int64) Array(Int64) -[1,2,3] 42 1 Array(Int64) Int64 -[1,2,3] abc 1 Array(Int64) String -[1,2,3] \N 1 Array(Int64) None -[1,2,4] [1,2,3] 0 Array(Int64) Array(Int64) -42 [1,2,3] 0 Int64 Array(Int64) -42 42 1 Int64 Int64 -42 43 1 
Int64 Int64 -42 abc 1 Int64 String -42 \N 1 Int64 None -43 42 0 Int64 Int64 -abc [1,2,3] 0 String Array(Int64) -abc 42 0 String Int64 -abc abc 1 String String -abc abd 1 String String -abc \N 1 String None -abd abc 0 String String -\N [1,2,3] 0 None Array(Int64) -\N 42 0 None Int64 -\N abc 0 None String -\N \N 1 None None +[1,2,3] [1,2,3] 1 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 1 Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 1 Array(Int64) true Date true +[1,2,3] 42 1 Array(Int64) true Int64 false +[1,2,3] abc 1 Array(Int64) true String false +[1,2,3] \N 1 Array(Int64) true None false +[1,2,4] [1,2,3] 0 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 0 Date true Array(Int64) true +2020-01-01 2020-01-01 1 Date true Date true +2020-01-01 2020-01-02 1 Date true Date true +2020-01-01 42 1 Date true Int64 false +2020-01-01 abc 1 Date true String false +2020-01-01 \N 1 Date true None false +2020-01-02 2020-01-01 0 Date true Date true +42 [1,2,3] 0 Int64 false Array(Int64) true +42 2020-01-01 0 Int64 false Date true +42 42 1 Int64 false Int64 false +42 43 1 Int64 false Int64 false +42 abc 1 Int64 false String false +42 \N 1 Int64 false None false +43 42 0 Int64 false Int64 false +abc [1,2,3] 0 String false Array(Int64) true +abc 2020-01-01 0 String false Date true +abc 42 0 String false Int64 false +abc abc 1 String false String false +abc abd 1 String false String false +abc \N 1 String false None false +abd abc 0 String false String false +\N [1,2,3] 0 None false Array(Int64) true +\N 2020-01-01 0 None false Date true +\N 42 0 None false Int64 false +\N abc 0 None false String false +\N \N 1 None false None false d1 > d2 -[1,2,3] [1,2,3] 0 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 0 Array(Int64) Array(Int64) -[1,2,3] 42 0 Array(Int64) Int64 -[1,2,3] abc 0 Array(Int64) String -[1,2,3] \N 0 Array(Int64) None -[1,2,4] [1,2,3] 1 Array(Int64) Array(Int64) -42 [1,2,3] 1 Int64 Array(Int64) -42 42 0 Int64 Int64 -42 43 0 Int64 Int64 -42 abc 0 Int64 String -42 \N 0 Int64 None -43 42 1 Int64 Int64 -abc [1,2,3] 1 String Array(Int64) -abc 42 1 String Int64 -abc abc 0 String String -abc abd 0 String String -abc \N 0 String None -abd abc 1 String String -\N [1,2,3] 1 None Array(Int64) -\N 42 1 None Int64 -\N abc 1 None String -\N \N 0 None None +[1,2,3] [1,2,3] 0 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 0 Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 0 Array(Int64) true Date true +[1,2,3] 42 0 Array(Int64) true Int64 false +[1,2,3] abc 0 Array(Int64) true String false +[1,2,3] \N 0 Array(Int64) true None false +[1,2,4] [1,2,3] 1 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 1 Date true Array(Int64) true +2020-01-01 2020-01-01 0 Date true Date true +2020-01-01 2020-01-02 0 Date true Date true +2020-01-01 42 0 Date true Int64 false +2020-01-01 abc 0 Date true String false +2020-01-01 \N 0 Date true None false +2020-01-02 2020-01-01 1 Date true Date true +42 [1,2,3] 1 Int64 false Array(Int64) true +42 2020-01-01 1 Int64 false Date true +42 42 0 Int64 false Int64 false +42 43 0 Int64 false Int64 false +42 abc 0 Int64 false String false +42 \N 0 Int64 false None false +43 42 1 Int64 false Int64 false +abc [1,2,3] 1 String false Array(Int64) true +abc 2020-01-01 1 String false Date true +abc 42 1 String false Int64 false +abc abc 0 String false String false +abc abd 0 String false String false +abc \N 0 String false None false +abd abc 1 String false String false +\N [1,2,3] 1 None false Array(Int64) true +\N 2020-01-01 1 None false Date true +\N 42 
1 None false Int64 false +\N abc 1 None false String false +\N \N 0 None false None false d1 >= d2 -[1,2,3] [1,2,3] 1 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 1 Array(Int64) Array(Int64) -[1,2,3] 42 1 Array(Int64) Int64 -[1,2,3] abc 1 Array(Int64) String -[1,2,3] \N 1 Array(Int64) None -[1,2,4] [1,2,3] 1 Array(Int64) Array(Int64) -42 [1,2,3] 1 Int64 Array(Int64) -42 42 1 Int64 Int64 -42 43 1 Int64 Int64 -42 abc 1 Int64 String -42 \N 1 Int64 None -43 42 1 Int64 Int64 -abc [1,2,3] 1 String Array(Int64) -abc 42 1 String Int64 -abc abc 1 String String -abc abd 1 String String -abc \N 1 String None -abd abc 1 String String -\N [1,2,3] 1 None Array(Int64) -\N 42 1 None Int64 -\N abc 1 None String -\N \N 1 None None +[1,2,3] [1,2,3] 1 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 1 Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 1 Array(Int64) true Date true +[1,2,3] 42 1 Array(Int64) true Int64 false +[1,2,3] abc 1 Array(Int64) true String false +[1,2,3] \N 1 Array(Int64) true None false +[1,2,4] [1,2,3] 1 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 1 Date true Array(Int64) true +2020-01-01 2020-01-01 1 Date true Date true +2020-01-01 2020-01-02 1 Date true Date true +2020-01-01 42 1 Date true Int64 false +2020-01-01 abc 1 Date true String false +2020-01-01 \N 1 Date true None false +2020-01-02 2020-01-01 1 Date true Date true +42 [1,2,3] 1 Int64 false Array(Int64) true +42 2020-01-01 1 Int64 false Date true +42 42 1 Int64 false Int64 false +42 43 1 Int64 false Int64 false +42 abc 1 Int64 false String false +42 \N 1 Int64 false None false +43 42 1 Int64 false Int64 false +abc [1,2,3] 1 String false Array(Int64) true +abc 2020-01-01 1 String false Date true +abc 42 1 String false Int64 false +abc abc 1 String false String false +abc abd 1 String false String false +abc \N 1 String false None false +abd abc 1 String false String false +\N [1,2,3] 1 None false Array(Int64) true +\N 2020-01-01 1 None false Date true +\N 42 1 None false Int64 false +\N abc 1 None false String false +\N \N 1 None false None false diff --git a/tests/queries/0_stateless/03035_dynamic_sorting.sql b/tests/queries/0_stateless/03035_dynamic_sorting.sql index 0487fafc955..e0039a348c6 100644 --- a/tests/queries/0_stateless/03035_dynamic_sorting.sql +++ b/tests/queries/0_stateless/03035_dynamic_sorting.sql @@ -1,80 +1,55 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; -create table test (d1 Dynamic, d2 Dynamic) engine=Memory; - -insert into test values (42, 42); -insert into test values (42, 43); -insert into test values (43, 42); - -insert into test values ('abc', 'abc'); -insert into test values ('abc', 'abd'); -insert into test values ('abd', 'abc'); - -insert into test values ([1,2,3], [1,2,3]); -insert into test values ([1,2,3], [1,2,4]); -insert into test values ([1,2,4], [1,2,3]); - -insert into test values (NULL, NULL); - -insert into test values (42, 'abc'); -insert into test values ('abc', 42); - -insert into test values (42, [1,2,3]); -insert into test values ([1,2,3], 42); - -insert into test values (42, NULL); -insert into test values (NULL, 42); - -insert into test values ('abc', [1,2,3]); -insert into test values ([1,2,3], 'abc'); - -insert into test values ('abc', NULL); -insert into test values (NULL, 'abc'); - -insert into test values ([1,2,3], NULL); -insert into test values (NULL, [1,2,3]); +create table test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) engine=Memory; +insert into test values (42, 42), (42, 43), (43, 42), ('abc', 'abc'), ('abc', 
'abd'), ('abd', 'abc'), +([1,2,3], [1,2,3]), ([1,2,3], [1,2,4]), ([1,2,4], [1,2,3]), +('2020-01-01', '2020-01-01'), ('2020-01-01', '2020-01-02'), ('2020-01-02', '2020-01-01'), +(NULL, NULL), (42, 'abc'), ('abc', 42), (42, [1,2,3]), ([1,2,3], 42), (42, NULL), (NULL, 42), +('abc', [1,2,3]), ([1,2,3], 'abc'), ('abc', NULL), (NULL, 'abc'), ([1,2,3], NULL), (NULL, [1,2,3]), +(42, '2020-01-01'), ('2020-01-01', 42), ('2020-01-01', 'abc'), ('abc', '2020-01-01'), +('2020-01-01', [1,2,3]), ([1,2,3], '2020-01-01'), ('2020-01-01', NULL), (NULL, '2020-01-01'); select 'order by d1 nulls first'; -select d1, dynamicType(d1) from test order by d1 nulls first; +select d1, dynamicType(d1), isDynamicElementInSharedData(d1) from test order by d1 nulls first; select 'order by d1 nulls last'; -select d1, dynamicType(d1) from test order by d1 nulls last; +select d1, dynamicType(d1), isDynamicElementInSharedData(d1) from test order by d1 nulls last; select 'order by d2 nulls first'; -select d2, dynamicType(d2) from test order by d2 nulls first; +select d2, dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2 nulls first; select 'order by d2 nulls last'; -select d2, dynamicType(d2) from test order by d2 nulls last; +select d2, dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2 nulls last; select 'order by d1, d2 nulls first'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2 nulls first; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2 nulls first; select 'order by d1, d2 nulls last'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2 nulls last; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2 nulls last; select 'order by d2, d1 nulls first'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d2, d1 nulls first; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2, d1 nulls first; select 'order by d2, d1 nulls last'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d2, d1 nulls last; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2, d1 nulls last; select 'd1 = d2'; -select d1, d2, d1 = d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 = d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 < d2'; -select d1, d2, d1 < d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 < d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 <= d2'; -select d1, d2, d1 <= d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 <= d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 > d2'; -select d1, d2, d1 > d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 > d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 >= d2'; -select d1, d2, d2 >= d2, dynamicType(d1), dynamicType(d2) 
from test order by d1, d2; +select d1, d2, d2 >= d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference new file mode 100644 index 00000000000..ca6c5dbba82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference @@ -0,0 +1,20 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +360000 +360000 +200000 +200000 +0 +0 +20000 +20000 +200000 +200000 +20000 +20000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql new file mode 100644 index 00000000000..bff28fb5c90 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where 
dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`LowCardinality(String)`, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`LowCardinality(String)`, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`LowCardinality(String)`, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference new file mode 100644 index 00000000000..ca6c5dbba82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference @@ -0,0 +1,20 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +360000 +360000 +200000 +200000 +0 +0 +20000 +20000 +200000 +200000 +20000 +20000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql new file mode 100644 index 00000000000..4eed3d15529 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=Memory; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, 
[range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 new file mode 100644 index 00000000000..9c1f8fa45e8 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 @@ -0,0 +1,2460 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +36 +36 +20 +20 +0 +0 +2 +2 +20 +20 +2 +2 +20 +0 +0 0 \N [] +1 1 \N [] +2 2 \N [] +3 3 \N [] +4 4 \N [] +5 5 \N [] +6 6 \N [] +7 7 \N [] +8 8 \N [] +9 9 \N [] +str_10 \N str_10 [] +10 10 \N [] +10 10 \N [] +[[0,1]] \N \N [] +str_11 \N \N [] +str_11 \N str_11 [] +str_12 \N str_12 [] +12 12 \N [] +12 12 \N [] +str_13 \N str_13 [] +13 13 \N [] +13 13 \N [] +str_14 \N str_14 [] +14 14 \N [] +14 14 \N [] +str_15 \N str_15 [] +15 15 \N [] +15 15 \N [] +[[0,1,2,3,4,5,6]] \N \N [] +str_16 \N \N [] +str_16 \N str_16 [] +str_17 \N str_17 [] +17 17 \N [] +17 17 \N [] +str_18 \N str_18 [] +18 18 \N [] +18 18 \N [] +str_19 \N str_19 [] +19 19 \N [] +19 19 \N [] 
+[20] \N \N [20] +['str_21','str_21'] \N \N ['str_21','str_21'] +[22,22,22] \N \N [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +[40] \N \N [40] +41 41 \N [] +\N \N \N [] +str_43 \N str_43 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] +45 45 \N [] +\N \N \N [] +str_47 \N str_47 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 49 \N [] +\N \N \N [] +str_51 \N str_51 [] +[52,52,52] \N \N [52,52,52] +53 53 \N [] +\N \N \N [] +str_55 \N str_55 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] +57 57 \N [] +\N \N \N [] +str_59 \N str_59 [] +[60] \N \N [60] +61 61 \N [] +\N \N \N [] +str_63 \N str_63 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] +65 65 \N [] +\N \N \N [] +str_67 \N str_67 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] +69 69 \N [] +\N \N \N [] +str_71 \N str_71 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] +73 73 \N [] +\N \N \N [] +str_75 \N str_75 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] +77 77 \N [] +\N \N \N [] +str_79 \N str_79 [] +0 \N [] +1 \N [] +2 \N [] +3 \N [] +4 \N [] +5 \N [] +6 \N [] +7 \N [] +8 \N [] +9 \N [] +\N str_10 [] +10 \N [] +10 \N [] +\N \N [] +\N \N [] +\N str_11 [] +\N str_12 [] +12 \N [] +12 \N [] +\N str_13 [] +13 \N [] +13 \N [] +\N str_14 [] +14 \N [] +14 \N [] +\N str_15 [] +15 \N [] +15 \N [] +\N \N [] +\N \N [] +\N str_16 [] +\N str_17 [] +17 \N [] +17 \N [] +\N str_18 [] +18 \N [] +18 \N [] +\N str_19 [] +19 \N [] +19 \N [] +\N \N [20] +\N \N ['str_21','str_21'] +\N \N [22,22,22] +\N \N [23,23,23,23] +\N \N [24,24,24,24,24] +\N \N [25,25,25,25,25,25] +\N \N [26,26,26,26,26,26,26] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] +\N \N [29,29,29,29,29,29,29,29,29,29] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [40] +41 \N [] +\N \N [] +\N str_43 [] +\N \N [44,44,44,44,44] +45 \N [] +\N \N [] +\N str_47 [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] +\N \N [] +\N str_51 [] +\N \N [52,52,52] +53 \N [] +\N \N [] +\N str_55 [] +\N \N [56,56,56,56,56,56,56] +57 \N [] +\N \N [] +\N str_59 [] +\N \N [60] +61 \N [] +\N \N [] +\N str_63 [] +\N \N [64,64,64,64,64] +65 \N [] +\N \N [] +\N str_67 [] +\N \N [68,68,68,68,68,68,68,68,68] +69 \N [] +\N \N [] +\N str_71 [] +\N \N [NULL,NULL,NULL] +73 \N [] +\N \N [] +\N str_75 [] +\N \N [76,76,76,76,76,76,76] +77 \N [] +\N \N [] +\N str_79 [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N 
\N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +0 0 \N [] 0 [] +1 1 \N [] 0 [] +2 2 \N [] 0 [] +3 3 \N [] 0 [] +4 4 \N [] 0 [] +5 5 \N [] 0 [] +6 6 \N [] 0 [] +7 7 \N [] 0 [] +8 8 \N [] 0 [] +9 9 \N [] 0 [] +str_10 \N \N [] 0 [] +10 10 \N [] 0 [] +10 10 \N [] 0 [] +[[0,1]] \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_12 \N \N [] 0 [] +12 12 \N [] 0 [] +12 12 \N [] 0 [] +str_13 \N \N [] 0 [] +13 13 \N [] 0 [] +13 13 \N [] 0 [] +str_14 \N \N [] 0 [] +14 14 \N [] 0 [] +14 14 \N [] 0 [] +str_15 \N \N [] 0 [] +15 15 \N [] 0 [] +15 15 \N [] 0 [] +[[0,1,2,3,4,5,6]] \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_17 \N \N [] 0 [] +17 17 \N [] 0 [] +17 17 \N [] 0 [] +str_18 \N \N [] 0 [] +18 18 \N [] 0 [] +18 18 \N [] 0 [] +str_19 \N \N [] 0 [] +19 19 \N [] 0 [] +19 19 \N [] 0 [] +[20] \N \N [20] 1 [20] +['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] +[22,22,22] \N \N [22,22,22] 3 [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] 4 [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] 5 [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +[40] \N \N [40] 1 [40] +41 41 \N [] 0 [] +\N \N \N [] 0 [] +str_43 \N \N [] 0 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] 5 [44,44,44,44,44] +45 45 \N [] 0 [] +\N \N \N [] 0 [] +str_47 \N \N [] 0 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 49 \N [] 0 [] +\N \N \N [] 0 [] +str_51 \N \N [] 0 [] +[52,52,52] \N \N [52,52,52] 3 [52,52,52] +53 53 \N [] 0 [] +\N \N \N [] 0 [] +str_55 \N \N [] 0 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] +57 57 \N [] 0 [] +\N \N \N [] 0 [] +str_59 \N \N [] 0 [] +[60] \N \N [60] 1 [60] +61 61 \N [] 0 [] +\N \N \N [] 0 [] +str_63 \N \N [] 0 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] 5 [64,64,64,64,64] +65 65 \N [] 0 [] +\N \N \N [] 0 [] +str_67 \N \N [] 0 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] +69 69 \N [] 0 [] +\N \N \N [] 0 [] +str_71 \N \N [] 0 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] +73 73 \N [] 0 [] +\N \N \N [] 0 [] +str_75 \N \N [] 0 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] +77 77 \N [] 0 [] +\N \N \N [] 0 [] +str_79 \N \N [] 0 [] +0 \N [] 0 [] [] +1 \N [] 0 [] [] 
+2 \N [] 0 [] [] +3 \N [] 0 [] [] +4 \N [] 0 [] [] +5 \N [] 0 [] [] +6 \N [] 0 [] [] +7 \N [] 0 [] [] +8 \N [] 0 [] [] +9 \N [] 0 [] [] +\N \N [] 0 [] [] +10 \N [] 0 [] [] +10 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +12 \N [] 0 [] [] +12 \N [] 0 [] [] +\N \N [] 0 [] [] +13 \N [] 0 [] [] +13 \N [] 0 [] [] +\N \N [] 0 [] [] +14 \N [] 0 [] [] +14 \N [] 0 [] [] +\N \N [] 0 [] [] +15 \N [] 0 [] [] +15 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +17 \N [] 0 [] [] +17 \N [] 0 [] [] +\N \N [] 0 [] [] +18 \N [] 0 [] [] +18 \N [] 0 [] [] +\N \N [] 0 [] [] +19 \N [] 0 [] [] +19 \N [] 0 [] [] +\N \N [20] 1 [20] [NULL] +\N \N ['str_21','str_21'] 2 [NULL,NULL] ['str_21','str_21'] +\N \N [22,22,22] 3 [22,22,22] [NULL,NULL,NULL] +\N \N [23,23,23,23] 4 [23,23,23,23] [NULL,NULL,NULL,NULL] +\N \N [24,24,24,24,24] 5 [24,24,24,24,24] [NULL,NULL,NULL,NULL,NULL] +\N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] [NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [40] 1 [40] [NULL] +41 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [44,44,44,44,44] 5 [44,44,44,44,44] [NULL,NULL,NULL,NULL,NULL] +45 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [52,52,52] 3 [52,52,52] [NULL,NULL,NULL] +53 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +57 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [60] 1 [60] [NULL] +61 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [64,64,64,64,64] 5 [64,64,64,64,64] [NULL,NULL,NULL,NULL,NULL] +65 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +69 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] [NULL,NULL,NULL] +73 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +77 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] 
+19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [20] +[] 0 [NULL,NULL] +[] 0 [22,22,22] +[] 0 [23,23,23,23] +[] 0 [24,24,24,24,24] +[] 0 [25,25,25,25,25,25] +[] 0 [26,26,26,26,26,26,26] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [28,28,28,28,28,28,28,28,28] +[] 0 [29,29,29,29,29,29,29,29,29,29] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [40] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [44,44,44,44,44] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [52,52,52] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [56,56,56,56,56,56,56] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [60] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [64,64,64,64,64] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [68,68,68,68,68,68,68,68,68] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [76,76,76,76,76,76,76] +[] 0 [] +[] 0 [] +[] 0 [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] 
+[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +36 +36 +20 +20 +0 +0 +2 +2 +20 +20 +2 +2 +20 +0 +0 0 \N [] +1 1 \N [] +2 2 \N [] +3 3 \N [] +4 4 \N [] +5 5 \N [] +6 6 \N [] +7 7 \N [] +8 8 \N [] +9 9 \N [] +str_10 \N str_10 [] +10 10 \N [] +10 10 \N [] +[[0,1]] \N \N [] +str_11 \N \N [] +str_11 \N str_11 [] +str_12 \N str_12 [] +12 12 \N [] +12 12 \N [] +str_13 \N str_13 [] +13 13 \N [] +13 13 \N [] +str_14 \N str_14 [] +14 14 \N [] +14 14 \N [] +str_15 \N str_15 [] +15 15 \N [] +15 15 \N [] +[[0,1,2,3,4,5,6]] \N \N [] +str_16 \N \N [] +str_16 \N str_16 [] +str_17 \N str_17 [] +17 17 \N [] +17 17 \N [] +str_18 \N str_18 [] +18 18 \N [] +18 18 \N [] +str_19 \N str_19 [] +19 19 \N [] +19 19 \N [] +[20] \N \N [20] +['str_21','str_21'] \N \N ['str_21','str_21'] +[22,22,22] \N \N [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +[40] \N \N [40] +41 41 \N [] +\N \N \N [] +str_43 \N str_43 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] +45 45 \N [] +\N \N \N [] +str_47 \N str_47 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 49 \N [] +\N \N \N [] +str_51 \N str_51 [] +[52,52,52] \N \N [52,52,52] +53 53 \N [] +\N \N \N [] +str_55 \N str_55 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] +57 57 \N [] +\N \N \N [] +str_59 \N str_59 [] +[60] \N \N [60] +61 61 \N [] +\N \N \N [] +str_63 \N str_63 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] +65 65 \N [] +\N \N \N [] +str_67 \N str_67 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] +69 69 \N [] +\N \N \N [] +str_71 \N str_71 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] +73 73 \N [] +\N \N \N [] +str_75 \N str_75 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] +77 77 \N [] +\N \N \N [] +str_79 \N str_79 [] +0 \N [] +1 \N [] +2 \N [] +3 \N [] +4 \N [] +5 \N [] +6 \N [] +7 \N [] +8 \N [] +9 \N [] +\N str_10 [] +10 \N [] +10 \N [] +\N \N [] +\N \N [] +\N str_11 [] +\N str_12 [] +12 \N [] +12 \N [] +\N str_13 [] +13 \N [] +13 \N [] +\N str_14 [] +14 \N [] +14 \N [] +\N str_15 [] +15 \N [] +15 \N [] +\N \N [] +\N \N [] +\N str_16 [] +\N str_17 [] +17 \N [] +17 \N [] +\N str_18 [] +18 \N [] +18 \N [] +\N str_19 [] +19 \N [] +19 \N [] +\N \N [20] +\N \N ['str_21','str_21'] +\N \N [22,22,22] +\N \N [23,23,23,23] +\N \N [24,24,24,24,24] +\N \N [25,25,25,25,25,25] +\N \N [26,26,26,26,26,26,26] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] +\N \N [29,29,29,29,29,29,29,29,29,29] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [40] +41 \N [] +\N \N [] +\N str_43 [] +\N \N [44,44,44,44,44] +45 \N [] +\N \N [] +\N str_47 [] +\N \N 
['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] +\N \N [] +\N str_51 [] +\N \N [52,52,52] +53 \N [] +\N \N [] +\N str_55 [] +\N \N [56,56,56,56,56,56,56] +57 \N [] +\N \N [] +\N str_59 [] +\N \N [60] +61 \N [] +\N \N [] +\N str_63 [] +\N \N [64,64,64,64,64] +65 \N [] +\N \N [] +\N str_67 [] +\N \N [68,68,68,68,68,68,68,68,68] +69 \N [] +\N \N [] +\N str_71 [] +\N \N [NULL,NULL,NULL] +73 \N [] +\N \N [] +\N str_75 [] +\N \N [76,76,76,76,76,76,76] +77 \N [] +\N \N [] +\N str_79 [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +0 0 \N [] 0 [] +1 1 \N [] 0 [] +2 2 \N [] 0 [] +3 3 \N [] 0 [] +4 4 \N [] 0 [] +5 5 \N [] 0 [] +6 6 \N [] 0 [] +7 7 \N [] 0 [] +8 8 \N [] 0 [] +9 9 \N [] 0 [] +str_10 \N \N [] 0 [] +10 10 \N [] 0 [] +10 10 \N [] 0 [] +[[0,1]] \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_12 \N \N [] 0 [] +12 12 \N [] 0 [] +12 12 \N [] 0 [] +str_13 \N \N [] 0 [] +13 13 \N [] 0 [] +13 13 \N [] 0 [] +str_14 \N \N [] 0 [] +14 14 \N [] 0 [] +14 14 \N [] 0 [] +str_15 \N \N [] 0 [] +15 15 \N [] 0 [] +15 15 \N [] 0 [] +[[0,1,2,3,4,5,6]] \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_17 \N \N [] 0 [] +17 17 \N [] 0 [] +17 17 \N [] 0 [] +str_18 \N \N [] 0 [] +18 18 \N [] 0 [] +18 18 \N [] 0 [] +str_19 \N \N [] 0 [] +19 19 \N [] 0 [] +19 19 \N [] 0 [] +[20] \N \N [20] 1 [20] +['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] +[22,22,22] \N \N [22,22,22] 3 [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] 4 [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] 5 [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +[40] \N \N [40] 1 [40] +41 41 \N [] 0 [] +\N \N \N [] 0 [] +str_43 \N \N [] 0 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] 5 [44,44,44,44,44] +45 45 \N [] 0 [] +\N \N \N [] 0 [] +str_47 \N \N [] 0 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N 
['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 49 \N [] 0 [] +\N \N \N [] 0 [] +str_51 \N \N [] 0 [] +[52,52,52] \N \N [52,52,52] 3 [52,52,52] +53 53 \N [] 0 [] +\N \N \N [] 0 [] +str_55 \N \N [] 0 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] +57 57 \N [] 0 [] +\N \N \N [] 0 [] +str_59 \N \N [] 0 [] +[60] \N \N [60] 1 [60] +61 61 \N [] 0 [] +\N \N \N [] 0 [] +str_63 \N \N [] 0 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] 5 [64,64,64,64,64] +65 65 \N [] 0 [] +\N \N \N [] 0 [] +str_67 \N \N [] 0 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] +69 69 \N [] 0 [] +\N \N \N [] 0 [] +str_71 \N \N [] 0 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] +73 73 \N [] 0 [] +\N \N \N [] 0 [] +str_75 \N \N [] 0 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] +77 77 \N [] 0 [] +\N \N \N [] 0 [] +str_79 \N \N [] 0 [] +0 \N [] 0 [] [] +1 \N [] 0 [] [] +2 \N [] 0 [] [] +3 \N [] 0 [] [] +4 \N [] 0 [] [] +5 \N [] 0 [] [] +6 \N [] 0 [] [] +7 \N [] 0 [] [] +8 \N [] 0 [] [] +9 \N [] 0 [] [] +\N \N [] 0 [] [] +10 \N [] 0 [] [] +10 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +12 \N [] 0 [] [] +12 \N [] 0 [] [] +\N \N [] 0 [] [] +13 \N [] 0 [] [] +13 \N [] 0 [] [] +\N \N [] 0 [] [] +14 \N [] 0 [] [] +14 \N [] 0 [] [] +\N \N [] 0 [] [] +15 \N [] 0 [] [] +15 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +17 \N [] 0 [] [] +17 \N [] 0 [] [] +\N \N [] 0 [] [] +18 \N [] 0 [] [] +18 \N [] 0 [] [] +\N \N [] 0 [] [] +19 \N [] 0 [] [] +19 \N [] 0 [] [] +\N \N [20] 1 [20] [NULL] +\N \N ['str_21','str_21'] 2 [NULL,NULL] ['str_21','str_21'] +\N \N [22,22,22] 3 [22,22,22] [NULL,NULL,NULL] +\N \N [23,23,23,23] 4 [23,23,23,23] [NULL,NULL,NULL,NULL] +\N \N [24,24,24,24,24] 5 [24,24,24,24,24] [NULL,NULL,NULL,NULL,NULL] +\N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] [NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [40] 1 [40] [NULL] +41 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [44,44,44,44,44] 5 [44,44,44,44,44] [NULL,NULL,NULL,NULL,NULL] +45 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [52,52,52] 3 [52,52,52] [NULL,NULL,NULL] +53 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +57 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [60] 1 [60] [NULL] +61 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N 
[64,64,64,64,64] 5 [64,64,64,64,64] [NULL,NULL,NULL,NULL,NULL] +65 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +69 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] [NULL,NULL,NULL] +73 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +77 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [20] +[] 0 [NULL,NULL] +[] 0 [22,22,22] +[] 0 [23,23,23,23] +[] 0 [24,24,24,24,24] +[] 0 [25,25,25,25,25,25] +[] 0 [26,26,26,26,26,26,26] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [28,28,28,28,28,28,28,28,28] +[] 0 [29,29,29,29,29,29,29,29,29,29] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [40] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [44,44,44,44,44] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [52,52,52] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [56,56,56,56,56,56,56] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 
[60] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [64,64,64,64,64] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [68,68,68,68,68,68,68,68,68] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [76,76,76,76,76,76,76] +[] 0 [] +[] 0 [] +[] 0 [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +36 +36 +20 +20 +0 +0 +2 +2 +20 +20 +2 +2 +20 +0 +0 0 \N [] +1 1 \N [] +2 2 \N [] +3 3 \N [] +4 4 \N [] +5 5 \N [] +6 6 \N [] +7 7 \N [] +8 8 \N [] +9 9 \N [] +str_10 \N str_10 [] +10 10 \N [] +10 10 \N [] +[[0,1]] \N \N [] +str_11 \N \N [] +str_11 \N str_11 [] +str_12 \N str_12 [] +12 12 \N [] +12 12 \N [] +str_13 \N str_13 [] +13 13 \N [] +13 13 \N [] +str_14 \N str_14 [] +14 14 \N [] +14 14 \N [] +str_15 \N str_15 [] +15 15 \N [] +15 15 \N [] +[[0,1,2,3,4,5,6]] \N \N [] +str_16 \N \N [] +str_16 \N str_16 [] +str_17 \N str_17 [] +17 17 \N [] +17 17 \N [] +str_18 \N str_18 [] +18 18 \N [] +18 18 \N [] +str_19 \N str_19 [] +19 19 \N [] +19 19 \N [] +[20] \N \N [20] +['str_21','str_21'] \N \N ['str_21','str_21'] +[22,22,22] \N \N [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +[40] \N \N [40] +41 41 \N [] +\N \N \N [] +str_43 \N str_43 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] +45 45 \N [] +\N \N \N [] +str_47 \N str_47 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 49 \N [] +\N \N \N [] +str_51 \N str_51 [] +[52,52,52] \N \N [52,52,52] +53 53 \N [] +\N \N \N [] +str_55 \N str_55 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] +57 57 \N [] +\N \N \N [] +str_59 \N str_59 [] +[60] \N \N [60] +61 61 \N [] +\N \N \N [] +str_63 \N str_63 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] +65 65 \N [] +\N \N \N [] +str_67 \N str_67 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] +69 69 \N [] +\N \N \N [] +str_71 \N str_71 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] +73 73 \N [] +\N 
\N \N [] +str_75 \N str_75 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] +77 77 \N [] +\N \N \N [] +str_79 \N str_79 [] +0 \N [] +1 \N [] +2 \N [] +3 \N [] +4 \N [] +5 \N [] +6 \N [] +7 \N [] +8 \N [] +9 \N [] +\N str_10 [] +10 \N [] +10 \N [] +\N \N [] +\N \N [] +\N str_11 [] +\N str_12 [] +12 \N [] +12 \N [] +\N str_13 [] +13 \N [] +13 \N [] +\N str_14 [] +14 \N [] +14 \N [] +\N str_15 [] +15 \N [] +15 \N [] +\N \N [] +\N \N [] +\N str_16 [] +\N str_17 [] +17 \N [] +17 \N [] +\N str_18 [] +18 \N [] +18 \N [] +\N str_19 [] +19 \N [] +19 \N [] +\N \N [20] +\N \N ['str_21','str_21'] +\N \N [22,22,22] +\N \N [23,23,23,23] +\N \N [24,24,24,24,24] +\N \N [25,25,25,25,25,25] +\N \N [26,26,26,26,26,26,26] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] +\N \N [29,29,29,29,29,29,29,29,29,29] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [40] +41 \N [] +\N \N [] +\N str_43 [] +\N \N [44,44,44,44,44] +45 \N [] +\N \N [] +\N str_47 [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] +\N \N [] +\N str_51 [] +\N \N [52,52,52] +53 \N [] +\N \N [] +\N str_55 [] +\N \N [56,56,56,56,56,56,56] +57 \N [] +\N \N [] +\N str_59 [] +\N \N [60] +61 \N [] +\N \N [] +\N str_63 [] +\N \N [64,64,64,64,64] +65 \N [] +\N \N [] +\N str_67 [] +\N \N [68,68,68,68,68,68,68,68,68] +69 \N [] +\N \N [] +\N str_71 [] +\N \N [NULL,NULL,NULL] +73 \N [] +\N \N [] +\N str_75 [] +\N \N [76,76,76,76,76,76,76] +77 \N [] +\N \N [] +\N str_79 [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +0 0 \N [] 0 [] +1 1 \N [] 0 [] +2 2 \N [] 0 [] +3 3 \N [] 0 [] +4 4 \N [] 0 [] +5 5 \N [] 0 [] +6 6 \N [] 0 [] +7 7 \N [] 0 [] +8 8 \N [] 0 [] +9 9 \N [] 0 [] +str_10 \N \N [] 0 [] +10 10 \N [] 0 [] +10 10 \N [] 0 [] +[[0,1]] \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_12 \N \N [] 0 [] +12 12 \N [] 0 [] +12 12 \N [] 0 [] +str_13 \N \N [] 0 [] +13 13 \N [] 0 [] +13 13 \N [] 0 [] +str_14 \N \N [] 0 [] +14 14 \N [] 0 [] +14 14 \N [] 0 [] +str_15 \N \N [] 0 [] +15 15 \N [] 0 [] +15 15 \N [] 0 [] +[[0,1,2,3,4,5,6]] \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_17 \N \N [] 0 [] +17 17 \N [] 0 [] +17 17 \N [] 0 [] +str_18 \N \N [] 0 [] +18 18 \N [] 0 [] +18 18 \N [] 0 [] +str_19 \N \N [] 0 [] +19 19 \N [] 0 [] +19 19 \N [] 0 [] +[20] \N \N [20] 1 [20] +['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] +[22,22,22] \N \N [22,22,22] 3 [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] 4 [23,23,23,23] +[24,24,24,24,24] \N \N 
[24,24,24,24,24] 5 [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +[40] \N \N [40] 1 [40] +41 41 \N [] 0 [] +\N \N \N [] 0 [] +str_43 \N \N [] 0 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] 5 [44,44,44,44,44] +45 45 \N [] 0 [] +\N \N \N [] 0 [] +str_47 \N \N [] 0 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 49 \N [] 0 [] +\N \N \N [] 0 [] +str_51 \N \N [] 0 [] +[52,52,52] \N \N [52,52,52] 3 [52,52,52] +53 53 \N [] 0 [] +\N \N \N [] 0 [] +str_55 \N \N [] 0 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] +57 57 \N [] 0 [] +\N \N \N [] 0 [] +str_59 \N \N [] 0 [] +[60] \N \N [60] 1 [60] +61 61 \N [] 0 [] +\N \N \N [] 0 [] +str_63 \N \N [] 0 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] 5 [64,64,64,64,64] +65 65 \N [] 0 [] +\N \N \N [] 0 [] +str_67 \N \N [] 0 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] +69 69 \N [] 0 [] +\N \N \N [] 0 [] +str_71 \N \N [] 0 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] +73 73 \N [] 0 [] +\N \N \N [] 0 [] +str_75 \N \N [] 0 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] +77 77 \N [] 0 [] +\N \N \N [] 0 [] +str_79 \N \N [] 0 [] +0 \N [] 0 [] [] +1 \N [] 0 [] [] +2 \N [] 0 [] [] +3 \N [] 0 [] [] +4 \N [] 0 [] [] +5 \N [] 0 [] [] +6 \N [] 0 [] [] +7 \N [] 0 [] [] +8 \N [] 0 [] [] +9 \N [] 0 [] [] +\N \N [] 0 [] [] +10 \N [] 0 [] [] +10 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +12 \N [] 0 [] [] +12 \N [] 0 [] [] +\N \N [] 0 [] [] +13 \N [] 0 [] [] +13 \N [] 0 [] [] +\N \N [] 0 [] [] +14 \N [] 0 [] [] +14 \N [] 0 [] [] +\N \N [] 0 [] [] +15 \N [] 0 [] [] +15 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +17 \N [] 0 [] [] +17 \N [] 0 [] [] +\N \N [] 0 [] [] +18 \N [] 0 [] [] +18 \N [] 0 [] [] +\N \N [] 0 [] [] +19 \N [] 0 [] [] +19 \N [] 0 [] [] +\N \N [20] 1 [20] [NULL] +\N \N ['str_21','str_21'] 2 [NULL,NULL] ['str_21','str_21'] +\N \N [22,22,22] 3 [22,22,22] [NULL,NULL,NULL] +\N \N [23,23,23,23] 4 [23,23,23,23] [NULL,NULL,NULL,NULL] +\N \N [24,24,24,24,24] 5 [24,24,24,24,24] [NULL,NULL,NULL,NULL,NULL] +\N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] [NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N 
[] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [40] 1 [40] [NULL] +41 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [44,44,44,44,44] 5 [44,44,44,44,44] [NULL,NULL,NULL,NULL,NULL] +45 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [52,52,52] 3 [52,52,52] [NULL,NULL,NULL] +53 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +57 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [60] 1 [60] [NULL] +61 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [64,64,64,64,64] 5 [64,64,64,64,64] [NULL,NULL,NULL,NULL,NULL] +65 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +69 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] [NULL,NULL,NULL] +73 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +77 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] 
+[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [20] +[] 0 [NULL,NULL] +[] 0 [22,22,22] +[] 0 [23,23,23,23] +[] 0 [24,24,24,24,24] +[] 0 [25,25,25,25,25,25] +[] 0 [26,26,26,26,26,26,26] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [28,28,28,28,28,28,28,28,28] +[] 0 [29,29,29,29,29,29,29,29,29,29] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [40] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [44,44,44,44,44] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [52,52,52] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [56,56,56,56,56,56,56] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [60] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [64,64,64,64,64] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [68,68,68,68,68,68,68,68,68] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [76,76,76,76,76,76,76] +[] 0 [] +[] 0 [] +[] 0 [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 new file mode 100644 index 00000000000..0c123d5f6fe --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 @@ -0,0 +1,46 @@ +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; + +{% for engine in ['Memory', 'MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1'] -%} + +create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; + +insert into test select number, number from numbers(10); +insert into test select number, 'str_' || toString(number) from numbers(10, 10); +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(20, 10); +insert into test select number, NULL from numbers(30, 10); +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 
== 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(40, 40); +insert into test select number, if(number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(10, 10); +insert into test select number, if(number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(10, 10); + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id, d; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id, d; +select d.Int8, d.Date, d.`Array(String)` from test order by id, d; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test order by id, d; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test order by id, d; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference new file mode 100644 index 00000000000..ca6c5dbba82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference @@ -0,0 +1,20 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +360000 +360000 +200000 +200000 +0 +0 +20000 +20000 +200000 +200000 +20000 +20000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql new file mode 100644 index 00000000000..61dc8fca01a --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql @@ -0,0 +1,43 @@ +-- Tags: long, 
no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; 
+select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql index d2c787040e5..fb23e15738e 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) 
engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,22 +12,37 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference index d0d777a5a38..b0be05f07a2 
100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql index f99bf771608..c098a3191e0 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,22 +12,36 @@ insert into 
test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) 
-100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql index be81596d043..17b1e451143 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,23 +12,36 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); -system start merges test; -optimize table 
test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; optimize table test final;; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false 
+70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql index f6396af42a8..fd6c0109263 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,22 +12,36 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), 
dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 b/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 index 96a854630ed..7d3bc371e36 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 +++ b/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 @@ -1,112 +1,264 @@ -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) 
-26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None 
false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 b/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 index 263e92be403..3778399d0a4 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 +++ b/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 @@ -9,7 +9,7 @@ drop table if exists test; 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1'] -%} -create table test (id UInt64, d Dynamic(max_types=3)) engine={{ engine }}; +create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; system stop merges test; insert into test select number, number from numbers(10); @@ -19,23 +19,37 @@ insert into test select number, toDate(number) from numbers(6); insert into test select number, toDateTime(number) from numbers(5); insert into test select number, NULL from numbers(10); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), 
dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(20); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(1); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(3); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 
Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql index 81888946681..8ba192cb5db 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id 
settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || 
dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- 
+13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql index ba58ca471a2..1ea7eefdd53 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a 
Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 index ae07c164074..3d7e8b60f73 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 @@ -1,84 +1,256 @@ -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a 
Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) 
false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +14 Tuple(a Dynamic(max_types=2)):DateTime true +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false 
+--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +14 Tuple(a Dynamic(max_types=2)):DateTime true +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String true +14 Tuple(a Dynamic(max_types=2)):DateTime false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 
false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String true +14 Tuple(a Dynamic(max_types=2)):DateTime false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 index 7828c2af49c..8682b6cef81 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 @@ -10,25 +10,41 @@ drop table if exists test; 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1'] -%} -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; +select 'test'; +create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; system stop merges test; -insert into test select number, number from numbers(10); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(10); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(5); +insert into test select number, number from numbers(20); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a 
Dynamic(max_types=2)) from numbers(20); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(10); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(20); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(5); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(20); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(10); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(40); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(8); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference 
b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date 
true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql index a53c5b0b2a5..c6a09036c30 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a 
Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false 
+66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql index 4256b010ec0..c1964c45d98 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from 
numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table 
test; diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference index 2ec301b747b..9386548c74d 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference @@ -1,13 +1,13 @@ initial insert alter add column 1 -3 None +3 None false 0 0 \N \N \N 0 1 1 \N \N \N 0 2 2 \N \N \N 0 insert after alter add column 1 -4 String -4 UInt64 -7 None +4 String false +4 UInt64 false +7 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 @@ -24,147 +24,158 @@ insert after alter add column 1 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 alter modify column 1 -7 None -8 String +4 String true +4 UInt64 true +7 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 insert after alter modify column 1 -8 None -11 String +1 Date true +5 String true +5 UInt64 true +8 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 alter modify column 2 -8 None -11 String +1 Date true +5 String true +5 UInt64 true +8 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 insert after alter modify column 2 -1 Date -1 UInt64 -9 None -12 String +1 String false +1 UInt64 false +2 Date true +5 String true +5 UInt64 true +9 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 19 19 \N \N \N \N 0 20 20 20 \N 20 \N 0 21 21 str_21 str_21 \N \N 0 22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 -1 Date -1 UInt64 -9 None -12 String +1 String false +1 UInt64 false +2 Date true +5 String true +5 UInt64 true +9 None false 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 
3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 -1 Date -1 UInt64 -12 None -12 String +1 String false +1 UInt64 false +2 Date true +5 String true +5 UInt64 true +12 None false 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql index 4ab700306d4..de05ba36177 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql @@ -9,7 +9,7 @@ insert into test select number, number from numbers(3); select 'alter add column 1'; alter table test add column d Dynamic(max_types=3) settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter add column 1'; @@ -17,37 +17,37 @@ insert into test select number, number, number from numbers(3, 3); insert into test select number, number, 'str_' || toString(number) from numbers(6, 3); insert into test select number, number, NULL from numbers(9, 3); insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'alter modify column 1'; -alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +alter table test modify column d Dynamic(max_types=0) settings mutations_sync=1; +select count(), dynamicType(d), 
isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter modify column 1'; insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'alter modify column 2'; -alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +alter table test modify column d Dynamic(max_types=2) settings mutations_sync=1; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter modify column 2'; insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'alter modify column 3'; alter table test modify column y Dynamic settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter modify column 3'; insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference index c592528c3cd..d7123288280 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference @@ -24,48 +24,28 @@ insert after alter add column 1 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 alter modify column 1 +4 String +4 UInt64 7 None -8 String 
0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -4 UInt64 -7 String +1 Date +5 String +5 UInt64 8 None 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 @@ -83,13 +63,37 @@ alter modify column 2 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 +18 18 1970-01-19 \N \N 1970-01-19 0 +alter modify column 2 1 Date +5 String 5 UInt64 -8 String +8 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +insert after alter modify column 2 +2 Date +6 String +6 UInt64 9 None 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 @@ -107,17 +111,17 @@ insert after alter modify column 2 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 19 19 \N \N \N \N 0 20 20 20 \N 20 \N 0 21 21 str_21 str_21 \N \N 0 22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 -1 Date -5 UInt64 -8 String +2 Date +6 String +6 UInt64 9 None 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 @@ -135,17 +139,17 @@ alter modify column 3 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 -1 Date -5 UInt64 -8 String +2 Date +6 String +6 UInt64 12 None 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 @@ -163,9 +167,9 @@ insert after alter modify column 3 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference index 2ec301b747b..d7123288280 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference +++ 
b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference @@ -24,147 +24,152 @@ insert after alter add column 1 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 alter modify column 1 +4 String +4 UInt64 7 None -8 String 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 1 Date -1 UInt64 -9 None -12 String +5 String +5 UInt64 +8 None 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +alter modify column 2 +1 Date +5 String +5 UInt64 +8 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +insert after alter modify column 2 +2 Date +6 String +6 UInt64 +9 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 19 19 \N \N \N \N 0 20 20 20 \N 20 \N 0 21 21 str_21 str_21 \N \N 0 22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 -1 Date -1 UInt64 +2 Date +6 String +6 UInt64 9 None -12 String 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 
0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 -1 Date -1 UInt64 +2 Date +6 String +6 UInt64 12 None -12 String 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 diff --git a/tests/queries/0_stateless/03041_dynamic_type_check_table.sh b/tests/queries/0_stateless/03041_dynamic_type_check_table.sh index c8bd533e253..da24b892cbd 100755 --- a/tests/queries/0_stateless/03041_dynamic_type_check_table.sh +++ b/tests/queries/0_stateless/03041_dynamic_type_check_table.sh @@ -13,7 +13,7 @@ function run() $CH_CLIENT -q "insert into test select number, number from numbers(3)" echo "alter add column" - $CH_CLIENT -q "alter table test add column d Dynamic(max_types=3) settings mutations_sync=1" + $CH_CLIENT -q "alter table test add column d Dynamic(max_types=2) settings mutations_sync=1" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.\`Tuple(a UInt64)\`.a from test order by x" diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference index 0b76d30953e..370e6352657 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference @@ -1,35 +1,35 @@ 1 2024-01-01 Date 2 1704056400 Decimal(18, 3) -3 1 String -4 2 String +3 1 Float32 +4 2 Float64 1 2024-01-01 Date 1 2024-01-01 Date 2 1704056400 Decimal(18, 3) 2 1704056400 Decimal(18, 3) -3 1 String -3 1 String -4 2 String -4 2 String - -1 2024-01-01 String -1 2024-01-01 String -2 1704056400 String -2 1704056400 String -3 1 String -3 1 String -4 2 String -4 2 String +3 1 Float32 +3 1 Float32 +4 2 Float64 +4 2 Float64 1 2024-01-01 Date -1 2024-01-01 String -1 2024-01-01 String +1 2024-01-01 Date 2 1704056400 Decimal(18, 3) -2 1704056400 String -2 1704056400 String -3 1 String -3 1 String -3 1 String -4 2 String -4 2 String -4 2 String +2 1704056400 Decimal(18, 3) +3 1 Float32 +3 1 Float32 +4 2 Float64 +4 2 Float64 + +1 2024-01-01 Date +1 2024-01-01 Date +1 2024-01-01 Date +2 1704056400 Decimal(18, 3) +2 1704056400 Decimal(18, 3) +2 1704056400 Decimal(18, 3) +3 1 Float32 +3 1 Float32 +3 1 Float32 +4 2 Float64 +4 2 Float64 +4 2 Float64 diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql 
b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql index ad5ea9512c6..71d5dd4abd1 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql @@ -1,5 +1,6 @@ SET allow_experimental_dynamic_type=1; +DROP TABLE IF EXISTS null_table; CREATE TABLE null_table ( n1 UInt8, @@ -7,9 +8,11 @@ CREATE TABLE null_table ) ENGINE = Null; +DROP VIEW IF EXISTS dummy_rmv; CREATE MATERIALIZED VIEW dummy_rmv TO to_table AS SELECT * FROM null_table; +DROP TABLE IF EXISTS to_table; CREATE TABLE to_table ( n1 UInt8, @@ -32,3 +35,7 @@ select ''; ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=10); INSERT INTO null_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; + +DROP TABLE null_table; +DROP VIEW dummy_rmv; +DROP TABLE to_table; diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference index d96fbf658d8..2d3b2f118f6 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference @@ -1,26 +1,26 @@ -1 2024-01-01 Date -2 1704056400 String -3 1 String -4 2 String +1 2024-01-01 Date false +2 1704056400 Decimal(18, 3) false +3 1 Float32 true +4 2 Float64 true -1 2024-01-01 Date -1 2024-01-01 Date -2 1704056400 Decimal(18, 3) -2 1704056400 String -3 1 Float32 -3 1 String -4 2 Float64 -4 2 String +1 2024-01-01 Date false +1 2024-01-01 Date false +2 1704056400 Decimal(18, 3) false +2 1704056400 Decimal(18, 3) false +3 1 Float32 false +3 1 Float32 false +4 2 Float64 false +4 2 Float64 false -1 2024-01-01 String -1 2024-01-01 String -1 2024-01-01 String -2 1704056400 String -2 1704056400 String -2 1704056400 String -3 1 String -3 1 String -3 1 String -4 2 String -4 2 String -4 2 String +1 2024-01-01 Date true +1 2024-01-01 Date true +1 2024-01-01 Date true +2 1704056400 Decimal(18, 3) true +2 1704056400 Decimal(18, 3) true +2 1704056400 Decimal(18, 3) true +3 1 Float32 true +3 1 Float32 true +3 1 Float32 true +4 2 Float64 true +4 2 Float64 true +4 2 Float64 true diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql index 632f3504fdb..e476d34a1db 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql @@ -11,16 +11,16 @@ CREATE TABLE to_table ENGINE = MergeTree ORDER BY n1; INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); -SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; select ''; ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=5); INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); -SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; select ''; -ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=1); +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=0); INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, 
toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); -SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=500); -- { serverError UNEXPECTED_AST_STRUCTURE } diff --git a/tests/queries/0_stateless/03152_dynamic_type_simple.reference b/tests/queries/0_stateless/03152_dynamic_type_simple.reference index 5f243209ff3..e508bdd1990 100644 --- a/tests/queries/0_stateless/03152_dynamic_type_simple.reference +++ b/tests/queries/0_stateless/03152_dynamic_type_simple.reference @@ -7,7 +7,7 @@ string2 String \N None 42 Int64 42 Int64 string String -string String [1, 2] String +string String [1,2] Array(Int64) [1,2] Array(Int64) \N None ┌─d────────────────────────┬─dynamicType(d)─┬─d.Int64─┬─d.String─┬─────d.Date─┬─d.Float64─┬──────────d.DateTime─┬─d.Array(Int64)─┬─d.Array(String)──────────┠1. │ 42 │ Int64 │ 42 │ á´ºáµá´¸á´¸ │ á´ºáµá´¸á´¸ │ á´ºáµá´¸á´¸ │ á´ºáµá´¸á´¸ │ [] │ [] │ diff --git a/tests/queries/0_stateless/03152_dynamic_type_simple.sql b/tests/queries/0_stateless/03152_dynamic_type_simple.sql index fd5328faf15..ed24b213b1c 100644 --- a/tests/queries/0_stateless/03152_dynamic_type_simple.sql +++ b/tests/queries/0_stateless/03152_dynamic_type_simple.sql @@ -1,14 +1,17 @@ SET allow_experimental_dynamic_type=1; +DROP TABLE IF EXISTS test_max_types; CREATE TABLE test_max_types (d Dynamic(max_types=5)) ENGINE = Memory; INSERT INTO test_max_types VALUES ('string1'), (42), (3.14), ([1, 2]), (toDate('2021-01-01')), ('string2'); SELECT d, dynamicType(d) FROM test_max_types; SELECT ''; +DROP TABLE IF EXISTS test_nested_dynamic; CREATE TABLE test_nested_dynamic (d1 Dynamic, d2 Dynamic(max_types=2)) ENGINE = Memory; INSERT INTO test_nested_dynamic VALUES (NULL, 42), (42, 'string'), ('string', [1, 2]), ([1, 2], NULL); SELECT d1, dynamicType(d1), d2, dynamicType(d2) FROM test_nested_dynamic; +DROP TABLE IF EXISTS test_rapid_schema; CREATE TABLE test_rapid_schema (d Dynamic) ENGINE = Memory; INSERT INTO test_rapid_schema VALUES (42), ('string1'), (toDate('2021-01-01')), ([1, 2, 3]), (3.14), ('string2'), (toDateTime('2021-01-01 12:00:00')), (['array', 'of', 'strings']), (NULL), (toFloat64(42.42)); @@ -27,3 +30,8 @@ FROM FROM numbers(10000) ) ); + +DROP TABLE test_max_types; +DROP TABLE test_nested_dynamic; +DROP TABLE test_rapid_schema; + diff --git a/tests/queries/0_stateless/03153_dynamic_type_empty.sql b/tests/queries/0_stateless/03153_dynamic_type_empty.sql index 8e942fe6f6e..3a0c98e63ee 100644 --- a/tests/queries/0_stateless/03153_dynamic_type_empty.sql +++ b/tests/queries/0_stateless/03153_dynamic_type_empty.sql @@ -1,5 +1,7 @@ SET allow_experimental_dynamic_type=1; +DROP TABLE IF EXISTS test_null_empty; CREATE TABLE test_null_empty (d Dynamic) ENGINE = Memory; INSERT INTO test_null_empty VALUES ([]), ([1]), ([]), (['1']), ([]), (()),((1)), (()), (('1')), (()), ({}), ({1:2}), ({}), ({'1':'2'}), ({}); SELECT d, dynamicType(d) FROM test_null_empty; +DROP TABLE test_null_empty; diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index d302205ca23..fffea1bd0f5 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -6,7 +6,7 @@ SET allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t (d Dynamic(max_types=255)) ENGINE = Memory; 
+CREATE TABLE t (d Dynamic(max_types=254)) ENGINE = Memory; -- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); @@ -84,7 +84,7 @@ INSERT INTO t VALUES ([(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, [ SELECT dynamicType(d), d FROM t ORDER BY substring(dynamicType(d),1,1), length(dynamicType(d)), d; -CREATE TABLE t2 (d Dynamic(max_types=255)) ENGINE = Memory; +CREATE TABLE t2 (d Dynamic(max_types=254)) ENGINE = Memory; INSERT INTO t2 SELECT * FROM t; SELECT ''; diff --git a/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh b/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh index 9b57e5c8718..b9bab2bd70b 100755 --- a/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh +++ b/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "drop table if exists test" -$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 -q "create table test (id UInt64, d Dynamic(max_types=255)) engine=Memory" +$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 -q "create table test (id UInt64, d Dynamic(max_types=254)) engine=Memory" $CLICKHOUSE_CLIENT -q "insert into test select 0, NULL" $CLICKHOUSE_CLIENT -q "insert into test select 1, materialize(42)::UInt8" @@ -58,6 +58,6 @@ $CLICKHOUSE_CLIENT -q "insert into test select 47, materialize([[(20, 20), (50, $CLICKHOUSE_CLIENT -q "insert into test select 48, materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]])::MultiPolygon" $CLICKHOUSE_CLIENT -q "insert into test select 49, materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])" -$CLICKHOUSE_CLIENT -q "select * from test format RowBinary" | $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --input-format RowBinary --structure 'id UInt64, d Dynamic(max_types=255)' -q "select d, dynamicType(d) from table order by id" +$CLICKHOUSE_CLIENT -q "select * from test format RowBinary" | $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --input-format RowBinary --structure 'id UInt64, d Dynamic(max_types=254)' -q "select d, dynamicType(d) from table order by id" $CLICKHOUSE_CLIENT -q "drop table test" diff --git a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference index 6d2c1334d6e..8d2470dea44 100644 --- a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference +++ b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference @@ -1,10 +1,10 @@ -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 diff --git a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql index 25f3bb0f4c8..939b49e1599 100644 --- a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql +++ b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql @@ -1,6 +1,8 @@ set allow_experimental_dynamic_type=1; +drop table if exists test; create table test (d Dynamic) engine=Memory; insert into table test select * from numbers(5); -alter table test modify column d 
Dynamic(max_types=1); +alter table test modify column d Dynamic(max_types=0); select d.UInt64 from test settings enable_analyzer=1; select d.UInt64 from test settings enable_analyzer=0; +drop table test; From c1c32daf01bba08129dd17e2b3a108cd7e837528 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 9 Aug 2024 21:59:40 +0000 Subject: [PATCH 2015/2361] Fix tests --- .../Serializations/SerializationDynamic.cpp | 2 +- tests/queries/0_stateless/00000_test.sql | 43 ------------------- ...native_with_binary_encoded_types.reference | 4 +- ...ry_and_native_with_binary_encoded_types.sh | 6 +-- 4 files changed, 6 insertions(+), 49 deletions(-) delete mode 100644 tests/queries/0_stateless/00000_test.sql diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 67b29750948..5fadb6e4de4 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -130,7 +130,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( for (const auto & variant: variants) { if (variant->getName() != ColumnDynamic::getSharedVariantTypeName()) - encodeDataType(dynamic_state->variant_type, *stream); + encodeDataType(variant, *stream); } } else diff --git a/tests/queries/0_stateless/00000_test.sql b/tests/queries/0_stateless/00000_test.sql deleted file mode 100644 index db9dd774484..00000000000 --- a/tests/queries/0_stateless/00000_test.sql +++ /dev/null @@ -1,43 +0,0 @@ -set allow_experimental_variant_type = 1; -set use_variant_as_common_type = 1; -set allow_experimental_dynamic_type = 1; -set enable_named_columns_in_function_tuple = 0; -drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; - - -system stop merges test; -insert into test select number, number from numbers(10); -insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(10); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(5); -insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=3)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(10); - -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; -system start merges test; -optimize table test final; -select '---------------------'; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; - -system stop merges test; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(5); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(20); - -select '---------------------'; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, 
isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; -system start merges test; -optimize table test final; -select '---------------------'; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; - -system stop merges test; -insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=3)) from numbers(4); - -select '---------------------'; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; -system start merges test; -optimize table test final; -select '---------------------'; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type, isDynamicElementInSharedVariant(d.`Tuple(a Dynamic(max_types=3))`.a) as flag from test group by type, flag order by count(), type; - -drop table test; - diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference index 1ba147f9627..7de0804e0f2 100644 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference @@ -110,5 +110,5 @@ true Bool [{42:(1,[(2,{1:2})])}] Dynamic [{42:(1,[(2,{1:2})])}] Dynamic(max_types=10) [{42:(1,[(2,{1:2})])}] Dynamic(max_types=10) -[{42:(1,[(2,{1:2})])}] Dynamic(max_types=255) -[{42:(1,[(2,{1:2})])}] Dynamic(max_types=255) +[{42:(1,[(2,{1:2})])}] Dynamic(max_types=254) +[{42:(1,[(2,{1:2})])}] Dynamic(max_types=254) diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh index 0c585d36348..1e674a29072 100755 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh @@ -6,8 +6,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test { - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --stacktrace --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --stacktrace 
--allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" } test "materialize(42)::UInt8" @@ -66,4 +66,4 @@ test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])" test "materialize(42::UInt32)::Variant(UInt32, String, Tuple(a UInt32, b Array(Map(String, String))))" test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic" test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic(max_types=10)" -test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic(max_types=255)" +test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic(max_types=254)" From cebcc88b312b0702de9866c229e67097c738c4d1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2024 00:18:54 +0200 Subject: [PATCH 2016/2361] Fix build --- src/IO/tests/gtest_s3_uri.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp index 7216c8077e3..c0bf7fcb28a 100644 --- a/src/IO/tests/gtest_s3_uri.cpp +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -218,4 +218,5 @@ TEST(S3UriTest, versionIdChecks) } } +} #endif From c716315c3fdfd57719daf0f7f42b786afe6e68af Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2024 00:20:46 +0200 Subject: [PATCH 2017/2361] Annotations --- .../0_stateless/00375_shard_group_uniq_array_of_string.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql index 8db91904a6a..c8a243d9b27 100644 --- a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql +++ b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql @@ -1,4 +1,4 @@ --- Tags: shard +-- Tags: shard, long DROP TABLE IF EXISTS group_uniq_str; CREATE TABLE group_uniq_str ENGINE = Memory AS SELECT number % 10 as id, toString(intDiv((number%10000), 10)) as v FROM system.numbers LIMIT 10000000; From 805a2e33bfedbdfc2393217e7b485761b1943e1d Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 10 Aug 2024 00:47:43 +0000 Subject: [PATCH 2018/2361] Fix unit tests --- src/Columns/tests/gtest_column_dynamic.cpp | 74 +++++++++++----------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/src/Columns/tests/gtest_column_dynamic.cpp b/src/Columns/tests/gtest_column_dynamic.cpp index 5445bd525d9..de76261229d 100644 --- a/src/Columns/tests/gtest_column_dynamic.cpp +++ b/src/Columns/tests/gtest_column_dynamic.cpp @@ -7,7 +7,7 @@ using namespace DB; TEST(ColumnDynamic, CreateEmpty) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); ASSERT_TRUE(column->empty()); ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)"); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1); @@ -19,7 +19,7 @@ TEST(ColumnDynamic, CreateEmpty) TEST(ColumnDynamic, InsertDefault) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); column->insertDefault(); ASSERT_TRUE(column->size() == 1); ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)"); @@ -34,7 +34,7 @@ TEST(ColumnDynamic, InsertDefault) TEST(ColumnDynamic, InsertFields) { - auto column = ColumnDynamic::create(255); + auto column = 
ColumnDynamic::create(254); column->insert(Field(42)); column->insert(Field(-42)); column->insert(Field("str1")); @@ -56,7 +56,7 @@ TEST(ColumnDynamic, InsertFields) ColumnDynamic::MutablePtr getDynamicWithManyVariants(size_t num_variants, Field tuple_element = Field(42)) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); for (size_t i = 0; i != num_variants; ++i) { Tuple tuple; @@ -136,7 +136,7 @@ TEST(ColumnDynamic, InsertFieldsOverflow2) ColumnDynamic::MutablePtr getInsertFromColumn(size_t num = 1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); for (size_t i = 0; i != num; ++i) { column_from->insert(Field(42)); @@ -170,13 +170,13 @@ void checkInsertFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynami TEST(ColumnDynamic, InsertFrom1) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertFrom2) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); @@ -186,7 +186,7 @@ TEST(ColumnDynamic, InsertFrom2) TEST(ColumnDynamic, InsertFrom3) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); @@ -197,7 +197,7 @@ TEST(ColumnDynamic, InsertFrom3) TEST(ColumnDynamic, InsertFromOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -227,7 +227,7 @@ TEST(ColumnDynamic, InsertFromOverflow1) TEST(ColumnDynamic, InsertFromOverflow2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); @@ -251,7 +251,7 @@ TEST(ColumnDynamic, InsertFromOverflow3) column_from->insert(Field(42)); column_from->insert(Field(42.42)); - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(41)); column_to->insertFrom(*column_from, 0); @@ -297,13 +297,13 @@ void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDy TEST(ColumnDynamic, InsertManyFrom1) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertManyFrom2) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); @@ -313,7 +313,7 @@ TEST(ColumnDynamic, InsertManyFrom2) TEST(ColumnDynamic, InsertManyFrom3) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); @@ -324,7 +324,7 @@ TEST(ColumnDynamic, InsertManyFrom3) 
TEST(ColumnDynamic, InsertManyFromOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -361,7 +361,7 @@ TEST(ColumnDynamic, InsertManyFromOverflow1) TEST(ColumnDynamic, InsertManyFromOverflow2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); @@ -393,7 +393,7 @@ TEST(ColumnDynamic, InsertManyFromOverflow3) column_from->insert(Field(42)); column_from->insert(Field(42.42)); - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(41)); column_to->insertManyFrom(*column_from, 0, 2); @@ -441,13 +441,13 @@ void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnD TEST(ColumnDynamic, InsertRangeFrom1) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertRangeFrom2) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str1")); @@ -457,7 +457,7 @@ TEST(ColumnDynamic, InsertRangeFrom2) TEST(ColumnDynamic, InsertRangeFrom3) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str1")); @@ -468,7 +468,7 @@ TEST(ColumnDynamic, InsertRangeFrom3) TEST(ColumnDynamic, InsertRangeFromOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -494,7 +494,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow1) TEST(ColumnDynamic, InsertRangeFromOverflow2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -516,7 +516,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow2) TEST(ColumnDynamic, InsertRangeFromOverflow3) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -539,7 +539,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow3) TEST(ColumnDynamic, InsertRangeFromOverflow4) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -561,7 +561,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow4) TEST(ColumnDynamic, InsertRangeFromOverflow5) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -587,7 +587,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow5) TEST(ColumnDynamic, InsertRangeFromOverflow6) { - auto column_from = ColumnDynamic::create(255); + auto column_from = 
ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(44)); @@ -619,7 +619,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6) TEST(ColumnDynamic, InsertRangeFromOverflow7) { - auto column_from = ColumnDynamic::create(3); + auto column_from = ColumnDynamic::create(2); column_from->insert(Field(42.42)); column_from->insert(Field("str1")); column_from->insert(Field(42)); @@ -629,7 +629,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow7) column_from->insert(Field("str2")); column_from->insert(Field(Array({Field(42)}))); - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insertRangeFrom(*column_from, 0, 8); @@ -659,7 +659,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow7) TEST(ColumnDynamic, InsertRangeFromOverflow8) { - auto column_from = ColumnDynamic::create(3); + auto column_from = ColumnDynamic::create(2); column_from->insert(Field(42.42)); column_from->insert(Field("str1")); column_from->insert(Field(42)); @@ -669,7 +669,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow8) column_from->insert(Field("str2")); column_from->insert(Field(Array({Field(42)}))); - auto column_to = ColumnDynamic::create(3); + auto column_to = ColumnDynamic::create(2); column_to->insert(Field(42)); column_from->insert(Field("str1")); @@ -711,7 +711,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow9) column_from->insert(Field("str2")); column_from->insert(Field(Array({Field(42)}))); - auto column_to = ColumnDynamic::create(3); + auto column_to = ColumnDynamic::create(2); column_to->insert(Field(42)); column_to->insertRangeFrom(*column_from, 0, 9); @@ -743,7 +743,7 @@ TEST(ColumnDynamic, InsertRangeFromOverflow9) TEST(ColumnDynamic, SerializeDeserializeFromArena1) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); column->insert(Field(42)); column->insert(Field(42.42)); column->insert(Field("str")); @@ -768,7 +768,7 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena1) TEST(ColumnDynamic, SerializeDeserializeFromArena2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -781,7 +781,7 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena2) column_from->serializeValueIntoArena(2, arena, pos); column_from->serializeValueIntoArena(3, arena, pos); - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); pos = column_to->deserializeAndInsertFromArena(ref1.data); pos = column_to->deserializeAndInsertFromArena(pos); pos = column_to->deserializeAndInsertFromArena(pos); @@ -800,7 +800,7 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena2) TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -846,7 +846,7 @@ TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow2) column_from->serializeValueIntoArena(3, arena, pos); column_from->serializeValueIntoArena(4, arena, pos); - auto column_to = ColumnDynamic::create(3); + auto column_to = ColumnDynamic::create(2); column_to->insert(Field(42.42)); pos = column_to->deserializeAndInsertFromArena(ref1.data); pos = column_to->deserializeAndInsertFromArena(pos); @@ -882,7 +882,7 @@ TEST(ColumnDynamic, 
skipSerializedInArena) auto ref4 = column_from->serializeValueIntoArena(3, arena, pos); const char * end = ref4.data + ref4.size; - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); pos = column_to->skipSerializedInArena(ref1.data); pos = column_to->skipSerializedInArena(pos); pos = column_to->skipSerializedInArena(pos); From c26b3cb4452931ee3bb3355b47dafb364744c9ab Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 10 Aug 2024 02:27:23 +0000 Subject: [PATCH 2019/2361] handle the case of packed storage --- src/Storages/MergeTree/MutateTask.cpp | 11 ++++++++--- .../03161_lightweight_delete_projection.sql | 4 +++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 3d9f49c9a7a..0f0428287b6 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1554,6 +1554,10 @@ private: removed_projections.insert(command.column_name); } + bool lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + bool lightweight_delete_drop = lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP; + const auto & projections = ctx->metadata_snapshot->getProjections(); for (const auto & projection : projections) { @@ -1561,10 +1565,11 @@ private: continue; bool need_recalculate = - ctx->materialized_projections.contains(projection.name) + (ctx->materialized_projections.contains(projection.name) || (!is_full_part_storage && ctx->source_part->hasProjection(projection.name) - && !ctx->source_part->hasBrokenProjection(projection.name)); + && !ctx->source_part->hasBrokenProjection(projection.name))) + && !lightweight_delete_drop; if (need_recalculate) { @@ -1572,7 +1577,7 @@ private: } else { - if (!ctx->updated_header.has(RowExistsColumn::name) && ctx->source_part->checksums.has(projection.getDirectoryName())) + if (!lightweight_delete_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) entries_to_hardlink.insert(projection.getDirectoryName()); } } diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index 0b05326e2c1..da6427cbf22 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,5 +1,7 @@ +-- For cloud version, should also consider min_bytes_for_full_part_storage since packed storage exists, +-- but for less redundancy, just let CI test the parameter. 
-SET max_threads = 1, lightweight_deletes_sync = 2, alter_sync = 2; +SET lightweight_deletes_sync = 2, alter_sync = 2; DROP TABLE IF EXISTS users_compact; From 9b9fff4232d80e579b1d23ced8bfbb1b2c5e2147 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Sat, 10 Aug 2024 08:48:08 +0200 Subject: [PATCH 2020/2361] Push CI From e582118544f3c49c3c6600ac8fa252151714d25f Mon Sep 17 00:00:00 2001 From: Blargian Date: Sat, 10 Aug 2024 13:09:40 +0200 Subject: [PATCH 2021/2361] review changes --- .../functions/type-conversion-functions.md | 916 +++++++++++++++--- 1 file changed, 801 insertions(+), 115 deletions(-) diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 8e72fea7fdb..5c06e72f977 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -64,9 +64,8 @@ toInt8(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -251,9 +250,8 @@ toInt8OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int8` is unsuccessful. [Int8](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -314,9 +312,8 @@ toInt16(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -501,9 +498,8 @@ toInt16OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int16` is unsuccessful. [Int16](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -564,9 +560,8 @@ toInt32(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -750,9 +745,8 @@ toInt32OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int32` is unsuccessful. 
[Int32](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -813,9 +807,8 @@ toInt64(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported types: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1000,9 +993,8 @@ toInt64OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int64` is unsuccessful. [Int64](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1063,9 +1055,8 @@ toInt128(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1312,9 +1303,8 @@ toInt256(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1498,9 +1488,8 @@ toInt256OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int256` is unsuccessful. [Int256](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf` @@ -1561,9 +1550,8 @@ toUInt8(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1748,9 +1736,8 @@ toUInt8OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt8` is unsuccessful. [UInt8](../data-types/int-uint.md). 
Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1811,9 +1798,8 @@ toUInt16(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1998,9 +1984,8 @@ toUInt16OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt16` is unsuccessful. [UInt16](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2061,9 +2046,8 @@ toUInt32(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2249,9 +2233,8 @@ toUInt32OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt32` is unsuccessful. [UInt32](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2312,9 +2295,8 @@ toUInt64(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported types: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2499,9 +2481,8 @@ toUInt64OrDefault(expr[, default]) - `defauult` (optional) — The default value to return if parsing to type `UInt64` is unsuccessful. [UInt64](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2562,9 +2543,8 @@ toUInt128(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). 
Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2811,9 +2791,8 @@ toUInt256(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2997,9 +2976,8 @@ toUInt256OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt256` is unsuccessful. [UInt256](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf` @@ -3555,7 +3533,7 @@ toDecimal32(expr, S) **Arguments** - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). -- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). Supported arguments: - Values or string representations of type (U)Int8/16/32/64/128/256. @@ -3566,7 +3544,7 @@ Unsupported arguments: - String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32('0xc0fe', 1);`. :::note -Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. Excessive digits in a fraction are discarded (not rounded). Excessive digits in the integer part will lead to an exception. ::: @@ -3619,20 +3597,20 @@ toDecimal32OrZero(expr, S) **Arguments** - `expr` — A String representation of a number. [String](../data-types/string.md). -- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). Supported arguments: - String representations of type (U)Int8/16/32/64/128/256. - String representations of type Float32/64. Unsupported arguments: -- String representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of Float32/64 values `NaN` and `Inf`. - String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrZero('0xc0fe', 1);`. :::note -Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. 
Excessive digits in a fraction are discarded (not rounded). -Excessive digits in the integer part will lead to an exception. +Excessive digits in the integer part will lead to an error. ::: **Returned value** @@ -3645,8 +3623,10 @@ Query: ``` sql SELECT - toDecimal32OrZero(toString(-1.111), 5) AS val, - toTypeName(val) + toDecimal32OrZero(toString(-1.111), 5) AS a, + toTypeName(a), + toDecimal32OrZero(toString('Inf'), 5) as b, + toTypeName(b) FORMAT Vertical; ``` @@ -3655,26 +3635,10 @@ Result: ```response Row 1: ────── -val: -1.111 -toTypeName(val): Decimal(9, 5) -``` - -Query: - -``` sql -SELECT - toDecimal32OrZero(toString(-1.111), 2) AS val, - toTypeName(val) -FORMAT Vertical; -``` - -Result: - -```response -Row 1: -────── -val: -1.11 -toTypeName(val): Decimal(9, 2) +a: -1.111 +toTypeName(a): Decimal(9, 5) +b: 0 +toTypeName(b): Decimal(9, 5) ``` **See also** @@ -3696,20 +3660,20 @@ toDecimal32OrNull(expr, S) **Arguments** - `expr` — A String representation of a number. [String](../data-types/string.md). -- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). Supported arguments: - String representations of type (U)Int8/16/32/64/128/256. - String representations of type Float32/64. Unsupported arguments: -- String representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of Float32/64 values `NaN` and `Inf`. - String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrNull('0xc0fe', 1);`. :::note -Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. Excessive digits in a fraction are discarded (not rounded). -Excessive digits in the integer part will lead to an exception. +Excessive digits in the integer part will lead to an error. ::: **Returned value** @@ -3722,8 +3686,10 @@ Query: ``` sql SELECT - toDecimal32OrNull(toString(-1.111), 5) AS val, - toTypeName(val) + toDecimal32OrNull(toString(-1.111), 5) AS a, + toTypeName(a), + toDecimal32OrNull(toString('Inf'), 5) as b, + toTypeName(b) FORMAT Vertical; ``` @@ -3732,26 +3698,10 @@ Result: ```response Row 1: ────── -val: -1.111 -toTypeName(val): Nullable(Decimal(9, 5)) -``` - -Query: - -``` sql -SELECT - toDecimal32OrNull(toString(-1.111), 2) AS val, - toTypeName(val) -FORMAT Vertical; -``` - -Result: - -```response -Row 1: -────── -val: -1.11 -toTypeName(val): Nullable(Decimal(9, 2)) +a: -1.111 +toTypeName(a): Nullable(Decimal(9, 5)) +b: á´ºáµá´¸á´¸ +toTypeName(b): Nullable(Decimal(9, 5)) ``` **See also** @@ -3773,21 +3723,21 @@ toDecimal32OrDefault(expr, S[, default]) **Arguments** - `expr` — A String representation of a number. [String](../data-types/string.md). -- `S` — Scale parameter from [ 1 : 9 ] specifying how many decimal digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). -- `default` (optional) — The default value to return if parsing to type `Decimal32(S)` is unsuccessful. [Decimal32(S](../data-types/decimal.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). 
+- `default` (optional) — The default value to return if parsing to type `Decimal32(S)` is unsuccessful. [Decimal32(S)](../data-types/decimal.md). Supported arguments: - String representations of type (U)Int8/16/32/64/128/256. - String representations of type Float32/64. Unsupported arguments: -- String representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of Float32/64 values `NaN` and `Inf`. - String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrDefault('0xc0fe', 1);`. :::note -Integer overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. Excessive digits in a fraction are discarded (not rounded). -Excessive digits in the integer part will lead to an exception. +Excessive digits in the integer part will lead to an error. ::: **Returned value** @@ -3800,8 +3750,10 @@ Query: ``` sql SELECT - toDecimal32OrDefault(toString(-1.111), 5) AS val, - toTypeName(val) + toDecimal32OrDefault(toString(0.0001), 5) AS a, + toTypeName(a), + toDecimal32OrDefault('Inf', 0, CAST('-1', 'Decimal32(0)')) AS b, + toTypeName(b) FORMAT Vertical; ``` @@ -3810,16 +3762,125 @@ Result: ```response Row 1: ────── -val: -1.111 -toTypeName(val): Decimal(9, 5) +a: 0.0001 +toTypeName(a): Decimal(9, 5) +b: -1 +toTypeName(b): Decimal(9, 0) ``` +**See also** + +- [`toDecimal32`](#todecimal32). +- [`toDecimal32OrZero`](#todecimal32orzero). +- [`toDecimal32OrNull`](#todecimal32ornull). + +## toDecimal64 + +Converts an input value to a value of type [`Decimal(18, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error. + +**Syntax** + +```sql +toDecimal64(expr, S) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. +- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(18, S)`. [Decimal64(S)](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toDecimal64(2, 1) AS a, toTypeName(a) AS type_a, + toDecimal64(4.2, 2) AS b, toTypeName(b) AS type_b, + toDecimal64('4.2', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 2 +type_a: Decimal(18, 1) +b: 4.2 +type_b: Decimal(18, 2) +c: 4.2 +type_c: Decimal(18, 3) +``` + +**See also** + +- [`toDecimal64OrZero`](#todecimal64orzero). +- [`toDecimal64OrNull`](#todecimal64ornull). +- [`toDecimal64OrDefault`](#todecimal64ordefault). 
+ +## toDecimal64OrZero + +Like [`toDecimal64`](#todecimal64), this function converts an input value to a value of type [Decimal(18, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal64OrZero(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(18, S)` if successful, otherwise `0` with `S` decimal places. [Decimal64(S)](../data-types/decimal.md). + +**Example** + Query: ``` sql SELECT - toDecimal32OrDefault(toString(-1.111), 2) AS val, - toTypeName(val) + toDecimal64OrZero(toString(0.0001), 18) AS a, + toTypeName(a), + toDecimal64OrZero(toString('Inf'), 18) as b, + toTypeName(b) FORMAT Vertical; ``` @@ -3828,16 +3889,61 @@ Result: ```response Row 1: ────── -val: -1.11 -toTypeName(val): Decimal(9, 2) +a: 0.0001 +toTypeName(a): Decimal(18, 18) +b: 0 +toTypeName(b): Decimal(18, 18) ``` +**See also** + +- [`toDecimal64`](#todecimal64). +- [`toDecimal64OrNull`](#todecimal64ornull). +- [`toDecimal64OrDefault`](#todecimal64ordefault). + +## toDecimal64OrNull + +Like [`toDecimal64`](#todecimal64), this function converts an input value to a value of type [Nullable(Decimal(18, S))](../data-types/decimal.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toDecimal64OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64OrNull('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(18, S))` if successful, otherwise value `NULL` of the same type. [Decimal64(S)](../data-types/decimal.md).
+
+**Examples**
+
 Query:
 
 ``` sql
 SELECT
-    toDecimal32OrDefault('Inf', 2, CAST('0', 'Decimal32(2)')) AS val,
-    toTypeName(val)
+    toDecimal64OrNull(toString(0.0001), 18) AS a,
+    toTypeName(a),
+    toDecimal64OrNull(toString('Inf'), 18) as b,
+    toTypeName(b)
 FORMAT Vertical;
 ```
 
@@ -3846,10 +3952,590 @@ Result:
 
 ```response
 Row 1:
 ──────
-val: 0
-toTypeName(val): Decimal(9, 2)
+a: 0.0001
+toTypeName(a): Nullable(Decimal(18, 18))
+b: ᴺᵁᴸᴸ
+toTypeName(b): Nullable(Decimal(18, 18))
 ```
 
+**See also**
+
+- [`toDecimal64`](#todecimal64).
+- [`toDecimal64OrZero`](#todecimal64orzero).
+- [`toDecimal64OrDefault`](#todecimal64ordefault).
+
+## toDecimal64OrDefault
+
+Like [`toDecimal64`](#todecimal64), this function converts an input value to a value of type [Decimal(18, S)](../data-types/decimal.md) but returns the default value in case of an error.
+
+**Syntax**
+
+```sql
+toDecimal64OrDefault(expr, S[, default])
+```
+
+**Arguments**
+
+- `expr` — A String representation of a number. [String](../data-types/string.md).
+- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md).
+- `default` (optional) — The default value to return if parsing to type `Decimal64(S)` is unsuccessful. [Decimal64(S)](../data-types/decimal.md).
+
+Supported arguments:
+- String representations of type (U)Int8/16/32/64/128/256.
+- String representations of type Float32/64.
+
+Unsupported arguments:
+- String representations of Float32/64 values `NaN` and `Inf`.
+- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64OrDefault('0xc0fe', 1);`.
+
+:::note
+An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`.
+Excessive digits in a fraction are discarded (not rounded).
+Excessive digits in the integer part will lead to an error.
+:::
+
+**Returned value**
+
+- Value of type `Decimal(18, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal64(S)](../data-types/decimal.md).
+
+**Examples**
+
+Query:
+
+``` sql
+SELECT
+    toDecimal64OrDefault(toString(0.0001), 18) AS a,
+    toTypeName(a),
+    toDecimal64OrDefault('Inf', 0, CAST('-1', 'Decimal64(0)')) AS b,
+    toTypeName(b)
+FORMAT Vertical;
+```
+
+Result:
+
+```response
+Row 1:
+──────
+a: 0.0001
+toTypeName(a): Decimal(18, 18)
+b: -1
+toTypeName(b): Decimal(18, 0)
+```
+
+**See also**
+
+- [`toDecimal64`](#todecimal64).
+- [`toDecimal64OrZero`](#todecimal64orzero).
+- [`toDecimal64OrNull`](#todecimal64ornull).
+
+## toDecimal128
+
+Converts an input value to a value of type [`Decimal(38, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error.
+
+**Syntax**
+
+```sql
+toDecimal128(expr, S)
+```
+
+**Arguments**
+
+- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions).
+- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md).
+
+Supported arguments:
+- Values or string representations of type (U)Int8/16/32/64/128/256.
+- Values or string representations of type Float32/64.
+
+Unsupported arguments:
+- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive).
+- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128('0xc0fe', 1);`.
+ +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(38, S)`. [Decimal128(S)](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toDecimal128(99, 1) AS a, toTypeName(a) AS type_a, + toDecimal128(99.67, 2) AS b, toTypeName(b) AS type_b, + toDecimal128('99.67', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 99 +type_a: Decimal(38, 1) +b: 99.67 +type_b: Decimal(38, 2) +c: 99.67 +type_c: Decimal(38, 3) +``` + +**See also** + +- [`toDecimal128OrZero`](#todecimal128orzero). +- [`toDecimal128OrNull`](#todecimal128ornull). +- [`toDecimal128OrDefault`](#todecimal128ordefault). + +## toDecimal128OrZero + +Like [`toDecimal128`](#todecimal128), this function converts an input value to a value of type [Decimal(38, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal128OrZero(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(38, S)` if successful, otherwise `0` with `S` decimal places. [Decimal128(S)](../data-types/decimal.md). + +**Example** + +Query: + +``` sql +SELECT + toDecimal128OrZero(toString(0.0001), 38) AS a, + toTypeName(a), + toDecimal128OrZero(toString('Inf'), 38) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(38, 38) +b: 0 +toTypeName(b): Decimal(38, 38) +``` + +**See also** + +- [`toDecimal128`](#todecimal128). +- [`toDecimal128OrNull`](#todecimal128ornull). +- [`toDecimal128OrDefault`](#todecimal128ordefault). + +## toDecimal128OrNull + +Like [`toDecimal128`](#todecimal128), this function converts an input value to a value of type [Nullable(Decimal(38, S))](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal128OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128OrNull('0xc0fe', 1);`. 
+ +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(38, S))` if successful, otherwise value `NULL` of the same type. [Decimal128(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal128OrNull(toString(1/42), 38) AS a, + toTypeName(a), + toDecimal128OrNull(toString('Inf'), 38) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Nullable(Decimal(38, 38)) +b: á´ºáµá´¸á´¸ +toTypeName(b): Nullable(Decimal(38, 38)) +``` + +**See also** + +- [`toDecimal128`](#todecimal128). +- [`toDecimal128OrZero`](#todecimal128orzero). +- [`toDecimal128OrDefault`](#todecimal128ordefault). + +## toDecimal128OrDefault + +Like [`toDecimal128`](#todecimal128), this function converts an input value to a value of type [Decimal(38, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal128OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal128(S)` is unsuccessful. [Decimal128(S)](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128OrDefault('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(38, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal128(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal128OrDefault(toString(1/42), 18) AS a, + toTypeName(a), + toDecimal128OrDefault('Inf', 0, CAST('-1', 'Decimal128(0)')) AS b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Decimal(38, 18) +b: -1 +toTypeName(b): Decimal(38, 0) +``` + +**See also** + +- [`toDecimal128`](#todecimal128). +- [`toDecimal128OrZero`](#todecimal128orzero). +- [`toDecimal128OrNull`](#todecimal128ornull). + +## toDecimal256 + +Converts an input value to a value of type [`Decimal(76, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error. + +**Syntax** + +```sql +toDecimal256(expr, S) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. 
+- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(76, S)`. [Decimal256(S)](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toDecimal256(99, 1) AS a, toTypeName(a) AS type_a, + toDecimal256(99.67, 2) AS b, toTypeName(b) AS type_b, + toDecimal256('99.67', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 99 +type_a: Decimal(76, 1) +b: 99.67 +type_b: Decimal(76, 2) +c: 99.67 +type_c: Decimal(76, 3) +``` + +**See also** + +- [`toDecimal256OrZero`](#todecimal256orzero). +- [`toDecimal256OrNull`](#todecimal256ornull). +- [`toDecimal256OrDefault`](#todecimal256ordefault). + +## toDecimal256OrZero + +Like [`toDecimal256`](#todecimal256), this function converts an input value to a value of type [Decimal(76, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal256OrZero(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(76, S)` if successful, otherwise `0` with `S` decimal places. [Decimal256(S)](../data-types/decimal.md). + +**Example** + +Query: + +``` sql +SELECT + toDecimal256OrZero(toString(0.0001), 76) AS a, + toTypeName(a), + toDecimal256OrZero(toString('Inf'), 76) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(76, 76) +b: 0 +toTypeName(b): Decimal(76, 76) +``` + +**See also** + +- [`toDecimal256`](#todecimal256). +- [`toDecimal256OrNull`](#todecimal256ornull). +- [`toDecimal256OrDefault`](#todecimal256ordefault). + +## toDecimal256OrNull + +Like [`toDecimal256`](#todecimal256), this function converts an input value to a value of type [Nullable(Decimal(76, S))](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal256OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. 
+- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256OrNull('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(76, S))` if successful, otherwise value `NULL` of the same type. [Decimal256(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal256OrNull(toString(1/42), 76) AS a, + toTypeName(a), + toDecimal256OrNull(toString('Inf'), 76) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Nullable(Decimal(76, 76)) +b: á´ºáµá´¸á´¸ +toTypeName(b): Nullable(Decimal(76, 76)) +``` + +**See also** + +- [`toDecimal256`](#todecimal256). +- [`toDecimal256OrZero`](#todecimal256orzero). +- [`toDecimal256OrDefault`](#todecimal256ordefault). + +## toDecimal256OrDefault + +Like [`toDecimal256`](#todecimal256), this function converts an input value to a value of type [Decimal(76, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal256OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal256(S)` is unsuccessful. [Decimal256(S)](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256OrDefault('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(76, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal256(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal256OrDefault(toString(1/42), 76) AS a, + toTypeName(a), + toDecimal256OrDefault('Inf', 0, CAST('-1', 'Decimal256(0)')) AS b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Decimal(76, 76) +b: -1 +toTypeName(b): Decimal(76, 0) +``` + +**See also** + +- [`toDecimal256`](#todecimal256). +- [`toDecimal256OrZero`](#todecimal256orzero). +- [`toDecimal256OrNull`](#todecimal256ornull). + ## toString Functions for converting between numbers, strings (but not fixed strings), dates, and dates with times. 
From d243feea2136bbfa5f1e943f64e5ebd851f2b103 Mon Sep 17 00:00:00 2001 From: avogar Date: Sat, 10 Aug 2024 11:21:28 +0000 Subject: [PATCH 2022/2361] Fix special builds --- src/Columns/ColumnDynamic.cpp | 8 ++++---- src/Columns/ColumnDynamic.h | 2 +- src/Columns/ColumnVariant.cpp | 2 +- src/Columns/ColumnVariant.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 454f7956f48..7246be29592 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -325,7 +325,7 @@ void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) /// Check if we insert from shared variant and process it separately. if (src_global_discr == dynamic_src.getSharedVariantDiscriminator()) { - auto & src_shared_variant = dynamic_src.getSharedVariant(); + const auto & src_shared_variant = dynamic_src.getSharedVariant(); auto value = src_shared_variant.getDataAt(src_offset); /// Decode data type of this value. ReadBufferFromMemory buf(value.data, value.size); @@ -469,9 +469,9 @@ void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t if (variant_info.variant_names.size() - 1 == max_dynamic_types) { auto shared_variant_discr = getSharedVariantDiscriminator(); - for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) + for (const auto & variant_name : dynamic_src.variant_info.variant_names) { - auto it = variant_info.variant_name_to_discriminator.find(dynamic_src.variant_info.variant_names[i]); + auto it = variant_info.variant_name_to_discriminator.find(variant_name); if (it == variant_info.variant_name_to_discriminator.end()) other_to_new_discriminators.push_back(shared_variant_discr); else @@ -618,7 +618,7 @@ void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size /// Check if we insert from shared variant and process it separately. if (src_global_discr == dynamic_src.getSharedVariantDiscriminator()) { - auto & src_shared_variant = dynamic_src.getSharedVariant(); + const auto & src_shared_variant = dynamic_src.getSharedVariant(); auto value = src_shared_variant.getDataAt(src_offset); /// Decode data type of this value. ReadBufferFromMemory buf(value.data, value.size); diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 8b815e2b015..a595a990964 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -37,7 +37,7 @@ public: MERGE, /// Statistics were calculated during merge of several MergeTree parts. }; - Statistics(Source source_) : source(source_) {} + explicit Statistics(Source source_) : source(source_) {} /// Source of the statistics. 
Source source; diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index 7531e976926..0402e1a0690 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -476,7 +476,7 @@ void ColumnVariant::insertFromImpl(const DB::IColumn & src_, size_t n, const std } } -void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, Discriminator * skip_discriminator) +void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, const Discriminator * skip_discriminator) { const size_t num_variants = variants.size(); const auto & src = assert_cast(src_); diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 571a843d113..7c8093e385d 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -329,7 +329,7 @@ public: private: void insertFromImpl(const IColumn & src_, size_t n, const std::vector * global_discriminators_mapping); - void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, Discriminator * skip_discriminator); + void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, const Discriminator * skip_discriminator); void insertManyFromImpl(const IColumn & src_, size_t position, size_t length, const std::vector * global_discriminators_mapping); void initIdentityGlobalToLocalDiscriminatorsMapping(); From c1b5b908ba13f863de4e9621d3fe2c8139758650 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 10 Aug 2024 13:01:55 +0000 Subject: [PATCH 2023/2361] hide Field::get in private, only use Field::safeGet --- programs/client/Client.cpp | 2 +- programs/keeper-client/Commands.cpp | 6 +- .../AggregateFunctionGroupArray.cpp | 12 +- .../AggregateFunctionGroupArrayIntersect.cpp | 6 +- .../AggregateFunctionGroupArrayMoving.cpp | 6 +- .../AggregateFunctionGroupArraySorted.cpp | 6 +- .../AggregateFunctionGroupConcat.cpp | 10 +- .../AggregateFunctionGroupUniqArray.cpp | 6 +- ...AggregateFunctionKolmogorovSmirnovTest.cpp | 4 +- ...ateFunctionLargestTriangleThreeBuckets.cpp | 2 +- .../AggregateFunctionMannWhitney.cpp | 4 +- .../AggregateFunctionQuantile.h | 6 +- .../AggregateFunctionSumMap.cpp | 10 +- .../Passes/ComparisonTupleEliminationPass.cpp | 2 +- .../Passes/ConvertOrLikeChainPass.cpp | 2 +- .../Passes/FunctionToSubcolumnsPass.cpp | 6 +- src/Analyzer/Passes/FuseFunctionsPass.cpp | 2 +- .../Passes/IfTransformStringsToEnumPass.cpp | 10 +- .../Passes/NormalizeCountVariantsPass.cpp | 2 +- ...ateOrDateTimeConverterWithPreimagePass.cpp | 8 +- .../RewriteAggregateFunctionWithIfPass.cpp | 4 +- src/Analyzer/Passes/SumIfToCountIfPass.cpp | 6 +- src/Analyzer/QueryTreeBuilder.cpp | 2 +- src/Analyzer/Resolve/IdentifierResolver.cpp | 8 +- src/Analyzer/Resolve/QueryAnalyzer.cpp | 4 +- src/Analyzer/SetUtils.cpp | 8 +- src/Backups/BackupSettings.cpp | 4 +- src/Backups/DDLAdjustingForBackupVisitor.cpp | 4 +- src/Backups/RestoreSettings.cpp | 8 +- src/Backups/SettingsFieldOptionalString.cpp | 2 +- src/Backups/SettingsFieldOptionalUUID.cpp | 2 +- src/Client/QueryFuzzer.cpp | 20 +-- src/Client/Suggest.cpp | 2 +- src/Columns/ColumnAggregateFunction.cpp | 19 +-- src/Columns/ColumnArray.cpp | 6 +- src/Columns/ColumnDecimal.h | 2 +- src/Columns/ColumnFixedString.cpp | 4 +- src/Columns/ColumnMap.cpp | 10 +- src/Columns/ColumnObject.cpp | 4 +- 
src/Columns/ColumnString.h | 2 +- src/Columns/ColumnTuple.cpp | 6 +- src/Columns/ColumnVector.h | 4 +- src/Columns/tests/gtest_column_variant.cpp | 120 +++++++++--------- src/Columns/tests/gtest_low_cardinality.cpp | 6 +- src/Common/CollectionOfDerived.h | 2 +- src/Common/FieldBinaryEncoding.cpp | 2 +- src/Common/FieldVisitorSum.cpp | 2 +- src/Common/FieldVisitorSum.h | 2 +- src/Common/FieldVisitorToString.cpp | 2 +- src/Common/HashTable/HashMap.h | 2 +- src/Common/examples/arena_with_free_lists.cpp | 22 ++-- src/Core/ExternalTable.cpp | 2 +- src/Core/Field.h | 71 ++++------- src/Core/Range.cpp | 16 +-- src/Core/Settings.cpp | 2 +- src/Core/SettingsFields.cpp | 14 +- src/Core/SettingsQuirks.cpp | 6 +- src/Core/examples/field.cpp | 2 +- src/Core/tests/gtest_field.cpp | 28 ++-- src/DataTypes/DataTypeAggregateFunction.cpp | 4 +- src/DataTypes/DataTypeDynamic.cpp | 4 +- src/DataTypes/DataTypeEnum.cpp | 18 +-- src/DataTypes/DataTypeFixedString.cpp | 4 +- src/DataTypes/DataTypeObject.cpp | 2 +- src/DataTypes/DataTypesDecimal.cpp | 6 +- src/DataTypes/ObjectUtils.cpp | 2 +- .../Serializations/JSONDataParser.cpp | 4 +- .../SerializationAggregateFunction.cpp | 4 +- .../Serializations/SerializationArray.cpp | 4 +- .../SerializationDecimalBase.cpp | 2 +- .../SerializationFixedString.cpp | 4 +- .../SerializationIPv4andIPv6.cpp | 2 +- .../Serializations/SerializationMap.cpp | 4 +- .../Serializations/SerializationNumber.cpp | 2 +- .../Serializations/SerializationString.cpp | 4 +- .../Serializations/SerializationTuple.cpp | 4 +- .../Serializations/SerializationUUID.cpp | 2 +- src/DataTypes/registerDataTypeDateTime.cpp | 2 +- src/Databases/DDLLoadingDependencyVisitor.cpp | 4 +- src/Databases/DDLRenamingVisitor.cpp | 6 +- src/Databases/DatabaseReplicated.cpp | 4 +- .../MySQL/MaterializedMySQLSyncThread.cpp | 18 +-- .../MySQL/tests/gtest_mysql_binlog.cpp | 30 ++--- src/Dictionaries/CacheDictionaryStorage.h | 12 +- src/Dictionaries/DictionaryHelpers.h | 4 +- src/Dictionaries/FlatDictionary.cpp | 6 +- src/Dictionaries/HashedArrayDictionary.cpp | 8 +- src/Dictionaries/HashedDictionary.h | 8 +- .../HierarchyDictionariesUtils.cpp | 6 +- src/Dictionaries/IPAddressDictionary.cpp | 8 +- src/Dictionaries/MongoDBDictionarySource.cpp | 3 +- src/Dictionaries/PolygonDictionary.cpp | 10 +- src/Dictionaries/RangeHashedDictionary.h | 4 +- src/Dictionaries/RedisDictionarySource.cpp | 3 +- src/Disks/getOrCreateDiskFromAST.cpp | 2 +- src/Functions/DateTimeTransforms.h | 4 +- src/Functions/FunctionsConsistentHashing.h | 4 +- src/Functions/FunctionsConversion.cpp | 30 ++--- src/Functions/FunctionsJSON.cpp | 2 +- src/Functions/FunctionsLogical.cpp | 6 +- src/Functions/FunctionsRound.h | 6 +- src/Functions/IFunctionCustomWeek.h | 8 +- src/Functions/IFunctionDateOrDateTime.h | 16 +-- .../JSONPath/Parsers/ParserJSONPathRange.cpp | 4 +- src/Functions/MultiMatchAllIndicesImpl.h | 2 +- src/Functions/MultiMatchAnyImpl.h | 2 +- src/Functions/MultiSearchAllPositionsImpl.h | 2 +- src/Functions/MultiSearchFirstIndexImpl.h | 2 +- src/Functions/MultiSearchFirstPositionImpl.h | 2 +- src/Functions/MultiSearchImpl.h | 2 +- src/Functions/URL/cutURLParameter.cpp | 2 +- src/Functions/array/arrayElement.cpp | 38 +++--- src/Functions/array/mapOp.cpp | 2 +- src/Functions/getClientHTTPHeader.cpp | 2 +- src/Functions/multiIf.cpp | 2 +- src/Functions/nested.cpp | 4 +- src/IO/S3Common.cpp | 8 +- src/Interpreters/ActionsVisitor.cpp | 12 +- src/Interpreters/AddDefaultDatabaseVisitor.h | 2 +- .../ComparisonTupleEliminationVisitor.cpp | 2 +- 
.../ConvertFunctionOrLikeVisitor.cpp | 4 +- .../ConvertStringsToEnumVisitor.cpp | 12 +- src/Interpreters/DDLTask.cpp | 2 +- src/Interpreters/InterpreterCreateQuery.cpp | 4 +- src/Interpreters/InterpreterExplainQuery.cpp | 4 +- .../InterpreterKillQueryQuery.cpp | 2 +- src/Interpreters/InterpreterSelectQuery.cpp | 2 +- src/Interpreters/MutationsInterpreter.cpp | 2 +- ...OrDateTimeConverterWithPreimageVisitor.cpp | 8 +- ...OptimizeIfWithConstantConditionVisitor.cpp | 4 +- .../OptimizeShardingKeyRewriteInVisitor.cpp | 21 ++- .../RewriteCountVariantsVisitor.cpp | 2 +- src/Interpreters/TreeOptimizer.cpp | 2 +- src/Interpreters/WindowDescription.cpp | 8 +- src/Interpreters/convertFieldToType.cpp | 62 ++++----- .../evaluateConstantExpression.cpp | 4 +- .../replaceForPositionalArguments.cpp | 4 +- .../tests/gtest_comparison_graph.cpp | 4 +- src/Parsers/ASTFunction.cpp | 2 +- src/Parsers/ASTLiteral.cpp | 8 +- src/Parsers/Access/ParserCreateQuotaQuery.cpp | 2 +- src/Parsers/ExpressionElementParsers.cpp | 10 +- src/Parsers/ParserAlterQuery.cpp | 12 +- src/Parsers/ParserCheckQuery.cpp | 2 +- src/Parsers/ParserCreateQuery.cpp | 4 +- src/Parsers/ParserDictionary.cpp | 6 +- src/Parsers/ParserPartition.cpp | 2 +- src/Parsers/ParserSystemQuery.cpp | 8 +- src/Parsers/ParserUndropQuery.cpp | 2 +- src/Parsers/tests/gtest_dictionary_parser.cpp | 40 +++--- .../Formats/Impl/CHColumnToArrowColumn.cpp | 2 +- .../Impl/ConstantExpressionTemplate.cpp | 6 +- .../Impl/NativeORCBlockInputFormat.cpp | 4 +- .../Impl/PrometheusTextOutputFormat.cpp | 4 +- .../Formats/Impl/ValuesBlockInputFormat.cpp | 8 +- .../Algorithms/SummingSortedAlgorithm.cpp | 12 +- .../QueryPlan/ReadFromSystemNumbersStep.cpp | 10 +- src/Processors/Sources/MySQLSource.cpp | 4 +- .../Transforms/FillingTransform.cpp | 14 +- src/Processors/Transforms/WindowTransform.cpp | 20 +-- src/Storages/AlterCommands.cpp | 8 +- src/Storages/ColumnsDescription.cpp | 2 +- ...pproximateNearestNeighborIndexesCommon.cpp | 12 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 8 +- src/Storages/MergeTree/KeyCondition.cpp | 12 +- src/Storages/MergeTree/MergeTreeData.cpp | 8 +- .../MergeTree/MergeTreeDataWriter.cpp | 4 +- .../MergeTree/MergeTreeIndexAnnoy.cpp | 6 +- .../MergeTree/MergeTreeIndexBloomFilter.cpp | 12 +- .../MergeTreeIndexBloomFilterText.cpp | 50 ++++---- .../MergeTree/MergeTreeIndexFullText.cpp | 42 +++--- .../MergeTree/MergeTreeIndexHypothesis.cpp | 2 +- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 4 +- .../MergeTree/MergeTreeIndexUSearch.cpp | 8 +- src/Storages/MergeTree/MergeTreePartition.cpp | 2 +- .../MergeTree/MergeTreeWhereOptimizer.cpp | 6 +- .../MergeTree/registerStorageMergeTree.cpp | 2 +- src/Storages/Statistics/Statistics.cpp | 16 +-- .../Statistics/StatisticsCountMinSketch.cpp | 2 +- src/Storages/StorageFactory.cpp | 2 +- src/Storages/StorageFile.cpp | 6 +- src/Storages/StorageFuzzJSON.cpp | 2 +- src/Storages/StorageJoin.cpp | 4 +- src/Storages/StoragePostgreSQL.cpp | 4 +- src/Storages/System/StorageSystemColumns.cpp | 6 +- .../System/StorageSystemPartsBase.cpp | 2 +- src/Storages/System/StorageSystemPartsBase.h | 10 +- src/Storages/VirtualColumnUtils.h | 2 +- src/Storages/getStructureOfRemoteTable.cpp | 12 +- src/TableFunctions/TableFunctionExplain.cpp | 4 +- src/TableFunctions/TableFunctionFile.cpp | 2 +- .../TableFunctionMergeTreeIndex.cpp | 4 +- 192 files changed, 749 insertions(+), 774 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 1d99d223ee9..25c94c56aa6 100644 --- a/programs/client/Client.cpp +++ 
b/programs/client/Client.cpp @@ -223,7 +223,7 @@ std::vector Client::loadWarningMessages() size_t rows = packet.block.rows(); for (size_t i = 0; i < rows; ++i) - messages.emplace_back(column[i].get()); + messages.emplace_back(column[i].safeGet()); } continue; diff --git a/programs/keeper-client/Commands.cpp b/programs/keeper-client/Commands.cpp index df9da8e9613..7226bd82df7 100644 --- a/programs/keeper-client/Commands.cpp +++ b/programs/keeper-client/Commands.cpp @@ -95,7 +95,7 @@ void SetCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) co client->zookeeper->set( client->getAbsolutePath(query->args[0].safeGet()), query->args[1].safeGet(), - static_cast(query->args[2].get())); + static_cast(query->args[2].safeGet())); } bool CreateCommand::parse(IParser::Pos & pos, std::shared_ptr & node, Expected & expected) const @@ -494,7 +494,7 @@ void RMCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) con { Int32 version{-1}; if (query->args.size() == 2) - version = static_cast(query->args[1].get()); + version = static_cast(query->args[1].safeGet()); client->zookeeper->remove(client->getAbsolutePath(query->args[0].safeGet()), version); } @@ -549,7 +549,7 @@ void ReconfigCommand::execute(const DB::ASTKeeperQuery * query, DB::KeeperClient String leaving; String new_members; - auto operation = query->args[0].get(); + auto operation = query->args[0].safeGet(); switch (operation) { case static_cast(ReconfigCommand::Operation::ADD): diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 7034e6373b1..5cc9f725b46 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -780,12 +780,12 @@ AggregateFunctionPtr createAggregateFunctionGroupArray( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[0].get() < 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); has_limit = true; - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, @@ -816,11 +816,11 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[i].get() < 0) || - (type == Field::Types::UInt64 && parameters[i].get() == 0)) + if ((type == Field::Types::Int64 && parameters[i].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[i].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - return parameters[i].get(); + return parameters[i].safeGet(); }; UInt64 max_elems = get_parameter(0); diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp index 38f2fcb9fb9..36d00b1d9ec 100644 --- 
a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp @@ -83,16 +83,16 @@ public: if (version == 1) { for (size_t i = 0; i < arr_size; ++i) - set.insert(static_cast((*data_column)[offset + i].get())); + set.insert(static_cast((*data_column)[offset + i].safeGet())); } else if (!set.empty()) { typename State::Set new_set; for (size_t i = 0; i < arr_size; ++i) { - typename State::Set::LookupResult set_value = set.find(static_cast((*data_column)[offset + i].get())); + typename State::Set::LookupResult set_value = set.find(static_cast((*data_column)[offset + i].safeGet())); if (set_value != nullptr) - new_set.insert(static_cast((*data_column)[offset + i].get())); + new_set.insert(static_cast((*data_column)[offset + i].safeGet())); } set = std::move(new_set); } diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp index 026b8d1956f..2c3ac7f883e 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp @@ -269,12 +269,12 @@ AggregateFunctionPtr createAggregateFunctionMoving( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive integer", name); - if ((type == Field::Types::Int64 && parameters[0].get() <= 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() <= 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive integer", name); limit_size = true; - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, diff --git a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp index d41d743e17a..27043ed6aa6 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp @@ -397,11 +397,11 @@ AggregateFunctionPtr createAggregateFunctionGroupArray( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[0].get() < 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, diff --git a/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp b/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp index 5494ef74705..636ac80e350 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp @@ -247,7 +247,7 @@ AggregateFunctionPtr createAggregateFunctionGroupConcat( if (type != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First parameter for aggregate function {} should be 
string", name); - delimiter = parameters[0].get(); + delimiter = parameters[0].safeGet(); } if (parameters.size() == 2) { @@ -256,12 +256,12 @@ AggregateFunctionPtr createAggregateFunctionGroupConcat( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second parameter for aggregate function {} should be a positive number", name); - if ((type == Field::Types::Int64 && parameters[1].get() <= 0) || - (type == Field::Types::UInt64 && parameters[1].get() == 0)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second parameter for aggregate function {} should be a positive number, got: {}", name, parameters[1].get()); + if ((type == Field::Types::Int64 && parameters[1].safeGet() <= 0) || + (type == Field::Types::UInt64 && parameters[1].safeGet() == 0)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second parameter for aggregate function {} should be a positive number, got: {}", name, parameters[1].safeGet()); has_limit = true; - limit = parameters[1].get(); + limit = parameters[1].safeGet(); } if (has_limit) diff --git a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp index 7b4300b3568..5cbf449c946 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp @@ -323,12 +323,12 @@ AggregateFunctionPtr createAggregateFunctionGroupUniqArray( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[0].get() < 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); limit_size = true; - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, diff --git a/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp b/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp index 04eebe9f485..28e8d37b8c8 100644 --- a/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp @@ -238,7 +238,7 @@ public: if (params[0].getType() != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a String", getName()); - const auto & param = params[0].get(); + const auto & param = params[0].safeGet(); if (param == "two-sided") alternative = Alternative::TwoSided; else if (param == "less") @@ -255,7 +255,7 @@ public: if (params[1].getType() != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require second parameter to be a String", getName()); - method = params[1].get(); + method = params[1].safeGet(); if (method != "auto" && method != "exact" && method != "asymp" && method != "asymptotic") throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown method in aggregate function {}. 
" "It must be one of: 'auto', 'exact', 'asymp' (or 'asymptotic')", getName()); diff --git a/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp b/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp index 6d1e3c0f64b..813b13b6f7b 100644 --- a/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp +++ b/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp @@ -181,7 +181,7 @@ public: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a UInt64", getName()); - total_buckets = params[0].get(); + total_buckets = params[0].safeGet(); this->x_type = WhichDataType(arguments[0]).idx; this->y_type = WhichDataType(arguments[1]).idx; diff --git a/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp b/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp index f088737c340..fa90846650d 100644 --- a/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp +++ b/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp @@ -152,7 +152,7 @@ public: if (params[0].getType() != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a String", getName()); - const auto & param = params[0].get(); + const auto & param = params[0].safeGet(); if (param == "two-sided") alternative = Alternative::TwoSided; else if (param == "less") @@ -169,7 +169,7 @@ public: if (params[1].getType() != Field::Types::UInt64) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require second parameter to be a UInt64", getName()); - continuity_correction = static_cast(params[1].get()); + continuity_correction = static_cast(params[1].safeGet()); } String getName() const override diff --git a/src/AggregateFunctions/AggregateFunctionQuantile.h b/src/AggregateFunctions/AggregateFunctionQuantile.h index 127dc06b642..423fd4bc569 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantile.h +++ b/src/AggregateFunctions/AggregateFunctionQuantile.h @@ -117,7 +117,7 @@ public: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} requires relative accuracy parameter with Float64 type", getName()); - relative_accuracy = relative_accuracy_field.get(); + relative_accuracy = relative_accuracy_field.safeGet(); if (relative_accuracy <= 0 || relative_accuracy >= 1 || isNaN(relative_accuracy)) throw Exception( @@ -147,9 +147,9 @@ public: ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} requires accuracy parameter with integer type", getName()); if (accuracy_field.getType() == Field::Types::Int64) - accuracy = accuracy_field.get(); + accuracy = accuracy_field.safeGet(); else - accuracy = accuracy_field.get(); + accuracy = accuracy_field.safeGet(); if (accuracy <= 0) throw Exception( diff --git a/src/AggregateFunctions/AggregateFunctionSumMap.cpp b/src/AggregateFunctions/AggregateFunctionSumMap.cpp index 666136a91b7..9a94c3dfe1a 100644 --- a/src/AggregateFunctions/AggregateFunctionSumMap.cpp +++ b/src/AggregateFunctions/AggregateFunctionSumMap.cpp @@ -300,12 +300,12 @@ public: /// Compatibility with previous versions. 
if (value.getType() == Field::Types::Decimal32) { - auto source = value.get>(); + auto source = value.safeGet>(); value = DecimalField(source.getValue(), source.getScale()); } else if (value.getType() == Field::Types::Decimal64) { - auto source = value.get>(); + auto source = value.safeGet>(); value = DecimalField(source.getValue(), source.getScale()); } @@ -355,7 +355,7 @@ public: /// Compatibility with previous versions. if (value.getType() == Field::Types::Decimal128) { - auto source = value.get>(); + auto source = value.safeGet>(); WhichDataType value_type(values_types[col_idx]); if (value_type.isDecimal32()) { @@ -560,7 +560,7 @@ private: template bool compareImpl(FieldType & x) const { - auto val = rhs.get(); + auto val = rhs.safeGet(); if (val > x) { x = val; @@ -600,7 +600,7 @@ private: template bool compareImpl(FieldType & x) const { - auto val = rhs.get(); + auto val = rhs.safeGet(); if (val < x) { x = val; diff --git a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp index 180470952cd..4bc4255e2e2 100644 --- a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp +++ b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp @@ -129,7 +129,7 @@ private: if (constant_node_value.getType() != Field::Types::Which::Tuple) return {}; - const auto & constant_tuple = constant_node_value.get(); + const auto & constant_tuple = constant_node_value.safeGet(); const auto & function_arguments_nodes = function_node_typed.getArguments().getNodes(); size_t function_arguments_nodes_size = function_arguments_nodes.size(); diff --git a/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp b/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp index 2b2ac95d7b9..6c4ce789993 100644 --- a/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp +++ b/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp @@ -89,7 +89,7 @@ public: if (!pattern || !isString(pattern->getResultType())) continue; - auto regexp = likePatternToRegexp(pattern->getValue().get()); + auto regexp = likePatternToRegexp(pattern->getValue().safeGet()); /// Case insensitive. Works with UTF-8 as well. 
if (is_ilike) regexp = "(?i)" + regexp; diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp index b968f43c6a6..1fc3eec6833 100644 --- a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -68,10 +68,10 @@ void optimizeFunctionEmpty(QueryTreeNodePtr &, FunctionNode & function_node, Col String getSubcolumnNameForElement(const Field & value, const DataTypeTuple & data_type_tuple) { if (value.getType() == Field::Types::String) - return value.get(); + return value.safeGet(); if (value.getType() == Field::Types::UInt64) - return data_type_tuple.getNameByPosition(value.get()); + return data_type_tuple.getNameByPosition(value.safeGet()); return ""; } @@ -79,7 +79,7 @@ String getSubcolumnNameForElement(const Field & value, const DataTypeTuple & dat String getSubcolumnNameForElement(const Field & value, const DataTypeVariant &) { if (value.getType() == Field::Types::String) - return value.get(); + return value.safeGet(); return ""; } diff --git a/src/Analyzer/Passes/FuseFunctionsPass.cpp b/src/Analyzer/Passes/FuseFunctionsPass.cpp index 0175e304a2b..f3b109a10ed 100644 --- a/src/Analyzer/Passes/FuseFunctionsPass.cpp +++ b/src/Analyzer/Passes/FuseFunctionsPass.cpp @@ -187,7 +187,7 @@ FunctionNodePtr createFusedQuantilesNode(std::vector & nodes /// Sort nodes and parameters in ascending order of quantile level std::vector permutation(nodes.size()); iota(permutation.data(), permutation.size(), size_t(0)); - std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].get() < parameters[j].get(); }); + std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].safeGet() < parameters[j].safeGet(); }); std::vector new_nodes; new_nodes.reserve(permutation.size()); diff --git a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp index d966f129d08..f81327c5d55 100644 --- a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp +++ b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp @@ -134,8 +134,8 @@ public: return; std::set string_values; - string_values.insert(first_literal->getValue().get()); - string_values.insert(second_literal->getValue().get()); + string_values.insert(first_literal->getValue().safeGet()); + string_values.insert(second_literal->getValue().safeGet()); changeIfArguments(*function_if_node, string_values, context); wrapIntoToString(*function_node, std::move(modified_if_node), context); @@ -163,7 +163,7 @@ public: if (!isArray(literal_to->getResultType()) || !isString(literal_default->getResultType())) return; - auto array_to = literal_to->getValue().get(); + auto array_to = literal_to->getValue().safeGet(); if (array_to.empty()) return; @@ -178,9 +178,9 @@ public: std::set string_values; for (const auto & value : array_to) - string_values.insert(value.get()); + string_values.insert(value.safeGet()); - string_values.insert(literal_default->getValue().get()); + string_values.insert(literal_default->getValue().safeGet()); changeTransformArguments(*function_modified_transform_node, string_values, context); wrapIntoToString(*function_node, std::move(modified_transform_node), context); diff --git a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp index 3a8b6e75d40..02f1c93ea7f 100644 --- a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp +++ b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp @@ 
-54,7 +54,7 @@ public: } else if (function_node->getFunctionName() == "sum" && first_argument_constant_literal.getType() == Field::Types::UInt64 && - first_argument_constant_literal.get() == 1) + first_argument_constant_literal.safeGet() == 1) { function_node->getArguments().getNodes().clear(); resolveAggregateFunctionNodeByName(*function_node, "count"); diff --git a/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp b/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp index feb8bcc792d..0f33c302265 100644 --- a/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp +++ b/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp @@ -143,13 +143,13 @@ private: const auto & column_type = column_node_typed.getColumnType().get(); if (isDateOrDate32(column_type)) { - start_date_or_date_time = date_lut.dateToString(range.first.get()); - end_date_or_date_time = date_lut.dateToString(range.second.get()); + start_date_or_date_time = date_lut.dateToString(range.first.safeGet()); + end_date_or_date_time = date_lut.dateToString(range.second.safeGet()); } else if (isDateTime(column_type) || isDateTime64(column_type)) { - start_date_or_date_time = date_lut.timeToString(range.first.get()); - end_date_or_date_time = date_lut.timeToString(range.second.get()); + start_date_or_date_time = date_lut.timeToString(range.first.safeGet()); + end_date_or_date_time = date_lut.timeToString(range.second.safeGet()); } else [[unlikely]] return {}; diff --git a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp index a48e88132a6..091061ceb81 100644 --- a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp +++ b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp @@ -60,7 +60,7 @@ public: { const auto & second_const_value = second_const_node->getValue(); if (second_const_value.isNull() - || (lower_name == "sum" && isInt64OrUInt64FieldType(second_const_value.getType()) && second_const_value.get() == 0 + || (lower_name == "sum" && isInt64OrUInt64FieldType(second_const_value.getType()) && second_const_value.safeGet() == 0 && !if_node->getResultType()->isNullable())) { /// avg(if(cond, a, null)) -> avgIf(a::ResultTypeIf, cond) @@ -89,7 +89,7 @@ public: { const auto & first_const_value = first_const_node->getValue(); if (first_const_value.isNull() - || (lower_name == "sum" && isInt64OrUInt64FieldType(first_const_value.getType()) && first_const_value.get() == 0 + || (lower_name == "sum" && isInt64OrUInt64FieldType(first_const_value.getType()) && first_const_value.safeGet() == 0 && !if_node->getResultType()->isNullable())) { /// avg(if(cond, null, a) -> avgIf(a::ResultTypeIf, !cond)) diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp index 1524629dc81..a987ced497a 100644 --- a/src/Analyzer/Passes/SumIfToCountIfPass.cpp +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -66,7 +66,7 @@ public: resolveAggregateFunctionNodeByName(*function_node, "countIf"); - if (constant_value_literal.get() != 1) + if (constant_value_literal.safeGet() != 1) { /// Rewrite `sumIf(123, cond)` into `123 * countIf(cond)` node = getMultiplyFunction(std::move(multiplier_node), node); @@ -105,8 +105,8 @@ public: const auto & if_true_condition_constant_value_literal = if_true_condition_constant_node->getValue(); const auto & if_false_condition_constant_value_literal = if_false_condition_constant_node->getValue(); - auto if_true_condition_value = 
if_true_condition_constant_value_literal.get(); - auto if_false_condition_value = if_false_condition_constant_value_literal.get(); + auto if_true_condition_value = if_true_condition_constant_value_literal.safeGet(); + auto if_false_condition_value = if_false_condition_constant_value_literal.safeGet(); if (if_false_condition_value == 0) { diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index fb41826929f..9754897d54d 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -471,7 +471,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express std::shared_ptr collator; if (order_by_element.getCollation()) - collator = std::make_shared(order_by_element.getCollation()->as().value.get()); + collator = std::make_shared(order_by_element.getCollation()->as().value.safeGet()); const auto & sort_expression_ast = order_by_element.children.at(0); auto sort_expression = buildExpression(sort_expression_ast, context); diff --git a/src/Analyzer/Resolve/IdentifierResolver.cpp b/src/Analyzer/Resolve/IdentifierResolver.cpp index 447bf825836..a79433ac130 100644 --- a/src/Analyzer/Resolve/IdentifierResolver.cpp +++ b/src/Analyzer/Resolve/IdentifierResolver.cpp @@ -1273,7 +1273,7 @@ QueryTreeNodePtr IdentifierResolver::matchArrayJoinSubcolumns( const auto & constant_node_value = constant_node.getValue(); if (constant_node_value.getType() == Field::Types::String) { - array_join_subcolumn_prefix = constant_node_value.get() + "."; + array_join_subcolumn_prefix = constant_node_value.safeGet() + "."; array_join_parent_column = argument_nodes.at(0).get(); } } @@ -1287,7 +1287,7 @@ QueryTreeNodePtr IdentifierResolver::matchArrayJoinSubcolumns( if (!second_argument || second_argument->getValue().getType() != Field::Types::String) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected constant string as second argument of getSubcolumn function {}", resolved_function->dumpTree()); - const auto & resolved_subcolumn_path = second_argument->getValue().get(); + const auto & resolved_subcolumn_path = second_argument->getValue().safeGet(); if (!startsWith(resolved_subcolumn_path, array_join_subcolumn_prefix)) return {}; @@ -1331,7 +1331,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveExpressionFromArrayJoinExpression size_t nested_function_arguments_size = nested_function_arguments.size(); const auto & nested_keys_names_constant_node = nested_function_arguments[0]->as(); - const auto & nested_keys_names = nested_keys_names_constant_node.getValue().get(); + const auto & nested_keys_names = nested_keys_names_constant_node.getValue().safeGet(); size_t nested_keys_names_size = nested_keys_names.size(); if (nested_keys_names_size == nested_function_arguments_size - 1) @@ -1344,7 +1344,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveExpressionFromArrayJoinExpression auto array_join_column = std::make_shared(array_join_column_expression_typed.getColumn(), array_join_column_expression_typed.getColumnSource()); - const auto & nested_key_name = nested_keys_names[i - 1].get(); + const auto & nested_key_name = nested_keys_names[i - 1].safeGet(); Identifier nested_identifier = Identifier(nested_key_name); array_join_resolved_expression = wrapExpressionNodeInTupleElement(array_join_column, nested_identifier, scope.context); break; diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 767d5c11075..2ce79b7bddd 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ 
b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -746,11 +746,11 @@ void QueryAnalyzer::replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_ UInt64 pos; if (constant_node->getValue().getType() == Field::Types::UInt64) { - pos = constant_node->getValue().get(); + pos = constant_node->getValue().safeGet(); } else // Int64 { - auto value = constant_node->getValue().get(); + auto value = constant_node->getValue().safeGet(); if (value > 0) pos = value; else diff --git a/src/Analyzer/SetUtils.cpp b/src/Analyzer/SetUtils.cpp index 0ecb3545225..095be1fb9b8 100644 --- a/src/Analyzer/SetUtils.cpp +++ b/src/Analyzer/SetUtils.cpp @@ -93,7 +93,7 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes& "Invalid type in set. Expected tuple, got {}", value.getTypeName()); - const auto & tuple = value.template get(); + const auto & tuple = value.template safeGet(); const DataTypePtr & value_type = value_types[collection_index]; const DataTypes & tuple_value_type = typeid_cast(value_type.get())->getElements(); @@ -169,15 +169,15 @@ Block getSetElementsForConstantValue(const DataTypePtr & expression_type, const if (rhs_which_type.isArray()) { const DataTypeArray * value_array_type = assert_cast(value_type.get()); - size_t value_array_size = value.get().size(); + size_t value_array_size = value.safeGet().size(); DataTypes value_types(value_array_size, value_array_type->getNestedType()); - result_block = createBlockFromCollection(value.get(), value_types, set_element_types, transform_null_in); + result_block = createBlockFromCollection(value.safeGet(), value_types, set_element_types, transform_null_in); } else if (rhs_which_type.isTuple()) { const DataTypeTuple * value_tuple_type = assert_cast(value_type.get()); const DataTypes & value_types = value_tuple_type->getElements(); - result_block = createBlockFromCollection(value.get(), value_types, set_element_types, transform_null_in); + result_block = createBlockFromCollection(value.safeGet(), value_types, set_element_types, transform_null_in); } else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index e33880f88e3..7f353028346 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -125,7 +125,7 @@ std::vector BackupSettings::Util::clusterHostIDsFromAST(const IAST & as throw Exception( ErrorCodes::CANNOT_PARSE_BACKUP_SETTINGS, "Setting cluster_host_ids has wrong format, must be array of arrays of string literals"); - const auto & replicas = array_of_replicas->value.get(); + const auto & replicas = array_of_replicas->value.safeGet(); res[i].resize(replicas.size()); for (size_t j = 0; j != replicas.size(); ++j) { @@ -134,7 +134,7 @@ std::vector BackupSettings::Util::clusterHostIDsFromAST(const IAST & as throw Exception( ErrorCodes::CANNOT_PARSE_BACKUP_SETTINGS, "Setting cluster_host_ids has wrong format, must be array of arrays of string literals"); - res[i][j] = replica.get(); + res[i][j] = replica.safeGet(); } } } diff --git a/src/Backups/DDLAdjustingForBackupVisitor.cpp b/src/Backups/DDLAdjustingForBackupVisitor.cpp index 910831195a3..4dcbdcc1617 100644 --- a/src/Backups/DDLAdjustingForBackupVisitor.cpp +++ b/src/Backups/DDLAdjustingForBackupVisitor.cpp @@ -46,8 +46,8 @@ namespace if (zookeeper_path_ast && (zookeeper_path_ast->value.getType() == Field::Types::String) && replica_name_ast && (replica_name_ast->value.getType() == Field::Types::String)) { - String & zookeeper_path_arg = zookeeper_path_ast->value.get(); - 
String & replica_name_arg = replica_name_ast->value.get(); + String & zookeeper_path_arg = zookeeper_path_ast->value.safeGet(); + String & replica_name_arg = replica_name_ast->value.safeGet(); if (create.uuid != UUIDHelpers::Nil) { String table_uuid_str = toString(create.uuid); diff --git a/src/Backups/RestoreSettings.cpp b/src/Backups/RestoreSettings.cpp index 7bbfd9ed751..4662305cdd6 100644 --- a/src/Backups/RestoreSettings.cpp +++ b/src/Backups/RestoreSettings.cpp @@ -31,7 +31,7 @@ namespace { if (field.getType() == Field::Types::String) { - const String & str = field.get(); + const String & str = field.safeGet(); if (str == "1" || boost::iequals(str, "true") || boost::iequals(str, "create")) { value = RestoreTableCreationMode::kCreate; @@ -54,7 +54,7 @@ namespace if (field.getType() == Field::Types::UInt64) { - UInt64 number = field.get(); + UInt64 number = field.safeGet(); if (number == 1) { value = RestoreTableCreationMode::kCreate; @@ -95,7 +95,7 @@ namespace { if (field.getType() == Field::Types::String) { - const String & str = field.get(); + const String & str = field.safeGet(); if (str == "1" || boost::iequals(str, "true") || boost::iequals(str, "create")) { value = RestoreAccessCreationMode::kCreate; @@ -118,7 +118,7 @@ namespace if (field.getType() == Field::Types::UInt64) { - UInt64 number = field.get(); + UInt64 number = field.safeGet(); if (number == 1) { value = RestoreAccessCreationMode::kCreate; diff --git a/src/Backups/SettingsFieldOptionalString.cpp b/src/Backups/SettingsFieldOptionalString.cpp index 573fd1e052c..684407a533d 100644 --- a/src/Backups/SettingsFieldOptionalString.cpp +++ b/src/Backups/SettingsFieldOptionalString.cpp @@ -19,7 +19,7 @@ SettingFieldOptionalString::SettingFieldOptionalString(const Field & field) if (field.getType() == Field::Types::String) { - value = field.get(); + value = field.safeGet(); return; } diff --git a/src/Backups/SettingsFieldOptionalUUID.cpp b/src/Backups/SettingsFieldOptionalUUID.cpp index 3f14608b206..0011f7f1073 100644 --- a/src/Backups/SettingsFieldOptionalUUID.cpp +++ b/src/Backups/SettingsFieldOptionalUUID.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes if (field.getType() == Field::Types::String) { - const String & str = field.get(); + const String & str = field.safeGet(); if (str.empty()) { value = std::nullopt; diff --git a/src/Client/QueryFuzzer.cpp b/src/Client/QueryFuzzer.cpp index f5b700ea529..cc7b37aad52 100644 --- a/src/Client/QueryFuzzer.cpp +++ b/src/Client/QueryFuzzer.cpp @@ -133,7 +133,7 @@ Field QueryFuzzer::fuzzField(Field field) if (type == Field::Types::String) { - auto & str = field.get(); + auto & str = field.safeGet(); UInt64 action = fuzz_rand() % 10; switch (action) { @@ -159,7 +159,7 @@ Field QueryFuzzer::fuzzField(Field field) } else if (type == Field::Types::Array) { - auto & arr = field.get(); + auto & arr = field.safeGet(); if (fuzz_rand() % 5 == 0 && !arr.empty()) { @@ -191,7 +191,7 @@ Field QueryFuzzer::fuzzField(Field field) } else if (type == Field::Types::Tuple) { - auto & arr = field.get(); + auto & arr = field.safeGet(); if (fuzz_rand() % 5 == 0 && !arr.empty()) { @@ -912,17 +912,17 @@ ASTPtr QueryFuzzer::fuzzLiteralUnderExpressionList(ASTPtr child) auto type = l->value.getType(); if (type == Field::Types::Which::String && fuzz_rand() % 7 == 0) { - String value = l->value.get(); + String value = l->value.safeGet(); child = makeASTFunction( "toFixedString", std::make_shared(value), std::make_shared(static_cast(value.size()))); } else if (type == Field::Types::Which::UInt64 && fuzz_rand() % 7 == 
0) { - child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toUInt128" : "toUInt256", std::make_shared(l->value.get())); + child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toUInt128" : "toUInt256", std::make_shared(l->value.safeGet())); } else if (type == Field::Types::Which::Int64 && fuzz_rand() % 7 == 0) { - child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toInt128" : "toInt256", std::make_shared(l->value.get())); + child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toInt128" : "toInt256", std::make_shared(l->value.safeGet())); } else if (type == Field::Types::Which::Float64 && fuzz_rand() % 7 == 0) { @@ -930,22 +930,22 @@ ASTPtr QueryFuzzer::fuzzLiteralUnderExpressionList(ASTPtr child) if (decimal == 0) child = makeASTFunction( "toDecimal32", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 9))); else if (decimal == 1) child = makeASTFunction( "toDecimal64", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 18))); else if (decimal == 2) child = makeASTFunction( "toDecimal128", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 38))); else child = makeASTFunction( "toDecimal256", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 76))); } diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 0188ebc8173..affd620f83a 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -214,7 +214,7 @@ void Suggest::fillWordsFromBlock(const Block & block) Words new_words; new_words.reserve(rows); for (size_t i = 0; i < rows; ++i) - new_words.emplace_back(column[i].get()); + new_words.emplace_back(column[i].safeGet()); addWords(std::move(new_words)); } diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp index e26fe790a8e..c41c340b069 100644 --- a/src/Columns/ColumnAggregateFunction.cpp +++ b/src/Columns/ColumnAggregateFunction.cpp @@ -426,9 +426,9 @@ MutableColumnPtr ColumnAggregateFunction::cloneEmpty() const Field ColumnAggregateFunction::operator[](size_t n) const { Field field = AggregateFunctionStateData(); - field.get().name = type_string; + field.safeGet().name = type_string; { - WriteBufferFromString buffer(field.get().data); + WriteBufferFromString buffer(field.safeGet().data); func->serialize(data[n], buffer, version); } return field; @@ -436,12 +436,7 @@ Field ColumnAggregateFunction::operator[](size_t n) const void ColumnAggregateFunction::get(size_t n, Field & res) const { - res = AggregateFunctionStateData(); - res.get().name = type_string; - { - WriteBufferFromString buffer(res.get().data); - func->serialize(data[n], buffer, version); - } + res = operator[](n); } StringRef ColumnAggregateFunction::getDataAt(size_t n) const @@ -521,7 +516,7 @@ void ColumnAggregateFunction::insert(const Field & x) "Inserting field of type {} into ColumnAggregateFunction. 
Expected {}", x.getTypeName(), Field::Types::AggregateFunctionState); - const auto & field_name = x.get().name; + const auto & field_name = x.safeGet().name; if (type_string != field_name) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot insert filed with type {} into column with type {}", field_name, type_string); @@ -529,7 +524,7 @@ void ColumnAggregateFunction::insert(const Field & x) ensureOwnership(); Arena & arena = createOrGetArena(); pushBackAndCreateState(data, arena, func.get()); - ReadBufferFromString read_buffer(x.get().data); + ReadBufferFromString read_buffer(x.safeGet().data); func->deserialize(data.back(), read_buffer, version, &arena); } @@ -538,14 +533,14 @@ bool ColumnAggregateFunction::tryInsert(const DB::Field & x) if (x.getType() != Field::Types::AggregateFunctionState) return false; - const auto & field_name = x.get().name; + const auto & field_name = x.safeGet().name; if (type_string != field_name) return false; ensureOwnership(); Arena & arena = createOrGetArena(); pushBackAndCreateState(data, arena, func.get()); - ReadBufferFromString read_buffer(x.get().data); + ReadBufferFromString read_buffer(x.safeGet().data); func->deserialize(data.back(), read_buffer, version, &arena); return true; } diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 19cce678cc7..5379adc0bf7 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -141,7 +141,7 @@ void ColumnArray::get(size_t n, Field & res) const size, max_array_size_as_field); res = Array(); - Array & res_arr = res.get(); + Array & res_arr = res.safeGet(); res_arr.reserve(size); for (size_t i = 0; i < size; ++i) @@ -309,7 +309,7 @@ void ColumnArray::updateHashFast(SipHash & hash) const void ColumnArray::insert(const Field & x) { - const Array & array = x.get(); + const Array & array = x.safeGet(); size_t size = array.size(); for (size_t i = 0; i < size; ++i) getData().insert(array[i]); @@ -321,7 +321,7 @@ bool ColumnArray::tryInsert(const Field & x) if (x.getType() != Field::Types::Which::Array) return false; - const Array & array = x.get(); + const Array & array = x.safeGet(); size_t size = array.size(); for (size_t i = 0; i < size; ++i) { diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 59bfbd2159c..07120f5f035 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -74,7 +74,7 @@ public: void insertData(const char * src, size_t /*length*/) override; void insertDefault() override { data.push_back(T()); } void insertManyDefaults(size_t length) override { data.resize_fill(data.size() + length); } - void insert(const Field & x) override { data.push_back(x.get()); } + void insert(const Field & x) override { data.push_back(x.safeGet()); } bool tryInsert(const Field & x) override; #if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index 0bb3f7edb14..04e894ee5ab 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -59,7 +59,7 @@ bool ColumnFixedString::isDefaultAt(size_t index) const void ColumnFixedString::insert(const Field & x) { - const String & s = x.get(); + const String & s = x.safeGet(); insertData(s.data(), s.size()); } @@ -67,7 +67,7 @@ bool ColumnFixedString::tryInsert(const Field & x) { if (x.getType() != Field::Types::Which::String) return false; - const String & s = x.get(); + const String & s = x.safeGet(); if (s.size() 
> n) return false; insertData(s.data(), s.size()); diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 1025b4e77b9..6a6618bd81e 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -72,7 +72,7 @@ void ColumnMap::get(size_t n, Field & res) const size_t size = offsets[n] - offsets[n - 1]; res = Map(); - auto & map = res.get(); + auto & map = res.safeGet(); map.reserve(size); for (size_t i = 0; i < size; ++i) @@ -96,7 +96,7 @@ void ColumnMap::insertData(const char *, size_t) void ColumnMap::insert(const Field & x) { - const auto & map = x.get(); + const auto & map = x.safeGet(); nested->insert(Array(map.begin(), map.end())); } @@ -105,7 +105,7 @@ bool ColumnMap::tryInsert(const Field & x) if (x.getType() != Field::Types::Which::Map) return false; - const auto & map = x.get(); + const auto & map = x.safeGet(); return nested->tryInsert(Array(map.begin(), map.end())); } @@ -288,8 +288,8 @@ void ColumnMap::getExtremes(Field & min, Field & max) const /// Convert result Array fields to Map fields because client expect min and max field to have type Map - Array nested_min_value = nested_min.get(); - Array nested_max_value = nested_max.get(); + Array nested_min_value = nested_min.safeGet(); + Array nested_max_value = nested_max.safeGet(); Map map_min_value(nested_min_value.begin(), nested_min_value.end()); Map map_max_value(nested_max_value.begin(), nested_max_value.end()); diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index a6431007cb6..eb99bb4081b 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -698,7 +698,7 @@ void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback ca void ColumnObject::insert(const Field & field) { - const auto & object = field.get(); + const auto & object = field.safeGet(); HashSet inserted_paths; size_t old_size = size(); @@ -754,7 +754,7 @@ void ColumnObject::get(size_t n, Field & res) const { assert(n < size()); res = Object(); - auto & object = res.get(); + auto & object = res.safeGet(); for (const auto & entry : subcolumns) { diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h index c1012e1e55e..65aff9e6255 100644 --- a/src/Columns/ColumnString.h +++ b/src/Columns/ColumnString.h @@ -123,7 +123,7 @@ public: void insert(const Field & x) override { - const String & s = x.get(); + const String & s = x.safeGet(); const size_t old_size = chars.size(); const size_t size_to_append = s.size() + 1; const size_t new_size = old_size + size_to_append; diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 4fc3f88a87c..251be8d9986 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -141,7 +141,7 @@ void ColumnTuple::get(size_t n, Field & res) const const size_t tuple_size = columns.size(); res = Tuple(); - Tuple & res_tuple = res.get(); + Tuple & res_tuple = res.safeGet(); res_tuple.reserve(tuple_size); for (size_t i = 0; i < tuple_size; ++i) @@ -169,7 +169,7 @@ void ColumnTuple::insertData(const char *, size_t) void ColumnTuple::insert(const Field & x) { - const auto & tuple = x.get(); + const auto & tuple = x.safeGet(); const size_t tuple_size = columns.size(); if (tuple.size() != tuple_size) @@ -185,7 +185,7 @@ bool ColumnTuple::tryInsert(const Field & x) if (x.getType() != Field::Types::Which::Tuple) return false; - const auto & tuple = x.get(); + const auto & tuple = x.safeGet(); const size_t tuple_size = columns.size(); if (tuple.size() != tuple_size) diff --git a/src/Columns/ColumnVector.h 
b/src/Columns/ColumnVector.h index 2fe5b635bd2..a5e1ee4b462 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -85,7 +85,7 @@ public: void insertMany(const Field & field, size_t length) override { - data.resize_fill(data.size() + length, static_cast(field.get())); + data.resize_fill(data.size() + length, static_cast(field.safeGet())); } void insertData(const char * pos, size_t) override @@ -235,7 +235,7 @@ public: void insert(const Field & x) override { - data.push_back(static_cast(x.get())); + data.push_back(static_cast(x.safeGet())); } bool tryInsert(const DB::Field & x) override; diff --git a/src/Columns/tests/gtest_column_variant.cpp b/src/Columns/tests/gtest_column_variant.cpp index 25f276b9600..5e481b88409 100644 --- a/src/Columns/tests/gtest_column_variant.cpp +++ b/src/Columns/tests/gtest_column_variant.cpp @@ -108,10 +108,10 @@ void checkColumnVariant1(ColumnVariant * column) ASSERT_EQ(offsets[1], 0); ASSERT_EQ(offsets[3], 1); ASSERT_TRUE(column->isDefaultAt(2) && column->isDefaultAt(4)); - ASSERT_EQ((*column)[0].get(), 42); - ASSERT_EQ((*column)[1].get(), "Hello"); + ASSERT_EQ((*column)[0].safeGet(), 42); + ASSERT_EQ((*column)[1].safeGet(), "Hello"); ASSERT_TRUE((*column)[2].isNull()); - ASSERT_EQ((*column)[3].get(), "World"); + ASSERT_EQ((*column)[3].safeGet(), "World"); ASSERT_TRUE((*column)[4].isNull()); } @@ -209,9 +209,9 @@ TEST(ColumnVariant, CreateFromDiscriminatorsAndOneFullColumnNoNulls) ASSERT_EQ(offsets[0], 0); ASSERT_EQ(offsets[1], 1); ASSERT_EQ(offsets[2], 2); - ASSERT_EQ((*column)[0].get(), 0); - ASSERT_EQ((*column)[1].get(), 1); - ASSERT_EQ((*column)[2].get(), 2); + ASSERT_EQ((*column)[0].safeGet(), 0); + ASSERT_EQ((*column)[1].safeGet(), 1); + ASSERT_EQ((*column)[2].safeGet(), 2); } TEST(ColumnVariant, CreateFromDiscriminatorsAndOneFullColumnNoNullsWithLocalOrder) @@ -222,9 +222,9 @@ TEST(ColumnVariant, CreateFromDiscriminatorsAndOneFullColumnNoNullsWithLocalOrde ASSERT_EQ(offsets[0], 0); ASSERT_EQ(offsets[1], 1); ASSERT_EQ(offsets[2], 2); - ASSERT_EQ((*column)[0].get(), 0); - ASSERT_EQ((*column)[1].get(), 1); - ASSERT_EQ((*column)[2].get(), 2); + ASSERT_EQ((*column)[0].safeGet(), 0); + ASSERT_EQ((*column)[1].safeGet(), 1); + ASSERT_EQ((*column)[2].safeGet(), 2); ASSERT_EQ(column->localDiscriminatorAt(0), 2); ASSERT_EQ(column->localDiscriminatorAt(1), 2); ASSERT_EQ(column->localDiscriminatorAt(2), 2); @@ -331,9 +331,9 @@ TEST(ColumnVariant, CloneResizedGeneral1) ASSERT_EQ(offsets[0], 0); ASSERT_EQ(offsets[1], 0); ASSERT_EQ(offsets[3], 1); - ASSERT_EQ((*resized_column_variant)[0].get(), 42); - ASSERT_EQ((*resized_column_variant)[1].get(), "Hello"); - ASSERT_EQ((*resized_column_variant)[3].get(), 43); + ASSERT_EQ((*resized_column_variant)[0].safeGet(), 42); + ASSERT_EQ((*resized_column_variant)[1].safeGet(), "Hello"); + ASSERT_EQ((*resized_column_variant)[3].safeGet(), 43); } TEST(ColumnVariant, CloneResizedGeneral2) @@ -367,7 +367,7 @@ TEST(ColumnVariant, CloneResizedGeneral2) ASSERT_EQ(discriminators[2], ColumnVariant::NULL_DISCRIMINATOR); const auto & offsets = resized_column_variant->getOffsets(); ASSERT_EQ(offsets[0], 0); - ASSERT_EQ((*resized_column_variant)[0].get(), 42); + ASSERT_EQ((*resized_column_variant)[0].safeGet(), 42); } TEST(ColumnVariant, CloneResizedGeneral3) @@ -405,10 +405,10 @@ TEST(ColumnVariant, CloneResizedGeneral3) ASSERT_EQ(offsets[1], 0); ASSERT_EQ(offsets[2], 1); ASSERT_EQ(offsets[3], 1); - ASSERT_EQ((*resized_column_variant)[0].get(), 42); - ASSERT_EQ((*resized_column_variant)[1].get(), "Hello"); - 
ASSERT_EQ((*resized_column_variant)[2].get(), "World"); - ASSERT_EQ((*resized_column_variant)[3].get(), 43); + ASSERT_EQ((*resized_column_variant)[0].safeGet(), 42); + ASSERT_EQ((*resized_column_variant)[1].safeGet(), "Hello"); + ASSERT_EQ((*resized_column_variant)[2].safeGet(), "World"); + ASSERT_EQ((*resized_column_variant)[3].safeGet(), 43); } MutableColumnPtr createDiscriminators2() @@ -465,7 +465,7 @@ TEST(ColumnVariant, InsertFrom) auto column_from = createVariantColumn2(change_order); column_to->insertFrom(*column_from, 3); ASSERT_EQ(column_to->globalDiscriminatorAt(5), 0); - ASSERT_EQ((*column_to)[5].get(), 43); + ASSERT_EQ((*column_to)[5].safeGet(), 43); } } @@ -478,8 +478,8 @@ TEST(ColumnVariant, InsertRangeFromOneColumnNoNulls) column_to->insertRangeFrom(*column_from, 2, 2); ASSERT_EQ(column_to->globalDiscriminatorAt(7), 0); ASSERT_EQ(column_to->globalDiscriminatorAt(8), 0); - ASSERT_EQ((*column_to)[7].get(), 2); - ASSERT_EQ((*column_to)[8].get(), 3); + ASSERT_EQ((*column_to)[7].safeGet(), 2); + ASSERT_EQ((*column_to)[8].safeGet(), 3); } } @@ -494,9 +494,9 @@ TEST(ColumnVariant, InsertRangeFromGeneral) ASSERT_EQ(column_to->globalDiscriminatorAt(6), ColumnVariant::NULL_DISCRIMINATOR); ASSERT_EQ(column_to->globalDiscriminatorAt(7), 0); ASSERT_EQ(column_to->globalDiscriminatorAt(8), 1); - ASSERT_EQ((*column_to)[5].get(), "Hello"); - ASSERT_EQ((*column_to)[7].get(), 43); - ASSERT_EQ((*column_to)[8].get(), "World"); + ASSERT_EQ((*column_to)[5].safeGet(), "Hello"); + ASSERT_EQ((*column_to)[7].safeGet(), 43); + ASSERT_EQ((*column_to)[8].safeGet(), "World"); } } @@ -509,8 +509,8 @@ TEST(ColumnVariant, InsertManyFrom) column_to->insertManyFrom(*column_from, 3, 2); ASSERT_EQ(column_to->globalDiscriminatorAt(5), 0); ASSERT_EQ(column_to->globalDiscriminatorAt(6), 0); - ASSERT_EQ((*column_to)[5].get(), 43); - ASSERT_EQ((*column_to)[6].get(), 43); + ASSERT_EQ((*column_to)[5].safeGet(), 43); + ASSERT_EQ((*column_to)[6].safeGet(), 43); } } @@ -520,8 +520,8 @@ TEST(ColumnVariant, PopBackOneColumnNoNulls) column->popBack(3); ASSERT_EQ(column->size(), 2); ASSERT_EQ(column->getVariantByLocalDiscriminator(0).size(), 2); - ASSERT_EQ((*column)[0].get(), 0); - ASSERT_EQ((*column)[1].get(), 1); + ASSERT_EQ((*column)[0].safeGet(), 0); + ASSERT_EQ((*column)[1].safeGet(), 1); } TEST(ColumnVariant, PopBackGeneral) @@ -531,8 +531,8 @@ TEST(ColumnVariant, PopBackGeneral) ASSERT_EQ(column->size(), 3); ASSERT_EQ(column->getVariantByLocalDiscriminator(0).size(), 1); ASSERT_EQ(column->getVariantByLocalDiscriminator(1).size(), 1); - ASSERT_EQ((*column)[0].get(), 42); - ASSERT_EQ((*column)[1].get(), "Hello"); + ASSERT_EQ((*column)[0].safeGet(), 42); + ASSERT_EQ((*column)[1].safeGet(), "Hello"); ASSERT_TRUE((*column)[2].isNull()); } @@ -545,8 +545,8 @@ TEST(ColumnVariant, FilterOneColumnNoNulls) filter.push_back(1); auto filtered_column = column->filter(filter, -1); ASSERT_EQ(filtered_column->size(), 2); - ASSERT_EQ((*filtered_column)[0].get(), 0); - ASSERT_EQ((*filtered_column)[1].get(), 2); + ASSERT_EQ((*filtered_column)[0].safeGet(), 0); + ASSERT_EQ((*filtered_column)[1].safeGet(), 2); } TEST(ColumnVariant, FilterGeneral) @@ -562,7 +562,7 @@ TEST(ColumnVariant, FilterGeneral) filter.push_back(0); auto filtered_column = column->filter(filter, -1); ASSERT_EQ(filtered_column->size(), 3); - ASSERT_EQ((*filtered_column)[0].get(), "Hello"); + ASSERT_EQ((*filtered_column)[0].safeGet(), "Hello"); ASSERT_TRUE((*filtered_column)[1].isNull()); ASSERT_TRUE((*filtered_column)[2].isNull()); } @@ -577,9 +577,9 @@ 
TEST(ColumnVariant, PermuteAndIndexOneColumnNoNulls) permutation.push_back(0); auto permuted_column = column->permute(permutation, 3); ASSERT_EQ(permuted_column->size(), 3); - ASSERT_EQ((*permuted_column)[0].get(), 1); - ASSERT_EQ((*permuted_column)[1].get(), 3); - ASSERT_EQ((*permuted_column)[2].get(), 2); + ASSERT_EQ((*permuted_column)[0].safeGet(), 1); + ASSERT_EQ((*permuted_column)[1].safeGet(), 3); + ASSERT_EQ((*permuted_column)[2].safeGet(), 2); auto index = ColumnUInt64::create(); index->getData().push_back(1); @@ -588,9 +588,9 @@ TEST(ColumnVariant, PermuteAndIndexOneColumnNoNulls) index->getData().push_back(0); auto indexed_column = column->index(*index, 3); ASSERT_EQ(indexed_column->size(), 3); - ASSERT_EQ((*indexed_column)[0].get(), 1); - ASSERT_EQ((*indexed_column)[1].get(), 3); - ASSERT_EQ((*indexed_column)[2].get(), 2); + ASSERT_EQ((*indexed_column)[0].safeGet(), 1); + ASSERT_EQ((*indexed_column)[1].safeGet(), 3); + ASSERT_EQ((*indexed_column)[2].safeGet(), 2); } TEST(ColumnVariant, PermuteGeneral) @@ -603,9 +603,9 @@ TEST(ColumnVariant, PermuteGeneral) permutation.push_back(5); auto permuted_column = column->permute(permutation, 4); ASSERT_EQ(permuted_column->size(), 4); - ASSERT_EQ((*permuted_column)[0].get(), 43); - ASSERT_EQ((*permuted_column)[1].get(), "World"); - ASSERT_EQ((*permuted_column)[2].get(), "Hello"); + ASSERT_EQ((*permuted_column)[0].safeGet(), 43); + ASSERT_EQ((*permuted_column)[1].safeGet(), "World"); + ASSERT_EQ((*permuted_column)[2].safeGet(), "Hello"); ASSERT_TRUE((*permuted_column)[3].isNull()); } @@ -618,12 +618,12 @@ TEST(ColumnVariant, ReplicateOneColumnNoNull) offsets.push_back(6); auto replicated_column = column->replicate(offsets); ASSERT_EQ(replicated_column->size(), 6); - ASSERT_EQ((*replicated_column)[0].get(), 1); - ASSERT_EQ((*replicated_column)[1].get(), 1); - ASSERT_EQ((*replicated_column)[2].get(), 1); - ASSERT_EQ((*replicated_column)[3].get(), 2); - ASSERT_EQ((*replicated_column)[4].get(), 2); - ASSERT_EQ((*replicated_column)[5].get(), 2); + ASSERT_EQ((*replicated_column)[0].safeGet(), 1); + ASSERT_EQ((*replicated_column)[1].safeGet(), 1); + ASSERT_EQ((*replicated_column)[2].safeGet(), 1); + ASSERT_EQ((*replicated_column)[3].safeGet(), 2); + ASSERT_EQ((*replicated_column)[4].safeGet(), 2); + ASSERT_EQ((*replicated_column)[5].safeGet(), 2); } TEST(ColumnVariant, ReplicateGeneral) @@ -637,9 +637,9 @@ TEST(ColumnVariant, ReplicateGeneral) offsets.push_back(7); auto replicated_column = column->replicate(offsets); ASSERT_EQ(replicated_column->size(), 7); - ASSERT_EQ((*replicated_column)[0].get(), 42); - ASSERT_EQ((*replicated_column)[1].get(), "Hello"); - ASSERT_EQ((*replicated_column)[2].get(), "Hello"); + ASSERT_EQ((*replicated_column)[0].safeGet(), 42); + ASSERT_EQ((*replicated_column)[1].safeGet(), "Hello"); + ASSERT_EQ((*replicated_column)[2].safeGet(), "Hello"); ASSERT_TRUE((*replicated_column)[3].isNull()); ASSERT_TRUE((*replicated_column)[4].isNull()); ASSERT_TRUE((*replicated_column)[5].isNull()); @@ -657,13 +657,13 @@ TEST(ColumnVariant, ScatterOneColumnNoNulls) selector.push_back(1); auto columns = column->scatter(3, selector); ASSERT_EQ(columns[0]->size(), 2); - ASSERT_EQ((*columns[0])[0].get(), 0); - ASSERT_EQ((*columns[0])[1].get(), 3); + ASSERT_EQ((*columns[0])[0].safeGet(), 0); + ASSERT_EQ((*columns[0])[1].safeGet(), 3); ASSERT_EQ(columns[1]->size(), 2); - ASSERT_EQ((*columns[1])[0].get(), 1); - ASSERT_EQ((*columns[1])[1].get(), 4); + ASSERT_EQ((*columns[1])[0].safeGet(), 1); + ASSERT_EQ((*columns[1])[1].safeGet(), 4); 
ASSERT_EQ(columns[2]->size(), 1); - ASSERT_EQ((*columns[2])[0].get(), 2); + ASSERT_EQ((*columns[2])[0].safeGet(), 2); } TEST(ColumnVariant, ScatterGeneral) @@ -680,12 +680,12 @@ TEST(ColumnVariant, ScatterGeneral) auto columns = column->scatter(3, selector); ASSERT_EQ(columns[0]->size(), 3); - ASSERT_EQ((*columns[0])[0].get(), 42); - ASSERT_EQ((*columns[0])[1].get(), "Hello"); - ASSERT_EQ((*columns[0])[2].get(), 43); + ASSERT_EQ((*columns[0])[0].safeGet(), 42); + ASSERT_EQ((*columns[0])[1].safeGet(), "Hello"); + ASSERT_EQ((*columns[0])[2].safeGet(), 43); ASSERT_EQ(columns[1]->size(), 2); - ASSERT_EQ((*columns[1])[0].get(), "World"); - ASSERT_EQ((*columns[1])[1].get(), 44); + ASSERT_EQ((*columns[1])[0].safeGet(), "World"); + ASSERT_EQ((*columns[1])[1].safeGet(), 44); ASSERT_EQ(columns[2]->size(), 2); ASSERT_TRUE((*columns[2])[0].isNull()); ASSERT_TRUE((*columns[2])[1].isNull()); diff --git a/src/Columns/tests/gtest_low_cardinality.cpp b/src/Columns/tests/gtest_low_cardinality.cpp index 5e01279b7df..ce16d2cadb1 100644 --- a/src/Columns/tests/gtest_low_cardinality.cpp +++ b/src/Columns/tests/gtest_low_cardinality.cpp @@ -20,13 +20,13 @@ void testLowCardinalityNumberInsert(const DataTypePtr & data_type) Field value; column->get(0, value); - ASSERT_EQ(value.get(), 15); + ASSERT_EQ(value.safeGet(), 15); column->get(1, value); - ASSERT_EQ(value.get(), 20); + ASSERT_EQ(value.safeGet(), 20); column->get(2, value); - ASSERT_EQ(value.get(), 25); + ASSERT_EQ(value.safeGet(), 25); } TEST(ColumnLowCardinality, Insert) diff --git a/src/Common/CollectionOfDerived.h b/src/Common/CollectionOfDerived.h index 9f80ff727b4..bcbcc36c67a 100644 --- a/src/Common/CollectionOfDerived.h +++ b/src/Common/CollectionOfDerived.h @@ -168,7 +168,7 @@ private: records.emplace(it, type_idx, item); } - Records::const_iterator getImpl(std::type_index type_idx) const + typename Records::const_iterator getImpl(std::type_index type_idx) const { auto it = std::lower_bound(records.cbegin(), records.cend(), type_idx); diff --git a/src/Common/FieldBinaryEncoding.cpp b/src/Common/FieldBinaryEncoding.cpp index 6c1a8496fe6..23263c988c3 100644 --- a/src/Common/FieldBinaryEncoding.cpp +++ b/src/Common/FieldBinaryEncoding.cpp @@ -208,7 +208,7 @@ void FieldVisitorEncodeBinary::operator() (const Map & x, WriteBuffer & buf) con writeVarUInt(size, buf); for (size_t i = 0; i < size; ++i) { - const Tuple & key_and_value = x[i].get(); + const Tuple & key_and_value = x[i].safeGet(); Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[0]); Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[1]); } diff --git a/src/Common/FieldVisitorSum.cpp b/src/Common/FieldVisitorSum.cpp index b825f188586..af9503ac046 100644 --- a/src/Common/FieldVisitorSum.cpp +++ b/src/Common/FieldVisitorSum.cpp @@ -19,7 +19,7 @@ bool FieldVisitorSum::operator() (UInt64 & x) const return x != 0; } -bool FieldVisitorSum::operator() (Float64 & x) const { x += rhs.get(); return x != 0; } +bool FieldVisitorSum::operator() (Float64 & x) const { x += rhs.safeGet(); return x != 0; } bool FieldVisitorSum::operator() (Null &) const { diff --git a/src/Common/FieldVisitorSum.h b/src/Common/FieldVisitorSum.h index cbb4c4a1de3..d28676b5093 100644 --- a/src/Common/FieldVisitorSum.h +++ b/src/Common/FieldVisitorSum.h @@ -37,7 +37,7 @@ public: template bool operator() (DecimalField & x) const { - x += rhs.get>(); + x += rhs.safeGet>(); return x.getValue() != T(0); } diff --git 
a/src/Common/FieldVisitorToString.cpp b/src/Common/FieldVisitorToString.cpp index c4cb4266418..2148bac20d1 100644 --- a/src/Common/FieldVisitorToString.cpp +++ b/src/Common/FieldVisitorToString.cpp @@ -172,7 +172,7 @@ String FieldVisitorToString::operator() (const Object & x) const String convertFieldToString(const Field & field) { if (field.getType() == Field::Types::Which::String) - return field.get(); + return field.safeGet(); return applyVisitor(FieldVisitorToString(), field); } diff --git a/src/Common/HashTable/HashMap.h b/src/Common/HashTable/HashMap.h index a26797a687a..92621db5558 100644 --- a/src/Common/HashTable/HashMap.h +++ b/src/Common/HashTable/HashMap.h @@ -297,7 +297,7 @@ public: } /// Only inserts the value if key isn't already present - void ALWAYS_INLINE insertIfNotPresent(const Key & x, const Cell::Mapped & value) + void ALWAYS_INLINE insertIfNotPresent(const Key & x, const typename Cell::Mapped & value) { LookupResult it; bool inserted; diff --git a/src/Common/examples/arena_with_free_lists.cpp b/src/Common/examples/arena_with_free_lists.cpp index 6793d567aca..3a1304e2d94 100644 --- a/src/Common/examples/arena_with_free_lists.cpp +++ b/src/Common/examples/arena_with_free_lists.cpp @@ -174,19 +174,19 @@ struct Dictionary { switch (attribute.type) { - case AttributeUnderlyingTypeTest::UInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::UInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::UInt32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; - case AttributeUnderlyingTypeTest::UInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; - case AttributeUnderlyingTypeTest::Int64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Float32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; - case AttributeUnderlyingTypeTest::Float64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingTypeTest::UInt8: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::UInt16: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::UInt32: std::get>(attribute.arrays)[idx] = static_cast(value.safeGet()); break; + case AttributeUnderlyingTypeTest::UInt64: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Int8: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Int16: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Int32: std::get>(attribute.arrays)[idx] = static_cast(value.safeGet()); break; + case AttributeUnderlyingTypeTest::Int64: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Float32: std::get>(attribute.arrays)[idx] = static_cast(value.safeGet()); break; + case AttributeUnderlyingTypeTest::Float64: std::get>(attribute.arrays)[idx] = value.safeGet(); break; case AttributeUnderlyingTypeTest::String: { - const auto & string = value.get(); + const auto & string = value.safeGet(); auto & string_ref = std::get>(attribute.arrays)[idx]; 
const auto & null_value_ref = std::get(attribute.null_values); diff --git a/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp index af681cd5639..c2bcf6ec651 100644 --- a/src/Core/ExternalTable.cpp +++ b/src/Core/ExternalTable.cpp @@ -49,7 +49,7 @@ ExternalTableDataPtr BaseExternalTable::getData(ContextPtr context) { initReadBuffer(); initSampleBlock(); - auto input = context->getInputFormat(format, *read_buffer, sample_block, context->getSettingsRef().get("max_block_size").get()); + auto input = context->getInputFormat(format, *read_buffer, sample_block, context->getSettingsRef().get("max_block_size").safeGet()); auto data = std::make_unique(); data->pipe = std::make_unique(); diff --git a/src/Core/Field.h b/src/Core/Field.h index f1bb4a72b0d..689ac38a235 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -457,15 +457,6 @@ public: std::string_view getTypeName() const; bool isNull() const { return which == Types::Null; } - template - NearestFieldType> & get(); - - template - const auto & get() const - { - auto * mutable_this = const_cast *>(this); - return mutable_this->get(); - } bool isNegativeInfinity() const { return which == Types::Null && get().isNegativeInfinity(); } bool isPositiveInfinity() const { return which == Types::Null && get().isPositiveInfinity(); } @@ -681,6 +672,25 @@ private: Types::Which which; + /// This function is prone to type punning and should never be used outside of Field class, + /// whenever it is used within this class the stored type should be checked in advance. + template + NearestFieldType> & get() + { + // Before storing the value in the Field, we static_cast it to the field + // storage type, so here we return the value of storage type as well. + // Otherwise, it is easy to make a mistake of reinterpret_casting the stored + // value to a different and incompatible type. + // For example, a Float32 value is stored as Float64, and it is incorrect to + // return a reference to this value as Float32. + return *reinterpret_cast>*>(&storage); + } + + template + NearestFieldType> & get() const + { + return const_cast(this)->get(); + } /// Assuming there was no allocated state or it was deallocated (see destroy). template @@ -859,55 +869,18 @@ constexpr bool isInt64OrUInt64FieldType(Field::Types::Which t) || t == Field::Types::UInt64; } -constexpr bool isInt64OrUInt64orBoolFieldType(Field::Types::Which t) -{ - return t == Field::Types::Int64 - || t == Field::Types::UInt64 - || t == Field::Types::Bool; -} - -// Field value getter with type checking in debug builds. -template -NearestFieldType> & Field::get() -{ - // Before storing the value in the Field, we static_cast it to the field - // storage type, so here we return the value of storage type as well. - // Otherwise, it is easy to make a mistake of reinterpret_casting the stored - // value to a different and incompatible type. - // For example, a Float32 value is stored as Float64, and it is incorrect to - // return a reference to this value as Float32. - using StoredType = NearestFieldType>; - -#ifndef NDEBUG - // Disregard signedness when converting between int64 types. 
- constexpr Field::Types::Which target = TypeToEnum::value; - if (target != which - && (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which)) && target != Field::Types::IPv4) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Invalid Field get from type {} to type {}", which, target); -#endif - - StoredType * MAY_ALIAS ptr = reinterpret_cast(&storage); - - return *ptr; -} - - template auto & Field::safeGet() { const Types::Which target = TypeToEnum>>::value; - /// We allow converting int64 <-> uint64, int64 <-> bool, uint64 <-> bool in safeGet(). - if (target != which - && (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which))) - throw Exception(ErrorCodes::BAD_GET, - "Bad get: has {}, requested {}", getTypeName(), target); + /// bool is stored as uint64, will be returned as UInt64 when requested as bool or UInt64, as Int64 when requested as Int64 + if (target != which && !(which == Field::Types::Bool && (target == Field::Types::UInt64 || target == Field::Types::Int64))) + throw Exception(ErrorCodes::BAD_GET, "Bad get: has {}, requested {}", getTypeName(), target); return get(); } - template requires not_field_or_bool_or_stringlike Field::Field(T && rhs) diff --git a/src/Core/Range.cpp b/src/Core/Range.cpp index 956b96653a1..1a5ce1e012e 100644 --- a/src/Core/Range.cpp +++ b/src/Core/Range.cpp @@ -62,27 +62,27 @@ void Range::shrinkToIncludedIfPossible() { if (left.isExplicit() && !left_included) { - if (left.getType() == Field::Types::UInt64 && left.get() != std::numeric_limits::max()) + if (left.getType() == Field::Types::UInt64 && left.safeGet() != std::numeric_limits::max()) { - ++left.get(); + ++left.safeGet(); left_included = true; } - if (left.getType() == Field::Types::Int64 && left.get() != std::numeric_limits::max()) + if (left.getType() == Field::Types::Int64 && left.safeGet() != std::numeric_limits::max()) { - ++left.get(); + ++left.safeGet(); left_included = true; } } if (right.isExplicit() && !right_included) { - if (right.getType() == Field::Types::UInt64 && right.get() != std::numeric_limits::min()) + if (right.getType() == Field::Types::UInt64 && right.safeGet() != std::numeric_limits::min()) { - --right.get(); + --right.safeGet(); right_included = true; } - if (right.getType() == Field::Types::Int64 && right.get() != std::numeric_limits::min()) + if (right.getType() == Field::Types::Int64 && right.safeGet() != std::numeric_limits::min()) { - --right.get(); + --right.safeGet(); right_included = true; } } diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 9c9c9c1db00..45bd2b9eb42 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -118,7 +118,7 @@ void Settings::set(std::string_view name, const Field & value) { if (value.getType() != Field::Types::Which::String) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected type of value for setting 'compatibility'. 
Expected String, got {}", value.getTypeName()); - applyCompatibilitySetting(value.get()); + applyCompatibilitySetting(value.safeGet()); } /// If we change setting that was changed by compatibility setting before /// we should remove it from settings_changed_by_compatibility_setting, diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 7d094e2a107..bb2ef58b214 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -53,29 +53,29 @@ namespace { if (f.getType() == Field::Types::String) { - return stringToNumber(f.get()); + return stringToNumber(f.safeGet()); } else if (f.getType() == Field::Types::UInt64) { T result; - if (!accurate::convertNumeric(f.get(), result)) + if (!accurate::convertNumeric(f.safeGet(), result)) throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Field value {} is out of range of {} type", f, demangle(typeid(T).name())); return result; } else if (f.getType() == Field::Types::Int64) { T result; - if (!accurate::convertNumeric(f.get(), result)) + if (!accurate::convertNumeric(f.safeGet(), result)) throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Field value {} is out of range of {} type", f, demangle(typeid(T).name())); return result; } else if (f.getType() == Field::Types::Bool) { - return T(f.get()); + return T(f.safeGet()); } else if (f.getType() == Field::Types::Float64) { - Float64 x = f.get(); + Float64 x = f.safeGet(); if constexpr (std::is_floating_point_v) { return T(x); @@ -120,7 +120,7 @@ namespace if (f.getType() == Field::Types::String) { /// Allow to parse Map from string field. For the convenience. - const auto & str = f.get(); + const auto & str = f.safeGet(); return stringToMap(str); } @@ -218,7 +218,7 @@ namespace UInt64 fieldToMaxThreads(const Field & f) { if (f.getType() == Field::Types::String) - return stringToMaxThreads(f.get()); + return stringToMaxThreads(f.safeGet()); else return fieldToNumber(f); } diff --git a/src/Core/SettingsQuirks.cpp b/src/Core/SettingsQuirks.cpp index 5541cc19653..3127a5ef36d 100644 --- a/src/Core/SettingsQuirks.cpp +++ b/src/Core/SettingsQuirks.cpp @@ -100,7 +100,7 @@ void doSettingsSanityCheckClamp(Settings & current_settings, LoggerPtr log) return current_value; }; - UInt64 max_threads = get_current_value("max_threads").get(); + UInt64 max_threads = get_current_value("max_threads").safeGet(); UInt64 max_threads_max_value = 256 * getNumberOfPhysicalCPUCores(); if (max_threads > max_threads_max_value) { @@ -120,7 +120,7 @@ void doSettingsSanityCheckClamp(Settings & current_settings, LoggerPtr log) "input_format_parquet_max_block_size"}; for (auto const & setting : block_rows_settings) { - if (auto block_size = get_current_value(setting).get(); + if (auto block_size = get_current_value(setting).safeGet(); block_size > max_sane_block_rows_size) { if (log) @@ -129,7 +129,7 @@ void doSettingsSanityCheckClamp(Settings & current_settings, LoggerPtr log) } } - if (auto max_block_size = get_current_value("max_block_size").get(); max_block_size == 0) + if (auto max_block_size = get_current_value("max_block_size").safeGet(); max_block_size == 0) { if (log) LOG_WARNING(log, "Sanity check: 'max_block_size' cannot be 0. 
Set to default value {}", DEFAULT_BLOCK_SIZE); diff --git a/src/Core/examples/field.cpp b/src/Core/examples/field.cpp index 110e11d0cb1..3064290e127 100644 --- a/src/Core/examples/field.cpp +++ b/src/Core/examples/field.cpp @@ -37,7 +37,7 @@ int main(int argc, char ** argv) std::cerr << applyVisitor(to_string, field) << std::endl; } - field.get().push_back(field); + field.safeGet().push_back(field); std::cerr << applyVisitor(to_string, field) << std::endl; std::cerr << (field < field2) << std::endl; diff --git a/src/Core/tests/gtest_field.cpp b/src/Core/tests/gtest_field.cpp index 5585442d835..7e778be9575 100644 --- a/src/Core/tests/gtest_field.cpp +++ b/src/Core/tests/gtest_field.cpp @@ -8,31 +8,31 @@ GTEST_TEST(Field, FromBool) { Field f{false}; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 0); - ASSERT_EQ(f.get(), false); + ASSERT_EQ(f.safeGet(), 0); + ASSERT_EQ(f.safeGet(), false); } { Field f{true}; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 1); - ASSERT_EQ(f.get(), true); + ASSERT_EQ(f.safeGet(), 1); + ASSERT_EQ(f.safeGet(), true); } { Field f; f = false; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 0); - ASSERT_EQ(f.get(), false); + ASSERT_EQ(f.safeGet(), 0); + ASSERT_EQ(f.safeGet(), false); } { Field f; f = true; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 1); - ASSERT_EQ(f.get(), true); + ASSERT_EQ(f.safeGet(), 1); + ASSERT_EQ(f.safeGet(), true); } } @@ -42,15 +42,15 @@ GTEST_TEST(Field, Move) Field f; f = Field{String{"Hello, world (1)"}}; - ASSERT_EQ(f.get(), "Hello, world (1)"); + ASSERT_EQ(f.safeGet(), "Hello, world (1)"); f = Field{String{"Hello, world (2)"}}; - ASSERT_EQ(f.get(), "Hello, world (2)"); + ASSERT_EQ(f.safeGet(), "Hello, world (2)"); f = Field{Array{Field{String{"Hello, world (3)"}}}}; - ASSERT_EQ(f.get()[0].get(), "Hello, world (3)"); + ASSERT_EQ(f.safeGet()[0].safeGet(), "Hello, world (3)"); f = String{"Hello, world (4)"}; - ASSERT_EQ(f.get(), "Hello, world (4)"); + ASSERT_EQ(f.safeGet(), "Hello, world (4)"); f = Array{Field{String{"Hello, world (5)"}}}; - ASSERT_EQ(f.get()[0].get(), "Hello, world (5)"); + ASSERT_EQ(f.safeGet()[0].safeGet(), "Hello, world (5)"); f = Array{String{"Hello, world (6)"}}; - ASSERT_EQ(f.get()[0].get(), "Hello, world (6)"); + ASSERT_EQ(f.safeGet()[0].safeGet(), "Hello, world (6)"); } diff --git a/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp index 09175617bf1..dbb713bc382 100644 --- a/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/src/DataTypes/DataTypeAggregateFunction.cpp @@ -119,7 +119,7 @@ MutableColumnPtr DataTypeAggregateFunction::createColumn() const Field DataTypeAggregateFunction::getDefault() const { Field field = AggregateFunctionStateData(); - field.get().name = getName(); + field.safeGet().name = getName(); AlignedBuffer place_buffer(function->sizeOfData(), function->alignOfData()); AggregateDataPtr place = place_buffer.data(); @@ -128,7 +128,7 @@ Field DataTypeAggregateFunction::getDefault() const try { - WriteBufferFromString buffer_from_field(field.get().data); + WriteBufferFromString buffer_from_field(field.safeGet().data); function->serialize(place, buffer_from_field, version); } catch (...) 
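The call-site hunks above and below all follow the same pattern: Field::get() is replaced with Field::safeGet(). As the new comments in src/Core/Field.h explain, get() reinterpret_casts the stored value without checking the runtime type, so it becomes private, while safeGet() compares the requested type against the stored Types::Which and throws BAD_GET on a mismatch; the only relaxation is that a Bool field (stored as UInt64) may still be requested as Bool, UInt64 or Int64. The short sketch below illustrates that behaviour at a call site; it is not part of the patch, the helper name is hypothetical, and it assumes the ClickHouse headers and the DB namespace.

#include <Core/Field.h>

using namespace DB;

/// Hypothetical helper showing the call-site pattern after this series.
static UInt64 fieldToUInt64(const Field & f)
{
    /// safeGet<UInt64>() succeeds for UInt64 fields and for Bool fields
    /// (Bool is stored as UInt64), and throws Exception(ErrorCodes::BAD_GET,
    /// "Bad get: has {}, requested {}", ...) for anything else, e.g. a String.
    /// The previously tolerated Int64 <-> UInt64 conversion now throws as well.
    return f.safeGet<UInt64>();
}

/// Usage sketch:
///     Field a = UInt64(42);   // fieldToUInt64(a) == 42
///     Field b = true;         // fieldToUInt64(b) == 1, Bool relaxation
///     Field c = String("x");  // fieldToUInt64(c) throws BAD_GET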
diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index a1b1f8325f0..e0430272479 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -71,10 +71,10 @@ static DataTypePtr create(const ASTPtr & arguments) auto * literal = argument->arguments->children[1]->as(); - if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() == 0 || literal->value.get() > ColumnVariant::MAX_NESTED_COLUMNS) + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet() == 0 || literal->value.safeGet() > ColumnVariant::MAX_NESTED_COLUMNS) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 1 and 255"); - return std::make_shared(literal->value.get()); + return std::make_shared(literal->value.safeGet()); } void registerDataTypeDynamic(DataTypeFactory & factory) diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index 08e0c0d2045..b9a5a1a5a68 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -122,12 +122,12 @@ Field DataTypeEnum::castToName(const Field & value_or_name) const { if (value_or_name.getType() == Field::Types::String) { - this->getValue(value_or_name.get()); /// Check correctness - return value_or_name.get(); + this->getValue(value_or_name.safeGet()); /// Check correctness + return value_or_name.safeGet(); } else if (value_or_name.getType() == Field::Types::Int64) { - Int64 value = value_or_name.get(); + Int64 value = value_or_name.safeGet(); checkOverflow(value); return this->getNameForValue(static_cast(value)).toString(); } @@ -141,12 +141,12 @@ Field DataTypeEnum::castToValue(const Field & value_or_name) const { if (value_or_name.getType() == Field::Types::String) { - return this->getValue(value_or_name.get()); + return this->getValue(value_or_name.safeGet()); } else if (value_or_name.getType() == Field::Types::Int64 || value_or_name.getType() == Field::Types::UInt64) { - Int64 value = value_or_name.get(); + Int64 value = value_or_name.safeGet(); checkOverflow(value); this->getNameForValue(static_cast(value)); /// Check correctness return value; @@ -220,7 +220,7 @@ static void autoAssignNumberForEnum(const ASTPtr & arguments) "Elements of Enum data type must be of form: " "'name' = number or 'name', where name is string literal and number is an integer"); - literal_child_assign_num = value_literal->value.get(); + literal_child_assign_num = value_literal->value.safeGet(); } assign_number_child.emplace_back(child); } @@ -269,8 +269,8 @@ static DataTypePtr createExact(const ASTPtr & arguments) "Elements of Enum data type must be of form: " "'name' = number or 'name', where name is string literal and number is an integer"); - const String & field_name = name_literal->value.get(); - const auto value = value_literal->value.get(); + const String & field_name = name_literal->value.safeGet(); + const auto value = value_literal->value.safeGet(); if (value > std::numeric_limits::max() || value < std::numeric_limits::min()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Value {} for element '{}' exceeds range of {}", @@ -302,7 +302,7 @@ static DataTypePtr create(const ASTPtr & arguments) "Elements of Enum data type must be of form: " "'name' = number or 'name', where name is string literal and number is an integer"); - Int64 value = value_literal->value.get(); + Int64 value = value_literal->value.safeGet(); if (value > std::numeric_limits::max() 
|| value < std::numeric_limits::min()) return createExact(arguments); diff --git a/src/DataTypes/DataTypeFixedString.cpp b/src/DataTypes/DataTypeFixedString.cpp index 080ff8826a5..63d5245287f 100644 --- a/src/DataTypes/DataTypeFixedString.cpp +++ b/src/DataTypes/DataTypeFixedString.cpp @@ -51,11 +51,11 @@ static DataTypePtr create(const ASTPtr & arguments) "FixedString data type family must have exactly one argument - size in bytes"); const auto * argument = arguments->children[0]->as(); - if (!argument || argument->value.getType() != Field::Types::UInt64 || argument->value.get() == 0) + if (!argument || argument->value.getType() != Field::Types::UInt64 || argument->value.safeGet() == 0) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "FixedString data type family must have a number (positive integer) as its argument"); - return std::make_shared(argument->value.get()); + return std::make_shared(argument->value.safeGet()); } diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 91b9bfcb2a5..5fd69688194 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -69,7 +69,7 @@ static DataTypePtr create(const ASTPtr & arguments) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Object data type family must have a const string as its schema name parameter"); - return std::make_shared(literal->value.get(), is_nullable); + return std::make_shared(literal->value.safeGet(), is_nullable); } void registerDataTypeObject(DataTypeFactory & factory) diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index a427fd0717a..1d8f7711de1 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -80,14 +80,14 @@ static DataTypePtr create(const ASTPtr & arguments) const auto * precision_arg = arguments->children[0]->as(); if (!precision_arg || precision_arg->value.getType() != Field::Types::UInt64) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Decimal argument precision is invalid"); - precision = precision_arg->value.get(); + precision = precision_arg->value.safeGet(); if (arguments->children.size() == 2) { const auto * scale_arg = arguments->children[1]->as(); if (!scale_arg || !isInt64OrUInt64FieldType(scale_arg->value.getType())) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Decimal argument scale is invalid"); - scale = scale_arg->value.get(); + scale = scale_arg->value.safeGet(); } } @@ -107,7 +107,7 @@ static DataTypePtr createExact(const ASTPtr & arguments) "Decimal32 | Decimal64 | Decimal128 | Decimal256 data type family must have a one number as its argument"); UInt64 precision = DecimalUtils::max_precision; - UInt64 scale = scale_arg->value.get(); + UInt64 scale = scale_arg->value.safeGet(); return createDecimal(precision, scale); } diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 1d525e5987f..a7f2ff83873 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -104,7 +104,7 @@ Array createEmptyArrayField(size_t num_dimensions) for (size_t i = 1; i < num_dimensions; ++i) { current_array->push_back(Array()); - current_array = ¤t_array->back().get(); + current_array = ¤t_array->back().safeGet(); } return array; diff --git a/src/DataTypes/Serializations/JSONDataParser.cpp b/src/DataTypes/Serializations/JSONDataParser.cpp index 56641424396..0f74815f5b4 100644 --- a/src/DataTypes/Serializations/JSONDataParser.cpp +++ b/src/DataTypes/Serializations/JSONDataParser.cpp @@ -131,7 +131,7 @@ void 
JSONDataParser::traverseArrayElement(const Element & element, P auto nested_hash = getHashOfNestedPath(paths[i], values[i]); if (nested_hash) { - size_t array_size = values[i].template get().size(); + size_t array_size = values[i].template safeGet().size(); auto & current_nested_sizes = ctx.nested_sizes_by_path[*nested_hash]; if (current_nested_sizes.size() == ctx.current_size) @@ -154,7 +154,7 @@ void JSONDataParser::traverseArrayElement(const Element & element, P auto nested_hash = getHashOfNestedPath(paths[i], values[i]); if (nested_hash) { - size_t array_size = values[i].template get().size(); + size_t array_size = values[i].template safeGet().size(); auto & current_nested_sizes = ctx.nested_sizes_by_path[*nested_hash]; if (current_nested_sizes.empty()) diff --git a/src/DataTypes/Serializations/SerializationAggregateFunction.cpp b/src/DataTypes/Serializations/SerializationAggregateFunction.cpp index 55f7641e058..41b198890e4 100644 --- a/src/DataTypes/Serializations/SerializationAggregateFunction.cpp +++ b/src/DataTypes/Serializations/SerializationAggregateFunction.cpp @@ -16,14 +16,14 @@ namespace DB void SerializationAggregateFunction::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - const AggregateFunctionStateData & state = field.get(); + const AggregateFunctionStateData & state = field.safeGet(); writeBinary(state.data, ostr); } void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const { field = AggregateFunctionStateData(); - AggregateFunctionStateData & s = field.get(); + AggregateFunctionStateData & s = field.safeGet(); readBinary(s.data, istr); s.name = type_name; } diff --git a/src/DataTypes/Serializations/SerializationArray.cpp b/src/DataTypes/Serializations/SerializationArray.cpp index b7d43332085..0a9c4529e23 100644 --- a/src/DataTypes/Serializations/SerializationArray.cpp +++ b/src/DataTypes/Serializations/SerializationArray.cpp @@ -29,7 +29,7 @@ static constexpr size_t MAX_ARRAYS_SIZE = 1ULL << 40; void SerializationArray::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const Array & a = field.get(); + const Array & a = field.safeGet(); writeVarUInt(a.size(), ostr); for (const auto & i : a) { @@ -51,7 +51,7 @@ void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr, con settings.binary.max_binary_string_size); field = Array(); - Array & arr = field.get(); + Array & arr = field.safeGet(); arr.reserve(size); for (size_t i = 0; i < size; ++i) nested->deserializeBinary(arr.emplace_back(), istr, settings); diff --git a/src/DataTypes/Serializations/SerializationDecimalBase.cpp b/src/DataTypes/Serializations/SerializationDecimalBase.cpp index 49dc042e872..8927f949368 100644 --- a/src/DataTypes/Serializations/SerializationDecimalBase.cpp +++ b/src/DataTypes/Serializations/SerializationDecimalBase.cpp @@ -13,7 +13,7 @@ namespace DB template void SerializationDecimalBase::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - FieldType x = field.get>(); + FieldType x = field.safeGet>(); writeBinaryLittleEndian(x, ostr); } diff --git a/src/DataTypes/Serializations/SerializationFixedString.cpp b/src/DataTypes/Serializations/SerializationFixedString.cpp index f919dc16d33..688c71792fa 100644 --- a/src/DataTypes/Serializations/SerializationFixedString.cpp +++ b/src/DataTypes/Serializations/SerializationFixedString.cpp @@ -28,7 +28,7 @@ static constexpr size_t MAX_STRINGS_SIZE = 
1ULL << 30; void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - const String & s = field.get(); + const String & s = field.safeGet(); ostr.write(s.data(), std::min(s.size(), n)); if (s.size() < n) for (size_t i = s.size(); i < n; ++i) @@ -39,7 +39,7 @@ void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer void SerializationFixedString::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const { field = String(); - String & s = field.get(); + String & s = field.safeGet(); s.resize(n); istr.readStrict(s.data(), n); } diff --git a/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp b/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp index dfcd24aff58..c1beceb4533 100644 --- a/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp +++ b/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp @@ -125,7 +125,7 @@ bool SerializationIP::tryDeserializeTextCSV(DB::IColumn & column, DB::ReadB template void SerializationIP::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - IPv x = field.get(); + IPv x = field.safeGet(); if constexpr (std::is_same_v) writeBinary(x, ostr); else diff --git a/src/DataTypes/Serializations/SerializationMap.cpp b/src/DataTypes/Serializations/SerializationMap.cpp index 0bef3c7d79d..c722b3ac7a1 100644 --- a/src/DataTypes/Serializations/SerializationMap.cpp +++ b/src/DataTypes/Serializations/SerializationMap.cpp @@ -40,7 +40,7 @@ static IColumn & extractNestedColumn(IColumn & column) void SerializationMap::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & map = field.get(); + const auto & map = field.safeGet(); writeVarUInt(map.size(), ostr); for (const auto & elem : map) { @@ -63,7 +63,7 @@ void SerializationMap::deserializeBinary(Field & field, ReadBuffer & istr, const size, settings.binary.max_binary_string_size); field = Map(); - Map & map = field.get(); + Map & map = field.safeGet(); map.reserve(size); for (size_t i = 0; i < size; ++i) { diff --git a/src/DataTypes/Serializations/SerializationNumber.cpp b/src/DataTypes/Serializations/SerializationNumber.cpp index bdb4dfc6735..bfc13af8ca3 100644 --- a/src/DataTypes/Serializations/SerializationNumber.cpp +++ b/src/DataTypes/Serializations/SerializationNumber.cpp @@ -169,7 +169,7 @@ template void SerializationNumber::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { /// ColumnVector::ValueType is a narrower type. 
For example, UInt8, when the Field type is UInt64 - typename ColumnVector::ValueType x = static_cast::ValueType>(field.get()); + typename ColumnVector::ValueType x = static_cast::ValueType>(field.safeGet()); writeBinaryLittleEndian(x, ostr); } diff --git a/src/DataTypes/Serializations/SerializationString.cpp b/src/DataTypes/Serializations/SerializationString.cpp index 9e523d0d745..ac5d4e3e128 100644 --- a/src/DataTypes/Serializations/SerializationString.cpp +++ b/src/DataTypes/Serializations/SerializationString.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes void SerializationString::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const String & s = field.get(); + const String & s = field.safeGet(); if (settings.binary.max_binary_string_size && s.size() > settings.binary.max_binary_string_size) throw Exception( ErrorCodes::TOO_LARGE_STRING_SIZE, @@ -59,7 +59,7 @@ void SerializationString::deserializeBinary(Field & field, ReadBuffer & istr, co settings.binary.max_binary_string_size); field = String(); - String & s = field.get(); + String & s = field.safeGet(); s.resize(size); istr.readStrict(s.data(), size); } diff --git a/src/DataTypes/Serializations/SerializationTuple.cpp b/src/DataTypes/Serializations/SerializationTuple.cpp index 7a5227ca752..594a23ab507 100644 --- a/src/DataTypes/Serializations/SerializationTuple.cpp +++ b/src/DataTypes/Serializations/SerializationTuple.cpp @@ -34,7 +34,7 @@ static inline const IColumn & extractElementColumn(const IColumn & column, size_ void SerializationTuple::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & tuple = field.get(); + const auto & tuple = field.safeGet(); for (size_t element_index = 0; element_index < elems.size(); ++element_index) { const auto & serialization = elems[element_index]; @@ -47,7 +47,7 @@ void SerializationTuple::deserializeBinary(Field & field, ReadBuffer & istr, con const size_t size = elems.size(); field = Tuple(); - Tuple & tuple = field.get(); + Tuple & tuple = field.safeGet(); tuple.reserve(size); for (size_t i = 0; i < size; ++i) elems[i]->deserializeBinary(tuple.emplace_back(), istr, settings); diff --git a/src/DataTypes/Serializations/SerializationUUID.cpp b/src/DataTypes/Serializations/SerializationUUID.cpp index 58178a896dc..f18466ad8ad 100644 --- a/src/DataTypes/Serializations/SerializationUUID.cpp +++ b/src/DataTypes/Serializations/SerializationUUID.cpp @@ -137,7 +137,7 @@ bool SerializationUUID::tryDeserializeTextCSV(IColumn & column, ReadBuffer & ist void SerializationUUID::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - UUID x = field.get(); + UUID x = field.safeGet(); writeBinaryLittleEndian(x, ostr); } diff --git a/src/DataTypes/registerDataTypeDateTime.cpp b/src/DataTypes/registerDataTypeDateTime.cpp index 84a52d4affb..9a632bd381b 100644 --- a/src/DataTypes/registerDataTypeDateTime.cpp +++ b/src/DataTypes/registerDataTypeDateTime.cpp @@ -55,7 +55,7 @@ getArgument(const ASTPtr & arguments, size_t argument_index, const char * argume } } - return argument->value.get(); + return argument->value.safeGet(); } static DataTypePtr create(const ASTPtr & arguments) diff --git a/src/Databases/DDLLoadingDependencyVisitor.cpp b/src/Databases/DDLLoadingDependencyVisitor.cpp index 67bce915168..b91aa84ecd3 100644 --- a/src/Databases/DDLLoadingDependencyVisitor.cpp +++ b/src/Databases/DDLLoadingDependencyVisitor.cpp @@ -183,7 +183,7 @@ void 
DDLLoadingDependencyVisitor::extractTableNameFromArgument(const ASTFunction if (name->value.getType() != Field::Types::String) return; - auto maybe_qualified_name = QualifiedTableName::tryParseFromString(name->value.get()); + auto maybe_qualified_name = QualifiedTableName::tryParseFromString(name->value.safeGet()); if (!maybe_qualified_name) return; @@ -194,7 +194,7 @@ void DDLLoadingDependencyVisitor::extractTableNameFromArgument(const ASTFunction if (literal->value.getType() != Field::Types::String) return; - auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.get()); + auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.safeGet()); /// Just return if name if invalid if (!maybe_qualified_name) return; diff --git a/src/Databases/DDLRenamingVisitor.cpp b/src/Databases/DDLRenamingVisitor.cpp index 38e100e2470..7556223b30e 100644 --- a/src/Databases/DDLRenamingVisitor.cpp +++ b/src/Databases/DDLRenamingVisitor.cpp @@ -180,7 +180,7 @@ namespace if (database_name_field && table_name_field) { - QualifiedTableName qualified_name{database_name_field->get(), table_name_field->get()}; + QualifiedTableName qualified_name{database_name_field->safeGet(), table_name_field->safeGet()}; if (!qualified_name.database.empty() && !qualified_name.table.empty()) { auto new_qualified_name = data.renaming_map.getNewTableName(qualified_name); @@ -207,7 +207,7 @@ namespace if (literal->value.getType() != Field::Types::String) return; - auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.get()); + auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.safeGet()); /// Just return if name if invalid if (!maybe_qualified_name || maybe_qualified_name->database.empty() || maybe_qualified_name->table.empty()) return; @@ -247,7 +247,7 @@ namespace if (!literal || (literal->value.getType() != Field::Types::String)) return; - auto database_name = literal->value.get(); + auto database_name = literal->value.safeGet(); if (database_name.empty()) return; diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index f127ccbc224..602bb29aebc 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -802,8 +802,8 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora if (!arg1 || !arg2 || arg1->value.getType() != Field::Types::String || arg2->value.getType() != Field::Types::String) return; - String maybe_path = arg1->value.get(); - String maybe_replica = arg2->value.get(); + String maybe_path = arg1->value.safeGet(); + String maybe_replica = arg2->value.safeGet(); /// Looks like it's ReplicatedMergeTree with explicit zookeeper_path and replica_name arguments. /// Let's ensure that some macros are used. 
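Note on the hunks in this commit: they replace unchecked Field::get<T>() calls with safeGet<T>(), whose point is to validate the stored alternative before returning it. As a rough standalone sketch of why a checked accessor matters — the MiniField type, its members, and the error text below are illustrative assumptions, not ClickHouse's actual Field class — consider:

// Standalone sketch (not ClickHouse code): a toy variant-backed field with an
// unchecked get<T>() and a checked safeGet<T>() that throws on a type mismatch.
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>

struct MiniField
{
    std::variant<uint64_t, std::string> data;

    // Unchecked access: the caller promises the stored type is T.
    template <typename T>
    const T & get() const { return *std::get_if<T>(&data); } // undefined behaviour if the type is wrong

    // Checked access: verify the stored alternative first and fail loudly.
    template <typename T>
    const T & safeGet() const
    {
        if (const T * value = std::get_if<T>(&data))
            return *value;
        throw std::runtime_error("MiniField: stored type does not match the requested type");
    }
};

int main()
{
    MiniField field{std::string{"some_string_value"}};

    std::cout << field.safeGet<std::string>() << '\n'; // ok: the stored type matches

    try
    {
        std::cout << field.safeGet<uint64_t>() << '\n'; // mismatch: throws instead of reading garbage
    }
    catch (const std::exception & e)
    {
        std::cout << "caught: " << e.what() << '\n';
    }
}

Under this reading, call sites that previously relied on the caller getting the type right now fail with a clear exception when, for example, a literal holds a String where a UInt64 was expected.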
diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index 04b4070d5af..1364e9ae2b2 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -736,11 +736,11 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) - casted_column->insertValue(static_cast(value.template get())); + casted_column->insertValue(static_cast(value.template safeGet())); } }; @@ -776,17 +776,17 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) { if (value.getType() == Field::Types::UInt64) - casted_int32_column->insertValue(static_cast(value.get())); + casted_int32_column->insertValue(static_cast(value.safeGet())); else if (value.getType() == Field::Types::Int64) { /// For MYSQL_TYPE_INT24 - const Int32 & num = static_cast(value.get()); + const Int32 & num = static_cast(value.safeGet()); casted_int32_column->insertValue(num & 0x800000 ? num | 0xFF000000 : num); } else @@ -798,7 +798,7 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) @@ -812,12 +812,12 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) { - const String & data = value.get(); + const String & data = value.safeGet(); casted_fixed_string_column->insertData(data.data(), data.size()); } } @@ -864,7 +864,7 @@ static inline size_t onUpdateData(const Row & rows_data, Block & buffer, size_t { writeable_rows_mask[index + 1] = true; writeable_rows_mask[index] = differenceSortingKeys( - rows_data[index].get(), rows_data[index + 1].get(), sorting_columns_index); + rows_data[index].safeGet(), rows_data[index + 1].safeGet(), sorting_columns_index); } for (size_t column = 0; column < buffer.columns() - 2; ++column) diff --git a/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp b/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp index 11299c5b8b1..6f1ba26ee33 100644 --- a/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp +++ b/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp @@ -281,12 +281,12 @@ static void testFile1(IBinlog & binlog, UInt64 timeout, bool filtered = false) ASSERT_EQ(write_event->table, "a"); ASSERT_EQ(write_event->rows.size(), 1); ASSERT_EQ(write_event->rows[0].getType(), Field::Types::Tuple); - auto row_data = write_event->rows[0].get(); + auto row_data = write_event->rows[0].safeGet(); ASSERT_EQ(row_data.size(), 4u); - ASSERT_EQ(row_data[0].get(), 1u); - ASSERT_EQ(row_data[1].get(), 1u); - ASSERT_EQ(row_data[2].get(), 1u); - ASSERT_EQ(row_data[3].get(), 1u); + ASSERT_EQ(row_data[0].safeGet(), 1u); + ASSERT_EQ(row_data[1].safeGet(), 1u); + ASSERT_EQ(row_data[2].safeGet(), 1u); 
+ ASSERT_EQ(row_data[3].safeGet(), 1u); ASSERT_TRUE(binlog.tryReadEvent(event, timeout)); ++count; @@ -342,18 +342,18 @@ static void testFile1(IBinlog & binlog, UInt64 timeout, bool filtered = false) ASSERT_EQ(update_event->table, "a"); ASSERT_EQ(update_event->rows.size(), 2); ASSERT_EQ(update_event->rows[0].getType(), Field::Types::Tuple); - row_data = update_event->rows[0].get(); + row_data = update_event->rows[0].safeGet(); ASSERT_EQ(row_data.size(), 4u); - ASSERT_EQ(row_data[0].get(), 1u); - ASSERT_EQ(row_data[1].get(), 1u); - ASSERT_EQ(row_data[2].get(), 1u); - ASSERT_EQ(row_data[3].get(), 1u); - row_data = update_event->rows[1].get(); + ASSERT_EQ(row_data[0].safeGet(), 1u); + ASSERT_EQ(row_data[1].safeGet(), 1u); + ASSERT_EQ(row_data[2].safeGet(), 1u); + ASSERT_EQ(row_data[3].safeGet(), 1u); + row_data = update_event->rows[1].safeGet(); ASSERT_EQ(row_data.size(), 4u); - ASSERT_EQ(row_data[0].get(), 1u); - ASSERT_EQ(row_data[1].get(), 2u); - ASSERT_EQ(row_data[2].get(), 1u); - ASSERT_EQ(row_data[3].get(), 1u); + ASSERT_EQ(row_data[0].safeGet(), 1u); + ASSERT_EQ(row_data[1].safeGet(), 2u); + ASSERT_EQ(row_data[2].safeGet(), 1u); + ASSERT_EQ(row_data[3].safeGet(), 1u); ASSERT_TRUE(binlog.tryReadEvent(event, timeout)); ++count; diff --git a/src/Dictionaries/CacheDictionaryStorage.h b/src/Dictionaries/CacheDictionaryStorage.h index 47f99bd1093..781822533e9 100644 --- a/src/Dictionaries/CacheDictionaryStorage.h +++ b/src/Dictionaries/CacheDictionaryStorage.h @@ -395,13 +395,13 @@ private: } else if constexpr (std::is_same_v) { - const String & string_value = column_value.get(); + const String & string_value = column_value.safeGet(); StringRef inserted_value = copyStringInArena(arena, string_value); container.back() = inserted_value; } else { - container.back() = static_cast(column_value.get()); + container.back() = static_cast(column_value.safeGet()); } }); } @@ -441,7 +441,7 @@ private: } else if constexpr (std::is_same_v) { - const String & string_value = column_value.get(); + const String & string_value = column_value.safeGet(); StringRef inserted_value = copyStringInArena(arena, string_value); if (!cell_was_default) @@ -454,7 +454,7 @@ private: } else { - container[index_to_use] = static_cast(column_value.get()); + container[index_to_use] = static_cast(column_value.safeGet()); } }); } @@ -651,12 +651,12 @@ private: } else if constexpr (std::is_same_v) { - auto & value = default_value.get(); + auto & value = default_value.safeGet(); value_setter(value); } else { - value_setter(default_value.get()); + value_setter(default_value.safeGet()); } } else diff --git a/src/Dictionaries/DictionaryHelpers.h b/src/Dictionaries/DictionaryHelpers.h index 64fc05e99ab..43fd39640c3 100644 --- a/src/Dictionaries/DictionaryHelpers.h +++ b/src/Dictionaries/DictionaryHelpers.h @@ -345,7 +345,7 @@ public: if (attribute_default_value.isNull()) default_value_is_null = true; else - default_value = static_cast(attribute_default_value.get()); + default_value = static_cast(attribute_default_value.safeGet()); } else { @@ -377,7 +377,7 @@ public: if constexpr (std::is_same_v) { Field field = (*default_values_column)[row]; - return field.get(); + return field.safeGet(); } else if constexpr (std::is_same_v) return default_values_column->getDataAt(row); diff --git a/src/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp index 999160226d9..b0233766741 100644 --- a/src/Dictionaries/FlatDictionary.cpp +++ b/src/Dictionaries/FlatDictionary.cpp @@ -245,7 +245,7 @@ ColumnPtr 
FlatDictionary::getHierarchy(ColumnPtr key_column, const DataTypePtr & std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const ContainerType & parent_keys = std::get>(hierarchical_attribute.container); @@ -300,7 +300,7 @@ ColumnUInt8::Ptr FlatDictionary::isInHierarchy( std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const ContainerType & parent_keys = std::get>(hierarchical_attribute.container); @@ -701,7 +701,7 @@ void FlatDictionary::setAttributeValue(Attribute & attribute, const UInt64 key, return; } - auto & attribute_value = value.get(); + auto & attribute_value = value.safeGet(); auto & container = std::get>(attribute.container); loaded_keys[key] = true; diff --git a/src/Dictionaries/HashedArrayDictionary.cpp b/src/Dictionaries/HashedArrayDictionary.cpp index d7d50dfb0a6..8768be8e5ec 100644 --- a/src/Dictionaries/HashedArrayDictionary.cpp +++ b/src/Dictionaries/HashedArrayDictionary.cpp @@ -240,7 +240,7 @@ ColumnPtr HashedArrayDictionary::getHierarchy(Colu std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); auto is_key_valid_func = [&, this](auto & key) @@ -313,7 +313,7 @@ ColumnUInt8::Ptr HashedArrayDictionary::isInHierar std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); auto is_key_valid_func = [&](auto & key) @@ -581,13 +581,13 @@ void HashedArrayDictionary::blockToAttributes(cons if constexpr (std::is_same_v) { - String & value_to_insert = column_value_to_insert.get(); + String & value_to_insert = column_value_to_insert.safeGet(); StringRef string_in_arena_reference = copyStringInArena(*string_arenas[shard], value_to_insert); attribute_container.back() = string_in_arena_reference; } else { - auto value_to_insert = static_cast(column_value_to_insert.get()); + auto value_to_insert = static_cast(column_value_to_insert.safeGet()); attribute_container.back() = value_to_insert; } }; diff --git a/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h index 3a2b61e5149..7e935fe4855 100644 --- a/src/Dictionaries/HashedDictionary.h +++ b/src/Dictionaries/HashedDictionary.h @@ -636,7 +636,7 @@ ColumnPtr HashedDictionary::getHierarchy(C std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const CollectionsHolder & child_key_to_parent_key_maps = std::get>(hierarchical_attribute.containers); @@ -710,7 +710,7 @@ ColumnUInt8::Ptr HashedDictionary::isInHie std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const CollectionsHolder & child_key_to_parent_key_maps = std::get>(hierarchical_attribute.containers); @@ -1004,13 +1004,13 @@ void HashedDictionary::blockToAttributes(c if constexpr (std::is_same_v) { - String & value_to_insert = column_value_to_insert.get(); + String & value_to_insert = column_value_to_insert.safeGet(); StringRef arena_value = copyStringInArena(*string_arenas[shard], 
value_to_insert); container.insert({key, arena_value}); } else { - auto value_to_insert = static_cast(column_value_to_insert.get()); + auto value_to_insert = static_cast(column_value_to_insert.safeGet()); container.insert({key, value_to_insert}); } diff --git a/src/Dictionaries/HierarchyDictionariesUtils.cpp b/src/Dictionaries/HierarchyDictionariesUtils.cpp index e1119982a34..de532ade26d 100644 --- a/src/Dictionaries/HierarchyDictionariesUtils.cpp +++ b/src/Dictionaries/HierarchyDictionariesUtils.cpp @@ -50,7 +50,7 @@ namespace std::optional null_value; if (!hierarchical_attribute.null_value.isNull()) - null_value = hierarchical_attribute.null_value.get(); + null_value = hierarchical_attribute.null_value.safeGet(); ColumnPtr key_to_request_column = ColumnVector::create(); auto * key_to_request_column_typed = static_cast *>(key_to_request_column->assumeMutable().get()); @@ -190,7 +190,7 @@ ColumnPtr getKeysHierarchyDefaultImplementation( std::optional null_value; if (!hierarchical_attribute.null_value.isNull()) - null_value = hierarchical_attribute.null_value.get(); + null_value = hierarchical_attribute.null_value.safeGet(); auto get_parent_key_func = [&](auto & key) { @@ -252,7 +252,7 @@ ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation( std::optional null_value; if (!hierarchical_attribute.null_value.isNull()) - null_value = hierarchical_attribute.null_value.get(); + null_value = hierarchical_attribute.null_value.safeGet(); auto get_parent_key_func = [&](auto & key) { diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index 41fafcc162b..4f9e991752f 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -613,14 +613,14 @@ void IPAddressDictionary::calculateBytesAllocated() template void IPAddressDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value) { - attribute.null_values = null_value.isNull() ? T{} : T(null_value.get()); + attribute.null_values = null_value.isNull() ? T{} : T(null_value.safeGet()); attribute.maps.emplace>(); } template <> void IPAddressDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value) { - attribute.null_values = null_value.isNull() ? String() : null_value.get(); + attribute.null_values = null_value.isNull() ? 
String() : null_value.safeGet(); attribute.maps.emplace>(); attribute.string_arena = std::make_unique(); } @@ -976,13 +976,13 @@ void IPAddressDictionary::setAttributeValue(Attribute & attribute, const Field & if constexpr (std::is_same_v) { - const auto & string = value.get(); + const auto & string = value.safeGet(); const auto * string_in_arena = attribute.string_arena->insert(string.data(), string.size()); setAttributeValueImpl(attribute, StringRef{string_in_arena, string.size()}); } else { - setAttributeValueImpl(attribute, static_cast(value.get())); + setAttributeValueImpl(attribute, static_cast(value.safeGet())); } }; diff --git a/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp index 46910fa9f6a..c30a6f90e44 100644 --- a/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -1,7 +1,6 @@ #include "MongoDBDictionarySource.h" #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" -#include "registerDictionaries.h" #include #include @@ -233,7 +232,7 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con } case AttributeUnderlyingType::String: { - String loaded_str((*key_columns[attribute_index])[row_idx].get()); + String loaded_str((*key_columns[attribute_index])[row_idx].safeGet()); /// Convert string to ObjectID if (key_attribute.is_object_id) { diff --git a/src/Dictionaries/PolygonDictionary.cpp b/src/Dictionaries/PolygonDictionary.cpp index dfc920623e3..ff29ca1f6b8 100644 --- a/src/Dictionaries/PolygonDictionary.cpp +++ b/src/Dictionaries/PolygonDictionary.cpp @@ -141,7 +141,7 @@ ColumnPtr IPolygonDictionary::getColumn( { getItemsShortCircuitImpl( requested_key_points, - [&](size_t row) { return (*attribute_values_column)[row].get(); }, + [&](size_t row) { return (*attribute_values_column)[row].safeGet(); }, [&](Array & value) { result_column_typed.insert(value); }, default_mask.value()); } @@ -149,7 +149,7 @@ ColumnPtr IPolygonDictionary::getColumn( { getItemsImpl( requested_key_points, - [&](size_t row) { return (*attribute_values_column)[row].get(); }, + [&](size_t row) { return (*attribute_values_column)[row].safeGet(); }, [&](Array & value) { result_column_typed.insert(value); }, default_value_provider.value()); } @@ -432,16 +432,16 @@ void IPolygonDictionary::getItemsImpl( } else if constexpr (std::is_same_v) { - set_value(default_value.get()); + set_value(default_value.safeGet()); } else if constexpr (std::is_same_v) { - auto default_value_string = default_value.get(); + auto default_value_string = default_value.safeGet(); set_value(default_value_string); } else { - set_value(default_value.get>()); + set_value(default_value.safeGet>()); } } } diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index fc6c98990d0..c264b480bcb 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -906,13 +906,13 @@ void RangeHashedDictionary::setAttributeValue(Attribute & a if constexpr (std::is_same_v) { - const auto & string = value.get(); + const auto & string = value.safeGet(); StringRef string_ref = copyStringInArena(string_arena, string); value_to_insert = string_ref; } else { - value_to_insert = static_cast(value.get()); + value_to_insert = static_cast(value.safeGet()); } container.back() = value_to_insert; diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index 1736cdff306..9db639a0ca4 100644 --- 
a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -1,7 +1,6 @@ #include "RedisDictionarySource.h" #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" -#include "registerDictionaries.h" #include #include @@ -160,7 +159,7 @@ namespace DB if (isInteger(type)) key << DB::toString(key_columns[i]->get64(row)); else if (isString(type)) - key << (*key_columns[i])[row].get(); + key << (*key_columns[i])[row].safeGet(); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected type of key in Redis dictionary"); } diff --git a/src/Disks/getOrCreateDiskFromAST.cpp b/src/Disks/getOrCreateDiskFromAST.cpp index fd43f31a009..67bae372f87 100644 --- a/src/Disks/getOrCreateDiskFromAST.cpp +++ b/src/Disks/getOrCreateDiskFromAST.cpp @@ -113,7 +113,7 @@ std::string getOrCreateDiskFromDiskAST(const ASTPtr & disk_function, ContextPtr FlattenDiskConfigurationVisitor::Data data{context, attach}; FlattenDiskConfigurationVisitor{data}.visit(ast); - auto disk_name = assert_cast(*ast).value.get(); + auto disk_name = assert_cast(*ast).value.safeGet(); LOG_TRACE(getLogger("getOrCreateDiskFromDiskAST"), "Result disk name: {}", disk_name); return disk_name; } diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index a7bd398cdaa..5dbafe48c3a 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1196,7 +1196,7 @@ struct ToYearImpl { if (point.getType() != Field::Types::UInt64) return std::nullopt; - auto year = point.get(); + auto year = point.safeGet(); if (year < DATE_LUT_MIN_YEAR || year >= DATE_LUT_MAX_YEAR) return std::nullopt; const DateLUTImpl & date_lut = DateLUT::instance("UTC"); @@ -2001,7 +2001,7 @@ struct ToYYYYMMImpl { if (point.getType() != Field::Types::UInt64) return std::nullopt; - auto year_month = point.get(); + auto year_month = point.safeGet(); auto year = year_month / 100; auto month = year_month % 100; diff --git a/src/Functions/FunctionsConsistentHashing.h b/src/Functions/FunctionsConsistentHashing.h index 306b6395dc5..210bb69e16d 100644 --- a/src/Functions/FunctionsConsistentHashing.h +++ b/src/Functions/FunctionsConsistentHashing.h @@ -101,9 +101,9 @@ private: BucketsType num_buckets; if (buckets_field.getType() == Field::Types::Int64) - num_buckets = checkBucketsRange(buckets_field.get()); + num_buckets = checkBucketsRange(buckets_field.safeGet()); else if (buckets_field.getType() == Field::Types::UInt64) - num_buckets = checkBucketsRange(buckets_field.get()); + num_buckets = checkBucketsRange(buckets_field.safeGet()); else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of the second argument of function {}", diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 675283d011e..0760b929652 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -117,7 +117,7 @@ UInt32 extractToDecimalScale(const ColumnWithTypeAndName & named_column) Field field; named_column.column->get(0, field); - return static_cast(field.get()); + return static_cast(field.safeGet()); } @@ -2604,8 +2604,8 @@ struct ToNumberMonotonicity if (left.isNull() || right.isNull()) return {}; - Float64 left_float = left.get(); - Float64 right_float = right.get(); + Float64 left_float = left.safeGet(); + Float64 right_float = right.safeGet(); if (left_float >= static_cast(std::numeric_limits::min()) && left_float <= static_cast(std::numeric_limits::max()) @@ -2633,11 +2633,11 @@ struct 
ToNumberMonotonicity const bool left_in_first_half = left.isNull() ? from_is_unsigned - : (left.get() >= 0); + : (left.safeGet() >= 0); const bool right_in_first_half = right.isNull() ? !from_is_unsigned - : (right.get() >= 0); + : (right.safeGet() >= 0); /// Size of type is the same. if (size_of_from == size_of_to) @@ -2675,7 +2675,7 @@ struct ToNumberMonotonicity return {}; /// Function cannot be monotonic when left and right are not on the same ranges. - if (divideByRangeOfType(left.get()) != divideByRangeOfType(right.get())) + if (divideByRangeOfType(left.safeGet()) != divideByRangeOfType(right.safeGet())) return {}; if (to_is_unsigned) @@ -2683,7 +2683,7 @@ struct ToNumberMonotonicity else { // If To is signed, it's possible that the signedness is different after conversion. So we check it explicitly. - const bool is_monotonic = (T(left.get()) >= 0) == (T(right.get()) >= 0); + const bool is_monotonic = (T(left.safeGet()) >= 0) == (T(right.safeGet()) >= 0); return { .is_monotonic = is_monotonic }; } @@ -2707,13 +2707,13 @@ struct ToDateMonotonicity } else if ( ((left.getType() == Field::Types::UInt64 || left.isNull()) && (right.getType() == Field::Types::UInt64 || right.isNull()) - && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) + && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF))) || ((left.getType() == Field::Types::Int64 || left.isNull()) && (right.getType() == Field::Types::Int64 || right.isNull()) - && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) + && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF))) || (( (left.getType() == Field::Types::Float64 || left.isNull()) && (right.getType() == Field::Types::Float64 || right.isNull()) - && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF)))) + && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF)))) || !isNativeNumber(type)) { return {}; @@ -2768,16 +2768,16 @@ struct ToStringMonotonicity if (left.getType() == Field::Types::UInt64 && right.getType() == Field::Types::UInt64) { - return (left.get() == 0 && right.get() == 0) - || (floor(log10(left.get())) == floor(log10(right.get()))) + return (left.safeGet() == 0 && right.safeGet() == 0) + || (floor(log10(left.safeGet())) == floor(log10(right.safeGet()))) ? positive : not_monotonic; } if (left.getType() == Field::Types::Int64 && right.getType() == Field::Types::Int64) { - return (left.get() == 0 && right.get() == 0) - || (left.get() > 0 && right.get() > 0 && floor(log10(left.get())) == floor(log10(right.get()))) + return (left.safeGet() == 0 && right.safeGet() == 0) + || (left.safeGet() > 0 && right.safeGet() > 0 && floor(log10(left.safeGet())) == floor(log10(right.safeGet()))) ? 
positive : not_monotonic; } @@ -4673,7 +4673,7 @@ private: return [function_name] ( ColumnsWithTypeAndName & arguments, const DataTypePtr & res_type, const ColumnNullable * nullable_col, size_t /*input_rows_count*/) { - using ColumnEnumType = EnumType::ColumnType; + using ColumnEnumType = typename EnumType::ColumnType; const auto & first_col = arguments.front().column.get(); const auto & first_type = arguments.front().type.get(); diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index c35df8ba72d..1eaf0d1609a 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -243,7 +243,7 @@ private: } case MoveType::Index: { - Int64 index = (*arguments[j + 1].column)[row].get(); + Int64 index = (*arguments[j + 1].column)[row].safeGet(); if (!moveToElementByIndex(res_element, static_cast(index), key)) return false; break; diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index 65d7473b945..ff0cff09c9e 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -701,11 +701,11 @@ ColumnPtr FunctionAnyArityLogical::getConstantResultForNonConstArgum bool constant_value_bool = false; if (field_type == Field::Types::Float64) - constant_value_bool = static_cast(constant_field_value.get()); + constant_value_bool = static_cast(constant_field_value.safeGet()); else if (field_type == Field::Types::Int64) - constant_value_bool = static_cast(constant_field_value.get()); + constant_value_bool = static_cast(constant_field_value.safeGet()); else if (field_type == Field::Types::UInt64) - constant_value_bool = static_cast(constant_field_value.get()); + constant_value_bool = static_cast(constant_field_value.safeGet()); has_true_constant = has_true_constant || constant_value_bool; has_false_constant = has_false_constant || !constant_value_bool; diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 6b65a5feaec..ed7fe1a5de1 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -500,7 +500,7 @@ inline Scale getScaleArg(const ColumnConst* scale_col) { const auto & scale_field = scale_col->getField(); - Int64 scale64 = scale_field.get(); + Int64 scale64 = scale_field.safeGet(); validateScale(scale64); return scale64; @@ -632,7 +632,7 @@ public: Scale raw_scale = scale64; DecimalRoundingImpl::applyOne(value_col_typed->getElement(i), value_col_typed->getScale(), - reinterpret_cast::NativeT&>(col_res->getElement(i)), raw_scale); + reinterpret_cast::NativeT&>(col_res->getElement(i)), raw_scale); } } } @@ -854,7 +854,7 @@ private: using ValueType = typename Container::value_type; std::vector boundary_values(boundaries.size()); for (size_t i = 0; i < boundaries.size(); ++i) - boundary_values[i] = static_cast(boundaries[i].get()); + boundary_values[i] = static_cast(boundaries[i].safeGet()); ::sort(boundary_values.begin(), boundary_values.end()); boundary_values.erase(std::unique(boundary_values.begin(), boundary_values.end()), boundary_values.end()); diff --git a/src/Functions/IFunctionCustomWeek.h b/src/Functions/IFunctionCustomWeek.h index 51542c9cab1..ba0baa35819 100644 --- a/src/Functions/IFunctionCustomWeek.h +++ b/src/Functions/IFunctionCustomWeek.h @@ -50,15 +50,15 @@ public: if (checkAndGetDataType(&type)) { - return Transform::FactorTransform::execute(UInt16(left.get()), date_lut) - == Transform::FactorTransform::execute(UInt16(right.get()), date_lut) + return Transform::FactorTransform::execute(UInt16(left.safeGet()), date_lut) + == 
Transform::FactorTransform::execute(UInt16(right.safeGet()), date_lut) ? is_monotonic : is_not_monotonic; } else { - return Transform::FactorTransform::execute(UInt32(left.get()), date_lut) - == Transform::FactorTransform::execute(UInt32(right.get()), date_lut) + return Transform::FactorTransform::execute(UInt32(left.safeGet()), date_lut) + == Transform::FactorTransform::execute(UInt32(right.safeGet()), date_lut) ? is_monotonic : is_not_monotonic; } diff --git a/src/Functions/IFunctionDateOrDateTime.h b/src/Functions/IFunctionDateOrDateTime.h index 762b79bfafc..899aa2c305d 100644 --- a/src/Functions/IFunctionDateOrDateTime.h +++ b/src/Functions/IFunctionDateOrDateTime.h @@ -72,30 +72,30 @@ public: if (checkAndGetDataType(type_ptr)) { - return Transform::FactorTransform::execute(UInt16(left.get()), *date_lut) - == Transform::FactorTransform::execute(UInt16(right.get()), *date_lut) + return Transform::FactorTransform::execute(UInt16(left.safeGet()), *date_lut) + == Transform::FactorTransform::execute(UInt16(right.safeGet()), *date_lut) ? is_monotonic : is_not_monotonic; } else if (checkAndGetDataType(type_ptr)) { - return Transform::FactorTransform::execute(Int32(left.get()), *date_lut) - == Transform::FactorTransform::execute(Int32(right.get()), *date_lut) + return Transform::FactorTransform::execute(Int32(left.safeGet()), *date_lut) + == Transform::FactorTransform::execute(Int32(right.safeGet()), *date_lut) ? is_monotonic : is_not_monotonic; } else if (checkAndGetDataType(type_ptr)) { - return Transform::FactorTransform::execute(UInt32(left.get()), *date_lut) - == Transform::FactorTransform::execute(UInt32(right.get()), *date_lut) + return Transform::FactorTransform::execute(UInt32(left.safeGet()), *date_lut) + == Transform::FactorTransform::execute(UInt32(right.safeGet()), *date_lut) ? 
is_monotonic : is_not_monotonic; } else { assert(checkAndGetDataType(type_ptr)); - const auto & left_date_time = left.get(); + const auto & left_date_time = left.safeGet(); TransformDateTime64 transformer_left(left_date_time.getScale()); - const auto & right_date_time = right.get(); + const auto & right_date_time = right.safeGet(); TransformDateTime64 transformer_right(right_date_time.getScale()); return transformer_left.execute(left_date_time.getValue(), *date_lut) diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index fb74018b330..84ac0ff08f3 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -46,7 +46,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { return false; } - range_indices.first = static_cast(number_ptr->as()->value.get()); + range_indices.first = static_cast(number_ptr->as()->value.safeGet()); if (pos->type == TokenType::Comma || pos->type == TokenType::ClosingSquareBracket) { @@ -63,7 +63,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { return false; } - range_indices.second = static_cast(number_ptr->as()->value.get()); + range_indices.second = static_cast(number_ptr->as()->value.safeGet()); } else { diff --git a/src/Functions/MultiMatchAllIndicesImpl.h b/src/Functions/MultiMatchAllIndicesImpl.h index 3e9c8fba215..c1a2fb6be2d 100644 --- a/src/Functions/MultiMatchAllIndicesImpl.h +++ b/src/Functions/MultiMatchAllIndicesImpl.h @@ -75,7 +75,7 @@ struct MultiMatchAllIndicesImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); checkHyperscanRegexp(needles, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); diff --git a/src/Functions/MultiMatchAnyImpl.h b/src/Functions/MultiMatchAnyImpl.h index 20b2150048b..ce6a054c064 100644 --- a/src/Functions/MultiMatchAnyImpl.h +++ b/src/Functions/MultiMatchAnyImpl.h @@ -89,7 +89,7 @@ struct MultiMatchAnyImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); checkHyperscanRegexp(needles, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); diff --git a/src/Functions/MultiSearchAllPositionsImpl.h b/src/Functions/MultiSearchAllPositionsImpl.h index cfe60e51bcd..6c2cd215638 100644 --- a/src/Functions/MultiSearchAllPositionsImpl.h +++ b/src/Functions/MultiSearchAllPositionsImpl.h @@ -33,7 +33,7 @@ struct MultiSearchAllPositionsImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 { diff --git a/src/Functions/MultiSearchFirstIndexImpl.h b/src/Functions/MultiSearchFirstIndexImpl.h index 36a5fd514d9..3c6ec8ead44 100644 --- a/src/Functions/MultiSearchFirstIndexImpl.h +++ b/src/Functions/MultiSearchFirstIndexImpl.h @@ -44,7 +44,7 @@ struct MultiSearchFirstIndexImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto searcher = Impl::createMultiSearcherInBigHaystack(needles); diff --git 
a/src/Functions/MultiSearchFirstPositionImpl.h b/src/Functions/MultiSearchFirstPositionImpl.h index ccdd82a0ee5..b5c68ad664d 100644 --- a/src/Functions/MultiSearchFirstPositionImpl.h +++ b/src/Functions/MultiSearchFirstPositionImpl.h @@ -44,7 +44,7 @@ struct MultiSearchFirstPositionImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 { diff --git a/src/Functions/MultiSearchImpl.h b/src/Functions/MultiSearchImpl.h index 467cc96a95f..3eb8e6fb627 100644 --- a/src/Functions/MultiSearchImpl.h +++ b/src/Functions/MultiSearchImpl.h @@ -44,7 +44,7 @@ struct MultiSearchImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto searcher = Impl::createMultiSearcherInBigHaystack(needles); diff --git a/src/Functions/URL/cutURLParameter.cpp b/src/Functions/URL/cutURLParameter.cpp index 3ab9cad1ea7..4439e79e962 100644 --- a/src/Functions/URL/cutURLParameter.cpp +++ b/src/Functions/URL/cutURLParameter.cpp @@ -156,7 +156,7 @@ public: for (size_t j = 0; j < num_needles; ++j) { auto field = col_needle_const_array->getData()[j]; - cutURL(res_data, field.get(), res_offset, cur_res_offset); + cutURL(res_data, field.safeGet(), res_offset, cur_res_offset); } } else diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index 227b29d5d9f..81f3f97979b 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -904,10 +904,10 @@ ColumnPtr FunctionArrayElement::executeNumberConst( return nullptr; if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { ArrayElementNumImpl::template vectorConst( - col_nested->getData(), col_array->getOffsets(), index.get() - 1, col_res_vec->getData(), builder); + col_nested->getData(), col_array->getOffsets(), index.safeGet() - 1, col_res_vec->getData(), builder); } else if (index.getType() == Field::Types::Int64) { @@ -972,14 +972,14 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument auto col_res = ColumnString::create(); if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { if (builder) ArrayElementStringImpl::vectorConst( col_nested->getChars(), col_array->getOffsets(), col_nested->getOffsets(), - index.get() - 1, + index.safeGet() - 1, col_res->getChars(), col_res->getOffsets(), builder); @@ -988,7 +988,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument col_nested->getChars(), col_array->getOffsets(), col_nested->getOffsets(), - index.get() - 1, + index.safeGet() - 1, col_res->getChars(), col_res->getOffsets(), builder); @@ -1000,7 +1000,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument col_nested->getChars(), col_array->getOffsets(), col_nested->getOffsets(), - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), col_res->getChars(), col_res->getOffsets(), builder); @@ -1009,7 +1009,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument col_nested->getChars(), col_array->getOffsets(), 
col_nested->getOffsets(), - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), col_res->getChars(), col_res->getOffsets(), builder); @@ -1046,7 +1046,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( auto res_offsets = ColumnArray::ColumnOffsets::create(); auto res_string_null_map = col_nullable ? ColumnUInt8::create() : nullptr; if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { if (col_nullable) ArrayElementArrayStringImpl::vectorConst( @@ -1055,7 +1055,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), &string_null_map->getData(), - index.get() - 1, + index.safeGet() - 1, res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1068,7 +1068,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), nullptr, - index.get() - 1, + index.safeGet() - 1, res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1084,7 +1084,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), &string_null_map->getData(), - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1097,7 +1097,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), nullptr, - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1153,7 +1153,7 @@ ColumnPtr FunctionArrayElement::executeArrayNumberConst( auto & res_offsets = res_array->getOffsets(); NullMap * res_null_map = res_nullable ? 
&res_nullable->getNullMapData() : nullptr; - if (index.getType() == Field::Types::UInt64 || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + if (index.getType() == Field::Types::UInt64 || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { if (col_nullable) ArrayElementArrayNumImpl::template vectorConst( @@ -1161,7 +1161,7 @@ ColumnPtr FunctionArrayElement::executeArrayNumberConst( col_array->getOffsets(), col_nested_array->getOffsets(), null_map, - index.get() - 1, + index.safeGet() - 1, res_data->getData(), res_offsets, res_null_map, @@ -1172,7 +1172,7 @@ ColumnPtr FunctionArrayElement::executeArrayNumberConst( col_array->getOffsets(), col_nested_array->getOffsets(), null_map, - index.get() - 1, + index.safeGet() - 1, res_data->getData(), res_offsets, res_null_map, @@ -1392,12 +1392,12 @@ ColumnPtr FunctionArrayElement::executeGenericConst( auto col_res = col_nested.cloneEmpty(); if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) ArrayElementGenericImpl::vectorConst( - col_nested, col_array->getOffsets(), index.get() - 1, *col_res, builder); + col_nested, col_array->getOffsets(), index.safeGet() - 1, *col_res, builder); else if (index.getType() == Field::Types::Int64) ArrayElementGenericImpl::vectorConst( - col_nested, col_array->getOffsets(), -(static_cast(index.get() + 1)), *col_res, builder); + col_nested, col_array->getOffsets(), -(static_cast(index.safeGet() + 1)), *col_res, builder); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal type of array index"); @@ -1789,7 +1789,7 @@ bool FunctionArrayElement::matchKeyToIndexStringConst( using DataColumn = std::decay_t; if (index.getType() != Field::Types::String) return false; - MatcherStringConst matcher{data_column, index.get()}; + MatcherStringConst matcher{data_column, index.safeGet()}; executeMatchKeyToIndex(offsets, matched_idxs, matcher); return true; }); diff --git a/src/Functions/array/mapOp.cpp b/src/Functions/array/mapOp.cpp index 86797cb5db0..614b01c2ac8 100644 --- a/src/Functions/array/mapOp.cpp +++ b/src/Functions/array/mapOp.cpp @@ -237,7 +237,7 @@ private: } arg.val_column->get(offset + j, temp_val); - ValType value = temp_val.get(); + ValType value = temp_val.safeGet(); if constexpr (op_type == OpTypes::ADD) { diff --git a/src/Functions/getClientHTTPHeader.cpp b/src/Functions/getClientHTTPHeader.cpp index 140f39d03b8..50a6275fc82 100644 --- a/src/Functions/getClientHTTPHeader.cpp +++ b/src/Functions/getClientHTTPHeader.cpp @@ -58,7 +58,7 @@ public: { Field header; source->get(row, header); - if (auto it = client_info.http_headers.find(header.get()); it != client_info.http_headers.end()) + if (auto it = client_info.http_headers.find(header.safeGet()); it != client_info.http_headers.end()) result->insert(it->second); else result->insertDefault(); diff --git a/src/Functions/multiIf.cpp b/src/Functions/multiIf.cpp index c4b675fcf6c..14b8b70b22c 100644 --- a/src/Functions/multiIf.cpp +++ b/src/Functions/multiIf.cpp @@ -200,7 +200,7 @@ public: if (value.isNull()) continue; - if (value.get() == 0) + if (value.safeGet() == 0) continue; instruction.condition_always_true = true; diff --git a/src/Functions/nested.cpp b/src/Functions/nested.cpp index bdaf57d65c9..85c342b5e7c 100644 --- a/src/Functions/nested.cpp +++ b/src/Functions/nested.cpp @@ -145,7 +145,7 @@ private: if (nested_names_field.getType() != Field::Types::Array) return {}; - const auto & 
nested_names_array = nested_names_field.get(); + const auto & nested_names_array = nested_names_field.safeGet(); Names nested_names; nested_names.reserve(nested_names_array.size()); @@ -155,7 +155,7 @@ private: if (nested_name_field.getType() != Field::Types::String) return {}; - nested_names.push_back(nested_name_field.get()); + nested_names.push_back(nested_name_field.safeGet()); } return nested_names; diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index a794cdbcf05..59040bf1fea 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -294,21 +294,21 @@ void RequestSettings::finishInit(const DB::Settings & settings, bool validate_se /// to avoid losing token bucket state on every config reload, /// which could lead to exceeding limit for short time. /// But it is good enough unless very high `burst` values are used. - if (UInt64 max_get_rps = isChanged("max_get_rps") ? get("max_get_rps").get() : settings.s3_max_get_rps) + if (UInt64 max_get_rps = isChanged("max_get_rps") ? get("max_get_rps").safeGet() : settings.s3_max_get_rps) { size_t default_max_get_burst = settings.s3_max_get_burst ? settings.s3_max_get_burst : (Throttler::default_burst_seconds * max_get_rps); - size_t max_get_burst = isChanged("max_get_burts") ? get("max_get_burst").get() : default_max_get_burst; + size_t max_get_burst = isChanged("max_get_burts") ? get("max_get_burst").safeGet() : default_max_get_burst; get_request_throttler = std::make_shared(max_get_rps, max_get_burst); } - if (UInt64 max_put_rps = isChanged("max_put_rps") ? get("max_put_rps").get() : settings.s3_max_put_rps) + if (UInt64 max_put_rps = isChanged("max_put_rps") ? get("max_put_rps").safeGet() : settings.s3_max_put_rps) { size_t default_max_put_burst = settings.s3_max_put_burst ? settings.s3_max_put_burst : (Throttler::default_burst_seconds * max_put_rps); - size_t max_put_burst = isChanged("max_put_burts") ? get("max_put_burst").get() : default_max_put_burst; + size_t max_put_burst = isChanged("max_put_burts") ? get("max_put_burst").safeGet() : default_max_put_burst; put_request_throttler = std::make_shared(max_put_rps, max_put_burst); } } diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index e1b7e92ee5d..368eb8174f0 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -131,7 +131,7 @@ static Block createBlockFromCollection(const Collection & collection, const Data throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, "Invalid type in set. Expected tuple, got {}", String(value.getTypeName())); - const auto & tuple = value.template get(); + const auto & tuple = value.template safeGet(); size_t tuple_size = tuple.size(); if (tuple_size != columns_num) throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, "Incorrect size of tuple in set: {} instead of {}", @@ -233,7 +233,7 @@ static Block createBlockFromAST(const ASTPtr & node, const DataTypes & types, Co "Invalid type of set. Expected tuple, got {}", function_result.getTypeName()); - tuple = &function_result.get(); + tuple = &function_result.safeGet(); } /// Tuple can be represented as a literal in AST. @@ -246,7 +246,7 @@ static Block createBlockFromAST(const ASTPtr & node, const DataTypes & types, Co "Invalid type in set. 
Expected tuple, got {}", literal->value.getTypeName()); - tuple = &literal->value.get(); + tuple = &literal->value.safeGet(); } assert(tuple || func); @@ -332,14 +332,14 @@ Block createBlockForSet( if (type_index == TypeIndex::Tuple) { const DataTypes & value_types = assert_cast(right_arg_type.get())->getElements(); - block = createBlockFromCollection(right_arg_value.get(), value_types, set_element_types, tranform_null_in); + block = createBlockFromCollection(right_arg_value.safeGet(), value_types, set_element_types, tranform_null_in); } else if (type_index == TypeIndex::Array) { const auto* right_arg_array_type = assert_cast(right_arg_type.get()); - size_t right_arg_array_size = right_arg_value.get().size(); + size_t right_arg_array_size = right_arg_value.safeGet().size(); DataTypes value_types(right_arg_array_size, right_arg_array_type->getNestedType()); - block = createBlockFromCollection(right_arg_value.get(), value_types, set_element_types, tranform_null_in); + block = createBlockFromCollection(right_arg_value.safeGet(), value_types, set_element_types, tranform_null_in); } else throw_unsupported_type(right_arg_type); diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 356bffa75e9..302a5e55c53 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -201,7 +201,7 @@ private: if (literal_value.getType() != Field::Types::String) continue; - auto dictionary_name = literal_value.get(); + auto dictionary_name = literal_value.safeGet(); auto qualified_dictionary_name = context->getExternalDictionariesLoader().qualifyDictionaryNameWithDatabase(dictionary_name, context); literal_value = qualified_dictionary_name.getFullName(); } diff --git a/src/Interpreters/ComparisonTupleEliminationVisitor.cpp b/src/Interpreters/ComparisonTupleEliminationVisitor.cpp index 4f06f345b96..b9f7f37b338 100644 --- a/src/Interpreters/ComparisonTupleEliminationVisitor.cpp +++ b/src/Interpreters/ComparisonTupleEliminationVisitor.cpp @@ -22,7 +22,7 @@ ASTs splitTuple(const ASTPtr & node) if (const auto * literal = node->as(); literal && literal->value.getType() == Field::Types::Tuple) { ASTs result; - const auto & tuple = literal->value.get(); + const auto & tuple = literal->value.safeGet(); for (const auto & child : tuple) result.emplace_back(std::make_shared(child)); return result; diff --git a/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp b/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp index 084bb0a1bb9..220355e0741 100644 --- a/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp +++ b/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp @@ -45,7 +45,7 @@ void ConvertFunctionOrLikeData::visit(ASTFunction & function, ASTPtr &) if (!identifier || !literal || literal->value.getType() != Field::Types::String) continue; - String regexp = likePatternToRegexp(literal->value.get()); + String regexp = likePatternToRegexp(literal->value.safeGet()); /// Case insensitive. Works with UTF-8 as well. 
if (is_ilike) regexp = "(?i)" + regexp; @@ -61,7 +61,7 @@ void ConvertFunctionOrLikeData::visit(ASTFunction & function, ASTPtr &) match->arguments->children.push_back(it->second); unique_elems.push_back(std::move(match)); } - it->second->value.get().push_back(regexp); + it->second->value.safeGet().push_back(regexp); } } diff --git a/src/Interpreters/ConvertStringsToEnumVisitor.cpp b/src/Interpreters/ConvertStringsToEnumVisitor.cpp index 7cc95dc521b..d35baa92900 100644 --- a/src/Interpreters/ConvertStringsToEnumVisitor.cpp +++ b/src/Interpreters/ConvertStringsToEnumVisitor.cpp @@ -33,8 +33,8 @@ String makeStringsEnum(const std::set & values) void changeIfArguments(ASTPtr & first, ASTPtr & second) { - String first_value = first->as()->value.get(); - String second_value = second->as()->value.get(); + String first_value = first->as()->value.safeGet(); + String second_value = second->as()->value.safeGet(); std::set values; values.insert(first_value); @@ -59,9 +59,9 @@ void changeTransformArguments(ASTPtr & array_to, ASTPtr & other) { std::set values; - for (const auto & item : array_to->as()->value.get()) - values.insert(item.get()); - values.insert(other->as()->value.get()); + for (const auto & item : array_to->as()->value.safeGet()) + values.insert(item.safeGet()); + values.insert(other->as()->value.safeGet()); String enum_string = makeStringsEnum(values); @@ -168,7 +168,7 @@ void ConvertStringsToEnumMatcher::visit(ASTFunction & function_node, Data & data if (literal_to->value.getTypeName() != "Array" || literal_other->value.getTypeName() != "String") return; - Array array_to = literal_to->value.get(); + Array array_to = literal_to->value.safeGet(); if (array_to.empty()) return; diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index fa197d59c13..6e08dd5e2cc 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -538,7 +538,7 @@ void DatabaseReplicatedTask::createSyncedNodeIfNeed(const ZooKeeperPtr & zookeep /// Bool type is really weird, sometimes it's Bool and sometimes it's UInt64... 
assert(value.getType() == Field::Types::Bool || value.getType() == Field::Types::UInt64); - if (!value.get()) + if (!value.safeGet()) return; zookeeper->createIfNotExists(getSyncedNodePath(), ""); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 971f90bd3cd..15c6aba8b62 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -689,7 +689,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( throw Exception(ErrorCodes::LOGICAL_ERROR, "Neither default value expression nor type is provided for a column"); if (col_decl.comment) - column.comment = col_decl.comment->as().value.get(); + column.comment = col_decl.comment->as().value.safeGet(); if (col_decl.codec) { @@ -1875,7 +1875,7 @@ void InterpreterCreateQuery::prepareOnClusterQuery(ASTCreateQuery & create, Cont if (has_explicit_zk_path_arg) { - String zk_path = create.storage->engine->arguments->children[0]->as()->value.get(); + String zk_path = create.storage->engine->arguments->children[0]->as()->value.safeGet(); Macros::MacroExpansionInfo info; info.table_id.uuid = create.uuid; info.ignore_unknown = true; diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index bedd9cb4a80..c820f999e0c 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -332,7 +332,7 @@ ExplainSettings checkAndGetSettings(const ASTPtr & ast_settings) if (settings.hasBooleanSetting(change.name)) { - auto value = change.value.get(); + auto value = change.value.safeGet(); if (value > 1) throw Exception(ErrorCodes::INVALID_SETTING_VALUE, "Invalid value {} for setting \"{}\". " "Expected boolean type", value, change.name); @@ -341,7 +341,7 @@ ExplainSettings checkAndGetSettings(const ASTPtr & ast_settings) } else { - auto value = change.value.get(); + auto value = change.value.safeGet(); settings.setIntegerSetting(change.name, value); } } diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 7eb487ba7b3..2c579f3b468 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -334,7 +334,7 @@ BlockIO InterpreterKillQueryQuery::execute() for (size_t i = 0; i < moves_block.rows(); ++i) { table_id = StorageID{database_col.getDataAt(i).toString(), table_col.getDataAt(i).toString()}; - auto task_uuid = task_uuid_col[i].get(); + auto task_uuid = task_uuid_col[i].safeGet(); CancellationCode code = CancellationCode::Unknown; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index cb42a8abf9c..0c79f4310ce 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1232,7 +1232,7 @@ SortDescription InterpreterSelectQuery::getSortDescription(const ASTSelectQuery std::shared_ptr collator; if (order_by_elem.getCollation()) - collator = std::make_shared(order_by_elem.getCollation()->as().value.get()); + collator = std::make_shared(order_by_elem.getCollation()->as().value.safeGet()); if (order_by_elem.with_fill) { diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 57ad5caa4c7..dfffeb437d4 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -220,7 +220,7 @@ bool isStorageTouchedByMutations( Block tmp_block; while 
(executor.pull(tmp_block)); - auto count = (*block.getByName("count()").column)[0].get(); + auto count = (*block.getByName("count()").column)[0].safeGet(); return count != 0; } diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp index dd205ae6508..913f9900b77 100644 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp @@ -42,13 +42,13 @@ ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTy if (isDateOrDate32(column.type.get())) { - start_date_or_date_time = date_lut.dateToString(range.first.get()); - end_date_or_date_time = date_lut.dateToString(range.second.get()); + start_date_or_date_time = date_lut.dateToString(range.first.safeGet()); + end_date_or_date_time = date_lut.dateToString(range.second.safeGet()); } else if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) { - start_date_or_date_time = date_lut.timeToString(range.first.get()); - end_date_or_date_time = date_lut.timeToString(range.second.get()); + start_date_or_date_time = date_lut.timeToString(range.first.safeGet()); + end_date_or_date_time = date_lut.timeToString(range.second.safeGet()); } else [[unlikely]] return {}; diff --git a/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp index 48c9988b6fc..e9a663d53b0 100644 --- a/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp +++ b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp @@ -24,7 +24,7 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v if (literal->value.getType() == Field::Types::Int64 || literal->value.getType() == Field::Types::UInt64) { - value = literal->value.get(); + value = literal->value.safeGet(); return true; } if (literal->value.getType() == Field::Types::Null) @@ -51,7 +51,7 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v { if (type_literal->value.getType() == Field::Types::String) { - const auto & type_str = type_literal->value.get(); + const auto & type_str = type_literal->value.safeGet(); if (type_str == "UInt8" || type_str == "Nullable(UInt8)") return tryExtractConstValueFromCondition(expr_list->children.at(0), value); } diff --git a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp index 54515ea072a..6a0522b0676 100644 --- a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp +++ b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp @@ -72,7 +72,7 @@ bool shardContains( if (sharding_value.isNull()) return false; - UInt64 value = sharding_value.get(); + UInt64 value = sharding_value.safeGet(); const auto shard_num = data.slots[value % data.slots.size()] + 1; return data.shard_info.shard_num == shard_num; } @@ -120,11 +120,20 @@ void OptimizeShardingKeyRewriteInMatcher::visit(ASTFunction & function, Data & d else if (auto * tuple_literal = right->as(); tuple_literal && tuple_literal->value.getType() == Field::Types::Tuple) { - auto & tuple = tuple_literal->value.get(); - std::erase_if(tuple, [&](auto & child) + auto & tuple = tuple_literal->value.safeGet(); + if (tuple.size() > 1) { - return tuple.size() > 1 && !shardContains(child, name, data); - }); + Tuple new_tuple; + + for (const auto & child : tuple) + if (shardContains(child, name, data)) + 
new_tuple.emplace_back(std::move(child)); + + if (new_tuple.size() == 0) + new_tuple.emplace_back(std::move(tuple.back())); + + tuple_literal->value = std::move(new_tuple); + } } } @@ -159,7 +168,7 @@ public: { if (isTuple(constant->getResultType())) { - const auto & tuple = constant->getValue().get(); + const auto & tuple = constant->getValue().safeGet(); Tuple new_tuple; new_tuple.reserve(tuple.size()); diff --git a/src/Interpreters/RewriteCountVariantsVisitor.cpp b/src/Interpreters/RewriteCountVariantsVisitor.cpp index 4a541c3765a..272e1ac735f 100644 --- a/src/Interpreters/RewriteCountVariantsVisitor.cpp +++ b/src/Interpreters/RewriteCountVariantsVisitor.cpp @@ -53,7 +53,7 @@ void RewriteCountVariantsVisitor::visit(ASTFunction & func) { if (first_arg_literal->value.getType() == Field::Types::UInt64) { - auto constant = first_arg_literal->value.get(); + auto constant = first_arg_literal->value.safeGet(); if (constant == 1 && !context->getSettingsRef().aggregate_functions_null_for_empty) transform = true; } diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index b872eb94fde..6483dd3be48 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -184,7 +184,7 @@ void optimizeGroupBy(ASTSelectQuery * select_query, ContextPtr context) const auto & value = group_exprs[i]->as()->value; if (value.getType() == Field::Types::UInt64) { - auto pos = value.get(); + auto pos = value.safeGet(); if (pos > 0 && pos <= select_query->select()->children.size()) keep_position = true; } diff --git a/src/Interpreters/WindowDescription.cpp b/src/Interpreters/WindowDescription.cpp index 31a881001e3..b1e12ff8048 100644 --- a/src/Interpreters/WindowDescription.cpp +++ b/src/Interpreters/WindowDescription.cpp @@ -94,8 +94,8 @@ void WindowFrame::checkValid() const if (begin_type == BoundaryType::Offset && !((begin_offset.getType() == Field::Types::UInt64 || begin_offset.getType() == Field::Types::Int64) - && begin_offset.get() >= 0 - && begin_offset.get() < INT_MAX)) + && begin_offset.safeGet() >= 0 + && begin_offset.safeGet() < INT_MAX)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Frame start offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given", @@ -107,8 +107,8 @@ void WindowFrame::checkValid() const if (end_type == BoundaryType::Offset && !((end_offset.getType() == Field::Types::UInt64 || end_offset.getType() == Field::Types::Int64) - && end_offset.get() >= 0 - && end_offset.get() < INT_MAX)) + && end_offset.safeGet() >= 0 + && end_offset.safeGet() < INT_MAX)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Frame end offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given", diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 1a7c166c6a5..f3957c3b69b 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -57,7 +57,7 @@ template Field convertNumericTypeImpl(const Field & from) { To result; - if (!accurate::convertNumeric(from.get(), result)) + if (!accurate::convertNumeric(from.safeGet(), result)) return {}; return result; } @@ -88,7 +88,7 @@ Field convertNumericType(const Field & from, const IDataType & type) template Field convertIntToDecimalType(const Field & from, const DataTypeDecimal & type) { - From value = from.get(); + From value = from.safeGet(); if (!type.canStoreWhole(value)) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Number is too big to place in {}", type.getName()); @@ 
-100,7 +100,7 @@ Field convertIntToDecimalType(const Field & from, const DataTypeDecimal & typ template Field convertStringToDecimalType(const Field & from, const DataTypeDecimal & type) { - const String & str_value = from.get(); + const String & str_value = from.safeGet(); T value = type.parseFromString(str_value); return DecimalField(value, type.getScale()); } @@ -108,7 +108,7 @@ Field convertStringToDecimalType(const Field & from, const DataTypeDecimal & template Field convertDecimalToDecimalType(const Field & from, const DataTypeDecimal & type) { - auto field = from.get>(); + auto field = from.safeGet>(); T value = convertDecimals, DataTypeDecimal>(field.getValue(), field.getScale(), type.getScale()); return DecimalField(value, type.getScale()); } @@ -116,7 +116,7 @@ Field convertDecimalToDecimalType(const Field & from, const DataTypeDecimal & template Field convertFloatToDecimalType(const Field & from, const DataTypeDecimal & type) { - From value = from.get(); + From value = from.safeGet(); if (!type.canStoreWhole(value)) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Number is too big to place in {}", type.getName()); @@ -182,24 +182,24 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID /// Conversion between Date and DateTime and vice versa. if (which_type.isDate() && which_from_type.isDateTime()) { - return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.get()).toUnderType()); + return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.safeGet()).toUnderType()); } else if (which_type.isDate32() && which_from_type.isDateTime()) { - return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.get()).toUnderType()); + return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.safeGet()).toUnderType()); } else if (which_type.isDateTime() && which_from_type.isDate()) { - return static_cast(type).getTimeZone().fromDayNum(DayNum(src.get())); + return static_cast(type).getTimeZone().fromDayNum(DayNum(src.safeGet())); } else if (which_type.isDateTime() && which_from_type.isDate32()) { - return static_cast(type).getTimeZone().fromDayNum(DayNum(src.get())); + return static_cast(type).getTimeZone().fromDayNum(DayNum(src.safeGet())); } else if (which_type.isDateTime64() && which_from_type.isDate()) { const auto & date_time64_type = static_cast(type); - const auto value = date_time64_type.getTimeZone().fromDayNum(DayNum(src.get())); + const auto value = date_time64_type.getTimeZone().fromDayNum(DayNum(src.safeGet())); return DecimalField( DecimalUtils::decimalFromComponentsWithMultiplier(value, 0, date_time64_type.getScaleMultiplier()), date_time64_type.getScale()); @@ -207,7 +207,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID else if (which_type.isDateTime64() && which_from_type.isDate32()) { const auto & date_time64_type = static_cast(type); - const auto value = date_time64_type.getTimeZone().fromDayNum(ExtendedDayNum(static_cast(src.get()))); + const auto value = date_time64_type.getTimeZone().fromDayNum(ExtendedDayNum(static_cast(src.safeGet()))); return DecimalField( DecimalUtils::decimalFromComponentsWithMultiplier(value, 0, date_time64_type.getScaleMultiplier()), date_time64_type.getScale()); @@ -253,7 +253,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (which_type.isDateTime64() && src.getType() == Field::Types::Decimal64) { - const auto & from_type = src.get(); + const auto & from_type = src.safeGet(); 
const auto & to_type = static_cast(type); const auto scale_from = from_type.getScale(); @@ -318,7 +318,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (which_from_type.isFixedString() && assert_cast(from_type_hint)->getN() == IPV6_BINARY_LENGTH) { const auto col = type.createColumn(); - ReadBufferFromString in_buffer(src.get()); + ReadBufferFromString in_buffer(src.safeGet()); type.getDefaultSerialization()->deserializeBinary(*col, in_buffer, {}); return (*col)[0]; } @@ -330,7 +330,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (which_type.isFixedString()) { size_t n = assert_cast(type).getN(); - const auto & src_str = src.get(); + const auto & src_str = src.safeGet(); if (src_str.size() < n) { String src_str_extended = src_str; @@ -347,7 +347,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID { if (src.getType() == Field::Types::Array) { - const Array & src_arr = src.get(); + const Array & src_arr = src.safeGet(); size_t src_arr_size = src_arr.size(); const auto & element_type = *(type_array->getNestedType()); @@ -370,7 +370,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID { if (src.getType() == Field::Types::Tuple) { - const auto & src_tuple = src.get(); + const auto & src_tuple = src.safeGet(); size_t src_tuple_size = src_tuple.size(); size_t dst_tuple_size = type_tuple->getElements().size(); @@ -415,7 +415,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID const auto & key_type = *type_map->getKeyType(); const auto & value_type = *type_map->getValueType(); - const auto & map = src.get(); + const auto & map = src.safeGet(); size_t map_size = map.size(); Map res(map_size); @@ -424,7 +424,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID for (size_t i = 0; i < map_size; ++i) { - const auto & map_entry = map[i].get(); + const auto & map_entry = map[i].safeGet(); const auto & key = map_entry[0]; const auto & value = map_entry[1]; @@ -453,7 +453,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID "Cannot convert {} to {}", src.getTypeName(), agg_func_type->getName()); - const auto & name = src.get().name; + const auto & name = src.safeGet().name; if (agg_func_type->getName() != name) throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert {} to {}", name, agg_func_type->getName()); @@ -468,7 +468,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (src.getType() == Field::Types::Tuple && from_type_tuple && from_type_tuple->haveExplicitNames()) { const auto & names = from_type_tuple->getElementNames(); - const auto & tuple = src.get(); + const auto & tuple = src.safeGet(); if (names.size() != tuple.size()) throw Exception(ErrorCodes::TYPE_MISMATCH, @@ -485,10 +485,10 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (src.getType() == Field::Types::Map) { Object object; - const auto & map = src.get(); + const auto & map = src.safeGet(); for (const auto & element : map) { - const auto & map_entry = element.get(); + const auto & map_entry = element.safeGet(); const auto & key = map_entry[0]; const auto & value = map_entry[1]; @@ -496,7 +496,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert from Map with key of type {} to Object", key.getTypeName()); - object[key.get()] = 
value; + object[key.safeGet()] = value; } return object; @@ -537,7 +537,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } const auto col = type_to_parse->createColumn(); - ReadBufferFromString in_buffer(src.get()); + ReadBufferFromString in_buffer(src.safeGet()); try { type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{}); @@ -545,9 +545,9 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID catch (Exception & e) { if (e.code() == ErrorCodes::UNEXPECTED_DATA_AFTER_PARSED_VALUE) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert string '{}' to type {}", src.get(), type.getName()); + throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert string '{}' to type {}", src.safeGet(), type.getName()); - e.addMessage(fmt::format("while converting '{}' to {}", src.get(), type.getName())); + e.addMessage(fmt::format("while converting '{}' to {}", src.safeGet(), type.getName())); throw; } @@ -610,7 +610,7 @@ Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_t template static bool decimalEqualsFloat(Field field, Float64 float_value) { - auto decimal_field = field.get>(); + auto decimal_field = field.safeGet>(); auto decimal_to_float = DecimalUtils::convertTo(decimal_field.getValue(), decimal_field.getScale()); return decimal_to_float == float_value; } @@ -629,13 +629,13 @@ std::optional convertFieldToTypeStrict(const Field & from_value, const ID { /// Convert back to Float64 and compare if (result_value.getType() == Field::Types::Decimal32) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; if (result_value.getType() == Field::Types::Decimal64) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; if (result_value.getType() == Field::Types::Decimal128) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; if (result_value.getType() == Field::Types::Decimal256) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown decimal type {}", result_value.getTypeName()); } diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 4bfc80af1fe..d4bb0cc2f8a 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -297,7 +297,7 @@ namespace { if (tuple_literal->value.getType() == Field::Types::Tuple) { - const auto & tuple = tuple_literal->value.get(); + const auto & tuple = tuple_literal->value.safeGet(); for (const auto & child : tuple) { const auto dnf = analyzeEquals(identifier, child, expr); @@ -792,7 +792,7 @@ std::optional evaluateExpressionOverConstantCondition(const ASTPtr & nod else if (const auto * literal = node->as()) { // Check if it's always true or false. 
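// Aside: the comment above introduces the branch below, which appears to treat a UInt64 literal
// equal to 0 as a condition that can never hold and leaves every other literal undecided. A
// standalone sketch of that classification; ToyLiteral and isAlwaysFalse are made-up names for
// illustration only.

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <variant>

using ToyLiteral = std::variant<uint64_t, std::string>;

/// true    -> the literal is provably false (an unsigned integer equal to 0);
/// nullopt -> nothing can be concluded, evaluate the condition normally.
std::optional<bool> isAlwaysFalse(const ToyLiteral & lit)
{
    if (const uint64_t * v = std::get_if<uint64_t>(&lit))
        if (*v == 0)
            return true;
    return std::nullopt;
}

int main()
{
    std::cout << isAlwaysFalse(ToyLiteral{uint64_t{0}}).value_or(false) << '\n';   /// 1
    std::cout << isAlwaysFalse(ToyLiteral{uint64_t{7}}).has_value() << '\n';       /// 0 (undecided)
    std::cout << isAlwaysFalse(ToyLiteral{std::string{"x"}}).has_value() << '\n';  /// 0 (undecided)
}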
- if (literal->value.getType() == Field::Types::UInt64 && literal->value.get() == 0) + if (literal->value.getType() == Field::Types::UInt64 && literal->value.safeGet() == 0) return {result}; else return {}; diff --git a/src/Interpreters/replaceForPositionalArguments.cpp b/src/Interpreters/replaceForPositionalArguments.cpp index 3d60723a167..ee967f45c74 100644 --- a/src/Interpreters/replaceForPositionalArguments.cpp +++ b/src/Interpreters/replaceForPositionalArguments.cpp @@ -35,11 +35,11 @@ bool replaceForPositionalArguments(ASTPtr & argument, const ASTSelectQuery * sel if (which == Field::Types::UInt64) { - pos = ast_literal->value.get(); + pos = ast_literal->value.safeGet(); } else if (which == Field::Types::Int64) { - auto value = ast_literal->value.get(); + auto value = ast_literal->value.safeGet(); if (value > 0) pos = value; else diff --git a/src/Interpreters/tests/gtest_comparison_graph.cpp b/src/Interpreters/tests/gtest_comparison_graph.cpp index ac24a8de368..5f93bb983c1 100644 --- a/src/Interpreters/tests/gtest_comparison_graph.cpp +++ b/src/Interpreters/tests/gtest_comparison_graph.cpp @@ -29,7 +29,7 @@ TEST(ComparisonGraph, Bounds) const auto & [lower, strict] = *res; - ASSERT_EQ(lower.get(), 3); + ASSERT_EQ(lower.safeGet(), 3); ASSERT_TRUE(strict); } @@ -39,7 +39,7 @@ TEST(ComparisonGraph, Bounds) const auto & [upper, strict] = *res; - ASSERT_EQ(upper.get(), 7); + ASSERT_EQ(upper.safeGet(), 7); ASSERT_TRUE(strict); } diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index cd9e910d45a..d42728addb7 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -522,7 +522,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format if (tuple_arguments_valid && lit_right) { if (isInt64OrUInt64FieldType(lit_right->value.getType()) - && lit_right->value.get() >= 0) + && lit_right->value.safeGet() >= 0) { if (frame.need_parens) settings.ostr << '('; diff --git a/src/Parsers/ASTLiteral.cpp b/src/Parsers/ASTLiteral.cpp index 8dedc5dc95d..515f4f0cb9f 100644 --- a/src/Parsers/ASTLiteral.cpp +++ b/src/Parsers/ASTLiteral.cpp @@ -73,8 +73,8 @@ void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const /// Special case for very large arrays and tuples. Instead of listing all elements, will use hash of them. /// (Otherwise column name will be too long, that will lead to significant slowdown of expression analysis.) auto type = value.getType(); - if ((type == Field::Types::Array && value.get().size() > min_elements_for_hashing) - || (type == Field::Types::Tuple && value.get().size() > min_elements_for_hashing)) + if ((type == Field::Types::Array && value.safeGet().size() > min_elements_for_hashing) + || (type == Field::Types::Tuple && value.safeGet().size() > min_elements_for_hashing)) { SipHash hash; applyVisitor(FieldVisitorHash(hash), value); @@ -92,7 +92,7 @@ void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const /// for tons of literals as it creates temporary String. if (value.getType() == Field::Types::String) { - writeQuoted(value.get(), ostr); + writeQuoted(value.safeGet(), ostr); } else { @@ -110,7 +110,7 @@ void ASTLiteral::appendColumnNameImplLegacy(WriteBuffer & ostr) const /// Special case for very large arrays. Instead of listing all elements, will use hash of them. /// (Otherwise column name will be too long, that will lead to significant slowdown of expression analysis.) 
auto type = value.getType(); - if ((type == Field::Types::Array && value.get().size() > min_elements_for_hashing)) + if ((type == Field::Types::Array && value.safeGet().size() > min_elements_for_hashing)) { SipHash hash; applyVisitor(FieldVisitorHash(hash), value); diff --git a/src/Parsers/Access/ParserCreateQuotaQuery.cpp b/src/Parsers/Access/ParserCreateQuotaQuery.cpp index ddfdbe38903..ddf4e9ecda5 100644 --- a/src/Parsers/Access/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/Access/ParserCreateQuotaQuery.cpp @@ -114,7 +114,7 @@ namespace T fieldToNumber(const Field & f) { if (f.getType() == Field::Types::String) - return parseWithSizeSuffix(boost::algorithm::trim_copy(f.get())); + return parseWithSizeSuffix(boost::algorithm::trim_copy(f.safeGet())); else return applyVisitor(FieldVisitorConvertToNumber(), f); } diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 9927acdcf17..de395d120d7 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -314,7 +314,7 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex ASTPtr ast_uuid; if (!uuid_p.parse(pos, ast_uuid, expected)) return false; - uuid = parseFromString(ast_uuid->as()->value.get()); + uuid = parseFromString(ast_uuid->as()->value.safeGet()); } if (parts.size() == 1) node = std::make_shared(parts[0], std::move(params)); @@ -1626,7 +1626,7 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e if (!parser_string_literal.parse(pos, ast_prefix_name, expected)) return false; - column_name_prefix = ast_prefix_name->as().value.get(); + column_name_prefix = ast_prefix_name->as().value.safeGet(); } if (with_open_round_bracket) @@ -1689,7 +1689,7 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e auto res = std::make_shared(); if (regexp_node) - res->setPattern(regexp_node->as().value.get()); + res->setPattern(regexp_node->as().value.safeGet()); else res->children = std::move(identifiers); res->is_strict = is_strict; @@ -1861,7 +1861,7 @@ static bool parseColumnsMatcherBody(IParser::Pos & pos, ASTPtr & node, Expected else { auto regexp_matcher = std::make_shared(); - regexp_matcher->setPattern(regexp_node->as().value.get()); + regexp_matcher->setPattern(regexp_node->as().value.safeGet()); if (!transformers->children.empty()) { @@ -2310,7 +2310,7 @@ bool ParserTTLElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!parser_string_literal.parse(pos, ast_space_name, expected)) return false; - destination_name = ast_space_name->as().value.get(); + destination_name = ast_space_name->as().value.safeGet(); } else if (mode == TTLMode::GROUP_BY) { diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index dbefb0cb966..73fd563faf6 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -517,7 +517,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_space_name, expected)) return false; - command->move_destination_name = ast_space_name->as().value.get(); + command->move_destination_name = ast_space_name->as().value.safeGet(); } else if (s_move_partition.ignore(pos, expected)) { @@ -545,7 +545,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_space_name, expected)) return false; - command->move_destination_name = ast_space_name->as().value.get(); 
+ command->move_destination_name = ast_space_name->as().value.safeGet(); } } else if (s_add_constraint.ignore(pos, expected)) @@ -638,7 +638,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_from, expected)) return false; - command->from = ast_from->as().value.get(); + command->from = ast_from->as().value.safeGet(); command->type = ASTAlterCommand::FETCH_PARTITION; } else if (s_fetch_part.ignore(pos, expected)) @@ -652,7 +652,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ASTPtr ast_from; if (!parser_string_literal.parse(pos, ast_from, expected)) return false; - command->from = ast_from->as().value.get(); + command->from = ast_from->as().value.safeGet(); command->part = true; command->type = ASTAlterCommand::FETCH_PARTITION; } @@ -680,7 +680,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_with_name, expected)) return false; - command->with_name = ast_with_name->as().value.get(); + command->with_name = ast_with_name->as().value.safeGet(); } } else if (s_unfreeze.ignore(pos, expected)) @@ -707,7 +707,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_with_name, expected)) return false; - command->with_name = ast_with_name->as().value.get(); + command->with_name = ast_with_name->as().value.safeGet(); } else { diff --git a/src/Parsers/ParserCheckQuery.cpp b/src/Parsers/ParserCheckQuery.cpp index 42716ba7f2c..33b6a5a1ac2 100644 --- a/src/Parsers/ParserCheckQuery.cpp +++ b/src/Parsers/ParserCheckQuery.cpp @@ -55,7 +55,7 @@ bool ParserCheckQuery::parseCheckTable(Pos & pos, ASTPtr & node, Expected & expe const auto * ast_literal = ast_part_name->as(); if (!ast_literal || ast_literal->value.getType() != Field::Types::String) return false; - query->part_name = ast_literal->value.get(); + query->part_name = ast_literal->value.safeGet(); } if (query->database) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index a592975613b..3621a695272 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -922,7 +922,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->is_create_empty = is_create_empty; if (from_path) - query->attach_from_path = from_path->as().value.get(); + query->attach_from_path = from_path->as().value.safeGet(); return true; } @@ -1431,7 +1431,7 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e ASTPtr ast_uuid; if (!uuid_p.parse(pos, ast_uuid, expected)) return false; - uuid = parseFromString(ast_uuid->as()->value.get()); + uuid = parseFromString(ast_uuid->as()->value.safeGet()); } if (s_on.ignore(pos, expected)) diff --git a/src/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp index 83a006231d9..ce38d1b54d1 100644 --- a/src/Parsers/ParserDictionary.cpp +++ b/src/Parsers/ParserDictionary.cpp @@ -33,7 +33,7 @@ bool ParserDictionaryLifetime::parseImpl(Pos & pos, ASTPtr & node, Expected & ex if (literal.value.getType() != Field::Types::UInt64) return false; - res->max_sec = literal.value.get(); + res->max_sec = literal.value.safeGet(); node = res; return true; } @@ -58,10 +58,10 @@ bool ParserDictionaryLifetime::parseImpl(Pos & pos, ASTPtr & node, Expected & ex return false; if (pair.first == "min") - res->min_sec = literal->value.get(); + res->min_sec = 
literal->value.safeGet(); else if (pair.first == "max") { - res->max_sec = literal->value.get(); + res->max_sec = literal->value.safeGet(); initialized_max = true; } else diff --git a/src/Parsers/ParserPartition.cpp b/src/Parsers/ParserPartition.cpp index 80a28f4803e..ab97b3d0e3b 100644 --- a/src/Parsers/ParserPartition.cpp +++ b/src/Parsers/ParserPartition.cpp @@ -65,7 +65,7 @@ bool ParserPartition::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { if (literal_ast->value.getType() == Field::Types::Tuple) { - fields_count = literal_ast->value.get().size(); + fields_count = literal_ast->value.safeGet().size(); } else { diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 0545c3e5568..81b64ab47c6 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -445,7 +445,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & ASTPtr ast; if (!ParserStringLiteral{}.parse(pos, ast, expected)) return false; - String time_str = ast->as().value.get(); + String time_str = ast->as().value.safeGet(); ReadBufferFromString buf(time_str); time_t time; readDateTimeText(time, buf); @@ -467,7 +467,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & return false; } - res->seconds = seconds->as()->value.get(); + res->seconds = seconds->as()->value.safeGet(); break; } case Type::DROP_FILESYSTEM_CACHE: @@ -538,7 +538,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & ASTPtr ast; if (ParserKeyword{Keyword::WITH_NAME}.ignore(pos, expected) && ParserStringLiteral{}.parse(pos, ast, expected)) { - res->backup_name = ast->as().value.get(); + res->backup_name = ast->as().value.safeGet(); } else { @@ -577,7 +577,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & if (!ParserStringLiteral{}.parse(pos, ast, expected)) return false; - custom_name = ast->as().value.get(); + custom_name = ast->as().value.safeGet(); } return true; diff --git a/src/Parsers/ParserUndropQuery.cpp b/src/Parsers/ParserUndropQuery.cpp index 07ca8a3b5fd..57da47df70d 100644 --- a/src/Parsers/ParserUndropQuery.cpp +++ b/src/Parsers/ParserUndropQuery.cpp @@ -41,7 +41,7 @@ bool parseUndropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected) ASTPtr ast_uuid; if (!uuid_p.parse(pos, ast_uuid, expected)) return false; - uuid = parseFromString(ast_uuid->as()->value.get()); + uuid = parseFromString(ast_uuid->as()->value.safeGet()); } if (ParserKeyword{Keyword::ON}.ignore(pos, expected)) { diff --git a/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp index a1ba46125a7..af3591750a1 100644 --- a/src/Parsers/tests/gtest_dictionary_parser.cpp +++ b/src/Parsers/tests/gtest_dictionary_parser.cpp @@ -56,21 +56,21 @@ TEST(ParserDictionaryDDL, SimpleDictionary) EXPECT_EQ(create->dictionary->source->name, "clickhouse"); auto children = create->dictionary->source->elements->children; EXPECT_EQ(children[0]->as() -> first, "host"); - EXPECT_EQ(children[0]->as()->second->as()->value.get(), "localhost"); + EXPECT_EQ(children[0]->as()->second->as()->value.safeGet(), "localhost"); EXPECT_EQ(children[1]->as()->first, "port"); - EXPECT_EQ(children[1]->as()->second->as()->value.get(), 9000); + EXPECT_EQ(children[1]->as()->second->as()->value.safeGet(), 9000); EXPECT_EQ(children[2]->as()->first, "user"); - EXPECT_EQ(children[2]->as()->second->as()->value.get(), "default"); + 
EXPECT_EQ(children[2]->as()->second->as()->value.safeGet(), "default"); EXPECT_EQ(children[3]->as()->first, "password"); - EXPECT_EQ(children[3]->as()->second->as()->value.get(), ""); + EXPECT_EQ(children[3]->as()->second->as()->value.safeGet(), ""); EXPECT_EQ(children[4]->as()->first, "db"); - EXPECT_EQ(children[4]->as()->second->as()->value.get(), "test"); + EXPECT_EQ(children[4]->as()->second->as()->value.safeGet(), "test"); EXPECT_EQ(children[5]->as()->first, "table"); - EXPECT_EQ(children[5]->as()->second->as()->value.get(), "table_for_dict"); + EXPECT_EQ(children[5]->as()->second->as()->value.safeGet(), "table_for_dict"); /// layout test auto * layout = create->dictionary->layout; @@ -102,9 +102,9 @@ TEST(ParserDictionaryDDL, SimpleDictionary) EXPECT_EQ(attributes_children[1]->as()->name, "second_column"); EXPECT_EQ(attributes_children[2]->as()->name, "third_column"); - EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.get(), 0); - EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.get(), 1); - EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.get(), 2); + EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.safeGet(), 0); + EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.safeGet(), 1); + EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.safeGet(), 2); EXPECT_EQ(attributes_children[0]->as()->expression, nullptr); EXPECT_EQ(attributes_children[1]->as()->expression, nullptr); @@ -150,8 +150,8 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties) EXPECT_EQ(attributes_children[2]->as()->name, "third_column"); EXPECT_EQ(attributes_children[0]->as()->default_value, nullptr); - EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.get(), 1); - EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.get(), 2); + EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.safeGet(), 1); + EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.safeGet(), 2); EXPECT_EQ(attributes_children[0]->as()->expression, nullptr); EXPECT_EQ(attributes_children[1]->as()->expression, nullptr); @@ -195,9 +195,9 @@ TEST(ParserDictionaryDDL, CustomAttributePropertiesOrder) EXPECT_EQ(attributes_children[1]->as()->name, "second_column"); EXPECT_EQ(attributes_children[2]->as()->name, "third_column"); - EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.get(), 100); - EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.get(), 1); - EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.get(), 2); + EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.safeGet(), 100); + EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.safeGet(), 1); + EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.safeGet(), 2); EXPECT_EQ(attributes_children[0]->as()->expression, nullptr); EXPECT_EQ(attributes_children[1]->as()->expression, nullptr); @@ -248,25 +248,25 @@ TEST(ParserDictionaryDDL, NestedSource) auto children = create->dictionary->source->elements->children; EXPECT_EQ(children[0]->as()->first, "host"); - EXPECT_EQ(children[0]->as()->second->as()->value.get(), "localhost"); + EXPECT_EQ(children[0]->as()->second->as()->value.safeGet(), "localhost"); EXPECT_EQ(children[1]->as()->first, "port"); - EXPECT_EQ(children[1]->as()->second->as()->value.get(), 9000); + EXPECT_EQ(children[1]->as()->second->as()->value.safeGet(), 9000); EXPECT_EQ(children[2]->as()->first, "user"); - 
EXPECT_EQ(children[2]->as()->second->as()->value.get(), "default"); + EXPECT_EQ(children[2]->as()->second->as()->value.safeGet(), "default"); EXPECT_EQ(children[3]->as()->first, "replica"); auto replica = children[3]->as()->second->children; EXPECT_EQ(replica[0]->as()->first, "host"); - EXPECT_EQ(replica[0]->as()->second->as()->value.get(), "127.0.0.1"); + EXPECT_EQ(replica[0]->as()->second->as()->value.safeGet(), "127.0.0.1"); EXPECT_EQ(replica[1]->as()->first, "priority"); - EXPECT_EQ(replica[1]->as()->second->as()->value.get(), 1); + EXPECT_EQ(replica[1]->as()->second->as()->value.safeGet(), 1); EXPECT_EQ(children[4]->as()->first, "password"); - EXPECT_EQ(children[4]->as()->second->as()->value.get(), ""); + EXPECT_EQ(children[4]->as()->second->as()->value.safeGet(), ""); } diff --git a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp index 58bf4c1a2fc..30301b242db 100644 --- a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp +++ b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp @@ -185,7 +185,7 @@ namespace DB } else { - auto value = static_cast(column[value_i].get>().getValue()); + auto value = static_cast(column[value_i].safeGet>().getValue()); if (need_rescale) { if (common::mulOverflow(value, rescale_multiplier, value)) diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp index 06e8668cd7c..566a036d79c 100644 --- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp +++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp @@ -208,20 +208,20 @@ private: /// Do not replace empty array and array of NULLs if (literal->value.getType() == Field::Types::Array) { - const Array & array = literal->value.get(); + const Array & array = literal->value.safeGet(); auto not_null = std::find_if_not(array.begin(), array.end(), [](const auto & elem) { return elem.isNull(); }); if (not_null == array.end()) return true; } else if (literal->value.getType() == Field::Types::Map) { - const Map & map = literal->value.get(); + const Map & map = literal->value.safeGet(); if (map.size() % 2) return false; } else if (literal->value.getType() == Field::Types::Tuple) { - const Tuple & tuple = literal->value.get(); + const Tuple & tuple = literal->value.safeGet(); for (const auto & value : tuple) if (value.isNull()) diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 649721f28bf..58bec8120f1 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -262,7 +262,7 @@ convertFieldToORCLiteral(const orc::Type & orc_type, const Field & field, DataTy { case orc::BOOLEAN: { /// May throw exception - auto val = field.get(); + auto val = field.safeGet(); return orc::Literal(val != 0); } case orc::BYTE: @@ -275,7 +275,7 @@ convertFieldToORCLiteral(const orc::Type & orc_type, const Field & field, DataTy /// SELECT * FROM file('t.orc', ORC, 'x UInt8') WHERE x > 10 /// We have to reject this, otherwise it would miss values > 127 (because /// they're treated as negative by ORC). 
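// Aside: the comment above explains why a UInt8 filter constant cannot always be pushed down as
// a signed ORC BYTE literal: values above 127 have no faithful signed representation. The exact
// rejection logic in NativeORCBlockInputFormat is not visible in this hunk; below is only a
// conservative sketch of such a narrowing guard, with an assumed helper name.

#include <cstdint>
#include <iostream>
#include <optional>

/// Returns the signed-byte representation of an unsigned value, or nullopt when the value does
/// not fit and the predicate pushdown should be skipped rather than silently wrapping around.
std::optional<int8_t> tryNarrowToSignedByte(uint64_t value)
{
    if (value > 127)
        return std::nullopt;
    return static_cast<int8_t>(value);
}

int main()
{
    std::cout << static_cast<int>(tryNarrowToSignedByte(10).value()) << '\n';   /// 10
    std::cout << tryNarrowToSignedByte(200).has_value() << '\n';                /// 0 (skip pushdown)
}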
- auto val = field.get(); + auto val = field.safeGet(); return orc::Literal(val); } case orc::FLOAT: diff --git a/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp b/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp index 3578401a0f8..b43c195f201 100644 --- a/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp @@ -286,10 +286,10 @@ static void columnMapToContainer(const ColumnMap * col_map, size_t row_num, Cont { Field field; col_map->get(row_num, field); - const auto & map_field = field.get(); + const auto & map_field = field.safeGet(); for (const auto & map_element : map_field) { - const auto & map_entry = map_element.get(); + const auto & map_entry = map_element.safeGet(); String entry_key; String entry_value; diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index de34a8aa04f..9839f64b947 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -333,7 +333,7 @@ namespace { const DataTypeTuple & type_tuple = static_cast(data_type); - Tuple & tuple_value = value.get(); + Tuple & tuple_value = value.safeGet(); size_t src_tuple_size = tuple_value.size(); size_t dst_tuple_size = type_tuple.getElements().size(); @@ -360,7 +360,7 @@ namespace if (element_type.isNullable()) return; - Array & array_value = value.get(); + Array & array_value = value.safeGet(); size_t array_value_size = array_value.size(); for (size_t i = 0; i < array_value_size; ++i) @@ -378,12 +378,12 @@ namespace const auto & key_type = *type_map.getKeyType(); const auto & value_type = *type_map.getValueType(); - auto & map = value.get(); + auto & map = value.safeGet(); size_t map_size = map.size(); for (size_t i = 0; i < map_size; ++i) { - auto & map_entry = map[i].get(); + auto & map_entry = map[i].safeGet(); auto & entry_key = map_entry[0]; auto & entry_value = map_entry[1]; diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp index e2c6371c44f..80c00f91d82 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp @@ -127,14 +127,14 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc, Row right(left.size()); for (size_t col_num : desc.key_col_nums) - right[col_num] = (*raw_columns[col_num])[row_number].template get(); + right[col_num] = (*raw_columns[col_num])[row_number].template safeGet(); for (size_t col_num : desc.val_col_nums) - right[col_num] = (*raw_columns[col_num])[row_number].template get(); + right[col_num] = (*raw_columns[col_num])[row_number].template safeGet(); auto at_ith_column_jth_row = [&](const Row & matrix, size_t i, size_t j) -> const Field & { - return matrix[i].get()[j]; + return matrix[i].safeGet()[j]; }; auto tuple_of_nth_columns_at_jth_row = [&](const Row & matrix, const ColumnNumbers & col_nums, size_t j) -> Array @@ -160,7 +160,7 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc, auto merge = [&](const Row & matrix) { - size_t rows = matrix[desc.key_col_nums[0]].get().size(); + size_t rows = matrix[desc.key_col_nums[0]].safeGet().size(); for (size_t j = 0; j < rows; ++j) { @@ -190,10 +190,10 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc, for (const auto & key_value : merged) { for (size_t col_num_index = 0, size = 
desc.key_col_nums.size(); col_num_index < size; ++col_num_index) - row[desc.key_col_nums[col_num_index]].get()[row_num] = key_value.first[col_num_index]; + row[desc.key_col_nums[col_num_index]].safeGet()[row_num] = key_value.first[col_num_index]; for (size_t col_num_index = 0, size = desc.val_col_nums.size(); col_num_index < size; ++col_num_index) - row[desc.val_col_nums[col_num_index]].get()[row_num] = key_value.second[col_num_index]; + row[desc.val_col_nums[col_num_index]].safeGet()[row_num] = key_value.second[col_num_index]; ++row_num; } diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 2080e29ceba..596d08845e1 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -119,23 +119,23 @@ using RangesWithStep = std::vector; std::optional steppedRangeFromRange(const Range & r, UInt64 step, UInt64 remainder) { - if ((r.right.get() == 0) && (!r.right_included)) + if ((r.right.safeGet() == 0) && (!r.right_included)) return std::nullopt; - UInt64 begin = (r.left.get() / step) * step; + UInt64 begin = (r.left.safeGet() / step) * step; if (begin > std::numeric_limits::max() - remainder) return std::nullopt; begin += remainder; - while ((r.left_included <= r.left.get()) && (begin <= r.left.get() - r.left_included)) + while ((r.left_included <= r.left.safeGet()) && (begin <= r.left.safeGet() - r.left_included)) { if (std::numeric_limits::max() - step < begin) return std::nullopt; begin += step; } - if ((begin >= r.right_included) && (begin - r.right_included >= r.right.get())) + if ((begin >= r.right_included) && (begin - r.right_included >= r.right.safeGet())) return std::nullopt; - UInt64 right_edge_included = r.right.get() - (1 - r.right_included); + UInt64 right_edge_included = r.right.safeGet() - (1 - r.right_included); return std::optional{RangeWithStep{begin, step, static_cast(right_edge_included - begin) / step + 1}}; } diff --git a/src/Processors/Sources/MySQLSource.cpp b/src/Processors/Sources/MySQLSource.cpp index 5d533a7747e..52be9a6e84a 100644 --- a/src/Processors/Sources/MySQLSource.cpp +++ b/src/Processors/Sources/MySQLSource.cpp @@ -219,11 +219,11 @@ namespace read_bytes_size += 8; break; case ValueType::vtEnum8: - assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).get()); + assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).safeGet()); read_bytes_size += assert_cast(column).byteSize(); break; case ValueType::vtEnum16: - assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).get()); + assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).safeGet()); read_bytes_size += assert_cast(column).byteSize(); break; case ValueType::vtString: diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 9601f821cc8..95f4a674ebb 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -62,7 +62,7 @@ static FillColumnDescription::StepFunction getStepFunction( case IntervalKind::Kind::NAME: \ return [step, scale, &date_lut](Field & field) { \ field = Add##NAME##sImpl::execute(static_cast(\ - field.get()), static_cast(step), date_lut, utc_time_zone, scale); }; + field.safeGet()), static_cast(step), date_lut, utc_time_zone, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) #undef DECLARE_CASE @@ -139,21 +139,21 @@ 
static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & { if (which.isDate() || which.isDate32()) { - Int64 avg_seconds = descr.fill_step.get() * descr.step_kind->toAvgSeconds(); + Int64 avg_seconds = descr.fill_step.safeGet() * descr.step_kind->toAvgSeconds(); if (std::abs(avg_seconds) < 86400) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "Value of step is to low ({} seconds). Must be >= 1 day", std::abs(avg_seconds)); } if (which.isDate()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.get(), DateLUT::instance()); + descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); else if (which.isDate32()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.get(), DateLUT::instance()); + descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); else if (const auto * date_time = checkAndGetDataType(type.get())) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.get(), date_time->getTimeZone()); + descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), date_time->getTimeZone()); else if (const auto * date_time64 = checkAndGetDataType(type.get())) { - const auto & step_dec = descr.fill_step.get &>(); + const auto & step_dec = descr.fill_step.safeGet &>(); Int64 step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); @@ -163,7 +163,7 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & case IntervalKind::Kind::NAME: \ descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \ { \ - auto field_decimal = field.get>(); \ + auto field_decimal = field.safeGet>(); \ auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, utc_time_zone, field_decimal.getScale()); \ field = DecimalField(res, field_decimal.getScale()); \ }; \ diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index f76e2d64368..32066adad0d 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -107,7 +107,7 @@ static int compareValuesWithOffset(const IColumn * _compared_column, using ValueType = typename ColumnType::ValueType; // Note that the storage type of offset returned by get<> is different, so // we need to specify the type explicitly. - const ValueType offset = static_cast(_offset.get()); + const ValueType offset = static_cast(_offset.safeGet()); assert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); @@ -162,7 +162,7 @@ static int compareValuesWithOffsetFloat(const IColumn * _compared_column, _compared_column); const auto * reference_column = assert_cast( _reference_column); - const auto offset = _offset.get(); + const auto offset = _offset.safeGet(); chassert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); @@ -631,7 +631,7 @@ void WindowTransform::advanceFrameStartRowsOffset() { // Just recalculate it each time by walking blocks. const auto [moved_row, offset_left] = moveRowNumber(current_row, - window_description.frame.begin_offset.get() + window_description.frame.begin_offset.safeGet() * (window_description.frame.begin_preceding ? 
-1 : 1)); frame_start = moved_row; @@ -870,7 +870,7 @@ void WindowTransform::advanceFrameEndRowsOffset() // Walk the specified offset from the current row. The "+1" is needed // because the frame_end is a past-the-end pointer. const auto [moved_row, offset_left] = moveRowNumber(current_row, - window_description.frame.end_offset.get() + window_description.frame.end_offset.safeGet() * (window_description.frame.end_preceding ? -1 : 1) + 1); @@ -2192,13 +2192,13 @@ namespace throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be a constant"); auto type_id = argument_types[0]->getTypeId(); if (type_id == TypeIndex::UInt8) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); else if (type_id == TypeIndex::UInt16) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); else if (type_id == TypeIndex::UInt32) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); else if (type_id == TypeIndex::UInt64) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); if (!buckets) { @@ -2490,7 +2490,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { offset = (*current_block.input_columns[ workspace.argument_column_indices[1]])[ - transform->current_row.row].get(); + transform->current_row.row].safeGet(); /// Either overflow or really negative value, both is not acceptable. if (offset < 0) @@ -2576,7 +2576,7 @@ struct WindowFunctionNthValue final : public WindowFunction Int64 offset = (*current_block.input_columns[ workspace.argument_column_indices[1]])[ - transform->current_row.row].get(); + transform->current_row.row].safeGet(); /// Either overflow or really negative value, both is not acceptable. 
if (offset <= 0) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 7891042bb96..4869819d0d6 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -109,7 +109,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ if (ast_col_decl.comment) { const auto & ast_comment = typeid_cast(*ast_col_decl.comment); - command.comment = ast_comment.value.get(); + command.comment = ast_comment.value.safeGet(); } if (ast_col_decl.codec) @@ -167,7 +167,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ if (ast_col_decl.comment) { const auto & ast_comment = ast_col_decl.comment->as(); - command.comment.emplace(ast_comment.value.get()); + command.comment.emplace(ast_comment.value.safeGet()); } if (ast_col_decl.ttl) @@ -210,7 +210,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.type = COMMENT_COLUMN; command.column_name = getIdentifierName(command_ast->column); const auto & ast_comment = command_ast->comment->as(); - command.comment = ast_comment.value.get(); + command.comment = ast_comment.value.safeGet(); command.if_exists = command_ast->if_exists; return command; } @@ -220,7 +220,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.ast = command_ast->clone(); command.type = COMMENT_TABLE; const auto & ast_comment = command_ast->comment->as(); - command.comment = ast_comment.value.get(); + command.comment = ast_comment.value.safeGet(); return command; } else if (command_ast->type == ASTAlterCommand::MODIFY_ORDER_BY) diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index da749812167..0d724245b49 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -197,7 +197,7 @@ void ColumnDescription::readText(ReadBuffer & buf) } if (col_ast->comment) - comment = col_ast->comment->as().value.get(); + comment = col_ast->comment->as().value.safeGet(); if (col_ast->codec) codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(col_ast->codec, type, false, true, true, true); diff --git a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp index 7354243732c..d6a8af3238e 100644 --- a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp +++ b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp @@ -268,7 +268,7 @@ bool ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node if (const_value.getType() == Field::Types::Float64) { out.function = RPNElement::FUNCTION_FLOAT_LITERAL; - out.float_literal.emplace(const_value.get()); + out.float_literal.emplace(const_value.safeGet()); out.func_name = "Float literal"; return true; } @@ -276,7 +276,7 @@ bool ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node if (const_value.getType() == Field::Types::UInt64) { out.function = RPNElement::FUNCTION_INT_LITERAL; - out.int_literal.emplace(const_value.get()); + out.int_literal.emplace(const_value.safeGet()); out.func_name = "Int literal"; return true; } @@ -284,7 +284,7 @@ bool ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node if (const_value.getType() == Field::Types::Int64) { out.function = RPNElement::FUNCTION_INT_LITERAL; - out.int_literal.emplace(const_value.get()); + out.int_literal.emplace(const_value.safeGet()); out.func_name = "Int literal"; return true; } @@ -292,7 +292,7 @@ bool 
ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node if (const_value.getType() == Field::Types::Tuple) { out.function = RPNElement::FUNCTION_LITERAL_TUPLE; - out.tuple_literal = const_value.get(); + out.tuple_literal = const_value.safeGet(); out.func_name = "Tuple literal"; return true; } @@ -300,7 +300,7 @@ bool ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node if (const_value.getType() == Field::Types::Array) { out.function = RPNElement::FUNCTION_LITERAL_ARRAY; - out.array_literal = const_value.get(); + out.array_literal = const_value.safeGet(); out.func_name = "Array literal"; return true; } @@ -308,7 +308,7 @@ bool ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node if (const_value.getType() == Field::Types::String) { out.function = RPNElement::FUNCTION_STRING_LITERAL; - out.func_name = const_value.get(); + out.func_name = const_value.safeGet(); return true; } } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 3a44359b537..0db05373e43 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -428,7 +428,7 @@ std::pair IMergeTreeDataPart::getMinMaxDate() const if (storage.minmax_idx_date_column_pos != -1 && minmax_idx->initialized) { const auto & hyperrectangle = minmax_idx->hyperrectangle[storage.minmax_idx_date_column_pos]; - return {DayNum(hyperrectangle.left.get()), DayNum(hyperrectangle.right.get())}; + return {DayNum(hyperrectangle.left.safeGet()), DayNum(hyperrectangle.right.safeGet())}; } else return {}; @@ -444,15 +444,15 @@ std::pair IMergeTreeDataPart::getMinMaxTime() const if (hyperrectangle.left.getType() == Field::Types::UInt64) { assert(hyperrectangle.right.getType() == Field::Types::UInt64); - return {hyperrectangle.left.get(), hyperrectangle.right.get()}; + return {hyperrectangle.left.safeGet(), hyperrectangle.right.safeGet()}; } /// The case of DateTime64 else if (hyperrectangle.left.getType() == Field::Types::Decimal64) { assert(hyperrectangle.right.getType() == Field::Types::Decimal64); - auto left = hyperrectangle.left.get>(); - auto right = hyperrectangle.right.get>(); + auto left = hyperrectangle.left.safeGet>(); + auto right = hyperrectangle.right.safeGet>(); assert(left.getScale() == right.getScale()); diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 69bffac9160..a717c08f0a2 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -349,7 +349,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - String prefix = extractFixedPrefixFromLikePattern(value.get(), /*requires_perfect_prefix*/ false); + String prefix = extractFixedPrefixFromLikePattern(value.safeGet(), /*requires_perfect_prefix*/ false); if (prefix.empty()) return false; @@ -370,7 +370,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - String prefix = extractFixedPrefixFromLikePattern(value.get(), /*requires_perfect_prefix*/ true); + String prefix = extractFixedPrefixFromLikePattern(value.safeGet(), /*requires_perfect_prefix*/ true); if (prefix.empty()) return false; @@ -391,7 +391,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - String prefix = value.get(); + String prefix = value.safeGet(); if (prefix.empty()) return false; 
@@ -412,7 +412,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - const String & expression = value.get(); + const String & expression = value.safeGet(); /// This optimization can't process alternation - this would require /// a comprehensive parsing of regular expression. @@ -2918,8 +2918,8 @@ BoolMask KeyCondition::checkInHyperrectangle( /// Let's support only the case of 2d, because I'm not confident in other cases. if (num_dimensions == 2) { - UInt64 left = key_range.left.get(); - UInt64 right = key_range.right.get(); + UInt64 left = key_range.left.safeGet(); + UInt64 right = key_range.right.safeGet(); BoolMask mask(false, true); auto hyperrectangle_intersection_callback = [&](std::array, 2> curve_hyperrectangle) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 2e10f5a0227..830f6f5d52f 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -5869,7 +5869,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, ContextPtr loc if (partition_lit && partition_lit->value.getType() == Field::Types::String) { MergeTreePartInfo::validatePartitionID(partition_ast.value->clone(), format_version); - return partition_lit->value.get(); + return partition_lit->value.safeGet(); } } @@ -5932,7 +5932,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, ContextPtr loc throw Exception(ErrorCodes::INVALID_PARTITION_VALUE, "Expected tuple for complex partition key, got {}", partition_key_value.getTypeName()); - const Tuple & tuple = partition_key_value.get(); + const Tuple & tuple = partition_key_value.safeGet(); if (tuple.size() != fields_count) throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong number of fields in the partition expression: {}, must be: {}", tuple.size(), fields_count); @@ -6868,7 +6868,7 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( auto * place = arena.alignedAlloc(size_of_state, align_of_state); func->create(place); if (const AggregateFunctionCount * agg_count = typeid_cast(func.get())) - AggregateFunctionCount::set(place, value.get()); + AggregateFunctionCount::set(place, value.safeGet()); else { auto value_column = func->getArgumentTypes().front()->createColumnConst(1, value)->convertToFullColumnIfConst(); @@ -7510,7 +7510,7 @@ MergeTreeData::MatcherFn MergeTreeData::getPartitionMatcher(const ASTPtr & parti if (const auto * partition_lit = partition_ast->as().value->as()) { id = partition_lit->value.getType() == Field::Types::UInt64 - ? toString(partition_lit->value.get()) + ? 
toString(partition_lit->value.safeGet()) : partition_lit->value.safeGet(); prefixed = true; } diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index ee3ac4207cc..98f1563546e 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -441,8 +441,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( String part_name; if (data.format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - DayNum min_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].left.get()); - DayNum max_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].right.get()); + DayNum min_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].left.safeGet()); + DayNum max_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].right.safeGet()); const auto & date_lut = DateLUT::serverTimezoneInstance(); diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index 497e86334f3..b68e48eeb3a 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -343,12 +343,12 @@ MergeTreeIndexPtr annoyIndexCreator(const IndexDescription & index) static constexpr auto DEFAULT_DISTANCE_FUNCTION = DISTANCE_FUNCTION_L2; String distance_function = DEFAULT_DISTANCE_FUNCTION; if (!index.arguments.empty()) - distance_function = index.arguments[0].get(); + distance_function = index.arguments[0].safeGet(); static constexpr auto DEFAULT_TREES = 100uz; UInt64 trees = DEFAULT_TREES; if (index.arguments.size() > 1) - trees = index.arguments[1].get(); + trees = index.arguments[1].safeGet(); return std::make_shared(index, trees, distance_function); } @@ -375,7 +375,7 @@ void annoyIndexValidator(const IndexDescription & index, bool /* attach */) if (!index.arguments.empty()) { - String distance_name = index.arguments[0].get(); + String distance_name = index.arguments[0].safeGet(); if (distance_name != DISTANCE_FUNCTION_L2 && distance_name != DISTANCE_FUNCTION_COSINE) throw Exception(ErrorCodes::INCORRECT_DATA, "Annoy index only supports distance functions '{}' and '{}'", DISTANCE_FUNCTION_L2, DISTANCE_FUNCTION_COSINE); } diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index c6a00751f25..be0ee693e15 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -348,19 +348,19 @@ bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTre { if (const_value.getType() == Field::Types::UInt64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Int64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Float64) { - out.function = const_value.get() != 0.0 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() != 0.0 ? 
RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } } @@ -692,7 +692,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseTreeEquals( const bool is_nullable = actual_type->isNullable(); auto mutable_column = actual_type->createColumn(); - for (const auto & f : value_field.get()) + for (const auto & f : value_field.safeGet()) { if ((f.isNull() && !is_nullable) || f.isDecimal(f.getType())) /// NOLINT(readability-static-accessed-through-instance) return false; @@ -763,7 +763,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseTreeEquals( if (which.isTuple() && key_node_function_name == "tuple") { - const Tuple & tuple = value_field.get(); + const Tuple & tuple = value_field.safeGet(); const auto * value_tuple_data_type = typeid_cast(value_type.get()); if (tuple.size() != key_node_function_arguments_size) @@ -952,7 +952,7 @@ void bloomFilterIndexValidator(const IndexDescription & index, bool attach) { const auto & argument = index.arguments[0]; - if (!attach && (argument.getType() != Field::Types::Float64 || argument.get() < 0 || argument.get() > 1)) + if (!attach && (argument.getType() != Field::Types::Float64 || argument.safeGet() < 0 || argument.safeGet() > 1)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The BloomFilter false positive must be a double number between 0 and 1."); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp index 5b6813d12e3..857b7903588 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp @@ -341,19 +341,19 @@ bool MergeTreeConditionBloomFilterText::extractAtomFromTree(const RPNBuilderTree if (const_value.getType() == Field::Types::UInt64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Int64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Float64) { - out.function = const_value.get() != 0.0 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() != 0.0 ? 
RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } } @@ -493,7 +493,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - auto value = const_value.get(); + auto value = const_value.safeGet(); if (is_case_insensitive_scenario) std::ranges::transform(value, value.begin(), [](const auto & c) { return static_cast(std::tolower(c)); }); @@ -509,7 +509,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_HAS; out.bloom_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -519,7 +519,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_HAS; out.bloom_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -529,7 +529,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -538,7 +538,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -547,7 +547,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -556,7 +556,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -565,7 +565,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, true, false); return true; } @@ -574,7 +574,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, false, true); return true; 
} @@ -589,13 +589,13 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( /// 2d vector is not needed here but is used because already exists for FUNCTION_IN std::vector> bloom_filters; bloom_filters.emplace_back(); - for (const auto & element : const_value.get()) + for (const auto & element : const_value.safeGet()) { if (element.getType() != Field::Types::String) return false; bloom_filters.back().emplace_back(params); - const auto & value = element.get(); + const auto & value = element.safeGet(); if (function_name == "multiSearchAny") { @@ -615,7 +615,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.function = RPNElement::FUNCTION_MATCH; out.bloom_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); String required_substring; bool dummy_is_trivial, dummy_required_substring_is_prefix; std::vector alternatives; @@ -743,11 +743,11 @@ MergeTreeIndexPtr bloomFilterIndexTextCreator( { if (index.type == NgramTokenExtractor::getName()) { - size_t n = index.arguments[0].get(); + size_t n = index.arguments[0].safeGet(); BloomFilterParameters params( - index.arguments[1].get(), - index.arguments[2].get(), - index.arguments[3].get()); + index.arguments[1].safeGet(), + index.arguments[2].safeGet(), + index.arguments[3].safeGet()); auto tokenizer = std::make_unique(n); @@ -756,9 +756,9 @@ MergeTreeIndexPtr bloomFilterIndexTextCreator( else if (index.type == SplitTokenExtractor::getName()) { BloomFilterParameters params( - index.arguments[0].get(), - index.arguments[1].get(), - index.arguments[2].get()); + index.arguments[0].safeGet(), + index.arguments[1].safeGet(), + index.arguments[2].safeGet()); auto tokenizer = std::make_unique(); @@ -815,9 +815,9 @@ void bloomFilterIndexTextValidator(const IndexDescription & index, bool /*attach /// Just validate BloomFilterParameters params( - index.arguments[0].get(), - index.arguments[1].get(), - index.arguments[2].get()); + index.arguments[0].safeGet(), + index.arguments[1].safeGet(), + index.arguments[2].safeGet()); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index cd6af68ebcc..b5c6bb95d37 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -74,7 +74,7 @@ void MergeTreeIndexGranuleFullText::deserializeBinary(ReadBuffer & istr, MergeTr for (auto & gin_filter : gin_filters) { size_serialization->deserializeBinary(field_rows, istr, {}); - size_t filter_size = field_rows.get(); + size_t filter_size = field_rows.safeGet(); gin_filter.getFilter().resize(filter_size); if (filter_size == 0) @@ -379,19 +379,19 @@ bool MergeTreeConditionFullText::traverseAtomAST(const RPNBuilderTreeNode & node /// Check constant like in KeyCondition if (const_value.getType() == Field::Types::UInt64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Int64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Float64) { - out.function = const_value.get() != 0.00 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() != 0.00 ? 
RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } } @@ -530,7 +530,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_HAS; out.gin_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -539,7 +539,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_HAS; out.gin_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -549,7 +549,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -558,7 +558,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -567,7 +567,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -576,7 +576,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -585,7 +585,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -594,7 +594,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, true, false); return true; } @@ -603,7 +603,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, false, true); return true; } @@ -615,13 +615,13 @@ bool MergeTreeConditionFullText::traverseASTEquals( /// 2d 
vector is not needed here but is used because already exists for FUNCTION_IN std::vector gin_filters; gin_filters.emplace_back(); - for (const auto & element : const_value.get()) + for (const auto & element : const_value.safeGet()) { if (element.getType() != Field::Types::String) return false; gin_filters.back().emplace_back(params); - const auto & value = element.get(); + const auto & value = element.safeGet(); token_extractor->substringToGinFilter(value.data(), value.size(), gin_filters.back().back(), false, false); } out.set_gin_filters = std::move(gin_filters); @@ -632,7 +632,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_MATCH; - auto & value = const_value.get(); + auto & value = const_value.safeGet(); String required_substring; bool dummy_is_trivial, dummy_required_substring_is_prefix; std::vector alternatives; @@ -776,8 +776,8 @@ MergeTreeIndexConditionPtr MergeTreeIndexFullText::createIndexCondition( MergeTreeIndexPtr fullTextIndexCreator( const IndexDescription & index) { - size_t n = index.arguments.empty() ? 0 : index.arguments[0].get(); - UInt64 max_rows = index.arguments.size() < 2 ? DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].get(); + size_t n = index.arguments.empty() ? 0 : index.arguments[0].safeGet(); + UInt64 max_rows = index.arguments.size() < 2 ? DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].safeGet(); GinFilterParameters params(n, max_rows); /// Use SplitTokenExtractor when n is 0, otherwise use NgramTokenExtractor @@ -826,12 +826,12 @@ void fullTextIndexValidator(const IndexDescription & index, bool /*attach*/) { if (index.arguments[1].getType() != Field::Types::UInt64) throw Exception(ErrorCodes::INCORRECT_QUERY, "The second full text index argument must be UInt64"); - if (index.arguments[1].get() != UNLIMITED_ROWS_PER_POSTINGS_LIST && index.arguments[1].get() < MIN_ROWS_PER_POSTINGS_LIST) + if (index.arguments[1].safeGet() != UNLIMITED_ROWS_PER_POSTINGS_LIST && index.arguments[1].safeGet() < MIN_ROWS_PER_POSTINGS_LIST) throw Exception(ErrorCodes::INCORRECT_QUERY, "The maximum rows per postings list must be no less than {}", MIN_ROWS_PER_POSTINGS_LIST); } /// Just validate - size_t ngrams = index.arguments.empty() ? 0 : index.arguments[0].get(); - UInt64 max_rows_per_postings_list = index.arguments.size() < 2 ? DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].get(); + size_t ngrams = index.arguments.empty() ? 0 : index.arguments[0].safeGet(); + UInt64 max_rows_per_postings_list = index.arguments.size() < 2 ? 
DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].safeGet(); GinFilterParameters params(ngrams, max_rows_per_postings_list); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp index cd8065ecadf..abf3ae56376 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp @@ -37,7 +37,7 @@ void MergeTreeIndexGranuleHypothesis::deserializeBinary(ReadBuffer & istr, Merge Field field_met; const auto & size_type = DataTypePtr(std::make_shared()); size_type->getDefaultSerialization()->deserializeBinary(field_met, istr, {}); - met = field_met.get(); + met = field_met.safeGet(); is_empty = false; } diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index a92df4ac72d..fa242fccbc1 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -97,7 +97,7 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd Field field_rows; const auto & size_type = DataTypePtr(std::make_shared()); size_type->getDefaultSerialization()->deserializeBinary(field_rows, istr, {}); - size_t rows_to_read = field_rows.get(); + size_t rows_to_read = field_rows.safeGet(); if (rows_to_read == 0) return; @@ -591,7 +591,7 @@ MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( MergeTreeIndexPtr setIndexCreator(const IndexDescription & index) { - size_t max_rows = index.arguments[0].get(); + size_t max_rows = index.arguments[0].safeGet(); return std::make_shared(index, max_rows); } diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp index 59a4b0fbf9c..efd9bb754e1 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp @@ -377,12 +377,12 @@ MergeTreeIndexPtr usearchIndexCreator(const IndexDescription & index) static constexpr auto default_distance_function = DISTANCE_FUNCTION_L2; String distance_function = default_distance_function; if (!index.arguments.empty()) - distance_function = index.arguments[0].get(); + distance_function = index.arguments[0].safeGet(); static constexpr auto default_scalar_kind = unum::usearch::scalar_kind_t::f16_k; auto scalar_kind = default_scalar_kind; if (index.arguments.size() > 1) - scalar_kind = nameToScalarKind.at(index.arguments[1].get()); + scalar_kind = nameToScalarKind.at(index.arguments[1].safeGet()); return std::make_shared(index, distance_function, scalar_kind); } @@ -408,14 +408,14 @@ void usearchIndexValidator(const IndexDescription & index, bool /* attach */) if (!index.arguments.empty()) { - String distance_name = index.arguments[0].get(); + String distance_name = index.arguments[0].safeGet(); if (distance_name != DISTANCE_FUNCTION_L2 && distance_name != DISTANCE_FUNCTION_COSINE) throw Exception(ErrorCodes::INCORRECT_DATA, "USearch index only supports distance functions '{}' and '{}'", DISTANCE_FUNCTION_L2, DISTANCE_FUNCTION_COSINE); } /// Check that a supported kind was passed as a second argument - if (index.arguments.size() > 1 && !nameToScalarKind.contains(index.arguments[1].get())) + if (index.arguments.size() > 1 && !nameToScalarKind.contains(index.arguments[1].safeGet())) { String supported_kinds; for (const auto & [name, kind] : nameToScalarKind) diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index b240f80ee13..5b5bc244f92 
100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -241,7 +241,7 @@ String MergeTreePartition::getID(const Block & partition_key_sample) const if (typeid_cast(partition_key_sample.getByPosition(i).type.get())) result += toString(DateLUT::serverTimezoneInstance().toNumYYYYMMDD(DayNum(value[i].safeGet()))); else if (typeid_cast(partition_key_sample.getByPosition(i).type.get())) - result += toString(value[i].get().toUnderType()); + result += toString(value[i].safeGet().toUnderType()); else result += applyVisitor(to_string_visitor, value[i]); diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 43c40dee77d..f0c26c302e1 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -222,17 +222,17 @@ static bool isConditionGood(const RPNBuilderTreeNode & condition, const NameSet /// check the value with respect to threshold if (type == Field::Types::UInt64) { - const auto value = output_value.get(); + const auto value = output_value.safeGet(); return value > threshold; } else if (type == Field::Types::Int64) { - const auto value = output_value.get(); + const auto value = output_value.safeGet(); return value < -threshold || threshold < value; } else if (type == Field::Types::Float64) { - const auto value = output_value.get(); + const auto value = output_value.safeGet(); return value < -threshold || threshold < value; } diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 3f0603f6900..52857845e99 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -591,7 +591,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) if (ast->value.getType() != Field::Types::String) throw Exception(ErrorCodes::BAD_ARGUMENTS, format_str, error_msg); - graphite_config_name = ast->value.get(); + graphite_config_name = ast->value.safeGet(); } else throw Exception(ErrorCodes::BAD_ARGUMENTS, format_str, error_msg); diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index ade3326288a..fd9f29b6375 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -32,19 +32,19 @@ std::optional StatisticsUtils::tryConvertToFloat64(const Field & field) switch (field.getType()) { case Field::Types::Int64: - return field.get(); + return field.safeGet(); case Field::Types::UInt64: - return field.get(); + return field.safeGet(); case Field::Types::Float64: - return field.get(); + return field.safeGet(); case Field::Types::Int128: - return field.get(); + return field.safeGet(); case Field::Types::UInt128: - return field.get(); + return field.safeGet(); case Field::Types::Int256: - return field.get(); + return field.safeGet(); case Field::Types::UInt256: - return field.get(); + return field.safeGet(); default: return {}; } @@ -53,7 +53,7 @@ std::optional StatisticsUtils::tryConvertToFloat64(const Field & field) std::optional StatisticsUtils::tryConvertToString(const DB::Field & field) { if (field.getType() == Field::Types::String) - return field.get(); + return field.safeGet(); return {}; } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index e69bbc1515b..a6f2f883e65 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ 
b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -48,7 +48,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const return sketch.get_estimate(&val_converted, data_type->getSizeOfValueInMemory()); if (isStringOrFixedString(data_type)) - return sketch.get_estimate(val.get()); + return sketch.get_estimate(val.safeGet()); throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName()); } diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 060b271d8f4..b95ccedb093 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -203,7 +203,7 @@ StoragePtr StorageFactory::get( } if (query.comment) - comment = query.comment->as().value.get(); + comment = query.comment->as().value.safeGet(); ASTs empty_engine_args; Arguments arguments{ diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 98cd5c4dfa9..20b02b59a10 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -2203,11 +2203,11 @@ void registerStorageFile(StorageFactory & factory) { auto type = literal->value.getType(); if (type == Field::Types::Int64) - source_fd = static_cast(literal->value.get()); + source_fd = static_cast(literal->value.safeGet()); else if (type == Field::Types::UInt64) - source_fd = static_cast(literal->value.get()); + source_fd = static_cast(literal->value.safeGet()); else if (type == Field::Types::String) - StorageFile::parseFileSource(literal->value.get(), source_path, storage_args.path_to_archive); + StorageFile::parseFileSource(literal->value.safeGet(), source_path, storage_args.path_to_archive); else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second argument must be path or file descriptor"); } diff --git a/src/Storages/StorageFuzzJSON.cpp b/src/Storages/StorageFuzzJSON.cpp index 9950d41f1c2..fc73f246d35 100644 --- a/src/Storages/StorageFuzzJSON.cpp +++ b/src/Storages/StorageFuzzJSON.cpp @@ -419,7 +419,7 @@ void fuzzJSONObject( if (val.fixed->getType() == Field::Types::Which::String) { out << fuzzJSONStructure(config, rnd, "\""); - writeText(val.fixed->get(), out); + writeText(val.fixed->safeGet(), out); out << fuzzJSONStructure(config, rnd, "\""); } else diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index a0d6cf11b64..136df425813 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -341,10 +341,10 @@ void registerStorageJoin(StorageFactory & factory) else if (setting.name == "any_join_distinct_right_table_keys") old_any_join = setting.value; else if (setting.name == "disk") - disk_name = setting.value.get(); + disk_name = setting.value.safeGet(); else if (setting.name == "persistent") { - persistent = setting.value.get(); + persistent = setting.value.safeGet(); } else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown setting {} for storage {}", setting.name, args.engine_name); diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index d3214e7ed13..e0a4af68824 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -294,7 +294,7 @@ public: { const auto * array_type = typeid_cast(data_type.get()); const auto & nested = array_type->getNestedType(); - const auto & array = array_field.get(); + const auto & array = array_field.safeGet(); if (!isArray(nested)) { @@ -312,7 +312,7 @@ public: if (!isArray(nested_array_type->getNestedType())) { - parseArrayContent(iter->get(), nested, ostr); + 
parseArrayContent(iter->safeGet(), nested, ostr); } else { diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 6a7810b97f9..522884b4fef 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -105,8 +105,8 @@ protected: while (rows_count < max_block_size && db_table_num < total_tables) { - const std::string database_name = (*databases)[db_table_num].get(); - const std::string table_name = (*tables)[db_table_num].get(); + const std::string database_name = (*databases)[db_table_num].safeGet(); + const std::string table_name = (*tables)[db_table_num].safeGet(); ++db_table_num; ColumnsDescription columns; @@ -426,7 +426,7 @@ void ReadFromSystemColumns::initializePipeline(QueryPipelineBuilder & pipeline, for (size_t i = 0; i < num_databases; ++i) { - const std::string database_name = (*database_column)[i].get(); + const std::string database_name = (*database_column)[i].safeGet(); if (database_name.empty()) { for (auto & [table_name, table] : external_tables) diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index 7ace8ee24aa..c87bdb6d26a 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -138,7 +138,7 @@ StoragesInfoStream::StoragesInfoStream(std::optional filter_by_datab for (size_t i = 0; i < rows; ++i) { - String database_name = (*database_column_for_filter)[i].get(); + String database_name = (*database_column_for_filter)[i].safeGet(); const DatabasePtr database = databases.at(database_name); offsets[i] = i ? offsets[i - 1] : 0; diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index 806af4a7bf8..3be73aeda17 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -52,13 +52,13 @@ public: { StoragesInfo info; - info.database = (*database_column)[next_row].get(); - info.table = (*table_column)[next_row].get(); - UUID storage_uuid = (*storage_uuid_column)[next_row].get(); + info.database = (*database_column)[next_row].safeGet(); + info.table = (*table_column)[next_row].safeGet(); + UUID storage_uuid = (*storage_uuid_column)[next_row].safeGet(); auto is_same_table = [&storage_uuid, this] (size_t row) -> bool { - return (*storage_uuid_column)[row].get() == storage_uuid; + return (*storage_uuid_column)[row].safeGet() == storage_uuid; }; /// We may have two rows per table which differ in 'active' value. @@ -66,7 +66,7 @@ public: /// must collect the inactive parts. Remember this fact in StoragesInfo. 
for (; next_row < rows && is_same_table(next_row); ++next_row) { - const auto active = (*active_column)[next_row].get(); + const auto active = (*active_column)[next_row].safeGet(); if (active == 0) info.need_inactive_parts = true; } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 73b7908b75c..ea837da1e73 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -51,7 +51,7 @@ auto extractSingleValueFromBlock(const Block & block, const String & name) const ColumnWithTypeAndName & data = block.getByName(name); size_t rows = block.rows(); for (size_t i = 0; i < rows; ++i) - res.insert((*data.column)[i].get()); + res.insert((*data.column)[i].safeGet()); return res; } diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 9d23f132759..1408e120bc5 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -102,16 +102,16 @@ ColumnsDescription getStructureOfRemoteTableInShard( { ColumnDescription column; - column.name = (*name)[i].get(); + column.name = (*name)[i].safeGet(); - String data_type_name = (*type)[i].get(); + String data_type_name = (*type)[i].safeGet(); column.type = data_type_factory.get(data_type_name); - String kind_name = (*default_kind)[i].get(); + String kind_name = (*default_kind)[i].safeGet(); if (!kind_name.empty()) { column.default_desc.kind = columnDefaultKindFromString(kind_name); - String expr_str = (*default_expr)[i].get(); + String expr_str = (*default_expr)[i].safeGet(); column.default_desc.expression = parseQuery( expr_parser, expr_str.data(), expr_str.data() + expr_str.size(), "default expression", 0, settings.max_parser_depth, settings.max_parser_backtracks); @@ -207,8 +207,8 @@ ColumnsDescriptionByShardNum getExtendedObjectsOfRemoteTables( size_t size = name_col.size(); for (size_t i = 0; i < size; ++i) { - auto name = name_col[i].get(); - auto type_name = type_col[i].get(); + auto name = name_col[i].safeGet(); + auto type_name = type_col[i].safeGet(); auto storage_column = storage_columns.tryGetPhysical(name); if (storage_column && storage_column->type->hasDynamicSubcolumnsDeprecated()) diff --git a/src/TableFunctions/TableFunctionExplain.cpp b/src/TableFunctions/TableFunctionExplain.cpp index 552b9fde986..69d24c879bd 100644 --- a/src/TableFunctions/TableFunctionExplain.cpp +++ b/src/TableFunctions/TableFunctionExplain.cpp @@ -83,7 +83,7 @@ void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPt "Table function '{}' requires a String argument for EXPLAIN kind, got '{}'", getName(), queryToString(kind_arg)); - ASTExplainQuery::ExplainKind kind = ASTExplainQuery::fromString(kind_literal->value.get()); + ASTExplainQuery::ExplainKind kind = ASTExplainQuery::fromString(kind_literal->value.safeGet()); auto explain_query = std::make_shared(kind); const auto * settings_arg = function->arguments->children[1]->as(); @@ -92,7 +92,7 @@ void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPt "Table function '{}' requires a serialized string settings argument, got '{}'", getName(), queryToString(function->arguments->children[1])); - const auto & settings_str = settings_arg->value.get(); + const auto & settings_str = settings_arg->value.safeGet(); if (!settings_str.empty()) { const Settings & settings = context->getSettingsRef(); diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index 1b6d86f8fa5..e59ee52fd82 
100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -47,7 +47,7 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr else if (type == Field::Types::Int64 || type == Field::Types::UInt64) { fd = static_cast( - (type == Field::Types::Int64) ? literal->value.get() : literal->value.get()); + (type == Field::Types::Int64) ? literal->value.safeGet() : literal->value.safeGet()); if (fd < 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "File descriptor must be non-negative"); } diff --git a/src/TableFunctions/TableFunctionMergeTreeIndex.cpp b/src/TableFunctions/TableFunctionMergeTreeIndex.cpp index 06a48f0e25f..27ed50fb711 100644 --- a/src/TableFunctions/TableFunctionMergeTreeIndex.cpp +++ b/src/TableFunctions/TableFunctionMergeTreeIndex.cpp @@ -76,9 +76,9 @@ void TableFunctionMergeTreeIndex::parseArguments(const ASTPtr & ast_function, Co "Table function '{}' expected bool flag for 'with_marks' argument", getName()); if (value.getType() == Field::Types::Bool) - with_marks = value.get(); + with_marks = value.safeGet(); else - with_marks = value.get(); + with_marks = value.safeGet(); } if (!params.empty()) From 18d9bb2ade4e98051df007663a387eb74146c26f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Aug 2024 16:25:32 +0200 Subject: [PATCH 2024/2361] tests: attempt to fix 01600_parts_states_metrics_long (by forbidding parallel runs) CI: https://s3.amazonaws.com/clickhouse-test-reports/68134/8d4f822fee64d44440459b733c67dee5e9fb1e02/stateless_tests__tsan__s3_storage__[2_4].html Signed-off-by: Azat Khuzhin --- tests/queries/0_stateless/01600_parts_states_metrics_long.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index 47b5a4dea13..8062bb0ba5d 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh From eeda67042c08bedbd18c3b7a76cb8928e9975348 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Aug 2024 16:28:49 +0200 Subject: [PATCH 2025/2361] tests: make 01600_parts_states_metrics_long faster Signed-off-by: Azat Khuzhin --- .../01600_parts_states_metrics_long.sh | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index 8062bb0ba5d..a07dd306b3e 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -5,6 +5,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh .
"$CURDIR"/../shell_config.sh +function query() +{ + # NOTE: database_atomic_wait_for_drop_and_detach_synchronously needed only for local env, CI has it ON + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&database_atomic_wait_for_drop_and_detach_synchronously=1" -d "$*" +} + # NOTE: database = $CLICKHOUSE_DATABASE is unwanted verify_sql="SELECT (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) @@ -18,13 +24,13 @@ verify() { for i in {1..5000} do - result=$( $CLICKHOUSE_CLIENT --query="$verify_sql" ) + result=$( query "$verify_sql" ) [ "$result" = "1" ] && echo "$result" && break sleep 0.1 if [[ $i -eq 5000 ]] then - $CLICKHOUSE_CLIENT " + query " SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; SELECT sum(active), sum(NOT active) FROM system.parts; SELECT sum(active), sum(NOT active) FROM system.projection_parts; @@ -34,17 +40,17 @@ verify() done } -$CLICKHOUSE_CLIENT --database_atomic_wait_for_drop_and_detach_synchronously=1 --query="DROP TABLE IF EXISTS test_table" -$CLICKHOUSE_CLIENT --query="CREATE TABLE test_table (data Date) ENGINE = MergeTree PARTITION BY toYear(data) ORDER BY data;" +query "DROP TABLE IF EXISTS test_table" +query "CREATE TABLE test_table (data Date) ENGINE = MergeTree PARTITION BY toYear(data) ORDER BY data;" -$CLICKHOUSE_CLIENT --query="INSERT INTO test_table VALUES ('1992-01-01')" +query "INSERT INTO test_table VALUES ('1992-01-01')" verify -$CLICKHOUSE_CLIENT --query="INSERT INTO test_table VALUES ('1992-01-02')" +query "INSERT INTO test_table VALUES ('1992-01-02')" verify -$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE test_table FINAL" +query "OPTIMIZE TABLE test_table FINAL" verify -$CLICKHOUSE_CLIENT --database_atomic_wait_for_drop_and_detach_synchronously=1 --query="DROP TABLE test_table" +query "DROP TABLE test_table" verify From 0a8fb05ece2771439844c03456d43b02eb8f51cd Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 10 Aug 2024 16:23:23 +0000 Subject: [PATCH 2026/2361] fix after merge --- src/Core/Field.h | 7 +++++++ src/Disks/DiskFomAST.cpp | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/Core/Field.h b/src/Core/Field.h index 689ac38a235..13741183f21 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -869,6 +869,13 @@ constexpr bool isInt64OrUInt64FieldType(Field::Types::Which t) || t == Field::Types::UInt64; } +constexpr bool isInt64OrUInt64orBoolFieldType(Field::Types::Which t) +{ + return t == Field::Types::Int64 + || t == Field::Types::UInt64 + || t == Field::Types::Bool; +} + template auto & Field::safeGet() { diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index b2f1280c507..5329ff8748a 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -132,7 +132,7 @@ std::string DiskFomAST::createCustomDisk(const ASTPtr & disk_function_ast, Conte FlattenDiskConfigurationVisitor::Data data{context, attach}; FlattenDiskConfigurationVisitor{data}.visit(ast); - return assert_cast(*ast).value.get(); + return assert_cast(*ast).value.safeGet(); } void DiskFomAST::ensureDiskIsNotCustom(const std::string & disk_name, ContextPtr context) From 0a536cbf150916bff0ec63c6ef3b77df517868f8 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Sat, 10 Aug 2024 18:58:28 +0200 Subject: [PATCH 2027/2361] Add batch size --- docker/test/stateless/run.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 
a030be92506..17e39487e3e 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -193,8 +193,8 @@ ORDER BY tuple()" # create minio log webhooks for both audit and server logs # use async inserts to avoid creating too many parts -./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=1000000 -./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=1000000 +./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=1000000 batch_size=500 +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=1000000 batch_size=500 max_retries=100 retry=1 @@ -376,6 +376,8 @@ done # collect minio audit and server logs +# wait for minio to flush its batch if it has any +sleep 1 clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" From 3f718626da61f0502115586425d728492d3a3ae3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2024 19:55:11 +0200 Subject: [PATCH 2028/2361] Better test --- tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh index fb55539d04a..021278955cd 100755 --- a/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh +++ b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-asan, no-msan, no-tsan # ^ requires S3 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) @@ -10,7 +10,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) while true do # This host (likely) drops packets sent to it (does not reply), so it is good for testing timeouts. - # At the same time, we expect that google.com does not drop packets and quickly replies with 404, which is a non-retriable error for S3. 
- AWS_EC2_METADATA_SERVICE_ENDPOINT='https://10.255.255.255/' ${CLICKHOUSE_LOCAL} --time --query "SELECT * FROM s3('https://google.com/test')" |& grep -v -F 404 | + # At the same time, we expect that the clickhouse host does not drop packets and quickly replies with 4xx, which is a non-retriable error for S3. + AWS_EC2_METADATA_SERVICE_ENDPOINT='https://10.255.255.255/' ${CLICKHOUSE_LOCAL} --time --query "SELECT * FROM s3('${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/nonexistent')" |& grep -v -F 404 | ${CLICKHOUSE_LOCAL} --input-format TSV "SELECT c1::Float64 < 1 FROM table" | grep 1 && break done From 7524b8f76712ce421fdebd1fe86c79128fea3ceb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 10 Aug 2024 19:55:22 +0200 Subject: [PATCH 2029/2361] A slight improvement --- src/Storages/StorageMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 78dbb72c199..f7701a2aab8 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -895,7 +895,7 @@ void StorageMergeTree::loadDeduplicationLog() std::string path = fs::path(relative_data_path) / "deduplication_logs"; /// If either there is already a deduplication log, or we will be able to use it. - if (disk->exists(path) || !disk->isReadOnly()) + if (!disk->isReadOnly() || disk->exists(path)) { deduplication_log = std::make_unique(path, settings->non_replicated_deduplication_window, format_version, disk); deduplication_log->load(); From d53513a81a10b8230a30fdbb386aca1d067cbcfa Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 10 Aug 2024 18:12:34 +0000 Subject: [PATCH 2030/2361] fix --- tests/queries/0_stateless/01710_projection_vertical_merges.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01710_projection_vertical_merges.sql b/tests/queries/0_stateless/01710_projection_vertical_merges.sql index 0f80d659e92..0d745e44b10 100644 --- a/tests/queries/0_stateless/01710_projection_vertical_merges.sql +++ b/tests/queries/0_stateless/01710_projection_vertical_merges.sql @@ -1,4 +1,5 @@ --- Tags: long +-- Tags: long, no-parallel +-- the no-parallel tag is set to prevent a timeout of this test drop table if exists t; From 80e926996319b926f3cbebf2050ed4a60666ee71 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 10 Aug 2024 19:39:59 +0000 Subject: [PATCH 2031/2361] allow UInt64 <-> Int64 conversion --- src/Core/Field.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Core/Field.h b/src/Core/Field.h index 13741183f21..ba8c66580ad 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -882,8 +882,11 @@ auto & Field::safeGet() const Types::Which target = TypeToEnum>>::value; /// bool is stored as uint64, will be returned as UInt64 when requested as bool or UInt64, as Int64 when requested as Int64 - if (target != which && !(which == Field::Types::Bool && (target == Field::Types::UInt64 || target == Field::Types::Int64))) - throw Exception(ErrorCodes::BAD_GET, "Bad get: has {}, requested {}", getTypeName(), target); + /// also allow UInt64 <-> Int64 conversion + if (target != which && + !(which == Field::Types::Bool && (target == Field::Types::UInt64 || target == Field::Types::Int64)) && + !(isInt64OrUInt64FieldType(which) && isInt64OrUInt64FieldType(target))) + throw Exception(ErrorCodes::BAD_GET, "Bad get: has {}, requested {}", getTypeName(), target); return get(); } From
556f66987897bd5065426e187bef4e0cba2a975c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 10 Aug 2024 21:52:55 +0000 Subject: [PATCH 2032/2361] Separate test into separate file to pass bugfix check --- tests/integration/parallel_skip.json | 4 + tests/integration/test_storage_kafka/test.py | 135 ---------- .../test_produce_http_interface.py | 243 ++++++++++++++++++ 3 files changed, 247 insertions(+), 135 deletions(-) create mode 100644 tests/integration/test_storage_kafka/test_produce_http_interface.py diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 99fa626bd1e..fca2126d824 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -162,9 +162,13 @@ "test_storage_kafka/test.py::test_system_kafka_consumers_rebalance_mv", "test_storage_kafka/test.py::test_formats_errors", "test_storage_kafka/test.py::test_multiple_read_in_materialized_views", + "test_storage_kafka/test.py::test_kafka_null_message", + + "test_storage_kafka/test_produce_http_interface.py::test_kafka_produce_http_interface_row_based_format", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_request_new_ticket_after_expiration", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_no_kdc", "test_storage_kerberized_kafka/test.py::test_kafka_config_from_sql_named_collection" + ] diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 9b2f465c1b6..4b6c9922d74 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -5524,141 +5524,6 @@ def test_kafka_null_message(kafka_cluster, create_query_generator): ) -def test_kafka_produce_http_interface_row_based_format(kafka_cluster): - # reproduction of #61060 with validating the written messages - admin_client = KafkaAdminClient( - bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) - ) - - topic_prefix = "http_row_" - - # It is important to have: - # - long enough messages - # - enough messages - # I don't know the exact requirement for message sizes, but it doesn't reproduce with short messages - # For the number of messages it seems like at least 3 messages is necessary - expected_key = "01234567890123456789" - expected_value = "aaaaabbbbbccccc" - - insert_query_end = f"(key, value) VALUES ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}')" - insert_query_template = "INSERT INTO {table_name} " + insert_query_end - - extra_settings = { - "Protobuf": ", kafka_schema = 'string_key_value.proto:StringKeyValuePair'", - "CapnProto": ", kafka_schema='string_key_value:StringKeyValuePair'", - "Template": ", format_template_row='string_key_value.format'", - } - - # Only the formats that can be used both and input and output format are tested - # Reasons to exclude following formats: - # - JSONStrings: not actually an input format - # - ProtobufSingle: I cannot make it work to parse the messages. Probably something is broken, - # because the producer can write multiple rows into a same message, which makes them impossible to parse properly. Should added after #67549 is fixed. 
- # - ProtobufList: I didn't want to deal with the envelope and stuff - # - Npy: supports only single column - # - LineAsString: supports only single column - # - RawBLOB: supports only single column - formats_to_test = [ - "TabSeparated", - "TabSeparatedRaw", - "TabSeparatedWithNames", - "TabSeparatedWithNamesAndTypes", - "TabSeparatedRawWithNames", - "TabSeparatedRawWithNamesAndTypes", - "Template", - "CSV", - "CSVWithNames", - "CSVWithNamesAndTypes", - "CustomSeparated", - "CustomSeparatedWithNames", - "CustomSeparatedWithNamesAndTypes", - "Values", - "JSON", - "JSONColumns", - "JSONColumnsWithMetadata", - "JSONCompact", - "JSONCompactColumns", - "JSONEachRow", - "JSONStringsEachRow", - "JSONCompactEachRow", - "JSONCompactEachRowWithNames", - "JSONCompactEachRowWithNamesAndTypes", - "JSONCompactStringsEachRow", - "JSONCompactStringsEachRowWithNames", - "JSONCompactStringsEachRowWithNamesAndTypes", - "JSONObjectEachRow", - "BSONEachRow", - "TSKV", - "Protobuf", - "Avro", - "Parquet", - "Arrow", - "ArrowStream", - "ORC", - "RowBinary", - "RowBinaryWithNames", - "RowBinaryWithNamesAndTypes", - "Native", - "CapnProto", - "MsgPack", - ] - for format in formats_to_test: - logging.debug(f"Creating tables and writing messages to {format}") - topic = topic_prefix + format - kafka_create_topic(admin_client, topic) - - extra_setting = extra_settings.get(format, "") - - # kafka_max_rows_per_message is set to 2 to make sure every format produces at least 2 messages, thus increasing the chance of catching a bug - instance.query( - f""" - DROP TABLE IF EXISTS test.view_{topic}; - DROP TABLE IF EXISTS test.consumer_{topic}; - CREATE TABLE test.kafka_writer_{topic} (key String, value String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic}', - kafka_group_name = '{topic}', - kafka_format = '{format}', - kafka_max_rows_per_message = 2 {extra_setting}; - - CREATE TABLE test.kafka_{topic} (key String, value String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kafka1:19092', - kafka_topic_list = '{topic}', - kafka_group_name = '{topic}', - kafka_format = '{format}' {extra_setting}; - - CREATE MATERIALIZED VIEW test.view_{topic} Engine=Log AS - SELECT key, value FROM test.kafka_{topic}; - """ - ) - - instance.http_query( - insert_query_template.format(table_name="test.kafka_writer_" + topic), - method="POST", - ) - - expected = f"""\ -{expected_key}\t{expected_value} -{expected_key}\t{expected_value} -{expected_key}\t{expected_value} -""" - # give some times for the readers to read the messages - for format in formats_to_test: - logging.debug(f"Checking result for {format}") - topic = topic_prefix + format - - result = instance.query_with_retry( - f"SELECT * FROM test.view_{topic}", - check_callback=lambda res: res.count("\n") == 3, - ) - - assert TSV(result) == TSV(expected) - - kafka_delete_topic(admin_client, topic) - - if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_kafka/test_produce_http_interface.py b/tests/integration/test_storage_kafka/test_produce_http_interface.py new file mode 100644 index 00000000000..fc10a07f239 --- /dev/null +++ b/tests/integration/test_storage_kafka/test_produce_http_interface.py @@ -0,0 +1,243 @@ +import time +import logging + +import pytest +from helpers.cluster import ClickHouseCluster, is_arm +from helpers.test_tools import TSV +from kafka import KafkaAdminClient +from kafka.admin import NewTopic + +if is_arm(): + pytestmark = 
pytest.mark.skip + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance( + "instance", + main_configs=["configs/kafka.xml", "configs/named_collection.xml"], + user_configs=["configs/users.xml"], + with_kafka=True, + with_zookeeper=True, # For Replicated Table + macros={ + "kafka_broker": "kafka1", + "kafka_topic_old": "old", + "kafka_group_name_old": "old", + "kafka_topic_new": "new", + "kafka_group_name_new": "new", + "kafka_client_id": "instance", + "kafka_format_json_each_row": "JSONEachRow", + }, + clickhouse_path_dir="clickhouse_path", +) + + +@pytest.fixture(scope="module") +def kafka_cluster(): + try: + cluster.start() + kafka_id = instance.cluster.kafka_docker_id + print(("kafka_id is {}".format(kafka_id))) + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def kafka_setup_teardown(): + instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") + # logging.debug("kafka is available - running test") + yield # run test + + +def kafka_create_topic( + admin_client, + topic_name, + num_partitions=1, + replication_factor=1, + max_retries=50, + config=None, +): + logging.debug( + f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}" + ) + topics_list = [ + NewTopic( + name=topic_name, + num_partitions=num_partitions, + replication_factor=replication_factor, + topic_configs=config, + ) + ] + retries = 0 + while True: + try: + admin_client.create_topics(new_topics=topics_list, validate_only=False) + logging.debug("Admin client succeed") + return + except Exception as e: + retries += 1 + time.sleep(0.5) + if retries < max_retries: + logging.warning(f"Failed to create topic {e}") + else: + raise + + +def kafka_delete_topic(admin_client, topic, max_retries=50): + result = admin_client.delete_topics([topic]) + for topic, e in result.topic_error_codes: + if e == 0: + logging.debug(f"Topic {topic} deleted") + else: + logging.error(f"Failed to delete topic {topic}: {e}") + + retries = 0 + while True: + topics_listed = admin_client.list_topics() + logging.debug(f"TOPICS LISTED: {topics_listed}") + if topic not in topics_listed: + return + else: + retries += 1 + time.sleep(0.5) + if retries > max_retries: + raise Exception(f"Failed to delete topics {topic}, {result}") + + +def test_kafka_produce_http_interface_row_based_format(kafka_cluster): + # reproduction of #61060 with validating the written messages + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + + topic_prefix = "http_row_" + + # It is important to have: + # - long enough messages + # - enough messages + # I don't know the exact requirement for message sizes, but it doesn't reproduce with short messages + # For the number of messages it seems like at least 3 messages is necessary + expected_key = "01234567890123456789" + expected_value = "aaaaabbbbbccccc" + + insert_query_end = f"(key, value) VALUES ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}')" + insert_query_template = "INSERT INTO {table_name} " + insert_query_end + + extra_settings = { + "Protobuf": ", kafka_schema = 'string_key_value.proto:StringKeyValuePair'", + "CapnProto": ", kafka_schema='string_key_value:StringKeyValuePair'", + "Template": ", format_template_row='string_key_value.format'", + } + + # Only the formats that can be used both and input and output format are tested + # Reasons to exclude following formats: + # - JSONStrings: 
not actually an input format + # - ProtobufSingle: I cannot make it work to parse the messages. Probably something is broken, + # because the producer can write multiple rows into a same message, which makes them impossible to parse properly. Should added after #67549 is fixed. + # - ProtobufList: I didn't want to deal with the envelope and stuff + # - Npy: supports only single column + # - LineAsString: supports only single column + # - RawBLOB: supports only single column + formats_to_test = [ + "TabSeparated", + "TabSeparatedRaw", + "TabSeparatedWithNames", + "TabSeparatedWithNamesAndTypes", + "TabSeparatedRawWithNames", + "TabSeparatedRawWithNamesAndTypes", + "Template", + "CSV", + "CSVWithNames", + "CSVWithNamesAndTypes", + "CustomSeparated", + "CustomSeparatedWithNames", + "CustomSeparatedWithNamesAndTypes", + "Values", + "JSON", + "JSONColumns", + "JSONColumnsWithMetadata", + "JSONCompact", + "JSONCompactColumns", + "JSONEachRow", + "JSONStringsEachRow", + "JSONCompactEachRow", + "JSONCompactEachRowWithNames", + "JSONCompactEachRowWithNamesAndTypes", + "JSONCompactStringsEachRow", + "JSONCompactStringsEachRowWithNames", + "JSONCompactStringsEachRowWithNamesAndTypes", + "JSONObjectEachRow", + "BSONEachRow", + "TSKV", + "Protobuf", + "Avro", + "Parquet", + "Arrow", + "ArrowStream", + "ORC", + "RowBinary", + "RowBinaryWithNames", + "RowBinaryWithNamesAndTypes", + "Native", + "CapnProto", + "MsgPack", + ] + for format in formats_to_test: + logging.debug(f"Creating tables and writing messages to {format}") + topic = topic_prefix + format + kafka_create_topic(admin_client, topic) + + extra_setting = extra_settings.get(format, "") + + # kafka_max_rows_per_message is set to 2 to make sure every format produces at least 2 messages, thus increasing the chance of catching a bug + instance.query( + f""" + DROP TABLE IF EXISTS test.view_{topic}; + DROP TABLE IF EXISTS test.consumer_{topic}; + CREATE TABLE test.kafka_writer_{topic} (key String, value String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic}', + kafka_group_name = '{topic}', + kafka_format = '{format}', + kafka_max_rows_per_message = 2 {extra_setting}; + + CREATE TABLE test.kafka_{topic} (key String, value String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic}', + kafka_group_name = '{topic}', + kafka_format = '{format}' {extra_setting}; + + CREATE MATERIALIZED VIEW test.view_{topic} Engine=Log AS + SELECT key, value FROM test.kafka_{topic}; + """ + ) + instance.http_query( + insert_query_template.format(table_name="test.kafka_writer_" + topic), + method="POST", + ) + + expected = f"""\ +{expected_key}\t{expected_value} +{expected_key}\t{expected_value} +{expected_key}\t{expected_value} +""" + # give some times for the readers to read the messages + for format in formats_to_test: + logging.debug(f"Checking result for {format}") + topic = topic_prefix + format + + result = instance.query_with_retry( + f"SELECT * FROM test.view_{topic}", + check_callback=lambda res: res.count("\n") == 3, + ) + + assert TSV(result) == TSV(expected) + + kafka_delete_topic(admin_client, topic) + + +if __name__ == "__main__": + cluster.start() + input("Cluster created, press any key to destroy...") + cluster.shutdown() From 613ebe367c1f811eea38d7c5e778cedddbfb0ce7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Sat, 10 Aug 2024 22:05:11 +0000 Subject: [PATCH 2033/2361] Only add extra cell when necessary --- tests/ci/report.py | 14 
+++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/ci/report.py b/tests/ci/report.py index 15b1512896a..6779a6dae96 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -738,7 +738,7 @@ def create_test_html_report( if test_results: rows_part = [] num_fails = 0 - has_test_time = False + has_test_time = any(tr.time is not None for tr in test_results) has_log_urls = False # Display entires with logs at the top (they correspond to failed tests) @@ -770,12 +770,12 @@ def create_test_html_report( row.append(f'{test_result.status}') colspan += 1 - row.append("") - if test_result.time is not None: - has_test_time = True - row.append(str(test_result.time)) - row.append("") - colspan += 1 + if has_test_time: + if test_result.time is not None: + row.append(f"{test_result.time}") + else: + row.append("") + colspan += 1 if test_result.log_urls is not None: has_log_urls = True From e93584e741fab888977413c800df4595220e7552 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 10 Aug 2024 23:02:30 +0000 Subject: [PATCH 2034/2361] fix Field conversion to IPv4 --- src/Interpreters/convertFieldToType.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 3489852bbd5..738c51baa64 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -304,8 +304,8 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } if (which_type.isIPv4() && src.getType() == Field::Types::UInt64) { - /// convert to UInt32 which is the underlying type for native IPv4 - return convertNumericType(src, type); + /// convert through UInt32 which is the underlying type for native IPv4 + return static_cast(convertNumericType(src, type).safeGet()); } } else if (which_type.isUUID() && src.getType() == Field::Types::UUID) From ee3eec0a2a15592b2020d34b74d9f595e707c092 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 11 Aug 2024 04:47:26 +0200 Subject: [PATCH 2035/2361] Update Credentials.cpp --- src/IO/S3/Credentials.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index 9c5f6547933..d6f7542da6b 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp @@ -785,6 +785,8 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( /// EC2MetadataService throttles by delaying the response so the service client should set a large read timeout. /// EC2MetadataService delay is in order of seconds so it only make sense to retry after a couple of seconds. + /// But the connection timeout should be small because there is the case when there is no IMDS at all, + /// like outside of the cloud, on your own machines. 
aws_client_configuration.connectTimeoutMs = 10; aws_client_configuration.requestTimeoutMs = 1000; From 1142305b113e261d0c8910c0b622ba94727fe78d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Aug 2024 09:53:43 +0200 Subject: [PATCH 2036/2361] tests: fix 01246_buffer_flush flakiness due to slow trace_log flush Signed-off-by: Azat Khuzhin --- tests/queries/0_stateless/01246_buffer_flush.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01246_buffer_flush.sh b/tests/queries/0_stateless/01246_buffer_flush.sh index aea91a0bf6b..3c7b9038e1f 100755 --- a/tests/queries/0_stateless/01246_buffer_flush.sh +++ b/tests/queries/0_stateless/01246_buffer_flush.sh @@ -27,7 +27,7 @@ function wait_until() function get_buffer_delay() { local buffer_insert_id=$1 && shift - query "SYSTEM FLUSH LOGS" + $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" query " WITH (SELECT event_time_microseconds FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND type = 'QueryStart' AND query_id = '$buffer_insert_id') AS begin_, From 45028620332d55391e900b6c7e75acb34df1d98c Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Aug 2024 08:35:47 +0000 Subject: [PATCH 2037/2361] Fix no-SSE3 build --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index eb3afe0ccdf..b33e7083e32 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -179,7 +179,7 @@ else() message(STATUS "Not using QPL") endif () -if (OS_LINUX AND ARCH_AMD64) +if (OS_LINUX AND ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER) option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES}) elseif(ENABLE_QATLIB) message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64") From 53bc1b7e3539cde14cb34f26af296bde5c29449e Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Sun, 11 Aug 2024 13:19:36 +0200 Subject: [PATCH 2038/2361] Revert "Refactor tests for (experimental) statistics" --- docs/en/development/tests.md | 4 +- .../statements/alter/statistics.md | 16 +- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- ...2864_statistics_count_min_sketch.reference | 14 ++ .../02864_statistics_count_min_sketch.sql | 70 ++++++ .../02864_statistics_ddl.reference | 37 ++- .../0_stateless/02864_statistics_ddl.sql | 234 ++++-------------- ...delayed_materialization_in_merge.reference | 12 - ...stics_delayed_materialization_in_merge.sql | 36 --- .../02864_statistics_exception.reference | 0 .../02864_statistics_exception.sql | 55 ++++ ..._statistics_materialize_in_merge.reference | 10 + .../02864_statistics_materialize_in_merge.sql | 52 ++++ .../02864_statistics_predicates.reference | 98 -------- .../02864_statistics_predicates.sql | 214 ---------------- .../02864_statistics_uniq.reference | 35 +++ .../0_stateless/02864_statistics_uniq.sql | 73 ++++++ .../02864_statistics_usage.reference | 20 -- .../0_stateless/02864_statistics_usage.sql | 42 ---- 19 files changed, 399 insertions(+), 625 deletions(-) create mode 100644 tests/queries/0_stateless/02864_statistics_count_min_sketch.reference create mode 100644 tests/queries/0_stateless/02864_statistics_count_min_sketch.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql create mode 100644 tests/queries/0_stateless/02864_statistics_exception.reference create mode 100644 
tests/queries/0_stateless/02864_statistics_exception.sql create mode 100644 tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference create mode 100644 tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_predicates.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_predicates.sql create mode 100644 tests/queries/0_stateless/02864_statistics_uniq.reference create mode 100644 tests/queries/0_stateless/02864_statistics_uniq.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_usage.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_usage.sql diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 6cb36e2049b..269995a1a96 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -14,7 +14,7 @@ Each functional test sends one or multiple queries to the running ClickHouse ser Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from ClickHouse and it is available to general public. -Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. +Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. :::note A common mistake when testing data types `DateTime` and `DateTime64` is assuming that the server uses a specific time zone (e.g. "UTC"). This is not the case, time zones in CI test runs @@ -38,7 +38,7 @@ For more options, see `tests/clickhouse-test --help`. You can simply run all tes ### Adding a New Test -To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client --multiquery < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. 
diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 7a1774a01b5..6880cef0e5c 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -8,28 +8,26 @@ sidebar_label: STATISTICS The following operations are available: -- `ALTER TABLE [db].table ADD STATISTICS [IF NOT EXISTS] (column list) TYPE (type list)` - Adds statistic description to tables metadata. +- `ALTER TABLE [db].table ADD STATISTICS (columns list) TYPE (type list)` - Adds statistic description to tables metadata. -- `ALTER TABLE [db].table MODIFY STATISTICS (column list) TYPE (type list)` - Modifies statistic description to tables metadata. +- `ALTER TABLE [db].table MODIFY STATISTICS (columns list) TYPE (type list)` - Modifies statistic description to tables metadata. -- `ALTER TABLE [db].table DROP STATISTICS [IF EXISTS] (column list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns. +- `ALTER TABLE [db].table DROP STATISTICS (columns list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns. -- `ALTER TABLE [db].table CLEAR STATISTICS [IF EXISTS] (column list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuild using `ALTER TABLE MATERIALIZE STATISTICS`. +- `ALTER TABLE [db].table CLEAR STATISTICS (columns list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuild using `ALTER TABLE MATERIALIZE STATISTICS`. -- `ALTER TABLE [db.]table MATERIALIZE STATISTICS [IF EXISTS] (column list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +- `ALTER TABLE [db.]table MATERIALIZE STATISTICS (columns list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). The first two commands are lightweight in a sense that they only change metadata or remove files. Also, they are replicated, syncing statistics metadata via ZooKeeper. -## Example: - -Adding two statistics types to two columns: +There is an example adding two statistics types to two columns: ``` ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq; ``` :::note -Statistic are supported only for [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). +Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). 
::: diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index c7101021f02..625b1281c61 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3517,7 +3517,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context const auto & new_column = new_metadata.getColumns().get(command.column_name); if (!old_column.type->equals(*new_column.type)) throw Exception(ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN, - "ALTER types of column {} with statistics is not safe " + "ALTER types of column {} with statistics is not not safe " "because it can change the representation of statistics", backQuoteIfNeed(command.column_name)); } diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference new file mode 100644 index 00000000000..02c41656a36 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference @@ -0,0 +1,14 @@ +CREATE TABLE default.tab\n(\n `a` String,\n `b` UInt64,\n `c` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +Test statistics count_min: + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) +Test statistics multi-types: + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) +Test LowCardinality and Nullable data type: +tab2 diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql new file mode 100644 index 00000000000..c730aa7b4a7 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql @@ -0,0 +1,70 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS tab SYNC; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET allow_suspicious_low_cardinality_types=1; +SET mutations_sync = 2; + +CREATE TABLE tab +( + a String, + b UInt64, + c Int64, + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE tab; + +INSERT INTO tab select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'Test statistics count_min:'; + +ALTER TABLE tab ADD STATISTICS a TYPE count_min; +ALTER TABLE tab ADD STATISTICS b TYPE count_min; +ALTER TABLE tab ADD STATISTICS c TYPE count_min; +ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE tab DROP STATISTICS a, b, c; + + +SELECT 'Test statistics multi-types:'; + +ALTER TABLE tab ADD STATISTICS a TYPE count_min; +ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; +ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) +WHERE explain LIKE 
'%Prewhere%' OR explain LIKE '%Filter column%'; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') +FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) +WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +ALTER TABLE tab DROP STATISTICS a, b, c; + +DROP TABLE IF EXISTS tab SYNC; + + +SELECT 'Test LowCardinality and Nullable data type:'; +DROP TABLE IF EXISTS tab2 SYNC; +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE tab2 +( + a LowCardinality(Int64) STATISTICS(count_min), + b Nullable(Int64) STATISTICS(count_min), + c LowCardinality(Nullable(Int64)) STATISTICS(count_min), + pk String, +) Engine = MergeTree() ORDER BY pk; + +select name from system.tables where name = 'tab2' and database = currentDatabase(); + +DROP TABLE IF EXISTS tab2 SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_ddl.reference b/tests/queries/0_stateless/02864_statistics_ddl.reference index 0e453b0ee8a..a7ff5caa0b0 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.reference +++ b/tests/queries/0_stateless/02864_statistics_ddl.reference @@ -1,6 +1,31 @@ -CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) +10 +0 +After drop statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10), less(a, 10)) (removed) +10 +CREATE TABLE default.tab\n(\n `a` Float64,\n `b` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After add statistic +CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity 
= 8192 +After materialize statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) +20 +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) +20 +CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After rename + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), less(c, 10)) (removed) +20 diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql index 32b56a842b7..fe612efe2ac 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.sql +++ b/tests/queries/0_stateless/02864_statistics_ddl.sql @@ -1,195 +1,59 @@ --- Tags: no-fasttest --- no-fasttest: 'count_min' sketches need a 3rd party library - --- Tests that DDL statements which create / drop / materialize statistics - -SET mutations_sync = 1; +-- Tests that various DDL statements create/drop/materialize statistics DROP TABLE IF EXISTS tab; --- Error case: Can't create statistics when allow_experimental_statistics = 0 -CREATE TABLE tab (col Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - SET allow_experimental_statistics = 1; - --- Error case: Unknown statistics types are rejected -CREATE TABLE tab (col Float64 STATISTICS(no_statistics_type)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- Error case: The same statistics type can't exist more than once on a column -CREATE TABLE tab (col Float64 STATISTICS(tdigest, tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - -SET allow_suspicious_low_cardinality_types = 1; - --- Statistics can only be created on columns of specific data types (depending on the statistics kind), (*) - --- tdigest requires data_type.isValueRepresentedByInteger --- These types work: -CREATE TABLE tab (col UInt8 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col UInt256 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Float32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Decimal32(3) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col IPv4 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Nullable(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; --- These types don't work: -CREATE 
TABLE tab (col String STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col FixedString(1) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Array(Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col UUID STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col IPv6 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } - --- uniq requires data_type.isValueRepresentedByInteger --- These types work: -CREATE TABLE tab (col UInt8 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col UInt256 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Float32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Decimal32(3) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime64 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col IPv4 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Nullable(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; --- These types don't work: -CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Array(Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } - --- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String --- These types work: -CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() 
ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; --- These types don't work: -CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } - --- CREATE TABLE was easy, ALTER is more fun +SET allow_statistics_optimize = 1; CREATE TABLE tab ( - f64 Float64, - f64_tdigest Float64 STATISTICS(tdigest), - f32 Float32, - s String, - a Array(Float64) -) -Engine = MergeTree() -ORDER BY tuple(); + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; --- Error case: Unknown statistics types are rejected --- (relevant for ADD and MODIFY) -ALTER TABLE tab ADD STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } -ALTER TABLE tab MODIFY STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } --- for some reason, ALTER TABLE tab MODIFY STATISTICS IF EXISTS is not supported - --- Error case: The same statistics type can't exist more than once on a column --- (relevant for ADD and MODIFY) --- Create the same statistics object twice -ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE tdigest, tdigest; -- { 
serverError INCORRECT_QUERY } -ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } --- Create an statistics which exists already -ALTER TABLE tab ADD STATISTICS f64_tdigest TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64_tdigest TYPE tdigest; -- no-op -ALTER TABLE tab MODIFY STATISTICS f64_tdigest TYPE tdigest; -- no-op - --- Error case: Column does not exist --- (relevant for ADD, MODIFY, DROP, CLEAR, and MATERIALIZE) --- Note that the results are unfortunately quite inconsistent ... -ALTER TABLE tab ADD STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS IF EXISTS no_such_column; -- no-op -ALTER TABLE tab CLEAR STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab CLEAR STATISTICS IF EXISTS no_such_column; -- no-op -ALTER TABLE tab MATERIALIZE STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS no_such_column; -- { serverError ILLEGAL_STATISTICS } - --- Error case: Column exists but has no statistics --- (relevant for MODIFY, DROP, CLEAR, and MATERIALIZE) --- Note that the results are unfortunately quite inconsistent ... -ALTER TABLE tab MODIFY STATISTICS s TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS s; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS IF EXISTS s; -- no-op -ALTER TABLE tab CLEAR STATISTICS s; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab CLEAR STATISTICS IF EXISTS s; -- no-op -ALTER TABLE tab MATERIALIZE STATISTICS s; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS s; -- { serverError ILLEGAL_STATISTICS } - --- We don't check systematically that that statistics can only be created via ALTER ADD STATISTICS on columns of specific data types (the --- internal type validation code is tested already above, (*)). Only do a rudimentary check for each statistics type with a data type that --- works and one that doesn't work. 
--- tdigest --- Works: -ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; --- Doesn't work: -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } --- uniq --- Works: -ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; --- Doesn't work: -ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } --- count_min --- Works: -ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; --- Doesn't work: -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } - --- Any data type changes on columns with statistics are disallowed, for simplicity even if the new data type is compatible with all existing --- statistics objects (e.g. tdigest can be created on Float64 and UInt64) -ALTER TABLE tab MODIFY COLUMN f64_tdigest UInt64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } - --- Finally, do a full-circle test of a good case. Print table definition after each step. --- Intentionally specifying _two_ columns and _two_ statistics types to have that also tested. -SHOW CREATE TABLE tab; -ALTER TABLE tab ADD STATISTICS f64, f32 TYPE tdigest, uniq; -SHOW CREATE TABLE tab; -ALTER TABLE tab MODIFY STATISTICS f64, f32 TYPE tdigest, uniq; -SHOW CREATE TABLE tab; -ALTER TABLE tab CLEAR STATISTICS f64, f32; -SHOW CREATE TABLE tab; -ALTER TABLE tab MATERIALIZE STATISTICS f64, f32; -SHOW CREATE TABLE tab; -ALTER TABLE tab DROP STATISTICS f64, f32; SHOW CREATE TABLE tab; -DROP TABLE tab; +INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count(*) FROM tab WHERE b < 10 and a < 10; +SELECT count(*) FROM tab WHERE b < NULL and a < '10'; + +ALTER TABLE tab DROP STATISTICS a, b; + +SELECT 'After drop statistic'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count(*) FROM tab WHERE b < 10 and a < 10; + +SHOW CREATE TABLE tab; + +ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; + +SELECT 'After add statistic'; + +SHOW CREATE TABLE tab; + +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; + +SELECT 'After materialize statistic'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count(*) FROM tab WHERE b < 10 and a < 10; + +OPTIMIZE TABLE tab FINAL; + +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN 
actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count(*) FROM tab WHERE b < 10 and a < 10; + +ALTER TABLE tab RENAME COLUMN b TO c; +SHOW CREATE TABLE tab; + +SELECT 'After rename'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count(*) FROM tab WHERE c < 10 and a < 10; + +DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference deleted file mode 100644 index eb5e685597c..00000000000 --- a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference +++ /dev/null @@ -1,12 +0,0 @@ -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) -After truncate, insert, and materialize - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql deleted file mode 100644 index d469a4c2036..00000000000 --- a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql +++ /dev/null @@ -1,36 +0,0 @@ --- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). --- (The concrete statistics type, column data type and predicate type don't matter) - --- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) - -DROP TABLE IF EXISTS tab; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET enable_analyzer = 1; - -SET materialize_statistics_on_insert = 0; - -CREATE TABLE tab -( - a Int64 STATISTICS(tdigest), - b Int16 STATISTICS(tdigest), -) ENGINE = MergeTree() ORDER BY tuple() -SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. 
- -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) - -OPTIMIZE TABLE tab FINAL; -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) - -TRUNCATE TABLE tab; -SET mutations_sync = 2; -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; -ALTER TABLE tab MATERIALIZE STATISTICS a, b; -SELECT 'After truncate, insert, and materialize'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_exception.reference b/tests/queries/0_stateless/02864_statistics_exception.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02864_statistics_exception.sql b/tests/queries/0_stateless/02864_statistics_exception.sql new file mode 100644 index 00000000000..289ffee6600 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_exception.sql @@ -0,0 +1,55 @@ +-- Tests creating/dropping/materializing statistics produces the right exceptions. + +DROP TABLE IF EXISTS tab; + +-- Can't create statistics when allow_experimental_statistics = 0 +CREATE TABLE tab +( + a Float64 STATISTICS(tdigest) +) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SET allow_experimental_statistics = 1; + +-- The same type of statistics can't exist more than once on a column +CREATE TABLE tab +( + a Float64 STATISTICS(tdigest, tdigest) +) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +-- Unknown statistics types are rejected +CREATE TABLE tab +( + a Float64 STATISTICS(no_statistics_type) +) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +-- tDigest statistics can only be created on numeric columns +CREATE TABLE tab +( + a String STATISTICS(tdigest), +) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +CREATE TABLE tab +( + a Float64, + b String +) Engine = MergeTree() ORDER BY tuple(); + +ALTER TABLE tab ADD STATISTICS a TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS a TYPE tdigest; +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; +-- Statistics can be created only on integer columns +ALTER TABLE tab ADD STATISTICS b TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS b; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS a; +ALTER TABLE tab DROP STATISTICS IF EXISTS a; +ALTER TABLE tab CLEAR STATISTICS a; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS a; +ALTER TABLE tab MATERIALIZE STATISTICS b; -- { serverError ILLEGAL_STATISTICS } + +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; +ALTER TABLE tab MODIFY COLUMN a Float64 TTL toDateTime(b) + INTERVAL 1 MONTH; +ALTER TABLE tab MODIFY COLUMN a Int64; -- { serverError 
ALTER_OF_COLUMN_IS_FORBIDDEN } + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference new file mode 100644 index 00000000000..5e969cf41cb --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference @@ -0,0 +1,10 @@ +10 +10 +10 +statistics not used Condition less(b, 10_UInt8) moved to PREWHERE +statistics not used Condition less(a, 10_UInt8) moved to PREWHERE +statistics used after merge Condition less(a, 10_UInt8) moved to PREWHERE +statistics used after merge Condition less(b, 10_UInt8) moved to PREWHERE +statistics used after materialize Condition less(a, 10_UInt8) moved to PREWHERE +statistics used after materialize Condition less(b, 10_UInt8) moved to PREWHERE +2 0 diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql new file mode 100644 index 00000000000..6606cff263f --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql @@ -0,0 +1,52 @@ +-- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). + +DROP TABLE IF EXISTS tab; + +SET enable_analyzer = 1; +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +SET materialize_statistics_on_insert = 0; + +CREATE TABLE tab +( + a Int64 STATISTICS(tdigest), + b Int16 STATISTICS(tdigest), +) ENGINE = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. + +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; + +SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics not used'; + +OPTIMIZE TABLE tab FINAL; + +SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after merge'; + +TRUNCATE TABLE tab; +SET mutations_sync = 2; + +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; + +SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after materialize'; + +DROP TABLE tab; + +SYSTEM FLUSH LOGS; + +SELECT log_comment, message FROM system.text_log JOIN +( + SELECT Settings['log_comment'] AS log_comment, query_id FROM system.query_log + WHERE current_database = currentDatabase() + AND query LIKE 'SELECT count(*) FROM tab%' + AND type = 'QueryFinish' +) AS query_log USING (query_id) +WHERE message LIKE '%moved to PREWHERE%' +ORDER BY event_time_microseconds; + +SELECT count(), sum(ProfileEvents['MergeTreeDataWriterStatisticsCalculationMicroseconds']) +FROM system.query_log +WHERE current_database = currentDatabase() + AND query LIKE 'INSERT INTO tab SELECT%' + AND type = 'QueryFinish'; diff --git a/tests/queries/0_stateless/02864_statistics_predicates.reference b/tests/queries/0_stateless/02864_statistics_predicates.reference deleted file mode 100644 index ffbd7269e05..00000000000 --- a/tests/queries/0_stateless/02864_statistics_predicates.reference +++ /dev/null @@ -1,98 +0,0 @@ -u64 and = -10 -10 -10 -10 -0 -0 -0 -0 -10 -10 -10 -10 -u64 and < -70 -70 -70 -70 -80 -80 -80 -80 -70 -70 -70 -70 -f64 and = -10 -10 -10 -10 -0 -0 -0 -0 -10 -10 -10 -10 -0 -0 -0 -0 -f64 and < -70 -70 -70 -70 -80 -80 -80 -80 -70 -70 -70 -70 -80 -80 -80 -80 -dt and = -0 -0 -0 -0 -10 -10 -10 -10 -dt and < 
-10000 -10000 -10000 -10000 -70 -70 -70 -70 -b and = -5000 -5000 -5000 -5000 -5000 -5000 -5000 -5000 -5000 -5000 -5000 -5000 -0 -0 -0 -0 -s and = -10 -10 diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql deleted file mode 100644 index 779116cf19a..00000000000 --- a/tests/queries/0_stateless/02864_statistics_predicates.sql +++ /dev/null @@ -1,214 +0,0 @@ --- Tags: no-fasttest --- no-fasttest: 'count_min' sketches need a 3rd party library - --- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types. - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; - -DROP TABLE IF EXISTS tab; - -CREATE TABLE tab -( - u64 UInt64, - u64_tdigest UInt64 STATISTICS(tdigest), - u64_count_min UInt64 STATISTICS(count_min), - u64_uniq UInt64 STATISTICS(uniq), - f64 Float64, - f64_tdigest Float64 STATISTICS(tdigest), - f64_count_min Float64 STATISTICS(count_min), - f64_uniq Float64 STATISTICS(uniq), - dt DateTime, - dt_tdigest DateTime STATISTICS(tdigest), - dt_count_min DateTime STATISTICS(count_min), - dt_uniq DateTime STATISTICS(uniq), - b Bool, - b_tdigest Bool STATISTICS(tdigest), - b_count_min Bool STATISTICS(count_min), - b_uniq Bool STATISTICS(uniq), - s String, - -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest - s_count_min String STATISTICS(count_min) - -- s_uniq String STATISTICS(uniq), -- not supported by uniq -) Engine = MergeTree() ORDER BY tuple() -SETTINGS min_bytes_for_wide_part = 0; - -INSERT INTO tab --- SELECT number % 10000, number % 1000, -(number % 100) FROM system.numbers LIMIT 10000; -SELECT number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 1000, - number % 2, - number % 2, - number % 2, - number % 2, - toString(number % 1000), - toString(number % 1000) -FROM system.numbers LIMIT 10000; - --- u64 ---------------------------------------------------- - -SELECT 'u64 and ='; - -SELECT count(*) FROM tab WHERE u64 = 7; -SELECT count(*) FROM tab WHERE u64_tdigest = 7; -SELECT count(*) FROM tab WHERE u64_count_min = 7; -SELECT count(*) FROM tab WHERE u64_uniq = 7; - -SELECT count(*) FROM tab WHERE u64 = 7.7; -SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; -SELECT count(*) FROM tab WHERE u64_count_min = 7.7; -SELECT count(*) FROM tab WHERE u64_uniq = 7.7; - -SELECT count(*) FROM tab WHERE u64 = '7'; -SELECT count(*) FROM tab WHERE u64_tdigest = '7'; -SELECT count(*) FROM tab WHERE u64_count_min = '7'; -SELECT count(*) FROM tab WHERE u64_uniq = '7'; - -SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } - -SELECT 'u64 and <'; - -SELECT count(*) FROM tab WHERE u64 < 7; -SELECT count(*) FROM tab WHERE u64_tdigest < 7; -SELECT count(*) FROM tab WHERE u64_count_min < 7; -SELECT count(*) FROM tab WHERE u64_uniq < 7; - -SELECT count(*) FROM tab WHERE u64 < 7.7; -SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; -SELECT count(*) FROM tab WHERE u64_count_min < 7.7; -SELECT count(*) FROM tab WHERE u64_uniq < 7.7; - -SELECT count(*) FROM tab WHERE u64 < '7'; -SELECT count(*) FROM tab WHERE u64_tdigest < 
'7'; -SELECT count(*) FROM tab WHERE u64_count_min < '7'; -SELECT count(*) FROM tab WHERE u64_uniq < '7'; - -SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } - --- f64 ---------------------------------------------------- - -SELECT 'f64 and ='; - -SELECT count(*) FROM tab WHERE f64 = 7; -SELECT count(*) FROM tab WHERE f64_tdigest = 7; -SELECT count(*) FROM tab WHERE f64_count_min = 7; -SELECT count(*) FROM tab WHERE f64_uniq = 7; - -SELECT count(*) FROM tab WHERE f64 = 7.7; -SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; -SELECT count(*) FROM tab WHERE f64_count_min = 7.7; -SELECT count(*) FROM tab WHERE f64_uniq = 7.7; - -SELECT count(*) FROM tab WHERE f64 = '7'; -SELECT count(*) FROM tab WHERE f64_tdigest = '7'; -SELECT count(*) FROM tab WHERE f64_count_min = '7'; -SELECT count(*) FROM tab WHERE f64_uniq = '7'; - -SELECT count(*) FROM tab WHERE f64 = '7.7'; -SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; -SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; -SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; - -SELECT 'f64 and <'; - -SELECT count(*) FROM tab WHERE f64 < 7; -SELECT count(*) FROM tab WHERE f64_tdigest < 7; -SELECT count(*) FROM tab WHERE f64_count_min < 7; -SELECT count(*) FROM tab WHERE f64_uniq < 7; - -SELECT count(*) FROM tab WHERE f64 < 7.7; -SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; -SELECT count(*) FROM tab WHERE f64_count_min < 7.7; -SELECT count(*) FROM tab WHERE f64_uniq < 7.7; - -SELECT count(*) FROM tab WHERE f64 < '7'; -SELECT count(*) FROM tab WHERE f64_tdigest < '7'; -SELECT count(*) FROM tab WHERE f64_count_min < '7'; -SELECT count(*) FROM tab WHERE f64_uniq < '7'; - -SELECT count(*) FROM tab WHERE f64 < '7.7'; -SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; -SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; -SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; - --- dt ---------------------------------------------------- - -SELECT 'dt and ='; - -SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; - -SELECT count(*) FROM tab WHERE dt = 7; -SELECT count(*) FROM tab WHERE dt_tdigest = 7; -SELECT count(*) FROM tab WHERE dt_count_min = 7; -SELECT count(*) FROM tab WHERE dt_uniq = 7; - -SELECT 'dt and <'; - -SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; - -SELECT count(*) FROM tab WHERE dt < 7; -SELECT count(*) FROM tab WHERE dt_tdigest < 7; -SELECT count(*) FROM tab WHERE dt_count_min < 7; -SELECT count(*) FROM tab WHERE dt_uniq < 7; - --- b ---------------------------------------------------- - -SELECT 'b and ='; - -SELECT count(*) FROM tab WHERE b = true; -SELECT count(*) FROM tab WHERE b_tdigest = true; -SELECT count(*) FROM tab WHERE b_count_min = true; -SELECT count(*) FROM tab WHERE b_uniq = true; - -SELECT count(*) FROM tab WHERE b = 'true'; -SELECT count(*) FROM tab WHERE b_tdigest = 'true'; -SELECT count(*) FROM tab WHERE b_count_min = 
'true'; -SELECT count(*) FROM tab WHERE b_uniq = 'true'; - -SELECT count(*) FROM tab WHERE b = 1; -SELECT count(*) FROM tab WHERE b_tdigest = 1; -SELECT count(*) FROM tab WHERE b_count_min = 1; -SELECT count(*) FROM tab WHERE b_uniq = 1; - -SELECT count(*) FROM tab WHERE b = 1.1; -SELECT count(*) FROM tab WHERE b_tdigest = 1.1; -SELECT count(*) FROM tab WHERE b_count_min = 1.1; -SELECT count(*) FROM tab WHERE b_uniq = 1.1; - --- s ---------------------------------------------------- - -SELECT 's and ='; - -SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE } --- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported -SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE } --- SELECT count(*) FROM tab WHERE s_uniq = 7; -- not supported - -SELECT count(*) FROM tab WHERE s = '7'; --- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported -SELECT count(*) FROM tab WHERE s_count_min = '7'; --- SELECT count(*) FROM tab WHERE s_uniq = '7'; -- not supported - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference new file mode 100644 index 00000000000..77786dbdd8c --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_uniq.reference @@ -0,0 +1,35 @@ +CREATE TABLE default.t1\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest, uniq),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) +After modify TDigest + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(c, -1), less(a, 10), less(b, 10)) (removed) +After drop + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql new file mode 100644 index 00000000000..0f5f353c045 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_uniq.sql @@ -0,0 +1,73 @@ +DROP TABLE IF EXISTS t1; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; + +CREATE TABLE t1 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c Int64 STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; + +SHOW CREATE TABLE t1; + +INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; +INSERT INTO t1 select 0, 0, 11, 
generateUUIDv4(); + +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +OPTIMIZE TABLE t1 FINAL; + +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +SELECT 'After modify TDigest'; +ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; +ALTER TABLE t1 MATERIALIZE STATISTICS c; + +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + + +ALTER TABLE t1 DROP STATISTICS c; + +SELECT 'After drop'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE t2 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c LowCardinality(Int64) STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; + +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t3 +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest), + c Nullable(Int64) STATISTICS(tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk +SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; + +DROP TABLE IF EXISTS t3; + diff --git a/tests/queries/0_stateless/02864_statistics_usage.reference b/tests/queries/0_stateless/02864_statistics_usage.reference deleted file mode 100644 index a9f669b88c1..00000000000 --- a/tests/queries/0_stateless/02864_statistics_usage.reference +++ /dev/null @@ -1,20 +0,0 @@ 
-After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) -After drop statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) -After add and materialize statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) -After rename - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10_UInt8), less(c, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_usage.sql b/tests/queries/0_stateless/02864_statistics_usage.sql deleted file mode 100644 index 4956bd27e87..00000000000 --- a/tests/queries/0_stateless/02864_statistics_usage.sql +++ /dev/null @@ -1,42 +0,0 @@ --- Test that the optimizer picks up column statistics --- (The concrete statistics type, column data type and predicate type don't matter) - --- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET mutations_sync = 1; -SET enable_analyzer = 1; - -DROP TABLE IF EXISTS tab; - -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest) -) Engine = MergeTree() ORDER BY tuple() -SETTINGS min_bytes_for_wide_part = 0; - -INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) - -ALTER TABLE tab DROP STATISTICS a, b; -SELECT 'After drop statistic'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) - -ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS a, b; -INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; -SELECT 'After add and materialize statistic'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) - -OPTIMIZE TABLE tab FINAL; -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) - -ALTER TABLE tab RENAME COLUMN b TO c; -SELECT 'After rename'; -SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then c (statistics used) - -DROP TABLE IF EXISTS tab; From 29afd2de785450f2e7f5faec1dc6b35e166cefb4 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 11 Aug 2024 13:26:45 +0200 Subject: [PATCH 2039/2361] Remove "Processing configuration file" message from clickhouse-local Make the behaviour identical to the clickhouse-client Signed-off-by: Azat Khuzhin --- programs/local/LocalServer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/local/LocalServer.cpp 
b/programs/local/LocalServer.cpp index 6b0b8fc5b50..200beea7b63 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -143,7 +143,7 @@ void LocalServer::initialize(Poco::Util::Application & self) if (fs::exists(config_path)) { - ConfigProcessor config_processor(config_path, false, true); + ConfigProcessor config_processor(config_path); ConfigProcessor::setConfigPath(fs::path(config_path).parent_path()); auto loaded_config = config_processor.loadConfig(); getClientConfiguration().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false); From 4fec61da55c1032f274da87198af59c78cd0d87e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 11 Aug 2024 12:35:27 +0000 Subject: [PATCH 2040/2361] fix wrong datatype in system.kafka_consumers --- src/Storages/System/StorageSystemKafkaConsumers.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemKafkaConsumers.cpp b/src/Storages/System/StorageSystemKafkaConsumers.cpp index 86713632339..5e790587716 100644 --- a/src/Storages/System/StorageSystemKafkaConsumers.cpp +++ b/src/Storages/System/StorageSystemKafkaConsumers.cpp @@ -79,7 +79,7 @@ void StorageSystemKafkaConsumers::fillData(MutableColumns & res_columns, Context auto & num_rebalance_revocations = assert_cast(*res_columns[index++]); auto & num_rebalance_assigments = assert_cast(*res_columns[index++]); auto & is_currently_used = assert_cast(*res_columns[index++]); - auto & last_used = assert_cast(*res_columns[index++]); + auto & last_used = assert_cast(*res_columns[index++]); auto & rdkafka_stat = assert_cast(*res_columns[index++]); const auto access = context->getAccess(); From 4ef3fe416debecefcea4d7336aac7c679092cf0c Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sun, 11 Aug 2024 13:08:53 +0000 Subject: [PATCH 2041/2361] Fix and simplify test --- .../02496_remove_redundant_sorting.reference | 68 ++++++++---------- .../02496_remove_redundant_sorting.sh | 43 ++++++----- ...emove_redundant_sorting_analyzer.reference | 71 ++++++++----------- 3 files changed, 82 insertions(+), 100 deletions(-) diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index a0a1fd60812..4d004f2f78f 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -465,6 +465,37 @@ Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) -- execute Float64 9007199254740994 +-- presence of an inner OFFSET retains the ORDER BY +-- query +WITH + t1 AS ( + SELECT a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2 +-- explain +Expression ((Projection + Before ORDER BY)) + Aggregating + Expression (Before GROUP BY) + Offset + Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + ReadFromStorage (Values) +-- execute +0 -- disable common optimization to avoid functions to be lifted up (liftUpFunctions optimization), needed for testing with stateful function -- neighbor() as stateful function prevents removing inner ORDER BY since its result depends on order -- query @@ -527,40 +558,3 @@ Expression (Projection) 2 4 1 3 0 2 --- presence of an inner OFFSET retains the ORDER BY --- query -WITH - t1 AS ( - SELECT SUM(a) AS a, b - FROM - VALUES ( - 'b UInt32, a Int32', - 
(1, 1), - (2, 0) - ) - GROUP BY 2 - ) -SELECT - SUM(a) -FROM ( - SELECT a, b - FROM t1 - ORDER BY 1 DESC, 2 - OFFSET 1 -) t2 --- explain -Expression (Projection) - Expression (Before ORDER BY) - Aggregating - Expression (Before GROUP BY) - Offset - Expression (Projection) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - Expression (Projection) - Expression (Before ORDER BY) - Aggregating - Expression (Before GROUP BY) - ReadFromStorage (Values) --- execute -0 diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index d59b4387101..c9bd242e429 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -302,6 +302,27 @@ FROM )" run_query "$query" +echo "-- presence of an inner OFFSET retains the ORDER BY" +query="WITH + t1 AS ( + SELECT a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2" +run_query "$query" + echo "-- disable common optimization to avoid functions to be lifted up (liftUpFunctions optimization), needed for testing with stateful function" ENABLE_OPTIMIZATION="SET query_plan_enable_optimizations=0;$ENABLE_OPTIMIZATION" echo "-- neighbor() as stateful function prevents removing inner ORDER BY since its result depends on order" @@ -329,25 +350,3 @@ FROM ORDER BY number DESC )" run_query "$query" - -echo "-- presence of an inner OFFSET retains the ORDER BY" -query="WITH - t1 AS ( - SELECT SUM(a) AS a, b - FROM - VALUES ( - 'b UInt32, a Int32', - (1, 1), - (2, 0) - ) - GROUP BY 2 - ) -SELECT - SUM(a) -FROM ( - SELECT a, b - FROM t1 - ORDER BY 1 DESC, 2 - OFFSET 1 -) t2" -run_query "$query" diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference index 58441de5f22..dd5ac7bf706 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference @@ -464,6 +464,36 @@ Expression ((Project names + Projection)) ReadFromStorage (SystemOne) -- execute Float64 9007199254740994 +-- presence of an inner OFFSET retains the ORDER BY +-- query +WITH + t1 AS ( + SELECT a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2 +-- explain +Expression ((Project names + Projection)) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + Project names))) + Offset + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Projection + Change column names to column identifiers)))))) + ReadFromStorage (Values) +-- execute +0 -- disable common optimization to avoid functions to be lifted up (liftUpFunctions optimization), needed for testing with stateful function -- neighbor() as stateful function prevents removing inner ORDER BY since its result depends on order -- query @@ -533,44 +563,3 @@ Expression (Project names) 2 4 1 3 0 2 --- presence of an inner OFFSET retains the ORDER BY --- query -WITH - t1 AS ( - SELECT SUM(a) AS a, b - FROM - VALUES ( - 'b UInt32, a Int32', - (1, 1), - (2, 0) - ) - GROUP BY 2 - ) -SELECT - SUM(a) -FROM ( - SELECT a, b - FROM t1 - ORDER BY 1 DESC, 2 - OFFSET 1 -) 
t2 --- explain -Expression (Project names) - Expression (Projection) - Aggregating - Expression (Before GROUP BY) - Expression (Change column names to column identifiers) - Expression (Project names) - Offset - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - Expression (Projection) - Expression (Change column names to column identifiers) - Expression (Project names) - Expression (Projection) - Aggregating - Expression (Before GROUP BY) - Expression (Change column names to column identifiers) - ReadFromStorage (Values) --- execute -0 From 8e706265e6df3653de76224bfc050b4f52e49282 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 11 Aug 2024 16:29:35 +0000 Subject: [PATCH 2042/2361] fix --- src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp index 6a0522b0676..3ca7e8183f1 100644 --- a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp +++ b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp @@ -125,7 +125,7 @@ void OptimizeShardingKeyRewriteInMatcher::visit(ASTFunction & function, Data & d { Tuple new_tuple; - for (const auto & child : tuple) + for (auto & child : tuple) if (shardContains(child, name, data)) new_tuple.emplace_back(std::move(child)); From d314e5aa45fb8ac91324721ab278185b09437a40 Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Sun, 11 Aug 2024 18:37:29 +0200 Subject: [PATCH 2043/2361] typos in prometheus.md --- docs/en/interfaces/prometheus.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md index bf541901b34..8e7023cc51f 100644 --- a/docs/en/interfaces/prometheus.md +++ b/docs/en/interfaces/prometheus.md @@ -75,7 +75,7 @@ Data are received by this protocol and written to a [TimeSeries](/en/engines/tab /write - remote_writeremote_write db_name time_series_table
@@ -105,7 +105,7 @@ Data are read from a [TimeSeries](/en/engines/table-engines/special/time_series) /read - remote_readremote_read
db_name time_series_table
@@ -144,14 +144,14 @@ Multiple protocols can be specified together in one place: /write - remote_writeremote_write
db_name.time_series_table
/read - remote_readremote_read
db_name.time_series_table
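
The handler configuration documented above stores incoming `remote_write` data in, and serves `remote_read` requests from, a [TimeSeries](/en/engines/table-engines/special/time_series) table. For orientation, a minimal sketch of creating the target object those handlers point at — `db_name` and `time_series_table` are the placeholder names used in the docs, and the experimental-feature setting name is an assumption rather than something taken from this patch:

```sql
-- Hypothetical setup for the target of the remote_write/remote_read handlers above.
-- The setting name below is assumed from the experimental TimeSeries engine docs.
SET allow_experimental_time_series_table = 1;

CREATE DATABASE IF NOT EXISTS db_name;

-- The TimeSeries engine creates its inner data/tags/metrics tables automatically,
-- so no explicit column list is needed here.
CREATE TABLE db_name.time_series_table ENGINE = TimeSeries;
```
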
From e384e2c38e405b1b4758adaa44cd321e6d7f41b3 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 11 Aug 2024 18:34:33 +0200 Subject: [PATCH 2044/2361] tests: fix 02122_join_group_by_timeout flakiness CI found [1] failure of the test: 2024-08-11 21:06:07 /usr/share/clickhouse-test/queries/0_stateless/02122_join_group_by_timeout.sh: line 51: 52614 Killed timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT -q "SELECT a.name as n And the problem is not the server, but the client, since query executed for ~1 second: 2024.08.11 21:06:02.284318 [ 49232 ] {ba989ee2-f615-49ca-bcd8-31b3916aeb2c} executeQuery: (from [::1]:54144) (comment: 02122_join_group_by_timeout.sh) SELECT a.name as n FROM ( SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 ) AS a, ( SELECT 'Name' as name2, number FROM system.numbers LIMIT 2000000 ) as b FORMAT Null SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' (stage: Complete) 2024.08.11 21:06:03.331249 [ 49232 ] {ba989ee2-f615-49ca-bcd8-31b3916aeb2c} executeQuery: Read 517104 rows, 3.95 MiB in 1.072023 sec., 482362.78512681165 rows/sec., 3.68 MiB/sec. [1]: https://s3.amazonaws.com/clickhouse-test-reports/67134/18da3f0ab63da1eef9396627d0dfd56cf5356f65/stateless_tests__msan__[1_4].html So instead of using timeout, let's use time from the system.query_log instead. Signed-off-by: Azat Khuzhin --- .../02122_join_group_by_timeout.reference | 6 +- .../02122_join_group_by_timeout.sh | 70 ++++++++++--------- 2 files changed, 41 insertions(+), 35 deletions(-) diff --git a/tests/queries/0_stateless/02122_join_group_by_timeout.reference b/tests/queries/0_stateless/02122_join_group_by_timeout.reference index f314e22e519..6500560e8fc 100644 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.reference +++ b/tests/queries/0_stateless/02122_join_group_by_timeout.reference @@ -1,4 +1,6 @@ -Code: 159 -0 +Code: 159 +query_duration 1 +0 +query_duration 1 Code: 159 0 diff --git a/tests/queries/0_stateless/02122_join_group_by_timeout.sh b/tests/queries/0_stateless/02122_join_group_by_timeout.sh index 8380c5dbd0c..b4644878544 100755 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.sh +++ b/tests/queries/0_stateless/02122_join_group_by_timeout.sh @@ -1,27 +1,23 @@ #!/usr/bin/env bash -# Tags: no-debug - -# no-debug: Query is canceled by timeout after max_execution_time, -# but sending an exception to the client may hang -# for more than MAX_PROCESS_WAIT seconds in a slow debug build, -# and test will fail. CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -MAX_PROCESS_WAIT=5 - -IS_SANITIZER=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.warnings WHERE message like '%built with sanitizer%'") -if [ "$IS_SANITIZER" -gt 0 ]; then - # Query may hang for more than 5 seconds, especially in tsan build - MAX_PROCESS_WAIT=15 +TIMEOUT=5 +IS_SANITIZER_OR_DEBUG=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.warnings WHERE message like '%built with sanitizer%' or message like '%built in debug mode%'") +if [ "$IS_SANITIZER_OR_DEBUG" -gt 0 ]; then + # Increase the timeout due to in debug/sanitizers build: + # - client is slow + # - stacktrace resolving is slow + TIMEOUT=15 fi # TCP CLIENT: As of today (02/12/21) uses PullingAsyncPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \ - "SELECT * FROM +query_id=$(random_str 12) +$CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q " + SELECT * FROM ( SELECT a.name as n FROM @@ -34,28 +30,35 @@ timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \ GROUP BY n ) LIMIT 20 - FORMAT Null" 2>&1 | grep -o "Code: 159" | sort | uniq + FORMAT Null +" 2>&1 | grep -m1 -o "Code: 159" +$CLICKHOUSE_CLIENT -q "system flush logs" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" + ### Should stop pulling data and return what has been generated already (return code 0) -timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT -q \ - "SELECT a.name as n - FROM - ( - SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 - ) AS a, - ( - SELECT 'Name' as name2, number FROM system.numbers LIMIT 2000000 - ) as b - FORMAT Null - SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' - " +query_id=$(random_str 12) +$CLICKHOUSE_CLIENT --query_id "$query_id" -q " + SELECT a.name as n + FROM + ( + SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 + ) AS a, + ( + SELECT 'Name' as name2, number FROM system.numbers LIMIT 2000000 + ) as b + FORMAT Null + SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' +" echo $? 
+$CLICKHOUSE_CLIENT -q "system flush logs" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" # HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d \ - "SELECT * FROM +${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d " + SELECT * FROM ( SELECT a.name as n FROM @@ -68,12 +71,13 @@ ${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_exec GROUP BY n ) LIMIT 20 - FORMAT Null" 2>&1 | grep -o "Code: 159" | sort | uniq + FORMAT Null +" 2>&1 | grep -o "Code: 159" | sort | uniq ### Should stop pulling data and return what has been generated already (return code 0) -${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL" -d \ - "SELECT a.name as n +${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL" -d " + SELECT a.name as n FROM ( SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 @@ -83,5 +87,5 @@ ${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL" -d \ ) as b FORMAT Null SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' - " +" echo $? From 8a48b3334433fe5e77c23ff6df10e454db2d3f82 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 11 Aug 2024 21:27:08 +0200 Subject: [PATCH 2045/2361] Fix settings/current_database in system.processes for async BACKUP/RESTORE Signed-off-by: Azat Khuzhin --- src/Backups/BackupsWorker.cpp | 4 ++++ src/Interpreters/ProcessList.h | 3 +++ 2 files changed, 7 insertions(+) diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 0b93ae6d547..8b45c816817 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -490,6 +490,8 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); + /// Update context to preserve query information in processlist (settings, current_database) + process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, @@ -853,6 +855,8 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); + /// Update context to preserve query information in processlist (settings, current_database) + process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index accb73e12df..248ba947bc1 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -244,6 +244,9 @@ public: /// Same as checkTimeLimit but it never throws [[nodiscard]] bool checkTimeLimitSoft(); + /// Use it in case of the query left in background to execute asynchronously + void updateContext(ContextWeakPtr weak_context) { context = std::move(weak_context); } + /// Get the reference for the start of the query. 
Used to synchronize with other Stopwatches UInt64 getQueryCPUStartTime() { return watch.getStart(); } }; From 5c8665c66069256f4e34fb32068fab2fcb90cc65 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 11 Aug 2024 20:40:55 +0000 Subject: [PATCH 2046/2361] fix system.kafka_consumers and doc, fix tidy --- docs/en/operations/system-tables/kafka_consumers.md | 1 + src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp | 2 +- src/Storages/System/StorageSystemKafkaConsumers.cpp | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/system-tables/kafka_consumers.md b/docs/en/operations/system-tables/kafka_consumers.md index 7e28a251e26..d58c9f754fd 100644 --- a/docs/en/operations/system-tables/kafka_consumers.md +++ b/docs/en/operations/system-tables/kafka_consumers.md @@ -24,6 +24,7 @@ Columns: - `num_rebalance_revocations`, (UInt64) - number of times the consumer was revoked its partitions - `num_rebalance_assignments`, (UInt64) - number of times the consumer was assigned to Kafka cluster - `is_currently_used`, (UInt8) - consumer is in use +- `last_used`, (UInt64) - last time this consumer was in use, unix time in microseconds - `rdkafka_stat` (String) - library internal statistic. See https://github.com/ClickHouse/librdkafka/blob/master/STATISTICS.md . Set `statistics_interval_ms` to 0 disable, default is 3000 (once in three seconds). Example: diff --git a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp index 3ca7e8183f1..86cec8659f5 100644 --- a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp +++ b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp @@ -129,7 +129,7 @@ void OptimizeShardingKeyRewriteInMatcher::visit(ASTFunction & function, Data & d if (shardContains(child, name, data)) new_tuple.emplace_back(std::move(child)); - if (new_tuple.size() == 0) + if (new_tuple.empty()) new_tuple.emplace_back(std::move(tuple.back())); tuple_literal->value = std::move(new_tuple); diff --git a/src/Storages/System/StorageSystemKafkaConsumers.cpp b/src/Storages/System/StorageSystemKafkaConsumers.cpp index 5e790587716..db6804d3ad7 100644 --- a/src/Storages/System/StorageSystemKafkaConsumers.cpp +++ b/src/Storages/System/StorageSystemKafkaConsumers.cpp @@ -42,7 +42,7 @@ ColumnsDescription StorageSystemKafkaConsumers::getColumnsDescription() {"num_rebalance_revocations", std::make_shared(), "Number of times the consumer was revoked its partitions."}, {"num_rebalance_assignments", std::make_shared(), "Number of times the consumer was assigned to Kafka cluster."}, {"is_currently_used", std::make_shared(), "The flag which shows whether the consumer is in use."}, - {"last_used", std::make_shared(6), "The last time this consumer was in use."}, + {"last_used", std::make_shared(), "The last time this consumer was in use, unix time in microseconds."}, {"rdkafka_stat", std::make_shared(), "Library internal statistic. 
Set statistics_interval_ms to 0 disable, default is 3000 (once in three seconds)."}, }; } From 967bd0566336f8c239f0045f703fc8fe428cb28f Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 11 Aug 2024 12:24:13 -0600 Subject: [PATCH 2047/2361] Add create_if_not_exists setting to Settings.h --- src/Core/Settings.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 6f24db57026..1b52df76c45 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -896,6 +896,7 @@ class IColumn; M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \ M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \ M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \ + M(Bool, create_if_not_exists, false, "Enable IF NOT EXISTS for CREATE statements by default", 0) \ \ \ /* ###################################### */ \ From f90b8327bea16ee81c12f0210d2602889a5944bc Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 11 Aug 2024 11:43:57 -0600 Subject: [PATCH 2048/2361] Update SettingsChangesHistory.cpp with new create_if_not_exists settings --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 511723f1873..202b21a92f0 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,6 +75,7 @@ static std::initializer_list Date: Sun, 11 Aug 2024 11:47:30 -0600 Subject: [PATCH 2049/2361] Add support for new create_if_not_exists setting in InterpreterCreateQuery.cpp --- src/Interpreters/InterpreterCreateQuery.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a101e5e8f09..d899b8e111e 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1946,6 +1946,8 @@ BlockIO InterpreterCreateQuery::execute() FunctionNameNormalizer::visit(query_ptr.get()); auto & create = query_ptr->as(); + create.if_not_exists |= getContext()->getSettingsRef().create_if_not_exists; + bool is_create_database = create.database && !create.table; if (!create.cluster.empty() && !maybeRemoveOnCluster(query_ptr, getContext())) { From 2af1134c08ab164b2d77af854166fa30d96fddd9 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 11 Aug 2024 12:17:24 -0600 Subject: [PATCH 2050/2361] Update settings.md docs with new create_if_not_exists settings --- docs/en/operations/settings/settings.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index e432f4e038f..22f73a03729 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5637,3 +5637,9 @@ Possible values: - 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled. Default value: `0`. + +## create_if_not_exists + +Enable IF NOT EXISTS for CREATE statements by default. If either this setting or IF NOT EXISTS is specified, then no Exception will be thrown when trying to create a new table. + +Default value: `false`. 
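
Taken together, the patches above add the `create_if_not_exists` setting, record it in the settings-changes history, apply it in `InterpreterCreateQuery`, and document it. A minimal usage sketch of the behaviour being enabled (the dedicated test added in the next patch exercises the same scenario, including the DATABASE case):

```sql
-- With the setting off (the default), the second CREATE below fails with TABLE_ALREADY_EXISTS.
SET create_if_not_exists = 1;

CREATE TABLE example_table (id UInt32) ENGINE = MergeTree ORDER BY id;

-- With create_if_not_exists = 1 this statement is treated as CREATE TABLE IF NOT EXISTS
-- and completes without an exception.
CREATE TABLE example_table (id UInt32) ENGINE = MergeTree ORDER BY id;

DROP TABLE example_table;
```
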
From cc0412c55372108116b6b05fb4fc66ebd4eccae2 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 11 Aug 2024 16:01:48 -0600 Subject: [PATCH 2051/2361] Add test 03221_create_if_not_exists_setting --- ...221_create_if_not_exists_setting.reference | 4 ++ .../03221_create_if_not_exists_setting.sh | 43 +++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 tests/queries/0_stateless/03221_create_if_not_exists_setting.reference create mode 100755 tests/queries/0_stateless/03221_create_if_not_exists_setting.sh diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference b/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference new file mode 100644 index 00000000000..8740b05c9ca --- /dev/null +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference @@ -0,0 +1,4 @@ +57 +82 +0 +0 diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh new file mode 100755 index 00000000000..cfbe2eb8fd9 --- /dev/null +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# $CLICKHOUSE_CLIENT -mn -q "SET create_if_not_exists=0;" # Default +$CLICKHOUSE_CLIENT -mn -q " +DROP TABLE IF EXISTS example_table; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +" 2> /dev/null +# ensure failed error code +echo $? +$CLICKHOUSE_CLIENT -mn -q " +DROP DATABASE IF EXISTS example_database; +CREATE DATABASE example_database; +CREATE DATABASE example_database; +" 2> /dev/null +echo $? + +$CLICKHOUSE_CLIENT -mn -q " +SET create_if_not_exists=1; +DROP TABLE IF EXISTS example_table; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +" +# ensure successful error code +echo $? + + +$CLICKHOUSE_CLIENT -mn -q " +SET create_if_not_exists=1; +DROP DATABASE IF EXISTS example_database; +CREATE DATABASE example_database; +CREATE DATABASE example_database; +" +echo $? 
+ +$CLICKHOUSE_CLIENT -mn -q " +DROP DATABASE example_database; +DROP TABLE example_table; +" \ No newline at end of file From 957a0b6ea4c3e262a5c1fa664d81ab31d7e0d757 Mon Sep 17 00:00:00 2001 From: sakulali Date: Sun, 11 Aug 2024 00:12:36 +0800 Subject: [PATCH 2052/2361] Add a setting query_cache_tag --- docs/en/operations/query-cache.md | 10 ++++++ docs/en/operations/settings/settings.md | 11 +++++++ .../operations/system-tables/query_cache.md | 2 ++ src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.cpp | 1 + src/Interpreters/Cache/QueryCache.cpp | 16 +++++++--- src/Interpreters/Cache/QueryCache.h | 13 ++++++-- src/Interpreters/executeQuery.cpp | 5 +-- .../System/StorageSystemQueryCache.cpp | 5 ++- .../02494_query_cache_tag.reference | 14 ++++++++ .../0_stateless/02494_query_cache_tag.sql | 32 +++++++++++++++++++ 11 files changed, 100 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/02494_query_cache_tag.reference create mode 100644 tests/queries/0_stateless/02494_query_cache_tag.sql diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index 7a920671fc2..a6c4d74f4ac 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -143,6 +143,16 @@ value can be specified at session, profile or query level using setting [query_c Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). +Entries in the query cache can separate by tag, using setting [query_cache_tag](settings/settings.md#query-cache-tag). Queries with different tags are considered different entries. For example, the result of query + +``` sql +SELECT 1 SETTINGS use_query_cache = true; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one'; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one diff'; +``` + +have different entries in the query cache, find the specified tag in system table [system.query_cache](system-tables/query_cache.md) + ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation, etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting [query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index e432f4e038f..7b855665efb 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1800,6 +1800,17 @@ Possible values: Default value: `0`. +## query_cache_tag {#query-cache-tag} + +An arbitrary string to separate entries in the [query cache](../query-cache.md). +Queries with different values of this setting are considered different. + +Possible values: + +- string: name of query cache tag + +Default value: `''`. + ## query_cache_max_size_in_bytes {#query-cache-max-size-in-bytes} The maximum amount of memory (in bytes) the current user may allocate in the [query cache](../query-cache.md). 0 means unlimited. 
diff --git a/docs/en/operations/system-tables/query_cache.md b/docs/en/operations/system-tables/query_cache.md index a9f86f5fc2b..393b37d3616 100644 --- a/docs/en/operations/system-tables/query_cache.md +++ b/docs/en/operations/system-tables/query_cache.md @@ -14,6 +14,7 @@ Columns: - `compressed` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is compressed. - `expires_at` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the query cache entry becomes stale. - `key_hash` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — A hash of the query string, used as a key to find query cache entries. +- `tag` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — An arbitrary string to separate entries in the query cache. **Example** @@ -31,6 +32,7 @@ shared: 0 compressed: 1 expires_at: 2023-10-13 13:35:45 key_hash: 12188185624808016954 +tag: 1 row in set. Elapsed: 0.004 sec. ``` diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 4559cc67b35..ed58f8041d0 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -676,6 +676,7 @@ class IColumn; M(Bool, query_cache_squash_partial_results, true, "Squash partial result blocks to blocks of size 'max_block_size'. Reduces performance of inserts into the query cache but improves the compressability of cache entries.", 0) \ M(Seconds, query_cache_ttl, 60, "After this time in seconds entries in the query cache become stale", 0) \ M(Bool, query_cache_share_between_users, false, "Allow other users to read entry in the query cache", 0) \ + M(String, query_cache_tag, "", "An arbitrary string to separate entries in the query cache. Queries with different values of this setting are considered different.", 0) \ M(Bool, enable_sharing_sets_for_mutations, true, "Allow sharing set objects build for IN subqueries between different tasks of the same mutation. 
This reduces memory usage and CPU consumption", 0) \ \ M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) function countIf() function when logically equivalent", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c6392044f72..49a325b07b1 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -84,6 +84,7 @@ static std::initializer_list user_id_, const std::vector & current_user_roles_, bool is_shared_, std::chrono::time_point expires_at_, - bool is_compressed_) - : ast_hash(calculateAstHash(ast_, current_database, settings)) + bool is_compressed_, + const String & tag_) + : ast_hash(calculateAstHash(ast_, current_database, settings, tag_)) , header(header_) , user_id(user_id_) , current_user_roles(current_user_roles_) @@ -242,11 +247,12 @@ QueryCache::Key::Key( , expires_at(expires_at_) , is_compressed(is_compressed_) , query_string(queryStringFromAST(ast_)) + , tag(tag_) { } -QueryCache::Key::Key(ASTPtr ast_, const String & current_database, const Settings & settings, std::optional user_id_, const std::vector & current_user_roles_) - : QueryCache::Key(ast_, current_database, settings, {}, user_id_, current_user_roles_, false, std::chrono::system_clock::from_time_t(1), false) /// dummy values for everything != AST, current database, user name/roles +QueryCache::Key::Key(ASTPtr ast_, const String & current_database, const Settings & settings, std::optional user_id_, const std::vector & current_user_roles_, const String & tag_) + : QueryCache::Key(ast_, current_database, settings, {}, user_id_, current_user_roles_, false, std::chrono::system_clock::from_time_t(1), false, tag_) /// dummy values for everything != AST, current database, user name/roles { } diff --git a/src/Interpreters/Cache/QueryCache.h b/src/Interpreters/Cache/QueryCache.h index 461197cac32..54de5edb145 100644 --- a/src/Interpreters/Cache/QueryCache.h +++ b/src/Interpreters/Cache/QueryCache.h @@ -88,6 +88,10 @@ public: /// SYSTEM.QUERY_CACHE. const String query_string; + /// An arbitrary string to separate entries in the query cache. + /// Queries with different values of this setting are considered different. + const String tag; + /// Ctor to construct a Key for writing into query cache. Key(ASTPtr ast_, const String & current_database, @@ -96,10 +100,15 @@ public: std::optional user_id_, const std::vector & current_user_roles_, bool is_shared_, std::chrono::time_point expires_at_, - bool is_compressed); + bool is_compressed, + const String & tag_); /// Ctor to construct a Key for reading from query cache (this operation only needs the AST + user name). 
- Key(ASTPtr ast_, const String & current_database, const Settings & settings, std::optional user_id_, const std::vector & current_user_roles_); + Key(ASTPtr ast_, + const String & current_database, + const Settings & settings, + std::optional user_id_, const std::vector & current_user_roles_, + const String & tag_); bool operator==(const Key & other) const; }; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index fe87eed5570..6422d3128fa 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -1129,7 +1129,7 @@ static std::tuple executeQueryImpl( { if (can_use_query_cache && settings.enable_reads_from_query_cache) { - QueryCache::Key key(ast, context->getCurrentDatabase(), *settings_copy, context->getUserID(), context->getCurrentRoles()); + QueryCache::Key key(ast, context->getCurrentDatabase(), *settings_copy, context->getUserID(), context->getCurrentRoles(), settings.query_cache_tag); QueryCache::Reader reader = query_cache->createReader(key); if (reader.hasCacheEntryForKey()) { @@ -1258,7 +1258,8 @@ static std::tuple executeQueryImpl( context->getUserID(), context->getCurrentRoles(), settings.query_cache_share_between_users, std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl), - settings.query_cache_compress_entries); + settings.query_cache_compress_entries, + settings.query_cache_tag); const size_t num_query_runs = settings.query_cache_min_query_runs ? query_cache->recordQueryRun(key) : 1; /// try to avoid locking a mutex in recordQueryRun() if (num_query_runs <= settings.query_cache_min_query_runs) diff --git a/src/Storages/System/StorageSystemQueryCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp index 4c54d4ae16f..f81d50e8806 100644 --- a/src/Storages/System/StorageSystemQueryCache.cpp +++ b/src/Storages/System/StorageSystemQueryCache.cpp @@ -1,6 +1,7 @@ #include "StorageSystemQueryCache.h" #include #include +#include #include #include #include @@ -19,7 +20,8 @@ ColumnsDescription StorageSystemQueryCache::getColumnsDescription() {"shared", std::make_shared(), "If the query cache entry is shared between multiple users."}, {"compressed", std::make_shared(), "If the query cache entry is compressed."}, {"expires_at", std::make_shared(), "When the query cache entry becomes stale."}, - {"key_hash", std::make_shared(), "A hash of the query string, used as a key to find query cache entries."} + {"key_hash", std::make_shared(), "A hash of the query string, used as a key to find query cache entries."}, + {"tag", std::make_shared(std::make_shared()), "An arbitrary string to separate entries in the query cache."} }; } @@ -56,6 +58,7 @@ void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr res_columns[4]->insert(key.is_compressed); res_columns[5]->insert(std::chrono::system_clock::to_time_t(key.expires_at)); res_columns[6]->insert(key.ast_hash.low64); /// query cache considers aliases (issue #56258) + res_columns[7]->insert(key.tag); } } diff --git a/tests/queries/0_stateless/02494_query_cache_tag.reference b/tests/queries/0_stateless/02494_query_cache_tag.reference new file mode 100644 index 00000000000..055d3d4c5bb --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_tag.reference @@ -0,0 +1,14 @@ +1 +1 +--- +1 +1 +1 +2 +--- +1 +1 +1 +2 +1 +3 diff --git a/tests/queries/0_stateless/02494_query_cache_tag.sql b/tests/queries/0_stateless/02494_query_cache_tag.sql new file mode 100644 index 00000000000..054607058e8 --- /dev/null +++ 
b/tests/queries/0_stateless/02494_query_cache_tag.sql @@ -0,0 +1,32 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Cache the query after the query invocation +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Queries with tag value of this setting or not are considered different cache entries. +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one'; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Queries with different tags values of this setting are considered different cache entries. +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one'; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one diff'; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; From 4866581a6769d9dda8da236286d15c84828ccc4c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 04:59:21 +0200 Subject: [PATCH 2053/2361] Fix DeltaLake partitioned by Bool when it is true --- src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index c896a760597..89b48f08438 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -332,6 +332,8 @@ struct DeltaLakeMetadataImpl WhichDataType which(check_type->getTypeId()); if (which.isStringOrFixedString()) return value; + else if (isBool(data_type)) + return parse(value); else if (which.isInt8()) return parse(value); else if (which.isUInt8()) From 7e277d5656c4e66da2f830386d066ec491384d0e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 05:04:51 +0200 Subject: [PATCH 2054/2361] Fix Upgrade check --- docker/test/upgrade/run.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index a4c4c75e5b3..e9768873f6a 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -129,6 +129,7 @@ configure # Check that all new/changed setting were added in settings changes history. # Some settings can be different for builds with sanitizers, so we check +# Also the automatic value of 'max_threads' and similar was displayed as "'auto(...)'" in previous versions instead of "auto(...)". # settings changes only for non-sanitizer builds. 
IS_SANITIZED=$(clickhouse-local --query "SELECT value LIKE '%-fsanitize=%' FROM system.build_options WHERE name = 'CXX_FLAGS'") if [ "${IS_SANITIZED}" -eq "0" ] @@ -145,7 +146,9 @@ then old_settings.value AS old_value FROM new_settings LEFT JOIN old_settings ON new_settings.name = old_settings.name - WHERE (new_settings.value != old_settings.value) AND (name NOT IN ( + WHERE (new_value != old_value) + AND NOT (startsWith(new_value, 'auto(') AND old_value LIKE '%auto(%') + AND (name NOT IN ( SELECT arrayJoin(tupleElement(changes, 'name')) FROM ( @@ -177,7 +180,7 @@ then if [ -s changed_settings.txt ] then mv changed_settings.txt /test_output/ - echo -e "Changed settings are not reflected in settings changes history (see changed_settings.txt)$FAIL$(head_escaped /test_output/changed_settings.txt)" >> /test_output/test_results.tsv + echo -e "Changed settings are not reflected in the settings changes history (see changed_settings.txt)$FAIL$(head_escaped /test_output/changed_settings.txt)" >> /test_output/test_results.tsv else echo -e "There are no changed settings or they are reflected in settings changes history$OK" >> /test_output/test_results.tsv fi From 1e15574a25b2eaa4d08c1a14a005ba0b3ebcfc23 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 05:53:43 +0200 Subject: [PATCH 2055/2361] Fix ExternalDistributed --- src/Storages/StorageExternalDistributed.cpp | 35 ++++++++++++++++++--- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index 4277387bc5d..9fc8b588c89 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -1,4 +1,4 @@ -#include "StorageExternalDistributed.h" +#include #include #include @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include #include @@ -111,7 +113,30 @@ void registerStorageExternalDistributed(StorageFactory & factory) std::unordered_set shards; ASTs inner_engine_args(engine_args.begin() + 1, engine_args.end()); - String addresses_expr = checkAndGetLiteralArgument(engine_args[1], "addresses"); + + ASTPtr * address_arg = nullptr; + + /// If there is a named collection argument, named `addresses_expr` + for (auto & node : inner_engine_args) + { + if (ASTFunction * func = node->as(); func && func->name == "equals" && func->arguments) + { + if (ASTExpressionList * func_args = func->arguments->as(); func_args && func_args->children.size() == 2) + { + if (ASTIdentifier * arg_name = func_args->children[0]->as(); arg_name && arg_name->name() == "addresses_expr") + { + address_arg = &func_args->children[1]; + break; + } + } + } + } + + /// Otherwise it is the first argument. 
+ if (!address_arg) + address_arg = &inner_engine_args.at(0); + + String addresses_expr = checkAndGetLiteralArgument(*address_arg, "addresses"); Strings shards_addresses = get_addresses(addresses_expr); auto engine_name = checkAndGetLiteralArgument(engine_args[0], "engine_name"); @@ -120,7 +145,7 @@ void registerStorageExternalDistributed(StorageFactory & factory) auto format_settings = StorageURL::getFormatSettingsFromArgs(args); for (const auto & shard_address : shards_addresses) { - inner_engine_args.at(0) = std::make_shared(shard_address); + *address_arg = std::make_shared(shard_address); auto configuration = StorageURL::getConfiguration(inner_engine_args, context); auto uri_options = parseRemoteDescription(shard_address, 0, shard_address.size(), '|', max_addresses); if (uri_options.size() > 1) @@ -144,7 +169,7 @@ void registerStorageExternalDistributed(StorageFactory & factory) MySQLSettings mysql_settings; for (const auto & shard_address : shards_addresses) { - inner_engine_args.at(0) = std::make_shared(shard_address); + *address_arg = std::make_shared(shard_address); auto configuration = StorageMySQL::getConfiguration(inner_engine_args, context, mysql_settings); configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 3306); auto pool = createMySQLPoolWithFailover(configuration, mysql_settings); @@ -160,7 +185,7 @@ void registerStorageExternalDistributed(StorageFactory & factory) { for (const auto & shard_address : shards_addresses) { - inner_engine_args.at(0) = std::make_shared(shard_address); + *address_arg = std::make_shared(shard_address); auto configuration = StoragePostgreSQL::getConfiguration(inner_engine_args, context); configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 5432); auto pool = std::make_shared( From a41c1305887d08f43c02e354bb307f69a16b3fb0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 06:05:44 +0200 Subject: [PATCH 2056/2361] Update 02675_profile_events_from_query_log_and_client.sh --- .../02675_profile_events_from_query_log_and_client.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index 894b2b61563..ff534a6a2e6 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-random-merge-tree-settings # Tag no-fasttest: needs s3 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) From 5acf9f6f8160ee6de2845b13bed07d63832ca3fa Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:01:52 +0200 Subject: [PATCH 2057/2361] Fix `test_cluster_all_replicas` --- tests/integration/test_cluster_all_replicas/test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_cluster_all_replicas/test.py b/tests/integration/test_cluster_all_replicas/test.py index d8bad180e1b..9797db7c498 100644 --- a/tests/integration/test_cluster_all_replicas/test.py +++ b/tests/integration/test_cluster_all_replicas/test.py @@ -21,14 +21,14 @@ def start_cluster(): def test_cluster(start_cluster): assert ( node1.query( - "SELECT hostName() FROM clusterAllReplicas('one_shard_two_nodes', system.one)" + "SELECT hostName() FROM clusterAllReplicas('one_shard_two_nodes', 
system.one) ORDER BY ALL" ) == "node1\nnode2\n" ) assert set( node1.query( - """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) ORDER BY dummy""" + """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) ORDER BY ALL""" ).splitlines() ) == {"node1\t0", "node2\t0"} @@ -48,7 +48,7 @@ def test_global_in(start_cluster): assert set( node1.query( - """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) where dummy GLOBAL IN u""" + """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) where dummy GLOBAL IN u ORDER BY ALL""" ).splitlines() ) == {"node1\t0", "node2\t0"} @@ -63,7 +63,7 @@ def test_global_in(start_cluster): def test_skip_unavailable_replica(start_cluster, cluster): assert ( node1.query( - f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) settings skip_unavailable_shards=1" + f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) ORDER BY ALL settings skip_unavailable_shards=1" ) == "node1\nnode2\n" ) @@ -81,5 +81,5 @@ def test_error_on_unavailable_replica(start_cluster, cluster): # so when skip_unavailable_shards=0 - any unavailable replica should lead to an error with pytest.raises(QueryRuntimeException): node1.query( - f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) settings skip_unavailable_shards=0" + f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) ORDER BY ALL settings skip_unavailable_shards=0" ) From 20b97a45bf3c73960e71e1a158cec35ec522ccff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:09:42 +0200 Subject: [PATCH 2058/2361] Fix fundamentally broken test CC @azat --- tests/integration/test_throttling/test.py | 62 ++++++++++------------- 1 file changed, 28 insertions(+), 34 deletions(-) diff --git a/tests/integration/test_throttling/test.py b/tests/integration/test_throttling/test.py index c53c2bb1ddf..4bd96e2756d 100644 --- a/tests/integration/test_throttling/test.py +++ b/tests/integration/test_throttling/test.py @@ -121,21 +121,15 @@ def node_update_config(mode, setting, value=None): node.restart_clickhouse() -def assert_took(took, should_took): +def assert_took(took, should_take): # we need to decrease the lower limit because the server limits could # be enforced by throttling some server background IO instead of query IO # and we have no control over it - # - # and the same for upper limit, it can be slightly larger, due to for - # instance network latencies or CPU starvation - if should_took > 0: - assert took >= should_took * 0.85 and took <= should_took * 1.8 - else: - assert took >= should_took * 0.85 + assert took >= should_take * 0.85 @pytest.mark.parametrize( - "policy,backup_name,mode,setting,value,should_took", + "policy,backup_name,mode,setting,value,should_take", [ # # Local -> Local @@ -149,7 +143,7 @@ def assert_took(took, should_took): 0, id="no_local_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", next_backup_name("local"), @@ -159,7 +153,7 @@ def assert_took(took, should_took): 7, id="user_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", next_backup_name("local"), @@ -181,7 +175,7 @@ def assert_took(took, should_took): 0, 
id="no_remote_to_local_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", next_backup_name("local"), @@ -191,7 +185,7 @@ def assert_took(took, should_took): 7, id="user_remote_to_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", next_backup_name("local"), @@ -252,7 +246,7 @@ def assert_took(took, should_took): 0, id="no_local_to_remote_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", next_backup_name("remote"), @@ -262,7 +256,7 @@ def assert_took(took, should_took): 7, id="user_local_to_remote_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", next_backup_name("remote"), @@ -274,7 +268,7 @@ def assert_took(took, should_took): ), ], ) -def test_backup_throttling(policy, backup_name, mode, setting, value, should_took): +def test_backup_throttling(policy, backup_name, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -284,7 +278,7 @@ def test_backup_throttling(policy, backup_name, mode, setting, value, should_too """ ) _, took = elapsed(node.query, f"backup table data to {backup_name}") - assert_took(took, should_took) + assert_took(took, should_take) def test_backup_throttling_override(): @@ -305,18 +299,18 @@ def test_backup_throttling_override(): "max_backup_bandwidth": "500K", }, ) - # reading 1e6*8 bytes with 500Ki default bandwith should take (8-0.5)/0.5=15 seconds + # reading 1e6*8 bytes with 500Ki default bandwidth should take (8-0.5)/0.5=15 seconds assert_took(took, 15) @pytest.mark.parametrize( - "policy,mode,setting,value,should_took", + "policy,mode,setting,value,should_take", [ # # Local # pytest.param("default", None, None, None, 0, id="no_local_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", "user", @@ -325,7 +319,7 @@ def test_backup_throttling_override(): 7, id="user_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", "server", @@ -338,7 +332,7 @@ def test_backup_throttling_override(): # Remote # pytest.param("s3", None, None, None, 0, id="no_remote_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", "user", @@ -347,7 +341,7 @@ def test_backup_throttling_override(): 7, id="user_remote_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", "server", @@ -358,7 +352,7 @@ def test_backup_throttling_override(): ), ], ) -def test_read_throttling(policy, mode, setting, value, should_took): +def test_read_throttling(policy, mode, setting, value, should_take): 
node_update_config(mode, setting, value) node.query( f""" @@ -368,17 +362,17 @@ def test_read_throttling(policy, mode, setting, value, should_took): """ ) _, took = elapsed(node.query, f"select * from data") - assert_took(took, should_took) + assert_took(took, should_take) @pytest.mark.parametrize( - "policy,mode,setting,value,should_took", + "policy,mode,setting,value,should_take", [ # # Local # pytest.param("default", None, None, None, 0, id="no_local_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", "user", @@ -387,7 +381,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): 7, id="local_user_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", "server", @@ -400,7 +394,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): # Remote # pytest.param("s3", None, None, None, 0, id="no_remote_throttling"), - # writing 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # writing 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", "user", @@ -409,7 +403,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): 7, id="user_remote_throttling", ), - # writing 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # writing 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", "server", @@ -420,7 +414,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): ), ], ) -def test_write_throttling(policy, mode, setting, value, should_took): +def test_write_throttling(policy, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -429,7 +423,7 @@ def test_write_throttling(policy, mode, setting, value, should_took): """ ) _, took = elapsed(node.query, f"insert into data select * from numbers(1e6)") - assert_took(took, should_took) + assert_took(took, should_take) def test_max_mutations_bandwidth_for_server(): @@ -444,7 +438,7 @@ def test_max_mutations_bandwidth_for_server(): node.query, "alter table data update key = -key where 1 settings mutations_sync = 1", ) - # reading 1e6*8 bytes with 1M/s bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M/s bandwidth should take (8-1)/1=7 seconds assert_took(took, 7) @@ -457,5 +451,5 @@ def test_max_merges_bandwidth_for_server(): ) node.query("insert into data select * from numbers(1e6)") _, took = elapsed(node.query, "optimize table data final") - # reading 1e6*8 bytes with 1M/s bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M/s bandwidth should take (8-1)/1=7 seconds assert_took(took, 7) From b3504def35ba6baf7a7cf1f9b84a72e8f70f95f3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:15:08 +0200 Subject: [PATCH 2059/2361] Fix leftovers --- tests/config/config.d/transactions.xml | 4 ++-- .../test_distributed_type_object/configs/remote_servers.xml | 2 +- .../configs/host_regexp.xml | 4 ++-- .../configs/listen_host.xml | 2 +- .../test_jbod_ha/configs/config.d/storage_configuration.xml | 2 +- .../integration/test_server_reload/configs/default_passwd.xml | 2 +- .../test_server_reload/configs/overrides_from_zk.xml | 2 +- tests/integration/test_version_update/configs/log_conf.xml | 2 +- 8 files changed, 10 
insertions(+), 10 deletions(-) diff --git a/tests/config/config.d/transactions.xml b/tests/config/config.d/transactions.xml index 9948b1f1865..64e166b81b5 100644 --- a/tests/config/config.d/transactions.xml +++ b/tests/config/config.d/transactions.xml @@ -1,4 +1,4 @@ - + 42 @@ -18,4 +18,4 @@ 0.01 - + diff --git a/tests/integration/test_distributed_type_object/configs/remote_servers.xml b/tests/integration/test_distributed_type_object/configs/remote_servers.xml index ebce4697529..0ea61f0d5fc 100644 --- a/tests/integration/test_distributed_type_object/configs/remote_servers.xml +++ b/tests/integration/test_distributed_type_object/configs/remote_servers.xml @@ -1,4 +1,4 @@ - + diff --git a/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml b/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml index 7a2141e6c7e..0bf7fad9a70 100644 --- a/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml +++ b/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml @@ -1,4 +1,4 @@ - + @@ -8,4 +8,4 @@ default - \ No newline at end of file + diff --git a/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml b/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml index 58ef55cd3f3..4f0841ab8b6 100644 --- a/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml +++ b/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml @@ -1,4 +1,4 @@ - + :: 0.0.0.0 1 diff --git a/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml b/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml index b5c351d105b..b2c4645644a 100644 --- a/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml @@ -1,4 +1,4 @@ - + 1000 diff --git a/tests/integration/test_server_reload/configs/default_passwd.xml b/tests/integration/test_server_reload/configs/default_passwd.xml index f79149e7e23..45ae005bd19 100644 --- a/tests/integration/test_server_reload/configs/default_passwd.xml +++ b/tests/integration/test_server_reload/configs/default_passwd.xml @@ -1,4 +1,4 @@ - + diff --git a/tests/integration/test_server_reload/configs/overrides_from_zk.xml b/tests/integration/test_server_reload/configs/overrides_from_zk.xml index d420faa88a2..b17c5c9fa99 100644 --- a/tests/integration/test_server_reload/configs/overrides_from_zk.xml +++ b/tests/integration/test_server_reload/configs/overrides_from_zk.xml @@ -1,4 +1,4 @@ - + diff --git a/tests/integration/test_version_update/configs/log_conf.xml b/tests/integration/test_version_update/configs/log_conf.xml index f9d15e572aa..17215c1759d 100644 --- a/tests/integration/test_version_update/configs/log_conf.xml +++ b/tests/integration/test_version_update/configs/log_conf.xml @@ -1,4 +1,4 @@ - + trace /var/log/clickhouse-server/log.log From b92a8f0fbcec640fbac35fca1b3fb23914d44990 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:16:25 +0200 Subject: [PATCH 2060/2361] Fix leftovers --- .../external-authenticators/kerberos.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/zh/operations/external-authenticators/kerberos.md b/docs/zh/operations/external-authenticators/kerberos.md index 649a0b9bd48..d1a39bbc952 100644 --- a/docs/zh/operations/external-authenticators/kerberos.md +++ 
b/docs/zh/operations/external-authenticators/kerberos.md @@ -23,30 +23,30 @@ slug: /zh/operations/external-authenticators/kerberos 示例 (进入 `config.xml`): ```xml - + - + ``` 主体规范: ```xml - + HTTP/clickhouse.example.com@EXAMPLE.COM - + ``` 按领域过滤: ```xml - + EXAMPLE.COM - + ``` !!! warning "注意" @@ -74,7 +74,7 @@ Kerberos主体名称格式通常遵循以下模式: 示例 (进入 `users.xml`): ``` - + @@ -85,7 +85,7 @@ Kerberos主体名称格式通常遵循以下模式: - + ``` !!! warning "警告" From 6f189e9eb7ded15df4ddbe7f90dafb28feaab2e3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:17:17 +0200 Subject: [PATCH 2061/2361] Fix leftovers --- .../test_distributed_type_object/configs/remote_servers.xml | 2 +- .../configs/host_regexp.xml | 2 +- .../configs/listen_host.xml | 2 +- .../test_jbod_ha/configs/config.d/storage_configuration.xml | 2 +- tests/integration/test_server_reload/configs/default_passwd.xml | 2 +- .../test_server_reload/configs/overrides_from_zk.xml | 2 +- tests/integration/test_version_update/configs/log_conf.xml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_distributed_type_object/configs/remote_servers.xml b/tests/integration/test_distributed_type_object/configs/remote_servers.xml index 0ea61f0d5fc..68b420f36b4 100644 --- a/tests/integration/test_distributed_type_object/configs/remote_servers.xml +++ b/tests/integration/test_distributed_type_object/configs/remote_servers.xml @@ -15,4 +15,4 @@ - + diff --git a/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml b/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml index 0bf7fad9a70..9329c8dbde2 100644 --- a/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml +++ b/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml @@ -8,4 +8,4 @@ default - + diff --git a/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml b/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml index 4f0841ab8b6..9c27c612f63 100644 --- a/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml +++ b/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml @@ -2,4 +2,4 @@ :: 0.0.0.0 1 - + diff --git a/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml b/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml index b2c4645644a..fb9acc58ad6 100644 --- a/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml @@ -27,4 +27,4 @@ - + diff --git a/tests/integration/test_server_reload/configs/default_passwd.xml b/tests/integration/test_server_reload/configs/default_passwd.xml index 45ae005bd19..9d664cbf9c4 100644 --- a/tests/integration/test_server_reload/configs/default_passwd.xml +++ b/tests/integration/test_server_reload/configs/default_passwd.xml @@ -9,4 +9,4 @@ 123 - + diff --git a/tests/integration/test_server_reload/configs/overrides_from_zk.xml b/tests/integration/test_server_reload/configs/overrides_from_zk.xml index b17c5c9fa99..aa6105f6ebe 100644 --- a/tests/integration/test_server_reload/configs/overrides_from_zk.xml +++ b/tests/integration/test_server_reload/configs/overrides_from_zk.xml @@ -7,4 +7,4 @@ - + diff --git a/tests/integration/test_version_update/configs/log_conf.xml b/tests/integration/test_version_update/configs/log_conf.xml index 17215c1759d..27c7107ce5e 
100644 --- a/tests/integration/test_version_update/configs/log_conf.xml +++ b/tests/integration/test_version_update/configs/log_conf.xml @@ -8,4 +8,4 @@ /var/log/clickhouse-server/stderr.log /var/log/clickhouse-server/stdout.log - + From d31c56e0d9756642d90885016369e9ca7994e3f0 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sun, 11 Aug 2024 23:17:24 -0600 Subject: [PATCH 2062/2361] Update 03221_create_if_not_exists_setting.sh --- .../queries/0_stateless/03221_create_if_not_exists_setting.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh index cfbe2eb8fd9..8dcde8977bc 100755 --- a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh @@ -38,6 +38,6 @@ CREATE DATABASE example_database; echo $? $CLICKHOUSE_CLIENT -mn -q " -DROP DATABASE example_database; -DROP TABLE example_table; +DROP DATABASE IF EXISTS example_database; +DROP TABLE IF EXISTS example_table; " \ No newline at end of file From 6016dc96aae57f38ef3ace1ed687b82bcc437425 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:19:54 +0200 Subject: [PATCH 2063/2361] Fix test `01172_transaction_counters` --- programs/server/config.d/transactions.xml | 1 + .../01172_transaction_counters.reference | 44 +++++++++---------- .../01172_transaction_counters.sql | 1 - 3 files changed, 23 insertions(+), 23 deletions(-) create mode 120000 programs/server/config.d/transactions.xml diff --git a/programs/server/config.d/transactions.xml b/programs/server/config.d/transactions.xml new file mode 120000 index 00000000000..be9de46b607 --- /dev/null +++ b/programs/server/config.d/transactions.xml @@ -0,0 +1 @@ +../../../tests/config/config.d/transactions.xml \ No newline at end of file diff --git a/tests/queries/0_stateless/01172_transaction_counters.reference b/tests/queries/0_stateless/01172_transaction_counters.reference index 24083d7d40b..0fd73c7bcec 100644 --- a/tests/queries/0_stateless/01172_transaction_counters.reference +++ b/tests/queries/0_stateless/01172_transaction_counters.reference @@ -16,25 +16,25 @@ 7 all_3_3_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 7 all_4_4_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 8 1 -1 1 AddPart 1 1 1 1 all_1_1_0 -2 1 Begin 1 1 1 1 -2 1 AddPart 1 1 1 1 all_2_2_0 -2 1 Rollback 1 1 1 1 -3 1 Begin 1 1 1 1 -3 1 AddPart 1 1 1 1 all_3_3_0 -3 1 Commit 1 1 1 0 -1 1 LockPart 1 1 1 1 all_2_2_0 -4 1 Begin 1 1 1 1 -4 1 AddPart 1 1 1 1 all_4_4_0 -4 1 Commit 1 1 1 0 -5 1 Begin 1 1 1 1 -5 1 AddPart 1 1 1 1 all_5_5_0 -5 1 LockPart 1 1 1 1 all_1_1_0 -5 1 LockPart 1 1 1 1 all_3_3_0 -5 1 LockPart 1 1 1 1 all_4_4_0 -5 1 LockPart 1 1 1 1 all_5_5_0 -5 1 UnlockPart 1 1 1 1 all_1_1_0 -5 1 UnlockPart 1 1 1 1 all_3_3_0 -5 1 UnlockPart 1 1 1 1 all_4_4_0 -5 1 UnlockPart 1 1 1 1 all_5_5_0 -5 1 Rollback 1 1 1 1 +1 AddPart 1 1 1 1 all_1_1_0 +2 Begin 1 1 1 1 +2 AddPart 1 1 1 1 all_2_2_0 +2 Rollback 1 1 1 1 +3 Begin 1 1 1 1 +3 AddPart 1 1 1 1 all_3_3_0 +3 Commit 1 1 1 0 +1 LockPart 1 1 1 1 all_2_2_0 +4 Begin 1 1 1 1 +4 AddPart 1 1 1 1 all_4_4_0 +4 Commit 1 1 1 0 +5 Begin 1 1 1 1 +5 AddPart 1 1 1 1 all_5_5_0 +5 LockPart 1 1 1 1 all_1_1_0 +5 LockPart 1 1 1 1 all_3_3_0 +5 LockPart 1 1 1 1 all_4_4_0 +5 LockPart 1 1 1 1 all_5_5_0 +5 UnlockPart 1 1 1 1 all_1_1_0 +5 UnlockPart 1 1 1 1 all_3_3_0 +5 UnlockPart 1 1 1 1 all_4_4_0 +5 UnlockPart 1 1 1 1 all_5_5_0 +5 Rollback 1 1 1 1 diff --git 
a/tests/queries/0_stateless/01172_transaction_counters.sql b/tests/queries/0_stateless/01172_transaction_counters.sql index a809e4196e9..581b45cd15c 100644 --- a/tests/queries/0_stateless/01172_transaction_counters.sql +++ b/tests/queries/0_stateless/01172_transaction_counters.sql @@ -42,7 +42,6 @@ rollback; system flush logs; select indexOf((select arraySort(groupUniqArray(tid)) from system.transactions_info_log where database=currentDatabase() and table='txn_counters'), tid), - (toDecimal64(now64(6), 6) - toDecimal64(event_time, 6)) < 100, type, thread_id!=0, length(query_id)=length(queryID()) or type='Commit' and query_id='', -- ignore fault injection after commit From c6ae8abd87b1741472cdbb1ebcdf64c36570dda1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 07:39:25 +0200 Subject: [PATCH 2064/2361] Fix `00600_replace_running_query` --- .../00600_replace_running_query.sh | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index 7a71d17f19b..ad7c49a9ad3 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -13,7 +13,18 @@ ${CLICKHOUSE_CLIENT} -q "grant select on system.numbers to u_00600${TEST_PREFIX} function wait_for_query_to_start() { - while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done + while [[ 0 -eq $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") ]] + do + sleep 0.1 + done +} + +function wait_for_queries_to_finish() +{ + while [[ 0 -ne $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE current_database = '${CLICKHOUSE_DATABASE}' AND query NOT LIKE '%this query%'") ]] + do + sleep 0.1 + done } @@ -25,8 +36,9 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d # Wait for it to be replaced wait +wait_for_queries_to_finish -${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & +${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & wait_for_query_to_start '42' # Trying to run another query with the same query_id @@ -37,10 +49,13 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=42&replace_running_query=1" -d 'S $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '42' SYNC" > /dev/null wait +wait_for_queries_to_finish -${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & +${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & wait_for_query_to_start '42' ${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null wait +wait_for_queries_to_finish + ${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --query='SELECT 44' ${CLICKHOUSE_CLIENT} -q "drop user u_00600${TEST_PREFIX}" From 574c445be9368ee481e8d213251106233d417f69 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 7 Aug 2024 16:29:07 +0000 Subject: [PATCH 2065/2361] Refactor tests for (experimental) 
statistics --- docs/en/development/tests.md | 4 +- .../statements/alter/statistics.md | 16 +- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- ...2864_statistics_count_min_sketch.reference | 14 -- .../02864_statistics_count_min_sketch.sql | 70 ------ .../02864_statistics_ddl.reference | 37 +-- .../0_stateless/02864_statistics_ddl.sql | 234 ++++++++++++++---- ...delayed_materialization_in_merge.reference | 12 + ...stics_delayed_materialization_in_merge.sql | 36 +++ .../02864_statistics_exception.reference | 0 .../02864_statistics_exception.sql | 55 ---- ..._statistics_materialize_in_merge.reference | 10 - .../02864_statistics_materialize_in_merge.sql | 52 ---- .../02864_statistics_predicates.reference | 98 ++++++++ .../02864_statistics_predicates.sql | 214 ++++++++++++++++ .../02864_statistics_uniq.reference | 35 --- .../0_stateless/02864_statistics_uniq.sql | 73 ------ .../02864_statistics_usage.reference | 20 ++ .../0_stateless/02864_statistics_usage.sql | 42 ++++ 19 files changed, 625 insertions(+), 399 deletions(-) delete mode 100644 tests/queries/0_stateless/02864_statistics_count_min_sketch.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_count_min_sketch.sql create mode 100644 tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference create mode 100644 tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_exception.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_exception.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql create mode 100644 tests/queries/0_stateless/02864_statistics_predicates.reference create mode 100644 tests/queries/0_stateless/02864_statistics_predicates.sql delete mode 100644 tests/queries/0_stateless/02864_statistics_uniq.reference delete mode 100644 tests/queries/0_stateless/02864_statistics_uniq.sql create mode 100644 tests/queries/0_stateless/02864_statistics_usage.reference create mode 100644 tests/queries/0_stateless/02864_statistics_usage.sql diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 269995a1a96..6cb36e2049b 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -14,7 +14,7 @@ Each functional test sends one or multiple queries to the running ClickHouse ser Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from ClickHouse and it is available to general public. -Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. +Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. 
You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. :::note A common mistake when testing data types `DateTime` and `DateTime64` is assuming that the server uses a specific time zone (e.g. "UTC"). This is not the case, time zones in CI test runs @@ -38,7 +38,7 @@ For more options, see `tests/clickhouse-test --help`. You can simply run all tes ### Adding a New Test -To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client --multiquery < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 6880cef0e5c..7a1774a01b5 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -8,26 +8,28 @@ sidebar_label: STATISTICS The following operations are available: -- `ALTER TABLE [db].table ADD STATISTICS (columns list) TYPE (type list)` - Adds statistic description to tables metadata. +- `ALTER TABLE [db].table ADD STATISTICS [IF NOT EXISTS] (column list) TYPE (type list)` - Adds statistic description to tables metadata. -- `ALTER TABLE [db].table MODIFY STATISTICS (columns list) TYPE (type list)` - Modifies statistic description to tables metadata. +- `ALTER TABLE [db].table MODIFY STATISTICS (column list) TYPE (type list)` - Modifies statistic description to tables metadata. -- `ALTER TABLE [db].table DROP STATISTICS (columns list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns. +- `ALTER TABLE [db].table DROP STATISTICS [IF EXISTS] (column list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns. -- `ALTER TABLE [db].table CLEAR STATISTICS (columns list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuild using `ALTER TABLE MATERIALIZE STATISTICS`. +- `ALTER TABLE [db].table CLEAR STATISTICS [IF EXISTS] (column list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuild using `ALTER TABLE MATERIALIZE STATISTICS`. -- `ALTER TABLE [db.]table MATERIALIZE STATISTICS (columns list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +- `ALTER TABLE [db.]table MATERIALIZE STATISTICS [IF EXISTS] (column list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). The first two commands are lightweight in a sense that they only change metadata or remove files. Also, they are replicated, syncing statistics metadata via ZooKeeper. 
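A minimal end-to-end sketch (reusing the hypothetical table `t1` and columns `c`, `d` from the example just below): `ADD` only changes metadata, `MATERIALIZE` runs as a mutation over the existing parts, and `DROP` removes both the metadata and the statistics objects.

```
ALTER TABLE t1 ADD STATISTICS IF NOT EXISTS c, d TYPE TDigest;  -- metadata only
ALTER TABLE t1 MATERIALIZE STATISTICS c, d;                     -- builds statistics for existing parts (mutation)
ALTER TABLE t1 DROP STATISTICS IF EXISTS c, d;                  -- removes metadata and statistics objects
```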
-There is an example adding two statistics types to two columns: +## Example: + +Adding two statistics types to two columns: ``` ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq; ``` :::note -Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). +Statistic are supported only for [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). ::: diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 625b1281c61..c7101021f02 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3517,7 +3517,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context const auto & new_column = new_metadata.getColumns().get(command.column_name); if (!old_column.type->equals(*new_column.type)) throw Exception(ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN, - "ALTER types of column {} with statistics is not not safe " + "ALTER types of column {} with statistics is not safe " "because it can change the representation of statistics", backQuoteIfNeed(command.column_name)); } diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference deleted file mode 100644 index 02c41656a36..00000000000 --- a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference +++ /dev/null @@ -1,14 +0,0 @@ -CREATE TABLE default.tab\n(\n `a` String,\n `b` UInt64,\n `c` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -Test statistics count_min: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) -Test statistics multi-types: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) -Test LowCardinality and Nullable data type: -tab2 diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql deleted file mode 100644 index c730aa7b4a7..00000000000 --- a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql +++ /dev/null @@ -1,70 +0,0 @@ --- Tags: no-fasttest - -DROP TABLE IF EXISTS tab SYNC; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET allow_suspicious_low_cardinality_types=1; -SET mutations_sync = 2; - -CREATE TABLE tab -( - a String, - b UInt64, - c Int64, - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE tab; - -INSERT INTO tab select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'Test statistics count_min:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min; -ALTER TABLE tab ADD STATISTICS c TYPE count_min; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) 
FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - - -SELECT 'Test statistics multi-types:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; -ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - -DROP TABLE IF EXISTS tab SYNC; - - -SELECT 'Test LowCardinality and Nullable data type:'; -DROP TABLE IF EXISTS tab2 SYNC; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE tab2 -( - a LowCardinality(Int64) STATISTICS(count_min), - b Nullable(Int64) STATISTICS(count_min), - c LowCardinality(Nullable(Int64)) STATISTICS(count_min), - pk String, -) Engine = MergeTree() ORDER BY pk; - -select name from system.tables where name = 'tab2' and database = currentDatabase(); - -DROP TABLE IF EXISTS tab2 SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_ddl.reference b/tests/queries/0_stateless/02864_statistics_ddl.reference index a7ff5caa0b0..0e453b0ee8a 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.reference +++ b/tests/queries/0_stateless/02864_statistics_ddl.reference @@ -1,31 +1,6 @@ -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -10 -0 -After drop statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(b, 10), less(a, 10)) (removed) -10 -CREATE TABLE default.tab\n(\n `a` Float64,\n `b` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After add statistic -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After materialize statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -20 -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -20 -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After rename - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, 10)) (removed) -20 +CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 
STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql index fe612efe2ac..32b56a842b7 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.sql +++ b/tests/queries/0_stateless/02864_statistics_ddl.sql @@ -1,59 +1,195 @@ --- Tests that various DDL statements create/drop/materialize statistics +-- Tags: no-fasttest +-- no-fasttest: 'count_min' sketches need a 3rd party library + +-- Tests that DDL statements which create / drop / materialize statistics + +SET mutations_sync = 1; DROP TABLE IF EXISTS tab; +-- Error case: Can't create statistics when allow_experimental_statistics = 0 +CREATE TABLE tab (col Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; + +-- Error case: Unknown statistics types are rejected +CREATE TABLE tab (col Float64 STATISTICS(no_statistics_type)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +-- Error case: The same statistics type can't exist more than once on a column +CREATE TABLE tab (col Float64 STATISTICS(tdigest, tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SET allow_suspicious_low_cardinality_types = 1; + +-- Statistics can only be created on columns of specific data types (depending on the statistics kind), (*) + +-- tdigest requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 
STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- uniq requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE 
TABLE tab (col Tuple(Float64, Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- CREATE TABLE was easy, ALTER is more fun CREATE TABLE tab ( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f32 Float32, + s String, + a Array(Float64) +) +Engine = MergeTree() +ORDER BY tuple(); +-- Error case: Unknown statistics types are rejected +-- (relevant for ADD and MODIFY) +ALTER TABLE tab ADD STATISTICS f64 TYPE 
no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +-- for some reason, ALTER TABLE tab MODIFY STATISTICS IF EXISTS is not supported + +-- Error case: The same statistics type can't exist more than once on a column +-- (relevant for ADD and MODIFY) +-- Create the same statistics object twice +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +-- Create a statistics object which already exists +ALTER TABLE tab ADD STATISTICS f64_tdigest TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64_tdigest TYPE tdigest; -- no-op +ALTER TABLE tab MODIFY STATISTICS f64_tdigest TYPE tdigest; -- no-op + +-- Error case: Column does not exist +-- (relevant for ADD, MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab ADD STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab CLEAR STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS no_such_column; -- { serverError ILLEGAL_STATISTICS } + +-- Error case: Column exists but has no statistics +-- (relevant for MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab MODIFY STATISTICS s TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab CLEAR STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS s; -- { serverError ILLEGAL_STATISTICS } + +-- We don't check systematically that statistics can only be created via ALTER ADD STATISTICS on columns of specific data types (the +-- internal type validation code is tested already above, (*)). Only do a rudimentary check for each statistics type with a data type that +-- works and one that doesn't work. 
+-- tdigest +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +-- uniq +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +-- count_min +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } + +-- Any data type changes on columns with statistics are disallowed, for simplicity even if the new data type is compatible with all existing +-- statistics objects (e.g. tdigest can be created on Float64 and UInt64) +ALTER TABLE tab MODIFY COLUMN f64_tdigest UInt64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- Finally, do a full-circle test of a good case. Print table definition after each step. +-- Intentionally specifying _two_ columns and _two_ statistics types to have that also tested. +SHOW CREATE TABLE tab; +ALTER TABLE tab ADD STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab MODIFY STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab CLEAR STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab MATERIALIZE STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab DROP STATISTICS f64, f32; SHOW CREATE TABLE tab; -INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; -SELECT count(*) FROM tab WHERE b < NULL and a < '10'; - -ALTER TABLE tab DROP STATISTICS a, b; - -SELECT 'After drop statistic'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -SHOW CREATE TABLE tab; - -ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; - -SELECT 'After add statistic'; - -SHOW CREATE TABLE tab; - -ALTER TABLE tab MATERIALIZE STATISTICS a, b; -INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'After materialize statistic'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -OPTIMIZE TABLE tab FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT 
count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -ALTER TABLE tab RENAME COLUMN b TO c; -SHOW CREATE TABLE tab; - -SELECT 'After rename'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE c < 10 and a < 10; - -DROP TABLE IF EXISTS tab; +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference new file mode 100644 index 00000000000..eb5e685597c --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference @@ -0,0 +1,12 @@ +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After truncate, insert, and materialize + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql new file mode 100644 index 00000000000..d469a4c2036 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql @@ -0,0 +1,36 @@ +-- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) + +DROP TABLE IF EXISTS tab; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET enable_analyzer = 1; + +SET materialize_statistics_on_insert = 0; + +CREATE TABLE tab +( + a Int64 STATISTICS(tdigest), + b Int16 STATISTICS(tdigest), +) ENGINE = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. 
+ +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +TRUNCATE TABLE tab; +SET mutations_sync = 2; +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +SELECT 'After truncate, insert, and materialize'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_exception.reference b/tests/queries/0_stateless/02864_statistics_exception.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/02864_statistics_exception.sql b/tests/queries/0_stateless/02864_statistics_exception.sql deleted file mode 100644 index 289ffee6600..00000000000 --- a/tests/queries/0_stateless/02864_statistics_exception.sql +++ /dev/null @@ -1,55 +0,0 @@ --- Tests creating/dropping/materializing statistics produces the right exceptions. - -DROP TABLE IF EXISTS tab; - --- Can't create statistics when allow_experimental_statistics = 0 -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - -SET allow_experimental_statistics = 1; - --- The same type of statistics can't exist more than once on a column -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest, tdigest) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- Unknown statistics types are rejected -CREATE TABLE tab -( - a Float64 STATISTICS(no_statistics_type) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- tDigest statistics can only be created on numeric columns -CREATE TABLE tab -( - a String STATISTICS(tdigest), -) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } - -CREATE TABLE tab -( - a Float64, - b String -) Engine = MergeTree() ORDER BY tuple(); - -ALTER TABLE tab ADD STATISTICS a TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS a TYPE tdigest; -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; --- Statistics can be created only on integer columns -ALTER TABLE tab ADD STATISTICS b TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS b; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS a; -ALTER TABLE tab DROP STATISTICS IF EXISTS a; -ALTER TABLE tab CLEAR STATISTICS a; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab CLEAR STATISTICS IF EXISTS a; -ALTER TABLE tab MATERIALIZE STATISTICS b; -- { serverError ILLEGAL_STATISTICS } - -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -ALTER TABLE tab MODIFY COLUMN a Float64 TTL toDateTime(b) + INTERVAL 1 MONTH; -ALTER TABLE tab MODIFY COLUMN a Int64; -- { serverError 
ALTER_OF_COLUMN_IS_FORBIDDEN } - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference deleted file mode 100644 index 5e969cf41cb..00000000000 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference +++ /dev/null @@ -1,10 +0,0 @@ -10 -10 -10 -statistics not used Condition less(b, 10_UInt8) moved to PREWHERE -statistics not used Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after merge Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after merge Condition less(b, 10_UInt8) moved to PREWHERE -statistics used after materialize Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after materialize Condition less(b, 10_UInt8) moved to PREWHERE -2 0 diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql deleted file mode 100644 index 6606cff263f..00000000000 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql +++ /dev/null @@ -1,52 +0,0 @@ --- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). - -DROP TABLE IF EXISTS tab; - -SET enable_analyzer = 1; -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; - -SET materialize_statistics_on_insert = 0; - -CREATE TABLE tab -( - a Int64 STATISTICS(tdigest), - b Int16 STATISTICS(tdigest), -) ENGINE = MergeTree() ORDER BY tuple() -SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. - -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics not used'; - -OPTIMIZE TABLE tab FINAL; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after merge'; - -TRUNCATE TABLE tab; -SET mutations_sync = 2; - -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; -ALTER TABLE tab MATERIALIZE STATISTICS a, b; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after materialize'; - -DROP TABLE tab; - -SYSTEM FLUSH LOGS; - -SELECT log_comment, message FROM system.text_log JOIN -( - SELECT Settings['log_comment'] AS log_comment, query_id FROM system.query_log - WHERE current_database = currentDatabase() - AND query LIKE 'SELECT count(*) FROM tab%' - AND type = 'QueryFinish' -) AS query_log USING (query_id) -WHERE message LIKE '%moved to PREWHERE%' -ORDER BY event_time_microseconds; - -SELECT count(), sum(ProfileEvents['MergeTreeDataWriterStatisticsCalculationMicroseconds']) -FROM system.query_log -WHERE current_database = currentDatabase() - AND query LIKE 'INSERT INTO tab SELECT%' - AND type = 'QueryFinish'; diff --git a/tests/queries/0_stateless/02864_statistics_predicates.reference b/tests/queries/0_stateless/02864_statistics_predicates.reference new file mode 100644 index 00000000000..ffbd7269e05 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_predicates.reference @@ -0,0 +1,98 @@ +u64 and = +10 +10 +10 +10 +0 +0 +0 +0 +10 +10 +10 +10 +u64 and < +70 +70 +70 +70 +80 +80 +80 +80 +70 +70 +70 +70 +f64 and = +10 +10 +10 +10 +0 +0 +0 +0 +10 +10 +10 +10 +0 +0 +0 +0 +f64 and < +70 +70 +70 +70 +80 +80 +80 +80 +70 +70 +70 +70 +80 +80 +80 +80 +dt and = +0 +0 +0 +0 +10 +10 +10 +10 +dt and < 
+10000 +10000 +10000 +10000 +70 +70 +70 +70 +b and = +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +0 +0 +0 +0 +s and = +10 +10 diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql new file mode 100644 index 00000000000..779116cf19a --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_predicates.sql @@ -0,0 +1,214 @@ +-- Tags: no-fasttest +-- no-fasttest: 'count_min' sketches need a 3rd party library + +-- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types. + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + u64 UInt64, + u64_tdigest UInt64 STATISTICS(tdigest), + u64_count_min UInt64 STATISTICS(count_min), + u64_uniq UInt64 STATISTICS(uniq), + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f64_count_min Float64 STATISTICS(count_min), + f64_uniq Float64 STATISTICS(uniq), + dt DateTime, + dt_tdigest DateTime STATISTICS(tdigest), + dt_count_min DateTime STATISTICS(count_min), + dt_uniq DateTime STATISTICS(uniq), + b Bool, + b_tdigest Bool STATISTICS(tdigest), + b_count_min Bool STATISTICS(count_min), + b_uniq Bool STATISTICS(uniq), + s String, + -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest + s_count_min String STATISTICS(count_min) + -- s_uniq String STATISTICS(uniq), -- not supported by uniq +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab +-- SELECT number % 10000, number % 1000, -(number % 100) FROM system.numbers LIMIT 10000; +SELECT number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 2, + number % 2, + number % 2, + number % 2, + toString(number % 1000), + toString(number % 1000) +FROM system.numbers LIMIT 10000; + +-- u64 ---------------------------------------------------- + +SELECT 'u64 and ='; + +SELECT count(*) FROM tab WHERE u64 = 7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7; +SELECT count(*) FROM tab WHERE u64_count_min = 7; +SELECT count(*) FROM tab WHERE u64_uniq = 7; + +SELECT count(*) FROM tab WHERE u64 = 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE u64_count_min = 7.7; +SELECT count(*) FROM tab WHERE u64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE u64 = '7'; +SELECT count(*) FROM tab WHERE u64_tdigest = '7'; +SELECT count(*) FROM tab WHERE u64_count_min = '7'; +SELECT count(*) FROM tab WHERE u64_uniq = '7'; + +SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } + +SELECT 'u64 and <'; + +SELECT count(*) FROM tab WHERE u64 < 7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7; +SELECT count(*) FROM tab WHERE u64_count_min < 7; +SELECT count(*) FROM tab WHERE u64_uniq < 7; + +SELECT count(*) FROM tab WHERE u64 < 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE u64_count_min < 7.7; +SELECT count(*) FROM tab WHERE u64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE u64 < '7'; +SELECT count(*) FROM tab WHERE u64_tdigest < '7'; 
+SELECT count(*) FROM tab WHERE u64_count_min < '7'; +SELECT count(*) FROM tab WHERE u64_uniq < '7'; + +SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } + +-- f64 ---------------------------------------------------- + +SELECT 'f64 and ='; + +SELECT count(*) FROM tab WHERE f64 = 7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7; +SELECT count(*) FROM tab WHERE f64_count_min = 7; +SELECT count(*) FROM tab WHERE f64_uniq = 7; + +SELECT count(*) FROM tab WHERE f64 = 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE f64_count_min = 7.7; +SELECT count(*) FROM tab WHERE f64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE f64 = '7'; +SELECT count(*) FROM tab WHERE f64_tdigest = '7'; +SELECT count(*) FROM tab WHERE f64_count_min = '7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7'; + +SELECT count(*) FROM tab WHERE f64 = '7.7'; +SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; +SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; + +SELECT 'f64 and <'; + +SELECT count(*) FROM tab WHERE f64 < 7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7; +SELECT count(*) FROM tab WHERE f64_count_min < 7; +SELECT count(*) FROM tab WHERE f64_uniq < 7; + +SELECT count(*) FROM tab WHERE f64 < 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE f64_count_min < 7.7; +SELECT count(*) FROM tab WHERE f64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE f64 < '7'; +SELECT count(*) FROM tab WHERE f64_tdigest < '7'; +SELECT count(*) FROM tab WHERE f64_count_min < '7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7'; + +SELECT count(*) FROM tab WHERE f64 < '7.7'; +SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; +SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; + +-- dt ---------------------------------------------------- + +SELECT 'dt and ='; + +SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt = 7; +SELECT count(*) FROM tab WHERE dt_tdigest = 7; +SELECT count(*) FROM tab WHERE dt_count_min = 7; +SELECT count(*) FROM tab WHERE dt_uniq = 7; + +SELECT 'dt and <'; + +SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt < 7; +SELECT count(*) FROM tab WHERE dt_tdigest < 7; +SELECT count(*) FROM tab WHERE dt_count_min < 7; +SELECT count(*) FROM tab WHERE dt_uniq < 7; + +-- b ---------------------------------------------------- + +SELECT 'b and ='; + +SELECT count(*) FROM tab WHERE b = true; +SELECT count(*) FROM tab WHERE b_tdigest = true; +SELECT count(*) FROM tab WHERE b_count_min = true; +SELECT count(*) FROM tab WHERE b_uniq = true; + +SELECT count(*) FROM tab WHERE b = 'true'; +SELECT count(*) FROM tab WHERE b_tdigest = 'true'; +SELECT count(*) FROM tab WHERE b_count_min = 
'true'; +SELECT count(*) FROM tab WHERE b_uniq = 'true'; + +SELECT count(*) FROM tab WHERE b = 1; +SELECT count(*) FROM tab WHERE b_tdigest = 1; +SELECT count(*) FROM tab WHERE b_count_min = 1; +SELECT count(*) FROM tab WHERE b_uniq = 1; + +SELECT count(*) FROM tab WHERE b = 1.1; +SELECT count(*) FROM tab WHERE b_tdigest = 1.1; +SELECT count(*) FROM tab WHERE b_count_min = 1.1; +SELECT count(*) FROM tab WHERE b_uniq = 1.1; + +-- s ---------------------------------------------------- + +SELECT 's and ='; + +SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE } +-- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported +SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE } +-- SELECT count(*) FROM tab WHERE s_uniq = 7; -- not supported + +SELECT count(*) FROM tab WHERE s = '7'; +-- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported +SELECT count(*) FROM tab WHERE s_count_min = '7'; +-- SELECT count(*) FROM tab WHERE s_uniq = '7'; -- not supported + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference deleted file mode 100644 index 77786dbdd8c..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.reference +++ /dev/null @@ -1,35 +0,0 @@ -CREATE TABLE default.t1\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest, uniq),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After modify TDigest - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(c, -1), less(a, 10), less(b, 10)) (removed) -After drop - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql deleted file mode 100644 index 0f5f353c045..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.sql +++ /dev/null @@ -1,73 +0,0 @@ -DROP TABLE IF EXISTS t1; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET mutations_sync = 1; - -CREATE TABLE t1 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Int64 STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE t1; - -INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; -INSERT INTO t1 select 0, 0, 11, 
generateUUIDv4(); - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -OPTIMIZE TABLE t1 FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT 'After modify TDigest'; -ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; -ALTER TABLE t1 MATERIALIZE STATISTICS c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - - -ALTER TABLE t1 DROP STATISTICS c; - -SELECT 'After drop'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t1; -DROP TABLE IF EXISTS t2; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c LowCardinality(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t2; -DROP TABLE IF EXISTS t3; - -CREATE TABLE t3 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Nullable(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t3; - diff --git a/tests/queries/0_stateless/02864_statistics_usage.reference b/tests/queries/0_stateless/02864_statistics_usage.reference new file mode 100644 index 00000000000..a9f669b88c1 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_usage.reference @@ -0,0 +1,20 @@ 
+After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After drop statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) +After add and materialize statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After rename + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(c, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_usage.sql b/tests/queries/0_stateless/02864_statistics_usage.sql new file mode 100644 index 00000000000..4956bd27e87 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_usage.sql @@ -0,0 +1,42 @@ +-- Test that the optimizer picks up column statistics +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest) +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab DROP STATISTICS a, b; +SELECT 'After drop statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After add and materialize statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab RENAME COLUMN b TO c; +SELECT 'After rename'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then c (statistics used) + +DROP TABLE IF EXISTS tab; From ee433684ddc4614a5bb93dbfd1e3b481a2f343d6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 7 Aug 2024 10:55:16 +0000 Subject: [PATCH 2066/2361] Follow-up to ClickHouse#63898, pt. 
III --- .../02421_simple_queries_for_opentelemetry.sh | 10 +++---- .../02458_insert_select_progress_tcp.sh | 2 +- .../02476_analyzer_identifier_hints.sh | 8 +++--- .../0_stateless/02480_tets_show_full.sh | 2 +- .../0_stateless/02482_load_parts_refcounts.sh | 2 +- .../02496_remove_redundant_sorting.sh | 6 ++--- .../02497_storage_file_reader_selection.sh | 2 +- .../02500_remove_redundant_distinct.sh | 6 ++--- ...atabase_replicated_no_arguments_for_rmt.sh | 4 +-- .../0_stateless/02532_send_logs_level_test.sh | 2 +- .../02555_davengers_rename_chain.sh | 8 +++--- ...02572_query_views_log_background_thread.sh | 8 +++--- .../02703_max_local_read_bandwidth.sh | 4 +-- .../02703_max_local_write_bandwidth.sh | 4 +-- .../0_stateless/02704_max_backup_bandwidth.sh | 4 +-- .../0_stateless/02724_limit_num_mutations.sh | 8 +++--- .../02725_async_insert_table_setting.sh | 4 +-- .../queries/0_stateless/02841_local_assert.sh | 6 ++--- ...rge_across_partitions_final_with_lonely.sh | 6 ++--- .../02871_clickhouse_client_restart_pager.sh | 2 +- .../02875_clickhouse_local_multiquery.sh | 4 +-- .../02875_merge_engine_set_index.sh | 4 +-- ...tion_table_with_explicit_insert_columns.sh | 2 +- ...ture_from_insertion_table_with_defaults.sh | 2 +- .../02883_named_collections_override.sh | 2 +- .../02884_async_insert_native_protocol_1.sh | 4 +-- .../02884_async_insert_native_protocol_2.sh | 4 +-- .../02884_async_insert_native_protocol_3.sh | 4 +-- .../02884_async_insert_native_protocol_4.sh | 4 +-- .../02885_ephemeral_columns_from_file.sh | 2 +- .../0_stateless/02895_npy_output_format.sh | 2 +- ...ak_memory_usage_http_headers_regression.sh | 2 +- .../02903_empty_order_by_throws_error.sh | 4 +-- .../02903_rmt_retriable_merge_exception.sh | 6 ++--- ...904_empty_order_by_with_setting_enabled.sh | 8 +++--- .../02907_backup_mv_with_no_inner_table.sh | 8 +++--- .../02907_backup_mv_with_no_source_table.sh | 12 ++++----- .../02907_backup_restore_default_nullable.sh | 4 +-- .../02907_backup_restore_flatten_nested.sh | 8 +++--- .../02907_clickhouse_dictionary_bug.sh | 2 +- .../02907_system_backups_profile_events.sh | 6 ++--- .../0_stateless/02908_Npy_files_caching.sh | 4 +-- .../0_stateless/02908_table_ttl_dependency.sh | 6 ++--- .../02909_settings_in_json_schema_cache.sh | 2 +- .../02915_input_table_function_in_subquery.sh | 2 +- .../02915_lazy_loading_of_base_backups.sh | 26 +++++++++---------- .../0_stateless/02916_dictionary_access.sh | 8 +++--- .../0_stateless/02916_joinget_dependency.sh | 6 ++--- .../02930_client_file_log_comment.sh | 2 +- ...lumn_use_structure_from_insertion_table.sh | 2 +- .../02940_system_stacktrace_optimizations.sh | 6 ++--- ..._alter_metadata_merge_checksum_mismatch.sh | 8 +++--- .../02947_merge_tree_index_table_3.sh | 4 +-- ...2950_dictionary_ssd_cache_short_circuit.sh | 2 +- .../02950_distributed_initial_query_event.sh | 2 +- .../02974_backup_query_format_null.sh | 4 +-- ...ert_select_resize_to_max_insert_threads.sh | 2 +- .../03008_deduplication_random_setttings.sh | 6 ++--- .../03008_local_plain_rewritable.sh | 14 +++++----- .../03031_clickhouse_local_input.sh | 8 +++--- .../0_stateless/03032_async_backup_restore.sh | 4 +-- .../03096_http_interface_role_query_param.sh | 4 +-- ...03140_client_subsequent_external_tables.sh | 2 +- .../03143_prewhere_profile_events.sh | 12 ++++----- .../03145_non_loaded_projection_backup.sh | 12 ++++----- .../03155_test_move_to_prewhere.sh | 4 +-- .../03156_default_multiquery_split.sh | 2 +- .../0_stateless/03169_time_virtual_column.sh | 2 +- 
.../03173_parallel_replicas_join_bug.sh | 2 +- .../03198_settings_in_csv_tsv_schema_cache.sh | 8 +++--- .../03198_unload_primary_key_outdated.sh | 4 +-- .../03199_dictionary_table_access.sh | 8 +++--- ...s_to_read_for_schema_inference_in_cache.sh | 2 +- 73 files changed, 186 insertions(+), 186 deletions(-) diff --git a/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh b/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh index 98b571c5968..91e85eabcb8 100755 --- a/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh +++ b/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # $2 - query function execute_query() { - ${CLICKHOUSE_CLIENT} --opentelemetry_start_trace_probability=1 --query_id $1 -nq " + ${CLICKHOUSE_CLIENT} --opentelemetry_start_trace_probability=1 --query_id $1 -q " ${2} " } @@ -18,7 +18,7 @@ function execute_query() # so we only to check the db.statement only function check_query_span_query_only() { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; SELECT attribute['db.statement'] as query FROM system.opentelemetry_span_log @@ -31,7 +31,7 @@ ${CLICKHOUSE_CLIENT} -nq " function check_query_span() { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; SELECT attribute['db.statement'] as query, attribute['clickhouse.read_rows'] as read_rows, @@ -47,7 +47,7 @@ ${CLICKHOUSE_CLIENT} -nq " # # Set up # -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.opentelemetry_test; CREATE TABLE ${CLICKHOUSE_DATABASE}.opentelemetry_test (id UInt64) Engine=MergeTree Order By id; " @@ -79,4 +79,4 @@ check_query_span $query_id # ${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.opentelemetry_test; -" \ No newline at end of file +" diff --git a/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh b/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh index ae3ea017fbb..178da822d41 100755 --- a/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh +++ b/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists insert_select_progress_tcp; create table insert_select_progress_tcp(s UInt16) engine = MergeTree order by s; " diff --git a/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh b/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh index 4c850a6ec9e..92f519a9f8a 100755 --- a/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh +++ b/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS test_table; CREATE TABLE test_table ( @@ -74,7 +74,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, arrayMap(lambda_argument -> l $CLICKHOUSE_CLIENT -q "WITH 1 AS constant_value SELECT (SELECT constant_valu) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS test_table_compound; CREATE TABLE test_table_compound ( @@ -142,7 +142,7 @@ $CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constan $CLICKHOUSE_CLIENT -q "WITH cast(tuple(1), 'Tuple(value_1 String)') AS constant_value SELECT (SELECT constant_value.value_) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS test_table_1; CREATE TABLE test_table_1 ( @@ -185,7 +185,7 @@ $CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.a.c SETTIN $CLICKHOUSE_CLIENT -q "SELECT 1"; -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE test_table; DROP TABLE test_table_compound; DROP TABLE test_table_1; diff --git a/tests/queries/0_stateless/02480_tets_show_full.sh b/tests/queries/0_stateless/02480_tets_show_full.sh index 5f5040ba128..50184857a1f 100755 --- a/tests/queries/0_stateless/02480_tets_show_full.sh +++ b/tests/queries/0_stateless/02480_tets_show_full.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) database=$($CLICKHOUSE_CLIENT -q 'SELECT currentDatabase()') -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS test_02480_table; DROP VIEW IF EXISTS test_02480_view; CREATE TABLE test_02480_table (id Int64) ENGINE=MergeTree ORDER BY id; diff --git a/tests/queries/0_stateless/02482_load_parts_refcounts.sh b/tests/queries/0_stateless/02482_load_parts_refcounts.sh index 5303824d97c..4dc7a7fd99b 100755 --- a/tests/queries/0_stateless/02482_load_parts_refcounts.sh +++ b/tests/queries/0_stateless/02482_load_parts_refcounts.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS load_parts_refcounts SYNC; CREATE TABLE load_parts_refcounts (id UInt32) diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index c9bd242e429..6e132c55628 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -26,15 +26,15 @@ FROM ORDER BY number DESC ) ORDER BY number ASC" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;EXPLAIN $query" +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;EXPLAIN $query" function run_query { echo "-- query" echo "$1" echo "-- explain" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;EXPLAIN $1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;EXPLAIN $1" echo "-- execute" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;$1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;$1" } echo "-- Enabled query_plan_remove_redundant_sorting" diff --git a/tests/queries/0_stateless/02497_storage_file_reader_selection.sh b/tests/queries/0_stateless/02497_storage_file_reader_selection.sh index aa43e81f131..27243dd47fa 100755 --- a/tests/queries/0_stateless/02497_storage_file_reader_selection.sh +++ b/tests/queries/0_stateless/02497_storage_file_reader_selection.sh @@ -13,6 +13,6 @@ $CLICKHOUSE_LOCAL --storage_file_read_method=mmap --print-profile-events -q "SEL $CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -q "CreatedReadBufferMMap" && echo 'Fail' || echo 0 $CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -q "CreatedReadBufferOrdinary" && echo 1 || echo 'Fail' -$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -nq "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }" +$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -q "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }" rm $DATA_FILE diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh index 3c06119e8d2..6fd42fa940a 100755 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh @@ -24,15 +24,15 @@ FROM ) )" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;EXPLAIN $query" +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;EXPLAIN $query" function run_query { echo "-- query" echo "$1" echo "-- explain" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;EXPLAIN $1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;EXPLAIN $1" echo "-- execute" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;$1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;$1" } echo "-- Enabled $OPTIMIZATION_SETTING" diff --git a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh index a0f228e6af4..c1aa24943c1 100755 --- a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh +++ b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh @@ -14,8 +14,8 @@ ${CLICKHOUSE_CLIENT} -q "CREATE USER user_${CLICKHOUSE_DATABASE} settings databa ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO user_${CLICKHOUSE_DATABASE}" 
${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.sh b/tests/queries/0_stateless/02532_send_logs_level_test.sh index 506ac2331f2..a50539311cb 100755 --- a/tests/queries/0_stateless/02532_send_logs_level_test.sh +++ b/tests/queries/0_stateless/02532_send_logs_level_test.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key Int) engine=MergeTree order by tuple() settings min_bytes_for_wide_part = '1G', compress_marks = 1; insert into data values (1); diff --git a/tests/queries/0_stateless/02555_davengers_rename_chain.sh b/tests/queries/0_stateless/02555_davengers_rename_chain.sh index 660a95846c4..196507dc72e 100755 --- a/tests/queries/0_stateless/02555_davengers_rename_chain.sh +++ b/tests/queries/0_stateless/02555_davengers_rename_chain.sh @@ -46,7 +46,7 @@ tables["wrong_metadata_compact"]="min_bytes_for_wide_part = 10000000" for table in "${!tables[@]}"; do settings="${tables[$table]}" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS $table; CREATE TABLE $table( @@ -69,7 +69,7 @@ for table in "${!tables[@]}"; do wait_column "$table" "\`a1\` UInt64" || exit 2 - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" -- { echoOn } SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null; @@ -82,7 +82,7 @@ for table in "${!tables[@]}"; do wait_mutation_loaded "$table" "b1 TO a" || exit 2 - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" -- { echoOn } SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null; @@ -94,7 +94,7 @@ for table in "${!tables[@]}"; do wait_for_all_mutations "$table" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" -- { echoOn } SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null; diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh index 509cd03f6c2..22b94e09b58 100755 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "drop table if exists buffer_02572; +${CLICKHOUSE_CLIENT} --ignore-error --query "drop table if exists buffer_02572; drop table if exists data_02572; drop table if exists copy_02572; drop table if exists mv_02572;" ${CLICKHOUSE_CLIENT} --query="create table copy_02572 (key Int) engine=Memory();" @@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} --query="insert into buffer_02572 values (1);" if [ $(( $(date +%s) - start )) -gt 6 ]; then # clickhouse test cluster is overloaded, will skip # ensure that the flush was not direct - ${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" + ${CLICKHOUSE_CLIENT} --ignore-error --query "select * from data_02572; select * from copy_02572;" fi # we cannot use OPTIMIZE, this will attach query context, so let's wait @@ -31,11 +31,11 @@ for _ in {1..100}; do done -${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" +${CLICKHOUSE_CLIENT} --ignore-error --query "select * from data_02572; select * from copy_02572;" ${CLICKHOUSE_CLIENT} --query="system flush logs;" ${CLICKHOUSE_CLIENT} --query="select count() > 0, lower(status::String), errorCodeToName(exception_code) from system.query_views_log where view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') - group by 2, 3;" \ No newline at end of file + group by 2, 3;" diff --git a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh index 03e0f363d71..79253648475 100755 --- a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9; " @@ -26,7 +26,7 @@ read_methods=( for read_method in "${read_methods[@]}"; do query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "select * from data format Null settings max_local_read_bandwidth='1M', local_filesystem_read_method='$read_method'" - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT '$read_method', diff --git a/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh index 4f6a300c5b3..c5776134673 100755 --- a/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9; " @@ -13,7 +13,7 @@ $CLICKHOUSE_CLIENT -nm -q " query_id=$(random_str 10) # writes 1e6*8 bytes with 1M bandwith it should take (8-1)/1=7 seconds $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data select * from numbers(1e6) settings max_local_write_bandwidth='1M'" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query_duration_ms >= 7e3, diff --git a/tests/queries/0_stateless/02704_max_backup_bandwidth.sh b/tests/queries/0_stateless/02704_max_backup_bandwidth.sh index 8cb03a93a7a..7e914c4c539 100755 --- a/tests/queries/0_stateless/02704_max_backup_bandwidth.sh +++ b/tests/queries/0_stateless/02704_max_backup_bandwidth.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9; " @@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -q "insert into data select * from numbers(1e6)" query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to Disk('backups', '$CLICKHOUSE_DATABASE/data/backup1')" --max_backup_bandwidth=1M > /dev/null -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query_duration_ms >= 7e3, diff --git a/tests/queries/0_stateless/02724_limit_num_mutations.sh b/tests/queries/0_stateless/02724_limit_num_mutations.sh index 60888db0e2e..604cc9ff08e 100755 --- a/tests/queries/0_stateless/02724_limit_num_mutations.sh +++ b/tests/queries/0_stateless/02724_limit_num_mutations.sh @@ -23,7 +23,7 @@ function wait_for_alter() done } -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t_limit_mutations SYNC; CREATE TABLE t_limit_mutations (id UInt64, v UInt64) @@ -48,14 +48,14 @@ SELECT count() FROM system.mutations WHERE database = currentDatabase() AND tabl SHOW CREATE TABLE t_limit_mutations; " -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " ALTER TABLE t_limit_mutations UPDATE v = 6 WHERE 1 SETTINGS number_of_mutations_to_throw = 100; ALTER TABLE t_limit_mutations MODIFY COLUMN v String SETTINGS number_of_mutations_to_throw = 100, alter_sync = 0; " wait_for_alter "String" -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t_limit_mutations ORDER BY id; SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_limit_mutations' AND NOT is_done; SHOW CREATE TABLE t_limit_mutations; @@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} --query "SYSTEM START MERGES t_limit_mutations" wait_for_mutation "t_limit_mutations" "0000000003" -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t_limit_mutations ORDER BY id; SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_limit_mutations' AND NOT is_done; SHOW CREATE TABLE t_limit_mutations; diff --git a/tests/queries/0_stateless/02725_async_insert_table_setting.sh b/tests/queries/0_stateless/02725_async_insert_table_setting.sh index 13911e8d677..14c2d335275 100755 --- a/tests/queries/0_stateless/02725_async_insert_table_setting.sh +++ 
b/tests/queries/0_stateless/02725_async_insert_table_setting.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t_mt_async_insert; DROP TABLE IF EXISTS t_mt_sync_insert; @@ -19,7 +19,7 @@ url="${CLICKHOUSE_URL}&async_insert=0&wait_for_async_insert=1" ${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_async_insert VALUES (1, 'aa'), (2, 'bb')" ${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_sync_insert VALUES (1, 'aa'), (2, 'bb')" -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT count() FROM t_mt_async_insert; SELECT count() FROM t_mt_sync_insert; diff --git a/tests/queries/0_stateless/02841_local_assert.sh b/tests/queries/0_stateless/02841_local_assert.sh index a167c09da1f..dc49007b0f6 100755 --- a/tests/queries/0_stateless/02841_local_assert.sh +++ b/tests/queries/0_stateless/02841_local_assert.sh @@ -7,12 +7,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh echo "create table test (x UInt64) engine=Memory; -insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}" | $CLICKHOUSE_LOCAL -nm +insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}" | $CLICKHOUSE_LOCAL -m echo "create table test (x UInt64) engine=Memory; -insert into test from infile 'data';" | $CLICKHOUSE_LOCAL -nm --ignore-error +insert into test from infile 'data';" | $CLICKHOUSE_LOCAL -m --ignore-error echo "create table test (x UInt64) engine=Memory; insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS} -select 1" | $CLICKHOUSE_LOCAL -nm +select 1" | $CLICKHOUSE_LOCAL -m diff --git a/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh b/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh index 4bc29ce4233..be0ef4e2648 100755 --- a/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh +++ b/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm -q """ +${CLICKHOUSE_CLIENT} -m -q """ DROP TABLE IF EXISTS with_lonely; CREATE TABLE with_lonely @@ -23,7 +23,7 @@ ORDER BY (id); """ create_optimize_partition() { - ${CLICKHOUSE_CLIENT} -nm -q """ + ${CLICKHOUSE_CLIENT} -m -q """ INSERT INTO with_lonely SELECT number, '$1', number*10, 0 FROM numbers(10); INSERT INTO with_lonely SELECT number+500000, '$1', number*10, 1 FROM numbers(10); """ @@ -39,7 +39,7 @@ create_optimize_partition "2022-10-29" create_optimize_partition "2022-10-30" create_optimize_partition "2022-10-31" -${CLICKHOUSE_CLIENT} -nm -q """ +${CLICKHOUSE_CLIENT} -m -q """ SYSTEM STOP MERGES with_lonely; INSERT INTO with_lonely SELECT number, '2022-11-01', number*10, 0 FROM numbers(10); diff --git a/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh b/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh index cc4ce9b122e..418e439e44b 100755 --- a/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh +++ b/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # head by default print 10 rows, but it is not enough to query 11 rows, since # we need to overflow the default pipe size, hence just 1 million of rows (it # should be around 6 MiB in text representation, should be definitelly enough). -$CLICKHOUSE_CLIENT --ignore-error -nm --pager head -q " +$CLICKHOUSE_CLIENT --ignore-error -m --pager head -q " select * from numbers(1e6); -- { clientError CANNOT_WRITE_TO_FILE_DESCRIPTOR } select * from numbers(1e6); -- { clientError CANNOT_WRITE_TO_FILE_DESCRIPTOR } " diff --git a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh index 3f2b732e71b..3a7d861262e 100755 --- a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh +++ b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "select 1; select 2;" $CLICKHOUSE_LOCAL -q "select 1; select 2;" # -n is a no-op -$CLICKHOUSE_CLIENT -n -q "select 1; select 2;" -$CLICKHOUSE_LOCAL -n -q "select 1; select 2;" +$CLICKHOUSE_CLIENT -q "select 1; select 2;" +$CLICKHOUSE_LOCAL -q "select 1; select 2;" exit 0 diff --git a/tests/queries/0_stateless/02875_merge_engine_set_index.sh b/tests/queries/0_stateless/02875_merge_engine_set_index.sh index 355d83167a6..f40696c31a9 100755 --- a/tests/queries/0_stateless/02875_merge_engine_set_index.sh +++ b/tests/queries/0_stateless/02875_merge_engine_set_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " CREATE TABLE t1 ( a UInt32, @@ -57,7 +57,7 @@ ORDER BY b DESC FORMAT Null;" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; SELECT ProfileEvents['SelectedMarks'] diff --git a/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh b/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh index 8bdaa47c111..dd08724456b 100755 --- a/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh +++ b/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_LOCAL -q "select 42 as x format Native" > $CLICKHOUSE_TEST_UNIQUE_NAME.native -$CLICKHOUSE_LOCAL -n -q " +$CLICKHOUSE_LOCAL -q " create table test (x UInt64, y UInt64) engine=Memory; insert into test (x) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native'); insert into test (y) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native'); diff --git a/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh b/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh index 315bbcd544f..c7270b65e19 100755 --- a/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh +++ b/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh $CLICKHOUSE_LOCAL -q "select 1 as x format Native" > $CLICKHOUSE_TEST_UNIQUE_NAME.native -$CLICKHOUSE_LOCAL -n -q " +$CLICKHOUSE_LOCAL -q " create table test (x UInt64, y UInt64 default 42) engine=Memory; insert into test select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native'); select * from test; diff --git a/tests/queries/0_stateless/02883_named_collections_override.sh b/tests/queries/0_stateless/02883_named_collections_override.sh index a08c795127d..915ce280226 100755 --- a/tests/queries/0_stateless/02883_named_collections_override.sh +++ b/tests/queries/0_stateless/02883_named_collections_override.sh @@ -8,7 +8,7 @@ u1="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection1" u2="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection2" u3="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection3" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP NAMED COLLECTION IF EXISTS $u1; DROP NAMED COLLECTION IF EXISTS $u2; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh index 7f583087336..791515c82d6 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_1; CREATE TABLE t_async_insert_native_1 (id UInt64, s String) ENGINE = MergeTree ORDER BY id; " @@ -22,7 +22,7 @@ echo '{"id": 1, "s": "aaa"}' \ | $CLICKHOUSE_CLIENT $async_insert_options -q 'INSERT INTO t_async_insert_native_1 FORMAT JSONEachRow {"id": 2, "s": "bbb"}' 2>&1 \ | grep -o "NOT_IMPLEMENTED" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " SELECT sum(length(entries.bytes)) FROM system.asynchronous_inserts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_async_insert_native_1'; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh index b9b1854eaef..a8a9209ee68 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_2; CREATE TABLE t_async_insert_native_2 (id UInt64, s String) ENGINE = MergeTree ORDER BY id; " @@ -18,7 +18,7 @@ echo "(3, 'ccc') (4, 'ddd') (5, 'eee')" | $CLICKHOUSE_CLIENT $async_insert_optio wait -$CLICKHOUSE_CLIENT -n -q " 
+$CLICKHOUSE_CLIENT -q " SELECT * FROM t_async_insert_native_2 ORDER BY id; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh index c9d399607d0..229f13eb821 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_3; CREATE TABLE t_async_insert_native_3 (id UInt64, s String) ENGINE = MergeTree ORDER BY id; " @@ -21,7 +21,7 @@ $CLICKHOUSE_CLIENT $async_insert_options -q "INSERT INTO t_async_insert_native_3 wait -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " SELECT format, length(entries.bytes) FROM system.asynchronous_inserts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_async_insert_native_3' ORDER BY format; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh index 9118c11315c..e84c1ca8899 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_4; CREATE TABLE t_async_insert_native_4 (id UInt64) ENGINE = MergeTree ORDER BY id; " @@ -20,7 +20,7 @@ echo "(2) (3) (4) (5)" | $CLICKHOUSE_CLIENT_WITH_LOG $async_insert_options --asy -q 'INSERT INTO t_async_insert_native_4 FORMAT Values' 2>&1 \ | grep -c "too much data" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " SELECT * FROM t_async_insert_native_4 ORDER BY id; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh b/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh index 2917ec86957..065658d4d56 100755 --- a/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh +++ b/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', a $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', auto, 'x UInt64 Alias y, y UInt64')" 2>&1 | grep -c "BAD_ARGUMENTS" $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', auto, 'x UInt64 Materialized 42, y UInt64')" 2>&1 | grep -c "BAD_ARGUMENTS" -$CLICKHOUSE_LOCAL -n -q " +$CLICKHOUSE_LOCAL -q " create table test (x UInt64 Ephemeral, y UInt64 default x + 1) engine=Memory; insert into test (x, y) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl'); select * from test; diff --git a/tests/queries/0_stateless/02895_npy_output_format.sh b/tests/queries/0_stateless/02895_npy_output_format.sh index a364e447062..74000bc298f 100755 --- a/tests/queries/0_stateless/02895_npy_output_format.sh +++ b/tests/queries/0_stateless/02895_npy_output_format.sh @@ -9,7 +9,7 @@ mkdir -p ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ rm -rf ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* chmod 777 ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ -${CLICKHOUSE_CLIENT} -n -q --ignore-error " +${CLICKHOUSE_CLIENT} -q --ignore-error " DROP DATABASE IF EXISTS npy_output_02895; CREATE DATABASE IF NOT EXISTS 
npy_output_02895; diff --git a/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh b/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh index d6775927f35..b4656c9e321 100755 --- a/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh +++ b/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS data; DROP TABLE IF EXISTS data2; DROP VIEW IF EXISTS mv1; diff --git a/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh b/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh index 64f5dd1a987..ef631d9ed1b 100755 --- a/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh +++ b/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh @@ -5,13 +5,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # setting disabled and no order by or primary key; expect error -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() SETTINGS index_granularity = 8192; " 2>&1 \ | grep -F -q "You must provide an ORDER BY or PRIMARY KEY expression in the table definition." && echo 'OK' || echo 'FAIL' # setting disabled and primary key in table definition -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() PRIMARY KEY a SETTINGS index_granularity = 8192; SHOW CREATE TABLE test_empty_order_by; diff --git a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh index 5065da371a8..b77e5b0b402 100755 --- a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh +++ b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh @@ -15,7 +15,7 @@ if [[ $($CLICKHOUSE_CLIENT -q "select count()>0 from system.clusters where clust cluster=test_cluster_database_replicated fi -$CLICKHOUSE_CLIENT -nm --distributed_ddl_output_mode=none -q " +$CLICKHOUSE_CLIENT -m --distributed_ddl_output_mode=none -q " drop table if exists rmt1; drop table if exists rmt2; @@ -46,7 +46,7 @@ part_name='%' # wait while there be at least one 'No active replica has part all_0_1_1 or covering part' in logs for _ in {0..50}; do - no_active_repilica_messages=$($CLICKHOUSE_CLIENT -nm -q " + no_active_repilica_messages=$($CLICKHOUSE_CLIENT -m -q " system flush logs; select count() @@ -65,7 +65,7 @@ for _ in {0..50}; do sleep 1 done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " system start pulling replication log rmt2; system flush logs; diff --git a/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh b/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh index 7ac9b488be5..5f9dc6ea077 100755 --- a/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh +++ b/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh # setting enabled and no order by or primary key -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() SETTINGS index_granularity = 8192; @@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" " 2>&1 \ | grep -F -q "ORDER BY tuple()" && echo 'OK' || echo 'FAIL' # setting enabled and per-column primary key -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8 PRIMARY KEY, b String PRIMARY KEY) ENGINE = MergeTree() SETTINGS index_granularity = 8192; @@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" " 2>&1 \ | grep -F -q "ORDER BY (a, b)" && echo 'OK' || echo 'FAIL' # setting enabled and primary key in table definition (not per-column or order by) -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8, b String) ENGINE = MergeTree() PRIMARY KEY (a) SETTINGS index_granularity = 8192; @@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" " 2>&1 \ | grep -F -q "ORDER BY a" && echo 'OK' || echo 'FAIL' # setting enabled and order by in table definition (no primary key) -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8, b String) ENGINE = MergeTree() ORDER BY (a, b) SETTINGS index_granularity = 8192; diff --git a/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh b/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh index 30ec50fa20f..e37f1e51c74 100755 --- a/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh +++ b/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists src; create table src (a Int32) engine = MergeTree() order by tuple(); @@ -15,14 +15,14 @@ create materialized view mv (a Int32) engine = MergeTree() order by tuple() as s uuid=$(${CLICKHOUSE_CLIENT} --query "select uuid from system.tables where table='mv' and database == currentDatabase()") inner_table=".inner_id.${uuid}" -${CLICKHOUSE_CLIENT} -nm --query "drop table \`$inner_table\` sync" +${CLICKHOUSE_CLIENT} -m --query "drop table \`$inner_table\` sync" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " set send_logs_level = 'error'; backup table ${CLICKHOUSE_DATABASE}.\`mv\` to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table mv; restore table ${CLICKHOUSE_DATABASE}.\`mv\` from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "RESTORED" diff --git a/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh b/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh index d59ebe400ee..f950954941f 100755 --- a/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh +++ b/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists src; create table src (a Int32) engine = MergeTree() order by tuple(); @@ -15,18 +15,18 @@ drop table if exists mv; create materialized view mv to dst (a Int32) as select * from src; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table src; backup database ${CLICKHOUSE_DATABASE} on cluster test_shard_localhost to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table mv; set allow_deprecated_database_ordinary=1; restore table ${CLICKHOUSE_DATABASE}.mv on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "RESTORED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists src; create table src (a Int32) engine = MergeTree() order by tuple(); @@ -37,13 +37,13 @@ drop table if exists mv; create materialized view mv to dst (a Int32) as select * from src; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table src; drop table dst; backup database ${CLICKHOUSE_DATABASE} on cluster test_shard_localhost to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2'); " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table mv; set allow_deprecated_database_ordinary=1; restore table ${CLICKHOUSE_DATABASE}.mv on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2'); diff --git a/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh b/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh index 8ed36a7edd7..dc5793d1638 100755 --- a/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh +++ b/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test; set data_type_default_nullable = 0; create table test (test String) ENGINE = MergeTree() ORDER BY tuple(); @@ -13,7 +13,7 @@ backup table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost to Disk ${CLICKHOUSE_CLIENT} --query "show create table test" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table test sync; set data_type_default_nullable = 1; restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); diff --git a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh index 742d24a97eb..eae307add10 100755 --- a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh +++ b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test; set flatten_nested = 0; create table test (test Array(Tuple(foo String, bar Float64))) ENGINE = MergeTree() ORDER BY tuple(); @@ -13,7 +13,7 @@ backup table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost to Disk ${CLICKHOUSE_CLIENT} --query "show create table test" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test2; set flatten_nested = 0; create table test2 (test Nested(foo String, bar Float64)) ENGINE = MergeTree() ORDER BY tuple(); @@ -22,7 +22,7 @@ backup table ${CLICKHOUSE_DATABASE}.test2 on cluster test_shard_localhost to Dis ${CLICKHOUSE_CLIENT} --query "show create table test2" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table test sync; set flatten_nested = 1; restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); @@ -30,7 +30,7 @@ restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from D ${CLICKHOUSE_CLIENT} --query "show create table test" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table test2 sync; set flatten_nested = 1; restore table ${CLICKHOUSE_DATABASE}.test2 on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2'); diff --git a/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh b/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh index 57182050534..2cad15c6fcb 100755 --- a/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh +++ b/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS 02907_dictionary; DROP TABLE IF EXISTS 02907_table; diff --git a/tests/queries/0_stateless/02907_system_backups_profile_events.sh b/tests/queries/0_stateless/02907_system_backups_profile_events.sh index 801056a2844..9a1d5a3db11 100755 --- a/tests/queries/0_stateless/02907_system_backups_profile_events.sh +++ b/tests/queries/0_stateless/02907_system_backups_profile_events.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple(); " @@ -12,10 +12,10 @@ create table test (a Int32) engine = MergeTree() order by tuple(); backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME} backup_name="Disk('backups', '$backup_id')"; -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " backup table ${CLICKHOUSE_DATABASE}.test to $backup_name; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " select ProfileEvents['BackupEntriesCollectorMicroseconds'] > 10 from system.backups where name='Disk(\'backups\', \'$backup_id\')' " diff --git a/tests/queries/0_stateless/02908_Npy_files_caching.sh b/tests/queries/0_stateless/02908_Npy_files_caching.sh index 4845f740972..218e13efb95 100755 --- a/tests/queries/0_stateless/02908_Npy_files_caching.sh +++ b/tests/queries/0_stateless/02908_Npy_files_caching.sh @@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=0" $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=1" $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy', auto, 'array Int64') settings optimize_count_from_files=1" -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " desc file('$CURDIR/data_npy/one_dim.npy'); select number_of_rows from system.schema_inference_cache where format='Npy'; " $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=0" $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=1" -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " desc file('$CURDIR/data_npy/npy_big.npy'); select number_of_rows from system.schema_inference_cache where format='Npy'; " diff --git a/tests/queries/0_stateless/02908_table_ttl_dependency.sh b/tests/queries/0_stateless/02908_table_ttl_dependency.sh index 70136b4a42b..0bc02426f61 100755 --- a/tests/queries/0_stateless/02908_table_ttl_dependency.sh +++ b/tests/queries/0_stateless/02908_table_ttl_dependency.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS 02908_dependent; DROP TABLE IF EXISTS 02908_main; @@ -14,11 +14,11 @@ $CLICKHOUSE_CLIENT -nm -q " CREATE TABLE 02908_dependent (a UInt32, ts DateTime) ENGINE = MergeTree ORDER BY a TTL ts + 1 WHERE a IN (SELECT a FROM ${CLICKHOUSE_DATABASE}.02908_main); " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE 02908_main; " 2>&1 | grep -F -q "HAVE_DEPENDENT_OBJECTS" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE 02908_dependent; DROP TABLE 02908_main; " diff --git a/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh b/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh index 8da144f90ca..75d491642ea 100755 --- a/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh +++ b/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS schema_inference_make_columns_nullable=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS schema_inference_make_columns_nullable=0; SELECT count() from system.schema_inference_cache where format = 'JSON' and additional_format_info like '%schema_inference_make_columns_nullable%';" diff --git a/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh b/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh index 80e38338751..7ad38e11e96 100755 --- a/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh +++ b/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " CREATE TABLE IF NOT EXISTS ts_data_double_raw ( device_id UInt32 NOT NULL CODEC(ZSTD), diff --git a/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh b/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh index 5f0f41a956b..b6d6ca57768 100755 --- a/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh +++ b/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh @@ -13,40 +13,40 @@ b_backup="Disk('backups', '$b_backup_id')" c_backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_c c_backup="Disk('backups', '$c_backup_id')" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS tbl1; DROP TABLE IF EXISTS tbl2; DROP TABLE IF EXISTS tbl3; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE TABLE tbl1 (a Int32) ENGINE = MergeTree() ORDER BY tuple(); " # The following BACKUP command must write backup 'a'. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $a_backup SETTINGS id='$a_backup_id'; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE TABLE tbl2 (a Int32) ENGINE = MergeTree() ORDER BY tuple(); " # The following BACKUP command must read backup 'a' and write backup 'b'. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $b_backup SETTINGS id='$b_backup_id', base_backup=$a_backup; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE TABLE tbl3 (a Int32) ENGINE = MergeTree() ORDER BY tuple(); " # The following BACKUP command must read only backup 'b' (and not 'a') and write backup 'c'. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $c_backup SETTINGS id='$c_backup_id', base_backup=$b_backup; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE tbl1; DROP TABLE tbl2; DROP TABLE tbl3; @@ -57,28 +57,28 @@ r2_restore_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_r2 r3_restore_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_r3 # The following RESTORE command must read all 3 backups 'a', 'b', c' because the table 'tbl1' was in the first backup. 
-${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl1 FROM $c_backup SETTINGS id='$r1_restore_id'; " | grep -o "RESTORED" # The following RESTORE command must read only 2 backups 'b', c' (and not 'a') because the table 'tbl2' was in the second backup. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl2 FROM $c_backup SETTINGS id='$r2_restore_id'; " | grep -o "RESTORED" # The following RESTORE command must read only 1 backup 'c' (and not 'a' or 'b') because the table 'tbl3' was in the third backup. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl3 FROM $c_backup SETTINGS id='$r3_restore_id'; " | grep -o "RESTORED" all_ids="['$a_backup_id', '$b_backup_id', '$c_backup_id', '$r1_restore_id', '$r2_restore_id', '$r3_restore_id']" id_prefix_len=`expr "${CLICKHOUSE_TEST_UNIQUE_NAME}_" : '.*'` -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " SELECT substr(id, 1 + $id_prefix_len) as short_id, ProfileEvents['BackupsOpenedForRead'], ProfileEvents['BackupsOpenedForWrite'] FROM system.backups WHERE id IN ${all_ids} ORDER BY short_id " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE tbl1; DROP TABLE tbl2; DROP TABLE tbl3; diff --git a/tests/queries/0_stateless/02916_dictionary_access.sh b/tests/queries/0_stateless/02916_dictionary_access.sh index 08ee517ab3b..be62cc027ef 100755 --- a/tests/queries/0_stateless/02916_dictionary_access.sh +++ b/tests/queries/0_stateless/02916_dictionary_access.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}" dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE DICTIONARY IF NOT EXISTS ${dictname} ( id UInt64, @@ -23,15 +23,15 @@ ${CLICKHOUSE_CLIENT} -nm --query " SELECT dictGet(${dictname}, 'value', 1); " -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT * FROM dictionary(${dictname}); " 2>&1 | grep -o ACCESS_DENIED | uniq -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT dictGet(${dictname}, 'value', 1); " 2>&1 | grep -o ACCESS_DENIED | uniq -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP DICTIONARY IF EXISTS ${dictname}; DROP USER IF EXISTS ${username}; " diff --git a/tests/queries/0_stateless/02916_joinget_dependency.sh b/tests/queries/0_stateless/02916_joinget_dependency.sh index 6477ae8c967..ff9332cb57f 100755 --- a/tests/queries/0_stateless/02916_joinget_dependency.sh +++ b/tests/queries/0_stateless/02916_joinget_dependency.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We test the dependency on the DROP -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS Sub_distributed; DROP TABLE IF EXISTS Sub; DROP TABLE IF EXISTS Mapping; @@ -20,8 +20,8 @@ $CLICKHOUSE_CLIENT -q " DROP TABLE Mapping; " 2>&1 | grep -cm1 "HAVE_DEPENDENT_OBJECTS" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE Sub_distributed; DROP TABLE Sub; DROP TABLE Mapping; -" \ No newline at end of file +" diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.sh b/tests/queries/0_stateless/02930_client_file_log_comment.sh index 50cd587e4b5..393bffcaf59 100755 --- 
a/tests/queries/0_stateless/02930_client_file_log_comment.sh +++ b/tests/queries/0_stateless/02930_client_file_log_comment.sh @@ -14,7 +14,7 @@ echo -n 'select 4242' >> "$file2" $CLICKHOUSE_CLIENT --queries-file "$file1" "$file2" <<<'select 42' $CLICKHOUSE_CLIENT --log_comment foo --queries-file /dev/stdin <<<'select 424242' -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " system flush logs; select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 42' and type != 'QueryStart'; select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 4242' and type != 'QueryStart'; diff --git a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh index d9e4a2c8f8b..76ab56a4570 100755 --- a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh +++ b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " create table test (x UInt64, y UInt32, size UInt64) engine=Memory; insert into test select c1, c2, _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') settings use_structure_from_insertion_table_in_table_functions=1; select * from test; diff --git a/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh index 0e23bb6c42b..9d1faf301d3 100755 --- a/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh +++ b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh @@ -9,12 +9,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # no message at all echo "thread = 0" -$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to' +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to' # send messages to some threads echo "thread != 0" -$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)' +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)' # there is no thread with comm="foo", so no signals will be sent echo "thread_name = 'foo'" -$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)' +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)' diff --git a/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh b/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh index 
27950866e81..44af2dbf26f 100755 --- a/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh +++ b/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh @@ -26,7 +26,7 @@ function wait_part() function restore_failpoints() { # restore entry error with failpoints (to avoid endless errors in logs) - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system enable failpoint replicated_queue_unfail_entries; system sync replica $failed_replica; system disable failpoint replicated_queue_unfail_entries; @@ -34,7 +34,7 @@ function restore_failpoints() } trap restore_failpoints EXIT -$CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q " +$CLICKHOUSE_CLIENT -m --insert_keeper_fault_injection_probability=0 -q " drop table if exists data_r1; drop table if exists data_r2; @@ -45,7 +45,7 @@ $CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q " " # will fail ALTER_METADATA on one of replicas -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " system enable failpoint replicated_queue_fail_next_entry; alter table data_r1 drop index value_idx settings alter_sync=0; -- part all_0_0_0_1 @@ -80,7 +80,7 @@ fi # This will create MERGE_PARTS, on failed replica it will be fetched from source replica (since it does not have all parts to execute merge) $CLICKHOUSE_CLIENT -q "optimize table $success_replica final settings optimize_throw_if_noop=1, alter_sync=1" # part all_0_0_1_1 -$CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q " +$CLICKHOUSE_CLIENT -m --insert_keeper_fault_injection_probability=0 -q " insert into $success_replica (key) values (2); -- part all_2_2_0 -- Avoid 'Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet' system sync replica $success_replica pull; diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh b/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh index 6cb184cb1fe..ec699d974d4 100755 --- a/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) user_name="${CLICKHOUSE_DATABASE}_test_user_02947" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_merge_tree_index; DROP USER IF EXISTS $user_name; @@ -44,7 +44,7 @@ $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr.size $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_merge_tree_index; DROP USER IF EXISTS $user_name; " diff --git a/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh b/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh index daa9c571a5d..d06aba8a4b6 100755 --- a/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh +++ b/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE source_table ( id UInt64, diff --git a/tests/queries/0_stateless/02950_distributed_initial_query_event.sh b/tests/queries/0_stateless/02950_distributed_initial_query_event.sh index 7f690a681c4..737d5c6b41e 100755 --- a/tests/queries/0_stateless/02950_distributed_initial_query_event.sh +++ b/tests/queries/0_stateless/02950_distributed_initial_query_event.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # CREATE TABLE local (x UInt8) Engine=Memory; # CREATE TABLE distributed ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), x) -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS local; DROP TABLE IF EXISTS distributed; CREATE TABLE local (x UInt8) Engine=Memory; diff --git a/tests/queries/0_stateless/02974_backup_query_format_null.sh b/tests/queries/0_stateless/02974_backup_query_format_null.sh index ddba2f6de16..345a4f47b20 100755 --- a/tests/queries/0_stateless/02974_backup_query_format_null.sh +++ b/tests/queries/0_stateless/02974_backup_query_format_null.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS tbl; CREATE TABLE tbl (a Int32) ENGINE = MergeTree() ORDER BY tuple(); INSERT INTO tbl VALUES (2), (80), (-12345); @@ -14,7 +14,7 @@ backup_name="Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}')" ${CLICKHOUSE_CLIENT} --query "BACKUP TABLE tbl TO ${backup_name} FORMAT Null" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE tbl; RESTORE ALL FROM ${backup_name} FORMAT Null " diff --git a/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh index e65c9654c9c..7ad5a2179f9 100755 --- a/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh +++ b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nq """ +${CLICKHOUSE_CLIENT} -q """ CREATE TABLE t1_local ( n UInt64, diff --git a/tests/queries/0_stateless/03008_deduplication_random_setttings.sh b/tests/queries/0_stateless/03008_deduplication_random_setttings.sh index e9f59138177..07b99eb4e99 100755 --- a/tests/queries/0_stateless/03008_deduplication_random_setttings.sh +++ b/tests/queries/0_stateless/03008_deduplication_random_setttings.sh @@ -35,7 +35,7 @@ THIS_RUN+=" deduplicate_src_table=$deduplicate_src_table" THIS_RUN+=" deduplicate_dst_table=$deduplicate_dst_table" THIS_RUN+=" insert_unique_blocks=$insert_unique_blocks" -$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " +$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \ --insert-method $insert_method \ --table-engine $engine \ @@ -48,7 +48,7 @@ $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " ) " 1>/dev/null 2>&1 && echo 'insert_several_blocks_into_table OK' || echo "FAIL: insert_several_blocks_into_table ${THIS_RUN}" -$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " +$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \ --insert-method $insert_method \ --table-engine $engine \ @@ -61,7 +61,7 @@ $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " ) " 1>/dev/null 2>&1 && echo 'mv_generates_several_blocks OK' || echo "FAIL: mv_generates_several_blocks ${THIS_RUN}" -$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " +$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \ --insert-method $insert_method \ --table-engine $engine \ diff --git a/tests/queries/0_stateless/03008_local_plain_rewritable.sh b/tests/queries/0_stateless/03008_local_plain_rewritable.sh index d51e180efc9..e61f9061297 100755 --- a/tests/queries/0_stateless/03008_local_plain_rewritable.sh +++ b/tests/queries/0_stateless/03008_local_plain_rewritable.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query "drop table if exists 03008_test_local_mt sync" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " create table 03008_test_local_mt (a Int32, b Int64, c Int64) engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b) settings disk = disk( @@ -19,35 +19,35 @@ settings disk = disk( path = '/var/lib/clickhouse/disks/local_plain_rewritable/') " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " insert into 03008_test_local_mt (*) values (1, 2, 0), (2, 2, 2), (3, 1, 9), (4, 7, 7), (5, 10, 2), (6, 12, 5); insert into 03008_test_local_mt (*) select number, number, number from numbers_mt(10000); " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " select count(*) from 03008_test_local_mt; select (*) from 03008_test_local_mt order by tuple(a, b) limit 10; " ${CLICKHOUSE_CLIENT} --query "optimize table 03008_test_local_mt final;" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " alter table 03008_test_local_mt modify setting disk = '03008_local_plain_rewritable', old_parts_lifetime = 3600; select engine_full from system.tables WHERE database = currentDatabase() AND name = '03008_test_local_mt'; " | grep -c "old_parts_lifetime = 3600" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " select count(*) from 03008_test_local_mt; select (*) from 03008_test_local_mt order by tuple(a, b) limit 
10; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " alter table 03008_test_local_mt update c = 0 where a % 2 = 1; alter table 03008_test_local_mt add column d Int64 after c; alter table 03008_test_local_mt drop column c; " 2>&1 | grep -Fq "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " truncate table 03008_test_local_mt; select count(*) from 03008_test_local_mt; " diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.sh b/tests/queries/0_stateless/03031_clickhouse_local_input.sh index 6f59e9b9703..e2f9cf48108 100755 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.sh +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.sh @@ -6,15 +6,15 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) tmp_file="$CUR_DIR/$CLICKHOUSE_DATABASE.txt" echo '# foo' -$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -n -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') format LineAsString" << "$INPUT_FILE" -$CLICKHOUSE_CLIENT --external --file="$INPUT_FILE" --name=t --structure='x String' -nm -q " +$CLICKHOUSE_CLIENT --external --file="$INPUT_FILE" --name=t --structure='x String' -m -q " select * from t; select * from t; " diff --git a/tests/queries/0_stateless/03143_prewhere_profile_events.sh b/tests/queries/0_stateless/03143_prewhere_profile_events.sh index 00daa0fe7cc..6a6b993e5f8 100755 --- a/tests/queries/0_stateless/03143_prewhere_profile_events.sh +++ b/tests/queries/0_stateless/03143_prewhere_profile_events.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS t; CREATE TABLE t(a UInt32, b UInt32, c UInt32, d UInt32) ENGINE=MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part=0, min_rows_for_wide_part=0; @@ -25,7 +25,7 @@ client_opts=( --max_threads 8 ) -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_1" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_1" -q " SELECT * FROM t PREWHERE (b % 8192) = 42 @@ -33,7 +33,7 @@ PREWHERE (b % 8192) = 42 FORMAT Null " -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_2" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_2" -q " SELECT * FROM t PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 @@ -42,7 +42,7 @@ PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 settings enable_multiple_prewhere_read_steps=1; " -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_3" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_3" -q " SELECT * FROM t PREWHERE (b % 8192) = 42 AND (c % 16384) = 42 @@ -51,7 +51,7 @@ PREWHERE (b % 8192) = 42 AND (c % 16384) = 42 settings enable_multiple_prewhere_read_steps=0; " -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_4" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_4" -q " SELECT b, c FROM t PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 @@ -59,7 +59,7 @@ PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 settings enable_multiple_prewhere_read_steps=1; " -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; -- 52503 which is 43 * number of granules, 10000000 diff --git a/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh b/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh index 95aef9bbc5b..4e7b318e202 100755 --- 
a/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh +++ b/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists tp_1; create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y partition by intDiv(y, 100) settings max_parts_to_merge_at_once=1; insert into tp_1 select number, number from numbers(3); @@ -25,7 +25,7 @@ alter table tp_1 drop projection pp; alter table tp_1 attach partition '0'; " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; check table tp_1 settings check_query_single_value_result = 0;" | grep -o "Found unexpected projection directories: pp.proj" @@ -34,19 +34,19 @@ $CLICKHOUSE_CLIENT -q " backup table tp_1 to Disk('backups', '$backup_id'); " | grep -o "BACKUP_CREATED" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; drop table tp_1; restore table tp_1 from Disk('backups', '$backup_id'); " | grep -o "RESTORED" $CLICKHOUSE_CLIENT -q "select count() from tp_1;" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; check table tp_1 settings check_query_single_value_result = 0;" | grep -o "Found unexpected projection directories: pp.proj" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; check table tp_1" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; drop table tp_1" diff --git a/tests/queries/0_stateless/03155_test_move_to_prewhere.sh b/tests/queries/0_stateless/03155_test_move_to_prewhere.sh index b6980b3a23a..f33a6b3ef27 100755 --- a/tests/queries/0_stateless/03155_test_move_to_prewhere.sh +++ b/tests/queries/0_stateless/03155_test_move_to_prewhere.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE event_envoy ( timestamp_interval DateTime CODEC(DoubleDelta), @@ -18,7 +18,7 @@ ${CLICKHOUSE_CLIENT} -nq " INSERT INTO event_envoy SELECT now() - number, 'us-east-1', 'ch_super_fast' FROM numbers_mt(1e5); " -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE event_envoy_remote ( timestamp_interval DateTime CODEC(DoubleDelta), diff --git a/tests/queries/0_stateless/03156_default_multiquery_split.sh b/tests/queries/0_stateless/03156_default_multiquery_split.sh index 8ba2f46b786..d849fb5a162 100755 --- a/tests/queries/0_stateless/03156_default_multiquery_split.sh +++ b/tests/queries/0_stateless/03156_default_multiquery_split.sh @@ -53,6 +53,6 @@ SELECT * FROM TEST2 ORDER BY value; DROP TABLE TEST1; DROP TABLE TEST2; EOF -$CLICKHOUSE_CLIENT -m -n < "$SQL_FILE_NAME" +$CLICKHOUSE_CLIENT -m < "$SQL_FILE_NAME" rm "$SQL_FILE_NAME" diff --git a/tests/queries/0_stateless/03169_time_virtual_column.sh b/tests/queries/0_stateless/03169_time_virtual_column.sh index fef1de8c6f2..b289f39accb 100755 --- a/tests/queries/0_stateless/03169_time_virtual_column.sh +++ b/tests/queries/0_stateless/03169_time_virtual_column.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv sleep 1 -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " select _size, (dateDiff('millisecond', _time, now()) < 600000 AND dateDiff('millisecond', _time, now()) > 0) from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv'); " rm $CLICKHOUSE_TEST_UNIQUE_NAME.csv diff --git a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh index 7c567c0f58f..af702569794 100755 --- a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh +++ b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple(); INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever'); diff --git a/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh b/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh index ce53f467823..583257d8fd3 100755 --- a/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh +++ b/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh @@ -5,25 +5,25 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh echo -e 'a,b,c\n1,2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_skip_first_lines=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_skip_first_lines=0; SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%skip_first_lines%';" echo -e 'a,b,c\n"1",2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_numbers_from_strings=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_numbers_from_strings=0; SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%try_infer_numbers_from_strings%';" echo -e 'a,b,c\n"(1,2,3)",2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_strings_from_quoted_tuples=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_strings_from_quoted_tuples=0; SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%try_infer_strings_from_quoted_tuples%';" echo -e 'a\tb\tc\n1\t2\t3' > $CLICKHOUSE_TEST_UNIQUE_NAME.tsv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.tsv') SETTINGS input_format_tsv_skip_first_lines=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.tsv') SETTINGS input_format_tsv_skip_first_lines=0; SELECT count() from system.schema_inference_cache where format = 'TSV' and additional_format_info like '%skip_first_lines%';" diff --git a/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh b/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh index 4f217935123..c759cc34425 100755 --- a/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh +++ b/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n " +$CLICKHOUSE_CLIENT " DROP TABLE IF EXISTS t_unload_primary_key; CREATE TABLE t_unload_primary_key (a UInt64, b UInt64) @@ -26,7 +26,7 @@ for _ in {1..100}; do sleep 0.3 done -$CLICKHOUSE_CLIENT -n " +$CLICKHOUSE_CLIENT " SELECT name, active, primary_key_bytes_in_memory FROM system.parts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_unload_primary_key' ORDER BY name; DROP TABLE IF EXISTS t_unload_primary_key; " diff --git a/tests/queries/0_stateless/03199_dictionary_table_access.sh b/tests/queries/0_stateless/03199_dictionary_table_access.sh index 952b466b5da..14f017c7fbc 100755 --- a/tests/queries/0_stateless/03199_dictionary_table_access.sh +++ b/tests/queries/0_stateless/03199_dictionary_table_access.sh @@ -8,7 +8,7 @@ username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}" dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}" dicttablename="dict_table_${CLICKHOUSE_TEST_UNIQUE_NAME}" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE DICTIONARY IF NOT EXISTS ${dictname} ( id UInt64, @@ -26,15 +26,15 @@ ${CLICKHOUSE_CLIENT} -nm --query " SELECT * FROM ${dicttablename}; " -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT * FROM ${dictname}; " 2>&1 | grep -o ACCESS_DENIED | uniq -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT * FROM ${dicttablename}; " 2>&1 | grep -o ACCESS_DENIED | uniq -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS ${dicttablename} SYNC; DROP DICTIONARY IF EXISTS ${dictname}; DROP USER IF EXISTS ${username}; diff --git a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh index 8a77538f592..adbb0cb6de0 100755 --- a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh +++ b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS input_format_max_bytes_to_read_for_schema_inference=1000; SELECT additional_format_info from system.schema_inference_cache" From 5c54c7025bd87f1e4239354b5f3c0adff188dd3a Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 12 Aug 2024 08:25:54 +0000 Subject: [PATCH 2067/2361] Followup for #56996 --- src/Interpreters/HashJoin/HashJoinMethodsImpl.h | 4 ++-- src/Interpreters/HashJoin/JoinFeatures.h | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index aedd24630d1..39ba9fc6e93 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -354,8 +354,8 @@ size_t HashJoinMethods::joinRightColumns( { if (unlikely(current_offset >= max_joined_block_rows)) { - added_columns.offsets_to_replicate->resize_assume_reserved(i); - added_columns.filter.resize_assume_reserved(i); + added_columns.offsets_to_replicate->resize(i); + added_columns.filter.resize(i); break; } } diff --git a/src/Interpreters/HashJoin/JoinFeatures.h b/src/Interpreters/HashJoin/JoinFeatures.h index b8de606c51e..b39593e7cac 100644 --- a/src/Interpreters/HashJoin/JoinFeatures.h +++ b/src/Interpreters/HashJoin/JoinFeatures.h @@ -18,11 +18,25 @@ struct JoinFeatures static constexpr bool inner = KIND == JoinKind::Inner; static constexpr bool full = KIND == JoinKind::Full; + /** Whether we may need duplicate rows from the left table. + * For example, when we have row (key1, attr1) in left table + * and rows (key1, attr2), (key1, attr3) in right table, + * then we need to duplicate row (key1, attr1) for each of joined rows from right table, so result will be + * (key1, attr1, key1, attr2) + * (key1, attr1, key1, attr3) + */ static constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); + + /// Whether we need to filter rows from the left table that do not have matches in the right table. static constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); + + /// Whether we need to add default values for columns from the left table. static constexpr bool add_missing = (left || full) && !is_semi_join; + /// Whether we need to store flags for rows from the right table table + /// that indicates if they have matches in the left table. static constexpr bool need_flags = MapGetter, HashJoin::MapsAll>>::flagged; + static constexpr bool is_maps_all = std::is_same_v, HashJoin::MapsAll>; }; From 8f124710ef97801f020fb88e31bcd94529a112fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 12 Aug 2024 10:27:21 +0000 Subject: [PATCH 2068/2361] Remove Log engine from Kafka integration tests It doesn't work well when `thread_per_consumer` is used as writer can make readers starve when `shared_time_mutex` prefers writes over reads. 
--- tests/integration/test_storage_kafka/test.py | 26 ++++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 4b6c9922d74..52d6054c12a 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1019,7 +1019,7 @@ def test_kafka_formats(kafka_cluster, create_query_generator): DROP TABLE IF EXISTS test.kafka_{format_name}_mv; - CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS + CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv ENGINE=MergeTree ORDER BY tuple() AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}; """.format( topic_name=topic_name, @@ -2460,7 +2460,7 @@ def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): (generate_old_create_table_query, "kafka.*Committed offset 2.*virt2_[01]"), ( generate_new_create_table_query, - r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+", + r"kafka.*Saved offset 2 for topic-partition \[virt2_[01]:[0-9]+", ), ], ) @@ -2494,7 +2494,7 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) f""" {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY tuple() AS SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka; """ ) @@ -2729,7 +2729,7 @@ def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_ DROP TABLE IF EXISTS test.consumer; {writer_create_query}; {reader_create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY tuple() AS SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka; """ ) @@ -2865,7 +2865,7 @@ def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): {writer_create_query}; {reader_create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY tuple() AS SELECT key, value FROM test.kafka; """ ) @@ -3537,7 +3537,7 @@ def test_bad_reschedule(kafka_cluster, create_query_generator): f""" {create_query}; - CREATE MATERIALIZED VIEW test.destination Engine=Log AS + CREATE MATERIALIZED VIEW test.destination ENGINE=MergeTree ORDER BY tuple() AS SELECT key, now() as consume_ts, @@ -3745,7 +3745,7 @@ def test_kafka_unavailable(kafka_cluster, create_query_generator, do_direct_read f""" {create_query}; - CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS + CREATE MATERIALIZED VIEW test.destination_unavailable ENGINE=MergeTree ORDER BY tuple() AS SELECT key, now() as consume_ts, @@ -4267,12 +4267,12 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator {create_query}; DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv; - CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS + CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv ENGINE=MergeTree ORDER BY tuple() AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name} WHERE length(_error) = 0; DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv; - CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS + CREATE MATERIALIZED VIEW 
test.kafka_errors_{format_name}_mv ENGINE=MergeTree ORDER BY tuple() AS SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name} WHERE length(_error) > 0; """ @@ -4796,7 +4796,7 @@ def test_max_rows_per_message(kafka_cluster, create_query_generator): DROP TABLE IF EXISTS test.kafka; {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS SELECT key, value FROM test.kafka; """ ) @@ -4875,7 +4875,7 @@ def test_row_based_formats(kafka_cluster, create_query_generator): {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS SELECT key, value FROM test.{table_name}; INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); @@ -4982,7 +4982,7 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS SELECT key, value FROM test.{table_name}; INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; @@ -5362,7 +5362,7 @@ def test_formats_errors(kafka_cluster): input_format_with_names_use_header=0, format_schema='key_value_message:Message'; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS SELECT key, value FROM test.{table_name}; """ ) From 06ceaee50218507f49bfc714903240ad4b5d81a0 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Mon, 12 Aug 2024 11:09:45 +0000 Subject: [PATCH 2069/2361] Fix test 01903_correct_block_size_prediction_with_default - Don't allow random settings that affect the memory usage - Run two queries and compare the memory usage, rather than having an arbitrary hardcoded value --- ...ock_size_prediction_with_default.reference | 6 ++++ ...rect_block_size_prediction_with_default.sh | 36 +++++++++++++++++++ ...ect_block_size_prediction_with_default.sql | 13 ------- 3 files changed, 42 insertions(+), 13 deletions(-) create mode 100755 tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh delete mode 100644 tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference index b70a1cb7c75..2c66db91737 100644 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference @@ -1,3 +1,9 @@ 8 +8 +1 4 4 +1 +4 +4 +1 diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh new file mode 100755 index 00000000000..922dcb957e5 --- /dev/null +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Tags: no-random-merge-tree-settings, no-random-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +sql="toUInt16OrNull(arrayFirst((v, k) -> (k = '4Id'), arr[2], arr[1]))" + +# Create the table and fill it +$CLICKHOUSE_CLIENT -n --query=" + CREATE TABLE test_extract(str String, arr Array(Array(String)) ALIAS extractAllGroupsHorizontal(str, '\\W(\\w+)=(\"[^\"]*?\"|[^\",}]*)')) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY tuple(); + INSERT INTO test_extract (str) WITH range(8) as range_arr, arrayMap(x-> concat(toString(x),'Id'), range_arr) as key, arrayMap(x -> rand() % 8, range_arr) as val, arrayStringConcat(arrayMap((x,y) -> concat(x,'=',toString(y)), key, val),',') as str SELECT str FROM numbers(500000); + ALTER TABLE test_extract ADD COLUMN 15Id Nullable(UInt16) DEFAULT $sql;" + +function test() +{ + # Execute two queries and compare if they have similar memory usage: + # The first query uses the default column value, while the second explicitly uses the same SQL as the default value. + # Follow https://github.com/ClickHouse/ClickHouse/issues/17317 for more info about the issue + where=$1 + + uuid_1=$($CLICKHOUSE_CLIENT --query="SELECT generateUUIDv4()") + $CLICKHOUSE_CLIENT --query="SELECT uniq(15Id) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_1 + uuid_2=$($CLICKHOUSE_CLIENT --query="SELECT generateUUIDv4()") + $CLICKHOUSE_CLIENT --query="SELECT uniq($sql) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_2 + $CLICKHOUSE_CLIENT -n --query=" + SYSTEM FLUSH LOGS; + WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE event_time > now() - INTERVAL 5 MINUTE AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), + memory_2 AS (SELECT memory_usage FROM system.query_log WHERE event_time > now() - INTERVAL 5 MINUTE AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) + SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage FROM memory_1, memory_2;" +} + +test "" +test "PREWHERE 15Id < 4" +test "WHERE 15Id < 4" diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql deleted file mode 100644 index 2eec08635eb..00000000000 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql +++ /dev/null @@ -1,13 +0,0 @@ --- Tags: no-random-merge-tree-settings - -CREATE TABLE test_extract(str String, arr Array(Array(String)) ALIAS extractAllGroupsHorizontal(str, '\\W(\\w+)=("[^"]*?"|[^",}]*)')) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY tuple(); - -INSERT INTO test_extract (str) WITH range(8) as range_arr, arrayMap(x-> concat(toString(x),'Id'), range_arr) as key, arrayMap(x -> rand() % 8, range_arr) as val, arrayStringConcat(arrayMap((x,y) -> concat(x,'=',toString(y)), key, val),',') as str SELECT str FROM numbers(500000); - -ALTER TABLE test_extract ADD COLUMN `15Id` Nullable(UInt16) DEFAULT toUInt16OrNull(arrayFirst((v, k) -> (k = '4Id'), arr[2], arr[1])); - -SELECT uniq(15Id) FROM test_extract SETTINGS max_threads=1, max_memory_usage=100000000; - -SELECT uniq(15Id) FROM test_extract PREWHERE 15Id < 4 SETTINGS max_threads=1, max_memory_usage=100000000; - -SELECT uniq(15Id) FROM test_extract WHERE 15Id < 4 SETTINGS max_threads=1, max_memory_usage=100000000; From 38f3131e11d1d777101a975361eb585fd6263300 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 11:39:36 +0000 Subject: [PATCH 2070/2361] Fix review comments --- src/Columns/ColumnDynamic.cpp | 43 +++++++++++-------- src/Columns/ColumnDynamic.h | 12 ++++++ src/DataTypes/DataTypeDynamic.cpp 
| 4 +- .../Serializations/SerializationDynamic.cpp | 2 +- src/Functions/FunctionsConversion.cpp | 2 +- ..._read_shared_subcolumns_small.reference.j2 | 3 ++ 6 files changed, 43 insertions(+), 23 deletions(-) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 7246be29592..b1d28342a28 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -115,8 +115,8 @@ bool ColumnDynamic::addNewVariant(const DataTypePtr & new_variant, const String if (variant_info.variant_name_to_discriminator.contains(new_variant_name)) return true; - /// Check if we reached maximum number of variants (don't count shared variant). - if (variant_info.variant_names.size() - 1 == max_dynamic_types) + /// Check if we reached maximum number of variants. + if (!canAddNewVariant()) { /// Dynamic column should always have shared variant. if (!variant_info.variant_name_to_discriminator.contains(getSharedVariantTypeName())) @@ -194,8 +194,8 @@ std::vector * ColumnDynamic::combineVariants(const { const DataTypes & current_variants = assert_cast(*variant_info.variant_type).getVariants(); - /// We cannot combine Variants if total number of variants exceeds max_dynamic_types (don't count shared variant). - if (current_variants.size() + num_new_variants - 1 > max_dynamic_types) + /// We cannot combine Variants if total number of variants exceeds max_dynamic_types. + if (!canAddNewVariants(num_new_variants)) { /// Remember that we cannot combine our variant with this one, so we will not try to do it again. variants_with_failed_combination.insert(other_variant_info.variant_name); @@ -403,11 +403,11 @@ void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t auto shared_variant_discr = getSharedVariantDiscriminator(); variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping, shared_variant_discr); - /// We should process insertion from srs shared variant separately, because it can contain + /// We should process insertion from src shared variant separately, because it can contain /// values that should be extracted into our variants. insertRangeFrom above didn't insert /// values into our shared variant (we specified shared_variant_discr as special skip discriminator). - /// Check if srs shared variant is empty, nothing to do in this case. + /// Check if src shared variant is empty, nothing to do in this case. if (dynamic_src.getSharedVariant().empty()) return; @@ -466,7 +466,7 @@ void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t other_to_new_discriminators.reserve(dynamic_src.variant_info.variant_names.size()); /// Check if we cannot add any more new variants. In this case we will insert all new variants into shared variant. - if (variant_info.variant_names.size() - 1 == max_dynamic_types) + if (!canAddNewVariant()) { auto shared_variant_discr = getSharedVariantDiscriminator(); for (const auto & variant_name : dynamic_src.variant_info.variant_names) @@ -496,7 +496,7 @@ void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t /// Add new variants from sorted list until we reach max_dynamic_types. for (const auto & [_, discr] : new_variants_with_sizes) { - if (new_variants.size() - 1 == max_dynamic_types) + if (!canAddNewVariant(new_variants.size())) break; new_variants.push_back(src_variants[discr]); } @@ -846,13 +846,17 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_ /// Check if both values are in shared variant. 
if (left_discr == left_shared_variant_discr && right_discr == right_shared_variant_discr) { - /// Extract type names from both values. + /// First check if both type and value are equal. auto left_value = getSharedVariant().getDataAt(left_variant.offsetAt(n)); + auto right_value = right_dynamic.getSharedVariant().getDataAt(right_variant.offsetAt(m)); + if (left_value == right_value) + return 0; + + /// Extract type names from both values. ReadBufferFromMemory buf_left(left_value.data, left_value.size); auto left_data_type = decodeDataType(buf_left); auto left_data_type_name = left_data_type->getName(); - auto right_value = right_dynamic.getSharedVariant().getDataAt(right_variant.offsetAt(m)); ReadBufferFromMemory buf_right(right_value.data, right_value.size); auto right_data_type = decodeDataType(buf_right); auto right_data_type_name = right_data_type->getName(); @@ -977,8 +981,6 @@ ColumnPtr ColumnDynamic::compress() const void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { - LOG_DEBUG(getLogger("ColumnDynamic"), "takeDynamicStructureFromSourceColumns"); - if (!empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "takeDynamicStructureFromSourceColumns should be called only on empty Dynamic column"); @@ -1050,8 +1052,10 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source DataTypePtr result_variant_type; Statistics new_statistics(Statistics::Source::MERGE); + /// Reset max_dynamic_types to global_max_dynamic_types. + max_dynamic_types = global_max_dynamic_types; /// Check if the number of all dynamic types exceeds the limit. - if (all_variants.size() - 1 > global_max_dynamic_types) + if (!canAddNewVariants(0, all_variants.size())) { /// Create list of variants with their sizes and sort it. std::vector> variants_with_sizes; @@ -1065,11 +1069,13 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source /// Take first max_dynamic_types variants from sorted list and fill shared_variants_statistics with the rest. DataTypes result_variants; - result_variants.reserve(global_max_dynamic_types + 1); + result_variants.reserve(max_dynamic_types + 1); /// +1 for shared variant. + /// Add shared variant. + result_variants.push_back(getSharedVariantDataType()); for (const auto & [size, variant] : variants_with_sizes) { /// Add variant to the resulting variants list until we reach max_dynamic_types. - if (result_variants.size() < global_max_dynamic_types) + if (canAddNewVariant(result_variants.size())) result_variants.push_back(variant); /// Add all remaining variants into shared_variants_statistics until we reach its max size. else if (new_statistics.shared_variants_statistics.size() < Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) @@ -1078,8 +1084,6 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source break; } - /// Add shared variant. 
- result_variants.push_back(getSharedVariantDataType()); result_variant_type = std::make_shared(result_variants); } else @@ -1094,8 +1098,9 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source new_statistics.variants_statistics[variant_name] = total_sizes[variant_name]; statistics = std::make_shared(std::move(new_statistics)); - /// Reduce max_dynamic_types to the number of selected variants (without shared variant), so there will be no possibility + /// Reduce max_dynamic_types to the number of selected variants, so there will be no possibility /// to extend selected variants on inerts into this column during merges. + /// -1 because we don't count shared variant in the limit. max_dynamic_types = variant_info.variant_names.size() - 1; /// Now we have the resulting Variant that will be used in all merged columns. @@ -1112,7 +1117,7 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source { /// Try to find this variant in current source column. auto it = source_variant_info.variant_name_to_discriminator.find(variant_info.variant_names[i]); - if (it != source_variant_info.variant_name_to_discriminator.end()) + if (it != source_variant_info.variant_name_to_discriminator.end()) /// Add shared variant. variants_source_columns[i].push_back(source_dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(it->second)); } } diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index a595a990964..1f050c9079e 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -27,6 +27,10 @@ namespace DB class ColumnDynamic final : public COWHelper, ColumnDynamic> { public: + /// Maximum limit on dynamic types. We use ColumnVariant to store all the types, + /// so the limit cannot be greater then ColumnVariant::MAX_NESTED_COLUMNS. + /// We also always have reserved variant for shared variant. + static constexpr size_t MAX_DYNAMIC_TYPES_LIMIT = ColumnVariant::MAX_NESTED_COLUMNS - 1; static constexpr const char * SHARED_VARIANT_TYPE_NAME = "SharedVariant"; struct Statistics @@ -359,6 +363,14 @@ public: size_t getMaxDynamicTypes() const { return max_dynamic_types; } + /// Check if we can add new variant types. + /// Shared variant doesn't count in the limit but always presents, + /// so we should subtract 1 from the total types count. 
+ bool canAddNewVariants(size_t current_variants_count, size_t new_variants_count) { return current_variants_count + new_variants_count - 1 <= max_dynamic_types; } + bool canAddNewVariant(size_t current_variants_count) { return canAddNewVariants(current_variants_count, 1); } + bool canAddNewVariants(size_t new_variants_count) { return canAddNewVariants(variant_info.variant_names.size(), new_variants_count); } + bool canAddNewVariant() { return canAddNewVariants(variant_info.variant_names.size(), 1); } + void setVariantType(const DataTypePtr & variant_type); void setMaxDynamicPaths(size_t max_dynamic_type_); diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index e00638a50ab..04e76df57fe 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -73,8 +73,8 @@ static DataTypePtr create(const ASTPtr & arguments) auto * literal = argument->arguments->children[1]->as(); - if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() > ColumnVariant::MAX_NESTED_COLUMNS - 1) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 0 and 254"); + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() > ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 0 and {}", ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT); return std::make_shared(literal->value.get()); } diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 5fadb6e4de4..9cd0adcc2ed 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -288,7 +288,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD statistics.variants_statistics[variant->getName()] = variant_size; } - /// Second, rend statistics for shared variants. + /// Second, read statistics for shared variants. size_t statistics_size; readVarUInt(statistics_size, *structure_stream); String variant_name; diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 21b98cf505c..660efb46b37 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4335,7 +4335,7 @@ private: auto type = decodeDataType(buf); auto type_name = type->getName(); auto it = shared_variant_to_index.find(type_name); - /// Check if didn't created column for this variant yet. + /// Check if we didn't create column for this variant yet. 
if (it == shared_variant_to_index.end()) { it = shared_variant_to_index.emplace(type_name, variant_columns_from_shared_variant.size()).first; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 index 9c1f8fa45e8..de12c6b8737 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 @@ -1,3 +1,4 @@ +Memory Array(Array(Dynamic)) Array(Variant(String, UInt64)) LowCardinality(String) @@ -818,6 +819,7 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] +MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000 Array(Array(Dynamic)) Array(Variant(String, UInt64)) LowCardinality(String) @@ -1638,6 +1640,7 @@ str_79 0 [] [] [] [] [] [] [] [] [] [] [] +MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1 Array(Array(Dynamic)) Array(Variant(String, UInt64)) LowCardinality(String) From e9d16bc0549fe2a1fc44e50bd173ebc9358ed838 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 12 Aug 2024 08:32:14 +0200 Subject: [PATCH 2071/2361] Use new mc restart --- docker/test/stateless/run.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 69052cf4771..c70cbe1fe45 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -195,16 +195,18 @@ ORDER BY tuple()" # use async inserts to avoid creating too many parts ./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=1000000 batch_size=500 ./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=1000000 batch_size=500 + max_retries=100 retry=1 - while [ $retry -le $max_retries ]; do echo "clickminio restart attempt $retry:" - output=$(./mc admin service restart clickminio 2>&1) - echo "$output" + output=$(./mc admin service restart clickminio --wait --json 2>&1 | jq -r .status) + echo "Output of restart status: $output" - if echo "$output" | grep -q "Restarted \`clickminio\` successfully"; then + expected_output="success +success" + if [ "$output" = "$expected_output" ]; then echo "Restarted clickminio successfully." break fi @@ -218,7 +220,6 @@ if [ $retry -gt $max_retries ]; then echo "Failed to restart clickminio after $max_retries attempts." fi -./mc admin service restart clickminio ./mc admin trace clickminio > /test_output/minio.log & MC_ADMIN_PID=$! 
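A side note on the Dynamic type limit encoded by the canAddNewVariant/canAddNewVariants helpers introduced above (the shared variant is always present but is not counted against max_dynamic_types), and exercised by the Dynamic(max_types=2) test updated in the next patch. A minimal SQL sketch, with a hypothetical table name, of how the limit behaves in practice:

SET allow_experimental_dynamic_type = 1; -- may be required on versions where Dynamic is still experimental
-- With max_types=2, at most two types get dedicated variant subcolumns (chosen by how often
-- they occur when parts are merged); values of any further types are stored in the shared
-- variant instead of being rejected.
CREATE TABLE t_dynamic_limit (id UInt64, d Dynamic(max_types=2)) ENGINE=MergeTree ORDER BY id;
INSERT INTO t_dynamic_limit VALUES (1, 42), (2, 'str'), (3, [1, 2, 3]), (4, now());
-- dynamicType() reports the concrete type stored in each row, whether it lives in its own
-- subcolumn or in the shared variant.
SELECT d, dynamicType(d) FROM t_dynamic_limit ORDER BY id;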
From b58a22aba7f590ada33bfce95dd525c4c8a414ae Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 11:41:42 +0000 Subject: [PATCH 2072/2361] Update test --- .../03036_dynamic_read_shared_subcolumns_small.sql.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 index 0c123d5f6fe..dde4f3f53c3 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 @@ -6,6 +6,7 @@ drop table if exists test; {% for engine in ['Memory', 'MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1'] -%} +select '{{ engine }}'; create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; insert into test select number, number from numbers(10); From 737948470d6f2cd69f1842396984ec17aea49b65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 12 Aug 2024 11:54:34 +0000 Subject: [PATCH 2073/2361] Fix syntax --- tests/integration/test_storage_kafka/test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 52d6054c12a..bef90e1b9d3 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -4796,7 +4796,7 @@ def test_max_rows_per_message(kafka_cluster, create_query_generator): DROP TABLE IF EXISTS test.kafka; {create_query}; - CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.kafka; """ ) @@ -4875,7 +4875,7 @@ def test_row_based_formats(kafka_cluster, create_query_generator): {create_query}; - CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.{table_name}; INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); @@ -4982,7 +4982,7 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): {create_query}; - CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.{table_name}; INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; @@ -5362,7 +5362,7 @@ def test_formats_errors(kafka_cluster): input_format_with_names_use_header=0, format_schema='key_value_message:Message'; - CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY key, value AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.{table_name}; """ ) From 3172bf8d76534bb46ce54ae6af96e14443d2b59b Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 12 Aug 2024 12:23:32 +0000 Subject: [PATCH 2074/2361] better accounting of time for merge of projections --- .../Transforms/MergeJoinTransform.cpp | 2 +- src/Storages/MergeTree/MergeTask.cpp | 24 ++++++++++++++----- .../03221_merge_profile_events.reference | 2 +- 
.../03221_merge_profile_events.sql | 8 ++++--- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index ec7f567ea57..6abfa0fccd0 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -1282,7 +1282,7 @@ MergeJoinTransform::MergeJoinTransform( void MergeJoinTransform::onFinish() { - algorithm.logElapsed(merging_elapsed_ns / 1000000000ULL); + algorithm.logElapsed(static_cast(merging_elapsed_ns) / 1000000000ULL); } } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 39bac8f7c24..cb1921ede2b 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -461,8 +461,12 @@ void MergeTask::addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const Str MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::getContextForNextStage() { - ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); - ProfileEvents::increment(ProfileEvents::MergeHorizontalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + /// Do not increment for projection stage because time is already accounted in main task. + if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeHorizontalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } auto new_ctx = std::make_shared(); @@ -481,8 +485,12 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNextStage() { - ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); - ProfileEvents::increment(ProfileEvents::MergeVerticalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + /// Do not increment for projection stage because time is already accounted in main task. + if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeVerticalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } auto new_ctx = std::make_shared(); new_ctx->need_sync = std::move(ctx->need_sync); @@ -1026,8 +1034,12 @@ bool MergeTask::execute() UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapsed_ms; global_ctx->prev_elapsed_ms = current_elapsed_ms; - ProfileEvents::increment(current_stage->getTotalTimeProfileEvent(), stage_elapsed_ms); - ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); + /// Do not increment for projection stage because time is already accounted in main task. 
+ if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(current_stage->getTotalTimeProfileEvent(), stage_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); + } auto next_stage_context = current_stage->getContextForNextStage(); diff --git a/tests/queries/0_stateless/03221_merge_profile_events.reference b/tests/queries/0_stateless/03221_merge_profile_events.reference index 729e53eae79..d969717336b 100644 --- a/tests/queries/0_stateless/03221_merge_profile_events.reference +++ b/tests/queries/0_stateless/03221_merge_profile_events.reference @@ -1,3 +1,3 @@ Horizontal 1 20000 3 0 480000 1 1 1 1 Vertical 1 20000 1 2 480000 1 1 1 1 1 1 -Vertical 2 20020 4 2 480660 1 1 1 1 1 1 1 1 +Vertical 2 400000 2 6 12800000 1 1 1 1 1 1 1 1 1 1 diff --git a/tests/queries/0_stateless/03221_merge_profile_events.sql b/tests/queries/0_stateless/03221_merge_profile_events.sql index 787aff93ffc..1aa3dd266f8 100644 --- a/tests/queries/0_stateless/03221_merge_profile_events.sql +++ b/tests/queries/0_stateless/03221_merge_profile_events.sql @@ -58,12 +58,12 @@ DROP TABLE IF EXISTS t_merge_profile_events_2; DROP TABLE IF EXISTS t_merge_profile_events_3; -CREATE TABLE t_merge_profile_events_3 (id UInt64, v1 UInt64, v2 UInt64, PROJECTION p (SELECT sum(v1), sum(v2) GROUP BY id % 10)) +CREATE TABLE t_merge_profile_events_3 (id UInt64, v1 UInt64, v2 UInt64, PROJECTION p (SELECT v2, v2 * v2, v2 * 2, v2 * 10, v1 ORDER BY v1)) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; -INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(10000); -INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); OPTIMIZE TABLE t_merge_profile_events_3 FINAL; SYSTEM FLUSH LOGS; @@ -83,6 +83,8 @@ SELECT ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, ProfileEvents['MergeProjectionStageTotalMilliseconds'] > 0, ProfileEvents['MergeProjectionStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] <= duration_ms, + ProfileEvents['MergeTotalMilliseconds'] <= duration_ms FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_3' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; DROP TABLE IF EXISTS t_merge_profile_events_3; From 34643ee16c5452a08feac63aaaea605a8c912b37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 12 Aug 2024 13:30:25 +0000 Subject: [PATCH 2075/2361] Run test only from modified files --- tests/ci/integration_tests_runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 84718462ab5..7b9e7d1f63e 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -69,9 +69,9 @@ def get_changed_tests_to_run(pr_info, repo_path): return [] for fpath in changed_files: - if "tests/integration/test_" in fpath: + if re.search("tests/integration/test_.*/test.*\.py", fpath) is not None: logging.info("File %s changed and seems like integration test", fpath) - result.add(fpath.split("/")[2]) + result.add("/".join(fpath.split("/")[2:])) return filter_existing_tests(result, repo_path) From 
83d20bee00a4973cbffc3bcd9ba4073c79efb073 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 12 Aug 2024 07:42:55 -0600 Subject: [PATCH 2076/2361] Update 03221_create_if_not_exists_setting test to a .sql test --- ...221_create_if_not_exists_setting.reference | 4 --- ...=> 03221_create_if_not_exists_setting.sql} | 32 ++++--------------- 2 files changed, 6 insertions(+), 30 deletions(-) rename tests/queries/0_stateless/{03221_create_if_not_exists_setting.sh => 03221_create_if_not_exists_setting.sql} (51%) mode change 100755 => 100644 diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference b/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference index 8740b05c9ca..e69de29bb2d 100644 --- a/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference @@ -1,4 +0,0 @@ -57 -82 -0 -0 diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql old mode 100755 new mode 100644 similarity index 51% rename from tests/queries/0_stateless/03221_create_if_not_exists_setting.sh rename to tests/queries/0_stateless/03221_create_if_not_exists_setting.sql index 8dcde8977bc..59535981e7a --- a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sh +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql @@ -1,43 +1,23 @@ -#!/usr/bin/env bash -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh +SET create_if_not_exists=0; -- Default -# $CLICKHOUSE_CLIENT -mn -q "SET create_if_not_exists=0;" # Default -$CLICKHOUSE_CLIENT -mn -q " DROP TABLE IF EXISTS example_table; CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -" 2> /dev/null -# ensure failed error code -echo $? -$CLICKHOUSE_CLIENT -mn -q " +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -- { serverError TABLE_ALREADY_EXISTS } + DROP DATABASE IF EXISTS example_database; CREATE DATABASE example_database; -CREATE DATABASE example_database; -" 2> /dev/null -echo $? +CREATE DATABASE example_database; -- { serverError DATABASE_ALREADY_EXISTS } -$CLICKHOUSE_CLIENT -mn -q " SET create_if_not_exists=1; + DROP TABLE IF EXISTS example_table; CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -" -# ensure successful error code -echo $? - -$CLICKHOUSE_CLIENT -mn -q " -SET create_if_not_exists=1; DROP DATABASE IF EXISTS example_database; CREATE DATABASE example_database; CREATE DATABASE example_database; -" -echo $? 
-$CLICKHOUSE_CLIENT -mn -q " DROP DATABASE IF EXISTS example_database; -DROP TABLE IF EXISTS example_table; -" \ No newline at end of file +DROP TABLE IF EXISTS example_table; \ No newline at end of file From f7c6eabb498b47b21c13dbf55efbda551902d09c Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Mon, 12 Aug 2024 13:44:05 +0000 Subject: [PATCH 2077/2361] Small fix to filter by current_database in system.query_log --- ...01903_correct_block_size_prediction_with_default.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh index 922dcb957e5..e898a9d5ee8 100755 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh @@ -20,15 +20,15 @@ function test() # Follow https://github.com/ClickHouse/ClickHouse/issues/17317 for more info about the issue where=$1 - uuid_1=$($CLICKHOUSE_CLIENT --query="SELECT generateUUIDv4()") + uuid_1=$(cat /proc/sys/kernel/random/uuid) $CLICKHOUSE_CLIENT --query="SELECT uniq(15Id) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_1 - uuid_2=$($CLICKHOUSE_CLIENT --query="SELECT generateUUIDv4()") + uuid_2=$(cat /proc/sys/kernel/random/uuid) $CLICKHOUSE_CLIENT --query="SELECT uniq($sql) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_2 $CLICKHOUSE_CLIENT -n --query=" SYSTEM FLUSH LOGS; - WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE event_time > now() - INTERVAL 5 MINUTE AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), - memory_2 AS (SELECT memory_usage FROM system.query_log WHERE event_time > now() - INTERVAL 5 MINUTE AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) - SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage FROM memory_1, memory_2;" + WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), + memory_2 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) + SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage FROM memory_1, memory_2;" } test "" From c817a4e8adfba37b23156ed75e1a501068e10cc1 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 12 Aug 2024 07:45:51 -0600 Subject: [PATCH 2078/2361] Update settings.md to clarify create_if_not_exists behavior Co-authored-by: Nikita Taranov --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 22f73a03729..b9d5dde8522 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5640,6 +5640,6 @@ Default value: `0`. ## create_if_not_exists -Enable IF NOT EXISTS for CREATE statements by default. If either this setting or IF NOT EXISTS is specified, then no Exception will be thrown when trying to create a new table. +Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown. Default value: `false`. 
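To make the documented behaviour concrete, a short usage sketch distilled from the test earlier in this patch series (object names are illustrative):

SET create_if_not_exists = 0; -- default
CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id;
CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -- throws TABLE_ALREADY_EXISTS

SET create_if_not_exists = 1;
CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -- no-op, as if IF NOT EXISTS had been written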
From a32945614607c7043d2c04b0d431b781380ee946 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 13:47:10 +0000 Subject: [PATCH 2079/2361] Fix review comments --- src/Columns/ColumnObject.cpp | 18 ++++++------- src/DataTypes/DataTypeObject.cpp | 26 ++++++++++++------- src/DataTypes/DataTypeObject.h | 6 ++--- .../Serializations/ISerialization.cpp | 2 +- src/DataTypes/Serializations/ISerialization.h | 2 +- .../Serializations/SerializationJSON.cpp | 9 ++++--- src/Formats/JSONExtractTree.cpp | 9 ++++--- src/Functions/FunctionsJSON.cpp | 2 +- src/Functions/JSONPaths.cpp | 16 ++++++------ .../MergeTree/MergeTreeDataPartWriterWide.cpp | 16 ++++++------ .../MergeTree/MergeTreeReaderWide.cpp | 8 +++--- src/Storages/MergeTree/checkDataPart.cpp | 4 +-- 12 files changed, 65 insertions(+), 53 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index e7bb7639dd2..1f16c12f6ba 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -193,7 +193,7 @@ Field ColumnObject::operator[](size_t n) const const auto & shared_data_offsets = getSharedDataOffsets(); const auto [shared_paths, shared_values] = getSharedDataPathsAndValues(); - size_t start = shared_data_offsets[ssize_t(n) - 1]; + size_t start = shared_data_offsets[static_cast(n) - 1]; size_t end = shared_data_offsets[n]; for (size_t i = start; i != end; ++i) { @@ -495,7 +495,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co /// Check if src object doesn't have any paths in shared data in specified range. const auto & src_shared_data_offsets = src_object_column.getSharedDataOffsets(); - if (src_shared_data_offsets[ssize_t(start) - 1] == src_shared_data_offsets[ssize_t(start) + length - 1]) + if (src_shared_data_offsets[static_cast(start) - 1] == src_shared_data_offsets[static_cast(start) + length - 1]) { size_t current_size = size(); @@ -537,7 +537,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co size_t current_size = shared_data_offsets.size(); /// Use separate index to iterate over sorted src_dynamic_paths_for_shared_data. size_t src_dynamic_paths_for_shared_data_index = 0; - size_t offset = src_shared_data_offsets[ssize_t(row) - 1]; + size_t offset = src_shared_data_offsets[static_cast(row) - 1]; size_t end = src_shared_data_offsets[row]; for (size_t i = offset; i != end; ++i) { @@ -639,8 +639,8 @@ StringRef ColumnObject::serializeValueIntoArena(size_t n, Arena & arena, const c StringRef res(begin, 0); // Serialize all paths and values in binary format. 
const auto & shared_data_offsets = getSharedDataOffsets(); - size_t offset = shared_data_offsets[ssize_t(n) - 1]; - size_t end = shared_data_offsets[ssize_t(n)]; + size_t offset = shared_data_offsets[static_cast(n) - 1]; + size_t end = shared_data_offsets[static_cast(n)]; size_t num_paths = typed_paths.size() + dynamic_paths.size() + (end - offset); char * pos = arena.allocContinue(sizeof(size_t), begin); memcpy(pos, &num_paths, sizeof(size_t)); @@ -1259,8 +1259,8 @@ void ColumnObject::fillPathColumnFromSharedData(IColumn & path_column, StringRef { const auto & shared_data_array = assert_cast(*shared_data_column); const auto & shared_data_offsets = shared_data_array.getOffsets(); - size_t first_offset = shared_data_offsets[ssize_t(start) - 1]; - size_t last_offset = shared_data_offsets[ssize_t(end) - 1]; + size_t first_offset = shared_data_offsets[static_cast(start) - 1]; + size_t last_offset = shared_data_offsets[static_cast(end) - 1]; /// Check if we have at least one row with data. if (first_offset == last_offset) { @@ -1274,8 +1274,8 @@ void ColumnObject::fillPathColumnFromSharedData(IColumn & path_column, StringRef const auto & dynamic_serialization = getDynamicSerialization(); for (size_t i = start; i != end; ++i) { - size_t paths_start = shared_data_offsets[ssize_t(i) - 1]; - size_t paths_end = shared_data_offsets[ssize_t(i)]; + size_t paths_start = shared_data_offsets[static_cast(i) - 1]; + size_t paths_end = shared_data_offsets[static_cast(i)]; auto lower_bound_path_index = ColumnObject::findPathLowerBoundInSharedData(path, shared_data_paths, paths_start, paths_end); if (lower_bound_path_index != paths_end && shared_data_paths.getDataAt(lower_bound_path_index) == path) { diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 86041e580c4..7d05dded13d 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -39,9 +39,9 @@ namespace ErrorCodes DataTypeObject::DataTypeObject( const SchemaFormat & schema_format_, - const std::unordered_map & typed_paths_, - const std::unordered_set & paths_to_skip_, - const std::vector & path_regexps_to_skip_, + std::unordered_map typed_paths_, + std::unordered_set paths_to_skip_, + std::vector path_regexps_to_skip_, size_t max_dynamic_paths_, size_t max_dynamic_types_) : schema_format(schema_format_) @@ -59,10 +59,10 @@ DataTypeObject::DataTypeObject( throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP path prefix '{}'", typed_path, type->getName(), path_to_skip); } - for (const auto & path_regext_to_skip : paths_to_skip) + for (const auto & path_regex_to_skip : path_regexps_to_skip) { - if (re2::RE2::FullMatch(typed_path, re2::RE2(path_regext_to_skip))) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP REGEXP '{}'", typed_path, type->getName(), path_regext_to_skip); + if (re2::RE2::FullMatch(typed_path, re2::RE2(path_regex_to_skip))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP REGEXP '{}'", typed_path, type->getName(), path_regex_to_skip); } } } @@ -220,7 +220,13 @@ void replaceJSONTypeNameIfNeeded(String & type_name, size_t max_dynamic_paths, s { /// Replace only if we don't already have parameters in JSON type declaration. 
if (pos + 4 == type_name.size() || type_name[pos + 4] != '(') - type_name.replace(pos, 4, fmt::format("JSON(max_dynamic_paths={}, max_dynamic_types={})", max_dynamic_paths / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))); + type_name.replace( + pos, + 4, + fmt::format( + "JSON(max_dynamic_paths={}, max_dynamic_types={})", + max_dynamic_paths / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, + std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))); pos = type_name.find("JSON", pos + 4); } } @@ -275,7 +281,7 @@ String getSubPath(const String & path, const String & prefix) return path.substr(prefix.size() + 1); } -std::string_view getSubPath(const std::string_view & path, const String & prefix) +std::string_view getSubPath(std::string_view path, const String & prefix) { return path.substr(prefix.size() + 1); } @@ -305,7 +311,7 @@ std::unique_ptr DataTypeObject::getDynamicSubcolu } std::unique_ptr res = std::make_unique(std::make_shared(prefix, typed_paths_serializations)); - /// Keep all current constrains like limits and skip paths/prefixes/regexps. + /// Keep all current constraints like limits and skip paths/prefixes/regexps. res->type = std::make_shared(schema_format, typed_sub_paths, paths_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); /// If column was provided, we should create a column for the requested subcolumn. if (data.column) @@ -491,7 +497,7 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: } std::sort(path_regexps_to_skip.begin(), path_regexps_to_skip.end()); - return std::make_shared(schema_format, typed_paths, paths_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); + return std::make_shared(schema_format, std::move(typed_paths), std::move(paths_to_skip), std::move(path_regexps_to_skip), max_dynamic_paths, max_dynamic_types); } static DataTypePtr createJSON(const ASTPtr & arguments) diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 6eea777ed26..7eb2e7729de 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -25,9 +25,9 @@ public: explicit DataTypeObject( const SchemaFormat & schema_format_, - const std::unordered_map & typed_paths_ = {}, - const std::unordered_set & paths_to_skip_ = {}, - const std::vector & path_regexps_to_skip_ = {}, + std::unordered_map typed_paths_ = {}, + std::unordered_set paths_to_skip_ = {}, + std::vector path_regexps_to_skip_ = {}, size_t max_dynamic_paths_ = DEFAULT_MAX_SEPARATELY_STORED_PATHS, size_t max_dynamic_types_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES); diff --git a/src/DataTypes/Serializations/ISerialization.cpp b/src/DataTypes/Serializations/ISerialization.cpp index 9d0d9f7c6eb..338edc3a144 100644 --- a/src/DataTypes/Serializations/ISerialization.cpp +++ b/src/DataTypes/Serializations/ISerialization.cpp @@ -411,7 +411,7 @@ bool ISerialization::hasSubcolumnForPath(const SubstreamPath & path, size_t pref || path[last_elem].type == Substream::ObjectTypedPath; } -bool ISerialization::isFictitiousSubcolumn(const DB::ISerialization::SubstreamPath & path, size_t prefix_len) +bool ISerialization::isEphemeralSubcolumn(const DB::ISerialization::SubstreamPath & path, size_t prefix_len) { if (prefix_len == 0 || prefix_len > path.size()) return false; diff --git a/src/DataTypes/Serializations/ISerialization.h 
b/src/DataTypes/Serializations/ISerialization.h index 52f636bcc86..480d5a4f7c4 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -451,7 +451,7 @@ public: /// Returns true if subcolumn doesn't actually stores any data in column and doesn't require a separate stream /// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators);. - static bool isFictitiousSubcolumn(const SubstreamPath & path, size_t prefix_len); + static bool isEphemeralSubcolumn(const SubstreamPath & path, size_t prefix_len); protected: template diff --git a/src/DataTypes/Serializations/SerializationJSON.cpp b/src/DataTypes/Serializations/SerializationJSON.cpp index 346e6785abc..6ad326c65da 100644 --- a/src/DataTypes/Serializations/SerializationJSON.cpp +++ b/src/DataTypes/Serializations/SerializationJSON.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #if USE_SIMDJSON #include @@ -51,6 +52,8 @@ struct PathElements } } + boost::split(elements, path, boost::is_any_of(".")); + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); } @@ -108,12 +111,12 @@ void SerializationJSON::serializeTextImpl(const IColumn & column, size_t const auto & dynamic_paths = column_object.getDynamicPaths(); const auto & shared_data_offsets = column_object.getSharedDataOffsets(); const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); - size_t shared_data_offset = shared_data_offsets[ssize_t(row_num) - 1]; - size_t shared_data_end = shared_data_offsets[ssize_t(row_num)]; + size_t shared_data_offset = shared_data_offsets[static_cast(row_num) - 1]; + size_t shared_data_end = shared_data_offsets[static_cast(row_num)]; /// We need to convert the set of paths in this row to a JSON object. /// To do it, we first collect all the paths from current row, then we sort them - /// and construct the resulting JSON object by iterating over sorter list of paths. + /// and construct the resulting JSON object by iterating over sorted list of paths. /// For example: /// b.c, a.b, a.a, b.e, g, h.u.t -> a.a, a.b, b.c, b.e, g, h.u.t -> {"a" : {"a" : ..., "b" : ...}, "b" : {"c" : ..., "e" : ...}, "g" : ..., "h" : {"u" : {"t" : ...}}}. 
std::vector sorted_paths; diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 8203ccb5862..6340cd6f8e3 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1761,9 +1761,12 @@ private: if (paths_to_skip.contains(path)) return true; - auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path); - if (it != sorted_paths_to_skip.end() && it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it))) - return true; + if (!sorted_paths_to_skip.empty()) + { + auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path); + if (it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it))) + return true; + } for (const auto & regexp : path_regexps_to_skip) { diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 9bf329f6dbb..ccdf4e6df81 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -739,7 +739,7 @@ public: { NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, true, error)) + if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, /*allow_type_conversion=*/true, error)) return false; auto & col_vec = assert_cast &>(dest); col_vec.insertValue(value); diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index 4f29846319d..4a84cec711b 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -178,8 +178,8 @@ private: const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); for (size_t i = 0; i != shared_data_offsets.size(); ++i) { - size_t start = shared_data_offsets[ssize_t(i) - 1]; - size_t end = shared_data_offsets[ssize_t(i)]; + size_t start = shared_data_offsets[static_cast(i) - 1]; + size_t end = shared_data_offsets[static_cast(i)]; /// Merge sorted list of paths from shared data and sorted_dynamic_and_typed_paths size_t sorted_paths_index = 0; for (size_t j = start; j != end; ++j) @@ -187,7 +187,7 @@ private: auto shared_data_path = shared_data_paths->getDataAt(j); while (sorted_paths_index != sorted_dynamic_and_typed_paths.size() && sorted_dynamic_and_typed_paths[sorted_paths_index] < shared_data_path) { - auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + const auto & path = sorted_dynamic_and_typed_paths[sorted_paths_index]; /// If it's dynamic path include it only if it's not NULL. if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) data.insertData(path.data(), path.size()); @@ -199,7 +199,7 @@ private: for (; sorted_paths_index != sorted_dynamic_and_typed_paths.size(); ++sorted_paths_index) { - auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + const auto & path = sorted_dynamic_and_typed_paths[sorted_paths_index]; if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) data.insertData(path.data(), path.size()); } @@ -254,8 +254,8 @@ private: /// Iterate over all rows and extract types from dynamic values in shared data. 
for (size_t i = 0; i != shared_data_offsets.size(); ++i) { - size_t start = shared_data_offsets[ssize_t(i) - 1]; - size_t end = shared_data_offsets[ssize_t(i)]; + size_t start = shared_data_offsets[static_cast(i) - 1]; + size_t end = shared_data_offsets[static_cast(i)]; for (size_t j = start; j != end; ++j) { if (auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j))) @@ -288,8 +288,8 @@ private: const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); for (size_t i = 0; i != shared_data_offsets.size(); ++i) { - size_t start = shared_data_offsets[ssize_t(i) - 1]; - size_t end = shared_data_offsets[ssize_t(i)]; + size_t start = shared_data_offsets[static_cast(i) - 1]; + size_t end = shared_data_offsets[static_cast(i)]; /// Merge sorted list of paths and values from shared data and sorted_typed_and_dynamic_paths_with_types size_t sorted_paths_index = 0; for (size_t j = start; j != end; ++j) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index d507468f3ea..3edcce74b09 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -132,8 +132,8 @@ void MergeTreeDataPartWriterWide::addStreams( { assert(!substream_path.empty()); - /// Don't create streams for fictitious subcolumns that don't store any real data. - if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Don't create streams for ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return; auto full_stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); @@ -209,8 +209,8 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett { return [&, this] (const ISerialization::SubstreamPath & substream_path) -> WriteBuffer * { - /// Skip fictitious subcolumns that don't store any real data. - if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return nullptr; bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; @@ -375,8 +375,8 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( min_compress_block_size = settings.min_compress_block_size; getSerialization(name_and_type.name)->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { - /// Skip fictitious subcolumns that don't store any real data. - if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return; bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; @@ -417,8 +417,8 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( /// So that instead of the marks pointing to the end of the compressed block, there were marks pointing to the beginning of the next one. serialization->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { - /// Skip fictitious subcolumns that don't store any real data. 
- if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return; bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index 9ab45ec7b56..898bf5a2933 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -213,8 +213,8 @@ void MergeTreeReaderWide::addStreams( ISerialization::StreamCallback callback = [&] (const ISerialization::SubstreamPath & substream_path) { - /// Don't create streams for fictitious subcolumns that don't store any real data. - if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Don't create streams for ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return; auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(name_and_type, substream_path, data_part_info_for_read->getChecksums()); @@ -352,8 +352,8 @@ void MergeTreeReaderWide::prefetchForColumn( deserializePrefix(serialization, name_and_type, current_task_last_mark, cache, deserialize_states_cache); auto callback = [&](const ISerialization::SubstreamPath & substream_path) { - /// Skip fictitious subcolumns that don't store any real data. - if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return; auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(name_and_type, substream_path, data_part_info_for_read->getChecksums()); diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 6cc2b947424..3a22daa0011 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -215,8 +215,8 @@ static IMergeTreeDataPart::Checksums checkDataPart( { get_serialization(column)->enumerateStreams([&](const ISerialization::SubstreamPath & substream_path) { - /// Skip fictitious subcolumns that don't store any real data. - if (ISerialization::isFictitiousSubcolumn(substream_path, substream_path.size())) + /// Skip ephemeral subcolumns that don't store any real data. 
+ if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) return; auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(column, substream_path, ".bin", data_part_storage); From 9834457c26011bb785d4c62fbbb758afd6a6ef12 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 13:48:18 +0000 Subject: [PATCH 2080/2361] Fix copying arguments --- src/DataTypes/DataTypeObject.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 7d05dded13d..9f76d04d7da 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -45,9 +45,9 @@ DataTypeObject::DataTypeObject( size_t max_dynamic_paths_, size_t max_dynamic_types_) : schema_format(schema_format_) - , typed_paths(typed_paths_) - , paths_to_skip(paths_to_skip_) - , path_regexps_to_skip(path_regexps_to_skip_) + , typed_paths(std::move(typed_paths_)) + , paths_to_skip(std::move(paths_to_skip_)) + , path_regexps_to_skip(std::move(path_regexps_to_skip_)) , max_dynamic_paths(max_dynamic_paths_) , max_dynamic_types(max_dynamic_types_) { From 6cde029ed9cd6a22937193e12863974f1fa8f160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 12 Aug 2024 13:48:44 +0000 Subject: [PATCH 2081/2361] Fix style --- tests/ci/integration_tests_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 7b9e7d1f63e..f5dbef4f6db 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -69,7 +69,7 @@ def get_changed_tests_to_run(pr_info, repo_path): return [] for fpath in changed_files: - if re.search("tests/integration/test_.*/test.*\.py", fpath) is not None: + if re.search(r"tests/integration/test_.*/test.*\.py", fpath) is not None: logging.info("File %s changed and seems like integration test", fpath) result.add("/".join(fpath.split("/")[2:])) return filter_existing_tests(result, repo_path) From 8522776c33b334745e8743a075f0456aee3720c9 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 13:49:01 +0000 Subject: [PATCH 2082/2361] Remove unused code --- src/DataTypes/Serializations/SerializationJSON.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationJSON.cpp b/src/DataTypes/Serializations/SerializationJSON.cpp index 6ad326c65da..092ccd1c5a5 100644 --- a/src/DataTypes/Serializations/SerializationJSON.cpp +++ b/src/DataTypes/Serializations/SerializationJSON.cpp @@ -1,7 +1,6 @@ #include #include #include -#include #if USE_SIMDJSON #include @@ -52,8 +51,6 @@ struct PathElements } } - boost::split(elements, path, boost::is_any_of(".")); - elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); } From da5b9582a990f7a2c05c1a3dede3739fb9cbfcae Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Mon, 12 Aug 2024 13:54:17 +0000 Subject: [PATCH 2083/2361] Fix indent --- .../01903_correct_block_size_prediction_with_default.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh index e898a9d5ee8..075d9a1dacf 100755 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh @@ -26,9 +26,9 @@ function test() 
$CLICKHOUSE_CLIENT --query="SELECT uniq($sql) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_2 $CLICKHOUSE_CLIENT -n --query=" SYSTEM FLUSH LOGS; - WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), - memory_2 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) - SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage FROM memory_1, memory_2;" + WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), + memory_2 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) + SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage FROM memory_1, memory_2;" } test "" From eb3ffb71847fc5a204af31665a4594b0918fc1d7 Mon Sep 17 00:00:00 2001 From: divanik Date: Mon, 12 Aug 2024 15:09:16 +0000 Subject: [PATCH 2084/2361] Add supportsReplication --- src/Storages/MergeTree/MergeTreeData.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 625b1281c61..f925cb773f5 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2351,7 +2351,7 @@ size_t MergeTreeData::clearOldTemporaryDirectories(const String & root_path, siz /// We don't control the amount of refs for temporary parts so we cannot decide can we remove blobs /// or not. So we are not doing it bool keep_shared = false; - if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) + if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication && supportsReplication()) { LOG_WARNING(log, "Since zero-copy replication is enabled we are not going to remove blobs from shared storage for {}", full_path); keep_shared = true; From 897b8d5a88a69b8831ab489c2bea9d32d0cf06dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Mon, 12 Aug 2024 15:21:01 +0000 Subject: [PATCH 2085/2361] Try to give more chances to `node2` to steal some work --- tests/integration/test_storage_s3_queue/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 8f197e09e61..00ef8499594 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1300,7 +1300,7 @@ where zookeeper_path ilike '%{table_name}%' and status = 'Processed' and rows_pr pytest.param("unordered", 1), pytest.param("unordered", 8), pytest.param("ordered", 1), - pytest.param("ordered", 8), + pytest.param("ordered", 2), ], ) def test_shards_distributed(started_cluster, mode, processing_threads): @@ -1311,7 +1311,7 @@ def test_shards_distributed(started_cluster, mode, processing_threads): keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 300 - row_num = 50 + row_num = 300 total_rows = row_num * files_to_generate shards_num = 2 From a39b9cf643bff565728be4083eb024ff5254f363 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sat, 11 May 2024 13:05:24 +0000 Subject: [PATCH 2086/2361] Un-screw usearch's build description No 
directory 'SimSIMD-map' exists, the build only worked because SimSIMD support in usearch was (accidentally?) disabled. This commit corrects the build description. SimSIMD support in usearch will be enabled by a later commit. --- contrib/CMakeLists.txt | 2 +- contrib/usearch-cmake/CMakeLists.txt | 8 +++----- src/Storages/MergeTree/MergeTreeIndexUSearch.h | 1 - 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index b33e7083e32..98b992e1080 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -207,7 +207,7 @@ if (ARCH_S390X) endif() add_contrib (annoy-cmake annoy) -option(ENABLE_USEARCH "Enable USearch (Approximate Neighborhood Search, HNSW) support" ${ENABLE_LIBRARIES}) +option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES}) if (ENABLE_USEARCH) add_contrib (FP16-cmake FP16) add_contrib (robin-map-cmake robin-map) diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 29fbe57106c..0b6f60e106b 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -1,9 +1,7 @@ -set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch") -set(USEARCH_SOURCE_DIR "${USEARCH_PROJECT_DIR}/include") - set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16") set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map") -set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD-map") +set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") +set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch") add_library(_usearch INTERFACE) @@ -11,7 +9,7 @@ target_include_directories(_usearch SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include ${ROBIN_MAP_PROJECT_DIR}/include ${SIMSIMD_PROJECT_DIR}/include - ${USEARCH_SOURCE_DIR}) + ${USEARCH_PROJECT_DIR}/include) add_library(ch_contrib::usearch ALIAS _usearch) target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH) diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.h b/src/Storages/MergeTree/MergeTreeIndexUSearch.h index 41de94402c9..e6068790d22 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.h +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.h @@ -113,4 +113,3 @@ private: #endif - From d7211f9d12d33c54929fb24991fe7e46939be67d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Aug 2024 09:22:38 +0000 Subject: [PATCH 2087/2361] Fix CMake integration of usearch and annoy Registers usearch and annoy properly via configure_config.cmake and config.h.in like all other 3rd party libs, instead of (mis)using target_compile_definitions. 
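
The pattern this commit switches to is visible in the hunks that follow: configure_config.cmake sets USE_ANNOY/USE_USEARCH only when the corresponding ch_contrib target exists, src/Common/config.h.in turns them into always-defined 0/1 macros via `#cmakedefine01`, and the sources test them with `#if USE_*` after including "config.h", instead of relying on target_compile_definitions plus `#ifdef ENABLE_*`. A minimal standalone sketch of that flow is below; it is not part of this patch, and the stand-in `#define`, the `main()` wrapper, and the printed strings are illustrative only (the real macro value comes from the CMake-generated config.h):

    // Sketch of the USE_* feature-flag flow described above (illustrative, not from the patch).
    // In the real tree, CMake generates config.h from src/Common/config.h.in, where
    // "#cmakedefine01 USE_USEARCH" expands to "#define USE_USEARCH 1" or "#define USE_USEARCH 0".
    #define USE_USEARCH 1 // stand-in for the generated config.h; set to 0 to emulate a build without ch_contrib::usearch

    #include <iostream>

    int main()
    {
        // Because #cmakedefine01 always defines the macro (as 0 or 1), plain "#if USE_USEARCH"
        // works and -Wundef can flag typos in the macro name. The previous
        // target_compile_definitions / "#ifdef ENABLE_USEARCH" approach silently compiles the
        // guarded code out whenever the definition happens to be missing.
    #if USE_USEARCH
        std::cout << "vector index code compiled in\n";
    #else
        std::cout << "vector index code compiled out\n";
    #endif
        return 0;
    }
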
--- contrib/annoy-cmake/CMakeLists.txt | 1 - contrib/usearch-cmake/CMakeLists.txt | 1 - src/Common/config.h.in | 2 ++ src/Processors/QueryPlan/ReadFromMergeTree.cpp | 6 ++++-- src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp | 4 ++-- src/Storages/MergeTree/MergeTreeIndexAnnoy.h | 4 +++- src/Storages/MergeTree/MergeTreeIndexUSearch.cpp | 6 +++--- src/Storages/MergeTree/MergeTreeIndexUSearch.h | 8 ++++++-- src/Storages/MergeTree/MergeTreeIndices.cpp | 4 ++-- src/Storages/MergeTree/MergeTreeIndices.h | 5 +++-- src/configure_config.cmake | 6 ++++++ 11 files changed, 31 insertions(+), 16 deletions(-) diff --git a/contrib/annoy-cmake/CMakeLists.txt b/contrib/annoy-cmake/CMakeLists.txt index bdef7d92132..f6579c12412 100644 --- a/contrib/annoy-cmake/CMakeLists.txt +++ b/contrib/annoy-cmake/CMakeLists.txt @@ -20,5 +20,4 @@ add_library(_annoy INTERFACE) target_include_directories(_annoy SYSTEM INTERFACE ${ANNOY_SOURCE_DIR}) add_library(ch_contrib::annoy ALIAS _annoy) -target_compile_definitions(_annoy INTERFACE ENABLE_ANNOY) target_compile_definitions(_annoy INTERFACE ANNOYLIB_MULTITHREADED_BUILD) diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 0b6f60e106b..6be622275ae 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -12,4 +12,3 @@ target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/include) add_library(ch_contrib::usearch ALIAS _usearch) -target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH) diff --git a/src/Common/config.h.in b/src/Common/config.h.in index e3f8882850f..0fa5f4313b2 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -58,6 +58,8 @@ #cmakedefine01 USE_FILELOG #cmakedefine01 USE_ODBC #cmakedefine01 USE_BLAKE3 +#cmakedefine01 USE_ANNOY +#cmakedefine01 USE_USEARCH #cmakedefine01 USE_SKIM #cmakedefine01 USE_PRQL #cmakedefine01 USE_ULID diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 901d7c61167..0ec7bde933c 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -52,6 +52,8 @@ #include #include +#include "config.h" + using namespace DB; namespace @@ -1476,11 +1478,11 @@ static void buildIndexes( MergeTreeIndexConditionPtr condition; if (index_helper->isVectorSearch()) { -#ifdef ENABLE_ANNOY +#if USE_ANNOY if (const auto * annoy = typeid_cast(index_helper.get())) condition = annoy->createIndexCondition(query_info, context); #endif -#ifdef ENABLE_USEARCH +#if USE_USEARCH if (const auto * usearch = typeid_cast(index_helper.get())) condition = usearch->createIndexCondition(query_info, context); #endif diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp index b68e48eeb3a..cec0e0926f0 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp @@ -1,7 +1,7 @@ -#ifdef ENABLE_ANNOY - #include +#if USE_ANNOY + #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h index 282920c608e..8e0e0e621a0 100644 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h +++ b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h @@ -1,6 +1,8 @@ #pragma once -#ifdef ENABLE_ANNOY +#include "config.h" + +#if USE_ANNOY #include diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp index efd9bb754e1..5a532803d84 100644 --- 
a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp @@ -1,10 +1,10 @@ -#ifdef ENABLE_USEARCH +#include + +#if USE_USEARCH #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpass-failed" -#include - #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.h b/src/Storages/MergeTree/MergeTreeIndexUSearch.h index e6068790d22..6923ef2f807 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.h +++ b/src/Storages/MergeTree/MergeTreeIndexUSearch.h @@ -1,12 +1,16 @@ #pragma once -#ifdef ENABLE_USEARCH +#include "config.h" -#include +#if USE_USEARCH #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpass-failed" + +#include + #include + #pragma clang diagnostic pop namespace DB diff --git a/src/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp index bded961db8e..32ac629e706 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.cpp +++ b/src/Storages/MergeTree/MergeTreeIndices.cpp @@ -127,12 +127,12 @@ MergeTreeIndexFactory::MergeTreeIndexFactory() registerCreator("hypothesis", hypothesisIndexCreator); registerValidator("hypothesis", hypothesisIndexValidator); -#ifdef ENABLE_ANNOY +#if USE_ANNOY registerCreator("annoy", annoyIndexCreator); registerValidator("annoy", annoyIndexValidator); #endif -#ifdef ENABLE_USEARCH +#if USE_USEARCH registerCreator("usearch", usearchIndexCreator); registerValidator("usearch", usearchIndexValidator); #endif diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 1be73e1c811..355f1b69356 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -15,6 +15,7 @@ #include #include +#include "config.h" constexpr auto INDEX_FILE_PREFIX = "skp_idx_"; @@ -230,12 +231,12 @@ void bloomFilterIndexValidator(const IndexDescription & index, bool attach); MergeTreeIndexPtr hypothesisIndexCreator(const IndexDescription & index); void hypothesisIndexValidator(const IndexDescription & index, bool attach); -#ifdef ENABLE_ANNOY +#if USE_ANNOY MergeTreeIndexPtr annoyIndexCreator(const IndexDescription & index); void annoyIndexValidator(const IndexDescription & index, bool attach); #endif -#ifdef ENABLE_USEARCH +#if USE_USEARCH MergeTreeIndexPtr usearchIndexCreator(const IndexDescription& index); void usearchIndexValidator(const IndexDescription& index, bool attach); #endif diff --git a/src/configure_config.cmake b/src/configure_config.cmake index 5b24f79ef6e..702875b1f40 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -164,6 +164,12 @@ endif() if (TARGET ch_contrib::bcrypt) set(USE_BCRYPT 1) endif() +if (TARGET ch_contrib::annoy) + set(USE_ANNOY 1) +endif() +if (TARGET ch_contrib::usearch) + set(USE_USEARCH 1) +endif() if (TARGET ch_contrib::ssh) set(USE_SSH 1) endif() From 7c419399216a714f9dcffe7835f951718851bceb Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Aug 2024 09:36:39 +0000 Subject: [PATCH 2088/2361] Fix test results (no analyzer support yet ...) 
--- tests/queries/0_stateless/02354_vector_search_queries.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 64051aa8544..87d27be0ea4 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -8,6 +8,8 @@ SET allow_experimental_annoy_index = 1; SET allow_experimental_usearch_index = 1; +SET enable_analyzer = 0; + SELECT 'ARRAY, 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block'; DROP TABLE IF EXISTS tab_annoy; From 218421c255cadbe65406e6a040d05942cc4efc3e Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Aug 2024 09:47:50 +0000 Subject: [PATCH 2089/2361] Remove Annoy indexes Annoy indexes fell out of favor in the community, at least when it comes to vector databases. Such indexes work okay-ish low dimensions but they suffers badly from a curse of dimensionality which makes them inapt for a high number of dimensions. Now that Annoy is gone, issue (*) also disappears and we can drop 'no-ubsan', 'no-cpu-aarch64', and 'no-asan' from tests. (*) spotify/annoy#456 --- .gitmodules | 3 - contrib/CMakeLists.txt | 1 - contrib/annoy | 1 - contrib/annoy-cmake/CMakeLists.txt | 23 - .../mergetree-family/annindexes.md | 87 +--- src/CMakeLists.txt | 4 - src/Common/config.h.in | 1 - src/Core/Settings.h | 6 +- src/Databases/DatabaseReplicated.cpp | 1 - src/Interpreters/InterpreterCreateQuery.cpp | 2 - src/Parsers/ASTIndexDeclaration.h | 1 - src/Parsers/ParserCreateIndexQuery.cpp | 4 +- src/Parsers/ParserCreateQuery.cpp | 4 +- .../QueryPlan/ReadFromMergeTree.cpp | 5 - .../MergeTree/MergeTreeIOSettings.cpp | 1 - src/Storages/MergeTree/MergeTreeIOSettings.h | 2 - .../MergeTree/MergeTreeIndexAnnoy.cpp | 416 ------------------ src/Storages/MergeTree/MergeTreeIndexAnnoy.h | 114 ----- src/Storages/MergeTree/MergeTreeIndices.cpp | 4 - src/Storages/MergeTree/MergeTreeIndices.h | 5 - src/configure_config.cmake | 3 - .../02354_vector_search_bugs.reference | 10 - .../0_stateless/02354_vector_search_bugs.sql | 75 +--- ...ector_search_default_granularity.reference | 6 +- ...2354_vector_search_default_granularity.sql | 21 +- ...r_search_index_creation_negative.reference | 2 +- ..._vector_search_index_creation_negative.sql | 22 +- .../02354_vector_search_queries.reference | 99 ----- .../02354_vector_search_queries.sql | 119 +---- 29 files changed, 32 insertions(+), 1010 deletions(-) delete mode 160000 contrib/annoy delete mode 100644 contrib/annoy-cmake/CMakeLists.txt delete mode 100644 src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp delete mode 100644 src/Storages/MergeTree/MergeTreeIndexAnnoy.h diff --git a/.gitmodules b/.gitmodules index 7fdfb1103c5..0a66031de8d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -230,9 +230,6 @@ [submodule "contrib/minizip-ng"] path = contrib/minizip-ng url = https://github.com/zlib-ng/minizip-ng -[submodule "contrib/annoy"] - path = contrib/annoy - url = https://github.com/ClickHouse/annoy [submodule "contrib/qpl"] path = contrib/qpl url = https://github.com/intel/qpl diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 98b992e1080..dc2ad2a3150 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -205,7 +205,6 @@ add_contrib (morton-nd-cmake morton-nd) if (ARCH_S390X) add_contrib(crc32-s390x-cmake crc32-s390x) endif() -add_contrib (annoy-cmake annoy) option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES}) 
if (ENABLE_USEARCH) diff --git a/contrib/annoy b/contrib/annoy deleted file mode 160000 index f2ac8e7b48f..00000000000 --- a/contrib/annoy +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f2ac8e7b48f9a9cf676d3b58286e5455aba8e956 diff --git a/contrib/annoy-cmake/CMakeLists.txt b/contrib/annoy-cmake/CMakeLists.txt deleted file mode 100644 index f6579c12412..00000000000 --- a/contrib/annoy-cmake/CMakeLists.txt +++ /dev/null @@ -1,23 +0,0 @@ -option(ENABLE_ANNOY "Enable Annoy index support" ${ENABLE_LIBRARIES}) - -# Annoy index should be disabled with undefined sanitizer. Because of memory storage optimizations -# (https://github.com/ClickHouse/annoy/blob/9d8a603a4cd252448589e84c9846f94368d5a289/src/annoylib.h#L442-L463) -# UBSan fails and leads to crash. Simmilar issue is already opened in Annoy repo -# https://github.com/spotify/annoy/issues/456 -# Problem with aligment can lead to errors like -# (https://stackoverflow.com/questions/46790550/c-undefined-behavior-strict-aliasing-rule-or-incorrect-alignment) -# or will lead to crash on arm https://developer.arm.com/documentation/ka003038/latest -# This issues should be resolved before annoy became non-experimental (--> setting "allow_experimental_annoy_index") -if ((NOT ENABLE_ANNOY) OR (SANITIZE STREQUAL "undefined") OR (ARCH_AARCH64)) - message (STATUS "Not using annoy") - return() -endif() - -set(ANNOY_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/annoy") -set(ANNOY_SOURCE_DIR "${ANNOY_PROJECT_DIR}/src") - -add_library(_annoy INTERFACE) -target_include_directories(_annoy SYSTEM INTERFACE ${ANNOY_SOURCE_DIR}) - -add_library(ch_contrib::annoy ALIAS _annoy) -target_compile_definitions(_annoy INTERFACE ANNOYLIB_MULTITHREADED_BUILD) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 5a81313f62e..9a80542522e 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -126,81 +126,8 @@ was specified for ANN indexes, the default value is 100 million. # Available ANN Indexes {#available_ann_indexes} -- [Annoy](/docs/en/engines/table-engines/mergetree-family/annindexes.md#annoy-annoy) - - [USearch](/docs/en/engines/table-engines/mergetree-family/annindexes.md#usearch-usearch) -## Annoy {#annoy} - -Annoy indexes are currently experimental, to use them you first need to `SET allow_experimental_annoy_index = 1`. They are also currently -disabled on ARM due to memory safety problems with the algorithm. - -This type of ANN index is based on the [Annoy library](https://github.com/spotify/annoy) which recursively divides the space into random -linear surfaces (lines in 2D, planes in 3D etc.). - -
- -
- -Syntax to create an Annoy index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column: - -```sql -CREATE TABLE table_with_annoy_index -( - id Int64, - vectors Array(Float32), - INDEX [ann_index_name] vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] -) -ENGINE = MergeTree -ORDER BY id; -``` - -Annoy currently supports two distance functions: -- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space - ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)). -- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors - ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). - -For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no -distance function was specified during index creation, `L2Distance` is used as default. - -Parameter `NumTrees` is the number of trees which the algorithm creates (default if not specified: 100). Higher values of `NumTree` mean -more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes. - -:::note -All arrays must have same length. To avoid errors, you can use a -[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT constraint_name_1 CHECK -length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default values) are not supported. -::: - -The creation of Annoy indexes (whenever a new part is build, e.g. at the end of a merge) is a relatively slow process. You can increase -setting `max_threads_for_annoy_index_creation` (default: 4) which controls how many threads are used to create an Annoy index. Please be -careful with this setting, it is possible that multiple indexes are created in parallel in which case there can be overparallelization. - -Setting `annoy_index_search_k_nodes` (default: `NumTrees * LIMIT`) determines how many tree nodes are inspected during SELECTs. Larger -values mean more accurate results at the cost of longer query runtime: - -```sql -SELECT * -FROM table_name -ORDER BY L2Distance(vectors, Point) -LIMIT N -SETTINGS annoy_index_search_k_nodes=100; -``` - -:::note -The Annoy index currently does not work with per-table, non-default `index_granularity` settings (see -[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml. -::: - ## USearch {#usearch} This type of ANN index is based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW @@ -211,6 +138,8 @@ that are expensive to load and compare. The library also has several hardware-sp distance computations on modern Arm (NEON and SVE) and x86 (AVX2 and AVX-512) CPUs and OS-specific optimizations to allow efficient navigation around immutable persistent files, without loading them into RAM. +USearch indexes are currently experimental, to use them you first need to `SET allow_experimental_usearch_index = 1`. +
-
- -Syntax to create an USearch index over an [Array](../../../sql-reference/data-types/array.md) column: - -```sql -CREATE TABLE table_with_usearch_index -( - id Int64, - vectors Array(Float32), - INDEX [ann_index_name] vectors TYPE usearch([Distance[, ScalarKind]]) [GRANULARITY N] -) -ENGINE = MergeTree -ORDER BY id; -``` - -USearch currently supports two distance functions: -- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space - ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)). -- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors - ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). - -USearch allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`. If no scalar kind -was specified during index creation, `f16` is used as default. - -For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no -distance function was specified during index creation, `L2Distance` is used as default. - -:::note -All arrays must have same length. To avoid errors, you can use a -[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT constraint_name_1 CHECK -length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default values) are not supported. -::: - -:::note -The USearch index currently does not work with per-table, non-default `index_granularity` settings (see -[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml. -::: - diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 23ad12bb017..e9f3b95dbc1 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -907,9 +907,9 @@ class IColumn; M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions", 0) \ M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \ M(Bool, allow_experimental_time_series_table, false, "Allows experimental TimeSeries table engine", 0) \ + M(Bool, allow_experimental_vector_similarity_index, false, "Allow experimental vector similarity index", 0) \ M(Bool, allow_experimental_variant_type, false, "Allow Variant data type", 0) \ M(Bool, allow_experimental_dynamic_type, false, "Allow Dynamic data type", 0) \ - M(Bool, allow_experimental_usearch_index, false, "Allows to use USearch index. Disabled by default because this feature is experimental", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ M(UInt64, max_limit_for_ann_queries, 1'000'000, "SELECT queries with LIMIT bigger than this setting cannot use ANN indexes. 
Helps to prevent memory overflows in ANN search indexes.", 0) \ M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \ @@ -1036,6 +1036,7 @@ class IColumn; MAKE_OBSOLETE(M, Bool, allow_experimental_annoy_index, false) \ MAKE_OBSOLETE(M, UInt64, max_threads_for_annoy_index_creation, 4) \ MAKE_OBSOLETE(M, Int64, annoy_index_search_k_nodes, -1) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_usearch_index, false) \ MAKE_OBSOLETE(M, Bool, optimize_move_functions_out_of_any, false) \ MAKE_OBSOLETE(M, Bool, allow_experimental_undrop_table_query, true) \ MAKE_OBSOLETE(M, Bool, allow_experimental_s3queue, true) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 511723f1873..8fabd1ecf91 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -85,6 +85,7 @@ static std::initializer_listsetSetting("allow_experimental_object_type", 1); query_context->setSetting("allow_experimental_variant_type", 1); query_context->setSetting("allow_experimental_dynamic_type", 1); - query_context->setSetting("allow_experimental_usearch_index", 1); + query_context->setSetting("allow_experimental_vector_similarity_index", 1); query_context->setSetting("allow_experimental_bigint_types", 1); query_context->setSetting("allow_experimental_window_functions", 1); query_context->setSetting("allow_experimental_geo_types", 1); diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a1ffcf07588..95143031707 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -787,8 +787,8 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti if (index_desc.type == INVERTED_INDEX_NAME && !settings.allow_experimental_inverted_index) throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'"); /// ---- - if (index_desc.type == "usearch" && !settings.allow_experimental_usearch_index) - throw Exception(ErrorCodes::INCORRECT_QUERY, "USearch index is disabled. Turn on allow_experimental_usearch_index"); + if (index_desc.type == "vector_similarity" && !settings.allow_experimental_vector_similarity_index) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index is disabled. 
Turn on allow_experimental_vector_similarity_index"); properties.indices.push_back(index_desc); } diff --git a/src/Parsers/ASTIndexDeclaration.h b/src/Parsers/ASTIndexDeclaration.h index 90645f12b7c..72f3f017a99 100644 --- a/src/Parsers/ASTIndexDeclaration.h +++ b/src/Parsers/ASTIndexDeclaration.h @@ -13,7 +13,7 @@ class ASTIndexDeclaration : public IAST { public: static const auto DEFAULT_INDEX_GRANULARITY = 1uz; - static const auto DEFAULT_USEARCH_INDEX_GRANULARITY = 100'000'000uz; + static const auto DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY = 100'000'000uz; ASTIndexDeclaration(ASTPtr expression, ASTPtr type, const String & name_); diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index e7cfd753f99..ed89b80edca 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -89,8 +89,8 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected else { auto index_type = index->getType(); - if (index_type && index_type->name == "usearch") - index->granularity = ASTIndexDeclaration::DEFAULT_USEARCH_INDEX_GRANULARITY; + if (index_type && index_type->name == "vector_similarity") + index->granularity = ASTIndexDeclaration::DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY; else index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY; } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index b31fe21c4cc..cc4e02f46a3 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -214,8 +214,8 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe else { auto index_type = index->getType(); - if (index_type->name == "usearch") - index->granularity = ASTIndexDeclaration::DEFAULT_USEARCH_INDEX_GRANULARITY; + if (index_type->name == "vector_similarity") + index->granularity = ASTIndexDeclaration::DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY; else index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY; } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3324cc4e42a..1f30725b4d0 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include @@ -1478,8 +1478,8 @@ static void buildIndexes( if (index_helper->isVectorSimilarityIndex()) { #if USE_USEARCH - if (const auto * usearch_index = typeid_cast(index_helper.get())) - condition = usearch_index->createIndexCondition(query_info, context); + if (const auto * vector_similarity_index = typeid_cast(index_helper.get())) + condition = vector_similarity_index->createIndexCondition(query_info, context); #endif if (!condition) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown vector search index {}", index_helper->index.name); diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp similarity index 76% rename from src/Storages/MergeTree/MergeTreeIndexUSearch.cpp rename to src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 1aa6c9c14d4..6f3b1b043cd 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -1,4 +1,4 @@ -#include +#include #if USE_USEARCH @@ -90,7 +90,7 @@ void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const auto result = Base::save_to_stream(callback); if (result.error) 
- throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not save USearch index, error: " + String(result.error.release())); + throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not save vector similarity index, error: " + String(result.error.release())); } void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) @@ -104,7 +104,7 @@ void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) auto result = Base::load_from_stream(callback); if (result.error) /// See the comment in MergeTreeIndexGranuleVectorSimilarity::deserializeBinary why we throw here - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not load USearch index, error: " + String(result.error.release()) + " Please drop the index and create it again."); + throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index, error: " + String(result.error.release()) + " Please drop the index and create it again."); } USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStatistics() const @@ -121,16 +121,16 @@ USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStat return statistics; } -MergeTreeIndexGranuleUSearch::MergeTreeIndexGranuleUSearch( +MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_) - : MergeTreeIndexGranuleUSearch(index_name_, index_sample_block_, metric_kind_, scalar_kind_, nullptr) + : MergeTreeIndexGranuleVectorSimilarity(index_name_, index_sample_block_, metric_kind_, scalar_kind_, nullptr) { } -MergeTreeIndexGranuleUSearch::MergeTreeIndexGranuleUSearch( +MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, @@ -144,7 +144,7 @@ MergeTreeIndexGranuleUSearch::MergeTreeIndexGranuleUSearch( { } -void MergeTreeIndexGranuleUSearch::serializeBinary(WriteBuffer & ostr) const +void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) const { if (empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty minmax index {}", backQuote(index_name)); @@ -158,18 +158,18 @@ void MergeTreeIndexGranuleUSearch::serializeBinary(WriteBuffer & ostr) const index->serialize(ostr); auto statistics = index->getStatistics(); - LOG_TRACE(logger, "Wrote USearch index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", + LOG_TRACE(logger, "Wrote vector similarity index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", statistics.max_level, statistics.connectivity, statistics.size, statistics.capacity, ReadableSize(statistics.memory_usage)); } -void MergeTreeIndexGranuleUSearch::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) +void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) { UInt64 file_version; readIntBinary(file_version, istr); if (file_version != FILE_FORMAT_VERSION) throw Exception( ErrorCodes::FORMAT_VERSION_TOO_OLD, - "USearch index could not be loaded because its version is too old (current version: {}, persisted version: {}). Please drop the index and create it again.", + "Vector similarity index could not be loaded because its version is too old (current version: {}, persisted version: {}). 
Please drop the index and create it again.", FILE_FORMAT_VERSION, file_version); /// More fancy error handling would be: Set a flag on the index that it failed to load. During usage return all granules, i.e. /// behave as if the index does not exist. Since format changes are expected to happen only rarely and it is "only" an index, keep it simple for now. @@ -181,11 +181,11 @@ void MergeTreeIndexGranuleUSearch::deserializeBinary(ReadBuffer & istr, MergeTre index->deserialize(istr); auto statistics = index->getStatistics(); - LOG_TRACE(logger, "Loaded USearch index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", + LOG_TRACE(logger, "Loaded vector similarity index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", statistics.max_level, statistics.connectivity, statistics.size, statistics.capacity, ReadableSize(statistics.memory_usage)); } -MergeTreeIndexAggregatorUSearch::MergeTreeIndexAggregatorUSearch( +MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, @@ -197,14 +197,14 @@ MergeTreeIndexAggregatorUSearch::MergeTreeIndexAggregatorUSearch( { } -MergeTreeIndexGranulePtr MergeTreeIndexAggregatorUSearch::getGranuleAndReset() +MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAndReset() { - auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, index); + auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, index); index = nullptr; return granule; } -void MergeTreeIndexAggregatorUSearch::update(const Block & block, size_t * pos, size_t limit) +void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_t * pos, size_t limit) { if (*pos >= block.rows()) throw Exception( @@ -239,8 +239,8 @@ void MergeTreeIndexAggregatorUSearch::update(const Block & block, size_t * pos, if (column_array->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - /// The Usearch algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays - /// are INSERTed into an Usearch-indexed column or if no value was specified at all in which case the arrays take on their default + /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays + /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default /// values which is also empty. if (column_array->isDefaultAt(0)) throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. 
Did you try to INSERT default values?", index_column_name); @@ -262,13 +262,13 @@ void MergeTreeIndexAggregatorUSearch::update(const Block & block, size_t * pos, /// Reserving space is mandatory if (!index->reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows))) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index"); + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); for (size_t current_row = 0; current_row < num_rows; ++current_row) { auto rc = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[current_row - 1]]); if (!rc) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not add data to USearch index, error: " + String(rc.error.release())); + throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index, error: " + String(rc.error.release())); ProfileEvents::increment(ProfileEvents::USearchAddCount); ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, rc.visited_members); @@ -281,7 +281,7 @@ void MergeTreeIndexAggregatorUSearch::update(const Block & block, size_t * pos, *pos += rows_read; } -MergeTreeIndexConditionUSearch::MergeTreeIndexConditionUSearch( +MergeTreeIndexConditionVectorSimilarity::MergeTreeIndexConditionVectorSimilarity( const IndexDescription & /*index_description*/, const SelectQueryInfo & query, unum::usearch::metric_kind_t metric_kind_, @@ -291,12 +291,12 @@ MergeTreeIndexConditionUSearch::MergeTreeIndexConditionUSearch( { } -bool MergeTreeIndexConditionUSearch::mayBeTrueOnGranule(MergeTreeIndexGranulePtr) const +bool MergeTreeIndexConditionVectorSimilarity::mayBeTrueOnGranule(MergeTreeIndexGranulePtr) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "mayBeTrueOnGranule is not supported for ANN skip indexes"); } -bool MergeTreeIndexConditionUSearch::alwaysUnknownOrTrue() const +bool MergeTreeIndexConditionVectorSimilarity::alwaysUnknownOrTrue() const { String index_distance_function; switch (metric_kind) @@ -308,14 +308,14 @@ bool MergeTreeIndexConditionUSearch::alwaysUnknownOrTrue() const return vector_similarity_condition.alwaysUnknownOrTrue(index_distance_function); } -std::vector MergeTreeIndexConditionUSearch::getUsefulRanges(MergeTreeIndexGranulePtr granule_) const +std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(MergeTreeIndexGranulePtr granule_) const { const UInt64 limit = vector_similarity_condition.getLimit(); const UInt64 index_granularity = vector_similarity_condition.getIndexGranularity(); const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); - const auto granule = std::dynamic_pointer_cast(granule_); + const auto granule = std::dynamic_pointer_cast(granule_); if (granule == nullptr) throw Exception(ErrorCodes::LOGICAL_ERROR, "Granule has the wrong type"); @@ -328,7 +328,7 @@ std::vector MergeTreeIndexConditionUSearch::getUsefulRanges(MergeTreeInd auto result = index->search(reference_vector.data(), limit); if (result.error) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not search in USearch index, error: " + String(result.error.release())); + throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index, error: " + String(result.error.release())); ProfileEvents::increment(ProfileEvents::USearchSearchCount); ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, result.visited_members); @@ -350,34 
+350,34 @@ std::vector MergeTreeIndexConditionUSearch::getUsefulRanges(MergeTreeInd return granules; } -MergeTreeIndexUSearch::MergeTreeIndexUSearch(const IndexDescription & index_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_) +MergeTreeIndexVectorSimilarity::MergeTreeIndexVectorSimilarity(const IndexDescription & index_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_) : IMergeTreeIndex(index_) , metric_kind(metric_kind_) , scalar_kind(scalar_kind_) { } -MergeTreeIndexGranulePtr MergeTreeIndexUSearch::createIndexGranule() const +MergeTreeIndexGranulePtr MergeTreeIndexVectorSimilarity::createIndexGranule() const { - return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind); + return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind); } -MergeTreeIndexAggregatorPtr MergeTreeIndexUSearch::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const +MergeTreeIndexAggregatorPtr MergeTreeIndexVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const { - return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind); + return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind); } -MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const +MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const { - return std::make_shared(index, query, metric_kind, context); + return std::make_shared(index, query, metric_kind, context); }; -MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const ActionsDAG *, ContextPtr) const +MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition(const ActionsDAG *, ContextPtr) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); } -MergeTreeIndexPtr usearchIndexCreator(const IndexDescription & index) +MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) { static constexpr auto default_metric_kind = unum::usearch::metric_kind_t::l2sq_k; auto metric_kind = default_metric_kind; @@ -389,25 +389,25 @@ MergeTreeIndexPtr usearchIndexCreator(const IndexDescription & index) if (index.arguments.size() > 1) scalar_kind = quantizationToScalarKind.at(index.arguments[1].safeGet()); - return std::make_shared(index, metric_kind, scalar_kind); + return std::make_shared(index, metric_kind, scalar_kind); } -void usearchIndexValidator(const IndexDescription & index, bool /* attach */) +void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* attach */) { - /// Check number and type of USearch index arguments: + /// Check number and type of index arguments: if (index.arguments.size() > 2) - throw Exception(ErrorCodes::INCORRECT_QUERY, "USearch index must not have more than one parameters"); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must not have more than one parameters"); if (!index.arguments.empty() && index.arguments[0].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of USearch index (distance function) must be of type String"); + throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (distance function) must be of type String"); if (index.arguments.size() > 1 && 
index.arguments[1].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of USearch index (scalar type) must be of type String"); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (scalar type) must be of type String"); /// Check that the index is created on a single column if (index.column_names.size() != 1 || index.data_types.size() != 1) - throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "USearch indexes must be created on a single column"); + throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Vector similarity indexes must be created on a single column"); /// Check that a supported metric was passed as first argument @@ -420,16 +420,15 @@ void usearchIndexValidator(const IndexDescription & index, bool /* attach */) throw Exception(ErrorCodes::INCORRECT_DATA, "Unrecognized scalar kind (second argument) for vector index. Supported kinds are: {}", keysAsString(quantizationToScalarKind)); /// Check data type of indexed column: - DataTypePtr data_type = index.sample_block.getDataTypes()[0]; if (const auto * data_type_array = typeid_cast(data_type.get())) { TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); if (!WhichDataType(nested_type_index).isFloat32()) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "USearch can only be created on columns of type Array(Float32)"); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity index can only be created on columns of type Array(Float32)"); } else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "USearch can only be created on columns of type Array(Float32)"); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity index can only be created on columns of type Array(Float32)"); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h similarity index 84% rename from src/Storages/MergeTree/MergeTreeIndexUSearch.h rename to src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index d4df6658a90..95ea3cd5240 100644 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -48,22 +48,22 @@ public: using USearchIndexWithSerializationPtr = std::shared_ptr; -struct MergeTreeIndexGranuleUSearch final : public IMergeTreeIndexGranule +struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranule { - MergeTreeIndexGranuleUSearch( + MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_); - MergeTreeIndexGranuleUSearch( + MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, USearchIndexWithSerializationPtr index_); - ~MergeTreeIndexGranuleUSearch() override = default; + ~MergeTreeIndexGranuleVectorSimilarity() override = default; void serializeBinary(WriteBuffer & ostr) const override; void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; @@ -76,7 +76,7 @@ struct MergeTreeIndexGranuleUSearch final : public IMergeTreeIndexGranule const unum::usearch::scalar_kind_t scalar_kind; USearchIndexWithSerializationPtr index; - LoggerPtr logger = getLogger("USearchIndex"); + LoggerPtr logger = getLogger("VectorSimilarityIndex"); private: /// The version of the persistence format of USearch index. 
Increment whenever you change the format. @@ -87,15 +87,15 @@ private: }; -struct MergeTreeIndexAggregatorUSearch final : IMergeTreeIndexAggregator +struct MergeTreeIndexAggregatorVectorSimilarity final : IMergeTreeIndexAggregator { - MergeTreeIndexAggregatorUSearch( + MergeTreeIndexAggregatorVectorSimilarity( const String & index_name_, const Block & index_sample_block, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_); - ~MergeTreeIndexAggregatorUSearch() override = default; + ~MergeTreeIndexAggregatorVectorSimilarity() override = default; bool empty() const override { return !index || index->size() == 0; } MergeTreeIndexGranulePtr getGranuleAndReset() override; @@ -109,16 +109,16 @@ struct MergeTreeIndexAggregatorUSearch final : IMergeTreeIndexAggregator }; -class MergeTreeIndexConditionUSearch final : public IMergeTreeIndexCondition +class MergeTreeIndexConditionVectorSimilarity final : public IMergeTreeIndexCondition { public: - MergeTreeIndexConditionUSearch( + MergeTreeIndexConditionVectorSimilarity( const IndexDescription & index_description, const SelectQueryInfo & query, unum::usearch::metric_kind_t metric_kind_, ContextPtr context); - ~MergeTreeIndexConditionUSearch() override = default; + ~MergeTreeIndexConditionVectorSimilarity() override = default; bool alwaysUnknownOrTrue() const override; bool mayBeTrueOnGranule(MergeTreeIndexGranulePtr granule) const override; @@ -130,15 +130,15 @@ private: }; -class MergeTreeIndexUSearch : public IMergeTreeIndex +class MergeTreeIndexVectorSimilarity : public IMergeTreeIndex { public: - MergeTreeIndexUSearch( + MergeTreeIndexVectorSimilarity( const IndexDescription & index_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_); - ~MergeTreeIndexUSearch() override = default; + ~MergeTreeIndexVectorSimilarity() override = default; MergeTreeIndexGranulePtr createIndexGranule() const override; MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; diff --git a/src/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp index f07449f762c..89aed7873a4 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.cpp +++ b/src/Storages/MergeTree/MergeTreeIndices.cpp @@ -129,8 +129,8 @@ MergeTreeIndexFactory::MergeTreeIndexFactory() registerValidator("hypothesis", hypothesisIndexValidator); #if USE_USEARCH - registerCreator("usearch", usearchIndexCreator); - registerValidator("usearch", usearchIndexValidator); + registerCreator("vector_similarity", vectorSimilarityIndexCreator); + registerValidator("vector_similarity", vectorSimilarityIndexValidator); #endif registerCreator("inverted", fullTextIndexCreator); diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 3dee79aae85..48ef2a4739e 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -239,8 +239,8 @@ MergeTreeIndexPtr hypothesisIndexCreator(const IndexDescription & index); void hypothesisIndexValidator(const IndexDescription & index, bool attach); #if USE_USEARCH -MergeTreeIndexPtr usearchIndexCreator(const IndexDescription & index); -void usearchIndexValidator(const IndexDescription & index, bool attach); +MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index); +void vectorSimilarityIndexValidator(const IndexDescription & index, bool attach); #endif MergeTreeIndexPtr fullTextIndexCreator(const IndexDescription & index); diff 
--git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index de36683ede1..2ef75d0a7fe 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -2,21 +2,21 @@ -- Tests various bugs and special cases for vector indexes. -SET allow_experimental_usearch_index = 1; +SET allow_experimental_vector_similarity_index = 1; SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof DROP TABLE IF EXISTS tab; SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; -CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree() ORDER BY id; INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } DROP TABLE tab; SELECT 'It is possible to create parts with different Array vector sizes but there will be an error at query time'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; SYSTEM STOP MERGES tab; INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); @@ -31,7 +31,7 @@ DROP TABLE tab; SELECT 'Correctness of index with > 1 mark'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192; -- disable adaptive granularity due to bug +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192; -- disable adaptive granularity due to bug INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); WITH [1.0, 0.0] AS reference_vec diff --git a/tests/queries/0_stateless/02354_vector_search_default_granularity.sql b/tests/queries/0_stateless/02354_vector_search_default_granularity.sql index ff659b56033..a19a0d17536 100644 --- a/tests/queries/0_stateless/02354_vector_search_default_granularity.sql +++ b/tests/queries/0_stateless/02354_vector_search_default_granularity.sql @@ -2,17 +2,17 @@ -- Tests that vector search indexes use a (non-standard) index granularity of 100 mio by default. 
-SET allow_experimental_usearch_index = 1; +SET allow_experimental_vector_similarity_index = 1; -- After CREATE TABLE DROP TABLE IF EXISTS tab; -CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE usearch) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE vector_similarity) ENGINE = MergeTree ORDER BY id; SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; -- After ALTER TABLE DROP TABLE tab; CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id; -ALTER TABLE tab ADD INDEX idx(vec) TYPE usearch; +ALTER TABLE tab ADD INDEX idx(vec) TYPE vector_similarity; SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_detach_attach.sql b/tests/queries/0_stateless/02354_vector_search_detach_attach.sql index 92e8efd918b..36241dfabf7 100644 --- a/tests/queries/0_stateless/02354_vector_search_detach_attach.sql +++ b/tests/queries/0_stateless/02354_vector_search_detach_attach.sql @@ -2,10 +2,10 @@ -- Tests that vector similarity indexes can be detached/attached. -SET allow_experimental_usearch_index = 1; +SET allow_experimental_vector_similarity_index = 1; DROP TABLE IF EXISTS tab; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); DETACH TABLE tab SYNC; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index 60bd54d1dbe..912f7d7fcae 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -2,36 +2,36 @@ -- Tests that various conditions are checked during creation of vector search indexes. 
-SET allow_experimental_usearch_index = 1; +SET allow_experimental_vector_similarity_index = 1; DROP TABLE IF EXISTS tab; SELECT 'At most two index arguments'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch('too', 'many', 'args')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('too', 'many', 'args')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } SELECT '1st argument (distance function) must be String'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } SELECT 'Unsupported distance functions are rejected'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch('invalidDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('invalidDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } SELECT '2nd argument (scalar kind) must be String'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } SELECT 'Unsupported scalar kinds are rejected'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch('L2Distance', 'invalidKind')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('L2Distance', 'invalidKind')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } SELECT 'Must be created on single column'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } SELECT 'Must be created on Array(Float32) columns'; SET allow_suspicious_low_cardinality_types = 1; -CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, 
vec Nullable(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } SELECT 'Rejects INSERTs of Arrays with different sizes'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 22ad46f802c..7c8e4c0ca59 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -1,9 +1,9 @@ 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block -- Usearch: ORDER-BY-type +- ORDER-BY-type 5 [0,2] 0 6 [0,2.1] 0.09999990463256836 7 [0,2.2] 0.20000004768371582 -- Usearch: ORDER-BY-type, EXPLAIN +- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -16,15 +16,15 @@ Expression (Projection) Granules: 1/1 Skip Name: idx - Description: usearch GRANULARITY 100000000 + Description: vector_similarity GRANULARITY 100000000 Parts: 1/1 Granules: 1/1 12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block -- Usearch: ORDER-BY-type +- ORDER-BY-type 6 [0,2] 0 7 [0,2.1] 0.09999990463256836 8 [0,2.2] 0.20000004768371582 -- Usearch: ORDER-BY-type, EXPLAIN +- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -37,11 +37,11 @@ Expression (Projection) Granules: 4/4 Skip Name: idx - Description: usearch GRANULARITY 2 + Description: vector_similarity GRANULARITY 2 Parts: 1/1 Granules: 2/4 Special cases -- Usearch: ORDER-BY-type +- ORDER-BY-type 6 [1,9.3] 0.005731362878640178 1 [2,3.2] 0.15200169244542905 7 [5.5,4.7] 0.3503476876550442 diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 555f47b364f..50537ad6244 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -2,7 +2,7 @@ -- Tests various simple approximate nearest neighborhood (ANN) queries that utilize vector search indexes. 
-SET allow_experimental_usearch_index = 1; +SET allow_experimental_vector_similarity_index = 1; SET enable_analyzer = 0; @@ -10,18 +10,18 @@ SELECT '10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule DROP TABLE IF EXISTS tab; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -SELECT '- Usearch: ORDER-BY-type'; +SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- Usearch: ORDER-BY-type, EXPLAIN'; +SELECT '- ORDER-BY-type, EXPLAIN'; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) @@ -34,17 +34,17 @@ DROP TABLE tab; SELECT '12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch() GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity() GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); -SELECT '- Usearch: ORDER-BY-type'; +SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- Usearch: ORDER-BY-type, EXPLAIN'; +SELECT '- ORDER-BY-type, EXPLAIN'; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) @@ -58,10 +58,10 @@ DROP TABLE tab; SELECT 'Special cases'; -- Not a systematic test, just to check that no bad things happen. -- Just for jun, use metric = 'cosineDistance', scalarKind = 'f64' -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch('cosineDistance', 'f64') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cosineDistance', 'f64') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -SELECT '- Usearch: ORDER-BY-type'; +SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) FROM tab From cc5c64e1ede7284d91ada1f28edbb18a457f5894 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 10 Jun 2024 19:48:51 +0000 Subject: [PATCH 2115/2361] Add migration helper for legacy 'annoy' and 'usearch' indexes types Index types 'annoy' and 'usearch' were removed and replaced by 'vector_similarity' indexes in an earlier commit. 
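After upgrading, affected users are expected to drop the legacy index and recreate it as a 'vector_similarity' index. A rough migration sketch (table, column, and index names are illustrative; the ('hnsw', 'L2Distance') argument form is the one introduced by a later commit in this series):

    SET allow_experimental_vector_similarity_index = 1;
    ALTER TABLE tab DROP INDEX legacy_vec_idx;
    ALTER TABLE tab ADD INDEX vec_idx vec TYPE vector_similarity('hnsw', 'L2Distance');
    ALTER TABLE tab MATERIALIZE INDEX vec_idx; -- build the new index for existing parts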
The removal means, unfortunately, that if customers have tables with these indexes and upgrade, their database might not start anymore - the system loads the metadata at startup, thinks something is wrong with such tables, and halts immediately. This commit adds support for loading and attaching such indexes back. Data insertion and index use (search) return an error which recommends a migration to 'vector_similarity' indexes. The implementation is generally similar to what has recently been implemented for 'full_text' indexes [1, 2]. [1] https://github.com/ClickHouse/ClickHouse/pull/64656 [2] https://github.com/ClickHouse/ClickHouse/pull/64846 --- .../QueryPlan/ReadFromMergeTree.cpp | 3 ++ .../MergeTreeIndexLegacyVectorSimilarity.cpp | 45 +++++++++++++++++++ .../MergeTreeIndexLegacyVectorSimilarity.h | 26 +++++++++++ src/Storages/MergeTree/MergeTreeIndices.cpp | 10 +++++ src/Storages/MergeTree/MergeTreeIndices.h | 3 ++ ...earch_legacy_index_compatibility.reference | 2 + ...ctor_search_legacy_index_compatibility.sql | 43 ++++++++++++++++++ 7 files changed, 132 insertions(+) create mode 100644 src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp create mode 100644 src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h create mode 100644 tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 1f30725b4d0..348019d7d10 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -1481,6 +1482,8 @@ static void buildIndexes( if (const auto * vector_similarity_index = typeid_cast(index_helper.get())) condition = vector_similarity_index->createIndexCondition(query_info, context); #endif + if (const auto * legacy_vector_similarity_index = typeid_cast(index_helper.get())) + condition = legacy_vector_similarity_index->createIndexCondition(query_info, context); if (!condition) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown vector search index {}", index_helper->index.name); } diff --git a/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp new file mode 100644 index 00000000000..29de109d4fc --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp @@ -0,0 +1,45 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_INDEX; +} + +MergeTreeIndexLegacyVectorSimilarity::MergeTreeIndexLegacyVectorSimilarity(const IndexDescription & index_) + : IMergeTreeIndex(index_) +{ +} + +MergeTreeIndexGranulePtr MergeTreeIndexLegacyVectorSimilarity::createIndexGranule() const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +} + +MergeTreeIndexAggregatorPtr MergeTreeIndexLegacyVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings &) const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported.
Please drop and recreate the index as type 'vector_similarity'"); +} + +MergeTreeIndexConditionPtr MergeTreeIndexLegacyVectorSimilarity::createIndexCondition(const SelectQueryInfo &, ContextPtr) const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +}; + +MergeTreeIndexConditionPtr MergeTreeIndexLegacyVectorSimilarity::createIndexCondition(const ActionsDAG *, ContextPtr) const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +} + +MergeTreeIndexPtr legacyVectorSimilarityIndexCreator(const IndexDescription & index) +{ + return std::make_shared(index); +} + +void legacyVectorSimilarityIndexValidator(const IndexDescription &, bool) +{ +} + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h new file mode 100644 index 00000000000..1015401823d --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h @@ -0,0 +1,26 @@ +#pragma once + +#include + +/// Walking corpse implementation for removed skipping index of type "annoy" and "usearch". +/// Its only purpose is to allow loading old tables with indexes of these types. +/// Data insertion and index usage/search will throw an exception, suggesting to migrate to "vector_similarity" indexes. + +namespace DB +{ + +class MergeTreeIndexLegacyVectorSimilarity : public IMergeTreeIndex +{ +public: + explicit MergeTreeIndexLegacyVectorSimilarity(const IndexDescription & index_); + ~MergeTreeIndexLegacyVectorSimilarity() override = default; + + MergeTreeIndexGranulePtr createIndexGranule() const override; + MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings &) const override; + MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo &, ContextPtr) const; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; + + bool isVectorSimilarityIndex() const override { return true; } +}; + +} diff --git a/src/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp index 89aed7873a4..d2fc0e84b56 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.cpp +++ b/src/Storages/MergeTree/MergeTreeIndices.cpp @@ -132,6 +132,16 @@ MergeTreeIndexFactory::MergeTreeIndexFactory() registerCreator("vector_similarity", vectorSimilarityIndexCreator); registerValidator("vector_similarity", vectorSimilarityIndexValidator); #endif + /// ------ + /// TODO: remove this block at the end of 2024. + /// Index types 'annoy' and 'usearch' are no longer supported as of June 2024. Their successor is index type 'vector_similarity'. + /// To support loading tables with old indexes during a transition period, register dummy indexes which allow load/attaching but + /// throw an exception when the user attempts to use them. 
+ registerCreator("annoy", legacyVectorSimilarityIndexCreator); + registerValidator("annoy", legacyVectorSimilarityIndexValidator); + registerCreator("usearch", legacyVectorSimilarityIndexCreator); + registerValidator("usearch", legacyVectorSimilarityIndexValidator); + /// ------ registerCreator("inverted", fullTextIndexCreator); registerValidator("inverted", fullTextIndexValidator); diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 48ef2a4739e..c52d7ffe131 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -243,6 +243,9 @@ MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index); void vectorSimilarityIndexValidator(const IndexDescription & index, bool attach); #endif +MergeTreeIndexPtr legacyVectorSimilarityIndexCreator(const IndexDescription & index); +void legacyVectorSimilarityIndexValidator(const IndexDescription & index, bool attach); + MergeTreeIndexPtr fullTextIndexCreator(const IndexDescription & index); void fullTextIndexValidator(const IndexDescription & index, bool attach); diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference new file mode 100644 index 00000000000..030bfa9b1bd --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference @@ -0,0 +1,2 @@ +Annoy +Usearch diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql new file mode 100644 index 00000000000..0889aa74f7a --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql @@ -0,0 +1,43 @@ +-- Indexes of type 'annoy' or 'usearch' are no longer supported. +-- Test what happens when ClickHouse encounters tables with the old index type. + +DROP TABLE IF EXISTS tab; + +SELECT 'Annoy'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX vec_idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; + +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -- { serverError ILLEGAL_INDEX } + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; +-- (*) The search succeeds because the index contains no data (i.e. some shortcut) +-- If it had data (can't really test in SQL tests ...), this statement would also return an error, trust me. + +-- Detach and attach should work. +DETACH TABLE tab; +ATTACH TABLE tab; + +DROP TABLE tab; + +SELECT 'Usearch'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX vec_idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; + +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -- { serverError ILLEGAL_INDEX } + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; +-- see above: (*) + +-- Detach and attach should work. 
+DETACH TABLE tab; +ATTACH TABLE tab; + +DROP TABLE tab; From d2e79f0b92936eb3ec3f6409fe6db18a3091919d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 9 Aug 2024 15:28:38 +0000 Subject: [PATCH 2116/2361] Rework vector index parameters USearch (similar to FAISS) allows specifying the distance function, quantization, and various HNSW meta-parameters for index creation and search. Some users wished for greater configurability, so let's expose them. Index creation now requires either - 2 parameters (with the other 4 parameters taking on default values), or - 6 parameters for full control. This commit also removes quantization `f64` (that would be upsampling). --- .../mergetree-family/annindexes.md | 12 +- .../MergeTreeIndexVectorSimilarity.cpp | 166 ++++++++++++------ .../MergeTreeIndexVectorSimilarity.h | 23 ++- .../0_stateless/02354_vector_search_bugs.sql | 6 +- ...2354_vector_search_default_granularity.sql | 4 +- .../02354_vector_search_detach_attach.sql | 2 +- ...r_search_index_creation_negative.reference | 12 +- ..._vector_search_index_creation_negative.sql | 48 +++-- ...4_vector_search_multiple_indexes.reference | 0 .../02354_vector_search_multiple_indexes.sql | 14 ++ .../02354_vector_search_queries.sql | 8 +- 11 files changed, 203 insertions(+), 92 deletions(-) create mode 100644 tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 63c061a0d46..354fac6ea74 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -43,12 +43,22 @@ CREATE TABLE table ( id Int64, vectors Array(Float32), - INDEX [index_name vectors TYPE vector_similarity([Distance[, ScalarKind]]) [GRANULARITY [N]] + INDEX index_name vec TYPE vector_similarity(method, distance_function[, quantization, connectivity, expansion_add, expansion_search]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; ``` +Parameters: +- `method`: Supports currently only `hnsw`. +- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a + line between two points in Euclidean space), or `cosineDistance` (the [cosine + distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors). +- `quantization`: either `f32`, `f16`, or `i8` for storing the vector with reduced precision (optional, default: `f32`) +- `m`: the number of neighbors per graph node (optional, default: 16) +- `ef_construction`: (optional, default: 128) +- `ef_search`: (optional, default: 64) + Vector similarity indexes are based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW algorithm](https://arxiv.org/abs/1603.09320), i.e., a hierarchical graph where each point represents a vector and the edges represent similarity. Such hierarchical structures can be very efficient on large collections.
They may often fetch 0.05% or less data from the diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 6f3b1b043cd..5b0793fa0c8 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -45,6 +45,9 @@ namespace ErrorCodes namespace { +/// The only indexing method currently supported by USearch +std::set methods = {"hnsw"}; + /// Maps from user-facing name to internal name std::unordered_map distanceFunctionToMetricKind = { {"L2Distance", unum::usearch::metric_kind_t::l2sq_k}, @@ -52,22 +55,37 @@ std::unordered_map distanceFunctionToMetri /// Maps from user-facing name to internal name std::unordered_map quantizationToScalarKind = { - {"f64", unum::usearch::scalar_kind_t::f64_k}, {"f32", unum::usearch::scalar_kind_t::f32_k}, {"f16", unum::usearch::scalar_kind_t::f16_k}, {"i8", unum::usearch::scalar_kind_t::i8_k}}; +template +concept is_set = std::same_as>; + +template +concept is_unordered_map = std::same_as>; + template -String keysAsString(const T & t) +String joinByComma(const T & t) { - String result; - for (const auto & [k, _] : t) + if constexpr (is_set) { - if (!result.empty()) - result += ", "; - result += k; + return fmt::format("{}", fmt::join(t, ", ")); } - return result; + else if constexpr (is_unordered_map) + { + String joined_keys; + for (const auto & [k, _] : t) + { + if (!joined_keys.empty()) + joined_keys += ", "; + joined_keys += k; + } + return joined_keys; + } + /// TODO once our libcxx is recent enough, replace above by + /// return fmt::format("{}", fmt::join(std::views::keys(t)), ", ")); + std::unreachable(); } } @@ -75,8 +93,10 @@ String keysAsString(const T & t) USearchIndexWithSerialization::USearchIndexWithSerialization( size_t dimensions, unum::usearch::metric_kind_t metric_kind, - unum::usearch::scalar_kind_t scalar_kind) - : Base(Base::make(unum::usearch::metric_punned_t(dimensions, metric_kind, scalar_kind))) + unum::usearch::scalar_kind_t scalar_kind, + UsearchHnswParams usearch_hnsw_params) + : Base(Base::make(unum::usearch::metric_punned_t(dimensions, metric_kind, scalar_kind), + unum::usearch::index_dense_config_t(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search))) { } @@ -125,8 +145,9 @@ MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, - unum::usearch::scalar_kind_t scalar_kind_) - : MergeTreeIndexGranuleVectorSimilarity(index_name_, index_sample_block_, metric_kind_, scalar_kind_, nullptr) + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_) + : MergeTreeIndexGranuleVectorSimilarity(index_name_, index_sample_block_, metric_kind_, scalar_kind_, usearch_hnsw_params_, nullptr) { } @@ -135,11 +156,13 @@ MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_, USearchIndexWithSerializationPtr index_) : index_name(index_name_) , index_sample_block(index_sample_block_) , metric_kind(metric_kind_) , scalar_kind(scalar_kind_) + , usearch_hnsw_params(usearch_hnsw_params_) , index(std::move(index_)) { } @@ -153,8 +176,8 @@ void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) /// Number of 
dimensions is required in the index constructor, /// so it must be written and read separately from the other part - writeIntBinary(static_cast(index->dimensions()), ostr); // write dimension - // + writeIntBinary(static_cast(index->dimensions()), ostr); + index->serialize(ostr); auto statistics = index->getStatistics(); @@ -176,7 +199,7 @@ void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, UInt64 dimension; readIntBinary(dimension, istr); - index = std::make_shared(dimension, metric_kind, scalar_kind); + index = std::make_shared(dimension, metric_kind, scalar_kind, usearch_hnsw_params); index->deserialize(istr); @@ -189,17 +212,19 @@ MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilari const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, - unum::usearch::scalar_kind_t scalar_kind_) + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_) : index_name(index_name_) , index_sample_block(index_sample_block_) , metric_kind(metric_kind_) , scalar_kind(scalar_kind_) + , usearch_hnsw_params(usearch_hnsw_params_) { } MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAndReset() { - auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, index); + auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, usearch_hnsw_params, index); index = nullptr; return granule; } @@ -258,15 +283,15 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); if (!index) - index = std::make_shared(dimensions, metric_kind, scalar_kind); + index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); /// Reserving space is mandatory if (!index->reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows))) throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); - for (size_t current_row = 0; current_row < num_rows; ++current_row) + for (size_t row = 0; row < num_rows; ++row) { - auto rc = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[current_row - 1]]); + auto rc = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); if (!rc) throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index, error: " + String(rc.error.release())); @@ -313,8 +338,6 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer const UInt64 limit = vector_similarity_condition.getLimit(); const UInt64 index_granularity = vector_similarity_condition.getIndexGranularity(); - const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); - const auto granule = std::dynamic_pointer_cast(granule_); if (granule == nullptr) throw Exception(ErrorCodes::LOGICAL_ERROR, "Granule has the wrong type"); @@ -326,6 +349,8 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer "does not match the dimension in the index ({})", vector_similarity_condition.getDimensions(), index->dimensions()); + const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); + auto result = index->search(reference_vector.data(), limit); if (result.error) throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could 
not search in vector similarity index, error: " + String(result.error.release())); @@ -350,21 +375,26 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer return granules; } -MergeTreeIndexVectorSimilarity::MergeTreeIndexVectorSimilarity(const IndexDescription & index_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_) +MergeTreeIndexVectorSimilarity::MergeTreeIndexVectorSimilarity( + const IndexDescription & index_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_) : IMergeTreeIndex(index_) , metric_kind(metric_kind_) , scalar_kind(scalar_kind_) + , usearch_hnsw_params(usearch_hnsw_params_) { } MergeTreeIndexGranulePtr MergeTreeIndexVectorSimilarity::createIndexGranule() const { - return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind); + return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind, usearch_hnsw_params); } MergeTreeIndexAggregatorPtr MergeTreeIndexVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const { - return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind); + return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind, usearch_hnsw_params); } MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const @@ -379,56 +409,82 @@ MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition( MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) { - static constexpr auto default_metric_kind = unum::usearch::metric_kind_t::l2sq_k; - auto metric_kind = default_metric_kind; - if (!index.arguments.empty()) - metric_kind = distanceFunctionToMetricKind.at(index.arguments[0].safeGet()); + const bool has_six_args = (index.arguments.size() == 6); - static constexpr auto default_scalar_kind = unum::usearch::scalar_kind_t::f16_k; - auto scalar_kind = default_scalar_kind; - if (index.arguments.size() > 1) - scalar_kind = quantizationToScalarKind.at(index.arguments[1].safeGet()); + unum::usearch::metric_kind_t metric_kind = distanceFunctionToMetricKind.at(index.arguments[1].safeGet()); - return std::make_shared(index, metric_kind, scalar_kind); + /// use defaults for the other parameters + unum::usearch::scalar_kind_t scalar_kind = unum::usearch::scalar_kind_t::f32_k; + UsearchHnswParams usearch_hnsw_params; + + if (has_six_args) + { + scalar_kind = quantizationToScalarKind.at(index.arguments[2].safeGet()); + usearch_hnsw_params = {.m = index.arguments[3].safeGet(), + .ef_construction = index.arguments[4].safeGet(), + .ef_search = index.arguments[5].safeGet()}; + } + + return std::make_shared(index, metric_kind, scalar_kind, usearch_hnsw_params); } void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* attach */) { - /// Check number and type of index arguments: + const bool has_two_args = (index.arguments.size() == 2); + const bool has_six_args = (index.arguments.size() == 6); - if (index.arguments.size() > 2) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must not have more than one parameters"); + /// Check number and type of arguments + if (!has_two_args && !has_six_args) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must have two or six arguments"); + if (index.arguments[0].getType() != Field::Types::String) + throw 
Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (method) must be of type String"); + if (index.arguments[1].getType() != Field::Types::String) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (metric) must be of type String"); + if (has_six_args) + { + if (index.arguments[2].getType() != Field::Types::String) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Third argument of vector similarity index (quantization) must be of type String"); + if (index.arguments[3].getType() != Field::Types::UInt64) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Fourth argument of vector similarity index (M) must be of type UInt64"); + if (index.arguments[4].getType() != Field::Types::UInt64) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Fifth argument of vector similarity index (ef_construction) must be of type UInt64"); + if (index.arguments[5].getType() != Field::Types::UInt64) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Sixth argument of vector similarity index (ef_search) must be of type UInt64"); + } - if (!index.arguments.empty() && index.arguments[0].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (distance function) must be of type String"); - if (index.arguments.size() > 1 && index.arguments[1].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (scalar type) must be of type String"); + /// Check that passed arguments are supported + if (!methods.contains(index.arguments[0].safeGet())) + throw Exception(ErrorCodes::INCORRECT_DATA, "First argument (method) of vector similarity index is not supported. Supported methods are: {}", joinByComma(methods)); + if (!distanceFunctionToMetricKind.contains(index.arguments[1].safeGet())) + throw Exception(ErrorCodes::INCORRECT_DATA, "Second argument (distance function) of vector similarity index is not supported. Supported distance function are: {}", joinByComma(distanceFunctionToMetricKind)); + if (has_six_args) + { + if (!quantizationToScalarKind.contains(index.arguments[2].safeGet())) + throw Exception(ErrorCodes::INCORRECT_DATA, "Third argument (quantization) of vector similarity index is not supported. Supported quantizations are: {}", joinByComma(quantizationToScalarKind)); + if (index.arguments[3].safeGet() < 2) + throw Exception(ErrorCodes::INCORRECT_DATA, "Fourth argument (M) of vector similarity index must be > 1"); + if (index.arguments[4].safeGet() < 1) + throw Exception(ErrorCodes::INCORRECT_DATA, "Fifth argument (ef_construction) of vector similarity index must be > 0"); + if (index.arguments[5].safeGet() < 1) + throw Exception(ErrorCodes::INCORRECT_DATA, "Sixth argument (ef_search) of vector similarity index must be > 0"); + } /// Check that the index is created on a single column - if (index.column_names.size() != 1 || index.data_types.size() != 1) throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Vector similarity indexes must be created on a single column"); - /// Check that a supported metric was passed as first argument - - if (!index.arguments.empty() && !distanceFunctionToMetricKind.contains(index.arguments[0].safeGet())) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unrecognized metric kind (first argument) for vector index. 
Supported kinds are: {}", keysAsString(distanceFunctionToMetricKind)); - - /// Check that a supported kind was passed as a second argument - - if (index.arguments.size() > 1 && !quantizationToScalarKind.contains(index.arguments[1].safeGet())) - throw Exception(ErrorCodes::INCORRECT_DATA, "Unrecognized scalar kind (second argument) for vector index. Supported kinds are: {}", keysAsString(quantizationToScalarKind)); - - /// Check data type of indexed column: + /// Check data type of the indexed column: DataTypePtr data_type = index.sample_block.getDataTypes()[0]; if (const auto * data_type_array = typeid_cast(data_type.get())) { TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); if (!WhichDataType(nested_type_index).isFloat32()) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity index can only be created on columns of type Array(Float32)"); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); } else - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity index can only be created on columns of type Array(Float32)"); + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); + } } } diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index 95ea3cd5240..f7098c1626c 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -14,6 +14,13 @@ namespace DB { +struct UsearchHnswParams +{ + size_t m = unum::usearch::default_connectivity(); + size_t ef_construction = unum::usearch::default_expansion_add(); + size_t ef_search = unum::usearch::default_expansion_search(); +}; + using USearchIndex = unum::usearch::index_dense_gt; class USearchIndexWithSerialization : public USearchIndex @@ -24,7 +31,8 @@ public: USearchIndexWithSerialization( size_t dimensions, unum::usearch::metric_kind_t metric_kind, - unum::usearch::scalar_kind_t scalar_kind); + unum::usearch::scalar_kind_t scalar_kind, + UsearchHnswParams usearch_hnsw_params); void serialize(WriteBuffer & ostr) const; void deserialize(ReadBuffer & istr); @@ -54,13 +62,15 @@ struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranu const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, - unum::usearch::scalar_kind_t scalar_kind_); + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_); MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_, USearchIndexWithSerializationPtr index_); ~MergeTreeIndexGranuleVectorSimilarity() override = default; @@ -74,6 +84,7 @@ struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranu const Block index_sample_block; const unum::usearch::metric_kind_t metric_kind; const unum::usearch::scalar_kind_t scalar_kind; + const UsearchHnswParams usearch_hnsw_params; USearchIndexWithSerializationPtr index; LoggerPtr logger = getLogger("VectorSimilarityIndex"); @@ -93,7 +104,8 @@ struct MergeTreeIndexAggregatorVectorSimilarity final : IMergeTreeIndexAggregato const String & index_name_, const Block & index_sample_block, unum::usearch::metric_kind_t metric_kind_, - 
unum::usearch::scalar_kind_t scalar_kind_); + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_); ~MergeTreeIndexAggregatorVectorSimilarity() override = default; @@ -105,6 +117,7 @@ struct MergeTreeIndexAggregatorVectorSimilarity final : IMergeTreeIndexAggregato const Block index_sample_block; const unum::usearch::metric_kind_t metric_kind; const unum::usearch::scalar_kind_t scalar_kind; + const UsearchHnswParams usearch_hnsw_params; USearchIndexWithSerializationPtr index; }; @@ -136,7 +149,8 @@ public: MergeTreeIndexVectorSimilarity( const IndexDescription & index_, unum::usearch::metric_kind_t metric_kind_, - unum::usearch::scalar_kind_t scalar_kind_); + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_); ~MergeTreeIndexVectorSimilarity() override = default; @@ -149,6 +163,7 @@ public: private: const unum::usearch::metric_kind_t metric_kind; const unum::usearch::scalar_kind_t scalar_kind; + const UsearchHnswParams usearch_hnsw_params; }; } diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index 2ef75d0a7fe..7c66b4b8e45 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -9,14 +9,14 @@ DROP TABLE IF EXISTS tab; SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; -CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } DROP TABLE tab; SELECT 'It is possible to create parts with different Array vector sizes but there will be an error at query time'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; SYSTEM STOP MERGES tab; INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); @@ -31,7 +31,7 @@ DROP TABLE tab; SELECT 'Correctness of index with > 1 mark'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192; -- disable adaptive granularity due to bug +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192; -- disable adaptive granularity due to bug INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); WITH [1.0, 0.0] AS reference_vec diff --git a/tests/queries/0_stateless/02354_vector_search_default_granularity.sql b/tests/queries/0_stateless/02354_vector_search_default_granularity.sql index a19a0d17536..acb69cb6ff8 100644 --- a/tests/queries/0_stateless/02354_vector_search_default_granularity.sql +++ b/tests/queries/0_stateless/02354_vector_search_default_granularity.sql @@ -6,13 +6,13 @@ SET 
allow_experimental_vector_similarity_index = 1; -- After CREATE TABLE DROP TABLE IF EXISTS tab; -CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE vector_similarity) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; -- After ALTER TABLE DROP TABLE tab; CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id; -ALTER TABLE tab ADD INDEX idx(vec) TYPE vector_similarity; +ALTER TABLE tab ADD INDEX idx(vec) TYPE vector_similarity('hnsw', 'L2Distance'); SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_detach_attach.sql b/tests/queries/0_stateless/02354_vector_search_detach_attach.sql index 36241dfabf7..f92eaddbbed 100644 --- a/tests/queries/0_stateless/02354_vector_search_detach_attach.sql +++ b/tests/queries/0_stateless/02354_vector_search_detach_attach.sql @@ -5,7 +5,7 @@ SET allow_experimental_vector_similarity_index = 1; DROP TABLE IF EXISTS tab; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); DETACH TABLE tab SYNC; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference index bee3236f436..b6d034208d0 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference @@ -1,8 +1,10 @@ -At most two index arguments -1st argument (distance function) must be String -Unsupported distance functions are rejected -2nd argument (scalar kind) must be String -Unsupported scalar kinds are rejected +Two or six index arguments +1st argument (method) must be String and hnsw +2nd argument (distance function) must be String and L2Distance or cosineDistance +3nd argument (quantization), if given, must be String and f32, f16, ... 
+4nd argument (M), if given, must be UInt64 and > 1 +5nd argument (ef_construction), if given, must be UInt64 and > 0 +6nd argument (ef_search), if given, must be UInt64 and > 0 Must be created on single column Must be created on Array(Float32) columns Rejects INSERTs of Arrays with different sizes diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index 912f7d7fcae..7c2ddfe81fc 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -6,32 +6,46 @@ SET allow_experimental_vector_similarity_index = 1; DROP TABLE IF EXISTS tab; -SELECT 'At most two index arguments'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('too', 'many', 'args')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +SELECT 'Two or six index arguments'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant_have_one_arg')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'three_args')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'more', 'than', 'six', 'args', '!')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -SELECT '1st argument (distance function) must be String'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +SELECT '1st argument (method) must be String and hnsw'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity(3, 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('not_hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT 'Unsupported distance functions are rejected'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('invalidDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '2nd argument (distance function) must be String and L2Distance or cosineDistance'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'invalid_distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT '2nd argument (scalar kind) must be String'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } - -SELECT 'Unsupported scalar kinds are rejected'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('L2Distance', 'invalidKind')) ENGINE = 
MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '3nd argument (quantization), if given, must be String and f32, f16, ...'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1, 1, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'invalid', 2, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '4nd argument (M), if given, must be UInt64 and > 1'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 'invalid', 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 1, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '5nd argument (ef_construction), if given, must be UInt64 and > 0'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 'invalid', 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 0, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '6nd argument (ef_search), if given, must be UInt64 and > 0'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 1, 'invalid')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 1, 0)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } SELECT 'Must be created on single column'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } SELECT 'Must be created on Array(Float32) columns'; SET allow_suspicious_low_cardinality_types = 1; -CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec UInt64, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id 
Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } SELECT 'Rejects INSERTs of Arrays with different sizes'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql new file mode 100644 index 00000000000..f1cfc041233 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that multiple vector similarity indexes can be created on the same column (even if that makes no sense) + +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); + +ALTER TABLE tab ADD INDEX idx(vec) TYPE minmax; +ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); +ALTER TABLE tab ADD INDEX vec_idx2(vec) TYPE vector_similarity('hnsw', 'L2Distance'); -- silly but creating the same index also works for non-vector indexes ... 
+ +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 50537ad6244..dbf0fca32ab 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -10,7 +10,7 @@ SELECT '10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule DROP TABLE IF EXISTS tab; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); @@ -34,7 +34,7 @@ DROP TABLE tab; SELECT '12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity() GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); SELECT '- ORDER-BY-type'; @@ -56,9 +56,9 @@ DROP TABLE tab; SELECT 'Special cases'; -- Not a systematic test, just to check that no bad things happen. 
--- Just for jun, use metric = 'cosineDistance', scalarKind = 'f64' +-- Test with non-default metric, M, ef_construction, ef_search -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cosineDistance', 'f64') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 42, 99, 66) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); SELECT '- ORDER-BY-type'; From fb76cb90b1badef334b96b61d976136fd38d535d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 11 Aug 2024 09:31:36 +0000 Subject: [PATCH 2117/2361] Allow un-quoted skip index parameters Previously, only this syntax to create a skip index worked: INDEX index_name column_name TYPE vector_similarity('hnsw', 'L2Distance') Now, this syntax will work as well: INDEX index_name column_name TYPE vector_similarity(hnsw, L2Distance) --- .../mergetree-family/annindexes.md | 15 +++++++++++- src/Storages/IndicesDescription.cpp | 12 +++++++--- ...search_unquoted_index_parameters.reference | 0 ...ector_search_unquoted_index_parameters.sql | 23 +++++++++++++++++++ 4 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 354fac6ea74..e73d6f07a32 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -43,7 +43,7 @@ CREATE TABLE table ( id Int64, vectors Array(Float32), - INDEX index_name vec TYPE vector_similarity(method, distance_function[, quantization, connectivity, expansion_add, expansion_search]) [GRANULARITY N] + INDEX index_name vectors TYPE vector_similarity(method, distance_function[, quantization, connectivity, expansion_add, expansion_search]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; @@ -59,6 +59,19 @@ Parameters: - `ef_construction`: (optional, default: 128) - `ef_search`: (optional, default: 64) +Example: + +```sql +CREATE TABLE table +( + id Int64, + vectors Array(Float32), + INDEX idx vectors TYPE vector_similarity('hnsw', 'L2Distance') -- Alternative syntax: TYPE vector_similarity(hnsw, L2Distance) +) +ENGINE = MergeTree +ORDER BY id; +``` + Vector similarity indexes are based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW algorithm](https://arxiv.org/abs/1603.09320), i.e., a hierarchical graph where each point represents a vector and the edges represent similarity. Such hierarchical structures can be very efficient on large collections. 
They may often fetch 0.05% or less data from the diff --git a/src/Storages/IndicesDescription.cpp b/src/Storages/IndicesDescription.cpp index cef8fd85f97..753fbf1d635 100644 --- a/src/Storages/IndicesDescription.cpp +++ b/src/Storages/IndicesDescription.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -130,10 +131,15 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast { for (size_t i = 0; i < index_type->arguments->children.size(); ++i) { - const auto * argument = index_type->arguments->children[i]->as(); - if (!argument) + const auto & child = index_type->arguments->children[i]; + if (const auto * ast_literal = child->as(); ast_literal != nullptr) + /// E.g. INDEX index_name column_name TYPE vector_similarity('hnsw', 'f32') + result.arguments.emplace_back(ast_literal->value); + else if (const auto * ast_identifier = child->as(); ast_identifier != nullptr) + /// E.g. INDEX index_name column_name TYPE vector_similarity(hnsw, f32) + result.arguments.emplace_back(ast_identifier->name()); + else throw Exception(ErrorCodes::INCORRECT_QUERY, "Only literals can be skip index arguments"); - result.arguments.emplace_back(argument->value); } } diff --git a/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.reference b/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql b/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql new file mode 100644 index 00000000000..da6494bf831 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; + +-- Tests that quoted and unquoted parameters can be passed to vector search indexes. 
+ +DROP TABLE IF EXISTS tab1; +DROP TABLE IF EXISTS tab2; + +CREATE TABLE tab1 (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); +CREATE TABLE tab2 (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity(hnsw, L2Distance)); + +DROP TABLE tab1; +DROP TABLE tab2; + +CREATE TABLE tab1 (id Int32, vec Array(Float32), PRIMARY KEY id); +CREATE TABLE tab2 (id Int32, vec Array(Float32), PRIMARY KEY id); + +ALTER TABLE tab1 ADD INDEX idx1(vec) TYPE vector_similarity('hnsw', 'L2Distance'); +ALTER TABLE tab2 ADD INDEX idx2(vec) TYPE vector_similarity(hnsw, L2Distance); + +DROP TABLE tab1; +DROP TABLE tab2; From ea1cd665750f82bbeaf66f67b1d85e014afdec18 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:32:43 +0200 Subject: [PATCH 2118/2361] fix tidy --- src/Storages/VirtualColumnUtils.cpp | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 1abac56d266..3143c7f78f6 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -46,15 +45,7 @@ #include #include #include -#include "Functions/FunctionsLogical.h" -#include "Functions/IFunction.h" -#include "Functions/IFunctionAdaptors.h" -#include "Functions/indexHint.h" #include -#include -#include -#include -#include namespace DB From 44d4784da559cc3b73e71acdee95c39cc3403c42 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 15:52:39 +0000 Subject: [PATCH 2119/2361] Use std::string_view for lookups in hash table --- src/Columns/ColumnObject.cpp | 43 +++++++++---------- src/Columns/ColumnObject.h | 29 +++++++------ src/Common/StringHashForHeterogeneousLookup.h | 25 +++++++++++ src/Functions/JSONPaths.cpp | 28 ++++++------ 4 files changed, 76 insertions(+), 49 deletions(-) create mode 100644 src/Common/StringHashForHeterogeneousLookup.h diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 1f16c12f6ba..4ab4a4e441a 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -243,7 +244,7 @@ void ColumnObject::insertData(const char *, size_t) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertData is not supported for {}", getName()); } -ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(const String & path) +ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(const std::string_view path) { if (dynamic_paths.size() == max_dynamic_paths) return nullptr; @@ -435,7 +436,7 @@ void ColumnObject::doInsertFrom(const IColumn & src, size_t n) /// Second, insert dynamic paths and extend them if needed. /// We can reach the limit of dynamic paths, and in this case /// the rest of dynamic paths will be inserted into shared data. - std::vector src_dynamic_paths_for_shared_data; + std::vector src_dynamic_paths_for_shared_data; for (const auto & [path, column] : src_object_column.dynamic_paths) { /// Check if we already have such dynamic path. @@ -469,7 +470,7 @@ void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t l /// Second, insert dynamic paths and extend them if needed. /// We can reach the limit of dynamic paths, and in this case /// the rest of dynamic paths will be inserted into shared data. 
- std::vector src_dynamic_paths_for_shared_data; + std::vector src_dynamic_paths_for_shared_data; for (const auto & [path, column] : src_object_column.dynamic_paths) { /// Check if we already have such dynamic path. @@ -487,7 +488,7 @@ void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t l insertFromSharedDataAndFillRemainingDynamicPaths(src_object_column, std::move(src_dynamic_paths_for_shared_data), start, length); } -void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::ColumnObject & src_object_column, std::vector && src_dynamic_paths_for_shared_data, size_t start, size_t length) +void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::ColumnObject & src_object_column, std::vector && src_dynamic_paths_for_shared_data, size_t start, size_t length) { /// Paths in shared data are sorted, so paths from src_dynamic_paths_for_shared_data should be inserted properly /// to keep paths sorted. Let's sort them in advance. @@ -512,8 +513,8 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co for (size_t i = start; i != start + length; ++i) { /// Paths in src_dynamic_paths_for_shared_data are already sorted. - for (const auto & path : src_dynamic_paths_for_shared_data) - serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, path, *src_object_column.dynamic_paths.at(path), i); + for (const auto path : src_dynamic_paths_for_shared_data) + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, path, *src_object_column.dynamic_paths.find(path)->second, i); shared_data_offsets.push_back(shared_data_paths->size()); } } @@ -541,9 +542,9 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co size_t end = src_shared_data_offsets[row]; for (size_t i = offset; i != end; ++i) { - auto path = src_shared_data_paths->getDataAt(i); + auto path = src_shared_data_paths->getDataAt(i).toView(); /// Check if we have this path in dynamic paths. - if (auto it = dynamic_paths_ptrs.find(path.toString()); it != dynamic_paths_ptrs.end()) + if (auto it = dynamic_paths_ptrs.find(path); it != dynamic_paths_ptrs.end()) { /// Deserialize binary value into dynamic column from shared data. deserializeValueFromSharedData(src_shared_data_values, i, *it->second); @@ -555,8 +556,8 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co while (src_dynamic_paths_for_shared_data_index < src_dynamic_paths_for_shared_data.size() && src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index] < path) { - const auto & dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; - serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.at(dynamic_path), row); + const auto dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.find(dynamic_path)->second, row); ++src_dynamic_paths_for_shared_data_index; } @@ -569,8 +570,8 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co /// Insert remaining dynamic paths from src_dynamic_paths_for_shared_data. 
for (; src_dynamic_paths_for_shared_data_index != src_dynamic_paths_for_shared_data.size(); ++src_dynamic_paths_for_shared_data_index) { - const auto & dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; - serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.at(dynamic_path), row); + const auto dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.find(dynamic_path)->second, row); } shared_data_offsets.push_back(shared_data_paths->size()); @@ -584,7 +585,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co } } -void ColumnObject::serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const String & path, const IColumn & column, size_t n) +void ColumnObject::serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const std::string_view path, const IColumn & column, size_t n) { /// Don't store Null values in shared data. We consider Null value equivalent to the absence /// of this path in the row because we cannot distinguish these 2 cases for dynamic paths. @@ -700,11 +701,10 @@ const char * ColumnObject::deserializeAndInsertFromArena(const char * pos) auto path_size = unalignedLoad(pos); pos += sizeof(size_t); std::string_view path(pos, path_size); - String path_str(path); pos += path_size; /// Check if it's a typed path. In this case we should use /// deserializeAndInsertFromArena of corresponding column. - if (auto typed_it = typed_paths.find(path_str); typed_it != typed_paths.end()) + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) { pos = typed_it->second->deserializeAndInsertFromArena(pos); } @@ -712,19 +712,18 @@ const char * ColumnObject::deserializeAndInsertFromArena(const char * pos) /// to dynamic paths or shared data. else { - auto value_size = unalignedLoad(pos); pos += sizeof(size_t); std::string_view value(pos, value_size); pos += value_size; /// Check if we have this path in dynamic paths. - if (auto dynamic_it = dynamic_paths.find(path_str); dynamic_it != dynamic_paths.end()) + if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) { ReadBufferFromMemory buf(value.data(), value.size()); getDynamicSerialization()->deserializeBinary(*dynamic_it->second, buf, getFormatSettings()); } /// Try to add a new dynamic path. - else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path_str)) + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) { ReadBufferFromMemory buf(value.data(), value.size()); getDynamicSerialization()->deserializeBinary(*dynamic_path_column, buf, getFormatSettings()); @@ -773,7 +772,7 @@ const char * ColumnObject::skipSerializedInArena(const char * pos) const { auto path_size = unalignedLoad(pos); pos += sizeof(size_t); - String path(pos, path_size); + std::string_view path(pos, path_size); pos += path_size; if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) { @@ -1167,7 +1166,7 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) { /// Sort paths by total_number_of_non_null_values. 
- std::vector> paths_with_sizes; + std::vector> paths_with_sizes; paths_with_sizes.reserve(path_to_total_number_of_non_null_values.size()); for (const auto & [path, size] : path_to_total_number_of_non_null_values) paths_with_sizes.emplace_back(size, path); @@ -1176,8 +1175,8 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou /// Fill dynamic_paths with first max_dynamic_paths paths in sorted list. for (size_t i = 0; i != max_dynamic_paths; ++i) { - dynamic_paths[paths_with_sizes[i].second] = ColumnDynamic::create(max_dynamic_types); - dynamic_paths_ptrs[paths_with_sizes[i].second] = assert_cast(dynamic_paths[paths_with_sizes[i].second].get()); + dynamic_paths.emplace(paths_with_sizes[i].second, ColumnDynamic::create(max_dynamic_types)); + dynamic_paths_ptrs.emplace(paths_with_sizes[i].second, assert_cast(dynamic_paths.find(paths_with_sizes[i].second)->second.get())); } } /// Use all dynamic paths from all source columns. diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index fbb68897e08..ecb6c4e0e15 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -9,7 +9,7 @@ #include #include #include - +#include #include namespace DB @@ -44,6 +44,9 @@ private: size_t max_dynamic_types_, const Statistics & statistics_ = {}); + /// Use StringHashForHeterogeneousLookup hash for hash maps to be able to use std::string_view in find() method. + using PathToColumnMap = std::unordered_map; + using PathToDynamicColumnPtrMap = std::unordered_map; public: /** Create immutable column using immutable arguments. This arguments may be shared with other columns. * Use mutate in order to make mutable column and mutate shared nested columns. @@ -158,14 +161,14 @@ public: bool hasDynamicStructure() const override { return true; } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; - const std::unordered_map & getTypedPaths() const { return typed_paths; } - std::unordered_map & getTypedPaths() { return typed_paths; } + const PathToColumnMap & getTypedPaths() const { return typed_paths; } + PathToColumnMap & getTypedPaths() { return typed_paths; } - const std::unordered_map & getDynamicPaths() const { return dynamic_paths; } - std::unordered_map & getDynamicPaths() { return dynamic_paths; } + const PathToColumnMap & getDynamicPaths() const { return dynamic_paths; } + PathToColumnMap & getDynamicPaths() { return dynamic_paths; } - const std::unordered_map & getDynamicPathsPtrs() const { return dynamic_paths_ptrs; } - std::unordered_map & getDynamicPathsPtrs() { return dynamic_paths_ptrs; } + const PathToDynamicColumnPtrMap & getDynamicPathsPtrs() const { return dynamic_paths_ptrs; } + PathToDynamicColumnPtrMap & getDynamicPathsPtrs() { return dynamic_paths_ptrs; } const Statistics & getStatistics() const { return statistics; } @@ -198,12 +201,12 @@ public: /// Try to add new dynamic path. Returns pointer to the new dynamic /// path column or nullptr if limit on dynamic paths is reached. 
- ColumnDynamic * tryToAddNewDynamicPath(const String & path); + ColumnDynamic * tryToAddNewDynamicPath(const std::string_view path); void setDynamicPaths(const std::vector & paths); void setStatistics(const Statistics & statistics_) { statistics = statistics_; } - void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const String & path, const IColumn & column, size_t n); + void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const std::string_view path, const IColumn & column, size_t n); void deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const; /// Paths in shared data are sorted in each row. Use this method to find the lower bound for specific path in the row. @@ -212,19 +215,19 @@ public: static void fillPathColumnFromSharedData(IColumn & path_column, StringRef path, const ColumnPtr & shared_data_column, size_t start, size_t end); private: - void insertFromSharedDataAndFillRemainingDynamicPaths(const ColumnObject & src_object_column, std::vector && src_dynamic_paths_for_shared_data, size_t start, size_t length); + void insertFromSharedDataAndFillRemainingDynamicPaths(const ColumnObject & src_object_column, std::vector && src_dynamic_paths_for_shared_data, size_t start, size_t length); void serializePathAndValueIntoArena(Arena & arena, const char *& begin, StringRef path, StringRef value, StringRef & res) const; /// Map path -> column for paths with explicitly specified types. /// This set of paths is constant and cannot be changed. - std::unordered_map typed_paths; + PathToColumnMap typed_paths; /// Map path -> column for dynamically added paths. All columns /// here are Dynamic columns. This set of paths can be extended /// during inerts into the column. - std::unordered_map dynamic_paths; + PathToColumnMap dynamic_paths; /// Store and use pointers to ColumnDynamic to avoid virtual calls. /// With hundreds of dynamic paths these virtual calls are noticeable. - std::unordered_map dynamic_paths_ptrs; + PathToDynamicColumnPtrMap dynamic_paths_ptrs; /// Shared storage for all other paths and values. It's filled /// when the number of dynamic paths reaches the limit. /// It has type Array(Tuple(String, String)) and stores diff --git a/src/Common/StringHashForHeterogeneousLookup.h b/src/Common/StringHashForHeterogeneousLookup.h new file mode 100644 index 00000000000..0983fd460d6 --- /dev/null +++ b/src/Common/StringHashForHeterogeneousLookup.h @@ -0,0 +1,25 @@ +#pragma once +#include + +namespace DB +{ + +/// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0919r3.html +struct StringHashForHeterogeneousLookup +{ + using hash_type = std::hash; + using transparent_key_equal = std::equal_to<>; + using is_transparent = void; // required to make find() work with different type than key_type + + auto operator()(const std::string_view view) const + { + return hash_type()(view); + } + + auto operator()(const std::string & str) const + { + return hash_type()(str); + } +}; + +} diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index 4a84cec711b..35613e40aac 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -139,7 +139,7 @@ private: { /// Collect all dynamic paths. 
const auto & dynamic_path_columns = column_object.getDynamicPaths(); - std::vector dynamic_paths; + std::vector dynamic_paths; dynamic_paths.reserve(dynamic_path_columns.size()); for (const auto & [path, _] : dynamic_path_columns) dynamic_paths.push_back(path); @@ -149,11 +149,11 @@ private: size_t size = column_object.size(); for (size_t i = 0; i != size; ++i) { - for (const auto & path : dynamic_paths) + for (const auto path : dynamic_paths) { /// Don't include path if it contains NULL, because we consider /// it to be equivalent to the absence of this path in this row. - if (!dynamic_path_columns.at(path)->isNullAt(i)) + if (!dynamic_path_columns.find(path)->second->isNullAt(i)) data.insertData(path.data(), path.size()); } offsets.push_back(data.size()); @@ -162,7 +162,7 @@ private: } /// Collect all paths: typed, dynamic and paths from shared data. - std::vector sorted_dynamic_and_typed_paths; + std::vector sorted_dynamic_and_typed_paths; const auto & typed_path_columns = column_object.getTypedPaths(); const auto & dynamic_path_columns = column_object.getDynamicPaths(); sorted_dynamic_and_typed_paths.reserve(typed_path_columns.size() + dynamic_path_columns.size()); @@ -184,22 +184,22 @@ private: size_t sorted_paths_index = 0; for (size_t j = start; j != end; ++j) { - auto shared_data_path = shared_data_paths->getDataAt(j); + auto shared_data_path = shared_data_paths->getDataAt(j).toView(); while (sorted_paths_index != sorted_dynamic_and_typed_paths.size() && sorted_dynamic_and_typed_paths[sorted_paths_index] < shared_data_path) { - const auto & path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + const auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; /// If it's dynamic path include it only if it's not NULL. if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) data.insertData(path.data(), path.size()); ++sorted_paths_index; } - data.insertData(shared_data_path.data, shared_data_path.size); + data.insertData(shared_data_path.data(), shared_data_path.size()); } for (; sorted_paths_index != sorted_dynamic_and_typed_paths.size(); ++sorted_paths_index) { - const auto & path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + const auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) data.insertData(path.data(), path.size()); } @@ -220,7 +220,7 @@ private: if constexpr (Impl::paths_mode == PathsMode::DYNAMIC_PATHS) { const auto & dynamic_path_columns = column_object.getDynamicPaths(); - std::vector sorted_dynamic_paths; + std::vector sorted_dynamic_paths; sorted_dynamic_paths.reserve(dynamic_path_columns.size()); for (const auto & [path, _] : dynamic_path_columns) sorted_dynamic_paths.push_back(path); @@ -230,9 +230,9 @@ private: /// Iterate over all rows and extract types from dynamic columns. for (size_t i = 0; i != column_object.size(); ++i) { - for (auto & path : sorted_dynamic_paths) + for (const auto path : sorted_dynamic_paths) { - auto column = dynamic_path_columns.at(path); + const auto & column = dynamic_path_columns.find(path)->second; if (!column->isNullAt(i)) { auto type = getDynamicValueType(column, i); @@ -272,7 +272,7 @@ private: } /// Iterate over all rows and extract types from dynamic columns from dynamic paths and from values in shared data. 
- std::vector> sorted_typed_and_dynamic_paths_with_types; + std::vector> sorted_typed_and_dynamic_paths_with_types; const auto & typed_path_types = type_object.getTypedPaths(); const auto & dynamic_path_columns = column_object.getDynamicPaths(); sorted_typed_and_dynamic_paths_with_types.reserve(typed_path_types.size() + dynamic_path_columns.size()); @@ -294,7 +294,7 @@ private: size_t sorted_paths_index = 0; for (size_t j = start; j != end; ++j) { - auto shared_data_path = shared_data_paths->getDataAt(j); + auto shared_data_path = shared_data_paths->getDataAt(j).toView(); auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j)); /// Skip NULL values. if (!type_name) @@ -319,7 +319,7 @@ private: ++sorted_paths_index; } - paths_column->insertData(shared_data_path.data, shared_data_path.size); + paths_column->insertData(shared_data_path.data(), shared_data_path.size()); types_column->insertData(type_name->data(), type_name->size()); } From 44e267ec76bcdb625a8bb6e5ffccc7bdc74d5cef Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 15:58:40 +0000 Subject: [PATCH 2120/2361] Fix conflicts and style check --- src/Columns/ColumnObjectDeprecated.cpp | 4 ++-- src/DataTypes/DataTypeObject.cpp | 2 -- src/DataTypes/DataTypeObjectDeprecated.cpp | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Columns/ColumnObjectDeprecated.cpp b/src/Columns/ColumnObjectDeprecated.cpp index d3f23dc6b57..d03b1d0df82 100644 --- a/src/Columns/ColumnObjectDeprecated.cpp +++ b/src/Columns/ColumnObjectDeprecated.cpp @@ -698,7 +698,7 @@ void ColumnObjectDeprecated::forEachSubcolumnRecursively(RecursiveMutableColumnC void ColumnObjectDeprecated::insert(const Field & field) { - const auto & object = field.get(); + const auto & object = field.safeGet(); HashSet inserted_paths; size_t old_size = size(); @@ -754,7 +754,7 @@ void ColumnObjectDeprecated::get(size_t n, Field & res) const { assert(n < size()); res = Object(); - auto & object = res.get(); + auto & object = res.safeGet(); for (const auto & entry : subcolumns) { diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 4f88bed8de8..b5d1f429001 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -26,8 +26,6 @@ #endif #include - - namespace DB { diff --git a/src/DataTypes/DataTypeObjectDeprecated.cpp b/src/DataTypes/DataTypeObjectDeprecated.cpp index d9ec70ca7cc..07f9c116e58 100644 --- a/src/DataTypes/DataTypeObjectDeprecated.cpp +++ b/src/DataTypes/DataTypeObjectDeprecated.cpp @@ -72,7 +72,7 @@ static DataTypePtr create(const ASTPtr & arguments) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Object data type family must have a const string as its schema name parameter"); - return std::make_shared(literal->value.get(), is_nullable); + return std::make_shared(literal->value.safeGet(), is_nullable); } void registerDataTypeObjectDeprecated(DataTypeFactory & factory) From 3a6e05eb43cbb9937cded286ac7259b2f7168057 Mon Sep 17 00:00:00 2001 From: Yarik Briukhovetskyi <114298166+yariks5s@users.noreply.github.com> Date: Mon, 12 Aug 2024 18:03:42 +0200 Subject: [PATCH 2121/2361] try to fix includes --- src/Storages/VirtualColumnUtils.cpp | 55 ++++++++++++++++------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 3143c7f78f6..d932f5cc469 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -1,43 +1,40 @@ #include 
#include -#include -#include -#include -#include -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include + #include -#include -#include -#include #include +#include +#include +#include #include -#include -#include + #include +#include #include +#include #include #include -#include -#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include #include #include -#include #include +#include #include + +#include #include #include #include @@ -45,7 +42,15 @@ #include #include #include +#include "Functions/FunctionsLogical.h" +#include "Functions/IFunction.h" +#include "Functions/IFunctionAdaptors.h" +#include "Functions/indexHint.h" #include +#include +#include +#include +#include namespace DB From 858b7e55d0df3db1412d538f701c30584b5783bf Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Mon, 12 Aug 2024 16:16:50 +0000 Subject: [PATCH 2122/2361] Improve condition in case the default column consumes slightly more memory It never happened in the few hundreds of tests I ran successfully, but we'd rather be safe than sorry. --- .../01903_correct_block_size_prediction_with_default.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh index 075d9a1dacf..1482730af2c 100755 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh @@ -28,7 +28,8 @@ function test() SYSTEM FLUSH LOGS; WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), memory_2 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) - SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage FROM memory_1, memory_2;" + SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage OR + memory_2.memory_usage <= 1.2 * memory_1.memory_usage FROM memory_1, memory_2;" } test "" From f7af4c5643af2ee87b81a7972c0bb91cf723c8a2 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Aug 2024 17:27:43 +0100 Subject: [PATCH 2123/2361] don't report system-wide metrics when cgroup metrics present --- src/Common/AsynchronousMetrics.cpp | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 02c130d3caa..9b6a7428411 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -75,12 +75,8 @@ AsynchronousMetrics::AsynchronousMetrics( , protocol_server_metrics_func(protocol_server_metrics_func_) { #if defined(OS_LINUX) - openFileIfExists("/proc/meminfo", meminfo); - openFileIfExists("/proc/loadavg", loadavg); - openFileIfExists("/proc/stat", proc_stat); openFileIfExists("/proc/cpuinfo", cpuinfo); openFileIfExists("/proc/sys/fs/file-nr", file_nr); - openFileIfExists("/proc/uptime", uptime); openFileIfExists("/proc/net/dev", net_dev); /// CGroups v2 @@ -103,6 +99,19 @@ AsynchronousMetrics::AsynchronousMetrics( if (!cgroupcpu_stat) openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); + if (!cgroupcpu_stat && !cgroupcpuacct_stat) + { + /// The following metrics are not 
cgroup-aware and we've found cgroup-specific metric files for the similar metrics, + /// so we're better not reporting them at all to avoid confusion + openFileIfExists("/proc/loadavg", loadavg); + openFileIfExists("/proc/stat", proc_stat); + openFileIfExists("/proc/uptime", uptime); + } + + /// The same story for memory metrics + if (!cgroupmem_limit_in_bytes) + openFileIfExists("/proc/meminfo", meminfo); + openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count); openFileIfExists("/proc/self/maps", vm_maps); @@ -1193,8 +1202,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) tryLogCurrentException(__PRETTY_FUNCTION__); } } - - if (meminfo) + else if (meminfo) { try { From 0c209242b1331cd714be186239e90a68acc44eae Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 16:28:18 +0000 Subject: [PATCH 2124/2361] Fix build --- src/DataTypes/Serializations/SerializationObject.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 9091702326a..68d25eaeaff 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -466,7 +466,7 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( void SerializationObject::serializeBinary(const Field & field, WriteBuffer & ostr, const DB::FormatSettings & settings) const { - const auto & object = field.get(); + const auto & object = field.safeGet(); /// Serialize number of paths and then pairs (path, value). writeVarUInt(object.size(), ostr); for (const auto & [path, value] : object) From f0f10bc0099e659bfc0bf31079e89832f9db4b17 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Aug 2024 17:30:12 +0100 Subject: [PATCH 2125/2361] remove cgroupsV2MemoryControllerEnabled() --- base/base/cgroupsv2.cpp | 24 ------------------------ base/base/cgroupsv2.h | 4 ---- base/base/getMemoryAmount.cpp | 3 --- 3 files changed, 31 deletions(-) diff --git a/base/base/cgroupsv2.cpp b/base/base/cgroupsv2.cpp index 4372696c2b7..d8f95b23ae7 100644 --- a/base/base/cgroupsv2.cpp +++ b/base/base/cgroupsv2.cpp @@ -27,27 +27,6 @@ bool cgroupsV2Enabled() #endif } -bool cgroupsV2MemoryControllerEnabled() -{ -#if defined(OS_LINUX) - chassert(cgroupsV2Enabled()); - /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available - /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file - /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file. 
- fs::path cgroup_dir = cgroupV2PathOfProcess(); - if (cgroup_dir.empty()) - return false; - std::ifstream controllers_file(cgroup_dir / "cgroup.controllers"); - if (!controllers_file.is_open()) - return false; - std::string controllers; - std::getline(controllers_file, controllers); - return controllers.find("memory") != std::string::npos; -#else - return false; -#endif -} - fs::path cgroupV2PathOfProcess() { #if defined(OS_LINUX) @@ -77,9 +56,6 @@ std::optional getCgroupsV2PathContainingFile(std::string_view file_ if (!cgroupsV2Enabled()) return {}; - if (!cgroupsV2MemoryControllerEnabled()) - return {}; - fs::path current_cgroup = cgroupV2PathOfProcess(); if (current_cgroup.empty()) return {}; diff --git a/base/base/cgroupsv2.h b/base/base/cgroupsv2.h index 9d8e178a866..925a399471e 100644 --- a/base/base/cgroupsv2.h +++ b/base/base/cgroupsv2.h @@ -12,10 +12,6 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou /// Is cgroups v2 enabled on the system? bool cgroupsV2Enabled(); -/// Is the memory controller of cgroups v2 enabled on the system? -/// Assumes that cgroupsV2Enabled() is enabled. -bool cgroupsV2MemoryControllerEnabled(); - /// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup. /// Returns an empty path the cgroup cannot be determined. /// Assumes that cgroupsV2Enabled() is enabled. diff --git a/base/base/getMemoryAmount.cpp b/base/base/getMemoryAmount.cpp index 03aab1eac72..bbfbecdbffd 100644 --- a/base/base/getMemoryAmount.cpp +++ b/base/base/getMemoryAmount.cpp @@ -19,9 +19,6 @@ std::optional getCgroupsV2MemoryLimit() if (!cgroupsV2Enabled()) return {}; - if (!cgroupsV2MemoryControllerEnabled()) - return {}; - std::filesystem::path current_cgroup = cgroupV2PathOfProcess(); if (current_cgroup.empty()) return {}; From 05b595094868dd29e59ea9c766d0829f57ce94f9 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 12 Aug 2024 17:31:56 +0100 Subject: [PATCH 2126/2361] small fix --- base/base/cgroupsv2.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/base/cgroupsv2.cpp b/base/base/cgroupsv2.cpp index d8f95b23ae7..b4ca8271d64 100644 --- a/base/base/cgroupsv2.cpp +++ b/base/base/cgroupsv2.cpp @@ -60,8 +60,8 @@ std::optional getCgroupsV2PathContainingFile(std::string_view file_ if (current_cgroup.empty()) return {}; - /// Return the bottom-most nested current memory file. If there is no such file at the current - /// level, try again at the parent level as memory settings are inherited. + /// Return the bottom-most nested file. If there is no such file at the current + /// level, try again at the parent level as settings are inherited. 
while (current_cgroup != default_cgroups_mount.parent_path()) { const auto path = current_cgroup / file_name; From f8011d53d6ec21a3572c67179d7752f65f032260 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 17:14:43 +0000 Subject: [PATCH 2127/2361] Fix data types parsing --- src/Parsers/ParserCreateQuery.cpp | 34 ------------------- src/Parsers/ParserCreateQuery.h | 9 ----- src/Parsers/ParserDataType.cpp | 10 ++++-- .../queries/0_stateless/03205_json_syntax.sql | 2 +- .../03210_json_type_alter_add_column.sql.j2 | 1 + 5 files changed, 10 insertions(+), 46 deletions(-) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 62c5ff42ddf..85b2c52643b 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -53,40 +53,6 @@ ASTPtr parseComment(IParser::Pos & pos, Expected & expected) } - -bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) -{ - ParserToken open(TokenType::OpeningRoundBracket); - ParserToken close(TokenType::ClosingRoundBracket); - ParserIdentifier name_p; - ParserNameTypePairList columns_p; - - ASTPtr name; - ASTPtr columns; - - /// For now `name == 'Nested'` or `name == 'Tuple'`, probably alternative nested data structures will appear - if (!name_p.parse(pos, name, expected)) - return false; - - if (!open.ignore(pos, expected)) - return false; - - if (!columns_p.parse(pos, columns, expected)) - return false; - - if (!close.ignore(pos, expected)) - return false; - - auto func = std::make_shared(); - tryGetIdentifierNameInto(name, func->name); - - func->arguments = columns; - func->children.push_back(columns); - node = func; - - return true; -} - bool ParserSQLSecurity::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserToken s_eq(TokenType::Equals); diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 53a62deb22b..82da2e7ea0b 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -18,15 +18,6 @@ namespace DB { -/** A nested table. For example, Nested(UInt32 CounterID, FixedString(2) UserAgentMajor) - */ -class ParserNestedTable : public IParserBase -{ -protected: - const char * getName() const override { return "nested table"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; -}; - /** Parses sql security option. 
DEFINER = user_name SQL SECURITY DEFINER */ class ParserSQLSecurity : public IParserBase diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index fab9ed728a7..e941c05eba1 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -273,8 +273,14 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } else if (type_name == "Nested") { - ParserNestedTable nested_parser; - nested_parser.parse(pos, arg, expected); + ParserNameTypePair name_and_type_parser; + name_and_type_parser.parse(pos, arg, expected); + } + else if (type_name == "Tuple") + { + ParserNameTypePair name_and_type_parser; + ParserDataType only_type_parser; + name_and_type_parser.parse(pos, arg, expected) || only_type_parser.parse(pos, arg, expected); } else if (type_name == "AggregateFunction" || type_name == "SimpleAggregateFunction") { diff --git a/tests/queries/0_stateless/03205_json_syntax.sql b/tests/queries/0_stateless/03205_json_syntax.sql index 11fa513f265..e3c88c81d0d 100644 --- a/tests/queries/0_stateless/03205_json_syntax.sql +++ b/tests/queries/0_stateless/03205_json_syntax.sql @@ -36,5 +36,5 @@ create table test (json JSON(SKIP `some path`.`path some`)) engine=Memory; drop table test; create table test (json JSON(SKIP REGEXP '.*a.*')) engine=Memory; drop table test; -create table test (json JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, b.c.d String, SKIP g.d.a, SKIP o.g.a, SKIP REGEXP '.*a.*', SKIP REGEXP 'abc')) engine=Memory; +create table test (json JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, b.c.d String, SKIP g.d.a, SKIP o.g.a, SKIP REGEXP '.*u.*', SKIP REGEXP 'abc')) engine=Memory; drop table test; diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 b/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 index 8ab9f5181e3..add57928804 100644 --- a/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 @@ -2,6 +2,7 @@ set allow_experimental_dynamic_type = 1; set allow_experimental_variant_type = 1; +set allow_experimental_json_type = 1; set use_variant_as_common_type = 1; drop table if exists test; From c22265b889684b7fa34ba6816ce3910143ef7226 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Aug 2024 17:11:11 +0000 Subject: [PATCH 2128/2361] Some fixups --- docs/en/operations/query-cache.md | 16 +++++----- docs/en/operations/settings/settings.md | 8 ++--- .../operations/system-tables/query_cache.md | 4 +-- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- src/Interpreters/Cache/QueryCache.cpp | 25 ++++++++-------- src/Interpreters/Cache/QueryCache.h | 11 ++++--- src/Interpreters/executeQuery.cpp | 5 ++-- .../System/StorageSystemQueryCache.cpp | 16 +++++----- .../02494_query_cache_tag.reference | 12 ++++---- .../0_stateless/02494_query_cache_tag.sql | 30 ++++++++++--------- 11 files changed, 66 insertions(+), 65 deletions(-) diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index a6c4d74f4ac..384938e28f6 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -143,16 +143,18 @@ value can be specified at session, profile or query level using setting [query_c Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. 
To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). -Entries in the query cache can separate by tag, using setting [query_cache_tag](settings/settings.md#query-cache-tag). Queries with different tags are considered different entries. For example, the result of query +Sometimes it is useful to keep multiple results for the same query cached. This can be achieved using setting +[query_cache_tag](settings/settings.md#query-cache-tag) that acts as as a label (or namespace) for a query cache entries. The query cache +considers results of the same query with different tags different. -``` sql -SELECT 1 SETTINGS use_query_cache = true; -SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one'; -SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one diff'; +Example for creating three different query cache entries for the same query: + +```sql +SELECT 1 SETTINGS use_query_cache = true; -- query_cache_tag is implicitly '' (empty string) +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 1'; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 2'; ``` -have different entries in the query cache, find the specified tag in system table [system.query_cache](system-tables/query_cache.md) - ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation, etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting [query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 7b855665efb..e4a126249ca 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1802,14 +1802,14 @@ Default value: `0`. ## query_cache_tag {#query-cache-tag} -An arbitrary string to separate entries in the [query cache](../query-cache.md). -Queries with different values of this setting are considered different. +A string which acts as a label for [query cache](../query-cache.md) entries. +The same queries with different tags are considered different by the query cache. Possible values: -- string: name of query cache tag +- Any string -Default value: `''`. +Default value: `''` ## query_cache_max_size_in_bytes {#query-cache-max-size-in-bytes} diff --git a/docs/en/operations/system-tables/query_cache.md b/docs/en/operations/system-tables/query_cache.md index 393b37d3616..9c48574a329 100644 --- a/docs/en/operations/system-tables/query_cache.md +++ b/docs/en/operations/system-tables/query_cache.md @@ -9,12 +9,12 @@ Columns: - `query` ([String](../../sql-reference/data-types/string.md)) — Query string. - `result_size` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Size of the query cache entry. +- `tag` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Tag of the query cache entry. - `stale` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is stale. - `shared` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is shared between multiple users. - `compressed` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is compressed. 
- `expires_at` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the query cache entry becomes stale. - `key_hash` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — A hash of the query string, used as a key to find query cache entries. -- `tag` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — An arbitrary string to separate entries in the query cache. **Example** @@ -27,12 +27,12 @@ Row 1: ────── query: SELECT 1 SETTINGS use_query_cache = 1 result_size: 128 +tag: stale: 0 shared: 0 compressed: 1 expires_at: 2023-10-13 13:35:45 key_hash: 12188185624808016954 -tag: 1 row in set. Elapsed: 0.004 sec. ``` diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 75579f20187..f9ffab0ea57 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -676,7 +676,7 @@ class IColumn; M(Bool, query_cache_squash_partial_results, true, "Squash partial result blocks to blocks of size 'max_block_size'. Reduces performance of inserts into the query cache but improves the compressability of cache entries.", 0) \ M(Seconds, query_cache_ttl, 60, "After this time in seconds entries in the query cache become stale", 0) \ M(Bool, query_cache_share_between_users, false, "Allow other users to read entry in the query cache", 0) \ - M(String, query_cache_tag, "", "An arbitrary string to separate entries in the query cache. Queries with different values of this setting are considered different.", 0) \ + M(String, query_cache_tag, "", "A string which acts as a label for query cache entries. The same queries with different tags are considered different by the query cache.", 0) \ M(Bool, enable_sharing_sets_for_mutations, true, "Allow sharing set objects build for IN subqueries between different tasks of the same mutation. 
This reduces memory usage and CPU consumption", 0) \ \ M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) function countIf() function when logically equivalent", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 8fd16504e95..0528287e83e 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -82,9 +82,9 @@ static std::initializer_list user_id_, const std::vector & current_user_roles_, bool is_shared_, std::chrono::time_point expires_at_, - bool is_compressed_, - const String & tag_) - : ast_hash(calculateAstHash(ast_, current_database, settings, tag_)) + bool is_compressed_) + : ast_hash(calculateAstHash(ast_, current_database, settings)) , header(header_) , user_id(user_id_) , current_user_roles(current_user_roles_) @@ -247,12 +242,18 @@ QueryCache::Key::Key( , expires_at(expires_at_) , is_compressed(is_compressed_) , query_string(queryStringFromAST(ast_)) - , tag(tag_) + , tag(settings.query_cache_tag) { } -QueryCache::Key::Key(ASTPtr ast_, const String & current_database, const Settings & settings, std::optional user_id_, const std::vector & current_user_roles_, const String & tag_) - : QueryCache::Key(ast_, current_database, settings, {}, user_id_, current_user_roles_, false, std::chrono::system_clock::from_time_t(1), false, tag_) /// dummy values for everything != AST, current database, user name/roles +QueryCache::Key::Key( + ASTPtr ast_, + const String & current_database, + const Settings & settings, + std::optional user_id_, + const std::vector & current_user_roles_) + : QueryCache::Key(ast_, current_database, settings, {}, user_id_, current_user_roles_, false, std::chrono::system_clock::from_time_t(1), false) + /// ^^ dummy values for everything != AST, current database, user name/roles { } diff --git a/src/Interpreters/Cache/QueryCache.h b/src/Interpreters/Cache/QueryCache.h index 54de5edb145..c7ebaf4d26a 100644 --- a/src/Interpreters/Cache/QueryCache.h +++ b/src/Interpreters/Cache/QueryCache.h @@ -88,8 +88,9 @@ public: /// SYSTEM.QUERY_CACHE. const String query_string; - /// An arbitrary string to separate entries in the query cache. - /// Queries with different values of this setting are considered different. + /// A tag (namespace) for distinguish multiple entries of the same query. + /// This member has currently no use besides that SYSTEM.QUERY_CACHE can populate the 'tag' column conveniently without having to + /// compute the tag from the query AST. const String tag; /// Ctor to construct a Key for writing into query cache. @@ -100,15 +101,13 @@ public: std::optional user_id_, const std::vector & current_user_roles_, bool is_shared_, std::chrono::time_point expires_at_, - bool is_compressed, - const String & tag_); + bool is_compressed); /// Ctor to construct a Key for reading from query cache (this operation only needs the AST + user name). 
Key(ASTPtr ast_, const String & current_database, const Settings & settings, - std::optional user_id_, const std::vector & current_user_roles_, - const String & tag_); + std::optional user_id_, const std::vector & current_user_roles_); bool operator==(const Key & other) const; }; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 6422d3128fa..fe87eed5570 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -1129,7 +1129,7 @@ static std::tuple executeQueryImpl( { if (can_use_query_cache && settings.enable_reads_from_query_cache) { - QueryCache::Key key(ast, context->getCurrentDatabase(), *settings_copy, context->getUserID(), context->getCurrentRoles(), settings.query_cache_tag); + QueryCache::Key key(ast, context->getCurrentDatabase(), *settings_copy, context->getUserID(), context->getCurrentRoles()); QueryCache::Reader reader = query_cache->createReader(key); if (reader.hasCacheEntryForKey()) { @@ -1258,8 +1258,7 @@ static std::tuple executeQueryImpl( context->getUserID(), context->getCurrentRoles(), settings.query_cache_share_between_users, std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl), - settings.query_cache_compress_entries, - settings.query_cache_tag); + settings.query_cache_compress_entries); const size_t num_query_runs = settings.query_cache_min_query_runs ? query_cache->recordQueryRun(key) : 1; /// try to avoid locking a mutex in recordQueryRun() if (num_query_runs <= settings.query_cache_min_query_runs) diff --git a/src/Storages/System/StorageSystemQueryCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp index f81d50e8806..b3532ba40a7 100644 --- a/src/Storages/System/StorageSystemQueryCache.cpp +++ b/src/Storages/System/StorageSystemQueryCache.cpp @@ -16,12 +16,12 @@ ColumnsDescription StorageSystemQueryCache::getColumnsDescription() { {"query", std::make_shared(), "Query string."}, {"result_size", std::make_shared(), "Size of the query cache entry."}, + {"tag", std::make_shared(std::make_shared()), "Tag of the query cache entry."}, {"stale", std::make_shared(), "If the query cache entry is stale."}, {"shared", std::make_shared(), "If the query cache entry is shared between multiple users."}, {"compressed", std::make_shared(), "If the query cache entry is compressed."}, {"expires_at", std::make_shared(), "When the query cache entry becomes stale."}, - {"key_hash", std::make_shared(), "A hash of the query string, used as a key to find query cache entries."}, - {"tag", std::make_shared(std::make_shared()), "An arbitrary string to separate entries in the query cache."} + {"key_hash", std::make_shared(), "A hash of the query string, used as a key to find query cache entries."} }; } @@ -53,12 +53,12 @@ void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr res_columns[0]->insert(key.query_string); /// approximates the original query string res_columns[1]->insert(QueryCache::QueryCacheEntryWeight()(*query_result)); - res_columns[2]->insert(key.expires_at < std::chrono::system_clock::now()); - res_columns[3]->insert(key.is_shared); - res_columns[4]->insert(key.is_compressed); - res_columns[5]->insert(std::chrono::system_clock::to_time_t(key.expires_at)); - res_columns[6]->insert(key.ast_hash.low64); /// query cache considers aliases (issue #56258) - res_columns[7]->insert(key.tag); + res_columns[2]->insert(key.tag); + res_columns[3]->insert(key.expires_at < std::chrono::system_clock::now()); + res_columns[4]->insert(key.is_shared); + 
res_columns[5]->insert(key.is_compressed); + res_columns[6]->insert(std::chrono::system_clock::to_time_t(key.expires_at)); + res_columns[7]->insert(key.ast_hash.low64); /// query cache considers aliases (issue #56258) } } diff --git a/tests/queries/0_stateless/02494_query_cache_tag.reference b/tests/queries/0_stateless/02494_query_cache_tag.reference index 055d3d4c5bb..f7be5c06ecf 100644 --- a/tests/queries/0_stateless/02494_query_cache_tag.reference +++ b/tests/queries/0_stateless/02494_query_cache_tag.reference @@ -1,14 +1,12 @@ 1 -1 +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = \'abc\' abc --- 1 1 -1 -2 +SELECT 1 SETTINGS use_query_cache = true +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = \'abc\' abc --- 1 1 -1 -2 -1 -3 +SELECT 1 SETTINGS use_query_cache = true abc +SELECT 1 SETTINGS use_query_cache = true def diff --git a/tests/queries/0_stateless/02494_query_cache_tag.sql b/tests/queries/0_stateless/02494_query_cache_tag.sql index 054607058e8..62d36f6ebe6 100644 --- a/tests/queries/0_stateless/02494_query_cache_tag.sql +++ b/tests/queries/0_stateless/02494_query_cache_tag.sql @@ -3,30 +3,32 @@ SYSTEM DROP QUERY CACHE; --- Cache the query after the query invocation -SELECT 1 SETTINGS use_query_cache = true; -SELECT COUNT(*) FROM system.query_cache; +-- Store the result a single query with a tag in the query cache and check that the system table knows about the tag +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; + +SELECT query, tag FROM system.query_cache; SELECT '---'; SYSTEM DROP QUERY CACHE; --- Queries with tag value of this setting or not are considered different cache entries. -SELECT 1 SETTINGS use_query_cache = true; -SELECT COUNT(*) FROM system.query_cache; -SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one'; -SELECT COUNT(*) FROM system.query_cache; +-- Store the result of the same query with two different tags. The cache should store two entries. +SELECT 1 SETTINGS use_query_cache = true; -- default query_cache_tag = '' +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; +SELECT query, tag FROM system.query_cache ORDER BY ALL; SELECT '---'; SYSTEM DROP QUERY CACHE; --- Queries with different tags values of this setting are considered different cache entries. +-- Like before but the tag is set standalone. + +SET query_cache_tag = 'abc'; SELECT 1 SETTINGS use_query_cache = true; -SELECT COUNT(*) FROM system.query_cache; -SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one'; -SELECT COUNT(*) FROM system.query_cache; -SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'one diff'; -SELECT COUNT(*) FROM system.query_cache; + +SET query_cache_tag = 'def'; +SELECT 1 SETTINGS use_query_cache = true; + +SELECT query, tag FROM system.query_cache ORDER BY ALL; SYSTEM DROP QUERY CACHE; From 38405dd7cdfb7189c1a1184c0eb8b3e23fda55e6 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 12 Aug 2024 18:14:22 +0000 Subject: [PATCH 2129/2361] add projection merge doc --- docs/en/operations/settings/merge-tree-settings.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index 67fa45c20cd..a3bd919d3ce 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1041,3 +1041,14 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%. 
This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only few distinct primary key values. High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting. + +### deduplicate_merge_projection_mode + +Whether to allow creating a projection for a table with a non-classic MergeTree engine, that is, anything other than (Replicated, Shared) MergeTree. If allowed, it also defines what to do with projections when merging: either drop or rebuild them. Classic MergeTree ignores this setting. +It also controls `OPTIMIZE DEDUPLICATE`, but has an effect on all MergeTree family members. + +Possible values: + +- throw, drop, rebuild + +Default value: throw \ No newline at end of file From aa7a2bcb02f6c2f48bcc7acca3bcec2f1a16130b Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Mon, 12 Aug 2024 20:34:02 +0200 Subject: [PATCH 2130/2361] Fix typo --- docs/en/sql-reference/functions/other-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 21e4a6599ea..4f51dc6b8d3 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -4073,7 +4073,7 @@ getSubcolumn(col_name, subcol_name) **Returned value** -- Returns the extracted sub-colum. +- Returns the extracted sub-column. **Example** From eab8594570e703a766f2f91ae3d13b0ed640b554 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Mon, 12 Aug 2024 20:35:33 +0200 Subject: [PATCH 2131/2361] Update aspell-dict.txt --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 862f38976ce..51246d990fa 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1697,6 +1697,8 @@ getOSKernelVersion getServerPort getSetting getSizeOfEnumType +getSubcolumn +getTypeSerializationStreams getblockinfo getevents ghcnd From 45a14fa0ce3ae94a374bbf955ba0fb7109b7e678 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Aug 2024 18:54:06 +0000 Subject: [PATCH 2132/2361] Fix spelling --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index ffd9fae7f45..03ec8e1752c 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -2115,6 +2115,7 @@ namenode namepassword nameprofile namequota +namespace namespaces natively nats From 469c1698b0dbf8a91a6e94a2bab0669f33bf7be2 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 12 Aug 2024 19:31:57 +0000 Subject: [PATCH 2133/2361] Fix 'Refresh set entry already exists' --- src/Interpreters/InterpreterSystemQuery.cpp | 17 +++-- src/Interpreters/InterpreterSystemQuery.h | 2 +- src/Storages/MaterializedView/RefreshSet.cpp | 75 +++++++++++-------- src/Storages/MaterializedView/RefreshSet.h | 23 +++--- src/Storages/MaterializedView/RefreshTask.cpp | 10 +-- src/Storages/MaterializedView/RefreshTask.h | 3 +- .../MaterializedView/RefreshTask_fwd.h | 1 + src/Storages/StorageMaterializedView.cpp | 1 -
.../System/StorageSystemViewRefreshes.cpp | 3 + 9 files changed, 77 insertions(+), 58 deletions(-) diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index ef6d1040c5e..1cd55a0020c 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -663,13 +663,16 @@ BlockIO InterpreterSystemQuery::execute() startStopAction(ActionLocks::ViewRefresh, false); break; case Type::REFRESH_VIEW: - getRefreshTask()->run(); + for (const auto & task : getRefreshTasks()) + task->run(); break; case Type::CANCEL_VIEW: - getRefreshTask()->cancel(); + for (const auto & task : getRefreshTasks()) + task->cancel(); break; case Type::TEST_VIEW: - getRefreshTask()->setFakeTime(query.fake_time_for_view); + for (const auto & task : getRefreshTasks()) + task->setFakeTime(query.fake_time_for_view); break; case Type::DROP_REPLICA: dropReplica(query); @@ -1242,15 +1245,15 @@ void InterpreterSystemQuery::flushDistributed(ASTSystemQuery & query) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SYSTEM RESTART DISK is not supported"); } -RefreshTaskHolder InterpreterSystemQuery::getRefreshTask() +RefreshTaskList InterpreterSystemQuery::getRefreshTasks() { auto ctx = getContext(); ctx->checkAccess(AccessType::SYSTEM_VIEWS); - auto task = ctx->getRefreshSet().getTask(table_id); - if (!task) + auto tasks = ctx->getRefreshSet().findTasks(table_id); + if (tasks.empty()) throw Exception( ErrorCodes::BAD_ARGUMENTS, "Refreshable view {} doesn't exist", table_id.getNameForLogs()); - return task; + return tasks; } diff --git a/src/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h index 776dd7915f0..f44fe930b04 100644 --- a/src/Interpreters/InterpreterSystemQuery.h +++ b/src/Interpreters/InterpreterSystemQuery.h @@ -74,7 +74,7 @@ private: void flushDistributed(ASTSystemQuery & query); [[noreturn]] void restartDisk(String & name); - RefreshTaskHolder getRefreshTask(); + RefreshTaskList getRefreshTasks(); AccessRightsElements getRequiredAccessForDDLOnCluster() const; void startStopAction(StorageActionBlockType action_type, bool start); diff --git a/src/Storages/MaterializedView/RefreshSet.cpp b/src/Storages/MaterializedView/RefreshSet.cpp index a3ef327dc24..43aa0ada99b 100644 --- a/src/Storages/MaterializedView/RefreshSet.cpp +++ b/src/Storages/MaterializedView/RefreshSet.cpp @@ -27,6 +27,7 @@ RefreshSet::Handle & RefreshSet::Handle::operator=(Handle && other) noexcept parent_set = std::exchange(other.parent_set, nullptr); id = std::move(other.id); dependencies = std::move(other.dependencies); + iter = std::move(other.iter); metric_increment = std::move(other.metric_increment); return *this; } @@ -39,21 +40,21 @@ RefreshSet::Handle::~Handle() void RefreshSet::Handle::rename(StorageID new_id) { std::lock_guard lock(parent_set->mutex); - parent_set->removeDependenciesLocked(id, dependencies); - auto it = parent_set->tasks.find(id); - auto task = it->second; - parent_set->tasks.erase(it); + RefreshTaskHolder task = *iter; + parent_set->removeDependenciesLocked(task, dependencies); + parent_set->removeTaskLocked(id, iter); id = new_id; - parent_set->tasks.emplace(id, task); - parent_set->addDependenciesLocked(id, dependencies); + iter = parent_set->addTaskLocked(id, task); + parent_set->addDependenciesLocked(task, dependencies); } void RefreshSet::Handle::changeDependencies(std::vector deps) { std::lock_guard lock(parent_set->mutex); - parent_set->removeDependenciesLocked(id, dependencies); + RefreshTaskHolder task 
= *iter; + parent_set->removeDependenciesLocked(task, dependencies); dependencies = std::move(deps); - parent_set->addDependenciesLocked(id, dependencies); + parent_set->addDependenciesLocked(task, dependencies); } void RefreshSet::Handle::reset() @@ -63,8 +64,8 @@ void RefreshSet::Handle::reset() { std::lock_guard lock(parent_set->mutex); - parent_set->removeDependenciesLocked(id, dependencies); - parent_set->tasks.erase(id); + parent_set->removeDependenciesLocked(*iter, dependencies); + parent_set->removeTaskLocked(id, iter); } parent_set = nullptr; @@ -76,37 +77,50 @@ RefreshSet::RefreshSet() = default; void RefreshSet::emplace(StorageID id, const std::vector & dependencies, RefreshTaskHolder task) { std::lock_guard guard(mutex); - auto [it, is_inserted] = tasks.emplace(id, task); - if (!is_inserted) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Refresh set entry already exists for table {}", id.getFullTableName()); - addDependenciesLocked(id, dependencies); + const auto iter = addTaskLocked(id, task); + addDependenciesLocked(task, dependencies); - task->setRefreshSetHandleUnlock(Handle(this, id, dependencies)); + task->setRefreshSetHandleUnlock(Handle(this, id, iter, dependencies)); } -void RefreshSet::addDependenciesLocked(const StorageID & id, const std::vector & dependencies) +RefreshTaskList::iterator RefreshSet::addTaskLocked(StorageID id, RefreshTaskHolder task) +{ + RefreshTaskList & list = tasks[id]; + list.push_back(task); + return std::prev(list.end()); +} + +void RefreshSet::removeTaskLocked(StorageID id, RefreshTaskList::iterator iter) +{ + const auto it = tasks.find(id); + it->second.erase(iter); + if (it->second.empty()) + tasks.erase(it); +} + +void RefreshSet::addDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies) { for (const StorageID & dep : dependencies) - dependents[dep].insert(id); + dependents[dep].insert(task); } -void RefreshSet::removeDependenciesLocked(const StorageID & id, const std::vector & dependencies) +void RefreshSet::removeDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies) { for (const StorageID & dep : dependencies) { auto & set = dependents[dep]; - set.erase(id); + set.erase(task); if (set.empty()) dependents.erase(dep); } } -RefreshTaskHolder RefreshSet::getTask(const StorageID & id) const +RefreshTaskList RefreshSet::findTasks(const StorageID & id) const { std::lock_guard lock(mutex); - if (auto task = tasks.find(id); task != tasks.end()) - return task->second; - return nullptr; + if (auto it = tasks.find(id); it != tasks.end()) + return it->second; + return {}; } RefreshSet::InfoContainer RefreshSet::getInfo() const @@ -116,26 +130,23 @@ RefreshSet::InfoContainer RefreshSet::getInfo() const lock.unlock(); InfoContainer res; - for (const auto & [id, task] : tasks_copy) - res.push_back(task->getInfo()); + for (const auto & [id, list] : tasks_copy) + for (const auto & task : list) + res.push_back(task->getInfo()); return res; } std::vector RefreshSet::getDependents(const StorageID & id) const { std::lock_guard lock(mutex); - std::vector res; auto it = dependents.find(id); if (it == dependents.end()) return {}; - for (const StorageID & dep_id : it->second) - if (auto task = tasks.find(dep_id); task != tasks.end()) - res.push_back(task->second); - return res; + return std::vector(it->second.begin(), it->second.end()); } -RefreshSet::Handle::Handle(RefreshSet * parent_set_, StorageID id_, std::vector dependencies_) +RefreshSet::Handle::Handle(RefreshSet * parent_set_, StorageID id_, 
RefreshTaskList::iterator iter_, std::vector dependencies_) : parent_set(parent_set_), id(std::move(id_)), dependencies(std::move(dependencies_)) - , metric_increment(CurrentMetrics::Increment(CurrentMetrics::RefreshableViews)) {} + , iter(iter_), metric_increment(CurrentMetrics::Increment(CurrentMetrics::RefreshableViews)) {} } diff --git a/src/Storages/MaterializedView/RefreshSet.h b/src/Storages/MaterializedView/RefreshSet.h index eff445023a6..7fb583fd316 100644 --- a/src/Storages/MaterializedView/RefreshSet.h +++ b/src/Storages/MaterializedView/RefreshSet.h @@ -5,12 +5,11 @@ #include #include #include +#include namespace DB { -using DatabaseAndTableNameSet = std::unordered_set; - enum class RefreshState : RefreshTaskStateUnderlying { Disabled = 0, @@ -46,8 +45,7 @@ struct RefreshInfo class RefreshSet { public: - /// RAII thing that unregisters a task and its dependencies in destructor. - /// Storage IDs must be unique. Not thread safe. + /// RAII thing that unregisters a task and its dependencies in destructor. Not thread safe. class Handle { friend class RefreshSet; @@ -73,9 +71,10 @@ public: RefreshSet * parent_set = nullptr; StorageID id = StorageID::createEmpty(); std::vector dependencies; + RefreshTaskList::iterator iter; // in parent_set->tasks[id] std::optional metric_increment; - Handle(RefreshSet * parent_set_, StorageID id_, std::vector dependencies_); + Handle(RefreshSet * parent_set_, StorageID id_, RefreshTaskList::iterator iter_, std::vector dependencies_); }; using InfoContainer = std::vector; @@ -84,7 +83,9 @@ public: void emplace(StorageID id, const std::vector & dependencies, RefreshTaskHolder task); - RefreshTaskHolder getTask(const StorageID & id) const; + /// Finds active refreshable view(s) by database and table name. + /// Normally there's at most one, but we allow name collisions here, just in case. + RefreshTaskList findTasks(const StorageID & id) const; InfoContainer getInfo() const; @@ -92,8 +93,8 @@ public: std::vector getDependents(const StorageID & id) const; private: - using TaskMap = std::unordered_map; - using DependentsMap = std::unordered_map; + using TaskMap = std::unordered_map; + using DependentsMap = std::unordered_map, StorageID::DatabaseAndTableNameHash, StorageID::DatabaseAndTableNameEqual>; /// Protects the two maps below, not locked for any nontrivial operations (e.g. operations that /// block or lock other mutexes). 
@@ -102,8 +103,10 @@ private: TaskMap tasks; DependentsMap dependents; - void addDependenciesLocked(const StorageID & id, const std::vector & dependencies); - void removeDependenciesLocked(const StorageID & id, const std::vector & dependencies); + RefreshTaskList::iterator addTaskLocked(StorageID id, RefreshTaskHolder task); + void removeTaskLocked(StorageID id, RefreshTaskList::iterator iter); + void addDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies); + void removeDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies); }; } diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index aa8f51d5295..0837eaf97fd 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -33,7 +33,6 @@ RefreshTask::RefreshTask( {} RefreshTaskHolder RefreshTask::create( - const StorageMaterializedView & view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy) { @@ -46,12 +45,9 @@ RefreshTaskHolder RefreshTask::create( t->refreshTask(); }); - std::vector deps; if (strategy.dependencies) for (auto && dependency : strategy.dependencies->children) - deps.emplace_back(dependency->as()); - - context->getRefreshSet().emplace(view.getStorageID(), deps, task); + task->initial_dependencies.emplace_back(dependency->as()); return task; } @@ -61,6 +57,7 @@ void RefreshTask::initializeAndStart(std::shared_ptr vi view_to_refresh = view; if (view->getContext()->getSettingsRef().stop_refreshable_materialized_views_on_startup) stop_requested = true; + view->getContext()->getRefreshSet().emplace(view->getStorageID(), initial_dependencies, shared_from_this()); populateDependencies(); advanceNextRefreshTime(currentTime()); refresh_task->schedule(); @@ -69,7 +66,8 @@ void RefreshTask::initializeAndStart(std::shared_ptr vi void RefreshTask::rename(StorageID new_id) { std::lock_guard guard(mutex); - set_handle.rename(new_id); + if (set_handle) + set_handle.rename(new_id); } void RefreshTask::alterRefreshParams(const DB::ASTRefreshStrategy & new_strategy) diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index 1f050a97cd9..623493f6aec 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -26,7 +26,6 @@ public: /// The only proper way to construct task static RefreshTaskHolder create( - const StorageMaterializedView & view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy); @@ -84,9 +83,11 @@ private: RefreshSchedule refresh_schedule; RefreshSettings refresh_settings; // TODO: populate, use, update on alter + std::vector initial_dependencies; RefreshSet::Handle set_handle; /// StorageIDs of our dependencies that we're waiting for. 
+ using DatabaseAndTableNameSet = std::unordered_set; DatabaseAndTableNameSet remaining_dependencies; bool time_arrived = false; diff --git a/src/Storages/MaterializedView/RefreshTask_fwd.h b/src/Storages/MaterializedView/RefreshTask_fwd.h index 1f366962eb6..9a0a122381e 100644 --- a/src/Storages/MaterializedView/RefreshTask_fwd.h +++ b/src/Storages/MaterializedView/RefreshTask_fwd.h @@ -11,5 +11,6 @@ class RefreshTask; using RefreshTaskStateUnderlying = UInt8; using RefreshTaskHolder = std::shared_ptr; using RefreshTaskObserver = std::weak_ptr; +using RefreshTaskList = std::list; } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 696136834d4..4c6c2fff209 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -203,7 +203,6 @@ StorageMaterializedView::StorageMaterializedView( { fixed_uuid = false; refresher = RefreshTask::create( - *this, getContext(), *query.refresh_strategy); refresh_on_start = mode < LoadingStrictnessLevel::ATTACH && !query.is_create_empty; diff --git a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 30539ed6b6a..061201017a7 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +20,7 @@ ColumnsDescription StorageSystemViewRefreshes::getColumnsDescription() { {"database", std::make_shared(), "The name of the database the table is in."}, {"view", std::make_shared(), "Table name."}, + {"uuid", std::make_shared(), "Table uuid (Atomic database)."}, {"status", std::make_shared(), "Current state of the refresh."}, {"last_refresh_result", std::make_shared(), "Outcome of the latest refresh attempt."}, {"last_refresh_time", std::make_shared(std::make_shared()), @@ -63,6 +65,7 @@ void StorageSystemViewRefreshes::fillData( std::size_t i = 0; res_columns[i++]->insert(refresh.view_id.getDatabaseName()); res_columns[i++]->insert(refresh.view_id.getTableName()); + res_columns[i++]->insert(refresh.view_id.uuid); res_columns[i++]->insert(toString(refresh.state)); res_columns[i++]->insert(toString(refresh.last_refresh_result)); From 5a683796a0dc8408ed2694af672675929352bf8f Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 12 Aug 2024 22:34:14 +0200 Subject: [PATCH 2134/2361] Update DatabaseReplicated.cpp --- src/Databases/DatabaseReplicated.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index f4aa925d6dd..6011b8e65e3 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -1584,6 +1584,8 @@ void DatabaseReplicated::dropTable(ContextPtr local_context, const String & tabl } auto table = tryGetTable(table_name, getContext()); + if (!table) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} doesn't exist", table_name); if (table->getName() == "MaterializedView" || table->getName() == "WindowView") { /// Avoid recursive locking of metadata_mutex From c2185606398526716bf4505b4e359be0beb605cc Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 12 Aug 2024 21:14:42 +0000 Subject: [PATCH 2135/2361] Don't enable allow_materialized_view_with_bad_select yet, someone has to add support for fixture reuse in test_replicated_database first --- src/Core/Settings.h | 2 +- src/Core/SettingsChangesHistory.cpp | 2 +- 2 files changed, 2 insertions(+), 2 
deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index fee60ec7981..cb61f829da3 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -616,7 +616,7 @@ class IColumn; M(Bool, throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert, true, "Throw exception on INSERT query when the setting `deduplicate_blocks_in_dependent_materialized_views` is enabled along with `async_insert`. It guarantees correctness, because these features can't work together.", 0) \ M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \ M(Bool, ignore_materialized_views_with_dropped_target_table, false, "Ignore MVs with dropped target table during pushing to views", 0) \ - M(Bool, allow_materialized_view_with_bad_select, false, "Allow CREATE MATERIALIZED VIEW with SELECT query that references nonexistent tables or columns. It must still be syntactically valid. Doesn't apply to refreshable MVs. Doesn't apply if the MV schema needs to be inferred from the SELECT query (i.e. if the CREATE has no column list and no TO table). Can be used for creating MV before its source table.", 0) \ + M(Bool, allow_materialized_view_with_bad_select, true, "Allow CREATE MATERIALIZED VIEW with SELECT query that references nonexistent tables or columns. It must still be syntactically valid. Doesn't apply to refreshable MVs. Doesn't apply if the MV schema needs to be inferred from the SELECT query (i.e. if the CREATE has no column list and no TO table). Can be used for creating MV before its source table.", 0) \ M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \ M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index dce51b30382..cae37e7ddde 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -78,7 +78,7 @@ static std::initializer_list Date: Mon, 12 Aug 2024 21:20:57 +0000 Subject: [PATCH 2136/2361] fix drift of profile event time --- src/Storages/MergeTree/MergeTask.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index cb1921ede2b..3aa4d764685 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -944,8 +944,13 @@ bool MergeTask::MergeProjectionsStage::finalizeProjectionsAndWholeMerge() const MergeTask::StageRuntimeContextPtr MergeTask::MergeProjectionsStage::getContextForNextStage() { - ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); - ProfileEvents::increment(ProfileEvents::MergeProjectionStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + /// Do not increment for projection stage because time is already accounted in main task. + /// The projection stage has its own empty projection stage which may add a drift of severals milliseconds. 
+ if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeProjectionStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } return nullptr; } @@ -1034,6 +1039,8 @@ bool MergeTask::execute() UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapsed_ms; global_ctx->prev_elapsed_ms = current_elapsed_ms; + auto next_stage_context = current_stage->getContextForNextStage(); + /// Do not increment for projection stage because time is already accounted in main task. if (global_ctx->parent_part == nullptr) { @@ -1041,8 +1048,6 @@ bool MergeTask::execute() ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); } - auto next_stage_context = current_stage->getContextForNextStage(); - /// Move to the next stage in an array of stages ++stages_iterator; if (stages_iterator == stages.end()) From 8136e6a45275b958a663ac0ee4682984e1536b07 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 21:29:26 +0000 Subject: [PATCH 2137/2361] Update new prepareForSquashing method for ColumnDynamic --- src/Columns/ColumnDynamic.cpp | 23 ++++++++++--------- src/DataTypes/DataTypeVariant.cpp | 2 +- .../03210_dynamic_squashing.reference | 20 +++++++++------- .../0_stateless/03210_dynamic_squashing.sql | 17 ++++++++------ 4 files changed, 35 insertions(+), 27 deletions(-) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index ecc2c738366..69b4c5dfc4e 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -987,7 +987,8 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) /// Internal variants of source dynamic columns may differ. /// We want to preallocate memory for all variants we will have after squashing. /// It may happen that the total number of variants in source columns will - /// exceed the limit, in this case we will choose the most frequent variants. + /// exceed the limit, in this case we will choose the most frequent variants + /// and insert the rest types into the shared variant. /// First, preallocate memory for variant discriminators and offsets. size_t new_size = size(); @@ -1030,17 +1031,14 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) DataTypePtr result_variant_type; /// Check if the number of all variants exceeds the limit. - if (all_variants.size() > max_dynamic_types || (all_variants.size() == max_dynamic_types && !total_variant_sizes.contains("String"))) + if (!canAddNewVariants(0, all_variants.size())) { /// We want to keep the most frequent variants in the resulting dynamic column. DataTypes result_variants; - result_variants.reserve(max_dynamic_types); + result_variants.reserve(max_dynamic_types + 1); /// +1 for shared variant. /// Add variants from current variant column as we will not rewrite it. for (const auto & variant : assert_cast(*variant_info.variant_type).getVariants()) result_variants.push_back(variant); - /// Add String variant in advance (if we didn't add it yet) as we must have it across variants when we reach the limit. - if (!variant_info.variant_name_to_discriminator.contains("String")) - result_variants.push_back(std::make_shared()); /// Create list of remaining variants with their sizes and sort it. 
std::vector> variants_with_sizes; @@ -1049,15 +1047,18 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) { /// Add variant to the list only of we didn't add it yet. auto variant_name = variant->getName(); - if (variant_name != "String" && !variant_info.variant_name_to_discriminator.contains(variant_name)) - variants_with_sizes.emplace_back(total_variant_sizes[variant->getName()], variant); + if (!variant_info.variant_name_to_discriminator.contains(variant_name)) + variants_with_sizes.emplace_back(total_variant_sizes[variant_name], variant); } std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater()); /// Add the most frequent variants until we reach max_dynamic_types. - size_t num_new_variants = max_dynamic_types - result_variants.size(); - for (size_t i = 0; i != num_new_variants; ++i) - result_variants.push_back(variants_with_sizes[i].second); + for (const auto & [_, new_variant] : variants_with_sizes) + { + if (!canAddNewVariant(result_variants.size())) + break; + result_variants.push_back(new_variant); + } result_variant_type = std::make_shared(result_variants); } diff --git a/src/DataTypes/DataTypeVariant.cpp b/src/DataTypes/DataTypeVariant.cpp index 8a10ca7d06d..cc8d04e94da 100644 --- a/src/DataTypes/DataTypeVariant.cpp +++ b/src/DataTypes/DataTypeVariant.cpp @@ -117,7 +117,7 @@ bool DataTypeVariant::equals(const IDataType & rhs) const /// The same data types with different custom names considered different. /// For example, UInt8 and Bool. - if ((variants[i]->hasCustomName() || rhs_variant.variants[i]) && variants[i]->getName() != rhs_variant.variants[i]->getName()) + if ((variants[i]->hasCustomName() || rhs_variant.variants[i]->hasCustomName()) && variants[i]->getName() != rhs_variant.variants[i]->getName()) return false; } diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.reference b/tests/queries/0_stateless/03210_dynamic_squashing.reference index 4f5b5ba098c..1c23c22f550 100644 --- a/tests/queries/0_stateless/03210_dynamic_squashing.reference +++ b/tests/queries/0_stateless/03210_dynamic_squashing.reference @@ -1,8 +1,12 @@ -Array(UInt8) -None -UInt64 -None -String -UInt64 -String -UInt64 +1 +Array(UInt8) true +None false +UInt64 false +2 +Array(UInt8) true +None false +UInt64 false +3 +Array(UInt8) true +String false +UInt64 true diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.sql b/tests/queries/0_stateless/03210_dynamic_squashing.sql index 23b47184e33..da3b911e796 100644 --- a/tests/queries/0_stateless/03210_dynamic_squashing.sql +++ b/tests/queries/0_stateless/03210_dynamic_squashing.sql @@ -4,17 +4,20 @@ set max_block_size = 1000; drop table if exists test; create table test (d Dynamic) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=2), number < 3000, range(number % 5)::Dynamic(max_types=2), number::Dynamic(max_types=2)) from numbers(1000000); -select distinct dynamicType(d) as type from test order by type; +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(1000000); +select '1'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; drop table test; -create table test (d Dynamic(max_types=2)) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=2), number < 3000, range(number % 5)::Dynamic(max_types=2), 
number::Dynamic(max_types=2)) from numbers(1000000); -select distinct dynamicType(d) as type from test order by type; +create table test (d Dynamic(max_types=1)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(1000000); +select '2'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; truncate table test; -insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=2), number < 3000, range(number % 5)::Dynamic(max_types=2), number::Dynamic(max_types=2)) from numbers(1000000); -select distinct dynamicType(d) as type from test order by type; +insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(1000000); +select '3'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; drop table test; From 83cb991f75f242b11beb48134d6ebfb26c73bcd7 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 12 Aug 2024 21:30:30 +0000 Subject: [PATCH 2138/2361] Fix special build --- src/Columns/ColumnDynamic.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index d80055c1716..e6e720765f6 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -368,10 +368,10 @@ public: /// Check if we can add new variant types. /// Shared variant doesn't count in the limit but always presents, /// so we should subtract 1 from the total types count. - bool canAddNewVariants(size_t current_variants_count, size_t new_variants_count) { return current_variants_count + new_variants_count - 1 <= max_dynamic_types; } - bool canAddNewVariant(size_t current_variants_count) { return canAddNewVariants(current_variants_count, 1); } - bool canAddNewVariants(size_t new_variants_count) { return canAddNewVariants(variant_info.variant_names.size(), new_variants_count); } - bool canAddNewVariant() { return canAddNewVariants(variant_info.variant_names.size(), 1); } + bool canAddNewVariants(size_t current_variants_count, size_t new_variants_count) const { return current_variants_count + new_variants_count - 1 <= max_dynamic_types; } + bool canAddNewVariant(size_t current_variants_count) const { return canAddNewVariants(current_variants_count, 1); } + bool canAddNewVariants(size_t new_variants_count) const { return canAddNewVariants(variant_info.variant_names.size(), new_variants_count); } + bool canAddNewVariant() const { return canAddNewVariants(variant_info.variant_names.size(), 1); } void setVariantType(const DataTypePtr & variant_type); void setMaxDynamicPaths(size_t max_dynamic_type_); From 3b1d6f30bec5a8a568ab477e639d97c9c95a3f2c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 23:55:01 +0200 Subject: [PATCH 2139/2361] Debug test --- .../0_stateless/02490_benchmark_max_consecutive_errors.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh index f747b3156a5..df7e9386662 100755 --- a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh +++ b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh @@ -11,5 +11,6 @@ if [ "$RES" -eq 10 ] then echo "$RES" else + echo "$RES" 
cat "${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}.log" fi From 9c7d9a6a8d96b88c56aaa95b691f2b9bf79cf8d4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 23:57:03 +0200 Subject: [PATCH 2140/2361] Annotations --- tests/queries/0_stateless/02293_ttest_large_samples.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/02293_ttest_large_samples.sql b/tests/queries/0_stateless/02293_ttest_large_samples.sql index 826bd483fe9..b4687541360 100644 --- a/tests/queries/0_stateless/02293_ttest_large_samples.sql +++ b/tests/queries/0_stateless/02293_ttest_large_samples.sql @@ -1,3 +1,5 @@ +-- Tags: long + SELECT roundBankers(result.1, 5), roundBankers(result.2, 5) FROM ( SELECT studentTTest(sample, variant) as result From 1767ec6b4ca0fc0e8546e705e0d0dff3ffa797cb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 12 Aug 2024 23:55:01 +0200 Subject: [PATCH 2141/2361] Debug test --- .../0_stateless/02490_benchmark_max_consecutive_errors.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh index f747b3156a5..df7e9386662 100755 --- a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh +++ b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh @@ -11,5 +11,6 @@ if [ "$RES" -eq 10 ] then echo "$RES" else + echo "$RES" cat "${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}.log" fi From 11d5531f829630b64566536526bd1c6369f2005e Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 12 Aug 2024 23:06:26 +0000 Subject: [PATCH 2142/2361] allow_materialized_view_with_bad_select=0 in test --- .../0_stateless/02932_refreshable_materialized_views_2.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh index 2a803114842..50a905576d5 100755 --- a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh @@ -10,7 +10,7 @@ CLICKHOUSE_LOG_COMMENT= # Set session timezone to UTC to make all DateTime formatting and parsing use UTC, because refresh # scheduling is done in UTC. 
CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" -CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`" $CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" From f12609440f081f19b0b21fdd15229cfdbb7cbb3d Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 12 Aug 2024 23:09:57 +0000 Subject: [PATCH 2143/2361] fashion --- src/Storages/MaterializedView/RefreshSet.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Storages/MaterializedView/RefreshSet.cpp b/src/Storages/MaterializedView/RefreshSet.cpp index 43aa0ada99b..7536f59c1e4 100644 --- a/src/Storages/MaterializedView/RefreshSet.cpp +++ b/src/Storages/MaterializedView/RefreshSet.cpp @@ -9,11 +9,6 @@ namespace CurrentMetrics namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - RefreshSet::Handle::Handle(Handle && other) noexcept { *this = std::move(other); From ddd5a96950528513eec78d56b103435964576b12 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Mon, 12 Aug 2024 23:56:02 +0000 Subject: [PATCH 2144/2361] Fix --- .../Formats/Impl/ParquetBlockInputFormat.cpp | 111 ++++++++++-------- 1 file changed, 64 insertions(+), 47 deletions(-) diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 1268f1df5f6..38d5094b267 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -63,7 +63,7 @@ namespace ErrorCodes /// * We dispatch based on the parquet logical+converted+physical type instead of the ClickHouse type. /// The idea is that this is similar to what we'll have to do when reimplementing Parquet parsing in /// ClickHouse instead of using Arrow (for speed). So, this is an exercise in parsing Parquet manually. -static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type::type physical_type, const parquet::ColumnDescriptor & descr) +static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type::type physical_type, const parquet::ColumnDescriptor & descr, TypeIndex type_hint) { using namespace parquet; @@ -127,20 +127,19 @@ static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type if (size < 32 && (val >> (size * 8 - 1)) != 0) val |= ~((Int256(1) << (size * 8)) - 1); - auto narrow = [&]() -> Field { - using T = typename D::NativeType; - T x = 0; - memcpy(&x, &val, sizeof(T)); - return Field(DecimalField(D(x), static_cast(scale))); + auto narrow = [&](auto x) -> Field + { + memcpy(&x, &val, sizeof(x)); + return Field(DecimalField(x, static_cast(scale))); }; if (size <= 4) - return narrow.template operator()(); + return narrow(Decimal32(0)); else if (size <= 8) - return narrow.template operator()(); + return narrow(Decimal64(0)); else if (size <= 16) - return narrow.template operator()(); + return narrow(Decimal128(0)); else - return narrow.template operator()(); + return narrow(Decimal256(0)); } while (false); @@ -197,8 +196,6 @@ static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type return Field(val); } - /// Strings. 
- if (physical_type == Type::type::BYTE_ARRAY || physical_type == Type::type::FIXED_LEN_BYTE_ARRAY) { /// Arrow's parquet decoder handles missing min/max values slightly incorrectly. @@ -227,6 +224,25 @@ static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type if (data.empty()) return Field(); + /// Long integers. + auto reinterpret_fixed_string = [&](auto x) + { + if (data.size() != sizeof(x)) + throw Exception(ErrorCodes::CANNOT_PARSE_NUMBER, "Unexpected {} size: {}", fieldTypeToString(Field::TypeToEnum::value), data.size()); + memcpy(&x, data.data(), data.size()); + return Field(x); + }; + switch (type_hint) + { + case TypeIndex::UInt128: return reinterpret_fixed_string(UInt128(0)); + case TypeIndex::UInt256: return reinterpret_fixed_string(UInt256(0)); + case TypeIndex::Int128: return reinterpret_fixed_string(Int128(0)); + case TypeIndex::Int256: return reinterpret_fixed_string(Int256(0)); + case TypeIndex::IPv6: return reinterpret_fixed_string(IPv6(0)); + default: break; + } + + /// Strings. return Field(data); } @@ -302,6 +318,7 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa if (type->isNullable()) type = assert_cast(*type).getNestedType(); Field default_value = type->getDefault(); + TypeIndex type_index = type->getTypeId(); /// Only primitive fields are supported, not arrays, maps, tuples, or Nested. /// Arrays, maps, and Nested can't be meaningfully supported because Parquet only has min/max @@ -315,8 +332,41 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa { try { - min = decodePlainParquetValueSlow(stats->EncodeMin(), stats->physical_type(), *stats->descr()); - max = decodePlainParquetValueSlow(stats->EncodeMax(), stats->physical_type(), *stats->descr()); + min = decodePlainParquetValueSlow(stats->EncodeMin(), stats->physical_type(), *stats->descr(), type_index); + max = decodePlainParquetValueSlow(stats->EncodeMax(), stats->physical_type(), *stats->descr(), type_index); + + /// If the data type in parquet file substantially differs from the requested data type, + /// it's sometimes correct to just typecast the min/max values. + /// Other times it's incorrect, e.g.: + /// INSERT INTO FUNCTION file('t.parquet', Parquet, 'x String') VALUES ('1'), ('100'), ('2'); + /// SELECT * FROM file('t.parquet', Parquet, 'x Int64') WHERE x >= 3; + /// If we just typecast min/max from string to integer, this query will incorrectly return empty result. + /// Allow conversion in some simple cases, otherwise ignore the min/max values. + auto min_type = min.getType(); + auto max_type = max.getType(); + min = convertFieldToType(min, *type); + max = convertFieldToType(max, *type); + auto ok_cast = [&](Field::Types::Which from, Field::Types::Which to) -> bool + { + if (from == to) + return true; + /// Decimal -> wider decimal. + if (Field::isDecimal(from) || Field::isDecimal(to)) + return Field::isDecimal(from) && Field::isDecimal(to) && to >= from; + /// Integer -> IP. + if (to == Field::Types::IPv4) + return from == Field::Types::UInt64; + /// Disable index for everything else, especially string <-> number. 
+ return false; + }; + if (!(ok_cast(min_type, min.getType()) && ok_cast(max_type, max.getType())) && + !(min == max) && + !(min_type == Field::Types::Int64 && min.getType() == Field::Types::UInt64 && min.get() >= 0) && + !(max_type == Field::Types::UInt64 && max.getType() == Field::Types::Int64 && max.get() <= UInt64(INT64_MAX))) + { + min = Field(); + max = Field(); + } } catch (Exception & e) { @@ -325,39 +375,6 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa } } - /// If the data type in parquet file substantially differs from the requested data type, - /// it's sometimes correct to just typecast the min/max values. - /// Other times it's incorrect, e.g.: - /// INSERT INTO FUNCTION file('t.parquet', Parquet, 'x String') VALUES ('1'), ('100'), ('2'); - /// SELECT * FROM file('t.parquet', Parquet, 'x Int64') WHERE x >= 3; - /// If we just typecast min/max from string to integer, this query will incorrectly return empty result. - /// Allow conversion in some simple cases and ignore the min/max values otherwise. - auto min_type = min.getType(); - auto max_type = max.getType(); - min = convertFieldToType(min, *type); - max = convertFieldToType(max, *type); - auto ok_cast = [&](Field::Types::Which from, Field::Types::Which to) -> bool - { - if (from == to) - return true; - /// Decimal -> wider decimal. - if (Field::isDecimal(from) || Field::isDecimal(to)) - return Field::isDecimal(from) && Field::isDecimal(to) && to >= from; - /// Integer -> IP. - if (to == Field::Types::IPv4 || to == Field::Types::IPv6) - return from == Field::Types::UInt64 || from == Field::Types::Int64; - /// Disable index for everything else, especially string <-> number. - return false; - }; - if (!(ok_cast(min_type, min.getType()) && ok_cast(max_type, max.getType())) && - !(min == max) && - !(min_type == Field::Types::Int64 && min.getType() == Field::Types::UInt64 && min.get() >= 0) && - !(max_type == Field::Types::UInt64 && max.getType() == Field::Types::Int64 && max.get() <= UInt64(INT64_MAX))) - { - min = Field(); - max = Field(); - } - /// In Range, NULL is represented as positive or negative infinity (represented by a special /// kind of Field, different from floating-point infinities). 
From f16c969352d2938cda1b1953af0f7b895d732873 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 13 Aug 2024 02:02:15 +0200 Subject: [PATCH 2145/2361] Fix flaky check --- tests/queries/0_stateless/00600_replace_running_query.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index ad7c49a9ad3..4e7ed7c8cfa 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: no-parallel CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none From c3e32bbecf0497a5eae6ab0cf23ba641591cfa0f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 13 Aug 2024 02:07:51 +0200 Subject: [PATCH 2146/2361] Fix flaky check --- .../00600_replace_running_query.sh | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index 4e7ed7c8cfa..80e2ecf5d5b 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: no-parallel CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none @@ -7,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -TEST_PREFIX=$RANDOM +TEST_PREFIX="${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "drop user if exists u_00600${TEST_PREFIX}" ${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1" ${CLICKHOUSE_CLIENT} -q "grant select on system.numbers to u_00600${TEST_PREFIX}" @@ -29,34 +28,34 @@ function wait_for_queries_to_finish() } -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 & -wait_for_query_to_start 'hello' +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 & +wait_for_query_to_start "${CLICKHOUSE_DATABASE}hello" # Replace it -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 0' +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}hello&replace_running_query=1" -d 'SELECT 0' # Wait for it to be replaced wait wait_for_queries_to_finish -${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & -wait_for_query_to_start '42' +${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id="${CLICKHOUSE_DATABASE}42" --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & +wait_for_query_to_start "${CLICKHOUSE_DATABASE}42" # Trying to run another query with the same query_id -${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 43' 2>&1 | grep -cF 'is already running by user' +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --query='SELECT 43' 2>&1 | grep -cF 'is already running by user' # Trying to replace query of a different user -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=42&replace_running_query=1" -d 'SELECT 1' | grep -cF 'is already running by user' +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}42&replace_running_query=1" -d 'SELECT 1' | grep -cF 'is already running 
by user' -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '42' SYNC" > /dev/null +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}42' SYNC" > /dev/null wait wait_for_queries_to_finish -${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & -wait_for_query_to_start '42' -${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & +wait_for_query_to_start "${CLICKHOUSE_DATABASE}42" +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null wait wait_for_queries_to_finish -${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --query='SELECT 44' +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --replace_running_query=1 --query='SELECT 44' ${CLICKHOUSE_CLIENT} -q "drop user u_00600${TEST_PREFIX}" From 5e6f728248396fefd1b003555558977cb79a592e Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 13 Aug 2024 00:35:40 +0000 Subject: [PATCH 2147/2361] safeGet --- src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 38d5094b267..3f2e701afc2 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -361,8 +361,8 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa }; if (!(ok_cast(min_type, min.getType()) && ok_cast(max_type, max.getType())) && !(min == max) && - !(min_type == Field::Types::Int64 && min.getType() == Field::Types::UInt64 && min.get() >= 0) && - !(max_type == Field::Types::UInt64 && max.getType() == Field::Types::Int64 && max.get() <= UInt64(INT64_MAX))) + !(min_type == Field::Types::Int64 && min.getType() == Field::Types::UInt64 && min.safeGet() >= 0) && + !(max_type == Field::Types::UInt64 && max.getType() == Field::Types::Int64 && max.safeGet() <= UInt64(INT64_MAX))) { min = Field(); max = Field(); From b80dd738b03c1618e0872b14472457a2462c334e Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 13 Aug 2024 02:41:00 +0200 Subject: [PATCH 2148/2361] Fix error --- src/IO/ReadHelpers.h | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 580aa51b238..d63ba16e080 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -259,12 +259,18 @@ inline void readBoolText(bool & x, ReadBuffer & buf) readChar(tmp, buf); x = tmp != '0'; - if (!buf.eof() && isAlphaASCII(*buf.position())) + if (!buf.eof() && isAlphaASCII(tmp)) { - if (tmp == 't') - assertString("rue", buf); - else if (tmp == 'T') - assertString("RUE", buf); + if (tmp == 't' || tmp == 'T') + { + assertStringCaseInsensitive("rue", buf); + x = true; + } + else if (tmp == 'f' || tmp == 'F') + { + assertStringCaseInsensitive("alse", buf); + x = false; + } } } From a517bc90cd9e369a4385f367e9f5e9688520c8bb Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Mon, 12 Aug 
2024 21:42:47 -0400 Subject: [PATCH 2149/2361] Update PULL_REQUEST_TEMPLATE.md --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8b6e957e1d8..3dcce68ab46 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -60,7 +60,7 @@ At a minimum, the following information should be added (but add more as needed) - [ ] Exclude: All with aarch64, release, debug --- - [ ] Run only fuzzers related jobs (libFuzzer fuzzers, AST fuzzers, etc.) -- [ ] Exclude AST fuzzers +- [ ] Exclude: AST fuzzers --- - [ ] Do not test - [ ] Woolen Wolfdog From cb7a67c6dd9df40d728108c37bfd423435b45141 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Tue, 13 Aug 2024 11:39:22 +0800 Subject: [PATCH 2150/2361] fix failed uts --- .../02210_toColumnTypeName_toLowCardinality_const.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference b/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference index 1e3d3a50562..e3978020431 100644 --- a/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference +++ b/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference @@ -1 +1 @@ -Const(ColumnLowCardinality) +Const(LowCardinality(UInt8)) From 761a28502ef79787244ad1da7b15b3fedfb24221 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 13 Aug 2024 05:17:18 +0000 Subject: [PATCH 2151/2361] Unchange integration test --- tests/integration/test_replicated_database/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index a9b178302f9..60a6e099b22 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -712,7 +712,6 @@ def create_some_tables(db): "distributed_ddl_task_timeout": 0, "allow_experimental_object_type": 1, "allow_suspicious_codecs": 1, - "allow_materialized_view_with_bad_select": 1, } main_node.query(f"CREATE TABLE {db}.t1 (n int) ENGINE=Memory", settings=settings) dummy_node.query( From 2a51b6c403c8f86fb9e68f358ca490630c40fec6 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Tue, 13 Aug 2024 10:20:05 +0800 Subject: [PATCH 2152/2361] fix crash in lag/lead --- src/Processors/Transforms/WindowTransform.cpp | 6 ++++++ .../03210_lag_lead_inframe_types.reference | 16 +++++++++++++++ .../03210_lag_lead_inframe_types.sql | 20 +++++++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 85e6b2ec55e..cae817380e0 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -1159,6 +1159,12 @@ void WindowTransform::appendChunk(Chunk & chunk) { if (ws.window_function_impl) block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); + else + { + /// `castColumn` returns nullptr at default, so it's OK to put nullptr as a placeholder here + /// it should not be used in fact. 
+ block.casted_columns.push_back(nullptr); + } block.output_columns.push_back(ws.aggregate_function->getResultType() ->createColumn()); diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference index d4734a85e72..4ecf7f56b07 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference @@ -38,3 +38,19 @@ 7 8 9 +15 \N 3 15 15 15 15 +14 \N 2 10 10 10 154 +13 \N 2 10 10 10 143 +12 \N 2 10 10 10 14 +11 \N 2 10 10 10 12 +10 \N 2 10 10 10 10 +9 \N 1 5 5 5 99 +8 \N 1 5 5 5 88 +7 \N 1 5 5 5 9 +6 \N 1 5 5 5 7 +5 \N 1 5 5 5 5 +4 \N 0 0 0 0 44 +3 \N 0 0 0 0 33 +2 \N 0 0 0 0 4 +1 \N 0 0 0 0 2 +0 \N 0 0 0 0 0 diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql index f6017ee6690..cc6746e428f 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql @@ -2,3 +2,23 @@ SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (OR SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT lagInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT leadInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); + +SELECT + number, + YYYYMMDDToDate(1, toLowCardinality(11), max(YYYYMMDDToDate(YYYYMMDDToDate(toLowCardinality(1), 11, materialize(NULL), 19700101.1, 1, 27, 7, materialize(toUInt256(37)), 9, 19, 9), 1, toUInt128(11), NULL, 19700101.1, 1, 27, 7, 37, 9, 19, 9), toUInt256(30)) IGNORE NULLS OVER w, NULL, 19700101.1, toNullable(1), 27, materialize(7), 37, 9, 19, 9), + p, + pp, + lagInFrame(number, number - pp) OVER w AS lag2, + lagInFrame(number, number - pp, number * 11) OVER w AS lag, + leadInFrame(number, number - pp, number * 11) OVER w AS lead +FROM +( + SELECT + number, + intDiv(number, 5) AS p, + p * 5 AS pp + FROM numbers(16) +) +WHERE toLowCardinality(1) +WINDOW w AS (PARTITION BY p ORDER BY number ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY number DESC NULLS LAST; From a0f617c6cc06cd80ecbb485965f6b7e7763be18c Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 13 Aug 2024 10:14:37 +0200 Subject: [PATCH 2153/2361] tests: make 01600_parts_states_metrics_long better - better bash - HTTP protocol cannot handle multiple queries fix this - decrease number of retries (this should be ok after no-parallel) to print final debug info Signed-off-by: Azat Khuzhin --- .../01600_parts_states_metrics_long.sh | 43 +++++++++++-------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index a07dd306b3e..0a9f94cc451 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -11,33 +11,40 @@ function query() ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&database_atomic_wait_for_drop_and_detach_synchronously=1" -d "$*" } -# NOTE: database = $CLICKHOUSE_DATABASE is unwanted -verify_sql="SELECT - (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) - = (SELECT sum(active), sum(NOT active) FROM - (SELECT active FROM system.parts UNION ALL SELECT active FROM 
system.projection_parts UNION ALL SELECT 1 FROM system.dropped_tables_parts))" # The query is not atomic - it can compare states between system.parts and system.metrics from different points in time. # So, there is inherent race condition. But it should get expected result eventually. # In case of test failure, this code will do infinite loop and timeout. verify() { - for i in {1..5000} - do - result=$( query "$verify_sql" ) - [ "$result" = "1" ] && echo "$result" && break - sleep 0.1 + local result - if [[ $i -eq 5000 ]] - then - query " - SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; - SELECT sum(active), sum(NOT active) FROM system.parts; - SELECT sum(active), sum(NOT active) FROM system.projection_parts; - SELECT count() FROM system.dropped_tables_parts; - " + for _ in {1..100}; do + # NOTE: database = $CLICKHOUSE_DATABASE is unwanted + result=$( query "SELECT + (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) + = + (SELECT sum(active), sum(NOT active) FROM ( + SELECT active FROM system.parts + UNION ALL SELECT active FROM system.projection_parts + UNION ALL SELECT 1 FROM system.dropped_tables_parts + ))" + ) + + if [ "$result" = "1" ]; then + echo "$result" + return fi + + sleep 0.5 done + + $CLICKHOUSE_CLIENT -q " + SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; + SELECT sum(active), sum(NOT active) FROM system.parts; + SELECT sum(active), sum(NOT active) FROM system.projection_parts; + SELECT count() FROM system.dropped_tables_parts; + " } query "DROP TABLE IF EXISTS test_table" From 2896dd51d816e28ee73fd3f1acf20912e2689f35 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 09:52:43 +0000 Subject: [PATCH 2154/2361] Implemenet prepareForSquashing for ColumnObject --- src/Columns/ColumnDynamic.cpp | 19 +- src/Columns/ColumnDynamic.h | 2 + src/Columns/ColumnObject.cpp | 123 +++++++++- src/Columns/ColumnObject.h | 3 + src/Core/SettingsChangesHistory.cpp | 265 +--------------------- src/DataTypes/DataTypesBinaryEncoding.cpp | 10 + src/Interpreters/Squashing.cpp | 1 + 7 files changed, 151 insertions(+), 272 deletions(-) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index ee06a1b10fe..29813abb99e 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -660,11 +660,6 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) if (source_columns.empty()) return; - /// Internal variants of source dynamic columns may differ. - /// We want to preallocate memory for all variants we will have after squashing. - /// It may happen that the total number of variants in source columns will - /// exceed the limit, in this case we will choose the most frequent variants. - /// First, preallocate memory for variant discriminators and offsets. size_t new_size = size(); for (const auto & source_column : source_columns) @@ -673,7 +668,18 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) variant_col.getLocalDiscriminators().reserve_exact(new_size); variant_col.getOffsets().reserve_exact(new_size); - /// Second, collect all variants and their total sizes. + /// Second, preallocate memory for variants. + prepareVariantsForSquashing(source_columns); +} + +void ColumnDynamic::prepareVariantsForSquashing(const Columns & source_columns) +{ + /// Internal variants of source dynamic columns may differ. 
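+    /// (every ColumnDynamic owns its own Variant column, so each source may carry a different set of variant types).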
+ /// We want to preallocate memory for all variants we will have after squashing. + /// It may happen that the total number of variants in source columns will + /// exceed the limit, in this case we will choose the most frequent variants. + + /// Collect all variants and their total sizes. std::unordered_map total_variant_sizes; DataTypes all_variants; @@ -747,6 +753,7 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) /// Now current dynamic column has all resulting variants and we can call /// prepareForSquashing on them to preallocate the memory. + auto & variant_col = getVariantColumn(); for (size_t i = 0; i != variant_info.variant_names.size(); ++i) { Columns source_variant_columns; diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index fe9815b1a81..2d695e1948b 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -259,6 +259,8 @@ public: } void prepareForSquashing(const Columns & source_columns) override; + /// Prepare only variants but not discriminators and offsets. + void prepareVariantsForSquashing(const Columns & source_columns); void ensureOwnership() override { diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index ea497451648..852327cfc17 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -257,6 +257,12 @@ ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(const std::string_view path return it_ptr->second; } +void ColumnObject::addNewDynamicPath(const std::string_view path) +{ + if (!tryToAddNewDynamicPath(path)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add new dynamic path as the limit ({}) on dynamic paths is reached", max_dynamic_paths); +} + void ColumnObject::setDynamicPaths(const std::vector & paths) { if (paths.size() > max_dynamic_paths) @@ -1123,6 +1129,115 @@ void ColumnObject::getExtremes(DB::Field & min, DB::Field & max) const } } +void ColumnObject::prepareForSquashing(const std::vector & source_columns) +{ + if (source_columns.empty()) + return; + + /// Dynamic paths of source Object columns may differ. + /// We want to preallocate memory for all dynamic paths we will have after squashing. + /// It may happen that the total number of dynamic paths in source columns will + /// exceed the limit, in this case we will choose the most frequent paths. + std::unordered_map path_to_total_number_of_non_null_values; + + auto add_dynamic_paths = [&](const ColumnObject & source_object) + { + for (const auto & [path, dynamic_column_ptr] : source_object.dynamic_paths_ptrs) + { + auto it = path_to_total_number_of_non_null_values.find(path); + if (it == path_to_total_number_of_non_null_values.end()) + it = path_to_total_number_of_non_null_values.emplace(path, 0).first; + it->second += (dynamic_column_ptr->size() - dynamic_column_ptr->getNumberOfDefaultRows()); + } + }; + + for (const auto & source_column : source_columns) + add_dynamic_paths(assert_cast(*source_column)); + + /// Add dynamic paths from this object column. + add_dynamic_paths(*this); + + /// Check if the number of all dynamic paths exceeds the limit. + if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) + { + /// We want to keep the most frequent paths in the resulting object column. + /// Sort paths by total number of non null values. + /// Don't include paths from current column as we cannot change them. 
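+        /// Each entry is a (non-null value count, path) pair; sorting with std::greater below puts the most frequent paths first.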
+ std::vector> paths_with_sizes; + paths_with_sizes.reserve(path_to_total_number_of_non_null_values.size() - dynamic_paths.size()); + for (const auto & [path, size] : path_to_total_number_of_non_null_values) + { + if (!dynamic_paths.contains(path)) + paths_with_sizes.emplace_back(size, path); + } + std::sort(paths_with_sizes.begin(), paths_with_sizes.end(), std::greater()); + + /// Fill dynamic_paths with first paths in sorted list until we reach the limit. + size_t paths_to_add = max_dynamic_paths - dynamic_paths.size(); + for (size_t i = 0; i != paths_to_add; ++i) + addNewDynamicPath(paths_with_sizes[i].second); + } + /// Otherwise keep all paths. + else + { + /// Create columns for new dynamic paths. + for (const auto & [path, _] : path_to_total_number_of_non_null_values) + { + if (!dynamic_paths.contains(path)) + addNewDynamicPath(path); + } + } + + /// Now current object column has all resulting dynamic paths and we can call + /// prepareForSquashing on them to preallocate the memory. + /// Also we can preallocate memory for dynamic paths and shared data. + Columns shared_data_source_columns; + shared_data_source_columns.reserve(source_columns.size()); + std::unordered_map typed_paths_source_columns; + typed_paths_source_columns.reserve(typed_paths.size()); + std::unordered_map dynamic_paths_source_columns; + dynamic_paths_source_columns.reserve(dynamic_paths.size()); + + for (const auto & [path, column] : typed_paths) + typed_paths_source_columns[path].reserve(source_columns.size()); + + for (const auto & [path, column] : dynamic_paths) + dynamic_paths_source_columns[path].reserve(source_columns.size()); + + size_t total_size = 0; + for (const auto & source_column : source_columns) + { + const auto & source_object_column = assert_cast(*source_column); + total_size += source_object_column.size(); + shared_data_source_columns.push_back(source_object_column.shared_data); + + for (const auto & [path, column] : source_object_column.typed_paths) + typed_paths_source_columns.at(path).push_back(column); + + for (const auto & [path, column] : source_object_column.dynamic_paths) + { + if (dynamic_paths.contains(path)) + dynamic_paths_source_columns.at(path).push_back(column); + } + } + + shared_data->prepareForSquashing(shared_data_source_columns); + + for (const auto & [path, source_typed_columns] : typed_paths_source_columns) + typed_paths[path]->prepareForSquashing(source_typed_columns); + + for (const auto & [path, source_dynamic_columns] : dynamic_paths_source_columns) + { + /// ColumnDynamic::prepareForSquashing may not preallocate enough memory for discriminators and offsets + /// because source columns may not have this dynamic path (and so dynamic columns filled with nulls). + /// For this reason we first call ColumnDynamic::reserve with resulting size to preallocate memory for + /// discriminators and offsets and ColumnDynamic::prepareVariantsForSquashing to preallocate memory + /// for all variants inside Dynamic. + dynamic_paths_ptrs[path]->reserve(total_size); + dynamic_paths_ptrs[path]->prepareVariantsForSquashing(source_dynamic_columns); + } +} + void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) { if (!empty()) @@ -1147,14 +1262,14 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou const auto & source_object = assert_cast(*source_column); /// During deserialization from MergeTree we will have statistics from the whole /// data part with number of non null values for each dynamic path. 
- const auto & source_statistics = source_object.getStatistics(); - for (const auto & [path, column] : source_object.dynamic_paths) + const auto & source_statistics = source_object.getStatistics(); + for (const auto & [path, column_ptr] : source_object.dynamic_paths_ptrs) { auto it = path_to_total_number_of_non_null_values.find(path); if (it == path_to_total_number_of_non_null_values.end()) it = path_to_total_number_of_non_null_values.emplace(path, 0).first; auto statistics_it = source_statistics.data.find(path); - size_t size = statistics_it == source_statistics.data.end() ? (column->size() - column->getNumberOfDefaultRows()) : statistics_it->second; + size_t size = statistics_it == source_statistics.data.end() ? (column_ptr->size() - column_ptr->getNumberOfDefaultRows()) : statistics_it->second; it->second += size; } } @@ -1165,7 +1280,7 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou /// Check if the number of all dynamic paths exceeds the limit. if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) { - /// Sort paths by total_number_of_non_null_values. + /// Sort paths by total number of non null values. std::vector> paths_with_sizes; paths_with_sizes.reserve(path_to_total_number_of_non_null_values.size()); for (const auto & [path, size] : path_to_total_number_of_non_null_values) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index ecb6c4e0e15..38c71e94e12 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -141,6 +141,7 @@ public: void reserve(size_t n) override; size_t capacity() const override; + void prepareForSquashing(const std::vector & source_columns) override; void ensureOwnership() override; size_t byteSize() const override; size_t byteSizeAt(size_t n) const override; @@ -202,6 +203,8 @@ public: /// Try to add new dynamic path. Returns pointer to the new dynamic /// path column or nullptr if limit on dynamic paths is reached. ColumnDynamic * tryToAddNewDynamicPath(const std::string_view path); + /// Throws an exception if cannot add. + void addNewDynamicPath(const std::string_view path); void setDynamicPaths(const std::vector & paths); void setStatistics(const Statistics & statistics_) { statistics = statistics_; } diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 50d46429803..bcf9b3b835c 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,268 +57,6 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. 
static std::initializer_list> settings_changes_history_initializer = { - {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, - {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, - {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, - {"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"}, - {"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"}, - {"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"}, - {"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."}, - {"input_format_json_case_insensitive_column_matching", false, false, "Ignore case when matching JSON keys with CH columns."}, - {"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."}, - {"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."}, - {"collect_hash_table_stats_during_joins", false, true, "New setting."}, - {"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."}, - {"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."}, - {"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."}, - {"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"}, - {"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"}, - {"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"}, - {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"}, - {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"}, - {"merge_tree_min_bytes_per_task_for_remote_reading", 4194304, 2097152, "Value is unified with `filesystem_prefetch_min_bytes_for_single_read_task`"}, - {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."}, - {"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. 
It takes place only for backup/restore."}, - {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."}, - {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}, - {"allow_experimental_json_type", false, false, "Add new experimental JSON type"}, - {"use_json_alias_for_old_object_type", true, false, "Use JSON type alias to create new JSON type"}, - {"type_json_skip_duplicated_paths", false, false, "Allow to skip duplicated paths during JSON parsing"} - }}, - {"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"}, - {"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"}, - {"input_format_parquet_use_native_reader", false, false, "When reading Parquet files, to use native reader instead of arrow reader."}, - {"hdfs_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in HDFS engine instead of empty query result"}, - {"azure_throw_on_zero_files_match", false, false, "Allow to throw an error when ListObjects request cannot match any files in AzureBlobStorage engine instead of empty query result"}, - {"s3_validate_request_settings", true, true, "Allow to disable S3 request settings validation"}, - {"allow_experimental_full_text_index", false, false, "Enable experimental full-text index"}, - {"azure_skip_empty_files", false, false, "Allow to skip empty files in azure table engine"}, - {"hdfs_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in HDFS table engine"}, - {"azure_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in AzureBlobStorage table engine"}, - {"s3_ignore_file_doesnt_exist", false, false, "Allow to return 0 rows when the requested files don't exist instead of throwing an exception in S3 table engine"}, - {"s3_max_part_number", 10000, 10000, "Maximum part number number for s3 upload part"}, - {"s3_max_single_operation_copy_size", 32 * 1024 * 1024, 32 * 1024 * 1024, "Maximum size for a single copy operation in s3"}, - {"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."}, - {"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."}, - {"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"}, - {"allow_deprecated_snowflake_conversion_functions", true, false, "Disabled deprecated functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake."}, - {"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."}, - {"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."}, - {"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."}, - {"allow_experimental_statistics", false, false, "The setting was renamed. 
The previous name is `allow_experimental_statistic`."}, - {"enable_vertical_final", false, true, "Enable vertical final by default again after fixing bug"}, - {"parallel_replicas_custom_key_range_lower", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards"}, - {"parallel_replicas_custom_key_range_upper", 0, 0, "Add settings to control the range filter when using parallel replicas with dynamic shards. A value of 0 disables the upper limit"}, - {"output_format_pretty_display_footer_column_names", 0, 1, "Add a setting to display column names in the footer if there are many rows. Threshold value is controlled by output_format_pretty_display_footer_column_names_min_rows."}, - {"output_format_pretty_display_footer_column_names_min_rows", 0, 50, "Add a setting to control the threshold value for setting output_format_pretty_display_footer_column_names_min_rows. Default 50."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.5", {{"allow_deprecated_error_prone_window_functions", true, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)"}, - {"allow_experimental_join_condition", false, false, "Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y."}, - {"input_format_tsv_crlf_end_of_line", false, false, "Enables reading of CRLF line endings with TSV formats"}, - {"output_format_parquet_use_custom_encoder", false, true, "Enable custom Parquet encoder."}, - {"cross_join_min_rows_to_compress", 0, 10000000, "Minimal count of rows to compress block in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"cross_join_min_bytes_to_compress", 0, 1_GiB, "Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. 
This block is compressed when any of the two thresholds (by rows or by bytes) are reached."}, - {"http_max_chunk_size", 0, 0, "Internal limitation"}, - {"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."}, - {"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"}, - {"cast_string_to_dynamic_use_inference", false, false, "Add setting to allow converting String to Dynamic through parsing"}, - {"allow_experimental_dynamic_type", false, false, "Add new experimental Dynamic type"}, - {"azure_max_blocks_in_multipart_upload", 50000, 50000, "Maximum number of blocks in multipart upload for Azure."}, - }}, - {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"}, - {"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"}, - {"ignore_drop_queries_probability", 0, 0, "Allow to ignore drop queries in server with specified probability for testing purposes"}, - {"lightweight_deletes_sync", 2, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes"}, - {"query_cache_system_table_handling", "save", "throw", "The query cache no longer caches results of queries against system tables"}, - {"input_format_json_ignore_unnecessary_fields", false, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields"}, - {"input_format_hive_text_allow_variable_number_of_columns", false, true, "Ignore extra columns in Hive Text input (if file has more columns than expected) and treat missing fields in Hive Text input as default values."}, - {"allow_experimental_database_replicated", false, true, "Database engine Replicated is now in Beta stage"}, - {"temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds", (10 * 60 * 1000), (10 * 60 * 1000), "Wait time to lock cache for sapce reservation in temporary data in filesystem cache"}, - {"optimize_rewrite_sum_if_to_count_if", false, true, "Only available for the analyzer, where it works correctly"}, - {"azure_allow_parallel_part_upload", "true", "true", "Use multiple threads for azure multipart upload."}, - {"max_recursive_cte_evaluation_depth", DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, DBMS_RECURSIVE_CTE_MAX_EVALUATION_DEPTH, "Maximum limit on recursive CTE evaluation depth"}, - {"query_plan_convert_outer_join_to_inner_join", false, true, "Allow to convert OUTER JOIN to INNER JOIN if filter after JOIN always filters default values"}, - }}, - {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"}, - {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"}, - {"use_page_cache_for_disks_without_file_cache", false, false, "Added userspace page cache"}, - {"read_from_page_cache_if_exists_otherwise_bypass_cache", false, false, "Added userspace page cache"}, - {"page_cache_inject_eviction", false, false, "Added userspace page cache"}, - {"default_table_engine", "None", "MergeTree", "Set default table engine to MergeTree for better usability"}, - {"input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects", false, false, "Allow to use String type for ambiguous paths during named tuple inference from JSON objects"}, - {"traverse_shadow_remote_data_paths", 
false, false, "Traverse shadow directory when query system.remote_data_paths."}, - {"throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert", false, true, "Deduplication in dependent materialized view cannot work together with async inserts."}, - {"parallel_replicas_allow_in_with_subquery", false, true, "If true, subquery for IN will be executed on every follower replica"}, - {"log_processors_profiles", false, true, "Enable by default"}, - {"function_locate_has_mysql_compatible_argument_order", false, true, "Increase compatibility with MySQL's locate function."}, - {"allow_suspicious_primary_key", true, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. SimpleAggregateFunction)"}, - {"filesystem_cache_reserve_space_wait_lock_timeout_milliseconds", 1000, 1000, "Wait time to lock cache for sapce reservation in filesystem cache"}, - {"max_parser_backtracks", 0, 1000000, "Limiting the complexity of parsing"}, - {"analyzer_compatibility_join_using_top_level_identifier", false, false, "Force to resolve identifier in JOIN USING from projection"}, - {"distributed_insert_skip_read_only_replicas", false, false, "If true, INSERT into Distributed will skip read-only replicas"}, - {"keeper_max_retries", 10, 10, "Max retries for general keeper operations"}, - {"keeper_retry_initial_backoff_ms", 100, 100, "Initial backoff timeout for general keeper operations"}, - {"keeper_retry_max_backoff_ms", 5000, 5000, "Max backoff timeout for general keeper operations"}, - {"s3queue_allow_experimental_sharded_mode", false, false, "Enable experimental sharded mode of S3Queue table engine. It is experimental because it will be rewritten"}, - {"allow_experimental_analyzer", false, true, "Enable analyzer and planner by default."}, - {"merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability", 0.0, 0.0, "For testing of `PartsSplitter` - split read ranges into intersecting and non intersecting every time you read from MergeTree with the specified probability."}, - {"allow_get_client_http_header", false, false, "Introduced a new function."}, - {"output_format_pretty_row_numbers", false, true, "It is better for usability."}, - {"output_format_pretty_max_value_width_apply_for_single_value", true, false, "Single values in Pretty formats won't be cut."}, - {"output_format_parquet_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_orc_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_arrow_string_as_string", false, true, "ClickHouse allows arbitrary binary data in the String data type, which is typically UTF-8. Parquet/ORC/Arrow Strings only support UTF-8. That's why you can choose which Arrow's data type to use for the ClickHouse String data type - String or Binary. 
While Binary would be more correct and compatible, using String by default will correspond to user expectations in most cases."}, - {"output_format_parquet_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_orc_compression_method", "lz4", "zstd", "Parquet/ORC/Arrow support many compression methods, including lz4 and zstd. ClickHouse supports each and every compression method. Some inferior tools, such as 'duckdb', lack support for the faster `lz4` compression method, that's why we set zstd by default."}, - {"output_format_pretty_highlight_digit_groups", false, true, "If enabled and if output is a terminal, highlight every digit corresponding to the number of thousands, millions, etc. with underline."}, - {"geo_distance_returns_float64_on_float64_arguments", false, true, "Increase the default precision."}, - {"azure_max_inflight_parts_for_one_file", 20, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited."}, - {"azure_strict_upload_part_size", 0, 0, "The exact size of part to upload during multipart upload to Azure blob storage."}, - {"azure_min_upload_part_size", 16*1024*1024, 16*1024*1024, "The minimum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_max_upload_part_size", 5ull*1024*1024*1024, 5ull*1024*1024*1024, "The maximum size of part to upload during multipart upload to Azure blob storage."}, - {"azure_upload_part_size_multiply_factor", 2, 2, "Multiply azure_min_upload_part_size by this factor each time azure_multiply_parts_count_threshold parts were uploaded from a single write to Azure blob storage."}, - {"azure_upload_part_size_multiply_parts_count_threshold", 500, 500, "Each time this number of parts was uploaded to Azure blob storage, azure_min_upload_part_size is multiplied by azure_upload_part_size_multiply_factor."}, - {"output_format_csv_serialize_tuple_into_separate_columns", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_deserialize_separate_columns_into_tuple", true, true, "A new way of how interpret tuples in CSV format was added."}, - {"input_format_csv_try_infer_strings_from_quoted_tuples", true, true, "A new way of how interpret tuples in CSV format was added."}, - }}, - {"24.2", {{"allow_suspicious_variant_types", true, false, "Don't allow creating Variant type with suspicious variants by default"}, - {"validate_experimental_and_suspicious_types_inside_nested_types", false, true, "Validate usage of experimental and suspicious types inside nested types"}, - {"output_format_values_escape_quote_with_quote", false, false, "If true escape ' with '', otherwise quoted with \\'"}, - {"output_format_pretty_single_large_number_tip_threshold", 0, 1'000'000, "Print a readable number tip on the right side of the table if the block consists of a single number which exceeds this value (except 0)"}, - {"input_format_try_infer_exponent_floats", true, false, "Don't infer floats in exponential notation by default"}, - {"query_plan_optimize_prewhere", true, true, "Allow to push down filter to PREWHERE expression for supported storages"}, - {"async_insert_max_data_size", 1000000, 10485760, "The previous value appeared to be too small."}, - {"async_insert_poll_timeout_ms", 10, 10, "Timeout in 
milliseconds for polling data from asynchronous insert queue"}, - {"async_insert_use_adaptive_busy_timeout", false, true, "Use adaptive asynchronous insert timeout"}, - {"async_insert_busy_timeout_min_ms", 50, 50, "The minimum value of the asynchronous insert timeout in milliseconds; it also serves as the initial value, which may be increased later by the adaptive algorithm"}, - {"async_insert_busy_timeout_max_ms", 200, 200, "The minimum value of the asynchronous insert timeout in milliseconds; async_insert_busy_timeout_ms is aliased to async_insert_busy_timeout_max_ms"}, - {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"}, - {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"}, - {"format_template_row_format", "", "", "Template row format string can be set directly in query"}, - {"format_template_resultset_format", "", "", "Template result set format string can be set in query"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}, - {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."}, - {"min_external_table_block_size_rows", DEFAULT_INSERT_BLOCK_SIZE, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to external table to specified size in rows, if blocks are not big enough"}, - {"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."}, - {"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."}, - {"optimize_time_filter_with_preimage", true, true, "Optimize Date and DateTime predicates by converting functions into equivalent comparisons without conversions (e.g. toYear(col) = 2023 -> col >= '2023-01-01' AND col <= '2023-12-31')"}, - {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. 
Used as a safeguard against consuming too much memory."}, - {"default_view_definer", "CURRENT_USER", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"}, - {"default_materialized_view_sql_security", "DEFINER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"}, - {"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}, - {"mysql_map_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - {"mysql_map_fixed_string_to_text_in_show_columns", false, true, "Reduce the configuration effort to connect ClickHouse with BI tools."}, - }}, - {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."}, - {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"}, - {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"}, - {"allow_experimental_variant_type", false, false, "Add new experimental Variant type"}, - {"use_variant_as_common_type", false, false, "Allow to use Variant in if/multiIf if there is no common type"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"parallel_replicas_mark_segment_size", 128, 128, "Add new setting to control segment size in new parallel replicas coordinator implementation"}, - {"ignore_materialized_views_with_dropped_target_table", false, false, "Add new setting to allow to ignore materialized views with dropped target table"}, - {"output_format_compression_level", 3, 3, "Allow to change compression level in the query output"}, - {"output_format_compression_zstd_window_log", 0, 0, "Allow to change zstd window log in the query output when zstd compression is used"}, - {"enable_zstd_qat_codec", false, false, "Add new ZSTD_QAT codec"}, - {"enable_vertical_final", false, true, "Use vertical final by default"}, - {"output_format_arrow_use_64_bit_indexes_for_dictionary", false, false, "Allow to use 64 bit indexes type in Arrow dictionaries"}, - {"max_rows_in_set_to_optimize_join", 100000, 0, "Disable join optimization as it prevents from read in order optimization"}, - {"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"}, - {"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"}, - {"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"}, - {"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}, - {"optimize_injective_functions_in_group_by", false, true, "Replace injective functions by it's arguments in GROUP BY section in analyzer"}, - {"update_insert_deduplication_token_in_dependent_materialized_views", false, false, "Allow to update insert deduplication token with table identifier during insert in dependent materialized views"}, - {"azure_max_unexpected_write_error_retries", 4, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write"}, - {"split_parts_ranges_into_intersecting_and_non_intersecting_final", false, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL 
optimization"}, - {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}}, - {"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."}, - {"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"}, - {"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"}, - {"input_format_arrow_allow_missing_columns", false, true, "Allow missing columns in Arrow files by default"}}}, - {"23.11", {{"parsedatetime_parse_without_leading_zeros", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.9", {{"optimize_group_by_constant_keys", false, true, "Optimize group by constant keys by default"}, - {"input_format_json_try_infer_named_tuples_from_objects", false, true, "Try to infer named Tuples from JSON objects by default"}, - {"input_format_json_read_numbers_as_strings", false, true, "Allow to read numbers as strings in JSON formats by default"}, - {"input_format_json_read_arrays_as_strings", false, true, "Allow to read arrays as strings in JSON formats by default"}, - {"input_format_json_infer_incomplete_types_as_strings", false, true, "Allow to infer incomplete types as Strings in JSON formats by default"}, - {"input_format_json_try_infer_numbers_from_strings", true, false, "Don't infer numbers from strings in JSON formats by default to prevent possible parsing errors"}, - {"http_write_exception_in_output_format", false, true, "Output valid JSON/XML on exception in HTTP streaming."}}}, - {"23.8", {{"rewrite_count_distinct_if_with_count_distinct_implementation", false, true, "Rewrite countDistinctIf with count_distinct_implementation configuration"}}}, - {"23.7", {{"function_sleep_max_microseconds_per_block", 0, 3000000, "In previous versions, the maximum sleep time of 3 seconds was applied only for `sleep`, but not for `sleepEachRow` function. In the new version, we introduce this setting. If you set compatibility with the previous versions, we will disable the limit altogether."}}}, - {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."}, - {"http_receive_timeout", 180, 30, "See http_send_timeout."}}}, - {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."}, - {"parallelize_output_from_storages", false, true, "Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows."}, - {"use_with_fill_by_sorting_prefix", false, true, "Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. 
Rows with different values in sorting prefix are filled independently"}, - {"output_format_parquet_compliant_nested_types", false, true, "Change an internal field name in output Parquet file schema."}}}, - {"23.4", {{"allow_suspicious_indices", true, false, "If true, index can defined with identical expressions"}, - {"allow_nonconst_timezone_arguments", true, false, "Allow non-const timezone arguments in certain time-related functions like toTimeZone(), fromUnixTimestamp*(), snowflakeToDateTime*()."}, - {"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"}, - {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"}, - {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}, - {"formatdatetime_f_prints_single_zero", true, false, "Improved compatibility with MySQL DATE_FORMAT()/STR_TO_DATE()"}, - {"formatdatetime_parsedatetime_m_is_month_name", false, true, "Improved compatibility with MySQL DATE_FORMAT/STR_TO_DATE"}}}, - {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"}, - {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"}, - {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input forma"}, - {"output_format_arrow_compression_method", "none", "lz4_frame", "Use lz4 compression in Arrow output format by default"}, - {"output_format_parquet_compression_method", "snappy", "lz4", "Use lz4 compression in Parquet output format by default"}, - {"output_format_orc_compression_method", "none", "lz4_frame", "Use lz4 compression in ORC output format by default"}, - {"async_query_sending_for_remote", false, true, "Create connections and send query async across shards"}}}, - {"23.2", {{"output_format_parquet_fixed_string_as_fixed_byte_array", false, true, "Use Parquet FIXED_LENGTH_BYTE_ARRAY type for FixedString by default"}, - {"output_format_arrow_fixed_string_as_fixed_byte_array", false, true, "Use Arrow FIXED_SIZE_BINARY type for FixedString by default"}, - {"query_plan_remove_redundant_distinct", false, true, "Remove redundant Distinct step in query plan"}, - {"optimize_duplicate_order_by_and_distinct", true, false, "Remove duplicate ORDER BY and DISTINCT if it's possible"}, - {"insert_keeper_max_retries", 0, 20, "Enable reconnections to Keeper on INSERT, improve reliability"}}}, - {"23.1", {{"input_format_json_read_objects_as_strings", 0, 1, "Enable reading nested json objects as strings while object type is experimental"}, - {"input_format_json_defaults_for_missing_elements_in_named_tuple", false, true, "Allow missing elements in JSON objects while reading named tuples by default"}, - {"input_format_csv_detect_header", false, true, "Detect header in CSV format by default"}, - {"input_format_tsv_detect_header", false, true, "Detect header in TSV format by default"}, - {"input_format_custom_detect_header", false, true, "Detect header in CustomSeparated format by default"}, - {"query_plan_remove_redundant_sorting", false, true, "Remove redundant sorting in query plan. 
For example, sorting steps related to ORDER BY clauses in subqueries"}}}, - {"22.12", {{"max_size_to_preallocate_for_aggregation", 10'000'000, 100'000'000, "This optimizes performance"}, - {"query_plan_aggregation_in_order", 0, 1, "Enable some refactoring around query plan"}, - {"format_binary_max_string_size", 0, 1_GiB, "Prevent allocating large amount of memory"}}}, - {"22.11", {{"use_structure_from_insertion_table_in_table_functions", 0, 2, "Improve using structure from insertion table in table functions"}}}, - {"22.9", {{"force_grouping_standard_compatibility", false, true, "Make GROUPING function output the same as in SQL standard and other DBMS"}}}, - {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, - {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, - {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, - {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}, - {"input_format_skip_unknown_fields", false, true, "Optimize reading subset of columns for some input formats"}}}, - {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, - {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, - {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interpret SETTINGS as some values, which is misleading"}}}, - {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}}, - {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, - {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, - {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, - {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, - {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, - {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, - {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that semantically equals to count() as count() by default"}, - {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, - {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, - {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, - {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, - {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, - {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, - {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to male the behaviour more like to what users expect"}}}, - {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, - {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, - {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, - {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, - {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, - {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, - {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, - {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, - {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, {"24.12", { } @@ -347,6 +85,9 @@ static std::initializer_list(*type); + /// Write version of the serialization because we can add new arguments in the JSON type. + writeBinary(TYPE_JSON_SERIALIZATION_VERSION, buf); writeVarUInt(object_type.getMaxDynamicPaths(), buf); writeBinary(UInt8(object_type.getMaxDynamicTypes()), buf); const auto & typed_paths = object_type.getTypedPaths(); @@ -726,6 +732,10 @@ DataTypePtr decodeDataType(ReadBuffer & buf) } case BinaryTypeIndex::JSON: { + UInt8 serialization_version; + readBinary(serialization_version, buf); + if (serialization_version > TYPE_JSON_SERIALIZATION_VERSION) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected version of JSON type binary encoding"); size_t max_dynamic_paths; readVarUInt(max_dynamic_paths, buf); UInt8 max_dynamic_types; diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index 95b76c60063..ad7a5a4c863 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -146,6 +146,7 @@ Chunk Squashing::squash(std::vector && input_chunks, Chunk::ChunkInfoColl { /// We know all the data we will insert in advance and can make all necessary pre-allocations. 
mutable_columns[i]->prepareForSquashing(source_columns_list[i]); +// mutable_columns[i]->reserve(rows); for (auto & source_column : source_columns_list[i]) { auto column = std::move(source_column); From 5812dbcf2e4a9eada33a611ea7b63172f6ed0905 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 13 Aug 2024 11:53:33 +0200 Subject: [PATCH 2155/2361] Update 03210_dynamic_squashing.sql --- tests/queries/0_stateless/03210_dynamic_squashing.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.sql b/tests/queries/0_stateless/03210_dynamic_squashing.sql index da3b911e796..d9ebc28fc43 100644 --- a/tests/queries/0_stateless/03210_dynamic_squashing.sql +++ b/tests/queries/0_stateless/03210_dynamic_squashing.sql @@ -1,3 +1,5 @@ +-- Tags: long + set allow_experimental_dynamic_type = 1; set max_block_size = 1000; From 85bd63a2ac54c8665e99c1b07c4a5e0189212635 Mon Sep 17 00:00:00 2001 From: kevinyhzou Date: Fri, 10 May 2024 12:18:06 +0800 Subject: [PATCH 2156/2361] rebase and resolve conflict --- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.cpp | 1 + src/Interpreters/HashJoin/AddedColumns.cpp | 157 ++++++++++++------ src/Interpreters/HashJoin/AddedColumns.h | 28 +++- src/Interpreters/HashJoin/HashJoin.cpp | 4 +- src/Interpreters/HashJoin/HashJoin.h | 12 ++ src/Interpreters/HashJoin/HashJoinMethods.h | 3 +- .../HashJoin/HashJoinMethodsImpl.h | 49 +++--- src/Interpreters/HashJoin/KnowRowsHolder.h | 9 +- src/Interpreters/RowRefs.cpp | 6 +- src/Interpreters/RowRefs.h | 7 +- src/Interpreters/TableJoin.cpp | 1 + src/Interpreters/TableJoin.h | 2 + tests/performance/all_join_opt.xml | 15 ++ 14 files changed, 206 insertions(+), 89 deletions(-) create mode 100644 tests/performance/all_join_opt.xml diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 6f24db57026..e2740026e58 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -325,6 +325,7 @@ class IColumn; \ M(Bool, join_use_nulls, false, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.", IMPORTANT) \ \ + M(Int32, join_output_by_rowlist_perkey_rows_threshold, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join.", 0) \ M(JoinStrictness, join_default_strictness, JoinStrictness::All, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.", 0) \ M(Bool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. 
ANY RIGHT JOIN needs one-to-many keys mapping to be consistent with LEFT one.", IMPORTANT) \ M(Bool, single_join_prefer_left_table, true, "For single JOIN in case of identifier ambiguity prefer left table", IMPORTANT) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 511723f1873..cc9524bea2e 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -85,6 +85,7 @@ static std::initializer_list void AddedColumns::buildOutput() -{ -} +template<> +void AddedColumns::buildOutput() {} + +template<> +void AddedColumns::buildJoinGetOutput() {} + +template<> +template +void AddedColumns::buildOutputFromBlocks() {} template<> void AddedColumns::buildOutput() { - for (size_t i = 0; i < this->size(); ++i) + if (!output_by_row_list) + buildOutputFromBlocks(); + else { - auto& col = columns[i]; - size_t default_count = 0; - auto apply_default = [&]() + if (join_data_avg_perkey_rows < output_by_row_list_threshold) + buildOutputFromBlocks(); + else { - if (default_count > 0) + for (size_t i = 0; i < this->size(); ++i) { - JoinCommon::addDefaultValues(*col, type_name[i].type, default_count); - default_count = 0; - } - }; - - for (size_t j = 0; j < lazy_output.blocks.size(); ++j) - { - if (!lazy_output.blocks[j]) - { - default_count++; - continue; - } - apply_default(); - const auto & column_from_block = reinterpret_cast(lazy_output.blocks[j])->getByPosition(right_indexes[i]); - /// If it's joinGetOrNull, we need to wrap not-nullable columns in StorageJoin. - if (is_join_get) - { - if (auto * nullable_col = typeid_cast(col.get()); - nullable_col && !column_from_block.column->isNullable()) + auto & col = columns[i]; + for (auto row_ref_i : lazy_output.row_refs) { - nullable_col->insertFromNotNullable(*column_from_block.column, lazy_output.row_nums[j]); - continue; + if (row_ref_i) + { + const RowRefList * row_ref_list = reinterpret_cast(row_ref_i); + for (auto it = row_ref_list->begin(); it.ok(); ++it) + col->insertFrom(*it->block->getByPosition(right_indexes[i]).column, it->row_num); + } + else + type_name[i].type->insertDefaultInto(*col); } } - col->insertFrom(*column_from_block.column, lazy_output.row_nums[j]); } - apply_default(); + } +} + +template<> +void AddedColumns::buildJoinGetOutput() +{ + for (size_t i = 0; i < this->size(); ++i) + { + auto & col = columns[i]; + for (auto row_ref_i : lazy_output.row_refs) + { + if (!row_ref_i) + { + type_name[i].type->insertDefaultInto(*col); + continue; + } + const auto * row_ref = reinterpret_cast(row_ref_i); + const auto & column_from_block = row_ref->block->getByPosition(right_indexes[i]); + if (auto * nullable_col = typeid_cast(col.get()); nullable_col && !column_from_block.column->isNullable()) + nullable_col->insertFromNotNullable(*column_from_block.column, row_ref->row_num); + else + col->insertFrom(*column_from_block.column, row_ref->row_num); + } + } +} + +template<> +template +void AddedColumns::buildOutputFromBlocks() +{ + if (this->size() == 0) + return; + std::vector blocks; + std::vector row_nums; + blocks.reserve(lazy_output.row_refs.size()); + row_nums.reserve(lazy_output.row_refs.size()); + for (auto row_ref_i : lazy_output.row_refs) + { + if (row_ref_i) + { + if constexpr (from_row_list) + { + const RowRefList * row_ref_list = reinterpret_cast(row_ref_i); + for (auto it = row_ref_list->begin(); it.ok(); ++it) + { + blocks.emplace_back(it->block); + row_nums.emplace_back(it->row_num); + } + } + else + { + const RowRef * row_ref = reinterpret_cast(row_ref_i); + 
blocks.emplace_back(row_ref->block); + row_nums.emplace_back(row_ref->row_num); + } + } + else + { + blocks.emplace_back(nullptr); + row_nums.emplace_back(0); + } + } + for (size_t i = 0; i < this->size(); ++i) + { + auto & col = columns[i]; + for (size_t j = 0; j < blocks.size(); ++j) + { + if (blocks[j]) + col->insertFrom(*blocks[j]->getByPosition(right_indexes[i]).column, row_nums[j]); + else + type_name[i].type->insertDefaultInto(*col); + } } } @@ -72,29 +139,27 @@ void AddedColumns::applyLazyDefaults() } template<> -void AddedColumns::applyLazyDefaults() -{ -} +void AddedColumns::applyLazyDefaults() {} template <> -void AddedColumns::appendFromBlock(const Block & block, size_t row_num,const bool has_defaults) +void AddedColumns::appendFromBlock(const RowRef * row_ref, const bool has_defaults) { if (has_defaults) applyLazyDefaults(); #ifndef NDEBUG - checkBlock(block); + checkBlock(*row_ref->block); #endif if (is_join_get) { size_t right_indexes_size = right_indexes.size(); for (size_t j = 0; j < right_indexes_size; ++j) { - const auto & column_from_block = block.getByPosition(right_indexes[j]); + const auto & column_from_block = row_ref->block->getByPosition(right_indexes[j]); if (auto * nullable_col = nullable_column_ptrs[j]) - nullable_col->insertFromNotNullable(*column_from_block.column, row_num); + nullable_col->insertFromNotNullable(*column_from_block.column, row_ref->row_num); else - columns[j]->insertFrom(*column_from_block.column, row_num); + columns[j]->insertFrom(*column_from_block.column, row_ref->row_num); } } else @@ -102,22 +167,21 @@ void AddedColumns::appendFromBlock(const Block & block, size_t row_num,co size_t right_indexes_size = right_indexes.size(); for (size_t j = 0; j < right_indexes_size; ++j) { - const auto & column_from_block = block.getByPosition(right_indexes[j]); - columns[j]->insertFrom(*column_from_block.column, row_num); + const auto & column_from_block = row_ref->block->getByPosition(right_indexes[j]); + columns[j]->insertFrom(*column_from_block.column, row_ref->row_num); } } } template <> -void AddedColumns::appendFromBlock(const Block & block, size_t row_num, bool) +void AddedColumns::appendFromBlock(const RowRef * row_ref, bool) { #ifndef NDEBUG - checkBlock(block); + checkBlock(*row_ref->block); #endif if (has_columns_to_add) { - lazy_output.blocks.emplace_back(reinterpret_cast(&block)); - lazy_output.row_nums.emplace_back(static_cast(row_num)); + lazy_output.row_refs.emplace_back(reinterpret_cast(row_ref)); } } template<> @@ -131,8 +195,7 @@ void AddedColumns::appendDefaultRow() { if (has_columns_to_add) { - lazy_output.blocks.emplace_back(0); - lazy_output.row_nums.emplace_back(0); + lazy_output.row_refs.emplace_back(0); } } } diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 13a7df6f498..f1b95a63be6 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -50,8 +50,7 @@ public: struct LazyOutput { - PaddedPODArray blocks; - PaddedPODArray row_nums; + PaddedPODArray row_refs; }; AddedColumns( @@ -76,8 +75,7 @@ public: if constexpr (lazy) { has_columns_to_add = num_columns_to_add > 0; - lazy_output.blocks.reserve(rows_to_add); - lazy_output.row_nums.reserve(rows_to_add); + lazy_output.row_refs.reserve(rows_to_add); } columns.reserve(num_columns_to_add); @@ -115,18 +113,22 @@ public: if (columns[j]->isNullable() && !saved_column->isNullable()) nullable_column_ptrs[j] = typeid_cast(columns[j].get()); } + join_data_avg_perkey_rows = 
join.getJoinedData()->avgPerKeyRows();
+        output_by_row_list_threshold = join.getTableJoin().outputByRowListPerkeyRowsThreshold();
     }
 
     size_t size() const { return columns.size(); }
 
     void buildOutput();
 
+    void buildJoinGetOutput();
+
     ColumnWithTypeAndName moveColumn(size_t i)
     {
         return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].type, type_name[i].qualified_name);
     }
 
-    void appendFromBlock(const Block & block, size_t row_num, bool has_default);
+    void appendFromBlock(const RowRef * row_ref, bool has_default);
 
     void appendDefaultRow();
 
@@ -134,6 +136,8 @@ public:
 
     const IColumn & leftAsofKey() const { return *left_asof_key; }
 
+    static constexpr bool isLazy() { return lazy; }
+
     Block left_block;
     std::vector join_on_keys;
     ExpressionActionsPtr additional_filter_expression;
@@ -142,6 +146,9 @@ public:
     size_t rows_to_add;
     std::unique_ptr offsets_to_replicate;
     bool need_filter = false;
+    bool output_by_row_list = false;
+    size_t join_data_avg_perkey_rows = 0;
+    size_t output_by_row_list_threshold = 0;
     IColumn::Filter filter;
 
     void reserve(bool need_replicate)
@@ -212,15 +219,22 @@ private:
         columns.back()->reserve(src_column.column->size());
         type_name.emplace_back(src_column.type, src_column.name, qualified_name);
     }
+
+    /** Build output from the blocks extracted from `RowRef` or `RowRefList`, to avoid block cache misses which may cause a performance slowdown.
+      * This problem would happen if we built the output directly from `RowRef` or `RowRefList`.
+      */
+    template
+    void buildOutputFromBlocks();
 };
 
 /// Adapter class to pass into addFoundRowAll
 /// In joinRightColumnsWithAdditionalFilter we don't want to add rows directly into AddedColumns,
 /// because they need to be filtered by additional_filter_expression.
-class PreSelectedRows : public std::vector
+class PreSelectedRows : public std::vector
 {
 public:
-    void appendFromBlock(const Block & block, size_t row_num, bool /* has_default */) { this->emplace_back(&block, row_num); }
+    void appendFromBlock(const RowRef * row_ref, bool /* has_default */) { this->emplace_back(row_ref); }
+    static constexpr bool isLazy() { return false; }
 };
 
 }
diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp
index dd7d42de63e..9c07a71e614 100644
--- a/src/Interpreters/HashJoin/HashJoin.cpp
+++ b/src/Interpreters/HashJoin/HashJoin.cpp
@@ -495,7 +495,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
     }
 
     size_t rows = source_block.rows();
-
+    data->rows_to_join += rows;
     const auto & right_key_names = table_join->getAllNames(JoinTableSide::Right);
     ColumnPtrMap all_key_columns(right_key_names.size());
     for (const auto & column_name : right_key_names)
@@ -647,7 +647,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
             total_bytes = getTotalByteCount();
         }
     }
-
+    data->keys_to_join = total_rows;
     shrinkStoredBlocksToFit(total_bytes);
 
     return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h
index 00f5ef6d214..d645b8e9273 100644
--- a/src/Interpreters/HashJoin/HashJoin.h
+++ b/src/Interpreters/HashJoin/HashJoin.h
@@ -345,6 +345,18 @@ public:
 
         size_t blocks_allocated_size = 0;
         size_t blocks_nullmaps_allocated_size = 0;
+
+        /// Number of rows of right table to join
+        size_t rows_to_join = 0;
+        /// Number of keys of right table to join
+        size_t keys_to_join = 0;
+
+        size_t avgPerKeyRows() const
+        {
+            if (keys_to_join == 0)
+                return 0;
+ 
return rows_to_join / keys_to_join; + } }; using RightTableDataPtr = std::shared_ptr; diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 3b7a67467e3..97ad57d26ea 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -83,6 +83,7 @@ public: const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, bool is_join_get = false); + private: template static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes); @@ -128,7 +129,7 @@ private: template static ColumnPtr buildAdditionalFilter( size_t left_start_row, - const std::vector & selected_rows, + const std::vector & selected_rows, const std::vector & row_replicate_offset, AddedColumns & added_columns); diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index aedd24630d1..0d90bad2d8a 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -95,7 +95,10 @@ Block HashJoinMethods::joinBlockImpl( added_columns.join_on_keys.clear(); Block remaining_block = sliceBlock(block, num_joined); - added_columns.buildOutput(); + if (is_join_get) + added_columns.buildJoinGetOutput(); + else + added_columns.buildOutput(); for (size_t i = 0; i < added_columns.size(); ++i) block.insert(added_columns.moveColumn(i)); @@ -339,6 +342,8 @@ size_t HashJoinMethods::joinRightColumns( size_t rows = added_columns.rows_to_add; if constexpr (need_filter) added_columns.filter = IColumn::Filter(rows, 0); + if constexpr (!flag_per_row && (STRICTNESS == JoinStrictness::All || (STRICTNESS == JoinStrictness::Semi && KIND == JoinKind::Right))) + added_columns.output_by_row_list = true; Arena pool; @@ -381,15 +386,15 @@ size_t HashJoinMethods::joinRightColumns( const IColumn & left_asof_key = added_columns.leftAsofKey(); auto row_ref = mapped->findAsof(left_asof_key, i); - if (row_ref.block) + if (row_ref && row_ref->block) { setUsed(added_columns.filter, i); if constexpr (flag_per_row) - used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); + used_flags.template setUsed(row_ref->block, row_ref->row_num, 0); else used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing); + added_columns.appendFromBlock(row_ref, join_features.add_missing); } else addNotFoundRow(added_columns, current_offset); @@ -420,7 +425,7 @@ size_t HashJoinMethods::joinRightColumns( if (used_once) { setUsed(added_columns.filter, i); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + added_columns.appendFromBlock(&mapped, join_features.add_missing); } break; @@ -438,7 +443,7 @@ size_t HashJoinMethods::joinRightColumns( { setUsed(added_columns.filter, i); used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + added_columns.appendFromBlock(&mapped, join_features.add_missing); if (join_features.is_any_or_semi_join) { @@ -477,7 +482,7 @@ template template ColumnPtr HashJoinMethods::buildAdditionalFilter( size_t left_start_row, - const std::vector & selected_rows, + const std::vector & selected_rows, const std::vector & row_replicate_offset, AddedColumns & added_columns) { @@ -489,7 +494,7 @@ ColumnPtr HashJoinMethods::buildAdditionalFilter result_column = ColumnUInt8::create(); break; } - const Block & sample_right_block = 
*selected_rows.begin()->block; + const Block & sample_right_block = *((*selected_rows.begin())->block); if (!sample_right_block || !added_columns.additional_filter_expression) { auto filter = ColumnUInt8::create(); @@ -519,8 +524,8 @@ ColumnPtr HashJoinMethods::buildAdditionalFilter auto new_col = col.column->cloneEmpty(); for (const auto & selected_row : selected_rows) { - const auto & src_col = selected_row.block->getByPosition(right_col_pos); - new_col->insertFrom(*src_col.column, selected_row.row_num); + const auto & src_col = selected_row->block->getByPosition(right_col_pos); + new_col->insertFrom(*src_col.column, selected_row->row_num); } executed_block.insert({std::move(new_col), col.type, col.name}); } @@ -700,26 +705,24 @@ size_t HashJoinMethods::joinRightColumnsWithAddt { // For inner join, we need mark each right row'flag, because we only use each right row once. auto used_once = used_flags.template setUsedOnce( - selected_right_row_it->block, selected_right_row_it->row_num, 0); + (*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); if (used_once) { any_matched = true; total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); } } } else { auto used_once = used_flags.template setUsedOnce( - selected_right_row_it->block, selected_right_row_it->row_num, 0); + (*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); if (used_once) { any_matched = true; total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); } } } @@ -727,16 +730,14 @@ size_t HashJoinMethods::joinRightColumnsWithAddt { any_matched = true; if constexpr (join_features.right && join_features.need_flags) - used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + used_flags.template setUsed((*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); } else { any_matched = true; total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); - used_flags.template setUsed( - selected_right_row_it->block, selected_right_row_it->row_num, 0); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); + used_flags.template setUsed((*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); } } @@ -756,8 +757,7 @@ size_t HashJoinMethods::joinRightColumnsWithAddt if (filter_flags[replicated_row]) { any_matched = true; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); total_added_rows += 1; } ++selected_right_row_it; @@ -767,8 +767,7 @@ size_t HashJoinMethods::joinRightColumnsWithAddt if (filter_flags[replicated_row]) { any_matched = true; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); total_added_rows += 1; selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row; break; diff --git a/src/Interpreters/HashJoin/KnowRowsHolder.h 
b/src/Interpreters/HashJoin/KnowRowsHolder.h index d51c96893c5..9223e98d13c 100644 --- a/src/Interpreters/HashJoin/KnowRowsHolder.h +++ b/src/Interpreters/HashJoin/KnowRowsHolder.h @@ -104,7 +104,7 @@ void addFoundRowAll( { if (!known_rows.isKnown(std::make_pair(it->block, it->row_num))) { - added.appendFromBlock(*it->block, it->row_num, false); + added.appendFromBlock(*it, false); ++current_offset; if (!new_known_rows_ptr) { @@ -124,11 +124,16 @@ void addFoundRowAll( known_rows.add(std::cbegin(*new_known_rows_ptr), std::cend(*new_known_rows_ptr)); } } + else if constexpr (AddedColumns::isLazy()) + { + added.appendFromBlock(&mapped, false); + current_offset += mapped.rows; + } else { for (auto it = mapped.begin(); it.ok(); ++it) { - added.appendFromBlock(*it->block, it->row_num, false); + added.appendFromBlock(*it, false); ++current_offset; } } diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 9785ba46dab..1b397ab56ef 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -144,7 +144,7 @@ public: return low; } - RowRef findAsof(const IColumn & asof_column, size_t row_num) override + RowRef * findAsof(const IColumn & asof_column, size_t row_num) override { sort(); @@ -156,10 +156,10 @@ public: if (pos != entries.size()) { size_t row_ref_index = entries[pos].row_ref_index; - return row_refs[row_ref_index]; + return &row_refs[row_ref_index]; } - return {nullptr, 0}; + return nullptr; } private: diff --git a/src/Interpreters/RowRefs.h b/src/Interpreters/RowRefs.h index 650b2311ba7..7c98c47dd11 100644 --- a/src/Interpreters/RowRefs.h +++ b/src/Interpreters/RowRefs.h @@ -122,7 +122,7 @@ struct RowRefList : RowRef }; RowRefList() {} /// NOLINT - RowRefList(const Block * block_, size_t row_num_) : RowRef(block_, row_num_) {} + RowRefList(const Block * block_, size_t row_num_) : RowRef(block_, row_num_), rows(1) {} ForwardIterator begin() const { return ForwardIterator(this); } @@ -135,8 +135,11 @@ struct RowRefList : RowRef *next = Batch(nullptr); } next = next->insert(std::move(row_ref), pool); + ++rows; } +public: + SizeT rows = 0; private: Batch * next = nullptr; }; @@ -158,7 +161,7 @@ struct SortedLookupVectorBase virtual void insert(const IColumn &, const Block *, size_t) = 0; // This needs to be synchronized internally - virtual RowRef findAsof(const IColumn &, size_t) = 0; + virtual RowRef * findAsof(const IColumn &, size_t) = 0; }; diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index c8c926db13c..138085f0710 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -115,6 +115,7 @@ TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_, Temporary , partial_merge_join_left_table_buffer_bytes(settings.partial_merge_join_left_table_buffer_bytes) , max_files_to_merge(settings.join_on_disk_max_files_to_merge) , temporary_files_codec(settings.temporary_files_codec) + , output_by_rowlist_perkey_rows_threshold(settings.join_output_by_rowlist_perkey_rows_threshold) , max_memory_usage(settings.max_memory_usage) , tmp_volume(tmp_volume_) , tmp_data(tmp_data_) diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index 3f2bebb5816..4d626084d81 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -148,6 +148,7 @@ private: const size_t partial_merge_join_left_table_buffer_bytes = 0; const size_t max_files_to_merge = 0; const String temporary_files_codec = "LZ4"; + const size_t output_by_rowlist_perkey_rows_threshold = 0; /// Value if 
setting max_memory_usage for query, can be used when max_bytes_in_join is not specified. size_t max_memory_usage = 0; @@ -295,6 +296,7 @@ public: return join_use_nulls && isRightOrFull(kind()); } + size_t outputByRowListPerkeyRowsThreshold() const { return output_by_rowlist_perkey_rows_threshold; } size_t defaultMaxBytes() const { return default_max_bytes; } size_t maxJoinedBlockRows() const { return max_joined_block_rows; } size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; } diff --git a/tests/performance/all_join_opt.xml b/tests/performance/all_join_opt.xml new file mode 100644 index 00000000000..0ab9c39f67c --- /dev/null +++ b/tests/performance/all_join_opt.xml @@ -0,0 +1,15 @@ + + CREATE TABLE test (a Int64, b String, c LowCardinality(String)) ENGINE = MergeTree() ORDER BY a + CREATE TABLE test1 (a Int64, b String, c LowCardinality(String)) ENGINE = MergeTree() ORDER BY a + + INSERT INTO test SELECT number % 10000, number % 10000, number % 10000 FROM numbers(10000000) + INSERT INTO test1 SELECT number % 1000 , number % 1000, number % 1000 FROM numbers(100000) + + SELECT MAX(test1.a) FROM test INNER JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test LEFT JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test RIGHT JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test FULL JOIN test1 on test.b = test1.b + + DROP TABLE IF EXISTS test + DROP TABLE IF EXISTS test1 + \ No newline at end of file From d2be1bf693045bebec341a850685b377ee3d88a9 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 13 Aug 2024 12:33:44 +0000 Subject: [PATCH 2157/2361] Fix FullSortingJoinTest.AsofGreaterGeneratedTestData with empty data --- src/Processors/tests/gtest_full_sorting_join.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index f678d7984e8..befe5e28b5d 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -208,6 +208,12 @@ Block executePipeline(QueryPipeline && pipeline) template void assertColumnVectorEq(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) { + if (expected.empty()) + { + ASSERT_TRUE(block.columns() == 0); + return; + } + const auto * actual = typeid_cast *>(block.getByName(name).column.get()); ASSERT_TRUE(actual) << "unexpected column type: " << block.getByName(name).column->dumpStructure() << "expected: " << typeid(ColumnVector).name(); @@ -230,6 +236,12 @@ void assertColumnVectorEq(const typename ColumnVector::Container & expected, template void assertColumnEq(const IColumn & expected, const Block & block, const std::string & name) { + if (expected.empty()) + { + ASSERT_TRUE(block.columns() == 0); + return; + } + const ColumnPtr & actual = block.getByName(name).column; ASSERT_TRUE(checkColumn(*actual)); ASSERT_TRUE(checkColumn(expected)); From 52dea79a906ecc3d9a19599612b1c2c7708876b6 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Tue, 13 Aug 2024 15:20:37 +0200 Subject: [PATCH 2158/2361] Update 03210_dynamic_squashing.sql --- tests/queries/0_stateless/03210_dynamic_squashing.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.sql b/tests/queries/0_stateless/03210_dynamic_squashing.sql index d9ebc28fc43..71d09263fda 100644 --- a/tests/queries/0_stateless/03210_dynamic_squashing.sql 
+++ b/tests/queries/0_stateless/03210_dynamic_squashing.sql @@ -6,18 +6,18 @@ set max_block_size = 1000; drop table if exists test; create table test (d Dynamic) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(1000000); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); select '1'; select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; drop table test; create table test (d Dynamic(max_types=1)) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(1000000); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); select '2'; select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; truncate table test; -insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(1000000); +insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); select '3'; select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; From fbe08cc24ca3f1b4472eba0960b14227917c0329 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Tue, 13 Aug 2024 07:24:41 -0600 Subject: [PATCH 2159/2361] Add no-parallel flag to 03221_create_if_not_exists_setting.sql --- tests/queries/0_stateless/03221_create_if_not_exists_setting.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql index 59535981e7a..18b3ed7bcec 100644 --- a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql @@ -1,3 +1,4 @@ +-- Tags: no-parallel SET create_if_not_exists=0; -- Default From 973b2405794cebeabf9497e3b10ed6180130b891 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 13 Aug 2024 15:35:14 +0200 Subject: [PATCH 2160/2361] Fix min/max time columns --- src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp | 7 +++++-- .../0_stateless/03222_create_timeseries_table.reference | 0 .../queries/0_stateless/03222_create_timeseries_table.sql | 7 +++++++ 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03222_create_timeseries_table.reference create mode 100644 tests/queries/0_stateless/03222_create_timeseries_table.sql diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp index f9e7290e514..746a6a28274 100644 --- a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp @@ -227,8 +227,11 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) /// We use Nullable(DateTime64(3)) as the default 
type of the `min_time` and `max_time` columns. /// It's nullable because it allows the aggregation (see aggregate_min_time_and_max_time) work correctly even /// for rows in the "tags" table which doesn't have `min_time` and `max_time` (because they have no matching rows in the "data" table). - make_new_column(TimeSeriesColumnNames::MinTime, make_nullable(timestamp_type)); - make_new_column(TimeSeriesColumnNames::MaxTime, make_nullable(timestamp_type)); + + if (!is_next_column_named(TimeSeriesColumnNames::MinTime)) + make_new_column(TimeSeriesColumnNames::MinTime, make_nullable(timestamp_type)); + if (!is_next_column_named(TimeSeriesColumnNames::MaxTime)) + make_new_column(TimeSeriesColumnNames::MaxTime, make_nullable(timestamp_type)); } /// Add missing columns for the "metrics" table. diff --git a/tests/queries/0_stateless/03222_create_timeseries_table.reference b/tests/queries/0_stateless/03222_create_timeseries_table.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03222_create_timeseries_table.sql b/tests/queries/0_stateless/03222_create_timeseries_table.sql new file mode 100644 index 00000000000..bdb29e7d366 --- /dev/null +++ b/tests/queries/0_stateless/03222_create_timeseries_table.sql @@ -0,0 +1,7 @@ +SET allow_experimental_time_series_table = 1; + +CREATE TABLE 03222_timeseries_table1 ENGINE = TimeSeries FORMAT Null; +CREATE TABLE 03222_timeseries_table2 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 1, aggregate_min_time_and_max_time = 1 FORMAT Null; +--- This doesn't work because allow_nullable_key cannot be set in query for the internal MergeTree tables +--- CREATE TABLE 03222_timeseries_table3 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 1, aggregate_min_time_and_max_time = 0; +CREATE TABLE 03222_timeseries_table4 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 0 FORMAT Null; From 5da5bea8dfb768d3f6fd42f081a3d82e1c782e64 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 13 Aug 2024 13:47:20 +0000 Subject: [PATCH 2161/2361] Reduce flakiness of a test --- .../0_stateless/00652_mergetree_mutations.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh index a9d7908a1af..3b0966dd2c3 100755 --- a/tests/queries/0_stateless/00652_mergetree_mutations.sh +++ b/tests/queries/0_stateless/00652_mergetree_mutations.sh @@ -70,7 +70,23 @@ sleep 1 ${CLICKHOUSE_CLIENT} --query="INSERT INTO mutations_cleaner(x) VALUES (4)" sleep 0.1 +for i in {1..10} +do + + if [ $(${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner'") -eq 2 ]; then + break + fi + + if [[ $i -eq 100 ]]; then + echo "Timed out while waiting for outdated mutation record to be deleted!" 
+ fi + + sleep 1 + ${CLICKHOUSE_CLIENT} --query="INSERT INTO mutations_cleaner(x) VALUES (4)" +done + # Check that the first mutation is cleaned ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, is_done FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner' ORDER BY mutation_id" ${CLICKHOUSE_CLIENT} --query="DROP TABLE mutations_cleaner" + From 16fd24fb1f82f109e3ac34665941c2acea7bf697 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Tue, 13 Aug 2024 13:51:24 +0000 Subject: [PATCH 2162/2361] Update fuzzer dictionary as per commit 99282e526a8aeb175e9f3e69fc9385070d03798a Also update README so that we have consistent behavior of sort across macOS and Linux. --- tests/fuzz/README.md | 4 +- tests/fuzz/all.dict | 926 ++-- tests/fuzz/dictionaries/datatypes.dict | 4416 ++++++++++++++++- tests/fuzz/dictionaries/functions.dict | 110 + .../{key_words.dict => keywords.dict} | 25 +- 5 files changed, 4895 insertions(+), 586 deletions(-) rename tests/fuzz/dictionaries/{key_words.dict => keywords.dict} (95%) diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md index 6b5b161b2d5..576ad66ed93 100644 --- a/tests/fuzz/README.md +++ b/tests/fuzz/README.md @@ -13,11 +13,11 @@ The list of datatypes generated via the following query: The list of keywords generated via the following query: ``` - clickhouse client -q "SELECT DISTINCT concat('\"', keyword, '\"') as res FROM system.keywords ORDER BY keyword" > key_words.dict + clickhouse client -q "SELECT DISTINCT concat('\"', keyword, '\"') as res FROM system.keywords ORDER BY keyword" > keywords.dict ``` Then merge all dictionaries into one (all.dict) ``` - cat ./dictionaries/* | sort | uniq > all.dict + cat ./dictionaries/* | LC_ALL=C sort | uniq > all.dict ``` \ No newline at end of file diff --git a/tests/fuzz/all.dict b/tests/fuzz/all.dict index f08e319f0d4..1c3c657d6b0 100644 --- a/tests/fuzz/all.dict +++ b/tests/fuzz/all.dict @@ -4,26 +4,26 @@ "accurateCastOrNull" "acos" "acosh" -"ADD" "ADD COLUMN" "ADD CONSTRAINT" +"ADD INDEX" +"ADD PROJECTION" +"ADD STATISTICS" +"ADD" "addDate" "addDays" "addHours" -"ADD INDEX" "addInterval" "addMicroseconds" "addMilliseconds" "addMinutes" "addMonths" "addNanoseconds" -"ADD PROJECTION" "addQuarters" "addressToLine" "addressToLineWithInlines" "addressToSymbol" "addSeconds" -"ADD STATISTIC" "addTupleOfIntervals" "addWeeks" "addYears" @@ -53,7 +53,6 @@ "ALL" "ALLOWED_LATENESS" "alphaTokens" -"ALTER" "ALTER COLUMN" "ALTER DATABASE" "ALTER LIVE VIEW" @@ -66,6 +65,7 @@ "ALTER TABLE" "ALTER TEMPORARY TABLE" "ALTER USER" +"ALTER" "analysisOfVariance" "analysisOfVarianceArgMax" "analysisOfVarianceArgMin" @@ -81,9 +81,9 @@ "analysisOfVarianceResample" "analysisOfVarianceSimpleState" "analysisOfVarianceState" -"and" -"AND" "AND STDOUT" +"AND" +"and" "anova" "anovaArgMax" "anovaArgMin" @@ -100,8 +100,8 @@ "anovaSimpleState" "anovaState" "ANTI" -"any" "ANY" +"any" "anyArgMax" "anyArgMin" "anyArray" @@ -136,6 +136,8 @@ "anyLastOrDefault" "anyLastOrNull" "anyLastResample" +"anyLastSimpleState" +"anyLastState" "anyLast_respect_nulls" "anyLast_respect_nullsArgMax" "anyLast_respect_nullsArgMin" @@ -151,14 +153,14 @@ "anyLast_respect_nullsResample" "anyLast_respect_nullsSimpleState" "anyLast_respect_nullsState" -"anyLastSimpleState" -"anyLastState" "anyMap" "anyMerge" "anyNull" "anyOrDefault" "anyOrNull" "anyResample" +"anySimpleState" +"anyState" "any_respect_nulls" "any_respect_nullsArgMax" "any_respect_nullsArgMin" @@ -174,8 +176,6 @@ "any_respect_nullsResample" 
"any_respect_nullsSimpleState" "any_respect_nullsState" -"anySimpleState" -"anyState" "any_value" "any_valueArgMax" "any_valueArgMin" @@ -189,6 +189,8 @@ "any_valueOrDefault" "any_valueOrNull" "any_valueResample" +"any_valueSimpleState" +"any_valueState" "any_value_respect_nulls" "any_value_respect_nullsArgMax" "any_value_respect_nullsArgMin" @@ -204,12 +206,10 @@ "any_value_respect_nullsResample" "any_value_respect_nullsSimpleState" "any_value_respect_nullsState" -"any_valueSimpleState" -"any_valueState" "APPEND" "appendTrailingCharIfAbsent" -"APPLY" "APPLY DELETED MASK" +"APPLY" "approx_top_count" "approx_top_countArgMax" "approx_top_countArgMin" @@ -285,43 +285,14 @@ "argMinResample" "argMinSimpleState" "argMinState" -"array" +"ARRAY JOIN" "Array" -"array_agg" -"array_aggArgMax" -"array_aggArgMin" -"array_aggArray" -"array_aggDistinct" -"array_aggForEach" -"array_aggIf" -"array_aggMap" -"array_aggMerge" -"array_aggNull" -"array_aggOrDefault" -"array_aggOrNull" -"array_aggResample" -"array_aggSimpleState" -"array_aggState" +"array" "arrayAll" "arrayAUC" "arrayAvg" "arrayCompact" "arrayConcat" -"array_concat_agg" -"array_concat_aggArgMax" -"array_concat_aggArgMin" -"array_concat_aggArray" -"array_concat_aggDistinct" -"array_concat_aggForEach" -"array_concat_aggIf" -"array_concat_aggMap" -"array_concat_aggMerge" -"array_concat_aggNull" -"array_concat_aggOrDefault" -"array_concat_aggOrNull" -"array_concat_aggResample" -"array_concat_aggSimpleState" -"array_concat_aggState" "arrayCount" "arrayCumSum" "arrayCumSumNonNegative" @@ -345,7 +316,6 @@ "arrayIntersect" "arrayJaccardIndex" "arrayJoin" -"ARRAY JOIN" "arrayLast" "arrayLastIndex" "arrayLastOrNull" @@ -382,6 +352,36 @@ "arrayUniq" "arrayWithConstant" "arrayZip" +"array_agg" +"array_aggArgMax" +"array_aggArgMin" +"array_aggArray" +"array_aggDistinct" +"array_aggForEach" +"array_aggIf" +"array_aggMap" +"array_aggMerge" +"array_aggNull" +"array_aggOrDefault" +"array_aggOrNull" +"array_aggResample" +"array_aggSimpleState" +"array_aggState" +"array_concat_agg" +"array_concat_aggArgMax" +"array_concat_aggArgMin" +"array_concat_aggArray" +"array_concat_aggDistinct" +"array_concat_aggForEach" +"array_concat_aggIf" +"array_concat_aggMap" +"array_concat_aggMerge" +"array_concat_aggNull" +"array_concat_aggOrDefault" +"array_concat_aggOrNull" +"array_concat_aggResample" +"array_concat_aggSimpleState" +"array_concat_aggState" "AS" "ASC" "ASCENDING" @@ -396,7 +396,6 @@ "atan" "atan2" "atanh" -"ATTACH" "ATTACH PART" "ATTACH PARTITION" "ATTACH POLICY" @@ -406,6 +405,7 @@ "ATTACH ROW POLICY" "ATTACH SETTINGS PROFILE" "ATTACH USER" +"ATTACH" "AUTO_INCREMENT" "avg" "avgArgMax" @@ -445,39 +445,17 @@ "base58Encode" "base64Decode" "base64Encode" -"base_backup" +"base64URLDecode" +"base64URLEncode" "basename" +"base_backup" "BCRYPT_HASH" "BCRYPT_PASSWORD" "BEGIN TRANSACTION" "BETWEEN" "BIDIRECTIONAL" -"BIGINT" -"BIGINT SIGNED" -"BIGINT UNSIGNED" "bin" -"BINARY" -"BINARY LARGE OBJECT" -"BINARY VARYING" -"BIT" "bitAnd" -"BIT_AND" -"BIT_ANDArgMax" -"BIT_ANDArgMin" -"BIT_ANDArray" -"BIT_ANDDistinct" -"BIT_ANDForEach" -"BIT_ANDIf" -"BIT_ANDMap" -"BIT_ANDMerge" -"BIT_ANDNull" -"BIT_ANDOrDefault" -"BIT_ANDOrNull" -"BIT_ANDResample" -"BIT_ANDSimpleState" -"BIT_ANDState" -"__bitBoolMaskAnd" -"__bitBoolMaskOr" "bitCount" "bitHammingDistance" "bitmapAnd" @@ -503,6 +481,31 @@ "bitmaskToList" "bitNot" "bitOr" +"bitPositionsToArray" +"bitRotateLeft" +"bitRotateRight" +"bitShiftLeft" +"bitShiftRight" +"bitSlice" +"bitTest" +"bitTestAll" +"bitTestAny" +"bitXor" +"BIT_AND" 
+"BIT_ANDArgMax" +"BIT_ANDArgMin" +"BIT_ANDArray" +"BIT_ANDDistinct" +"BIT_ANDForEach" +"BIT_ANDIf" +"BIT_ANDMap" +"BIT_ANDMerge" +"BIT_ANDNull" +"BIT_ANDOrDefault" +"BIT_ANDOrNull" +"BIT_ANDResample" +"BIT_ANDSimpleState" +"BIT_ANDState" "BIT_OR" "BIT_ORArgMax" "BIT_ORArgMin" @@ -518,18 +521,6 @@ "BIT_ORResample" "BIT_ORSimpleState" "BIT_ORState" -"bitPositionsToArray" -"bitRotateLeft" -"bitRotateRight" -"bitShiftLeft" -"bitShiftRight" -"bitSlice" -"__bitSwapLastTwo" -"bitTest" -"bitTestAll" -"bitTestAny" -"__bitWrapperFunc" -"bitXor" "BIT_XOR" "BIT_XORArgMax" "BIT_XORArgMin" @@ -546,13 +537,9 @@ "BIT_XORSimpleState" "BIT_XORState" "BLAKE3" -"BLOB" "blockNumber" "blockSerializedSize" "blockSize" -"bool" -"Bool" -"boolean" "BOTH" "boundingRatio" "boundingRatioArgMax" @@ -571,8 +558,6 @@ "boundingRatioState" "buildId" "BY" -"BYTE" -"BYTEA" "byteHammingDistance" "byteSize" "byteSlice" @@ -583,7 +568,6 @@ "caseWithExpression" "caseWithoutExpr" "caseWithoutExpression" -"_CAST" "CAST" "catboostEvaluate" "categoricalInformationValue" @@ -607,36 +591,41 @@ "CHANGE" "CHANGEABLE_IN_READONLY" "CHANGED" -"char" -"CHAR" -"CHARACTER" -"CHARACTER LARGE OBJECT" -"CHARACTER_LENGTH" -"CHARACTER VARYING" -"CHAR LARGE OBJECT" -"CHAR_LENGTH" +"changeDay" +"changeHour" +"changeMinute" +"changeMonth" +"changeSecond" +"changeYear" "CHAR VARYING" -"CHECK" +"CHAR" +"char" +"CHARACTER LARGE OBJECT" +"CHARACTER VARYING" +"CHARACTER" +"CHARACTER_LENGTH" +"CHAR_LENGTH" "CHECK ALL TABLES" "CHECK TABLE" +"CHECK" "cityHash64" +"clamp" "CLEANUP" "CLEAR COLUMN" "CLEAR INDEX" "CLEAR PROJECTION" -"CLEAR STATISTIC" -"CLOB" +"CLEAR STATISTICS" "CLUSTER" -"cluster_host_ids" "CLUSTERS" +"cluster_host_ids" "CN" "coalesce" "CODEC" "COLLATE" "COLUMN" "COLUMNS" -"COMMENT" "COMMENT COLUMN" +"COMMENT" "COMMIT" "COMPRESSION" "concat" @@ -644,8 +633,8 @@ "concatWithSeparator" "concatWithSeparatorAssumeInjective" "concat_ws" -"connection_id" "connectionId" +"connection_id" "CONST" "CONSTRAINT" "contingency" @@ -735,21 +724,13 @@ "countSubstringsCaseInsensitive" "countSubstringsCaseInsensitiveUTF8" "covarPop" -"COVAR_POP" "covarPopArgMax" -"COVAR_POPArgMax" "covarPopArgMin" -"COVAR_POPArgMin" "covarPopArray" -"COVAR_POPArray" "covarPopDistinct" -"COVAR_POPDistinct" "covarPopForEach" -"COVAR_POPForEach" "covarPopIf" -"COVAR_POPIf" "covarPopMap" -"COVAR_POPMap" "covarPopMatrix" "covarPopMatrixArgMax" "covarPopMatrixArgMin" @@ -766,17 +747,11 @@ "covarPopMatrixSimpleState" "covarPopMatrixState" "covarPopMerge" -"COVAR_POPMerge" "covarPopNull" -"COVAR_POPNull" "covarPopOrDefault" -"COVAR_POPOrDefault" "covarPopOrNull" -"COVAR_POPOrNull" "covarPopResample" -"COVAR_POPResample" "covarPopSimpleState" -"COVAR_POPSimpleState" "covarPopStable" "covarPopStableArgMax" "covarPopStableArgMin" @@ -793,23 +768,14 @@ "covarPopStableSimpleState" "covarPopStableState" "covarPopState" -"COVAR_POPState" "covarSamp" -"COVAR_SAMP" "covarSampArgMax" -"COVAR_SAMPArgMax" "covarSampArgMin" -"COVAR_SAMPArgMin" "covarSampArray" -"COVAR_SAMPArray" "covarSampDistinct" -"COVAR_SAMPDistinct" "covarSampForEach" -"COVAR_SAMPForEach" "covarSampIf" -"COVAR_SAMPIf" "covarSampMap" -"COVAR_SAMPMap" "covarSampMatrix" "covarSampMatrixArgMax" "covarSampMatrixArgMin" @@ -826,17 +792,11 @@ "covarSampMatrixSimpleState" "covarSampMatrixState" "covarSampMerge" -"COVAR_SAMPMerge" "covarSampNull" -"COVAR_SAMPNull" "covarSampOrDefault" -"COVAR_SAMPOrDefault" "covarSampOrNull" -"COVAR_SAMPOrNull" "covarSampResample" -"COVAR_SAMPResample" "covarSampSimpleState" -"COVAR_SAMPSimpleState" 
"covarSampStable" "covarSampStableArgMax" "covarSampStableArgMin" @@ -853,6 +813,35 @@ "covarSampStableSimpleState" "covarSampStableState" "covarSampState" +"COVAR_POP" +"COVAR_POPArgMax" +"COVAR_POPArgMin" +"COVAR_POPArray" +"COVAR_POPDistinct" +"COVAR_POPForEach" +"COVAR_POPIf" +"COVAR_POPMap" +"COVAR_POPMerge" +"COVAR_POPNull" +"COVAR_POPOrDefault" +"COVAR_POPOrNull" +"COVAR_POPResample" +"COVAR_POPSimpleState" +"COVAR_POPState" +"COVAR_SAMP" +"COVAR_SAMPArgMax" +"COVAR_SAMPArgMin" +"COVAR_SAMPArray" +"COVAR_SAMPDistinct" +"COVAR_SAMPForEach" +"COVAR_SAMPIf" +"COVAR_SAMPMap" +"COVAR_SAMPMerge" +"COVAR_SAMPNull" +"COVAR_SAMPOrDefault" +"COVAR_SAMPOrNull" +"COVAR_SAMPResample" +"COVAR_SAMPSimpleState" "COVAR_SAMPState" "cramersV" "cramersVArgMax" @@ -887,7 +876,6 @@ "CRC32" "CRC32IEEE" "CRC64" -"CREATE" "CREATE POLICY" "CREATE PROFILE" "CREATE QUOTA" @@ -897,25 +885,27 @@ "CREATE TABLE" "CREATE TEMPORARY TABLE" "CREATE USER" +"CREATE" "CROSS" "CUBE" "curdate" -"current_database" -"currentDatabase" -"current_date" "CURRENT GRANTS" -"currentProfiles" "CURRENT QUOTA" -"currentRoles" "CURRENT ROLES" "CURRENT ROW" -"current_schemas" -"currentSchemas" -"current_timestamp" "CURRENT TRANSACTION" -"currentUser" -"CURRENT_USER" +"currentDatabase" +"currentProfiles" +"currentRoles" +"currentSchemas" "CURRENTUSER" +"currentUser" +"current_database" +"current_date" +"current_schemas" +"current_timestamp" +"CURRENT_USER" +"current_user" "cutFragment" "cutIPv6" "cutQueryString" @@ -932,27 +922,29 @@ "cutWWW" "D" "damerauLevenshteinDistance" +"DATA INNER UUID" +"DATA" "DATABASE" "DATABASES" -"Date" "DATE" -"Date32" -"DATE_ADD" +"Date" "DATEADD" -"date_diff" -"dateDiff" -"DATE_DIFF" "DATEDIFF" -"DATE_FORMAT" +"dateDiff" "dateName" -"DATE_SUB" "DATESUB" "DateTime" -"DateTime32" "DateTime64" "dateTime64ToSnowflake" +"dateTime64ToSnowflakeID" "dateTimeToSnowflake" +"dateTimeToSnowflakeID" "dateTrunc" +"DATE_ADD" +"DATE_DIFF" +"date_diff" +"DATE_FORMAT" +"DATE_SUB" "DATE_TRUNC" "DAY" "DAYOFMONTH" @@ -960,10 +952,8 @@ "DAYOFYEAR" "DAYS" "DD" -"DEC" "Decimal" "Decimal128" -"Decimal256" "Decimal32" "Decimal64" "decodeHTMLComponent" @@ -972,17 +962,17 @@ "decodeXMLComponent" "decrypt" "DEDUPLICATE" -"DEFAULT" "DEFAULT DATABASE" -"defaultProfiles" "DEFAULT ROLE" +"DEFAULT" +"defaultProfiles" "defaultRoles" "defaultValueOfArgumentType" "defaultValueOfTypeName" "DEFINER" "degrees" -"DELETE" "DELETE WHERE" +"DELETE" "deltaSum" "deltaSumArgMax" "deltaSumArgMin" @@ -1014,6 +1004,21 @@ "deltaSumTimestampSimpleState" "deltaSumTimestampState" "demangle" +"denseRank" +"denseRankArgMax" +"denseRankArgMin" +"denseRankArray" +"denseRankDistinct" +"denseRankForEach" +"denseRankIf" +"denseRankMap" +"denseRankMerge" +"denseRankNull" +"denseRankOrDefault" +"denseRankOrNull" +"denseRankResample" +"denseRankSimpleState" +"denseRankState" "dense_rank" "dense_rankArgMax" "dense_rankArgMin" @@ -1033,9 +1038,9 @@ "DESC" "DESCENDING" "DESCRIBE" -"DETACH" "DETACH PART" "DETACH PARTITION" +"DETACH" "detectCharset" "detectLanguage" "detectLanguageMixed" @@ -1092,8 +1097,8 @@ "distanceL2Squared" "distanceLinf" "distanceLp" -"DISTINCT" "DISTINCT ON" +"DISTINCT" "DIV" "divide" "divideDecimal" @@ -1102,11 +1107,8 @@ "domainWithoutWWW" "domainWithoutWWWRFC" "dotProduct" -"DOUBLE" -"DOUBLE PRECISION" "DOUBLE_SHA1_HASH" "DOUBLE_SHA1_PASSWORD" -"DROP" "DROP COLUMN" "DROP CONSTRAINT" "DROP DEFAULT" @@ -1116,15 +1118,20 @@ "DROP PART" "DROP PARTITION" "DROP PROJECTION" -"DROP STATISTIC" +"DROP STATISTICS" "DROP TABLE" "DROP TEMPORARY TABLE" +"DROP" 
"dumpColumnStructure" +"dynamicElement" +"dynamicType" "e" "editDistance" +"editDistanceUTF8" "ELSE" -"empty" +"EMPTY AS" "EMPTY" +"empty" "emptyArrayDate" "emptyArrayDateTime" "emptyArrayFloat32" @@ -1139,10 +1146,9 @@ "emptyArrayUInt32" "emptyArrayUInt64" "emptyArrayUInt8" -"EMPTY AS" +"ENABLED ROLES" "enabledProfiles" "enabledRoles" -"ENABLED ROLES" "encodeURLComponent" "encodeURLFormComponent" "encodeXMLComponent" @@ -1168,11 +1174,10 @@ "entropySimpleState" "entropyState" "Enum" -"ENUM" "Enum16" "Enum8" -"EPHEMERAL" "EPHEMERAL SEQUENTIAL" +"EPHEMERAL" "equals" "erf" "erfc" @@ -1182,11 +1187,11 @@ "EVENT" "EVENTS" "EVERY" -"EXCEPT" "EXCEPT DATABASE" "EXCEPT DATABASES" "EXCEPT TABLE" "EXCEPT TABLES" +"EXCEPT" "EXCHANGE DICTIONARIES" "EXCHANGE TABLES" "EXISTS" @@ -1272,8 +1277,8 @@ "EXPRESSION" "EXTENDED" "EXTERNAL DDL FROM" -"extract" "EXTRACT" +"extract" "extractAll" "extractAllGroups" "extractAllGroupsHorizontal" @@ -1289,15 +1294,15 @@ "FALSE" "farmFingerprint64" "farmHash64" -"FETCH" "FETCH PART" "FETCH PARTITION" +"FETCH" "FIELDS" -"file" "FILE" -"filesystemAvailable" +"file" "FILESYSTEM CACHE" "FILESYSTEM CACHES" +"filesystemAvailable" "filesystemCapacity" "filesystemUnreserved" "FILTER" @@ -1322,6 +1327,8 @@ "first_valueOrDefault" "first_valueOrNull" "first_valueResample" +"first_valueSimpleState" +"first_valueState" "first_value_respect_nulls" "first_value_respect_nullsArgMax" "first_value_respect_nullsArgMin" @@ -1337,9 +1344,6 @@ "first_value_respect_nullsResample" "first_value_respect_nullsSimpleState" "first_value_respect_nullsState" -"first_valueSimpleState" -"first_valueState" -"FIXED" "FixedString" "flameGraph" "flameGraphArgMax" @@ -1358,19 +1362,17 @@ "flameGraphState" "flatten" "flattenTuple" -"FLOAT" "Float32" "Float64" "floor" "FOLLOWING" "FOR" "ForEach" -"FOREIGN" "FOREIGN KEY" +"FOREIGN" "FORGET PARTITION" -"format" "FORMAT" -"FORMAT_BYTES" +"format" "formatDateTime" "formatDateTimeInJodaSyntax" "formatQuery" @@ -1383,26 +1385,27 @@ "formatReadableTimeDelta" "formatRow" "formatRowNoNewline" +"FORMAT_BYTES" "FQDN" "fragment" "FREEZE" +"FROM INFILE" +"FROM SHARD" "FROM" -"FROM_BASE64" -"FROM_DAYS" "fromDaysSinceYearZero" "fromDaysSinceYearZero32" -"FROM INFILE" "fromModifiedJulianDay" "fromModifiedJulianDayOrNull" -"FROM SHARD" -"FROM_UNIXTIME" "fromUnixTimestamp" "fromUnixTimestamp64Micro" "fromUnixTimestamp64Milli" "fromUnixTimestamp64Nano" "fromUnixTimestampInJodaSyntax" -"from_utc_timestamp" "fromUTCTimestamp" +"FROM_BASE64" +"FROM_DAYS" +"FROM_UNIXTIME" +"from_utc_timestamp" "FULL" "fullHostName" "FULLTEXT" @@ -1411,44 +1414,46 @@ "gccMurmurHash" "gcd" "generateRandomStructure" +"generateSnowflakeID" "generateULID" "generateUUIDv4" +"generateUUIDv7" "geoDistance" "geohashDecode" "geohashEncode" "geohashesInBox" -"GEOMETRY" "geoToH3" "geoToS2" +"getClientHTTPHeader" "getMacro" "getOSKernelVersion" -"__getScalar" "getServerPort" "getSetting" "getSizeOfEnumType" "getSubcolumn" "getTypeSerializationStreams" +"GLOBAL IN" +"GLOBAL NOT IN" "GLOBAL" "globalIn" -"GLOBAL IN" "globalInIgnoreSet" "globalNotIn" -"GLOBAL NOT IN" "globalNotInIgnoreSet" "globalNotNullIn" "globalNotNullInIgnoreSet" "globalNullIn" "globalNullInIgnoreSet" "globalVariable" +"GRANT OPTION FOR" "GRANT" "GRANTEES" -"GRANT OPTION FOR" "GRANULARITY" "greatCircleAngle" "greatCircleDistance" "greater" "greaterOrEquals" "greatest" +"GROUP BY" "groupArray" "groupArrayArgMax" "groupArrayArgMin" @@ -1673,7 +1678,21 @@ "groupBitXorResample" "groupBitXorSimpleState" "groupBitXorState" -"GROUP BY" +"groupConcat" 
+"groupConcatArgMax" +"groupConcatArgMin" +"groupConcatArray" +"groupConcatDistinct" +"groupConcatForEach" +"groupConcatIf" +"groupConcatMap" +"groupConcatMerge" +"groupConcatNull" +"groupConcatOrDefault" +"groupConcatOrNull" +"groupConcatResample" +"groupConcatSimpleState" +"groupConcatState" "GROUPING SETS" "GROUPS" "groupUniqArray" @@ -1691,6 +1710,21 @@ "groupUniqArrayResample" "groupUniqArraySimpleState" "groupUniqArrayState" +"group_concat" +"group_concatArgMax" +"group_concatArgMin" +"group_concatArray" +"group_concatDistinct" +"group_concatForEach" +"group_concatIf" +"group_concatMap" +"group_concatMerge" +"group_concatNull" +"group_concatOrDefault" +"group_concatOrNull" +"group_concatResample" +"group_concatSimpleState" +"group_concatState" "H" "h3CellAreaM2" "h3CellAreaRads2" @@ -1753,6 +1787,8 @@ "hex" "HH" "HIERARCHICAL" +"hilbertDecode" +"hilbertEncode" "histogram" "histogramArgMax" "histogramArgMin" @@ -1773,8 +1809,8 @@ "hopEnd" "hopStart" "HOST" -"hostname" "hostName" +"hostname" "HOUR" "HOURS" "HTTP" @@ -1784,25 +1820,24 @@ "identity" "idnaDecode" "idnaEncode" -"if" "IF EMPTY" "IF EXISTS" "IF NOT EXISTS" +"if" "ifNotFinite" "ifNull" -"ignore" "IGNORE NULLS" -"ilike" +"ignore" "ILIKE" -"in" +"ilike" +"IN PARTITION" "IN" +"in" "INDEX" "INDEXES" "indexHint" "indexOf" "INDICES" -"INET4" -"INET6" "INET6_ATON" "INET6_NTOA" "INET_ATON" @@ -1812,28 +1847,18 @@ "initcap" "initcapUTF8" "initializeAggregation" -"initial_query_id" "initialQueryID" +"initial_query_id" "INJECTIVE" "INNER" -"IN PARTITION" "INSERT INTO" "instr" -"INT" -"INT1" -"Int128" "Int16" -"INT1 SIGNED" -"INT1 UNSIGNED" -"Int256" "Int32" "Int64" "Int8" "intDiv" "intDivOrZero" -"INTEGER" -"INTEGER SIGNED" -"INTEGER UNSIGNED" "INTERPOLATE" "INTERSECT" "INTERVAL" @@ -1854,11 +1879,8 @@ "intervalLengthSumResample" "intervalLengthSumSimpleState" "intervalLengthSumState" -"IntervalMicrosecond" -"IntervalMillisecond" "IntervalMinute" "IntervalMonth" -"IntervalNanosecond" "IntervalQuarter" "IntervalSecond" "IntervalWeek" @@ -1868,12 +1890,9 @@ "intHash32" "intHash64" "INTO OUTFILE" -"INT SIGNED" -"INT UNSIGNED" "INVISIBLE" "INVOKER" "IP" -"IPv4" "IPv4CIDRToRange" "IPv4NumToString" "IPv4NumToStringClassC" @@ -1881,12 +1900,14 @@ "IPv4StringToNumOrDefault" "IPv4StringToNumOrNull" "IPv4ToIPv6" -"IPv6" "IPv6CIDRToRange" "IPv6NumToString" "IPv6StringToNum" "IPv6StringToNumOrDefault" "IPv6StringToNumOrNull" +"IS NOT DISTINCT FROM" +"IS NOT NULL" +"IS NULL" "isConstant" "isDecimalOverflow" "isFinite" @@ -1896,16 +1917,13 @@ "isIPv6String" "isNaN" "isNotDistinctFrom" -"IS NOT DISTINCT FROM" "isNotNull" -"IS NOT NULL" "isNull" -"IS NULL" "isNullable" -"IS_OBJECT_ID" "isValidJSON" "isValidUTF8" "isZeroOrNull" +"IS_OBJECT_ID" "jaroSimilarity" "jaroWinklerSimilarity" "javaHash" @@ -1913,10 +1931,7 @@ "JOIN" "joinGet" "joinGetOrNull" -"JSON" "JSONArrayLength" -"JSON_ARRAY_LENGTH" -"JSON_EXISTS" "JSONExtract" "JSONExtractArrayRaw" "JSONExtractBool" @@ -1931,15 +1946,19 @@ "JSONHas" "JSONKey" "JSONLength" +"JSONMergePatch" "jsonMergePatch" -"JSON_QUERY" "JSONType" +"JSON_ARRAY_LENGTH" +"JSON_EXISTS" +"JSON_QUERY" "JSON_VALUE" "jumpConsistentHash" +"JWT" "kafkaMurmurHash" "KERBEROS" -"KEY" "KEY BY" +"KEY" "KEYED BY" "KEYS" "KILL" @@ -2046,6 +2065,8 @@ "last_valueOrDefault" "last_valueOrNull" "last_valueResample" +"last_valueSimpleState" +"last_valueState" "last_value_respect_nulls" "last_value_respect_nullsArgMax" "last_value_respect_nullsArgMin" @@ -2061,8 +2082,6 @@ "last_value_respect_nullsResample" "last_value_respect_nullsSimpleState" 
"last_value_respect_nullsState" -"last_valueSimpleState" -"last_valueState" "LAYOUT" "lcase" "lcm" @@ -2084,25 +2103,26 @@ "leadInFrameState" "LEADING" "least" -"left" -"LEFT" "LEFT ARRAY JOIN" +"LEFT" +"left" "leftPad" "leftPadUTF8" "leftUTF8" "lemmatize" "length" "lengthUTF8" +"LESS THAN" "less" "lessOrEquals" -"LESS THAN" "LEVEL" "levenshteinDistance" +"levenshteinDistanceUTF8" "lgamma" "LIFETIME" "LIGHTWEIGHT" -"like" "LIKE" +"like" "LIMIT" "LINEAR" "LinfDistance" @@ -2118,8 +2138,6 @@ "log1p" "log2" "logTrace" -"LONGBLOB" -"LONGTEXT" "LowCardinality" "lowCardinalityIndices" "lowCardinalityKeys" @@ -2129,8 +2147,8 @@ "LpDistance" "LpNorm" "LpNormalize" -"ltrim" "LTRIM" +"ltrim" "lttb" "lttbArgMax" "lttbArgMin" @@ -2170,7 +2188,6 @@ "mannWhitneyUTestSimpleState" "mannWhitneyUTestState" "map" -"Map" "mapAdd" "mapAll" "mapApply" @@ -2181,7 +2198,6 @@ "mapExtractKeyLike" "mapFilter" "mapFromArrays" -"MAP_FROM_ARRAYS" "mapFromString" "mapKeys" "mapPartialReverseSort" @@ -2192,18 +2208,19 @@ "mapSubtract" "mapUpdate" "mapValues" -"match" +"MAP_FROM_ARRAYS" "MATCH" -"materialize" -"MATERIALIZE" +"match" "MATERIALIZE COLUMN" -"MATERIALIZED" "MATERIALIZE INDEX" "MATERIALIZE PROJECTION" -"MATERIALIZE STATISTIC" +"MATERIALIZE STATISTICS" "MATERIALIZE TTL" -"max" +"MATERIALIZE" +"materialize" +"MATERIALIZED" "MAX" +"max" "max2" "maxArgMax" "maxArgMin" @@ -2507,14 +2524,11 @@ "medianTimingWeightedResample" "medianTimingWeightedSimpleState" "medianTimingWeightedState" -"MEDIUMBLOB" -"MEDIUMINT" -"MEDIUMINT SIGNED" -"MEDIUMINT UNSIGNED" -"MEDIUMTEXT" "MEMORY" "Merge" "MERGES" +"METRICS INNER UUID" +"METRICS" "metroHash64" "MI" "MICROSECOND" @@ -2522,8 +2536,8 @@ "mid" "MILLISECOND" "MILLISECONDS" -"min" "MIN" +"min" "min2" "minArgMax" "minArgMin" @@ -2562,18 +2576,20 @@ "MINUTES" "mismatches" "MM" -"mod" "MOD" -"MODIFY" +"mod" "MODIFY COLUMN" "MODIFY COMMENT" +"MODIFY DEFINER" "MODIFY ORDER BY" "MODIFY QUERY" "MODIFY REFRESH" "MODIFY SAMPLE BY" "MODIFY SETTING" "MODIFY SQL SECURITY" +"MODIFY STATISTICS" "MODIFY TTL" +"MODIFY" "modulo" "moduloLegacy" "moduloOrZero" @@ -2582,9 +2598,9 @@ "MONTHS" "mortonDecode" "mortonEncode" -"MOVE" "MOVE PART" "MOVE PARTITION" +"MOVE" "movingXXX" "MS" "multiFuzzyMatchAllIndices" @@ -2596,7 +2612,6 @@ "multiMatchAnyIndex" "multiply" "multiplyDecimal" -"MultiPolygon" "multiSearchAllPositions" "multiSearchAllPositionsCaseInsensitive" "multiSearchAllPositionsCaseInsensitiveUTF8" @@ -2624,18 +2639,10 @@ "NAMED COLLECTION" "NANOSECOND" "NANOSECONDS" -"NATIONAL CHAR" -"NATIONAL CHARACTER" -"NATIONAL CHARACTER LARGE OBJECT" -"NATIONAL CHARACTER VARYING" -"NATIONAL CHAR VARYING" -"NCHAR" -"NCHAR LARGE OBJECT" -"NCHAR VARYING" "negate" "neighbor" -"nested" "Nested" +"nested" "netloc" "NEXT" "ngramDistance" @@ -2678,7 +2685,6 @@ "nonNegativeDerivativeResample" "nonNegativeDerivativeSimpleState" "nonNegativeDerivativeState" -"NO_PASSWORD" "normalizedQueryHash" "normalizedQueryHashKeepNames" "normalizeL1" @@ -2696,13 +2702,19 @@ "normL2Squared" "normLinf" "normLp" -"not" -"NOT" "NOT BETWEEN" +"NOT IDENTIFIED" +"NOT ILIKE" +"NOT IN" +"NOT KEYED" +"NOT LIKE" +"NOT OVERRIDABLE" +"NOT" +"not" "notEmpty" "notEquals" -"nothing" "Nothing" +"nothing" "nothingArgMax" "nothingArgMin" "nothingArray" @@ -2746,21 +2758,16 @@ "nothingUInt64Resample" "nothingUInt64SimpleState" "nothingUInt64State" -"NOT IDENTIFIED" "notILike" -"NOT ILIKE" "notIn" -"NOT IN" "notInIgnoreSet" -"NOT KEYED" "notLike" -"NOT LIKE" "notNullIn" "notNullInIgnoreSet" -"NOT OVERRIDABLE" "now" "now64" "nowInBlock" 
+"NO_PASSWORD" "NS" "nth_value" "nth_valueArgMax" @@ -2792,28 +2799,25 @@ "ntileResample" "ntileSimpleState" "ntileState" -"Null" "NULL" +"Null" "Nullable" "nullIf" "nullIn" "nullInIgnoreSet" "NULLS" -"NUMERIC" -"NVARCHAR" -"Object" "OCTET_LENGTH" "OFFSET" -"ON" "ON DELETE" -"ONLY" "ON UPDATE" "ON VOLUME" +"ON" +"ONLY" "OPTIMIZE TABLE" -"or" -"OR" -"ORDER BY" "OR REPLACE" +"OR" +"or" +"ORDER BY" "OUTER" "OVER" "OVERRIDABLE" @@ -2838,32 +2842,64 @@ "parseDateTimeInJodaSyntaxOrZero" "parseDateTimeOrNull" "parseDateTimeOrZero" +"parseReadableSize" +"parseReadableSizeOrNull" +"parseReadableSizeOrZero" "parseTimeDelta" "PART" "PARTIAL" -"PARTITION" "PARTITION BY" +"PARTITION" +"partitionID" "partitionId" "PARTITIONS" "PART_MOVE_TO_SHARD" "PASTE" "path" "pathFull" +"percentRank" +"percentRankArgMax" +"percentRankArgMin" +"percentRankArray" +"percentRankDistinct" +"percentRankForEach" +"percentRankIf" +"percentRankMap" +"percentRankMerge" +"percentRankNull" +"percentRankOrDefault" +"percentRankOrNull" +"percentRankResample" +"percentRankSimpleState" +"percentRankState" +"percent_rank" +"percent_rankArgMax" +"percent_rankArgMin" +"percent_rankArray" +"percent_rankDistinct" +"percent_rankForEach" +"percent_rankIf" +"percent_rankMap" +"percent_rankMerge" +"percent_rankNull" +"percent_rankOrDefault" +"percent_rankOrNull" +"percent_rankResample" +"percent_rankSimpleState" +"percent_rankState" "PERIODIC REFRESH" "PERMANENTLY" "PERMISSIVE" -"PERSISTENT" "PERSISTENT SEQUENTIAL" +"PERSISTENT" "pi" "PIPELINE" "PLAINTEXT_PASSWORD" "PLAN" "plus" "pmod" -"Point" "pointInEllipses" "pointInPolygon" -"Polygon" "polygonAreaCartesian" "polygonAreaSpherical" "polygonConvexHullCartesian" @@ -2887,15 +2923,16 @@ "positionCaseInsensitive" "positionCaseInsensitiveUTF8" "positionUTF8" -"positive_modulo" "positiveModulo" +"positive_modulo" "pow" "power" "PRECEDING" "PRECISION" "PREWHERE" -"PRIMARY" "PRIMARY KEY" +"PRIMARY" +"printf" "PROFILE" "PROJECTION" "proportionsZTest" @@ -2906,6 +2943,7 @@ "punycodeEncode" "Q" "QQ" +"QUALIFY" "quantile" "quantileArgMax" "quantileArgMin" @@ -3418,12 +3456,12 @@ "quantileTimingWeightedState" "QUARTER" "QUARTERS" +"QUERY TREE" "QUERY" -"query_id" "queryID" "queryString" "queryStringAndFragment" -"QUERY TREE" +"query_id" "QUOTA" "radians" "rand" @@ -3440,16 +3478,16 @@ "randNegativeBinomial" "randNormal" "randomFixedString" -"RANDOMIZED" "RANDOMIZE FOR" +"RANDOMIZED" "randomPrintableASCII" "randomString" "randomStringUTF8" "randPoisson" "randStudentT" "randUniform" -"range" "RANGE" +"range" "rank" "rankArgMax" "rankArgMin" @@ -3481,20 +3519,22 @@ "rankSimpleState" "rankState" "READONLY" +"readWKTLineString" +"readWKTMultiLineString" "readWKTMultiPolygon" "readWKTPoint" "readWKTPolygon" "readWKTRing" -"REAL" "REALM" "RECOMPRESS" +"RECURSIVE" "REFERENCES" "REFRESH" "REGEXP" "regexpExtract" +"regexpQuoteMeta" "REGEXP_EXTRACT" "REGEXP_MATCHES" -"regexpQuoteMeta" "REGEXP_REPLACE" "regionHierarchy" "regionIn" @@ -3526,21 +3566,21 @@ "reinterpretAsUInt64" "reinterpretAsUInt8" "reinterpretAsUUID" -"REMOVE" "REMOVE SAMPLE BY" "REMOVE TTL" -"RENAME" +"REMOVE" "RENAME COLUMN" "RENAME DATABASE" "RENAME DICTIONARY" "RENAME TABLE" "RENAME TO" +"RENAME" "repeat" -"replace" +"REPLACE PARTITION" "REPLACE" +"replace" "replaceAll" "replaceOne" -"REPLACE PARTITION" "replaceRegexpAll" "replaceRegexpOne" "replicate" @@ -3570,12 +3610,11 @@ "reverseUTF8" "revision" "REVOKE" -"right" "RIGHT" +"right" "rightPad" "rightPadUTF8" "rightUTF8" -"Ring" "ROLLBACK" "ROLLUP" "round" @@ -3585,6 +3624,9 @@ 
"roundDuration" "roundToExp2" "ROW" +"rowNumberInAllBlocks" +"rowNumberInBlock" +"ROWS" "row_number" "row_numberArgMax" "row_numberArgMin" @@ -3592,8 +3634,6 @@ "row_numberDistinct" "row_numberForEach" "row_numberIf" -"rowNumberInAllBlocks" -"rowNumberInBlock" "row_numberMap" "row_numberMerge" "row_numberNull" @@ -3602,10 +3642,9 @@ "row_numberResample" "row_numberSimpleState" "row_numberState" -"ROWS" "rpad" -"rtrim" "RTRIM" +"rtrim" "runningAccumulate" "runningConcurrency" "runningDifference" @@ -3622,10 +3661,10 @@ "s2ToGeo" "S3" "SALT" -"SAMPLE" "SAMPLE BY" +"SAMPLE" +"SAN" "scalarProduct" -"__scalarSubqueryResult" "SCHEMA" "SCHEME" "SECOND" @@ -3681,18 +3720,18 @@ "seriesOutliersDetectTukey" "seriesPeriodDetectFFT" "SERVER" -"serverTimezone" "serverTimeZone" +"serverTimezone" "serverUUID" -"SET" -"SET DEFAULT" "SET DEFAULT ROLE" +"SET DEFAULT" "SET FAKE TIME" "SET NULL" -"SET ROLE" "SET ROLE DEFAULT" -"SETTINGS" +"SET ROLE" "SET TRANSACTION SNAPSHOT" +"SET" +"SETTINGS" "SHA1" "SHA224" "SHA256" @@ -3703,9 +3742,7 @@ "SHA512_256" "shardCount" "shardNum" -"SHOW" "SHOW ACCESS" -"showCertificate" "SHOW CREATE" "SHOW ENGINES" "SHOW FUNCTIONS" @@ -3713,11 +3750,12 @@ "SHOW PRIVILEGES" "SHOW PROCESSLIST" "SHOW SETTING" +"SHOW" +"showCertificate" "sigmoid" "sign" "SIGNED" "SIMPLE" -"SimpleAggregateFunction" "simpleJSONExtractBool" "simpleJSONExtractFloat" "simpleJSONExtractInt" @@ -3741,7 +3779,6 @@ "simpleLinearRegressionSimpleState" "simpleLinearRegressionState" "sin" -"SINGLE" "singleValueOrNull" "singleValueOrNullArgMax" "singleValueOrNullArgMin" @@ -3796,44 +3833,43 @@ "skewSampState" "sleep" "sleepEachRow" -"SMALLINT" -"SMALLINT SIGNED" -"SMALLINT UNSIGNED" +"snowflakeIDToDateTime" +"snowflakeIDToDateTime64" "snowflakeToDateTime" "snowflakeToDateTime64" "soundex" "SOURCE" "space" -"sparkbar" "sparkBar" -"sparkbarArgMax" +"sparkbar" "sparkBarArgMax" -"sparkbarArgMin" +"sparkbarArgMax" "sparkBarArgMin" -"sparkbarArray" +"sparkbarArgMin" "sparkBarArray" -"sparkbarDistinct" +"sparkbarArray" "sparkBarDistinct" -"sparkbarForEach" +"sparkbarDistinct" "sparkBarForEach" -"sparkbarIf" +"sparkbarForEach" "sparkBarIf" -"sparkbarMap" +"sparkbarIf" "sparkBarMap" -"sparkbarMerge" +"sparkbarMap" "sparkBarMerge" -"sparkbarNull" +"sparkbarMerge" "sparkBarNull" -"sparkbarOrDefault" +"sparkbarNull" "sparkBarOrDefault" -"sparkbarOrNull" +"sparkbarOrDefault" "sparkBarOrNull" -"sparkbarResample" +"sparkbarOrNull" "sparkBarResample" -"sparkbarSimpleState" +"sparkbarResample" "sparkBarSimpleState" -"sparkbarState" +"sparkbarSimpleState" "sparkBarState" +"sparkbarState" "SPATIAL" "splitByAlpha" "splitByChar" @@ -3860,42 +3896,29 @@ "SS" "SSH_KEY" "SSL_CERTIFICATE" +"START TRANSACTION" "startsWith" "startsWithUTF8" "State" -"STATISTIC" +"STATISTICS" "STD" "STDArgMax" "STDArgMin" "STDArray" "stddevPop" -"STDDEV_POP" "stddevPopArgMax" -"STDDEV_POPArgMax" "stddevPopArgMin" -"STDDEV_POPArgMin" "stddevPopArray" -"STDDEV_POPArray" "stddevPopDistinct" -"STDDEV_POPDistinct" "stddevPopForEach" -"STDDEV_POPForEach" "stddevPopIf" -"STDDEV_POPIf" "stddevPopMap" -"STDDEV_POPMap" "stddevPopMerge" -"STDDEV_POPMerge" "stddevPopNull" -"STDDEV_POPNull" "stddevPopOrDefault" -"STDDEV_POPOrDefault" "stddevPopOrNull" -"STDDEV_POPOrNull" "stddevPopResample" -"STDDEV_POPResample" "stddevPopSimpleState" -"STDDEV_POPSimpleState" "stddevPopStable" "stddevPopStableArgMax" "stddevPopStableArgMin" @@ -3912,35 +3935,20 @@ "stddevPopStableSimpleState" "stddevPopStableState" "stddevPopState" -"STDDEV_POPState" "stddevSamp" -"STDDEV_SAMP" 
"stddevSampArgMax" -"STDDEV_SAMPArgMax" "stddevSampArgMin" -"STDDEV_SAMPArgMin" "stddevSampArray" -"STDDEV_SAMPArray" "stddevSampDistinct" -"STDDEV_SAMPDistinct" "stddevSampForEach" -"STDDEV_SAMPForEach" "stddevSampIf" -"STDDEV_SAMPIf" "stddevSampMap" -"STDDEV_SAMPMap" "stddevSampMerge" -"STDDEV_SAMPMerge" "stddevSampNull" -"STDDEV_SAMPNull" "stddevSampOrDefault" -"STDDEV_SAMPOrDefault" "stddevSampOrNull" -"STDDEV_SAMPOrNull" "stddevSampResample" -"STDDEV_SAMPResample" "stddevSampSimpleState" -"STDDEV_SAMPSimpleState" "stddevSampStable" "stddevSampStableArgMax" "stddevSampStableArgMin" @@ -3957,6 +3965,35 @@ "stddevSampStableSimpleState" "stddevSampStableState" "stddevSampState" +"STDDEV_POP" +"STDDEV_POPArgMax" +"STDDEV_POPArgMin" +"STDDEV_POPArray" +"STDDEV_POPDistinct" +"STDDEV_POPForEach" +"STDDEV_POPIf" +"STDDEV_POPMap" +"STDDEV_POPMerge" +"STDDEV_POPNull" +"STDDEV_POPOrDefault" +"STDDEV_POPOrNull" +"STDDEV_POPResample" +"STDDEV_POPSimpleState" +"STDDEV_POPState" +"STDDEV_SAMP" +"STDDEV_SAMPArgMax" +"STDDEV_SAMPArgMin" +"STDDEV_SAMPArray" +"STDDEV_SAMPDistinct" +"STDDEV_SAMPForEach" +"STDDEV_SAMPIf" +"STDDEV_SAMPMap" +"STDDEV_SAMPMerge" +"STDDEV_SAMPNull" +"STDDEV_SAMPOrDefault" +"STDDEV_SAMPOrNull" +"STDDEV_SAMPResample" +"STDDEV_SAMPSimpleState" "STDDEV_SAMPState" "STDDistinct" "STDForEach" @@ -4008,10 +4045,10 @@ "stringJaccardIndex" "stringJaccardIndexUTF8" "stringToH3" -"str_to_date" -"str_to_map" "structureToCapnProtoSchema" "structureToProtobufSchema" +"str_to_date" +"str_to_map" "studentTTest" "studentTTestArgMax" "studentTTestArgMin" @@ -4029,16 +4066,16 @@ "studentTTestState" "subBitmap" "subDate" -"SUBPARTITION" "SUBPARTITION BY" +"SUBPARTITION" "SUBPARTITIONS" "substr" -"substring" "SUBSTRING" +"substring" "substringIndex" -"SUBSTRING_INDEX" "substringIndexUTF8" "substringUTF8" +"SUBSTRING_INDEX" "subtractDays" "subtractHours" "subtractInterval" @@ -4173,22 +4210,23 @@ "sumWithOverflowSimpleState" "sumWithOverflowState" "SUSPEND" -"svg" "SVG" +"svg" "SYNC" "synonyms" "SYNTAX" "SYSTEM" -"TABLE" "TABLE OVERRIDE" +"TABLE" "TABLES" +"TAGS INNER UUID" +"TAGS" "tan" "tanh" "tcpPort" -"TEMPORARY" "TEMPORARY TABLE" +"TEMPORARY" "TEST" -"TEXT" "tgamma" "theilsU" "theilsUArgMax" @@ -4208,33 +4246,31 @@ "THEN" "throwIf" "tid" -"TIME" "timeDiff" "timeSlot" "timeSlots" -"timestamp" "TIMESTAMP" -"TIMESTAMP_ADD" +"timestamp" "TIMESTAMPADD" -"timestamp_diff" -"timestampDiff" -"TIMESTAMP_DIFF" "TIMESTAMPDIFF" -"TIMESTAMP_SUB" +"timestampDiff" "TIMESTAMPSUB" -"timezone" +"TIMESTAMP_ADD" +"TIMESTAMP_DIFF" +"timestamp_diff" +"TIMESTAMP_SUB" "timeZone" -"timezoneOf" +"timezone" "timeZoneOf" -"timezoneOffset" +"timezoneOf" "timeZoneOffset" -"TINYBLOB" -"TINYINT" -"TINYINT SIGNED" -"TINYINT UNSIGNED" -"TINYTEXT" +"timezoneOffset" +"TO DISK" +"TO INNER UUID" +"TO SHARD" +"TO TABLE" +"TO VOLUME" "TO" -"TO_BASE64" "toBool" "toColumnTypeName" "toDate" @@ -4258,7 +4294,6 @@ "toDayOfMonth" "toDayOfWeek" "toDayOfYear" -"TO_DAYS" "toDaysSinceYearZero" "toDecimal128" "toDecimal128OrDefault" @@ -4277,7 +4312,6 @@ "toDecimal64OrNull" "toDecimal64OrZero" "toDecimalString" -"TO DISK" "toFixedString" "toFloat32" "toFloat32OrDefault" @@ -4288,7 +4322,6 @@ "toFloat64OrNull" "toFloat64OrZero" "toHour" -"TO INNER UUID" "toInt128" "toInt128OrDefault" "toInt128OrNull" @@ -4389,7 +4422,6 @@ "toRelativeWeekNum" "toRelativeYearNum" "toSecond" -"TO SHARD" "toStartOfDay" "toStartOfFifteenMinutes" "toStartOfFiveMinute" @@ -4409,11 +4441,10 @@ "toStartOfYear" "toString" "toStringCutToZero" -"TO TABLE" "TOTALS" "toTime" 
-"toTimezone" "toTimeZone" +"toTimezone" "toTypeName" "toUInt128" "toUInt128OrDefault" @@ -4439,25 +4470,26 @@ "toUInt8OrDefault" "toUInt8OrNull" "toUInt8OrZero" -"TO_UNIXTIME" "toUnixTimestamp" "toUnixTimestamp64Micro" "toUnixTimestamp64Milli" "toUnixTimestamp64Nano" -"to_utc_timestamp" "toUTCTimestamp" "toUUID" "toUUIDOrDefault" "toUUIDOrNull" "toUUIDOrZero" "toValidUTF8" -"TO VOLUME" "toWeek" "toYear" "toYearWeek" "toYYYYMM" "toYYYYMMDD" "toYYYYMMDDhhmmss" +"TO_BASE64" +"TO_DAYS" +"TO_UNIXTIME" +"to_utc_timestamp" "TRACKING ONLY" "TRAILING" "TRANSACTION" @@ -4468,17 +4500,18 @@ "translate" "translateUTF8" "TRIGGER" -"trim" "TRIM" +"trim" "trimBoth" "trimLeft" "trimRight" "TRUE" "trunc" -"truncate" "TRUNCATE" +"truncate" "tryBase58Decode" "tryBase64Decode" +"tryBase64URLDecode" "tryDecrypt" "tryIdnaEncode" "tryPunycodeDecode" @@ -4486,8 +4519,8 @@ "tumble" "tumbleEnd" "tumbleStart" -"tuple" "Tuple" +"tuple" "tupleConcat" "tupleDivide" "tupleDivideByNumber" @@ -4502,15 +4535,14 @@ "tupleModuloByNumber" "tupleMultiply" "tupleMultiplyByNumber" +"tupleNames" "tupleNegate" "tuplePlus" "tupleToNameValuePairs" "TYPE" "TYPEOF" "ucase" -"UInt128" "UInt16" -"UInt256" "UInt32" "UInt64" "UInt8" @@ -4643,48 +4675,32 @@ "USE" "user" "USING" -"UTC_timestamp" "UTCTimestamp" +"UTC_timestamp" "UUID" "UUIDNumToString" "UUIDStringToNum" -"validateNestedArraySizes" +"UUIDToNum" +"UUIDv7ToDateTime" "VALID UNTIL" +"validateNestedArraySizes" "VALUES" -"VARBINARY" -"VARCHAR" -"VARCHAR2" -"Variant" "variantElement" "variantType" "varPop" -"VAR_POP" "varPopArgMax" -"VAR_POPArgMax" "varPopArgMin" -"VAR_POPArgMin" "varPopArray" -"VAR_POPArray" "varPopDistinct" -"VAR_POPDistinct" "varPopForEach" -"VAR_POPForEach" "varPopIf" -"VAR_POPIf" "varPopMap" -"VAR_POPMap" "varPopMerge" -"VAR_POPMerge" "varPopNull" -"VAR_POPNull" "varPopOrDefault" -"VAR_POPOrDefault" "varPopOrNull" -"VAR_POPOrNull" "varPopResample" -"VAR_POPResample" "varPopSimpleState" -"VAR_POPSimpleState" "varPopStable" "varPopStableArgMax" "varPopStableArgMin" @@ -4701,35 +4717,20 @@ "varPopStableSimpleState" "varPopStableState" "varPopState" -"VAR_POPState" "varSamp" -"VAR_SAMP" "varSampArgMax" -"VAR_SAMPArgMax" "varSampArgMin" -"VAR_SAMPArgMin" "varSampArray" -"VAR_SAMPArray" "varSampDistinct" -"VAR_SAMPDistinct" "varSampForEach" -"VAR_SAMPForEach" "varSampIf" -"VAR_SAMPIf" "varSampMap" -"VAR_SAMPMap" "varSampMerge" -"VAR_SAMPMerge" "varSampNull" -"VAR_SAMPNull" "varSampOrDefault" -"VAR_SAMPOrDefault" "varSampOrNull" -"VAR_SAMPOrNull" "varSampResample" -"VAR_SAMPResample" "varSampSimpleState" -"VAR_SAMPSimpleState" "varSampStable" "varSampStableArgMax" "varSampStableArgMin" @@ -4746,8 +4747,37 @@ "varSampStableSimpleState" "varSampStableState" "varSampState" -"VAR_SAMPState" "VARYING" +"VAR_POP" +"VAR_POPArgMax" +"VAR_POPArgMin" +"VAR_POPArray" +"VAR_POPDistinct" +"VAR_POPForEach" +"VAR_POPIf" +"VAR_POPMap" +"VAR_POPMerge" +"VAR_POPNull" +"VAR_POPOrDefault" +"VAR_POPOrNull" +"VAR_POPResample" +"VAR_POPSimpleState" +"VAR_POPState" +"VAR_SAMP" +"VAR_SAMPArgMax" +"VAR_SAMPArgMin" +"VAR_SAMPArray" +"VAR_SAMPDistinct" +"VAR_SAMPForEach" +"VAR_SAMPIf" +"VAR_SAMPMap" +"VAR_SAMPMerge" +"VAR_SAMPNull" +"VAR_SAMPOrDefault" +"VAR_SAMPOrNull" +"VAR_SAMPResample" +"VAR_SAMPSimpleState" +"VAR_SAMPState" "vectorDifference" "vectorSum" "version" @@ -4763,8 +4793,8 @@ "visitParamHas" "WATCH" "WATERMARK" -"week" "WEEK" +"week" "WEEKS" "welchTTest" "welchTTestArgMax" @@ -4783,8 +4813,8 @@ "welchTTestState" "WHEN" "WHERE" -"width_bucket" "widthBucket" +"width_bucket" 
"WINDOW" "windowFunnel" "windowFunnelArgMax" @@ -4802,15 +4832,15 @@ "windowFunnelSimpleState" "windowFunnelState" "windowID" -"WITH" "WITH ADMIN OPTION" "WITH CHECK" "WITH FILL" "WITH GRANT OPTION" -"with_itemindex" "WITH NAME" "WITH REPLACE OPTION" "WITH TIES" +"WITH" +"WITH_ITEMINDEX" "WK" "wkt" "wordShingleMinHash" @@ -4845,3 +4875,11 @@ "YYYYMMDDToDate32" "ZKPATH" "zookeeperSessionUptime" +"_CAST" +"__actionName" +"__bitBoolMaskAnd" +"__bitBoolMaskOr" +"__bitSwapLastTwo" +"__bitWrapperFunc" +"__getScalar" +"__scalarSubqueryResult" diff --git a/tests/fuzz/dictionaries/datatypes.dict b/tests/fuzz/dictionaries/datatypes.dict index a01a94fd3e3..e562595fb67 100644 --- a/tests/fuzz/dictionaries/datatypes.dict +++ b/tests/fuzz/dictionaries/datatypes.dict @@ -1,135 +1,4283 @@ -"AggregateFunction" -"Array" -"BIGINT" -"BIGINT SIGNED" -"BIGINT UNSIGNED" -"BINARY" -"BINARY LARGE OBJECT" -"BINARY VARYING" -"BIT" -"BLOB" -"BYTE" -"BYTEA" -"Bool" -"CHAR" -"CHAR LARGE OBJECT" -"CHAR VARYING" -"CHARACTER" -"CHARACTER LARGE OBJECT" -"CHARACTER VARYING" -"CLOB" -"DEC" -"DOUBLE" -"DOUBLE PRECISION" -"Date" -"Date32" -"DateTime" -"DateTime32" -"DateTime64" -"Decimal" -"Decimal128" -"Decimal256" -"Decimal32" -"Decimal64" -"ENUM" -"Enum" -"Enum16" -"Enum8" -"FIXED" -"FLOAT" -"FixedString" -"Float32" -"Float64" -"GEOMETRY" -"INET4" -"INET6" -"INT" -"INT SIGNED" -"INT UNSIGNED" -"INT1" -"INT1 SIGNED" -"INT1 UNSIGNED" -"INTEGER" -"INTEGER SIGNED" -"INTEGER UNSIGNED" -"IPv4" -"IPv6" -"Int128" -"Int16" -"Int256" -"Int32" -"Int64" -"Int8" -"IntervalDay" -"IntervalHour" -"IntervalMicrosecond" -"IntervalMillisecond" -"IntervalMinute" -"IntervalMonth" -"IntervalNanosecond" -"IntervalQuarter" -"IntervalSecond" -"IntervalWeek" -"IntervalYear" -"JSON" -"LONGBLOB" -"LONGTEXT" -"LowCardinality" -"MEDIUMBLOB" -"MEDIUMINT" -"MEDIUMINT SIGNED" -"MEDIUMINT UNSIGNED" -"MEDIUMTEXT" -"Map" -"MultiPolygon" -"NATIONAL CHAR" -"NATIONAL CHAR VARYING" -"NATIONAL CHARACTER" -"NATIONAL CHARACTER LARGE OBJECT" -"NATIONAL CHARACTER VARYING" -"NCHAR" -"NCHAR LARGE OBJECT" -"NCHAR VARYING" -"NUMERIC" -"NVARCHAR" -"Nested" -"Nothing" -"Nullable" -"Object" -"Point" -"Polygon" -"REAL" -"Ring" -"SET" -"SIGNED" -"SINGLE" -"SMALLINT" -"SMALLINT SIGNED" -"SMALLINT UNSIGNED" -"SimpleAggregateFunction" -"String" -"TEXT" -"TIME" -"TIMESTAMP" -"TINYBLOB" -"TINYINT" -"TINYINT SIGNED" -"TINYINT UNSIGNED" -"TINYTEXT" -"Tuple" -"UInt128" -"UInt16" -"UInt256" -"UInt32" -"UInt64" -"UInt8" -"UNSIGNED" -"UUID" -"VARBINARY" -"VARCHAR" -"VARCHAR2" -"Variant" +"BIT_AND" +"BIT_ANDArgMax" +"BIT_ANDArgMin" +"BIT_ANDArray" +"BIT_ANDDistinct" +"BIT_ANDForEach" +"BIT_ANDIf" +"BIT_ANDMap" +"BIT_ANDMerge" +"BIT_ANDNull" +"BIT_ANDOrDefault" +"BIT_ANDOrNull" +"BIT_ANDResample" +"BIT_ANDSimpleState" +"BIT_ANDState" +"BIT_OR" +"BIT_ORArgMax" +"BIT_ORArgMin" +"BIT_ORArray" +"BIT_ORDistinct" +"BIT_ORForEach" +"BIT_ORIf" +"BIT_ORMap" +"BIT_ORMerge" +"BIT_ORNull" +"BIT_OROrDefault" +"BIT_OROrNull" +"BIT_ORResample" +"BIT_ORSimpleState" +"BIT_ORState" +"BIT_XOR" +"BIT_XORArgMax" +"BIT_XORArgMin" +"BIT_XORArray" +"BIT_XORDistinct" +"BIT_XORForEach" +"BIT_XORIf" +"BIT_XORMap" +"BIT_XORMerge" +"BIT_XORNull" +"BIT_XOROrDefault" +"BIT_XOROrNull" +"BIT_XORResample" +"BIT_XORSimpleState" +"BIT_XORState" +"BLAKE3" +"CAST" +"CHARACTER_LENGTH" +"CHAR_LENGTH" +"COVAR_POP" +"COVAR_POPArgMax" +"COVAR_POPArgMin" +"COVAR_POPArray" +"COVAR_POPDistinct" +"COVAR_POPForEach" +"COVAR_POPIf" +"COVAR_POPMap" +"COVAR_POPMerge" +"COVAR_POPNull" +"COVAR_POPOrDefault" +"COVAR_POPOrNull" 
+"COVAR_POPResample" +"COVAR_POPSimpleState" +"COVAR_POPState" +"COVAR_SAMP" +"COVAR_SAMPArgMax" +"COVAR_SAMPArgMin" +"COVAR_SAMPArray" +"COVAR_SAMPDistinct" +"COVAR_SAMPForEach" +"COVAR_SAMPIf" +"COVAR_SAMPMap" +"COVAR_SAMPMerge" +"COVAR_SAMPNull" +"COVAR_SAMPOrDefault" +"COVAR_SAMPOrNull" +"COVAR_SAMPResample" +"COVAR_SAMPSimpleState" +"COVAR_SAMPState" +"CRC32" +"CRC32IEEE" +"CRC64" +"DATABASE" +"DATE" +"DATE_DIFF" +"DATE_FORMAT" +"DATE_TRUNC" +"DAY" +"DAYOFMONTH" +"DAYOFWEEK" +"DAYOFYEAR" +"FORMAT_BYTES" +"FQDN" +"FROM_BASE64" +"FROM_DAYS" +"FROM_UNIXTIME" +"HOUR" +"INET6_ATON" +"INET6_NTOA" +"INET_ATON" +"INET_NTOA" +"IPv4CIDRToRange" +"IPv4NumToString" +"IPv4NumToStringClassC" +"IPv4StringToNum" +"IPv4StringToNumOrDefault" +"IPv4StringToNumOrNull" +"IPv4ToIPv6" +"IPv6CIDRToRange" +"IPv6NumToString" +"IPv6StringToNum" +"IPv6StringToNumOrDefault" +"IPv6StringToNumOrNull" +"JSONArrayLength" +"JSONExtract" +"JSONExtractArrayRaw" +"JSONExtractBool" +"JSONExtractFloat" +"JSONExtractInt" +"JSONExtractKeys" +"JSONExtractKeysAndValues" +"JSONExtractKeysAndValuesRaw" +"JSONExtractRaw" +"JSONExtractString" +"JSONExtractUInt" +"JSONHas" +"JSONKey" +"JSONLength" +"JSONMergePatch" +"JSONType" +"JSON_ARRAY_LENGTH" +"JSON_EXISTS" +"JSON_QUERY" +"JSON_VALUE" +"L1Distance" +"L1Norm" +"L1Normalize" +"L2Distance" +"L2Norm" +"L2Normalize" +"L2SquaredDistance" +"L2SquaredNorm" +"LAST_DAY" +"LinfDistance" +"LinfNorm" +"LinfNormalize" +"LpDistance" +"LpNorm" +"LpNormalize" +"MACNumToString" +"MACStringToNum" +"MACStringToOUI" +"MAP_FROM_ARRAYS" +"MD4" +"MD5" +"MILLISECOND" +"MINUTE" +"MONTH" +"OCTET_LENGTH" +"QUARTER" +"REGEXP_EXTRACT" +"REGEXP_MATCHES" +"REGEXP_REPLACE" +"SCHEMA" +"SECOND" +"SHA1" +"SHA224" +"SHA256" +"SHA384" +"SHA512" +"SHA512_256" +"STD" +"STDArgMax" +"STDArgMin" +"STDArray" +"STDDEV_POP" +"STDDEV_POPArgMax" +"STDDEV_POPArgMin" +"STDDEV_POPArray" +"STDDEV_POPDistinct" +"STDDEV_POPForEach" +"STDDEV_POPIf" +"STDDEV_POPMap" +"STDDEV_POPMerge" +"STDDEV_POPNull" +"STDDEV_POPOrDefault" +"STDDEV_POPOrNull" +"STDDEV_POPResample" +"STDDEV_POPSimpleState" +"STDDEV_POPState" +"STDDEV_SAMP" +"STDDEV_SAMPArgMax" +"STDDEV_SAMPArgMin" +"STDDEV_SAMPArray" +"STDDEV_SAMPDistinct" +"STDDEV_SAMPForEach" +"STDDEV_SAMPIf" +"STDDEV_SAMPMap" +"STDDEV_SAMPMerge" +"STDDEV_SAMPNull" +"STDDEV_SAMPOrDefault" +"STDDEV_SAMPOrNull" +"STDDEV_SAMPResample" +"STDDEV_SAMPSimpleState" +"STDDEV_SAMPState" +"STDDistinct" +"STDForEach" +"STDIf" +"STDMap" +"STDMerge" +"STDNull" +"STDOrDefault" +"STDOrNull" +"STDResample" +"STDSimpleState" +"STDState" +"SUBSTRING_INDEX" +"SVG" +"TIMESTAMP_DIFF" +"TO_BASE64" +"TO_DAYS" +"TO_UNIXTIME" +"ULIDStringToDateTime" +"URLHash" +"URLHierarchy" +"URLPathHierarchy" +"UTCTimestamp" +"UTC_timestamp" +"UUIDNumToString" +"UUIDStringToNum" +"UUIDToNum" +"UUIDv7ToDateTime" +"VAR_POP" +"VAR_POPArgMax" +"VAR_POPArgMin" +"VAR_POPArray" +"VAR_POPDistinct" +"VAR_POPForEach" +"VAR_POPIf" +"VAR_POPMap" +"VAR_POPMerge" +"VAR_POPNull" +"VAR_POPOrDefault" +"VAR_POPOrNull" +"VAR_POPResample" +"VAR_POPSimpleState" +"VAR_POPState" +"VAR_SAMP" +"VAR_SAMPArgMax" +"VAR_SAMPArgMin" +"VAR_SAMPArray" +"VAR_SAMPDistinct" +"VAR_SAMPForEach" +"VAR_SAMPIf" +"VAR_SAMPMap" +"VAR_SAMPMerge" +"VAR_SAMPNull" +"VAR_SAMPOrDefault" +"VAR_SAMPOrNull" +"VAR_SAMPResample" +"VAR_SAMPSimpleState" +"VAR_SAMPState" "YEAR" -"bool" -"boolean" -"Dynamic" +"YYYYMMDDToDate" +"YYYYMMDDToDate32" +"YYYYMMDDhhmmssToDateTime" +"YYYYMMDDhhmmssToDateTime64" +"_CAST" +"__actionName" +"__bitBoolMaskAnd" +"__bitBoolMaskOr" +"__bitSwapLastTwo" 
+"__bitWrapperFunc" +"__getScalar" +"__scalarSubqueryResult" +"abs" +"accurateCast" +"accurateCastOrDefault" +"accurateCastOrNull" +"acos" +"acosh" +"addDate" +"addDays" +"addHours" +"addInterval" +"addMicroseconds" +"addMilliseconds" +"addMinutes" +"addMonths" +"addNanoseconds" +"addQuarters" +"addSeconds" +"addTupleOfIntervals" +"addWeeks" +"addYears" +"addressToLine" +"addressToLineWithInlines" +"addressToSymbol" +"aes_decrypt_mysql" +"aes_encrypt_mysql" +"age" +"aggThrow" +"aggThrowArgMax" +"aggThrowArgMin" +"aggThrowArray" +"aggThrowDistinct" +"aggThrowForEach" +"aggThrowIf" +"aggThrowMap" +"aggThrowMerge" +"aggThrowNull" +"aggThrowOrDefault" +"aggThrowOrNull" +"aggThrowResample" +"aggThrowSimpleState" +"aggThrowState" +"alphaTokens" +"analysisOfVariance" +"analysisOfVarianceArgMax" +"analysisOfVarianceArgMin" +"analysisOfVarianceArray" +"analysisOfVarianceDistinct" +"analysisOfVarianceForEach" +"analysisOfVarianceIf" +"analysisOfVarianceMap" +"analysisOfVarianceMerge" +"analysisOfVarianceNull" +"analysisOfVarianceOrDefault" +"analysisOfVarianceOrNull" +"analysisOfVarianceResample" +"analysisOfVarianceSimpleState" +"analysisOfVarianceState" +"and" +"anova" +"anovaArgMax" +"anovaArgMin" +"anovaArray" +"anovaDistinct" +"anovaForEach" +"anovaIf" +"anovaMap" +"anovaMerge" +"anovaNull" +"anovaOrDefault" +"anovaOrNull" +"anovaResample" +"anovaSimpleState" +"anovaState" +"any" +"anyArgMax" +"anyArgMin" +"anyArray" +"anyDistinct" +"anyForEach" +"anyHeavy" +"anyHeavyArgMax" +"anyHeavyArgMin" +"anyHeavyArray" +"anyHeavyDistinct" +"anyHeavyForEach" +"anyHeavyIf" +"anyHeavyMap" +"anyHeavyMerge" +"anyHeavyNull" +"anyHeavyOrDefault" +"anyHeavyOrNull" +"anyHeavyResample" +"anyHeavySimpleState" +"anyHeavyState" +"anyIf" +"anyLast" +"anyLastArgMax" +"anyLastArgMin" +"anyLastArray" +"anyLastDistinct" +"anyLastForEach" +"anyLastIf" +"anyLastMap" +"anyLastMerge" +"anyLastNull" +"anyLastOrDefault" +"anyLastOrNull" +"anyLastResample" +"anyLastSimpleState" +"anyLastState" +"anyLast_respect_nulls" +"anyLast_respect_nullsArgMax" +"anyLast_respect_nullsArgMin" +"anyLast_respect_nullsArray" +"anyLast_respect_nullsDistinct" +"anyLast_respect_nullsForEach" +"anyLast_respect_nullsIf" +"anyLast_respect_nullsMap" +"anyLast_respect_nullsMerge" +"anyLast_respect_nullsNull" +"anyLast_respect_nullsOrDefault" +"anyLast_respect_nullsOrNull" +"anyLast_respect_nullsResample" +"anyLast_respect_nullsSimpleState" +"anyLast_respect_nullsState" +"anyMap" +"anyMerge" +"anyNull" +"anyOrDefault" +"anyOrNull" +"anyResample" +"anySimpleState" +"anyState" +"any_respect_nulls" +"any_respect_nullsArgMax" +"any_respect_nullsArgMin" +"any_respect_nullsArray" +"any_respect_nullsDistinct" +"any_respect_nullsForEach" +"any_respect_nullsIf" +"any_respect_nullsMap" +"any_respect_nullsMerge" +"any_respect_nullsNull" +"any_respect_nullsOrDefault" +"any_respect_nullsOrNull" +"any_respect_nullsResample" +"any_respect_nullsSimpleState" +"any_respect_nullsState" +"any_value" +"any_valueArgMax" +"any_valueArgMin" +"any_valueArray" +"any_valueDistinct" +"any_valueForEach" +"any_valueIf" +"any_valueMap" +"any_valueMerge" +"any_valueNull" +"any_valueOrDefault" +"any_valueOrNull" +"any_valueResample" +"any_valueSimpleState" +"any_valueState" +"any_value_respect_nulls" +"any_value_respect_nullsArgMax" +"any_value_respect_nullsArgMin" +"any_value_respect_nullsArray" +"any_value_respect_nullsDistinct" +"any_value_respect_nullsForEach" +"any_value_respect_nullsIf" +"any_value_respect_nullsMap" +"any_value_respect_nullsMerge" +"any_value_respect_nullsNull" 
+"any_value_respect_nullsOrDefault" +"any_value_respect_nullsOrNull" +"any_value_respect_nullsResample" +"any_value_respect_nullsSimpleState" +"any_value_respect_nullsState" +"appendTrailingCharIfAbsent" +"approx_top_count" +"approx_top_countArgMax" +"approx_top_countArgMin" +"approx_top_countArray" +"approx_top_countDistinct" +"approx_top_countForEach" +"approx_top_countIf" +"approx_top_countMap" +"approx_top_countMerge" +"approx_top_countNull" +"approx_top_countOrDefault" +"approx_top_countOrNull" +"approx_top_countResample" +"approx_top_countSimpleState" +"approx_top_countState" +"approx_top_k" +"approx_top_kArgMax" +"approx_top_kArgMin" +"approx_top_kArray" +"approx_top_kDistinct" +"approx_top_kForEach" +"approx_top_kIf" +"approx_top_kMap" +"approx_top_kMerge" +"approx_top_kNull" +"approx_top_kOrDefault" +"approx_top_kOrNull" +"approx_top_kResample" +"approx_top_kSimpleState" +"approx_top_kState" +"approx_top_sum" +"approx_top_sumArgMax" +"approx_top_sumArgMin" +"approx_top_sumArray" +"approx_top_sumDistinct" +"approx_top_sumForEach" +"approx_top_sumIf" +"approx_top_sumMap" +"approx_top_sumMerge" +"approx_top_sumNull" +"approx_top_sumOrDefault" +"approx_top_sumOrNull" +"approx_top_sumResample" +"approx_top_sumSimpleState" +"approx_top_sumState" +"argMax" +"argMaxArgMax" +"argMaxArgMin" +"argMaxArray" +"argMaxDistinct" +"argMaxForEach" +"argMaxIf" +"argMaxMap" +"argMaxMerge" +"argMaxNull" +"argMaxOrDefault" +"argMaxOrNull" +"argMaxResample" +"argMaxSimpleState" +"argMaxState" +"argMin" +"argMinArgMax" +"argMinArgMin" +"argMinArray" +"argMinDistinct" +"argMinForEach" +"argMinIf" +"argMinMap" +"argMinMerge" +"argMinNull" +"argMinOrDefault" +"argMinOrNull" +"argMinResample" +"argMinSimpleState" +"argMinState" +"array" +"arrayAUC" +"arrayAll" +"arrayAvg" +"arrayCompact" +"arrayConcat" +"arrayCount" +"arrayCumSum" +"arrayCumSumNonNegative" +"arrayDifference" +"arrayDistinct" +"arrayDotProduct" +"arrayElement" +"arrayEnumerate" +"arrayEnumerateDense" +"arrayEnumerateDenseRanked" +"arrayEnumerateUniq" +"arrayEnumerateUniqRanked" +"arrayExists" +"arrayFill" +"arrayFilter" +"arrayFirst" +"arrayFirstIndex" +"arrayFirstOrNull" +"arrayFlatten" +"arrayFold" +"arrayIntersect" +"arrayJaccardIndex" +"arrayJoin" +"arrayLast" +"arrayLastIndex" +"arrayLastOrNull" +"arrayMap" +"arrayMax" +"arrayMin" +"arrayPartialReverseSort" +"arrayPartialShuffle" +"arrayPartialSort" +"arrayPopBack" +"arrayPopFront" +"arrayProduct" +"arrayPushBack" +"arrayPushFront" +"arrayRandomSample" +"arrayReduce" +"arrayReduceInRanges" +"arrayResize" +"arrayReverse" +"arrayReverseFill" +"arrayReverseSort" +"arrayReverseSplit" +"arrayRotateLeft" +"arrayRotateRight" +"arrayShiftLeft" +"arrayShiftRight" +"arrayShingles" +"arrayShuffle" +"arraySlice" +"arraySort" +"arraySplit" +"arrayStringConcat" +"arraySum" +"arrayUniq" +"arrayWithConstant" +"arrayZip" +"array_agg" +"array_aggArgMax" +"array_aggArgMin" +"array_aggArray" +"array_aggDistinct" +"array_aggForEach" +"array_aggIf" +"array_aggMap" +"array_aggMerge" +"array_aggNull" +"array_aggOrDefault" +"array_aggOrNull" +"array_aggResample" +"array_aggSimpleState" +"array_aggState" +"array_concat_agg" +"array_concat_aggArgMax" +"array_concat_aggArgMin" +"array_concat_aggArray" +"array_concat_aggDistinct" +"array_concat_aggForEach" +"array_concat_aggIf" +"array_concat_aggMap" +"array_concat_aggMerge" +"array_concat_aggNull" +"array_concat_aggOrDefault" +"array_concat_aggOrNull" +"array_concat_aggResample" +"array_concat_aggSimpleState" +"array_concat_aggState" +"ascii" +"asin" +"asinh" 
+"assumeNotNull" +"atan" +"atan2" +"atanh" +"avg" +"avgArgMax" +"avgArgMin" +"avgArray" +"avgDistinct" +"avgForEach" +"avgIf" +"avgMap" +"avgMerge" +"avgNull" +"avgOrDefault" +"avgOrNull" +"avgResample" +"avgSimpleState" +"avgState" +"avgWeighted" +"avgWeightedArgMax" +"avgWeightedArgMin" +"avgWeightedArray" +"avgWeightedDistinct" +"avgWeightedForEach" +"avgWeightedIf" +"avgWeightedMap" +"avgWeightedMerge" +"avgWeightedNull" +"avgWeightedOrDefault" +"avgWeightedOrNull" +"avgWeightedResample" +"avgWeightedSimpleState" +"avgWeightedState" +"bar" +"base58Decode" +"base58Encode" +"base64Decode" +"base64Encode" +"base64URLDecode" +"base64URLEncode" +"basename" +"bin" +"bitAnd" +"bitCount" +"bitHammingDistance" +"bitNot" +"bitOr" +"bitPositionsToArray" +"bitRotateLeft" +"bitRotateRight" +"bitShiftLeft" +"bitShiftRight" +"bitSlice" +"bitTest" +"bitTestAll" +"bitTestAny" +"bitXor" +"bitmapAnd" +"bitmapAndCardinality" +"bitmapAndnot" +"bitmapAndnotCardinality" +"bitmapBuild" +"bitmapCardinality" +"bitmapContains" +"bitmapHasAll" +"bitmapHasAny" +"bitmapMax" +"bitmapMin" +"bitmapOr" +"bitmapOrCardinality" +"bitmapSubsetInRange" +"bitmapSubsetLimit" +"bitmapToArray" +"bitmapTransform" +"bitmapXor" +"bitmapXorCardinality" +"bitmaskToArray" +"bitmaskToList" +"blockNumber" +"blockSerializedSize" +"blockSize" +"boundingRatio" +"boundingRatioArgMax" +"boundingRatioArgMin" +"boundingRatioArray" +"boundingRatioDistinct" +"boundingRatioForEach" +"boundingRatioIf" +"boundingRatioMap" +"boundingRatioMerge" +"boundingRatioNull" +"boundingRatioOrDefault" +"boundingRatioOrNull" +"boundingRatioResample" +"boundingRatioSimpleState" +"boundingRatioState" +"buildId" +"byteHammingDistance" +"byteSize" +"byteSlice" +"byteSwap" +"caseWithExpr" +"caseWithExpression" +"caseWithoutExpr" +"caseWithoutExpression" +"catboostEvaluate" +"categoricalInformationValue" +"categoricalInformationValueArgMax" +"categoricalInformationValueArgMin" +"categoricalInformationValueArray" +"categoricalInformationValueDistinct" +"categoricalInformationValueForEach" +"categoricalInformationValueIf" +"categoricalInformationValueMap" +"categoricalInformationValueMerge" +"categoricalInformationValueNull" +"categoricalInformationValueOrDefault" +"categoricalInformationValueOrNull" +"categoricalInformationValueResample" +"categoricalInformationValueSimpleState" +"categoricalInformationValueState" +"cbrt" +"ceil" +"ceiling" +"changeDay" +"changeHour" +"changeMinute" +"changeMonth" +"changeSecond" +"changeYear" +"char" +"cityHash64" +"clamp" +"coalesce" +"concat" +"concatAssumeInjective" +"concatWithSeparator" +"concatWithSeparatorAssumeInjective" +"concat_ws" +"connectionId" +"connection_id" +"contingency" +"contingencyArgMax" +"contingencyArgMin" +"contingencyArray" +"contingencyDistinct" +"contingencyForEach" +"contingencyIf" +"contingencyMap" +"contingencyMerge" +"contingencyNull" +"contingencyOrDefault" +"contingencyOrNull" +"contingencyResample" +"contingencySimpleState" +"contingencyState" +"convertCharset" +"corr" +"corrArgMax" +"corrArgMin" +"corrArray" +"corrDistinct" +"corrForEach" +"corrIf" +"corrMap" +"corrMatrix" +"corrMatrixArgMax" +"corrMatrixArgMin" +"corrMatrixArray" +"corrMatrixDistinct" +"corrMatrixForEach" +"corrMatrixIf" +"corrMatrixMap" +"corrMatrixMerge" +"corrMatrixNull" +"corrMatrixOrDefault" +"corrMatrixOrNull" +"corrMatrixResample" +"corrMatrixSimpleState" +"corrMatrixState" +"corrMerge" +"corrNull" +"corrOrDefault" +"corrOrNull" +"corrResample" +"corrSimpleState" +"corrStable" +"corrStableArgMax" +"corrStableArgMin" 
+"corrStableArray" +"corrStableDistinct" +"corrStableForEach" +"corrStableIf" +"corrStableMap" +"corrStableMerge" +"corrStableNull" +"corrStableOrDefault" +"corrStableOrNull" +"corrStableResample" +"corrStableSimpleState" +"corrStableState" +"corrState" +"cos" +"cosh" +"cosineDistance" +"count" +"countArgMax" +"countArgMin" +"countArray" +"countDigits" +"countDistinct" +"countEqual" +"countForEach" +"countIf" +"countMap" +"countMatches" +"countMatchesCaseInsensitive" +"countMerge" +"countNull" +"countOrDefault" +"countOrNull" +"countResample" +"countSimpleState" +"countState" +"countSubstrings" +"countSubstringsCaseInsensitive" +"countSubstringsCaseInsensitiveUTF8" +"covarPop" +"covarPopArgMax" +"covarPopArgMin" +"covarPopArray" +"covarPopDistinct" +"covarPopForEach" +"covarPopIf" +"covarPopMap" +"covarPopMatrix" +"covarPopMatrixArgMax" +"covarPopMatrixArgMin" +"covarPopMatrixArray" +"covarPopMatrixDistinct" +"covarPopMatrixForEach" +"covarPopMatrixIf" +"covarPopMatrixMap" +"covarPopMatrixMerge" +"covarPopMatrixNull" +"covarPopMatrixOrDefault" +"covarPopMatrixOrNull" +"covarPopMatrixResample" +"covarPopMatrixSimpleState" +"covarPopMatrixState" +"covarPopMerge" +"covarPopNull" +"covarPopOrDefault" +"covarPopOrNull" +"covarPopResample" +"covarPopSimpleState" +"covarPopStable" +"covarPopStableArgMax" +"covarPopStableArgMin" +"covarPopStableArray" +"covarPopStableDistinct" +"covarPopStableForEach" +"covarPopStableIf" +"covarPopStableMap" +"covarPopStableMerge" +"covarPopStableNull" +"covarPopStableOrDefault" +"covarPopStableOrNull" +"covarPopStableResample" +"covarPopStableSimpleState" +"covarPopStableState" +"covarPopState" +"covarSamp" +"covarSampArgMax" +"covarSampArgMin" +"covarSampArray" +"covarSampDistinct" +"covarSampForEach" +"covarSampIf" +"covarSampMap" +"covarSampMatrix" +"covarSampMatrixArgMax" +"covarSampMatrixArgMin" +"covarSampMatrixArray" +"covarSampMatrixDistinct" +"covarSampMatrixForEach" +"covarSampMatrixIf" +"covarSampMatrixMap" +"covarSampMatrixMerge" +"covarSampMatrixNull" +"covarSampMatrixOrDefault" +"covarSampMatrixOrNull" +"covarSampMatrixResample" +"covarSampMatrixSimpleState" +"covarSampMatrixState" +"covarSampMerge" +"covarSampNull" +"covarSampOrDefault" +"covarSampOrNull" +"covarSampResample" +"covarSampSimpleState" +"covarSampStable" +"covarSampStableArgMax" +"covarSampStableArgMin" +"covarSampStableArray" +"covarSampStableDistinct" +"covarSampStableForEach" +"covarSampStableIf" +"covarSampStableMap" +"covarSampStableMerge" +"covarSampStableNull" +"covarSampStableOrDefault" +"covarSampStableOrNull" +"covarSampStableResample" +"covarSampStableSimpleState" +"covarSampStableState" +"covarSampState" +"cramersV" +"cramersVArgMax" +"cramersVArgMin" +"cramersVArray" +"cramersVBiasCorrected" +"cramersVBiasCorrectedArgMax" +"cramersVBiasCorrectedArgMin" +"cramersVBiasCorrectedArray" +"cramersVBiasCorrectedDistinct" +"cramersVBiasCorrectedForEach" +"cramersVBiasCorrectedIf" +"cramersVBiasCorrectedMap" +"cramersVBiasCorrectedMerge" +"cramersVBiasCorrectedNull" +"cramersVBiasCorrectedOrDefault" +"cramersVBiasCorrectedOrNull" +"cramersVBiasCorrectedResample" +"cramersVBiasCorrectedSimpleState" +"cramersVBiasCorrectedState" +"cramersVDistinct" +"cramersVForEach" +"cramersVIf" +"cramersVMap" +"cramersVMerge" +"cramersVNull" +"cramersVOrDefault" +"cramersVOrNull" +"cramersVResample" +"cramersVSimpleState" +"cramersVState" +"curdate" +"currentDatabase" +"currentProfiles" +"currentRoles" +"currentSchemas" +"currentUser" +"current_database" +"current_date" +"current_schemas" 
+"current_timestamp" +"current_user" +"cutFragment" +"cutIPv6" +"cutQueryString" +"cutQueryStringAndFragment" +"cutToFirstSignificantSubdomain" +"cutToFirstSignificantSubdomainCustom" +"cutToFirstSignificantSubdomainCustomRFC" +"cutToFirstSignificantSubdomainCustomWithWWW" +"cutToFirstSignificantSubdomainCustomWithWWWRFC" +"cutToFirstSignificantSubdomainRFC" +"cutToFirstSignificantSubdomainWithWWW" +"cutToFirstSignificantSubdomainWithWWWRFC" +"cutURLParameter" +"cutWWW" +"damerauLevenshteinDistance" +"dateDiff" +"dateName" +"dateTime64ToSnowflake" +"dateTime64ToSnowflakeID" +"dateTimeToSnowflake" +"dateTimeToSnowflakeID" +"dateTrunc" +"date_diff" +"decodeHTMLComponent" +"decodeURLComponent" +"decodeURLFormComponent" +"decodeXMLComponent" +"decrypt" +"defaultProfiles" +"defaultRoles" +"defaultValueOfArgumentType" +"defaultValueOfTypeName" +"degrees" +"deltaSum" +"deltaSumArgMax" +"deltaSumArgMin" +"deltaSumArray" +"deltaSumDistinct" +"deltaSumForEach" +"deltaSumIf" +"deltaSumMap" +"deltaSumMerge" +"deltaSumNull" +"deltaSumOrDefault" +"deltaSumOrNull" +"deltaSumResample" +"deltaSumSimpleState" +"deltaSumState" +"deltaSumTimestamp" +"deltaSumTimestampArgMax" +"deltaSumTimestampArgMin" +"deltaSumTimestampArray" +"deltaSumTimestampDistinct" +"deltaSumTimestampForEach" +"deltaSumTimestampIf" +"deltaSumTimestampMap" +"deltaSumTimestampMerge" +"deltaSumTimestampNull" +"deltaSumTimestampOrDefault" +"deltaSumTimestampOrNull" +"deltaSumTimestampResample" +"deltaSumTimestampSimpleState" +"deltaSumTimestampState" +"demangle" +"denseRank" +"denseRankArgMax" +"denseRankArgMin" +"denseRankArray" +"denseRankDistinct" +"denseRankForEach" +"denseRankIf" +"denseRankMap" +"denseRankMerge" +"denseRankNull" +"denseRankOrDefault" +"denseRankOrNull" +"denseRankResample" +"denseRankSimpleState" +"denseRankState" +"dense_rank" +"dense_rankArgMax" +"dense_rankArgMin" +"dense_rankArray" +"dense_rankDistinct" +"dense_rankForEach" +"dense_rankIf" +"dense_rankMap" +"dense_rankMerge" +"dense_rankNull" +"dense_rankOrDefault" +"dense_rankOrNull" +"dense_rankResample" +"dense_rankSimpleState" +"dense_rankState" +"detectCharset" +"detectLanguage" +"detectLanguageMixed" +"detectLanguageUnknown" +"detectProgrammingLanguage" +"detectTonality" +"dictGet" +"dictGetAll" +"dictGetChildren" +"dictGetDate" +"dictGetDateOrDefault" +"dictGetDateTime" +"dictGetDateTimeOrDefault" +"dictGetDescendants" +"dictGetFloat32" +"dictGetFloat32OrDefault" +"dictGetFloat64" +"dictGetFloat64OrDefault" +"dictGetHierarchy" +"dictGetIPv4" +"dictGetIPv4OrDefault" +"dictGetIPv6" +"dictGetIPv6OrDefault" +"dictGetInt16" +"dictGetInt16OrDefault" +"dictGetInt32" +"dictGetInt32OrDefault" +"dictGetInt64" +"dictGetInt64OrDefault" +"dictGetInt8" +"dictGetInt8OrDefault" +"dictGetOrDefault" +"dictGetOrNull" +"dictGetString" +"dictGetStringOrDefault" +"dictGetUInt16" +"dictGetUInt16OrDefault" +"dictGetUInt32" +"dictGetUInt32OrDefault" +"dictGetUInt64" +"dictGetUInt64OrDefault" +"dictGetUInt8" +"dictGetUInt8OrDefault" +"dictGetUUID" +"dictGetUUIDOrDefault" +"dictHas" +"dictIsIn" +"displayName" +"distanceL1" +"distanceL2" +"distanceL2Squared" +"distanceLinf" +"distanceLp" +"divide" +"divideDecimal" +"domain" +"domainRFC" +"domainWithoutWWW" +"domainWithoutWWWRFC" +"dotProduct" +"dumpColumnStructure" +"dynamicElement" +"dynamicType" +"e" +"editDistance" +"editDistanceUTF8" +"empty" +"emptyArrayDate" +"emptyArrayDateTime" +"emptyArrayFloat32" +"emptyArrayFloat64" +"emptyArrayInt16" +"emptyArrayInt32" +"emptyArrayInt64" +"emptyArrayInt8" +"emptyArrayString" 
+"emptyArrayToSingle" +"emptyArrayUInt16" +"emptyArrayUInt32" +"emptyArrayUInt64" +"emptyArrayUInt8" +"enabledProfiles" +"enabledRoles" +"encodeURLComponent" +"encodeURLFormComponent" +"encodeXMLComponent" +"encrypt" +"endsWith" +"endsWithUTF8" +"entropy" +"entropyArgMax" +"entropyArgMin" +"entropyArray" +"entropyDistinct" +"entropyForEach" +"entropyIf" +"entropyMap" +"entropyMerge" +"entropyNull" +"entropyOrDefault" +"entropyOrNull" +"entropyResample" +"entropySimpleState" +"entropyState" +"equals" +"erf" +"erfc" +"errorCodeToName" +"evalMLMethod" +"exp" +"exp10" +"exp2" +"exponentialMovingAverage" +"exponentialMovingAverageArgMax" +"exponentialMovingAverageArgMin" +"exponentialMovingAverageArray" +"exponentialMovingAverageDistinct" +"exponentialMovingAverageForEach" +"exponentialMovingAverageIf" +"exponentialMovingAverageMap" +"exponentialMovingAverageMerge" +"exponentialMovingAverageNull" +"exponentialMovingAverageOrDefault" +"exponentialMovingAverageOrNull" +"exponentialMovingAverageResample" +"exponentialMovingAverageSimpleState" +"exponentialMovingAverageState" +"exponentialTimeDecayedAvg" +"exponentialTimeDecayedAvgArgMax" +"exponentialTimeDecayedAvgArgMin" +"exponentialTimeDecayedAvgArray" +"exponentialTimeDecayedAvgDistinct" +"exponentialTimeDecayedAvgForEach" +"exponentialTimeDecayedAvgIf" +"exponentialTimeDecayedAvgMap" +"exponentialTimeDecayedAvgMerge" +"exponentialTimeDecayedAvgNull" +"exponentialTimeDecayedAvgOrDefault" +"exponentialTimeDecayedAvgOrNull" +"exponentialTimeDecayedAvgResample" +"exponentialTimeDecayedAvgSimpleState" +"exponentialTimeDecayedAvgState" +"exponentialTimeDecayedCount" +"exponentialTimeDecayedCountArgMax" +"exponentialTimeDecayedCountArgMin" +"exponentialTimeDecayedCountArray" +"exponentialTimeDecayedCountDistinct" +"exponentialTimeDecayedCountForEach" +"exponentialTimeDecayedCountIf" +"exponentialTimeDecayedCountMap" +"exponentialTimeDecayedCountMerge" +"exponentialTimeDecayedCountNull" +"exponentialTimeDecayedCountOrDefault" +"exponentialTimeDecayedCountOrNull" +"exponentialTimeDecayedCountResample" +"exponentialTimeDecayedCountSimpleState" +"exponentialTimeDecayedCountState" +"exponentialTimeDecayedMax" +"exponentialTimeDecayedMaxArgMax" +"exponentialTimeDecayedMaxArgMin" +"exponentialTimeDecayedMaxArray" +"exponentialTimeDecayedMaxDistinct" +"exponentialTimeDecayedMaxForEach" +"exponentialTimeDecayedMaxIf" +"exponentialTimeDecayedMaxMap" +"exponentialTimeDecayedMaxMerge" +"exponentialTimeDecayedMaxNull" +"exponentialTimeDecayedMaxOrDefault" +"exponentialTimeDecayedMaxOrNull" +"exponentialTimeDecayedMaxResample" +"exponentialTimeDecayedMaxSimpleState" +"exponentialTimeDecayedMaxState" +"exponentialTimeDecayedSum" +"exponentialTimeDecayedSumArgMax" +"exponentialTimeDecayedSumArgMin" +"exponentialTimeDecayedSumArray" +"exponentialTimeDecayedSumDistinct" +"exponentialTimeDecayedSumForEach" +"exponentialTimeDecayedSumIf" +"exponentialTimeDecayedSumMap" +"exponentialTimeDecayedSumMerge" +"exponentialTimeDecayedSumNull" +"exponentialTimeDecayedSumOrDefault" +"exponentialTimeDecayedSumOrNull" +"exponentialTimeDecayedSumResample" +"exponentialTimeDecayedSumSimpleState" +"exponentialTimeDecayedSumState" +"extract" +"extractAll" +"extractAllGroups" +"extractAllGroupsHorizontal" +"extractAllGroupsVertical" +"extractGroups" +"extractKeyValuePairs" +"extractKeyValuePairsWithEscaping" +"extractTextFromHTML" +"extractURLParameter" +"extractURLParameterNames" +"extractURLParameters" +"factorial" +"farmFingerprint64" +"farmHash64" +"file" +"filesystemAvailable" 
+"filesystemCapacity" +"filesystemUnreserved" +"finalizeAggregation" +"firstLine" +"firstSignificantSubdomain" +"firstSignificantSubdomainCustom" +"firstSignificantSubdomainCustomRFC" +"firstSignificantSubdomainRFC" +"first_value" +"first_valueArgMax" +"first_valueArgMin" +"first_valueArray" +"first_valueDistinct" +"first_valueForEach" +"first_valueIf" +"first_valueMap" +"first_valueMerge" +"first_valueNull" +"first_valueOrDefault" +"first_valueOrNull" +"first_valueResample" +"first_valueSimpleState" +"first_valueState" +"first_value_respect_nulls" +"first_value_respect_nullsArgMax" +"first_value_respect_nullsArgMin" +"first_value_respect_nullsArray" +"first_value_respect_nullsDistinct" +"first_value_respect_nullsForEach" +"first_value_respect_nullsIf" +"first_value_respect_nullsMap" +"first_value_respect_nullsMerge" +"first_value_respect_nullsNull" +"first_value_respect_nullsOrDefault" +"first_value_respect_nullsOrNull" +"first_value_respect_nullsResample" +"first_value_respect_nullsSimpleState" +"first_value_respect_nullsState" +"flameGraph" +"flameGraphArgMax" +"flameGraphArgMin" +"flameGraphArray" +"flameGraphDistinct" +"flameGraphForEach" +"flameGraphIf" +"flameGraphMap" +"flameGraphMerge" +"flameGraphNull" +"flameGraphOrDefault" +"flameGraphOrNull" +"flameGraphResample" +"flameGraphSimpleState" +"flameGraphState" +"flatten" +"flattenTuple" +"floor" +"format" +"formatDateTime" +"formatDateTimeInJodaSyntax" +"formatQuery" +"formatQueryOrNull" +"formatQuerySingleLine" +"formatQuerySingleLineOrNull" +"formatReadableDecimalSize" +"formatReadableQuantity" +"formatReadableSize" +"formatReadableTimeDelta" +"formatRow" +"formatRowNoNewline" +"fragment" +"fromDaysSinceYearZero" +"fromDaysSinceYearZero32" +"fromModifiedJulianDay" +"fromModifiedJulianDayOrNull" +"fromUTCTimestamp" +"fromUnixTimestamp" +"fromUnixTimestamp64Micro" +"fromUnixTimestamp64Milli" +"fromUnixTimestamp64Nano" +"fromUnixTimestampInJodaSyntax" +"from_utc_timestamp" +"fullHostName" +"fuzzBits" +"gccMurmurHash" +"gcd" +"generateRandomStructure" +"generateSnowflakeID" +"generateULID" +"generateUUIDv4" +"generateUUIDv7" +"geoDistance" +"geoToH3" +"geoToS2" +"geohashDecode" +"geohashEncode" +"geohashesInBox" +"getClientHTTPHeader" +"getMacro" +"getOSKernelVersion" +"getServerPort" +"getSetting" +"getSizeOfEnumType" +"getSubcolumn" +"getTypeSerializationStreams" +"globalIn" +"globalInIgnoreSet" +"globalNotIn" +"globalNotInIgnoreSet" +"globalNotNullIn" +"globalNotNullInIgnoreSet" +"globalNullIn" +"globalNullInIgnoreSet" +"globalVariable" +"greatCircleAngle" +"greatCircleDistance" +"greater" +"greaterOrEquals" +"greatest" +"groupArray" +"groupArrayArgMax" +"groupArrayArgMin" +"groupArrayArray" +"groupArrayDistinct" +"groupArrayForEach" +"groupArrayIf" +"groupArrayInsertAt" +"groupArrayInsertAtArgMax" +"groupArrayInsertAtArgMin" +"groupArrayInsertAtArray" +"groupArrayInsertAtDistinct" +"groupArrayInsertAtForEach" +"groupArrayInsertAtIf" +"groupArrayInsertAtMap" +"groupArrayInsertAtMerge" +"groupArrayInsertAtNull" +"groupArrayInsertAtOrDefault" +"groupArrayInsertAtOrNull" +"groupArrayInsertAtResample" +"groupArrayInsertAtSimpleState" +"groupArrayInsertAtState" +"groupArrayIntersect" +"groupArrayIntersectArgMax" +"groupArrayIntersectArgMin" +"groupArrayIntersectArray" +"groupArrayIntersectDistinct" +"groupArrayIntersectForEach" +"groupArrayIntersectIf" +"groupArrayIntersectMap" +"groupArrayIntersectMerge" +"groupArrayIntersectNull" +"groupArrayIntersectOrDefault" +"groupArrayIntersectOrNull" +"groupArrayIntersectResample" 
+"groupArrayIntersectSimpleState" +"groupArrayIntersectState" +"groupArrayLast" +"groupArrayLastArgMax" +"groupArrayLastArgMin" +"groupArrayLastArray" +"groupArrayLastDistinct" +"groupArrayLastForEach" +"groupArrayLastIf" +"groupArrayLastMap" +"groupArrayLastMerge" +"groupArrayLastNull" +"groupArrayLastOrDefault" +"groupArrayLastOrNull" +"groupArrayLastResample" +"groupArrayLastSimpleState" +"groupArrayLastState" +"groupArrayMap" +"groupArrayMerge" +"groupArrayMovingAvg" +"groupArrayMovingAvgArgMax" +"groupArrayMovingAvgArgMin" +"groupArrayMovingAvgArray" +"groupArrayMovingAvgDistinct" +"groupArrayMovingAvgForEach" +"groupArrayMovingAvgIf" +"groupArrayMovingAvgMap" +"groupArrayMovingAvgMerge" +"groupArrayMovingAvgNull" +"groupArrayMovingAvgOrDefault" +"groupArrayMovingAvgOrNull" +"groupArrayMovingAvgResample" +"groupArrayMovingAvgSimpleState" +"groupArrayMovingAvgState" +"groupArrayMovingSum" +"groupArrayMovingSumArgMax" +"groupArrayMovingSumArgMin" +"groupArrayMovingSumArray" +"groupArrayMovingSumDistinct" +"groupArrayMovingSumForEach" +"groupArrayMovingSumIf" +"groupArrayMovingSumMap" +"groupArrayMovingSumMerge" +"groupArrayMovingSumNull" +"groupArrayMovingSumOrDefault" +"groupArrayMovingSumOrNull" +"groupArrayMovingSumResample" +"groupArrayMovingSumSimpleState" +"groupArrayMovingSumState" +"groupArrayNull" +"groupArrayOrDefault" +"groupArrayOrNull" +"groupArrayResample" +"groupArraySample" +"groupArraySampleArgMax" +"groupArraySampleArgMin" +"groupArraySampleArray" +"groupArraySampleDistinct" +"groupArraySampleForEach" +"groupArraySampleIf" +"groupArraySampleMap" +"groupArraySampleMerge" +"groupArraySampleNull" +"groupArraySampleOrDefault" +"groupArraySampleOrNull" +"groupArraySampleResample" +"groupArraySampleSimpleState" +"groupArraySampleState" +"groupArraySimpleState" +"groupArraySorted" +"groupArraySortedArgMax" +"groupArraySortedArgMin" +"groupArraySortedArray" +"groupArraySortedDistinct" +"groupArraySortedForEach" +"groupArraySortedIf" +"groupArraySortedMap" +"groupArraySortedMerge" +"groupArraySortedNull" +"groupArraySortedOrDefault" +"groupArraySortedOrNull" +"groupArraySortedResample" +"groupArraySortedSimpleState" +"groupArraySortedState" +"groupArrayState" +"groupBitAnd" +"groupBitAndArgMax" +"groupBitAndArgMin" +"groupBitAndArray" +"groupBitAndDistinct" +"groupBitAndForEach" +"groupBitAndIf" +"groupBitAndMap" +"groupBitAndMerge" +"groupBitAndNull" +"groupBitAndOrDefault" +"groupBitAndOrNull" +"groupBitAndResample" +"groupBitAndSimpleState" +"groupBitAndState" +"groupBitOr" +"groupBitOrArgMax" +"groupBitOrArgMin" +"groupBitOrArray" +"groupBitOrDistinct" +"groupBitOrForEach" +"groupBitOrIf" +"groupBitOrMap" +"groupBitOrMerge" +"groupBitOrNull" +"groupBitOrOrDefault" +"groupBitOrOrNull" +"groupBitOrResample" +"groupBitOrSimpleState" +"groupBitOrState" +"groupBitXor" +"groupBitXorArgMax" +"groupBitXorArgMin" +"groupBitXorArray" +"groupBitXorDistinct" +"groupBitXorForEach" +"groupBitXorIf" +"groupBitXorMap" +"groupBitXorMerge" +"groupBitXorNull" +"groupBitXorOrDefault" +"groupBitXorOrNull" +"groupBitXorResample" +"groupBitXorSimpleState" +"groupBitXorState" +"groupBitmap" +"groupBitmapAnd" +"groupBitmapAndArgMax" +"groupBitmapAndArgMin" +"groupBitmapAndArray" +"groupBitmapAndDistinct" +"groupBitmapAndForEach" +"groupBitmapAndIf" +"groupBitmapAndMap" +"groupBitmapAndMerge" +"groupBitmapAndNull" +"groupBitmapAndOrDefault" +"groupBitmapAndOrNull" +"groupBitmapAndResample" +"groupBitmapAndSimpleState" +"groupBitmapAndState" +"groupBitmapArgMax" +"groupBitmapArgMin" 
+"groupBitmapArray" +"groupBitmapDistinct" +"groupBitmapForEach" +"groupBitmapIf" +"groupBitmapMap" +"groupBitmapMerge" +"groupBitmapNull" +"groupBitmapOr" +"groupBitmapOrArgMax" +"groupBitmapOrArgMin" +"groupBitmapOrArray" +"groupBitmapOrDefault" +"groupBitmapOrDistinct" +"groupBitmapOrForEach" +"groupBitmapOrIf" +"groupBitmapOrMap" +"groupBitmapOrMerge" +"groupBitmapOrNull" +"groupBitmapOrNull" +"groupBitmapOrOrDefault" +"groupBitmapOrOrNull" +"groupBitmapOrResample" +"groupBitmapOrSimpleState" +"groupBitmapOrState" +"groupBitmapResample" +"groupBitmapSimpleState" +"groupBitmapState" +"groupBitmapXor" +"groupBitmapXorArgMax" +"groupBitmapXorArgMin" +"groupBitmapXorArray" +"groupBitmapXorDistinct" +"groupBitmapXorForEach" +"groupBitmapXorIf" +"groupBitmapXorMap" +"groupBitmapXorMerge" +"groupBitmapXorNull" +"groupBitmapXorOrDefault" +"groupBitmapXorOrNull" +"groupBitmapXorResample" +"groupBitmapXorSimpleState" +"groupBitmapXorState" +"groupConcat" +"groupConcatArgMax" +"groupConcatArgMin" +"groupConcatArray" +"groupConcatDistinct" +"groupConcatForEach" +"groupConcatIf" +"groupConcatMap" +"groupConcatMerge" +"groupConcatNull" +"groupConcatOrDefault" +"groupConcatOrNull" +"groupConcatResample" +"groupConcatSimpleState" +"groupConcatState" +"groupUniqArray" +"groupUniqArrayArgMax" +"groupUniqArrayArgMin" +"groupUniqArrayArray" +"groupUniqArrayDistinct" +"groupUniqArrayForEach" +"groupUniqArrayIf" +"groupUniqArrayMap" +"groupUniqArrayMerge" +"groupUniqArrayNull" +"groupUniqArrayOrDefault" +"groupUniqArrayOrNull" +"groupUniqArrayResample" +"groupUniqArraySimpleState" +"groupUniqArrayState" +"group_concat" +"group_concatArgMax" +"group_concatArgMin" +"group_concatArray" +"group_concatDistinct" +"group_concatForEach" +"group_concatIf" +"group_concatMap" +"group_concatMerge" +"group_concatNull" +"group_concatOrDefault" +"group_concatOrNull" +"group_concatResample" +"group_concatSimpleState" +"group_concatState" +"h3CellAreaM2" +"h3CellAreaRads2" +"h3Distance" +"h3EdgeAngle" +"h3EdgeLengthKm" +"h3EdgeLengthM" +"h3ExactEdgeLengthKm" +"h3ExactEdgeLengthM" +"h3ExactEdgeLengthRads" +"h3GetBaseCell" +"h3GetDestinationIndexFromUnidirectionalEdge" +"h3GetFaces" +"h3GetIndexesFromUnidirectionalEdge" +"h3GetOriginIndexFromUnidirectionalEdge" +"h3GetPentagonIndexes" +"h3GetRes0Indexes" +"h3GetResolution" +"h3GetUnidirectionalEdge" +"h3GetUnidirectionalEdgeBoundary" +"h3GetUnidirectionalEdgesFromHexagon" +"h3HexAreaKm2" +"h3HexAreaM2" +"h3HexRing" +"h3IndexesAreNeighbors" +"h3IsPentagon" +"h3IsResClassIII" +"h3IsValid" +"h3Line" +"h3NumHexagons" +"h3PointDistKm" +"h3PointDistM" +"h3PointDistRads" +"h3ToCenterChild" +"h3ToChildren" +"h3ToGeo" +"h3ToGeoBoundary" +"h3ToParent" +"h3ToString" +"h3UnidirectionalEdgeIsValid" +"h3kRing" +"halfMD5" +"has" +"hasAll" +"hasAny" +"hasColumnInTable" +"hasSubsequence" +"hasSubsequenceCaseInsensitive" +"hasSubsequenceCaseInsensitiveUTF8" +"hasSubsequenceUTF8" +"hasSubstr" +"hasThreadFuzzer" +"hasToken" +"hasTokenCaseInsensitive" +"hasTokenCaseInsensitiveOrNull" +"hasTokenOrNull" +"hex" +"hilbertDecode" +"hilbertEncode" +"histogram" +"histogramArgMax" +"histogramArgMin" +"histogramArray" +"histogramDistinct" +"histogramForEach" +"histogramIf" +"histogramMap" +"histogramMerge" +"histogramNull" +"histogramOrDefault" +"histogramOrNull" +"histogramResample" +"histogramSimpleState" +"histogramState" +"hiveHash" +"hop" +"hopEnd" +"hopStart" +"hostName" +"hostname" +"hypot" +"identity" +"idnaDecode" +"idnaEncode" +"if" +"ifNotFinite" +"ifNull" +"ignore" +"ilike" +"in" 
+"inIgnoreSet" +"indexHint" +"indexOf" +"initcap" +"initcapUTF8" +"initialQueryID" +"initial_query_id" +"initializeAggregation" +"instr" +"intDiv" +"intDivOrZero" +"intExp10" +"intExp2" +"intHash32" +"intHash64" +"intervalLengthSum" +"intervalLengthSumArgMax" +"intervalLengthSumArgMin" +"intervalLengthSumArray" +"intervalLengthSumDistinct" +"intervalLengthSumForEach" +"intervalLengthSumIf" +"intervalLengthSumMap" +"intervalLengthSumMerge" +"intervalLengthSumNull" +"intervalLengthSumOrDefault" +"intervalLengthSumOrNull" +"intervalLengthSumResample" +"intervalLengthSumSimpleState" +"intervalLengthSumState" +"isConstant" +"isDecimalOverflow" +"isFinite" +"isIPAddressInRange" +"isIPv4String" +"isIPv6String" +"isInfinite" +"isNaN" +"isNotDistinctFrom" +"isNotNull" +"isNull" +"isNullable" +"isValidJSON" +"isValidUTF8" +"isZeroOrNull" +"jaroSimilarity" +"jaroWinklerSimilarity" +"javaHash" +"javaHashUTF16LE" +"joinGet" +"joinGetOrNull" +"jsonMergePatch" +"jumpConsistentHash" +"kafkaMurmurHash" +"kolmogorovSmirnovTest" +"kolmogorovSmirnovTestArgMax" +"kolmogorovSmirnovTestArgMin" +"kolmogorovSmirnovTestArray" +"kolmogorovSmirnovTestDistinct" +"kolmogorovSmirnovTestForEach" +"kolmogorovSmirnovTestIf" +"kolmogorovSmirnovTestMap" +"kolmogorovSmirnovTestMerge" +"kolmogorovSmirnovTestNull" +"kolmogorovSmirnovTestOrDefault" +"kolmogorovSmirnovTestOrNull" +"kolmogorovSmirnovTestResample" +"kolmogorovSmirnovTestSimpleState" +"kolmogorovSmirnovTestState" +"kostikConsistentHash" +"kql_array_sort_asc" +"kql_array_sort_desc" +"kurtPop" +"kurtPopArgMax" +"kurtPopArgMin" +"kurtPopArray" +"kurtPopDistinct" +"kurtPopForEach" +"kurtPopIf" +"kurtPopMap" +"kurtPopMerge" +"kurtPopNull" +"kurtPopOrDefault" +"kurtPopOrNull" +"kurtPopResample" +"kurtPopSimpleState" +"kurtPopState" +"kurtSamp" +"kurtSampArgMax" +"kurtSampArgMin" +"kurtSampArray" +"kurtSampDistinct" +"kurtSampForEach" +"kurtSampIf" +"kurtSampMap" +"kurtSampMerge" +"kurtSampNull" +"kurtSampOrDefault" +"kurtSampOrNull" +"kurtSampResample" +"kurtSampSimpleState" +"kurtSampState" +"lagInFrame" +"lagInFrameArgMax" +"lagInFrameArgMin" +"lagInFrameArray" +"lagInFrameDistinct" +"lagInFrameForEach" +"lagInFrameIf" +"lagInFrameMap" +"lagInFrameMerge" +"lagInFrameNull" +"lagInFrameOrDefault" +"lagInFrameOrNull" +"lagInFrameResample" +"lagInFrameSimpleState" +"lagInFrameState" +"largestTriangleThreeBuckets" +"largestTriangleThreeBucketsArgMax" +"largestTriangleThreeBucketsArgMin" +"largestTriangleThreeBucketsArray" +"largestTriangleThreeBucketsDistinct" +"largestTriangleThreeBucketsForEach" +"largestTriangleThreeBucketsIf" +"largestTriangleThreeBucketsMap" +"largestTriangleThreeBucketsMerge" +"largestTriangleThreeBucketsNull" +"largestTriangleThreeBucketsOrDefault" +"largestTriangleThreeBucketsOrNull" +"largestTriangleThreeBucketsResample" +"largestTriangleThreeBucketsSimpleState" +"largestTriangleThreeBucketsState" +"last_value" +"last_valueArgMax" +"last_valueArgMin" +"last_valueArray" +"last_valueDistinct" +"last_valueForEach" +"last_valueIf" +"last_valueMap" +"last_valueMerge" +"last_valueNull" +"last_valueOrDefault" +"last_valueOrNull" +"last_valueResample" +"last_valueSimpleState" +"last_valueState" +"last_value_respect_nulls" +"last_value_respect_nullsArgMax" +"last_value_respect_nullsArgMin" +"last_value_respect_nullsArray" +"last_value_respect_nullsDistinct" +"last_value_respect_nullsForEach" +"last_value_respect_nullsIf" +"last_value_respect_nullsMap" +"last_value_respect_nullsMerge" +"last_value_respect_nullsNull" +"last_value_respect_nullsOrDefault" 
+"last_value_respect_nullsOrNull" +"last_value_respect_nullsResample" +"last_value_respect_nullsSimpleState" +"last_value_respect_nullsState" +"lcase" +"lcm" +"leadInFrame" +"leadInFrameArgMax" +"leadInFrameArgMin" +"leadInFrameArray" +"leadInFrameDistinct" +"leadInFrameForEach" +"leadInFrameIf" +"leadInFrameMap" +"leadInFrameMerge" +"leadInFrameNull" +"leadInFrameOrDefault" +"leadInFrameOrNull" +"leadInFrameResample" +"leadInFrameSimpleState" +"leadInFrameState" +"least" +"left" +"leftPad" +"leftPadUTF8" +"leftUTF8" +"lemmatize" +"length" +"lengthUTF8" +"less" +"lessOrEquals" +"levenshteinDistance" +"levenshteinDistanceUTF8" +"lgamma" +"like" +"ln" +"locate" +"log" +"log10" +"log1p" +"log2" +"logTrace" +"lowCardinalityIndices" +"lowCardinalityKeys" +"lower" +"lowerUTF8" +"lpad" +"ltrim" +"lttb" +"lttbArgMax" +"lttbArgMin" +"lttbArray" +"lttbDistinct" +"lttbForEach" +"lttbIf" +"lttbMap" +"lttbMerge" +"lttbNull" +"lttbOrDefault" +"lttbOrNull" +"lttbResample" +"lttbSimpleState" +"lttbState" +"makeDate" +"makeDate32" +"makeDateTime" +"makeDateTime64" +"mannWhitneyUTest" +"mannWhitneyUTestArgMax" +"mannWhitneyUTestArgMin" +"mannWhitneyUTestArray" +"mannWhitneyUTestDistinct" +"mannWhitneyUTestForEach" +"mannWhitneyUTestIf" +"mannWhitneyUTestMap" +"mannWhitneyUTestMerge" +"mannWhitneyUTestNull" +"mannWhitneyUTestOrDefault" +"mannWhitneyUTestOrNull" +"mannWhitneyUTestResample" +"mannWhitneyUTestSimpleState" +"mannWhitneyUTestState" +"map" +"mapAdd" +"mapAll" +"mapApply" +"mapConcat" +"mapContains" +"mapContainsKeyLike" +"mapExists" +"mapExtractKeyLike" +"mapFilter" +"mapFromArrays" +"mapFromString" +"mapKeys" +"mapPartialReverseSort" +"mapPartialSort" +"mapPopulateSeries" +"mapReverseSort" +"mapSort" +"mapSubtract" +"mapUpdate" +"mapValues" +"match" +"materialize" +"max" +"max2" +"maxArgMax" +"maxArgMin" +"maxArray" +"maxDistinct" +"maxForEach" +"maxIf" +"maxIntersections" +"maxIntersectionsArgMax" +"maxIntersectionsArgMin" +"maxIntersectionsArray" +"maxIntersectionsDistinct" +"maxIntersectionsForEach" +"maxIntersectionsIf" +"maxIntersectionsMap" +"maxIntersectionsMerge" +"maxIntersectionsNull" +"maxIntersectionsOrDefault" +"maxIntersectionsOrNull" +"maxIntersectionsPosition" +"maxIntersectionsPositionArgMax" +"maxIntersectionsPositionArgMin" +"maxIntersectionsPositionArray" +"maxIntersectionsPositionDistinct" +"maxIntersectionsPositionForEach" +"maxIntersectionsPositionIf" +"maxIntersectionsPositionMap" +"maxIntersectionsPositionMerge" +"maxIntersectionsPositionNull" +"maxIntersectionsPositionOrDefault" +"maxIntersectionsPositionOrNull" +"maxIntersectionsPositionResample" +"maxIntersectionsPositionSimpleState" +"maxIntersectionsPositionState" +"maxIntersectionsResample" +"maxIntersectionsSimpleState" +"maxIntersectionsState" +"maxMap" +"maxMappedArrays" +"maxMappedArraysArgMax" +"maxMappedArraysArgMin" +"maxMappedArraysArray" +"maxMappedArraysDistinct" +"maxMappedArraysForEach" +"maxMappedArraysIf" +"maxMappedArraysMap" +"maxMappedArraysMerge" +"maxMappedArraysNull" +"maxMappedArraysOrDefault" +"maxMappedArraysOrNull" +"maxMappedArraysResample" +"maxMappedArraysSimpleState" +"maxMappedArraysState" +"maxMerge" +"maxNull" +"maxOrDefault" +"maxOrNull" +"maxResample" +"maxSimpleState" +"maxState" +"meanZTest" +"meanZTestArgMax" +"meanZTestArgMin" +"meanZTestArray" +"meanZTestDistinct" +"meanZTestForEach" +"meanZTestIf" +"meanZTestMap" +"meanZTestMerge" +"meanZTestNull" +"meanZTestOrDefault" +"meanZTestOrNull" +"meanZTestResample" +"meanZTestSimpleState" +"meanZTestState" +"median" +"medianArgMax" 
+"medianArgMin" +"medianArray" +"medianBFloat16" +"medianBFloat16ArgMax" +"medianBFloat16ArgMin" +"medianBFloat16Array" +"medianBFloat16Distinct" +"medianBFloat16ForEach" +"medianBFloat16If" +"medianBFloat16Map" +"medianBFloat16Merge" +"medianBFloat16Null" +"medianBFloat16OrDefault" +"medianBFloat16OrNull" +"medianBFloat16Resample" +"medianBFloat16SimpleState" +"medianBFloat16State" +"medianBFloat16Weighted" +"medianBFloat16WeightedArgMax" +"medianBFloat16WeightedArgMin" +"medianBFloat16WeightedArray" +"medianBFloat16WeightedDistinct" +"medianBFloat16WeightedForEach" +"medianBFloat16WeightedIf" +"medianBFloat16WeightedMap" +"medianBFloat16WeightedMerge" +"medianBFloat16WeightedNull" +"medianBFloat16WeightedOrDefault" +"medianBFloat16WeightedOrNull" +"medianBFloat16WeightedResample" +"medianBFloat16WeightedSimpleState" +"medianBFloat16WeightedState" +"medianDD" +"medianDDArgMax" +"medianDDArgMin" +"medianDDArray" +"medianDDDistinct" +"medianDDForEach" +"medianDDIf" +"medianDDMap" +"medianDDMerge" +"medianDDNull" +"medianDDOrDefault" +"medianDDOrNull" +"medianDDResample" +"medianDDSimpleState" +"medianDDState" +"medianDeterministic" +"medianDeterministicArgMax" +"medianDeterministicArgMin" +"medianDeterministicArray" +"medianDeterministicDistinct" +"medianDeterministicForEach" +"medianDeterministicIf" +"medianDeterministicMap" +"medianDeterministicMerge" +"medianDeterministicNull" +"medianDeterministicOrDefault" +"medianDeterministicOrNull" +"medianDeterministicResample" +"medianDeterministicSimpleState" +"medianDeterministicState" +"medianDistinct" +"medianExact" +"medianExactArgMax" +"medianExactArgMin" +"medianExactArray" +"medianExactDistinct" +"medianExactForEach" +"medianExactHigh" +"medianExactHighArgMax" +"medianExactHighArgMin" +"medianExactHighArray" +"medianExactHighDistinct" +"medianExactHighForEach" +"medianExactHighIf" +"medianExactHighMap" +"medianExactHighMerge" +"medianExactHighNull" +"medianExactHighOrDefault" +"medianExactHighOrNull" +"medianExactHighResample" +"medianExactHighSimpleState" +"medianExactHighState" +"medianExactIf" +"medianExactLow" +"medianExactLowArgMax" +"medianExactLowArgMin" +"medianExactLowArray" +"medianExactLowDistinct" +"medianExactLowForEach" +"medianExactLowIf" +"medianExactLowMap" +"medianExactLowMerge" +"medianExactLowNull" +"medianExactLowOrDefault" +"medianExactLowOrNull" +"medianExactLowResample" +"medianExactLowSimpleState" +"medianExactLowState" +"medianExactMap" +"medianExactMerge" +"medianExactNull" +"medianExactOrDefault" +"medianExactOrNull" +"medianExactResample" +"medianExactSimpleState" +"medianExactState" +"medianExactWeighted" +"medianExactWeightedArgMax" +"medianExactWeightedArgMin" +"medianExactWeightedArray" +"medianExactWeightedDistinct" +"medianExactWeightedForEach" +"medianExactWeightedIf" +"medianExactWeightedMap" +"medianExactWeightedMerge" +"medianExactWeightedNull" +"medianExactWeightedOrDefault" +"medianExactWeightedOrNull" +"medianExactWeightedResample" +"medianExactWeightedSimpleState" +"medianExactWeightedState" +"medianForEach" +"medianGK" +"medianGKArgMax" +"medianGKArgMin" +"medianGKArray" +"medianGKDistinct" +"medianGKForEach" +"medianGKIf" +"medianGKMap" +"medianGKMerge" +"medianGKNull" +"medianGKOrDefault" +"medianGKOrNull" +"medianGKResample" +"medianGKSimpleState" +"medianGKState" +"medianIf" +"medianInterpolatedWeighted" +"medianInterpolatedWeightedArgMax" +"medianInterpolatedWeightedArgMin" +"medianInterpolatedWeightedArray" +"medianInterpolatedWeightedDistinct" +"medianInterpolatedWeightedForEach" 
+"medianInterpolatedWeightedIf" +"medianInterpolatedWeightedMap" +"medianInterpolatedWeightedMerge" +"medianInterpolatedWeightedNull" +"medianInterpolatedWeightedOrDefault" +"medianInterpolatedWeightedOrNull" +"medianInterpolatedWeightedResample" +"medianInterpolatedWeightedSimpleState" +"medianInterpolatedWeightedState" +"medianMap" +"medianMerge" +"medianNull" +"medianOrDefault" +"medianOrNull" +"medianResample" +"medianSimpleState" +"medianState" +"medianTDigest" +"medianTDigestArgMax" +"medianTDigestArgMin" +"medianTDigestArray" +"medianTDigestDistinct" +"medianTDigestForEach" +"medianTDigestIf" +"medianTDigestMap" +"medianTDigestMerge" +"medianTDigestNull" +"medianTDigestOrDefault" +"medianTDigestOrNull" +"medianTDigestResample" +"medianTDigestSimpleState" +"medianTDigestState" +"medianTDigestWeighted" +"medianTDigestWeightedArgMax" +"medianTDigestWeightedArgMin" +"medianTDigestWeightedArray" +"medianTDigestWeightedDistinct" +"medianTDigestWeightedForEach" +"medianTDigestWeightedIf" +"medianTDigestWeightedMap" +"medianTDigestWeightedMerge" +"medianTDigestWeightedNull" +"medianTDigestWeightedOrDefault" +"medianTDigestWeightedOrNull" +"medianTDigestWeightedResample" +"medianTDigestWeightedSimpleState" +"medianTDigestWeightedState" +"medianTiming" +"medianTimingArgMax" +"medianTimingArgMin" +"medianTimingArray" +"medianTimingDistinct" +"medianTimingForEach" +"medianTimingIf" +"medianTimingMap" +"medianTimingMerge" +"medianTimingNull" +"medianTimingOrDefault" +"medianTimingOrNull" +"medianTimingResample" +"medianTimingSimpleState" +"medianTimingState" +"medianTimingWeighted" +"medianTimingWeightedArgMax" +"medianTimingWeightedArgMin" +"medianTimingWeightedArray" +"medianTimingWeightedDistinct" +"medianTimingWeightedForEach" +"medianTimingWeightedIf" +"medianTimingWeightedMap" +"medianTimingWeightedMerge" +"medianTimingWeightedNull" +"medianTimingWeightedOrDefault" +"medianTimingWeightedOrNull" +"medianTimingWeightedResample" +"medianTimingWeightedSimpleState" +"medianTimingWeightedState" +"metroHash64" +"mid" +"min" +"min2" +"minArgMax" +"minArgMin" +"minArray" +"minDistinct" +"minForEach" +"minIf" +"minMap" +"minMappedArrays" +"minMappedArraysArgMax" +"minMappedArraysArgMin" +"minMappedArraysArray" +"minMappedArraysDistinct" +"minMappedArraysForEach" +"minMappedArraysIf" +"minMappedArraysMap" +"minMappedArraysMerge" +"minMappedArraysNull" +"minMappedArraysOrDefault" +"minMappedArraysOrNull" +"minMappedArraysResample" +"minMappedArraysSimpleState" +"minMappedArraysState" +"minMerge" +"minNull" +"minOrDefault" +"minOrNull" +"minResample" +"minSampleSizeContinous" +"minSampleSizeContinuous" +"minSampleSizeConversion" +"minSimpleState" +"minState" +"minus" +"mismatches" +"mod" +"modulo" +"moduloLegacy" +"moduloOrZero" +"monthName" +"mortonDecode" +"mortonEncode" +"multiFuzzyMatchAllIndices" +"multiFuzzyMatchAny" +"multiFuzzyMatchAnyIndex" +"multiIf" +"multiMatchAllIndices" +"multiMatchAny" +"multiMatchAnyIndex" +"multiSearchAllPositions" +"multiSearchAllPositionsCaseInsensitive" +"multiSearchAllPositionsCaseInsensitiveUTF8" +"multiSearchAllPositionsUTF8" +"multiSearchAny" +"multiSearchAnyCaseInsensitive" +"multiSearchAnyCaseInsensitiveUTF8" +"multiSearchAnyUTF8" +"multiSearchFirstIndex" +"multiSearchFirstIndexCaseInsensitive" +"multiSearchFirstIndexCaseInsensitiveUTF8" +"multiSearchFirstIndexUTF8" +"multiSearchFirstPosition" +"multiSearchFirstPositionCaseInsensitive" +"multiSearchFirstPositionCaseInsensitiveUTF8" +"multiSearchFirstPositionUTF8" +"multiply" +"multiplyDecimal" 
+"murmurHash2_32" +"murmurHash2_64" +"murmurHash3_128" +"murmurHash3_32" +"murmurHash3_64" +"negate" +"neighbor" +"nested" +"netloc" +"ngramDistance" +"ngramDistanceCaseInsensitive" +"ngramDistanceCaseInsensitiveUTF8" +"ngramDistanceUTF8" +"ngramMinHash" +"ngramMinHashArg" +"ngramMinHashArgCaseInsensitive" +"ngramMinHashArgCaseInsensitiveUTF8" +"ngramMinHashArgUTF8" +"ngramMinHashCaseInsensitive" +"ngramMinHashCaseInsensitiveUTF8" +"ngramMinHashUTF8" +"ngramSearch" +"ngramSearchCaseInsensitive" +"ngramSearchCaseInsensitiveUTF8" +"ngramSearchUTF8" +"ngramSimHash" +"ngramSimHashCaseInsensitive" +"ngramSimHashCaseInsensitiveUTF8" +"ngramSimHashUTF8" +"ngrams" +"nonNegativeDerivative" +"nonNegativeDerivativeArgMax" +"nonNegativeDerivativeArgMin" +"nonNegativeDerivativeArray" +"nonNegativeDerivativeDistinct" +"nonNegativeDerivativeForEach" +"nonNegativeDerivativeIf" +"nonNegativeDerivativeMap" +"nonNegativeDerivativeMerge" +"nonNegativeDerivativeNull" +"nonNegativeDerivativeOrDefault" +"nonNegativeDerivativeOrNull" +"nonNegativeDerivativeResample" +"nonNegativeDerivativeSimpleState" +"nonNegativeDerivativeState" +"normL1" +"normL2" +"normL2Squared" +"normLinf" +"normLp" +"normalizeL1" +"normalizeL2" +"normalizeLinf" +"normalizeLp" +"normalizeQuery" +"normalizeQueryKeepNames" +"normalizeUTF8NFC" +"normalizeUTF8NFD" +"normalizeUTF8NFKC" +"normalizeUTF8NFKD" +"normalizedQueryHash" +"normalizedQueryHashKeepNames" +"not" +"notEmpty" +"notEquals" +"notILike" +"notIn" +"notInIgnoreSet" +"notLike" +"notNullIn" +"notNullInIgnoreSet" +"nothing" +"nothingArgMax" +"nothingArgMin" +"nothingArray" +"nothingDistinct" +"nothingForEach" +"nothingIf" +"nothingMap" +"nothingMerge" +"nothingNull" +"nothingNull" +"nothingNullArgMax" +"nothingNullArgMin" +"nothingNullArray" +"nothingNullDistinct" +"nothingNullForEach" +"nothingNullIf" +"nothingNullMap" +"nothingNullMerge" +"nothingNullNull" +"nothingNullOrDefault" +"nothingNullOrNull" +"nothingNullResample" +"nothingNullSimpleState" +"nothingNullState" +"nothingOrDefault" +"nothingOrNull" +"nothingResample" +"nothingSimpleState" +"nothingState" +"nothingUInt64" +"nothingUInt64ArgMax" +"nothingUInt64ArgMin" +"nothingUInt64Array" +"nothingUInt64Distinct" +"nothingUInt64ForEach" +"nothingUInt64If" +"nothingUInt64Map" +"nothingUInt64Merge" +"nothingUInt64Null" +"nothingUInt64OrDefault" +"nothingUInt64OrNull" +"nothingUInt64Resample" +"nothingUInt64SimpleState" +"nothingUInt64State" +"now" +"now64" +"nowInBlock" +"nth_value" +"nth_valueArgMax" +"nth_valueArgMin" +"nth_valueArray" +"nth_valueDistinct" +"nth_valueForEach" +"nth_valueIf" +"nth_valueMap" +"nth_valueMerge" +"nth_valueNull" +"nth_valueOrDefault" +"nth_valueOrNull" +"nth_valueResample" +"nth_valueSimpleState" +"nth_valueState" +"ntile" +"ntileArgMax" +"ntileArgMin" +"ntileArray" +"ntileDistinct" +"ntileForEach" +"ntileIf" +"ntileMap" +"ntileMerge" +"ntileNull" +"ntileOrDefault" +"ntileOrNull" +"ntileResample" +"ntileSimpleState" +"ntileState" +"nullIf" +"nullIn" +"nullInIgnoreSet" +"or" +"parseDateTime" +"parseDateTime32BestEffort" +"parseDateTime32BestEffortOrNull" +"parseDateTime32BestEffortOrZero" +"parseDateTime64BestEffort" +"parseDateTime64BestEffortOrNull" +"parseDateTime64BestEffortOrZero" +"parseDateTime64BestEffortUS" +"parseDateTime64BestEffortUSOrNull" +"parseDateTime64BestEffortUSOrZero" +"parseDateTimeBestEffort" +"parseDateTimeBestEffortOrNull" +"parseDateTimeBestEffortOrZero" +"parseDateTimeBestEffortUS" +"parseDateTimeBestEffortUSOrNull" +"parseDateTimeBestEffortUSOrZero" 
+"parseDateTimeInJodaSyntax" +"parseDateTimeInJodaSyntaxOrNull" +"parseDateTimeInJodaSyntaxOrZero" +"parseDateTimeOrNull" +"parseDateTimeOrZero" +"parseReadableSize" +"parseReadableSizeOrNull" +"parseReadableSizeOrZero" +"parseTimeDelta" +"partitionID" +"partitionId" +"path" +"pathFull" +"percentRank" +"percentRankArgMax" +"percentRankArgMin" +"percentRankArray" +"percentRankDistinct" +"percentRankForEach" +"percentRankIf" +"percentRankMap" +"percentRankMerge" +"percentRankNull" +"percentRankOrDefault" +"percentRankOrNull" +"percentRankResample" +"percentRankSimpleState" +"percentRankState" +"percent_rank" +"percent_rankArgMax" +"percent_rankArgMin" +"percent_rankArray" +"percent_rankDistinct" +"percent_rankForEach" +"percent_rankIf" +"percent_rankMap" +"percent_rankMerge" +"percent_rankNull" +"percent_rankOrDefault" +"percent_rankOrNull" +"percent_rankResample" +"percent_rankSimpleState" +"percent_rankState" +"pi" +"plus" +"pmod" +"pointInEllipses" +"pointInPolygon" +"polygonAreaCartesian" +"polygonAreaSpherical" +"polygonConvexHullCartesian" +"polygonPerimeterCartesian" +"polygonPerimeterSpherical" +"polygonsDistanceCartesian" +"polygonsDistanceSpherical" +"polygonsEqualsCartesian" +"polygonsIntersectionCartesian" +"polygonsIntersectionSpherical" +"polygonsSymDifferenceCartesian" +"polygonsSymDifferenceSpherical" +"polygonsUnionCartesian" +"polygonsUnionSpherical" +"polygonsWithinCartesian" +"polygonsWithinSpherical" +"port" +"portRFC" +"position" +"positionCaseInsensitive" +"positionCaseInsensitiveUTF8" +"positionUTF8" +"positiveModulo" +"positive_modulo" +"pow" +"power" +"printf" +"proportionsZTest" +"protocol" +"punycodeDecode" +"punycodeEncode" +"quantile" +"quantileArgMax" +"quantileArgMin" +"quantileArray" +"quantileBFloat16" +"quantileBFloat16ArgMax" +"quantileBFloat16ArgMin" +"quantileBFloat16Array" +"quantileBFloat16Distinct" +"quantileBFloat16ForEach" +"quantileBFloat16If" +"quantileBFloat16Map" +"quantileBFloat16Merge" +"quantileBFloat16Null" +"quantileBFloat16OrDefault" +"quantileBFloat16OrNull" +"quantileBFloat16Resample" +"quantileBFloat16SimpleState" +"quantileBFloat16State" +"quantileBFloat16Weighted" +"quantileBFloat16WeightedArgMax" +"quantileBFloat16WeightedArgMin" +"quantileBFloat16WeightedArray" +"quantileBFloat16WeightedDistinct" +"quantileBFloat16WeightedForEach" +"quantileBFloat16WeightedIf" +"quantileBFloat16WeightedMap" +"quantileBFloat16WeightedMerge" +"quantileBFloat16WeightedNull" +"quantileBFloat16WeightedOrDefault" +"quantileBFloat16WeightedOrNull" +"quantileBFloat16WeightedResample" +"quantileBFloat16WeightedSimpleState" +"quantileBFloat16WeightedState" +"quantileDD" +"quantileDDArgMax" +"quantileDDArgMin" +"quantileDDArray" +"quantileDDDistinct" +"quantileDDForEach" +"quantileDDIf" +"quantileDDMap" +"quantileDDMerge" +"quantileDDNull" +"quantileDDOrDefault" +"quantileDDOrNull" +"quantileDDResample" +"quantileDDSimpleState" +"quantileDDState" +"quantileDeterministic" +"quantileDeterministicArgMax" +"quantileDeterministicArgMin" +"quantileDeterministicArray" +"quantileDeterministicDistinct" +"quantileDeterministicForEach" +"quantileDeterministicIf" +"quantileDeterministicMap" +"quantileDeterministicMerge" +"quantileDeterministicNull" +"quantileDeterministicOrDefault" +"quantileDeterministicOrNull" +"quantileDeterministicResample" +"quantileDeterministicSimpleState" +"quantileDeterministicState" +"quantileDistinct" +"quantileExact" +"quantileExactArgMax" +"quantileExactArgMin" +"quantileExactArray" +"quantileExactDistinct" +"quantileExactExclusive" 
+"quantileExactExclusiveArgMax" +"quantileExactExclusiveArgMin" +"quantileExactExclusiveArray" +"quantileExactExclusiveDistinct" +"quantileExactExclusiveForEach" +"quantileExactExclusiveIf" +"quantileExactExclusiveMap" +"quantileExactExclusiveMerge" +"quantileExactExclusiveNull" +"quantileExactExclusiveOrDefault" +"quantileExactExclusiveOrNull" +"quantileExactExclusiveResample" +"quantileExactExclusiveSimpleState" +"quantileExactExclusiveState" +"quantileExactForEach" +"quantileExactHigh" +"quantileExactHighArgMax" +"quantileExactHighArgMin" +"quantileExactHighArray" +"quantileExactHighDistinct" +"quantileExactHighForEach" +"quantileExactHighIf" +"quantileExactHighMap" +"quantileExactHighMerge" +"quantileExactHighNull" +"quantileExactHighOrDefault" +"quantileExactHighOrNull" +"quantileExactHighResample" +"quantileExactHighSimpleState" +"quantileExactHighState" +"quantileExactIf" +"quantileExactInclusive" +"quantileExactInclusiveArgMax" +"quantileExactInclusiveArgMin" +"quantileExactInclusiveArray" +"quantileExactInclusiveDistinct" +"quantileExactInclusiveForEach" +"quantileExactInclusiveIf" +"quantileExactInclusiveMap" +"quantileExactInclusiveMerge" +"quantileExactInclusiveNull" +"quantileExactInclusiveOrDefault" +"quantileExactInclusiveOrNull" +"quantileExactInclusiveResample" +"quantileExactInclusiveSimpleState" +"quantileExactInclusiveState" +"quantileExactLow" +"quantileExactLowArgMax" +"quantileExactLowArgMin" +"quantileExactLowArray" +"quantileExactLowDistinct" +"quantileExactLowForEach" +"quantileExactLowIf" +"quantileExactLowMap" +"quantileExactLowMerge" +"quantileExactLowNull" +"quantileExactLowOrDefault" +"quantileExactLowOrNull" +"quantileExactLowResample" +"quantileExactLowSimpleState" +"quantileExactLowState" +"quantileExactMap" +"quantileExactMerge" +"quantileExactNull" +"quantileExactOrDefault" +"quantileExactOrNull" +"quantileExactResample" +"quantileExactSimpleState" +"quantileExactState" +"quantileExactWeighted" +"quantileExactWeightedArgMax" +"quantileExactWeightedArgMin" +"quantileExactWeightedArray" +"quantileExactWeightedDistinct" +"quantileExactWeightedForEach" +"quantileExactWeightedIf" +"quantileExactWeightedMap" +"quantileExactWeightedMerge" +"quantileExactWeightedNull" +"quantileExactWeightedOrDefault" +"quantileExactWeightedOrNull" +"quantileExactWeightedResample" +"quantileExactWeightedSimpleState" +"quantileExactWeightedState" +"quantileForEach" +"quantileGK" +"quantileGKArgMax" +"quantileGKArgMin" +"quantileGKArray" +"quantileGKDistinct" +"quantileGKForEach" +"quantileGKIf" +"quantileGKMap" +"quantileGKMerge" +"quantileGKNull" +"quantileGKOrDefault" +"quantileGKOrNull" +"quantileGKResample" +"quantileGKSimpleState" +"quantileGKState" +"quantileIf" +"quantileInterpolatedWeighted" +"quantileInterpolatedWeightedArgMax" +"quantileInterpolatedWeightedArgMin" +"quantileInterpolatedWeightedArray" +"quantileInterpolatedWeightedDistinct" +"quantileInterpolatedWeightedForEach" +"quantileInterpolatedWeightedIf" +"quantileInterpolatedWeightedMap" +"quantileInterpolatedWeightedMerge" +"quantileInterpolatedWeightedNull" +"quantileInterpolatedWeightedOrDefault" +"quantileInterpolatedWeightedOrNull" +"quantileInterpolatedWeightedResample" +"quantileInterpolatedWeightedSimpleState" +"quantileInterpolatedWeightedState" +"quantileMap" +"quantileMerge" +"quantileNull" +"quantileOrDefault" +"quantileOrNull" +"quantileResample" +"quantileSimpleState" +"quantileState" +"quantileTDigest" +"quantileTDigestArgMax" +"quantileTDigestArgMin" +"quantileTDigestArray" 
+"quantileTDigestDistinct" +"quantileTDigestForEach" +"quantileTDigestIf" +"quantileTDigestMap" +"quantileTDigestMerge" +"quantileTDigestNull" +"quantileTDigestOrDefault" +"quantileTDigestOrNull" +"quantileTDigestResample" +"quantileTDigestSimpleState" +"quantileTDigestState" +"quantileTDigestWeighted" +"quantileTDigestWeightedArgMax" +"quantileTDigestWeightedArgMin" +"quantileTDigestWeightedArray" +"quantileTDigestWeightedDistinct" +"quantileTDigestWeightedForEach" +"quantileTDigestWeightedIf" +"quantileTDigestWeightedMap" +"quantileTDigestWeightedMerge" +"quantileTDigestWeightedNull" +"quantileTDigestWeightedOrDefault" +"quantileTDigestWeightedOrNull" +"quantileTDigestWeightedResample" +"quantileTDigestWeightedSimpleState" +"quantileTDigestWeightedState" +"quantileTiming" +"quantileTimingArgMax" +"quantileTimingArgMin" +"quantileTimingArray" +"quantileTimingDistinct" +"quantileTimingForEach" +"quantileTimingIf" +"quantileTimingMap" +"quantileTimingMerge" +"quantileTimingNull" +"quantileTimingOrDefault" +"quantileTimingOrNull" +"quantileTimingResample" +"quantileTimingSimpleState" +"quantileTimingState" +"quantileTimingWeighted" +"quantileTimingWeightedArgMax" +"quantileTimingWeightedArgMin" +"quantileTimingWeightedArray" +"quantileTimingWeightedDistinct" +"quantileTimingWeightedForEach" +"quantileTimingWeightedIf" +"quantileTimingWeightedMap" +"quantileTimingWeightedMerge" +"quantileTimingWeightedNull" +"quantileTimingWeightedOrDefault" +"quantileTimingWeightedOrNull" +"quantileTimingWeightedResample" +"quantileTimingWeightedSimpleState" +"quantileTimingWeightedState" +"quantiles" +"quantilesArgMax" +"quantilesArgMin" +"quantilesArray" +"quantilesBFloat16" +"quantilesBFloat16ArgMax" +"quantilesBFloat16ArgMin" +"quantilesBFloat16Array" +"quantilesBFloat16Distinct" +"quantilesBFloat16ForEach" +"quantilesBFloat16If" +"quantilesBFloat16Map" +"quantilesBFloat16Merge" +"quantilesBFloat16Null" +"quantilesBFloat16OrDefault" +"quantilesBFloat16OrNull" +"quantilesBFloat16Resample" +"quantilesBFloat16SimpleState" +"quantilesBFloat16State" +"quantilesBFloat16Weighted" +"quantilesBFloat16WeightedArgMax" +"quantilesBFloat16WeightedArgMin" +"quantilesBFloat16WeightedArray" +"quantilesBFloat16WeightedDistinct" +"quantilesBFloat16WeightedForEach" +"quantilesBFloat16WeightedIf" +"quantilesBFloat16WeightedMap" +"quantilesBFloat16WeightedMerge" +"quantilesBFloat16WeightedNull" +"quantilesBFloat16WeightedOrDefault" +"quantilesBFloat16WeightedOrNull" +"quantilesBFloat16WeightedResample" +"quantilesBFloat16WeightedSimpleState" +"quantilesBFloat16WeightedState" +"quantilesDD" +"quantilesDDArgMax" +"quantilesDDArgMin" +"quantilesDDArray" +"quantilesDDDistinct" +"quantilesDDForEach" +"quantilesDDIf" +"quantilesDDMap" +"quantilesDDMerge" +"quantilesDDNull" +"quantilesDDOrDefault" +"quantilesDDOrNull" +"quantilesDDResample" +"quantilesDDSimpleState" +"quantilesDDState" +"quantilesDeterministic" +"quantilesDeterministicArgMax" +"quantilesDeterministicArgMin" +"quantilesDeterministicArray" +"quantilesDeterministicDistinct" +"quantilesDeterministicForEach" +"quantilesDeterministicIf" +"quantilesDeterministicMap" +"quantilesDeterministicMerge" +"quantilesDeterministicNull" +"quantilesDeterministicOrDefault" +"quantilesDeterministicOrNull" +"quantilesDeterministicResample" +"quantilesDeterministicSimpleState" +"quantilesDeterministicState" +"quantilesDistinct" +"quantilesExact" +"quantilesExactArgMax" +"quantilesExactArgMin" +"quantilesExactArray" +"quantilesExactDistinct" +"quantilesExactExclusive" 
+"quantilesExactExclusiveArgMax" +"quantilesExactExclusiveArgMin" +"quantilesExactExclusiveArray" +"quantilesExactExclusiveDistinct" +"quantilesExactExclusiveForEach" +"quantilesExactExclusiveIf" +"quantilesExactExclusiveMap" +"quantilesExactExclusiveMerge" +"quantilesExactExclusiveNull" +"quantilesExactExclusiveOrDefault" +"quantilesExactExclusiveOrNull" +"quantilesExactExclusiveResample" +"quantilesExactExclusiveSimpleState" +"quantilesExactExclusiveState" +"quantilesExactForEach" +"quantilesExactHigh" +"quantilesExactHighArgMax" +"quantilesExactHighArgMin" +"quantilesExactHighArray" +"quantilesExactHighDistinct" +"quantilesExactHighForEach" +"quantilesExactHighIf" +"quantilesExactHighMap" +"quantilesExactHighMerge" +"quantilesExactHighNull" +"quantilesExactHighOrDefault" +"quantilesExactHighOrNull" +"quantilesExactHighResample" +"quantilesExactHighSimpleState" +"quantilesExactHighState" +"quantilesExactIf" +"quantilesExactInclusive" +"quantilesExactInclusiveArgMax" +"quantilesExactInclusiveArgMin" +"quantilesExactInclusiveArray" +"quantilesExactInclusiveDistinct" +"quantilesExactInclusiveForEach" +"quantilesExactInclusiveIf" +"quantilesExactInclusiveMap" +"quantilesExactInclusiveMerge" +"quantilesExactInclusiveNull" +"quantilesExactInclusiveOrDefault" +"quantilesExactInclusiveOrNull" +"quantilesExactInclusiveResample" +"quantilesExactInclusiveSimpleState" +"quantilesExactInclusiveState" +"quantilesExactLow" +"quantilesExactLowArgMax" +"quantilesExactLowArgMin" +"quantilesExactLowArray" +"quantilesExactLowDistinct" +"quantilesExactLowForEach" +"quantilesExactLowIf" +"quantilesExactLowMap" +"quantilesExactLowMerge" +"quantilesExactLowNull" +"quantilesExactLowOrDefault" +"quantilesExactLowOrNull" +"quantilesExactLowResample" +"quantilesExactLowSimpleState" +"quantilesExactLowState" +"quantilesExactMap" +"quantilesExactMerge" +"quantilesExactNull" +"quantilesExactOrDefault" +"quantilesExactOrNull" +"quantilesExactResample" +"quantilesExactSimpleState" +"quantilesExactState" +"quantilesExactWeighted" +"quantilesExactWeightedArgMax" +"quantilesExactWeightedArgMin" +"quantilesExactWeightedArray" +"quantilesExactWeightedDistinct" +"quantilesExactWeightedForEach" +"quantilesExactWeightedIf" +"quantilesExactWeightedMap" +"quantilesExactWeightedMerge" +"quantilesExactWeightedNull" +"quantilesExactWeightedOrDefault" +"quantilesExactWeightedOrNull" +"quantilesExactWeightedResample" +"quantilesExactWeightedSimpleState" +"quantilesExactWeightedState" +"quantilesForEach" +"quantilesGK" +"quantilesGKArgMax" +"quantilesGKArgMin" +"quantilesGKArray" +"quantilesGKDistinct" +"quantilesGKForEach" +"quantilesGKIf" +"quantilesGKMap" +"quantilesGKMerge" +"quantilesGKNull" +"quantilesGKOrDefault" +"quantilesGKOrNull" +"quantilesGKResample" +"quantilesGKSimpleState" +"quantilesGKState" +"quantilesIf" +"quantilesInterpolatedWeighted" +"quantilesInterpolatedWeightedArgMax" +"quantilesInterpolatedWeightedArgMin" +"quantilesInterpolatedWeightedArray" +"quantilesInterpolatedWeightedDistinct" +"quantilesInterpolatedWeightedForEach" +"quantilesInterpolatedWeightedIf" +"quantilesInterpolatedWeightedMap" +"quantilesInterpolatedWeightedMerge" +"quantilesInterpolatedWeightedNull" +"quantilesInterpolatedWeightedOrDefault" +"quantilesInterpolatedWeightedOrNull" +"quantilesInterpolatedWeightedResample" +"quantilesInterpolatedWeightedSimpleState" +"quantilesInterpolatedWeightedState" +"quantilesMap" +"quantilesMerge" +"quantilesNull" +"quantilesOrDefault" +"quantilesOrNull" +"quantilesResample" +"quantilesSimpleState" 
+"quantilesState" +"quantilesTDigest" +"quantilesTDigestArgMax" +"quantilesTDigestArgMin" +"quantilesTDigestArray" +"quantilesTDigestDistinct" +"quantilesTDigestForEach" +"quantilesTDigestIf" +"quantilesTDigestMap" +"quantilesTDigestMerge" +"quantilesTDigestNull" +"quantilesTDigestOrDefault" +"quantilesTDigestOrNull" +"quantilesTDigestResample" +"quantilesTDigestSimpleState" +"quantilesTDigestState" +"quantilesTDigestWeighted" +"quantilesTDigestWeightedArgMax" +"quantilesTDigestWeightedArgMin" +"quantilesTDigestWeightedArray" +"quantilesTDigestWeightedDistinct" +"quantilesTDigestWeightedForEach" +"quantilesTDigestWeightedIf" +"quantilesTDigestWeightedMap" +"quantilesTDigestWeightedMerge" +"quantilesTDigestWeightedNull" +"quantilesTDigestWeightedOrDefault" +"quantilesTDigestWeightedOrNull" +"quantilesTDigestWeightedResample" +"quantilesTDigestWeightedSimpleState" +"quantilesTDigestWeightedState" +"quantilesTiming" +"quantilesTimingArgMax" +"quantilesTimingArgMin" +"quantilesTimingArray" +"quantilesTimingDistinct" +"quantilesTimingForEach" +"quantilesTimingIf" +"quantilesTimingMap" +"quantilesTimingMerge" +"quantilesTimingNull" +"quantilesTimingOrDefault" +"quantilesTimingOrNull" +"quantilesTimingResample" +"quantilesTimingSimpleState" +"quantilesTimingState" +"quantilesTimingWeighted" +"quantilesTimingWeightedArgMax" +"quantilesTimingWeightedArgMin" +"quantilesTimingWeightedArray" +"quantilesTimingWeightedDistinct" +"quantilesTimingWeightedForEach" +"quantilesTimingWeightedIf" +"quantilesTimingWeightedMap" +"quantilesTimingWeightedMerge" +"quantilesTimingWeightedNull" +"quantilesTimingWeightedOrDefault" +"quantilesTimingWeightedOrNull" +"quantilesTimingWeightedResample" +"quantilesTimingWeightedSimpleState" +"quantilesTimingWeightedState" +"queryID" +"queryString" +"queryStringAndFragment" +"query_id" +"radians" +"rand" +"rand32" +"rand64" +"randBernoulli" +"randBinomial" +"randCanonical" +"randChiSquared" +"randConstant" +"randExponential" +"randFisherF" +"randLogNormal" +"randNegativeBinomial" +"randNormal" +"randPoisson" +"randStudentT" +"randUniform" +"randomFixedString" +"randomPrintableASCII" +"randomString" +"randomStringUTF8" +"range" +"rank" +"rankArgMax" +"rankArgMin" +"rankArray" +"rankCorr" +"rankCorrArgMax" +"rankCorrArgMin" +"rankCorrArray" +"rankCorrDistinct" +"rankCorrForEach" +"rankCorrIf" +"rankCorrMap" +"rankCorrMerge" +"rankCorrNull" +"rankCorrOrDefault" +"rankCorrOrNull" +"rankCorrResample" +"rankCorrSimpleState" +"rankCorrState" +"rankDistinct" +"rankForEach" +"rankIf" +"rankMap" +"rankMerge" +"rankNull" +"rankOrDefault" +"rankOrNull" +"rankResample" +"rankSimpleState" +"rankState" +"readWKTLineString" +"readWKTMultiLineString" +"readWKTMultiPolygon" +"readWKTPoint" +"readWKTPolygon" +"readWKTRing" +"regexpExtract" +"regexpQuoteMeta" +"regionHierarchy" +"regionIn" +"regionToArea" +"regionToCity" +"regionToContinent" +"regionToCountry" +"regionToDistrict" +"regionToName" +"regionToPopulation" +"regionToTopContinent" +"reinterpret" +"reinterpretAsDate" +"reinterpretAsDateTime" +"reinterpretAsFixedString" +"reinterpretAsFloat32" +"reinterpretAsFloat64" +"reinterpretAsInt128" +"reinterpretAsInt16" +"reinterpretAsInt256" +"reinterpretAsInt32" +"reinterpretAsInt64" +"reinterpretAsInt8" +"reinterpretAsString" +"reinterpretAsUInt128" +"reinterpretAsUInt16" +"reinterpretAsUInt256" +"reinterpretAsUInt32" +"reinterpretAsUInt64" +"reinterpretAsUInt8" +"reinterpretAsUUID" +"repeat" +"replace" +"replaceAll" +"replaceOne" +"replaceRegexpAll" +"replaceRegexpOne" +"replicate" 
+"retention" +"retentionArgMax" +"retentionArgMin" +"retentionArray" +"retentionDistinct" +"retentionForEach" +"retentionIf" +"retentionMap" +"retentionMerge" +"retentionNull" +"retentionOrDefault" +"retentionOrNull" +"retentionResample" +"retentionSimpleState" +"retentionState" +"reverse" +"reverseUTF8" +"revision" +"right" +"rightPad" +"rightPadUTF8" +"rightUTF8" +"round" +"roundAge" +"roundBankers" +"roundDown" +"roundDuration" +"roundToExp2" +"rowNumberInAllBlocks" +"rowNumberInBlock" +"row_number" +"row_numberArgMax" +"row_numberArgMin" +"row_numberArray" +"row_numberDistinct" +"row_numberForEach" +"row_numberIf" +"row_numberMap" +"row_numberMerge" +"row_numberNull" +"row_numberOrDefault" +"row_numberOrNull" +"row_numberResample" +"row_numberSimpleState" +"row_numberState" +"rpad" +"rtrim" +"runningAccumulate" +"runningConcurrency" +"runningDifference" +"runningDifferenceStartingWithFirstValue" +"s2CapContains" +"s2CapUnion" +"s2CellsIntersect" +"s2GetNeighbors" +"s2RectAdd" +"s2RectContains" +"s2RectIntersection" +"s2RectUnion" +"s2ToGeo" +"scalarProduct" +"sequenceCount" +"sequenceCountArgMax" +"sequenceCountArgMin" +"sequenceCountArray" +"sequenceCountDistinct" +"sequenceCountForEach" +"sequenceCountIf" +"sequenceCountMap" +"sequenceCountMerge" +"sequenceCountNull" +"sequenceCountOrDefault" +"sequenceCountOrNull" +"sequenceCountResample" +"sequenceCountSimpleState" +"sequenceCountState" +"sequenceMatch" +"sequenceMatchArgMax" +"sequenceMatchArgMin" +"sequenceMatchArray" +"sequenceMatchDistinct" +"sequenceMatchForEach" +"sequenceMatchIf" +"sequenceMatchMap" +"sequenceMatchMerge" +"sequenceMatchNull" +"sequenceMatchOrDefault" +"sequenceMatchOrNull" +"sequenceMatchResample" +"sequenceMatchSimpleState" +"sequenceMatchState" +"sequenceNextNode" +"sequenceNextNodeArgMax" +"sequenceNextNodeArgMin" +"sequenceNextNodeArray" +"sequenceNextNodeDistinct" +"sequenceNextNodeForEach" +"sequenceNextNodeIf" +"sequenceNextNodeMap" +"sequenceNextNodeMerge" +"sequenceNextNodeNull" +"sequenceNextNodeOrDefault" +"sequenceNextNodeOrNull" +"sequenceNextNodeResample" +"sequenceNextNodeSimpleState" +"sequenceNextNodeState" +"seriesDecomposeSTL" +"seriesOutliersDetectTukey" +"seriesPeriodDetectFFT" +"serverTimeZone" +"serverTimezone" +"serverUUID" +"shardCount" +"shardNum" +"showCertificate" +"sigmoid" +"sign" +"simpleJSONExtractBool" +"simpleJSONExtractFloat" +"simpleJSONExtractInt" +"simpleJSONExtractRaw" +"simpleJSONExtractString" +"simpleJSONExtractUInt" +"simpleJSONHas" +"simpleLinearRegression" +"simpleLinearRegressionArgMax" +"simpleLinearRegressionArgMin" +"simpleLinearRegressionArray" +"simpleLinearRegressionDistinct" +"simpleLinearRegressionForEach" +"simpleLinearRegressionIf" +"simpleLinearRegressionMap" +"simpleLinearRegressionMerge" +"simpleLinearRegressionNull" +"simpleLinearRegressionOrDefault" +"simpleLinearRegressionOrNull" +"simpleLinearRegressionResample" +"simpleLinearRegressionSimpleState" +"simpleLinearRegressionState" +"sin" +"singleValueOrNull" +"singleValueOrNullArgMax" +"singleValueOrNullArgMin" +"singleValueOrNullArray" +"singleValueOrNullDistinct" +"singleValueOrNullForEach" +"singleValueOrNullIf" +"singleValueOrNullMap" +"singleValueOrNullMerge" +"singleValueOrNullNull" +"singleValueOrNullOrDefault" +"singleValueOrNullOrNull" +"singleValueOrNullResample" +"singleValueOrNullSimpleState" +"singleValueOrNullState" +"sinh" +"sipHash128" +"sipHash128Keyed" +"sipHash128Reference" +"sipHash128ReferenceKeyed" +"sipHash64" +"sipHash64Keyed" +"skewPop" +"skewPopArgMax" +"skewPopArgMin" 
+"skewPopArray" +"skewPopDistinct" +"skewPopForEach" +"skewPopIf" +"skewPopMap" +"skewPopMerge" +"skewPopNull" +"skewPopOrDefault" +"skewPopOrNull" +"skewPopResample" +"skewPopSimpleState" +"skewPopState" +"skewSamp" +"skewSampArgMax" +"skewSampArgMin" +"skewSampArray" +"skewSampDistinct" +"skewSampForEach" +"skewSampIf" +"skewSampMap" +"skewSampMerge" +"skewSampNull" +"skewSampOrDefault" +"skewSampOrNull" +"skewSampResample" +"skewSampSimpleState" +"skewSampState" +"sleep" +"sleepEachRow" +"snowflakeIDToDateTime" +"snowflakeIDToDateTime64" +"snowflakeToDateTime" +"snowflakeToDateTime64" +"soundex" +"space" +"sparkBar" +"sparkBarArgMax" +"sparkBarArgMin" +"sparkBarArray" +"sparkBarDistinct" +"sparkBarForEach" +"sparkBarIf" +"sparkBarMap" +"sparkBarMerge" +"sparkBarNull" +"sparkBarOrDefault" +"sparkBarOrNull" +"sparkBarResample" +"sparkBarSimpleState" +"sparkBarState" +"sparkbar" +"sparkbarArgMax" +"sparkbarArgMin" +"sparkbarArray" +"sparkbarDistinct" +"sparkbarForEach" +"sparkbarIf" +"sparkbarMap" +"sparkbarMerge" +"sparkbarNull" +"sparkbarOrDefault" +"sparkbarOrNull" +"sparkbarResample" +"sparkbarSimpleState" +"sparkbarState" +"splitByAlpha" +"splitByChar" +"splitByNonAlpha" +"splitByRegexp" +"splitByString" +"splitByWhitespace" +"sqid" +"sqidDecode" +"sqidEncode" +"sqrt" +"startsWith" +"startsWithUTF8" +"stddevPop" +"stddevPopArgMax" +"stddevPopArgMin" +"stddevPopArray" +"stddevPopDistinct" +"stddevPopForEach" +"stddevPopIf" +"stddevPopMap" +"stddevPopMerge" +"stddevPopNull" +"stddevPopOrDefault" +"stddevPopOrNull" +"stddevPopResample" +"stddevPopSimpleState" +"stddevPopStable" +"stddevPopStableArgMax" +"stddevPopStableArgMin" +"stddevPopStableArray" +"stddevPopStableDistinct" +"stddevPopStableForEach" +"stddevPopStableIf" +"stddevPopStableMap" +"stddevPopStableMerge" +"stddevPopStableNull" +"stddevPopStableOrDefault" +"stddevPopStableOrNull" +"stddevPopStableResample" +"stddevPopStableSimpleState" +"stddevPopStableState" +"stddevPopState" +"stddevSamp" +"stddevSampArgMax" +"stddevSampArgMin" +"stddevSampArray" +"stddevSampDistinct" +"stddevSampForEach" +"stddevSampIf" +"stddevSampMap" +"stddevSampMerge" +"stddevSampNull" +"stddevSampOrDefault" +"stddevSampOrNull" +"stddevSampResample" +"stddevSampSimpleState" +"stddevSampStable" +"stddevSampStableArgMax" +"stddevSampStableArgMin" +"stddevSampStableArray" +"stddevSampStableDistinct" +"stddevSampStableForEach" +"stddevSampStableIf" +"stddevSampStableMap" +"stddevSampStableMerge" +"stddevSampStableNull" +"stddevSampStableOrDefault" +"stddevSampStableOrNull" +"stddevSampStableResample" +"stddevSampStableSimpleState" +"stddevSampStableState" +"stddevSampState" +"stem" +"stochasticLinearRegression" +"stochasticLinearRegressionArgMax" +"stochasticLinearRegressionArgMin" +"stochasticLinearRegressionArray" +"stochasticLinearRegressionDistinct" +"stochasticLinearRegressionForEach" +"stochasticLinearRegressionIf" +"stochasticLinearRegressionMap" +"stochasticLinearRegressionMerge" +"stochasticLinearRegressionNull" +"stochasticLinearRegressionOrDefault" +"stochasticLinearRegressionOrNull" +"stochasticLinearRegressionResample" +"stochasticLinearRegressionSimpleState" +"stochasticLinearRegressionState" +"stochasticLogisticRegression" +"stochasticLogisticRegressionArgMax" +"stochasticLogisticRegressionArgMin" +"stochasticLogisticRegressionArray" +"stochasticLogisticRegressionDistinct" +"stochasticLogisticRegressionForEach" +"stochasticLogisticRegressionIf" +"stochasticLogisticRegressionMap" +"stochasticLogisticRegressionMerge" 
+"stochasticLogisticRegressionNull" +"stochasticLogisticRegressionOrDefault" +"stochasticLogisticRegressionOrNull" +"stochasticLogisticRegressionResample" +"stochasticLogisticRegressionSimpleState" +"stochasticLogisticRegressionState" +"str_to_date" +"str_to_map" +"stringJaccardIndex" +"stringJaccardIndexUTF8" +"stringToH3" +"structureToCapnProtoSchema" +"structureToProtobufSchema" +"studentTTest" +"studentTTestArgMax" +"studentTTestArgMin" +"studentTTestArray" +"studentTTestDistinct" +"studentTTestForEach" +"studentTTestIf" +"studentTTestMap" +"studentTTestMerge" +"studentTTestNull" +"studentTTestOrDefault" +"studentTTestOrNull" +"studentTTestResample" +"studentTTestSimpleState" +"studentTTestState" +"subBitmap" +"subDate" +"substr" +"substring" +"substringIndex" +"substringIndexUTF8" +"substringUTF8" +"subtractDays" +"subtractHours" +"subtractInterval" +"subtractMicroseconds" +"subtractMilliseconds" +"subtractMinutes" +"subtractMonths" +"subtractNanoseconds" +"subtractQuarters" +"subtractSeconds" +"subtractTupleOfIntervals" +"subtractWeeks" +"subtractYears" +"sum" +"sumArgMax" +"sumArgMin" +"sumArray" +"sumCount" +"sumCountArgMax" +"sumCountArgMin" +"sumCountArray" +"sumCountDistinct" +"sumCountForEach" +"sumCountIf" +"sumCountMap" +"sumCountMerge" +"sumCountNull" +"sumCountOrDefault" +"sumCountOrNull" +"sumCountResample" +"sumCountSimpleState" +"sumCountState" +"sumDistinct" +"sumForEach" +"sumIf" +"sumKahan" +"sumKahanArgMax" +"sumKahanArgMin" +"sumKahanArray" +"sumKahanDistinct" +"sumKahanForEach" +"sumKahanIf" +"sumKahanMap" +"sumKahanMerge" +"sumKahanNull" +"sumKahanOrDefault" +"sumKahanOrNull" +"sumKahanResample" +"sumKahanSimpleState" +"sumKahanState" +"sumMap" +"sumMapFiltered" +"sumMapFilteredArgMax" +"sumMapFilteredArgMin" +"sumMapFilteredArray" +"sumMapFilteredDistinct" +"sumMapFilteredForEach" +"sumMapFilteredIf" +"sumMapFilteredMap" +"sumMapFilteredMerge" +"sumMapFilteredNull" +"sumMapFilteredOrDefault" +"sumMapFilteredOrNull" +"sumMapFilteredResample" +"sumMapFilteredSimpleState" +"sumMapFilteredState" +"sumMapFilteredWithOverflow" +"sumMapFilteredWithOverflowArgMax" +"sumMapFilteredWithOverflowArgMin" +"sumMapFilteredWithOverflowArray" +"sumMapFilteredWithOverflowDistinct" +"sumMapFilteredWithOverflowForEach" +"sumMapFilteredWithOverflowIf" +"sumMapFilteredWithOverflowMap" +"sumMapFilteredWithOverflowMerge" +"sumMapFilteredWithOverflowNull" +"sumMapFilteredWithOverflowOrDefault" +"sumMapFilteredWithOverflowOrNull" +"sumMapFilteredWithOverflowResample" +"sumMapFilteredWithOverflowSimpleState" +"sumMapFilteredWithOverflowState" +"sumMapWithOverflow" +"sumMapWithOverflowArgMax" +"sumMapWithOverflowArgMin" +"sumMapWithOverflowArray" +"sumMapWithOverflowDistinct" +"sumMapWithOverflowForEach" +"sumMapWithOverflowIf" +"sumMapWithOverflowMap" +"sumMapWithOverflowMerge" +"sumMapWithOverflowNull" +"sumMapWithOverflowOrDefault" +"sumMapWithOverflowOrNull" +"sumMapWithOverflowResample" +"sumMapWithOverflowSimpleState" +"sumMapWithOverflowState" +"sumMappedArrays" +"sumMappedArraysArgMax" +"sumMappedArraysArgMin" +"sumMappedArraysArray" +"sumMappedArraysDistinct" +"sumMappedArraysForEach" +"sumMappedArraysIf" +"sumMappedArraysMap" +"sumMappedArraysMerge" +"sumMappedArraysNull" +"sumMappedArraysOrDefault" +"sumMappedArraysOrNull" +"sumMappedArraysResample" +"sumMappedArraysSimpleState" +"sumMappedArraysState" +"sumMerge" +"sumNull" +"sumOrDefault" +"sumOrNull" +"sumResample" +"sumSimpleState" +"sumState" +"sumWithOverflow" +"sumWithOverflowArgMax" +"sumWithOverflowArgMin" 
+"sumWithOverflowArray" +"sumWithOverflowDistinct" +"sumWithOverflowForEach" +"sumWithOverflowIf" +"sumWithOverflowMap" +"sumWithOverflowMerge" +"sumWithOverflowNull" +"sumWithOverflowOrDefault" +"sumWithOverflowOrNull" +"sumWithOverflowResample" +"sumWithOverflowSimpleState" +"sumWithOverflowState" +"svg" +"synonyms" +"tan" +"tanh" +"tcpPort" +"tgamma" +"theilsU" +"theilsUArgMax" +"theilsUArgMin" +"theilsUArray" +"theilsUDistinct" +"theilsUForEach" +"theilsUIf" +"theilsUMap" +"theilsUMerge" +"theilsUNull" +"theilsUOrDefault" +"theilsUOrNull" +"theilsUResample" +"theilsUSimpleState" +"theilsUState" +"throwIf" +"tid" +"timeDiff" +"timeSlot" +"timeSlots" +"timeZone" +"timeZoneOf" +"timeZoneOffset" +"timestamp" +"timestampDiff" +"timestamp_diff" +"timezone" +"timezoneOf" +"timezoneOffset" +"toBool" +"toColumnTypeName" +"toDate" +"toDate32" +"toDate32OrDefault" +"toDate32OrNull" +"toDate32OrZero" +"toDateOrDefault" +"toDateOrNull" +"toDateOrZero" +"toDateTime" +"toDateTime32" +"toDateTime64" +"toDateTime64OrDefault" +"toDateTime64OrNull" +"toDateTime64OrZero" +"toDateTimeOrDefault" +"toDateTimeOrNull" +"toDateTimeOrZero" +"toDayOfMonth" +"toDayOfWeek" +"toDayOfYear" +"toDaysSinceYearZero" +"toDecimal128" +"toDecimal128OrDefault" +"toDecimal128OrNull" +"toDecimal128OrZero" +"toDecimal256" +"toDecimal256OrDefault" +"toDecimal256OrNull" +"toDecimal256OrZero" +"toDecimal32" +"toDecimal32OrDefault" +"toDecimal32OrNull" +"toDecimal32OrZero" +"toDecimal64" +"toDecimal64OrDefault" +"toDecimal64OrNull" +"toDecimal64OrZero" +"toDecimalString" +"toFixedString" +"toFloat32" +"toFloat32OrDefault" +"toFloat32OrNull" +"toFloat32OrZero" +"toFloat64" +"toFloat64OrDefault" +"toFloat64OrNull" +"toFloat64OrZero" +"toHour" +"toIPv4" +"toIPv4OrDefault" +"toIPv4OrNull" +"toIPv4OrZero" +"toIPv6" +"toIPv6OrDefault" +"toIPv6OrNull" +"toIPv6OrZero" +"toISOWeek" +"toISOYear" +"toInt128" +"toInt128OrDefault" +"toInt128OrNull" +"toInt128OrZero" +"toInt16" +"toInt16OrDefault" +"toInt16OrNull" +"toInt16OrZero" +"toInt256" +"toInt256OrDefault" +"toInt256OrNull" +"toInt256OrZero" +"toInt32" +"toInt32OrDefault" +"toInt32OrNull" +"toInt32OrZero" +"toInt64" +"toInt64OrDefault" +"toInt64OrNull" +"toInt64OrZero" +"toInt8" +"toInt8OrDefault" +"toInt8OrNull" +"toInt8OrZero" +"toIntervalDay" +"toIntervalHour" +"toIntervalMicrosecond" +"toIntervalMillisecond" +"toIntervalMinute" +"toIntervalMonth" +"toIntervalNanosecond" +"toIntervalQuarter" +"toIntervalSecond" +"toIntervalWeek" +"toIntervalYear" +"toJSONString" +"toLastDayOfMonth" +"toLastDayOfWeek" +"toLowCardinality" +"toMillisecond" +"toMinute" +"toModifiedJulianDay" +"toModifiedJulianDayOrNull" +"toMonday" +"toMonth" +"toNullable" +"toQuarter" +"toRelativeDayNum" +"toRelativeHourNum" +"toRelativeMinuteNum" +"toRelativeMonthNum" +"toRelativeQuarterNum" +"toRelativeSecondNum" +"toRelativeWeekNum" +"toRelativeYearNum" +"toSecond" +"toStartOfDay" +"toStartOfFifteenMinutes" +"toStartOfFiveMinute" +"toStartOfFiveMinutes" +"toStartOfHour" +"toStartOfISOYear" +"toStartOfInterval" +"toStartOfMicrosecond" +"toStartOfMillisecond" +"toStartOfMinute" +"toStartOfMonth" +"toStartOfNanosecond" +"toStartOfQuarter" +"toStartOfSecond" +"toStartOfTenMinutes" +"toStartOfWeek" +"toStartOfYear" +"toString" +"toStringCutToZero" +"toTime" +"toTimeZone" +"toTimezone" +"toTypeName" +"toUInt128" +"toUInt128OrDefault" +"toUInt128OrNull" +"toUInt128OrZero" +"toUInt16" +"toUInt16OrDefault" +"toUInt16OrNull" +"toUInt16OrZero" +"toUInt256" +"toUInt256OrDefault" +"toUInt256OrNull" +"toUInt256OrZero" +"toUInt32" 
+"toUInt32OrDefault" +"toUInt32OrNull" +"toUInt32OrZero" +"toUInt64" +"toUInt64OrDefault" +"toUInt64OrNull" +"toUInt64OrZero" +"toUInt8" +"toUInt8OrDefault" +"toUInt8OrNull" +"toUInt8OrZero" +"toUTCTimestamp" +"toUUID" +"toUUIDOrDefault" +"toUUIDOrNull" +"toUUIDOrZero" +"toUnixTimestamp" +"toUnixTimestamp64Micro" +"toUnixTimestamp64Milli" +"toUnixTimestamp64Nano" +"toValidUTF8" +"toWeek" +"toYYYYMM" +"toYYYYMMDD" +"toYYYYMMDDhhmmss" +"toYear" +"toYearWeek" +"to_utc_timestamp" +"today" +"tokens" +"topK" +"topKArgMax" +"topKArgMin" +"topKArray" +"topKDistinct" +"topKForEach" +"topKIf" +"topKMap" +"topKMerge" +"topKNull" +"topKOrDefault" +"topKOrNull" +"topKResample" +"topKSimpleState" +"topKState" +"topKWeighted" +"topKWeightedArgMax" +"topKWeightedArgMin" +"topKWeightedArray" +"topKWeightedDistinct" +"topKWeightedForEach" +"topKWeightedIf" +"topKWeightedMap" +"topKWeightedMerge" +"topKWeightedNull" +"topKWeightedOrDefault" +"topKWeightedOrNull" +"topKWeightedResample" +"topKWeightedSimpleState" +"topKWeightedState" +"topLevelDomain" +"topLevelDomainRFC" +"transactionID" +"transactionLatestSnapshot" +"transactionOldestSnapshot" +"transform" +"translate" +"translateUTF8" +"trim" +"trimBoth" +"trimLeft" +"trimRight" +"trunc" +"truncate" +"tryBase58Decode" +"tryBase64Decode" +"tryBase64URLDecode" +"tryDecrypt" +"tryIdnaEncode" +"tryPunycodeDecode" +"tumble" +"tumbleEnd" +"tumbleStart" +"tuple" +"tupleConcat" +"tupleDivide" +"tupleDivideByNumber" +"tupleElement" +"tupleHammingDistance" +"tupleIntDiv" +"tupleIntDivByNumber" +"tupleIntDivOrZero" +"tupleIntDivOrZeroByNumber" +"tupleMinus" +"tupleModulo" +"tupleModuloByNumber" +"tupleMultiply" +"tupleMultiplyByNumber" +"tupleNames" +"tupleNegate" +"tuplePlus" +"tupleToNameValuePairs" +"ucase" +"unbin" +"unhex" +"uniq" +"uniqArgMax" +"uniqArgMin" +"uniqArray" +"uniqCombined" +"uniqCombined64" +"uniqCombined64ArgMax" +"uniqCombined64ArgMin" +"uniqCombined64Array" +"uniqCombined64Distinct" +"uniqCombined64ForEach" +"uniqCombined64If" +"uniqCombined64Map" +"uniqCombined64Merge" +"uniqCombined64Null" +"uniqCombined64OrDefault" +"uniqCombined64OrNull" +"uniqCombined64Resample" +"uniqCombined64SimpleState" +"uniqCombined64State" +"uniqCombinedArgMax" +"uniqCombinedArgMin" +"uniqCombinedArray" +"uniqCombinedDistinct" +"uniqCombinedForEach" +"uniqCombinedIf" +"uniqCombinedMap" +"uniqCombinedMerge" +"uniqCombinedNull" +"uniqCombinedOrDefault" +"uniqCombinedOrNull" +"uniqCombinedResample" +"uniqCombinedSimpleState" +"uniqCombinedState" +"uniqDistinct" +"uniqExact" +"uniqExactArgMax" +"uniqExactArgMin" +"uniqExactArray" +"uniqExactDistinct" +"uniqExactForEach" +"uniqExactIf" +"uniqExactMap" +"uniqExactMerge" +"uniqExactNull" +"uniqExactOrDefault" +"uniqExactOrNull" +"uniqExactResample" +"uniqExactSimpleState" +"uniqExactState" +"uniqForEach" +"uniqHLL12" +"uniqHLL12ArgMax" +"uniqHLL12ArgMin" +"uniqHLL12Array" +"uniqHLL12Distinct" +"uniqHLL12ForEach" +"uniqHLL12If" +"uniqHLL12Map" +"uniqHLL12Merge" +"uniqHLL12Null" +"uniqHLL12OrDefault" +"uniqHLL12OrNull" +"uniqHLL12Resample" +"uniqHLL12SimpleState" +"uniqHLL12State" +"uniqIf" +"uniqMap" +"uniqMerge" +"uniqNull" +"uniqOrDefault" +"uniqOrNull" +"uniqResample" +"uniqSimpleState" +"uniqState" +"uniqTheta" +"uniqThetaArgMax" +"uniqThetaArgMin" +"uniqThetaArray" +"uniqThetaDistinct" +"uniqThetaForEach" +"uniqThetaIf" +"uniqThetaIntersect" +"uniqThetaMap" +"uniqThetaMerge" +"uniqThetaNot" +"uniqThetaNull" +"uniqThetaOrDefault" +"uniqThetaOrNull" +"uniqThetaResample" +"uniqThetaSimpleState" +"uniqThetaState" 
+"uniqThetaUnion" +"uniqUpTo" +"uniqUpToArgMax" +"uniqUpToArgMin" +"uniqUpToArray" +"uniqUpToDistinct" +"uniqUpToForEach" +"uniqUpToIf" +"uniqUpToMap" +"uniqUpToMerge" +"uniqUpToNull" +"uniqUpToOrDefault" +"uniqUpToOrNull" +"uniqUpToResample" +"uniqUpToSimpleState" +"uniqUpToState" +"upper" +"upperUTF8" +"uptime" +"user" +"validateNestedArraySizes" +"varPop" +"varPopArgMax" +"varPopArgMin" +"varPopArray" +"varPopDistinct" +"varPopForEach" +"varPopIf" +"varPopMap" +"varPopMerge" +"varPopNull" +"varPopOrDefault" +"varPopOrNull" +"varPopResample" +"varPopSimpleState" +"varPopStable" +"varPopStableArgMax" +"varPopStableArgMin" +"varPopStableArray" +"varPopStableDistinct" +"varPopStableForEach" +"varPopStableIf" +"varPopStableMap" +"varPopStableMerge" +"varPopStableNull" +"varPopStableOrDefault" +"varPopStableOrNull" +"varPopStableResample" +"varPopStableSimpleState" +"varPopStableState" +"varPopState" +"varSamp" +"varSampArgMax" +"varSampArgMin" +"varSampArray" +"varSampDistinct" +"varSampForEach" +"varSampIf" +"varSampMap" +"varSampMerge" +"varSampNull" +"varSampOrDefault" +"varSampOrNull" +"varSampResample" +"varSampSimpleState" +"varSampStable" +"varSampStableArgMax" +"varSampStableArgMin" +"varSampStableArray" +"varSampStableDistinct" +"varSampStableForEach" +"varSampStableIf" +"varSampStableMap" +"varSampStableMerge" +"varSampStableNull" +"varSampStableOrDefault" +"varSampStableOrNull" +"varSampStableResample" +"varSampStableSimpleState" +"varSampStableState" +"varSampState" +"variantElement" +"variantType" +"vectorDifference" +"vectorSum" +"version" +"visibleWidth" +"visitParamExtractBool" +"visitParamExtractFloat" +"visitParamExtractInt" +"visitParamExtractRaw" +"visitParamExtractString" +"visitParamExtractUInt" +"visitParamHas" +"week" +"welchTTest" +"welchTTestArgMax" +"welchTTestArgMin" +"welchTTestArray" +"welchTTestDistinct" +"welchTTestForEach" +"welchTTestIf" +"welchTTestMap" +"welchTTestMerge" +"welchTTestNull" +"welchTTestOrDefault" +"welchTTestOrNull" +"welchTTestResample" +"welchTTestSimpleState" +"welchTTestState" +"widthBucket" +"width_bucket" +"windowFunnel" +"windowFunnelArgMax" +"windowFunnelArgMin" +"windowFunnelArray" +"windowFunnelDistinct" +"windowFunnelForEach" +"windowFunnelIf" +"windowFunnelMap" +"windowFunnelMerge" +"windowFunnelNull" +"windowFunnelOrDefault" +"windowFunnelOrNull" +"windowFunnelResample" +"windowFunnelSimpleState" +"windowFunnelState" +"windowID" +"wkt" +"wordShingleMinHash" +"wordShingleMinHashArg" +"wordShingleMinHashArgCaseInsensitive" +"wordShingleMinHashArgCaseInsensitiveUTF8" +"wordShingleMinHashArgUTF8" +"wordShingleMinHashCaseInsensitive" +"wordShingleMinHashCaseInsensitiveUTF8" +"wordShingleMinHashUTF8" +"wordShingleSimHash" +"wordShingleSimHashCaseInsensitive" +"wordShingleSimHashCaseInsensitiveUTF8" +"wordShingleSimHashUTF8" +"wyHash64" +"xor" +"xxHash32" +"xxHash64" +"xxh3" +"yandexConsistentHash" +"yearweek" +"yesterday" +"zookeeperSessionUptime" diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index 6f2a88c22fa..e562595fb67 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -126,6 +126,7 @@ "JSONHas" "JSONKey" "JSONLength" +"JSONMergePatch" "JSONType" "JSON_ARRAY_LENGTH" "JSON_EXISTS" @@ -227,6 +228,8 @@ "UTC_timestamp" "UUIDNumToString" "UUIDStringToNum" +"UUIDToNum" +"UUIDv7ToDateTime" "VAR_POP" "VAR_POPArgMax" "VAR_POPArgMin" @@ -263,6 +266,7 @@ "YYYYMMDDhhmmssToDateTime" "YYYYMMDDhhmmssToDateTime64" "_CAST" +"__actionName" 
"__bitBoolMaskAnd" "__bitBoolMaskOr" "__bitSwapLastTwo" @@ -660,6 +664,8 @@ "base58Encode" "base64Decode" "base64Encode" +"base64URLDecode" +"base64URLEncode" "basename" "bin" "bitAnd" @@ -744,8 +750,15 @@ "cbrt" "ceil" "ceiling" +"changeDay" +"changeHour" +"changeMinute" +"changeMonth" +"changeSecond" +"changeYear" "char" "cityHash64" +"clamp" "coalesce" "concat" "concatAssumeInjective" @@ -970,6 +983,7 @@ "current_date" "current_schemas" "current_timestamp" +"current_user" "cutFragment" "cutIPv6" "cutQueryString" @@ -988,7 +1002,9 @@ "dateDiff" "dateName" "dateTime64ToSnowflake" +"dateTime64ToSnowflakeID" "dateTimeToSnowflake" +"dateTimeToSnowflakeID" "dateTrunc" "date_diff" "decodeHTMLComponent" @@ -1032,6 +1048,21 @@ "deltaSumTimestampSimpleState" "deltaSumTimestampState" "demangle" +"denseRank" +"denseRankArgMax" +"denseRankArgMin" +"denseRankArray" +"denseRankDistinct" +"denseRankForEach" +"denseRankIf" +"denseRankMap" +"denseRankMerge" +"denseRankNull" +"denseRankOrDefault" +"denseRankOrNull" +"denseRankResample" +"denseRankSimpleState" +"denseRankState" "dense_rank" "dense_rankArgMax" "dense_rankArgMin" @@ -1108,8 +1139,11 @@ "domainWithoutWWWRFC" "dotProduct" "dumpColumnStructure" +"dynamicElement" +"dynamicType" "e" "editDistance" +"editDistanceUTF8" "empty" "emptyArrayDate" "emptyArrayDateTime" @@ -1334,14 +1368,17 @@ "gccMurmurHash" "gcd" "generateRandomStructure" +"generateSnowflakeID" "generateULID" "generateUUIDv4" +"generateUUIDv7" "geoDistance" "geoToH3" "geoToS2" "geohashDecode" "geohashEncode" "geohashesInBox" +"getClientHTTPHeader" "getMacro" "getOSKernelVersion" "getServerPort" @@ -1589,6 +1626,20 @@ "groupBitmapXorSimpleState" "groupBitmapXorState" "groupConcat" +"groupConcatArgMax" +"groupConcatArgMin" +"groupConcatArray" +"groupConcatDistinct" +"groupConcatForEach" +"groupConcatIf" +"groupConcatMap" +"groupConcatMerge" +"groupConcatNull" +"groupConcatOrDefault" +"groupConcatOrNull" +"groupConcatResample" +"groupConcatSimpleState" +"groupConcatState" "groupUniqArray" "groupUniqArrayArgMax" "groupUniqArrayArgMin" @@ -1604,6 +1655,21 @@ "groupUniqArrayResample" "groupUniqArraySimpleState" "groupUniqArrayState" +"group_concat" +"group_concatArgMax" +"group_concatArgMin" +"group_concatArray" +"group_concatDistinct" +"group_concatForEach" +"group_concatIf" +"group_concatMap" +"group_concatMerge" +"group_concatNull" +"group_concatOrDefault" +"group_concatOrNull" +"group_concatResample" +"group_concatSimpleState" +"group_concatState" "h3CellAreaM2" "h3CellAreaRads2" "h3Distance" @@ -1660,6 +1726,8 @@ "hasTokenCaseInsensitiveOrNull" "hasTokenOrNull" "hex" +"hilbertDecode" +"hilbertEncode" "histogram" "histogramArgMax" "histogramArgMin" @@ -1881,6 +1949,7 @@ "less" "lessOrEquals" "levenshteinDistance" +"levenshteinDistanceUTF8" "lgamma" "like" "ln" @@ -2498,10 +2567,44 @@ "parseDateTimeInJodaSyntaxOrZero" "parseDateTimeOrNull" "parseDateTimeOrZero" +"parseReadableSize" +"parseReadableSizeOrNull" +"parseReadableSizeOrZero" "parseTimeDelta" +"partitionID" "partitionId" "path" "pathFull" +"percentRank" +"percentRankArgMax" +"percentRankArgMin" +"percentRankArray" +"percentRankDistinct" +"percentRankForEach" +"percentRankIf" +"percentRankMap" +"percentRankMerge" +"percentRankNull" +"percentRankOrDefault" +"percentRankOrNull" +"percentRankResample" +"percentRankSimpleState" +"percentRankState" +"percent_rank" +"percent_rankArgMax" +"percent_rankArgMin" +"percent_rankArray" +"percent_rankDistinct" +"percent_rankForEach" +"percent_rankIf" +"percent_rankMap" +"percent_rankMerge" 
+"percent_rankNull" +"percent_rankOrDefault" +"percent_rankOrNull" +"percent_rankResample" +"percent_rankSimpleState" +"percent_rankState" "pi" "plus" "pmod" @@ -2533,6 +2636,7 @@ "positive_modulo" "pow" "power" +"printf" "proportionsZTest" "protocol" "punycodeDecode" @@ -3103,6 +3207,8 @@ "rankResample" "rankSimpleState" "rankState" +"readWKTLineString" +"readWKTMultiLineString" "readWKTMultiPolygon" "readWKTPoint" "readWKTPolygon" @@ -3340,6 +3446,8 @@ "skewSampState" "sleep" "sleepEachRow" +"snowflakeIDToDateTime" +"snowflakeIDToDateTime64" "snowflakeToDateTime" "snowflakeToDateTime64" "soundex" @@ -3902,6 +4010,7 @@ "truncate" "tryBase58Decode" "tryBase64Decode" +"tryBase64URLDecode" "tryDecrypt" "tryIdnaEncode" "tryPunycodeDecode" @@ -3923,6 +4032,7 @@ "tupleModuloByNumber" "tupleMultiply" "tupleMultiplyByNumber" +"tupleNames" "tupleNegate" "tuplePlus" "tupleToNameValuePairs" diff --git a/tests/fuzz/dictionaries/key_words.dict b/tests/fuzz/dictionaries/keywords.dict similarity index 95% rename from tests/fuzz/dictionaries/key_words.dict rename to tests/fuzz/dictionaries/keywords.dict index db517a2382c..abaaf9e53b5 100644 --- a/tests/fuzz/dictionaries/key_words.dict +++ b/tests/fuzz/dictionaries/keywords.dict @@ -3,7 +3,7 @@ "ADD CONSTRAINT" "ADD INDEX" "ADD PROJECTION" -"ADD STATISTIC" +"ADD STATISTICS" "ADMIN OPTION FOR" "AFTER" "ALGORITHM" @@ -76,7 +76,7 @@ "CLEAR COLUMN" "CLEAR INDEX" "CLEAR PROJECTION" -"CLEAR STATISTIC" +"CLEAR STATISTICS" "CLUSTER" "CLUSTERS" "CN" @@ -110,6 +110,8 @@ "CURRENTUSER" "CURRENT_USER" "D" +"DATA" +"DATA INNER UUID" "DATABASE" "DATABASES" "DATE" @@ -147,7 +149,7 @@ "DROP PART" "DROP PARTITION" "DROP PROJECTION" -"DROP STATISTIC" +"DROP STATISTICS" "DROP TABLE" "DROP TEMPORARY TABLE" "ELSE" @@ -247,6 +249,7 @@ "IS NULL" "IS_OBJECT_ID" "JOIN" +"JWT" "KERBEROS" "KEY" "KEY BY" @@ -277,13 +280,15 @@ "MATERIALIZE COLUMN" "MATERIALIZE INDEX" "MATERIALIZE PROJECTION" -"MATERIALIZE STATISTIC" +"MATERIALIZE STATISTICS" "MATERIALIZE TTL" "MATERIALIZED" "MAX" "MCS" "MEMORY" "MERGES" +"METRICS" +"METRICS INNER UUID" "MI" "MICROSECOND" "MICROSECONDS" @@ -297,12 +302,14 @@ "MODIFY" "MODIFY COLUMN" "MODIFY COMMENT" +"MODIFY DEFINER" "MODIFY ORDER BY" "MODIFY QUERY" "MODIFY REFRESH" "MODIFY SAMPLE BY" "MODIFY SETTING" "MODIFY SQL SECURITY" +"MODIFY STATISTICS" "MODIFY TTL" "MONTH" "MONTHS" @@ -373,6 +380,7 @@ "Protobuf" "Q" "QQ" +"QUALIFY" "QUARTER" "QUARTERS" "QUERY" @@ -384,6 +392,7 @@ "READONLY" "REALM" "RECOMPRESS" +"RECURSIVE" "REFERENCES" "REFRESH" "REGEXP" @@ -415,6 +424,7 @@ "SALT" "SAMPLE" "SAMPLE BY" +"SAN" "SCHEME" "SECOND" "SECONDS" @@ -460,7 +470,8 @@ "SS" "SSH_KEY" "SSL_CERTIFICATE" -"STATISTIC" +"START TRANSACTION" +"STATISTICS" "STEP" "STORAGE" "STRICT" @@ -475,6 +486,8 @@ "TABLE" "TABLE OVERRIDE" "TABLES" +"TAGS" +"TAGS INNER UUID" "TEMPORARY" "TEMPORARY TABLE" "TEST" @@ -529,6 +542,7 @@ "WITH NAME" "WITH REPLACE OPTION" "WITH TIES" +"WITH_ITEMINDEX" "WK" "WRITABLE" "WW" @@ -540,4 +554,3 @@ "bagexpansion" "base_backup" "cluster_host_ids" -"with_itemindex" From 710cf1a223c7b6d3755316beb1904ef841f2e51e Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 13:55:57 +0000 Subject: [PATCH 2163/2361] Calculate statistics for paths in shared data --- src/Columns/ColumnObject.cpp | 86 ++++++++++--- src/Columns/ColumnObject.h | 39 ++++-- src/DataTypes/DataTypesBinaryEncoding.h | 106 +++++++-------- .../Serializations/SerializationObject.cpp | 121 ++++++++++++++++-- .../Serializations/SerializationObject.h | 3 +- src/Interpreters/Squashing.cpp | 1 - 
...9_json_type_horizontal_merges.reference.j2 | 40 ++++++ .../03209_json_type_horizontal_merges.sql.j2 | 15 +++ ...209_json_type_vertical_merges.reference.j2 | 40 ++++++ .../03209_json_type_vertical_merges.sql.j2 | 15 +++ .../03222_json_squashing.reference | 102 +++++++++++++++ .../0_stateless/03222_json_squashing.sql | 82 ++++++++++++ 12 files changed, 555 insertions(+), 95 deletions(-) create mode 100644 tests/queries/0_stateless/03222_json_squashing.reference create mode 100644 tests/queries/0_stateless/03222_json_squashing.sql diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 852327cfc17..0645c10340d 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -38,10 +38,12 @@ ColumnObject::ColumnObject( std::unordered_map dynamic_paths_, MutableColumnPtr shared_data_, size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, size_t max_dynamic_types_, - const Statistics & statistics_) + const StatisticsPtr & statistics_) : shared_data(std::move(shared_data_)) , max_dynamic_paths(max_dynamic_paths_) + , global_max_dynamic_paths(global_max_dynamic_paths_) , max_dynamic_types(max_dynamic_types_) , statistics(statistics_) { @@ -60,7 +62,7 @@ ColumnObject::ColumnObject( ColumnObject::ColumnObject( std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_) - : max_dynamic_paths(max_dynamic_paths_), max_dynamic_types(max_dynamic_types_) + : max_dynamic_paths(max_dynamic_paths_), global_max_dynamic_paths(max_dynamic_paths_), max_dynamic_types(max_dynamic_types_) { typed_paths.reserve(typed_paths_.size()); for (auto & [path, column] : typed_paths_) @@ -81,8 +83,9 @@ ColumnObject::Ptr ColumnObject::create( const std::unordered_map & dynamic_paths_, const ColumnPtr & shared_data_, size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, size_t max_dynamic_types_, - const ColumnObject::Statistics & statistics_) + const ColumnObject::StatisticsPtr & statistics_) { std::unordered_map mutable_typed_paths; mutable_typed_paths.reserve(typed_paths_.size()); @@ -99,6 +102,7 @@ ColumnObject::Ptr ColumnObject::create( std::move(mutable_dynamic_paths), shared_data_->assumeMutable(), max_dynamic_paths_, + global_max_dynamic_paths_, max_dynamic_types_, statistics_); } @@ -108,10 +112,11 @@ ColumnObject::MutablePtr ColumnObject::create( std::unordered_map dynamic_paths_, MutableColumnPtr shared_data_, size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, size_t max_dynamic_types_, - const ColumnObject::Statistics & statistics_) + const ColumnObject::StatisticsPtr & statistics_) { - return Base::create(std::move(typed_paths_), std::move(dynamic_paths_), std::move(shared_data_), max_dynamic_paths_, max_dynamic_types_, statistics_); + return Base::create(std::move(typed_paths_), std::move(dynamic_paths_), std::move(shared_data_), max_dynamic_paths_, global_max_dynamic_paths_, max_dynamic_types_, statistics_); } ColumnObject::MutablePtr ColumnObject::create(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_) @@ -153,6 +158,7 @@ MutableColumnPtr ColumnObject::cloneEmpty() const std::move(empty_dynamic_paths), shared_data->cloneEmpty(), max_dynamic_paths, + global_max_dynamic_paths, max_dynamic_types, statistics); } @@ -174,6 +180,7 @@ MutableColumnPtr ColumnObject::cloneResized(size_t size) const std::move(resized_dynamic_paths), shared_data->cloneResized(size), max_dynamic_paths, + global_max_dynamic_paths, max_dynamic_types, statistics); } @@ -263,6 +270,14 @@ void 
ColumnObject::addNewDynamicPath(const std::string_view path) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add new dynamic path as the limit ({}) on dynamic paths is reached", max_dynamic_paths); } +void ColumnObject::setMaxDynamicPaths(size_t max_dynamic_paths_) +{ + if (!empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific max_dynamic_paths parameter is allowed only for empty object column"); + + max_dynamic_paths = max_dynamic_paths_; +} + void ColumnObject::setDynamicPaths(const std::vector & paths) { if (paths.size() > max_dynamic_paths) @@ -836,7 +851,7 @@ ColumnPtr ColumnObject::filter(const Filter & filt, ssize_t result_size_hint) co filtered_dynamic_paths[path] = column->filter(filt, result_size_hint); auto filtered_shared_data = shared_data->filter(filt, result_size_hint); - return ColumnObject::create(filtered_typed_paths, filtered_dynamic_paths, filtered_shared_data, max_dynamic_paths, max_dynamic_types); + return ColumnObject::create(filtered_typed_paths, filtered_dynamic_paths, filtered_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } void ColumnObject::expand(const Filter & mask, bool inverted) @@ -861,7 +876,7 @@ ColumnPtr ColumnObject::permute(const Permutation & perm, size_t limit) const permuted_dynamic_paths[path] = column->permute(perm, limit); auto permuted_shared_data = shared_data->permute(perm, limit); - return ColumnObject::create(permuted_typed_paths, permuted_dynamic_paths, permuted_shared_data, max_dynamic_paths, max_dynamic_types); + return ColumnObject::create(permuted_typed_paths, permuted_dynamic_paths, permuted_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } ColumnPtr ColumnObject::index(const IColumn & indexes, size_t limit) const @@ -877,7 +892,7 @@ ColumnPtr ColumnObject::index(const IColumn & indexes, size_t limit) const indexed_dynamic_paths[path] = column->index(indexes, limit); auto indexed_shared_data = shared_data->index(indexes, limit); - return ColumnObject::create(indexed_typed_paths, indexed_dynamic_paths, indexed_shared_data, max_dynamic_paths, max_dynamic_types); + return ColumnObject::create(indexed_typed_paths, indexed_dynamic_paths, indexed_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } ColumnPtr ColumnObject::replicate(const Offsets & replicate_offsets) const @@ -893,7 +908,7 @@ ColumnPtr ColumnObject::replicate(const Offsets & replicate_offsets) const replicated_dynamic_paths[path] = column->replicate(replicate_offsets); auto replicated_shared_data = shared_data->replicate(replicate_offsets); - return ColumnObject::create(replicated_typed_paths, replicated_dynamic_paths, replicated_shared_data, max_dynamic_paths, max_dynamic_types); + return ColumnObject::create(replicated_typed_paths, replicated_dynamic_paths, replicated_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } MutableColumns ColumnObject::scatter(ColumnIndex num_columns, const Selector & selector) const @@ -924,7 +939,7 @@ MutableColumns ColumnObject::scatter(ColumnIndex num_columns, const Selector & s MutableColumns result_columns; result_columns.reserve(num_columns); for (size_t i = 0; i != num_columns; ++i) - result_columns.emplace_back(ColumnObject::create(std::move(scattered_typed_paths[i]), std::move(scattered_dynamic_paths[i]), std::move(scattered_shared_data_columns[i]), max_dynamic_paths, max_dynamic_types)); + result_columns.emplace_back(ColumnObject::create(std::move(scattered_typed_paths[i]), 
std::move(scattered_dynamic_paths[i]), std::move(scattered_shared_data_columns[i]), max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types)); return result_columns; } @@ -1075,6 +1090,7 @@ ColumnPtr ColumnObject::compress() const my_compressed_dynamic_paths = std::move(compressed_dynamic_paths), my_compressed_shared_data = std::move(compressed_shared_data), my_max_dynamic_paths = max_dynamic_paths, + my_global_max_dynamic_paths = global_max_dynamic_paths, my_max_dynamic_types = max_dynamic_types, my_statistics = statistics]() mutable { @@ -1089,7 +1105,7 @@ ColumnPtr ColumnObject::compress() const decompressed_dynamic_paths[path] = column->decompress(); auto decompressed_shared_data = my_compressed_shared_data->decompress(); - return ColumnObject::create(decompressed_typed_paths, decompressed_dynamic_paths, decompressed_shared_data, my_max_dynamic_paths, my_max_dynamic_types, my_statistics); + return ColumnObject::create(decompressed_typed_paths, decompressed_dynamic_paths, decompressed_shared_data, my_max_dynamic_paths, my_global_max_dynamic_paths, my_max_dynamic_types, my_statistics); }; return ColumnCompressed::create(size(), byte_size, decompress); @@ -1268,14 +1284,35 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou auto it = path_to_total_number_of_non_null_values.find(path); if (it == path_to_total_number_of_non_null_values.end()) it = path_to_total_number_of_non_null_values.emplace(path, 0).first; - auto statistics_it = source_statistics.data.find(path); - size_t size = statistics_it == source_statistics.data.end() ? (column_ptr->size() - column_ptr->getNumberOfDefaultRows()) : statistics_it->second; + size_t size = column_ptr->size() - column_ptr->getNumberOfDefaultRows(); + if (source_statistics) + { + auto statistics_it = source_statistics->dynamic_paths_statistics.find(path); + if (statistics_it != source_statistics->dynamic_paths_statistics.end()) + size = statistics_it->second; + } it->second += size; } + + /// Add paths from shared data statistics. It can help with extracting frequent paths + /// from shared data to dynamic paths. + if (source_statistics) + { + for (const auto & [path, size] : source_statistics->shared_data_paths_statistics) + { + auto it = path_to_total_number_of_non_null_values.find(path); + if (it == path_to_total_number_of_non_null_values.end()) + it = path_to_total_number_of_non_null_values.emplace(path, 0).first; + it->second += size; + } + } } + /// Reset current state. dynamic_paths.clear(); dynamic_paths_ptrs.clear(); + max_dynamic_paths = global_max_dynamic_paths; + Statistics new_statistics(Statistics::Source::MERGE); /// Check if the number of all dynamic paths exceeds the limit. if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) @@ -1288,10 +1325,18 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou std::sort(paths_with_sizes.begin(), paths_with_sizes.end(), std::greater()); /// Fill dynamic_paths with first max_dynamic_paths paths in sorted list.
- for (size_t i = 0; i != max_dynamic_paths; ++i) + for (const auto & [size, path] : paths_with_sizes) { - dynamic_paths.emplace(paths_with_sizes[i].second, ColumnDynamic::create(max_dynamic_types)); - dynamic_paths_ptrs.emplace(paths_with_sizes[i].second, assert_cast(dynamic_paths.find(paths_with_sizes[i].second)->second.get())); + if (dynamic_paths.size() < max_dynamic_paths) + { + dynamic_paths.emplace(path, ColumnDynamic::create(max_dynamic_types)); + dynamic_paths_ptrs.emplace(path, assert_cast(dynamic_paths.find(path)->second.get())); + } + /// Add all remaining paths into shared data statistics until we reach its max size. + else if (new_statistics.shared_data_paths_statistics.size() < Statistics::MAX_SHARED_DATA_STATISTICS_SIZE) + { + new_statistics.shared_data_paths_statistics.emplace(path, size); + } } } /// Use all dynamic paths from all source columns. @@ -1305,10 +1350,13 @@ void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & sou } /// Fill statistics for the merged part. - statistics.data.clear(); - statistics.source = Statistics::Source::MERGE; for (const auto & [path, _] : dynamic_paths) - statistics.data[path] = path_to_total_number_of_non_null_values[path]; + new_statistics.dynamic_paths_statistics[path] = path_to_total_number_of_non_null_values[path]; + statistics = std::make_shared(std::move(new_statistics)); + + /// Set max_dynamic_paths to the number of selected dynamic paths. + /// It's needed to avoid adding new unexpected dynamic paths during inserts into this column during merge. + max_dynamic_paths = dynamic_paths.size(); /// Now we have the resulting set of dynamic paths that will be used in all merged columns. /// As we use Dynamic column for dynamic paths, we should call takeDynamicStructureFromSourceColumns diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 38c71e94e12..8412a50281a 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -26,12 +26,21 @@ public: MERGE, /// Statistics were calculated during merge of several MergeTree parts. }; + explicit Statistics(Source source_) : source(source_) {} + /// Source of the statistics. Source source; - /// Statistics data: (path) -> (total number of not-null values). - std::unordered_map data; + /// Statistics for dynamic paths: (path) -> (total number of not-null values). + std::unordered_map dynamic_paths_statistics; + /// Statistics for paths in shared data: (path) -> (total number of not-null values). + /// We don't store statistics for all paths in shared data but only for some subset of them + /// (is 10000 a good limit? It should not be expensive to store 10000 paths per part) + static const size_t MAX_SHARED_DATA_STATISTICS_SIZE = 10000; + std::unordered_map shared_data_paths_statistics; }; + using StatisticsPtr = std::shared_ptr; + private: friend class COWHelper, ColumnObject>; @@ -41,8 +50,9 @@ private: std::unordered_map dynamic_paths_, MutableColumnPtr shared_data_, size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, size_t max_dynamic_types_, - const Statistics & statistics_ = {}); + const StatisticsPtr & statistics_ = {}); /// Use StringHashForHeterogeneousLookup hash for hash maps to be able to use std::string_view in find() method.
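// The merge-time selection implemented in takeDynamicStructureFromSourceColumns above can be
// condensed into a small standalone sketch. It is illustrative only and uses plain standard
// library types; the names Selection and selectDynamicPaths are invented for the example and
// are not part of the ClickHouse API. Paths with the most non-null values become dynamic paths,
// up to max_dynamic_paths; counts for the remaining paths are kept only as shared data
// statistics, up to a cap such as MAX_SHARED_DATA_STATISTICS_SIZE, and the effective
// max_dynamic_paths of the merged column becomes the number of paths actually selected.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Selection
{
    std::vector<std::string> dynamic_paths;                      // kept as real subcolumns
    std::map<std::string, size_t> shared_data_paths_statistics;  // the rest, capped
};

Selection selectDynamicPaths(
    const std::map<std::string, size_t> & non_null_counts, size_t max_dynamic_paths, size_t max_statistics)
{
    // Sort paths by how many non-null values they have, most frequent first.
    std::vector<std::pair<size_t, std::string>> paths_with_sizes;
    for (const auto & [path, count] : non_null_counts)
        paths_with_sizes.emplace_back(count, path);
    std::sort(paths_with_sizes.begin(), paths_with_sizes.end(), std::greater<>());

    Selection res;
    for (const auto & [count, path] : paths_with_sizes)
    {
        if (res.dynamic_paths.size() < max_dynamic_paths)
            res.dynamic_paths.push_back(path);
        else if (res.shared_data_paths_statistics.size() < max_statistics)
            res.shared_data_paths_statistics.emplace(path, count);
    }
    return res;
}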
using PathToColumnMap = std::unordered_map; @@ -58,16 +68,18 @@ public: const std::unordered_map & dynamic_paths_, const ColumnPtr & shared_data_, size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, size_t max_dynamic_types_, - const Statistics & statistics_ = {}); + const StatisticsPtr & statistics_ = {}); static MutablePtr create( std::unordered_map typed_paths_, std::unordered_map dynamic_paths_, MutableColumnPtr shared_data_, size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, size_t max_dynamic_types_, - const Statistics & statistics_ = {}); + const StatisticsPtr & statistics_ = {}); static MutablePtr create(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_); @@ -171,7 +183,7 @@ public: const PathToDynamicColumnPtrMap & getDynamicPathsPtrs() const { return dynamic_paths_ptrs; } PathToDynamicColumnPtrMap & getDynamicPathsPtrs() { return dynamic_paths_ptrs; } - const Statistics & getStatistics() const { return statistics; } + const StatisticsPtr & getStatistics() const { return statistics; } const ColumnPtr & getSharedDataPtr() const { return shared_data; } ColumnPtr & getSharedDataPtr() { return shared_data; } @@ -199,6 +211,7 @@ public: size_t getMaxDynamicTypes() const { return max_dynamic_types; } size_t getMaxDynamicPaths() const { return max_dynamic_paths; } + size_t getGlobalMaxDynamicPaths() const { return global_max_dynamic_paths; } /// Try to add new dynamic path. Returns pointer to the new dynamic /// path column or nullptr if limit on dynamic paths is reached. @@ -207,7 +220,8 @@ public: void addNewDynamicPath(const std::string_view path); void setDynamicPaths(const std::vector & paths); - void setStatistics(const Statistics & statistics_) { statistics = statistics_; } + void setMaxDynamicPaths(size_t max_dynamic_paths_); + void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const std::string_view path, const IColumn & column, size_t n); void deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const; @@ -238,12 +252,17 @@ private: WrappedPtr shared_data; /// Maximum number of dynamic paths. If this limit is reached, all new paths will be inserted into shared data. + /// This limit can be different for different instances of Object column. For example, we can decrease it + /// in takeDynamicStructureFromSourceColumns before merge. size_t max_dynamic_paths; + /// Global limit on number of dynamic paths for all column instances of this Object type. It's the limit specified + /// in the type definition (for example 'JSON(max_dynamic_paths=N)'). max_dynamic_paths is always not greater than this limit. + size_t global_max_dynamic_paths; /// Maximum number of dynamic types for each dynamic path. Used while creating Dynamic columns for new dynamic paths. size_t max_dynamic_types; - /// Statistics on the number of non-null values for each dynamic path in the MergeTree data part. - /// Calculated during merges or reading from MergeTree. Used to determine the set of dynamic paths for the merged part. - Statistics statistics; + /// Statistics on the number of non-null values for each dynamic path and for some shared data paths in the MergeTree data part. + /// Calculated during serializing of data part in MergeTree. Used to determine the set of dynamic paths for the merged part. 
+ StatisticsPtr statistics; }; } diff --git a/src/DataTypes/DataTypesBinaryEncoding.h b/src/DataTypes/DataTypesBinaryEncoding.h index 5ac21471a29..cdfbfee1ccf 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.h +++ b/src/DataTypes/DataTypesBinaryEncoding.h @@ -8,59 +8,59 @@ namespace DB /** Binary encoding for ClickHouse data types: -|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ClickHouse data type | Binary encoding | -|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Nothing | 0x00 | -| UInt8 | 0x01 | -| UInt16 | 0x02 | -| UInt32 | 0x03 | -| UInt64 | 0x04 | -| UInt128 | 0x05 | -| UInt256 | 0x06 | -| Int8 | 0x07 | -| Int16 | 0x08 | -| Int32 | 0x09 | -| Int64 | 0x0A | -| Int128 | 0x0B | -| Int256 | 0x0C | -| Float32 | 0x0D | -| Float64 | 0x0E | -| Date | 0x0F | -| Date32 | 0x10 | -| DateTime | 0x11 | -| DateTime(time_zone) | 0x12 | -| DateTime64(P) | 0x13 | -| DateTime64(P, time_zone) | 0x14 | -| String | 0x15 | -| FixedString(N) | 0x16 | -| Enum8 | 0x17... | -| Enum16 | 0x18...> | -| Decimal32(P, S) | 0x19 | -| Decimal64(P, S) | 0x1A | -| Decimal128(P, S) | 0x1B | -| Decimal256(P, S) | 0x1C | -| UUID | 0x1D | -| Array(T) | 0x1E | -| Tuple(T1, ..., TN) | 0x1F... | -| Tuple(name1 T1, ..., nameN TN) | 0x20... | -| Set| 0x21 | -| Interval | 0x22 | -| Nullable(T) | 0x23 | -| Function | 0x24... | -| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | -| LowCardinality(T) | 0x26 | -| Map(K, V) | 0x27 | -| IPv4 | 0x28 | -| IPv6 | 0x29 | -| Variant(T1, ..., TN) | 0x2A... | -| Dynamic(max_types=N) | 0x2B | -| Custom type (Ring, Polygon, etc) | 0x2C | -| Bool | 0x2D | -| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | -| Nested(name1 T1, ..., nameN TN) | 0x2F... | -| JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp) | 0x30......... 
| -|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ClickHouse data type | Binary encoding | +|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Nothing | 0x00 | +| UInt8 | 0x01 | +| UInt16 | 0x02 | +| UInt32 | 0x03 | +| UInt64 | 0x04 | +| UInt128 | 0x05 | +| UInt256 | 0x06 | +| Int8 | 0x07 | +| Int16 | 0x08 | +| Int32 | 0x09 | +| Int64 | 0x0A | +| Int128 | 0x0B | +| Int256 | 0x0C | +| Float32 | 0x0D | +| Float64 | 0x0E | +| Date | 0x0F | +| Date32 | 0x10 | +| DateTime | 0x11 | +| DateTime(time_zone) | 0x12 | +| DateTime64(P) | 0x13 | +| DateTime64(P, time_zone) | 0x14 | +| String | 0x15 | +| FixedString(N) | 0x16 | +| Enum8 | 0x17... | +| Enum16 | 0x18...> | +| Decimal32(P, S) | 0x19 | +| Decimal64(P, S) | 0x1A | +| Decimal128(P, S) | 0x1B | +| Decimal256(P, S) | 0x1C | +| UUID | 0x1D | +| Array(T) | 0x1E | +| Tuple(T1, ..., TN) | 0x1F... | +| Tuple(name1 T1, ..., nameN TN) | 0x20... | +| Set| 0x21 | +| Interval | 0x22 | +| Nullable(T) | 0x23 | +| Function | 0x24... | +| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | +| LowCardinality(T) | 0x26 | +| Map(K, V) | 0x27 | +| IPv4 | 0x28 | +| IPv6 | 0x29 | +| Variant(T1, ..., TN) | 0x2A... | +| Dynamic(max_types=N) | 0x2B | +| Custom type (Ring, Polygon, etc) | 0x2C | +| Bool | 0x2D | +| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | +| Nested(name1 T1, ..., nameN TN) | 0x2F... | +| JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp) | 0x30......... 
| +|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| Interval kind binary encoding: |---------------|-----------------| diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 68d25eaeaff..a1580ffedd4 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -75,14 +75,20 @@ void SerializationObject::ObjectSerializationVersion::checkVersion(UInt64 versio struct SerializeBinaryBulkStateObject: public ISerialization::SerializeBinaryBulkState { SerializationObject::ObjectSerializationVersion serialization_version; + size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_map typed_path_states; std::unordered_map dynamic_path_states; ISerialization::SerializeBinaryBulkStatePtr shared_data_state; - /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). - ColumnObject::Statistics statistics = { .source = ColumnObject::Statistics::Source::READ, .data = {} }; + /// Paths statistics. + ColumnObject::Statistics statistics; + /// If true, statistics will be recalculated during serialization. + bool recalculate_statistics = false; - explicit SerializeBinaryBulkStateObject(UInt64 serialization_version_) : serialization_version(serialization_version_) {} + explicit SerializeBinaryBulkStateObject(UInt64 serialization_version_) + : serialization_version(serialization_version_), statistics(ColumnObject::Statistics::Source::READ) + { + } }; struct DeserializeBinaryBulkStateObject : public ISerialization::DeserializeBinaryBulkState @@ -190,8 +196,11 @@ void SerializationObject::serializeBinaryBulkStatePrefix( UInt64 serialization_version = ObjectSerializationVersion::Value::BASIC; writeBinaryLittleEndian(serialization_version, *stream); - /// Write all dynamic paths in sorted order. auto object_state = std::make_shared(serialization_version); + object_state->max_dynamic_paths = column_object.getMaxDynamicPaths(); + /// Write max_dynamic_paths parameter. + writeBinaryLittleEndian(object_state->max_dynamic_paths, *stream); + /// Write all dynamic paths in sorted order. object_state->sorted_dynamic_paths.reserve(dynamic_paths.size()); for (const auto & [path, _] : dynamic_paths) object_state->sorted_dynamic_paths.push_back(path); @@ -204,6 +213,7 @@ void SerializationObject::serializeBinaryBulkStatePrefix( if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX) { const auto & statistics = column_object.getStatistics(); + /// First, write statistics for dynamic paths. for (const auto & path : object_state->sorted_dynamic_paths) { size_t number_of_non_null_values = 0; @@ -213,13 +223,52 @@ void SerializationObject::serializeBinaryBulkStatePrefix( /// - statistics read from the data part during deserialization of Object column (Statistics::Source::READ). 
/// We can rely only on statistics calculated during the merge, because column with statistics that was read /// during deserialization from some data part could be filtered/limited/transformed/etc and so the statistics can be outdated. - if (!statistics.data.empty() && statistics.source == ColumnObject::Statistics::Source::MERGE) - number_of_non_null_values = statistics.data.at(path); + if (statistics && statistics->source == ColumnObject::Statistics::Source::MERGE) + number_of_non_null_values = statistics->dynamic_paths_statistics.at(path); /// Otherwise we can use only path column from current object column. else number_of_non_null_values = (dynamic_paths.at(path)->size() - dynamic_paths.at(path)->getNumberOfDefaultRows()); writeVarUInt(number_of_non_null_values, *stream); } + + /// Second, write statistics for paths in shared data. + /// Check if we have statistics calculated during merge of some data parts (Statistics::Source::MERGE). + if (statistics && statistics->source == ColumnObject::Statistics::Source::MERGE) + { + writeVarUInt(statistics->shared_data_paths_statistics.size(), *stream); + for (const auto & [path, size] : statistics->shared_data_paths_statistics) + { + writeStringBinary(path, *stream); + writeVarUInt(size, *stream); + } + } + /// If we don't have statistics for shared data from merge, calculate it from the column. + else + { + std::unordered_map shared_data_paths_statistics; + const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_paths->size(); ++i) + { + auto path = shared_data_paths->getDataAt(i).toView(); + if (auto it = shared_data_paths_statistics.find(path); it != shared_data_paths_statistics.end()) + ++it->second; + else if (shared_data_paths_statistics.size() < ColumnObject::Statistics::MAX_SHARED_DATA_STATISTICS_SIZE) + shared_data_paths_statistics.emplace(path, 1); + } + + writeVarUInt(shared_data_paths_statistics.size(), *stream); + for (const auto & [path, size] : shared_data_paths_statistics) + { + writeStringBinary(path, *stream); + writeVarUInt(size, *stream); + } + } + } + /// Otherwise statistics will be written in the suffix, in this case we will recalculate + /// statistics during serialization to make it more precise. + else + { + object_state->recalculate_statistics = true; } settings.path.push_back(Substream::ObjectData); @@ -304,6 +353,8 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb UInt64 serialization_version; readBinaryLittleEndian(serialization_version, *structure_stream); auto structure_state = std::make_shared(serialization_version); + /// Read max_dynamic_paths parameter. + readBinaryLittleEndian(structure_state->max_dynamic_paths, *structure_stream); /// Read the sorted list of dynamic paths. size_t dynamic_paths_size; readVarUInt(dynamic_paths_size, *structure_stream); @@ -319,8 +370,24 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb /// Read statistics if needed. if (settings.object_and_dynamic_read_statistics) { + ColumnObject::Statistics statistics(ColumnObject::Statistics::Source::READ); + statistics.dynamic_paths_statistics.reserve(structure_state->sorted_dynamic_paths.size()); + /// First, read dynamic paths statistics. for (const auto & path : structure_state->sorted_dynamic_paths) - readVarUInt(structure_state->statistics.data[path], *structure_stream); + readVarUInt(statistics.dynamic_paths_statistics[path], *structure_stream); + + /// Second, read shared data paths statistics. 
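// The rule spelled out in the serializeBinaryBulkStatePrefix comments above can be summarised
// in a standalone sketch: a stored per-path count is reused only when the statistics were
// produced by a merge; otherwise the count is recomputed from the column itself. Illustrative
// only; StatisticsSource, PathStatistics and countForPrefix are names invented for the example.

#include <cstddef>
#include <optional>

enum class StatisticsSource { READ, MERGE };

struct PathStatistics
{
    StatisticsSource source = StatisticsSource::READ;
    size_t non_null_values = 0;
};

size_t countForPrefix(const std::optional<PathStatistics> & statistics, size_t column_size, size_t default_rows)
{
    // Statistics read back from a data part may be stale after the column was filtered,
    // limited or otherwise transformed, so only merge-produced statistics are reused as-is.
    if (statistics && statistics->source == StatisticsSource::MERGE)
        return statistics->non_null_values;
    return column_size - default_rows;
}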
+ size_t size; + readVarUInt(size, *structure_stream); + statistics.shared_data_paths_statistics.reserve(size); + String path; + for (size_t i = 0; i != size; ++i) + { + readStringBinary(path, *structure_stream); + readVarUInt(statistics.shared_data_paths_statistics[path], *structure_stream); + } + + structure_state->statistics = std::make_shared(std::move(statistics)); } state = std::move(structure_state); @@ -362,14 +429,37 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( auto it = dynamic_paths.find(path); if (it == dynamic_paths.end()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Dynamic structure mismatch for Object column: dynamic path '{}' is not found in the column", path); - size_t number_of_non_null_values = 0; - dynamic_serialization_typed->serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants(*it->second, offset, limit, settings, object_state->dynamic_path_states[path], number_of_non_null_values); - object_state->statistics.data[path] += number_of_non_null_values; + if (object_state->recalculate_statistics) + { + size_t number_of_non_null_values = 0; + dynamic_serialization_typed->serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants(*it->second, offset, limit, settings, object_state->dynamic_path_states[path], number_of_non_null_values); + object_state->statistics.dynamic_paths_statistics[path] += number_of_non_null_values; + } + else + { + dynamic_serialization_typed->serializeBinaryBulkWithMultipleStreams(*it->second, offset, limit, settings, object_state->dynamic_path_states[path]); + } settings.path.pop_back(); } settings.path.push_back(Substream::ObjectSharedData); shared_data_serialization->serializeBinaryBulkWithMultipleStreams(*shared_data, offset, limit, settings, object_state->shared_data_state); + if (object_state->recalculate_statistics) + { + /// Calculate statistics for paths in shared data. + const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + size_t start = shared_data_offsets[offset - 1]; + size_t end = limit == 0 || offset + limit > shared_data_offsets.size() ? shared_data_paths->size() : shared_data_offsets[offset + limit - 1]; + for (size_t i = start; i != end; ++i) + { + auto path = shared_data_paths->getDataAt(i).toView(); + if (auto it = object_state->statistics.shared_data_paths_statistics.find(path); it != object_state->statistics.shared_data_paths_statistics.end()) + ++it->second; + else if (object_state->statistics.shared_data_paths_statistics.size() < ColumnObject::Statistics::MAX_SHARED_DATA_STATISTICS_SIZE) + object_state->statistics.shared_data_paths_statistics.emplace(path, 1); + } + } settings.path.pop_back(); settings.path.pop_back(); } @@ -388,8 +478,17 @@ void SerializationObject::serializeBinaryBulkStateSuffix( /// Write statistics in suffix if needed. if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX) { + /// First, write dynamic paths statistics. for (const auto & path : object_state->sorted_dynamic_paths) - writeVarUInt(object_state->statistics.data[path], *stream); + writeVarUInt(object_state->statistics.dynamic_paths_statistics[path], *stream); + + /// Second, write shared data paths statistics. 
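// The capped counting used for shared data paths, both when statistics are written in the
// prefix and when they are recalculated during serialization, follows the pattern below.
// This is a standalone sketch with plain standard library types; countSharedDataPaths is an
// invented name, and the real code iterates over column data rather than a vector of strings.

#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

std::unordered_map<std::string, size_t>
countSharedDataPaths(const std::vector<std::string> & paths, size_t max_entries)
{
    std::unordered_map<std::string, size_t> counts;
    for (const auto & path : paths)
    {
        if (auto it = counts.find(path); it != counts.end())
            ++it->second;                  // already tracked paths are always counted
        else if (counts.size() < max_entries)
            counts.emplace(path, 1);       // new paths are added only while under the cap
    }
    return counts;
}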
+ writeVarUInt(object_state->statistics.shared_data_paths_statistics.size(), *stream); + for (const auto & [path, size] : object_state->statistics.shared_data_paths_statistics) + { + writeStringBinary(path, *stream); + writeVarUInt(size, *stream); + } } settings.path.push_back(Substream::ObjectData); diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index faf15aa3260..62ff9849f45 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -82,10 +82,11 @@ private: struct DeserializeBinaryBulkStateObjectStructure : public ISerialization::DeserializeBinaryBulkState { ObjectSerializationVersion structure_version; + size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_set dynamic_paths; /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). - ColumnObject::Statistics statistics = {.source = ColumnObject::Statistics::Source::READ, .data = {}}; + ColumnObject::StatisticsPtr statistics; explicit DeserializeBinaryBulkStateObjectStructure(UInt64 structure_version_) : structure_version(structure_version_) {} }; diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index ad7a5a4c863..95b76c60063 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -146,7 +146,6 @@ Chunk Squashing::squash(std::vector && input_chunks, Chunk::ChunkInfoColl { /// We know all the data we will insert in advance and can make all necessary pre-allocations. mutable_columns[i]->prepareForSquashing(source_columns_list[i]); -// mutable_columns[i]->reserve(rows); for (auto & source_column : source_columns_list[i]) { auto column = std::move(source_column); diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 index 31ba25b7732..42f24e7f26f 100644 --- a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 @@ -1,3 +1,4 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; Dynamic paths 100000 a 90000 b @@ -47,6 +48,26 @@ Shared data paths 60000 e 10000 g Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +Dynamic paths 100000 a 90000 b 80000 c @@ -94,3 +115,22 @@ Shared data paths 70000 d 60000 e 10000 g +Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 
b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 index 6ae9f438432..05b5d8d6095 100644 --- a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 @@ -7,6 +7,8 @@ drop table if exists test; {% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;', 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;'] -%} +select '{{ create_command }}'; + {{ create_command }} system stop merges test; @@ -54,6 +56,19 @@ select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by pat select 'Shared data paths'; select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system stop merges test; +insert into test select number, toJSONString(map('c', number)) from numbers(40000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + drop table test; {% endfor -%} diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 index 31ba25b7732..99257f1d6ce 100644 --- a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 @@ -1,3 +1,4 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; Dynamic paths 100000 a 90000 b @@ -47,6 +48,26 @@ Shared data paths 60000 e 10000 g Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +Dynamic paths 100000 a 90000 b 80000 c @@ -94,3 +115,22 @@ Shared data paths 70000 d 60000 e 10000 g +Dynamic paths +200000 f 
+100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 index aef36452bb8..d17d8a48537 100644 --- a/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 @@ -7,6 +7,8 @@ drop table if exists test; {% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;', 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;'] -%} +select '{{ create_command }}'; + {{ create_command }} system stop merges test; @@ -54,6 +56,19 @@ select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by pat select 'Shared data paths'; select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system stop merges test; +insert into test select number, toJSONString(map('c', number)) from numbers(40000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + drop table test; {% endfor -%} diff --git a/tests/queries/0_stateless/03222_json_squashing.reference b/tests/queries/0_stateless/03222_json_squashing.reference new file mode 100644 index 00000000000..d0c19d8239a --- /dev/null +++ b/tests/queries/0_stateless/03222_json_squashing.reference @@ -0,0 +1,102 @@ +All paths +a +b +c +d +Dynamic paths +a +b +c +d +Shared data paths +All paths +a +b +c +d +e +f +Dynamic paths +a +b +c +d +e +f +Shared data paths +All paths +a +b +c +d +Dynamic paths +c +d +Shared data paths +a +b +All paths +a +b +c +d +e +f +Dynamic paths +a +b +Shared data paths +c +d +e +f +All paths +a +b +c +d +e +Dynamic paths +a +e +Shared data paths +b +c +d +All paths +b +c +d +e +Dynamic paths +d +e +Shared data paths +b +c +All paths +b +c +d +e +f +g +Dynamic paths +b +c +Shared data paths +d +e +f +g +All paths +b +d +e +f +Dynamic paths +b +f +Shared data paths +d +e diff --git a/tests/queries/0_stateless/03222_json_squashing.sql b/tests/queries/0_stateless/03222_json_squashing.sql new file mode 100644 index 00000000000..b6bda7a702f --- /dev/null +++ b/tests/queries/0_stateless/03222_json_squashing.sql @@ -0,0 +1,82 @@ +-- Tags: long + +set allow_experimental_json_type = 1; +set max_block_size = 1000; 
+ +drop table if exists test; + +create table test (json JSON) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON, number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON, number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON, '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +drop table test; +create table test (json JSON(max_dynamic_paths=2)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=2), number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2)) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON(max_dynamic_paths=2)) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42}'::JSON(max_dynamic_paths=2), number < 3000, '{"b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43}'::JSON(max_dynamic_paths=2)) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +drop table test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8)) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as 
path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00", "g" : "Hello2"}]}')::JSON(max_dynamic_paths=8)) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00"}]}')::JSON(max_dynamic_paths=8)) from numbers(1000000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +drop table test; \ No newline at end of file From 0b728e7547b7e1642b37cdc6721fd05a42abccce Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 13:56:08 +0000 Subject: [PATCH 2164/2361] Update docs --- .../data-types/data-types-binary-encoding.md | 103 +++++++++--------- 1 file changed, 52 insertions(+), 51 deletions(-) diff --git a/docs/en/sql-reference/data-types/data-types-binary-encoding.md b/docs/en/sql-reference/data-types/data-types-binary-encoding.md index bbd47d6f620..08fb664126a 100644 --- a/docs/en/sql-reference/data-types/data-types-binary-encoding.md +++ b/docs/en/sql-reference/data-types/data-types-binary-encoding.md @@ -12,58 +12,59 @@ This specification describes the binary format that can be used for binary encod The table below describes how each data type is represented in binary format. Each data type encoding consist of 1 byte that indicates the type and some optional additional information. `var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression. 
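As an illustration of how these encodings compose (assuming, as the `...` placeholders in the table suggest, that a composite type's tag byte is followed by the encoding of the type it wraps), the sketch below prints the assumed byte sequence for `Nullable(UInt8)`: the `Nullable(T)` tag `0x23` followed by the `UInt8` tag `0x01`. It is a hypothetical example, not part of the documented format itself.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Nullable(T) is tagged 0x23; the nested type encoding is assumed to follow it.
    // UInt8 is tagged 0x01, so Nullable(UInt8) would encode as the two bytes below.
    std::vector<uint8_t> encoding{0x23, 0x01};
    for (uint8_t byte : encoding)
        std::printf("0x%02X ", byte);
    std::printf("\n"); // prints: 0x23 0x01
    return 0;
}
```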
-| ClickHouse data type | Binary encoding | -|-----------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Nothing` | `0x00` | -| `UInt8` | `0x01` | -| `UInt16` | `0x02` | -| `UInt32` | `0x03` | -| `UInt64` | `0x04` | -| `UInt128` | `0x05` | -| `UInt256` | `0x06` | -| `Int8` | `0x07` | -| `Int16` | `0x08` | -| `Int32` | `0x09` | -| `Int64` | `0x0A` | -| `Int128` | `0x0B` | -| `Int256` | `0x0C` | -| `Float32` | `0x0D` | -| `Float64` | `0x0E` | -| `Date` | `0x0F` | -| `Date32` | `0x10` | -| `DateTime` | `0x11` | -| `DateTime(time_zone)` | `0x12` | -| `DateTime64(P)` | `0x13` | -| `DateTime64(P, time_zone)` | `0x14` | -| `String` | `0x15` | -| `FixedString(N)` | `0x16` | -| `Enum8` | `0x17...` | -| `Enum16` | `0x18...>` | -| `Decimal32(P, S)` | `0x19` | -| `Decimal64(P, S)` | `0x1A` | -| `Decimal128(P, S)` | `0x1B` | -| `Decimal256(P, S)` | `0x1C` | -| `UUID` | `0x1D` | -| `Array(T)` | `0x1E` | -| `Tuple(T1, ..., TN)` | `0x1F...` | -| `Tuple(name1 T1, ..., nameN TN)` | `0x20...` | -| `Set` | `0x21` | -| `Interval` | `0x22` (see [interval kind binary encoding](#interval-kind-binary-encoding)) | -| `Nullable(T)` | `0x23` | -| `Function` | `0x24...` | -| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | -| `LowCardinality(T)` | `0x26` | -| `Map(K, V)` | `0x27` | -| `IPv4` | `0x28` | -| `IPv6` | `0x29` | -| `Variant(T1, ..., TN)` | `0x2A...` | -| `Dynamic(max_types=N)` | `0x2B` | -| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C` | -| `Bool` | `0x2D` | -| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | -| `Nested(name1 T1, ..., nameN TN)` | `0x2F...` | -| `JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp)` | `0x30.........` | +| ClickHouse data type | Binary encoding | +|-----------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Nothing` | `0x00` | +| `UInt8` | `0x01` | +| `UInt16` | `0x02` | +| `UInt32` | `0x03` | +| `UInt64` | `0x04` | +| `UInt128` | `0x05` | +| `UInt256` | `0x06` | +| `Int8` | `0x07` | +| `Int16` | `0x08` | +| `Int32` | `0x09` | +| `Int64` | `0x0A` | +| `Int128` | `0x0B` | +| `Int256` | `0x0C` | +| `Float32` | `0x0D` | +| `Float64` | `0x0E` | +| `Date` | `0x0F` | +| `Date32` | `0x10` | +| `DateTime` | `0x11` | +| `DateTime(time_zone)` | `0x12` | +| `DateTime64(P)` | `0x13` | +| `DateTime64(P, time_zone)` | `0x14` | +| `String` | `0x15` | +| `FixedString(N)` | `0x16` | +| `Enum8` | `0x17...` | +| `Enum16` | `0x18...>` | +| `Decimal32(P, 
S)` | `0x19` | +| `Decimal64(P, S)` | `0x1A` | +| `Decimal128(P, S)` | `0x1B` | +| `Decimal256(P, S)` | `0x1C` | +| `UUID` | `0x1D` | +| `Array(T)` | `0x1E` | +| `Tuple(T1, ..., TN)` | `0x1F...` | +| `Tuple(name1 T1, ..., nameN TN)` | `0x20...` | +| `Set` | `0x21` | +| `Interval` | `0x22` (see [interval kind binary encoding](#interval-kind-binary-encoding)) | +| `Nullable(T)` | `0x23` | +| `Function` | `0x24...` | +| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | +| `LowCardinality(T)` | `0x26` | +| `Map(K, V)` | `0x27` | +| `IPv4` | `0x28` | +| `IPv6` | `0x29` | +| `Variant(T1, ..., TN)` | `0x2A...` | +| `Dynamic(max_types=N)` | `0x2B` | +| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C` | +| `Bool` | `0x2D` | +| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | +| `Nested(name1 T1, ..., nameN TN)` | `0x2F...` | +| `JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp)` | `0x30.........` | +For type `JSON` byte `uint8_serialization_version` indicates the version of the serialization. Right now the version is always 0 but can change in future if new arguments will be introduced for `JSON` type. ### Interval kind binary encoding From b5f7875f574b640dc00d42dea5e721058e9a10fc Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 13 Aug 2024 14:04:04 +0000 Subject: [PATCH 2165/2361] Remove odd new line --- tests/queries/0_stateless/00652_mergetree_mutations.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh index 3b0966dd2c3..6be0ebf882f 100755 --- a/tests/queries/0_stateless/00652_mergetree_mutations.sh +++ b/tests/queries/0_stateless/00652_mergetree_mutations.sh @@ -89,4 +89,3 @@ done ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, is_done FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner' ORDER BY mutation_id" ${CLICKHOUSE_CLIENT} --query="DROP TABLE mutations_cleaner" - From 79e055783931bd544897ec23eb07b7c7c9b09a69 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Tue, 13 Aug 2024 14:18:48 +0000 Subject: [PATCH 2166/2361] Write a script to automatically update the dict --- tests/fuzz/README.md | 23 - tests/fuzz/all.dict | 2377 +++++++------ tests/fuzz/dictionaries/datatypes.dict | 4418 +----------------------- tests/fuzz/dictionaries/functions.dict | 4283 ----------------------- tests/fuzz/update_dict.sh | 20 + 5 files changed, 1389 insertions(+), 9732 deletions(-) delete mode 100644 tests/fuzz/README.md create mode 100755 tests/fuzz/update_dict.sh diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md deleted file mode 100644 index 576ad66ed93..00000000000 --- a/tests/fuzz/README.md +++ /dev/null @@ -1,23 +0,0 @@ -The list of functions generated via the following query - -``` - clickhouse client -q "SELECT * FROM (SELECT DISTINCT concat('\"', name, '\"') as res FROM system.functions ORDER BY name UNION ALL SELECT concat('\"', a.name, b.name, '\"') as res FROM system.functions as a CROSS JOIN system.aggregate_function_combinators as b WHERE a.is_aggregate = 1) ORDER BY res" > functions.dict -``` - -The list of datatypes generated via the following query: - -``` - clickhouse client -q 
"SELECT DISTINCT concat('\"', name, '\"') as res FROM system.data_type_families ORDER BY name" > datatypes.dict -``` - -The list of keywords generated via the following query: - -``` - clickhouse client -q "SELECT DISTINCT concat('\"', keyword, '\"') as res FROM system.keywords ORDER BY keyword" > keywords.dict -``` - -Then merge all dictionaries into one (all.dict) - -``` - cat ./dictionaries/* | LC_ALL=C sort | uniq > all.dict -``` \ No newline at end of file diff --git a/tests/fuzz/all.dict b/tests/fuzz/all.dict index 1c3c657d6b0..30af3746fca 100644 --- a/tests/fuzz/all.dict +++ b/tests/fuzz/all.dict @@ -1,15 +1,971 @@ -"abs" -"accurateCast" -"accurateCastOrDefault" -"accurateCastOrNull" -"acos" -"acosh" "ADD COLUMN" "ADD CONSTRAINT" "ADD INDEX" "ADD PROJECTION" "ADD STATISTICS" "ADD" +"ADMIN OPTION FOR" +"AFTER" +"ALGORITHM" +"ALIAS" +"ALL" +"ALLOWED_LATENESS" +"ALTER COLUMN" +"ALTER DATABASE" +"ALTER LIVE VIEW" +"ALTER POLICY" +"ALTER PROFILE" +"ALTER QUOTA" +"ALTER ROLE" +"ALTER ROW POLICY" +"ALTER SETTINGS PROFILE" +"ALTER TABLE" +"ALTER TEMPORARY TABLE" +"ALTER USER" +"ALTER" +"AND STDOUT" +"AND" +"ANTI" +"ANY" +"APPEND" +"APPLY DELETED MASK" +"APPLY" +"ARRAY JOIN" +"AS" +"ASC" +"ASCENDING" +"ASOF" +"ASSUME" +"AST" +"ASYNC" +"ATTACH PART" +"ATTACH PARTITION" +"ATTACH POLICY" +"ATTACH PROFILE" +"ATTACH QUOTA" +"ATTACH ROLE" +"ATTACH ROW POLICY" +"ATTACH SETTINGS PROFILE" +"ATTACH USER" +"ATTACH" +"AUTO_INCREMENT" +"AZURE" +"AggregateFunction" +"Array" +"BACKUP" +"BCRYPT_HASH" +"BCRYPT_PASSWORD" +"BEGIN TRANSACTION" +"BETWEEN" +"BIDIRECTIONAL" +"BIGINT SIGNED" +"BIGINT UNSIGNED" +"BIGINT" +"BINARY LARGE OBJECT" +"BINARY VARYING" +"BINARY" +"BIT" +"BIT_AND" +"BIT_ANDArgMax" +"BIT_ANDArgMin" +"BIT_ANDArray" +"BIT_ANDDistinct" +"BIT_ANDForEach" +"BIT_ANDIf" +"BIT_ANDMap" +"BIT_ANDMerge" +"BIT_ANDNull" +"BIT_ANDOrDefault" +"BIT_ANDOrNull" +"BIT_ANDResample" +"BIT_ANDSimpleState" +"BIT_ANDState" +"BIT_OR" +"BIT_ORArgMax" +"BIT_ORArgMin" +"BIT_ORArray" +"BIT_ORDistinct" +"BIT_ORForEach" +"BIT_ORIf" +"BIT_ORMap" +"BIT_ORMerge" +"BIT_ORNull" +"BIT_OROrDefault" +"BIT_OROrNull" +"BIT_ORResample" +"BIT_ORSimpleState" +"BIT_ORState" +"BIT_XOR" +"BIT_XORArgMax" +"BIT_XORArgMin" +"BIT_XORArray" +"BIT_XORDistinct" +"BIT_XORForEach" +"BIT_XORIf" +"BIT_XORMap" +"BIT_XORMerge" +"BIT_XORNull" +"BIT_XOROrDefault" +"BIT_XOROrNull" +"BIT_XORResample" +"BIT_XORSimpleState" +"BIT_XORState" +"BLAKE3" +"BLOB" +"BOTH" +"BY" +"BYTE" +"BYTEA" +"Bool" +"CASCADE" +"CASE" +"CAST" +"CHANGE" +"CHANGEABLE_IN_READONLY" +"CHANGED" +"CHAR LARGE OBJECT" +"CHAR VARYING" +"CHAR" +"CHARACTER LARGE OBJECT" +"CHARACTER VARYING" +"CHARACTER" +"CHARACTER_LENGTH" +"CHAR_LENGTH" +"CHECK ALL TABLES" +"CHECK TABLE" +"CHECK" +"CLEANUP" +"CLEAR COLUMN" +"CLEAR INDEX" +"CLEAR PROJECTION" +"CLEAR STATISTICS" +"CLOB" +"CLUSTER" +"CLUSTERS" +"CN" +"CODEC" +"COLLATE" +"COLUMN" +"COLUMNS" +"COMMENT COLUMN" +"COMMENT" +"COMMIT" +"COMPRESSION" +"CONST" +"CONSTRAINT" +"COVAR_POP" +"COVAR_POPArgMax" +"COVAR_POPArgMin" +"COVAR_POPArray" +"COVAR_POPDistinct" +"COVAR_POPForEach" +"COVAR_POPIf" +"COVAR_POPMap" +"COVAR_POPMerge" +"COVAR_POPNull" +"COVAR_POPOrDefault" +"COVAR_POPOrNull" +"COVAR_POPResample" +"COVAR_POPSimpleState" +"COVAR_POPState" +"COVAR_SAMP" +"COVAR_SAMPArgMax" +"COVAR_SAMPArgMin" +"COVAR_SAMPArray" +"COVAR_SAMPDistinct" +"COVAR_SAMPForEach" +"COVAR_SAMPIf" +"COVAR_SAMPMap" +"COVAR_SAMPMerge" +"COVAR_SAMPNull" +"COVAR_SAMPOrDefault" +"COVAR_SAMPOrNull" +"COVAR_SAMPResample" +"COVAR_SAMPSimpleState" +"COVAR_SAMPState" +"CRC32" 
+"CRC32IEEE" +"CRC64" +"CREATE POLICY" +"CREATE PROFILE" +"CREATE QUOTA" +"CREATE ROLE" +"CREATE ROW POLICY" +"CREATE SETTINGS PROFILE" +"CREATE TABLE" +"CREATE TEMPORARY TABLE" +"CREATE USER" +"CREATE" +"CROSS" +"CUBE" +"CURRENT GRANTS" +"CURRENT QUOTA" +"CURRENT ROLES" +"CURRENT ROW" +"CURRENT TRANSACTION" +"CURRENTUSER" +"CURRENT_USER" +"D" +"DATA INNER UUID" +"DATA" +"DATABASE" +"DATABASES" +"DATE" +"DATEADD" +"DATEDIFF" +"DATESUB" +"DATE_ADD" +"DATE_DIFF" +"DATE_FORMAT" +"DATE_SUB" +"DATE_TRUNC" +"DAY" +"DAYOFMONTH" +"DAYOFWEEK" +"DAYOFYEAR" +"DAYS" +"DD" +"DEC" +"DEDUPLICATE" +"DEFAULT DATABASE" +"DEFAULT ROLE" +"DEFAULT" +"DEFINER" +"DELETE WHERE" +"DELETE" +"DEPENDS ON" +"DESC" +"DESCENDING" +"DESCRIBE" +"DETACH PART" +"DETACH PARTITION" +"DETACH" +"DICTIONARIES" +"DICTIONARY" +"DISK" +"DISTINCT ON" +"DISTINCT" +"DIV" +"DOUBLE PRECISION" +"DOUBLE" +"DOUBLE_SHA1_HASH" +"DOUBLE_SHA1_PASSWORD" +"DROP COLUMN" +"DROP CONSTRAINT" +"DROP DEFAULT" +"DROP DETACHED PART" +"DROP DETACHED PARTITION" +"DROP INDEX" +"DROP PART" +"DROP PARTITION" +"DROP PROJECTION" +"DROP STATISTICS" +"DROP TABLE" +"DROP TEMPORARY TABLE" +"DROP" +"Date" +"Date32" +"DateTime" +"DateTime32" +"DateTime64" +"Decimal" +"Decimal128" +"Decimal256" +"Decimal32" +"Decimal64" +"Dynamic" +"ELSE" +"EMPTY AS" +"EMPTY" +"ENABLED ROLES" +"END" +"ENFORCED" +"ENGINE" +"ENUM" +"EPHEMERAL SEQUENTIAL" +"EPHEMERAL" +"ESTIMATE" +"EVENT" +"EVENTS" +"EVERY" +"EXCEPT DATABASE" +"EXCEPT DATABASES" +"EXCEPT TABLE" +"EXCEPT TABLES" +"EXCEPT" +"EXCHANGE DICTIONARIES" +"EXCHANGE TABLES" +"EXISTS" +"EXPLAIN" +"EXPRESSION" +"EXTENDED" +"EXTERNAL DDL FROM" +"EXTRACT" +"Enum" +"Enum16" +"Enum8" +"FALSE" +"FETCH PART" +"FETCH PARTITION" +"FETCH" +"FIELDS" +"FILE" +"FILESYSTEM CACHE" +"FILESYSTEM CACHES" +"FILTER" +"FINAL" +"FIRST" +"FIXED" +"FLOAT" +"FOLLOWING" +"FOR" +"FOREIGN KEY" +"FOREIGN" +"FORGET PARTITION" +"FORMAT" +"FORMAT_BYTES" +"FQDN" +"FREEZE" +"FROM INFILE" +"FROM SHARD" +"FROM" +"FROM_BASE64" +"FROM_DAYS" +"FROM_UNIXTIME" +"FULL" +"FULLTEXT" +"FUNCTION" +"FixedString" +"Float32" +"Float64" +"ForEach" +"GEOMETRY" +"GLOBAL IN" +"GLOBAL NOT IN" +"GLOBAL" +"GRANT OPTION FOR" +"GRANT" +"GRANTEES" +"GRANULARITY" +"GROUP BY" +"GROUPING SETS" +"GROUPS" +"H" +"HASH" +"HAVING" +"HDFS" +"HH" +"HIERARCHICAL" +"HOST" +"HOUR" +"HOURS" +"HTTP" +"ID" +"IDENTIFIED" +"IF EMPTY" +"IF EXISTS" +"IF NOT EXISTS" +"IGNORE NULLS" +"ILIKE" +"IN PARTITION" +"IN" +"INDEX" +"INDEXES" +"INDICES" +"INET4" +"INET6" +"INET6_ATON" +"INET6_NTOA" +"INET_ATON" +"INET_NTOA" +"INHERIT" +"INJECTIVE" +"INNER" +"INSERT INTO" +"INT SIGNED" +"INT UNSIGNED" +"INT" +"INT1 SIGNED" +"INT1 UNSIGNED" +"INT1" +"INTEGER SIGNED" +"INTEGER UNSIGNED" +"INTEGER" +"INTERPOLATE" +"INTERSECT" +"INTERVAL" +"INTO OUTFILE" +"INVISIBLE" +"INVOKER" +"IP" +"IPv4" +"IPv4CIDRToRange" +"IPv4NumToString" +"IPv4NumToStringClassC" +"IPv4StringToNum" +"IPv4StringToNumOrDefault" +"IPv4StringToNumOrNull" +"IPv4ToIPv6" +"IPv6" +"IPv6CIDRToRange" +"IPv6NumToString" +"IPv6StringToNum" +"IPv6StringToNumOrDefault" +"IPv6StringToNumOrNull" +"IS NOT DISTINCT FROM" +"IS NOT NULL" +"IS NULL" +"IS_OBJECT_ID" +"Int128" +"Int16" +"Int256" +"Int32" +"Int64" +"Int8" +"IntervalDay" +"IntervalHour" +"IntervalMicrosecond" +"IntervalMillisecond" +"IntervalMinute" +"IntervalMonth" +"IntervalNanosecond" +"IntervalQuarter" +"IntervalSecond" +"IntervalWeek" +"IntervalYear" +"JOIN" +"JSON" +"JSONArrayLength" +"JSONExtract" +"JSONExtractArrayRaw" +"JSONExtractBool" +"JSONExtractFloat" +"JSONExtractInt" +"JSONExtractKeys" 
+"JSONExtractKeysAndValues" +"JSONExtractKeysAndValuesRaw" +"JSONExtractRaw" +"JSONExtractString" +"JSONExtractUInt" +"JSONHas" +"JSONKey" +"JSONLength" +"JSONMergePatch" +"JSONType" +"JSON_ARRAY_LENGTH" +"JSON_EXISTS" +"JSON_QUERY" +"JSON_VALUE" +"JWT" +"KERBEROS" +"KEY BY" +"KEY" +"KEYED BY" +"KEYS" +"KILL" +"KIND" +"L1Distance" +"L1Norm" +"L1Normalize" +"L2Distance" +"L2Norm" +"L2Normalize" +"L2SquaredDistance" +"L2SquaredNorm" +"LARGE OBJECT" +"LAST" +"LAST_DAY" +"LAYOUT" +"LDAP" +"LEADING" +"LEFT ARRAY JOIN" +"LEFT" +"LESS THAN" +"LEVEL" +"LIFETIME" +"LIGHTWEIGHT" +"LIKE" +"LIMIT" +"LINEAR" +"LIST" +"LIVE" +"LOCAL" +"LONGBLOB" +"LONGTEXT" +"LTRIM" +"LineString" +"LinfDistance" +"LinfNorm" +"LinfNormalize" +"LowCardinality" +"LpDistance" +"LpNorm" +"LpNormalize" +"M" +"MACNumToString" +"MACStringToNum" +"MACStringToOUI" +"MAP_FROM_ARRAYS" +"MATCH" +"MATERIALIZE COLUMN" +"MATERIALIZE INDEX" +"MATERIALIZE PROJECTION" +"MATERIALIZE STATISTICS" +"MATERIALIZE TTL" +"MATERIALIZE" +"MATERIALIZED" +"MAX" +"MCS" +"MD4" +"MD5" +"MEDIUMBLOB" +"MEDIUMINT SIGNED" +"MEDIUMINT UNSIGNED" +"MEDIUMINT" +"MEDIUMTEXT" +"MEMORY" +"MERGES" +"METRICS INNER UUID" +"METRICS" +"MI" +"MICROSECOND" +"MICROSECONDS" +"MILLISECOND" +"MILLISECONDS" +"MIN" +"MINUTE" +"MINUTES" +"MM" +"MOD" +"MODIFY COLUMN" +"MODIFY COMMENT" +"MODIFY DEFINER" +"MODIFY ORDER BY" +"MODIFY QUERY" +"MODIFY REFRESH" +"MODIFY SAMPLE BY" +"MODIFY SETTING" +"MODIFY SQL SECURITY" +"MODIFY STATISTICS" +"MODIFY TTL" +"MODIFY" +"MONTH" +"MONTHS" +"MOVE PART" +"MOVE PARTITION" +"MOVE" +"MS" +"MUTATION" +"Map" +"Merge" +"MultiLineString" +"MultiPolygon" +"N" +"NAME" +"NAMED COLLECTION" +"NANOSECOND" +"NANOSECONDS" +"NATIONAL CHAR VARYING" +"NATIONAL CHAR" +"NATIONAL CHARACTER LARGE OBJECT" +"NATIONAL CHARACTER VARYING" +"NATIONAL CHARACTER" +"NCHAR LARGE OBJECT" +"NCHAR VARYING" +"NCHAR" +"NEXT" +"NO ACTION" +"NO DELAY" +"NO LIMITS" +"NONE" +"NOT BETWEEN" +"NOT IDENTIFIED" +"NOT ILIKE" +"NOT IN" +"NOT KEYED" +"NOT LIKE" +"NOT OVERRIDABLE" +"NOT" +"NO_PASSWORD" +"NS" +"NULL" +"NULLS" +"NUMERIC" +"NVARCHAR" +"Nested" +"Nothing" +"Null" +"Nullable" +"OCTET_LENGTH" +"OFFSET" +"ON DELETE" +"ON UPDATE" +"ON VOLUME" +"ON" +"ONLY" +"OPTIMIZE TABLE" +"OR REPLACE" +"OR" +"ORDER BY" +"OUTER" +"OVER" +"OVERRIDABLE" +"Object" +"PART" +"PARTIAL" +"PARTITION BY" +"PARTITION" +"PARTITIONS" +"PART_MOVE_TO_SHARD" +"PASTE" +"PERIODIC REFRESH" +"PERMANENTLY" +"PERMISSIVE" +"PERSISTENT SEQUENTIAL" +"PERSISTENT" +"PIPELINE" +"PLAINTEXT_PASSWORD" +"PLAN" +"POPULATE" +"PRECEDING" +"PRECISION" +"PREWHERE" +"PRIMARY KEY" +"PRIMARY" +"PROFILE" +"PROJECTION" +"PULL" +"Point" +"Polygon" +"Protobuf" +"Q" +"QQ" +"QUALIFY" +"QUARTER" +"QUARTERS" +"QUERY TREE" +"QUERY" +"QUOTA" +"RANDOMIZE FOR" +"RANDOMIZED" +"RANGE" +"READONLY" +"REAL" +"REALM" +"RECOMPRESS" +"RECURSIVE" +"REFERENCES" +"REFRESH" +"REGEXP" +"REGEXP_EXTRACT" +"REGEXP_MATCHES" +"REGEXP_REPLACE" +"REMOVE SAMPLE BY" +"REMOVE TTL" +"REMOVE" +"RENAME COLUMN" +"RENAME DATABASE" +"RENAME DICTIONARY" +"RENAME TABLE" +"RENAME TO" +"RENAME" +"REPLACE PARTITION" +"REPLACE" +"RESET SETTING" +"RESPECT NULLS" +"RESTORE" +"RESTRICT" +"RESTRICTIVE" +"RESUME" +"REVOKE" +"RIGHT" +"ROLLBACK" +"ROLLUP" +"ROW" +"ROWS" +"RTRIM" +"Resample" +"Ring" +"S" +"S3" +"SALT" +"SAMPLE BY" +"SAMPLE" +"SAN" +"SCHEMA" +"SCHEME" +"SECOND" +"SECONDS" +"SELECT" +"SEMI" +"SERVER" +"SET DEFAULT ROLE" +"SET DEFAULT" +"SET FAKE TIME" +"SET NULL" +"SET ROLE DEFAULT" +"SET ROLE" +"SET TRANSACTION SNAPSHOT" +"SET" +"SETTINGS" +"SHA1" +"SHA224" +"SHA256" 
+"SHA256_HASH" +"SHA256_PASSWORD" +"SHA384" +"SHA512" +"SHA512_256" +"SHOW ACCESS" +"SHOW CREATE" +"SHOW ENGINES" +"SHOW FUNCTIONS" +"SHOW GRANTS" +"SHOW PRIVILEGES" +"SHOW PROCESSLIST" +"SHOW SETTING" +"SHOW" +"SIGNED" +"SIMPLE" +"SINGLE" +"SMALLINT SIGNED" +"SMALLINT UNSIGNED" +"SMALLINT" +"SOURCE" +"SPATIAL" +"SQL SECURITY" +"SQL_TSI_DAY" +"SQL_TSI_HOUR" +"SQL_TSI_MICROSECOND" +"SQL_TSI_MILLISECOND" +"SQL_TSI_MINUTE" +"SQL_TSI_MONTH" +"SQL_TSI_NANOSECOND" +"SQL_TSI_QUARTER" +"SQL_TSI_SECOND" +"SQL_TSI_WEEK" +"SQL_TSI_YEAR" +"SS" +"SSH_KEY" +"SSL_CERTIFICATE" +"START TRANSACTION" +"STATISTICS" +"STD" +"STDArgMax" +"STDArgMin" +"STDArray" +"STDDEV_POP" +"STDDEV_POPArgMax" +"STDDEV_POPArgMin" +"STDDEV_POPArray" +"STDDEV_POPDistinct" +"STDDEV_POPForEach" +"STDDEV_POPIf" +"STDDEV_POPMap" +"STDDEV_POPMerge" +"STDDEV_POPNull" +"STDDEV_POPOrDefault" +"STDDEV_POPOrNull" +"STDDEV_POPResample" +"STDDEV_POPSimpleState" +"STDDEV_POPState" +"STDDEV_SAMP" +"STDDEV_SAMPArgMax" +"STDDEV_SAMPArgMin" +"STDDEV_SAMPArray" +"STDDEV_SAMPDistinct" +"STDDEV_SAMPForEach" +"STDDEV_SAMPIf" +"STDDEV_SAMPMap" +"STDDEV_SAMPMerge" +"STDDEV_SAMPNull" +"STDDEV_SAMPOrDefault" +"STDDEV_SAMPOrNull" +"STDDEV_SAMPResample" +"STDDEV_SAMPSimpleState" +"STDDEV_SAMPState" +"STDDistinct" +"STDForEach" +"STDIf" +"STDMap" +"STDMerge" +"STDNull" +"STDOrDefault" +"STDOrNull" +"STDResample" +"STDSimpleState" +"STDState" +"STEP" +"STORAGE" +"STRICT" +"STRICTLY_ASCENDING" +"SUBPARTITION BY" +"SUBPARTITION" +"SUBPARTITIONS" +"SUBSTRING" +"SUBSTRING_INDEX" +"SUSPEND" +"SVG" +"SYNC" +"SYNTAX" +"SYSTEM" +"SimpleAggregateFunction" +"State" +"String" +"TABLE OVERRIDE" +"TABLE" +"TABLES" +"TAGS INNER UUID" +"TAGS" +"TEMPORARY TABLE" +"TEMPORARY" +"TEST" +"TEXT" +"THEN" +"TIME" +"TIMESTAMP" +"TIMESTAMPADD" +"TIMESTAMPDIFF" +"TIMESTAMPSUB" +"TIMESTAMP_ADD" +"TIMESTAMP_DIFF" +"TIMESTAMP_SUB" +"TINYBLOB" +"TINYINT SIGNED" +"TINYINT UNSIGNED" +"TINYINT" +"TINYTEXT" +"TO DISK" +"TO INNER UUID" +"TO SHARD" +"TO TABLE" +"TO VOLUME" +"TO" +"TOP" +"TOTALS" +"TO_BASE64" +"TO_DAYS" +"TO_UNIXTIME" +"TRACKING ONLY" +"TRAILING" +"TRANSACTION" +"TRIGGER" +"TRIM" +"TRUE" +"TRUNCATE" +"TTL" +"TYPE" +"TYPEOF" +"Tuple" +"UInt128" +"UInt16" +"UInt256" +"UInt32" +"UInt64" +"UInt8" +"ULIDStringToDateTime" +"UNBOUNDED" +"UNDROP" +"UNFREEZE" +"UNION" +"UNIQUE" +"UNSET FAKE TIME" +"UNSIGNED" +"UPDATE" +"URL" +"URLHash" +"URLHierarchy" +"URLPathHierarchy" +"USE" +"USING" +"UTCTimestamp" +"UTC_timestamp" +"UUID" +"UUIDNumToString" +"UUIDStringToNum" +"UUIDToNum" +"UUIDv7ToDateTime" +"VALID UNTIL" +"VALUES" +"VARBINARY" +"VARCHAR" +"VARCHAR2" +"VARYING" +"VAR_POP" +"VAR_POPArgMax" +"VAR_POPArgMin" +"VAR_POPArray" +"VAR_POPDistinct" +"VAR_POPForEach" +"VAR_POPIf" +"VAR_POPMap" +"VAR_POPMerge" +"VAR_POPNull" +"VAR_POPOrDefault" +"VAR_POPOrNull" +"VAR_POPResample" +"VAR_POPSimpleState" +"VAR_POPState" +"VAR_SAMP" +"VAR_SAMPArgMax" +"VAR_SAMPArgMin" +"VAR_SAMPArray" +"VAR_SAMPDistinct" +"VAR_SAMPForEach" +"VAR_SAMPIf" +"VAR_SAMPMap" +"VAR_SAMPMerge" +"VAR_SAMPNull" +"VAR_SAMPOrDefault" +"VAR_SAMPOrNull" +"VAR_SAMPResample" +"VAR_SAMPSimpleState" +"VAR_SAMPState" +"VIEW" +"VISIBLE" +"Variant" +"WATCH" +"WATERMARK" +"WEEK" +"WEEKS" +"WHEN" +"WHERE" +"WINDOW" +"WITH ADMIN OPTION" +"WITH CHECK" +"WITH FILL" +"WITH GRANT OPTION" +"WITH NAME" +"WITH REPLACE OPTION" +"WITH TIES" +"WITH" +"WITH_ITEMINDEX" +"WK" +"WRITABLE" +"WW" +"YEAR" +"YEARS" +"YY" +"YYYY" +"YYYYMMDDToDate" +"YYYYMMDDToDate32" +"YYYYMMDDhhmmssToDateTime" +"YYYYMMDDhhmmssToDateTime64" +"ZKPATH" +"_CAST" 
+"__actionName" +"__bitBoolMaskAnd" +"__bitBoolMaskOr" +"__bitSwapLastTwo" +"__bitWrapperFunc" +"__getScalar" +"__scalarSubqueryResult" +"abs" +"accurateCast" +"accurateCastOrDefault" +"accurateCastOrNull" +"acos" +"acosh" "addDate" "addDays" "addHours" @@ -20,19 +976,16 @@ "addMonths" "addNanoseconds" "addQuarters" -"addressToLine" -"addressToLineWithInlines" -"addressToSymbol" "addSeconds" "addTupleOfIntervals" "addWeeks" "addYears" -"ADMIN OPTION FOR" +"addressToLine" +"addressToLineWithInlines" +"addressToSymbol" "aes_decrypt_mysql" "aes_encrypt_mysql" -"AFTER" "age" -"AggregateFunction" "aggThrow" "aggThrowArgMax" "aggThrowArgMin" @@ -48,24 +1001,7 @@ "aggThrowResample" "aggThrowSimpleState" "aggThrowState" -"ALGORITHM" -"ALIAS" -"ALL" -"ALLOWED_LATENESS" "alphaTokens" -"ALTER COLUMN" -"ALTER DATABASE" -"ALTER LIVE VIEW" -"ALTER POLICY" -"ALTER PROFILE" -"ALTER QUOTA" -"ALTER ROLE" -"ALTER ROW POLICY" -"ALTER SETTINGS PROFILE" -"ALTER TABLE" -"ALTER TEMPORARY TABLE" -"ALTER USER" -"ALTER" "analysisOfVariance" "analysisOfVarianceArgMax" "analysisOfVarianceArgMin" @@ -81,8 +1017,6 @@ "analysisOfVarianceResample" "analysisOfVarianceSimpleState" "analysisOfVarianceState" -"AND STDOUT" -"AND" "and" "anova" "anovaArgMax" @@ -99,8 +1033,6 @@ "anovaResample" "anovaSimpleState" "anovaState" -"ANTI" -"ANY" "any" "anyArgMax" "anyArgMin" @@ -206,10 +1138,7 @@ "any_value_respect_nullsResample" "any_value_respect_nullsSimpleState" "any_value_respect_nullsState" -"APPEND" "appendTrailingCharIfAbsent" -"APPLY DELETED MASK" -"APPLY" "approx_top_count" "approx_top_countArgMax" "approx_top_countArgMin" @@ -285,11 +1214,9 @@ "argMinResample" "argMinSimpleState" "argMinState" -"ARRAY JOIN" -"Array" "array" -"arrayAll" "arrayAUC" +"arrayAll" "arrayAvg" "arrayCompact" "arrayConcat" @@ -382,31 +1309,13 @@ "array_concat_aggResample" "array_concat_aggSimpleState" "array_concat_aggState" -"AS" -"ASC" -"ASCENDING" "ascii" "asin" "asinh" -"ASOF" -"ASSUME" "assumeNotNull" -"AST" -"ASYNC" "atan" "atan2" "atanh" -"ATTACH PART" -"ATTACH PARTITION" -"ATTACH POLICY" -"ATTACH PROFILE" -"ATTACH QUOTA" -"ATTACH ROLE" -"ATTACH ROW POLICY" -"ATTACH SETTINGS PROFILE" -"ATTACH USER" -"ATTACH" -"AUTO_INCREMENT" "avg" "avgArgMax" "avgArgMin" @@ -437,8 +1346,6 @@ "avgWeightedResample" "avgWeightedSimpleState" "avgWeightedState" -"AZURE" -"BACKUP" "bagexpansion" "bar" "base58Decode" @@ -447,17 +1354,24 @@ "base64Encode" "base64URLDecode" "base64URLEncode" -"basename" "base_backup" -"BCRYPT_HASH" -"BCRYPT_PASSWORD" -"BEGIN TRANSACTION" -"BETWEEN" -"BIDIRECTIONAL" +"basename" "bin" "bitAnd" "bitCount" "bitHammingDistance" +"bitNot" +"bitOr" +"bitPositionsToArray" +"bitRotateLeft" +"bitRotateRight" +"bitShiftLeft" +"bitShiftRight" +"bitSlice" +"bitTest" +"bitTestAll" +"bitTestAny" +"bitXor" "bitmapAnd" "bitmapAndCardinality" "bitmapAndnot" @@ -479,68 +1393,11 @@ "bitmapXorCardinality" "bitmaskToArray" "bitmaskToList" -"bitNot" -"bitOr" -"bitPositionsToArray" -"bitRotateLeft" -"bitRotateRight" -"bitShiftLeft" -"bitShiftRight" -"bitSlice" -"bitTest" -"bitTestAll" -"bitTestAny" -"bitXor" -"BIT_AND" -"BIT_ANDArgMax" -"BIT_ANDArgMin" -"BIT_ANDArray" -"BIT_ANDDistinct" -"BIT_ANDForEach" -"BIT_ANDIf" -"BIT_ANDMap" -"BIT_ANDMerge" -"BIT_ANDNull" -"BIT_ANDOrDefault" -"BIT_ANDOrNull" -"BIT_ANDResample" -"BIT_ANDSimpleState" -"BIT_ANDState" -"BIT_OR" -"BIT_ORArgMax" -"BIT_ORArgMin" -"BIT_ORArray" -"BIT_ORDistinct" -"BIT_ORForEach" -"BIT_ORIf" -"BIT_ORMap" -"BIT_ORMerge" -"BIT_ORNull" -"BIT_OROrDefault" -"BIT_OROrNull" -"BIT_ORResample" 
-"BIT_ORSimpleState" -"BIT_ORState" -"BIT_XOR" -"BIT_XORArgMax" -"BIT_XORArgMin" -"BIT_XORArray" -"BIT_XORDistinct" -"BIT_XORForEach" -"BIT_XORIf" -"BIT_XORMap" -"BIT_XORMerge" -"BIT_XORNull" -"BIT_XOROrDefault" -"BIT_XOROrNull" -"BIT_XORResample" -"BIT_XORSimpleState" -"BIT_XORState" -"BLAKE3" "blockNumber" "blockSerializedSize" "blockSize" -"BOTH" +"bool" +"boolean" "boundingRatio" "boundingRatioArgMax" "boundingRatioArgMin" @@ -557,18 +1414,14 @@ "boundingRatioSimpleState" "boundingRatioState" "buildId" -"BY" "byteHammingDistance" "byteSize" "byteSlice" "byteSwap" -"CASCADE" -"CASE" "caseWithExpr" "caseWithExpression" "caseWithoutExpr" "caseWithoutExpression" -"CAST" "catboostEvaluate" "categoricalInformationValue" "categoricalInformationValueArgMax" @@ -588,46 +1441,17 @@ "cbrt" "ceil" "ceiling" -"CHANGE" -"CHANGEABLE_IN_READONLY" -"CHANGED" "changeDay" "changeHour" "changeMinute" "changeMonth" "changeSecond" "changeYear" -"CHAR VARYING" -"CHAR" "char" -"CHARACTER LARGE OBJECT" -"CHARACTER VARYING" -"CHARACTER" -"CHARACTER_LENGTH" -"CHAR_LENGTH" -"CHECK ALL TABLES" -"CHECK TABLE" -"CHECK" "cityHash64" "clamp" -"CLEANUP" -"CLEAR COLUMN" -"CLEAR INDEX" -"CLEAR PROJECTION" -"CLEAR STATISTICS" -"CLUSTER" -"CLUSTERS" "cluster_host_ids" -"CN" "coalesce" -"CODEC" -"COLLATE" -"COLUMN" -"COLUMNS" -"COMMENT COLUMN" -"COMMENT" -"COMMIT" -"COMPRESSION" "concat" "concatAssumeInjective" "concatWithSeparator" @@ -635,8 +1459,6 @@ "concat_ws" "connectionId" "connection_id" -"CONST" -"CONSTRAINT" "contingency" "contingencyArgMax" "contingencyArgMin" @@ -813,36 +1635,6 @@ "covarSampStableSimpleState" "covarSampStableState" "covarSampState" -"COVAR_POP" -"COVAR_POPArgMax" -"COVAR_POPArgMin" -"COVAR_POPArray" -"COVAR_POPDistinct" -"COVAR_POPForEach" -"COVAR_POPIf" -"COVAR_POPMap" -"COVAR_POPMerge" -"COVAR_POPNull" -"COVAR_POPOrDefault" -"COVAR_POPOrNull" -"COVAR_POPResample" -"COVAR_POPSimpleState" -"COVAR_POPState" -"COVAR_SAMP" -"COVAR_SAMPArgMax" -"COVAR_SAMPArgMin" -"COVAR_SAMPArray" -"COVAR_SAMPDistinct" -"COVAR_SAMPForEach" -"COVAR_SAMPIf" -"COVAR_SAMPMap" -"COVAR_SAMPMerge" -"COVAR_SAMPNull" -"COVAR_SAMPOrDefault" -"COVAR_SAMPOrNull" -"COVAR_SAMPResample" -"COVAR_SAMPSimpleState" -"COVAR_SAMPState" "cramersV" "cramersVArgMax" "cramersVArgMin" @@ -873,38 +1665,16 @@ "cramersVResample" "cramersVSimpleState" "cramersVState" -"CRC32" -"CRC32IEEE" -"CRC64" -"CREATE POLICY" -"CREATE PROFILE" -"CREATE QUOTA" -"CREATE ROLE" -"CREATE ROW POLICY" -"CREATE SETTINGS PROFILE" -"CREATE TABLE" -"CREATE TEMPORARY TABLE" -"CREATE USER" -"CREATE" -"CROSS" -"CUBE" "curdate" -"CURRENT GRANTS" -"CURRENT QUOTA" -"CURRENT ROLES" -"CURRENT ROW" -"CURRENT TRANSACTION" "currentDatabase" "currentProfiles" "currentRoles" "currentSchemas" -"CURRENTUSER" "currentUser" "current_database" "current_date" "current_schemas" "current_timestamp" -"CURRENT_USER" "current_user" "cutFragment" "cutIPv6" @@ -920,59 +1690,25 @@ "cutToFirstSignificantSubdomainWithWWWRFC" "cutURLParameter" "cutWWW" -"D" "damerauLevenshteinDistance" -"DATA INNER UUID" -"DATA" -"DATABASE" -"DATABASES" -"DATE" -"Date" -"DATEADD" -"DATEDIFF" "dateDiff" "dateName" -"DATESUB" -"DateTime" -"DateTime64" "dateTime64ToSnowflake" "dateTime64ToSnowflakeID" "dateTimeToSnowflake" "dateTimeToSnowflakeID" "dateTrunc" -"DATE_ADD" -"DATE_DIFF" "date_diff" -"DATE_FORMAT" -"DATE_SUB" -"DATE_TRUNC" -"DAY" -"DAYOFMONTH" -"DAYOFWEEK" -"DAYOFYEAR" -"DAYS" -"DD" -"Decimal" -"Decimal128" -"Decimal32" -"Decimal64" "decodeHTMLComponent" "decodeURLComponent" "decodeURLFormComponent" 
"decodeXMLComponent" "decrypt" -"DEDUPLICATE" -"DEFAULT DATABASE" -"DEFAULT ROLE" -"DEFAULT" "defaultProfiles" "defaultRoles" "defaultValueOfArgumentType" "defaultValueOfTypeName" -"DEFINER" "degrees" -"DELETE WHERE" -"DELETE" "deltaSum" "deltaSumArgMax" "deltaSumArgMin" @@ -1034,13 +1770,6 @@ "dense_rankResample" "dense_rankSimpleState" "dense_rankState" -"DEPENDS ON" -"DESC" -"DESCENDING" -"DESCRIBE" -"DETACH PART" -"DETACH PARTITION" -"DETACH" "detectCharset" "detectLanguage" "detectLanguageMixed" @@ -1060,6 +1789,10 @@ "dictGetFloat64" "dictGetFloat64OrDefault" "dictGetHierarchy" +"dictGetIPv4" +"dictGetIPv4OrDefault" +"dictGetIPv6" +"dictGetIPv6OrDefault" "dictGetInt16" "dictGetInt16OrDefault" "dictGetInt32" @@ -1068,10 +1801,6 @@ "dictGetInt64OrDefault" "dictGetInt8" "dictGetInt8OrDefault" -"dictGetIPv4" -"dictGetIPv4OrDefault" -"dictGetIPv6" -"dictGetIPv6OrDefault" "dictGetOrDefault" "dictGetOrNull" "dictGetString" @@ -1087,19 +1816,13 @@ "dictGetUUID" "dictGetUUIDOrDefault" "dictHas" -"DICTIONARIES" -"DICTIONARY" "dictIsIn" -"DISK" "displayName" "distanceL1" "distanceL2" "distanceL2Squared" "distanceLinf" "distanceLp" -"DISTINCT ON" -"DISTINCT" -"DIV" "divide" "divideDecimal" "domain" @@ -1107,30 +1830,12 @@ "domainWithoutWWW" "domainWithoutWWWRFC" "dotProduct" -"DOUBLE_SHA1_HASH" -"DOUBLE_SHA1_PASSWORD" -"DROP COLUMN" -"DROP CONSTRAINT" -"DROP DEFAULT" -"DROP DETACHED PART" -"DROP DETACHED PARTITION" -"DROP INDEX" -"DROP PART" -"DROP PARTITION" -"DROP PROJECTION" -"DROP STATISTICS" -"DROP TABLE" -"DROP TEMPORARY TABLE" -"DROP" "dumpColumnStructure" "dynamicElement" "dynamicType" "e" "editDistance" "editDistanceUTF8" -"ELSE" -"EMPTY AS" -"EMPTY" "empty" "emptyArrayDate" "emptyArrayDateTime" @@ -1146,18 +1851,14 @@ "emptyArrayUInt32" "emptyArrayUInt64" "emptyArrayUInt8" -"ENABLED ROLES" "enabledProfiles" "enabledRoles" "encodeURLComponent" "encodeURLFormComponent" "encodeXMLComponent" "encrypt" -"END" "endsWith" "endsWithUTF8" -"ENFORCED" -"ENGINE" "entropy" "entropyArgMax" "entropyArgMin" @@ -1173,32 +1874,14 @@ "entropyResample" "entropySimpleState" "entropyState" -"Enum" -"Enum16" -"Enum8" -"EPHEMERAL SEQUENTIAL" -"EPHEMERAL" "equals" "erf" "erfc" "errorCodeToName" -"ESTIMATE" "evalMLMethod" -"EVENT" -"EVENTS" -"EVERY" -"EXCEPT DATABASE" -"EXCEPT DATABASES" -"EXCEPT TABLE" -"EXCEPT TABLES" -"EXCEPT" -"EXCHANGE DICTIONARIES" -"EXCHANGE TABLES" -"EXISTS" "exp" "exp10" "exp2" -"EXPLAIN" "exponentialMovingAverage" "exponentialMovingAverageArgMax" "exponentialMovingAverageArgMin" @@ -1274,10 +1957,6 @@ "exponentialTimeDecayedSumResample" "exponentialTimeDecayedSumSimpleState" "exponentialTimeDecayedSumState" -"EXPRESSION" -"EXTENDED" -"EXTERNAL DDL FROM" -"EXTRACT" "extract" "extractAll" "extractAllGroups" @@ -1291,24 +1970,13 @@ "extractURLParameterNames" "extractURLParameters" "factorial" -"FALSE" "farmFingerprint64" "farmHash64" -"FETCH PART" -"FETCH PARTITION" -"FETCH" -"FIELDS" -"FILE" "file" -"FILESYSTEM CACHE" -"FILESYSTEM CACHES" "filesystemAvailable" "filesystemCapacity" "filesystemUnreserved" -"FILTER" -"FINAL" "finalizeAggregation" -"FIRST" "firstLine" "firstSignificantSubdomain" "firstSignificantSubdomainCustom" @@ -1344,7 +2012,6 @@ "first_value_respect_nullsResample" "first_value_respect_nullsSimpleState" "first_value_respect_nullsState" -"FixedString" "flameGraph" "flameGraphArgMax" "flameGraphArgMin" @@ -1362,16 +2029,7 @@ "flameGraphState" "flatten" "flattenTuple" -"Float32" -"Float64" "floor" -"FOLLOWING" -"FOR" -"ForEach" -"FOREIGN KEY" -"FOREIGN" -"FORGET 
PARTITION" -"FORMAT" "format" "formatDateTime" "formatDateTimeInJodaSyntax" @@ -1385,31 +2043,19 @@ "formatReadableTimeDelta" "formatRow" "formatRowNoNewline" -"FORMAT_BYTES" -"FQDN" "fragment" -"FREEZE" -"FROM INFILE" -"FROM SHARD" -"FROM" "fromDaysSinceYearZero" "fromDaysSinceYearZero32" "fromModifiedJulianDay" "fromModifiedJulianDayOrNull" +"fromUTCTimestamp" "fromUnixTimestamp" "fromUnixTimestamp64Micro" "fromUnixTimestamp64Milli" "fromUnixTimestamp64Nano" "fromUnixTimestampInJodaSyntax" -"fromUTCTimestamp" -"FROM_BASE64" -"FROM_DAYS" -"FROM_UNIXTIME" "from_utc_timestamp" -"FULL" "fullHostName" -"FULLTEXT" -"FUNCTION" "fuzzBits" "gccMurmurHash" "gcd" @@ -1419,11 +2065,11 @@ "generateUUIDv4" "generateUUIDv7" "geoDistance" +"geoToH3" +"geoToS2" "geohashDecode" "geohashEncode" "geohashesInBox" -"geoToH3" -"geoToS2" "getClientHTTPHeader" "getMacro" "getOSKernelVersion" @@ -1432,9 +2078,6 @@ "getSizeOfEnumType" "getSubcolumn" "getTypeSerializationStreams" -"GLOBAL IN" -"GLOBAL NOT IN" -"GLOBAL" "globalIn" "globalInIgnoreSet" "globalNotIn" @@ -1444,16 +2087,11 @@ "globalNullIn" "globalNullInIgnoreSet" "globalVariable" -"GRANT OPTION FOR" -"GRANT" -"GRANTEES" -"GRANULARITY" "greatCircleAngle" "greatCircleDistance" "greater" "greaterOrEquals" "greatest" -"GROUP BY" "groupArray" "groupArrayArgMax" "groupArrayArgMin" @@ -1589,6 +2227,36 @@ "groupBitAndResample" "groupBitAndSimpleState" "groupBitAndState" +"groupBitOr" +"groupBitOrArgMax" +"groupBitOrArgMin" +"groupBitOrArray" +"groupBitOrDistinct" +"groupBitOrForEach" +"groupBitOrIf" +"groupBitOrMap" +"groupBitOrMerge" +"groupBitOrNull" +"groupBitOrOrDefault" +"groupBitOrOrNull" +"groupBitOrResample" +"groupBitOrSimpleState" +"groupBitOrState" +"groupBitXor" +"groupBitXorArgMax" +"groupBitXorArgMin" +"groupBitXorArray" +"groupBitXorDistinct" +"groupBitXorForEach" +"groupBitXorIf" +"groupBitXorMap" +"groupBitXorMerge" +"groupBitXorNull" +"groupBitXorOrDefault" +"groupBitXorOrNull" +"groupBitXorResample" +"groupBitXorSimpleState" +"groupBitXorState" "groupBitmap" "groupBitmapAnd" "groupBitmapAndArgMax" @@ -1648,36 +2316,6 @@ "groupBitmapXorResample" "groupBitmapXorSimpleState" "groupBitmapXorState" -"groupBitOr" -"groupBitOrArgMax" -"groupBitOrArgMin" -"groupBitOrArray" -"groupBitOrDistinct" -"groupBitOrForEach" -"groupBitOrIf" -"groupBitOrMap" -"groupBitOrMerge" -"groupBitOrNull" -"groupBitOrOrDefault" -"groupBitOrOrNull" -"groupBitOrResample" -"groupBitOrSimpleState" -"groupBitOrState" -"groupBitXor" -"groupBitXorArgMax" -"groupBitXorArgMin" -"groupBitXorArray" -"groupBitXorDistinct" -"groupBitXorForEach" -"groupBitXorIf" -"groupBitXorMap" -"groupBitXorMerge" -"groupBitXorNull" -"groupBitXorOrDefault" -"groupBitXorOrNull" -"groupBitXorResample" -"groupBitXorSimpleState" -"groupBitXorState" "groupConcat" "groupConcatArgMax" "groupConcatArgMin" @@ -1693,8 +2331,6 @@ "groupConcatResample" "groupConcatSimpleState" "groupConcatState" -"GROUPING SETS" -"GROUPS" "groupUniqArray" "groupUniqArrayArgMax" "groupUniqArrayArgMin" @@ -1725,7 +2361,6 @@ "group_concatResample" "group_concatSimpleState" "group_concatState" -"H" "h3CellAreaM2" "h3CellAreaRads2" "h3Distance" @@ -1753,7 +2388,6 @@ "h3IsPentagon" "h3IsResClassIII" "h3IsValid" -"h3kRing" "h3Line" "h3NumHexagons" "h3PointDistKm" @@ -1766,12 +2400,12 @@ "h3ToParent" "h3ToString" "h3UnidirectionalEdgeIsValid" +"h3kRing" "halfMD5" "has" "hasAll" "hasAny" "hasColumnInTable" -"HASH" "hasSubsequence" "hasSubsequenceCaseInsensitive" "hasSubsequenceCaseInsensitiveUTF8" @@ -1782,11 +2416,7 @@ 
"hasTokenCaseInsensitive" "hasTokenCaseInsensitiveOrNull" "hasTokenOrNull" -"HAVING" -"HDFS" "hex" -"HH" -"HIERARCHICAL" "hilbertDecode" "hilbertEncode" "histogram" @@ -1808,62 +2438,33 @@ "hop" "hopEnd" "hopStart" -"HOST" "hostName" "hostname" -"HOUR" -"HOURS" -"HTTP" "hypot" -"ID" -"IDENTIFIED" "identity" "idnaDecode" "idnaEncode" -"IF EMPTY" -"IF EXISTS" -"IF NOT EXISTS" "if" "ifNotFinite" "ifNull" -"IGNORE NULLS" "ignore" -"ILIKE" "ilike" -"IN PARTITION" -"IN" "in" -"INDEX" -"INDEXES" +"inIgnoreSet" "indexHint" "indexOf" -"INDICES" -"INET6_ATON" -"INET6_NTOA" -"INET_ATON" -"INET_NTOA" -"INHERIT" -"inIgnoreSet" "initcap" "initcapUTF8" -"initializeAggregation" "initialQueryID" "initial_query_id" -"INJECTIVE" -"INNER" -"INSERT INTO" +"initializeAggregation" "instr" -"Int16" -"Int32" -"Int64" -"Int8" "intDiv" "intDivOrZero" -"INTERPOLATE" -"INTERSECT" -"INTERVAL" -"IntervalDay" -"IntervalHour" +"intExp10" +"intExp2" +"intHash32" +"intHash64" "intervalLengthSum" "intervalLengthSumArgMax" "intervalLengthSumArgMin" @@ -1879,42 +2480,13 @@ "intervalLengthSumResample" "intervalLengthSumSimpleState" "intervalLengthSumState" -"IntervalMinute" -"IntervalMonth" -"IntervalQuarter" -"IntervalSecond" -"IntervalWeek" -"IntervalYear" -"intExp10" -"intExp2" -"intHash32" -"intHash64" -"INTO OUTFILE" -"INVISIBLE" -"INVOKER" -"IP" -"IPv4CIDRToRange" -"IPv4NumToString" -"IPv4NumToStringClassC" -"IPv4StringToNum" -"IPv4StringToNumOrDefault" -"IPv4StringToNumOrNull" -"IPv4ToIPv6" -"IPv6CIDRToRange" -"IPv6NumToString" -"IPv6StringToNum" -"IPv6StringToNumOrDefault" -"IPv6StringToNumOrNull" -"IS NOT DISTINCT FROM" -"IS NOT NULL" -"IS NULL" "isConstant" "isDecimalOverflow" "isFinite" -"isInfinite" "isIPAddressInRange" "isIPv4String" "isIPv6String" +"isInfinite" "isNaN" "isNotDistinctFrom" "isNotNull" @@ -1923,46 +2495,15 @@ "isValidJSON" "isValidUTF8" "isZeroOrNull" -"IS_OBJECT_ID" "jaroSimilarity" "jaroWinklerSimilarity" "javaHash" "javaHashUTF16LE" -"JOIN" "joinGet" "joinGetOrNull" -"JSONArrayLength" -"JSONExtract" -"JSONExtractArrayRaw" -"JSONExtractBool" -"JSONExtractFloat" -"JSONExtractInt" -"JSONExtractKeys" -"JSONExtractKeysAndValues" -"JSONExtractKeysAndValuesRaw" -"JSONExtractRaw" -"JSONExtractString" -"JSONExtractUInt" -"JSONHas" -"JSONKey" -"JSONLength" -"JSONMergePatch" "jsonMergePatch" -"JSONType" -"JSON_ARRAY_LENGTH" -"JSON_EXISTS" -"JSON_QUERY" -"JSON_VALUE" "jumpConsistentHash" -"JWT" "kafkaMurmurHash" -"KERBEROS" -"KEY BY" -"KEY" -"KEYED BY" -"KEYS" -"KILL" -"KIND" "kolmogorovSmirnovTest" "kolmogorovSmirnovTestArgMax" "kolmogorovSmirnovTestArgMin" @@ -2011,14 +2552,6 @@ "kurtSampResample" "kurtSampSimpleState" "kurtSampState" -"L1Distance" -"L1Norm" -"L1Normalize" -"L2Distance" -"L2Norm" -"L2Normalize" -"L2SquaredDistance" -"L2SquaredNorm" "lagInFrame" "lagInFrameArgMax" "lagInFrameArgMin" @@ -2034,7 +2567,6 @@ "lagInFrameResample" "lagInFrameSimpleState" "lagInFrameState" -"LARGE OBJECT" "largestTriangleThreeBuckets" "largestTriangleThreeBucketsArgMax" "largestTriangleThreeBucketsArgMin" @@ -2050,8 +2582,6 @@ "largestTriangleThreeBucketsResample" "largestTriangleThreeBucketsSimpleState" "largestTriangleThreeBucketsState" -"LAST" -"LAST_DAY" "last_value" "last_valueArgMax" "last_valueArgMin" @@ -2082,10 +2612,8 @@ "last_value_respect_nullsResample" "last_value_respect_nullsSimpleState" "last_value_respect_nullsState" -"LAYOUT" "lcase" "lcm" -"LDAP" "leadInFrame" "leadInFrameArgMax" "leadInFrameArgMin" @@ -2101,10 +2629,7 @@ "leadInFrameResample" "leadInFrameSimpleState" "leadInFrameState" 
-"LEADING" "least" -"LEFT ARRAY JOIN" -"LEFT" "left" "leftPad" "leftPadUTF8" @@ -2112,42 +2637,24 @@ "lemmatize" "length" "lengthUTF8" -"LESS THAN" "less" "lessOrEquals" -"LEVEL" "levenshteinDistance" "levenshteinDistanceUTF8" "lgamma" -"LIFETIME" -"LIGHTWEIGHT" -"LIKE" "like" -"LIMIT" -"LINEAR" -"LinfDistance" -"LinfNorm" -"LinfNormalize" -"LIST" -"LIVE" "ln" -"LOCAL" "locate" "log" "log10" "log1p" "log2" "logTrace" -"LowCardinality" "lowCardinalityIndices" "lowCardinalityKeys" "lower" "lowerUTF8" "lpad" -"LpDistance" -"LpNorm" -"LpNormalize" -"LTRIM" "ltrim" "lttb" "lttbArgMax" @@ -2164,10 +2671,6 @@ "lttbResample" "lttbSimpleState" "lttbState" -"M" -"MACNumToString" -"MACStringToNum" -"MACStringToOUI" "makeDate" "makeDate32" "makeDateTime" @@ -2208,18 +2711,8 @@ "mapSubtract" "mapUpdate" "mapValues" -"MAP_FROM_ARRAYS" -"MATCH" "match" -"MATERIALIZE COLUMN" -"MATERIALIZE INDEX" -"MATERIALIZE PROJECTION" -"MATERIALIZE STATISTICS" -"MATERIALIZE TTL" -"MATERIALIZE" "materialize" -"MATERIALIZED" -"MAX" "max" "max2" "maxArgMax" @@ -2281,9 +2774,6 @@ "maxResample" "maxSimpleState" "maxState" -"MCS" -"MD4" -"MD5" "meanZTest" "meanZTestArgMax" "meanZTestArgMin" @@ -2524,19 +3014,8 @@ "medianTimingWeightedResample" "medianTimingWeightedSimpleState" "medianTimingWeightedState" -"MEMORY" -"Merge" -"MERGES" -"METRICS INNER UUID" -"METRICS" "metroHash64" -"MI" -"MICROSECOND" -"MICROSECONDS" "mid" -"MILLISECOND" -"MILLISECONDS" -"MIN" "min" "min2" "minArgMax" @@ -2572,37 +3051,15 @@ "minSimpleState" "minState" "minus" -"MINUTE" -"MINUTES" "mismatches" -"MM" -"MOD" "mod" -"MODIFY COLUMN" -"MODIFY COMMENT" -"MODIFY DEFINER" -"MODIFY ORDER BY" -"MODIFY QUERY" -"MODIFY REFRESH" -"MODIFY SAMPLE BY" -"MODIFY SETTING" -"MODIFY SQL SECURITY" -"MODIFY STATISTICS" -"MODIFY TTL" -"MODIFY" "modulo" "moduloLegacy" "moduloOrZero" -"MONTH" "monthName" -"MONTHS" "mortonDecode" "mortonEncode" -"MOVE PART" -"MOVE PARTITION" -"MOVE" "movingXXX" -"MS" "multiFuzzyMatchAllIndices" "multiFuzzyMatchAny" "multiFuzzyMatchAnyIndex" @@ -2610,8 +3067,6 @@ "multiMatchAllIndices" "multiMatchAny" "multiMatchAnyIndex" -"multiply" -"multiplyDecimal" "multiSearchAllPositions" "multiSearchAllPositionsCaseInsensitive" "multiSearchAllPositionsCaseInsensitiveUTF8" @@ -2628,23 +3083,17 @@ "multiSearchFirstPositionCaseInsensitive" "multiSearchFirstPositionCaseInsensitiveUTF8" "multiSearchFirstPositionUTF8" +"multiply" +"multiplyDecimal" "murmurHash2_32" "murmurHash2_64" "murmurHash3_128" "murmurHash3_32" "murmurHash3_64" -"MUTATION" -"N" -"NAME" -"NAMED COLLECTION" -"NANOSECOND" -"NANOSECONDS" "negate" "neighbor" -"Nested" "nested" "netloc" -"NEXT" "ngramDistance" "ngramDistanceCaseInsensitive" "ngramDistanceCaseInsensitiveUTF8" @@ -2657,7 +3106,6 @@ "ngramMinHashCaseInsensitive" "ngramMinHashCaseInsensitiveUTF8" "ngramMinHashUTF8" -"ngrams" "ngramSearch" "ngramSearchCaseInsensitive" "ngramSearchCaseInsensitiveUTF8" @@ -2666,10 +3114,7 @@ "ngramSimHashCaseInsensitive" "ngramSimHashCaseInsensitiveUTF8" "ngramSimHashUTF8" -"NO ACTION" -"NO DELAY" -"NO LIMITS" -"NONE" +"ngrams" "nonNegativeDerivative" "nonNegativeDerivativeArgMax" "nonNegativeDerivativeArgMin" @@ -2685,8 +3130,11 @@ "nonNegativeDerivativeResample" "nonNegativeDerivativeSimpleState" "nonNegativeDerivativeState" -"normalizedQueryHash" -"normalizedQueryHashKeepNames" +"normL1" +"normL2" +"normL2Squared" +"normLinf" +"normLp" "normalizeL1" "normalizeL2" "normalizeLinf" @@ -2697,23 +3145,17 @@ "normalizeUTF8NFD" "normalizeUTF8NFKC" "normalizeUTF8NFKD" -"normL1" -"normL2" 
-"normL2Squared" -"normLinf" -"normLp" -"NOT BETWEEN" -"NOT IDENTIFIED" -"NOT ILIKE" -"NOT IN" -"NOT KEYED" -"NOT LIKE" -"NOT OVERRIDABLE" -"NOT" +"normalizedQueryHash" +"normalizedQueryHashKeepNames" "not" "notEmpty" "notEquals" -"Nothing" +"notILike" +"notIn" +"notInIgnoreSet" +"notLike" +"notNullIn" +"notNullInIgnoreSet" "nothing" "nothingArgMax" "nothingArgMin" @@ -2758,17 +3200,9 @@ "nothingUInt64Resample" "nothingUInt64SimpleState" "nothingUInt64State" -"notILike" -"notIn" -"notInIgnoreSet" -"notLike" -"notNullIn" -"notNullInIgnoreSet" "now" "now64" "nowInBlock" -"NO_PASSWORD" -"NS" "nth_value" "nth_valueArgMax" "nth_valueArgMin" @@ -2799,28 +3233,10 @@ "ntileResample" "ntileSimpleState" "ntileState" -"NULL" -"Null" -"Nullable" "nullIf" "nullIn" "nullInIgnoreSet" -"NULLS" -"OCTET_LENGTH" -"OFFSET" -"ON DELETE" -"ON UPDATE" -"ON VOLUME" -"ON" -"ONLY" -"OPTIMIZE TABLE" -"OR REPLACE" -"OR" "or" -"ORDER BY" -"OUTER" -"OVER" -"OVERRIDABLE" "parseDateTime" "parseDateTime32BestEffort" "parseDateTime32BestEffortOrNull" @@ -2846,15 +3262,8 @@ "parseReadableSizeOrNull" "parseReadableSizeOrZero" "parseTimeDelta" -"PART" -"PARTIAL" -"PARTITION BY" -"PARTITION" "partitionID" "partitionId" -"PARTITIONS" -"PART_MOVE_TO_SHARD" -"PASTE" "path" "pathFull" "percentRank" @@ -2887,15 +3296,7 @@ "percent_rankResample" "percent_rankSimpleState" "percent_rankState" -"PERIODIC REFRESH" -"PERMANENTLY" -"PERMISSIVE" -"PERSISTENT SEQUENTIAL" -"PERSISTENT" "pi" -"PIPELINE" -"PLAINTEXT_PASSWORD" -"PLAN" "plus" "pmod" "pointInEllipses" @@ -2916,7 +3317,6 @@ "polygonsUnionSpherical" "polygonsWithinCartesian" "polygonsWithinSpherical" -"POPULATE" "port" "portRFC" "position" @@ -2927,23 +3327,11 @@ "positive_modulo" "pow" "power" -"PRECEDING" -"PRECISION" -"PREWHERE" -"PRIMARY KEY" -"PRIMARY" "printf" -"PROFILE" -"PROJECTION" "proportionsZTest" -"Protobuf" "protocol" -"PULL" "punycodeDecode" "punycodeEncode" -"Q" -"QQ" -"QUALIFY" "quantile" "quantileArgMax" "quantileArgMin" @@ -3137,6 +3525,68 @@ "quantileOrDefault" "quantileOrNull" "quantileResample" +"quantileSimpleState" +"quantileState" +"quantileTDigest" +"quantileTDigestArgMax" +"quantileTDigestArgMin" +"quantileTDigestArray" +"quantileTDigestDistinct" +"quantileTDigestForEach" +"quantileTDigestIf" +"quantileTDigestMap" +"quantileTDigestMerge" +"quantileTDigestNull" +"quantileTDigestOrDefault" +"quantileTDigestOrNull" +"quantileTDigestResample" +"quantileTDigestSimpleState" +"quantileTDigestState" +"quantileTDigestWeighted" +"quantileTDigestWeightedArgMax" +"quantileTDigestWeightedArgMin" +"quantileTDigestWeightedArray" +"quantileTDigestWeightedDistinct" +"quantileTDigestWeightedForEach" +"quantileTDigestWeightedIf" +"quantileTDigestWeightedMap" +"quantileTDigestWeightedMerge" +"quantileTDigestWeightedNull" +"quantileTDigestWeightedOrDefault" +"quantileTDigestWeightedOrNull" +"quantileTDigestWeightedResample" +"quantileTDigestWeightedSimpleState" +"quantileTDigestWeightedState" +"quantileTiming" +"quantileTimingArgMax" +"quantileTimingArgMin" +"quantileTimingArray" +"quantileTimingDistinct" +"quantileTimingForEach" +"quantileTimingIf" +"quantileTimingMap" +"quantileTimingMerge" +"quantileTimingNull" +"quantileTimingOrDefault" +"quantileTimingOrNull" +"quantileTimingResample" +"quantileTimingSimpleState" +"quantileTimingState" +"quantileTimingWeighted" +"quantileTimingWeightedArgMax" +"quantileTimingWeightedArgMin" +"quantileTimingWeightedArray" +"quantileTimingWeightedDistinct" +"quantileTimingWeightedForEach" +"quantileTimingWeightedIf" 
+"quantileTimingWeightedMap" +"quantileTimingWeightedMerge" +"quantileTimingWeightedNull" +"quantileTimingWeightedOrDefault" +"quantileTimingWeightedOrNull" +"quantileTimingWeightedResample" +"quantileTimingWeightedSimpleState" +"quantileTimingWeightedState" "quantiles" "quantilesArgMax" "quantilesArgMin" @@ -3309,7 +3759,6 @@ "quantilesGKSimpleState" "quantilesGKState" "quantilesIf" -"quantileSimpleState" "quantilesInterpolatedWeighted" "quantilesInterpolatedWeightedArgMax" "quantilesInterpolatedWeightedArgMin" @@ -3333,7 +3782,6 @@ "quantilesResample" "quantilesSimpleState" "quantilesState" -"quantileState" "quantilesTDigest" "quantilesTDigestArgMax" "quantilesTDigestArgMin" @@ -3394,75 +3842,10 @@ "quantilesTimingWeightedResample" "quantilesTimingWeightedSimpleState" "quantilesTimingWeightedState" -"quantileTDigest" -"quantileTDigestArgMax" -"quantileTDigestArgMin" -"quantileTDigestArray" -"quantileTDigestDistinct" -"quantileTDigestForEach" -"quantileTDigestIf" -"quantileTDigestMap" -"quantileTDigestMerge" -"quantileTDigestNull" -"quantileTDigestOrDefault" -"quantileTDigestOrNull" -"quantileTDigestResample" -"quantileTDigestSimpleState" -"quantileTDigestState" -"quantileTDigestWeighted" -"quantileTDigestWeightedArgMax" -"quantileTDigestWeightedArgMin" -"quantileTDigestWeightedArray" -"quantileTDigestWeightedDistinct" -"quantileTDigestWeightedForEach" -"quantileTDigestWeightedIf" -"quantileTDigestWeightedMap" -"quantileTDigestWeightedMerge" -"quantileTDigestWeightedNull" -"quantileTDigestWeightedOrDefault" -"quantileTDigestWeightedOrNull" -"quantileTDigestWeightedResample" -"quantileTDigestWeightedSimpleState" -"quantileTDigestWeightedState" -"quantileTiming" -"quantileTimingArgMax" -"quantileTimingArgMin" -"quantileTimingArray" -"quantileTimingDistinct" -"quantileTimingForEach" -"quantileTimingIf" -"quantileTimingMap" -"quantileTimingMerge" -"quantileTimingNull" -"quantileTimingOrDefault" -"quantileTimingOrNull" -"quantileTimingResample" -"quantileTimingSimpleState" -"quantileTimingState" -"quantileTimingWeighted" -"quantileTimingWeightedArgMax" -"quantileTimingWeightedArgMin" -"quantileTimingWeightedArray" -"quantileTimingWeightedDistinct" -"quantileTimingWeightedForEach" -"quantileTimingWeightedIf" -"quantileTimingWeightedMap" -"quantileTimingWeightedMerge" -"quantileTimingWeightedNull" -"quantileTimingWeightedOrDefault" -"quantileTimingWeightedOrNull" -"quantileTimingWeightedResample" -"quantileTimingWeightedSimpleState" -"quantileTimingWeightedState" -"QUARTER" -"QUARTERS" -"QUERY TREE" -"QUERY" "queryID" "queryString" "queryStringAndFragment" "query_id" -"QUOTA" "radians" "rand" "rand32" @@ -3477,16 +3860,13 @@ "randLogNormal" "randNegativeBinomial" "randNormal" -"randomFixedString" -"RANDOMIZE FOR" -"RANDOMIZED" -"randomPrintableASCII" -"randomString" -"randomStringUTF8" "randPoisson" "randStudentT" "randUniform" -"RANGE" +"randomFixedString" +"randomPrintableASCII" +"randomString" +"randomStringUTF8" "range" "rank" "rankArgMax" @@ -3518,24 +3898,14 @@ "rankResample" "rankSimpleState" "rankState" -"READONLY" "readWKTLineString" "readWKTMultiLineString" "readWKTMultiPolygon" "readWKTPoint" "readWKTPolygon" "readWKTRing" -"REALM" -"RECOMPRESS" -"RECURSIVE" -"REFERENCES" -"REFRESH" -"REGEXP" "regexpExtract" "regexpQuoteMeta" -"REGEXP_EXTRACT" -"REGEXP_MATCHES" -"REGEXP_REPLACE" "regionHierarchy" "regionIn" "regionToArea" @@ -3566,31 +3936,13 @@ "reinterpretAsUInt64" "reinterpretAsUInt8" "reinterpretAsUUID" -"REMOVE SAMPLE BY" -"REMOVE TTL" -"REMOVE" -"RENAME COLUMN" -"RENAME 
DATABASE" -"RENAME DICTIONARY" -"RENAME TABLE" -"RENAME TO" -"RENAME" "repeat" -"REPLACE PARTITION" -"REPLACE" "replace" "replaceAll" "replaceOne" "replaceRegexpAll" "replaceRegexpOne" "replicate" -"Resample" -"RESET SETTING" -"RESPECT NULLS" -"RESTORE" -"RESTRICT" -"RESTRICTIVE" -"RESUME" "retention" "retentionArgMax" "retentionArgMin" @@ -3609,24 +3961,18 @@ "reverse" "reverseUTF8" "revision" -"REVOKE" -"RIGHT" "right" "rightPad" "rightPadUTF8" "rightUTF8" -"ROLLBACK" -"ROLLUP" "round" "roundAge" "roundBankers" "roundDown" "roundDuration" "roundToExp2" -"ROW" "rowNumberInAllBlocks" "rowNumberInBlock" -"ROWS" "row_number" "row_numberArgMax" "row_numberArgMin" @@ -3643,13 +3989,11 @@ "row_numberSimpleState" "row_numberState" "rpad" -"RTRIM" "rtrim" "runningAccumulate" "runningConcurrency" "runningDifference" "runningDifferenceStartingWithFirstValue" -"S" "s2CapContains" "s2CapUnion" "s2CellsIntersect" @@ -3659,18 +4003,7 @@ "s2RectIntersection" "s2RectUnion" "s2ToGeo" -"S3" -"SALT" -"SAMPLE BY" -"SAMPLE" -"SAN" "scalarProduct" -"SCHEMA" -"SCHEME" -"SECOND" -"SECONDS" -"SELECT" -"SEMI" "sequenceCount" "sequenceCountArgMax" "sequenceCountArgMin" @@ -3719,43 +4052,14 @@ "seriesDecomposeSTL" "seriesOutliersDetectTukey" "seriesPeriodDetectFFT" -"SERVER" "serverTimeZone" "serverTimezone" "serverUUID" -"SET DEFAULT ROLE" -"SET DEFAULT" -"SET FAKE TIME" -"SET NULL" -"SET ROLE DEFAULT" -"SET ROLE" -"SET TRANSACTION SNAPSHOT" -"SET" -"SETTINGS" -"SHA1" -"SHA224" -"SHA256" -"SHA256_HASH" -"SHA256_PASSWORD" -"SHA384" -"SHA512" -"SHA512_256" "shardCount" "shardNum" -"SHOW ACCESS" -"SHOW CREATE" -"SHOW ENGINES" -"SHOW FUNCTIONS" -"SHOW GRANTS" -"SHOW PRIVILEGES" -"SHOW PROCESSLIST" -"SHOW SETTING" -"SHOW" "showCertificate" "sigmoid" "sign" -"SIGNED" -"SIMPLE" "simpleJSONExtractBool" "simpleJSONExtractFloat" "simpleJSONExtractInt" @@ -3838,39 +4142,37 @@ "snowflakeToDateTime" "snowflakeToDateTime64" "soundex" -"SOURCE" "space" "sparkBar" -"sparkbar" "sparkBarArgMax" -"sparkbarArgMax" "sparkBarArgMin" -"sparkbarArgMin" "sparkBarArray" -"sparkbarArray" "sparkBarDistinct" -"sparkbarDistinct" "sparkBarForEach" -"sparkbarForEach" "sparkBarIf" -"sparkbarIf" "sparkBarMap" -"sparkbarMap" "sparkBarMerge" -"sparkbarMerge" "sparkBarNull" -"sparkbarNull" "sparkBarOrDefault" -"sparkbarOrDefault" "sparkBarOrNull" -"sparkbarOrNull" "sparkBarResample" -"sparkbarResample" "sparkBarSimpleState" -"sparkbarSimpleState" "sparkBarState" +"sparkbar" +"sparkbarArgMax" +"sparkbarArgMin" +"sparkbarArray" +"sparkbarDistinct" +"sparkbarForEach" +"sparkbarIf" +"sparkbarMap" +"sparkbarMerge" +"sparkbarNull" +"sparkbarOrDefault" +"sparkbarOrNull" +"sparkbarResample" +"sparkbarSimpleState" "sparkbarState" -"SPATIAL" "splitByAlpha" "splitByChar" "splitByNonAlpha" @@ -3880,31 +4182,9 @@ "sqid" "sqidDecode" "sqidEncode" -"SQL SECURITY" -"SQL_TSI_DAY" -"SQL_TSI_HOUR" -"SQL_TSI_MICROSECOND" -"SQL_TSI_MILLISECOND" -"SQL_TSI_MINUTE" -"SQL_TSI_MONTH" -"SQL_TSI_NANOSECOND" -"SQL_TSI_QUARTER" -"SQL_TSI_SECOND" -"SQL_TSI_WEEK" -"SQL_TSI_YEAR" "sqrt" -"SS" -"SSH_KEY" -"SSL_CERTIFICATE" -"START TRANSACTION" "startsWith" "startsWithUTF8" -"State" -"STATISTICS" -"STD" -"STDArgMax" -"STDArgMin" -"STDArray" "stddevPop" "stddevPopArgMax" "stddevPopArgMin" @@ -3965,49 +4245,7 @@ "stddevSampStableSimpleState" "stddevSampStableState" "stddevSampState" -"STDDEV_POP" -"STDDEV_POPArgMax" -"STDDEV_POPArgMin" -"STDDEV_POPArray" -"STDDEV_POPDistinct" -"STDDEV_POPForEach" -"STDDEV_POPIf" -"STDDEV_POPMap" -"STDDEV_POPMerge" -"STDDEV_POPNull" -"STDDEV_POPOrDefault" 
-"STDDEV_POPOrNull" -"STDDEV_POPResample" -"STDDEV_POPSimpleState" -"STDDEV_POPState" -"STDDEV_SAMP" -"STDDEV_SAMPArgMax" -"STDDEV_SAMPArgMin" -"STDDEV_SAMPArray" -"STDDEV_SAMPDistinct" -"STDDEV_SAMPForEach" -"STDDEV_SAMPIf" -"STDDEV_SAMPMap" -"STDDEV_SAMPMerge" -"STDDEV_SAMPNull" -"STDDEV_SAMPOrDefault" -"STDDEV_SAMPOrNull" -"STDDEV_SAMPResample" -"STDDEV_SAMPSimpleState" -"STDDEV_SAMPState" -"STDDistinct" -"STDForEach" -"STDIf" -"STDMap" -"STDMerge" -"STDNull" -"STDOrDefault" -"STDOrNull" -"STDResample" -"STDSimpleState" -"STDState" "stem" -"STEP" "stochasticLinearRegression" "stochasticLinearRegressionArgMax" "stochasticLinearRegressionArgMin" @@ -4038,17 +4276,13 @@ "stochasticLogisticRegressionResample" "stochasticLogisticRegressionSimpleState" "stochasticLogisticRegressionState" -"STORAGE" -"STRICT" -"STRICTLY_ASCENDING" -"String" +"str_to_date" +"str_to_map" "stringJaccardIndex" "stringJaccardIndexUTF8" "stringToH3" "structureToCapnProtoSchema" "structureToProtobufSchema" -"str_to_date" -"str_to_map" "studentTTest" "studentTTestArgMax" "studentTTestArgMin" @@ -4066,16 +4300,11 @@ "studentTTestState" "subBitmap" "subDate" -"SUBPARTITION BY" -"SUBPARTITION" -"SUBPARTITIONS" "substr" -"SUBSTRING" "substring" "substringIndex" "substringIndexUTF8" "substringUTF8" -"SUBSTRING_INDEX" "subtractDays" "subtractHours" "subtractInterval" @@ -4157,21 +4386,6 @@ "sumMapFilteredWithOverflowResample" "sumMapFilteredWithOverflowSimpleState" "sumMapFilteredWithOverflowState" -"sumMappedArrays" -"sumMappedArraysArgMax" -"sumMappedArraysArgMin" -"sumMappedArraysArray" -"sumMappedArraysDistinct" -"sumMappedArraysForEach" -"sumMappedArraysIf" -"sumMappedArraysMap" -"sumMappedArraysMerge" -"sumMappedArraysNull" -"sumMappedArraysOrDefault" -"sumMappedArraysOrNull" -"sumMappedArraysResample" -"sumMappedArraysSimpleState" -"sumMappedArraysState" "sumMapWithOverflow" "sumMapWithOverflowArgMax" "sumMapWithOverflowArgMin" @@ -4187,6 +4401,21 @@ "sumMapWithOverflowResample" "sumMapWithOverflowSimpleState" "sumMapWithOverflowState" +"sumMappedArrays" +"sumMappedArraysArgMax" +"sumMappedArraysArgMin" +"sumMappedArraysArray" +"sumMappedArraysDistinct" +"sumMappedArraysForEach" +"sumMappedArraysIf" +"sumMappedArraysMap" +"sumMappedArraysMerge" +"sumMappedArraysNull" +"sumMappedArraysOrDefault" +"sumMappedArraysOrNull" +"sumMappedArraysResample" +"sumMappedArraysSimpleState" +"sumMappedArraysState" "sumMerge" "sumNull" "sumOrDefault" @@ -4209,24 +4438,11 @@ "sumWithOverflowResample" "sumWithOverflowSimpleState" "sumWithOverflowState" -"SUSPEND" -"SVG" "svg" -"SYNC" "synonyms" -"SYNTAX" -"SYSTEM" -"TABLE OVERRIDE" -"TABLE" -"TABLES" -"TAGS INNER UUID" -"TAGS" "tan" "tanh" "tcpPort" -"TEMPORARY TABLE" -"TEMPORARY" -"TEST" "tgamma" "theilsU" "theilsUArgMax" @@ -4243,34 +4459,20 @@ "theilsUResample" "theilsUSimpleState" "theilsUState" -"THEN" "throwIf" "tid" "timeDiff" "timeSlot" "timeSlots" -"TIMESTAMP" -"timestamp" -"TIMESTAMPADD" -"TIMESTAMPDIFF" -"timestampDiff" -"TIMESTAMPSUB" -"TIMESTAMP_ADD" -"TIMESTAMP_DIFF" -"timestamp_diff" -"TIMESTAMP_SUB" "timeZone" -"timezone" "timeZoneOf" -"timezoneOf" "timeZoneOffset" +"timestamp" +"timestampDiff" +"timestamp_diff" +"timezone" +"timezoneOf" "timezoneOffset" -"TO DISK" -"TO INNER UUID" -"TO SHARD" -"TO TABLE" -"TO VOLUME" -"TO" "toBool" "toColumnTypeName" "toDate" @@ -4290,7 +4492,6 @@ "toDateTimeOrDefault" "toDateTimeOrNull" "toDateTimeOrZero" -"today" "toDayOfMonth" "toDayOfWeek" "toDayOfYear" @@ -4322,6 +4523,16 @@ "toFloat64OrNull" "toFloat64OrZero" "toHour" +"toIPv4" 
+"toIPv4OrDefault" +"toIPv4OrNull" +"toIPv4OrZero" +"toIPv6" +"toIPv6OrDefault" +"toIPv6OrNull" +"toIPv6OrZero" +"toISOWeek" +"toISOYear" "toInt128" "toInt128OrDefault" "toInt128OrNull" @@ -4357,18 +4568,7 @@ "toIntervalSecond" "toIntervalWeek" "toIntervalYear" -"toIPv4" -"toIPv4OrDefault" -"toIPv4OrNull" -"toIPv4OrZero" -"toIPv6" -"toIPv6OrDefault" -"toIPv6OrNull" -"toIPv6OrZero" -"toISOWeek" -"toISOYear" "toJSONString" -"tokens" "toLastDayOfMonth" "toLastDayOfWeek" "toLowCardinality" @@ -4379,7 +4579,82 @@ "toMonday" "toMonth" "toNullable" -"TOP" +"toQuarter" +"toRelativeDayNum" +"toRelativeHourNum" +"toRelativeMinuteNum" +"toRelativeMonthNum" +"toRelativeQuarterNum" +"toRelativeSecondNum" +"toRelativeWeekNum" +"toRelativeYearNum" +"toSecond" +"toStartOfDay" +"toStartOfFifteenMinutes" +"toStartOfFiveMinute" +"toStartOfFiveMinutes" +"toStartOfHour" +"toStartOfISOYear" +"toStartOfInterval" +"toStartOfMicrosecond" +"toStartOfMillisecond" +"toStartOfMinute" +"toStartOfMonth" +"toStartOfNanosecond" +"toStartOfQuarter" +"toStartOfSecond" +"toStartOfTenMinutes" +"toStartOfWeek" +"toStartOfYear" +"toString" +"toStringCutToZero" +"toTime" +"toTimeZone" +"toTimezone" +"toTypeName" +"toUInt128" +"toUInt128OrDefault" +"toUInt128OrNull" +"toUInt128OrZero" +"toUInt16" +"toUInt16OrDefault" +"toUInt16OrNull" +"toUInt16OrZero" +"toUInt256" +"toUInt256OrDefault" +"toUInt256OrNull" +"toUInt256OrZero" +"toUInt32" +"toUInt32OrDefault" +"toUInt32OrNull" +"toUInt32OrZero" +"toUInt64" +"toUInt64OrDefault" +"toUInt64OrNull" +"toUInt64OrZero" +"toUInt8" +"toUInt8OrDefault" +"toUInt8OrNull" +"toUInt8OrZero" +"toUTCTimestamp" +"toUUID" +"toUUIDOrDefault" +"toUUIDOrNull" +"toUUIDOrZero" +"toUnixTimestamp" +"toUnixTimestamp64Micro" +"toUnixTimestamp64Milli" +"toUnixTimestamp64Nano" +"toValidUTF8" +"toWeek" +"toYYYYMM" +"toYYYYMMDD" +"toYYYYMMDDhhmmss" +"toYear" +"toYearWeek" +"to_utc_timestamp" +"today" +"tokens" "topK" "topKArgMax" "topKArgMin" @@ -4412,102 +4687,17 @@ "topKWeightedState" "topLevelDomain" "topLevelDomainRFC" -"toQuarter" -"toRelativeDayNum" -"toRelativeHourNum" -"toRelativeMinuteNum" -"toRelativeMonthNum" -"toRelativeQuarterNum" -"toRelativeSecondNum" -"toRelativeWeekNum" -"toRelativeYearNum" -"toSecond" -"toStartOfDay" -"toStartOfFifteenMinutes" -"toStartOfFiveMinute" -"toStartOfFiveMinutes" -"toStartOfHour" -"toStartOfInterval" -"toStartOfISOYear" -"toStartOfMicrosecond" -"toStartOfMillisecond" -"toStartOfMinute" -"toStartOfMonth" -"toStartOfNanosecond" -"toStartOfQuarter" -"toStartOfSecond" -"toStartOfTenMinutes" -"toStartOfWeek" -"toStartOfYear" -"toString" -"toStringCutToZero" -"TOTALS" -"toTime" -"toTimeZone" -"toTimezone" -"toTypeName" -"toUInt128" -"toUInt128OrDefault" -"toUInt128OrNull" -"toUInt128OrZero" -"toUInt16" -"toUInt16OrDefault" -"toUInt16OrNull" -"toUInt16OrZero" -"toUInt256" -"toUInt256OrDefault" -"toUInt256OrNull" -"toUInt256OrZero" -"toUInt32" -"toUInt32OrDefault" -"toUInt32OrNull" -"toUInt32OrZero" -"toUInt64" -"toUInt64OrDefault" -"toUInt64OrNull" -"toUInt64OrZero" -"toUInt8" -"toUInt8OrDefault" -"toUInt8OrNull" -"toUInt8OrZero" -"toUnixTimestamp" -"toUnixTimestamp64Micro" -"toUnixTimestamp64Milli" -"toUnixTimestamp64Nano" -"toUTCTimestamp" -"toUUID" -"toUUIDOrDefault" -"toUUIDOrNull" -"toUUIDOrZero" -"toValidUTF8" -"toWeek" -"toYear" -"toYearWeek" -"toYYYYMM" -"toYYYYMMDD" -"toYYYYMMDDhhmmss" -"TO_BASE64" -"TO_DAYS" -"TO_UNIXTIME" -"to_utc_timestamp" -"TRACKING ONLY" -"TRAILING" -"TRANSACTION" "transactionID" "transactionLatestSnapshot" "transactionOldestSnapshot" 
"transform" "translate" "translateUTF8" -"TRIGGER" -"TRIM" "trim" "trimBoth" "trimLeft" "trimRight" -"TRUE" "trunc" -"TRUNCATE" "truncate" "tryBase58Decode" "tryBase64Decode" @@ -4515,11 +4705,9 @@ "tryDecrypt" "tryIdnaEncode" "tryPunycodeDecode" -"TTL" "tumble" "tumbleEnd" "tumbleStart" -"Tuple" "tuple" "tupleConcat" "tupleDivide" @@ -4539,20 +4727,9 @@ "tupleNegate" "tuplePlus" "tupleToNameValuePairs" -"TYPE" -"TYPEOF" "ucase" -"UInt16" -"UInt32" -"UInt64" -"UInt8" -"ULIDStringToDateTime" "unbin" -"UNBOUNDED" -"UNDROP" -"UNFREEZE" "unhex" -"UNION" "uniq" "uniqArgMax" "uniqArgMin" @@ -4646,7 +4823,6 @@ "uniqThetaSimpleState" "uniqThetaState" "uniqThetaUnion" -"UNIQUE" "uniqUpTo" "uniqUpToArgMax" "uniqUpToArgMin" @@ -4662,31 +4838,11 @@ "uniqUpToResample" "uniqUpToSimpleState" "uniqUpToState" -"UNSET FAKE TIME" -"UNSIGNED" -"UPDATE" "upper" "upperUTF8" "uptime" -"URL" -"URLHash" -"URLHierarchy" -"URLPathHierarchy" -"USE" "user" -"USING" -"UTCTimestamp" -"UTC_timestamp" -"UUID" -"UUIDNumToString" -"UUIDStringToNum" -"UUIDToNum" -"UUIDv7ToDateTime" -"VALID UNTIL" "validateNestedArraySizes" -"VALUES" -"variantElement" -"variantType" "varPop" "varPopArgMax" "varPopArgMin" @@ -4747,42 +4903,11 @@ "varSampStableSimpleState" "varSampStableState" "varSampState" -"VARYING" -"VAR_POP" -"VAR_POPArgMax" -"VAR_POPArgMin" -"VAR_POPArray" -"VAR_POPDistinct" -"VAR_POPForEach" -"VAR_POPIf" -"VAR_POPMap" -"VAR_POPMerge" -"VAR_POPNull" -"VAR_POPOrDefault" -"VAR_POPOrNull" -"VAR_POPResample" -"VAR_POPSimpleState" -"VAR_POPState" -"VAR_SAMP" -"VAR_SAMPArgMax" -"VAR_SAMPArgMin" -"VAR_SAMPArray" -"VAR_SAMPDistinct" -"VAR_SAMPForEach" -"VAR_SAMPIf" -"VAR_SAMPMap" -"VAR_SAMPMerge" -"VAR_SAMPNull" -"VAR_SAMPOrDefault" -"VAR_SAMPOrNull" -"VAR_SAMPResample" -"VAR_SAMPSimpleState" -"VAR_SAMPState" +"variantElement" +"variantType" "vectorDifference" "vectorSum" "version" -"VIEW" -"VISIBLE" "visibleWidth" "visitParamExtractBool" "visitParamExtractFloat" @@ -4791,11 +4916,7 @@ "visitParamExtractString" "visitParamExtractUInt" "visitParamHas" -"WATCH" -"WATERMARK" -"WEEK" "week" -"WEEKS" "welchTTest" "welchTTestArgMax" "welchTTestArgMin" @@ -4811,11 +4932,8 @@ "welchTTestResample" "welchTTestSimpleState" "welchTTestState" -"WHEN" -"WHERE" "widthBucket" "width_bucket" -"WINDOW" "windowFunnel" "windowFunnelArgMax" "windowFunnelArgMin" @@ -4832,16 +4950,6 @@ "windowFunnelSimpleState" "windowFunnelState" "windowID" -"WITH ADMIN OPTION" -"WITH CHECK" -"WITH FILL" -"WITH GRANT OPTION" -"WITH NAME" -"WITH REPLACE OPTION" -"WITH TIES" -"WITH" -"WITH_ITEMINDEX" -"WK" "wkt" "wordShingleMinHash" "wordShingleMinHashArg" @@ -4855,31 +4963,12 @@ "wordShingleSimHashCaseInsensitive" "wordShingleSimHashCaseInsensitiveUTF8" "wordShingleSimHashUTF8" -"WRITABLE" -"WW" "wyHash64" "xor" -"xxh3" "xxHash32" "xxHash64" +"xxh3" "yandexConsistentHash" -"YEAR" -"YEARS" "yearweek" "yesterday" -"YY" -"YYYY" -"YYYYMMDDhhmmssToDateTime" -"YYYYMMDDhhmmssToDateTime64" -"YYYYMMDDToDate" -"YYYYMMDDToDate32" -"ZKPATH" "zookeeperSessionUptime" -"_CAST" -"__actionName" -"__bitBoolMaskAnd" -"__bitBoolMaskOr" -"__bitSwapLastTwo" -"__bitWrapperFunc" -"__getScalar" -"__scalarSubqueryResult" diff --git a/tests/fuzz/dictionaries/datatypes.dict b/tests/fuzz/dictionaries/datatypes.dict index e562595fb67..797905203b2 100644 --- a/tests/fuzz/dictionaries/datatypes.dict +++ b/tests/fuzz/dictionaries/datatypes.dict @@ -1,4283 +1,137 @@ -"BIT_AND" -"BIT_ANDArgMax" -"BIT_ANDArgMin" -"BIT_ANDArray" -"BIT_ANDDistinct" -"BIT_ANDForEach" -"BIT_ANDIf" -"BIT_ANDMap" 
-"BIT_ANDMerge" -"BIT_ANDNull" -"BIT_ANDOrDefault" -"BIT_ANDOrNull" -"BIT_ANDResample" -"BIT_ANDSimpleState" -"BIT_ANDState" -"BIT_OR" -"BIT_ORArgMax" -"BIT_ORArgMin" -"BIT_ORArray" -"BIT_ORDistinct" -"BIT_ORForEach" -"BIT_ORIf" -"BIT_ORMap" -"BIT_ORMerge" -"BIT_ORNull" -"BIT_OROrDefault" -"BIT_OROrNull" -"BIT_ORResample" -"BIT_ORSimpleState" -"BIT_ORState" -"BIT_XOR" -"BIT_XORArgMax" -"BIT_XORArgMin" -"BIT_XORArray" -"BIT_XORDistinct" -"BIT_XORForEach" -"BIT_XORIf" -"BIT_XORMap" -"BIT_XORMerge" -"BIT_XORNull" -"BIT_XOROrDefault" -"BIT_XOROrNull" -"BIT_XORResample" -"BIT_XORSimpleState" -"BIT_XORState" -"BLAKE3" -"CAST" -"CHARACTER_LENGTH" -"CHAR_LENGTH" -"COVAR_POP" -"COVAR_POPArgMax" -"COVAR_POPArgMin" -"COVAR_POPArray" -"COVAR_POPDistinct" -"COVAR_POPForEach" -"COVAR_POPIf" -"COVAR_POPMap" -"COVAR_POPMerge" -"COVAR_POPNull" -"COVAR_POPOrDefault" -"COVAR_POPOrNull" -"COVAR_POPResample" -"COVAR_POPSimpleState" -"COVAR_POPState" -"COVAR_SAMP" -"COVAR_SAMPArgMax" -"COVAR_SAMPArgMin" -"COVAR_SAMPArray" -"COVAR_SAMPDistinct" -"COVAR_SAMPForEach" -"COVAR_SAMPIf" -"COVAR_SAMPMap" -"COVAR_SAMPMerge" -"COVAR_SAMPNull" -"COVAR_SAMPOrDefault" -"COVAR_SAMPOrNull" -"COVAR_SAMPResample" -"COVAR_SAMPSimpleState" -"COVAR_SAMPState" -"CRC32" -"CRC32IEEE" -"CRC64" -"DATABASE" -"DATE" -"DATE_DIFF" -"DATE_FORMAT" -"DATE_TRUNC" -"DAY" -"DAYOFMONTH" -"DAYOFWEEK" -"DAYOFYEAR" -"FORMAT_BYTES" -"FQDN" -"FROM_BASE64" -"FROM_DAYS" -"FROM_UNIXTIME" -"HOUR" -"INET6_ATON" -"INET6_NTOA" -"INET_ATON" -"INET_NTOA" -"IPv4CIDRToRange" -"IPv4NumToString" -"IPv4NumToStringClassC" -"IPv4StringToNum" -"IPv4StringToNumOrDefault" -"IPv4StringToNumOrNull" -"IPv4ToIPv6" -"IPv6CIDRToRange" -"IPv6NumToString" -"IPv6StringToNum" -"IPv6StringToNumOrDefault" -"IPv6StringToNumOrNull" -"JSONArrayLength" -"JSONExtract" -"JSONExtractArrayRaw" -"JSONExtractBool" -"JSONExtractFloat" -"JSONExtractInt" -"JSONExtractKeys" -"JSONExtractKeysAndValues" -"JSONExtractKeysAndValuesRaw" -"JSONExtractRaw" -"JSONExtractString" -"JSONExtractUInt" -"JSONHas" -"JSONKey" -"JSONLength" -"JSONMergePatch" -"JSONType" -"JSON_ARRAY_LENGTH" -"JSON_EXISTS" -"JSON_QUERY" -"JSON_VALUE" -"L1Distance" -"L1Norm" -"L1Normalize" -"L2Distance" -"L2Norm" -"L2Normalize" -"L2SquaredDistance" -"L2SquaredNorm" -"LAST_DAY" -"LinfDistance" -"LinfNorm" -"LinfNormalize" -"LpDistance" -"LpNorm" -"LpNormalize" -"MACNumToString" -"MACStringToNum" -"MACStringToOUI" -"MAP_FROM_ARRAYS" -"MD4" -"MD5" -"MILLISECOND" -"MINUTE" -"MONTH" -"OCTET_LENGTH" -"QUARTER" -"REGEXP_EXTRACT" -"REGEXP_MATCHES" -"REGEXP_REPLACE" -"SCHEMA" -"SECOND" -"SHA1" -"SHA224" -"SHA256" -"SHA384" -"SHA512" -"SHA512_256" -"STD" -"STDArgMax" -"STDArgMin" -"STDArray" -"STDDEV_POP" -"STDDEV_POPArgMax" -"STDDEV_POPArgMin" -"STDDEV_POPArray" -"STDDEV_POPDistinct" -"STDDEV_POPForEach" -"STDDEV_POPIf" -"STDDEV_POPMap" -"STDDEV_POPMerge" -"STDDEV_POPNull" -"STDDEV_POPOrDefault" -"STDDEV_POPOrNull" -"STDDEV_POPResample" -"STDDEV_POPSimpleState" -"STDDEV_POPState" -"STDDEV_SAMP" -"STDDEV_SAMPArgMax" -"STDDEV_SAMPArgMin" -"STDDEV_SAMPArray" -"STDDEV_SAMPDistinct" -"STDDEV_SAMPForEach" -"STDDEV_SAMPIf" -"STDDEV_SAMPMap" -"STDDEV_SAMPMerge" -"STDDEV_SAMPNull" -"STDDEV_SAMPOrDefault" -"STDDEV_SAMPOrNull" -"STDDEV_SAMPResample" -"STDDEV_SAMPSimpleState" -"STDDEV_SAMPState" -"STDDistinct" -"STDForEach" -"STDIf" -"STDMap" -"STDMerge" -"STDNull" -"STDOrDefault" -"STDOrNull" -"STDResample" -"STDSimpleState" -"STDState" -"SUBSTRING_INDEX" -"SVG" -"TIMESTAMP_DIFF" -"TO_BASE64" -"TO_DAYS" -"TO_UNIXTIME" -"ULIDStringToDateTime" 
-"URLHash" -"URLHierarchy" -"URLPathHierarchy" -"UTCTimestamp" -"UTC_timestamp" -"UUIDNumToString" -"UUIDStringToNum" -"UUIDToNum" -"UUIDv7ToDateTime" -"VAR_POP" -"VAR_POPArgMax" -"VAR_POPArgMin" -"VAR_POPArray" -"VAR_POPDistinct" -"VAR_POPForEach" -"VAR_POPIf" -"VAR_POPMap" -"VAR_POPMerge" -"VAR_POPNull" -"VAR_POPOrDefault" -"VAR_POPOrNull" -"VAR_POPResample" -"VAR_POPSimpleState" -"VAR_POPState" -"VAR_SAMP" -"VAR_SAMPArgMax" -"VAR_SAMPArgMin" -"VAR_SAMPArray" -"VAR_SAMPDistinct" -"VAR_SAMPForEach" -"VAR_SAMPIf" -"VAR_SAMPMap" -"VAR_SAMPMerge" -"VAR_SAMPNull" -"VAR_SAMPOrDefault" -"VAR_SAMPOrNull" -"VAR_SAMPResample" -"VAR_SAMPSimpleState" -"VAR_SAMPState" +"AggregateFunction" +"Array" +"BIGINT" +"BIGINT SIGNED" +"BIGINT UNSIGNED" +"BINARY" +"BINARY LARGE OBJECT" +"BINARY VARYING" +"BIT" +"BLOB" +"BYTE" +"BYTEA" +"Bool" +"CHAR" +"CHAR LARGE OBJECT" +"CHAR VARYING" +"CHARACTER" +"CHARACTER LARGE OBJECT" +"CHARACTER VARYING" +"CLOB" +"DEC" +"DOUBLE" +"DOUBLE PRECISION" +"Date" +"Date32" +"DateTime" +"DateTime32" +"DateTime64" +"Decimal" +"Decimal128" +"Decimal256" +"Decimal32" +"Decimal64" +"Dynamic" +"ENUM" +"Enum" +"Enum16" +"Enum8" +"FIXED" +"FLOAT" +"FixedString" +"Float32" +"Float64" +"GEOMETRY" +"INET4" +"INET6" +"INT" +"INT SIGNED" +"INT UNSIGNED" +"INT1" +"INT1 SIGNED" +"INT1 UNSIGNED" +"INTEGER" +"INTEGER SIGNED" +"INTEGER UNSIGNED" +"IPv4" +"IPv6" +"Int128" +"Int16" +"Int256" +"Int32" +"Int64" +"Int8" +"IntervalDay" +"IntervalHour" +"IntervalMicrosecond" +"IntervalMillisecond" +"IntervalMinute" +"IntervalMonth" +"IntervalNanosecond" +"IntervalQuarter" +"IntervalSecond" +"IntervalWeek" +"IntervalYear" +"JSON" +"LONGBLOB" +"LONGTEXT" +"LineString" +"LowCardinality" +"MEDIUMBLOB" +"MEDIUMINT" +"MEDIUMINT SIGNED" +"MEDIUMINT UNSIGNED" +"MEDIUMTEXT" +"Map" +"MultiLineString" +"MultiPolygon" +"NATIONAL CHAR" +"NATIONAL CHAR VARYING" +"NATIONAL CHARACTER" +"NATIONAL CHARACTER LARGE OBJECT" +"NATIONAL CHARACTER VARYING" +"NCHAR" +"NCHAR LARGE OBJECT" +"NCHAR VARYING" +"NUMERIC" +"NVARCHAR" +"Nested" +"Nothing" +"Nullable" +"Object" +"Point" +"Polygon" +"REAL" +"Ring" +"SET" +"SIGNED" +"SINGLE" +"SMALLINT" +"SMALLINT SIGNED" +"SMALLINT UNSIGNED" +"SimpleAggregateFunction" +"String" +"TEXT" +"TIME" +"TIMESTAMP" +"TINYBLOB" +"TINYINT" +"TINYINT SIGNED" +"TINYINT UNSIGNED" +"TINYTEXT" +"Tuple" +"UInt128" +"UInt16" +"UInt256" +"UInt32" +"UInt64" +"UInt8" +"UNSIGNED" +"UUID" +"VARBINARY" +"VARCHAR" +"VARCHAR2" +"Variant" "YEAR" -"YYYYMMDDToDate" -"YYYYMMDDToDate32" -"YYYYMMDDhhmmssToDateTime" -"YYYYMMDDhhmmssToDateTime64" -"_CAST" -"__actionName" -"__bitBoolMaskAnd" -"__bitBoolMaskOr" -"__bitSwapLastTwo" -"__bitWrapperFunc" -"__getScalar" -"__scalarSubqueryResult" -"abs" -"accurateCast" -"accurateCastOrDefault" -"accurateCastOrNull" -"acos" -"acosh" -"addDate" -"addDays" -"addHours" -"addInterval" -"addMicroseconds" -"addMilliseconds" -"addMinutes" -"addMonths" -"addNanoseconds" -"addQuarters" -"addSeconds" -"addTupleOfIntervals" -"addWeeks" -"addYears" -"addressToLine" -"addressToLineWithInlines" -"addressToSymbol" -"aes_decrypt_mysql" -"aes_encrypt_mysql" -"age" -"aggThrow" -"aggThrowArgMax" -"aggThrowArgMin" -"aggThrowArray" -"aggThrowDistinct" -"aggThrowForEach" -"aggThrowIf" -"aggThrowMap" -"aggThrowMerge" -"aggThrowNull" -"aggThrowOrDefault" -"aggThrowOrNull" -"aggThrowResample" -"aggThrowSimpleState" -"aggThrowState" -"alphaTokens" -"analysisOfVariance" -"analysisOfVarianceArgMax" -"analysisOfVarianceArgMin" -"analysisOfVarianceArray" -"analysisOfVarianceDistinct" 
-"analysisOfVarianceForEach" -"analysisOfVarianceIf" -"analysisOfVarianceMap" -"analysisOfVarianceMerge" -"analysisOfVarianceNull" -"analysisOfVarianceOrDefault" -"analysisOfVarianceOrNull" -"analysisOfVarianceResample" -"analysisOfVarianceSimpleState" -"analysisOfVarianceState" -"and" -"anova" -"anovaArgMax" -"anovaArgMin" -"anovaArray" -"anovaDistinct" -"anovaForEach" -"anovaIf" -"anovaMap" -"anovaMerge" -"anovaNull" -"anovaOrDefault" -"anovaOrNull" -"anovaResample" -"anovaSimpleState" -"anovaState" -"any" -"anyArgMax" -"anyArgMin" -"anyArray" -"anyDistinct" -"anyForEach" -"anyHeavy" -"anyHeavyArgMax" -"anyHeavyArgMin" -"anyHeavyArray" -"anyHeavyDistinct" -"anyHeavyForEach" -"anyHeavyIf" -"anyHeavyMap" -"anyHeavyMerge" -"anyHeavyNull" -"anyHeavyOrDefault" -"anyHeavyOrNull" -"anyHeavyResample" -"anyHeavySimpleState" -"anyHeavyState" -"anyIf" -"anyLast" -"anyLastArgMax" -"anyLastArgMin" -"anyLastArray" -"anyLastDistinct" -"anyLastForEach" -"anyLastIf" -"anyLastMap" -"anyLastMerge" -"anyLastNull" -"anyLastOrDefault" -"anyLastOrNull" -"anyLastResample" -"anyLastSimpleState" -"anyLastState" -"anyLast_respect_nulls" -"anyLast_respect_nullsArgMax" -"anyLast_respect_nullsArgMin" -"anyLast_respect_nullsArray" -"anyLast_respect_nullsDistinct" -"anyLast_respect_nullsForEach" -"anyLast_respect_nullsIf" -"anyLast_respect_nullsMap" -"anyLast_respect_nullsMerge" -"anyLast_respect_nullsNull" -"anyLast_respect_nullsOrDefault" -"anyLast_respect_nullsOrNull" -"anyLast_respect_nullsResample" -"anyLast_respect_nullsSimpleState" -"anyLast_respect_nullsState" -"anyMap" -"anyMerge" -"anyNull" -"anyOrDefault" -"anyOrNull" -"anyResample" -"anySimpleState" -"anyState" -"any_respect_nulls" -"any_respect_nullsArgMax" -"any_respect_nullsArgMin" -"any_respect_nullsArray" -"any_respect_nullsDistinct" -"any_respect_nullsForEach" -"any_respect_nullsIf" -"any_respect_nullsMap" -"any_respect_nullsMerge" -"any_respect_nullsNull" -"any_respect_nullsOrDefault" -"any_respect_nullsOrNull" -"any_respect_nullsResample" -"any_respect_nullsSimpleState" -"any_respect_nullsState" -"any_value" -"any_valueArgMax" -"any_valueArgMin" -"any_valueArray" -"any_valueDistinct" -"any_valueForEach" -"any_valueIf" -"any_valueMap" -"any_valueMerge" -"any_valueNull" -"any_valueOrDefault" -"any_valueOrNull" -"any_valueResample" -"any_valueSimpleState" -"any_valueState" -"any_value_respect_nulls" -"any_value_respect_nullsArgMax" -"any_value_respect_nullsArgMin" -"any_value_respect_nullsArray" -"any_value_respect_nullsDistinct" -"any_value_respect_nullsForEach" -"any_value_respect_nullsIf" -"any_value_respect_nullsMap" -"any_value_respect_nullsMerge" -"any_value_respect_nullsNull" -"any_value_respect_nullsOrDefault" -"any_value_respect_nullsOrNull" -"any_value_respect_nullsResample" -"any_value_respect_nullsSimpleState" -"any_value_respect_nullsState" -"appendTrailingCharIfAbsent" -"approx_top_count" -"approx_top_countArgMax" -"approx_top_countArgMin" -"approx_top_countArray" -"approx_top_countDistinct" -"approx_top_countForEach" -"approx_top_countIf" -"approx_top_countMap" -"approx_top_countMerge" -"approx_top_countNull" -"approx_top_countOrDefault" -"approx_top_countOrNull" -"approx_top_countResample" -"approx_top_countSimpleState" -"approx_top_countState" -"approx_top_k" -"approx_top_kArgMax" -"approx_top_kArgMin" -"approx_top_kArray" -"approx_top_kDistinct" -"approx_top_kForEach" -"approx_top_kIf" -"approx_top_kMap" -"approx_top_kMerge" -"approx_top_kNull" -"approx_top_kOrDefault" -"approx_top_kOrNull" -"approx_top_kResample" 
-"approx_top_kSimpleState" -"approx_top_kState" -"approx_top_sum" -"approx_top_sumArgMax" -"approx_top_sumArgMin" -"approx_top_sumArray" -"approx_top_sumDistinct" -"approx_top_sumForEach" -"approx_top_sumIf" -"approx_top_sumMap" -"approx_top_sumMerge" -"approx_top_sumNull" -"approx_top_sumOrDefault" -"approx_top_sumOrNull" -"approx_top_sumResample" -"approx_top_sumSimpleState" -"approx_top_sumState" -"argMax" -"argMaxArgMax" -"argMaxArgMin" -"argMaxArray" -"argMaxDistinct" -"argMaxForEach" -"argMaxIf" -"argMaxMap" -"argMaxMerge" -"argMaxNull" -"argMaxOrDefault" -"argMaxOrNull" -"argMaxResample" -"argMaxSimpleState" -"argMaxState" -"argMin" -"argMinArgMax" -"argMinArgMin" -"argMinArray" -"argMinDistinct" -"argMinForEach" -"argMinIf" -"argMinMap" -"argMinMerge" -"argMinNull" -"argMinOrDefault" -"argMinOrNull" -"argMinResample" -"argMinSimpleState" -"argMinState" -"array" -"arrayAUC" -"arrayAll" -"arrayAvg" -"arrayCompact" -"arrayConcat" -"arrayCount" -"arrayCumSum" -"arrayCumSumNonNegative" -"arrayDifference" -"arrayDistinct" -"arrayDotProduct" -"arrayElement" -"arrayEnumerate" -"arrayEnumerateDense" -"arrayEnumerateDenseRanked" -"arrayEnumerateUniq" -"arrayEnumerateUniqRanked" -"arrayExists" -"arrayFill" -"arrayFilter" -"arrayFirst" -"arrayFirstIndex" -"arrayFirstOrNull" -"arrayFlatten" -"arrayFold" -"arrayIntersect" -"arrayJaccardIndex" -"arrayJoin" -"arrayLast" -"arrayLastIndex" -"arrayLastOrNull" -"arrayMap" -"arrayMax" -"arrayMin" -"arrayPartialReverseSort" -"arrayPartialShuffle" -"arrayPartialSort" -"arrayPopBack" -"arrayPopFront" -"arrayProduct" -"arrayPushBack" -"arrayPushFront" -"arrayRandomSample" -"arrayReduce" -"arrayReduceInRanges" -"arrayResize" -"arrayReverse" -"arrayReverseFill" -"arrayReverseSort" -"arrayReverseSplit" -"arrayRotateLeft" -"arrayRotateRight" -"arrayShiftLeft" -"arrayShiftRight" -"arrayShingles" -"arrayShuffle" -"arraySlice" -"arraySort" -"arraySplit" -"arrayStringConcat" -"arraySum" -"arrayUniq" -"arrayWithConstant" -"arrayZip" -"array_agg" -"array_aggArgMax" -"array_aggArgMin" -"array_aggArray" -"array_aggDistinct" -"array_aggForEach" -"array_aggIf" -"array_aggMap" -"array_aggMerge" -"array_aggNull" -"array_aggOrDefault" -"array_aggOrNull" -"array_aggResample" -"array_aggSimpleState" -"array_aggState" -"array_concat_agg" -"array_concat_aggArgMax" -"array_concat_aggArgMin" -"array_concat_aggArray" -"array_concat_aggDistinct" -"array_concat_aggForEach" -"array_concat_aggIf" -"array_concat_aggMap" -"array_concat_aggMerge" -"array_concat_aggNull" -"array_concat_aggOrDefault" -"array_concat_aggOrNull" -"array_concat_aggResample" -"array_concat_aggSimpleState" -"array_concat_aggState" -"ascii" -"asin" -"asinh" -"assumeNotNull" -"atan" -"atan2" -"atanh" -"avg" -"avgArgMax" -"avgArgMin" -"avgArray" -"avgDistinct" -"avgForEach" -"avgIf" -"avgMap" -"avgMerge" -"avgNull" -"avgOrDefault" -"avgOrNull" -"avgResample" -"avgSimpleState" -"avgState" -"avgWeighted" -"avgWeightedArgMax" -"avgWeightedArgMin" -"avgWeightedArray" -"avgWeightedDistinct" -"avgWeightedForEach" -"avgWeightedIf" -"avgWeightedMap" -"avgWeightedMerge" -"avgWeightedNull" -"avgWeightedOrDefault" -"avgWeightedOrNull" -"avgWeightedResample" -"avgWeightedSimpleState" -"avgWeightedState" -"bar" -"base58Decode" -"base58Encode" -"base64Decode" -"base64Encode" -"base64URLDecode" -"base64URLEncode" -"basename" -"bin" -"bitAnd" -"bitCount" -"bitHammingDistance" -"bitNot" -"bitOr" -"bitPositionsToArray" -"bitRotateLeft" -"bitRotateRight" -"bitShiftLeft" -"bitShiftRight" -"bitSlice" -"bitTest" -"bitTestAll" 
-"bitTestAny" -"bitXor" -"bitmapAnd" -"bitmapAndCardinality" -"bitmapAndnot" -"bitmapAndnotCardinality" -"bitmapBuild" -"bitmapCardinality" -"bitmapContains" -"bitmapHasAll" -"bitmapHasAny" -"bitmapMax" -"bitmapMin" -"bitmapOr" -"bitmapOrCardinality" -"bitmapSubsetInRange" -"bitmapSubsetLimit" -"bitmapToArray" -"bitmapTransform" -"bitmapXor" -"bitmapXorCardinality" -"bitmaskToArray" -"bitmaskToList" -"blockNumber" -"blockSerializedSize" -"blockSize" -"boundingRatio" -"boundingRatioArgMax" -"boundingRatioArgMin" -"boundingRatioArray" -"boundingRatioDistinct" -"boundingRatioForEach" -"boundingRatioIf" -"boundingRatioMap" -"boundingRatioMerge" -"boundingRatioNull" -"boundingRatioOrDefault" -"boundingRatioOrNull" -"boundingRatioResample" -"boundingRatioSimpleState" -"boundingRatioState" -"buildId" -"byteHammingDistance" -"byteSize" -"byteSlice" -"byteSwap" -"caseWithExpr" -"caseWithExpression" -"caseWithoutExpr" -"caseWithoutExpression" -"catboostEvaluate" -"categoricalInformationValue" -"categoricalInformationValueArgMax" -"categoricalInformationValueArgMin" -"categoricalInformationValueArray" -"categoricalInformationValueDistinct" -"categoricalInformationValueForEach" -"categoricalInformationValueIf" -"categoricalInformationValueMap" -"categoricalInformationValueMerge" -"categoricalInformationValueNull" -"categoricalInformationValueOrDefault" -"categoricalInformationValueOrNull" -"categoricalInformationValueResample" -"categoricalInformationValueSimpleState" -"categoricalInformationValueState" -"cbrt" -"ceil" -"ceiling" -"changeDay" -"changeHour" -"changeMinute" -"changeMonth" -"changeSecond" -"changeYear" -"char" -"cityHash64" -"clamp" -"coalesce" -"concat" -"concatAssumeInjective" -"concatWithSeparator" -"concatWithSeparatorAssumeInjective" -"concat_ws" -"connectionId" -"connection_id" -"contingency" -"contingencyArgMax" -"contingencyArgMin" -"contingencyArray" -"contingencyDistinct" -"contingencyForEach" -"contingencyIf" -"contingencyMap" -"contingencyMerge" -"contingencyNull" -"contingencyOrDefault" -"contingencyOrNull" -"contingencyResample" -"contingencySimpleState" -"contingencyState" -"convertCharset" -"corr" -"corrArgMax" -"corrArgMin" -"corrArray" -"corrDistinct" -"corrForEach" -"corrIf" -"corrMap" -"corrMatrix" -"corrMatrixArgMax" -"corrMatrixArgMin" -"corrMatrixArray" -"corrMatrixDistinct" -"corrMatrixForEach" -"corrMatrixIf" -"corrMatrixMap" -"corrMatrixMerge" -"corrMatrixNull" -"corrMatrixOrDefault" -"corrMatrixOrNull" -"corrMatrixResample" -"corrMatrixSimpleState" -"corrMatrixState" -"corrMerge" -"corrNull" -"corrOrDefault" -"corrOrNull" -"corrResample" -"corrSimpleState" -"corrStable" -"corrStableArgMax" -"corrStableArgMin" -"corrStableArray" -"corrStableDistinct" -"corrStableForEach" -"corrStableIf" -"corrStableMap" -"corrStableMerge" -"corrStableNull" -"corrStableOrDefault" -"corrStableOrNull" -"corrStableResample" -"corrStableSimpleState" -"corrStableState" -"corrState" -"cos" -"cosh" -"cosineDistance" -"count" -"countArgMax" -"countArgMin" -"countArray" -"countDigits" -"countDistinct" -"countEqual" -"countForEach" -"countIf" -"countMap" -"countMatches" -"countMatchesCaseInsensitive" -"countMerge" -"countNull" -"countOrDefault" -"countOrNull" -"countResample" -"countSimpleState" -"countState" -"countSubstrings" -"countSubstringsCaseInsensitive" -"countSubstringsCaseInsensitiveUTF8" -"covarPop" -"covarPopArgMax" -"covarPopArgMin" -"covarPopArray" -"covarPopDistinct" -"covarPopForEach" -"covarPopIf" -"covarPopMap" -"covarPopMatrix" -"covarPopMatrixArgMax" 
-"covarPopMatrixArgMin" -"covarPopMatrixArray" -"covarPopMatrixDistinct" -"covarPopMatrixForEach" -"covarPopMatrixIf" -"covarPopMatrixMap" -"covarPopMatrixMerge" -"covarPopMatrixNull" -"covarPopMatrixOrDefault" -"covarPopMatrixOrNull" -"covarPopMatrixResample" -"covarPopMatrixSimpleState" -"covarPopMatrixState" -"covarPopMerge" -"covarPopNull" -"covarPopOrDefault" -"covarPopOrNull" -"covarPopResample" -"covarPopSimpleState" -"covarPopStable" -"covarPopStableArgMax" -"covarPopStableArgMin" -"covarPopStableArray" -"covarPopStableDistinct" -"covarPopStableForEach" -"covarPopStableIf" -"covarPopStableMap" -"covarPopStableMerge" -"covarPopStableNull" -"covarPopStableOrDefault" -"covarPopStableOrNull" -"covarPopStableResample" -"covarPopStableSimpleState" -"covarPopStableState" -"covarPopState" -"covarSamp" -"covarSampArgMax" -"covarSampArgMin" -"covarSampArray" -"covarSampDistinct" -"covarSampForEach" -"covarSampIf" -"covarSampMap" -"covarSampMatrix" -"covarSampMatrixArgMax" -"covarSampMatrixArgMin" -"covarSampMatrixArray" -"covarSampMatrixDistinct" -"covarSampMatrixForEach" -"covarSampMatrixIf" -"covarSampMatrixMap" -"covarSampMatrixMerge" -"covarSampMatrixNull" -"covarSampMatrixOrDefault" -"covarSampMatrixOrNull" -"covarSampMatrixResample" -"covarSampMatrixSimpleState" -"covarSampMatrixState" -"covarSampMerge" -"covarSampNull" -"covarSampOrDefault" -"covarSampOrNull" -"covarSampResample" -"covarSampSimpleState" -"covarSampStable" -"covarSampStableArgMax" -"covarSampStableArgMin" -"covarSampStableArray" -"covarSampStableDistinct" -"covarSampStableForEach" -"covarSampStableIf" -"covarSampStableMap" -"covarSampStableMerge" -"covarSampStableNull" -"covarSampStableOrDefault" -"covarSampStableOrNull" -"covarSampStableResample" -"covarSampStableSimpleState" -"covarSampStableState" -"covarSampState" -"cramersV" -"cramersVArgMax" -"cramersVArgMin" -"cramersVArray" -"cramersVBiasCorrected" -"cramersVBiasCorrectedArgMax" -"cramersVBiasCorrectedArgMin" -"cramersVBiasCorrectedArray" -"cramersVBiasCorrectedDistinct" -"cramersVBiasCorrectedForEach" -"cramersVBiasCorrectedIf" -"cramersVBiasCorrectedMap" -"cramersVBiasCorrectedMerge" -"cramersVBiasCorrectedNull" -"cramersVBiasCorrectedOrDefault" -"cramersVBiasCorrectedOrNull" -"cramersVBiasCorrectedResample" -"cramersVBiasCorrectedSimpleState" -"cramersVBiasCorrectedState" -"cramersVDistinct" -"cramersVForEach" -"cramersVIf" -"cramersVMap" -"cramersVMerge" -"cramersVNull" -"cramersVOrDefault" -"cramersVOrNull" -"cramersVResample" -"cramersVSimpleState" -"cramersVState" -"curdate" -"currentDatabase" -"currentProfiles" -"currentRoles" -"currentSchemas" -"currentUser" -"current_database" -"current_date" -"current_schemas" -"current_timestamp" -"current_user" -"cutFragment" -"cutIPv6" -"cutQueryString" -"cutQueryStringAndFragment" -"cutToFirstSignificantSubdomain" -"cutToFirstSignificantSubdomainCustom" -"cutToFirstSignificantSubdomainCustomRFC" -"cutToFirstSignificantSubdomainCustomWithWWW" -"cutToFirstSignificantSubdomainCustomWithWWWRFC" -"cutToFirstSignificantSubdomainRFC" -"cutToFirstSignificantSubdomainWithWWW" -"cutToFirstSignificantSubdomainWithWWWRFC" -"cutURLParameter" -"cutWWW" -"damerauLevenshteinDistance" -"dateDiff" -"dateName" -"dateTime64ToSnowflake" -"dateTime64ToSnowflakeID" -"dateTimeToSnowflake" -"dateTimeToSnowflakeID" -"dateTrunc" -"date_diff" -"decodeHTMLComponent" -"decodeURLComponent" -"decodeURLFormComponent" -"decodeXMLComponent" -"decrypt" -"defaultProfiles" -"defaultRoles" -"defaultValueOfArgumentType" -"defaultValueOfTypeName" 
-"degrees" -"deltaSum" -"deltaSumArgMax" -"deltaSumArgMin" -"deltaSumArray" -"deltaSumDistinct" -"deltaSumForEach" -"deltaSumIf" -"deltaSumMap" -"deltaSumMerge" -"deltaSumNull" -"deltaSumOrDefault" -"deltaSumOrNull" -"deltaSumResample" -"deltaSumSimpleState" -"deltaSumState" -"deltaSumTimestamp" -"deltaSumTimestampArgMax" -"deltaSumTimestampArgMin" -"deltaSumTimestampArray" -"deltaSumTimestampDistinct" -"deltaSumTimestampForEach" -"deltaSumTimestampIf" -"deltaSumTimestampMap" -"deltaSumTimestampMerge" -"deltaSumTimestampNull" -"deltaSumTimestampOrDefault" -"deltaSumTimestampOrNull" -"deltaSumTimestampResample" -"deltaSumTimestampSimpleState" -"deltaSumTimestampState" -"demangle" -"denseRank" -"denseRankArgMax" -"denseRankArgMin" -"denseRankArray" -"denseRankDistinct" -"denseRankForEach" -"denseRankIf" -"denseRankMap" -"denseRankMerge" -"denseRankNull" -"denseRankOrDefault" -"denseRankOrNull" -"denseRankResample" -"denseRankSimpleState" -"denseRankState" -"dense_rank" -"dense_rankArgMax" -"dense_rankArgMin" -"dense_rankArray" -"dense_rankDistinct" -"dense_rankForEach" -"dense_rankIf" -"dense_rankMap" -"dense_rankMerge" -"dense_rankNull" -"dense_rankOrDefault" -"dense_rankOrNull" -"dense_rankResample" -"dense_rankSimpleState" -"dense_rankState" -"detectCharset" -"detectLanguage" -"detectLanguageMixed" -"detectLanguageUnknown" -"detectProgrammingLanguage" -"detectTonality" -"dictGet" -"dictGetAll" -"dictGetChildren" -"dictGetDate" -"dictGetDateOrDefault" -"dictGetDateTime" -"dictGetDateTimeOrDefault" -"dictGetDescendants" -"dictGetFloat32" -"dictGetFloat32OrDefault" -"dictGetFloat64" -"dictGetFloat64OrDefault" -"dictGetHierarchy" -"dictGetIPv4" -"dictGetIPv4OrDefault" -"dictGetIPv6" -"dictGetIPv6OrDefault" -"dictGetInt16" -"dictGetInt16OrDefault" -"dictGetInt32" -"dictGetInt32OrDefault" -"dictGetInt64" -"dictGetInt64OrDefault" -"dictGetInt8" -"dictGetInt8OrDefault" -"dictGetOrDefault" -"dictGetOrNull" -"dictGetString" -"dictGetStringOrDefault" -"dictGetUInt16" -"dictGetUInt16OrDefault" -"dictGetUInt32" -"dictGetUInt32OrDefault" -"dictGetUInt64" -"dictGetUInt64OrDefault" -"dictGetUInt8" -"dictGetUInt8OrDefault" -"dictGetUUID" -"dictGetUUIDOrDefault" -"dictHas" -"dictIsIn" -"displayName" -"distanceL1" -"distanceL2" -"distanceL2Squared" -"distanceLinf" -"distanceLp" -"divide" -"divideDecimal" -"domain" -"domainRFC" -"domainWithoutWWW" -"domainWithoutWWWRFC" -"dotProduct" -"dumpColumnStructure" -"dynamicElement" -"dynamicType" -"e" -"editDistance" -"editDistanceUTF8" -"empty" -"emptyArrayDate" -"emptyArrayDateTime" -"emptyArrayFloat32" -"emptyArrayFloat64" -"emptyArrayInt16" -"emptyArrayInt32" -"emptyArrayInt64" -"emptyArrayInt8" -"emptyArrayString" -"emptyArrayToSingle" -"emptyArrayUInt16" -"emptyArrayUInt32" -"emptyArrayUInt64" -"emptyArrayUInt8" -"enabledProfiles" -"enabledRoles" -"encodeURLComponent" -"encodeURLFormComponent" -"encodeXMLComponent" -"encrypt" -"endsWith" -"endsWithUTF8" -"entropy" -"entropyArgMax" -"entropyArgMin" -"entropyArray" -"entropyDistinct" -"entropyForEach" -"entropyIf" -"entropyMap" -"entropyMerge" -"entropyNull" -"entropyOrDefault" -"entropyOrNull" -"entropyResample" -"entropySimpleState" -"entropyState" -"equals" -"erf" -"erfc" -"errorCodeToName" -"evalMLMethod" -"exp" -"exp10" -"exp2" -"exponentialMovingAverage" -"exponentialMovingAverageArgMax" -"exponentialMovingAverageArgMin" -"exponentialMovingAverageArray" -"exponentialMovingAverageDistinct" -"exponentialMovingAverageForEach" -"exponentialMovingAverageIf" -"exponentialMovingAverageMap" 
-"exponentialMovingAverageMerge" -"exponentialMovingAverageNull" -"exponentialMovingAverageOrDefault" -"exponentialMovingAverageOrNull" -"exponentialMovingAverageResample" -"exponentialMovingAverageSimpleState" -"exponentialMovingAverageState" -"exponentialTimeDecayedAvg" -"exponentialTimeDecayedAvgArgMax" -"exponentialTimeDecayedAvgArgMin" -"exponentialTimeDecayedAvgArray" -"exponentialTimeDecayedAvgDistinct" -"exponentialTimeDecayedAvgForEach" -"exponentialTimeDecayedAvgIf" -"exponentialTimeDecayedAvgMap" -"exponentialTimeDecayedAvgMerge" -"exponentialTimeDecayedAvgNull" -"exponentialTimeDecayedAvgOrDefault" -"exponentialTimeDecayedAvgOrNull" -"exponentialTimeDecayedAvgResample" -"exponentialTimeDecayedAvgSimpleState" -"exponentialTimeDecayedAvgState" -"exponentialTimeDecayedCount" -"exponentialTimeDecayedCountArgMax" -"exponentialTimeDecayedCountArgMin" -"exponentialTimeDecayedCountArray" -"exponentialTimeDecayedCountDistinct" -"exponentialTimeDecayedCountForEach" -"exponentialTimeDecayedCountIf" -"exponentialTimeDecayedCountMap" -"exponentialTimeDecayedCountMerge" -"exponentialTimeDecayedCountNull" -"exponentialTimeDecayedCountOrDefault" -"exponentialTimeDecayedCountOrNull" -"exponentialTimeDecayedCountResample" -"exponentialTimeDecayedCountSimpleState" -"exponentialTimeDecayedCountState" -"exponentialTimeDecayedMax" -"exponentialTimeDecayedMaxArgMax" -"exponentialTimeDecayedMaxArgMin" -"exponentialTimeDecayedMaxArray" -"exponentialTimeDecayedMaxDistinct" -"exponentialTimeDecayedMaxForEach" -"exponentialTimeDecayedMaxIf" -"exponentialTimeDecayedMaxMap" -"exponentialTimeDecayedMaxMerge" -"exponentialTimeDecayedMaxNull" -"exponentialTimeDecayedMaxOrDefault" -"exponentialTimeDecayedMaxOrNull" -"exponentialTimeDecayedMaxResample" -"exponentialTimeDecayedMaxSimpleState" -"exponentialTimeDecayedMaxState" -"exponentialTimeDecayedSum" -"exponentialTimeDecayedSumArgMax" -"exponentialTimeDecayedSumArgMin" -"exponentialTimeDecayedSumArray" -"exponentialTimeDecayedSumDistinct" -"exponentialTimeDecayedSumForEach" -"exponentialTimeDecayedSumIf" -"exponentialTimeDecayedSumMap" -"exponentialTimeDecayedSumMerge" -"exponentialTimeDecayedSumNull" -"exponentialTimeDecayedSumOrDefault" -"exponentialTimeDecayedSumOrNull" -"exponentialTimeDecayedSumResample" -"exponentialTimeDecayedSumSimpleState" -"exponentialTimeDecayedSumState" -"extract" -"extractAll" -"extractAllGroups" -"extractAllGroupsHorizontal" -"extractAllGroupsVertical" -"extractGroups" -"extractKeyValuePairs" -"extractKeyValuePairsWithEscaping" -"extractTextFromHTML" -"extractURLParameter" -"extractURLParameterNames" -"extractURLParameters" -"factorial" -"farmFingerprint64" -"farmHash64" -"file" -"filesystemAvailable" -"filesystemCapacity" -"filesystemUnreserved" -"finalizeAggregation" -"firstLine" -"firstSignificantSubdomain" -"firstSignificantSubdomainCustom" -"firstSignificantSubdomainCustomRFC" -"firstSignificantSubdomainRFC" -"first_value" -"first_valueArgMax" -"first_valueArgMin" -"first_valueArray" -"first_valueDistinct" -"first_valueForEach" -"first_valueIf" -"first_valueMap" -"first_valueMerge" -"first_valueNull" -"first_valueOrDefault" -"first_valueOrNull" -"first_valueResample" -"first_valueSimpleState" -"first_valueState" -"first_value_respect_nulls" -"first_value_respect_nullsArgMax" -"first_value_respect_nullsArgMin" -"first_value_respect_nullsArray" -"first_value_respect_nullsDistinct" -"first_value_respect_nullsForEach" -"first_value_respect_nullsIf" -"first_value_respect_nullsMap" -"first_value_respect_nullsMerge" 
-"first_value_respect_nullsNull" -"first_value_respect_nullsOrDefault" -"first_value_respect_nullsOrNull" -"first_value_respect_nullsResample" -"first_value_respect_nullsSimpleState" -"first_value_respect_nullsState" -"flameGraph" -"flameGraphArgMax" -"flameGraphArgMin" -"flameGraphArray" -"flameGraphDistinct" -"flameGraphForEach" -"flameGraphIf" -"flameGraphMap" -"flameGraphMerge" -"flameGraphNull" -"flameGraphOrDefault" -"flameGraphOrNull" -"flameGraphResample" -"flameGraphSimpleState" -"flameGraphState" -"flatten" -"flattenTuple" -"floor" -"format" -"formatDateTime" -"formatDateTimeInJodaSyntax" -"formatQuery" -"formatQueryOrNull" -"formatQuerySingleLine" -"formatQuerySingleLineOrNull" -"formatReadableDecimalSize" -"formatReadableQuantity" -"formatReadableSize" -"formatReadableTimeDelta" -"formatRow" -"formatRowNoNewline" -"fragment" -"fromDaysSinceYearZero" -"fromDaysSinceYearZero32" -"fromModifiedJulianDay" -"fromModifiedJulianDayOrNull" -"fromUTCTimestamp" -"fromUnixTimestamp" -"fromUnixTimestamp64Micro" -"fromUnixTimestamp64Milli" -"fromUnixTimestamp64Nano" -"fromUnixTimestampInJodaSyntax" -"from_utc_timestamp" -"fullHostName" -"fuzzBits" -"gccMurmurHash" -"gcd" -"generateRandomStructure" -"generateSnowflakeID" -"generateULID" -"generateUUIDv4" -"generateUUIDv7" -"geoDistance" -"geoToH3" -"geoToS2" -"geohashDecode" -"geohashEncode" -"geohashesInBox" -"getClientHTTPHeader" -"getMacro" -"getOSKernelVersion" -"getServerPort" -"getSetting" -"getSizeOfEnumType" -"getSubcolumn" -"getTypeSerializationStreams" -"globalIn" -"globalInIgnoreSet" -"globalNotIn" -"globalNotInIgnoreSet" -"globalNotNullIn" -"globalNotNullInIgnoreSet" -"globalNullIn" -"globalNullInIgnoreSet" -"globalVariable" -"greatCircleAngle" -"greatCircleDistance" -"greater" -"greaterOrEquals" -"greatest" -"groupArray" -"groupArrayArgMax" -"groupArrayArgMin" -"groupArrayArray" -"groupArrayDistinct" -"groupArrayForEach" -"groupArrayIf" -"groupArrayInsertAt" -"groupArrayInsertAtArgMax" -"groupArrayInsertAtArgMin" -"groupArrayInsertAtArray" -"groupArrayInsertAtDistinct" -"groupArrayInsertAtForEach" -"groupArrayInsertAtIf" -"groupArrayInsertAtMap" -"groupArrayInsertAtMerge" -"groupArrayInsertAtNull" -"groupArrayInsertAtOrDefault" -"groupArrayInsertAtOrNull" -"groupArrayInsertAtResample" -"groupArrayInsertAtSimpleState" -"groupArrayInsertAtState" -"groupArrayIntersect" -"groupArrayIntersectArgMax" -"groupArrayIntersectArgMin" -"groupArrayIntersectArray" -"groupArrayIntersectDistinct" -"groupArrayIntersectForEach" -"groupArrayIntersectIf" -"groupArrayIntersectMap" -"groupArrayIntersectMerge" -"groupArrayIntersectNull" -"groupArrayIntersectOrDefault" -"groupArrayIntersectOrNull" -"groupArrayIntersectResample" -"groupArrayIntersectSimpleState" -"groupArrayIntersectState" -"groupArrayLast" -"groupArrayLastArgMax" -"groupArrayLastArgMin" -"groupArrayLastArray" -"groupArrayLastDistinct" -"groupArrayLastForEach" -"groupArrayLastIf" -"groupArrayLastMap" -"groupArrayLastMerge" -"groupArrayLastNull" -"groupArrayLastOrDefault" -"groupArrayLastOrNull" -"groupArrayLastResample" -"groupArrayLastSimpleState" -"groupArrayLastState" -"groupArrayMap" -"groupArrayMerge" -"groupArrayMovingAvg" -"groupArrayMovingAvgArgMax" -"groupArrayMovingAvgArgMin" -"groupArrayMovingAvgArray" -"groupArrayMovingAvgDistinct" -"groupArrayMovingAvgForEach" -"groupArrayMovingAvgIf" -"groupArrayMovingAvgMap" -"groupArrayMovingAvgMerge" -"groupArrayMovingAvgNull" -"groupArrayMovingAvgOrDefault" -"groupArrayMovingAvgOrNull" -"groupArrayMovingAvgResample" 
-"groupArrayMovingAvgSimpleState" -"groupArrayMovingAvgState" -"groupArrayMovingSum" -"groupArrayMovingSumArgMax" -"groupArrayMovingSumArgMin" -"groupArrayMovingSumArray" -"groupArrayMovingSumDistinct" -"groupArrayMovingSumForEach" -"groupArrayMovingSumIf" -"groupArrayMovingSumMap" -"groupArrayMovingSumMerge" -"groupArrayMovingSumNull" -"groupArrayMovingSumOrDefault" -"groupArrayMovingSumOrNull" -"groupArrayMovingSumResample" -"groupArrayMovingSumSimpleState" -"groupArrayMovingSumState" -"groupArrayNull" -"groupArrayOrDefault" -"groupArrayOrNull" -"groupArrayResample" -"groupArraySample" -"groupArraySampleArgMax" -"groupArraySampleArgMin" -"groupArraySampleArray" -"groupArraySampleDistinct" -"groupArraySampleForEach" -"groupArraySampleIf" -"groupArraySampleMap" -"groupArraySampleMerge" -"groupArraySampleNull" -"groupArraySampleOrDefault" -"groupArraySampleOrNull" -"groupArraySampleResample" -"groupArraySampleSimpleState" -"groupArraySampleState" -"groupArraySimpleState" -"groupArraySorted" -"groupArraySortedArgMax" -"groupArraySortedArgMin" -"groupArraySortedArray" -"groupArraySortedDistinct" -"groupArraySortedForEach" -"groupArraySortedIf" -"groupArraySortedMap" -"groupArraySortedMerge" -"groupArraySortedNull" -"groupArraySortedOrDefault" -"groupArraySortedOrNull" -"groupArraySortedResample" -"groupArraySortedSimpleState" -"groupArraySortedState" -"groupArrayState" -"groupBitAnd" -"groupBitAndArgMax" -"groupBitAndArgMin" -"groupBitAndArray" -"groupBitAndDistinct" -"groupBitAndForEach" -"groupBitAndIf" -"groupBitAndMap" -"groupBitAndMerge" -"groupBitAndNull" -"groupBitAndOrDefault" -"groupBitAndOrNull" -"groupBitAndResample" -"groupBitAndSimpleState" -"groupBitAndState" -"groupBitOr" -"groupBitOrArgMax" -"groupBitOrArgMin" -"groupBitOrArray" -"groupBitOrDistinct" -"groupBitOrForEach" -"groupBitOrIf" -"groupBitOrMap" -"groupBitOrMerge" -"groupBitOrNull" -"groupBitOrOrDefault" -"groupBitOrOrNull" -"groupBitOrResample" -"groupBitOrSimpleState" -"groupBitOrState" -"groupBitXor" -"groupBitXorArgMax" -"groupBitXorArgMin" -"groupBitXorArray" -"groupBitXorDistinct" -"groupBitXorForEach" -"groupBitXorIf" -"groupBitXorMap" -"groupBitXorMerge" -"groupBitXorNull" -"groupBitXorOrDefault" -"groupBitXorOrNull" -"groupBitXorResample" -"groupBitXorSimpleState" -"groupBitXorState" -"groupBitmap" -"groupBitmapAnd" -"groupBitmapAndArgMax" -"groupBitmapAndArgMin" -"groupBitmapAndArray" -"groupBitmapAndDistinct" -"groupBitmapAndForEach" -"groupBitmapAndIf" -"groupBitmapAndMap" -"groupBitmapAndMerge" -"groupBitmapAndNull" -"groupBitmapAndOrDefault" -"groupBitmapAndOrNull" -"groupBitmapAndResample" -"groupBitmapAndSimpleState" -"groupBitmapAndState" -"groupBitmapArgMax" -"groupBitmapArgMin" -"groupBitmapArray" -"groupBitmapDistinct" -"groupBitmapForEach" -"groupBitmapIf" -"groupBitmapMap" -"groupBitmapMerge" -"groupBitmapNull" -"groupBitmapOr" -"groupBitmapOrArgMax" -"groupBitmapOrArgMin" -"groupBitmapOrArray" -"groupBitmapOrDefault" -"groupBitmapOrDistinct" -"groupBitmapOrForEach" -"groupBitmapOrIf" -"groupBitmapOrMap" -"groupBitmapOrMerge" -"groupBitmapOrNull" -"groupBitmapOrNull" -"groupBitmapOrOrDefault" -"groupBitmapOrOrNull" -"groupBitmapOrResample" -"groupBitmapOrSimpleState" -"groupBitmapOrState" -"groupBitmapResample" -"groupBitmapSimpleState" -"groupBitmapState" -"groupBitmapXor" -"groupBitmapXorArgMax" -"groupBitmapXorArgMin" -"groupBitmapXorArray" -"groupBitmapXorDistinct" -"groupBitmapXorForEach" -"groupBitmapXorIf" -"groupBitmapXorMap" -"groupBitmapXorMerge" -"groupBitmapXorNull" 
-"groupBitmapXorOrDefault" -"groupBitmapXorOrNull" -"groupBitmapXorResample" -"groupBitmapXorSimpleState" -"groupBitmapXorState" -"groupConcat" -"groupConcatArgMax" -"groupConcatArgMin" -"groupConcatArray" -"groupConcatDistinct" -"groupConcatForEach" -"groupConcatIf" -"groupConcatMap" -"groupConcatMerge" -"groupConcatNull" -"groupConcatOrDefault" -"groupConcatOrNull" -"groupConcatResample" -"groupConcatSimpleState" -"groupConcatState" -"groupUniqArray" -"groupUniqArrayArgMax" -"groupUniqArrayArgMin" -"groupUniqArrayArray" -"groupUniqArrayDistinct" -"groupUniqArrayForEach" -"groupUniqArrayIf" -"groupUniqArrayMap" -"groupUniqArrayMerge" -"groupUniqArrayNull" -"groupUniqArrayOrDefault" -"groupUniqArrayOrNull" -"groupUniqArrayResample" -"groupUniqArraySimpleState" -"groupUniqArrayState" -"group_concat" -"group_concatArgMax" -"group_concatArgMin" -"group_concatArray" -"group_concatDistinct" -"group_concatForEach" -"group_concatIf" -"group_concatMap" -"group_concatMerge" -"group_concatNull" -"group_concatOrDefault" -"group_concatOrNull" -"group_concatResample" -"group_concatSimpleState" -"group_concatState" -"h3CellAreaM2" -"h3CellAreaRads2" -"h3Distance" -"h3EdgeAngle" -"h3EdgeLengthKm" -"h3EdgeLengthM" -"h3ExactEdgeLengthKm" -"h3ExactEdgeLengthM" -"h3ExactEdgeLengthRads" -"h3GetBaseCell" -"h3GetDestinationIndexFromUnidirectionalEdge" -"h3GetFaces" -"h3GetIndexesFromUnidirectionalEdge" -"h3GetOriginIndexFromUnidirectionalEdge" -"h3GetPentagonIndexes" -"h3GetRes0Indexes" -"h3GetResolution" -"h3GetUnidirectionalEdge" -"h3GetUnidirectionalEdgeBoundary" -"h3GetUnidirectionalEdgesFromHexagon" -"h3HexAreaKm2" -"h3HexAreaM2" -"h3HexRing" -"h3IndexesAreNeighbors" -"h3IsPentagon" -"h3IsResClassIII" -"h3IsValid" -"h3Line" -"h3NumHexagons" -"h3PointDistKm" -"h3PointDistM" -"h3PointDistRads" -"h3ToCenterChild" -"h3ToChildren" -"h3ToGeo" -"h3ToGeoBoundary" -"h3ToParent" -"h3ToString" -"h3UnidirectionalEdgeIsValid" -"h3kRing" -"halfMD5" -"has" -"hasAll" -"hasAny" -"hasColumnInTable" -"hasSubsequence" -"hasSubsequenceCaseInsensitive" -"hasSubsequenceCaseInsensitiveUTF8" -"hasSubsequenceUTF8" -"hasSubstr" -"hasThreadFuzzer" -"hasToken" -"hasTokenCaseInsensitive" -"hasTokenCaseInsensitiveOrNull" -"hasTokenOrNull" -"hex" -"hilbertDecode" -"hilbertEncode" -"histogram" -"histogramArgMax" -"histogramArgMin" -"histogramArray" -"histogramDistinct" -"histogramForEach" -"histogramIf" -"histogramMap" -"histogramMerge" -"histogramNull" -"histogramOrDefault" -"histogramOrNull" -"histogramResample" -"histogramSimpleState" -"histogramState" -"hiveHash" -"hop" -"hopEnd" -"hopStart" -"hostName" -"hostname" -"hypot" -"identity" -"idnaDecode" -"idnaEncode" -"if" -"ifNotFinite" -"ifNull" -"ignore" -"ilike" -"in" -"inIgnoreSet" -"indexHint" -"indexOf" -"initcap" -"initcapUTF8" -"initialQueryID" -"initial_query_id" -"initializeAggregation" -"instr" -"intDiv" -"intDivOrZero" -"intExp10" -"intExp2" -"intHash32" -"intHash64" -"intervalLengthSum" -"intervalLengthSumArgMax" -"intervalLengthSumArgMin" -"intervalLengthSumArray" -"intervalLengthSumDistinct" -"intervalLengthSumForEach" -"intervalLengthSumIf" -"intervalLengthSumMap" -"intervalLengthSumMerge" -"intervalLengthSumNull" -"intervalLengthSumOrDefault" -"intervalLengthSumOrNull" -"intervalLengthSumResample" -"intervalLengthSumSimpleState" -"intervalLengthSumState" -"isConstant" -"isDecimalOverflow" -"isFinite" -"isIPAddressInRange" -"isIPv4String" -"isIPv6String" -"isInfinite" -"isNaN" -"isNotDistinctFrom" -"isNotNull" -"isNull" -"isNullable" -"isValidJSON" -"isValidUTF8" 
-"isZeroOrNull" -"jaroSimilarity" -"jaroWinklerSimilarity" -"javaHash" -"javaHashUTF16LE" -"joinGet" -"joinGetOrNull" -"jsonMergePatch" -"jumpConsistentHash" -"kafkaMurmurHash" -"kolmogorovSmirnovTest" -"kolmogorovSmirnovTestArgMax" -"kolmogorovSmirnovTestArgMin" -"kolmogorovSmirnovTestArray" -"kolmogorovSmirnovTestDistinct" -"kolmogorovSmirnovTestForEach" -"kolmogorovSmirnovTestIf" -"kolmogorovSmirnovTestMap" -"kolmogorovSmirnovTestMerge" -"kolmogorovSmirnovTestNull" -"kolmogorovSmirnovTestOrDefault" -"kolmogorovSmirnovTestOrNull" -"kolmogorovSmirnovTestResample" -"kolmogorovSmirnovTestSimpleState" -"kolmogorovSmirnovTestState" -"kostikConsistentHash" -"kql_array_sort_asc" -"kql_array_sort_desc" -"kurtPop" -"kurtPopArgMax" -"kurtPopArgMin" -"kurtPopArray" -"kurtPopDistinct" -"kurtPopForEach" -"kurtPopIf" -"kurtPopMap" -"kurtPopMerge" -"kurtPopNull" -"kurtPopOrDefault" -"kurtPopOrNull" -"kurtPopResample" -"kurtPopSimpleState" -"kurtPopState" -"kurtSamp" -"kurtSampArgMax" -"kurtSampArgMin" -"kurtSampArray" -"kurtSampDistinct" -"kurtSampForEach" -"kurtSampIf" -"kurtSampMap" -"kurtSampMerge" -"kurtSampNull" -"kurtSampOrDefault" -"kurtSampOrNull" -"kurtSampResample" -"kurtSampSimpleState" -"kurtSampState" -"lagInFrame" -"lagInFrameArgMax" -"lagInFrameArgMin" -"lagInFrameArray" -"lagInFrameDistinct" -"lagInFrameForEach" -"lagInFrameIf" -"lagInFrameMap" -"lagInFrameMerge" -"lagInFrameNull" -"lagInFrameOrDefault" -"lagInFrameOrNull" -"lagInFrameResample" -"lagInFrameSimpleState" -"lagInFrameState" -"largestTriangleThreeBuckets" -"largestTriangleThreeBucketsArgMax" -"largestTriangleThreeBucketsArgMin" -"largestTriangleThreeBucketsArray" -"largestTriangleThreeBucketsDistinct" -"largestTriangleThreeBucketsForEach" -"largestTriangleThreeBucketsIf" -"largestTriangleThreeBucketsMap" -"largestTriangleThreeBucketsMerge" -"largestTriangleThreeBucketsNull" -"largestTriangleThreeBucketsOrDefault" -"largestTriangleThreeBucketsOrNull" -"largestTriangleThreeBucketsResample" -"largestTriangleThreeBucketsSimpleState" -"largestTriangleThreeBucketsState" -"last_value" -"last_valueArgMax" -"last_valueArgMin" -"last_valueArray" -"last_valueDistinct" -"last_valueForEach" -"last_valueIf" -"last_valueMap" -"last_valueMerge" -"last_valueNull" -"last_valueOrDefault" -"last_valueOrNull" -"last_valueResample" -"last_valueSimpleState" -"last_valueState" -"last_value_respect_nulls" -"last_value_respect_nullsArgMax" -"last_value_respect_nullsArgMin" -"last_value_respect_nullsArray" -"last_value_respect_nullsDistinct" -"last_value_respect_nullsForEach" -"last_value_respect_nullsIf" -"last_value_respect_nullsMap" -"last_value_respect_nullsMerge" -"last_value_respect_nullsNull" -"last_value_respect_nullsOrDefault" -"last_value_respect_nullsOrNull" -"last_value_respect_nullsResample" -"last_value_respect_nullsSimpleState" -"last_value_respect_nullsState" -"lcase" -"lcm" -"leadInFrame" -"leadInFrameArgMax" -"leadInFrameArgMin" -"leadInFrameArray" -"leadInFrameDistinct" -"leadInFrameForEach" -"leadInFrameIf" -"leadInFrameMap" -"leadInFrameMerge" -"leadInFrameNull" -"leadInFrameOrDefault" -"leadInFrameOrNull" -"leadInFrameResample" -"leadInFrameSimpleState" -"leadInFrameState" -"least" -"left" -"leftPad" -"leftPadUTF8" -"leftUTF8" -"lemmatize" -"length" -"lengthUTF8" -"less" -"lessOrEquals" -"levenshteinDistance" -"levenshteinDistanceUTF8" -"lgamma" -"like" -"ln" -"locate" -"log" -"log10" -"log1p" -"log2" -"logTrace" -"lowCardinalityIndices" -"lowCardinalityKeys" -"lower" -"lowerUTF8" -"lpad" -"ltrim" -"lttb" -"lttbArgMax" 
-"lttbArgMin" -"lttbArray" -"lttbDistinct" -"lttbForEach" -"lttbIf" -"lttbMap" -"lttbMerge" -"lttbNull" -"lttbOrDefault" -"lttbOrNull" -"lttbResample" -"lttbSimpleState" -"lttbState" -"makeDate" -"makeDate32" -"makeDateTime" -"makeDateTime64" -"mannWhitneyUTest" -"mannWhitneyUTestArgMax" -"mannWhitneyUTestArgMin" -"mannWhitneyUTestArray" -"mannWhitneyUTestDistinct" -"mannWhitneyUTestForEach" -"mannWhitneyUTestIf" -"mannWhitneyUTestMap" -"mannWhitneyUTestMerge" -"mannWhitneyUTestNull" -"mannWhitneyUTestOrDefault" -"mannWhitneyUTestOrNull" -"mannWhitneyUTestResample" -"mannWhitneyUTestSimpleState" -"mannWhitneyUTestState" -"map" -"mapAdd" -"mapAll" -"mapApply" -"mapConcat" -"mapContains" -"mapContainsKeyLike" -"mapExists" -"mapExtractKeyLike" -"mapFilter" -"mapFromArrays" -"mapFromString" -"mapKeys" -"mapPartialReverseSort" -"mapPartialSort" -"mapPopulateSeries" -"mapReverseSort" -"mapSort" -"mapSubtract" -"mapUpdate" -"mapValues" -"match" -"materialize" -"max" -"max2" -"maxArgMax" -"maxArgMin" -"maxArray" -"maxDistinct" -"maxForEach" -"maxIf" -"maxIntersections" -"maxIntersectionsArgMax" -"maxIntersectionsArgMin" -"maxIntersectionsArray" -"maxIntersectionsDistinct" -"maxIntersectionsForEach" -"maxIntersectionsIf" -"maxIntersectionsMap" -"maxIntersectionsMerge" -"maxIntersectionsNull" -"maxIntersectionsOrDefault" -"maxIntersectionsOrNull" -"maxIntersectionsPosition" -"maxIntersectionsPositionArgMax" -"maxIntersectionsPositionArgMin" -"maxIntersectionsPositionArray" -"maxIntersectionsPositionDistinct" -"maxIntersectionsPositionForEach" -"maxIntersectionsPositionIf" -"maxIntersectionsPositionMap" -"maxIntersectionsPositionMerge" -"maxIntersectionsPositionNull" -"maxIntersectionsPositionOrDefault" -"maxIntersectionsPositionOrNull" -"maxIntersectionsPositionResample" -"maxIntersectionsPositionSimpleState" -"maxIntersectionsPositionState" -"maxIntersectionsResample" -"maxIntersectionsSimpleState" -"maxIntersectionsState" -"maxMap" -"maxMappedArrays" -"maxMappedArraysArgMax" -"maxMappedArraysArgMin" -"maxMappedArraysArray" -"maxMappedArraysDistinct" -"maxMappedArraysForEach" -"maxMappedArraysIf" -"maxMappedArraysMap" -"maxMappedArraysMerge" -"maxMappedArraysNull" -"maxMappedArraysOrDefault" -"maxMappedArraysOrNull" -"maxMappedArraysResample" -"maxMappedArraysSimpleState" -"maxMappedArraysState" -"maxMerge" -"maxNull" -"maxOrDefault" -"maxOrNull" -"maxResample" -"maxSimpleState" -"maxState" -"meanZTest" -"meanZTestArgMax" -"meanZTestArgMin" -"meanZTestArray" -"meanZTestDistinct" -"meanZTestForEach" -"meanZTestIf" -"meanZTestMap" -"meanZTestMerge" -"meanZTestNull" -"meanZTestOrDefault" -"meanZTestOrNull" -"meanZTestResample" -"meanZTestSimpleState" -"meanZTestState" -"median" -"medianArgMax" -"medianArgMin" -"medianArray" -"medianBFloat16" -"medianBFloat16ArgMax" -"medianBFloat16ArgMin" -"medianBFloat16Array" -"medianBFloat16Distinct" -"medianBFloat16ForEach" -"medianBFloat16If" -"medianBFloat16Map" -"medianBFloat16Merge" -"medianBFloat16Null" -"medianBFloat16OrDefault" -"medianBFloat16OrNull" -"medianBFloat16Resample" -"medianBFloat16SimpleState" -"medianBFloat16State" -"medianBFloat16Weighted" -"medianBFloat16WeightedArgMax" -"medianBFloat16WeightedArgMin" -"medianBFloat16WeightedArray" -"medianBFloat16WeightedDistinct" -"medianBFloat16WeightedForEach" -"medianBFloat16WeightedIf" -"medianBFloat16WeightedMap" -"medianBFloat16WeightedMerge" -"medianBFloat16WeightedNull" -"medianBFloat16WeightedOrDefault" -"medianBFloat16WeightedOrNull" -"medianBFloat16WeightedResample" 
-"medianBFloat16WeightedSimpleState" -"medianBFloat16WeightedState" -"medianDD" -"medianDDArgMax" -"medianDDArgMin" -"medianDDArray" -"medianDDDistinct" -"medianDDForEach" -"medianDDIf" -"medianDDMap" -"medianDDMerge" -"medianDDNull" -"medianDDOrDefault" -"medianDDOrNull" -"medianDDResample" -"medianDDSimpleState" -"medianDDState" -"medianDeterministic" -"medianDeterministicArgMax" -"medianDeterministicArgMin" -"medianDeterministicArray" -"medianDeterministicDistinct" -"medianDeterministicForEach" -"medianDeterministicIf" -"medianDeterministicMap" -"medianDeterministicMerge" -"medianDeterministicNull" -"medianDeterministicOrDefault" -"medianDeterministicOrNull" -"medianDeterministicResample" -"medianDeterministicSimpleState" -"medianDeterministicState" -"medianDistinct" -"medianExact" -"medianExactArgMax" -"medianExactArgMin" -"medianExactArray" -"medianExactDistinct" -"medianExactForEach" -"medianExactHigh" -"medianExactHighArgMax" -"medianExactHighArgMin" -"medianExactHighArray" -"medianExactHighDistinct" -"medianExactHighForEach" -"medianExactHighIf" -"medianExactHighMap" -"medianExactHighMerge" -"medianExactHighNull" -"medianExactHighOrDefault" -"medianExactHighOrNull" -"medianExactHighResample" -"medianExactHighSimpleState" -"medianExactHighState" -"medianExactIf" -"medianExactLow" -"medianExactLowArgMax" -"medianExactLowArgMin" -"medianExactLowArray" -"medianExactLowDistinct" -"medianExactLowForEach" -"medianExactLowIf" -"medianExactLowMap" -"medianExactLowMerge" -"medianExactLowNull" -"medianExactLowOrDefault" -"medianExactLowOrNull" -"medianExactLowResample" -"medianExactLowSimpleState" -"medianExactLowState" -"medianExactMap" -"medianExactMerge" -"medianExactNull" -"medianExactOrDefault" -"medianExactOrNull" -"medianExactResample" -"medianExactSimpleState" -"medianExactState" -"medianExactWeighted" -"medianExactWeightedArgMax" -"medianExactWeightedArgMin" -"medianExactWeightedArray" -"medianExactWeightedDistinct" -"medianExactWeightedForEach" -"medianExactWeightedIf" -"medianExactWeightedMap" -"medianExactWeightedMerge" -"medianExactWeightedNull" -"medianExactWeightedOrDefault" -"medianExactWeightedOrNull" -"medianExactWeightedResample" -"medianExactWeightedSimpleState" -"medianExactWeightedState" -"medianForEach" -"medianGK" -"medianGKArgMax" -"medianGKArgMin" -"medianGKArray" -"medianGKDistinct" -"medianGKForEach" -"medianGKIf" -"medianGKMap" -"medianGKMerge" -"medianGKNull" -"medianGKOrDefault" -"medianGKOrNull" -"medianGKResample" -"medianGKSimpleState" -"medianGKState" -"medianIf" -"medianInterpolatedWeighted" -"medianInterpolatedWeightedArgMax" -"medianInterpolatedWeightedArgMin" -"medianInterpolatedWeightedArray" -"medianInterpolatedWeightedDistinct" -"medianInterpolatedWeightedForEach" -"medianInterpolatedWeightedIf" -"medianInterpolatedWeightedMap" -"medianInterpolatedWeightedMerge" -"medianInterpolatedWeightedNull" -"medianInterpolatedWeightedOrDefault" -"medianInterpolatedWeightedOrNull" -"medianInterpolatedWeightedResample" -"medianInterpolatedWeightedSimpleState" -"medianInterpolatedWeightedState" -"medianMap" -"medianMerge" -"medianNull" -"medianOrDefault" -"medianOrNull" -"medianResample" -"medianSimpleState" -"medianState" -"medianTDigest" -"medianTDigestArgMax" -"medianTDigestArgMin" -"medianTDigestArray" -"medianTDigestDistinct" -"medianTDigestForEach" -"medianTDigestIf" -"medianTDigestMap" -"medianTDigestMerge" -"medianTDigestNull" -"medianTDigestOrDefault" -"medianTDigestOrNull" -"medianTDigestResample" -"medianTDigestSimpleState" -"medianTDigestState" 
-"medianTDigestWeighted" -"medianTDigestWeightedArgMax" -"medianTDigestWeightedArgMin" -"medianTDigestWeightedArray" -"medianTDigestWeightedDistinct" -"medianTDigestWeightedForEach" -"medianTDigestWeightedIf" -"medianTDigestWeightedMap" -"medianTDigestWeightedMerge" -"medianTDigestWeightedNull" -"medianTDigestWeightedOrDefault" -"medianTDigestWeightedOrNull" -"medianTDigestWeightedResample" -"medianTDigestWeightedSimpleState" -"medianTDigestWeightedState" -"medianTiming" -"medianTimingArgMax" -"medianTimingArgMin" -"medianTimingArray" -"medianTimingDistinct" -"medianTimingForEach" -"medianTimingIf" -"medianTimingMap" -"medianTimingMerge" -"medianTimingNull" -"medianTimingOrDefault" -"medianTimingOrNull" -"medianTimingResample" -"medianTimingSimpleState" -"medianTimingState" -"medianTimingWeighted" -"medianTimingWeightedArgMax" -"medianTimingWeightedArgMin" -"medianTimingWeightedArray" -"medianTimingWeightedDistinct" -"medianTimingWeightedForEach" -"medianTimingWeightedIf" -"medianTimingWeightedMap" -"medianTimingWeightedMerge" -"medianTimingWeightedNull" -"medianTimingWeightedOrDefault" -"medianTimingWeightedOrNull" -"medianTimingWeightedResample" -"medianTimingWeightedSimpleState" -"medianTimingWeightedState" -"metroHash64" -"mid" -"min" -"min2" -"minArgMax" -"minArgMin" -"minArray" -"minDistinct" -"minForEach" -"minIf" -"minMap" -"minMappedArrays" -"minMappedArraysArgMax" -"minMappedArraysArgMin" -"minMappedArraysArray" -"minMappedArraysDistinct" -"minMappedArraysForEach" -"minMappedArraysIf" -"minMappedArraysMap" -"minMappedArraysMerge" -"minMappedArraysNull" -"minMappedArraysOrDefault" -"minMappedArraysOrNull" -"minMappedArraysResample" -"minMappedArraysSimpleState" -"minMappedArraysState" -"minMerge" -"minNull" -"minOrDefault" -"minOrNull" -"minResample" -"minSampleSizeContinous" -"minSampleSizeContinuous" -"minSampleSizeConversion" -"minSimpleState" -"minState" -"minus" -"mismatches" -"mod" -"modulo" -"moduloLegacy" -"moduloOrZero" -"monthName" -"mortonDecode" -"mortonEncode" -"multiFuzzyMatchAllIndices" -"multiFuzzyMatchAny" -"multiFuzzyMatchAnyIndex" -"multiIf" -"multiMatchAllIndices" -"multiMatchAny" -"multiMatchAnyIndex" -"multiSearchAllPositions" -"multiSearchAllPositionsCaseInsensitive" -"multiSearchAllPositionsCaseInsensitiveUTF8" -"multiSearchAllPositionsUTF8" -"multiSearchAny" -"multiSearchAnyCaseInsensitive" -"multiSearchAnyCaseInsensitiveUTF8" -"multiSearchAnyUTF8" -"multiSearchFirstIndex" -"multiSearchFirstIndexCaseInsensitive" -"multiSearchFirstIndexCaseInsensitiveUTF8" -"multiSearchFirstIndexUTF8" -"multiSearchFirstPosition" -"multiSearchFirstPositionCaseInsensitive" -"multiSearchFirstPositionCaseInsensitiveUTF8" -"multiSearchFirstPositionUTF8" -"multiply" -"multiplyDecimal" -"murmurHash2_32" -"murmurHash2_64" -"murmurHash3_128" -"murmurHash3_32" -"murmurHash3_64" -"negate" -"neighbor" -"nested" -"netloc" -"ngramDistance" -"ngramDistanceCaseInsensitive" -"ngramDistanceCaseInsensitiveUTF8" -"ngramDistanceUTF8" -"ngramMinHash" -"ngramMinHashArg" -"ngramMinHashArgCaseInsensitive" -"ngramMinHashArgCaseInsensitiveUTF8" -"ngramMinHashArgUTF8" -"ngramMinHashCaseInsensitive" -"ngramMinHashCaseInsensitiveUTF8" -"ngramMinHashUTF8" -"ngramSearch" -"ngramSearchCaseInsensitive" -"ngramSearchCaseInsensitiveUTF8" -"ngramSearchUTF8" -"ngramSimHash" -"ngramSimHashCaseInsensitive" -"ngramSimHashCaseInsensitiveUTF8" -"ngramSimHashUTF8" -"ngrams" -"nonNegativeDerivative" -"nonNegativeDerivativeArgMax" -"nonNegativeDerivativeArgMin" -"nonNegativeDerivativeArray" 
-"nonNegativeDerivativeDistinct" -"nonNegativeDerivativeForEach" -"nonNegativeDerivativeIf" -"nonNegativeDerivativeMap" -"nonNegativeDerivativeMerge" -"nonNegativeDerivativeNull" -"nonNegativeDerivativeOrDefault" -"nonNegativeDerivativeOrNull" -"nonNegativeDerivativeResample" -"nonNegativeDerivativeSimpleState" -"nonNegativeDerivativeState" -"normL1" -"normL2" -"normL2Squared" -"normLinf" -"normLp" -"normalizeL1" -"normalizeL2" -"normalizeLinf" -"normalizeLp" -"normalizeQuery" -"normalizeQueryKeepNames" -"normalizeUTF8NFC" -"normalizeUTF8NFD" -"normalizeUTF8NFKC" -"normalizeUTF8NFKD" -"normalizedQueryHash" -"normalizedQueryHashKeepNames" -"not" -"notEmpty" -"notEquals" -"notILike" -"notIn" -"notInIgnoreSet" -"notLike" -"notNullIn" -"notNullInIgnoreSet" -"nothing" -"nothingArgMax" -"nothingArgMin" -"nothingArray" -"nothingDistinct" -"nothingForEach" -"nothingIf" -"nothingMap" -"nothingMerge" -"nothingNull" -"nothingNull" -"nothingNullArgMax" -"nothingNullArgMin" -"nothingNullArray" -"nothingNullDistinct" -"nothingNullForEach" -"nothingNullIf" -"nothingNullMap" -"nothingNullMerge" -"nothingNullNull" -"nothingNullOrDefault" -"nothingNullOrNull" -"nothingNullResample" -"nothingNullSimpleState" -"nothingNullState" -"nothingOrDefault" -"nothingOrNull" -"nothingResample" -"nothingSimpleState" -"nothingState" -"nothingUInt64" -"nothingUInt64ArgMax" -"nothingUInt64ArgMin" -"nothingUInt64Array" -"nothingUInt64Distinct" -"nothingUInt64ForEach" -"nothingUInt64If" -"nothingUInt64Map" -"nothingUInt64Merge" -"nothingUInt64Null" -"nothingUInt64OrDefault" -"nothingUInt64OrNull" -"nothingUInt64Resample" -"nothingUInt64SimpleState" -"nothingUInt64State" -"now" -"now64" -"nowInBlock" -"nth_value" -"nth_valueArgMax" -"nth_valueArgMin" -"nth_valueArray" -"nth_valueDistinct" -"nth_valueForEach" -"nth_valueIf" -"nth_valueMap" -"nth_valueMerge" -"nth_valueNull" -"nth_valueOrDefault" -"nth_valueOrNull" -"nth_valueResample" -"nth_valueSimpleState" -"nth_valueState" -"ntile" -"ntileArgMax" -"ntileArgMin" -"ntileArray" -"ntileDistinct" -"ntileForEach" -"ntileIf" -"ntileMap" -"ntileMerge" -"ntileNull" -"ntileOrDefault" -"ntileOrNull" -"ntileResample" -"ntileSimpleState" -"ntileState" -"nullIf" -"nullIn" -"nullInIgnoreSet" -"or" -"parseDateTime" -"parseDateTime32BestEffort" -"parseDateTime32BestEffortOrNull" -"parseDateTime32BestEffortOrZero" -"parseDateTime64BestEffort" -"parseDateTime64BestEffortOrNull" -"parseDateTime64BestEffortOrZero" -"parseDateTime64BestEffortUS" -"parseDateTime64BestEffortUSOrNull" -"parseDateTime64BestEffortUSOrZero" -"parseDateTimeBestEffort" -"parseDateTimeBestEffortOrNull" -"parseDateTimeBestEffortOrZero" -"parseDateTimeBestEffortUS" -"parseDateTimeBestEffortUSOrNull" -"parseDateTimeBestEffortUSOrZero" -"parseDateTimeInJodaSyntax" -"parseDateTimeInJodaSyntaxOrNull" -"parseDateTimeInJodaSyntaxOrZero" -"parseDateTimeOrNull" -"parseDateTimeOrZero" -"parseReadableSize" -"parseReadableSizeOrNull" -"parseReadableSizeOrZero" -"parseTimeDelta" -"partitionID" -"partitionId" -"path" -"pathFull" -"percentRank" -"percentRankArgMax" -"percentRankArgMin" -"percentRankArray" -"percentRankDistinct" -"percentRankForEach" -"percentRankIf" -"percentRankMap" -"percentRankMerge" -"percentRankNull" -"percentRankOrDefault" -"percentRankOrNull" -"percentRankResample" -"percentRankSimpleState" -"percentRankState" -"percent_rank" -"percent_rankArgMax" -"percent_rankArgMin" -"percent_rankArray" -"percent_rankDistinct" -"percent_rankForEach" -"percent_rankIf" -"percent_rankMap" -"percent_rankMerge" -"percent_rankNull" 
-"percent_rankOrDefault" -"percent_rankOrNull" -"percent_rankResample" -"percent_rankSimpleState" -"percent_rankState" -"pi" -"plus" -"pmod" -"pointInEllipses" -"pointInPolygon" -"polygonAreaCartesian" -"polygonAreaSpherical" -"polygonConvexHullCartesian" -"polygonPerimeterCartesian" -"polygonPerimeterSpherical" -"polygonsDistanceCartesian" -"polygonsDistanceSpherical" -"polygonsEqualsCartesian" -"polygonsIntersectionCartesian" -"polygonsIntersectionSpherical" -"polygonsSymDifferenceCartesian" -"polygonsSymDifferenceSpherical" -"polygonsUnionCartesian" -"polygonsUnionSpherical" -"polygonsWithinCartesian" -"polygonsWithinSpherical" -"port" -"portRFC" -"position" -"positionCaseInsensitive" -"positionCaseInsensitiveUTF8" -"positionUTF8" -"positiveModulo" -"positive_modulo" -"pow" -"power" -"printf" -"proportionsZTest" -"protocol" -"punycodeDecode" -"punycodeEncode" -"quantile" -"quantileArgMax" -"quantileArgMin" -"quantileArray" -"quantileBFloat16" -"quantileBFloat16ArgMax" -"quantileBFloat16ArgMin" -"quantileBFloat16Array" -"quantileBFloat16Distinct" -"quantileBFloat16ForEach" -"quantileBFloat16If" -"quantileBFloat16Map" -"quantileBFloat16Merge" -"quantileBFloat16Null" -"quantileBFloat16OrDefault" -"quantileBFloat16OrNull" -"quantileBFloat16Resample" -"quantileBFloat16SimpleState" -"quantileBFloat16State" -"quantileBFloat16Weighted" -"quantileBFloat16WeightedArgMax" -"quantileBFloat16WeightedArgMin" -"quantileBFloat16WeightedArray" -"quantileBFloat16WeightedDistinct" -"quantileBFloat16WeightedForEach" -"quantileBFloat16WeightedIf" -"quantileBFloat16WeightedMap" -"quantileBFloat16WeightedMerge" -"quantileBFloat16WeightedNull" -"quantileBFloat16WeightedOrDefault" -"quantileBFloat16WeightedOrNull" -"quantileBFloat16WeightedResample" -"quantileBFloat16WeightedSimpleState" -"quantileBFloat16WeightedState" -"quantileDD" -"quantileDDArgMax" -"quantileDDArgMin" -"quantileDDArray" -"quantileDDDistinct" -"quantileDDForEach" -"quantileDDIf" -"quantileDDMap" -"quantileDDMerge" -"quantileDDNull" -"quantileDDOrDefault" -"quantileDDOrNull" -"quantileDDResample" -"quantileDDSimpleState" -"quantileDDState" -"quantileDeterministic" -"quantileDeterministicArgMax" -"quantileDeterministicArgMin" -"quantileDeterministicArray" -"quantileDeterministicDistinct" -"quantileDeterministicForEach" -"quantileDeterministicIf" -"quantileDeterministicMap" -"quantileDeterministicMerge" -"quantileDeterministicNull" -"quantileDeterministicOrDefault" -"quantileDeterministicOrNull" -"quantileDeterministicResample" -"quantileDeterministicSimpleState" -"quantileDeterministicState" -"quantileDistinct" -"quantileExact" -"quantileExactArgMax" -"quantileExactArgMin" -"quantileExactArray" -"quantileExactDistinct" -"quantileExactExclusive" -"quantileExactExclusiveArgMax" -"quantileExactExclusiveArgMin" -"quantileExactExclusiveArray" -"quantileExactExclusiveDistinct" -"quantileExactExclusiveForEach" -"quantileExactExclusiveIf" -"quantileExactExclusiveMap" -"quantileExactExclusiveMerge" -"quantileExactExclusiveNull" -"quantileExactExclusiveOrDefault" -"quantileExactExclusiveOrNull" -"quantileExactExclusiveResample" -"quantileExactExclusiveSimpleState" -"quantileExactExclusiveState" -"quantileExactForEach" -"quantileExactHigh" -"quantileExactHighArgMax" -"quantileExactHighArgMin" -"quantileExactHighArray" -"quantileExactHighDistinct" -"quantileExactHighForEach" -"quantileExactHighIf" -"quantileExactHighMap" -"quantileExactHighMerge" -"quantileExactHighNull" -"quantileExactHighOrDefault" -"quantileExactHighOrNull" 
-"quantileExactHighResample" -"quantileExactHighSimpleState" -"quantileExactHighState" -"quantileExactIf" -"quantileExactInclusive" -"quantileExactInclusiveArgMax" -"quantileExactInclusiveArgMin" -"quantileExactInclusiveArray" -"quantileExactInclusiveDistinct" -"quantileExactInclusiveForEach" -"quantileExactInclusiveIf" -"quantileExactInclusiveMap" -"quantileExactInclusiveMerge" -"quantileExactInclusiveNull" -"quantileExactInclusiveOrDefault" -"quantileExactInclusiveOrNull" -"quantileExactInclusiveResample" -"quantileExactInclusiveSimpleState" -"quantileExactInclusiveState" -"quantileExactLow" -"quantileExactLowArgMax" -"quantileExactLowArgMin" -"quantileExactLowArray" -"quantileExactLowDistinct" -"quantileExactLowForEach" -"quantileExactLowIf" -"quantileExactLowMap" -"quantileExactLowMerge" -"quantileExactLowNull" -"quantileExactLowOrDefault" -"quantileExactLowOrNull" -"quantileExactLowResample" -"quantileExactLowSimpleState" -"quantileExactLowState" -"quantileExactMap" -"quantileExactMerge" -"quantileExactNull" -"quantileExactOrDefault" -"quantileExactOrNull" -"quantileExactResample" -"quantileExactSimpleState" -"quantileExactState" -"quantileExactWeighted" -"quantileExactWeightedArgMax" -"quantileExactWeightedArgMin" -"quantileExactWeightedArray" -"quantileExactWeightedDistinct" -"quantileExactWeightedForEach" -"quantileExactWeightedIf" -"quantileExactWeightedMap" -"quantileExactWeightedMerge" -"quantileExactWeightedNull" -"quantileExactWeightedOrDefault" -"quantileExactWeightedOrNull" -"quantileExactWeightedResample" -"quantileExactWeightedSimpleState" -"quantileExactWeightedState" -"quantileForEach" -"quantileGK" -"quantileGKArgMax" -"quantileGKArgMin" -"quantileGKArray" -"quantileGKDistinct" -"quantileGKForEach" -"quantileGKIf" -"quantileGKMap" -"quantileGKMerge" -"quantileGKNull" -"quantileGKOrDefault" -"quantileGKOrNull" -"quantileGKResample" -"quantileGKSimpleState" -"quantileGKState" -"quantileIf" -"quantileInterpolatedWeighted" -"quantileInterpolatedWeightedArgMax" -"quantileInterpolatedWeightedArgMin" -"quantileInterpolatedWeightedArray" -"quantileInterpolatedWeightedDistinct" -"quantileInterpolatedWeightedForEach" -"quantileInterpolatedWeightedIf" -"quantileInterpolatedWeightedMap" -"quantileInterpolatedWeightedMerge" -"quantileInterpolatedWeightedNull" -"quantileInterpolatedWeightedOrDefault" -"quantileInterpolatedWeightedOrNull" -"quantileInterpolatedWeightedResample" -"quantileInterpolatedWeightedSimpleState" -"quantileInterpolatedWeightedState" -"quantileMap" -"quantileMerge" -"quantileNull" -"quantileOrDefault" -"quantileOrNull" -"quantileResample" -"quantileSimpleState" -"quantileState" -"quantileTDigest" -"quantileTDigestArgMax" -"quantileTDigestArgMin" -"quantileTDigestArray" -"quantileTDigestDistinct" -"quantileTDigestForEach" -"quantileTDigestIf" -"quantileTDigestMap" -"quantileTDigestMerge" -"quantileTDigestNull" -"quantileTDigestOrDefault" -"quantileTDigestOrNull" -"quantileTDigestResample" -"quantileTDigestSimpleState" -"quantileTDigestState" -"quantileTDigestWeighted" -"quantileTDigestWeightedArgMax" -"quantileTDigestWeightedArgMin" -"quantileTDigestWeightedArray" -"quantileTDigestWeightedDistinct" -"quantileTDigestWeightedForEach" -"quantileTDigestWeightedIf" -"quantileTDigestWeightedMap" -"quantileTDigestWeightedMerge" -"quantileTDigestWeightedNull" -"quantileTDigestWeightedOrDefault" -"quantileTDigestWeightedOrNull" -"quantileTDigestWeightedResample" -"quantileTDigestWeightedSimpleState" -"quantileTDigestWeightedState" -"quantileTiming" -"quantileTimingArgMax" 
-"quantileTimingArgMin" -"quantileTimingArray" -"quantileTimingDistinct" -"quantileTimingForEach" -"quantileTimingIf" -"quantileTimingMap" -"quantileTimingMerge" -"quantileTimingNull" -"quantileTimingOrDefault" -"quantileTimingOrNull" -"quantileTimingResample" -"quantileTimingSimpleState" -"quantileTimingState" -"quantileTimingWeighted" -"quantileTimingWeightedArgMax" -"quantileTimingWeightedArgMin" -"quantileTimingWeightedArray" -"quantileTimingWeightedDistinct" -"quantileTimingWeightedForEach" -"quantileTimingWeightedIf" -"quantileTimingWeightedMap" -"quantileTimingWeightedMerge" -"quantileTimingWeightedNull" -"quantileTimingWeightedOrDefault" -"quantileTimingWeightedOrNull" -"quantileTimingWeightedResample" -"quantileTimingWeightedSimpleState" -"quantileTimingWeightedState" -"quantiles" -"quantilesArgMax" -"quantilesArgMin" -"quantilesArray" -"quantilesBFloat16" -"quantilesBFloat16ArgMax" -"quantilesBFloat16ArgMin" -"quantilesBFloat16Array" -"quantilesBFloat16Distinct" -"quantilesBFloat16ForEach" -"quantilesBFloat16If" -"quantilesBFloat16Map" -"quantilesBFloat16Merge" -"quantilesBFloat16Null" -"quantilesBFloat16OrDefault" -"quantilesBFloat16OrNull" -"quantilesBFloat16Resample" -"quantilesBFloat16SimpleState" -"quantilesBFloat16State" -"quantilesBFloat16Weighted" -"quantilesBFloat16WeightedArgMax" -"quantilesBFloat16WeightedArgMin" -"quantilesBFloat16WeightedArray" -"quantilesBFloat16WeightedDistinct" -"quantilesBFloat16WeightedForEach" -"quantilesBFloat16WeightedIf" -"quantilesBFloat16WeightedMap" -"quantilesBFloat16WeightedMerge" -"quantilesBFloat16WeightedNull" -"quantilesBFloat16WeightedOrDefault" -"quantilesBFloat16WeightedOrNull" -"quantilesBFloat16WeightedResample" -"quantilesBFloat16WeightedSimpleState" -"quantilesBFloat16WeightedState" -"quantilesDD" -"quantilesDDArgMax" -"quantilesDDArgMin" -"quantilesDDArray" -"quantilesDDDistinct" -"quantilesDDForEach" -"quantilesDDIf" -"quantilesDDMap" -"quantilesDDMerge" -"quantilesDDNull" -"quantilesDDOrDefault" -"quantilesDDOrNull" -"quantilesDDResample" -"quantilesDDSimpleState" -"quantilesDDState" -"quantilesDeterministic" -"quantilesDeterministicArgMax" -"quantilesDeterministicArgMin" -"quantilesDeterministicArray" -"quantilesDeterministicDistinct" -"quantilesDeterministicForEach" -"quantilesDeterministicIf" -"quantilesDeterministicMap" -"quantilesDeterministicMerge" -"quantilesDeterministicNull" -"quantilesDeterministicOrDefault" -"quantilesDeterministicOrNull" -"quantilesDeterministicResample" -"quantilesDeterministicSimpleState" -"quantilesDeterministicState" -"quantilesDistinct" -"quantilesExact" -"quantilesExactArgMax" -"quantilesExactArgMin" -"quantilesExactArray" -"quantilesExactDistinct" -"quantilesExactExclusive" -"quantilesExactExclusiveArgMax" -"quantilesExactExclusiveArgMin" -"quantilesExactExclusiveArray" -"quantilesExactExclusiveDistinct" -"quantilesExactExclusiveForEach" -"quantilesExactExclusiveIf" -"quantilesExactExclusiveMap" -"quantilesExactExclusiveMerge" -"quantilesExactExclusiveNull" -"quantilesExactExclusiveOrDefault" -"quantilesExactExclusiveOrNull" -"quantilesExactExclusiveResample" -"quantilesExactExclusiveSimpleState" -"quantilesExactExclusiveState" -"quantilesExactForEach" -"quantilesExactHigh" -"quantilesExactHighArgMax" -"quantilesExactHighArgMin" -"quantilesExactHighArray" -"quantilesExactHighDistinct" -"quantilesExactHighForEach" -"quantilesExactHighIf" -"quantilesExactHighMap" -"quantilesExactHighMerge" -"quantilesExactHighNull" -"quantilesExactHighOrDefault" -"quantilesExactHighOrNull" 
-"quantilesExactHighResample" -"quantilesExactHighSimpleState" -"quantilesExactHighState" -"quantilesExactIf" -"quantilesExactInclusive" -"quantilesExactInclusiveArgMax" -"quantilesExactInclusiveArgMin" -"quantilesExactInclusiveArray" -"quantilesExactInclusiveDistinct" -"quantilesExactInclusiveForEach" -"quantilesExactInclusiveIf" -"quantilesExactInclusiveMap" -"quantilesExactInclusiveMerge" -"quantilesExactInclusiveNull" -"quantilesExactInclusiveOrDefault" -"quantilesExactInclusiveOrNull" -"quantilesExactInclusiveResample" -"quantilesExactInclusiveSimpleState" -"quantilesExactInclusiveState" -"quantilesExactLow" -"quantilesExactLowArgMax" -"quantilesExactLowArgMin" -"quantilesExactLowArray" -"quantilesExactLowDistinct" -"quantilesExactLowForEach" -"quantilesExactLowIf" -"quantilesExactLowMap" -"quantilesExactLowMerge" -"quantilesExactLowNull" -"quantilesExactLowOrDefault" -"quantilesExactLowOrNull" -"quantilesExactLowResample" -"quantilesExactLowSimpleState" -"quantilesExactLowState" -"quantilesExactMap" -"quantilesExactMerge" -"quantilesExactNull" -"quantilesExactOrDefault" -"quantilesExactOrNull" -"quantilesExactResample" -"quantilesExactSimpleState" -"quantilesExactState" -"quantilesExactWeighted" -"quantilesExactWeightedArgMax" -"quantilesExactWeightedArgMin" -"quantilesExactWeightedArray" -"quantilesExactWeightedDistinct" -"quantilesExactWeightedForEach" -"quantilesExactWeightedIf" -"quantilesExactWeightedMap" -"quantilesExactWeightedMerge" -"quantilesExactWeightedNull" -"quantilesExactWeightedOrDefault" -"quantilesExactWeightedOrNull" -"quantilesExactWeightedResample" -"quantilesExactWeightedSimpleState" -"quantilesExactWeightedState" -"quantilesForEach" -"quantilesGK" -"quantilesGKArgMax" -"quantilesGKArgMin" -"quantilesGKArray" -"quantilesGKDistinct" -"quantilesGKForEach" -"quantilesGKIf" -"quantilesGKMap" -"quantilesGKMerge" -"quantilesGKNull" -"quantilesGKOrDefault" -"quantilesGKOrNull" -"quantilesGKResample" -"quantilesGKSimpleState" -"quantilesGKState" -"quantilesIf" -"quantilesInterpolatedWeighted" -"quantilesInterpolatedWeightedArgMax" -"quantilesInterpolatedWeightedArgMin" -"quantilesInterpolatedWeightedArray" -"quantilesInterpolatedWeightedDistinct" -"quantilesInterpolatedWeightedForEach" -"quantilesInterpolatedWeightedIf" -"quantilesInterpolatedWeightedMap" -"quantilesInterpolatedWeightedMerge" -"quantilesInterpolatedWeightedNull" -"quantilesInterpolatedWeightedOrDefault" -"quantilesInterpolatedWeightedOrNull" -"quantilesInterpolatedWeightedResample" -"quantilesInterpolatedWeightedSimpleState" -"quantilesInterpolatedWeightedState" -"quantilesMap" -"quantilesMerge" -"quantilesNull" -"quantilesOrDefault" -"quantilesOrNull" -"quantilesResample" -"quantilesSimpleState" -"quantilesState" -"quantilesTDigest" -"quantilesTDigestArgMax" -"quantilesTDigestArgMin" -"quantilesTDigestArray" -"quantilesTDigestDistinct" -"quantilesTDigestForEach" -"quantilesTDigestIf" -"quantilesTDigestMap" -"quantilesTDigestMerge" -"quantilesTDigestNull" -"quantilesTDigestOrDefault" -"quantilesTDigestOrNull" -"quantilesTDigestResample" -"quantilesTDigestSimpleState" -"quantilesTDigestState" -"quantilesTDigestWeighted" -"quantilesTDigestWeightedArgMax" -"quantilesTDigestWeightedArgMin" -"quantilesTDigestWeightedArray" -"quantilesTDigestWeightedDistinct" -"quantilesTDigestWeightedForEach" -"quantilesTDigestWeightedIf" -"quantilesTDigestWeightedMap" -"quantilesTDigestWeightedMerge" -"quantilesTDigestWeightedNull" -"quantilesTDigestWeightedOrDefault" -"quantilesTDigestWeightedOrNull" 
-"quantilesTDigestWeightedResample" -"quantilesTDigestWeightedSimpleState" -"quantilesTDigestWeightedState" -"quantilesTiming" -"quantilesTimingArgMax" -"quantilesTimingArgMin" -"quantilesTimingArray" -"quantilesTimingDistinct" -"quantilesTimingForEach" -"quantilesTimingIf" -"quantilesTimingMap" -"quantilesTimingMerge" -"quantilesTimingNull" -"quantilesTimingOrDefault" -"quantilesTimingOrNull" -"quantilesTimingResample" -"quantilesTimingSimpleState" -"quantilesTimingState" -"quantilesTimingWeighted" -"quantilesTimingWeightedArgMax" -"quantilesTimingWeightedArgMin" -"quantilesTimingWeightedArray" -"quantilesTimingWeightedDistinct" -"quantilesTimingWeightedForEach" -"quantilesTimingWeightedIf" -"quantilesTimingWeightedMap" -"quantilesTimingWeightedMerge" -"quantilesTimingWeightedNull" -"quantilesTimingWeightedOrDefault" -"quantilesTimingWeightedOrNull" -"quantilesTimingWeightedResample" -"quantilesTimingWeightedSimpleState" -"quantilesTimingWeightedState" -"queryID" -"queryString" -"queryStringAndFragment" -"query_id" -"radians" -"rand" -"rand32" -"rand64" -"randBernoulli" -"randBinomial" -"randCanonical" -"randChiSquared" -"randConstant" -"randExponential" -"randFisherF" -"randLogNormal" -"randNegativeBinomial" -"randNormal" -"randPoisson" -"randStudentT" -"randUniform" -"randomFixedString" -"randomPrintableASCII" -"randomString" -"randomStringUTF8" -"range" -"rank" -"rankArgMax" -"rankArgMin" -"rankArray" -"rankCorr" -"rankCorrArgMax" -"rankCorrArgMin" -"rankCorrArray" -"rankCorrDistinct" -"rankCorrForEach" -"rankCorrIf" -"rankCorrMap" -"rankCorrMerge" -"rankCorrNull" -"rankCorrOrDefault" -"rankCorrOrNull" -"rankCorrResample" -"rankCorrSimpleState" -"rankCorrState" -"rankDistinct" -"rankForEach" -"rankIf" -"rankMap" -"rankMerge" -"rankNull" -"rankOrDefault" -"rankOrNull" -"rankResample" -"rankSimpleState" -"rankState" -"readWKTLineString" -"readWKTMultiLineString" -"readWKTMultiPolygon" -"readWKTPoint" -"readWKTPolygon" -"readWKTRing" -"regexpExtract" -"regexpQuoteMeta" -"regionHierarchy" -"regionIn" -"regionToArea" -"regionToCity" -"regionToContinent" -"regionToCountry" -"regionToDistrict" -"regionToName" -"regionToPopulation" -"regionToTopContinent" -"reinterpret" -"reinterpretAsDate" -"reinterpretAsDateTime" -"reinterpretAsFixedString" -"reinterpretAsFloat32" -"reinterpretAsFloat64" -"reinterpretAsInt128" -"reinterpretAsInt16" -"reinterpretAsInt256" -"reinterpretAsInt32" -"reinterpretAsInt64" -"reinterpretAsInt8" -"reinterpretAsString" -"reinterpretAsUInt128" -"reinterpretAsUInt16" -"reinterpretAsUInt256" -"reinterpretAsUInt32" -"reinterpretAsUInt64" -"reinterpretAsUInt8" -"reinterpretAsUUID" -"repeat" -"replace" -"replaceAll" -"replaceOne" -"replaceRegexpAll" -"replaceRegexpOne" -"replicate" -"retention" -"retentionArgMax" -"retentionArgMin" -"retentionArray" -"retentionDistinct" -"retentionForEach" -"retentionIf" -"retentionMap" -"retentionMerge" -"retentionNull" -"retentionOrDefault" -"retentionOrNull" -"retentionResample" -"retentionSimpleState" -"retentionState" -"reverse" -"reverseUTF8" -"revision" -"right" -"rightPad" -"rightPadUTF8" -"rightUTF8" -"round" -"roundAge" -"roundBankers" -"roundDown" -"roundDuration" -"roundToExp2" -"rowNumberInAllBlocks" -"rowNumberInBlock" -"row_number" -"row_numberArgMax" -"row_numberArgMin" -"row_numberArray" -"row_numberDistinct" -"row_numberForEach" -"row_numberIf" -"row_numberMap" -"row_numberMerge" -"row_numberNull" -"row_numberOrDefault" -"row_numberOrNull" -"row_numberResample" -"row_numberSimpleState" -"row_numberState" -"rpad" -"rtrim" 
-"runningAccumulate" -"runningConcurrency" -"runningDifference" -"runningDifferenceStartingWithFirstValue" -"s2CapContains" -"s2CapUnion" -"s2CellsIntersect" -"s2GetNeighbors" -"s2RectAdd" -"s2RectContains" -"s2RectIntersection" -"s2RectUnion" -"s2ToGeo" -"scalarProduct" -"sequenceCount" -"sequenceCountArgMax" -"sequenceCountArgMin" -"sequenceCountArray" -"sequenceCountDistinct" -"sequenceCountForEach" -"sequenceCountIf" -"sequenceCountMap" -"sequenceCountMerge" -"sequenceCountNull" -"sequenceCountOrDefault" -"sequenceCountOrNull" -"sequenceCountResample" -"sequenceCountSimpleState" -"sequenceCountState" -"sequenceMatch" -"sequenceMatchArgMax" -"sequenceMatchArgMin" -"sequenceMatchArray" -"sequenceMatchDistinct" -"sequenceMatchForEach" -"sequenceMatchIf" -"sequenceMatchMap" -"sequenceMatchMerge" -"sequenceMatchNull" -"sequenceMatchOrDefault" -"sequenceMatchOrNull" -"sequenceMatchResample" -"sequenceMatchSimpleState" -"sequenceMatchState" -"sequenceNextNode" -"sequenceNextNodeArgMax" -"sequenceNextNodeArgMin" -"sequenceNextNodeArray" -"sequenceNextNodeDistinct" -"sequenceNextNodeForEach" -"sequenceNextNodeIf" -"sequenceNextNodeMap" -"sequenceNextNodeMerge" -"sequenceNextNodeNull" -"sequenceNextNodeOrDefault" -"sequenceNextNodeOrNull" -"sequenceNextNodeResample" -"sequenceNextNodeSimpleState" -"sequenceNextNodeState" -"seriesDecomposeSTL" -"seriesOutliersDetectTukey" -"seriesPeriodDetectFFT" -"serverTimeZone" -"serverTimezone" -"serverUUID" -"shardCount" -"shardNum" -"showCertificate" -"sigmoid" -"sign" -"simpleJSONExtractBool" -"simpleJSONExtractFloat" -"simpleJSONExtractInt" -"simpleJSONExtractRaw" -"simpleJSONExtractString" -"simpleJSONExtractUInt" -"simpleJSONHas" -"simpleLinearRegression" -"simpleLinearRegressionArgMax" -"simpleLinearRegressionArgMin" -"simpleLinearRegressionArray" -"simpleLinearRegressionDistinct" -"simpleLinearRegressionForEach" -"simpleLinearRegressionIf" -"simpleLinearRegressionMap" -"simpleLinearRegressionMerge" -"simpleLinearRegressionNull" -"simpleLinearRegressionOrDefault" -"simpleLinearRegressionOrNull" -"simpleLinearRegressionResample" -"simpleLinearRegressionSimpleState" -"simpleLinearRegressionState" -"sin" -"singleValueOrNull" -"singleValueOrNullArgMax" -"singleValueOrNullArgMin" -"singleValueOrNullArray" -"singleValueOrNullDistinct" -"singleValueOrNullForEach" -"singleValueOrNullIf" -"singleValueOrNullMap" -"singleValueOrNullMerge" -"singleValueOrNullNull" -"singleValueOrNullOrDefault" -"singleValueOrNullOrNull" -"singleValueOrNullResample" -"singleValueOrNullSimpleState" -"singleValueOrNullState" -"sinh" -"sipHash128" -"sipHash128Keyed" -"sipHash128Reference" -"sipHash128ReferenceKeyed" -"sipHash64" -"sipHash64Keyed" -"skewPop" -"skewPopArgMax" -"skewPopArgMin" -"skewPopArray" -"skewPopDistinct" -"skewPopForEach" -"skewPopIf" -"skewPopMap" -"skewPopMerge" -"skewPopNull" -"skewPopOrDefault" -"skewPopOrNull" -"skewPopResample" -"skewPopSimpleState" -"skewPopState" -"skewSamp" -"skewSampArgMax" -"skewSampArgMin" -"skewSampArray" -"skewSampDistinct" -"skewSampForEach" -"skewSampIf" -"skewSampMap" -"skewSampMerge" -"skewSampNull" -"skewSampOrDefault" -"skewSampOrNull" -"skewSampResample" -"skewSampSimpleState" -"skewSampState" -"sleep" -"sleepEachRow" -"snowflakeIDToDateTime" -"snowflakeIDToDateTime64" -"snowflakeToDateTime" -"snowflakeToDateTime64" -"soundex" -"space" -"sparkBar" -"sparkBarArgMax" -"sparkBarArgMin" -"sparkBarArray" -"sparkBarDistinct" -"sparkBarForEach" -"sparkBarIf" -"sparkBarMap" -"sparkBarMerge" -"sparkBarNull" -"sparkBarOrDefault" 
-"sparkBarOrNull" -"sparkBarResample" -"sparkBarSimpleState" -"sparkBarState" -"sparkbar" -"sparkbarArgMax" -"sparkbarArgMin" -"sparkbarArray" -"sparkbarDistinct" -"sparkbarForEach" -"sparkbarIf" -"sparkbarMap" -"sparkbarMerge" -"sparkbarNull" -"sparkbarOrDefault" -"sparkbarOrNull" -"sparkbarResample" -"sparkbarSimpleState" -"sparkbarState" -"splitByAlpha" -"splitByChar" -"splitByNonAlpha" -"splitByRegexp" -"splitByString" -"splitByWhitespace" -"sqid" -"sqidDecode" -"sqidEncode" -"sqrt" -"startsWith" -"startsWithUTF8" -"stddevPop" -"stddevPopArgMax" -"stddevPopArgMin" -"stddevPopArray" -"stddevPopDistinct" -"stddevPopForEach" -"stddevPopIf" -"stddevPopMap" -"stddevPopMerge" -"stddevPopNull" -"stddevPopOrDefault" -"stddevPopOrNull" -"stddevPopResample" -"stddevPopSimpleState" -"stddevPopStable" -"stddevPopStableArgMax" -"stddevPopStableArgMin" -"stddevPopStableArray" -"stddevPopStableDistinct" -"stddevPopStableForEach" -"stddevPopStableIf" -"stddevPopStableMap" -"stddevPopStableMerge" -"stddevPopStableNull" -"stddevPopStableOrDefault" -"stddevPopStableOrNull" -"stddevPopStableResample" -"stddevPopStableSimpleState" -"stddevPopStableState" -"stddevPopState" -"stddevSamp" -"stddevSampArgMax" -"stddevSampArgMin" -"stddevSampArray" -"stddevSampDistinct" -"stddevSampForEach" -"stddevSampIf" -"stddevSampMap" -"stddevSampMerge" -"stddevSampNull" -"stddevSampOrDefault" -"stddevSampOrNull" -"stddevSampResample" -"stddevSampSimpleState" -"stddevSampStable" -"stddevSampStableArgMax" -"stddevSampStableArgMin" -"stddevSampStableArray" -"stddevSampStableDistinct" -"stddevSampStableForEach" -"stddevSampStableIf" -"stddevSampStableMap" -"stddevSampStableMerge" -"stddevSampStableNull" -"stddevSampStableOrDefault" -"stddevSampStableOrNull" -"stddevSampStableResample" -"stddevSampStableSimpleState" -"stddevSampStableState" -"stddevSampState" -"stem" -"stochasticLinearRegression" -"stochasticLinearRegressionArgMax" -"stochasticLinearRegressionArgMin" -"stochasticLinearRegressionArray" -"stochasticLinearRegressionDistinct" -"stochasticLinearRegressionForEach" -"stochasticLinearRegressionIf" -"stochasticLinearRegressionMap" -"stochasticLinearRegressionMerge" -"stochasticLinearRegressionNull" -"stochasticLinearRegressionOrDefault" -"stochasticLinearRegressionOrNull" -"stochasticLinearRegressionResample" -"stochasticLinearRegressionSimpleState" -"stochasticLinearRegressionState" -"stochasticLogisticRegression" -"stochasticLogisticRegressionArgMax" -"stochasticLogisticRegressionArgMin" -"stochasticLogisticRegressionArray" -"stochasticLogisticRegressionDistinct" -"stochasticLogisticRegressionForEach" -"stochasticLogisticRegressionIf" -"stochasticLogisticRegressionMap" -"stochasticLogisticRegressionMerge" -"stochasticLogisticRegressionNull" -"stochasticLogisticRegressionOrDefault" -"stochasticLogisticRegressionOrNull" -"stochasticLogisticRegressionResample" -"stochasticLogisticRegressionSimpleState" -"stochasticLogisticRegressionState" -"str_to_date" -"str_to_map" -"stringJaccardIndex" -"stringJaccardIndexUTF8" -"stringToH3" -"structureToCapnProtoSchema" -"structureToProtobufSchema" -"studentTTest" -"studentTTestArgMax" -"studentTTestArgMin" -"studentTTestArray" -"studentTTestDistinct" -"studentTTestForEach" -"studentTTestIf" -"studentTTestMap" -"studentTTestMerge" -"studentTTestNull" -"studentTTestOrDefault" -"studentTTestOrNull" -"studentTTestResample" -"studentTTestSimpleState" -"studentTTestState" -"subBitmap" -"subDate" -"substr" -"substring" -"substringIndex" -"substringIndexUTF8" -"substringUTF8" 
-"subtractDays" -"subtractHours" -"subtractInterval" -"subtractMicroseconds" -"subtractMilliseconds" -"subtractMinutes" -"subtractMonths" -"subtractNanoseconds" -"subtractQuarters" -"subtractSeconds" -"subtractTupleOfIntervals" -"subtractWeeks" -"subtractYears" -"sum" -"sumArgMax" -"sumArgMin" -"sumArray" -"sumCount" -"sumCountArgMax" -"sumCountArgMin" -"sumCountArray" -"sumCountDistinct" -"sumCountForEach" -"sumCountIf" -"sumCountMap" -"sumCountMerge" -"sumCountNull" -"sumCountOrDefault" -"sumCountOrNull" -"sumCountResample" -"sumCountSimpleState" -"sumCountState" -"sumDistinct" -"sumForEach" -"sumIf" -"sumKahan" -"sumKahanArgMax" -"sumKahanArgMin" -"sumKahanArray" -"sumKahanDistinct" -"sumKahanForEach" -"sumKahanIf" -"sumKahanMap" -"sumKahanMerge" -"sumKahanNull" -"sumKahanOrDefault" -"sumKahanOrNull" -"sumKahanResample" -"sumKahanSimpleState" -"sumKahanState" -"sumMap" -"sumMapFiltered" -"sumMapFilteredArgMax" -"sumMapFilteredArgMin" -"sumMapFilteredArray" -"sumMapFilteredDistinct" -"sumMapFilteredForEach" -"sumMapFilteredIf" -"sumMapFilteredMap" -"sumMapFilteredMerge" -"sumMapFilteredNull" -"sumMapFilteredOrDefault" -"sumMapFilteredOrNull" -"sumMapFilteredResample" -"sumMapFilteredSimpleState" -"sumMapFilteredState" -"sumMapFilteredWithOverflow" -"sumMapFilteredWithOverflowArgMax" -"sumMapFilteredWithOverflowArgMin" -"sumMapFilteredWithOverflowArray" -"sumMapFilteredWithOverflowDistinct" -"sumMapFilteredWithOverflowForEach" -"sumMapFilteredWithOverflowIf" -"sumMapFilteredWithOverflowMap" -"sumMapFilteredWithOverflowMerge" -"sumMapFilteredWithOverflowNull" -"sumMapFilteredWithOverflowOrDefault" -"sumMapFilteredWithOverflowOrNull" -"sumMapFilteredWithOverflowResample" -"sumMapFilteredWithOverflowSimpleState" -"sumMapFilteredWithOverflowState" -"sumMapWithOverflow" -"sumMapWithOverflowArgMax" -"sumMapWithOverflowArgMin" -"sumMapWithOverflowArray" -"sumMapWithOverflowDistinct" -"sumMapWithOverflowForEach" -"sumMapWithOverflowIf" -"sumMapWithOverflowMap" -"sumMapWithOverflowMerge" -"sumMapWithOverflowNull" -"sumMapWithOverflowOrDefault" -"sumMapWithOverflowOrNull" -"sumMapWithOverflowResample" -"sumMapWithOverflowSimpleState" -"sumMapWithOverflowState" -"sumMappedArrays" -"sumMappedArraysArgMax" -"sumMappedArraysArgMin" -"sumMappedArraysArray" -"sumMappedArraysDistinct" -"sumMappedArraysForEach" -"sumMappedArraysIf" -"sumMappedArraysMap" -"sumMappedArraysMerge" -"sumMappedArraysNull" -"sumMappedArraysOrDefault" -"sumMappedArraysOrNull" -"sumMappedArraysResample" -"sumMappedArraysSimpleState" -"sumMappedArraysState" -"sumMerge" -"sumNull" -"sumOrDefault" -"sumOrNull" -"sumResample" -"sumSimpleState" -"sumState" -"sumWithOverflow" -"sumWithOverflowArgMax" -"sumWithOverflowArgMin" -"sumWithOverflowArray" -"sumWithOverflowDistinct" -"sumWithOverflowForEach" -"sumWithOverflowIf" -"sumWithOverflowMap" -"sumWithOverflowMerge" -"sumWithOverflowNull" -"sumWithOverflowOrDefault" -"sumWithOverflowOrNull" -"sumWithOverflowResample" -"sumWithOverflowSimpleState" -"sumWithOverflowState" -"svg" -"synonyms" -"tan" -"tanh" -"tcpPort" -"tgamma" -"theilsU" -"theilsUArgMax" -"theilsUArgMin" -"theilsUArray" -"theilsUDistinct" -"theilsUForEach" -"theilsUIf" -"theilsUMap" -"theilsUMerge" -"theilsUNull" -"theilsUOrDefault" -"theilsUOrNull" -"theilsUResample" -"theilsUSimpleState" -"theilsUState" -"throwIf" -"tid" -"timeDiff" -"timeSlot" -"timeSlots" -"timeZone" -"timeZoneOf" -"timeZoneOffset" -"timestamp" -"timestampDiff" -"timestamp_diff" -"timezone" -"timezoneOf" -"timezoneOffset" -"toBool" -"toColumnTypeName" 
-"toDate" -"toDate32" -"toDate32OrDefault" -"toDate32OrNull" -"toDate32OrZero" -"toDateOrDefault" -"toDateOrNull" -"toDateOrZero" -"toDateTime" -"toDateTime32" -"toDateTime64" -"toDateTime64OrDefault" -"toDateTime64OrNull" -"toDateTime64OrZero" -"toDateTimeOrDefault" -"toDateTimeOrNull" -"toDateTimeOrZero" -"toDayOfMonth" -"toDayOfWeek" -"toDayOfYear" -"toDaysSinceYearZero" -"toDecimal128" -"toDecimal128OrDefault" -"toDecimal128OrNull" -"toDecimal128OrZero" -"toDecimal256" -"toDecimal256OrDefault" -"toDecimal256OrNull" -"toDecimal256OrZero" -"toDecimal32" -"toDecimal32OrDefault" -"toDecimal32OrNull" -"toDecimal32OrZero" -"toDecimal64" -"toDecimal64OrDefault" -"toDecimal64OrNull" -"toDecimal64OrZero" -"toDecimalString" -"toFixedString" -"toFloat32" -"toFloat32OrDefault" -"toFloat32OrNull" -"toFloat32OrZero" -"toFloat64" -"toFloat64OrDefault" -"toFloat64OrNull" -"toFloat64OrZero" -"toHour" -"toIPv4" -"toIPv4OrDefault" -"toIPv4OrNull" -"toIPv4OrZero" -"toIPv6" -"toIPv6OrDefault" -"toIPv6OrNull" -"toIPv6OrZero" -"toISOWeek" -"toISOYear" -"toInt128" -"toInt128OrDefault" -"toInt128OrNull" -"toInt128OrZero" -"toInt16" -"toInt16OrDefault" -"toInt16OrNull" -"toInt16OrZero" -"toInt256" -"toInt256OrDefault" -"toInt256OrNull" -"toInt256OrZero" -"toInt32" -"toInt32OrDefault" -"toInt32OrNull" -"toInt32OrZero" -"toInt64" -"toInt64OrDefault" -"toInt64OrNull" -"toInt64OrZero" -"toInt8" -"toInt8OrDefault" -"toInt8OrNull" -"toInt8OrZero" -"toIntervalDay" -"toIntervalHour" -"toIntervalMicrosecond" -"toIntervalMillisecond" -"toIntervalMinute" -"toIntervalMonth" -"toIntervalNanosecond" -"toIntervalQuarter" -"toIntervalSecond" -"toIntervalWeek" -"toIntervalYear" -"toJSONString" -"toLastDayOfMonth" -"toLastDayOfWeek" -"toLowCardinality" -"toMillisecond" -"toMinute" -"toModifiedJulianDay" -"toModifiedJulianDayOrNull" -"toMonday" -"toMonth" -"toNullable" -"toQuarter" -"toRelativeDayNum" -"toRelativeHourNum" -"toRelativeMinuteNum" -"toRelativeMonthNum" -"toRelativeQuarterNum" -"toRelativeSecondNum" -"toRelativeWeekNum" -"toRelativeYearNum" -"toSecond" -"toStartOfDay" -"toStartOfFifteenMinutes" -"toStartOfFiveMinute" -"toStartOfFiveMinutes" -"toStartOfHour" -"toStartOfISOYear" -"toStartOfInterval" -"toStartOfMicrosecond" -"toStartOfMillisecond" -"toStartOfMinute" -"toStartOfMonth" -"toStartOfNanosecond" -"toStartOfQuarter" -"toStartOfSecond" -"toStartOfTenMinutes" -"toStartOfWeek" -"toStartOfYear" -"toString" -"toStringCutToZero" -"toTime" -"toTimeZone" -"toTimezone" -"toTypeName" -"toUInt128" -"toUInt128OrDefault" -"toUInt128OrNull" -"toUInt128OrZero" -"toUInt16" -"toUInt16OrDefault" -"toUInt16OrNull" -"toUInt16OrZero" -"toUInt256" -"toUInt256OrDefault" -"toUInt256OrNull" -"toUInt256OrZero" -"toUInt32" -"toUInt32OrDefault" -"toUInt32OrNull" -"toUInt32OrZero" -"toUInt64" -"toUInt64OrDefault" -"toUInt64OrNull" -"toUInt64OrZero" -"toUInt8" -"toUInt8OrDefault" -"toUInt8OrNull" -"toUInt8OrZero" -"toUTCTimestamp" -"toUUID" -"toUUIDOrDefault" -"toUUIDOrNull" -"toUUIDOrZero" -"toUnixTimestamp" -"toUnixTimestamp64Micro" -"toUnixTimestamp64Milli" -"toUnixTimestamp64Nano" -"toValidUTF8" -"toWeek" -"toYYYYMM" -"toYYYYMMDD" -"toYYYYMMDDhhmmss" -"toYear" -"toYearWeek" -"to_utc_timestamp" -"today" -"tokens" -"topK" -"topKArgMax" -"topKArgMin" -"topKArray" -"topKDistinct" -"topKForEach" -"topKIf" -"topKMap" -"topKMerge" -"topKNull" -"topKOrDefault" -"topKOrNull" -"topKResample" -"topKSimpleState" -"topKState" -"topKWeighted" -"topKWeightedArgMax" -"topKWeightedArgMin" -"topKWeightedArray" -"topKWeightedDistinct" 
-"topKWeightedForEach" -"topKWeightedIf" -"topKWeightedMap" -"topKWeightedMerge" -"topKWeightedNull" -"topKWeightedOrDefault" -"topKWeightedOrNull" -"topKWeightedResample" -"topKWeightedSimpleState" -"topKWeightedState" -"topLevelDomain" -"topLevelDomainRFC" -"transactionID" -"transactionLatestSnapshot" -"transactionOldestSnapshot" -"transform" -"translate" -"translateUTF8" -"trim" -"trimBoth" -"trimLeft" -"trimRight" -"trunc" -"truncate" -"tryBase58Decode" -"tryBase64Decode" -"tryBase64URLDecode" -"tryDecrypt" -"tryIdnaEncode" -"tryPunycodeDecode" -"tumble" -"tumbleEnd" -"tumbleStart" -"tuple" -"tupleConcat" -"tupleDivide" -"tupleDivideByNumber" -"tupleElement" -"tupleHammingDistance" -"tupleIntDiv" -"tupleIntDivByNumber" -"tupleIntDivOrZero" -"tupleIntDivOrZeroByNumber" -"tupleMinus" -"tupleModulo" -"tupleModuloByNumber" -"tupleMultiply" -"tupleMultiplyByNumber" -"tupleNames" -"tupleNegate" -"tuplePlus" -"tupleToNameValuePairs" -"ucase" -"unbin" -"unhex" -"uniq" -"uniqArgMax" -"uniqArgMin" -"uniqArray" -"uniqCombined" -"uniqCombined64" -"uniqCombined64ArgMax" -"uniqCombined64ArgMin" -"uniqCombined64Array" -"uniqCombined64Distinct" -"uniqCombined64ForEach" -"uniqCombined64If" -"uniqCombined64Map" -"uniqCombined64Merge" -"uniqCombined64Null" -"uniqCombined64OrDefault" -"uniqCombined64OrNull" -"uniqCombined64Resample" -"uniqCombined64SimpleState" -"uniqCombined64State" -"uniqCombinedArgMax" -"uniqCombinedArgMin" -"uniqCombinedArray" -"uniqCombinedDistinct" -"uniqCombinedForEach" -"uniqCombinedIf" -"uniqCombinedMap" -"uniqCombinedMerge" -"uniqCombinedNull" -"uniqCombinedOrDefault" -"uniqCombinedOrNull" -"uniqCombinedResample" -"uniqCombinedSimpleState" -"uniqCombinedState" -"uniqDistinct" -"uniqExact" -"uniqExactArgMax" -"uniqExactArgMin" -"uniqExactArray" -"uniqExactDistinct" -"uniqExactForEach" -"uniqExactIf" -"uniqExactMap" -"uniqExactMerge" -"uniqExactNull" -"uniqExactOrDefault" -"uniqExactOrNull" -"uniqExactResample" -"uniqExactSimpleState" -"uniqExactState" -"uniqForEach" -"uniqHLL12" -"uniqHLL12ArgMax" -"uniqHLL12ArgMin" -"uniqHLL12Array" -"uniqHLL12Distinct" -"uniqHLL12ForEach" -"uniqHLL12If" -"uniqHLL12Map" -"uniqHLL12Merge" -"uniqHLL12Null" -"uniqHLL12OrDefault" -"uniqHLL12OrNull" -"uniqHLL12Resample" -"uniqHLL12SimpleState" -"uniqHLL12State" -"uniqIf" -"uniqMap" -"uniqMerge" -"uniqNull" -"uniqOrDefault" -"uniqOrNull" -"uniqResample" -"uniqSimpleState" -"uniqState" -"uniqTheta" -"uniqThetaArgMax" -"uniqThetaArgMin" -"uniqThetaArray" -"uniqThetaDistinct" -"uniqThetaForEach" -"uniqThetaIf" -"uniqThetaIntersect" -"uniqThetaMap" -"uniqThetaMerge" -"uniqThetaNot" -"uniqThetaNull" -"uniqThetaOrDefault" -"uniqThetaOrNull" -"uniqThetaResample" -"uniqThetaSimpleState" -"uniqThetaState" -"uniqThetaUnion" -"uniqUpTo" -"uniqUpToArgMax" -"uniqUpToArgMin" -"uniqUpToArray" -"uniqUpToDistinct" -"uniqUpToForEach" -"uniqUpToIf" -"uniqUpToMap" -"uniqUpToMerge" -"uniqUpToNull" -"uniqUpToOrDefault" -"uniqUpToOrNull" -"uniqUpToResample" -"uniqUpToSimpleState" -"uniqUpToState" -"upper" -"upperUTF8" -"uptime" -"user" -"validateNestedArraySizes" -"varPop" -"varPopArgMax" -"varPopArgMin" -"varPopArray" -"varPopDistinct" -"varPopForEach" -"varPopIf" -"varPopMap" -"varPopMerge" -"varPopNull" -"varPopOrDefault" -"varPopOrNull" -"varPopResample" -"varPopSimpleState" -"varPopStable" -"varPopStableArgMax" -"varPopStableArgMin" -"varPopStableArray" -"varPopStableDistinct" -"varPopStableForEach" -"varPopStableIf" -"varPopStableMap" -"varPopStableMerge" -"varPopStableNull" -"varPopStableOrDefault" 
-"varPopStableOrNull" -"varPopStableResample" -"varPopStableSimpleState" -"varPopStableState" -"varPopState" -"varSamp" -"varSampArgMax" -"varSampArgMin" -"varSampArray" -"varSampDistinct" -"varSampForEach" -"varSampIf" -"varSampMap" -"varSampMerge" -"varSampNull" -"varSampOrDefault" -"varSampOrNull" -"varSampResample" -"varSampSimpleState" -"varSampStable" -"varSampStableArgMax" -"varSampStableArgMin" -"varSampStableArray" -"varSampStableDistinct" -"varSampStableForEach" -"varSampStableIf" -"varSampStableMap" -"varSampStableMerge" -"varSampStableNull" -"varSampStableOrDefault" -"varSampStableOrNull" -"varSampStableResample" -"varSampStableSimpleState" -"varSampStableState" -"varSampState" -"variantElement" -"variantType" -"vectorDifference" -"vectorSum" -"version" -"visibleWidth" -"visitParamExtractBool" -"visitParamExtractFloat" -"visitParamExtractInt" -"visitParamExtractRaw" -"visitParamExtractString" -"visitParamExtractUInt" -"visitParamHas" -"week" -"welchTTest" -"welchTTestArgMax" -"welchTTestArgMin" -"welchTTestArray" -"welchTTestDistinct" -"welchTTestForEach" -"welchTTestIf" -"welchTTestMap" -"welchTTestMerge" -"welchTTestNull" -"welchTTestOrDefault" -"welchTTestOrNull" -"welchTTestResample" -"welchTTestSimpleState" -"welchTTestState" -"widthBucket" -"width_bucket" -"windowFunnel" -"windowFunnelArgMax" -"windowFunnelArgMin" -"windowFunnelArray" -"windowFunnelDistinct" -"windowFunnelForEach" -"windowFunnelIf" -"windowFunnelMap" -"windowFunnelMerge" -"windowFunnelNull" -"windowFunnelOrDefault" -"windowFunnelOrNull" -"windowFunnelResample" -"windowFunnelSimpleState" -"windowFunnelState" -"windowID" -"wkt" -"wordShingleMinHash" -"wordShingleMinHashArg" -"wordShingleMinHashArgCaseInsensitive" -"wordShingleMinHashArgCaseInsensitiveUTF8" -"wordShingleMinHashArgUTF8" -"wordShingleMinHashCaseInsensitive" -"wordShingleMinHashCaseInsensitiveUTF8" -"wordShingleMinHashUTF8" -"wordShingleSimHash" -"wordShingleSimHashCaseInsensitive" -"wordShingleSimHashCaseInsensitiveUTF8" -"wordShingleSimHashUTF8" -"wyHash64" -"xor" -"xxHash32" -"xxHash64" -"xxh3" -"yandexConsistentHash" -"yearweek" -"yesterday" -"zookeeperSessionUptime" +"bool" +"boolean" diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index e562595fb67..e69de29bb2d 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -1,4283 +0,0 @@ -"BIT_AND" -"BIT_ANDArgMax" -"BIT_ANDArgMin" -"BIT_ANDArray" -"BIT_ANDDistinct" -"BIT_ANDForEach" -"BIT_ANDIf" -"BIT_ANDMap" -"BIT_ANDMerge" -"BIT_ANDNull" -"BIT_ANDOrDefault" -"BIT_ANDOrNull" -"BIT_ANDResample" -"BIT_ANDSimpleState" -"BIT_ANDState" -"BIT_OR" -"BIT_ORArgMax" -"BIT_ORArgMin" -"BIT_ORArray" -"BIT_ORDistinct" -"BIT_ORForEach" -"BIT_ORIf" -"BIT_ORMap" -"BIT_ORMerge" -"BIT_ORNull" -"BIT_OROrDefault" -"BIT_OROrNull" -"BIT_ORResample" -"BIT_ORSimpleState" -"BIT_ORState" -"BIT_XOR" -"BIT_XORArgMax" -"BIT_XORArgMin" -"BIT_XORArray" -"BIT_XORDistinct" -"BIT_XORForEach" -"BIT_XORIf" -"BIT_XORMap" -"BIT_XORMerge" -"BIT_XORNull" -"BIT_XOROrDefault" -"BIT_XOROrNull" -"BIT_XORResample" -"BIT_XORSimpleState" -"BIT_XORState" -"BLAKE3" -"CAST" -"CHARACTER_LENGTH" -"CHAR_LENGTH" -"COVAR_POP" -"COVAR_POPArgMax" -"COVAR_POPArgMin" -"COVAR_POPArray" -"COVAR_POPDistinct" -"COVAR_POPForEach" -"COVAR_POPIf" -"COVAR_POPMap" -"COVAR_POPMerge" -"COVAR_POPNull" -"COVAR_POPOrDefault" -"COVAR_POPOrNull" -"COVAR_POPResample" -"COVAR_POPSimpleState" -"COVAR_POPState" -"COVAR_SAMP" -"COVAR_SAMPArgMax" -"COVAR_SAMPArgMin" -"COVAR_SAMPArray" 
-"COVAR_SAMPDistinct" -"COVAR_SAMPForEach" -"COVAR_SAMPIf" -"COVAR_SAMPMap" -"COVAR_SAMPMerge" -"COVAR_SAMPNull" -"COVAR_SAMPOrDefault" -"COVAR_SAMPOrNull" -"COVAR_SAMPResample" -"COVAR_SAMPSimpleState" -"COVAR_SAMPState" -"CRC32" -"CRC32IEEE" -"CRC64" -"DATABASE" -"DATE" -"DATE_DIFF" -"DATE_FORMAT" -"DATE_TRUNC" -"DAY" -"DAYOFMONTH" -"DAYOFWEEK" -"DAYOFYEAR" -"FORMAT_BYTES" -"FQDN" -"FROM_BASE64" -"FROM_DAYS" -"FROM_UNIXTIME" -"HOUR" -"INET6_ATON" -"INET6_NTOA" -"INET_ATON" -"INET_NTOA" -"IPv4CIDRToRange" -"IPv4NumToString" -"IPv4NumToStringClassC" -"IPv4StringToNum" -"IPv4StringToNumOrDefault" -"IPv4StringToNumOrNull" -"IPv4ToIPv6" -"IPv6CIDRToRange" -"IPv6NumToString" -"IPv6StringToNum" -"IPv6StringToNumOrDefault" -"IPv6StringToNumOrNull" -"JSONArrayLength" -"JSONExtract" -"JSONExtractArrayRaw" -"JSONExtractBool" -"JSONExtractFloat" -"JSONExtractInt" -"JSONExtractKeys" -"JSONExtractKeysAndValues" -"JSONExtractKeysAndValuesRaw" -"JSONExtractRaw" -"JSONExtractString" -"JSONExtractUInt" -"JSONHas" -"JSONKey" -"JSONLength" -"JSONMergePatch" -"JSONType" -"JSON_ARRAY_LENGTH" -"JSON_EXISTS" -"JSON_QUERY" -"JSON_VALUE" -"L1Distance" -"L1Norm" -"L1Normalize" -"L2Distance" -"L2Norm" -"L2Normalize" -"L2SquaredDistance" -"L2SquaredNorm" -"LAST_DAY" -"LinfDistance" -"LinfNorm" -"LinfNormalize" -"LpDistance" -"LpNorm" -"LpNormalize" -"MACNumToString" -"MACStringToNum" -"MACStringToOUI" -"MAP_FROM_ARRAYS" -"MD4" -"MD5" -"MILLISECOND" -"MINUTE" -"MONTH" -"OCTET_LENGTH" -"QUARTER" -"REGEXP_EXTRACT" -"REGEXP_MATCHES" -"REGEXP_REPLACE" -"SCHEMA" -"SECOND" -"SHA1" -"SHA224" -"SHA256" -"SHA384" -"SHA512" -"SHA512_256" -"STD" -"STDArgMax" -"STDArgMin" -"STDArray" -"STDDEV_POP" -"STDDEV_POPArgMax" -"STDDEV_POPArgMin" -"STDDEV_POPArray" -"STDDEV_POPDistinct" -"STDDEV_POPForEach" -"STDDEV_POPIf" -"STDDEV_POPMap" -"STDDEV_POPMerge" -"STDDEV_POPNull" -"STDDEV_POPOrDefault" -"STDDEV_POPOrNull" -"STDDEV_POPResample" -"STDDEV_POPSimpleState" -"STDDEV_POPState" -"STDDEV_SAMP" -"STDDEV_SAMPArgMax" -"STDDEV_SAMPArgMin" -"STDDEV_SAMPArray" -"STDDEV_SAMPDistinct" -"STDDEV_SAMPForEach" -"STDDEV_SAMPIf" -"STDDEV_SAMPMap" -"STDDEV_SAMPMerge" -"STDDEV_SAMPNull" -"STDDEV_SAMPOrDefault" -"STDDEV_SAMPOrNull" -"STDDEV_SAMPResample" -"STDDEV_SAMPSimpleState" -"STDDEV_SAMPState" -"STDDistinct" -"STDForEach" -"STDIf" -"STDMap" -"STDMerge" -"STDNull" -"STDOrDefault" -"STDOrNull" -"STDResample" -"STDSimpleState" -"STDState" -"SUBSTRING_INDEX" -"SVG" -"TIMESTAMP_DIFF" -"TO_BASE64" -"TO_DAYS" -"TO_UNIXTIME" -"ULIDStringToDateTime" -"URLHash" -"URLHierarchy" -"URLPathHierarchy" -"UTCTimestamp" -"UTC_timestamp" -"UUIDNumToString" -"UUIDStringToNum" -"UUIDToNum" -"UUIDv7ToDateTime" -"VAR_POP" -"VAR_POPArgMax" -"VAR_POPArgMin" -"VAR_POPArray" -"VAR_POPDistinct" -"VAR_POPForEach" -"VAR_POPIf" -"VAR_POPMap" -"VAR_POPMerge" -"VAR_POPNull" -"VAR_POPOrDefault" -"VAR_POPOrNull" -"VAR_POPResample" -"VAR_POPSimpleState" -"VAR_POPState" -"VAR_SAMP" -"VAR_SAMPArgMax" -"VAR_SAMPArgMin" -"VAR_SAMPArray" -"VAR_SAMPDistinct" -"VAR_SAMPForEach" -"VAR_SAMPIf" -"VAR_SAMPMap" -"VAR_SAMPMerge" -"VAR_SAMPNull" -"VAR_SAMPOrDefault" -"VAR_SAMPOrNull" -"VAR_SAMPResample" -"VAR_SAMPSimpleState" -"VAR_SAMPState" -"YEAR" -"YYYYMMDDToDate" -"YYYYMMDDToDate32" -"YYYYMMDDhhmmssToDateTime" -"YYYYMMDDhhmmssToDateTime64" -"_CAST" -"__actionName" -"__bitBoolMaskAnd" -"__bitBoolMaskOr" -"__bitSwapLastTwo" -"__bitWrapperFunc" -"__getScalar" -"__scalarSubqueryResult" -"abs" -"accurateCast" -"accurateCastOrDefault" -"accurateCastOrNull" -"acos" -"acosh" -"addDate" -"addDays" 
-"addHours" -"addInterval" -"addMicroseconds" -"addMilliseconds" -"addMinutes" -"addMonths" -"addNanoseconds" -"addQuarters" -"addSeconds" -"addTupleOfIntervals" -"addWeeks" -"addYears" -"addressToLine" -"addressToLineWithInlines" -"addressToSymbol" -"aes_decrypt_mysql" -"aes_encrypt_mysql" -"age" -"aggThrow" -"aggThrowArgMax" -"aggThrowArgMin" -"aggThrowArray" -"aggThrowDistinct" -"aggThrowForEach" -"aggThrowIf" -"aggThrowMap" -"aggThrowMerge" -"aggThrowNull" -"aggThrowOrDefault" -"aggThrowOrNull" -"aggThrowResample" -"aggThrowSimpleState" -"aggThrowState" -"alphaTokens" -"analysisOfVariance" -"analysisOfVarianceArgMax" -"analysisOfVarianceArgMin" -"analysisOfVarianceArray" -"analysisOfVarianceDistinct" -"analysisOfVarianceForEach" -"analysisOfVarianceIf" -"analysisOfVarianceMap" -"analysisOfVarianceMerge" -"analysisOfVarianceNull" -"analysisOfVarianceOrDefault" -"analysisOfVarianceOrNull" -"analysisOfVarianceResample" -"analysisOfVarianceSimpleState" -"analysisOfVarianceState" -"and" -"anova" -"anovaArgMax" -"anovaArgMin" -"anovaArray" -"anovaDistinct" -"anovaForEach" -"anovaIf" -"anovaMap" -"anovaMerge" -"anovaNull" -"anovaOrDefault" -"anovaOrNull" -"anovaResample" -"anovaSimpleState" -"anovaState" -"any" -"anyArgMax" -"anyArgMin" -"anyArray" -"anyDistinct" -"anyForEach" -"anyHeavy" -"anyHeavyArgMax" -"anyHeavyArgMin" -"anyHeavyArray" -"anyHeavyDistinct" -"anyHeavyForEach" -"anyHeavyIf" -"anyHeavyMap" -"anyHeavyMerge" -"anyHeavyNull" -"anyHeavyOrDefault" -"anyHeavyOrNull" -"anyHeavyResample" -"anyHeavySimpleState" -"anyHeavyState" -"anyIf" -"anyLast" -"anyLastArgMax" -"anyLastArgMin" -"anyLastArray" -"anyLastDistinct" -"anyLastForEach" -"anyLastIf" -"anyLastMap" -"anyLastMerge" -"anyLastNull" -"anyLastOrDefault" -"anyLastOrNull" -"anyLastResample" -"anyLastSimpleState" -"anyLastState" -"anyLast_respect_nulls" -"anyLast_respect_nullsArgMax" -"anyLast_respect_nullsArgMin" -"anyLast_respect_nullsArray" -"anyLast_respect_nullsDistinct" -"anyLast_respect_nullsForEach" -"anyLast_respect_nullsIf" -"anyLast_respect_nullsMap" -"anyLast_respect_nullsMerge" -"anyLast_respect_nullsNull" -"anyLast_respect_nullsOrDefault" -"anyLast_respect_nullsOrNull" -"anyLast_respect_nullsResample" -"anyLast_respect_nullsSimpleState" -"anyLast_respect_nullsState" -"anyMap" -"anyMerge" -"anyNull" -"anyOrDefault" -"anyOrNull" -"anyResample" -"anySimpleState" -"anyState" -"any_respect_nulls" -"any_respect_nullsArgMax" -"any_respect_nullsArgMin" -"any_respect_nullsArray" -"any_respect_nullsDistinct" -"any_respect_nullsForEach" -"any_respect_nullsIf" -"any_respect_nullsMap" -"any_respect_nullsMerge" -"any_respect_nullsNull" -"any_respect_nullsOrDefault" -"any_respect_nullsOrNull" -"any_respect_nullsResample" -"any_respect_nullsSimpleState" -"any_respect_nullsState" -"any_value" -"any_valueArgMax" -"any_valueArgMin" -"any_valueArray" -"any_valueDistinct" -"any_valueForEach" -"any_valueIf" -"any_valueMap" -"any_valueMerge" -"any_valueNull" -"any_valueOrDefault" -"any_valueOrNull" -"any_valueResample" -"any_valueSimpleState" -"any_valueState" -"any_value_respect_nulls" -"any_value_respect_nullsArgMax" -"any_value_respect_nullsArgMin" -"any_value_respect_nullsArray" -"any_value_respect_nullsDistinct" -"any_value_respect_nullsForEach" -"any_value_respect_nullsIf" -"any_value_respect_nullsMap" -"any_value_respect_nullsMerge" -"any_value_respect_nullsNull" -"any_value_respect_nullsOrDefault" -"any_value_respect_nullsOrNull" -"any_value_respect_nullsResample" -"any_value_respect_nullsSimpleState" 
-"any_value_respect_nullsState" -"appendTrailingCharIfAbsent" -"approx_top_count" -"approx_top_countArgMax" -"approx_top_countArgMin" -"approx_top_countArray" -"approx_top_countDistinct" -"approx_top_countForEach" -"approx_top_countIf" -"approx_top_countMap" -"approx_top_countMerge" -"approx_top_countNull" -"approx_top_countOrDefault" -"approx_top_countOrNull" -"approx_top_countResample" -"approx_top_countSimpleState" -"approx_top_countState" -"approx_top_k" -"approx_top_kArgMax" -"approx_top_kArgMin" -"approx_top_kArray" -"approx_top_kDistinct" -"approx_top_kForEach" -"approx_top_kIf" -"approx_top_kMap" -"approx_top_kMerge" -"approx_top_kNull" -"approx_top_kOrDefault" -"approx_top_kOrNull" -"approx_top_kResample" -"approx_top_kSimpleState" -"approx_top_kState" -"approx_top_sum" -"approx_top_sumArgMax" -"approx_top_sumArgMin" -"approx_top_sumArray" -"approx_top_sumDistinct" -"approx_top_sumForEach" -"approx_top_sumIf" -"approx_top_sumMap" -"approx_top_sumMerge" -"approx_top_sumNull" -"approx_top_sumOrDefault" -"approx_top_sumOrNull" -"approx_top_sumResample" -"approx_top_sumSimpleState" -"approx_top_sumState" -"argMax" -"argMaxArgMax" -"argMaxArgMin" -"argMaxArray" -"argMaxDistinct" -"argMaxForEach" -"argMaxIf" -"argMaxMap" -"argMaxMerge" -"argMaxNull" -"argMaxOrDefault" -"argMaxOrNull" -"argMaxResample" -"argMaxSimpleState" -"argMaxState" -"argMin" -"argMinArgMax" -"argMinArgMin" -"argMinArray" -"argMinDistinct" -"argMinForEach" -"argMinIf" -"argMinMap" -"argMinMerge" -"argMinNull" -"argMinOrDefault" -"argMinOrNull" -"argMinResample" -"argMinSimpleState" -"argMinState" -"array" -"arrayAUC" -"arrayAll" -"arrayAvg" -"arrayCompact" -"arrayConcat" -"arrayCount" -"arrayCumSum" -"arrayCumSumNonNegative" -"arrayDifference" -"arrayDistinct" -"arrayDotProduct" -"arrayElement" -"arrayEnumerate" -"arrayEnumerateDense" -"arrayEnumerateDenseRanked" -"arrayEnumerateUniq" -"arrayEnumerateUniqRanked" -"arrayExists" -"arrayFill" -"arrayFilter" -"arrayFirst" -"arrayFirstIndex" -"arrayFirstOrNull" -"arrayFlatten" -"arrayFold" -"arrayIntersect" -"arrayJaccardIndex" -"arrayJoin" -"arrayLast" -"arrayLastIndex" -"arrayLastOrNull" -"arrayMap" -"arrayMax" -"arrayMin" -"arrayPartialReverseSort" -"arrayPartialShuffle" -"arrayPartialSort" -"arrayPopBack" -"arrayPopFront" -"arrayProduct" -"arrayPushBack" -"arrayPushFront" -"arrayRandomSample" -"arrayReduce" -"arrayReduceInRanges" -"arrayResize" -"arrayReverse" -"arrayReverseFill" -"arrayReverseSort" -"arrayReverseSplit" -"arrayRotateLeft" -"arrayRotateRight" -"arrayShiftLeft" -"arrayShiftRight" -"arrayShingles" -"arrayShuffle" -"arraySlice" -"arraySort" -"arraySplit" -"arrayStringConcat" -"arraySum" -"arrayUniq" -"arrayWithConstant" -"arrayZip" -"array_agg" -"array_aggArgMax" -"array_aggArgMin" -"array_aggArray" -"array_aggDistinct" -"array_aggForEach" -"array_aggIf" -"array_aggMap" -"array_aggMerge" -"array_aggNull" -"array_aggOrDefault" -"array_aggOrNull" -"array_aggResample" -"array_aggSimpleState" -"array_aggState" -"array_concat_agg" -"array_concat_aggArgMax" -"array_concat_aggArgMin" -"array_concat_aggArray" -"array_concat_aggDistinct" -"array_concat_aggForEach" -"array_concat_aggIf" -"array_concat_aggMap" -"array_concat_aggMerge" -"array_concat_aggNull" -"array_concat_aggOrDefault" -"array_concat_aggOrNull" -"array_concat_aggResample" -"array_concat_aggSimpleState" -"array_concat_aggState" -"ascii" -"asin" -"asinh" -"assumeNotNull" -"atan" -"atan2" -"atanh" -"avg" -"avgArgMax" -"avgArgMin" -"avgArray" -"avgDistinct" -"avgForEach" -"avgIf" -"avgMap" -"avgMerge" 
-"avgNull" -"avgOrDefault" -"avgOrNull" -"avgResample" -"avgSimpleState" -"avgState" -"avgWeighted" -"avgWeightedArgMax" -"avgWeightedArgMin" -"avgWeightedArray" -"avgWeightedDistinct" -"avgWeightedForEach" -"avgWeightedIf" -"avgWeightedMap" -"avgWeightedMerge" -"avgWeightedNull" -"avgWeightedOrDefault" -"avgWeightedOrNull" -"avgWeightedResample" -"avgWeightedSimpleState" -"avgWeightedState" -"bar" -"base58Decode" -"base58Encode" -"base64Decode" -"base64Encode" -"base64URLDecode" -"base64URLEncode" -"basename" -"bin" -"bitAnd" -"bitCount" -"bitHammingDistance" -"bitNot" -"bitOr" -"bitPositionsToArray" -"bitRotateLeft" -"bitRotateRight" -"bitShiftLeft" -"bitShiftRight" -"bitSlice" -"bitTest" -"bitTestAll" -"bitTestAny" -"bitXor" -"bitmapAnd" -"bitmapAndCardinality" -"bitmapAndnot" -"bitmapAndnotCardinality" -"bitmapBuild" -"bitmapCardinality" -"bitmapContains" -"bitmapHasAll" -"bitmapHasAny" -"bitmapMax" -"bitmapMin" -"bitmapOr" -"bitmapOrCardinality" -"bitmapSubsetInRange" -"bitmapSubsetLimit" -"bitmapToArray" -"bitmapTransform" -"bitmapXor" -"bitmapXorCardinality" -"bitmaskToArray" -"bitmaskToList" -"blockNumber" -"blockSerializedSize" -"blockSize" -"boundingRatio" -"boundingRatioArgMax" -"boundingRatioArgMin" -"boundingRatioArray" -"boundingRatioDistinct" -"boundingRatioForEach" -"boundingRatioIf" -"boundingRatioMap" -"boundingRatioMerge" -"boundingRatioNull" -"boundingRatioOrDefault" -"boundingRatioOrNull" -"boundingRatioResample" -"boundingRatioSimpleState" -"boundingRatioState" -"buildId" -"byteHammingDistance" -"byteSize" -"byteSlice" -"byteSwap" -"caseWithExpr" -"caseWithExpression" -"caseWithoutExpr" -"caseWithoutExpression" -"catboostEvaluate" -"categoricalInformationValue" -"categoricalInformationValueArgMax" -"categoricalInformationValueArgMin" -"categoricalInformationValueArray" -"categoricalInformationValueDistinct" -"categoricalInformationValueForEach" -"categoricalInformationValueIf" -"categoricalInformationValueMap" -"categoricalInformationValueMerge" -"categoricalInformationValueNull" -"categoricalInformationValueOrDefault" -"categoricalInformationValueOrNull" -"categoricalInformationValueResample" -"categoricalInformationValueSimpleState" -"categoricalInformationValueState" -"cbrt" -"ceil" -"ceiling" -"changeDay" -"changeHour" -"changeMinute" -"changeMonth" -"changeSecond" -"changeYear" -"char" -"cityHash64" -"clamp" -"coalesce" -"concat" -"concatAssumeInjective" -"concatWithSeparator" -"concatWithSeparatorAssumeInjective" -"concat_ws" -"connectionId" -"connection_id" -"contingency" -"contingencyArgMax" -"contingencyArgMin" -"contingencyArray" -"contingencyDistinct" -"contingencyForEach" -"contingencyIf" -"contingencyMap" -"contingencyMerge" -"contingencyNull" -"contingencyOrDefault" -"contingencyOrNull" -"contingencyResample" -"contingencySimpleState" -"contingencyState" -"convertCharset" -"corr" -"corrArgMax" -"corrArgMin" -"corrArray" -"corrDistinct" -"corrForEach" -"corrIf" -"corrMap" -"corrMatrix" -"corrMatrixArgMax" -"corrMatrixArgMin" -"corrMatrixArray" -"corrMatrixDistinct" -"corrMatrixForEach" -"corrMatrixIf" -"corrMatrixMap" -"corrMatrixMerge" -"corrMatrixNull" -"corrMatrixOrDefault" -"corrMatrixOrNull" -"corrMatrixResample" -"corrMatrixSimpleState" -"corrMatrixState" -"corrMerge" -"corrNull" -"corrOrDefault" -"corrOrNull" -"corrResample" -"corrSimpleState" -"corrStable" -"corrStableArgMax" -"corrStableArgMin" -"corrStableArray" -"corrStableDistinct" -"corrStableForEach" -"corrStableIf" -"corrStableMap" -"corrStableMerge" -"corrStableNull" -"corrStableOrDefault" 
-"corrStableOrNull" -"corrStableResample" -"corrStableSimpleState" -"corrStableState" -"corrState" -"cos" -"cosh" -"cosineDistance" -"count" -"countArgMax" -"countArgMin" -"countArray" -"countDigits" -"countDistinct" -"countEqual" -"countForEach" -"countIf" -"countMap" -"countMatches" -"countMatchesCaseInsensitive" -"countMerge" -"countNull" -"countOrDefault" -"countOrNull" -"countResample" -"countSimpleState" -"countState" -"countSubstrings" -"countSubstringsCaseInsensitive" -"countSubstringsCaseInsensitiveUTF8" -"covarPop" -"covarPopArgMax" -"covarPopArgMin" -"covarPopArray" -"covarPopDistinct" -"covarPopForEach" -"covarPopIf" -"covarPopMap" -"covarPopMatrix" -"covarPopMatrixArgMax" -"covarPopMatrixArgMin" -"covarPopMatrixArray" -"covarPopMatrixDistinct" -"covarPopMatrixForEach" -"covarPopMatrixIf" -"covarPopMatrixMap" -"covarPopMatrixMerge" -"covarPopMatrixNull" -"covarPopMatrixOrDefault" -"covarPopMatrixOrNull" -"covarPopMatrixResample" -"covarPopMatrixSimpleState" -"covarPopMatrixState" -"covarPopMerge" -"covarPopNull" -"covarPopOrDefault" -"covarPopOrNull" -"covarPopResample" -"covarPopSimpleState" -"covarPopStable" -"covarPopStableArgMax" -"covarPopStableArgMin" -"covarPopStableArray" -"covarPopStableDistinct" -"covarPopStableForEach" -"covarPopStableIf" -"covarPopStableMap" -"covarPopStableMerge" -"covarPopStableNull" -"covarPopStableOrDefault" -"covarPopStableOrNull" -"covarPopStableResample" -"covarPopStableSimpleState" -"covarPopStableState" -"covarPopState" -"covarSamp" -"covarSampArgMax" -"covarSampArgMin" -"covarSampArray" -"covarSampDistinct" -"covarSampForEach" -"covarSampIf" -"covarSampMap" -"covarSampMatrix" -"covarSampMatrixArgMax" -"covarSampMatrixArgMin" -"covarSampMatrixArray" -"covarSampMatrixDistinct" -"covarSampMatrixForEach" -"covarSampMatrixIf" -"covarSampMatrixMap" -"covarSampMatrixMerge" -"covarSampMatrixNull" -"covarSampMatrixOrDefault" -"covarSampMatrixOrNull" -"covarSampMatrixResample" -"covarSampMatrixSimpleState" -"covarSampMatrixState" -"covarSampMerge" -"covarSampNull" -"covarSampOrDefault" -"covarSampOrNull" -"covarSampResample" -"covarSampSimpleState" -"covarSampStable" -"covarSampStableArgMax" -"covarSampStableArgMin" -"covarSampStableArray" -"covarSampStableDistinct" -"covarSampStableForEach" -"covarSampStableIf" -"covarSampStableMap" -"covarSampStableMerge" -"covarSampStableNull" -"covarSampStableOrDefault" -"covarSampStableOrNull" -"covarSampStableResample" -"covarSampStableSimpleState" -"covarSampStableState" -"covarSampState" -"cramersV" -"cramersVArgMax" -"cramersVArgMin" -"cramersVArray" -"cramersVBiasCorrected" -"cramersVBiasCorrectedArgMax" -"cramersVBiasCorrectedArgMin" -"cramersVBiasCorrectedArray" -"cramersVBiasCorrectedDistinct" -"cramersVBiasCorrectedForEach" -"cramersVBiasCorrectedIf" -"cramersVBiasCorrectedMap" -"cramersVBiasCorrectedMerge" -"cramersVBiasCorrectedNull" -"cramersVBiasCorrectedOrDefault" -"cramersVBiasCorrectedOrNull" -"cramersVBiasCorrectedResample" -"cramersVBiasCorrectedSimpleState" -"cramersVBiasCorrectedState" -"cramersVDistinct" -"cramersVForEach" -"cramersVIf" -"cramersVMap" -"cramersVMerge" -"cramersVNull" -"cramersVOrDefault" -"cramersVOrNull" -"cramersVResample" -"cramersVSimpleState" -"cramersVState" -"curdate" -"currentDatabase" -"currentProfiles" -"currentRoles" -"currentSchemas" -"currentUser" -"current_database" -"current_date" -"current_schemas" -"current_timestamp" -"current_user" -"cutFragment" -"cutIPv6" -"cutQueryString" -"cutQueryStringAndFragment" -"cutToFirstSignificantSubdomain" 
-"cutToFirstSignificantSubdomainCustom" -"cutToFirstSignificantSubdomainCustomRFC" -"cutToFirstSignificantSubdomainCustomWithWWW" -"cutToFirstSignificantSubdomainCustomWithWWWRFC" -"cutToFirstSignificantSubdomainRFC" -"cutToFirstSignificantSubdomainWithWWW" -"cutToFirstSignificantSubdomainWithWWWRFC" -"cutURLParameter" -"cutWWW" -"damerauLevenshteinDistance" -"dateDiff" -"dateName" -"dateTime64ToSnowflake" -"dateTime64ToSnowflakeID" -"dateTimeToSnowflake" -"dateTimeToSnowflakeID" -"dateTrunc" -"date_diff" -"decodeHTMLComponent" -"decodeURLComponent" -"decodeURLFormComponent" -"decodeXMLComponent" -"decrypt" -"defaultProfiles" -"defaultRoles" -"defaultValueOfArgumentType" -"defaultValueOfTypeName" -"degrees" -"deltaSum" -"deltaSumArgMax" -"deltaSumArgMin" -"deltaSumArray" -"deltaSumDistinct" -"deltaSumForEach" -"deltaSumIf" -"deltaSumMap" -"deltaSumMerge" -"deltaSumNull" -"deltaSumOrDefault" -"deltaSumOrNull" -"deltaSumResample" -"deltaSumSimpleState" -"deltaSumState" -"deltaSumTimestamp" -"deltaSumTimestampArgMax" -"deltaSumTimestampArgMin" -"deltaSumTimestampArray" -"deltaSumTimestampDistinct" -"deltaSumTimestampForEach" -"deltaSumTimestampIf" -"deltaSumTimestampMap" -"deltaSumTimestampMerge" -"deltaSumTimestampNull" -"deltaSumTimestampOrDefault" -"deltaSumTimestampOrNull" -"deltaSumTimestampResample" -"deltaSumTimestampSimpleState" -"deltaSumTimestampState" -"demangle" -"denseRank" -"denseRankArgMax" -"denseRankArgMin" -"denseRankArray" -"denseRankDistinct" -"denseRankForEach" -"denseRankIf" -"denseRankMap" -"denseRankMerge" -"denseRankNull" -"denseRankOrDefault" -"denseRankOrNull" -"denseRankResample" -"denseRankSimpleState" -"denseRankState" -"dense_rank" -"dense_rankArgMax" -"dense_rankArgMin" -"dense_rankArray" -"dense_rankDistinct" -"dense_rankForEach" -"dense_rankIf" -"dense_rankMap" -"dense_rankMerge" -"dense_rankNull" -"dense_rankOrDefault" -"dense_rankOrNull" -"dense_rankResample" -"dense_rankSimpleState" -"dense_rankState" -"detectCharset" -"detectLanguage" -"detectLanguageMixed" -"detectLanguageUnknown" -"detectProgrammingLanguage" -"detectTonality" -"dictGet" -"dictGetAll" -"dictGetChildren" -"dictGetDate" -"dictGetDateOrDefault" -"dictGetDateTime" -"dictGetDateTimeOrDefault" -"dictGetDescendants" -"dictGetFloat32" -"dictGetFloat32OrDefault" -"dictGetFloat64" -"dictGetFloat64OrDefault" -"dictGetHierarchy" -"dictGetIPv4" -"dictGetIPv4OrDefault" -"dictGetIPv6" -"dictGetIPv6OrDefault" -"dictGetInt16" -"dictGetInt16OrDefault" -"dictGetInt32" -"dictGetInt32OrDefault" -"dictGetInt64" -"dictGetInt64OrDefault" -"dictGetInt8" -"dictGetInt8OrDefault" -"dictGetOrDefault" -"dictGetOrNull" -"dictGetString" -"dictGetStringOrDefault" -"dictGetUInt16" -"dictGetUInt16OrDefault" -"dictGetUInt32" -"dictGetUInt32OrDefault" -"dictGetUInt64" -"dictGetUInt64OrDefault" -"dictGetUInt8" -"dictGetUInt8OrDefault" -"dictGetUUID" -"dictGetUUIDOrDefault" -"dictHas" -"dictIsIn" -"displayName" -"distanceL1" -"distanceL2" -"distanceL2Squared" -"distanceLinf" -"distanceLp" -"divide" -"divideDecimal" -"domain" -"domainRFC" -"domainWithoutWWW" -"domainWithoutWWWRFC" -"dotProduct" -"dumpColumnStructure" -"dynamicElement" -"dynamicType" -"e" -"editDistance" -"editDistanceUTF8" -"empty" -"emptyArrayDate" -"emptyArrayDateTime" -"emptyArrayFloat32" -"emptyArrayFloat64" -"emptyArrayInt16" -"emptyArrayInt32" -"emptyArrayInt64" -"emptyArrayInt8" -"emptyArrayString" -"emptyArrayToSingle" -"emptyArrayUInt16" -"emptyArrayUInt32" -"emptyArrayUInt64" -"emptyArrayUInt8" -"enabledProfiles" -"enabledRoles" -"encodeURLComponent" 
-"encodeURLFormComponent" -"encodeXMLComponent" -"encrypt" -"endsWith" -"endsWithUTF8" -"entropy" -"entropyArgMax" -"entropyArgMin" -"entropyArray" -"entropyDistinct" -"entropyForEach" -"entropyIf" -"entropyMap" -"entropyMerge" -"entropyNull" -"entropyOrDefault" -"entropyOrNull" -"entropyResample" -"entropySimpleState" -"entropyState" -"equals" -"erf" -"erfc" -"errorCodeToName" -"evalMLMethod" -"exp" -"exp10" -"exp2" -"exponentialMovingAverage" -"exponentialMovingAverageArgMax" -"exponentialMovingAverageArgMin" -"exponentialMovingAverageArray" -"exponentialMovingAverageDistinct" -"exponentialMovingAverageForEach" -"exponentialMovingAverageIf" -"exponentialMovingAverageMap" -"exponentialMovingAverageMerge" -"exponentialMovingAverageNull" -"exponentialMovingAverageOrDefault" -"exponentialMovingAverageOrNull" -"exponentialMovingAverageResample" -"exponentialMovingAverageSimpleState" -"exponentialMovingAverageState" -"exponentialTimeDecayedAvg" -"exponentialTimeDecayedAvgArgMax" -"exponentialTimeDecayedAvgArgMin" -"exponentialTimeDecayedAvgArray" -"exponentialTimeDecayedAvgDistinct" -"exponentialTimeDecayedAvgForEach" -"exponentialTimeDecayedAvgIf" -"exponentialTimeDecayedAvgMap" -"exponentialTimeDecayedAvgMerge" -"exponentialTimeDecayedAvgNull" -"exponentialTimeDecayedAvgOrDefault" -"exponentialTimeDecayedAvgOrNull" -"exponentialTimeDecayedAvgResample" -"exponentialTimeDecayedAvgSimpleState" -"exponentialTimeDecayedAvgState" -"exponentialTimeDecayedCount" -"exponentialTimeDecayedCountArgMax" -"exponentialTimeDecayedCountArgMin" -"exponentialTimeDecayedCountArray" -"exponentialTimeDecayedCountDistinct" -"exponentialTimeDecayedCountForEach" -"exponentialTimeDecayedCountIf" -"exponentialTimeDecayedCountMap" -"exponentialTimeDecayedCountMerge" -"exponentialTimeDecayedCountNull" -"exponentialTimeDecayedCountOrDefault" -"exponentialTimeDecayedCountOrNull" -"exponentialTimeDecayedCountResample" -"exponentialTimeDecayedCountSimpleState" -"exponentialTimeDecayedCountState" -"exponentialTimeDecayedMax" -"exponentialTimeDecayedMaxArgMax" -"exponentialTimeDecayedMaxArgMin" -"exponentialTimeDecayedMaxArray" -"exponentialTimeDecayedMaxDistinct" -"exponentialTimeDecayedMaxForEach" -"exponentialTimeDecayedMaxIf" -"exponentialTimeDecayedMaxMap" -"exponentialTimeDecayedMaxMerge" -"exponentialTimeDecayedMaxNull" -"exponentialTimeDecayedMaxOrDefault" -"exponentialTimeDecayedMaxOrNull" -"exponentialTimeDecayedMaxResample" -"exponentialTimeDecayedMaxSimpleState" -"exponentialTimeDecayedMaxState" -"exponentialTimeDecayedSum" -"exponentialTimeDecayedSumArgMax" -"exponentialTimeDecayedSumArgMin" -"exponentialTimeDecayedSumArray" -"exponentialTimeDecayedSumDistinct" -"exponentialTimeDecayedSumForEach" -"exponentialTimeDecayedSumIf" -"exponentialTimeDecayedSumMap" -"exponentialTimeDecayedSumMerge" -"exponentialTimeDecayedSumNull" -"exponentialTimeDecayedSumOrDefault" -"exponentialTimeDecayedSumOrNull" -"exponentialTimeDecayedSumResample" -"exponentialTimeDecayedSumSimpleState" -"exponentialTimeDecayedSumState" -"extract" -"extractAll" -"extractAllGroups" -"extractAllGroupsHorizontal" -"extractAllGroupsVertical" -"extractGroups" -"extractKeyValuePairs" -"extractKeyValuePairsWithEscaping" -"extractTextFromHTML" -"extractURLParameter" -"extractURLParameterNames" -"extractURLParameters" -"factorial" -"farmFingerprint64" -"farmHash64" -"file" -"filesystemAvailable" -"filesystemCapacity" -"filesystemUnreserved" -"finalizeAggregation" -"firstLine" -"firstSignificantSubdomain" -"firstSignificantSubdomainCustom" 
-"firstSignificantSubdomainCustomRFC" -"firstSignificantSubdomainRFC" -"first_value" -"first_valueArgMax" -"first_valueArgMin" -"first_valueArray" -"first_valueDistinct" -"first_valueForEach" -"first_valueIf" -"first_valueMap" -"first_valueMerge" -"first_valueNull" -"first_valueOrDefault" -"first_valueOrNull" -"first_valueResample" -"first_valueSimpleState" -"first_valueState" -"first_value_respect_nulls" -"first_value_respect_nullsArgMax" -"first_value_respect_nullsArgMin" -"first_value_respect_nullsArray" -"first_value_respect_nullsDistinct" -"first_value_respect_nullsForEach" -"first_value_respect_nullsIf" -"first_value_respect_nullsMap" -"first_value_respect_nullsMerge" -"first_value_respect_nullsNull" -"first_value_respect_nullsOrDefault" -"first_value_respect_nullsOrNull" -"first_value_respect_nullsResample" -"first_value_respect_nullsSimpleState" -"first_value_respect_nullsState" -"flameGraph" -"flameGraphArgMax" -"flameGraphArgMin" -"flameGraphArray" -"flameGraphDistinct" -"flameGraphForEach" -"flameGraphIf" -"flameGraphMap" -"flameGraphMerge" -"flameGraphNull" -"flameGraphOrDefault" -"flameGraphOrNull" -"flameGraphResample" -"flameGraphSimpleState" -"flameGraphState" -"flatten" -"flattenTuple" -"floor" -"format" -"formatDateTime" -"formatDateTimeInJodaSyntax" -"formatQuery" -"formatQueryOrNull" -"formatQuerySingleLine" -"formatQuerySingleLineOrNull" -"formatReadableDecimalSize" -"formatReadableQuantity" -"formatReadableSize" -"formatReadableTimeDelta" -"formatRow" -"formatRowNoNewline" -"fragment" -"fromDaysSinceYearZero" -"fromDaysSinceYearZero32" -"fromModifiedJulianDay" -"fromModifiedJulianDayOrNull" -"fromUTCTimestamp" -"fromUnixTimestamp" -"fromUnixTimestamp64Micro" -"fromUnixTimestamp64Milli" -"fromUnixTimestamp64Nano" -"fromUnixTimestampInJodaSyntax" -"from_utc_timestamp" -"fullHostName" -"fuzzBits" -"gccMurmurHash" -"gcd" -"generateRandomStructure" -"generateSnowflakeID" -"generateULID" -"generateUUIDv4" -"generateUUIDv7" -"geoDistance" -"geoToH3" -"geoToS2" -"geohashDecode" -"geohashEncode" -"geohashesInBox" -"getClientHTTPHeader" -"getMacro" -"getOSKernelVersion" -"getServerPort" -"getSetting" -"getSizeOfEnumType" -"getSubcolumn" -"getTypeSerializationStreams" -"globalIn" -"globalInIgnoreSet" -"globalNotIn" -"globalNotInIgnoreSet" -"globalNotNullIn" -"globalNotNullInIgnoreSet" -"globalNullIn" -"globalNullInIgnoreSet" -"globalVariable" -"greatCircleAngle" -"greatCircleDistance" -"greater" -"greaterOrEquals" -"greatest" -"groupArray" -"groupArrayArgMax" -"groupArrayArgMin" -"groupArrayArray" -"groupArrayDistinct" -"groupArrayForEach" -"groupArrayIf" -"groupArrayInsertAt" -"groupArrayInsertAtArgMax" -"groupArrayInsertAtArgMin" -"groupArrayInsertAtArray" -"groupArrayInsertAtDistinct" -"groupArrayInsertAtForEach" -"groupArrayInsertAtIf" -"groupArrayInsertAtMap" -"groupArrayInsertAtMerge" -"groupArrayInsertAtNull" -"groupArrayInsertAtOrDefault" -"groupArrayInsertAtOrNull" -"groupArrayInsertAtResample" -"groupArrayInsertAtSimpleState" -"groupArrayInsertAtState" -"groupArrayIntersect" -"groupArrayIntersectArgMax" -"groupArrayIntersectArgMin" -"groupArrayIntersectArray" -"groupArrayIntersectDistinct" -"groupArrayIntersectForEach" -"groupArrayIntersectIf" -"groupArrayIntersectMap" -"groupArrayIntersectMerge" -"groupArrayIntersectNull" -"groupArrayIntersectOrDefault" -"groupArrayIntersectOrNull" -"groupArrayIntersectResample" -"groupArrayIntersectSimpleState" -"groupArrayIntersectState" -"groupArrayLast" -"groupArrayLastArgMax" -"groupArrayLastArgMin" -"groupArrayLastArray" 
-"groupArrayLastDistinct" -"groupArrayLastForEach" -"groupArrayLastIf" -"groupArrayLastMap" -"groupArrayLastMerge" -"groupArrayLastNull" -"groupArrayLastOrDefault" -"groupArrayLastOrNull" -"groupArrayLastResample" -"groupArrayLastSimpleState" -"groupArrayLastState" -"groupArrayMap" -"groupArrayMerge" -"groupArrayMovingAvg" -"groupArrayMovingAvgArgMax" -"groupArrayMovingAvgArgMin" -"groupArrayMovingAvgArray" -"groupArrayMovingAvgDistinct" -"groupArrayMovingAvgForEach" -"groupArrayMovingAvgIf" -"groupArrayMovingAvgMap" -"groupArrayMovingAvgMerge" -"groupArrayMovingAvgNull" -"groupArrayMovingAvgOrDefault" -"groupArrayMovingAvgOrNull" -"groupArrayMovingAvgResample" -"groupArrayMovingAvgSimpleState" -"groupArrayMovingAvgState" -"groupArrayMovingSum" -"groupArrayMovingSumArgMax" -"groupArrayMovingSumArgMin" -"groupArrayMovingSumArray" -"groupArrayMovingSumDistinct" -"groupArrayMovingSumForEach" -"groupArrayMovingSumIf" -"groupArrayMovingSumMap" -"groupArrayMovingSumMerge" -"groupArrayMovingSumNull" -"groupArrayMovingSumOrDefault" -"groupArrayMovingSumOrNull" -"groupArrayMovingSumResample" -"groupArrayMovingSumSimpleState" -"groupArrayMovingSumState" -"groupArrayNull" -"groupArrayOrDefault" -"groupArrayOrNull" -"groupArrayResample" -"groupArraySample" -"groupArraySampleArgMax" -"groupArraySampleArgMin" -"groupArraySampleArray" -"groupArraySampleDistinct" -"groupArraySampleForEach" -"groupArraySampleIf" -"groupArraySampleMap" -"groupArraySampleMerge" -"groupArraySampleNull" -"groupArraySampleOrDefault" -"groupArraySampleOrNull" -"groupArraySampleResample" -"groupArraySampleSimpleState" -"groupArraySampleState" -"groupArraySimpleState" -"groupArraySorted" -"groupArraySortedArgMax" -"groupArraySortedArgMin" -"groupArraySortedArray" -"groupArraySortedDistinct" -"groupArraySortedForEach" -"groupArraySortedIf" -"groupArraySortedMap" -"groupArraySortedMerge" -"groupArraySortedNull" -"groupArraySortedOrDefault" -"groupArraySortedOrNull" -"groupArraySortedResample" -"groupArraySortedSimpleState" -"groupArraySortedState" -"groupArrayState" -"groupBitAnd" -"groupBitAndArgMax" -"groupBitAndArgMin" -"groupBitAndArray" -"groupBitAndDistinct" -"groupBitAndForEach" -"groupBitAndIf" -"groupBitAndMap" -"groupBitAndMerge" -"groupBitAndNull" -"groupBitAndOrDefault" -"groupBitAndOrNull" -"groupBitAndResample" -"groupBitAndSimpleState" -"groupBitAndState" -"groupBitOr" -"groupBitOrArgMax" -"groupBitOrArgMin" -"groupBitOrArray" -"groupBitOrDistinct" -"groupBitOrForEach" -"groupBitOrIf" -"groupBitOrMap" -"groupBitOrMerge" -"groupBitOrNull" -"groupBitOrOrDefault" -"groupBitOrOrNull" -"groupBitOrResample" -"groupBitOrSimpleState" -"groupBitOrState" -"groupBitXor" -"groupBitXorArgMax" -"groupBitXorArgMin" -"groupBitXorArray" -"groupBitXorDistinct" -"groupBitXorForEach" -"groupBitXorIf" -"groupBitXorMap" -"groupBitXorMerge" -"groupBitXorNull" -"groupBitXorOrDefault" -"groupBitXorOrNull" -"groupBitXorResample" -"groupBitXorSimpleState" -"groupBitXorState" -"groupBitmap" -"groupBitmapAnd" -"groupBitmapAndArgMax" -"groupBitmapAndArgMin" -"groupBitmapAndArray" -"groupBitmapAndDistinct" -"groupBitmapAndForEach" -"groupBitmapAndIf" -"groupBitmapAndMap" -"groupBitmapAndMerge" -"groupBitmapAndNull" -"groupBitmapAndOrDefault" -"groupBitmapAndOrNull" -"groupBitmapAndResample" -"groupBitmapAndSimpleState" -"groupBitmapAndState" -"groupBitmapArgMax" -"groupBitmapArgMin" -"groupBitmapArray" -"groupBitmapDistinct" -"groupBitmapForEach" -"groupBitmapIf" -"groupBitmapMap" -"groupBitmapMerge" -"groupBitmapNull" -"groupBitmapOr" 
-"groupBitmapOrArgMax" -"groupBitmapOrArgMin" -"groupBitmapOrArray" -"groupBitmapOrDefault" -"groupBitmapOrDistinct" -"groupBitmapOrForEach" -"groupBitmapOrIf" -"groupBitmapOrMap" -"groupBitmapOrMerge" -"groupBitmapOrNull" -"groupBitmapOrNull" -"groupBitmapOrOrDefault" -"groupBitmapOrOrNull" -"groupBitmapOrResample" -"groupBitmapOrSimpleState" -"groupBitmapOrState" -"groupBitmapResample" -"groupBitmapSimpleState" -"groupBitmapState" -"groupBitmapXor" -"groupBitmapXorArgMax" -"groupBitmapXorArgMin" -"groupBitmapXorArray" -"groupBitmapXorDistinct" -"groupBitmapXorForEach" -"groupBitmapXorIf" -"groupBitmapXorMap" -"groupBitmapXorMerge" -"groupBitmapXorNull" -"groupBitmapXorOrDefault" -"groupBitmapXorOrNull" -"groupBitmapXorResample" -"groupBitmapXorSimpleState" -"groupBitmapXorState" -"groupConcat" -"groupConcatArgMax" -"groupConcatArgMin" -"groupConcatArray" -"groupConcatDistinct" -"groupConcatForEach" -"groupConcatIf" -"groupConcatMap" -"groupConcatMerge" -"groupConcatNull" -"groupConcatOrDefault" -"groupConcatOrNull" -"groupConcatResample" -"groupConcatSimpleState" -"groupConcatState" -"groupUniqArray" -"groupUniqArrayArgMax" -"groupUniqArrayArgMin" -"groupUniqArrayArray" -"groupUniqArrayDistinct" -"groupUniqArrayForEach" -"groupUniqArrayIf" -"groupUniqArrayMap" -"groupUniqArrayMerge" -"groupUniqArrayNull" -"groupUniqArrayOrDefault" -"groupUniqArrayOrNull" -"groupUniqArrayResample" -"groupUniqArraySimpleState" -"groupUniqArrayState" -"group_concat" -"group_concatArgMax" -"group_concatArgMin" -"group_concatArray" -"group_concatDistinct" -"group_concatForEach" -"group_concatIf" -"group_concatMap" -"group_concatMerge" -"group_concatNull" -"group_concatOrDefault" -"group_concatOrNull" -"group_concatResample" -"group_concatSimpleState" -"group_concatState" -"h3CellAreaM2" -"h3CellAreaRads2" -"h3Distance" -"h3EdgeAngle" -"h3EdgeLengthKm" -"h3EdgeLengthM" -"h3ExactEdgeLengthKm" -"h3ExactEdgeLengthM" -"h3ExactEdgeLengthRads" -"h3GetBaseCell" -"h3GetDestinationIndexFromUnidirectionalEdge" -"h3GetFaces" -"h3GetIndexesFromUnidirectionalEdge" -"h3GetOriginIndexFromUnidirectionalEdge" -"h3GetPentagonIndexes" -"h3GetRes0Indexes" -"h3GetResolution" -"h3GetUnidirectionalEdge" -"h3GetUnidirectionalEdgeBoundary" -"h3GetUnidirectionalEdgesFromHexagon" -"h3HexAreaKm2" -"h3HexAreaM2" -"h3HexRing" -"h3IndexesAreNeighbors" -"h3IsPentagon" -"h3IsResClassIII" -"h3IsValid" -"h3Line" -"h3NumHexagons" -"h3PointDistKm" -"h3PointDistM" -"h3PointDistRads" -"h3ToCenterChild" -"h3ToChildren" -"h3ToGeo" -"h3ToGeoBoundary" -"h3ToParent" -"h3ToString" -"h3UnidirectionalEdgeIsValid" -"h3kRing" -"halfMD5" -"has" -"hasAll" -"hasAny" -"hasColumnInTable" -"hasSubsequence" -"hasSubsequenceCaseInsensitive" -"hasSubsequenceCaseInsensitiveUTF8" -"hasSubsequenceUTF8" -"hasSubstr" -"hasThreadFuzzer" -"hasToken" -"hasTokenCaseInsensitive" -"hasTokenCaseInsensitiveOrNull" -"hasTokenOrNull" -"hex" -"hilbertDecode" -"hilbertEncode" -"histogram" -"histogramArgMax" -"histogramArgMin" -"histogramArray" -"histogramDistinct" -"histogramForEach" -"histogramIf" -"histogramMap" -"histogramMerge" -"histogramNull" -"histogramOrDefault" -"histogramOrNull" -"histogramResample" -"histogramSimpleState" -"histogramState" -"hiveHash" -"hop" -"hopEnd" -"hopStart" -"hostName" -"hostname" -"hypot" -"identity" -"idnaDecode" -"idnaEncode" -"if" -"ifNotFinite" -"ifNull" -"ignore" -"ilike" -"in" -"inIgnoreSet" -"indexHint" -"indexOf" -"initcap" -"initcapUTF8" -"initialQueryID" -"initial_query_id" -"initializeAggregation" -"instr" -"intDiv" -"intDivOrZero" 
-"intExp10" -"intExp2" -"intHash32" -"intHash64" -"intervalLengthSum" -"intervalLengthSumArgMax" -"intervalLengthSumArgMin" -"intervalLengthSumArray" -"intervalLengthSumDistinct" -"intervalLengthSumForEach" -"intervalLengthSumIf" -"intervalLengthSumMap" -"intervalLengthSumMerge" -"intervalLengthSumNull" -"intervalLengthSumOrDefault" -"intervalLengthSumOrNull" -"intervalLengthSumResample" -"intervalLengthSumSimpleState" -"intervalLengthSumState" -"isConstant" -"isDecimalOverflow" -"isFinite" -"isIPAddressInRange" -"isIPv4String" -"isIPv6String" -"isInfinite" -"isNaN" -"isNotDistinctFrom" -"isNotNull" -"isNull" -"isNullable" -"isValidJSON" -"isValidUTF8" -"isZeroOrNull" -"jaroSimilarity" -"jaroWinklerSimilarity" -"javaHash" -"javaHashUTF16LE" -"joinGet" -"joinGetOrNull" -"jsonMergePatch" -"jumpConsistentHash" -"kafkaMurmurHash" -"kolmogorovSmirnovTest" -"kolmogorovSmirnovTestArgMax" -"kolmogorovSmirnovTestArgMin" -"kolmogorovSmirnovTestArray" -"kolmogorovSmirnovTestDistinct" -"kolmogorovSmirnovTestForEach" -"kolmogorovSmirnovTestIf" -"kolmogorovSmirnovTestMap" -"kolmogorovSmirnovTestMerge" -"kolmogorovSmirnovTestNull" -"kolmogorovSmirnovTestOrDefault" -"kolmogorovSmirnovTestOrNull" -"kolmogorovSmirnovTestResample" -"kolmogorovSmirnovTestSimpleState" -"kolmogorovSmirnovTestState" -"kostikConsistentHash" -"kql_array_sort_asc" -"kql_array_sort_desc" -"kurtPop" -"kurtPopArgMax" -"kurtPopArgMin" -"kurtPopArray" -"kurtPopDistinct" -"kurtPopForEach" -"kurtPopIf" -"kurtPopMap" -"kurtPopMerge" -"kurtPopNull" -"kurtPopOrDefault" -"kurtPopOrNull" -"kurtPopResample" -"kurtPopSimpleState" -"kurtPopState" -"kurtSamp" -"kurtSampArgMax" -"kurtSampArgMin" -"kurtSampArray" -"kurtSampDistinct" -"kurtSampForEach" -"kurtSampIf" -"kurtSampMap" -"kurtSampMerge" -"kurtSampNull" -"kurtSampOrDefault" -"kurtSampOrNull" -"kurtSampResample" -"kurtSampSimpleState" -"kurtSampState" -"lagInFrame" -"lagInFrameArgMax" -"lagInFrameArgMin" -"lagInFrameArray" -"lagInFrameDistinct" -"lagInFrameForEach" -"lagInFrameIf" -"lagInFrameMap" -"lagInFrameMerge" -"lagInFrameNull" -"lagInFrameOrDefault" -"lagInFrameOrNull" -"lagInFrameResample" -"lagInFrameSimpleState" -"lagInFrameState" -"largestTriangleThreeBuckets" -"largestTriangleThreeBucketsArgMax" -"largestTriangleThreeBucketsArgMin" -"largestTriangleThreeBucketsArray" -"largestTriangleThreeBucketsDistinct" -"largestTriangleThreeBucketsForEach" -"largestTriangleThreeBucketsIf" -"largestTriangleThreeBucketsMap" -"largestTriangleThreeBucketsMerge" -"largestTriangleThreeBucketsNull" -"largestTriangleThreeBucketsOrDefault" -"largestTriangleThreeBucketsOrNull" -"largestTriangleThreeBucketsResample" -"largestTriangleThreeBucketsSimpleState" -"largestTriangleThreeBucketsState" -"last_value" -"last_valueArgMax" -"last_valueArgMin" -"last_valueArray" -"last_valueDistinct" -"last_valueForEach" -"last_valueIf" -"last_valueMap" -"last_valueMerge" -"last_valueNull" -"last_valueOrDefault" -"last_valueOrNull" -"last_valueResample" -"last_valueSimpleState" -"last_valueState" -"last_value_respect_nulls" -"last_value_respect_nullsArgMax" -"last_value_respect_nullsArgMin" -"last_value_respect_nullsArray" -"last_value_respect_nullsDistinct" -"last_value_respect_nullsForEach" -"last_value_respect_nullsIf" -"last_value_respect_nullsMap" -"last_value_respect_nullsMerge" -"last_value_respect_nullsNull" -"last_value_respect_nullsOrDefault" -"last_value_respect_nullsOrNull" -"last_value_respect_nullsResample" -"last_value_respect_nullsSimpleState" -"last_value_respect_nullsState" -"lcase" -"lcm" 
-"leadInFrame" -"leadInFrameArgMax" -"leadInFrameArgMin" -"leadInFrameArray" -"leadInFrameDistinct" -"leadInFrameForEach" -"leadInFrameIf" -"leadInFrameMap" -"leadInFrameMerge" -"leadInFrameNull" -"leadInFrameOrDefault" -"leadInFrameOrNull" -"leadInFrameResample" -"leadInFrameSimpleState" -"leadInFrameState" -"least" -"left" -"leftPad" -"leftPadUTF8" -"leftUTF8" -"lemmatize" -"length" -"lengthUTF8" -"less" -"lessOrEquals" -"levenshteinDistance" -"levenshteinDistanceUTF8" -"lgamma" -"like" -"ln" -"locate" -"log" -"log10" -"log1p" -"log2" -"logTrace" -"lowCardinalityIndices" -"lowCardinalityKeys" -"lower" -"lowerUTF8" -"lpad" -"ltrim" -"lttb" -"lttbArgMax" -"lttbArgMin" -"lttbArray" -"lttbDistinct" -"lttbForEach" -"lttbIf" -"lttbMap" -"lttbMerge" -"lttbNull" -"lttbOrDefault" -"lttbOrNull" -"lttbResample" -"lttbSimpleState" -"lttbState" -"makeDate" -"makeDate32" -"makeDateTime" -"makeDateTime64" -"mannWhitneyUTest" -"mannWhitneyUTestArgMax" -"mannWhitneyUTestArgMin" -"mannWhitneyUTestArray" -"mannWhitneyUTestDistinct" -"mannWhitneyUTestForEach" -"mannWhitneyUTestIf" -"mannWhitneyUTestMap" -"mannWhitneyUTestMerge" -"mannWhitneyUTestNull" -"mannWhitneyUTestOrDefault" -"mannWhitneyUTestOrNull" -"mannWhitneyUTestResample" -"mannWhitneyUTestSimpleState" -"mannWhitneyUTestState" -"map" -"mapAdd" -"mapAll" -"mapApply" -"mapConcat" -"mapContains" -"mapContainsKeyLike" -"mapExists" -"mapExtractKeyLike" -"mapFilter" -"mapFromArrays" -"mapFromString" -"mapKeys" -"mapPartialReverseSort" -"mapPartialSort" -"mapPopulateSeries" -"mapReverseSort" -"mapSort" -"mapSubtract" -"mapUpdate" -"mapValues" -"match" -"materialize" -"max" -"max2" -"maxArgMax" -"maxArgMin" -"maxArray" -"maxDistinct" -"maxForEach" -"maxIf" -"maxIntersections" -"maxIntersectionsArgMax" -"maxIntersectionsArgMin" -"maxIntersectionsArray" -"maxIntersectionsDistinct" -"maxIntersectionsForEach" -"maxIntersectionsIf" -"maxIntersectionsMap" -"maxIntersectionsMerge" -"maxIntersectionsNull" -"maxIntersectionsOrDefault" -"maxIntersectionsOrNull" -"maxIntersectionsPosition" -"maxIntersectionsPositionArgMax" -"maxIntersectionsPositionArgMin" -"maxIntersectionsPositionArray" -"maxIntersectionsPositionDistinct" -"maxIntersectionsPositionForEach" -"maxIntersectionsPositionIf" -"maxIntersectionsPositionMap" -"maxIntersectionsPositionMerge" -"maxIntersectionsPositionNull" -"maxIntersectionsPositionOrDefault" -"maxIntersectionsPositionOrNull" -"maxIntersectionsPositionResample" -"maxIntersectionsPositionSimpleState" -"maxIntersectionsPositionState" -"maxIntersectionsResample" -"maxIntersectionsSimpleState" -"maxIntersectionsState" -"maxMap" -"maxMappedArrays" -"maxMappedArraysArgMax" -"maxMappedArraysArgMin" -"maxMappedArraysArray" -"maxMappedArraysDistinct" -"maxMappedArraysForEach" -"maxMappedArraysIf" -"maxMappedArraysMap" -"maxMappedArraysMerge" -"maxMappedArraysNull" -"maxMappedArraysOrDefault" -"maxMappedArraysOrNull" -"maxMappedArraysResample" -"maxMappedArraysSimpleState" -"maxMappedArraysState" -"maxMerge" -"maxNull" -"maxOrDefault" -"maxOrNull" -"maxResample" -"maxSimpleState" -"maxState" -"meanZTest" -"meanZTestArgMax" -"meanZTestArgMin" -"meanZTestArray" -"meanZTestDistinct" -"meanZTestForEach" -"meanZTestIf" -"meanZTestMap" -"meanZTestMerge" -"meanZTestNull" -"meanZTestOrDefault" -"meanZTestOrNull" -"meanZTestResample" -"meanZTestSimpleState" -"meanZTestState" -"median" -"medianArgMax" -"medianArgMin" -"medianArray" -"medianBFloat16" -"medianBFloat16ArgMax" -"medianBFloat16ArgMin" -"medianBFloat16Array" -"medianBFloat16Distinct" 
-"medianBFloat16ForEach" -"medianBFloat16If" -"medianBFloat16Map" -"medianBFloat16Merge" -"medianBFloat16Null" -"medianBFloat16OrDefault" -"medianBFloat16OrNull" -"medianBFloat16Resample" -"medianBFloat16SimpleState" -"medianBFloat16State" -"medianBFloat16Weighted" -"medianBFloat16WeightedArgMax" -"medianBFloat16WeightedArgMin" -"medianBFloat16WeightedArray" -"medianBFloat16WeightedDistinct" -"medianBFloat16WeightedForEach" -"medianBFloat16WeightedIf" -"medianBFloat16WeightedMap" -"medianBFloat16WeightedMerge" -"medianBFloat16WeightedNull" -"medianBFloat16WeightedOrDefault" -"medianBFloat16WeightedOrNull" -"medianBFloat16WeightedResample" -"medianBFloat16WeightedSimpleState" -"medianBFloat16WeightedState" -"medianDD" -"medianDDArgMax" -"medianDDArgMin" -"medianDDArray" -"medianDDDistinct" -"medianDDForEach" -"medianDDIf" -"medianDDMap" -"medianDDMerge" -"medianDDNull" -"medianDDOrDefault" -"medianDDOrNull" -"medianDDResample" -"medianDDSimpleState" -"medianDDState" -"medianDeterministic" -"medianDeterministicArgMax" -"medianDeterministicArgMin" -"medianDeterministicArray" -"medianDeterministicDistinct" -"medianDeterministicForEach" -"medianDeterministicIf" -"medianDeterministicMap" -"medianDeterministicMerge" -"medianDeterministicNull" -"medianDeterministicOrDefault" -"medianDeterministicOrNull" -"medianDeterministicResample" -"medianDeterministicSimpleState" -"medianDeterministicState" -"medianDistinct" -"medianExact" -"medianExactArgMax" -"medianExactArgMin" -"medianExactArray" -"medianExactDistinct" -"medianExactForEach" -"medianExactHigh" -"medianExactHighArgMax" -"medianExactHighArgMin" -"medianExactHighArray" -"medianExactHighDistinct" -"medianExactHighForEach" -"medianExactHighIf" -"medianExactHighMap" -"medianExactHighMerge" -"medianExactHighNull" -"medianExactHighOrDefault" -"medianExactHighOrNull" -"medianExactHighResample" -"medianExactHighSimpleState" -"medianExactHighState" -"medianExactIf" -"medianExactLow" -"medianExactLowArgMax" -"medianExactLowArgMin" -"medianExactLowArray" -"medianExactLowDistinct" -"medianExactLowForEach" -"medianExactLowIf" -"medianExactLowMap" -"medianExactLowMerge" -"medianExactLowNull" -"medianExactLowOrDefault" -"medianExactLowOrNull" -"medianExactLowResample" -"medianExactLowSimpleState" -"medianExactLowState" -"medianExactMap" -"medianExactMerge" -"medianExactNull" -"medianExactOrDefault" -"medianExactOrNull" -"medianExactResample" -"medianExactSimpleState" -"medianExactState" -"medianExactWeighted" -"medianExactWeightedArgMax" -"medianExactWeightedArgMin" -"medianExactWeightedArray" -"medianExactWeightedDistinct" -"medianExactWeightedForEach" -"medianExactWeightedIf" -"medianExactWeightedMap" -"medianExactWeightedMerge" -"medianExactWeightedNull" -"medianExactWeightedOrDefault" -"medianExactWeightedOrNull" -"medianExactWeightedResample" -"medianExactWeightedSimpleState" -"medianExactWeightedState" -"medianForEach" -"medianGK" -"medianGKArgMax" -"medianGKArgMin" -"medianGKArray" -"medianGKDistinct" -"medianGKForEach" -"medianGKIf" -"medianGKMap" -"medianGKMerge" -"medianGKNull" -"medianGKOrDefault" -"medianGKOrNull" -"medianGKResample" -"medianGKSimpleState" -"medianGKState" -"medianIf" -"medianInterpolatedWeighted" -"medianInterpolatedWeightedArgMax" -"medianInterpolatedWeightedArgMin" -"medianInterpolatedWeightedArray" -"medianInterpolatedWeightedDistinct" -"medianInterpolatedWeightedForEach" -"medianInterpolatedWeightedIf" -"medianInterpolatedWeightedMap" -"medianInterpolatedWeightedMerge" -"medianInterpolatedWeightedNull" 
-"medianInterpolatedWeightedOrDefault" -"medianInterpolatedWeightedOrNull" -"medianInterpolatedWeightedResample" -"medianInterpolatedWeightedSimpleState" -"medianInterpolatedWeightedState" -"medianMap" -"medianMerge" -"medianNull" -"medianOrDefault" -"medianOrNull" -"medianResample" -"medianSimpleState" -"medianState" -"medianTDigest" -"medianTDigestArgMax" -"medianTDigestArgMin" -"medianTDigestArray" -"medianTDigestDistinct" -"medianTDigestForEach" -"medianTDigestIf" -"medianTDigestMap" -"medianTDigestMerge" -"medianTDigestNull" -"medianTDigestOrDefault" -"medianTDigestOrNull" -"medianTDigestResample" -"medianTDigestSimpleState" -"medianTDigestState" -"medianTDigestWeighted" -"medianTDigestWeightedArgMax" -"medianTDigestWeightedArgMin" -"medianTDigestWeightedArray" -"medianTDigestWeightedDistinct" -"medianTDigestWeightedForEach" -"medianTDigestWeightedIf" -"medianTDigestWeightedMap" -"medianTDigestWeightedMerge" -"medianTDigestWeightedNull" -"medianTDigestWeightedOrDefault" -"medianTDigestWeightedOrNull" -"medianTDigestWeightedResample" -"medianTDigestWeightedSimpleState" -"medianTDigestWeightedState" -"medianTiming" -"medianTimingArgMax" -"medianTimingArgMin" -"medianTimingArray" -"medianTimingDistinct" -"medianTimingForEach" -"medianTimingIf" -"medianTimingMap" -"medianTimingMerge" -"medianTimingNull" -"medianTimingOrDefault" -"medianTimingOrNull" -"medianTimingResample" -"medianTimingSimpleState" -"medianTimingState" -"medianTimingWeighted" -"medianTimingWeightedArgMax" -"medianTimingWeightedArgMin" -"medianTimingWeightedArray" -"medianTimingWeightedDistinct" -"medianTimingWeightedForEach" -"medianTimingWeightedIf" -"medianTimingWeightedMap" -"medianTimingWeightedMerge" -"medianTimingWeightedNull" -"medianTimingWeightedOrDefault" -"medianTimingWeightedOrNull" -"medianTimingWeightedResample" -"medianTimingWeightedSimpleState" -"medianTimingWeightedState" -"metroHash64" -"mid" -"min" -"min2" -"minArgMax" -"minArgMin" -"minArray" -"minDistinct" -"minForEach" -"minIf" -"minMap" -"minMappedArrays" -"minMappedArraysArgMax" -"minMappedArraysArgMin" -"minMappedArraysArray" -"minMappedArraysDistinct" -"minMappedArraysForEach" -"minMappedArraysIf" -"minMappedArraysMap" -"minMappedArraysMerge" -"minMappedArraysNull" -"minMappedArraysOrDefault" -"minMappedArraysOrNull" -"minMappedArraysResample" -"minMappedArraysSimpleState" -"minMappedArraysState" -"minMerge" -"minNull" -"minOrDefault" -"minOrNull" -"minResample" -"minSampleSizeContinous" -"minSampleSizeContinuous" -"minSampleSizeConversion" -"minSimpleState" -"minState" -"minus" -"mismatches" -"mod" -"modulo" -"moduloLegacy" -"moduloOrZero" -"monthName" -"mortonDecode" -"mortonEncode" -"multiFuzzyMatchAllIndices" -"multiFuzzyMatchAny" -"multiFuzzyMatchAnyIndex" -"multiIf" -"multiMatchAllIndices" -"multiMatchAny" -"multiMatchAnyIndex" -"multiSearchAllPositions" -"multiSearchAllPositionsCaseInsensitive" -"multiSearchAllPositionsCaseInsensitiveUTF8" -"multiSearchAllPositionsUTF8" -"multiSearchAny" -"multiSearchAnyCaseInsensitive" -"multiSearchAnyCaseInsensitiveUTF8" -"multiSearchAnyUTF8" -"multiSearchFirstIndex" -"multiSearchFirstIndexCaseInsensitive" -"multiSearchFirstIndexCaseInsensitiveUTF8" -"multiSearchFirstIndexUTF8" -"multiSearchFirstPosition" -"multiSearchFirstPositionCaseInsensitive" -"multiSearchFirstPositionCaseInsensitiveUTF8" -"multiSearchFirstPositionUTF8" -"multiply" -"multiplyDecimal" -"murmurHash2_32" -"murmurHash2_64" -"murmurHash3_128" -"murmurHash3_32" -"murmurHash3_64" -"negate" -"neighbor" -"nested" -"netloc" -"ngramDistance" 
-"ngramDistanceCaseInsensitive" -"ngramDistanceCaseInsensitiveUTF8" -"ngramDistanceUTF8" -"ngramMinHash" -"ngramMinHashArg" -"ngramMinHashArgCaseInsensitive" -"ngramMinHashArgCaseInsensitiveUTF8" -"ngramMinHashArgUTF8" -"ngramMinHashCaseInsensitive" -"ngramMinHashCaseInsensitiveUTF8" -"ngramMinHashUTF8" -"ngramSearch" -"ngramSearchCaseInsensitive" -"ngramSearchCaseInsensitiveUTF8" -"ngramSearchUTF8" -"ngramSimHash" -"ngramSimHashCaseInsensitive" -"ngramSimHashCaseInsensitiveUTF8" -"ngramSimHashUTF8" -"ngrams" -"nonNegativeDerivative" -"nonNegativeDerivativeArgMax" -"nonNegativeDerivativeArgMin" -"nonNegativeDerivativeArray" -"nonNegativeDerivativeDistinct" -"nonNegativeDerivativeForEach" -"nonNegativeDerivativeIf" -"nonNegativeDerivativeMap" -"nonNegativeDerivativeMerge" -"nonNegativeDerivativeNull" -"nonNegativeDerivativeOrDefault" -"nonNegativeDerivativeOrNull" -"nonNegativeDerivativeResample" -"nonNegativeDerivativeSimpleState" -"nonNegativeDerivativeState" -"normL1" -"normL2" -"normL2Squared" -"normLinf" -"normLp" -"normalizeL1" -"normalizeL2" -"normalizeLinf" -"normalizeLp" -"normalizeQuery" -"normalizeQueryKeepNames" -"normalizeUTF8NFC" -"normalizeUTF8NFD" -"normalizeUTF8NFKC" -"normalizeUTF8NFKD" -"normalizedQueryHash" -"normalizedQueryHashKeepNames" -"not" -"notEmpty" -"notEquals" -"notILike" -"notIn" -"notInIgnoreSet" -"notLike" -"notNullIn" -"notNullInIgnoreSet" -"nothing" -"nothingArgMax" -"nothingArgMin" -"nothingArray" -"nothingDistinct" -"nothingForEach" -"nothingIf" -"nothingMap" -"nothingMerge" -"nothingNull" -"nothingNull" -"nothingNullArgMax" -"nothingNullArgMin" -"nothingNullArray" -"nothingNullDistinct" -"nothingNullForEach" -"nothingNullIf" -"nothingNullMap" -"nothingNullMerge" -"nothingNullNull" -"nothingNullOrDefault" -"nothingNullOrNull" -"nothingNullResample" -"nothingNullSimpleState" -"nothingNullState" -"nothingOrDefault" -"nothingOrNull" -"nothingResample" -"nothingSimpleState" -"nothingState" -"nothingUInt64" -"nothingUInt64ArgMax" -"nothingUInt64ArgMin" -"nothingUInt64Array" -"nothingUInt64Distinct" -"nothingUInt64ForEach" -"nothingUInt64If" -"nothingUInt64Map" -"nothingUInt64Merge" -"nothingUInt64Null" -"nothingUInt64OrDefault" -"nothingUInt64OrNull" -"nothingUInt64Resample" -"nothingUInt64SimpleState" -"nothingUInt64State" -"now" -"now64" -"nowInBlock" -"nth_value" -"nth_valueArgMax" -"nth_valueArgMin" -"nth_valueArray" -"nth_valueDistinct" -"nth_valueForEach" -"nth_valueIf" -"nth_valueMap" -"nth_valueMerge" -"nth_valueNull" -"nth_valueOrDefault" -"nth_valueOrNull" -"nth_valueResample" -"nth_valueSimpleState" -"nth_valueState" -"ntile" -"ntileArgMax" -"ntileArgMin" -"ntileArray" -"ntileDistinct" -"ntileForEach" -"ntileIf" -"ntileMap" -"ntileMerge" -"ntileNull" -"ntileOrDefault" -"ntileOrNull" -"ntileResample" -"ntileSimpleState" -"ntileState" -"nullIf" -"nullIn" -"nullInIgnoreSet" -"or" -"parseDateTime" -"parseDateTime32BestEffort" -"parseDateTime32BestEffortOrNull" -"parseDateTime32BestEffortOrZero" -"parseDateTime64BestEffort" -"parseDateTime64BestEffortOrNull" -"parseDateTime64BestEffortOrZero" -"parseDateTime64BestEffortUS" -"parseDateTime64BestEffortUSOrNull" -"parseDateTime64BestEffortUSOrZero" -"parseDateTimeBestEffort" -"parseDateTimeBestEffortOrNull" -"parseDateTimeBestEffortOrZero" -"parseDateTimeBestEffortUS" -"parseDateTimeBestEffortUSOrNull" -"parseDateTimeBestEffortUSOrZero" -"parseDateTimeInJodaSyntax" -"parseDateTimeInJodaSyntaxOrNull" -"parseDateTimeInJodaSyntaxOrZero" -"parseDateTimeOrNull" -"parseDateTimeOrZero" -"parseReadableSize" 
-"parseReadableSizeOrNull" -"parseReadableSizeOrZero" -"parseTimeDelta" -"partitionID" -"partitionId" -"path" -"pathFull" -"percentRank" -"percentRankArgMax" -"percentRankArgMin" -"percentRankArray" -"percentRankDistinct" -"percentRankForEach" -"percentRankIf" -"percentRankMap" -"percentRankMerge" -"percentRankNull" -"percentRankOrDefault" -"percentRankOrNull" -"percentRankResample" -"percentRankSimpleState" -"percentRankState" -"percent_rank" -"percent_rankArgMax" -"percent_rankArgMin" -"percent_rankArray" -"percent_rankDistinct" -"percent_rankForEach" -"percent_rankIf" -"percent_rankMap" -"percent_rankMerge" -"percent_rankNull" -"percent_rankOrDefault" -"percent_rankOrNull" -"percent_rankResample" -"percent_rankSimpleState" -"percent_rankState" -"pi" -"plus" -"pmod" -"pointInEllipses" -"pointInPolygon" -"polygonAreaCartesian" -"polygonAreaSpherical" -"polygonConvexHullCartesian" -"polygonPerimeterCartesian" -"polygonPerimeterSpherical" -"polygonsDistanceCartesian" -"polygonsDistanceSpherical" -"polygonsEqualsCartesian" -"polygonsIntersectionCartesian" -"polygonsIntersectionSpherical" -"polygonsSymDifferenceCartesian" -"polygonsSymDifferenceSpherical" -"polygonsUnionCartesian" -"polygonsUnionSpherical" -"polygonsWithinCartesian" -"polygonsWithinSpherical" -"port" -"portRFC" -"position" -"positionCaseInsensitive" -"positionCaseInsensitiveUTF8" -"positionUTF8" -"positiveModulo" -"positive_modulo" -"pow" -"power" -"printf" -"proportionsZTest" -"protocol" -"punycodeDecode" -"punycodeEncode" -"quantile" -"quantileArgMax" -"quantileArgMin" -"quantileArray" -"quantileBFloat16" -"quantileBFloat16ArgMax" -"quantileBFloat16ArgMin" -"quantileBFloat16Array" -"quantileBFloat16Distinct" -"quantileBFloat16ForEach" -"quantileBFloat16If" -"quantileBFloat16Map" -"quantileBFloat16Merge" -"quantileBFloat16Null" -"quantileBFloat16OrDefault" -"quantileBFloat16OrNull" -"quantileBFloat16Resample" -"quantileBFloat16SimpleState" -"quantileBFloat16State" -"quantileBFloat16Weighted" -"quantileBFloat16WeightedArgMax" -"quantileBFloat16WeightedArgMin" -"quantileBFloat16WeightedArray" -"quantileBFloat16WeightedDistinct" -"quantileBFloat16WeightedForEach" -"quantileBFloat16WeightedIf" -"quantileBFloat16WeightedMap" -"quantileBFloat16WeightedMerge" -"quantileBFloat16WeightedNull" -"quantileBFloat16WeightedOrDefault" -"quantileBFloat16WeightedOrNull" -"quantileBFloat16WeightedResample" -"quantileBFloat16WeightedSimpleState" -"quantileBFloat16WeightedState" -"quantileDD" -"quantileDDArgMax" -"quantileDDArgMin" -"quantileDDArray" -"quantileDDDistinct" -"quantileDDForEach" -"quantileDDIf" -"quantileDDMap" -"quantileDDMerge" -"quantileDDNull" -"quantileDDOrDefault" -"quantileDDOrNull" -"quantileDDResample" -"quantileDDSimpleState" -"quantileDDState" -"quantileDeterministic" -"quantileDeterministicArgMax" -"quantileDeterministicArgMin" -"quantileDeterministicArray" -"quantileDeterministicDistinct" -"quantileDeterministicForEach" -"quantileDeterministicIf" -"quantileDeterministicMap" -"quantileDeterministicMerge" -"quantileDeterministicNull" -"quantileDeterministicOrDefault" -"quantileDeterministicOrNull" -"quantileDeterministicResample" -"quantileDeterministicSimpleState" -"quantileDeterministicState" -"quantileDistinct" -"quantileExact" -"quantileExactArgMax" -"quantileExactArgMin" -"quantileExactArray" -"quantileExactDistinct" -"quantileExactExclusive" -"quantileExactExclusiveArgMax" -"quantileExactExclusiveArgMin" -"quantileExactExclusiveArray" -"quantileExactExclusiveDistinct" -"quantileExactExclusiveForEach" 
-"quantileExactExclusiveIf" -"quantileExactExclusiveMap" -"quantileExactExclusiveMerge" -"quantileExactExclusiveNull" -"quantileExactExclusiveOrDefault" -"quantileExactExclusiveOrNull" -"quantileExactExclusiveResample" -"quantileExactExclusiveSimpleState" -"quantileExactExclusiveState" -"quantileExactForEach" -"quantileExactHigh" -"quantileExactHighArgMax" -"quantileExactHighArgMin" -"quantileExactHighArray" -"quantileExactHighDistinct" -"quantileExactHighForEach" -"quantileExactHighIf" -"quantileExactHighMap" -"quantileExactHighMerge" -"quantileExactHighNull" -"quantileExactHighOrDefault" -"quantileExactHighOrNull" -"quantileExactHighResample" -"quantileExactHighSimpleState" -"quantileExactHighState" -"quantileExactIf" -"quantileExactInclusive" -"quantileExactInclusiveArgMax" -"quantileExactInclusiveArgMin" -"quantileExactInclusiveArray" -"quantileExactInclusiveDistinct" -"quantileExactInclusiveForEach" -"quantileExactInclusiveIf" -"quantileExactInclusiveMap" -"quantileExactInclusiveMerge" -"quantileExactInclusiveNull" -"quantileExactInclusiveOrDefault" -"quantileExactInclusiveOrNull" -"quantileExactInclusiveResample" -"quantileExactInclusiveSimpleState" -"quantileExactInclusiveState" -"quantileExactLow" -"quantileExactLowArgMax" -"quantileExactLowArgMin" -"quantileExactLowArray" -"quantileExactLowDistinct" -"quantileExactLowForEach" -"quantileExactLowIf" -"quantileExactLowMap" -"quantileExactLowMerge" -"quantileExactLowNull" -"quantileExactLowOrDefault" -"quantileExactLowOrNull" -"quantileExactLowResample" -"quantileExactLowSimpleState" -"quantileExactLowState" -"quantileExactMap" -"quantileExactMerge" -"quantileExactNull" -"quantileExactOrDefault" -"quantileExactOrNull" -"quantileExactResample" -"quantileExactSimpleState" -"quantileExactState" -"quantileExactWeighted" -"quantileExactWeightedArgMax" -"quantileExactWeightedArgMin" -"quantileExactWeightedArray" -"quantileExactWeightedDistinct" -"quantileExactWeightedForEach" -"quantileExactWeightedIf" -"quantileExactWeightedMap" -"quantileExactWeightedMerge" -"quantileExactWeightedNull" -"quantileExactWeightedOrDefault" -"quantileExactWeightedOrNull" -"quantileExactWeightedResample" -"quantileExactWeightedSimpleState" -"quantileExactWeightedState" -"quantileForEach" -"quantileGK" -"quantileGKArgMax" -"quantileGKArgMin" -"quantileGKArray" -"quantileGKDistinct" -"quantileGKForEach" -"quantileGKIf" -"quantileGKMap" -"quantileGKMerge" -"quantileGKNull" -"quantileGKOrDefault" -"quantileGKOrNull" -"quantileGKResample" -"quantileGKSimpleState" -"quantileGKState" -"quantileIf" -"quantileInterpolatedWeighted" -"quantileInterpolatedWeightedArgMax" -"quantileInterpolatedWeightedArgMin" -"quantileInterpolatedWeightedArray" -"quantileInterpolatedWeightedDistinct" -"quantileInterpolatedWeightedForEach" -"quantileInterpolatedWeightedIf" -"quantileInterpolatedWeightedMap" -"quantileInterpolatedWeightedMerge" -"quantileInterpolatedWeightedNull" -"quantileInterpolatedWeightedOrDefault" -"quantileInterpolatedWeightedOrNull" -"quantileInterpolatedWeightedResample" -"quantileInterpolatedWeightedSimpleState" -"quantileInterpolatedWeightedState" -"quantileMap" -"quantileMerge" -"quantileNull" -"quantileOrDefault" -"quantileOrNull" -"quantileResample" -"quantileSimpleState" -"quantileState" -"quantileTDigest" -"quantileTDigestArgMax" -"quantileTDigestArgMin" -"quantileTDigestArray" -"quantileTDigestDistinct" -"quantileTDigestForEach" -"quantileTDigestIf" -"quantileTDigestMap" -"quantileTDigestMerge" -"quantileTDigestNull" -"quantileTDigestOrDefault" 
-"quantileTDigestOrNull" -"quantileTDigestResample" -"quantileTDigestSimpleState" -"quantileTDigestState" -"quantileTDigestWeighted" -"quantileTDigestWeightedArgMax" -"quantileTDigestWeightedArgMin" -"quantileTDigestWeightedArray" -"quantileTDigestWeightedDistinct" -"quantileTDigestWeightedForEach" -"quantileTDigestWeightedIf" -"quantileTDigestWeightedMap" -"quantileTDigestWeightedMerge" -"quantileTDigestWeightedNull" -"quantileTDigestWeightedOrDefault" -"quantileTDigestWeightedOrNull" -"quantileTDigestWeightedResample" -"quantileTDigestWeightedSimpleState" -"quantileTDigestWeightedState" -"quantileTiming" -"quantileTimingArgMax" -"quantileTimingArgMin" -"quantileTimingArray" -"quantileTimingDistinct" -"quantileTimingForEach" -"quantileTimingIf" -"quantileTimingMap" -"quantileTimingMerge" -"quantileTimingNull" -"quantileTimingOrDefault" -"quantileTimingOrNull" -"quantileTimingResample" -"quantileTimingSimpleState" -"quantileTimingState" -"quantileTimingWeighted" -"quantileTimingWeightedArgMax" -"quantileTimingWeightedArgMin" -"quantileTimingWeightedArray" -"quantileTimingWeightedDistinct" -"quantileTimingWeightedForEach" -"quantileTimingWeightedIf" -"quantileTimingWeightedMap" -"quantileTimingWeightedMerge" -"quantileTimingWeightedNull" -"quantileTimingWeightedOrDefault" -"quantileTimingWeightedOrNull" -"quantileTimingWeightedResample" -"quantileTimingWeightedSimpleState" -"quantileTimingWeightedState" -"quantiles" -"quantilesArgMax" -"quantilesArgMin" -"quantilesArray" -"quantilesBFloat16" -"quantilesBFloat16ArgMax" -"quantilesBFloat16ArgMin" -"quantilesBFloat16Array" -"quantilesBFloat16Distinct" -"quantilesBFloat16ForEach" -"quantilesBFloat16If" -"quantilesBFloat16Map" -"quantilesBFloat16Merge" -"quantilesBFloat16Null" -"quantilesBFloat16OrDefault" -"quantilesBFloat16OrNull" -"quantilesBFloat16Resample" -"quantilesBFloat16SimpleState" -"quantilesBFloat16State" -"quantilesBFloat16Weighted" -"quantilesBFloat16WeightedArgMax" -"quantilesBFloat16WeightedArgMin" -"quantilesBFloat16WeightedArray" -"quantilesBFloat16WeightedDistinct" -"quantilesBFloat16WeightedForEach" -"quantilesBFloat16WeightedIf" -"quantilesBFloat16WeightedMap" -"quantilesBFloat16WeightedMerge" -"quantilesBFloat16WeightedNull" -"quantilesBFloat16WeightedOrDefault" -"quantilesBFloat16WeightedOrNull" -"quantilesBFloat16WeightedResample" -"quantilesBFloat16WeightedSimpleState" -"quantilesBFloat16WeightedState" -"quantilesDD" -"quantilesDDArgMax" -"quantilesDDArgMin" -"quantilesDDArray" -"quantilesDDDistinct" -"quantilesDDForEach" -"quantilesDDIf" -"quantilesDDMap" -"quantilesDDMerge" -"quantilesDDNull" -"quantilesDDOrDefault" -"quantilesDDOrNull" -"quantilesDDResample" -"quantilesDDSimpleState" -"quantilesDDState" -"quantilesDeterministic" -"quantilesDeterministicArgMax" -"quantilesDeterministicArgMin" -"quantilesDeterministicArray" -"quantilesDeterministicDistinct" -"quantilesDeterministicForEach" -"quantilesDeterministicIf" -"quantilesDeterministicMap" -"quantilesDeterministicMerge" -"quantilesDeterministicNull" -"quantilesDeterministicOrDefault" -"quantilesDeterministicOrNull" -"quantilesDeterministicResample" -"quantilesDeterministicSimpleState" -"quantilesDeterministicState" -"quantilesDistinct" -"quantilesExact" -"quantilesExactArgMax" -"quantilesExactArgMin" -"quantilesExactArray" -"quantilesExactDistinct" -"quantilesExactExclusive" -"quantilesExactExclusiveArgMax" -"quantilesExactExclusiveArgMin" -"quantilesExactExclusiveArray" -"quantilesExactExclusiveDistinct" -"quantilesExactExclusiveForEach" 
-"quantilesExactExclusiveIf" -"quantilesExactExclusiveMap" -"quantilesExactExclusiveMerge" -"quantilesExactExclusiveNull" -"quantilesExactExclusiveOrDefault" -"quantilesExactExclusiveOrNull" -"quantilesExactExclusiveResample" -"quantilesExactExclusiveSimpleState" -"quantilesExactExclusiveState" -"quantilesExactForEach" -"quantilesExactHigh" -"quantilesExactHighArgMax" -"quantilesExactHighArgMin" -"quantilesExactHighArray" -"quantilesExactHighDistinct" -"quantilesExactHighForEach" -"quantilesExactHighIf" -"quantilesExactHighMap" -"quantilesExactHighMerge" -"quantilesExactHighNull" -"quantilesExactHighOrDefault" -"quantilesExactHighOrNull" -"quantilesExactHighResample" -"quantilesExactHighSimpleState" -"quantilesExactHighState" -"quantilesExactIf" -"quantilesExactInclusive" -"quantilesExactInclusiveArgMax" -"quantilesExactInclusiveArgMin" -"quantilesExactInclusiveArray" -"quantilesExactInclusiveDistinct" -"quantilesExactInclusiveForEach" -"quantilesExactInclusiveIf" -"quantilesExactInclusiveMap" -"quantilesExactInclusiveMerge" -"quantilesExactInclusiveNull" -"quantilesExactInclusiveOrDefault" -"quantilesExactInclusiveOrNull" -"quantilesExactInclusiveResample" -"quantilesExactInclusiveSimpleState" -"quantilesExactInclusiveState" -"quantilesExactLow" -"quantilesExactLowArgMax" -"quantilesExactLowArgMin" -"quantilesExactLowArray" -"quantilesExactLowDistinct" -"quantilesExactLowForEach" -"quantilesExactLowIf" -"quantilesExactLowMap" -"quantilesExactLowMerge" -"quantilesExactLowNull" -"quantilesExactLowOrDefault" -"quantilesExactLowOrNull" -"quantilesExactLowResample" -"quantilesExactLowSimpleState" -"quantilesExactLowState" -"quantilesExactMap" -"quantilesExactMerge" -"quantilesExactNull" -"quantilesExactOrDefault" -"quantilesExactOrNull" -"quantilesExactResample" -"quantilesExactSimpleState" -"quantilesExactState" -"quantilesExactWeighted" -"quantilesExactWeightedArgMax" -"quantilesExactWeightedArgMin" -"quantilesExactWeightedArray" -"quantilesExactWeightedDistinct" -"quantilesExactWeightedForEach" -"quantilesExactWeightedIf" -"quantilesExactWeightedMap" -"quantilesExactWeightedMerge" -"quantilesExactWeightedNull" -"quantilesExactWeightedOrDefault" -"quantilesExactWeightedOrNull" -"quantilesExactWeightedResample" -"quantilesExactWeightedSimpleState" -"quantilesExactWeightedState" -"quantilesForEach" -"quantilesGK" -"quantilesGKArgMax" -"quantilesGKArgMin" -"quantilesGKArray" -"quantilesGKDistinct" -"quantilesGKForEach" -"quantilesGKIf" -"quantilesGKMap" -"quantilesGKMerge" -"quantilesGKNull" -"quantilesGKOrDefault" -"quantilesGKOrNull" -"quantilesGKResample" -"quantilesGKSimpleState" -"quantilesGKState" -"quantilesIf" -"quantilesInterpolatedWeighted" -"quantilesInterpolatedWeightedArgMax" -"quantilesInterpolatedWeightedArgMin" -"quantilesInterpolatedWeightedArray" -"quantilesInterpolatedWeightedDistinct" -"quantilesInterpolatedWeightedForEach" -"quantilesInterpolatedWeightedIf" -"quantilesInterpolatedWeightedMap" -"quantilesInterpolatedWeightedMerge" -"quantilesInterpolatedWeightedNull" -"quantilesInterpolatedWeightedOrDefault" -"quantilesInterpolatedWeightedOrNull" -"quantilesInterpolatedWeightedResample" -"quantilesInterpolatedWeightedSimpleState" -"quantilesInterpolatedWeightedState" -"quantilesMap" -"quantilesMerge" -"quantilesNull" -"quantilesOrDefault" -"quantilesOrNull" -"quantilesResample" -"quantilesSimpleState" -"quantilesState" -"quantilesTDigest" -"quantilesTDigestArgMax" -"quantilesTDigestArgMin" -"quantilesTDigestArray" -"quantilesTDigestDistinct" -"quantilesTDigestForEach" 
-"quantilesTDigestIf" -"quantilesTDigestMap" -"quantilesTDigestMerge" -"quantilesTDigestNull" -"quantilesTDigestOrDefault" -"quantilesTDigestOrNull" -"quantilesTDigestResample" -"quantilesTDigestSimpleState" -"quantilesTDigestState" -"quantilesTDigestWeighted" -"quantilesTDigestWeightedArgMax" -"quantilesTDigestWeightedArgMin" -"quantilesTDigestWeightedArray" -"quantilesTDigestWeightedDistinct" -"quantilesTDigestWeightedForEach" -"quantilesTDigestWeightedIf" -"quantilesTDigestWeightedMap" -"quantilesTDigestWeightedMerge" -"quantilesTDigestWeightedNull" -"quantilesTDigestWeightedOrDefault" -"quantilesTDigestWeightedOrNull" -"quantilesTDigestWeightedResample" -"quantilesTDigestWeightedSimpleState" -"quantilesTDigestWeightedState" -"quantilesTiming" -"quantilesTimingArgMax" -"quantilesTimingArgMin" -"quantilesTimingArray" -"quantilesTimingDistinct" -"quantilesTimingForEach" -"quantilesTimingIf" -"quantilesTimingMap" -"quantilesTimingMerge" -"quantilesTimingNull" -"quantilesTimingOrDefault" -"quantilesTimingOrNull" -"quantilesTimingResample" -"quantilesTimingSimpleState" -"quantilesTimingState" -"quantilesTimingWeighted" -"quantilesTimingWeightedArgMax" -"quantilesTimingWeightedArgMin" -"quantilesTimingWeightedArray" -"quantilesTimingWeightedDistinct" -"quantilesTimingWeightedForEach" -"quantilesTimingWeightedIf" -"quantilesTimingWeightedMap" -"quantilesTimingWeightedMerge" -"quantilesTimingWeightedNull" -"quantilesTimingWeightedOrDefault" -"quantilesTimingWeightedOrNull" -"quantilesTimingWeightedResample" -"quantilesTimingWeightedSimpleState" -"quantilesTimingWeightedState" -"queryID" -"queryString" -"queryStringAndFragment" -"query_id" -"radians" -"rand" -"rand32" -"rand64" -"randBernoulli" -"randBinomial" -"randCanonical" -"randChiSquared" -"randConstant" -"randExponential" -"randFisherF" -"randLogNormal" -"randNegativeBinomial" -"randNormal" -"randPoisson" -"randStudentT" -"randUniform" -"randomFixedString" -"randomPrintableASCII" -"randomString" -"randomStringUTF8" -"range" -"rank" -"rankArgMax" -"rankArgMin" -"rankArray" -"rankCorr" -"rankCorrArgMax" -"rankCorrArgMin" -"rankCorrArray" -"rankCorrDistinct" -"rankCorrForEach" -"rankCorrIf" -"rankCorrMap" -"rankCorrMerge" -"rankCorrNull" -"rankCorrOrDefault" -"rankCorrOrNull" -"rankCorrResample" -"rankCorrSimpleState" -"rankCorrState" -"rankDistinct" -"rankForEach" -"rankIf" -"rankMap" -"rankMerge" -"rankNull" -"rankOrDefault" -"rankOrNull" -"rankResample" -"rankSimpleState" -"rankState" -"readWKTLineString" -"readWKTMultiLineString" -"readWKTMultiPolygon" -"readWKTPoint" -"readWKTPolygon" -"readWKTRing" -"regexpExtract" -"regexpQuoteMeta" -"regionHierarchy" -"regionIn" -"regionToArea" -"regionToCity" -"regionToContinent" -"regionToCountry" -"regionToDistrict" -"regionToName" -"regionToPopulation" -"regionToTopContinent" -"reinterpret" -"reinterpretAsDate" -"reinterpretAsDateTime" -"reinterpretAsFixedString" -"reinterpretAsFloat32" -"reinterpretAsFloat64" -"reinterpretAsInt128" -"reinterpretAsInt16" -"reinterpretAsInt256" -"reinterpretAsInt32" -"reinterpretAsInt64" -"reinterpretAsInt8" -"reinterpretAsString" -"reinterpretAsUInt128" -"reinterpretAsUInt16" -"reinterpretAsUInt256" -"reinterpretAsUInt32" -"reinterpretAsUInt64" -"reinterpretAsUInt8" -"reinterpretAsUUID" -"repeat" -"replace" -"replaceAll" -"replaceOne" -"replaceRegexpAll" -"replaceRegexpOne" -"replicate" -"retention" -"retentionArgMax" -"retentionArgMin" -"retentionArray" -"retentionDistinct" -"retentionForEach" -"retentionIf" -"retentionMap" -"retentionMerge" -"retentionNull" 
-"retentionOrDefault" -"retentionOrNull" -"retentionResample" -"retentionSimpleState" -"retentionState" -"reverse" -"reverseUTF8" -"revision" -"right" -"rightPad" -"rightPadUTF8" -"rightUTF8" -"round" -"roundAge" -"roundBankers" -"roundDown" -"roundDuration" -"roundToExp2" -"rowNumberInAllBlocks" -"rowNumberInBlock" -"row_number" -"row_numberArgMax" -"row_numberArgMin" -"row_numberArray" -"row_numberDistinct" -"row_numberForEach" -"row_numberIf" -"row_numberMap" -"row_numberMerge" -"row_numberNull" -"row_numberOrDefault" -"row_numberOrNull" -"row_numberResample" -"row_numberSimpleState" -"row_numberState" -"rpad" -"rtrim" -"runningAccumulate" -"runningConcurrency" -"runningDifference" -"runningDifferenceStartingWithFirstValue" -"s2CapContains" -"s2CapUnion" -"s2CellsIntersect" -"s2GetNeighbors" -"s2RectAdd" -"s2RectContains" -"s2RectIntersection" -"s2RectUnion" -"s2ToGeo" -"scalarProduct" -"sequenceCount" -"sequenceCountArgMax" -"sequenceCountArgMin" -"sequenceCountArray" -"sequenceCountDistinct" -"sequenceCountForEach" -"sequenceCountIf" -"sequenceCountMap" -"sequenceCountMerge" -"sequenceCountNull" -"sequenceCountOrDefault" -"sequenceCountOrNull" -"sequenceCountResample" -"sequenceCountSimpleState" -"sequenceCountState" -"sequenceMatch" -"sequenceMatchArgMax" -"sequenceMatchArgMin" -"sequenceMatchArray" -"sequenceMatchDistinct" -"sequenceMatchForEach" -"sequenceMatchIf" -"sequenceMatchMap" -"sequenceMatchMerge" -"sequenceMatchNull" -"sequenceMatchOrDefault" -"sequenceMatchOrNull" -"sequenceMatchResample" -"sequenceMatchSimpleState" -"sequenceMatchState" -"sequenceNextNode" -"sequenceNextNodeArgMax" -"sequenceNextNodeArgMin" -"sequenceNextNodeArray" -"sequenceNextNodeDistinct" -"sequenceNextNodeForEach" -"sequenceNextNodeIf" -"sequenceNextNodeMap" -"sequenceNextNodeMerge" -"sequenceNextNodeNull" -"sequenceNextNodeOrDefault" -"sequenceNextNodeOrNull" -"sequenceNextNodeResample" -"sequenceNextNodeSimpleState" -"sequenceNextNodeState" -"seriesDecomposeSTL" -"seriesOutliersDetectTukey" -"seriesPeriodDetectFFT" -"serverTimeZone" -"serverTimezone" -"serverUUID" -"shardCount" -"shardNum" -"showCertificate" -"sigmoid" -"sign" -"simpleJSONExtractBool" -"simpleJSONExtractFloat" -"simpleJSONExtractInt" -"simpleJSONExtractRaw" -"simpleJSONExtractString" -"simpleJSONExtractUInt" -"simpleJSONHas" -"simpleLinearRegression" -"simpleLinearRegressionArgMax" -"simpleLinearRegressionArgMin" -"simpleLinearRegressionArray" -"simpleLinearRegressionDistinct" -"simpleLinearRegressionForEach" -"simpleLinearRegressionIf" -"simpleLinearRegressionMap" -"simpleLinearRegressionMerge" -"simpleLinearRegressionNull" -"simpleLinearRegressionOrDefault" -"simpleLinearRegressionOrNull" -"simpleLinearRegressionResample" -"simpleLinearRegressionSimpleState" -"simpleLinearRegressionState" -"sin" -"singleValueOrNull" -"singleValueOrNullArgMax" -"singleValueOrNullArgMin" -"singleValueOrNullArray" -"singleValueOrNullDistinct" -"singleValueOrNullForEach" -"singleValueOrNullIf" -"singleValueOrNullMap" -"singleValueOrNullMerge" -"singleValueOrNullNull" -"singleValueOrNullOrDefault" -"singleValueOrNullOrNull" -"singleValueOrNullResample" -"singleValueOrNullSimpleState" -"singleValueOrNullState" -"sinh" -"sipHash128" -"sipHash128Keyed" -"sipHash128Reference" -"sipHash128ReferenceKeyed" -"sipHash64" -"sipHash64Keyed" -"skewPop" -"skewPopArgMax" -"skewPopArgMin" -"skewPopArray" -"skewPopDistinct" -"skewPopForEach" -"skewPopIf" -"skewPopMap" -"skewPopMerge" -"skewPopNull" -"skewPopOrDefault" -"skewPopOrNull" -"skewPopResample" 
-"skewPopSimpleState" -"skewPopState" -"skewSamp" -"skewSampArgMax" -"skewSampArgMin" -"skewSampArray" -"skewSampDistinct" -"skewSampForEach" -"skewSampIf" -"skewSampMap" -"skewSampMerge" -"skewSampNull" -"skewSampOrDefault" -"skewSampOrNull" -"skewSampResample" -"skewSampSimpleState" -"skewSampState" -"sleep" -"sleepEachRow" -"snowflakeIDToDateTime" -"snowflakeIDToDateTime64" -"snowflakeToDateTime" -"snowflakeToDateTime64" -"soundex" -"space" -"sparkBar" -"sparkBarArgMax" -"sparkBarArgMin" -"sparkBarArray" -"sparkBarDistinct" -"sparkBarForEach" -"sparkBarIf" -"sparkBarMap" -"sparkBarMerge" -"sparkBarNull" -"sparkBarOrDefault" -"sparkBarOrNull" -"sparkBarResample" -"sparkBarSimpleState" -"sparkBarState" -"sparkbar" -"sparkbarArgMax" -"sparkbarArgMin" -"sparkbarArray" -"sparkbarDistinct" -"sparkbarForEach" -"sparkbarIf" -"sparkbarMap" -"sparkbarMerge" -"sparkbarNull" -"sparkbarOrDefault" -"sparkbarOrNull" -"sparkbarResample" -"sparkbarSimpleState" -"sparkbarState" -"splitByAlpha" -"splitByChar" -"splitByNonAlpha" -"splitByRegexp" -"splitByString" -"splitByWhitespace" -"sqid" -"sqidDecode" -"sqidEncode" -"sqrt" -"startsWith" -"startsWithUTF8" -"stddevPop" -"stddevPopArgMax" -"stddevPopArgMin" -"stddevPopArray" -"stddevPopDistinct" -"stddevPopForEach" -"stddevPopIf" -"stddevPopMap" -"stddevPopMerge" -"stddevPopNull" -"stddevPopOrDefault" -"stddevPopOrNull" -"stddevPopResample" -"stddevPopSimpleState" -"stddevPopStable" -"stddevPopStableArgMax" -"stddevPopStableArgMin" -"stddevPopStableArray" -"stddevPopStableDistinct" -"stddevPopStableForEach" -"stddevPopStableIf" -"stddevPopStableMap" -"stddevPopStableMerge" -"stddevPopStableNull" -"stddevPopStableOrDefault" -"stddevPopStableOrNull" -"stddevPopStableResample" -"stddevPopStableSimpleState" -"stddevPopStableState" -"stddevPopState" -"stddevSamp" -"stddevSampArgMax" -"stddevSampArgMin" -"stddevSampArray" -"stddevSampDistinct" -"stddevSampForEach" -"stddevSampIf" -"stddevSampMap" -"stddevSampMerge" -"stddevSampNull" -"stddevSampOrDefault" -"stddevSampOrNull" -"stddevSampResample" -"stddevSampSimpleState" -"stddevSampStable" -"stddevSampStableArgMax" -"stddevSampStableArgMin" -"stddevSampStableArray" -"stddevSampStableDistinct" -"stddevSampStableForEach" -"stddevSampStableIf" -"stddevSampStableMap" -"stddevSampStableMerge" -"stddevSampStableNull" -"stddevSampStableOrDefault" -"stddevSampStableOrNull" -"stddevSampStableResample" -"stddevSampStableSimpleState" -"stddevSampStableState" -"stddevSampState" -"stem" -"stochasticLinearRegression" -"stochasticLinearRegressionArgMax" -"stochasticLinearRegressionArgMin" -"stochasticLinearRegressionArray" -"stochasticLinearRegressionDistinct" -"stochasticLinearRegressionForEach" -"stochasticLinearRegressionIf" -"stochasticLinearRegressionMap" -"stochasticLinearRegressionMerge" -"stochasticLinearRegressionNull" -"stochasticLinearRegressionOrDefault" -"stochasticLinearRegressionOrNull" -"stochasticLinearRegressionResample" -"stochasticLinearRegressionSimpleState" -"stochasticLinearRegressionState" -"stochasticLogisticRegression" -"stochasticLogisticRegressionArgMax" -"stochasticLogisticRegressionArgMin" -"stochasticLogisticRegressionArray" -"stochasticLogisticRegressionDistinct" -"stochasticLogisticRegressionForEach" -"stochasticLogisticRegressionIf" -"stochasticLogisticRegressionMap" -"stochasticLogisticRegressionMerge" -"stochasticLogisticRegressionNull" -"stochasticLogisticRegressionOrDefault" -"stochasticLogisticRegressionOrNull" -"stochasticLogisticRegressionResample" 
-"stochasticLogisticRegressionSimpleState" -"stochasticLogisticRegressionState" -"str_to_date" -"str_to_map" -"stringJaccardIndex" -"stringJaccardIndexUTF8" -"stringToH3" -"structureToCapnProtoSchema" -"structureToProtobufSchema" -"studentTTest" -"studentTTestArgMax" -"studentTTestArgMin" -"studentTTestArray" -"studentTTestDistinct" -"studentTTestForEach" -"studentTTestIf" -"studentTTestMap" -"studentTTestMerge" -"studentTTestNull" -"studentTTestOrDefault" -"studentTTestOrNull" -"studentTTestResample" -"studentTTestSimpleState" -"studentTTestState" -"subBitmap" -"subDate" -"substr" -"substring" -"substringIndex" -"substringIndexUTF8" -"substringUTF8" -"subtractDays" -"subtractHours" -"subtractInterval" -"subtractMicroseconds" -"subtractMilliseconds" -"subtractMinutes" -"subtractMonths" -"subtractNanoseconds" -"subtractQuarters" -"subtractSeconds" -"subtractTupleOfIntervals" -"subtractWeeks" -"subtractYears" -"sum" -"sumArgMax" -"sumArgMin" -"sumArray" -"sumCount" -"sumCountArgMax" -"sumCountArgMin" -"sumCountArray" -"sumCountDistinct" -"sumCountForEach" -"sumCountIf" -"sumCountMap" -"sumCountMerge" -"sumCountNull" -"sumCountOrDefault" -"sumCountOrNull" -"sumCountResample" -"sumCountSimpleState" -"sumCountState" -"sumDistinct" -"sumForEach" -"sumIf" -"sumKahan" -"sumKahanArgMax" -"sumKahanArgMin" -"sumKahanArray" -"sumKahanDistinct" -"sumKahanForEach" -"sumKahanIf" -"sumKahanMap" -"sumKahanMerge" -"sumKahanNull" -"sumKahanOrDefault" -"sumKahanOrNull" -"sumKahanResample" -"sumKahanSimpleState" -"sumKahanState" -"sumMap" -"sumMapFiltered" -"sumMapFilteredArgMax" -"sumMapFilteredArgMin" -"sumMapFilteredArray" -"sumMapFilteredDistinct" -"sumMapFilteredForEach" -"sumMapFilteredIf" -"sumMapFilteredMap" -"sumMapFilteredMerge" -"sumMapFilteredNull" -"sumMapFilteredOrDefault" -"sumMapFilteredOrNull" -"sumMapFilteredResample" -"sumMapFilteredSimpleState" -"sumMapFilteredState" -"sumMapFilteredWithOverflow" -"sumMapFilteredWithOverflowArgMax" -"sumMapFilteredWithOverflowArgMin" -"sumMapFilteredWithOverflowArray" -"sumMapFilteredWithOverflowDistinct" -"sumMapFilteredWithOverflowForEach" -"sumMapFilteredWithOverflowIf" -"sumMapFilteredWithOverflowMap" -"sumMapFilteredWithOverflowMerge" -"sumMapFilteredWithOverflowNull" -"sumMapFilteredWithOverflowOrDefault" -"sumMapFilteredWithOverflowOrNull" -"sumMapFilteredWithOverflowResample" -"sumMapFilteredWithOverflowSimpleState" -"sumMapFilteredWithOverflowState" -"sumMapWithOverflow" -"sumMapWithOverflowArgMax" -"sumMapWithOverflowArgMin" -"sumMapWithOverflowArray" -"sumMapWithOverflowDistinct" -"sumMapWithOverflowForEach" -"sumMapWithOverflowIf" -"sumMapWithOverflowMap" -"sumMapWithOverflowMerge" -"sumMapWithOverflowNull" -"sumMapWithOverflowOrDefault" -"sumMapWithOverflowOrNull" -"sumMapWithOverflowResample" -"sumMapWithOverflowSimpleState" -"sumMapWithOverflowState" -"sumMappedArrays" -"sumMappedArraysArgMax" -"sumMappedArraysArgMin" -"sumMappedArraysArray" -"sumMappedArraysDistinct" -"sumMappedArraysForEach" -"sumMappedArraysIf" -"sumMappedArraysMap" -"sumMappedArraysMerge" -"sumMappedArraysNull" -"sumMappedArraysOrDefault" -"sumMappedArraysOrNull" -"sumMappedArraysResample" -"sumMappedArraysSimpleState" -"sumMappedArraysState" -"sumMerge" -"sumNull" -"sumOrDefault" -"sumOrNull" -"sumResample" -"sumSimpleState" -"sumState" -"sumWithOverflow" -"sumWithOverflowArgMax" -"sumWithOverflowArgMin" -"sumWithOverflowArray" -"sumWithOverflowDistinct" -"sumWithOverflowForEach" -"sumWithOverflowIf" -"sumWithOverflowMap" -"sumWithOverflowMerge" -"sumWithOverflowNull" 
-"sumWithOverflowOrDefault" -"sumWithOverflowOrNull" -"sumWithOverflowResample" -"sumWithOverflowSimpleState" -"sumWithOverflowState" -"svg" -"synonyms" -"tan" -"tanh" -"tcpPort" -"tgamma" -"theilsU" -"theilsUArgMax" -"theilsUArgMin" -"theilsUArray" -"theilsUDistinct" -"theilsUForEach" -"theilsUIf" -"theilsUMap" -"theilsUMerge" -"theilsUNull" -"theilsUOrDefault" -"theilsUOrNull" -"theilsUResample" -"theilsUSimpleState" -"theilsUState" -"throwIf" -"tid" -"timeDiff" -"timeSlot" -"timeSlots" -"timeZone" -"timeZoneOf" -"timeZoneOffset" -"timestamp" -"timestampDiff" -"timestamp_diff" -"timezone" -"timezoneOf" -"timezoneOffset" -"toBool" -"toColumnTypeName" -"toDate" -"toDate32" -"toDate32OrDefault" -"toDate32OrNull" -"toDate32OrZero" -"toDateOrDefault" -"toDateOrNull" -"toDateOrZero" -"toDateTime" -"toDateTime32" -"toDateTime64" -"toDateTime64OrDefault" -"toDateTime64OrNull" -"toDateTime64OrZero" -"toDateTimeOrDefault" -"toDateTimeOrNull" -"toDateTimeOrZero" -"toDayOfMonth" -"toDayOfWeek" -"toDayOfYear" -"toDaysSinceYearZero" -"toDecimal128" -"toDecimal128OrDefault" -"toDecimal128OrNull" -"toDecimal128OrZero" -"toDecimal256" -"toDecimal256OrDefault" -"toDecimal256OrNull" -"toDecimal256OrZero" -"toDecimal32" -"toDecimal32OrDefault" -"toDecimal32OrNull" -"toDecimal32OrZero" -"toDecimal64" -"toDecimal64OrDefault" -"toDecimal64OrNull" -"toDecimal64OrZero" -"toDecimalString" -"toFixedString" -"toFloat32" -"toFloat32OrDefault" -"toFloat32OrNull" -"toFloat32OrZero" -"toFloat64" -"toFloat64OrDefault" -"toFloat64OrNull" -"toFloat64OrZero" -"toHour" -"toIPv4" -"toIPv4OrDefault" -"toIPv4OrNull" -"toIPv4OrZero" -"toIPv6" -"toIPv6OrDefault" -"toIPv6OrNull" -"toIPv6OrZero" -"toISOWeek" -"toISOYear" -"toInt128" -"toInt128OrDefault" -"toInt128OrNull" -"toInt128OrZero" -"toInt16" -"toInt16OrDefault" -"toInt16OrNull" -"toInt16OrZero" -"toInt256" -"toInt256OrDefault" -"toInt256OrNull" -"toInt256OrZero" -"toInt32" -"toInt32OrDefault" -"toInt32OrNull" -"toInt32OrZero" -"toInt64" -"toInt64OrDefault" -"toInt64OrNull" -"toInt64OrZero" -"toInt8" -"toInt8OrDefault" -"toInt8OrNull" -"toInt8OrZero" -"toIntervalDay" -"toIntervalHour" -"toIntervalMicrosecond" -"toIntervalMillisecond" -"toIntervalMinute" -"toIntervalMonth" -"toIntervalNanosecond" -"toIntervalQuarter" -"toIntervalSecond" -"toIntervalWeek" -"toIntervalYear" -"toJSONString" -"toLastDayOfMonth" -"toLastDayOfWeek" -"toLowCardinality" -"toMillisecond" -"toMinute" -"toModifiedJulianDay" -"toModifiedJulianDayOrNull" -"toMonday" -"toMonth" -"toNullable" -"toQuarter" -"toRelativeDayNum" -"toRelativeHourNum" -"toRelativeMinuteNum" -"toRelativeMonthNum" -"toRelativeQuarterNum" -"toRelativeSecondNum" -"toRelativeWeekNum" -"toRelativeYearNum" -"toSecond" -"toStartOfDay" -"toStartOfFifteenMinutes" -"toStartOfFiveMinute" -"toStartOfFiveMinutes" -"toStartOfHour" -"toStartOfISOYear" -"toStartOfInterval" -"toStartOfMicrosecond" -"toStartOfMillisecond" -"toStartOfMinute" -"toStartOfMonth" -"toStartOfNanosecond" -"toStartOfQuarter" -"toStartOfSecond" -"toStartOfTenMinutes" -"toStartOfWeek" -"toStartOfYear" -"toString" -"toStringCutToZero" -"toTime" -"toTimeZone" -"toTimezone" -"toTypeName" -"toUInt128" -"toUInt128OrDefault" -"toUInt128OrNull" -"toUInt128OrZero" -"toUInt16" -"toUInt16OrDefault" -"toUInt16OrNull" -"toUInt16OrZero" -"toUInt256" -"toUInt256OrDefault" -"toUInt256OrNull" -"toUInt256OrZero" -"toUInt32" -"toUInt32OrDefault" -"toUInt32OrNull" -"toUInt32OrZero" -"toUInt64" -"toUInt64OrDefault" -"toUInt64OrNull" -"toUInt64OrZero" -"toUInt8" -"toUInt8OrDefault" 
-"toUInt8OrNull" -"toUInt8OrZero" -"toUTCTimestamp" -"toUUID" -"toUUIDOrDefault" -"toUUIDOrNull" -"toUUIDOrZero" -"toUnixTimestamp" -"toUnixTimestamp64Micro" -"toUnixTimestamp64Milli" -"toUnixTimestamp64Nano" -"toValidUTF8" -"toWeek" -"toYYYYMM" -"toYYYYMMDD" -"toYYYYMMDDhhmmss" -"toYear" -"toYearWeek" -"to_utc_timestamp" -"today" -"tokens" -"topK" -"topKArgMax" -"topKArgMin" -"topKArray" -"topKDistinct" -"topKForEach" -"topKIf" -"topKMap" -"topKMerge" -"topKNull" -"topKOrDefault" -"topKOrNull" -"topKResample" -"topKSimpleState" -"topKState" -"topKWeighted" -"topKWeightedArgMax" -"topKWeightedArgMin" -"topKWeightedArray" -"topKWeightedDistinct" -"topKWeightedForEach" -"topKWeightedIf" -"topKWeightedMap" -"topKWeightedMerge" -"topKWeightedNull" -"topKWeightedOrDefault" -"topKWeightedOrNull" -"topKWeightedResample" -"topKWeightedSimpleState" -"topKWeightedState" -"topLevelDomain" -"topLevelDomainRFC" -"transactionID" -"transactionLatestSnapshot" -"transactionOldestSnapshot" -"transform" -"translate" -"translateUTF8" -"trim" -"trimBoth" -"trimLeft" -"trimRight" -"trunc" -"truncate" -"tryBase58Decode" -"tryBase64Decode" -"tryBase64URLDecode" -"tryDecrypt" -"tryIdnaEncode" -"tryPunycodeDecode" -"tumble" -"tumbleEnd" -"tumbleStart" -"tuple" -"tupleConcat" -"tupleDivide" -"tupleDivideByNumber" -"tupleElement" -"tupleHammingDistance" -"tupleIntDiv" -"tupleIntDivByNumber" -"tupleIntDivOrZero" -"tupleIntDivOrZeroByNumber" -"tupleMinus" -"tupleModulo" -"tupleModuloByNumber" -"tupleMultiply" -"tupleMultiplyByNumber" -"tupleNames" -"tupleNegate" -"tuplePlus" -"tupleToNameValuePairs" -"ucase" -"unbin" -"unhex" -"uniq" -"uniqArgMax" -"uniqArgMin" -"uniqArray" -"uniqCombined" -"uniqCombined64" -"uniqCombined64ArgMax" -"uniqCombined64ArgMin" -"uniqCombined64Array" -"uniqCombined64Distinct" -"uniqCombined64ForEach" -"uniqCombined64If" -"uniqCombined64Map" -"uniqCombined64Merge" -"uniqCombined64Null" -"uniqCombined64OrDefault" -"uniqCombined64OrNull" -"uniqCombined64Resample" -"uniqCombined64SimpleState" -"uniqCombined64State" -"uniqCombinedArgMax" -"uniqCombinedArgMin" -"uniqCombinedArray" -"uniqCombinedDistinct" -"uniqCombinedForEach" -"uniqCombinedIf" -"uniqCombinedMap" -"uniqCombinedMerge" -"uniqCombinedNull" -"uniqCombinedOrDefault" -"uniqCombinedOrNull" -"uniqCombinedResample" -"uniqCombinedSimpleState" -"uniqCombinedState" -"uniqDistinct" -"uniqExact" -"uniqExactArgMax" -"uniqExactArgMin" -"uniqExactArray" -"uniqExactDistinct" -"uniqExactForEach" -"uniqExactIf" -"uniqExactMap" -"uniqExactMerge" -"uniqExactNull" -"uniqExactOrDefault" -"uniqExactOrNull" -"uniqExactResample" -"uniqExactSimpleState" -"uniqExactState" -"uniqForEach" -"uniqHLL12" -"uniqHLL12ArgMax" -"uniqHLL12ArgMin" -"uniqHLL12Array" -"uniqHLL12Distinct" -"uniqHLL12ForEach" -"uniqHLL12If" -"uniqHLL12Map" -"uniqHLL12Merge" -"uniqHLL12Null" -"uniqHLL12OrDefault" -"uniqHLL12OrNull" -"uniqHLL12Resample" -"uniqHLL12SimpleState" -"uniqHLL12State" -"uniqIf" -"uniqMap" -"uniqMerge" -"uniqNull" -"uniqOrDefault" -"uniqOrNull" -"uniqResample" -"uniqSimpleState" -"uniqState" -"uniqTheta" -"uniqThetaArgMax" -"uniqThetaArgMin" -"uniqThetaArray" -"uniqThetaDistinct" -"uniqThetaForEach" -"uniqThetaIf" -"uniqThetaIntersect" -"uniqThetaMap" -"uniqThetaMerge" -"uniqThetaNot" -"uniqThetaNull" -"uniqThetaOrDefault" -"uniqThetaOrNull" -"uniqThetaResample" -"uniqThetaSimpleState" -"uniqThetaState" -"uniqThetaUnion" -"uniqUpTo" -"uniqUpToArgMax" -"uniqUpToArgMin" -"uniqUpToArray" -"uniqUpToDistinct" -"uniqUpToForEach" -"uniqUpToIf" -"uniqUpToMap" -"uniqUpToMerge" 
-"uniqUpToNull" -"uniqUpToOrDefault" -"uniqUpToOrNull" -"uniqUpToResample" -"uniqUpToSimpleState" -"uniqUpToState" -"upper" -"upperUTF8" -"uptime" -"user" -"validateNestedArraySizes" -"varPop" -"varPopArgMax" -"varPopArgMin" -"varPopArray" -"varPopDistinct" -"varPopForEach" -"varPopIf" -"varPopMap" -"varPopMerge" -"varPopNull" -"varPopOrDefault" -"varPopOrNull" -"varPopResample" -"varPopSimpleState" -"varPopStable" -"varPopStableArgMax" -"varPopStableArgMin" -"varPopStableArray" -"varPopStableDistinct" -"varPopStableForEach" -"varPopStableIf" -"varPopStableMap" -"varPopStableMerge" -"varPopStableNull" -"varPopStableOrDefault" -"varPopStableOrNull" -"varPopStableResample" -"varPopStableSimpleState" -"varPopStableState" -"varPopState" -"varSamp" -"varSampArgMax" -"varSampArgMin" -"varSampArray" -"varSampDistinct" -"varSampForEach" -"varSampIf" -"varSampMap" -"varSampMerge" -"varSampNull" -"varSampOrDefault" -"varSampOrNull" -"varSampResample" -"varSampSimpleState" -"varSampStable" -"varSampStableArgMax" -"varSampStableArgMin" -"varSampStableArray" -"varSampStableDistinct" -"varSampStableForEach" -"varSampStableIf" -"varSampStableMap" -"varSampStableMerge" -"varSampStableNull" -"varSampStableOrDefault" -"varSampStableOrNull" -"varSampStableResample" -"varSampStableSimpleState" -"varSampStableState" -"varSampState" -"variantElement" -"variantType" -"vectorDifference" -"vectorSum" -"version" -"visibleWidth" -"visitParamExtractBool" -"visitParamExtractFloat" -"visitParamExtractInt" -"visitParamExtractRaw" -"visitParamExtractString" -"visitParamExtractUInt" -"visitParamHas" -"week" -"welchTTest" -"welchTTestArgMax" -"welchTTestArgMin" -"welchTTestArray" -"welchTTestDistinct" -"welchTTestForEach" -"welchTTestIf" -"welchTTestMap" -"welchTTestMerge" -"welchTTestNull" -"welchTTestOrDefault" -"welchTTestOrNull" -"welchTTestResample" -"welchTTestSimpleState" -"welchTTestState" -"widthBucket" -"width_bucket" -"windowFunnel" -"windowFunnelArgMax" -"windowFunnelArgMin" -"windowFunnelArray" -"windowFunnelDistinct" -"windowFunnelForEach" -"windowFunnelIf" -"windowFunnelMap" -"windowFunnelMerge" -"windowFunnelNull" -"windowFunnelOrDefault" -"windowFunnelOrNull" -"windowFunnelResample" -"windowFunnelSimpleState" -"windowFunnelState" -"windowID" -"wkt" -"wordShingleMinHash" -"wordShingleMinHashArg" -"wordShingleMinHashArgCaseInsensitive" -"wordShingleMinHashArgCaseInsensitiveUTF8" -"wordShingleMinHashArgUTF8" -"wordShingleMinHashCaseInsensitive" -"wordShingleMinHashCaseInsensitiveUTF8" -"wordShingleMinHashUTF8" -"wordShingleSimHash" -"wordShingleSimHashCaseInsensitive" -"wordShingleSimHashCaseInsensitiveUTF8" -"wordShingleSimHashUTF8" -"wyHash64" -"xor" -"xxHash32" -"xxHash64" -"xxh3" -"yandexConsistentHash" -"yearweek" -"yesterday" -"zookeeperSessionUptime" diff --git a/tests/fuzz/update_dict.sh b/tests/fuzz/update_dict.sh new file mode 100755 index 00000000000..a83c9167129 --- /dev/null +++ b/tests/fuzz/update_dict.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -euo pipefail + +SCRIPT_DIR=$(dirname "$(realpath "$0")") +ROOT_PATH="$(git rev-parse --show-toplevel)" +CLICKHOUSE_BIN="${CLICKHOUSE_BIN:-$ROOT_PATH/build/programs/clickhouse}" +DICTIONARIES_DIR="$SCRIPT_DIR/dictionaries" + +echo "Generating functions dict" +$CLICKHOUSE_BIN local -q "SELECT * FROM (SELECT DISTINCT concat('\"', name, '\"') as res FROM system.functions ORDER BY name UNION ALL SELECT concat('\"', a.name, b.name, '\"') as res FROM system.functions as a CROSS JOIN system.aggregate_function_combinators as b WHERE a.is_aggregate = 1) ORDER BY res" 
> "$DICTIONARIES_DIR/functions.dict" + +echo "Generating data types dict" +$CLICKHOUSE_BIN local -q "SELECT DISTINCT concat('\"', name, '\"') as res FROM system.data_type_families ORDER BY name" > "$DICTIONARIES_DIR/datatypes.dict" + +echo "Generating keywords dict" +$CLICKHOUSE_BIN local -q "SELECT DISTINCT concat('\"', keyword, '\"') as res FROM system.keywords ORDER BY keyword" > "$DICTIONARIES_DIR/keywords.dict" + +echo "Merging dictionaries into all.dict" +cat "$DICTIONARIES_DIR"/* | LC_ALL=C sort | uniq > "$SCRIPT_DIR/all.dict" \ No newline at end of file From 69893aaa25e3c459f3480955534e05456e7aaa64 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 13 Aug 2024 16:19:25 +0200 Subject: [PATCH 2167/2361] Lower memory usage --- docker/test/stateless/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index c70cbe1fe45..874095e39dc 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -391,8 +391,8 @@ done # wait for minio to flush its batch if it has any sleep 1 clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" -clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" -clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? From 20e20b97c9bddf3a88b0ba8e3a388fe54496fcb2 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 16:20:27 +0200 Subject: [PATCH 2168/2361] Fix test storage_join_direct_join --- tests/performance/storage_join_direct_join.xml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/performance/storage_join_direct_join.xml b/tests/performance/storage_join_direct_join.xml index 867108ac2b7..0e67abb275e 100644 --- a/tests/performance/storage_join_direct_join.xml +++ b/tests/performance/storage_join_direct_join.xml @@ -16,4 +16,7 @@ SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null; SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null SETTINGS allow_experimental_analyzer=1 + + DROP TABLE IF EXISTS keys + DROP TABLE IF EXISTS dict From 0593650565d717c773fec265056256de98f86f7f Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Tue, 13 Aug 2024 14:30:42 +0000 Subject: [PATCH 2169/2361] Update dict files Previous commit had the files from the script ran from the root directory, which returned an error. 
--- tests/fuzz/dictionaries/functions.dict | 4283 ++++++++++++++++++++++++ 1 file changed, 4283 insertions(+) diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index e69de29bb2d..e562595fb67 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -0,0 +1,4283 @@ +"BIT_AND" +"BIT_ANDArgMax" +"BIT_ANDArgMin" +"BIT_ANDArray" +"BIT_ANDDistinct" +"BIT_ANDForEach" +"BIT_ANDIf" +"BIT_ANDMap" +"BIT_ANDMerge" +"BIT_ANDNull" +"BIT_ANDOrDefault" +"BIT_ANDOrNull" +"BIT_ANDResample" +"BIT_ANDSimpleState" +"BIT_ANDState" +"BIT_OR" +"BIT_ORArgMax" +"BIT_ORArgMin" +"BIT_ORArray" +"BIT_ORDistinct" +"BIT_ORForEach" +"BIT_ORIf" +"BIT_ORMap" +"BIT_ORMerge" +"BIT_ORNull" +"BIT_OROrDefault" +"BIT_OROrNull" +"BIT_ORResample" +"BIT_ORSimpleState" +"BIT_ORState" +"BIT_XOR" +"BIT_XORArgMax" +"BIT_XORArgMin" +"BIT_XORArray" +"BIT_XORDistinct" +"BIT_XORForEach" +"BIT_XORIf" +"BIT_XORMap" +"BIT_XORMerge" +"BIT_XORNull" +"BIT_XOROrDefault" +"BIT_XOROrNull" +"BIT_XORResample" +"BIT_XORSimpleState" +"BIT_XORState" +"BLAKE3" +"CAST" +"CHARACTER_LENGTH" +"CHAR_LENGTH" +"COVAR_POP" +"COVAR_POPArgMax" +"COVAR_POPArgMin" +"COVAR_POPArray" +"COVAR_POPDistinct" +"COVAR_POPForEach" +"COVAR_POPIf" +"COVAR_POPMap" +"COVAR_POPMerge" +"COVAR_POPNull" +"COVAR_POPOrDefault" +"COVAR_POPOrNull" +"COVAR_POPResample" +"COVAR_POPSimpleState" +"COVAR_POPState" +"COVAR_SAMP" +"COVAR_SAMPArgMax" +"COVAR_SAMPArgMin" +"COVAR_SAMPArray" +"COVAR_SAMPDistinct" +"COVAR_SAMPForEach" +"COVAR_SAMPIf" +"COVAR_SAMPMap" +"COVAR_SAMPMerge" +"COVAR_SAMPNull" +"COVAR_SAMPOrDefault" +"COVAR_SAMPOrNull" +"COVAR_SAMPResample" +"COVAR_SAMPSimpleState" +"COVAR_SAMPState" +"CRC32" +"CRC32IEEE" +"CRC64" +"DATABASE" +"DATE" +"DATE_DIFF" +"DATE_FORMAT" +"DATE_TRUNC" +"DAY" +"DAYOFMONTH" +"DAYOFWEEK" +"DAYOFYEAR" +"FORMAT_BYTES" +"FQDN" +"FROM_BASE64" +"FROM_DAYS" +"FROM_UNIXTIME" +"HOUR" +"INET6_ATON" +"INET6_NTOA" +"INET_ATON" +"INET_NTOA" +"IPv4CIDRToRange" +"IPv4NumToString" +"IPv4NumToStringClassC" +"IPv4StringToNum" +"IPv4StringToNumOrDefault" +"IPv4StringToNumOrNull" +"IPv4ToIPv6" +"IPv6CIDRToRange" +"IPv6NumToString" +"IPv6StringToNum" +"IPv6StringToNumOrDefault" +"IPv6StringToNumOrNull" +"JSONArrayLength" +"JSONExtract" +"JSONExtractArrayRaw" +"JSONExtractBool" +"JSONExtractFloat" +"JSONExtractInt" +"JSONExtractKeys" +"JSONExtractKeysAndValues" +"JSONExtractKeysAndValuesRaw" +"JSONExtractRaw" +"JSONExtractString" +"JSONExtractUInt" +"JSONHas" +"JSONKey" +"JSONLength" +"JSONMergePatch" +"JSONType" +"JSON_ARRAY_LENGTH" +"JSON_EXISTS" +"JSON_QUERY" +"JSON_VALUE" +"L1Distance" +"L1Norm" +"L1Normalize" +"L2Distance" +"L2Norm" +"L2Normalize" +"L2SquaredDistance" +"L2SquaredNorm" +"LAST_DAY" +"LinfDistance" +"LinfNorm" +"LinfNormalize" +"LpDistance" +"LpNorm" +"LpNormalize" +"MACNumToString" +"MACStringToNum" +"MACStringToOUI" +"MAP_FROM_ARRAYS" +"MD4" +"MD5" +"MILLISECOND" +"MINUTE" +"MONTH" +"OCTET_LENGTH" +"QUARTER" +"REGEXP_EXTRACT" +"REGEXP_MATCHES" +"REGEXP_REPLACE" +"SCHEMA" +"SECOND" +"SHA1" +"SHA224" +"SHA256" +"SHA384" +"SHA512" +"SHA512_256" +"STD" +"STDArgMax" +"STDArgMin" +"STDArray" +"STDDEV_POP" +"STDDEV_POPArgMax" +"STDDEV_POPArgMin" +"STDDEV_POPArray" +"STDDEV_POPDistinct" +"STDDEV_POPForEach" +"STDDEV_POPIf" +"STDDEV_POPMap" +"STDDEV_POPMerge" +"STDDEV_POPNull" +"STDDEV_POPOrDefault" +"STDDEV_POPOrNull" +"STDDEV_POPResample" +"STDDEV_POPSimpleState" +"STDDEV_POPState" +"STDDEV_SAMP" +"STDDEV_SAMPArgMax" +"STDDEV_SAMPArgMin" +"STDDEV_SAMPArray" 
+"STDDEV_SAMPDistinct" +"STDDEV_SAMPForEach" +"STDDEV_SAMPIf" +"STDDEV_SAMPMap" +"STDDEV_SAMPMerge" +"STDDEV_SAMPNull" +"STDDEV_SAMPOrDefault" +"STDDEV_SAMPOrNull" +"STDDEV_SAMPResample" +"STDDEV_SAMPSimpleState" +"STDDEV_SAMPState" +"STDDistinct" +"STDForEach" +"STDIf" +"STDMap" +"STDMerge" +"STDNull" +"STDOrDefault" +"STDOrNull" +"STDResample" +"STDSimpleState" +"STDState" +"SUBSTRING_INDEX" +"SVG" +"TIMESTAMP_DIFF" +"TO_BASE64" +"TO_DAYS" +"TO_UNIXTIME" +"ULIDStringToDateTime" +"URLHash" +"URLHierarchy" +"URLPathHierarchy" +"UTCTimestamp" +"UTC_timestamp" +"UUIDNumToString" +"UUIDStringToNum" +"UUIDToNum" +"UUIDv7ToDateTime" +"VAR_POP" +"VAR_POPArgMax" +"VAR_POPArgMin" +"VAR_POPArray" +"VAR_POPDistinct" +"VAR_POPForEach" +"VAR_POPIf" +"VAR_POPMap" +"VAR_POPMerge" +"VAR_POPNull" +"VAR_POPOrDefault" +"VAR_POPOrNull" +"VAR_POPResample" +"VAR_POPSimpleState" +"VAR_POPState" +"VAR_SAMP" +"VAR_SAMPArgMax" +"VAR_SAMPArgMin" +"VAR_SAMPArray" +"VAR_SAMPDistinct" +"VAR_SAMPForEach" +"VAR_SAMPIf" +"VAR_SAMPMap" +"VAR_SAMPMerge" +"VAR_SAMPNull" +"VAR_SAMPOrDefault" +"VAR_SAMPOrNull" +"VAR_SAMPResample" +"VAR_SAMPSimpleState" +"VAR_SAMPState" +"YEAR" +"YYYYMMDDToDate" +"YYYYMMDDToDate32" +"YYYYMMDDhhmmssToDateTime" +"YYYYMMDDhhmmssToDateTime64" +"_CAST" +"__actionName" +"__bitBoolMaskAnd" +"__bitBoolMaskOr" +"__bitSwapLastTwo" +"__bitWrapperFunc" +"__getScalar" +"__scalarSubqueryResult" +"abs" +"accurateCast" +"accurateCastOrDefault" +"accurateCastOrNull" +"acos" +"acosh" +"addDate" +"addDays" +"addHours" +"addInterval" +"addMicroseconds" +"addMilliseconds" +"addMinutes" +"addMonths" +"addNanoseconds" +"addQuarters" +"addSeconds" +"addTupleOfIntervals" +"addWeeks" +"addYears" +"addressToLine" +"addressToLineWithInlines" +"addressToSymbol" +"aes_decrypt_mysql" +"aes_encrypt_mysql" +"age" +"aggThrow" +"aggThrowArgMax" +"aggThrowArgMin" +"aggThrowArray" +"aggThrowDistinct" +"aggThrowForEach" +"aggThrowIf" +"aggThrowMap" +"aggThrowMerge" +"aggThrowNull" +"aggThrowOrDefault" +"aggThrowOrNull" +"aggThrowResample" +"aggThrowSimpleState" +"aggThrowState" +"alphaTokens" +"analysisOfVariance" +"analysisOfVarianceArgMax" +"analysisOfVarianceArgMin" +"analysisOfVarianceArray" +"analysisOfVarianceDistinct" +"analysisOfVarianceForEach" +"analysisOfVarianceIf" +"analysisOfVarianceMap" +"analysisOfVarianceMerge" +"analysisOfVarianceNull" +"analysisOfVarianceOrDefault" +"analysisOfVarianceOrNull" +"analysisOfVarianceResample" +"analysisOfVarianceSimpleState" +"analysisOfVarianceState" +"and" +"anova" +"anovaArgMax" +"anovaArgMin" +"anovaArray" +"anovaDistinct" +"anovaForEach" +"anovaIf" +"anovaMap" +"anovaMerge" +"anovaNull" +"anovaOrDefault" +"anovaOrNull" +"anovaResample" +"anovaSimpleState" +"anovaState" +"any" +"anyArgMax" +"anyArgMin" +"anyArray" +"anyDistinct" +"anyForEach" +"anyHeavy" +"anyHeavyArgMax" +"anyHeavyArgMin" +"anyHeavyArray" +"anyHeavyDistinct" +"anyHeavyForEach" +"anyHeavyIf" +"anyHeavyMap" +"anyHeavyMerge" +"anyHeavyNull" +"anyHeavyOrDefault" +"anyHeavyOrNull" +"anyHeavyResample" +"anyHeavySimpleState" +"anyHeavyState" +"anyIf" +"anyLast" +"anyLastArgMax" +"anyLastArgMin" +"anyLastArray" +"anyLastDistinct" +"anyLastForEach" +"anyLastIf" +"anyLastMap" +"anyLastMerge" +"anyLastNull" +"anyLastOrDefault" +"anyLastOrNull" +"anyLastResample" +"anyLastSimpleState" +"anyLastState" +"anyLast_respect_nulls" +"anyLast_respect_nullsArgMax" +"anyLast_respect_nullsArgMin" +"anyLast_respect_nullsArray" +"anyLast_respect_nullsDistinct" +"anyLast_respect_nullsForEach" +"anyLast_respect_nullsIf" 
+"anyLast_respect_nullsMap" +"anyLast_respect_nullsMerge" +"anyLast_respect_nullsNull" +"anyLast_respect_nullsOrDefault" +"anyLast_respect_nullsOrNull" +"anyLast_respect_nullsResample" +"anyLast_respect_nullsSimpleState" +"anyLast_respect_nullsState" +"anyMap" +"anyMerge" +"anyNull" +"anyOrDefault" +"anyOrNull" +"anyResample" +"anySimpleState" +"anyState" +"any_respect_nulls" +"any_respect_nullsArgMax" +"any_respect_nullsArgMin" +"any_respect_nullsArray" +"any_respect_nullsDistinct" +"any_respect_nullsForEach" +"any_respect_nullsIf" +"any_respect_nullsMap" +"any_respect_nullsMerge" +"any_respect_nullsNull" +"any_respect_nullsOrDefault" +"any_respect_nullsOrNull" +"any_respect_nullsResample" +"any_respect_nullsSimpleState" +"any_respect_nullsState" +"any_value" +"any_valueArgMax" +"any_valueArgMin" +"any_valueArray" +"any_valueDistinct" +"any_valueForEach" +"any_valueIf" +"any_valueMap" +"any_valueMerge" +"any_valueNull" +"any_valueOrDefault" +"any_valueOrNull" +"any_valueResample" +"any_valueSimpleState" +"any_valueState" +"any_value_respect_nulls" +"any_value_respect_nullsArgMax" +"any_value_respect_nullsArgMin" +"any_value_respect_nullsArray" +"any_value_respect_nullsDistinct" +"any_value_respect_nullsForEach" +"any_value_respect_nullsIf" +"any_value_respect_nullsMap" +"any_value_respect_nullsMerge" +"any_value_respect_nullsNull" +"any_value_respect_nullsOrDefault" +"any_value_respect_nullsOrNull" +"any_value_respect_nullsResample" +"any_value_respect_nullsSimpleState" +"any_value_respect_nullsState" +"appendTrailingCharIfAbsent" +"approx_top_count" +"approx_top_countArgMax" +"approx_top_countArgMin" +"approx_top_countArray" +"approx_top_countDistinct" +"approx_top_countForEach" +"approx_top_countIf" +"approx_top_countMap" +"approx_top_countMerge" +"approx_top_countNull" +"approx_top_countOrDefault" +"approx_top_countOrNull" +"approx_top_countResample" +"approx_top_countSimpleState" +"approx_top_countState" +"approx_top_k" +"approx_top_kArgMax" +"approx_top_kArgMin" +"approx_top_kArray" +"approx_top_kDistinct" +"approx_top_kForEach" +"approx_top_kIf" +"approx_top_kMap" +"approx_top_kMerge" +"approx_top_kNull" +"approx_top_kOrDefault" +"approx_top_kOrNull" +"approx_top_kResample" +"approx_top_kSimpleState" +"approx_top_kState" +"approx_top_sum" +"approx_top_sumArgMax" +"approx_top_sumArgMin" +"approx_top_sumArray" +"approx_top_sumDistinct" +"approx_top_sumForEach" +"approx_top_sumIf" +"approx_top_sumMap" +"approx_top_sumMerge" +"approx_top_sumNull" +"approx_top_sumOrDefault" +"approx_top_sumOrNull" +"approx_top_sumResample" +"approx_top_sumSimpleState" +"approx_top_sumState" +"argMax" +"argMaxArgMax" +"argMaxArgMin" +"argMaxArray" +"argMaxDistinct" +"argMaxForEach" +"argMaxIf" +"argMaxMap" +"argMaxMerge" +"argMaxNull" +"argMaxOrDefault" +"argMaxOrNull" +"argMaxResample" +"argMaxSimpleState" +"argMaxState" +"argMin" +"argMinArgMax" +"argMinArgMin" +"argMinArray" +"argMinDistinct" +"argMinForEach" +"argMinIf" +"argMinMap" +"argMinMerge" +"argMinNull" +"argMinOrDefault" +"argMinOrNull" +"argMinResample" +"argMinSimpleState" +"argMinState" +"array" +"arrayAUC" +"arrayAll" +"arrayAvg" +"arrayCompact" +"arrayConcat" +"arrayCount" +"arrayCumSum" +"arrayCumSumNonNegative" +"arrayDifference" +"arrayDistinct" +"arrayDotProduct" +"arrayElement" +"arrayEnumerate" +"arrayEnumerateDense" +"arrayEnumerateDenseRanked" +"arrayEnumerateUniq" +"arrayEnumerateUniqRanked" +"arrayExists" +"arrayFill" +"arrayFilter" +"arrayFirst" +"arrayFirstIndex" +"arrayFirstOrNull" +"arrayFlatten" +"arrayFold" 
+"arrayIntersect" +"arrayJaccardIndex" +"arrayJoin" +"arrayLast" +"arrayLastIndex" +"arrayLastOrNull" +"arrayMap" +"arrayMax" +"arrayMin" +"arrayPartialReverseSort" +"arrayPartialShuffle" +"arrayPartialSort" +"arrayPopBack" +"arrayPopFront" +"arrayProduct" +"arrayPushBack" +"arrayPushFront" +"arrayRandomSample" +"arrayReduce" +"arrayReduceInRanges" +"arrayResize" +"arrayReverse" +"arrayReverseFill" +"arrayReverseSort" +"arrayReverseSplit" +"arrayRotateLeft" +"arrayRotateRight" +"arrayShiftLeft" +"arrayShiftRight" +"arrayShingles" +"arrayShuffle" +"arraySlice" +"arraySort" +"arraySplit" +"arrayStringConcat" +"arraySum" +"arrayUniq" +"arrayWithConstant" +"arrayZip" +"array_agg" +"array_aggArgMax" +"array_aggArgMin" +"array_aggArray" +"array_aggDistinct" +"array_aggForEach" +"array_aggIf" +"array_aggMap" +"array_aggMerge" +"array_aggNull" +"array_aggOrDefault" +"array_aggOrNull" +"array_aggResample" +"array_aggSimpleState" +"array_aggState" +"array_concat_agg" +"array_concat_aggArgMax" +"array_concat_aggArgMin" +"array_concat_aggArray" +"array_concat_aggDistinct" +"array_concat_aggForEach" +"array_concat_aggIf" +"array_concat_aggMap" +"array_concat_aggMerge" +"array_concat_aggNull" +"array_concat_aggOrDefault" +"array_concat_aggOrNull" +"array_concat_aggResample" +"array_concat_aggSimpleState" +"array_concat_aggState" +"ascii" +"asin" +"asinh" +"assumeNotNull" +"atan" +"atan2" +"atanh" +"avg" +"avgArgMax" +"avgArgMin" +"avgArray" +"avgDistinct" +"avgForEach" +"avgIf" +"avgMap" +"avgMerge" +"avgNull" +"avgOrDefault" +"avgOrNull" +"avgResample" +"avgSimpleState" +"avgState" +"avgWeighted" +"avgWeightedArgMax" +"avgWeightedArgMin" +"avgWeightedArray" +"avgWeightedDistinct" +"avgWeightedForEach" +"avgWeightedIf" +"avgWeightedMap" +"avgWeightedMerge" +"avgWeightedNull" +"avgWeightedOrDefault" +"avgWeightedOrNull" +"avgWeightedResample" +"avgWeightedSimpleState" +"avgWeightedState" +"bar" +"base58Decode" +"base58Encode" +"base64Decode" +"base64Encode" +"base64URLDecode" +"base64URLEncode" +"basename" +"bin" +"bitAnd" +"bitCount" +"bitHammingDistance" +"bitNot" +"bitOr" +"bitPositionsToArray" +"bitRotateLeft" +"bitRotateRight" +"bitShiftLeft" +"bitShiftRight" +"bitSlice" +"bitTest" +"bitTestAll" +"bitTestAny" +"bitXor" +"bitmapAnd" +"bitmapAndCardinality" +"bitmapAndnot" +"bitmapAndnotCardinality" +"bitmapBuild" +"bitmapCardinality" +"bitmapContains" +"bitmapHasAll" +"bitmapHasAny" +"bitmapMax" +"bitmapMin" +"bitmapOr" +"bitmapOrCardinality" +"bitmapSubsetInRange" +"bitmapSubsetLimit" +"bitmapToArray" +"bitmapTransform" +"bitmapXor" +"bitmapXorCardinality" +"bitmaskToArray" +"bitmaskToList" +"blockNumber" +"blockSerializedSize" +"blockSize" +"boundingRatio" +"boundingRatioArgMax" +"boundingRatioArgMin" +"boundingRatioArray" +"boundingRatioDistinct" +"boundingRatioForEach" +"boundingRatioIf" +"boundingRatioMap" +"boundingRatioMerge" +"boundingRatioNull" +"boundingRatioOrDefault" +"boundingRatioOrNull" +"boundingRatioResample" +"boundingRatioSimpleState" +"boundingRatioState" +"buildId" +"byteHammingDistance" +"byteSize" +"byteSlice" +"byteSwap" +"caseWithExpr" +"caseWithExpression" +"caseWithoutExpr" +"caseWithoutExpression" +"catboostEvaluate" +"categoricalInformationValue" +"categoricalInformationValueArgMax" +"categoricalInformationValueArgMin" +"categoricalInformationValueArray" +"categoricalInformationValueDistinct" +"categoricalInformationValueForEach" +"categoricalInformationValueIf" +"categoricalInformationValueMap" +"categoricalInformationValueMerge" +"categoricalInformationValueNull" 
+"categoricalInformationValueOrDefault" +"categoricalInformationValueOrNull" +"categoricalInformationValueResample" +"categoricalInformationValueSimpleState" +"categoricalInformationValueState" +"cbrt" +"ceil" +"ceiling" +"changeDay" +"changeHour" +"changeMinute" +"changeMonth" +"changeSecond" +"changeYear" +"char" +"cityHash64" +"clamp" +"coalesce" +"concat" +"concatAssumeInjective" +"concatWithSeparator" +"concatWithSeparatorAssumeInjective" +"concat_ws" +"connectionId" +"connection_id" +"contingency" +"contingencyArgMax" +"contingencyArgMin" +"contingencyArray" +"contingencyDistinct" +"contingencyForEach" +"contingencyIf" +"contingencyMap" +"contingencyMerge" +"contingencyNull" +"contingencyOrDefault" +"contingencyOrNull" +"contingencyResample" +"contingencySimpleState" +"contingencyState" +"convertCharset" +"corr" +"corrArgMax" +"corrArgMin" +"corrArray" +"corrDistinct" +"corrForEach" +"corrIf" +"corrMap" +"corrMatrix" +"corrMatrixArgMax" +"corrMatrixArgMin" +"corrMatrixArray" +"corrMatrixDistinct" +"corrMatrixForEach" +"corrMatrixIf" +"corrMatrixMap" +"corrMatrixMerge" +"corrMatrixNull" +"corrMatrixOrDefault" +"corrMatrixOrNull" +"corrMatrixResample" +"corrMatrixSimpleState" +"corrMatrixState" +"corrMerge" +"corrNull" +"corrOrDefault" +"corrOrNull" +"corrResample" +"corrSimpleState" +"corrStable" +"corrStableArgMax" +"corrStableArgMin" +"corrStableArray" +"corrStableDistinct" +"corrStableForEach" +"corrStableIf" +"corrStableMap" +"corrStableMerge" +"corrStableNull" +"corrStableOrDefault" +"corrStableOrNull" +"corrStableResample" +"corrStableSimpleState" +"corrStableState" +"corrState" +"cos" +"cosh" +"cosineDistance" +"count" +"countArgMax" +"countArgMin" +"countArray" +"countDigits" +"countDistinct" +"countEqual" +"countForEach" +"countIf" +"countMap" +"countMatches" +"countMatchesCaseInsensitive" +"countMerge" +"countNull" +"countOrDefault" +"countOrNull" +"countResample" +"countSimpleState" +"countState" +"countSubstrings" +"countSubstringsCaseInsensitive" +"countSubstringsCaseInsensitiveUTF8" +"covarPop" +"covarPopArgMax" +"covarPopArgMin" +"covarPopArray" +"covarPopDistinct" +"covarPopForEach" +"covarPopIf" +"covarPopMap" +"covarPopMatrix" +"covarPopMatrixArgMax" +"covarPopMatrixArgMin" +"covarPopMatrixArray" +"covarPopMatrixDistinct" +"covarPopMatrixForEach" +"covarPopMatrixIf" +"covarPopMatrixMap" +"covarPopMatrixMerge" +"covarPopMatrixNull" +"covarPopMatrixOrDefault" +"covarPopMatrixOrNull" +"covarPopMatrixResample" +"covarPopMatrixSimpleState" +"covarPopMatrixState" +"covarPopMerge" +"covarPopNull" +"covarPopOrDefault" +"covarPopOrNull" +"covarPopResample" +"covarPopSimpleState" +"covarPopStable" +"covarPopStableArgMax" +"covarPopStableArgMin" +"covarPopStableArray" +"covarPopStableDistinct" +"covarPopStableForEach" +"covarPopStableIf" +"covarPopStableMap" +"covarPopStableMerge" +"covarPopStableNull" +"covarPopStableOrDefault" +"covarPopStableOrNull" +"covarPopStableResample" +"covarPopStableSimpleState" +"covarPopStableState" +"covarPopState" +"covarSamp" +"covarSampArgMax" +"covarSampArgMin" +"covarSampArray" +"covarSampDistinct" +"covarSampForEach" +"covarSampIf" +"covarSampMap" +"covarSampMatrix" +"covarSampMatrixArgMax" +"covarSampMatrixArgMin" +"covarSampMatrixArray" +"covarSampMatrixDistinct" +"covarSampMatrixForEach" +"covarSampMatrixIf" +"covarSampMatrixMap" +"covarSampMatrixMerge" +"covarSampMatrixNull" +"covarSampMatrixOrDefault" +"covarSampMatrixOrNull" +"covarSampMatrixResample" +"covarSampMatrixSimpleState" +"covarSampMatrixState" +"covarSampMerge" 
+"covarSampNull" +"covarSampOrDefault" +"covarSampOrNull" +"covarSampResample" +"covarSampSimpleState" +"covarSampStable" +"covarSampStableArgMax" +"covarSampStableArgMin" +"covarSampStableArray" +"covarSampStableDistinct" +"covarSampStableForEach" +"covarSampStableIf" +"covarSampStableMap" +"covarSampStableMerge" +"covarSampStableNull" +"covarSampStableOrDefault" +"covarSampStableOrNull" +"covarSampStableResample" +"covarSampStableSimpleState" +"covarSampStableState" +"covarSampState" +"cramersV" +"cramersVArgMax" +"cramersVArgMin" +"cramersVArray" +"cramersVBiasCorrected" +"cramersVBiasCorrectedArgMax" +"cramersVBiasCorrectedArgMin" +"cramersVBiasCorrectedArray" +"cramersVBiasCorrectedDistinct" +"cramersVBiasCorrectedForEach" +"cramersVBiasCorrectedIf" +"cramersVBiasCorrectedMap" +"cramersVBiasCorrectedMerge" +"cramersVBiasCorrectedNull" +"cramersVBiasCorrectedOrDefault" +"cramersVBiasCorrectedOrNull" +"cramersVBiasCorrectedResample" +"cramersVBiasCorrectedSimpleState" +"cramersVBiasCorrectedState" +"cramersVDistinct" +"cramersVForEach" +"cramersVIf" +"cramersVMap" +"cramersVMerge" +"cramersVNull" +"cramersVOrDefault" +"cramersVOrNull" +"cramersVResample" +"cramersVSimpleState" +"cramersVState" +"curdate" +"currentDatabase" +"currentProfiles" +"currentRoles" +"currentSchemas" +"currentUser" +"current_database" +"current_date" +"current_schemas" +"current_timestamp" +"current_user" +"cutFragment" +"cutIPv6" +"cutQueryString" +"cutQueryStringAndFragment" +"cutToFirstSignificantSubdomain" +"cutToFirstSignificantSubdomainCustom" +"cutToFirstSignificantSubdomainCustomRFC" +"cutToFirstSignificantSubdomainCustomWithWWW" +"cutToFirstSignificantSubdomainCustomWithWWWRFC" +"cutToFirstSignificantSubdomainRFC" +"cutToFirstSignificantSubdomainWithWWW" +"cutToFirstSignificantSubdomainWithWWWRFC" +"cutURLParameter" +"cutWWW" +"damerauLevenshteinDistance" +"dateDiff" +"dateName" +"dateTime64ToSnowflake" +"dateTime64ToSnowflakeID" +"dateTimeToSnowflake" +"dateTimeToSnowflakeID" +"dateTrunc" +"date_diff" +"decodeHTMLComponent" +"decodeURLComponent" +"decodeURLFormComponent" +"decodeXMLComponent" +"decrypt" +"defaultProfiles" +"defaultRoles" +"defaultValueOfArgumentType" +"defaultValueOfTypeName" +"degrees" +"deltaSum" +"deltaSumArgMax" +"deltaSumArgMin" +"deltaSumArray" +"deltaSumDistinct" +"deltaSumForEach" +"deltaSumIf" +"deltaSumMap" +"deltaSumMerge" +"deltaSumNull" +"deltaSumOrDefault" +"deltaSumOrNull" +"deltaSumResample" +"deltaSumSimpleState" +"deltaSumState" +"deltaSumTimestamp" +"deltaSumTimestampArgMax" +"deltaSumTimestampArgMin" +"deltaSumTimestampArray" +"deltaSumTimestampDistinct" +"deltaSumTimestampForEach" +"deltaSumTimestampIf" +"deltaSumTimestampMap" +"deltaSumTimestampMerge" +"deltaSumTimestampNull" +"deltaSumTimestampOrDefault" +"deltaSumTimestampOrNull" +"deltaSumTimestampResample" +"deltaSumTimestampSimpleState" +"deltaSumTimestampState" +"demangle" +"denseRank" +"denseRankArgMax" +"denseRankArgMin" +"denseRankArray" +"denseRankDistinct" +"denseRankForEach" +"denseRankIf" +"denseRankMap" +"denseRankMerge" +"denseRankNull" +"denseRankOrDefault" +"denseRankOrNull" +"denseRankResample" +"denseRankSimpleState" +"denseRankState" +"dense_rank" +"dense_rankArgMax" +"dense_rankArgMin" +"dense_rankArray" +"dense_rankDistinct" +"dense_rankForEach" +"dense_rankIf" +"dense_rankMap" +"dense_rankMerge" +"dense_rankNull" +"dense_rankOrDefault" +"dense_rankOrNull" +"dense_rankResample" +"dense_rankSimpleState" +"dense_rankState" +"detectCharset" +"detectLanguage" +"detectLanguageMixed" 
+"detectLanguageUnknown" +"detectProgrammingLanguage" +"detectTonality" +"dictGet" +"dictGetAll" +"dictGetChildren" +"dictGetDate" +"dictGetDateOrDefault" +"dictGetDateTime" +"dictGetDateTimeOrDefault" +"dictGetDescendants" +"dictGetFloat32" +"dictGetFloat32OrDefault" +"dictGetFloat64" +"dictGetFloat64OrDefault" +"dictGetHierarchy" +"dictGetIPv4" +"dictGetIPv4OrDefault" +"dictGetIPv6" +"dictGetIPv6OrDefault" +"dictGetInt16" +"dictGetInt16OrDefault" +"dictGetInt32" +"dictGetInt32OrDefault" +"dictGetInt64" +"dictGetInt64OrDefault" +"dictGetInt8" +"dictGetInt8OrDefault" +"dictGetOrDefault" +"dictGetOrNull" +"dictGetString" +"dictGetStringOrDefault" +"dictGetUInt16" +"dictGetUInt16OrDefault" +"dictGetUInt32" +"dictGetUInt32OrDefault" +"dictGetUInt64" +"dictGetUInt64OrDefault" +"dictGetUInt8" +"dictGetUInt8OrDefault" +"dictGetUUID" +"dictGetUUIDOrDefault" +"dictHas" +"dictIsIn" +"displayName" +"distanceL1" +"distanceL2" +"distanceL2Squared" +"distanceLinf" +"distanceLp" +"divide" +"divideDecimal" +"domain" +"domainRFC" +"domainWithoutWWW" +"domainWithoutWWWRFC" +"dotProduct" +"dumpColumnStructure" +"dynamicElement" +"dynamicType" +"e" +"editDistance" +"editDistanceUTF8" +"empty" +"emptyArrayDate" +"emptyArrayDateTime" +"emptyArrayFloat32" +"emptyArrayFloat64" +"emptyArrayInt16" +"emptyArrayInt32" +"emptyArrayInt64" +"emptyArrayInt8" +"emptyArrayString" +"emptyArrayToSingle" +"emptyArrayUInt16" +"emptyArrayUInt32" +"emptyArrayUInt64" +"emptyArrayUInt8" +"enabledProfiles" +"enabledRoles" +"encodeURLComponent" +"encodeURLFormComponent" +"encodeXMLComponent" +"encrypt" +"endsWith" +"endsWithUTF8" +"entropy" +"entropyArgMax" +"entropyArgMin" +"entropyArray" +"entropyDistinct" +"entropyForEach" +"entropyIf" +"entropyMap" +"entropyMerge" +"entropyNull" +"entropyOrDefault" +"entropyOrNull" +"entropyResample" +"entropySimpleState" +"entropyState" +"equals" +"erf" +"erfc" +"errorCodeToName" +"evalMLMethod" +"exp" +"exp10" +"exp2" +"exponentialMovingAverage" +"exponentialMovingAverageArgMax" +"exponentialMovingAverageArgMin" +"exponentialMovingAverageArray" +"exponentialMovingAverageDistinct" +"exponentialMovingAverageForEach" +"exponentialMovingAverageIf" +"exponentialMovingAverageMap" +"exponentialMovingAverageMerge" +"exponentialMovingAverageNull" +"exponentialMovingAverageOrDefault" +"exponentialMovingAverageOrNull" +"exponentialMovingAverageResample" +"exponentialMovingAverageSimpleState" +"exponentialMovingAverageState" +"exponentialTimeDecayedAvg" +"exponentialTimeDecayedAvgArgMax" +"exponentialTimeDecayedAvgArgMin" +"exponentialTimeDecayedAvgArray" +"exponentialTimeDecayedAvgDistinct" +"exponentialTimeDecayedAvgForEach" +"exponentialTimeDecayedAvgIf" +"exponentialTimeDecayedAvgMap" +"exponentialTimeDecayedAvgMerge" +"exponentialTimeDecayedAvgNull" +"exponentialTimeDecayedAvgOrDefault" +"exponentialTimeDecayedAvgOrNull" +"exponentialTimeDecayedAvgResample" +"exponentialTimeDecayedAvgSimpleState" +"exponentialTimeDecayedAvgState" +"exponentialTimeDecayedCount" +"exponentialTimeDecayedCountArgMax" +"exponentialTimeDecayedCountArgMin" +"exponentialTimeDecayedCountArray" +"exponentialTimeDecayedCountDistinct" +"exponentialTimeDecayedCountForEach" +"exponentialTimeDecayedCountIf" +"exponentialTimeDecayedCountMap" +"exponentialTimeDecayedCountMerge" +"exponentialTimeDecayedCountNull" +"exponentialTimeDecayedCountOrDefault" +"exponentialTimeDecayedCountOrNull" +"exponentialTimeDecayedCountResample" +"exponentialTimeDecayedCountSimpleState" +"exponentialTimeDecayedCountState" +"exponentialTimeDecayedMax" 
+"exponentialTimeDecayedMaxArgMax" +"exponentialTimeDecayedMaxArgMin" +"exponentialTimeDecayedMaxArray" +"exponentialTimeDecayedMaxDistinct" +"exponentialTimeDecayedMaxForEach" +"exponentialTimeDecayedMaxIf" +"exponentialTimeDecayedMaxMap" +"exponentialTimeDecayedMaxMerge" +"exponentialTimeDecayedMaxNull" +"exponentialTimeDecayedMaxOrDefault" +"exponentialTimeDecayedMaxOrNull" +"exponentialTimeDecayedMaxResample" +"exponentialTimeDecayedMaxSimpleState" +"exponentialTimeDecayedMaxState" +"exponentialTimeDecayedSum" +"exponentialTimeDecayedSumArgMax" +"exponentialTimeDecayedSumArgMin" +"exponentialTimeDecayedSumArray" +"exponentialTimeDecayedSumDistinct" +"exponentialTimeDecayedSumForEach" +"exponentialTimeDecayedSumIf" +"exponentialTimeDecayedSumMap" +"exponentialTimeDecayedSumMerge" +"exponentialTimeDecayedSumNull" +"exponentialTimeDecayedSumOrDefault" +"exponentialTimeDecayedSumOrNull" +"exponentialTimeDecayedSumResample" +"exponentialTimeDecayedSumSimpleState" +"exponentialTimeDecayedSumState" +"extract" +"extractAll" +"extractAllGroups" +"extractAllGroupsHorizontal" +"extractAllGroupsVertical" +"extractGroups" +"extractKeyValuePairs" +"extractKeyValuePairsWithEscaping" +"extractTextFromHTML" +"extractURLParameter" +"extractURLParameterNames" +"extractURLParameters" +"factorial" +"farmFingerprint64" +"farmHash64" +"file" +"filesystemAvailable" +"filesystemCapacity" +"filesystemUnreserved" +"finalizeAggregation" +"firstLine" +"firstSignificantSubdomain" +"firstSignificantSubdomainCustom" +"firstSignificantSubdomainCustomRFC" +"firstSignificantSubdomainRFC" +"first_value" +"first_valueArgMax" +"first_valueArgMin" +"first_valueArray" +"first_valueDistinct" +"first_valueForEach" +"first_valueIf" +"first_valueMap" +"first_valueMerge" +"first_valueNull" +"first_valueOrDefault" +"first_valueOrNull" +"first_valueResample" +"first_valueSimpleState" +"first_valueState" +"first_value_respect_nulls" +"first_value_respect_nullsArgMax" +"first_value_respect_nullsArgMin" +"first_value_respect_nullsArray" +"first_value_respect_nullsDistinct" +"first_value_respect_nullsForEach" +"first_value_respect_nullsIf" +"first_value_respect_nullsMap" +"first_value_respect_nullsMerge" +"first_value_respect_nullsNull" +"first_value_respect_nullsOrDefault" +"first_value_respect_nullsOrNull" +"first_value_respect_nullsResample" +"first_value_respect_nullsSimpleState" +"first_value_respect_nullsState" +"flameGraph" +"flameGraphArgMax" +"flameGraphArgMin" +"flameGraphArray" +"flameGraphDistinct" +"flameGraphForEach" +"flameGraphIf" +"flameGraphMap" +"flameGraphMerge" +"flameGraphNull" +"flameGraphOrDefault" +"flameGraphOrNull" +"flameGraphResample" +"flameGraphSimpleState" +"flameGraphState" +"flatten" +"flattenTuple" +"floor" +"format" +"formatDateTime" +"formatDateTimeInJodaSyntax" +"formatQuery" +"formatQueryOrNull" +"formatQuerySingleLine" +"formatQuerySingleLineOrNull" +"formatReadableDecimalSize" +"formatReadableQuantity" +"formatReadableSize" +"formatReadableTimeDelta" +"formatRow" +"formatRowNoNewline" +"fragment" +"fromDaysSinceYearZero" +"fromDaysSinceYearZero32" +"fromModifiedJulianDay" +"fromModifiedJulianDayOrNull" +"fromUTCTimestamp" +"fromUnixTimestamp" +"fromUnixTimestamp64Micro" +"fromUnixTimestamp64Milli" +"fromUnixTimestamp64Nano" +"fromUnixTimestampInJodaSyntax" +"from_utc_timestamp" +"fullHostName" +"fuzzBits" +"gccMurmurHash" +"gcd" +"generateRandomStructure" +"generateSnowflakeID" +"generateULID" +"generateUUIDv4" +"generateUUIDv7" +"geoDistance" +"geoToH3" +"geoToS2" +"geohashDecode" 
+"geohashEncode" +"geohashesInBox" +"getClientHTTPHeader" +"getMacro" +"getOSKernelVersion" +"getServerPort" +"getSetting" +"getSizeOfEnumType" +"getSubcolumn" +"getTypeSerializationStreams" +"globalIn" +"globalInIgnoreSet" +"globalNotIn" +"globalNotInIgnoreSet" +"globalNotNullIn" +"globalNotNullInIgnoreSet" +"globalNullIn" +"globalNullInIgnoreSet" +"globalVariable" +"greatCircleAngle" +"greatCircleDistance" +"greater" +"greaterOrEquals" +"greatest" +"groupArray" +"groupArrayArgMax" +"groupArrayArgMin" +"groupArrayArray" +"groupArrayDistinct" +"groupArrayForEach" +"groupArrayIf" +"groupArrayInsertAt" +"groupArrayInsertAtArgMax" +"groupArrayInsertAtArgMin" +"groupArrayInsertAtArray" +"groupArrayInsertAtDistinct" +"groupArrayInsertAtForEach" +"groupArrayInsertAtIf" +"groupArrayInsertAtMap" +"groupArrayInsertAtMerge" +"groupArrayInsertAtNull" +"groupArrayInsertAtOrDefault" +"groupArrayInsertAtOrNull" +"groupArrayInsertAtResample" +"groupArrayInsertAtSimpleState" +"groupArrayInsertAtState" +"groupArrayIntersect" +"groupArrayIntersectArgMax" +"groupArrayIntersectArgMin" +"groupArrayIntersectArray" +"groupArrayIntersectDistinct" +"groupArrayIntersectForEach" +"groupArrayIntersectIf" +"groupArrayIntersectMap" +"groupArrayIntersectMerge" +"groupArrayIntersectNull" +"groupArrayIntersectOrDefault" +"groupArrayIntersectOrNull" +"groupArrayIntersectResample" +"groupArrayIntersectSimpleState" +"groupArrayIntersectState" +"groupArrayLast" +"groupArrayLastArgMax" +"groupArrayLastArgMin" +"groupArrayLastArray" +"groupArrayLastDistinct" +"groupArrayLastForEach" +"groupArrayLastIf" +"groupArrayLastMap" +"groupArrayLastMerge" +"groupArrayLastNull" +"groupArrayLastOrDefault" +"groupArrayLastOrNull" +"groupArrayLastResample" +"groupArrayLastSimpleState" +"groupArrayLastState" +"groupArrayMap" +"groupArrayMerge" +"groupArrayMovingAvg" +"groupArrayMovingAvgArgMax" +"groupArrayMovingAvgArgMin" +"groupArrayMovingAvgArray" +"groupArrayMovingAvgDistinct" +"groupArrayMovingAvgForEach" +"groupArrayMovingAvgIf" +"groupArrayMovingAvgMap" +"groupArrayMovingAvgMerge" +"groupArrayMovingAvgNull" +"groupArrayMovingAvgOrDefault" +"groupArrayMovingAvgOrNull" +"groupArrayMovingAvgResample" +"groupArrayMovingAvgSimpleState" +"groupArrayMovingAvgState" +"groupArrayMovingSum" +"groupArrayMovingSumArgMax" +"groupArrayMovingSumArgMin" +"groupArrayMovingSumArray" +"groupArrayMovingSumDistinct" +"groupArrayMovingSumForEach" +"groupArrayMovingSumIf" +"groupArrayMovingSumMap" +"groupArrayMovingSumMerge" +"groupArrayMovingSumNull" +"groupArrayMovingSumOrDefault" +"groupArrayMovingSumOrNull" +"groupArrayMovingSumResample" +"groupArrayMovingSumSimpleState" +"groupArrayMovingSumState" +"groupArrayNull" +"groupArrayOrDefault" +"groupArrayOrNull" +"groupArrayResample" +"groupArraySample" +"groupArraySampleArgMax" +"groupArraySampleArgMin" +"groupArraySampleArray" +"groupArraySampleDistinct" +"groupArraySampleForEach" +"groupArraySampleIf" +"groupArraySampleMap" +"groupArraySampleMerge" +"groupArraySampleNull" +"groupArraySampleOrDefault" +"groupArraySampleOrNull" +"groupArraySampleResample" +"groupArraySampleSimpleState" +"groupArraySampleState" +"groupArraySimpleState" +"groupArraySorted" +"groupArraySortedArgMax" +"groupArraySortedArgMin" +"groupArraySortedArray" +"groupArraySortedDistinct" +"groupArraySortedForEach" +"groupArraySortedIf" +"groupArraySortedMap" +"groupArraySortedMerge" +"groupArraySortedNull" +"groupArraySortedOrDefault" +"groupArraySortedOrNull" +"groupArraySortedResample" +"groupArraySortedSimpleState" 
+"groupArraySortedState" +"groupArrayState" +"groupBitAnd" +"groupBitAndArgMax" +"groupBitAndArgMin" +"groupBitAndArray" +"groupBitAndDistinct" +"groupBitAndForEach" +"groupBitAndIf" +"groupBitAndMap" +"groupBitAndMerge" +"groupBitAndNull" +"groupBitAndOrDefault" +"groupBitAndOrNull" +"groupBitAndResample" +"groupBitAndSimpleState" +"groupBitAndState" +"groupBitOr" +"groupBitOrArgMax" +"groupBitOrArgMin" +"groupBitOrArray" +"groupBitOrDistinct" +"groupBitOrForEach" +"groupBitOrIf" +"groupBitOrMap" +"groupBitOrMerge" +"groupBitOrNull" +"groupBitOrOrDefault" +"groupBitOrOrNull" +"groupBitOrResample" +"groupBitOrSimpleState" +"groupBitOrState" +"groupBitXor" +"groupBitXorArgMax" +"groupBitXorArgMin" +"groupBitXorArray" +"groupBitXorDistinct" +"groupBitXorForEach" +"groupBitXorIf" +"groupBitXorMap" +"groupBitXorMerge" +"groupBitXorNull" +"groupBitXorOrDefault" +"groupBitXorOrNull" +"groupBitXorResample" +"groupBitXorSimpleState" +"groupBitXorState" +"groupBitmap" +"groupBitmapAnd" +"groupBitmapAndArgMax" +"groupBitmapAndArgMin" +"groupBitmapAndArray" +"groupBitmapAndDistinct" +"groupBitmapAndForEach" +"groupBitmapAndIf" +"groupBitmapAndMap" +"groupBitmapAndMerge" +"groupBitmapAndNull" +"groupBitmapAndOrDefault" +"groupBitmapAndOrNull" +"groupBitmapAndResample" +"groupBitmapAndSimpleState" +"groupBitmapAndState" +"groupBitmapArgMax" +"groupBitmapArgMin" +"groupBitmapArray" +"groupBitmapDistinct" +"groupBitmapForEach" +"groupBitmapIf" +"groupBitmapMap" +"groupBitmapMerge" +"groupBitmapNull" +"groupBitmapOr" +"groupBitmapOrArgMax" +"groupBitmapOrArgMin" +"groupBitmapOrArray" +"groupBitmapOrDefault" +"groupBitmapOrDistinct" +"groupBitmapOrForEach" +"groupBitmapOrIf" +"groupBitmapOrMap" +"groupBitmapOrMerge" +"groupBitmapOrNull" +"groupBitmapOrNull" +"groupBitmapOrOrDefault" +"groupBitmapOrOrNull" +"groupBitmapOrResample" +"groupBitmapOrSimpleState" +"groupBitmapOrState" +"groupBitmapResample" +"groupBitmapSimpleState" +"groupBitmapState" +"groupBitmapXor" +"groupBitmapXorArgMax" +"groupBitmapXorArgMin" +"groupBitmapXorArray" +"groupBitmapXorDistinct" +"groupBitmapXorForEach" +"groupBitmapXorIf" +"groupBitmapXorMap" +"groupBitmapXorMerge" +"groupBitmapXorNull" +"groupBitmapXorOrDefault" +"groupBitmapXorOrNull" +"groupBitmapXorResample" +"groupBitmapXorSimpleState" +"groupBitmapXorState" +"groupConcat" +"groupConcatArgMax" +"groupConcatArgMin" +"groupConcatArray" +"groupConcatDistinct" +"groupConcatForEach" +"groupConcatIf" +"groupConcatMap" +"groupConcatMerge" +"groupConcatNull" +"groupConcatOrDefault" +"groupConcatOrNull" +"groupConcatResample" +"groupConcatSimpleState" +"groupConcatState" +"groupUniqArray" +"groupUniqArrayArgMax" +"groupUniqArrayArgMin" +"groupUniqArrayArray" +"groupUniqArrayDistinct" +"groupUniqArrayForEach" +"groupUniqArrayIf" +"groupUniqArrayMap" +"groupUniqArrayMerge" +"groupUniqArrayNull" +"groupUniqArrayOrDefault" +"groupUniqArrayOrNull" +"groupUniqArrayResample" +"groupUniqArraySimpleState" +"groupUniqArrayState" +"group_concat" +"group_concatArgMax" +"group_concatArgMin" +"group_concatArray" +"group_concatDistinct" +"group_concatForEach" +"group_concatIf" +"group_concatMap" +"group_concatMerge" +"group_concatNull" +"group_concatOrDefault" +"group_concatOrNull" +"group_concatResample" +"group_concatSimpleState" +"group_concatState" +"h3CellAreaM2" +"h3CellAreaRads2" +"h3Distance" +"h3EdgeAngle" +"h3EdgeLengthKm" +"h3EdgeLengthM" +"h3ExactEdgeLengthKm" +"h3ExactEdgeLengthM" +"h3ExactEdgeLengthRads" +"h3GetBaseCell" +"h3GetDestinationIndexFromUnidirectionalEdge" 
+"h3GetFaces" +"h3GetIndexesFromUnidirectionalEdge" +"h3GetOriginIndexFromUnidirectionalEdge" +"h3GetPentagonIndexes" +"h3GetRes0Indexes" +"h3GetResolution" +"h3GetUnidirectionalEdge" +"h3GetUnidirectionalEdgeBoundary" +"h3GetUnidirectionalEdgesFromHexagon" +"h3HexAreaKm2" +"h3HexAreaM2" +"h3HexRing" +"h3IndexesAreNeighbors" +"h3IsPentagon" +"h3IsResClassIII" +"h3IsValid" +"h3Line" +"h3NumHexagons" +"h3PointDistKm" +"h3PointDistM" +"h3PointDistRads" +"h3ToCenterChild" +"h3ToChildren" +"h3ToGeo" +"h3ToGeoBoundary" +"h3ToParent" +"h3ToString" +"h3UnidirectionalEdgeIsValid" +"h3kRing" +"halfMD5" +"has" +"hasAll" +"hasAny" +"hasColumnInTable" +"hasSubsequence" +"hasSubsequenceCaseInsensitive" +"hasSubsequenceCaseInsensitiveUTF8" +"hasSubsequenceUTF8" +"hasSubstr" +"hasThreadFuzzer" +"hasToken" +"hasTokenCaseInsensitive" +"hasTokenCaseInsensitiveOrNull" +"hasTokenOrNull" +"hex" +"hilbertDecode" +"hilbertEncode" +"histogram" +"histogramArgMax" +"histogramArgMin" +"histogramArray" +"histogramDistinct" +"histogramForEach" +"histogramIf" +"histogramMap" +"histogramMerge" +"histogramNull" +"histogramOrDefault" +"histogramOrNull" +"histogramResample" +"histogramSimpleState" +"histogramState" +"hiveHash" +"hop" +"hopEnd" +"hopStart" +"hostName" +"hostname" +"hypot" +"identity" +"idnaDecode" +"idnaEncode" +"if" +"ifNotFinite" +"ifNull" +"ignore" +"ilike" +"in" +"inIgnoreSet" +"indexHint" +"indexOf" +"initcap" +"initcapUTF8" +"initialQueryID" +"initial_query_id" +"initializeAggregation" +"instr" +"intDiv" +"intDivOrZero" +"intExp10" +"intExp2" +"intHash32" +"intHash64" +"intervalLengthSum" +"intervalLengthSumArgMax" +"intervalLengthSumArgMin" +"intervalLengthSumArray" +"intervalLengthSumDistinct" +"intervalLengthSumForEach" +"intervalLengthSumIf" +"intervalLengthSumMap" +"intervalLengthSumMerge" +"intervalLengthSumNull" +"intervalLengthSumOrDefault" +"intervalLengthSumOrNull" +"intervalLengthSumResample" +"intervalLengthSumSimpleState" +"intervalLengthSumState" +"isConstant" +"isDecimalOverflow" +"isFinite" +"isIPAddressInRange" +"isIPv4String" +"isIPv6String" +"isInfinite" +"isNaN" +"isNotDistinctFrom" +"isNotNull" +"isNull" +"isNullable" +"isValidJSON" +"isValidUTF8" +"isZeroOrNull" +"jaroSimilarity" +"jaroWinklerSimilarity" +"javaHash" +"javaHashUTF16LE" +"joinGet" +"joinGetOrNull" +"jsonMergePatch" +"jumpConsistentHash" +"kafkaMurmurHash" +"kolmogorovSmirnovTest" +"kolmogorovSmirnovTestArgMax" +"kolmogorovSmirnovTestArgMin" +"kolmogorovSmirnovTestArray" +"kolmogorovSmirnovTestDistinct" +"kolmogorovSmirnovTestForEach" +"kolmogorovSmirnovTestIf" +"kolmogorovSmirnovTestMap" +"kolmogorovSmirnovTestMerge" +"kolmogorovSmirnovTestNull" +"kolmogorovSmirnovTestOrDefault" +"kolmogorovSmirnovTestOrNull" +"kolmogorovSmirnovTestResample" +"kolmogorovSmirnovTestSimpleState" +"kolmogorovSmirnovTestState" +"kostikConsistentHash" +"kql_array_sort_asc" +"kql_array_sort_desc" +"kurtPop" +"kurtPopArgMax" +"kurtPopArgMin" +"kurtPopArray" +"kurtPopDistinct" +"kurtPopForEach" +"kurtPopIf" +"kurtPopMap" +"kurtPopMerge" +"kurtPopNull" +"kurtPopOrDefault" +"kurtPopOrNull" +"kurtPopResample" +"kurtPopSimpleState" +"kurtPopState" +"kurtSamp" +"kurtSampArgMax" +"kurtSampArgMin" +"kurtSampArray" +"kurtSampDistinct" +"kurtSampForEach" +"kurtSampIf" +"kurtSampMap" +"kurtSampMerge" +"kurtSampNull" +"kurtSampOrDefault" +"kurtSampOrNull" +"kurtSampResample" +"kurtSampSimpleState" +"kurtSampState" +"lagInFrame" +"lagInFrameArgMax" +"lagInFrameArgMin" +"lagInFrameArray" +"lagInFrameDistinct" +"lagInFrameForEach" +"lagInFrameIf" 
+"lagInFrameMap" +"lagInFrameMerge" +"lagInFrameNull" +"lagInFrameOrDefault" +"lagInFrameOrNull" +"lagInFrameResample" +"lagInFrameSimpleState" +"lagInFrameState" +"largestTriangleThreeBuckets" +"largestTriangleThreeBucketsArgMax" +"largestTriangleThreeBucketsArgMin" +"largestTriangleThreeBucketsArray" +"largestTriangleThreeBucketsDistinct" +"largestTriangleThreeBucketsForEach" +"largestTriangleThreeBucketsIf" +"largestTriangleThreeBucketsMap" +"largestTriangleThreeBucketsMerge" +"largestTriangleThreeBucketsNull" +"largestTriangleThreeBucketsOrDefault" +"largestTriangleThreeBucketsOrNull" +"largestTriangleThreeBucketsResample" +"largestTriangleThreeBucketsSimpleState" +"largestTriangleThreeBucketsState" +"last_value" +"last_valueArgMax" +"last_valueArgMin" +"last_valueArray" +"last_valueDistinct" +"last_valueForEach" +"last_valueIf" +"last_valueMap" +"last_valueMerge" +"last_valueNull" +"last_valueOrDefault" +"last_valueOrNull" +"last_valueResample" +"last_valueSimpleState" +"last_valueState" +"last_value_respect_nulls" +"last_value_respect_nullsArgMax" +"last_value_respect_nullsArgMin" +"last_value_respect_nullsArray" +"last_value_respect_nullsDistinct" +"last_value_respect_nullsForEach" +"last_value_respect_nullsIf" +"last_value_respect_nullsMap" +"last_value_respect_nullsMerge" +"last_value_respect_nullsNull" +"last_value_respect_nullsOrDefault" +"last_value_respect_nullsOrNull" +"last_value_respect_nullsResample" +"last_value_respect_nullsSimpleState" +"last_value_respect_nullsState" +"lcase" +"lcm" +"leadInFrame" +"leadInFrameArgMax" +"leadInFrameArgMin" +"leadInFrameArray" +"leadInFrameDistinct" +"leadInFrameForEach" +"leadInFrameIf" +"leadInFrameMap" +"leadInFrameMerge" +"leadInFrameNull" +"leadInFrameOrDefault" +"leadInFrameOrNull" +"leadInFrameResample" +"leadInFrameSimpleState" +"leadInFrameState" +"least" +"left" +"leftPad" +"leftPadUTF8" +"leftUTF8" +"lemmatize" +"length" +"lengthUTF8" +"less" +"lessOrEquals" +"levenshteinDistance" +"levenshteinDistanceUTF8" +"lgamma" +"like" +"ln" +"locate" +"log" +"log10" +"log1p" +"log2" +"logTrace" +"lowCardinalityIndices" +"lowCardinalityKeys" +"lower" +"lowerUTF8" +"lpad" +"ltrim" +"lttb" +"lttbArgMax" +"lttbArgMin" +"lttbArray" +"lttbDistinct" +"lttbForEach" +"lttbIf" +"lttbMap" +"lttbMerge" +"lttbNull" +"lttbOrDefault" +"lttbOrNull" +"lttbResample" +"lttbSimpleState" +"lttbState" +"makeDate" +"makeDate32" +"makeDateTime" +"makeDateTime64" +"mannWhitneyUTest" +"mannWhitneyUTestArgMax" +"mannWhitneyUTestArgMin" +"mannWhitneyUTestArray" +"mannWhitneyUTestDistinct" +"mannWhitneyUTestForEach" +"mannWhitneyUTestIf" +"mannWhitneyUTestMap" +"mannWhitneyUTestMerge" +"mannWhitneyUTestNull" +"mannWhitneyUTestOrDefault" +"mannWhitneyUTestOrNull" +"mannWhitneyUTestResample" +"mannWhitneyUTestSimpleState" +"mannWhitneyUTestState" +"map" +"mapAdd" +"mapAll" +"mapApply" +"mapConcat" +"mapContains" +"mapContainsKeyLike" +"mapExists" +"mapExtractKeyLike" +"mapFilter" +"mapFromArrays" +"mapFromString" +"mapKeys" +"mapPartialReverseSort" +"mapPartialSort" +"mapPopulateSeries" +"mapReverseSort" +"mapSort" +"mapSubtract" +"mapUpdate" +"mapValues" +"match" +"materialize" +"max" +"max2" +"maxArgMax" +"maxArgMin" +"maxArray" +"maxDistinct" +"maxForEach" +"maxIf" +"maxIntersections" +"maxIntersectionsArgMax" +"maxIntersectionsArgMin" +"maxIntersectionsArray" +"maxIntersectionsDistinct" +"maxIntersectionsForEach" +"maxIntersectionsIf" +"maxIntersectionsMap" +"maxIntersectionsMerge" +"maxIntersectionsNull" +"maxIntersectionsOrDefault" +"maxIntersectionsOrNull" 
+"maxIntersectionsPosition" +"maxIntersectionsPositionArgMax" +"maxIntersectionsPositionArgMin" +"maxIntersectionsPositionArray" +"maxIntersectionsPositionDistinct" +"maxIntersectionsPositionForEach" +"maxIntersectionsPositionIf" +"maxIntersectionsPositionMap" +"maxIntersectionsPositionMerge" +"maxIntersectionsPositionNull" +"maxIntersectionsPositionOrDefault" +"maxIntersectionsPositionOrNull" +"maxIntersectionsPositionResample" +"maxIntersectionsPositionSimpleState" +"maxIntersectionsPositionState" +"maxIntersectionsResample" +"maxIntersectionsSimpleState" +"maxIntersectionsState" +"maxMap" +"maxMappedArrays" +"maxMappedArraysArgMax" +"maxMappedArraysArgMin" +"maxMappedArraysArray" +"maxMappedArraysDistinct" +"maxMappedArraysForEach" +"maxMappedArraysIf" +"maxMappedArraysMap" +"maxMappedArraysMerge" +"maxMappedArraysNull" +"maxMappedArraysOrDefault" +"maxMappedArraysOrNull" +"maxMappedArraysResample" +"maxMappedArraysSimpleState" +"maxMappedArraysState" +"maxMerge" +"maxNull" +"maxOrDefault" +"maxOrNull" +"maxResample" +"maxSimpleState" +"maxState" +"meanZTest" +"meanZTestArgMax" +"meanZTestArgMin" +"meanZTestArray" +"meanZTestDistinct" +"meanZTestForEach" +"meanZTestIf" +"meanZTestMap" +"meanZTestMerge" +"meanZTestNull" +"meanZTestOrDefault" +"meanZTestOrNull" +"meanZTestResample" +"meanZTestSimpleState" +"meanZTestState" +"median" +"medianArgMax" +"medianArgMin" +"medianArray" +"medianBFloat16" +"medianBFloat16ArgMax" +"medianBFloat16ArgMin" +"medianBFloat16Array" +"medianBFloat16Distinct" +"medianBFloat16ForEach" +"medianBFloat16If" +"medianBFloat16Map" +"medianBFloat16Merge" +"medianBFloat16Null" +"medianBFloat16OrDefault" +"medianBFloat16OrNull" +"medianBFloat16Resample" +"medianBFloat16SimpleState" +"medianBFloat16State" +"medianBFloat16Weighted" +"medianBFloat16WeightedArgMax" +"medianBFloat16WeightedArgMin" +"medianBFloat16WeightedArray" +"medianBFloat16WeightedDistinct" +"medianBFloat16WeightedForEach" +"medianBFloat16WeightedIf" +"medianBFloat16WeightedMap" +"medianBFloat16WeightedMerge" +"medianBFloat16WeightedNull" +"medianBFloat16WeightedOrDefault" +"medianBFloat16WeightedOrNull" +"medianBFloat16WeightedResample" +"medianBFloat16WeightedSimpleState" +"medianBFloat16WeightedState" +"medianDD" +"medianDDArgMax" +"medianDDArgMin" +"medianDDArray" +"medianDDDistinct" +"medianDDForEach" +"medianDDIf" +"medianDDMap" +"medianDDMerge" +"medianDDNull" +"medianDDOrDefault" +"medianDDOrNull" +"medianDDResample" +"medianDDSimpleState" +"medianDDState" +"medianDeterministic" +"medianDeterministicArgMax" +"medianDeterministicArgMin" +"medianDeterministicArray" +"medianDeterministicDistinct" +"medianDeterministicForEach" +"medianDeterministicIf" +"medianDeterministicMap" +"medianDeterministicMerge" +"medianDeterministicNull" +"medianDeterministicOrDefault" +"medianDeterministicOrNull" +"medianDeterministicResample" +"medianDeterministicSimpleState" +"medianDeterministicState" +"medianDistinct" +"medianExact" +"medianExactArgMax" +"medianExactArgMin" +"medianExactArray" +"medianExactDistinct" +"medianExactForEach" +"medianExactHigh" +"medianExactHighArgMax" +"medianExactHighArgMin" +"medianExactHighArray" +"medianExactHighDistinct" +"medianExactHighForEach" +"medianExactHighIf" +"medianExactHighMap" +"medianExactHighMerge" +"medianExactHighNull" +"medianExactHighOrDefault" +"medianExactHighOrNull" +"medianExactHighResample" +"medianExactHighSimpleState" +"medianExactHighState" +"medianExactIf" +"medianExactLow" +"medianExactLowArgMax" +"medianExactLowArgMin" +"medianExactLowArray" 
+"medianExactLowDistinct" +"medianExactLowForEach" +"medianExactLowIf" +"medianExactLowMap" +"medianExactLowMerge" +"medianExactLowNull" +"medianExactLowOrDefault" +"medianExactLowOrNull" +"medianExactLowResample" +"medianExactLowSimpleState" +"medianExactLowState" +"medianExactMap" +"medianExactMerge" +"medianExactNull" +"medianExactOrDefault" +"medianExactOrNull" +"medianExactResample" +"medianExactSimpleState" +"medianExactState" +"medianExactWeighted" +"medianExactWeightedArgMax" +"medianExactWeightedArgMin" +"medianExactWeightedArray" +"medianExactWeightedDistinct" +"medianExactWeightedForEach" +"medianExactWeightedIf" +"medianExactWeightedMap" +"medianExactWeightedMerge" +"medianExactWeightedNull" +"medianExactWeightedOrDefault" +"medianExactWeightedOrNull" +"medianExactWeightedResample" +"medianExactWeightedSimpleState" +"medianExactWeightedState" +"medianForEach" +"medianGK" +"medianGKArgMax" +"medianGKArgMin" +"medianGKArray" +"medianGKDistinct" +"medianGKForEach" +"medianGKIf" +"medianGKMap" +"medianGKMerge" +"medianGKNull" +"medianGKOrDefault" +"medianGKOrNull" +"medianGKResample" +"medianGKSimpleState" +"medianGKState" +"medianIf" +"medianInterpolatedWeighted" +"medianInterpolatedWeightedArgMax" +"medianInterpolatedWeightedArgMin" +"medianInterpolatedWeightedArray" +"medianInterpolatedWeightedDistinct" +"medianInterpolatedWeightedForEach" +"medianInterpolatedWeightedIf" +"medianInterpolatedWeightedMap" +"medianInterpolatedWeightedMerge" +"medianInterpolatedWeightedNull" +"medianInterpolatedWeightedOrDefault" +"medianInterpolatedWeightedOrNull" +"medianInterpolatedWeightedResample" +"medianInterpolatedWeightedSimpleState" +"medianInterpolatedWeightedState" +"medianMap" +"medianMerge" +"medianNull" +"medianOrDefault" +"medianOrNull" +"medianResample" +"medianSimpleState" +"medianState" +"medianTDigest" +"medianTDigestArgMax" +"medianTDigestArgMin" +"medianTDigestArray" +"medianTDigestDistinct" +"medianTDigestForEach" +"medianTDigestIf" +"medianTDigestMap" +"medianTDigestMerge" +"medianTDigestNull" +"medianTDigestOrDefault" +"medianTDigestOrNull" +"medianTDigestResample" +"medianTDigestSimpleState" +"medianTDigestState" +"medianTDigestWeighted" +"medianTDigestWeightedArgMax" +"medianTDigestWeightedArgMin" +"medianTDigestWeightedArray" +"medianTDigestWeightedDistinct" +"medianTDigestWeightedForEach" +"medianTDigestWeightedIf" +"medianTDigestWeightedMap" +"medianTDigestWeightedMerge" +"medianTDigestWeightedNull" +"medianTDigestWeightedOrDefault" +"medianTDigestWeightedOrNull" +"medianTDigestWeightedResample" +"medianTDigestWeightedSimpleState" +"medianTDigestWeightedState" +"medianTiming" +"medianTimingArgMax" +"medianTimingArgMin" +"medianTimingArray" +"medianTimingDistinct" +"medianTimingForEach" +"medianTimingIf" +"medianTimingMap" +"medianTimingMerge" +"medianTimingNull" +"medianTimingOrDefault" +"medianTimingOrNull" +"medianTimingResample" +"medianTimingSimpleState" +"medianTimingState" +"medianTimingWeighted" +"medianTimingWeightedArgMax" +"medianTimingWeightedArgMin" +"medianTimingWeightedArray" +"medianTimingWeightedDistinct" +"medianTimingWeightedForEach" +"medianTimingWeightedIf" +"medianTimingWeightedMap" +"medianTimingWeightedMerge" +"medianTimingWeightedNull" +"medianTimingWeightedOrDefault" +"medianTimingWeightedOrNull" +"medianTimingWeightedResample" +"medianTimingWeightedSimpleState" +"medianTimingWeightedState" +"metroHash64" +"mid" +"min" +"min2" +"minArgMax" +"minArgMin" +"minArray" +"minDistinct" +"minForEach" +"minIf" +"minMap" +"minMappedArrays" 
+"minMappedArraysArgMax" +"minMappedArraysArgMin" +"minMappedArraysArray" +"minMappedArraysDistinct" +"minMappedArraysForEach" +"minMappedArraysIf" +"minMappedArraysMap" +"minMappedArraysMerge" +"minMappedArraysNull" +"minMappedArraysOrDefault" +"minMappedArraysOrNull" +"minMappedArraysResample" +"minMappedArraysSimpleState" +"minMappedArraysState" +"minMerge" +"minNull" +"minOrDefault" +"minOrNull" +"minResample" +"minSampleSizeContinous" +"minSampleSizeContinuous" +"minSampleSizeConversion" +"minSimpleState" +"minState" +"minus" +"mismatches" +"mod" +"modulo" +"moduloLegacy" +"moduloOrZero" +"monthName" +"mortonDecode" +"mortonEncode" +"multiFuzzyMatchAllIndices" +"multiFuzzyMatchAny" +"multiFuzzyMatchAnyIndex" +"multiIf" +"multiMatchAllIndices" +"multiMatchAny" +"multiMatchAnyIndex" +"multiSearchAllPositions" +"multiSearchAllPositionsCaseInsensitive" +"multiSearchAllPositionsCaseInsensitiveUTF8" +"multiSearchAllPositionsUTF8" +"multiSearchAny" +"multiSearchAnyCaseInsensitive" +"multiSearchAnyCaseInsensitiveUTF8" +"multiSearchAnyUTF8" +"multiSearchFirstIndex" +"multiSearchFirstIndexCaseInsensitive" +"multiSearchFirstIndexCaseInsensitiveUTF8" +"multiSearchFirstIndexUTF8" +"multiSearchFirstPosition" +"multiSearchFirstPositionCaseInsensitive" +"multiSearchFirstPositionCaseInsensitiveUTF8" +"multiSearchFirstPositionUTF8" +"multiply" +"multiplyDecimal" +"murmurHash2_32" +"murmurHash2_64" +"murmurHash3_128" +"murmurHash3_32" +"murmurHash3_64" +"negate" +"neighbor" +"nested" +"netloc" +"ngramDistance" +"ngramDistanceCaseInsensitive" +"ngramDistanceCaseInsensitiveUTF8" +"ngramDistanceUTF8" +"ngramMinHash" +"ngramMinHashArg" +"ngramMinHashArgCaseInsensitive" +"ngramMinHashArgCaseInsensitiveUTF8" +"ngramMinHashArgUTF8" +"ngramMinHashCaseInsensitive" +"ngramMinHashCaseInsensitiveUTF8" +"ngramMinHashUTF8" +"ngramSearch" +"ngramSearchCaseInsensitive" +"ngramSearchCaseInsensitiveUTF8" +"ngramSearchUTF8" +"ngramSimHash" +"ngramSimHashCaseInsensitive" +"ngramSimHashCaseInsensitiveUTF8" +"ngramSimHashUTF8" +"ngrams" +"nonNegativeDerivative" +"nonNegativeDerivativeArgMax" +"nonNegativeDerivativeArgMin" +"nonNegativeDerivativeArray" +"nonNegativeDerivativeDistinct" +"nonNegativeDerivativeForEach" +"nonNegativeDerivativeIf" +"nonNegativeDerivativeMap" +"nonNegativeDerivativeMerge" +"nonNegativeDerivativeNull" +"nonNegativeDerivativeOrDefault" +"nonNegativeDerivativeOrNull" +"nonNegativeDerivativeResample" +"nonNegativeDerivativeSimpleState" +"nonNegativeDerivativeState" +"normL1" +"normL2" +"normL2Squared" +"normLinf" +"normLp" +"normalizeL1" +"normalizeL2" +"normalizeLinf" +"normalizeLp" +"normalizeQuery" +"normalizeQueryKeepNames" +"normalizeUTF8NFC" +"normalizeUTF8NFD" +"normalizeUTF8NFKC" +"normalizeUTF8NFKD" +"normalizedQueryHash" +"normalizedQueryHashKeepNames" +"not" +"notEmpty" +"notEquals" +"notILike" +"notIn" +"notInIgnoreSet" +"notLike" +"notNullIn" +"notNullInIgnoreSet" +"nothing" +"nothingArgMax" +"nothingArgMin" +"nothingArray" +"nothingDistinct" +"nothingForEach" +"nothingIf" +"nothingMap" +"nothingMerge" +"nothingNull" +"nothingNull" +"nothingNullArgMax" +"nothingNullArgMin" +"nothingNullArray" +"nothingNullDistinct" +"nothingNullForEach" +"nothingNullIf" +"nothingNullMap" +"nothingNullMerge" +"nothingNullNull" +"nothingNullOrDefault" +"nothingNullOrNull" +"nothingNullResample" +"nothingNullSimpleState" +"nothingNullState" +"nothingOrDefault" +"nothingOrNull" +"nothingResample" +"nothingSimpleState" +"nothingState" +"nothingUInt64" +"nothingUInt64ArgMax" +"nothingUInt64ArgMin" 
+"nothingUInt64Array" +"nothingUInt64Distinct" +"nothingUInt64ForEach" +"nothingUInt64If" +"nothingUInt64Map" +"nothingUInt64Merge" +"nothingUInt64Null" +"nothingUInt64OrDefault" +"nothingUInt64OrNull" +"nothingUInt64Resample" +"nothingUInt64SimpleState" +"nothingUInt64State" +"now" +"now64" +"nowInBlock" +"nth_value" +"nth_valueArgMax" +"nth_valueArgMin" +"nth_valueArray" +"nth_valueDistinct" +"nth_valueForEach" +"nth_valueIf" +"nth_valueMap" +"nth_valueMerge" +"nth_valueNull" +"nth_valueOrDefault" +"nth_valueOrNull" +"nth_valueResample" +"nth_valueSimpleState" +"nth_valueState" +"ntile" +"ntileArgMax" +"ntileArgMin" +"ntileArray" +"ntileDistinct" +"ntileForEach" +"ntileIf" +"ntileMap" +"ntileMerge" +"ntileNull" +"ntileOrDefault" +"ntileOrNull" +"ntileResample" +"ntileSimpleState" +"ntileState" +"nullIf" +"nullIn" +"nullInIgnoreSet" +"or" +"parseDateTime" +"parseDateTime32BestEffort" +"parseDateTime32BestEffortOrNull" +"parseDateTime32BestEffortOrZero" +"parseDateTime64BestEffort" +"parseDateTime64BestEffortOrNull" +"parseDateTime64BestEffortOrZero" +"parseDateTime64BestEffortUS" +"parseDateTime64BestEffortUSOrNull" +"parseDateTime64BestEffortUSOrZero" +"parseDateTimeBestEffort" +"parseDateTimeBestEffortOrNull" +"parseDateTimeBestEffortOrZero" +"parseDateTimeBestEffortUS" +"parseDateTimeBestEffortUSOrNull" +"parseDateTimeBestEffortUSOrZero" +"parseDateTimeInJodaSyntax" +"parseDateTimeInJodaSyntaxOrNull" +"parseDateTimeInJodaSyntaxOrZero" +"parseDateTimeOrNull" +"parseDateTimeOrZero" +"parseReadableSize" +"parseReadableSizeOrNull" +"parseReadableSizeOrZero" +"parseTimeDelta" +"partitionID" +"partitionId" +"path" +"pathFull" +"percentRank" +"percentRankArgMax" +"percentRankArgMin" +"percentRankArray" +"percentRankDistinct" +"percentRankForEach" +"percentRankIf" +"percentRankMap" +"percentRankMerge" +"percentRankNull" +"percentRankOrDefault" +"percentRankOrNull" +"percentRankResample" +"percentRankSimpleState" +"percentRankState" +"percent_rank" +"percent_rankArgMax" +"percent_rankArgMin" +"percent_rankArray" +"percent_rankDistinct" +"percent_rankForEach" +"percent_rankIf" +"percent_rankMap" +"percent_rankMerge" +"percent_rankNull" +"percent_rankOrDefault" +"percent_rankOrNull" +"percent_rankResample" +"percent_rankSimpleState" +"percent_rankState" +"pi" +"plus" +"pmod" +"pointInEllipses" +"pointInPolygon" +"polygonAreaCartesian" +"polygonAreaSpherical" +"polygonConvexHullCartesian" +"polygonPerimeterCartesian" +"polygonPerimeterSpherical" +"polygonsDistanceCartesian" +"polygonsDistanceSpherical" +"polygonsEqualsCartesian" +"polygonsIntersectionCartesian" +"polygonsIntersectionSpherical" +"polygonsSymDifferenceCartesian" +"polygonsSymDifferenceSpherical" +"polygonsUnionCartesian" +"polygonsUnionSpherical" +"polygonsWithinCartesian" +"polygonsWithinSpherical" +"port" +"portRFC" +"position" +"positionCaseInsensitive" +"positionCaseInsensitiveUTF8" +"positionUTF8" +"positiveModulo" +"positive_modulo" +"pow" +"power" +"printf" +"proportionsZTest" +"protocol" +"punycodeDecode" +"punycodeEncode" +"quantile" +"quantileArgMax" +"quantileArgMin" +"quantileArray" +"quantileBFloat16" +"quantileBFloat16ArgMax" +"quantileBFloat16ArgMin" +"quantileBFloat16Array" +"quantileBFloat16Distinct" +"quantileBFloat16ForEach" +"quantileBFloat16If" +"quantileBFloat16Map" +"quantileBFloat16Merge" +"quantileBFloat16Null" +"quantileBFloat16OrDefault" +"quantileBFloat16OrNull" +"quantileBFloat16Resample" +"quantileBFloat16SimpleState" +"quantileBFloat16State" +"quantileBFloat16Weighted" +"quantileBFloat16WeightedArgMax" 
+"quantileBFloat16WeightedArgMin" +"quantileBFloat16WeightedArray" +"quantileBFloat16WeightedDistinct" +"quantileBFloat16WeightedForEach" +"quantileBFloat16WeightedIf" +"quantileBFloat16WeightedMap" +"quantileBFloat16WeightedMerge" +"quantileBFloat16WeightedNull" +"quantileBFloat16WeightedOrDefault" +"quantileBFloat16WeightedOrNull" +"quantileBFloat16WeightedResample" +"quantileBFloat16WeightedSimpleState" +"quantileBFloat16WeightedState" +"quantileDD" +"quantileDDArgMax" +"quantileDDArgMin" +"quantileDDArray" +"quantileDDDistinct" +"quantileDDForEach" +"quantileDDIf" +"quantileDDMap" +"quantileDDMerge" +"quantileDDNull" +"quantileDDOrDefault" +"quantileDDOrNull" +"quantileDDResample" +"quantileDDSimpleState" +"quantileDDState" +"quantileDeterministic" +"quantileDeterministicArgMax" +"quantileDeterministicArgMin" +"quantileDeterministicArray" +"quantileDeterministicDistinct" +"quantileDeterministicForEach" +"quantileDeterministicIf" +"quantileDeterministicMap" +"quantileDeterministicMerge" +"quantileDeterministicNull" +"quantileDeterministicOrDefault" +"quantileDeterministicOrNull" +"quantileDeterministicResample" +"quantileDeterministicSimpleState" +"quantileDeterministicState" +"quantileDistinct" +"quantileExact" +"quantileExactArgMax" +"quantileExactArgMin" +"quantileExactArray" +"quantileExactDistinct" +"quantileExactExclusive" +"quantileExactExclusiveArgMax" +"quantileExactExclusiveArgMin" +"quantileExactExclusiveArray" +"quantileExactExclusiveDistinct" +"quantileExactExclusiveForEach" +"quantileExactExclusiveIf" +"quantileExactExclusiveMap" +"quantileExactExclusiveMerge" +"quantileExactExclusiveNull" +"quantileExactExclusiveOrDefault" +"quantileExactExclusiveOrNull" +"quantileExactExclusiveResample" +"quantileExactExclusiveSimpleState" +"quantileExactExclusiveState" +"quantileExactForEach" +"quantileExactHigh" +"quantileExactHighArgMax" +"quantileExactHighArgMin" +"quantileExactHighArray" +"quantileExactHighDistinct" +"quantileExactHighForEach" +"quantileExactHighIf" +"quantileExactHighMap" +"quantileExactHighMerge" +"quantileExactHighNull" +"quantileExactHighOrDefault" +"quantileExactHighOrNull" +"quantileExactHighResample" +"quantileExactHighSimpleState" +"quantileExactHighState" +"quantileExactIf" +"quantileExactInclusive" +"quantileExactInclusiveArgMax" +"quantileExactInclusiveArgMin" +"quantileExactInclusiveArray" +"quantileExactInclusiveDistinct" +"quantileExactInclusiveForEach" +"quantileExactInclusiveIf" +"quantileExactInclusiveMap" +"quantileExactInclusiveMerge" +"quantileExactInclusiveNull" +"quantileExactInclusiveOrDefault" +"quantileExactInclusiveOrNull" +"quantileExactInclusiveResample" +"quantileExactInclusiveSimpleState" +"quantileExactInclusiveState" +"quantileExactLow" +"quantileExactLowArgMax" +"quantileExactLowArgMin" +"quantileExactLowArray" +"quantileExactLowDistinct" +"quantileExactLowForEach" +"quantileExactLowIf" +"quantileExactLowMap" +"quantileExactLowMerge" +"quantileExactLowNull" +"quantileExactLowOrDefault" +"quantileExactLowOrNull" +"quantileExactLowResample" +"quantileExactLowSimpleState" +"quantileExactLowState" +"quantileExactMap" +"quantileExactMerge" +"quantileExactNull" +"quantileExactOrDefault" +"quantileExactOrNull" +"quantileExactResample" +"quantileExactSimpleState" +"quantileExactState" +"quantileExactWeighted" +"quantileExactWeightedArgMax" +"quantileExactWeightedArgMin" +"quantileExactWeightedArray" +"quantileExactWeightedDistinct" +"quantileExactWeightedForEach" +"quantileExactWeightedIf" +"quantileExactWeightedMap" 
+"quantileExactWeightedMerge" +"quantileExactWeightedNull" +"quantileExactWeightedOrDefault" +"quantileExactWeightedOrNull" +"quantileExactWeightedResample" +"quantileExactWeightedSimpleState" +"quantileExactWeightedState" +"quantileForEach" +"quantileGK" +"quantileGKArgMax" +"quantileGKArgMin" +"quantileGKArray" +"quantileGKDistinct" +"quantileGKForEach" +"quantileGKIf" +"quantileGKMap" +"quantileGKMerge" +"quantileGKNull" +"quantileGKOrDefault" +"quantileGKOrNull" +"quantileGKResample" +"quantileGKSimpleState" +"quantileGKState" +"quantileIf" +"quantileInterpolatedWeighted" +"quantileInterpolatedWeightedArgMax" +"quantileInterpolatedWeightedArgMin" +"quantileInterpolatedWeightedArray" +"quantileInterpolatedWeightedDistinct" +"quantileInterpolatedWeightedForEach" +"quantileInterpolatedWeightedIf" +"quantileInterpolatedWeightedMap" +"quantileInterpolatedWeightedMerge" +"quantileInterpolatedWeightedNull" +"quantileInterpolatedWeightedOrDefault" +"quantileInterpolatedWeightedOrNull" +"quantileInterpolatedWeightedResample" +"quantileInterpolatedWeightedSimpleState" +"quantileInterpolatedWeightedState" +"quantileMap" +"quantileMerge" +"quantileNull" +"quantileOrDefault" +"quantileOrNull" +"quantileResample" +"quantileSimpleState" +"quantileState" +"quantileTDigest" +"quantileTDigestArgMax" +"quantileTDigestArgMin" +"quantileTDigestArray" +"quantileTDigestDistinct" +"quantileTDigestForEach" +"quantileTDigestIf" +"quantileTDigestMap" +"quantileTDigestMerge" +"quantileTDigestNull" +"quantileTDigestOrDefault" +"quantileTDigestOrNull" +"quantileTDigestResample" +"quantileTDigestSimpleState" +"quantileTDigestState" +"quantileTDigestWeighted" +"quantileTDigestWeightedArgMax" +"quantileTDigestWeightedArgMin" +"quantileTDigestWeightedArray" +"quantileTDigestWeightedDistinct" +"quantileTDigestWeightedForEach" +"quantileTDigestWeightedIf" +"quantileTDigestWeightedMap" +"quantileTDigestWeightedMerge" +"quantileTDigestWeightedNull" +"quantileTDigestWeightedOrDefault" +"quantileTDigestWeightedOrNull" +"quantileTDigestWeightedResample" +"quantileTDigestWeightedSimpleState" +"quantileTDigestWeightedState" +"quantileTiming" +"quantileTimingArgMax" +"quantileTimingArgMin" +"quantileTimingArray" +"quantileTimingDistinct" +"quantileTimingForEach" +"quantileTimingIf" +"quantileTimingMap" +"quantileTimingMerge" +"quantileTimingNull" +"quantileTimingOrDefault" +"quantileTimingOrNull" +"quantileTimingResample" +"quantileTimingSimpleState" +"quantileTimingState" +"quantileTimingWeighted" +"quantileTimingWeightedArgMax" +"quantileTimingWeightedArgMin" +"quantileTimingWeightedArray" +"quantileTimingWeightedDistinct" +"quantileTimingWeightedForEach" +"quantileTimingWeightedIf" +"quantileTimingWeightedMap" +"quantileTimingWeightedMerge" +"quantileTimingWeightedNull" +"quantileTimingWeightedOrDefault" +"quantileTimingWeightedOrNull" +"quantileTimingWeightedResample" +"quantileTimingWeightedSimpleState" +"quantileTimingWeightedState" +"quantiles" +"quantilesArgMax" +"quantilesArgMin" +"quantilesArray" +"quantilesBFloat16" +"quantilesBFloat16ArgMax" +"quantilesBFloat16ArgMin" +"quantilesBFloat16Array" +"quantilesBFloat16Distinct" +"quantilesBFloat16ForEach" +"quantilesBFloat16If" +"quantilesBFloat16Map" +"quantilesBFloat16Merge" +"quantilesBFloat16Null" +"quantilesBFloat16OrDefault" +"quantilesBFloat16OrNull" +"quantilesBFloat16Resample" +"quantilesBFloat16SimpleState" +"quantilesBFloat16State" +"quantilesBFloat16Weighted" +"quantilesBFloat16WeightedArgMax" +"quantilesBFloat16WeightedArgMin" +"quantilesBFloat16WeightedArray" 
+"quantilesBFloat16WeightedDistinct" +"quantilesBFloat16WeightedForEach" +"quantilesBFloat16WeightedIf" +"quantilesBFloat16WeightedMap" +"quantilesBFloat16WeightedMerge" +"quantilesBFloat16WeightedNull" +"quantilesBFloat16WeightedOrDefault" +"quantilesBFloat16WeightedOrNull" +"quantilesBFloat16WeightedResample" +"quantilesBFloat16WeightedSimpleState" +"quantilesBFloat16WeightedState" +"quantilesDD" +"quantilesDDArgMax" +"quantilesDDArgMin" +"quantilesDDArray" +"quantilesDDDistinct" +"quantilesDDForEach" +"quantilesDDIf" +"quantilesDDMap" +"quantilesDDMerge" +"quantilesDDNull" +"quantilesDDOrDefault" +"quantilesDDOrNull" +"quantilesDDResample" +"quantilesDDSimpleState" +"quantilesDDState" +"quantilesDeterministic" +"quantilesDeterministicArgMax" +"quantilesDeterministicArgMin" +"quantilesDeterministicArray" +"quantilesDeterministicDistinct" +"quantilesDeterministicForEach" +"quantilesDeterministicIf" +"quantilesDeterministicMap" +"quantilesDeterministicMerge" +"quantilesDeterministicNull" +"quantilesDeterministicOrDefault" +"quantilesDeterministicOrNull" +"quantilesDeterministicResample" +"quantilesDeterministicSimpleState" +"quantilesDeterministicState" +"quantilesDistinct" +"quantilesExact" +"quantilesExactArgMax" +"quantilesExactArgMin" +"quantilesExactArray" +"quantilesExactDistinct" +"quantilesExactExclusive" +"quantilesExactExclusiveArgMax" +"quantilesExactExclusiveArgMin" +"quantilesExactExclusiveArray" +"quantilesExactExclusiveDistinct" +"quantilesExactExclusiveForEach" +"quantilesExactExclusiveIf" +"quantilesExactExclusiveMap" +"quantilesExactExclusiveMerge" +"quantilesExactExclusiveNull" +"quantilesExactExclusiveOrDefault" +"quantilesExactExclusiveOrNull" +"quantilesExactExclusiveResample" +"quantilesExactExclusiveSimpleState" +"quantilesExactExclusiveState" +"quantilesExactForEach" +"quantilesExactHigh" +"quantilesExactHighArgMax" +"quantilesExactHighArgMin" +"quantilesExactHighArray" +"quantilesExactHighDistinct" +"quantilesExactHighForEach" +"quantilesExactHighIf" +"quantilesExactHighMap" +"quantilesExactHighMerge" +"quantilesExactHighNull" +"quantilesExactHighOrDefault" +"quantilesExactHighOrNull" +"quantilesExactHighResample" +"quantilesExactHighSimpleState" +"quantilesExactHighState" +"quantilesExactIf" +"quantilesExactInclusive" +"quantilesExactInclusiveArgMax" +"quantilesExactInclusiveArgMin" +"quantilesExactInclusiveArray" +"quantilesExactInclusiveDistinct" +"quantilesExactInclusiveForEach" +"quantilesExactInclusiveIf" +"quantilesExactInclusiveMap" +"quantilesExactInclusiveMerge" +"quantilesExactInclusiveNull" +"quantilesExactInclusiveOrDefault" +"quantilesExactInclusiveOrNull" +"quantilesExactInclusiveResample" +"quantilesExactInclusiveSimpleState" +"quantilesExactInclusiveState" +"quantilesExactLow" +"quantilesExactLowArgMax" +"quantilesExactLowArgMin" +"quantilesExactLowArray" +"quantilesExactLowDistinct" +"quantilesExactLowForEach" +"quantilesExactLowIf" +"quantilesExactLowMap" +"quantilesExactLowMerge" +"quantilesExactLowNull" +"quantilesExactLowOrDefault" +"quantilesExactLowOrNull" +"quantilesExactLowResample" +"quantilesExactLowSimpleState" +"quantilesExactLowState" +"quantilesExactMap" +"quantilesExactMerge" +"quantilesExactNull" +"quantilesExactOrDefault" +"quantilesExactOrNull" +"quantilesExactResample" +"quantilesExactSimpleState" +"quantilesExactState" +"quantilesExactWeighted" +"quantilesExactWeightedArgMax" +"quantilesExactWeightedArgMin" +"quantilesExactWeightedArray" +"quantilesExactWeightedDistinct" +"quantilesExactWeightedForEach" 
+"quantilesExactWeightedIf" +"quantilesExactWeightedMap" +"quantilesExactWeightedMerge" +"quantilesExactWeightedNull" +"quantilesExactWeightedOrDefault" +"quantilesExactWeightedOrNull" +"quantilesExactWeightedResample" +"quantilesExactWeightedSimpleState" +"quantilesExactWeightedState" +"quantilesForEach" +"quantilesGK" +"quantilesGKArgMax" +"quantilesGKArgMin" +"quantilesGKArray" +"quantilesGKDistinct" +"quantilesGKForEach" +"quantilesGKIf" +"quantilesGKMap" +"quantilesGKMerge" +"quantilesGKNull" +"quantilesGKOrDefault" +"quantilesGKOrNull" +"quantilesGKResample" +"quantilesGKSimpleState" +"quantilesGKState" +"quantilesIf" +"quantilesInterpolatedWeighted" +"quantilesInterpolatedWeightedArgMax" +"quantilesInterpolatedWeightedArgMin" +"quantilesInterpolatedWeightedArray" +"quantilesInterpolatedWeightedDistinct" +"quantilesInterpolatedWeightedForEach" +"quantilesInterpolatedWeightedIf" +"quantilesInterpolatedWeightedMap" +"quantilesInterpolatedWeightedMerge" +"quantilesInterpolatedWeightedNull" +"quantilesInterpolatedWeightedOrDefault" +"quantilesInterpolatedWeightedOrNull" +"quantilesInterpolatedWeightedResample" +"quantilesInterpolatedWeightedSimpleState" +"quantilesInterpolatedWeightedState" +"quantilesMap" +"quantilesMerge" +"quantilesNull" +"quantilesOrDefault" +"quantilesOrNull" +"quantilesResample" +"quantilesSimpleState" +"quantilesState" +"quantilesTDigest" +"quantilesTDigestArgMax" +"quantilesTDigestArgMin" +"quantilesTDigestArray" +"quantilesTDigestDistinct" +"quantilesTDigestForEach" +"quantilesTDigestIf" +"quantilesTDigestMap" +"quantilesTDigestMerge" +"quantilesTDigestNull" +"quantilesTDigestOrDefault" +"quantilesTDigestOrNull" +"quantilesTDigestResample" +"quantilesTDigestSimpleState" +"quantilesTDigestState" +"quantilesTDigestWeighted" +"quantilesTDigestWeightedArgMax" +"quantilesTDigestWeightedArgMin" +"quantilesTDigestWeightedArray" +"quantilesTDigestWeightedDistinct" +"quantilesTDigestWeightedForEach" +"quantilesTDigestWeightedIf" +"quantilesTDigestWeightedMap" +"quantilesTDigestWeightedMerge" +"quantilesTDigestWeightedNull" +"quantilesTDigestWeightedOrDefault" +"quantilesTDigestWeightedOrNull" +"quantilesTDigestWeightedResample" +"quantilesTDigestWeightedSimpleState" +"quantilesTDigestWeightedState" +"quantilesTiming" +"quantilesTimingArgMax" +"quantilesTimingArgMin" +"quantilesTimingArray" +"quantilesTimingDistinct" +"quantilesTimingForEach" +"quantilesTimingIf" +"quantilesTimingMap" +"quantilesTimingMerge" +"quantilesTimingNull" +"quantilesTimingOrDefault" +"quantilesTimingOrNull" +"quantilesTimingResample" +"quantilesTimingSimpleState" +"quantilesTimingState" +"quantilesTimingWeighted" +"quantilesTimingWeightedArgMax" +"quantilesTimingWeightedArgMin" +"quantilesTimingWeightedArray" +"quantilesTimingWeightedDistinct" +"quantilesTimingWeightedForEach" +"quantilesTimingWeightedIf" +"quantilesTimingWeightedMap" +"quantilesTimingWeightedMerge" +"quantilesTimingWeightedNull" +"quantilesTimingWeightedOrDefault" +"quantilesTimingWeightedOrNull" +"quantilesTimingWeightedResample" +"quantilesTimingWeightedSimpleState" +"quantilesTimingWeightedState" +"queryID" +"queryString" +"queryStringAndFragment" +"query_id" +"radians" +"rand" +"rand32" +"rand64" +"randBernoulli" +"randBinomial" +"randCanonical" +"randChiSquared" +"randConstant" +"randExponential" +"randFisherF" +"randLogNormal" +"randNegativeBinomial" +"randNormal" +"randPoisson" +"randStudentT" +"randUniform" +"randomFixedString" +"randomPrintableASCII" +"randomString" +"randomStringUTF8" +"range" +"rank" +"rankArgMax" 
+"rankArgMin" +"rankArray" +"rankCorr" +"rankCorrArgMax" +"rankCorrArgMin" +"rankCorrArray" +"rankCorrDistinct" +"rankCorrForEach" +"rankCorrIf" +"rankCorrMap" +"rankCorrMerge" +"rankCorrNull" +"rankCorrOrDefault" +"rankCorrOrNull" +"rankCorrResample" +"rankCorrSimpleState" +"rankCorrState" +"rankDistinct" +"rankForEach" +"rankIf" +"rankMap" +"rankMerge" +"rankNull" +"rankOrDefault" +"rankOrNull" +"rankResample" +"rankSimpleState" +"rankState" +"readWKTLineString" +"readWKTMultiLineString" +"readWKTMultiPolygon" +"readWKTPoint" +"readWKTPolygon" +"readWKTRing" +"regexpExtract" +"regexpQuoteMeta" +"regionHierarchy" +"regionIn" +"regionToArea" +"regionToCity" +"regionToContinent" +"regionToCountry" +"regionToDistrict" +"regionToName" +"regionToPopulation" +"regionToTopContinent" +"reinterpret" +"reinterpretAsDate" +"reinterpretAsDateTime" +"reinterpretAsFixedString" +"reinterpretAsFloat32" +"reinterpretAsFloat64" +"reinterpretAsInt128" +"reinterpretAsInt16" +"reinterpretAsInt256" +"reinterpretAsInt32" +"reinterpretAsInt64" +"reinterpretAsInt8" +"reinterpretAsString" +"reinterpretAsUInt128" +"reinterpretAsUInt16" +"reinterpretAsUInt256" +"reinterpretAsUInt32" +"reinterpretAsUInt64" +"reinterpretAsUInt8" +"reinterpretAsUUID" +"repeat" +"replace" +"replaceAll" +"replaceOne" +"replaceRegexpAll" +"replaceRegexpOne" +"replicate" +"retention" +"retentionArgMax" +"retentionArgMin" +"retentionArray" +"retentionDistinct" +"retentionForEach" +"retentionIf" +"retentionMap" +"retentionMerge" +"retentionNull" +"retentionOrDefault" +"retentionOrNull" +"retentionResample" +"retentionSimpleState" +"retentionState" +"reverse" +"reverseUTF8" +"revision" +"right" +"rightPad" +"rightPadUTF8" +"rightUTF8" +"round" +"roundAge" +"roundBankers" +"roundDown" +"roundDuration" +"roundToExp2" +"rowNumberInAllBlocks" +"rowNumberInBlock" +"row_number" +"row_numberArgMax" +"row_numberArgMin" +"row_numberArray" +"row_numberDistinct" +"row_numberForEach" +"row_numberIf" +"row_numberMap" +"row_numberMerge" +"row_numberNull" +"row_numberOrDefault" +"row_numberOrNull" +"row_numberResample" +"row_numberSimpleState" +"row_numberState" +"rpad" +"rtrim" +"runningAccumulate" +"runningConcurrency" +"runningDifference" +"runningDifferenceStartingWithFirstValue" +"s2CapContains" +"s2CapUnion" +"s2CellsIntersect" +"s2GetNeighbors" +"s2RectAdd" +"s2RectContains" +"s2RectIntersection" +"s2RectUnion" +"s2ToGeo" +"scalarProduct" +"sequenceCount" +"sequenceCountArgMax" +"sequenceCountArgMin" +"sequenceCountArray" +"sequenceCountDistinct" +"sequenceCountForEach" +"sequenceCountIf" +"sequenceCountMap" +"sequenceCountMerge" +"sequenceCountNull" +"sequenceCountOrDefault" +"sequenceCountOrNull" +"sequenceCountResample" +"sequenceCountSimpleState" +"sequenceCountState" +"sequenceMatch" +"sequenceMatchArgMax" +"sequenceMatchArgMin" +"sequenceMatchArray" +"sequenceMatchDistinct" +"sequenceMatchForEach" +"sequenceMatchIf" +"sequenceMatchMap" +"sequenceMatchMerge" +"sequenceMatchNull" +"sequenceMatchOrDefault" +"sequenceMatchOrNull" +"sequenceMatchResample" +"sequenceMatchSimpleState" +"sequenceMatchState" +"sequenceNextNode" +"sequenceNextNodeArgMax" +"sequenceNextNodeArgMin" +"sequenceNextNodeArray" +"sequenceNextNodeDistinct" +"sequenceNextNodeForEach" +"sequenceNextNodeIf" +"sequenceNextNodeMap" +"sequenceNextNodeMerge" +"sequenceNextNodeNull" +"sequenceNextNodeOrDefault" +"sequenceNextNodeOrNull" +"sequenceNextNodeResample" +"sequenceNextNodeSimpleState" +"sequenceNextNodeState" +"seriesDecomposeSTL" +"seriesOutliersDetectTukey" 
+"seriesPeriodDetectFFT" +"serverTimeZone" +"serverTimezone" +"serverUUID" +"shardCount" +"shardNum" +"showCertificate" +"sigmoid" +"sign" +"simpleJSONExtractBool" +"simpleJSONExtractFloat" +"simpleJSONExtractInt" +"simpleJSONExtractRaw" +"simpleJSONExtractString" +"simpleJSONExtractUInt" +"simpleJSONHas" +"simpleLinearRegression" +"simpleLinearRegressionArgMax" +"simpleLinearRegressionArgMin" +"simpleLinearRegressionArray" +"simpleLinearRegressionDistinct" +"simpleLinearRegressionForEach" +"simpleLinearRegressionIf" +"simpleLinearRegressionMap" +"simpleLinearRegressionMerge" +"simpleLinearRegressionNull" +"simpleLinearRegressionOrDefault" +"simpleLinearRegressionOrNull" +"simpleLinearRegressionResample" +"simpleLinearRegressionSimpleState" +"simpleLinearRegressionState" +"sin" +"singleValueOrNull" +"singleValueOrNullArgMax" +"singleValueOrNullArgMin" +"singleValueOrNullArray" +"singleValueOrNullDistinct" +"singleValueOrNullForEach" +"singleValueOrNullIf" +"singleValueOrNullMap" +"singleValueOrNullMerge" +"singleValueOrNullNull" +"singleValueOrNullOrDefault" +"singleValueOrNullOrNull" +"singleValueOrNullResample" +"singleValueOrNullSimpleState" +"singleValueOrNullState" +"sinh" +"sipHash128" +"sipHash128Keyed" +"sipHash128Reference" +"sipHash128ReferenceKeyed" +"sipHash64" +"sipHash64Keyed" +"skewPop" +"skewPopArgMax" +"skewPopArgMin" +"skewPopArray" +"skewPopDistinct" +"skewPopForEach" +"skewPopIf" +"skewPopMap" +"skewPopMerge" +"skewPopNull" +"skewPopOrDefault" +"skewPopOrNull" +"skewPopResample" +"skewPopSimpleState" +"skewPopState" +"skewSamp" +"skewSampArgMax" +"skewSampArgMin" +"skewSampArray" +"skewSampDistinct" +"skewSampForEach" +"skewSampIf" +"skewSampMap" +"skewSampMerge" +"skewSampNull" +"skewSampOrDefault" +"skewSampOrNull" +"skewSampResample" +"skewSampSimpleState" +"skewSampState" +"sleep" +"sleepEachRow" +"snowflakeIDToDateTime" +"snowflakeIDToDateTime64" +"snowflakeToDateTime" +"snowflakeToDateTime64" +"soundex" +"space" +"sparkBar" +"sparkBarArgMax" +"sparkBarArgMin" +"sparkBarArray" +"sparkBarDistinct" +"sparkBarForEach" +"sparkBarIf" +"sparkBarMap" +"sparkBarMerge" +"sparkBarNull" +"sparkBarOrDefault" +"sparkBarOrNull" +"sparkBarResample" +"sparkBarSimpleState" +"sparkBarState" +"sparkbar" +"sparkbarArgMax" +"sparkbarArgMin" +"sparkbarArray" +"sparkbarDistinct" +"sparkbarForEach" +"sparkbarIf" +"sparkbarMap" +"sparkbarMerge" +"sparkbarNull" +"sparkbarOrDefault" +"sparkbarOrNull" +"sparkbarResample" +"sparkbarSimpleState" +"sparkbarState" +"splitByAlpha" +"splitByChar" +"splitByNonAlpha" +"splitByRegexp" +"splitByString" +"splitByWhitespace" +"sqid" +"sqidDecode" +"sqidEncode" +"sqrt" +"startsWith" +"startsWithUTF8" +"stddevPop" +"stddevPopArgMax" +"stddevPopArgMin" +"stddevPopArray" +"stddevPopDistinct" +"stddevPopForEach" +"stddevPopIf" +"stddevPopMap" +"stddevPopMerge" +"stddevPopNull" +"stddevPopOrDefault" +"stddevPopOrNull" +"stddevPopResample" +"stddevPopSimpleState" +"stddevPopStable" +"stddevPopStableArgMax" +"stddevPopStableArgMin" +"stddevPopStableArray" +"stddevPopStableDistinct" +"stddevPopStableForEach" +"stddevPopStableIf" +"stddevPopStableMap" +"stddevPopStableMerge" +"stddevPopStableNull" +"stddevPopStableOrDefault" +"stddevPopStableOrNull" +"stddevPopStableResample" +"stddevPopStableSimpleState" +"stddevPopStableState" +"stddevPopState" +"stddevSamp" +"stddevSampArgMax" +"stddevSampArgMin" +"stddevSampArray" +"stddevSampDistinct" +"stddevSampForEach" +"stddevSampIf" +"stddevSampMap" +"stddevSampMerge" +"stddevSampNull" +"stddevSampOrDefault" 
+"stddevSampOrNull" +"stddevSampResample" +"stddevSampSimpleState" +"stddevSampStable" +"stddevSampStableArgMax" +"stddevSampStableArgMin" +"stddevSampStableArray" +"stddevSampStableDistinct" +"stddevSampStableForEach" +"stddevSampStableIf" +"stddevSampStableMap" +"stddevSampStableMerge" +"stddevSampStableNull" +"stddevSampStableOrDefault" +"stddevSampStableOrNull" +"stddevSampStableResample" +"stddevSampStableSimpleState" +"stddevSampStableState" +"stddevSampState" +"stem" +"stochasticLinearRegression" +"stochasticLinearRegressionArgMax" +"stochasticLinearRegressionArgMin" +"stochasticLinearRegressionArray" +"stochasticLinearRegressionDistinct" +"stochasticLinearRegressionForEach" +"stochasticLinearRegressionIf" +"stochasticLinearRegressionMap" +"stochasticLinearRegressionMerge" +"stochasticLinearRegressionNull" +"stochasticLinearRegressionOrDefault" +"stochasticLinearRegressionOrNull" +"stochasticLinearRegressionResample" +"stochasticLinearRegressionSimpleState" +"stochasticLinearRegressionState" +"stochasticLogisticRegression" +"stochasticLogisticRegressionArgMax" +"stochasticLogisticRegressionArgMin" +"stochasticLogisticRegressionArray" +"stochasticLogisticRegressionDistinct" +"stochasticLogisticRegressionForEach" +"stochasticLogisticRegressionIf" +"stochasticLogisticRegressionMap" +"stochasticLogisticRegressionMerge" +"stochasticLogisticRegressionNull" +"stochasticLogisticRegressionOrDefault" +"stochasticLogisticRegressionOrNull" +"stochasticLogisticRegressionResample" +"stochasticLogisticRegressionSimpleState" +"stochasticLogisticRegressionState" +"str_to_date" +"str_to_map" +"stringJaccardIndex" +"stringJaccardIndexUTF8" +"stringToH3" +"structureToCapnProtoSchema" +"structureToProtobufSchema" +"studentTTest" +"studentTTestArgMax" +"studentTTestArgMin" +"studentTTestArray" +"studentTTestDistinct" +"studentTTestForEach" +"studentTTestIf" +"studentTTestMap" +"studentTTestMerge" +"studentTTestNull" +"studentTTestOrDefault" +"studentTTestOrNull" +"studentTTestResample" +"studentTTestSimpleState" +"studentTTestState" +"subBitmap" +"subDate" +"substr" +"substring" +"substringIndex" +"substringIndexUTF8" +"substringUTF8" +"subtractDays" +"subtractHours" +"subtractInterval" +"subtractMicroseconds" +"subtractMilliseconds" +"subtractMinutes" +"subtractMonths" +"subtractNanoseconds" +"subtractQuarters" +"subtractSeconds" +"subtractTupleOfIntervals" +"subtractWeeks" +"subtractYears" +"sum" +"sumArgMax" +"sumArgMin" +"sumArray" +"sumCount" +"sumCountArgMax" +"sumCountArgMin" +"sumCountArray" +"sumCountDistinct" +"sumCountForEach" +"sumCountIf" +"sumCountMap" +"sumCountMerge" +"sumCountNull" +"sumCountOrDefault" +"sumCountOrNull" +"sumCountResample" +"sumCountSimpleState" +"sumCountState" +"sumDistinct" +"sumForEach" +"sumIf" +"sumKahan" +"sumKahanArgMax" +"sumKahanArgMin" +"sumKahanArray" +"sumKahanDistinct" +"sumKahanForEach" +"sumKahanIf" +"sumKahanMap" +"sumKahanMerge" +"sumKahanNull" +"sumKahanOrDefault" +"sumKahanOrNull" +"sumKahanResample" +"sumKahanSimpleState" +"sumKahanState" +"sumMap" +"sumMapFiltered" +"sumMapFilteredArgMax" +"sumMapFilteredArgMin" +"sumMapFilteredArray" +"sumMapFilteredDistinct" +"sumMapFilteredForEach" +"sumMapFilteredIf" +"sumMapFilteredMap" +"sumMapFilteredMerge" +"sumMapFilteredNull" +"sumMapFilteredOrDefault" +"sumMapFilteredOrNull" +"sumMapFilteredResample" +"sumMapFilteredSimpleState" +"sumMapFilteredState" +"sumMapFilteredWithOverflow" +"sumMapFilteredWithOverflowArgMax" +"sumMapFilteredWithOverflowArgMin" +"sumMapFilteredWithOverflowArray" 
+"sumMapFilteredWithOverflowDistinct" +"sumMapFilteredWithOverflowForEach" +"sumMapFilteredWithOverflowIf" +"sumMapFilteredWithOverflowMap" +"sumMapFilteredWithOverflowMerge" +"sumMapFilteredWithOverflowNull" +"sumMapFilteredWithOverflowOrDefault" +"sumMapFilteredWithOverflowOrNull" +"sumMapFilteredWithOverflowResample" +"sumMapFilteredWithOverflowSimpleState" +"sumMapFilteredWithOverflowState" +"sumMapWithOverflow" +"sumMapWithOverflowArgMax" +"sumMapWithOverflowArgMin" +"sumMapWithOverflowArray" +"sumMapWithOverflowDistinct" +"sumMapWithOverflowForEach" +"sumMapWithOverflowIf" +"sumMapWithOverflowMap" +"sumMapWithOverflowMerge" +"sumMapWithOverflowNull" +"sumMapWithOverflowOrDefault" +"sumMapWithOverflowOrNull" +"sumMapWithOverflowResample" +"sumMapWithOverflowSimpleState" +"sumMapWithOverflowState" +"sumMappedArrays" +"sumMappedArraysArgMax" +"sumMappedArraysArgMin" +"sumMappedArraysArray" +"sumMappedArraysDistinct" +"sumMappedArraysForEach" +"sumMappedArraysIf" +"sumMappedArraysMap" +"sumMappedArraysMerge" +"sumMappedArraysNull" +"sumMappedArraysOrDefault" +"sumMappedArraysOrNull" +"sumMappedArraysResample" +"sumMappedArraysSimpleState" +"sumMappedArraysState" +"sumMerge" +"sumNull" +"sumOrDefault" +"sumOrNull" +"sumResample" +"sumSimpleState" +"sumState" +"sumWithOverflow" +"sumWithOverflowArgMax" +"sumWithOverflowArgMin" +"sumWithOverflowArray" +"sumWithOverflowDistinct" +"sumWithOverflowForEach" +"sumWithOverflowIf" +"sumWithOverflowMap" +"sumWithOverflowMerge" +"sumWithOverflowNull" +"sumWithOverflowOrDefault" +"sumWithOverflowOrNull" +"sumWithOverflowResample" +"sumWithOverflowSimpleState" +"sumWithOverflowState" +"svg" +"synonyms" +"tan" +"tanh" +"tcpPort" +"tgamma" +"theilsU" +"theilsUArgMax" +"theilsUArgMin" +"theilsUArray" +"theilsUDistinct" +"theilsUForEach" +"theilsUIf" +"theilsUMap" +"theilsUMerge" +"theilsUNull" +"theilsUOrDefault" +"theilsUOrNull" +"theilsUResample" +"theilsUSimpleState" +"theilsUState" +"throwIf" +"tid" +"timeDiff" +"timeSlot" +"timeSlots" +"timeZone" +"timeZoneOf" +"timeZoneOffset" +"timestamp" +"timestampDiff" +"timestamp_diff" +"timezone" +"timezoneOf" +"timezoneOffset" +"toBool" +"toColumnTypeName" +"toDate" +"toDate32" +"toDate32OrDefault" +"toDate32OrNull" +"toDate32OrZero" +"toDateOrDefault" +"toDateOrNull" +"toDateOrZero" +"toDateTime" +"toDateTime32" +"toDateTime64" +"toDateTime64OrDefault" +"toDateTime64OrNull" +"toDateTime64OrZero" +"toDateTimeOrDefault" +"toDateTimeOrNull" +"toDateTimeOrZero" +"toDayOfMonth" +"toDayOfWeek" +"toDayOfYear" +"toDaysSinceYearZero" +"toDecimal128" +"toDecimal128OrDefault" +"toDecimal128OrNull" +"toDecimal128OrZero" +"toDecimal256" +"toDecimal256OrDefault" +"toDecimal256OrNull" +"toDecimal256OrZero" +"toDecimal32" +"toDecimal32OrDefault" +"toDecimal32OrNull" +"toDecimal32OrZero" +"toDecimal64" +"toDecimal64OrDefault" +"toDecimal64OrNull" +"toDecimal64OrZero" +"toDecimalString" +"toFixedString" +"toFloat32" +"toFloat32OrDefault" +"toFloat32OrNull" +"toFloat32OrZero" +"toFloat64" +"toFloat64OrDefault" +"toFloat64OrNull" +"toFloat64OrZero" +"toHour" +"toIPv4" +"toIPv4OrDefault" +"toIPv4OrNull" +"toIPv4OrZero" +"toIPv6" +"toIPv6OrDefault" +"toIPv6OrNull" +"toIPv6OrZero" +"toISOWeek" +"toISOYear" +"toInt128" +"toInt128OrDefault" +"toInt128OrNull" +"toInt128OrZero" +"toInt16" +"toInt16OrDefault" +"toInt16OrNull" +"toInt16OrZero" +"toInt256" +"toInt256OrDefault" +"toInt256OrNull" +"toInt256OrZero" +"toInt32" +"toInt32OrDefault" +"toInt32OrNull" +"toInt32OrZero" +"toInt64" +"toInt64OrDefault" +"toInt64OrNull" 
+"toInt64OrZero" +"toInt8" +"toInt8OrDefault" +"toInt8OrNull" +"toInt8OrZero" +"toIntervalDay" +"toIntervalHour" +"toIntervalMicrosecond" +"toIntervalMillisecond" +"toIntervalMinute" +"toIntervalMonth" +"toIntervalNanosecond" +"toIntervalQuarter" +"toIntervalSecond" +"toIntervalWeek" +"toIntervalYear" +"toJSONString" +"toLastDayOfMonth" +"toLastDayOfWeek" +"toLowCardinality" +"toMillisecond" +"toMinute" +"toModifiedJulianDay" +"toModifiedJulianDayOrNull" +"toMonday" +"toMonth" +"toNullable" +"toQuarter" +"toRelativeDayNum" +"toRelativeHourNum" +"toRelativeMinuteNum" +"toRelativeMonthNum" +"toRelativeQuarterNum" +"toRelativeSecondNum" +"toRelativeWeekNum" +"toRelativeYearNum" +"toSecond" +"toStartOfDay" +"toStartOfFifteenMinutes" +"toStartOfFiveMinute" +"toStartOfFiveMinutes" +"toStartOfHour" +"toStartOfISOYear" +"toStartOfInterval" +"toStartOfMicrosecond" +"toStartOfMillisecond" +"toStartOfMinute" +"toStartOfMonth" +"toStartOfNanosecond" +"toStartOfQuarter" +"toStartOfSecond" +"toStartOfTenMinutes" +"toStartOfWeek" +"toStartOfYear" +"toString" +"toStringCutToZero" +"toTime" +"toTimeZone" +"toTimezone" +"toTypeName" +"toUInt128" +"toUInt128OrDefault" +"toUInt128OrNull" +"toUInt128OrZero" +"toUInt16" +"toUInt16OrDefault" +"toUInt16OrNull" +"toUInt16OrZero" +"toUInt256" +"toUInt256OrDefault" +"toUInt256OrNull" +"toUInt256OrZero" +"toUInt32" +"toUInt32OrDefault" +"toUInt32OrNull" +"toUInt32OrZero" +"toUInt64" +"toUInt64OrDefault" +"toUInt64OrNull" +"toUInt64OrZero" +"toUInt8" +"toUInt8OrDefault" +"toUInt8OrNull" +"toUInt8OrZero" +"toUTCTimestamp" +"toUUID" +"toUUIDOrDefault" +"toUUIDOrNull" +"toUUIDOrZero" +"toUnixTimestamp" +"toUnixTimestamp64Micro" +"toUnixTimestamp64Milli" +"toUnixTimestamp64Nano" +"toValidUTF8" +"toWeek" +"toYYYYMM" +"toYYYYMMDD" +"toYYYYMMDDhhmmss" +"toYear" +"toYearWeek" +"to_utc_timestamp" +"today" +"tokens" +"topK" +"topKArgMax" +"topKArgMin" +"topKArray" +"topKDistinct" +"topKForEach" +"topKIf" +"topKMap" +"topKMerge" +"topKNull" +"topKOrDefault" +"topKOrNull" +"topKResample" +"topKSimpleState" +"topKState" +"topKWeighted" +"topKWeightedArgMax" +"topKWeightedArgMin" +"topKWeightedArray" +"topKWeightedDistinct" +"topKWeightedForEach" +"topKWeightedIf" +"topKWeightedMap" +"topKWeightedMerge" +"topKWeightedNull" +"topKWeightedOrDefault" +"topKWeightedOrNull" +"topKWeightedResample" +"topKWeightedSimpleState" +"topKWeightedState" +"topLevelDomain" +"topLevelDomainRFC" +"transactionID" +"transactionLatestSnapshot" +"transactionOldestSnapshot" +"transform" +"translate" +"translateUTF8" +"trim" +"trimBoth" +"trimLeft" +"trimRight" +"trunc" +"truncate" +"tryBase58Decode" +"tryBase64Decode" +"tryBase64URLDecode" +"tryDecrypt" +"tryIdnaEncode" +"tryPunycodeDecode" +"tumble" +"tumbleEnd" +"tumbleStart" +"tuple" +"tupleConcat" +"tupleDivide" +"tupleDivideByNumber" +"tupleElement" +"tupleHammingDistance" +"tupleIntDiv" +"tupleIntDivByNumber" +"tupleIntDivOrZero" +"tupleIntDivOrZeroByNumber" +"tupleMinus" +"tupleModulo" +"tupleModuloByNumber" +"tupleMultiply" +"tupleMultiplyByNumber" +"tupleNames" +"tupleNegate" +"tuplePlus" +"tupleToNameValuePairs" +"ucase" +"unbin" +"unhex" +"uniq" +"uniqArgMax" +"uniqArgMin" +"uniqArray" +"uniqCombined" +"uniqCombined64" +"uniqCombined64ArgMax" +"uniqCombined64ArgMin" +"uniqCombined64Array" +"uniqCombined64Distinct" +"uniqCombined64ForEach" +"uniqCombined64If" +"uniqCombined64Map" +"uniqCombined64Merge" +"uniqCombined64Null" +"uniqCombined64OrDefault" +"uniqCombined64OrNull" +"uniqCombined64Resample" +"uniqCombined64SimpleState" 
+"uniqCombined64State" +"uniqCombinedArgMax" +"uniqCombinedArgMin" +"uniqCombinedArray" +"uniqCombinedDistinct" +"uniqCombinedForEach" +"uniqCombinedIf" +"uniqCombinedMap" +"uniqCombinedMerge" +"uniqCombinedNull" +"uniqCombinedOrDefault" +"uniqCombinedOrNull" +"uniqCombinedResample" +"uniqCombinedSimpleState" +"uniqCombinedState" +"uniqDistinct" +"uniqExact" +"uniqExactArgMax" +"uniqExactArgMin" +"uniqExactArray" +"uniqExactDistinct" +"uniqExactForEach" +"uniqExactIf" +"uniqExactMap" +"uniqExactMerge" +"uniqExactNull" +"uniqExactOrDefault" +"uniqExactOrNull" +"uniqExactResample" +"uniqExactSimpleState" +"uniqExactState" +"uniqForEach" +"uniqHLL12" +"uniqHLL12ArgMax" +"uniqHLL12ArgMin" +"uniqHLL12Array" +"uniqHLL12Distinct" +"uniqHLL12ForEach" +"uniqHLL12If" +"uniqHLL12Map" +"uniqHLL12Merge" +"uniqHLL12Null" +"uniqHLL12OrDefault" +"uniqHLL12OrNull" +"uniqHLL12Resample" +"uniqHLL12SimpleState" +"uniqHLL12State" +"uniqIf" +"uniqMap" +"uniqMerge" +"uniqNull" +"uniqOrDefault" +"uniqOrNull" +"uniqResample" +"uniqSimpleState" +"uniqState" +"uniqTheta" +"uniqThetaArgMax" +"uniqThetaArgMin" +"uniqThetaArray" +"uniqThetaDistinct" +"uniqThetaForEach" +"uniqThetaIf" +"uniqThetaIntersect" +"uniqThetaMap" +"uniqThetaMerge" +"uniqThetaNot" +"uniqThetaNull" +"uniqThetaOrDefault" +"uniqThetaOrNull" +"uniqThetaResample" +"uniqThetaSimpleState" +"uniqThetaState" +"uniqThetaUnion" +"uniqUpTo" +"uniqUpToArgMax" +"uniqUpToArgMin" +"uniqUpToArray" +"uniqUpToDistinct" +"uniqUpToForEach" +"uniqUpToIf" +"uniqUpToMap" +"uniqUpToMerge" +"uniqUpToNull" +"uniqUpToOrDefault" +"uniqUpToOrNull" +"uniqUpToResample" +"uniqUpToSimpleState" +"uniqUpToState" +"upper" +"upperUTF8" +"uptime" +"user" +"validateNestedArraySizes" +"varPop" +"varPopArgMax" +"varPopArgMin" +"varPopArray" +"varPopDistinct" +"varPopForEach" +"varPopIf" +"varPopMap" +"varPopMerge" +"varPopNull" +"varPopOrDefault" +"varPopOrNull" +"varPopResample" +"varPopSimpleState" +"varPopStable" +"varPopStableArgMax" +"varPopStableArgMin" +"varPopStableArray" +"varPopStableDistinct" +"varPopStableForEach" +"varPopStableIf" +"varPopStableMap" +"varPopStableMerge" +"varPopStableNull" +"varPopStableOrDefault" +"varPopStableOrNull" +"varPopStableResample" +"varPopStableSimpleState" +"varPopStableState" +"varPopState" +"varSamp" +"varSampArgMax" +"varSampArgMin" +"varSampArray" +"varSampDistinct" +"varSampForEach" +"varSampIf" +"varSampMap" +"varSampMerge" +"varSampNull" +"varSampOrDefault" +"varSampOrNull" +"varSampResample" +"varSampSimpleState" +"varSampStable" +"varSampStableArgMax" +"varSampStableArgMin" +"varSampStableArray" +"varSampStableDistinct" +"varSampStableForEach" +"varSampStableIf" +"varSampStableMap" +"varSampStableMerge" +"varSampStableNull" +"varSampStableOrDefault" +"varSampStableOrNull" +"varSampStableResample" +"varSampStableSimpleState" +"varSampStableState" +"varSampState" +"variantElement" +"variantType" +"vectorDifference" +"vectorSum" +"version" +"visibleWidth" +"visitParamExtractBool" +"visitParamExtractFloat" +"visitParamExtractInt" +"visitParamExtractRaw" +"visitParamExtractString" +"visitParamExtractUInt" +"visitParamHas" +"week" +"welchTTest" +"welchTTestArgMax" +"welchTTestArgMin" +"welchTTestArray" +"welchTTestDistinct" +"welchTTestForEach" +"welchTTestIf" +"welchTTestMap" +"welchTTestMerge" +"welchTTestNull" +"welchTTestOrDefault" +"welchTTestOrNull" +"welchTTestResample" +"welchTTestSimpleState" +"welchTTestState" +"widthBucket" +"width_bucket" +"windowFunnel" +"windowFunnelArgMax" +"windowFunnelArgMin" +"windowFunnelArray" 
+"windowFunnelDistinct" +"windowFunnelForEach" +"windowFunnelIf" +"windowFunnelMap" +"windowFunnelMerge" +"windowFunnelNull" +"windowFunnelOrDefault" +"windowFunnelOrNull" +"windowFunnelResample" +"windowFunnelSimpleState" +"windowFunnelState" +"windowID" +"wkt" +"wordShingleMinHash" +"wordShingleMinHashArg" +"wordShingleMinHashArgCaseInsensitive" +"wordShingleMinHashArgCaseInsensitiveUTF8" +"wordShingleMinHashArgUTF8" +"wordShingleMinHashCaseInsensitive" +"wordShingleMinHashCaseInsensitiveUTF8" +"wordShingleMinHashUTF8" +"wordShingleSimHash" +"wordShingleSimHashCaseInsensitive" +"wordShingleSimHashCaseInsensitiveUTF8" +"wordShingleSimHashUTF8" +"wyHash64" +"xor" +"xxHash32" +"xxHash64" +"xxh3" +"yandexConsistentHash" +"yearweek" +"yesterday" +"zookeeperSessionUptime" From 04286bc270f9f473a07bc4ae27ae61d96256f775 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 13 Aug 2024 14:45:05 +0000 Subject: [PATCH 2170/2361] Add status to PipelineExecutor. Verify status of pusing pipeline. --- programs/obfuscator/Obfuscator.cpp | 2 +- src/Client/LocalConnection.cpp | 8 +++- src/Interpreters/SystemLog.cpp | 2 +- src/Processors/Executors/ExecutingGraph.cpp | 19 ++++----- src/Processors/Executors/ExecutingGraph.h | 11 ++++- src/Processors/Executors/PipelineExecutor.cpp | 31 +++++++++----- src/Processors/Executors/PipelineExecutor.h | 20 +++++++++- .../PushingAsyncPipelineExecutor.cpp | 21 +++++++--- .../Executors/PushingAsyncPipelineExecutor.h | 7 +++- .../Executors/PushingPipelineExecutor.cpp | 40 ++++++++++++++----- .../Executors/PushingPipelineExecutor.h | 9 +++-- .../Transforms/CreatingSetsTransform.cpp | 3 +- src/Server/GRPCServer.cpp | 3 +- src/Server/TCPHandler.cpp | 13 ++++-- src/Storages/Distributed/DistributedSink.cpp | 6 ++- src/Storages/StorageBuffer.cpp | 3 +- src/Storages/tests/gtest_storage_log.cpp | 2 +- ...221_insert_timeout_overflow_mode.reference | 2 + .../03221_insert_timeout_overflow_mode.sh | 8 ++++ 19 files changed, 153 insertions(+), 57 deletions(-) create mode 100644 tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference create mode 100755 tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 688ae1a1143..7c13215e350 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1462,7 +1462,7 @@ try while (in_executor.pull(block)) { Columns columns = obfuscator.generate(block.getColumns()); - out_executor.push(header.cloneWithColumns(columns)); + std::ignore = out_executor.push(header.cloneWithColumns(columns)); processed_rows += block.rows(); if (!silent) std::cerr << "Processed " << processed_rows << " rows\n"; diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index 072184e0a66..8f1e0958002 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -287,13 +287,17 @@ void LocalConnection::sendData(const Block & block, const String &, bool) if (!block) return; + bool inserted = false; if (state->pushing_async_executor) - state->pushing_async_executor->push(block); + inserted = state->pushing_async_executor->push(block); else if (state->pushing_executor) - state->pushing_executor->push(block); + inserted = state->pushing_executor->push(block); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown executor"); + if (!inserted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); + if (send_profile_events) sendProfileEvents(); } diff --git 
a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 572481e6b12..0cad56af00a 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -556,7 +556,7 @@ void SystemLog::flushImpl(const std::vector & to_flush, PushingPipelineExecutor executor(io.pipeline); executor.start(); - executor.push(block); + std::ignore = executor.push(block); executor.finish(); } catch (...) diff --git a/src/Processors/Executors/ExecutingGraph.cpp b/src/Processors/Executors/ExecutingGraph.cpp index 6d5b60d8159..10470325bb8 100644 --- a/src/Processors/Executors/ExecutingGraph.cpp +++ b/src/Processors/Executors/ExecutingGraph.cpp @@ -96,7 +96,7 @@ bool ExecutingGraph::addEdges(uint64_t node) return was_edge_added; } -bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) +ExecutingGraph::UpdateNodeStatus ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) { auto & cur_node = *nodes[pid]; Processors new_processors; @@ -108,7 +108,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) catch (...) { cur_node.exception = std::current_exception(); - return false; + return UpdateNodeStatus::Exception; } { @@ -118,7 +118,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) { for (auto & processor : new_processors) processor->cancel(); - return false; + return UpdateNodeStatus::Cancelled; } processors->insert(processors->end(), new_processors.begin(), new_processors.end()); @@ -178,7 +178,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) } } - return true; + return UpdateNodeStatus::Done; } void ExecutingGraph::initializeExecution(Queue & queue) @@ -213,7 +213,7 @@ void ExecutingGraph::initializeExecution(Queue & queue) } -bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue) +ExecutingGraph::UpdateNodeStatus ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue) { std::stack updated_edges; std::stack updated_processors; @@ -309,7 +309,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue catch (...) { node.exception = std::current_exception(); - return false; + return UpdateNodeStatus::Exception; } #ifndef NDEBUG @@ -386,8 +386,9 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue read_lock.unlock(); { std::unique_lock lock(nodes_mutex); - if (!expandPipeline(updated_processors, pid)) - return false; + auto status = expandPipeline(updated_processors, pid); + if (status != UpdateNodeStatus::Done) + return status; } read_lock.lock(); @@ -397,7 +398,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue } } - return true; + return UpdateNodeStatus::Done; } void ExecutingGraph::cancel(bool cancel_all_processors) diff --git a/src/Processors/Executors/ExecutingGraph.h b/src/Processors/Executors/ExecutingGraph.h index 71dcd360a2c..e1a6ac96203 100644 --- a/src/Processors/Executors/ExecutingGraph.h +++ b/src/Processors/Executors/ExecutingGraph.h @@ -138,10 +138,17 @@ public: /// Traverse graph the first time to update all the childless nodes. void initializeExecution(Queue & queue); + enum class UpdateNodeStatus + { + Done, + Exception, + Cancelled, + }; + /// Update processor with pid number (call IProcessor::prepare). /// Check parents and children of current processor and push them to stacks if they also need to be updated. /// If processor wants to be expanded, lock will be upgraded to get write access to pipeline. 
- bool updateNode(uint64_t pid, Queue & queue, Queue & async_queue); + UpdateNodeStatus updateNode(uint64_t pid, Queue & queue, Queue & async_queue); void cancel(bool cancel_all_processors = true); @@ -155,7 +162,7 @@ private: /// Update graph after processor (pid) returned ExpandPipeline status. /// All new nodes and nodes with updated ports are pushed into stack. - bool expandPipeline(std::stack & stack, uint64_t pid); + UpdateNodeStatus expandPipeline(std::stack & stack, uint64_t pid); std::shared_ptr processors; std::vector source_processors; diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 82cad471a29..23b3a6d9f5f 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -77,9 +77,9 @@ const Processors & PipelineExecutor::getProcessors() const return graph->getProcessors(); } -void PipelineExecutor::cancel() +void PipelineExecutor::cancel(ExecutionStatus reason) { - cancelled = true; + tryUpdateExecutionStatus(ExecutionStatus::Executing, reason); finish(); graph->cancel(); } @@ -98,6 +98,11 @@ void PipelineExecutor::finish() tasks.finish(); } +bool PipelineExecutor::tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired) +{ + return execution_status.compare_exchange_strong(expected, desired); +} + void PipelineExecutor::execute(size_t num_threads, bool concurrency_control) { checkTimeLimit(); @@ -120,7 +125,7 @@ void PipelineExecutor::execute(size_t num_threads, bool concurrency_control) } catch (...) { - span.addAttribute(ExecutionStatus::fromCurrentException()); + span.addAttribute(DB::ExecutionStatus::fromCurrentException()); #ifndef NDEBUG LOG_TRACE(log, "Exception while executing query. Current state:\n{}", dumpPipeline()); @@ -169,7 +174,7 @@ bool PipelineExecutor::checkTimeLimitSoft() // We call cancel here so that all processors are notified and tasks waken up // so that the "break" is faster and doesn't wait for long events if (!continuing) - cancel(); + cancel(ExecutionStatus::CancelledByTimeout); return continuing; } @@ -195,7 +200,8 @@ void PipelineExecutor::finalizeExecution() { checkTimeLimit(); - if (cancelled) + auto status = execution_status.load(); + if (status == ExecutionStatus::CancelledByTimeout || status == ExecutionStatus::CancelledByUser) return; bool all_processors_finished = true; @@ -271,7 +277,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie break; if (!context.executeTask()) - cancel(); + cancel(ExecutionStatus::Exception); if (tasks.isFinished()) break; @@ -289,11 +295,13 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie Queue async_queue; /// Prepare processor after execution. - if (!graph->updateNode(context.getProcessorID(), queue, async_queue)) - cancel(); + auto status = graph->updateNode(context.getProcessorID(), queue, async_queue); + if (status == ExecutingGraph::UpdateNodeStatus::Exception) + cancel(ExecutionStatus::Exception); /// Push other tasks to global queue. - tasks.pushTasks(queue, async_queue, context); + if (status == ExecutingGraph::UpdateNodeStatus::Done) + tasks.pushTasks(queue, async_queue, context); } #ifndef NDEBUG @@ -309,7 +317,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie { /// spawnThreads can throw an exception, for example CANNOT_SCHEDULE_TASK. /// We should cancel execution properly before rethrow. 
- cancel(); + cancel(ExecutionStatus::Exception); throw; } @@ -328,6 +336,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie void PipelineExecutor::initializeExecution(size_t num_threads, bool concurrency_control) { is_execution_initialized = true; + tryUpdateExecutionStatus(ExecutionStatus::NotStarted, ExecutionStatus::Executing); size_t use_threads = num_threads; @@ -393,7 +402,7 @@ void PipelineExecutor::executeImpl(size_t num_threads, bool concurrency_control) { /// If finished_flag is not set, there was an exception. /// Cancel execution in this case. - cancel(); + cancel(ExecutionStatus::Exception); if (pool) pool->wait(); } diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index ae119355cb5..79d0a29d4e1 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -48,8 +48,20 @@ public: const Processors & getProcessors() const; + enum class ExecutionStatus + { + NotStarted, + Executing, + Finished, + Exception, + CancelledByUser, + CancelledByTimeout, + }; + /// Cancel execution. May be called from another thread. - void cancel(); + void cancel() { cancel(ExecutionStatus::CancelledByUser); } + + ExecutionStatus getExecutionStatus() const { return execution_status.load(); } /// Cancel processors which only read data from source. May be called from another thread. void cancelReading(); @@ -81,7 +93,7 @@ private: /// system.opentelemetry_span_log bool trace_processors = false; - std::atomic_bool cancelled = false; + std::atomic execution_status = ExecutionStatus::NotStarted; std::atomic_bool cancelled_reading = false; LoggerPtr log = getLogger("PipelineExecutor"); @@ -105,6 +117,10 @@ private: void executeStepImpl(size_t thread_num, std::atomic_bool * yield_flag = nullptr); void executeSingleThread(size_t thread_num); void finish(); + void cancel(ExecutionStatus reason); + + /// If execution_status == from, change it to desired. 
+ bool tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired); String dumpPipeline() const; }; diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp index 830a96533ed..db5cf451c9e 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp @@ -176,7 +176,17 @@ void PushingAsyncPipelineExecutor::start() data->thread = ThreadFromGlobalPool(std::move(func)); } -void PushingAsyncPipelineExecutor::push(Chunk chunk) +static void checkExecutionStatus(PipelineExecutor::ExecutionStatus status) +{ + if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout + || status == PipelineExecutor::ExecutionStatus::CancelledByUser) + return; + + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); +} + +bool PushingAsyncPipelineExecutor::push(Chunk chunk) { if (!started) start(); @@ -185,13 +195,14 @@ void PushingAsyncPipelineExecutor::push(Chunk chunk) data->rethrowExceptionIfHas(); if (!is_pushed) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Pipeline for PushingAsyncPipelineExecutor was finished before all data was inserted"); + checkExecutionStatus(data->executor->getExecutionStatus()); + + return is_pushed; } -void PushingAsyncPipelineExecutor::push(Block block) +bool PushingAsyncPipelineExecutor::push(Block block) { - push(Chunk(block.getColumns(), block.rows())); + return push(Chunk(block.getColumns(), block.rows())); } void PushingAsyncPipelineExecutor::finish() diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.h b/src/Processors/Executors/PushingAsyncPipelineExecutor.h index f976cd4c339..7835aaf596f 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.h +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.h @@ -36,8 +36,11 @@ public: void start(); - void push(Chunk chunk); - void push(Block block); + /// Return 'true' if push was successful. + /// Return 'false' if pipline was cancelled without exception. + /// This may happen in case of timeout_overflow_mode = 'break' OR internal bug. 
+ [[nodiscard]] bool push(Chunk chunk); + [[nodiscard]] bool push(Block block); void finish(); diff --git a/src/Processors/Executors/PushingPipelineExecutor.cpp b/src/Processors/Executors/PushingPipelineExecutor.cpp index 696932932df..3133cfd9a1e 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingPipelineExecutor.cpp @@ -80,36 +80,56 @@ const Block & PushingPipelineExecutor::getHeader() const return pushing_source->getPort().getHeader(); } +static void checkExecutionStatus(PipelineExecutor::ExecutionStatus status) +{ + if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout + || status == PipelineExecutor::ExecutionStatus::CancelledByUser) + return; -void PushingPipelineExecutor::start() + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); +} + +bool PushingPipelineExecutor::start() { if (started) - return; + return true; started = true; executor = std::make_shared(pipeline.processors, pipeline.process_list_element); executor->setReadProgressCallback(pipeline.getReadProgressCallback()); if (!executor->executeStep(&input_wait_flag)) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); + { + checkExecutionStatus(executor->getExecutionStatus()); + return false; + } + + return true; } -void PushingPipelineExecutor::push(Chunk chunk) +bool PushingPipelineExecutor::push(Chunk chunk) { if (!started) - start(); + { + if (!start()) + return false; + } pushing_source->setData(std::move(chunk)); if (!executor->executeStep(&input_wait_flag)) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); + { + checkExecutionStatus(executor->getExecutionStatus()); + return false; + } + + return true; } -void PushingPipelineExecutor::push(Block block) +bool PushingPipelineExecutor::push(Block block) { - push(Chunk(block.getColumns(), block.rows())); + return push(Chunk(block.getColumns(), block.rows())); } void PushingPipelineExecutor::finish() diff --git a/src/Processors/Executors/PushingPipelineExecutor.h b/src/Processors/Executors/PushingPipelineExecutor.h index f549c9482db..4021f61fb6b 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.h +++ b/src/Processors/Executors/PushingPipelineExecutor.h @@ -35,10 +35,13 @@ public: /// Get structure of returned block or chunk. const Block & getHeader() const; - void start(); + bool start(); - void push(Chunk chunk); - void push(Block block); + /// Return 'true' if push was successful. + /// Return 'false' if pipline was cancelled without exception. + /// This may happen in case of timeout_overflow_mode = 'break' OR internal bug. 
+ [[nodiscard]] bool push(Chunk chunk); + [[nodiscard]] bool push(Block block); void finish(); diff --git a/src/Processors/Transforms/CreatingSetsTransform.cpp b/src/Processors/Transforms/CreatingSetsTransform.cpp index eeb8f4a6060..857233ac028 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -215,7 +215,8 @@ void CreatingSetsTransform::consume(Chunk chunk) if (!done_with_table) { block = materializeBlock(block); - executor->push(block); + if (!executor->push(block)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot insert into a table"); rows_to_transfer += block.rows(); bytes_to_transfer += block.bytes(); diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index d8a4d7f0e1f..c261d76ef33 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1012,7 +1012,8 @@ namespace while (pipeline_executor->pull(block)) { if (block) - executor.push(block); + if (!executor.push(block)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); } if (isQueryCancelled()) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 448dfafbd9d..283b60b533c 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -932,12 +932,18 @@ void TCPHandler::processInsertQuery() executor.start(); if (processed_data) - executor.push(std::move(processed_data)); + { + if (!executor.push(std::move(processed_data))) + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); + } else startInsertQuery(); while (readDataNext()) - executor.push(std::move(state.block_for_insert)); + { + if (!executor.push(std::move(state.block_for_insert))) + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); + } if (state.cancellation_status == CancellationStatus::FULLY_CANCELLED) executor.cancel(); @@ -2034,7 +2040,8 @@ bool TCPHandler::receiveData(bool scalar) QueryPipeline temporary_table_out(storage->write(ASTPtr(), metadata_snapshot, query_context, /*async_insert=*/false)); PushingPipelineExecutor executor(temporary_table_out); executor.start(); - executor.push(block); + if (!executor.push(block)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot insert into temporary table"); executor.finish(); } else if (state.need_receive_data_for_input) diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index e3e73e42096..69f9e5b9380 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -89,7 +89,8 @@ static void writeBlockConvert(PushingPipelineExecutor & executor, const Block & { Block adopted_block = adoptBlock(executor.getHeader(), block, log); for (size_t i = 0; i < repeats; ++i) - executor.push(adopted_block); + if (!executor.push(adopted_block)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); } @@ -408,7 +409,8 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si CurrentMetrics::Increment metric_increment{CurrentMetrics::DistributedSend}; Block adopted_shard_block = adoptBlock(job.executor->getHeader(), shard_block, log); - job.executor->push(adopted_shard_block); + if (!job.executor->push(adopted_shard_block)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); } else // local { diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index f753d369d2d..3223a2813a3 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -1069,7 
+1069,8 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl auto block_io = interpreter.execute(); PushingPipelineExecutor executor(block_io.pipeline); executor.start(); - executor.push(std::move(block_to_write)); + if (!executor.push(std::move(block_to_write))) + throw Exception(ErrorCodes::LOGICAL_ERROR, "StorageBuffer could not write data to destination table"); executor.finish(); } diff --git a/src/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp index d75f3616f21..60890337cb4 100644 --- a/src/Storages/tests/gtest_storage_log.cpp +++ b/src/Storages/tests/gtest_storage_log.cpp @@ -98,7 +98,7 @@ std::string writeData(int rows, DB::StoragePtr & table, const DB::ContextPtr con QueryPipeline pipeline(table->write({}, metadata_snapshot, context, /*async_insert=*/false)); PushingPipelineExecutor executor(pipeline); - executor.push(block); + std::ignore = executor.push(block); executor.finish(); return data; diff --git a/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference new file mode 100644 index 00000000000..68538c3f75b --- /dev/null +++ b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference @@ -0,0 +1,2 @@ +QUERY_WAS_CANCELLED +QUERY_WAS_CANCELLED diff --git a/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh new file mode 100755 index 00000000000..030c5211b2d --- /dev/null +++ b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query "create table null_t (number UInt64) engine = Null;" +${CLICKHOUSE_CLIENT} --query "select sleep(0.1) from system.numbers settings max_block_size = 1 format Native" 2>/dev/null | ${CLICKHOUSE_CLIENT} --max_execution_time = 0.3 --timeout_overflow_mode = 'break' --query "insert into null_t format Native" 2>&1 | grep -o "QUERY_WAS_CANCELLED" From 5a6090ad05117c76a4b37071a6362f30f395b235 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 13 Aug 2024 16:25:07 +0200 Subject: [PATCH 2171/2361] Fix --- src/Processors/Sources/PostgreSQLSource.cpp | 12 ++++++------ src/Processors/Sources/PostgreSQLSource.h | 14 +++++++++----- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/Processors/Sources/PostgreSQLSource.cpp b/src/Processors/Sources/PostgreSQLSource.cpp index a3d6fd691d8..b9bda46bd10 100644 --- a/src/Processors/Sources/PostgreSQLSource.cpp +++ b/src/Processors/Sources/PostgreSQLSource.cpp @@ -35,9 +35,9 @@ PostgreSQLSource::PostgreSQLSource( const Block & sample_block, UInt64 max_block_size_) : ISource(sample_block.cloneEmpty()) - , query_str(query_str_) , max_block_size(max_block_size_) , connection_holder(std::move(connection_holder_)) + , query_str(query_str_) { init(sample_block); } @@ -51,10 +51,10 @@ PostgreSQLSource::PostgreSQLSource( UInt64 max_block_size_, bool auto_commit_) : ISource(sample_block.cloneEmpty()) - , query_str(query_str_) - , tx(std::move(tx_)) , max_block_size(max_block_size_) , auto_commit(auto_commit_) + , query_str(query_str_) + , tx(std::move(tx_)) { init(sample_block); } @@ -204,15 +204,15 @@ PostgreSQLSource::~PostgreSQLSource() */ stream->close(); } - - stream.reset(); - tx.reset(); } catch (...) 
{ tryLogCurrentException(__PRETTY_FUNCTION__); } + stream.reset(); + tx.reset(); + if (connection_holder) connection_holder->setBroken(); } diff --git a/src/Processors/Sources/PostgreSQLSource.h b/src/Processors/Sources/PostgreSQLSource.h index 8a648ae8bb5..319c5d8d7c2 100644 --- a/src/Processors/Sources/PostgreSQLSource.h +++ b/src/Processors/Sources/PostgreSQLSource.h @@ -38,14 +38,12 @@ protected: UInt64 max_block_size_, bool auto_commit_); - String query_str; - std::shared_ptr tx; - std::unique_ptr stream; - Status prepare() override; - void onStart(); Chunk generate() override; + + void onStart(); + void onFinish(); private: @@ -61,6 +59,12 @@ private: postgres::ConnectionHolderPtr connection_holder; std::unordered_map array_info; + +protected: + String query_str; + /// tx and stream must be destroyed before connection_holder. + std::shared_ptr tx; + std::unique_ptr stream; }; From 7ebb6efb2e413c44a82f2ac25d6dcb8e0da2f59a Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 13 Aug 2024 15:10:56 +0000 Subject: [PATCH 2172/2361] Style check --- tests/queries/0_stateless/00652_mergetree_mutations.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh index 6be0ebf882f..edb306d3883 100755 --- a/tests/queries/0_stateless/00652_mergetree_mutations.sh +++ b/tests/queries/0_stateless/00652_mergetree_mutations.sh @@ -73,7 +73,7 @@ sleep 0.1 for i in {1..10} do - if [ $(${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner'") -eq 2 ]; then + if [ "$(${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner'")" -eq 2 ]; then break fi From ae614648a3397c4738b85ab8d138419387c562ed Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 13 Aug 2024 15:13:42 +0000 Subject: [PATCH 2173/2361] trigger sync From c5ae139c972d46d1e0bfa6ab5f165a049b6786f5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 13 Aug 2024 15:18:07 +0000 Subject: [PATCH 2174/2361] Cleanup. 
--- programs/obfuscator/Obfuscator.cpp | 2 +- src/Client/LocalConnection.cpp | 8 ++--- src/Interpreters/SystemLog.cpp | 2 +- .../PushingAsyncPipelineExecutor.cpp | 15 ++++---- .../Executors/PushingAsyncPipelineExecutor.h | 7 ++-- .../Executors/PushingPipelineExecutor.cpp | 34 ++++++------------- .../Executors/PushingPipelineExecutor.h | 9 ++--- .../Transforms/CreatingSetsTransform.cpp | 3 +- src/Server/GRPCServer.cpp | 3 +- src/Server/TCPHandler.cpp | 13 ++----- src/Storages/Distributed/DistributedSink.cpp | 6 ++-- src/Storages/StorageBuffer.cpp | 3 +- src/Storages/tests/gtest_storage_log.cpp | 2 +- 13 files changed, 36 insertions(+), 71 deletions(-) diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 7c13215e350..688ae1a1143 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -1462,7 +1462,7 @@ try while (in_executor.pull(block)) { Columns columns = obfuscator.generate(block.getColumns()); - std::ignore = out_executor.push(header.cloneWithColumns(columns)); + out_executor.push(header.cloneWithColumns(columns)); processed_rows += block.rows(); if (!silent) std::cerr << "Processed " << processed_rows << " rows\n"; diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp index 8f1e0958002..072184e0a66 100644 --- a/src/Client/LocalConnection.cpp +++ b/src/Client/LocalConnection.cpp @@ -287,17 +287,13 @@ void LocalConnection::sendData(const Block & block, const String &, bool) if (!block) return; - bool inserted = false; if (state->pushing_async_executor) - inserted = state->pushing_async_executor->push(block); + state->pushing_async_executor->push(block); else if (state->pushing_executor) - inserted = state->pushing_executor->push(block); + state->pushing_executor->push(block); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown executor"); - if (!inserted) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); - if (send_profile_events) sendProfileEvents(); } diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 0cad56af00a..572481e6b12 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -556,7 +556,7 @@ void SystemLog::flushImpl(const std::vector & to_flush, PushingPipelineExecutor executor(io.pipeline); executor.start(); - std::ignore = executor.push(block); + executor.push(block); executor.finish(); } catch (...) 
diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp index db5cf451c9e..866d224a08d 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp @@ -15,6 +15,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int QUERY_WAS_CANCELLED; } class PushingAsyncSource : public ISource @@ -176,17 +177,17 @@ void PushingAsyncPipelineExecutor::start() data->thread = ThreadFromGlobalPool(std::move(func)); } -static void checkExecutionStatus(PipelineExecutor::ExecutionStatus status) +[[noreturn]] static void throwOnExecutionStatus(PipelineExecutor::ExecutionStatus status) { if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout || status == PipelineExecutor::ExecutionStatus::CancelledByUser) - return; + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); throw Exception(ErrorCodes::LOGICAL_ERROR, "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); } -bool PushingAsyncPipelineExecutor::push(Chunk chunk) +void PushingAsyncPipelineExecutor::push(Chunk chunk) { if (!started) start(); @@ -195,14 +196,12 @@ bool PushingAsyncPipelineExecutor::push(Chunk chunk) data->rethrowExceptionIfHas(); if (!is_pushed) - checkExecutionStatus(data->executor->getExecutionStatus()); - - return is_pushed; + throwOnExecutionStatus(data->executor->getExecutionStatus()); } -bool PushingAsyncPipelineExecutor::push(Block block) +void PushingAsyncPipelineExecutor::push(Block block) { - return push(Chunk(block.getColumns(), block.rows())); + push(Chunk(block.getColumns(), block.rows())); } void PushingAsyncPipelineExecutor::finish() diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.h b/src/Processors/Executors/PushingAsyncPipelineExecutor.h index 7835aaf596f..f976cd4c339 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.h +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.h @@ -36,11 +36,8 @@ public: void start(); - /// Return 'true' if push was successful. - /// Return 'false' if pipline was cancelled without exception. - /// This may happen in case of timeout_overflow_mode = 'break' OR internal bug. 
- [[nodiscard]] bool push(Chunk chunk); - [[nodiscard]] bool push(Block block); + void push(Chunk chunk); + void push(Block block); void finish(); diff --git a/src/Processors/Executors/PushingPipelineExecutor.cpp b/src/Processors/Executors/PushingPipelineExecutor.cpp index 3133cfd9a1e..7a1c0111a3a 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingPipelineExecutor.cpp @@ -11,6 +11,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int QUERY_WAS_CANCELLED; } class PushingSource : public ISource @@ -80,56 +81,43 @@ const Block & PushingPipelineExecutor::getHeader() const return pushing_source->getPort().getHeader(); } -static void checkExecutionStatus(PipelineExecutor::ExecutionStatus status) +[[noreturn]] static void throwOnExecutionStatus(PipelineExecutor::ExecutionStatus status) { if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout || status == PipelineExecutor::ExecutionStatus::CancelledByUser) - return; + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); throw Exception(ErrorCodes::LOGICAL_ERROR, "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); } -bool PushingPipelineExecutor::start() +void PushingPipelineExecutor::start() { if (started) - return true; + return; started = true; executor = std::make_shared(pipeline.processors, pipeline.process_list_element); executor->setReadProgressCallback(pipeline.getReadProgressCallback()); if (!executor->executeStep(&input_wait_flag)) - { - checkExecutionStatus(executor->getExecutionStatus()); - return false; - } - - return true; + throwOnExecutionStatus(executor->getExecutionStatus()); } -bool PushingPipelineExecutor::push(Chunk chunk) +void PushingPipelineExecutor::push(Chunk chunk) { if (!started) - { - if (!start()) - return false; - } + start(); pushing_source->setData(std::move(chunk)); if (!executor->executeStep(&input_wait_flag)) - { - checkExecutionStatus(executor->getExecutionStatus()); - return false; - } - - return true; + throwOnExecutionStatus(executor->getExecutionStatus()); } -bool PushingPipelineExecutor::push(Block block) +void PushingPipelineExecutor::push(Block block) { - return push(Chunk(block.getColumns(), block.rows())); + push(Chunk(block.getColumns(), block.rows())); } void PushingPipelineExecutor::finish() diff --git a/src/Processors/Executors/PushingPipelineExecutor.h b/src/Processors/Executors/PushingPipelineExecutor.h index 4021f61fb6b..f549c9482db 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.h +++ b/src/Processors/Executors/PushingPipelineExecutor.h @@ -35,13 +35,10 @@ public: /// Get structure of returned block or chunk. const Block & getHeader() const; - bool start(); + void start(); - /// Return 'true' if push was successful. - /// Return 'false' if pipline was cancelled without exception. - /// This may happen in case of timeout_overflow_mode = 'break' OR internal bug. 
- [[nodiscard]] bool push(Chunk chunk); - [[nodiscard]] bool push(Block block); + void push(Chunk chunk); + void push(Block block); void finish(); diff --git a/src/Processors/Transforms/CreatingSetsTransform.cpp b/src/Processors/Transforms/CreatingSetsTransform.cpp index 857233ac028..eeb8f4a6060 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -215,8 +215,7 @@ void CreatingSetsTransform::consume(Chunk chunk) if (!done_with_table) { block = materializeBlock(block); - if (!executor->push(block)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot insert into a table"); + executor->push(block); rows_to_transfer += block.rows(); bytes_to_transfer += block.bytes(); diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index c261d76ef33..d8a4d7f0e1f 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1012,8 +1012,7 @@ namespace while (pipeline_executor->pull(block)) { if (block) - if (!executor.push(block)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); + executor.push(block); } if (isQueryCancelled()) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 283b60b533c..448dfafbd9d 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -932,18 +932,12 @@ void TCPHandler::processInsertQuery() executor.start(); if (processed_data) - { - if (!executor.push(std::move(processed_data))) - throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); - } + executor.push(std::move(processed_data)); else startInsertQuery(); while (readDataNext()) - { - if (!executor.push(std::move(state.block_for_insert))) - throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); - } + executor.push(std::move(state.block_for_insert)); if (state.cancellation_status == CancellationStatus::FULLY_CANCELLED) executor.cancel(); @@ -2040,8 +2034,7 @@ bool TCPHandler::receiveData(bool scalar) QueryPipeline temporary_table_out(storage->write(ASTPtr(), metadata_snapshot, query_context, /*async_insert=*/false)); PushingPipelineExecutor executor(temporary_table_out); executor.start(); - if (!executor.push(block)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot insert into temporary table"); + executor.push(block); executor.finish(); } else if (state.need_receive_data_for_input) diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index 69f9e5b9380..e3e73e42096 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -89,8 +89,7 @@ static void writeBlockConvert(PushingPipelineExecutor & executor, const Block & { Block adopted_block = adoptBlock(executor.getHeader(), block, log); for (size_t i = 0; i < repeats; ++i) - if (!executor.push(adopted_block)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); + executor.push(adopted_block); } @@ -409,8 +408,7 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si CurrentMetrics::Increment metric_increment{CurrentMetrics::DistributedSend}; Block adopted_shard_block = adoptBlock(job.executor->getHeader(), shard_block, log); - if (!job.executor->push(adopted_shard_block)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot send data"); + job.executor->push(adopted_shard_block); } else // local { diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index 3223a2813a3..f753d369d2d 100644 --- a/src/Storages/StorageBuffer.cpp 
+++ b/src/Storages/StorageBuffer.cpp @@ -1069,8 +1069,7 @@ void StorageBuffer::writeBlockToDestination(const Block & block, StoragePtr tabl auto block_io = interpreter.execute(); PushingPipelineExecutor executor(block_io.pipeline); executor.start(); - if (!executor.push(std::move(block_to_write))) - throw Exception(ErrorCodes::LOGICAL_ERROR, "StorageBuffer could not write data to destination table"); + executor.push(std::move(block_to_write)); executor.finish(); } diff --git a/src/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp index 60890337cb4..d75f3616f21 100644 --- a/src/Storages/tests/gtest_storage_log.cpp +++ b/src/Storages/tests/gtest_storage_log.cpp @@ -98,7 +98,7 @@ std::string writeData(int rows, DB::StoragePtr & table, const DB::ContextPtr con QueryPipeline pipeline(table->write({}, metadata_snapshot, context, /*async_insert=*/false)); PushingPipelineExecutor executor(pipeline); - std::ignore = executor.push(block); + executor.push(block); executor.finish(); return data; From 94cc37a39f53f884cc7c6cd81a76c41bb2ea8565 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 11:49:42 +0000 Subject: [PATCH 2175/2361] Remove robin-map submodule At some point, usearch stopped using robin-map. --- .gitmodules | 3 --- contrib/CMakeLists.txt | 3 +-- contrib/robin-map | 1 - contrib/robin-map-cmake/CMakeLists.txt | 1 - contrib/usearch-cmake/CMakeLists.txt | 2 -- 5 files changed, 1 insertion(+), 9 deletions(-) delete mode 160000 contrib/robin-map delete mode 100644 contrib/robin-map-cmake/CMakeLists.txt diff --git a/.gitmodules b/.gitmodules index 0a66031de8d..cdee6a43ad8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -345,9 +345,6 @@ [submodule "contrib/FP16"] path = contrib/FP16 url = https://github.com/Maratyszcza/FP16.git -[submodule "contrib/robin-map"] - path = contrib/robin-map - url = https://github.com/Tessil/robin-map.git [submodule "contrib/aklomp-base64"] path = contrib/aklomp-base64 url = https://github.com/aklomp/base64.git diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index dc2ad2a3150..d7489bc5c0e 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -209,9 +209,8 @@ endif() option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES}) if (ENABLE_USEARCH) add_contrib (FP16-cmake FP16) - add_contrib (robin-map-cmake robin-map) add_contrib (SimSIMD-cmake SimSIMD) - add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD + add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD else () message(STATUS "Not using USearch") endif () diff --git a/contrib/robin-map b/contrib/robin-map deleted file mode 160000 index 851a59e0e30..00000000000 --- a/contrib/robin-map +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d diff --git a/contrib/robin-map-cmake/CMakeLists.txt b/contrib/robin-map-cmake/CMakeLists.txt deleted file mode 100644 index f82ad705dcc..00000000000 --- a/contrib/robin-map-cmake/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -# See contrib/usearch-cmake/CMakeLists.txt diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 6be622275ae..83221e3810f 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -1,5 +1,4 @@ set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16") -set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map") set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") set(USEARCH_PROJECT_DIR
"${ClickHouse_SOURCE_DIR}/contrib/usearch") @@ -7,7 +6,6 @@ add_library(_usearch INTERFACE) target_include_directories(_usearch SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include - ${ROBIN_MAP_PROJECT_DIR}/include ${SIMSIMD_PROJECT_DIR}/include ${USEARCH_PROJECT_DIR}/include) From 5ca85674e6fd51633c8fa636bed71002dd2cd281 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Aug 2024 19:29:04 +0000 Subject: [PATCH 2176/2361] Bump usearch to 2.3.2 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index de2cb75b9e9..c98e4635f3c 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf +Subproject commit c98e4635f3cca9e33918fe1bdca23571162e0c28 diff --git a/contrib/usearch b/contrib/usearch index 30810452bec..65b5d178f05 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356 +Subproject commit 65b5d178f053d21480796d214b6ca04172d854a4 From 98c18eb341481d3daf610282c58960eb89188960 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Aug 2024 19:50:53 +0000 Subject: [PATCH 2177/2361] Bump usearch to 2.4.1 --- contrib/usearch | 2 +- .../MergeTree/MergeTreeIndexVectorSimilarity.cpp | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/contrib/usearch b/contrib/usearch index 65b5d178f05..e811aa8c1d0 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 65b5d178f053d21480796d214b6ca04172d854a4 +Subproject commit e811aa8c1d07dfb3725e05fedb550f91fe44a324 diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 5b0793fa0c8..083311a6602 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -95,9 +95,14 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( unum::usearch::metric_kind_t metric_kind, unum::usearch::scalar_kind_t scalar_kind, UsearchHnswParams usearch_hnsw_params) - : Base(Base::make(unum::usearch::metric_punned_t(dimensions, metric_kind, scalar_kind), - unum::usearch::index_dense_config_t(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search))) { + unum::usearch::metric_punned_t metric(dimensions, metric_kind, scalar_kind); + + unum::usearch::index_dense_config_t config(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search); + config.enable_key_lookups = false; /// we don't do row-to-vector lookups + + USearchIndex usearch_index = USearchIndex::make(metric, config); + swap(usearch_index); } void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const From dbe66e6092f6a6e232108f5b586253d0a017f66a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 09:47:17 +0000 Subject: [PATCH 2178/2361] Bump usearch to 2.5.1 --- contrib/usearch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/usearch b/contrib/usearch index e811aa8c1d0..f2b4bff52b7 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit e811aa8c1d07dfb3725e05fedb550f91fe44a324 +Subproject commit f2b4bff52b74a0bf33067bc034ba68bb785753ee From 383d2816e66c1ad67c9f4b962b52efd2cb8d1b53 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 09:56:11 +0000 Subject: [PATCH 2179/2361] Bump usearch to 2.6.1 --- contrib/usearch | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/usearch b/contrib/usearch index f2b4bff52b7..a7bc711dfb9 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit f2b4bff52b74a0bf33067bc034ba68bb785753ee +Subproject commit a7bc711dfb9e5665a1aee89d3a0297a211f2b97d From 88f2d2e67df5acbca8c4df45be00ab23d42e090c Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 10:00:12 +0000 Subject: [PATCH 2180/2361] Bump usearch to v2.7.8 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index c98e4635f3c..8f2c8881e44 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit c98e4635f3cca9e33918fe1bdca23571162e0c28 +Subproject commit 8f2c8881e440a55cfea246996984662623b4d5dd diff --git a/contrib/usearch b/contrib/usearch index a7bc711dfb9..b58cdb4025b 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit a7bc711dfb9e5665a1aee89d3a0297a211f2b97d +Subproject commit b58cdb4025b68b55800dcc9f36fa33b43c003a7e diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 083311a6602..346f69140bb 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -364,7 +364,7 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, result.visited_members); ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, result.computed_distances); - std::vector neighbors(result.size()); /// indexes of dots which were closest to the reference vector + std::vector neighbors(result.size()); /// indexes of dots which were closest to the reference vector std::vector distances(result.size()); result.dump_to(neighbors.data(), distances.data()); From fe7da4e7d1f9f76c9bb1d13fc0baa01433f069a6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 10:23:12 +0000 Subject: [PATCH 2181/2361] Bump usearch to 2.8.16 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index 8f2c8881e44..fed0b4f8ec6 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit 8f2c8881e440a55cfea246996984662623b4d5dd +Subproject commit fed0b4f8ec6c1fb75d47e554ae8ca9188fc068f4 diff --git a/contrib/usearch b/contrib/usearch index b58cdb4025b..81edcb7936b 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit b58cdb4025b68b55800dcc9f36fa33b43c003a7e +Subproject commit 81edcb7936b3aba701997ae6b1af59a61df280e1 From bd09e948ba710a9013fdfa477288162936bc6f85 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 12:21:43 +0000 Subject: [PATCH 2182/2361] Bump usearch to 2.9.2 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index fed0b4f8ec6..02665027985 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit fed0b4f8ec6c1fb75d47e554ae8ca9188fc068f4 +Subproject commit 02665027985a578bd91514011c31a0bbe302304d diff --git a/contrib/usearch b/contrib/usearch index 81edcb7936b..3ba2661f46f 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 
81edcb7936b3aba701997ae6b1af59a61df280e1 +Subproject commit 3ba2661f46fbc0065113e11f29404020210ebb53 From 92aed17e7cfdd78340df66075520713b2dee5c66 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 12:24:01 +0000 Subject: [PATCH 2183/2361] Bump usearch to 2.10.5 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index 02665027985..127ead1da7c 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit 02665027985a578bd91514011c31a0bbe302304d +Subproject commit 127ead1da7c39957b30a50dd85e74814edb022d6 diff --git a/contrib/usearch b/contrib/usearch index 3ba2661f46f..fa1019941fe 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 3ba2661f46fbc0065113e11f29404020210ebb53 +Subproject commit fa1019941fe71f359516543ff4ec9f6fa8f0cb80 From 72efc8308c8cda9a2015fd1c5a1057c5b4a5675a Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 12:26:49 +0000 Subject: [PATCH 2184/2361] Bump usearch to 2.11.7 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index 127ead1da7c..18d17686124 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit 127ead1da7c39957b30a50dd85e74814edb022d6 +Subproject commit 18d17686124ddebd9fe55eee56b2e0273a613d4b diff --git a/contrib/usearch b/contrib/usearch index fa1019941fe..bc83df3c0b7 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit fa1019941fe71f359516543ff4ec9f6fa8f0cb80 +Subproject commit bc83df3c0b7da8376574a3ca2b48f0738365c205 From 58d76fabf68c48c08e25fca8d4f6318f86010625 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 12:28:09 +0000 Subject: [PATCH 2185/2361] Bump usearch to 2.12.0 --- contrib/usearch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/usearch b/contrib/usearch index bc83df3c0b7..e6c81f78c64 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit bc83df3c0b7da8376574a3ca2b48f0738365c205 +Subproject commit e6c81f78c64c0d8119f854691a06e60660638a25 From dcf96fa9f4363f7607e9b5ed82056d94c49a6ee3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 13 Aug 2024 17:57:06 +0200 Subject: [PATCH 2186/2361] Update 03221_insert_timeout_overflow_mode.sh --- tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh index 030c5211b2d..db943a665cb 100755 --- a/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh +++ b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh @@ -5,4 +5,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "create table null_t (number UInt64) engine = Null;" -${CLICKHOUSE_CLIENT} --query "select sleep(0.1) from system.numbers settings max_block_size = 1 format Native" 2>/dev/null | ${CLICKHOUSE_CLIENT} --max_execution_time = 0.3 --timeout_overflow_mode = 'break' --query "insert into null_t format Native" 2>&1 | grep -o "QUERY_WAS_CANCELLED" +${CLICKHOUSE_CLIENT} --query "select sleep(0.1) from system.numbers settings max_block_size = 1 format Native" 2>/dev/null | ${CLICKHOUSE_CLIENT} --max_execution_time 0.3 --timeout_overflow_mode break --query "insert into null_t format Native" 2>&1 | grep -o "QUERY_WAS_CANCELLED" From 7e209ebdf686e374fdf764cb6acb3e7de83e927f Mon Sep 17 00:00:00 2001 From: Austin Bruch Date: Tue, 13 Aug 2024 12:30:17 -0400 Subject: [PATCH 2187/2361] Remove trailing colon in header for consistency --- docs/en/sql-reference/table-functions/file.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 7908a3cb934..3243e6cf569 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -103,7 +103,7 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` -### Inserting data from a file into a table: +### Inserting data from a file into a table ``` sql INSERT INTO FUNCTION From 6d2f17ea3b018dd54d2010cf3fbac3d6aa46142e Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 17:12:38 +0000 Subject: [PATCH 2188/2361] Add more tests --- src/Columns/ColumnObject.cpp | 1 - .../03211_nested_json_merges.reference.j2 | 84 +++++++++++++++---- .../03211_nested_json_merges.sql.j2 | 19 ++++- .../0_stateless/03222_json_squashing.sql | 16 ++-- ...ested_json_in_shared_data_merges.reference | 81 ++++++++++++++++++ ...3223_nested_json_in_shared_data_merges.sql | 26 ++++++ 6 files changed, 203 insertions(+), 24 deletions(-) create mode 100644 tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference create mode 100644 tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 0645c10340d..e1a3fbf3b18 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -5,7 +5,6 @@ #include #include #include -#include namespace DB { diff --git a/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 b/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 index e05e9ced0b1..9b6ed82abed 100644 --- a/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 +++ b/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 @@ -1,3 +1,4 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; Dynamic paths 300000 c 150000 d @@ -19,26 +20,20 @@ Shared data paths 150000 d 150000 e Dynamic paths -300000 c -150000 d -Shared data paths -Dynamic paths -300000 c -Shared data paths -150000 d -Dynamic paths -600000 f -300000 c -150000 e -Shared data paths -150000 d -Dynamic paths 600000 f +450000 c Shared data paths 300000 c 150000 d 150000 e Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +Dynamic paths 300000 c 150000 d Shared data paths @@ -59,6 +54,20 
@@ Shared data paths 150000 d 150000 e Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths 300000 c 150000 d Shared data paths @@ -78,3 +87,50 @@ Shared data paths 300000 c 150000 d 150000 e +Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e diff --git a/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 b/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 index 4ab5a5da0ef..0af998e22bb 100644 --- a/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 +++ b/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 @@ -1,4 +1,4 @@ --- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan +-- Tags: no-fasttest, long, no-tsan, no-asan, no-msan, no-ubsan set allow_experimental_json_type = 1; @@ -9,6 +9,8 @@ drop table if exists test; 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;', 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} +select '{{ create_command }}'; + {{ create_command }} system stop merges test; @@ -27,6 +29,7 @@ select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from te select 'Shared data paths'; select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system stop merges test; insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(50000); insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(200000); @@ -41,6 +44,20 @@ select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from te select 'Shared data paths'; select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(150000); + +select 'Dynamic paths'; +select 
count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + drop table test; {% endfor -%} diff --git a/tests/queries/0_stateless/03222_json_squashing.sql b/tests/queries/0_stateless/03222_json_squashing.sql index b6bda7a702f..53090c5cb88 100644 --- a/tests/queries/0_stateless/03222_json_squashing.sql +++ b/tests/queries/0_stateless/03222_json_squashing.sql @@ -6,7 +6,7 @@ set max_block_size = 1000; drop table if exists test; create table test (json JSON) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, '{}'::JSON, number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON) from numbers(1000000); +insert into test select multiIf(number < 1000, '{}'::JSON, number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; select 'Dynamic paths'; @@ -15,7 +15,7 @@ select 'Shared data paths'; select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; truncate table test; -insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON, number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON, '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON) from numbers(1000000); +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON, number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON, '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; select 'Dynamic paths'; @@ -25,7 +25,7 @@ select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by drop table test; create table test (json JSON(max_dynamic_paths=2)) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=2), number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2)) from numbers(1000000); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=2), number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2)) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; select 'Dynamic paths'; @@ -34,7 +34,7 @@ select 'Shared data paths'; select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; truncate table test; -insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON(max_dynamic_paths=2)) from numbers(1000000); +insert into test select multiIf(number < 1000, 
'{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON(max_dynamic_paths=2)) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; select 'Dynamic paths'; @@ -43,7 +43,7 @@ select 'Shared data paths'; select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; truncate table test; -insert into test select multiIf(number < 1000, '{"a" : 42}'::JSON(max_dynamic_paths=2), number < 3000, '{"b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43}'::JSON(max_dynamic_paths=2)) from numbers(1000000); +insert into test select multiIf(number < 1000, '{"a" : 42}'::JSON(max_dynamic_paths=2), number < 3000, '{"b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43}'::JSON(max_dynamic_paths=2)) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; select 'Dynamic paths'; @@ -53,7 +53,7 @@ select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by drop table test; create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple(); -insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8)) from numbers(1000000); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; select 'Dynamic paths'; @@ -62,7 +62,7 @@ select 'Shared data paths'; select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; truncate table test; -insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00", "g" : "Hello2"}]}')::JSON(max_dynamic_paths=8)) from numbers(1000000); +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00", "g" : "Hello2"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; select 'Dynamic paths'; @@ -71,7 +71,7 @@ select 'Shared data paths'; select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; truncate table test; -insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00"}]}')::JSON(max_dynamic_paths=8)) from numbers(1000000); +insert 
into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); select 'All paths'; select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; select 'Dynamic paths'; diff --git a/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference new file mode 100644 index 00000000000..6c01506e800 --- /dev/null +++ b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference @@ -0,0 +1,81 @@ +All paths +['b'] +['b'] +['b'] +['b'] +['b'] +['c'] +['c'] +['c'] +['c'] +['c'] +Dynamic paths +['b'] +['b'] +['b'] +['b'] +['b'] +[] +[] +[] +[] +[] +Shared data paths +[] +[] +[] +[] +[] +['c'] +['c'] +['c'] +['c'] +['c'] +All paths +['b'] +['b'] +['b'] +['b'] +['b'] +['c'] +['c'] +['c'] +['c'] +['c'] +['b'] +['b'] +['b'] +['b'] +['b'] +Dynamic paths +['b'] +['b'] +['b'] +['b'] +['b'] +['c'] +['c'] +['c'] +['c'] +['c'] +['b'] +['b'] +['b'] +['b'] +['b'] +Shared data paths +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] diff --git a/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql new file mode 100644 index 00000000000..311eba37772 --- /dev/null +++ b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql @@ -0,0 +1,26 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1; +insert into test select materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +insert into test select materialize('{"aa1" : 42, "aa2" : 42, "aa3" : 42, "aa4" : 42, "aa5" : 42, "aa6" : 42, "aa7" : 42, "aa8" : 42, "a" : [{"c" : 42}]}') from numbers(5); +optimize table test final; + +select 'All paths'; +select JSONAllPaths(arrayJoin(json.a[])) from test; +select 'Dynamic paths'; +select JSONDynamicPaths(arrayJoin(json.a[])) from test; +select 'Shared data paths'; +select JSONSharedDataPaths(arrayJoin(json.a[])) from test; + +insert into test select materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; + +select 'All paths'; +select JSONAllPaths(arrayJoin(json.a[])) from test; +select 'Dynamic paths'; +select JSONDynamicPaths(arrayJoin(json.a[])) from test; +select 'Shared data paths'; +select JSONSharedDataPaths(arrayJoin(json.a[])) from test; + +drop table test; From 22b31ef81f036ad8a65e7172e6be2270674ac09f Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 17:42:47 +0000 Subject: [PATCH 2189/2361] Add more tests --- .../03209_json_type_horizontal_merges.sql.j2 | 4 +- .../03209_json_type_merges_small.reference.j2 | 272 ++++++++++++++++++ .../03209_json_type_merges_small.sql.j2 | 76 +++++ .../03209_json_type_vertical_merges.sql.j2 | 4 +- ...3211_nested_json_merges_small.reference.j2 | 136 +++++++++ .../03211_nested_json_merges_small.sql.j2 | 63 ++++ 6 files changed, 551 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 create mode 100644 tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 create mode 100644 
tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 create mode 100644 tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 index 05b5d8d6095..cc143e4ceef 100644 --- a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 @@ -4,8 +4,8 @@ set allow_experimental_json_type = 1; drop table if exists test; -{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;', - 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;'] -%} +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10;'] -%} select '{{ create_command }}'; diff --git a/tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 b/tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 new file mode 100644 index 00000000000..f953dee10fe --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 @@ -0,0 +1,272 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 
f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g diff --git a/tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 b/tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 new file mode 100644 index 00000000000..e4b64ac7561 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 @@ -0,0 +1,76 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +select '{{ create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(10); +insert into test select number, toJSONString(map('b', number)) from numbers(9); +insert into test select number, toJSONString(map('c', number)) from numbers(8); +insert into test select number, toJSONString(map('d', number)) from numbers(7); +insert into test select number, toJSONString(map('e', number)) from numbers(6); +insert into test select number, '{}' from numbers(100000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path 
from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('f', number)) from numbers(20); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('g', number)) from numbers(1); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('c', number)) from numbers(4); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 index d17d8a48537..e427db7677f 100644 --- a/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 @@ -4,8 +4,8 @@ set allow_experimental_json_type = 1; drop table if exists test; -{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;', - 'create table test 
(id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760;'] -%} +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} select '{{ create_command }}'; diff --git a/tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 b/tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 new file mode 100644 index 00000000000..76339dba3e3 --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 @@ -0,0 +1,136 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e diff --git a/tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 b/tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 new file mode 100644 index 00000000000..86e5a6c71c9 --- /dev/null +++ 
b/tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 @@ -0,0 +1,63 @@ +-- Tags: no-fasttest, long, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +select '{{ create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(10); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(10); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(5); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(5); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(20); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(15); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), 
arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} From 9833ef0bed218afdc1927181c11bac306fe21dda Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 13 Aug 2024 17:50:13 +0000 Subject: [PATCH 2190/2361] slightly better --- .../Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h | 2 +- src/Processors/Merges/IMergingTransform.h | 6 +++--- src/Storages/MergeTree/MergeTask.cpp | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h index 39171c5a978..c34028b1cba 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h @@ -50,7 +50,7 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; } + MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; } private: Chunk prepareToMerge(); diff --git a/src/Processors/Merges/IMergingTransform.h b/src/Processors/Merges/IMergingTransform.h index fba5b038618..e5cd3bdde46 100644 --- a/src/Processors/Merges/IMergingTransform.h +++ b/src/Processors/Merges/IMergingTransform.h @@ -113,7 +113,7 @@ public: void work() override { - Stopwatch watch; + Stopwatch watch{CLOCK_MONOTONIC_COARSE}; if (!state.init_chunks.empty()) algorithm.initialize(std::move(state.init_chunks)); @@ -180,12 +180,12 @@ protected: if (seconds == 0.0) { - LOG_DEBUG(log, "{}: {} blocks, {} rows, {} bytes in 0 sec.", + LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in 0 sec.", transform_message, stats.blocks, stats.rows, stats.bytes); } else { - LOG_DEBUG(log, "{}: {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.", + LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.", transform_message, stats.blocks, stats.rows, stats.bytes, seconds, stats.rows / seconds, ReadableSize(stats.bytes / seconds)); } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 3aa4d764685..95e00773bae 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -945,7 +945,7 @@ bool MergeTask::MergeProjectionsStage::finalizeProjectionsAndWholeMerge() const MergeTask::StageRuntimeContextPtr MergeTask::MergeProjectionsStage::getContextForNextStage() { /// Do not increment for projection stage because time is already accounted in main task. - /// The projection stage has its own empty projection stage which may add a drift of severals milliseconds. + /// The projection stage has its own empty projection stage which may add a drift of several milliseconds. 
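    /// Put differently (a descriptive note for the check below): only the top-level merge, i.e. the
    /// one without a parent part, reports its elapsed time to ProfileEvents; merges of projection
    /// parts skip the increment, so the same wall-clock time is not counted twice.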
if (global_ctx->parent_part == nullptr) { ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); From c75112827c2d73620c0a5c579b04dfe4049deb56 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 18:05:27 +0000 Subject: [PATCH 2191/2361] Fis style --- src/Columns/ColumnObject.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index e1a3fbf3b18..d4f752cda95 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1212,10 +1212,10 @@ void ColumnObject::prepareForSquashing(const std::vector & source_col typed_paths_source_columns.reserve(typed_paths.size()); std::unordered_map dynamic_paths_source_columns; dynamic_paths_source_columns.reserve(dynamic_paths.size()); - + for (const auto & [path, column] : typed_paths) typed_paths_source_columns[path].reserve(source_columns.size()); - + for (const auto & [path, column] : dynamic_paths) dynamic_paths_source_columns[path].reserve(source_columns.size()); @@ -1225,10 +1225,10 @@ void ColumnObject::prepareForSquashing(const std::vector & source_col const auto & source_object_column = assert_cast(*source_column); total_size += source_object_column.size(); shared_data_source_columns.push_back(source_object_column.shared_data); - + for (const auto & [path, column] : source_object_column.typed_paths) typed_paths_source_columns.at(path).push_back(column); - + for (const auto & [path, column] : source_object_column.dynamic_paths) { if (dynamic_paths.contains(path)) @@ -1237,7 +1237,7 @@ void ColumnObject::prepareForSquashing(const std::vector & source_col } shared_data->prepareForSquashing(shared_data_source_columns); - + for (const auto & [path, source_typed_columns] : typed_paths_source_columns) typed_paths[path]->prepareForSquashing(source_typed_columns); From 6af5fedf420c667e2a7866c89dfe0bd1d2ff37dd Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 13 Aug 2024 19:26:35 +0000 Subject: [PATCH 2192/2361] Update autogenerated version to 24.9.1.1 and contributors --- cmake/autogenerated_versions.txt | 10 +++++----- .../StorageSystemContributors.generated.cpp | 17 +++++++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index d69646d3694..c82038804fe 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54489) +SET(VERSION_REVISION 54490) SET(VERSION_MAJOR 24) -SET(VERSION_MINOR 8) +SET(VERSION_MINOR 9) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af) -SET(VERSION_DESCRIBE v24.8.1.1-testing) -SET(VERSION_STRING 24.8.1.1) +SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608) +SET(VERSION_DESCRIBE v24.9.1.1-testing) +SET(VERSION_STRING 24.9.1.1) # end of autochange diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 35b9c0008c6..eb6f0382d15 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -457,6 +457,7 @@ const char * auto_contributors[] { "Gleb-Tretyakov", "GoGoWen2021", "Gosha Letov", + "Graham Campbell", "Gregory", "Grigorii Sokolik", "Grigory", @@ -472,6 +473,7 @@ const char * auto_contributors[] { "Habibullah Oladepo", "HaiBo Li", "Hakob Saghatelyan", + "Halersson Paris", "Hamoon", "Han Fei", "Han Shukai", @@ -541,6 +543,7 @@ const char * auto_contributors[] { "JackyWoo", "Jacob Hayes", "Jacob Herrington", + "Jacob Reckhard", "Jai Jhala", "Jake Bamrah", "Jake Liu", @@ -661,6 +664,7 @@ const char * auto_contributors[] { "LaurieLY", "Lee sungju", "Lemore", + "Lennard Eijsackers", "Leonardo Cecchi", "Leonardo Maciel", "Leonid Krylov", @@ -804,6 +808,7 @@ const char * auto_contributors[] { "Mingliang Pan", "Misko Lee", "Misz606", + "MiÑhael Stetsyuk", "MochiXu", "Mohamad Fadhil", "Mohammad Arab Anvari", @@ -922,6 +927,7 @@ const char * auto_contributors[] { "Pervakov Grigorii", "Pervakov Grigory", "Peter", + "Peter Nguyen", "Petr Vasilev", "Pham Anh Tuan", "Philip Hallstrom", @@ -981,6 +987,7 @@ const char * auto_contributors[] { "Ronald Bradford", "Rory Crispin", "Roy Bellingan", + "Ruihang Xia", "Ruslan", "Ruslan Mardugalliamov", "Ruslan Savchenko", @@ -1000,9 +1007,11 @@ const char * auto_contributors[] { "Sami Kerola", "Samuel Chou", "Samuel Colvin", + "Samuele Guerrini", "San", "Sanjam Panda", "Sariel", + "Sasha Sheikin", "Saulius Valatka", "Sean Haynes", "Sean Lafferty", @@ -1202,6 +1211,7 @@ const char * auto_contributors[] { "Vladimir Makarov", "Vladimir Mihailenco", "Vladimir Smirnov", + "Vladimir Varankin", "Vladislav Rassokhin", "Vladislav Smirnov", "Vladislav V", @@ -1275,6 +1285,7 @@ const char * auto_contributors[] { "Zhichun Wu", "Zhiguo Zhou", "Zhipeng", + "Zhukova, Maria", "Zhuo Qiu", "Zijie Lu", "Zimu Li", @@ -1502,6 +1513,7 @@ const char * auto_contributors[] { "hchen9", "hcz", "hdhoang", + "heguangnan", "heleihelei", "helifu", "hendrik-m", @@ -1572,6 +1584,7 @@ const char * auto_contributors[] { "kevinyhzou", "kgurjev", "khamadiev", + "khodyrevyurii", "kigerzhang", "kirillikoff", "kmeaw", @@ -1787,6 +1800,7 @@ const char * auto_contributors[] { "ruslandoga", "ryzuo", "s-kat", + "sakulali", "sanjam", "santaux", "santrancisco", @@ -1804,6 +1818,7 @@ const char * auto_contributors[] { "shabroo", "shangshujie", "shedx", + "shiyer7474", "shuai-xu", "shuchaome", "shuyang", @@ -1901,6 +1916,7 @@ const char * auto_contributors[] { "wzl", "xPoSx", "xbthink", + "xc0derx", "xiao", "xiaolei565", "xiebin", @@ -1964,6 +1980,7 @@ const char * auto_contributors[] { "zkun", "zlx19950903", "zombee0", + "zoomxi", "zvonand", "zvrr", "zvvr", From a9226f49e7e052d2c392214afe32f4d6de1d6d62 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 13 Aug 2024 20:24:40 +0000 Subject: [PATCH 2193/2361] remove name with cyrillic letter --- 
src/Storages/System/StorageSystemContributors.generated.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index eb6f0382d15..67dfe3bfe86 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -808,7 +808,6 @@ const char * auto_contributors[] { "Mingliang Pan", "Misko Lee", "Misz606", - "MiÑhael Stetsyuk", "MochiXu", "Mohamad Fadhil", "Mohammad Arab Anvari", From 24ddea69453588bb5c126cf9951c3b0bc6fcafcf Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 20:49:16 +0000 Subject: [PATCH 2194/2361] Fix tests --- src/Parsers/ParserDataType.cpp | 11 +++++------ .../02553_new_type_json_attach_partition.sql | 1 + 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index e941c05eba1..6e6e8758c51 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -242,10 +242,12 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) /// Allow mixed lists of nested and normal types. /// Parameters are either: - /// - Nested table elements; + /// - Nested table element; + /// - Tuple element /// - Enum element in form of 'a' = 1; /// - literal; - /// - Dynamic type arguments; + /// - Dynamic type argument; + /// - JSON type argument; /// - another data type (or identifier); size_t arg_num = 0; @@ -271,7 +273,7 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ObjectArgumentParser parser; parser.parse(pos, arg, expected); } - else if (type_name == "Nested") + else if (boost::to_lower_copy(type_name) == "nested") { ParserNameTypePair name_and_type_parser; name_and_type_parser.parse(pos, arg, expected); @@ -334,9 +336,6 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ++arg_num; } - if (pos->type == TokenType::Comma) - // ignore trailing comma inside Nested structures like Tuple(Int, Tuple(Int, String),) - ++pos; if (pos->type != TokenType::ClosingRoundBracket) return false; ++pos; diff --git a/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql index 8a5abd31cb4..c7d4c0b5d55 100644 --- a/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql +++ b/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql @@ -5,6 +5,7 @@ DROP TABLE IF EXISTS t_json_attach_partition; CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; + ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": [1, 2]}}; From 0133806e6c991325b5ace29469d8f450969c3ec0 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Tue, 13 Aug 2024 20:55:51 +0000 Subject: [PATCH 2195/2361] Fix --- .../Formats/Impl/ArrowColumnToCHColumn.cpp | 9 ++++++++ .../Formats/Impl/ParquetBlockInputFormat.cpp | 23 ++++++++----------- ...arquet_big_integer_compatibility.reference | 1 + ...02786_parquet_big_integer_compatibility.sh | 3 +++ 4 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index ed91913de4d..5e7f763dfbc 100644 --- 
a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -743,6 +743,15 @@ static ColumnWithTypeAndName readNonNullableColumnFromArrowColumn( case TypeIndex::IPv6: return readIPv6ColumnFromBinaryData(arrow_column, column_name); /// ORC format outputs big integers as binary column, because there is no fixed binary in ORC. + /// + /// When ORC/Parquet file says the type is "byte array" or "fixed len byte array", + /// but the clickhouse query says to interpret the column as e.g. Int128, it + /// may mean one of two things: + /// * The byte array is the 16 bytes of Int128, little-endian. + /// * The byte array is an ASCII string containing the Int128 formatted in base 10. + /// There's no reliable way to distinguish these cases. We just guess: if the + /// byte array is variable-length, and the length is different from sizeof(type), + /// we parse as text, otherwise as binary. case TypeIndex::Int128: return readColumnWithBigNumberFromBinaryData(arrow_column, column_name, type_hint); case TypeIndex::UInt128: diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 3f2e701afc2..c6167e572df 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -224,21 +224,18 @@ static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type if (data.empty()) return Field(); - /// Long integers. - auto reinterpret_fixed_string = [&](auto x) - { - if (data.size() != sizeof(x)) - throw Exception(ErrorCodes::CANNOT_PARSE_NUMBER, "Unexpected {} size: {}", fieldTypeToString(Field::TypeToEnum::value), data.size()); - memcpy(&x, data.data(), data.size()); - return Field(x); - }; + /// Long integers, encoded either as text or as little-endian bytes. + /// The parquet file doesn't know that it's numbers, so the min/max are produced by comparing + /// strings lexicographically. So these min and max are mostly useless to us. + /// There's one case where they're not useless: min == max; currently we don't make use of this. 
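    /// A small worked illustration: for the values 9 and 10 written as decimal text, byte-wise
    /// comparison yields min = "10" and max = "9" (the reverse of the numeric order), so these
    /// statistics cannot be trusted for range pruning and we simply return an empty Field below.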
switch (type_hint) { - case TypeIndex::UInt128: return reinterpret_fixed_string(UInt128(0)); - case TypeIndex::UInt256: return reinterpret_fixed_string(UInt256(0)); - case TypeIndex::Int128: return reinterpret_fixed_string(Int128(0)); - case TypeIndex::Int256: return reinterpret_fixed_string(Int256(0)); - case TypeIndex::IPv6: return reinterpret_fixed_string(IPv6(0)); + case TypeIndex::UInt128: + case TypeIndex::UInt256: + case TypeIndex::Int128: + case TypeIndex::Int256: + case TypeIndex::IPv6: + return Field(); default: break; } diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference index 7764974255b..877bb5f390f 100644 --- a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference @@ -1 +1,2 @@ 424242424242424242424242424242424242424242424242424242 +22707864971053448441042714569797161695738549521977760418632926980540162388532 diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh index 8865b2e7aab..0f590027f19 100755 --- a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh @@ -5,5 +5,8 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh +# This is parsed as text. $CLICKHOUSE_LOCAL -q "select toString(424242424242424242424242424242424242424242424242424242::UInt256) as x format Parquet" | $CLICKHOUSE_LOCAL --input-format=Parquet --structure='x UInt256' -q "select * from table" +# But this is parsed as binary because text length happens to be 32 bytes. Not ideal. +$CLICKHOUSE_LOCAL -q "select toString(42424242424242424242424242424242::UInt256) as x format Parquet" | $CLICKHOUSE_LOCAL --input-format=Parquet --structure='x UInt256' -q "select * from table" From b9ffa929ba418d54e5e140470f21d4347ac0eab9 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 13 Aug 2024 21:08:53 +0000 Subject: [PATCH 2196/2361] Fix: min marks to read overflow with parallel replicas --- .../MergeTree/MergeTreeIndexGranularity.cpp | 12 ++++++++++-- ..._replicas_min_marks_to_read_overflow.reference | 10 ++++++++++ ...rallel_replicas_min_marks_to_read_overflow.sql | 15 +++++++++++++++ 3 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference create mode 100644 tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index 2a45ab1d927..2f9a4a47b11 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -103,8 +103,16 @@ size_t MergeTreeIndexGranularity::countMarksForRows(size_t from_mark, size_t num /// This is a heuristic to respect min_marks_to_read which is ignored by MergeTreeReadPool in case of remote disk. /// See comment in IMergeTreeSelectAlgorithm. - if (min_marks_to_read && from_mark + 2 * min_marks_to_read <= to_mark) - to_mark = from_mark + min_marks_to_read; + if (min_marks_to_read) + { + // check that ... 
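        // (a rough illustration of what the two guards below protect against, assuming 64-bit size_t:
        //  with a huge min_marks_to_read, 2 * min_marks_to_read or from_mark + 2 * min_marks_to_read
        //  would silently wrap around and the range check would become meaningless; the regression
        //  test in this change exercises this via merge_tree_min_rows_for_concurrent_read = 9223372036854775806
        //  together with index_granularity = 1)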
+ bool overflow = ((1ULL << 63) & min_marks_to_read); // further multiplication by 2 will not overflow + if (!overflow) + overflow = (std::numeric_limits::max() - from_mark) < 2 * min_marks_to_read; // further addition will not overflow + + if (!overflow && from_mark + 2 * min_marks_to_read <= to_mark) + to_mark = from_mark + min_marks_to_read; + } return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows; } diff --git a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference new file mode 100644 index 00000000000..7fafd4d13ea --- /dev/null +++ b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference @@ -0,0 +1,10 @@ +100 100 +101 101 +102 102 +103 103 +104 104 +105 105 +106 106 +107 107 +108 108 +109 109 diff --git a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql new file mode 100644 index 00000000000..112373e5db2 --- /dev/null +++ b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test__fuzz_22 SYNC; + +CREATE TABLE test__fuzz_22 (k Float32, v String) ENGINE = ReplicatedMergeTree('/clickhouse/03222/{database}/test__fuzz_22', 'r1') ORDER BY k SETTINGS index_granularity = 1; + +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(10_000); + +SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT k, v +FROM test__fuzz_22 +ORDER BY k +LIMIT 100, 10 +SETTINGS merge_tree_min_rows_for_concurrent_read = 9223372036854775806; + +DROP TABLE test__fuzz_22 SYNC; From 0c9e1a061f825e5b9c5d623d90d4d898cd05e44c Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 13 Aug 2024 18:49:18 +0200 Subject: [PATCH 2197/2361] CI: Create new release branch workflow updates --- .github/workflows/create_release.yml | 2 +- tests/ci/ci_utils.py | 5 + tests/ci/create_release.py | 240 ++++++++++++++++----------- tests/ci/docker_server.py | 2 +- tests/ci/version_helper.py | 17 +- 5 files changed, 161 insertions(+), 105 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index d4993b373df..73613c65266 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -129,9 +129,9 @@ jobs: if: ${{ inputs.type == 'patch' && ! inputs.only-repo }} shell: bash run: | - python3 ./tests/ci/create_release.py --set-progress-completed git reset --hard HEAD git checkout "$GITHUB_REF_NAME" + python3 ./tests/ci/create_release.py --set-progress-completed - name: Create GH Release if: ${{ inputs.type == 'patch' && ! 
inputs.only-repo }} shell: bash diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index d807f5be09f..b8778e0cc50 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -167,6 +167,11 @@ class GH: latest_branch = Shell.get_output( 'gh pr list --label release --repo ClickHouse/ClickHouse --search "sort:created" -L1 --json headRefName' ) + if latest_branch: + latest_branch = json.loads(latest_branch)[0]["headRefName"] + print( + f"Latest branch [{latest_branch}], release branch [{branch}], release latest [{latest_branch == branch}]" + ) return latest_branch == branch diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 27eba273ce0..b5ea61e1952 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -61,6 +61,7 @@ class ReleaseContextManager: # create initial release info self.release_info = ReleaseInfo( release_branch="NA", + release_type="NA", commit_sha=args.ref, release_tag="NA", version="NA", @@ -93,6 +94,7 @@ class ReleaseContextManager: @dataclasses.dataclass class ReleaseInfo: version: str + release_type: str release_tag: str release_branch: str commit_sha: str @@ -131,7 +133,7 @@ class ReleaseInfo: return self def prepare( - self, commit_ref: str, release_type: str, skip_tag_check: bool + self, commit_ref: str, release_type: str, _skip_tag_check: bool ) -> "ReleaseInfo": version = None release_branch = None @@ -143,17 +145,18 @@ class ReleaseInfo: assert release_type in ("patch", "new") if release_type == "new": # check commit_ref is right and on a right branch - Shell.check( - f"git merge-base --is-ancestor {commit_ref} origin/master", - strict=True, - verbose=True, - ) + if commit_ref != "master": + Shell.check( + f"git merge-base --is-ancestor {commit_ref} origin/master", + strict=True, + verbose=True, + ) with checkout(commit_ref): commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) - release_branch = "master" + release_branch = f"{version.major}.{version.minor}" expected_prev_tag = f"v{version.major}.{version.minor}.1.1-new" version.bump().with_description(VersionType.NEW) assert ( @@ -204,10 +207,11 @@ class ReleaseInfo: expected_tag_prefix ) and git.latest_tag.endswith(expected_tag_suffix): pass - elif not skip_tag_check: - assert ( - False - ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" + # TODO: uncomment and check with dry-run + # elif not skip_tag_check: + # assert ( + # False + # ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" previous_release_sha = Shell.get_output_or_raise( f"git rev-list -n1 {previous_release_tag}" @@ -238,6 +242,7 @@ class ReleaseInfo: self.release_progress = ReleaseProgress.STARTED self.progress_status = ReleaseProgressDescription.OK self.latest = latest_release + self.release_type = release_type return self def push_release_tag(self, dry_run: bool) -> None: @@ -262,16 +267,15 @@ class ReleaseInfo: @staticmethod def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: cmd = f"gh api repos/{CI.Envs.GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" - Shell.check(cmd, dry_run=dry_run, strict=True) + res = Shell.check(cmd, dry_run=dry_run, verbose=True) + if not res: + # not a critical error - do not fail. 
branch might be created already (recovery case) + print("WARNING: failed to create backport labels for the new branch") def push_new_release_branch(self, dry_run: bool) -> None: - assert ( - self.release_branch == "master" - ), "New release branch can be created only for release type [new]" git = Git() version = get_version_from_repo(git=git) - new_release_branch = f"{version.major}.{version.minor}" - stable_release_type = version.get_stable_release_type() + new_release_branch = self.release_branch version_after_release = copy(version) version_after_release.bump() assert ( @@ -285,11 +289,8 @@ class ReleaseInfo: print( f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]" ) - with checkout(self.release_branch): + with checkout("master"): with checkout_new(new_release_branch): - pr_labels = f"--label {CI.Labels.RELEASE}" - if stable_release_type == VersionType.LTS: - pr_labels += f" --label {CI.Labels.RELEASE_LTS}" cmd_push_branch = ( f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}" ) @@ -302,67 +303,108 @@ class ReleaseInfo: ReleaseInfo._create_gh_label( f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run ) - Shell.check( - f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' - --head {new_release_branch} {pr_labels} - --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' - """, - dry_run=dry_run, - strict=True, - verbose=True, - ) def get_version_bump_branch(self): return f"bump_version_{self.version}" def update_version_and_contributors_list(self, dry_run: bool) -> None: - # Bump version, update contributors list, create PR - branch_upd_version_contributors = self.get_version_bump_branch() + # Bump version, update contributors list, create on release branch with checkout(self.commit_sha): git = Git() version = get_version_from_repo(git=git) - if self.release_branch == "master": + if self.release_type == "patch": + assert ( + version.string == self.version + ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" + version.bump_patch() + else: + version.reset_tweak() + version.with_description(version.get_stable_release_type()) + + with checkout(self.release_branch): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push" + Shell.check( + cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) + if dry_run: + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + + # TODO: move to new GH step? 
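        # (an illustrative walk-through, the version numbers are hypothetical: for a "new" release the
        #  freshly cut 24.9 branch keeps the 24.9.1.1 version committed above, while master is bumped
        #  to the next testing version, e.g. 24.10.1.1-testing, through the version-bump PR created below)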
+ if self.release_type == "new": + print("Update version on master branch") + branch_upd_version_contributors = self.get_version_bump_branch() + with checkout(self.commit_sha): + git = Git() + version = get_version_from_repo(git=git) version.bump() version.with_description(VersionType.TESTING) - else: - version.with_description(version.get_stable_release_type()) - assert ( - version.string == self.version - ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" - with checkout(self.release_branch): - with checkout_new(branch_upd_version_contributors): - update_cmake_version(version) - update_contributors(raise_error=True) - cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" - cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" - actor = os.getenv("GITHUB_ACTOR", "") or "me" - body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" - cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body \"{body}\" --assignee {actor}" + with checkout("master"): + with checkout_new(branch_upd_version_contributors): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" + actor = os.getenv("GITHUB_ACTOR", "") or "me" + body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" + cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base master --body \"{body}\" --assignee {actor}" + Shell.check( + cmd_commit_version_upd, + strict=True, + dry_run=dry_run, + verbose=True, + ) + Shell.check( + cmd_push_branch, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check( + cmd_create_pr, strict=True, dry_run=dry_run, verbose=True + ) + if dry_run: + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + self.version_bump_pr = "dry-run" + else: + self.version_bump_pr = GH.get_pr_url_by_branch( + branch=branch_upd_version_contributors + ) + + # TODO: move to new GH step? + print("Create Release PR") + with checkout(self.release_branch): + pr_labels = f"--label {CI.Labels.RELEASE}" + if version.get_stable_release_type() == VersionType.LTS: + pr_labels += f" --label {CI.Labels.RELEASE_LTS}" Shell.check( - cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {self.release_branch}' \ + --head {self.release_branch} {pr_labels} \ + --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. 
Do not perform any changes with it.'""", + dry_run=dry_run, + strict=True, + verbose=True, ) - Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) - Shell.check(cmd_create_pr, strict=True, dry_run=dry_run, verbose=True) - if dry_run: - Shell.check( - f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", - verbose=True, - ) - Shell.check( - f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", - verbose=True, - ) - self.version_bump_pr = "dry-run" - else: - self.version_bump_pr = GH.get_pr_url_by_branch( - branch=branch_upd_version_contributors - ) def get_change_log_branch(self): return f"auto/{self.release_tag}" def update_release_info(self, dry_run: bool) -> "ReleaseInfo": - if self.release_branch != "master": + if self.release_type == "patch": if not self.changelog_pr: branch = self.get_change_log_branch() if not dry_run: @@ -371,21 +413,22 @@ class ReleaseInfo: url = "dry-run" print(f"ChangeLog PR url [{url}]") self.changelog_pr = url - - if not self.version_bump_pr: - branch = self.get_version_bump_branch() - if not dry_run: - url = GH.get_pr_url_by_branch(branch=branch) - else: - url = "dry-run" - print(f"Version bump PR url [{url}]") - self.version_bump_pr = url - - self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" - print(f"Release url [{self.release_url}]") - self.docker = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version" + else: + # new release branch - find version bump pr on a master branch + branch = self.get_version_bump_branch() + if not dry_run: + url = GH.get_pr_url_by_branch(branch=branch) + else: + url = "dry-run" + print(f"Version bump PR url [{url}]") + self.version_bump_pr = url + + self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + print(f"Release url [{self.release_url}]") + self.dump() + return self def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: @@ -410,35 +453,40 @@ class ReleaseInfo: def merge_prs(self, dry_run: bool) -> None: repo = CI.Envs.GITHUB_REPOSITORY - assert self.version_bump_pr - if dry_run: - version_bump_pr_num = 12345 - else: - version_bump_pr_num = int(self.version_bump_pr.split("/")[-1]) - print("Merging Version bump PR") - res_1 = Shell.check( - f"gh pr merge {version_bump_pr_num} --repo {repo} --merge --auto", - verbose=True, - dry_run=dry_run, - ) - - res_2 = True - if not self.release_tag.endswith("-new"): + if self.release_type == "patch": assert self.changelog_pr print("Merging ChangeLog PR") if dry_run: changelog_pr_num = 23456 else: changelog_pr_num = int(self.changelog_pr.split("/")[-1]) - res_2 = Shell.check( + res = Shell.check( f"gh pr merge {changelog_pr_num} --repo {repo} --merge --auto", verbose=True, dry_run=dry_run, ) else: - assert not self.changelog_pr + if not dry_run: + assert not self.changelog_pr + res = True - self.prs_merged = res_1 and res_2 + if self.release_type == "new": + assert self.version_bump_pr + print("Merging Version Bump PR") + if dry_run: + version_bump_pr = 23456 + else: + version_bump_pr = int(self.version_bump_pr.split("/")[-1]) + res = res and Shell.check( + f"gh pr merge {version_bump_pr} --repo {repo} --merge --auto", + verbose=True, + dry_run=dry_run, + ) + else: + if not dry_run: + assert not self.changelog_pr + + self.prs_merged = res class RepoTypes: @@ -759,7 +807,7 @@ if __name__ == "__main__": release_info.prepare( commit_ref=args.ref, release_type=args.release_type, - 
skip_tag_check=args.skip_tag_check, + _skip_tag_check=args.skip_tag_check, ) if args.download_packages: diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 3251ec5644e..34439c19f0a 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -70,7 +70,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--tag-type", type=str, - choices=("head", "release", "latest-release"), + choices=("head", "release", "release-latest"), default="head", help="defines required tags for resulting docker image. " "head - for master image (tag: head) " diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 07a7a9601c0..b20b2bb25cf 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -85,6 +85,16 @@ class ClickHouseVersion: self._tweak = 1 return self + def bump_patch(self) -> "ClickHouseVersion": + self._revision += 1 + self._patch += 1 + self._tweak = 1 + return self + + def reset_tweak(self) -> "ClickHouseVersion": + self._tweak = 1 + return self + def major_update(self) -> "ClickHouseVersion": if self._git is not None: self._git.update() @@ -104,13 +114,6 @@ class ClickHouseVersion: self.major, self.minor, self.patch + 1, self.revision, self._git ) - def reset_tweak(self) -> "ClickHouseVersion": - if self._git is not None: - self._git.update() - return ClickHouseVersion( - self.major, self.minor, self.patch, self.revision, self._git, 1 - ) - @property def major(self) -> int: return self._major From b5134fd4903b91250bb6db16a8d52ff0b2469686 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 13 Aug 2024 16:13:25 +0100 Subject: [PATCH 2198/2361] fix build --- base/base/cgroupsv2.cpp | 6 +++++- base/base/cgroupsv2.h | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/base/base/cgroupsv2.cpp b/base/base/cgroupsv2.cpp index b4ca8271d64..e0e37c8729b 100644 --- a/base/base/cgroupsv2.cpp +++ b/base/base/cgroupsv2.cpp @@ -51,8 +51,9 @@ fs::path cgroupV2PathOfProcess() #endif } -std::optional getCgroupsV2PathContainingFile(std::string_view file_name) +std::optional getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name) { +#if defined(OS_LINUX) if (!cgroupsV2Enabled()) return {}; @@ -70,4 +71,7 @@ std::optional getCgroupsV2PathContainingFile(std::string_view file_ current_cgroup = current_cgroup.parent_path(); } return {}; +#else + return {}; +#endif } diff --git a/base/base/cgroupsv2.h b/base/base/cgroupsv2.h index 925a399471e..a6276474254 100644 --- a/base/base/cgroupsv2.h +++ b/base/base/cgroupsv2.h @@ -19,4 +19,4 @@ std::filesystem::path cgroupV2PathOfProcess(); /// Returns the most nested cgroup dir containing the specified file. /// If cgroups v2 is not enabled - returns an empty optional. 
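/// A hypothetical usage sketch ("memory.max" is only an example of a cgroups v2 controller file):
///     if (auto cgroup_dir = getCgroupsV2PathContainingFile("memory.max"))
///         /* read the memory limit from "memory.max" inside *cgroup_dir */;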
-std::optional getCgroupsV2PathContainingFile(std::string_view file_name); +std::optional getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name); From 835fc9ca7601df8295997aaf22e8f1e5792abc90 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 22:37:46 +0000 Subject: [PATCH 2199/2361] Add more tests, fix conflicts --- .../Serializations/SerializationObject.cpp | 7 ++++++ ...n_merges_new_type_in_shared_data.reference | 4 +++ ...24_json_merges_new_type_in_shared_data.sql | 12 +++++++++ ...n_merges_new_type_in_shared_data.reference | 22 ++++++++++++++++ ...ed_json_merges_new_type_in_shared_data.sql | 25 +++++++++++++++++++ 5 files changed, 70 insertions(+) create mode 100644 tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference create mode 100644 tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql create mode 100644 tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference create mode 100644 tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index a1580ffedd4..2dd25e540cc 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -411,6 +411,12 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( const auto & shared_data = column_object.getSharedDataPtr(); auto * object_state = checkAndGetState(state); + if (column_object.getMaxDynamicPaths() != object_state->max_dynamic_paths) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_paths parameter of Object. Expected: {}, Got: {}", object_state->max_dynamic_paths, column_object.getMaxDynamicPaths()); + + if (column_object.getDynamicPaths().size() != object_state->sorted_dynamic_paths.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of number of dynamic paths in Object. Expected: {}, Got: {}", object_state->sorted_dynamic_paths.size(), column_object.getDynamicPaths().size()); + settings.path.push_back(Substream::ObjectData); for (const auto & path : sorted_typed_paths) @@ -532,6 +538,7 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( /// If it's a new object column, set dynamic paths and statistics. 
if (column_object.empty()) { + column_object.setMaxDynamicPaths(structure_state->max_dynamic_paths); column_object.setDynamicPaths(structure_state->sorted_dynamic_paths); column_object.setStatistics(structure_state->statistics); } diff --git a/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference new file mode 100644 index 00000000000..9d58b3a35db --- /dev/null +++ b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference @@ -0,0 +1,4 @@ +Array(Nullable(Int64)) true +Int64 false +Array(Nullable(Int64)) false +Int64 false diff --git a/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql new file mode 100644 index 00000000000..c96d67c0d47 --- /dev/null +++ b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql @@ -0,0 +1,12 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=1)) engine=MergeTree order by tuple() settings min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +insert into test select '{"b" : 42}' from numbers(5); +insert into test select '{"a" : 42, "b" : [1, 2, 3]}' from numbers(5); +optimize table test final; +select distinct dynamicType(json.b) as type, isDynamicElementInSharedData(json.b) from test order by type; +insert into test select '{"b" : 42}' from numbers(5); +optimize table test final; +select distinct dynamicType(json.b) as type, isDynamicElementInSharedData(json.b) from test order by type; +drop table test; diff --git a/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference new file mode 100644 index 00000000000..b45d9bb97da --- /dev/null +++ b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference @@ -0,0 +1,22 @@ +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) true +Int64 false +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) false +Int64 false +['c'] +['d'] +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) false +Int64 false +['c'] +['d'] +Int64 true +None false +Int64 true +None false +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) false +Int64 false +['c'] +['d'] +Int64 false +None false +Int64 false +None false diff --git a/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql new file mode 100644 index 00000000000..b22b8b4fb75 --- /dev/null +++ b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql @@ -0,0 +1,25 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +insert into test select materialize('{"a1" : 42, "a2" : 42, "a3" : 42, "a4" : 42, "a5" : 42, "a6" : 42, "a7" : 42, "a8" : 42, "a" : [{"c" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +insert into test select materialize('{"a1" : 42, "a2" : 42, "a3" : 42, "a4" : 42, "a5" : 42, "a6" : 42, 
"a7" : 42, "a8" : 42, "a" : [{"d" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONSharedDataPaths(arrayJoin(json.a[])) as path from test order by path; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONDynamicPaths(arrayJoin(json.a[])) as path from test order by path; +select distinct dynamicType(arrayJoin(json.a[].c)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].c)) from test order by type; +select distinct dynamicType(arrayJoin(json.a[].d)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].d)) from test order by type; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONDynamicPaths(arrayJoin(json.a[])) as path from test order by path; +select distinct dynamicType(arrayJoin(json.a[].c)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].c)) from test order by type; +select distinct dynamicType(arrayJoin(json.a[].d)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].d)) from test order by type; +drop table test; From 217963757e9a57fd3cadb4de68f6c2914ba67a91 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 23:21:24 +0000 Subject: [PATCH 2200/2361] Fix --- src/Columns/ColumnDynamic.cpp | 26 +++++++++++++------------- src/Columns/ColumnDynamic.h | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 212e1adabca..1f37add9d2d 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -16,7 +16,6 @@ #include #include #include -#include namespace DB { @@ -56,6 +55,7 @@ ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_ ColumnDynamic::ColumnDynamic( MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) : variant_column(std::move(variant_column_)) + , variant_column_ptr(assert_cast(variant_column.get())) , max_dynamic_types(max_dynamic_types_) , global_max_dynamic_types(global_max_dynamic_types_) , statistics(statistics_) @@ -320,7 +320,7 @@ void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) } auto & variant_col = getVariantColumn(); - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + const auto & src_variant_col = dynamic_src.getVariantColumn(); auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); auto src_offset = src_variant_col.offsetAt(n); @@ -346,7 +346,7 @@ void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) /// If variants are different, we need to extend our variant with new variants. 
if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_column_ptr->insertFrom(*dynamic_src.variant_column, n, *global_discriminators_mapping); + variant_col.insertFrom(*dynamic_src.variant_column, n, *global_discriminators_mapping); return; } @@ -364,7 +364,7 @@ void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) if (addNewVariant(variant_type)) { auto discr = variant_info.variant_name_to_discriminator[dynamic_src.variant_info.variant_names[src_global_discr]]; - variant_column_ptr->insertIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); + variant_col.insertIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); return; } @@ -388,16 +388,15 @@ void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t "[start({}) + length({}) > src.size()({})]", start, length, src_.size()); const auto & dynamic_src = assert_cast(src_); + auto & variant_col = getVariantColumn(); /// Check if we have the same variants in both columns. if (variant_info.variant_names == dynamic_src.variant_info.variant_names) { - variant_column_ptr->insertRangeFrom(*dynamic_src.variant_column, start, length); + variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length); return; } - auto & variant_col = getVariantColumn(); - /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { @@ -604,15 +603,15 @@ void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size #endif { const auto & dynamic_src = assert_cast(src_); + auto & variant_col = getVariantColumn(); /// Check if we have the same variants in both columns. if (variant_info.variant_names == dynamic_src.variant_info.variant_names) { - variant_column_ptr->insertManyFrom(*dynamic_src.variant_column, position, length); + variant_col.insertManyFrom(*dynamic_src.variant_column, position, length); return; } - auto & variant_col = getVariantColumn(); const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); auto src_global_discr = src_variant_col.globalDiscriminatorAt(position); auto src_offset = src_variant_col.offsetAt(position); @@ -647,7 +646,7 @@ void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size /// If variants are different, we need to extend our variant with new variants. 
if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_column_ptr->insertManyFrom(*dynamic_src.variant_column, position, length, *global_discriminators_mapping); + variant_col.insertManyFrom(*dynamic_src.variant_column, position, length, *global_discriminators_mapping); return; } @@ -663,7 +662,7 @@ void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size if (addNewVariant(variant_type)) { auto discr = variant_info.variant_name_to_discriminator[dynamic_src.variant_info.variant_names[src_global_discr]]; - variant_column_ptr->insertManyIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position), length); + variant_col.insertManyIntoVariantFrom(discr, src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position), length); return; } @@ -810,7 +809,8 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const { - auto discr = variant_column_ptr->globalDiscriminatorAt(n); + const auto & variant_col = getVariantColumn(); + auto discr = variant_col.globalDiscriminatorAt(n); if (discr == ColumnVariant::NULL_DISCRIMINATOR) { hash.update(discr); @@ -818,7 +818,7 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const } hash.update(variant_info.variant_names[discr]); - variant_column_ptr->getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_column_ptr->offsetAt(n), hash); + variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash); } #if !defined(DEBUG_OR_SANITIZER_BUILD) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index eb454881d68..2ae862de3af 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -106,7 +106,7 @@ public: return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, global_max_dynamic_types_, statistics_); } - static MutablePtr create(size_t max_dynamic_types_ = ColumnVariant::MAX_NESTED_COLUMNS) + static MutablePtr create(size_t max_dynamic_types_ = MAX_DYNAMIC_TYPES_LIMIT) { return Base::create(max_dynamic_types_); } From 46f41d171863ba968dacf6cf224ae5da751b87c1 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 13 Aug 2024 23:51:49 +0000 Subject: [PATCH 2201/2361] Fix --- src/Parsers/ParserDataType.cpp | 2 +- .../01548_create_table_compound_column_format.reference | 4 ++-- .../0_stateless/01548_create_table_compound_column_format.sh | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index 6e6e8758c51..d86b659df90 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -273,7 +273,7 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ObjectArgumentParser parser; parser.parse(pos, arg, expected); } - else if (boost::to_lower_copy(type_name) == "nested") + else if (type_name == "Nested") { ParserNameTypePair name_and_type_parser; name_and_type_parser.parse(pos, arg, expected); diff --git a/tests/queries/0_stateless/01548_create_table_compound_column_format.reference b/tests/queries/0_stateless/01548_create_table_compound_column_format.reference index c6c4dcdfa4a..21e31e8f034 100644 --- a/tests/queries/0_stateless/01548_create_table_compound_column_format.reference +++ b/tests/queries/0_stateless/01548_create_table_compound_column_format.reference 
@@ -1,12 +1,12 @@ CREATE TABLE test ( `a` Int64, - `b` NESTED(a Int64) + `b` Nested(a Int64) ) ENGINE = TinyLog CREATE TABLE test ( `a` Int64, - `b` TUPLE(a Int64) + `b` Tuple(a Int64) ) ENGINE = TinyLog diff --git a/tests/queries/0_stateless/01548_create_table_compound_column_format.sh b/tests/queries/0_stateless/01548_create_table_compound_column_format.sh index 99e3aed2825..9065af17dc1 100755 --- a/tests/queries/0_stateless/01548_create_table_compound_column_format.sh +++ b/tests/queries/0_stateless/01548_create_table_compound_column_format.sh @@ -4,6 +4,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "CREATE TABLE test(a Int64, b NESTED(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT +echo "CREATE TABLE test(a Int64, b Nested(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT -echo "CREATE TABLE test(a Int64, b TUPLE(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT \ No newline at end of file +echo "CREATE TABLE test(a Int64, b Tuple(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT \ No newline at end of file From f6f79e188d6c5a16bb327f9e62dce506ea8e8a19 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 14 Aug 2024 00:23:18 +0000 Subject: [PATCH 2202/2361] Apply libunwind changes needed for musl --- contrib/libunwind | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libunwind b/contrib/libunwind index a89d904befe..601db0b0e03 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit a89d904befea07814628c6ce0b44083c4e149c62 +Subproject commit 601db0b0e03018c01710470a37703b618f9cf08b From e302e2ef323045bfa7158c0890594ec6ee75c8f1 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 14 Aug 2024 01:15:16 +0000 Subject: [PATCH 2203/2361] Conflicts --- src/Interpreters/InterpreterSystemQuery.cpp | 3 ++- src/Storages/MaterializedView/RefreshTask.cpp | 2 +- src/Storages/MaterializedView/RefreshTask.h | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index aa69854fee8..21c8b44b374 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -667,7 +667,8 @@ BlockIO InterpreterSystemQuery::execute() task->run(); break; case Type::WAIT_VIEW: - getRefreshTask()->wait(); + for (const auto & task : getRefreshTasks()) + task->wait(); break; case Type::CANCEL_VIEW: for (const auto & task : getRefreshTasks()) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 0b51ae85116..ed5a6652288 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -37,7 +37,7 @@ RefreshTask::RefreshTask( refresh_settings.applyChanges(strategy.settings->changes); } -RefreshTaskHolder RefreshTask::create( +OwnedRefreshTask RefreshTask::create( StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy) diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index a17932d34e8..ad9d949e18e 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -26,7 +26,8 @@ public: RefreshTask(StorageMaterializedView * view_, const ASTRefreshStrategy & strategy); /// The only proper way to construct task - static RefreshTaskHolder create( + static OwnedRefreshTask create( + 
StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy); From da1c98a771c5b6cd74b3e1ba00fd4e01574489e9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 14 Aug 2024 04:54:33 +0200 Subject: [PATCH 2204/2361] Update the limits --- docker/test/stateless/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 874095e39dc..2e6e7bbebe5 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -391,8 +391,8 @@ done # wait for minio to flush its batch if it has any sleep 1 clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" -clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" -clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_bytes 0 --max_result_rows 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_bytes 0 --max_result_rows 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? From 6170a8663fc85b95cda4a7617975b06cc6c007f6 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Aug 2024 12:31:13 +0000 Subject: [PATCH 2205/2361] Bump usearch to 2.13.2 --- contrib/SimSIMD | 2 +- contrib/usearch | 2 +- .../mergetree-family/annindexes.md | 2 + .../MergeTreeIndexVectorSimilarity.cpp | 121 +++++++++++------- .../MergeTreeIndexVectorSimilarity.h | 26 ++-- ...r_search_index_creation_negative.reference | 2 - ..._vector_search_index_creation_negative.sql | 6 - 7 files changed, 90 insertions(+), 71 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index 18d17686124..91a76d1ac51 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit 18d17686124ddebd9fe55eee56b2e0273a613d4b +Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26 diff --git a/contrib/usearch b/contrib/usearch index e6c81f78c64..e21a5778a0d 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit e6c81f78c64c0d8119f854691a06e60660638a25 +Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4 diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index e73d6f07a32..097b0f5850a 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -59,6 +59,8 @@ Parameters: - `ef_construction`: (optional, default: 128) - `ef_search`: (optional, default: 64) +Value 0 for parameters `m`, `ef_construction`, and `ef_search` refers to the default value. 
+ Example: ```sql diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 346f69140bb..fbbc66bd8db 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -2,9 +2,6 @@ #if USE_USEARCH -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wpass-failed" - #include #include #include @@ -46,15 +43,15 @@ namespace { /// The only indexing method currently supported by USearch -std::set methods = {"hnsw"}; +const std::set methods = {"hnsw"}; /// Maps from user-facing name to internal name -std::unordered_map distanceFunctionToMetricKind = { +const std::unordered_map distanceFunctionToMetricKind = { {"L2Distance", unum::usearch::metric_kind_t::l2sq_k}, {"cosineDistance", unum::usearch::metric_kind_t::cos_k}}; /// Maps from user-facing name to internal name -std::unordered_map quantizationToScalarKind = { +const std::unordered_map quantizationToScalarKind = { {"f32", unum::usearch::scalar_kind_t::f32_k}, {"f16", unum::usearch::scalar_kind_t::f16_k}, {"i8", unum::usearch::scalar_kind_t::i8_k}}; @@ -96,13 +93,18 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( unum::usearch::scalar_kind_t scalar_kind, UsearchHnswParams usearch_hnsw_params) { - unum::usearch::metric_punned_t metric(dimensions, metric_kind, scalar_kind); + USearchIndex::metric_t metric(dimensions, metric_kind, scalar_kind); unum::usearch::index_dense_config_t config(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search); config.enable_key_lookups = false; /// we don't do row-to-vector lookups - USearchIndex usearch_index = USearchIndex::make(metric, config); - swap(usearch_index); + if (auto error = config.validate(); error) /// already called in vectorSimilarityIndexValidator, call again because usearch may change the config in-place + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. Error: {}", String(error.release())); + + if (auto result = USearchIndex::make(metric, config); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not create vector similarity index. Error: {}", String(result.error.release())); + else + swap(result.index); } void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const @@ -113,9 +115,8 @@ void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const return true; }; - auto result = Base::save_to_stream(callback); - if (result.error) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not save vector similarity index, error: " + String(result.error.release())); + if (auto result = Base::save_to_stream(callback); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not save vector similarity index. 
Error: {}", String(result.error.release())); } void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) @@ -126,26 +127,43 @@ void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) return true; }; - auto result = Base::load_from_stream(callback); - if (result.error) + if (auto result = Base::load_from_stream(callback); !result) /// See the comment in MergeTreeIndexGranuleVectorSimilarity::deserializeBinary why we throw here - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index, error: " + String(result.error.release()) + " Please drop the index and create it again."); + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index. Please drop the index and create it again. Error: {}", String(result.error.release())); + + if (!try_reserve(limits())) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index"); } USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStatistics() const { + USearchIndex::stats_t global_stats = Base::stats(); + Statistics statistics = { .max_level = max_level(), .connectivity = connectivity(), - .size = size(), /// number of vectors - .capacity = capacity(), /// number of vectors reserved - .memory_usage = memory_usage(), /// in bytes, the value is not exact + .size = size(), + .capacity = capacity(), + .memory_usage = memory_usage(), .bytes_per_vector = bytes_per_vector(), .scalar_words = scalar_words(), - .statistics = stats()}; + .nodes = global_stats.nodes, + .edges = global_stats.edges, + .max_edges = global_stats.max_edges, + .level_stats = {}}; + + for (size_t i = 0; i < statistics.max_level; ++i) + statistics.level_stats.push_back(Base::stats(i)); + return statistics; } +String USearchIndexWithSerialization::Statistics::toString() const +{ + return fmt::format("max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}, bytes_per_vector = {}, scalar_words = {}, nodes = {}, edges = {}, max_edges = {}", + max_level, connectivity, size, capacity, ReadableSize(memory_usage), bytes_per_vector, scalar_words, nodes, edges, max_edges); + +} MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, @@ -186,8 +204,7 @@ void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) index->serialize(ostr); auto statistics = index->getStatistics(); - LOG_TRACE(logger, "Wrote vector similarity index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", - statistics.max_level, statistics.connectivity, statistics.size, statistics.capacity, ReadableSize(statistics.memory_usage)); + LOG_TRACE(logger, "Wrote vector similarity index: {}", statistics.toString()); } void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) @@ -209,8 +226,7 @@ void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, index->deserialize(istr); auto statistics = index->getStatistics(); - LOG_TRACE(logger, "Loaded vector similarity index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", - statistics.max_level, statistics.connectivity, statistics.size, statistics.capacity, ReadableSize(statistics.memory_usage)); + LOG_TRACE(logger, "Loaded vector similarity index: {}", statistics.toString()); } MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilarity( @@ -290,19 
+306,24 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_
     if (!index)
         index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params);
+    /// We use Usearch's index_dense_t as index type which supports only 4 billion entries according to https://github.com/unum-cloud/usearch/tree/main/cpp
+    if (index->size() + num_rows > std::numeric_limits::max())
+        throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index in column {} would exceed 4 billion entries", index_column_name);
+
     /// Reserving space is mandatory
-    if (!index->reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows)))
+    if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows)))
         throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index");
     for (size_t row = 0; row < num_rows; ++row)
     {
-        auto rc = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]);
-        if (!rc)
-            throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index, error: " + String(rc.error.release()));
-
-        ProfileEvents::increment(ProfileEvents::USearchAddCount);
-        ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, rc.visited_members);
-        ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, rc.computed_distances);
+        if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result)
+            throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release()));
+        else
+        {
+            ProfileEvents::increment(ProfileEvents::USearchAddCount);
+            ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members);
+            ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances);
+        }
     }
 }
 else
@@ -356,17 +377,16 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer
     const std::vector reference_vector = vector_similarity_condition.getReferenceVector();
-    auto result = index->search(reference_vector.data(), limit);
-    if (result.error)
-        throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index, error: " + String(result.error.release()));
+    auto search_result = index->search(reference_vector.data(), limit);
+    if (!search_result)
+        throw Exception(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index. 
Error: {}", String(search_result.error.release())); ProfileEvents::increment(ProfileEvents::USearchSearchCount); - ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, result.visited_members); - ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, result.computed_distances); + ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, search_result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, search_result.computed_distances); - std::vector neighbors(result.size()); /// indexes of dots which were closest to the reference vector - std::vector distances(result.size()); - result.dump_to(neighbors.data(), distances.data()); + std::vector neighbors(search_result.size()); /// indexes of vectors which were closest to the reference vector + search_result.dump_to(neighbors.data()); std::vector granules; granules.reserve(neighbors.size()); @@ -414,14 +434,13 @@ MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition( MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) { - const bool has_six_args = (index.arguments.size() == 6); - + /// Default parameters: unum::usearch::metric_kind_t metric_kind = distanceFunctionToMetricKind.at(index.arguments[1].safeGet()); - - /// use defaults for the other parameters unum::usearch::scalar_kind_t scalar_kind = unum::usearch::scalar_kind_t::f32_k; UsearchHnswParams usearch_hnsw_params; + /// Optional parameters: + const bool has_six_args = (index.arguments.size() == 6); if (has_six_args) { scalar_kind = quantizationToScalarKind.at(index.arguments[2].safeGet()); @@ -466,12 +485,16 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta { if (!quantizationToScalarKind.contains(index.arguments[2].safeGet())) throw Exception(ErrorCodes::INCORRECT_DATA, "Third argument (quantization) of vector similarity index is not supported. Supported quantizations are: {}", joinByComma(quantizationToScalarKind)); - if (index.arguments[3].safeGet() < 2) - throw Exception(ErrorCodes::INCORRECT_DATA, "Fourth argument (M) of vector similarity index must be > 1"); - if (index.arguments[4].safeGet() < 1) - throw Exception(ErrorCodes::INCORRECT_DATA, "Fifth argument (ef_construction) of vector similarity index must be > 0"); - if (index.arguments[5].safeGet() < 1) - throw Exception(ErrorCodes::INCORRECT_DATA, "Sixth argument (ef_search) of vector similarity index must be > 0"); + + /// Call Usearche's own parameter validation method for HNSW-specific parameters + UInt64 m = index.arguments[3].safeGet(); + UInt64 ef_construction = index.arguments[4].safeGet(); + UInt64 ef_search = index.arguments[5].safeGet(); + + unum::usearch::index_dense_config_t config(m, ef_construction, ef_search); + + if (auto error = config.validate(); error) + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. 
Error: {}", String(error.release())); } /// Check that the index is created on a single column diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index f7098c1626c..c4c03254d2d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -4,12 +4,9 @@ #if USE_USEARCH -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wpass-failed" -# include -# include -# include -#pragma clang diagnostic pop +#include +#include +#include namespace DB { @@ -21,7 +18,7 @@ struct UsearchHnswParams size_t ef_search = unum::usearch::default_expansion_search(); }; -using USearchIndex = unum::usearch::index_dense_gt; +using USearchIndex = unum::usearch::index_dense_t; class USearchIndexWithSerialization : public USearchIndex { @@ -41,13 +38,18 @@ public: { size_t max_level; size_t connectivity; - size_t size; - size_t capacity; - size_t memory_usage; - /// advanced stats: + size_t size; /// number of indexed vectors + size_t capacity; /// reserved number of indexed vectors + size_t memory_usage; /// byte size (not exact) size_t bytes_per_vector; size_t scalar_words; - Base::stats_t statistics; + size_t nodes; + size_t edges; + size_t max_edges; + + std::vector level_stats; /// for debugging, excluded from getStatistics() + + String toString() const; }; Statistics getStatistics() const; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference index b6d034208d0..f18daa6e02e 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference @@ -3,8 +3,6 @@ Two or six index arguments 2nd argument (distance function) must be String and L2Distance or cosineDistance 3nd argument (quantization), if given, must be String and f32, f16, ... 
4nd argument (M), if given, must be UInt64 and > 1 -5nd argument (ef_construction), if given, must be UInt64 and > 0 -6nd argument (ef_search), if given, must be UInt64 and > 0 Must be created on single column Must be created on Array(Float32) columns Rejects INSERTs of Arrays with different sizes diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index 7c2ddfe81fc..de9d37e1000 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -27,12 +27,6 @@ CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similar SELECT '4nd argument (M), if given, must be UInt64 and > 1'; CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 'invalid', 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 1, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT '5nd argument (ef_construction), if given, must be UInt64 and > 0'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 'invalid', 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 0, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT '6nd argument (ef_search), if given, must be UInt64 and > 0'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 1, 'invalid')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 1, 0)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } SELECT 'Must be created on single column'; CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } From ca5fd57db7d7718a275b31200447a986019e3349 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 14 Aug 2024 09:35:54 +0000 Subject: [PATCH 2206/2361] Fix builds --- src/Common/StringHashForHeterogeneousLookup.h | 5 +++++ .../Serializations/tests/gtest_object_serialization.cpp | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Common/StringHashForHeterogeneousLookup.h b/src/Common/StringHashForHeterogeneousLookup.h index 0983fd460d6..56d8ccf0009 100644 --- a/src/Common/StringHashForHeterogeneousLookup.h +++ b/src/Common/StringHashForHeterogeneousLookup.h @@ -20,6 +20,11 @@ struct StringHashForHeterogeneousLookup { return hash_type()(str); } + + auto operator()(const char * data) const + { + return hash_type()(data); + } }; } diff --git a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp index 8a0c712fa86..f104b75af9b 100644 --- a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp +++ b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp @@ -17,7 +17,7 @@ TEST(ObjectSerialization, FieldBinarySerialization) ReadBufferFromString 
istr(ostr.str()); Field object2; serialization->deserializeBinary(object2, istr, FormatSettings()); - ASSERT_EQ(object1, object2.get()); + ASSERT_EQ(object1, object2.safeGet()); } From 8dec996686449e5a541157fdfd5ffb65b2208998 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 12 Aug 2024 06:35:44 +0000 Subject: [PATCH 2207/2361] Fix non-deterministic result order in test_storage_mysql.test_mysql_distributed --- tests/integration/test_storage_mysql/test.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index 5948954ff5f..c724c5bb498 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -445,7 +445,7 @@ def test_mysql_distributed(started_cluster): query = "SELECT * FROM (" for i in range(3): query += "SELECT name FROM test_replicas UNION DISTINCT " - query += "SELECT name FROM test_replicas)" + query += "SELECT name FROM test_replicas) ORDER BY name" result = node2.query(query) assert result == "host2\nhost3\nhost4\n" @@ -827,6 +827,9 @@ def test_settings(started_cluster): f"with settings: connect_timeout={connect_timeout}, read_write_timeout={rw_timeout}" ) + node1.query("DROP DATABASE IF EXISTS m") + node1.query("DROP DATABASE IF EXISTS mm") + rw_timeout = 40123001 connect_timeout = 40123002 node1.query( @@ -855,6 +858,9 @@ def test_settings(started_cluster): f"with settings: connect_timeout={connect_timeout}, read_write_timeout={rw_timeout}" ) + node1.query("DROP DATABASE m") + node1.query("DROP DATABASE mm") + drop_mysql_table(conn, table_name) conn.close() @@ -930,6 +936,9 @@ def test_joins(started_cluster): conn.commit() + node1.query("DROP TABLE IF EXISTS test_joins_table_users") + node1.query("DROP TABLE IF EXISTS test_joins_table_tickets") + node1.query( """ CREATE TABLE test_joins_table_users @@ -964,6 +973,9 @@ def test_joins(started_cluster): """ ) == "281607\tFeedback\t2024-06-25 12:09:41\tuser@example.com\n" + node1.query("DROP TABLE test_joins_table_users") + node1.query("DROP TABLE test_joins_table_tickets") + if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: From 9fe31773bdeeeada849a60822a7409ee1aa8782f Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 14 Aug 2024 09:52:12 +0000 Subject: [PATCH 2208/2361] Fix part name in 00961_check_table --- tests/queries/0_stateless/00961_check_table.reference | 2 +- tests/queries/0_stateless/00961_check_table.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00961_check_table.reference b/tests/queries/0_stateless/00961_check_table.reference index a0a054898b9..686285bb6aa 100644 --- a/tests/queries/0_stateless/00961_check_table.reference +++ b/tests/queries/0_stateless/00961_check_table.reference @@ -14,4 +14,4 @@ ======== 201902_4_5_1 1 ======== -201801_1_1_0 1 +201801_1_1_2 1 diff --git a/tests/queries/0_stateless/00961_check_table.sql b/tests/queries/0_stateless/00961_check_table.sql index a6abe8103d5..fc3c5435670 100644 --- a/tests/queries/0_stateless/00961_check_table.sql +++ b/tests/queries/0_stateless/00961_check_table.sql @@ -39,6 +39,6 @@ CHECK TABLE mt_table PARTITION 201902 SETTINGS max_threads = 1; SELECT '========'; -CHECK TABLE mt_table PART '201801_1_1_0'; +CHECK TABLE mt_table PART '201801_1_1_2'; DROP TABLE IF EXISTS mt_table; From 8d7319ccab75d8f5a401bdd63697f7bd6508d27f Mon Sep 17 00:00:00 2001 From: Duc Canh Le Date: Wed, 14 Aug 2024 10:18:00 +0000 Subject: 
[PATCH 2209/2361] fix wrong format of SYSTEM SYNC REPLICA query Signed-off-by: Duc Canh Le --- src/Parsers/ASTSystemQuery.cpp | 45 ++++++++++--------- ...03205_system_sync_replica_format.reference | 1 + .../03205_system_sync_replica_format.sql | 1 + 3 files changed, 25 insertions(+), 22 deletions(-) create mode 100644 tests/queries/0_stateless/03205_system_sync_replica_format.reference create mode 100644 tests/queries/0_stateless/03205_system_sync_replica_format.sql diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index a730ea0ba3d..7780544d5c2 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -198,6 +198,29 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s print_database_table(); } + if (sync_replica_mode != SyncReplicaMode::DEFAULT) + { + settings.ostr << ' '; + print_keyword(magic_enum::enum_name(sync_replica_mode)); + + // If the mode is LIGHTWEIGHT and specific source replicas are specified + if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty()) + { + settings.ostr << ' '; + print_keyword("FROM"); + settings.ostr << ' '; + + bool first = true; + for (const auto & src : src_replicas) + { + if (!first) + settings.ostr << ", "; + first = false; + settings.ostr << quoteString(src); + } + } + } + if (query_settings) { settings.ostr << (settings.hilite ? hilite_keyword : "") << settings.nl_or_ws << "SETTINGS " << (settings.hilite ? hilite_none : ""); @@ -233,28 +256,6 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s print_identifier(disk); } - if (sync_replica_mode != SyncReplicaMode::DEFAULT) - { - settings.ostr << ' '; - print_keyword(magic_enum::enum_name(sync_replica_mode)); - - // If the mode is LIGHTWEIGHT and specific source replicas are specified - if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty()) - { - settings.ostr << ' '; - print_keyword("FROM"); - settings.ostr << ' '; - - bool first = true; - for (const auto & src : src_replicas) - { - if (!first) - settings.ostr << ", "; - first = false; - settings.ostr << quoteString(src); - } - } - } break; } case Type::SYNC_DATABASE_REPLICA: diff --git a/tests/queries/0_stateless/03205_system_sync_replica_format.reference b/tests/queries/0_stateless/03205_system_sync_replica_format.reference new file mode 100644 index 00000000000..aad51dd90b0 --- /dev/null +++ b/tests/queries/0_stateless/03205_system_sync_replica_format.reference @@ -0,0 +1 @@ +SYSTEM SYNC REPLICA db.`table` LIGHTWEIGHT diff --git a/tests/queries/0_stateless/03205_system_sync_replica_format.sql b/tests/queries/0_stateless/03205_system_sync_replica_format.sql new file mode 100644 index 00000000000..329bce80afc --- /dev/null +++ b/tests/queries/0_stateless/03205_system_sync_replica_format.sql @@ -0,0 +1 @@ +SELECT formatQuery('SYSTEM SYNC REPLICA db.table LIGHTWEIGHT'); From 0e0272b2ffdf4286c5ef9766c90b88e096469e92 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 14 Aug 2024 10:21:23 +0000 Subject: [PATCH 2210/2361] Better check for overflow + limit min_marks_for_concurrent_read --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 18 ++++++++++++++++-- .../MergeTree/MergeTreeIndexGranularity.cpp | 15 +++++++++------ ...plicas_min_marks_to_read_overflow.reference | 11 +++++++++++ ...lel_replicas_min_marks_to_read_overflow.sql | 10 +++++++++- 4 files changed, 45 insertions(+), 9 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp 
b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
index 901d7c61167..b5b46ef9f41 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
@@ -350,7 +350,14 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas(
     /// We have a special logic for local replica. It has to read less data, because in some cases it should
     /// merge states of aggregate functions or do some other important stuff other than reading from Disk.
-    const auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
+    auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
+    if (pool_settings.min_marks_for_concurrent_read > std::numeric_limits::max())
+    {
+        /// limit min_marks_for_concurrent_read in case it's too big; this happened in tests due to settings randomization
+        pool_settings.min_marks_for_concurrent_read = std::numeric_limits::max();
+        multiplier = 1.0f;
+    }
+
     if (auto result = pool_settings.min_marks_for_concurrent_read * multiplier; canConvertTo(result))
         pool_settings.min_marks_for_concurrent_read = static_cast(result);
     else
@@ -519,7 +526,14 @@ Pipe ReadFromMergeTree::readInOrder(
         .number_of_current_replica = client_info.number_of_current_replica,
     };
-    const auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
+    auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;
+    if (pool_settings.min_marks_for_concurrent_read > std::numeric_limits::max())
+    {
+        /// limit min_marks_for_concurrent_read in case it's too big; this happened in tests due to settings randomization
+        pool_settings.min_marks_for_concurrent_read = std::numeric_limits::max();
+        multiplier = 1.0f;
+    }
+
     if (auto result = pool_settings.min_marks_for_concurrent_read * multiplier; canConvertTo(result))
         pool_settings.min_marks_for_concurrent_read = static_cast(result);
     else
diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp
index 2f9a4a47b11..2b924284857 100644
--- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp
+++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp
@@ -105,13 +105,16 @@ size_t MergeTreeIndexGranularity::countMarksForRows(size_t from_mark, size_t num
     /// See comment in IMergeTreeSelectAlgorithm.
     if (min_marks_to_read)
     {
-        // check that ... 
- bool overflow = ((1ULL << 63) & min_marks_to_read); // further multiplication by 2 will not overflow - if (!overflow) - overflow = (std::numeric_limits::max() - from_mark) < 2 * min_marks_to_read; // further addition will not overflow + // check overflow + size_t min_marks_to_read_2 = 0; + bool overflow = common::mulOverflow(min_marks_to_read, 2, min_marks_to_read_2); - if (!overflow && from_mark + 2 * min_marks_to_read <= to_mark) - to_mark = from_mark + min_marks_to_read; + size_t to_mark_overwrite = 0; + if (!overflow) + overflow = common::addOverflow(from_mark, min_marks_to_read_2, to_mark_overwrite); + + if (!overflow && to_mark_overwrite < to_mark) + to_mark = to_mark_overwrite; } return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows; diff --git a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference index 7fafd4d13ea..b6c452ba328 100644 --- a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference +++ b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference @@ -1,3 +1,14 @@ +1006 +1007 +1008 +1009 +101 +1010 +1011 +1012 +1013 +1014 +--- 100 100 101 101 102 102 diff --git a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql index 112373e5db2..6f486f8f0fe 100644 --- a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql +++ b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql @@ -6,10 +6,18 @@ INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(10_000); SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +SELECT v +FROM test__fuzz_22 +ORDER BY v +LIMIT 10, 10 +SETTINGS merge_tree_min_rows_for_concurrent_read = 9223372036854775806; + +SELECT '---'; + SELECT k, v FROM test__fuzz_22 ORDER BY k LIMIT 100, 10 -SETTINGS merge_tree_min_rows_for_concurrent_read = 9223372036854775806; +SETTINGS optimize_read_in_order=1, merge_tree_min_rows_for_concurrent_read = 9223372036854775806; DROP TABLE test__fuzz_22 SYNC; From 56d6ef5c4a015f5851923f2c420538456564e790 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 14 Aug 2024 10:53:07 +0000 Subject: [PATCH 2211/2361] Fix 02995_index_10 timeout --- tests/queries/0_stateless/02995_index_10.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02995_index_10.sh b/tests/queries/0_stateless/02995_index_10.sh index 813cc49cbd8..e7e7d3c3b42 100755 --- a/tests/queries/0_stateless/02995_index_10.sh +++ b/tests/queries/0_stateless/02995_index_10.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -37,8 +37,9 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String -HAVING count() > 0; -" +HAVING count() > 0 +SETTINGS trace_profile_events=0 -- test is too slow with profiling +;" done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} "DROP TABLE test" +${CLICKHOUSE_CLIENT} -q "DROP TABLE test" From 962bf1d821a498aaeb6f16e5d4205272cfd00001 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Wed, 14 Aug 2024 13:37:14 +0200 Subject: [PATCH 2212/2361] CI: Fix for critical bug fix regex --- tests/ci/changelog.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index 39e426945d3..b7f73f22016 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -115,7 +115,6 @@ def get_descriptions(prs: PullRequests) -> Dict[str, List[Description]]: # pylint: enable=protected-access if repo_name not in repos: repos[repo_name] = pr.base.repo - in_changelog = False merge_commit = pr.merge_commit_sha if merge_commit is None: logging.warning("PR %s does not have merge-commit, skipping", pr.number) @@ -291,7 +290,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri # Normalize bug fixes if ( re.match( - r"(?i)bug\Wfix", + r".*(?i)bug\Wfix", category, ) # Map "Critical Bug Fix" to "Bug fix" category for changelog From fc23f1c1ff5e114298f6b169fbeef80082f776d6 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 14 Aug 2024 12:20:33 +0000 Subject: [PATCH 2213/2361] Fix tests --- src/DataTypes/DataTypeObject.cpp | 9 ++++----- src/Formats/JSONExtractTree.cpp | 4 ++-- src/Functions/JSONPaths.cpp | 8 ++++++++ src/IO/parseDateTimeBestEffort.cpp | 4 ++-- .../0_stateless/01825_new_type_json_10.sql | 1 + .../01825_new_type_json_12.reference | 2 +- .../0_stateless/01825_new_type_json_18.sql | 2 ++ .../0_stateless/01825_new_type_json_9.sql | 1 + .../01825_new_type_json_in_array.sql | 2 ++ .../01825_new_type_json_insert_select.sql | 8 +++++++- .../03205_json_cast_from_string.reference | 4 ++-- .../03205_json_cast_from_string.sql | 6 +++--- ...3206_json_parsing_and_formatting.reference | 20 +++++++++---------- .../03206_json_parsing_and_formatting.sh | 4 ++-- ...9_json_type_horizontal_merges.reference.j2 | 4 ++-- ...209_json_type_vertical_merges.reference.j2 | 4 ++-- 16 files changed, 51 insertions(+), 32 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index b5d1f429001..d6395155397 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -224,7 +224,7 @@ void replaceJSONTypeNameIfNeeded(String & type_name, size_t max_dynamic_paths, s fmt::format( "JSON(max_dynamic_paths={}, max_dynamic_types={})", max_dynamic_paths / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, - std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))); + max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR)); pos = type_name.find("JSON", pos + 4); } } @@ -458,10 +458,9 @@ static 
DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: auto * literal = function->arguments->children[1]->as(); /// Is 1000000 a good maximum for max paths? - size_t min_value = identifier_name == "max_dynamic_types" ? 1 : 0; - size_t max_value = identifier_name == "max_dynamic_types" ? 255 : 1000000; - if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet() < min_value || literal->value.safeGet() > max_value) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'{}' parameter for {} type should be a positive integer between {} and {}. Got {}", identifier_name, magic_enum::enum_name(schema_format), min_value, max_value, function->arguments->children[1]->formatForErrorMessage()); + size_t max_value = identifier_name == "max_dynamic_types" ? ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT : 1000000; + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet() > max_value) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'{}' parameter for {} type should be a positive integer between 0 and {}. Got {}", identifier_name, magic_enum::enum_name(schema_format), max_value, function->arguments->children[1]->formatForErrorMessage()); if (identifier_name == "max_dynamic_types") max_dynamic_types = literal->value.safeGet(); diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 6a568d945fb..cac4dfbb077 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1582,12 +1582,12 @@ public: const std::unordered_set & paths_to_skip_, const std::vector & path_regexps_to_skip_, size_t max_dynamic_paths_, - size_t max_dynamic_types) + size_t max_dynamic_types_) : typed_path_nodes(std::move(typed_path_nodes_)) , paths_to_skip(paths_to_skip_) , dynamic_node(std::make_unique>( max_dynamic_paths_ / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, - std::max(max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR, 1lu))) + max_dynamic_types_ / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR)) , dynamic_serialization(std::make_shared()) { sorted_paths_to_skip.assign(paths_to_skip.begin(), paths_to_skip.end()); diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp index 35613e40aac..dfb0386e370 100644 --- a/src/Functions/JSONPaths.cpp +++ b/src/Functions/JSONPaths.cpp @@ -351,6 +351,14 @@ private: auto global_discr = variant_column.globalDiscriminatorAt(i); /// We don't output path with NULL values. It should be checked before calling getDynamicValueType. 
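        /// Values that were offloaded into the shared variant are stored together with a binary-encoded data
        /// type, so for such values the concrete type has to be decoded from the stored value itself rather
        /// than taken from variant_info.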
chassert(global_discr != ColumnVariant::NULL_DISCRIMINATOR); + if (global_discr == dynamic_column->getSharedVariantDiscriminator()) + { + auto value = dynamic_column->getSharedVariant().getDataAt(variant_column.offsetAt(i)); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + return type->getName(); + } + return variant_info.variant_names[global_discr]; } diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index e046e837689..a2afa4ce654 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -560,8 +560,8 @@ ReturnType parseDateTimeBestEffortImpl( else return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected word"); - while (!in.eof() && isAlphaASCII(*in.position())) - ++in.position(); +// while (!in.eof() && isAlphaASCII(*in.position())) +// ++in.position(); /// For RFC 2822 if (has_day_of_week) diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql b/tests/queries/0_stateless/01825_new_type_json_10.sql index a313adb4757..f586cc4477b 100644 --- a/tests/queries/0_stateless/01825_new_type_json_10.sql +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -6,6 +6,7 @@ DROP TABLE IF EXISTS t_json_10; CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 127]}]}} {"a": {"b": 2, "c": []}} + INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 3, "c": [{"f": 20, "e": [32]}, {"f": 30, "e": [64, 128]}]}} {"a": {"b": 4, "c": []}} SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(o)) as path FROM t_json_10 order by path; diff --git a/tests/queries/0_stateless/01825_new_type_json_12.reference b/tests/queries/0_stateless/01825_new_type_json_12.reference index 36f014f0178..10770498c4a 100644 --- a/tests/queries/0_stateless/01825_new_type_json_12.reference +++ b/tests/queries/0_stateless/01825_new_type_json_12.reference @@ -9,5 +9,5 @@ ('key_6','String') ('key_7','Float64') ('key_7','Int64') -{"obj":{"id":"1","key_0":[{"key_1":[{"key_3":[{"key_4":"1048576","key_5":0.0001048576,"key_6":25.5,"key_7":"1025"},{"key_6":"","key_7":"2"}]}]},{},{"key_1":[{"key_3":[{"key_5":-1,"key_6":"aqbjfiruu","key_7":-922337203685477600},{"key_4":"","key_6":"","key_7":"65537"}]},{"key_3":[{"key_4":"ghdqyeiom","key_5":1048575,"key_7":21474836.48}]}]}]}} +{"obj":{"id":"1","key_0":[{"key_1":[{"key_3":[{"key_4":"1048576","key_5":0.0001048576,"key_6":25.5,"key_7":"1025"},{"key_6":"","key_7":"2"}]}]},{},{"key_1":[{"key_3":[{"key_5":-1,"key_6":"aqbjfiruu","key_7":-922337203685477600},{"key_4":"","key_6":"","key_7":65537}]},{"key_3":[{"key_4":"ghdqyeiom","key_5":1048575,"key_7":21474836.48}]}]}]}} [[[1048576,NULL]],[],[[NULL,''],['ghdqyeiom']]] [[[0.0001048576,NULL]],[],[[-1,NULL],[1048575]]] [[[25.5,'']],[],[['aqbjfiruu',''],[NULL]]] [[[1025,2]],[],[[-922337203685477600,65537],[21474836.48]]] diff --git a/tests/queries/0_stateless/01825_new_type_json_18.sql b/tests/queries/0_stateless/01825_new_type_json_18.sql index 6066ff66fd7..469d37776ea 100644 --- a/tests/queries/0_stateless/01825_new_type_json_18.sql +++ b/tests/queries/0_stateless/01825_new_type_json_18.sql @@ -8,9 +8,11 @@ CREATE TABLE t_json_2(id UInt64, data JSON) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data" : {"k1": 1}}; + SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; TRUNCATE TABLE t_json_2; INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data" : 
{"k1": [1, 2]}}; + SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; diff --git a/tests/queries/0_stateless/01825_new_type_json_9.sql b/tests/queries/0_stateless/01825_new_type_json_9.sql index 626016a0a81..161a1d37f2e 100644 --- a/tests/queries/0_stateless/01825_new_type_json_9.sql +++ b/tests/queries/0_stateless/01825_new_type_json_9.sql @@ -7,6 +7,7 @@ SET allow_experimental_json_type = 1; CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id; INSERT INTO t_json format JSONEachRow {"id": 1, "obj": {"foo": 1, "k1": 2}}; + INSERT INTO t_json format JSONEachRow {"id": 2, "obj": {"foo": 1, "k2": 2}}; OPTIMIZE TABLE t_json FINAL; diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.sql b/tests/queries/0_stateless/01825_new_type_json_in_array.sql index fbf47e8607d..42ab1f64681 100644 --- a/tests/queries/0_stateless/01825_new_type_json_in_array.sql +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.sql @@ -7,6 +7,7 @@ DROP TABLE IF EXISTS t_json_array; CREATE TABLE t_json_array (id UInt32, arr Array(JSON)) ENGINE = MergeTree ORDER BY id; INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": 1, "k2": {"k3": 2, "k4": 3}}, {"k1": 2, "k2": {"k5": "foo"}}]} + INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": 3, "k2": {"k3": 4, "k4": 5}}]} @@ -18,6 +19,7 @@ SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arr))) as path FROM t_ TRUNCATE TABLE t_json_array; INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} + INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/01825_new_type_json_insert_select.sql b/tests/queries/0_stateless/01825_new_type_json_insert_select.sql index 6219cd2ede2..aff920c06ee 100644 --- a/tests/queries/0_stateless/01825_new_type_json_insert_select.sql +++ b/tests/queries/0_stateless/01825_new_type_json_insert_select.sql @@ -9,12 +9,14 @@ CREATE TABLE type_json_src (id UInt32, data JSON) ENGINE = MergeTree ORDER BY id CREATE TABLE type_json_dst AS type_json_src; INSERT INTO type_json_src VALUES (1, '{"k1": 1, "k2": "foo"}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src; SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; SELECT id, data FROM type_json_dst ORDER BY id; INSERT INTO type_json_src VALUES (2, '{"k1": 2, "k2": "bar"}') (3, '{"k1": 3, "k3": "aaa"}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id > 1; SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; @@ -23,10 +25,12 @@ SELECT id, data FROM type_json_dst ORDER BY id; INSERT INTO type_json_dst VALUES (4, '{"arr": [{"k11": 5, "k22": 6}, {"k11": 7, "k33": 8}]}'); INSERT INTO type_json_src VALUES (5, '{"arr": "not array"}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; TRUNCATE TABLE type_json_src; -INSERT INTO type_json_src VALUES (6, '{"arr": [{"k22": "str1"}]}') +INSERT INTO type_json_src VALUES (6, '{"arr": [{"k22": "str1"}]}'); + INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; @@ -45,6 +49,7 @@ SET max_insert_threads = 1; SET output_format_json_named_tuples_as_objects = 1; INSERT INTO type_json_src FORMAT JSONAsString 
{"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; INSERT INTO type_json_dst SELECT data FROM type_json_src; @@ -56,6 +61,7 @@ TRUNCATE TABLE type_json_src; TRUNCATE TABLE type_json_dst; INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; + INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; INSERT INTO type_json_dst SELECT data FROM type_json_src; diff --git a/tests/queries/0_stateless/03205_json_cast_from_string.reference b/tests/queries/0_stateless/03205_json_cast_from_string.reference index 1c4d820b3b4..b9ac477eef4 100644 --- a/tests/queries/0_stateless/03205_json_cast_from_string.reference +++ b/tests/queries/0_stateless/03205_json_cast_from_string.reference @@ -13,6 +13,6 @@ {"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64'} {'a.f':'Int64','g':'Int64'} {"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} {"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} -{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'String','a.b.e':'String','a.f':'Int64','g':'Int64'} {'a.b.c.d':'String','a.b.e':'String'} {'a.f':'Int64','g':'Int64'} -{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'String','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'String'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64'} {'a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} {"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} diff --git a/tests/queries/0_stateless/03205_json_cast_from_string.sql b/tests/queries/0_stateless/03205_json_cast_from_string.sql index 6ab88826c86..5ceee134c51 100644 --- a/tests/queries/0_stateless/03205_json_cast_from_string.sql +++ b/tests/queries/0_stateless/03205_json_cast_from_string.sql @@ -17,6 +17,6 @@ select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); -select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 
44}')::JSON(max_dynamic_paths = 2, max_dynamic_types=1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); -select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1, max_dynamic_types=1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); -select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0, max_dynamic_types=1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference index fa6fed341ba..94777ffab1f 100644 --- a/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference +++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference @@ -48,16 +48,16 @@ JSON(a.b.c UInt32, max_dynamic_paths=0) {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} JSON(a.b.c UInt32, max_dynamic_types=1) -{"a":{"b":{"c":1,"d":"[0,1]"}},"b":"2020-01-01","c":"42","d":{"e":{"f":"[\\"s1\\",\\"s2\\"]"}}} -{"a":{"b":{"c":2,"d":"[2,3]"}},"b":"[1,2,3]","d":{"e":{"g":"43"}}} -{"a":{"b":{"c":3,"d":"[4,5]"}},"e":"Hello, World!"} -{"a":{"b":{"c":4,"d":"[6,7]"}},"c":"43"} -{"a":{"b":{"c":5,"d":"[8,9]"}},"b":"[\\"b1\\",\\"b2\\"]","d":{"e":{"f":"[\\"s3\\",\\"s4\\"]","g":"44","h":"2020-02-02 10:00:00"}}} -{'a.b.c':'UInt32','a.b.d':'String','b':'String','c':'String','d.e.f':'String'} {'a.b.d':'String','b':'String','c':'String','d.e.f':'String'} {} -{'a.b.c':'UInt32','a.b.d':'String','b':'String','d.e.g':'String'} {'a.b.d':'String','b':'String','d.e.g':'String'} {} -{'a.b.c':'UInt32','a.b.d':'String','e':'String'} {'a.b.d':'String','e':'String'} {} -{'a.b.c':'UInt32','a.b.d':'String','c':'String'} {'a.b.d':'String','c':'String'} {} -{'a.b.c':'UInt32','a.b.d':'String','b':'String','d.e.f':'String','d.e.g':'String','d.e.h':'String'} {'a.b.d':'String','b':'String','d.e.f':'String','d.e.g':'String','d.e.h':'String'} {} +{"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} 
+{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.d':'Array(Nullable(Int64))','e':'String'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} Test small max_read_buffer_size {"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh b/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh index 44bef4f0e95..7e53e4388ec 100755 --- a/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh +++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh @@ -31,8 +31,8 @@ $CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DA $CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=0)')" echo "JSON(a.b.c UInt32, max_dynamic_types=1)" -$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=1)')" -$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=1)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=0)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=0)')" echo "Test small max_read_buffer_size" $CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=1 -q "select json from file($DATA_FILE, JSONAsObject)" diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 index 42f24e7f26f..ea4e1da7181 100644 --- a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 @@ -1,4 +1,4 @@ -create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, 
index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10; Dynamic paths 100000 a 90000 b @@ -66,7 +66,7 @@ Shared data paths 70000 d 60000 e 10000 g -create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10; Dynamic paths 100000 a 90000 b diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 index 99257f1d6ce..d292b1454c6 100644 --- a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 @@ -1,4 +1,4 @@ -create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1; Dynamic paths 100000 a 90000 b @@ -66,7 +66,7 @@ Shared data paths 70000 d 60000 e 10000 g -create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760; +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1; Dynamic paths 100000 a 90000 b From fc9929dc3d87a6f8065d4f46fe5002fab8d5537e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 14 Aug 2024 14:15:05 +0000 Subject: [PATCH 2214/2361] Make tests with azurite repeatable --- tests/integration/helpers/cluster.py | 16 ++++++++++++++ .../integration/test_storage_s3_queue/test.py | 21 ++++++++++++------- 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 215718463e8..a97d0f9c340 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -570,6 +570,8 @@ class ClickHouseCluster: self.spark_session = None self.with_azurite = False + self.azurite_container = "cont" + self.blob_service_client = None self._azurite_port = 0 # available when with_hdfs == True @@ -2692,6 +2694,20 @@ class ClickHouseCluster: connection_string ) logging.debug(blob_service_client.get_account_information()) + containers = [c for c 
in blob_service_client.list_containers(name_starts_with=self.azurite_container) if c.name == self.azurite_container] + if len(containers) > 0: + for c in containers: + blob_service_client.delete_container(c) + + container_client = blob_service_client.get_container_client(self.azurite_container) + if container_client.exists(): + logging.debug(f"azurite container '{self.azurite_container}' exist, deleting all blobs") + for b in container_client.list_blobs(): + container_client.delete_blob(b.name) + else: + logging.debug(f"azurite container '{self.azurite_container}' doesn't exist, creating it") + container_client.create_container() + self.blob_service_client = blob_service_client return except Exception as ex: diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 00ef8499594..ff723d0792a 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -13,7 +13,6 @@ from uuid import uuid4 AVAILABLE_MODES = ["unordered", "ordered"] DEFAULT_AUTH = ["'minio'", "'minio123'"] NO_AUTH = ["NOSIGN"] -AZURE_CONTAINER_NAME = "cont" def prepare_public_s3_bucket(started_cluster): @@ -75,6 +74,17 @@ def s3_queue_setup_teardown(started_cluster): objects = list(minio.list_objects(started_cluster.minio_bucket, recursive=True)) for obj in objects: minio.remove_object(started_cluster.minio_bucket, obj.object_name) + + container_client = started_cluster.blob_service_client.get_container_client( + started_cluster.azurite_container + ) + + if container_client.exists(): + blob_names = [b.name for b in container_client.list_blobs()] + logging.debug(f"Deleting blobs: {blob_names}") + for b in blob_names: + container_client.delete_blob(b) + yield # run test @@ -129,11 +139,6 @@ def started_cluster(): cluster.start() logging.info("Cluster started") - container_client = cluster.blob_service_client.get_container_client( - AZURE_CONTAINER_NAME - ) - container_client.create_container() - yield cluster finally: cluster.shutdown() @@ -190,7 +195,7 @@ def put_s3_file_content(started_cluster, filename, data, bucket=None): def put_azure_file_content(started_cluster, filename, data, bucket=None): client = started_cluster.blob_service_client.get_blob_client( - AZURE_CONTAINER_NAME, filename + started_cluster.azurite_container, filename ) buf = io.BytesIO(data) client.upload_blob(buf, "BlockBlob", len(data)) @@ -313,7 +318,7 @@ def test_delete_after_processing(started_cluster, mode, engine_name): assert len(objects) == 0 else: client = started_cluster.blob_service_client.get_container_client( - AZURE_CONTAINER_NAME + started_cluster.azurite_container ) objects_iterator = client.list_blobs(files_path) for objects in objects_iterator: From 920b88846b585a335b71c6f36a208f8cce8a5a74 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 14 Aug 2024 13:14:01 +0000 Subject: [PATCH 2215/2361] Optionally re-enable compilation with -O0 --- CMakeLists.txt | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 884d5be42de..6fa91fa002f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -322,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER) set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer") endif() +# Before you start hating your debugger because it refuses to show variables (''), try building with -DDEBUG_O_LEVEL="0" +# 
https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263 +set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") -set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") +set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") -set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") +set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") -set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") +set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") if (OS_DARWIN) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") From 07c4a072fe14567ebe9809d19f869fc7a948f73e Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 14 Aug 2024 16:45:02 +0200 Subject: [PATCH 2216/2361] Add debug logging --- src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 9 ++++++--- src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp | 3 ++- src/Interpreters/Cache/FileCache.cpp | 11 ++++++++++- src/Interpreters/Cache/FileCache.h | 3 ++- src/Interpreters/Cache/FileSegment.cpp | 8 ++++++-- src/Interpreters/Cache/FileSegment.h | 6 +++++- src/Interpreters/Cache/WriteBufferToFileSegment.cpp | 6 ++++-- 7 files changed, 35 insertions(+), 11 deletions(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index b471f3fc58f..286d06bc424 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -645,8 +645,9 @@ void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment) ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size); + std::string failure_reason; bool continue_predownload = file_segment.reserve( - current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds); + current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, failure_reason); if (continue_predownload) { LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, current_impl_buffer_size); @@ -1002,7 +1003,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() { chassert(file_offset_of_buffer_end + size - 1 <= file_segment.range().right); - bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds); + std::string failure_reason; + bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, failure_reason); if (success) { chassert(file_segment.getCurrentWriteOffset() == 
static_cast(implementation_buffer->getPosition())); @@ -1028,7 +1030,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() LOG_TRACE(log, "Bypassing cache because writeCache method failed"); } else - LOG_TRACE(log, "No space left in cache to reserve {} bytes, will continue without cache download", size); + LOG_TRACE(log, "No space left in cache to reserve {} bytes, reason: {}, " + "will continue without cache download", failure_reason, size); if (!success) { diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp index 382c4a80cc4..103ae0e1832 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp @@ -91,7 +91,8 @@ bool FileSegmentRangeWriter::write(char * data, size_t size, size_t offset, File size_t size_to_write = std::min(available_size, size); - bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds); + std::string failure_reason; + bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds, failure_reason); if (!reserved) { appendFilesystemCacheLog(*file_segment); diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 13c70b38543..b1a1f629b00 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -804,7 +804,8 @@ bool FileCache::tryReserve( const size_t size, FileCacheReserveStat & reserve_stat, const UserInfo & user, - size_t lock_wait_timeout_milliseconds) + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason) { ProfileEventTimeIncrement watch(ProfileEvents::FilesystemCacheReserveMicroseconds); @@ -817,6 +818,7 @@ bool FileCache::tryReserve( if (cache_is_being_resized.load(std::memory_order_relaxed)) { ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfCacheResize); + failure_reason = "cache is being resized"; return false; } @@ -824,6 +826,7 @@ bool FileCache::tryReserve( if (!cache_lock) { ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention); + failure_reason = "cache contention"; return false; } @@ -847,6 +850,7 @@ bool FileCache::tryReserve( LOG_TEST(log, "Query limit exceeded, space reservation failed, " "recache_on_query_limit_exceeded is disabled (while reserving for {}:{})", file_segment.key(), file_segment.offset()); + failure_reason = "query limit exceeded"; return false; } @@ -877,6 +881,7 @@ bool FileCache::tryReserve( if (!query_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, {}, user.user_id, cache_lock)) { + failure_reason = "cannot evict enough space for query limit"; return false; } @@ -891,11 +896,15 @@ bool FileCache::tryReserve( if (!main_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, queue_iterator, user.user_id, cache_lock)) { + failure_reason = "cannot evict enough space"; return false; } if (!file_segment.getKeyMetadata()->createBaseDirectory()) + { + failure_reason = "not enough space on device"; return false; + } if (eviction_candidates.size() > 0) { diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 07be802a940..efa504689eb 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -165,7 +165,8 @@ public: size_t size, FileCacheReserveStat & stat, const UserInfo & user, - size_t 
lock_wait_timeout_milliseconds); + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason); std::vector getFileSegmentInfos(const UserID & user_id); diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index c46fb978ae4..cfbdfbaa257 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -502,7 +502,11 @@ LockedKeyPtr FileSegment::lockKeyMetadata(bool assert_exists) const return metadata->tryLock(); } -bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat) +bool FileSegment::reserve( + size_t size_to_reserve, + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason, + FileCacheReserveStat * reserve_stat) { if (!size_to_reserve) throw Exception(ErrorCodes::LOGICAL_ERROR, "Zero space reservation is not allowed"); @@ -554,7 +558,7 @@ bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milli if (!reserve_stat) reserve_stat = &dummy_stat; - bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds); + bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds, failure_reason); if (!reserved) setDownloadFailedUnlocked(lock()); diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 25ffb880b45..e90ebdbf8fe 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -201,7 +201,11 @@ public: /// Try to reserve exactly `size` bytes (in addition to the getDownloadedSize() bytes already downloaded). /// Returns true if reservation was successful, false otherwise. - bool reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat = nullptr); + bool reserve( + size_t size_to_reserve, + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason, + FileCacheReserveStat * reserve_stat = nullptr); /// Write data into reserved space. void write(char * from, size_t size, size_t offset_in_file); diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index e6ebf6ad50c..e43bbacdbc5 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -75,7 +75,8 @@ void WriteBufferToFileSegment::nextImpl() FileCacheReserveStat reserve_stat; /// In case of an error, we don't need to finalize the file segment /// because it will be deleted soon and completed in the holder's destructor. - bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, &reserve_stat); + std::string failure_reason; + bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, failure_reason, &reserve_stat); if (!ok) { @@ -84,9 +85,10 @@ void WriteBufferToFileSegment::nextImpl() reserve_stat_msg += fmt::format("{} hold {}, can release {}; ", toString(kind), ReadableSize(stat.non_releasable_size), ReadableSize(stat.releasable_size)); - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Failed to reserve {} bytes for {}: {}(segment info: {})", + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Failed to reserve {} bytes for {}: reason {}, {}(segment info: {})", bytes_to_write, file_segment->getKind() == FileSegmentKind::Temporary ? 
"temporary file" : "the file in cache", + failure_reason, reserve_stat_msg, file_segment->getInfoForLog() ); From 4827b8bb1c7a77e50912ab40d5c009c43d20f6ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 14 Aug 2024 14:56:02 +0000 Subject: [PATCH 2217/2361] Make S3Queue tests repeatable --- .../integration/test_storage_s3_queue/test.py | 97 ++++++++++++++----- 1 file changed, 72 insertions(+), 25 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index ff723d0792a..08a8a7cac81 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1,6 +1,7 @@ import io import logging import random +import string import time import pytest @@ -267,6 +268,10 @@ def create_mv( ) +def generate_random_string(length=6): + return "".join(random.choice(string.ascii_lowercase) for i in range(length)) + + @pytest.mark.parametrize("mode", ["unordered", "ordered"]) @pytest.mark.parametrize("engine_name", ["S3Queue", "AzureQueue"]) def test_delete_after_processing(started_cluster, mode, engine_name): @@ -276,6 +281,8 @@ def test_delete_after_processing(started_cluster, mode, engine_name): files_path = f"{table_name}_data" files_num = 5 row_num = 10 + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" if engine_name == "S3Queue": storage = "s3" else: @@ -290,7 +297,7 @@ def test_delete_after_processing(started_cluster, mode, engine_name): table_name, mode, files_path, - additional_settings={"after_processing": "delete"}, + additional_settings={"after_processing": "delete", "keeper_path": keeper_path}, engine_name=engine_name, ) create_mv(node, table_name, dst_table_name) @@ -333,7 +340,8 @@ def test_failed_retry(started_cluster, mode, engine_name): dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" file_path = f"{files_path}/trash_test.csv" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" retries_num = 3 values = [ @@ -391,7 +399,8 @@ def test_failed_retry(started_cluster, mode, engine_name): def test_direct_select_file(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"test.direct_select_file_{mode}" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" file_path = f"{files_path}/test.csv" @@ -496,8 +505,17 @@ def test_direct_select_multiple_files(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"direct_select_multiple_files_{mode}" files_path = f"{table_name}_data" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" - create_table(started_cluster, node, table_name, mode, files_path) + create_table( + started_cluster, + node, + table_name, + mode, + files_path, + additional_settings={"keeper_path": keeper_path}, + ) for i in range(5): rand_values = [[random.randint(0, 50) for _ in range(3)] for _ in range(10)] values_csv = ( @@ -520,14 +538,23 @@ def test_direct_select_multiple_files(started_cluster, mode): @pytest.mark.parametrize("mode", AVAILABLE_MODES) -def test_streaming_to_view_(started_cluster, mode): +def 
test_streaming_to_view(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"streaming_to_view_{mode}" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" total_values = generate_random_files(started_cluster, files_path, 10) - create_table(started_cluster, node, table_name, mode, files_path) + create_table( + started_cluster, + node, + table_name, + mode, + files_path, + additional_settings={"keeper_path": keeper_path}, + ) create_mv(node, table_name, dst_table_name) expected_values = set([tuple(i) for i in total_values]) @@ -549,7 +576,8 @@ def test_streaming_to_many_views(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"streaming_to_many_views_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" for i in range(3): @@ -587,7 +615,8 @@ def test_streaming_to_many_views(started_cluster, mode): def test_multiple_tables_meta_mismatch(started_cluster): node = started_cluster.instances["instance"] table_name = f"multiple_tables_meta_mismatch" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" create_table( @@ -680,7 +709,8 @@ def test_multiple_tables_streaming_sync(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"multiple_tables_streaming_sync_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 @@ -763,7 +793,8 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): node_2 = started_cluster.instances["instance2"] table_name = f"multiple_tables_streaming_sync_distributed_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 row_num = 50 @@ -838,7 +869,8 @@ def test_max_set_age(started_cluster): node = started_cluster.instances["instance"] table_name = "max_set_age" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" max_age = 20 files_to_generate = 10 @@ -949,10 +981,9 @@ def test_max_set_age(started_cluster): def test_max_set_size(started_cluster): node = started_cluster.instances["instance"] table_name = f"max_set_size" - dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" - max_age = 10 files_to_generate = 10 create_table( @@ -996,7 +1027,8 @@ def test_drop_table(started_cluster): node = started_cluster.instances["instance"] table_name = 
f"test_drop" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 @@ -1029,6 +1061,8 @@ def test_s3_client_reused(started_cluster): table_name = f"test.test_s3_client_reused" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" row_num = 10 def get_created_s3_clients_count(): @@ -1062,6 +1096,7 @@ def test_s3_client_reused(started_cluster): additional_settings={ "after_processing": "delete", "s3queue_processing_threads_num": 1, + "keeper_path": keeper_path, }, auth=NO_AUTH, bucket=started_cluster.minio_public_bucket, @@ -1119,7 +1154,8 @@ def test_processing_threads(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"processing_threads_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 processing_threads = 32 @@ -1186,7 +1222,8 @@ def test_shards(started_cluster, mode, processing_threads): node = started_cluster.instances["instance"] table_name = f"test_shards_{mode}_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 shards_num = 3 @@ -1313,7 +1350,8 @@ def test_shards_distributed(started_cluster, mode, processing_threads): node_2 = started_cluster.instances["instance2"] table_name = f"test_shards_distributed_{mode}_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 row_num = 300 @@ -1466,8 +1504,8 @@ def test_settings_check(started_cluster): node = started_cluster.instances["instance"] node_2 = started_cluster.instances["instance2"] table_name = f"test_settings_check" - dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" mode = "ordered" @@ -1509,7 +1547,10 @@ def test_processed_file_setting(started_cluster, processing_threads): node = started_cluster.instances["instance"] table_name = f"test_processed_file_setting_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}_{processing_threads}" + # A unique path is necessary for repeatable tests + keeper_path = ( + f"/clickhouse/test_{table_name}_{processing_threads}_{generate_random_string()}" + ) files_path = f"{table_name}_data" files_to_generate = 10 @@ -1560,7 +1601,10 @@ def test_processed_file_setting_distributed(started_cluster, processing_threads) node_2 = started_cluster.instances["instance2"] table_name = f"test_processed_file_setting_distributed_{processing_threads}" dst_table_name = 
f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = ( + f"/clickhouse/test_{table_name}_{processing_threads}_{generate_random_string()}" + ) files_path = f"{table_name}_data" files_to_generate = 10 @@ -1614,7 +1658,8 @@ def test_upgrade(started_cluster): table_name = f"test_upgrade" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 10 @@ -1655,7 +1700,8 @@ def test_exception_during_insert(started_cluster): table_name = f"test_exception_during_insert" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 10 @@ -1708,7 +1754,8 @@ def test_commit_on_limit(started_cluster): table_name = f"test_commit_on_limit" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 10 From c2f6751dafe3ecd0841f85799c0b15ae91e7cef9 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 14 Aug 2024 17:16:16 +0200 Subject: [PATCH 2218/2361] fix bug in key condition --- src/Storages/MergeTree/KeyCondition.cpp | 29 ++----------------- .../03221_key_condition_bug.reference | 1 + .../0_stateless/03221_key_condition_bug.sql | 11 +++++++ 3 files changed, 14 insertions(+), 27 deletions(-) create mode 100644 tests/queries/0_stateless/03221_key_condition_bug.reference create mode 100644 tests/queries/0_stateless/03221_key_condition_bug.sql diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index dfb43c4e75d..d0391dd9695 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -888,31 +888,6 @@ static Field applyFunctionForField( return (*col)[0]; } -static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & current_type, const FieldRef & field) -{ - /// Fallback for fields without block reference. - if (field.isExplicit()) - return applyFunctionForField(func, current_type, field); - - String result_name = "_" + func->getName() + "_" + toString(field.column_idx); - const auto & columns = field.columns; - size_t result_idx = columns->size(); - - for (size_t i = 0; i < result_idx; ++i) - { - if ((*columns)[i].name == result_name) - result_idx = i; - } - - if (result_idx == columns->size()) - { - ColumnsWithTypeAndName args{(*columns)[field.column_idx]}; - field.columns->emplace_back(ColumnWithTypeAndName {nullptr, func->getResultType(), result_name}); - (*columns)[result_idx].column = func->execute(args, (*columns)[result_idx].type, columns->front().column->size()); - } - - return {field.columns, field.row_idx, result_idx}; -} /// Sequentially applies functions to the column, returns `true` /// if all function arguments are compatible with functions @@ -2563,13 +2538,13 @@ std::optional KeyCondition::applyMonotonicFunctionsChainToRange( /// Thus we can safely use isNull() as an -Inf/+Inf indicator here. 
if (!key_range.left.isNull()) { - key_range.left = applyFunction(func, current_type, key_range.left); + key_range.left = applyFunctionForField(func, current_type, key_range.left); key_range.left_included = true; } if (!key_range.right.isNull()) { - key_range.right = applyFunction(func, current_type, key_range.right); + key_range.right = applyFunctionForField(func, current_type, key_range.right); key_range.right_included = true; } diff --git a/tests/queries/0_stateless/03221_key_condition_bug.reference b/tests/queries/0_stateless/03221_key_condition_bug.reference new file mode 100644 index 00000000000..e373ee695f6 --- /dev/null +++ b/tests/queries/0_stateless/03221_key_condition_bug.reference @@ -0,0 +1 @@ +50 diff --git a/tests/queries/0_stateless/03221_key_condition_bug.sql b/tests/queries/0_stateless/03221_key_condition_bug.sql new file mode 100644 index 00000000000..bac3e631a81 --- /dev/null +++ b/tests/queries/0_stateless/03221_key_condition_bug.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS report_metrics_v2 +( + `a` UInt64 +) Engine = MergeTree() +ORDER BY a; + +insert into report_metrics_v2 SELECT * FROM system.numbers LIMIT 50000; + +SELECT count(*) from report_metrics_v2 WHERE (intDiv(a, 50) = 200) AND (intDiv(a, 50000) = 0); + +DROP TABLE report_metrics_v2; From 2aef696856cbded795aedc23b7b8963799b8ebe6 Mon Sep 17 00:00:00 2001 From: serxa Date: Wed, 14 Aug 2024 15:27:37 +0000 Subject: [PATCH 2219/2361] make test runnable multiple times --- .../test_delayed_replica_failover/test.py | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index 1116d225b8c..ed63a47e030 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -20,21 +20,29 @@ node_1_2 = cluster.add_instance("node_1_2", with_zookeeper=True) node_2_1 = cluster.add_instance("node_2_1", with_zookeeper=True) node_2_2 = cluster.add_instance("node_2_2", with_zookeeper=True) +# For test to be runnable multiple times +seqno = 0 @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() + yield cluster + finally: + cluster.shutdown() + +@pytest.fixture(scope="function", autouse=True) +def create_tables(): + global seqno + try: + seqno += 1 for shard in (1, 2): for replica in (1, 2): node = cluster.instances["node_{}_{}".format(shard, replica)] node.query( - """ -CREATE TABLE replicated (d Date, x UInt32) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}') PARTITION BY toYYYYMM(d) ORDER BY d""".format( - shard=shard, instance=node.name - ) + f"CREATE TABLE replicated (d Date, x UInt32) ENGINE = " + f"ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated_{seqno}', '{node.name}') PARTITION BY toYYYYMM(d) ORDER BY d" ) node_1_1.query( @@ -42,10 +50,15 @@ CREATE TABLE replicated (d Date, x UInt32) ENGINE = "Distributed('test_cluster', 'default', 'replicated')" ) - yield cluster + yield finally: - cluster.shutdown() + node_1_1.query("DROP TABLE distributed") + + node_1_1.query("DROP TABLE replicated") + node_1_2.query("DROP TABLE replicated") + node_2_1.query("DROP TABLE replicated") + node_2_2.query("DROP TABLE replicated") def test(started_cluster): From 23bb47293d3de355710cb93f6bd7dcd7b130ea79 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 14 Aug 2024 18:13:00 +0200 Subject: [PATCH 2220/2361] cache result column with ptr hex string --- 
src/Storages/MergeTree/KeyCondition.cpp | 30 +++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index d0391dd9695..bad9c2b422c 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -888,6 +888,32 @@ static Field applyFunctionForField( return (*col)[0]; } +static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & current_type, const FieldRef & field) +{ + /// Fallback for fields without block reference. + if (field.isExplicit()) + return applyFunctionForField(func, current_type, field); + + std::stringstream buf; + buf << "_" << func.get() << "_" << toString(field.column_idx); + String result_name = buf.str(); + const auto & columns = field.columns; + size_t result_idx = columns->size(); + + for (size_t i = 0; i < result_idx; ++i) + { + if ((*columns)[i].name == result_name) + result_idx = i; + } + + if (result_idx == columns->size()) + { + ColumnsWithTypeAndName args{(*columns)[field.column_idx]}; + field.columns->emplace_back(ColumnWithTypeAndName {nullptr, func->getResultType(), result_name}); + (*columns)[result_idx].column = func->execute(args, (*columns)[result_idx].type, columns->front().column->size()); + } + return {field.columns, field.row_idx, result_idx}; +} /// Sequentially applies functions to the column, returns `true` /// if all function arguments are compatible with functions @@ -2538,13 +2564,13 @@ std::optional KeyCondition::applyMonotonicFunctionsChainToRange( /// Thus we can safely use isNull() as an -Inf/+Inf indicator here. if (!key_range.left.isNull()) { - key_range.left = applyFunctionForField(func, current_type, key_range.left); + key_range.left = applyFunction(func, current_type, key_range.left); key_range.left_included = true; } if (!key_range.right.isNull()) { - key_range.right = applyFunctionForField(func, current_type, key_range.right); + key_range.right = applyFunction(func, current_type, key_range.right); key_range.right_included = true; } From 537f3bcd76fbbbfb1ee4b7b718b0630fad95509c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 14 Aug 2024 16:13:17 +0000 Subject: [PATCH 2221/2361] Automatic style fix --- tests/integration/helpers/cluster.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index a97d0f9c340..0b6cf03d467 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2694,18 +2694,30 @@ class ClickHouseCluster: connection_string ) logging.debug(blob_service_client.get_account_information()) - containers = [c for c in blob_service_client.list_containers(name_starts_with=self.azurite_container) if c.name == self.azurite_container] + containers = [ + c + for c in blob_service_client.list_containers( + name_starts_with=self.azurite_container + ) + if c.name == self.azurite_container + ] if len(containers) > 0: for c in containers: blob_service_client.delete_container(c) - container_client = blob_service_client.get_container_client(self.azurite_container) + container_client = blob_service_client.get_container_client( + self.azurite_container + ) if container_client.exists(): - logging.debug(f"azurite container '{self.azurite_container}' exist, deleting all blobs") + logging.debug( + f"azurite container '{self.azurite_container}' exist, deleting all blobs" + ) for b in container_client.list_blobs(): 
container_client.delete_blob(b.name) else: - logging.debug(f"azurite container '{self.azurite_container}' doesn't exist, creating it") + logging.debug( + f"azurite container '{self.azurite_container}' doesn't exist, creating it" + ) container_client.create_container() self.blob_service_client = blob_service_client From 8e6096dee72acc5ee75eb05ddfb9384767f52648 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 14 Aug 2024 16:13:22 +0000 Subject: [PATCH 2222/2361] Automatic style fix --- tests/integration/test_delayed_replica_failover/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index ed63a47e030..f1034e26b25 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -23,6 +23,7 @@ node_2_2 = cluster.add_instance("node_2_2", with_zookeeper=True) # For test to be runnable multiple times seqno = 0 + @pytest.fixture(scope="module") def started_cluster(): try: From acc88af2cbe5f993cf244cf44a0c448c2e76bf37 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 14 Aug 2024 18:27:20 +0200 Subject: [PATCH 2223/2361] fix style --- src/Storages/MergeTree/KeyCondition.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index bad9c2b422c..a297440e8f8 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -894,8 +894,10 @@ static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & if (field.isExplicit()) return applyFunctionForField(func, current_type, field); - std::stringstream buf; - buf << "_" << func.get() << "_" << toString(field.column_idx); + WriteBufferFromOwnString buf; + writeText("_", buf); + writePointerHex(func.get(), buf); + writeText("_" + toString(field.column_idx), buf); String result_name = buf.str(); const auto & columns = field.columns; size_t result_idx = columns->size(); @@ -912,6 +914,7 @@ static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & field.columns->emplace_back(ColumnWithTypeAndName {nullptr, func->getResultType(), result_name}); (*columns)[result_idx].column = func->execute(args, (*columns)[result_idx].type, columns->front().column->size()); } + return {field.columns, field.row_idx, result_idx}; } From cf58e8c1e37dda01b73b0cbd2553e3b460aa28ad Mon Sep 17 00:00:00 2001 From: serxa Date: Wed, 14 Aug 2024 16:35:45 +0000 Subject: [PATCH 2224/2361] fix data race in `DynamicResourceManager::updateConfiguration` --- .../Nodes/DynamicResourceManager.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp b/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp index 01aa7df48d3..6b9f6318903 100644 --- a/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp +++ b/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp @@ -184,14 +184,20 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi // Resource update leads to loss of runtime data of nodes and may lead to temporary violation of constraints (e.g. limits) // Try to minimise this by reusing "equal" resources (initialized with the same configuration). 
+ std::vector resources_to_attach; for (auto & [name, new_resource] : new_state->resources) { if (auto iter = state->resources.find(name); iter != state->resources.end()) // Resource update { State::ResourcePtr old_resource = iter->second; if (old_resource->equals(*new_resource)) + { new_resource = old_resource; // Rewrite with older version to avoid loss of runtime data + continue; + } } + // It is new or updated resource + resources_to_attach.emplace_back(new_resource); } // Commit new state @@ -199,17 +205,14 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi state = new_state; // Attach new and updated resources to the scheduler - for (auto & [name, resource] : new_state->resources) + for (auto & resource : resources_to_attach) { const SchedulerNodePtr & root = resource->nodes.find("/")->second.ptr; - if (root->parent == nullptr) + resource->attached_to = &scheduler; + scheduler.event_queue->enqueue([this, root] { - resource->attached_to = &scheduler; - scheduler.event_queue->enqueue([this, root] - { - scheduler.attachChild(root); - }); - } + scheduler.attachChild(root); + }); } // NOTE: after mutex unlock `state` became available for Classifier(s) and must be immutable From 61b96ed7498a19c315d03d6d23330c06837dc990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 14 Aug 2024 16:43:12 +0000 Subject: [PATCH 2225/2361] Make rest of the tests repeatable --- .../integration/test_storage_s3_queue/test.py | 77 ++++++++++++------- 1 file changed, 48 insertions(+), 29 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 08a8a7cac81..664d537a8d1 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -68,8 +68,8 @@ def s3_queue_setup_teardown(started_cluster): instance = started_cluster.instances["instance"] instance_2 = started_cluster.instances["instance2"] - instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") - instance_2.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") + instance.query("DROP DATABASE IF EXISTS default; CREATE DATABASE default;") + instance_2.query("DROP DATABASE IF EXISTS default; CREATE DATABASE default;") minio = started_cluster.minio_client objects = list(minio.list_objects(started_cluster.minio_bucket, recursive=True)) @@ -276,7 +276,7 @@ def generate_random_string(length=6): @pytest.mark.parametrize("engine_name", ["S3Queue", "AzureQueue"]) def test_delete_after_processing(started_cluster, mode, engine_name): node = started_cluster.instances["instance"] - table_name = f"test.delete_after_processing_{mode}_{engine_name}" + table_name = f"delete_after_processing_{mode}_{engine_name}" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" files_num = 5 @@ -336,7 +336,7 @@ def test_delete_after_processing(started_cluster, mode, engine_name): @pytest.mark.parametrize("engine_name", ["S3Queue", "AzureQueue"]) def test_failed_retry(started_cluster, mode, engine_name): node = started_cluster.instances["instance"] - table_name = f"test.failed_retry_{mode}_{engine_name}" + table_name = f"failed_retry_{mode}_{engine_name}" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" file_path = f"{files_path}/trash_test.csv" @@ -398,9 +398,9 @@ def test_failed_retry(started_cluster, mode, engine_name): @pytest.mark.parametrize("mode", AVAILABLE_MODES) def test_direct_select_file(started_cluster, mode): node = 
started_cluster.instances["instance"] - table_name = f"test.direct_select_file_{mode}" + table_name = f"direct_select_file_{mode}" # A unique path is necessary for repeatable tests - keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" + keeper_path = f"/clickhouse/test_{table_name}_{mode}_{generate_random_string()}" files_path = f"{table_name}_data" file_path = f"{files_path}/test.csv" @@ -461,7 +461,7 @@ def test_direct_select_file(started_cluster, mode): ] == [] # New table with different zookeeper path - keeper_path = f"/clickhouse/test_{table_name}_{mode}_2" + keeper_path = f"{keeper_path}_2" create_table( started_cluster, node, @@ -791,10 +791,12 @@ def test_multiple_tables_streaming_sync(started_cluster, mode): def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): node = started_cluster.instances["instance"] node_2 = started_cluster.instances["instance2"] - table_name = f"multiple_tables_streaming_sync_distributed_{mode}" + # A unique table name is necessary for repeatable tests + table_name = ( + f"multiple_tables_streaming_sync_distributed_{mode}_{generate_random_string()}" + ) dst_table_name = f"{table_name}_dst" - # A unique path is necessary for repeatable tests - keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" + keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 300 row_num = 50 @@ -1058,7 +1060,7 @@ def test_drop_table(started_cluster): def test_s3_client_reused(started_cluster): node = started_cluster.instances["instance"] - table_name = f"test.test_s3_client_reused" + table_name = f"test_s3_client_reused" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" # A unique path is necessary for repeatable tests @@ -1698,10 +1700,10 @@ def test_upgrade(started_cluster): def test_exception_during_insert(started_cluster): node = started_cluster.instances["instance_too_many_parts"] - table_name = f"test_exception_during_insert" + # A unique table name is necessary for repeatable tests + table_name = f"test_exception_during_insert_{generate_random_string()}" dst_table_name = f"{table_name}_dst" - # A unique path is necessary for repeatable tests - keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" + keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 10 @@ -1715,6 +1717,7 @@ def test_exception_during_insert(started_cluster): "keeper_path": keeper_path, }, ) + node.rotate_logs() total_values = generate_random_files( started_cluster, files_path, files_to_generate, start_ind=0, row_num=1 ) @@ -1731,34 +1734,49 @@ def test_exception_during_insert(started_cluster): ) assert "Too many parts" in exception + original_parts_to_throw_insert = 0 + modified_parts_to_throw_insert = 10 node.replace_in_config( "/etc/clickhouse-server/config.d/merge_tree.xml", - "parts_to_throw_insert>0", - "parts_to_throw_insert>10", + f"parts_to_throw_insert>{original_parts_to_throw_insert}", + f"parts_to_throw_insert>{modified_parts_to_throw_insert}", ) - node.restart_clickhouse() + try: + node.restart_clickhouse() - def get_count(): - return int(node.query(f"SELECT count() FROM {dst_table_name}")) + def get_count(): + return int(node.query(f"SELECT count() FROM {dst_table_name}")) - expected_rows = 10 - for _ in range(20): - if expected_rows == get_count(): - break - time.sleep(1) - assert expected_rows == get_count() + expected_rows = 10 + for _ in range(20): + if expected_rows == get_count(): + break + 
time.sleep(1) + assert expected_rows == get_count() + finally: + node.replace_in_config( + "/etc/clickhouse-server/config.d/merge_tree.xml", + f"parts_to_throw_insert>{modified_parts_to_throw_insert}", + f"parts_to_throw_insert>{original_parts_to_throw_insert}", + ) + node.restart_clickhouse() def test_commit_on_limit(started_cluster): node = started_cluster.instances["instance"] - table_name = f"test_commit_on_limit" + # A unique table name is necessary for repeatable tests + table_name = f"test_commit_on_limit_{generate_random_string()}" dst_table_name = f"{table_name}_dst" - # A unique path is necessary for repeatable tests - keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" + keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 10 + failed_files_event_before = int( + node.query( + "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" + ) + ) create_table( started_cluster, node, @@ -1833,7 +1851,8 @@ def test_commit_on_limit(started_cluster): assert "test_999999.csv" in get_processed_files() - assert 1 == int( + node.count_in_log(f"Setting file {files_path}/test_9999.csv as failed") + assert failed_files_event_before + 1 == int( node.query( "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" ) From f11478398ec563218644eb3d8c16ae6f223c1a13 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 14 Aug 2024 16:43:26 +0000 Subject: [PATCH 2226/2361] Update version_date.tsv and changelogs after v24.3.7.30-lts --- docs/changelogs/v24.3.7.30-lts.md | 29 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 30 insertions(+) create mode 100644 docs/changelogs/v24.3.7.30-lts.md diff --git a/docs/changelogs/v24.3.7.30-lts.md b/docs/changelogs/v24.3.7.30-lts.md new file mode 100644 index 00000000000..f945a54840f --- /dev/null +++ b/docs/changelogs/v24.3.7.30-lts.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.7.30-lts (c8a28cf4331) FIXME as compared to v24.3.6.48-lts (b2d33c3c45d) + +#### Improvement +* Backported in [#68103](https://github.com/ClickHouse/ClickHouse/issues/68103): Distinguish booleans and integers while parsing values for custom settings: ``` SET custom_a = true; SET custom_b = 1; ```. [#62206](https://github.com/ClickHouse/ClickHouse/pull/62206) ([Vitaly Baranov](https://github.com/vitlibar)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#67931](https://github.com/ClickHouse/ClickHouse/issues/67931): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#68062](https://github.com/ClickHouse/ClickHouse/issues/68062): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)). +* Backported in [#67812](https://github.com/ClickHouse/ClickHouse/issues/67812): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Backported in [#67848](https://github.com/ClickHouse/ClickHouse/issues/67848): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#68271](https://github.com/ClickHouse/ClickHouse/issues/68271): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67806](https://github.com/ClickHouse/ClickHouse/issues/67806): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67834](https://github.com/ClickHouse/ClickHouse/issues/67834): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#68206](https://github.com/ClickHouse/ClickHouse/issues/68206): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#68089](https://github.com/ClickHouse/ClickHouse/issues/68089): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#68120](https://github.com/ClickHouse/ClickHouse/issues/68120): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Update version after release. [#67676](https://github.com/ClickHouse/ClickHouse/pull/67676) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Backported in [#68074](https://github.com/ClickHouse/ClickHouse/issues/68074): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)). 
+ diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index f46353277e2..71a4a722a36 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -13,6 +13,7 @@ v24.4.4.113-stable 2024-08-02 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 +v24.3.7.30-lts 2024-08-14 v24.3.6.48-lts 2024-08-02 v24.3.5.46-lts 2024-07-03 v24.3.4.147-lts 2024-06-13 From 7b1bca2b488685e3953c9b2950d788d565bff73d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Wed, 14 Aug 2024 16:55:59 +0000 Subject: [PATCH 2227/2361] Add missing assertion --- tests/integration/test_storage_s3_queue/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 664d537a8d1..34fb1eaf1fe 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1851,7 +1851,9 @@ def test_commit_on_limit(started_cluster): assert "test_999999.csv" in get_processed_files() - node.count_in_log(f"Setting file {files_path}/test_9999.csv as failed") + assert 1 == int( + node.count_in_log(f"Setting file {files_path}/test_9999.csv as failed") + ) assert failed_files_event_before + 1 == int( node.query( "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" From 209d4eb016a58acb27826ce96e61db884490b66f Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 14 Aug 2024 19:00:17 +0200 Subject: [PATCH 2228/2361] Fix build --- src/Interpreters/Cache/Metadata.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 7e4b76d3cc6..6399691bcf6 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -705,7 +705,8 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optionalavailable(); - if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds)) + std::string failure_reason; + if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds, failure_reason)) { LOG_TEST( log, "Failed to reserve space during background download " From 207d160dd2fff9cb0cb4ec95970cd79c4e13d86b Mon Sep 17 00:00:00 2001 From: Han Fei Date: Wed, 14 Aug 2024 19:01:54 +0200 Subject: [PATCH 2229/2361] address comments --- src/Storages/MergeTree/KeyCondition.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index a297440e8f8..98c02b3e894 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -888,12 +888,17 @@ static Field applyFunctionForField( return (*col)[0]; } +/// applyFunction will execute the function with one `field` or the column which `field` refers to. static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & current_type, const FieldRef & field) { + chassert(func != nullptr); /// Fallback for fields without block reference. if (field.isExplicit()) return applyFunctionForField(func, current_type, field); + /// We will cache the function result inside `field.columns`, because this function will call many times + /// from many fields from same column. When the column is huge, for example there are thousands of marks, we need a cache. 
+ /// The cache key is like `_[function_pointer]_[param_column_id]` to identify a unique pair. WriteBufferFromOwnString buf; writeText("_", buf); writePointerHex(func.get(), buf); @@ -910,6 +915,7 @@ static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & if (result_idx == columns->size()) { + /// When cache is missed, we calculate the whole column where the field comes from. This will avoid repeated calculation. ColumnsWithTypeAndName args{(*columns)[field.column_idx]}; field.columns->emplace_back(ColumnWithTypeAndName {nullptr, func->getResultType(), result_name}); (*columns)[result_idx].column = func->execute(args, (*columns)[result_idx].type, columns->front().column->size()); From 2e5f45a7ad4924affb1ff8b0e5a40b59b6549621 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Wed, 14 Aug 2024 17:53:33 +0000 Subject: [PATCH 2230/2361] rename: S3DiskNoKeyErrors -> DiskS3NoSuchKeyErrors --- src/Common/CurrentMetrics.cpp | 2 +- src/IO/S3/Client.cpp | 4 ++-- tests/integration/test_checking_s3_blobs_paranoid/test.py | 2 +- tests/integration/test_storage_delta/test.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index b6dd14d292c..67890568941 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -307,7 +307,7 @@ M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \ M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \ \ - M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ + M(DiskS3NoSuchKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ #ifdef APPLY_FOR_EXTERNAL_METRICS #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index a966e370ca1..8338a235387 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -46,7 +46,7 @@ namespace ProfileEvents namespace CurrentMetrics { - extern const Metric S3DiskNoKeyErrors; + extern const Metric DiskS3NoSuchKeyErrors; } namespace DB @@ -701,7 +701,7 @@ RequestResult Client::processRequestResult(RequestResult && outcome) const return std::forward(outcome); if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) - CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); + CurrentMetrics::add(CurrentMetrics::DiskS3NoSuchKeyErrors); String enriched_message = fmt::format( "{} {}", diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index 73f2888ce00..76a0f30f82e 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -708,7 +708,7 @@ def test_no_key_found_disk(cluster, broken_s3): """ SELECT value FROM system.metrics - WHERE metric = 'S3DiskNoKeyErrors' + WHERE metric = 'DiskS3NoSuchKeyErrors' """ ).strip() ) diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 054b79ff6fe..a595d01e6b3 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -464,7 +464,7 @@ def test_restart_broken(started_cluster): """ SELECT value FROM system.metrics - WHERE metric = 
'S3DiskNoKeyErrors' + WHERE metric = 'DiskS3NoSuchKeyErrors' """ ).strip() ) From 5e037e5ba852ebd1984d957f21a4925fea8de2ff Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Wed, 14 Aug 2024 20:45:50 +0200 Subject: [PATCH 2231/2361] CI: Minor fixes for changelog and release exceptions --- .github/workflows/create_release.yml | 1 + tests/ci/changelog.py | 2 -- tests/ci/create_release.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 73613c65266..eb16c25f604 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -101,6 +101,7 @@ jobs: --volume=".:/wd" --workdir="/wd" \ clickhouse/style-test \ ./tests/ci/changelog.py -v --debug-helpers \ + --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \ --jobs=5 \ --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index b7f73f22016..554ba339892 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -19,7 +19,6 @@ from env_helper import TEMP_PATH from git_helper import git_runner, is_shallow from github_helper import GitHub, PullRequest, PullRequests, Repository from s3_helper import S3Helper -from get_robot_token import get_best_robot_token from ci_utils import Shell from version_helper import ( FILE_WITH_VERSION_PATH, @@ -172,7 +171,6 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--gh-user-or-token", help="user name or GH token to authenticate", - default=get_best_robot_token(), ) parser.add_argument( "--gh-password", diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index b5ea61e1952..68268b033fe 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -484,7 +484,7 @@ class ReleaseInfo: ) else: if not dry_run: - assert not self.changelog_pr + assert not self.version_bump_pr self.prs_merged = res From 7251bc59f88293e789de7bfcd33f43511a9b04f5 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 14 Aug 2024 20:22:19 +0000 Subject: [PATCH 2232/2361] Fix tidy build --- src/Columns/ColumnObject.cpp | 6 +++--- src/Columns/ColumnObject.h | 6 +++--- src/DataTypes/DataTypesBinaryEncoding.cpp | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index d4f752cda95..b7194ef50e7 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -250,7 +250,7 @@ void ColumnObject::insertData(const char *, size_t) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertData is not supported for {}", getName()); } -ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(const std::string_view path) +ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(std::string_view path) { if (dynamic_paths.size() == max_dynamic_paths) return nullptr; @@ -263,7 +263,7 @@ ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(const std::string_view path return it_ptr->second; } -void ColumnObject::addNewDynamicPath(const std::string_view path) +void ColumnObject::addNewDynamicPath(std::string_view path) { if (!tryToAddNewDynamicPath(path)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add new dynamic path as the limit ({}) on dynamic paths is reached", max_dynamic_paths); @@ -605,7 +605,7 @@ void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::Co } } -void ColumnObject::serializePathAndValueIntoSharedData(ColumnString 
* shared_data_paths, ColumnString * shared_data_values, const std::string_view path, const IColumn & column, size_t n) +void ColumnObject::serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n) { /// Don't store Null values in shared data. We consider Null value equivalent to the absence /// of this path in the row because we cannot distinguish these 2 cases for dynamic paths. diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 8412a50281a..db31674daec 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -215,15 +215,15 @@ public: /// Try to add new dynamic path. Returns pointer to the new dynamic /// path column or nullptr if limit on dynamic paths is reached. - ColumnDynamic * tryToAddNewDynamicPath(const std::string_view path); + ColumnDynamic * tryToAddNewDynamicPath( std::string_view path); /// Throws an exception if cannot add. - void addNewDynamicPath(const std::string_view path); + void addNewDynamicPath(std::string_view path); void setDynamicPaths(const std::vector & paths); void setMaxDynamicPaths(size_t max_dynamic_paths_); void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } - void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, const std::string_view path, const IColumn & column, size_t n); + void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n); void deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const; /// Paths in shared data are sorted in each row. Use this method to find the lower bound for specific path in the row. diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index 5c32c21bbf3..dc0f2f3f5aa 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -100,7 +100,7 @@ enum class BinaryTypeIndex : uint8_t /// In future we can introduce more arguments in the JSON data type definition. /// To support such changes, use versioning in the serialization of JSON type. -static const UInt8 TYPE_JSON_SERIALIZATION_VERSION = 0; +const UInt8 TYPE_JSON_SERIALIZATION_VERSION = 0; BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) { From da4da136674e5b9e587d535742331d232639e11f Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 14 Aug 2024 22:50:14 +0200 Subject: [PATCH 2233/2361] Fix style --- src/Columns/ColumnObject.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index db31674daec..f530ed29ef3 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -215,7 +215,7 @@ public: /// Try to add new dynamic path. Returns pointer to the new dynamic /// path column or nullptr if limit on dynamic paths is reached. - ColumnDynamic * tryToAddNewDynamicPath( std::string_view path); + ColumnDynamic * tryToAddNewDynamicPath(std::string_view path); /// Throws an exception if cannot add. 
void addNewDynamicPath(std::string_view path); From 3ca00440af8129b64c45cba47f212b8f897b9a54 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 14 Aug 2024 20:51:23 +0000 Subject: [PATCH 2234/2361] Fix off-by-one inline function info in stack traces --- src/Common/StackTrace.cpp | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index 76277cbc993..80329b4aec1 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -248,8 +248,31 @@ void StackTrace::forEachFrame( auto dwarf_it = dwarfs.try_emplace(object->name, object->elf).first; DB::Dwarf::LocationInfo location; - if (dwarf_it->second.findAddress( - uintptr_t(current_frame.physical_addr), location, mode, inline_frames)) + uintptr_t adjusted_addr = uintptr_t(current_frame.physical_addr); + if (i > 0) + { + /// For non-innermost stack frames, the address points to the *next* instruction + /// after the `call` instruction. But we want the line number and inline function + /// information for the `call` instruction. So subtract 1 from the address. + /// Caveats: + /// * The `call` instruction can be longer than 1 byte, so addr-1 is in the middle + /// of the instruction. That's ok for debug info lookup: address ranges in debug + /// info cover the whole instruction. + /// * If the stack trace unwound out of a signal handler, the stack frame just + /// outside the signal didn't do a function call. It was interrupted by signal. + /// There's no `call` instruction, and decrementing the address is incorrect. + /// We may get incorrect line number and inlined functions in this case. + /// Unfortunate. + /// Note that libunwind, when producing this stack trace, knows whether this + /// frame is interrupted by signal or not. We could propagate this information + /// from libunwind to here and avoid subtracting 1 in this case, but currently + /// we don't do this. + /// But we don't do the decrement for findSymbol() below (because `call` is + /// ~never the last instruction of a function), so the function name should be + /// correct for both pre-signal frames and regular frames. + adjusted_addr -= 1; + } + if (dwarf_it->second.findAddress(adjusted_addr, location, mode, inline_frames)) { current_frame.file = location.file.toString(); current_frame.line = location.line; From aa38024b0e0cdc4a839446df2e7de974efc6b7e7 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 14 Aug 2024 20:59:08 +0000 Subject: [PATCH 2235/2361] Fix UBSan: lower upper bound for min_marks_for_concurrent_read --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3ece7b1c5c8..734e67bda24 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -353,10 +353,11 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas( /// We have a special logic for local replica. It has to read less data, because in some cases it should /// merge states of aggregate functions or do some other important stuff other than reading from Disk. 
auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier; - if (pool_settings.min_marks_for_concurrent_read > std::numeric_limits::max()) + const auto min_marks_for_concurrent_read_limit = std::numeric_limits::max() >> 1; + if (pool_settings.min_marks_for_concurrent_read > min_marks_for_concurrent_read_limit) { /// limit min marks to read in case it's big, happened in test since due to settings randomzation - pool_settings.min_marks_for_concurrent_read = std::numeric_limits::max(); + pool_settings.min_marks_for_concurrent_read = min_marks_for_concurrent_read_limit; multiplier = 1.0f; } @@ -529,10 +530,11 @@ Pipe ReadFromMergeTree::readInOrder( }; auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier; - if (pool_settings.min_marks_for_concurrent_read > std::numeric_limits::max()) + const auto min_marks_for_concurrent_read_limit = std::numeric_limits::max() >> 1; + if (pool_settings.min_marks_for_concurrent_read > min_marks_for_concurrent_read_limit) { /// limit min marks to read in case it's big, happened in test since due to settings randomzation - pool_settings.min_marks_for_concurrent_read = std::numeric_limits::max(); + pool_settings.min_marks_for_concurrent_read = min_marks_for_concurrent_read_limit; multiplier = 1.0f; } From 43f4a3665d0c2efa8a4b93dc8b39de0ca81985d1 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 14 Aug 2024 21:35:10 +0000 Subject: [PATCH 2236/2361] Check that setProcessListElement() is not called on global context --- src/Interpreters/Context.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 4a08fd5fe5b..3cc09370e86 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -2957,6 +2957,9 @@ ProgressCallback Context::getProgressCallback() const void Context::setProcessListElement(QueryStatusPtr elem) { + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have process list element"); + /// Set to a session or query. In the session, only one query is processed at a time. Therefore, the lock is not needed. process_list_elem = elem; has_process_list_elem = elem.get(); From 9f44cd85925b3d691afd71a9ecb9ccc9ec3bf57a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 14 Aug 2024 22:19:17 +0000 Subject: [PATCH 2237/2361] fix typo --- src/Storages/MergeTree/MergeTreeSettings.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 6c9a29c5bb9..149f9a3e80b 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -84,7 +84,7 @@ struct Settings; M(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ M(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \ M(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ - M(Milliseconds, merge_preferred_step_execution_time_ms, 100, "Target time to execetion of one step of merge. Can be exceeded if one step takes longer time", 0) \ + M(Milliseconds, merge_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge. Can be exceeded if one step takes longer time", 0) \ \ /** Inserts settings. 
*/ \ M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ From b077f2cc9c11b01c443eb1ff976457965f7297ee Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Thu, 15 Aug 2024 02:31:10 +0200 Subject: [PATCH 2238/2361] performance comparison test for output_format_parquet_write_page_index setting added new performance comparison test for output_format_parquet_write_page_index setting --- tests/performance/parquet_read_with_index.xml | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 tests/performance/parquet_read_with_index.xml diff --git a/tests/performance/parquet_read_with_index.xml b/tests/performance/parquet_read_with_index.xml new file mode 100644 index 00000000000..1bb2d8eb4a2 --- /dev/null +++ b/tests/performance/parquet_read_with_index.xml @@ -0,0 +1,30 @@ + + + INSERT INTO FUNCTION file('test_pq_index', Parquet) SELECT * FROM generateRandom('int64_column Nullable(Int64), tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Tuple(i UInt32, j UInt32)),array_tuple_column Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))), map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') limit 1000000 SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=true + + + + SELECT * FROM file('test_pq_index', Parquet, 'tuple_column Tuple(a Nullable(String))') Format Null + + + + SELECT tuple_column.a FROM file('test_pq_index', Parquet) Format Null + + + + SELECT tuple_column.a FROM file('test_pq_index', Parquet, 'tuple_column Tuple(a Nullable(String))') Format Null + + + + SELECT tuple_column.c.i FROM file('test_pq_index', Parquet) Format Null + + + + SELECT * FROM file('test_pq_index', Parquet, 'array_tuple_column Array (Tuple(a Nullable(String)))') Format Null + + + + SELECT * FROM file('test_pq_index', Parquet, 'map_tuple_column Map(String, Tuple(a Nullable(String)))') Format Null + + + From 690c4d0803366a3a6fe1887e5c01b35d80f501f9 Mon Sep 17 00:00:00 2001 From: lgbo-ustc Date: Thu, 15 Aug 2024 09:04:22 +0800 Subject: [PATCH 2239/2361] update --- src/Processors/Transforms/WindowTransform.cpp | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index cae817380e0..bd11aa4cd28 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -1157,14 +1157,7 @@ void WindowTransform::appendChunk(Chunk & chunk) // Initialize output columns. for (auto & ws : workspaces) { - if (ws.window_function_impl) - block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); - else - { - /// `castColumn` returns nullptr at default, so it's OK to put nullptr as a placeholder here - /// it should not be used in fact. - block.casted_columns.push_back(nullptr); - } + block.casted_columns.push_back(ws.window_function_impl ? 
ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices) : nullptr); block.output_columns.push_back(ws.aggregate_function->getResultType() ->createColumn()); From e6c03886f06ee19e3d677be073814154c1842ef6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Aug 2024 03:55:12 +0200 Subject: [PATCH 2240/2361] Add a test for #57324 --- .../03223_analyzer_with_cube_fuzz.reference | 0 .../03223_analyzer_with_cube_fuzz.sql | 27 +++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.reference create mode 100644 tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql diff --git a/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.reference b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql new file mode 100644 index 00000000000..c19d11fbe0c --- /dev/null +++ b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`a` Int64, `b` Int64) ENGINE = MergeTree ORDER BY a; +CREATE TABLE t2 (`key` Int32, `val` Int64) ENGINE = MergeTree ORDER BY key; +insert into t1 Select number, number from numbers(100000); +insert into t2 Select number, number from numbers(100000); + + +SELECT + 1 * 1000.0001, + (count(1.) = -2147483647) AND (count(a) = 1.1920928955078125e-7) AND (count(val) = 1048577) AND (sum(val) = ((NULL * 1048576) / -9223372036854775807)) AND (sum(a) = ((9223372036854775806 * 10000000000.) / 1048575)) +FROM +( + SELECT + a, + val + FROM t1 + FULL OUTER JOIN t2 ON (t1.a = t2.key) OR (1 * inf) OR (t1.b = t2.key) +) +GROUP BY '65537' + WITH CUBE +FORMAT Null +SETTINGS max_block_size = 100, join_use_nulls = 1, max_execution_time = 1., max_result_rows = 0, max_result_bytes = 0; -- { serverError TIMEOUT_EXCEEDED } + +DROP TABLE t1; +DROP TABLE t2; From 7bebc448f30a026e41563553788f463243aa18fd Mon Sep 17 00:00:00 2001 From: megao Date: Thu, 15 Aug 2024 10:42:22 +0800 Subject: [PATCH 2241/2361] fix progress value of view_refreshview --- src/Storages/System/StorageSystemViewRefreshes.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 30539ed6b6a..97065f2b2fb 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -86,7 +86,8 @@ void StorageSystemViewRefreshes::fillData( res_columns[i++]->insert(refresh.exception_message); res_columns[i++]->insert(refresh.refresh_count); - res_columns[i++]->insert(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read); +// res_columns[i++]->insert(std::min(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read, static_cast(1))); + res_columns[i++]->insert(std::min(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read, 1.0)); res_columns[i++]->insert(refresh.progress.elapsed_ns / 1e9); res_columns[i++]->insert(refresh.progress.read_rows); res_columns[i++]->insert(refresh.progress.read_bytes); From c9bfff3934ba5257dbfd1d43294b27155c6aec30 Mon Sep 17 00:00:00 2001 From: megao Date: Thu, 15 Aug 2024 10:52:17 +0800 Subject: [PATCH 2242/2361] fix progress value of view_refreshview --- 
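Note on the std::min(..., 1.0) clamp kept below (illustrative numbers, not taken from a real refresh):
read_rows can overtake total_rows_to_read, which is only an estimate, so the unclamped ratio may exceed 1.

    // read_rows = 1200, total_rows_to_read = 1000 (estimate)
    // without clamp: 1200. / 1000                 -> 1.2
    // with clamp:    std::min(1200. / 1000, 1.0)  -> 1.0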
src/Storages/System/StorageSystemViewRefreshes.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 97065f2b2fb..3941c4c39c2 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -86,7 +86,6 @@ void StorageSystemViewRefreshes::fillData( res_columns[i++]->insert(refresh.exception_message); res_columns[i++]->insert(refresh.refresh_count); -// res_columns[i++]->insert(std::min(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read, static_cast(1))); res_columns[i++]->insert(std::min(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read, 1.0)); res_columns[i++]->insert(refresh.progress.elapsed_ns / 1e9); res_columns[i++]->insert(refresh.progress.read_rows); From 0bb076a4d381fcc4e9827bebedbbe46ded9b9278 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 08:08:00 +0000 Subject: [PATCH 2243/2361] Improve schema inference of date times --- src/Core/Settings.h | 1 + src/Core/SettingsChangesHistory.cpp | 1 + src/Formats/EscapingRuleUtils.cpp | 5 +- src/Formats/FormatFactory.cpp | 1 + src/Formats/FormatSettings.h | 1 + src/Formats/SchemaInferenceUtils.cpp | 119 +++++--- src/IO/ReadHelpers.cpp | 29 +- src/IO/ReadHelpers.h | 77 +++-- src/IO/parseDateTimeBestEffort.cpp | 77 ++++- src/IO/parseDateTimeBestEffort.h | 8 + .../03222_date_time_inference.reference | 253 +++++++++++++++++ .../0_stateless/03222_date_time_inference.sql | 268 ++++++++++++++++++ 12 files changed, 761 insertions(+), 79 deletions(-) create mode 100644 tests/queries/0_stateless/03222_date_time_inference.reference create mode 100644 tests/queries/0_stateless/03222_date_time_inference.sql diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0808e8eb49f..ad6cc89c5cd 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -1136,6 +1136,7 @@ class IColumn; M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \ + M(Bool, input_format_try_infer_datetimes_only_datetime64, false, "When input_format_try_infer_datetimes is enabled, infer only DateTime64 but not DateTime types", 0) \ M(Bool, input_format_try_infer_exponent_floats, false, "Try to infer floats in exponential notation while schema inference in text formats (except JSON, where exponent numbers are always inferred)", 0) \ M(Bool, output_format_markdown_escape_special_characters, false, "Escape special characters in Markdown", 0) \ M(Bool, input_format_protobuf_flatten_google_wrappers, false, "Enable Google wrappers for regular non-nested columns, e.g. google.protobuf.StringValue 'str' for String column 'str'. 
For Nullable columns empty wrappers are recognized as defaults, and missing as nulls", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 20a8721c10e..b344a141a46 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -88,6 +88,7 @@ static std::initializer_list(have_dates) + static_cast(have_datetimes))); + bool have_datetimes = type_indexes.contains(TypeIndex::DateTime); + bool have_datetimes64 = type_indexes.contains(TypeIndex::DateTime64); + bool all_dates_or_datetimes = (type_indexes.size() == (static_cast(have_dates) + static_cast(have_datetimes) + static_cast(have_datetimes64))); - if (!all_dates_or_datetimes && (have_dates || have_datetimes)) + if (!all_dates_or_datetimes && (have_dates || have_datetimes || have_datetimes64)) { for (auto & type : data_types) { - if (isDate(type) || isDateTime64(type)) + if (isDate(type) || isDateTime(type) || isDateTime64(type)) type = std::make_shared(); } type_indexes.erase(TypeIndex::Date); type_indexes.erase(TypeIndex::DateTime); + type_indexes.erase(TypeIndex::DateTime64); type_indexes.insert(TypeIndex::String); return; } - if (have_dates && have_datetimes) + for (auto & type : data_types) { - for (auto & type : data_types) + if (isDate(type) && (have_datetimes || have_datetimes64)) { - if (isDate(type)) + if (have_datetimes64) type = std::make_shared(9); + else + type = std::make_shared(); + type_indexes.erase(TypeIndex::Date); + } + else if (isDateTime(type) && have_datetimes64) + { + type = std::make_shared(9); + type_indexes.erase(TypeIndex::DateTime); } - - type_indexes.erase(TypeIndex::Date); } } @@ -697,55 +705,87 @@ namespace bool tryInferDate(std::string_view field) { - if (field.empty()) + /// Minimum length of Date text representation is 8 (YYYY-M-D) and maximum is 10 (YYYY-MM-DD) + if (field.size() < 8 || field.size() > 10) return false; - ReadBufferFromString buf(field); - Float64 tmp_float; /// Check if it's just a number, and if so, don't try to infer Date from it, /// because we can interpret this number as a Date (for example 20000101 will be 2000-01-01) /// and it will lead to inferring Date instead of simple Int64/UInt64 in some cases. - if (tryReadFloatText(tmp_float, buf) && buf.eof()) - return false; - - buf.seek(0, SEEK_SET); /// Return position to the beginning - - DayNum tmp; - return tryReadDateText(tmp, buf) && buf.eof(); - } - - bool tryInferDateTime(std::string_view field, const FormatSettings & settings) - { - if (field.empty()) + if (std::all_of(field.begin(), field.end(), isNumericASCII)) return false; ReadBufferFromString buf(field); - Float64 tmp_float; + DayNum tmp; + return tryReadDateText(tmp, buf, DateLUT::instance(), /*allowed_delimiters=*/"-/:") && buf.eof(); + } + + DataTypePtr tryInferDateTimeOrDateTime64(std::string_view field, const FormatSettings & settings) + { + /// Don't try to infer DateTime if string is too long. + /// It's difficult to say what is the real maximum length of + /// DateTime we can parse using BestEffort approach. + /// 50 symbols is more or less valid limit for date times that makes sense. + if (field.empty() || field.size() > 50) + return nullptr; + + /// Check that we have at least one digit, don't infer datetime form strings like "Apr"/"May"/etc. 
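+        /// (cheap pre-filter: strings without a single digit, e.g. "Mar" or "May", are rejected here instead of going through the parse attempts below)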
+ if (!std::any_of(field.begin(), field.end(), isNumericASCII)) + return nullptr; + /// Check if it's just a number, and if so, don't try to infer DateTime from it, /// because we can interpret this number as a timestamp and it will lead to - /// inferring DateTime instead of simple Int64/Float64 in some cases. + /// inferring DateTime instead of simple Int64 in some cases. + if (std::all_of(field.begin(), field.end(), isNumericASCII)) + return nullptr; + + ReadBufferFromString buf(field); + Float64 tmp_float; + /// Check if it's a float value, and if so, don't try to infer DateTime from it, + /// because it will lead to inferring DateTime instead of simple Float64 in some cases. if (tryReadFloatText(tmp_float, buf) && buf.eof()) - return false; + return nullptr; + + buf.seek(0, SEEK_SET); /// Return position to the beginning + if (!settings.try_infer_datetimes_only_datetime64) + { + time_t tmp; + switch (settings.date_time_input_format) + { + case FormatSettings::DateTimeInputFormat::Basic: + if (tryReadDateTimeText(tmp, buf, DateLUT::instance(), /*allowed_date_delimiters=*/"-/:", /*allowed_time_delimiters=*/":") && buf.eof()) + return std::make_shared(); + break; + case FormatSettings::DateTimeInputFormat::BestEffort: + if (tryParseDateTimeBestEffortStrict(tmp, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(); + break; + case FormatSettings::DateTimeInputFormat::BestEffortUS: + if (tryParseDateTimeBestEffortUSStrict(tmp, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(); + break; + } + } buf.seek(0, SEEK_SET); /// Return position to the beginning DateTime64 tmp; switch (settings.date_time_input_format) { case FormatSettings::DateTimeInputFormat::Basic: - if (tryReadDateTime64Text(tmp, 9, buf) && buf.eof()) - return true; + if (tryReadDateTime64Text(tmp, 9, buf, DateLUT::instance(), /*allowed_date_delimiters=*/"-/:", /*allowed_time_delimiters=*/":") && buf.eof()) + return std::make_shared(9); break; case FormatSettings::DateTimeInputFormat::BestEffort: - if (tryParseDateTime64BestEffort(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof()) - return true; + if (tryParseDateTime64BestEffortStrict(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(9); break; case FormatSettings::DateTimeInputFormat::BestEffortUS: - if (tryParseDateTime64BestEffortUS(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof()) - return true; + if (tryParseDateTime64BestEffortUSStrict(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(9); break; } - return false; + return nullptr; } template @@ -1439,8 +1479,11 @@ DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const Forma if (settings.try_infer_dates && tryInferDate(field)) return std::make_shared(); - if (settings.try_infer_datetimes && tryInferDateTime(field, settings)) - return std::make_shared(9); + if (settings.try_infer_datetimes) + { + if (auto type = tryInferDateTimeOrDateTime64(field, settings)) + return type; + } return nullptr; } diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 9559462e62b..48d788512e4 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -1271,7 +1271,7 @@ template void readJSONArrayInto, void>(PaddedPODArray, 
bool>(PaddedPODArray & s, ReadBuffer & buf); template -ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) +ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters) { static constexpr bool throw_exception = std::is_same_v; @@ -1318,6 +1318,9 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) } else { + if (!isSymbolIn(*buf.position(), allowed_delimiters)) + return error(); + ++buf.position(); if (!append_digit(month)) @@ -1325,7 +1328,11 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) append_digit(month); if (!buf.eof() && !isNumericASCII(*buf.position())) + { + if (!isSymbolIn(*buf.position(), allowed_delimiters)) + return error(); ++buf.position(); + } else return error(); @@ -1338,12 +1345,12 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) return ReturnType(true); } -template void readDateTextFallback(LocalDate &, ReadBuffer &); -template bool readDateTextFallback(LocalDate &, ReadBuffer &); +template void readDateTextFallback(LocalDate &, ReadBuffer &, const char * allowed_delimiters); +template bool readDateTextFallback(LocalDate &, ReadBuffer &, const char * allowed_delimiters); template -ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut) +ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters, const char * allowed_time_delimiters) { static constexpr bool throw_exception = std::is_same_v; @@ -1413,6 +1420,9 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[2]) || !isNumericASCII(s[3]) || !isNumericASCII(s[5]) || !isNumericASCII(s[6]) || !isNumericASCII(s[8]) || !isNumericASCII(s[9])) return false; + + if (!isSymbolIn(s[4], allowed_date_delimiters) || !isSymbolIn(s[7], allowed_date_delimiters)) + return false; } UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); @@ -1443,6 +1453,9 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[3]) || !isNumericASCII(s[4]) || !isNumericASCII(s[6]) || !isNumericASCII(s[7])) return false; + + if (!isSymbolIn(s[2], allowed_time_delimiters) || !isSymbolIn(s[5], allowed_time_delimiters)) + return false; } hour = (s[0] - '0') * 10 + (s[1] - '0'); @@ -1488,10 +1501,10 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D return ReturnType(true); } -template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); +template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); +template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); +template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); +template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); template diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 
ffba4fafb5c..39e1cb12b5c 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -703,13 +703,28 @@ struct NullOutput }; template -ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf); +ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters); + +inline bool isSymbolIn(char symbol, const char * symbols) +{ + if (symbols == nullptr) + return true; + + const char * pos = symbols; + while (*pos) + { + if (*pos == symbol) + return true; + ++pos; + } + return false; +} /// In YYYY-MM-DD format. /// For convenience, Month and Day parts can have single digit instead of two digits. /// Any separators other than '-' are supported. template -inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) +inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; @@ -753,6 +768,9 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) } else { + if (!isSymbolIn(pos[-1], allowed_delimiters)) + return error(); + if (!isNumericASCII(pos[0])) return error(); @@ -768,6 +786,9 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) if (isNumericASCII(pos[-1]) || !isNumericASCII(pos[0])) return error(); + if (!isSymbolIn(pos[-1], allowed_delimiters)) + return error(); + day = pos[0] - '0'; if (isNumericASCII(pos[1])) { @@ -783,7 +804,7 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) return ReturnType(true); } else - return readDateTextFallback(date, buf); + return readDateTextFallback(date, buf, allowed_delimiters); } inline void convertToDayNum(DayNum & date, ExtendedDayNum & from) @@ -797,15 +818,15 @@ inline void convertToDayNum(DayNum & date, ExtendedDayNum & from) } template -inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; LocalDate local_date; if constexpr (throw_exception) - readDateTextImpl(local_date, buf); - else if (!readDateTextImpl(local_date, buf)) + readDateTextImpl(local_date, buf, allowed_delimiters); + else if (!readDateTextImpl(local_date, buf, allowed_delimiters)) return false; ExtendedDayNum ret = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day()); @@ -814,15 +835,15 @@ inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLU } template -inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; LocalDate local_date; if constexpr (throw_exception) - readDateTextImpl(local_date, buf); - else if (!readDateTextImpl(local_date, buf)) + readDateTextImpl(local_date, buf, allowed_delimiters); + else if (!readDateTextImpl(local_date, buf, allowed_delimiters)) return false; /// When the parameter is out of rule or out of range, Date32 uses 1925-01-01 as the default value (-DateLUT::instance().getDayNumOffsetEpoch(), -16436) and Date uses 1970-01-01. 
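/// A minimal usage sketch of the allowed_delimiters parameter added in this commit (illustration only;
/// schema inference passes "-/:" so that e.g. "2020_01_01" is no longer inferred as a Date):
///
///     DayNum day;
///     ReadBufferFromString buf1("2020-01-01");
///     tryReadDateText(day, buf1, DateLUT::instance(), "-/:");  /// true: '-' is an allowed delimiter
///     ReadBufferFromString buf2("2020_01_01");
///     tryReadDateText(day, buf2, DateLUT::instance(), "-/:");  /// false: '_' is rejected by isSymbolIn()
///     ReadBufferFromString buf3("2020_01_01");
///     tryReadDateText(day, buf3, DateLUT::instance());         /// true: the default nullptr allows any delimiter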
@@ -846,19 +867,19 @@ inline void readDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTI readDateTextImpl(date, buf, date_lut); } -inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf) +inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters = nullptr) { - return readDateTextImpl(date, buf); + return readDateTextImpl(date, buf, allowed_delimiters); } -inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_delimiters = nullptr) { - return readDateTextImpl(date, buf, time_zone); + return readDateTextImpl(date, buf, time_zone, allowed_delimiters); } -inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_delimiters = nullptr) { - return readDateTextImpl(date, buf, time_zone); + return readDateTextImpl(date, buf, time_zone, allowed_delimiters); } UUID parseUUID(std::span src); @@ -975,13 +996,13 @@ inline T parseFromString(std::string_view str) template -ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut); +ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr); /** In YYYY-MM-DD hh:mm:ss or YYYY-MM-DD format, according to specified time zone. * As an exception, also supported parsing of unix timestamp in form of decimal number. 
*/ template -inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; @@ -1014,6 +1035,9 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[2]) || !isNumericASCII(s[3]) || !isNumericASCII(s[5]) || !isNumericASCII(s[6]) || !isNumericASCII(s[8]) || !isNumericASCII(s[9])) return ReturnType(false); + + if (!isSymbolIn(s[4], allowed_date_delimiters) || !isSymbolIn(s[7], allowed_date_delimiters)) + return ReturnType(false); } UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); @@ -1033,6 +1057,9 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons if (!isNumericASCII(s[11]) || !isNumericASCII(s[12]) || !isNumericASCII(s[14]) || !isNumericASCII(s[15]) || !isNumericASCII(s[17]) || !isNumericASCII(s[18])) return ReturnType(false); + + if (!isSymbolIn(s[13], allowed_time_delimiters) || !isSymbolIn(s[16], allowed_time_delimiters)) + return ReturnType(false); } hour = (s[11] - '0') * 10 + (s[12] - '0'); @@ -1057,11 +1084,11 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons return readIntTextImpl(datetime, buf); } else - return readDateTimeTextFallback(datetime, buf, date_lut); + return readDateTimeTextFallback(datetime, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); } template -inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; @@ -1075,7 +1102,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re { try { - readDateTimeTextImpl(whole, buf, date_lut); + readDateTimeTextImpl(whole, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); } catch (const DB::Exception &) { @@ -1085,7 +1112,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re } else { - auto ok = readDateTimeTextImpl(whole, buf, date_lut); + auto ok = readDateTimeTextImpl(whole, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); if (!ok && (buf.eof() || *buf.position() != '.')) return ReturnType(false); } @@ -1168,14 +1195,14 @@ inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer readDateTimeTextImpl(datetime64, scale, buf, date_lut); } -inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { - return readDateTimeTextImpl(datetime, buf, time_zone); + return readDateTimeTextImpl(datetime, buf, time_zone, allowed_date_delimiters, allowed_time_delimiters); } -inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & 
date_lut = DateLUT::instance()) +inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance(), const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { - return readDateTimeTextImpl(datetime64, scale, buf, date_lut); + return readDateTimeTextImpl(datetime64, scale, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); } inline void readDateTimeText(LocalDateTime & datetime, ReadBuffer & buf) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index e046e837689..68122a37df6 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -82,13 +82,14 @@ struct DateTimeSubsecondPart UInt8 digits; }; -template +template ReturnType parseDateTimeBestEffortImpl( time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, - DateTimeSubsecondPart * fractional) + DateTimeSubsecondPart * fractional, + const char * allowed_date_delimiters = nullptr) { auto on_error = [&](int error_code [[maybe_unused]], FormatStringHelper fmt_string [[maybe_unused]], @@ -170,22 +171,36 @@ ReturnType parseDateTimeBestEffortImpl( fractional->digits = 3; readDecimalNumber<3>(fractional->value, digits + 10); } + else if constexpr (strict) + { + /// Fractional part is not allowed. + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected fractional part"); + } return ReturnType(true); } else if (num_digits == 10 && !year && !has_time) { + if (strict && month) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + /// This is unix timestamp. readDecimalNumber<10>(res, digits); return ReturnType(true); } else if (num_digits == 9 && !year && !has_time) { + if (strict && month) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + /// This is unix timestamp. 
readDecimalNumber<9>(res, digits); return ReturnType(true); } else if (num_digits == 14 && !year && !has_time) { + if (strict && month) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + /// This is YYYYMMDDhhmmss readDecimalNumber<4>(year, digits); readDecimalNumber<2>(month, digits + 4); @@ -197,6 +212,9 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 8 && !year) { + if (strict && month) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + /// This is YYYYMMDD readDecimalNumber<4>(year, digits); readDecimalNumber<2>(month, digits + 4); @@ -272,6 +290,9 @@ ReturnType parseDateTimeBestEffortImpl( else return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected number of decimal digits after year and month: {}", num_digits); } + + if (!isSymbolIn(delimiter_after_year, allowed_date_delimiters)) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: '{}' delimiter between date parts is not allowed", delimiter_after_year); } } else if (num_digits == 2 || num_digits == 1) @@ -403,9 +424,16 @@ ReturnType parseDateTimeBestEffortImpl( else { if (day_of_month) + { + if (strict && hour) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: hour component is duplicated"); + hour = hour_or_day_of_month_or_month; + } else + { day_of_month = hour_or_day_of_month_or_month; + } } } else if (num_digits != 0) @@ -446,6 +474,11 @@ ReturnType parseDateTimeBestEffortImpl( fractional->digits = num_digits; readDecimalNumber(fractional->value, num_digits, digits); } + else if (strict) + { + /// Fractional part is not allowed. + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected fractional part"); + } } else if (c == '+' || c == '-') { @@ -582,12 +615,24 @@ ReturnType parseDateTimeBestEffortImpl( return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: neither Date nor Time was parsed successfully"); if (!day_of_month) + { + if constexpr (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: day of month is required"); day_of_month = 1; + } + if (!month) + { + if constexpr (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month is required"); month = 1; + } if (!year) { + if constexpr (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year is required"); + /// If year is not specified, it will be the current year if the date is unknown or not greater than today, /// otherwise it will be the previous year. /// This convoluted logic is needed to parse the syslog format, which looks as follows: "Mar 3 01:33:48". 
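/// How the strict mode used for schema inference behaves on a few inputs (sketch, consistent with the
/// expectations in 03222_date_time_inference.reference added further below):
///
///     "02/01/17 010203 MSK"  -> 2017-01-01 22:02:03  (day, month and year are all present)
///     "Mar 3 01:33:48"       -> rejected: strict mode requires an explicit year instead of guessing it
///     "2000 01 01 01:00:00"  -> rejected in strict mode, so schema inference keeps it as String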
@@ -654,20 +699,20 @@ ReturnType parseDateTimeBestEffortImpl( return ReturnType(true); } -template -ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) +template +ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters = nullptr) { time_t whole; DateTimeSubsecondPart subsecond = {0, 0}; // needs to be explicitly initialized sine it could be missing from input string if constexpr (std::is_same_v) { - if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond)) + if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters)) return false; } else { - parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond); + parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters); } @@ -730,4 +775,24 @@ bool tryParseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); } +bool tryParseDateTimeBestEffortStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr, allowed_date_delimiters); +} + +bool tryParseDateTimeBestEffortUSStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr, allowed_date_delimiters); +} + +bool tryParseDateTime64BestEffortStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone, allowed_date_delimiters); +} + +bool tryParseDateTime64BestEffortUSStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone, allowed_date_delimiters); +} + } diff --git a/src/IO/parseDateTimeBestEffort.h b/src/IO/parseDateTimeBestEffort.h index 22af44f9e76..6dd052b67a3 100644 --- a/src/IO/parseDateTimeBestEffort.h +++ b/src/IO/parseDateTimeBestEffort.h @@ -63,4 +63,12 @@ void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, bool tryParseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); void parseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); bool tryParseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); + +/// More strict version of best effort parsing. Requires day, month and year to be present, checks for allowed +/// delimiters between date components, makes additional correctness checks. Used in schema inference if date times. 
+bool tryParseDateTimeBestEffortStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); +bool tryParseDateTimeBestEffortUSStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); +bool tryParseDateTime64BestEffortStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); +bool tryParseDateTime64BestEffortUSStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); + } diff --git a/tests/queries/0_stateless/03222_date_time_inference.reference b/tests/queries/0_stateless/03222_date_time_inference.reference new file mode 100644 index 00000000000..3288308a1d0 --- /dev/null +++ b/tests/queries/0_stateless/03222_date_time_inference.reference @@ -0,0 +1,253 @@ +Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +String +2020_01_01 String +2020_1_01 String +2020_01_1 String +2020_1_1 String +2020a01a01 String +2020a1a01 String +2020a01a1 String +2020a1a1 String +20200101 String +DateTime +2020-01-02 18:42:42 DateTime +2020-01-02 18:42:42 DateTime +2020-01-02 18:42:42 DateTime +String +2020_01_01 42:42:42 String +2020a01a01 42:42:42 String +2020-01-01 42.42.42 String +2020-01-01 42 42 42 String +2020-01-01 42a42a42 String +DateTime64 +2020-01-02 18:42:42.424200000 DateTime64(9) +2020-01-02 18:42:42.424200000 DateTime64(9) +2020-01-02 18:42:42.424200000 DateTime64(9) +String +2020_01_01 42:42:42.4242 String +2020a01a01 42:42:42.4242 String +2020-01-01 42.42.42.4242 String +2020-01-01 42 42 42.4242 String +2020-01-01 42a42a42.4242 String +DateTime/DateTime64 best effort +2000-01-01 00:00:00 DateTime +2000-01-01 01:00:00 DateTime +2000-01-01 01:00:00.000000000 DateTime64(9) +2017-01-01 22:02:03 DateTime +2017-01-01 22:02:03.000000000 DateTime64(9) +2017-01-01 21:02:03 DateTime +2017-01-01 21:02:03.000000000 DateTime64(9) +2017-01-01 22:02:03 DateTime +2017-01-01 22:02:03.000000000 DateTime64(9) +2017-01-02 01:02:03 DateTime +2017-01-02 01:02:03.000000000 DateTime64(9) +1970-01-02 01:02:03 DateTime +1970-01-02 01:02:03.000000000 DateTime64(9) +1970-01-02 01:02:03 DateTime +1970-01-02 01:02:03.000000000 DateTime64(9) +2018-02-11 03:40:50 DateTime +2018-02-11 03:40:50.000000000 DateTime64(9) +2000-04-17 01:02:03 DateTime +2000-04-17 01:02:03.000000000 DateTime64(9) +1970-01-02 01:00:00 DateTime +1970-01-02 01:00:00.000000000 DateTime64(9) +1970-01-02 01:02:03 DateTime +1970-01-02 01:02:03.000000000 DateTime64(9) +1970-01-02 01:02:03 DateTime +1970-01-02 01:02:03.000000000 DateTime64(9) +2015-12-31 20:00:00 DateTime +2015-12-31 20:00:00 DateTime +2016-01-01 00:00:00 DateTime +2016-01-01 00:00:00 DateTime +2017-01-01 22:02:03 DateTime +2017-01-01 22:02:03.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 04:04:05 DateTime +2017-01-02 04:04:05.000000000 DateTime64(9) +2017-01-02 
02:34:05 DateTime +2017-01-02 02:34:05.000000000 DateTime64(9) +2017-01-02 00:04:05 DateTime +2017-01-02 00:04:05.000000000 DateTime64(9) +2017-01-02 02:04:05 DateTime +2017-01-02 02:04:05.000000000 DateTime64(9) +2017-01-02 00:04:05 DateTime +2017-01-02 00:04:05.000000000 DateTime64(9) +2017-01-01 18:04:05 DateTime +2017-01-01 18:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-01 23:04:05 DateTime +2017-01-01 23:04:05.000000000 DateTime64(9) +2017-02-01 23:04:05 DateTime +2017-02-01 23:04:05.000000000 DateTime64(9) +2017-06-01 23:04:05 DateTime +2017-06-01 23:04:05.000000000 DateTime64(9) +2017-01-02 00:04:05 DateTime +2017-01-02 00:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 04:04:05 DateTime +2017-01-02 04:04:05.000000000 DateTime64(9) +2017-01-02 04:04:05 DateTime +2017-01-02 04:04:05.000000000 DateTime64(9) +2017-01-02 02:04:05 DateTime +2017-01-02 02:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-04-01 11:22:33 DateTime +2017-04-01 11:22:33.000000000 DateTime64(9) +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-02 11:22:33 DateTime +2017-04-02 11:22:33.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-02 01:22:33 DateTime +2017-04-02 01:22:33.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-02 01:02:33 DateTime +2017-04-02 01:02:33.000000000 DateTime64(9) +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-01 21:02:03 DateTime +2017-04-01 21:02:03.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-01-01 22:02:03 DateTime +2017-01-01 22:02:03.000000000 DateTime64(9) +2017-04-25 01:02:03 DateTime +2017-04-25 01:02:03.000000000 DateTime64(9) +2017-04-25 01:02:03 DateTime +2017-04-25 01:02:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-24 22:02:03 DateTime +2017-01-24 22:02:03.000000000 DateTime64(9) +2017-01-25 13:02:03 DateTime +2017-01-25 13:02:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-24 22:02:03 DateTime +2017-01-24 22:02:03.000000000 DateTime64(9) +2017-01-24 22:02:03 DateTime +2017-01-24 22:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2017-01-25 09:32:03 DateTime +2017-01-25 09:32:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-25 13:02:03 DateTime +2017-01-25 13:02:03.000000000 DateTime64(9) +2017-01-25 13:02:03 DateTime +2017-01-25 13:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 
10:02:03.000000000 DateTime64(9) +2018-02-11 03:40:50 DateTime +2018-02-11 03:40:50.000000000 DateTime64(9) +2018-02-11 03:40:50 DateTime +2018-02-11 03:40:50.000000000 DateTime64(9) +String +2 String +20 String +200 String +2000 String +20000 String +200001 String +2000010 String +20000101 String +200001010 String +2000010101 String +20000101010 String +200001010101 String +2000010101010 String +20000101010101 String +2.1 String +20.1 String +200.1 String +2000.1 String +20000.1 String +200001.1 String +2000010.1 String +20000101.1 String +200001010.1 String +2000010101.1 String +20000101010.1 String +200001010101.1 String +2000010101010.1 String +20000101010101.1 String +Mar String +Mar1 String +Mar 1 String +Mar01 String +Mar 01 String +Mar2020 String +Mar 2020 String +Mar012020 String +Mar 012020 String +Mar01012020 String +Mar 01012020 String +Mar0101202001 String +Mar 0101202001 String +Mar010120200101 String +Mar 010120200101 String +Mar01012020010101 String +Mar 01012020010101 String +Mar01012020010101.000 String +Mar 0101202001010101.000 String +2000 01 01 01:00:00 String +2000 01 01 01:00:00.000 String +2000a01a01 01:00:00 String +2000a01a01 01:00:00.000 String +2000-01-01 01 00 00 String +2000-01-01 01 00 00.000 String +2000-01-01 01-00-00 String +2000-01-01 01-00-00.000 String +2000-01-01 01a00a00 String +2000-01-01 01a00a00.000 String +2000-01 01:00:00 String +2000-01 01:00:00.000 String +2000 01 String +2000-01 String +Mar 2000 00:00:00 String +Mar 2000 00:00:00.000 String +2000 00:00:00 String +2000 00:00:00.000 String +Mar 2000-01-01 00:00:00 String +Mar 2000-01-01 00:00:00.000 String diff --git a/tests/queries/0_stateless/03222_date_time_inference.sql b/tests/queries/0_stateless/03222_date_time_inference.sql new file mode 100644 index 00000000000..01266a88d55 --- /dev/null +++ b/tests/queries/0_stateless/03222_date_time_inference.sql @@ -0,0 +1,268 @@ +set input_format_try_infer_datetimes = 1; +set input_format_try_infer_dates = 1; +set schema_inference_make_columns_nullable = 0; +set input_format_json_try_infer_numbers_from_strings = 0; + +select 'Date'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:1:01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:1:1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-1-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-1-1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/1/01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/1/1"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_1_01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_1_1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a1a01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a1"}'); +select x, 
toTypeName(x) from format(JSONEachRow, '{"x" : "2020a1a1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20200101"}'); + +select 'DateTime'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42:42:42"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42.42.42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42 42 42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42a42a42"}'); + +select 'DateTime64'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42:42:42.4242"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42.42.42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42 42 42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42a42a42.4242"}'); + +set date_time_input_format='best_effort'; +select 'DateTime/DateTime64 best effort'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 MSK+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 MSK+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/1970 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/1970 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/70 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/70 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "11 Feb 2018 06:40:50.000 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "17 Apr 2000 2 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "17 Apr 2000 2 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102 01:00:00.000"}'); +select x, toTypeName(x) from 
format(JSONEachRow, '{"x" : "19700102010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102010203Z.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1970/01/02 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1970/01/02 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01UTC"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "201701 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "201701 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+030"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+030"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+900"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+900"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05GMT"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000GMT"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD Feb"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD Feb"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD Jun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD Jun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, 
'{"x" : "2017-01-02T03:04:05"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05-0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000-0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 01 11:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 01 11:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 01:2:3 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 01:2:3.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:02:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:02:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 11:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 11:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:03"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:03.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0400"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0400"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 2 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 2 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : 
"2017 Jan 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Jan 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 01:02:03"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 01:02:03.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z+03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z+03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +0300 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +0300 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z+03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z+03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +03:30 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +03:30 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z Mon"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z Mon"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z PM +03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z PM +03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 11 Feb 2018 06:40:50.000 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun, 11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun, 11 Feb 2018 
06:40:50.000 +0300"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001010101.000"}'); 
+select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01 00 00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01 00 00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01-00-00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01-00-00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01a00a00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01a00a00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00.000"}'); + + From 8950491fa5af2f1abacbba86181f16fb512b8004 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Aug 2024 11:18:15 +0200 Subject: [PATCH 2244/2361] Fix unit test build --- src/Interpreters/tests/gtest_filecache.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 36acc319f4e..fd602ab5918 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -246,7 +246,8 @@ void download(FileSegment & file_segment) ASSERT_EQ(file_segment.state(), State::DOWNLOADING); ASSERT_EQ(file_segment.getDownloadedSize(), 0); - ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000)); + std::string failure_reason; + ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason)); download(cache_base_path, file_segment); ASSERT_EQ(file_segment.state(), State::DOWNLOADING); @@ -258,7 +259,8 @@ void assertDownloadFails(FileSegment & file_segment) { ASSERT_EQ(file_segment.getOrSetDownloader(), FileSegment::getCallerId()); ASSERT_EQ(file_segment.getDownloadedSize(), 0); - ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000)); + std::string failure_reason; + ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason)); file_segment.complete(); } @@ -957,10 +959,11 @@ TEST_F(FileCacheTest, temporaryData) { ASSERT_EQ(some_data_holder->size(), 5); + std::string failure_reason; for (auto & segment : *some_data_holder) { ASSERT_TRUE(segment->getOrSetDownloader() == DB::FileSegment::getCallerId()); - ASSERT_TRUE(segment->reserve(segment->range().size(), 1000)); + ASSERT_TRUE(segment->reserve(segment->range().size(), 1000, failure_reason)); download(*segment); segment->complete(); } From 3af8ba2deb99a0450c448d12a9c1da6858aec987 Mon Sep 
17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 15 Aug 2024 11:21:20 +0200 Subject: [PATCH 2245/2361] Revert "[RFC] Fix settings/current_database in system.processes for async BACKUP/RESTORE" --- src/Backups/BackupsWorker.cpp | 4 ---- src/Interpreters/ProcessList.h | 3 --- 2 files changed, 7 deletions(-) diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 8b45c816817..0b93ae6d547 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -490,8 +490,6 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); - /// Update context to preserve query information in processlist (settings, current_database) - process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, @@ -855,8 +853,6 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); - /// Update context to preserve query information in processlist (settings, current_database) - process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index 248ba947bc1..accb73e12df 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -244,9 +244,6 @@ public: /// Same as checkTimeLimit but it never throws [[nodiscard]] bool checkTimeLimitSoft(); - /// Use it in case of the query left in background to execute asynchronously - void updateContext(ContextWeakPtr weak_context) { context = std::move(weak_context); } - /// Get the reference for the start of the query. 
Used to synchronize with other Stopwatches UInt64 getQueryCPUStartTime() { return watch.getStart(); } }; From d18b6c63d408c4a70f3e541ce1956a4229bfe452 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1nos=20Benjamin=20Antal?= Date: Thu, 15 Aug 2024 09:41:03 +0000 Subject: [PATCH 2246/2361] Change name of default azurite container to avoid clashing with azure blob storage tests --- tests/integration/helpers/cluster.py | 2 +- tests/integration/test_storage_s3_queue/test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 0b6cf03d467..53f4f1e1f26 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -570,7 +570,7 @@ class ClickHouseCluster: self.spark_session = None self.with_azurite = False - self.azurite_container = "cont" + self.azurite_container = "azurite-container" self.blob_service_client = None self._azurite_port = 0 diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 34fb1eaf1fe..9e3ee19179a 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -232,7 +232,7 @@ def create_table( url = f"http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{files_path}/" engine_def = f"{engine_name}('{url}', {auth_params}, {file_format})" else: - engine_def = f"{engine_name}('{started_cluster.env_variables['AZURITE_CONNECTION_STRING']}', 'cont', '{files_path}/', 'CSV')" + engine_def = f"{engine_name}('{started_cluster.env_variables['AZURITE_CONNECTION_STRING']}', '{started_cluster.azurite_container}', '{files_path}/', 'CSV')" node.query(f"DROP TABLE IF EXISTS {table_name}") create_query = f""" From 9f6e472b0cf03f8018b149ad5f7541b4ddec5735 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Thu, 15 Aug 2024 11:47:41 +0200 Subject: [PATCH 2247/2361] process regexp flags correctly --- src/Common/OptimizedRegularExpression.cpp | 40 ++++++++++++++--------- src/Common/tests/gtest_optimize_re.cpp | 2 ++ 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index 712cab80aff..04e5f846adf 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -244,33 +244,41 @@ const char * analyzeImpl( is_trivial = false; if (!in_square_braces) { - /// Check for case-insensitive flag. - if (pos + 1 < end && pos[1] == '?') + /// it means flag negation + /// there are various possible flags + /// actually only imsU are supported by re2 + auto is_flag_char = [](char x) { - for (size_t offset = 2; pos + offset < end; ++offset) + return x == '-' || x == 'i' || x == 'm' || x == 's' || x == 'U' || x == 'u'; + }; + /// Check for case-insensitive flag. + if (pos + 2 < end && pos[1] == '?' && is_flag_char(pos[2])) + { + size_t offset = 2; + for (; pos + offset < end; ++offset) { - if (pos[offset] == '-' /// it means flag negation - /// various possible flags, actually only imsU are supported by re2 - || (pos[offset] >= 'a' && pos[offset] <= 'z') - || (pos[offset] >= 'A' && pos[offset] <= 'Z')) + if (pos[offset] == 'i') { - if (pos[offset] == 'i') - { - /// Actually it can be negated case-insensitive flag. But we don't care. - has_case_insensitive_flag = true; - break; - } + /// Actually it can be negated case-insensitive flag. But we don't care. 
+ has_case_insensitive_flag = true; } - else + else if (!is_flag_char(pos[offset])) break; } + pos += offset; + /// if this group only contains flags, we have nothing to do. + if (*pos == ')') + { + ++pos; + break; + } } /// (?:regex) means non-capturing parentheses group - if (pos + 2 < end && pos[1] == '?' && pos[2] == ':') + else if (pos + 2 < end && pos[1] == '?' && pos[2] == ':') { pos += 2; } - if (pos + 3 < end && pos[1] == '?' && (pos[2] == '<' || pos[2] == '\'' || (pos[2] == 'P' && pos[3] == '<'))) + else if (pos + 3 < end && pos[1] == '?' && (pos[2] == '<' || pos[2] == '\'' || (pos[2] == 'P' && pos[3] == '<'))) { pos = skipNameCapturingGroup(pos, pos[2] == 'P' ? 3: 2, end); } diff --git a/src/Common/tests/gtest_optimize_re.cpp b/src/Common/tests/gtest_optimize_re.cpp index a9fcb918b24..0730a13f160 100644 --- a/src/Common/tests/gtest_optimize_re.cpp +++ b/src/Common/tests/gtest_optimize_re.cpp @@ -19,6 +19,8 @@ TEST(OptimizeRE, analyze) }; test_f("abc", "abc", {}, true, true); test_f("c([^k]*)de", ""); + test_f("(?-s)bob", "bob", {}, false, true); + test_f("(?s)bob", "bob", {}, false, true); test_f("abc(de)fg", "abcdefg", {}, false, true); test_f("abc(de|xyz)fg", "abc", {"abcdefg", "abcxyzfg"}, false, true); test_f("abc(de?f|xyz)fg", "abc", {"abcd", "abcxyzfg"}, false, true); From bea8e65f4fcfa99cdae21ff4776d509ba4fcd0d7 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 09:48:28 +0000 Subject: [PATCH 2248/2361] Fix tests --- src/IO/parseDateTimeBestEffort.cpp | 20 ++++++++++++++++--- ...ed_dates_in_csv_schema_inference.reference | 2 +- ...03033_dynamic_text_serialization.reference | 10 +++++----- .../03199_json_extract_dynamic.reference | 2 +- ...ad_for_schema_inference_in_cache.reference | 2 +- .../0_stateless/03222_date_time_inference.sql | 1 + 6 files changed, 26 insertions(+), 11 deletions(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 68122a37df6..f220577f2cb 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -82,7 +82,7 @@ struct DateTimeSubsecondPart UInt8 digits; }; -template +template ReturnType parseDateTimeBestEffortImpl( time_t & res, ReadBuffer & in, @@ -686,6 +686,20 @@ ReturnType parseDateTimeBestEffortImpl( } }; + if constexpr (strict) + { + if constexpr (is_64) + { + if (year < 1900) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime64: year {} is less than minimum supported year 1900", year); + } + else + { + if (year < 1970) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year {} is less than minimum supported year 1970", year); + } + } + if (has_time_zone_offset) { res = utc_time_zone.makeDateTime(year, month, day_of_month, hour, minute, second); @@ -707,12 +721,12 @@ ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuf if constexpr (std::is_same_v) { - if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters)) + if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters)) return false; } else { - parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters); + parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters); } diff --git a/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference 
b/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference index be82d744a3b..56293ca0e5d 100644 --- a/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference +++ b/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference @@ -1 +1 @@ -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) diff --git a/tests/queries/0_stateless/03033_dynamic_text_serialization.reference b/tests/queries/0_stateless/03033_dynamic_text_serialization.reference index d965245266c..aa7b3fc83a2 100644 --- a/tests/queries/0_stateless/03033_dynamic_text_serialization.reference +++ b/tests/queries/0_stateless/03033_dynamic_text_serialization.reference @@ -4,7 +4,7 @@ JSON {"d":"str","dynamicType(d)":"String"} {"d":["1","2","3"],"dynamicType(d)":"Array(Int64)"} {"d":"2020-01-01","dynamicType(d)":"Date"} -{"d":"2020-01-01 10:00:00.000000000","dynamicType(d)":"DateTime64(9)"} +{"d":"2020-01-01 10:00:00","dynamicType(d)":"DateTime"} {"d":{"a":"42","b":"str"},"dynamicType(d)":"Tuple(a Int64, b String)"} {"d":{"a":"43"},"dynamicType(d)":"Tuple(a Int64)"} {"d":{"a":"44","c":["1","2","3"]},"dynamicType(d)":"Tuple(a Int64, c Array(Int64))"} @@ -22,7 +22,7 @@ CSV "str","String" "[1,2,3]","Array(Int64)" "2020-01-01","Date" -"2020-01-01 10:00:00.000000000","DateTime64(9)" +"2020-01-01 10:00:00","DateTime" "[1, 'str', [1, 2, 3]]","String" \N,"None" true,"Bool" @@ -32,18 +32,18 @@ TSV str String [1,2,3] Array(Int64) 2020-01-01 Date -2020-01-01 10:00:00.000000000 DateTime64(9) +2020-01-01 10:00:00 DateTime [1, \'str\', [1, 2, 3]] String \N None true Bool Values -(42,'Int64'),(42.42,'Float64'),('str','String'),([1,2,3],'Array(Int64)'),('2020-01-01','Date'),('2020-01-01 10:00:00.000000000','DateTime64(9)'),(NULL,'None'),(true,'Bool') +(42,'Int64'),(42.42,'Float64'),('str','String'),([1,2,3],'Array(Int64)'),('2020-01-01','Date'),('2020-01-01 10:00:00','DateTime'),(NULL,'None'),(true,'Bool') Cast using parsing 42 Int64 42.42 Float64 [1,2,3] Array(Int64) 2020-01-01 Date -2020-01-01 10:00:00.000000000 DateTime64(9) +2020-01-01 10:00:00 DateTime \N None true Bool 42 Int64 diff --git a/tests/queries/0_stateless/03199_json_extract_dynamic.reference b/tests/queries/0_stateless/03199_json_extract_dynamic.reference index 759b7763cd1..955106946ea 100644 --- a/tests/queries/0_stateless/03199_json_extract_dynamic.reference +++ b/tests/queries/0_stateless/03199_json_extract_dynamic.reference @@ -12,7 +12,7 @@ Hello String [1,2,3] Array(Nullable(Int64)) ['str1','str2','str3'] Array(Nullable(String)) [[[1],[2,3,4]],[[5,6],[7]]] Array(Array(Array(Nullable(Int64)))) -['2020-01-01 00:00:00.000000000','2020-01-01 00:00:00.000000000'] Array(Nullable(DateTime64(9))) +['2020-01-01 00:00:00','2020-01-01 00:00:00'] Array(Nullable(DateTime)) ['2020-01-01','2020-01-01 date'] Array(Nullable(String)) ['2020-01-01','2020-01-01 00:00:00','str'] Array(Nullable(String)) ['2020-01-01','2020-01-01 00:00:00','42'] Array(Nullable(String)) diff --git a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference index cd109daac52..13b1138d1c4 100644 --- a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference +++ b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference @@ -1,2 +1,2 @@ x Nullable(Int64) -schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, max_bytes_to_read_for_schema_inference=1000, 
schema_inference_make_columns_nullable=true, try_infer_integers=true, try_infer_dates=true, try_infer_datetimes=true, try_infer_numbers_from_strings=false, read_bools_as_numbers=true, read_bools_as_strings=true, read_objects_as_strings=true, read_numbers_as_strings=true, read_arrays_as_strings=true, try_infer_objects_as_tuples=true, infer_incomplete_types_as_strings=true, try_infer_objects=false, use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=false +schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, max_bytes_to_read_for_schema_inference=1000, schema_inference_make_columns_nullable=true, try_infer_integers=true, try_infer_dates=true, try_infer_datetimes=true, try_infer_datetimes_only_datetime64=false, try_infer_numbers_from_strings=false, read_bools_as_numbers=true, read_bools_as_strings=true, read_objects_as_strings=true, read_numbers_as_strings=true, read_arrays_as_strings=true, try_infer_objects_as_tuples=true, infer_incomplete_types_as_strings=true, try_infer_objects=false, use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=false diff --git a/tests/queries/0_stateless/03222_date_time_inference.sql b/tests/queries/0_stateless/03222_date_time_inference.sql index 01266a88d55..ebd472294be 100644 --- a/tests/queries/0_stateless/03222_date_time_inference.sql +++ b/tests/queries/0_stateless/03222_date_time_inference.sql @@ -2,6 +2,7 @@ set input_format_try_infer_datetimes = 1; set input_format_try_infer_dates = 1; set schema_inference_make_columns_nullable = 0; set input_format_json_try_infer_numbers_from_strings = 0; +set session_timezone = 'UTC'; select 'Date'; select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01"}'); From ebe9f6de99d9473495d760cec6b1c0aab8fd5db9 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 14 Aug 2024 15:03:55 +0000 Subject: [PATCH 2249/2361] Enable fp16, don't enable simsimd --- contrib/usearch-cmake/CMakeLists.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 83221e3810f..df131e0c528 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -9,4 +9,14 @@ target_include_directories(_usearch SYSTEM INTERFACE ${SIMSIMD_PROJECT_DIR}/include ${USEARCH_PROJECT_DIR}/include) +target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB) + +# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD) +# ^^ simsimd is not enabled at the moment. Reasons: +# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW. +# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD +# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of +# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86 +# and ARM machines ... certain combinations of quantizations / distance functions / SIMD instructions are not implemented at the moment.
+ add_library(ch_contrib::usearch ALIAS _usearch) From 6739320bdc72901f756dbef88e25f19435231531 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 14 Aug 2024 15:47:35 +0000 Subject: [PATCH 2250/2361] Don't call 'validate' ('make' calls it under the hood already) --- src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index fbbc66bd8db..77259b18bb9 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -98,9 +98,6 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( unum::usearch::index_dense_config_t config(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search); config.enable_key_lookups = false; /// we don't do row-to-vector lookups - if (auto error = config.validate(); error) /// already called in vectorSimilarityIndexValidator, call again because usearch may change the config in-place - throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. Error: {}", String(error.release())); - if (auto result = USearchIndex::make(metric, config); !result) throw Exception(ErrorCodes::INCORRECT_DATA, "Could not create vector similarity index. Error: {}", String(result.error.release())); else From d4bbe2e9f086669e9d75d178299eb246353a36b4 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 15 Aug 2024 09:32:17 +0000 Subject: [PATCH 2251/2361] Add tests for non-default quantization --- .../02354_vector_search_queries.reference | 55 +++++++++++++++++++ .../02354_vector_search_queries.sql | 51 +++++++++++++++++ 2 files changed, 106 insertions(+) diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 7c8e4c0ca59..5cd5ac006db 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -56,3 +56,58 @@ Expression (Projection) Condition: true Parts: 1/1 Granules: 4/4 +-- Non-default quantization +1 [2,3.2] 2.3323807824711897 +2 [4.2,3.4] 4.427188573446585 +0 [4.6,2.3] 4.609772130377966 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_f32) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 +1 [2,3.2] 2.3323807824711897 +2 [4.2,3.4] 4.427188573446585 +0 [4.6,2.3] 4.609772130377966 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_f16) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 +1 [2,3.2] 2.3323807824711897 +2 [4.2,3.4] 4.427188573446585 +0 [4.6,2.3] 4.609772130377966 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_i8) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 diff --git 
a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index dbf0fca32ab..486910ddbbb 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -78,3 +78,54 @@ LIMIT 3 SETTINGS max_limit_for_ann_queries = 2; -- LIMIT 3 > 2 --> don't use the ann index DROP TABLE tab; + +SELECT '-- Non-default quantization'; +CREATE TABLE tab_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 0, 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f16', 0, 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_i8(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'i8', 0, 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_i8 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_i8 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab_f32; +DROP TABLE tab_f16; +DROP TABLE tab_i8; From b9548504d9c3c9dc2ce2e1010cb058e73e18b789 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 14 Aug 2024 15:44:22 +0000 Subject: [PATCH 2252/2361] Cosmetics --- .../MergeTreeIndexVectorSimilarity.cpp | 127 +++++++++--------- .../02354_vector_search_bugs.reference | 1 + .../0_stateless/02354_vector_search_bugs.sql | 6 + ...r_search_index_creation_negative.reference | 1 - ..._vector_search_index_creation_negative.sql | 5 - .../02354_vector_search_queries.reference | 8 +- .../02354_vector_search_queries.sql | 9 +- 7 files changed, 73 insertions(+), 84 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 77259b18bb9..b215aeae495 100644 --- 
a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -55,6 +55,7 @@ const std::unordered_map quantizationToSca {"f32", unum::usearch::scalar_kind_t::f32_k}, {"f16", unum::usearch::scalar_kind_t::f16_k}, {"i8", unum::usearch::scalar_kind_t::i8_k}}; +/// Usearch provides more quantizations but ^^ above ones seem the only ones comprehensively supported accross all distance functions. template concept is_set = std::same_as>; @@ -253,8 +254,7 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception( ErrorCodes::LOGICAL_ERROR, "The provided position is not less than the number of block rows. Position: {}, Block rows: {}.", - *pos, - block.rows()); + *pos, block.rows()); size_t rows_read = std::min(limit, block.rows() - *pos); @@ -268,63 +268,64 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); const String & index_column_name = index_sample_block.getByPosition(0).name; - ColumnPtr column_cut = block.getByName(index_column_name).column->cut(*pos, rows_read); + const ColumnPtr & index_column = block.getByName(index_column_name).column; + ColumnPtr column_cut = index_column->cut(*pos, rows_read); - if (const auto & column_array = typeid_cast(column_cut.get())) + const auto * column_array = typeid_cast(column_cut.get()); + if (!column_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float32) column"); + + if (column_array->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); + + /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays + /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default + /// values which is also empty. + if (column_array->isDefaultAt(0)) + throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); + + const size_t rows = column_array->size(); + + const auto & column_array_offsets = column_array->getOffsets(); + const size_t dimensions = column_array_offsets[0]; + + if (!index) + index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); + + /// Also check that previously inserted blocks have the same size as this block. + /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across + /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. 
+ if (index->dimensions() != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); + + /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp + if (index->size() + rows > std::numeric_limits::max()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); + + const auto & column_array_data = column_array->getData(); + const auto & column_array_data_float = typeid_cast(column_array_data); + const auto & column_array_data_float_data = column_array_data_float.getData(); + + /// Check all sizes are the same + for (size_t row = 0; row < rows - 1; ++row) + if (column_array_offsets[row + 1] - column_array_offsets[row] != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); + + /// Reserving space is mandatory + if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + rows))) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); + + for (size_t row = 0; row < rows; ++row) { - const auto & column_array_data = column_array->getData(); - const auto & column_array_data_float = typeid_cast(column_array_data); - const auto & column_array_data_float_data = column_array_data_float.getData(); - - const auto & column_array_offsets = column_array->getOffsets(); - const size_t num_rows = column_array_offsets.size(); - - if (column_array->empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - - /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays - /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default - /// values which is also empty. - if (column_array->isDefaultAt(0)) - throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); - - /// Check all sizes are the same - const size_t dimensions = column_array_offsets[0]; - for (size_t i = 0; i < num_rows - 1; ++i) - if (column_array_offsets[i + 1] - column_array_offsets[i] != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); - - /// Also check that previously inserted blocks have the same size as this block. - /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across - /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. 
- if (index && index->dimensions() != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); - - if (!index) - index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); - - /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp - if (index->size() + num_rows > std::numeric_limits::max()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index in column {} would exceed 4 billion entries", index_column_name); - - /// Reserving space is mandatory - if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows))) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); - - for (size_t row = 0; row < num_rows; ++row) + if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release())); + else { - if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result) - throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release())); - else - { - ProfileEvents::increment(ProfileEvents::USearchAddCount); - ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members); - ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances); - } + ProfileEvents::increment(ProfileEvents::USearchAddCount); + ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances); } } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float32) column"); *pos += rows_read; } @@ -483,7 +484,7 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta if (!quantizationToScalarKind.contains(index.arguments[2].safeGet())) throw Exception(ErrorCodes::INCORRECT_DATA, "Third argument (quantization) of vector similarity index is not supported. 
Supported quantizations are: {}", joinByComma(quantizationToScalarKind)); - /// Call Usearche's own parameter validation method for HNSW-specific parameters + /// Call Usearch's own parameter validation method for HNSW-specific parameters UInt64 m = index.arguments[3].safeGet(); UInt64 ef_construction = index.arguments[4].safeGet(); UInt64 ef_search = index.arguments[5].safeGet(); @@ -498,18 +499,14 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta if (index.column_names.size() != 1 || index.data_types.size() != 1) throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Vector similarity indexes must be created on a single column"); - /// Check data type of the indexed column: + /// Check that the data type is Array(Float32) DataTypePtr data_type = index.sample_block.getDataTypes()[0]; - if (const auto * data_type_array = typeid_cast(data_type.get())) - { - TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); - } - else - { + const auto * data_type_array = typeid_cast(data_type.get()); + if (!data_type_array) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); + TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); + if (!WhichDataType(nested_type_index).isFloat32()) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); - } } } diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.reference b/tests/queries/0_stateless/02354_vector_search_bugs.reference index a27b086e118..8da05c8a7c0 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.reference +++ b/tests/queries/0_stateless/02354_vector_search_bugs.reference @@ -1,3 +1,4 @@ +Rejects INSERTs of Arrays with different sizes Issue #52258: Empty Arrays or Arrays with default values are rejected It is possible to create parts with different Array vector sizes but there will be an error at query time Correctness of index with > 1 mark diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index 7c66b4b8e45..51e2e6ce2b7 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -7,6 +7,12 @@ SET enable_analyzer = 1; -- 0 vs. 
1 produce slightly different error codes, make DROP TABLE IF EXISTS tab; +SELECT 'Rejects INSERTs of Arrays with different sizes'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } +DROP TABLE tab; + SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference index f18daa6e02e..5963f4b5834 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference @@ -5,4 +5,3 @@ Two or six index arguments 4nd argument (M), if given, must be UInt64 and > 1 Must be created on single column Must be created on Array(Float32) columns -Rejects INSERTs of Arrays with different sizes diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index de9d37e1000..b39b8d3e754 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -38,8 +38,3 @@ CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity('hn CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } - -SELECT 'Rejects INSERTs of Arrays with different sizes'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 5cd5ac006db..374ed1f8abd 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -1,9 +1,7 @@ 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block -- ORDER-BY-type 5 [0,2] 0 6 [0,2.1] 0.09999990463256836 7 [0,2.2] 0.20000004768371582 -- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -20,11 +18,9 @@ Expression (Projection) Parts: 1/1 Granules: 1/1 12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block -- ORDER-BY-type 6 [0,2] 0 7 [0,2.1] 0.09999990463256836 8 [0,2.2] 0.20000004768371582 -- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -41,11 +37,11 @@ Expression 
(Projection) Parts: 1/1 Granules: 2/4 Special cases -- ORDER-BY-type +-- Non-default metric, M, ef_construction, ef_search 6 [1,9.3] 0.005731362878640178 1 [2,3.2] 0.15200169244542905 7 [5.5,4.7] 0.3503476876550442 -- Special case: setting "max_limit_for_ann_queries" +-- Setting "max_limit_for_ann_queries" Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 486910ddbbb..2c6a7f10776 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -14,14 +14,12 @@ CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similar INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- ORDER-BY-type, EXPLAIN'; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) @@ -37,14 +35,12 @@ SELECT '12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexe CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); -SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- ORDER-BY-type, EXPLAIN'; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) @@ -56,19 +52,18 @@ DROP TABLE tab; SELECT 'Special cases'; -- Not a systematic test, just to check that no bad things happen. 
--- Test with non-default metric, M, ef_construction, ef_search +SELECT '-- Non-default metric, M, ef_construction, ef_search'; CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 42, 99, 66) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) FROM tab ORDER BY cosineDistance(vec, reference_vec) LIMIT 3; -SELECT '- Special case: setting "max_limit_for_ann_queries"'; +SELECT '-- Setting "max_limit_for_ann_queries"'; EXPLAIN indexes=1 WITH [0.0, 2.0] as reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) From 38a2b0dcc7c0d84136f00a3d5a7a3afbf86cccbb Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 15 Aug 2024 10:42:06 +0000 Subject: [PATCH 2253/2361] Allow Array(Float64) as type of underlying column --- .../mergetree-family/annindexes.md | 8 +- .../MergeTreeIndexVectorSimilarity.cpp | 79 ++++++++++++------- .../MergeTree/VectorSimilarityCondition.cpp | 4 +- .../MergeTree/VectorSimilarityCondition.h | 4 +- ..._vector_search_index_creation_negative.sql | 2 +- .../02354_vector_search_queries.reference | 4 + .../02354_vector_search_queries.sql | 12 +++ 7 files changed, 76 insertions(+), 37 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 097b0f5850a..1057ccb5fee 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -22,10 +22,10 @@ ORDER BY Distance(vectors, Point) LIMIT N ``` -`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md), for example embeddings. -Function `Distance` computes the distance between two vectors. Often, the Euclidean (L2) distance is chosen as distance function but [other -distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17, -0.33, ...)`, and `N` limits the number of search results. +`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md) or Array(Float64), for example +embeddings. Function `Distance` computes the distance between two vectors. Often, the Euclidean (L2) distance is chosen as distance function +but [other distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, +e.g. `(0.17, 0.33, ...)`, and `N` limits the number of search results. This query returns the top-`N` closest points to the reference point. Parameter `N` limits the number of returned values which is useful for situations where `MaxDistance` is difficult to determine in advance. 
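As a minimal sketch of the query pattern described above, using the newly supported element type: the table, column names and reference point below are placeholder values chosen for illustration and are not part of this patch.

```sql
-- Illustrative sketch only: 'tab', 'id' and 'vectors' are placeholder names.
CREATE TABLE tab
(
    id UInt64,
    vectors Array(Float64),
    INDEX idx vectors TYPE vector_similarity('hnsw', 'L2Distance')
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO tab VALUES (0, [0.0, 2.0]), (1, [1.1, 0.0]), (2, [0.0, 2.2]);

-- Top-3 nearest neighbours of the reference point [0.0, 2.0].
WITH [0.0, 2.0] AS reference_vec
SELECT id, L2Distance(vectors, reference_vec)
FROM tab
ORDER BY L2Distance(vectors, reference_vec)
LIMIT 3;
```

The same `ORDER BY Distance(...) LIMIT N` shape applies regardless of whether the indexed column is `Array(Float32)` or `Array(Float64)`.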
diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index b215aeae495..9376cdf7562 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -248,6 +248,40 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAnd return granule; } +namespace +{ + +template +void updateImpl(const ColumnArray * column_array, const ColumnArray::Offsets & column_array_offsets, USearchIndexWithSerializationPtr & index, size_t dimensions, size_t rows) +{ + const auto & column_array_data = column_array->getData(); + const auto & column_array_data_float = typeid_cast(column_array_data); + const auto & column_array_data_float_data = column_array_data_float.getData(); + + /// Check all sizes are the same + for (size_t row = 0; row < rows - 1; ++row) + if (column_array_offsets[row + 1] - column_array_offsets[row] != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); + + /// Reserving space is mandatory + if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + rows))) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); + + for (size_t row = 0; row < rows; ++row) + { + if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release())); + else + { + ProfileEvents::increment(ProfileEvents::USearchAddCount); + ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances); + } + } +} + +} + void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_t * pos, size_t limit) { if (*pos >= block.rows()) @@ -273,7 +307,7 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ const auto * column_array = typeid_cast(column_cut.get()); if (!column_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float32) column"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float*) column"); if (column_array->empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); @@ -302,30 +336,19 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ if (index->size() + rows > std::numeric_limits::max()) throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - const auto & column_array_data = column_array->getData(); - const auto & column_array_data_float = typeid_cast(column_array_data); - const auto & column_array_data_float_data = column_array_data_float.getData(); + DataTypePtr data_type = block.getDataTypes()[0]; + const auto * data_type_array = typeid_cast(data_type.get()); + if (!data_type_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - /// Check all sizes are the same - for (size_t row = 0; row < rows - 1; ++row) - if (column_array_offsets[row + 1] - column_array_offsets[row] != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity 
index must have equal length"); + if (WhichDataType(nested_type_index).isFloat32()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else if (WhichDataType(nested_type_index).isFloat64()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); - /// Reserving space is mandatory - if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + rows))) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); - - for (size_t row = 0; row < rows; ++row) - { - if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result) - throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release())); - else - { - ProfileEvents::increment(ProfileEvents::USearchAddCount); - ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members); - ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances); - } - } *pos += rows_read; } @@ -373,7 +396,7 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer "does not match the dimension in the index ({})", vector_similarity_condition.getDimensions(), index->dimensions()); - const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); + const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); auto search_result = index->search(reference_vector.data(), limit); if (!search_result) @@ -499,14 +522,14 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta if (index.column_names.size() != 1 || index.data_types.size() != 1) throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Vector similarity indexes must be created on a single column"); - /// Check that the data type is Array(Float32) + /// Check that the data type is Array(Float*) DataTypePtr data_type = index.sample_block.getDataTypes()[0]; const auto * data_type_array = typeid_cast(data_type.get()); if (!data_type_array) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float*)"); TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); + if (!WhichDataType(nested_type_index).isFloat()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float*)"); } } diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.cpp b/src/Storages/MergeTree/VectorSimilarityCondition.cpp index 2e53b4ecb3a..c8f33857640 100644 --- a/src/Storages/MergeTree/VectorSimilarityCondition.cpp +++ b/src/Storages/MergeTree/VectorSimilarityCondition.cpp @@ -24,7 +24,7 @@ namespace { template -void extractReferenceVectorFromLiteral(std::vector & reference_vector, Literal literal) +void extractReferenceVectorFromLiteral(std::vector & reference_vector, Literal literal) { Float64 float_element_of_reference_vector; Int64 int_element_of_reference_vector; @@ -72,7 +72,7 @@ UInt64 
VectorSimilarityCondition::getLimit() const throw Exception(ErrorCodes::LOGICAL_ERROR, "No LIMIT section in query, not supported"); } -std::vector VectorSimilarityCondition::getReferenceVector() const +std::vector VectorSimilarityCondition::getReferenceVector() const { if (index_is_useful && query_information.has_value()) return query_information->reference_vector; diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.h b/src/Storages/MergeTree/VectorSimilarityCondition.h index fd339ed715d..2380f8f46b0 100644 --- a/src/Storages/MergeTree/VectorSimilarityCondition.h +++ b/src/Storages/MergeTree/VectorSimilarityCondition.h @@ -60,7 +60,7 @@ public: L2 }; - std::vector reference_vector; + std::vector reference_vector; DistanceFunction distance_function; String column_name; UInt64 limit; @@ -70,7 +70,7 @@ public: /// Returns false if query can be speeded up by an ANN index, true otherwise. bool alwaysUnknownOrTrue(String distance_function) const; - std::vector getReferenceVector() const; + std::vector getReferenceVector() const; size_t getDimensions() const; String getColumnName() const; Info::DistanceFunction getDistanceFunction() const; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index b39b8d3e754..e8e6aaee1b2 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -35,6 +35,6 @@ SELECT 'Must be created on Array(Float32) columns'; SET allow_suspicious_low_cardinality_types = 1; CREATE TABLE tab(id Int32, vec UInt64, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Array(UInt64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 374ed1f8abd..cb3a8c801b1 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -107,3 +107,7 @@ Expression (Projection) Description: vector_similarity GRANULARITY 2 Parts: 1/1 Granules: 2/4 +-- Index on Array(Float64) column +6 [0,2] 0 +7 [0,2.1] 0.10000000000000009 +8 [0,2.2] 0.20000000000000018 diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 2c6a7f10776..fbf8427d8fe 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -124,3 +124,15 @@ LIMIT 3; DROP 
TABLE tab_f32; DROP TABLE tab_f16; DROP TABLE tab_i8; + +SELECT '-- Index on Array(Float64) column'; +CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; From 0a10f0ceb3fab80e5dcfab5ebbebbdbfcdaff6c1 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 11:27:12 +0000 Subject: [PATCH 2254/2361] Update tests --- .../02325_dates_schema_inference.reference | 58 +++++++++---------- tests/queries/0_stateless/02404_data.CSV | 10 ++++ .../0_stateless/02404_data.CSVWithNames | 11 ++++ .../0_stateless/02404_data.CustomSeparated | 10 ++++ .../0_stateless/02404_data.JSONCompactEachRow | 10 ++++ .../0_stateless/02404_data.JSONEachRow | 10 ++++ tests/queries/0_stateless/02404_data.TSKV | 10 ++++ tests/queries/0_stateless/02404_data.TSV | 10 ++++ .../0_stateless/02404_data.TSVWithNames | 11 ++++ tests/queries/0_stateless/02404_data.Values | 1 + ...ce_cache_respect_format_settings.reference | 18 +++--- 11 files changed, 121 insertions(+), 38 deletions(-) create mode 100644 tests/queries/0_stateless/02404_data.CSV create mode 100644 tests/queries/0_stateless/02404_data.CSVWithNames create mode 100644 tests/queries/0_stateless/02404_data.CustomSeparated create mode 100644 tests/queries/0_stateless/02404_data.JSONCompactEachRow create mode 100644 tests/queries/0_stateless/02404_data.JSONEachRow create mode 100644 tests/queries/0_stateless/02404_data.TSKV create mode 100644 tests/queries/0_stateless/02404_data.TSV create mode 100644 tests/queries/0_stateless/02404_data.TSVWithNames create mode 100644 tests/queries/0_stateless/02404_data.Values diff --git a/tests/queries/0_stateless/02325_dates_schema_inference.reference b/tests/queries/0_stateless/02325_dates_schema_inference.reference index c8eebd3262e..124f105220d 100644 --- a/tests/queries/0_stateless/02325_dates_schema_inference.reference +++ b/tests/queries/0_stateless/02325_dates_schema_inference.reference @@ -1,29 +1,29 @@ JSONEachRow x Nullable(Date) x Nullable(DateTime64(9)) -x Nullable(DateTime64(9)) +x Nullable(DateTime) x Array(Nullable(Date)) -x Array(Nullable(DateTime64(9))) -x Array(Nullable(DateTime64(9))) -x Tuple(\n date1 Nullable(DateTime64(9)),\n date2 Nullable(Date)) -x Array(Nullable(DateTime64(9))) -x Array(Nullable(DateTime64(9))) -x Nullable(DateTime64(9)) +x Array(Nullable(DateTime)) +x Array(Nullable(DateTime)) +x Tuple(\n date1 Nullable(DateTime),\n date2 Nullable(Date)) +x Array(Nullable(DateTime)) +x Array(Nullable(DateTime)) +x Nullable(DateTime) x Array(Nullable(String)) x Nullable(String) x Array(Nullable(String)) -x Tuple(\n key1 Array(Array(Nullable(DateTime64(9)))),\n key2 Array(Array(Nullable(String)))) +x Tuple(\n key1 Array(Array(Nullable(DateTime))),\n key2 Array(Array(Nullable(String)))) CSV c1 Nullable(Date) c1 Nullable(DateTime64(9)) -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) c1 Array(Nullable(Date)) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Map(String, Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 
Nullable(DateTime64(9)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Map(String, Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Nullable(DateTime) c1 Array(Nullable(String)) c1 Nullable(String) c1 Array(Nullable(String)) @@ -31,14 +31,14 @@ c1 Map(String, Array(Array(Nullable(String)))) TSV c1 Nullable(Date) c1 Nullable(DateTime64(9)) -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) c1 Array(Nullable(Date)) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Map(String, Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Nullable(DateTime64(9)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Map(String, Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Nullable(DateTime) c1 Array(Nullable(String)) c1 Nullable(String) c1 Array(Nullable(String)) @@ -46,14 +46,14 @@ c1 Map(String, Array(Array(Nullable(String)))) Values c1 Nullable(Date) c1 Nullable(DateTime64(9)) -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) c1 Array(Nullable(Date)) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Map(String, Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Nullable(DateTime64(9)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Map(String, Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Nullable(DateTime) c1 Array(Nullable(String)) c1 Nullable(String) c1 Array(Nullable(String)) diff --git a/tests/queries/0_stateless/02404_data.CSV b/tests/queries/0_stateless/02404_data.CSV new file mode 100644 index 00000000000..2d8b5c8daa8 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.CSV @@ -0,0 +1,10 @@ +0,"1970-01-01" +1,"1970-01-02" +2,"1970-01-03" +3,"1970-01-04" +4,"1970-01-05" +5,"1970-01-06" +6,"1970-01-07" +7,"1970-01-08" +8,"1970-01-09" +9,"1970-01-10" diff --git a/tests/queries/0_stateless/02404_data.CSVWithNames b/tests/queries/0_stateless/02404_data.CSVWithNames new file mode 100644 index 00000000000..34647008916 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.CSVWithNames @@ -0,0 +1,11 @@ +"number","toDate(number)" +0,"1970-01-01" +1,"1970-01-02" +2,"1970-01-03" +3,"1970-01-04" +4,"1970-01-05" +5,"1970-01-06" +6,"1970-01-07" +7,"1970-01-08" +8,"1970-01-09" +9,"1970-01-10" diff --git a/tests/queries/0_stateless/02404_data.CustomSeparated b/tests/queries/0_stateless/02404_data.CustomSeparated new file mode 100644 index 00000000000..f3ae1663536 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.CustomSeparated @@ -0,0 +1,10 @@ +0 1970-01-01 +1 1970-01-02 +2 1970-01-03 +3 1970-01-04 +4 1970-01-05 +5 1970-01-06 +6 1970-01-07 +7 1970-01-08 +8 1970-01-09 +9 1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.JSONCompactEachRow b/tests/queries/0_stateless/02404_data.JSONCompactEachRow new file mode 100644 index 00000000000..de2e0986aab --- /dev/null +++ b/tests/queries/0_stateless/02404_data.JSONCompactEachRow @@ -0,0 +1,10 @@ +["0", "1970-01-01"] +["1", "1970-01-02"] +["2", "1970-01-03"] +["3", "1970-01-04"] +["4", "1970-01-05"] +["5", "1970-01-06"] +["6", "1970-01-07"] +["7", "1970-01-08"] +["8", "1970-01-09"] +["9", "1970-01-10"] diff --git a/tests/queries/0_stateless/02404_data.JSONEachRow b/tests/queries/0_stateless/02404_data.JSONEachRow new file mode 100644 index 00000000000..e77256ac7fc --- /dev/null +++ b/tests/queries/0_stateless/02404_data.JSONEachRow @@ -0,0 +1,10 @@ 
+{"number":"0","toDate(number)":"1970-01-01"} +{"number":"1","toDate(number)":"1970-01-02"} +{"number":"2","toDate(number)":"1970-01-03"} +{"number":"3","toDate(number)":"1970-01-04"} +{"number":"4","toDate(number)":"1970-01-05"} +{"number":"5","toDate(number)":"1970-01-06"} +{"number":"6","toDate(number)":"1970-01-07"} +{"number":"7","toDate(number)":"1970-01-08"} +{"number":"8","toDate(number)":"1970-01-09"} +{"number":"9","toDate(number)":"1970-01-10"} diff --git a/tests/queries/0_stateless/02404_data.TSKV b/tests/queries/0_stateless/02404_data.TSKV new file mode 100644 index 00000000000..70f7ad33c8b --- /dev/null +++ b/tests/queries/0_stateless/02404_data.TSKV @@ -0,0 +1,10 @@ +number=0 toDate(number)=1970-01-01 +number=1 toDate(number)=1970-01-02 +number=2 toDate(number)=1970-01-03 +number=3 toDate(number)=1970-01-04 +number=4 toDate(number)=1970-01-05 +number=5 toDate(number)=1970-01-06 +number=6 toDate(number)=1970-01-07 +number=7 toDate(number)=1970-01-08 +number=8 toDate(number)=1970-01-09 +number=9 toDate(number)=1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.TSV b/tests/queries/0_stateless/02404_data.TSV new file mode 100644 index 00000000000..f3ae1663536 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.TSV @@ -0,0 +1,10 @@ +0 1970-01-01 +1 1970-01-02 +2 1970-01-03 +3 1970-01-04 +4 1970-01-05 +5 1970-01-06 +6 1970-01-07 +7 1970-01-08 +8 1970-01-09 +9 1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.TSVWithNames b/tests/queries/0_stateless/02404_data.TSVWithNames new file mode 100644 index 00000000000..23310234a8c --- /dev/null +++ b/tests/queries/0_stateless/02404_data.TSVWithNames @@ -0,0 +1,11 @@ +number toDate(number) +0 1970-01-01 +1 1970-01-02 +2 1970-01-03 +3 1970-01-04 +4 1970-01-05 +5 1970-01-06 +6 1970-01-07 +7 1970-01-08 +8 1970-01-09 +9 1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.Values b/tests/queries/0_stateless/02404_data.Values new file mode 100644 index 00000000000..d9a621d7ec9 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.Values @@ -0,0 +1 @@ +(0,'1970-01-01'),(1,'1970-01-02'),(2,'1970-01-03'),(3,'1970-01-04'),(4,'1970-01-05'),(5,'1970-01-06'),(6,'1970-01-07'),(7,'1970-01-08'),(8,'1970-01-09'),(9,'1970-01-10') \ No newline at end of file diff --git a/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference index 049603328d9..3d6b1021916 100644 --- a/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference +++ b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference @@ -4,7 +4,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -14,7 +14,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -24,7 +24,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -34,7 +34,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -44,7 
+44,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -54,7 +54,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -64,7 +64,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -74,7 +74,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -84,7 +84,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 From 50a8cee0c5a4cd067cee2dc5584401b15283b3cd Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 11:39:04 +0000 Subject: [PATCH 2255/2361] Update docs --- docs/en/interfaces/schema-inference.md | 93 +++++++++++++++++--------- 1 file changed, 62 insertions(+), 31 deletions(-) diff --git a/docs/en/interfaces/schema-inference.md b/docs/en/interfaces/schema-inference.md index 05fae994cbe..4afba20d76c 100644 --- a/docs/en/interfaces/schema-inference.md +++ b/docs/en/interfaces/schema-inference.md @@ -359,13 +359,14 @@ DESC format(JSONEachRow, '{"int" : 42, "float" : 42.42, "string" : "Hello, World Dates, DateTimes: ```sql -DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00"}') +DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}') ``` ```response -┌─name─────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠-│ date │ Nullable(Date) │ │ │ │ │ │ -│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │ -└──────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠+│ date │ Nullable(Date) │ │ │ │ │ │ +│ datetime │ Nullable(DateTime) │ │ │ │ │ │ +│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` Arrays: @@ -759,12 +760,13 @@ DESC format(CSV, 'Hello world!,World hello!') Dates, DateTimes: ```sql -DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00"') +DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"') ``` ```response ┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠│ c1 │ Nullable(Date) │ │ │ │ │ │ -│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ c2 │ Nullable(DateTime) │ │ │ │ │ │ +│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │ └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` @@ -956,12 +958,13 @@ DESC format(TSKV, 'int=42 float=42.42 bool=true string=Hello,World!\n') Dates, DateTimes: ```sql -DESC format(TSV, '2020-01-01 2020-01-01 00:00:00') +DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000') ``` ```response 
┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠│ c1 │ Nullable(Date) │ │ │ │ │ │ -│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ c2 │ Nullable(DateTime) │ │ │ │ │ │ +│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │ └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` @@ -1126,12 +1129,13 @@ DESC format(Values, $$(42, 42.42, true, 'Hello,World!')$$) Dates, DateTimes: ```sql -DESC format(Values, $$('2020-01-01', '2020-01-01 00:00:00')$$) -``` + DESC format(Values, $$('2020-01-01', '2020-01-01 00:00:00', '2022-01-01 00:00:00.000')$$) + ``` ```response ┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠│ c1 │ Nullable(Date) │ │ │ │ │ │ -│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ c2 │ Nullable(DateTime) │ │ │ │ │ │ +│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │ └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` @@ -1504,8 +1508,8 @@ DESC format(JSONEachRow, $$ #### input_format_try_infer_datetimes -If enabled, ClickHouse will try to infer type `DateTime64` from string fields in schema inference for text formats. -If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime64(9)`, +If enabled, ClickHouse will try to infer type `DateTime` or `DateTime64` from string fields in schema inference for text formats. +If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime` or `DateTime64(9)` (if any datetime had fractional part), if at least one field was not parsed as datetime, the result type will be `String`. Enabled by default. @@ -1513,39 +1517,66 @@ Enabled by default. 
**Examples** ```sql -SET input_format_try_infer_datetimes = 0 +SET input_format_try_infer_datetimes = 0; DESC format(JSONEachRow, $$ - {"datetime" : "2021-01-01 00:00:00.000"} - {"datetime" : "2022-01-01 00:00:00.000"} + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"} $$) ``` ```response -┌─name─────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠-│ datetime │ Nullable(String) │ │ │ │ │ │ -└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠+│ datetime │ Nullable(String) │ │ │ │ │ │ +│ datetime64 │ Nullable(String) │ │ │ │ │ │ +└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` ```sql -SET input_format_try_infer_datetimes = 1 +SET input_format_try_infer_datetimes = 1; DESC format(JSONEachRow, $$ - {"datetime" : "2021-01-01 00:00:00.000"} - {"datetime" : "2022-01-01 00:00:00.000"} + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"} $$) ``` ```response -┌─name─────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠-│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │ -└──────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠+│ datetime │ Nullable(DateTime) │ │ │ │ │ │ +│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` ```sql DESC format(JSONEachRow, $$ - {"datetime" : "2021-01-01 00:00:00.000"} - {"datetime" : "unknown"} + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "unknown", "datetime64" : "unknown"} $$) ``` ```response -┌─name─────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠-│ datetime │ Nullable(String) │ │ │ │ │ │ -└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠+│ datetime │ Nullable(String) │ │ │ │ │ │ +│ datetime64 │ Nullable(String) │ │ │ │ │ │ +└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +``` + +#### input_format_try_infer_datetimes_only_datetime64 + +If enabled, ClickHouse will always infer `DateTime64(9)` when `input_format_try_infer_datetimes` is enabled even if datetime values don't contain fractional part. + +Disabled by default. 
+ +**Examples** + +```sql +SET input_format_try_infer_datetimes = 1; +SET input_format_try_infer_datetimes_only_datetime64 = 1; +DESC format(JSONEachRow, $$ + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"} + $$) +``` + +```text +┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┠+│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` Note: Parsing datetimes during schema inference respect setting [date_time_input_format](/docs/en/operations/settings/settings-formats.md#date_time_input_format) From 65cbe94d545b51df22d069246dafdea69d693268 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 15 Aug 2024 14:05:33 +0200 Subject: [PATCH 2256/2361] Update DeltaLakeMetadata.cpp --- src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index 89b48f08438..ffb987bfa82 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -332,7 +332,7 @@ struct DeltaLakeMetadataImpl WhichDataType which(check_type->getTypeId()); if (which.isStringOrFixedString()) return value; - else if (isBool(data_type)) + else if (isBool(check_type)) return parse(value); else if (which.isInt8()) return parse(value); From 7e006f658127c4efe4c53c0c79880a6b09955755 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Aug 2024 14:09:48 +0200 Subject: [PATCH 2257/2361] Update delta lake test --- .../DataLakes/DeltaLakeMetadata.cpp | 5 +-- tests/integration/test_storage_delta/test.py | 32 +++++++++---------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index c896a760597..30001a59a59 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -262,10 +262,11 @@ struct DeltaLakeMetadataImpl partition_name, file_schema.toNamesAndTypesDescription()); } + LOG_TEST(log, "Partition {} value is {} (data type: {}, file: {})", + partition_name, value, name_and_type->type->getName(), filename); + auto field = getFieldValue(value, name_and_type->type); current_partition_columns.emplace_back(*name_and_type, field); - - LOG_TEST(log, "Partition {} value is {} (for {})", partition_name, value, filename); } } } diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index a595d01e6b3..75a4b6cc221 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -572,7 +572,7 @@ def test_partition_columns(started_cluster): "test" + str(i), datetime.strptime(f"2000-01-0{i}", "%Y-%m-%d"), i, - False, + False if i % 2 == 0 else True, ) ] df = spark.createDataFrame(data=data, schema=schema) @@ -622,15 +622,15 @@ def test_partition_columns(started_cluster): ENGINE=DeltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 
'minio123')""" ) assert ( - """1 test1 2000-01-01 1 false + """1 test1 2000-01-01 1 true 2 test2 2000-01-02 2 false -3 test3 2000-01-03 3 false +3 test3 2000-01-03 3 true 4 test4 2000-01-04 4 false -5 test5 2000-01-05 5 false +5 test5 2000-01-05 5 true 6 test6 2000-01-06 6 false -7 test7 2000-01-07 7 false +7 test7 2000-01-07 7 true 8 test8 2000-01-08 8 false -9 test9 2000-01-09 9 false""" +9 test9 2000-01-09 9 true""" == instance.query(f"SELECT * FROM {TABLE_NAME} ORDER BY b").strip() ) @@ -670,7 +670,7 @@ test9 2000-01-09 9""" "test" + str(i), datetime.strptime(f"2000-01-{i}", "%Y-%m-%d"), i, - False, + False if i % 2 == 0 else True, ) ] df = spark.createDataFrame(data=data, schema=schema) @@ -696,23 +696,23 @@ test9 2000-01-09 9""" assert result == num_rows * 2 assert ( - """1 test1 2000-01-01 1 false + """1 test1 2000-01-01 1 true 2 test2 2000-01-02 2 false -3 test3 2000-01-03 3 false +3 test3 2000-01-03 3 true 4 test4 2000-01-04 4 false -5 test5 2000-01-05 5 false +5 test5 2000-01-05 5 true 6 test6 2000-01-06 6 false -7 test7 2000-01-07 7 false +7 test7 2000-01-07 7 true 8 test8 2000-01-08 8 false -9 test9 2000-01-09 9 false +9 test9 2000-01-09 9 true 10 test10 2000-01-10 10 false -11 test11 2000-01-11 11 false +11 test11 2000-01-11 11 true 12 test12 2000-01-12 12 false -13 test13 2000-01-13 13 false +13 test13 2000-01-13 13 true 14 test14 2000-01-14 14 false -15 test15 2000-01-15 15 false +15 test15 2000-01-15 15 true 16 test16 2000-01-16 16 false -17 test17 2000-01-17 17 false +17 test17 2000-01-17 17 true 18 test18 2000-01-18 18 false""" == instance.query( f""" From 03bfb1562b56d96963e75bbd14b6759ae103e52a Mon Sep 17 00:00:00 2001 From: Han Fei Date: Thu, 15 Aug 2024 14:26:01 +0200 Subject: [PATCH 2258/2361] fix overflow --- src/Common/OptimizedRegularExpression.cpp | 2 ++ src/Common/tests/gtest_optimize_re.cpp | 1 + 2 files changed, 3 insertions(+) diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index 04e5f846adf..2cdb3409487 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -266,6 +266,8 @@ const char * analyzeImpl( break; } pos += offset; + if (pos == end) + return pos; /// if this group only contains flags, we have nothing to do. 
if (*pos == ')') { diff --git a/src/Common/tests/gtest_optimize_re.cpp b/src/Common/tests/gtest_optimize_re.cpp index 0730a13f160..d6735c3ccfe 100644 --- a/src/Common/tests/gtest_optimize_re.cpp +++ b/src/Common/tests/gtest_optimize_re.cpp @@ -21,6 +21,7 @@ TEST(OptimizeRE, analyze) test_f("c([^k]*)de", ""); test_f("(?-s)bob", "bob", {}, false, true); test_f("(?s)bob", "bob", {}, false, true); + test_f("(?ssss", ""); test_f("abc(de)fg", "abcdefg", {}, false, true); test_f("abc(de|xyz)fg", "abc", {"abcdefg", "abcxyzfg"}, false, true); test_f("abc(de?f|xyz)fg", "abc", {"abcd", "abcxyzfg"}, false, true); From 7acf74437f26f194fd17a6f7d5dcc6a422dde722 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 15 Aug 2024 12:29:59 +0000 Subject: [PATCH 2259/2361] Fix spelling --- src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 9376cdf7562..27bfcbbddcf 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -55,7 +55,7 @@ const std::unordered_map quantizationToSca {"f32", unum::usearch::scalar_kind_t::f32_k}, {"f16", unum::usearch::scalar_kind_t::f16_k}, {"i8", unum::usearch::scalar_kind_t::i8_k}}; -/// Usearch provides more quantizations but ^^ above ones seem the only ones comprehensively supported accross all distance functions. +/// Usearch provides more quantizations but ^^ above ones seem the only ones comprehensively supported across all distance functions. template concept is_set = std::same_as>; From 657bbce23f6d764dc0172f9de3d6bc7fcd06fe10 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Aug 2024 14:38:20 +0200 Subject: [PATCH 2260/2361] Add a test --- tests/integration/test_mask_sensitive_info/test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py index 6f6dc4d287f..8d5345082ff 100644 --- a/tests/integration/test_mask_sensitive_info/test.py +++ b/tests/integration/test_mask_sensitive_info/test.py @@ -202,6 +202,10 @@ def test_create_table(): f"S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip') settings mode = 'ordered'", f"S3Queue('http://minio1:9001/root/data/', 'minio', '{password}', 'CSV') settings mode = 'ordered'", f"S3Queue('http://minio1:9001/root/data/', 'minio', '{password}', 'CSV', 'gzip') settings mode = 'ordered'", + ( + f"Iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')", + "DNS_ERROR", + ), ] def make_test_case(i): @@ -266,6 +270,7 @@ def test_create_table(): # due to sensitive data substituion the query will be normalized, so not "settings" but "SETTINGS" "CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV') SETTINGS mode = 'ordered'", "CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip') SETTINGS mode = 'ordered'", + "CREATE TABLE table21 (`x` int) ENGINE = Iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')", ], must_not_contain=[password], ) @@ -387,6 +392,7 @@ def test_table_functions(): f"azureBlobStorageCluster('test_shard_localhost', '{azure_storage_account_url}', 'cont', 'test_simple_15.csv', '{azure_account_name}', '{azure_account_key}', 'CSV', 'none', 'auto')", 
f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, connection_string = '{azure_conn_string}', container = 'cont', blob_path = 'test_simple_16.csv', format = 'CSV')", f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, storage_account_url = '{azure_storage_account_url}', container = 'cont', blob_path = 'test_simple_17.csv', account_name = '{azure_account_name}', account_key = '{azure_account_key}')", + f"iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')", ] def make_test_case(i): @@ -478,6 +484,7 @@ def test_table_functions(): f"CREATE TABLE tablefunc48 (`x` int) AS azureBlobStorageCluster('test_shard_localhost', '{azure_storage_account_url}', 'cont', 'test_simple_15.csv', '{azure_account_name}', '[HIDDEN]', 'CSV', 'none', 'auto')", f"CREATE TABLE tablefunc49 (x int) AS azureBlobStorageCluster('test_shard_localhost', named_collection_2, connection_string = '{azure_conn_string}', container = 'cont', blob_path = 'test_simple_16.csv', format = 'CSV')", f"CREATE TABLE tablefunc50 (`x` int) AS azureBlobStorageCluster('test_shard_localhost', named_collection_2, storage_account_url = '{azure_storage_account_url}', container = 'cont', blob_path = 'test_simple_17.csv', account_name = '{azure_account_name}', account_key = '[HIDDEN]')", + "CREATE TABLE tablefunc51 (`x` int) AS iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')", ], must_not_contain=[password], ) From c8e185296918b3bccad35a567139d0cff7f42f22 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 15 Aug 2024 12:40:27 +0000 Subject: [PATCH 2261/2361] Revert "Follow-up to #63898, pt. II" This reverts commit e2e9ae776d0e0795a565fa7ed6a29671fcda377e. --- .../0_stateless/00366_multi_statements.sh | 20 +++--- .../00443_preferred_block_size_bytes.sh | 4 +- ...ess_to_temporary_table_in_readonly_mode.sh | 20 +++--- ...4_performance_introspection_and_logging.sh | 4 +- ..._fetch_merged_or_mutated_part_zookeeper.sh | 6 +- ...d_optimize_skip_select_on_unused_shards.sh | 30 ++++----- ...p_select_on_unused_shards_with_prewhere.sh | 26 ++++---- ...ated_minimalistic_part_header_zookeeper.sh | 4 +- .../queries/0_stateless/00837_minmax_index.sh | 2 +- .../queries/0_stateless/00838_unique_index.sh | 2 +- .../0_stateless/00907_set_index_max_rows.sh | 2 +- .../0_stateless/00908_bloom_filter_index.sh | 10 +-- .../queries/0_stateless/00942_mutate_index.sh | 2 +- .../0_stateless/00943_materialize_index.sh | 4 +- .../00944_clear_index_in_partition.sh | 2 +- .../00964_bloom_index_string_functions.sh | 2 +- .../00965_set_index_string_functions.sh | 2 +- .../00974_primary_key_for_lowCardinality.sh | 4 +- tests/queries/0_stateless/00990_hasToken.sh | 2 +- .../01013_sync_replica_timeout_zookeeper.sh | 6 +- ...th_nondeterministic_functions_zookeeper.sh | 4 +- .../01037_polygon_dicts_correctness_all.sh | 6 +- .../01037_polygon_dicts_correctness_fast.sh | 6 +- .../01037_polygon_dicts_simple_functions.sh | 6 +- .../01055_minmax_index_compact_parts.sh | 2 +- .../01077_mutations_index_consistency.sh | 10 +-- .../01187_set_profile_as_setting.sh | 8 +-- ..._block_size_rows_for_materialized_views.sh | 2 +- .../01307_multiple_leaders_zookeeper.sh | 10 +-- .../0_stateless/01415_sticking_mutations.sh | 2 +- tests/queries/0_stateless/01451_dist_logs.sh | 2 +- .../01459_manual_write_to_replicas.sh | 4 +- .../01459_manual_write_to_replicas_quorum.sh | 4 +- ..._write_to_replicas_quorum_detach_attach.sh | 4 +- ...house_server_start_with_embedded_config.sh | 2 +- 
.../0_stateless/01508_format_regexp_raw.sh | 4 +- .../01509_dictionary_preallocate.sh | 2 +- ...01510_format_regexp_raw_low_cardinality.sh | 4 +- .../0_stateless/01526_initial_query_id.sh | 2 +- .../01599_mutation_query_params.sh | 2 +- .../01600_quota_by_forwarded_ip.sh | 4 +- .../01684_ssd_cache_dictionary_simple_key.sh | 2 +- .../01685_ssd_cache_dictionary_complex_key.sh | 2 +- .../01691_parser_data_type_exponential.sh | 2 +- ...ojections_optimize_aggregation_in_order.sh | 2 +- ...s_partial_optimize_aggregation_in_order.sh | 2 +- .../01753_optimize_aggregation_in_order.sh | 2 +- .../01758_optimize_skip_unused_shards_once.sh | 2 +- ...91_dist_INSERT_block_structure_mismatch.sh | 2 +- .../01814_distributed_push_down_limit.sh | 6 +- .../01853_dictionary_cache_duplicates.sh | 8 +-- .../01872_initial_query_start_time.sh | 2 +- ...75_ssd_cache_dictionary_decimal256_type.sh | 2 +- .../01890_materialized_distributed_join.sh | 2 +- .../01903_ssd_cache_dictionary_array_type.sh | 2 +- ..._cache_dictionary_default_nullable_type.sh | 2 +- ...1927_query_views_log_matview_exceptions.sh | 4 +- .../0_stateless/01947_multiple_pipe_read.sh | 4 +- .../02003_memory_limit_in_client.sh | 32 ++++----- .../02021_create_database_with_comment.sh | 2 +- .../02050_client_profile_events.sh | 8 +-- .../02221_parallel_replicas_bug.sh | 2 +- .../02221_system_zookeeper_unrestricted.sh | 4 +- ...2221_system_zookeeper_unrestricted_like.sh | 4 +- ...parallel_distributed_insert_select_view.sh | 6 +- ...arallel_reading_from_replicas_benchmark.sh | 4 +- .../02232_allow_only_replicated_engine.sh | 6 +- .../0_stateless/02250_ON_CLUSTER_grant.sh | 4 +- tests/queries/0_stateless/02262_column_ttl.sh | 4 +- .../0_stateless/02286_parallel_final.sh | 4 +- ..._distinct_in_order_optimization_explain.sh | 66 +++++++++---------- ..._column_ttl_expired_column_optimization.sh | 2 +- .../0_stateless/02361_fsync_profile_events.sh | 4 +- ...7_extend_protocol_with_query_parameters.sh | 10 +-- ...ting_by_input_stream_properties_explain.sh | 2 +- .../0_stateless/02417_load_marks_async.sh | 2 +- 76 files changed, 229 insertions(+), 229 deletions(-) diff --git a/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh index 8546e547581..0b2e80fe457 100755 --- a/tests/queries/0_stateless/00366_multi_statements.sh +++ b/tests/queries/0_stateless/00366_multi_statements.sh @@ -14,22 +14,22 @@ $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" -$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" -$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT -n --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2" +$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2;" +$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" +$CLICKHOUSE_CLIENT -n --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" 
$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" -$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" -$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES (1),(2),(3);" +$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" +$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1;" @@ -48,4 +48,4 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=INSERT+INTO+t_00366+VALUES" -d "(7),(8),(9)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT --query="DROP TABLE t_00366;" +$CLICKHOUSE_CLIENT -n --query="DROP TABLE t_00366;" diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index 0635fbc2a57..27b9f5c00c7 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -43,10 +43,10 @@ popd > /dev/null #SCRIPTDIR=`dirname "$SCRIPTPATH"` SCRIPTDIR=$SCRIPTPATH -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED rm "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout diff --git a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh index 5550fa69d3d..560b97a1d1b 100755 --- a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh +++ b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS test_readonly; CREATE TABLE test_readonly ( ID Int @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT --query=" ################ # Try to create temporary table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 1; CREATE TEMPORARY TABLE readonly ( ID Int @@ -26,7 +26,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 1; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -34,7 +34,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 1; DROP TABLE test_readonly; " 2> /dev/null; @@ -46,7 +46,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 2; CREATE TEMPORARY TABLE readonly ( ID Int @@ -58,7 +58,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 2; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -66,7 +66,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 2; DROP TABLE test_readonly; " 2> /dev/null; @@ -78,7 +78,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 0; CREATE TEMPORARY TABLE readonly ( ID Int @@ -90,7 +90,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 0; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -98,7 +98,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET readonly = 0; DROP TABLE test_readonly; " 2> /dev/null; diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index e9a4369a5bf..93fd0c4a977 100755 --- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -19,13 +19,13 @@ settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_event # Test insert logging on each block and checkPacket() method -$CLICKHOUSE_CLIENT $settings -q " +$CLICKHOUSE_CLIENT $settings -n -q " DROP TABLE IF EXISTS null_00634; CREATE TABLE null_00634 (i UInt8) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple();" head -c 1000 /dev/zero | $CLICKHOUSE_CLIENT $settings --max_insert_block_size=10 --min_insert_block_size_rows=1 --min_insert_block_size_bytes=1 -q "INSERT INTO null_00634 FORMAT RowBinary" -$CLICKHOUSE_CLIENT $settings -q " +$CLICKHOUSE_CLIENT $settings -n -q " SELECT count() FROM null_00634; DROP TABLE null_00634;" diff --git a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh 
b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh index d69e14bdbb9..96d5764780f 100755 --- a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh +++ b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/mergetree_mutations.lib -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" DROP TABLE IF EXISTS fetches_r1 SYNC; DROP TABLE IF EXISTS fetches_r2 SYNC" @@ -17,7 +17,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r2(x UInt32) ENGINE Replicate SETTINGS prefer_fetch_merged_part_time_threshold=0, \ prefer_fetch_merged_part_size_threshold=0" -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET insert_keeper_fault_injection_probability=0; INSERT INTO fetches_r1 VALUES (1); INSERT INTO fetches_r1 VALUES (2); @@ -51,6 +51,6 @@ ${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA fetches_r2" ${CLICKHOUSE_CLIENT} --query="SELECT '*** Check data after fetch/clone of mutated part ***'" ${CLICKHOUSE_CLIENT} --query="SELECT _part, * FROM fetches_r2 ORDER BY x" -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" DROP TABLE fetches_r1 SYNC; DROP TABLE fetches_r2 SYNC" diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh index 989096a26d6..09f20284402 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh @@ -25,83 +25,83 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed WHERE a = 0 AND b | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complex expressions for constant folding - all should pass.
-#${CLICKHOUSE_CLIENT} --query=" +#${CLICKHOUSE_CLIENT} -n --query=" # SET optimize_skip_unused_shards = 1; # SELECT count(*) FROM distributed WHERE a = 0 AND b >= 0 AND b <= 1; #" -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c = 0; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c != 10; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND (a+b)*b != 12; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE (a = 0 OR a = 1) AND (b = 0 OR b = 1); " # These ones should fail. -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh index b3dff2ea69a..035907bddd7 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh @@ -30,73 +30,73 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed_00754 PREWHERE a | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complex expressions for constant folding - all should pass. 
-${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 AND a = 0 WHERE b = 0; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 WHERE b = 1 AND length(c) = 5; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) AND b IN (0, 1) WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) WHERE b IN (0, 1) AND c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 1 AND b = 1 WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE (a = 0 OR a = 1) WHERE (b = 0 OR b = 1); " # These should fail. -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 WHERE c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} --query=" +${CLICKHOUSE_CLIENT} -n --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index 8f7a1a9ae98..12d889a7137 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS part_header_r1; DROP TABLE IF EXISTS part_header_r2; @@ -62,7 +62,7 @@ do [[ $count1 == 1 && $count2 == 1 ]] && break done -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " SELECT '*** Test part removal ***'; SELECT '*** replica 1 ***'; diff --git a/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh index 
ff487f50ee0..e4de0b9ebfc 100755 --- a/tests/queries/0_stateless/00837_minmax_index.sh +++ b/tests/queries/0_stateless/00837_minmax_index.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh index a3aba4f26b6..b267b6a8eb3 100755 --- a/tests/queries/0_stateless/00838_unique_index.sh +++ b/tests/queries/0_stateless/00838_unique_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh index bdd0f36346f..3707aaf2ca6 100755 --- a/tests/queries/0_stateless/00907_set_index_max_rows.sh +++ b/tests/queries/0_stateless/00907_set_index_max_rows.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh index 3bd169dd6df..25a6567b894 100755 --- a/tests/queries/0_stateless/00908_bloom_filter_index.sh +++ b/tests/queries/0_stateless/00908_bloom_filter_index.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx3;" # NGRAM BF -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE bloom_filter_idx ( k UInt64, @@ -22,7 +22,7 @@ CREATE TABLE bloom_filter_idx ORDER BY k SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi';" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE bloom_filter_idx2 ( k UInt64, @@ -109,7 +109,7 @@ $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom # TOKEN BF -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE bloom_filter_idx3 ( k UInt64, @@ -147,7 +147,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx2" $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx3" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx_na;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE bloom_filter_idx_na ( na Array(Array(String)), @@ -156,7 +156,7 @@ CREATE TABLE bloom_filter_idx_na ORDER BY na" 2>&1 | grep -c 'DB::Exception: Unexpected type Array(Array(String)) of bloom filter index' # NGRAM BF with IPv6 -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE bloom_filter_ipv6_idx ( foo IPv6, diff --git a/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh index e1e23639e85..6ebb30c25b9 100755 --- a/tests/queries/0_stateless/00942_mutate_index.sh +++ b/tests/queries/0_stateless/00942_mutate_index.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh index e4a585fce97..6ff7d34a9d7 
100755 --- a/tests/queries/0_stateless/00943_materialize_index.sh +++ b/tests/queries/0_stateless/00943_materialize_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE minmax_idx ( u64 UInt64, @@ -34,7 +34,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO minmax_idx VALUES $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0 FORMAT JSON" | grep "rows_read" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" ALTER TABLE minmax_idx ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 SETTINGS mutations_sync = 2;" $CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 1 SETTINGS mutations_sync = 2;" diff --git a/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh index a12536da239..4655077960f 100755 --- a/tests/queries/0_stateless/00944_clear_index_in_partition.sh +++ b/tests/queries/0_stateless/00944_clear_index_in_partition.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh index 9e410f09b13..e2ec7fd42e4 100755 --- a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh +++ b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx;" # NGRAM BF -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE bloom_filter_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh index 0f29c3dd2f2..8892fb11752 100755 --- a/tests/queries/0_stateless/00965_set_index_string_functions.sh +++ b/tests/queries/0_stateless/00965_set_index_string_functions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE set_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh index ba260042f47..389d433c7e2 100755 --- a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh +++ b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS lowString;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS string;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" create table lowString ( a LowCardinality(String), @@ -18,7 +18,7 @@ ENGINE = MergeTree() PARTITION BY toYYYYMM(b) ORDER BY (a)" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" create table string 
( a String, diff --git a/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh index d79472aa5a5..6a1d4ff5ccf 100755 --- a/tests/queries/0_stateless/00990_hasToken.sh +++ b/tests/queries/0_stateless/00990_hasToken.sh @@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We should have correct env vars from shell_config.sh to run this test -python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -m +python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -nm diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh index 54f1bbe29dc..55bbfb3ff11 100755 --- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh +++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) R1=table_1013_1 R2=table_1013_2 -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -19,13 +19,13 @@ ${CLICKHOUSE_CLIENT} -q " INSERT INTO $R1 VALUES (1) " -timeout 10s ${CLICKHOUSE_CLIENT} -q " +timeout 10s ${CLICKHOUSE_CLIENT} -n -q " SET receive_timeout=1; SYSTEM SYNC REPLICA $R2 " 2>&1 | grep -F -q "Code: 159. DB::Exception" && echo 'OK' || echo 'Failed!' # By dropping tables all related SYNC REPLICA queries would be terminated as well -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; " diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index 053fd9d9d49..4f35b69da0b 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -9,7 +9,7 @@ R1=table_1017_1 R2=table_1017_2 T1=table_1017_merge -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -68,7 +68,7 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" --allow_nondeterministic_mutations=1 2>&1 \ && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh index 5c67fe08fbf..9a26f78a8ee 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON tar -xf "${CURDIR}"/01037_test_data_search.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS points; CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --max_insert_block_si rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ 
-43,7 +43,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT --query=" + $CLICKHOUSE_CLIENT -n --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh index 591978d1129..47f7a5c1c4f 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") tar -xf "${CURDIR}"/01037_test_data_perf.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -22,7 +22,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --min_chunk_bytes_for rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ -42,7 +42,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT --query=" + $CLICKHOUSE_CLIENT -n --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh index ac033ff4eb8..d1ee3f283bc 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TMP_DIR=${CLICKHOUSE_TMP}/tmp mkdir -p $TMP_DIR -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory; @@ -53,7 +53,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT --query=" + $CLICKHOUSE_CLIENT -n --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array ( @@ -106,7 +106,7 @@ do diff -q "${CURDIR}/01037_polygon_dicts_simple_functions.ans" "$outputFile" done -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP DICTIONARY dict_array; DROP DICTIONARY dict_tuple; DROP TABLE polygons_array; diff --git a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh index 29ce4da02ed..0b14ef8f6fa 100755 --- a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh +++ b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh index f103692de56..ffbe3692b64 100755 --- a/tests/queries/0_stateless/01077_mutations_index_consistency.sh +++ b/tests/queries/0_stateless/01077_mutations_index_consistency.sh @@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS movement" -$CLICKHOUSE_CLIENT --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = 
MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" +$CLICKHOUSE_CLIENT -n --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" $CLICKHOUSE_CLIENT --query "insert into movement select toDateTime('2020-01-22 00:00:00', 'Asia/Istanbul') + number%(23*3600) from numbers(1000000);" $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE movement FINAL" -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " SELECT count(), toStartOfHour(date) AS Hour @@ -26,7 +26,7 @@ ORDER BY Hour DESC $CLICKHOUSE_CLIENT --query "alter table movement delete where date >= toDateTime('2020-01-22T16:00:00', 'Asia/Istanbul') and date < toDateTime('2020-01-22T17:00:00', 'Asia/Istanbul') SETTINGS mutations_sync = 2" -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " SELECT count(), toStartOfHour(date) AS Hour @@ -37,7 +37,7 @@ ORDER BY Hour DESC " | grep "16:00:00" | wc -l -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " SELECT count(), toStartOfHour(date) AS Hour @@ -48,7 +48,7 @@ ORDER BY Hour DESC " | grep "22:00:00" | cut -f1 -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " SELECT count(), toStartOfHour(date) AS Hour diff --git a/tests/queries/0_stateless/01187_set_profile_as_setting.sh b/tests/queries/0_stateless/01187_set_profile_as_setting.sh index f6c6fd0be34..42f596c45d6 100755 --- a/tests/queries/0_stateless/01187_set_profile_as_setting.sh +++ b/tests/queries/0_stateless/01187_set_profile_as_setting.sh @@ -7,11 +7,11 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q "select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -n -m -q "select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -n -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=fatal/g') -$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=select+value,changed+from+system.settings+where+name='readonly'" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&profile=default&query=select+value,changed+from+system.settings+where+name='readonly'" diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 1ec53399958..5f82731c54e 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ 
b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -10,7 +10,7 @@ set -o pipefail # shellcheck disable=SC2120 function execute() { - ${CLICKHOUSE_CLIENT} "$@" + ${CLICKHOUSE_CLIENT} -n "$@" } # diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index 02aa0f76be5..db986e74b6b 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -12,8 +12,8 @@ DATA_SIZE=200 SEQ=$(seq 0 $(($NUM_REPLICAS - 1))) -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE IF EXISTS r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done function thread() { @@ -30,6 +30,6 @@ done wait -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SELECT count(), sum(x) FROM r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SYSTEM SYNC REPLICA r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SELECT count(), sum(x) FROM r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE r$REPLICA"; done diff --git a/tests/queries/0_stateless/01415_sticking_mutations.sh b/tests/queries/0_stateless/01415_sticking_mutations.sh index 97467c3ce9d..b7c8768a65d 100755 --- a/tests/queries/0_stateless/01415_sticking_mutations.sh +++ b/tests/queries/0_stateless/01415_sticking_mutations.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations" function check_sticky_mutations() { - $CLICKHOUSE_CLIENT --query "CREATE TABLE sticking_mutations ( + $CLICKHOUSE_CLIENT -n --query "CREATE TABLE sticking_mutations ( date Date, key UInt64, value1 String, diff --git a/tests/queries/0_stateless/01451_dist_logs.sh b/tests/queries/0_stateless/01451_dist_logs.sh index e281e232bb5..23dee7a827d 100755 --- a/tests/queries/0_stateless/01451_dist_logs.sh +++ b/tests/queries/0_stateless/01451_dist_logs.sh @@ -10,4 +10,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # triggered not for the first query for _ in {1..20}; do echo "select * from remote('127.{2,3}', system.numbers) where number = 10 limit 1;" -done | ${CLICKHOUSE_CLIENT} 2>/dev/null +done | ${CLICKHOUSE_CLIENT} -n 2>/dev/null diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index cc574557438..56620d848a3 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -31,7 +31,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh index 24ea3ba3835..91a73471557 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh @@ -16,7 +16,7 @@ unset CLICKHOUSE_WRITE_COVERAGE NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -39,7 +39,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh index a2ef0d52328..1f76a2efc6b 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=6 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -36,7 +36,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh index 6954fef7314..29593ea4fb5 100755 --- a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh +++ b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh @@ -34,7 +34,7 @@ done # Check access rights -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; diff --git a/tests/queries/0_stateless/01508_format_regexp_raw.sh b/tests/queries/0_stateless/01508_format_regexp_raw.sh index 52613c28b2f..8cf1bd73566 100755 --- a/tests/queries/0_stateless/01508_format_regexp_raw.sh +++ b/tests/queries/0_stateless/01508_format_regexp_raw.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --query " +${CLICKHOUSE_CLIENT} -n --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b String) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b String) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' 
-${CLICKHOUSE_CLIENT} --query " +${CLICKHOUSE_CLIENT} -n --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01509_dictionary_preallocate.sh b/tests/queries/0_stateless/01509_dictionary_preallocate.sh index 0459f69b0ad..2a22a307a08 100755 --- a/tests/queries/0_stateless/01509_dictionary_preallocate.sh +++ b/tests/queries/0_stateless/01509_dictionary_preallocate.sh @@ -15,7 +15,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # PREALLOCATE attribute (and also for the history/greppability, that it was # such). -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS data_01509; DROP DICTIONARY IF EXISTS dict_01509; CREATE TABLE data_01509 diff --git a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh index dc178d081bf..594caca7d04 100755 --- a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh +++ b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --query " +${CLICKHOUSE_CLIENT} -n --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' -${CLICKHOUSE_CLIENT} --query " +${CLICKHOUSE_CLIENT} -n --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01526_initial_query_id.sh b/tests/queries/0_stateless/01526_initial_query_id.sh index 8ba27a04d60..e77764ee34e 100755 --- a/tests/queries/0_stateless/01526_initial_query_id.sh +++ b/tests/queries/0_stateless/01526_initial_query_id.sh @@ -15,7 +15,7 @@ ${CLICKHOUSE_CURL} \ --get \ --data-urlencode "query=select 1 format Null" -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " system flush logs; select interface, initial_query_id = query_id from system.query_log diff --git a/tests/queries/0_stateless/01599_mutation_query_params.sh b/tests/queries/0_stateless/01599_mutation_query_params.sh index 5b604c96028..52b0131a9c2 100755 --- a/tests/queries/0_stateless/01599_mutation_query_params.sh +++ b/tests/queries/0_stateless/01599_mutation_query_params.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS test; CREATE TABLE test diff --git a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh index 834eba8f25c..1d768c8b027 100755 --- a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh +++ b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " CREATE USER quoted_by_ip_${CLICKHOUSE_DATABASE}; CREATE USER quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}; @@ -57,7 +57,7 @@ ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 5.6.7.8, 1.2.3.4' -sS "${CLICKHOUSE_URL} ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 1.2.3.4, 5.6.7.8' -sS "${CLICKHOUSE_URL}&user=quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}" -d "SELECT count() FROM numbers(10)" -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT -n --query " DROP QUOTA IF EXISTS quota_by_ip_${CLICKHOUSE_DATABASE}; DROP QUOTA IF EXISTS quota_by_forwarded_ip; diff --git a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh index 6a7eb975c87..0e5c2862066 100755 --- a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh +++ b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP DATABASE IF EXISTS 01684_database_for_cache_dictionary; CREATE DATABASE 01684_database_for_cache_dictionary; diff --git a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh index c2d222a86ea..55061b9a643 100755 --- a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh +++ b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" CREATE TABLE complex_key_simple_attributes_source_table ( id UInt64, diff --git a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh index 5d115e09a79..f8004f9350d 100755 --- a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh +++ b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # Check that DataType parser does not have exponential complexity in the case found by fuzzer. 
-for _ in {1..10}; do ${CLICKHOUSE_CLIENT} --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 
222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done +for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 
222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done diff --git a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh index f38e53f898a..a166837e01a 100755 --- a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS in_order_agg_01710; CREATE TABLE in_order_agg_01710 diff --git a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh index 01537524730..ee73974e8a4 100755 --- a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS in_order_agg_partial_01710; CREATE TABLE in_order_agg_partial_01710 diff --git a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh index f9681ebe4f5..2a7345f4865 100755 --- a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -m -q " +$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -nm -q " drop table if exists data_01753; create table data_01753 (key Int) engine=MergeTree() order by key as select * from numbers(8); select * from data_01753 group by key settings max_block_size=1; diff --git a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh index 3c9e12f780b..b963f3a618f 100755 --- a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh +++ b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -m -q " +$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -nm -q " create table dist_01758 as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); select * from dist_01758 where dummy = 0 format Null; " |& grep -o "StorageDistributed (dist_01758).*" diff --git a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh index ee46f8194b9..9c51b82282c 100755 --- a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh +++ b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -m -q " +$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -nm -q " DROP TABLE IF EXISTS tmp_01683; DROP TABLE IF EXISTS dist_01683; diff --git a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh index f3e8ceffff6..4b75102e9cf 100755 --- a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh +++ b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh @@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function setup() { - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " drop table if exists data_01814; drop table if exists dist_01814; @@ -24,7 +24,7 @@ function setup() function cleanup() { - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " drop table data_01814; drop table dist_01814; " @@ -67,7 +67,7 @@ function test_distributed_push_down_limit_with_query_log() $CLICKHOUSE_CLIENT "${settings_and_opts[@]}" -q "select * from $table group by key limit $offset, 10" - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " system flush logs; select read_rows from system.query_log where diff --git a/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh b/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh index 2b122debff5..218320772c9 100755 --- a/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh +++ b/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function run_test_once() { - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " DROP TABLE IF EXISTS simple_key_source_table_01863; CREATE TABLE simple_key_source_table_01863 ( @@ -29,13 +29,13 @@ function run_test_once() LIFETIME(MIN 0 MAX 1000); " - prev=$($CLICKHOUSE_CLIENT -m -q "SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1") - curr=$($CLICKHOUSE_CLIENT -m -q " + prev=$($CLICKHOUSE_CLIENT -nm -q "SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1") + curr=$($CLICKHOUSE_CLIENT -nm -q " SELECT toUInt64(1) as key, dictGet('simple_key_cache_dictionary_01863', 'value', key) FORMAT Null; SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1 ") - $CLICKHOUSE_CLIENT -m -q " + $CLICKHOUSE_CLIENT -nm -q " DROP DICTIONARY simple_key_cache_dictionary_01863; DROP TABLE simple_key_source_table_01863; " diff --git a/tests/queries/0_stateless/01872_initial_query_start_time.sh b/tests/queries/0_stateless/01872_initial_query_start_time.sh index ff3e1954c75..6a935602ea4 100755 --- a/tests/queries/0_stateless/01872_initial_query_start_time.sh +++ b/tests/queries/0_stateless/01872_initial_query_start_time.sh @@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} -q "create table m (dummy UInt8) ENGINE = Distributed('test query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") ${CLICKHOUSE_CLIENT} -q "select * from m format Null" "--query_id=$query_id" -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " system flush logs; select anyIf(initial_query_start_time, is_initial_query) = anyIf(initial_query_start_time, not is_initial_query), diff --git a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh index 
1294ba53e82..8336229a643 100755 --- a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh +++ b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" SET allow_experimental_bigint_types = 1; DROP TABLE IF EXISTS dictionary_decimal_source_table; diff --git a/tests/queries/0_stateless/01890_materialized_distributed_join.sh b/tests/queries/0_stateless/01890_materialized_distributed_join.sh index 5c04ee8b214..88f7dcf9a69 100755 --- a/tests/queries/0_stateless/01890_materialized_distributed_join.sh +++ b/tests/queries/0_stateless/01890_materialized_distributed_join.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists test_distributed; drop table if exists test_source; drop table if exists test_shard; diff --git a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh index a44106414ea..853445daf3f 100755 --- a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh +++ b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS dictionary_array_source_table; CREATE TABLE dictionary_array_source_table ( diff --git a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh index a5c65ca87a7..0b555cf82c2 100755 --- a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh +++ b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query=" +$CLICKHOUSE_CLIENT -n --query=" DROP TABLE IF EXISTS dictionary_nullable_source_table; CREATE TABLE dictionary_nullable_source_table ( diff --git a/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh b/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh index 608107c76d6..47d5e733480 100755 --- a/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh +++ b/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - ${CLICKHOUSE_CLIENT} -q " + ${CLICKHOUSE_CLIENT} -n -q " DROP TABLE IF EXISTS matview_exception_a_to_c; DROP TABLE IF EXISTS matview_exception_a_to_b; DROP TABLE IF EXISTS table_exception_c; @@ -17,7 +17,7 @@ function cleanup() function setup() { - ${CLICKHOUSE_CLIENT} -q " + ${CLICKHOUSE_CLIENT} -n -q " CREATE TABLE table_exception_a (a String, b Int64) ENGINE = MergeTree ORDER BY b; CREATE TABLE table_exception_b (a Float64, b Int64) ENGINE = MergeTree ORDER BY tuple(); CREATE TABLE table_exception_c (a Float64) ENGINE = MergeTree ORDER BY a; diff --git a/tests/queries/0_stateless/01947_multiple_pipe_read.sh b/tests/queries/0_stateless/01947_multiple_pipe_read.sh index 51709eb574e..06a18a55e6e 100755 --- a/tests/queries/0_stateless/01947_multiple_pipe_read.sh +++ b/tests/queries/0_stateless/01947_multiple_pipe_read.sh @@ -11,7 +11,7 @@ cat "$SAMPLE_FILE" echo '******************' echo 'Read twice from a regular file' -${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table; select * from table;' --file "$SAMPLE_FILE" +${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -n -q 'select * from table; select * from table;' --file "$SAMPLE_FILE" echo '---' ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table WHERE x IN (select x from table);' --file "$SAMPLE_FILE" echo '---' @@ -19,7 +19,7 @@ ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table UNI echo '******************' echo 'Read twice from file descriptor that corresponds to a regular file' -${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table; select * from table;' < "$SAMPLE_FILE" +${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -n -q 'select * from table; select * from table;' < "$SAMPLE_FILE" echo '---' ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table WHERE x IN (select x from table);' < "$SAMPLE_FILE" echo '---' diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.sh b/tests/queries/0_stateless/02003_memory_limit_in_client.sh index 15cacbff8c5..96028f4847a 100755 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.sh +++ b/tests/queries/0_stateless/02003_memory_limit_in_client.sh @@ -4,21 +4,21 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 
2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" diff --git a/tests/queries/0_stateless/02021_create_database_with_comment.sh b/tests/queries/0_stateless/02021_create_database_with_comment.sh index d87b0794c91..f77397dc482 100755 --- a/tests/queries/0_stateless/02021_create_database_with_comment.sh +++ b/tests/queries/0_stateless/02021_create_database_with_comment.sh @@ -20,7 +20,7 @@ function test_db_comments() local ENGINE_NAME="$1" echo "engine : ${ENGINE_NAME}" - $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -m <& /dev/null +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' >& /dev/null echo $? 
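A note on the client flags being touched throughout these hunks: -n is clickhouse-client's short option for --multiquery and -m for --multiline. Newer client builds appear to accept multi-statement input by default, which is presumably why the test invocations keep switching between plain -q, -n -q and -nm -q. A minimal sketch of the difference, assuming a reachable local server (not part of any patch above):

    # two statements in one invocation; -n/--multiquery is required on older clients
    clickhouse-client -n -q "SELECT 1; SELECT 2"
    # combined short flags, as used throughout these tests
    clickhouse-client -nm -q "DROP TABLE IF EXISTS t; CREATE TABLE t (x Int32) ENGINE = Memory"
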
echo 'regression test for overlap profile events snapshots between queries' -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'regression test for overlap profile events snapshots between queries (clickhouse-local)' -$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'print everything' profile_events="$( @@ -35,5 +35,5 @@ profile_events="$( test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" echo 'check that ProfileEvents is new for each query' -sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') +sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') test "$sleep_function_calls" -eq 1 && echo OK || echo "FAIL ($sleep_function_calls)" diff --git a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh index a382b3859f3..3c44a2a7ba7 100755 --- a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh +++ b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh @@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -m < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null +${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -nm < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh index e23a272a4e8..db94c59d2de 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2" -${CLICKHOUSE_CLIENT} -q" +${CLICKHOUSE_CLIENT} -n -q" CREATE TABLE sample_table ( key UInt64 ) @@ -16,7 +16,7 @@ ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_ ORDER BY tuple(); " -${CLICKHOUSE_CLIENT} -q" +${CLICKHOUSE_CLIENT} -n -q" CREATE TABLE sample_table_2 ( key UInt64 ) diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh index 6381d811d5d..c62ec14b340 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2;" 
-${CLICKHOUSE_CLIENT} --query="CREATE TABLE sample_table ( +${CLICKHOUSE_CLIENT} -n --query="CREATE TABLE sample_table ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like', '1') @@ -16,7 +16,7 @@ ORDER BY tuple(); DROP TABLE IF EXISTS sample_table SYNC;" -${CLICKHOUSE_CLIENT} --query "CREATE TABLE sample_table_2 ( +${CLICKHOUSE_CLIENT} -n --query "CREATE TABLE sample_table_2 ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like_2', '1') diff --git a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh index 63111cc32e4..376a49fd820 100755 --- a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh +++ b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists dst_02225; drop table if exists src_02225; create table dst_02225 (key Int) engine=Memory(); @@ -14,7 +14,7 @@ create table src_02225 (key Int) engine=Memory(); insert into src_02225 values (1); " -$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -m -q " +$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -nm -q " truncate table dst_02225; insert into function remote('127.{1,2}', currentDatabase(), dst_02225, key) select * from remote('127.{1,2}', view(select * from {database:Identifier}.src_02225), key) @@ -29,7 +29,7 @@ settings parallel_distributed_insert_select=2, max_distributed_depth=1; select * from dst_02225; " -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table src_02225; drop table dst_02225; " diff --git a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh index 177b373641f..bc90f4b2c11 100755 --- a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh +++ b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data_02226; create table data_02226 (key Int) engine=MergeTree() order by key as select * from numbers(1); @@ -24,7 +24,7 @@ opts=( $CLICKHOUSE_BENCHMARK --query "select * from remote('127.1', $CLICKHOUSE_DATABASE, data_02226)" "${opts[@]}" >& /dev/null ret=$? 
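A note on the inline hints such as -- { clientError MEMORY_LIMIT_EXCEEDED }, -- { clientError 62 } and -- { serverError 456 } that appear inside several of the queries above: the functional-test harness appears to treat them as an expectation that the statement fails with exactly that error (given either by symbolic name or by numeric code), so the test passes only when the failure actually occurs. A hedged sketch reusing the memory-limit example from the hunks above:

    # expected to fail on the client side once the 1-byte client memory limit is hit
    clickhouse-client --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }"
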
-$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table data_02226; " diff --git a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh index e47a3033681..d1a3825d286 100755 --- a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh +++ b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh @@ -12,9 +12,9 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON Memory, TABLE ENGINE ON MergeTree, TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_memory (x UInt32) engine = Memory;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh index 09f9c0c8a98..66417e9694a 100755 --- a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh +++ b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - $CLICKHOUSE_CLIENT -mq " + $CLICKHOUSE_CLIENT -nmq " DROP USER IF EXISTS with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP USER IF EXISTS without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP DATABASE IF EXISTS db_with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; @@ -15,7 +15,7 @@ function cleanup() cleanup trap cleanup EXIT -$CLICKHOUSE_CLIENT -mq " +$CLICKHOUSE_CLIENT -nmq " CREATE USER with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; CREATE USER without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh index c620d3b6d9c..b5e29c9b2a1 100755 --- a/tests/queries/0_stateless/02262_column_ttl.sh +++ b/tests/queries/0_stateless/02262_column_ttl.sh @@ -14,7 +14,7 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # note, that this should be written in .sh since we need $CLICKHOUSE_DATABASE # not 'default' to catch text_log -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists ttl_02262; drop table if exists this_text_log; @@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -m -q " ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = '$CLICKHOUSE_DATABASE' and name = 'ttl_02262'") -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " -- OPTIMIZE TABLE x FINAL will be done in background -- attach to it's log, via table UUID in query_id (see merger/mutator code). create materialized view this_text_log engine=Memory() as diff --git a/tests/queries/0_stateless/02286_parallel_final.sh b/tests/queries/0_stateless/02286_parallel_final.sh index 47dfad42e11..0ac510208f3 100755 --- a/tests/queries/0_stateless/02286_parallel_final.sh +++ b/tests/queries/0_stateless/02286_parallel_final.sh @@ -9,7 +9,7 @@ echo "Test intersecting ranges" test_random_values() { layers=$1 - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " drop table if exists tbl_8parts_${layers}granules_rnd; create table tbl_8parts_${layers}granules_rnd (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 % 8); insert into tbl_8parts_${layers}granules_rnd select number, 1 from numbers_mt($((layers * 8 * 8192))); @@ -29,7 +29,7 @@ echo "Test non intersecting ranges" test_sequential_values() { layers=$1 - $CLICKHOUSE_CLIENT -q " + $CLICKHOUSE_CLIENT -n -q " drop table if exists tbl_8parts_${layers}granules_seq; create table tbl_8parts_${layers}granules_seq (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 / $((layers * 8192)))::UInt64; insert into tbl_8parts_${layers}granules_seq select number, 1 from numbers_mt($((layers * 8 * 8192))); diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh index 953485c3a1f..bd7e6be3987 100755 --- a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh +++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh @@ -23,99 +23,99 @@ $CLICKHOUSE_CLIENT -q "insert into distinct_in_order_explain select number % num $CLICKHOUSE_CLIENT -q "select '-- disable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct all primary key columns -> ordinary distinct'" -$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- enable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct with all primary key columns -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from 
distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by the same columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by columns are prefix of distinct columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct but non-primary key prefix -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column _not_ in distinct -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix -> ordinary distinct'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column in distinct -> final distinct optimization only'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column _not_ in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval 
$FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by _const_ column in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT echo "-- Check reading in order for distinct" echo "-- disabled, distinct columns match sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns match sorting key" # read_in_order_two_level_merge_threshold is set here to avoid repeating MergeTreeInOrder in output -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns DON't form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns contains constant columns, non-const columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns contains constant columns, non-const columns match prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, only part of distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, 
c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "=== disable new analyzer ===" DISABLE_ANALYZER="set enable_analyzer=0" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES echo "-- check that reading in order optimization for ORDER BY and DISTINCT applied correctly in the same query" ENABLE_READ_IN_ORDER="set optimize_read_in_order=1" echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | 
eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES echo "=== enable new analyzer ===" ENABLE_ANALYZER="set enable_analyzer=1" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. 
it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES $CLICKHOUSE_CLIENT -q "drop table if exists distinct_in_order_explain sync" diff --git a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh index 490f8361682..96f80d65878 100755 --- a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh +++ b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) data_path="$CLICKHOUSE_TMP/local" -$CLICKHOUSE_LOCAL --path "$data_path" -m -q " +$CLICKHOUSE_LOCAL --path "$data_path" -nm -q " create table ttl_02335 ( date Date, key Int, diff --git 
a/tests/queries/0_stateless/02361_fsync_profile_events.sh b/tests/queries/0_stateless/02361_fsync_profile_events.sh index 73bf3fa120a..98c9cf9b7b4 100755 --- a/tests/queries/0_stateless/02361_fsync_profile_events.sh +++ b/tests/queries/0_stateless/02361_fsync_profile_events.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -m -q " +$CLICKHOUSE_CLIENT -nm -q " drop table if exists data_fsync_pe; create table data_fsync_pe (key Int) engine=MergeTree() @@ -27,7 +27,7 @@ for i in {1..100}; do $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data_fsync_pe values (1)" read -r FileSync FileOpen DirectorySync FileSyncElapsedMicroseconds DirectorySyncElapsedMicroseconds <<<"$( - $CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " + $CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " system flush logs; select diff --git a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh index 46396d38747..71e3b6961f8 100755 --- a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh +++ b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh @@ -24,7 +24,7 @@ $CLICKHOUSE_CLIENT \ table_name="t_02377_extend_protocol_with_query_parameters_$RANDOM$RANDOM" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -n -q " create table $table_name( id Int64, arr Array(UInt8), @@ -57,17 +57,17 @@ $CLICKHOUSE_CLIENT \ # it is possible to set parameter for the current session -$CLICKHOUSE_CLIENT -q "set param_n = 42; select {n: UInt8}" +$CLICKHOUSE_CLIENT -n -q "set param_n = 42; select {n: UInt8}" # and it will not be visible to other sessions -$CLICKHOUSE_CLIENT -q "select {n: UInt8} -- { serverError 456 }" +$CLICKHOUSE_CLIENT -n -q "select {n: UInt8} -- { serverError 456 }" # the same parameter could be set multiple times within one session (new value overrides the previous one) -$CLICKHOUSE_CLIENT -q "set param_n = 12; set param_n = 13; select {n: UInt8}" +$CLICKHOUSE_CLIENT -n -q "set param_n = 12; set param_n = 13; select {n: UInt8}" # multiple different parameters could be defined within each session -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT -n -q " set param_a = 13, param_b = 'str'; set param_c = '2022-08-04 18:30:53'; set param_d = '{\'10\': [11, 12], \'13\': [14, 15]}'; diff --git a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh index 974f10e2f24..4b9793da5bb 100755 --- a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh +++ b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh @@ -15,7 +15,7 @@ FIND_SORTMODE="$GREP_SORTMODE | $TRIM_LEADING_SPACES" function explain_sorting { echo "-- QUERY: "$1 - $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -q "$1" | eval $FIND_SORTING + $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTING } function explain_sortmode { diff --git a/tests/queries/0_stateless/02417_load_marks_async.sh b/tests/queries/0_stateless/02417_load_marks_async.sh index bcede9e4f5e..950656e7ab6 100755 --- a/tests/queries/0_stateless/02417_load_marks_async.sh +++ 
b/tests/queries/0_stateless/02417_load_marks_async.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test;" -${CLICKHOUSE_CLIENT} -q " +${CLICKHOUSE_CLIENT} -n -q " CREATE TABLE test ( n0 UInt64, From 3af92fc65258f117dce6156a5c60d93267a65ab1 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 15 Aug 2024 12:40:52 +0000 Subject: [PATCH 2262/2361] Reduce number of changed tests --- .../01013_sync_replica_timeout_zookeeper.sh | 6 +++--- ...ations_with_nondeterministic_functions_zookeeper.sh | 4 ++-- .../0_stateless/01037_polygon_dicts_correctness_all.sh | 6 +++--- .../01037_polygon_dicts_correctness_fast.sh | 6 +++--- .../01037_polygon_dicts_simple_functions.sh | 6 +++--- .../0_stateless/01055_minmax_index_compact_parts.sh | 2 +- .../0_stateless/01077_mutations_index_consistency.sh | 10 +++++----- .../0_stateless/01187_set_profile_as_setting.sh | 8 ++++---- ...in_insert_block_size_rows_for_materialized_views.sh | 2 +- .../0_stateless/01307_multiple_leaders_zookeeper.sh | 10 +++++----- tests/queries/0_stateless/01415_sticking_mutations.sh | 2 +- tests/queries/0_stateless/01451_dist_logs.sh | 2 +- .../0_stateless/01459_manual_write_to_replicas.sh | 4 ++-- .../01459_manual_write_to_replicas_quorum.sh | 4 ++-- ...59_manual_write_to_replicas_quorum_detach_attach.sh | 4 ++-- ...507_clickhouse_server_start_with_embedded_config.sh | 2 +- tests/queries/0_stateless/01508_format_regexp_raw.sh | 4 ++-- .../0_stateless/01509_dictionary_preallocate.sh | 2 +- .../01510_format_regexp_raw_low_cardinality.sh | 4 ++-- tests/queries/0_stateless/01526_initial_query_id.sh | 2 +- .../queries/0_stateless/01599_mutation_query_params.sh | 2 +- .../queries/0_stateless/01600_quota_by_forwarded_ip.sh | 4 ++-- .../01684_ssd_cache_dictionary_simple_key.sh | 2 +- .../01685_ssd_cache_dictionary_complex_key.sh | 2 +- .../0_stateless/01691_parser_data_type_exponential.sh | 2 +- .../01710_projections_optimize_aggregation_in_order.sh | 2 +- ...rojections_partial_optimize_aggregation_in_order.sh | 2 +- .../0_stateless/01753_optimize_aggregation_in_order.sh | 2 +- .../01758_optimize_skip_unused_shards_once.sh | 2 +- .../01791_dist_INSERT_block_structure_mismatch.sh | 2 +- .../0_stateless/01814_distributed_push_down_limit.sh | 6 +++--- .../0_stateless/01853_dictionary_cache_duplicates.sh | 8 ++++---- .../0_stateless/01872_initial_query_start_time.sh | 2 +- .../01875_ssd_cache_dictionary_decimal256_type.sh | 2 +- .../0_stateless/01890_materialized_distributed_join.sh | 2 +- .../01903_ssd_cache_dictionary_array_type.sh | 2 +- ...01904_ssd_cache_dictionary_default_nullable_type.sh | 2 +- .../01927_query_views_log_matview_exceptions.sh | 4 ++-- tests/queries/0_stateless/01947_multiple_pipe_read.sh | 4 ++-- 39 files changed, 72 insertions(+), 72 deletions(-) diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh index 55bbfb3ff11..54f1bbe29dc 100755 --- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh +++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) R1=table_1013_1 R2=table_1013_2 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -19,13 +19,13 @@ ${CLICKHOUSE_CLIENT} -n -q " INSERT INTO $R1 VALUES (1) " -timeout 10s ${CLICKHOUSE_CLIENT} -n -q " +timeout 10s ${CLICKHOUSE_CLIENT} -q " SET receive_timeout=1; 
SYSTEM SYNC REPLICA $R2 " 2>&1 | grep -F -q "Code: 159. DB::Exception" && echo 'OK' || echo 'Failed!' # By dropping tables all related SYNC REPLICA queries would be terminated as well -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; " diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index 4f35b69da0b..053fd9d9d49 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -9,7 +9,7 @@ R1=table_1017_1 R2=table_1017_2 T1=table_1017_merge -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -68,7 +68,7 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" --allow_nondeterministic_mutations=1 2>&1 \ && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh index 9a26f78a8ee..5c67fe08fbf 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON tar -xf "${CURDIR}"/01037_test_data_search.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS points; CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --max_insert_block_si rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ -43,7 +43,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh index 47f7a5c1c4f..591978d1129 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") tar -xf "${CURDIR}"/01037_test_data_perf.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -22,7 +22,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --min_chunk_bytes_for rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ -42,7 +42,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE 
DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh index d1ee3f283bc..ac033ff4eb8 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TMP_DIR=${CLICKHOUSE_TMP}/tmp mkdir -p $TMP_DIR -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory; @@ -53,7 +53,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array ( @@ -106,7 +106,7 @@ do diff -q "${CURDIR}/01037_polygon_dicts_simple_functions.ans" "$outputFile" done -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DICTIONARY dict_array; DROP DICTIONARY dict_tuple; DROP TABLE polygons_array; diff --git a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh index 0b14ef8f6fa..29ce4da02ed 100755 --- a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh +++ b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh index ffbe3692b64..f103692de56 100755 --- a/tests/queries/0_stateless/01077_mutations_index_consistency.sh +++ b/tests/queries/0_stateless/01077_mutations_index_consistency.sh @@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS movement" -$CLICKHOUSE_CLIENT -n --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" +$CLICKHOUSE_CLIENT --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" $CLICKHOUSE_CLIENT --query "insert into movement select toDateTime('2020-01-22 00:00:00', 'Asia/Istanbul') + number%(23*3600) from numbers(1000000);" $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE movement FINAL" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -26,7 +26,7 @@ ORDER BY Hour DESC $CLICKHOUSE_CLIENT --query "alter table movement delete where date >= toDateTime('2020-01-22T16:00:00', 'Asia/Istanbul') and date < toDateTime('2020-01-22T17:00:00', 'Asia/Istanbul') SETTINGS mutations_sync = 2" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -37,7 +37,7 @@ ORDER BY Hour DESC " | grep "16:00:00" | wc -l -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -48,7 +48,7 @@ ORDER BY Hour DESC " | grep "22:00:00" | cut -f1 -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT 
count(), toStartOfHour(date) AS Hour diff --git a/tests/queries/0_stateless/01187_set_profile_as_setting.sh b/tests/queries/0_stateless/01187_set_profile_as_setting.sh index 42f596c45d6..f6c6fd0be34 100755 --- a/tests/queries/0_stateless/01187_set_profile_as_setting.sh +++ b/tests/queries/0_stateless/01187_set_profile_as_setting.sh @@ -7,11 +7,11 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -m -q "select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -n -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -m -q "select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=fatal/g') -$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=select+value,changed+from+system.settings+where+name='readonly'" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&profile=default&query=select+value,changed+from+system.settings+where+name='readonly'" diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 5f82731c54e..1ec53399958 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -10,7 +10,7 @@ set -o pipefail # shellcheck disable=SC2120 function execute() { - ${CLICKHOUSE_CLIENT} -n "$@" + ${CLICKHOUSE_CLIENT} "$@" } # diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index db986e74b6b..02aa0f76be5 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -12,8 +12,8 @@ DATA_SIZE=200 SEQ=$(seq 0 $(($NUM_REPLICAS - 1))) -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE IF EXISTS r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done function thread() { @@ 
-30,6 +30,6 @@ done wait -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SYSTEM SYNC REPLICA r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SELECT count(), sum(x) FROM r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SELECT count(), sum(x) FROM r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE r$REPLICA"; done diff --git a/tests/queries/0_stateless/01415_sticking_mutations.sh b/tests/queries/0_stateless/01415_sticking_mutations.sh index b7c8768a65d..97467c3ce9d 100755 --- a/tests/queries/0_stateless/01415_sticking_mutations.sh +++ b/tests/queries/0_stateless/01415_sticking_mutations.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations" function check_sticky_mutations() { - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE sticking_mutations ( + $CLICKHOUSE_CLIENT --query "CREATE TABLE sticking_mutations ( date Date, key UInt64, value1 String, diff --git a/tests/queries/0_stateless/01451_dist_logs.sh b/tests/queries/0_stateless/01451_dist_logs.sh index 23dee7a827d..e281e232bb5 100755 --- a/tests/queries/0_stateless/01451_dist_logs.sh +++ b/tests/queries/0_stateless/01451_dist_logs.sh @@ -10,4 +10,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # triggered not for the first query for _ in {1..20}; do echo "select * from remote('127.{2,3}', system.numbers) where number = 10 limit 1;" -done | ${CLICKHOUSE_CLIENT} -n 2>/dev/null +done | ${CLICKHOUSE_CLIENT} 2>/dev/null diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index 56620d848a3..cc574557438 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -31,7 +31,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh index 91a73471557..24ea3ba3835 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh @@ -16,7 +16,7 @@ unset CLICKHOUSE_WRITE_COVERAGE NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -39,7 +39,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh 
b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh index 1f76a2efc6b..a2ef0d52328 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=6 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -36,7 +36,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh index 29593ea4fb5..6954fef7314 100755 --- a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh +++ b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh @@ -34,7 +34,7 @@ done # Check access rights -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; diff --git a/tests/queries/0_stateless/01508_format_regexp_raw.sh b/tests/queries/0_stateless/01508_format_regexp_raw.sh index 8cf1bd73566..52613c28b2f 100755 --- a/tests/queries/0_stateless/01508_format_regexp_raw.sh +++ b/tests/queries/0_stateless/01508_format_regexp_raw.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b String) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b String) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01509_dictionary_preallocate.sh b/tests/queries/0_stateless/01509_dictionary_preallocate.sh index 2a22a307a08..0459f69b0ad 100755 --- a/tests/queries/0_stateless/01509_dictionary_preallocate.sh +++ b/tests/queries/0_stateless/01509_dictionary_preallocate.sh @@ -15,7 +15,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # PREALLOCATE attribute (and also for the history/greppability, that it was # such). -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS data_01509; DROP DICTIONARY IF EXISTS dict_01509; CREATE TABLE data_01509 diff --git a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh index 594caca7d04..dc178d081bf 100755 --- a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh +++ b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01526_initial_query_id.sh b/tests/queries/0_stateless/01526_initial_query_id.sh index e77764ee34e..8ba27a04d60 100755 --- a/tests/queries/0_stateless/01526_initial_query_id.sh +++ b/tests/queries/0_stateless/01526_initial_query_id.sh @@ -15,7 +15,7 @@ ${CLICKHOUSE_CURL} \ --get \ --data-urlencode "query=select 1 format Null" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " system flush logs; select interface, initial_query_id = query_id from system.query_log diff --git a/tests/queries/0_stateless/01599_mutation_query_params.sh b/tests/queries/0_stateless/01599_mutation_query_params.sh index 52b0131a9c2..5b604c96028 100755 --- a/tests/queries/0_stateless/01599_mutation_query_params.sh +++ b/tests/queries/0_stateless/01599_mutation_query_params.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS test; CREATE TABLE test diff --git a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh index 1d768c8b027..834eba8f25c 100755 --- a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh +++ b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " CREATE USER quoted_by_ip_${CLICKHOUSE_DATABASE}; CREATE USER quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}; @@ -57,7 +57,7 @@ ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 5.6.7.8, 1.2.3.4' -sS "${CLICKHOUSE_URL} ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 1.2.3.4, 5.6.7.8' -sS "${CLICKHOUSE_URL}&user=quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}" -d "SELECT count() FROM numbers(10)" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP QUOTA IF EXISTS quota_by_ip_${CLICKHOUSE_DATABASE}; DROP QUOTA IF EXISTS quota_by_forwarded_ip; diff --git a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh index 0e5c2862066..6a7eb975c87 100755 --- a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh +++ b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DATABASE IF EXISTS 01684_database_for_cache_dictionary; CREATE DATABASE 01684_database_for_cache_dictionary; diff --git a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh index 55061b9a643..c2d222a86ea 100755 --- a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh +++ b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE complex_key_simple_attributes_source_table ( id UInt64, diff --git a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh index f8004f9350d..5d115e09a79 100755 --- a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh +++ b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # Check that DataType parser does not have exponential complexity in the case found by fuzzer. -for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 
222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done +for _ in {1..10}; do ${CLICKHOUSE_CLIENT} --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 
222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 
222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done diff --git a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh index a166837e01a..f38e53f898a 100755 --- a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS in_order_agg_01710; CREATE TABLE in_order_agg_01710 diff --git a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh index ee73974e8a4..01537524730 100755 --- a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS in_order_agg_partial_01710; CREATE TABLE in_order_agg_partial_01710 diff --git a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh index 2a7345f4865..f9681ebe4f5 100755 --- a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -nm -q " +$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -m -q " drop table if exists data_01753; create table data_01753 (key Int) engine=MergeTree() order by key as select * from numbers(8); select * from data_01753 group by key settings max_block_size=1; diff --git a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh index b963f3a618f..3c9e12f780b 100755 --- a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh +++ b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -nm -q " +$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -m -q " create table dist_01758 as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); select * from dist_01758 where dummy = 0 format Null; " |& grep -o "StorageDistributed (dist_01758).*" diff --git a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh index 9c51b82282c..ee46f8194b9 100755 --- a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh +++ b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -nm -q " +$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -m -q " DROP TABLE IF EXISTS tmp_01683; DROP TABLE IF EXISTS dist_01683; diff --git a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh index 4b75102e9cf..f3e8ceffff6 100755 --- a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh +++ b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh @@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function setup() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table if exists data_01814; drop table if exists dist_01814; @@ -24,7 +24,7 @@ function setup() function cleanup() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table data_01814; drop table dist_01814; " @@ -67,7 +67,7 @@ function test_distributed_push_down_limit_with_query_log() $CLICKHOUSE_CLIENT "${settings_and_opts[@]}" -q "select * from $table group by key limit $offset, 10" - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system flush logs; select read_rows from system.query_log where diff --git a/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh b/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh index 218320772c9..2b122debff5 100755 --- a/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh +++ b/tests/queries/0_stateless/01853_dictionary_cache_duplicates.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function run_test_once() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS simple_key_source_table_01863; CREATE TABLE simple_key_source_table_01863 ( @@ -29,13 +29,13 @@ function run_test_once() LIFETIME(MIN 0 MAX 1000); " - prev=$($CLICKHOUSE_CLIENT -nm -q "SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1") - curr=$($CLICKHOUSE_CLIENT -nm -q " + prev=$($CLICKHOUSE_CLIENT -m -q "SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1") + curr=$($CLICKHOUSE_CLIENT -m -q " SELECT toUInt64(1) as key, dictGet('simple_key_cache_dictionary_01863', 'value', key) FORMAT Null; SELECT value FROM system.events WHERE event = 'DictCacheKeysRequestedMiss' SETTINGS system_events_show_zero_values=1 ") - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " DROP DICTIONARY simple_key_cache_dictionary_01863; DROP TABLE simple_key_source_table_01863; " diff --git a/tests/queries/0_stateless/01872_initial_query_start_time.sh 
b/tests/queries/0_stateless/01872_initial_query_start_time.sh index 6a935602ea4..ff3e1954c75 100755 --- a/tests/queries/0_stateless/01872_initial_query_start_time.sh +++ b/tests/queries/0_stateless/01872_initial_query_start_time.sh @@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} -q "create table m (dummy UInt8) ENGINE = Distributed('test query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") ${CLICKHOUSE_CLIENT} -q "select * from m format Null" "--query_id=$query_id" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " system flush logs; select anyIf(initial_query_start_time, is_initial_query) = anyIf(initial_query_start_time, not is_initial_query), diff --git a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh index 8336229a643..1294ba53e82 100755 --- a/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh +++ b/tests/queries/0_stateless/01875_ssd_cache_dictionary_decimal256_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET allow_experimental_bigint_types = 1; DROP TABLE IF EXISTS dictionary_decimal_source_table; diff --git a/tests/queries/0_stateless/01890_materialized_distributed_join.sh b/tests/queries/0_stateless/01890_materialized_distributed_join.sh index 88f7dcf9a69..5c04ee8b214 100755 --- a/tests/queries/0_stateless/01890_materialized_distributed_join.sh +++ b/tests/queries/0_stateless/01890_materialized_distributed_join.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists test_distributed; drop table if exists test_source; drop table if exists test_shard; diff --git a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh index 853445daf3f..a44106414ea 100755 --- a/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh +++ b/tests/queries/0_stateless/01903_ssd_cache_dictionary_array_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS dictionary_array_source_table; CREATE TABLE dictionary_array_source_table ( diff --git a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh index 0b555cf82c2..a5c65ca87a7 100755 --- a/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh +++ b/tests/queries/0_stateless/01904_ssd_cache_dictionary_default_nullable_type.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS dictionary_nullable_source_table; CREATE TABLE dictionary_nullable_source_table ( diff --git a/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh b/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh index 47d5e733480..608107c76d6 100755 --- a/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh +++ b/tests/queries/0_stateless/01927_query_views_log_matview_exceptions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - ${CLICKHOUSE_CLIENT} -n -q " + ${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS matview_exception_a_to_c; DROP TABLE IF EXISTS matview_exception_a_to_b; DROP TABLE IF EXISTS table_exception_c; @@ -17,7 +17,7 @@ function cleanup() function setup() { - ${CLICKHOUSE_CLIENT} -n -q " + ${CLICKHOUSE_CLIENT} -q " CREATE TABLE table_exception_a (a String, b Int64) ENGINE = MergeTree ORDER BY b; CREATE TABLE table_exception_b (a Float64, b Int64) ENGINE = MergeTree ORDER BY tuple(); CREATE TABLE table_exception_c (a Float64) ENGINE = MergeTree ORDER BY a; diff --git a/tests/queries/0_stateless/01947_multiple_pipe_read.sh b/tests/queries/0_stateless/01947_multiple_pipe_read.sh index 06a18a55e6e..51709eb574e 100755 --- a/tests/queries/0_stateless/01947_multiple_pipe_read.sh +++ b/tests/queries/0_stateless/01947_multiple_pipe_read.sh @@ -11,7 +11,7 @@ cat "$SAMPLE_FILE" echo '******************' echo 'Read twice from a regular file' -${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -n -q 'select * from table; select * from table;' --file "$SAMPLE_FILE" +${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table; select * from table;' --file "$SAMPLE_FILE" echo '---' ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table WHERE x IN (select x from table);' --file "$SAMPLE_FILE" echo '---' @@ -19,7 +19,7 @@ ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table UNI echo '******************' echo 'Read twice from file descriptor that corresponds to a regular file' -${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -n -q 'select * from table; select * from table;' < "$SAMPLE_FILE" +${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table; select * from table;' < "$SAMPLE_FILE" echo '---' ${CLICKHOUSE_LOCAL} --structure 'x UInt64, s String' -q 'select * from table WHERE x IN (select x from table);' < "$SAMPLE_FILE" echo '---' From 47ed2204bd1e46ca6ef0556180372cc38fde5153 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 15 Aug 2024 12:38:24 +0000 Subject: [PATCH 2263/2361] Follow-up to ClickHouse#63898 --- .../0_stateless/00366_multi_statements.sh | 20 +++--- .../00443_preferred_block_size_bytes.sh | 4 +- ...ess_to_temporary_table_in_readonly_mode.sh | 20 +++--- ...4_performance_introspection_and_logging.sh | 4 +- ..._fetch_merged_or_mutated_part_zookeeper.sh | 6 +- ...d_optimize_skip_select_on_unused_shards.sh | 30 ++++----- ...p_select_on_unused_shards_with_prewhere.sh | 26 ++++---- ...ated_minimalistic_part_header_zookeeper.sh | 4 +- .../queries/0_stateless/00837_minmax_index.sh | 2 +- .../queries/0_stateless/00838_unique_index.sh | 2 +- .../0_stateless/00907_set_index_max_rows.sh | 2 +- .../0_stateless/00908_bloom_filter_index.sh | 10 +-- .../queries/0_stateless/00942_mutate_index.sh | 2 +- .../0_stateless/00943_materialize_index.sh | 4 +- .../00944_clear_index_in_partition.sh | 2 +- 
.../00964_bloom_index_string_functions.sh | 2 +- .../00965_set_index_string_functions.sh | 2 +- .../00974_primary_key_for_lowCardinality.sh | 4 +- tests/queries/0_stateless/00990_hasToken.sh | 2 +- .../02003_memory_limit_in_client.sh | 32 ++++----- .../02021_create_database_with_comment.sh | 2 +- .../02050_client_profile_events.sh | 8 +-- .../02221_parallel_replicas_bug.sh | 2 +- .../02221_system_zookeeper_unrestricted.sh | 4 +- ...2221_system_zookeeper_unrestricted_like.sh | 4 +- ...parallel_distributed_insert_select_view.sh | 6 +- ...arallel_reading_from_replicas_benchmark.sh | 4 +- .../02232_allow_only_replicated_engine.sh | 6 +- .../0_stateless/02250_ON_CLUSTER_grant.sh | 4 +- tests/queries/0_stateless/02262_column_ttl.sh | 4 +- .../0_stateless/02286_parallel_final.sh | 4 +- ..._distinct_in_order_optimization_explain.sh | 66 +++++++++---------- ..._column_ttl_expired_column_optimization.sh | 2 +- .../0_stateless/02361_fsync_profile_events.sh | 4 +- ...7_extend_protocol_with_query_parameters.sh | 10 +-- ...ting_by_input_stream_properties_explain.sh | 2 +- .../0_stateless/02417_load_marks_async.sh | 2 +- 37 files changed, 157 insertions(+), 157 deletions(-) diff --git a/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh index 0b2e80fe457..8546e547581 100755 --- a/tests/queries/0_stateless/00366_multi_statements.sh +++ b/tests/queries/0_stateless/00366_multi_statements.sh @@ -14,22 +14,22 @@ $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2" -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2;" -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES (1),(2),(3);" -$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" -$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" +$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1;" @@ -48,4 +48,4 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=INSERT+INTO+t_00366+VALUES" -d "(7),(8),(9)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="DROP TABLE t_00366;" 
+$CLICKHOUSE_CLIENT --query="DROP TABLE t_00366;" diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index 27b9f5c00c7..0635fbc2a57 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -43,10 +43,10 @@ popd > /dev/null #SCRIPTDIR=`dirname "$SCRIPTPATH"` SCRIPTDIR=$SCRIPTPATH -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED rm "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout diff --git a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh index 560b97a1d1b..5550fa69d3d 100755 --- a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh +++ b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_readonly; CREATE TABLE test_readonly ( ID Int @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT -n --query=" ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; CREATE TEMPORARY TABLE readonly ( ID Int @@ -26,7 +26,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -34,7 +34,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; DROP TABLE test_readonly; " 2> /dev/null; @@ -46,7 +46,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; CREATE TEMPORARY TABLE readonly ( ID Int @@ -58,7 +58,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -66,7 +66,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; DROP TABLE test_readonly; " 2> /dev/null; @@ -78,7 +78,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; CREATE TEMPORARY TABLE readonly ( ID Int @@ -90,7 +90,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -98,7 +98,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; DROP TABLE test_readonly; " 2> /dev/null; diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index 93fd0c4a977..e9a4369a5bf 100755 --- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -19,13 +19,13 @@ settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_event # Test insert logging on each block and checkPacket() method -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " DROP TABLE IF EXISTS null_00634; CREATE TABLE null_00634 (i UInt8) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple();" head -c 1000 /dev/zero | $CLICKHOUSE_CLIENT $settings --max_insert_block_size=10 --min_insert_block_size_rows=1 --min_insert_block_size_bytes=1 -q "INSERT INTO null_00634 FORMAT RowBinary" -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " SELECT count() FROM null_00634; DROP TABLE null_00634;" diff --git a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh 
b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh index 96d5764780f..d69e14bdbb9 100755 --- a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh +++ b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/mergetree_mutations.lib -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS fetches_r1 SYNC; DROP TABLE IF EXISTS fetches_r2 SYNC" @@ -17,7 +17,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r2(x UInt32) ENGINE Replicate SETTINGS prefer_fetch_merged_part_time_threshold=0, \ prefer_fetch_merged_part_size_threshold=0" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET insert_keeper_fault_injection_probability=0; INSERT INTO fetches_r1 VALUES (1); INSERT INTO fetches_r1 VALUES (2); @@ -51,6 +51,6 @@ ${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA fetches_r2" ${CLICKHOUSE_CLIENT} --query="SELECT '*** Check data after fetch/clone of mutated part ***'" ${CLICKHOUSE_CLIENT} --query="SELECT _part, * FROM fetches_r2 ORDER BY x" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE fetches_r1 SYNC; DROP TABLE fetches_r2 SYNC" diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh index 09f20284402..989096a26d6 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh @@ -25,83 +25,83 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed WHERE a = 0 AND b | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complext expressions for constant folding - all should pass. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 1 AND a = 0 AND b = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a IN (0, 1) AND b IN (0, 1); " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 1 AND b = 1; " # TODO: should pass one day. 
-#${CLICKHOUSE_CLIENT} -n --query=" +#${CLICKHOUSE_CLIENT} --query=" # SET optimize_skip_unused_shards = 1; # SELECT count(*) FROM distributed WHERE a = 0 AND b >= 0 AND b <= 1; #" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c != 10; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND (a+b)*b != 12; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE (a = 0 OR a = 1) AND (b = 0 OR b = 1); " # These ones should fail. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh index 035907bddd7..b3dff2ea69a 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh @@ -30,73 +30,73 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed_00754 PREWHERE a | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complex expressions for constant folding - all should pass. 
-${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 AND a = 0 WHERE b = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 WHERE b = 1 AND length(c) = 5; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) AND b IN (0, 1) WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) WHERE b IN (0, 1) AND c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 1 AND b = 1 WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE (a = 0 OR a = 1) WHERE (b = 0 OR b = 1); " # These should fail. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 WHERE c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index 12d889a7137..8f7a1a9ae98 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS part_header_r1; DROP TABLE IF EXISTS part_header_r2; @@ -62,7 +62,7 @@ do [[ $count1 == 1 && $count2 == 1 ]] && break done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SELECT '*** Test part removal ***'; SELECT '*** replica 1 ***'; diff --git a/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh index 
e4de0b9ebfc..ff487f50ee0 100755 --- a/tests/queries/0_stateless/00837_minmax_index.sh +++ b/tests/queries/0_stateless/00837_minmax_index.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh index b267b6a8eb3..a3aba4f26b6 100755 --- a/tests/queries/0_stateless/00838_unique_index.sh +++ b/tests/queries/0_stateless/00838_unique_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh index 3707aaf2ca6..bdd0f36346f 100755 --- a/tests/queries/0_stateless/00907_set_index_max_rows.sh +++ b/tests/queries/0_stateless/00907_set_index_max_rows.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh index 25a6567b894..3bd169dd6df 100755 --- a/tests/queries/0_stateless/00908_bloom_filter_index.sh +++ b/tests/queries/0_stateless/00908_bloom_filter_index.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx3;" # NGRAM BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx ( k UInt64, @@ -22,7 +22,7 @@ CREATE TABLE bloom_filter_idx ORDER BY k SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi';" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx2 ( k UInt64, @@ -109,7 +109,7 @@ $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom # TOKEN BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx3 ( k UInt64, @@ -147,7 +147,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx2" $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx3" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx_na;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx_na ( na Array(Array(String)), @@ -156,7 +156,7 @@ CREATE TABLE bloom_filter_idx_na ORDER BY na" 2>&1 | grep -c 'DB::Exception: Unexpected type Array(Array(String)) of bloom filter index' # NGRAM BF with IPv6 -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_ipv6_idx ( foo IPv6, diff --git a/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh index 6ebb30c25b9..e1e23639e85 100755 --- a/tests/queries/0_stateless/00942_mutate_index.sh +++ b/tests/queries/0_stateless/00942_mutate_index.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh index 6ff7d34a9d7..e4a585fce97 
100755 --- a/tests/queries/0_stateless/00943_materialize_index.sh +++ b/tests/queries/0_stateless/00943_materialize_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, @@ -34,7 +34,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO minmax_idx VALUES $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0 FORMAT JSON" | grep "rows_read" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" ALTER TABLE minmax_idx ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 SETTINGS mutations_sync = 2;" $CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 1 SETTINGS mutations_sync = 2;" diff --git a/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh index 4655077960f..a12536da239 100755 --- a/tests/queries/0_stateless/00944_clear_index_in_partition.sh +++ b/tests/queries/0_stateless/00944_clear_index_in_partition.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh index e2ec7fd42e4..9e410f09b13 100755 --- a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh +++ b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx;" # NGRAM BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh index 8892fb11752..0f29c3dd2f2 100755 --- a/tests/queries/0_stateless/00965_set_index_string_functions.sh +++ b/tests/queries/0_stateless/00965_set_index_string_functions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh index 389d433c7e2..ba260042f47 100755 --- a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh +++ b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS lowString;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS string;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" create table lowString ( a LowCardinality(String), @@ -18,7 +18,7 @@ ENGINE = MergeTree() PARTITION BY toYYYYMM(b) ORDER BY (a)" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" create table string 
( a String, diff --git a/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh index 6a1d4ff5ccf..d79472aa5a5 100755 --- a/tests/queries/0_stateless/00990_hasToken.sh +++ b/tests/queries/0_stateless/00990_hasToken.sh @@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We should have correct env vars from shell_config.sh to run this test -python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -nm +python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -m diff --git a/tests/queries/0_stateless/02003_memory_limit_in_client.sh b/tests/queries/0_stateless/02003_memory_limit_in_client.sh index 96028f4847a..15cacbff8c5 100755 --- a/tests/queries/0_stateless/02003_memory_limit_in_client.sh +++ b/tests/queries/0_stateless/02003_memory_limit_in_client.sh @@ -4,21 +4,21 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE 
num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" diff --git a/tests/queries/0_stateless/02021_create_database_with_comment.sh b/tests/queries/0_stateless/02021_create_database_with_comment.sh index f77397dc482..d87b0794c91 100755 --- a/tests/queries/0_stateless/02021_create_database_with_comment.sh +++ 
b/tests/queries/0_stateless/02021_create_database_with_comment.sh @@ -20,7 +20,7 @@ function test_db_comments() local ENGINE_NAME="$1" echo "engine : ${ENGINE_NAME}" - $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -nm <& /dev/null +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' >& /dev/null echo $? echo 'regression test for overlap profile events snapshots between queries' -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'regression test for overlap profile events snapshots between queries (clickhouse-local)' -$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'print everything' profile_events="$( @@ -35,5 +35,5 @@ profile_events="$( test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" echo 'check that ProfileEvents is new for each query' -sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') +sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') test "$sleep_function_calls" -eq 1 && echo OK || echo "FAIL ($sleep_function_calls)" diff --git a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh index 3c44a2a7ba7..a382b3859f3 100755 --- a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh +++ b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh @@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -nm < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null +${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -m < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh index db94c59d2de..e23a272a4e8 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2" -${CLICKHOUSE_CLIENT} -n -q" +${CLICKHOUSE_CLIENT} -q" CREATE TABLE sample_table ( key UInt64 ) @@ -16,7 +16,7 @@ ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_ ORDER BY tuple(); " -${CLICKHOUSE_CLIENT} -n -q" +${CLICKHOUSE_CLIENT} -q" CREATE TABLE sample_table_2 ( key UInt64 ) diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh index c62ec14b340..6381d811d5d 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2;" -${CLICKHOUSE_CLIENT} -n --query="CREATE TABLE sample_table ( +${CLICKHOUSE_CLIENT} --query="CREATE TABLE sample_table ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like', '1') @@ -16,7 +16,7 @@ ORDER BY tuple(); DROP TABLE IF EXISTS sample_table SYNC;" -${CLICKHOUSE_CLIENT} -n --query "CREATE TABLE sample_table_2 ( +${CLICKHOUSE_CLIENT} --query "CREATE TABLE sample_table_2 ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like_2', '1') diff --git a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh index 376a49fd820..63111cc32e4 100755 --- a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh +++ b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists dst_02225; drop table if exists src_02225; create table dst_02225 (key Int) engine=Memory(); @@ -14,7 +14,7 @@ create table src_02225 (key Int) engine=Memory(); insert into src_02225 values (1); " -$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -nm -q " +$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -m -q " truncate table dst_02225; insert into function remote('127.{1,2}', currentDatabase(), dst_02225, key) select * from remote('127.{1,2}', view(select * from {database:Identifier}.src_02225), key) @@ -29,7 +29,7 @@ settings parallel_distributed_insert_select=2, max_distributed_depth=1; select * from dst_02225; " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table src_02225; drop table dst_02225; " diff --git a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh index bc90f4b2c11..177b373641f 100755 --- a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh +++ b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_02226; create table data_02226 (key Int) engine=MergeTree() order by key as select * from numbers(1); @@ -24,7 +24,7 @@ opts=( $CLICKHOUSE_BENCHMARK --query "select * from remote('127.1', $CLICKHOUSE_DATABASE, data_02226)" "${opts[@]}" >& /dev/null ret=$? -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table data_02226; " diff --git a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh index d1a3825d286..e47a3033681 100755 --- a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh +++ b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh @@ -12,9 +12,9 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON Memory, TABLE ENGINE ON MergeTree, TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_memory (x UInt32) engine = Memory;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o 
"Only tables with a Replicated engine" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh index 66417e9694a..09f9c0c8a98 100755 --- a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh +++ b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - $CLICKHOUSE_CLIENT -nmq " + $CLICKHOUSE_CLIENT -mq " DROP USER IF EXISTS with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP USER IF EXISTS without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP DATABASE IF EXISTS db_with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; @@ -15,7 +15,7 @@ function cleanup() cleanup trap cleanup EXIT -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " CREATE USER with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; CREATE USER without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh index b5e29c9b2a1..c620d3b6d9c 100755 --- a/tests/queries/0_stateless/02262_column_ttl.sh +++ b/tests/queries/0_stateless/02262_column_ttl.sh @@ -14,7 +14,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # note, that this should be written in .sh since we need $CLICKHOUSE_DATABASE # not 'default' to catch text_log -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists ttl_02262; drop table if exists this_text_log; @@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -nm -q " ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = '$CLICKHOUSE_DATABASE' and name = 'ttl_02262'") -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " -- OPTIMIZE TABLE x FINAL will be done in background -- attach to it's log, via table UUID in query_id (see merger/mutator code). 
create materialized view this_text_log engine=Memory() as diff --git a/tests/queries/0_stateless/02286_parallel_final.sh b/tests/queries/0_stateless/02286_parallel_final.sh index 0ac510208f3..47dfad42e11 100755 --- a/tests/queries/0_stateless/02286_parallel_final.sh +++ b/tests/queries/0_stateless/02286_parallel_final.sh @@ -9,7 +9,7 @@ echo "Test intersecting ranges" test_random_values() { layers=$1 - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " drop table if exists tbl_8parts_${layers}granules_rnd; create table tbl_8parts_${layers}granules_rnd (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 % 8); insert into tbl_8parts_${layers}granules_rnd select number, 1 from numbers_mt($((layers * 8 * 8192))); @@ -29,7 +29,7 @@ echo "Test non intersecting ranges" test_sequential_values() { layers=$1 - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " drop table if exists tbl_8parts_${layers}granules_seq; create table tbl_8parts_${layers}granules_seq (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 / $((layers * 8192)))::UInt64; insert into tbl_8parts_${layers}granules_seq select number, 1 from numbers_mt($((layers * 8 * 8192))); diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh index bd7e6be3987..953485c3a1f 100755 --- a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh +++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh @@ -23,99 +23,99 @@ $CLICKHOUSE_CLIENT -q "insert into distinct_in_order_explain select number % num $CLICKHOUSE_CLIENT -q "select '-- disable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct all primary key columns -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- enable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct with all primary key columns -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by the same columns -> pre-distinct and final distinct optimization'" 
-$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by columns are prefix of distinct columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct but non-primary key prefix -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column _not_ in distinct -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column in distinct -> final distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column _not_ in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by _const_ column in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT echo "-- Check reading in order for distinct" echo "-- disabled, distinct columns match sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$DISABLE_OPTIMIZATION;explain pipeline select 
distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns match sorting key" # read_in_order_two_level_merge_threshold is set here to avoid repeating MergeTreeInOrder in output -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns DON't form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns contains constant columns, non-const columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns contains constant columns, non-const columns match prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, only part of distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "=== disable new analyzer ===" DISABLE_ANALYZER="set enable_analyzer=0" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES echo "-- check that 
reading in order optimization for ORDER BY and DISTINCT applied correctly in the same query" ENABLE_READ_IN_ORDER="set optimize_read_in_order=1" echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from 
distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES echo "=== enable new analyzer ===" ENABLE_ANALYZER="set enable_analyzer=1" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not 
disable distinct in order optimization" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES $CLICKHOUSE_CLIENT -q "drop table if exists distinct_in_order_explain sync" diff --git a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh index 96f80d65878..490f8361682 100755 --- a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh +++ b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) data_path="$CLICKHOUSE_TMP/local" -$CLICKHOUSE_LOCAL --path "$data_path" -nm -q " +$CLICKHOUSE_LOCAL --path "$data_path" -m -q " create table ttl_02335 ( date Date, key Int, diff --git a/tests/queries/0_stateless/02361_fsync_profile_events.sh b/tests/queries/0_stateless/02361_fsync_profile_events.sh index 98c9cf9b7b4..73bf3fa120a 100755 --- a/tests/queries/0_stateless/02361_fsync_profile_events.sh +++ b/tests/queries/0_stateless/02361_fsync_profile_events.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_fsync_pe; create table data_fsync_pe (key Int) engine=MergeTree() @@ -27,7 +27,7 @@ for i in {1..100}; do $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data_fsync_pe values (1)" read -r FileSync FileOpen DirectorySync FileSyncElapsedMicroseconds DirectorySyncElapsedMicroseconds <<<"$( - $CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " + $CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " system flush logs; select diff --git a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh index 71e3b6961f8..46396d38747 100755 --- a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh +++ b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh @@ -24,7 +24,7 @@ $CLICKHOUSE_CLIENT \ table_name="t_02377_extend_protocol_with_query_parameters_$RANDOM$RANDOM" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " create table $table_name( id Int64, arr Array(UInt8), @@ -57,17 +57,17 @@ $CLICKHOUSE_CLIENT \ # it is possible to set parameter for the current session -$CLICKHOUSE_CLIENT -n -q "set param_n = 42; select {n: UInt8}" +$CLICKHOUSE_CLIENT -q "set param_n = 42; select {n: UInt8}" # and it will not be visible to other sessions -$CLICKHOUSE_CLIENT -n -q "select {n: UInt8} -- { serverError 456 }" +$CLICKHOUSE_CLIENT -q "select {n: UInt8} -- { serverError 456 }" # the same parameter could be set multiple times within one session (new value overrides the previous one) -$CLICKHOUSE_CLIENT -n -q "set param_n = 12; set param_n = 13; select {n: UInt8}" +$CLICKHOUSE_CLIENT -q "set param_n = 12; set param_n = 13; select {n: UInt8}" # multiple different parameters could be defined within each session 
-$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " set param_a = 13, param_b = 'str'; set param_c = '2022-08-04 18:30:53'; set param_d = '{\'10\': [11, 12], \'13\': [14, 15]}'; diff --git a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh index 4b9793da5bb..974f10e2f24 100755 --- a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh +++ b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh @@ -15,7 +15,7 @@ FIND_SORTMODE="$GREP_SORTMODE | $TRIM_LEADING_SPACES" function explain_sorting { echo "-- QUERY: "$1 - $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTING + $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -q "$1" | eval $FIND_SORTING } function explain_sortmode { diff --git a/tests/queries/0_stateless/02417_load_marks_async.sh b/tests/queries/0_stateless/02417_load_marks_async.sh index 950656e7ab6..bcede9e4f5e 100755 --- a/tests/queries/0_stateless/02417_load_marks_async.sh +++ b/tests/queries/0_stateless/02417_load_marks_async.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test;" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE test ( n0 UInt64, From 9fec833d95a8d2d2727ab7d16d4de89049db82a2 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 15 Aug 2024 13:42:57 +0000 Subject: [PATCH 2264/2361] fix test 03221_mutation_analyzer_skip_part --- .../03221_mutate_profile_events.sh | 53 +++++++++++++++++++ .../03221_mutate_profile_events.sql | 33 ------------ 2 files changed, 53 insertions(+), 33 deletions(-) create mode 100755 tests/queries/0_stateless/03221_mutate_profile_events.sh delete mode 100644 tests/queries/0_stateless/03221_mutate_profile_events.sql diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.sh b/tests/queries/0_stateless/03221_mutate_profile_events.sh new file mode 100755 index 00000000000..3758db905e0 --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Tags: no-random-settings, no-random-merge-tree-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query " + DROP TABLE IF EXISTS t_mutate_profile_events; + + CREATE TABLE t_mutate_profile_events (key UInt64, id UInt64, v1 UInt64, v2 UInt64) + ENGINE = MergeTree ORDER BY id PARTITION BY key + SETTINGS min_bytes_for_wide_part = 0; + + INSERT INTO t_mutate_profile_events SELECT 1, number, number, number FROM numbers(10000); + INSERT INTO t_mutate_profile_events SELECT 2, number, number, number FROM numbers(10000); + + SET mutations_sync = 2; + + ALTER TABLE t_mutate_profile_events UPDATE v1 = 1000 WHERE key = 1; + ALTER TABLE t_mutate_profile_events DELETE WHERE key = 2 AND v2 % 10 = 0; +" + +# Mutation query may return before the entry is added to part log. +# So, we may have to retry the flush of logs until all entries are actually flushed. 
+for _ in {1..10}; do + ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" + res=$(${CLICKHOUSE_CLIENT} --query "SELECT count() FROM system.part_log WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart'") + + if [[ $res -eq 4 ]]; then + break + fi + + sleep 2.0 +done + +${CLICKHOUSE_CLIENT} --query " + SELECT + splitByChar('_', part_name)[-1] AS version, + sum(ProfileEvents['MutationTotalParts']), + sum(ProfileEvents['MutationUntouchedParts']), + sum(ProfileEvents['MutatedRows']), + sum(ProfileEvents['MutatedUncompressedBytes']), + sum(ProfileEvents['MutationAllPartColumns']), + sum(ProfileEvents['MutationSomePartColumns']), + sum(ProfileEvents['MutationTotalMilliseconds']) > 0, + sum(ProfileEvents['MutationExecuteMilliseconds']) > 0, + FROM system.part_log + WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart' + GROUP BY version ORDER BY version; + + DROP TABLE IF EXISTS t_mutate_profile_events; +" diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.sql b/tests/queries/0_stateless/03221_mutate_profile_events.sql deleted file mode 100644 index e9f7f9670bd..00000000000 --- a/tests/queries/0_stateless/03221_mutate_profile_events.sql +++ /dev/null @@ -1,33 +0,0 @@ --- Tags: no-random-settings, no-random-merge-tree-settings - -DROP TABLE IF EXISTS t_mutate_profile_events; - -CREATE TABLE t_mutate_profile_events (key UInt64, id UInt64, v1 UInt64, v2 UInt64) -ENGINE = MergeTree ORDER BY id PARTITION BY key -SETTINGS min_bytes_for_wide_part = 0; - -INSERT INTO t_mutate_profile_events SELECT 1, number, number, number FROM numbers(10000); -INSERT INTO t_mutate_profile_events SELECT 2, number, number, number FROM numbers(10000); - -SET mutations_sync = 2; - -ALTER TABLE t_mutate_profile_events UPDATE v1 = 1000 WHERE key = 1; -ALTER TABLE t_mutate_profile_events DELETE WHERE key = 2 AND v2 % 10 = 0; - -SYSTEM FLUSH LOGS; - -SELECT - splitByChar('_', part_name)[-1] AS version, - sum(ProfileEvents['MutationTotalParts']), - sum(ProfileEvents['MutationUntouchedParts']), - sum(ProfileEvents['MutatedRows']), - sum(ProfileEvents['MutatedUncompressedBytes']), - sum(ProfileEvents['MutationAllPartColumns']), - sum(ProfileEvents['MutationSomePartColumns']), - sum(ProfileEvents['MutationTotalMilliseconds']) > 0, - sum(ProfileEvents['MutationExecuteMilliseconds']) > 0, -FROM system.part_log -WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart' -GROUP BY version ORDER BY version; - -DROP TABLE IF EXISTS t_mutate_profile_events From fb037bcc722939f8d01fbd63c155a9a816c83f94 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 15 Aug 2024 13:48:43 +0000 Subject: [PATCH 2265/2361] move to mergetree setting and add more info --- .../operations/settings/merge-tree-settings.md | 17 +++++++++++++++-- docs/en/sql-reference/statements/delete.md | 3 +-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index a3bd919d3ce..a13aacc76e6 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1042,10 +1042,23 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%. This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only few distinct primary key values. High-cardinality primary keys, e.g. 
involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting. -### deduplicate_merge_projection_mode +## lightweight_mutation_projection_mode + +By default, lightweight `DELETE` does not work for tables with projections, because rows in a projection may be affected by a `DELETE` operation. Hence the default value is `throw`. +However, this setting can change that behavior. With the value `drop` or `rebuild`, deletes work with projections. `drop` removes the affected projection, so the current query may be fast because the projection is simply deleted, but future queries may be slower because no projection is attached anymore. +`rebuild` rebuilds the projection, which may slow down the current query but can speed up future queries. Note that both options apply only at the part level, +which means projections in parts that are not touched by the mutation stay intact and are neither dropped nor rebuilt. + +Possible values: + +- throw, drop, rebuild + +Default value: throw + +## deduplicate_merge_projection_mode Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. If allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting. -It also controls `OPTIMIZE DEDUPLICATE` as well, but has effect on all MergeTree family members. +It also controls `OPTIMIZE DEDUPLICATE`, but has an effect on all MergeTree family members. Like the `lightweight_mutation_projection_mode` option, it also applies at the part level. Possible values: diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index 88a9c933519..78142f880fe 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -38,8 +38,7 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/ ### Lightweight `DELETE`s with projections -By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance. -However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'`, deletes will work with projections. +By default, `DELETE` does not work for tables with projections, because rows in a projection may be affected by a `DELETE` operation. However, the [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` can change this behavior. 
## Performance considerations when using lightweight `DELETE` From bdd0e01545a93b8bda34f667ede94f0e0faaa665 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Thu, 15 Aug 2024 14:06:21 +0200 Subject: [PATCH 2266/2361] CI: Auto release workflow --- .github/actions/debug/action.yml | 18 +++++ .github/workflows/auto_releases.yml | 109 +++++++++++++++++++++++++++ .github/workflows/create_release.yml | 21 ++++++ 3 files changed, 148 insertions(+) create mode 100644 .github/actions/debug/action.yml create mode 100644 .github/workflows/auto_releases.yml diff --git a/.github/actions/debug/action.yml b/.github/actions/debug/action.yml new file mode 100644 index 00000000000..e1fe3f28024 --- /dev/null +++ b/.github/actions/debug/action.yml @@ -0,0 +1,18 @@ +name: DebugInfo +description: Prints workflow debug info + +runs: + using: "composite" + steps: + - name: Print envs + shell: bash + run: | + echo "::group::Envs" + env + echo "::endgroup::" + - name: Print Event.json + shell: bash + run: | + echo "::group::Event.json" + python3 -m json.tool "$GITHUB_EVENT_PATH" + echo "::endgroup::" diff --git a/.github/workflows/auto_releases.yml b/.github/workflows/auto_releases.yml new file mode 100644 index 00000000000..c159907187c --- /dev/null +++ b/.github/workflows/auto_releases.yml @@ -0,0 +1,109 @@ +name: AutoReleases + +env: + PYTHONUNBUFFERED: 1 + +concurrency: + group: autoreleases + +on: + # schedule: + # - cron: '0 9 * * *' + workflow_dispatch: + inputs: + dry-run: + description: 'Dry run' + required: false + default: true + type: boolean + +jobs: + AutoReleaseInfo: + runs-on: [self-hosted, style-checker-aarch64] + outputs: + data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }} + dry_run: ${{ steps.info.outputs.DRY_RUN }} + steps: + - name: Debug Info + uses: ./.github/actions/debug + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" + - name: Check out repository code + uses: ClickHouse/checkout@v1 + - name: Prepare Info + id: info + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 auto_release.py --prepare + echo "::group::Auto Release Info" + python3 -m json.tool /tmp/autorelease_info.json + echo "::endgroup::" + { + echo 'AUTO_RELEASE_PARAMS<> "$GITHUB_ENV" + { + echo 'AUTO_RELEASE_PARAMS<> "$GITHUB_OUTPUT" + echo "DRY_RUN=true" >> "$GITHUB_OUTPUT" + - name: Post Release Branch statuses + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 auto_release.py --post-status + - name: Clean up + uses: ./.github/actions/clean + + Release_0: + needs: AutoReleaseInfo + name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].release_branch }} + if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].ready }} + uses: ./.github/workflows/create_release.yml + with: + ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }} + type: patch + dry-run: ${{ needs.AutoReleaseInfo.outputs.dry_run }} +# +# Release_1: +# needs: [AutoReleaseInfo, Release_0] +# name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].release_branch }} +# if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].ready }} +# uses: ./.github/workflows/create_release.yml +# with: +# ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[1].commit_sha }} +# type: patch +# dry-run: ${{ env.DRY_RUN }} +# +# Release_2: +# needs: [AutoReleaseInfo, Release_1] +# name: Release ${{ 
fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].release_branch }} +# if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[2].ready }} +# uses: ./.github/workflow/create_release.yml +# with: +# ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[0].commit_sha }} +# type: patch +# dry-run: ${{ env.DRY_RUN }} +# +# Release_3: +# needs: [AutoReleaseInfo, Release_2] +# name: Release ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].release_branch }} +# if: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3] && fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].ready }} +# uses: ./.github/workflow/create_release.yml +# with: +# ref: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases[3].commit_sha }} +# type: patch +# dry-run: ${{ env.DRY_RUN }} + +# - name: Post Slack Message +# if: ${{ !cancelled() }} +# run: | +# cd "$GITHUB_WORKSPACE/tests/ci" +# python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }} diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index eb16c25f604..1553d689227 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -2,6 +2,7 @@ name: CreateRelease concurrency: group: release + 'on': workflow_dispatch: inputs: @@ -26,6 +27,26 @@ concurrency: required: false default: false type: boolean + workflow_call: + inputs: + ref: + description: 'Git reference (branch or commit sha) from which to create the release' + required: true + type: string + type: + description: 'The type of release: "new" for a new release or "patch" for a patch release' + required: true + type: string + only-repo: + description: 'Run only repos updates including docker (repo-recovery, tests)' + required: false + default: false + type: boolean + dry-run: + description: 'Dry run' + required: false + default: false + type: boolean jobs: CreateRelease: From 7d01c3131265d0dfae24fbf6ab91c71073573765 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Aug 2024 16:01:13 +0200 Subject: [PATCH 2267/2361] Delete old code of named collections --- src/Core/PostgreSQL/PoolWithFailover.cpp | 2 +- src/Core/PostgreSQL/PoolWithFailover.h | 9 +- src/Dictionaries/HTTPDictionarySource.cpp | 26 +- src/Dictionaries/MongoDBDictionarySource.cpp | 61 ++-- .../PostgreSQLDictionarySource.cpp | 131 ++++++-- .../ExternalDataSourceConfiguration.cpp | 288 ------------------ .../ExternalDataSourceConfiguration.h | 92 ------ src/Storages/NamedCollectionsHelpers.h | 2 +- src/Storages/StorageExternalDistributed.h | 2 - src/TableFunctions/TableFunctionMongoDB.cpp | 1 - src/TableFunctions/TableFunctionRedis.cpp | 1 - 11 files changed, 170 insertions(+), 445 deletions(-) delete mode 100644 src/Storages/ExternalDataSourceConfiguration.cpp delete mode 100644 src/Storages/ExternalDataSourceConfiguration.h diff --git a/src/Core/PostgreSQL/PoolWithFailover.cpp b/src/Core/PostgreSQL/PoolWithFailover.cpp index 5014564dbe0..054fc3b2226 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.cpp +++ b/src/Core/PostgreSQL/PoolWithFailover.cpp @@ -23,7 +23,7 @@ namespace postgres { PoolWithFailover::PoolWithFailover( - const DB::ExternalDataSourcesConfigurationByPriority & configurations_by_priority, + const ReplicasConfigurationByPriority & configurations_by_priority, size_t pool_size, size_t pool_wait_timeout_, size_t max_tries_, diff --git a/src/Core/PostgreSQL/PoolWithFailover.h b/src/Core/PostgreSQL/PoolWithFailover.h index 
502a9a9b7d7..2237c752367 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.h +++ b/src/Core/PostgreSQL/PoolWithFailover.h @@ -8,7 +8,6 @@ #include "ConnectionHolder.h" #include #include -#include #include @@ -20,12 +19,12 @@ namespace postgres class PoolWithFailover { - -using RemoteDescription = std::vector>; - public: + using ReplicasConfigurationByPriority = std::map>; + using RemoteDescription = std::vector>; + PoolWithFailover( - const DB::ExternalDataSourcesConfigurationByPriority & configurations_by_priority, + const ReplicasConfigurationByPriority & configurations_by_priority, size_t pool_size, size_t pool_wait_timeout, size_t max_tries_, diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index 663c63dd6c6..d6df03b39df 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -8,12 +8,12 @@ #include #include #include -#include #include #include #include "DictionarySourceFactory.h" #include "DictionarySourceHelpers.h" #include "DictionaryStructure.h" +#include #include "registerDictionaries.h" @@ -223,21 +223,23 @@ void registerDictionarySourceHTTP(DictionarySourceFactory & factory) String endpoint; String format; - auto named_collection = created_from_ddl - ? getURLBasedDataSourceConfiguration(config, settings_config_prefix, global_context) - : std::nullopt; + auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, global_context) : nullptr; if (named_collection) { - url = named_collection->configuration.url; - endpoint = named_collection->configuration.endpoint; - format = named_collection->configuration.format; + validateNamedCollection( + *named_collection, + /* required_keys */{}, + /* optional_keys */ValidateKeysMultiset{ + "url", "endpoint", "user", "credentials.user", "password", "credentials.password", "format", "compression_method", "structure", "name"}); - credentials.setUsername(named_collection->configuration.user); - credentials.setPassword(named_collection->configuration.password); + url = named_collection->getOrDefault("url", ""); + endpoint = named_collection->getOrDefault("endpoint", ""); + format = named_collection->getOrDefault("format", ""); - header_entries.reserve(named_collection->configuration.headers.size()); - for (const auto & [key, value] : named_collection->configuration.headers) - header_entries.emplace_back(key, value); + credentials.setUsername(named_collection->getAnyOrDefault({"user", "credentials.user"}, "")); + credentials.setPassword(named_collection->getAnyOrDefault({"password", "credentials.password"}, "")); + + header_entries = getHeadersFromNamedCollection(*named_collection); } else { diff --git a/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp index c30a6f90e44..7bacfdab3d2 100644 --- a/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -1,15 +1,12 @@ #include "MongoDBDictionarySource.h" #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" -#include #include +#include namespace DB { -static const std::unordered_set dictionary_allowed_keys = { - "host", "port", "user", "password", "db", "database", "uri", "collection", "name", "method", "options"}; - void registerDictionarySourceMongoDB(DictionarySourceFactory & factory) { auto create_mongo_db_dictionary = []( @@ -22,35 +19,53 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory) bool created_from_ddl) { const auto 
config_prefix = root_config_prefix + ".mongodb"; - ExternalDataSourceConfiguration configuration; - auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key); }; - auto named_collection = getExternalDataSourceConfiguration(config, config_prefix, context, has_config_key); + auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, config_prefix, context) : nullptr; + + String host, username, password, database, method, options, collection; + UInt16 port; if (named_collection) { - configuration = named_collection->configuration; + validateNamedCollection( + *named_collection, + /* required_keys */{"collection"}, + /* optional_keys */ValidateKeysMultiset{ + "host", "port", "user", "password", "db", "database", "uri", "name", "method", "options"}); + + host = named_collection->getOrDefault("host", ""); + port = static_cast(named_collection->getOrDefault("port", 0)); + username = named_collection->getOrDefault("user", ""); + password = named_collection->getOrDefault("password", ""); + database = named_collection->getAnyOrDefault({"db", "database"}, ""); + method = named_collection->getOrDefault("method", ""); + collection = named_collection->getOrDefault("collection", ""); + options = named_collection->getOrDefault("options", ""); } else { - configuration.host = config.getString(config_prefix + ".host", ""); - configuration.port = config.getUInt(config_prefix + ".port", 0); - configuration.username = config.getString(config_prefix + ".user", ""); - configuration.password = config.getString(config_prefix + ".password", ""); - configuration.database = config.getString(config_prefix + ".db", ""); + host = config.getString(config_prefix + ".host", ""); + port = config.getUInt(config_prefix + ".port", 0); + username = config.getString(config_prefix + ".user", ""); + password = config.getString(config_prefix + ".password", ""); + database = config.getString(config_prefix + ".db", ""); + method = config.getString(config_prefix + ".method", ""); + collection = config.getString(config_prefix + ".collection"); + options = config.getString(config_prefix + ".options", ""); } if (created_from_ddl) - context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); + context->getRemoteHostFilter().checkHostAndPort(host, toString(port)); - return std::make_unique(dict_struct, + return std::make_unique( + dict_struct, config.getString(config_prefix + ".uri", ""), - configuration.host, - configuration.port, - configuration.username, - configuration.password, - config.getString(config_prefix + ".method", ""), - configuration.database, - config.getString(config_prefix + ".collection"), - config.getString(config_prefix + ".options", ""), + host, + port, + username, + password, + method, + database, + collection, + options, sample_block); }; diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index f62a9a009d8..fd026a97cd4 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -13,7 +13,7 @@ #include "readInvalidateQuery.h" #include #include -#include +#include #include #endif @@ -30,7 +30,7 @@ namespace ErrorCodes static const UInt64 max_block_size = 8192; -static const std::unordered_set dictionary_allowed_keys = { +static const ValidateKeysMultiset dictionary_allowed_keys = { "host", "port", "user", "password", "db", "database", "table", "schema", "update_field", "update_lag", "invalidate_query", 
"query", "where", "name", "priority"}; @@ -179,6 +179,19 @@ std::string PostgreSQLDictionarySource::toString() const #endif +static void validateConfigKeys( + const Poco::Util::AbstractConfiguration & dict_config, const String & config_prefix) +{ + Poco::Util::AbstractConfiguration::Keys config_keys; + dict_config.keys(config_prefix, config_keys); + for (const auto & config_key : config_keys) + { + if (dictionary_allowed_keys.contains(config_key) || startsWith(config_key, "replica")) + continue; + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected key `{}` in dictionary source configuration", config_key); + } +} + void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) { auto create_table_source = [=](const DictionaryStructure & dict_struct, @@ -191,38 +204,118 @@ void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) { #if USE_LIBPQXX const auto settings_config_prefix = config_prefix + ".postgresql"; - auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key) || key.starts_with("replica"); }; - auto configuration = getExternalDataSourceConfigurationByPriority(config, settings_config_prefix, context, has_config_key); const auto & settings = context->getSettingsRef(); + std::optional dictionary_configuration; + String database, schema, table; + postgres::PoolWithFailover::ReplicasConfigurationByPriority replicas_by_priority; + + auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, context) : nullptr; + if (named_collection) + { + validateNamedCollection>(*named_collection, {}, dictionary_allowed_keys); + + StoragePostgreSQL::Configuration common_configuration; + common_configuration.host = named_collection->getOrDefault("host", ""); + common_configuration.port = named_collection->getOrDefault("port", 0); + common_configuration.username = named_collection->getOrDefault("user", ""); + common_configuration.password = named_collection->getOrDefault("password", ""); + common_configuration.database = named_collection->getAnyOrDefault({"database", "db"}, ""); + common_configuration.schema = named_collection->getOrDefault("schema", ""); + common_configuration.table = named_collection->getOrDefault("table", ""); + + dictionary_configuration.emplace(PostgreSQLDictionarySource::Configuration{ + .db = common_configuration.database, + .schema = common_configuration.schema, + .table = common_configuration.table, + .query = named_collection->getOrDefault("query", ""), + .where = named_collection->getOrDefault("where", ""), + .invalidate_query = named_collection->getOrDefault("invalidate_query", ""), + .update_field = named_collection->getOrDefault("update_field", ""), + .update_lag = named_collection->getOrDefault("update_lag", 1), + }); + + replicas_by_priority[0].emplace_back(common_configuration); + } + else + { + validateConfigKeys(config, settings_config_prefix); + + StoragePostgreSQL::Configuration common_configuration; + common_configuration.host = config.getString(settings_config_prefix + ".host", ""); + common_configuration.port = config.getUInt(settings_config_prefix + ".port", 0); + common_configuration.username = config.getString(settings_config_prefix + ".user", ""); + common_configuration.password = config.getString(settings_config_prefix + ".password", ""); + common_configuration.database = config.getString(fmt::format("{}.database", settings_config_prefix), config.getString(fmt::format("{}.db", settings_config_prefix), "")); + common_configuration.schema = 
config.getString(fmt::format("{}.schema", settings_config_prefix), ""); + common_configuration.table = config.getString(fmt::format("{}.table", settings_config_prefix), ""); + + dictionary_configuration.emplace(PostgreSQLDictionarySource::Configuration + { + .db = common_configuration.database, + .schema = common_configuration.schema, + .table = common_configuration.table, + .query = config.getString(fmt::format("{}.query", settings_config_prefix), ""), + .where = config.getString(fmt::format("{}.where", settings_config_prefix), ""), + .invalidate_query = config.getString(fmt::format("{}.invalidate_query", settings_config_prefix), ""), + .update_field = config.getString(fmt::format("{}.update_field", settings_config_prefix), ""), + .update_lag = config.getUInt64(fmt::format("{}.update_lag", settings_config_prefix), 1) + }); + + + if (config.has(settings_config_prefix + ".replica")) + { + Poco::Util::AbstractConfiguration::Keys config_keys; + config.keys(settings_config_prefix, config_keys); + + for (const auto & config_key : config_keys) + { + if (config_key.starts_with("replica")) + { + String replica_name = settings_config_prefix + "." + config_key; + StoragePostgreSQL::Configuration replica_configuration{common_configuration}; + + size_t priority = config.getInt(replica_name + ".priority", 0); + replica_configuration.host = config.getString(replica_name + ".host", common_configuration.host); + replica_configuration.port = config.getUInt(replica_name + ".port", common_configuration.port); + replica_configuration.username = config.getString(replica_name + ".user", common_configuration.username); + replica_configuration.password = config.getString(replica_name + ".password", common_configuration.password); + + if (replica_configuration.host.empty() || replica_configuration.port == 0 + || replica_configuration.username.empty() || replica_configuration.password.empty()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Named collection of connection parameters is missing some " + "of the parameters and no other dictionary parameters are added"); + } + + replicas_by_priority[priority].emplace_back(replica_configuration); + } + } + } + else + { + replicas_by_priority[0].emplace_back(common_configuration); + } + } if (created_from_ddl) { - for (const auto & replicas : configuration.replicas_configurations) - for (const auto & replica : replicas.second) + for (const auto & [_, replicas] : replicas_by_priority) + for (const auto & replica : replicas) context->getRemoteHostFilter().checkHostAndPort(replica.host, toString(replica.port)); } + auto pool = std::make_shared( - configuration.replicas_configurations, + replicas_by_priority, settings.postgresql_connection_pool_size, settings.postgresql_connection_pool_wait_timeout, settings.postgresql_connection_pool_retries, settings.postgresql_connection_pool_auto_close_connection, settings.postgresql_connection_attempt_timeout); - PostgreSQLDictionarySource::Configuration dictionary_configuration - { - .db = configuration.database, - .schema = configuration.schema, - .table = configuration.table, - .query = config.getString(fmt::format("{}.query", settings_config_prefix), ""), - .where = config.getString(fmt::format("{}.where", settings_config_prefix), ""), - .invalidate_query = config.getString(fmt::format("{}.invalidate_query", settings_config_prefix), ""), - .update_field = config.getString(fmt::format("{}.update_field", settings_config_prefix), ""), - .update_lag = config.getUInt64(fmt::format("{}.update_lag", settings_config_prefix), 1) - }; - 
return std::make_unique(dict_struct, dictionary_configuration, pool, sample_block); + return std::make_unique(dict_struct, dictionary_configuration.value(), pool, sample_block); #else (void)dict_struct; (void)config; diff --git a/src/Storages/ExternalDataSourceConfiguration.cpp b/src/Storages/ExternalDataSourceConfiguration.cpp deleted file mode 100644 index 41979f8d91c..00000000000 --- a/src/Storages/ExternalDataSourceConfiguration.cpp +++ /dev/null @@ -1,288 +0,0 @@ -#include "ExternalDataSourceConfiguration.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -IMPLEMENT_SETTINGS_TRAITS(EmptySettingsTraits, EMPTY_SETTINGS) - -static const std::unordered_set dictionary_allowed_keys = { - "host", "port", "user", "password", "quota_key", "db", - "database", "table", "schema", "replica", - "update_field", "update_lag", "invalidate_query", "query", - "where", "name", "secure", "uri", "collection"}; - - -template -SettingsChanges getSettingsChangesFromConfig( - const BaseSettings & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix) -{ - SettingsChanges config_settings; - for (const auto & setting : settings.all()) - { - const auto & setting_name = setting.getName(); - auto setting_value = config.getString(config_prefix + '.' + setting_name, ""); - if (!setting_value.empty()) - config_settings.emplace_back(setting_name, setting_value); - } - return config_settings; -} - - -String ExternalDataSourceConfiguration::toString() const -{ - WriteBufferFromOwnString configuration_info; - configuration_info << "username: " << username << "\t"; - if (addresses.empty()) - { - configuration_info << "host: " << host << "\t"; - configuration_info << "port: " << port << "\t"; - } - else - { - for (const auto & [replica_host, replica_port] : addresses) - { - configuration_info << "host: " << replica_host << "\t"; - configuration_info << "port: " << replica_port << "\t"; - } - } - return configuration_info.str(); -} - - -void ExternalDataSourceConfiguration::set(const ExternalDataSourceConfiguration & conf) -{ - host = conf.host; - port = conf.port; - username = conf.username; - password = conf.password; - quota_key = conf.quota_key; - database = conf.database; - table = conf.table; - schema = conf.schema; - addresses = conf.addresses; - addresses_expr = conf.addresses_expr; -} - - -static void validateConfigKeys( - const Poco::Util::AbstractConfiguration & dict_config, const String & config_prefix, HasConfigKeyFunc has_config_key_func) -{ - Poco::Util::AbstractConfiguration::Keys config_keys; - dict_config.keys(config_prefix, config_keys); - for (const auto & config_key : config_keys) - { - if (!has_config_key_func(config_key)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected key `{}` in dictionary source configuration", config_key); - } -} - -template -std::optional getExternalDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, - ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings & settings) -{ - validateConfigKeys(dict_config, dict_config_prefix, has_config_key); - ExternalDataSourceConfiguration configuration; - - auto collection_name = dict_config.getString(dict_config_prefix + ".name", ""); - if (!collection_name.empty()) - { - const auto & config = context->getConfigRef(); - const auto & collection_prefix = fmt::format("named_collections.{}", 
collection_name); - validateConfigKeys(dict_config, collection_prefix, has_config_key); - auto config_settings = getSettingsChangesFromConfig(settings, config, collection_prefix); - auto dict_settings = getSettingsChangesFromConfig(settings, dict_config, dict_config_prefix); - /// dictionary config settings override collection settings. - config_settings.insert(config_settings.end(), dict_settings.begin(), dict_settings.end()); - - if (!config.has(collection_prefix)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name); - - configuration.host = dict_config.getString(dict_config_prefix + ".host", config.getString(collection_prefix + ".host", "")); - configuration.port = dict_config.getInt(dict_config_prefix + ".port", config.getUInt(collection_prefix + ".port", 0)); - configuration.username = dict_config.getString(dict_config_prefix + ".user", config.getString(collection_prefix + ".user", "")); - configuration.password = dict_config.getString(dict_config_prefix + ".password", config.getString(collection_prefix + ".password", "")); - configuration.quota_key = dict_config.getString(dict_config_prefix + ".quota_key", config.getString(collection_prefix + ".quota_key", "")); - configuration.database = dict_config.getString(dict_config_prefix + ".db", config.getString(dict_config_prefix + ".database", - config.getString(collection_prefix + ".db", config.getString(collection_prefix + ".database", "")))); - configuration.table = dict_config.getString(dict_config_prefix + ".table", config.getString(collection_prefix + ".table", "")); - configuration.schema = dict_config.getString(dict_config_prefix + ".schema", config.getString(collection_prefix + ".schema", "")); - - if (configuration.host.empty() || configuration.port == 0 || configuration.username.empty() || configuration.table.empty()) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Named collection of connection parameters is missing some " - "of the parameters and dictionary parameters are not added"); - } - return ExternalDataSourceInfo{.configuration = configuration, .settings_changes = config_settings}; - } - return std::nullopt; -} - -std::optional getURLBasedDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context) -{ - URLBasedDataSourceConfiguration configuration; - auto collection_name = dict_config.getString(dict_config_prefix + ".name", ""); - if (!collection_name.empty()) - { - const auto & config = context->getConfigRef(); - const auto & collection_prefix = fmt::format("named_collections.{}", collection_name); - - if (!config.has(collection_prefix)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name); - - configuration.url = - dict_config.getString(dict_config_prefix + ".url", config.getString(collection_prefix + ".url", "")); - configuration.endpoint = - dict_config.getString(dict_config_prefix + ".endpoint", config.getString(collection_prefix + ".endpoint", "")); - configuration.format = - dict_config.getString(dict_config_prefix + ".format", config.getString(collection_prefix + ".format", "")); - configuration.compression_method = - dict_config.getString(dict_config_prefix + ".compression", config.getString(collection_prefix + ".compression_method", "")); - configuration.structure = - dict_config.getString(dict_config_prefix + ".structure", config.getString(collection_prefix + ".structure", "")); - configuration.user = - 
dict_config.getString(dict_config_prefix + ".credentials.user", config.getString(collection_prefix + ".credentials.user", "")); - configuration.password = - dict_config.getString(dict_config_prefix + ".credentials.password", config.getString(collection_prefix + ".credentials.password", "")); - - String headers_prefix; - const Poco::Util::AbstractConfiguration *headers_config = nullptr; - if (dict_config.has(dict_config_prefix + ".headers")) - { - headers_prefix = dict_config_prefix + ".headers"; - headers_config = &dict_config; - } - else - { - headers_prefix = collection_prefix + ".headers"; - headers_config = &config; - } - - if (headers_config) - { - Poco::Util::AbstractConfiguration::Keys header_keys; - headers_config->keys(headers_prefix, header_keys); - headers_prefix += "."; - for (const auto & header : header_keys) - { - const auto header_prefix = headers_prefix + header; - configuration.headers.emplace_back( - headers_config->getString(header_prefix + ".name"), - headers_config->getString(header_prefix + ".value")); - } - } - - return URLBasedDataSourceConfig{ .configuration = configuration }; - } - - return std::nullopt; -} - -ExternalDataSourcesByPriority getExternalDataSourceConfigurationByPriority( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context, HasConfigKeyFunc has_config_key) -{ - validateConfigKeys(dict_config, dict_config_prefix, has_config_key); - ExternalDataSourceConfiguration common_configuration; - - auto named_collection = getExternalDataSourceConfiguration(dict_config, dict_config_prefix, context, has_config_key); - if (named_collection) - { - common_configuration = named_collection->configuration; - } - else - { - common_configuration.host = dict_config.getString(dict_config_prefix + ".host", ""); - common_configuration.port = dict_config.getUInt(dict_config_prefix + ".port", 0); - common_configuration.username = dict_config.getString(dict_config_prefix + ".user", ""); - common_configuration.password = dict_config.getString(dict_config_prefix + ".password", ""); - common_configuration.quota_key = dict_config.getString(dict_config_prefix + ".quota_key", ""); - common_configuration.database = dict_config.getString(dict_config_prefix + ".db", dict_config.getString(dict_config_prefix + ".database", "")); - common_configuration.table = dict_config.getString(fmt::format("{}.table", dict_config_prefix), ""); - common_configuration.schema = dict_config.getString(fmt::format("{}.schema", dict_config_prefix), ""); - } - - ExternalDataSourcesByPriority configuration - { - .database = common_configuration.database, - .table = common_configuration.table, - .schema = common_configuration.schema, - .replicas_configurations = {} - }; - - if (dict_config.has(dict_config_prefix + ".replica")) - { - Poco::Util::AbstractConfiguration::Keys config_keys; - dict_config.keys(dict_config_prefix, config_keys); - - for (const auto & config_key : config_keys) - { - if (config_key.starts_with("replica")) - { - ExternalDataSourceConfiguration replica_configuration(common_configuration); - String replica_name = dict_config_prefix + "." 
+ config_key; - validateConfigKeys(dict_config, replica_name, has_config_key); - - size_t priority = dict_config.getInt(replica_name + ".priority", 0); - replica_configuration.host = dict_config.getString(replica_name + ".host", common_configuration.host); - replica_configuration.port = dict_config.getUInt(replica_name + ".port", common_configuration.port); - replica_configuration.username = dict_config.getString(replica_name + ".user", common_configuration.username); - replica_configuration.password = dict_config.getString(replica_name + ".password", common_configuration.password); - replica_configuration.quota_key = dict_config.getString(replica_name + ".quota_key", common_configuration.quota_key); - - if (replica_configuration.host.empty() || replica_configuration.port == 0 - || replica_configuration.username.empty() || replica_configuration.password.empty()) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Named collection of connection parameters is missing some " - "of the parameters and no other dictionary parameters are added"); - } - - configuration.replicas_configurations[priority].emplace_back(replica_configuration); - } - } - } - else - { - configuration.replicas_configurations[0].emplace_back(common_configuration); - } - - return configuration; -} - - -void URLBasedDataSourceConfiguration::set(const URLBasedDataSourceConfiguration & conf) -{ - url = conf.url; - format = conf.format; - compression_method = conf.compression_method; - structure = conf.structure; - http_method = conf.http_method; - headers = conf.headers; -} - -template -std::optional getExternalDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, - ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings & settings); - -template -SettingsChanges getSettingsChangesFromConfig( - const BaseSettings & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix); - -} diff --git a/src/Storages/ExternalDataSourceConfiguration.h b/src/Storages/ExternalDataSourceConfiguration.h deleted file mode 100644 index c703c9ce999..00000000000 --- a/src/Storages/ExternalDataSourceConfiguration.h +++ /dev/null @@ -1,92 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ - -#define EMPTY_SETTINGS(M, ALIAS) -DECLARE_SETTINGS_TRAITS(EmptySettingsTraits, EMPTY_SETTINGS) - -struct EmptySettings : public BaseSettings {}; - -struct ExternalDataSourceConfiguration -{ - String host; - UInt16 port = 0; - String username = "default"; - String password; - String quota_key; - String database; - String table; - String schema; - - std::vector> addresses; /// Failover replicas. - String addresses_expr; - - String toString() const; - - void set(const ExternalDataSourceConfiguration & conf); -}; - - -using StorageSpecificArgs = std::vector>; - -struct ExternalDataSourceInfo -{ - ExternalDataSourceConfiguration configuration; - SettingsChanges settings_changes; -}; - -using HasConfigKeyFunc = std::function; - -template -std::optional getExternalDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, - ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings & settings = {}); - - -/// Highest priority is 0, the bigger the number in map, the less the priority. 
-using ExternalDataSourcesConfigurationByPriority = std::map>; - -struct ExternalDataSourcesByPriority -{ - String database; - String table; - String schema; - ExternalDataSourcesConfigurationByPriority replicas_configurations; -}; - -ExternalDataSourcesByPriority -getExternalDataSourceConfigurationByPriority(const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context, HasConfigKeyFunc has_config_key); - -struct URLBasedDataSourceConfiguration -{ - String url; - String endpoint; - String format = "auto"; - String compression_method = "auto"; - String structure = "auto"; - - String user; - String password; - - HTTPHeaderEntries headers; - String http_method; - - void set(const URLBasedDataSourceConfiguration & conf); -}; - -struct URLBasedDataSourceConfig -{ - URLBasedDataSourceConfiguration configuration; -}; - -std::optional getURLBasedDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context); - -} diff --git a/src/Storages/NamedCollectionsHelpers.h b/src/Storages/NamedCollectionsHelpers.h index f444a581eb6..bf2da7235a2 100644 --- a/src/Storages/NamedCollectionsHelpers.h +++ b/src/Storages/NamedCollectionsHelpers.h @@ -133,7 +133,7 @@ void validateNamedCollection( { throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Unexpected key {} in named collection. Required keys: {}, optional keys: {}", + "Unexpected key `{}` in named collection. Required keys: {}, optional keys: {}", backQuoteIfNeed(key), fmt::join(required_keys, ", "), fmt::join(optional_keys, ", ")); } } diff --git a/src/Storages/StorageExternalDistributed.h b/src/Storages/StorageExternalDistributed.h index c4d37c3e5cc..56c7fe86f34 100644 --- a/src/Storages/StorageExternalDistributed.h +++ b/src/Storages/StorageExternalDistributed.h @@ -8,8 +8,6 @@ namespace DB { -struct ExternalDataSourceConfiguration; - /// Storages MySQL and PostgreSQL use ConnectionPoolWithFailover and support multiple replicas. /// This class unites multiple storages with replicas into multiple shards with replicas. /// A query to external database is passed to one replica on each shard, the result is united. 
diff --git a/src/TableFunctions/TableFunctionMongoDB.cpp b/src/TableFunctions/TableFunctionMongoDB.cpp index b2cf1b4675e..94279d1bf6d 100644 --- a/src/TableFunctions/TableFunctionMongoDB.cpp +++ b/src/TableFunctions/TableFunctionMongoDB.cpp @@ -1,5 +1,4 @@ #include -#include #include diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index f87ba6d1c6d..aca751c2840 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -15,7 +15,6 @@ #include #include -#include namespace DB From f6e1eb1643888c2b8bbc179899cbc4bacaee5b78 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Aug 2024 16:31:48 +0200 Subject: [PATCH 2268/2361] Fix style check --- src/Dictionaries/HTTPDictionarySource.cpp | 2 +- src/Dictionaries/PostgreSQLDictionarySource.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index d6df03b39df..bf19f912723 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -230,7 +230,7 @@ void registerDictionarySourceHTTP(DictionarySourceFactory & factory) *named_collection, /* required_keys */{}, /* optional_keys */ValidateKeysMultiset{ - "url", "endpoint", "user", "credentials.user", "password", "credentials.password", "format", "compression_method", "structure", "name"}); + "url", "endpoint", "user", "credentials.user", "password", "credentials.password", "format", "compression_method", "structure", "name"}); url = named_collection->getOrDefault("url", ""); endpoint = named_collection->getOrDefault("endpoint", ""); diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index fd026a97cd4..fd426de126d 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -24,6 +24,7 @@ namespace DB namespace ErrorCodes { extern const int SUPPORT_IS_DISABLED; + extern const int BAD_ARGUMENTS; } #if USE_LIBPQXX From 4548957f5a73600d227dcc26926c7678dd284c5b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 15 Aug 2024 15:07:07 +0000 Subject: [PATCH 2269/2361] Add default status to not-prepared processors. 
--- src/Processors/Executors/ExecutingGraph.cpp | 4 ++-- src/Processors/Executors/ExecutingGraph.h | 2 +- src/Processors/Executors/PipelineExecutor.cpp | 2 +- src/Processors/IProcessor.cpp | 7 +++++-- src/Processors/IProcessor.h | 2 +- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/Processors/Executors/ExecutingGraph.cpp b/src/Processors/Executors/ExecutingGraph.cpp index 6d5b60d8159..02ae8af5f52 100644 --- a/src/Processors/Executors/ExecutingGraph.cpp +++ b/src/Processors/Executors/ExecutingGraph.cpp @@ -279,7 +279,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue try { auto & processor = *node.processor; - IProcessor::Status last_status = node.last_processor_status; + const auto last_status = node.last_processor_status; IProcessor::Status status = processor.prepare(node.updated_input_ports, node.updated_output_ports); node.last_processor_status = status; @@ -319,7 +319,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue node.updated_input_ports.clear(); node.updated_output_ports.clear(); - switch (node.last_processor_status) + switch (*node.last_processor_status) { case IProcessor::Status::NeedData: case IProcessor::Status::PortFull: diff --git a/src/Processors/Executors/ExecutingGraph.h b/src/Processors/Executors/ExecutingGraph.h index 71dcd360a2c..6f91c120d47 100644 --- a/src/Processors/Executors/ExecutingGraph.h +++ b/src/Processors/Executors/ExecutingGraph.h @@ -92,7 +92,7 @@ public: std::exception_ptr exception; /// Last state for profiling. - IProcessor::Status last_processor_status = IProcessor::Status::NeedData; + std::optional last_processor_status; /// Ports which have changed their state since last processor->prepare() call. /// They changed when neighbour processors interact with connected ports. diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 82cad471a29..d4630f21688 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -432,7 +432,7 @@ String PipelineExecutor::dumpPipeline() const } } - std::vector statuses; + std::vector> statuses; std::vector proc_list; statuses.reserve(graph->nodes.size()); proc_list.reserve(graph->nodes.size()); diff --git a/src/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp index edb4d662d8b..fc595a7b565 100644 --- a/src/Processors/IProcessor.cpp +++ b/src/Processors/IProcessor.cpp @@ -55,9 +55,12 @@ void IProcessor::dump() const } -std::string IProcessor::statusToName(Status status) +std::string IProcessor::statusToName(std::optional status) { - switch (status) + if (status == std::nullopt) + return "NotStarted"; + + switch (*status) { case Status::NeedData: return "NeedData"; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index f1ce044d92f..02b8a3daa28 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -162,7 +162,7 @@ public: ExpandPipeline, }; - static std::string statusToName(Status status); + static std::string statusToName(std::optional status); /** Method 'prepare' is responsible for all cheap ("instantaneous": O(1) of data volume, no wait) calculations. 
* From 7f0406a7265058b3dd3e5fde4866126971050a8b Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 15:53:09 +0000 Subject: [PATCH 2270/2361] Remove JSONEmpty function and allow JSON in empty function --- .../sql-reference/functions/json-functions.md | 32 ----- src/Functions/JSONEmpty.cpp | 126 ------------------ src/Functions/empty.cpp | 123 ++++++++++++++++- ...ead_subcolumns_2_compact_merge_tree.sql.j2 | 4 +- .../03207_json_read_subcolumns_2_memory.sql | 4 +- ...n_read_subcolumns_2_wide_merge_tree.sql.j2 | 4 +- ...ead_subcolumns_2_compact_merge_tree.sql.j2 | 4 +- ...array_of_json_read_subcolumns_2_memory.sql | 4 +- ...n_read_subcolumns_2_wide_merge_tree.sql.j2 | 4 +- .../aspell-ignore/en/aspell-dict.txt | 1 - 10 files changed, 134 insertions(+), 172 deletions(-) delete mode 100644 src/Functions/JSONEmpty.cpp diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index fd1f924c18f..26fe888ab49 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -1359,35 +1359,3 @@ SELECT json, JSONSharedDataPathsWithTypes(json) FROM test; │ {"a":["1","2","3"],"c":"2020-01-01"} │ {'c':'Date'} │ └──────────────────────────────────────┴────────────────────────────────────┘ ``` - -### JSONEmpty - -Checks whether the input [JSON](../data-types/newjson.md) object is empty. - -``` sql -JSONEmpty(json) -``` - -**Arguments** - -- `json` — [JSON](../data-types/newjson.md). - -**Returned value** - -- Returns `1` for an empty JSON object or `0` for a non-empty JSON object. [UInt8](../data-types/int-uint.md). - -**Example** - -``` sql -CREATE TABLE test (json JSON) ENGINE = Memory; -INSERT INTO test FORMAT JSONEachRow {"json" : {}}, {"json" : {"a" : [1, 2, 3], "b" : "2020-01-01"}}, {"json" : {}}, -SELECT json, JSONEmpty(json) FROM test; -``` - -```text -┌─json─────────────────────────────────┬─JSONEmpty(json)─┠-│ {} │ 1 │ -│ {"a":["1","2","3"],"b":"2020-01-01"} │ 0 │ -│ {} │ 1 │ -└──────────────────────────────────────┴─────────────────┘ -``` diff --git a/src/Functions/JSONEmpty.cpp b/src/Functions/JSONEmpty.cpp deleted file mode 100644 index cdd1ac2f237..00000000000 --- a/src/Functions/JSONEmpty.cpp +++ /dev/null @@ -1,126 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int BAD_ARGUMENTS; -} - - -namespace -{ - -/// Implements the function JSONEmpty which returns true if provided JSON object is empty and false otherwise. 
-class FunctionJSONEmpty : public IFunction -{ -public: - static constexpr auto name = "JSONEmpty"; - - static FunctionPtr create(ContextPtr) { return std::make_shared(); } - - std::string getName() const override - { - return name; - } - - size_t getNumberOfArguments() const override { return 1; } - bool useDefaultImplementationForConstants() const override { return true; } - bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } - - DataTypePtr getReturnTypeImpl(const DataTypes & data_types) const override - { - if (data_types.size() != 1 || data_types[0]->getTypeId() != TypeIndex::Object) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires single argument with type JSON", getName()); - return std::make_shared(); - } - - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override - { - const ColumnWithTypeAndName & elem = arguments[0]; - const auto * object_column = typeid_cast(elem.column.get()); - if (!object_column) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected column type in function {}. Expected Object column, got {}", getName(), elem.column->getName()); - - auto res = DataTypeUInt8().createColumn(); - auto & data = typeid_cast(*res).getData(); - const auto & typed_paths = object_column->getTypedPaths(); - size_t size = object_column->size(); - /// If object column has at least 1 typed path, it will never be empty, because these paths always have values. - if (!typed_paths.empty()) - { - data.resize_fill(size, 0); - return res; - } - - const auto & dynamic_paths = object_column->getDynamicPaths(); - const auto & shared_data = object_column->getSharedDataPtr(); - data.reserve(size); - for (size_t i = 0; i != size; ++i) - { - bool empty = true; - /// Check if there is no paths in shared data. - if (!shared_data->isDefaultAt(i)) - { - empty = false; - } - /// Check that all dynamic paths have NULL value in this row. - else - { - for (const auto & [path, column] : dynamic_paths) - { - if (!column->isNullAt(i)) - { - empty = false; - break; - } - } - } - - data.push_back(empty); - } - - return res; - } -}; - -} - -REGISTER_FUNCTION(JSONEmpty) -{ - factory.registerFunction(FunctionDocumentation{ - .description = R"( -Checks whether the input JSON object is empty. -)", - .syntax = {"JSONEmpty(json)"}, - .arguments = {{"json", "JSON column"}}, - .examples = {{{ - "Example", - R"( -CREATE TABLE test (json JSON) ENGINE = Memory; -INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {}}, {"json" : {"b" : "Hello"}}, {"json" : {}} -SELECT json, JSONEmpty(json) FROM test; -)", - R"( -┌─json──────────┬─JSONEmpty(json)─┠-│ {"a":"42"} │ 0 │ -│ {} │ 1 │ -│ {"b":"Hello"} │ 0 │ -│ {} │ 1 │ -└───────────────┴─────────────────┘ - -)"}}}, - .categories{"JSON"}, - }); -} - -} diff --git a/src/Functions/empty.cpp b/src/Functions/empty.cpp index 51811d21a0c..f97e6bfb258 100644 --- a/src/Functions/empty.cpp +++ b/src/Functions/empty.cpp @@ -2,10 +2,18 @@ #include #include #include +#include namespace DB { + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; +} + namespace { @@ -13,13 +21,126 @@ struct NameEmpty { static constexpr auto name = "empty"; }; + using FunctionEmpty = FunctionStringOrArrayToT, NameEmpty, UInt8, false>; +/// Implements the empty function for JSON type. 
+class ExecutableFunctionJSONEmpty : public IExecutableFunction +{ +public: + std::string getName() const override { return NameEmpty::name; } + +private: + bool useDefaultImplementationForConstants() const override { return true; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + { + const ColumnWithTypeAndName & elem = arguments[0]; + const auto * object_column = typeid_cast(elem.column.get()); + if (!object_column) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected column type in function {}. Expected Object column, got {}", getName(), elem.column->getName()); + + auto res = DataTypeUInt8().createColumn(); + auto & data = typeid_cast(*res).getData(); + const auto & typed_paths = object_column->getTypedPaths(); + size_t size = object_column->size(); + /// If object column has at least 1 typed path, it will never be empty, because these paths always have values. + if (!typed_paths.empty()) + { + data.resize_fill(size, 0); + return res; + } + + const auto & dynamic_paths = object_column->getDynamicPaths(); + const auto & shared_data = object_column->getSharedDataPtr(); + data.reserve(size); + for (size_t i = 0; i != size; ++i) + { + bool empty = true; + /// Check if there is no paths in shared data. + if (!shared_data->isDefaultAt(i)) + { + empty = false; + } + /// Check that all dynamic paths have NULL value in this row. + else + { + for (const auto & [path, column] : dynamic_paths) + { + if (!column->isNullAt(i)) + { + empty = false; + break; + } + } + } + + data.push_back(empty); + } + + return res; + } +}; + +class FunctionEmptyJSON final : public IFunctionBase +{ +public: + FunctionEmptyJSON(const DataTypes & argument_types_, const DataTypePtr & return_type_) : argument_types(argument_types_), return_type(return_type_) {} + + String getName() const override { return NameEmpty::name; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + const DataTypes & getArgumentTypes() const override { return argument_types; } + const DataTypePtr & getResultType() const override { return return_type; } + + ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName &) const override + { + return std::make_unique(); + } + +private: + DataTypes argument_types; + DataTypePtr return_type; +}; + +class FunctionEmptyOverloadResolver final : public IFunctionOverloadResolver +{ +public: + static constexpr auto name = NameEmpty::name; + + static FunctionOverloadResolverPtr create(ContextPtr) + { + return std::make_unique(); + } + + String getName() const override { return NameEmpty::name; } + size_t getNumberOfArguments() const override { return 1; } + + FunctionBasePtr buildImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type) const override + { + DataTypes argument_types; + argument_types.reserve(arguments.size()); + for (const auto & arg : arguments) + argument_types.push_back(arg.type); + + if (argument_types.size() == 1 && isObject(argument_types[0])) + return std::make_shared(argument_types, return_type); + + return std::make_shared(std::make_shared(), argument_types, return_type); + } + + DataTypePtr getReturnTypeImpl(const DataTypes &) const override + { + return std::make_shared(); + } +}; + } REGISTER_FUNCTION(Empty) { - factory.registerFunction(); + factory.registerFunction(); } } diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 
b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 index 942489ec8fc..6c33044b5d8 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 @@ -109,12 +109,12 @@ select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.: select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; -select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0; +select count() from test where empty(json.^a) and json.a.b.c == 0; select json.^a, json.a.b.c from test order by id format Null; select json, json.^a, json.a.b.c from test format Null; select json, json.^a, json.a.b.c from test order by id format Null; -select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null; +select count() from test where empty(json.^a) and json.a.b.d is Null; select json.^a, json.a.b.d from test order by id format Null; select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql index 3c791118c9c..cc646987c80 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql @@ -106,12 +106,12 @@ select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.: select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; -select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0; +select count() from test where empty(json.^a) and json.a.b.c == 0; select json.^a, json.a.b.c from test order by id format Null; select json, json.^a, json.a.b.c from test format Null; select json, json.^a, json.a.b.c from test order by id format Null; -select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null; +select count() from test where empty(json.^a) and json.a.b.d is Null; select json.^a, json.a.b.d from test order by id format Null; select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 index 325ce2dcbf5..ab4e0437c15 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 @@ -109,12 +109,12 @@ select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.: select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; select json, json.d.a, 
json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; -select count() from test where JSONEmpty(json.^a) and json.a.b.c == 0; +select count() from test where empty(json.^a) and json.a.b.c == 0; select json.^a, json.a.b.c from test order by id format Null; select json, json.^a, json.a.b.c from test format Null; select json, json.^a, json.a.b.c from test order by id format Null; -select count() from test where JSONEmpty(json.^a) and json.a.b.d is Null; +select count() from test where empty(json.^a) and json.a.b.d is Null; select json.^a, json.a.b.d from test order by id format Null; select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 index e1e0c07a5df..e3930165602 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 @@ -40,8 +40,8 @@ select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0. select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); -select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; -select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql index 1cd5e2b8d47..9274b9b9cf7 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql @@ -37,8 +37,8 @@ select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0. 
select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); -select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; -select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 index 89223316aa3..3010fa0e2de 100644 --- a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 @@ -40,8 +40,8 @@ select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0. select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); -select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; -select count() from test where JSONEmpty(arrayJoin(json.a.r[].^b)) and JSONEmpty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 0289296c580..fd836d93143 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -411,7 +411,6 @@ JSONDynamicPaths JSONDynamicPathsWithTypes JSONEachRow JSONEachRowWithProgress -JSONEmpty JSONExtract JSONExtractArrayRaw JSONExtractBool From 65493ccf12f6a58d898544f484ad95a54724854d Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 15:57:21 +0000 Subject: [PATCH 2271/2361] Remove unused error code --- src/Functions/empty.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Functions/empty.cpp b/src/Functions/empty.cpp index f97e6bfb258..b696385e8c9 100644 --- a/src/Functions/empty.cpp +++ b/src/Functions/empty.cpp @@ -10,7 +10,6 @@ namespace DB namespace ErrorCodes { - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int BAD_ARGUMENTS; } From b8055eb346c1eabe006d33e905134c58eaac66e3 Mon Sep 
17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 15 Aug 2024 17:03:14 +0000 Subject: [PATCH 2272/2361] Fixing test. --- .../0_stateless/02210_processors_profile_log.reference | 4 ++-- tests/queries/0_stateless/02210_processors_profile_log.sql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02210_processors_profile_log.reference b/tests/queries/0_stateless/02210_processors_profile_log.reference index 035bd9897ad..12ba17103da 100644 --- a/tests/queries/0_stateless/02210_processors_profile_log.reference +++ b/tests/queries/0_stateless/02210_processors_profile_log.reference @@ -6,6 +6,6 @@ ExpressionTransform ExpressionTransform 1 1 1 1 1 LazyOutputFormat 1 1 1 0 0 LimitsCheckingTransform 1 1 1 1 1 -NullSource 1 0 0 0 0 -NullSource 1 0 0 0 0 +NullSource 0 0 0 0 0 +NullSource 0 0 0 0 0 SourceFromSingleChunk 1 0 0 1 1 diff --git a/tests/queries/0_stateless/02210_processors_profile_log.sql b/tests/queries/0_stateless/02210_processors_profile_log.sql index 75e5bcbb585..a850f4312b3 100644 --- a/tests/queries/0_stateless/02210_processors_profile_log.sql +++ b/tests/queries/0_stateless/02210_processors_profile_log.sql @@ -20,8 +20,8 @@ SELECT -- SourceFromSingleChunk, that feed data to ExpressionTransform, -- will feed first block and then wait in PortFull. name = 'SourceFromSingleChunk', output_wait_elapsed_us >= 0.9e6 ? 1 : output_wait_elapsed_us, - -- NullSource/LazyOutputFormatLazyOutputFormat are the outputs - -- so they cannot starts to execute before sleep(1) will be executed. + -- LazyOutputFormatLazyOutputFormat is the output + -- so it cannot starts to execute before sleep(1) will be executed. input_wait_elapsed_us>=1e6 ? 1 : input_wait_elapsed_us) elapsed, input_rows, From 5dcf3f0c59c1db273e56eec8e8631c09252036a6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 15 Aug 2024 19:39:35 +0200 Subject: [PATCH 2273/2361] Update 03223_analyzer_with_cube_fuzz.sql --- tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql index c19d11fbe0c..f3bccc79b3f 100644 --- a/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql +++ b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql @@ -1,3 +1,5 @@ +SET enable_analyzer = 1; + DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; From 75f951dae57c4de27cdba28792622a35597cf31c Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Thu, 15 Aug 2024 17:59:31 +0000 Subject: [PATCH 2274/2361] limit task time for mutations --- src/Storages/MergeTree/MergeTask.cpp | 5 +- src/Storages/MergeTree/MergeTreeSettings.h | 2 +- src/Storages/MergeTree/MutateTask.cpp | 88 +++++++++++++--------- 3 files changed, 57 insertions(+), 38 deletions(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index ee15bc7f711..26cb821f33b 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -9,7 +9,6 @@ #include #include #include -#include "base/types.h" #include #include #include @@ -521,7 +520,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute() bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() { Stopwatch watch(CLOCK_MONOTONIC_COARSE); - UInt64 step_time_ms = global_ctx->data->getSettings()->merge_preferred_step_execution_time_ms.totalMilliseconds(); + UInt64 step_time_ms = 
global_ctx->data->getSettings()->background_task_preferred_step_execution_time_ms.totalMilliseconds(); do { @@ -751,7 +750,7 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const bool MergeTask::VerticalMergeStage::executeVerticalMergeForOneColumn() const { Stopwatch watch(CLOCK_MONOTONIC_COARSE); - UInt64 step_time_ms = global_ctx->data->getSettings()->merge_preferred_step_execution_time_ms.totalMilliseconds(); + UInt64 step_time_ms = global_ctx->data->getSettings()->background_task_preferred_step_execution_time_ms.totalMilliseconds(); do { diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 149f9a3e80b..de1f0f60cfc 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -84,7 +84,7 @@ struct Settings; M(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ M(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \ M(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ - M(Milliseconds, merge_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge. Can be exceeded if one step takes longer time", 0) \ + M(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \ \ /** Inserts settings. */ \ M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 14c274d7f64..40648439887 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -1257,6 +1257,8 @@ public: private: void prepare(); bool mutateOriginalPartAndPrepareProjections(); + void writeTempProjectionPart(size_t projection_idx, Chunk chunk); + void finalizeTempProjections(); bool iterateThroughAllProjections(); void constructTaskForProjectionPartsMerge(); void finalize(); @@ -1307,10 +1309,22 @@ void PartMergerWriter::prepare() bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() { - Block cur_block; - Block projection_header; - if (MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry) && ctx->mutating_executor->pull(cur_block)) + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + UInt64 step_time_ms = ctx->data->getSettings()->background_task_preferred_step_execution_time_ms.totalMilliseconds(); + + do { + Block cur_block; + Block projection_header; + + MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry); + + if (!ctx->mutating_executor->pull(cur_block)) + { + finalizeTempProjections(); + return false; + } + if (ctx->minmax_idx) ctx->minmax_idx->update(cur_block, MergeTreeData::getMinMaxColumnsNames(ctx->metadata_snapshot->getPartitionKey())); @@ -1322,46 +1336,56 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() for (size_t i = 0, size = ctx->projections_to_build.size(); i < size; ++i) { - const auto & projection = *ctx->projections_to_build[i]; + Chunk squashed_chunk; - ProfileEventTimeIncrement watch(ProfileEvents::MutateTaskProjectionsCalculationMicroseconds); - Block block_to_squash = projection.calculate(cur_block, 
ctx->context); - projection_squashes[i].setHeader(block_to_squash.cloneEmpty()); - - Chunk squashed_chunk = Squashing::squash(projection_squashes[i].add({block_to_squash.getColumns(), block_to_squash.rows()})); - if (squashed_chunk) { - auto result = projection_squashes[i].getHeader().cloneWithColumns(squashed_chunk.detachColumns()); - auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, result, projection, ctx->new_data_part.get(), ++block_num); - tmp_part.finalize(); - tmp_part.part->getDataPartStorage().commitTransaction(); - projection_parts[projection.name].emplace_back(std::move(tmp_part.part)); + ProfileEventTimeIncrement projection_watch(ProfileEvents::MutateTaskProjectionsCalculationMicroseconds); + Block block_to_squash = ctx->projections_to_build[i]->calculate(cur_block, ctx->context); + + projection_squashes[i].setHeader(block_to_squash.cloneEmpty()); + squashed_chunk = Squashing::squash(projection_squashes[i].add({block_to_squash.getColumns(), block_to_squash.rows()})); } + + if (squashed_chunk) + writeTempProjectionPart(i, std::move(squashed_chunk)); } (*ctx->mutate_entry)->rows_written += cur_block.rows(); (*ctx->mutate_entry)->bytes_written_uncompressed += cur_block.bytes(); + } while (watch.elapsedMilliseconds() < step_time_ms); - /// Need execute again - return true; - } + /// Need execute again + return true; +} +void PartMergerWriter::writeTempProjectionPart(size_t projection_idx, Chunk chunk) +{ + const auto & projection = *ctx->projections_to_build[projection_idx]; + const auto & projection_plan = projection_squashes[projection_idx]; + + auto result = projection_plan.getHeader().cloneWithColumns(chunk.detachColumns()); + + auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart( + *ctx->data, + ctx->log, + result, + projection, + ctx->new_data_part.get(), + ++block_num); + + tmp_part.finalize(); + tmp_part.part->getDataPartStorage().commitTransaction(); + projection_parts[projection.name].emplace_back(std::move(tmp_part.part)); +} + +void PartMergerWriter::finalizeTempProjections() +{ // Write the last block for (size_t i = 0, size = ctx->projections_to_build.size(); i < size; ++i) { - const auto & projection = *ctx->projections_to_build[i]; - auto & projection_squash_plan = projection_squashes[i]; - auto squashed_chunk = Squashing::squash(projection_squash_plan.flush()); + auto squashed_chunk = Squashing::squash(projection_squashes[i].flush()); if (squashed_chunk) - { - auto result = projection_squash_plan.getHeader().cloneWithColumns(squashed_chunk.detachColumns()); - auto temp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, result, projection, ctx->new_data_part.get(), ++block_num); - temp_part.finalize(); - temp_part.part->getDataPartStorage().commitTransaction(); - projection_parts[projection.name].emplace_back(std::move(temp_part.part)); - } + writeTempProjectionPart(i, std::move(squashed_chunk)); } projection_parts_iterator = std::make_move_iterator(projection_parts.begin()); @@ -1369,12 +1393,8 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() /// Maybe there are no projections ? 
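The MutateTask change above applies the same pattern as the MergeTask one: previously mutateOriginalPartAndPrepareProjections returned to the scheduler after every single block, while now it keeps pulling blocks inside a Stopwatch-bounded do-while and only reports "execute again" once the soft budget from background_task_preferred_step_execution_time_ms is spent, so each scheduled step does a predictable amount of wall-clock work. A minimal sketch of that time-sliced step pattern, using nothing but the standard library and purely illustrative names (the real code uses ClickHouse's Stopwatch and the setting above):

#include <chrono>
#include <functional>

/// Process work items until the work runs out or a soft time budget is exceeded,
/// and report whether another step still needs to be scheduled.
bool executeStep(const std::function<bool()> & process_one_item, std::chrono::milliseconds budget)
{
    const auto start = std::chrono::steady_clock::now();
    do
    {
        if (!process_one_item())
            return false; /// no more work: the task is finished
    } while (std::chrono::steady_clock::now() - start < budget);
    return true; /// budget spent: ask the scheduler for one more step
}

As in the patch, the budget is checked only between items, so a single oversized block can still exceed it; the setting is a target, not a hard limit.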
if (projection_parts_iterator != std::make_move_iterator(projection_parts.end())) constructTaskForProjectionPartsMerge(); - - /// Let's move on to the next stage - return false; } - void PartMergerWriter::constructTaskForProjectionPartsMerge() { auto && [name, parts] = *projection_parts_iterator; From a5cc3a3a9cb7d40cc28a00016abbc08ba433e16a Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 15 Aug 2024 20:03:22 +0200 Subject: [PATCH 2275/2361] Update log message --- src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 286d06bc424..56bfa019819 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -1031,7 +1031,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() } else LOG_TRACE(log, "No space left in cache to reserve {} bytes, reason: {}, " - "will continue without cache download", failure_reason, size); + "will continue without cache download", size, failure_reason); if (!success) { From 6c085e0a939caeaf913a98ec83ce8d62e7a1413e Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 15 Aug 2024 20:51:46 +0200 Subject: [PATCH 2276/2361] Fix empty types check --- src/Functions/empty.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/Functions/empty.cpp b/src/Functions/empty.cpp index b696385e8c9..4475f622b56 100644 --- a/src/Functions/empty.cpp +++ b/src/Functions/empty.cpp @@ -131,6 +131,15 @@ public: DataTypePtr getReturnTypeImpl(const DataTypes &) const override { + if (!isStringOrFixedString(arguments[0]) + && !isArray(arguments[0]) + && !isMap(arguments[0]) + && !isUUID(arguments[0]) + && !isIPv6(arguments[0]) + && !isIPv4(arguments[0]) + && !isObject(arguments[0])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", arguments[0]->getName(), getName()); + return std::make_shared(); } }; From 5445669d4e296a367f4c807b227d5ae3888f2c3e Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 15 Aug 2024 20:55:36 +0200 Subject: [PATCH 2277/2361] Don't create new dynamic path for null values --- src/Formats/JSONExtractTree.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index cac4dfbb077..d8c4ed229ca 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1731,6 +1731,11 @@ private: return false; } } + /// Don't create new dynamic paths for null and don't insert null values into shared data. + /// We consider null equivalent to the absence of this path. + else if (element.isNull()) + { + } /// Try to add a new dynamic path. else if (auto * dynamic_column = column_object.tryToAddNewDynamicPath(current_path)) { @@ -1740,10 +1745,8 @@ private: return false; } } - /// Otherwise this path should go to the shared data. - /// Don't insert null values into shared data. /// We consider null equivalent to the absence of this path. 
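With this change a JSON null can no longer create a new dynamic path or end up in shared data; it is treated as if the path were simply absent, while nulls for already existing typed or dynamic paths are still inserted into those subcolumns as before. A rough sketch of the resulting per-path decision order, with purely hypothetical names rather than the real JSONExtractTree interfaces:

/// Illustrative only: the order in which a single path/value pair is dispatched
/// after the patch above.
enum class PathAction { TypedPath, ExistingDynamicPath, SkipNull, NewDynamicPath, SharedData };

PathAction choosePathAction(bool is_typed_path, bool has_dynamic_path, bool is_null, bool can_add_dynamic_path)
{
    if (is_typed_path)
        return PathAction::TypedPath;
    if (has_dynamic_path)
        return PathAction::ExistingDynamicPath;
    if (is_null)
        return PathAction::SkipNull; /// null is equivalent to an absent path
    if (can_add_dynamic_path)
        return PathAction::NewDynamicPath;
    return PathAction::SharedData;
}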
- else if (!element.isNull()) + else { auto tmp_dynamic_column = ColumnDynamic::create(); tmp_dynamic_column->reserve(1); From 32203edb07a27e0218925163fb902606efaffb9f Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 19:24:31 +0000 Subject: [PATCH 2278/2361] Fix --- src/Functions/empty.cpp | 3 ++- src/Storages/AlterCommands.cpp | 15 ++++++++++++--- .../03225_alter_to_json_not_supported.reference | 0 .../03225_alter_to_json_not_supported.sql | 15 +++++++++++++++ 4 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/03225_alter_to_json_not_supported.reference create mode 100644 tests/queries/0_stateless/03225_alter_to_json_not_supported.sql diff --git a/src/Functions/empty.cpp b/src/Functions/empty.cpp index 4475f622b56..ddb503668cf 100644 --- a/src/Functions/empty.cpp +++ b/src/Functions/empty.cpp @@ -11,6 +11,7 @@ namespace DB namespace ErrorCodes { extern const int BAD_ARGUMENTS; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; } namespace @@ -129,7 +130,7 @@ public: return std::make_shared(std::make_shared(), argument_types, return_type); } - DataTypePtr getReturnTypeImpl(const DataTypes &) const override + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { if (!isStringOrFixedString(arguments[0]) && !isArray(arguments[0]) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index d5780e32db3..8fbd6cbd29d 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -1403,14 +1404,22 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const const GetColumnsOptions options(GetColumnsOptions::All); const auto old_data_type = all_columns.getColumn(options, column_name).type; - bool new_type_has_object = command.data_type->hasDynamicSubcolumnsDeprecated(); - bool old_type_has_object = old_data_type->hasDynamicSubcolumnsDeprecated(); + bool new_type_has_deprecated_object = command.data_type->hasDynamicSubcolumnsDeprecated(); + bool old_type_has_deprecated_object = old_data_type->hasDynamicSubcolumnsDeprecated(); - if (new_type_has_object || old_type_has_object) + if (new_type_has_deprecated_object || old_type_has_deprecated_object) throw Exception( ErrorCodes::BAD_ARGUMENTS, "The change of data type {} of column {} to {} is not allowed. 
It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName()); + + bool has_object_type = isObject(command.data_type); + command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); + if (has_object_type) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The change of data type {} of column {} to {} is not supported.", + old_data_type->getName(), backQuote(column_name), command.data_type->getName()); } if (command.isRemovingProperty()) diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference b/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql new file mode 100644 index 00000000000..398494d56de --- /dev/null +++ b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql @@ -0,0 +1,15 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (s String) engine=MergeTree order by tuple(); +alter table test modify column s JSON; -- { serverError BAD_ARGUMENTS } +drop table test; + +create table test (s Array(String)) engine=MergeTree order by tuple(); +alter table test modify column s Array(JSON); -- { serverError BAD_ARGUMENTS } +drop table test; + +create table test (s Tuple(String, String)) engine=MergeTree order by tuple(); +alter table test modify column s Tuple(JSON, String); -- { serverError BAD_ARGUMENTS } +drop table test; + From 077f10a4ada2a561111207d8e99e22d2c8e48f40 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 15 Aug 2024 18:26:48 +0200 Subject: [PATCH 2279/2361] Fix build --- src/Dictionaries/PostgreSQLDictionarySource.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index fd426de126d..8e472f85a6e 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -4,6 +4,7 @@ #include #include #include "DictionarySourceFactory.h" +#include #include "registerDictionaries.h" #if USE_LIBPQXX @@ -13,7 +14,6 @@ #include "readInvalidateQuery.h" #include #include -#include #include #endif @@ -27,14 +27,14 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -#if USE_LIBPQXX - -static const UInt64 max_block_size = 8192; - static const ValidateKeysMultiset dictionary_allowed_keys = { "host", "port", "user", "password", "db", "database", "table", "schema", "update_field", "update_lag", "invalidate_query", "query", "where", "name", "priority"}; +#if USE_LIBPQXX + +static const UInt64 max_block_size = 8192; + namespace { ExternalQueryBuilder makeExternalQueryBuilder(const DictionaryStructure & dict_struct, const String & schema, const String & table, const String & query, const String & where) @@ -178,8 +178,6 @@ std::string PostgreSQLDictionarySource::toString() const return "PostgreSQL: " + configuration.db + '.' + configuration.table + (where.empty() ? 
"" : ", where: " + where); } -#endif - static void validateConfigKeys( const Poco::Util::AbstractConfiguration & dict_config, const String & config_prefix) { @@ -193,6 +191,8 @@ static void validateConfigKeys( } } +#endif + void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) { auto create_table_source = [=](const DictionaryStructure & dict_struct, From 2a005ee186eb71cc9e4a13f987cfda7f1a9228f1 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 20:42:43 +0000 Subject: [PATCH 2280/2361] Don't allow alter update on columns with dynamic subcolumns --- src/Interpreters/MutationsInterpreter.cpp | 6 ++++++ ...226_alter_update_dynamic_json_not_supported.reference | 0 .../03226_alter_update_dynamic_json_not_supported.sql | 9 +++++++++ 3 files changed, 15 insertions(+) create mode 100644 tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.reference create mode 100644 tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 24635870e62..0b93b5989b1 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -500,6 +500,12 @@ static void validateUpdateColumns( throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no column {} in table", backQuote(column_name)); } } + else if (storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->hasDynamicSubcolumns()) + { + throw Exception(ErrorCodes::CANNOT_UPDATE_COLUMN, + "Cannot update column {} with type {}: updates of columns with dynamic subcolumns are not supported", + backQuote(column_name), storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->getName()); + } } } diff --git a/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.reference b/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql b/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql new file mode 100644 index 00000000000..720f8670c83 --- /dev/null +++ b/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql @@ -0,0 +1,9 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (d Dynamic, json JSON) engine=MergeTree order by tuple(); +alter table test update d = 42 where 1; -- {serverError CANNOT_UPDATE_COLUMN} +alter table test update json = '{}' where 1; -- {serverError CANNOT_UPDATE_COLUMN} +drop table test; + From bf44d17eb8999af5441bdc56c2e9047aedc3de39 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 20:55:55 +0000 Subject: [PATCH 2281/2361] Fix comment --- src/Formats/JSONExtractTree.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index d8c4ed229ca..5e9f8a3690b 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1726,7 +1726,7 @@ private: } } else if (!dynamic_node->insertResultToColumn(*dynamic_it->second, element, insert_settings, format_settings, error)) - { + { error += fmt::format(" (while reading path {})", current_path); return false; } @@ -1745,7 +1745,7 @@ private: return false; } } - /// We consider null equivalent to the absence of this path. 
+ /// Otherwise this path should go to the shared data. else { auto tmp_dynamic_column = ColumnDynamic::create(); From 182c00eba71acc602c60e0cd24be5c1696dd3c42 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Thu, 15 Aug 2024 23:17:10 +0200 Subject: [PATCH 2282/2361] Fix style --- src/Formats/JSONExtractTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 5e9f8a3690b..122224535a7 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1726,7 +1726,7 @@ private: } } else if (!dynamic_node->insertResultToColumn(*dynamic_it->second, element, insert_settings, format_settings, error)) - { + { error += fmt::format(" (while reading path {})", current_path); return false; } From a49ef43286e31525f8829e3307a9231be0ce417c Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Thu, 15 Aug 2024 21:33:26 +0000 Subject: [PATCH 2283/2361] Fix 01119_session_log flakiness --- ...9_session_log.sql => 01119_session_log.sh} | 37 +++++++++++++++---- 1 file changed, 30 insertions(+), 7 deletions(-) rename tests/queries/0_stateless/{01119_session_log.sql => 01119_session_log.sh} (73%) mode change 100644 => 100755 diff --git a/tests/queries/0_stateless/01119_session_log.sql b/tests/queries/0_stateless/01119_session_log.sh old mode 100644 new mode 100755 similarity index 73% rename from tests/queries/0_stateless/01119_session_log.sql rename to tests/queries/0_stateless/01119_session_log.sh index 55f6228797a..809d300fada --- a/tests/queries/0_stateless/01119_session_log.sql +++ b/tests/queries/0_stateless/01119_session_log.sh @@ -1,5 +1,20 @@ --- Tags: no-fasttest +#!/usr/bin/env bash +# Tags: no-fasttest +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +session_log_query_prefix=" +system flush logs; +select distinct type, user, auth_type, toString(client_address)!='::ffff:0.0.0.0' as a, client_port!=0 as b, interface from system.session_log +where user in ('default', 'nonexistsnt_user_1119', ' ', ' INTERSERVER SECRET ') +and interface in ('HTTP', 'TCP', 'TCP_Interserver') +and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes) +and event_time >= now() - interval 5 minute" + +$CLICKHOUSE_CLIENT -nm -q " select * from remote('127.0.0.2', system, one, 'default', ''); select * from remote('127.0.0.2', system, one, 'default', 'wrong password'); -- { serverError AUTHENTICATION_FAILED } select * from remote('127.0.0.2', system, one, 'nonexistsnt_user_1119', ''); -- { serverError AUTHENTICATION_FAILED } @@ -16,9 +31,17 @@ select * from url('http://127.0.0.1:8123/?query=select+1&user=+++', LineAsString select * from cluster('test_cluster_interserver_secret', system, one); -system flush logs; -select distinct type, user, auth_type, toString(client_address)!='::ffff:0.0.0.0' as a, client_port!=0 as b, interface from system.session_log -where user in ('default', 'nonexistsnt_user_1119', ' ', ' INTERSERVER SECRET ') -and interface in ('HTTP', 'TCP', 'TCP_Interserver') -and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes) -and event_time >= now() - interval 5 minute order by type, user, interface; +$session_log_query_prefix and type != 'Logout' order by type, user, interface; +" + +# Wait for logout events. 
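+# Logout entries are recorded only after the corresponding connections are closed and the
+# session log is flushed, which can happen noticeably later than the queries above, so poll
+# for the expected three Logout rows (with a bounded number of retries) instead of reading
+# the log just once.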
+for attempt in {1..10} +do + if [ "`$CLICKHOUSE_CLIENT -q "$session_log_query_prefix and type = 'Logout'" | wc -l`" -eq 3 ] + then + break + fi + sleep 2 +done + +$CLICKHOUSE_CLIENT -q "$session_log_query_prefix and type = 'Logout' order by user, interface" From db691e4cba1c5eaee10868a481ced4b729d73bc6 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 15 Aug 2024 22:35:07 +0000 Subject: [PATCH 2284/2361] Update tests --- .../01825_new_type_json_ephemeral.reference | 2 +- ...3206_json_parsing_and_formatting.reference | 32 +-- ...bcolumns_1_compact_merge_tree.reference.j2 | 200 +++++++++--------- ...07_json_read_subcolumns_1_memory.reference | 100 ++++----- ..._subcolumns_1_wide_merge_tree.reference.j2 | 200 +++++++++--------- ...bcolumns_2_compact_merge_tree.reference.j2 | 4 +- ...07_json_read_subcolumns_2_memory.reference | 2 +- ..._subcolumns_2_wide_merge_tree.reference.j2 | 4 +- 8 files changed, 272 insertions(+), 272 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference b/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference index e8891cf6a56..7efe8cea252 100644 --- a/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference +++ b/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference @@ -1 +1 @@ -PushEvent some-repo {"actor":{"avatar_url":"https:\\/\\/avatars.githubusercontent.com\\/u\\/123213213?","display_login":"github-actions","gravatar_id":"","id":"123123123","login":"github-actions[bot]","url":"https:\\/\\/api.github.com\\/users\\/github-actions[bot]"},"created_at":"2022-01-04 07:00:00.000000000","repo":{"id":"1001001010101","name":"some-repo","url":"https:\\/\\/api.github.com\\/repos\\/some-repo"},"type":"PushEvent"} +PushEvent some-repo {"actor":{"avatar_url":"https:\\/\\/avatars.githubusercontent.com\\/u\\/123213213?","display_login":"github-actions","gravatar_id":"","id":"123123123","login":"github-actions[bot]","url":"https:\\/\\/api.github.com\\/users\\/github-actions[bot]"},"created_at":"2022-01-04 07:00:00","repo":{"id":"1001001010101","name":"some-repo","url":"https:\\/\\/api.github.com\\/repos\\/some-repo"},"type":"PushEvent"} diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference index 94777ffab1f..75e55e0376d 100644 --- a/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference +++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference @@ -3,17 +3,17 @@ JSON with no arguments {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','e':'String'} {} 
{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} -{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {} 1 2020-01-01 {"e":{"f":["s1","s2"]}} 2 [1,2,3] {"e":{"g":"43"}} 3 \N {} 4 \N {} -5 ['b1','b2'] {"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}} +5 ['b1','b2'] {"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}} JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP '.*h.*') {"a":{"b":{"c":1,"d":[false,true]}},"b":"2020-01-01"} {"a":{"b":{"c":2,"d":[true,true]}},"b":["1","2","3"]} @@ -30,55 +30,55 @@ JSON(a.b.c UInt32, max_dynamic_paths=2) {"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.d':'Array(Nullable(Int64))','b':'Date'} {'c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))'} {'d.e.g':'Int64'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.d':'Array(Nullable(Int64))'} {'e':'String'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.d':'Array(Nullable(Int64))'} {'c':'Int64'} -{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))'} {'d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))'} {'d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} JSON(a.b.c UInt32, max_dynamic_paths=0) {"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} 
{'a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {} {'a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} -{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} JSON(a.b.c UInt32, max_dynamic_types=1) {"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.d':'Array(Nullable(Int64))','e':'String'} {} {'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} -{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime64(9)'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {} Test small max_read_buffer_size {"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} 
{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} {"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} Test PrettyJSONEachRow { "json": { @@ -176,7 +176,7 @@ Test PrettyJSONEachRow "s4" ], "g" : "44", - "h" : "2020-02-02 10:00:00.000000000" + "h" : "2020-02-02 10:00:00" } } } @@ -186,10 +186,10 @@ Test TSV {"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} {"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} {"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} -{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00.000000000"}}} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} Test CSV "{""a"":{""b"":{""c"":""1"",""d"":[""0"",""1""]}},""b"":""2020-01-01"",""c"":""42"",""d"":{""e"":{""f"":[""s1"",""s2""]}}}" "{""a"":{""b"":{""c"":""2"",""d"":[""2"",""3""]}},""b"":[""1"",""2"",""3""],""d"":{""e"":{""g"":""43""}}}" "{""a"":{""b"":{""c"":""3"",""d"":[""4"",""5""]}},""e"":""Hello, World!""}" "{""a"":{""b"":{""c"":""4"",""d"":[""6"",""7""]}},""c"":""43""}" -"{""a"":{""b"":{""c"":""5"",""d"":[""8"",""9""]}},""b"":[""b1"",""b2""],""d"":{""e"":{""f"":[""s3"",""s4""],""g"":""44"",""h"":""2020-02-02 10:00:00.000000000""}}}" +"{""a"":{""b"":{""c"":""5"",""d"":[""8"",""9""]}},""b"":[""b1"",""b2""],""d"":{""e"":{""f"":[""s3"",""s4""],""g"":""44"",""h"":""2020-02-02 10:00:00""}}}" diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 index 972cfd9c37f..a93a2259442 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 @@ -1,6 +1,6 @@ ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._25','Int64') @@ -17,7 +17,7 @@ { "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -54,16 +54,16 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", 
"str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -100,8 +100,8 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, 
{"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } @@ -116,15 +116,15 @@ "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, 
{"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, 
{"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -132,7 +132,7 @@ "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { @@ -148,23 +148,23 @@ "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -176,28 +176,28 @@ "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 
00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -220,19 +220,19 @@ "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, 
{"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -265,13 +265,13 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], @@ -280,7 +280,7 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -310,19 +310,19 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -349,19 +349,19 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -370,50 +370,50 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, 
{"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, 
null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null] } ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._25','Int64') @@ -430,7 +430,7 @@ { "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -467,16 +467,16 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, 
{"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, 
{"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, 
null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -513,8 +513,8 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } @@ -529,15 +529,15 @@ "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -545,7 +545,7 @@ "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 
36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { @@ -561,23 +561,23 @@ "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, 
{"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -589,28 +589,28 @@ "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", 
"1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -633,19 +633,19 @@ "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -678,13 +678,13 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], 
["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], @@ -693,7 +693,7 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -723,19 +723,19 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -762,19 +762,19 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -783,44 +783,44 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, 
{"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", 
null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference index a7361856bc1..6276be52c0d 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference @@ -1,6 +1,6 @@ ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._25','Int64') @@ -17,7 +17,7 @@ { "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -54,16 +54,16 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], 
[], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -100,8 +100,8 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, 
{"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, 
{"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } @@ -116,15 +116,15 @@ "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -132,7 +132,7 @@ "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { @@ -148,23 +148,23 @@ "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -176,28 +176,28 @@ "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", 
"1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -220,19 +220,19 @@ "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -265,13 +265,13 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, 
{"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, 
{"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], @@ -280,7 +280,7 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -310,19 +310,19 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -349,19 +349,19 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], 
"json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -370,44 +370,44 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, 
{"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, 
{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - 
"json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", 
"11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 index 972cfd9c37f..a93a2259442 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 @@ -1,6 +1,6 @@ ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._25','Int64') @@ -17,7 +17,7 @@ { "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", 
"str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -54,16 +54,16 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, 
{"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -100,8 +100,8 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, 
{"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, 
{"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } @@ -116,15 +116,15 @@ "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -132,7 +132,7 @@ "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { @@ -148,23 +148,23 @@ "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 
00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -176,28 +176,28 @@ "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -220,19 +220,19 @@ "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], 
["0","1","2","3","4"]] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -265,13 +265,13 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], @@ -280,7 +280,7 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -310,19 +310,19 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -349,19 +349,19 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -370,50 +370,50 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, 
{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, 
{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 
00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, 
{"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 
00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", 
"13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], 
"json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, 
{"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, 
{"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._25','Int64') @@ -430,7 +430,7 @@ { "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, 
"str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -467,16 +467,16 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, 
{"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], @@ -513,8 +513,8 @@ "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, 
{"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, 
{"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] } @@ -529,15 +529,15 @@ "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, 
{"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -545,7 +545,7 @@ "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { @@ -561,23 +561,23 @@ "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, 
null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -589,28 +589,28 @@ "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, 
{"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 
00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } @@ -633,19 +633,19 @@ "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, 
{"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -678,13 +678,13 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]], @@ -693,7 +693,7 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -723,19 +723,19 @@ "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -762,19 +762,19 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], @@ -783,44 +783,44 @@ "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, 
{"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, 
{"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, 
{"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, 
null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, 
{"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 
00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"] + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, 
{"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, 
{"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } { - "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], - "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, 
{"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35.000000000","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36.000000000","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37.000000000","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38.000000000","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39.000000000","e":"str_39"}}], - "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35.000000000", "1970-01-01 00:00:36.000000000", "1970-01-01 00:00:37.000000000", "1970-01-01 00:00:38.000000000", "1970-01-01 00:00:39.000000000"], + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, 
{"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] } diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 index 13343b21a8c..e1e69879cfb 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 @@ -1,6 +1,6 @@ ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._0','Int64') @@ -33,7 +33,7 @@ 0 ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._0','Int64') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference index 6c455b1bb0d..1ef53fb5716 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference @@ -1,6 +1,6 @@ ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._0','Int64') diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 index 13343b21a8c..e1e69879cfb 100644 --- a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 +++ 
b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 @@ -1,6 +1,6 @@ ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._0','Int64') @@ -33,7 +33,7 @@ 0 ('a.b.c','UInt32') ('a.b.d','Array(Nullable(String))') -('a.b.d','DateTime64(9)') +('a.b.d','DateTime') ('a.b.d','Int64') ('a.b.e','String') ('b.b._0','Int64') From dc2f455b810bbe3c457e294fbb4518bdb1766eca Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 16 Aug 2024 00:19:39 +0000 Subject: [PATCH 2285/2361] Try to mitigate 02818_memory_profiler_sample_min_max_allocation_size flakiness --- .../02818_memory_profiler_sample_min_max_allocation_size.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh b/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh index 9234c428147..e2afc1d208c 100755 --- a/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh +++ b/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh query_id="${CLICKHOUSE_DATABASE}_min_max_allocation_size_$RANDOM$RANDOM" -${CLICKHOUSE_CLIENT} --query_id="$query_id" --memory_profiler_sample_min_allocation_size=4096 --memory_profiler_sample_max_allocation_size=8192 --log_queries=1 --max_threads=1 --max_untracked_memory=0 --memory_profiler_sample_probability=1 --query "select randomPrintableASCII(number) from numbers(1000) FORMAT Null" +${CLICKHOUSE_CLIENT} --query_id="$query_id" --memory_profiler_sample_min_allocation_size=4096 --memory_profiler_sample_max_allocation_size=16384 --log_queries=1 --max_threads=1 --max_untracked_memory=0 --memory_profiler_sample_probability=1 --query "select randomPrintableASCII(number) from numbers(1000) FORMAT Null" ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" @@ -14,4 +14,4 @@ ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" ${CLICKHOUSE_CLIENT} --query "SELECT countDistinct(abs(size)) > 0 FROM system.trace_log where query_id='$query_id' and trace_type = 'MemorySample'" # show wrong allocations -${CLICKHOUSE_CLIENT} --query "SELECT abs(size) FROM system.trace_log where query_id='$query_id' and trace_type = 'MemorySample' and (abs(size) > 8192 or abs(size) < 4096)" +${CLICKHOUSE_CLIENT} --query "SELECT abs(size) FROM system.trace_log where query_id='$query_id' and trace_type = 'MemorySample' and (abs(size) > 16384 or abs(size) < 4096)" From fb7afa779c7ea7d56c67da10a634133fe8416f97 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Aug 2024 03:59:17 +0200 Subject: [PATCH 2286/2361] Support expressions with tuples like `expr().name` --- src/Parsers/ExpressionListParsers.cpp | 17 ++++++++++++++--- .../03224_tuple_element_identifier.reference | 4 ++++ .../03224_tuple_element_identifier.sql | 13 +++++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03224_tuple_element_identifier.reference create mode 100644 tests/queries/0_stateless/03224_tuple_element_identifier.sql diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index d38dc6d5f37..ad6b8e13ea6 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -2811,8 +2811,8 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, 
IParser::Pos & po if (op.type == OperatorType::TupleElement) { ASTPtr tmp; - if (asterisk_parser.parse(pos, tmp, expected) || - columns_matcher_parser.parse(pos, tmp, expected)) + if (asterisk_parser.parse(pos, tmp, expected) + || columns_matcher_parser.parse(pos, tmp, expected)) { if (auto * asterisk = tmp->as()) { @@ -2833,6 +2833,17 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po layers.back()->pushOperand(std::move(tmp)); return Action::OPERATOR; } + + /// If it is an identifier, + /// replace it with literal, because an expression `expr().elem` + /// should be transformed to `tupleElement(expr(), 'elem')` for query analysis, + /// otherwise the identifier `elem` will not be found. + if (ParserIdentifier().parse(pos, tmp, expected)) + { + layers.back()->pushOperator(op); + layers.back()->pushOperand(std::make_shared(tmp->as()->name())); + return Action::OPERATOR; + } } /// isNull & isNotNull are postfix unary operators @@ -2863,7 +2874,7 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po layers.push_back(std::make_unique()); if (op.type == OperatorType::StartBetween || op.type == OperatorType::StartNotBetween) - layers.back()->between_counter++; + ++layers.back()->between_counter; return Action::OPERAND; } diff --git a/tests/queries/0_stateless/03224_tuple_element_identifier.reference b/tests/queries/0_stateless/03224_tuple_element_identifier.reference new file mode 100644 index 00000000000..0fc9e7410c1 --- /dev/null +++ b/tests/queries/0_stateless/03224_tuple_element_identifier.reference @@ -0,0 +1,4 @@ +([('wtf')]) [('wtf')] wtf +([('wtf')]) [('wtf')] wtf +Hello +('Hello') Hello Hello Hello diff --git a/tests/queries/0_stateless/03224_tuple_element_identifier.sql b/tests/queries/0_stateless/03224_tuple_element_identifier.sql new file mode 100644 index 00000000000..2a7fb9a97a3 --- /dev/null +++ b/tests/queries/0_stateless/03224_tuple_element_identifier.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; + +SELECT JSONExtract('{"hello":[{"world":"wtf"}]}', 'Tuple(hello Array(Tuple(world String)))') AS x, + x.hello, x.hello[1].world; + +SELECT JSONExtract('{"hello":[{" wow ":"wtf"}]}', 'Tuple(hello Array(Tuple(` wow ` String)))') AS x, + x.hello, x.hello[1].` wow `; + +SELECT JSONExtract('{"hello":[{" wow ":"wtf"}]}', 'Tuple(hello Array(Tuple(` wow ` String)))') AS x, + x.hello, x.hello[1].`wow`; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } + +SELECT ('Hello' AS world,).world; +SELECT ('Hello' AS world,) AS t, t.world, (t).world, identity(t).world; From 1b52466cbbbbccad74ca9c92c650758c1dfdb8d2 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 16 Aug 2024 05:36:03 +0000 Subject: [PATCH 2287/2361] () --- src/Common/StackTrace.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index 80329b4aec1..bd01b639913 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -267,7 +267,7 @@ void StackTrace::forEachFrame( /// frame is interrupted by signal or not. We could propagate this information /// from libunwind to here and avoid subtracting 1 in this case, but currently /// we don't do this. - /// But we don't do the decrement for findSymbol() below (because `call` is + /// But we don't do the decrement for findSymbol below (because `call` is /// ~never the last instruction of a function), so the function name should be /// correct for both pre-signal frames and regular frames. 
adjusted_addr -= 1; From 253188381759d062967c9bb3b9d0907f30eab61a Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Fri, 16 Aug 2024 05:39:50 +0000 Subject: [PATCH 2288/2361] she sells seashells by seashore the shells that she sells are seashells im sure --- tests/queries/0_stateless/01119_session_log.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01119_session_log.sh b/tests/queries/0_stateless/01119_session_log.sh index 809d300fada..2d17b545276 100755 --- a/tests/queries/0_stateless/01119_session_log.sh +++ b/tests/queries/0_stateless/01119_session_log.sh @@ -35,7 +35,7 @@ $session_log_query_prefix and type != 'Logout' order by type, user, interface; " # Wait for logout events. -for attempt in {1..10} +for _ in {1..10} do if [ "`$CLICKHOUSE_CLIENT -q "$session_log_query_prefix and type = 'Logout'" | wc -l`" -eq 3 ] then From a85f544205f2e782d4f6c16c0622728475db3571 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 16 Aug 2024 08:47:28 +0000 Subject: [PATCH 2289/2361] Update analyzer_tech_debt.txt --- tests/analyzer_tech_debt.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index bd92465e1aa..c8edbdc5932 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -1,4 +1,3 @@ 01624_soft_constraints -02354_vector_search_queries # Check after ConstantNode refactoring 02944_variant_as_common_type From 1b49e2492521c54b0e6240d412af847d3fa21221 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 16 Aug 2024 11:26:31 +0200 Subject: [PATCH 2290/2361] Fix clang-tidy --- src/Dictionaries/PostgreSQLDictionarySource.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index 8e472f85a6e..b1bab17e2e9 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -208,7 +208,6 @@ void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) const auto & settings = context->getSettingsRef(); std::optional dictionary_configuration; - String database, schema, table; postgres::PoolWithFailover::ReplicasConfigurationByPriority replicas_by_priority; auto named_collection = created_from_ddl ? 
tryGetNamedCollectionWithOverrides(config, settings_config_prefix, context) : nullptr; From 60a6e893a40761eb46655e76cb6a3fe5f177019c Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 16 Aug 2024 17:56:12 +0800 Subject: [PATCH 2291/2361] first commit --- src/Common/examples/CMakeLists.txt | 5 + src/Common/examples/utf8_upper_lower.cpp | 27 ++ src/Functions/LowerUpperImpl.h | 1 - src/Functions/LowerUpperUTF8Impl.h | 283 +++--------------- src/Functions/initcapUTF8.cpp | 3 +- src/Functions/lowerUTF8.cpp | 25 +- src/Functions/upperUTF8.cpp | 24 +- .../00170_lower_upper_utf8.reference | 4 + .../0_stateless/00170_lower_upper_utf8.sql | 11 + .../00233_position_function_family.sql | 3 + .../0_stateless/00761_lower_utf8_bug.sql | 3 + .../0_stateless/01278_random_string_utf8.sql | 3 + .../0_stateless/01431_utf8_ubsan.reference | 4 +- .../queries/0_stateless/01431_utf8_ubsan.sql | 3 + .../0_stateless/01590_countSubstrings.sql | 3 + ...71_lower_upper_utf8_row_overlaps.reference | 4 +- .../02071_lower_upper_utf8_row_overlaps.sql | 3 + ...new_functions_must_be_documented.reference | 2 - .../02514_if_with_lazy_low_cardinality.sql | 3 + .../0_stateless/02807_lower_utf8_msan.sql | 3 + tests/queries/0_stateless/03015_peder1001.sql | 3 + 21 files changed, 159 insertions(+), 261 deletions(-) create mode 100644 src/Common/examples/utf8_upper_lower.cpp diff --git a/src/Common/examples/CMakeLists.txt b/src/Common/examples/CMakeLists.txt index 69580d4ad0e..8383e80d09d 100644 --- a/src/Common/examples/CMakeLists.txt +++ b/src/Common/examples/CMakeLists.txt @@ -92,3 +92,8 @@ endif() clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp) target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io clickhouse_common_config) + +if (TARGET ch_contrib::icu) + clickhouse_add_executable (utf8_upper_lower utf8_upper_lower.cpp) + target_link_libraries (utf8_upper_lower PRIVATE ch_contrib::icu) +endif () diff --git a/src/Common/examples/utf8_upper_lower.cpp b/src/Common/examples/utf8_upper_lower.cpp new file mode 100644 index 00000000000..826e1763105 --- /dev/null +++ b/src/Common/examples/utf8_upper_lower.cpp @@ -0,0 +1,27 @@ +#include +#include + +std::string utf8_to_lower(const std::string & input) +{ + icu::UnicodeString unicodeInput(input.c_str(), "UTF-8"); + unicodeInput.toLower(); + std::string output; + unicodeInput.toUTF8String(output); + return output; +} + +std::string utf8_to_upper(const std::string & input) +{ + icu::UnicodeString unicodeInput(input.c_str(), "UTF-8"); + unicodeInput.toUpper(); + std::string output; + unicodeInput.toUTF8String(output); + return output; +} + +int main() +{ + std::string input = "ır"; + std::cout << "upper:" << utf8_to_upper(input) << std::endl; + return 0; +} diff --git a/src/Functions/LowerUpperImpl.h b/src/Functions/LowerUpperImpl.h index d463ef96e16..a52703d10c8 100644 --- a/src/Functions/LowerUpperImpl.h +++ b/src/Functions/LowerUpperImpl.h @@ -1,7 +1,6 @@ #pragma once #include - namespace DB { diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index eedabca5b22..5da085f48e5 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -1,15 +1,14 @@ #pragma once + +#include "config.h" + +#if USE_ICU + #include #include -#include -#include +#include +#include #include -#include - -#ifdef __SSE2__ -#include -#endif - namespace DB { @@ -19,71 +18,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -/// xor or do nothing -template -UInt8 xor_or_identity(const 
UInt8 c, const int mask) -{ - return c ^ mask; -} - -template <> -inline UInt8 xor_or_identity(const UInt8 c, const int) -{ - return c; -} - -/// It is caller's responsibility to ensure the presence of a valid cyrillic sequence in array -template -inline void UTF8CyrillicToCase(const UInt8 *& src, UInt8 *& dst) -{ - if (src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0x8Fu)) - { - /// ЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏ - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x10); - } - else if (src[0] == 0xD1u && (src[1] >= 0x90u && src[1] <= 0x9Fu)) - { - /// ѐёђѓєѕіїјљњћќѝўџ - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x10); - } - else if (src[0] == 0xD0u && (src[1] >= 0x90u && src[1] <= 0x9Fu)) - { - /// А-П - *dst++ = *src++; - *dst++ = xor_or_identity(*src++, 0x20); - } - else if (src[0] == 0xD0u && (src[1] >= 0xB0u && src[1] <= 0xBFu)) - { - /// а-п - *dst++ = *src++; - *dst++ = xor_or_identity(*src++, 0x20); - } - else if (src[0] == 0xD0u && (src[1] >= 0xA0u && src[1] <= 0xAFu)) - { - /// Р-Я - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x20); - } - else if (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x8Fu)) - { - /// р-я - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x20); - } -} - - -/** If the string contains UTF-8 encoded text, convert it to the lower (upper) case. - * Note: It is assumed that after the character is converted to another case, - * the length of its multibyte sequence in UTF-8 does not change. - * Otherwise, the behavior is undefined. - */ -template +template struct LowerUpperUTF8Impl { static void vector( @@ -103,180 +38,46 @@ struct LowerUpperUTF8Impl return; } - res_data.resize_exact(data.size()); - res_offsets.assign(offsets); - array(data.data(), data.data() + data.size(), offsets, res_data.data()); + res_data.resize(data.size()); + res_offsets.resize_exact(offsets.size()); + + String output; + size_t curr_offset = 0; + for (size_t i = 0; i < offsets.size(); ++i) + { + const auto * data_start = reinterpret_cast(&data[offsets[i - 1]]); + size_t size = offsets[i] - offsets[i - 1]; + + icu::UnicodeString input(data_start, static_cast(size), "UTF-8"); + if constexpr (upper) + input.toUpper(); + else + input.toLower(); + + output.clear(); + input.toUTF8String(output); + + /// For valid UTF-8 input strings, ICU sometimes produces output with extra '\0's at the end. Only the data before the first + /// '\0' is valid. If the input is not valid UTF-8, then the behavior of lower/upperUTF8 is undefined by definition. In this + /// case, the behavior is also reasonable. + const char * res_end = find_last_not_symbols_or_null<'\0'>(output.data(), output.data() + output.size()); + size_t valid_size = res_end ? res_end - output.data() + 1 : 0; + + res_data.resize(curr_offset + valid_size + 1); + memcpy(&res_data[curr_offset], output.data(), valid_size); + res_data[curr_offset + valid_size] = 0; + + curr_offset += valid_size + 1; + res_offsets[i] = curr_offset; + } } static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Functions lowerUTF8 and upperUTF8 cannot work with FixedString argument"); } - - /** Converts a single code point starting at `src` to desired case, storing result starting at `dst`. - * `src` and `dst` are incremented by corresponding sequence lengths. 
*/ - static bool toCase(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst, bool partial) - { - if (src[0] <= ascii_upper_bound) - { - if (*src >= not_case_lower_bound && *src <= not_case_upper_bound) - *dst++ = *src++ ^ flip_case_mask; - else - *dst++ = *src++; - } - else if (src + 1 < src_end - && ((src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0xBFu)) || (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x9Fu)))) - { - cyrillic_to_case(src, dst); - } - else if (src + 1 < src_end && src[0] == 0xC2u) - { - /// Punctuation U+0080 - U+00BF, UTF-8: C2 80 - C2 BF - *dst++ = *src++; - *dst++ = *src++; - } - else if (src + 2 < src_end && src[0] == 0xE2u) - { - /// Characters U+2000 - U+2FFF, UTF-8: E2 80 80 - E2 BF BF - *dst++ = *src++; - *dst++ = *src++; - *dst++ = *src++; - } - else - { - size_t src_sequence_length = UTF8::seqLength(*src); - /// In case partial buffer was passed (due to SSE optimization) - /// we cannot convert it with current src_end, but we may have more - /// bytes to convert and eventually got correct symbol. - if (partial && src_sequence_length > static_cast(src_end - src)) - return false; - - auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src); - if (src_code_point) - { - int dst_code_point = to_case(*src_code_point); - if (dst_code_point > 0) - { - size_t dst_sequence_length = UTF8::convertCodePointToUTF8(dst_code_point, dst, src_end - src); - assert(dst_sequence_length <= 4); - - /// We don't support cases when lowercase and uppercase characters occupy different number of bytes in UTF-8. - /// As an example, this happens for ß and ẞ. - if (dst_sequence_length == src_sequence_length) - { - src += dst_sequence_length; - dst += dst_sequence_length; - return true; - } - } - } - - *dst = *src; - ++dst; - ++src; - } - - return true; - } - -private: - static constexpr auto ascii_upper_bound = '\x7f'; - static constexpr auto flip_case_mask = 'A' ^ 'a'; - - static void array(const UInt8 * src, const UInt8 * src_end, const ColumnString::Offsets & offsets, UInt8 * dst) - { - const auto * offset_it = offsets.begin(); - const UInt8 * begin = src; - -#ifdef __SSE2__ - static constexpr auto bytes_sse = sizeof(__m128i); - - /// If we are before this position, we can still read at least bytes_sse. 
- const auto * src_end_sse = src_end - bytes_sse + 1; - - /// SSE2 packed comparison operate on signed types, hence compare (c < 0) instead of (c > 0x7f) - const auto v_zero = _mm_setzero_si128(); - const auto v_not_case_lower_bound = _mm_set1_epi8(not_case_lower_bound - 1); - const auto v_not_case_upper_bound = _mm_set1_epi8(not_case_upper_bound + 1); - const auto v_flip_case_mask = _mm_set1_epi8(flip_case_mask); - - while (src < src_end_sse) - { - const auto chars = _mm_loadu_si128(reinterpret_cast(src)); - - /// check for ASCII - const auto is_not_ascii = _mm_cmplt_epi8(chars, v_zero); - const auto mask_is_not_ascii = _mm_movemask_epi8(is_not_ascii); - - /// ASCII - if (mask_is_not_ascii == 0) - { - const auto is_not_case - = _mm_and_si128(_mm_cmpgt_epi8(chars, v_not_case_lower_bound), _mm_cmplt_epi8(chars, v_not_case_upper_bound)); - const auto mask_is_not_case = _mm_movemask_epi8(is_not_case); - - /// everything in correct case ASCII - if (mask_is_not_case == 0) - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), chars); - else - { - /// ASCII in mixed case - /// keep `flip_case_mask` only where necessary, zero out elsewhere - const auto xor_mask = _mm_and_si128(v_flip_case_mask, is_not_case); - - /// flip case by applying calculated mask - const auto cased_chars = _mm_xor_si128(chars, xor_mask); - - /// store result back to destination - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), cased_chars); - } - - src += bytes_sse; - dst += bytes_sse; - } - else - { - /// UTF-8 - - /// Find the offset of the next string after src - size_t offset_from_begin = src - begin; - while (offset_from_begin >= *offset_it) - ++offset_it; - - /// Do not allow one row influence another (since row may have invalid sequence, and break the next) - const UInt8 * row_end = begin + *offset_it; - chassert(row_end >= src); - const UInt8 * expected_end = std::min(src + bytes_sse, row_end); - - while (src < expected_end) - { - if (!toCase(src, expected_end, dst, /* partial= */ true)) - { - /// Fallback to handling byte by byte. 
- src_end_sse = src; - break; - } - } - } - } - - /// Find the offset of the next string after src - size_t offset_from_begin = src - begin; - while (offset_it != offsets.end() && offset_from_begin >= *offset_it) - ++offset_it; -#endif - - /// handle remaining symbols, row by row (to avoid influence of bad UTF8 symbols from one row, to another) - while (src < src_end) - { - const UInt8 * row_end = begin + *offset_it; - chassert(row_end >= src); - - while (src < row_end) - toCase(src, row_end, dst, /* partial= */ false); - ++offset_it; - } - } }; } + +#endif diff --git a/src/Functions/initcapUTF8.cpp b/src/Functions/initcapUTF8.cpp index 282d846094e..004586dce26 100644 --- a/src/Functions/initcapUTF8.cpp +++ b/src/Functions/initcapUTF8.cpp @@ -1,9 +1,8 @@ #include #include -#include #include #include - +#include namespace DB { diff --git a/src/Functions/lowerUTF8.cpp b/src/Functions/lowerUTF8.cpp index 7adb0069121..e2f7cb84730 100644 --- a/src/Functions/lowerUTF8.cpp +++ b/src/Functions/lowerUTF8.cpp @@ -1,9 +1,10 @@ -#include +#include "config.h" + +#if USE_ICU + +#include #include #include -#include -#include - namespace DB { @@ -15,13 +16,25 @@ struct NameLowerUTF8 static constexpr auto name = "lowerUTF8"; }; -using FunctionLowerUTF8 = FunctionStringToString>, NameLowerUTF8>; +using FunctionLowerUTF8 = FunctionStringToString, NameLowerUTF8>; } REGISTER_FUNCTION(LowerUTF8) { - factory.registerFunction(); + FunctionDocumentation::Description description + = R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.)"; + FunctionDocumentation::Syntax syntax = "lowerUTF8(input)"; + FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}}; + FunctionDocumentation::ReturnedValue returned_value = "A String data type value"; + FunctionDocumentation::Examples examples = { + {"first", "SELECT lowerUTF8('München') as Lowerutf8;", "münchen"}, + }; + FunctionDocumentation::Categories categories = {"String"}; + + factory.registerFunction({description, syntax, arguments, returned_value, examples, categories}); } } + +#endif diff --git a/src/Functions/upperUTF8.cpp b/src/Functions/upperUTF8.cpp index 659e67f0ef3..ef26430331f 100644 --- a/src/Functions/upperUTF8.cpp +++ b/src/Functions/upperUTF8.cpp @@ -1,8 +1,10 @@ +#include "config.h" + +#if USE_ICU + +#include #include #include -#include -#include - namespace DB { @@ -14,13 +16,25 @@ struct NameUpperUTF8 static constexpr auto name = "upperUTF8"; }; -using FunctionUpperUTF8 = FunctionStringToString>, NameUpperUTF8>; +using FunctionUpperUTF8 = FunctionStringToString, NameUpperUTF8>; } REGISTER_FUNCTION(UpperUTF8) { - factory.registerFunction(); + FunctionDocumentation::Description description + = R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. 
If this assumption is violated, no exception is thrown and the result is undefined.)";
+    FunctionDocumentation::Syntax syntax = "upperUTF8(input)";
+    FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}};
+    FunctionDocumentation::ReturnedValue returned_value = "A String data type value";
+    FunctionDocumentation::Examples examples = {
+        {"first", "SELECT upperUTF8('München') as Upperutf8;", "MÜNCHEN"},
+    };
+    FunctionDocumentation::Categories categories = {"String"};
+
+    factory.registerFunction({description, syntax, arguments, returned_value, examples, categories});
 }
 
 }
+
+#endif
diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8.reference b/tests/queries/0_stateless/00170_lower_upper_utf8.reference
index f202cb75513..3c644f22b9b 100644
--- a/tests/queries/0_stateless/00170_lower_upper_utf8.reference
+++ b/tests/queries/0_stateless/00170_lower_upper_utf8.reference
@@ -22,3 +22,7 @@
 1
 1
 1
+1
+1
+1
+1
diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8.sql b/tests/queries/0_stateless/00170_lower_upper_utf8.sql
index 4caba2033ff..85b6c5c6095 100644
--- a/tests/queries/0_stateless/00170_lower_upper_utf8.sql
+++ b/tests/queries/0_stateless/00170_lower_upper_utf8.sql
@@ -1,3 +1,6 @@
+-- Tags: no-fasttest
+-- no-fasttest: upper/lowerUTF8 use ICU
+
 select lower('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
 select lowerUTF8('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
 select lower('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'aaaaaaaaaaaaaaa012345789,.!aaaa';
@@ -27,3 +30,11 @@ select sum(lower(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaАБВ
 select sum(upper(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
 select sum(lowerUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaабвгaaaaaaaa')) = count() from system.one array join range(16384) as n;
 select sum(upperUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
+
+-- Turkish language
+select upperUTF8('ır') = 'IR';
+select lowerUTF8('ır') = 'ır';
+
+-- German language
+select upper('öäüß') = 'öäüß';
+select lower('ÖÄÜẞ') = 'ÖÄÜẞ';
diff --git a/tests/queries/0_stateless/00233_position_function_family.sql b/tests/queries/0_stateless/00233_position_function_family.sql
index dd7394bc39a..d6668cb7ba4 100644
--- a/tests/queries/0_stateless/00233_position_function_family.sql
+++ b/tests/queries/0_stateless/00233_position_function_family.sql
@@ -1,3 +1,6 @@
+-- Tags: no-fasttest
+-- no-fasttest: upper/lowerUTF8 use ICU
+
 SET send_logs_level = 'fatal';
 
 select 1 = position('', '');
diff --git a/tests/queries/0_stateless/00761_lower_utf8_bug.sql b/tests/queries/0_stateless/00761_lower_utf8_bug.sql
index de20b894331..a0ab55edc15 100644
--- a/tests/queries/0_stateless/00761_lower_utf8_bug.sql
+++ b/tests/queries/0_stateless/00761_lower_utf8_bug.sql
@@ -1 +1,4 @@
+-- Tags: no-fasttest
+-- no-fasttest: upper/lowerUTF8 use ICU
+
 SELECT lowerUTF8('\xF0') = lowerUTF8('\xF0');
diff --git a/tests/queries/0_stateless/01278_random_string_utf8.sql b/tests/queries/0_stateless/01278_random_string_utf8.sql
index da2dc48c3e1..290d6a0c759 100644
--- a/tests/queries/0_stateless/01278_random_string_utf8.sql
+++ b/tests/queries/0_stateless/01278_random_string_utf8.sql
@@ -1,3 +1,6 @@
+-- Tags: no-fasttest
+-- no-fasttest: upper/lowerUTF8 use ICU
+
 SELECT randomStringUTF8('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 SELECT lengthUTF8(randomStringUTF8(100));
SELECT toTypeName(randomStringUTF8(10)); diff --git a/tests/queries/0_stateless/01431_utf8_ubsan.reference b/tests/queries/0_stateless/01431_utf8_ubsan.reference index c98c950d535..dc785e57851 100644 --- a/tests/queries/0_stateless/01431_utf8_ubsan.reference +++ b/tests/queries/0_stateless/01431_utf8_ubsan.reference @@ -1,2 +1,2 @@ -FF -FF +EFBFBD +EFBFBD diff --git a/tests/queries/0_stateless/01431_utf8_ubsan.sql b/tests/queries/0_stateless/01431_utf8_ubsan.sql index d6a299225b1..3a28e023805 100644 --- a/tests/queries/0_stateless/01431_utf8_ubsan.sql +++ b/tests/queries/0_stateless/01431_utf8_ubsan.sql @@ -1,2 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SELECT hex(lowerUTF8('\xFF')); SELECT hex(upperUTF8('\xFF')); diff --git a/tests/queries/0_stateless/01590_countSubstrings.sql b/tests/queries/0_stateless/01590_countSubstrings.sql index b38cbb7d188..5ec4f412d7f 100644 --- a/tests/queries/0_stateless/01590_countSubstrings.sql +++ b/tests/queries/0_stateless/01590_countSubstrings.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + -- -- countSubstrings -- diff --git a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference index a3bac432482..deabef61a88 100644 --- a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference +++ b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference @@ -5,9 +5,9 @@ insert into utf8_overlap values ('\xe2'), ('Foo⚊BarBazBam'), ('\xe2'), ('Foo -- MONOGRAM FOR YANG with lowerUTF8(str) as l_, upperUTF8(str) as u_, '0x' || hex(str) as h_ select length(str), if(l_ == '\xe2', h_, l_), if(u_ == '\xe2', h_, u_) from utf8_overlap format CSV; -1,"0xE2","0xE2" +1,"�","�" 15,"foo⚊barbazbam","FOO⚊BARBAZBAM" -1,"0xE2","0xE2" +1,"�","�" 15,"foo⚊barbazbam","FOO⚊BARBAZBAM" -- NOTE: regression test for introduced bug -- https://github.com/ClickHouse/ClickHouse/issues/42756 diff --git a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql index 8ca0a3f5f75..d175e0659d0 100644 --- a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql +++ b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + drop table if exists utf8_overlap; create table utf8_overlap (str String) engine=Memory(); diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index c39f1fb1ce9..0980e25b70f 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -416,7 +416,6 @@ logTrace lowCardinalityIndices lowCardinalityKeys lower -lowerUTF8 makeDate makeDate32 makeDateTime @@ -897,7 +896,6 @@ tupleToNameValuePairs unbin unhex upper -upperUTF8 uptime validateNestedArraySizes version diff --git a/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql b/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql index 80e3c0a9ece..b169cfd0ab9 100644 --- a/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql +++ b/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + create table if not exists t 
(`arr.key` Array(LowCardinality(String)), `arr.value` Array(LowCardinality(String))) engine = Memory; insert into t (`arr.key`, `arr.value`) values (['a'], ['b']); select if(true, if(lowerUTF8(arr.key) = 'a', 1, 2), 3) as x from t left array join arr; diff --git a/tests/queries/0_stateless/02807_lower_utf8_msan.sql b/tests/queries/0_stateless/02807_lower_utf8_msan.sql index e9eb18bf615..95f224577f7 100644 --- a/tests/queries/0_stateless/02807_lower_utf8_msan.sql +++ b/tests/queries/0_stateless/02807_lower_utf8_msan.sql @@ -1,2 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SELECT lowerUTF8(arrayJoin(['©--------------------------------------', '©--------------------'])) ORDER BY 1; SELECT upperUTF8(materialize('aaaaÐБВГaaaaaaaaaaaaÐБВГAAAAaaAA')) FROM numbers(2); diff --git a/tests/queries/0_stateless/03015_peder1001.sql b/tests/queries/0_stateless/03015_peder1001.sql index 810503207f2..df8e4db1536 100644 --- a/tests/queries/0_stateless/03015_peder1001.sql +++ b/tests/queries/0_stateless/03015_peder1001.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + DROP TABLE IF EXISTS test_data; CREATE TABLE test_data From 4600b270dafec20b276ab83eb557270c24cb4169 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 16 Aug 2024 17:58:54 +0800 Subject: [PATCH 2292/2361] remote icu contrib --- .gitmodules | 3 --- contrib/icu | 1 - 2 files changed, 4 deletions(-) delete mode 160000 contrib/icu diff --git a/.gitmodules b/.gitmodules index 7fdfb1103c5..164da311930 100644 --- a/.gitmodules +++ b/.gitmodules @@ -106,9 +106,6 @@ [submodule "contrib/icudata"] path = contrib/icudata url = https://github.com/ClickHouse/icudata -[submodule "contrib/icu"] - path = contrib/icu - url = https://github.com/unicode-org/icu [submodule "contrib/flatbuffers"] path = contrib/flatbuffers url = https://github.com/ClickHouse/flatbuffers diff --git a/contrib/icu b/contrib/icu deleted file mode 160000 index 7750081bda4..00000000000 --- a/contrib/icu +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625 From 3ee741bd5e33d16b2f5711a8f2b06fca1a64b7bc Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 16 Aug 2024 18:04:15 +0800 Subject: [PATCH 2293/2361] add submodule contrib/icu from clickhouse --- .gitmodules | 4 ++++ contrib/icu | 1 + 2 files changed, 5 insertions(+) create mode 160000 contrib/icu diff --git a/.gitmodules b/.gitmodules index 164da311930..a8cc6a07caf 100644 --- a/.gitmodules +++ b/.gitmodules @@ -372,3 +372,7 @@ [submodule "contrib/numactl"] path = contrib/numactl url = https://github.com/ClickHouse/numactl.git +[submodule "contrib/icu"] + path = contrib/icu + url = https://github.com/ClickHouse/icu + branch = ClickHouse/release-75-1 diff --git a/contrib/icu b/contrib/icu new file mode 160000 index 00000000000..4216173eeeb --- /dev/null +++ b/contrib/icu @@ -0,0 +1 @@ +Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273 From b76277ca00b3d91052bf90b83b20f730dcf96b2e Mon Sep 17 00:00:00 2001 From: neoman36 Date: Fri, 16 Aug 2024 12:23:20 +0200 Subject: [PATCH 2294/2361] named-collections.md is updated --- docs/en/operations/named-collections.md | 22 +++++++++++++++++-- docs/ru/operations/named-collections.md | 29 ++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md index 59ee05d1f9e..16c75814e5a 100644 --- a/docs/en/operations/named-collections.md +++ 
b/docs/en/operations/named-collections.md
@@ -307,8 +307,22 @@ SELECT dictGet('dict', 'B', 2);
 
 ## Named collections for accessing PostgreSQL database
 
-The description of parameters see [postgresql](../sql-reference/table-functions/postgresql.md).
+The description of parameters see [postgresql](../sql-reference/table-functions/postgresql.md). Additionally, there are aliases:
+
+- `username` for `user`
+- `db` for `database`.
+
+Parameter `addresses_expr` is used in a collection instead of `host:port`. The parameter is optional, because there are other optional ones: `host`, `hostname`, `port`. The following pseudocode explains the priority:
+
+```sql
+CASE
+    WHEN collection['addresses_expr'] != '' THEN collection['addresses_expr']
+    WHEN collection['host'] != '' THEN collection['host'] || ':' || if(collection['port'] != '', collection['port'], '5432')
+    WHEN collection['hostname'] != '' THEN collection['hostname'] || ':' || if(collection['port'] != '', collection['port'], '5432')
+END
+```
+
+Example of creation:
 ```sql
 CREATE NAMED COLLECTION mypg AS
 user = 'pguser',
@@ -316,7 +330,7 @@ password = 'jw8s0F4',
 host = '127.0.0.1',
 port = 5432,
 database = 'test',
-schema = 'test_schema',
+schema = 'test_schema'
 ```
 
 Example of configuration:
@@ -369,6 +383,10 @@ SELECT * FROM mypgtable;
 └───┘
 ```
 
+:::note
+PostgreSQL copies data from the named collection when the table is being created. A change in the collection does not affect the existing tables.
+:::
+
 ### Example of using named collections with database with engine PostgreSQL
 
 ```sql
diff --git a/docs/ru/operations/named-collections.md b/docs/ru/operations/named-collections.md
index 48ee7c9f15d..67656f24ba3 100644
--- a/docs/ru/operations/named-collections.md
+++ b/docs/ru/operations/named-collections.md
@@ -146,7 +146,30 @@ SELECT dictGet('dict', 'B', 2);
 
 ## Пример использования именованных соединений с базой данных PostgreSQL
 
-Описание параметров смотрите [postgresql](../sql-reference/table-functions/postgresql.md).
+Описание параметров смотрите [postgresql](../sql-reference/table-functions/postgresql.md). Дополнительно есть алиасы:
+- `username` для `user`
+- `db` для `database`.
+
+Параметр `addresses_expr` используется в коллекции вместо `host:port`. Параметр опционален, потому что есть так же другие: `host`, `hostname`, `port`. Следующий псевдокод показывает приоритет:
+
+```sql
+CASE
+    WHEN collection['addresses_expr'] != '' THEN collection['addresses_expr']
+    WHEN collection['host'] != '' THEN collection['host'] || ':' || if(collection['port'] != '', collection['port'], '5432')
+    WHEN collection['hostname'] != '' THEN collection['hostname'] || ':' || if(collection['port'] != '', collection['port'], '5432')
+END
+```
+
+Пример создания:
+```sql
+CREATE NAMED COLLECTION mypg AS
+user = 'pguser',
+password = 'jw8s0F4',
+host = '127.0.0.1',
+port = 5432,
+database = 'test',
+schema = 'test_schema'
+```
 
 Пример конфигурации:
 ```xml
@@ -199,6 +222,10 @@ SELECT * FROM mypgtable;
 └───┘
 ```
 
+:::note
+PostgreSQL копирует данные из named collection при создании таблицы. Изменения в коллекции не влияют на существующие таблицы.
+::: + ### Пример иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¸Ð¼ÐµÐ½Ð¾Ð²Ð°Ð½Ð½Ñ‹Ñ… Ñоединений базой данных Ñ Ð´Ð²Ð¸Ð¶ÐºÐ¾Ð¼ PostgreSQL ```sql From f17655f13fcadb9babbc859e45e9f38cb32ad9e3 Mon Sep 17 00:00:00 2001 From: Dani Pozo Date: Thu, 1 Aug 2024 11:01:27 +0200 Subject: [PATCH 2295/2361] Load filesystem cache metadata asynchronously --- src/Common/StatusFile.cpp | 2 +- src/Disks/IO/ReadBufferFromRemoteFSGather.cpp | 35 +++--- .../Cached/CachedObjectStorage.cpp | 5 +- src/Interpreters/Cache/FileCache.cpp | 71 +++++++++--- src/Interpreters/Cache/FileCache.h | 10 ++ src/Interpreters/Cache/FileCacheSettings.cpp | 3 + src/Interpreters/Cache/FileCacheSettings.h | 1 + .../InterpreterDescribeCacheQuery.cpp | 2 + src/Interpreters/TemporaryDataOnDisk.cpp | 4 +- src/Interpreters/tests/gtest_filecache.cpp | 11 ++ .../System/StorageSystemFilesystemCache.cpp | 3 + .../StorageSystemFilesystemCacheSettings.cpp | 2 + tests/config/config.d/storage_conf.xml | 2 + tests/config/config.d/storage_conf_02944.xml | 1 + .../integration/test_filesystem_cache/test.py | 106 +++++++++++++++--- .../02344_describe_cache.reference | 2 +- .../0_stateless/02344_describe_cache.sh | 2 +- ...8_filesystem_cache_as_collection.reference | 4 +- .../02908_filesystem_cache_as_collection.sql | 4 +- ...ge_cache_setting_without_restart.reference | 14 +-- ...lly_change_filesystem_cache_size.reference | 10 +- 21 files changed, 227 insertions(+), 67 deletions(-) diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index 80464f38082..0bbb7ff411d 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -51,7 +51,7 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) std::string contents; { ReadBufferFromFile in(path, 1024); - LimitReadBuffer limit_in(in, 1024, /* trow_exception */ false, /* exact_limit */ {}); + LimitReadBuffer limit_in(in, 1024, /* throw_exception */ false, /* exact_limit */ {}); readStringUntilEOF(contents, limit_in); } diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index bb9761a3905..c96f5f0c931 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -80,20 +80,27 @@ SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(c if (with_file_cache) { - auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path); - buf = std::make_unique( - object_path, - cache_key, - settings.remote_fs_cache, - FileCache::getCommonUser(), - [=, this]() { return read_buffer_creator(/* restricted_seek */true, object); }, - settings, - query_id, - object.bytes_size, - /* allow_seeks */false, - /* use_external_buffer */true, - /* read_until_position */std::nullopt, - cache_log); + if (settings.remote_fs_cache->isInitialized()) + { + auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path); + buf = std::make_unique( + object_path, + cache_key, + settings.remote_fs_cache, + FileCache::getCommonUser(), + [=, this]() { return read_buffer_creator(/* restricted_seek */true, object); }, + settings, + query_id, + object.bytes_size, + /* allow_seeks */false, + /* use_external_buffer */true, + /* read_until_position */std::nullopt, + cache_log); + } + else + { + settings.remote_fs_cache->throwInitExceptionIfNeeded(); + } } /// Can't wrap CachedOnDiskReadBufferFromFile in CachedInMemoryReadBufferFromFile because the diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index 
fb817005399..ab0d357119c 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -99,7 +99,7 @@ std::unique_ptr CachedObjectStorage::writeObject( /// N /// Need to remove even if cache_on_write == false. removeCacheIfExists(object.remote_path); - if (cache_on_write) + if (cache_on_write && cache->isInitialized()) { auto key = getCacheKey(object.remote_path); return std::make_unique( @@ -122,7 +122,8 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c return; /// Add try catch? - cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); + if (cache->isInitialized()) + cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); } void CachedObjectStorage::removeObject(const StoredObject & object) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index e3925163362..4c35c0f7f4c 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -11,11 +11,15 @@ #include #include #include +#include +#include #include #include #include +#include #include +#include namespace fs = std::filesystem; @@ -88,6 +92,7 @@ FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & s , bypass_cache_threshold(settings.enable_bypass_cache_with_threshold ? settings.bypass_cache_threshold : 0) , boundary_alignment(settings.boundary_alignment) , load_metadata_threads(settings.load_metadata_threads) + , load_metadata_asynchronously(settings.load_metadata_asynchronously) , write_cache_per_user_directory(settings.write_cache_per_user_id_directory) , keep_current_size_to_max_ratio(1 - settings.keep_free_space_size_ratio) , keep_current_elements_to_max_ratio(1 - settings.keep_free_space_elements_ratio) @@ -136,7 +141,17 @@ const FileCache::UserInfo & FileCache::getInternalUser() bool FileCache::isInitialized() const { - return is_initialized.load(std::memory_order_seq_cst); + return is_initialized; +} + +void FileCache::throwInitExceptionIfNeeded() +{ + if (load_metadata_asynchronously) + return; + + std::lock_guard lock(init_mutex); + if (init_exception) + std::rethrow_exception(init_exception); } const String & FileCache::getBasePath() const @@ -170,6 +185,35 @@ void FileCache::assertInitialized() const } void FileCache::initialize() +{ + // Prevent initialize() from running twice. This may be caused by two cache disks being created with the same path (see integration/test_filesystem_cache). + callOnce(initialize_called, [&] { + bool need_to_load_metadata = fs::exists(getBasePath()); + try + { + if (!need_to_load_metadata) + fs::create_directories(getBasePath()); + status_file = make_unique(fs::path(getBasePath()) / "status", StatusFile::write_full_info); + } + catch (...) 
+ { + init_exception = std::current_exception(); + tryLogCurrentException(__PRETTY_FUNCTION__); + throw; + } + + if (load_metadata_asynchronously) + { + load_metadata_main_thread = ThreadFromGlobalPool([this, need_to_load_metadata] { initializeImpl(need_to_load_metadata); }); + } + else + { + initializeImpl(need_to_load_metadata); + } + }); +} + +void FileCache::initializeImpl(bool load_metadata) { std::lock_guard lock(init_mutex); @@ -178,16 +222,10 @@ void FileCache::initialize() try { - if (fs::exists(getBasePath())) - { + if (load_metadata) loadMetadata(); - } - else - { - fs::create_directories(getBasePath()); - } - status_file = make_unique(fs::path(getBasePath()) / "status", StatusFile::write_full_info); + metadata.startup(); } catch (...) { @@ -196,8 +234,6 @@ void FileCache::initialize() throw; } - metadata.startup(); - if (keep_current_size_to_max_ratio != 1 || keep_current_elements_to_max_ratio != 1) { keep_up_free_space_ratio_task = Context::getGlobalContextInstance()->getSchedulePool().createTask(log->name(), [this] { freeSpaceRatioKeepingThreadFunc(); }); @@ -205,6 +241,7 @@ void FileCache::initialize() } is_initialized = true; + LOG_TEST(log, "Initialized cache from {}", metadata.getBaseDirectory()); } CachePriorityGuard::Lock FileCache::lockCache() const @@ -1185,7 +1222,6 @@ void FileCache::loadMetadataImpl() std::vector loading_threads; std::exception_ptr first_exception; std::mutex set_exception_mutex; - std::atomic stop_loading = false; LOG_INFO(log, "Loading filesystem cache with {} threads from {}", load_metadata_threads, metadata.getBaseDirectory()); @@ -1195,7 +1231,7 @@ void FileCache::loadMetadataImpl() { loading_threads.emplace_back([&] { - while (!stop_loading) + while (!stop_loading_metadata) { try { @@ -1212,7 +1248,7 @@ void FileCache::loadMetadataImpl() if (!first_exception) first_exception = std::current_exception(); } - stop_loading = true; + stop_loading_metadata = true; return; } } @@ -1225,7 +1261,7 @@ void FileCache::loadMetadataImpl() if (!first_exception) first_exception = std::current_exception(); } - stop_loading = true; + stop_loading_metadata = true; break; } } @@ -1412,6 +1448,11 @@ FileCache::~FileCache() void FileCache::deactivateBackgroundOperations() { shutdown.store(true); + + stop_loading_metadata = true; + if (load_metadata_main_thread.joinable()) + load_metadata_main_thread.join(); + metadata.shutdown(); if (keep_up_free_space_ratio_task) keep_up_free_space_ratio_task->deactivate(); diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 07be802a940..579472eb824 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -8,6 +8,7 @@ #include +#include #include #include #include @@ -82,6 +83,9 @@ public: bool isInitialized() const; + /// Throws if `!load_metadata_asynchronously` and there is an exception in `init_exception` + void throwInitExceptionIfNeeded(); + const String & getBasePath() const; static Key createKeyForPath(const String & path); @@ -198,6 +202,9 @@ private: const size_t bypass_cache_threshold; const size_t boundary_alignment; size_t load_metadata_threads; + const bool load_metadata_asynchronously; + std::atomic stop_loading_metadata = false; + ThreadFromGlobalPool load_metadata_main_thread; const bool write_cache_per_user_directory; BackgroundSchedulePool::TaskHolder keep_up_free_space_ratio_task; @@ -209,6 +216,7 @@ private: std::exception_ptr init_exception; std::atomic is_initialized = false; + OnceFlag initialize_called; mutable std::mutex 
init_mutex; std::unique_ptr status_file; std::atomic shutdown = false; @@ -246,6 +254,8 @@ private: */ FileCacheQueryLimitPtr query_limit; + void initializeImpl(bool load_metadata); + void assertInitialized() const; void assertCacheCorrectness(); diff --git a/src/Interpreters/Cache/FileCacheSettings.cpp b/src/Interpreters/Cache/FileCacheSettings.cpp index c68ff3183c6..e162d6b7551 100644 --- a/src/Interpreters/Cache/FileCacheSettings.cpp +++ b/src/Interpreters/Cache/FileCacheSettings.cpp @@ -65,6 +65,9 @@ void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetStrin if (has("load_metadata_threads")) load_metadata_threads = get_uint("load_metadata_threads"); + if (has("load_metadata_asynchronously")) + load_metadata_asynchronously = get_uint("load_metadata_asynchronously"); + if (boundary_alignment > max_file_segment_size) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Setting `boundary_alignment` cannot exceed `max_file_segment_size`"); diff --git a/src/Interpreters/Cache/FileCacheSettings.h b/src/Interpreters/Cache/FileCacheSettings.h index 93ded202947..72a2b6c3369 100644 --- a/src/Interpreters/Cache/FileCacheSettings.h +++ b/src/Interpreters/Cache/FileCacheSettings.h @@ -32,6 +32,7 @@ struct FileCacheSettings size_t background_download_queue_size_limit = FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_QUEUE_SIZE_LIMIT; size_t load_metadata_threads = FILECACHE_DEFAULT_LOAD_METADATA_THREADS; + bool load_metadata_asynchronously = false; bool write_cache_per_user_id_directory = false; diff --git a/src/Interpreters/InterpreterDescribeCacheQuery.cpp b/src/Interpreters/InterpreterDescribeCacheQuery.cpp index c7e863bf260..c7464dc6b77 100644 --- a/src/Interpreters/InterpreterDescribeCacheQuery.cpp +++ b/src/Interpreters/InterpreterDescribeCacheQuery.cpp @@ -20,6 +20,7 @@ static Block getSampleBlock() ColumnWithTypeAndName{std::make_shared(), "max_size"}, ColumnWithTypeAndName{std::make_shared(), "max_elements"}, ColumnWithTypeAndName{std::make_shared(), "max_file_segment_size"}, + ColumnWithTypeAndName{std::make_shared(), "is_initialized"}, ColumnWithTypeAndName{std::make_shared(), "boundary_alignment"}, ColumnWithTypeAndName{std::make_shared>(), "cache_on_write_operations"}, ColumnWithTypeAndName{std::make_shared>(), "cache_hits_threshold"}, @@ -50,6 +51,7 @@ BlockIO InterpreterDescribeCacheQuery::execute() res_columns[i++]->insert(settings.max_size); res_columns[i++]->insert(settings.max_elements); res_columns[i++]->insert(settings.max_file_segment_size); + res_columns[i++]->insert(cache->isInitialized()); res_columns[i++]->insert(settings.boundary_alignment); res_columns[i++]->insert(settings.cache_on_write_operations); res_columns[i++]->insert(settings.cache_hits_threshold); diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 7f0fb8cd6ca..3259d7b67d6 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -65,7 +65,7 @@ TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, Cu std::unique_ptr TemporaryDataOnDisk::createRawStream(size_t max_file_size) { - if (file_cache) + if (file_cache && file_cache->isInitialized()) { auto holder = createCacheFile(max_file_size); return std::make_unique(std::move(holder)); @@ -81,7 +81,7 @@ std::unique_ptr TemporaryDataOnDisk::createRawStream(si TemporaryFileStream & TemporaryDataOnDisk::createStream(const Block & header, size_t max_file_size) { - if (file_cache) + if (file_cache && file_cache->isInitialized()) { auto holder = 
createCacheFile(max_file_size); diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 36acc319f4e..5e2d3ee8219 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,7 @@ #include #include +using namespace std::chrono_literals; namespace fs = std::filesystem; using namespace DB; @@ -358,9 +360,11 @@ TEST_F(FileCacheTest, LRUPolicy) settings.max_size = 30; settings.max_elements = 5; settings.boundary_alignment = 1; + settings.load_metadata_asynchronously = false; const size_t file_size = INT_MAX; // the value doesn't really matter because boundary_alignment == 1. + const auto user = FileCache::getCommonUser(); { std::cerr << "Step 1\n"; @@ -815,6 +819,7 @@ TEST_F(FileCacheTest, writeBuffer) settings.max_elements = 5; settings.max_file_segment_size = 5; settings.base_path = cache_base_path; + settings.load_metadata_asynchronously = false; FileCache cache("6", settings); cache.initialize(); @@ -946,6 +951,7 @@ TEST_F(FileCacheTest, temporaryData) settings.max_size = 10_KiB; settings.max_file_segment_size = 1_KiB; settings.base_path = cache_base_path; + settings.load_metadata_asynchronously = false; DB::FileCache file_cache("7", settings); file_cache.initialize(); @@ -1073,6 +1079,7 @@ TEST_F(FileCacheTest, CachedReadBuffer) settings.max_size = 30; settings.max_elements = 10; settings.boundary_alignment = 1; + settings.load_metadata_asynchronously = false; ReadSettings read_settings; read_settings.enable_filesystem_cache = true; @@ -1092,6 +1099,7 @@ TEST_F(FileCacheTest, CachedReadBuffer) auto cache = std::make_shared("8", settings); cache->initialize(); + auto key = cache->createKeyForPath(file_path); const auto user = FileCache::getCommonUser(); @@ -1132,6 +1140,7 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) settings.max_size = 10_KiB; settings.max_file_segment_size = 1_KiB; settings.base_path = cache_base_path; + settings.load_metadata_asynchronously = false; DB::FileCache file_cache("cache", settings); file_cache.initialize(); @@ -1195,6 +1204,7 @@ TEST_F(FileCacheTest, SLRUPolicy) settings.max_size = 40; settings.max_elements = 6; settings.boundary_alignment = 1; + settings.load_metadata_asynchronously = false; settings.cache_policy = "SLRU"; settings.slru_size_ratio = 0.5; @@ -1307,6 +1317,7 @@ TEST_F(FileCacheTest, SLRUPolicy) settings2.boundary_alignment = 1; settings2.cache_policy = "SLRU"; settings2.slru_size_ratio = 0.5; + settings.load_metadata_asynchronously = false; auto cache = std::make_shared("slru_2", settings2); cache->initialize(); diff --git a/src/Storages/System/StorageSystemFilesystemCache.cpp b/src/Storages/System/StorageSystemFilesystemCache.cpp index cfb388bc232..0e972d8411b 100644 --- a/src/Storages/System/StorageSystemFilesystemCache.cpp +++ b/src/Storages/System/StorageSystemFilesystemCache.cpp @@ -47,6 +47,9 @@ void StorageSystemFilesystemCache::fillData(MutableColumns & res_columns, Contex for (const auto & [cache_name, cache_data] : caches) { const auto & cache = cache_data->cache; + if (!cache->isInitialized()) + continue; + cache->iterate([&](const FileSegment::Info & file_segment) { size_t i = 0; diff --git a/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp b/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp index 8915032baf7..c6bba6b8598 100644 --- a/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp +++ 
b/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp @@ -21,6 +21,7 @@ ColumnsDescription StorageSystemFilesystemCacheSettings::getColumnsDescription() {"path", std::make_shared(), "Cache directory"}, {"max_size", std::make_shared(), "Cache size limit by the number of bytes"}, {"max_elements", std::make_shared(), "Cache size limit by the number of elements"}, + {"is_initialized", std::make_shared(), "Whether the cache is initialized and ready to be used"}, {"current_size", std::make_shared(), "Current cache size by the number of bytes"}, {"current_elements", std::make_shared(), "Current cache size by the number of elements"}, {"max_file_segment_size", std::make_shared(), "Maximum allowed file segment size"}, @@ -56,6 +57,7 @@ void StorageSystemFilesystemCacheSettings::fillData( res_columns[i++]->insert(settings.base_path); res_columns[i++]->insert(settings.max_size); res_columns[i++]->insert(settings.max_elements); + res_columns[i++]->insert(cache->isInitialized()); res_columns[i++]->insert(cache->getUsedCacheSize()); res_columns[i++]->insert(cache->getFileSegmentsNum()); res_columns[i++]->insert(settings.max_file_segment_size); diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index e106e3a0e6b..091071f0637 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -27,6 +27,7 @@ 0.3 0.15 0.15 + 0 cache @@ -37,6 +38,7 @@ 100 0 0 + 0 diff --git a/tests/config/config.d/storage_conf_02944.xml b/tests/config/config.d/storage_conf_02944.xml index 5f45640a923..08d78900229 100644 --- a/tests/config/config.d/storage_conf_02944.xml +++ b/tests/config/config.d/storage_conf_02944.xml @@ -19,6 +19,7 @@ 10 100 0 + 0 diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index 17a8dd8b6e1..aee8bd25c2e 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -1,6 +1,7 @@ import logging import time import os +import random import pytest from helpers.cluster import ClickHouseCluster @@ -30,14 +31,6 @@ def cluster(): "config.d/storage_conf_2.xml", ], ) - cluster.add_instance( - "node_no_filesystem_caches_path", - main_configs=[ - "config.d/storage_conf.xml", - "config.d/remove_filesystem_caches_path.xml", - ], - stay_alive=True, - ) cluster.add_instance( "node_force_read_through_cache_on_merge", main_configs=[ @@ -59,6 +52,51 @@ def cluster(): cluster.shutdown() +@pytest.fixture(scope="function") +def non_shared_cluster(): + """ + For tests that cannot run in parallel against the same node/cluster (see test_custom_cached_disk, which relies on + changing server settings at runtime) + """ + try: + # Randomize the cluster name + cluster = ClickHouseCluster(f"{__file__}_non_shared_{random.randint(0, 10**7)}") + cluster.add_instance( + "node_no_filesystem_caches_path", + main_configs=[ + "config.d/storage_conf.xml", + "config.d/remove_filesystem_caches_path.xml", + ], + stay_alive=True, + ) + + logging.info("Starting test-exclusive cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def wait_for_cache_initialized(node, cache_path, max_attempts=50): + initialized = False + attempts = 0 + while not initialized: + query_result = node.query( + "SELECT path FROM system.filesystem_cache_settings WHERE is_initialized" + ) + initialized = cache_path in query_result + + if initialized: + break + + time.sleep(0.1) + attempts += 1 + if attempts >= 
max_attempts: + raise "Stopped waiting for cache to be initialized" + + @pytest.mark.parametrize("node_name", ["node"]) def test_parallel_cache_loading_on_startup(cluster, node_name): node = cluster.instances[node_name] @@ -71,14 +109,21 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): ORDER BY value SETTINGS disk = disk( type = cache, - path = 'paralel_loading_test', + name = 'parallel_loading_test', + path = 'parallel_loading_test', disk = 'hdd_blob', max_file_segment_size = '1Ki', boundary_alignment = '1Ki', max_size = '1Gi', max_elements = 10000000, load_metadata_threads = 30); + """ + ) + wait_for_cache_initialized(node, "parallel_loading_test") + + node.query( + """ SYSTEM DROP FILESYSTEM CACHE; INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; SELECT * FROM test FORMAT Null; @@ -103,6 +148,7 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): ) node.restart_clickhouse() + wait_for_cache_initialized(node, "parallel_loading_test") # < because of additional files loaded into cache on server startup. assert cache_count <= int(node.query("SELECT count() FROM system.filesystem_cache")) @@ -131,7 +177,7 @@ def test_caches_with_the_same_configuration(cluster, node_name): node = cluster.instances[node_name] cache_path = "cache1" - node.query(f"SYSTEM DROP FILESYSTEM CACHE;") + node.query("SYSTEM DROP FILESYSTEM CACHE;") for table in ["test", "test2"]: node.query( f""" @@ -142,14 +188,20 @@ def test_caches_with_the_same_configuration(cluster, node_name): ORDER BY value SETTINGS disk = disk( type = cache, - name = {table}, + name = '{table}', path = '{cache_path}', disk = 'hdd_blob', max_file_segment_size = '1Ki', boundary_alignment = '1Ki', cache_on_write_operations=1, max_size = '1Mi'); + """ + ) + wait_for_cache_initialized(node, cache_path) + + node.query( + f""" SET enable_filesystem_cache_on_write_operations=1; INSERT INTO {table} SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000; @@ -195,9 +247,8 @@ def test_caches_with_the_same_configuration(cluster, node_name): @pytest.mark.parametrize("node_name", ["node_caches_with_same_path"]) def test_caches_with_the_same_configuration_2(cluster, node_name): node = cluster.instances[node_name] - cache_path = "cache1" - node.query(f"SYSTEM DROP FILESYSTEM CACHE;") + node.query("SYSTEM DROP FILESYSTEM CACHE;") for table in ["cache1", "cache2"]: node.query( f""" @@ -207,7 +258,13 @@ def test_caches_with_the_same_configuration_2(cluster, node_name): Engine=MergeTree() ORDER BY value SETTINGS disk = '{table}'; + """ + ) + wait_for_cache_initialized(node, "cache1") + + node.query( + f""" SET enable_filesystem_cache_on_write_operations=1; INSERT INTO {table} SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000; @@ -227,8 +284,8 @@ def test_caches_with_the_same_configuration_2(cluster, node_name): ) -def test_custom_cached_disk(cluster): - node = cluster.instances["node_no_filesystem_caches_path"] +def test_custom_cached_disk(non_shared_cluster): + node = non_shared_cluster.instances["node_no_filesystem_caches_path"] assert "Cannot create cached custom disk without" in node.query_and_get_error( f""" @@ -377,6 +434,7 @@ def test_force_filesystem_cache_on_merges(cluster): ORDER BY value SETTINGS disk = disk( type = cache, + name = 'force_cache_on_merges', path = 'force_cache_on_merges', disk = 'hdd_blob', max_file_segment_size = '1Ki', @@ -385,7 +443,13 @@ def test_force_filesystem_cache_on_merges(cluster): max_size = '10Gi', max_elements = 10000000, load_metadata_threads = 
30); + """ + ) + wait_for_cache_initialized(node, "force_cache_on_merges") + + node.query( + """ SYSTEM DROP FILESYSTEM CACHE; INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; @@ -441,7 +505,13 @@ SETTINGS disk = disk(type = cache, path = "test_system_sync_filesystem_cache", delayed_cleanup_interval_ms = 10000000, disk = hdd_blob), min_bytes_for_wide_part = 10485760; + """ + ) + wait_for_cache_initialized(node, "test_system_sync_filesystem_cache") + + node.query( + """ INSERT INTO test SELECT 1, 'test'; """ ) @@ -525,7 +595,13 @@ SETTINGS disk = disk(type = cache, keep_free_space_elements_ratio = {elements_ratio}, disk = hdd_blob), min_bytes_for_wide_part = 10485760; + """ + ) + wait_for_cache_initialized(node, "test_keep_up_size_ratio") + + node.query( + """ INSERT INTO test SELECT randomString(200); """ ) diff --git a/tests/queries/0_stateless/02344_describe_cache.reference b/tests/queries/0_stateless/02344_describe_cache.reference index 6895606eb2b..13429b14866 100644 --- a/tests/queries/0_stateless/02344_describe_cache.reference +++ b/tests/queries/0_stateless/02344_describe_cache.reference @@ -1,2 +1,2 @@ 1 -102400 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/02344_describe_cache_test 0 5000 0 16 +102400 10000000 33554432 1 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/02344_describe_cache_test 0 5000 0 16 diff --git a/tests/queries/0_stateless/02344_describe_cache.sh b/tests/queries/0_stateless/02344_describe_cache.sh index d91661db9bc..c5373b4d7e3 100755 --- a/tests/queries/0_stateless/02344_describe_cache.sh +++ b/tests/queries/0_stateless/02344_describe_cache.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT -nm --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() -SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk'); +SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0); """ $CLICKHOUSE_CLIENT -nm --query """ diff --git a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference index d4191af1594..41a60204eab 100644 --- a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference +++ b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference @@ -1,2 +1,2 @@ -1048576 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection_sql 0 5000 0 16 -1048576 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection 0 5000 0 16 +1048576 10000000 33554432 1 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection_sql 0 5000 0 16 +1048576 10000000 33554432 1 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection 0 5000 0 16 diff --git a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql index c7216833bc9..127baa8304e 100644 --- a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql +++ b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql @@ -3,8 +3,8 @@ CREATE NAMED COLLECTION IF NOT EXISTS cache_collection_sql AS path = 'collection_sql', max_size = '1Mi'; DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() 
ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME', cache_name='cache_collection_sql'); +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME', cache_name='cache_collection_sql', load_metadata_asynchronously = 0); DESCRIBE FILESYSTEM CACHE '$CLICHOUSE_TEST_UNIQUE_NAME'; CREATE TABLE test2 (a Int32, b String) -ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME_2', cache_name='cache_collection'); +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME_2', cache_name='cache_collection', load_metadata_asynchronously = 0); DESCRIBE FILESYSTEM CACHE '$CLICHOUSE_TEST_UNIQUE_NAME_2'; diff --git a/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference index 17a25d82824..0f64d0393b2 100644 --- a/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference +++ b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference @@ -1,7 +1,7 @@ -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 10 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 5 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 15 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 2 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 10 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 5 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 15 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 2 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 diff --git a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference index 298cc908178..c6bbcdc20c2 100644 --- a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference +++ b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference @@ -1,20 +1,20 @@ -100 10 10 10 0 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 10 10 1 10 0 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 0 10 98 set max_size from 100 to 10 -10 10 10 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +10 10 10 1 10 0 0 8 1 
/var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 1 8 set max_size from 10 to 100 -100 10 10 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 10 10 1 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 10 98 set max_elements from 10 to 2 -100 2 10 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 2 10 1 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 2 18 set max_elements from 2 to 10 -100 10 10 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 10 10 1 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 10 98 From c542fb57cdbd1d2a17544476f924fc3e13f56ebf Mon Sep 17 00:00:00 2001 From: neoman36 Date: Fri, 16 Aug 2024 13:01:23 +0200 Subject: [PATCH 2296/2361] named-collections.md is updated 2 --- docs/en/operations/named-collections.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md index 16c75814e5a..340a5a8f87a 100644 --- a/docs/en/operations/named-collections.md +++ b/docs/en/operations/named-collections.md @@ -312,7 +312,7 @@ The description of parameters see [postgresql](../sql-reference/table-functions/ - `username` for `user` - `db` for `database`. -Parameter `addresses_expr` is used in a collection instead of `host:port`. The parameter is optional, because there are other optional ones: `host`, `hostname`, `port`. The following pseudocode explains the priority: +Parameter `addresses_expr` is used in a collection instead of `host:port`. The parameter is optional, because there are other optional ones: `host`, `hostname`, `port`. The following pseudo code explains the priority: ```sql CASE From b1c18e2dd42051fb045431278eceae9dd22563bd Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 16 Aug 2024 13:29:25 +0000 Subject: [PATCH 2297/2361] Fix check for allowed date symbols in date time best effort schema infernece --- src/IO/parseDateTimeBestEffort.cpp | 4 ++-- tests/queries/0_stateless/03222_date_time_inference.reference | 1 + tests/queries/0_stateless/03222_date_time_inference.sql | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 8d798bf725c..03429b46b2a 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -350,7 +350,7 @@ ReturnType parseDateTimeBestEffortImpl( if (month && !day_of_month) day_of_month = hour_or_day_of_month_or_month; } - else if (checkChar('/', in) || checkChar('.', in) || checkChar('-', in)) + else if ((!in.eof() && isSymbolIn(*in.position(), allowed_date_delimiters)) && (checkChar('/', in) || checkChar('.', in) || checkChar('-', in))) { if (day_of_month) return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: day of month is duplicated"); @@ -399,7 +399,7 @@ ReturnType parseDateTimeBestEffortImpl( if (month > 12) std::swap(month, day_of_month); - if (checkChar('/', in) || checkChar('.', in) || checkChar('-', in)) + if ((!in.eof() && isSymbolIn(*in.position(), allowed_date_delimiters)) && (checkChar('/', in) || checkChar('.', in) || checkChar('-', in))) { if (year) return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year component is duplicated"); diff --git a/tests/queries/0_stateless/03222_date_time_inference.reference b/tests/queries/0_stateless/03222_date_time_inference.reference index 
3288308a1d0..838b103f106 100644 --- a/tests/queries/0_stateless/03222_date_time_inference.reference +++ b/tests/queries/0_stateless/03222_date_time_inference.reference @@ -251,3 +251,4 @@ Mar 2000 00:00:00.000 String 2000 00:00:00.000 String Mar 2000-01-01 00:00:00 String Mar 2000-01-01 00:00:00.000 String +1.7.10 String diff --git a/tests/queries/0_stateless/03222_date_time_inference.sql b/tests/queries/0_stateless/03222_date_time_inference.sql index ebd472294be..b0c4df530cf 100644 --- a/tests/queries/0_stateless/03222_date_time_inference.sql +++ b/tests/queries/0_stateless/03222_date_time_inference.sql @@ -265,5 +265,5 @@ select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00.000"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00.000"}'); - +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1.7.10"}'); From 4975264c9d39d560c32799dcf14bd3f9d40fcfa4 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 16 Aug 2024 14:08:07 +0000 Subject: [PATCH 2298/2361] More strict parsing --- src/IO/parseDateTimeBestEffort.cpp | 23 ++++--- .../03222_date_time_inference.reference | 67 ++++++++++++------- .../0_stateless/03222_date_time_inference.sql | 19 ++++++ 3 files changed, 75 insertions(+), 34 deletions(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index 03429b46b2a..52bcdc6bbb4 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -180,8 +180,8 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 10 && !year && !has_time) { - if (strict && month) - return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow timestamps"); /// This is unix timestamp. readDecimalNumber<10>(res, digits); @@ -189,8 +189,8 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 9 && !year && !has_time) { - if (strict && month) - return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow timestamps"); /// This is unix timestamp. 
readDecimalNumber<9>(res, digits); @@ -198,8 +198,8 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 14 && !year && !has_time) { - if (strict && month) - return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow date times without separators"); /// This is YYYYMMDDhhmmss readDecimalNumber<4>(year, digits); @@ -212,8 +212,8 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 8 && !year) { - if (strict && month) - return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month component is duplicated"); + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow date times without separators"); /// This is YYYYMMDD readDecimalNumber<4>(year, digits); @@ -222,6 +222,9 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 6) { + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow date times without separators"); + /// This is YYYYMM or hhmmss if (!year && !month) { @@ -593,8 +596,8 @@ ReturnType parseDateTimeBestEffortImpl( else return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected word"); -// while (!in.eof() && isAlphaASCII(*in.position())) -// ++in.position(); + while (!in.eof() && isAlphaASCII(*in.position())) + ++in.position(); /// For RFC 2822 if (has_day_of_week) diff --git a/tests/queries/0_stateless/03222_date_time_inference.reference b/tests/queries/0_stateless/03222_date_time_inference.reference index 838b103f106..221ab1fe5f5 100644 --- a/tests/queries/0_stateless/03222_date_time_inference.reference +++ b/tests/queries/0_stateless/03222_date_time_inference.reference @@ -45,34 +45,34 @@ DateTime/DateTime64 best effort 2000-01-01 00:00:00 DateTime 2000-01-01 01:00:00 DateTime 2000-01-01 01:00:00.000000000 DateTime64(9) -2017-01-01 22:02:03 DateTime -2017-01-01 22:02:03.000000000 DateTime64(9) -2017-01-01 21:02:03 DateTime -2017-01-01 21:02:03.000000000 DateTime64(9) -2017-01-01 22:02:03 DateTime -2017-01-01 22:02:03.000000000 DateTime64(9) -2017-01-02 01:02:03 DateTime -2017-01-02 01:02:03.000000000 DateTime64(9) -1970-01-02 01:02:03 DateTime -1970-01-02 01:02:03.000000000 DateTime64(9) -1970-01-02 01:02:03 DateTime -1970-01-02 01:02:03.000000000 DateTime64(9) +02/01/17 010203 MSK String +02/01/17 010203.000 MSK String +02/01/17 010203 MSK+0100 String +02/01/17 010203.000 MSK+0100 String +02/01/17 010203 UTC+0300 String +02/01/17 010203.000 UTC+0300 String +02/01/17 010203Z String +02/01/17 010203.000Z String +02/01/1970 010203Z String +02/01/1970 010203.000Z String +02/01/70 010203Z String +02/01/70 010203.000Z String 2018-02-11 03:40:50 DateTime 2018-02-11 03:40:50.000000000 DateTime64(9) 2000-04-17 01:02:03 DateTime 2000-04-17 01:02:03.000000000 DateTime64(9) -1970-01-02 01:00:00 DateTime -1970-01-02 01:00:00.000000000 DateTime64(9) -1970-01-02 01:02:03 DateTime -1970-01-02 01:02:03.000000000 DateTime64(9) -1970-01-02 01:02:03 DateTime -1970-01-02 01:02:03.000000000 DateTime64(9) +19700102 01:00:00 String +19700102 01:00:00.000 String +19700102010203Z String +19700102010203Z.000 String +1970/01/02 010203Z String +1970/01/02 010203.000Z String 2015-12-31 20:00:00 DateTime 2015-12-31 20:00:00 DateTime 2016-01-01 00:00:00 DateTime 2016-01-01 00:00:00 DateTime -2017-01-01 22:02:03 DateTime -2017-01-01 22:02:03.000000000 DateTime64(9) 
+201701 02 010203 UTC+0300 String +201701 02 010203.000 UTC+0300 String 2017-01-02 03:04:05 DateTime 2017-01-02 03:04:05.000000000 DateTime64(9) 2017-01-02 03:04:05 DateTime @@ -117,8 +117,8 @@ DateTime/DateTime64 best effort 2017-01-02 03:04:05.000000000 DateTime64(9) 2017-04-01 11:22:33 DateTime 2017-04-01 11:22:33.000000000 DateTime64(9) -2017-04-01 22:02:03 DateTime -2017-04-01 22:02:03.000000000 DateTime64(9) +2017 Apr 02 010203 UTC+0300 String +2017 Apr 02 010203.000 UTC+0300 String 2017-04-01 22:02:03 DateTime 2017-04-01 22:02:03.000000000 DateTime64(9) 2017-04-02 01:02:03 DateTime @@ -143,8 +143,8 @@ DateTime/DateTime64 best effort 2017-04-01 21:02:03.000000000 DateTime64(9) 2017-04-02 01:02:03 DateTime 2017-04-02 01:02:03.000000000 DateTime64(9) -2017-01-01 22:02:03 DateTime -2017-01-01 22:02:03.000000000 DateTime64(9) +2017 Jan 02 010203 UTC+0300 String +2017 Jan 02 010203.000 UTC+0300 String 2017-04-25 01:02:03 DateTime 2017-04-25 01:02:03.000000000 DateTime64(9) 2017-04-25 01:02:03 DateTime @@ -231,6 +231,25 @@ Mar01012020010101 String Mar 01012020010101 String Mar01012020010101.000 String Mar 0101202001010101.000 String +Sun String +Sun1 String +Sun 1 String +Sun01 String +Sun 01 String +Sun2020 String +Sun 2020 String +Sun012020 String +Sun 012020 String +Sun01012020 String +Sun 01012020 String +Sun0101202001 String +Sun 0101202001 String +Sun010120200101 String +Sun 010120200101 String +Sun01012020010101 String +Sun 01012020010101 String +Sun01012020010101.000 String +Sun 0101202001010101.000 String 2000 01 01 01:00:00 String 2000 01 01 01:00:00.000 String 2000a01a01 01:00:00 String diff --git a/tests/queries/0_stateless/03222_date_time_inference.sql b/tests/queries/0_stateless/03222_date_time_inference.sql index b0c4df530cf..b16f72c72f4 100644 --- a/tests/queries/0_stateless/03222_date_time_inference.sql +++ b/tests/queries/0_stateless/03222_date_time_inference.sql @@ -245,6 +245,25 @@ select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020010101"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101.000"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01012020010101"}'); 
+select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 0101202001010101.000"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00.000"}'); select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00"}'); From 6bd65dbfa5c5d450e355dc64c110db65d2f56cbb Mon Sep 17 00:00:00 2001 From: Aleksei Filatov Date: Fri, 16 Aug 2024 15:07:53 +0000 Subject: [PATCH 2299/2361] Use HTTP/1.1 for external HTTP authentication --- src/Access/HTTPAuthClient.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Access/HTTPAuthClient.h b/src/Access/HTTPAuthClient.h index a8b56cf05a7..a1b97a729a3 100644 --- a/src/Access/HTTPAuthClient.h +++ b/src/Access/HTTPAuthClient.h @@ -82,7 +82,8 @@ public: Result authenticate(const String & user_name, const String & password) const { - Poco::Net::HTTPRequest request{Poco::Net::HTTPRequest::HTTP_GET, this->getURI().getPathAndQuery()}; + Poco::Net::HTTPRequest request{ + Poco::Net::HTTPRequest::HTTP_GET, this->getURI().getPathAndQuery(), Poco::Net::HTTPRequest::HTTP_1_1}; Poco::Net::HTTPBasicCredentials basic_credentials{user_name, password}; basic_credentials.authenticate(request); From 0dba461d7f9228fa81facdeabeffc983e181d9e0 Mon Sep 17 00:00:00 2001 From: Christoph Wurm Date: Fri, 16 Aug 2024 16:46:57 +0100 Subject: [PATCH 2300/2361] Update gui.md: Links, formatting, spelling --- docs/en/interfaces/third-party/gui.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index 5b7615485ca..8d9dce983bc 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -10,7 +10,7 @@ sidebar_label: Visual Interfaces ### ch-ui {#ch-ui} -[ch-ui](https://github.com/caioricciuti/ch-ui) is a simple React.js app interface for ClickHouse databases, designed for executing queries and visualizing data. Built with React and the ClickHouse client for web, it offers a sleek and user-friendly UI for easy database interactions. +[ch-ui](https://github.com/caioricciuti/ch-ui) is a simple React.js app interface for ClickHouse databases designed for executing queries and visualizing data. Built with React and the ClickHouse client for web, it offers a sleek and user-friendly UI for easy database interactions. Features: @@ -25,7 +25,7 @@ Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) pr Features: -- Works with ClickHouse directly from the browser, without the need to install additional software. +- Works with ClickHouse directly from the browser without the need to install additional software. - Query editor with syntax highlighting. - Auto-completion of commands. - Tools for graphical analysis of query execution. @@ -63,7 +63,7 @@ Features: - Table list with filtering and metadata. - Table preview with filtering and sorting. -- Read-only queries execution. +- Read-only query execution. ### Redash {#redash} @@ -75,23 +75,23 @@ Features: - Powerful editor of queries. - Database explorer. -- Visualization tools, that allow you to represent data in different forms. +- Visualization tool that allows you to represent data in different forms. 
### Grafana {#grafana} [Grafana](https://grafana.com/grafana/plugins/grafana-clickhouse-datasource/) is a platform for monitoring and visualization. -"Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. Trusted and loved by the community" — grafana.com. +"Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data-driven culture. Trusted and loved by the community" — grafana.com. -ClickHouse datasource plugin provides a support for ClickHouse as a backend database. +ClickHouse data source plugin provides support for ClickHouse as a backend database. -### qryn (#qryn) +### qryn {#qryn} [qryn](https://metrico.in) is a polyglot, high-performance observability stack for ClickHouse _(formerly cLoki)_ with native Grafana integrations allowing users to ingest and analyze logs, metrics and telemetry traces from any agent supporting Loki/LogQL, Prometheus/PromQL, OTLP/Tempo, Elastic, InfluxDB and many more. Features: -- Built in Explore UI and LogQL CLI for querying, extracting and visualizing data +- Built-in Explore UI and LogQL CLI for querying, extracting and visualizing data - Native Grafana APIs support for querying, processing, ingesting, tracing and alerting without plugins - Powerful pipeline to dynamically search, filter and extract data from logs, events, traces and beyond - Ingestion and PUSH APIs transparently compatible with LogQL, PromQL, InfluxDB, Elastic and many more @@ -139,7 +139,7 @@ Features: ### DBM {#dbm} -[DBM](https://dbm.incubator.edurt.io/) DBM is a visual management tool for ClickHouse! +[DBM](https://github.com/devlive-community/dbm) DBM is a visual management tool for ClickHouse! Features: @@ -151,7 +151,7 @@ Features: - Support custom query - Support multiple data sources management(connection test, monitoring) - Support monitor (processor, connection, query) -- Support migrate data +- Support migrating data ### Bytebase {#bytebase} @@ -169,7 +169,7 @@ Features: ### Zeppelin-Interpreter-for-ClickHouse {#zeppelin-interpreter-for-clickhouse} -[Zeppelin-Interpreter-for-ClickHouse](https://github.com/SiderZhang/Zeppelin-Interpreter-for-ClickHouse) is a [Zeppelin](https://zeppelin.apache.org) interpreter for ClickHouse. Compared with JDBC interpreter, it can provide better timeout control for long running queries. +[Zeppelin-Interpreter-for-ClickHouse](https://github.com/SiderZhang/Zeppelin-Interpreter-for-ClickHouse) is a [Zeppelin](https://zeppelin.apache.org) interpreter for ClickHouse. Compared with the JDBC interpreter, it can provide better timeout control for long-running queries. ### ClickCat {#clickcat} @@ -179,7 +179,7 @@ Features: - An online SQL editor which can run your SQL code without any installing. - You can observe all processes and mutations. For those unfinished processes, you can kill them in ui. -- The Metrics contains Cluster Analysis,Data Analysis,Query Analysis. +- The Metrics contain Cluster Analysis, Data Analysis, and Query Analysis. ### ClickVisual {#clickvisual} @@ -332,7 +332,7 @@ Learn more about the product at [TABLUM.IO](https://tablum.io/) ### CKMAN {#ckman} -[CKMAN] (https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters! +[CKMAN](https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters! 
Features: From 45e06de3267486296cc1452c981a78688a2193ae Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 16 Aug 2024 18:01:43 +0200 Subject: [PATCH 2301/2361] Minor update in Dynamic/JSON serializations --- src/DataTypes/Serializations/SerializationObject.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 2dd25e540cc..0042aa6d89d 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -199,7 +199,7 @@ void SerializationObject::serializeBinaryBulkStatePrefix( auto object_state = std::make_shared(serialization_version); object_state->max_dynamic_paths = column_object.getMaxDynamicPaths(); /// Write max_dynamic_paths parameter. - writeBinaryLittleEndian(object_state->max_dynamic_paths, *stream); + writeVarUInt(object_state->max_dynamic_paths, *stream); /// Write all dynamic paths in sorted order. object_state->sorted_dynamic_paths.reserve(dynamic_paths.size()); for (const auto & [path, _] : dynamic_paths) @@ -354,7 +354,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb readBinaryLittleEndian(serialization_version, *structure_stream); auto structure_state = std::make_shared(serialization_version); /// Read max_dynamic_paths parameter. - readBinaryLittleEndian(structure_state->max_dynamic_paths, *structure_stream); + readVarUInt(structure_state->max_dynamic_paths, *structure_stream); /// Read the sorted list of dynamic paths. size_t dynamic_paths_size; readVarUInt(dynamic_paths_size, *structure_stream); From c85d5e753899503f93a8f9ca7b67776d386d9130 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Fri, 16 Aug 2024 18:02:51 +0200 Subject: [PATCH 2302/2361] Update Dynamic serialization --- src/DataTypes/Serializations/SerializationDynamic.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 6bba87c40fa..ab24779ced2 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -115,7 +115,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types /// that is specified in the Dynamic type (we could decrease it before merge). - writeBinaryLittleEndian(dynamic_state->max_dynamic_types, *stream); + writeVarUInt(dynamic_state->max_dynamic_types, *stream); dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; @@ -123,7 +123,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( /// Write information about variants. size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. 
- writeBinaryLittleEndian(num_variants, *stream); + writeVarUInt(num_variants, *stream); if (settings.data_types_binary_encoding) { const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); @@ -252,11 +252,11 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); /// Read max_dynamic_types parameter. - readBinaryLittleEndian(structure_state->max_dynamic_types, *structure_stream); + readVarUInt(structure_state->max_dynamic_types, *structure_stream); /// Read information about variants. DataTypes variants; size_t num_variants; - readBinaryLittleEndian(num_variants, *structure_stream); + readVarUInt(num_variants, *structure_stream); variants.reserve(num_variants + 1); /// +1 for shared variant. if (settings.data_types_binary_encoding) { From fd266284e1a2c5170183f7e1d2425b4b2b6ca103 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Aug 2024 19:49:47 +0200 Subject: [PATCH 2303/2361] Fix test `02122_join_group_by_timeout` --- tests/queries/0_stateless/02122_join_group_by_timeout.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02122_join_group_by_timeout.sh b/tests/queries/0_stateless/02122_join_group_by_timeout.sh index b4644878544..17d8200f20c 100755 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.sh +++ b/tests/queries/0_stateless/02122_join_group_by_timeout.sh @@ -15,6 +15,7 @@ fi # TCP CLIENT: As of today (02/12/21) uses PullingAsyncPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) +### However, in the test, the server can be overloaded, so we assert query duration in the interval of 1 to 60 seconds. query_id=$(random_str 12) $CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q " SELECT * FROM @@ -33,7 +34,7 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q " FORMAT Null " 2>&1 | grep -m1 -o "Code: 159" $CLICKHOUSE_CLIENT -q "system flush logs" -${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) BETWEEN 1 AND 60 from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" ### Should stop pulling data and return what has been generated already (return code 0) @@ -52,7 +53,7 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" -q " " echo $? 
$CLICKHOUSE_CLIENT -q "system flush logs" -${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) BETWEEN 1 AND 60 from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" # HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor From faad7f4ba27f665ecdb4fa6212a695f86413746a Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 16 Aug 2024 20:35:44 +0000 Subject: [PATCH 2304/2361] Fix Broken pipe error for 03149_numbers_max_block_size_zero.sh The error appeared while debugging https://github.com/ClickHouse/clickhouse-private/issues/14225 Logs: https://pastila.nl/?00628486/754eaf7d96fd03ceecdf1a45458867dc#B9vFn07WAielph/Z5lHbrQ== From the `man grep`: > -q, --quiet, --silent > <...> Exit immediately with zero status if any match is found, even if an error was detected. When `grep -q` finds a match, it immediately exits with status `0 and closes its side of the pipe. If the clickhouse-client is still trying to send data through the pipe, it leads to SIGPIPE signal. Use grep -c instead. It is less efficient, but the output in this test is small. We should also revisit how we handle SIGPIPE signal, e.g. the server should not try to send logs if it already encountered the Broken pipe error. --- tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh b/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh index 6f70a0d2536..2c4669325bb 100755 --- a/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh +++ b/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh @@ -4,4 +4,4 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0" 2>&1 | grep -q "Sanity check: 'max_block_size' cannot be 0. Set to default value" && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT -q "SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0" 2>&1 | [ $(grep -c "Sanity check: 'max_block_size' cannot be 0. 
Set to default value") -gt 0 ] && echo "OK" || echo "FAIL" From a6d5047bb09640bcf99bef84f655602d7dfb3361 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 16 Aug 2024 21:29:46 +0100 Subject: [PATCH 2305/2361] impl --- .../MergeTree/MergeTreePrefetchedReadPool.cpp | 4 ++++ .../MergeTree/MergeTreeReadPoolBase.cpp | 6 +++++ .../MergeTreeReadPoolParallelReplicas.cpp | 5 +++++ ...rgeTreeReadPoolParallelReplicasInOrder.cpp | 5 +++++ .../ParallelReplicasReadingCoordinator.cpp | 4 ++++ ...icas_read_task_size_overflow_bug.reference | 0 ...l_replicas_read_task_size_overflow_bug.sql | 22 +++++++++++++++++++ 7 files changed, 46 insertions(+) create mode 100644 tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.reference create mode 100644 tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index a9b77fb6c03..7081eb716f5 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -395,6 +395,10 @@ void MergeTreePrefetchedReadPool::fillPerThreadTasks(size_t threads, size_t sum_ part_stat.prefetch_step_marks = std::max(part_stat.prefetch_step_marks, per_part_infos[i]->min_marks_per_task); + if (part_stat.prefetch_step_marks == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + LOG_DEBUG( log, "Part: {}, sum_marks: {}, approx mark size: {}, prefetch_step_bytes: {}, prefetch_step_marks: {}, (ranges: {})", diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 6d2560bc9c7..9d3c38822e1 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -13,6 +13,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } MergeTreeReadPoolBase::MergeTreeReadPoolBase( @@ -85,6 +86,11 @@ static size_t calculateMinMarksPerTask( min_marks_per_task = heuristic_min_marks; } } + + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + LOG_TEST(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); return min_marks_per_task; } diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp index 33eaf5a49bd..d23072771f2 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp @@ -8,6 +8,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( @@ -38,6 +39,10 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( for (const auto & info : per_part_infos) min_marks_per_task = std::max(min_marks_per_task, info->min_marks_per_task); + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + extension.all_callback( InitialAllRangesAnnouncement(coordination_mode, parts_ranges.getDescriptions(), extension.number_of_current_replica)); } diff --git 
a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp index 6b5cf978423..42ffc4304b2 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp @@ -6,6 +6,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrder( @@ -37,6 +38,10 @@ MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrd for (const auto & info : per_part_infos) min_marks_per_task = std::max(min_marks_per_task, info->min_marks_per_task); + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + for (const auto & part : parts_ranges) request.push_back({part.data_part->info, MarkRanges{}}); diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index f46b4de10b7..ee47fe3549a 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -1004,6 +1004,10 @@ void ParallelReplicasReadingCoordinator::handleInitialAllRangesAnnouncement(Init ParallelReadResponse ParallelReplicasReadingCoordinator::handleRequest(ParallelReadRequest request) { + if (request.min_number_of_marks == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + ProfileEventTimeIncrement watch(ProfileEvents::ParallelReplicasHandleRequestMicroseconds); std::lock_guard lock(mutex); diff --git a/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.reference b/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql b/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql new file mode 100644 index 00000000000..984c7fe0db7 --- /dev/null +++ b/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS test__fuzz_22 SYNC; + +CREATE TABLE test__fuzz_22 (k Float32, v String) ENGINE = MergeTree ORDER BY k SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES test__fuzz_22; + +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); + +SET allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', + merge_tree_min_rows_for_concurrent_read = 9223372036854775806, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 9223372036854775806; + + SELECT v + FROM test__fuzz_22 +ORDER BY v + LIMIT 10, 10 +SETTINGS max_threads = 4 + FORMAT Null; -- { serverError BAD_ARGUMENTS } + +DROP TABLE test__fuzz_22 SYNC; From 4b11522990f0f3557355a1123f877a9394c438d3 Mon Sep 17 00:00:00 2001 From: 
Justin de Guzman Date: Fri, 16 Aug 2024 14:06:55 -0700 Subject: [PATCH 2306/2361] Update JSON link --- docs/en/sql-reference/data-types/json.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index 12ab9f1dddc..e48b308a620 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -79,6 +79,5 @@ SELECT * FROM json FORMAT JSONEachRow ## Related Content -- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) +- [Using JSON in ClickHouse](/en/integrations/data-formats/json/overview) - [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) -- \ No newline at end of file From cc7d22a7b83440cfbf7d37086ece7fac222f24de Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Aug 2024 23:08:16 +0200 Subject: [PATCH 2307/2361] Proper parsing of the PostgreSQL-style CAST operator --- src/Parsers/ExpressionElementParsers.cpp | 26 +++++++++++-------- ..._proper_parsing_of_cast_operator.reference | 4 +++ .../03227_proper_parsing_of_cast_operator.sql | 6 +++++ 3 files changed, 25 insertions(+), 11 deletions(-) create mode 100644 tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference create mode 100644 tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index dd22b80b1cb..ffa1bd93ded 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -853,9 +853,9 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected /// Parse numbers (including decimals), strings, arrays and tuples of them. 
+ Pos begin = pos; const char * data_begin = pos->begin; const char * data_end = pos->end; - bool is_string_literal = pos->type == StringLiteral; if (pos->type == Minus) { @@ -866,7 +866,7 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected data_end = pos->end; ++pos; } - else if (pos->type == Number || is_string_literal) + else if (pos->type == Number || pos->type == StringLiteral) { ++pos; } @@ -939,18 +939,22 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected { String s; size_t data_size = data_end - data_begin; - if (is_string_literal) + if (begin->type == StringLiteral) { - ReadBufferFromMemory buf(data_begin, data_size); - readQuotedStringWithSQLStyle(s, buf); - assert(buf.count() == data_size); + ASTPtr literal; + if (ParserStringLiteral().parse(begin, literal, expected)) + { + node = createFunctionCast(literal, type_ast); + return true; + } + return false; } else - s = String(data_begin, data_size); - - auto literal = std::make_shared(std::move(s)); - node = createFunctionCast(literal, type_ast); - return true; + { + auto literal = std::make_shared(String(data_begin, data_size)); + node = createFunctionCast(literal, type_ast); + return true; + } } return false; diff --git a/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference new file mode 100644 index 00000000000..2127d396bb3 --- /dev/null +++ b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference @@ -0,0 +1,4 @@ +414243 +ABC +A +{"a": \'A\'} diff --git a/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql new file mode 100644 index 00000000000..0c2e7dc582a --- /dev/null +++ b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql @@ -0,0 +1,6 @@ +SELECT '414243'::String; +SELECT x'414243'::String; +SELECT b'01000001'::String; +SELECT '{"a": \'\x41\'}'::String; +SELECT '{"a": \'\x4\'}'::String; -- { clientError SYNTAX_ERROR } +SELECT '{"a": \'a\x4\'}'::String; -- { clientError SYNTAX_ERROR } From aee031ad4468b870073dc46770d07cea07aa829f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Aug 2024 23:25:49 +0200 Subject: [PATCH 2308/2361] Slightly better --- src/Parsers/ExpressionElementParsers.cpp | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index ffa1bd93ded..726326bfc85 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -856,6 +856,7 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected Pos begin = pos; const char * data_begin = pos->begin; const char * data_end = pos->end; + ASTPtr string_literal; if (pos->type == Minus) { @@ -866,10 +867,15 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected data_end = pos->end; ++pos; } - else if (pos->type == Number || pos->type == StringLiteral) + else if (pos->type == Number) { ++pos; } + else if (pos->type == StringLiteral) + { + if (!ParserStringLiteral().parse(begin, string_literal, expected)) + return false; + } else if (isOneOf(pos->type)) { TokenType last_token = OpeningSquareBracket; @@ -939,15 +945,10 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected { String s; size_t data_size = data_end - data_begin; - if (begin->type == 
StringLiteral) + if (string_literal) { - ASTPtr literal; - if (ParserStringLiteral().parse(begin, literal, expected)) - { - node = createFunctionCast(literal, type_ast); - return true; - } - return false; + node = createFunctionCast(string_literal, type_ast); + return true; } else { From 2a5de86fe4f21d77671a317d52b3f378062c7555 Mon Sep 17 00:00:00 2001 From: Julia Kartseva Date: Fri, 16 Aug 2024 21:46:21 +0000 Subject: [PATCH 2309/2361] shellcheck --- .../queries/0_stateless/03149_numbers_max_block_size_zero.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh b/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh index 2c4669325bb..7f606d889a7 100755 --- a/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh +++ b/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash +# shellcheck disable=SC2266 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0" 2>&1 | [ $(grep -c "Sanity check: 'max_block_size' cannot be 0. Set to default value") -gt 0 ] && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT -q "SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0" 2>&1 | + [ "$(grep -c "Sanity check: 'max_block_size' cannot be 0. Set to default value")" -gt 0 ] && echo "OK" || echo "FAIL" From d952f7cff579d28a51eea428aee7460121862ce5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Aug 2024 23:50:26 +0200 Subject: [PATCH 2310/2361] Update test --- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh index acb4925ce6e..ee702300094 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 -cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata FORMAT JSONAsObject" +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} --max_block_size 8192 --max_insert_block_size 8192 --max_insert_threads 1 --min_insert_block_size_bytes 0 --min_insert_block_size_rows 0 -q "INSERT INTO ghdata FORMAT JSONAsObject" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" From b98249ea7fda526a7a561862fcc4a721e5a4587f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 00:06:47 +0200 Subject: [PATCH 2311/2361] Use temporary tables for input and output in clickhouse-local --- programs/local/LocalServer.cpp | 2 +- tests/queries/0_stateless/01191_rename_dictionary.sql | 1 + .../02141_clickhouse_local_interactive_table.reference | 4 ++-- .../0_stateless/02141_clickhouse_local_interactive_table.sh | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 200beea7b63..a8b774562f9 100644 --- 
a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -367,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery() else table_structure = "(" + table_structure + ")"; - return fmt::format("CREATE TABLE {} {} ENGINE = File({}, {});", + return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});", table_name, table_structure, data_format, table_file); } diff --git a/tests/queries/0_stateless/01191_rename_dictionary.sql b/tests/queries/0_stateless/01191_rename_dictionary.sql index c5012dabc81..be95e5a7d4b 100644 --- a/tests/queries/0_stateless/01191_rename_dictionary.sql +++ b/tests/queries/0_stateless/01191_rename_dictionary.sql @@ -27,6 +27,7 @@ RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QU DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} DROP TABLE test_01191.t; +DROP DATABASE IF EXISTS dummy_db; CREATE DATABASE dummy_db ENGINE=Atomic; RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 0bb8966cbe4..0e74c0a083e 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') -CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh index 934d87616ac..3a95e59416a 100755 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' -$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' +$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' +$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' From 8ba142559ca05295670b0a899610ab613c2d5658 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 00:09:39 +0200 Subject: [PATCH 2312/2361] Pass-through RENAME and UUID-related operations in Overlay database to underlying databases --- src/Databases/DatabasesOverlay.cpp | 47 ++++++++++++++++++++++++++++++ src/Databases/DatabasesOverlay.h | 9 ++++++ src/Interpreters/StorageID.h | 1 - 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 801356b3dd7..495733e15fd 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,6 +14,8 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_TABLE; } DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) @@ -124,6 +126,39 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab getEngineName()); } +void DatabasesOverlay::renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) +{ + for (auto & db : databases) + { + if (db->isTableExist(name, current_context)) + { + if (DatabasesOverlay * to_overlay_database = typeid_cast(&to_database)) + { + /// Renaming from Overlay database inside itself or into another Overlay database. + /// Just use the first database in the overlay as a destination. + if (to_overlay_database->databases.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName()); + + db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary); + } + else + { + /// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database. 
+ db->renameTable(current_context, name, to_database, to_name, exchange, dictionary); + } + + return; + } + } + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name)); +} + ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const { ASTPtr result = nullptr; @@ -178,6 +213,18 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const return result; } +UUID DatabasesOverlay::getUUID() const +{ + UUID result = UUIDHelpers::Nil; + for (const auto & db : databases) + { + result = db->getUUID(); + if (result != UUIDHelpers::Nil) + break; + } + return result; +} + UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const { UUID result = UUIDHelpers::Nil; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index b0c7e7e4032..40c653e5cb5 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -35,12 +35,21 @@ public: StoragePtr detachTable(ContextPtr context, const String & table_name) override; + void renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) override; + ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; ASTPtr getCreateDatabaseQuery() const override; String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; + UUID getUUID() const override; UUID tryGetTableUUID(const String & table_name) const override; void drop(ContextPtr context) override; diff --git a/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h index f9afbc7b98d..ad55d16e284 100644 --- a/src/Interpreters/StorageID.h +++ b/src/Interpreters/StorageID.h @@ -27,7 +27,6 @@ class ASTQueryWithTableAndOutput; class ASTTableIdentifier; class Context; -// TODO(ilezhankin): refactor and merge |ASTTableIdentifier| struct StorageID { String database_name; From da0a8051d8c8e8c2c72145e15cdf5a96e99641d2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 00:22:57 +0200 Subject: [PATCH 2313/2361] Miscellaneous changes in database engines --- src/Databases/DatabaseLazy.cpp | 2 +- src/Databases/DatabaseLazy.h | 2 +- src/Databases/DatabaseOnDisk.cpp | 10 +++++----- src/Databases/DatabaseOnDisk.h | 4 ++-- src/Databases/DatabaseOrdinary.cpp | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 3fb6d30fcb8..2ccdd8510a8 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -52,7 +52,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, LoadingStrictnessLevel /*mode*/) { - iterateMetadataFiles(local_context, [this, &local_context](const String & file_name) + iterateMetadataFiles([this, &local_context](const String & file_name) { const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4)); diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 41cfb751141..aeac130594f 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -12,7 +12,7 @@ class DatabaseLazyIterator; class Context; /** Lazy engine of databases. 
- * Works like DatabaseOrdinary, but stores in memory only the cache. + * Works like DatabaseOrdinary, but stores only recently accessed tables in memory. * Can be used only with *Log engines. */ class DatabaseLazy final : public DatabaseOnDisk diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 734f354d9a5..c80e4def94e 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -568,14 +568,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context) assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty()); if (local_context->getSettingsRef().force_remove_data_recursively_on_drop) { - (void)fs::remove_all(local_context->getPath() + getDataPath()); + (void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove_all(getMetadataPath()); } else { try { - (void)fs::remove(local_context->getPath() + getDataPath()); + (void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove(getMetadataPath()); } catch (const fs::filesystem_error & e) @@ -613,7 +613,7 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n } } -void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const +void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const { auto process_tmp_drop_metadata_file = [&](const String & file_name) { @@ -621,7 +621,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat static const char * tmp_drop_ext = ".sql.tmp_drop"; const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext)); - if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name)) + if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name)) { fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql"); LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name)); @@ -638,7 +638,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat std::vector> metadata_files; fs::directory_iterator dir_end; - for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it) + for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it) { String file_name = dir_it->path().filename(); /// For '.svn', '.gitignore' directory and similar. 
diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 12656068643..ffc95a7c128 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -64,7 +64,7 @@ public: time_t getObjectMetadataModificationTime(const String & object_name) const override; String getDataPath() const override { return data_path; } - String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } + String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; } String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } @@ -83,7 +83,7 @@ protected: using IteratingFunction = std::function; - void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const; + void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const; ASTPtr getCreateTableQueryImpl( const String & table_name, diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 8808261654f..dd8a3f42ea8 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -55,7 +55,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated"; DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_) - : DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", context_) + : DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_) { } @@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables } }; - iterateMetadataFiles(local_context, process_metadata); + iterateMetadataFiles(process_metadata); size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count; size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries; From 07fa798ffa53216fd37dac51bceb327665fc8dda Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 16 Aug 2024 23:31:41 +0000 Subject: [PATCH 2314/2361] add total in system.one, test --- src/Storages/System/StorageSystemOne.cpp | 5 ++++- .../System/StorageSystemViewRefreshes.cpp | 2 +- ...3221_refreshable_matview_progress.reference | 2 ++ .../03221_refreshable_matview_progress.sql | 18 ++++++++++++++++++ 4 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03221_refreshable_matview_progress.reference create mode 100644 tests/queries/0_stateless/03221_refreshable_matview_progress.sql diff --git a/src/Storages/System/StorageSystemOne.cpp b/src/Storages/System/StorageSystemOne.cpp index 936d55e61a0..70377715dc3 100644 --- a/src/Storages/System/StorageSystemOne.cpp +++ b/src/Storages/System/StorageSystemOne.cpp @@ -41,7 +41,10 @@ Pipe StorageSystemOne::read( auto column = DataTypeUInt8().createColumnConst(1, 0u)->convertToFullColumnIfConst(); Chunk chunk({ std::move(column) }, 1); - return Pipe(std::make_shared(std::move(header), std::move(chunk))); + auto source = std::make_shared(std::move(header), std::move(chunk)); + source->addTotalRowsApprox(1); + + return Pipe(source); } diff --git 
a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 3941c4c39c2..30539ed6b6a 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -86,7 +86,7 @@ void StorageSystemViewRefreshes::fillData( res_columns[i++]->insert(refresh.exception_message); res_columns[i++]->insert(refresh.refresh_count); - res_columns[i++]->insert(std::min(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read, 1.0)); + res_columns[i++]->insert(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read); res_columns[i++]->insert(refresh.progress.elapsed_ns / 1e9); res_columns[i++]->insert(refresh.progress.read_rows); res_columns[i++]->insert(refresh.progress.read_bytes); diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.reference b/tests/queries/0_stateless/03221_refreshable_matview_progress.reference new file mode 100644 index 00000000000..5ed392e61c7 --- /dev/null +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.reference @@ -0,0 +1,2 @@ +0 +4 4 1 diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql new file mode 100644 index 00000000000..4794359fd2b --- /dev/null +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -0,0 +1,18 @@ +set allow_experimental_refreshable_materialized_view=1; + +CREATE MATERIALIZED VIEW 03221_rmv +REFRESH AFTER 1 SECOND +( +x UInt64 +) +ENGINE = Memory +AS SELECT number AS x +FROM numbers(3) +UNION ALL +SELECT rand64() AS x; + +SELECT sleep(2); + +SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE view = '03221_rmv'; + +DROP TABLE 03221_rmv; From 142d7b15828c9eeef145fd56d56b33d1004fb68c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 01:35:49 +0200 Subject: [PATCH 2315/2361] Miscellaneous changes in BaseDaemon --- src/Daemon/BaseDaemon.cpp | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index e7ae8ea5a1d..f75699881bd 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -16,39 +16,29 @@ #include #if defined(OS_LINUX) - #include +#include #endif #include #include #include - #include #include #include -#include #include -#include #include #include #include #include #include - #include #include #include -#include #include -#include #include -#include #include -#include #include -#include -#include #include #include #include @@ -459,13 +449,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() signal_listener_thread.start(*signal_listener); #if defined(__ELF__) && !defined(OS_FREEBSD) - String build_id_hex = SymbolIndex::instance().getBuildIDHex(); - if (build_id_hex.empty()) - build_id = ""; - else - build_id = build_id_hex; -#else - build_id = ""; + build_id = SymbolIndex::instance().getBuildIDHex(); #endif git_hash = GIT_HASH; From 83608cb7bfcbf0801994cc916ed36b91c7113307 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 01:40:07 +0200 Subject: [PATCH 2316/2361] Miscellaneous changes from #66999 (2) --- base/base/CMakeLists.txt | 3 +++ {src/Daemon => base/base}/GitHash.cpp.in | 0 src/Daemon/CMakeLists.txt | 3 --- 3 files changed, 3 insertions(+), 3 deletions(-) rename {src/Daemon => base/base}/GitHash.cpp.in (100%) diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt index 
247028b96e0..3d236f52c36 100644 --- a/base/base/CMakeLists.txt +++ b/base/base/CMakeLists.txt @@ -8,6 +8,8 @@ endif () # when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16. set (CMAKE_CXX_STANDARD 20) +configure_file(GitHash.cpp.in GitHash.generated.cpp) + set (SRCS argsToConfig.cpp cgroupsv2.cpp @@ -33,6 +35,7 @@ set (SRCS safeExit.cpp throwError.cpp Numa.cpp + GitHash.generated.cpp ) add_library (common ${SRCS}) diff --git a/src/Daemon/GitHash.cpp.in b/base/base/GitHash.cpp.in similarity index 100% rename from src/Daemon/GitHash.cpp.in rename to base/base/GitHash.cpp.in diff --git a/src/Daemon/CMakeLists.txt b/src/Daemon/CMakeLists.txt index 35ea2122dbb..2068af2200d 100644 --- a/src/Daemon/CMakeLists.txt +++ b/src/Daemon/CMakeLists.txt @@ -1,10 +1,7 @@ -configure_file(GitHash.cpp.in GitHash.generated.cpp) - add_library (daemon BaseDaemon.cpp GraphiteWriter.cpp SentryWriter.cpp - GitHash.generated.cpp ) target_link_libraries (daemon PUBLIC loggers common PRIVATE clickhouse_parsers clickhouse_common_io clickhouse_common_config) From 4b68ba23c0372331401cd327ca5849ba9d1bce8d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 03:43:53 +0200 Subject: [PATCH 2317/2361] Pretty print tuples in CREATE TABLE statements --- programs/format/Format.cpp | 6 +++++- src/Client/ClientBase.cpp | 6 +++++- src/Core/ExternalTable.cpp | 4 ++-- src/Core/Settings.h | 2 +- src/DataTypes/IDataType.cpp | 1 - src/Databases/DatabaseOnDisk.cpp | 2 +- src/Functions/formatQuery.cpp | 8 +++++++- .../InterpreterShowCreateQuery.cpp | 5 ++++- .../formatWithPossiblyHidingSecrets.h | 3 ++- src/Parsers/ASTColumnDeclaration.cpp | 8 ++------ src/Parsers/ASTDataType.cpp | 19 ++++++++++++++----- src/Parsers/ASTExpressionList.cpp | 5 ++--- src/Parsers/ASTNameTypePair.cpp | 6 +----- src/Parsers/IAST.cpp | 3 ++- src/Parsers/IAST.h | 14 +++++++++----- 15 files changed, 57 insertions(+), 35 deletions(-) diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index a434c9171e9..4af77533c53 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -264,7 +264,11 @@ int mainEntryClickHouseFormat(int argc, char ** argv) if (!backslash) { WriteBufferFromOwnString str_buf; - formatAST(*res, str_buf, hilite, oneline || approx_query_length < max_line_length); + oneline = oneline || approx_query_length < max_line_length; + IAST::FormatSettings settings(str_buf, oneline, hilite); + settings.show_secrets = true; + settings.print_pretty_type_names = !oneline; + res->format(settings); if (insert_query_payload) { diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 187ef079eda..74357d33f1c 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -331,7 +331,11 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting { output_stream << std::endl; WriteBufferFromOStream res_buf(output_stream, 4096); - formatAST(*res, res_buf); + IAST::FormatSettings format_settings(res_buf, /* one_line */ false); + format_settings.hilite = true; + format_settings.show_secrets = true; + format_settings.print_pretty_type_names = true; + res->format(format_settings); res_buf.finalize(); output_stream << std::endl << std::endl; } diff --git a/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp index c2bcf6ec651..4ff0d7092d8 100644 --- a/src/Core/ExternalTable.cpp +++ b/src/Core/ExternalTable.cpp @@ -85,7 +85,7 @@ void BaseExternalTable::parseStructureFromStructureField(const std::string & arg /// We use 
`formatWithPossiblyHidingSensitiveData` instead of `getColumnNameWithoutAlias` because `column->type` is an ASTFunction. /// `getColumnNameWithoutAlias` will return name of the function with `(arguments)` even if arguments is empty. if (column) - structure.emplace_back(column->name, column->type->formatWithPossiblyHidingSensitiveData(0, true, true)); + structure.emplace_back(column->name, column->type->formatWithPossiblyHidingSensitiveData(0, true, true, false)); else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Error while parsing table structure: expected column definition, got {}", child->formatForErrorMessage()); } @@ -102,7 +102,7 @@ void BaseExternalTable::parseStructureFromTypesField(const std::string & argumen throw Exception(ErrorCodes::BAD_ARGUMENTS, "Error while parsing table structure: {}", error); for (size_t i = 0; i < type_list_raw->children.size(); ++i) - structure.emplace_back("_" + toString(i + 1), type_list_raw->children[i]->formatWithPossiblyHidingSensitiveData(0, true, true)); + structure.emplace_back("_" + toString(i + 1), type_list_raw->children[i]->formatWithPossiblyHidingSensitiveData(0, true, true, false)); } void BaseExternalTable::initSampleBlock() diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0d84ad9022a..1a71494c8cd 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -882,7 +882,7 @@ class IColumn; M(Bool, use_json_alias_for_old_object_type, false, "When enabled, JSON type alias will create old experimental Object type instead of a new JSON type", 0) \ M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0) \ M(Bool, create_index_ignore_unique, false, "Ignore UNIQUE keyword in CREATE UNIQUE INDEX. Made for SQL compatibility tests.", 0) \ - M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \ + M(Bool, print_pretty_type_names, true, "Print pretty type names in the DESCRIBE query and `toTypeName` function", 0) \ M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \ M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0) \ M(SQLSecurityType, default_normal_view_sql_security, SQLSecurityType::INVOKER, "Allows to set a default value for SQL SECURITY option when creating a normal view.", 0) \ diff --git a/src/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp index 7fd8a85aeca..49e5b2d022e 100644 --- a/src/DataTypes/IDataType.cpp +++ b/src/DataTypes/IDataType.cpp @@ -8,7 +8,6 @@ #include #include -#include #include #include diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 734f354d9a5..4e1ddd8cc77 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -504,7 +504,7 @@ void DatabaseOnDisk::renameTable( } -/// It returns create table statement (even if table is detached) +/// It returns the create table statement (even if table is detached) ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, ContextPtr, bool throw_on_error) const { ASTPtr ast; diff --git a/src/Functions/formatQuery.cpp b/src/Functions/formatQuery.cpp index 9591ea95254..be633bdfe37 100644 --- a/src/Functions/formatQuery.cpp +++ b/src/Functions/formatQuery.cpp @@ -43,6 +43,7 @@ public: max_query_size = settings.max_query_size; max_parser_depth = 
settings.max_parser_depth; max_parser_backtracks = settings.max_parser_backtracks; + print_pretty_type_names = settings.print_pretty_type_names; } String getName() const override { return name; } @@ -138,7 +139,11 @@ private: } } - formatAST(*ast, buf, /*hilite*/ false, /*single_line*/ output_formatting == OutputFormatting::SingleLine); + IAST::FormatSettings settings(buf, output_formatting == OutputFormatting::SingleLine, /*hilite*/ false); + settings.show_secrets = true; + settings.print_pretty_type_names = print_pretty_type_names; + ast->format(settings); + auto formatted = buf.stringView(); const size_t res_data_new_size = res_data_size + formatted.size() + 1; @@ -165,6 +170,7 @@ private: size_t max_query_size; size_t max_parser_depth; size_t max_parser_backtracks; + bool print_pretty_type_names; }; } diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index e5549b2e539..7af6b4948f8 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -97,7 +97,10 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl() } MutableColumnPtr column = ColumnString::create(); - column->insert(format({.ctx = getContext(), .query = *create_query, .one_line = false})); + column->insert(format({ + .ctx = getContext(), + .query = *create_query, + .one_line = false})); return QueryPipeline(std::make_shared(Block{{ std::move(column), diff --git a/src/Interpreters/formatWithPossiblyHidingSecrets.h b/src/Interpreters/formatWithPossiblyHidingSecrets.h index ea8c295b169..14e84f1d1a4 100644 --- a/src/Interpreters/formatWithPossiblyHidingSecrets.h +++ b/src/Interpreters/formatWithPossiblyHidingSecrets.h @@ -25,7 +25,8 @@ inline String format(const SecretHidingFormatSettings & settings) && settings.ctx->getSettingsRef().format_display_secrets_in_show_and_select && settings.ctx->getAccess()->isGranted(AccessType::displaySecretsInShowAndSelect); - return settings.query.formatWithPossiblyHidingSensitiveData(settings.max_length, settings.one_line, show_secrets); + return settings.query.formatWithPossiblyHidingSensitiveData( + settings.max_length, settings.one_line, show_secrets, settings.ctx->getSettingsRef().print_pretty_type_names); } } diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index c96499095d5..23d653012f8 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -66,17 +66,13 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & format_settings, Fo { frame.need_parens = false; - /// We have to always backquote column names to avoid ambiguouty with INDEX and other declarations in CREATE query. + /// We have to always backquote column names to avoid ambiguity with INDEX and other declarations in CREATE query. format_settings.ostr << backQuote(name); if (type) { format_settings.ostr << ' '; - - FormatStateStacked type_frame = frame; - type_frame.indent = 0; - - type->formatImpl(format_settings, state, type_frame); + type->formatImpl(format_settings, state, frame); } if (null_modifier) diff --git a/src/Parsers/ASTDataType.cpp b/src/Parsers/ASTDataType.cpp index 3c17ae8c380..21f56e5f7a2 100644 --- a/src/Parsers/ASTDataType.cpp +++ b/src/Parsers/ASTDataType.cpp @@ -40,12 +40,21 @@ void ASTDataType::formatImpl(const FormatSettings & settings, FormatState & stat { settings.ostr << '(' << (settings.hilite ? 
hilite_none : ""); - for (size_t i = 0, size = arguments->children.size(); i < size; ++i) + if (!settings.one_line && settings.print_pretty_type_names && name == "Tuple") { - if (i != 0) - settings.ostr << ", "; - - arguments->children[i]->formatImpl(settings, state, frame); + ++frame.indent; + std::string indent_str = settings.one_line ? "" : "\n" + std::string(4 * frame.indent, ' '); + for (size_t i = 0, size = arguments->children.size(); i < size; ++i) + { + if (i != 0) + settings.ostr << ','; + settings.ostr << indent_str; + arguments->children[i]->formatImpl(settings, state, frame); + } + } + else + { + arguments->formatImpl(settings, state, frame); } settings.ostr << (settings.hilite ? hilite_function : "") << ')'; diff --git a/src/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp index 61ac482af82..f345b0c6a6f 100644 --- a/src/Parsers/ASTExpressionList.cpp +++ b/src/Parsers/ASTExpressionList.cpp @@ -42,7 +42,8 @@ void ASTExpressionList::formatImpl(const FormatSettings & settings, FormatState void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - std::string indent_str = "\n" + std::string(4 * (frame.indent + 1), ' '); + ++frame.indent; + std::string indent_str = "\n" + std::string(4 * frame.indent, ' '); if (frame.expression_list_prepend_whitespace) { @@ -50,8 +51,6 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For settings.ostr << ' '; } - ++frame.indent; - for (size_t i = 0, size = children.size(); i < size; ++i) { if (i && separator) diff --git a/src/Parsers/ASTNameTypePair.cpp b/src/Parsers/ASTNameTypePair.cpp index e4066081a9b..1515700365f 100644 --- a/src/Parsers/ASTNameTypePair.cpp +++ b/src/Parsers/ASTNameTypePair.cpp @@ -23,12 +23,8 @@ ASTPtr ASTNameTypePair::clone() const void ASTNameTypePair::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); - - settings.ostr << indent_str << backQuoteIfNeed(name) << ' '; + settings.ostr << backQuoteIfNeed(name) << ' '; type->formatImpl(settings, state, frame); } } - - diff --git a/src/Parsers/IAST.cpp b/src/Parsers/IAST.cpp index 37d7f458d61..5bd2c92c60a 100644 --- a/src/Parsers/IAST.cpp +++ b/src/Parsers/IAST.cpp @@ -165,11 +165,12 @@ size_t IAST::checkDepthImpl(size_t max_depth) const return res; } -String IAST::formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets) const +String IAST::formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets, bool print_pretty_type_names) const { WriteBufferFromOwnString buf; FormatSettings settings(buf, one_line); settings.show_secrets = show_secrets; + settings.print_pretty_type_names = print_pretty_type_names; format(settings); return wipeSensitiveDataAndCutToLength(buf.str(), max_length); } diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index e2cf7579667..2293d50b0ec 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -201,6 +201,7 @@ public: bool show_secrets; /// Show secret parts of the AST (e.g. passwords, encryption keys). char nl_or_ws; /// Newline or whitespace. 
LiteralEscapingStyle literal_escaping_style; + bool print_pretty_type_names; explicit FormatSettings( WriteBuffer & ostr_, @@ -209,7 +210,8 @@ public: bool always_quote_identifiers_ = false, IdentifierQuotingStyle identifier_quoting_style_ = IdentifierQuotingStyle::Backticks, bool show_secrets_ = true, - LiteralEscapingStyle literal_escaping_style_ = LiteralEscapingStyle::Regular) + LiteralEscapingStyle literal_escaping_style_ = LiteralEscapingStyle::Regular, + bool print_pretty_type_names_ = false) : ostr(ostr_) , one_line(one_line_) , hilite(hilite_) @@ -218,6 +220,7 @@ public: , show_secrets(show_secrets_) , nl_or_ws(one_line ? ' ' : '\n') , literal_escaping_style(literal_escaping_style_) + , print_pretty_type_names(print_pretty_type_names_) { } @@ -230,6 +233,7 @@ public: , show_secrets(other.show_secrets) , nl_or_ws(other.nl_or_ws) , literal_escaping_style(other.literal_escaping_style) + , print_pretty_type_names(other.print_pretty_type_names) { } @@ -251,7 +255,7 @@ public: /// The state that is copied when each node is formatted. For example, nesting level. struct FormatStateStacked { - UInt8 indent = 0; + UInt16 indent = 0; bool need_parens = false; bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element. bool expression_list_prepend_whitespace = false; /// Prepend whitespace (if it is required) @@ -274,7 +278,7 @@ public: /// Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied. /// You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience. - String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets) const; + String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets, bool print_pretty_type_names) const; /** formatForLogging and formatForErrorMessage always hide secrets. 
This inconsistent * behaviour is due to the fact such functions are called from Client which knows nothing about @@ -283,12 +287,12 @@ public: */ String formatForLogging(size_t max_length = 0) const { - return formatWithPossiblyHidingSensitiveData(max_length, true, false); + return formatWithPossiblyHidingSensitiveData(max_length, true, false, false); } String formatForErrorMessage() const { - return formatWithPossiblyHidingSensitiveData(0, true, false); + return formatWithPossiblyHidingSensitiveData(0, true, false, false); } virtual bool hasSecretParts() const { return childrenHaveSecretParts(); } From 566e043c2c84a979ca1c05996f6e3a4303708bff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 03:44:36 +0200 Subject: [PATCH 2318/2361] Add a test --- ...print_pretty_tuples_create_query.reference | 56 +++++++++++++++++++ .../03227_print_pretty_tuples_create_query.sh | 35 ++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference create mode 100755 tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh diff --git a/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference new file mode 100644 index 00000000000..c65dc32a224 --- /dev/null +++ b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference @@ -0,0 +1,56 @@ + +SHOW CREATE TABLE: +CREATE TABLE test.test +( + `x` Tuple( + a String, + b Array(Tuple( + c Tuple( + e String), + d String))), + `y` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192 +CREATE TABLE test.test +( + `x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), + `y` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192 + +clickhouse-format: +CREATE TABLE test +( + `x` Tuple( + a String, + b Array(Tuple( + c Tuple( + e String), + d String))), + `y` String +) +ORDER BY tuple() +CREATE TABLE test (`x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), `y` String) ORDER BY tuple() + +formatQuery: +CREATE TABLE test +( + `x` Tuple( + a String, + b Array(Tuple( + c Tuple( + e String), + d String))), + `y` String +) +ORDER BY tuple() +CREATE TABLE test +( + `x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), + `y` String +) +ORDER BY tuple() diff --git a/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh new file mode 100755 index 00000000000..e5614f9f228 --- /dev/null +++ b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-asan, no-msan, no-tsan +# ^ requires S3 + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +echo +echo "SHOW CREATE TABLE:" +${CLICKHOUSE_CLIENT} --output-format Raw --query " + DROP TABLE IF EXISTS test; + CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY (); + SET print_pretty_type_names = 1; + SHOW CREATE TABLE test; + SET print_pretty_type_names = 0; + SHOW CREATE TABLE test; + DROP TABLE test; +" + +echo +echo "clickhouse-format:" +${CLICKHOUSE_FORMAT} --query " + CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY () +" +${CLICKHOUSE_FORMAT} --oneline --query " + CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY () +" + +echo +echo "formatQuery:" +${CLICKHOUSE_CLIENT} --output-format Raw --query " + SELECT formatQuery('CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY ()') SETTINGS print_pretty_type_names = 1; + SELECT formatQuery('CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY ()') SETTINGS print_pretty_type_names = 0; +" From 3065b386a4e033a802bbc4855d5113e814648848 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 03:49:45 +0200 Subject: [PATCH 2319/2361] Update documentation --- src/Core/Settings.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 1a71494c8cd..d8837d26e54 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -882,7 +882,7 @@ class IColumn; M(Bool, use_json_alias_for_old_object_type, false, "When enabled, JSON type alias will create old experimental Object type instead of a new JSON type", 0) \ M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0) \ M(Bool, create_index_ignore_unique, false, "Ignore UNIQUE keyword in CREATE UNIQUE INDEX. 
Made for SQL compatibility tests.", 0) \ - M(Bool, print_pretty_type_names, true, "Print pretty type names in the DESCRIBE query and `toTypeName` function", 0) \ + M(Bool, print_pretty_type_names, true, "Print pretty type names in the DESCRIBE query and `toTypeName` function, as well as in the `SHOW CREATE TABLE` query and the `formatQuery` function.", 0) \ M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \ M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0) \ M(SQLSecurityType, default_normal_view_sql_security, SQLSecurityType::INVOKER, "Allows to set a default value for SQL SECURITY option when creating a normal view.", 0) \ From 02ec4e2f92ac769f92aebdb714d0d8da1a924984 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 04:00:31 +0200 Subject: [PATCH 2320/2361] Fix build --- src/Parsers/ExpressionElementParsers.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 726326bfc85..61b5723072e 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -943,7 +943,6 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (ParserToken(DoubleColon).ignore(pos, expected) && ParserDataType().parse(pos, type_ast, expected)) { - String s; size_t data_size = data_end - data_begin; if (string_literal) { From 7a5df67b3b3b5bd7a8481562e0293150427fef90 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 17 Aug 2024 05:08:58 +0200 Subject: [PATCH 2321/2361] Fix style --- src/Interpreters/InterpreterShowCreateQuery.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index 7af6b4948f8..3de6b755609 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -97,10 +97,12 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl() } MutableColumnPtr column = ColumnString::create(); - column->insert(format({ + column->insert(format( + { .ctx = getContext(), .query = *create_query, - .one_line = false})); + .one_line = false + })); return QueryPipeline(std::make_shared(Block{{ std::move(column), From 330086c621c860524b68ce7598d12c8db958101d Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 17 Aug 2024 03:46:33 +0000 Subject: [PATCH 2322/2361] update 02136_scalar_progress results according to fixed bug --- tests/queries/0_stateless/02136_scalar_progress.reference | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/02136_scalar_progress.reference b/tests/queries/0_stateless/02136_scalar_progress.reference index 5378c52de89..b8957f78e6d 100644 --- a/tests/queries/0_stateless/02136_scalar_progress.reference +++ b/tests/queries/0_stateless/02136_scalar_progress.reference @@ -1,6 +1,7 @@ < X-ClickHouse-Progress: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} < X-ClickHouse-Progress: {"read_rows":"65505","read_bytes":"524040","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} < X-ClickHouse-Progress: 
{"read_rows":"100000","read_bytes":"800000","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} -< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} -< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"1","result_bytes":"272"} -< X-ClickHouse-Summary: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"1","result_bytes":"272"} +< X-ClickHouse-Progress: {"read_rows":"100000","read_bytes":"800000","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"0","result_bytes":"0"} +< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"0","result_bytes":"0"} +< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"1","result_bytes":"272"} +< X-ClickHouse-Summary: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"1","result_bytes":"272"} From 00891a2dd8f69dde0b1fe364b6891fd3629d5dbe Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 17 Aug 2024 13:57:24 +0000 Subject: [PATCH 2323/2361] fix test --- .../queries/0_stateless/03221_refreshable_matview_progress.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql index 4794359fd2b..30228277bb5 100644 --- a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -1,3 +1,5 @@ +-- Tags: no-ordinary-database + set allow_experimental_refreshable_materialized_view=1; CREATE MATERIALIZED VIEW 03221_rmv From 9dee9ecfb4adee4cff099f13afe61bdc2c38170e Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Sat, 17 Aug 2024 10:33:35 -0600 Subject: [PATCH 2324/2361] Fix incorrect default value for postgresql_connection_pool_auto_close_connection in docs --- docs/en/operations/settings/settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index de601fe02dc..5bf1fe197ae 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1381,7 +1381,7 @@ Default value: `2`. Close connection before returning connection to the pool. -Default value: true. +Default value: false. 
## odbc_bridge_connection_pool_size {#odbc-bridge-connection-pool-size} From a42b12725ab37df74a96e85f7c644c90bd6e30f6 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 16 Aug 2024 17:39:09 +0200 Subject: [PATCH 2325/2361] CI: Native build for package_aarch64 --- tests/ci/ci_config.py | 3 ++- tests/ci/ci_definitions.py | 1 + tests/ci/test_ci_config.py | 14 ++++++++++---- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 7a19eb6f827..173c6c9c931 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -94,7 +94,8 @@ class CI: package_type="deb", static_binary_name="aarch64", additional_pkgs=True, - ) + ), + runner_type=Runners.BUILDER_ARM, ), BuildNames.PACKAGE_ASAN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 48847b0d7a6..1bed9db06f2 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -57,6 +57,7 @@ class Runners(metaclass=WithIter): """ BUILDER = "builder" + BUILDER_ARM = "builder-aarch64" STYLE_CHECKER = "style-checker" STYLE_CHECKER_ARM = "style-checker-aarch64" FUNC_TESTER = "func-tester" diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 525b3bf367b..c3e55aeac06 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -35,10 +35,16 @@ class TestCIConfig(unittest.TestCase): f"Job [{job}] must have style-checker(-aarch64) runner", ) elif "binary_" in job.lower() or "package_" in job.lower(): - self.assertTrue( - CI.JOB_CONFIGS[job].runner_type == CI.Runners.BUILDER, - f"Job [{job}] must have [{CI.Runners.BUILDER}] runner", - ) + if job.lower() == CI.BuildNames.PACKAGE_AARCH64: + self.assertTrue( + CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER_ARM,), + f"Job [{job}] must have [{CI.Runners.BUILDER_ARM}] runner", + ) + else: + self.assertTrue( + CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER,), + f"Job [{job}] must have [{CI.Runners.BUILDER}] runner", + ) elif "aarch64" in job.lower(): self.assertTrue( "aarch" in CI.JOB_CONFIGS[job].runner_type, From 7432400fd0c07b7c967f47b1536706b8f791fcb1 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 16 Aug 2024 21:06:58 +0200 Subject: [PATCH 2326/2361] revert hacks made to prevent OOM in aarch64 --- cmake/limit_jobs.cmake | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake index 17d8dd42a2c..8e48fc9b9d8 100644 --- a/cmake/limit_jobs.cmake +++ b/cmake/limit_jobs.cmake @@ -42,19 +42,9 @@ endif () # But use 2 parallel jobs, since: # - this is what llvm does # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO) - if (ARCH_AARCH64) - # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency - message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.") - set (PARALLEL_LINK_JOBS 1) - if (LINKER_NAME MATCHES "lld") - math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4) - set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}") - endif() - elseif (PARALLEL_LINK_JOBS GREATER 2) - message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.") - set (PARALLEL_LINK_JOBS 2) - endif () +if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND 
ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2) + message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.") + set (PARALLEL_LINK_JOBS 2) endif() message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).") From c68b597eee752d5921d4469af01104a27681296a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 17 Aug 2024 20:03:18 +0000 Subject: [PATCH 2327/2361] fix test --- .../queries/0_stateless/03221_refreshable_matview_progress.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql index 30228277bb5..1be276c485c 100644 --- a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -15,6 +15,6 @@ SELECT rand64() AS x; SELECT sleep(2); -SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE view = '03221_rmv'; +SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE database = currentDatabase() and view = '03221_rmv'; DROP TABLE 03221_rmv; From a36688b07387822cae18eca1fbc798a517119e02 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 03:02:20 +0200 Subject: [PATCH 2328/2361] Fix error --- src/Parsers/ASTDataType.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Parsers/ASTDataType.cpp b/src/Parsers/ASTDataType.cpp index 21f56e5f7a2..4211347fb74 100644 --- a/src/Parsers/ASTDataType.cpp +++ b/src/Parsers/ASTDataType.cpp @@ -54,6 +54,7 @@ void ASTDataType::formatImpl(const FormatSettings & settings, FormatState & stat } else { + frame.expression_list_prepend_whitespace = false; arguments->formatImpl(settings, state, frame); } From 3c021e02b69f9237d9765d6295fd843ad2503398 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 03:02:26 +0200 Subject: [PATCH 2329/2361] Fix tests --- .../0_stateless/01458_named_tuple_millin.reference | 8 ++++++-- .../01504_compression_multiple_streams.reference | 8 ++++---- ...01548_create_table_compound_column_format.reference | 3 ++- .../01881_aggregate_functions_versioning.reference | 2 +- .../02117_show_create_table_system.reference | 10 ++++++++-- .../02286_tuple_numeric_identifier.reference | 2 +- .../02907_backup_restore_flatten_nested.reference | 4 ++-- 7 files changed, 24 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/01458_named_tuple_millin.reference b/tests/queries/0_stateless/01458_named_tuple_millin.reference index 954dfe36563..86561570985 100644 --- a/tests/queries/0_stateless/01458_named_tuple_millin.reference +++ b/tests/queries/0_stateless/01458_named_tuple_millin.reference @@ -1,12 +1,16 @@ CREATE TABLE default.tuple ( - `j` Tuple(a Int8, b String) + `j` Tuple( + a Int8, + b String) ) ENGINE = Memory j Tuple(\n a Int8,\n b String) CREATE TABLE default.tuple ( - `j` Tuple(a Int8, b String) + `j` Tuple( + a Int8, + b String) ) ENGINE = Memory j Tuple(\n a Int8,\n b String) diff --git a/tests/queries/0_stateless/01504_compression_multiple_streams.reference b/tests/queries/0_stateless/01504_compression_multiple_streams.reference index 4d3aba66526..14cdce72044 100644 --- a/tests/queries/0_stateless/01504_compression_multiple_streams.reference +++ b/tests/queries/0_stateless/01504_compression_multiple_streams.reference 
@@ -1,20 +1,20 @@ 1 1 [[1]] (1,[1]) 1 1 [[1]] (1,[1]) -CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(T64, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(T64, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) -CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) 3 3 [[3]] (3,[3]) 1 1 [[1]] (1,[1]) 1 1 [[1]] (1,[1]) -CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) -CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 1 1 
[[1]] (1,[1]) 2 2 [[2]] (2,[2]) 3 3 [[3]] (3,[3]) diff --git a/tests/queries/0_stateless/01548_create_table_compound_column_format.reference b/tests/queries/0_stateless/01548_create_table_compound_column_format.reference index 21e31e8f034..c23cc57548b 100644 --- a/tests/queries/0_stateless/01548_create_table_compound_column_format.reference +++ b/tests/queries/0_stateless/01548_create_table_compound_column_format.reference @@ -7,6 +7,7 @@ ENGINE = TinyLog CREATE TABLE test ( `a` Int64, - `b` Tuple(a Int64) + `b` Tuple( + a Int64) ) ENGINE = TinyLog diff --git a/tests/queries/0_stateless/01881_aggregate_functions_versioning.reference b/tests/queries/0_stateless/01881_aggregate_functions_versioning.reference index c30c4ca7e74..e15f312c2c8 100644 --- a/tests/queries/0_stateless/01881_aggregate_functions_versioning.reference +++ b/tests/queries/0_stateless/01881_aggregate_functions_versioning.reference @@ -1 +1 @@ -CREATE TABLE default.test_table\n(\n `col1` DateTime,\n `col2` Int64,\n `col3` AggregateFunction(1, sumMap, Tuple(Array(UInt8), Array(UInt8)))\n)\nENGINE = AggregatingMergeTree\nORDER BY (col1, col2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test_table\n(\n `col1` DateTime,\n `col2` Int64,\n `col3` AggregateFunction(1, sumMap, Tuple(\n Array(UInt8),\n Array(UInt8)))\n)\nENGINE = AggregatingMergeTree\nORDER BY (col1, col2)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 32e8b2f4312..638a46a142f 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -510,9 +510,15 @@ CREATE TABLE system.parts `rows_where_ttl_info.max` Array(DateTime), `projections` Array(String), `visible` UInt8, - `creation_tid` Tuple(UInt64, UInt64, UUID), + `creation_tid` Tuple( + UInt64, + UInt64, + UUID), `removal_tid_lock` UInt64, - `removal_tid` Tuple(UInt64, UInt64, UUID), + `removal_tid` Tuple( + UInt64, + UInt64, + UUID), `creation_csn` UInt64, `removal_csn` UInt64, `has_lightweight_delete` UInt8, diff --git a/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference b/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference index 21348493d1d..916cdaf83cd 100644 --- a/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference +++ b/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.t_tuple_numeric\n(\n `t` Tuple(`1` Tuple(`2` Int32, `3` Int32), `4` Int32)\n)\nENGINE = Memory +CREATE TABLE default.t_tuple_numeric\n(\n `t` Tuple(\n `1` Tuple(\n `2` Int32,\n `3` Int32),\n `4` Int32)\n)\nENGINE = Memory {"t":{"1":{"2":2,"3":3},"4":4}} 2 3 4 2 3 4 diff --git a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference index aa8f22f590a..0db19f0591a 100644 --- a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference +++ b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference @@ -1,8 +1,8 @@ BACKUP_CREATED -CREATE TABLE default.test\n(\n `test` Array(Tuple(foo String, bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test\n(\n `test` Array(Tuple(\n foo String,\n bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 BACKUP_CREATED CREATE TABLE default.test2\n(\n 
`test` Nested(foo String, bar Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 RESTORED -CREATE TABLE default.test\n(\n `test` Array(Tuple(foo String, bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test\n(\n `test` Array(Tuple(\n foo String,\n bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 RESTORED CREATE TABLE default.test2\n(\n `test` Nested(foo String, bar Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 From 60dd7e962a19e92cef4e3ab40d2607b3f5a59e90 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 05:56:27 +0200 Subject: [PATCH 2330/2361] Fix tests --- programs/format/Format.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp index 4af77533c53..f07387bd395 100644 --- a/programs/format/Format.cpp +++ b/programs/format/Format.cpp @@ -264,10 +264,10 @@ int mainEntryClickHouseFormat(int argc, char ** argv) if (!backslash) { WriteBufferFromOwnString str_buf; - oneline = oneline || approx_query_length < max_line_length; - IAST::FormatSettings settings(str_buf, oneline, hilite); + bool oneline_current_query = oneline || approx_query_length < max_line_length; + IAST::FormatSettings settings(str_buf, oneline_current_query, hilite); settings.show_secrets = true; - settings.print_pretty_type_names = !oneline; + settings.print_pretty_type_names = !oneline_current_query; res->format(settings); if (insert_query_payload) @@ -311,7 +311,11 @@ int mainEntryClickHouseFormat(int argc, char ** argv) else { WriteBufferFromOwnString str_buf; - formatAST(*res, str_buf, hilite, oneline); + bool oneline_current_query = oneline || approx_query_length < max_line_length; + IAST::FormatSettings settings(str_buf, oneline_current_query, hilite); + settings.show_secrets = true; + settings.print_pretty_type_names = !oneline_current_query; + res->format(settings); auto res_string = str_buf.str(); WriteBufferFromOStream res_cout(std::cout, 4096); From 1691e4c4977dbaf44bedf61cdf248e7a1997d407 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 06:14:21 +0200 Subject: [PATCH 2331/2361] Fix test --- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh index ee702300094..fbd7d897fb8 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -8,7 +8,8 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 -cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} --max_block_size 8192 --max_insert_block_size 8192 --max_insert_threads 1 --min_insert_block_size_bytes 0 --min_insert_block_size_rows 0 -q "INSERT INTO ghdata FORMAT JSONAsObject" +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \ + --max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsObject" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" @@ -16,7 +17,7 @@ ${CLICKHOUSE_CLIENT} -q \ "SELECT 
data.repo.name, count() AS stars FROM ghdata \ WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" -${CLICKHOUSE_CLIENT} --allow_experimental_analyzer=1 -q \ +${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ ARRAY JOIN data.payload.commits[].author.name \ GROUP BY name ORDER BY c DESC, name LIMIT 5" From 7f2c61799dc984add3056f9c77c4e64476ad917b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 06:14:59 +0200 Subject: [PATCH 2332/2361] Fix test --- .../0_stateless/01825_new_type_json_ghdata_insert_select.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh index ef87034ff89..2afec5ba7fe 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh @@ -13,10 +13,10 @@ ${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON) ENGINE = MergeTree OR ${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 -cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2 FORMAT JSONAsObject" +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2 FORMAT JSONAsObject" cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_string FORMAT JSONAsString" -${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_from_string SELECT data FROM ghdata_2_string" +${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2_from_string SELECT data FROM ghdata_2_string" ${CLICKHOUSE_CLIENT} -q "SELECT \ (SELECT mapSort(groupUniqArrayMap(JSONAllPathsWithTypes(data))), sum(cityHash64(toString(data))) FROM ghdata_2_from_string) = \ From d34d41d1e240f0df6256a827661174a30011f204 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 18 Aug 2024 04:34:38 +0000 Subject: [PATCH 2333/2361] fix test --- .../queries/0_stateless/03221_refreshable_matview_progress.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql index 1be276c485c..de8de41fd04 100644 --- a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -16,5 +16,3 @@ SELECT rand64() AS x; SELECT sleep(2); SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE database = currentDatabase() and view = '03221_rmv'; - -DROP TABLE 03221_rmv; From 03ab872f5c628613e0e187d4e1ad3ca9b9148bf6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 07:34:44 +0200 Subject: [PATCH 2334/2361] Fix error --- src/IO/S3/PocoHTTPClient.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 3b7ec4d1d9c..eb65460ce13 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -20,6 +20,7 @@ #include 
#include + namespace Aws::Http::Standard { class StandardHttpResponse; @@ -27,15 +28,17 @@ class StandardHttpResponse; namespace DB { - class Context; } + namespace DB::S3 { + class ClientFactory; class PocoHTTPClient; + struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration { std::function per_request_configuration; @@ -76,6 +79,7 @@ private: friend ClientFactory; }; + class PocoHTTPResponse : public Aws::Http::Standard::StandardHttpResponse { public: @@ -115,6 +119,7 @@ private: Aws::Utils::Stream::ResponseStream body_stream; }; + class PocoHTTPClient : public Aws::Http::HttpClient { public: @@ -170,10 +175,10 @@ protected: std::function error_report; ConnectionTimeouts timeouts; const RemoteHostFilter & remote_host_filter; - unsigned int s3_max_redirects; + unsigned int s3_max_redirects = 0; bool s3_use_adaptive_timeouts = true; bool enable_s3_requests_logging = false; - bool for_disk_s3; + bool for_disk_s3 = false; /// Limits get request per second rate for GET, SELECT and all other requests, excluding throttled by put throttler /// (i.e. throttles GetObject, HeadObject) From 4e91c663a62529d27ddcf6d364338f001e1706eb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 08:28:16 +0200 Subject: [PATCH 2335/2361] Fix error --- src/IO/S3/PocoHTTPClient.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index dc7dcdc6793..3e060e21c51 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -397,8 +397,11 @@ void PocoHTTPClient::makeRequestInternalImpl( try { - const auto proxy_configuration = per_request_configuration(); - for (unsigned int attempt = 0; attempt <= s3_max_redirects; ++attempt) + ProxyConfiguration proxy_configuration; + if (per_request_configuration) + proxy_configuration = per_request_configuration(); + + for (size_t attempt = 0; attempt <= s3_max_redirects; ++attempt) { Poco::URI target_uri(uri); @@ -516,7 +519,6 @@ void PocoHTTPClient::makeRequestInternalImpl( LOG_TEST(log, "Redirecting request to new location: {}", location); addMetric(request, S3MetricType::Redirects); - continue; } @@ -564,9 +566,9 @@ void PocoHTTPClient::makeRequestInternalImpl( } else { - if (status_code == 429 || status_code == 503) - { // API throttling + { + /// API throttling addMetric(request, S3MetricType::Throttling); } else if (status_code >= 300) From 7071942858e44053b92b8386e68251be7718e3b5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 09:05:45 +0200 Subject: [PATCH 2336/2361] Miscellanous changes from #66999 --- programs/keeper/Keeper.cpp | 4 +++- src/Daemon/BaseDaemon.cpp | 4 +--- src/Daemon/BaseDaemon.h | 1 - 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index a447a9e50f6..ced661d9772 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -66,6 +66,8 @@ /// A minimal file used when the keeper is run without installation INCBIN(keeper_resource_embedded_xml, SOURCE_DIR "/programs/keeper/keeper_embedded.xml"); +extern const char * GIT_HASH; + int mainEntryClickHouseKeeper(int argc, char ** argv) { DB::Keeper app; @@ -675,7 +677,7 @@ void Keeper::logRevision() const "Starting ClickHouse Keeper {} (revision: {}, git hash: {}, build id: {}), PID {}", VERSION_STRING, ClickHouseRevision::getVersionRevision(), - git_hash.empty() ? "" : git_hash, + GIT_HASH, build_id.empty() ? 
"" : build_id, getpid()); } diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index f75699881bd..c42bf7641d2 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -452,8 +452,6 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() build_id = SymbolIndex::instance().getBuildIDHex(); #endif - git_hash = GIT_HASH; - #if defined(OS_LINUX) std::string executable_path = getExecutablePath(); @@ -466,7 +464,7 @@ void BaseDaemon::logRevision() const { logger().information("Starting " + std::string{VERSION_FULL} + " (revision: " + std::to_string(ClickHouseRevision::getVersionRevision()) - + ", git hash: " + (git_hash.empty() ? "" : git_hash) + + ", git hash: " + std::string(GIT_HASH) + ", build id: " + (build_id.empty() ? "" : build_id) + ")" + ", PID " + std::to_string(getpid())); } diff --git a/src/Daemon/BaseDaemon.h b/src/Daemon/BaseDaemon.h index b15aa74fcf3..a6efa94a567 100644 --- a/src/Daemon/BaseDaemon.h +++ b/src/Daemon/BaseDaemon.h @@ -165,7 +165,6 @@ protected: Poco::Util::AbstractConfiguration * last_configuration = nullptr; String build_id; - String git_hash; String stored_binary_hash; bool should_setup_watchdog = false; From 2a48aaad561f52539edbede94015c35e264bd344 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 09:12:49 +0200 Subject: [PATCH 2337/2361] Fix build --- src/Interpreters/executeQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index fe87eed5570..decc16a3704 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -786,7 +786,7 @@ static std::tuple executeQueryImpl( /// Verify that AST formatting is consistent: /// If you format AST, parse it back, and format it again, you get the same string. - String formatted1 = ast->formatWithPossiblyHidingSensitiveData(0, true, true); + String formatted1 = ast->formatWithPossiblyHidingSensitiveData(0, true, true, false); /// The query can become more verbose after formatting, so: size_t new_max_query_size = max_query_size > 0 ? 
(1000 + 2 * max_query_size) : 0; @@ -811,7 +811,7 @@ static std::tuple executeQueryImpl( chassert(ast2); - String formatted2 = ast2->formatWithPossiblyHidingSensitiveData(0, true, true); + String formatted2 = ast2->formatWithPossiblyHidingSensitiveData(0, true, true, false); if (formatted1 != formatted2) throw Exception(ErrorCodes::LOGICAL_ERROR, From 4f7e3e8374acd98496092e8f8a219af6755a2f70 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 09:38:00 +0200 Subject: [PATCH 2338/2361] Fix test 01017_uniqCombined_memory_usage --- .../0_stateless/01017_uniqCombined_memory_usage.sql | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql index c13a0859183..eca370d94af 100644 --- a/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql +++ b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql @@ -7,7 +7,8 @@ -- sizeof(HLL) is (2^K * 6 / 8) -- hence max_memory_usage for 100 rows = (96<<10)*100 = 9830400 -SET use_uncompressed_cache = 0; +SET use_uncompressed_cache = 0; +SET memory_profiler_step = 1; -- HashTable for UInt32 (used until (1<<13) elements), hence 8192 elements SELECT 'UInt32'; @@ -31,14 +32,14 @@ SELECT 'K=16'; SELECT 'UInt32'; SET max_memory_usage = 2000000; SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(16)(number % 4096) u FROM numbers(4096 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } -SET max_memory_usage = 4915200; +SET max_memory_usage = 5230000; SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(16)(number % 4096) u FROM numbers(4096 * 100) GROUP BY k); -- HashTable for UInt64 (used until (1<<11) elements), hence 2048 elements SELECT 'UInt64'; SET max_memory_usage = 2000000; SELECT sum(u) FROM (SELECT intDiv(number, 2048) AS k, uniqCombined(16)(reinterpretAsString(number % 2048)) u FROM numbers(2048 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } -SET max_memory_usage = 4915200; +SET max_memory_usage = 5900000; SELECT sum(u) FROM (SELECT intDiv(number, 2048) AS k, uniqCombined(16)(reinterpretAsString(number % 2048)) u FROM numbers(2048 * 100) GROUP BY k); SELECT 'K=18'; From ec8554c85aeae5dcc8367ce09d093c5526ef1d47 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 09:41:29 +0200 Subject: [PATCH 2339/2361] Fix build --- src/Common/SignalHandlers.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Common/SignalHandlers.cpp b/src/Common/SignalHandlers.cpp index c4358da2453..6ac6cbcae29 100644 --- a/src/Common/SignalHandlers.cpp +++ b/src/Common/SignalHandlers.cpp @@ -18,13 +18,17 @@ namespace DB { + namespace ErrorCodes { extern const int CANNOT_SET_SIGNAL_HANDLER; extern const int CANNOT_SEND_SIGNAL; } + } +extern const char * GIT_HASH; + using namespace DB; @@ -334,7 +338,7 @@ void SignalListener::onTerminate(std::string_view message, UInt32 thread_num) co size_t pos = message.find('\n'); LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) {}", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", thread_num, message.substr(0, pos)); + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, message.substr(0, pos)); /// Print trace from std::terminate exception line-by-line to make it easy for grep. 
while (pos != std::string_view::npos) @@ -368,7 +372,7 @@ try LOG_FATAL(log, "########## Short fault info ############"); LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {}", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, sig); std::string signal_description = "Unknown signal"; @@ -434,13 +438,13 @@ try if (query_id.empty()) { LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (no query) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, signal_description, sig); } else { LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, query_id, query, signal_description, sig); } From 01841d00eab668ecfa96a702e7f3cda68fce52e4 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 18 Aug 2024 12:18:24 +0000 Subject: [PATCH 2340/2361] fix test --- .../0_stateless/03221_refreshable_matview_progress.sql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql index de8de41fd04..ecb385c9bfa 100644 --- a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -3,7 +3,7 @@ set allow_experimental_refreshable_materialized_view=1; CREATE MATERIALIZED VIEW 03221_rmv -REFRESH AFTER 1 SECOND +REFRESH AFTER 10 SECOND ( x UInt64 ) @@ -16,3 +16,5 @@ SELECT rand64() AS x; SELECT sleep(2); SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE database = currentDatabase() and view = '03221_rmv'; + +DROP TABLE 03221_rmv; From f5308635d193859cdb19a71040006278a21bdc51 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 18 Aug 2024 15:25:07 +0200 Subject: [PATCH 2341/2361] Revert "Improve compatibility of `upper/lowerUTF8` with Spark" --- .gitmodules | 7 +- contrib/icu | 2 +- src/Common/examples/CMakeLists.txt | 5 - src/Common/examples/utf8_upper_lower.cpp | 27 -- src/Functions/LowerUpperImpl.h | 1 + src/Functions/LowerUpperUTF8Impl.h | 283 +++++++++++++++--- src/Functions/initcapUTF8.cpp | 3 +- src/Functions/lowerUTF8.cpp | 25 +- src/Functions/upperUTF8.cpp | 24 +- .../00170_lower_upper_utf8.reference | 4 - .../0_stateless/00170_lower_upper_utf8.sql | 11 - .../00233_position_function_family.sql | 3 - .../0_stateless/00761_lower_utf8_bug.sql | 3 - .../0_stateless/01278_random_string_utf8.sql | 3 - .../0_stateless/01431_utf8_ubsan.reference | 4 +- .../queries/0_stateless/01431_utf8_ubsan.sql | 3 - .../0_stateless/01590_countSubstrings.sql | 3 - ...71_lower_upper_utf8_row_overlaps.reference | 4 +- .../02071_lower_upper_utf8_row_overlaps.sql | 3 - ...new_functions_must_be_documented.reference | 2 + .../02514_if_with_lazy_low_cardinality.sql | 3 - .../0_stateless/02807_lower_utf8_msan.sql | 3 - tests/queries/0_stateless/03015_peder1001.sql | 3 - 23 files changed, 265 insertions(+), 164 deletions(-) delete mode 100644 
src/Common/examples/utf8_upper_lower.cpp diff --git a/.gitmodules b/.gitmodules index f18844e5eb4..cdee6a43ad8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -106,6 +106,9 @@ [submodule "contrib/icudata"] path = contrib/icudata url = https://github.com/ClickHouse/icudata +[submodule "contrib/icu"] + path = contrib/icu + url = https://github.com/unicode-org/icu [submodule "contrib/flatbuffers"] path = contrib/flatbuffers url = https://github.com/ClickHouse/flatbuffers @@ -366,7 +369,3 @@ [submodule "contrib/numactl"] path = contrib/numactl url = https://github.com/ClickHouse/numactl.git -[submodule "contrib/icu"] - path = contrib/icu - url = https://github.com/ClickHouse/icu - branch = ClickHouse/release-75-1 diff --git a/contrib/icu b/contrib/icu index 4216173eeeb..7750081bda4 160000 --- a/contrib/icu +++ b/contrib/icu @@ -1 +1 @@ -Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273 +Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625 diff --git a/src/Common/examples/CMakeLists.txt b/src/Common/examples/CMakeLists.txt index 8383e80d09d..69580d4ad0e 100644 --- a/src/Common/examples/CMakeLists.txt +++ b/src/Common/examples/CMakeLists.txt @@ -92,8 +92,3 @@ endif() clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp) target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io clickhouse_common_config) - -if (TARGET ch_contrib::icu) - clickhouse_add_executable (utf8_upper_lower utf8_upper_lower.cpp) - target_link_libraries (utf8_upper_lower PRIVATE ch_contrib::icu) -endif () diff --git a/src/Common/examples/utf8_upper_lower.cpp b/src/Common/examples/utf8_upper_lower.cpp deleted file mode 100644 index 826e1763105..00000000000 --- a/src/Common/examples/utf8_upper_lower.cpp +++ /dev/null @@ -1,27 +0,0 @@ -#include -#include - -std::string utf8_to_lower(const std::string & input) -{ - icu::UnicodeString unicodeInput(input.c_str(), "UTF-8"); - unicodeInput.toLower(); - std::string output; - unicodeInput.toUTF8String(output); - return output; -} - -std::string utf8_to_upper(const std::string & input) -{ - icu::UnicodeString unicodeInput(input.c_str(), "UTF-8"); - unicodeInput.toUpper(); - std::string output; - unicodeInput.toUTF8String(output); - return output; -} - -int main() -{ - std::string input = "ır"; - std::cout << "upper:" << utf8_to_upper(input) << std::endl; - return 0; -} diff --git a/src/Functions/LowerUpperImpl.h b/src/Functions/LowerUpperImpl.h index a52703d10c8..d463ef96e16 100644 --- a/src/Functions/LowerUpperImpl.h +++ b/src/Functions/LowerUpperImpl.h @@ -1,6 +1,7 @@ #pragma once #include + namespace DB { diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index 5da085f48e5..eedabca5b22 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -1,14 +1,15 @@ #pragma once - -#include "config.h" - -#if USE_ICU - #include #include -#include -#include +#include +#include #include +#include + +#ifdef __SSE2__ +#include +#endif + namespace DB { @@ -18,7 +19,71 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -template +/// xor or do nothing +template +UInt8 xor_or_identity(const UInt8 c, const int mask) +{ + return c ^ mask; +} + +template <> +inline UInt8 xor_or_identity(const UInt8 c, const int) +{ + return c; +} + +/// It is caller's responsibility to ensure the presence of a valid cyrillic sequence in array +template +inline void UTF8CyrillicToCase(const UInt8 *& src, UInt8 *& dst) +{ + if (src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0x8Fu)) + { + /// 
ЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏ
+        *dst++ = xor_or_identity(*src++, 0x1);
+        *dst++ = xor_or_identity(*src++, 0x10);
+    }
+    else if (src[0] == 0xD1u && (src[1] >= 0x90u && src[1] <= 0x9Fu))
+    {
+        /// ѐёђѓєѕіїјљњћќѝўџ
+        *dst++ = xor_or_identity(*src++, 0x1);
+        *dst++ = xor_or_identity(*src++, 0x10);
+    }
+    else if (src[0] == 0xD0u && (src[1] >= 0x90u && src[1] <= 0x9Fu))
+    {
+        /// А-П
+        *dst++ = *src++;
+        *dst++ = xor_or_identity(*src++, 0x20);
+    }
+    else if (src[0] == 0xD0u && (src[1] >= 0xB0u && src[1] <= 0xBFu))
+    {
+        /// а-п
+        *dst++ = *src++;
+        *dst++ = xor_or_identity(*src++, 0x20);
+    }
+    else if (src[0] == 0xD0u && (src[1] >= 0xA0u && src[1] <= 0xAFu))
+    {
+        /// Р-Я
+        *dst++ = xor_or_identity(*src++, 0x1);
+        *dst++ = xor_or_identity(*src++, 0x20);
+    }
+    else if (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x8Fu))
+    {
+        /// р-я
+        *dst++ = xor_or_identity(*src++, 0x1);
+        *dst++ = xor_or_identity(*src++, 0x20);
+    }
+}
+
+
+/** If the string contains UTF-8 encoded text, convert it to the lower (upper) case.
+  * Note: It is assumed that after the character is converted to another case,
+  * the length of its multibyte sequence in UTF-8 does not change.
+  * Otherwise, the behavior is undefined.
+  */
+template
 struct LowerUpperUTF8Impl
 {
     static void vector(
@@ -38,46 +103,180 @@ struct LowerUpperUTF8Impl
             return;
         }

-        res_data.resize(data.size());
-        res_offsets.resize_exact(offsets.size());
-
-        String output;
-        size_t curr_offset = 0;
-        for (size_t i = 0; i < offsets.size(); ++i)
-        {
-            const auto * data_start = reinterpret_cast(&data[offsets[i - 1]]);
-            size_t size = offsets[i] - offsets[i - 1];
-
-            icu::UnicodeString input(data_start, static_cast(size), "UTF-8");
-            if constexpr (upper)
-                input.toUpper();
-            else
-                input.toLower();
-
-            output.clear();
-            input.toUTF8String(output);
-
-            /// For valid UTF-8 input strings, ICU sometimes produces output with extra '\0's at the end. Only the data before the first
-            /// '\0' is valid. It the input is not valid UTF-8, then the behavior of lower/upperUTF8 is undefined by definition. In this
-            /// case, the behavior is also reasonable.
-            const char * res_end = find_last_not_symbols_or_null<'\0'>(output.data(), output.data() + output.size());
-            size_t valid_size = res_end ? res_end - output.data() + 1 : 0;
-
-            res_data.resize(curr_offset + valid_size + 1);
-            memcpy(&res_data[curr_offset], output.data(), valid_size);
-            res_data[curr_offset + valid_size] = 0;
-
-            curr_offset += valid_size + 1;
-            res_offsets[i] = curr_offset;
-        }
+        res_data.resize_exact(data.size());
+        res_offsets.assign(offsets);
+        array(data.data(), data.data() + data.size(), offsets, res_data.data());
     }

     static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
     {
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "Functions lowerUTF8 and upperUTF8 cannot work with FixedString argument");
     }
+
+    /** Converts a single code point starting at `src` to desired case, storing result starting at `dst`.
+     *  `src` and `dst` are incremented by corresponding sequence lengths.
*/ + static bool toCase(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst, bool partial) + { + if (src[0] <= ascii_upper_bound) + { + if (*src >= not_case_lower_bound && *src <= not_case_upper_bound) + *dst++ = *src++ ^ flip_case_mask; + else + *dst++ = *src++; + } + else if (src + 1 < src_end + && ((src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0xBFu)) || (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x9Fu)))) + { + cyrillic_to_case(src, dst); + } + else if (src + 1 < src_end && src[0] == 0xC2u) + { + /// Punctuation U+0080 - U+00BF, UTF-8: C2 80 - C2 BF + *dst++ = *src++; + *dst++ = *src++; + } + else if (src + 2 < src_end && src[0] == 0xE2u) + { + /// Characters U+2000 - U+2FFF, UTF-8: E2 80 80 - E2 BF BF + *dst++ = *src++; + *dst++ = *src++; + *dst++ = *src++; + } + else + { + size_t src_sequence_length = UTF8::seqLength(*src); + /// In case partial buffer was passed (due to SSE optimization) + /// we cannot convert it with current src_end, but we may have more + /// bytes to convert and eventually got correct symbol. + if (partial && src_sequence_length > static_cast(src_end - src)) + return false; + + auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src); + if (src_code_point) + { + int dst_code_point = to_case(*src_code_point); + if (dst_code_point > 0) + { + size_t dst_sequence_length = UTF8::convertCodePointToUTF8(dst_code_point, dst, src_end - src); + assert(dst_sequence_length <= 4); + + /// We don't support cases when lowercase and uppercase characters occupy different number of bytes in UTF-8. + /// As an example, this happens for ß and ẞ. + if (dst_sequence_length == src_sequence_length) + { + src += dst_sequence_length; + dst += dst_sequence_length; + return true; + } + } + } + + *dst = *src; + ++dst; + ++src; + } + + return true; + } + +private: + static constexpr auto ascii_upper_bound = '\x7f'; + static constexpr auto flip_case_mask = 'A' ^ 'a'; + + static void array(const UInt8 * src, const UInt8 * src_end, const ColumnString::Offsets & offsets, UInt8 * dst) + { + const auto * offset_it = offsets.begin(); + const UInt8 * begin = src; + +#ifdef __SSE2__ + static constexpr auto bytes_sse = sizeof(__m128i); + + /// If we are before this position, we can still read at least bytes_sse. 
+ const auto * src_end_sse = src_end - bytes_sse + 1; + + /// SSE2 packed comparison operate on signed types, hence compare (c < 0) instead of (c > 0x7f) + const auto v_zero = _mm_setzero_si128(); + const auto v_not_case_lower_bound = _mm_set1_epi8(not_case_lower_bound - 1); + const auto v_not_case_upper_bound = _mm_set1_epi8(not_case_upper_bound + 1); + const auto v_flip_case_mask = _mm_set1_epi8(flip_case_mask); + + while (src < src_end_sse) + { + const auto chars = _mm_loadu_si128(reinterpret_cast(src)); + + /// check for ASCII + const auto is_not_ascii = _mm_cmplt_epi8(chars, v_zero); + const auto mask_is_not_ascii = _mm_movemask_epi8(is_not_ascii); + + /// ASCII + if (mask_is_not_ascii == 0) + { + const auto is_not_case + = _mm_and_si128(_mm_cmpgt_epi8(chars, v_not_case_lower_bound), _mm_cmplt_epi8(chars, v_not_case_upper_bound)); + const auto mask_is_not_case = _mm_movemask_epi8(is_not_case); + + /// everything in correct case ASCII + if (mask_is_not_case == 0) + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), chars); + else + { + /// ASCII in mixed case + /// keep `flip_case_mask` only where necessary, zero out elsewhere + const auto xor_mask = _mm_and_si128(v_flip_case_mask, is_not_case); + + /// flip case by applying calculated mask + const auto cased_chars = _mm_xor_si128(chars, xor_mask); + + /// store result back to destination + _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), cased_chars); + } + + src += bytes_sse; + dst += bytes_sse; + } + else + { + /// UTF-8 + + /// Find the offset of the next string after src + size_t offset_from_begin = src - begin; + while (offset_from_begin >= *offset_it) + ++offset_it; + + /// Do not allow one row influence another (since row may have invalid sequence, and break the next) + const UInt8 * row_end = begin + *offset_it; + chassert(row_end >= src); + const UInt8 * expected_end = std::min(src + bytes_sse, row_end); + + while (src < expected_end) + { + if (!toCase(src, expected_end, dst, /* partial= */ true)) + { + /// Fallback to handling byte by byte. 
+ src_end_sse = src; + break; + } + } + } + } + + /// Find the offset of the next string after src + size_t offset_from_begin = src - begin; + while (offset_it != offsets.end() && offset_from_begin >= *offset_it) + ++offset_it; +#endif + + /// handle remaining symbols, row by row (to avoid influence of bad UTF8 symbols from one row, to another) + while (src < src_end) + { + const UInt8 * row_end = begin + *offset_it; + chassert(row_end >= src); + + while (src < row_end) + toCase(src, row_end, dst, /* partial= */ false); + ++offset_it; + } + } }; } - -#endif diff --git a/src/Functions/initcapUTF8.cpp b/src/Functions/initcapUTF8.cpp index 004586dce26..282d846094e 100644 --- a/src/Functions/initcapUTF8.cpp +++ b/src/Functions/initcapUTF8.cpp @@ -1,8 +1,9 @@ #include #include +#include #include #include -#include + namespace DB { diff --git a/src/Functions/lowerUTF8.cpp b/src/Functions/lowerUTF8.cpp index e2f7cb84730..7adb0069121 100644 --- a/src/Functions/lowerUTF8.cpp +++ b/src/Functions/lowerUTF8.cpp @@ -1,10 +1,9 @@ -#include "config.h" - -#if USE_ICU - -#include +#include #include #include +#include +#include + namespace DB { @@ -16,25 +15,13 @@ struct NameLowerUTF8 static constexpr auto name = "lowerUTF8"; }; -using FunctionLowerUTF8 = FunctionStringToString, NameLowerUTF8>; +using FunctionLowerUTF8 = FunctionStringToString>, NameLowerUTF8>; } REGISTER_FUNCTION(LowerUTF8) { - FunctionDocumentation::Description description - = R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.)"; - FunctionDocumentation::Syntax syntax = "lowerUTF8(input)"; - FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}}; - FunctionDocumentation::ReturnedValue returned_value = "A String data type value"; - FunctionDocumentation::Examples examples = { - {"first", "SELECT lowerUTF8('München') as Lowerutf8;", "münchen"}, - }; - FunctionDocumentation::Categories categories = {"String"}; - - factory.registerFunction({description, syntax, arguments, returned_value, examples, categories}); + factory.registerFunction(); } } - -#endif diff --git a/src/Functions/upperUTF8.cpp b/src/Functions/upperUTF8.cpp index ef26430331f..659e67f0ef3 100644 --- a/src/Functions/upperUTF8.cpp +++ b/src/Functions/upperUTF8.cpp @@ -1,10 +1,8 @@ -#include "config.h" - -#if USE_ICU - -#include #include #include +#include +#include + namespace DB { @@ -16,25 +14,13 @@ struct NameUpperUTF8 static constexpr auto name = "upperUTF8"; }; -using FunctionUpperUTF8 = FunctionStringToString, NameUpperUTF8>; +using FunctionUpperUTF8 = FunctionStringToString>, NameUpperUTF8>; } REGISTER_FUNCTION(UpperUTF8) { - FunctionDocumentation::Description description - = R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. 
If this assumption is violated, no exception is thrown and the result is undefined.)";
-    FunctionDocumentation::Syntax syntax = "upperUTF8(input)";
-    FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}};
-    FunctionDocumentation::ReturnedValue returned_value = "A String data type value";
-    FunctionDocumentation::Examples examples = {
-        {"first", "SELECT upperUTF8('München') as Upperutf8;", "MÜNCHEN"},
-    };
-    FunctionDocumentation::Categories categories = {"String"};
-
-    factory.registerFunction({description, syntax, arguments, returned_value, examples, categories});
+    factory.registerFunction();
 }

 }
-
-#endif
diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8.reference b/tests/queries/0_stateless/00170_lower_upper_utf8.reference
index 3c644f22b9b..f202cb75513 100644
--- a/tests/queries/0_stateless/00170_lower_upper_utf8.reference
+++ b/tests/queries/0_stateless/00170_lower_upper_utf8.reference
@@ -22,7 +22,3 @@
 1
 1
 1
-1
-1
-1
-1
diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8.sql b/tests/queries/0_stateless/00170_lower_upper_utf8.sql
index 85b6c5c6095..4caba2033ff 100644
--- a/tests/queries/0_stateless/00170_lower_upper_utf8.sql
+++ b/tests/queries/0_stateless/00170_lower_upper_utf8.sql
@@ -1,6 +1,3 @@
--- Tags: no-fasttest
--- no-fasttest: upper/lowerUTF8 use ICU
-
 select lower('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
 select lowerUTF8('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
 select lower('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'aaaaaaaaaaaaaaa012345789,.!aaaa';
@@ -30,11 +27,3 @@ select sum(lower(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaАБВ
 select sum(upper(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
 select sum(lowerUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaабвгaaaaaaaa')) = count() from system.one array join range(16384) as n;
 select sum(upperUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
-
--- Turkish language
-select upperUTF8('ır') = 'IR';
-select lowerUTF8('ır') = 'ır';
-
--- German language
-select upper('öäüß') = 'öäüß';
-select lower('ÖÄÜẞ') = 'ÖÄÜẞ';
diff --git a/tests/queries/0_stateless/00233_position_function_family.sql b/tests/queries/0_stateless/00233_position_function_family.sql
index d6668cb7ba4..dd7394bc39a 100644
--- a/tests/queries/0_stateless/00233_position_function_family.sql
+++ b/tests/queries/0_stateless/00233_position_function_family.sql
@@ -1,6 +1,3 @@
--- Tags: no-fasttest
--- no-fasttest: upper/lowerUTF8 use ICU
-
 SET send_logs_level = 'fatal';

 select 1 = position('', '');
diff --git a/tests/queries/0_stateless/00761_lower_utf8_bug.sql b/tests/queries/0_stateless/00761_lower_utf8_bug.sql
index a0ab55edc15..de20b894331 100644
--- a/tests/queries/0_stateless/00761_lower_utf8_bug.sql
+++ b/tests/queries/0_stateless/00761_lower_utf8_bug.sql
@@ -1,4 +1 @@
--- Tags: no-fasttest
--- no-fasttest: upper/lowerUTF8 use ICU
-
 SELECT lowerUTF8('\xF0') = lowerUTF8('\xF0');
diff --git a/tests/queries/0_stateless/01278_random_string_utf8.sql b/tests/queries/0_stateless/01278_random_string_utf8.sql
index 290d6a0c759..da2dc48c3e1 100644
--- a/tests/queries/0_stateless/01278_random_string_utf8.sql
+++ b/tests/queries/0_stateless/01278_random_string_utf8.sql
@@ -1,6 +1,3 @@
--- Tags: no-fasttest
--- no-fasttest: upper/lowerUTF8 use ICU
-
 SELECT randomStringUTF8('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 SELECT
lengthUTF8(randomStringUTF8(100)); SELECT toTypeName(randomStringUTF8(10)); diff --git a/tests/queries/0_stateless/01431_utf8_ubsan.reference b/tests/queries/0_stateless/01431_utf8_ubsan.reference index dc785e57851..c98c950d535 100644 --- a/tests/queries/0_stateless/01431_utf8_ubsan.reference +++ b/tests/queries/0_stateless/01431_utf8_ubsan.reference @@ -1,2 +1,2 @@ -EFBFBD -EFBFBD +FF +FF diff --git a/tests/queries/0_stateless/01431_utf8_ubsan.sql b/tests/queries/0_stateless/01431_utf8_ubsan.sql index 3a28e023805..d6a299225b1 100644 --- a/tests/queries/0_stateless/01431_utf8_ubsan.sql +++ b/tests/queries/0_stateless/01431_utf8_ubsan.sql @@ -1,5 +1,2 @@ --- Tags: no-fasttest --- no-fasttest: upper/lowerUTF8 use ICU - SELECT hex(lowerUTF8('\xFF')); SELECT hex(upperUTF8('\xFF')); diff --git a/tests/queries/0_stateless/01590_countSubstrings.sql b/tests/queries/0_stateless/01590_countSubstrings.sql index 5ec4f412d7f..b38cbb7d188 100644 --- a/tests/queries/0_stateless/01590_countSubstrings.sql +++ b/tests/queries/0_stateless/01590_countSubstrings.sql @@ -1,6 +1,3 @@ --- Tags: no-fasttest --- no-fasttest: upper/lowerUTF8 use ICU - -- -- countSubstrings -- diff --git a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference index deabef61a88..a3bac432482 100644 --- a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference +++ b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference @@ -5,9 +5,9 @@ insert into utf8_overlap values ('\xe2'), ('Foo⚊BarBazBam'), ('\xe2'), ('Foo -- MONOGRAM FOR YANG with lowerUTF8(str) as l_, upperUTF8(str) as u_, '0x' || hex(str) as h_ select length(str), if(l_ == '\xe2', h_, l_), if(u_ == '\xe2', h_, u_) from utf8_overlap format CSV; -1,"�","�" +1,"0xE2","0xE2" 15,"foo⚊barbazbam","FOO⚊BARBAZBAM" -1,"�","�" +1,"0xE2","0xE2" 15,"foo⚊barbazbam","FOO⚊BARBAZBAM" -- NOTE: regression test for introduced bug -- https://github.com/ClickHouse/ClickHouse/issues/42756 diff --git a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql index d175e0659d0..8ca0a3f5f75 100644 --- a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql +++ b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql @@ -1,6 +1,3 @@ --- Tags: no-fasttest --- no-fasttest: upper/lowerUTF8 use ICU - drop table if exists utf8_overlap; create table utf8_overlap (str String) engine=Memory(); diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index 0980e25b70f..c39f1fb1ce9 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -416,6 +416,7 @@ logTrace lowCardinalityIndices lowCardinalityKeys lower +lowerUTF8 makeDate makeDate32 makeDateTime @@ -896,6 +897,7 @@ tupleToNameValuePairs unbin unhex upper +upperUTF8 uptime validateNestedArraySizes version diff --git a/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql b/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql index b169cfd0ab9..80e3c0a9ece 100644 --- a/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql +++ b/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql @@ -1,6 +1,3 @@ --- Tags: no-fasttest --- no-fasttest: upper/lowerUTF8 use ICU - 
create table if not exists t (`arr.key` Array(LowCardinality(String)), `arr.value` Array(LowCardinality(String))) engine = Memory;
 insert into t (`arr.key`, `arr.value`) values (['a'], ['b']);
 select if(true, if(lowerUTF8(arr.key) = 'a', 1, 2), 3) as x from t left array join arr;
diff --git a/tests/queries/0_stateless/02807_lower_utf8_msan.sql b/tests/queries/0_stateless/02807_lower_utf8_msan.sql
index 95f224577f7..e9eb18bf615 100644
--- a/tests/queries/0_stateless/02807_lower_utf8_msan.sql
+++ b/tests/queries/0_stateless/02807_lower_utf8_msan.sql
@@ -1,5 +1,2 @@
--- Tags: no-fasttest
--- no-fasttest: upper/lowerUTF8 use ICU
-
 SELECT lowerUTF8(arrayJoin(['©--------------------------------------', '©--------------------'])) ORDER BY 1;
 SELECT upperUTF8(materialize('aaaaАБВГaaaaaaaaaaaaАБВГAAAAaaAA')) FROM numbers(2);
diff --git a/tests/queries/0_stateless/03015_peder1001.sql b/tests/queries/0_stateless/03015_peder1001.sql
index df8e4db1536..810503207f2 100644
--- a/tests/queries/0_stateless/03015_peder1001.sql
+++ b/tests/queries/0_stateless/03015_peder1001.sql
@@ -1,6 +1,3 @@
--- Tags: no-fasttest
--- no-fasttest: upper/lowerUTF8 use ICU
-
 DROP TABLE IF EXISTS test_data;

 CREATE TABLE test_data

From 427016a450cad536e8cbaf4de04d07313456aa4b Mon Sep 17 00:00:00 2001
From: Max Kainov
Date: Tue, 6 Aug 2024 21:39:41 +0200
Subject: [PATCH 2342/2361] CI: Functional tests to store artifacts on timeout

---
 docker/test/fasttest/run.sh | 18 +---------
 docker/test/sqllogic/Dockerfile | 1 -
 docker/test/sqllogic/run.sh | 2 +-
 docker/test/sqltest/Dockerfile | 1 -
 docker/test/stateful/run.sh | 25 +-------------
 docker/test/stateless/Dockerfile | 1 -
 docker/test/stateless/run.sh | 21 ++----------
 docker/test/stateless/utils.lib | 16 ---------
 tests/ci/ci.py | 30 ++++++++--------
 tests/ci/ci_definitions.py | 3 +-
 tests/ci/functional_test_check.py | 37 +++++++++++++++-----
 tests/ci/report.py | 11 +++---
 tests/ci/tee_popen.py | 57 +++++++++++++++++++++++++++----
 13 files changed, 107 insertions(+), 116 deletions(-)

diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh
index 394d31addb1..9920326b11c 100755
--- a/docker/test/fasttest/run.sh
+++ b/docker/test/fasttest/run.sh
@@ -256,22 +256,6 @@ function configure
     rm -f "$FASTTEST_DATA/config.d/secure_ports.xml"
 }

-function timeout_with_logging() {
-    local exit_code=0
-
-    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
-
-    echo "Checking if it is a timeout. The code 124 will indicate a timeout."
-    if [[ "${exit_code}" -eq "124" ]]
-    then
-        echo "The command 'timeout ${*}' has been killed by timeout."
-    else
-        echo "No, it isn't a timeout."
- fi - - return $exit_code -} - function run_tests { clickhouse-server --version @@ -340,7 +324,7 @@ case "$stage" in configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt" ;& "run_tests") - timeout_with_logging 35m bash -c run_tests ||: + run_tests ||: /process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \ --out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \ --out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv" diff --git a/docker/test/sqllogic/Dockerfile b/docker/test/sqllogic/Dockerfile index 1425e12cd84..6397526388e 100644 --- a/docker/test/sqllogic/Dockerfile +++ b/docker/test/sqllogic/Dockerfile @@ -35,7 +35,6 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \ ENV TZ=Europe/Amsterdam -ENV MAX_RUN_TIME=9000 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git" diff --git a/docker/test/sqllogic/run.sh b/docker/test/sqllogic/run.sh index ccba344035e..32368980f9b 100755 --- a/docker/test/sqllogic/run.sh +++ b/docker/test/sqllogic/run.sh @@ -94,7 +94,7 @@ function run_tests() export -f run_tests -timeout "${MAX_RUN_TIME:-9000}" bash -c run_tests || echo "timeout reached" >&2 +run_tests #/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv diff --git a/docker/test/sqltest/Dockerfile b/docker/test/sqltest/Dockerfile index 71d915b0c7a..b805bb03c2b 100644 --- a/docker/test/sqltest/Dockerfile +++ b/docker/test/sqltest/Dockerfile @@ -22,7 +22,6 @@ ARG sqltest_repo="https://github.com/elliotchance/sqltest/" RUN git clone ${sqltest_repo} ENV TZ=UTC -ENV MAX_RUN_TIME=900 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone COPY run.sh / diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 3a4f0d97993..c072eeb0fa8 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -4,9 +4,6 @@ source /setup_export_logs.sh set -e -x -MAX_RUN_TIME=${MAX_RUN_TIME:-3600} -MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 3600 : MAX_RUN_TIME)) - # Choose random timezone for this test run TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)" echo "Choosen random timezone $TZ" @@ -123,9 +120,6 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] clickhouse-client --query "DROP TABLE datasets.hits_v1" clickhouse-client --query "DROP TABLE datasets.visits_v1" - - MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours) - MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited) else clickhouse-client --query "CREATE DATABASE test" clickhouse-client --query "SHOW TABLES FROM test" @@ -257,24 +251,7 @@ function run_tests() export -f run_tests -function timeout_with_logging() { - local exit_code=0 - - timeout -s TERM --preserve-status "${@}" || exit_code="${?}" - - echo "Checking if it is a timeout. The code 124 will indicate a timeout." - if [[ "${exit_code}" -eq "124" ]] - then - echo "The command 'timeout ${*}' has been killed by timeout." - else - echo "No, it isn't a timeout." 
- fi - - return $exit_code -} - -TIMEOUT=$((MAX_RUN_TIME - 700)) -timeout_with_logging "$TIMEOUT" bash -c run_tests ||: +run_tests ||: echo "Files in current directory" ls -la ./ diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index d8eb072328f..b0c4914a4e8 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -65,7 +65,6 @@ ENV TZ=Europe/Amsterdam RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV NUM_TRIES=1 -ENV MAX_RUN_TIME=0 # Unrelated to vars in setup_minio.sh, but should be the same there # to have the same binaries for local running scenario diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index c70cbe1fe45..ad0cd321cc5 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -12,9 +12,6 @@ dmesg --clear # fail on errors, verbose and export all env variables set -e -x -a -MAX_RUN_TIME=${MAX_RUN_TIME:-9000} -MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 9000 : MAX_RUN_TIME)) - USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0} USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0} @@ -308,8 +305,6 @@ function run_tests() try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')" - TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800)) - START_TIME=${SECONDS} set +e TEST_ARGS=( @@ -324,32 +319,22 @@ function run_tests() --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" ) - timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s clickhouse-test "${TEST_ARGS[@]}" 2>&1 \ + clickhouse-test "${TEST_ARGS[@]}" 2>&1 \ | ts '%Y-%m-%d %H:%M:%S' \ | tee -a test_output/test_result.txt set -e - DURATION=$((SECONDS - START_TIME)) - - echo "Elapsed ${DURATION} seconds." - if [[ $DURATION -ge $TIMEOUT ]] - then - echo "It looks like the command is terminated by the timeout, which is ${TIMEOUT} seconds." - fi } export -f run_tests - -# This should be enough to setup job and collect artifacts -TIMEOUT=$((MAX_RUN_TIME - 700)) if [ "$NUM_TRIES" -gt "1" ]; then # We don't run tests with Ordinary database in PRs, only in master. # So run new/changed tests with Ordinary at least once in flaky check. - timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \ + NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests \ | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' ||: fi -timeout_with_logging "$TIMEOUT" bash -c run_tests ||: +run_tests ||: echo "Files in current directory" ls -la ./ diff --git a/docker/test/stateless/utils.lib b/docker/test/stateless/utils.lib index cb257536c36..31cd67254b4 100644 --- a/docker/test/stateless/utils.lib +++ b/docker/test/stateless/utils.lib @@ -40,22 +40,6 @@ function fn_exists() { declare -F "$1" > /dev/null; } -function timeout_with_logging() { - local exit_code=0 - - timeout -s TERM --preserve-status "${@}" || exit_code="${?}" - - echo "Checking if it is a timeout. The code 124 will indicate a timeout." - if [[ "${exit_code}" -eq "124" ]] - then - echo "The command 'timeout ${*}' has been killed by timeout." - else - echo "No, it isn't a timeout." - fi - - return $exit_code -} - function collect_core_dumps() { find . 
-type f -maxdepth 1 -name 'core.*' | while read -r core; do diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 49b597333dc..1208d8642ad 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -50,7 +50,6 @@ from github_helper import GitHub from pr_info import PRInfo from report import ( ERROR, - FAILURE, PENDING, SUCCESS, BuildResult, @@ -62,11 +61,11 @@ from report import ( FAIL, ) from s3_helper import S3Helper -from stopwatch import Stopwatch from tee_popen import TeePopen from ci_cache import CiCache from ci_settings import CiSettings from ci_buddy import CIBuddy +from stopwatch import Stopwatch from version_helper import get_version_from_repo # pylint: disable=too-many-lines @@ -370,8 +369,8 @@ def _pre_action(s3, job_name, batch, indata, pr_info): # skip_status = SUCCESS already there GH.print_in_group("Commit Status Data", job_status) - # create pre report - jr = JobReport.create_pre_report(status=skip_status, job_skipped=to_be_skipped) + # create dummy report + jr = JobReport.create_dummy(status=skip_status, job_skipped=to_be_skipped) jr.dump() if not to_be_skipped: @@ -990,19 +989,20 @@ def _run_test(job_name: str, run_command: str) -> int: stopwatch = Stopwatch() job_log = Path(TEMP_PATH) / "job_log.txt" with TeePopen(run_command, job_log, env, timeout) as process: + print(f"Job process started, pid [{process.process.pid}]") retcode = process.wait() if retcode != 0: print(f"Run action failed for: [{job_name}] with exit code [{retcode}]") - if timeout and process.timeout_exceeded: - print(f"Timeout {timeout} exceeded, dumping the job report") - JobReport( - status=FAILURE, - description=f"Timeout {timeout} exceeded", - test_results=[TestResult.create_check_timeout_expired(timeout)], - start_time=stopwatch.start_time_str, - duration=stopwatch.duration_seconds, - additional_files=[job_log], - ).dump() + if process.timeout_exceeded: + print(f"Job timed out: [{job_name}] exit code [{retcode}]") + assert JobReport.exist(), "JobReport real or dummy must be present" + jr = JobReport.load() + if jr.dummy: + print( + f"ERROR: Run action failed with timeout and did not generate JobReport - update dummy report with execution time" + ) + jr.test_results = [TestResult.create_check_timeout_expired()] + jr.duration = stopwatch.duration_seconds print(f"Run action done for: [{job_name}]") return retcode @@ -1205,7 +1205,7 @@ def main() -> int: job_report ), "BUG. 
There must be job report either real report, or pre-report if job was killed" error_description = "" - if not job_report.pre_report: + if not job_report.dummy: # it's a real job report ch_helper = ClickHouseHelper() check_url = "" diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 1bed9db06f2..1d1c39f913d 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -332,7 +332,7 @@ class JobConfig: # will be triggered for the job if omitted in CI workflow yml run_command: str = "" # job timeout, seconds - timeout: Optional[int] = None + timeout: int = 7200 # sets number of batches for a multi-batch job num_batches: int = 1 # label that enables job in CI, if set digest isn't used @@ -421,7 +421,6 @@ class CommonJobConfigs: ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=9000, ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index b7391eff01b..d08f98fa05f 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -5,10 +5,11 @@ import csv import logging import os import re +import signal import subprocess import sys from pathlib import Path -from typing import List, Tuple +from typing import List, Tuple, Optional from build_download_helper import download_all_deb_packages from clickhouse_helper import CiLogsCredentials @@ -25,11 +26,12 @@ from report import ( TestResults, read_test_results, FAILURE, + TestResult, ) from stopwatch import Stopwatch from tee_popen import TeePopen from ci_config import CI -from ci_utils import Utils +from ci_utils import Utils, Shell NO_CHANGES_MSG = "Nothing to run" @@ -113,10 +115,6 @@ def get_run_command( if flaky_check: envs.append("-e NUM_TRIES=50") - envs.append("-e MAX_RUN_TIME=2800") - else: - max_run_time = os.getenv("MAX_RUN_TIME", "0") - envs.append(f"-e MAX_RUN_TIME={max_run_time}") envs += [f"-e {e}" for e in additional_envs] @@ -128,7 +126,7 @@ def get_run_command( ) return ( - f"docker run --volume={builds_path}:/package_folder " + f"docker run --rm --name func-tester --volume={builds_path}:/package_folder " # For dmesg and sysctl "--privileged " f"{ci_logs_args}" @@ -198,7 +196,7 @@ def process_results( state, description = status[0][0], status[0][1] if ret_code != 0: state = ERROR - description += " (but script exited with an error)" + description = f"Job failed, exit code: {ret_code}. 
" + description try: results_path = result_directory / "test_results.tsv" @@ -240,7 +238,19 @@ def parse_args(): return parser.parse_args() +test_process = None # type: Optional[TeePopen] +timeout_expired = False + + +def handle_sigterm(signum, _frame): + print(f"WARNING: Received signal {signum}") + global timeout_expired + timeout_expired = True + Shell.check(f"docker exec func-tester pkill -f clickhouse-test", verbose=True) + + def main(): + signal.signal(signal.SIGTERM, handle_sigterm) logging.basicConfig(level=logging.INFO) for handler in logging.root.handlers: # pylint: disable=protected-access @@ -328,11 +338,13 @@ def main(): logging.info("Going to run func tests: %s", run_command) with TeePopen(run_command, run_log_path) as process: + global test_process + test_process = process retcode = process.wait() if retcode == 0: logging.info("Run successfully") else: - logging.info("Run failed") + logging.info("Run failed, exit code %s", retcode) try: subprocess.check_call( @@ -348,6 +360,13 @@ def main(): state, description, test_results, additional_logs = process_results( retcode, result_path, server_log_path ) + if timeout_expired: + description = "Timeout expired" + state = FAILURE + test_results.insert( + 0, TestResult.create_check_timeout_expired(stopwatch.duration_seconds) + ) + else: print( "This is validate bugfix or flaky check run, but no changes test to run - skip with success" diff --git a/tests/ci/report.py b/tests/ci/report.py index 6779a6dae96..c2632719aef 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -249,6 +249,7 @@ JOB_REPORT_FILE = Path(GITHUB_WORKSPACE) / "job_report.json" JOB_STARTED_TEST_NAME = "STARTED" JOB_FINISHED_TEST_NAME = "COMPLETED" +JOB_TIMEOUT_TEST_NAME = "Job Timeout Expired" @dataclass @@ -277,8 +278,8 @@ class TestResult: self.log_files.append(log_path) @staticmethod - def create_check_timeout_expired(timeout: float) -> "TestResult": - return TestResult("Check timeout expired", "FAIL", timeout) + def create_check_timeout_expired(duration: Optional[float] = None) -> "TestResult": + return TestResult(JOB_TIMEOUT_TEST_NAME, "FAIL", time=duration) TestResults = List[TestResult] @@ -303,7 +304,7 @@ class JobReport: # indicates that this is not real job report but report for the job that was skipped by rerun check job_skipped: bool = False # indicates that report generated by CI script in order to check later if job was killed before real report is generated - pre_report: bool = False + dummy: bool = False exit_code: int = -1 @staticmethod @@ -311,7 +312,7 @@ class JobReport: return datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") @classmethod - def create_pre_report(cls, status: str, job_skipped: bool) -> "JobReport": + def create_dummy(cls, status: str, job_skipped: bool) -> "JobReport": return JobReport( status=status, description="", @@ -320,7 +321,7 @@ class JobReport: duration=0.0, additional_files=[], job_skipped=job_skipped, - pre_report=True, + dummy=True, ) def update_duration(self): diff --git a/tests/ci/tee_popen.py b/tests/ci/tee_popen.py index 13db50df53f..ad3e62dab9c 100644 --- a/tests/ci/tee_popen.py +++ b/tests/ci/tee_popen.py @@ -2,6 +2,8 @@ import logging import os +import signal +import subprocess import sys from io import TextIOWrapper from pathlib import Path @@ -30,20 +32,35 @@ class TeePopen: self._process = None # type: Optional[Popen] self.timeout = timeout self.timeout_exceeded = False + self.terminated_by_sigterm = False + self.terminated_by_sigkill = False + self.pid = 0 def _check_timeout(self) -> None: if 
self.timeout is None: return sleep(self.timeout) + logging.warning( + "Timeout exceeded. Send SIGTERM to process %s, timeout %s", + self.process.pid, + self.timeout, + ) + self.send_signal(signal.SIGTERM) + time_wait = 0 + self.terminated_by_sigterm = True self.timeout_exceeded = True + while self.process.poll() is None and time_wait < 100: + print("wait...") + wait = 5 + sleep(wait) + time_wait += wait while self.process.poll() is None: - logging.warning( - "Killing process %s, timeout %s exceeded", - self.process.pid, - self.timeout, + logging.error( + "Process is still running. Send SIGKILL", ) - os.killpg(self.process.pid, 9) - sleep(10) + self.send_signal(signal.SIGKILL) + self.terminated_by_sigkill = True + sleep(5) def __enter__(self) -> "TeePopen": self.process = Popen( @@ -57,6 +74,9 @@ class TeePopen: bufsize=1, errors="backslashreplace", ) + sleep(1) + self.pid = self._get_child_pid() + print(f"Subprocess started, pid [{self.process.pid}], child pid [{self.pid}]") if self.timeout is not None and self.timeout > 0: t = Thread(target=self._check_timeout) t.daemon = True # does not block the program from exit @@ -77,6 +97,22 @@ class TeePopen: self.log_file.close() + def _get_child_pid(self): + # linux only + ps_command = f"ps --ppid {self.process.pid} -o pid=" + res = "NA" + try: + result = subprocess.run( + ps_command, shell=True, capture_output=True, text=True + ) + res = result.stdout.strip() + pid = int(res) + return pid + except Exception as e: + print(f"Failed to get child's pid, command [{ps_command}], result [{res}]") + print(f"ERROR: getting Python subprocess PID: {e}") + return self.process.pid + def wait(self) -> int: if self.process.stdout is not None: for line in self.process.stdout: @@ -85,6 +121,15 @@ class TeePopen: return self.process.wait() + def poll(self): + return self.process.poll() + + def send_signal(self, signal_num): + if self.pid: + os.kill(self.pid, signal_num) + else: + print("ERROR: no process to send signal") + @property def process(self) -> Popen: if self._process is not None: From 8e35b082b2e3315655110bdce4238217dfe85914 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Sat, 10 Aug 2024 10:01:16 +0200 Subject: [PATCH 2343/2361] teepopen fix --- tests/ci/tee_popen.py | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/tests/ci/tee_popen.py b/tests/ci/tee_popen.py index ad3e62dab9c..53b0a0f6c2c 100644 --- a/tests/ci/tee_popen.py +++ b/tests/ci/tee_popen.py @@ -3,7 +3,6 @@ import logging import os import signal -import subprocess import sys from io import TextIOWrapper from pathlib import Path @@ -34,7 +33,6 @@ class TeePopen: self.timeout_exceeded = False self.terminated_by_sigterm = False self.terminated_by_sigkill = False - self.pid = 0 def _check_timeout(self) -> None: if self.timeout is None: @@ -75,8 +73,7 @@ class TeePopen: errors="backslashreplace", ) sleep(1) - self.pid = self._get_child_pid() - print(f"Subprocess started, pid [{self.process.pid}], child pid [{self.pid}]") + print(f"Subprocess started, pid [{self.process.pid}]") if self.timeout is not None and self.timeout > 0: t = Thread(target=self._check_timeout) t.daemon = True # does not block the program from exit @@ -97,22 +94,6 @@ class TeePopen: self.log_file.close() - def _get_child_pid(self): - # linux only - ps_command = f"ps --ppid {self.process.pid} -o pid=" - res = "NA" - try: - result = subprocess.run( - ps_command, shell=True, capture_output=True, text=True - ) - res = result.stdout.strip() - pid = int(res) - return pid - except 
Exception as e: - print(f"Failed to get child's pid, command [{ps_command}], result [{res}]") - print(f"ERROR: getting Python subprocess PID: {e}") - return self.process.pid - def wait(self) -> int: if self.process.stdout is not None: for line in self.process.stdout: @@ -125,10 +106,7 @@ class TeePopen: return self.process.poll() def send_signal(self, signal_num): - if self.pid: - os.kill(self.pid, signal_num) - else: - print("ERROR: no process to send signal") + os.killpg(self.process.pid, signal_num) @property def process(self) -> Popen: From 66fa5a154a8895f481a598616df93f7cb83e42cd Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 12 Aug 2024 02:34:22 +0200 Subject: [PATCH 2344/2361] tune timeouts, batches --- tests/ci/ci_config.py | 7 ++++--- tests/ci/ci_definitions.py | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 173c6c9c931..99f4ed38475 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -316,6 +316,7 @@ class CI: JobNames.STATEFUL_TEST_PARALLEL_REPL_TSAN: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], random_bucket="parrepl_with_sanitizer", + timeout=3600, ), JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2 @@ -346,7 +347,7 @@ class CI: required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 ), JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2 + required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=1 ), JobNames.STATELESS_TEST_AZURE_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], num_batches=3, release_only=True @@ -401,14 +402,14 @@ class CI: required_builds=[BuildNames.PACKAGE_ASAN], release_only=True, num_batches=4 ), JobNames.INTEGRATION_TEST_ASAN_OLD_ANALYZER: CommonJobConfigs.INTEGRATION_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=6 + required_builds=[BuildNames.PACKAGE_ASAN], num_batches=4 ), JobNames.INTEGRATION_TEST_TSAN: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], num_batches=6 ), JobNames.INTEGRATION_TEST_ARM: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - num_batches=6, + num_batches=3, runner_type=Runners.FUNC_TESTER_ARM, ), JobNames.INTEGRATION_TEST: CommonJobConfigs.INTEGRATION_TEST.with_properties( diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 1d1c39f913d..13c222b10b9 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -465,6 +465,7 @@ class CommonJobConfigs: ), run_command="upgrade_check.py", runner_type=Runners.STRESS_TESTER, + timeout=3600, ) INTEGRATION_TEST = JobConfig( job_name_keyword="integration", From 1deeca40dbbbc14373e51d830b851b54b82e5efa Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Thu, 15 Aug 2024 13:11:10 +0200 Subject: [PATCH 2345/2361] Handling timeout in integration tests --- tests/ci/ci.py | 13 ++++++++- tests/ci/ci_config.py | 3 ++- tests/ci/integration_test_check.py | 2 +- tests/ci/integration_tests_runner.py | 40 +++++++++++++++++++++++++++- 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 1208d8642ad..a9ae078b449 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1003,6 +1003,7 @@ def _run_test(job_name: str, run_command: str) -> int: ) 
jr.test_results = [TestResult.create_check_timeout_expired()] jr.duration = stopwatch.duration_seconds + jr.additional_files += [job_log] print(f"Run action done for: [{job_name}]") return retcode @@ -1329,10 +1330,20 @@ def main() -> int: if CI.is_test_job(args.job_name): gh = GitHub(get_best_robot_token(), per_page=100) commit = get_commit(gh, pr_info.sha) + check_url = "" + if job_report.test_results or job_report.additional_files: + check_url = upload_result_helper.upload_results( + s3, + pr_info.number, + pr_info.sha, + job_report.test_results, + job_report.additional_files, + job_report.check_name or _get_ext_check_name(args.job_name), + ) post_commit_status( commit, ERROR, - "", + check_url, "Error: " + error_description, _get_ext_check_name(args.job_name), pr_info, diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 99f4ed38475..b5e424c2b3f 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -402,7 +402,8 @@ class CI: required_builds=[BuildNames.PACKAGE_ASAN], release_only=True, num_batches=4 ), JobNames.INTEGRATION_TEST_ASAN_OLD_ANALYZER: CommonJobConfigs.INTEGRATION_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=4 + required_builds=[BuildNames.PACKAGE_ASAN], + num_batches=3, ), JobNames.INTEGRATION_TEST_TSAN: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], num_batches=6 diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 6245f0490fc..7232ca375a1 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -29,7 +29,7 @@ from stopwatch import Stopwatch import integration_tests_runner as runner from ci_config import CI -from ci_utils import Utils +from ci_utils import Utils, Shell def get_json_params_dict( diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index f5dbef4f6db..d3cd3d16de1 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -9,6 +9,7 @@ import random import re import shlex import shutil +import signal import string import subprocess import sys @@ -16,11 +17,13 @@ import time import zlib # for crc32 from collections import defaultdict from itertools import chain -from typing import Any, Dict +from typing import Any, Dict, Optional from env_helper import IS_CI from integration_test_images import IMAGES from tee_popen import TeePopen +from report import JOB_TIMEOUT_TEST_NAME +from stopwatch import Stopwatch MAX_RETRY = 1 NUM_WORKERS = 5 @@ -621,6 +624,9 @@ class ClickhouseIntegrationTestsRunner: test_data_dirs = {} for i in range(num_tries): + if timeout_expired: + print("Timeout expired - break test group execution") + break logging.info("Running test group %s for the %s retry", test_group, i) clear_ip_tables_and_restart_daemons() @@ -657,6 +663,8 @@ class ClickhouseIntegrationTestsRunner: logging.info("Executing cmd: %s", cmd) # ignore retcode, since it meaningful due to pipe to tee with subprocess.Popen(cmd, shell=True, stderr=log, stdout=log) as proc: + global runner_subprocess + runner_subprocess = proc proc.wait() extra_logs_names = [log_basename] @@ -780,6 +788,9 @@ class ClickhouseIntegrationTestsRunner: logs = [] tries_num = 1 if should_fail else FLAKY_TRIES_COUNT for i in range(tries_num): + if timeout_expired: + print("Timeout expired - break flaky check execution") + break final_retry += 1 logging.info("Running tests for the %s time", i) counters, tests_times, log_paths = self.try_run_test_group( @@ -839,6 
+850,7 @@ class ClickhouseIntegrationTestsRunner: return result_state, status_text, test_result, logs def run_impl(self, repo_path, build_path): + stopwatch = Stopwatch() if self.flaky_check or self.bugfix_validate_check: return self.run_flaky_check( repo_path, build_path, should_fail=self.bugfix_validate_check @@ -921,6 +933,9 @@ class ClickhouseIntegrationTestsRunner: random.shuffle(items_to_run) for group, tests in items_to_run: + if timeout_expired: + print("Timeout expired - break tests execution") + break logging.info("Running test group %s containing %s tests", group, len(tests)) group_counters, group_test_times, log_paths = self.try_run_test_group( repo_path, group, tests, MAX_RETRY, NUM_WORKERS, 0 @@ -981,6 +996,17 @@ class ClickhouseIntegrationTestsRunner: status_text = "Timeout, " + status_text result_state = "failure" + if timeout_expired: + logging.error( + "Job killed by external timeout signal - setting status to failure!" + ) + status_text = "Job timeout expired, " + status_text + result_state = "failure" + # add mock test case to make timeout visible in job report and in ci db + test_result.insert( + 0, (JOB_TIMEOUT_TEST_NAME, "FAIL", f"{stopwatch.duration_seconds}", "") + ) + if not counters or sum(len(counter) for counter in counters.values()) == 0: status_text = "No tests found for some reason! It's a bug" result_state = "failure" @@ -1001,6 +1027,7 @@ def write_results(results_file, status_file, results, status): def run(): + signal.signal(signal.SIGTERM, handle_sigterm) logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") repo_path = os.environ.get("CLICKHOUSE_TESTS_REPO_PATH") @@ -1035,5 +1062,16 @@ def run(): logging.info("Result written") +timeout_expired = False +runner_subprocess = None # type:Optional[subprocess.Popen] + + +def handle_sigterm(signum, _frame): + print(f"WARNING: Received signal {signum}") + global timeout_expired + timeout_expired = True + runner_subprocess.send_signal(signal.SIGTERM) + + if __name__ == "__main__": run() From dde7ee29fc594f87bb35880bede845c4d4f29423 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 16 Aug 2024 10:22:12 +0200 Subject: [PATCH 2346/2361] sort tests in report by status --- tests/ci/ci_config.py | 8 ++++---- tests/ci/integration_test_check.py | 2 +- tests/ci/integration_tests_runner.py | 3 ++- tests/ci/report.py | 19 +++++++++++++++---- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index b5e424c2b3f..8ce0b9fde5a 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -344,7 +344,7 @@ class CI: runner_type=Runners.FUNC_TESTER_ARM, ), JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 + required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=2 ), JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=1 @@ -354,7 +354,7 @@ class CI: ), JobNames.STATELESS_TEST_S3_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], - num_batches=4, + num_batches=3, ), JobNames.STRESS_TEST_DEBUG: CommonJobConfigs.STRESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_DEBUG], @@ -403,14 +403,14 @@ class CI: ), JobNames.INTEGRATION_TEST_ASAN_OLD_ANALYZER: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], - num_batches=3, + num_batches=6, ), 
JobNames.INTEGRATION_TEST_TSAN: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], num_batches=6 ), JobNames.INTEGRATION_TEST_ARM: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - num_batches=3, + num_batches=6, runner_type=Runners.FUNC_TESTER_ARM, ), JobNames.INTEGRATION_TEST: CommonJobConfigs.INTEGRATION_TEST.with_properties( diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 7232ca375a1..6245f0490fc 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -29,7 +29,7 @@ from stopwatch import Stopwatch import integration_tests_runner as runner from ci_config import CI -from ci_utils import Utils, Shell +from ci_utils import Utils def get_json_params_dict( diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index d3cd3d16de1..c3b71b85022 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -1070,7 +1070,8 @@ def handle_sigterm(signum, _frame): print(f"WARNING: Received signal {signum}") global timeout_expired timeout_expired = True - runner_subprocess.send_signal(signal.SIGTERM) + if runner_subprocess: + runner_subprocess.send_signal(signal.SIGTERM) if __name__ == "__main__": diff --git a/tests/ci/report.py b/tests/ci/report.py index c2632719aef..a1b25b994c7 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -742,10 +742,21 @@ def create_test_html_report( has_test_time = any(tr.time is not None for tr in test_results) has_log_urls = False - # Display entires with logs at the top (they correspond to failed tests) - test_results.sort( - key=lambda result: result.raw_logs is None and result.log_files is None - ) + def sort_key(status): + if "fail" in status.lower(): + return 0 + elif "error" in status.lower(): + return 1 + elif "not" in status.lower(): + return 2 + elif "ok" in status.lower(): + return 10 + elif "success" in status.lower(): + return 9 + else: + return 5 + + test_results.sort(key=lambda result: sort_key(result.status)) for test_result in test_results: colspan = 0 From 90330077e5595c000cec82c8a0819db339296d33 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 18 Aug 2024 17:56:44 +0000 Subject: [PATCH 2347/2361] fix test --- .../queries/0_stateless/03221_refreshable_matview_progress.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql index ecb385c9bfa..98e1c48478d 100644 --- a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -1,4 +1,4 @@ --- Tags: no-ordinary-database +-- Tags: no-replicated-database, no-ordinary-database set allow_experimental_refreshable_materialized_view=1; From 683b84e6b66edbfbba12a3c56aad9aefd717bde2 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 18 Aug 2024 19:37:52 +0100 Subject: [PATCH 2348/2361] fix --- src/Storages/MergeTree/MergeTreeReadPool.cpp | 4 ++++ src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index cc321cd5a4d..9927d369104 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -235,6 +235,10 @@ void 
MergeTreeReadPool::fillPerThreadInfo(size_t threads, size_t sum_marks) const auto part_idx = current_parts.back().part_idx; const auto min_marks_per_task = per_part_infos[part_idx]->min_marks_per_task; + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + /// Do not get too few rows from part. if (marks_in_part >= min_marks_per_task && need_marks < min_marks_per_task) need_marks = min_marks_per_task; diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 9d3c38822e1..1cc13102794 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -87,10 +87,6 @@ static size_t calculateMinMarksPerTask( } } - if (min_marks_per_task == 0) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); - LOG_TEST(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); return min_marks_per_task; } From 3883627aad0e23105ec9e1f985039cd27dcf227d Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Sun, 18 Aug 2024 20:51:40 +0100 Subject: [PATCH 2349/2361] fix --- src/Storages/MergeTree/MergeTreeReadPool.cpp | 1 + src/Storages/MergeTree/MergeTreeReadPoolBase.cpp | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 9927d369104..23c314e48f5 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -24,6 +24,7 @@ namespace ErrorCodes { extern const int CANNOT_SCHEDULE_TASK; extern const int LOGICAL_ERROR; +extern const int BAD_ARGUMENTS; } MergeTreeReadPool::MergeTreeReadPool( diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 1cc13102794..95a10454f9e 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -13,7 +13,6 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; - extern const int BAD_ARGUMENTS; } MergeTreeReadPoolBase::MergeTreeReadPoolBase( From a258b4fb3dcdb8fa2484132aa72e9cece618d488 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 22:18:31 +0200 Subject: [PATCH 2350/2361] Fix race condition in MergeTree restarting thread --- .../ReplicatedMergeTreeRestartingThread.cpp | 15 ++++++++++++++- .../ReplicatedMergeTreeRestartingThread.h | 11 ++--------- src/Storages/StorageReplicatedMergeTree.cpp | 10 ++++------ 3 files changed, 20 insertions(+), 16 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 05fd6f6915b..d3ccda904b6 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -49,6 +48,20 @@ ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(Storage task = storage.getContext()->getSchedulePool().createTask(log_name, [this]{ run(); }); } +void ReplicatedMergeTreeRestartingThread::start(bool schedule) +{ + LOG_TRACE(log, "Starting the restating thread, schedule: {}", schedule); + if (schedule) + task->activateAndSchedule(); + 
else + task->activate(); +} + +void ReplicatedMergeTreeRestartingThread::wakeup() +{ + task->schedule(); +} + void ReplicatedMergeTreeRestartingThread::run() { if (need_stop) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index 01071d80e8b..d719505ae5e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -24,16 +24,9 @@ class ReplicatedMergeTreeRestartingThread public: explicit ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_); - void start(bool schedule = true) - { - LOG_TRACE(log, "Starting restating thread, schedule: {}", schedule); - if (schedule) - task->activateAndSchedule(); - else - task->activate(); - } + void start(bool schedule); - void wakeup() { task->schedule(); } + void wakeup(); void shutdown(bool part_of_full_shutdown); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 068ff1387b3..ff8e362aa36 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5194,17 +5194,16 @@ void StorageReplicatedMergeTree::startupImpl(bool from_attach_thread) startBeingLeader(); - /// Activate replica in a separate thread if we are not calling from attach thread - restarting_thread.start(/*schedule=*/!from_attach_thread); - if (from_attach_thread) { LOG_TRACE(log, "Trying to startup table from right now"); - /// Try activating replica in current thread. + /// Try activating replica in the current thread. restarting_thread.run(); + restarting_thread.start(false); } else { + restarting_thread.start(true); /// Wait while restarting_thread finishing initialization. /// NOTE It does not mean that replication is actually started after receiving this event. /// It only means that an attempt to startup replication was made. @@ -5225,7 +5224,7 @@ void StorageReplicatedMergeTree::startupImpl(bool from_attach_thread) session_expired_callback_handler = EventNotifier::instance().subscribe(Coordination::Error::ZSESSIONEXPIRED, [this]() { LOG_TEST(log, "Received event for expired session. 
Waking up restarting thread"); - restarting_thread.start(); + restarting_thread.start(true); }); startBackgroundMovesIfNeeded(); @@ -5294,7 +5293,6 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown() LOG_TRACE(log, "The attach thread is shutdown"); } - restarting_thread.shutdown(/* part_of_full_shutdown */true); /// Explicitly set the event, because the restarting thread will not set it again startup_event.set(); From 3e5d070b8fb47600979a1a7cc672edc3a7327004 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 22:40:42 +0200 Subject: [PATCH 2351/2361] Fix tests --- .../00804_test_deflate_qpl_codec_compression.reference | 2 +- .../00804_test_zstd_qat_codec_compression.reference | 2 +- .../03227_print_pretty_tuples_create_query.reference | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference index a2178f5eda7..a6e03404f2b 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(DEFLATE_QPL),\n `data` String CODEC(DEFLATE_QPL),\n `ddd` Date CODEC(DEFLATE_QPL),\n `ddd32` Date32 CODEC(DEFLATE_QPL),\n `somenum` Float64 CODEC(DEFLATE_QPL),\n `somestr` FixedString(3) CODEC(DEFLATE_QPL),\n `othernum` Int64 CODEC(DEFLATE_QPL),\n `somearray` Array(UInt8) CODEC(DEFLATE_QPL),\n `somemap` Map(String, UInt32) CODEC(DEFLATE_QPL),\n `sometuple` Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(DEFLATE_QPL),\n `data` String CODEC(DEFLATE_QPL),\n `ddd` Date CODEC(DEFLATE_QPL),\n `ddd32` Date32 CODEC(DEFLATE_QPL),\n `somenum` Float64 CODEC(DEFLATE_QPL),\n `somestr` FixedString(3) CODEC(DEFLATE_QPL),\n `othernum` Int64 CODEC(DEFLATE_QPL),\n `somearray` Array(UInt8) CODEC(DEFLATE_QPL),\n `somemap` Map(String, UInt32) CODEC(DEFLATE_QPL),\n `sometuple` Tuple(\n UInt16,\n UInt64) CODEC(DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) 2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) 3 ! 
2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) diff --git a/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference b/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference index 31a4360469f..ff70403ce7a 100644 --- a/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(ZSTD_QAT(1)),\n `data` String CODEC(ZSTD_QAT(1)),\n `ddd` Date CODEC(ZSTD_QAT(1)),\n `ddd32` Date32 CODEC(ZSTD_QAT(1)),\n `somenum` Float64 CODEC(ZSTD_QAT(1)),\n `somestr` FixedString(3) CODEC(ZSTD_QAT(1)),\n `othernum` Int64 CODEC(ZSTD_QAT(1)),\n `somearray` Array(UInt8) CODEC(ZSTD_QAT(1)),\n `somemap` Map(String, UInt32) CODEC(ZSTD_QAT(1)),\n `sometuple` Tuple(UInt16, UInt64) CODEC(ZSTD_QAT(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(ZSTD_QAT(1)),\n `data` String CODEC(ZSTD_QAT(1)),\n `ddd` Date CODEC(ZSTD_QAT(1)),\n `ddd32` Date32 CODEC(ZSTD_QAT(1)),\n `somenum` Float64 CODEC(ZSTD_QAT(1)),\n `somestr` FixedString(3) CODEC(ZSTD_QAT(1)),\n `othernum` Int64 CODEC(ZSTD_QAT(1)),\n `somearray` Array(UInt8) CODEC(ZSTD_QAT(1)),\n `somemap` Map(String, UInt32) CODEC(ZSTD_QAT(1)),\n `sometuple` Tuple(\n UInt16,\n UInt64) CODEC(ZSTD_QAT(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) 2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) 3 ! 2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) diff --git a/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference index c65dc32a224..afaaaaa6119 100644 --- a/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference +++ b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference @@ -1,6 +1,6 @@ SHOW CREATE TABLE: -CREATE TABLE test.test +CREATE TABLE default.test ( `x` Tuple( a String, @@ -13,7 +13,7 @@ CREATE TABLE test.test ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192 -CREATE TABLE test.test +CREATE TABLE default.test ( `x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), `y` String From 8f2c20806a7b757beecb99e52a8d5a1dcab8df07 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 22:45:13 +0200 Subject: [PATCH 2352/2361] Fix test `01079_bad_alters_zookeeper_long` --- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- tests/clickhouse-test | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 068ff1387b3..66dac0dfe31 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6342,7 +6342,7 @@ void StorageReplicatedMergeTree::alter( "Metadata on replica is not up to date with common metadata in Zookeeper. " "It means that this replica still not applied some of previous alters." " Probably too many alters executing concurrently (highly not recommended). " - "You can retry this error"); + "You can retry the query"); /// Cannot retry automatically, because some zookeeper ops were lost on the first attempt. Will retry on DDLWorker-level. 
if (query_context->getZooKeeperMetadataTransaction()) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a3d7e0e922d..1203ad3730a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -59,6 +59,7 @@ MESSAGES_TO_RETRY = [ "is already started to be removing by another replica right now", # This is from LSan, and it indicates its own internal problem: "Unable to get registers from thread", + "You can retry", ] MAX_RETRIES = 3 From 207ef87782eca80410ed5747d971be7ddce65e6c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 18 Aug 2024 22:55:38 +0200 Subject: [PATCH 2353/2361] Fix tests --- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 3 ++- .../0_stateless/01825_type_json_ghdata_insert_select.sh | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh index fbd7d897fb8..b2f20d825dd 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-s3-storage +# ^ no-s3-storage: too memory hungry CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh index 711194e71a1..17398d9a0c1 100755 --- a/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh +++ b/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-s3-storage +# ^ no-s3-storage: too memory hungry CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 5ed3c29d4a1e1992671af7168c0a1b01757d97bb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 19 Aug 2024 01:18:12 +0200 Subject: [PATCH 2354/2361] Update tests --- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 2 +- .../queries/0_stateless/01825_type_json_ghdata_insert_select.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh index b2f20d825dd..33940caec29 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-s3-storage +# Tags: no-fasttest, no-s3-storage, long # ^ no-s3-storage: too memory hungry CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh index 17398d9a0c1..fc503b345d9 100755 --- a/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh +++ b/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-s3-storage +# Tags: no-fasttest, no-s3-storage, long # ^ no-s3-storage: too memory hungry CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) From 2a8c9b8518175d81632cf7ca48c10522b737e6b4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 19 Aug 2024 07:59:14 +0200 Subject: [PATCH 2355/2361] Fix tests --- .../0_stateless/01825_new_type_json_ghdata_insert_select.sh | 3 ++- tests/queries/0_stateless/01825_type_json_ghdata.sh | 3 
++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh index 2afec5ba7fe..568ba2bd185 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-fasttest, long +# Tags: no-fasttest, no-s3-storage, long +# ^ no-s3-storage: it is memory-hungry CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01825_type_json_ghdata.sh b/tests/queries/0_stateless/01825_type_json_ghdata.sh index 2686e2c8eb1..7e952de6c08 100755 --- a/tests/queries/0_stateless/01825_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_type_json_ghdata.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-s3-storage, long +# ^ no-s3-storage: it is memory-hungry CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From 625b186b4d699fcbdd3befdb349c132d3fa65e74 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 19 Aug 2024 10:03:38 +0000 Subject: [PATCH 2356/2361] Fix build with -DENABLE_LIBRARIES=0 --- src/DataTypes/DataTypeObject.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index d6395155397..3997e892e9f 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -18,13 +18,15 @@ #include #include +#include "config.h" + #if USE_SIMDJSON -#include +# include +#elif USE_RAPIDJSON +# include +#else +# include #endif -#if USE_RAPIDJSON -#include -#endif -#include namespace DB { @@ -105,7 +107,7 @@ SerializationPtr DataTypeObject::doGetDefaultSerialization() const switch (schema_format) { case SchemaFormat::JSON: -#ifdef USE_SIMDJSON +#if USE_SIMDJSON return std::make_shared>( std::move(typed_path_serializations), paths_to_skip, From 2456df0d57301acb0e8c74a96e6411d95329d37c Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 19 Aug 2024 13:39:29 +0200 Subject: [PATCH 2357/2361] Add a test --- .../test_dictionaries_postgresql/test.py | 53 ++++++++++++++++++- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_dictionaries_postgresql/test.py b/tests/integration/test_dictionaries_postgresql/test.py index 516ac27ea26..c845a0b3d8b 100644 --- a/tests/integration/test_dictionaries_postgresql/test.py +++ b/tests/integration/test_dictionaries_postgresql/test.py @@ -530,10 +530,59 @@ def test_bad_configuration(started_cluster): """ ) - node1.query_and_get_error( + assert "Unexpected key `dbbb`" in node1.query_and_get_error( "SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(1))" ) - assert node1.contains_in_log("Unexpected key `dbbb`") + + +def test_named_collection_from_ddl(started_cluster): + cursor = started_cluster.postgres_conn.cursor() + cursor.execute("DROP TABLE IF EXISTS test_table") + cursor.execute("CREATE TABLE test_table (id integer, value integer)") + + node1.query( + """ + DROP NAMED COLLECTION IF EXISTS pg_conn; + CREATE NAMED COLLECTION pg_conn + AS user = 'postgres', password = 'mysecretpassword', host = 'postgres1', port = 5432, database = 'postgres', table = 'test_table'; + """ + ) + + cursor.execute( + "INSERT INTO test_table SELECT i, i FROM generate_series(0, 99) as t(i)" + ) + + node1.query( + """ 
+ DROP DICTIONARY IF EXISTS postgres_dict; + CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) + PRIMARY KEY id + SOURCE(POSTGRESQL(NAME pg_conn)) + LIFETIME(MIN 1 MAX 2) + LAYOUT(HASHED()); + """ + ) + result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") + assert int(result.strip()) == 99 + + node1.query( + """ + DROP NAMED COLLECTION IF EXISTS pg_conn_2; + CREATE NAMED COLLECTION pg_conn_2 + AS user = 'postgres', password = 'mysecretpassword', host = 'postgres1', port = 5432, dbbb = 'postgres', table = 'test_table'; + """ + ) + node1.query( + """ + DROP DICTIONARY IF EXISTS postgres_dict; + CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) + PRIMARY KEY id + SOURCE(POSTGRESQL(NAME pg_conn_2)) + LIFETIME(MIN 1 MAX 2) + LAYOUT(HASHED()); + """ + ) + assert "Unexpected key `dbbb`" in node1.query_and_get_error("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") if __name__ == "__main__": From 612416d5344720dade987fd3f4b44c31986742bb Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 19 Aug 2024 11:56:22 +0000 Subject: [PATCH 2358/2361] Automatic style fix --- tests/integration/test_dictionaries_postgresql/test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_dictionaries_postgresql/test.py b/tests/integration/test_dictionaries_postgresql/test.py index c845a0b3d8b..010ecdb5084 100644 --- a/tests/integration/test_dictionaries_postgresql/test.py +++ b/tests/integration/test_dictionaries_postgresql/test.py @@ -582,7 +582,9 @@ def test_named_collection_from_ddl(started_cluster): LAYOUT(HASHED()); """ ) - assert "Unexpected key `dbbb`" in node1.query_and_get_error("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") + assert "Unexpected key `dbbb`" in node1.query_and_get_error( + "SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))" + ) if __name__ == "__main__": From a2eec5f0ae198a2f7991ce69b50a1e169d46bec3 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 19 Aug 2024 13:56:40 +0200 Subject: [PATCH 2359/2361] CI: Minor release workflow fix --- .github/workflows/release_branches.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index a5cd6321e8c..82826794ea3 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -482,7 +482,7 @@ jobs: if: ${{ !failure() }} run: | # update overall ci report - python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - name: Check Workflow results if: ${{ !cancelled() }} run: | @@ -490,5 +490,4 @@ jobs: cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - python3 ./tests/ci/ci_buddy.py --check-wf-status From ecd60eab5f218883e1bd7e113ac2e49a557b88f8 Mon Sep 17 00:00:00 2001 From: Nikita Fomichev Date: Mon, 19 Aug 2024 17:03:53 +0200 Subject: [PATCH 2360/2361] Stateless tests: increase hung check timeout --- tests/clickhouse-test | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 1203ad3730a..5fb892597f7 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2572,12 +2572,12 @@ def do_run_tests(jobs, test_suite: TestSuite): try: clickhouse_execute( args, - query="SELECT 1 /*hang up check*/", - max_http_retries=5, - timeout=20, + 
query="SELECT 1 /*hung check*/", + max_http_retries=20, + timeout=10, ) except Exception: - print("Hang up check failed") + print("Hung check failed") server_died.set() if server_died.is_set(): From cc8a40ef98f640b3eb2961ba559d65805afc2fd7 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 19 Aug 2024 17:23:50 +0200 Subject: [PATCH 2361/2361] CI: tidy build timeout 2h -> 3h --- tests/ci/ci_config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 8ce0b9fde5a..5453bffd9c6 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -163,6 +163,7 @@ class CI: tidy=True, comment="clang-tidy is used for static analysis", ), + timeout=10800, ), BuildNames.BINARY_DARWIN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig(